From 583e386e8f80a23809ee83c87158c6b8d7a19383 Mon Sep 17 00:00:00 2001 From: zeke Date: Fri, 17 Apr 2020 16:33:13 -0700 Subject: [PATCH] delete unused code --- bin/node-template/node/build.rs | 4 +- bin/node-template/node/src/chain_spec.rs | 191 +- bin/node-template/node/src/cli.rs | 8 +- bin/node-template/node/src/command.rs | 90 +- bin/node-template/node/src/main.rs | 2 +- bin/node-template/node/src/service.rs | 415 +- bin/node-template/pallets/template/src/lib.rs | 146 +- .../pallets/template/src/mock.rs | 65 +- .../pallets/template/src/tests.rs | 32 +- bin/node-template/runtime/build.rs | 12 +- bin/node-template/runtime/src/lib.rs | 430 +- bin/node/bench/src/core.rs | 140 +- bin/node/bench/src/generator.rs | 144 +- bin/node/bench/src/import.rs | 181 +- bin/node/bench/src/main.rs | 175 +- bin/node/bench/src/state_sizes.rs | 9478 ++++++++--------- bin/node/bench/src/tempdb.rs | 82 +- bin/node/bench/src/trie.rs | 352 +- bin/node/cli/bin/main.rs | 2 +- bin/node/cli/build.rs | 87 +- bin/node/cli/src/browser.rs | 49 +- bin/node/cli/src/chain_spec.rs | 752 +- bin/node/cli/src/cli.rs | 72 +- bin/node/cli/src/command.rs | 224 +- bin/node/cli/src/factory_impl.rs | 324 +- bin/node/cli/src/lib.rs | 4 +- bin/node/cli/src/service.rs | 951 +- bin/node/cli/tests/build_spec_works.rs | 22 +- bin/node/cli/tests/check_block_works.rs | 18 +- bin/node/cli/tests/common.rs | 70 +- bin/node/cli/tests/factory.rs | 26 +- .../tests/import_export_and_revert_work.rs | 69 +- bin/node/cli/tests/inspect_works.rs | 18 +- bin/node/cli/tests/purge_chain_works.rs | 24 +- .../tests/running_the_node_and_interrupt.rs | 54 +- bin/node/cli/tests/version.rs | 92 +- bin/node/executor/benches/bench.rs | 322 +- bin/node/executor/src/lib.rs | 2 +- bin/node/executor/tests/basic.rs | 1300 +-- bin/node/executor/tests/common.rs | 226 +- bin/node/executor/tests/fees.rs | 586 +- bin/node/executor/tests/submit_transaction.rs | 314 +- bin/node/inspect/src/cli.rs | 60 +- bin/node/inspect/src/command.rs | 64 +- 
bin/node/inspect/src/lib.rs | 488 +- bin/node/primitives/src/lib.rs | 5 +- bin/node/rpc-client/src/main.rs | 56 +- bin/node/rpc/src/lib.rs | 209 +- bin/node/runtime/build.rs | 12 +- bin/node/runtime/src/constants.rs | 72 +- bin/node/runtime/src/impls.rs | 642 +- bin/node/runtime/src/lib.rs | 1339 +-- bin/node/testing/src/bench.rs | 833 +- bin/node/testing/src/client.rs | 56 +- bin/node/testing/src/genesis.rs | 194 +- bin/node/testing/src/keyring.rs | 106 +- bin/node/testing/src/lib.rs | 2 +- bin/node/transaction-factory/src/lib.rs | 272 +- bin/utils/chain-spec-builder/build.rs | 6 +- bin/utils/chain-spec-builder/src/main.rs | 425 +- bin/utils/subkey/src/main.rs | 1138 +- bin/utils/subkey/src/rpc.rs | 43 +- bin/utils/subkey/src/vanity.rs | 315 +- client/api/src/backend.rs | 810 +- client/api/src/call_executor.rs | 204 +- client/api/src/client.rs | 272 +- client/api/src/execution_extensions.rs | 289 +- client/api/src/lib.rs | 77 +- client/api/src/light.rs | 538 +- client/api/src/notifications.rs | 915 +- client/api/src/proof_provider.rs | 94 +- client/authority-discovery/build.rs | 2 +- client/authority-discovery/src/addr_cache.rs | 314 +- client/authority-discovery/src/error.rs | 60 +- client/authority-discovery/src/lib.rs | 1038 +- client/authority-discovery/src/tests.rs | 710 +- .../basic-authorship/src/basic_authorship.rs | 1061 +- client/basic-authorship/src/lib.rs | 2 +- client/block-builder/src/lib.rs | 382 +- client/chain-spec/derive/src/impls.rs | 328 +- client/chain-spec/derive/src/lib.rs | 16 +- client/chain-spec/src/chain_spec.rs | 656 +- client/chain-spec/src/extension.rs | 511 +- client/chain-spec/src/lib.rs | 56 +- client/cli/src/arg_enums.rs | 174 +- client/cli/src/commands/build_spec_cmd.rs | 78 +- client/cli/src/commands/check_block_cmd.rs | 104 +- client/cli/src/commands/export_blocks_cmd.rs | 135 +- client/cli/src/commands/import_blocks_cmd.rs | 88 +- client/cli/src/commands/mod.rs | 24 +- client/cli/src/commands/purge_chain_cmd.rs | 88 +- 
client/cli/src/commands/revert_cmd.rs | 56 +- client/cli/src/commands/runcmd.rs | 952 +- client/cli/src/config.rs | 867 +- client/cli/src/error.rs | 68 +- client/cli/src/lib.rs | 426 +- client/cli/src/params/import_params.rs | 257 +- client/cli/src/params/keystore_params.rs | 80 +- client/cli/src/params/mod.rs | 48 +- client/cli/src/params/network_params.rs | 233 +- client/cli/src/params/node_key_params.rs | 337 +- client/cli/src/params/pruning_params.rs | 60 +- client/cli/src/params/shared_params.rs | 132 +- .../cli/src/params/transaction_pool_params.rs | 36 +- client/cli/src/runner.rs | 345 +- client/consensus/aura/src/digests.rs | 53 +- client/consensus/aura/src/lib.rs | 1794 ++-- client/consensus/babe/rpc/src/lib.rs | 373 +- client/consensus/babe/src/authorship.rs | 276 +- client/consensus/babe/src/aux_schema.rs | 282 +- client/consensus/babe/src/lib.rs | 2124 ++-- client/consensus/babe/src/tests.rs | 1391 +-- client/consensus/babe/src/verification.rs | 298 +- client/consensus/epochs/src/lib.rs | 1529 ++- client/consensus/epochs/src/migration.rs | 52 +- client/consensus/manual-seal/src/error.rs | 124 +- .../manual-seal/src/finalize_block.rs | 71 +- client/consensus/manual-seal/src/lib.rs | 747 +- client/consensus/manual-seal/src/rpc.rs | 222 +- .../manual-seal/src/seal_new_block.rs | 193 +- client/consensus/pow/src/lib.rs | 1096 +- client/consensus/slots/build.rs | 6 +- client/consensus/slots/src/aux_schema.rs | 403 +- client/consensus/slots/src/lib.rs | 853 +- client/consensus/slots/src/slots.rs | 234 +- client/consensus/uncles/src/lib.rs | 66 +- client/db/src/bench.rs | 565 +- client/db/src/cache/list_cache.rs | 4556 +++++--- client/db/src/cache/list_entry.rs | 330 +- client/db/src/cache/list_storage.rs | 689 +- client/db/src/cache/mod.rs | 628 +- client/db/src/changes_tries_storage.rs | 2180 ++-- client/db/src/children.rs | 146 +- client/db/src/lib.rs | 4366 ++++---- client/db/src/light.rs | 2639 +++-- client/db/src/offchain.rs | 208 +- 
client/db/src/parity_db.rs | 59 +- client/db/src/stats.rs | 240 +- client/db/src/storage_cache.rs | 3701 ++++--- client/db/src/subdb.rs | 99 +- client/db/src/upgrade.rs | 157 +- client/db/src/utils.rs | 589 +- client/executor/common/src/error.rs | 176 +- client/executor/common/src/sandbox.rs | 871 +- client/executor/common/src/util.rs | 202 +- client/executor/common/src/wasm_runtime.rs | 16 +- client/executor/runtime-test/build.rs | 12 +- client/executor/runtime-test/src/lib.rs | 635 +- client/executor/src/integration_tests/mod.rs | 818 +- .../executor/src/integration_tests/sandbox.rs | 269 +- client/executor/src/lib.rs | 76 +- client/executor/src/native_executor.rs | 664 +- client/executor/src/wasm_runtime.rs | 703 +- client/executor/wasmi/src/lib.rs | 1274 +-- client/executor/wasmtime/src/host.rs | 568 +- client/executor/wasmtime/src/imports.rs | 444 +- .../executor/wasmtime/src/instance_wrapper.rs | 534 +- .../src/instance_wrapper/globals_snapshot.rs | 170 +- client/executor/wasmtime/src/lib.rs | 5 +- client/executor/wasmtime/src/runtime.rs | 254 +- client/executor/wasmtime/src/state_holder.rs | 14 +- client/executor/wasmtime/src/util.rs | 12 +- client/finality-grandpa/src/authorities.rs | 1619 +-- client/finality-grandpa/src/aux_schema.rs | 1082 +- .../src/communication/gossip.rs | 4774 +++++---- .../finality-grandpa/src/communication/mod.rs | 1621 +-- .../src/communication/periodic.rs | 121 +- .../src/communication/tests.rs | 928 +- .../finality-grandpa/src/consensus_changes.rs | 97 +- client/finality-grandpa/src/environment.rs | 2132 ++-- client/finality-grandpa/src/finality_proof.rs | 1996 ++-- client/finality-grandpa/src/import.rs | 1175 +- client/finality-grandpa/src/justification.rs | 375 +- client/finality-grandpa/src/lib.rs | 1434 +-- client/finality-grandpa/src/light_import.rs | 1366 +-- client/finality-grandpa/src/observer.rs | 757 +- client/finality-grandpa/src/tests.rs | 3334 +++--- client/finality-grandpa/src/until_imported.rs | 1913 ++-- 
client/finality-grandpa/src/voting_rule.rs | 428 +- client/informant/src/display.rs | 249 +- client/informant/src/lib.rs | 88 +- client/keystore/src/lib.rs | 1004 +- client/network-gossip/src/bridge.rs | 427 +- client/network-gossip/src/lib.rs | 110 +- client/network-gossip/src/state_machine.rs | 1126 +- client/network-gossip/src/validator.rs | 125 +- client/network/build.rs | 6 +- client/network/src/behaviour.rs | 519 +- client/network/src/chain.rs | 48 +- client/network/src/config.rs | 811 +- client/network/src/debug_info.rs | 588 +- client/network/src/discovery.rs | 1391 +-- client/network/src/error.rs | 66 +- client/network/src/lib.rs | 10 +- client/network/src/network_state.rs | 118 +- client/network/src/on_demand_layer.rs | 329 +- client/network/src/protocol.rs | 3949 +++---- client/network/src/protocol/block_requests.rs | 538 +- client/network/src/protocol/event.rs | 100 +- client/network/src/protocol/generic_proto.rs | 2 +- .../src/protocol/generic_proto/behaviour.rs | 2272 ++-- .../src/protocol/generic_proto/handler.rs | 2 +- .../protocol/generic_proto/handler/group.rs | 938 +- .../protocol/generic_proto/handler/legacy.rs | 1087 +- .../generic_proto/handler/notif_in.rs | 411 +- .../generic_proto/handler/notif_out.rs | 688 +- .../src/protocol/generic_proto/tests.rs | 781 +- .../src/protocol/generic_proto/upgrade.rs | 14 +- .../protocol/generic_proto/upgrade/collec.rs | 82 +- .../protocol/generic_proto/upgrade/legacy.rs | 453 +- .../generic_proto/upgrade/notifications.rs | 1072 +- .../src/protocol/light_client_handler.rs | 4008 +++---- client/network/src/protocol/message.rs | 974 +- client/network/src/protocol/sync.rs | 2849 ++--- client/network/src/protocol/sync/blocks.rs | 642 +- .../src/protocol/sync/extra_requests.rs | 1120 +- client/network/src/protocol/util.rs | 79 +- client/network/src/service.rs | 2792 ++--- client/network/src/service/out_events.rs | 333 +- client/network/src/service/tests.rs | 453 +- client/network/src/transport.rs | 181 +- 
client/network/src/utils.rs | 10 +- client/network/test/src/block_import.rs | 125 +- client/network/test/src/lib.rs | 1710 +-- client/network/test/src/sync.rs | 1302 ++- client/offchain/src/api.rs | 696 +- client/offchain/src/api/http.rs | 1896 ++-- client/offchain/src/api/http_dummy.rs | 140 +- client/offchain/src/api/timestamp.rs | 53 +- client/offchain/src/lib.rs | 336 +- client/peerset/src/lib.rs | 1293 +-- client/peerset/src/peersstate.rs | 1526 +-- client/peerset/tests/fuzz.rs | 261 +- client/rpc-api/src/author/error.rs | 82 +- client/rpc-api/src/author/hash.rs | 10 +- client/rpc-api/src/author/mod.rs | 138 +- client/rpc-api/src/chain/error.rs | 43 +- client/rpc-api/src/chain/mod.rs | 172 +- client/rpc-api/src/errors.rs | 12 +- client/rpc-api/src/helpers.rs | 14 +- client/rpc-api/src/lib.rs | 2 +- client/rpc-api/src/offchain/error.rs | 30 +- client/rpc-api/src/offchain/mod.rs | 16 +- client/rpc-api/src/state/error.rs | 89 +- client/rpc-api/src/state/mod.rs | 314 +- client/rpc-api/src/subscriptions.rs | 156 +- client/rpc-api/src/system/error.rs | 38 +- client/rpc-api/src/system/helpers.rs | 141 +- client/rpc-api/src/system/mod.rs | 144 +- client/rpc-servers/src/lib.rs | 193 +- client/rpc/src/author/mod.rs | 364 +- client/rpc/src/author/tests.rs | 477 +- client/rpc/src/chain/chain_full.rs | 88 +- client/rpc/src/chain/chain_light.rs | 169 +- client/rpc/src/chain/mod.rs | 528 +- client/rpc/src/chain/tests.rs | 485 +- client/rpc/src/lib.rs | 2 +- client/rpc/src/metadata.rs | 42 +- client/rpc/src/offchain/mod.rs | 58 +- client/rpc/src/offchain/tests.rs | 26 +- client/rpc/src/state/mod.rs | 739 +- client/rpc/src/state/state_full.rs | 1124 +- client/rpc/src/state/state_light.rs | 1470 +-- client/rpc/src/state/tests.rs | 847 +- client/rpc/src/system/mod.rs | 244 +- client/rpc/src/system/tests.rs | 443 +- client/service/src/builder.rs | 2195 ++-- client/service/src/chain_ops.rs | 550 +- client/service/src/config.rs | 224 +- client/service/src/error.rs | 72 +- 
client/service/src/lib.rs | 1234 ++- client/service/src/metrics.rs | 836 +- client/service/src/status_sinks.rs | 206 +- client/service/src/task_manager.rs | 426 +- .../src/task_manager/prometheus_future.rs | 58 +- client/service/test/src/lib.rs | 1065 +- client/src/block_rules.rs | 63 +- client/src/call_executor.rs | 416 +- client/src/cht.rs | 699 +- client/src/client.rs | 7606 +++++++------ client/src/genesis.rs | 442 +- client/src/in_mem.rs | 1470 +-- client/src/leaves.rs | 662 +- client/src/lib.rs | 47 +- client/src/light/backend.rs | 1038 +- client/src/light/blockchain.rs | 549 +- client/src/light/call_executor.rs | 930 +- client/src/light/fetcher.rs | 1774 +-- client/src/light/mod.rs | 90 +- client/state-db/src/lib.rs | 1131 +- client/state-db/src/noncanonical.rs | 1971 ++-- client/state-db/src/pruning.rs | 696 +- client/state-db/src/test.rs | 107 +- client/telemetry/src/async_record.rs | 251 +- client/telemetry/src/lib.rs | 406 +- client/telemetry/src/worker.rs | 363 +- client/telemetry/src/worker/node.rs | 446 +- client/tracing/src/lib.rs | 389 +- .../transaction-pool/graph/benches/basics.rs | 269 +- .../transaction-pool/graph/src/base_pool.rs | 2235 ++-- client/transaction-pool/graph/src/future.rs | 443 +- client/transaction-pool/graph/src/lib.rs | 6 +- client/transaction-pool/graph/src/listener.rs | 218 +- client/transaction-pool/graph/src/pool.rs | 1985 ++-- client/transaction-pool/graph/src/ready.rs | 1313 +-- client/transaction-pool/graph/src/rotator.rs | 338 +- .../graph/src/validated_pool.rs | 1130 +- client/transaction-pool/graph/src/watcher.rs | 190 +- client/transaction-pool/src/api.rs | 455 +- client/transaction-pool/src/error.rs | 48 +- client/transaction-pool/src/lib.rs | 901 +- client/transaction-pool/src/metrics.rs | 68 +- client/transaction-pool/src/revalidation.rs | 637 +- client/transaction-pool/src/testing/pool.rs | 1127 +- frame/assets/src/lib.rs | 497 +- frame/aura/src/lib.rs | 301 +- frame/aura/src/mock.rs | 92 +- 
frame/aura/src/tests.rs | 10 +- frame/authority-discovery/src/lib.rs | 413 +- frame/authorship/src/lib.rs | 1297 +-- frame/babe/src/lib.rs | 963 +- frame/babe/src/mock.rs | 196 +- frame/babe/src/tests.rs | 176 +- frame/balances/src/benchmarking.rs | 258 +- frame/balances/src/lib.rs | 2154 ++-- frame/balances/src/tests.rs | 15 +- frame/balances/src/tests_composite.rs | 182 +- frame/balances/src/tests_local.rs | 199 +- frame/benchmark/src/benchmarking.rs | 204 +- frame/benchmark/src/lib.rs | 288 +- frame/benchmarking/src/analysis.rs | 527 +- frame/benchmarking/src/lib.rs | 9 +- frame/benchmarking/src/tests.rs | 317 +- frame/benchmarking/src/utils.rs | 200 +- frame/collective/src/benchmarking.rs | 258 +- frame/collective/src/lib.rs | 1977 ++-- frame/contracts/common/src/lib.rs | 22 +- frame/contracts/rpc/runtime-api/src/lib.rs | 94 +- frame/contracts/rpc/src/lib.rs | 382 +- frame/contracts/src/account_db.rs | 690 +- frame/contracts/src/exec.rs | 3271 +++--- frame/contracts/src/gas.rs | 575 +- frame/contracts/src/lib.rs | 1719 +-- frame/contracts/src/rent.rs | 542 +- frame/contracts/src/tests.rs | 3331 +++--- frame/contracts/src/wasm/code_cache.rs | 79 +- frame/contracts/src/wasm/env_def/macros.rs | 266 +- frame/contracts/src/wasm/env_def/mod.rs | 89 +- frame/contracts/src/wasm/mod.rs | 1792 ++-- frame/contracts/src/wasm/prepare.rs | 1090 +- frame/contracts/src/wasm/runtime.rs | 511 +- frame/democracy/src/benchmarking.rs | 653 +- frame/democracy/src/conviction.rs | 152 +- frame/democracy/src/lib.rs | 3022 +++--- frame/democracy/src/tests.rs | 311 +- frame/democracy/src/tests/cancellation.rs | 113 +- frame/democracy/src/tests/delegation.rs | 428 +- .../democracy/src/tests/external_proposing.rs | 490 +- frame/democracy/src/tests/fast_tracking.rs | 131 +- frame/democracy/src/tests/lock_voting.rs | 650 +- frame/democracy/src/tests/preimage.rs | 246 +- frame/democracy/src/tests/proxying.rs | 190 +- frame/democracy/src/tests/public_proposals.rs | 108 +- 
frame/democracy/src/tests/scheduling.rs | 187 +- frame/democracy/src/tests/voting.rs | 281 +- frame/democracy/src/types.rs | 339 +- frame/democracy/src/vote.rs | 267 +- frame/democracy/src/vote_threshold.rs | 153 +- frame/elections-phragmen/src/lib.rs | 4505 ++++---- frame/elections/src/lib.rs | 2153 ++-- frame/elections/src/mock.rs | 365 +- frame/elections/src/tests.rs | 3823 ++++--- frame/evm/src/backend.rs | 303 +- frame/evm/src/lib.rs | 704 +- frame/example-offchain-worker/src/lib.rs | 972 +- frame/example-offchain-worker/src/tests.rs | 305 +- frame/example/src/lib.rs | 935 +- frame/executive/src/lib.rs | 1664 +-- frame/finality-tracker/src/lib.rs | 609 +- frame/generic-asset/src/lib.rs | 2120 ++-- frame/generic-asset/src/mock.rs | 172 +- frame/generic-asset/src/tests.rs | 1642 +-- frame/grandpa/src/lib.rs | 805 +- frame/grandpa/src/mock.rs | 102 +- frame/grandpa/src/tests.rs | 526 +- frame/identity/src/benchmarking.rs | 524 +- frame/identity/src/lib.rs | 2205 ++-- frame/im-online/src/benchmarking.rs | 117 +- frame/im-online/src/lib.rs | 1108 +- frame/im-online/src/mock.rs | 183 +- frame/im-online/src/tests.rs | 566 +- frame/indices/src/address.rs | 234 +- frame/indices/src/lib.rs | 442 +- frame/indices/src/mock.rs | 108 +- frame/indices/src/tests.rs | 115 +- frame/membership/src/lib.rs | 917 +- frame/metadata/src/lib.rs | 389 +- frame/nicks/src/lib.rs | 694 +- frame/offences/benchmarking/src/lib.rs | 277 +- frame/offences/src/lib.rs | 492 +- frame/offences/src/mock.rs | 215 +- frame/offences/src/tests.rs | 437 +- frame/randomness-collective-flip/src/lib.rs | 462 +- frame/recovery/src/lib.rs | 960 +- frame/recovery/src/mock.rs | 142 +- frame/recovery/src/tests.rs | 713 +- frame/scheduler/src/lib.rs | 918 +- frame/scored-pool/src/lib.rs | 669 +- frame/scored-pool/src/mock.rs | 194 +- frame/scored-pool/src/tests.rs | 417 +- frame/session/benchmarking/src/lib.rs | 67 +- frame/session/benchmarking/src/mock.rs | 201 +- frame/session/src/historical.rs | 699 +- 
frame/session/src/lib.rs | 1161 +- frame/session/src/mock.rs | 269 +- frame/session/src/tests.rs | 517 +- frame/society/src/lib.rs | 2698 ++--- frame/society/src/mock.rs | 287 +- frame/society/src/tests.rs | 1698 +-- frame/staking/reward-curve/src/lib.rs | 557 +- frame/staking/reward-curve/src/log.rs | 105 +- frame/staking/reward-curve/tests/test.rs | 40 +- frame/staking/src/benchmarking.rs | 865 +- frame/staking/src/inflation.rs | 173 +- frame/staking/src/lib.rs | 5367 +++++----- frame/staking/src/mock.rs | 1560 +-- frame/staking/src/offchain_election.rs | 304 +- frame/staking/src/slashing.rs | 1455 +-- frame/staking/src/testing_utils.rs | 478 +- frame/staking/src/tests.rs | 9304 ++++++++-------- frame/sudo/src/lib.rs | 243 +- .../procedural/src/construct_runtime/mod.rs | 628 +- .../procedural/src/construct_runtime/parse.rs | 606 +- frame/support/procedural/src/lib.rs | 8 +- .../src/storage/genesis_config/builder_def.rs | 118 +- .../genesis_config/genesis_config_def.rs | 254 +- .../src/storage/genesis_config/mod.rs | 326 +- .../support/procedural/src/storage/getters.rs | 106 +- .../procedural/src/storage/instance_trait.rs | 209 +- .../procedural/src/storage/metadata.rs | 379 +- frame/support/procedural/src/storage/mod.rs | 714 +- frame/support/procedural/src/storage/parse.rs | 719 +- .../procedural/src/storage/storage_struct.rs | 367 +- .../procedural/src/storage/store_trait.rs | 65 +- .../procedural/tools/derive/src/lib.rs | 230 +- frame/support/procedural/tools/src/lib.rs | 90 +- frame/support/procedural/tools/src/syn_ext.rs | 276 +- frame/support/src/debug.rs | 110 +- frame/support/src/dispatch.rs | 535 +- frame/support/src/error.rs | 4 +- frame/support/src/event.rs | 487 +- frame/support/src/hash.rs | 200 +- frame/support/src/inherent.rs | 7 +- frame/support/src/lib.rs | 905 +- frame/support/src/metadata.rs | 687 +- frame/support/src/origin.rs | 80 +- frame/support/src/storage/child.rs | 207 +- .../src/storage/generator/double_map.rs | 1044 +- 
frame/support/src/storage/generator/map.rs | 632 +- frame/support/src/storage/generator/mod.rs | 149 +- frame/support/src/storage/generator/value.rs | 314 +- frame/support/src/storage/hashed.rs | 102 +- frame/support/src/storage/migration.rs | 272 +- frame/support/src/storage/mod.rs | 1029 +- frame/support/src/storage/unhashed.rs | 50 +- frame/support/src/traits.rs | 1926 ++-- frame/support/src/unsigned.rs | 135 +- frame/support/src/weights.rs | 561 +- frame/support/test/src/lib.rs | 12 +- .../test/tests/construct_runtime_ui.rs | 8 +- frame/support/test/tests/decl_error.rs | 154 +- frame/support/test/tests/decl_storage.rs | 1174 +- frame/support/test/tests/decl_storage_ui.rs | 8 +- frame/support/test/tests/final_keys.rs | 327 +- frame/support/test/tests/genesisconfig.rs | 22 +- frame/support/test/tests/instance.rs | 633 +- frame/support/test/tests/issue2219.rs | 276 +- frame/support/test/tests/reserved_keyword.rs | 8 +- frame/support/test/tests/system.rs | 72 +- frame/system/benches/bench.rs | 138 +- frame/system/rpc/runtime-api/src/lib.rs | 16 +- frame/system/src/lib.rs | 3856 +++---- frame/system/src/offchain.rs | 507 +- frame/timestamp/src/benchmarking.rs | 36 +- frame/timestamp/src/lib.rs | 439 +- .../rpc/runtime-api/src/lib.rs | 140 +- frame/transaction-payment/rpc/src/lib.rs | 114 +- frame/transaction-payment/src/lib.rs | 1630 +-- frame/treasury/src/benchmarking.rs | 370 +- frame/treasury/src/lib.rs | 1094 +- frame/treasury/src/tests.rs | 814 +- frame/utility/src/benchmarking.rs | 270 +- frame/utility/src/lib.rs | 944 +- frame/utility/src/tests.rs | 795 +- frame/vesting/src/benchmarking.rs | 168 +- frame/vesting/src/lib.rs | 1256 +-- primitives/allocator/src/error.rs | 29 +- primitives/allocator/src/freeing_bump.rs | 1263 +-- primitives/allocator/src/lib.rs | 2 +- .../api/proc-macro/src/decl_runtime_apis.rs | 1630 +-- .../api/proc-macro/src/impl_runtime_apis.rs | 1293 +-- primitives/api/proc-macro/src/lib.rs | 8 +- .../proc-macro/src/mock_impl_runtime_apis.rs 
| 631 +- primitives/api/proc-macro/src/utils.rs | 361 +- primitives/api/src/lib.rs | 425 +- primitives/api/test/benches/bench.rs | 97 +- primitives/api/test/tests/decl_and_impl.rs | 230 +- primitives/api/test/tests/runtime_calls.rs | 266 +- primitives/api/test/tests/trybuild.rs | 8 +- primitives/application-crypto/src/ed25519.rs | 78 +- primitives/application-crypto/src/lib.rs | 601 +- primitives/application-crypto/src/sr25519.rs | 78 +- primitives/application-crypto/src/traits.rs | 168 +- .../application-crypto/test/src/ed25519.rs | 36 +- primitives/application-crypto/test/src/lib.rs | 2 +- .../application-crypto/test/src/sr25519.rs | 39 +- primitives/arithmetic/benches/bench.rs | 78 +- primitives/arithmetic/fuzzer/src/biguint.rs | 308 +- .../fuzzer/src/per_thing_rational.rs | 176 +- .../arithmetic/fuzzer/src/rational128.rs | 72 +- primitives/arithmetic/src/biguint.rs | 1302 +-- primitives/arithmetic/src/fixed128.rs | 1405 +-- primitives/arithmetic/src/fixed64.rs | 563 +- primitives/arithmetic/src/helpers_128bit.rs | 137 +- primitives/arithmetic/src/lib.rs | 44 +- primitives/arithmetic/src/per_things.rs | 604 +- primitives/arithmetic/src/rational128.rs | 707 +- primitives/arithmetic/src/traits.rs | 265 +- primitives/authority-discovery/src/lib.rs | 55 +- primitives/authorship/src/lib.rs | 79 +- primitives/block-builder/src/lib.rs | 42 +- primitives/blockchain/src/backend.rs | 463 +- primitives/blockchain/src/error.rs | 242 +- primitives/blockchain/src/header_metadata.rs | 381 +- primitives/blockchain/src/lib.rs | 4 +- primitives/chain-spec/src/lib.rs | 22 +- primitives/consensus/aura/src/inherents.rs | 97 +- primitives/consensus/aura/src/lib.rs | 88 +- primitives/consensus/babe/src/digests.rs | 237 +- primitives/consensus/babe/src/inherents.rs | 70 +- primitives/consensus/babe/src/lib.rs | 110 +- .../consensus/common/src/block_import.rs | 572 +- .../consensus/common/src/block_validation.rs | 40 +- primitives/consensus/common/src/error.rs | 131 +- 
primitives/consensus/common/src/evaluation.rs | 96 +- .../consensus/common/src/import_queue.rs | 344 +- .../common/src/import_queue/basic_queue.rs | 815 +- .../common/src/import_queue/buffered_link.rs | 241 +- primitives/consensus/common/src/lib.rs | 273 +- .../consensus/common/src/offline_tracker.rs | 203 +- .../consensus/common/src/select_chain.rs | 38 +- primitives/consensus/pow/src/lib.rs | 46 +- primitives/consensus/vrf/src/schnorrkel.rs | 230 +- primitives/core/benches/bench.rs | 155 +- primitives/core/src/changes_trie.rs | 603 +- primitives/core/src/crypto.rs | 1690 +-- primitives/core/src/ecdsa.rs | 949 +- primitives/core/src/ed25519.rs | 997 +- primitives/core/src/hash.rs | 148 +- primitives/core/src/hasher.rs | 34 +- primitives/core/src/hashing.rs | 138 +- primitives/core/src/hexdisplay.rs | 104 +- primitives/core/src/lib.rs | 295 +- primitives/core/src/offchain/mod.rs | 1189 ++- primitives/core/src/offchain/storage.rs | 70 +- primitives/core/src/offchain/testing.rs | 543 +- primitives/core/src/sandbox.rs | 112 +- primitives/core/src/sr25519.rs | 1308 +-- primitives/core/src/tasks.rs | 33 +- primitives/core/src/testing.rs | 373 +- primitives/core/src/traits.rs | 430 +- primitives/core/src/u32_trait.rs | 550 +- primitives/core/src/uint.rs | 134 +- primitives/database/src/kvdb.rs | 55 +- primitives/database/src/lib.rs | 275 +- primitives/database/src/mem.rs | 83 +- primitives/debug-derive/src/impls.rs | 368 +- primitives/debug-derive/src/lib.rs | 3 +- primitives/debug-derive/tests/tests.rs | 67 +- primitives/externalities/src/extensions.rs | 141 +- primitives/externalities/src/lib.rs | 374 +- primitives/externalities/src/scope_limited.rs | 7 +- primitives/finality-grandpa/src/lib.rs | 244 +- primitives/finality-tracker/src/lib.rs | 52 +- primitives/inherents/src/lib.rs | 911 +- primitives/io/src/batch_verifier.rs | 303 +- primitives/io/src/lib.rs | 2164 ++-- primitives/keyring/src/ed25519.rs | 278 +- primitives/keyring/src/lib.rs | 4 +- 
primitives/keyring/src/sr25519.rs | 292 +- primitives/offchain/src/lib.rs | 24 +- primitives/panic-handler/src/lib.rs | 223 +- primitives/phragmen/benches/phragmen.rs | 315 +- primitives/phragmen/compact/src/assignment.rs | 235 +- primitives/phragmen/compact/src/lib.rs | 249 +- primitives/phragmen/compact/src/staked.rs | 237 +- primitives/phragmen/fuzzer/src/common.rs | 16 +- primitives/phragmen/fuzzer/src/equalize.rs | 227 +- primitives/phragmen/fuzzer/src/reduce.rs | 184 +- primitives/phragmen/src/helpers.rs | 111 +- primitives/phragmen/src/lib.rs | 1083 +- primitives/phragmen/src/mock.rs | 671 +- primitives/phragmen/src/node.rs | 450 +- primitives/phragmen/src/reduce.rs | 1913 ++-- primitives/phragmen/src/tests.rs | 1984 ++-- primitives/rpc/src/lib.rs | 18 +- primitives/rpc/src/list.rs | 56 +- primitives/rpc/src/number.rs | 94 +- .../runtime-interface/proc-macro/src/lib.rs | 34 +- .../proc-macro/src/pass_by/codec.rs | 44 +- .../proc-macro/src/pass_by/enum_.rs | 136 +- .../proc-macro/src/pass_by/inner.rs | 137 +- .../bare_function_interface.rs | 314 +- .../host_function_interface.rs | 636 +- .../proc-macro/src/runtime_interface/mod.rs | 53 +- .../src/runtime_interface/trait_decl_impl.rs | 239 +- .../runtime-interface/proc-macro/src/utils.rs | 380 +- primitives/runtime-interface/src/host.rs | 38 +- primitives/runtime-interface/src/impls.rs | 394 +- primitives/runtime-interface/src/lib.rs | 17 +- primitives/runtime-interface/src/pass_by.rs | 275 +- primitives/runtime-interface/src/util.rs | 40 +- primitives/runtime-interface/src/wasm.rs | 125 +- .../test-wasm-deprecated/build.rs | 12 +- .../test-wasm-deprecated/src/lib.rs | 28 +- .../runtime-interface/test-wasm/build.rs | 12 +- .../runtime-interface/test-wasm/src/lib.rs | 416 +- primitives/runtime-interface/test/src/lib.rs | 115 +- primitives/runtime-interface/tests/ui.rs | 8 +- primitives/runtime/src/curve.rs | 264 +- primitives/runtime/src/generic/block.rs | 104 +- .../runtime/src/generic/checked_extrinsic.rs | 
103 +- primitives/runtime/src/generic/digest.rs | 684 +- primitives/runtime/src/generic/era.rs | 319 +- primitives/runtime/src/generic/header.rs | 387 +- primitives/runtime/src/generic/mod.rs | 56 +- primitives/runtime/src/generic/tests.rs | 61 +- .../src/generic/unchecked_extrinsic.rs | 712 +- primitives/runtime/src/lib.rs | 821 +- primitives/runtime/src/offchain/http.rs | 911 +- primitives/runtime/src/offchain/storage.rs | 242 +- .../runtime/src/random_number_generator.rs | 102 +- primitives/runtime/src/runtime_string.rs | 112 +- primitives/runtime/src/testing.rs | 576 +- primitives/runtime/src/traits.rs | 1844 ++-- .../runtime/src/transaction_validity.rs | 651 +- primitives/sandbox/src/lib.rs | 240 +- primitives/serializer/src/lib.rs | 13 +- primitives/session/src/lib.rs | 56 +- primitives/staking/src/lib.rs | 1 - primitives/staking/src/offence.rs | 207 +- primitives/state-machine/src/backend.rs | 709 +- primitives/state-machine/src/basic.rs | 735 +- .../state-machine/src/changes_trie/build.rs | 2243 ++-- .../src/changes_trie/build_cache.rs | 410 +- .../src/changes_trie/build_iterator.rs | 813 +- .../src/changes_trie/changes_iterator.rs | 1340 ++- .../state-machine/src/changes_trie/input.rs | 234 +- .../state-machine/src/changes_trie/mod.rs | 622 +- .../state-machine/src/changes_trie/prune.rs | 320 +- .../state-machine/src/changes_trie/storage.rs | 330 +- .../src/changes_trie/surface_iterator.rs | 501 +- primitives/state-machine/src/error.rs | 21 +- primitives/state-machine/src/ext.rs | 1598 +-- .../state-machine/src/in_memory_backend.rs | 695 +- primitives/state-machine/src/lib.rs | 1884 ++-- .../state-machine/src/overlayed_changes.rs | 1790 ++-- .../state-machine/src/proving_backend.rs | 794 +- primitives/state-machine/src/stats.rs | 160 +- primitives/state-machine/src/testing.rs | 419 +- primitives/state-machine/src/trie_backend.rs | 668 +- .../state-machine/src/trie_backend_essence.rs | 834 +- primitives/std/src/lib.rs | 32 +- primitives/storage/src/lib.rs | 
376 +- primitives/test-primitives/src/lib.rs | 51 +- primitives/timestamp/src/lib.rs | 104 +- primitives/transaction-pool/src/error.rs | 94 +- primitives/transaction-pool/src/lib.rs | 4 +- primitives/transaction-pool/src/pool.rs | 371 +- .../transaction-pool/src/runtime_api.rs | 38 +- primitives/trie/benches/bench.rs | 18 +- primitives/trie/src/error.rs | 48 +- primitives/trie/src/lib.rs | 1349 +-- primitives/trie/src/node_codec.rs | 462 +- primitives/trie/src/node_header.rs | 148 +- primitives/trie/src/storage_proof.rs | 130 +- primitives/trie/src/trie_stream.rs | 200 +- primitives/utils/src/lib.rs | 2 +- primitives/utils/src/metrics.rs | 37 +- primitives/utils/src/mpsc.rs | 403 +- primitives/version/src/lib.rs | 375 +- primitives/wasm-interface/src/lib.rs | 581 +- primitives/wasm-interface/src/wasmi_impl.rs | 90 +- test-utils/client/src/client_ext.rs | 271 +- test-utils/client/src/lib.rs | 399 +- test-utils/runtime/build.rs | 20 +- .../runtime/client/src/block_builder_ext.rs | 92 +- test-utils/runtime/client/src/lib.rs | 553 +- test-utils/runtime/client/src/trait_tests.rs | 908 +- test-utils/runtime/src/genesismap.rs | 165 +- test-utils/runtime/src/lib.rs | 1687 +-- test-utils/runtime/src/system.rs | 839 +- .../runtime/transaction-pool/src/lib.rs | 451 +- test-utils/src/lib.rs | 20 +- utils/browser/src/lib.rs | 317 +- utils/build-script-utils/src/git.rs | 190 +- utils/build-script-utils/src/lib.rs | 2 +- utils/build-script-utils/src/version.rs | 71 +- utils/fork-tree/src/lib.rs | 2905 +++-- utils/frame/benchmarking-cli/src/command.rs | 207 +- utils/frame/benchmarking-cli/src/lib.rs | 78 +- utils/frame/rpc/support/src/lib.rs | 100 +- utils/frame/rpc/system/src/lib.rs | 366 +- utils/prometheus/src/lib.rs | 229 +- utils/prometheus/src/networking.rs | 68 +- utils/wasm-builder-runner/src/lib.rs | 641 +- utils/wasm-builder/src/lib.rs | 255 +- utils/wasm-builder/src/prerequisites.rs | 74 +- utils/wasm-builder/src/wasm_project.rs | 859 +- 719 files changed, 213542 
insertions(+), 193019 deletions(-) diff --git a/bin/node-template/node/build.rs b/bin/node-template/node/build.rs index e3bfe3116b..f9d839f9be 100644 --- a/bin/node-template/node/build.rs +++ b/bin/node-template/node/build.rs @@ -1,7 +1,7 @@ use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; fn main() { - generate_cargo_keys(); + generate_cargo_keys(); - rerun_if_git_head_changed(); + rerun_if_git_head_changed(); } diff --git a/bin/node-template/node/src/chain_spec.rs b/bin/node-template/node/src/chain_spec.rs index fb53edd9a1..ac58731291 100644 --- a/bin/node-template/node/src/chain_spec.rs +++ b/bin/node-template/node/src/chain_spec.rs @@ -1,12 +1,12 @@ -use sp_core::{Pair, Public, sr25519}; use node_template_runtime::{ - AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, - SudoConfig, SystemConfig, WASM_BINARY, Signature + AccountId, AuraConfig, BalancesConfig, GenesisConfig, GrandpaConfig, Signature, SudoConfig, + SystemConfig, WASM_BINARY, }; +use sc_service::ChainType; use sp_consensus_aura::sr25519::AuthorityId as AuraId; +use sp_core::{sr25519, Pair, Public}; use sp_finality_grandpa::AuthorityId as GrandpaId; -use sp_runtime::traits::{Verify, IdentifyAccount}; -use sc_service::ChainType; +use sp_runtime::traits::{IdentifyAccount, Verify}; // Note this is the URL for the telemetry server //const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; @@ -16,109 +16,116 @@ pub type ChainSpec = sc_service::GenericChainSpec; /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() } type AccountPublic = ::Signer; /// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId where 
- AccountPublic: From<::Public> +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, { - AccountPublic::from(get_from_seed::(seed)).into_account() + AccountPublic::from(get_from_seed::(seed)).into_account() } /// Helper function to generate an authority key for Aura pub fn authority_keys_from_seed(s: &str) -> (AuraId, GrandpaId) { - ( - get_from_seed::(s), - get_from_seed::(s), - ) + (get_from_seed::(s), get_from_seed::(s)) } pub fn development_config() -> ChainSpec { - ChainSpec::from_genesis( - "Development", - "dev", - ChainType::Development, - || testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - ], - true, - ), - vec![], - None, - None, - None, - None, - ) + ChainSpec::from_genesis( + "Development", + "dev", + ChainType::Development, + || { + testnet_genesis( + vec![authority_keys_from_seed("Alice")], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + ], + true, + ) + }, + vec![], + None, + None, + None, + None, + ) } pub fn local_testnet_config() -> ChainSpec { - ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - ChainType::Local, - || testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - authority_keys_from_seed("Bob"), - ], - get_account_id_from_seed::("Alice"), - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - 
get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ], - true, - ), - vec![], - None, - None, - None, - None, - ) + ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + ChainType::Local, + || { + testnet_genesis( + vec![ + authority_keys_from_seed("Alice"), + authority_keys_from_seed("Bob"), + ], + get_account_id_from_seed::("Alice"), + vec![ + get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ], + true, + ) + }, + vec![], + None, + None, + None, + None, + ) } -fn testnet_genesis(initial_authorities: Vec<(AuraId, GrandpaId)>, - root_key: AccountId, - endowed_accounts: Vec, - _enable_println: bool) -> GenesisConfig { - GenesisConfig { - system: Some(SystemConfig { - code: WASM_BINARY.to_vec(), - changes_trie_config: Default::default(), - }), - balances: Some(BalancesConfig { - balances: endowed_accounts.iter().cloned().map(|k|(k, 1 << 60)).collect(), - }), - aura: Some(AuraConfig { - authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), - }), - grandpa: Some(GrandpaConfig { - authorities: initial_authorities.iter().map(|x| (x.1.clone(), 1)).collect(), - }), - sudo: Some(SudoConfig { - key: root_key, - }), - } +fn testnet_genesis( + initial_authorities: Vec<(AuraId, GrandpaId)>, + root_key: AccountId, + endowed_accounts: Vec, + _enable_println: bool, +) -> GenesisConfig { + GenesisConfig { + system: Some(SystemConfig { + code: WASM_BINARY.to_vec(), + 
changes_trie_config: Default::default(), + }), + balances: Some(BalancesConfig { + balances: endowed_accounts + .iter() + .cloned() + .map(|k| (k, 1 << 60)) + .collect(), + }), + aura: Some(AuraConfig { + authorities: initial_authorities.iter().map(|x| (x.0.clone())).collect(), + }), + grandpa: Some(GrandpaConfig { + authorities: initial_authorities + .iter() + .map(|x| (x.1.clone(), 1)) + .collect(), + }), + sudo: Some(SudoConfig { key: root_key }), + } } diff --git a/bin/node-template/node/src/cli.rs b/bin/node-template/node/src/cli.rs index 0091ef7d75..afdf49cbcd 100644 --- a/bin/node-template/node/src/cli.rs +++ b/bin/node-template/node/src/cli.rs @@ -3,9 +3,9 @@ use structopt::StructOpt; #[derive(Debug, StructOpt)] pub struct Cli { - #[structopt(subcommand)] - pub subcommand: Option, + #[structopt(subcommand)] + pub subcommand: Option, - #[structopt(flatten)] - pub run: RunCmd, + #[structopt(flatten)] + pub run: RunCmd, } diff --git a/bin/node-template/node/src/command.rs b/bin/node-template/node/src/command.rs index 7950df9a0b..4e11f157ff 100644 --- a/bin/node-template/node/src/command.rs +++ b/bin/node-template/node/src/command.rs @@ -21,61 +21,61 @@ use sc_cli::SubstrateCli; use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; impl SubstrateCli for Cli { - fn impl_name() -> &'static str { - "Substrate Node" - } + fn impl_name() -> &'static str { + "Substrate Node" + } - fn impl_version() -> &'static str { - env!("SUBSTRATE_CLI_IMPL_VERSION") - } + fn impl_version() -> &'static str { + env!("SUBSTRATE_CLI_IMPL_VERSION") + } - fn description() -> &'static str { - env!("CARGO_PKG_DESCRIPTION") - } + fn description() -> &'static str { + env!("CARGO_PKG_DESCRIPTION") + } - fn author() -> &'static str { - env!("CARGO_PKG_AUTHORS") - } + fn author() -> &'static str { + env!("CARGO_PKG_AUTHORS") + } - fn support_url() -> &'static str { - "support.anonymous.an" - } + fn support_url() -> &'static str { + "support.anonymous.an" + } - fn copyright_start_year() -> 
i32 { - 2017 - } + fn copyright_start_year() -> i32 { + 2017 + } - fn executable_name() -> &'static str { - env!("CARGO_PKG_NAME") - } + fn executable_name() -> &'static str { + env!("CARGO_PKG_NAME") + } - fn load_spec(&self, id: &str) -> Result, String> { - Ok(match id { - "dev" => Box::new(chain_spec::development_config()), - "" | "local" => Box::new(chain_spec::local_testnet_config()), - path => Box::new(chain_spec::ChainSpec::from_json_file( - std::path::PathBuf::from(path), - )?), - }) - } + fn load_spec(&self, id: &str) -> Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()), + "" | "local" => Box::new(chain_spec::local_testnet_config()), + path => Box::new(chain_spec::ChainSpec::from_json_file( + std::path::PathBuf::from(path), + )?), + }) + } } /// Parse and run command line arguments pub fn run() -> sc_cli::Result<()> { - let cli = Cli::from_args(); + let cli = Cli::from_args(); - match &cli.subcommand { - Some(subcommand) => { - let runner = cli.create_runner(subcommand)?; - runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) - } - None => { - let runner = cli.create_runner(&cli.run)?; - runner.run_node( - service::new_light, - service::new_full, - node_template_runtime::VERSION - ) - } - } + match &cli.subcommand { + Some(subcommand) => { + let runner = cli.create_runner(subcommand)?; + runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + } + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node( + service::new_light, + service::new_full, + node_template_runtime::VERSION, + ) + } + } } diff --git a/bin/node-template/node/src/main.rs b/bin/node-template/node/src/main.rs index 369e6932a0..0e4e82f5b9 100644 --- a/bin/node-template/node/src/main.rs +++ b/bin/node-template/node/src/main.rs @@ -8,5 +8,5 @@ mod cli; mod command; fn main() -> sc_cli::Result<()> { - command::run() + command::run() } diff --git a/bin/node-template/node/src/service.rs 
b/bin/node-template/node/src/service.rs index b8e4d73db6..bd530e0f23 100644 --- a/bin/node-template/node/src/service.rs +++ b/bin/node-template/node/src/service.rs @@ -1,16 +1,18 @@ //! Service and ServiceFactory implementation. Specialized wrapper over substrate service. -use std::sync::Arc; -use std::time::Duration; +use node_template_runtime::{self, opaque::Block, RuntimeApi}; use sc_client::LongestChain; use sc_client_api::ExecutorProvider; -use node_template_runtime::{self, opaque::Block, RuntimeApi}; -use sc_service::{error::{Error as ServiceError}, AbstractService, Configuration, ServiceBuilder}; -use sp_inherents::InherentDataProviders; use sc_executor::native_executor_instance; pub use sc_executor::NativeExecutor; -use sp_consensus_aura::sr25519::{AuthorityPair as AuraPair}; -use sc_finality_grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; +use sc_finality_grandpa::{ + self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider, +}; +use sc_service::{error::Error as ServiceError, AbstractService, Configuration, ServiceBuilder}; +use sp_consensus_aura::sr25519::AuthorityPair as AuraPair; +use sp_inherents::InherentDataProviders; +use std::sync::Arc; +use std::time::Duration; // Our native executor instance. native_executor_instance!( @@ -24,204 +26,215 @@ native_executor_instance!( /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. macro_rules! new_full_start { - ($config:expr) => {{ - use std::sync::Arc; - let mut import_setup = None; - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - - let builder = sc_service::ServiceBuilder::new_full::< - node_template_runtime::opaque::Block, node_template_runtime::RuntimeApi, crate::service::Executor - >($config)? - .with_select_chain(|_config, backend| { - Ok(sc_client::LongestChain::new(backend.clone())) - })? 
- .with_transaction_pool(|config, client, _fetcher, prometheus_registry| { - let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); - Ok(sc_transaction_pool::BasicPool::new(config, std::sync::Arc::new(pool_api), prometheus_registry)) - })? - .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { - let select_chain = select_chain.take() - .ok_or_else(|| sc_service::Error::SelectChainRequired)?; - - let (grandpa_block_import, grandpa_link) = - sc_finality_grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; - - let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( - grandpa_block_import.clone(), client.clone(), - ); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair>( - sc_consensus_aura::slot_duration(&*client)?, - aura_block_import, - Some(Box::new(grandpa_block_import.clone())), - None, - client, - inherent_data_providers.clone(), - )?; - - import_setup = Some((grandpa_block_import, grandpa_link)); - - Ok(import_queue) - })?; - - (builder, import_setup, inherent_data_providers) - }} + ($config:expr) => {{ + use std::sync::Arc; + let mut import_setup = None; + let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + + let builder = sc_service::ServiceBuilder::new_full::< + node_template_runtime::opaque::Block, + node_template_runtime::RuntimeApi, + crate::service::Executor, + >($config)? + .with_select_chain(|_config, backend| Ok(sc_client::LongestChain::new(backend.clone())))? + .with_transaction_pool(|config, client, _fetcher, prometheus_registry| { + let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); + Ok(sc_transaction_pool::BasicPool::new( + config, + std::sync::Arc::new(pool_api), + prometheus_registry, + )) + })? 
+ .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { + let select_chain = select_chain + .take() + .ok_or_else(|| sc_service::Error::SelectChainRequired)?; + + let (grandpa_block_import, grandpa_link) = sc_finality_grandpa::block_import( + client.clone(), + &(client.clone() as Arc<_>), + select_chain, + )?; + + let aura_block_import = sc_consensus_aura::AuraBlockImport::<_, _, _, AuraPair>::new( + grandpa_block_import.clone(), + client.clone(), + ); + + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair>( + sc_consensus_aura::slot_duration(&*client)?, + aura_block_import, + Some(Box::new(grandpa_block_import.clone())), + None, + client, + inherent_data_providers.clone(), + )?; + + import_setup = Some((grandpa_block_import, grandpa_link)); + + Ok(import_queue) + })?; + + (builder, import_setup, inherent_data_providers) + }}; } /// Builds a new service for a full client. -pub fn new_full(config: Configuration) - -> Result -{ - let role = config.role.clone(); - let force_authoring = config.force_authoring; - let name = config.network.node_name.clone(); - let disable_grandpa = config.disable_grandpa; - - let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); - - let (block_import, grandpa_link) = - import_setup.take() - .expect("Link Half and Block Import are present for Full Services or setup failed before. qed"); - - let service = builder - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? 
- .build()?; - - if role.is_authority() { - let proposer = - sc_basic_authorship::ProposerFactory::new(service.client(), service.transaction_pool()); - - let client = service.client(); - let select_chain = service.select_chain() - .ok_or(ServiceError::SelectChainRequired)?; - - let can_author_with = - sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); - - let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( - sc_consensus_aura::slot_duration(&*client)?, - client, - select_chain, - block_import, - proposer, - service.network(), - inherent_data_providers.clone(), - force_authoring, - service.keystore(), - can_author_with, - )?; - - // the AURA authoring task is considered essential, i.e. if it - // fails we take down the service with it. - service.spawn_essential_task("aura", aura); - } - - // if the node isn't actively participating in consensus then it doesn't - // need a keystore, regardless of which protocol we use below. - let keystore = if role.is_authority() { - Some(service.keystore()) - } else { - None - }; - - let grandpa_config = sc_finality_grandpa::Config { - // FIXME #1578 make this available through chainspec - gossip_duration: Duration::from_millis(333), - justification_period: 512, - name: Some(name), - observer_enabled: false, - keystore, - is_authority: role.is_network_authority(), - }; - - let enable_grandpa = !disable_grandpa; - if enable_grandpa { - // start the full GRANDPA voter - // NOTE: non-authorities could run the GRANDPA observer protocol, but at - // this point the full voter should provide better guarantees of block - // and vote data availability than the observer. The observer has not - // been tested extensively yet and having most nodes in a network run it - // could lead to finality stalls. 
- let grandpa_config = sc_finality_grandpa::GrandpaParams { - config: grandpa_config, - link: grandpa_link, - network: service.network(), - inherent_data_providers: inherent_data_providers.clone(), - telemetry_on_connect: Some(service.telemetry_on_connect_stream()), - voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), - prometheus_registry: service.prometheus_registry() - }; - - // the GRANDPA voter task is considered infallible, i.e. - // if it fails we take down the service with it. - service.spawn_essential_task( - "grandpa-voter", - sc_finality_grandpa::run_grandpa_voter(grandpa_config)? - ); - } else { - sc_finality_grandpa::setup_disabled_grandpa( - service.client(), - &inherent_data_providers, - service.network(), - )?; - } - - Ok(service) +pub fn new_full(config: Configuration) -> Result { + let role = config.role.clone(); + let force_authoring = config.force_authoring; + let name = config.network.node_name.clone(); + let disable_grandpa = config.disable_grandpa; + + let (builder, mut import_setup, inherent_data_providers) = new_full_start!(config); + + let (block_import, grandpa_link) = import_setup.take().expect( + "Link Half and Block Import are present for Full Services or setup failed before. qed", + ); + + let service = builder + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })? 
+ .build()?; + + if role.is_authority() { + let proposer = + sc_basic_authorship::ProposerFactory::new(service.client(), service.transaction_pool()); + + let client = service.client(); + let select_chain = service + .select_chain() + .ok_or(ServiceError::SelectChainRequired)?; + + let can_author_with = + sp_consensus::CanAuthorWithNativeVersion::new(client.executor().clone()); + + let aura = sc_consensus_aura::start_aura::<_, _, _, _, _, AuraPair, _, _, _>( + sc_consensus_aura::slot_duration(&*client)?, + client, + select_chain, + block_import, + proposer, + service.network(), + inherent_data_providers.clone(), + force_authoring, + service.keystore(), + can_author_with, + )?; + + // the AURA authoring task is considered essential, i.e. if it + // fails we take down the service with it. + service.spawn_essential_task("aura", aura); + } + + // if the node isn't actively participating in consensus then it doesn't + // need a keystore, regardless of which protocol we use below. + let keystore = if role.is_authority() { + Some(service.keystore()) + } else { + None + }; + + let grandpa_config = sc_finality_grandpa::Config { + // FIXME #1578 make this available through chainspec + gossip_duration: Duration::from_millis(333), + justification_period: 512, + name: Some(name), + observer_enabled: false, + keystore, + is_authority: role.is_network_authority(), + }; + + let enable_grandpa = !disable_grandpa; + if enable_grandpa { + // start the full GRANDPA voter + // NOTE: non-authorities could run the GRANDPA observer protocol, but at + // this point the full voter should provide better guarantees of block + // and vote data availability than the observer. The observer has not + // been tested extensively yet and having most nodes in a network run it + // could lead to finality stalls. 
+ let grandpa_config = sc_finality_grandpa::GrandpaParams { + config: grandpa_config, + link: grandpa_link, + network: service.network(), + inherent_data_providers: inherent_data_providers.clone(), + telemetry_on_connect: Some(service.telemetry_on_connect_stream()), + voting_rule: sc_finality_grandpa::VotingRulesBuilder::default().build(), + prometheus_registry: service.prometheus_registry(), + }; + + // the GRANDPA voter task is considered infallible, i.e. + // if it fails we take down the service with it. + service.spawn_essential_task( + "grandpa-voter", + sc_finality_grandpa::run_grandpa_voter(grandpa_config)?, + ); + } else { + sc_finality_grandpa::setup_disabled_grandpa( + service.client(), + &inherent_data_providers, + service.network(), + )?; + } + + Ok(service) } /// Builds a new service for a light client. -pub fn new_light(config: Configuration) - -> Result -{ - let inherent_data_providers = InherentDataProviders::new(); - - ServiceBuilder::new_light::(config)? - .with_select_chain(|_config, backend| { - Ok(LongestChain::new(backend.clone())) - })? - .with_transaction_pool(|config, client, fetcher, prometheus_registry| { - let fetcher = fetcher - .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; - - let pool_api = sc_transaction_pool::LightChainApi::new(client.clone(), fetcher.clone()); - let pool = sc_transaction_pool::BasicPool::with_revalidation_type( - config, Arc::new(pool_api), prometheus_registry, sc_transaction_pool::RevalidationType::Light, - ); - Ok(pool) - })? 
- .with_import_queue_and_fprb(|_config, client, backend, fetcher, _select_chain, _tx_pool| { - let fetch_checker = fetcher - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = sc_finality_grandpa::light_block_import( - client.clone(), - backend, - &(client.clone() as Arc<_>), - Arc::new(fetch_checker), - )?; - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair>( - sc_consensus_aura::slot_duration(&*client)?, - grandpa_block_import, - None, - Some(Box::new(finality_proof_import)), - client, - inherent_data_providers.clone(), - )?; - - Ok((import_queue, finality_proof_request_builder)) - })? - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? - .build() +pub fn new_light(config: Configuration) -> Result { + let inherent_data_providers = InherentDataProviders::new(); + + ServiceBuilder::new_light::(config)? + .with_select_chain(|_config, backend| Ok(LongestChain::new(backend.clone())))? + .with_transaction_pool(|config, client, fetcher, prometheus_registry| { + let fetcher = fetcher + .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; + + let pool_api = sc_transaction_pool::LightChainApi::new(client.clone(), fetcher.clone()); + let pool = sc_transaction_pool::BasicPool::with_revalidation_type( + config, + Arc::new(pool_api), + prometheus_registry, + sc_transaction_pool::RevalidationType::Light, + ); + Ok(pool) + })? 
+ .with_import_queue_and_fprb( + |_config, client, backend, fetcher, _select_chain, _tx_pool| { + let fetch_checker = fetcher + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| { + "Trying to start light import queue without active fetch checker" + })?; + let grandpa_block_import = sc_finality_grandpa::light_block_import( + client.clone(), + backend, + &(client.clone() as Arc<_>), + Arc::new(fetch_checker), + )?; + let finality_proof_import = grandpa_block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + let import_queue = sc_consensus_aura::import_queue::<_, _, _, AuraPair>( + sc_consensus_aura::slot_duration(&*client)?, + grandpa_block_import, + None, + Some(Box::new(finality_proof_import)), + client, + inherent_data_providers.clone(), + )?; + + Ok((import_queue, finality_proof_request_builder)) + }, + )? + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })? + .build() } diff --git a/bin/node-template/pallets/template/src/lib.rs b/bin/node-template/pallets/template/src/lib.rs index adddbac21b..fa2566bab1 100644 --- a/bin/node-template/pallets/template/src/lib.rs +++ b/bin/node-template/pallets/template/src/lib.rs @@ -1,5 +1,6 @@ #![cfg_attr(not(feature = "std"), no_std)] +use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; /// A FRAME pallet template with necessary imports /// Feel free to remove or edit this file as needed. 
@@ -8,9 +9,7 @@ /// For more guidance on Substrate FRAME, see the example pallet /// https://github.com/paritytech/substrate/blob/master/frame/example/src/lib.rs - -use frame_support::{decl_module, decl_storage, decl_event, decl_error, dispatch}; -use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, dispatch}; use frame_system::{self as system, ensure_signed}; #[cfg(test)] @@ -21,90 +20,93 @@ mod tests; /// The pallet's configuration trait. pub trait Trait: system::Trait { - // Add other types and constants required to configure this pallet. + // Add other types and constants required to configure this pallet. - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; } // This pallet's storage items. decl_storage! { - // It is important to update your storage name so that your pallet's - // storage items are isolated from other pallets. - // ---------------------------------vvvvvvvvvvvvvv - trait Store for Module as TemplateModule { - // Just a dummy storage item. - // Here we are declaring a StorageValue, `Something` as a Option - // `get(fn something)` is the default getter which returns either the stored `u32` or `None` if nothing stored - Something get(fn something): Option; - } + // It is important to update your storage name so that your pallet's + // storage items are isolated from other pallets. + // ---------------------------------vvvvvvvvvvvvvv + trait Store for Module as TemplateModule { + // Just a dummy storage item. + // Here we are declaring a StorageValue, `Something` as a Option + // `get(fn something)` is the default getter which returns either the stored `u32` or `None` if nothing stored + Something get(fn something): Option; + } } // The pallet's events decl_event!( - pub enum Event where AccountId = ::AccountId { - /// Just a dummy event. 
- /// Event `Something` is declared with a parameter of the type `u32` and `AccountId` - /// To emit this event, we call the deposit function, from our runtime functions - SomethingStored(u32, AccountId), - } + pub enum Event + where + AccountId = ::AccountId, + { + /// Just a dummy event. + /// Event `Something` is declared with a parameter of the type `u32` and `AccountId` + /// To emit this event, we call the deposit function, from our runtime functions + SomethingStored(u32, AccountId), + } ); // The pallet's errors decl_error! { - pub enum Error for Module { - /// Value was None - NoneValue, - /// Value reached maximum and cannot be incremented further - StorageOverflow, - } + pub enum Error for Module { + /// Value was None + NoneValue, + /// Value reached maximum and cannot be incremented further + StorageOverflow, + } } // The pallet's dispatchable functions. decl_module! { - /// The module declaration. - pub struct Module for enum Call where origin: T::Origin { - // Initializing errors - // this includes information about your errors in the node's metadata. - // it is needed only if you are using errors in your pallet - type Error = Error; - - // Initializing events - // this is needed only if you are using events in your pallet - fn deposit_event() = default; - - /// Just a dummy entry point. - /// function that can be called by the external world as an extrinsics call - /// takes a parameter of the type `AccountId`, stores it, and emits an event - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn do_something(origin, something: u32) -> dispatch::DispatchResult { - // Check it was signed and get the signer. See also: ensure_root and ensure_none - let who = ensure_signed(origin)?; - - // Code to execute when something calls this. 
- // For example: the following line stores the passed in u32 in the storage - Something::put(something); - - // Here we are raising the Something event - Self::deposit_event(RawEvent::SomethingStored(something, who)); - Ok(()) - } - - /// Another dummy entry point. - /// takes no parameters, attempts to increment storage value, and possibly throws an error - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn cause_error(origin) -> dispatch::DispatchResult { - // Check it was signed and get the signer. See also: ensure_root and ensure_none - let _who = ensure_signed(origin)?; - - match Something::get() { - None => Err(Error::::NoneValue)?, - Some(old) => { - let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; - Something::put(new); - Ok(()) - }, - } - } - } + /// The module declaration. + pub struct Module for enum Call where origin: T::Origin { + // Initializing errors + // this includes information about your errors in the node's metadata. + // it is needed only if you are using errors in your pallet + type Error = Error; + + // Initializing events + // this is needed only if you are using events in your pallet + fn deposit_event() = default; + + /// Just a dummy entry point. + /// function that can be called by the external world as an extrinsics call + /// takes a parameter of the type `AccountId`, stores it, and emits an event + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn do_something(origin, something: u32) -> dispatch::DispatchResult { + // Check it was signed and get the signer. See also: ensure_root and ensure_none + let who = ensure_signed(origin)?; + + // Code to execute when something calls this. + // For example: the following line stores the passed in u32 in the storage + Something::put(something); + + // Here we are raising the Something event + Self::deposit_event(RawEvent::SomethingStored(something, who)); + Ok(()) + } + + /// Another dummy entry point. 
+ /// takes no parameters, attempts to increment storage value, and possibly throws an error + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn cause_error(origin) -> dispatch::DispatchResult { + // Check it was signed and get the signer. See also: ensure_root and ensure_none + let _who = ensure_signed(origin)?; + + match Something::get() { + None => Err(Error::::NoneValue)?, + Some(old) => { + let new = old.checked_add(1).ok_or(Error::::StorageOverflow)?; + Something::put(new); + Ok(()) + }, + } + } + } } diff --git a/bin/node-template/pallets/template/src/mock.rs b/bin/node-template/pallets/template/src/mock.rs index fdabf7d03a..32e0f9d8e9 100644 --- a/bin/node-template/pallets/template/src/mock.rs +++ b/bin/node-template/pallets/template/src/mock.rs @@ -1,15 +1,17 @@ // Creating mock runtime here use crate::{Module, Trait}; -use sp_core::H256; use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; +use frame_system as system; +use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, Perbill, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -use frame_system as system; impl_outer_origin! { - pub enum Origin for Test {} + pub enum Origin for Test {} } // For testing the pallet, we construct most of a mock runtime. This means @@ -18,40 +20,43 @@ impl_outer_origin! { #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } impl system::Trait for Test { - type Origin = Origin; - type Call = (); - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl Trait for Test { - type Event = (); + type Event = (); } pub type TemplateModule = Module; // This function basically just builds a genesis storage key/value store according to // our desired mockup. 
pub fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::default().build_storage::().unwrap().into() + system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() } diff --git a/bin/node-template/pallets/template/src/tests.rs b/bin/node-template/pallets/template/src/tests.rs index ec123a50c7..46957f4b8c 100644 --- a/bin/node-template/pallets/template/src/tests.rs +++ b/bin/node-template/pallets/template/src/tests.rs @@ -1,26 +1,26 @@ // Tests to be written here -use crate::{Error, mock::*}; -use frame_support::{assert_ok, assert_noop}; +use crate::{mock::*, Error}; +use frame_support::{assert_noop, assert_ok}; #[test] fn it_works_for_default_value() { - new_test_ext().execute_with(|| { - // Just a dummy test for the dummy function `do_something` - // calling the `do_something` function with a value 42 - assert_ok!(TemplateModule::do_something(Origin::signed(1), 42)); - // asserting that the stored value is equal to what we stored - assert_eq!(TemplateModule::something(), Some(42)); - }); + new_test_ext().execute_with(|| { + // Just a dummy test for the dummy function `do_something` + // calling the `do_something` function with a value 42 + assert_ok!(TemplateModule::do_something(Origin::signed(1), 42)); + // asserting that the stored value is equal to what we stored + assert_eq!(TemplateModule::something(), Some(42)); + }); } #[test] fn correct_error_for_none_value() { - new_test_ext().execute_with(|| { - // Ensure the correct error is thrown on None value - assert_noop!( - TemplateModule::cause_error(Origin::signed(1)), - Error::::NoneValue - ); - }); + new_test_ext().execute_with(|| { + // Ensure the correct error is thrown on None value + assert_noop!( + TemplateModule::cause_error(Origin::signed(1)), + Error::::NoneValue + ); + }); } diff --git a/bin/node-template/runtime/build.rs b/bin/node-template/runtime/build.rs index 39f7f56feb..498cc7f280 100644 --- a/bin/node-template/runtime/build.rs +++ 
b/bin/node-template/runtime/build.rs @@ -1,10 +1,10 @@ use wasm_builder_runner::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates("1.0.9") - .export_heap_base() - .import_memory() - .build() + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates("1.0.9") + .export_heap_base() + .import_memory() + .build() } diff --git a/bin/node-template/runtime/src/lib.rs b/bin/node-template/runtime/src/lib.rs index 863f263077..d46a713021 100644 --- a/bin/node-template/runtime/src/lib.rs +++ b/bin/node-template/runtime/src/lib.rs @@ -2,40 +2,39 @@ #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit="256"] +#![recursion_limit = "256"] // Make the WASM binary available. #[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); -use sp_std::prelude::*; +use grandpa::fg_primitives; +use grandpa::AuthorityList as GrandpaAuthorityList; +use sp_api::impl_runtime_apis; +use sp_consensus_aura::sr25519::AuthorityId as AuraId; use sp_core::OpaqueMetadata; -use sp_runtime::{ - ApplyExtrinsicResult, generic, create_runtime_str, impl_opaque_keys, MultiSignature, - transaction_validity::{TransactionValidity, TransactionSource}, -}; use sp_runtime::traits::{ - BlakeTwo256, Block as BlockT, IdentityLookup, Verify, ConvertInto, IdentifyAccount + BlakeTwo256, Block as BlockT, ConvertInto, IdentifyAccount, IdentityLookup, Verify, }; -use sp_api::impl_runtime_apis; -use sp_consensus_aura::sr25519::AuthorityId as AuraId; -use grandpa::AuthorityList as GrandpaAuthorityList; -use grandpa::fg_primitives; -use sp_version::RuntimeVersion; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, MultiSignature, +}; +use sp_std::prelude::*; #[cfg(feature = "std")] use sp_version::NativeVersion; +use 
sp_version::RuntimeVersion; // A few exports that help ease life for downstream crates. -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use timestamp::Call as TimestampCall; pub use balances::Call as BalancesCall; -pub use sp_runtime::{Permill, Perbill}; pub use frame_support::{ - StorageValue, construct_runtime, parameter_types, - traits::Randomness, - weights::Weight, + construct_runtime, parameter_types, traits::Randomness, weights::Weight, StorageValue, }; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; +pub use sp_runtime::{Perbill, Permill}; +pub use timestamp::Call as TimestampCall; /// Importing a template pallet pub use template; @@ -71,34 +70,34 @@ pub type DigestItem = generic::DigestItem; /// of data like extrinsics, allowing for them to continue syncing the network through upgrades /// to even the core data structures. pub mod opaque { - use super::*; - - pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; - - /// Opaque block header type. - pub type Header = generic::Header; - /// Opaque block type. - pub type Block = generic::Block; - /// Opaque block identifier type. - pub type BlockId = generic::BlockId; - - impl_opaque_keys! { - pub struct SessionKeys { - pub aura: Aura, - pub grandpa: Grandpa, - } - } + use super::*; + + pub use sp_runtime::OpaqueExtrinsic as UncheckedExtrinsic; + + /// Opaque block header type. + pub type Header = generic::Header; + /// Opaque block type. + pub type Block = generic::Block; + /// Opaque block identifier type. + pub type BlockId = generic::BlockId; + + impl_opaque_keys! { + pub struct SessionKeys { + pub aura: Aura, + pub grandpa: Grandpa, + } + } } /// This runtime version. 
pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("node-template"), - impl_name: create_runtime_str!("node-template"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + spec_name: create_runtime_str!("node-template"), + impl_name: create_runtime_str!("node-template"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, }; pub const MILLISECS_PER_BLOCK: u64 = 6000; @@ -113,121 +112,121 @@ pub const DAYS: BlockNumber = HOURS * 24; /// The version information used to identify this runtime when compiled natively. #[cfg(feature = "std")] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } } parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2_000_000_000_000; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; - pub const Version: RuntimeVersion = VERSION; + pub const BlockHashCount: BlockNumber = 250; + /// We allow for 2 seconds of compute with a 6 second average block time. + pub const MaximumBlockWeight: Weight = 2_000_000_000_000; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; + pub const Version: RuntimeVersion = VERSION; } impl system::Trait for Runtime { - /// The identifier used to distinguish between accounts. - type AccountId = AccountId; - /// The aggregated dispatch type that is available for extrinsics. - type Call = Call; - /// The lookup mechanism to get account ID from whatever is passed in dispatchers. 
- type Lookup = IdentityLookup; - /// The index type for storing how many extrinsics an account has signed. - type Index = Index; - /// The index type for blocks. - type BlockNumber = BlockNumber; - /// The type for hashing blocks and tries. - type Hash = Hash; - /// The hashing algorithm used. - type Hashing = BlakeTwo256; - /// The header type. - type Header = generic::Header; - /// The ubiquitous event type. - type Event = Event; - /// The ubiquitous origin type. - type Origin = Origin; - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount = BlockHashCount; - /// Maximum weight of each block. - type MaximumBlockWeight = MaximumBlockWeight; - /// The weight of database operations that the runtime can invoke. - type DbWeight = (); - /// Maximum size of all encoded transactions (in bytes) that are allowed in one block. - type MaximumBlockLength = MaximumBlockLength; - /// Portion of the block weight that is available to all normal transactions. - type AvailableBlockRatio = AvailableBlockRatio; - /// Version of the runtime. - type Version = Version; - /// Converts a module to the index of the module in `construct_runtime!`. - /// - /// This type is being generated by `construct_runtime!`. - type ModuleToIndex = ModuleToIndex; - /// What to do if a new account is created. - type OnNewAccount = (); - /// What to do if an account is fully reaped from the system. - type OnKilledAccount = (); - /// The data to be stored in an account. - type AccountData = balances::AccountData; + /// The identifier used to distinguish between accounts. + type AccountId = AccountId; + /// The aggregated dispatch type that is available for extrinsics. + type Call = Call; + /// The lookup mechanism to get account ID from whatever is passed in dispatchers. + type Lookup = IdentityLookup; + /// The index type for storing how many extrinsics an account has signed. + type Index = Index; + /// The index type for blocks. 
+ type BlockNumber = BlockNumber; + /// The type for hashing blocks and tries. + type Hash = Hash; + /// The hashing algorithm used. + type Hashing = BlakeTwo256; + /// The header type. + type Header = generic::Header; + /// The ubiquitous event type. + type Event = Event; + /// The ubiquitous origin type. + type Origin = Origin; + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount = BlockHashCount; + /// Maximum weight of each block. + type MaximumBlockWeight = MaximumBlockWeight; + /// The weight of database operations that the runtime can invoke. + type DbWeight = (); + /// Maximum size of all encoded transactions (in bytes) that are allowed in one block. + type MaximumBlockLength = MaximumBlockLength; + /// Portion of the block weight that is available to all normal transactions. + type AvailableBlockRatio = AvailableBlockRatio; + /// Version of the runtime. + type Version = Version; + /// Converts a module to the index of the module in `construct_runtime!`. + /// + /// This type is being generated by `construct_runtime!`. + type ModuleToIndex = ModuleToIndex; + /// What to do if a new account is created. + type OnNewAccount = (); + /// What to do if an account is fully reaped from the system. + type OnKilledAccount = (); + /// The data to be stored in an account. + type AccountData = balances::AccountData; } impl aura::Trait for Runtime { - type AuthorityId = AuraId; + type AuthorityId = AuraId; } impl grandpa::Trait for Runtime { - type Event = Event; + type Event = Event; } parameter_types! { - pub const MinimumPeriod: u64 = SLOT_DURATION / 2; + pub const MinimumPeriod: u64 = SLOT_DURATION / 2; } impl timestamp::Trait for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = MinimumPeriod; + /// A timestamp: milliseconds since the unix epoch. 
+ type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = MinimumPeriod; } parameter_types! { - pub const ExistentialDeposit: u128 = 500; + pub const ExistentialDeposit: u128 = 500; } impl balances::Trait for Runtime { - /// The type for recording an account's balance. - type Balance = Balance; - /// The ubiquitous event type. - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + /// The type for recording an account's balance. + type Balance = Balance; + /// The ubiquitous event type. + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const TransactionBaseFee: Balance = 0; - pub const TransactionByteFee: Balance = 1; + pub const TransactionBaseFee: Balance = 0; + pub const TransactionByteFee: Balance = 1; } impl transaction_payment::Trait for Runtime { - type Currency = balances::Module; - type OnTransactionPayment = (); - type TransactionBaseFee = TransactionBaseFee; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = ConvertInto; - type FeeMultiplierUpdate = (); + type Currency = balances::Module; + type OnTransactionPayment = (); + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = ConvertInto; + type FeeMultiplierUpdate = (); } impl sudo::Trait for Runtime { - type Event = Event; - type Call = Call; + type Event = Event; + type Call = Call; } /// Used for the module template in `./template.rs` impl template::Trait for Runtime { - type Event = Event; + type Event = Event; } construct_runtime!( @@ -261,106 +260,107 @@ pub type SignedBlock = generic::SignedBlock; pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. 
pub type SignedExtra = ( - system::CheckVersion, - system::CheckGenesis, - system::CheckEra, - system::CheckNonce, - system::CheckWeight, - transaction_payment::ChargeTransactionPayment + system::CheckVersion, + system::CheckGenesis, + system::CheckEra, + system::CheckNonce, + system::CheckWeight, + transaction_payment::ChargeTransactionPayment, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive, Runtime, AllModules>; +pub type Executive = + frame_executive::Executive, Runtime, AllModules>; impl_runtime_apis! { - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - Runtime::metadata().into() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents( - block: Block, - data: sp_inherents::InherentData, - ) -> sp_inherents::CheckInherentsResult { - data.check_extrinsics(&block) - } - - fn random_seed() -> ::Hash { - RandomnessCollectiveFlip::random_seed() - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx) - } - } - - impl 
sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { - Aura::slot_duration() - } - - fn authorities() -> Vec { - Aura::authorities() - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - opaque::SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - opaque::SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl fg_primitives::GrandpaApi for Runtime { - fn grandpa_authorities() -> GrandpaAuthorityList { - Grandpa::grandpa_authorities() - } - } + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: sp_inherents::InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents( + block: Block, + data: sp_inherents::InherentData, + ) -> sp_inherents::CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + RandomnessCollectiveFlip::random_seed() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn 
offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> u64 { + Aura::slot_duration() + } + + fn authorities() -> Vec { + Aura::authorities() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + opaque::SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + opaque::SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + } } diff --git a/bin/node/bench/src/core.rs b/bin/node/bench/src/core.rs index 9105fcbd01..b313919378 100644 --- a/bin/node/bench/src/core.rs +++ b/bin/node/bench/src/core.rs @@ -14,80 +14,87 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::{fmt, borrow::{Cow, ToOwned}}; use serde::Serialize; +use std::{ + borrow::{Cow, ToOwned}, + fmt, +}; pub struct Path(Vec); impl Path { - pub fn new(initial: &'static [&'static str]) -> Self { - Path(initial.iter().map(|x| x.to_string()).collect()) - } + pub fn new(initial: &'static [&'static str]) -> Self { + Path(initial.iter().map(|x| x.to_string()).collect()) + } } impl Path { - pub fn push(&mut self, item: &str) { - self.0.push(item.to_string()); - } + pub fn push(&mut self, item: &str) { + self.0.push(item.to_string()); + } - pub fn full(&self) -> String { - self.0.iter().fold(String::new(), |mut val, next| { val.push_str("::"); val.push_str(next); val }) - } + pub fn full(&self) -> String { + self.0.iter().fold(String::new(), |mut val, next| { + val.push_str("::"); + val.push_str(next); + val + }) + } - pub fn has(&self, path: &str) -> bool { - self.full().contains(path) - } + pub fn has(&self, path: &str) -> bool { + self.full().contains(path) + } } pub trait 
BenchmarkDescription { - fn path(&self) -> Path; + fn path(&self) -> Path; - fn setup(self: Box) -> Box; + fn setup(self: Box) -> Box; - fn name(&self) -> Cow<'static, str>; + fn name(&self) -> Cow<'static, str>; } pub trait Benchmark { - fn run(&mut self, mode: Mode) -> std::time::Duration; + fn run(&mut self, mode: Mode) -> std::time::Duration; } #[derive(Debug, Clone, Serialize)] pub struct BenchmarkOutput { - name: String, - raw_average: u64, - average: u64, + name: String, + raw_average: u64, + average: u64, } struct NsFormatter(u64); impl fmt::Display for NsFormatter { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let v = self.0; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let v = self.0; - if v < 100 { - return write!(f, "{} ns", v) - } + if v < 100 { + return write!(f, "{} ns", v); + } - if self.0 < 100_000 { - return write!(f, "{:.1} µs", v as f64 / 1000.0) - } + if self.0 < 100_000 { + return write!(f, "{:.1} µs", v as f64 / 1000.0); + } - if self.0 < 1_000_000 { - return write!(f, "{:.2} ms", v as f64 / 1_000_000.0) - } + if self.0 < 1_000_000 { + return write!(f, "{:.2} ms", v as f64 / 1_000_000.0); + } - if self.0 < 100_000_000 { - return write!(f, "{} ms", v as f64 / 1_000_000.0) - } + if self.0 < 100_000_000 { + return write!(f, "{} ms", v as f64 / 1_000_000.0); + } - write!(f, "{:.2} s", v as f64 / 1_000_000_000.0) - } + write!(f, "{:.2} s", v as f64 / 1_000_000_000.0) + } } #[derive(Debug, Clone, Copy, PartialEq)] pub enum Mode { - Regular, - Profile, + Regular, + Profile, } impl std::str::FromStr for Mode { @@ -104,38 +111,35 @@ impl std::str::FromStr for Mode { impl fmt::Display for BenchmarkOutput { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!( - f, - "{}: avg {}, w_avg {}", - self.name, - NsFormatter(self.raw_average), - NsFormatter(self.average), - ) + f, + "{}: avg {}, w_avg {}", + self.name, + NsFormatter(self.raw_average), + NsFormatter(self.average), + ) } } -pub fn run_benchmark( - benchmark: 
Box, - mode: Mode, -) -> BenchmarkOutput { - let name = benchmark.name().to_owned(); - let mut benchmark = benchmark.setup(); - - let mut durations: Vec = vec![]; - for _ in 0..50 { - let duration = benchmark.run(mode); - durations.push(duration.as_nanos()); - } - - durations.sort(); - - let raw_average = (durations.iter().sum::() / (durations.len() as u128)) as u64; - let average = (durations.iter().skip(10).take(30).sum::() / 30) as u64; - - BenchmarkOutput { - name: name.into(), - raw_average, - average, - } +pub fn run_benchmark(benchmark: Box, mode: Mode) -> BenchmarkOutput { + let name = benchmark.name().to_owned(); + let mut benchmark = benchmark.setup(); + + let mut durations: Vec = vec![]; + for _ in 0..50 { + let duration = benchmark.run(mode); + durations.push(duration.as_nanos()); + } + + durations.sort(); + + let raw_average = (durations.iter().sum::() / (durations.len() as u128)) as u64; + let average = (durations.iter().skip(10).take(30).sum::() / 30) as u64; + + BenchmarkOutput { + name: name.into(), + raw_average, + average, + } } macro_rules! matrix( @@ -157,4 +161,4 @@ macro_rules! matrix( } }; () => { vec![] } -); \ No newline at end of file +); diff --git a/bin/node/bench/src/generator.rs b/bin/node/bench/src/generator.rs index 8a161c5705..8bd932fcc8 100644 --- a/bin/node/bench/src/generator.rs +++ b/bin/node/bench/src/generator.rs @@ -16,92 +16,98 @@ use std::{collections::HashMap, sync::Arc}; +use hash_db::{AsHashDB, HashDB, Hasher as _, Prefix}; use kvdb::KeyValueDB; use node_primitives::Hash; -use sp_trie::{DBValue, trie_types::TrieDBMut, TrieMut}; -use hash_db::{HashDB, AsHashDB, Prefix, Hasher as _}; +use sp_trie::{trie_types::TrieDBMut, DBValue, TrieMut}; type Hasher = sp_core::Blake2Hasher; /// Will fill your database `db` with trie data from `key_values` and /// return root. 
pub fn generate_trie( - db: Arc, - key_values: impl IntoIterator, Vec)>, + db: Arc, + key_values: impl IntoIterator, Vec)>, ) -> Hash { - let mut root = Hash::default(); - - let (db, overlay) = { - let mut overlay = HashMap::new(); - overlay.insert( - hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").expect("null key is valid"), - Some(vec![0]), - ); - let mut trie_generator = TrieGenerator { db, overlay: &mut overlay }; - { - let mut trie_db = TrieDBMut::new(&mut trie_generator, &mut root); - - for (key, value) in key_values { - trie_db.insert(&key, &value).expect("trie insertion failed"); - } - - trie_db.commit(); - } - ( trie_generator.db, overlay ) - }; - - let mut transaction = db.transaction(); - for (key, value) in overlay.into_iter() { - match value { - Some(value) => transaction.put(0, &key[..], &value[..]), - None => transaction.delete(0, &key[..]), - } - } - db.write(transaction).expect("Failed to write transaction"); - - root + let mut root = Hash::default(); + + let (db, overlay) = { + let mut overlay = HashMap::new(); + overlay.insert( + hex::decode("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314") + .expect("null key is valid"), + Some(vec![0]), + ); + let mut trie_generator = TrieGenerator { + db, + overlay: &mut overlay, + }; + { + let mut trie_db = TrieDBMut::new(&mut trie_generator, &mut root); + + for (key, value) in key_values { + trie_db.insert(&key, &value).expect("trie insertion failed"); + } + + trie_db.commit(); + } + (trie_generator.db, overlay) + }; + + let mut transaction = db.transaction(); + for (key, value) in overlay.into_iter() { + match value { + Some(value) => transaction.put(0, &key[..], &value[..]), + None => transaction.delete(0, &key[..]), + } + } + db.write(transaction).expect("Failed to write transaction"); + + root } /// Immutable generated trie database with root. 
struct TrieGenerator<'a> { - db: Arc, - overlay: &'a mut HashMap, Option>>, + db: Arc, + overlay: &'a mut HashMap, Option>>, } impl<'a> AsHashDB for TrieGenerator<'a> { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { + &*self + } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { - &mut *self - } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn HashDB + 'b) { + &mut *self + } } impl<'a> HashDB for TrieGenerator<'a> { - fn get(&self, key: &Hash, prefix: Prefix) -> Option { - let key = sp_trie::prefixed_key::(key, prefix); - if let Some(value) = self.overlay.get(&key) { - return value.clone(); - } - self.db.get(0, &key).expect("Database backend error") - } - - fn contains(&self, hash: &Hash, prefix: Prefix) -> bool { - self.get(hash, prefix).is_some() - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> Hash { - let key = Hasher::hash(value); - self.emplace(key, prefix, value.to_vec()); - key - } - - fn emplace(&mut self, key: Hash, prefix: Prefix, value: DBValue) { - let key = sp_trie::prefixed_key::(&key, prefix); - self.overlay.insert(key, Some(value)); - } - - fn remove(&mut self, key: &Hash, prefix: Prefix) { - let key = sp_trie::prefixed_key::(key, prefix); - self.overlay.insert(key, None); - } -} \ No newline at end of file + fn get(&self, key: &Hash, prefix: Prefix) -> Option { + let key = sp_trie::prefixed_key::(key, prefix); + if let Some(value) = self.overlay.get(&key) { + return value.clone(); + } + self.db.get(0, &key).expect("Database backend error") + } + + fn contains(&self, hash: &Hash, prefix: Prefix) -> bool { + self.get(hash, prefix).is_some() + } + + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> Hash { + let key = Hasher::hash(value); + self.emplace(key, prefix, value.to_vec()); + key + } + + fn emplace(&mut self, key: Hash, prefix: Prefix, value: DBValue) { + let key = sp_trie::prefixed_key::(&key, prefix); + self.overlay.insert(key, Some(value)); 
+ } + + fn remove(&mut self, key: &Hash, prefix: Prefix) { + let key = sp_trie::prefixed_key::(key, prefix); + self.overlay.insert(key, None); + } +} diff --git a/bin/node/bench/src/import.rs b/bin/node/bench/src/import.rs index 2dea292ab0..a56da77283 100644 --- a/bin/node/bench/src/import.rs +++ b/bin/node/bench/src/import.rs @@ -30,111 +30,116 @@ use std::borrow::Cow; -use node_testing::bench::{BenchDb, Profile, BlockType, KeyTypes}; use node_primitives::Block; +use node_testing::bench::{BenchDb, BlockType, KeyTypes, Profile}; use sc_client_api::backend::Backend; use sp_runtime::generic::BlockId; -use crate::core::{self, Path, Mode}; +use crate::core::{self, Mode, Path}; #[derive(Clone, Copy, Debug)] -pub enum SizeType { Small, Medium, Large } +pub enum SizeType { + Small, + Medium, + Large, +} impl SizeType { - fn transactions(&self) -> usize { - match self { - SizeType::Small => 10, - SizeType::Medium => 100, - SizeType::Large => 500, - } - } + fn transactions(&self) -> usize { + match self { + SizeType::Small => 10, + SizeType::Medium => 100, + SizeType::Large => 500, + } + } } pub struct ImportBenchmarkDescription { - pub profile: Profile, - pub key_types: KeyTypes, - pub size: SizeType, + pub profile: Profile, + pub key_types: KeyTypes, + pub size: SizeType, } pub struct ImportBenchmark { - profile: Profile, - database: BenchDb, - block: Block, + profile: Profile, + database: BenchDb, + block: Block, } impl core::BenchmarkDescription for ImportBenchmarkDescription { - fn path(&self) -> Path { - - let mut path = Path::new(&["node", "import"]); - - match self.profile { - Profile::Wasm => path.push("wasm"), - Profile::Native => path.push("native"), - } - - match self.key_types { - KeyTypes::Sr25519 => path.push("sr25519"), - KeyTypes::Ed25519 => path.push("ed25519"), - } - - match self.size { - SizeType::Small => path.push("small"), - SizeType::Medium => path.push("medium"), - SizeType::Large => path.push("large"), - } - - path - } - - fn setup(self: Box) -> 
Box { - let profile = self.profile; - let mut bench_db = BenchDb::with_key_types(self.size.transactions(), self.key_types); - let block = bench_db.generate_block(BlockType::RandomTransfers(self.size.transactions())); - Box::new(ImportBenchmark { - database: bench_db, - block, - profile, - }) - } - - fn name(&self) -> Cow<'static, str> { - match self.profile { - Profile::Wasm => "Import benchmark (random transfers, wasm)".into(), - Profile::Native => "Import benchmark (random transfers, native)".into(), - } - } + fn path(&self) -> Path { + let mut path = Path::new(&["node", "import"]); + + match self.profile { + Profile::Wasm => path.push("wasm"), + Profile::Native => path.push("native"), + } + + match self.key_types { + KeyTypes::Sr25519 => path.push("sr25519"), + KeyTypes::Ed25519 => path.push("ed25519"), + } + + match self.size { + SizeType::Small => path.push("small"), + SizeType::Medium => path.push("medium"), + SizeType::Large => path.push("large"), + } + + path + } + + fn setup(self: Box) -> Box { + let profile = self.profile; + let mut bench_db = BenchDb::with_key_types(self.size.transactions(), self.key_types); + let block = bench_db.generate_block(BlockType::RandomTransfers(self.size.transactions())); + Box::new(ImportBenchmark { + database: bench_db, + block, + profile, + }) + } + + fn name(&self) -> Cow<'static, str> { + match self.profile { + Profile::Wasm => "Import benchmark (random transfers, wasm)".into(), + Profile::Native => "Import benchmark (random transfers, native)".into(), + } + } } impl core::Benchmark for ImportBenchmark { - fn run(&mut self, mode: Mode) -> std::time::Duration { - let mut context = self.database.create_context(self.profile); - - let _ = context.client.runtime_version_at(&BlockId::Number(0)) - .expect("Failed to get runtime version") - .spec_version; - - let start = std::time::Instant::now(); - context.import_block(self.block.clone()); - let elapsed = start.elapsed(); - - if mode == Mode::Profile { - 
std::thread::park_timeout(std::time::Duration::from_secs(2)); - } - - log::info!( - target: "bench-logistics", - "imported block with {} tx, took: {:#?}", - self.block.extrinsics.len(), - elapsed, - ); - - log::info!( - target: "bench-logistics", - "usage info: {}", - context.backend.usage_info() - .expect("RocksDB backend always provides usage info!"), - ); - - elapsed - } -} \ No newline at end of file + fn run(&mut self, mode: Mode) -> std::time::Duration { + let mut context = self.database.create_context(self.profile); + + let _ = context + .client + .runtime_version_at(&BlockId::Number(0)) + .expect("Failed to get runtime version") + .spec_version; + + let start = std::time::Instant::now(); + context.import_block(self.block.clone()); + let elapsed = start.elapsed(); + + if mode == Mode::Profile { + std::thread::park_timeout(std::time::Duration::from_secs(2)); + } + + log::info!( + target: "bench-logistics", + "imported block with {} tx, took: {:#?}", + self.block.extrinsics.len(), + elapsed, + ); + + log::info!( + target: "bench-logistics", + "usage info: {}", + context.backend.usage_info() + .expect("RocksDB backend always provides usage info!"), + ); + + elapsed + } +} diff --git a/bin/node/bench/src/main.rs b/bin/node/bench/src/main.rs index c821746b33..99599e32e8 100644 --- a/bin/node/bench/src/main.rs +++ b/bin/node/bench/src/main.rs @@ -14,100 +14,107 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-#[macro_use] mod core; -mod import; -mod trie; +#[macro_use] +mod core; mod generator; -mod tempdb; +mod import; mod state_sizes; +mod tempdb; +mod trie; use crate::core::{run_benchmark, Mode as BenchmarkMode}; use import::{ImportBenchmarkDescription, SizeType}; -use trie::{TrieBenchmarkDescription, DatabaseSize}; -use node_testing::bench::{Profile, KeyTypes}; +use node_testing::bench::{KeyTypes, Profile}; use structopt::StructOpt; +use trie::{DatabaseSize, TrieBenchmarkDescription}; #[derive(Debug, StructOpt)] #[structopt(name = "node-bench", about = "Node integration benchmarks")] struct Opt { - /// Show list of all available benchmarks. - /// - /// Will output ("name", "path"). Benchmarks can then be filtered by path. - #[structopt(short, long)] - list: bool, - - /// Machine readable json output. - /// - /// This also suppresses all regular output (except to stderr) - #[structopt(short, long)] - json: bool, - - /// Filter benchmarks. - /// - /// Run with `--list` for the hint of what to filter. - filter: Option, - - /// Mode - /// - /// "regular" for regular becnhmark - /// - /// "profile" mode adds pauses between measurable runs, - /// so that actual interval can be selected in the profiler of choice. - #[structopt(short, long, default_value = "regular")] - mode: BenchmarkMode, + /// Show list of all available benchmarks. + /// + /// Will output ("name", "path"). Benchmarks can then be filtered by path. + #[structopt(short, long)] + list: bool, + + /// Machine readable json output. + /// + /// This also suppresses all regular output (except to stderr) + #[structopt(short, long)] + json: bool, + + /// Filter benchmarks. + /// + /// Run with `--list` for the hint of what to filter. + filter: Option, + + /// Mode + /// + /// "regular" for regular becnhmark + /// + /// "profile" mode adds pauses between measurable runs, + /// so that actual interval can be selected in the profiler of choice. 
+ #[structopt(short, long, default_value = "regular")] + mode: BenchmarkMode, } fn main() { - let opt = Opt::from_args(); - - if !opt.json { - sc_cli::init_logger(""); - } - - let benchmarks = matrix!( - profile in [Profile::Wasm, Profile::Native] => - ImportBenchmarkDescription { - profile: *profile, - key_types: KeyTypes::Sr25519, - size: SizeType::Medium, - }, - ImportBenchmarkDescription { - profile: Profile::Native, - key_types: KeyTypes::Ed25519, - size: SizeType::Medium, - }, - size in [SizeType::Small, SizeType::Large] => - ImportBenchmarkDescription { - profile: Profile::Native, - key_types: KeyTypes::Sr25519, - size: *size, - }, - size in [ - DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small, - DatabaseSize::Medium, DatabaseSize::Large, - ] => TrieBenchmarkDescription { database_size: *size }, - ); - - if opt.list { - for benchmark in benchmarks.iter() { - log::info!("{}: {}", benchmark.name(), benchmark.path().full()) - } - return; - } - - let mut results = Vec::new(); - for benchmark in benchmarks { - if opt.filter.as_ref().map(|f| benchmark.path().has(f)).unwrap_or(true) { - log::info!("Starting {}", benchmark.name()); - let result = run_benchmark(benchmark, opt.mode); - log::info!("{}", result); - - results.push(result); - } - } - - if opt.json { - let json_result: String = serde_json::to_string(&results).expect("Failed to construct json"); - println!("{}", json_result); - } -} \ No newline at end of file + let opt = Opt::from_args(); + + if !opt.json { + sc_cli::init_logger(""); + } + + let benchmarks = matrix!( + profile in [Profile::Wasm, Profile::Native] => + ImportBenchmarkDescription { + profile: *profile, + key_types: KeyTypes::Sr25519, + size: SizeType::Medium, + }, + ImportBenchmarkDescription { + profile: Profile::Native, + key_types: KeyTypes::Ed25519, + size: SizeType::Medium, + }, + size in [SizeType::Small, SizeType::Large] => + ImportBenchmarkDescription { + profile: Profile::Native, + key_types: KeyTypes::Sr25519, + 
size: *size, + }, + size in [ + DatabaseSize::Empty, DatabaseSize::Smallest, DatabaseSize::Small, + DatabaseSize::Medium, DatabaseSize::Large, + ] => TrieBenchmarkDescription { database_size: *size }, + ); + + if opt.list { + for benchmark in benchmarks.iter() { + log::info!("{}: {}", benchmark.name(), benchmark.path().full()) + } + return; + } + + let mut results = Vec::new(); + for benchmark in benchmarks { + if opt + .filter + .as_ref() + .map(|f| benchmark.path().has(f)) + .unwrap_or(true) + { + log::info!("Starting {}", benchmark.name()); + let result = run_benchmark(benchmark, opt.mode); + log::info!("{}", result); + + results.push(result); + } + } + + if opt.json { + let json_result: String = + serde_json::to_string(&results).expect("Failed to construct json"); + println!("{}", json_result); + } +} diff --git a/bin/node/bench/src/state_sizes.rs b/bin/node/bench/src/state_sizes.rs index d35989f61b..894438c647 100644 --- a/bin/node/bench/src/state_sizes.rs +++ b/bin/node/bench/src/state_sizes.rs @@ -15,4742 +15,4742 @@ // along with Parity. If not, see . 
/// Kusama value size distribution -pub const KUSAMA_STATE_DISTRIBUTION: &'static[(u32, u32)] = &[ - (32, 35), - (33, 20035), - (34, 5369), - (35, 184), - (36, 54599), - (37, 1515056), - (38, 885), - (39, 69965), - (41, 210754), - (42, 467), - (43, 3241), - (44, 32660), - (45, 231141), - (46, 220016), - (47, 248931), - (48, 157232), - (49, 143236), - (50, 2428), - (51, 1476159), - (52, 31), - (53, 112), - (54, 711), - (55, 1934), - (56, 39), - (57, 407), - (58, 6929), - (59, 6568), - (60, 26), - (61, 268673), - (62, 118137), - (63, 84640), - (64, 193232), - (65, 2584210), - (66, 1002), - (67, 2993), - (68, 4266), - (69, 5633381), - (70, 277369), - (71, 5106), - (72, 722), - (73, 1882), - (74, 8178), - (75, 4045), - (76, 1596), - (77, 5335), - (78, 14591), - (79, 9645), - (80, 44171), - (81, 13474), - (82, 51090), - (83, 2595), - (84, 6276), - (85, 382195), - (86, 1062), - (87, 3846), - (88, 5663), - (89, 3811), - (90, 1580), - (91, 5729), - (92, 19144), - (93, 197), - (94, 235), - (95, 545), - (96, 54914), - (97, 3858), - (98, 1610), - (99, 635), - (100, 2481), - (101, 6457), - (102, 3753951), - (103, 11821), - (104, 11114), - (105, 2601), - (106, 2518), - (107, 521925), - (108, 297), - (109, 411), - (110, 668), - (111, 4500), - (112, 704), - (113, 316), - (114, 59), - (115, 291), - (116, 1727), - (117, 6010), - (118, 51874), - (119, 13969), - (120, 9496), - (121, 274), - (122, 810), - (123, 643), - (124, 69), - (125, 41), - (126, 329), - (127, 175435), - (128, 2641), - (129, 2658), - (130, 415277), - (131, 2705), - (132, 2314), - (133, 4290), - (134, 693), - (135, 1957478), - (136, 1111), - (137, 1474503), - (138, 3656), - (139, 940), - (140, 1755692), - (141, 61), - (142, 4140), - (143, 47), - (144, 6725), - (145, 610), - (146, 250), - (147, 48), - (148, 28), - (149, 132), - (150, 123489), - (151, 7476), - (152, 55), - (153, 68), - (154, 170), - (155, 566), - (156, 8110), - (157, 1243), - (158, 1445), - (159, 2569), - (160, 1096), - (161, 865), - (162, 634), - 
(163, 372411), - (164, 685), - (165, 3481), - (166, 1467), - (167, 2146), - (168, 556539), - (169, 566), - (170, 5080), - (171, 202), - (172, 123), - (173, 100750), - (174, 667), - (175, 433), - (176, 737), - (177, 315), - (178, 317), - (179, 656), - (180, 2522), - (181, 315), - (182, 406), - (183, 4680), - (184, 4941), - (185, 828), - (186, 782), - (187, 565), - (188, 584), - (189, 376), - (190, 321), - (191, 418), - (192, 167), - (193, 362), - (194, 2198), - (195, 180), - (196, 787), - (197, 2680), - (198, 501), - (199, 843), - (200, 287), - (201, 608362), - (202, 1157), - (203, 959), - (204, 1683623), - (205, 440), - (206, 756), - (207, 812), - (208, 1147), - (209, 723), - (210, 856), - (211, 496), - (212, 916), - (213, 615), - (214, 488), - (215, 522), - (216, 8265), - (217, 32574), - (218, 417), - (219, 247), - (220, 579), - (221, 68), - (222, 126), - (223, 306), - (224, 310), - (225, 24), - (226, 37), - (227, 160), - (228, 11), - (229, 3288), - (230, 349), - (231, 23), - (232, 14), - (233, 45), - (234, 452840), - (235, 118), - (236, 741), - (237, 390), - (238, 517), - (239, 694), - (240, 765), - (241, 542), - (242, 417), - (243, 617), - (244, 1307), - (245, 583), - (246, 1640), - (247, 735), - (248, 478), - (249, 4312), - (250, 5426), - (251, 1067), - (252, 435), - (253, 202), - (254, 122), - (255, 486), - (256, 180), - (257, 279), - (258, 406), - (259, 160), - (260, 2759), - (261, 2600), - (262, 686), - (263, 95), - (264, 164), - (265, 150), - (266, 1013), - (267, 552618), - (268, 217), - (269, 188), - (270, 284), - (271, 416), - (272, 453), - (273, 95), - (274, 42), - (275, 68), - (276, 90), - (277, 123), - (278, 340), - (279, 98), - (280, 2795), - (281, 261), - (282, 7370), - (283, 5768), - (284, 3285), - (285, 461), - (286, 363), - (287, 456), - (288, 1475), - (289, 211), - (290, 153), - (291, 282), - (292, 241), - (293, 2924), - (294, 261), - (295, 1070), - (296, 1301), - (297, 688), - (298, 592), - (299, 95), - (300, 686447), - (301, 42), - (302, 385), 
- (303, 24), - (304, 931), - (305, 49), - (306, 23), - (307, 67), - (308, 32), - (309, 38), - (310, 2), - (311, 7), - (312, 198), - (313, 11), - (314, 38), - (315, 3704), - (316, 7406), - (317, 116), - (318, 229), - (319, 100), - (320, 437), - (321, 244), - (322, 285), - (323, 433), - (324, 382), - (325, 3171), - (326, 761), - (327, 324), - (328, 2264), - (329, 340), - (330, 353), - (331, 110), - (332, 403), - (333, 731366), - (334, 223), - (335, 350), - (336, 600), - (337, 219), - (338, 112), - (339, 10), - (340, 761), - (341, 35), - (342, 99), - (343, 83), - (344, 136), - (345, 7), - (346, 836), - (347, 11), - (348, 10832), - (349, 8931), - (350, 33), - (351, 64), - (352, 66), - (353, 54), - (354, 78), - (355, 198), - (356, 722), - (357, 2647), - (358, 64), - (359, 71), - (360, 2242), - (361, 1462), - (362, 505), - (363, 444), - (364, 597), - (365, 372), - (366, 664852), - (367, 464), - (368, 605), - (369, 123), - (370, 64), - (371, 117), - (372, 328), - (373, 123), - (374, 227), - (375, 151), - (376, 881), - (377, 111), - (378, 30), - (379, 73), - (380, 2126), - (381, 3662), - (382, 9107), - (383, 18), - (384, 294), - (385, 12), - (386, 262), - (387, 127), - (388, 269), - (389, 2566), - (390, 14), - (391, 17), - (392, 80), - (393, 67), - (394, 1470), - (395, 25), - (396, 220), - (397, 131), - (398, 225), - (399, 484755), - (400, 597), - (401, 300), - (402, 253), - (403, 359), - (404, 523), - (405, 311), - (406, 238), - (407, 999), - (408, 424), - (409, 165), - (410, 96), - (411, 248), - (412, 1771), - (413, 139), - (414, 7374), - (415, 11186), - (416, 1355), - (417, 1283666), - (418, 9), - (419, 116), - (420, 3897), - (421, 2554), - (422, 1), - (423, 1), - (424, 16878), - (425, 3198212), - (426, 335), - (427, 1676), - (428, 80), - (429, 19), - (430, 47), - (431, 495), - (432, 421946), - (433, 73), - (434, 95), - (435, 105), - (436, 184), - (437, 56903), - (438, 132), - (439, 87), - (440, 207411), - (441, 230), - (442, 372), - (443, 361), - (444, 387), - (445, 
299), - (446, 175), - (447, 7487), - (448, 16346), - (449, 37), - (450, 98313), - (451, 307), - (452, 304), - (453, 2675), - (454, 229), - (455, 130), - (456, 134), - (457, 50), - (458, 238), - (459, 2), - (460, 2267), - (461, 7), - (462, 1), - (463, 8), - (464, 395), - (465, 1279781), - (466, 9), - (467, 12), - (468, 633), - (469, 37), - (470, 13), - (471, 54), - (472, 247), - (473, 82), - (474, 119), - (475, 114), - (476, 332), - (477, 79), - (478, 116), - (479, 128), - (480, 4206), - (481, 20732), - (482, 311), - (483, 343), - (484, 527), - (485, 2750), - (486, 76), - (487, 152), - (488, 510), - (489, 63), - (490, 257), - (491, 79), - (492, 825), - (493, 4198), - (494, 389), - (495, 72), - (496, 1547), - (497, 34), - (498, 631996), - (499, 5), - (500, 2334), - (501, 34), - (502, 7), - (503, 7), - (504, 7682), - (505, 6), - (506, 26), - (507, 22), - (508, 461), - (509, 95), - (510, 36), - (511, 46), - (512, 2741), - (513, 38455), - (514, 29678), - (515, 179), - (516, 1637), - (517, 2597), - (518, 166), - (519, 230), - (520, 2736), - (521, 187), - (522, 361), - (523, 310), - (524, 3327), - (525, 76), - (526, 8070), - (527, 35), - (528, 3310), - (529, 118), - (530, 167), - (531, 214180), - (532, 4597), - (533, 153), - (534, 126), - (535, 23), - (536, 13920), - (537, 10), - (538, 11), - (539, 50), - (540, 50739), - (541, 8), - (542, 347), - (543, 77), - (544, 451575), - (545, 16), - (546, 218814), - (547, 1859026), - (548, 303), - (549, 2511), - (550, 27), - (551, 28), - (552, 188), - (553, 46), - (554, 216), - (555, 63), - (556, 202), - (557, 192), - (558, 257), - (559, 170377), - (560, 902), - (561, 424), - (562, 186), - (563, 145), - (564, 342), - (565, 76), - (566, 41), - (567, 26), - (568, 136), - (569, 1336), - (570, 988), - (571, 131), - (572, 766), - (573, 95), - (574, 57), - (575, 16), - (576, 47), - (577, 63), - (578, 5), - (579, 140), - (580, 1263808), - (581, 2498), - (583, 2), - (584, 706), - (585, 49), - (586, 502), - (587, 16), - (588, 115), - (589, 
25), - (590, 31), - (591, 34), - (592, 818), - (593, 60), - (594, 84), - (595, 116), - (596, 446), - (597, 111), - (598, 151), - (599, 153), - (600, 1408), - (601, 165), - (602, 575), - (603, 163), - (604, 309), - (605, 52), - (606, 40), - (607, 116), - (608, 749), - (609, 231), - (610, 171), - (611, 218), - (612, 1145), - (613, 2572), - (614, 27), - (615, 26), - (616, 2060), - (617, 173), - (618, 1094), - (619, 66), - (620, 14235), - (622, 294), - (623, 2), - (624, 79374), - (625, 1), - (626, 3), - (627, 7), - (628, 335), - (629, 27), - (630, 47), - (631, 113), - (632, 589), - (633, 56), - (634, 75), - (635, 85), - (636, 740), - (637, 118), - (638, 180), - (639, 149), - (640, 1169), - (641, 135), - (642, 169), - (643, 170), - (644, 1802), - (645, 2481), - (646, 28), - (647, 78), - (648, 5585), - (649, 173), - (650, 135), - (651, 177), - (652, 6553), - (653, 129), - (654, 55), - (655, 6), - (656, 13250), - (657, 5), - (658, 15), - (659, 3), - (660, 39892), - (661, 28), - (663, 1), - (664, 575061), - (665, 1), - (666, 5), - (667, 73), - (668, 39), - (669, 62), - (670, 50), - (671, 27), - (672, 33), - (673, 48), - (674, 44), - (675, 151), - (676, 70), - (677, 2540), - (678, 150), - (679, 109), - (680, 117), - (681, 95), - (682, 80), - (683, 44), - (684, 34), - (685, 31), - (686, 125), - (687, 146), - (688, 423), - (689, 142), - (690, 154), - (691, 135), - (692, 194), - (693, 48), - (694, 6), - (695, 141), - (696, 47), - (697, 9), - (699, 1), - (701, 1), - (702, 2), - (703, 81), - (704, 3), - (705, 4), - (706, 23), - (707, 131), - (708, 31), - (709, 2458), - (710, 346), - (711, 43), - (712, 46), - (713, 48), - (714, 85), - (715, 119), - (716, 89), - (717, 97), - (718, 95), - (719, 137), - (720, 437), - (721, 64), - (722, 28), - (723, 29), - (724, 121), - (725, 162), - (726, 241), - (727, 219), - (728, 143), - (729, 92), - (730, 100), - (731, 42), - (732, 38), - (733, 60), - (734, 2), - (735, 71), - (736, 12), - (737, 9), - (738, 7), - (739, 193), - (740, 2), - (741, 
2404), - (742, 3), - (743, 11), - (744, 5), - (745, 5), - (746, 9), - (747, 16), - (748, 27), - (749, 32), - (750, 57), - (751, 54), - (752, 383), - (753, 61), - (754, 48), - (755, 84), - (756, 108), - (757, 134), - (758, 121), - (759, 160), - (760, 80), - (761, 68), - (762, 192), - (763, 107), - (764, 270), - (765, 58), - (766, 125), - (767, 151), - (768, 75), - (769, 94), - (770, 91), - (771, 187), - (772, 57), - (773, 2371), - (774, 8), - (775, 93), - (776, 107), - (777, 20), - (779, 1), - (780, 22), - (781, 1), - (783, 6), - (784, 318), - (785, 25), - (786, 31), - (787, 23), - (788, 28), - (789, 62), - (790, 53), - (791, 41), - (792, 68), - (793, 60), - (794, 88), - (795, 108), - (796, 63), - (797, 100), - (798, 68), - (799, 72), - (800, 83), - (801, 46), - (802, 36), - (803, 157), - (804, 139), - (805, 2439), - (806, 73), - (807, 81), - (808, 99), - (809, 66), - (810, 45), - (811, 98), - (812, 1), - (814, 31), - (815, 1), - (816, 312), - (818, 155), - (819, 2), - (820, 12), - (821, 27), - (822, 97), - (823, 23), - (824, 7), - (825, 15), - (826, 37), - (827, 39), - (828, 28), - (829, 33), - (830, 53), - (831, 101), - (832, 189), - (833, 94), - (834, 66), - (835, 173), - (836, 74), - (837, 2402), - (838, 64), - (839, 28), - (840, 20), - (841, 13), - (842, 32), - (843, 72), - (844, 68), - (845, 50), - (846, 41), - (847, 114), - (848, 345), - (849, 33), - (850, 17), - (851, 6), - (852, 61), - (853, 101), - (854, 123), - (855, 28), - (856, 3), - (857, 3), - (858, 30), - (859, 12), - (860, 28), - (861, 16), - (862, 20), - (863, 7), - (864, 23), - (865, 28), - (866, 40), - (867, 159), - (868, 40), - (869, 2361), - (870, 92), - (871, 88), - (872, 193), - (873, 61), - (874, 58), - (875, 67), - (876, 65), - (877, 46), - (878, 55), - (879, 30), - (880, 334), - (881, 74), - (882, 121), - (883, 107), - (884, 36), - (885, 66), - (886, 22), - (887, 25), - (888, 24), - (889, 10), - (890, 44), - (891, 5), - (892, 84), - (893, 4), - (894, 1), - (895, 7), - (896, 3), - (897, 8), 
- (898, 3), - (899, 126), - (900, 13), - (901, 2280), - (902, 74), - (903, 36), - (904, 46), - (905, 52), - (906, 24), - (907, 23), - (908, 43), - (909, 31), - (910, 66), - (911, 65), - (912, 376), - (913, 77), - (914, 85), - (915, 60), - (916, 29), - (917, 64), - (918, 48), - (919, 135), - (920, 21), - (921, 34), - (922, 26), - (923, 22), - (924, 52), - (925, 28), - (926, 142), - (927, 18), - (928, 14), - (929, 30), - (930, 56), - (931, 113), - (933, 2264), - (934, 14), - (935, 4), - (936, 10), - (937, 18), - (938, 2), - (939, 30), - (940, 9), - (941, 29), - (942, 10), - (943, 17), - (944, 296), - (945, 31), - (946, 40), - (947, 26), - (948, 70), - (949, 66), - (950, 44), - (951, 57), - (952, 55), - (953, 56), - (954, 51), - (955, 133), - (956, 39), - (957, 49), - (958, 45), - (959, 26), - (960, 30), - (961, 35), - (962, 40), - (963, 148), - (964, 34), - (965, 2264), - (966, 50), - (967, 21), - (968, 2), - (970, 24), - (972, 45), - (973, 8), - (974, 11), - (975, 20), - (976, 287), - (977, 20), - (978, 6), - (979, 9), - (980, 99), - (981, 32), - (982, 10), - (983, 13), - (984, 26), - (985, 30), - (986, 31), - (987, 38), - (988, 25), - (989, 32), - (990, 44), - (991, 125), - (992, 58), - (993, 44), - (994, 25), - (995, 140), - (996, 25), - (997, 2222), - (998, 16), - (999, 25), - (1000, 38), - (1001, 66), - (1002, 31), - (1003, 38), - (1004, 38), - (1005, 10), - (1006, 7), - (1008, 283), - (1009, 3), - (1010, 1), - (1011, 17), - (1012, 4), - (1013, 51), - (1014, 1), - (1015, 1), - (1016, 3), - (1017, 12), - (1018, 11), - (1019, 21), - (1020, 31), - (1021, 14), - (1022, 14), - (1023, 23), - (1024, 25), - (1025, 42), - (1026, 39), - (1027, 220), - (1028, 33), - (1029, 2206), - (1030, 24), - (1031, 64), - (1032, 36), - (1033, 61), - (1034, 123), - (1035, 32), - (1036, 20), - (1037, 15), - (1038, 11), - (1039, 33), - (1040, 311), - (1041, 58), - (1042, 80), - (1043, 29), - (1044, 10), - (1045, 48), - (1046, 18), - (1047, 22), - (1048, 3), - (1049, 17), - (1050, 1), - 
(1051, 2), - (1052, 5), - (1053, 4), - (1054, 4), - (1055, 1), - (1056, 4), - (1057, 15), - (1058, 11), - (1059, 135), - (1060, 59), - (1061, 2132), - (1062, 32), - (1063, 116), - (1064, 37), - (1065, 44), - (1066, 42), - (1067, 28), - (1068, 10), - (1069, 36), - (1070, 59), - (1071, 48), - (1072, 332), - (1073, 59), - (1074, 43), - (1075, 19), - (1076, 19), - (1077, 31), - (1078, 31), - (1079, 20), - (1080, 38), - (1081, 58), - (1082, 37), - (1083, 47), - (1084, 19), - (1085, 24), - (1086, 12), - (1087, 26), - (1088, 89), - (1089, 3), - (1091, 108), - (1093, 2112), - (1094, 13), - (1095, 4), - (1096, 4), - (1097, 17), - (1098, 7), - (1099, 105), - (1100, 12), - (1101, 10), - (1102, 17), - (1103, 19), - (1104, 329), - (1105, 28), - (1106, 58), - (1107, 21), - (1108, 22), - (1109, 63), - (1110, 29), - (1111, 53), - (1112, 84), - (1113, 28), - (1114, 30), - (1115, 22), - (1116, 40), - (1117, 16), - (1118, 20), - (1119, 75), - (1120, 43), - (1121, 49), - (1122, 25), - (1123, 118), - (1124, 8), - (1125, 2083), - (1126, 21), - (1127, 3), - (1128, 43), - (1129, 1), - (1130, 1), - (1132, 3), - (1133, 1), - (1134, 3), - (1135, 83), - (1136, 266), - (1137, 7), - (1138, 22), - (1139, 14), - (1140, 30), - (1141, 54), - (1142, 125), - (1143, 44), - (1144, 34), - (1145, 19), - (1146, 21), - (1147, 19), - (1148, 46), - (1149, 45), - (1150, 54), - (1151, 22), - (1152, 30), - (1153, 20), - (1154, 7), - (1155, 143), - (1156, 23), - (1157, 2078), - (1158, 30), - (1159, 23), - (1160, 12), - (1161, 18), - (1162, 6), - (1164, 5), - (1165, 1), - (1168, 254), - (1169, 1), - (1170, 3), - (1171, 95), - (1172, 37), - (1173, 23), - (1174, 7), - (1175, 11), - (1176, 5), - (1177, 14), - (1178, 15), - (1179, 19), - (1180, 10), - (1181, 28), - (1182, 87), - (1183, 35), - (1184, 30), - (1185, 30), - (1186, 38), - (1187, 148), - (1188, 49), - (1189, 2056), - (1190, 42), - (1191, 41), - (1192, 14), - (1193, 36), - (1194, 37), - (1195, 22), - (1196, 108), - (1197, 62), - (1198, 55), - (1199, 43), - 
(1200, 261), - (1201, 16), - (1202, 1), - (1203, 9), - (1204, 3), - (1205, 32), - (1207, 81), - (1208, 3), - (1210, 3), - (1212, 4), - (1213, 9), - (1214, 5), - (1215, 6), - (1216, 4), - (1217, 8), - (1218, 13), - (1219, 120), - (1220, 11), - (1221, 1989), - (1222, 11), - (1223, 20), - (1224, 15), - (1225, 21), - (1226, 23), - (1227, 50), - (1228, 37), - (1229, 51), - (1230, 37), - (1231, 21), - (1232, 256), - (1233, 26), - (1234, 25), - (1235, 21), - (1236, 79), - (1237, 50), - (1238, 21), - (1239, 2), - (1240, 6), - (1241, 8), - (1243, 95), - (1244, 1), - (1247, 1), - (1248, 1), - (1249, 1), - (1250, 96), - (1251, 112), - (1252, 43), - (1253, 1960), - (1254, 7), - (1255, 13), - (1256, 16), - (1257, 20), - (1258, 19), - (1259, 17), - (1260, 12), - (1261, 5), - (1262, 12), - (1263, 29), - (1264, 272), - (1265, 63), - (1266, 37), - (1267, 36), - (1268, 25), - (1269, 55), - (1270, 38), - (1271, 7), - (1272, 37), - (1273, 10), - (1274, 16), - (1275, 28), - (1276, 18), - (1277, 11), - (1278, 8), - (1279, 91), - (1280, 1), - (1282, 1), - (1283, 110), - (1284, 20), - (1285, 1923), - (1287, 3), - (1288, 1), - (1290, 23), - (1291, 4), - (1292, 4), - (1293, 12), - (1294, 19), - (1295, 8), - (1296, 248), - (1297, 21), - (1298, 12), - (1299, 31), - (1300, 10), - (1301, 60), - (1302, 1), - (1303, 8), - (1304, 99), - (1305, 29), - (1306, 29), - (1307, 28), - (1308, 33), - (1309, 19), - (1310, 8), - (1311, 1), - (1313, 11), - (1314, 12), - (1315, 236), - (1316, 18), - (1317, 1891), - (1318, 2), - (1322, 21), - (1324, 1), - (1326, 8), - (1327, 3), - (1328, 235), - (1329, 4), - (1330, 1), - (1331, 2), - (1332, 5), - (1333, 38), - (1334, 2), - (1335, 30), - (1336, 18), - (1337, 31), - (1338, 8), - (1339, 5), - (1340, 11), - (1341, 9), - (1342, 12), - (1343, 11), - (1344, 79), - (1345, 37), - (1346, 19), - (1347, 136), - (1348, 9), - (1349, 1861), - (1350, 8), - (1351, 112), - (1352, 10), - (1353, 3), - (1354, 16), - (1355, 4), - (1356, 12), - (1357, 18), - (1358, 67), - (1359, 6), 
- (1360, 229), - (1361, 1), - (1362, 1), - (1364, 1), - (1365, 27), - (1366, 6), - (1368, 14), - (1370, 8), - (1371, 29), - (1372, 3), - (1373, 21), - (1374, 8), - (1375, 6), - (1376, 3), - (1377, 9), - (1378, 9), - (1379, 120), - (1380, 5), - (1381, 1833), - (1382, 45), - (1383, 35), - (1384, 23), - (1385, 25), - (1386, 26), - (1387, 159), - (1388, 24), - (1389, 16), - (1390, 16), - (1391, 14), - (1392, 273), - (1393, 17), - (1394, 9), - (1395, 5), - (1396, 14), - (1397, 24), - (1398, 27), - (1400, 2), - (1404, 5), - (1405, 8), - (1406, 3), - (1407, 25), - (1408, 2), - (1409, 22), - (1410, 10), - (1411, 111), - (1412, 89), - (1413, 1793), - (1414, 4), - (1415, 9), - (1416, 16), - (1417, 13), - (1418, 13), - (1419, 13), - (1420, 15), - (1421, 19), - (1422, 26), - (1423, 110), - (1424, 229), - (1425, 11), - (1426, 10), - (1427, 7), - (1428, 7), - (1429, 28), - (1430, 12), - (1431, 11), - (1432, 14), - (1433, 2), - (1434, 2), - (1436, 1), - (1437, 1), - (1438, 13), - (1439, 1), - (1440, 1), - (1441, 1), - (1442, 2), - (1443, 132), - (1444, 5), - (1445, 1795), - (1448, 11), - (1449, 10), - (1450, 11), - (1451, 8), - (1452, 47), - (1453, 6), - (1454, 8), - (1455, 12), - (1456, 229), - (1457, 15), - (1458, 12), - (1459, 121), - (1460, 15), - (1461, 48), - (1462, 49), - (1463, 22), - (1464, 11), - (1465, 9), - (1466, 81), - (1467, 1), - (1468, 1), - (1469, 6), - (1470, 6), - (1471, 6), - (1472, 9), - (1473, 12), - (1474, 2), - (1475, 109), - (1476, 5), - (1477, 1721), - (1478, 1), - (1479, 28), - (1480, 7), - (1481, 23), - (1482, 2), - (1483, 12), - (1484, 5), - (1485, 3), - (1486, 2), - (1487, 4), - (1488, 219), - (1489, 7), - (1490, 8), - (1491, 10), - (1492, 16), - (1493, 32), - (1494, 25), - (1495, 96), - (1496, 13), - (1497, 15), - (1498, 16), - (1499, 12), - (1500, 14), - (1501, 19), - (1502, 7), - (1503, 11), - (1504, 3), - (1505, 8), - (1506, 41), - (1507, 108), - (1508, 25), - (1509, 1719), - (1510, 8), - (1511, 10), - (1514, 2), - (1515, 25), - (1516, 2), - 
(1517, 32), - (1518, 6), - (1519, 7), - (1520, 273), - (1521, 2), - (1522, 6), - (1523, 5), - (1524, 6), - (1525, 36), - (1526, 3), - (1527, 12), - (1528, 7), - (1529, 9), - (1530, 12), - (1531, 107), - (1532, 44), - (1533, 17), - (1534, 12), - (1535, 18), - (1536, 12), - (1537, 26), - (1538, 35), - (1539, 131), - (1540, 15), - (1541, 1693), - (1542, 11), - (1543, 7), - (1544, 2), - (1545, 6), - (1546, 14), - (1547, 6), - (1548, 2), - (1549, 24), - (1550, 2), - (1551, 33), - (1552, 206), - (1553, 18), - (1555, 1), - (1556, 7), - (1557, 38), - (1558, 6), - (1559, 3), - (1560, 21), - (1562, 2), - (1563, 5), - (1564, 7), - (1565, 5), - (1566, 6), - (1567, 110), - (1568, 9), - (1569, 16), - (1570, 13), - (1571, 109), - (1572, 6), - (1573, 1664), - (1574, 53), - (1575, 14), - (1576, 21), - (1577, 31), - (1578, 42), - (1579, 13), - (1580, 10), - (1581, 12), - (1582, 11), - (1583, 85), - (1584, 202), - (1585, 7), - (1586, 6), - (1587, 25), - (1588, 5), - (1589, 41), - (1590, 4), - (1591, 5), - (1593, 1), - (1595, 5), - (1596, 11), - (1598, 1), - (1599, 1), - (1600, 1), - (1601, 4), - (1602, 19), - (1603, 200), - (1604, 10), - (1605, 1640), - (1606, 15), - (1607, 14), - (1608, 7), - (1609, 12), - (1610, 5), - (1611, 2), - (1612, 3), - (1613, 7), - (1614, 37), - (1615, 4), - (1616, 203), - (1617, 13), - (1618, 3), - (1619, 12), - (1620, 38), - (1621, 22), - (1622, 12), - (1623, 43), - (1624, 19), - (1625, 35), - (1626, 15), - (1627, 26), - (1628, 43), - (1629, 2), - (1630, 10), - (1631, 1), - (1633, 1), - (1634, 1), - (1635, 110), - (1637, 1612), - (1638, 1), - (1639, 107), - (1640, 1), - (1641, 2), - (1643, 7), - (1644, 9), - (1645, 8), - (1646, 3), - (1647, 19), - (1648, 206), - (1649, 2), - (1650, 9), - (1651, 8), - (1652, 19), - (1653, 22), - (1654, 4), - (1655, 13), - (1656, 3), - (1657, 5), - (1658, 5), - (1659, 35), - (1660, 10), - (1661, 26), - (1662, 8), - (1663, 10), - (1664, 7), - (1665, 4), - (1666, 2), - (1667, 110), - (1668, 12), - (1669, 1594), - (1670, 1), - 
(1671, 2), - (1672, 15), - (1673, 4), - (1674, 2), - (1675, 303), - (1676, 12), - (1678, 1), - (1680, 194), - (1681, 1), - (1682, 40), - (1683, 2), - (1684, 2), - (1685, 19), - (1686, 16), - (1687, 2), - (1688, 6), - (1689, 9), - (1690, 18), - (1691, 15), - (1692, 5), - (1693, 7), - (1694, 6), - (1695, 32), - (1696, 4), - (1697, 34), - (1698, 1), - (1699, 117), - (1700, 5), - (1701, 1590), - (1702, 20), - (1703, 4), - (1704, 6), - (1705, 20), - (1707, 2), - (1710, 3), - (1711, 89), - (1712, 195), - (1713, 4), - (1714, 2), - (1715, 1), - (1716, 3), - (1717, 16), - (1718, 9), - (1719, 2), - (1720, 3), - (1723, 18), - (1724, 1), - (1725, 2), - (1726, 3), - (1727, 3), - (1728, 9), - (1729, 5), - (1730, 7), - (1731, 132), - (1732, 28), - (1733, 1585), - (1734, 5), - (1735, 3), - (1736, 5), - (1737, 27), - (1738, 4), - (1739, 19), - (1740, 15), - (1741, 4), - (1742, 15), - (1743, 9), - (1744, 183), - (1745, 12), - (1747, 119), - (1748, 1), - (1749, 15), - (1750, 5), - (1754, 1), - (1757, 2), - (1758, 8), - (1759, 7), - (1760, 7), - (1761, 2), - (1762, 13), - (1763, 113), - (1764, 8), - (1765, 1547), - (1766, 7), - (1767, 21), - (1768, 3), - (1769, 34), - (1770, 5), - (1772, 6), - (1773, 7), - (1774, 12), - (1775, 9), - (1776, 189), - (1777, 25), - (1778, 10), - (1779, 4), - (1780, 1), - (1781, 21), - (1782, 3), - (1783, 186), - (1784, 2), - (1787, 1), - (1788, 10), - (1789, 8), - (1790, 1), - (1791, 34), - (1792, 1), - (1793, 1), - (1794, 1), - (1795, 108), - (1796, 4), - (1797, 1519), - (1798, 9), - (1799, 9), - (1800, 3), - (1801, 6), - (1802, 4), - (1803, 35), - (1804, 15), - (1805, 30), - (1806, 5), - (1807, 7), - (1808, 192), - (1809, 8), - (1811, 4), - (1812, 24), - (1813, 36), - (1814, 4), - (1815, 14), - (1816, 2), - (1817, 2), - (1818, 4), - (1819, 72), - (1820, 3), - (1822, 1), - (1823, 4), - (1825, 1), - (1826, 5), - (1827, 104), - (1828, 1), - (1829, 1494), - (1830, 11), - (1831, 5), - (1832, 2), - (1833, 2), - (1834, 2), - (1835, 4), - (1836, 9), - (1837, 
1), - (1838, 14), - (1839, 33), - (1840, 188), - (1841, 27), - (1842, 13), - (1843, 10), - (1844, 28), - (1845, 52), - (1846, 17), - (1847, 40), - (1848, 35), - (1849, 6), - (1850, 6), - (1851, 2), - (1853, 4), - (1854, 6), - (1855, 77), - (1856, 1), - (1859, 106), - (1860, 2), - (1861, 1466), - (1863, 2), - (1866, 1), - (1869, 1), - (1870, 2), - (1872, 179), - (1873, 1), - (1874, 9), - (1875, 29), - (1876, 15), - (1877, 43), - (1878, 2), - (1880, 8), - (1881, 13), - (1882, 18), - (1883, 12), - (1884, 14), - (1885, 18), - (1886, 16), - (1887, 6), - (1888, 2), - (1889, 3), - (1890, 9), - (1891, 196), - (1892, 13), - (1893, 1456), - (1894, 14), - (1895, 8), - (1896, 2), - (1898, 1), - (1899, 17), - (1900, 5), - (1901, 1), - (1904, 175), - (1905, 1), - (1906, 2), - (1907, 3), - (1908, 6), - (1909, 10), - (1910, 3), - (1911, 22), - (1912, 6), - (1913, 22), - (1914, 6), - (1915, 10), - (1916, 5), - (1917, 2), - (1918, 6), - (1919, 4), - (1920, 7), - (1921, 14), - (1922, 4), - (1923, 107), - (1924, 10), - (1925, 1434), - (1926, 7), - (1927, 76), - (1928, 4), - (1929, 7), - (1930, 10), - (1931, 14), - (1932, 6), - (1933, 15), - (1934, 4), - (1935, 2), - (1936, 182), - (1937, 2), - (1939, 11), - (1940, 1), - (1941, 4), - (1942, 2), - (1943, 9), - (1944, 1), - (1947, 24), - (1949, 22), - (1952, 15), - (1953, 14), - (1954, 5), - (1955, 111), - (1956, 11), - (1957, 1435), - (1958, 5), - (1959, 5), - (1960, 10), - (1961, 6), - (1962, 11), - (1963, 95), - (1964, 11), - (1965, 7), - (1966, 7), - (1967, 2), - (1968, 182), - (1969, 6), - (1970, 15), - (1972, 7), - (1973, 11), - (1974, 6), - (1975, 2), - (1976, 6), - (1977, 3), - (1978, 2), - (1983, 24), - (1985, 26), - (1986, 3), - (1987, 109), - (1988, 3), - (1989, 1421), - (1990, 1), - (1991, 3), - (1992, 8), - (1993, 4), - (1994, 6), - (1995, 5), - (1996, 13), - (1997, 6), - (1998, 10), - (1999, 92), - (2000, 181), - (2001, 5), - (2002, 5), - (2003, 1), - (2004, 1), - (2005, 14), - (2006, 12), - (2007, 10), - (2008, 7), - 
(2009, 9), - (2010, 6), - (2011, 8), - (2012, 13), - (2013, 2), - (2014, 2), - (2018, 1), - (2019, 128), - (2021, 1429), - (2022, 4), - (2026, 2), - (2027, 2), - (2030, 7), - (2032, 175), - (2033, 1), - (2035, 90), - (2036, 3), - (2037, 11), - (2038, 2), - (2039, 4), - (2040, 3), - (2041, 2), - (2042, 1), - (2043, 2), - (2044, 5), - (2045, 1), - (2046, 3), - (2047, 21), - (2048, 5), - (2050, 16), - (2051, 120), - (2053, 1403), - (2054, 4), - (2055, 29), - (2057, 26), - (2058, 3), - (2059, 4), - (2060, 4), - (2061, 7), - (2063, 1), - (2065, 170), - (2066, 3), - (2067, 2), - (2068, 7), - (2069, 13), - (2071, 77), - (2072, 1), - (2075, 4), - (2077, 1), - (2078, 2), - (2079, 5), - (2080, 4), - (2081, 3), - (2082, 3), - (2083, 2), - (2084, 293), - (2085, 6), - (2086, 1395), - (2087, 2), - (2089, 4), - (2090, 10), - (2091, 26), - (2092, 14), - (2093, 25), - (2097, 170), - (2099, 2), - (2100, 1), - (2101, 8), - (2102, 5), - (2104, 2), - (2105, 2), - (2107, 90), - (2108, 1), - (2110, 15), - (2112, 1), - (2113, 1), - (2114, 3), - (2115, 8), - (2116, 3), - (2117, 5), - (2118, 1380), - (2119, 4), - (2120, 1), - (2121, 3), - (2122, 1), - (2123, 6), - (2124, 24), - (2125, 1), - (2127, 33), - (2128, 4), - (2129, 197), - (2132, 1), - (2133, 3), - (2134, 8), - (2141, 1), - (2143, 95), - (2144, 6), - (2146, 1), - (2147, 1), - (2148, 3), - (2150, 1369), - (2152, 1), - (2153, 1), - (2155, 5), - (2156, 7), - (2157, 12), - (2158, 2), - (2159, 6), - (2160, 7), - (2161, 174), - (2162, 22), - (2163, 27), - (2164, 5), - (2165, 24), - (2166, 6), - (2169, 8), - (2170, 2), - (2171, 1), - (2172, 1), - (2174, 8), - (2175, 10), - (2176, 2), - (2177, 3), - (2179, 72), - (2180, 4), - (2181, 1), - (2182, 1366), - (2183, 2), - (2184, 5), - (2185, 4), - (2188, 3), - (2191, 1), - (2192, 2), - (2193, 169), - (2198, 7), - (2199, 27), - (2201, 28), - (2205, 2), - (2206, 2), - (2209, 9), - (2213, 8), - (2214, 1364), - (2215, 95), - (2216, 1), - (2217, 2), - (2218, 1), - (2219, 1), - (2220, 3), - (2221, 
2), - (2222, 3), - (2223, 41), - (2225, 168), - (2228, 1), - (2229, 6), - (2230, 8), - (2231, 1), - (2232, 2), - (2233, 6), - (2234, 1), - (2235, 41), - (2236, 2), - (2237, 17), - (2240, 7), - (2242, 6), - (2244, 1), - (2246, 1350), - (2249, 2), - (2250, 4), - (2251, 89), - (2252, 1), - (2257, 167), - (2260, 4), - (2261, 3), - (2262, 6), - (2265, 1), - (2269, 2), - (2270, 4), - (2271, 32), - (2273, 21), - (2274, 1), - (2275, 3), - (2276, 1), - (2277, 2), - (2278, 1344), - (2279, 2), - (2280, 1), - (2281, 1), - (2284, 1), - (2287, 98), - (2288, 2), - (2289, 168), - (2292, 3), - (2293, 3), - (2294, 4), - (2298, 3), - (2303, 9), - (2307, 26), - (2308, 1), - (2309, 30), - (2310, 1344), - (2314, 1), - (2318, 1), - (2321, 164), - (2323, 1), - (2324, 82), - (2325, 1), - (2326, 5), - (2327, 1), - (2334, 6), - (2338, 1), - (2339, 1), - (2340, 1), - (2342, 1337), - (2343, 55), - (2344, 27), - (2345, 6), - (2346, 25), - (2347, 1), - (2348, 18), - (2350, 1), - (2351, 3), - (2352, 2), - (2353, 166), - (2358, 6), - (2360, 87), - (2361, 3), - (2362, 1), - (2373, 9), - (2374, 1330), - (2376, 1), - (2377, 1), - (2378, 11), - (2379, 4), - (2380, 28), - (2382, 29), - (2383, 2), - (2384, 8), - (2385, 169), - (2386, 4), - (2387, 9), - (2388, 8), - (2389, 4), - (2390, 15), - (2392, 1), - (2396, 117), - (2397, 4), - (2399, 1), - (2406, 1330), - (2410, 1), - (2414, 1), - (2415, 4), - (2416, 26), - (2417, 164), - (2418, 31), - (2421, 3), - (2422, 4), - (2424, 6), - (2425, 3), - (2426, 3), - (2427, 5), - (2428, 1), - (2429, 2), - (2432, 100), - (2433, 1), - (2435, 1), - (2436, 1), - (2438, 1328), - (2441, 10), - (2443, 11), - (2448, 2), - (2449, 163), - (2451, 1), - (2452, 27), - (2453, 8), - (2454, 24), - (2455, 1), - (2456, 2), - (2457, 2), - (2460, 4), - (2465, 5), - (2466, 3), - (2468, 95), - (2469, 6), - (2470, 1324), - (2471, 1), - (2472, 1), - (2476, 2), - (2477, 2), - (2478, 2), - (2479, 4), - (2481, 163), - (2484, 2), - (2485, 6), - (2486, 2), - (2488, 23), - (2489, 1), - (2490, 
26), - (2491, 1), - (2493, 1), - (2494, 1), - (2495, 3), - (2496, 1), - (2500, 3), - (2502, 1327), - (2503, 1), - (2504, 93), - (2505, 2), - (2506, 1), - (2511, 4), - (2513, 166), - (2516, 3), - (2517, 5), - (2518, 8), - (2519, 2), - (2521, 1), - (2524, 27), - (2526, 20), - (2532, 1), - (2534, 1320), - (2535, 1), - (2540, 114), - (2541, 1), - (2543, 1), - (2545, 163), - (2550, 3), - (2555, 3), - (2557, 4), - (2558, 3), - (2559, 2), - (2560, 26), - (2561, 6), - (2562, 26), - (2564, 5), - (2565, 1), - (2566, 1325), - (2567, 5), - (2568, 9), - (2569, 10), - (2570, 2), - (2571, 1), - (2576, 97), - (2577, 165), - (2582, 3), - (2583, 5), - (2593, 2), - (2596, 42), - (2597, 1), - (2598, 1336), - (2602, 1), - (2609, 163), - (2612, 97), - (2613, 1), - (2614, 2), - (2619, 1), - (2621, 2), - (2624, 2), - (2628, 2), - (2630, 1684946), - (2632, 27), - (2633, 2), - (2634, 25), - (2635, 1), - (2637, 4), - (2639, 1), - (2640, 1), - (2641, 163), - (2644, 1), - (2645, 3), - (2646, 2), - (2648, 112), - (2649, 1), - (2653, 5), - (2659, 3), - (2660, 1), - (2661, 1), - (2662, 1315), - (2664, 1), - (2668, 30), - (2669, 1), - (2670, 26), - (2673, 163), - (2674, 2), - (2675, 1), - (2678, 7), - (2679, 1), - (2680, 1), - (2684, 90), - (2685, 1), - (2686, 1), - (2694, 1315), - (2699, 1), - (2701, 1), - (2704, 30), - (2705, 163), - (2706, 27), - (2710, 2), - (2712, 1), - (2720, 112), - (2721, 2), - (2723, 5), - (2726, 1316), - (2736, 1), - (2737, 165), - (2738, 2), - (2740, 25), - (2742, 33), - (2745, 1), - (2756, 97), - (2757, 1), - (2758, 1315), - (2769, 163), - (2774, 3), - (2776, 32), - (2778, 34), - (2781, 1), - (2782, 1), - (2784, 1), - (2790, 1313), - (2792, 94), - (2793, 12), - (2796, 1), - (2800, 1), - (2801, 163), - (2804, 2), - (2805, 6), - (2806, 2), - (2807, 2), - (2809, 1), - (2810, 1), - (2812, 23), - (2814, 33), - (2815, 3), - (2816, 1), - (2820, 2), - (2821, 1), - (2822, 1314), - (2824, 1), - (2828, 104), - (2829, 1), - (2833, 163), - (2837, 6), - (2838, 4), - (2839, 1), - 
(2848, 32), - (2849, 4), - (2850, 32), - (2852, 4), - (2853, 1), - (2854, 1312), - (2861, 1), - (2863, 52), - (2864, 111), - (2865, 164), - (2868, 2), - (2869, 15), - (2870, 2), - (2871, 1), - (2884, 30), - (2886, 1333), - (2890, 2), - (2891, 2), - (2892, 3), - (2893, 4), - (2894, 2), - (2897, 163), - (2899, 3), - (2900, 230), - (2901, 1), - (2902, 2), - (2908, 2), - (2911, 1), - (2918, 1312), - (2920, 42), - (2922, 25), - (2923, 1), - (2925, 1), - (2929, 165), - (2930, 2), - (2931, 5), - (2932, 4), - (2933, 8), - (2934, 2), - (2936, 110), - (2937, 1), - (2938, 1), - (2939, 1), - (2948, 1), - (2950, 1313), - (2956, 38), - (2958, 32), - (2961, 163), - (2964, 1), - (2966, 4), - (2967, 2), - (2969, 1), - (2971, 1), - (2972, 151), - (2973, 1), - (2975, 3), - (2976, 4), - (2977, 3), - (2978, 1), - (2979, 1), - (2980, 1), - (2982, 1312), - (2992, 28), - (2993, 163), - (2994, 29), - (2998, 2), - (3006, 1), - (3007, 2), - (3008, 188), - (3009, 2), - (3014, 1311), - (3015, 5), - (3016, 9), - (3017, 1), - (3020, 1), - (3025, 164), - (3028, 27), - (3030, 31), - (3044, 223), - (3045, 1), - (3046, 1311), - (3048, 1), - (3057, 163), - (3061, 2), - (3062, 4), - (3064, 41), - (3066, 35), - (3076, 2), - (3078, 1310), - (3080, 151), - (3081, 2), - (3089, 163), - (3094, 2), - (3100, 35), - (3101, 2), - (3102, 38), - (3104, 2), - (3110, 1310), - (3116, 106), - (3117, 2), - (3121, 163), - (3125, 5), - (3126, 2), - (3132, 2), - (3136, 36), - (3138, 39), - (3140, 2), - (3141, 1), - (3142, 1309), - (3143, 1), - (3144, 1), - (3152, 120), - (3153, 164), - (3155, 1), - (3157, 1), - (3158, 2), - (3163, 1), - (3164, 1), - (3172, 34), - (3174, 1343), - (3185, 163), - (3188, 136), - (3189, 1), - (3190, 2), - (3203, 1), - (3204, 1), - (3206, 1308), - (3208, 53), - (3210, 52), - (3217, 163), - (3220, 38), - (3221, 114), - (3222, 2), - (3224, 141), - (3225, 5), - (3230, 1), - (3236, 38), - (3238, 1308), - (3244, 35), - (3246, 46), - (3249, 163), - (3254, 2), - (3260, 105), - (3261, 4), - (3263, 1), 
- (3270, 1308), - (3280, 38), - (3281, 163), - (3282, 28), - (3286, 3), - (3292, 1), - (3296, 138), - (3297, 1), - (3301, 1), - (3302, 1308), - (3304, 1), - (3313, 163), - (3316, 33), - (3318, 34), - (3329, 1), - (3331, 1), - (3332, 120), - (3333, 1), - (3334, 1309), - (3345, 163), - (3350, 3), - (3352, 34), - (3354, 31), - (3357, 1), - (3366, 1307), - (3368, 230), - (3369, 6), - (3377, 163), - (3382, 2), - (3388, 37), - (3390, 45), - (3398, 1307), - (3404, 3128), - (3405, 2), - (3409, 163), - (3414, 2), - (3424, 40), - (3426, 23), - (3430, 1307), - (3440, 117), - (3441, 164), - (3446, 2), - (3460, 30), - (3462, 1344), - (3469, 1), - (3473, 163), - (3476, 116), - (3477, 1), - (3478, 3), - (3494, 1305), - (3496, 36), - (3498, 38), - (3501, 2), - (3504, 2), - (3505, 163), - (3510, 2), - (3512, 124), - (3513, 4), - (3515, 1), - (3525, 1), - (3526, 1305), - (3532, 27), - (3534, 33), - (3537, 165), - (3541, 2), - (3542, 2), - (3544, 2), - (3548, 119), - (3549, 1), - (3558, 1305), - (3568, 29), - (3569, 163), - (3570, 53), - (3574, 2), - (3581, 6), - (3584, 115), - (3585, 2), - (3590, 1306), - (3601, 163), - (3604, 39), - (3606, 45), - (3620, 107), - (3621, 1), - (3622, 1304), - (3633, 163), - (3634, 1), - (3637, 1), - (3638, 2), - (3640, 43), - (3642, 35), - (3654, 1305), - (3656, 126), - (3657, 2), - (3661, 1), - (3664, 1), - (3665, 163), - (3670, 3), - (3676, 32), - (3678, 48), - (3679, 1), - (3686, 1303), - (3692, 128), - (3693, 2), - (3697, 163), - (3702, 3), - (3712, 33), - (3714, 28), - (3718, 1302), - (3728, 137), - (3729, 165), - (3734, 2), - (3748, 54), - (3749, 1), - (3750, 1333), - (3758, 1), - (3761, 163), - (3764, 125), - (3765, 2), - (3766, 3), - (3782, 1301), - (3784, 32), - (3786, 50), - (3793, 163), - (3798, 2), - (3800, 123), - (3801, 3), - (3805, 1), - (3814, 1301), - (3820, 53), - (3822, 30), - (3825, 163), - (3830, 2), - (3833, 1), - (3836, 109), - (3837, 3), - (3846, 1301), - (3856, 35), - (3857, 163), - (3858, 54), - (3860, 20), - (3861, 51), - 
(3862, 2), - (3872, 124), - (3873, 2), - (3876, 17), - (3878, 1302), - (3882, 1), - (3889, 163), - (3892, 45), - (3894, 47), - (3901, 2), - (3903, 1), - (3904, 2), - (3908, 138), - (3909, 2), - (3910, 1300), - (3917, 2), - (3921, 163), - (3926, 2), - (3928, 38), - (3930, 37), - (3942, 1300), - (3944, 137), - (3945, 2), - (3953, 163), - (3958, 2), - (3964, 66), - (3966, 37), - (3971, 1), - (3974, 1300), - (3980, 166), - (3981, 1), - (3985, 163), - (3990, 2), - (4000, 35), - (4002, 54), - (4006, 1300), - (4016, 150), - (4017, 164), - (4021, 38), - (4022, 2), - (4024, 38), - (4036, 47), - (4038, 1347), - (4049, 163), - (4052, 134), - (4053, 10), - (4054, 2), - (4068, 1), - (4070, 1300), - (4072, 52), - (4074, 40), - (4075, 1), - (4081, 163), - (4085, 7), - (4086, 2), - (4088, 123), - (4089, 4), - (4100, 2), - (4102, 1300), - (4108, 38), - (4110, 43), - (4113, 163), - (4118, 2), - (4119, 2), - (4124, 159), - (4125, 3), - (4128, 1), - (4134, 1299), - (4141, 1), - (4144, 51), - (4145, 163), - (4146, 41), - (4150, 2), - (4152, 30), - (4160, 153), - (4161, 1), - (4164, 2), - (4166, 1299), - (4177, 163), - (4180, 225), - (4181, 596), - (4182, 50), - (4187, 1), - (4196, 373), - (4197, 3), - (4198, 1299), - (4209, 163), - (4214, 2), - (4216, 66), - (4217, 3), - (4218, 69), - (4221, 1), - (4230, 1299), - (4232, 158), - (4233, 2), - (4241, 163), - (4246, 2), - (4252, 45), - (4253, 1), - (4254, 48), - (4262, 1300), - (4267, 2), - (4268, 145), - (4269, 3), - (4270, 1), - (4271, 1), - (4273, 163), - (4278, 3), - (4288, 75), - (4290, 36), - (4294, 1298), - (4301, 1), - (4304, 173), - (4305, 166), - (4309, 2), - (4310, 2), - (4324, 52), - (4326, 1359), - (4337, 163), - (4340, 195), - (4341, 2), - (4342, 3), - (4358, 1297), - (4360, 76), - (4362, 56), - (4365, 2), - (4369, 163), - (4374, 2), - (4376, 171), - (4377, 1), - (4390, 1298), - (4396, 52), - (4398, 49), - (4401, 163), - (4406, 3), - (4407, 2), - (4412, 170), - (4413, 2), - (4421, 1), - (4422, 1296), - (4432, 57), - (4433, 
163), - (4434, 51), - (4436, 1), - (4438, 2), - (4448, 481), - (4449, 2), - (4451, 1), - (4454, 1295), - (4463, 1), - (4465, 163), - (4468, 74), - (4470, 92), - (4484, 448), - (4485, 3), - (4486, 1295), - (4487, 1), - (4497, 163), - (4502, 2), - (4504, 52), - (4506, 65), - (4518, 1295), - (4519, 2), - (4520, 631), - (4521, 3), - (4529, 164), - (4530, 1), - (4532, 1), - (4533, 3), - (4534, 2), - (4540, 55), - (4542, 48), - (4550, 1294), - (4556, 2358), - (4557, 3), - (4561, 163), - (4562, 1), - (4566, 2), - (4576, 58), - (4578, 74), - (4582, 1294), - (4592, 193), - (4593, 167), - (4598, 2), - (4612, 66), - (4614, 1363), - (4621, 2), - (4625, 163), - (4628, 218), - (4629, 3), - (4630, 2), - (4635, 3), - (4640, 1), - (4645, 1), - (4646, 1295), - (4648, 57), - (4650, 90), - (4657, 163), - (4662, 3), - (4664, 194), - (4665, 1), - (4678, 1295), - (4684, 49), - (4685, 1), - (4686, 85), - (4689, 163), - (4694, 4), - (4700, 183), - (4701, 3), - (4710, 1291), - (4720, 61), - (4721, 163), - (4722, 75), - (4726, 3), - (4736, 175), - (4737, 4), - (4742, 1291), - (4753, 163), - (4756, 84), - (4758, 53), - (4772, 210), - (4773, 4), - (4774, 1291), - (4785, 163), - (4790, 2), - (4792, 54), - (4794, 66), - (4799, 2), - (4806, 1292), - (4808, 180), - (4809, 6), - (4817, 164), - (4820, 32), - (4821, 132), - (4822, 3), - (4824, 17), - (4828, 70), - (4830, 62), - (4836, 42), - (4838, 1290), - (4844, 199), - (4845, 3), - (4849, 163), - (4854, 2), - (4864, 104), - (4866, 98), - (4870, 1290), - (4873, 1), - (4880, 184), - (4881, 164), - (4886, 2), - (4900, 88), - (4902, 1387), - (4909, 1), - (4913, 163), - (4916, 187), - (4917, 6), - (4918, 2), - (4934, 1290), - (4936, 65), - (4938, 59), - (4945, 163), - (4948, 1), - (4950, 2), - (4952, 198), - (4953, 3), - (4966, 1290), - (4972, 64), - (4974, 108), - (4977, 163), - (4982, 2), - (4988, 199), - (4989, 8), - (4998, 1290), - (5008, 82), - (5009, 163), - (5010, 113), - (5012, 3), - (5013, 9), - (5014, 2), - (5017, 1), - (5024, 228), - (5025, 
2), - (5028, 4), - (5030, 1290), - (5041, 162), - (5044, 96), - (5046, 71), - (5060, 275), - (5061, 6), - (5062, 1291), - (5064, 1), - (5070, 1), - (5073, 162), - (5078, 3), - (5080, 66), - (5082, 153), - (5094, 1289), - (5096, 272), - (5097, 10), - (5101, 2), - (5104, 2), - (5105, 162), - (5110, 2), - (5116, 87), - (5118, 80), - (5126, 1289), - (5132, 266), - (5133, 5), - (5135, 1), - (5137, 162), - (5140, 190), - (5141, 681), - (5142, 2), - (5152, 104), - (5154, 184), - (5156, 238), - (5158, 1289), - (5168, 257), - (5169, 165), - (5174, 2), - (5188, 99), - (5190, 1435), - (5201, 162), - (5204, 228), - (5205, 6), - (5206, 2), - (5221, 206), - (5222, 1289), - (5224, 312), - (5226, 110), - (5231, 1), - (5233, 162), - (5238, 2), - (5240, 266), - (5241, 7), - (5254, 1289), - (5260, 87), - (5262, 243), - (5265, 162), - (5270, 2), - (5274, 8), - (5276, 318), - (5277, 7), - (5286, 1289), - (5288, 86), - (5296, 88), - (5297, 162), - (5298, 123), - (5302, 3), - (5312, 351), - (5313, 1), - (5318, 1289), - (5329, 162), - (5332, 115), - (5334, 173), - (5339, 6), - (5344, 1), - (5348, 313), - (5349, 3), - (5350, 1289), - (5352, 24), - (5353, 14), - (5361, 162), - (5366, 3), - (5368, 157), - (5370, 107), - (5374, 1), - (5382, 1289), - (5384, 293), - (5385, 4), - (5388, 4), - (5393, 162), - (5396, 1), - (5398, 2), - (5404, 142), - (5406, 201), - (5407, 1), - (5414, 1289), - (5417, 3), - (5420, 285), - (5421, 5), - (5423, 1), - (5425, 162), - (5430, 2), - (5436, 1), - (5440, 142), - (5442, 210), - (5444, 1), - (5446, 1294), - (5456, 318), - (5457, 166), - (5462, 3), - (5476, 123), - (5478, 1608), - (5482, 2), - (5489, 162), - (5492, 329), - (5493, 2), - (5494, 2), - (5504, 1), - (5506, 1), - (5510, 1289), - (5511, 1), - (5512, 165), - (5514, 167), - (5521, 163), - (5522, 1), - (5526, 2), - (5528, 367), - (5529, 8), - (5542, 1289), - (5548, 192), - (5550, 291), - (5553, 162), - (5558, 2), - (5564, 399), - (5565, 13), - (5574, 1289), - (5584, 188), - (5585, 163), - (5586, 356), - 
(5590, 2), - (5592, 1), - (5599, 1), - (5600, 375), - (5601, 3), - (5606, 1290), - (5608, 1), - (5617, 162), - (5618, 1), - (5620, 261), - (5622, 667), - (5623, 1), - (5626, 1), - (5633, 1), - (5636, 406), - (5637, 4), - (5638, 1289), - (5639, 1), - (5649, 162), - (5654, 2), - (5656, 468), - (5658, 1159), - (5662, 1), - (5670, 1289), - (5671, 1), - (5672, 349), - (5673, 8), - (5675, 1), - (5681, 162), - (5686, 2), - (5692, 321), - (5694, 3067), - (5702, 1289), - (5706, 1), - (5708, 443), - (5709, 7), - (5713, 162), - (5718, 2), - (5728, 496), - (5730, 4577), - (5734, 1289), - (5744, 383), - (5745, 165), - (5750, 3), - (5756, 1), - (5758, 1), - (5764, 5847), - (5766, 8966), - (5775, 1), - (5777, 162), - (5780, 616), - (5781, 240), - (5782, 2), - (5784, 1), - (5788, 1), - (5796, 81), - (5798, 1289), - (5799, 1), - (5800, 5543), - (5802, 13287), - (5809, 162), - (5814, 2), - (5816, 409), - (5817, 3), - (5830, 1289), - (5833, 1), - (5836, 123), - (5838, 59), - (5841, 162), - (5846, 2), - (5852, 480), - (5853, 10), - (5862, 1289), - (5872, 191), - (5873, 162), - (5874, 38), - (5878, 2), - (5888, 616), - (5889, 12), - (5894, 1289), - (5905, 162), - (5908, 139), - (5910, 54), - (5922, 1), - (5924, 675), - (5925, 9), - (5926, 1289), - (5937, 162), - (5942, 2), - (5944, 153), - (5946, 48), - (5958, 1289), - (5960, 614), - (5961, 33), - (5969, 162), - (5974, 2), - (5980, 140), - (5982, 95), - (5990, 1289), - (5996, 628), - (5997, 10), - (6001, 162), - (6006, 2), - (6016, 155), - (6018, 67), - (6021, 42), - (6022, 1289), - (6024, 42), - (6032, 772), - (6033, 177), - (6038, 2), - (6049, 1), - (6052, 109), - (6054, 1340), - (6065, 162), - (6068, 749), - (6069, 11), - (6070, 2), - (6086, 1289), - (6088, 364), - (6090, 49), - (6096, 1), - (6097, 162), - (6102, 2), - (6104, 975), - (6105, 4), - (6106, 1), - (6118, 1289), - (6124, 273), - (6126, 58), - (6129, 162), - (6134, 2), - (6138, 1), - (6140, 1053), - (6141, 13), - (6150, 1289), - (6152, 1), - (6153, 2), - (6160, 372), - 
(6161, 162), - (6162, 70), - (6164, 1), - (6166, 2), - (6172, 1), - (6176, 1088), - (6177, 96), - (6178, 1), - (6182, 1290), - (6188, 4), - (6193, 162), - (6194, 1), - (6196, 346), - (6198, 101), - (6206, 1), - (6212, 1352), - (6213, 4), - (6214, 1290), - (6219, 2), - (6223, 1), - (6225, 162), - (6230, 1), - (6232, 321), - (6234, 170), - (6246, 1290), - (6248, 1755), - (6249, 4), - (6257, 162), - (6261, 4), - (6262, 1), - (6264, 4), - (6268, 616), - (6270, 141), - (6275, 1), - (6278, 1289), - (6280, 1), - (6281, 1), - (6284, 2516), - (6285, 73), - (6289, 162), - (6294, 1), - (6304, 409), - (6306, 163), - (6310, 1289), - (6314, 2), - (6320, 2276), - (6321, 210), - (6326, 1), - (6340, 445), - (6342, 1437), - (6353, 162), - (6356, 4090), - (6357, 55), - (6358, 1), - (6364, 1), - (6374, 1290), - (6376, 929), - (6378, 270), - (6385, 162), - (6390, 1), - (6392, 6135), - (6393, 16), - (6400, 1), - (6406, 1289), - (6412, 607), - (6414, 386), - (6417, 162), - (6420, 1), - (6421, 238), - (6422, 1), - (6424, 238), - (6428, 15189), - (6429, 227), - (6438, 1289), - (6443, 1), - (6448, 1211), - (6449, 162), - (6450, 1135), - (6453, 2), - (6454, 1), - (6464, 66588), - (6465, 77), - (6470, 1289), - (6474, 31), - (6481, 162), - (6484, 21001), - (6486, 9926), - (6488, 95), - (6498, 1), - (6500, 51017), - (6501, 2547), - (6502, 1289), - (6513, 162), - (6518, 1), - (6520, 11978), - (6522, 2546), - (6534, 1289), - (6536, 1), - (6537, 4), - (6539, 7), - (6545, 162), - (6546, 1), - (6550, 1), - (6553, 27), - (6566, 1289), - (6572, 1), - (6573, 2), - (6574, 1), - (6577, 163), - (6582, 2), - (6587, 1), - (6588, 17), - (6598, 1289), - (6600, 1), - (6603, 1), - (6605, 1), - (6606, 2), - (6608, 1), - (6609, 163), - (6610, 1), - (6614, 1), - (6623, 4), - (6630, 1289), - (6631, 1), - (6633, 1), - (6635, 1), - (6640, 1), - (6641, 162), - (6644, 1), - (6645, 2), - (6646, 2), - (6662, 1289), - (6666, 1), - (6670, 1), - (6673, 162), - (6678, 1), - (6679, 1), - (6680, 1), - (6681, 5), - (6686, 1), - 
(6694, 1289), - (6705, 162), - (6710, 1), - (6711, 1), - (6714, 1), - (6716, 1), - (6717, 10), - (6726, 1289), - (6734, 1), - (6737, 163), - (6738, 1), - (6740, 2), - (6742, 1), - (6752, 1), - (6753, 1), - (6757, 1), - (6758, 1289), - (6769, 162), - (6770, 1), - (6774, 1), - (6775, 1), - (6788, 1), - (6789, 3), - (6790, 1289), - (6797, 1), - (6801, 162), - (6802, 1), - (6803, 1), - (6806, 1), - (6818, 1), - (6819, 1), - (6822, 1289), - (6824, 1), - (6825, 5), - (6833, 162), - (6834, 1), - (6837, 1), - (6838, 1), - (6844, 2), - (6854, 1289), - (6860, 1), - (6861, 5), - (6865, 163), - (6869, 1), - (6870, 1), - (6872, 1), - (6875, 1), - (6881, 3), - (6886, 1289), - (6896, 1), - (6897, 166), - (6902, 1), - (6915, 1), - (6918, 1289), - (6929, 162), - (6932, 2), - (6933, 1), - (6934, 1), - (6947, 1), - (6950, 1290), - (6961, 162), - (6966, 1), - (6969, 2), - (6982, 1289), - (6993, 162), - (6998, 1), - (7004, 1), - (7005, 1), - (7014, 1289), - (7025, 162), - (7030, 1), - (7032, 1), - (7034, 1), - (7040, 1), - (7041, 1), - (7046, 1289), - (7057, 162), - (7058, 1), - (7059, 1), - (7062, 1), - (7070, 1), - (7076, 1), - (7077, 3), - (7078, 1289), - (7084, 1), - (7089, 162), - (7094, 1), - (7110, 1289), - (7112, 1), - (7113, 5), - (7121, 162), - (7124, 1), - (7126, 1), - (7133, 1), - (7142, 1289), - (7148, 1), - (7149, 12), - (7153, 162), - (7158, 1), - (7174, 1289), - (7184, 1), - (7185, 170), - (7190, 1), - (7206, 1289), - (7217, 162), - (7220, 1), - (7221, 82), - (7222, 1), - (7224, 81), - (7229, 1), - (7237, 1), - (7238, 1289), - (7242, 1), - (7243, 1), - (7248, 1), - (7249, 162), - (7254, 1), - (7256, 1), - (7257, 1), - (7266, 4), - (7270, 1289), - (7274, 13), - (7280, 20), - (7281, 162), - (7286, 1), - (7288, 12), - (7292, 1), - (7293, 5), - (7296, 1), - (7302, 1289), - (7308, 1), - (7313, 162), - (7315, 1), - (7318, 1), - (7328, 1), - (7329, 1), - (7334, 1290), - (7345, 162), - (7349, 1), - (7350, 1), - (7353, 1), - (7364, 1), - (7365, 1), - (7366, 1290), - (7377, 162), 
- (7382, 1), - (7392, 1), - (7398, 1289), - (7400, 1), - (7401, 4), - (7406, 1), - (7409, 162), - (7411, 1), - (7414, 1), - (7430, 1289), - (7431, 3), - (7436, 1), - (7437, 2), - (7441, 162), - (7445, 5), - (7446, 1), - (7448, 1), - (7460, 1), - (7462, 1289), - (7472, 1), - (7473, 166), - (7474, 1), - (7478, 1), - (7494, 1289), - (7505, 162), - (7508, 3), - (7509, 2), - (7510, 2), - (7525, 1), - (7526, 1289), - (7532, 1), - (7537, 162), - (7542, 1), - (7544, 1), - (7545, 9), - (7546, 1), - (7558, 1289), - (7569, 162), - (7574, 1), - (7580, 1), - (7581, 6), - (7590, 1289), - (7601, 162), - (7606, 1), - (7616, 1), - (7617, 6), - (7622, 1289), - (7623, 1), - (7625, 1), - (7633, 162), - (7638, 1), - (7652, 1), - (7653, 11), - (7654, 1289), - (7657, 1), - (7665, 162), - (7670, 1), - (7686, 1289), - (7688, 1), - (7689, 1), - (7697, 162), - (7702, 1), - (7708, 1), - (7715, 1), - (7717, 2), - (7718, 1289), - (7724, 1), - (7725, 3), - (7729, 162), - (7734, 1), - (7746, 1), - (7750, 1289), - (7760, 1), - (7761, 167), - (7766, 1), - (7782, 1289), - (7793, 162), - (7794, 1), - (7796, 1), - (7797, 1), - (7798, 1), - (7814, 1289), - (7820, 1), - (7825, 162), - (7826, 1), - (7830, 1), - (7832, 1), - (7833, 14), - (7842, 1), - (7846, 1289), - (7857, 162), - (7862, 1), - (7863, 1), - (7868, 1), - (7869, 4), - (7878, 1289), - (7885, 1), - (7889, 162), - (7894, 1), - (7904, 1), - (7905, 2), - (7910, 1289), - (7921, 162), - (7926, 1), - (7929, 1), - (7940, 1), - (7941, 2), - (7942, 1289), - (7953, 162), - (7958, 1), - (7963, 1), - (7973, 1), - (7974, 1289), - (7976, 1), - (7977, 16), - (7985, 162), - (7989, 1), - (7990, 1), - (7991, 1), - (7997, 1), - (8000, 1), - (8006, 1289), - (8012, 1), - (8013, 14), - (8017, 162), - (8022, 1), - (8038, 1289), - (8048, 1), - (8049, 185), - (8054, 2), - (8070, 1289), - (8081, 162), - (8084, 1), - (8085, 24), - (8086, 1), - (8102, 1289), - (8113, 162), - (8118, 1), - (8119, 1), - (8120, 1), - (8121, 1), - (8126, 1), - (8134, 1289), - (8140, 1), - 
(8145, 162), - (8150, 1), - (8157, 20), - (8166, 1289), - (8177, 162), - (8182, 1), - (8192, 1), - (8193, 1), - (8198, 1289), - (8209, 162), - (8214, 1), - (8228, 1), - (8229, 32), - (8230, 1290), - (8246, 1), - (8264, 1), - (8265, 27), - (8269, 1), - (8276, 1), - (8282, 1), - (8300, 1), - (8301, 133), - (8336, 2), - (8337, 60), - (8348, 3), - (8356, 1), - (8358, 1), - (8372, 1), - (8373, 196), - (8408, 1), - (8444, 1), - (8468, 1), - (8480, 1), - (8499, 1), - (8516, 1), - (8552, 1), - (8555, 1), - (8588, 1), - (8624, 1), - (8660, 3), - (8675, 1), - (8696, 1), - (8704, 1), - (8724, 1), - (8732, 1), - (8768, 1), - (8779, 1), - (8804, 1), - (8840, 1), - (8852, 2), - (8876, 1), - (8912, 1), - (8948, 1), - (8984, 1), - (9020, 1), - (9128, 1), - (9164, 1), - (9192, 1), - (9200, 2), - (9236, 1), - (9272, 1), - (9308, 1), - (9344, 1), - (9380, 1), - (9416, 1), - (9452, 1), - (9524, 1), - (9560, 1), - (9589, 1), - (9632, 1), - (9642, 1), - (9704, 1), - (9776, 1), - (9848, 1), - (9992, 1), - (10064, 1), - (10100, 1), - (10136, 1), - (10172, 1), - (10208, 1), - (10244, 1), - (10280, 1), - (10316, 1), - (10388, 1), - (10532, 1), - (10572, 1), - (10620, 1), - (10640, 1), - (10669, 1), - (10748, 1), - (10856, 1), - (10964, 1), - (11067, 1), - (11072, 1), - (11180, 1), - (11216, 1), - (11252, 1), - (11288, 1), - (11324, 1), - (11348, 2), - (11360, 1), - (11396, 1), - (11432, 1), - (11468, 1), - (11504, 1), - (11540, 1), - (11576, 1), - (11612, 1), - (11648, 1), - (11756, 1), - (11792, 1), - (11828, 1), - (11864, 1), - (11936, 1), - (12008, 1), - (12080, 1), - (12152, 1), - (12188, 1), - (12224, 1), - (12260, 1), - (12296, 1), - (12332, 1), - (12360, 1), - (12368, 1), - (12404, 1), - (12440, 1), - (12476, 1), - (12501, 2), - (12512, 1), - (12548, 1), - (12584, 1), - (12620, 1), - (12656, 1), - (12693, 1), - (12728, 1), - (12885, 1), - (13123, 1), - (13269, 1), - (13461, 1), - (13653, 1), - (13664, 1), - (13740, 1), - (13872, 1), - (13946, 1), - (14109, 1), - (14613, 2), - (14805, 
2), - (14945, 1), - (14997, 1), - (15176, 1), - (15276, 1), - (15384, 1), - (15492, 1), - (15600, 1), - (15708, 1), - (15716, 1), - (15765, 1), - (15816, 1), - (15924, 1), - (16068, 1), - (16104, 1), - (16140, 1), - (16176, 1), - (16212, 1), - (16248, 1), - (16284, 1), - (16320, 1), - (16356, 1), - (16392, 1), - (16430, 1), - (16468, 1), - (16504, 1), - (16540, 1), - (16727, 2), - (16728, 1), - (16919, 2), - (16921, 1), - (16938, 1), - (17111, 6), - (17413, 1), - (17430, 1), - (17495, 1), - (17880, 1), - (18647, 2), - (18672, 1), - (19223, 38), - (19680, 1), - (20436, 1), - (21156, 1), - (21732, 1), - (22380, 1), - (22992, 1), - (23063, 17), - (23244, 1), - (23532, 1), - (23892, 1), - (24108, 1), - (24215, 1), - (24324, 1), - (24407, 2), - (24504, 1), - (24720, 1), - (24900, 1), - (24983, 205), - (25440, 1), - (25620, 1), - (26088, 1), - (26268, 1), - (26448, 1), - (26664, 1), - (26988, 1), - (27276, 1), - (27492, 1), - (27744, 1), - (28032, 1), - (28284, 1), - (28536, 1), - (28823, 42), - (28896, 1), - (29184, 1), - (29292, 1), - (29400, 1), - (29796, 1), - (29975, 4), - (30156, 1), - (30228, 1), - (30743, 238), - (30768, 1), - (31056, 1), - (31092, 1), - (31416, 1), - (32100, 1), - (32712, 1), - (33144, 1), - (33324, 1), - (33792, 1), - (34008, 1), - (34440, 1), - (34583, 81), - (34656, 1), - (34872, 1), - (34944, 1), - (35160, 1), - (35304, 1), - (35376, 1), - (35412, 1), - (35556, 1), - (35628, 1), - (35664, 1), - (35808, 1), - (36204, 1), - (36744, 1), - (37788, 1), - (39372, 1), - (40956, 1), - (41640, 1), - (41892, 1), - (42144, 1), - (42576, 1), - (42936, 1), - (43476, 1), - (45096, 1), - (47256, 1), - (47760, 1), - (47796, 1), - (47868, 1), - (48228, 1), - (48948, 1), - (49128, 1), - (49452, 1), - (49560, 1), - (49668, 1), - (49776, 1), - (50352, 1), - (50964, 1), - (52008, 1), - (53880, 1), - (55284, 1), - (55860, 1), - (56040, 1), - (56400, 1), - (56904, 1), - (57444, 1), - (59424, 1), - (60156, 1), - (60626, 1), - (60641, 1), - (61260, 1), - (62520, 1), 
- (64392, 1), - (65976, 1), - (67308, 1), - (68064, 1), - (68748, 1), - (69216, 1), - (69504, 1), - (69648, 1), - (69684, 1), - (69720, 1), - (69756, 1), - (69792, 1), - (69828, 1), - (70224, 1), - (70620, 1), - (71016, 1), - (71412, 1), - (71772, 1), - (71952, 1), - (72024, 1), - (72096, 1), - (72168, 1), - (72240, 1), - (72312, 1), - (72348, 1), - (72420, 1), - (72492, 1), - (72600, 1), - (72672, 1), - (72780, 1), - (72996, 1), - (73320, 1), - (73356, 1), - (73500, 1), - (73536, 1), - (73572, 1), - (73608, 1), - (73680, 1), - (73716, 1), - (73788, 1), - (73896, 1), - (74040, 1), - (74112, 1), - (74170, 1), - (74184, 1), - (74185, 1), - (74220, 1), - (74256, 1), - (74292, 1), - (74328, 1), - (74364, 1), - (74400, 1), - (74436, 1), - (74472, 1), - (74616, 1), - (74976, 1), - (75156, 1), - (75228, 1), - (75336, 1), - (75408, 1), - (75588, 1), - (75696, 1), - (75804, 1), - (75984, 1), - (76056, 1), - (76164, 1), - (76308, 1), - (76452, 1), - (76560, 1), - (76776, 1), - (76920, 1), - (77064, 1), - (77208, 1), - (77316, 1), - (77532, 1), - (77676, 1), - (77748, 1), - (77820, 1), - (77928, 1), - (78000, 1), - (78036, 1), - (78072, 1), - (78108, 1), - (78180, 1), - (78324, 1), - (78396, 1), - (78576, 1), - (78684, 1), - (78828, 1), - (78864, 1), - (78900, 1), - (78972, 1), - (79080, 1), - (79116, 1), - (79152, 1), - (79512, 1), - (79872, 1), - (80268, 1), - (80592, 1), - (80700, 1), - (80916, 1), - (81168, 1), - (81276, 1), - (81528, 1), - (81708, 1), - (81816, 1), - (81888, 1), - (82068, 1), - (82176, 1), - (82284, 1), - (82356, 1), - (82716, 1), - (83004, 1), - (83312, 1), - (83436, 1), - (83688, 1), - (83904, 1), - (84012, 1), - (84408, 1), - (84660, 1), - (85056, 1), - (85488, 1), - (85776, 1), - (85992, 1), - (86172, 1), - (86424, 1), - (86615, 1), - (86640, 1), - (86928, 1), - (87072, 1), - (87288, 1), - (87576, 1), - (87684, 1), - (87756, 1), - (87972, 1), - (88044, 1), - (88152, 1), - (88368, 1), - (88728, 1), - (88836, 1), - (88944, 1), - (89088, 1), - (89448, 
1), - (89592, 1), - (89700, 1), - (89808, 1), - (89952, 1), - (90060, 1), - (90204, 1), - (90348, 1), - (90528, 1), - (90636, 1), - (90744, 1), - (90816, 1), - (91032, 1), - (91068, 1), - (91140, 1), - (91212, 1), - (91284, 1), - (91860, 1), - (92112, 1), - (92292, 1), - (92400, 1), - (92544, 1), - (92652, 1), - (92796, 1), - (92904, 1), - (92976, 1), - (93192, 1), - (93300, 1), - (93444, 1), - (93516, 1), - (93624, 1), - (93696, 1), - (93840, 1), - (93984, 1), - (94056, 1), - (94128, 1), - (94164, 1), - (94200, 1), - (94236, 1), - (94272, 1), - (94344, 1), - (94452, 1), - (94524, 1), - (94596, 1), - (94704, 1), - (94776, 1), - (94884, 1), - (94956, 1), - (95172, 1), - (95244, 1), - (95280, 1), - (95316, 1), - (95352, 1), - (95388, 1), - (95424, 1), - (95460, 1), - (95496, 1), - (95604, 1), - (95676, 1), - (95784, 1), - (95856, 1), - (95928, 1), - (96000, 1), - (96036, 1), - (96072, 1), - (96108, 1), - (96144, 1), - (96180, 1), - (96216, 1), - (96288, 1), - (96576, 1), - (98029, 1), - (98304, 1), - (98527, 1), - (98628, 1), - (99276, 1), - (99528, 1), - (99780, 1), - (99996, 1), - (100212, 1), - (100428, 1), - (100680, 1), - (100752, 1), - (100788, 1), - (100860, 1), - (100932, 1), - (101004, 1), - (101076, 1), - (101148, 1), - (101220, 1), - (101256, 1), - (101328, 1), - (101364, 1), - (101400, 1), - (101436, 1), - (101472, 1), - (101508, 1), - (101544, 1), - (101616, 1), - (101652, 1), - (101724, 1), - (101832, 1), - (101904, 1), - (101940, 1), - (101976, 1), - (102012, 1), - (102048, 1), - (102084, 1), - (102120, 1), - (102264, 1), - (102516, 1), - (102588, 1), - (102624, 1), - (102660, 1), - (102696, 1), - (102732, 1), - (102768, 1), - (102804, 1), - (102840, 1), - (102876, 1), - (102912, 1), - (102948, 1), - (102984, 1), - (103056, 1), - (103092, 1), - (103128, 1), - (103164, 1), - (103200, 1), - (103236, 1), - (103272, 1), - (103308, 1), - (103344, 1), - (103380, 1), - (103452, 1), - (103560, 1), - (103596, 1), - (103632, 1), - (103668, 1), - (103704, 1), - 
(103740, 1), - (103776, 1), - (103848, 1), - (103920, 1), - (103956, 1), - (104028, 1), - (104100, 1), - (104136, 1), - (104208, 1), - (104244, 1), - (104316, 1), - (104352, 1), - (104388, 1), - (104424, 1), - (104460, 1), - (104496, 1), - (104532, 1), - (104568, 1), - (104604, 1), - (104676, 1), - (104712, 1), - (104748, 1), - (104784, 1), - (104820, 1), - (104856, 1), - (104892, 1), - (104928, 1), - (104964, 1), - (105000, 1), - (105036, 1), - (105072, 1), - (105108, 1), - (105216, 1), - (105324, 1), - (105360, 1), - (105396, 1), - (105432, 1), - (105468, 1), - (105504, 1), - (105540, 1), - (105576, 1), - (105612, 1), - (105648, 1), - (105684, 1), - (105720, 1), - (105756, 1), - (105792, 1), - (105828, 1), - (105864, 1), - (105900, 1), - (105936, 1), - (110580, 1), - (115224, 1), - (118788, 1), - (121056, 1), - (121452, 1), - (121848, 1), - (122244, 1), - (122604, 1), - (122928, 1), - (123252, 1), - (123288, 1), - (123360, 1), - (123432, 1), - (123468, 1), - (123504, 1), - (123540, 1), - (123612, 1), - (123684, 1), - (123756, 1), - (123828, 1), - (123900, 1), - (123972, 1), - (124080, 1), - (124188, 1), - (124296, 1), - (124404, 1), - (124548, 1), - (124764, 1), - (124872, 1), - (124980, 1), - (125088, 1), - (125196, 1), - (125304, 1), - (125412, 1), - (125448, 1), - (125520, 1), - (125628, 1), - (125700, 1), - (125772, 1), - (125844, 1), - (125880, 1), - (125916, 1), - (125952, 1), - (125988, 1), - (126024, 1), - (126060, 1), - (126096, 1), - (126168, 1), - (126276, 1), - (126312, 1), - (126348, 1), - (126420, 1), - (126492, 1), - (126564, 1), - (126636, 1), - (126708, 1), - (126780, 1), - (126852, 1), - (126960, 1), - (127068, 1), - (127176, 1), - (127212, 1), - (127248, 1), - (127284, 1), - (127320, 1), - (127356, 1), - (127392, 1), - (127464, 1), - (127536, 1), - (127608, 1), - (127644, 1), - (127680, 1), - (127716, 1), - (127788, 1), - (127860, 1), - (127932, 1), - (128004, 1), - (128076, 1), - (128148, 1), - (128220, 1), - (128256, 1), - (128292, 1), - 
(128328, 1), - (128364, 1), - (128400, 1), - (128436, 1), - (128472, 1), - (128508, 1), - (128544, 1), - (128580, 1), - (128616, 1), - (128652, 1), - (128688, 1), - (128724, 1), - (128760, 1), - (128832, 1), - (128904, 1), - (128976, 1), - (129048, 1), - (129120, 1), - (129192, 1), - (129228, 1), - (129264, 1), - (129300, 1), - (129372, 1), - (129408, 1), - (129444, 1), - (129480, 1), - (129516, 1), - (129552, 1), - (129588, 1), - (129660, 1), - (129732, 1), - (129768, 1), - (129804, 1), - (129840, 1), - (129876, 1), - (129912, 1), - (129948, 1), - (129984, 1), - (130020, 1), - (130056, 1), - (130128, 1), - (130200, 1), - (130236, 1), - (130272, 1), - (130308, 1), - (130380, 1), - (130452, 1), - (130524, 1), - (130560, 1), - (130596, 1), - (130632, 1), - (130668, 1), - (130704, 1), - (130776, 1), - (130812, 1), - (130848, 1), - (130920, 1), - (130992, 1), - (131064, 1), - (131136, 1), - (131172, 1), - (131208, 1), - (131244, 1), - (131316, 1), - (131388, 1), - (131424, 1), - (131532, 1), - (131640, 1), - (131784, 1), - (131892, 1), - (131964, 1), - (132036, 1), - (132108, 1), - (132180, 1), - (132252, 1), - (132324, 1), - (132360, 1), - (132432, 1), - (132504, 1), - (132576, 1), - (132684, 1), - (132792, 1), - (132900, 1), - (132972, 1), - (133044, 1), - (133116, 1), - (133188, 1), - (133260, 1), - (133332, 1), - (133368, 1), - (133404, 1), - (133440, 1), - (133476, 1), - (133512, 1), - (133548, 1), - (133620, 1), - (133692, 1), - (133764, 1), - (133836, 1), - (133908, 1), - (133980, 1), - (134016, 1), - (134052, 1), - (134088, 1), - (134124, 1), - (134160, 1), - (134196, 1), - (134232, 1), - (134268, 1), - (134304, 1), - (134340, 1), - (134376, 1), - (134412, 1), - (134484, 1), - (134592, 1), - (134700, 1), - (134808, 1), - (134916, 1), - (134988, 1), - (135024, 1), - (135060, 1), - (135096, 1), - (135132, 1), - (135168, 1), - (135204, 1), - (135240, 1), - (135276, 1), - (135312, 1), - (135348, 1), - (135384, 1), - (135456, 1), - (135492, 1), - (135528, 1), - 
(135564, 1), - (135600, 1), - (135636, 1), - (135672, 1), - (135708, 1), - (135744, 1), - (135780, 1), - (135816, 1), - (135852, 1), - (135888, 1), - (135924, 1), - (135960, 1), - (135996, 1), - (136032, 1), - (136068, 1), - (136140, 1), - (136212, 1), - (136284, 1), - (136356, 1), - (136428, 1), - (136500, 1), - (136572, 1), - (136608, 1), - (136644, 1), - (136680, 1), - (136716, 1), - (136752, 1), - (136788, 1), - (136824, 1), - (136860, 1), - (136896, 1), - (136932, 1), - (136968, 1), - (137004, 1), - (137040, 1), - (137076, 1), - (137112, 1), - (137148, 1), - (137184, 1), - (137256, 1), - (137328, 1), - (137400, 1), - (137472, 1), - (137544, 1), - (137580, 1), - (137616, 1), - (137652, 1), - (137688, 1), - (137724, 1), - (137796, 1), - (137832, 1), - (137868, 1), - (137904, 1), - (137940, 1), - (137976, 1), - (138012, 1), - (138048, 1), - (138084, 1), - (138120, 1), - (138228, 1), - (138300, 1), - (138336, 1), - (138372, 1), - (138408, 1), - (138444, 1), - (138480, 1), - (138516, 1), - (138552, 1), - (138588, 1), - (138624, 1), - (138696, 1), - (138768, 1), - (138840, 1), - (138912, 1), - (138948, 1), - (138984, 1), - (139020, 1), - (139056, 1), - (139092, 1), - (139128, 1), - (139164, 1), - (139200, 1), - (139272, 1), - (139308, 1), - (139380, 1), - (139452, 1), - (139488, 1), - (139524, 1), - (139596, 1), - (139632, 1), - (139668, 1), - (139704, 1), - (139740, 1), - (139776, 1), - (139848, 1), - (139884, 1), - (139920, 1), - (139956, 1), - (139992, 1), - (140028, 1), - (140064, 1), - (140136, 1), - (140172, 1), - (140208, 1), - (140244, 1), - (140280, 1), - (140316, 1), - (140352, 1), - (140424, 1), - (140460, 1), - (140496, 1), - (140532, 1), - (140604, 1), - (140640, 1), - (140676, 1), - (140712, 1), - (140748, 1), - (140784, 1), - (140820, 1), - (140856, 1), - (140928, 1), - (141036, 1), - (141072, 1), - (141108, 1), - (141144, 1), - (141180, 1), - (141216, 1), - (141252, 1), - (141324, 1), - (141396, 1), - (141432, 1), - (141468, 1), - (141504, 1), - 
(141612, 1), - (142152, 1), - (142188, 1), - (142260, 1), - (142296, 1), - (142800, 1), - (143304, 1), - (143376, 1), - (143448, 1), - (143520, 1), - (143592, 1), - (143664, 1), - (143700, 1), - (143736, 1), - (143772, 1), - (143808, 1), - (143844, 1), - (143880, 1), - (143952, 1), - (144096, 1), - (144240, 1), - (144348, 1), - (144456, 1), - (144564, 1), - (144672, 1), - (144708, 1), - (144744, 1), - (144780, 1), - (144816, 1), - (144852, 1), - (144888, 1), - (144924, 1), - (144960, 1), - (144996, 1), - (145032, 1), - (145068, 1), - (145104, 1), - (145140, 1), - (145176, 1), - (145212, 1), - (145248, 1), - (145284, 1), - (145320, 1), - (145356, 1), - (145392, 1), - (145464, 1), - (145500, 1), - (145536, 1), - (145572, 1), - (145644, 1), - (145716, 1), - (145752, 1), - (145788, 1), - (145824, 1), - (145860, 1), - (145896, 1), - (145932, 1), - (145968, 1), - (146004, 1), - (146040, 1), - (146076, 1), - (146112, 1), - (146148, 1), - (146184, 1), - (146220, 1), - (146256, 1), - (146292, 1), - (146328, 1), - (146364, 1), - (146400, 1), - (146436, 1), - (146472, 1), - (146508, 1), - (146544, 1), - (146580, 1), - (146616, 1), - (146652, 1), - (146688, 1), - (146724, 1), - (146760, 1), - (146796, 1), - (146832, 1), - (146868, 1), - (146940, 1), - (146976, 1), - (147012, 1), - (147048, 1), - (147084, 1), - (147120, 1), - (147156, 1), - (147192, 1), - (147228, 1), - (147264, 1), - (147300, 1), - (147336, 1), - (147372, 1), - (147408, 1), - (147444, 1), - (147480, 1), - (147516, 1), - (147552, 1), - (147588, 1), - (147624, 1), - (147660, 1), - (147732, 1), - (147768, 1), - (147804, 1), - (147840, 1), - (147876, 1), - (147912, 1), - (147948, 1), - (147984, 1), - (148020, 1), - (148056, 1), - (148092, 1), - (148128, 1), - (148164, 1), - (148200, 1), - (148236, 1), - (148272, 1), - (1070556, 1), - (1079378, 1), - (1085421, 1), - (1086835, 1), - (1121118, 1), - (1121208, 1), - (1124515, 1), - (1128287, 1), - (1128379, 1), - (1153308, 1), - (1153342, 4), - (1153344, 5), - 
(1153398, 1), - (1153571, 1), - (1153663, 1), - (1153670, 1), - (1153672, 3), - (1153688, 3), - (1154504, 1), - (1154538, 5), - (1154540, 6), - (1154596, 1), - (1164963, 1), - (1165053, 1), - (1166494, 1), - (1166586, 1), - (1175528, 1), - (1175636, 1), - (1177016, 1), - (1193653, 1), - (1193743, 1), - (1205060, 1), - (1205152, 1), - (1323322, 1), - (1323414, 1), - (1336354, 1), - (1336444, 1), - (1348925, 1), - (1349015, 1), - (1353326, 1), - (1353418, 1), - (1426757, 1), - (1426845, 1), - (1426847, 1), - (1426937, 1), - (1476463, 1), - (1476553, 1), - (1516580, 1), - (1516670, 1), - (1605731, 1), - (1605821, 1), -]; \ No newline at end of file +pub const KUSAMA_STATE_DISTRIBUTION: &'static [(u32, u32)] = &[ + (32, 35), + (33, 20035), + (34, 5369), + (35, 184), + (36, 54599), + (37, 1515056), + (38, 885), + (39, 69965), + (41, 210754), + (42, 467), + (43, 3241), + (44, 32660), + (45, 231141), + (46, 220016), + (47, 248931), + (48, 157232), + (49, 143236), + (50, 2428), + (51, 1476159), + (52, 31), + (53, 112), + (54, 711), + (55, 1934), + (56, 39), + (57, 407), + (58, 6929), + (59, 6568), + (60, 26), + (61, 268673), + (62, 118137), + (63, 84640), + (64, 193232), + (65, 2584210), + (66, 1002), + (67, 2993), + (68, 4266), + (69, 5633381), + (70, 277369), + (71, 5106), + (72, 722), + (73, 1882), + (74, 8178), + (75, 4045), + (76, 1596), + (77, 5335), + (78, 14591), + (79, 9645), + (80, 44171), + (81, 13474), + (82, 51090), + (83, 2595), + (84, 6276), + (85, 382195), + (86, 1062), + (87, 3846), + (88, 5663), + (89, 3811), + (90, 1580), + (91, 5729), + (92, 19144), + (93, 197), + (94, 235), + (95, 545), + (96, 54914), + (97, 3858), + (98, 1610), + (99, 635), + (100, 2481), + (101, 6457), + (102, 3753951), + (103, 11821), + (104, 11114), + (105, 2601), + (106, 2518), + (107, 521925), + (108, 297), + (109, 411), + (110, 668), + (111, 4500), + (112, 704), + (113, 316), + (114, 59), + (115, 291), + (116, 1727), + (117, 6010), + (118, 51874), + (119, 13969), + (120, 9496), 
+ (121, 274), + (122, 810), + (123, 643), + (124, 69), + (125, 41), + (126, 329), + (127, 175435), + (128, 2641), + (129, 2658), + (130, 415277), + (131, 2705), + (132, 2314), + (133, 4290), + (134, 693), + (135, 1957478), + (136, 1111), + (137, 1474503), + (138, 3656), + (139, 940), + (140, 1755692), + (141, 61), + (142, 4140), + (143, 47), + (144, 6725), + (145, 610), + (146, 250), + (147, 48), + (148, 28), + (149, 132), + (150, 123489), + (151, 7476), + (152, 55), + (153, 68), + (154, 170), + (155, 566), + (156, 8110), + (157, 1243), + (158, 1445), + (159, 2569), + (160, 1096), + (161, 865), + (162, 634), + (163, 372411), + (164, 685), + (165, 3481), + (166, 1467), + (167, 2146), + (168, 556539), + (169, 566), + (170, 5080), + (171, 202), + (172, 123), + (173, 100750), + (174, 667), + (175, 433), + (176, 737), + (177, 315), + (178, 317), + (179, 656), + (180, 2522), + (181, 315), + (182, 406), + (183, 4680), + (184, 4941), + (185, 828), + (186, 782), + (187, 565), + (188, 584), + (189, 376), + (190, 321), + (191, 418), + (192, 167), + (193, 362), + (194, 2198), + (195, 180), + (196, 787), + (197, 2680), + (198, 501), + (199, 843), + (200, 287), + (201, 608362), + (202, 1157), + (203, 959), + (204, 1683623), + (205, 440), + (206, 756), + (207, 812), + (208, 1147), + (209, 723), + (210, 856), + (211, 496), + (212, 916), + (213, 615), + (214, 488), + (215, 522), + (216, 8265), + (217, 32574), + (218, 417), + (219, 247), + (220, 579), + (221, 68), + (222, 126), + (223, 306), + (224, 310), + (225, 24), + (226, 37), + (227, 160), + (228, 11), + (229, 3288), + (230, 349), + (231, 23), + (232, 14), + (233, 45), + (234, 452840), + (235, 118), + (236, 741), + (237, 390), + (238, 517), + (239, 694), + (240, 765), + (241, 542), + (242, 417), + (243, 617), + (244, 1307), + (245, 583), + (246, 1640), + (247, 735), + (248, 478), + (249, 4312), + (250, 5426), + (251, 1067), + (252, 435), + (253, 202), + (254, 122), + (255, 486), + (256, 180), + (257, 279), + (258, 406), + (259, 
160), + (260, 2759), + (261, 2600), + (262, 686), + (263, 95), + (264, 164), + (265, 150), + (266, 1013), + (267, 552618), + (268, 217), + (269, 188), + (270, 284), + (271, 416), + (272, 453), + (273, 95), + (274, 42), + (275, 68), + (276, 90), + (277, 123), + (278, 340), + (279, 98), + (280, 2795), + (281, 261), + (282, 7370), + (283, 5768), + (284, 3285), + (285, 461), + (286, 363), + (287, 456), + (288, 1475), + (289, 211), + (290, 153), + (291, 282), + (292, 241), + (293, 2924), + (294, 261), + (295, 1070), + (296, 1301), + (297, 688), + (298, 592), + (299, 95), + (300, 686447), + (301, 42), + (302, 385), + (303, 24), + (304, 931), + (305, 49), + (306, 23), + (307, 67), + (308, 32), + (309, 38), + (310, 2), + (311, 7), + (312, 198), + (313, 11), + (314, 38), + (315, 3704), + (316, 7406), + (317, 116), + (318, 229), + (319, 100), + (320, 437), + (321, 244), + (322, 285), + (323, 433), + (324, 382), + (325, 3171), + (326, 761), + (327, 324), + (328, 2264), + (329, 340), + (330, 353), + (331, 110), + (332, 403), + (333, 731366), + (334, 223), + (335, 350), + (336, 600), + (337, 219), + (338, 112), + (339, 10), + (340, 761), + (341, 35), + (342, 99), + (343, 83), + (344, 136), + (345, 7), + (346, 836), + (347, 11), + (348, 10832), + (349, 8931), + (350, 33), + (351, 64), + (352, 66), + (353, 54), + (354, 78), + (355, 198), + (356, 722), + (357, 2647), + (358, 64), + (359, 71), + (360, 2242), + (361, 1462), + (362, 505), + (363, 444), + (364, 597), + (365, 372), + (366, 664852), + (367, 464), + (368, 605), + (369, 123), + (370, 64), + (371, 117), + (372, 328), + (373, 123), + (374, 227), + (375, 151), + (376, 881), + (377, 111), + (378, 30), + (379, 73), + (380, 2126), + (381, 3662), + (382, 9107), + (383, 18), + (384, 294), + (385, 12), + (386, 262), + (387, 127), + (388, 269), + (389, 2566), + (390, 14), + (391, 17), + (392, 80), + (393, 67), + (394, 1470), + (395, 25), + (396, 220), + (397, 131), + (398, 225), + (399, 484755), + (400, 597), + (401, 300), + (402, 
253), + (403, 359), + (404, 523), + (405, 311), + (406, 238), + (407, 999), + (408, 424), + (409, 165), + (410, 96), + (411, 248), + (412, 1771), + (413, 139), + (414, 7374), + (415, 11186), + (416, 1355), + (417, 1283666), + (418, 9), + (419, 116), + (420, 3897), + (421, 2554), + (422, 1), + (423, 1), + (424, 16878), + (425, 3198212), + (426, 335), + (427, 1676), + (428, 80), + (429, 19), + (430, 47), + (431, 495), + (432, 421946), + (433, 73), + (434, 95), + (435, 105), + (436, 184), + (437, 56903), + (438, 132), + (439, 87), + (440, 207411), + (441, 230), + (442, 372), + (443, 361), + (444, 387), + (445, 299), + (446, 175), + (447, 7487), + (448, 16346), + (449, 37), + (450, 98313), + (451, 307), + (452, 304), + (453, 2675), + (454, 229), + (455, 130), + (456, 134), + (457, 50), + (458, 238), + (459, 2), + (460, 2267), + (461, 7), + (462, 1), + (463, 8), + (464, 395), + (465, 1279781), + (466, 9), + (467, 12), + (468, 633), + (469, 37), + (470, 13), + (471, 54), + (472, 247), + (473, 82), + (474, 119), + (475, 114), + (476, 332), + (477, 79), + (478, 116), + (479, 128), + (480, 4206), + (481, 20732), + (482, 311), + (483, 343), + (484, 527), + (485, 2750), + (486, 76), + (487, 152), + (488, 510), + (489, 63), + (490, 257), + (491, 79), + (492, 825), + (493, 4198), + (494, 389), + (495, 72), + (496, 1547), + (497, 34), + (498, 631996), + (499, 5), + (500, 2334), + (501, 34), + (502, 7), + (503, 7), + (504, 7682), + (505, 6), + (506, 26), + (507, 22), + (508, 461), + (509, 95), + (510, 36), + (511, 46), + (512, 2741), + (513, 38455), + (514, 29678), + (515, 179), + (516, 1637), + (517, 2597), + (518, 166), + (519, 230), + (520, 2736), + (521, 187), + (522, 361), + (523, 310), + (524, 3327), + (525, 76), + (526, 8070), + (527, 35), + (528, 3310), + (529, 118), + (530, 167), + (531, 214180), + (532, 4597), + (533, 153), + (534, 126), + (535, 23), + (536, 13920), + (537, 10), + (538, 11), + (539, 50), + (540, 50739), + (541, 8), + (542, 347), + (543, 77), + (544, 
451575), + (545, 16), + (546, 218814), + (547, 1859026), + (548, 303), + (549, 2511), + (550, 27), + (551, 28), + (552, 188), + (553, 46), + (554, 216), + (555, 63), + (556, 202), + (557, 192), + (558, 257), + (559, 170377), + (560, 902), + (561, 424), + (562, 186), + (563, 145), + (564, 342), + (565, 76), + (566, 41), + (567, 26), + (568, 136), + (569, 1336), + (570, 988), + (571, 131), + (572, 766), + (573, 95), + (574, 57), + (575, 16), + (576, 47), + (577, 63), + (578, 5), + (579, 140), + (580, 1263808), + (581, 2498), + (583, 2), + (584, 706), + (585, 49), + (586, 502), + (587, 16), + (588, 115), + (589, 25), + (590, 31), + (591, 34), + (592, 818), + (593, 60), + (594, 84), + (595, 116), + (596, 446), + (597, 111), + (598, 151), + (599, 153), + (600, 1408), + (601, 165), + (602, 575), + (603, 163), + (604, 309), + (605, 52), + (606, 40), + (607, 116), + (608, 749), + (609, 231), + (610, 171), + (611, 218), + (612, 1145), + (613, 2572), + (614, 27), + (615, 26), + (616, 2060), + (617, 173), + (618, 1094), + (619, 66), + (620, 14235), + (622, 294), + (623, 2), + (624, 79374), + (625, 1), + (626, 3), + (627, 7), + (628, 335), + (629, 27), + (630, 47), + (631, 113), + (632, 589), + (633, 56), + (634, 75), + (635, 85), + (636, 740), + (637, 118), + (638, 180), + (639, 149), + (640, 1169), + (641, 135), + (642, 169), + (643, 170), + (644, 1802), + (645, 2481), + (646, 28), + (647, 78), + (648, 5585), + (649, 173), + (650, 135), + (651, 177), + (652, 6553), + (653, 129), + (654, 55), + (655, 6), + (656, 13250), + (657, 5), + (658, 15), + (659, 3), + (660, 39892), + (661, 28), + (663, 1), + (664, 575061), + (665, 1), + (666, 5), + (667, 73), + (668, 39), + (669, 62), + (670, 50), + (671, 27), + (672, 33), + (673, 48), + (674, 44), + (675, 151), + (676, 70), + (677, 2540), + (678, 150), + (679, 109), + (680, 117), + (681, 95), + (682, 80), + (683, 44), + (684, 34), + (685, 31), + (686, 125), + (687, 146), + (688, 423), + (689, 142), + (690, 154), + (691, 135), + (692, 
194), + (693, 48), + (694, 6), + (695, 141), + (696, 47), + (697, 9), + (699, 1), + (701, 1), + (702, 2), + (703, 81), + (704, 3), + (705, 4), + (706, 23), + (707, 131), + (708, 31), + (709, 2458), + (710, 346), + (711, 43), + (712, 46), + (713, 48), + (714, 85), + (715, 119), + (716, 89), + (717, 97), + (718, 95), + (719, 137), + (720, 437), + (721, 64), + (722, 28), + (723, 29), + (724, 121), + (725, 162), + (726, 241), + (727, 219), + (728, 143), + (729, 92), + (730, 100), + (731, 42), + (732, 38), + (733, 60), + (734, 2), + (735, 71), + (736, 12), + (737, 9), + (738, 7), + (739, 193), + (740, 2), + (741, 2404), + (742, 3), + (743, 11), + (744, 5), + (745, 5), + (746, 9), + (747, 16), + (748, 27), + (749, 32), + (750, 57), + (751, 54), + (752, 383), + (753, 61), + (754, 48), + (755, 84), + (756, 108), + (757, 134), + (758, 121), + (759, 160), + (760, 80), + (761, 68), + (762, 192), + (763, 107), + (764, 270), + (765, 58), + (766, 125), + (767, 151), + (768, 75), + (769, 94), + (770, 91), + (771, 187), + (772, 57), + (773, 2371), + (774, 8), + (775, 93), + (776, 107), + (777, 20), + (779, 1), + (780, 22), + (781, 1), + (783, 6), + (784, 318), + (785, 25), + (786, 31), + (787, 23), + (788, 28), + (789, 62), + (790, 53), + (791, 41), + (792, 68), + (793, 60), + (794, 88), + (795, 108), + (796, 63), + (797, 100), + (798, 68), + (799, 72), + (800, 83), + (801, 46), + (802, 36), + (803, 157), + (804, 139), + (805, 2439), + (806, 73), + (807, 81), + (808, 99), + (809, 66), + (810, 45), + (811, 98), + (812, 1), + (814, 31), + (815, 1), + (816, 312), + (818, 155), + (819, 2), + (820, 12), + (821, 27), + (822, 97), + (823, 23), + (824, 7), + (825, 15), + (826, 37), + (827, 39), + (828, 28), + (829, 33), + (830, 53), + (831, 101), + (832, 189), + (833, 94), + (834, 66), + (835, 173), + (836, 74), + (837, 2402), + (838, 64), + (839, 28), + (840, 20), + (841, 13), + (842, 32), + (843, 72), + (844, 68), + (845, 50), + (846, 41), + (847, 114), + (848, 345), + (849, 33), + 
(850, 17), + (851, 6), + (852, 61), + (853, 101), + (854, 123), + (855, 28), + (856, 3), + (857, 3), + (858, 30), + (859, 12), + (860, 28), + (861, 16), + (862, 20), + (863, 7), + (864, 23), + (865, 28), + (866, 40), + (867, 159), + (868, 40), + (869, 2361), + (870, 92), + (871, 88), + (872, 193), + (873, 61), + (874, 58), + (875, 67), + (876, 65), + (877, 46), + (878, 55), + (879, 30), + (880, 334), + (881, 74), + (882, 121), + (883, 107), + (884, 36), + (885, 66), + (886, 22), + (887, 25), + (888, 24), + (889, 10), + (890, 44), + (891, 5), + (892, 84), + (893, 4), + (894, 1), + (895, 7), + (896, 3), + (897, 8), + (898, 3), + (899, 126), + (900, 13), + (901, 2280), + (902, 74), + (903, 36), + (904, 46), + (905, 52), + (906, 24), + (907, 23), + (908, 43), + (909, 31), + (910, 66), + (911, 65), + (912, 376), + (913, 77), + (914, 85), + (915, 60), + (916, 29), + (917, 64), + (918, 48), + (919, 135), + (920, 21), + (921, 34), + (922, 26), + (923, 22), + (924, 52), + (925, 28), + (926, 142), + (927, 18), + (928, 14), + (929, 30), + (930, 56), + (931, 113), + (933, 2264), + (934, 14), + (935, 4), + (936, 10), + (937, 18), + (938, 2), + (939, 30), + (940, 9), + (941, 29), + (942, 10), + (943, 17), + (944, 296), + (945, 31), + (946, 40), + (947, 26), + (948, 70), + (949, 66), + (950, 44), + (951, 57), + (952, 55), + (953, 56), + (954, 51), + (955, 133), + (956, 39), + (957, 49), + (958, 45), + (959, 26), + (960, 30), + (961, 35), + (962, 40), + (963, 148), + (964, 34), + (965, 2264), + (966, 50), + (967, 21), + (968, 2), + (970, 24), + (972, 45), + (973, 8), + (974, 11), + (975, 20), + (976, 287), + (977, 20), + (978, 6), + (979, 9), + (980, 99), + (981, 32), + (982, 10), + (983, 13), + (984, 26), + (985, 30), + (986, 31), + (987, 38), + (988, 25), + (989, 32), + (990, 44), + (991, 125), + (992, 58), + (993, 44), + (994, 25), + (995, 140), + (996, 25), + (997, 2222), + (998, 16), + (999, 25), + (1000, 38), + (1001, 66), + (1002, 31), + (1003, 38), + (1004, 38), + (1005, 
10), + (1006, 7), + (1008, 283), + (1009, 3), + (1010, 1), + (1011, 17), + (1012, 4), + (1013, 51), + (1014, 1), + (1015, 1), + (1016, 3), + (1017, 12), + (1018, 11), + (1019, 21), + (1020, 31), + (1021, 14), + (1022, 14), + (1023, 23), + (1024, 25), + (1025, 42), + (1026, 39), + (1027, 220), + (1028, 33), + (1029, 2206), + (1030, 24), + (1031, 64), + (1032, 36), + (1033, 61), + (1034, 123), + (1035, 32), + (1036, 20), + (1037, 15), + (1038, 11), + (1039, 33), + (1040, 311), + (1041, 58), + (1042, 80), + (1043, 29), + (1044, 10), + (1045, 48), + (1046, 18), + (1047, 22), + (1048, 3), + (1049, 17), + (1050, 1), + (1051, 2), + (1052, 5), + (1053, 4), + (1054, 4), + (1055, 1), + (1056, 4), + (1057, 15), + (1058, 11), + (1059, 135), + (1060, 59), + (1061, 2132), + (1062, 32), + (1063, 116), + (1064, 37), + (1065, 44), + (1066, 42), + (1067, 28), + (1068, 10), + (1069, 36), + (1070, 59), + (1071, 48), + (1072, 332), + (1073, 59), + (1074, 43), + (1075, 19), + (1076, 19), + (1077, 31), + (1078, 31), + (1079, 20), + (1080, 38), + (1081, 58), + (1082, 37), + (1083, 47), + (1084, 19), + (1085, 24), + (1086, 12), + (1087, 26), + (1088, 89), + (1089, 3), + (1091, 108), + (1093, 2112), + (1094, 13), + (1095, 4), + (1096, 4), + (1097, 17), + (1098, 7), + (1099, 105), + (1100, 12), + (1101, 10), + (1102, 17), + (1103, 19), + (1104, 329), + (1105, 28), + (1106, 58), + (1107, 21), + (1108, 22), + (1109, 63), + (1110, 29), + (1111, 53), + (1112, 84), + (1113, 28), + (1114, 30), + (1115, 22), + (1116, 40), + (1117, 16), + (1118, 20), + (1119, 75), + (1120, 43), + (1121, 49), + (1122, 25), + (1123, 118), + (1124, 8), + (1125, 2083), + (1126, 21), + (1127, 3), + (1128, 43), + (1129, 1), + (1130, 1), + (1132, 3), + (1133, 1), + (1134, 3), + (1135, 83), + (1136, 266), + (1137, 7), + (1138, 22), + (1139, 14), + (1140, 30), + (1141, 54), + (1142, 125), + (1143, 44), + (1144, 34), + (1145, 19), + (1146, 21), + (1147, 19), + (1148, 46), + (1149, 45), + (1150, 54), + (1151, 22), + (1152, 
30), + (1153, 20), + (1154, 7), + (1155, 143), + (1156, 23), + (1157, 2078), + (1158, 30), + (1159, 23), + (1160, 12), + (1161, 18), + (1162, 6), + (1164, 5), + (1165, 1), + (1168, 254), + (1169, 1), + (1170, 3), + (1171, 95), + (1172, 37), + (1173, 23), + (1174, 7), + (1175, 11), + (1176, 5), + (1177, 14), + (1178, 15), + (1179, 19), + (1180, 10), + (1181, 28), + (1182, 87), + (1183, 35), + (1184, 30), + (1185, 30), + (1186, 38), + (1187, 148), + (1188, 49), + (1189, 2056), + (1190, 42), + (1191, 41), + (1192, 14), + (1193, 36), + (1194, 37), + (1195, 22), + (1196, 108), + (1197, 62), + (1198, 55), + (1199, 43), + (1200, 261), + (1201, 16), + (1202, 1), + (1203, 9), + (1204, 3), + (1205, 32), + (1207, 81), + (1208, 3), + (1210, 3), + (1212, 4), + (1213, 9), + (1214, 5), + (1215, 6), + (1216, 4), + (1217, 8), + (1218, 13), + (1219, 120), + (1220, 11), + (1221, 1989), + (1222, 11), + (1223, 20), + (1224, 15), + (1225, 21), + (1226, 23), + (1227, 50), + (1228, 37), + (1229, 51), + (1230, 37), + (1231, 21), + (1232, 256), + (1233, 26), + (1234, 25), + (1235, 21), + (1236, 79), + (1237, 50), + (1238, 21), + (1239, 2), + (1240, 6), + (1241, 8), + (1243, 95), + (1244, 1), + (1247, 1), + (1248, 1), + (1249, 1), + (1250, 96), + (1251, 112), + (1252, 43), + (1253, 1960), + (1254, 7), + (1255, 13), + (1256, 16), + (1257, 20), + (1258, 19), + (1259, 17), + (1260, 12), + (1261, 5), + (1262, 12), + (1263, 29), + (1264, 272), + (1265, 63), + (1266, 37), + (1267, 36), + (1268, 25), + (1269, 55), + (1270, 38), + (1271, 7), + (1272, 37), + (1273, 10), + (1274, 16), + (1275, 28), + (1276, 18), + (1277, 11), + (1278, 8), + (1279, 91), + (1280, 1), + (1282, 1), + (1283, 110), + (1284, 20), + (1285, 1923), + (1287, 3), + (1288, 1), + (1290, 23), + (1291, 4), + (1292, 4), + (1293, 12), + (1294, 19), + (1295, 8), + (1296, 248), + (1297, 21), + (1298, 12), + (1299, 31), + (1300, 10), + (1301, 60), + (1302, 1), + (1303, 8), + (1304, 99), + (1305, 29), + (1306, 29), + (1307, 28), + (1308, 
33), + (1309, 19), + (1310, 8), + (1311, 1), + (1313, 11), + (1314, 12), + (1315, 236), + (1316, 18), + (1317, 1891), + (1318, 2), + (1322, 21), + (1324, 1), + (1326, 8), + (1327, 3), + (1328, 235), + (1329, 4), + (1330, 1), + (1331, 2), + (1332, 5), + (1333, 38), + (1334, 2), + (1335, 30), + (1336, 18), + (1337, 31), + (1338, 8), + (1339, 5), + (1340, 11), + (1341, 9), + (1342, 12), + (1343, 11), + (1344, 79), + (1345, 37), + (1346, 19), + (1347, 136), + (1348, 9), + (1349, 1861), + (1350, 8), + (1351, 112), + (1352, 10), + (1353, 3), + (1354, 16), + (1355, 4), + (1356, 12), + (1357, 18), + (1358, 67), + (1359, 6), + (1360, 229), + (1361, 1), + (1362, 1), + (1364, 1), + (1365, 27), + (1366, 6), + (1368, 14), + (1370, 8), + (1371, 29), + (1372, 3), + (1373, 21), + (1374, 8), + (1375, 6), + (1376, 3), + (1377, 9), + (1378, 9), + (1379, 120), + (1380, 5), + (1381, 1833), + (1382, 45), + (1383, 35), + (1384, 23), + (1385, 25), + (1386, 26), + (1387, 159), + (1388, 24), + (1389, 16), + (1390, 16), + (1391, 14), + (1392, 273), + (1393, 17), + (1394, 9), + (1395, 5), + (1396, 14), + (1397, 24), + (1398, 27), + (1400, 2), + (1404, 5), + (1405, 8), + (1406, 3), + (1407, 25), + (1408, 2), + (1409, 22), + (1410, 10), + (1411, 111), + (1412, 89), + (1413, 1793), + (1414, 4), + (1415, 9), + (1416, 16), + (1417, 13), + (1418, 13), + (1419, 13), + (1420, 15), + (1421, 19), + (1422, 26), + (1423, 110), + (1424, 229), + (1425, 11), + (1426, 10), + (1427, 7), + (1428, 7), + (1429, 28), + (1430, 12), + (1431, 11), + (1432, 14), + (1433, 2), + (1434, 2), + (1436, 1), + (1437, 1), + (1438, 13), + (1439, 1), + (1440, 1), + (1441, 1), + (1442, 2), + (1443, 132), + (1444, 5), + (1445, 1795), + (1448, 11), + (1449, 10), + (1450, 11), + (1451, 8), + (1452, 47), + (1453, 6), + (1454, 8), + (1455, 12), + (1456, 229), + (1457, 15), + (1458, 12), + (1459, 121), + (1460, 15), + (1461, 48), + (1462, 49), + (1463, 22), + (1464, 11), + (1465, 9), + (1466, 81), + (1467, 1), + (1468, 1), + (1469, 
6), + (1470, 6), + (1471, 6), + (1472, 9), + (1473, 12), + (1474, 2), + (1475, 109), + (1476, 5), + (1477, 1721), + (1478, 1), + (1479, 28), + (1480, 7), + (1481, 23), + (1482, 2), + (1483, 12), + (1484, 5), + (1485, 3), + (1486, 2), + (1487, 4), + (1488, 219), + (1489, 7), + (1490, 8), + (1491, 10), + (1492, 16), + (1493, 32), + (1494, 25), + (1495, 96), + (1496, 13), + (1497, 15), + (1498, 16), + (1499, 12), + (1500, 14), + (1501, 19), + (1502, 7), + (1503, 11), + (1504, 3), + (1505, 8), + (1506, 41), + (1507, 108), + (1508, 25), + (1509, 1719), + (1510, 8), + (1511, 10), + (1514, 2), + (1515, 25), + (1516, 2), + (1517, 32), + (1518, 6), + (1519, 7), + (1520, 273), + (1521, 2), + (1522, 6), + (1523, 5), + (1524, 6), + (1525, 36), + (1526, 3), + (1527, 12), + (1528, 7), + (1529, 9), + (1530, 12), + (1531, 107), + (1532, 44), + (1533, 17), + (1534, 12), + (1535, 18), + (1536, 12), + (1537, 26), + (1538, 35), + (1539, 131), + (1540, 15), + (1541, 1693), + (1542, 11), + (1543, 7), + (1544, 2), + (1545, 6), + (1546, 14), + (1547, 6), + (1548, 2), + (1549, 24), + (1550, 2), + (1551, 33), + (1552, 206), + (1553, 18), + (1555, 1), + (1556, 7), + (1557, 38), + (1558, 6), + (1559, 3), + (1560, 21), + (1562, 2), + (1563, 5), + (1564, 7), + (1565, 5), + (1566, 6), + (1567, 110), + (1568, 9), + (1569, 16), + (1570, 13), + (1571, 109), + (1572, 6), + (1573, 1664), + (1574, 53), + (1575, 14), + (1576, 21), + (1577, 31), + (1578, 42), + (1579, 13), + (1580, 10), + (1581, 12), + (1582, 11), + (1583, 85), + (1584, 202), + (1585, 7), + (1586, 6), + (1587, 25), + (1588, 5), + (1589, 41), + (1590, 4), + (1591, 5), + (1593, 1), + (1595, 5), + (1596, 11), + (1598, 1), + (1599, 1), + (1600, 1), + (1601, 4), + (1602, 19), + (1603, 200), + (1604, 10), + (1605, 1640), + (1606, 15), + (1607, 14), + (1608, 7), + (1609, 12), + (1610, 5), + (1611, 2), + (1612, 3), + (1613, 7), + (1614, 37), + (1615, 4), + (1616, 203), + (1617, 13), + (1618, 3), + (1619, 12), + (1620, 38), + (1621, 22), + 
(1622, 12), + (1623, 43), + (1624, 19), + (1625, 35), + (1626, 15), + (1627, 26), + (1628, 43), + (1629, 2), + (1630, 10), + (1631, 1), + (1633, 1), + (1634, 1), + (1635, 110), + (1637, 1612), + (1638, 1), + (1639, 107), + (1640, 1), + (1641, 2), + (1643, 7), + (1644, 9), + (1645, 8), + (1646, 3), + (1647, 19), + (1648, 206), + (1649, 2), + (1650, 9), + (1651, 8), + (1652, 19), + (1653, 22), + (1654, 4), + (1655, 13), + (1656, 3), + (1657, 5), + (1658, 5), + (1659, 35), + (1660, 10), + (1661, 26), + (1662, 8), + (1663, 10), + (1664, 7), + (1665, 4), + (1666, 2), + (1667, 110), + (1668, 12), + (1669, 1594), + (1670, 1), + (1671, 2), + (1672, 15), + (1673, 4), + (1674, 2), + (1675, 303), + (1676, 12), + (1678, 1), + (1680, 194), + (1681, 1), + (1682, 40), + (1683, 2), + (1684, 2), + (1685, 19), + (1686, 16), + (1687, 2), + (1688, 6), + (1689, 9), + (1690, 18), + (1691, 15), + (1692, 5), + (1693, 7), + (1694, 6), + (1695, 32), + (1696, 4), + (1697, 34), + (1698, 1), + (1699, 117), + (1700, 5), + (1701, 1590), + (1702, 20), + (1703, 4), + (1704, 6), + (1705, 20), + (1707, 2), + (1710, 3), + (1711, 89), + (1712, 195), + (1713, 4), + (1714, 2), + (1715, 1), + (1716, 3), + (1717, 16), + (1718, 9), + (1719, 2), + (1720, 3), + (1723, 18), + (1724, 1), + (1725, 2), + (1726, 3), + (1727, 3), + (1728, 9), + (1729, 5), + (1730, 7), + (1731, 132), + (1732, 28), + (1733, 1585), + (1734, 5), + (1735, 3), + (1736, 5), + (1737, 27), + (1738, 4), + (1739, 19), + (1740, 15), + (1741, 4), + (1742, 15), + (1743, 9), + (1744, 183), + (1745, 12), + (1747, 119), + (1748, 1), + (1749, 15), + (1750, 5), + (1754, 1), + (1757, 2), + (1758, 8), + (1759, 7), + (1760, 7), + (1761, 2), + (1762, 13), + (1763, 113), + (1764, 8), + (1765, 1547), + (1766, 7), + (1767, 21), + (1768, 3), + (1769, 34), + (1770, 5), + (1772, 6), + (1773, 7), + (1774, 12), + (1775, 9), + (1776, 189), + (1777, 25), + (1778, 10), + (1779, 4), + (1780, 1), + (1781, 21), + (1782, 3), + (1783, 186), + (1784, 2), + (1787, 1), + 
(1788, 10), + (1789, 8), + (1790, 1), + (1791, 34), + (1792, 1), + (1793, 1), + (1794, 1), + (1795, 108), + (1796, 4), + (1797, 1519), + (1798, 9), + (1799, 9), + (1800, 3), + (1801, 6), + (1802, 4), + (1803, 35), + (1804, 15), + (1805, 30), + (1806, 5), + (1807, 7), + (1808, 192), + (1809, 8), + (1811, 4), + (1812, 24), + (1813, 36), + (1814, 4), + (1815, 14), + (1816, 2), + (1817, 2), + (1818, 4), + (1819, 72), + (1820, 3), + (1822, 1), + (1823, 4), + (1825, 1), + (1826, 5), + (1827, 104), + (1828, 1), + (1829, 1494), + (1830, 11), + (1831, 5), + (1832, 2), + (1833, 2), + (1834, 2), + (1835, 4), + (1836, 9), + (1837, 1), + (1838, 14), + (1839, 33), + (1840, 188), + (1841, 27), + (1842, 13), + (1843, 10), + (1844, 28), + (1845, 52), + (1846, 17), + (1847, 40), + (1848, 35), + (1849, 6), + (1850, 6), + (1851, 2), + (1853, 4), + (1854, 6), + (1855, 77), + (1856, 1), + (1859, 106), + (1860, 2), + (1861, 1466), + (1863, 2), + (1866, 1), + (1869, 1), + (1870, 2), + (1872, 179), + (1873, 1), + (1874, 9), + (1875, 29), + (1876, 15), + (1877, 43), + (1878, 2), + (1880, 8), + (1881, 13), + (1882, 18), + (1883, 12), + (1884, 14), + (1885, 18), + (1886, 16), + (1887, 6), + (1888, 2), + (1889, 3), + (1890, 9), + (1891, 196), + (1892, 13), + (1893, 1456), + (1894, 14), + (1895, 8), + (1896, 2), + (1898, 1), + (1899, 17), + (1900, 5), + (1901, 1), + (1904, 175), + (1905, 1), + (1906, 2), + (1907, 3), + (1908, 6), + (1909, 10), + (1910, 3), + (1911, 22), + (1912, 6), + (1913, 22), + (1914, 6), + (1915, 10), + (1916, 5), + (1917, 2), + (1918, 6), + (1919, 4), + (1920, 7), + (1921, 14), + (1922, 4), + (1923, 107), + (1924, 10), + (1925, 1434), + (1926, 7), + (1927, 76), + (1928, 4), + (1929, 7), + (1930, 10), + (1931, 14), + (1932, 6), + (1933, 15), + (1934, 4), + (1935, 2), + (1936, 182), + (1937, 2), + (1939, 11), + (1940, 1), + (1941, 4), + (1942, 2), + (1943, 9), + (1944, 1), + (1947, 24), + (1949, 22), + (1952, 15), + (1953, 14), + (1954, 5), + (1955, 111), + (1956, 11), + 
(1957, 1435), + (1958, 5), + (1959, 5), + (1960, 10), + (1961, 6), + (1962, 11), + (1963, 95), + (1964, 11), + (1965, 7), + (1966, 7), + (1967, 2), + (1968, 182), + (1969, 6), + (1970, 15), + (1972, 7), + (1973, 11), + (1974, 6), + (1975, 2), + (1976, 6), + (1977, 3), + (1978, 2), + (1983, 24), + (1985, 26), + (1986, 3), + (1987, 109), + (1988, 3), + (1989, 1421), + (1990, 1), + (1991, 3), + (1992, 8), + (1993, 4), + (1994, 6), + (1995, 5), + (1996, 13), + (1997, 6), + (1998, 10), + (1999, 92), + (2000, 181), + (2001, 5), + (2002, 5), + (2003, 1), + (2004, 1), + (2005, 14), + (2006, 12), + (2007, 10), + (2008, 7), + (2009, 9), + (2010, 6), + (2011, 8), + (2012, 13), + (2013, 2), + (2014, 2), + (2018, 1), + (2019, 128), + (2021, 1429), + (2022, 4), + (2026, 2), + (2027, 2), + (2030, 7), + (2032, 175), + (2033, 1), + (2035, 90), + (2036, 3), + (2037, 11), + (2038, 2), + (2039, 4), + (2040, 3), + (2041, 2), + (2042, 1), + (2043, 2), + (2044, 5), + (2045, 1), + (2046, 3), + (2047, 21), + (2048, 5), + (2050, 16), + (2051, 120), + (2053, 1403), + (2054, 4), + (2055, 29), + (2057, 26), + (2058, 3), + (2059, 4), + (2060, 4), + (2061, 7), + (2063, 1), + (2065, 170), + (2066, 3), + (2067, 2), + (2068, 7), + (2069, 13), + (2071, 77), + (2072, 1), + (2075, 4), + (2077, 1), + (2078, 2), + (2079, 5), + (2080, 4), + (2081, 3), + (2082, 3), + (2083, 2), + (2084, 293), + (2085, 6), + (2086, 1395), + (2087, 2), + (2089, 4), + (2090, 10), + (2091, 26), + (2092, 14), + (2093, 25), + (2097, 170), + (2099, 2), + (2100, 1), + (2101, 8), + (2102, 5), + (2104, 2), + (2105, 2), + (2107, 90), + (2108, 1), + (2110, 15), + (2112, 1), + (2113, 1), + (2114, 3), + (2115, 8), + (2116, 3), + (2117, 5), + (2118, 1380), + (2119, 4), + (2120, 1), + (2121, 3), + (2122, 1), + (2123, 6), + (2124, 24), + (2125, 1), + (2127, 33), + (2128, 4), + (2129, 197), + (2132, 1), + (2133, 3), + (2134, 8), + (2141, 1), + (2143, 95), + (2144, 6), + (2146, 1), + (2147, 1), + (2148, 3), + (2150, 1369), + (2152, 1), + 
(2153, 1), + (2155, 5), + (2156, 7), + (2157, 12), + (2158, 2), + (2159, 6), + (2160, 7), + (2161, 174), + (2162, 22), + (2163, 27), + (2164, 5), + (2165, 24), + (2166, 6), + (2169, 8), + (2170, 2), + (2171, 1), + (2172, 1), + (2174, 8), + (2175, 10), + (2176, 2), + (2177, 3), + (2179, 72), + (2180, 4), + (2181, 1), + (2182, 1366), + (2183, 2), + (2184, 5), + (2185, 4), + (2188, 3), + (2191, 1), + (2192, 2), + (2193, 169), + (2198, 7), + (2199, 27), + (2201, 28), + (2205, 2), + (2206, 2), + (2209, 9), + (2213, 8), + (2214, 1364), + (2215, 95), + (2216, 1), + (2217, 2), + (2218, 1), + (2219, 1), + (2220, 3), + (2221, 2), + (2222, 3), + (2223, 41), + (2225, 168), + (2228, 1), + (2229, 6), + (2230, 8), + (2231, 1), + (2232, 2), + (2233, 6), + (2234, 1), + (2235, 41), + (2236, 2), + (2237, 17), + (2240, 7), + (2242, 6), + (2244, 1), + (2246, 1350), + (2249, 2), + (2250, 4), + (2251, 89), + (2252, 1), + (2257, 167), + (2260, 4), + (2261, 3), + (2262, 6), + (2265, 1), + (2269, 2), + (2270, 4), + (2271, 32), + (2273, 21), + (2274, 1), + (2275, 3), + (2276, 1), + (2277, 2), + (2278, 1344), + (2279, 2), + (2280, 1), + (2281, 1), + (2284, 1), + (2287, 98), + (2288, 2), + (2289, 168), + (2292, 3), + (2293, 3), + (2294, 4), + (2298, 3), + (2303, 9), + (2307, 26), + (2308, 1), + (2309, 30), + (2310, 1344), + (2314, 1), + (2318, 1), + (2321, 164), + (2323, 1), + (2324, 82), + (2325, 1), + (2326, 5), + (2327, 1), + (2334, 6), + (2338, 1), + (2339, 1), + (2340, 1), + (2342, 1337), + (2343, 55), + (2344, 27), + (2345, 6), + (2346, 25), + (2347, 1), + (2348, 18), + (2350, 1), + (2351, 3), + (2352, 2), + (2353, 166), + (2358, 6), + (2360, 87), + (2361, 3), + (2362, 1), + (2373, 9), + (2374, 1330), + (2376, 1), + (2377, 1), + (2378, 11), + (2379, 4), + (2380, 28), + (2382, 29), + (2383, 2), + (2384, 8), + (2385, 169), + (2386, 4), + (2387, 9), + (2388, 8), + (2389, 4), + (2390, 15), + (2392, 1), + (2396, 117), + (2397, 4), + (2399, 1), + (2406, 1330), + (2410, 1), + (2414, 1), + 
(2415, 4), + (2416, 26), + (2417, 164), + (2418, 31), + (2421, 3), + (2422, 4), + (2424, 6), + (2425, 3), + (2426, 3), + (2427, 5), + (2428, 1), + (2429, 2), + (2432, 100), + (2433, 1), + (2435, 1), + (2436, 1), + (2438, 1328), + (2441, 10), + (2443, 11), + (2448, 2), + (2449, 163), + (2451, 1), + (2452, 27), + (2453, 8), + (2454, 24), + (2455, 1), + (2456, 2), + (2457, 2), + (2460, 4), + (2465, 5), + (2466, 3), + (2468, 95), + (2469, 6), + (2470, 1324), + (2471, 1), + (2472, 1), + (2476, 2), + (2477, 2), + (2478, 2), + (2479, 4), + (2481, 163), + (2484, 2), + (2485, 6), + (2486, 2), + (2488, 23), + (2489, 1), + (2490, 26), + (2491, 1), + (2493, 1), + (2494, 1), + (2495, 3), + (2496, 1), + (2500, 3), + (2502, 1327), + (2503, 1), + (2504, 93), + (2505, 2), + (2506, 1), + (2511, 4), + (2513, 166), + (2516, 3), + (2517, 5), + (2518, 8), + (2519, 2), + (2521, 1), + (2524, 27), + (2526, 20), + (2532, 1), + (2534, 1320), + (2535, 1), + (2540, 114), + (2541, 1), + (2543, 1), + (2545, 163), + (2550, 3), + (2555, 3), + (2557, 4), + (2558, 3), + (2559, 2), + (2560, 26), + (2561, 6), + (2562, 26), + (2564, 5), + (2565, 1), + (2566, 1325), + (2567, 5), + (2568, 9), + (2569, 10), + (2570, 2), + (2571, 1), + (2576, 97), + (2577, 165), + (2582, 3), + (2583, 5), + (2593, 2), + (2596, 42), + (2597, 1), + (2598, 1336), + (2602, 1), + (2609, 163), + (2612, 97), + (2613, 1), + (2614, 2), + (2619, 1), + (2621, 2), + (2624, 2), + (2628, 2), + (2630, 1684946), + (2632, 27), + (2633, 2), + (2634, 25), + (2635, 1), + (2637, 4), + (2639, 1), + (2640, 1), + (2641, 163), + (2644, 1), + (2645, 3), + (2646, 2), + (2648, 112), + (2649, 1), + (2653, 5), + (2659, 3), + (2660, 1), + (2661, 1), + (2662, 1315), + (2664, 1), + (2668, 30), + (2669, 1), + (2670, 26), + (2673, 163), + (2674, 2), + (2675, 1), + (2678, 7), + (2679, 1), + (2680, 1), + (2684, 90), + (2685, 1), + (2686, 1), + (2694, 1315), + (2699, 1), + (2701, 1), + (2704, 30), + (2705, 163), + (2706, 27), + (2710, 2), + (2712, 1), + (2720, 
112), + (2721, 2), + (2723, 5), + (2726, 1316), + (2736, 1), + (2737, 165), + (2738, 2), + (2740, 25), + (2742, 33), + (2745, 1), + (2756, 97), + (2757, 1), + (2758, 1315), + (2769, 163), + (2774, 3), + (2776, 32), + (2778, 34), + (2781, 1), + (2782, 1), + (2784, 1), + (2790, 1313), + (2792, 94), + (2793, 12), + (2796, 1), + (2800, 1), + (2801, 163), + (2804, 2), + (2805, 6), + (2806, 2), + (2807, 2), + (2809, 1), + (2810, 1), + (2812, 23), + (2814, 33), + (2815, 3), + (2816, 1), + (2820, 2), + (2821, 1), + (2822, 1314), + (2824, 1), + (2828, 104), + (2829, 1), + (2833, 163), + (2837, 6), + (2838, 4), + (2839, 1), + (2848, 32), + (2849, 4), + (2850, 32), + (2852, 4), + (2853, 1), + (2854, 1312), + (2861, 1), + (2863, 52), + (2864, 111), + (2865, 164), + (2868, 2), + (2869, 15), + (2870, 2), + (2871, 1), + (2884, 30), + (2886, 1333), + (2890, 2), + (2891, 2), + (2892, 3), + (2893, 4), + (2894, 2), + (2897, 163), + (2899, 3), + (2900, 230), + (2901, 1), + (2902, 2), + (2908, 2), + (2911, 1), + (2918, 1312), + (2920, 42), + (2922, 25), + (2923, 1), + (2925, 1), + (2929, 165), + (2930, 2), + (2931, 5), + (2932, 4), + (2933, 8), + (2934, 2), + (2936, 110), + (2937, 1), + (2938, 1), + (2939, 1), + (2948, 1), + (2950, 1313), + (2956, 38), + (2958, 32), + (2961, 163), + (2964, 1), + (2966, 4), + (2967, 2), + (2969, 1), + (2971, 1), + (2972, 151), + (2973, 1), + (2975, 3), + (2976, 4), + (2977, 3), + (2978, 1), + (2979, 1), + (2980, 1), + (2982, 1312), + (2992, 28), + (2993, 163), + (2994, 29), + (2998, 2), + (3006, 1), + (3007, 2), + (3008, 188), + (3009, 2), + (3014, 1311), + (3015, 5), + (3016, 9), + (3017, 1), + (3020, 1), + (3025, 164), + (3028, 27), + (3030, 31), + (3044, 223), + (3045, 1), + (3046, 1311), + (3048, 1), + (3057, 163), + (3061, 2), + (3062, 4), + (3064, 41), + (3066, 35), + (3076, 2), + (3078, 1310), + (3080, 151), + (3081, 2), + (3089, 163), + (3094, 2), + (3100, 35), + (3101, 2), + (3102, 38), + (3104, 2), + (3110, 1310), + (3116, 106), + (3117, 2), + 
(3121, 163), + (3125, 5), + (3126, 2), + (3132, 2), + (3136, 36), + (3138, 39), + (3140, 2), + (3141, 1), + (3142, 1309), + (3143, 1), + (3144, 1), + (3152, 120), + (3153, 164), + (3155, 1), + (3157, 1), + (3158, 2), + (3163, 1), + (3164, 1), + (3172, 34), + (3174, 1343), + (3185, 163), + (3188, 136), + (3189, 1), + (3190, 2), + (3203, 1), + (3204, 1), + (3206, 1308), + (3208, 53), + (3210, 52), + (3217, 163), + (3220, 38), + (3221, 114), + (3222, 2), + (3224, 141), + (3225, 5), + (3230, 1), + (3236, 38), + (3238, 1308), + (3244, 35), + (3246, 46), + (3249, 163), + (3254, 2), + (3260, 105), + (3261, 4), + (3263, 1), + (3270, 1308), + (3280, 38), + (3281, 163), + (3282, 28), + (3286, 3), + (3292, 1), + (3296, 138), + (3297, 1), + (3301, 1), + (3302, 1308), + (3304, 1), + (3313, 163), + (3316, 33), + (3318, 34), + (3329, 1), + (3331, 1), + (3332, 120), + (3333, 1), + (3334, 1309), + (3345, 163), + (3350, 3), + (3352, 34), + (3354, 31), + (3357, 1), + (3366, 1307), + (3368, 230), + (3369, 6), + (3377, 163), + (3382, 2), + (3388, 37), + (3390, 45), + (3398, 1307), + (3404, 3128), + (3405, 2), + (3409, 163), + (3414, 2), + (3424, 40), + (3426, 23), + (3430, 1307), + (3440, 117), + (3441, 164), + (3446, 2), + (3460, 30), + (3462, 1344), + (3469, 1), + (3473, 163), + (3476, 116), + (3477, 1), + (3478, 3), + (3494, 1305), + (3496, 36), + (3498, 38), + (3501, 2), + (3504, 2), + (3505, 163), + (3510, 2), + (3512, 124), + (3513, 4), + (3515, 1), + (3525, 1), + (3526, 1305), + (3532, 27), + (3534, 33), + (3537, 165), + (3541, 2), + (3542, 2), + (3544, 2), + (3548, 119), + (3549, 1), + (3558, 1305), + (3568, 29), + (3569, 163), + (3570, 53), + (3574, 2), + (3581, 6), + (3584, 115), + (3585, 2), + (3590, 1306), + (3601, 163), + (3604, 39), + (3606, 45), + (3620, 107), + (3621, 1), + (3622, 1304), + (3633, 163), + (3634, 1), + (3637, 1), + (3638, 2), + (3640, 43), + (3642, 35), + (3654, 1305), + (3656, 126), + (3657, 2), + (3661, 1), + (3664, 1), + (3665, 163), + (3670, 3), + 
(3676, 32), + (3678, 48), + (3679, 1), + (3686, 1303), + (3692, 128), + (3693, 2), + (3697, 163), + (3702, 3), + (3712, 33), + (3714, 28), + (3718, 1302), + (3728, 137), + (3729, 165), + (3734, 2), + (3748, 54), + (3749, 1), + (3750, 1333), + (3758, 1), + (3761, 163), + (3764, 125), + (3765, 2), + (3766, 3), + (3782, 1301), + (3784, 32), + (3786, 50), + (3793, 163), + (3798, 2), + (3800, 123), + (3801, 3), + (3805, 1), + (3814, 1301), + (3820, 53), + (3822, 30), + (3825, 163), + (3830, 2), + (3833, 1), + (3836, 109), + (3837, 3), + (3846, 1301), + (3856, 35), + (3857, 163), + (3858, 54), + (3860, 20), + (3861, 51), + (3862, 2), + (3872, 124), + (3873, 2), + (3876, 17), + (3878, 1302), + (3882, 1), + (3889, 163), + (3892, 45), + (3894, 47), + (3901, 2), + (3903, 1), + (3904, 2), + (3908, 138), + (3909, 2), + (3910, 1300), + (3917, 2), + (3921, 163), + (3926, 2), + (3928, 38), + (3930, 37), + (3942, 1300), + (3944, 137), + (3945, 2), + (3953, 163), + (3958, 2), + (3964, 66), + (3966, 37), + (3971, 1), + (3974, 1300), + (3980, 166), + (3981, 1), + (3985, 163), + (3990, 2), + (4000, 35), + (4002, 54), + (4006, 1300), + (4016, 150), + (4017, 164), + (4021, 38), + (4022, 2), + (4024, 38), + (4036, 47), + (4038, 1347), + (4049, 163), + (4052, 134), + (4053, 10), + (4054, 2), + (4068, 1), + (4070, 1300), + (4072, 52), + (4074, 40), + (4075, 1), + (4081, 163), + (4085, 7), + (4086, 2), + (4088, 123), + (4089, 4), + (4100, 2), + (4102, 1300), + (4108, 38), + (4110, 43), + (4113, 163), + (4118, 2), + (4119, 2), + (4124, 159), + (4125, 3), + (4128, 1), + (4134, 1299), + (4141, 1), + (4144, 51), + (4145, 163), + (4146, 41), + (4150, 2), + (4152, 30), + (4160, 153), + (4161, 1), + (4164, 2), + (4166, 1299), + (4177, 163), + (4180, 225), + (4181, 596), + (4182, 50), + (4187, 1), + (4196, 373), + (4197, 3), + (4198, 1299), + (4209, 163), + (4214, 2), + (4216, 66), + (4217, 3), + (4218, 69), + (4221, 1), + (4230, 1299), + (4232, 158), + (4233, 2), + (4241, 163), + (4246, 2), + 
(4252, 45), + (4253, 1), + (4254, 48), + (4262, 1300), + (4267, 2), + (4268, 145), + (4269, 3), + (4270, 1), + (4271, 1), + (4273, 163), + (4278, 3), + (4288, 75), + (4290, 36), + (4294, 1298), + (4301, 1), + (4304, 173), + (4305, 166), + (4309, 2), + (4310, 2), + (4324, 52), + (4326, 1359), + (4337, 163), + (4340, 195), + (4341, 2), + (4342, 3), + (4358, 1297), + (4360, 76), + (4362, 56), + (4365, 2), + (4369, 163), + (4374, 2), + (4376, 171), + (4377, 1), + (4390, 1298), + (4396, 52), + (4398, 49), + (4401, 163), + (4406, 3), + (4407, 2), + (4412, 170), + (4413, 2), + (4421, 1), + (4422, 1296), + (4432, 57), + (4433, 163), + (4434, 51), + (4436, 1), + (4438, 2), + (4448, 481), + (4449, 2), + (4451, 1), + (4454, 1295), + (4463, 1), + (4465, 163), + (4468, 74), + (4470, 92), + (4484, 448), + (4485, 3), + (4486, 1295), + (4487, 1), + (4497, 163), + (4502, 2), + (4504, 52), + (4506, 65), + (4518, 1295), + (4519, 2), + (4520, 631), + (4521, 3), + (4529, 164), + (4530, 1), + (4532, 1), + (4533, 3), + (4534, 2), + (4540, 55), + (4542, 48), + (4550, 1294), + (4556, 2358), + (4557, 3), + (4561, 163), + (4562, 1), + (4566, 2), + (4576, 58), + (4578, 74), + (4582, 1294), + (4592, 193), + (4593, 167), + (4598, 2), + (4612, 66), + (4614, 1363), + (4621, 2), + (4625, 163), + (4628, 218), + (4629, 3), + (4630, 2), + (4635, 3), + (4640, 1), + (4645, 1), + (4646, 1295), + (4648, 57), + (4650, 90), + (4657, 163), + (4662, 3), + (4664, 194), + (4665, 1), + (4678, 1295), + (4684, 49), + (4685, 1), + (4686, 85), + (4689, 163), + (4694, 4), + (4700, 183), + (4701, 3), + (4710, 1291), + (4720, 61), + (4721, 163), + (4722, 75), + (4726, 3), + (4736, 175), + (4737, 4), + (4742, 1291), + (4753, 163), + (4756, 84), + (4758, 53), + (4772, 210), + (4773, 4), + (4774, 1291), + (4785, 163), + (4790, 2), + (4792, 54), + (4794, 66), + (4799, 2), + (4806, 1292), + (4808, 180), + (4809, 6), + (4817, 164), + (4820, 32), + (4821, 132), + (4822, 3), + (4824, 17), + (4828, 70), + (4830, 62), + (4836, 
42), + (4838, 1290), + (4844, 199), + (4845, 3), + (4849, 163), + (4854, 2), + (4864, 104), + (4866, 98), + (4870, 1290), + (4873, 1), + (4880, 184), + (4881, 164), + (4886, 2), + (4900, 88), + (4902, 1387), + (4909, 1), + (4913, 163), + (4916, 187), + (4917, 6), + (4918, 2), + (4934, 1290), + (4936, 65), + (4938, 59), + (4945, 163), + (4948, 1), + (4950, 2), + (4952, 198), + (4953, 3), + (4966, 1290), + (4972, 64), + (4974, 108), + (4977, 163), + (4982, 2), + (4988, 199), + (4989, 8), + (4998, 1290), + (5008, 82), + (5009, 163), + (5010, 113), + (5012, 3), + (5013, 9), + (5014, 2), + (5017, 1), + (5024, 228), + (5025, 2), + (5028, 4), + (5030, 1290), + (5041, 162), + (5044, 96), + (5046, 71), + (5060, 275), + (5061, 6), + (5062, 1291), + (5064, 1), + (5070, 1), + (5073, 162), + (5078, 3), + (5080, 66), + (5082, 153), + (5094, 1289), + (5096, 272), + (5097, 10), + (5101, 2), + (5104, 2), + (5105, 162), + (5110, 2), + (5116, 87), + (5118, 80), + (5126, 1289), + (5132, 266), + (5133, 5), + (5135, 1), + (5137, 162), + (5140, 190), + (5141, 681), + (5142, 2), + (5152, 104), + (5154, 184), + (5156, 238), + (5158, 1289), + (5168, 257), + (5169, 165), + (5174, 2), + (5188, 99), + (5190, 1435), + (5201, 162), + (5204, 228), + (5205, 6), + (5206, 2), + (5221, 206), + (5222, 1289), + (5224, 312), + (5226, 110), + (5231, 1), + (5233, 162), + (5238, 2), + (5240, 266), + (5241, 7), + (5254, 1289), + (5260, 87), + (5262, 243), + (5265, 162), + (5270, 2), + (5274, 8), + (5276, 318), + (5277, 7), + (5286, 1289), + (5288, 86), + (5296, 88), + (5297, 162), + (5298, 123), + (5302, 3), + (5312, 351), + (5313, 1), + (5318, 1289), + (5329, 162), + (5332, 115), + (5334, 173), + (5339, 6), + (5344, 1), + (5348, 313), + (5349, 3), + (5350, 1289), + (5352, 24), + (5353, 14), + (5361, 162), + (5366, 3), + (5368, 157), + (5370, 107), + (5374, 1), + (5382, 1289), + (5384, 293), + (5385, 4), + (5388, 4), + (5393, 162), + (5396, 1), + (5398, 2), + (5404, 142), + (5406, 201), + (5407, 1), + 
(5414, 1289), + (5417, 3), + (5420, 285), + (5421, 5), + (5423, 1), + (5425, 162), + (5430, 2), + (5436, 1), + (5440, 142), + (5442, 210), + (5444, 1), + (5446, 1294), + (5456, 318), + (5457, 166), + (5462, 3), + (5476, 123), + (5478, 1608), + (5482, 2), + (5489, 162), + (5492, 329), + (5493, 2), + (5494, 2), + (5504, 1), + (5506, 1), + (5510, 1289), + (5511, 1), + (5512, 165), + (5514, 167), + (5521, 163), + (5522, 1), + (5526, 2), + (5528, 367), + (5529, 8), + (5542, 1289), + (5548, 192), + (5550, 291), + (5553, 162), + (5558, 2), + (5564, 399), + (5565, 13), + (5574, 1289), + (5584, 188), + (5585, 163), + (5586, 356), + (5590, 2), + (5592, 1), + (5599, 1), + (5600, 375), + (5601, 3), + (5606, 1290), + (5608, 1), + (5617, 162), + (5618, 1), + (5620, 261), + (5622, 667), + (5623, 1), + (5626, 1), + (5633, 1), + (5636, 406), + (5637, 4), + (5638, 1289), + (5639, 1), + (5649, 162), + (5654, 2), + (5656, 468), + (5658, 1159), + (5662, 1), + (5670, 1289), + (5671, 1), + (5672, 349), + (5673, 8), + (5675, 1), + (5681, 162), + (5686, 2), + (5692, 321), + (5694, 3067), + (5702, 1289), + (5706, 1), + (5708, 443), + (5709, 7), + (5713, 162), + (5718, 2), + (5728, 496), + (5730, 4577), + (5734, 1289), + (5744, 383), + (5745, 165), + (5750, 3), + (5756, 1), + (5758, 1), + (5764, 5847), + (5766, 8966), + (5775, 1), + (5777, 162), + (5780, 616), + (5781, 240), + (5782, 2), + (5784, 1), + (5788, 1), + (5796, 81), + (5798, 1289), + (5799, 1), + (5800, 5543), + (5802, 13287), + (5809, 162), + (5814, 2), + (5816, 409), + (5817, 3), + (5830, 1289), + (5833, 1), + (5836, 123), + (5838, 59), + (5841, 162), + (5846, 2), + (5852, 480), + (5853, 10), + (5862, 1289), + (5872, 191), + (5873, 162), + (5874, 38), + (5878, 2), + (5888, 616), + (5889, 12), + (5894, 1289), + (5905, 162), + (5908, 139), + (5910, 54), + (5922, 1), + (5924, 675), + (5925, 9), + (5926, 1289), + (5937, 162), + (5942, 2), + (5944, 153), + (5946, 48), + (5958, 1289), + (5960, 614), + (5961, 33), + (5969, 162), + 
(5974, 2), + (5980, 140), + (5982, 95), + (5990, 1289), + (5996, 628), + (5997, 10), + (6001, 162), + (6006, 2), + (6016, 155), + (6018, 67), + (6021, 42), + (6022, 1289), + (6024, 42), + (6032, 772), + (6033, 177), + (6038, 2), + (6049, 1), + (6052, 109), + (6054, 1340), + (6065, 162), + (6068, 749), + (6069, 11), + (6070, 2), + (6086, 1289), + (6088, 364), + (6090, 49), + (6096, 1), + (6097, 162), + (6102, 2), + (6104, 975), + (6105, 4), + (6106, 1), + (6118, 1289), + (6124, 273), + (6126, 58), + (6129, 162), + (6134, 2), + (6138, 1), + (6140, 1053), + (6141, 13), + (6150, 1289), + (6152, 1), + (6153, 2), + (6160, 372), + (6161, 162), + (6162, 70), + (6164, 1), + (6166, 2), + (6172, 1), + (6176, 1088), + (6177, 96), + (6178, 1), + (6182, 1290), + (6188, 4), + (6193, 162), + (6194, 1), + (6196, 346), + (6198, 101), + (6206, 1), + (6212, 1352), + (6213, 4), + (6214, 1290), + (6219, 2), + (6223, 1), + (6225, 162), + (6230, 1), + (6232, 321), + (6234, 170), + (6246, 1290), + (6248, 1755), + (6249, 4), + (6257, 162), + (6261, 4), + (6262, 1), + (6264, 4), + (6268, 616), + (6270, 141), + (6275, 1), + (6278, 1289), + (6280, 1), + (6281, 1), + (6284, 2516), + (6285, 73), + (6289, 162), + (6294, 1), + (6304, 409), + (6306, 163), + (6310, 1289), + (6314, 2), + (6320, 2276), + (6321, 210), + (6326, 1), + (6340, 445), + (6342, 1437), + (6353, 162), + (6356, 4090), + (6357, 55), + (6358, 1), + (6364, 1), + (6374, 1290), + (6376, 929), + (6378, 270), + (6385, 162), + (6390, 1), + (6392, 6135), + (6393, 16), + (6400, 1), + (6406, 1289), + (6412, 607), + (6414, 386), + (6417, 162), + (6420, 1), + (6421, 238), + (6422, 1), + (6424, 238), + (6428, 15189), + (6429, 227), + (6438, 1289), + (6443, 1), + (6448, 1211), + (6449, 162), + (6450, 1135), + (6453, 2), + (6454, 1), + (6464, 66588), + (6465, 77), + (6470, 1289), + (6474, 31), + (6481, 162), + (6484, 21001), + (6486, 9926), + (6488, 95), + (6498, 1), + (6500, 51017), + (6501, 2547), + (6502, 1289), + (6513, 162), + (6518, 1), + 
(6520, 11978), + (6522, 2546), + (6534, 1289), + (6536, 1), + (6537, 4), + (6539, 7), + (6545, 162), + (6546, 1), + (6550, 1), + (6553, 27), + (6566, 1289), + (6572, 1), + (6573, 2), + (6574, 1), + (6577, 163), + (6582, 2), + (6587, 1), + (6588, 17), + (6598, 1289), + (6600, 1), + (6603, 1), + (6605, 1), + (6606, 2), + (6608, 1), + (6609, 163), + (6610, 1), + (6614, 1), + (6623, 4), + (6630, 1289), + (6631, 1), + (6633, 1), + (6635, 1), + (6640, 1), + (6641, 162), + (6644, 1), + (6645, 2), + (6646, 2), + (6662, 1289), + (6666, 1), + (6670, 1), + (6673, 162), + (6678, 1), + (6679, 1), + (6680, 1), + (6681, 5), + (6686, 1), + (6694, 1289), + (6705, 162), + (6710, 1), + (6711, 1), + (6714, 1), + (6716, 1), + (6717, 10), + (6726, 1289), + (6734, 1), + (6737, 163), + (6738, 1), + (6740, 2), + (6742, 1), + (6752, 1), + (6753, 1), + (6757, 1), + (6758, 1289), + (6769, 162), + (6770, 1), + (6774, 1), + (6775, 1), + (6788, 1), + (6789, 3), + (6790, 1289), + (6797, 1), + (6801, 162), + (6802, 1), + (6803, 1), + (6806, 1), + (6818, 1), + (6819, 1), + (6822, 1289), + (6824, 1), + (6825, 5), + (6833, 162), + (6834, 1), + (6837, 1), + (6838, 1), + (6844, 2), + (6854, 1289), + (6860, 1), + (6861, 5), + (6865, 163), + (6869, 1), + (6870, 1), + (6872, 1), + (6875, 1), + (6881, 3), + (6886, 1289), + (6896, 1), + (6897, 166), + (6902, 1), + (6915, 1), + (6918, 1289), + (6929, 162), + (6932, 2), + (6933, 1), + (6934, 1), + (6947, 1), + (6950, 1290), + (6961, 162), + (6966, 1), + (6969, 2), + (6982, 1289), + (6993, 162), + (6998, 1), + (7004, 1), + (7005, 1), + (7014, 1289), + (7025, 162), + (7030, 1), + (7032, 1), + (7034, 1), + (7040, 1), + (7041, 1), + (7046, 1289), + (7057, 162), + (7058, 1), + (7059, 1), + (7062, 1), + (7070, 1), + (7076, 1), + (7077, 3), + (7078, 1289), + (7084, 1), + (7089, 162), + (7094, 1), + (7110, 1289), + (7112, 1), + (7113, 5), + (7121, 162), + (7124, 1), + (7126, 1), + (7133, 1), + (7142, 1289), + (7148, 1), + (7149, 12), + (7153, 162), + (7158, 1), + 
(7174, 1289), + (7184, 1), + (7185, 170), + (7190, 1), + (7206, 1289), + (7217, 162), + (7220, 1), + (7221, 82), + (7222, 1), + (7224, 81), + (7229, 1), + (7237, 1), + (7238, 1289), + (7242, 1), + (7243, 1), + (7248, 1), + (7249, 162), + (7254, 1), + (7256, 1), + (7257, 1), + (7266, 4), + (7270, 1289), + (7274, 13), + (7280, 20), + (7281, 162), + (7286, 1), + (7288, 12), + (7292, 1), + (7293, 5), + (7296, 1), + (7302, 1289), + (7308, 1), + (7313, 162), + (7315, 1), + (7318, 1), + (7328, 1), + (7329, 1), + (7334, 1290), + (7345, 162), + (7349, 1), + (7350, 1), + (7353, 1), + (7364, 1), + (7365, 1), + (7366, 1290), + (7377, 162), + (7382, 1), + (7392, 1), + (7398, 1289), + (7400, 1), + (7401, 4), + (7406, 1), + (7409, 162), + (7411, 1), + (7414, 1), + (7430, 1289), + (7431, 3), + (7436, 1), + (7437, 2), + (7441, 162), + (7445, 5), + (7446, 1), + (7448, 1), + (7460, 1), + (7462, 1289), + (7472, 1), + (7473, 166), + (7474, 1), + (7478, 1), + (7494, 1289), + (7505, 162), + (7508, 3), + (7509, 2), + (7510, 2), + (7525, 1), + (7526, 1289), + (7532, 1), + (7537, 162), + (7542, 1), + (7544, 1), + (7545, 9), + (7546, 1), + (7558, 1289), + (7569, 162), + (7574, 1), + (7580, 1), + (7581, 6), + (7590, 1289), + (7601, 162), + (7606, 1), + (7616, 1), + (7617, 6), + (7622, 1289), + (7623, 1), + (7625, 1), + (7633, 162), + (7638, 1), + (7652, 1), + (7653, 11), + (7654, 1289), + (7657, 1), + (7665, 162), + (7670, 1), + (7686, 1289), + (7688, 1), + (7689, 1), + (7697, 162), + (7702, 1), + (7708, 1), + (7715, 1), + (7717, 2), + (7718, 1289), + (7724, 1), + (7725, 3), + (7729, 162), + (7734, 1), + (7746, 1), + (7750, 1289), + (7760, 1), + (7761, 167), + (7766, 1), + (7782, 1289), + (7793, 162), + (7794, 1), + (7796, 1), + (7797, 1), + (7798, 1), + (7814, 1289), + (7820, 1), + (7825, 162), + (7826, 1), + (7830, 1), + (7832, 1), + (7833, 14), + (7842, 1), + (7846, 1289), + (7857, 162), + (7862, 1), + (7863, 1), + (7868, 1), + (7869, 4), + (7878, 1289), + (7885, 1), + (7889, 162), + 
(7894, 1), + (7904, 1), + (7905, 2), + (7910, 1289), + (7921, 162), + (7926, 1), + (7929, 1), + (7940, 1), + (7941, 2), + (7942, 1289), + (7953, 162), + (7958, 1), + (7963, 1), + (7973, 1), + (7974, 1289), + (7976, 1), + (7977, 16), + (7985, 162), + (7989, 1), + (7990, 1), + (7991, 1), + (7997, 1), + (8000, 1), + (8006, 1289), + (8012, 1), + (8013, 14), + (8017, 162), + (8022, 1), + (8038, 1289), + (8048, 1), + (8049, 185), + (8054, 2), + (8070, 1289), + (8081, 162), + (8084, 1), + (8085, 24), + (8086, 1), + (8102, 1289), + (8113, 162), + (8118, 1), + (8119, 1), + (8120, 1), + (8121, 1), + (8126, 1), + (8134, 1289), + (8140, 1), + (8145, 162), + (8150, 1), + (8157, 20), + (8166, 1289), + (8177, 162), + (8182, 1), + (8192, 1), + (8193, 1), + (8198, 1289), + (8209, 162), + (8214, 1), + (8228, 1), + (8229, 32), + (8230, 1290), + (8246, 1), + (8264, 1), + (8265, 27), + (8269, 1), + (8276, 1), + (8282, 1), + (8300, 1), + (8301, 133), + (8336, 2), + (8337, 60), + (8348, 3), + (8356, 1), + (8358, 1), + (8372, 1), + (8373, 196), + (8408, 1), + (8444, 1), + (8468, 1), + (8480, 1), + (8499, 1), + (8516, 1), + (8552, 1), + (8555, 1), + (8588, 1), + (8624, 1), + (8660, 3), + (8675, 1), + (8696, 1), + (8704, 1), + (8724, 1), + (8732, 1), + (8768, 1), + (8779, 1), + (8804, 1), + (8840, 1), + (8852, 2), + (8876, 1), + (8912, 1), + (8948, 1), + (8984, 1), + (9020, 1), + (9128, 1), + (9164, 1), + (9192, 1), + (9200, 2), + (9236, 1), + (9272, 1), + (9308, 1), + (9344, 1), + (9380, 1), + (9416, 1), + (9452, 1), + (9524, 1), + (9560, 1), + (9589, 1), + (9632, 1), + (9642, 1), + (9704, 1), + (9776, 1), + (9848, 1), + (9992, 1), + (10064, 1), + (10100, 1), + (10136, 1), + (10172, 1), + (10208, 1), + (10244, 1), + (10280, 1), + (10316, 1), + (10388, 1), + (10532, 1), + (10572, 1), + (10620, 1), + (10640, 1), + (10669, 1), + (10748, 1), + (10856, 1), + (10964, 1), + (11067, 1), + (11072, 1), + (11180, 1), + (11216, 1), + (11252, 1), + (11288, 1), + (11324, 1), + (11348, 2), + (11360, 1), 
+ (11396, 1), + (11432, 1), + (11468, 1), + (11504, 1), + (11540, 1), + (11576, 1), + (11612, 1), + (11648, 1), + (11756, 1), + (11792, 1), + (11828, 1), + (11864, 1), + (11936, 1), + (12008, 1), + (12080, 1), + (12152, 1), + (12188, 1), + (12224, 1), + (12260, 1), + (12296, 1), + (12332, 1), + (12360, 1), + (12368, 1), + (12404, 1), + (12440, 1), + (12476, 1), + (12501, 2), + (12512, 1), + (12548, 1), + (12584, 1), + (12620, 1), + (12656, 1), + (12693, 1), + (12728, 1), + (12885, 1), + (13123, 1), + (13269, 1), + (13461, 1), + (13653, 1), + (13664, 1), + (13740, 1), + (13872, 1), + (13946, 1), + (14109, 1), + (14613, 2), + (14805, 2), + (14945, 1), + (14997, 1), + (15176, 1), + (15276, 1), + (15384, 1), + (15492, 1), + (15600, 1), + (15708, 1), + (15716, 1), + (15765, 1), + (15816, 1), + (15924, 1), + (16068, 1), + (16104, 1), + (16140, 1), + (16176, 1), + (16212, 1), + (16248, 1), + (16284, 1), + (16320, 1), + (16356, 1), + (16392, 1), + (16430, 1), + (16468, 1), + (16504, 1), + (16540, 1), + (16727, 2), + (16728, 1), + (16919, 2), + (16921, 1), + (16938, 1), + (17111, 6), + (17413, 1), + (17430, 1), + (17495, 1), + (17880, 1), + (18647, 2), + (18672, 1), + (19223, 38), + (19680, 1), + (20436, 1), + (21156, 1), + (21732, 1), + (22380, 1), + (22992, 1), + (23063, 17), + (23244, 1), + (23532, 1), + (23892, 1), + (24108, 1), + (24215, 1), + (24324, 1), + (24407, 2), + (24504, 1), + (24720, 1), + (24900, 1), + (24983, 205), + (25440, 1), + (25620, 1), + (26088, 1), + (26268, 1), + (26448, 1), + (26664, 1), + (26988, 1), + (27276, 1), + (27492, 1), + (27744, 1), + (28032, 1), + (28284, 1), + (28536, 1), + (28823, 42), + (28896, 1), + (29184, 1), + (29292, 1), + (29400, 1), + (29796, 1), + (29975, 4), + (30156, 1), + (30228, 1), + (30743, 238), + (30768, 1), + (31056, 1), + (31092, 1), + (31416, 1), + (32100, 1), + (32712, 1), + (33144, 1), + (33324, 1), + (33792, 1), + (34008, 1), + (34440, 1), + (34583, 81), + (34656, 1), + (34872, 1), + (34944, 1), + (35160, 1), + 
(35304, 1), + (35376, 1), + (35412, 1), + (35556, 1), + (35628, 1), + (35664, 1), + (35808, 1), + (36204, 1), + (36744, 1), + (37788, 1), + (39372, 1), + (40956, 1), + (41640, 1), + (41892, 1), + (42144, 1), + (42576, 1), + (42936, 1), + (43476, 1), + (45096, 1), + (47256, 1), + (47760, 1), + (47796, 1), + (47868, 1), + (48228, 1), + (48948, 1), + (49128, 1), + (49452, 1), + (49560, 1), + (49668, 1), + (49776, 1), + (50352, 1), + (50964, 1), + (52008, 1), + (53880, 1), + (55284, 1), + (55860, 1), + (56040, 1), + (56400, 1), + (56904, 1), + (57444, 1), + (59424, 1), + (60156, 1), + (60626, 1), + (60641, 1), + (61260, 1), + (62520, 1), + (64392, 1), + (65976, 1), + (67308, 1), + (68064, 1), + (68748, 1), + (69216, 1), + (69504, 1), + (69648, 1), + (69684, 1), + (69720, 1), + (69756, 1), + (69792, 1), + (69828, 1), + (70224, 1), + (70620, 1), + (71016, 1), + (71412, 1), + (71772, 1), + (71952, 1), + (72024, 1), + (72096, 1), + (72168, 1), + (72240, 1), + (72312, 1), + (72348, 1), + (72420, 1), + (72492, 1), + (72600, 1), + (72672, 1), + (72780, 1), + (72996, 1), + (73320, 1), + (73356, 1), + (73500, 1), + (73536, 1), + (73572, 1), + (73608, 1), + (73680, 1), + (73716, 1), + (73788, 1), + (73896, 1), + (74040, 1), + (74112, 1), + (74170, 1), + (74184, 1), + (74185, 1), + (74220, 1), + (74256, 1), + (74292, 1), + (74328, 1), + (74364, 1), + (74400, 1), + (74436, 1), + (74472, 1), + (74616, 1), + (74976, 1), + (75156, 1), + (75228, 1), + (75336, 1), + (75408, 1), + (75588, 1), + (75696, 1), + (75804, 1), + (75984, 1), + (76056, 1), + (76164, 1), + (76308, 1), + (76452, 1), + (76560, 1), + (76776, 1), + (76920, 1), + (77064, 1), + (77208, 1), + (77316, 1), + (77532, 1), + (77676, 1), + (77748, 1), + (77820, 1), + (77928, 1), + (78000, 1), + (78036, 1), + (78072, 1), + (78108, 1), + (78180, 1), + (78324, 1), + (78396, 1), + (78576, 1), + (78684, 1), + (78828, 1), + (78864, 1), + (78900, 1), + (78972, 1), + (79080, 1), + (79116, 1), + (79152, 1), + (79512, 1), + (79872, 1), 
+ (80268, 1), + (80592, 1), + (80700, 1), + (80916, 1), + (81168, 1), + (81276, 1), + (81528, 1), + (81708, 1), + (81816, 1), + (81888, 1), + (82068, 1), + (82176, 1), + (82284, 1), + (82356, 1), + (82716, 1), + (83004, 1), + (83312, 1), + (83436, 1), + (83688, 1), + (83904, 1), + (84012, 1), + (84408, 1), + (84660, 1), + (85056, 1), + (85488, 1), + (85776, 1), + (85992, 1), + (86172, 1), + (86424, 1), + (86615, 1), + (86640, 1), + (86928, 1), + (87072, 1), + (87288, 1), + (87576, 1), + (87684, 1), + (87756, 1), + (87972, 1), + (88044, 1), + (88152, 1), + (88368, 1), + (88728, 1), + (88836, 1), + (88944, 1), + (89088, 1), + (89448, 1), + (89592, 1), + (89700, 1), + (89808, 1), + (89952, 1), + (90060, 1), + (90204, 1), + (90348, 1), + (90528, 1), + (90636, 1), + (90744, 1), + (90816, 1), + (91032, 1), + (91068, 1), + (91140, 1), + (91212, 1), + (91284, 1), + (91860, 1), + (92112, 1), + (92292, 1), + (92400, 1), + (92544, 1), + (92652, 1), + (92796, 1), + (92904, 1), + (92976, 1), + (93192, 1), + (93300, 1), + (93444, 1), + (93516, 1), + (93624, 1), + (93696, 1), + (93840, 1), + (93984, 1), + (94056, 1), + (94128, 1), + (94164, 1), + (94200, 1), + (94236, 1), + (94272, 1), + (94344, 1), + (94452, 1), + (94524, 1), + (94596, 1), + (94704, 1), + (94776, 1), + (94884, 1), + (94956, 1), + (95172, 1), + (95244, 1), + (95280, 1), + (95316, 1), + (95352, 1), + (95388, 1), + (95424, 1), + (95460, 1), + (95496, 1), + (95604, 1), + (95676, 1), + (95784, 1), + (95856, 1), + (95928, 1), + (96000, 1), + (96036, 1), + (96072, 1), + (96108, 1), + (96144, 1), + (96180, 1), + (96216, 1), + (96288, 1), + (96576, 1), + (98029, 1), + (98304, 1), + (98527, 1), + (98628, 1), + (99276, 1), + (99528, 1), + (99780, 1), + (99996, 1), + (100212, 1), + (100428, 1), + (100680, 1), + (100752, 1), + (100788, 1), + (100860, 1), + (100932, 1), + (101004, 1), + (101076, 1), + (101148, 1), + (101220, 1), + (101256, 1), + (101328, 1), + (101364, 1), + (101400, 1), + (101436, 1), + (101472, 1), + 
(101508, 1), + (101544, 1), + (101616, 1), + (101652, 1), + (101724, 1), + (101832, 1), + (101904, 1), + (101940, 1), + (101976, 1), + (102012, 1), + (102048, 1), + (102084, 1), + (102120, 1), + (102264, 1), + (102516, 1), + (102588, 1), + (102624, 1), + (102660, 1), + (102696, 1), + (102732, 1), + (102768, 1), + (102804, 1), + (102840, 1), + (102876, 1), + (102912, 1), + (102948, 1), + (102984, 1), + (103056, 1), + (103092, 1), + (103128, 1), + (103164, 1), + (103200, 1), + (103236, 1), + (103272, 1), + (103308, 1), + (103344, 1), + (103380, 1), + (103452, 1), + (103560, 1), + (103596, 1), + (103632, 1), + (103668, 1), + (103704, 1), + (103740, 1), + (103776, 1), + (103848, 1), + (103920, 1), + (103956, 1), + (104028, 1), + (104100, 1), + (104136, 1), + (104208, 1), + (104244, 1), + (104316, 1), + (104352, 1), + (104388, 1), + (104424, 1), + (104460, 1), + (104496, 1), + (104532, 1), + (104568, 1), + (104604, 1), + (104676, 1), + (104712, 1), + (104748, 1), + (104784, 1), + (104820, 1), + (104856, 1), + (104892, 1), + (104928, 1), + (104964, 1), + (105000, 1), + (105036, 1), + (105072, 1), + (105108, 1), + (105216, 1), + (105324, 1), + (105360, 1), + (105396, 1), + (105432, 1), + (105468, 1), + (105504, 1), + (105540, 1), + (105576, 1), + (105612, 1), + (105648, 1), + (105684, 1), + (105720, 1), + (105756, 1), + (105792, 1), + (105828, 1), + (105864, 1), + (105900, 1), + (105936, 1), + (110580, 1), + (115224, 1), + (118788, 1), + (121056, 1), + (121452, 1), + (121848, 1), + (122244, 1), + (122604, 1), + (122928, 1), + (123252, 1), + (123288, 1), + (123360, 1), + (123432, 1), + (123468, 1), + (123504, 1), + (123540, 1), + (123612, 1), + (123684, 1), + (123756, 1), + (123828, 1), + (123900, 1), + (123972, 1), + (124080, 1), + (124188, 1), + (124296, 1), + (124404, 1), + (124548, 1), + (124764, 1), + (124872, 1), + (124980, 1), + (125088, 1), + (125196, 1), + (125304, 1), + (125412, 1), + (125448, 1), + (125520, 1), + (125628, 1), + (125700, 1), + (125772, 1), + 
(125844, 1), + (125880, 1), + (125916, 1), + (125952, 1), + (125988, 1), + (126024, 1), + (126060, 1), + (126096, 1), + (126168, 1), + (126276, 1), + (126312, 1), + (126348, 1), + (126420, 1), + (126492, 1), + (126564, 1), + (126636, 1), + (126708, 1), + (126780, 1), + (126852, 1), + (126960, 1), + (127068, 1), + (127176, 1), + (127212, 1), + (127248, 1), + (127284, 1), + (127320, 1), + (127356, 1), + (127392, 1), + (127464, 1), + (127536, 1), + (127608, 1), + (127644, 1), + (127680, 1), + (127716, 1), + (127788, 1), + (127860, 1), + (127932, 1), + (128004, 1), + (128076, 1), + (128148, 1), + (128220, 1), + (128256, 1), + (128292, 1), + (128328, 1), + (128364, 1), + (128400, 1), + (128436, 1), + (128472, 1), + (128508, 1), + (128544, 1), + (128580, 1), + (128616, 1), + (128652, 1), + (128688, 1), + (128724, 1), + (128760, 1), + (128832, 1), + (128904, 1), + (128976, 1), + (129048, 1), + (129120, 1), + (129192, 1), + (129228, 1), + (129264, 1), + (129300, 1), + (129372, 1), + (129408, 1), + (129444, 1), + (129480, 1), + (129516, 1), + (129552, 1), + (129588, 1), + (129660, 1), + (129732, 1), + (129768, 1), + (129804, 1), + (129840, 1), + (129876, 1), + (129912, 1), + (129948, 1), + (129984, 1), + (130020, 1), + (130056, 1), + (130128, 1), + (130200, 1), + (130236, 1), + (130272, 1), + (130308, 1), + (130380, 1), + (130452, 1), + (130524, 1), + (130560, 1), + (130596, 1), + (130632, 1), + (130668, 1), + (130704, 1), + (130776, 1), + (130812, 1), + (130848, 1), + (130920, 1), + (130992, 1), + (131064, 1), + (131136, 1), + (131172, 1), + (131208, 1), + (131244, 1), + (131316, 1), + (131388, 1), + (131424, 1), + (131532, 1), + (131640, 1), + (131784, 1), + (131892, 1), + (131964, 1), + (132036, 1), + (132108, 1), + (132180, 1), + (132252, 1), + (132324, 1), + (132360, 1), + (132432, 1), + (132504, 1), + (132576, 1), + (132684, 1), + (132792, 1), + (132900, 1), + (132972, 1), + (133044, 1), + (133116, 1), + (133188, 1), + (133260, 1), + (133332, 1), + (133368, 1), + 
(133404, 1), + (133440, 1), + (133476, 1), + (133512, 1), + (133548, 1), + (133620, 1), + (133692, 1), + (133764, 1), + (133836, 1), + (133908, 1), + (133980, 1), + (134016, 1), + (134052, 1), + (134088, 1), + (134124, 1), + (134160, 1), + (134196, 1), + (134232, 1), + (134268, 1), + (134304, 1), + (134340, 1), + (134376, 1), + (134412, 1), + (134484, 1), + (134592, 1), + (134700, 1), + (134808, 1), + (134916, 1), + (134988, 1), + (135024, 1), + (135060, 1), + (135096, 1), + (135132, 1), + (135168, 1), + (135204, 1), + (135240, 1), + (135276, 1), + (135312, 1), + (135348, 1), + (135384, 1), + (135456, 1), + (135492, 1), + (135528, 1), + (135564, 1), + (135600, 1), + (135636, 1), + (135672, 1), + (135708, 1), + (135744, 1), + (135780, 1), + (135816, 1), + (135852, 1), + (135888, 1), + (135924, 1), + (135960, 1), + (135996, 1), + (136032, 1), + (136068, 1), + (136140, 1), + (136212, 1), + (136284, 1), + (136356, 1), + (136428, 1), + (136500, 1), + (136572, 1), + (136608, 1), + (136644, 1), + (136680, 1), + (136716, 1), + (136752, 1), + (136788, 1), + (136824, 1), + (136860, 1), + (136896, 1), + (136932, 1), + (136968, 1), + (137004, 1), + (137040, 1), + (137076, 1), + (137112, 1), + (137148, 1), + (137184, 1), + (137256, 1), + (137328, 1), + (137400, 1), + (137472, 1), + (137544, 1), + (137580, 1), + (137616, 1), + (137652, 1), + (137688, 1), + (137724, 1), + (137796, 1), + (137832, 1), + (137868, 1), + (137904, 1), + (137940, 1), + (137976, 1), + (138012, 1), + (138048, 1), + (138084, 1), + (138120, 1), + (138228, 1), + (138300, 1), + (138336, 1), + (138372, 1), + (138408, 1), + (138444, 1), + (138480, 1), + (138516, 1), + (138552, 1), + (138588, 1), + (138624, 1), + (138696, 1), + (138768, 1), + (138840, 1), + (138912, 1), + (138948, 1), + (138984, 1), + (139020, 1), + (139056, 1), + (139092, 1), + (139128, 1), + (139164, 1), + (139200, 1), + (139272, 1), + (139308, 1), + (139380, 1), + (139452, 1), + (139488, 1), + (139524, 1), + (139596, 1), + (139632, 1), + 
(139668, 1), + (139704, 1), + (139740, 1), + (139776, 1), + (139848, 1), + (139884, 1), + (139920, 1), + (139956, 1), + (139992, 1), + (140028, 1), + (140064, 1), + (140136, 1), + (140172, 1), + (140208, 1), + (140244, 1), + (140280, 1), + (140316, 1), + (140352, 1), + (140424, 1), + (140460, 1), + (140496, 1), + (140532, 1), + (140604, 1), + (140640, 1), + (140676, 1), + (140712, 1), + (140748, 1), + (140784, 1), + (140820, 1), + (140856, 1), + (140928, 1), + (141036, 1), + (141072, 1), + (141108, 1), + (141144, 1), + (141180, 1), + (141216, 1), + (141252, 1), + (141324, 1), + (141396, 1), + (141432, 1), + (141468, 1), + (141504, 1), + (141612, 1), + (142152, 1), + (142188, 1), + (142260, 1), + (142296, 1), + (142800, 1), + (143304, 1), + (143376, 1), + (143448, 1), + (143520, 1), + (143592, 1), + (143664, 1), + (143700, 1), + (143736, 1), + (143772, 1), + (143808, 1), + (143844, 1), + (143880, 1), + (143952, 1), + (144096, 1), + (144240, 1), + (144348, 1), + (144456, 1), + (144564, 1), + (144672, 1), + (144708, 1), + (144744, 1), + (144780, 1), + (144816, 1), + (144852, 1), + (144888, 1), + (144924, 1), + (144960, 1), + (144996, 1), + (145032, 1), + (145068, 1), + (145104, 1), + (145140, 1), + (145176, 1), + (145212, 1), + (145248, 1), + (145284, 1), + (145320, 1), + (145356, 1), + (145392, 1), + (145464, 1), + (145500, 1), + (145536, 1), + (145572, 1), + (145644, 1), + (145716, 1), + (145752, 1), + (145788, 1), + (145824, 1), + (145860, 1), + (145896, 1), + (145932, 1), + (145968, 1), + (146004, 1), + (146040, 1), + (146076, 1), + (146112, 1), + (146148, 1), + (146184, 1), + (146220, 1), + (146256, 1), + (146292, 1), + (146328, 1), + (146364, 1), + (146400, 1), + (146436, 1), + (146472, 1), + (146508, 1), + (146544, 1), + (146580, 1), + (146616, 1), + (146652, 1), + (146688, 1), + (146724, 1), + (146760, 1), + (146796, 1), + (146832, 1), + (146868, 1), + (146940, 1), + (146976, 1), + (147012, 1), + (147048, 1), + (147084, 1), + (147120, 1), + (147156, 1), + 
(147192, 1), + (147228, 1), + (147264, 1), + (147300, 1), + (147336, 1), + (147372, 1), + (147408, 1), + (147444, 1), + (147480, 1), + (147516, 1), + (147552, 1), + (147588, 1), + (147624, 1), + (147660, 1), + (147732, 1), + (147768, 1), + (147804, 1), + (147840, 1), + (147876, 1), + (147912, 1), + (147948, 1), + (147984, 1), + (148020, 1), + (148056, 1), + (148092, 1), + (148128, 1), + (148164, 1), + (148200, 1), + (148236, 1), + (148272, 1), + (1070556, 1), + (1079378, 1), + (1085421, 1), + (1086835, 1), + (1121118, 1), + (1121208, 1), + (1124515, 1), + (1128287, 1), + (1128379, 1), + (1153308, 1), + (1153342, 4), + (1153344, 5), + (1153398, 1), + (1153571, 1), + (1153663, 1), + (1153670, 1), + (1153672, 3), + (1153688, 3), + (1154504, 1), + (1154538, 5), + (1154540, 6), + (1154596, 1), + (1164963, 1), + (1165053, 1), + (1166494, 1), + (1166586, 1), + (1175528, 1), + (1175636, 1), + (1177016, 1), + (1193653, 1), + (1193743, 1), + (1205060, 1), + (1205152, 1), + (1323322, 1), + (1323414, 1), + (1336354, 1), + (1336444, 1), + (1348925, 1), + (1349015, 1), + (1353326, 1), + (1353418, 1), + (1426757, 1), + (1426845, 1), + (1426847, 1), + (1426937, 1), + (1476463, 1), + (1476553, 1), + (1516580, 1), + (1516670, 1), + (1605731, 1), + (1605821, 1), +]; diff --git a/bin/node/bench/src/tempdb.rs b/bin/node/bench/src/tempdb.rs index 067db6a565..82f5b7b7a3 100644 --- a/bin/node/bench/src/tempdb.rs +++ b/bin/node/bench/src/tempdb.rs @@ -14,55 +14,59 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::sync::Arc; use kvdb::KeyValueDB; -use kvdb_rocksdb::{DatabaseConfig, Database}; +use kvdb_rocksdb::{Database, DatabaseConfig}; +use std::sync::Arc; pub struct TempDatabase(tempfile::TempDir); impl TempDatabase { - pub fn new() -> Self { - let dir = tempfile::tempdir().expect("temp dir creation failed"); - log::trace!( - target: "bench-logistics", - "Created temp db at {}", - dir.path().to_string_lossy(), - ); + pub fn new() -> Self { + let dir = tempfile::tempdir().expect("temp dir creation failed"); + log::trace!( + target: "bench-logistics", + "Created temp db at {}", + dir.path().to_string_lossy(), + ); - TempDatabase(dir) - } + TempDatabase(dir) + } - pub fn open(&mut self) -> Arc { - let db_cfg = DatabaseConfig::with_columns(1); - let db = Database::open(&db_cfg, &self.0.path().to_string_lossy()).expect("Database backend error"); - Arc::new(db) - } + pub fn open(&mut self) -> Arc { + let db_cfg = DatabaseConfig::with_columns(1); + let db = Database::open(&db_cfg, &self.0.path().to_string_lossy()) + .expect("Database backend error"); + Arc::new(db) + } } impl Clone for TempDatabase { - fn clone(&self) -> Self { - let new_dir = tempfile::tempdir().expect("temp dir creation failed"); - let self_dir = self.0.path(); + fn clone(&self) -> Self { + let new_dir = tempfile::tempdir().expect("temp dir creation failed"); + let self_dir = self.0.path(); - log::trace!( - target: "bench-logistics", - "Cloning db ({}) to {}", - self_dir.to_string_lossy(), - new_dir.path().to_string_lossy(), - ); - let self_db_files = std::fs::read_dir(self_dir) - .expect("failed to list file in seed dir") - .map(|f_result| - f_result.expect("failed to read file in seed db") - .path() - .clone() - ).collect(); - fs_extra::copy_items( - &self_db_files, - new_dir.path(), - &fs_extra::dir::CopyOptions::new(), - ).expect("Copy of seed database is ok"); + log::trace!( + target: "bench-logistics", + "Cloning db ({}) to {}", + self_dir.to_string_lossy(), + new_dir.path().to_string_lossy(), 
+ ); + let self_db_files = std::fs::read_dir(self_dir) + .expect("failed to list file in seed dir") + .map(|f_result| { + f_result + .expect("failed to read file in seed db") + .path() + .clone() + }) + .collect(); + fs_extra::copy_items( + &self_db_files, + new_dir.path(), + &fs_extra::dir::CopyOptions::new(), + ) + .expect("Copy of seed database is ok"); - TempDatabase(new_dir) - } + TempDatabase(new_dir) + } } diff --git a/bin/node/bench/src/trie.rs b/bin/node/bench/src/trie.rs index 6f75741fa7..50fc7024d0 100644 --- a/bin/node/bench/src/trie.rs +++ b/bin/node/bench/src/trie.rs @@ -16,19 +16,19 @@ //! Trie benchmark (integrated). -use std::{borrow::Cow, sync::Arc}; +use hash_db::Prefix; use kvdb::KeyValueDB; use lazy_static::lazy_static; use rand::Rng; -use hash_db::Prefix; use sp_state_machine::Backend as _; +use std::{borrow::Cow, sync::Arc}; use node_primitives::Hash; use crate::{ - core::{self, Mode, Path}, - generator::generate_trie, - tempdb::TempDatabase, + core::{self, Mode, Path}, + generator::generate_trie, + tempdb::TempDatabase, }; pub const SAMPLE_SIZE: usize = 100; @@ -37,202 +37,204 @@ pub type KeyValues = Vec<(Vec, Vec)>; #[derive(Clone, Copy, Debug, derive_more::Display)] pub enum DatabaseSize { - #[display(fmt = "empty")] - Empty, - #[display(fmt = "smallest")] - Smallest, - #[display(fmt = "small")] - Small, - #[display(fmt = "medium")] - Medium, - #[display(fmt = "large")] - Large, - #[display(fmt = "largest")] - Largest, + #[display(fmt = "empty")] + Empty, + #[display(fmt = "smallest")] + Smallest, + #[display(fmt = "small")] + Small, + #[display(fmt = "medium")] + Medium, + #[display(fmt = "large")] + Large, + #[display(fmt = "largest")] + Largest, } lazy_static! 
{ - static ref KUSAMA_STATE_DISTRIBUTION: SizePool = - SizePool::from_histogram(crate::state_sizes::KUSAMA_STATE_DISTRIBUTION); + static ref KUSAMA_STATE_DISTRIBUTION: SizePool = + SizePool::from_histogram(crate::state_sizes::KUSAMA_STATE_DISTRIBUTION); } impl DatabaseSize { - /// Should be multiple of SAMPLE_SIZE! - fn keys(&self) -> usize { - let val = match *self { - Self::Empty => 200, // still need some keys to query - Self::Smallest => 1_000, - Self::Small => 10_000, - Self::Medium => 100_000, - Self::Large => 200_000, - Self::Largest => 1_000_000, - }; - - assert_eq!(val % SAMPLE_SIZE, 0); - - val - } + /// Should be multiple of SAMPLE_SIZE! + fn keys(&self) -> usize { + let val = match *self { + Self::Empty => 200, // still need some keys to query + Self::Smallest => 1_000, + Self::Small => 10_000, + Self::Medium => 100_000, + Self::Large => 200_000, + Self::Largest => 1_000_000, + }; + + assert_eq!(val % SAMPLE_SIZE, 0); + + val + } } pub struct TrieBenchmarkDescription { - pub database_size: DatabaseSize, + pub database_size: DatabaseSize, } pub struct TrieBenchmark { - database: TempDatabase, - root: Hash, - warmup_keys: KeyValues, - query_keys: KeyValues, + database: TempDatabase, + root: Hash, + warmup_keys: KeyValues, + query_keys: KeyValues, } impl core::BenchmarkDescription for TrieBenchmarkDescription { - fn path(&self) -> Path { - let mut path = Path::new(&["trie"]); - path.push(&format!("{}", self.database_size)); - path - } - - fn setup(self: Box) -> Box { - let mut database = TempDatabase::new(); - - // TODO: make seedable - let mut rng = rand::thread_rng(); - let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng); - - let mut key_values = KeyValues::new(); - let mut warmup_keys = KeyValues::new(); - let mut query_keys = KeyValues::new(); - let every_x_key = self.database_size.keys() / SAMPLE_SIZE; - for idx in 0..self.database_size.keys() { - let kv = ( - KUSAMA_STATE_DISTRIBUTION.key(&mut rng).to_vec(), - 
KUSAMA_STATE_DISTRIBUTION.value(&mut rng), - ); - if idx % every_x_key == 0 { - // warmup keys go to separate tree with high prob - let mut actual_warmup_key = warmup_prefix.clone(); - actual_warmup_key[16..].copy_from_slice(&kv.0[16..]); - warmup_keys.push((actual_warmup_key.clone(), kv.1.clone())); - key_values.push((actual_warmup_key.clone(), kv.1.clone())); - } else if idx % every_x_key == 1 { - query_keys.push(kv.clone()); - } - - key_values.push(kv) - } - - assert_eq!(warmup_keys.len(), SAMPLE_SIZE); - assert_eq!(query_keys.len(), SAMPLE_SIZE); - - let root = generate_trie( - database.open(), - key_values, - ); - - Box::new(TrieBenchmark { - database, - root, - warmup_keys, - query_keys, - }) - } - - fn name(&self) -> Cow<'static, str> { - - fn pretty_print(v: usize) -> String { - let mut print = String::new(); - for (idx, val) in v.to_string().chars().rev().enumerate() { - if idx != 0 && idx % 3 == 0 { - print.insert(0, ','); - } - print.insert(0, val); - } - print - } - - format!( - "Trie benchmark({} database ({} keys))", - self.database_size, - pretty_print(self.database_size.keys()), - ).into() - } + fn path(&self) -> Path { + let mut path = Path::new(&["trie"]); + path.push(&format!("{}", self.database_size)); + path + } + + fn setup(self: Box) -> Box { + let mut database = TempDatabase::new(); + + // TODO: make seedable + let mut rng = rand::thread_rng(); + let warmup_prefix = KUSAMA_STATE_DISTRIBUTION.key(&mut rng); + + let mut key_values = KeyValues::new(); + let mut warmup_keys = KeyValues::new(); + let mut query_keys = KeyValues::new(); + let every_x_key = self.database_size.keys() / SAMPLE_SIZE; + for idx in 0..self.database_size.keys() { + let kv = ( + KUSAMA_STATE_DISTRIBUTION.key(&mut rng).to_vec(), + KUSAMA_STATE_DISTRIBUTION.value(&mut rng), + ); + if idx % every_x_key == 0 { + // warmup keys go to separate tree with high prob + let mut actual_warmup_key = warmup_prefix.clone(); + actual_warmup_key[16..].copy_from_slice(&kv.0[16..]); + 
warmup_keys.push((actual_warmup_key.clone(), kv.1.clone())); + key_values.push((actual_warmup_key.clone(), kv.1.clone())); + } else if idx % every_x_key == 1 { + query_keys.push(kv.clone()); + } + + key_values.push(kv) + } + + assert_eq!(warmup_keys.len(), SAMPLE_SIZE); + assert_eq!(query_keys.len(), SAMPLE_SIZE); + + let root = generate_trie(database.open(), key_values); + + Box::new(TrieBenchmark { + database, + root, + warmup_keys, + query_keys, + }) + } + + fn name(&self) -> Cow<'static, str> { + fn pretty_print(v: usize) -> String { + let mut print = String::new(); + for (idx, val) in v.to_string().chars().rev().enumerate() { + if idx != 0 && idx % 3 == 0 { + print.insert(0, ','); + } + print.insert(0, val); + } + print + } + + format!( + "Trie benchmark({} database ({} keys))", + self.database_size, + pretty_print(self.database_size.keys()), + ) + .into() + } } struct Storage(Arc); impl sp_state_machine::Storage for Storage { - fn get(&self, key: &Hash, prefix: Prefix) -> Result>, String> { - let key = sp_trie::prefixed_key::(key, prefix); - self.0.get(0, &key).map_err(|e| format!("Database backend error: {:?}", e)) - } + fn get(&self, key: &Hash, prefix: Prefix) -> Result>, String> { + let key = sp_trie::prefixed_key::(key, prefix); + self.0 + .get(0, &key) + .map_err(|e| format!("Database backend error: {:?}", e)) + } } impl core::Benchmark for TrieBenchmark { - fn run(&mut self, mode: Mode) -> std::time::Duration { - let mut db = self.database.clone(); - let storage: Arc> = - Arc::new(Storage(db.open())); - - let trie_backend = sp_state_machine::TrieBackend::new( - storage, - self.root, - ); - for (warmup_key, warmup_value) in self.warmup_keys.iter() { - let value = trie_backend.storage(&warmup_key[..]) - .expect("Failed to get key: db error") - .expect("Warmup key should exist"); - - // sanity for warmup keys - assert_eq!(&value, warmup_value); - } - - if mode == Mode::Profile { - std::thread::park_timeout(std::time::Duration::from_secs(3)); - } - - let 
started = std::time::Instant::now(); - for (key, _) in self.query_keys.iter() { - let _ = trie_backend.storage(&key[..]); - } - let elapsed = started.elapsed(); - - if mode == Mode::Profile { - std::thread::park_timeout(std::time::Duration::from_secs(1)); - } - - elapsed / (SAMPLE_SIZE as u32) - } + fn run(&mut self, mode: Mode) -> std::time::Duration { + let mut db = self.database.clone(); + let storage: Arc> = + Arc::new(Storage(db.open())); + + let trie_backend = sp_state_machine::TrieBackend::new(storage, self.root); + for (warmup_key, warmup_value) in self.warmup_keys.iter() { + let value = trie_backend + .storage(&warmup_key[..]) + .expect("Failed to get key: db error") + .expect("Warmup key should exist"); + + // sanity for warmup keys + assert_eq!(&value, warmup_value); + } + + if mode == Mode::Profile { + std::thread::park_timeout(std::time::Duration::from_secs(3)); + } + + let started = std::time::Instant::now(); + for (key, _) in self.query_keys.iter() { + let _ = trie_backend.storage(&key[..]); + } + let elapsed = started.elapsed(); + + if mode == Mode::Profile { + std::thread::park_timeout(std::time::Duration::from_secs(1)); + } + + elapsed / (SAMPLE_SIZE as u32) + } } struct SizePool { - distribution: std::collections::BTreeMap, - total: u32, + distribution: std::collections::BTreeMap, + total: u32, } impl SizePool { - fn from_histogram(h: &[(u32, u32)]) -> SizePool { - let mut distribution = std::collections::BTreeMap::default(); - let mut total = 0; - for (size, count) in h { - total += count; - distribution.insert(total, *size); - } - SizePool { distribution, total } - } - - fn value(&self, rng: &mut R) -> Vec { - let sr = (rng.next_u64() % self.total as u64) as u32; - let mut range = self.distribution.range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded)); - let size = *range.next().unwrap().1 as usize; - let mut v = Vec::new(); - v.resize(size, 0); - rng.fill_bytes(&mut v); - v - } - - fn key(&self, rng: &mut R) -> Vec { - let mut key 
= [0u8; 32]; - rng.fill_bytes(&mut key[..]); - key.to_vec() - } -} \ No newline at end of file + fn from_histogram(h: &[(u32, u32)]) -> SizePool { + let mut distribution = std::collections::BTreeMap::default(); + let mut total = 0; + for (size, count) in h { + total += count; + distribution.insert(total, *size); + } + SizePool { + distribution, + total, + } + } + + fn value(&self, rng: &mut R) -> Vec { + let sr = (rng.next_u64() % self.total as u64) as u32; + let mut range = self + .distribution + .range((std::ops::Bound::Included(sr), std::ops::Bound::Unbounded)); + let size = *range.next().unwrap().1 as usize; + let mut v = Vec::new(); + v.resize(size, 0); + rng.fill_bytes(&mut v); + v + } + + fn key(&self, rng: &mut R) -> Vec { + let mut key = [0u8; 32]; + rng.fill_bytes(&mut key[..]); + key.to_vec() + } +} diff --git a/bin/node/cli/bin/main.rs b/bin/node/cli/bin/main.rs index cfad84a4cb..c21b74c88a 100644 --- a/bin/node/cli/bin/main.rs +++ b/bin/node/cli/bin/main.rs @@ -19,5 +19,5 @@ #![warn(missing_docs)] fn main() -> sc_cli::Result<()> { - node_cli::run() + node_cli::run() } diff --git a/bin/node/cli/build.rs b/bin/node/cli/build.rs index 12e0cab58a..033c5e25b5 100644 --- a/bin/node/cli/build.rs +++ b/bin/node/cli/build.rs @@ -15,47 +15,56 @@ // along with Substrate. If not, see . 
fn main() { - #[cfg(feature = "cli")] - cli::main(); + #[cfg(feature = "cli")] + cli::main(); } #[cfg(feature = "cli")] mod cli { - include!("src/cli.rs"); - - use std::{fs, env, path::Path}; - use sc_cli::structopt::clap::Shell; - use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; - - pub fn main() { - build_shell_completion(); - generate_cargo_keys(); - - rerun_if_git_head_changed(); - } - - /// Build shell completion scripts for all known shells - /// Full list in https://github.com/kbknapp/clap-rs/blob/e9d0562a1dc5dfe731ed7c767e6cee0af08f0cf9/src/app/parser.rs#L123 - fn build_shell_completion() { - for shell in &[Shell::Bash, Shell::Fish, Shell::Zsh, Shell::Elvish, Shell::PowerShell] { - build_completion(shell); - } - } - - /// Build the shell auto-completion for a given Shell - fn build_completion(shell: &Shell) { - let outdir = match env::var_os("OUT_DIR") { - None => return, - Some(dir) => dir, - }; - let path = Path::new(&outdir) - .parent().unwrap() - .parent().unwrap() - .parent().unwrap() - .join("completion-scripts"); - - fs::create_dir(&path).ok(); - - Cli::clap().gen_completions("substrate-node", *shell, &path); - } + include!("src/cli.rs"); + + use sc_cli::structopt::clap::Shell; + use std::{env, fs, path::Path}; + use substrate_build_script_utils::{generate_cargo_keys, rerun_if_git_head_changed}; + + pub fn main() { + build_shell_completion(); + generate_cargo_keys(); + + rerun_if_git_head_changed(); + } + + /// Build shell completion scripts for all known shells + /// Full list in https://github.com/kbknapp/clap-rs/blob/e9d0562a1dc5dfe731ed7c767e6cee0af08f0cf9/src/app/parser.rs#L123 + fn build_shell_completion() { + for shell in &[ + Shell::Bash, + Shell::Fish, + Shell::Zsh, + Shell::Elvish, + Shell::PowerShell, + ] { + build_completion(shell); + } + } + + /// Build the shell auto-completion for a given Shell + fn build_completion(shell: &Shell) { + let outdir = match env::var_os("OUT_DIR") { + None => return, + 
Some(dir) => dir, + }; + let path = Path::new(&outdir) + .parent() + .unwrap() + .parent() + .unwrap() + .parent() + .unwrap() + .join("completion-scripts"); + + fs::create_dir(&path).ok(); + + Cli::clap().gen_completions("substrate-node", *shell, &path); + } } diff --git a/bin/node/cli/src/browser.rs b/bin/node/cli/src/browser.rs index 6cd98dfe8d..b69804675d 100644 --- a/bin/node/cli/src/browser.rs +++ b/bin/node/cli/src/browser.rs @@ -15,41 +15,42 @@ // along with Substrate. If not, see . use crate::chain_spec::ChainSpec; -use log::info; -use wasm_bindgen::prelude::*; -use sc_service::Configuration; use browser_utils::{ - Client, - browser_configuration, set_console_error_panic_hook, init_console_log, + browser_configuration, init_console_log, set_console_error_panic_hook, Client, }; +use log::info; +use sc_service::Configuration; use std::str::FromStr; +use wasm_bindgen::prelude::*; /// Starts the client. #[wasm_bindgen] pub async fn start_client(chain_spec: String, log_level: String) -> Result { - start_inner(chain_spec, log_level) - .await - .map_err(|err| JsValue::from_str(&err.to_string())) + start_inner(chain_spec, log_level) + .await + .map_err(|err| JsValue::from_str(&err.to_string())) } -async fn start_inner(chain_spec: String, log_level: String) -> Result> { - set_console_error_panic_hook(); - init_console_log(log::Level::from_str(&log_level)?)?; - let chain_spec = ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec()) - .map_err(|e| format!("{:?}", e))?; +async fn start_inner( + chain_spec: String, + log_level: String, +) -> Result> { + set_console_error_panic_hook(); + init_console_log(log::Level::from_str(&log_level)?)?; + let chain_spec = ChainSpec::from_json_bytes(chain_spec.as_bytes().to_vec()) + .map_err(|e| format!("{:?}", e))?; - let config = browser_configuration(chain_spec).await?; + let config = browser_configuration(chain_spec).await?; - info!("Substrate browser node"); - info!("✌️ version {}", config.impl_version); - info!("❤️ by 
Parity Technologies, 2017-2020"); - info!("📋 Chain specification: {}", config.chain_spec.name()); - info!("🏷 Node name: {}", config.network.node_name); - info!("👤 Role: {:?}", config.role); + info!("Substrate browser node"); + info!("✌️ version {}", config.impl_version); + info!("❤️ by Parity Technologies, 2017-2020"); + info!("📋 Chain specification: {}", config.chain_spec.name()); + info!("🏷 Node name: {}", config.network.node_name); + info!("👤 Role: {:?}", config.role); - // Create the service. This is the most heavy initialization step. - let service = crate::service::new_light(config) - .map_err(|e| format!("{:?}", e))?; + // Create the service. This is the most heavy initialization step. + let service = crate::service::new_light(config).map_err(|e| format!("{:?}", e))?; - Ok(browser_utils::start_client(service)) + Ok(browser_utils::start_client(service)) } diff --git a/bin/node/cli/src/chain_spec.rs b/bin/node/cli/src/chain_spec.rs index 6d67c61381..c41a726f80 100644 --- a/bin/node/cli/src/chain_spec.rs +++ b/bin/node/cli/src/chain_spec.rs @@ -16,25 +16,28 @@ //! Substrate chain configurations. 
-use sc_chain_spec::ChainSpecExtension; -use sp_core::{Pair, Public, crypto::UncheckedInto, sr25519}; -use serde::{Serialize, Deserialize}; +use grandpa_primitives::AuthorityId as GrandpaId; +use hex_literal::hex; +use node_runtime::constants::currency::*; +use node_runtime::Block; use node_runtime::{ - AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, CouncilConfig, - DemocracyConfig,GrandpaConfig, ImOnlineConfig, SessionConfig, SessionKeys, StakerStatus, - StakingConfig, ElectionsConfig, IndicesConfig, SocietyConfig, SudoConfig, SystemConfig, - TechnicalCommitteeConfig, WASM_BINARY, + AuthorityDiscoveryConfig, BabeConfig, BalancesConfig, ContractsConfig, CouncilConfig, + DemocracyConfig, ElectionsConfig, GrandpaConfig, ImOnlineConfig, IndicesConfig, SessionConfig, + SessionKeys, SocietyConfig, StakerStatus, StakingConfig, SudoConfig, SystemConfig, + TechnicalCommitteeConfig, WASM_BINARY, }; -use node_runtime::Block; -use node_runtime::constants::currency::*; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use sc_chain_spec::ChainSpecExtension; use sc_service::ChainType; -use hex_literal::hex; use sc_telemetry::TelemetryEndpoints; -use grandpa_primitives::{AuthorityId as GrandpaId}; -use sp_consensus_babe::{AuthorityId as BabeId}; -use pallet_im_online::sr25519::{AuthorityId as ImOnlineId}; +use serde::{Deserialize, Serialize}; use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use sp_runtime::{Perbill, traits::{Verify, IdentifyAccount}}; +use sp_consensus_babe::AuthorityId as BabeId; +use sp_core::{crypto::UncheckedInto, sr25519, Pair, Public}; +use sp_runtime::{ + traits::{IdentifyAccount, Verify}, + Perbill, +}; pub use node_primitives::{AccountId, Balance, Signature}; pub use node_runtime::GenesisConfig; @@ -50,402 +53,429 @@ const STAGING_TELEMETRY_URL: &str = "wss://telemetry.polkadot.io/submit/"; #[derive(Default, Clone, Serialize, Deserialize, ChainSpecExtension)] #[serde(rename_all = "camelCase")] pub struct 
Extensions { - /// Block numbers with known hashes. - pub fork_blocks: sc_client::ForkBlocks, - /// Known bad block hashes. - pub bad_blocks: sc_client::BadBlocks, + /// Block numbers with known hashes. + pub fork_blocks: sc_client::ForkBlocks, + /// Known bad block hashes. + pub bad_blocks: sc_client::BadBlocks, } /// Specialized `ChainSpec`. -pub type ChainSpec = sc_service::GenericChainSpec< - GenesisConfig, - Extensions, ->; +pub type ChainSpec = sc_service::GenericChainSpec; /// Flaming Fir testnet generator pub fn flaming_fir_config() -> Result { - ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..]) + ChainSpec::from_json_bytes(&include_bytes!("../res/flaming-fir.json")[..]) } fn session_keys( - grandpa: GrandpaId, - babe: BabeId, - im_online: ImOnlineId, - authority_discovery: AuthorityDiscoveryId, + grandpa: GrandpaId, + babe: BabeId, + im_online: ImOnlineId, + authority_discovery: AuthorityDiscoveryId, ) -> SessionKeys { - SessionKeys { grandpa, babe, im_online, authority_discovery } + SessionKeys { + grandpa, + babe, + im_online, + authority_discovery, + } } fn staging_testnet_config_genesis() -> GenesisConfig { - // stash, controller, session-key - // generated with secret: - // for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done - // and - // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done - - let initial_authorities: Vec<(AccountId, AccountId, GrandpaId, BabeId, ImOnlineId, AuthorityDiscoveryId)> = vec![( - // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy - hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), - // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq - hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), - // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC - hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"].unchecked_into(), - // 
5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 - hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"].unchecked_into(), - ),( - // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 - hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), - // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF - hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), - // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE - hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ - hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"].unchecked_into(), - ),( - // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp - hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), - // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 - hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), - // 5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d - hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - 
hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH - hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"].unchecked_into(), - ),( - // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 - hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), - // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn - hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), - // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 - hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x - hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"].unchecked_into(), - )]; - - // generated with secret: subkey inspect "$secret"/fir - let root_key: AccountId = hex![ - // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo - "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" - ].into(); - - let endowed_accounts: Vec = vec![root_key.clone()]; - - testnet_genesis( - initial_authorities, - root_key, - Some(endowed_accounts), - false, - ) + // stash, controller, session-key + // generated with secret: + // for i in 1 2 3 4 ; do for j in stash controller; do subkey inspect "$secret"/fir/$j/$i; done; done + // and + // for i in 1 2 3 4 ; do for j in session; do subkey --ed25519 inspect "$secret"//fir//$j//$i; done; done + + let initial_authorities: Vec<( + AccountId, + AccountId, + GrandpaId, + BabeId, + ImOnlineId, + AuthorityDiscoveryId, + )> = vec![ + ( + // 5Fbsd6WXDGiLTxunqeK5BATNiocfCqu9bS1yArVjCgeBLkVy + 
hex!["9c7a2ee14e565db0c69f78c7b4cd839fbf52b607d867e9e9c5a79042898a0d12"].into(), + // 5EnCiV7wSHeNhjW3FSUwiJNkcc2SBkPLn5Nj93FmbLtBjQUq + hex!["781ead1e2fa9ccb74b44c19d29cb2a7a4b5be3972927ae98cd3877523976a276"].into(), + // 5Fb9ayurnxnaXj56CjmyQLBiadfRCqUbL2VWNbbe1nZU6wiC + hex!["9becad03e6dcac03cee07edebca5475314861492cdfc96a2144a67bbe9699332"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + // 5EZaeQ8djPcq9pheJUhgerXQZt9YaHnMJpiHMRhwQeinqUW8 + hex!["6e7e4eb42cbd2e0ab4cae8708ce5509580b8c04d11f6758dbf686d50fe9f9106"] + .unchecked_into(), + ), + ( + // 5ERawXCzCWkjVq3xz1W5KGNtVx2VdefvZ62Bw1FEuZW4Vny2 + hex!["68655684472b743e456907b398d3a44c113f189e56d1bbfd55e889e295dfde78"].into(), + // 5Gc4vr42hH1uDZc93Nayk5G7i687bAQdHHc9unLuyeawHipF + hex!["c8dc79e36b29395413399edaec3e20fcca7205fb19776ed8ddb25d6f427ec40e"].into(), + // 5EockCXN6YkiNCDjpqqnbcqd4ad35nU4RmA1ikM4YeRN4WcE + hex!["7932cff431e748892fa48e10c63c17d30f80ca42e4de3921e641249cd7fa3c2f"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + // 5DhLtiaQd1L1LU9jaNeeu9HJkP6eyg3BwXA7iNMzKm7qqruQ + hex!["482dbd7297a39fa145c570552249c2ca9dd47e281f0c500c971b59c9dcdcd82e"] + .unchecked_into(), + ), + ( + // 5DyVtKWPidondEu8iHZgi6Ffv9yrJJ1NDNLom3X9cTDi98qp + hex!["547ff0ab649283a7ae01dbc2eb73932eba2fb09075e9485ff369082a2ff38d65"].into(), + // 5FeD54vGVNpFX3PndHPXJ2MDakc462vBCD5mgtWRnWYCpZU9 + hex!["9e42241d7cd91d001773b0b616d523dd80e13c6c2cab860b1234ef1b9ffc1526"].into(), + // 
5E1jLYfLdUQKrFrtqoKgFrRvxM3oQPMbf6DfcsrugZZ5Bn8d + hex!["5633b70b80a6c8bb16270f82cca6d56b27ed7b76c8fd5af2986a25a4788ce440"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + // 5DhKqkHRkndJu8vq7pi2Q5S3DfftWJHGxbEUNH43b46qNspH + hex!["482a3389a6cf42d8ed83888cfd920fec738ea30f97e44699ada7323f08c3380a"] + .unchecked_into(), + ), + ( + // 5HYZnKWe5FVZQ33ZRJK1rG3WaLMztxWrrNDb1JRwaHHVWyP9 + hex!["f26cdb14b5aec7b2789fd5ca80f979cef3761897ae1f37ffb3e154cbcc1c2663"].into(), + // 5EPQdAQ39WQNLCRjWsCk5jErsCitHiY5ZmjfWzzbXDoAoYbn + hex!["66bc1e5d275da50b72b15de072a2468a5ad414919ca9054d2695767cf650012f"].into(), + // 5DMa31Hd5u1dwoRKgC4uvqyrdK45RHv3CpwvpUC1EzuwDit4 + hex!["3919132b851ef0fd2dae42a7e734fe547af5a6b809006100f48944d7fae8e8ef"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + // 5C4vDQxA8LTck2xJEy4Yg1hM9qjDt4LvTQaMo4Y8ne43aU6x + hex!["00299981a2b92f878baaf5dbeba5c18d4e70f2a1fcd9c61b32ea18daf38f4378"] + .unchecked_into(), + ), + ]; + + // generated with secret: subkey inspect "$secret"/fir + let root_key: AccountId = hex![ + // 5Ff3iXP75ruzroPWRP2FYBHWnmGGBSb63857BgnzCoXNxfPo + "9ee5e5bdc0ec239eb164f865ecc345ce4c88e76ee002e0f7e318097347471809" + ] + .into(); + + let endowed_accounts: Vec = vec![root_key.clone()]; + + testnet_genesis(initial_authorities, root_key, Some(endowed_accounts), false) } /// Staging testnet config. 
pub fn staging_testnet_config() -> ChainSpec { - let boot_nodes = vec![]; - ChainSpec::from_genesis( - "Staging Testnet", - "staging_testnet", - ChainType::Live, - staging_testnet_config_genesis, - boot_nodes, - Some(TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) - .expect("Staging telemetry url is valid; qed")), - None, - None, - Default::default(), - ) + let boot_nodes = vec![]; + ChainSpec::from_genesis( + "Staging Testnet", + "staging_testnet", + ChainType::Live, + staging_testnet_config_genesis, + boot_nodes, + Some( + TelemetryEndpoints::new(vec![(STAGING_TELEMETRY_URL.to_string(), 0)]) + .expect("Staging telemetry url is valid; qed"), + ), + None, + None, + Default::default(), + ) } /// Helper function to generate a crypto pair from seed pub fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() } /// Helper function to generate an account ID from seed -pub fn get_account_id_from_seed(seed: &str) -> AccountId where - AccountPublic: From<::Public> +pub fn get_account_id_from_seed(seed: &str) -> AccountId +where + AccountPublic: From<::Public>, { - AccountPublic::from(get_from_seed::(seed)).into_account() + AccountPublic::from(get_from_seed::(seed)).into_account() } /// Helper function to generate stash, controller and session key from seed -pub fn authority_keys_from_seed(seed: &str) -> ( - AccountId, - AccountId, - GrandpaId, - BabeId, - ImOnlineId, - AuthorityDiscoveryId, +pub fn authority_keys_from_seed( + seed: &str, +) -> ( + AccountId, + AccountId, + GrandpaId, + BabeId, + ImOnlineId, + AuthorityDiscoveryId, ) { - ( - get_account_id_from_seed::(&format!("{}//stash", seed)), - get_account_id_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - get_from_seed::(seed), - ) + ( + 
get_account_id_from_seed::(&format!("{}//stash", seed)), + get_account_id_from_seed::(seed), + get_from_seed::(seed), + get_from_seed::(seed), + get_from_seed::(seed), + get_from_seed::(seed), + ) } /// Helper function to create GenesisConfig for testing pub fn testnet_genesis( - initial_authorities: Vec<( - AccountId, - AccountId, - GrandpaId, - BabeId, - ImOnlineId, - AuthorityDiscoveryId, - )>, - root_key: AccountId, - endowed_accounts: Option>, - enable_println: bool, + initial_authorities: Vec<( + AccountId, + AccountId, + GrandpaId, + BabeId, + ImOnlineId, + AuthorityDiscoveryId, + )>, + root_key: AccountId, + endowed_accounts: Option>, + enable_println: bool, ) -> GenesisConfig { - let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { - vec![ - get_account_id_from_seed::("Alice"), - get_account_id_from_seed::("Bob"), - get_account_id_from_seed::("Charlie"), - get_account_id_from_seed::("Dave"), - get_account_id_from_seed::("Eve"), - get_account_id_from_seed::("Ferdie"), - get_account_id_from_seed::("Alice//stash"), - get_account_id_from_seed::("Bob//stash"), - get_account_id_from_seed::("Charlie//stash"), - get_account_id_from_seed::("Dave//stash"), - get_account_id_from_seed::("Eve//stash"), - get_account_id_from_seed::("Ferdie//stash"), - ] - }); - let num_endowed_accounts = endowed_accounts.len(); - - const ENDOWMENT: Balance = 10_000_000 * DOLLARS; - const STASH: Balance = 100 * DOLLARS; - - GenesisConfig { - frame_system: Some(SystemConfig { - code: WASM_BINARY.to_vec(), - changes_trie_config: Default::default(), - }), - pallet_balances: Some(BalancesConfig { - balances: endowed_accounts.iter().cloned() - .map(|k| (k, ENDOWMENT)) - .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) - .collect(), - }), - pallet_indices: Some(IndicesConfig { - indices: vec![], - }), - pallet_session: Some(SessionConfig { - keys: initial_authorities.iter().map(|x| { - (x.0.clone(), x.0.clone(), session_keys( - x.2.clone(), - x.3.clone(), - 
x.4.clone(), - x.5.clone(), - )) - }).collect::>(), - }), - pallet_staking: Some(StakingConfig { - validator_count: initial_authorities.len() as u32 * 2, - minimum_validator_count: initial_authorities.len() as u32, - stakers: initial_authorities.iter().map(|x| { - (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator) - }).collect(), - invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), - slash_reward_fraction: Perbill::from_percent(10), - .. Default::default() - }), - pallet_democracy: Some(DemocracyConfig::default()), - pallet_elections_phragmen: Some(ElectionsConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .map(|member| (member, STASH)) - .collect(), - }), - pallet_collective_Instance1: Some(CouncilConfig::default()), - pallet_collective_Instance2: Some(TechnicalCommitteeConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect(), - phantom: Default::default(), - }), - pallet_contracts: Some(ContractsConfig { - current_schedule: pallet_contracts::Schedule { - enable_println, // this should only be enabled on development chains - ..Default::default() - }, - gas_price: 1 * MILLICENTS, - }), - pallet_sudo: Some(SudoConfig { - key: root_key, - }), - pallet_babe: Some(BabeConfig { - authorities: vec![], - }), - pallet_im_online: Some(ImOnlineConfig { - keys: vec![], - }), - pallet_authority_discovery: Some(AuthorityDiscoveryConfig { - keys: vec![], - }), - pallet_grandpa: Some(GrandpaConfig { - authorities: vec![], - }), - pallet_membership_Instance1: Some(Default::default()), - pallet_treasury: Some(Default::default()), - pallet_society: Some(SocietyConfig { - members: endowed_accounts.iter() - .take((num_endowed_accounts + 1) / 2) - .cloned() - .collect(), - pot: 0, - max_members: 999, - }), - pallet_vesting: Some(Default::default()), - } + let endowed_accounts: Vec = endowed_accounts.unwrap_or_else(|| { + vec![ + 
get_account_id_from_seed::("Alice"), + get_account_id_from_seed::("Bob"), + get_account_id_from_seed::("Charlie"), + get_account_id_from_seed::("Dave"), + get_account_id_from_seed::("Eve"), + get_account_id_from_seed::("Ferdie"), + get_account_id_from_seed::("Alice//stash"), + get_account_id_from_seed::("Bob//stash"), + get_account_id_from_seed::("Charlie//stash"), + get_account_id_from_seed::("Dave//stash"), + get_account_id_from_seed::("Eve//stash"), + get_account_id_from_seed::("Ferdie//stash"), + ] + }); + let num_endowed_accounts = endowed_accounts.len(); + + const ENDOWMENT: Balance = 10_000_000 * DOLLARS; + const STASH: Balance = 100 * DOLLARS; + + GenesisConfig { + frame_system: Some(SystemConfig { + code: WASM_BINARY.to_vec(), + changes_trie_config: Default::default(), + }), + pallet_balances: Some(BalancesConfig { + balances: endowed_accounts + .iter() + .cloned() + .map(|k| (k, ENDOWMENT)) + .chain(initial_authorities.iter().map(|x| (x.0.clone(), STASH))) + .collect(), + }), + pallet_indices: Some(IndicesConfig { indices: vec![] }), + pallet_session: Some(SessionConfig { + keys: initial_authorities + .iter() + .map(|x| { + ( + x.0.clone(), + x.0.clone(), + session_keys(x.2.clone(), x.3.clone(), x.4.clone(), x.5.clone()), + ) + }) + .collect::>(), + }), + pallet_staking: Some(StakingConfig { + validator_count: initial_authorities.len() as u32 * 2, + minimum_validator_count: initial_authorities.len() as u32, + stakers: initial_authorities + .iter() + .map(|x| (x.0.clone(), x.1.clone(), STASH, StakerStatus::Validator)) + .collect(), + invulnerables: initial_authorities.iter().map(|x| x.0.clone()).collect(), + slash_reward_fraction: Perbill::from_percent(10), + ..Default::default() + }), + pallet_democracy: Some(DemocracyConfig::default()), + pallet_elections_phragmen: Some(ElectionsConfig { + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .map(|member| (member, STASH)) + .collect(), + }), + 
pallet_collective_Instance1: Some(CouncilConfig::default()), + pallet_collective_Instance2: Some(TechnicalCommitteeConfig { + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .collect(), + phantom: Default::default(), + }), + pallet_contracts: Some(ContractsConfig { + current_schedule: pallet_contracts::Schedule { + enable_println, // this should only be enabled on development chains + ..Default::default() + }, + gas_price: 1 * MILLICENTS, + }), + pallet_sudo: Some(SudoConfig { key: root_key }), + pallet_babe: Some(BabeConfig { + authorities: vec![], + }), + pallet_im_online: Some(ImOnlineConfig { keys: vec![] }), + pallet_authority_discovery: Some(AuthorityDiscoveryConfig { keys: vec![] }), + pallet_grandpa: Some(GrandpaConfig { + authorities: vec![], + }), + pallet_membership_Instance1: Some(Default::default()), + pallet_treasury: Some(Default::default()), + pallet_society: Some(SocietyConfig { + members: endowed_accounts + .iter() + .take((num_endowed_accounts + 1) / 2) + .cloned() + .collect(), + pot: 0, + max_members: 999, + }), + pallet_vesting: Some(Default::default()), + } } fn development_config_genesis() -> GenesisConfig { - testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - ], - get_account_id_from_seed::("Alice"), - None, - true, - ) + testnet_genesis( + vec![authority_keys_from_seed("Alice")], + get_account_id_from_seed::("Alice"), + None, + true, + ) } /// Development config (single validator Alice) pub fn development_config() -> ChainSpec { - ChainSpec::from_genesis( - "Development", - "dev", - ChainType::Development, - development_config_genesis, - vec![], - None, - None, - None, - Default::default(), - ) + ChainSpec::from_genesis( + "Development", + "dev", + ChainType::Development, + development_config_genesis, + vec![], + None, + None, + None, + Default::default(), + ) } fn local_testnet_genesis() -> GenesisConfig { - testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - 
authority_keys_from_seed("Bob"), - ], - get_account_id_from_seed::("Alice"), - None, - false, - ) + testnet_genesis( + vec![ + authority_keys_from_seed("Alice"), + authority_keys_from_seed("Bob"), + ], + get_account_id_from_seed::("Alice"), + None, + false, + ) } /// Local testnet config (multivalidator Alice + Bob) pub fn local_testnet_config() -> ChainSpec { - ChainSpec::from_genesis( - "Local Testnet", - "local_testnet", - ChainType::Local, - local_testnet_genesis, - vec![], - None, - None, - None, - Default::default(), - ) + ChainSpec::from_genesis( + "Local Testnet", + "local_testnet", + ChainType::Local, + local_testnet_genesis, + vec![], + None, + None, + None, + Default::default(), + ) } #[cfg(test)] pub(crate) mod tests { - use super::*; - use crate::service::{new_full, new_light}; - use sc_service_test; - use sp_runtime::BuildStorage; - - fn local_testnet_genesis_instant_single() -> GenesisConfig { - testnet_genesis( - vec![ - authority_keys_from_seed("Alice"), - ], - get_account_id_from_seed::("Alice"), - None, - false, - ) - } - - /// Local testnet config (single validator - Alice) - pub fn integration_test_config_with_single_authority() -> ChainSpec { - ChainSpec::from_genesis( - "Integration Test", - "test", - ChainType::Development, - local_testnet_genesis_instant_single, - vec![], - None, - None, - None, - Default::default(), - ) - } - - /// Local testnet config (multivalidator Alice + Bob) - pub fn integration_test_config_with_two_authorities() -> ChainSpec { - ChainSpec::from_genesis( - "Integration Test", - "test", - ChainType::Development, - local_testnet_genesis, - vec![], - None, - None, - None, - Default::default(), - ) - } - - #[test] - #[ignore] - fn test_connectivity() { - sc_service_test::connectivity( - integration_test_config_with_two_authorities(), - |config| new_full(config), - |config| new_light(config), - ); - } - - #[test] - fn test_create_development_chain_spec() { - development_config().build_storage().unwrap(); - } - - #[test] - 
fn test_create_local_testnet_chain_spec() { - local_testnet_config().build_storage().unwrap(); - } - - #[test] - fn test_staging_test_net_chain_spec() { - staging_testnet_config().build_storage().unwrap(); - } + use super::*; + use crate::service::{new_full, new_light}; + use sc_service_test; + use sp_runtime::BuildStorage; + + fn local_testnet_genesis_instant_single() -> GenesisConfig { + testnet_genesis( + vec![authority_keys_from_seed("Alice")], + get_account_id_from_seed::("Alice"), + None, + false, + ) + } + + /// Local testnet config (single validator - Alice) + pub fn integration_test_config_with_single_authority() -> ChainSpec { + ChainSpec::from_genesis( + "Integration Test", + "test", + ChainType::Development, + local_testnet_genesis_instant_single, + vec![], + None, + None, + None, + Default::default(), + ) + } + + /// Local testnet config (multivalidator Alice + Bob) + pub fn integration_test_config_with_two_authorities() -> ChainSpec { + ChainSpec::from_genesis( + "Integration Test", + "test", + ChainType::Development, + local_testnet_genesis, + vec![], + None, + None, + None, + Default::default(), + ) + } + + #[test] + #[ignore] + fn test_connectivity() { + sc_service_test::connectivity( + integration_test_config_with_two_authorities(), + |config| new_full(config), + |config| new_light(config), + ); + } + + #[test] + fn test_create_development_chain_spec() { + development_config().build_storage().unwrap(); + } + + #[test] + fn test_create_local_testnet_chain_spec() { + local_testnet_config().build_storage().unwrap(); + } + + #[test] + fn test_staging_test_net_chain_spec() { + staging_testnet_config().build_storage().unwrap(); + } } diff --git a/bin/node/cli/src/cli.rs b/bin/node/cli/src/cli.rs index 44b18fd716..9ab8012744 100644 --- a/bin/node/cli/src/cli.rs +++ b/bin/node/cli/src/cli.rs @@ -20,57 +20,57 @@ use structopt::StructOpt; /// An overarching CLI command definition. 
#[derive(Clone, Debug, StructOpt)] pub struct Cli { - /// Possible subcommand with parameters. - #[structopt(subcommand)] - pub subcommand: Option, - #[allow(missing_docs)] - #[structopt(flatten)] - pub run: RunCmd, + /// Possible subcommand with parameters. + #[structopt(subcommand)] + pub subcommand: Option, + #[allow(missing_docs)] + #[structopt(flatten)] + pub run: RunCmd, } /// Possible subcommands of the main binary. #[derive(Clone, Debug, StructOpt)] pub enum Subcommand { - /// A set of base subcommands handled by `sc_cli`. - #[structopt(flatten)] - Base(sc_cli::Subcommand), - /// The custom factory subcommmand for manufacturing transactions. - #[structopt( - name = "factory", - about = "Manufactures num transactions from Alice to random accounts. \ + /// A set of base subcommands handled by `sc_cli`. + #[structopt(flatten)] + Base(sc_cli::Subcommand), + /// The custom factory subcommmand for manufacturing transactions. + #[structopt( + name = "factory", + about = "Manufactures num transactions from Alice to random accounts. \ Only supported for development or local testnet." - )] - Factory(FactoryCmd), + )] + Factory(FactoryCmd), - /// The custom inspect subcommmand for decoding blocks and extrinsics. - #[structopt( - name = "inspect", - about = "Decode given block or extrinsic using current native runtime." - )] - Inspect(node_inspect::cli::InspectCmd), + /// The custom inspect subcommmand for decoding blocks and extrinsics. + #[structopt( + name = "inspect", + about = "Decode given block or extrinsic using current native runtime." + )] + Inspect(node_inspect::cli::InspectCmd), - /// The custom benchmark subcommmand benchmarking runtime pallets. - #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] - Benchmark(frame_benchmarking_cli::BenchmarkCmd), + /// The custom benchmark subcommmand benchmarking runtime pallets. 
+ #[structopt(name = "benchmark", about = "Benchmark runtime pallets.")] + Benchmark(frame_benchmarking_cli::BenchmarkCmd), } /// The `factory` command used to generate transactions. /// Please note: this command currently only works on an empty database! #[derive(Debug, StructOpt, Clone)] pub struct FactoryCmd { - /// Number of blocks to generate. - #[structopt(long = "blocks", default_value = "1")] - pub blocks: u32, + /// Number of blocks to generate. + #[structopt(long = "blocks", default_value = "1")] + pub blocks: u32, - /// Number of transactions to push per block. - #[structopt(long = "transactions", default_value = "8")] - pub transactions: u32, + /// Number of transactions to push per block. + #[structopt(long = "transactions", default_value = "8")] + pub transactions: u32, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, } diff --git a/bin/node/cli/src/command.rs b/bin/node/cli/src/command.rs index ab7d6ea65e..e4874a2cb1 100644 --- a/bin/node/cli/src/command.rs +++ b/bin/node/cli/src/command.rs @@ -22,128 +22,126 @@ use sc_cli::{CliConfiguration, ImportParams, Result, SharedParams, SubstrateCli} use sc_service::Configuration; impl SubstrateCli for Cli { - fn impl_name() -> &'static str { - "Substrate Node" - } - - fn impl_version() -> &'static str { - env!("SUBSTRATE_CLI_IMPL_VERSION") - } - - fn description() -> &'static str { - env!("CARGO_PKG_DESCRIPTION") - } - - fn author() -> &'static str { - env!("CARGO_PKG_AUTHORS") - } - - fn support_url() -> &'static str { - "https://github.com/paritytech/substrate/issues/new" - } - - fn copyright_start_year() -> i32 { - 2017 - } - - fn executable_name() -> &'static str { - "substrate" - } - - fn load_spec(&self, id: &str) -> 
std::result::Result, String> { - Ok(match id { - "dev" => Box::new(chain_spec::development_config()), - "local" => Box::new(chain_spec::local_testnet_config()), - "" | "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), - "staging" => Box::new(chain_spec::staging_testnet_config()), - path => Box::new(chain_spec::ChainSpec::from_json_file( - std::path::PathBuf::from(path), - )?), - }) - } + fn impl_name() -> &'static str { + "Substrate Node" + } + + fn impl_version() -> &'static str { + env!("SUBSTRATE_CLI_IMPL_VERSION") + } + + fn description() -> &'static str { + env!("CARGO_PKG_DESCRIPTION") + } + + fn author() -> &'static str { + env!("CARGO_PKG_AUTHORS") + } + + fn support_url() -> &'static str { + "https://github.com/paritytech/substrate/issues/new" + } + + fn copyright_start_year() -> i32 { + 2017 + } + + fn executable_name() -> &'static str { + "substrate" + } + + fn load_spec(&self, id: &str) -> std::result::Result, String> { + Ok(match id { + "dev" => Box::new(chain_spec::development_config()), + "local" => Box::new(chain_spec::local_testnet_config()), + "" | "fir" | "flaming-fir" => Box::new(chain_spec::flaming_fir_config()?), + "staging" => Box::new(chain_spec::staging_testnet_config()), + path => Box::new(chain_spec::ChainSpec::from_json_file( + std::path::PathBuf::from(path), + )?), + }) + } } /// Parse command line arguments into service configuration. 
pub fn run() -> Result<()> { - sc_cli::reset_signal_pipe_handler()?; - - let cli = Cli::from_args(); - - match &cli.subcommand { - None => { - let runner = cli.create_runner(&cli.run)?; - runner.run_node( - service::new_light, - service::new_full, - node_runtime::VERSION - ) - } - Some(Subcommand::Inspect(cmd)) => { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run::(config)) - } - Some(Subcommand::Benchmark(cmd)) => { - if cfg!(feature = "runtime-benchmarks") { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run::(config)) - } else { - println!("Benchmarking wasn't enabled when building the node. \ - You can enable it with `--features runtime-benchmarks`."); - Ok(()) - } - } - Some(Subcommand::Factory(cmd)) => { - let runner = cli.create_runner(cmd)?; - - runner.sync_run(|config| cmd.run(config)) - } - Some(Subcommand::Base(subcommand)) => { - let runner = cli.create_runner(subcommand)?; - - runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) - } - } + sc_cli::reset_signal_pipe_handler()?; + + let cli = Cli::from_args(); + + match &cli.subcommand { + None => { + let runner = cli.create_runner(&cli.run)?; + runner.run_node(service::new_light, service::new_full, node_runtime::VERSION) + } + Some(Subcommand::Inspect(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| cmd.run::(config)) + } + Some(Subcommand::Benchmark(cmd)) => { + if cfg!(feature = "runtime-benchmarks") { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| cmd.run::(config)) + } else { + println!( + "Benchmarking wasn't enabled when building the node. \ + You can enable it with `--features runtime-benchmarks`." 
+ ); + Ok(()) + } + } + Some(Subcommand::Factory(cmd)) => { + let runner = cli.create_runner(cmd)?; + + runner.sync_run(|config| cmd.run(config)) + } + Some(Subcommand::Base(subcommand)) => { + let runner = cli.create_runner(subcommand)?; + + runner.run_subcommand(subcommand, |config| Ok(new_full_start!(config).0)) + } + } } impl CliConfiguration for FactoryCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - fn import_params(&self) -> Option<&ImportParams> { - Some(&self.import_params) - } + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } } impl FactoryCmd { - fn run(&self, config: Configuration) -> Result<()> { - match config.chain_spec.id() { - "dev" | "local" => {} - _ => return Err("Factory is only supported for development and local testnet.".into()), - } - - // Setup tracing. - if let Some(tracing_targets) = self.import_params.tracing_targets.as_ref() { - let subscriber = sc_tracing::ProfilingSubscriber::new( - self.import_params.tracing_receiver.into(), - tracing_targets, - ); - if let Err(e) = tracing::subscriber::set_global_default(subscriber) { - return Err(format!("Unable to set global default subscriber {}", e).into()); - } - } - - let factory_state = FactoryState::new(self.blocks, self.transactions); - - let service_builder = new_full_start!(config).0; - node_transaction_factory::factory( - factory_state, - service_builder.client(), - service_builder - .select_chain() - .expect("The select_chain is always initialized by new_full_start!; qed"), - ) - } + fn run(&self, config: Configuration) -> Result<()> { + match config.chain_spec.id() { + "dev" | "local" => {} + _ => return Err("Factory is only supported for development and local testnet.".into()), + } + + // Setup tracing. 
+ if let Some(tracing_targets) = self.import_params.tracing_targets.as_ref() { + let subscriber = sc_tracing::ProfilingSubscriber::new( + self.import_params.tracing_receiver.into(), + tracing_targets, + ); + if let Err(e) = tracing::subscriber::set_global_default(subscriber) { + return Err(format!("Unable to set global default subscriber {}", e).into()); + } + } + + let factory_state = FactoryState::new(self.blocks, self.transactions); + + let service_builder = new_full_start!(config).0; + node_transaction_factory::factory( + factory_state, + service_builder.client(), + service_builder + .select_chain() + .expect("The select_chain is always initialized by new_full_start!; qed"), + ) + } } diff --git a/bin/node/cli/src/factory_impl.rs b/bin/node/cli/src/factory_impl.rs index 1d1eabe29c..afba8df91c 100644 --- a/bin/node/cli/src/factory_impl.rs +++ b/bin/node/cli/src/factory_impl.rs @@ -18,187 +18,203 @@ //! using the cli to manufacture transactions and distribute them //! to accounts. -use rand::{Rng, SeedableRng}; use rand::rngs::StdRng; +use rand::{Rng, SeedableRng}; -use codec::{Encode, Decode}; -use sp_keyring::sr25519::Keyring; -use node_runtime::{ - Call, CheckedExtrinsic, UncheckedExtrinsic, SignedExtra, BalancesCall, ExistentialDeposit, - MinimumPeriod -}; +use codec::{Decode, Encode}; use node_primitives::Signature; -use sp_core::{sr25519, crypto::Pair}; -use sp_runtime::{ - generic::Era, traits::{Block as BlockT, Header as HeaderT, SignedExtension, Verify, IdentifyAccount} +use node_runtime::{ + BalancesCall, Call, CheckedExtrinsic, ExistentialDeposit, MinimumPeriod, SignedExtra, + UncheckedExtrinsic, }; use node_transaction_factory::RuntimeAdapter; +use sp_core::{crypto::Pair, sr25519}; +use sp_finality_tracker; use sp_inherents::InherentData; +use sp_keyring::sr25519::Keyring; +use sp_runtime::{ + generic::Era, + traits::{Block as BlockT, Header as HeaderT, IdentifyAccount, SignedExtension, Verify}, +}; use sp_timestamp; -use sp_finality_tracker; type 
AccountPublic = ::Signer; pub struct FactoryState { - blocks: u32, - transactions: u32, - block_number: N, - index: u32, + blocks: u32, + transactions: u32, + block_number: N, + index: u32, } type Number = <::Header as HeaderT>::Number; impl FactoryState { - fn build_extra(index: node_primitives::Index, phase: u64) -> node_runtime::SignedExtra { - ( - frame_system::CheckVersion::new(), - frame_system::CheckGenesis::new(), - frame_system::CheckEra::from(Era::mortal(256, phase)), - frame_system::CheckNonce::from(index), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(0), - Default::default(), - ) - } + fn build_extra(index: node_primitives::Index, phase: u64) -> node_runtime::SignedExtra { + ( + frame_system::CheckVersion::new(), + frame_system::CheckGenesis::new(), + frame_system::CheckEra::from(Era::mortal(256, phase)), + frame_system::CheckNonce::from(index), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(0), + Default::default(), + ) + } } impl RuntimeAdapter for FactoryState { - type AccountId = node_primitives::AccountId; - type Balance = node_primitives::Balance; - type Block = node_primitives::Block; - type Phase = sp_runtime::generic::Phase; - type Secret = sr25519::Pair; - type Index = node_primitives::Index; - - type Number = Number; - - fn new( - blocks: u32, - transactions: u32, - ) -> FactoryState { - FactoryState { - blocks, - transactions, - block_number: 0, - index: 0, - } - } - - fn block_number(&self) -> u32 { - self.block_number - } - - fn blocks(&self) -> u32 { - self.blocks - } - - fn transactions(&self) -> u32 { - self.transactions - } - - fn set_block_number(&mut self, value: u32) { - self.block_number = value; - } - - fn transfer_extrinsic( - &mut self, - sender: &Self::AccountId, - key: &Self::Secret, - destination: &Self::AccountId, - amount: &Self::Balance, - version: u32, - genesis_hash: &::Hash, - prior_block_hash: &::Hash, - ) -> 
::Extrinsic { - let phase = self.block_number() as Self::Phase; - let extra = Self::build_extra(self.index, phase); - self.index += 1; - - sign::(CheckedExtrinsic { - signed: Some((sender.clone(), extra)), - function: Call::Balances( - BalancesCall::transfer( - pallet_indices::address::Address::Id(destination.clone().into()), - (*amount).into() - ) - ) - }, key, (version, genesis_hash.clone(), prior_block_hash.clone(), (), (), (), ())) - } - - fn inherent_extrinsics(&self) -> InherentData { - let timestamp = (self.block_number as u64 + 1) * MinimumPeriod::get(); - - let mut inherent = InherentData::new(); - inherent.put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) - .expect("Failed putting timestamp inherent"); - inherent.put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &self.block_number) - .expect("Failed putting finalized number inherent"); - inherent - } - - fn minimum_balance() -> Self::Balance { - ExistentialDeposit::get() - } - - fn master_account_id() -> Self::AccountId { - Keyring::Alice.to_account_id() - } - - fn master_account_secret() -> Self::Secret { - Keyring::Alice.pair() - } - - /// Generates a random `AccountId` from `seed`. - fn gen_random_account_id(seed: u32) -> Self::AccountId { - let pair: sr25519::Pair = sr25519::Pair::from_seed(&gen_seed_bytes(seed)); - AccountPublic::from(pair.public()).into_account() - } - - /// Generates a random `Secret` from `seed`. 
- fn gen_random_account_secret(seed: u32) -> Self::Secret { - let pair: sr25519::Pair = sr25519::Pair::from_seed(&gen_seed_bytes(seed)); - pair - } + type AccountId = node_primitives::AccountId; + type Balance = node_primitives::Balance; + type Block = node_primitives::Block; + type Phase = sp_runtime::generic::Phase; + type Secret = sr25519::Pair; + type Index = node_primitives::Index; + + type Number = Number; + + fn new(blocks: u32, transactions: u32) -> FactoryState { + FactoryState { + blocks, + transactions, + block_number: 0, + index: 0, + } + } + + fn block_number(&self) -> u32 { + self.block_number + } + + fn blocks(&self) -> u32 { + self.blocks + } + + fn transactions(&self) -> u32 { + self.transactions + } + + fn set_block_number(&mut self, value: u32) { + self.block_number = value; + } + + fn transfer_extrinsic( + &mut self, + sender: &Self::AccountId, + key: &Self::Secret, + destination: &Self::AccountId, + amount: &Self::Balance, + version: u32, + genesis_hash: &::Hash, + prior_block_hash: &::Hash, + ) -> ::Extrinsic { + let phase = self.block_number() as Self::Phase; + let extra = Self::build_extra(self.index, phase); + self.index += 1; + + sign::( + CheckedExtrinsic { + signed: Some((sender.clone(), extra)), + function: Call::Balances(BalancesCall::transfer( + pallet_indices::address::Address::Id(destination.clone().into()), + (*amount).into(), + )), + }, + key, + ( + version, + genesis_hash.clone(), + prior_block_hash.clone(), + (), + (), + (), + (), + ), + ) + } + + fn inherent_extrinsics(&self) -> InherentData { + let timestamp = (self.block_number as u64 + 1) * MinimumPeriod::get(); + + let mut inherent = InherentData::new(); + inherent + .put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) + .expect("Failed putting timestamp inherent"); + inherent + .put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &self.block_number) + .expect("Failed putting finalized number inherent"); + inherent + } + + fn minimum_balance() -> Self::Balance { + 
ExistentialDeposit::get() + } + + fn master_account_id() -> Self::AccountId { + Keyring::Alice.to_account_id() + } + + fn master_account_secret() -> Self::Secret { + Keyring::Alice.pair() + } + + /// Generates a random `AccountId` from `seed`. + fn gen_random_account_id(seed: u32) -> Self::AccountId { + let pair: sr25519::Pair = sr25519::Pair::from_seed(&gen_seed_bytes(seed)); + AccountPublic::from(pair.public()).into_account() + } + + /// Generates a random `Secret` from `seed`. + fn gen_random_account_secret(seed: u32) -> Self::Secret { + let pair: sr25519::Pair = sr25519::Pair::from_seed(&gen_seed_bytes(seed)); + pair + } } fn gen_seed_bytes(seed: u32) -> [u8; 32] { - let mut rng: StdRng = SeedableRng::seed_from_u64(seed as u64); + let mut rng: StdRng = SeedableRng::seed_from_u64(seed as u64); - let mut seed_bytes = [0u8; 32]; - for i in 0..32 { - seed_bytes[i] = rng.gen::(); - } - seed_bytes + let mut seed_bytes = [0u8; 32]; + for i in 0..32 { + seed_bytes[i] = rng.gen::(); + } + seed_bytes } /// Creates an `UncheckedExtrinsic` containing the appropriate signature for /// a `CheckedExtrinsics`. 
fn sign( - xt: CheckedExtrinsic, - key: &sr25519::Pair, - additional_signed: ::AdditionalSigned, + xt: CheckedExtrinsic, + key: &sr25519::Pair, + additional_signed: ::AdditionalSigned, ) -> ::Extrinsic { - let s = match xt.signed { - Some((signed, extra)) => { - let payload = (xt.function, extra.clone(), additional_signed); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&sp_io::hashing::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); - UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), - function: payload.0, - } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, - }, - }; - - let e = Encode::encode(&s); - Decode::decode(&mut &e[..]).expect("Failed to decode signed unchecked extrinsic") + let s = match xt.signed { + Some((signed, extra)) => { + let payload = (xt.function, extra.clone(), additional_signed); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&sp_io::hashing::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); + UncheckedExtrinsic { + signature: Some(( + pallet_indices::address::Address::Id(signed), + signature, + extra, + )), + function: payload.0, + } + } + None => UncheckedExtrinsic { + signature: None, + function: xt.function, + }, + }; + + let e = Encode::encode(&s); + Decode::decode(&mut &e[..]).expect("Failed to decode signed unchecked extrinsic") } diff --git a/bin/node/cli/src/lib.rs b/bin/node/cli/src/lib.rs index 1e2c790bfa..a4a01d9299 100644 --- a/bin/node/cli/src/lib.rs +++ b/bin/node/cli/src/lib.rs @@ -37,9 +37,9 @@ mod browser; #[cfg(feature = "cli")] mod cli; #[cfg(feature = "cli")] -mod factory_impl; -#[cfg(feature = "cli")] mod command; +#[cfg(feature = "cli")] +mod factory_impl; #[cfg(feature = "browser")] pub use browser::*; diff --git a/bin/node/cli/src/service.rs b/bin/node/cli/src/service.rs index 0acd553ea0..98d81265d7 100644 --- a/bin/node/cli/src/service.rs +++ 
b/bin/node/cli/src/service.rs @@ -20,93 +20,102 @@ use std::sync::Arc; -use sc_consensus_babe; -use sc_client::{self, LongestChain}; -use grandpa::{self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider}; +use grandpa::{ + self, FinalityProofProvider as GrandpaFinalityProofProvider, StorageAndProofProvider, +}; use node_executor; use node_primitives::Block; use node_runtime::RuntimeApi; +use sc_client::{self, LongestChain}; +use sc_consensus_babe; use sc_service::{ - AbstractService, ServiceBuilder, config::Configuration, error::{Error as ServiceError}, + config::Configuration, error::Error as ServiceError, AbstractService, ServiceBuilder, }; use sp_inherents::InherentDataProviders; -use sc_service::{Service, NetworkStatus}; +use node_executor::NativeExecutor; use sc_client::{Client, LocalCallExecutor}; use sc_client_db::Backend; -use sp_runtime::traits::Block as BlockT; -use node_executor::NativeExecutor; use sc_network::NetworkService; use sc_offchain::OffchainWorkers; +use sc_service::{NetworkStatus, Service}; +use sp_runtime::traits::Block as BlockT; /// Starts a `ServiceBuilder` for a full service. /// /// Use this macro if you don't actually need the full service, but just the builder in order to /// be able to perform chain operations. macro_rules! new_full_start { - ($config:expr) => {{ - use std::sync::Arc; - type RpcExtension = jsonrpc_core::IoHandler; - let mut import_setup = None; - let inherent_data_providers = sp_inherents::InherentDataProviders::new(); - - let builder = sc_service::ServiceBuilder::new_full::< - node_primitives::Block, node_runtime::RuntimeApi, node_executor::Executor - >($config)? - .with_select_chain(|_config, backend| { - Ok(sc_client::LongestChain::new(backend.clone())) - })? 
- .with_transaction_pool(|config, client, _fetcher, prometheus_registry| { - let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); - Ok(sc_transaction_pool::BasicPool::new(config, std::sync::Arc::new(pool_api), prometheus_registry)) - })? - .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { - let select_chain = select_chain.take() - .ok_or_else(|| sc_service::Error::SelectChainRequired)?; - let (grandpa_block_import, grandpa_link) = grandpa::block_import( - client.clone(), - &(client.clone() as Arc<_>), - select_chain, - )?; - let justification_import = grandpa_block_import.clone(); - - let (block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let import_queue = sc_consensus_babe::import_queue( - babe_link.clone(), - block_import.clone(), - Some(Box::new(justification_import)), - None, - client, - inherent_data_providers.clone(), - )?; - - import_setup = Some((block_import, grandpa_link, babe_link)); - Ok(import_queue) - })? 
- .with_rpc_extensions(|builder| -> std::result::Result { - let babe_link = import_setup.as_ref().map(|s| &s.2) - .expect("BabeLink is present for full services or set up failed; qed."); - let deps = node_rpc::FullDeps { - client: builder.client().clone(), - pool: builder.pool(), - select_chain: builder.select_chain().cloned() - .expect("SelectChain is present for full services or set up failed; qed."), - babe: node_rpc::BabeDeps { - keystore: builder.keystore(), - babe_config: sc_consensus_babe::BabeLink::config(babe_link).clone(), - shared_epoch_changes: sc_consensus_babe::BabeLink::epoch_changes(babe_link).clone() - } - }; - Ok(node_rpc::create_full(deps)) - })?; - - (builder, import_setup, inherent_data_providers) - }} + ($config:expr) => {{ + use std::sync::Arc; + type RpcExtension = jsonrpc_core::IoHandler; + let mut import_setup = None; + let inherent_data_providers = sp_inherents::InherentDataProviders::new(); + + let builder = sc_service::ServiceBuilder::new_full::< + node_primitives::Block, + node_runtime::RuntimeApi, + node_executor::Executor, + >($config)? + .with_select_chain(|_config, backend| Ok(sc_client::LongestChain::new(backend.clone())))? + .with_transaction_pool(|config, client, _fetcher, prometheus_registry| { + let pool_api = sc_transaction_pool::FullChainApi::new(client.clone()); + Ok(sc_transaction_pool::BasicPool::new( + config, + std::sync::Arc::new(pool_api), + prometheus_registry, + )) + })? 
+ .with_import_queue(|_config, client, mut select_chain, _transaction_pool| { + let select_chain = select_chain + .take() + .ok_or_else(|| sc_service::Error::SelectChainRequired)?; + let (grandpa_block_import, grandpa_link) = + grandpa::block_import(client.clone(), &(client.clone() as Arc<_>), select_chain)?; + let justification_import = grandpa_block_import.clone(); + + let (block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let import_queue = sc_consensus_babe::import_queue( + babe_link.clone(), + block_import.clone(), + Some(Box::new(justification_import)), + None, + client, + inherent_data_providers.clone(), + )?; + + import_setup = Some((block_import, grandpa_link, babe_link)); + Ok(import_queue) + })? + .with_rpc_extensions(|builder| -> std::result::Result { + let babe_link = import_setup + .as_ref() + .map(|s| &s.2) + .expect("BabeLink is present for full services or set up failed; qed."); + let deps = node_rpc::FullDeps { + client: builder.client().clone(), + pool: builder.pool(), + select_chain: builder + .select_chain() + .cloned() + .expect("SelectChain is present for full services or set up failed; qed."), + babe: node_rpc::BabeDeps { + keystore: builder.keystore(), + babe_config: sc_consensus_babe::BabeLink::config(babe_link).clone(), + shared_epoch_changes: sc_consensus_babe::BabeLink::epoch_changes(babe_link) + .clone(), + }, + }; + Ok(node_rpc::create_full(deps)) + })?; + + (builder, import_setup, inherent_data_providers) + }}; } /// Creates a full service from the configuration. @@ -267,408 +276,426 @@ macro_rules! 
new_full { } type ConcreteBlock = node_primitives::Block; -type ConcreteClient = - Client< - Backend, - LocalCallExecutor, NativeExecutor>, - ConcreteBlock, - node_runtime::RuntimeApi - >; +type ConcreteClient = Client< + Backend, + LocalCallExecutor, NativeExecutor>, + ConcreteBlock, + node_runtime::RuntimeApi, +>; type ConcreteBackend = Backend; type ConcreteTransactionPool = sc_transaction_pool::BasicPool< - sc_transaction_pool::FullChainApi, - ConcreteBlock + sc_transaction_pool::FullChainApi, + ConcreteBlock, >; /// Builds a new service for a full client. -pub fn new_full(config: Configuration) --> Result< - Service< - ConcreteBlock, - ConcreteClient, - LongestChain, - NetworkStatus, - NetworkService::Hash>, - ConcreteTransactionPool, - OffchainWorkers< - ConcreteClient, - >::OffchainStorage, - ConcreteBlock, - > - >, - ServiceError, -> -{ - new_full!(config).map(|(service, _)| service) +pub fn new_full( + config: Configuration, +) -> Result< + Service< + ConcreteBlock, + ConcreteClient, + LongestChain, + NetworkStatus, + NetworkService::Hash>, + ConcreteTransactionPool, + OffchainWorkers< + ConcreteClient, + >::OffchainStorage, + ConcreteBlock, + >, + >, + ServiceError, +> { + new_full!(config).map(|(service, _)| service) } /// Builds a new service for a light client. -pub fn new_light(config: Configuration) --> Result { - type RpcExtension = jsonrpc_core::IoHandler; - let inherent_data_providers = InherentDataProviders::new(); - - let service = ServiceBuilder::new_light::(config)? - .with_select_chain(|_config, backend| { - Ok(LongestChain::new(backend.clone())) - })? 
- .with_transaction_pool(|config, client, fetcher, prometheus_registry| { - let fetcher = fetcher - .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; - let pool_api = sc_transaction_pool::LightChainApi::new(client.clone(), fetcher.clone()); - let pool = sc_transaction_pool::BasicPool::with_revalidation_type( - config, Arc::new(pool_api), prometheus_registry, sc_transaction_pool::RevalidationType::Light, - ); - Ok(pool) - })? - .with_import_queue_and_fprb(|_config, client, backend, fetcher, _select_chain, _tx_pool| { - let fetch_checker = fetcher - .map(|fetcher| fetcher.checker().clone()) - .ok_or_else(|| "Trying to start light import queue without active fetch checker")?; - let grandpa_block_import = grandpa::light_block_import( - client.clone(), - backend, - &(client.clone() as Arc<_>), - Arc::new(fetch_checker), - )?; - - let finality_proof_import = grandpa_block_import.clone(); - let finality_proof_request_builder = - finality_proof_import.create_finality_proof_request_builder(); - - let (babe_block_import, babe_link) = sc_consensus_babe::block_import( - sc_consensus_babe::Config::get_or_compute(&*client)?, - grandpa_block_import, - client.clone(), - )?; - - let import_queue = sc_consensus_babe::import_queue( - babe_link, - babe_block_import, - None, - Some(Box::new(finality_proof_import)), - client.clone(), - inherent_data_providers.clone(), - )?; - - Ok((import_queue, finality_proof_request_builder)) - })? - .with_finality_proof_provider(|client, backend| { - // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider - let provider = client as Arc>; - Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) - })? 
- .with_rpc_extensions(|builder,| -> - Result - { - let fetcher = builder.fetcher() - .ok_or_else(|| "Trying to start node RPC without active fetcher")?; - let remote_blockchain = builder.remote_backend() - .ok_or_else(|| "Trying to start node RPC without active remote blockchain")?; - - let light_deps = node_rpc::LightDeps { - remote_blockchain, - fetcher, - client: builder.client().clone(), - pool: builder.pool(), - }; - Ok(node_rpc::create_light(light_deps)) - })? - .build()?; - - Ok(service) +pub fn new_light(config: Configuration) -> Result { + type RpcExtension = jsonrpc_core::IoHandler; + let inherent_data_providers = InherentDataProviders::new(); + + let service = ServiceBuilder::new_light::(config)? + .with_select_chain(|_config, backend| Ok(LongestChain::new(backend.clone())))? + .with_transaction_pool(|config, client, fetcher, prometheus_registry| { + let fetcher = fetcher + .ok_or_else(|| "Trying to start light transaction pool without active fetcher")?; + let pool_api = sc_transaction_pool::LightChainApi::new(client.clone(), fetcher.clone()); + let pool = sc_transaction_pool::BasicPool::with_revalidation_type( + config, + Arc::new(pool_api), + prometheus_registry, + sc_transaction_pool::RevalidationType::Light, + ); + Ok(pool) + })? 
+ .with_import_queue_and_fprb( + |_config, client, backend, fetcher, _select_chain, _tx_pool| { + let fetch_checker = fetcher + .map(|fetcher| fetcher.checker().clone()) + .ok_or_else(|| { + "Trying to start light import queue without active fetch checker" + })?; + let grandpa_block_import = grandpa::light_block_import( + client.clone(), + backend, + &(client.clone() as Arc<_>), + Arc::new(fetch_checker), + )?; + + let finality_proof_import = grandpa_block_import.clone(); + let finality_proof_request_builder = + finality_proof_import.create_finality_proof_request_builder(); + + let (babe_block_import, babe_link) = sc_consensus_babe::block_import( + sc_consensus_babe::Config::get_or_compute(&*client)?, + grandpa_block_import, + client.clone(), + )?; + + let import_queue = sc_consensus_babe::import_queue( + babe_link, + babe_block_import, + None, + Some(Box::new(finality_proof_import)), + client.clone(), + inherent_data_providers.clone(), + )?; + + Ok((import_queue, finality_proof_request_builder)) + }, + )? + .with_finality_proof_provider(|client, backend| { + // GenesisAuthoritySetProvider is implemented for StorageAndProofProvider + let provider = client as Arc>; + Ok(Arc::new(GrandpaFinalityProofProvider::new(backend, provider)) as _) + })? + .with_rpc_extensions(|builder| -> Result { + let fetcher = builder + .fetcher() + .ok_or_else(|| "Trying to start node RPC without active fetcher")?; + let remote_blockchain = builder + .remote_backend() + .ok_or_else(|| "Trying to start node RPC without active remote blockchain")?; + + let light_deps = node_rpc::LightDeps { + remote_blockchain, + fetcher, + client: builder.client().clone(), + pool: builder.pool(), + }; + Ok(node_rpc::create_light(light_deps)) + })? 
+ .build()?; + + Ok(service) } #[cfg(test)] mod tests { - use std::{sync::Arc, borrow::Cow, any::Any}; - use sc_consensus_babe::{ - CompatibleDigestItem, BabeIntermediate, INTERMEDIATE_KEY - }; - use sc_consensus_epochs::descendent_query; - use sp_consensus::{ - Environment, Proposer, BlockImportParams, BlockOrigin, ForkChoiceStrategy, BlockImport, - RecordProof, - }; - use node_primitives::{Block, DigestItem, Signature}; - use node_runtime::{BalancesCall, Call, UncheckedExtrinsic, Address}; - use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION}; - use codec::{Encode, Decode}; - use sp_core::{crypto::Pair as CryptoPair, H256}; - use sp_runtime::{ - generic::{BlockId, Era, Digest, SignedPayload}, - traits::{Block as BlockT, Header as HeaderT}, - traits::Verify, - OpaqueExtrinsic, - }; - use sp_timestamp; - use sp_finality_tracker; - use sp_keyring::AccountKeyring; - use sc_service::AbstractService; - use crate::service::{new_full, new_light}; - use sp_runtime::traits::IdentifyAccount; - use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; - - type AccountPublic = ::Signer; - - #[cfg(feature = "rhd")] - fn test_sync() { - use sp_core::ed25519::Pair; - - use {service_test, Factory}; - use sc_client::{BlockImportParams, BlockOrigin}; - - let alice: Arc = Arc::new(Keyring::Alice.into()); - let bob: Arc = Arc::new(Keyring::Bob.into()); - let validators = vec![alice.public().0.into(), bob.public().0.into()]; - let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob]; - let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap(); - let block_factory = |service: &::FullService| { - let block_id = BlockId::number(service.client().chain_info().best_number); - let parent_header = service.client().best_header(&block_id) - .expect("db error") - .expect("best block should exist"); - - futures::executor::block_on( - service.transaction_pool().maintain( - ChainEvent::NewBlock { - is_new_best: true, - id: block_id.clone(), - retracted: vec![], - header: 
parent_header, - }, - ) - ); - - let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone()); - let proposer_factory = consensus::ProposerFactory { - client: service.client().clone(), - transaction_pool: service.transaction_pool().clone(), - network: consensus_net, - force_delay: 0, - handle: dummy_runtime.executor(), - }; - let (proposer, _, _) = proposer_factory.init(&parent_header, &validators, alice.clone()).unwrap(); - let block = proposer.propose().expect("Error making test block"); - BlockImportParams { - origin: BlockOrigin::File, - justification: Vec::new(), - internal_justification: Vec::new(), - finalized: false, - body: Some(block.extrinsics), - storage_changes: None, - header: block.header, - auxiliary: Vec::new(), - } - }; - let extrinsic_factory = - |service: &SyncService<::FullService>| - { - let payload = ( - 0, - Call::Balances(BalancesCall::transfer(RawAddress::Id(bob.public().0.into()), 69.into())), - Era::immortal(), - service.client().genesis_hash() - ); - let signature = alice.sign(&payload.encode()).into(); - let id = alice.public().0.into(); - let xt = UncheckedExtrinsic { - signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())), - function: payload.1, - }.encode(); - let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); - OpaqueExtrinsic(v) - }; - sc_service_test::sync( - sc_chain_spec::integration_test_config(), - |config| new_full(config), - |mut config| new_light(config), - block_factory, - extrinsic_factory, - ); - } - - #[test] - // It is "ignored", but the node-cli ignored tests are running on the CI. - // This can be run locally with `cargo test --release -p node-cli test_sync -- --ignored`. 
- #[ignore] - fn test_sync() { - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = sc_keystore::Store::open(keystore_path.path(), None) - .expect("Creates keystore"); - let alice = keystore.write().insert_ephemeral_from_seed::("//Alice") - .expect("Creates authority pair"); - - let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); - - // For the block factory - let mut slot_num = 1u64; - - // For the extrinsics factory - let bob = Arc::new(AccountKeyring::Bob.pair()); - let charlie = Arc::new(AccountKeyring::Charlie.pair()); - let mut index = 0; - - sc_service_test::sync( - chain_spec, - |config| { - let mut setup_handles = None; - new_full!(config, | - block_import: &sc_consensus_babe::BabeBlockImport, - babe_link: &sc_consensus_babe::BabeLink, - | { - setup_handles = Some((block_import.clone(), babe_link.clone())); - }).map(move |(node, x)| (node, (x, setup_handles.unwrap()))) - }, - |config| new_light(config), - |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { - let mut inherent_data = inherent_data_providers - .create_inherent_data() - .expect("Creates inherent data."); - inherent_data.replace_data(sp_finality_tracker::INHERENT_IDENTIFIER, &1u64); - - let parent_id = BlockId::number(service.client().chain_info().best_number); - let parent_header = service.client().header(&parent_id).unwrap().unwrap(); - let parent_hash = parent_header.hash(); - let parent_number = *parent_header.number(); - - futures::executor::block_on( - service.transaction_pool().maintain( - ChainEvent::NewBlock { - is_new_best: true, - id: parent_id.clone(), - retracted: vec![], - header: parent_header.clone(), - }, - ) - ); - - let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( - service.client(), - service.transaction_pool() - ); - - let epoch_descriptor = babe_link.epoch_changes().lock().epoch_descriptor_for_child_of( - 
descendent_query(&*service.client()), - &parent_hash, - parent_number, - slot_num, - ).unwrap().unwrap(); - - let mut digest = Digest::::default(); - - // even though there's only one authority some slots might be empty, - // so we must keep trying the next slots until we can claim one. - let babe_pre_digest = loop { - inherent_data.replace_data(sp_timestamp::INHERENT_IDENTIFIER, &(slot_num * SLOT_DURATION)); - if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( - slot_num, - &parent_header, - &*service.client(), - &keystore, - &babe_link, - ) { - break babe_pre_digest; - } - - slot_num += 1; - }; - - digest.push(::babe_pre_digest(babe_pre_digest)); - - let new_block = futures::executor::block_on(async move { - let proposer = proposer_factory.init(&parent_header).await; - proposer.unwrap().propose( - inherent_data, - digest, - std::time::Duration::from_secs(1), - RecordProof::Yes, - ).await - }).expect("Error making test block").block; - - let (new_header, new_body) = new_block.deconstruct(); - let pre_hash = new_header.hash(); - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let to_sign = pre_hash.encode(); - let signature = alice.sign(&to_sign[..]); - let item = ::babe_seal( - signature.into(), - ); - slot_num += 1; - - let mut params = BlockImportParams::new(BlockOrigin::File, new_header); - params.post_digests.push(item); - params.body = Some(new_body); - params.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, - ); - params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - block_import.import_block(params, Default::default()) - .expect("error importing test block"); - }, - |service, _| { - let amount = 5 * CENTS; - let to: Address = AccountPublic::from(bob.public()).into_account().into(); - let from: Address = AccountPublic::from(charlie.public()).into_account().into(); - let genesis_hash = service.client().block_hash(0).unwrap().unwrap(); - let best_block_id = BlockId::number(service.client().chain_info().best_number); - let version = service.client().runtime_version_at(&best_block_id).unwrap().spec_version; - let signer = charlie.clone(); - - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); - - let check_version = frame_system::CheckVersion::new(); - let check_genesis = frame_system::CheckGenesis::new(); - let check_era = frame_system::CheckEra::from(Era::Immortal); - let check_nonce = frame_system::CheckNonce::from(index); - let check_weight = frame_system::CheckWeight::new(); - let payment = pallet_transaction_payment::ChargeTransactionPayment::from(0); - let extra = ( - check_version, - check_genesis, - check_era, - check_nonce, - check_weight, - payment, - Default::default(), - ); - let raw_payload = SignedPayload::from_raw( - function, - extra, - (version, genesis_hash, genesis_hash, (), (), (), ()) - ); - let signature = raw_payload.using_encoded(|payload| { - signer.sign(payload) - }); - let (function, extra, _) = raw_payload.deconstruct(); - let xt = UncheckedExtrinsic::new_signed( - function, - from.into(), - 
signature.into(), - extra, - ).encode(); - let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); - - index += 1; - OpaqueExtrinsic(v) - }, - ); - } - - #[test] - #[ignore] - fn test_consensus() { - sc_service_test::consensus( - crate::chain_spec::tests::integration_test_config_with_two_authorities(), - |config| new_full(config), - |config| new_light(config), - vec![ - "//Alice".into(), - "//Bob".into(), - ], - ) - } + use crate::service::{new_full, new_light}; + use codec::{Decode, Encode}; + use node_primitives::{Block, DigestItem, Signature}; + use node_runtime::constants::{currency::CENTS, time::SLOT_DURATION}; + use node_runtime::{Address, BalancesCall, Call, UncheckedExtrinsic}; + use sc_consensus_babe::{BabeIntermediate, CompatibleDigestItem, INTERMEDIATE_KEY}; + use sc_consensus_epochs::descendent_query; + use sc_service::AbstractService; + use sp_consensus::{ + BlockImport, BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, Proposer, + RecordProof, + }; + use sp_core::{crypto::Pair as CryptoPair, H256}; + use sp_finality_tracker; + use sp_keyring::AccountKeyring; + use sp_runtime::traits::IdentifyAccount; + use sp_runtime::{ + generic::{BlockId, Digest, Era, SignedPayload}, + traits::Verify, + traits::{Block as BlockT, Header as HeaderT}, + OpaqueExtrinsic, + }; + use sp_timestamp; + use sp_transaction_pool::{ChainEvent, MaintainedTransactionPool}; + use std::{any::Any, borrow::Cow, sync::Arc}; + + type AccountPublic = ::Signer; + + #[cfg(feature = "rhd")] + fn test_sync() { + use sp_core::ed25519::Pair; + + use sc_client::{BlockImportParams, BlockOrigin}; + use {service_test, Factory}; + + let alice: Arc = Arc::new(Keyring::Alice.into()); + let bob: Arc = Arc::new(Keyring::Bob.into()); + let validators = vec![alice.public().0.into(), bob.public().0.into()]; + let keys: Vec<&ed25519::Pair> = vec![&*alice, &*bob]; + let dummy_runtime = ::tokio::runtime::Runtime::new().unwrap(); + let block_factory = |service: &::FullService| { + let 
block_id = BlockId::number(service.client().chain_info().best_number); + let parent_header = service + .client() + .best_header(&block_id) + .expect("db error") + .expect("best block should exist"); + + futures::executor::block_on(service.transaction_pool().maintain( + ChainEvent::NewBlock { + is_new_best: true, + id: block_id.clone(), + retracted: vec![], + header: parent_header, + }, + )); + + let consensus_net = ConsensusNetwork::new(service.network(), service.client().clone()); + let proposer_factory = consensus::ProposerFactory { + client: service.client().clone(), + transaction_pool: service.transaction_pool().clone(), + network: consensus_net, + force_delay: 0, + handle: dummy_runtime.executor(), + }; + let (proposer, _, _) = proposer_factory + .init(&parent_header, &validators, alice.clone()) + .unwrap(); + let block = proposer.propose().expect("Error making test block"); + BlockImportParams { + origin: BlockOrigin::File, + justification: Vec::new(), + internal_justification: Vec::new(), + finalized: false, + body: Some(block.extrinsics), + storage_changes: None, + header: block.header, + auxiliary: Vec::new(), + } + }; + let extrinsic_factory = + |service: &SyncService<::FullService>| { + let payload = ( + 0, + Call::Balances(BalancesCall::transfer( + RawAddress::Id(bob.public().0.into()), + 69.into(), + )), + Era::immortal(), + service.client().genesis_hash(), + ); + let signature = alice.sign(&payload.encode()).into(); + let id = alice.public().0.into(); + let xt = UncheckedExtrinsic { + signature: Some((RawAddress::Id(id), signature, payload.0, Era::immortal())), + function: payload.1, + } + .encode(); + let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); + OpaqueExtrinsic(v) + }; + sc_service_test::sync( + sc_chain_spec::integration_test_config(), + |config| new_full(config), + |mut config| new_light(config), + block_factory, + extrinsic_factory, + ); + } + + #[test] + // It is "ignored", but the node-cli ignored tests are running on the CI. 
+ // This can be run locally with `cargo test --release -p node-cli test_sync -- --ignored`. + #[ignore] + fn test_sync() { + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = + sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + let alice = keystore + .write() + .insert_ephemeral_from_seed::("//Alice") + .expect("Creates authority pair"); + + let chain_spec = crate::chain_spec::tests::integration_test_config_with_single_authority(); + + // For the block factory + let mut slot_num = 1u64; + + // For the extrinsics factory + let bob = Arc::new(AccountKeyring::Bob.pair()); + let charlie = Arc::new(AccountKeyring::Charlie.pair()); + let mut index = 0; + + sc_service_test::sync( + chain_spec, + |config| { + let mut setup_handles = None; + new_full!( + config, + |block_import: &sc_consensus_babe::BabeBlockImport, + babe_link: &sc_consensus_babe::BabeLink| { + setup_handles = Some((block_import.clone(), babe_link.clone())); + } + ) + .map(move |(node, x)| (node, (x, setup_handles.unwrap()))) + }, + |config| new_light(config), + |service, &mut (ref inherent_data_providers, (ref mut block_import, ref babe_link))| { + let mut inherent_data = inherent_data_providers + .create_inherent_data() + .expect("Creates inherent data."); + inherent_data.replace_data(sp_finality_tracker::INHERENT_IDENTIFIER, &1u64); + + let parent_id = BlockId::number(service.client().chain_info().best_number); + let parent_header = service.client().header(&parent_id).unwrap().unwrap(); + let parent_hash = parent_header.hash(); + let parent_number = *parent_header.number(); + + futures::executor::block_on(service.transaction_pool().maintain( + ChainEvent::NewBlock { + is_new_best: true, + id: parent_id.clone(), + retracted: vec![], + header: parent_header.clone(), + }, + )); + + let mut proposer_factory = sc_basic_authorship::ProposerFactory::new( + service.client(), + service.transaction_pool(), + ); + + let epoch_descriptor = 
babe_link + .epoch_changes() + .lock() + .epoch_descriptor_for_child_of( + descendent_query(&*service.client()), + &parent_hash, + parent_number, + slot_num, + ) + .unwrap() + .unwrap(); + + let mut digest = Digest::::default(); + + // even though there's only one authority some slots might be empty, + // so we must keep trying the next slots until we can claim one. + let babe_pre_digest = loop { + inherent_data.replace_data( + sp_timestamp::INHERENT_IDENTIFIER, + &(slot_num * SLOT_DURATION), + ); + if let Some(babe_pre_digest) = sc_consensus_babe::test_helpers::claim_slot( + slot_num, + &parent_header, + &*service.client(), + &keystore, + &babe_link, + ) { + break babe_pre_digest; + } + + slot_num += 1; + }; + + digest.push(::babe_pre_digest( + babe_pre_digest, + )); + + let new_block = futures::executor::block_on(async move { + let proposer = proposer_factory.init(&parent_header).await; + proposer + .unwrap() + .propose( + inherent_data, + digest, + std::time::Duration::from_secs(1), + RecordProof::Yes, + ) + .await + }) + .expect("Error making test block") + .block; + + let (new_header, new_body) = new_block.deconstruct(); + let pre_hash = new_header.hash(); + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let to_sign = pre_hash.encode(); + let signature = alice.sign(&to_sign[..]); + let item = ::babe_seal(signature.into()); + slot_num += 1; + + let mut params = BlockImportParams::new(BlockOrigin::File, new_header); + params.post_digests.push(item); + params.body = Some(new_body); + params.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + block_import + .import_block(params, Default::default()) + .expect("error importing test block"); + }, + |service, _| { + let amount = 5 * CENTS; + let to: Address = AccountPublic::from(bob.public()).into_account().into(); + let from: Address = AccountPublic::from(charlie.public()).into_account().into(); + let genesis_hash = service.client().block_hash(0).unwrap().unwrap(); + let best_block_id = BlockId::number(service.client().chain_info().best_number); + let version = service + .client() + .runtime_version_at(&best_block_id) + .unwrap() + .spec_version; + let signer = charlie.clone(); + + let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); + + let check_version = frame_system::CheckVersion::new(); + let check_genesis = frame_system::CheckGenesis::new(); + let check_era = frame_system::CheckEra::from(Era::Immortal); + let check_nonce = frame_system::CheckNonce::from(index); + let check_weight = frame_system::CheckWeight::new(); + let payment = pallet_transaction_payment::ChargeTransactionPayment::from(0); + let extra = ( + check_version, + check_genesis, + check_era, + check_nonce, + check_weight, + payment, + Default::default(), + ); + let raw_payload = SignedPayload::from_raw( + function, + extra, + (version, genesis_hash, genesis_hash, (), (), (), ()), + ); + let signature = raw_payload.using_encoded(|payload| signer.sign(payload)); + let (function, extra, _) = raw_payload.deconstruct(); + let xt = + UncheckedExtrinsic::new_signed(function, from.into(), signature.into(), 
extra) + .encode(); + let v: Vec = Decode::decode(&mut xt.as_slice()).unwrap(); + + index += 1; + OpaqueExtrinsic(v) + }, + ); + } + + #[test] + #[ignore] + fn test_consensus() { + sc_service_test::consensus( + crate::chain_spec::tests::integration_test_config_with_two_authorities(), + |config| new_full(config), + |config| new_light(config), + vec!["//Alice".into(), "//Bob".into()], + ) + } } diff --git a/bin/node/cli/tests/build_spec_works.rs b/bin/node/cli/tests/build_spec_works.rs index 2eca71a5b5..7e134e93ca 100644 --- a/bin/node/cli/tests/build_spec_works.rs +++ b/bin/node/cli/tests/build_spec_works.rs @@ -20,18 +20,18 @@ use tempfile::tempdir; #[test] fn build_spec_works() { - let base_path = tempdir().expect("could not create a temp dir"); + let base_path = tempdir().expect("could not create a temp dir"); - let output = Command::new(cargo_bin("substrate")) - .args(&["build-spec", "--dev", "-d"]) - .arg(base_path.path()) - .output() - .unwrap(); - assert!(output.status.success()); + let output = Command::new(cargo_bin("substrate")) + .args(&["build-spec", "--dev", "-d"]) + .arg(base_path.path()) + .output() + .unwrap(); + assert!(output.status.success()); - // Make sure that the `dev` chain folder exists, but the `db` doesn't - assert!(base_path.path().join("chains/dev/").exists()); - assert!(!base_path.path().join("chains/dev/db").exists()); + // Make sure that the `dev` chain folder exists, but the `db` doesn't + assert!(base_path.path().join("chains/dev/").exists()); + assert!(!base_path.path().join("chains/dev/db").exists()); - let _value: serde_json::Value = serde_json::from_slice(output.stdout.as_slice()).unwrap(); + let _value: serde_json::Value = serde_json::from_slice(output.stdout.as_slice()).unwrap(); } diff --git a/bin/node/cli/tests/check_block_works.rs b/bin/node/cli/tests/check_block_works.rs index 6bfb82a8bf..c97ec10021 100644 --- a/bin/node/cli/tests/check_block_works.rs +++ b/bin/node/cli/tests/check_block_works.rs @@ -24,15 +24,15 @@ mod 
common; #[test] fn check_block_works() { - let base_path = tempdir().expect("could not create a temp dir"); + let base_path = tempdir().expect("could not create a temp dir"); - common::run_dev_node_for_a_while(base_path.path()); + common::run_dev_node_for_a_while(base_path.path()); - let status = Command::new(cargo_bin("substrate")) - .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) - .arg(base_path.path()) - .arg("1") - .status() - .unwrap(); - assert!(status.success()); + let status = Command::new(cargo_bin("substrate")) + .args(&["check-block", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .arg("1") + .status() + .unwrap(); + assert!(status.success()); } diff --git a/bin/node/cli/tests/common.rs b/bin/node/cli/tests/common.rs index 34e371195c..0965bf957a 100644 --- a/bin/node/cli/tests/common.rs +++ b/bin/node/cli/tests/common.rs @@ -17,50 +17,60 @@ #![cfg(unix)] #![allow(dead_code)] -use std::{process::{Child, ExitStatus}, thread, time::Duration, path::Path}; use assert_cmd::cargo::cargo_bin; -use std::{convert::TryInto, process::Command}; use nix::sys::signal::{kill, Signal::SIGINT}; use nix::unistd::Pid; +use std::{convert::TryInto, process::Command}; +use std::{ + path::Path, + process::{Child, ExitStatus}, + thread, + time::Duration, +}; /// Wait for the given `child` the given number of `secs`. /// /// Returns the `Some(exit status)` or `None` if the process did not finish in the given time. pub fn wait_for(child: &mut Child, secs: usize) -> Option { - for i in 0..secs { - match child.try_wait().unwrap() { - Some(status) => { - if i > 5 { - eprintln!("Child process took {} seconds to exit gracefully", i); - } - return Some(status) - }, - None => thread::sleep(Duration::from_secs(1)), - } - } - eprintln!("Took too long to exit (> {} seconds). 
Killing...", secs); - let _ = child.kill(); - child.wait().unwrap(); + for i in 0..secs { + match child.try_wait().unwrap() { + Some(status) => { + if i > 5 { + eprintln!("Child process took {} seconds to exit gracefully", i); + } + return Some(status); + } + None => thread::sleep(Duration::from_secs(1)), + } + } + eprintln!("Took too long to exit (> {} seconds). Killing...", secs); + let _ = child.kill(); + child.wait().unwrap(); - None + None } /// Run the node for a while (30 seconds) pub fn run_dev_node_for_a_while(base_path: &Path) { - let mut cmd = Command::new(cargo_bin("substrate")); + let mut cmd = Command::new(cargo_bin("substrate")); - let mut cmd = cmd - .args(&["--dev"]) - .arg("-d") - .arg(base_path) - .spawn() - .unwrap(); + let mut cmd = cmd + .args(&["--dev"]) + .arg("-d") + .arg(base_path) + .spawn() + .unwrap(); - // Let it produce some blocks. - thread::sleep(Duration::from_secs(30)); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); + // Let it produce some blocks. 
+ thread::sleep(Duration::from_secs(30)); + assert!( + cmd.try_wait().unwrap().is_none(), + "the process should still be running" + ); - // Stop the process - kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); - assert!(wait_for(&mut cmd, 40).map(|x| x.success()).unwrap_or_default()); + // Stop the process + kill(Pid::from_raw(cmd.id().try_into().unwrap()), SIGINT).unwrap(); + assert!(wait_for(&mut cmd, 40) + .map(|x| x.success()) + .unwrap_or_default()); } diff --git a/bin/node/cli/tests/factory.rs b/bin/node/cli/tests/factory.rs index 2930cd52e2..c68278c6df 100644 --- a/bin/node/cli/tests/factory.rs +++ b/bin/node/cli/tests/factory.rs @@ -24,17 +24,17 @@ mod common; #[test] fn factory_works() { - let base_path = tempdir().expect("could not create a temp dir"); - - let status = Command::new(cargo_bin("substrate")) - .stdout(Stdio::null()) - .args(&["factory", "--dev", "-d"]) - .arg(base_path.path()) - .status() - .unwrap(); - assert!(status.success()); - - // Make sure that the `dev` chain folder exists & `db` - assert!(base_path.path().join("chains/dev/").exists()); - assert!(base_path.path().join("chains/dev/db").exists()); + let base_path = tempdir().expect("could not create a temp dir"); + + let status = Command::new(cargo_bin("substrate")) + .stdout(Stdio::null()) + .args(&["factory", "--dev", "-d"]) + .arg(base_path.path()) + .status() + .unwrap(); + assert!(status.success()); + + // Make sure that the `dev` chain folder exists & `db` + assert!(base_path.path().join("chains/dev/").exists()); + assert!(base_path.path().join("chains/dev/db").exists()); } diff --git a/bin/node/cli/tests/import_export_and_revert_work.rs b/bin/node/cli/tests/import_export_and_revert_work.rs index 131265e3b4..4a43d76646 100644 --- a/bin/node/cli/tests/import_export_and_revert_work.rs +++ b/bin/node/cli/tests/import_export_and_revert_work.rs @@ -17,43 +17,46 @@ #![cfg(unix)] use assert_cmd::cargo::cargo_bin; -use std::{process::Command, fs}; +use std::{fs, 
process::Command}; use tempfile::tempdir; mod common; #[test] fn import_export_and_revert_work() { - let base_path = tempdir().expect("could not create a temp dir"); - let exported_blocks = base_path.path().join("exported_blocks"); - - common::run_dev_node_for_a_while(base_path.path()); - - let status = Command::new(cargo_bin("substrate")) - .args(&["export-blocks", "--dev", "--pruning", "archive", "-d"]) - .arg(base_path.path()) - .arg(&exported_blocks) - .status() - .unwrap(); - assert!(status.success()); - - let metadata = fs::metadata(&exported_blocks).unwrap(); - assert!(metadata.len() > 0, "file exported_blocks should not be empty"); - - let _ = fs::remove_dir_all(base_path.path().join("db")); - - let status = Command::new(cargo_bin("substrate")) - .args(&["import-blocks", "--dev", "--pruning", "archive", "-d"]) - .arg(base_path.path()) - .arg(&exported_blocks) - .status() - .unwrap(); - assert!(status.success()); - - let status = Command::new(cargo_bin("substrate")) - .args(&["revert", "--dev", "--pruning", "archive", "-d"]) - .arg(base_path.path()) - .status() - .unwrap(); - assert!(status.success()); + let base_path = tempdir().expect("could not create a temp dir"); + let exported_blocks = base_path.path().join("exported_blocks"); + + common::run_dev_node_for_a_while(base_path.path()); + + let status = Command::new(cargo_bin("substrate")) + .args(&["export-blocks", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .arg(&exported_blocks) + .status() + .unwrap(); + assert!(status.success()); + + let metadata = fs::metadata(&exported_blocks).unwrap(); + assert!( + metadata.len() > 0, + "file exported_blocks should not be empty" + ); + + let _ = fs::remove_dir_all(base_path.path().join("db")); + + let status = Command::new(cargo_bin("substrate")) + .args(&["import-blocks", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .arg(&exported_blocks) + .status() + .unwrap(); + assert!(status.success()); + + let status = 
Command::new(cargo_bin("substrate")) + .args(&["revert", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .status() + .unwrap(); + assert!(status.success()); } diff --git a/bin/node/cli/tests/inspect_works.rs b/bin/node/cli/tests/inspect_works.rs index 441b08ccf4..63a9094573 100644 --- a/bin/node/cli/tests/inspect_works.rs +++ b/bin/node/cli/tests/inspect_works.rs @@ -24,15 +24,15 @@ mod common; #[test] fn inspect_works() { - let base_path = tempdir().expect("could not create a temp dir"); + let base_path = tempdir().expect("could not create a temp dir"); - common::run_dev_node_for_a_while(base_path.path()); + common::run_dev_node_for_a_while(base_path.path()); - let status = Command::new(cargo_bin("substrate")) - .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) - .arg(base_path.path()) - .args(&["block", "1"]) - .status() - .unwrap(); - assert!(status.success()); + let status = Command::new(cargo_bin("substrate")) + .args(&["inspect", "--dev", "--pruning", "archive", "-d"]) + .arg(base_path.path()) + .args(&["block", "1"]) + .status() + .unwrap(); + assert!(status.success()); } diff --git a/bin/node/cli/tests/purge_chain_works.rs b/bin/node/cli/tests/purge_chain_works.rs index 020259d0c5..095bbd6a21 100644 --- a/bin/node/cli/tests/purge_chain_works.rs +++ b/bin/node/cli/tests/purge_chain_works.rs @@ -23,19 +23,19 @@ mod common; #[test] #[cfg(unix)] fn purge_chain_works() { - let base_path = tempdir().expect("could not create a temp dir"); + let base_path = tempdir().expect("could not create a temp dir"); - common::run_dev_node_for_a_while(base_path.path()); + common::run_dev_node_for_a_while(base_path.path()); - let status = Command::new(cargo_bin("substrate")) - .args(&["purge-chain", "--dev", "-d"]) - .arg(base_path.path()) - .arg("-y") - .status() - .unwrap(); - assert!(status.success()); + let status = Command::new(cargo_bin("substrate")) + .args(&["purge-chain", "--dev", "-d"]) + .arg(base_path.path()) + .arg("-y") + .status() + 
.unwrap(); + assert!(status.success()); - // Make sure that the `dev` chain folder exists, but the `db` is deleted. - assert!(base_path.path().join("chains/dev/").exists()); - assert!(!base_path.path().join("chains/dev/db").exists()); + // Make sure that the `dev` chain folder exists, but the `db` is deleted. + assert!(base_path.path().join("chains/dev/").exists()); + assert!(!base_path.path().join("chains/dev/db").exists()); } diff --git a/bin/node/cli/tests/running_the_node_and_interrupt.rs b/bin/node/cli/tests/running_the_node_and_interrupt.rs index 67efedccbe..9ea612df44 100644 --- a/bin/node/cli/tests/running_the_node_and_interrupt.rs +++ b/bin/node/cli/tests/running_the_node_and_interrupt.rs @@ -23,28 +23,34 @@ mod common; #[test] #[cfg(unix)] fn running_the_node_works_and_can_be_interrupted() { - use nix::sys::signal::{kill, Signal::{self, SIGINT, SIGTERM}}; - use nix::unistd::Pid; - - fn run_command_and_kill(signal: Signal) { - let base_path = tempdir().expect("could not create a temp dir"); - let mut cmd = Command::new(cargo_bin("substrate")) - .args(&["--dev", "-d"]) - .arg(base_path.path()) - .spawn() - .unwrap(); - - thread::sleep(Duration::from_secs(20)); - assert!(cmd.try_wait().unwrap().is_none(), "the process should still be running"); - kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); - assert_eq!( - common::wait_for(&mut cmd, 30).map(|x| x.success()), - Some(true), - "the process must exit gracefully after signal {}", - signal, - ); - } - - run_command_and_kill(SIGINT); - run_command_and_kill(SIGTERM); + use nix::sys::signal::{ + kill, + Signal::{self, SIGINT, SIGTERM}, + }; + use nix::unistd::Pid; + + fn run_command_and_kill(signal: Signal) { + let base_path = tempdir().expect("could not create a temp dir"); + let mut cmd = Command::new(cargo_bin("substrate")) + .args(&["--dev", "-d"]) + .arg(base_path.path()) + .spawn() + .unwrap(); + + thread::sleep(Duration::from_secs(20)); + assert!( + cmd.try_wait().unwrap().is_none(), + 
"the process should still be running" + ); + kill(Pid::from_raw(cmd.id().try_into().unwrap()), signal).unwrap(); + assert_eq!( + common::wait_for(&mut cmd, 30).map(|x| x.success()), + Some(true), + "the process must exit gracefully after signal {}", + signal, + ); + } + + run_command_and_kill(SIGINT); + run_command_and_kill(SIGTERM); } diff --git a/bin/node/cli/tests/version.rs b/bin/node/cli/tests/version.rs index 5555efd385..537c3de8af 100644 --- a/bin/node/cli/tests/version.rs +++ b/bin/node/cli/tests/version.rs @@ -20,64 +20,64 @@ use regex::Regex; use std::process::Command; fn expected_regex() -> Regex { - Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+)-(.+?)-(.+?)(?:-(.+))?$").unwrap() + Regex::new(r"^substrate (\d+\.\d+\.\d+(?:-.+?)?)-([a-f\d]+)-(.+?)-(.+?)(?:-(.+))?$").unwrap() } #[test] fn version_is_full() { - let expected = expected_regex(); - let output = Command::new(cargo_bin("substrate")) - .args(&["--version"]) - .output() - .unwrap(); + let expected = expected_regex(); + let output = Command::new(cargo_bin("substrate")) + .args(&["--version"]) + .output() + .unwrap(); - assert!( - output.status.success(), - "command returned with non-success exit code" - ); + assert!( + output.status.success(), + "command returned with non-success exit code" + ); - let output = String::from_utf8_lossy(&output.stdout).trim().to_owned(); - let captures = expected - .captures(output.as_str()) - .expect("could not parse version in output"); + let output = String::from_utf8_lossy(&output.stdout).trim().to_owned(); + let captures = expected + .captures(output.as_str()) + .expect("could not parse version in output"); - assert_eq!(&captures[1], env!("CARGO_PKG_VERSION")); - assert_eq!(&captures[3], TARGET_ARCH.as_str()); - assert_eq!(&captures[4], TARGET_OS.as_str()); - assert_eq!( - captures.get(5).map(|x| x.as_str()), - TARGET_ENV.map(|x| x.as_str()) - ); + assert_eq!(&captures[1], env!("CARGO_PKG_VERSION")); + assert_eq!(&captures[3], TARGET_ARCH.as_str()); 
+ assert_eq!(&captures[4], TARGET_OS.as_str()); + assert_eq!( + captures.get(5).map(|x| x.as_str()), + TARGET_ENV.map(|x| x.as_str()) + ); } #[test] fn test_regex_matches_properly() { - let expected = expected_regex(); + let expected = expected_regex(); - let captures = expected - .captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu") - .unwrap(); - assert_eq!(&captures[1], "2.0.0"); - assert_eq!(&captures[2], "da487d19d"); - assert_eq!(&captures[3], "x86_64"); - assert_eq!(&captures[4], "linux"); - assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); + let captures = expected + .captures("substrate 2.0.0-da487d19d-x86_64-linux-gnu") + .unwrap(); + assert_eq!(&captures[1], "2.0.0"); + assert_eq!(&captures[2], "da487d19d"); + assert_eq!(&captures[3], "x86_64"); + assert_eq!(&captures[4], "linux"); + assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - let captures = expected - .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu") - .unwrap(); - assert_eq!(&captures[1], "2.0.0-alpha.5"); - assert_eq!(&captures[2], "da487d19d"); - assert_eq!(&captures[3], "x86_64"); - assert_eq!(&captures[4], "linux"); - assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); + let captures = expected + .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux-gnu") + .unwrap(); + assert_eq!(&captures[1], "2.0.0-alpha.5"); + assert_eq!(&captures[2], "da487d19d"); + assert_eq!(&captures[3], "x86_64"); + assert_eq!(&captures[4], "linux"); + assert_eq!(captures.get(5).map(|x| x.as_str()), Some("gnu")); - let captures = expected - .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux") - .unwrap(); - assert_eq!(&captures[1], "2.0.0-alpha.5"); - assert_eq!(&captures[2], "da487d19d"); - assert_eq!(&captures[3], "x86_64"); - assert_eq!(&captures[4], "linux"); - assert_eq!(captures.get(5).map(|x| x.as_str()), None); + let captures = expected + .captures("substrate 2.0.0-alpha.5-da487d19d-x86_64-linux") + .unwrap(); + assert_eq!(&captures[1], 
"2.0.0-alpha.5"); + assert_eq!(&captures[2], "da487d19d"); + assert_eq!(&captures[3], "x86_64"); + assert_eq!(&captures[4], "linux"); + assert_eq!(captures.get(5).map(|x| x.as_str()), None); } diff --git a/bin/node/executor/benches/bench.rs b/bin/node/executor/benches/bench.rs index 4f335df90d..9fc9372c66 100644 --- a/bin/node/executor/benches/bench.rs +++ b/bin/node/executor/benches/bench.rs @@ -15,21 +15,21 @@ // along with Substrate. If not, see . use codec::{Decode, Encode}; -use criterion::{BatchSize, Criterion, criterion_group, criterion_main}; +use criterion::{criterion_group, criterion_main, BatchSize, Criterion}; +use frame_support::Hashable; use node_executor::Executor; use node_primitives::{BlockNumber, Hash}; +use node_runtime::constants::currency::*; use node_runtime::{ - Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, UncheckedExtrinsic, + Block, BuildStorage, Call, CheckedExtrinsic, GenesisConfig, Header, UncheckedExtrinsic, }; -use node_runtime::constants::currency::*; use node_testing::keyring::*; -use sp_core::{NativeOrEncoded, NeverNativeValue}; +use sc_executor::{Externalities, NativeExecutor, RuntimeInfo, WasmExecutionMethod}; use sp_core::storage::well_known_keys; use sp_core::traits::{CodeExecutor, RuntimeCode}; -use frame_support::Hashable; -use sp_state_machine::TestExternalities as CoreTestExternalities; -use sc_executor::{NativeExecutor, RuntimeInfo, WasmExecutionMethod, Externalities}; +use sp_core::{NativeOrEncoded, NeverNativeValue}; use sp_runtime::traits::BlakeTwo256; +use sp_state_machine::TestExternalities as CoreTestExternalities; criterion_group!(benches, bench_execute_block); criterion_main!(benches); @@ -47,167 +47,179 @@ type TestExternalities = CoreTestExternalities; #[derive(Debug)] enum ExecutionMethod { - Native, - Wasm(WasmExecutionMethod), + Native, + Wasm(WasmExecutionMethod), } fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, VERSION, GENESIS_HASH) + 
node_testing::keyring::sign(xt, VERSION, GENESIS_HASH) } fn new_test_ext(genesis_config: &GenesisConfig) -> TestExternalities { - let mut test_ext = TestExternalities::new_with_code( - COMPACT_CODE, - genesis_config.build_storage().unwrap(), - ); - test_ext.ext().place_storage(well_known_keys::HEAP_PAGES.to_vec(), Some(HEAP_PAGES.encode())); - test_ext + let mut test_ext = + TestExternalities::new_with_code(COMPACT_CODE, genesis_config.build_storage().unwrap()); + test_ext.ext().place_storage( + well_known_keys::HEAP_PAGES.to_vec(), + Some(HEAP_PAGES.encode()), + ); + test_ext } fn construct_block( - executor: &NativeExecutor, - ext: &mut E, - number: BlockNumber, - parent_hash: Hash, - extrinsics: Vec, + executor: &NativeExecutor, + ext: &mut E, + number: BlockNumber, + parent_hash: Hash, + extrinsics: Vec, ) -> (Vec, Hash) { - use sp_trie::{TrieConfiguration, trie_types::Layout}; - - // sign extrinsics. - let extrinsics = extrinsics.into_iter().map(sign).collect::>(); - - // calculate the header fields that we can. - let extrinsics_root = Layout::::ordered_trie_root( - extrinsics.iter().map(Encode::encode) - ).to_fixed_bytes() - .into(); - - let header = Header { - parent_hash, - number, - extrinsics_root, - state_root: Default::default(), - digest: Default::default(), - }; - - let runtime_code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(COMPACT_CODE.into()), - hash: vec![1, 2, 3], - heap_pages: None, - }; - - // execute the block to get the real header. 
- executor.call:: _>( - ext, - &runtime_code, - "Core_initialize_block", - &header.encode(), - true, - None, - ).0.unwrap(); - - for i in extrinsics.iter() { - executor.call:: _>( - ext, - &runtime_code, - "BlockBuilder_apply_extrinsic", - &i.encode(), - true, - None, - ).0.unwrap(); - } - - let header = match executor.call:: _>( - ext, - &runtime_code, - "BlockBuilder_finalize_block", - &[0u8;0], - true, - None, - ).0.unwrap() { - NativeOrEncoded::Native(_) => unreachable!(), - NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), - }; - - let hash = header.blake2_256(); - (Block { header, extrinsics }.encode(), hash.into()) + use sp_trie::{trie_types::Layout, TrieConfiguration}; + + // sign extrinsics. + let extrinsics = extrinsics.into_iter().map(sign).collect::>(); + + // calculate the header fields that we can. + let extrinsics_root = + Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); + + let header = Header { + parent_hash, + number, + extrinsics_root, + state_root: Default::default(), + digest: Default::default(), + }; + + let runtime_code = RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode(COMPACT_CODE.into()), + hash: vec![1, 2, 3], + heap_pages: None, + }; + + // execute the block to get the real header. 
+ executor + .call:: _>( + ext, + &runtime_code, + "Core_initialize_block", + &header.encode(), + true, + None, + ) + .0 + .unwrap(); + + for i in extrinsics.iter() { + executor + .call:: _>( + ext, + &runtime_code, + "BlockBuilder_apply_extrinsic", + &i.encode(), + true, + None, + ) + .0 + .unwrap(); + } + + let header = match executor + .call:: _>( + ext, + &runtime_code, + "BlockBuilder_finalize_block", + &[0u8; 0], + true, + None, + ) + .0 + .unwrap() + { + NativeOrEncoded::Native(_) => unreachable!(), + NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), + }; + + let hash = header.blake2_256(); + (Block { header, extrinsics }.encode(), hash.into()) } - -fn test_blocks(genesis_config: &GenesisConfig, executor: &NativeExecutor) - -> Vec<(Vec, Hash)> -{ - let mut test_ext = new_test_ext(genesis_config); - let mut block1_extrinsics = vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), - }, - ]; - block1_extrinsics.extend((0..20).map(|i| { - CheckedExtrinsic { - signed: Some((alice(), signed_extra(i, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 1 * DOLLARS)), - } - })); - let block1 = construct_block( - executor, - &mut test_ext.ext(), - 1, - GENESIS_HASH.into(), - block1_extrinsics, - ); - - vec![block1] +fn test_blocks( + genesis_config: &GenesisConfig, + executor: &NativeExecutor, +) -> Vec<(Vec, Hash)> { + let mut test_ext = new_test_ext(genesis_config); + let mut block1_extrinsics = vec![CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + }]; + block1_extrinsics.extend((0..20).map(|i| CheckedExtrinsic { + signed: Some((alice(), signed_extra(i, 0))), + function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 1 * DOLLARS)), + })); + let block1 = construct_block( + executor, + &mut test_ext.ext(), + 1, + GENESIS_HASH.into(), + block1_extrinsics, + ); + + vec![block1] } fn 
bench_execute_block(c: &mut Criterion) { - c.bench_function_over_inputs( - "execute blocks", - |b, strategy| { - let genesis_config = node_testing::genesis::config(false, Some(COMPACT_CODE)); - let (use_native, wasm_method) = match strategy { - ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted), - ExecutionMethod::Wasm(wasm_method) => (false, *wasm_method), - }; - - let executor = NativeExecutor::new(wasm_method, None, 8); - let runtime_code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(COMPACT_CODE.into()), - hash: vec![1, 2, 3], - heap_pages: None, - }; - - // Get the runtime version to initialize the runtimes cache. - { - let mut test_ext = new_test_ext(&genesis_config); - executor.runtime_version(&mut test_ext.ext(), &runtime_code).unwrap(); - } - - let blocks = test_blocks(&genesis_config, &executor); - - b.iter_batched_ref( - || new_test_ext(&genesis_config), - |test_ext| { - for block in blocks.iter() { - executor.call:: _>( - &mut test_ext.ext(), - &runtime_code, - "Core_execute_block", - &block.0, - use_native, - None, - ).0.unwrap(); - } - }, - BatchSize::LargeInput, - ); - }, - vec![ - ExecutionMethod::Native, - ExecutionMethod::Wasm(WasmExecutionMethod::Interpreted), - #[cfg(feature = "wasmtime")] - ExecutionMethod::Wasm(WasmExecutionMethod::Compiled), - ], - ); + c.bench_function_over_inputs( + "execute blocks", + |b, strategy| { + let genesis_config = node_testing::genesis::config(false, Some(COMPACT_CODE)); + let (use_native, wasm_method) = match strategy { + ExecutionMethod::Native => (true, WasmExecutionMethod::Interpreted), + ExecutionMethod::Wasm(wasm_method) => (false, *wasm_method), + }; + + let executor = NativeExecutor::new(wasm_method, None, 8); + let runtime_code = RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode(COMPACT_CODE.into()), + hash: vec![1, 2, 3], + heap_pages: None, + }; + + // Get the runtime version to initialize the runtimes cache. 
+ { + let mut test_ext = new_test_ext(&genesis_config); + executor + .runtime_version(&mut test_ext.ext(), &runtime_code) + .unwrap(); + } + + let blocks = test_blocks(&genesis_config, &executor); + + b.iter_batched_ref( + || new_test_ext(&genesis_config), + |test_ext| { + for block in blocks.iter() { + executor + .call:: _>( + &mut test_ext.ext(), + &runtime_code, + "Core_execute_block", + &block.0, + use_native, + None, + ) + .0 + .unwrap(); + } + }, + BatchSize::LargeInput, + ); + }, + vec![ + ExecutionMethod::Native, + ExecutionMethod::Wasm(WasmExecutionMethod::Interpreted), + #[cfg(feature = "wasmtime")] + ExecutionMethod::Wasm(WasmExecutionMethod::Compiled), + ], + ); } diff --git a/bin/node/executor/src/lib.rs b/bin/node/executor/src/lib.rs index bcc7f48507..eb61f7ce72 100644 --- a/bin/node/executor/src/lib.rs +++ b/bin/node/executor/src/lib.rs @@ -17,8 +17,8 @@ //! A `CodeExecutor` specialization which uses natively compiled runtime when the wasm to be //! executed is equivalent to the natively compiled code. -pub use sc_executor::NativeExecutor; use sc_executor::native_executor_instance; +pub use sc_executor::NativeExecutor; // Declare an instance of the native executor named `Executor`. Include the wasm binary as the // equivalent wasm code. diff --git a/bin/node/executor/tests/basic.rs b/bin/node/executor/tests/basic.rs index 7fdf4e9c59..a01378481e 100644 --- a/bin/node/executor/tests/basic.rs +++ b/bin/node/executor/tests/basic.rs @@ -14,34 +14,36 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use codec::{Encode, Decode, Joiner}; +use codec::{Decode, Encode, Joiner}; use frame_support::{ - StorageValue, StorageMap, - traits::Currency, - weights::{GetDispatchInfo, DispatchInfo, DispatchClass}, + traits::Currency, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo}, + StorageMap, StorageValue, }; +use frame_system::{self, EventRecord, Phase}; +use pallet_contracts::ContractAddressFor; use sp_core::{ - NeverNativeValue, map, traits::Externalities, storage::{well_known_keys, Storage}, + map, + storage::{well_known_keys, Storage}, + traits::Externalities, + NeverNativeValue, }; use sp_runtime::{ - ApplyExtrinsicResult, Fixed128, - traits::{Hash as HashT, Convert, BlakeTwo256}, - transaction_validity::InvalidTransaction, + traits::{BlakeTwo256, Convert, Hash as HashT}, + transaction_validity::InvalidTransaction, + ApplyExtrinsicResult, Fixed128, }; -use pallet_contracts::ContractAddressFor; -use frame_system::{self, EventRecord, Phase}; +use node_primitives::{Balance, Hash}; use node_runtime::{ - Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Call, Runtime, Balances, - System, TransactionPayment, Event, TransactionBaseFee, TransactionByteFee, - constants::currency::*, + constants::currency::*, Balances, Block, Call, CheckedExtrinsic, Event, Header, Runtime, + System, TransactionBaseFee, TransactionByteFee, TransactionPayment, UncheckedExtrinsic, }; -use node_primitives::{Balance, Hash}; -use wabt; use node_testing::keyring::*; +use wabt; pub mod common; -use self::common::{*, sign}; +use self::common::{sign, *}; /// The wasm runtime binary which hasn't undergone the compacting process. 
/// @@ -52,443 +54,503 @@ pub const BLOATY_CODE: &[u8] = node_runtime::WASM_BINARY_BLOATY; /// Default transfer fee fn transfer_fee(extrinsic: &E, fee_multiplier: Fixed128) -> Balance { - let length_fee = TransactionByteFee::get() * (extrinsic.encode().len() as Balance); + let length_fee = TransactionByteFee::get() * (extrinsic.encode().len() as Balance); - let weight = default_transfer_call().get_dispatch_info().weight; - let weight_fee = - ::WeightToFee::convert(weight); + let weight = default_transfer_call().get_dispatch_info().weight; + let weight_fee = ::WeightToFee::convert(weight); - let base_fee = TransactionBaseFee::get(); - base_fee + fee_multiplier.saturated_multiply_accumulate(length_fee + weight_fee) + let base_fee = TransactionBaseFee::get(); + base_fee + fee_multiplier.saturated_multiply_accumulate(length_fee + weight_fee) } fn xt() -> UncheckedExtrinsic { - sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(default_transfer_call()), - }) + sign(CheckedExtrinsic { + signed: Some((alice(), signed_extra(0, 0))), + function: Call::Balances(default_transfer_call()), + }) } fn set_heap_pages(ext: &mut E, heap_pages: u64) { - ext.place_storage(well_known_keys::HEAP_PAGES.to_vec(), Some(heap_pages.encode())); + ext.place_storage( + well_known_keys::HEAP_PAGES.to_vec(), + Some(heap_pages.encode()), + ); } fn changes_trie_block() -> (Vec, Hash) { - construct_block( - &mut new_test_ext(COMPACT_CODE, true), - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), - }, - CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), - }, - ] - ) + construct_block( + &mut new_test_ext(COMPACT_CODE, true), + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(42 * 
1000)), + }, + CheckedExtrinsic { + signed: Some((alice(), signed_extra(0, 0))), + function: Call::Balances(pallet_balances::Call::transfer( + bob().into(), + 69 * DOLLARS, + )), + }, + ], + ) } /// block 1 and 2 must be created together to ensure transactions are only signed once (since they /// are not guaranteed to be deterministic) and to ensure that the correct state is propagated /// from block1's execution to block2 to derive the correct storage_root. fn blocks() -> ((Vec, Hash), (Vec, Hash)) { - let mut t = new_test_ext(COMPACT_CODE, false); - let block1 = construct_block( - &mut t, - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), - }, - CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 69 * DOLLARS)), - }, - ] - ); - let block2 = construct_block( - &mut t, - 2, - block1.1.clone(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), - }, - CheckedExtrinsic { - signed: Some((bob(), signed_extra(0, 0))), - function: Call::Balances(pallet_balances::Call::transfer(alice().into(), 5 * DOLLARS)), - }, - CheckedExtrinsic { - signed: Some((alice(), signed_extra(1, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 15 * DOLLARS)), - } - ] - ); - - // session change => consensus authorities change => authorities change digest item appears - let digest = Header::decode(&mut &block2.0[..]).unwrap().digest; - assert_eq!(digest.logs().len(), 0); - - (block1, block2) + let mut t = new_test_ext(COMPACT_CODE, false); + let block1 = construct_block( + &mut t, + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + }, + CheckedExtrinsic { + signed: Some((alice(), signed_extra(0, 0))), + function: 
Call::Balances(pallet_balances::Call::transfer( + bob().into(), + 69 * DOLLARS, + )), + }, + ], + ); + let block2 = construct_block( + &mut t, + 2, + block1.1.clone(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), + }, + CheckedExtrinsic { + signed: Some((bob(), signed_extra(0, 0))), + function: Call::Balances(pallet_balances::Call::transfer( + alice().into(), + 5 * DOLLARS, + )), + }, + CheckedExtrinsic { + signed: Some((alice(), signed_extra(1, 0))), + function: Call::Balances(pallet_balances::Call::transfer( + bob().into(), + 15 * DOLLARS, + )), + }, + ], + ); + + // session change => consensus authorities change => authorities change digest item appears + let digest = Header::decode(&mut &block2.0[..]).unwrap().digest; + assert_eq!(digest.logs().len(), 0); + + (block1, block2) } fn block_with_size(time: u64, nonce: u32, size: usize) -> (Vec, Hash) { - construct_block( - &mut new_test_ext(COMPACT_CODE, false), - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), - }, - CheckedExtrinsic { - signed: Some((alice(), signed_extra(nonce, 0))), - function: Call::System(frame_system::Call::remark(vec![0; size])), - } - ] - ) + construct_block( + &mut new_test_ext(COMPACT_CODE, false), + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), + }, + CheckedExtrinsic { + signed: Some((alice(), signed_extra(nonce, 0))), + function: Call::System(frame_system::Call::remark(vec![0; size])), + }, + ], + ) } #[test] fn panic_execution_with_foreign_code_gives_error() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { - top: map![ - >::hashed_key_for(alice()) => { - (69u128, 0u8, 0u128, 0u128, 0u128).encode() - }, - >::hashed_key().to_vec() => { - 69_u128.encode() - }, - >::hashed_key_for(0) => { - vec![0u8; 32] - } 
- ], - children: map![], - }); - - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ).0; - assert!(r.is_ok()); - let v = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0.unwrap(); - let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); - assert_eq!(r, Err(InvalidTransaction::Payment.into())); + let mut t = TestExternalities::::new_with_code( + BLOATY_CODE, + Storage { + top: map![ + >::hashed_key_for(alice()) => { + (69u128, 0u8, 0u128, 0u128, 0u128).encode() + }, + >::hashed_key().to_vec() => { + 69_u128.encode() + }, + >::hashed_key_for(0) => { + vec![0u8; 32] + } + ], + children: map![], + }, + ); + + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; + assert!(r.is_ok()); + let v = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0 + .unwrap(); + let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); + assert_eq!(r, Err(InvalidTransaction::Payment.into())); } #[test] fn bad_extrinsic_with_native_equivalent_code_gives_error() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { - top: map![ - >::hashed_key_for(alice()) => { - (0u32, 0u8, 69u128, 0u128, 0u128, 0u128).encode() - }, - >::hashed_key().to_vec() => { - 69_u128.encode() - }, - >::hashed_key_for(0) => { - vec![0u8; 32] - } - ], - children: map![], - }); - - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ).0; - assert!(r.is_ok()); - let v = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0.unwrap(); - let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); - assert_eq!(r, Err(InvalidTransaction::Payment.into())); + let mut t = 
TestExternalities::::new_with_code( + COMPACT_CODE, + Storage { + top: map![ + >::hashed_key_for(alice()) => { + (0u32, 0u8, 69u128, 0u128, 0u128, 0u128).encode() + }, + >::hashed_key().to_vec() => { + 69_u128.encode() + }, + >::hashed_key_for(0) => { + vec![0u8; 32] + } + ], + children: map![], + }, + ); + + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; + assert!(r.is_ok()); + let v = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0 + .unwrap(); + let r = ApplyExtrinsicResult::decode(&mut &v.as_encoded()[..]).unwrap(); + assert_eq!(r, Err(InvalidTransaction::Payment.into())); } #[test] fn successful_execution_with_native_equivalent_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { - top: map![ - >::hashed_key_for(alice()) => { - (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() - }, - >::hashed_key().to_vec() => { - (111 * DOLLARS).encode() - }, - >::hashed_key_for(0) => vec![0u8; 32] - ], - children: map![], - }); - - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ).0; - assert!(r.is_ok()); - - let fm = t.execute_with(TransactionPayment::next_fee_multiplier); - - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0; - assert!(r.is_ok()); - - t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); - assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); - }); + let mut t = TestExternalities::::new_with_code( + COMPACT_CODE, + Storage { + top: map![ + >::hashed_key_for(alice()) => { + (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + }, + >::hashed_key().to_vec() => { + (111 * DOLLARS).encode() + }, + >::hashed_key_for(0) => vec![0u8; 32] + ], + 
children: map![], + }, + ); + + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; + assert!(r.is_ok()); + + let fm = t.execute_with(TransactionPayment::next_fee_multiplier); + + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0; + assert!(r.is_ok()); + + t.execute_with(|| { + let fees = transfer_fee(&xt(), fm); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); + }); } #[test] fn successful_execution_with_foreign_code_gives_ok() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { - top: map![ - >::hashed_key_for(alice()) => { - (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() - }, - >::hashed_key().to_vec() => { - (111 * DOLLARS).encode() - }, - >::hashed_key_for(0) => vec![0u8; 32] - ], - children: map![], - }); - - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ).0; - assert!(r.is_ok()); - - let fm = t.execute_with(TransactionPayment::next_fee_multiplier); - - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - true, - None, - ).0; - assert!(r.is_ok()); - - t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); - assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); - }); + let mut t = TestExternalities::::new_with_code( + BLOATY_CODE, + Storage { + top: map![ + >::hashed_key_for(alice()) => { + (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + }, + >::hashed_key().to_vec() => { + (111 * DOLLARS).encode() + }, + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }, + ); + + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + 
None, + ) + .0; + assert!(r.is_ok()); + + let fm = t.execute_with(TransactionPayment::next_fee_multiplier); + + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + true, + None, + ) + .0; + assert!(r.is_ok()); + + t.execute_with(|| { + let fees = transfer_fee(&xt(), fm); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); + }); } #[test] fn full_native_block_import_works() { - let mut t = new_test_ext(COMPACT_CODE, false); - - let (block1, block2) = blocks(); - - let mut alice_last_known_balance: Balance = Default::default(); - let mut fm = t.execute_with(TransactionPayment::next_fee_multiplier); - - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - true, - None, - ).0.unwrap(); - - t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); - assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); - alice_last_known_balance = Balances::total_balance(&alice()); - let events = vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 10_000_000, class: DispatchClass::Mandatory, pays_fee: true } - )), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_balances(pallet_balances::RawEvent::Transfer( - alice().into(), - bob().into(), - 69 * DOLLARS, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 200_000_000, class: DispatchClass::Normal, pays_fee: true } - )), - topics: vec![], - }, - ]; - assert_eq!(System::events(), events); - }); - - fm = 
t.execute_with(TransactionPayment::next_fee_multiplier); - - executor_call:: _>( - &mut t, - "Core_execute_block", - &block2.0, - true, - None, - ).0.unwrap(); - - t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); - assert_eq!( - Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - fees, - ); - assert_eq!( - Balances::total_balance(&bob()), - 179 * DOLLARS - fees, - ); - let events = vec![ - EventRecord { - phase: Phase::ApplyExtrinsic(0), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 10_000_000, class: DispatchClass::Mandatory, pays_fee: true } - )), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_balances( - pallet_balances::RawEvent::Transfer( - bob().into(), - alice().into(), - 5 * DOLLARS, - ) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(1), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 200_000_000, class: DispatchClass::Normal, pays_fee: true } - )), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: Event::pallet_balances( - pallet_balances::RawEvent::Transfer( - alice().into(), - bob().into(), - 15 * DOLLARS, - ) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), - topics: vec![], - }, - EventRecord { - phase: Phase::ApplyExtrinsic(2), - event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( - DispatchInfo { weight: 200_000_000, class: DispatchClass::Normal, pays_fee: true } - )), - topics: vec![], - }, - ]; - assert_eq!(System::events(), events); - }); + let mut t = new_test_ext(COMPACT_CODE, false); + + let (block1, block2) = blocks(); 
+ + let mut alice_last_known_balance: Balance = Default::default(); + let mut fm = t.execute_with(TransactionPayment::next_fee_multiplier); + + executor_call:: _>( + &mut t, + "Core_execute_block", + &block1.0, + true, + None, + ) + .0 + .unwrap(); + + t.execute_with(|| { + let fees = transfer_fee(&xt(), fm); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); + alice_last_known_balance = Balances::total_balance(&alice()); + let events = vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + DispatchInfo { + weight: 10_000_000, + class: DispatchClass::Mandatory, + pays_fee: true, + }, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::pallet_balances(pallet_balances::RawEvent::Transfer( + alice().into(), + bob().into(), + 69 * DOLLARS, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + DispatchInfo { + weight: 200_000_000, + class: DispatchClass::Normal, + pays_fee: true, + }, + )), + topics: vec![], + }, + ]; + assert_eq!(System::events(), events); + }); + + fm = t.execute_with(TransactionPayment::next_fee_multiplier); + + executor_call:: _>( + &mut t, + "Core_execute_block", + &block2.0, + true, + None, + ) + .0 + .unwrap(); + + t.execute_with(|| { + let fees = transfer_fee(&xt(), fm); + assert_eq!( + Balances::total_balance(&alice()), + alice_last_known_balance - 10 * DOLLARS - fees, + ); + assert_eq!(Balances::total_balance(&bob()), 179 * DOLLARS - fees,); + let events = vec![ + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + 
DispatchInfo { + weight: 10_000_000, + class: DispatchClass::Mandatory, + pays_fee: true, + }, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::pallet_balances(pallet_balances::RawEvent::Transfer( + bob().into(), + alice().into(), + 5 * DOLLARS, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + DispatchInfo { + weight: 200_000_000, + class: DispatchClass::Normal, + pays_fee: true, + }, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: Event::pallet_balances(pallet_balances::RawEvent::Transfer( + alice().into(), + bob().into(), + 15 * DOLLARS, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: Event::pallet_treasury(pallet_treasury::RawEvent::Deposit(fees * 8 / 10)), + topics: vec![], + }, + EventRecord { + phase: Phase::ApplyExtrinsic(2), + event: Event::frame_system(frame_system::RawEvent::ExtrinsicSuccess( + DispatchInfo { + weight: 200_000_000, + class: DispatchClass::Normal, + pays_fee: true, + }, + )), + topics: vec![], + }, + ]; + assert_eq!(System::events(), events); + }); } #[test] fn full_wasm_block_import_works() { - let mut t = new_test_ext(COMPACT_CODE, false); - - let (block1, block2) = blocks(); - - let mut alice_last_known_balance: Balance = Default::default(); - let mut fm = t.execute_with(TransactionPayment::next_fee_multiplier); - - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - false, - None, - ).0.unwrap(); - - t.execute_with(|| { - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - transfer_fee(&xt(), fm)); - assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); - alice_last_known_balance = Balances::total_balance(&alice()); - 
}); - - fm = t.execute_with(TransactionPayment::next_fee_multiplier); - - executor_call:: _>( - &mut t, - "Core_execute_block", - &block2.0, - false, - None, - ).0.unwrap(); - - t.execute_with(|| { - assert_eq!( - Balances::total_balance(&alice()), - alice_last_known_balance - 10 * DOLLARS - transfer_fee(&xt(), fm), - ); - assert_eq!( - Balances::total_balance(&bob()), - 179 * DOLLARS - 1 * transfer_fee(&xt(), fm), - ); - }); + let mut t = new_test_ext(COMPACT_CODE, false); + + let (block1, block2) = blocks(); + + let mut alice_last_known_balance: Balance = Default::default(); + let mut fm = t.execute_with(TransactionPayment::next_fee_multiplier); + + executor_call:: _>( + &mut t, + "Core_execute_block", + &block1.0, + false, + None, + ) + .0 + .unwrap(); + + t.execute_with(|| { + assert_eq!( + Balances::total_balance(&alice()), + 42 * DOLLARS - transfer_fee(&xt(), fm) + ); + assert_eq!(Balances::total_balance(&bob()), 169 * DOLLARS); + alice_last_known_balance = Balances::total_balance(&alice()); + }); + + fm = t.execute_with(TransactionPayment::next_fee_multiplier); + + executor_call:: _>( + &mut t, + "Core_execute_block", + &block2.0, + false, + None, + ) + .0 + .unwrap(); + + t.execute_with(|| { + assert_eq!( + Balances::total_balance(&alice()), + alice_last_known_balance - 10 * DOLLARS - transfer_fee(&xt(), fm), + ); + assert_eq!( + Balances::total_balance(&bob()), + 179 * DOLLARS - 1 * transfer_fee(&xt(), fm), + ); + }); } const CODE_TRANSFER: &str = r#" @@ -585,235 +647,261 @@ const CODE_TRANSFER: &str = r#" #[test] fn deploying_wasm_contract_should_work() { - let transfer_code = wabt::wat2wasm(CODE_TRANSFER).unwrap(); - let transfer_ch = ::Hashing::hash(&transfer_code); - - let addr = ::DetermineContractAddress::contract_address_for( - &transfer_ch, - &[], - &charlie(), - ); - - let b = construct_block( - &mut new_test_ext(COMPACT_CODE, false), - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: 
Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), - function: Call::Contracts( - pallet_contracts::Call::put_code::(10_000, transfer_code) - ), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), - function: Call::Contracts( - pallet_contracts::Call::instantiate::(1 * DOLLARS, 10_000, transfer_ch, Vec::new()) - ), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(2, 0))), - function: Call::Contracts( - pallet_contracts::Call::call::( - pallet_indices::address::Address::Id(addr.clone()), - 10, - 10_000, - vec![0x00, 0x01, 0x02, 0x03] - ) - ), - }, - ] - ); - - let mut t = new_test_ext(COMPACT_CODE, false); - - executor_call:: _>( - &mut t, - "Core_execute_block", - &b.0, - false, - None, - ).0.unwrap(); - - t.execute_with(|| { - // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. - assert_eq!( - &pallet_contracts::ContractInfoOf::::get(addr) - .and_then(|c| c.get_alive()) - .unwrap() - .code_hash, - &transfer_ch - ); - }); + let transfer_code = wabt::wat2wasm(CODE_TRANSFER).unwrap(); + let transfer_ch = ::Hashing::hash(&transfer_code); + + let addr = ::DetermineContractAddress::contract_address_for( + &transfer_ch, + &[], + &charlie(), + ); + + let b = construct_block( + &mut new_test_ext(COMPACT_CODE, false), + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + }, + CheckedExtrinsic { + signed: Some((charlie(), signed_extra(0, 0))), + function: Call::Contracts(pallet_contracts::Call::put_code::( + 10_000, + transfer_code, + )), + }, + CheckedExtrinsic { + signed: Some((charlie(), signed_extra(1, 0))), + function: Call::Contracts(pallet_contracts::Call::instantiate::( + 1 * DOLLARS, + 10_000, + transfer_ch, + Vec::new(), + )), + }, + CheckedExtrinsic { + signed: Some((charlie(), signed_extra(2, 0))), + 
function: Call::Contracts(pallet_contracts::Call::call::( + pallet_indices::address::Address::Id(addr.clone()), + 10, + 10_000, + vec![0x00, 0x01, 0x02, 0x03], + )), + }, + ], + ); + + let mut t = new_test_ext(COMPACT_CODE, false); + + executor_call:: _>(&mut t, "Core_execute_block", &b.0, false, None) + .0 + .unwrap(); + + t.execute_with(|| { + // Verify that the contract constructor worked well and code of TRANSFER contract is actually deployed. + assert_eq!( + &pallet_contracts::ContractInfoOf::::get(addr) + .and_then(|c| c.get_alive()) + .unwrap() + .code_hash, + &transfer_ch + ); + }); } #[test] fn wasm_big_block_import_fails() { - let mut t = new_test_ext(COMPACT_CODE, false); - - set_heap_pages(&mut t.ext(), 4); - - let result = executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - false, - None, - ).0; - assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) + let mut t = new_test_ext(COMPACT_CODE, false); + + set_heap_pages(&mut t.ext(), 4); + + let result = executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + false, + None, + ) + .0; + assert!(result.is_err()); // Err(Wasmi(Trap(Trap { kind: Host(AllocatorOutOfSpace) }))) } #[test] fn native_big_block_import_succeeds() { - let mut t = new_test_ext(COMPACT_CODE, false); - - executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - true, - None, - ).0.unwrap(); + let mut t = new_test_ext(COMPACT_CODE, false); + + executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + true, + None, + ) + .0 + .unwrap(); } #[test] fn native_big_block_import_fails_on_fallback() { - let mut t = new_test_ext(COMPACT_CODE, false); - - assert!( - executor_call:: _>( - &mut t, - "Core_execute_block", - &block_with_size(42, 0, 120_000).0, - false, - None, - ).0.is_err() - ); + let mut t = new_test_ext(COMPACT_CODE, false); + + 
assert!(executor_call:: _>( + &mut t, + "Core_execute_block", + &block_with_size(42, 0, 120_000).0, + false, + None, + ) + .0 + .is_err()); } #[test] fn panic_execution_gives_error() { - let mut t = TestExternalities::::new_with_code(BLOATY_CODE, Storage { - top: map![ - >::hashed_key().to_vec() => { - 0_u128.encode() - }, - >::hashed_key_for(0) => vec![0u8; 32] - ], - children: map![], - }); - - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - false, - None, - ).0; - assert!(r.is_ok()); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - false, - None, - ).0.unwrap().into_encoded(); - let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); - assert_eq!(r, Err(InvalidTransaction::Payment.into())); + let mut t = TestExternalities::::new_with_code( + BLOATY_CODE, + Storage { + top: map![ + >::hashed_key().to_vec() => { + 0_u128.encode() + }, + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }, + ); + + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + false, + None, + ) + .0; + assert!(r.is_ok()); + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + false, + None, + ) + .0 + .unwrap() + .into_encoded(); + let r = ApplyExtrinsicResult::decode(&mut &r[..]).unwrap(); + assert_eq!(r, Err(InvalidTransaction::Payment.into())); } #[test] fn successful_execution_gives_ok() { - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { - top: map![ - >::hashed_key_for(alice()) => { - (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() - }, - >::hashed_key().to_vec() => { - (111 * DOLLARS).encode() - }, - >::hashed_key_for(0) => vec![0u8; 32] - ], - children: map![], - }); - - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - false, - None, - ).0; - assert!(r.is_ok()); - let fm 
= t.execute_with(TransactionPayment::next_fee_multiplier); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt()), - false, - None, - ).0.unwrap().into_encoded(); - ApplyExtrinsicResult::decode(&mut &r[..]) - .unwrap() - .expect("Extrinsic could not be applied") - .expect("Extrinsic failed"); - - t.execute_with(|| { - let fees = transfer_fee(&xt(), fm); - assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); - assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); - }); + let mut t = TestExternalities::::new_with_code( + COMPACT_CODE, + Storage { + top: map![ + >::hashed_key_for(alice()) => { + (0u32, 0u8, 111 * DOLLARS, 0u128, 0u128, 0u128).encode() + }, + >::hashed_key().to_vec() => { + (111 * DOLLARS).encode() + }, + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }, + ); + + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + false, + None, + ) + .0; + assert!(r.is_ok()); + let fm = t.execute_with(TransactionPayment::next_fee_multiplier); + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt()), + false, + None, + ) + .0 + .unwrap() + .into_encoded(); + ApplyExtrinsicResult::decode(&mut &r[..]) + .unwrap() + .expect("Extrinsic could not be applied") + .expect("Extrinsic failed"); + + t.execute_with(|| { + let fees = transfer_fee(&xt(), fm); + assert_eq!(Balances::total_balance(&alice()), 42 * DOLLARS - fees); + assert_eq!(Balances::total_balance(&bob()), 69 * DOLLARS); + }); } #[test] fn full_native_block_import_works_with_changes_trie() { - let block1 = changes_trie_block(); - let block_data = block1.0; - let block = Block::decode(&mut &block_data[..]).unwrap(); - - let mut t = new_test_ext(COMPACT_CODE, true); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block.encode(), - true, - None, - ).0.unwrap(); - - assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); + 
let block1 = changes_trie_block(); + let block_data = block1.0; + let block = Block::decode(&mut &block_data[..]).unwrap(); + + let mut t = new_test_ext(COMPACT_CODE, true); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block.encode(), + true, + None, + ) + .0 + .unwrap(); + + assert!(t + .ext() + .storage_changes_root(&GENESIS_HASH) + .unwrap() + .is_some()); } #[test] fn full_wasm_block_import_works_with_changes_trie() { - let block1 = changes_trie_block(); - - let mut t = new_test_ext(COMPACT_CODE, true); - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - false, - None, - ).0.unwrap(); - - assert!(t.ext().storage_changes_root(&GENESIS_HASH).unwrap().is_some()); + let block1 = changes_trie_block(); + + let mut t = new_test_ext(COMPACT_CODE, true); + executor_call:: _>( + &mut t, + "Core_execute_block", + &block1.0, + false, + None, + ) + .0 + .unwrap(); + + assert!(t + .ext() + .storage_changes_root(&GENESIS_HASH) + .unwrap() + .is_some()); } #[test] fn should_import_block_with_test_client() { - use node_testing::client::{ - ClientBlockImportExt, TestClientBuilderExt, TestClientBuilder, - sp_consensus::BlockOrigin, - }; + use node_testing::client::{ + sp_consensus::BlockOrigin, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, + }; - let mut client = TestClientBuilder::new().build(); - let block1 = changes_trie_block(); - let block_data = block1.0; - let block = node_primitives::Block::decode(&mut &block_data[..]).unwrap(); + let mut client = TestClientBuilder::new().build(); + let block1 = changes_trie_block(); + let block_data = block1.0; + let block = node_primitives::Block::decode(&mut &block_data[..]).unwrap(); - client.import(BlockOrigin::Own, block).unwrap(); + client.import(BlockOrigin::Own, block).unwrap(); } diff --git a/bin/node/executor/tests/common.rs b/bin/node/executor/tests/common.rs index 6b6ef272f8..caeb01163e 100644 --- a/bin/node/executor/tests/common.rs +++ b/bin/node/executor/tests/common.rs @@ 
-14,20 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use frame_support::Hashable; -use sp_state_machine::TestExternalities as CoreTestExternalities; -use sp_core::{NeverNativeValue, NativeOrEncoded, traits::{CodeExecutor, RuntimeCode}}; -use sp_runtime::{ApplyExtrinsicResult, traits::{Header as HeaderT, BlakeTwo256}}; -use sc_executor::{NativeExecutor, WasmExecutionMethod}; use sc_executor::error::Result; +use sc_executor::{NativeExecutor, WasmExecutionMethod}; +use sp_core::{ + traits::{CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, +}; +use sp_runtime::{ + traits::{BlakeTwo256, Header as HeaderT}, + ApplyExtrinsicResult, +}; +use sp_state_machine::TestExternalities as CoreTestExternalities; use node_executor::Executor; +use node_primitives::{BlockNumber, Hash}; use node_runtime::{ - Header, Block, UncheckedExtrinsic, CheckedExtrinsic, Runtime, BuildStorage, - constants::currency::*, + constants::currency::*, Block, BuildStorage, CheckedExtrinsic, Header, Runtime, + UncheckedExtrinsic, }; -use node_primitives::{Hash, BlockNumber}; use node_testing::keyring::*; use sp_externalities::Externalities; @@ -46,58 +52,60 @@ pub const VERSION: u32 = node_runtime::VERSION.spec_version; pub type TestExternalities = CoreTestExternalities; pub fn sign(xt: CheckedExtrinsic) -> UncheckedExtrinsic { - node_testing::keyring::sign(xt, VERSION, GENESIS_HASH) + node_testing::keyring::sign(xt, VERSION, GENESIS_HASH) } pub fn default_transfer_call() -> pallet_balances::Call { - pallet_balances::Call::transfer::(bob().into(), 69 * DOLLARS) + pallet_balances::Call::transfer::(bob().into(), 69 * DOLLARS) } pub fn from_block_number(n: u32) -> Header { - Header::new(n, Default::default(), Default::default(), [69; 32].into(), Default::default()) + Header::new( + n, + Default::default(), + Default::default(), + [69; 32].into(), + 
Default::default(), + ) } pub fn executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) + NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) } pub fn executor_call< - R:Decode + Encode + PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe + R: Decode + Encode + PartialEq, + NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, >( - t: &mut TestExternalities, - method: &str, - data: &[u8], - use_native: bool, - native_call: Option, + t: &mut TestExternalities, + method: &str, + data: &[u8], + use_native: bool, + native_call: Option, ) -> (Result>, bool) { - let mut t = t.ext(); - - let code = t.storage(sp_core::storage::well_known_keys::CODE).unwrap(); - let heap_pages = t.storage(sp_core::storage::well_known_keys::HEAP_PAGES); - let runtime_code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(code.as_slice().into()), - hash: sp_core::blake2_256(&code).to_vec(), - heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), - }; - - executor().call::( - &mut t, - &runtime_code, - method, - data, - use_native, - native_call, - ) + let mut t = t.ext(); + + let code = t.storage(sp_core::storage::well_known_keys::CODE).unwrap(); + let heap_pages = t.storage(sp_core::storage::well_known_keys::HEAP_PAGES); + let runtime_code = RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode(code.as_slice().into()), + hash: sp_core::blake2_256(&code).to_vec(), + heap_pages: heap_pages.and_then(|hp| Decode::decode(&mut &hp[..]).ok()), + }; + + executor().call::(&mut t, &runtime_code, method, data, use_native, native_call) } pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalities { - let mut ext = TestExternalities::new_with_code( - code, - node_testing::genesis::config(support_changes_trie, Some(code)).build_storage().unwrap(), - ); - ext.changes_trie_storage().insert(0, GENESIS_HASH.into(), Default::default()); - ext + let mut ext 
= TestExternalities::new_with_code( + code, + node_testing::genesis::config(support_changes_trie, Some(code)) + .build_storage() + .unwrap(), + ); + ext.changes_trie_storage() + .insert(0, GENESIS_HASH.into(), Default::default()); + ext } /// Construct a fake block. @@ -105,66 +113,76 @@ pub fn new_test_ext(code: &[u8], support_changes_trie: bool) -> TestExternalitie /// `extrinsics` must be a list of valid extrinsics, i.e. none of the extrinsics for example /// can report `ExhaustResources`. Otherwise, this function panics. pub fn construct_block( - env: &mut TestExternalities, - number: BlockNumber, - parent_hash: Hash, - extrinsics: Vec, + env: &mut TestExternalities, + number: BlockNumber, + parent_hash: Hash, + extrinsics: Vec, ) -> (Vec, Hash) { - use sp_trie::{TrieConfiguration, trie_types::Layout}; - - // sign extrinsics. - let extrinsics = extrinsics.into_iter().map(sign).collect::>(); - - // calculate the header fields that we can. - let extrinsics_root = - Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) - .to_fixed_bytes() - .into(); - - let header = Header { - parent_hash, - number, - extrinsics_root, - state_root: Default::default(), - digest: Default::default(), - }; - - // execute the block to get the real header. - executor_call:: _>( - env, - "Core_initialize_block", - &header.encode(), - true, - None, - ).0.unwrap(); - - for extrinsic in extrinsics.iter() { - // Try to apply the `extrinsic`. It should be valid, in the sense that it passes - // all pre-inclusion checks. 
- let r = executor_call:: _>( - env, - "BlockBuilder_apply_extrinsic", - &extrinsic.encode(), - true, - None, - ).0.expect("application of an extrinsic failed").into_encoded(); - match ApplyExtrinsicResult::decode(&mut &r[..]).expect("apply result deserialization failed") { - Ok(_) => {}, - Err(e) => panic!("Applying extrinsic failed: {:?}", e), - } - } - - let header = match executor_call:: _>( - env, - "BlockBuilder_finalize_block", - &[0u8;0], - true, - None, - ).0.unwrap() { - NativeOrEncoded::Native(_) => unreachable!(), - NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), - }; - - let hash = header.blake2_256(); - (Block { header, extrinsics }.encode(), hash.into()) + use sp_trie::{trie_types::Layout, TrieConfiguration}; + + // sign extrinsics. + let extrinsics = extrinsics.into_iter().map(sign).collect::>(); + + // calculate the header fields that we can. + let extrinsics_root = + Layout::::ordered_trie_root(extrinsics.iter().map(Encode::encode)) + .to_fixed_bytes() + .into(); + + let header = Header { + parent_hash, + number, + extrinsics_root, + state_root: Default::default(), + digest: Default::default(), + }; + + // execute the block to get the real header. + executor_call:: _>( + env, + "Core_initialize_block", + &header.encode(), + true, + None, + ) + .0 + .unwrap(); + + for extrinsic in extrinsics.iter() { + // Try to apply the `extrinsic`. It should be valid, in the sense that it passes + // all pre-inclusion checks. 
+ let r = executor_call:: _>( + env, + "BlockBuilder_apply_extrinsic", + &extrinsic.encode(), + true, + None, + ) + .0 + .expect("application of an extrinsic failed") + .into_encoded(); + match ApplyExtrinsicResult::decode(&mut &r[..]) + .expect("apply result deserialization failed") + { + Ok(_) => {} + Err(e) => panic!("Applying extrinsic failed: {:?}", e), + } + } + + let header = match executor_call:: _>( + env, + "BlockBuilder_finalize_block", + &[0u8; 0], + true, + None, + ) + .0 + .unwrap() + { + NativeOrEncoded::Native(_) => unreachable!(), + NativeOrEncoded::Encoded(h) => Header::decode(&mut &h[..]).unwrap(), + }; + + let hash = header.blake2_256(); + (Block { header, extrinsics }.encode(), hash.into()) } diff --git a/bin/node/executor/tests/fees.rs b/bin/node/executor/tests/fees.rs index 32fef3b326..a537ae8ad0 100644 --- a/bin/node/executor/tests/fees.rs +++ b/bin/node/executor/tests/fees.rs @@ -15,315 +15,327 @@ // along with Substrate. If not, see . use codec::{Encode, Joiner}; -use frame_support::{ - StorageValue, StorageMap, - traits::Currency, - weights::GetDispatchInfo, -}; -use sp_core::{NeverNativeValue, map, storage::Storage}; -use sp_runtime::{Fixed128, Perbill, traits::{Convert, BlakeTwo256}}; +use frame_support::{traits::Currency, weights::GetDispatchInfo, StorageMap, StorageValue}; +use node_primitives::Balance; +use node_runtime::impls::LinearWeightToFee; use node_runtime::{ - CheckedExtrinsic, Call, Runtime, Balances, TransactionPayment, TransactionBaseFee, - TransactionByteFee, WeightFeeCoefficient, - constants::currency::*, + constants::currency::*, Balances, Call, CheckedExtrinsic, Runtime, TransactionBaseFee, + TransactionByteFee, TransactionPayment, WeightFeeCoefficient, }; -use node_runtime::impls::LinearWeightToFee; -use node_primitives::Balance; use node_testing::keyring::*; +use sp_core::{map, storage::Storage, NeverNativeValue}; +use sp_runtime::{ + traits::{BlakeTwo256, Convert}, + Fixed128, Perbill, +}; pub mod common; -use 
self::common::{*, sign}; +use self::common::{sign, *}; #[test] fn fee_multiplier_increases_and_decreases_on_big_weight() { - let mut t = new_test_ext(COMPACT_CODE, false); - - // initial fee multiplier must be zero - let mut prev_multiplier = Fixed128::from_parts(0); - - t.execute_with(|| { - assert_eq!(TransactionPayment::next_fee_multiplier(), prev_multiplier); - }); - - let mut tt = new_test_ext(COMPACT_CODE, false); - - // big one in terms of weight. - let block1 = construct_block( - &mut tt, - 1, - GENESIS_HASH.into(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(0, 0))), - function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(90))), - } - ] - ); - - // small one in terms of weight. - let block2 = construct_block( - &mut tt, - 2, - block1.1.clone(), - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(1, 0))), - function: Call::System(frame_system::Call::remark(vec![0; 1])), - } - ] - ); - - println!( - "++ Block 1 size: {} / Block 2 size {}", - block1.0.encode().len(), - block2.0.encode().len(), - ); - - // execute a big block. - executor_call:: _>( - &mut t, - "Core_execute_block", - &block1.0, - true, - None, - ).0.unwrap(); - - // weight multiplier is increased for next block. - t.execute_with(|| { - let fm = TransactionPayment::next_fee_multiplier(); - println!("After a big block: {:?} -> {:?}", prev_multiplier, fm); - assert!(fm > prev_multiplier); - prev_multiplier = fm; - }); - - // execute a big block. - executor_call:: _>( - &mut t, - "Core_execute_block", - &block2.0, - true, - None, - ).0.unwrap(); - - // weight multiplier is increased for next block. 
- t.execute_with(|| { - let fm = TransactionPayment::next_fee_multiplier(); - println!("After a small block: {:?} -> {:?}", prev_multiplier, fm); - assert!(fm < prev_multiplier); - }); + let mut t = new_test_ext(COMPACT_CODE, false); + + // initial fee multiplier must be zero + let mut prev_multiplier = Fixed128::from_parts(0); + + t.execute_with(|| { + assert_eq!(TransactionPayment::next_fee_multiplier(), prev_multiplier); + }); + + let mut tt = new_test_ext(COMPACT_CODE, false); + + // big one in terms of weight. + let block1 = construct_block( + &mut tt, + 1, + GENESIS_HASH.into(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(42 * 1000)), + }, + CheckedExtrinsic { + signed: Some((charlie(), signed_extra(0, 0))), + function: Call::System(frame_system::Call::fill_block(Perbill::from_percent(90))), + }, + ], + ); + + // small one in terms of weight. + let block2 = construct_block( + &mut tt, + 2, + block1.1.clone(), + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(52 * 1000)), + }, + CheckedExtrinsic { + signed: Some((charlie(), signed_extra(1, 0))), + function: Call::System(frame_system::Call::remark(vec![0; 1])), + }, + ], + ); + + println!( + "++ Block 1 size: {} / Block 2 size {}", + block1.0.encode().len(), + block2.0.encode().len(), + ); + + // execute a big block. + executor_call:: _>( + &mut t, + "Core_execute_block", + &block1.0, + true, + None, + ) + .0 + .unwrap(); + + // weight multiplier is increased for next block. + t.execute_with(|| { + let fm = TransactionPayment::next_fee_multiplier(); + println!("After a big block: {:?} -> {:?}", prev_multiplier, fm); + assert!(fm > prev_multiplier); + prev_multiplier = fm; + }); + + // execute a big block. + executor_call:: _>( + &mut t, + "Core_execute_block", + &block2.0, + true, + None, + ) + .0 + .unwrap(); + + // weight multiplier is increased for next block. 
+ t.execute_with(|| { + let fm = TransactionPayment::next_fee_multiplier(); + println!("After a small block: {:?} -> {:?}", prev_multiplier, fm); + assert!(fm < prev_multiplier); + }); } #[test] fn transaction_fee_is_correct_ultimate() { - // This uses the exact values of substrate-node. - // - // weight of transfer call as of now: 1_000_000 - // if weight of the cheapest weight would be 10^7, this would be 10^9, which is: - // - 1 MILLICENTS in substrate node. - // - 1 milli-dot based on current polkadot runtime. - // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) - let mut t = TestExternalities::::new_with_code(COMPACT_CODE, Storage { - top: map![ - >::hashed_key_for(alice()) => { - (0u32, 0u8, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() - }, - >::hashed_key_for(bob()) => { - (0u32, 0u8, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() - }, - >::hashed_key().to_vec() => { - (110 * DOLLARS).encode() - }, - >::hashed_key_for(0) => vec![0u8; 32] - ], - children: map![], - }); - - let tip = 1_000_000; - let xt = sign(CheckedExtrinsic { - signed: Some((alice(), signed_extra(0, tip))), - function: Call::Balances(default_transfer_call()), - }); - - let r = executor_call:: _>( - &mut t, - "Core_initialize_block", - &vec![].and(&from_block_number(1u32)), - true, - None, - ).0; - - assert!(r.is_ok()); - let r = executor_call:: _>( - &mut t, - "BlockBuilder_apply_extrinsic", - &vec![].and(&xt.clone()), - true, - None, - ).0; - assert!(r.is_ok()); - - t.execute_with(|| { - assert_eq!(Balances::total_balance(&bob()), (10 + 69) * DOLLARS); - // Components deducted from alice's balances: - // - Weight fee - // - Length fee - // - Tip - // - Creation-fee of bob's account. 
- let mut balance_alice = (100 - 69) * DOLLARS; - - let length_fee = TransactionBaseFee::get() + - TransactionByteFee::get() * - (xt.clone().encode().len() as Balance); - balance_alice -= length_fee; - - let weight = default_transfer_call().get_dispatch_info().weight; - let weight_fee = LinearWeightToFee::::convert(weight); - - // we know that weight to fee multiplier is effect-less in block 1. - // current weight of transfer = 200_000_000 - // Linear weight to fee is 1:1 right now (1 weight = 1 unit of balance) - assert_eq!(weight_fee, weight as Balance); - balance_alice -= weight_fee; - balance_alice -= tip; - - assert_eq!(Balances::total_balance(&alice()), balance_alice); - }); + // This uses the exact values of substrate-node. + // + // weight of transfer call as of now: 1_000_000 + // if weight of the cheapest weight would be 10^7, this would be 10^9, which is: + // - 1 MILLICENTS in substrate node. + // - 1 milli-dot based on current polkadot runtime. + // (this baed on assigning 0.1 CENT to the cheapest tx with `weight = 100`) + let mut t = TestExternalities::::new_with_code( + COMPACT_CODE, + Storage { + top: map![ + >::hashed_key_for(alice()) => { + (0u32, 0u8, 100 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() + }, + >::hashed_key_for(bob()) => { + (0u32, 0u8, 10 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS, 0 * DOLLARS).encode() + }, + >::hashed_key().to_vec() => { + (110 * DOLLARS).encode() + }, + >::hashed_key_for(0) => vec![0u8; 32] + ], + children: map![], + }, + ); + + let tip = 1_000_000; + let xt = sign(CheckedExtrinsic { + signed: Some((alice(), signed_extra(0, tip))), + function: Call::Balances(default_transfer_call()), + }); + + let r = executor_call:: _>( + &mut t, + "Core_initialize_block", + &vec![].and(&from_block_number(1u32)), + true, + None, + ) + .0; + + assert!(r.is_ok()); + let r = executor_call:: _>( + &mut t, + "BlockBuilder_apply_extrinsic", + &vec![].and(&xt.clone()), + true, + None, + ) + .0; + assert!(r.is_ok()); + + 
t.execute_with(|| { + assert_eq!(Balances::total_balance(&bob()), (10 + 69) * DOLLARS); + // Components deducted from alice's balances: + // - Weight fee + // - Length fee + // - Tip + // - Creation-fee of bob's account. + let mut balance_alice = (100 - 69) * DOLLARS; + + let length_fee = TransactionBaseFee::get() + + TransactionByteFee::get() * (xt.clone().encode().len() as Balance); + balance_alice -= length_fee; + + let weight = default_transfer_call().get_dispatch_info().weight; + let weight_fee = LinearWeightToFee::::convert(weight); + + // we know that weight to fee multiplier is effect-less in block 1. + // current weight of transfer = 200_000_000 + // Linear weight to fee is 1:1 right now (1 weight = 1 unit of balance) + assert_eq!(weight_fee, weight as Balance); + balance_alice -= weight_fee; + balance_alice -= tip; + + assert_eq!(Balances::total_balance(&alice()), balance_alice); + }); } #[test] #[should_panic] #[cfg(feature = "stress-test")] fn block_weight_capacity_report() { - // Just report how many transfer calls you could fit into a block. The number should at least - // be a few hundred (250 at the time of writing but can change over time). Runs until panic. - use node_primitives::Index; - - // execution ext. - let mut t = new_test_ext(COMPACT_CODE, false); - // setup ext. - let mut tt = new_test_ext(COMPACT_CODE, false); - - let factor = 50; - let mut time = 10; - let mut nonce: Index = 0; - let mut block_number = 1; - let mut previous_hash: Hash = GENESIS_HASH.into(); - - loop { - let num_transfers = block_number * factor; - let mut xts = (0..num_transfers).map(|i| CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), - function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)), - }).collect::>(); - - xts.insert(0, CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), - }); - - // NOTE: this is super slow. Can probably be improved. 
- let block = construct_block( - &mut tt, - block_number, - previous_hash, - xts - ); - - let len = block.0.len(); - print!( - "++ Executing block with {} transfers. Block size = {} bytes / {} kb / {} mb", - num_transfers, - len, - len / 1024, - len / 1024 / 1024, - ); - - let r = executor_call:: _>( - &mut t, - "Core_execute_block", - &block.0, - true, - None, - ).0; - - println!(" || Result = {:?}", r); - assert!(r.is_ok()); - - previous_hash = block.1; - nonce += num_transfers; - time += 10; - block_number += 1; - } + // Just report how many transfer calls you could fit into a block. The number should at least + // be a few hundred (250 at the time of writing but can change over time). Runs until panic. + use node_primitives::Index; + + // execution ext. + let mut t = new_test_ext(COMPACT_CODE, false); + // setup ext. + let mut tt = new_test_ext(COMPACT_CODE, false); + + let factor = 50; + let mut time = 10; + let mut nonce: Index = 0; + let mut block_number = 1; + let mut previous_hash: Hash = GENESIS_HASH.into(); + + loop { + let num_transfers = block_number * factor; + let mut xts = (0..num_transfers) + .map(|i| CheckedExtrinsic { + signed: Some((charlie(), signed_extra(nonce + i as Index, 0))), + function: Call::Balances(pallet_balances::Call::transfer(bob().into(), 0)), + }) + .collect::>(); + + xts.insert( + 0, + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), + }, + ); + + // NOTE: this is super slow. Can probably be improved. + let block = construct_block(&mut tt, block_number, previous_hash, xts); + + let len = block.0.len(); + print!( + "++ Executing block with {} transfers. 
Block size = {} bytes / {} kb / {} mb", + num_transfers, + len, + len / 1024, + len / 1024 / 1024, + ); + + let r = executor_call:: _>( + &mut t, + "Core_execute_block", + &block.0, + true, + None, + ) + .0; + + println!(" || Result = {:?}", r); + assert!(r.is_ok()); + + previous_hash = block.1; + nonce += num_transfers; + time += 10; + block_number += 1; + } } #[test] #[should_panic] #[cfg(feature = "stress-test")] fn block_length_capacity_report() { - // Just report how big a block can get. Executes until panic. Should be ignored unless if - // manually inspected. The number should at least be a few megabytes (5 at the time of - // writing but can change over time). - use node_primitives::Index; - - // execution ext. - let mut t = new_test_ext(COMPACT_CODE, false); - // setup ext. - let mut tt = new_test_ext(COMPACT_CODE, false); - - let factor = 256 * 1024; - let mut time = 10; - let mut nonce: Index = 0; - let mut block_number = 1; - let mut previous_hash: Hash = GENESIS_HASH.into(); - - loop { - // NOTE: this is super slow. Can probably be improved. - let block = construct_block( - &mut tt, - block_number, - previous_hash, - vec![ - CheckedExtrinsic { - signed: None, - function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), - }, - CheckedExtrinsic { - signed: Some((charlie(), signed_extra(nonce, 0))), - function: Call::System(frame_system::Call::remark(vec![0u8; (block_number * factor) as usize])), - }, - ] - ); - - let len = block.0.len(); - print!( - "++ Executing block with big remark. Block size = {} bytes / {} kb / {} mb", - len, - len / 1024, - len / 1024 / 1024, - ); - - let r = executor_call:: _>( - &mut t, - "Core_execute_block", - &block.0, - true, - None, - ).0; - - println!(" || Result = {:?}", r); - assert!(r.is_ok()); - - previous_hash = block.1; - nonce += 1; - time += 10; - block_number += 1; - } + // Just report how big a block can get. Executes until panic. Should be ignored unless if + // manually inspected. 
The number should at least be a few megabytes (5 at the time of + // writing but can change over time). + use node_primitives::Index; + + // execution ext. + let mut t = new_test_ext(COMPACT_CODE, false); + // setup ext. + let mut tt = new_test_ext(COMPACT_CODE, false); + + let factor = 256 * 1024; + let mut time = 10; + let mut nonce: Index = 0; + let mut block_number = 1; + let mut previous_hash: Hash = GENESIS_HASH.into(); + + loop { + // NOTE: this is super slow. Can probably be improved. + let block = construct_block( + &mut tt, + block_number, + previous_hash, + vec![ + CheckedExtrinsic { + signed: None, + function: Call::Timestamp(pallet_timestamp::Call::set(time * 1000)), + }, + CheckedExtrinsic { + signed: Some((charlie(), signed_extra(nonce, 0))), + function: Call::System(frame_system::Call::remark(vec![ + 0u8; + (block_number * factor) + as usize + ])), + }, + ], + ); + + let len = block.0.len(); + print!( + "++ Executing block with big remark. Block size = {} bytes / {} kb / {} mb", + len, + len / 1024, + len / 1024 / 1024, + ); + + let r = executor_call:: _>( + &mut t, + "Core_execute_block", + &block.0, + true, + None, + ) + .0; + + println!(" || Result = {:?}", r); + assert!(r.is_ok()); + + previous_hash = block.1; + nonce += 1; + time += 10; + block_number += 1; + } } diff --git a/bin/node/executor/tests/submit_transaction.rs b/bin/node/executor/tests/submit_transaction.rs index d92f3e3202..368c1a2e6d 100644 --- a/bin/node/executor/tests/submit_transaction.rs +++ b/bin/node/executor/tests/submit_transaction.rs @@ -14,19 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use node_runtime::{ - Call, Executive, Indices, Runtime, TransactionSubmitterOf, UncheckedExtrinsic, -}; +use codec::Decode; +use frame_system::offchain::{SubmitSignedTransaction, SubmitUnsignedTransaction}; +use node_runtime::{Call, Executive, Indices, Runtime, TransactionSubmitterOf, UncheckedExtrinsic}; +use pallet_im_online::sr25519::AuthorityPair as Key; use sp_application_crypto::AppKey; +use sp_core::offchain::{testing::TestTransactionPoolExt, TransactionPoolExt}; use sp_core::testing::KeyStore; use sp_core::traits::KeystoreExt; -use sp_core::offchain::{ - TransactionPoolExt, - testing::TestTransactionPoolExt, -}; -use frame_system::offchain::{SubmitSignedTransaction, SubmitUnsignedTransaction}; -use pallet_im_online::sr25519::AuthorityPair as Key; -use codec::Decode; pub mod common; use self::common::*; @@ -35,154 +30,181 @@ type SubmitTransaction = TransactionSubmitterOf> - ::submit_unsigned(call) - .unwrap(); - - assert_eq!(state.read().transactions.len(), 1) - }); + let mut t = new_test_ext(COMPACT_CODE, false); + let (pool, state) = TestTransactionPoolExt::new(); + t.register_extension(TransactionPoolExt::new(pool)); + + t.execute_with(|| { + let signature = Default::default(); + let heartbeat_data = pallet_im_online::Heartbeat { + block_number: 1, + network_state: Default::default(), + session_index: 1, + authority_index: 0, + }; + + let call = pallet_im_online::Call::heartbeat(heartbeat_data, signature); + >::submit_unsigned(call) + .unwrap(); + + assert_eq!(state.read().transactions.len(), 1) + }); } const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; #[test] fn should_submit_signed_transaction() { - let mut t = new_test_ext(COMPACT_CODE, false); - let (pool, state) = TestTransactionPoolExt::new(); - t.register_extension(TransactionPoolExt::new(pool)); - - let keystore = KeyStore::new(); - keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))).unwrap(); - 
keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter2", PHRASE))).unwrap(); - keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter3", PHRASE))).unwrap(); - t.register_extension(KeystoreExt(keystore)); - - t.execute_with(|| { - let keys = > - ::find_all_local_keys(); - assert_eq!(keys.len(), 3, "Missing keys: {:?}", keys); - - let can_sign = > - ::can_sign(); - assert!(can_sign, "Since there are keys, `can_sign` should return true"); - - let call = pallet_balances::Call::transfer(Default::default(), Default::default()); - let results = - >::submit_signed(call); - - let len = results.len(); - assert_eq!(len, 3); - assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); - assert_eq!(state.read().transactions.len(), len); - }); + let mut t = new_test_ext(COMPACT_CODE, false); + let (pool, state) = TestTransactionPoolExt::new(); + t.register_extension(TransactionPoolExt::new(pool)); + + let keystore = KeyStore::new(); + keystore + .write() + .sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + keystore + .write() + .sr25519_generate_new(Key::ID, Some(&format!("{}/hunter2", PHRASE))) + .unwrap(); + keystore + .write() + .sr25519_generate_new(Key::ID, Some(&format!("{}/hunter3", PHRASE))) + .unwrap(); + t.register_extension(KeystoreExt(keystore)); + + t.execute_with(|| { + let keys = + >::find_all_local_keys(); + assert_eq!(keys.len(), 3, "Missing keys: {:?}", keys); + + let can_sign = >::can_sign(); + assert!( + can_sign, + "Since there are keys, `can_sign` should return true" + ); + + let call = pallet_balances::Call::transfer(Default::default(), Default::default()); + let results = + >::submit_signed(call); + + let len = results.len(); + assert_eq!(len, 3); + assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); + assert_eq!(state.read().transactions.len(), len); + }); } #[test] fn should_submit_signed_twice_from_the_same_account() { - let mut t = 
new_test_ext(COMPACT_CODE, false); - let (pool, state) = TestTransactionPoolExt::new(); - t.register_extension(TransactionPoolExt::new(pool)); - - let keystore = KeyStore::new(); - keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))).unwrap(); - t.register_extension(KeystoreExt(keystore)); - - t.execute_with(|| { - let call = pallet_balances::Call::transfer(Default::default(), Default::default()); - let results = - >::submit_signed(call); - - let len = results.len(); - assert_eq!(len, 1); - assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); - assert_eq!(state.read().transactions.len(), 1); - - // submit another one from the same account. The nonce should be incremented. - let call = pallet_balances::Call::transfer(Default::default(), Default::default()); - let results = - >::submit_signed(call); - - let len = results.len(); - assert_eq!(len, 1); - assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); - assert_eq!(state.read().transactions.len(), 2); - - // now check that the transaction nonces are not equal - let s = state.read(); - fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { - let extra = tx.signature.unwrap().2; - extra.3 - } - let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); - let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); - assert!( - nonce1 != nonce2, - "Transactions should have different nonces. 
Got: {:?}", nonce1 - ); - }); + let mut t = new_test_ext(COMPACT_CODE, false); + let (pool, state) = TestTransactionPoolExt::new(); + t.register_extension(TransactionPoolExt::new(pool)); + + let keystore = KeyStore::new(); + keystore + .write() + .sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + t.register_extension(KeystoreExt(keystore)); + + t.execute_with(|| { + let call = pallet_balances::Call::transfer(Default::default(), Default::default()); + let results = + >::submit_signed(call); + + let len = results.len(); + assert_eq!(len, 1); + assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); + assert_eq!(state.read().transactions.len(), 1); + + // submit another one from the same account. The nonce should be incremented. + let call = pallet_balances::Call::transfer(Default::default(), Default::default()); + let results = + >::submit_signed(call); + + let len = results.len(); + assert_eq!(len, 1); + assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); + assert_eq!(state.read().transactions.len(), 2); + + // now check that the transaction nonces are not equal + let s = state.read(); + fn nonce(tx: UncheckedExtrinsic) -> frame_system::CheckNonce { + let extra = tx.signature.unwrap().2; + extra.3 + } + let nonce1 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[0]).unwrap()); + let nonce2 = nonce(UncheckedExtrinsic::decode(&mut &*s.transactions[1]).unwrap()); + assert!( + nonce1 != nonce2, + "Transactions should have different nonces. 
Got: {:?}", + nonce1 + ); + }); } #[test] fn submitted_transaction_should_be_valid() { - use codec::Encode; - use frame_support::storage::StorageMap; - use sp_runtime::transaction_validity::{ValidTransaction, TransactionSource}; - use sp_runtime::traits::StaticLookup; - - let mut t = new_test_ext(COMPACT_CODE, false); - let (pool, state) = TestTransactionPoolExt::new(); - t.register_extension(TransactionPoolExt::new(pool)); - - let keystore = KeyStore::new(); - keystore.write().sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))).unwrap(); - t.register_extension(KeystoreExt(keystore)); - - t.execute_with(|| { - let call = pallet_balances::Call::transfer(Default::default(), Default::default()); - let results = - >::submit_signed(call); - let len = results.len(); - assert_eq!(len, 1); - assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); - }); - - // check that transaction is valid, but reset environment storage, - // since CreateTransaction increments the nonce - let tx0 = state.read().transactions[0].clone(); - let mut t = new_test_ext(COMPACT_CODE, false); - t.execute_with(|| { - let source = TransactionSource::External; - let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); - // add balance to the account - let author = extrinsic.signature.clone().unwrap().0; - let address = Indices::lookup(author).unwrap(); - let data = pallet_balances::AccountData { free: 5_000_000_000_000, ..Default::default() }; - let account = frame_system::AccountInfo { nonce: 0u32, refcount: 0u8, data }; - >::insert(&address, account); - - // check validity - let res = Executive::validate_transaction(source, extrinsic); - - assert_eq!(res.unwrap(), ValidTransaction { - priority: 2_410_600_000_000, - requires: vec![], - provides: vec![(address, 0).encode()], - longevity: 128, - propagate: true, - }); - }); + use codec::Encode; + use frame_support::storage::StorageMap; + use sp_runtime::traits::StaticLookup; + use 
sp_runtime::transaction_validity::{TransactionSource, ValidTransaction}; + + let mut t = new_test_ext(COMPACT_CODE, false); + let (pool, state) = TestTransactionPoolExt::new(); + t.register_extension(TransactionPoolExt::new(pool)); + + let keystore = KeyStore::new(); + keystore + .write() + .sr25519_generate_new(Key::ID, Some(&format!("{}/hunter1", PHRASE))) + .unwrap(); + t.register_extension(KeystoreExt(keystore)); + + t.execute_with(|| { + let call = pallet_balances::Call::transfer(Default::default(), Default::default()); + let results = + >::submit_signed(call); + let len = results.len(); + assert_eq!(len, 1); + assert_eq!(results.into_iter().filter_map(|x| x.1.ok()).count(), len); + }); + + // check that transaction is valid, but reset environment storage, + // since CreateTransaction increments the nonce + let tx0 = state.read().transactions[0].clone(); + let mut t = new_test_ext(COMPACT_CODE, false); + t.execute_with(|| { + let source = TransactionSource::External; + let extrinsic = UncheckedExtrinsic::decode(&mut &*tx0).unwrap(); + // add balance to the account + let author = extrinsic.signature.clone().unwrap().0; + let address = Indices::lookup(author).unwrap(); + let data = pallet_balances::AccountData { + free: 5_000_000_000_000, + ..Default::default() + }; + let account = frame_system::AccountInfo { + nonce: 0u32, + refcount: 0u8, + data, + }; + >::insert(&address, account); + + // check validity + let res = Executive::validate_transaction(source, extrinsic); + + assert_eq!( + res.unwrap(), + ValidTransaction { + priority: 2_410_600_000_000, + requires: vec![], + provides: vec![(address, 0).encode()], + longevity: 128, + propagate: true, + } + ); + }); } diff --git a/bin/node/inspect/src/cli.rs b/bin/node/inspect/src/cli.rs index 5d51bd5848..e144896d4d 100644 --- a/bin/node/inspect/src/cli.rs +++ b/bin/node/inspect/src/cli.rs @@ -16,47 +16,47 @@ //! Structs to easily compose inspect sub-command for CLI. 
-use std::fmt::Debug; use sc_cli::{ImportParams, SharedParams}; +use std::fmt::Debug; use structopt::StructOpt; /// The `inspect` command used to print decoded chain data. #[derive(Debug, StructOpt, Clone)] pub struct InspectCmd { - #[allow(missing_docs)] - #[structopt(flatten)] - pub command: InspectSubCmd, + #[allow(missing_docs)] + #[structopt(flatten)] + pub command: InspectSubCmd, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, } /// A possible inspect sub-commands. #[derive(Debug, StructOpt, Clone)] pub enum InspectSubCmd { - /// Decode block with native version of runtime and print out the details. - Block { - /// Address of the block to print out. - /// - /// Can be either a block hash (no 0x prefix) or a number to retrieve existing block, - /// or a 0x-prefixed bytes hex string, representing SCALE encoding of - /// a block. - #[structopt(value_name = "HASH or NUMBER or BYTES")] - input: String, - }, - /// Decode extrinsic with native version of runtime and print out the details. - Extrinsic { - /// Address of an extrinsic to print out. - /// - /// Can be either a block hash (no 0x prefix) or number and the index, in the form - /// of `{block}:{index}` or a 0x-prefixed bytes hex string, - /// representing SCALE encoding of an extrinsic. - #[structopt(value_name = "BLOCK:INDEX or BYTES")] - input: String, - }, + /// Decode block with native version of runtime and print out the details. + Block { + /// Address of the block to print out. + /// + /// Can be either a block hash (no 0x prefix) or a number to retrieve existing block, + /// or a 0x-prefixed bytes hex string, representing SCALE encoding of + /// a block. 
+ #[structopt(value_name = "HASH or NUMBER or BYTES")] + input: String, + }, + /// Decode extrinsic with native version of runtime and print out the details. + Extrinsic { + /// Address of an extrinsic to print out. + /// + /// Can be either a block hash (no 0x prefix) or number and the index, in the form + /// of `{block}:{index}` or a 0x-prefixed bytes hex string, + /// representing SCALE encoding of an extrinsic. + #[structopt(value_name = "BLOCK:INDEX or BYTES")] + input: String, + }, } diff --git a/bin/node/inspect/src/command.rs b/bin/node/inspect/src/command.rs index 2212907f76..50180cd788 100644 --- a/bin/node/inspect/src/command.rs +++ b/bin/node/inspect/src/command.rs @@ -24,40 +24,40 @@ use sp_runtime::traits::Block; use std::str::FromStr; impl InspectCmd { - /// Run the inspect command, passing the inspector. - pub fn run(&self, config: Configuration) -> Result<()> - where - B: Block, - B::Hash: FromStr, - RA: Send + Sync + 'static, - EX: NativeExecutionDispatch + 'static, - { - let client = new_full_client::(&config)?; - let inspect = Inspector::::new(client); - - match &self.command { - InspectSubCmd::Block { input } => { - let input = input.parse()?; - let res = inspect.block(input).map_err(|e| format!("{}", e))?; - println!("{}", res); - Ok(()) - } - InspectSubCmd::Extrinsic { input } => { - let input = input.parse()?; - let res = inspect.extrinsic(input).map_err(|e| format!("{}", e))?; - println!("{}", res); - Ok(()) - } - } - } + /// Run the inspect command, passing the inspector. 
+ pub fn run(&self, config: Configuration) -> Result<()> + where + B: Block, + B::Hash: FromStr, + RA: Send + Sync + 'static, + EX: NativeExecutionDispatch + 'static, + { + let client = new_full_client::(&config)?; + let inspect = Inspector::::new(client); + + match &self.command { + InspectSubCmd::Block { input } => { + let input = input.parse()?; + let res = inspect.block(input).map_err(|e| format!("{}", e))?; + println!("{}", res); + Ok(()) + } + InspectSubCmd::Extrinsic { input } => { + let input = input.parse()?; + let res = inspect.extrinsic(input).map_err(|e| format!("{}", e))?; + println!("{}", res); + Ok(()) + } + } + } } impl CliConfiguration for InspectCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - fn import_params(&self) -> Option<&ImportParams> { - Some(&self.import_params) - } + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } } diff --git a/bin/node/inspect/src/lib.rs b/bin/node/inspect/src/lib.rs index b8101d98a3..5e019e4f41 100644 --- a/bin/node/inspect/src/lib.rs +++ b/bin/node/inspect/src/lib.rs @@ -25,303 +25,305 @@ pub mod cli; pub mod command; -use std::{ - fmt, - fmt::Debug, - marker::PhantomData, - str::FromStr, -}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sc_client_api::BlockBackend; use sp_blockchain::HeaderBackend; use sp_core::hexdisplay::HexDisplay; use sp_runtime::{ - generic::BlockId, - traits::{Block, HashFor, NumberFor, Hash} + generic::BlockId, + traits::{Block, Hash, HashFor, NumberFor}, }; +use std::{fmt, fmt::Debug, marker::PhantomData, str::FromStr}; /// A helper type for a generic block input. -pub type BlockAddressFor = BlockAddress< - as Hash>::Output, - NumberFor ->; +pub type BlockAddressFor = + BlockAddress< as Hash>::Output, NumberFor>; /// A Pretty formatter implementation. pub trait PrettyPrinter { - /// Nicely format block. 
- fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result; - /// Nicely format extrinsic. - fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result; + /// Nicely format block. + fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result; + /// Nicely format extrinsic. + fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) + -> fmt::Result; } /// Default dummy debug printer. #[derive(Default)] pub struct DebugPrinter; impl PrettyPrinter for DebugPrinter { - fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result { - writeln!(fmt, "Header:")?; - writeln!(fmt, "{:?}", block.header())?; - writeln!(fmt, "Block bytes: {:?}", HexDisplay::from(&block.encode()))?; - writeln!(fmt, "Extrinsics ({})", block.extrinsics().len())?; - for (idx, ex) in block.extrinsics().iter().enumerate() { - writeln!(fmt, "- {}:", idx)?; - >::fmt_extrinsic(self, fmt, ex)?; - } - Ok(()) - } - - fn fmt_extrinsic(&self, fmt: &mut fmt::Formatter, extrinsic: &TBlock::Extrinsic) -> fmt::Result { - writeln!(fmt, " {:?}", extrinsic)?; - writeln!(fmt, " Bytes: {:?}", HexDisplay::from(&extrinsic.encode()))?; - Ok(()) - } + fn fmt_block(&self, fmt: &mut fmt::Formatter, block: &TBlock) -> fmt::Result { + writeln!(fmt, "Header:")?; + writeln!(fmt, "{:?}", block.header())?; + writeln!(fmt, "Block bytes: {:?}", HexDisplay::from(&block.encode()))?; + writeln!(fmt, "Extrinsics ({})", block.extrinsics().len())?; + for (idx, ex) in block.extrinsics().iter().enumerate() { + writeln!(fmt, "- {}:", idx)?; + >::fmt_extrinsic(self, fmt, ex)?; + } + Ok(()) + } + + fn fmt_extrinsic( + &self, + fmt: &mut fmt::Formatter, + extrinsic: &TBlock::Extrinsic, + ) -> fmt::Result { + writeln!(fmt, " {:?}", extrinsic)?; + writeln!(fmt, " Bytes: {:?}", HexDisplay::from(&extrinsic.encode()))?; + Ok(()) + } } /// Aggregated error for `Inspector` operations. 
#[derive(Debug, derive_more::From, derive_more::Display)] pub enum Error { - /// Could not decode Block or Extrinsic. - Codec(codec::Error), - /// Error accessing blockchain DB. - Blockchain(sp_blockchain::Error), - /// Given block has not been found. - NotFound(String), + /// Could not decode Block or Extrinsic. + Codec(codec::Error), + /// Error accessing blockchain DB. + Blockchain(sp_blockchain::Error), + /// Given block has not been found. + NotFound(String), } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match *self { - Self::Codec(ref e) => Some(e), - Self::Blockchain(ref e) => Some(e), - Self::NotFound(_) => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match *self { + Self::Codec(ref e) => Some(e), + Self::Blockchain(ref e) => Some(e), + Self::NotFound(_) => None, + } + } } /// A helper trait to access block headers and bodies. -pub trait ChainAccess: - HeaderBackend + - BlockBackend -{} +pub trait ChainAccess: HeaderBackend + BlockBackend {} -impl ChainAccess for T where - TBlock: Block, - T: sp_blockchain::HeaderBackend + sc_client_api::BlockBackend, -{} +impl ChainAccess for T +where + TBlock: Block, + T: sp_blockchain::HeaderBackend + sc_client_api::BlockBackend, +{ +} /// Blockchain inspector. pub struct Inspector = DebugPrinter> { - printer: TPrinter, - chain: Box>, - _block: PhantomData, + printer: TPrinter, + chain: Box>, + _block: PhantomData, } impl> Inspector { - /// Create new instance of the inspector with default printer. - pub fn new( - chain: impl ChainAccess + 'static, - ) -> Self where TPrinter: Default { - Self::with_printer(chain, Default::default()) - } - - /// Customize pretty-printing of the data. - pub fn with_printer( - chain: impl ChainAccess + 'static, - printer: TPrinter, - ) -> Self { - Inspector { - chain: Box::new(chain) as _, - printer, - _block: Default::default(), - } - } - - /// Get a pretty-printed block. 
- pub fn block(&self, input: BlockAddressFor) -> Result { - struct BlockPrinter<'a, A, B>(A, &'a B); - impl<'a, A: Block, B: PrettyPrinter> fmt::Display for BlockPrinter<'a, A, B> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - self.1.fmt_block(fmt, &self.0) - } - } - - let block = self.get_block(input)?; - Ok(format!("{}", BlockPrinter(block, &self.printer))) - } - - fn get_block(&self, input: BlockAddressFor) -> Result { - Ok(match input { - BlockAddress::Bytes(bytes) => { - TBlock::decode(&mut &*bytes)? - }, - BlockAddress::Number(number) => { - let id = BlockId::number(number); - let not_found = format!("Could not find block {:?}", id); - let body = self.chain.block_body(&id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - let header = self.chain.header(id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - TBlock::new(header, body) - }, - BlockAddress::Hash(hash) => { - let id = BlockId::hash(hash); - let not_found = format!("Could not find block {:?}", id); - let body = self.chain.block_body(&id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - let header = self.chain.header(id)? - .ok_or_else(|| Error::NotFound(not_found.clone()))?; - TBlock::new(header, body) - }, - }) - } - - /// Get a pretty-printed extrinsic. - pub fn extrinsic( - &self, - input: ExtrinsicAddress< as Hash>::Output, NumberFor>, - ) -> Result { - struct ExtrinsicPrinter<'a, A: Block, B>(A::Extrinsic, &'a B); - impl<'a, A: Block, B: PrettyPrinter> fmt::Display for ExtrinsicPrinter<'a, A, B> { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - self.1.fmt_extrinsic(fmt, &self.0) - } - } - - let ext = match input { - ExtrinsicAddress::Block(block, index) => { - let block = self.get_block(block)?; - block.extrinsics() - .get(index) - .cloned() - .ok_or_else(|| Error::NotFound(format!( - "Could not find extrinsic {} in block {:?}", index, block - )))? - }, - ExtrinsicAddress::Bytes(bytes) => { - TBlock::Extrinsic::decode(&mut &*bytes)? 
- } - }; - - Ok(format!("{}", ExtrinsicPrinter(ext, &self.printer))) - } + /// Create new instance of the inspector with default printer. + pub fn new(chain: impl ChainAccess + 'static) -> Self + where + TPrinter: Default, + { + Self::with_printer(chain, Default::default()) + } + + /// Customize pretty-printing of the data. + pub fn with_printer(chain: impl ChainAccess + 'static, printer: TPrinter) -> Self { + Inspector { + chain: Box::new(chain) as _, + printer, + _block: Default::default(), + } + } + + /// Get a pretty-printed block. + pub fn block(&self, input: BlockAddressFor) -> Result { + struct BlockPrinter<'a, A, B>(A, &'a B); + impl<'a, A: Block, B: PrettyPrinter> fmt::Display for BlockPrinter<'a, A, B> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.1.fmt_block(fmt, &self.0) + } + } + + let block = self.get_block(input)?; + Ok(format!("{}", BlockPrinter(block, &self.printer))) + } + + fn get_block(&self, input: BlockAddressFor) -> Result { + Ok(match input { + BlockAddress::Bytes(bytes) => TBlock::decode(&mut &*bytes)?, + BlockAddress::Number(number) => { + let id = BlockId::number(number); + let not_found = format!("Could not find block {:?}", id); + let body = self + .chain + .block_body(&id)? + .ok_or_else(|| Error::NotFound(not_found.clone()))?; + let header = self + .chain + .header(id)? + .ok_or_else(|| Error::NotFound(not_found.clone()))?; + TBlock::new(header, body) + } + BlockAddress::Hash(hash) => { + let id = BlockId::hash(hash); + let not_found = format!("Could not find block {:?}", id); + let body = self + .chain + .block_body(&id)? + .ok_or_else(|| Error::NotFound(not_found.clone()))?; + let header = self + .chain + .header(id)? + .ok_or_else(|| Error::NotFound(not_found.clone()))?; + TBlock::new(header, body) + } + }) + } + + /// Get a pretty-printed extrinsic. 
+ pub fn extrinsic( + &self, + input: ExtrinsicAddress< as Hash>::Output, NumberFor>, + ) -> Result { + struct ExtrinsicPrinter<'a, A: Block, B>(A::Extrinsic, &'a B); + impl<'a, A: Block, B: PrettyPrinter> fmt::Display for ExtrinsicPrinter<'a, A, B> { + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + self.1.fmt_extrinsic(fmt, &self.0) + } + } + + let ext = match input { + ExtrinsicAddress::Block(block, index) => { + let block = self.get_block(block)?; + block.extrinsics().get(index).cloned().ok_or_else(|| { + Error::NotFound(format!( + "Could not find extrinsic {} in block {:?}", + index, block + )) + })? + } + ExtrinsicAddress::Bytes(bytes) => TBlock::Extrinsic::decode(&mut &*bytes)?, + }; + + Ok(format!("{}", ExtrinsicPrinter(ext, &self.printer))) + } } /// A block to retrieve. #[derive(Debug, Clone, PartialEq)] pub enum BlockAddress { - /// Get block by hash. - Hash(Hash), - /// Get block by number. - Number(Number), - /// Raw SCALE-encoded bytes. - Bytes(Vec), + /// Get block by hash. + Hash(Hash), + /// Get block by number. + Number(Number), + /// Raw SCALE-encoded bytes. + Bytes(Vec), } impl FromStr for BlockAddress { - type Err = String; - - fn from_str(s: &str) -> Result { - // try to parse hash first - if let Ok(hash) = s.parse() { - return Ok(Self::Hash(hash)) - } - - // then number - if let Ok(number) = s.parse() { - return Ok(Self::Number(number)) - } - - // then assume it's bytes (hex-encoded) - sp_core::bytes::from_hex(s) - .map(Self::Bytes) - .map_err(|e| format!( + type Err = String; + + fn from_str(s: &str) -> Result { + // try to parse hash first + if let Ok(hash) = s.parse() { + return Ok(Self::Hash(hash)); + } + + // then number + if let Ok(number) = s.parse() { + return Ok(Self::Number(number)); + } + + // then assume it's bytes (hex-encoded) + sp_core::bytes::from_hex(s).map(Self::Bytes).map_err(|e| { + format!( "Given string does not look like hash or number. 
It could not be parsed as bytes either: {}", e - )) - } + ) + }) + } } /// An extrinsic address to decode and print out. #[derive(Debug, Clone, PartialEq)] pub enum ExtrinsicAddress { - /// Extrinsic as part of existing block. - Block(BlockAddress, usize), - /// Raw SCALE-encoded extrinsic bytes. - Bytes(Vec), + /// Extrinsic as part of existing block. + Block(BlockAddress, usize), + /// Raw SCALE-encoded extrinsic bytes. + Bytes(Vec), } impl FromStr for ExtrinsicAddress { - type Err = String; - - fn from_str(s: &str) -> Result { - // first try raw bytes - if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { - return Ok(bytes) - } - - // split by a bunch of different characters - let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); - let block = it.next() - .expect("First element of split iterator is never empty; qed") - .parse()?; - - let index = it.next() - .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? - .parse() - .map_err(|e| format!("Invalid index format: {}", e))?; - - Ok(Self::Block(block, index)) - } + type Err = String; + + fn from_str(s: &str) -> Result { + // first try raw bytes + if let Ok(bytes) = sp_core::bytes::from_hex(s).map(Self::Bytes) { + return Ok(bytes); + } + + // split by a bunch of different characters + let mut it = s.split(|c| c == '.' || c == ':' || c == ' '); + let block = it + .next() + .expect("First element of split iterator is never empty; qed") + .parse()?; + + let index = it + .next() + .ok_or_else(|| format!("Extrinsic index missing: example \"5:0\""))? 
+ .parse() + .map_err(|e| format!("Invalid index format: {}", e))?; + + Ok(Self::Block(block, index)) + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::hash::H160 as Hash; - - #[test] - fn should_parse_block_strings() { - type BlockAddress = super::BlockAddress; - - let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258"); - let b1 = BlockAddress::from_str("1234"); - let b2 = BlockAddress::from_str("0"); - let b3 = BlockAddress::from_str("0x0012345f"); - - - assert_eq!(b0, Ok(BlockAddress::Hash( - "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() - ))); - assert_eq!(b1, Ok(BlockAddress::Number(1234))); - assert_eq!(b2, Ok(BlockAddress::Number(0))); - assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); - } - - #[test] - fn should_parse_extrinsic_address() { - type BlockAddress = super::BlockAddress; - type ExtrinsicAddress = super::ExtrinsicAddress; - - let e0 = ExtrinsicAddress::from_str("1234"); - let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5"); - let b1 = ExtrinsicAddress::from_str("1234:0"); - let b2 = ExtrinsicAddress::from_str("0 0"); - let b3 = ExtrinsicAddress::from_str("0x0012345f"); - - - assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); - assert_eq!(b0, Ok(ExtrinsicAddress::Block( - BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), - 5 - ))); - assert_eq!(b1, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(1234), - 0 - ))); - assert_eq!(b2, Ok(ExtrinsicAddress::Block( - BlockAddress::Number(0), - 0 - ))); - assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); - } + use super::*; + use sp_core::hash::H160 as Hash; + + #[test] + fn should_parse_block_strings() { + type BlockAddress = super::BlockAddress; + + let b0 = BlockAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258"); + let b1 = BlockAddress::from_str("1234"); + let b2 = BlockAddress::from_str("0"); + let b3 = 
BlockAddress::from_str("0x0012345f"); + + assert_eq!( + b0, + Ok(BlockAddress::Hash( + "3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap() + )) + ); + assert_eq!(b1, Ok(BlockAddress::Number(1234))); + assert_eq!(b2, Ok(BlockAddress::Number(0))); + assert_eq!(b3, Ok(BlockAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); + } + + #[test] + fn should_parse_extrinsic_address() { + type BlockAddress = super::BlockAddress; + type ExtrinsicAddress = super::ExtrinsicAddress; + + let e0 = ExtrinsicAddress::from_str("1234"); + let b0 = ExtrinsicAddress::from_str("3BfC20f0B9aFcAcE800D73D2191166FF16540258:5"); + let b1 = ExtrinsicAddress::from_str("1234:0"); + let b2 = ExtrinsicAddress::from_str("0 0"); + let b3 = ExtrinsicAddress::from_str("0x0012345f"); + + assert_eq!(e0, Err("Extrinsic index missing: example \"5:0\"".into())); + assert_eq!( + b0, + Ok(ExtrinsicAddress::Block( + BlockAddress::Hash("3BfC20f0B9aFcAcE800D73D2191166FF16540258".parse().unwrap()), + 5 + )) + ); + assert_eq!( + b1, + Ok(ExtrinsicAddress::Block(BlockAddress::Number(1234), 0)) + ); + assert_eq!(b2, Ok(ExtrinsicAddress::Block(BlockAddress::Number(0), 0))); + assert_eq!(b3, Ok(ExtrinsicAddress::Bytes(vec![0, 0x12, 0x34, 0x5f]))); + } } diff --git a/bin/node/primitives/src/lib.rs b/bin/node/primitives/src/lib.rs index 97e8f50c27..030024c68d 100644 --- a/bin/node/primitives/src/lib.rs +++ b/bin/node/primitives/src/lib.rs @@ -17,11 +17,12 @@ //! Low-level types used throughout the Substrate code. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] use sp_runtime::{ - generic, traits::{Verify, BlakeTwo256, IdentifyAccount}, OpaqueExtrinsic, MultiSignature + generic, + traits::{BlakeTwo256, IdentifyAccount, Verify}, + MultiSignature, OpaqueExtrinsic, }; /// An index to a block. 
diff --git a/bin/node/rpc-client/src/main.rs b/bin/node/rpc-client/src/main.rs index c547d30002..7979d7da65 100644 --- a/bin/node/rpc-client/src/main.rs +++ b/bin/node/rpc-client/src/main.rs @@ -23,30 +23,22 @@ use futures::Future; use hyper::rt; +use jsonrpc_core_client::{transports::http, RpcError}; use node_primitives::Hash; -use sc_rpc::author::{ - AuthorClient, - hash::ExtrinsicOrHash, -}; -use jsonrpc_core_client::{ - transports::http, - RpcError, -}; +use sc_rpc::author::{hash::ExtrinsicOrHash, AuthorClient}; fn main() { - env_logger::init(); + env_logger::init(); - rt::run(rt::lazy(|| { - let uri = "http://localhost:9933"; + rt::run(rt::lazy(|| { + let uri = "http://localhost:9933"; - http::connect(uri) - .and_then(|client: AuthorClient| { - remove_all_extrinsics(client) - }) - .map_err(|e| { - println!("Error: {:?}", e); - }) - })) + http::connect(uri) + .and_then(|client: AuthorClient| remove_all_extrinsics(client)) + .map_err(|e| { + println!("Error: {:?}", e); + }) + })) } /// Remove all pending extrinsics from the node. @@ -57,14 +49,20 @@ fn main() { /// /// As the result of running the code the entire content of the transaction pool is going /// to be removed and the extrinsics are going to be temporarily banned. 
-fn remove_all_extrinsics(client: AuthorClient) -> impl Future { - client.pending_extrinsics() - .and_then(move |pending| { - client.remove_extrinsic( - pending.into_iter().map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())).collect() - ) - }) - .map(|removed| { - println!("Removed extrinsics: {:?}", removed); - }) +fn remove_all_extrinsics( + client: AuthorClient, +) -> impl Future { + client + .pending_extrinsics() + .and_then(move |pending| { + client.remove_extrinsic( + pending + .into_iter() + .map(|tx| ExtrinsicOrHash::Extrinsic(tx.into())) + .collect(), + ) + }) + .map(|removed| { + println!("Removed extrinsics: {:?}", removed); + }) } diff --git a/bin/node/rpc/src/lib.rs b/bin/node/rpc/src/lib.rs index 4e1cfa5673..f6d9aa1ff5 100644 --- a/bin/node/rpc/src/lib.rs +++ b/bin/node/rpc/src/lib.rs @@ -29,130 +29,135 @@ #![warn(missing_docs)] -use std::{sync::Arc, fmt}; +use std::{fmt, sync::Arc}; -use node_primitives::{Block, BlockNumber, AccountId, Index, Balance}; +use node_primitives::{AccountId, Balance, Block, BlockNumber, Index}; use node_runtime::UncheckedExtrinsic; +use sc_consensus_babe::{Config, Epoch}; +use sc_consensus_babe_rpc::BabeRPCHandler; +use sc_consensus_epochs::SharedEpochChanges; +use sc_keystore::KeyStorePtr; use sp_api::ProvideRuntimeApi; -use sp_transaction_pool::TransactionPool; -use sp_blockchain::{Error as BlockChainError, HeaderMetadata, HeaderBackend}; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; use sp_consensus::SelectChain; -use sc_keystore::KeyStorePtr; use sp_consensus_babe::BabeApi; -use sc_consensus_epochs::SharedEpochChanges; -use sc_consensus_babe::{Config, Epoch}; -use sc_consensus_babe_rpc::BabeRPCHandler; +use sp_transaction_pool::TransactionPool; /// Light client extra dependencies. pub struct LightDeps { - /// The client instance to use. - pub client: Arc, - /// Transaction pool instance. - pub pool: Arc

, - /// Remote access to the blockchain (async). - pub remote_blockchain: Arc>, - /// Fetcher instance. - pub fetcher: Arc, + /// The client instance to use. + pub client: Arc, + /// Transaction pool instance. + pub pool: Arc

, + /// Remote access to the blockchain (async). + pub remote_blockchain: Arc>, + /// Fetcher instance. + pub fetcher: Arc, } /// Extra dependencies for BABE. pub struct BabeDeps { - /// BABE protocol config. - pub babe_config: Config, - /// BABE pending epoch changes. - pub shared_epoch_changes: SharedEpochChanges, - /// The keystore that manages the keys of the node. - pub keystore: KeyStorePtr, + /// BABE protocol config. + pub babe_config: Config, + /// BABE pending epoch changes. + pub shared_epoch_changes: SharedEpochChanges, + /// The keystore that manages the keys of the node. + pub keystore: KeyStorePtr, } /// Full client dependencies. pub struct FullDeps { - /// The client instance to use. - pub client: Arc, - /// Transaction pool instance. - pub pool: Arc

, - /// The SelectChain Strategy - pub select_chain: SC, - /// BABE specific dependencies. - pub babe: BabeDeps, + /// The client instance to use. + pub client: Arc, + /// Transaction pool instance. + pub pool: Arc

, + /// The SelectChain Strategy + pub select_chain: SC, + /// BABE specific dependencies. + pub babe: BabeDeps, } /// Instantiate all Full RPC extensions. -pub fn create_full( - deps: FullDeps, -) -> jsonrpc_core::IoHandler where - C: ProvideRuntimeApi, - C: HeaderBackend + HeaderMetadata + 'static, - C: Send + Sync + 'static, - C::Api: substrate_frame_rpc_system::AccountNonceApi, - C::Api: pallet_contracts_rpc::ContractsRuntimeApi, - C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi, - C::Api: BabeApi, - ::Error: fmt::Debug, - P: TransactionPool + 'static, - M: jsonrpc_core::Metadata + Default, - SC: SelectChain +'static, +pub fn create_full(deps: FullDeps) -> jsonrpc_core::IoHandler +where + C: ProvideRuntimeApi, + C: HeaderBackend + HeaderMetadata + 'static, + C: Send + Sync + 'static, + C::Api: substrate_frame_rpc_system::AccountNonceApi, + C::Api: pallet_contracts_rpc::ContractsRuntimeApi, + C::Api: pallet_transaction_payment_rpc::TransactionPaymentRuntimeApi< + Block, + Balance, + UncheckedExtrinsic, + >, + C::Api: BabeApi, + ::Error: fmt::Debug, + P: TransactionPool + 'static, + M: jsonrpc_core::Metadata + Default, + SC: SelectChain + 'static, { - use substrate_frame_rpc_system::{FullSystem, SystemApi}; - use pallet_contracts_rpc::{Contracts, ContractsApi}; - use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; - - let mut io = jsonrpc_core::IoHandler::default(); - let FullDeps { - client, - pool, - select_chain, - babe - } = deps; - let BabeDeps { - keystore, - babe_config, - shared_epoch_changes, - } = babe; - - io.extend_with( - SystemApi::to_delegate(FullSystem::new(client.clone(), pool)) - ); - // Making synchronous calls in light client freezes the browser currently, - // more context: https://github.com/paritytech/substrate/pull/3480 - // These RPCs should use an asynchronous caller instead. 
- io.extend_with( - ContractsApi::to_delegate(Contracts::new(client.clone())) - ); - io.extend_with( - TransactionPaymentApi::to_delegate(TransactionPayment::new(client.clone())) - ); - io.extend_with( - sc_consensus_babe_rpc::BabeApi::to_delegate( - BabeRPCHandler::new(client, shared_epoch_changes, keystore, babe_config, select_chain) - ) - ); - - io + use pallet_contracts_rpc::{Contracts, ContractsApi}; + use pallet_transaction_payment_rpc::{TransactionPayment, TransactionPaymentApi}; + use substrate_frame_rpc_system::{FullSystem, SystemApi}; + + let mut io = jsonrpc_core::IoHandler::default(); + let FullDeps { + client, + pool, + select_chain, + babe, + } = deps; + let BabeDeps { + keystore, + babe_config, + shared_epoch_changes, + } = babe; + + io.extend_with(SystemApi::to_delegate(FullSystem::new( + client.clone(), + pool, + ))); + // Making synchronous calls in light client freezes the browser currently, + // more context: https://github.com/paritytech/substrate/pull/3480 + // These RPCs should use an asynchronous caller instead. + io.extend_with(ContractsApi::to_delegate(Contracts::new(client.clone()))); + io.extend_with(TransactionPaymentApi::to_delegate(TransactionPayment::new( + client.clone(), + ))); + io.extend_with(sc_consensus_babe_rpc::BabeApi::to_delegate( + BabeRPCHandler::new( + client, + shared_epoch_changes, + keystore, + babe_config, + select_chain, + ), + )); + + io } /// Instantiate all Light RPC extensions. 
-pub fn create_light( - deps: LightDeps, -) -> jsonrpc_core::IoHandler where - C: sc_client::blockchain::HeaderBackend, - C: Send + Sync + 'static, - F: sc_client::light::fetcher::Fetcher + 'static, - P: TransactionPool + 'static, - M: jsonrpc_core::Metadata + Default, +pub fn create_light(deps: LightDeps) -> jsonrpc_core::IoHandler +where + C: sc_client::blockchain::HeaderBackend, + C: Send + Sync + 'static, + F: sc_client::light::fetcher::Fetcher + 'static, + P: TransactionPool + 'static, + M: jsonrpc_core::Metadata + Default, { - use substrate_frame_rpc_system::{LightSystem, SystemApi}; - - let LightDeps { - client, - pool, - remote_blockchain, - fetcher - } = deps; - let mut io = jsonrpc_core::IoHandler::default(); - io.extend_with( - SystemApi::::to_delegate(LightSystem::new(client, remote_blockchain, fetcher, pool)) - ); - - io + use substrate_frame_rpc_system::{LightSystem, SystemApi}; + + let LightDeps { + client, + pool, + remote_blockchain, + fetcher, + } = deps; + let mut io = jsonrpc_core::IoHandler::default(); + io.extend_with(SystemApi::::to_delegate( + LightSystem::new(client, remote_blockchain, fetcher, pool), + )); + + io } diff --git a/bin/node/runtime/build.rs b/bin/node/runtime/build.rs index 647b476814..c54e86d8db 100644 --- a/bin/node/runtime/build.rs +++ b/bin/node/runtime/build.rs @@ -17,10 +17,10 @@ use wasm_builder_runner::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") - .export_heap_base() - .import_memory() - .build() + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") + .export_heap_base() + .import_memory() + .build() } diff --git a/bin/node/runtime/src/constants.rs b/bin/node/runtime/src/constants.rs index bf12492f8d..6d408749de 100644 --- a/bin/node/runtime/src/constants.rs +++ b/bin/node/runtime/src/constants.rs @@ -18,51 +18,51 @@ /// Money matters. 
pub mod currency { - use node_primitives::Balance; + use node_primitives::Balance; - pub const MILLICENTS: Balance = 1_000_000_000; - pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. - pub const DOLLARS: Balance = 100 * CENTS; + pub const MILLICENTS: Balance = 1_000_000_000; + pub const CENTS: Balance = 1_000 * MILLICENTS; // assume this is worth about a cent. + pub const DOLLARS: Balance = 100 * CENTS; } /// Time. pub mod time { - use node_primitives::{Moment, BlockNumber}; + use node_primitives::{BlockNumber, Moment}; - /// Since BABE is probabilistic this is the average expected block time that - /// we are targetting. Blocks will be produced at a minimum duration defined - /// by `SLOT_DURATION`, but some slots will not be allocated to any - /// authority and hence no block will be produced. We expect to have this - /// block time on average following the defined slot duration and the value - /// of `c` configured for BABE (where `1 - c` represents the probability of - /// a slot being empty). - /// This value is only used indirectly to define the unit constants below - /// that are expressed in blocks. The rest of the code should use - /// `SLOT_DURATION` instead (like the Timestamp pallet for calculating the - /// minimum period). - /// - /// If using BABE with secondary slots (default) then all of the slots will - /// always be assigned, in which case `MILLISECS_PER_BLOCK` and - /// `SLOT_DURATION` should have the same value. - /// - /// - pub const MILLISECS_PER_BLOCK: Moment = 3000; - pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; + /// Since BABE is probabilistic this is the average expected block time that + /// we are targetting. Blocks will be produced at a minimum duration defined + /// by `SLOT_DURATION`, but some slots will not be allocated to any + /// authority and hence no block will be produced. 
We expect to have this + /// block time on average following the defined slot duration and the value + /// of `c` configured for BABE (where `1 - c` represents the probability of + /// a slot being empty). + /// This value is only used indirectly to define the unit constants below + /// that are expressed in blocks. The rest of the code should use + /// `SLOT_DURATION` instead (like the Timestamp pallet for calculating the + /// minimum period). + /// + /// If using BABE with secondary slots (default) then all of the slots will + /// always be assigned, in which case `MILLISECS_PER_BLOCK` and + /// `SLOT_DURATION` should have the same value. + /// + /// + pub const MILLISECS_PER_BLOCK: Moment = 3000; + pub const SECS_PER_BLOCK: Moment = MILLISECS_PER_BLOCK / 1000; - pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; + pub const SLOT_DURATION: Moment = MILLISECS_PER_BLOCK; - // 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks. - pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); + // 1 in 4 blocks (on average, not counting collisions) will be primary BABE blocks. + pub const PRIMARY_PROBABILITY: (u64, u64) = (1, 4); - pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES; - pub const EPOCH_DURATION_IN_SLOTS: u64 = { - const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; + pub const EPOCH_DURATION_IN_BLOCKS: BlockNumber = 10 * MINUTES; + pub const EPOCH_DURATION_IN_SLOTS: u64 = { + const SLOT_FILL_RATE: f64 = MILLISECS_PER_BLOCK as f64 / SLOT_DURATION as f64; - (EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64 - }; + (EPOCH_DURATION_IN_BLOCKS as f64 * SLOT_FILL_RATE) as u64 + }; - // These time units are defined in number of blocks. - pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber); - pub const HOURS: BlockNumber = MINUTES * 60; - pub const DAYS: BlockNumber = HOURS * 24; + // These time units are defined in number of blocks. 
+ pub const MINUTES: BlockNumber = 60 / (SECS_PER_BLOCK as BlockNumber); + pub const HOURS: BlockNumber = MINUTES * 60; + pub const DAYS: BlockNumber = HOURS * 24; } diff --git a/bin/node/runtime/src/impls.rs b/bin/node/runtime/src/impls.rs index f613dc5af5..493584b61b 100644 --- a/bin/node/runtime/src/impls.rs +++ b/bin/node/runtime/src/impls.rs @@ -16,18 +16,21 @@ //! Some configurable implementations as associated type for the substrate runtime. +use crate::{Authorship, Balances, MaximumBlockWeight, NegativeImbalance, System}; use core::num::NonZeroI128; +use frame_support::{ + traits::{Currency, Get, OnUnbalanced}, + weights::Weight, +}; use node_primitives::Balance; use sp_runtime::traits::{Convert, Saturating}; use sp_runtime::{Fixed128, Perquintill}; -use frame_support::{traits::{OnUnbalanced, Currency, Get}, weights::Weight}; -use crate::{Balances, System, Authorship, MaximumBlockWeight, NegativeImbalance}; pub struct Author; impl OnUnbalanced for Author { - fn on_nonzero_unbalanced(amount: NegativeImbalance) { - Balances::resolve_creating(&Authorship::author(), amount); - } + fn on_nonzero_unbalanced(amount: NegativeImbalance) { + Balances::resolve_creating(&Authorship::author(), amount); + } } /// Struct that handles the conversion of Balance -> `u64`. 
This is used for staking's election @@ -35,15 +38,21 @@ impl OnUnbalanced for Author { pub struct CurrencyToVoteHandler; impl CurrencyToVoteHandler { - fn factor() -> Balance { (Balances::total_issuance() / u64::max_value() as Balance).max(1) } + fn factor() -> Balance { + (Balances::total_issuance() / u64::max_value() as Balance).max(1) + } } impl Convert for CurrencyToVoteHandler { - fn convert(x: Balance) -> u64 { (x / Self::factor()) as u64 } + fn convert(x: Balance) -> u64 { + (x / Self::factor()) as u64 + } } impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> Balance { x * Self::factor() } + fn convert(x: u128) -> Balance { + x * Self::factor() + } } /// Convert from weight to balance via a simple coefficient multiplication @@ -51,11 +60,11 @@ impl Convert for CurrencyToVoteHandler { pub struct LinearWeightToFee(sp_std::marker::PhantomData); impl> Convert for LinearWeightToFee { - fn convert(w: Weight) -> Balance { - // setting this to zero will disable the weight fee. - let coefficient = C::get(); - Balance::from(w).saturating_mul(coefficient) - } + fn convert(w: Weight) -> Balance { + // setting this to zero will disable the weight fee. 
+ let coefficient = C::get(); + Balance::from(w).saturating_mul(coefficient) + } } /// Update the given multiplier based on the following formula @@ -69,314 +78,321 @@ impl> Convert for LinearWeightToFee { pub struct TargetedFeeAdjustment(sp_std::marker::PhantomData); impl> Convert for TargetedFeeAdjustment { - fn convert(multiplier: Fixed128) -> Fixed128 { - let block_weight = System::all_extrinsics_weight(); - let max_weight = MaximumBlockWeight::get(); - let target_weight = (T::get() * max_weight) as u128; - let block_weight = block_weight as u128; - - // determines if the first_term is positive - let positive = block_weight >= target_weight; - let diff_abs = block_weight.max(target_weight) - block_weight.min(target_weight); - // safe, diff_abs cannot exceed u64 and it can always be computed safely even with the lossy - // `Fixed128::from_rational`. - let diff = Fixed128::from_rational( - diff_abs as i128, - NonZeroI128::new(max_weight.max(1) as i128).unwrap(), - ); - let diff_squared = diff.saturating_mul(diff); - - // 0.00004 = 4/100_000 = 40_000/10^9 - let v = Fixed128::from_rational(4, NonZeroI128::new(100_000).unwrap()); - // 0.00004^2 = 16/10^10 Taking the future /2 into account... 8/10^10 - let v_squared_2 = Fixed128::from_rational(8, NonZeroI128::new(10_000_000_000).unwrap()); - - let first_term = v.saturating_mul(diff); - let second_term = v_squared_2.saturating_mul(diff_squared); - - if positive { - // Note: this is merely bounded by how big the multiplier and the inner value can go, - // not by any economical reasoning. - let excess = first_term.saturating_add(second_term); - multiplier.saturating_add(excess) - } else { - // Defensive-only: first_term > second_term. Safe subtraction. 
- let negative = first_term.saturating_sub(second_term); - multiplier.saturating_sub(negative) - // despite the fact that apply_to saturates weight (final fee cannot go below 0) - // it is crucially important to stop here and don't further reduce the weight fee - // multiplier. While at -1, it means that the network is so un-congested that all - // transactions have no weight fee. We stop here and only increase if the network - // became more busy. - .max(Fixed128::from_natural(-1)) - } - } + fn convert(multiplier: Fixed128) -> Fixed128 { + let block_weight = System::all_extrinsics_weight(); + let max_weight = MaximumBlockWeight::get(); + let target_weight = (T::get() * max_weight) as u128; + let block_weight = block_weight as u128; + + // determines if the first_term is positive + let positive = block_weight >= target_weight; + let diff_abs = block_weight.max(target_weight) - block_weight.min(target_weight); + // safe, diff_abs cannot exceed u64 and it can always be computed safely even with the lossy + // `Fixed128::from_rational`. + let diff = Fixed128::from_rational( + diff_abs as i128, + NonZeroI128::new(max_weight.max(1) as i128).unwrap(), + ); + let diff_squared = diff.saturating_mul(diff); + + // 0.00004 = 4/100_000 = 40_000/10^9 + let v = Fixed128::from_rational(4, NonZeroI128::new(100_000).unwrap()); + // 0.00004^2 = 16/10^10 Taking the future /2 into account... 8/10^10 + let v_squared_2 = Fixed128::from_rational(8, NonZeroI128::new(10_000_000_000).unwrap()); + + let first_term = v.saturating_mul(diff); + let second_term = v_squared_2.saturating_mul(diff_squared); + + if positive { + // Note: this is merely bounded by how big the multiplier and the inner value can go, + // not by any economical reasoning. + let excess = first_term.saturating_add(second_term); + multiplier.saturating_add(excess) + } else { + // Defensive-only: first_term > second_term. Safe subtraction. 
+ let negative = first_term.saturating_sub(second_term); + multiplier + .saturating_sub(negative) + // despite the fact that apply_to saturates weight (final fee cannot go below 0) + // it is crucially important to stop here and don't further reduce the weight fee + // multiplier. While at -1, it means that the network is so un-congested that all + // transactions have no weight fee. We stop here and only increase if the network + // became more busy. + .max(Fixed128::from_natural(-1)) + } + } } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::assert_eq_error_rate; - use crate::{MaximumBlockWeight, AvailableBlockRatio, Runtime}; - use crate::{constants::currency::*, TransactionPayment, TargetBlockFullness}; - use frame_support::weights::Weight; - use core::num::NonZeroI128; - - fn max() -> Weight { - MaximumBlockWeight::get() - } - - fn target() -> Weight { - TargetBlockFullness::get() * max() - } - - // poc reference implementation. - fn fee_multiplier_update(block_weight: Weight, previous: Fixed128) -> Fixed128 { - let block_weight = block_weight as f64; - let v: f64 = 0.00004; - - // maximum tx weight - let m = max() as f64; - // Ideal saturation in terms of weight - let ss = target() as f64; - // Current saturation in terms of weight - let s = block_weight; - - let fm = v * (s/m - ss/m) + v.powi(2) * (s/m - ss/m).powi(2) / 2.0; - let addition_fm = Fixed128::from_parts((fm * Fixed128::accuracy() as f64).round() as i128); - previous.saturating_add(addition_fm) - } - - fn run_with_system_weight(w: Weight, assertions: F) where F: Fn() -> () { - let mut t: sp_io::TestExternalities = - frame_system::GenesisConfig::default().build_storage::().unwrap().into(); - t.execute_with(|| { - System::set_block_limits(w, 0); - assertions() - }); - } - - #[test] - fn fee_multiplier_update_poc_works() { - let fm = Fixed128::from_rational(0, NonZeroI128::new(1).unwrap()); - let test_set = vec![ - (0, fm.clone()), - (100, fm.clone()), - (target(), fm.clone()), - (max() / 
2, fm.clone()), - (max(), fm.clone()), - ]; - test_set.into_iter().for_each(|(w, fm)| { - run_with_system_weight(w, || { - assert_eq_error_rate!( - fee_multiplier_update(w, fm), - TargetedFeeAdjustment::::convert(fm), - // Error is only 1 in 10^18 - Fixed128::from_parts(1), - ); - }) - }) - } - - #[test] - fn empty_chain_simulation() { - // just a few txs per_block. - let block_weight = 0; - run_with_system_weight(block_weight, || { - let mut fm = Fixed128::default(); - let mut iterations: u64 = 0; - loop { - let next = TargetedFeeAdjustment::::convert(fm); - fm = next; - if fm == Fixed128::from_natural(-1) { break; } - iterations += 1; - } - println!("iteration {}, new fm = {:?}. Weight fee is now zero", iterations, fm); - assert!(iterations > 50_000, "This assertion is just a warning; Don't panic. \ + use super::*; + use crate::{constants::currency::*, TargetBlockFullness, TransactionPayment}; + use crate::{AvailableBlockRatio, MaximumBlockWeight, Runtime}; + use core::num::NonZeroI128; + use frame_support::weights::Weight; + use sp_runtime::assert_eq_error_rate; + + fn max() -> Weight { + MaximumBlockWeight::get() + } + + fn target() -> Weight { + TargetBlockFullness::get() * max() + } + + // poc reference implementation. 
+ fn fee_multiplier_update(block_weight: Weight, previous: Fixed128) -> Fixed128 { + let block_weight = block_weight as f64; + let v: f64 = 0.00004; + + // maximum tx weight + let m = max() as f64; + // Ideal saturation in terms of weight + let ss = target() as f64; + // Current saturation in terms of weight + let s = block_weight; + + let fm = v * (s / m - ss / m) + v.powi(2) * (s / m - ss / m).powi(2) / 2.0; + let addition_fm = Fixed128::from_parts((fm * Fixed128::accuracy() as f64).round() as i128); + previous.saturating_add(addition_fm) + } + + fn run_with_system_weight(w: Weight, assertions: F) + where + F: Fn() -> (), + { + let mut t: sp_io::TestExternalities = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into(); + t.execute_with(|| { + System::set_block_limits(w, 0); + assertions() + }); + } + + #[test] + fn fee_multiplier_update_poc_works() { + let fm = Fixed128::from_rational(0, NonZeroI128::new(1).unwrap()); + let test_set = vec![ + (0, fm.clone()), + (100, fm.clone()), + (target(), fm.clone()), + (max() / 2, fm.clone()), + (max(), fm.clone()), + ]; + test_set.into_iter().for_each(|(w, fm)| { + run_with_system_weight(w, || { + assert_eq_error_rate!( + fee_multiplier_update(w, fm), + TargetedFeeAdjustment::::convert(fm), + // Error is only 1 in 10^18 + Fixed128::from_parts(1), + ); + }) + }) + } + + #[test] + fn empty_chain_simulation() { + // just a few txs per_block. + let block_weight = 0; + run_with_system_weight(block_weight, || { + let mut fm = Fixed128::default(); + let mut iterations: u64 = 0; + loop { + let next = TargetedFeeAdjustment::::convert(fm); + fm = next; + if fm == Fixed128::from_natural(-1) { + break; + } + iterations += 1; + } + println!( + "iteration {}, new fm = {:?}. Weight fee is now zero", + iterations, fm + ); + assert!( + iterations > 50_000, + "This assertion is just a warning; Don't panic. \ Current substrate/polkadot node are configured with a _slow adjusting fee_ \ mechanism. 
Hence, it is really unlikely that fees collapse to zero even on an \ empty chain in less than at least of couple of thousands of empty blocks. But this \ simulation indicates that fees collapsed to zero after {} almost-empty blocks. \ Check it", - iterations, - ); - }) - } - - #[test] - #[ignore] - fn congested_chain_simulation() { - // `cargo test congested_chain_simulation -- --nocapture` to get some insight. - - // almost full. The entire quota of normal transactions is taken. - let block_weight = AvailableBlockRatio::get() * max() - 100; - - // Default substrate minimum. - let tx_weight = 10_000; - - run_with_system_weight(block_weight, || { - // initial value configured on module - let mut fm = Fixed128::default(); - assert_eq!(fm, TransactionPayment::next_fee_multiplier()); - - let mut iterations: u64 = 0; - loop { - let next = TargetedFeeAdjustment::::convert(fm); - // if no change, panic. This should never happen in this case. - if fm == next { panic!("The fee should ever increase"); } - fm = next; - iterations += 1; - let fee = ::WeightToFee::convert(tx_weight); - let adjusted_fee = fm.saturated_multiply_accumulate(fee); - println!( - "iteration {}, new fm = {:?}. Fee at this point is: {} units / {} millicents, \ + iterations, + ); + }) + } + + #[test] + #[ignore] + fn congested_chain_simulation() { + // `cargo test congested_chain_simulation -- --nocapture` to get some insight. + + // almost full. The entire quota of normal transactions is taken. + let block_weight = AvailableBlockRatio::get() * max() - 100; + + // Default substrate minimum. + let tx_weight = 10_000; + + run_with_system_weight(block_weight, || { + // initial value configured on module + let mut fm = Fixed128::default(); + assert_eq!(fm, TransactionPayment::next_fee_multiplier()); + + let mut iterations: u64 = 0; + loop { + let next = TargetedFeeAdjustment::::convert(fm); + // if no change, panic. This should never happen in this case. 
+ if fm == next { + panic!("The fee should ever increase"); + } + fm = next; + iterations += 1; + let fee = + ::WeightToFee::convert(tx_weight); + let adjusted_fee = fm.saturated_multiply_accumulate(fee); + println!( + "iteration {}, new fm = {:?}. Fee at this point is: {} units / {} millicents, \ {} cents, {} dollars", - iterations, - fm, - adjusted_fee, - adjusted_fee / MILLICENTS, - adjusted_fee / CENTS, - adjusted_fee / DOLLARS, - ); - } - }); - } - - #[test] - fn stateless_weight_mul() { - // This test will show that heavy blocks have a weight multiplier greater than 0 - // and light blocks will have a weight multiplier less than 0. - run_with_system_weight(target() / 4, || { - // `fee_multiplier_update` is enough as it is the absolute truth value. - let next = TargetedFeeAdjustment::::convert(Fixed128::default()); - assert_eq!( - next, - fee_multiplier_update(target() / 4 ,Fixed128::default()) - ); - - // Light block. Fee is reduced a little. - assert!(next < Fixed128::zero()) - }); - run_with_system_weight(target() / 2, || { - let next = TargetedFeeAdjustment::::convert(Fixed128::default()); - assert_eq!( - next, - fee_multiplier_update(target() / 2 ,Fixed128::default()) - ); - - // Light block. Fee is reduced a little. - assert!(next < Fixed128::zero()) - - }); - run_with_system_weight(target(), || { - // ideal. Original fee. No changes. - let next = TargetedFeeAdjustment::::convert(Fixed128::default()); - assert_eq!(next, Fixed128::zero()) - }); - run_with_system_weight(target() * 2, || { - // More than ideal. Fee is increased. - let next = TargetedFeeAdjustment::::convert(Fixed128::default()); - assert_eq!( - next, - fee_multiplier_update(target() * 2 ,Fixed128::default()) - ); - - // Heavy block. Fee is increased a little. 
- assert!(next > Fixed128::zero()) - }); - } - - #[test] - fn stateful_weight_mul_grow_to_infinity() { - run_with_system_weight(target() * 2, || { - let mut original = Fixed128::default(); - let mut next = Fixed128::default(); - - (0..1_000).for_each(|_| { - next = TargetedFeeAdjustment::::convert(original); - assert_eq!( - next, - fee_multiplier_update(target() * 2, original), - ); - // must always increase - assert!(next > original); - original = next; - }); - }); - } - - #[test] - fn stateful_weight_mil_collapse_to_minus_one() { - run_with_system_weight(0, || { - let mut original = Fixed128::default(); // 0 - let mut next; - - // decreases - next = TargetedFeeAdjustment::::convert(original); - assert_eq!( - next, - fee_multiplier_update(0, original), - ); - assert!(next < original); - original = next; - - // keeps decreasing - next = TargetedFeeAdjustment::::convert(original); - assert_eq!( - next, - fee_multiplier_update(0, original), - ); - assert!(next < original); - - // ... stops going down at -1 - assert_eq!( - TargetedFeeAdjustment::::convert(Fixed128::from_natural(-1)), - Fixed128::from_natural(-1) - ); - }) - } - - #[test] - fn weight_to_fee_should_not_overflow_on_large_weights() { - let kb = 1024 as Weight; - let mb = kb * kb; - let max_fm = Fixed128::from_natural(i128::max_value()); - - // check that for all values it can compute, correctly. - vec![ - 0, - 1, - 10, - 1000, - kb, - 10 * kb, - 100 * kb, - mb, - 10 * mb, - 2147483647, - 4294967295, - MaximumBlockWeight::get() / 2, - MaximumBlockWeight::get(), - Weight::max_value() / 2, - Weight::max_value(), - ].into_iter().for_each(|i| { - run_with_system_weight(i, || { - let next = TargetedFeeAdjustment::::convert(Fixed128::default()); - let truth = fee_multiplier_update(i, Fixed128::default()); - assert_eq_error_rate!(truth, next, Fixed128::from_parts(50_000_000)); - }); - }); - - // Some values that are all above the target and will cause an increase. 
- let t = target(); - vec![t + 100, t * 2, t * 4] - .into_iter() - .for_each(|i| { - run_with_system_weight(i, || { - let fm = TargetedFeeAdjustment::::convert(max_fm); - // won't grow. The convert saturates everything. - assert_eq!(fm, max_fm); - }) - }); - } + iterations, + fm, + adjusted_fee, + adjusted_fee / MILLICENTS, + adjusted_fee / CENTS, + adjusted_fee / DOLLARS, + ); + } + }); + } + + #[test] + fn stateless_weight_mul() { + // This test will show that heavy blocks have a weight multiplier greater than 0 + // and light blocks will have a weight multiplier less than 0. + run_with_system_weight(target() / 4, || { + // `fee_multiplier_update` is enough as it is the absolute truth value. + let next = TargetedFeeAdjustment::::convert(Fixed128::default()); + assert_eq!( + next, + fee_multiplier_update(target() / 4, Fixed128::default()) + ); + + // Light block. Fee is reduced a little. + assert!(next < Fixed128::zero()) + }); + run_with_system_weight(target() / 2, || { + let next = TargetedFeeAdjustment::::convert(Fixed128::default()); + assert_eq!( + next, + fee_multiplier_update(target() / 2, Fixed128::default()) + ); + + // Light block. Fee is reduced a little. + assert!(next < Fixed128::zero()) + }); + run_with_system_weight(target(), || { + // ideal. Original fee. No changes. + let next = TargetedFeeAdjustment::::convert(Fixed128::default()); + assert_eq!(next, Fixed128::zero()) + }); + run_with_system_weight(target() * 2, || { + // More than ideal. Fee is increased. + let next = TargetedFeeAdjustment::::convert(Fixed128::default()); + assert_eq!( + next, + fee_multiplier_update(target() * 2, Fixed128::default()) + ); + + // Heavy block. Fee is increased a little. 
+ assert!(next > Fixed128::zero()) + }); + } + + #[test] + fn stateful_weight_mul_grow_to_infinity() { + run_with_system_weight(target() * 2, || { + let mut original = Fixed128::default(); + let mut next = Fixed128::default(); + + (0..1_000).for_each(|_| { + next = TargetedFeeAdjustment::::convert(original); + assert_eq!(next, fee_multiplier_update(target() * 2, original),); + // must always increase + assert!(next > original); + original = next; + }); + }); + } + + #[test] + fn stateful_weight_mil_collapse_to_minus_one() { + run_with_system_weight(0, || { + let mut original = Fixed128::default(); // 0 + let mut next; + + // decreases + next = TargetedFeeAdjustment::::convert(original); + assert_eq!(next, fee_multiplier_update(0, original),); + assert!(next < original); + original = next; + + // keeps decreasing + next = TargetedFeeAdjustment::::convert(original); + assert_eq!(next, fee_multiplier_update(0, original),); + assert!(next < original); + + // ... stops going down at -1 + assert_eq!( + TargetedFeeAdjustment::::convert(Fixed128::from_natural(-1)), + Fixed128::from_natural(-1) + ); + }) + } + + #[test] + fn weight_to_fee_should_not_overflow_on_large_weights() { + let kb = 1024 as Weight; + let mb = kb * kb; + let max_fm = Fixed128::from_natural(i128::max_value()); + + // check that for all values it can compute, correctly. + vec![ + 0, + 1, + 10, + 1000, + kb, + 10 * kb, + 100 * kb, + mb, + 10 * mb, + 2147483647, + 4294967295, + MaximumBlockWeight::get() / 2, + MaximumBlockWeight::get(), + Weight::max_value() / 2, + Weight::max_value(), + ] + .into_iter() + .for_each(|i| { + run_with_system_weight(i, || { + let next = + TargetedFeeAdjustment::::convert(Fixed128::default()); + let truth = fee_multiplier_update(i, Fixed128::default()); + assert_eq_error_rate!(truth, next, Fixed128::from_parts(50_000_000)); + }); + }); + + // Some values that are all above the target and will cause an increase. 
+ let t = target(); + vec![t + 100, t * 2, t * 4].into_iter().for_each(|i| { + run_with_system_weight(i, || { + let fm = TargetedFeeAdjustment::::convert(max_fm); + // won't grow. The convert saturates everything. + assert_eq!(fm, max_fm); + }) + }); + } } diff --git a/bin/node/runtime/src/lib.rs b/bin/node/runtime/src/lib.rs index 29dddd8336..544c84e14e 100644 --- a/bin/node/runtime/src/lib.rs +++ b/bin/node/runtime/src/lib.rs @@ -18,645 +18,669 @@ #![cfg_attr(not(feature = "std"), no_std)] // `construct_runtime!` does a lot of recursion and requires us to increase the limit to 256. -#![recursion_limit="256"] +#![recursion_limit = "256"] -use sp_std::prelude::*; use frame_support::{ - construct_runtime, parameter_types, debug, - weights::Weight, - traits::{Currency, Randomness, OnUnbalanced, Imbalance}, + construct_runtime, debug, parameter_types, + traits::{Currency, Imbalance, OnUnbalanced, Randomness}, + weights::Weight, }; -use sp_core::u32_trait::{_1, _2, _3, _4}; +use frame_system::offchain::TransactionSubmitter; pub use node_primitives::{AccountId, Signature}; use node_primitives::{AccountIndex, Balance, BlockNumber, Hash, Index, Moment}; +use pallet_contracts_rpc_runtime_api::ContractExecResult; +use pallet_grandpa::fg_primitives; +use pallet_grandpa::AuthorityList as GrandpaAuthorityList; +use pallet_im_online::sr25519::AuthorityId as ImOnlineId; +use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; use sp_api::impl_runtime_apis; -use sp_runtime::{ - Permill, ModuleId, Perbill, Perquintill, Percent, ApplyExtrinsicResult, - impl_opaque_keys, generic, create_runtime_str, -}; +use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; +use sp_core::u32_trait::{_1, _2, _3, _4}; +use sp_core::OpaqueMetadata; +use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource, TransactionPriority}; use sp_runtime::traits::{ - self, 
BlakeTwo256, Block as BlockT, StaticLookup, SaturatedConversion, - ConvertInto, OpaqueKeys, + self, BlakeTwo256, Block as BlockT, ConvertInto, OpaqueKeys, SaturatedConversion, StaticLookup, }; -use sp_version::RuntimeVersion; +use sp_runtime::transaction_validity::{ + TransactionPriority, TransactionSource, TransactionValidity, +}; +use sp_runtime::{ + create_runtime_str, generic, impl_opaque_keys, ApplyExtrinsicResult, ModuleId, Perbill, + Percent, Permill, Perquintill, +}; +use sp_std::prelude::*; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use sp_core::OpaqueMetadata; -use pallet_grandpa::AuthorityList as GrandpaAuthorityList; -use pallet_grandpa::fg_primitives; -use pallet_im_online::sr25519::{AuthorityId as ImOnlineId}; -use sp_authority_discovery::AuthorityId as AuthorityDiscoveryId; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; -use pallet_contracts_rpc_runtime_api::ContractExecResult; -use frame_system::offchain::TransactionSubmitter; -use sp_inherents::{InherentData, CheckInherentsResult}; +use sp_version::RuntimeVersion; -#[cfg(any(feature = "std", test))] -pub use sp_runtime::BuildStorage; -pub use pallet_timestamp::Call as TimestampCall; +pub use frame_support::StorageValue; pub use pallet_balances::Call as BalancesCall; pub use pallet_contracts::Gas; -pub use frame_support::StorageValue; pub use pallet_staking::StakerStatus; +pub use pallet_timestamp::Call as TimestampCall; +#[cfg(any(feature = "std", test))] +pub use sp_runtime::BuildStorage; /// Implementations of some helper traits passed into runtime modules as associated types. pub mod impls; -use impls::{CurrencyToVoteHandler, Author, LinearWeightToFee, TargetedFeeAdjustment}; +use impls::{Author, CurrencyToVoteHandler, LinearWeightToFee, TargetedFeeAdjustment}; /// Constant values used within the runtime. pub mod constants; -use constants::{time::*, currency::*}; +use constants::{currency::*, time::*}; // Make the WASM binary available. 
#[cfg(feature = "std")] include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// A transaction submitter with the given key type. -pub type TransactionSubmitterOf = TransactionSubmitter; +pub type TransactionSubmitterOf = + TransactionSubmitter; /// Submits transaction with the node's public and signature type. Adheres to the signed extension /// format of the chain. impl frame_system::offchain::CreateTransaction for Runtime { - type Public = ::Signer; - type Signature = Signature; - - fn create_transaction>( - call: Call, - public: Self::Public, - account: AccountId, - index: Index, - ) -> Option<(Call, ::SignaturePayload)> { - // take the biggest period possible. - let period = BlockHashCount::get() - .checked_next_power_of_two() - .map(|c| c / 2) - .unwrap_or(2) as u64; - let current_block = System::block_number() - .saturated_into::() - // The `System::block_number` is initialized with `n+1`, - // so the actual block number is `n`. - .saturating_sub(1); - let tip = 0; - let extra: SignedExtra = ( - frame_system::CheckVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), - frame_system::CheckNonce::::from(index), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(tip), - Default::default(), - ); - let raw_payload = SignedPayload::new(call, extra).map_err(|e| { - debug::warn!("Unable to create signed payload: {:?}", e); - }).ok()?; - let signature = TSigner::sign(public, &raw_payload)?; - let address = Indices::unlookup(account); - let (call, extra, _) = raw_payload.deconstruct(); - Some((call, (address, signature, extra))) - } + type Public = ::Signer; + type Signature = Signature; + + fn create_transaction< + TSigner: frame_system::offchain::Signer, + >( + call: Call, + public: Self::Public, + account: AccountId, + index: Index, + ) -> Option<( + Call, + ::SignaturePayload, + )> { + // take the biggest period possible. 
+ let period = BlockHashCount::get() + .checked_next_power_of_two() + .map(|c| c / 2) + .unwrap_or(2) as u64; + let current_block = System::block_number() + .saturated_into::() + // The `System::block_number` is initialized with `n+1`, + // so the actual block number is `n`. + .saturating_sub(1); + let tip = 0; + let extra: SignedExtra = ( + frame_system::CheckVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(generic::Era::mortal(period, current_block)), + frame_system::CheckNonce::::from(index), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(tip), + Default::default(), + ); + let raw_payload = SignedPayload::new(call, extra) + .map_err(|e| { + debug::warn!("Unable to create signed payload: {:?}", e); + }) + .ok()?; + let signature = TSigner::sign(public, &raw_payload)?; + let address = Indices::unlookup(account); + let (call, extra, _) = raw_payload.deconstruct(); + Some((call, (address, signature, extra))) + } } /// Runtime version. pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("node"), - impl_name: create_runtime_str!("substrate-node"), - authoring_version: 10, - // Per convention: if the runtime behavior changes, increment spec_version - // and set impl_version to 0. If only runtime - // implementation changes and behavior does not, then leave spec_version as - // is and increment impl_version. - spec_version: 243, - impl_version: 0, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + spec_name: create_runtime_str!("node"), + impl_name: create_runtime_str!("substrate-node"), + authoring_version: 10, + // Per convention: if the runtime behavior changes, increment spec_version + // and set impl_version to 0. If only runtime + // implementation changes and behavior does not, then leave spec_version as + // is and increment impl_version. 
+ spec_version: 243, + impl_version: 0, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, }; /// Native version. #[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } } type NegativeImbalance = >::NegativeImbalance; pub struct DealWithFees; impl OnUnbalanced for DealWithFees { - fn on_unbalanceds(mut fees_then_tips: impl Iterator) { - if let Some(fees) = fees_then_tips.next() { - // for fees, 80% to treasury, 20% to author - let mut split = fees.ration(80, 20); - if let Some(tips) = fees_then_tips.next() { - // for tips, if any, 80% to treasury, 20% to author (though this can be anything) - tips.ration_merge_into(80, 20, &mut split); - } - Treasury::on_unbalanced(split.0); - Author::on_unbalanced(split.1); - } - } + fn on_unbalanceds(mut fees_then_tips: impl Iterator) { + if let Some(fees) = fees_then_tips.next() { + // for fees, 80% to treasury, 20% to author + let mut split = fees.ration(80, 20); + if let Some(tips) = fees_then_tips.next() { + // for tips, if any, 80% to treasury, 20% to author (though this can be anything) + tips.ration_merge_into(80, 20, &mut split); + } + Treasury::on_unbalanced(split.0); + Author::on_unbalanced(split.1); + } + } } parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - /// We allow for 2 seconds of compute with a 6 second average block time. - pub const MaximumBlockWeight: Weight = 2_000_000_000_000; - pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; - pub const Version: RuntimeVersion = VERSION; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const BlockHashCount: BlockNumber = 250; + /// We allow for 2 seconds of compute with a 6 second average block time. 
+ pub const MaximumBlockWeight: Weight = 2_000_000_000_000; + pub const MaximumBlockLength: u32 = 5 * 1024 * 1024; + pub const Version: RuntimeVersion = VERSION; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } impl frame_system::Trait for Runtime { - type Origin = Origin; - type Call = Call; - type Index = Index; - type BlockNumber = BlockNumber; - type Hash = Hash; - type Hashing = BlakeTwo256; - type AccountId = AccountId; - type Lookup = Indices; - type Header = generic::Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = Version; - type ModuleToIndex = ModuleToIndex; - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Call = Call; + type Index = Index; + type BlockNumber = BlockNumber; + type Hash = Hash; + type Hashing = BlakeTwo256; + type AccountId = AccountId; + type Lookup = Indices; + type Header = generic::Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = Version; + type ModuleToIndex = ModuleToIndex; + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! { - // One storage item; value is size 4+4+16+32 bytes = 56 bytes. - pub const MultisigDepositBase: Balance = 30 * CENTS; - // Additional storage item size of 32 bytes. - pub const MultisigDepositFactor: Balance = 5 * CENTS; - pub const MaxSignatories: u16 = 100; + // One storage item; value is size 4+4+16+32 bytes = 56 bytes. + pub const MultisigDepositBase: Balance = 30 * CENTS; + // Additional storage item size of 32 bytes. 
+ pub const MultisigDepositFactor: Balance = 5 * CENTS; + pub const MaxSignatories: u16 = 100; } impl pallet_utility::Trait for Runtime { - type Event = Event; - type Call = Call; - type Currency = Balances; - type MultisigDepositBase = MultisigDepositBase; - type MultisigDepositFactor = MultisigDepositFactor; - type MaxSignatories = MaxSignatories; + type Event = Event; + type Call = Call; + type Currency = Balances; + type MultisigDepositBase = MultisigDepositBase; + type MultisigDepositFactor = MultisigDepositFactor; + type MaxSignatories = MaxSignatories; } parameter_types! { - pub const MaximumWeight: Weight = 2_000_000; + pub const MaximumWeight: Weight = 2_000_000; } impl pallet_scheduler::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type Call = Call; - type MaximumWeight = MaximumWeight; + type Event = Event; + type Origin = Origin; + type Call = Call; + type MaximumWeight = MaximumWeight; } parameter_types! { - pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; - pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; + pub const EpochDuration: u64 = EPOCH_DURATION_IN_SLOTS; + pub const ExpectedBlockTime: Moment = MILLISECS_PER_BLOCK; } impl pallet_babe::Trait for Runtime { - type EpochDuration = EpochDuration; - type ExpectedBlockTime = ExpectedBlockTime; - type EpochChangeTrigger = pallet_babe::ExternalTrigger; + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + type EpochChangeTrigger = pallet_babe::ExternalTrigger; } parameter_types! { - pub const IndexDeposit: Balance = 1 * DOLLARS; + pub const IndexDeposit: Balance = 1 * DOLLARS; } impl pallet_indices::Trait for Runtime { - type AccountIndex = AccountIndex; - type Event = Event; - type Currency = Balances; - type Deposit = IndexDeposit; + type AccountIndex = AccountIndex; + type Event = Event; + type Currency = Balances; + type Deposit = IndexDeposit; } parameter_types! 
{ - pub const ExistentialDeposit: Balance = 1 * DOLLARS; + pub const ExistentialDeposit: Balance = 1 * DOLLARS; } impl pallet_balances::Trait for Runtime { - type Balance = Balance; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Module; + type Balance = Balance; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = frame_system::Module; } parameter_types! { - pub const TransactionBaseFee: Balance = 1 * CENTS; - pub const TransactionByteFee: Balance = 10 * MILLICENTS; - // In the Substrate node, a weight of 10_000_000 (smallest non-zero weight) - // is mapped to 10_000_000 units of fees, hence: - pub const WeightFeeCoefficient: Balance = 1; - // for a sane configuration, this should always be less than `AvailableBlockRatio`. - pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); + pub const TransactionBaseFee: Balance = 1 * CENTS; + pub const TransactionByteFee: Balance = 10 * MILLICENTS; + // In the Substrate node, a weight of 10_000_000 (smallest non-zero weight) + // is mapped to 10_000_000 units of fees, hence: + pub const WeightFeeCoefficient: Balance = 1; + // for a sane configuration, this should always be less than `AvailableBlockRatio`. 
+ pub const TargetBlockFullness: Perquintill = Perquintill::from_percent(25); } impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = DealWithFees; - type TransactionBaseFee = TransactionBaseFee; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = LinearWeightToFee; - type FeeMultiplierUpdate = TargetedFeeAdjustment; + type Currency = Balances; + type OnTransactionPayment = DealWithFees; + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = LinearWeightToFee; + type FeeMultiplierUpdate = TargetedFeeAdjustment; } parameter_types! { - pub const MinimumPeriod: Moment = SLOT_DURATION / 2; + pub const MinimumPeriod: Moment = SLOT_DURATION / 2; } impl pallet_timestamp::Trait for Runtime { - type Moment = Moment; - type OnTimestampSet = Babe; - type MinimumPeriod = MinimumPeriod; + type Moment = Moment; + type OnTimestampSet = Babe; + type MinimumPeriod = MinimumPeriod; } parameter_types! { - pub const UncleGenerations: BlockNumber = 5; + pub const UncleGenerations: BlockNumber = 5; } impl pallet_authorship::Trait for Runtime { - type FindAuthor = pallet_session::FindAccountFromAuthorIndex; - type UncleGenerations = UncleGenerations; - type FilterUncle = (); - type EventHandler = (Staking, ImOnline); + type FindAuthor = pallet_session::FindAccountFromAuthorIndex; + type UncleGenerations = UncleGenerations; + type FilterUncle = (); + type EventHandler = (Staking, ImOnline); } impl_opaque_keys! { - pub struct SessionKeys { - pub grandpa: Grandpa, - pub babe: Babe, - pub im_online: ImOnline, - pub authority_discovery: AuthorityDiscovery, - } + pub struct SessionKeys { + pub grandpa: Grandpa, + pub babe: Babe, + pub im_online: ImOnline, + pub authority_discovery: AuthorityDiscovery, + } } parameter_types! 
{ - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(17); } impl pallet_session::Trait for Runtime { - type Event = Event; - type ValidatorId = ::AccountId; - type ValidatorIdOf = pallet_staking::StashOf; - type ShouldEndSession = Babe; - type SessionManager = Staking; - type SessionHandler = ::KeyTypeIdProviders; - type Keys = SessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type NextSessionRotation = Babe; + type Event = Event; + type ValidatorId = ::AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type ShouldEndSession = Babe; + type SessionManager = Staking; + type SessionHandler = ::KeyTypeIdProviders; + type Keys = SessionKeys; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = Babe; } impl pallet_session::historical::Trait for Runtime { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } pallet_staking_reward_curve::build! { - const REWARD_CURVE: PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); + const REWARD_CURVE: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); } parameter_types! { - pub const SessionsPerEra: sp_staking::SessionIndex = 6; - pub const BondingDuration: pallet_staking::EraIndex = 24 * 28; - pub const SlashDeferDuration: pallet_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. 
- pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; - pub const ElectionLookahead: BlockNumber = 25; // 10 minutes per session => 100 block. - pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const SessionsPerEra: sp_staking::SessionIndex = 6; + pub const BondingDuration: pallet_staking::EraIndex = 24 * 28; + pub const SlashDeferDuration: pallet_staking::EraIndex = 24 * 7; // 1/4 the bonding duration. + pub const RewardCurve: &'static PiecewiseLinear<'static> = &REWARD_CURVE; + pub const ElectionLookahead: BlockNumber = 25; // 10 minutes per session => 100 block. + pub const MaxNominatorRewardedPerValidator: u32 = 64; } impl pallet_staking::Trait for Runtime { - type Currency = Balances; - type UnixTime = Timestamp; - type CurrencyToVote = CurrencyToVoteHandler; - type RewardRemainder = Treasury; - type Event = Event; - type Slash = Treasury; // send the slashed funds to the treasury. - type Reward = (); // rewards are minted from the void - type SessionsPerEra = SessionsPerEra; - type BondingDuration = BondingDuration; - type SlashDeferDuration = SlashDeferDuration; - /// A super-majority of the council can cancel the slash. - type SlashCancelOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; - type SessionInterface = Self; - type RewardCurve = RewardCurve; - type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type SubmitTransaction = TransactionSubmitterOf<()>; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = StakingUnsignedPriority; + type Currency = Balances; + type UnixTime = Timestamp; + type CurrencyToVote = CurrencyToVoteHandler; + type RewardRemainder = Treasury; + type Event = Event; + type Slash = Treasury; // send the slashed funds to the treasury. 
+ type Reward = (); // rewards are minted from the void + type SessionsPerEra = SessionsPerEra; + type BondingDuration = BondingDuration; + type SlashDeferDuration = SlashDeferDuration; + /// A super-majority of the council can cancel the slash. + type SlashCancelOrigin = + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; + type SessionInterface = Self; + type RewardCurve = RewardCurve; + type NextNewSession = Session; + type ElectionLookahead = ElectionLookahead; + type Call = Call; + type SubmitTransaction = TransactionSubmitterOf<()>; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type UnsignedPriority = StakingUnsignedPriority; } parameter_types! { - pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; - pub const VotingPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; - pub const FastTrackVotingPeriod: BlockNumber = 3 * 24 * 60 * MINUTES; - pub const InstantAllowed: bool = true; - pub const MinimumDeposit: Balance = 100 * DOLLARS; - pub const EnactmentPeriod: BlockNumber = 30 * 24 * 60 * MINUTES; - pub const CooloffPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; - // One cent: $10,000 / MB - pub const PreimageByteDeposit: Balance = 1 * CENTS; + pub const LaunchPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; + pub const VotingPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; + pub const FastTrackVotingPeriod: BlockNumber = 3 * 24 * 60 * MINUTES; + pub const InstantAllowed: bool = true; + pub const MinimumDeposit: Balance = 100 * DOLLARS; + pub const EnactmentPeriod: BlockNumber = 30 * 24 * 60 * MINUTES; + pub const CooloffPeriod: BlockNumber = 28 * 24 * 60 * MINUTES; + // One cent: $10,000 / MB + pub const PreimageByteDeposit: Balance = 1 * CENTS; } impl pallet_democracy::Trait for Runtime { - type Proposal = Call; - type Event = Event; - type Currency = Balances; - type EnactmentPeriod = EnactmentPeriod; - type LaunchPeriod = LaunchPeriod; - type VotingPeriod = VotingPeriod; - type MinimumDeposit = 
MinimumDeposit; - /// A straight majority of the council can decide what their next motion is. - type ExternalOrigin = pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; - /// A super-majority can have the next scheduled referendum be a straight majority-carries vote. - type ExternalMajorityOrigin = pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; - /// A unanimous council can have the next scheduled referendum be a straight default-carries - /// (NTB) vote. - type ExternalDefaultOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; - /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote - /// be tabled immediately and with a shorter voting/enactment period. - type FastTrackOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; - type InstantOrigin = pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; - type InstantAllowed = InstantAllowed; - type FastTrackVotingPeriod = FastTrackVotingPeriod; - // To cancel a proposal which has been passed, 2/3 of the council must agree to it. - type CancellationOrigin = pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; - // Any single technical committee member may veto a coming council proposal, however they can - // only do it once and it lasts only for the cooloff period. - type VetoOrigin = pallet_collective::EnsureMember; - type CooloffPeriod = CooloffPeriod; - type PreimageByteDeposit = PreimageByteDeposit; - type Slash = Treasury; - type Scheduler = Scheduler; + type Proposal = Call; + type Event = Event; + type Currency = Balances; + type EnactmentPeriod = EnactmentPeriod; + type LaunchPeriod = LaunchPeriod; + type VotingPeriod = VotingPeriod; + type MinimumDeposit = MinimumDeposit; + /// A straight majority of the council can decide what their next motion is. 
+ type ExternalOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _2, AccountId, CouncilCollective>; + /// A super-majority can have the next scheduled referendum be a straight majority-carries vote. + type ExternalMajorityOrigin = + pallet_collective::EnsureProportionAtLeast<_3, _4, AccountId, CouncilCollective>; + /// A unanimous council can have the next scheduled referendum be a straight default-carries + /// (NTB) vote. + type ExternalDefaultOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, CouncilCollective>; + /// Two thirds of the technical committee can have an ExternalMajority/ExternalDefault vote + /// be tabled immediately and with a shorter voting/enactment period. + type FastTrackOrigin = + pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, TechnicalCollective>; + type InstantOrigin = + pallet_collective::EnsureProportionAtLeast<_1, _1, AccountId, TechnicalCollective>; + type InstantAllowed = InstantAllowed; + type FastTrackVotingPeriod = FastTrackVotingPeriod; + // To cancel a proposal which has been passed, 2/3 of the council must agree to it. + type CancellationOrigin = + pallet_collective::EnsureProportionAtLeast<_2, _3, AccountId, CouncilCollective>; + // Any single technical committee member may veto a coming council proposal, however they can + // only do it once and it lasts only for the cooloff period. + type VetoOrigin = pallet_collective::EnsureMember; + type CooloffPeriod = CooloffPeriod; + type PreimageByteDeposit = PreimageByteDeposit; + type Slash = Treasury; + type Scheduler = Scheduler; } parameter_types! 
{ - pub const CouncilMotionDuration: BlockNumber = 5 * DAYS; + pub const CouncilMotionDuration: BlockNumber = 5 * DAYS; } type CouncilCollective = pallet_collective::Instance1; impl pallet_collective::Trait for Runtime { - type Origin = Origin; - type Proposal = Call; - type Event = Event; - type MotionDuration = CouncilMotionDuration; + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = CouncilMotionDuration; } parameter_types! { - pub const CandidacyBond: Balance = 10 * DOLLARS; - pub const VotingBond: Balance = 1 * DOLLARS; - pub const TermDuration: BlockNumber = 7 * DAYS; - pub const DesiredMembers: u32 = 13; - pub const DesiredRunnersUp: u32 = 7; + pub const CandidacyBond: Balance = 10 * DOLLARS; + pub const VotingBond: Balance = 1 * DOLLARS; + pub const TermDuration: BlockNumber = 7 * DAYS; + pub const DesiredMembers: u32 = 13; + pub const DesiredRunnersUp: u32 = 7; } impl pallet_elections_phragmen::Trait for Runtime { - type Event = Event; - type Currency = Balances; - type ChangeMembers = Council; - // NOTE: this implies that council's genesis members cannot be set directly and must come from - // this module. - type InitializeMembers = Council; - type CurrencyToVote = CurrencyToVoteHandler; - type CandidacyBond = CandidacyBond; - type VotingBond = VotingBond; - type LoserCandidate = (); - type BadReport = (); - type KickedMember = (); - type DesiredMembers = DesiredMembers; - type DesiredRunnersUp = DesiredRunnersUp; - type TermDuration = TermDuration; + type Event = Event; + type Currency = Balances; + type ChangeMembers = Council; + // NOTE: this implies that council's genesis members cannot be set directly and must come from + // this module. 
+ type InitializeMembers = Council; + type CurrencyToVote = CurrencyToVoteHandler; + type CandidacyBond = CandidacyBond; + type VotingBond = VotingBond; + type LoserCandidate = (); + type BadReport = (); + type KickedMember = (); + type DesiredMembers = DesiredMembers; + type DesiredRunnersUp = DesiredRunnersUp; + type TermDuration = TermDuration; } parameter_types! { - pub const TechnicalMotionDuration: BlockNumber = 5 * DAYS; + pub const TechnicalMotionDuration: BlockNumber = 5 * DAYS; } type TechnicalCollective = pallet_collective::Instance2; impl pallet_collective::Trait for Runtime { - type Origin = Origin; - type Proposal = Call; - type Event = Event; - type MotionDuration = TechnicalMotionDuration; + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = TechnicalMotionDuration; } impl pallet_membership::Trait for Runtime { - type Event = Event; - type AddOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type RemoveOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type SwapOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type ResetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type PrimeOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type MembershipInitialized = TechnicalCommittee; - type MembershipChanged = TechnicalCommittee; + type Event = Event; + type AddOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type RemoveOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type SwapOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type ResetOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type PrimeOrigin = + 
pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type MembershipInitialized = TechnicalCommittee; + type MembershipChanged = TechnicalCommittee; } parameter_types! { - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: Balance = 1 * DOLLARS; - pub const SpendPeriod: BlockNumber = 1 * DAYS; - pub const Burn: Permill = Permill::from_percent(50); - pub const TipCountdown: BlockNumber = 1 * DAYS; - pub const TipFindersFee: Percent = Percent::from_percent(20); - pub const TipReportDepositBase: Balance = 1 * DOLLARS; + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: Balance = 1 * DOLLARS; + pub const SpendPeriod: BlockNumber = 1 * DAYS; + pub const Burn: Permill = Permill::from_percent(50); + pub const TipCountdown: BlockNumber = 1 * DAYS; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: Balance = 1 * DOLLARS; pub const TipReportDepositPerByte: Balance = 1 * CENTS; pub const TreasuryModuleId: ModuleId = ModuleId(*b"py/trsry"); } impl pallet_treasury::Trait for Runtime { type ModuleId = TreasuryModuleId; - type Currency = Balances; - type ApproveOrigin = pallet_collective::EnsureMembers<_4, AccountId, CouncilCollective>; - type RejectOrigin = pallet_collective::EnsureMembers<_2, AccountId, CouncilCollective>; - type Tippers = Elections; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type TipReportDepositPerByte = TipReportDepositPerByte; - type Event = Event; - type ProposalRejection = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type SpendPeriod = SpendPeriod; - type Burn = Burn; + type Currency = Balances; + type ApproveOrigin = pallet_collective::EnsureMembers<_4, AccountId, CouncilCollective>; + type RejectOrigin = pallet_collective::EnsureMembers<_2, AccountId, 
CouncilCollective>; + type Tippers = Elections; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type TipReportDepositPerByte = TipReportDepositPerByte; + type Event = Event; + type ProposalRejection = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; } parameter_types! { - pub const ContractTransactionBaseFee: Balance = 1 * CENTS; - pub const ContractTransactionByteFee: Balance = 10 * MILLICENTS; - pub const ContractFee: Balance = 1 * CENTS; - pub const TombstoneDeposit: Balance = 1 * DOLLARS; - pub const RentByteFee: Balance = 1 * DOLLARS; - pub const RentDepositOffset: Balance = 1000 * DOLLARS; - pub const SurchargeReward: Balance = 150 * DOLLARS; + pub const ContractTransactionBaseFee: Balance = 1 * CENTS; + pub const ContractTransactionByteFee: Balance = 10 * MILLICENTS; + pub const ContractFee: Balance = 1 * CENTS; + pub const TombstoneDeposit: Balance = 1 * DOLLARS; + pub const RentByteFee: Balance = 1 * DOLLARS; + pub const RentDepositOffset: Balance = 1000 * DOLLARS; + pub const SurchargeReward: Balance = 150 * DOLLARS; } impl pallet_contracts::Trait for Runtime { - type Currency = Balances; - type Time = Timestamp; - type Randomness = RandomnessCollectiveFlip; - type Call = Call; - type Event = Event; - type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer; - type ComputeDispatchFee = pallet_contracts::DefaultDispatchFeeComputor; - type TrieIdGenerator = pallet_contracts::TrieIdFromParentCounter; - type GasPayment = (); - type RentPayment = (); - type SignedClaimHandicap = pallet_contracts::DefaultSignedClaimHandicap; - type TombstoneDeposit = TombstoneDeposit; - type StorageSizeOffset = pallet_contracts::DefaultStorageSizeOffset; - type RentByteFee = RentByteFee; - type RentDepositOffset = RentDepositOffset; - type SurchargeReward = SurchargeReward; - type 
TransactionBaseFee = ContractTransactionBaseFee; - type TransactionByteFee = ContractTransactionByteFee; - type ContractFee = ContractFee; - type CallBaseFee = pallet_contracts::DefaultCallBaseFee; - type InstantiateBaseFee = pallet_contracts::DefaultInstantiateBaseFee; - type MaxDepth = pallet_contracts::DefaultMaxDepth; - type MaxValueSize = pallet_contracts::DefaultMaxValueSize; - type BlockGasLimit = pallet_contracts::DefaultBlockGasLimit; + type Currency = Balances; + type Time = Timestamp; + type Randomness = RandomnessCollectiveFlip; + type Call = Call; + type Event = Event; + type DetermineContractAddress = pallet_contracts::SimpleAddressDeterminer; + type ComputeDispatchFee = pallet_contracts::DefaultDispatchFeeComputor; + type TrieIdGenerator = pallet_contracts::TrieIdFromParentCounter; + type GasPayment = (); + type RentPayment = (); + type SignedClaimHandicap = pallet_contracts::DefaultSignedClaimHandicap; + type TombstoneDeposit = TombstoneDeposit; + type StorageSizeOffset = pallet_contracts::DefaultStorageSizeOffset; + type RentByteFee = RentByteFee; + type RentDepositOffset = RentDepositOffset; + type SurchargeReward = SurchargeReward; + type TransactionBaseFee = ContractTransactionBaseFee; + type TransactionByteFee = ContractTransactionByteFee; + type ContractFee = ContractFee; + type CallBaseFee = pallet_contracts::DefaultCallBaseFee; + type InstantiateBaseFee = pallet_contracts::DefaultInstantiateBaseFee; + type MaxDepth = pallet_contracts::DefaultMaxDepth; + type MaxValueSize = pallet_contracts::DefaultMaxValueSize; + type BlockGasLimit = pallet_contracts::DefaultBlockGasLimit; } impl pallet_sudo::Trait for Runtime { - type Event = Event; - type Call = Call; + type Event = Event; + type Call = Call; } parameter_types! 
{ - pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_SLOTS as _; - pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); - /// We prioritize im-online heartbeats over phragmen solution submission. - pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; + pub const SessionDuration: BlockNumber = EPOCH_DURATION_IN_SLOTS as _; + pub const ImOnlineUnsignedPriority: TransactionPriority = TransactionPriority::max_value(); + /// We prioritize im-online heartbeats over phragmen solution submission. + pub const StakingUnsignedPriority: TransactionPriority = TransactionPriority::max_value() / 2; } impl pallet_im_online::Trait for Runtime { - type AuthorityId = ImOnlineId; - type Event = Event; - type Call = Call; - type SubmitTransaction = TransactionSubmitterOf; - type SessionDuration = SessionDuration; - type ReportUnresponsiveness = Offences; - type UnsignedPriority = ImOnlineUnsignedPriority; + type AuthorityId = ImOnlineId; + type Event = Event; + type Call = Call; + type SubmitTransaction = TransactionSubmitterOf; + type SessionDuration = SessionDuration; + type ReportUnresponsiveness = Offences; + type UnsignedPriority = ImOnlineUnsignedPriority; } impl pallet_offences::Trait for Runtime { - type Event = Event; - type IdentificationTuple = pallet_session::historical::IdentificationTuple; - type OnOffenceHandler = Staking; + type Event = Event; + type IdentificationTuple = pallet_session::historical::IdentificationTuple; + type OnOffenceHandler = Staking; } impl pallet_authority_discovery::Trait for Runtime {} impl pallet_grandpa::Trait for Runtime { - type Event = Event; + type Event = Event; } parameter_types! 
{ - pub const WindowSize: BlockNumber = 101; - pub const ReportLatency: BlockNumber = 1000; + pub const WindowSize: BlockNumber = 101; + pub const ReportLatency: BlockNumber = 1000; } impl pallet_finality_tracker::Trait for Runtime { - type OnFinalizationStalled = (); - type WindowSize = WindowSize; - type ReportLatency = ReportLatency; + type OnFinalizationStalled = (); + type WindowSize = WindowSize; + type ReportLatency = ReportLatency; } parameter_types! { - pub const BasicDeposit: Balance = 10 * DOLLARS; // 258 bytes on-chain - pub const FieldDeposit: Balance = 250 * CENTS; // 66 bytes on-chain - pub const SubAccountDeposit: Balance = 2 * DOLLARS; // 53 bytes on-chain - pub const MaxSubAccounts: u32 = 100; - pub const MaxAdditionalFields: u32 = 100; + pub const BasicDeposit: Balance = 10 * DOLLARS; // 258 bytes on-chain + pub const FieldDeposit: Balance = 250 * CENTS; // 66 bytes on-chain + pub const SubAccountDeposit: Balance = 2 * DOLLARS; // 53 bytes on-chain + pub const MaxSubAccounts: u32 = 100; + pub const MaxAdditionalFields: u32 = 100; } impl pallet_identity::Trait for Runtime { - type Event = Event; - type Currency = Balances; - type BasicDeposit = BasicDeposit; - type FieldDeposit = FieldDeposit; - type SubAccountDeposit = SubAccountDeposit; - type MaxSubAccounts = MaxSubAccounts; - type MaxAdditionalFields = MaxAdditionalFields; - type Slashed = Treasury; - type ForceOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type RegistrarOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type Event = Event; + type Currency = Balances; + type BasicDeposit = BasicDeposit; + type FieldDeposit = FieldDeposit; + type SubAccountDeposit = SubAccountDeposit; + type MaxSubAccounts = MaxSubAccounts; + type MaxAdditionalFields = MaxAdditionalFields; + type Slashed = Treasury; + type ForceOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + 
type RegistrarOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; } parameter_types! { - pub const ConfigDepositBase: Balance = 5 * DOLLARS; - pub const FriendDepositFactor: Balance = 50 * CENTS; - pub const MaxFriends: u16 = 9; - pub const RecoveryDeposit: Balance = 5 * DOLLARS; + pub const ConfigDepositBase: Balance = 5 * DOLLARS; + pub const FriendDepositFactor: Balance = 50 * CENTS; + pub const MaxFriends: u16 = 9; + pub const RecoveryDeposit: Balance = 5 * DOLLARS; } impl pallet_recovery::Trait for Runtime { - type Event = Event; - type Call = Call; - type Currency = Balances; - type ConfigDepositBase = ConfigDepositBase; - type FriendDepositFactor = FriendDepositFactor; - type MaxFriends = MaxFriends; - type RecoveryDeposit = RecoveryDeposit; + type Event = Event; + type Call = Call; + type Currency = Balances; + type ConfigDepositBase = ConfigDepositBase; + type FriendDepositFactor = FriendDepositFactor; + type MaxFriends = MaxFriends; + type RecoveryDeposit = RecoveryDeposit; } parameter_types! 
{ - pub const CandidateDeposit: Balance = 10 * DOLLARS; - pub const WrongSideDeduction: Balance = 2 * DOLLARS; - pub const MaxStrikes: u32 = 10; - pub const RotationPeriod: BlockNumber = 80 * HOURS; - pub const PeriodSpend: Balance = 500 * DOLLARS; - pub const MaxLockDuration: BlockNumber = 36 * 30 * DAYS; - pub const ChallengePeriod: BlockNumber = 7 * DAYS; + pub const CandidateDeposit: Balance = 10 * DOLLARS; + pub const WrongSideDeduction: Balance = 2 * DOLLARS; + pub const MaxStrikes: u32 = 10; + pub const RotationPeriod: BlockNumber = 80 * HOURS; + pub const PeriodSpend: Balance = 500 * DOLLARS; + pub const MaxLockDuration: BlockNumber = 36 * 30 * DAYS; + pub const ChallengePeriod: BlockNumber = 7 * DAYS; } impl pallet_society::Trait for Runtime { - type Event = Event; - type Currency = Balances; - type Randomness = RandomnessCollectiveFlip; - type CandidateDeposit = CandidateDeposit; - type WrongSideDeduction = WrongSideDeduction; - type MaxStrikes = MaxStrikes; - type PeriodSpend = PeriodSpend; - type MembershipChanged = (); - type RotationPeriod = RotationPeriod; - type MaxLockDuration = MaxLockDuration; - type FounderSetOrigin = pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; - type SuspensionJudgementOrigin = pallet_society::EnsureFounder; - type ChallengePeriod = ChallengePeriod; + type Event = Event; + type Currency = Balances; + type Randomness = RandomnessCollectiveFlip; + type CandidateDeposit = CandidateDeposit; + type WrongSideDeduction = WrongSideDeduction; + type MaxStrikes = MaxStrikes; + type PeriodSpend = PeriodSpend; + type MembershipChanged = (); + type RotationPeriod = RotationPeriod; + type MaxLockDuration = MaxLockDuration; + type FounderSetOrigin = + pallet_collective::EnsureProportionMoreThan<_1, _2, AccountId, CouncilCollective>; + type SuspensionJudgementOrigin = pallet_society::EnsureFounder; + type ChallengePeriod = ChallengePeriod; } parameter_types! 
{ - pub const MinVestedTransfer: Balance = 100 * DOLLARS; + pub const MinVestedTransfer: Balance = 100 * DOLLARS; } impl pallet_vesting::Trait for Runtime { - type Event = Event; - type Currency = Balances; - type BlockNumberToBalance = ConvertInto; - type MinVestedTransfer = MinVestedTransfer; + type Event = Event; + type Currency = Balances; + type BlockNumberToBalance = ConvertInto; + type MinVestedTransfer = MinVestedTransfer; } construct_runtime!( @@ -709,13 +733,13 @@ pub type SignedBlock = generic::SignedBlock; pub type BlockId = generic::BlockId; /// The SignedExtension to the basic transaction logic. pub type SignedExtra = ( - frame_system::CheckVersion, - frame_system::CheckGenesis, - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - pallet_contracts::CheckBlockGasLimit, + frame_system::CheckVersion, + frame_system::CheckGenesis, + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + pallet_contracts::CheckBlockGasLimit, ); /// Unchecked extrinsic type as expected by this runtime. pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; @@ -724,230 +748,237 @@ pub type SignedPayload = generic::SignedPayload; /// Extrinsic type that has already been checked. pub type CheckedExtrinsic = generic::CheckedExtrinsic; /// Executive: handles dispatch to the various modules. -pub type Executive = frame_executive::Executive, Runtime, AllModules>; +pub type Executive = frame_executive::Executive< + Runtime, + Block, + frame_system::ChainContext, + Runtime, + AllModules, +>; impl_runtime_apis! 
{ - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - VERSION - } - - fn execute_block(block: Block) { - Executive::execute_block(block) - } - - fn initialize_block(header: &::Header) { - Executive::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - Runtime::metadata().into() - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - Executive::apply_extrinsic(extrinsic) - } - - fn finalize_block() -> ::Header { - Executive::finalize_block() - } - - fn inherent_extrinsics(data: InherentData) -> Vec<::Extrinsic> { - data.create_extrinsics() - } - - fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult { - data.check_extrinsics(&block) - } - - fn random_seed() -> ::Hash { - RandomnessCollectiveFlip::random_seed() - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - ) -> TransactionValidity { - Executive::validate_transaction(source, tx) - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - Executive::offchain_worker(header) - } - } - - impl fg_primitives::GrandpaApi for Runtime { - fn grandpa_authorities() -> GrandpaAuthorityList { - Grandpa::grandpa_authorities() - } - } - - impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeConfiguration { - // The choice of `c` parameter (where `1 - c` represents the - // probability of a slot being empty), is done in accordance to the - // slot duration and expected target block time, for safely - // resisting network delays of maximum two seconds. 
- // - sp_consensus_babe::BabeConfiguration { - slot_duration: Babe::slot_duration(), - epoch_length: EpochDuration::get(), - c: PRIMARY_PROBABILITY, - genesis_authorities: Babe::authorities(), - randomness: Babe::randomness(), - secondary_slots: true, - } - } - - fn current_epoch_start() -> sp_consensus_babe::SlotNumber { - Babe::current_epoch_start() - } - } - - impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { - fn authorities() -> Vec { - AuthorityDiscovery::authorities() - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(account: AccountId) -> Index { - System::account_nonce(account) - } - } - - impl pallet_contracts_rpc_runtime_api::ContractsApi - for Runtime - { - fn call( - origin: AccountId, - dest: AccountId, - value: Balance, - gas_limit: u64, - input_data: Vec, - ) -> ContractExecResult { - let exec_result = - Contracts::bare_call(origin, dest.into(), value, gas_limit, input_data); - match exec_result { - Ok(v) => ContractExecResult::Success { - status: v.status, - data: v.data, - }, - Err(_) => ContractExecResult::Error, - } - } - - fn get_storage( - address: AccountId, - key: [u8; 32], - ) -> pallet_contracts_primitives::GetStorageResult { - Contracts::get_storage(address, key) - } - - fn rent_projection( - address: AccountId, - ) -> pallet_contracts_primitives::RentProjectionResult { - Contracts::rent_projection(address) - } - } - - impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< - Block, - Balance, - UncheckedExtrinsic, - > for Runtime { - fn query_info(uxt: UncheckedExtrinsic, len: u32) -> RuntimeDispatchInfo { - TransactionPayment::query_info(uxt, len) - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(seed: Option>) -> Vec { - SessionKeys::generate(seed) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - #[cfg(feature = 
"runtime-benchmarks")] - impl frame_benchmarking::Benchmark for Runtime { - fn dispatch_benchmark( - pallet: Vec, - benchmark: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, - repeat: u32, - ) -> Result, sp_runtime::RuntimeString> { - use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark}; - // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. - // To get around that, we separated the Session benchmarks into its own crate, which is why - // we need these two lines below. - use pallet_session_benchmarking::Module as SessionBench; - use pallet_offences_benchmarking::Module as OffencesBench; - - impl pallet_session_benchmarking::Trait for Runtime {} - impl pallet_offences_benchmarking::Trait for Runtime {} - - let mut batches = Vec::::new(); - let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat); - - add_benchmark!(params, batches, b"balances", Balances); - add_benchmark!(params, batches, b"collective", Council); - add_benchmark!(params, batches, b"democracy", Democracy); - add_benchmark!(params, batches, b"identity", Identity); - add_benchmark!(params, batches, b"im-online", ImOnline); - add_benchmark!(params, batches, b"session", SessionBench::); - add_benchmark!(params, batches, b"staking", Staking); - add_benchmark!(params, batches, b"timestamp", Timestamp); - add_benchmark!(params, batches, b"treasury", Treasury); - add_benchmark!(params, batches, b"utility", Utility); - add_benchmark!(params, batches, b"vesting", Vesting); - add_benchmark!(params, batches, b"offences", OffencesBench::); - - if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } - Ok(batches) - } - } + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + VERSION + } + + fn execute_block(block: Block) { + Executive::execute_block(block) + } + + fn initialize_block(header: &::Header) { + Executive::initialize_block(header) + } + } 
+ + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + Runtime::metadata().into() + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + Executive::apply_extrinsic(extrinsic) + } + + fn finalize_block() -> ::Header { + Executive::finalize_block() + } + + fn inherent_extrinsics(data: InherentData) -> Vec<::Extrinsic> { + data.create_extrinsics() + } + + fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult { + data.check_extrinsics(&block) + } + + fn random_seed() -> ::Hash { + RandomnessCollectiveFlip::random_seed() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity { + Executive::validate_transaction(source, tx) + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + Executive::offchain_worker(header) + } + } + + impl fg_primitives::GrandpaApi for Runtime { + fn grandpa_authorities() -> GrandpaAuthorityList { + Grandpa::grandpa_authorities() + } + } + + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeConfiguration { + // The choice of `c` parameter (where `1 - c` represents the + // probability of a slot being empty), is done in accordance to the + // slot duration and expected target block time, for safely + // resisting network delays of maximum two seconds. 
+ // + sp_consensus_babe::BabeConfiguration { + slot_duration: Babe::slot_duration(), + epoch_length: EpochDuration::get(), + c: PRIMARY_PROBABILITY, + genesis_authorities: Babe::authorities(), + randomness: Babe::randomness(), + secondary_slots: true, + } + } + + fn current_epoch_start() -> sp_consensus_babe::SlotNumber { + Babe::current_epoch_start() + } + } + + impl sp_authority_discovery::AuthorityDiscoveryApi for Runtime { + fn authorities() -> Vec { + AuthorityDiscovery::authorities() + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(account: AccountId) -> Index { + System::account_nonce(account) + } + } + + impl pallet_contracts_rpc_runtime_api::ContractsApi + for Runtime + { + fn call( + origin: AccountId, + dest: AccountId, + value: Balance, + gas_limit: u64, + input_data: Vec, + ) -> ContractExecResult { + let exec_result = + Contracts::bare_call(origin, dest.into(), value, gas_limit, input_data); + match exec_result { + Ok(v) => ContractExecResult::Success { + status: v.status, + data: v.data, + }, + Err(_) => ContractExecResult::Error, + } + } + + fn get_storage( + address: AccountId, + key: [u8; 32], + ) -> pallet_contracts_primitives::GetStorageResult { + Contracts::get_storage(address, key) + } + + fn rent_projection( + address: AccountId, + ) -> pallet_contracts_primitives::RentProjectionResult { + Contracts::rent_projection(address) + } + } + + impl pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi< + Block, + Balance, + UncheckedExtrinsic, + > for Runtime { + fn query_info(uxt: UncheckedExtrinsic, len: u32) -> RuntimeDispatchInfo { + TransactionPayment::query_info(uxt, len) + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(seed: Option>) -> Vec { + SessionKeys::generate(seed) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + #[cfg(feature = 
"runtime-benchmarks")] + impl frame_benchmarking::Benchmark for Runtime { + fn dispatch_benchmark( + pallet: Vec, + benchmark: Vec, + lowest_range_values: Vec, + highest_range_values: Vec, + steps: Vec, + repeat: u32, + ) -> Result, sp_runtime::RuntimeString> { + use frame_benchmarking::{Benchmarking, BenchmarkBatch, add_benchmark}; + // Trying to add benchmarks directly to the Session Pallet caused cyclic dependency issues. + // To get around that, we separated the Session benchmarks into its own crate, which is why + // we need these two lines below. + use pallet_session_benchmarking::Module as SessionBench; + use pallet_offences_benchmarking::Module as OffencesBench; + + impl pallet_session_benchmarking::Trait for Runtime {} + impl pallet_offences_benchmarking::Trait for Runtime {} + + let mut batches = Vec::::new(); + let params = (&pallet, &benchmark, &lowest_range_values, &highest_range_values, &steps, repeat); + + add_benchmark!(params, batches, b"balances", Balances); + add_benchmark!(params, batches, b"collective", Council); + add_benchmark!(params, batches, b"democracy", Democracy); + add_benchmark!(params, batches, b"identity", Identity); + add_benchmark!(params, batches, b"im-online", ImOnline); + add_benchmark!(params, batches, b"session", SessionBench::); + add_benchmark!(params, batches, b"staking", Staking); + add_benchmark!(params, batches, b"timestamp", Timestamp); + add_benchmark!(params, batches, b"treasury", Treasury); + add_benchmark!(params, batches, b"utility", Utility); + add_benchmark!(params, batches, b"vesting", Vesting); + add_benchmark!(params, batches, b"offences", OffencesBench::); + + if batches.is_empty() { return Err("Benchmark not found for this pallet.".into()) } + Ok(batches) + } + } } #[cfg(test)] mod tests { - use super::*; - use frame_system::offchain::{SignAndSubmitTransaction, SubmitSignedTransaction}; - - #[test] - fn validate_transaction_submitter_bounds() { - fn is_submit_signed_transaction() where - T: 
SubmitSignedTransaction< - Runtime, - Call, - >, - {} - - fn is_sign_and_submit_transaction() where - T: SignAndSubmitTransaction< - Runtime, - Call, - Extrinsic=UncheckedExtrinsic, - CreateTransaction=Runtime, - Signer=ImOnlineId, - >, - {} - - is_submit_signed_transaction::>(); - is_sign_and_submit_transaction::>(); - } + use super::*; + use frame_system::offchain::{SignAndSubmitTransaction, SubmitSignedTransaction}; + + #[test] + fn validate_transaction_submitter_bounds() { + fn is_submit_signed_transaction() + where + T: SubmitSignedTransaction, + { + } + + fn is_sign_and_submit_transaction() + where + T: SignAndSubmitTransaction< + Runtime, + Call, + Extrinsic = UncheckedExtrinsic, + CreateTransaction = Runtime, + Signer = ImOnlineId, + >, + { + } + + is_submit_signed_transaction::>(); + is_sign_and_submit_transaction::>(); + } } diff --git a/bin/node/testing/src/bench.rs b/bin/node/testing/src/bench.rs index ea48f02650..95d37f9300 100644 --- a/bin/node/testing/src/bench.rs +++ b/bin/node/testing/src/bench.rs @@ -20,44 +20,37 @@ //! can pregenerate seed database and `clone` it for every iteration of your benchmarks //! or tests to get consistent, smooth benchmark experience! 
-use std::{sync::Arc, path::Path, collections::BTreeMap}; +use std::{collections::BTreeMap, path::Path, sync::Arc}; -use node_primitives::Block; -use crate::client::{Client, Backend}; +use crate::client::{Backend, Client}; use crate::keyring::*; -use sc_client_db::PruningMode; -use sc_executor::{NativeExecutor, WasmExecutionMethod}; -use sp_consensus::{ - BlockOrigin, BlockImport, BlockImportParams, - ForkChoiceStrategy, ImportResult, ImportedAux -}; -use sp_runtime::{ - generic::BlockId, - OpaqueExtrinsic, - traits::{Block as BlockT, Verify, Zero, IdentifyAccount}, -}; use codec::{Decode, Encode}; +use futures::{executor, task}; +use node_primitives::Block; use node_runtime::{ - Call, - CheckedExtrinsic, - constants::currency::DOLLARS, - UncheckedExtrinsic, - MinimumPeriod, - BalancesCall, - AccountId, - Signature, + constants::currency::DOLLARS, AccountId, BalancesCall, Call, CheckedExtrinsic, MinimumPeriod, + Signature, UncheckedExtrinsic, +}; +use sc_block_builder::BlockBuilderProvider; +use sc_client_api::{ + execution_extensions::{ExecutionExtensions, ExecutionStrategies}, + ExecutionStrategy, }; -use sp_core::{ExecutionContext, blake2_256, traits::CloneableSpawn}; +use sc_client_db::PruningMode; +use sc_executor::{NativeExecutor, WasmExecutionMethod}; use sp_api::ProvideRuntimeApi; use sp_block_builder::BlockBuilder; +use sp_consensus::{ + BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, ImportResult, ImportedAux, +}; +use sp_core::{blake2_256, traits::CloneableSpawn, ExecutionContext}; +use sp_core::{ed25519, sr25519, Pair, Public}; use sp_inherents::InherentData; -use sc_client_api::{ - ExecutionStrategy, - execution_extensions::{ExecutionExtensions, ExecutionStrategies}, +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, IdentifyAccount, Verify, Zero}, + OpaqueExtrinsic, }; -use sp_core::{Pair, Public, sr25519, ed25519}; -use sc_block_builder::BlockBuilderProvider; -use futures::{executor, task}; /// Keyring full of 
accounts for benching. /// @@ -68,22 +61,22 @@ use futures::{executor, task}; /// //endowed-user//N #[derive(Clone)] pub struct BenchKeyring { - accounts: BTreeMap, + accounts: BTreeMap, } #[derive(Clone)] enum BenchPair { - Sr25519(sr25519::Pair), - Ed25519(ed25519::Pair), + Sr25519(sr25519::Pair), + Ed25519(ed25519::Pair), } impl BenchPair { - fn sign(&self, payload: &[u8]) -> Signature { - match self { - Self::Sr25519(pair) => pair.sign(payload).into(), - Self::Ed25519(pair) => pair.sign(payload).into(), - } - } + fn sign(&self, payload: &[u8]) -> Signature { + match self { + Self::Sr25519(pair) => pair.sign(payload).into(), + Self::Ed25519(pair) => pair.sign(payload).into(), + } + } } /// Pre-initialized benchmarking database. @@ -91,56 +84,62 @@ impl BenchPair { /// This is prepared database with genesis and keyring /// that can be cloned and then used for any benchmarking. pub struct BenchDb { - keyring: BenchKeyring, - directory_guard: Guard, + keyring: BenchKeyring, + directory_guard: Guard, } impl Clone for BenchDb { - fn clone(&self) -> Self { - let keyring = self.keyring.clone(); - let dir = tempfile::tempdir().expect("temp dir creation failed"); - - let seed_dir = self.directory_guard.0.path(); - - log::trace!( - target: "bench-logistics", - "Copying seed db from {} to {}", - seed_dir.to_string_lossy(), - dir.path().to_string_lossy(), - ); - let seed_db_files = std::fs::read_dir(seed_dir) - .expect("failed to list file in seed dir") - .map(|f_result| - f_result.expect("failed to read file in seed db") - .path() - .clone() - ).collect(); - fs_extra::copy_items( - &seed_db_files, - dir.path(), - &fs_extra::dir::CopyOptions::new(), - ).expect("Copy of seed database is ok"); - - BenchDb { keyring, directory_guard: Guard(dir) } - } + fn clone(&self) -> Self { + let keyring = self.keyring.clone(); + let dir = tempfile::tempdir().expect("temp dir creation failed"); + + let seed_dir = self.directory_guard.0.path(); + + log::trace!( + target: "bench-logistics", 
+ "Copying seed db from {} to {}", + seed_dir.to_string_lossy(), + dir.path().to_string_lossy(), + ); + let seed_db_files = std::fs::read_dir(seed_dir) + .expect("failed to list file in seed dir") + .map(|f_result| { + f_result + .expect("failed to read file in seed db") + .path() + .clone() + }) + .collect(); + fs_extra::copy_items( + &seed_db_files, + dir.path(), + &fs_extra::dir::CopyOptions::new(), + ) + .expect("Copy of seed database is ok"); + + BenchDb { + keyring, + directory_guard: Guard(dir), + } + } } /// Type of block for generation #[derive(Debug, PartialEq, Clone, Copy)] pub enum BlockType { - /// Bunch of random transfers. - RandomTransfers(usize), - /// Bunch of random transfers that drain all of the source balance. - RandomTransfersReaping(usize), + /// Bunch of random transfers. + RandomTransfers(usize), + /// Bunch of random transfers that drain all of the source balance. + RandomTransfersReaping(usize), } impl BlockType { - /// Number of transactions for this block type. - pub fn transactions(&self) -> usize { - match self { - Self::RandomTransfers(v) | Self::RandomTransfersReaping(v) => *v, - } - } + /// Number of transactions for this block type. + pub fn transactions(&self) -> usize { + match self { + Self::RandomTransfers(v) | Self::RandomTransfersReaping(v) => *v, + } + } } /// Benchmarking task executor. @@ -148,373 +147,415 @@ impl BlockType { /// Uses multiple threads as the regular executable. 
#[derive(Debug, Clone)] pub struct TaskExecutor { - pool: executor::ThreadPool, + pool: executor::ThreadPool, } impl TaskExecutor { - fn new() -> Self { - Self { - pool: executor::ThreadPool::new() - .expect("Failed to create task executor") - } - } + fn new() -> Self { + Self { + pool: executor::ThreadPool::new().expect("Failed to create task executor"), + } + } } impl task::Spawn for TaskExecutor { - fn spawn_obj(&self, future: task::FutureObj<'static, ()>) - -> Result<(), task::SpawnError> { - self.pool.spawn_obj(future) - } + fn spawn_obj(&self, future: task::FutureObj<'static, ()>) -> Result<(), task::SpawnError> { + self.pool.spawn_obj(future) + } } impl CloneableSpawn for TaskExecutor { - fn clone(&self) -> Box { - Box::new(Clone::clone(self)) - } + fn clone(&self) -> Box { + Box::new(Clone::clone(self)) + } } impl BenchDb { - /// New immutable benchmarking database. - /// - /// See [`new`] method documentation for more information about the purpose - /// of this structure. - pub fn with_key_types(keyring_length: usize, key_types: KeyTypes) -> Self { - let keyring = BenchKeyring::new(keyring_length, key_types); - - let dir = tempfile::tempdir().expect("temp dir creation failed"); - log::trace!( - target: "bench-logistics", - "Created seed db at {}", - dir.path().to_string_lossy(), - ); - let (_client, _backend) = Self::bench_client(dir.path(), Profile::Native, &keyring); - let directory_guard = Guard(dir); - - BenchDb { keyring, directory_guard } - } - - /// New immutable benchmarking database. - /// - /// This will generate database files in random temporary directory - /// and keep it there until struct is dropped. - /// - /// You can `clone` this database or you can `create_context` from it - /// (which also does `clone`) to run actual operation against new database - /// which will be identical to the original. 
- pub fn new(keyring_length: usize) -> Self { - Self::with_key_types(keyring_length, KeyTypes::Sr25519) - } - - // This should return client that is doing everything that full node - // is doing. - // - // - This client should use best wasm execution method. - // - This client should work with real database only. - fn bench_client(dir: &std::path::Path, profile: Profile, keyring: &BenchKeyring) -> (Client, std::sync::Arc) { - let db_config = sc_client_db::DatabaseSettings { - state_cache_size: 16*1024*1024, - state_cache_child_ratio: Some((0, 100)), - pruning: PruningMode::ArchiveAll, - source: sc_client_db::DatabaseSettingsSrc::RocksDb { - path: dir.into(), - cache_size: 512, - }, - }; - - let (client, backend) = sc_client_db::new_client( - db_config, - NativeExecutor::new(WasmExecutionMethod::Compiled, None, 8), - &keyring.generate_genesis(), - None, - None, - ExecutionExtensions::new(profile.into_execution_strategies(), None), - Box::new(TaskExecutor::new()), - None, - ).expect("Should not fail"); - - (client, backend) - } - - /// Generate new block using this database. 
- pub fn generate_block(&mut self, block_type: BlockType) -> Block { - let (client, _backend) = Self::bench_client( - self.directory_guard.path(), - Profile::Wasm, - &self.keyring, - ); - - let version = client.runtime_version_at(&BlockId::number(0)) - .expect("There should be runtime version at 0") - .spec_version; - - let genesis_hash = client.block_hash(Zero::zero()) - .expect("Database error?") - .expect("Genesis block always exists; qed") - .into(); - - let mut block = client - .new_block(Default::default()) - .expect("Block creation failed"); - - let timestamp = 1 * MinimumPeriod::get(); - - let mut inherent_data = InherentData::new(); - inherent_data.put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) - .expect("Put timestamp failed"); - inherent_data.put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &0) - .expect("Put finality tracker failed"); - - for extrinsic in client.runtime_api() - .inherent_extrinsics_with_context( - &BlockId::number(0), - ExecutionContext::BlockConstruction, - inherent_data, - ).expect("Get inherents failed") - { - block.push(extrinsic).expect("Push inherent failed"); - } - - let mut iteration = 0; - let start = std::time::Instant::now(); - for _ in 0..block_type.transactions() { - - let sender = self.keyring.at(iteration); - let receiver = get_account_id_from_seed::( - &format!("random-user//{}", iteration) - ); - - let signed = self.keyring.sign( - CheckedExtrinsic { - signed: Some((sender, signed_extra(0, node_runtime::ExistentialDeposit::get() + 1))), - function: Call::Balances( - BalancesCall::transfer( - pallet_indices::address::Address::Id(receiver), - match block_type { - BlockType::RandomTransfers(_) => node_runtime::ExistentialDeposit::get() + 1, - BlockType::RandomTransfersReaping(_) => 100*DOLLARS - node_runtime::ExistentialDeposit::get() - 1, - } - ) - ), - }, - version, - genesis_hash, - ); - - let encoded = Encode::encode(&signed); - - let opaque = OpaqueExtrinsic::decode(&mut &encoded[..]) - .expect("Failed to decode 
opaque"); - - match block.push(opaque) { - Err(sp_blockchain::Error::ApplyExtrinsicFailed( - sp_blockchain::ApplyExtrinsicFailed::Validity(e) - )) if e.exhausted_resources() => { - break; - }, - Err(err) => panic!("Error pushing transaction: {:?}", err), - Ok(_) => {}, - } - iteration += 1; - } - let block = block.build().expect("Block build failed").block; - - log::info!( - target: "bench-logistics", - "Block construction: {:#?} ({} tx)", - start.elapsed(), block.extrinsics.len() - ); - - block - } - - /// Database path. - pub fn path(&self) -> &Path { - self.directory_guard.path() - } - - /// Clone this database and create context for testing/benchmarking. - pub fn create_context(&self, profile: Profile) -> BenchContext { - let BenchDb { directory_guard, keyring } = self.clone(); - let (client, backend) = Self::bench_client(directory_guard.path(), profile, &keyring); - - BenchContext { - client, backend, db_guard: directory_guard, - } - } + /// New immutable benchmarking database. + /// + /// See [`new`] method documentation for more information about the purpose + /// of this structure. + pub fn with_key_types(keyring_length: usize, key_types: KeyTypes) -> Self { + let keyring = BenchKeyring::new(keyring_length, key_types); + + let dir = tempfile::tempdir().expect("temp dir creation failed"); + log::trace!( + target: "bench-logistics", + "Created seed db at {}", + dir.path().to_string_lossy(), + ); + let (_client, _backend) = Self::bench_client(dir.path(), Profile::Native, &keyring); + let directory_guard = Guard(dir); + + BenchDb { + keyring, + directory_guard, + } + } + + /// New immutable benchmarking database. + /// + /// This will generate database files in random temporary directory + /// and keep it there until struct is dropped. + /// + /// You can `clone` this database or you can `create_context` from it + /// (which also does `clone`) to run actual operation against new database + /// which will be identical to the original. 
+ pub fn new(keyring_length: usize) -> Self { + Self::with_key_types(keyring_length, KeyTypes::Sr25519) + } + + // This should return client that is doing everything that full node + // is doing. + // + // - This client should use best wasm execution method. + // - This client should work with real database only. + fn bench_client( + dir: &std::path::Path, + profile: Profile, + keyring: &BenchKeyring, + ) -> (Client, std::sync::Arc) { + let db_config = sc_client_db::DatabaseSettings { + state_cache_size: 16 * 1024 * 1024, + state_cache_child_ratio: Some((0, 100)), + pruning: PruningMode::ArchiveAll, + source: sc_client_db::DatabaseSettingsSrc::RocksDb { + path: dir.into(), + cache_size: 512, + }, + }; + + let (client, backend) = sc_client_db::new_client( + db_config, + NativeExecutor::new(WasmExecutionMethod::Compiled, None, 8), + &keyring.generate_genesis(), + None, + None, + ExecutionExtensions::new(profile.into_execution_strategies(), None), + Box::new(TaskExecutor::new()), + None, + ) + .expect("Should not fail"); + + (client, backend) + } + + /// Generate new block using this database. 
+ pub fn generate_block(&mut self, block_type: BlockType) -> Block { + let (client, _backend) = + Self::bench_client(self.directory_guard.path(), Profile::Wasm, &self.keyring); + + let version = client + .runtime_version_at(&BlockId::number(0)) + .expect("There should be runtime version at 0") + .spec_version; + + let genesis_hash = client + .block_hash(Zero::zero()) + .expect("Database error?") + .expect("Genesis block always exists; qed") + .into(); + + let mut block = client + .new_block(Default::default()) + .expect("Block creation failed"); + + let timestamp = 1 * MinimumPeriod::get(); + + let mut inherent_data = InherentData::new(); + inherent_data + .put_data(sp_timestamp::INHERENT_IDENTIFIER, ×tamp) + .expect("Put timestamp failed"); + inherent_data + .put_data(sp_finality_tracker::INHERENT_IDENTIFIER, &0) + .expect("Put finality tracker failed"); + + for extrinsic in client + .runtime_api() + .inherent_extrinsics_with_context( + &BlockId::number(0), + ExecutionContext::BlockConstruction, + inherent_data, + ) + .expect("Get inherents failed") + { + block.push(extrinsic).expect("Push inherent failed"); + } + + let mut iteration = 0; + let start = std::time::Instant::now(); + for _ in 0..block_type.transactions() { + let sender = self.keyring.at(iteration); + let receiver = + get_account_id_from_seed::(&format!("random-user//{}", iteration)); + + let signed = self.keyring.sign( + CheckedExtrinsic { + signed: Some(( + sender, + signed_extra(0, node_runtime::ExistentialDeposit::get() + 1), + )), + function: Call::Balances(BalancesCall::transfer( + pallet_indices::address::Address::Id(receiver), + match block_type { + BlockType::RandomTransfers(_) => { + node_runtime::ExistentialDeposit::get() + 1 + } + BlockType::RandomTransfersReaping(_) => { + 100 * DOLLARS - node_runtime::ExistentialDeposit::get() - 1 + } + }, + )), + }, + version, + genesis_hash, + ); + + let encoded = Encode::encode(&signed); + + let opaque = + OpaqueExtrinsic::decode(&mut 
&encoded[..]).expect("Failed to decode opaque"); + + match block.push(opaque) { + Err(sp_blockchain::Error::ApplyExtrinsicFailed( + sp_blockchain::ApplyExtrinsicFailed::Validity(e), + )) if e.exhausted_resources() => { + break; + } + Err(err) => panic!("Error pushing transaction: {:?}", err), + Ok(_) => {} + } + iteration += 1; + } + let block = block.build().expect("Block build failed").block; + + log::info!( + target: "bench-logistics", + "Block construction: {:#?} ({} tx)", + start.elapsed(), block.extrinsics.len() + ); + + block + } + + /// Database path. + pub fn path(&self) -> &Path { + self.directory_guard.path() + } + + /// Clone this database and create context for testing/benchmarking. + pub fn create_context(&self, profile: Profile) -> BenchContext { + let BenchDb { + directory_guard, + keyring, + } = self.clone(); + let (client, backend) = Self::bench_client(directory_guard.path(), profile, &keyring); + + BenchContext { + client, + backend, + db_guard: directory_guard, + } + } } /// Key types to be used in benching keyring pub enum KeyTypes { - /// sr25519 signing keys - Sr25519, - /// ed25519 signing keys - Ed25519, + /// sr25519 signing keys + Sr25519, + /// ed25519 signing keys + Ed25519, } impl BenchKeyring { - /// New keyring. - /// - /// `length` is the number of accounts generated. 
- pub fn new(length: usize, key_types: KeyTypes) -> Self { - let mut accounts = BTreeMap::new(); - - for n in 0..length { - let seed = format!("//endowed-user/{}", n); - let (account_id, pair) = match key_types { - KeyTypes::Sr25519 => { - let pair = sr25519::Pair::from_string(&seed, None).expect("failed to generate pair"); - let account_id = AccountPublic::from(pair.public()).into_account(); - (account_id, BenchPair::Sr25519(pair)) - }, - KeyTypes::Ed25519 => { - let pair = ed25519::Pair::from_seed(&blake2_256(seed.as_bytes())); - let account_id = AccountPublic::from(pair.public()).into_account(); - (account_id, BenchPair::Ed25519(pair)) - }, - }; - accounts.insert(account_id, pair); - } - - Self { accounts } - } - - /// Generated account id-s from keyring keypairs. - pub fn collect_account_ids(&self) -> Vec { - self.accounts.keys().cloned().collect() - } - - /// Get account id at position `index` - pub fn at(&self, index: usize) -> AccountId { - self.accounts.keys().nth(index).expect("Failed to get account").clone() - } - - /// Sign transaction with keypair from this keyring. - pub fn sign(&self, xt: CheckedExtrinsic, version: u32, genesis_hash: [u8; 32]) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { - let payload = (xt.function, extra.clone(), version, genesis_hash, genesis_hash); - let key = self.accounts.get(&signed).expect("Account id not found in keyring"); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&sp_io::hashing::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); - UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), - function: payload.0, - } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, - }, - } - } - - /// Generate genesis with accounts from this keyring endowed with some balance. 
- pub fn generate_genesis(&self) -> node_runtime::GenesisConfig { - crate::genesis::config_endowed( - false, - Some(node_runtime::WASM_BINARY), - self.collect_account_ids(), - ) - } + /// New keyring. + /// + /// `length` is the number of accounts generated. + pub fn new(length: usize, key_types: KeyTypes) -> Self { + let mut accounts = BTreeMap::new(); + + for n in 0..length { + let seed = format!("//endowed-user/{}", n); + let (account_id, pair) = match key_types { + KeyTypes::Sr25519 => { + let pair = + sr25519::Pair::from_string(&seed, None).expect("failed to generate pair"); + let account_id = AccountPublic::from(pair.public()).into_account(); + (account_id, BenchPair::Sr25519(pair)) + } + KeyTypes::Ed25519 => { + let pair = ed25519::Pair::from_seed(&blake2_256(seed.as_bytes())); + let account_id = AccountPublic::from(pair.public()).into_account(); + (account_id, BenchPair::Ed25519(pair)) + } + }; + accounts.insert(account_id, pair); + } + + Self { accounts } + } + + /// Generated account id-s from keyring keypairs. + pub fn collect_account_ids(&self) -> Vec { + self.accounts.keys().cloned().collect() + } + + /// Get account id at position `index` + pub fn at(&self, index: usize) -> AccountId { + self.accounts + .keys() + .nth(index) + .expect("Failed to get account") + .clone() + } + + /// Sign transaction with keypair from this keyring. 
+ pub fn sign( + &self, + xt: CheckedExtrinsic, + version: u32, + genesis_hash: [u8; 32], + ) -> UncheckedExtrinsic { + match xt.signed { + Some((signed, extra)) => { + let payload = ( + xt.function, + extra.clone(), + version, + genesis_hash, + genesis_hash, + ); + let key = self + .accounts + .get(&signed) + .expect("Account id not found in keyring"); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&sp_io::hashing::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); + UncheckedExtrinsic { + signature: Some(( + pallet_indices::address::Address::Id(signed), + signature, + extra, + )), + function: payload.0, + } + } + None => UncheckedExtrinsic { + signature: None, + function: xt.function, + }, + } + } + + /// Generate genesis with accounts from this keyring endowed with some balance. + pub fn generate_genesis(&self) -> node_runtime::GenesisConfig { + crate::genesis::config_endowed( + false, + Some(node_runtime::WASM_BINARY), + self.collect_account_ids(), + ) + } } /// Profile for exetion strategies. #[derive(Clone, Copy, Debug)] pub enum Profile { - /// As native as possible. - Native, - /// As wasm as possible. - Wasm, + /// As native as possible. + Native, + /// As wasm as possible. 
+ Wasm, } impl Profile { - fn into_execution_strategies(self) -> ExecutionStrategies { - match self { - Profile::Wasm => ExecutionStrategies { - syncing: ExecutionStrategy::AlwaysWasm, - importing: ExecutionStrategy::AlwaysWasm, - block_construction: ExecutionStrategy::AlwaysWasm, - offchain_worker: ExecutionStrategy::AlwaysWasm, - other: ExecutionStrategy::AlwaysWasm, - }, - Profile::Native => ExecutionStrategies { - syncing: ExecutionStrategy::NativeElseWasm, - importing: ExecutionStrategy::NativeElseWasm, - block_construction: ExecutionStrategy::NativeElseWasm, - offchain_worker: ExecutionStrategy::NativeElseWasm, - other: ExecutionStrategy::NativeElseWasm, - } - } - } + fn into_execution_strategies(self) -> ExecutionStrategies { + match self { + Profile::Wasm => ExecutionStrategies { + syncing: ExecutionStrategy::AlwaysWasm, + importing: ExecutionStrategy::AlwaysWasm, + block_construction: ExecutionStrategy::AlwaysWasm, + offchain_worker: ExecutionStrategy::AlwaysWasm, + other: ExecutionStrategy::AlwaysWasm, + }, + Profile::Native => ExecutionStrategies { + syncing: ExecutionStrategy::NativeElseWasm, + importing: ExecutionStrategy::NativeElseWasm, + block_construction: ExecutionStrategy::NativeElseWasm, + offchain_worker: ExecutionStrategy::NativeElseWasm, + other: ExecutionStrategy::NativeElseWasm, + }, + } + } } struct Guard(tempfile::TempDir); impl Guard { - fn path(&self) -> &Path { - self.0.path() - } + fn path(&self) -> &Path { + self.0.path() + } } /// Benchmarking/test context holding instantiated client and backend references. pub struct BenchContext { - /// Node client. - pub client: Client, - /// Node backend. - pub backend: Arc, + /// Node client. + pub client: Client, + /// Node backend. 
+ pub backend: Arc, - db_guard: Guard, + db_guard: Guard, } type AccountPublic = ::Signer; fn get_from_seed(seed: &str) -> ::Public { - TPublic::Pair::from_string(&format!("//{}", seed), None) - .expect("static values are valid; qed") - .public() + TPublic::Pair::from_string(&format!("//{}", seed), None) + .expect("static values are valid; qed") + .public() } fn get_account_id_from_seed(seed: &str) -> AccountId where - AccountPublic: From<::Public> + AccountPublic: From<::Public>, { - AccountPublic::from(get_from_seed::(seed)).into_account() + AccountPublic::from(get_from_seed::(seed)).into_account() } impl BenchContext { - /// Import some block. - pub fn import_block(&mut self, block: Block) { - let mut import_params = BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone()); - import_params.body = Some(block.extrinsics().to_vec()); - import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - assert_eq!(self.client.chain_info().best_number, 0); - - assert_eq!( - self.client.import_block(import_params, Default::default()) - .expect("Failed to import block"), - ImportResult::Imported( - ImportedAux { - header_only: false, - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - } - ) - ); - - assert_eq!(self.client.chain_info().best_number, 1); - } - - /// Database path for the current context. - pub fn path(&self) -> &Path { - self.db_guard.path() - } + /// Import some block. 
+ pub fn import_block(&mut self, block: Block) { + let mut import_params = + BlockImportParams::new(BlockOrigin::NetworkBroadcast, block.header.clone()); + import_params.body = Some(block.extrinsics().to_vec()); + import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + assert_eq!(self.client.chain_info().best_number, 0); + + assert_eq!( + self.client + .import_block(import_params, Default::default()) + .expect("Failed to import block"), + ImportResult::Imported(ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + is_new_best: true, + }) + ); + + assert_eq!(self.client.chain_info().best_number, 1); + } + + /// Database path for the current context. + pub fn path(&self) -> &Path { + self.db_guard.path() + } } diff --git a/bin/node/testing/src/client.rs b/bin/node/testing/src/client.rs index 963bac7041..afb8fa5052 100644 --- a/bin/node/testing/src/client.rs +++ b/bin/node/testing/src/client.rs @@ -29,10 +29,10 @@ pub type Backend = sc_client_db::Backend; /// Test client type. pub type Client = sc_client::Client< - Backend, - sc_client::LocalCallExecutor, - node_primitives::Block, - node_runtime::RuntimeApi, + Backend, + sc_client::LocalCallExecutor, + node_primitives::Block, + node_runtime::RuntimeApi, >; /// Transaction for node-runtime. @@ -41,37 +41,39 @@ pub type Transaction = sc_client_api::backend::TransactionFor Storage { - crate::genesis::config(self.support_changes_trie, None).build_storage().unwrap() - } + fn genesis_storage(&self) -> Storage { + crate::genesis::config(self.support_changes_trie, None) + .build_storage() + .unwrap() + } } /// A `test-runtime` extensions to `TestClientBuilder`. pub trait TestClientBuilderExt: Sized { - /// Create test client builder. - fn new() -> Self; + /// Create test client builder. + fn new() -> Self; - /// Build the test client. - fn build(self) -> Client; + /// Build the test client. 
+ fn build(self) -> Client; } -impl TestClientBuilderExt for substrate_test_client::TestClientBuilder< - node_primitives::Block, - sc_client::LocalCallExecutor, - Backend, - GenesisParameters, -> { - fn new() -> Self{ - Self::default() - } - - fn build(self) -> Client { - self.build_with_native_executor(None).0 - } +impl TestClientBuilderExt + for substrate_test_client::TestClientBuilder< + node_primitives::Block, + sc_client::LocalCallExecutor, + Backend, + GenesisParameters, + > +{ + fn new() -> Self { + Self::default() + } + + fn build(self) -> Client { + self.build_with_native_executor(None).0 + } } - - diff --git a/bin/node/testing/src/genesis.rs b/bin/node/testing/src/genesis.rs index 8a57010770..196a456c8f 100644 --- a/bin/node/testing/src/genesis.rs +++ b/bin/node/testing/src/genesis.rs @@ -17,106 +17,126 @@ //! Genesis Configuration. use crate::keyring::*; -use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; +use node_runtime::constants::currency::*; use node_runtime::{ - GenesisConfig, BalancesConfig, SessionConfig, StakingConfig, SystemConfig, - GrandpaConfig, IndicesConfig, ContractsConfig, SocietyConfig, WASM_BINARY, - AccountId, + AccountId, BalancesConfig, ContractsConfig, GenesisConfig, GrandpaConfig, IndicesConfig, + SessionConfig, SocietyConfig, StakingConfig, SystemConfig, WASM_BINARY, }; -use node_runtime::constants::currency::*; use sp_core::ChangesTrieConfiguration; +use sp_keyring::{Ed25519Keyring, Sr25519Keyring}; use sp_runtime::Perbill; /// Create genesis runtime configuration for tests. pub fn config(support_changes_trie: bool, code: Option<&[u8]>) -> GenesisConfig { - config_endowed(support_changes_trie, code, Default::default()) + config_endowed(support_changes_trie, code, Default::default()) } /// Create genesis runtime configuration for tests with some extra /// endowed accounts. 
pub fn config_endowed( - support_changes_trie: bool, - code: Option<&[u8]>, - extra_endowed: Vec, + support_changes_trie: bool, + code: Option<&[u8]>, + extra_endowed: Vec, ) -> GenesisConfig { + let mut endowed = vec![ + (alice(), 111 * DOLLARS), + (bob(), 100 * DOLLARS), + (charlie(), 100_000_000 * DOLLARS), + (dave(), 111 * DOLLARS), + (eve(), 101 * DOLLARS), + (ferdie(), 100 * DOLLARS), + ]; - let mut endowed = vec![ - (alice(), 111 * DOLLARS), - (bob(), 100 * DOLLARS), - (charlie(), 100_000_000 * DOLLARS), - (dave(), 111 * DOLLARS), - (eve(), 101 * DOLLARS), - (ferdie(), 100 * DOLLARS), - ]; - - endowed.extend( - extra_endowed.into_iter().map(|endowed| (endowed, 100*DOLLARS)) - ); + endowed.extend( + extra_endowed + .into_iter() + .map(|endowed| (endowed, 100 * DOLLARS)), + ); - GenesisConfig { - frame_system: Some(SystemConfig { - changes_trie_config: if support_changes_trie { Some(ChangesTrieConfiguration { - digest_interval: 2, - digest_levels: 2, - }) } else { None }, - code: code.map(|x| x.to_vec()).unwrap_or_else(|| WASM_BINARY.to_vec()), - }), - pallet_indices: Some(IndicesConfig { - indices: vec![], - }), - pallet_balances: Some(BalancesConfig { - balances: endowed, - }), - pallet_session: Some(SessionConfig { - keys: vec![ - (dave(), alice(), to_session_keys( - &Ed25519Keyring::Alice, - &Sr25519Keyring::Alice, - )), - (eve(), bob(), to_session_keys( - &Ed25519Keyring::Bob, - &Sr25519Keyring::Bob, - )), - (ferdie(), charlie(), to_session_keys( - &Ed25519Keyring::Charlie, - &Sr25519Keyring::Charlie, - )), - ] - }), - pallet_staking: Some(StakingConfig { - stakers: vec![ - (dave(), alice(), 111 * DOLLARS, pallet_staking::StakerStatus::Validator), - (eve(), bob(), 100 * DOLLARS, pallet_staking::StakerStatus::Validator), - (ferdie(), charlie(), 100 * DOLLARS, pallet_staking::StakerStatus::Validator) - ], - validator_count: 3, - minimum_validator_count: 0, - slash_reward_fraction: Perbill::from_percent(10), - invulnerables: vec![alice(), bob(), charlie()], 
- .. Default::default() - }), - pallet_contracts: Some(ContractsConfig { - current_schedule: Default::default(), - gas_price: 1 * MILLICENTS, - }), - pallet_babe: Some(Default::default()), - pallet_grandpa: Some(GrandpaConfig { - authorities: vec![], - }), - pallet_im_online: Some(Default::default()), - pallet_authority_discovery: Some(Default::default()), - pallet_democracy: Some(Default::default()), - pallet_collective_Instance1: Some(Default::default()), - pallet_collective_Instance2: Some(Default::default()), - pallet_membership_Instance1: Some(Default::default()), - pallet_elections_phragmen: Some(Default::default()), - pallet_sudo: Some(Default::default()), - pallet_treasury: Some(Default::default()), - pallet_society: Some(SocietyConfig { - members: vec![alice(), bob()], - pot: 0, - max_members: 999, - }), - pallet_vesting: Some(Default::default()), - } + GenesisConfig { + frame_system: Some(SystemConfig { + changes_trie_config: if support_changes_trie { + Some(ChangesTrieConfiguration { + digest_interval: 2, + digest_levels: 2, + }) + } else { + None + }, + code: code + .map(|x| x.to_vec()) + .unwrap_or_else(|| WASM_BINARY.to_vec()), + }), + pallet_indices: Some(IndicesConfig { indices: vec![] }), + pallet_balances: Some(BalancesConfig { balances: endowed }), + pallet_session: Some(SessionConfig { + keys: vec![ + ( + dave(), + alice(), + to_session_keys(&Ed25519Keyring::Alice, &Sr25519Keyring::Alice), + ), + ( + eve(), + bob(), + to_session_keys(&Ed25519Keyring::Bob, &Sr25519Keyring::Bob), + ), + ( + ferdie(), + charlie(), + to_session_keys(&Ed25519Keyring::Charlie, &Sr25519Keyring::Charlie), + ), + ], + }), + pallet_staking: Some(StakingConfig { + stakers: vec![ + ( + dave(), + alice(), + 111 * DOLLARS, + pallet_staking::StakerStatus::Validator, + ), + ( + eve(), + bob(), + 100 * DOLLARS, + pallet_staking::StakerStatus::Validator, + ), + ( + ferdie(), + charlie(), + 100 * DOLLARS, + pallet_staking::StakerStatus::Validator, + ), + ], + validator_count: 3, + 
minimum_validator_count: 0, + slash_reward_fraction: Perbill::from_percent(10), + invulnerables: vec![alice(), bob(), charlie()], + ..Default::default() + }), + pallet_contracts: Some(ContractsConfig { + current_schedule: Default::default(), + gas_price: 1 * MILLICENTS, + }), + pallet_babe: Some(Default::default()), + pallet_grandpa: Some(GrandpaConfig { + authorities: vec![], + }), + pallet_im_online: Some(Default::default()), + pallet_authority_discovery: Some(Default::default()), + pallet_democracy: Some(Default::default()), + pallet_collective_Instance1: Some(Default::default()), + pallet_collective_Instance2: Some(Default::default()), + pallet_membership_Instance1: Some(Default::default()), + pallet_elections_phragmen: Some(Default::default()), + pallet_sudo: Some(Default::default()), + pallet_treasury: Some(Default::default()), + pallet_society: Some(SocietyConfig { + members: vec![alice(), bob()], + pot: 0, + max_members: 999, + }), + pallet_vesting: Some(Default::default()), + } } diff --git a/bin/node/testing/src/keyring.rs b/bin/node/testing/src/keyring.rs index 6b0d06875d..b470f8fee4 100644 --- a/bin/node/testing/src/keyring.rs +++ b/bin/node/testing/src/keyring.rs @@ -16,89 +16,101 @@ //! Test accounts. -use sp_keyring::{AccountKeyring, Sr25519Keyring, Ed25519Keyring}; +use codec::Encode; use node_primitives::{AccountId, Balance, Index}; -use node_runtime::{CheckedExtrinsic, UncheckedExtrinsic, SessionKeys, SignedExtra}; +use node_runtime::{CheckedExtrinsic, SessionKeys, SignedExtra, UncheckedExtrinsic}; +use sp_keyring::{AccountKeyring, Ed25519Keyring, Sr25519Keyring}; use sp_runtime::generic::Era; -use codec::Encode; /// Alice's account id. pub fn alice() -> AccountId { - AccountKeyring::Alice.into() + AccountKeyring::Alice.into() } /// Bob's account id. pub fn bob() -> AccountId { - AccountKeyring::Bob.into() + AccountKeyring::Bob.into() } /// Charlie's account id. 
pub fn charlie() -> AccountId { - AccountKeyring::Charlie.into() + AccountKeyring::Charlie.into() } /// Dave's account id. pub fn dave() -> AccountId { - AccountKeyring::Dave.into() + AccountKeyring::Dave.into() } /// Eve's account id. pub fn eve() -> AccountId { - AccountKeyring::Eve.into() + AccountKeyring::Eve.into() } /// Ferdie's account id. pub fn ferdie() -> AccountId { - AccountKeyring::Ferdie.into() + AccountKeyring::Ferdie.into() } /// Convert keyrings into `SessionKeys`. pub fn to_session_keys( - ed25519_keyring: &Ed25519Keyring, - sr25519_keyring: &Sr25519Keyring, + ed25519_keyring: &Ed25519Keyring, + sr25519_keyring: &Sr25519Keyring, ) -> SessionKeys { - SessionKeys { - grandpa: ed25519_keyring.to_owned().public().into(), - babe: sr25519_keyring.to_owned().public().into(), - im_online: sr25519_keyring.to_owned().public().into(), - authority_discovery: sr25519_keyring.to_owned().public().into(), - } + SessionKeys { + grandpa: ed25519_keyring.to_owned().public().into(), + babe: sr25519_keyring.to_owned().public().into(), + im_online: sr25519_keyring.to_owned().public().into(), + authority_discovery: sr25519_keyring.to_owned().public().into(), + } } /// Returns transaction extra. pub fn signed_extra(nonce: Index, extra_fee: Balance) -> SignedExtra { - ( - frame_system::CheckVersion::new(), - frame_system::CheckGenesis::new(), - frame_system::CheckEra::from(Era::mortal(256, 0)), - frame_system::CheckNonce::from(nonce), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(extra_fee), - Default::default(), - ) + ( + frame_system::CheckVersion::new(), + frame_system::CheckGenesis::new(), + frame_system::CheckEra::from(Era::mortal(256, 0)), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(extra_fee), + Default::default(), + ) } /// Sign given `CheckedExtrinsic`. 
pub fn sign(xt: CheckedExtrinsic, version: u32, genesis_hash: [u8; 32]) -> UncheckedExtrinsic { - match xt.signed { - Some((signed, extra)) => { - let payload = (xt.function, extra.clone(), version, genesis_hash, genesis_hash); - let key = AccountKeyring::from_account_id(&signed).unwrap(); - let signature = payload.using_encoded(|b| { - if b.len() > 256 { - key.sign(&sp_io::hashing::blake2_256(b)) - } else { - key.sign(b) - } - }).into(); - UncheckedExtrinsic { - signature: Some((pallet_indices::address::Address::Id(signed), signature, extra)), - function: payload.0, - } - } - None => UncheckedExtrinsic { - signature: None, - function: xt.function, - }, - } + match xt.signed { + Some((signed, extra)) => { + let payload = ( + xt.function, + extra.clone(), + version, + genesis_hash, + genesis_hash, + ); + let key = AccountKeyring::from_account_id(&signed).unwrap(); + let signature = payload + .using_encoded(|b| { + if b.len() > 256 { + key.sign(&sp_io::hashing::blake2_256(b)) + } else { + key.sign(b) + } + }) + .into(); + UncheckedExtrinsic { + signature: Some(( + pallet_indices::address::Address::Id(signed), + signature, + extra, + )), + function: payload.0, + } + } + None => UncheckedExtrinsic { + signature: None, + function: xt.function, + }, + } } diff --git a/bin/node/testing/src/lib.rs b/bin/node/testing/src/lib.rs index 6a06d31801..6f36d42f82 100644 --- a/bin/node/testing/src/lib.rs +++ b/bin/node/testing/src/lib.rs @@ -18,7 +18,7 @@ #![warn(missing_docs)] +pub mod bench; pub mod client; pub mod genesis; pub mod keyring; -pub mod bench; diff --git a/bin/node/transaction-factory/src/lib.rs b/bin/node/transaction-factory/src/lib.rs index 44cb178be1..8290bffa59 100644 --- a/bin/node/transaction-factory/src/lib.rs +++ b/bin/node/transaction-factory/src/lib.rs @@ -19,149 +19,163 @@ //! //! The factory currently only works on an empty database! 
-use std::collections::HashMap; -use std::sync::Arc; use std::cmp::PartialOrd; +use std::collections::HashMap; use std::fmt::Display; +use std::sync::Arc; use log::info; -use sp_block_builder::BlockBuilder; +use codec::{Decode, Encode}; use sc_block_builder::BlockBuilderProvider; -use sp_api::{ProvideRuntimeApi, ApiExt, CallApiAt, TransactionFor}; -use sp_consensus::{ - BlockOrigin, BlockImportParams, InherentData, - ForkChoiceStrategy, SelectChain -}; +use sp_api::{ApiExt, CallApiAt, ProvideRuntimeApi, TransactionFor}; +use sp_block_builder::BlockBuilder; +use sp_blockchain::HeaderBackend; use sp_consensus::block_import::BlockImport; -use codec::{Decode, Encode}; +use sp_consensus::{BlockImportParams, BlockOrigin, ForkChoiceStrategy, InherentData, SelectChain}; use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, AtLeast32Bit, One, Zero, -}; -use sp_blockchain::HeaderBackend; +use sp_runtime::traits::{AtLeast32Bit, Block as BlockT, Header as HeaderT, One, Zero}; pub trait RuntimeAdapter { - type AccountId: Display; - type Balance: Display + AtLeast32Bit + From; - type Block: BlockT; - type Index: Copy; - type Number: Display + PartialOrd + AtLeast32Bit + Zero + One; - type Phase: Copy; - type Secret; - - fn new(blocks: u32, transactions: u32) -> Self; - - fn blocks(&self) -> u32; - fn transactions(&self) -> u32; - - fn block_number(&self) -> u32; - fn set_block_number(&mut self, value: u32); - - fn transfer_extrinsic( - &mut self, - sender: &Self::AccountId, - key: &Self::Secret, - destination: &Self::AccountId, - amount: &Self::Balance, - version: u32, - genesis_hash: &::Hash, - prior_block_hash: &::Hash, - ) -> ::Extrinsic; - - fn inherent_extrinsics(&self) -> InherentData; - - fn minimum_balance() -> Self::Balance; - fn master_account_id() -> Self::AccountId; - fn master_account_secret() -> Self::Secret; - - fn gen_random_account_id(seed: u32) -> Self::AccountId; - fn gen_random_account_secret(seed: u32) -> 
Self::Secret; + type AccountId: Display; + type Balance: Display + AtLeast32Bit + From; + type Block: BlockT; + type Index: Copy; + type Number: Display + PartialOrd + AtLeast32Bit + Zero + One; + type Phase: Copy; + type Secret; + + fn new(blocks: u32, transactions: u32) -> Self; + + fn blocks(&self) -> u32; + fn transactions(&self) -> u32; + + fn block_number(&self) -> u32; + fn set_block_number(&mut self, value: u32); + + fn transfer_extrinsic( + &mut self, + sender: &Self::AccountId, + key: &Self::Secret, + destination: &Self::AccountId, + amount: &Self::Balance, + version: u32, + genesis_hash: &::Hash, + prior_block_hash: &::Hash, + ) -> ::Extrinsic; + + fn inherent_extrinsics(&self) -> InherentData; + + fn minimum_balance() -> Self::Balance; + fn master_account_id() -> Self::AccountId; + fn master_account_secret() -> Self::Secret; + + fn gen_random_account_id(seed: u32) -> Self::AccountId; + fn gen_random_account_secret(seed: u32) -> Self::Secret; } /// Manufactures transactions. The exact amount depends on `num` and `rounds`. 
pub fn factory( - mut factory_state: RA, - client: &Arc, - select_chain: &Sc, + mut factory_state: RA, + client: &Arc, + select_chain: &Sc, ) -> sc_cli::Result<()> - where - Backend: sc_client_api::backend::Backend + Send, - Block: BlockT, - Client: BlockBuilderProvider + CallApiAt - + ProvideRuntimeApi + HeaderBackend, - Client::Api: BlockBuilder + ApiExt, - Sc: SelectChain, - RA: RuntimeAdapter, - Block::Hash: From, - for<'a> &'a Client: BlockImport>, +where + Backend: sc_client_api::backend::Backend + Send, + Block: BlockT, + Client: BlockBuilderProvider + + CallApiAt + + ProvideRuntimeApi + + HeaderBackend, + Client::Api: BlockBuilder + + ApiExt, + Sc: SelectChain, + RA: RuntimeAdapter, + Block::Hash: From, + for<'a> &'a Client: BlockImport>, { - let best_header: Result<::Header, sc_cli::Error> = - select_chain.best_chain().map_err(|e| format!("{:?}", e).into()); - let mut best_hash = best_header?.hash(); - let mut best_block_id = BlockId::::hash(best_hash); - let version = client.runtime_version_at(&best_block_id)?.spec_version; - let genesis_hash = client.hash(Zero::zero())? 
- .expect("Genesis block always exists; qed").into(); - - while factory_state.block_number() < factory_state.blocks() { - let from = (RA::master_account_id(), RA::master_account_secret()); - let amount = RA::minimum_balance(); - - let inherents = RA::inherent_extrinsics(&factory_state); - let inherents = client.runtime_api().inherent_extrinsics(&best_block_id, inherents) - .expect("Failed to create inherent extrinsics"); - - let tx_per_block = factory_state.transactions(); - - let mut block = client.new_block(Default::default()).expect("Failed to create new block"); - - for tx_num in 0..tx_per_block { - let seed = tx_num * (factory_state.block_number() + 1); - let to = RA::gen_random_account_id(seed); - - let transfer = factory_state.transfer_extrinsic( - &from.0, - &from.1, - &to, - &amount, - version, - &genesis_hash, - &best_hash, - ); - - info!("Pushing transfer {}/{} to {} into block.", tx_num + 1, tx_per_block, to); - - block.push( - Decode::decode(&mut &transfer.encode()[..]) - .expect("Failed to decode transfer extrinsic") - ).expect("Failed to push transfer extrinsic into block"); - } - - for inherent in inherents { - block.push(inherent).expect("Failed ..."); - } - - let block = block.build().expect("Failed to bake block").block; - - factory_state.set_block_number(factory_state.block_number() + 1); - - info!( - "Created block {} with hash {}.", - factory_state.block_number(), - best_hash, - ); - - best_hash = block.header().hash(); - best_block_id = BlockId::::hash(best_hash); - - let mut import = BlockImportParams::new(BlockOrigin::File, block.header().clone()); - import.body = Some(block.extrinsics().to_vec()); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - (&**client).import_block(import, HashMap::new()).expect("Failed to import block"); - - info!("Imported block at {}", factory_state.block_number()); - } - - Ok(()) + let best_header: Result<::Header, sc_cli::Error> = select_chain + .best_chain() + .map_err(|e| format!("{:?}", 
e).into()); + let mut best_hash = best_header?.hash(); + let mut best_block_id = BlockId::::hash(best_hash); + let version = client.runtime_version_at(&best_block_id)?.spec_version; + let genesis_hash = client + .hash(Zero::zero())? + .expect("Genesis block always exists; qed") + .into(); + + while factory_state.block_number() < factory_state.blocks() { + let from = (RA::master_account_id(), RA::master_account_secret()); + let amount = RA::minimum_balance(); + + let inherents = RA::inherent_extrinsics(&factory_state); + let inherents = client + .runtime_api() + .inherent_extrinsics(&best_block_id, inherents) + .expect("Failed to create inherent extrinsics"); + + let tx_per_block = factory_state.transactions(); + + let mut block = client + .new_block(Default::default()) + .expect("Failed to create new block"); + + for tx_num in 0..tx_per_block { + let seed = tx_num * (factory_state.block_number() + 1); + let to = RA::gen_random_account_id(seed); + + let transfer = factory_state.transfer_extrinsic( + &from.0, + &from.1, + &to, + &amount, + version, + &genesis_hash, + &best_hash, + ); + + info!( + "Pushing transfer {}/{} to {} into block.", + tx_num + 1, + tx_per_block, + to + ); + + block + .push( + Decode::decode(&mut &transfer.encode()[..]) + .expect("Failed to decode transfer extrinsic"), + ) + .expect("Failed to push transfer extrinsic into block"); + } + + for inherent in inherents { + block.push(inherent).expect("Failed ..."); + } + + let block = block.build().expect("Failed to bake block").block; + + factory_state.set_block_number(factory_state.block_number() + 1); + + info!( + "Created block {} with hash {}.", + factory_state.block_number(), + best_hash, + ); + + best_hash = block.header().hash(); + best_block_id = BlockId::::hash(best_hash); + + let mut import = BlockImportParams::new(BlockOrigin::File, block.header().clone()); + import.body = Some(block.extrinsics().to_vec()); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + (&**client) + 
.import_block(import, HashMap::new()) + .expect("Failed to import block"); + + info!("Imported block at {}", factory_state.block_number()); + } + + Ok(()) } diff --git a/bin/utils/chain-spec-builder/build.rs b/bin/utils/chain-spec-builder/build.rs index 513cc234d4..4b546b4a43 100644 --- a/bin/utils/chain-spec-builder/build.rs +++ b/bin/utils/chain-spec-builder/build.rs @@ -17,7 +17,7 @@ use std::env; fn main() { - if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); - } + if let Ok(profile) = env::var("PROFILE") { + println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + } } diff --git a/bin/utils/chain-spec-builder/src/main.rs b/bin/utils/chain-spec-builder/src/main.rs index 1440187014..22f83ea11c 100644 --- a/bin/utils/chain-spec-builder/src/main.rs +++ b/bin/utils/chain-spec-builder/src/main.rs @@ -14,261 +14,240 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::{fs, path::{Path, PathBuf}}; +use std::{ + fs, + path::{Path, PathBuf}, +}; use ansi_term::Style; -use rand::{Rng, distributions::Alphanumeric, rngs::OsRng}; +use rand::{distributions::Alphanumeric, rngs::OsRng, Rng}; use structopt::StructOpt; -use sc_keystore::{Store as Keystore}; use node_cli::chain_spec::{self, AccountId}; -use sp_core::{sr25519, crypto::{Public, Ss58Codec}, traits::BareCryptoStore}; +use sc_keystore::Store as Keystore; +use sp_core::{ + crypto::{Public, Ss58Codec}, + sr25519, + traits::BareCryptoStore, +}; /// A utility to easily create a testnet chain spec definition with a given set /// of authorities and endowed accounts and/or generate random accounts. #[derive(StructOpt)] #[structopt(rename_all = "kebab-case")] enum ChainSpecBuilder { - /// Create a new chain spec with the given authorities, endowed and sudo - /// accounts. - New { - /// Authority key seed. 
- #[structopt(long, short, required = true)] - authority_seeds: Vec, - /// Endowed account address (SS58 format). - #[structopt(long, short)] - endowed_accounts: Vec, - /// Sudo account address (SS58 format). - #[structopt(long, short)] - sudo_account: String, - /// The path where the chain spec should be saved. - #[structopt(long, short, default_value = "./chain_spec.json")] - chain_spec_path: PathBuf, - }, - /// Create a new chain spec with the given number of authorities and endowed - /// accounts. Random keys will be generated as required. - Generate { - /// The number of authorities. - #[structopt(long, short)] - authorities: usize, - /// The number of endowed accounts. - #[structopt(long, short, default_value = "0")] - endowed: usize, - /// The path where the chain spec should be saved. - #[structopt(long, short, default_value = "./chain_spec.json")] - chain_spec_path: PathBuf, - /// Path to use when saving generated keystores for each authority. - /// - /// At this path, a new folder will be created for each authority's - /// keystore named `auth-$i` where `i` is the authority index, i.e. - /// `auth-0`, `auth-1`, etc. - #[structopt(long, short)] - keystore_path: Option, - }, + /// Create a new chain spec with the given authorities, endowed and sudo + /// accounts. + New { + /// Authority key seed. + #[structopt(long, short, required = true)] + authority_seeds: Vec, + /// Endowed account address (SS58 format). + #[structopt(long, short)] + endowed_accounts: Vec, + /// Sudo account address (SS58 format). + #[structopt(long, short)] + sudo_account: String, + /// The path where the chain spec should be saved. + #[structopt(long, short, default_value = "./chain_spec.json")] + chain_spec_path: PathBuf, + }, + /// Create a new chain spec with the given number of authorities and endowed + /// accounts. Random keys will be generated as required. + Generate { + /// The number of authorities. 
+ #[structopt(long, short)] + authorities: usize, + /// The number of endowed accounts. + #[structopt(long, short, default_value = "0")] + endowed: usize, + /// The path where the chain spec should be saved. + #[structopt(long, short, default_value = "./chain_spec.json")] + chain_spec_path: PathBuf, + /// Path to use when saving generated keystores for each authority. + /// + /// At this path, a new folder will be created for each authority's + /// keystore named `auth-$i` where `i` is the authority index, i.e. + /// `auth-0`, `auth-1`, etc. + #[structopt(long, short)] + keystore_path: Option, + }, } impl ChainSpecBuilder { - /// Returns the path where the chain spec should be saved. - fn chain_spec_path(&self) -> &Path { - match self { - ChainSpecBuilder::New { chain_spec_path, .. } => - chain_spec_path.as_path(), - ChainSpecBuilder::Generate { chain_spec_path, .. } => - chain_spec_path.as_path(), - } - } + /// Returns the path where the chain spec should be saved. + fn chain_spec_path(&self) -> &Path { + match self { + ChainSpecBuilder::New { + chain_spec_path, .. + } => chain_spec_path.as_path(), + ChainSpecBuilder::Generate { + chain_spec_path, .. 
+ } => chain_spec_path.as_path(), + } + } } fn genesis_constructor( - authority_seeds: &[String], - endowed_accounts: &[AccountId], - sudo_account: &AccountId, + authority_seeds: &[String], + endowed_accounts: &[AccountId], + sudo_account: &AccountId, ) -> chain_spec::GenesisConfig { - let authorities = authority_seeds - .iter() - .map(AsRef::as_ref) - .map(chain_spec::authority_keys_from_seed) - .collect::>(); - - let enable_println = true; - - chain_spec::testnet_genesis( - authorities, - sudo_account.clone(), - Some(endowed_accounts.to_vec()), - enable_println, - ) + let authorities = authority_seeds + .iter() + .map(AsRef::as_ref) + .map(chain_spec::authority_keys_from_seed) + .collect::>(); + + let enable_println = true; + + chain_spec::testnet_genesis( + authorities, + sudo_account.clone(), + Some(endowed_accounts.to_vec()), + enable_println, + ) } fn generate_chain_spec( - authority_seeds: Vec, - endowed_accounts: Vec, - sudo_account: String, + authority_seeds: Vec, + endowed_accounts: Vec, + sudo_account: String, ) -> Result { - let parse_account = |address: &String| { - AccountId::from_string(address) - .map_err(|err| format!("Failed to parse account address: {:?}", err)) - }; - - let endowed_accounts = endowed_accounts - .iter() - .map(parse_account) - .collect::, String>>()?; - - let sudo_account = parse_account(&sudo_account)?; - - let chain_spec = chain_spec::ChainSpec::from_genesis( - "Custom", - "custom", - sc_chain_spec::ChainType::Live, - move || genesis_constructor(&authority_seeds, &endowed_accounts, &sudo_account), - vec![], - None, - None, - None, - Default::default(), - ); - - chain_spec.as_json(false).map_err(|err| err.to_string()) + let parse_account = |address: &String| { + AccountId::from_string(address) + .map_err(|err| format!("Failed to parse account address: {:?}", err)) + }; + + let endowed_accounts = endowed_accounts + .iter() + .map(parse_account) + .collect::, String>>()?; + + let sudo_account = parse_account(&sudo_account)?; + + 
let chain_spec = chain_spec::ChainSpec::from_genesis( + "Custom", + "custom", + sc_chain_spec::ChainType::Live, + move || genesis_constructor(&authority_seeds, &endowed_accounts, &sudo_account), + vec![], + None, + None, + None, + Default::default(), + ); + + chain_spec.as_json(false).map_err(|err| err.to_string()) } -fn generate_authority_keys_and_store( - seeds: &[String], - keystore_path: &Path, -) -> Result<(), String> { - for (n, seed) in seeds.into_iter().enumerate() { - let keystore = Keystore::open( - keystore_path.join(format!("auth-{}", n)), - None, - ).map_err(|err| err.to_string())?; - - let (_, _, grandpa, babe, im_online, authority_discovery) = - chain_spec::authority_keys_from_seed(seed); - - let insert_key = |key_type, public| { - keystore.write().insert_unknown( - key_type, - &format!("//{}", seed), - public, - ).map_err(|_| format!("Failed to insert key: {}", grandpa)) - }; - - insert_key( - sp_core::crypto::key_types::BABE, - babe.as_slice(), - )?; - - insert_key( - sp_core::crypto::key_types::GRANDPA, - grandpa.as_slice(), - )?; - - insert_key( - sp_core::crypto::key_types::IM_ONLINE, - im_online.as_slice(), - )?; - - insert_key( - sp_core::crypto::key_types::AUTHORITY_DISCOVERY, - authority_discovery.as_slice(), - )?; - } - - Ok(()) +fn generate_authority_keys_and_store(seeds: &[String], keystore_path: &Path) -> Result<(), String> { + for (n, seed) in seeds.into_iter().enumerate() { + let keystore = Keystore::open(keystore_path.join(format!("auth-{}", n)), None) + .map_err(|err| err.to_string())?; + + let (_, _, grandpa, babe, im_online, authority_discovery) = + chain_spec::authority_keys_from_seed(seed); + + let insert_key = |key_type, public| { + keystore + .write() + .insert_unknown(key_type, &format!("//{}", seed), public) + .map_err(|_| format!("Failed to insert key: {}", grandpa)) + }; + + insert_key(sp_core::crypto::key_types::BABE, babe.as_slice())?; + + insert_key(sp_core::crypto::key_types::GRANDPA, grandpa.as_slice())?; + + 
insert_key(sp_core::crypto::key_types::IM_ONLINE, im_online.as_slice())?; + + insert_key( + sp_core::crypto::key_types::AUTHORITY_DISCOVERY, + authority_discovery.as_slice(), + )?; + } + + Ok(()) } -fn print_seeds( - authority_seeds: &[String], - endowed_seeds: &[String], - sudo_seed: &str, -) { - let header = Style::new().bold().underline(); - let entry = Style::new().bold(); - - println!("{}", header.paint("Authority seeds")); - - for (n, seed) in authority_seeds.iter().enumerate() { - println!("{} //{}", - entry.paint(format!("auth-{}:", n)), - seed, - ); - } - - println!(); - - if !endowed_seeds.is_empty() { - println!("{}", header.paint("Endowed seeds")); - for (n, seed) in endowed_seeds.iter().enumerate() { - println!("{} //{}", - entry.paint(format!("endowed-{}:", n)), - seed, - ); - } - - println!(); - } - - println!("{}", header.paint("Sudo seed")); - println!("//{}", sudo_seed); +fn print_seeds(authority_seeds: &[String], endowed_seeds: &[String], sudo_seed: &str) { + let header = Style::new().bold().underline(); + let entry = Style::new().bold(); + + println!("{}", header.paint("Authority seeds")); + + for (n, seed) in authority_seeds.iter().enumerate() { + println!("{} //{}", entry.paint(format!("auth-{}:", n)), seed,); + } + + println!(); + + if !endowed_seeds.is_empty() { + println!("{}", header.paint("Endowed seeds")); + for (n, seed) in endowed_seeds.iter().enumerate() { + println!("{} //{}", entry.paint(format!("endowed-{}:", n)), seed,); + } + + println!(); + } + + println!("{}", header.paint("Sudo seed")); + println!("//{}", sudo_seed); } fn main() -> Result<(), String> { - #[cfg(build_type="debug")] - println!( + #[cfg(build_type = "debug")] + println!( "The chain spec builder builds a chain specification that includes a Substrate runtime compiled as WASM. 
To \ ensure proper functioning of the included runtime compile (or run) the chain spec builder binary in \ `--release` mode.\n", ); - let builder = ChainSpecBuilder::from_args(); - let chain_spec_path = builder.chain_spec_path().to_path_buf(); - - let (authority_seeds, endowed_accounts, sudo_account) = match builder { - ChainSpecBuilder::Generate { authorities, endowed, keystore_path, .. } => { - let authorities = authorities.max(1); - let rand_str = || -> String { - OsRng.sample_iter(&Alphanumeric) - .take(32) - .collect() - }; - - let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); - let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); - let sudo_seed = rand_str(); - - print_seeds( - &authority_seeds, - &endowed_seeds, - &sudo_seed, - ); - - if let Some(keystore_path) = keystore_path { - generate_authority_keys_and_store( - &authority_seeds, - &keystore_path, - )?; - } - - let endowed_accounts = endowed_seeds.iter().map(|seed| { - chain_spec::get_account_id_from_seed::(seed) - .to_ss58check() - }).collect(); - - let sudo_account = chain_spec::get_account_id_from_seed::(&sudo_seed) - .to_ss58check(); - - (authority_seeds, endowed_accounts, sudo_account) - }, - ChainSpecBuilder::New { authority_seeds, endowed_accounts, sudo_account, .. } => { - (authority_seeds, endowed_accounts, sudo_account) - }, - }; - - let json = generate_chain_spec( - authority_seeds, - endowed_accounts, - sudo_account, - )?; - - fs::write(chain_spec_path, json).map_err(|err| err.to_string()) + let builder = ChainSpecBuilder::from_args(); + let chain_spec_path = builder.chain_spec_path().to_path_buf(); + + let (authority_seeds, endowed_accounts, sudo_account) = match builder { + ChainSpecBuilder::Generate { + authorities, + endowed, + keystore_path, + .. 
+ } => { + let authorities = authorities.max(1); + let rand_str = || -> String { OsRng.sample_iter(&Alphanumeric).take(32).collect() }; + + let authority_seeds = (0..authorities).map(|_| rand_str()).collect::>(); + let endowed_seeds = (0..endowed).map(|_| rand_str()).collect::>(); + let sudo_seed = rand_str(); + + print_seeds(&authority_seeds, &endowed_seeds, &sudo_seed); + + if let Some(keystore_path) = keystore_path { + generate_authority_keys_and_store(&authority_seeds, &keystore_path)?; + } + + let endowed_accounts = endowed_seeds + .iter() + .map(|seed| { + chain_spec::get_account_id_from_seed::(seed).to_ss58check() + }) + .collect(); + + let sudo_account = + chain_spec::get_account_id_from_seed::(&sudo_seed).to_ss58check(); + + (authority_seeds, endowed_accounts, sudo_account) + } + ChainSpecBuilder::New { + authority_seeds, + endowed_accounts, + sudo_account, + .. + } => (authority_seeds, endowed_accounts, sudo_account), + }; + + let json = generate_chain_spec(authority_seeds, endowed_accounts, sudo_account)?; + + fs::write(chain_spec_path, json).map_err(|err| err.to_string()) } diff --git a/bin/utils/subkey/src/main.rs b/bin/utils/subkey/src/main.rs index 08a46f1190..02a1028f54 100644 --- a/bin/utils/subkey/src/main.rs +++ b/bin/utils/subkey/src/main.rs @@ -24,183 +24,208 @@ use codec::{Decode, Encode}; use hex_literal::hex; use itertools::Itertools; use libp2p::identity::{ed25519 as libp2p_ed25519, PublicKey}; -use node_primitives::{Balance, Hash, Index, AccountId, Signature}; +use node_primitives::{AccountId, Balance, Hash, Index, Signature}; use node_runtime::{BalancesCall, Call, Runtime, SignedPayload, UncheckedExtrinsic, VERSION}; use serde_json::json; use sp_core::{ - crypto::{set_default_ss58_version, Ss58AddressFormat, Ss58Codec}, - ed25519, sr25519, ecdsa, Pair, Public, H256, hexdisplay::HexDisplay, + crypto::{set_default_ss58_version, Ss58AddressFormat, Ss58Codec}, + ecdsa, ed25519, + hexdisplay::HexDisplay, + sr25519, Pair, Public, H256, +}; +use 
sp_runtime::{ + generic::Era, + traits::{IdentifyAccount, Verify}, }; -use sp_runtime::{traits::{IdentifyAccount, Verify}, generic::Era}; use std::{ - convert::{TryInto, TryFrom}, io::{stdin, Read}, str::FromStr, path::PathBuf, fs, fmt, + convert::{TryFrom, TryInto}, + fmt, fs, + io::{stdin, Read}, + path::PathBuf, + str::FromStr, }; mod rpc; mod vanity; enum OutputType { - Json, - Text, + Json, + Text, } impl<'a> TryFrom<&'a str> for OutputType { - type Error = (); - - fn try_from(s: &'a str) -> Result { - match s { - "json" => Ok(OutputType::Json), - "text" => Ok(OutputType::Text), - _ => Err(()), - } - } + type Error = (); + fn try_from(s: &'a str) -> Result { + match s { + "json" => Ok(OutputType::Json), + "text" => Ok(OutputType::Text), + _ => Err(()), + } + } } trait Crypto: Sized { - type Pair: Pair; - type Public: Public + Ss58Codec + AsRef<[u8]> + std::hash::Hash; - fn pair_from_suri(suri: &str, password: Option<&str>) -> Self::Pair { - Self::Pair::from_string(suri, password).expect("Invalid phrase") - } - fn ss58_from_pair(pair: &Self::Pair) -> String where - ::Public: PublicT, - { - pair.public().into_runtime().into_account().to_ss58check() - } - fn public_from_pair(pair: &Self::Pair) -> Self::Public { - pair.public() - } - fn print_from_uri( - uri: &str, - password: Option<&str>, - network_override: Option, - output: OutputType, - ) where - ::Public: PublicT, - { - let v = network_override.unwrap_or_default(); - if let Ok((pair, seed)) = Self::Pair::from_phrase(uri, password) { - let public_key = Self::public_from_pair(&pair); - - match output { - OutputType::Json => { - let json = json!({ - "secretPhrase": uri, - "networkId": String::from(v), - "secretSeed": format_seed::(seed), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key), - "ss58Address": Self::ss58_from_pair(&pair), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - 
println!("Secret phrase `{}` is account:\n \ + type Pair: Pair; + type Public: Public + Ss58Codec + AsRef<[u8]> + std::hash::Hash; + fn pair_from_suri(suri: &str, password: Option<&str>) -> Self::Pair { + Self::Pair::from_string(suri, password).expect("Invalid phrase") + } + fn ss58_from_pair(pair: &Self::Pair) -> String + where + ::Public: PublicT, + { + pair.public().into_runtime().into_account().to_ss58check() + } + fn public_from_pair(pair: &Self::Pair) -> Self::Public { + pair.public() + } + fn print_from_uri( + uri: &str, + password: Option<&str>, + network_override: Option, + output: OutputType, + ) where + ::Public: PublicT, + { + let v = network_override.unwrap_or_default(); + if let Ok((pair, seed)) = Self::Pair::from_phrase(uri, password) { + let public_key = Self::public_from_pair(&pair); + + match output { + OutputType::Json => { + let json = json!({ + "secretPhrase": uri, + "networkId": String::from(v), + "secretSeed": format_seed::(seed), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": Self::ss58_from_pair(&pair), + }); + println!( + "{}", + serde_json::to_string_pretty(&json).expect("Json pretty print failed") + ); + } + OutputType::Text => { + println!( + "Secret phrase `{}` is account:\n \ Network ID/version: {}\n \ Secret seed: {}\n \ Public key (hex): {}\n \ Account ID: {}\n \ SS58 Address: {}", - uri, - String::from(v), - format_seed::(seed), - format_public_key::(public_key.clone()), - format_account_id::(public_key), - Self::ss58_from_pair(&pair), - ); - }, - } - } else if let Ok((pair, seed)) = Self::Pair::from_string_with_seed(uri, password) { - let public_key = Self::public_from_pair(&pair); - - match output { - OutputType::Json => { - let json = json!({ - "secretKeyUri": uri, - "networkId": String::from(v), - "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - "publicKey": format_public_key::(public_key.clone()), - "accountId": 
format_account_id::(public_key), - "ss58Address": Self::ss58_from_pair(&pair), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Secret Key URI `{}` is account:\n \ + uri, + String::from(v), + format_seed::(seed), + format_public_key::(public_key.clone()), + format_account_id::(public_key), + Self::ss58_from_pair(&pair), + ); + } + } + } else if let Ok((pair, seed)) = Self::Pair::from_string_with_seed(uri, password) { + let public_key = Self::public_from_pair(&pair); + + match output { + OutputType::Json => { + let json = json!({ + "secretKeyUri": uri, + "networkId": String::from(v), + "secretSeed": if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key), + "ss58Address": Self::ss58_from_pair(&pair), + }); + println!( + "{}", + serde_json::to_string_pretty(&json).expect("Json pretty print failed") + ); + } + OutputType::Text => { + println!( + "Secret Key URI `{}` is account:\n \ Network ID/version: {}\n \ Secret seed: {}\n \ Public key (hex): {}\n \ Account ID: {}\n \ SS58 Address: {}", - uri, - String::from(v), - if let Some(seed) = seed { format_seed::(seed) } else { "n/a".into() }, - format_public_key::(public_key.clone()), - format_account_id::(public_key), - Self::ss58_from_pair(&pair), - ); - }, - } - } else if let Ok((public_key, v)) = - ::Public::from_string_with_version(uri) - { - let v = network_override.unwrap_or(v); - - match output { - OutputType::Json => { - let json = json!({ - "publicKeyUri": uri, - "networkId": String::from(v), - "publicKey": format_public_key::(public_key.clone()), - "accountId": format_account_id::(public_key.clone()), - "ss58Address": public_key.to_ss58check_with_version(v), - }); - println!("{}", serde_json::to_string_pretty(&json).expect("Json pretty print failed")); - }, - OutputType::Text => { - println!("Public Key URI 
`{}` is account:\n \ + uri, + String::from(v), + if let Some(seed) = seed { + format_seed::(seed) + } else { + "n/a".into() + }, + format_public_key::(public_key.clone()), + format_account_id::(public_key), + Self::ss58_from_pair(&pair), + ); + } + } + } else if let Ok((public_key, v)) = + ::Public::from_string_with_version(uri) + { + let v = network_override.unwrap_or(v); + + match output { + OutputType::Json => { + let json = json!({ + "publicKeyUri": uri, + "networkId": String::from(v), + "publicKey": format_public_key::(public_key.clone()), + "accountId": format_account_id::(public_key.clone()), + "ss58Address": public_key.to_ss58check_with_version(v), + }); + println!( + "{}", + serde_json::to_string_pretty(&json).expect("Json pretty print failed") + ); + } + OutputType::Text => { + println!( + "Public Key URI `{}` is account:\n \ Network ID/version: {}\n \ Public key (hex): {}\n \ Account ID: {}\n \ SS58 Address: {}", - uri, - String::from(v), - format_public_key::(public_key.clone()), - format_account_id::(public_key.clone()), - public_key.to_ss58check_with_version(v), - ); - }, - } - } else { - eprintln!("Invalid phrase/URI given"); - } - } + uri, + String::from(v), + format_public_key::(public_key.clone()), + format_account_id::(public_key.clone()), + public_key.to_ss58check_with_version(v), + ); + } + } + } else { + eprintln!("Invalid phrase/URI given"); + } + } } struct Ed25519; impl Crypto for Ed25519 { - type Pair = ed25519::Pair; - type Public = ed25519::Public; + type Pair = ed25519::Pair; + type Public = ed25519::Public; - fn pair_from_suri(suri: &str, password_override: Option<&str>) -> Self::Pair { - ed25519::Pair::from_legacy_string(suri, password_override) - } + fn pair_from_suri(suri: &str, password_override: Option<&str>) -> Self::Pair { + ed25519::Pair::from_legacy_string(suri, password_override) + } } struct Sr25519; impl Crypto for Sr25519 { - type Pair = sr25519::Pair; - type Public = sr25519::Public; + type Pair = sr25519::Pair; + type 
Public = sr25519::Public; } struct Ecdsa; impl Crypto for Ecdsa { - type Pair = ecdsa::Pair; - type Public = ecdsa::Public; + type Pair = ecdsa::Pair; + type Public = ecdsa::Public; } type SignatureOf = <::Pair as Pair>::Signature; @@ -209,29 +234,58 @@ type SeedOf = <::Pair as Pair>::Seed; type AccountPublic = ::Signer; trait SignatureT: AsRef<[u8]> + AsMut<[u8]> + Default { - /// Converts the signature into a runtime account signature, if possible. If not possible, bombs out. - fn into_runtime(self) -> Signature { - panic!("This cryptography isn't supported for this runtime.") - } + /// Converts the signature into a runtime account signature, if possible. If not possible, bombs out. + fn into_runtime(self) -> Signature { + panic!("This cryptography isn't supported for this runtime.") + } } trait PublicT: Sized + AsRef<[u8]> + Ss58Codec { - /// Converts the public key into a runtime account public key, if possible. If not possible, bombs out. - fn into_runtime(self) -> AccountPublic { - panic!("This cryptography isn't supported for this runtime.") - } + /// Converts the public key into a runtime account public key, if possible. If not possible, bombs out. 
+ fn into_runtime(self) -> AccountPublic { + panic!("This cryptography isn't supported for this runtime.") + } } -impl SignatureT for sr25519::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl SignatureT for ed25519::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl SignatureT for ecdsa::Signature { fn into_runtime(self) -> Signature { self.into() } } -impl PublicT for sr25519::Public { fn into_runtime(self) -> AccountPublic { self.into() } } -impl PublicT for ed25519::Public { fn into_runtime(self) -> AccountPublic { self.into() } } -impl PublicT for ecdsa::Public { fn into_runtime(self) -> AccountPublic { self.into() } } +impl SignatureT for sr25519::Signature { + fn into_runtime(self) -> Signature { + self.into() + } +} +impl SignatureT for ed25519::Signature { + fn into_runtime(self) -> Signature { + self.into() + } +} +impl SignatureT for ecdsa::Signature { + fn into_runtime(self) -> Signature { + self.into() + } +} +impl PublicT for sr25519::Public { + fn into_runtime(self) -> AccountPublic { + self.into() + } +} +impl PublicT for ed25519::Public { + fn into_runtime(self) -> AccountPublic { + self.into() + } +} +impl PublicT for ecdsa::Public { + fn into_runtime(self) -> AccountPublic { + self.into() + } +} fn get_usage() -> String { - let networks = Ss58AddressFormat::all().iter().cloned().map(String::from).join("/"); - let default_network = String::from(Ss58AddressFormat::default()); - format!(" + let networks = Ss58AddressFormat::all() + .iter() + .cloned() + .map(String::from) + .join("/"); + let default_network = String::from(Ss58AddressFormat::default()); + format!( + " -e, --ed25519 'Use Ed25519/BIP39 cryptography' -k, --secp256k1 'Use SECP256k1/ECDSA/BIP39 cryptography' -s, --sr25519 'Use Schnorr/Ristretto x25519/BIP39 cryptography' @@ -239,16 +293,18 @@ fn get_usage() -> String { [password] -p, --password 'The password for the key' --password-interactive 'You will be prompted for the password for the key.' 
[output] -o, --output 'Specify an output format. One of text, json. Default is text.' - ", networks, default_network) + ", + networks, default_network + ) } fn get_app<'a, 'b>(usage: &'a str) -> App<'a, 'b> { - App::new("subkey") - .author("Parity Team ") - .about("Utility for generating and restoring with Substrate keys") - .version(env!("CARGO_PKG_VERSION")) - .args_from_usage(usage) - .subcommands(vec![ + App::new("subkey") + .author("Parity Team ") + .about("Utility for generating and restoring with Substrate keys") + .version(env!("CARGO_PKG_VERSION")) + .args_from_usage(usage) + .subcommands(vec![ SubCommand::with_name("generate") .about("Generate a random account") .args_from_usage("[words] -w, --words \ @@ -322,16 +378,16 @@ fn get_app<'a, 'b>(usage: &'a str) -> App<'a, 'b> { } fn main() -> Result<(), Error> { - let usage = get_usage(); - let matches = get_app(&usage).get_matches(); + let usage = get_usage(); + let matches = get_app(&usage).get_matches(); - if matches.is_present("ed25519") { - return execute::(matches); - } - if matches.is_present("secp256k1") { - return execute::(matches) - } - return execute::(matches) + if matches.is_present("ed25519") { + return execute::(matches); + } + if matches.is_present("secp256k1") { + return execute::(matches); + } + return execute::(matches); } /// Get `URI` from CLI or prompt the user. @@ -341,454 +397,476 @@ fn main() -> Result<(), Error> { /// If the `URI` given as CLI argument is a file, the file content is taken as `URI`. /// If no `URI` is given to the CLI, the user is prompted for it. fn get_uri(match_name: &str, matches: &ArgMatches) -> Result { - let uri = if let Some(uri) = matches.value_of(match_name) { - let file = PathBuf::from(uri); - if file.is_file() { - fs::read_to_string(uri)? - .trim_end() - .into() - } else { - uri.into() - } - } else { - rpassword::read_password_from_tty(Some("URI: "))? 
- }; - - Ok(uri) + let uri = if let Some(uri) = matches.value_of(match_name) { + let file = PathBuf::from(uri); + if file.is_file() { + fs::read_to_string(uri)?.trim_end().into() + } else { + uri.into() + } + } else { + rpassword::read_password_from_tty(Some("URI: "))? + }; + + Ok(uri) } #[derive(derive_more::Display, derive_more::From)] enum Error { - Static(&'static str), - Io(std::io::Error), - Formatted(String), + Static(&'static str), + Io(std::io::Error), + Formatted(String), } impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } } fn static_err(msg: &'static str) -> Result<(), Error> { - Err(Error::Static(msg)) + Err(Error::Static(msg)) } fn execute(matches: ArgMatches) -> Result<(), Error> where - SignatureOf: SignatureT, - PublicOf: PublicT, + SignatureOf: SignatureT, + PublicOf: PublicT, { - let password_interactive = matches.is_present("password-interactive"); - let password = matches.value_of("password"); - - let password = if password.is_some() && password_interactive { - return static_err("`--password` given and `--password-interactive` selected!"); - } else if password_interactive { - Some( - rpassword::read_password_from_tty(Some("Key password: "))? - ) - } else { - password.map(Into::into) - }; - let password = password.as_ref().map(String::as_str); - - let maybe_network: Option = match matches.value_of("network").map(|network| { - network - .try_into() - .map_err(|_| Error::Static("Invalid network name. See --help for available networks.")) - }) { - Some(Err(e)) => return Err(e), - Some(Ok(v)) => Some(v), - None => None, - }; - - if let Some(network) = maybe_network { - set_default_ss58_version(network); - } - - let output: OutputType = match matches.value_of("output").map(TryInto::try_into) { - Some(Err(_)) => return Err(Error::Static("Invalid output name. 
See --help for available outputs.")), - Some(Ok(v)) => v, - None => OutputType::Text, - }; - - match matches.subcommand() { - ("generate", Some(matches)) => { - let mnemonic = generate_mnemonic(matches)?; - C::print_from_uri(mnemonic.phrase(), password, maybe_network, output); - } - ("generate-node-key", Some(matches)) => { - let file = matches.value_of("file").ok_or(Error::Static("Output file name is required"))?; - - let keypair = libp2p_ed25519::Keypair::generate(); - let secret = keypair.secret(); - let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); - - fs::write(file, secret.as_ref())?; - - println!("{}", peer_id); - } - ("inspect", Some(matches)) => { - C::print_from_uri(&get_uri("uri", &matches)?, password, maybe_network, output); - } - ("sign", Some(matches)) => { - let suri = get_uri("suri", &matches)?; - let should_decode = matches.is_present("hex"); - - let message = read_message_from_stdin(should_decode)?; - let signature = do_sign::(&suri, message, password)?; - println!("{}", signature); - } - ("verify", Some(matches)) => { - let uri = get_uri("uri", &matches)?; - let should_decode = matches.is_present("hex"); - - let message = read_message_from_stdin(should_decode)?; - let is_valid_signature = do_verify::(matches, &uri, message)?; - if is_valid_signature { - println!("Signature verifies correctly."); - } else { - return static_err("Signature invalid."); - } - } - ("vanity", Some(matches)) => { - let desired: String = matches - .value_of("pattern") - .map(str::to_string) - .unwrap_or_default(); - let result = vanity::generate_key::(&desired)?; - let formated_seed = format_seed::(result.seed); - C::print_from_uri(&formated_seed, None, maybe_network, output); - } - ("transfer", Some(matches)) => { - let signer = read_pair::(matches.value_of("from"), password)?; - let index = read_required_parameter::(matches, "index")?; - let genesis_hash = read_genesis_hash(matches)?; - - let to: AccountId = read_account_id(matches.value_of("to")); - 
let amount = read_required_parameter::(matches, "amount")?; - let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); - - let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); - - print_extrinsic(extrinsic); - } - ("sign-transaction", Some(matches)) => { - let signer = read_pair::(matches.value_of("suri"), password)?; - let index = read_required_parameter::(matches, "nonce")?; - let genesis_hash = read_genesis_hash(matches)?; - - let call = matches.value_of("call").expect("call is required; qed"); - let function: Call = hex::decode(&call) - .ok() - .and_then(|x| Decode::decode(&mut &x[..]).ok()) - .unwrap(); - - let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); - - print_extrinsic(extrinsic); - } - ("insert", Some(matches)) => { - let suri = get_uri("suri", &matches)?; - let pair = read_pair::(Some(&suri), password)?; - let node_url = matches.value_of("node-url").unwrap_or("http://localhost:9933"); - let key_type = matches.value_of("key-type").ok_or(Error::Static("Key type id is required"))?; - - // Just checking - let _key_type_id = sp_core::crypto::KeyTypeId::try_from(key_type) - .map_err(|_| Error::Static("Cannot convert argument to keytype: argument should be 4-character string"))?; - - let rpc = rpc::RpcClient::new(node_url.to_string()); - - rpc.insert_key( - key_type.to_string(), - suri, - sp_core::Bytes(pair.public().as_ref().to_vec()), - ); - } - _ => print_usage(&matches), - } - - Ok(()) + let password_interactive = matches.is_present("password-interactive"); + let password = matches.value_of("password"); + + let password = if password.is_some() && password_interactive { + return static_err("`--password` given and `--password-interactive` selected!"); + } else if password_interactive { + Some(rpassword::read_password_from_tty(Some("Key password: "))?) 
+ } else { + password.map(Into::into) + }; + let password = password.as_ref().map(String::as_str); + + let maybe_network: Option = + match matches.value_of("network").map(|network| { + network.try_into().map_err(|_| { + Error::Static("Invalid network name. See --help for available networks.") + }) + }) { + Some(Err(e)) => return Err(e), + Some(Ok(v)) => Some(v), + None => None, + }; + + if let Some(network) = maybe_network { + set_default_ss58_version(network); + } + + let output: OutputType = match matches.value_of("output").map(TryInto::try_into) { + Some(Err(_)) => { + return Err(Error::Static( + "Invalid output name. See --help for available outputs.", + )) + } + Some(Ok(v)) => v, + None => OutputType::Text, + }; + + match matches.subcommand() { + ("generate", Some(matches)) => { + let mnemonic = generate_mnemonic(matches)?; + C::print_from_uri(mnemonic.phrase(), password, maybe_network, output); + } + ("generate-node-key", Some(matches)) => { + let file = matches + .value_of("file") + .ok_or(Error::Static("Output file name is required"))?; + + let keypair = libp2p_ed25519::Keypair::generate(); + let secret = keypair.secret(); + let peer_id = PublicKey::Ed25519(keypair.public()).into_peer_id(); + + fs::write(file, secret.as_ref())?; + + println!("{}", peer_id); + } + ("inspect", Some(matches)) => { + C::print_from_uri(&get_uri("uri", &matches)?, password, maybe_network, output); + } + ("sign", Some(matches)) => { + let suri = get_uri("suri", &matches)?; + let should_decode = matches.is_present("hex"); + + let message = read_message_from_stdin(should_decode)?; + let signature = do_sign::(&suri, message, password)?; + println!("{}", signature); + } + ("verify", Some(matches)) => { + let uri = get_uri("uri", &matches)?; + let should_decode = matches.is_present("hex"); + + let message = read_message_from_stdin(should_decode)?; + let is_valid_signature = do_verify::(matches, &uri, message)?; + if is_valid_signature { + println!("Signature verifies correctly."); + } 
else { + return static_err("Signature invalid."); + } + } + ("vanity", Some(matches)) => { + let desired: String = matches + .value_of("pattern") + .map(str::to_string) + .unwrap_or_default(); + let result = vanity::generate_key::(&desired)?; + let formated_seed = format_seed::(result.seed); + C::print_from_uri(&formated_seed, None, maybe_network, output); + } + ("transfer", Some(matches)) => { + let signer = read_pair::(matches.value_of("from"), password)?; + let index = read_required_parameter::(matches, "index")?; + let genesis_hash = read_genesis_hash(matches)?; + + let to: AccountId = read_account_id(matches.value_of("to")); + let amount = read_required_parameter::(matches, "amount")?; + let function = Call::Balances(BalancesCall::transfer(to.into(), amount)); + + let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); + + print_extrinsic(extrinsic); + } + ("sign-transaction", Some(matches)) => { + let signer = read_pair::(matches.value_of("suri"), password)?; + let index = read_required_parameter::(matches, "nonce")?; + let genesis_hash = read_genesis_hash(matches)?; + + let call = matches.value_of("call").expect("call is required; qed"); + let function: Call = hex::decode(&call) + .ok() + .and_then(|x| Decode::decode(&mut &x[..]).ok()) + .unwrap(); + + let extrinsic = create_extrinsic::(function, index, signer, genesis_hash); + + print_extrinsic(extrinsic); + } + ("insert", Some(matches)) => { + let suri = get_uri("suri", &matches)?; + let pair = read_pair::(Some(&suri), password)?; + let node_url = matches + .value_of("node-url") + .unwrap_or("http://localhost:9933"); + let key_type = matches + .value_of("key-type") + .ok_or(Error::Static("Key type id is required"))?; + + // Just checking + let _key_type_id = sp_core::crypto::KeyTypeId::try_from(key_type).map_err(|_| { + Error::Static( + "Cannot convert argument to keytype: argument should be 4-character string", + ) + })?; + + let rpc = rpc::RpcClient::new(node_url.to_string()); + + 
rpc.insert_key( + key_type.to_string(), + suri, + sp_core::Bytes(pair.public().as_ref().to_vec()), + ); + } + _ => print_usage(&matches), + } + + Ok(()) } /// Creates a new randomly generated mnemonic phrase. fn generate_mnemonic(matches: &ArgMatches) -> Result { - let words = match matches.value_of("words") { - Some(words) => { - let num = usize::from_str(words).map_err(|_| Error::Static("Invalid number given for --words"))?; - MnemonicType::for_word_count(num) - .map_err(|_| Error::Static("Invalid number of words given for phrase: must be 12/15/18/21/24"))? - }, - None => MnemonicType::Words12, - }; - Ok(Mnemonic::new(words, Language::English)) + let words = match matches.value_of("words") { + Some(words) => { + let num = usize::from_str(words) + .map_err(|_| Error::Static("Invalid number given for --words"))?; + MnemonicType::for_word_count(num).map_err(|_| { + Error::Static("Invalid number of words given for phrase: must be 12/15/18/21/24") + })? + } + None => MnemonicType::Words12, + }; + Ok(Mnemonic::new(words, Language::English)) } fn do_sign(suri: &str, message: Vec, password: Option<&str>) -> Result where - SignatureOf: SignatureT, - PublicOf: PublicT, + SignatureOf: SignatureT, + PublicOf: PublicT, { - let pair = read_pair::(Some(suri), password)?; - let signature = pair.sign(&message); - Ok(format_signature::(&signature)) + let pair = read_pair::(Some(suri), password)?; + let signature = pair.sign(&message); + Ok(format_signature::(&signature)) } fn do_verify(matches: &ArgMatches, uri: &str, message: Vec) -> Result where - SignatureOf: SignatureT, - PublicOf: PublicT, + SignatureOf: SignatureT, + PublicOf: PublicT, { - - let signature = read_signature::(matches)?; - let pubkey = read_public_key::(Some(uri)); - Ok(<::Pair as Pair>::verify(&signature, &message, &pubkey)) + let signature = read_signature::(matches)?; + let pubkey = read_public_key::(Some(uri)); + Ok(<::Pair as Pair>::verify( + &signature, &message, &pubkey, + )) } fn decode_hex>(message: T) 
-> Result, Error> { - hex::decode(message).map_err(|e| Error::Formatted(format!("Invalid hex ({})", e))) + hex::decode(message).map_err(|e| Error::Formatted(format!("Invalid hex ({})", e))) } fn read_message_from_stdin(should_decode: bool) -> Result, Error> { - let mut message = vec![]; - stdin() - .lock() - .read_to_end(&mut message)?; - if should_decode { - message = decode_hex(&message)?; - } - Ok(message) -} - -fn read_required_parameter(matches: &ArgMatches, name: &str) -> Result where - ::Err: std::fmt::Debug, + let mut message = vec![]; + stdin().lock().read_to_end(&mut message)?; + if should_decode { + message = decode_hex(&message)?; + } + Ok(message) +} + +fn read_required_parameter(matches: &ArgMatches, name: &str) -> Result +where + ::Err: std::fmt::Debug, { - let str_value = matches - .value_of(name) - .expect("parameter is required; thus it can't be None; qed"); - str::parse::(str_value).map_err(|_| - Error::Formatted(format!("Invalid `{}' parameter; expecting an integer.", name)) - ) + let str_value = matches + .value_of(name) + .expect("parameter is required; thus it can't be None; qed"); + str::parse::(str_value).map_err(|_| { + Error::Formatted(format!( + "Invalid `{}' parameter; expecting an integer.", + name + )) + }) } fn read_genesis_hash(matches: &ArgMatches) -> Result { - let genesis_hash: Hash = match matches.value_of("genesis").unwrap_or("alex") { - "elm" => hex!["10c08714a10c7da78f40a60f6f732cf0dba97acfb5e2035445b032386157d5c3"].into(), - "alex" => hex!["dcd1346701ca8396496e52aa2785b1748deb6db09551b72159dcb3e08991025b"].into(), - h => Decode::decode(&mut &decode_hex(h)?[..]) - .expect("Invalid genesis hash or unrecognized chain identifier"), - }; - println!( - "Using a genesis hash of {}", - HexDisplay::from(&genesis_hash.as_ref()) - ); - Ok(genesis_hash) + let genesis_hash: Hash = match matches.value_of("genesis").unwrap_or("alex") { + "elm" => hex!["10c08714a10c7da78f40a60f6f732cf0dba97acfb5e2035445b032386157d5c3"].into(), + "alex" => 
hex!["dcd1346701ca8396496e52aa2785b1748deb6db09551b72159dcb3e08991025b"].into(), + h => Decode::decode(&mut &decode_hex(h)?[..]) + .expect("Invalid genesis hash or unrecognized chain identifier"), + }; + println!( + "Using a genesis hash of {}", + HexDisplay::from(&genesis_hash.as_ref()) + ); + Ok(genesis_hash) } fn read_signature(matches: &ArgMatches) -> Result, Error> where - SignatureOf: SignatureT, - PublicOf: PublicT, + SignatureOf: SignatureT, + PublicOf: PublicT, { - let sig_data = matches - .value_of("sig") - .expect("signature parameter is required; thus it can't be None; qed"); - let mut signature = <::Pair as Pair>::Signature::default(); - let sig_data = decode_hex(sig_data)?; - if sig_data.len() != signature.as_ref().len() { - return Err(Error::Formatted(format!( - "signature has an invalid length. read {} bytes, expected {} bytes", - sig_data.len(), - signature.as_ref().len(), - ))); - } - signature.as_mut().copy_from_slice(&sig_data); - Ok(signature) + let sig_data = matches + .value_of("sig") + .expect("signature parameter is required; thus it can't be None; qed"); + let mut signature = <::Pair as Pair>::Signature::default(); + let sig_data = decode_hex(sig_data)?; + if sig_data.len() != signature.as_ref().len() { + return Err(Error::Formatted(format!( + "signature has an invalid length. read {} bytes, expected {} bytes", + sig_data.len(), + signature.as_ref().len(), + ))); + } + signature.as_mut().copy_from_slice(&sig_data); + Ok(signature) } fn read_public_key(matched_uri: Option<&str>) -> PublicOf where - PublicOf: PublicT, + PublicOf: PublicT, { - let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); - let uri = if uri.starts_with("0x") { - &uri[2..] 
- } else { - uri - }; - if let Ok(pubkey_vec) = hex::decode(uri) { - ::Public::from_slice(pubkey_vec.as_slice()) - } else { - ::Public::from_string(uri) - .ok() - .expect("Invalid URI; expecting either a secret URI or a public URI.") - } + let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); + let uri = if uri.starts_with("0x") { + &uri[2..] + } else { + uri + }; + if let Ok(pubkey_vec) = hex::decode(uri) { + ::Public::from_slice(pubkey_vec.as_slice()) + } else { + ::Public::from_string(uri) + .ok() + .expect("Invalid URI; expecting either a secret URI or a public URI.") + } } fn read_account_id(matched_uri: Option<&str>) -> AccountId { - let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); - let uri = if uri.starts_with("0x") { - &uri[2..] - } else { - uri - }; - if let Ok(data_vec) = hex::decode(uri) { - AccountId::try_from(data_vec.as_slice()) - .expect("Invalid hex length for account ID; should be 32 bytes") - } else { - AccountId::from_ss58check(uri).ok() - .expect("Invalid SS58-check address given for account ID.") - } + let uri = matched_uri.expect("parameter is required; thus it can't be None; qed"); + let uri = if uri.starts_with("0x") { + &uri[2..] 
+ } else { + uri + }; + if let Ok(data_vec) = hex::decode(uri) { + AccountId::try_from(data_vec.as_slice()) + .expect("Invalid hex length for account ID; should be 32 bytes") + } else { + AccountId::from_ss58check(uri) + .ok() + .expect("Invalid SS58-check address given for account ID.") + } } fn read_pair( - matched_suri: Option<&str>, - password: Option<&str>, -) -> Result<::Pair, Error> where - SignatureOf: SignatureT, - PublicOf: PublicT, + matched_suri: Option<&str>, + password: Option<&str>, +) -> Result<::Pair, Error> +where + SignatureOf: SignatureT, + PublicOf: PublicT, { - let suri = matched_suri.ok_or(Error::Static("parameter is required; thus it can't be None; qed"))?; - Ok(C::pair_from_suri(suri, password)) + let suri = matched_suri.ok_or(Error::Static( + "parameter is required; thus it can't be None; qed", + ))?; + Ok(C::pair_from_suri(suri, password)) } fn format_signature(signature: &SignatureOf) -> String { - format!("{}", HexDisplay::from(&signature.as_ref())) + format!("{}", HexDisplay::from(&signature.as_ref())) } fn format_seed(seed: SeedOf) -> String { - format!("0x{}", HexDisplay::from(&seed.as_ref())) + format!("0x{}", HexDisplay::from(&seed.as_ref())) } fn format_public_key(public_key: PublicOf) -> String { - format!("0x{}", HexDisplay::from(&public_key.as_ref())) + format!("0x{}", HexDisplay::from(&public_key.as_ref())) } -fn format_account_id(public_key: PublicOf) -> String where - PublicOf: PublicT, +fn format_account_id(public_key: PublicOf) -> String +where + PublicOf: PublicT, { - format!("0x{}", HexDisplay::from(&public_key.into_runtime().into_account().as_ref())) + format!( + "0x{}", + HexDisplay::from(&public_key.into_runtime().into_account().as_ref()) + ) } fn create_extrinsic( - function: Call, - index: Index, - signer: C::Pair, - genesis_hash: H256, -) -> UncheckedExtrinsic where - PublicOf: PublicT, - SignatureOf: SignatureT, + function: Call, + index: Index, + signer: C::Pair, + genesis_hash: H256, +) -> UncheckedExtrinsic 
+where + PublicOf: PublicT, + SignatureOf: SignatureT, { - let extra = |i: Index, f: Balance| { - ( - frame_system::CheckVersion::::new(), - frame_system::CheckGenesis::::new(), - frame_system::CheckEra::::from(Era::Immortal), - frame_system::CheckNonce::::from(i), - frame_system::CheckWeight::::new(), - pallet_transaction_payment::ChargeTransactionPayment::::from(f), - Default::default(), - ) - }; - let raw_payload = SignedPayload::from_raw( - function, - extra(index, 0), - ( - VERSION.spec_version as u32, - genesis_hash, - genesis_hash, - (), - (), - (), - (), - ), - ); - let signature = raw_payload.using_encoded(|payload| signer.sign(payload)).into_runtime(); - let signer = signer.public().into_runtime(); - let (function, extra, _) = raw_payload.deconstruct(); - - UncheckedExtrinsic::new_signed( - function, - signer.into_account().into(), - signature, - extra, - ) + let extra = |i: Index, f: Balance| { + ( + frame_system::CheckVersion::::new(), + frame_system::CheckGenesis::::new(), + frame_system::CheckEra::::from(Era::Immortal), + frame_system::CheckNonce::::from(i), + frame_system::CheckWeight::::new(), + pallet_transaction_payment::ChargeTransactionPayment::::from(f), + Default::default(), + ) + }; + let raw_payload = SignedPayload::from_raw( + function, + extra(index, 0), + ( + VERSION.spec_version as u32, + genesis_hash, + genesis_hash, + (), + (), + (), + (), + ), + ); + let signature = raw_payload + .using_encoded(|payload| signer.sign(payload)) + .into_runtime(); + let signer = signer.public().into_runtime(); + let (function, extra, _) = raw_payload.deconstruct(); + + UncheckedExtrinsic::new_signed(function, signer.into_account().into(), signature, extra) } fn print_extrinsic(extrinsic: UncheckedExtrinsic) { - println!("0x{}", HexDisplay::from(&extrinsic.encode())); + println!("0x{}", HexDisplay::from(&extrinsic.encode())); } fn print_usage(matches: &ArgMatches) { - println!("{}", matches.usage()); + println!("{}", matches.usage()); } #[cfg(test)] mod 
tests { - use super::*; - - fn test_generate_sign_verify() - where - SignatureOf: SignatureT, - PublicOf: PublicT, - { - let usage = get_usage(); - let app = get_app(&usage); - let password = None; - - // Generate public key and seed. - let arg_vec = vec!["subkey", "generate"]; - - let matches = app.clone().get_matches_from(arg_vec); - let matches = matches.subcommand().1.unwrap(); - let mnemonic = generate_mnemonic(matches).expect("generate failed"); - - let (pair, seed) = - <::Pair as Pair>::from_phrase(mnemonic.phrase(), password) - .unwrap(); - let public_key = CryptoType::public_from_pair(&pair); - let public_key = format_public_key::(public_key); - let seed = format_seed::(seed); - let message = "Blah Blah\n".as_bytes().to_vec(); - - let signature = do_sign::(&seed, message.clone(), password).expect("signing failed"); - - // Verify the previous signature. - let arg_vec = vec!["subkey", "verify", &signature[..], &public_key[..]]; - - let matches = get_app(&usage).get_matches_from(arg_vec); - let matches = matches.subcommand().1.unwrap(); - - assert!(do_verify::(matches, &public_key, message).expect("verify failed")); - } - - #[test] - fn generate_sign_verify_should_work_for_ed25519() { - test_generate_sign_verify::(); - } - - #[test] - fn generate_sign_verify_should_work_for_sr25519() { - test_generate_sign_verify::(); - } - - #[test] - fn should_work() { - let s = "0123456789012345678901234567890123456789012345678901234567890123"; - - let d1: Hash = hex::decode(s) - .ok() - .and_then(|x| Decode::decode(&mut &x[..]).ok()) - .unwrap(); - - let d2: Hash = { - let mut gh: [u8; 32] = Default::default(); - gh.copy_from_slice(hex::decode(s).unwrap().as_ref()); - Hash::from(gh) - }; - - assert_eq!(d1, d2); - } + use super::*; + + fn test_generate_sign_verify() + where + SignatureOf: SignatureT, + PublicOf: PublicT, + { + let usage = get_usage(); + let app = get_app(&usage); + let password = None; + + // Generate public key and seed. 
+ let arg_vec = vec!["subkey", "generate"]; + + let matches = app.clone().get_matches_from(arg_vec); + let matches = matches.subcommand().1.unwrap(); + let mnemonic = generate_mnemonic(matches).expect("generate failed"); + + let (pair, seed) = + <::Pair as Pair>::from_phrase(mnemonic.phrase(), password) + .unwrap(); + let public_key = CryptoType::public_from_pair(&pair); + let public_key = format_public_key::(public_key); + let seed = format_seed::(seed); + let message = "Blah Blah\n".as_bytes().to_vec(); + + let signature = + do_sign::(&seed, message.clone(), password).expect("signing failed"); + + // Verify the previous signature. + let arg_vec = vec!["subkey", "verify", &signature[..], &public_key[..]]; + + let matches = get_app(&usage).get_matches_from(arg_vec); + let matches = matches.subcommand().1.unwrap(); + + assert!(do_verify::(matches, &public_key, message).expect("verify failed")); + } + + #[test] + fn generate_sign_verify_should_work_for_ed25519() { + test_generate_sign_verify::(); + } + + #[test] + fn generate_sign_verify_should_work_for_sr25519() { + test_generate_sign_verify::(); + } + + #[test] + fn should_work() { + let s = "0123456789012345678901234567890123456789012345678901234567890123"; + + let d1: Hash = hex::decode(s) + .ok() + .and_then(|x| Decode::decode(&mut &x[..]).ok()) + .unwrap(); + + let d2: Hash = { + let mut gh: [u8; 32] = Default::default(); + gh.copy_from_slice(hex::decode(s).unwrap().as_ref()); + Hash::from(gh) + }; + + assert_eq!(d1, d2); + } } diff --git a/bin/utils/subkey/src/rpc.rs b/bin/utils/subkey/src/rpc.rs index e08ccc19a2..9985b24625 100644 --- a/bin/utils/subkey/src/rpc.rs +++ b/bin/utils/subkey/src/rpc.rs @@ -18,32 +18,31 @@ use futures::Future; use hyper::rt; +use jsonrpc_core_client::transports::http; use node_primitives::Hash; use sc_rpc::author::AuthorClient; -use jsonrpc_core_client::transports::http; use sp_core::Bytes; -pub struct RpcClient { url: String } +pub struct RpcClient { + url: String, +} impl 
RpcClient { - pub fn new(url: String) -> Self { Self { url } } - - pub fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) { - let url = self.url.clone(); - - rt::run( - http::connect(&url) - .and_then(|client: AuthorClient| { - client.insert_key(key_type, suri, public).map(|_| ()) - }) - .map_err(|e| { - eprintln!("Error inserting key: {:?}", e); - }) - ); - } + pub fn new(url: String) -> Self { + Self { url } + } + + pub fn insert_key(&self, key_type: String, suri: String, public: Bytes) { + let url = self.url.clone(); + + rt::run( + http::connect(&url) + .and_then(|client: AuthorClient| { + client.insert_key(key_type, suri, public).map(|_| ()) + }) + .map_err(|e| { + eprintln!("Error inserting key: {:?}", e); + }), + ); + } } diff --git a/bin/utils/subkey/src/vanity.rs b/bin/utils/subkey/src/vanity.rs index f921470946..4d34f19210 100644 --- a/bin/utils/subkey/src/vanity.rs +++ b/bin/utils/subkey/src/vanity.rs @@ -14,190 +14,191 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use super::{PublicOf, PublicT, Crypto}; -use sp_core::Pair; +use super::{Crypto, PublicOf, PublicT}; use rand::{rngs::OsRng, RngCore}; +use sp_core::Pair; fn good_waypoint(done: u64) -> u64 { - match done { - 0..=1_000_000 => 100_000, - 0..=10_000_000 => 1_000_000, - 0..=100_000_000 => 10_000_000, - _ => 100_000_000, - } + match done { + 0..=1_000_000 => 100_000, + 0..=10_000_000 => 1_000_000, + 0..=100_000_000 => 10_000_000, + _ => 100_000_000, + } } fn next_seed(seed: &mut [u8]) { - for i in 0..seed.len() { - match seed[i] { - 255 => { - seed[i] = 0; - } - _ => { - seed[i] += 1; - break; - } - } - } + for i in 0..seed.len() { + match seed[i] { + 255 => { + seed[i] = 0; + } + _ => { + seed[i] += 1; + break; + } + } + } } /// A structure used to carry both Pair and seed. /// This should usually NOT been used. If unsure, use Pair. 
pub(super) struct KeyPair { - pub pair: C::Pair, - pub seed: ::Seed, - pub score: usize, + pub pair: C::Pair, + pub seed: ::Seed, + pub score: usize, } /// Calculate the score of a key based on the desired /// input. fn calculate_score(_desired: &str, key: &str) -> usize { - for truncate in 0.._desired.len() { - let snip_size = _desired.len() - truncate; - let truncated = &_desired[0..snip_size]; - if let Some(pos) = key.find(truncated) { - return (47 - pos) + (snip_size * 48); - } - } - 0 + for truncate in 0.._desired.len() { + let snip_size = _desired.len() - truncate; + let truncated = &_desired[0..snip_size]; + if let Some(pos) = key.find(truncated) { + return (47 - pos) + (snip_size * 48); + } + } + 0 } /// Validate whether the char is allowed to be used in base58. /// num 0, lower l, upper I and O are not allowed. -fn validate_base58(c :char) -> bool { - c.is_alphanumeric() && !"0lIO".contains(c) +fn validate_base58(c: char) -> bool { + c.is_alphanumeric() && !"0lIO".contains(c) } -pub(super) fn generate_key(desired: &str) -> Result, &'static str> where - PublicOf: PublicT, +pub(super) fn generate_key(desired: &str) -> Result, &'static str> +where + PublicOf: PublicT, { - if desired.is_empty() { - return Err("Pattern must not be empty"); - } + if desired.is_empty() { + return Err("Pattern must not be empty"); + } - if !desired.chars().all(validate_base58) { - return Err("Pattern can only contains valid characters in base58 \ + if !desired.chars().all(validate_base58) { + return Err("Pattern can only contains valid characters in base58 \ (all alphanumeric except for 0, l, I and O)"); - } - - eprintln!("Generating key containing pattern '{}'", desired); - - let top = 45 + (desired.len() * 48); - let mut best = 0; - let mut seed = ::Seed::default(); - let mut done = 0; - - loop { - if done % 100000 == 0 { - OsRng.fill_bytes(seed.as_mut()); - } else { - next_seed(seed.as_mut()); - } - - let p = C::Pair::from_seed(&seed); - let ss58 = C::ss58_from_pair(&p); - let 
score = calculate_score(&desired, &ss58); - if score > best || desired.len() < 2 { - best = score; - let keypair = KeyPair { - pair: p, - seed: seed.clone(), - score: score, - }; - if best >= top { - eprintln!("best: {} == top: {}", best, top); - return Ok(keypair); - } - } - done += 1; - - if done % good_waypoint(done) == 0 { - eprintln!("{} keys searched; best is {}/{} complete", done, best, top); - } - } + } + + eprintln!("Generating key containing pattern '{}'", desired); + + let top = 45 + (desired.len() * 48); + let mut best = 0; + let mut seed = ::Seed::default(); + let mut done = 0; + + loop { + if done % 100000 == 0 { + OsRng.fill_bytes(seed.as_mut()); + } else { + next_seed(seed.as_mut()); + } + + let p = C::Pair::from_seed(&seed); + let ss58 = C::ss58_from_pair(&p); + let score = calculate_score(&desired, &ss58); + if score > best || desired.len() < 2 { + best = score; + let keypair = KeyPair { + pair: p, + seed: seed.clone(), + score: score, + }; + if best >= top { + eprintln!("best: {} == top: {}", best, top); + return Ok(keypair); + } + } + done += 1; + + if done % good_waypoint(done) == 0 { + eprintln!("{} keys searched; best is {}/{} complete", done, best, top); + } + } } #[cfg(test)] mod tests { - use super::super::Ed25519; - use super::*; - use sp_core::{crypto::Ss58Codec, Pair}; - #[cfg(feature = "bench")] - use test::Bencher; - - #[test] - fn test_generation_with_single_char() { - assert!(generate_key::("j") - .unwrap() - .pair - .public() - .to_ss58check() - .contains("j")); - } - - #[test] - fn test_score_1_char_100() { - let score = calculate_score("j", "5jolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); - assert_eq!(score, 94); - } - - #[test] - fn test_score_100() { - let score = calculate_score( - "Polkadot", - "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim", - ); - assert_eq!(score, 430); - } - - #[test] - fn test_score_50_2() { - // 50% for the position + 50% for the size - assert_eq!( - calculate_score( - "Polkadot", - 
"5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim" - ), - 238 - ); - } - - #[test] - fn test_score_0() { - assert_eq!( - calculate_score( - "Polkadot", - "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK" - ), - 0 - ); - } - - #[test] - fn test_invalid_pattern() { - assert!(generate_key::("").is_err()); - assert!(generate_key::("0").is_err()); - assert!(generate_key::("l").is_err()); - assert!(generate_key::("I").is_err()); - assert!(generate_key::("O").is_err()); - assert!(generate_key::("!").is_err()); - } - - #[test] - fn test_valid_pattern() { - assert!(generate_key::("o").is_ok()); - assert!(generate_key::("L").is_ok()); - } - - #[cfg(feature = "bench")] - #[bench] - fn bench_paranoiac(b: &mut Bencher) { - b.iter(|| generate_key("polk")); - } - - #[cfg(feature = "bench")] - #[bench] - fn bench_not_paranoiac(b: &mut Bencher) { - b.iter(|| generate_key("polk")); - } + use super::super::Ed25519; + use super::*; + use sp_core::{crypto::Ss58Codec, Pair}; + #[cfg(feature = "bench")] + use test::Bencher; + + #[test] + fn test_generation_with_single_char() { + assert!(generate_key::("j") + .unwrap() + .pair + .public() + .to_ss58check() + .contains("j")); + } + + #[test] + fn test_score_1_char_100() { + let score = calculate_score("j", "5jolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim"); + assert_eq!(score, 94); + } + + #[test] + fn test_score_100() { + let score = calculate_score( + "Polkadot", + "5PolkadotwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim", + ); + assert_eq!(score, 430); + } + + #[test] + fn test_score_50_2() { + // 50% for the position + 50% for the size + assert_eq!( + calculate_score( + "Polkadot", + "5PolkXXXXwHY5k9GpdTgpqs9xjuNvtv8EcwCFpEeyEf3KHim" + ), + 238 + ); + } + + #[test] + fn test_score_0() { + assert_eq!( + calculate_score( + "Polkadot", + "5GUWv4bLCchGUHJrzULXnh4JgXsMpTKRnjuXTY7Qo1Kh9uYK" + ), + 0 + ); + } + + #[test] + fn test_invalid_pattern() { + assert!(generate_key::("").is_err()); + assert!(generate_key::("0").is_err()); + 
assert!(generate_key::("l").is_err()); + assert!(generate_key::("I").is_err()); + assert!(generate_key::("O").is_err()); + assert!(generate_key::("!").is_err()); + } + + #[test] + fn test_valid_pattern() { + assert!(generate_key::("o").is_ok()); + assert!(generate_key::("L").is_ok()); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_paranoiac(b: &mut Bencher) { + b.iter(|| generate_key("polk")); + } + + #[cfg(feature = "bench")] + #[bench] + fn bench_not_paranoiac(b: &mut Bencher) { + b.iter(|| generate_key("polk")); + } } diff --git a/client/api/src/backend.rs b/client/api/src/backend.rs index d10e62cc54..268a1f9371 100644 --- a/client/api/src/backend.rs +++ b/client/api/src/backend.rs @@ -16,27 +16,25 @@ //! Substrate Client data backend -use std::sync::Arc; -use std::collections::HashMap; -use sp_core::ChangesTrieConfigurationRange; -use sp_core::offchain::OffchainStorage; -use sp_runtime::{generic::BlockId, Justification, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, HashFor}; -use sp_state_machine::{ - ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, -}; -use sp_storage::{StorageData, StorageKey, ChildInfo}; use crate::{ - blockchain::{ - Backend as BlockchainBackend, well_known_cache_keys - }, - light::RemoteBlockchain, - UsageInfo, + blockchain::{well_known_cache_keys, Backend as BlockchainBackend}, + light::RemoteBlockchain, + UsageInfo, }; +use parking_lot::RwLock; use sp_blockchain; use sp_consensus::BlockOrigin; -use parking_lot::RwLock; +use sp_core::offchain::OffchainStorage; +use sp_core::ChangesTrieConfigurationRange; +use sp_runtime::traits::{Block as BlockT, HashFor, NumberFor}; +use sp_runtime::{generic::BlockId, Justification, Storage}; +use sp_state_machine::{ + ChangesTrieState, ChangesTrieStorage as StateChangesTrieStorage, ChangesTrieTransaction, + ChildStorageCollection, StorageCollection, +}; +use sp_storage::{ChildInfo, 
StorageData, StorageKey}; +use std::collections::HashMap; +use std::sync::Arc; pub use sp_state_machine::Backend as StateBackend; use std::marker::PhantomData; @@ -55,305 +53,322 @@ pub type TransactionFor = TransactionForSB, /// Contains information about the block that just got imported, /// including storage changes, reorged blocks, etc. pub struct ImportSummary { - /// Block hash of the imported block. - pub hash: Block::Hash, - /// Import origin. - pub origin: BlockOrigin, - /// Header of the imported block. - pub header: Block::Header, - /// Is this block a new best block. - pub is_new_best: bool, - /// Optional storage changes. - pub storage_changes: Option<(StorageCollection, ChildStorageCollection)>, - /// Blocks that got retracted because of this one got imported. - pub retracted: Vec, + /// Block hash of the imported block. + pub hash: Block::Hash, + /// Import origin. + pub origin: BlockOrigin, + /// Header of the imported block. + pub header: Block::Header, + /// Is this block a new best block. + pub is_new_best: bool, + /// Optional storage changes. + pub storage_changes: Option<(StorageCollection, ChildStorageCollection)>, + /// Blocks that got retracted because of this one got imported. + pub retracted: Vec, } /// Import operation wrapper pub struct ClientImportOperation> { - /// DB Operation. - pub op: B::BlockImportOperation, - /// Summary of imported block. - pub notify_imported: Option>, - /// A list of hashes of blocks that got finalized. - pub notify_finalized: Vec, + /// DB Operation. + pub op: B::BlockImportOperation, + /// Summary of imported block. + pub notify_imported: Option>, + /// A list of hashes of blocks that got finalized. + pub notify_finalized: Vec, } /// State of a new block. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum NewBlockState { - /// Normal block. - Normal, - /// New best block. - Best, - /// Newly finalized block (implicitly best). - Final, + /// Normal block. + Normal, + /// New best block. 
+ Best, + /// Newly finalized block (implicitly best). + Final, } impl NewBlockState { - /// Whether this block is the new best block. - pub fn is_best(self) -> bool { - match self { - NewBlockState::Best | NewBlockState::Final => true, - NewBlockState::Normal => false, - } - } - - /// Whether this block is considered final. - pub fn is_final(self) -> bool { - match self { - NewBlockState::Final => true, - NewBlockState::Best | NewBlockState::Normal => false, - } - } + /// Whether this block is the new best block. + pub fn is_best(self) -> bool { + match self { + NewBlockState::Best | NewBlockState::Final => true, + NewBlockState::Normal => false, + } + } + + /// Whether this block is considered final. + pub fn is_final(self) -> bool { + match self { + NewBlockState::Final => true, + NewBlockState::Best | NewBlockState::Normal => false, + } + } } /// Block insertion operation. /// /// Keeps hold if the inserted block state and data. pub trait BlockImportOperation { - /// Associated state backend type. - type State: StateBackend>; - - /// Returns pending state. - /// - /// Returns None for backends with locally-unavailable state data. - fn state(&self) -> sp_blockchain::Result>; - - /// Append block data to the transaction. - fn set_block_data( - &mut self, - header: Block::Header, - body: Option>, - justification: Option, - state: NewBlockState, - ) -> sp_blockchain::Result<()>; - - /// Update cached data. - fn update_cache(&mut self, cache: HashMap>); - - /// Inject storage data into the database. - fn update_db_storage( - &mut self, - update: TransactionForSB, - ) -> sp_blockchain::Result<()>; - - /// Inject storage data into the database replacing any existing data. - fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result; - - /// Set storage changes. - fn update_storage( - &mut self, - update: StorageCollection, - child_update: ChildStorageCollection, - ) -> sp_blockchain::Result<()>; - - /// Inject changes trie data into the database. 
- fn update_changes_trie( - &mut self, - update: ChangesTrieTransaction, NumberFor>, - ) -> sp_blockchain::Result<()>; - - /// Insert auxiliary keys. - /// - /// Values are `None` if should be deleted. - fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> - where I: IntoIterator, Option>)>; - - /// Mark a block as finalized. - fn mark_finalized( - &mut self, - id: BlockId, - justification: Option, - ) -> sp_blockchain::Result<()>; - /// Mark a block as new head. If both block import and set head are specified, set head - /// overrides block import's best block rule. - fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; + /// Associated state backend type. + type State: StateBackend>; + + /// Returns pending state. + /// + /// Returns None for backends with locally-unavailable state data. + fn state(&self) -> sp_blockchain::Result>; + + /// Append block data to the transaction. + fn set_block_data( + &mut self, + header: Block::Header, + body: Option>, + justification: Option, + state: NewBlockState, + ) -> sp_blockchain::Result<()>; + + /// Update cached data. + fn update_cache(&mut self, cache: HashMap>); + + /// Inject storage data into the database. + fn update_db_storage( + &mut self, + update: TransactionForSB, + ) -> sp_blockchain::Result<()>; + + /// Inject storage data into the database replacing any existing data. + fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result; + + /// Set storage changes. + fn update_storage( + &mut self, + update: StorageCollection, + child_update: ChildStorageCollection, + ) -> sp_blockchain::Result<()>; + + /// Inject changes trie data into the database. + fn update_changes_trie( + &mut self, + update: ChangesTrieTransaction, NumberFor>, + ) -> sp_blockchain::Result<()>; + + /// Insert auxiliary keys. + /// + /// Values are `None` if should be deleted. 
+ fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> + where + I: IntoIterator, Option>)>; + + /// Mark a block as finalized. + fn mark_finalized( + &mut self, + id: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()>; + /// Mark a block as new head. If both block import and set head are specified, set head + /// overrides block import's best block rule. + fn mark_head(&mut self, id: BlockId) -> sp_blockchain::Result<()>; } /// Interface for performing operations on the backend. pub trait LockImportRun> { - /// Lock the import lock, and run operations inside. - fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From; + /// Lock the import lock, and run operations inside. + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From; } /// Finalize Facilities pub trait Finalizer> { - /// Mark all blocks up to given as finalized in operation. - /// - /// If `justification` is provided it is stored with the given finalized - /// block (any other finalized blocks are left unjustified). - /// - /// If the block being finalized is on a different fork from the current - /// best block the finalized block is set as best, this might be slightly - /// inaccurate (i.e. outdated). Usages that require determining an accurate - /// best block should use `SelectChain` instead of the client. - fn apply_finality( - &self, - operation: &mut ClientImportOperation, - id: BlockId, - justification: Option, - notify: bool, - ) -> sp_blockchain::Result<()>; - - - /// Finalize a block. - /// - /// This will implicitly finalize all blocks up to it and - /// fire finality notifications. - /// - /// If the block being finalized is on a different fork from the current - /// best block, the finalized block is set as best. This might be slightly - /// inaccurate (i.e. outdated). 
Usages that require determining an accurate - /// best block should use `SelectChain` instead of the client. - /// - /// Pass a flag to indicate whether finality notifications should be propagated. - /// This is usually tied to some synchronization state, where we don't send notifications - /// while performing major synchronization work. - fn finalize_block( - &self, - id: BlockId, - justification: Option, - notify: bool, - ) -> sp_blockchain::Result<()>; - + /// Mark all blocks up to given as finalized in operation. + /// + /// If `justification` is provided it is stored with the given finalized + /// block (any other finalized blocks are left unjustified). + /// + /// If the block being finalized is on a different fork from the current + /// best block the finalized block is set as best, this might be slightly + /// inaccurate (i.e. outdated). Usages that require determining an accurate + /// best block should use `SelectChain` instead of the client. + fn apply_finality( + &self, + operation: &mut ClientImportOperation, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()>; + + /// Finalize a block. + /// + /// This will implicitly finalize all blocks up to it and + /// fire finality notifications. + /// + /// If the block being finalized is on a different fork from the current + /// best block, the finalized block is set as best. This might be slightly + /// inaccurate (i.e. outdated). Usages that require determining an accurate + /// best block should use `SelectChain` instead of the client. + /// + /// Pass a flag to indicate whether finality notifications should be propagated. + /// This is usually tied to some synchronization state, where we don't send notifications + /// while performing major synchronization work. + fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()>; } /// Provides access to an auxiliary database. 
pub trait AuxStore { - /// Insert auxiliary data into key-value store. - /// - /// Deletions occur after insertions. - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()>; - - /// Query auxiliary data from key-value store. - fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>>; + /// Insert auxiliary data into key-value store. + /// + /// Deletions occur after insertions. + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()>; + + /// Query auxiliary data from key-value store. + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>>; } /// An `Iterator` that iterates keys in a given block under a prefix. pub struct KeyIterator<'a, State, Block> { - state: State, - prefix: Option<&'a StorageKey>, - current_key: Vec, - _phantom: PhantomData, + state: State, + prefix: Option<&'a StorageKey>, + current_key: Vec, + _phantom: PhantomData, } -impl <'a, State, Block> KeyIterator<'a, State, Block> { - /// create a KeyIterator instance - pub fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec) -> Self { - Self { - state, - prefix, - current_key, - _phantom: PhantomData, - } - } +impl<'a, State, Block> KeyIterator<'a, State, Block> { + /// create a KeyIterator instance + pub fn new(state: State, prefix: Option<&'a StorageKey>, current_key: Vec) -> Self { + Self { + state, + prefix, + current_key, + _phantom: PhantomData, + } + } } -impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> where - Block: BlockT, - State: StateBackend>, +impl<'a, State, Block> Iterator for KeyIterator<'a, State, Block> +where + Block: BlockT, + State: StateBackend>, { - type Item = StorageKey; - - fn next(&mut self) -> Option { - let next_key = self.state - .next_storage_key(&self.current_key) - .ok() - .flatten()?; - // this terminates the iterator the first time it fails. 
- if let Some(prefix) = self.prefix { - if !next_key.starts_with(&prefix.0[..]) { - return None; - } - } - self.current_key = next_key.clone(); - Some(StorageKey(next_key)) - } + type Item = StorageKey; + + fn next(&mut self) -> Option { + let next_key = self + .state + .next_storage_key(&self.current_key) + .ok() + .flatten()?; + // this terminates the iterator the first time it fails. + if let Some(prefix) = self.prefix { + if !next_key.starts_with(&prefix.0[..]) { + return None; + } + } + self.current_key = next_key.clone(); + Some(StorageKey(next_key)) + } } /// Provides acess to storage primitives pub trait StorageProvider> { - /// Given a `BlockId` and a key, return the value under the key in that block. - fn storage(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; - - /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. - fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result>; - - /// Given a `BlockId` and a key, return the value under the hash in that block. - fn storage_hash(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result>; - - /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block. - fn storage_pairs( - &self, - id: &BlockId, - key_prefix: &StorageKey - ) -> sp_blockchain::Result>; - - /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block. - fn storage_keys_iter<'a>( - &self, - id: &BlockId, - prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> - ) -> sp_blockchain::Result>; - - /// Given a `BlockId`, a key and a child storage key, return the value under the key in that block. - fn child_storage( - &self, - id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, - key: &StorageKey - ) -> sp_blockchain::Result>; - - /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys. 
- fn child_storage_keys( - &self, - id: &BlockId, - child_storage_key: &StorageKey, - child_info: ChildInfo, - key_prefix: &StorageKey - ) -> sp_blockchain::Result>; - - /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block. - fn child_storage_hash( - &self, - id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, - key: &StorageKey - ) -> sp_blockchain::Result>; - - /// Get longest range within [first; last] that is possible to use in `key_changes` - /// and `key_changes_proof` calls. - /// Range could be shortened from the beginning if some changes tries have been pruned. - /// Returns Ok(None) if changes tries are not supported. - fn max_key_changes_range( - &self, - first: NumberFor, - last: BlockId, - ) -> sp_blockchain::Result, BlockId)>>; - - /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. - /// Works only for runtimes that are supporting changes tries. - /// - /// Changes are returned in descending order (i.e. last block comes first). - fn key_changes( - &self, - first: NumberFor, - last: BlockId, - storage_key: Option<&StorageKey>, - key: &StorageKey - ) -> sp_blockchain::Result, u32)>>; + /// Given a `BlockId` and a key, return the value under the key in that block. + fn storage( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key prefix, return the matching storage keys in that block. + fn storage_keys( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key, return the value under the hash in that block. + fn storage_hash( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key prefix, return the matching child storage keys and values in that block. 
+ fn storage_pairs( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId` and a key prefix, return a `KeyIterator` iterates matching storage keys in that block. + fn storage_keys_iter<'a>( + &self, + id: &BlockId, + prefix: Option<&'a StorageKey>, + start_key: Option<&StorageKey>, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId`, a key and a child storage key, return the value under the key in that block. + fn child_storage( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId`, a key prefix, and a child storage key, return the matching child storage keys. + fn child_storage_keys( + &self, + id: &BlockId, + child_storage_key: &StorageKey, + child_info: ChildInfo, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result>; + + /// Given a `BlockId`, a key and a child storage key, return the hash under the key in that block. + fn child_storage_hash( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result>; + + /// Get longest range within [first; last] that is possible to use in `key_changes` + /// and `key_changes_proof` calls. + /// Range could be shortened from the beginning if some changes tries have been pruned. + /// Returns Ok(None) if changes tries are not supported. + fn max_key_changes_range( + &self, + first: NumberFor, + last: BlockId, + ) -> sp_blockchain::Result, BlockId)>>; + + /// Get pairs of (block, extrinsic) where key has been changed at given blocks range. + /// Works only for runtimes that are supporting changes tries. + /// + /// Changes are returned in descending order (i.e. last block comes first). + fn key_changes( + &self, + first: NumberFor, + last: BlockId, + storage_key: Option<&StorageKey>, + key: &StorageKey, + ) -> sp_blockchain::Result, u32)>>; } /// Client backend. 
@@ -367,109 +382,116 @@ pub trait StorageProvider> { /// The same applies for live `BlockImportOperation`s: while an import operation building on a /// parent `P` is alive, the state for `P` should not be pruned. pub trait Backend: AuxStore + Send + Sync { - /// Associated block insertion operation type. - type BlockImportOperation: BlockImportOperation; - /// Associated blockchain backend type. - type Blockchain: BlockchainBackend; - /// Associated state backend type. - type State: StateBackend> + Send; - /// Offchain workers local storage. - type OffchainStorage: OffchainStorage; - - /// Begin a new block insertion transaction with given parent block id. - /// - /// When constructing the genesis, this is called with all-zero hash. - fn begin_operation(&self) -> sp_blockchain::Result; - - /// Note an operation to contain state transition. - fn begin_state_operation( - &self, - operation: &mut Self::BlockImportOperation, - block: BlockId, - ) -> sp_blockchain::Result<()>; - - /// Commit block insertion. - fn commit_operation(&self, transaction: Self::BlockImportOperation) -> sp_blockchain::Result<()>; - - /// Finalize block with given Id. - /// - /// This should only be called if the parent of the given block has been finalized. - fn finalize_block( - &self, - block: BlockId, - justification: Option, - ) -> sp_blockchain::Result<()>; - - /// Returns reference to blockchain backend. - fn blockchain(&self) -> &Self::Blockchain; - - /// Returns current usage statistics. - fn usage_info(&self) -> Option; - - /// Returns reference to changes trie storage. - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage>; - - /// Returns a handle to offchain storage. - fn offchain_storage(&self) -> Option; - - /// Returns true if state for given block is available. - fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { - self.state_at(BlockId::Hash(hash.clone())).is_ok() - } - - /// Returns state backend with post-state of given block. 
- fn state_at(&self, block: BlockId) -> sp_blockchain::Result; - - /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set - /// it will attempt to revert past any finalized block, this is unsafe and - /// can potentially leave the node in an inconsistent state. - /// - /// Returns the number of blocks that were successfully reverted. - fn revert( - &self, - n: NumberFor, - revert_finalized: bool, - ) -> sp_blockchain::Result>; - - /// Insert auxiliary data into key-value store. - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> - { - AuxStore::insert_aux(self, insert, delete) - } - /// Query auxiliary data from key-value store. - fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { - AuxStore::get_aux(self, key) - } - - /// Gain access to the import lock around this backend. - /// - /// _Note_ Backend isn't expected to acquire the lock by itself ever. Rather - /// the using components should acquire and hold the lock whenever they do - /// something that the import of a block would interfere with, e.g. importing - /// a new block or calculating the best head. - fn get_import_lock(&self) -> &RwLock<()>; + /// Associated block insertion operation type. + type BlockImportOperation: BlockImportOperation; + /// Associated blockchain backend type. + type Blockchain: BlockchainBackend; + /// Associated state backend type. + type State: StateBackend> + Send; + /// Offchain workers local storage. + type OffchainStorage: OffchainStorage; + + /// Begin a new block insertion transaction with given parent block id. + /// + /// When constructing the genesis, this is called with all-zero hash. + fn begin_operation(&self) -> sp_blockchain::Result; + + /// Note an operation to contain state transition. 
+ fn begin_state_operation( + &self, + operation: &mut Self::BlockImportOperation, + block: BlockId, + ) -> sp_blockchain::Result<()>; + + /// Commit block insertion. + fn commit_operation( + &self, + transaction: Self::BlockImportOperation, + ) -> sp_blockchain::Result<()>; + + /// Finalize block with given Id. + /// + /// This should only be called if the parent of the given block has been finalized. + fn finalize_block( + &self, + block: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()>; + + /// Returns reference to blockchain backend. + fn blockchain(&self) -> &Self::Blockchain; + + /// Returns current usage statistics. + fn usage_info(&self) -> Option; + + /// Returns reference to changes trie storage. + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage>; + + /// Returns a handle to offchain storage. + fn offchain_storage(&self) -> Option; + + /// Returns true if state for given block is available. + fn have_state_at(&self, hash: &Block::Hash, _number: NumberFor) -> bool { + self.state_at(BlockId::Hash(hash.clone())).is_ok() + } + + /// Returns state backend with post-state of given block. + fn state_at(&self, block: BlockId) -> sp_blockchain::Result; + + /// Attempts to revert the chain by `n` blocks. If `revert_finalized` is set + /// it will attempt to revert past any finalized block, this is unsafe and + /// can potentially leave the node in an inconsistent state. + /// + /// Returns the number of blocks that were successfully reverted. + fn revert( + &self, + n: NumberFor, + revert_finalized: bool, + ) -> sp_blockchain::Result>; + + /// Insert auxiliary data into key-value store. + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + AuxStore::insert_aux(self, insert, delete) + } + /// Query auxiliary data from key-value store. 
+ fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + AuxStore::get_aux(self, key) + } + + /// Gain access to the import lock around this backend. + /// + /// _Note_ Backend isn't expected to acquire the lock by itself ever. Rather + /// the using components should acquire and hold the lock whenever they do + /// something that the import of a block would interfere with, e.g. importing + /// a new block or calculating the best head. + fn get_import_lock(&self) -> &RwLock<()>; } /// Changes trie storage that supports pruning. pub trait PrunableStateChangesTrieStorage: - StateChangesTrieStorage, NumberFor> + StateChangesTrieStorage, NumberFor> { - /// Get reference to StateChangesTrieStorage. - fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; - /// Get configuration at given block. - fn configuration_at(&self, at: &BlockId) -> sp_blockchain::Result< - ChangesTrieConfigurationRange, Block::Hash> - >; - /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. - /// It is guaranteed that we have no any changes tries before (and including) this block. - /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). - fn oldest_pruned_digest_range_end(&self) -> NumberFor; + /// Get reference to StateChangesTrieStorage. + fn storage(&self) -> &dyn StateChangesTrieStorage, NumberFor>; + /// Get configuration at given block. + fn configuration_at( + &self, + at: &BlockId, + ) -> sp_blockchain::Result, Block::Hash>>; + /// Get end block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. + /// It is guaranteed that we have no any changes tries before (and including) this block. + /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). + fn oldest_pruned_digest_range_end(&self) -> NumberFor; } /// Mark for all Backend implementations, that are making use of state data, stored locally. 
@@ -477,29 +499,33 @@ pub trait LocalBackend: Backend {} /// Mark for all Backend implementations, that are fetching required state data from remote nodes. pub trait RemoteBackend: Backend { - /// Returns true if the state for given block is available locally. - fn is_local_state_available(&self, block: &BlockId) -> bool; - - /// Returns reference to blockchain backend. - /// - /// Returned backend either resolves blockchain data - /// locally, or prepares request to fetch that data from remote node. - fn remote_blockchain(&self) -> Arc>; + /// Returns true if the state for given block is available locally. + fn is_local_state_available(&self, block: &BlockId) -> bool; + + /// Returns reference to blockchain backend. + /// + /// Returned backend either resolves blockchain data + /// locally, or prepares request to fetch that data from remote node. + fn remote_blockchain(&self) -> Arc>; } /// Return changes tries state at given block. pub fn changes_tries_state_at_block<'a, Block: BlockT>( - block: &BlockId, - maybe_storage: Option<&'a dyn PrunableStateChangesTrieStorage>, + block: &BlockId, + maybe_storage: Option<&'a dyn PrunableStateChangesTrieStorage>, ) -> sp_blockchain::Result, NumberFor>>> { - let storage = match maybe_storage { - Some(storage) => storage, - None => return Ok(None), - }; - - let config_range = storage.configuration_at(block)?; - match config_range.config { - Some(config) => Ok(Some(ChangesTrieState::new(config, config_range.zero.0, storage.storage()))), - None => Ok(None), - } + let storage = match maybe_storage { + Some(storage) => storage, + None => return Ok(None), + }; + + let config_range = storage.configuration_at(block)?; + match config_range.config { + Some(config) => Ok(Some(ChangesTrieState::new( + config, + config_range.zero.0, + storage.storage(), + ))), + None => Ok(None), + } } diff --git a/client/api/src/call_executor.rs b/client/api/src/call_executor.rs index 3afd29be8d..6d4ae1811c 100644 --- a/client/api/src/call_executor.rs 
+++ b/client/api/src/call_executor.rs @@ -16,118 +16,120 @@ //! A method call executor interface. -use std::{panic::UnwindSafe, result, cell::RefCell}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use sc_executor::{NativeVersion, RuntimeVersion}; +use sp_core::NativeOrEncoded; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor}, -}; -use sp_state_machine::{ - OverlayedChanges, ExecutionManager, ExecutionStrategy, StorageProof, + generic::BlockId, + traits::{Block as BlockT, HashFor}, }; -use sc_executor::{RuntimeVersion, NativeVersion}; -use sp_externalities::Extensions; -use sp_core::NativeOrEncoded; +use sp_state_machine::{ExecutionManager, ExecutionStrategy, OverlayedChanges, StorageProof}; +use std::{cell::RefCell, panic::UnwindSafe, result}; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; use crate::execution_extensions::ExecutionExtensions; +use sp_api::{InitializeBlock, ProofRecorder, StorageTransactionCache}; /// Executor Provider pub trait ExecutorProvider { - /// executor instance - type Executor: CallExecutor; + /// executor instance + type Executor: CallExecutor; - /// Get call executor reference. - fn executor(&self) -> &Self::Executor; + /// Get call executor reference. + fn executor(&self) -> &Self::Executor; - /// Get a reference to the execution extensions. - fn execution_extensions(&self) -> &ExecutionExtensions; + /// Get a reference to the execution extensions. + fn execution_extensions(&self) -> &ExecutionExtensions; } /// Method call executor. pub trait CallExecutor { - /// Externalities error type. - type Error: sp_state_machine::Error; - - /// The backend used by the node. - type Backend: crate::backend::Backend; - - /// Execute a call to a contract on top of state in a block of given hash. - /// - /// No changes are made. 
- fn call( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - extensions: Option, - ) -> Result, sp_blockchain::Error>; - - /// Execute a contextual call on top of state in a block of a given hash. - /// - /// No changes are made. - /// Before executing the method, passed header is installed as the current header - /// of the execution context. - fn contextual_call< - 'a, - IB: Fn() -> sp_blockchain::Result<()>, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - initialize_block_fn: IB, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &RefCell, - storage_transaction_cache: Option<&RefCell< - StorageTransactionCache>::State>, - >>, - initialize_block: InitializeBlock<'a, B>, - execution_manager: ExecutionManager, - native_call: Option, - proof_recorder: &Option>, - extensions: Option, - ) -> sp_blockchain::Result> where ExecutionManager: Clone; - - /// Extract RuntimeVersion of given block - /// - /// No changes are made. - fn runtime_version(&self, id: &BlockId) -> Result; - - /// Execute a call to a contract on top of given state, gathering execution proof. - /// - /// No changes are made. - fn prove_at_state>>( - &self, - mut state: S, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) - as Box - )?; - self.prove_at_trie_state(trie_state, overlay, method, call_data) - } - - /// Execute a call to a contract on top of given trie state, gathering execution proof. - /// - /// No changes are made. 
- fn prove_at_trie_state>>( - &self, - trie_state: &sp_state_machine::TrieBackend>, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error>; - - /// Get runtime version if supported. - fn native_runtime_version(&self) -> Option<&NativeVersion>; + /// Externalities error type. + type Error: sp_state_machine::Error; + + /// The backend used by the node. + type Backend: crate::backend::Backend; + + /// Execute a call to a contract on top of state in a block of given hash. + /// + /// No changes are made. + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + extensions: Option, + ) -> Result, sp_blockchain::Error>; + + /// Execute a contextual call on top of state in a block of a given hash. + /// + /// No changes are made. + /// Before executing the method, passed header is installed as the current header + /// of the execution context. + fn contextual_call< + 'a, + IB: Fn() -> sp_blockchain::Result<()>, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + initialize_block_fn: IB, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &RefCell, + storage_transaction_cache: Option< + &RefCell< + StorageTransactionCache>::State>, + >, + >, + initialize_block: InitializeBlock<'a, B>, + execution_manager: ExecutionManager, + native_call: Option, + proof_recorder: &Option>, + extensions: Option, + ) -> sp_blockchain::Result> + where + ExecutionManager: Clone; + + /// Extract RuntimeVersion of given block + /// + /// No changes are made. + fn runtime_version(&self, id: &BlockId) -> Result; + + /// Execute a call to a contract on top of given state, gathering execution proof. + /// + /// No changes are made. 
+ fn prove_at_state>>( + &self, + mut state: S, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; + self.prove_at_trie_state(trie_state, overlay, method, call_data) + } + + /// Execute a call to a contract on top of given trie state, gathering execution proof. + /// + /// No changes are made. + fn prove_at_trie_state>>( + &self, + trie_state: &sp_state_machine::TrieBackend>, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, StorageProof), sp_blockchain::Error>; + + /// Get runtime version if supported. + fn native_runtime_version(&self) -> Option<&NativeVersion>; } diff --git a/client/api/src/client.rs b/client/api/src/client.rs index c855cd3a08..efeeddab50 100644 --- a/client/api/src/client.rs +++ b/client/api/src/client.rs @@ -16,19 +16,19 @@ //! A set of APIs supported by the client along with their primitives. -use std::{fmt, collections::HashSet}; +use sp_consensus::BlockOrigin; use sp_core::storage::StorageKey; use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - generic::{BlockId, SignedBlock}, - Justification, + generic::{BlockId, SignedBlock}, + traits::{Block as BlockT, NumberFor}, + Justification, }; -use sp_consensus::BlockOrigin; +use std::{collections::HashSet, fmt}; use crate::blockchain::Info; use crate::notifications::StorageEventStream; -use sp_utils::mpsc::TracingUnboundedReceiver; use sp_blockchain; +use sp_utils::mpsc::TracingUnboundedReceiver; /// Type that implements `futures::Stream` of block import events. pub type ImportNotifications = TracingUnboundedReceiver>; @@ -50,62 +50,66 @@ pub type BadBlocks = Option::Hash>>; /// Figure out the block type for a given type (for now, just a `Client`). pub trait BlockOf { - /// The type of the block. 
- type Type: BlockT; + /// The type of the block. + type Type: BlockT; } /// A source of blockchain events. pub trait BlockchainEvents { - /// Get block import event stream. Not guaranteed to be fired for every - /// imported block. - fn import_notification_stream(&self) -> ImportNotifications; - - /// Get a stream of finality notifications. Not guaranteed to be fired for every - /// finalized block. - fn finality_notification_stream(&self) -> FinalityNotifications; - - /// Get storage changes event stream. - /// - /// Passing `None` as `filter_keys` subscribes to all storage changes. - fn storage_changes_notification_stream( - &self, - filter_keys: Option<&[StorageKey]>, - child_filter_keys: Option<&[(StorageKey, Option>)]>, - ) -> sp_blockchain::Result>; + /// Get block import event stream. Not guaranteed to be fired for every + /// imported block. + fn import_notification_stream(&self) -> ImportNotifications; + + /// Get a stream of finality notifications. Not guaranteed to be fired for every + /// finalized block. + fn finality_notification_stream(&self) -> FinalityNotifications; + + /// Get storage changes event stream. + /// + /// Passing `None` as `filter_keys` subscribes to all storage changes. + fn storage_changes_notification_stream( + &self, + filter_keys: Option<&[StorageKey]>, + child_filter_keys: Option<&[(StorageKey, Option>)]>, + ) -> sp_blockchain::Result>; } /// Interface for fetching block data. pub trait BlockBackend { - /// Get block body by ID. Returns `None` if the body is not stored. - fn block_body( - &self, - id: &BlockId - ) -> sp_blockchain::Result::Extrinsic>>>; + /// Get block body by ID. Returns `None` if the body is not stored. + fn block_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Extrinsic>>>; - /// Get full block by id. - fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; + /// Get full block by id. + fn block(&self, id: &BlockId) -> sp_blockchain::Result>>; - /// Get block status. 
- fn block_status(&self, id: &BlockId) -> sp_blockchain::Result; + /// Get block status. + fn block_status(&self, id: &BlockId) + -> sp_blockchain::Result; - /// Get block justification set by id. - fn justification(&self, id: &BlockId) -> sp_blockchain::Result>; + /// Get block justification set by id. + fn justification(&self, id: &BlockId) -> sp_blockchain::Result>; } /// Provide a list of potential uncle headers for a given block. pub trait ProvideUncles { - /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. - fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) - -> sp_blockchain::Result>; + /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. + fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result>; } /// Client info #[derive(Debug)] pub struct ClientInfo { - /// Best block hash. - pub chain: Info, - /// Usage info, if backend supports this. - pub usage: Option, + /// Best block hash. + pub chain: Info, + /// Usage info, if backend supports this. + pub usage: Option, } /// A wrapper to store the size of some memory. @@ -113,78 +117,82 @@ pub struct ClientInfo { pub struct MemorySize(usize); impl MemorySize { - /// Creates `Self` from the given `bytes` size. - pub fn from_bytes(bytes: usize) -> Self { - Self(bytes) - } - - /// Returns the memory size as bytes. - pub fn as_bytes(self) -> usize { - self.0 - } + /// Creates `Self` from the given `bytes` size. + pub fn from_bytes(bytes: usize) -> Self { + Self(bytes) + } + + /// Returns the memory size as bytes. 
+ pub fn as_bytes(self) -> usize { + self.0 + } } impl fmt::Display for MemorySize { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if self.0 < 1024 { - write!(f, "{} bytes", self.0) - } else if self.0 < 1024 * 1024 { - write!(f, "{:.2} KiB", self.0 as f64 / 1024f64) - } else if self.0 < 1024 * 1024 * 1024 { - write!(f, "{:.2} MiB", self.0 as f64 / (1024f64 * 1024f64)) - } else { - write!(f, "{:.2} GiB", self.0 as f64 / (1024f64 * 1024f64 * 1024f64)) - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if self.0 < 1024 { + write!(f, "{} bytes", self.0) + } else if self.0 < 1024 * 1024 { + write!(f, "{:.2} KiB", self.0 as f64 / 1024f64) + } else if self.0 < 1024 * 1024 * 1024 { + write!(f, "{:.2} MiB", self.0 as f64 / (1024f64 * 1024f64)) + } else { + write!( + f, + "{:.2} GiB", + self.0 as f64 / (1024f64 * 1024f64 * 1024f64) + ) + } + } } /// Memory statistics for state db. #[derive(Default, Clone, Debug)] pub struct StateDbMemoryInfo { - /// Memory usage of the non-canonical overlay - pub non_canonical: MemorySize, - /// Memory usage of the pruning window. - pub pruning: Option, - /// Memory usage of the pinned blocks. - pub pinned: MemorySize, + /// Memory usage of the non-canonical overlay + pub non_canonical: MemorySize, + /// Memory usage of the pruning window. + pub pruning: Option, + /// Memory usage of the pinned blocks. + pub pinned: MemorySize, } /// Memory statistics for client instance. #[derive(Default, Clone, Debug)] pub struct MemoryInfo { - /// Size of state cache. - pub state_cache: MemorySize, - /// Size of backend database cache. - pub database_cache: MemorySize, - /// Size of the state db. - pub state_db: StateDbMemoryInfo, + /// Size of state cache. + pub state_cache: MemorySize, + /// Size of backend database cache. + pub database_cache: MemorySize, + /// Size of the state db. + pub state_db: StateDbMemoryInfo, } /// I/O statistics for client instance. 
#[derive(Default, Clone, Debug)] pub struct IoInfo { - /// Number of transactions. - pub transactions: u64, - /// Total bytes read from disk. - pub bytes_read: u64, - /// Total bytes written to disk. - pub bytes_written: u64, - /// Total key writes to disk. - pub writes: u64, - /// Total key reads from disk. - pub reads: u64, - /// Average size of the transaction. - pub average_transaction_size: u64, - /// State reads (keys) - pub state_reads: u64, - /// State reads (keys) from cache. - pub state_reads_cache: u64, - /// State reads (keys) - pub state_writes: u64, - /// State write (keys) already cached. - pub state_writes_cache: u64, - /// State write (trie nodes) to backend db. - pub state_writes_nodes: u64, + /// Number of transactions. + pub transactions: u64, + /// Total bytes read from disk. + pub bytes_read: u64, + /// Total bytes written to disk. + pub bytes_written: u64, + /// Total key writes to disk. + pub writes: u64, + /// Total key reads from disk. + pub reads: u64, + /// Average size of the transaction. + pub average_transaction_size: u64, + /// State reads (keys) + pub state_reads: u64, + /// State reads (keys) from cache. + pub state_reads_cache: u64, + /// State reads (keys) + pub state_writes: u64, + /// State write (keys) already cached. + pub state_writes_cache: u64, + /// State write (trie nodes) to backend db. + pub state_writes_nodes: u64, } /// Usage statistics for running client instance. @@ -194,55 +202,55 @@ pub struct IoInfo { /// gathering of the statistics. #[derive(Default, Clone, Debug)] pub struct UsageInfo { - /// Memory statistics. - pub memory: MemoryInfo, - /// I/O statistics. - pub io: IoInfo, + /// Memory statistics. + pub memory: MemoryInfo, + /// I/O statistics. 
+ pub io: IoInfo, } impl fmt::Display for UsageInfo { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "caches: ({} state, {} db overlay), \ + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "caches: ({} state, {} db overlay), \ state db: ({} non-canonical, {} pruning, {} pinned), \ i/o: ({} tx, {} write, {} read, {} avg tx, {}/{} key cache reads/total, {} trie nodes writes)", - self.memory.state_cache, - self.memory.database_cache, - self.memory.state_db.non_canonical, - self.memory.state_db.pruning.unwrap_or_default(), - self.memory.state_db.pinned, - self.io.transactions, - self.io.bytes_written, - self.io.bytes_read, - self.io.average_transaction_size, - self.io.state_reads_cache, - self.io.state_reads, - self.io.state_writes_nodes, - ) - } + self.memory.state_cache, + self.memory.database_cache, + self.memory.state_db.non_canonical, + self.memory.state_db.pruning.unwrap_or_default(), + self.memory.state_db.pinned, + self.io.transactions, + self.io.bytes_written, + self.io.bytes_read, + self.io.average_transaction_size, + self.io.state_reads_cache, + self.io.state_reads, + self.io.state_writes_nodes, + ) + } } /// Summary of an imported block #[derive(Clone, Debug)] pub struct BlockImportNotification { - /// Imported block header hash. - pub hash: Block::Hash, - /// Imported block origin. - pub origin: BlockOrigin, - /// Imported block header. - pub header: Block::Header, - /// Is this the new best block. - pub is_new_best: bool, - /// List of retracted blocks ordered by block number. - pub retracted: Vec, + /// Imported block header hash. + pub hash: Block::Hash, + /// Imported block origin. + pub origin: BlockOrigin, + /// Imported block header. + pub header: Block::Header, + /// Is this the new best block. + pub is_new_best: bool, + /// List of retracted blocks ordered by block number. + pub retracted: Vec, } /// Summary of a finalized block. 
#[derive(Clone, Debug)] pub struct FinalityNotification { - /// Imported block header hash. - pub hash: Block::Hash, - /// Imported block header. - pub header: Block::Header, + /// Imported block header hash. + pub hash: Block::Hash, + /// Imported block header. + pub header: Block::Header, } diff --git a/client/api/src/execution_extensions.rs b/client/api/src/execution_extensions.rs index 10d33c20e6..8cf7131a90 100644 --- a/client/api/src/execution_extensions.rs +++ b/client/api/src/execution_extensions.rs @@ -20,58 +20,55 @@ //! strategy for the runtime calls and provide the right `Externalities` //! extensions to support APIs for particular execution context & capabilities. -use std::sync::{Weak, Arc}; use codec::Decode; +use parking_lot::RwLock; use sp_core::{ - ExecutionContext, - offchain::{self, OffchainExt, TransactionPoolExt}, - traits::{BareCryptoStorePtr, KeystoreExt}, -}; -use sp_runtime::{ - generic::BlockId, - traits, + offchain::{self, OffchainExt, TransactionPoolExt}, + traits::{BareCryptoStorePtr, KeystoreExt}, + ExecutionContext, }; -use sp_state_machine::{ExecutionStrategy, ExecutionManager, DefaultHandler}; use sp_externalities::Extensions; -use parking_lot::RwLock; +use sp_runtime::{generic::BlockId, traits}; +use sp_state_machine::{DefaultHandler, ExecutionManager, ExecutionStrategy}; +use std::sync::{Arc, Weak}; /// Execution strategies settings. #[derive(Debug, Clone)] pub struct ExecutionStrategies { - /// Execution strategy used when syncing. - pub syncing: ExecutionStrategy, - /// Execution strategy used when importing blocks. - pub importing: ExecutionStrategy, - /// Execution strategy used when constructing blocks. - pub block_construction: ExecutionStrategy, - /// Execution strategy used for offchain workers. - pub offchain_worker: ExecutionStrategy, - /// Execution strategy used in other cases. - pub other: ExecutionStrategy, + /// Execution strategy used when syncing. 
+ pub syncing: ExecutionStrategy, + /// Execution strategy used when importing blocks. + pub importing: ExecutionStrategy, + /// Execution strategy used when constructing blocks. + pub block_construction: ExecutionStrategy, + /// Execution strategy used for offchain workers. + pub offchain_worker: ExecutionStrategy, + /// Execution strategy used in other cases. + pub other: ExecutionStrategy, } impl Default for ExecutionStrategies { - fn default() -> ExecutionStrategies { - ExecutionStrategies { - syncing: ExecutionStrategy::NativeElseWasm, - importing: ExecutionStrategy::NativeElseWasm, - block_construction: ExecutionStrategy::AlwaysWasm, - offchain_worker: ExecutionStrategy::NativeWhenPossible, - other: ExecutionStrategy::NativeElseWasm, - } - } + fn default() -> ExecutionStrategies { + ExecutionStrategies { + syncing: ExecutionStrategy::NativeElseWasm, + importing: ExecutionStrategy::NativeElseWasm, + block_construction: ExecutionStrategy::AlwaysWasm, + offchain_worker: ExecutionStrategy::NativeWhenPossible, + other: ExecutionStrategy::NativeElseWasm, + } + } } /// Generate the starting set of ExternalitiesExtensions based upon the given capabilities pub trait ExtensionsFactory: Send + Sync { - /// Make `Extensions` for given `Capabilities`. - fn extensions_for(&self, capabilities: offchain::Capabilities) -> Extensions; + /// Make `Extensions` for given `Capabilities`. + fn extensions_for(&self, capabilities: offchain::Capabilities) -> Extensions; } impl ExtensionsFactory for () { - fn extensions_for(&self, _capabilities: offchain::Capabilities) -> Extensions { - Extensions::new() - } + fn extensions_for(&self, _capabilities: offchain::Capabilities) -> Extensions { + Extensions::new() + } } /// A producer of execution extensions for offchain calls. @@ -80,126 +77,132 @@ impl ExtensionsFactory for () { /// and is responsible for producing a correct `Extensions` object. /// for each call, based on required `Capabilities`. 
pub struct ExecutionExtensions { - strategies: ExecutionStrategies, - keystore: Option, - // FIXME: these two are only RwLock because of https://github.com/paritytech/substrate/issues/4587 - // remove when fixed. - transaction_pool: RwLock>>>, - extensions_factory: RwLock>, + strategies: ExecutionStrategies, + keystore: Option, + // FIXME: these two are only RwLock because of https://github.com/paritytech/substrate/issues/4587 + // remove when fixed. + transaction_pool: + RwLock>>>, + extensions_factory: RwLock>, } impl Default for ExecutionExtensions { - fn default() -> Self { - Self { - strategies: Default::default(), - keystore: None, - transaction_pool: RwLock::new(None), - extensions_factory: RwLock::new(Box::new(())), - } - } + fn default() -> Self { + Self { + strategies: Default::default(), + keystore: None, + transaction_pool: RwLock::new(None), + extensions_factory: RwLock::new(Box::new(())), + } + } } impl ExecutionExtensions { - /// Create new `ExecutionExtensions` given a `keystore` and `ExecutionStrategies`. - pub fn new( - strategies: ExecutionStrategies, - keystore: Option, - ) -> Self { - let transaction_pool = RwLock::new(None); - let extensions_factory = Box::new(()); - Self { strategies, keystore, extensions_factory: RwLock::new(extensions_factory), transaction_pool } - } - - /// Get a reference to the execution strategies. - pub fn strategies(&self) -> &ExecutionStrategies { - &self.strategies - } - - /// Set the new extensions_factory - pub fn set_extensions_factory(&self, maker: Box) { - *self.extensions_factory.write() = maker; - } - - /// Register transaction pool extension. - /// - /// To break retain cycle between `Client` and `TransactionPool` we require this - /// extension to be a `Weak` reference. - /// That's also the reason why it's being registered lazily instead of - /// during initialization. 
- pub fn register_transaction_pool(&self, pool: Weak>) { - *self.transaction_pool.write() = Some(pool); - } - - /// Create `ExecutionManager` and `Extensions` for given offchain call. - /// - /// Based on the execution context and capabilities it produces - /// the right manager and extensions object to support desired set of APIs. - pub fn manager_and_extensions( - &self, - at: &BlockId, - context: ExecutionContext, - ) -> ( - ExecutionManager>, - Extensions, - ) { - let manager = match context { - ExecutionContext::BlockConstruction => - self.strategies.block_construction.get_manager(), - ExecutionContext::Syncing => - self.strategies.syncing.get_manager(), - ExecutionContext::Importing => - self.strategies.importing.get_manager(), - ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => - self.strategies.offchain_worker.get_manager(), - ExecutionContext::OffchainCall(_) => - self.strategies.other.get_manager(), - }; - - let capabilities = context.capabilities(); - - let mut extensions = self.extensions_factory.read().extensions_for(capabilities); - - if capabilities.has(offchain::Capability::Keystore) { - if let Some(keystore) = self.keystore.as_ref() { - extensions.register(KeystoreExt(keystore.clone())); - } - } - - if capabilities.has(offchain::Capability::TransactionPool) { - if let Some(pool) = self.transaction_pool.read().as_ref().and_then(|x| x.upgrade()) { - extensions.register(TransactionPoolExt(Box::new(TransactionPoolAdapter { - at: *at, - pool, - }) as _)); - } - } - - if let ExecutionContext::OffchainCall(Some(ext)) = context { - extensions.register( - OffchainExt::new(offchain::LimitedExternalities::new(capabilities, ext.0)) - ) - } - - (manager, extensions) - } + /// Create new `ExecutionExtensions` given a `keystore` and `ExecutionStrategies`. 
+ pub fn new(strategies: ExecutionStrategies, keystore: Option) -> Self { + let transaction_pool = RwLock::new(None); + let extensions_factory = Box::new(()); + Self { + strategies, + keystore, + extensions_factory: RwLock::new(extensions_factory), + transaction_pool, + } + } + + /// Get a reference to the execution strategies. + pub fn strategies(&self) -> &ExecutionStrategies { + &self.strategies + } + + /// Set the new extensions_factory + pub fn set_extensions_factory(&self, maker: Box) { + *self.extensions_factory.write() = maker; + } + + /// Register transaction pool extension. + /// + /// To break retain cycle between `Client` and `TransactionPool` we require this + /// extension to be a `Weak` reference. + /// That's also the reason why it's being registered lazily instead of + /// during initialization. + pub fn register_transaction_pool( + &self, + pool: Weak>, + ) { + *self.transaction_pool.write() = Some(pool); + } + + /// Create `ExecutionManager` and `Extensions` for given offchain call. + /// + /// Based on the execution context and capabilities it produces + /// the right manager and extensions object to support desired set of APIs. 
+ pub fn manager_and_extensions( + &self, + at: &BlockId, + context: ExecutionContext, + ) -> (ExecutionManager>, Extensions) { + let manager = match context { + ExecutionContext::BlockConstruction => self.strategies.block_construction.get_manager(), + ExecutionContext::Syncing => self.strategies.syncing.get_manager(), + ExecutionContext::Importing => self.strategies.importing.get_manager(), + ExecutionContext::OffchainCall(Some((_, capabilities))) if capabilities.has_all() => { + self.strategies.offchain_worker.get_manager() + } + ExecutionContext::OffchainCall(_) => self.strategies.other.get_manager(), + }; + + let capabilities = context.capabilities(); + + let mut extensions = self.extensions_factory.read().extensions_for(capabilities); + + if capabilities.has(offchain::Capability::Keystore) { + if let Some(keystore) = self.keystore.as_ref() { + extensions.register(KeystoreExt(keystore.clone())); + } + } + + if capabilities.has(offchain::Capability::TransactionPool) { + if let Some(pool) = self + .transaction_pool + .read() + .as_ref() + .and_then(|x| x.upgrade()) + { + extensions + .register(TransactionPoolExt( + Box::new(TransactionPoolAdapter { at: *at, pool }) as _, + )); + } + } + + if let ExecutionContext::OffchainCall(Some(ext)) = context { + extensions.register(OffchainExt::new(offchain::LimitedExternalities::new( + capabilities, + ext.0, + ))) + } + + (manager, extensions) + } } /// A wrapper type to pass `BlockId` to the actual transaction pool. 
struct TransactionPoolAdapter { - at: BlockId, - pool: Arc>, + at: BlockId, + pool: Arc>, } impl offchain::TransactionPool for TransactionPoolAdapter { - fn submit_transaction(&mut self, data: Vec) -> Result<(), ()> { - let xt = match Block::Extrinsic::decode(&mut &*data) { - Ok(xt) => xt, - Err(e) => { - log::warn!("Unable to decode extrinsic: {:?}: {}", data, e.what()); - return Err(()); - }, - }; - - self.pool.submit_at(&self.at, xt) - } + fn submit_transaction(&mut self, data: Vec) -> Result<(), ()> { + let xt = match Block::Extrinsic::decode(&mut &*data) { + Ok(xt) => xt, + Err(e) => { + log::warn!("Unable to decode extrinsic: {:?}: {}", data, e.what()); + return Err(()); + } + }; + + self.pool.submit_at(&self.at, xt) + } } diff --git a/client/api/src/lib.rs b/client/api/src/lib.rs index e4080323c1..4567c75f80 100644 --- a/client/api/src/lib.rs +++ b/client/api/src/lib.rs @@ -25,55 +25,60 @@ pub mod light; pub mod notifications; pub mod proof_provider; -pub use sp_blockchain as blockchain; pub use backend::*; -pub use notifications::*; pub use call_executor::*; pub use client::*; pub use light::*; pub use notifications::*; +pub use notifications::*; pub use proof_provider::*; +pub use sp_blockchain as blockchain; -pub use sp_state_machine::{StorageProof, ExecutionStrategy, CloneableSpawn}; +pub use sp_state_machine::{CloneableSpawn, ExecutionStrategy, StorageProof}; /// Utility methods for the client. pub mod utils { - use sp_blockchain::{HeaderBackend, HeaderMetadata, Error}; - use sp_runtime::traits::Block as BlockT; - use std::borrow::Borrow; + use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; + use sp_runtime::traits::Block as BlockT; + use std::borrow::Borrow; - /// Returns a function for checking block ancestry, the returned function will - /// return `true` if the given hash (second parameter) is a descendent of the - /// base (first parameter). 
If the `current` parameter is defined, it should - /// represent the current block `hash` and its `parent hash`, if given the - /// function that's returned will assume that `hash` isn't part of the local DB - /// yet, and all searches in the DB will instead reference the parent. - pub fn is_descendent_of<'a, Block: BlockT, T>( - client: &'a T, - current: Option<(Block::Hash, Block::Hash)>, - ) -> impl Fn(&Block::Hash, &Block::Hash) -> Result + 'a - where T: HeaderBackend + HeaderMetadata, - { - move |base, hash| { - if base == hash { return Ok(false); } + /// Returns a function for checking block ancestry, the returned function will + /// return `true` if the given hash (second parameter) is a descendent of the + /// base (first parameter). If the `current` parameter is defined, it should + /// represent the current block `hash` and its `parent hash`, if given the + /// function that's returned will assume that `hash` isn't part of the local DB + /// yet, and all searches in the DB will instead reference the parent. 
+ pub fn is_descendent_of<'a, Block: BlockT, T>( + client: &'a T, + current: Option<(Block::Hash, Block::Hash)>, + ) -> impl Fn(&Block::Hash, &Block::Hash) -> Result + 'a + where + T: HeaderBackend + HeaderMetadata, + { + move |base, hash| { + if base == hash { + return Ok(false); + } - let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); + let current = current.as_ref().map(|(c, p)| (c.borrow(), p.borrow())); - let mut hash = hash; - if let Some((current_hash, current_parent_hash)) = current { - if base == current_hash { return Ok(false); } - if hash == current_hash { - if base == current_parent_hash { - return Ok(true); - } else { - hash = current_parent_hash; - } - } - } + let mut hash = hash; + if let Some((current_hash, current_parent_hash)) = current { + if base == current_hash { + return Ok(false); + } + if hash == current_hash { + if base == current_parent_hash { + return Ok(true); + } else { + hash = current_parent_hash; + } + } + } - let ancestor = sp_blockchain::lowest_common_ancestor(client, *hash, *base)?; + let ancestor = sp_blockchain::lowest_common_ancestor(client, *hash, *base)?; - Ok(ancestor.hash == *base) - } - } + Ok(ancestor.hash == *base) + } + } } diff --git a/client/api/src/light.rs b/client/api/src/light.rs index c0bebc1740..1e5b6a9b6f 100644 --- a/client/api/src/light.rs +++ b/client/api/src/light.rs @@ -16,179 +16,180 @@ //! 
Substrate light client interfaces -use std::sync::Arc; use std::collections::{BTreeMap, HashMap}; use std::future::Future; +use std::sync::Arc; -use sp_runtime::{ - traits::{ - Block as BlockT, Header as HeaderT, NumberFor, - }, - generic::BlockId +use crate::{ + backend::{AuxStore, NewBlockState}, + UsageInfo, }; -use sp_core::ChangesTrieConfigurationRange; -use sp_state_machine::StorageProof; use sp_blockchain::{ - HeaderMetadata, well_known_cache_keys, HeaderBackend, Cache as BlockchainCache, - Error as ClientError, Result as ClientResult, + well_known_cache_keys, Cache as BlockchainCache, Error as ClientError, HeaderBackend, + HeaderMetadata, Result as ClientResult, +}; +use sp_core::ChangesTrieConfigurationRange; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor}, }; -use crate::{backend::{AuxStore, NewBlockState}, UsageInfo}; +use sp_state_machine::StorageProof; /// Remote call request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct RemoteCallRequest { - /// Call at state of given block. - pub block: Header::Hash, - /// Header of block at which call is performed. - pub header: Header, - /// Method to call. - pub method: String, - /// Call data. - pub call_data: Vec, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, + /// Call at state of given block. + pub block: Header::Hash, + /// Header of block at which call is performed. + pub header: Header, + /// Method to call. + pub method: String, + /// Call data. + pub call_data: Vec, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Remote canonical header request. #[derive(Clone, Debug, Default, PartialEq, Eq, Hash)] pub struct RemoteHeaderRequest { - /// The root of CHT this block is included in. - pub cht_root: Header::Hash, - /// Number of the header to query. - pub block: Header::Number, - /// Number of times to retry request. 
None means that default RETRY_COUNT is used. - pub retry_count: Option, + /// The root of CHT this block is included in. + pub cht_root: Header::Hash, + /// Number of the header to query. + pub block: Header::Number, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Remote storage read request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct RemoteReadRequest { - /// Read at state of given block. - pub block: Header::Hash, - /// Header of block at which read is performed. - pub header: Header, - /// Storage key to read. - pub keys: Vec>, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, + /// Read at state of given block. + pub block: Header::Hash, + /// Header of block at which read is performed. + pub header: Header, + /// Storage key to read. + pub keys: Vec>, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Remote storage read child request. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct RemoteReadChildRequest { - /// Read at state of given block. - pub block: Header::Hash, - /// Header of block at which read is performed. - pub header: Header, - /// Storage key for child. - pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, - /// Child storage key to read. - pub keys: Vec>, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, + /// Read at state of given block. + pub block: Header::Hash, + /// Header of block at which read is performed. + pub header: Header, + /// Storage key for child. + pub storage_key: Vec, + /// Child trie source information. 
+ pub child_info: Vec, + /// Child type, its required to resolve `child_info` + /// content and choose child implementation. + pub child_type: u32, + /// Child storage key to read. + pub keys: Vec>, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Remote key changes read request. #[derive(Clone, Debug, PartialEq, Eq)] pub struct RemoteChangesRequest { - /// All changes trie configurations that are valid within [first_block; last_block]. - pub changes_trie_configs: Vec>, - /// Query changes from range of blocks, starting (and including) with this hash... - pub first_block: (Header::Number, Header::Hash), - /// ...ending (and including) with this hash. Should come after first_block and - /// be the part of the same fork. - pub last_block: (Header::Number, Header::Hash), - /// Only use digests from blocks up to this hash. Should be last_block OR come - /// after this block and be the part of the same fork. - pub max_block: (Header::Number, Header::Hash), - /// Known changes trie roots for the range of blocks [tries_roots.0..max_block]. - /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. - pub tries_roots: (Header::Number, Header::Hash, Vec), - /// Optional Child Storage key to read. - pub storage_key: Option>, - /// Storage key to read. - pub key: Vec, - /// Number of times to retry request. None means that default RETRY_COUNT is used. - pub retry_count: Option, + /// All changes trie configurations that are valid within [first_block; last_block]. + pub changes_trie_configs: Vec>, + /// Query changes from range of blocks, starting (and including) with this hash... + pub first_block: (Header::Number, Header::Hash), + /// ...ending (and including) with this hash. Should come after first_block and + /// be the part of the same fork. + pub last_block: (Header::Number, Header::Hash), + /// Only use digests from blocks up to this hash. 
Should be last_block OR come + /// after this block and be the part of the same fork. + pub max_block: (Header::Number, Header::Hash), + /// Known changes trie roots for the range of blocks [tries_roots.0..max_block]. + /// Proofs for roots of ascendants of tries_roots.0 are provided by the remote node. + pub tries_roots: (Header::Number, Header::Hash, Vec), + /// Optional Child Storage key to read. + pub storage_key: Option>, + /// Storage key to read. + pub key: Vec, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Key changes read proof. #[derive(Debug, PartialEq, Eq)] pub struct ChangesProof { - /// Max block that has been used in changes query. - pub max_block: Header::Number, - /// All touched nodes of all changes tries. - pub proof: Vec>, - /// All changes tries roots that have been touched AND are missing from - /// the requester' node. It is a map of block number => changes trie root. - pub roots: BTreeMap, - /// The proofs for all changes tries roots that have been touched AND are - /// missing from the requester' node. It is a map of CHT number => proof. - pub roots_proof: StorageProof, + /// Max block that has been used in changes query. + pub max_block: Header::Number, + /// All touched nodes of all changes tries. + pub proof: Vec>, + /// All changes tries roots that have been touched AND are missing from + /// the requester' node. It is a map of block number => changes trie root. + pub roots: BTreeMap, + /// The proofs for all changes tries roots that have been touched AND are + /// missing from the requester' node. It is a map of CHT number => proof. + pub roots_proof: StorageProof, } /// Remote block body request #[derive(Clone, Default, Debug, PartialEq, Eq, Hash)] pub struct RemoteBodyRequest { - /// Header of the requested block body - pub header: Header, - /// Number of times to retry request. None means that default RETRY_COUNT is used. 
- pub retry_count: Option, + /// Header of the requested block body + pub header: Header, + /// Number of times to retry request. None means that default RETRY_COUNT is used. + pub retry_count: Option, } /// Light client data fetcher. Implementations of this trait must check if remote data /// is correct (see FetchedDataChecker) and return already checked data. pub trait Fetcher: Send + Sync { - /// Remote header future. - type RemoteHeaderResult: Future> + Unpin + Send + 'static; - /// Remote storage read future. - type RemoteReadResult: Future, Option>>, - ClientError, - >> + Unpin + Send + 'static; - /// Remote call result future. - type RemoteCallResult: Future, - ClientError, - >> + Unpin + Send + 'static; - /// Remote changes result future. - type RemoteChangesResult: Future, u32)>, - ClientError, - >> + Unpin + Send + 'static; - /// Remote block body result future. - type RemoteBodyResult: Future, - ClientError, - >> + Unpin + Send + 'static; - - /// Fetch remote header. - fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult; - /// Fetch remote storage value. - fn remote_read( - &self, - request: RemoteReadRequest - ) -> Self::RemoteReadResult; - /// Fetch remote storage child value. - fn remote_read_child( - &self, - request: RemoteReadChildRequest - ) -> Self::RemoteReadResult; - /// Fetch remote call result. - fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; - /// Fetch remote changes ((block number, extrinsic index)) where given key has been changed - /// at a given blocks range. - fn remote_changes(&self, request: RemoteChangesRequest) -> Self::RemoteChangesResult; - /// Fetch remote block body - fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult; + /// Remote header future. + type RemoteHeaderResult: Future> + + Unpin + + Send + + 'static; + /// Remote storage read future. 
+ type RemoteReadResult: Future, Option>>, ClientError>> + + Unpin + + Send + + 'static; + /// Remote call result future. + type RemoteCallResult: Future, ClientError>> + Unpin + Send + 'static; + /// Remote changes result future. + type RemoteChangesResult: Future, u32)>, ClientError>> + + Unpin + + Send + + 'static; + /// Remote block body result future. + type RemoteBodyResult: Future, ClientError>> + + Unpin + + Send + + 'static; + + /// Fetch remote header. + fn remote_header( + &self, + request: RemoteHeaderRequest, + ) -> Self::RemoteHeaderResult; + /// Fetch remote storage value. + fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult; + /// Fetch remote storage child value. + fn remote_read_child( + &self, + request: RemoteReadChildRequest, + ) -> Self::RemoteReadResult; + /// Fetch remote call result. + fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult; + /// Fetch remote changes ((block number, extrinsic index)) where given key has been changed + /// at a given blocks range. + fn remote_changes( + &self, + request: RemoteChangesRequest, + ) -> Self::RemoteChangesResult; + /// Fetch remote block body + fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult; } /// Light client remote data checker. @@ -196,159 +197,164 @@ pub trait Fetcher: Send + Sync { /// Implementations of this trait should not use any prunable blockchain data /// except that is passed to its methods. pub trait FetchChecker: Send + Sync { - /// Check remote header proof. - fn check_header_proof( - &self, - request: &RemoteHeaderRequest, - header: Option, - remote_proof: StorageProof, - ) -> ClientResult; - /// Check remote storage read proof. - fn check_read_proof( - &self, - request: &RemoteReadRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>>; - /// Check remote storage read proof. 
- fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>>; - /// Check remote method execution proof. - fn check_execution_proof( - &self, - request: &RemoteCallRequest, - remote_proof: StorageProof, - ) -> ClientResult>; - /// Check remote changes query proof. - fn check_changes_proof( - &self, - request: &RemoteChangesRequest, - proof: ChangesProof - ) -> ClientResult, u32)>>; - /// Check remote body proof. - fn check_body_proof( - &self, - request: &RemoteBodyRequest, - body: Vec - ) -> ClientResult>; + /// Check remote header proof. + fn check_header_proof( + &self, + request: &RemoteHeaderRequest, + header: Option, + remote_proof: StorageProof, + ) -> ClientResult; + /// Check remote storage read proof. + fn check_read_proof( + &self, + request: &RemoteReadRequest, + remote_proof: StorageProof, + ) -> ClientResult, Option>>>; + /// Check remote storage read proof. + fn check_read_child_proof( + &self, + request: &RemoteReadChildRequest, + remote_proof: StorageProof, + ) -> ClientResult, Option>>>; + /// Check remote method execution proof. + fn check_execution_proof( + &self, + request: &RemoteCallRequest, + remote_proof: StorageProof, + ) -> ClientResult>; + /// Check remote changes query proof. + fn check_changes_proof( + &self, + request: &RemoteChangesRequest, + proof: ChangesProof, + ) -> ClientResult, u32)>>; + /// Check remote body proof. + fn check_body_proof( + &self, + request: &RemoteBodyRequest, + body: Vec, + ) -> ClientResult>; } - /// Light client blockchain storage. -pub trait Storage: AuxStore + HeaderBackend + HeaderMetadata { - /// Store new header. Should refuse to revert any finalized blocks. - /// - /// Takes new authorities, the leaf state of the new block, and - /// any auxiliary storage updates to place in the same operation. 
- fn import_header( - &self, - header: Block::Header, - cache: HashMap>, - state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()>; - - /// Set an existing block as new best block. - fn set_head(&self, block: BlockId) -> ClientResult<()>; - - /// Mark historic header as finalized. - fn finalize_header(&self, block: BlockId) -> ClientResult<()>; - - /// Get last finalized header. - fn last_finalized(&self) -> ClientResult; - - /// Get headers CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult>; - - /// Get changes trie CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult>; - - /// Get storage cache. - fn cache(&self) -> Option>>; - - /// Get storage usage statistics. - fn usage_info(&self) -> Option; +pub trait Storage: + AuxStore + HeaderBackend + HeaderMetadata +{ + /// Store new header. Should refuse to revert any finalized blocks. + /// + /// Takes new authorities, the leaf state of the new block, and + /// any auxiliary storage updates to place in the same operation. + fn import_header( + &self, + header: Block::Header, + cache: HashMap>, + state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()>; + + /// Set an existing block as new best block. + fn set_head(&self, block: BlockId) -> ClientResult<()>; + + /// Mark historic header as finalized. + fn finalize_header(&self, block: BlockId) -> ClientResult<()>; + + /// Get last finalized header. + fn last_finalized(&self) -> ClientResult; + + /// Get headers CHT root for given block. Returns None if the block is not pruned (not a part of any CHT). + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult>; + + /// Get changes trie CHT root for given block. 
Returns None if the block is not pruned (not a part of any CHT). + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult>; + + /// Get storage cache. + fn cache(&self) -> Option>>; + + /// Get storage usage statistics. + fn usage_info(&self) -> Option; } /// Remote header. #[derive(Debug)] pub enum LocalOrRemote { - /// When data is available locally, it is returned. - Local(Data), - /// When data is unavailable locally, the request to fetch it from remote node is returned. - Remote(Request), - /// When data is unknown. - Unknown, + /// When data is available locally, it is returned. + Local(Data), + /// When data is unavailable locally, the request to fetch it from remote node is returned. + Remote(Request), + /// When data is unknown. + Unknown, } /// Futures-based blockchain backend that either resolves blockchain data /// locally, or fetches required data from remote node. pub trait RemoteBlockchain: Send + Sync { - /// Get block header. - fn header(&self, id: BlockId) -> ClientResult, - >>; + /// Get block header. + fn header( + &self, + id: BlockId, + ) -> ClientResult>>; } - - #[cfg(test)] pub mod tests { - use futures::future::Ready; - use parking_lot::Mutex; - use sp_blockchain::Error as ClientError; - use sp_test_primitives::{Block, Header, Extrinsic}; - use super::*; - - pub type OkCallFetcher = Mutex>; - - fn not_implemented_in_tests() -> Ready> - where - E: std::convert::From<&'static str>, - { - futures::future::ready(Err("Not implemented on test node".into())) - } - - impl Fetcher for OkCallFetcher { - type RemoteHeaderResult = Ready>; - type RemoteReadResult = Ready, Option>>, ClientError>>; - type RemoteCallResult = Ready, ClientError>>; - type RemoteChangesResult = Ready, u32)>, ClientError>>; - type RemoteBodyResult = Ready, ClientError>>; - - fn remote_header(&self, _request: RemoteHeaderRequest

) -> Self::RemoteHeaderResult { - not_implemented_in_tests() - } - - fn remote_read(&self, _request: RemoteReadRequest
) -> Self::RemoteReadResult { - not_implemented_in_tests() - } - - fn remote_read_child(&self, _request: RemoteReadChildRequest
) -> Self::RemoteReadResult { - not_implemented_in_tests() - } - - fn remote_call(&self, _request: RemoteCallRequest
) -> Self::RemoteCallResult { - futures::future::ready(Ok((*self.lock()).clone())) - } - - fn remote_changes(&self, _request: RemoteChangesRequest
) -> Self::RemoteChangesResult { - not_implemented_in_tests() - } - - fn remote_body(&self, _request: RemoteBodyRequest
) -> Self::RemoteBodyResult { - not_implemented_in_tests() - } - } + use super::*; + use futures::future::Ready; + use parking_lot::Mutex; + use sp_blockchain::Error as ClientError; + use sp_test_primitives::{Block, Extrinsic, Header}; + + pub type OkCallFetcher = Mutex>; + + fn not_implemented_in_tests() -> Ready> + where + E: std::convert::From<&'static str>, + { + futures::future::ready(Err("Not implemented on test node".into())) + } + + impl Fetcher for OkCallFetcher { + type RemoteHeaderResult = Ready>; + type RemoteReadResult = Ready, Option>>, ClientError>>; + type RemoteCallResult = Ready, ClientError>>; + type RemoteChangesResult = Ready, u32)>, ClientError>>; + type RemoteBodyResult = Ready, ClientError>>; + + fn remote_header(&self, _request: RemoteHeaderRequest
) -> Self::RemoteHeaderResult { + not_implemented_in_tests() + } + + fn remote_read(&self, _request: RemoteReadRequest
) -> Self::RemoteReadResult { + not_implemented_in_tests() + } + + fn remote_read_child( + &self, + _request: RemoteReadChildRequest
, + ) -> Self::RemoteReadResult { + not_implemented_in_tests() + } + + fn remote_call(&self, _request: RemoteCallRequest
) -> Self::RemoteCallResult { + futures::future::ready(Ok((*self.lock()).clone())) + } + + fn remote_changes( + &self, + _request: RemoteChangesRequest
, + ) -> Self::RemoteChangesResult { + not_implemented_in_tests() + } + + fn remote_body(&self, _request: RemoteBodyRequest
) -> Self::RemoteBodyResult { + not_implemented_in_tests() + } + } } diff --git a/client/api/src/notifications.rs b/client/api/src/notifications.rs index f154eade44..56879b56b3 100644 --- a/client/api/src/notifications.rs +++ b/client/api/src/notifications.rs @@ -17,53 +17,68 @@ //! Storage notifications use std::{ - collections::{HashSet, HashMap}, - sync::Arc, + collections::{HashMap, HashSet}, + sync::Arc, }; -use fnv::{FnvHashSet, FnvHashMap}; -use sp_core::storage::{StorageKey, StorageData}; +use fnv::{FnvHashMap, FnvHashSet}; +use sp_core::storage::{StorageData, StorageKey}; use sp_runtime::traits::Block as BlockT; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; /// Storage change set #[derive(Debug)] pub struct StorageChangeSet { - changes: Arc)>>, - child_changes: Arc)>)>>, - filter: Option>, - child_filters: Option>>>, + changes: Arc)>>, + child_changes: Arc)>)>>, + filter: Option>, + child_filters: Option>>>, } impl StorageChangeSet { - /// Convert the change set into iterator over storage items. - pub fn iter<'a>(&'a self) - -> impl Iterator, &'a StorageKey, Option<&'a StorageData>)> + 'a { - let top = self.changes - .iter() - .filter(move |&(key, _)| match self.filter { - Some(ref filter) => filter.contains(key), - None => true, - }) - .map(move |(k,v)| (None, k, v.as_ref())); - let children = self.child_changes - .iter() - .filter_map(move |(sk, changes)| { - if let Some(cf) = self.child_filters.as_ref() { - if let Some(filter) = cf.get(sk) { - Some(changes - .iter() - .filter(move |&(key, _)| match filter { - Some(ref filter) => filter.contains(key), - None => true, - }) - .map(move |(k,v)| (Some(sk), k, v.as_ref()))) - } else { None } - } else { None } - }) - .flatten(); - top.chain(children) - } + /// Convert the change set into iterator over storage items. 
+ pub fn iter<'a>( + &'a self, + ) -> impl Iterator< + Item = ( + Option<&'a StorageKey>, + &'a StorageKey, + Option<&'a StorageData>, + ), + > + 'a { + let top = self + .changes + .iter() + .filter(move |&(key, _)| match self.filter { + Some(ref filter) => filter.contains(key), + None => true, + }) + .map(move |(k, v)| (None, k, v.as_ref())); + let children = self + .child_changes + .iter() + .filter_map(move |(sk, changes)| { + if let Some(cf) = self.child_filters.as_ref() { + if let Some(filter) = cf.get(sk) { + Some( + changes + .iter() + .filter(move |&(key, _)| match filter { + Some(ref filter) => filter.contains(key), + None => true, + }) + .map(move |(k, v)| (Some(sk), k, v.as_ref())), + ) + } else { + None + } + } else { + None + } + }) + .flatten(); + top.chain(children) + } } /// Type that implements `futures::Stream` of storage change events. @@ -74,407 +89,447 @@ type SubscriberId = u64; /// Manages storage listeners. #[derive(Debug)] pub struct StorageNotifications { - next_id: SubscriberId, - wildcard_listeners: FnvHashSet, - listeners: HashMap>, - child_listeners: HashMap>, - FnvHashSet - )>, - sinks: FnvHashMap, - Option>, - Option>>>, - )>, + next_id: SubscriberId, + wildcard_listeners: FnvHashSet, + listeners: HashMap>, + child_listeners: HashMap< + StorageKey, + ( + HashMap>, + FnvHashSet, + ), + >, + sinks: FnvHashMap< + SubscriberId, + ( + TracingUnboundedSender<(Block::Hash, StorageChangeSet)>, + Option>, + Option>>>, + ), + >, } impl Default for StorageNotifications { - fn default() -> Self { - StorageNotifications { - next_id: Default::default(), - wildcard_listeners: Default::default(), - listeners: Default::default(), - child_listeners: Default::default(), - sinks: Default::default(), - } - } + fn default() -> Self { + StorageNotifications { + next_id: Default::default(), + wildcard_listeners: Default::default(), + listeners: Default::default(), + child_listeners: Default::default(), + sinks: Default::default(), + } + } } impl 
StorageNotifications { - /// Trigger notification to all listeners. - /// - /// Note the changes are going to be filtered by listener's filter key. - /// In fact no event might be sent if clients are not interested in the changes. - pub fn trigger( - &mut self, - hash: &Block::Hash, - changeset: impl Iterator, Option>)>, - child_changeset: impl Iterator< - Item=(Vec, impl Iterator, Option>)>) - >, - ) { - let has_wildcard = !self.wildcard_listeners.is_empty(); - - // early exit if no listeners - if !has_wildcard && self.listeners.is_empty() && self.child_listeners.is_empty() { - return; - } - - let mut subscribers = self.wildcard_listeners.clone(); - let mut changes = Vec::new(); - let mut child_changes = Vec::new(); - - // Collect subscribers and changes - for (k, v) in changeset { - let k = StorageKey(k); - let listeners = self.listeners.get(&k); - - if let Some(ref listeners) = listeners { - subscribers.extend(listeners.iter()); - } - - if has_wildcard || listeners.is_some() { - changes.push((k, v.map(StorageData))); - } - } - for (sk, changeset) in child_changeset { - let sk = StorageKey(sk); - if let Some((cl, cw)) = self.child_listeners.get(&sk) { - let mut changes = Vec::new(); - for (k, v) in changeset { - let k = StorageKey(k); - let listeners = cl.get(&k); - - if let Some(ref listeners) = listeners { - subscribers.extend(listeners.iter()); - } - - subscribers.extend(cw.iter()); - - if !cw.is_empty() || listeners.is_some() { - changes.push((k, v.map(StorageData))); - } - } - if !changes.is_empty() { - child_changes.push((sk, changes)); - } - } - } - - // Don't send empty notifications - if changes.is_empty() && child_changes.is_empty() { - return; - } - - let changes = Arc::new(changes); - let child_changes = Arc::new(child_changes); - // Trigger the events - for subscriber in subscribers { - let should_remove = { - let &(ref sink, ref filter, ref child_filters) = self.sinks.get(&subscriber) - .expect("subscribers returned from self.listeners are always in 
self.sinks; qed"); - sink.unbounded_send((hash.clone(), StorageChangeSet { - changes: changes.clone(), - child_changes: child_changes.clone(), - filter: filter.clone(), - child_filters: child_filters.clone(), - })).is_err() - }; - - if should_remove { - self.remove_subscriber(subscriber); - } - } - } - - fn remove_subscriber_from( - subscriber: &SubscriberId, - filters: &Option>, - listeners: &mut HashMap>, - wildcards: &mut FnvHashSet, - ){ - match filters { - None => { - wildcards.remove(subscriber); - }, - Some(filters) => { - - for key in filters.iter() { - let remove_key = match listeners.get_mut(key) { - Some(ref mut set) => { - set.remove(subscriber); - set.is_empty() - }, - None => false, - }; - - if remove_key { - listeners.remove(key); - } - } - } - } - } - - fn remove_subscriber(&mut self, subscriber: SubscriberId) { - if let Some((_, filters, child_filters)) = self.sinks.remove(&subscriber) { - Self::remove_subscriber_from( - &subscriber, - &filters, - &mut self.listeners, - &mut self.wildcard_listeners, - ); - if let Some(child_filters) = child_filters.as_ref() { - for (c_key, filters) in child_filters { - - if let Some((listeners, wildcards)) = self.child_listeners.get_mut(&c_key) { - Self::remove_subscriber_from( - &subscriber, - &filters, - &mut *listeners, - &mut *wildcards, - ); - - if listeners.is_empty() && wildcards.is_empty() { - self.child_listeners.remove(&c_key); - } - } - } - } - } - } - - fn listen_from( - current_id: SubscriberId, - filter_keys: &Option>, - listeners: &mut HashMap>, - wildcards: &mut FnvHashSet, - ) -> Option> - { - match filter_keys { - None => { - wildcards.insert(current_id); - None - }, - Some(keys) => Some(keys.as_ref().iter().map(|key| { - listeners - .entry(key.clone()) - .or_insert_with(Default::default) - .insert(current_id); - key.clone() - }).collect()) - } - } - - /// Start listening for particular storage keys. 
- pub fn listen( - &mut self, - filter_keys: Option<&[StorageKey]>, - filter_child_keys: Option<&[(StorageKey, Option>)]>, - ) -> StorageEventStream { - self.next_id += 1; - let current_id = self.next_id; - - // add subscriber for every key - let keys = Self::listen_from( - current_id, - &filter_keys, - &mut self.listeners, - &mut self.wildcard_listeners, - ); - let child_keys = filter_child_keys.map(|filter_child_keys| { - filter_child_keys.iter().map(|(c_key, o_keys)| { - let (c_listeners, c_wildcards) = self.child_listeners - .entry(c_key.clone()) - .or_insert_with(Default::default); - - (c_key.clone(), Self::listen_from( - current_id, - o_keys, - &mut *c_listeners, - &mut *c_wildcards, - )) - }).collect() - }); - - - // insert sink - let (tx, rx) = tracing_unbounded("mpsc_storage_notification_items"); - self.sinks.insert(current_id, (tx, keys, child_keys)); - rx - } + /// Trigger notification to all listeners. + /// + /// Note the changes are going to be filtered by listener's filter key. + /// In fact no event might be sent if clients are not interested in the changes. 
+ pub fn trigger( + &mut self, + hash: &Block::Hash, + changeset: impl Iterator, Option>)>, + child_changeset: impl Iterator< + Item = (Vec, impl Iterator, Option>)>), + >, + ) { + let has_wildcard = !self.wildcard_listeners.is_empty(); + + // early exit if no listeners + if !has_wildcard && self.listeners.is_empty() && self.child_listeners.is_empty() { + return; + } + + let mut subscribers = self.wildcard_listeners.clone(); + let mut changes = Vec::new(); + let mut child_changes = Vec::new(); + + // Collect subscribers and changes + for (k, v) in changeset { + let k = StorageKey(k); + let listeners = self.listeners.get(&k); + + if let Some(ref listeners) = listeners { + subscribers.extend(listeners.iter()); + } + + if has_wildcard || listeners.is_some() { + changes.push((k, v.map(StorageData))); + } + } + for (sk, changeset) in child_changeset { + let sk = StorageKey(sk); + if let Some((cl, cw)) = self.child_listeners.get(&sk) { + let mut changes = Vec::new(); + for (k, v) in changeset { + let k = StorageKey(k); + let listeners = cl.get(&k); + + if let Some(ref listeners) = listeners { + subscribers.extend(listeners.iter()); + } + + subscribers.extend(cw.iter()); + + if !cw.is_empty() || listeners.is_some() { + changes.push((k, v.map(StorageData))); + } + } + if !changes.is_empty() { + child_changes.push((sk, changes)); + } + } + } + + // Don't send empty notifications + if changes.is_empty() && child_changes.is_empty() { + return; + } + + let changes = Arc::new(changes); + let child_changes = Arc::new(child_changes); + // Trigger the events + for subscriber in subscribers { + let should_remove = { + let &(ref sink, ref filter, ref child_filters) = + self.sinks.get(&subscriber).expect( + "subscribers returned from self.listeners are always in self.sinks; qed", + ); + sink.unbounded_send(( + hash.clone(), + StorageChangeSet { + changes: changes.clone(), + child_changes: child_changes.clone(), + filter: filter.clone(), + child_filters: child_filters.clone(), + }, + 
)) + .is_err() + }; + + if should_remove { + self.remove_subscriber(subscriber); + } + } + } + + fn remove_subscriber_from( + subscriber: &SubscriberId, + filters: &Option>, + listeners: &mut HashMap>, + wildcards: &mut FnvHashSet, + ) { + match filters { + None => { + wildcards.remove(subscriber); + } + Some(filters) => { + for key in filters.iter() { + let remove_key = match listeners.get_mut(key) { + Some(ref mut set) => { + set.remove(subscriber); + set.is_empty() + } + None => false, + }; + + if remove_key { + listeners.remove(key); + } + } + } + } + } + + fn remove_subscriber(&mut self, subscriber: SubscriberId) { + if let Some((_, filters, child_filters)) = self.sinks.remove(&subscriber) { + Self::remove_subscriber_from( + &subscriber, + &filters, + &mut self.listeners, + &mut self.wildcard_listeners, + ); + if let Some(child_filters) = child_filters.as_ref() { + for (c_key, filters) in child_filters { + if let Some((listeners, wildcards)) = self.child_listeners.get_mut(&c_key) { + Self::remove_subscriber_from( + &subscriber, + &filters, + &mut *listeners, + &mut *wildcards, + ); + + if listeners.is_empty() && wildcards.is_empty() { + self.child_listeners.remove(&c_key); + } + } + } + } + } + } + + fn listen_from( + current_id: SubscriberId, + filter_keys: &Option>, + listeners: &mut HashMap>, + wildcards: &mut FnvHashSet, + ) -> Option> { + match filter_keys { + None => { + wildcards.insert(current_id); + None + } + Some(keys) => Some( + keys.as_ref() + .iter() + .map(|key| { + listeners + .entry(key.clone()) + .or_insert_with(Default::default) + .insert(current_id); + key.clone() + }) + .collect(), + ), + } + } + + /// Start listening for particular storage keys. 
+ pub fn listen( + &mut self, + filter_keys: Option<&[StorageKey]>, + filter_child_keys: Option<&[(StorageKey, Option>)]>, + ) -> StorageEventStream { + self.next_id += 1; + let current_id = self.next_id; + + // add subscriber for every key + let keys = Self::listen_from( + current_id, + &filter_keys, + &mut self.listeners, + &mut self.wildcard_listeners, + ); + let child_keys = filter_child_keys.map(|filter_child_keys| { + filter_child_keys + .iter() + .map(|(c_key, o_keys)| { + let (c_listeners, c_wildcards) = self + .child_listeners + .entry(c_key.clone()) + .or_insert_with(Default::default); + + ( + c_key.clone(), + Self::listen_from(current_id, o_keys, &mut *c_listeners, &mut *c_wildcards), + ) + }) + .collect() + }); + + // insert sink + let (tx, rx) = tracing_unbounded("mpsc_storage_notification_items"); + self.sinks.insert(current_id, (tx, keys, child_keys)); + rx + } } #[cfg(test)] mod tests { - use sp_runtime::testing::{H256 as Hash, Block as RawBlock, ExtrinsicWrapper}; - use super::*; - use std::iter::{empty, Empty}; - - type TestChangeSet = ( - Vec<(StorageKey, Option)>, - Vec<(StorageKey, Vec<(StorageKey, Option)>)>, - ); - - #[cfg(test)] - impl From for StorageChangeSet { - fn from(changes: TestChangeSet) -> Self { - // warning hardcoded child trie wildcard to test upon - let child_filters = Some([ - (StorageKey(vec![4]), None), - (StorageKey(vec![5]), None), - ].iter().cloned().collect()); - StorageChangeSet { - changes: Arc::new(changes.0), - child_changes: Arc::new(changes.1), - filter: None, - child_filters, - } - } - } - - #[cfg(test)] - impl PartialEq for StorageChangeSet { - fn eq(&self, other: &Self) -> bool { - self.iter().eq(other.iter()) - } - } - - type Block = RawBlock>; - - #[test] - fn triggering_change_should_notify_wildcard_listeners() { - // given - let mut notifications = StorageNotifications::::default(); - let child_filter = [(StorageKey(vec![4]), None)]; - let mut recv = futures::executor::block_on_stream( - 
notifications.listen(None, Some(&child_filter[..])) - ); - - // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![3], None), - ]; - let c_changeset_1 = vec![ - (vec![5], Some(vec![4])), - (vec![6], None), - ]; - let c_changeset = vec![(vec![4], c_changeset_1)]; - notifications.trigger( - &Hash::from_low_u64_be(1), - changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), - ); - - // then - assert_eq!(recv.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - (StorageKey(vec![3]), None), - ], vec![(StorageKey(vec![4]), vec![ - (StorageKey(vec![5]), Some(StorageData(vec![4]))), - (StorageKey(vec![6]), None), - ])]).into())); - } - - #[test] - fn should_only_notify_interested_listeners() { - // given - let mut notifications = StorageNotifications::::default(); - let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; - let mut recv1 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![1])]), None) - ); - let mut recv2 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![2])]), None) - ); - let mut recv3 = futures::executor::block_on_stream( - notifications.listen(Some(&[]), Some(&child_filter)) - ); - - // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; - let c_changeset_1 = vec![ - (vec![5], Some(vec![4])), - (vec![6], None), - ]; - - let c_changeset = vec![(vec![4], c_changeset_1)]; - notifications.trigger( - &Hash::from_low_u64_be(1), - changeset.into_iter(), - c_changeset.into_iter().map(|(a,b)| (a, b.into_iter())), - ); - - // then - assert_eq!(recv1.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![1]), None), - ], vec![]).into())); - assert_eq!(recv2.next().unwrap(), (Hash::from_low_u64_be(1), (vec![ - (StorageKey(vec![2]), Some(StorageData(vec![3]))), - ], vec![]).into())); - assert_eq!(recv3.next().unwrap(), (Hash::from_low_u64_be(1), 
(vec![], - vec![ - (StorageKey(vec![4]), vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))]), - ]).into())); - - } - - #[test] - fn should_cleanup_subscribers_if_dropped() { - // given - let mut notifications = StorageNotifications::::default(); - { - let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; - let _recv1 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![1])]), None) - ); - let _recv2 = futures::executor::block_on_stream( - notifications.listen(Some(&[StorageKey(vec![2])]), None) - ); - let _recv3 = futures::executor::block_on_stream( - notifications.listen(None, None) - ); - let _recv4 = futures::executor::block_on_stream( - notifications.listen(None, Some(&child_filter)) - ); - assert_eq!(notifications.listeners.len(), 2); - assert_eq!(notifications.wildcard_listeners.len(), 2); - assert_eq!(notifications.child_listeners.len(), 1); - } - - // when - let changeset = vec![ - (vec![2], Some(vec![3])), - (vec![1], None), - ]; - let c_changeset = empty::<(_, Empty<_>)>(); - notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset); - - // then - assert_eq!(notifications.listeners.len(), 0); - assert_eq!(notifications.wildcard_listeners.len(), 0); - assert_eq!(notifications.child_listeners.len(), 0); - } - - #[test] - fn should_not_send_empty_notifications() { - // given - let mut recv = { - let mut notifications = StorageNotifications::::default(); - let recv = futures::executor::block_on_stream(notifications.listen(None, None)); - - // when - let changeset = vec![]; - let c_changeset = empty::<(_, Empty<_>)>(); - notifications.trigger(&Hash::from_low_u64_be(1), changeset.into_iter(), c_changeset); - recv - }; - - // then - assert_eq!(recv.next(), None); - } + use super::*; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256 as Hash}; + use std::iter::{empty, Empty}; + + type TestChangeSet = ( + Vec<(StorageKey, Option)>, + Vec<(StorageKey, 
Vec<(StorageKey, Option)>)>, + ); + + #[cfg(test)] + impl From for StorageChangeSet { + fn from(changes: TestChangeSet) -> Self { + // warning hardcoded child trie wildcard to test upon + let child_filters = Some( + [(StorageKey(vec![4]), None), (StorageKey(vec![5]), None)] + .iter() + .cloned() + .collect(), + ); + StorageChangeSet { + changes: Arc::new(changes.0), + child_changes: Arc::new(changes.1), + filter: None, + child_filters, + } + } + } + + #[cfg(test)] + impl PartialEq for StorageChangeSet { + fn eq(&self, other: &Self) -> bool { + self.iter().eq(other.iter()) + } + } + + type Block = RawBlock>; + + #[test] + fn triggering_change_should_notify_wildcard_listeners() { + // given + let mut notifications = StorageNotifications::::default(); + let child_filter = [(StorageKey(vec![4]), None)]; + let mut recv = + futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter[..]))); + + // when + let changeset = vec![(vec![2], Some(vec![3])), (vec![3], None)]; + let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)]; + let c_changeset = vec![(vec![4], c_changeset_1)]; + notifications.trigger( + &Hash::from_low_u64_be(1), + changeset.into_iter(), + c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())), + ); + + // then + assert_eq!( + recv.next().unwrap(), + ( + Hash::from_low_u64_be(1), + ( + vec![ + (StorageKey(vec![2]), Some(StorageData(vec![3]))), + (StorageKey(vec![3]), None), + ], + vec![( + StorageKey(vec![4]), + vec![ + (StorageKey(vec![5]), Some(StorageData(vec![4]))), + (StorageKey(vec![6]), None), + ] + )] + ) + .into() + ) + ); + } + + #[test] + fn should_only_notify_interested_listeners() { + // given + let mut notifications = StorageNotifications::::default(); + let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; + let mut recv1 = futures::executor::block_on_stream( + notifications.listen(Some(&[StorageKey(vec![1])]), None), + ); + let mut recv2 = futures::executor::block_on_stream( + 
notifications.listen(Some(&[StorageKey(vec![2])]), None), + ); + let mut recv3 = futures::executor::block_on_stream( + notifications.listen(Some(&[]), Some(&child_filter)), + ); + + // when + let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; + let c_changeset_1 = vec![(vec![5], Some(vec![4])), (vec![6], None)]; + + let c_changeset = vec![(vec![4], c_changeset_1)]; + notifications.trigger( + &Hash::from_low_u64_be(1), + changeset.into_iter(), + c_changeset.into_iter().map(|(a, b)| (a, b.into_iter())), + ); + + // then + assert_eq!( + recv1.next().unwrap(), + ( + Hash::from_low_u64_be(1), + (vec![(StorageKey(vec![1]), None),], vec![]).into() + ) + ); + assert_eq!( + recv2.next().unwrap(), + ( + Hash::from_low_u64_be(1), + ( + vec![(StorageKey(vec![2]), Some(StorageData(vec![3]))),], + vec![] + ) + .into() + ) + ); + assert_eq!( + recv3.next().unwrap(), + ( + Hash::from_low_u64_be(1), + ( + vec![], + vec![( + StorageKey(vec![4]), + vec![(StorageKey(vec![5]), Some(StorageData(vec![4])))] + ),] + ) + .into() + ) + ); + } + + #[test] + fn should_cleanup_subscribers_if_dropped() { + // given + let mut notifications = StorageNotifications::::default(); + { + let child_filter = [(StorageKey(vec![4]), Some(vec![StorageKey(vec![5])]))]; + let _recv1 = futures::executor::block_on_stream( + notifications.listen(Some(&[StorageKey(vec![1])]), None), + ); + let _recv2 = futures::executor::block_on_stream( + notifications.listen(Some(&[StorageKey(vec![2])]), None), + ); + let _recv3 = futures::executor::block_on_stream(notifications.listen(None, None)); + let _recv4 = + futures::executor::block_on_stream(notifications.listen(None, Some(&child_filter))); + assert_eq!(notifications.listeners.len(), 2); + assert_eq!(notifications.wildcard_listeners.len(), 2); + assert_eq!(notifications.child_listeners.len(), 1); + } + + // when + let changeset = vec![(vec![2], Some(vec![3])), (vec![1], None)]; + let c_changeset = empty::<(_, Empty<_>)>(); + notifications.trigger( + 
&Hash::from_low_u64_be(1), + changeset.into_iter(), + c_changeset, + ); + + // then + assert_eq!(notifications.listeners.len(), 0); + assert_eq!(notifications.wildcard_listeners.len(), 0); + assert_eq!(notifications.child_listeners.len(), 0); + } + + #[test] + fn should_not_send_empty_notifications() { + // given + let mut recv = { + let mut notifications = StorageNotifications::::default(); + let recv = futures::executor::block_on_stream(notifications.listen(None, None)); + + // when + let changeset = vec![]; + let c_changeset = empty::<(_, Empty<_>)>(); + notifications.trigger( + &Hash::from_low_u64_be(1), + changeset.into_iter(), + c_changeset, + ); + recv + }; + + // then + assert_eq!(recv.next(), None); + } } diff --git a/client/api/src/proof_provider.rs b/client/api/src/proof_provider.rs index 2d9876f7ad..cce6f5c3d2 100644 --- a/client/api/src/proof_provider.rs +++ b/client/api/src/proof_provider.rs @@ -14,58 +14,58 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . //! Proof utilities -use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT}, -}; -use crate::{StorageProof, ChangesProof}; +use crate::{ChangesProof, StorageProof}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_storage::{ChildInfo, StorageKey}; /// Interface for providing block proving utilities. pub trait ProofProvider { - /// Reads storage value at a given block + key, returning read proof. - fn read_proof( - &self, - id: &BlockId, - keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + /// Reads storage value at a given block + key, returning read proof. + fn read_proof( + &self, + id: &BlockId, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result; - /// Reads child storage value at a given block + storage_key + key, returning - /// read proof. 
- fn read_child_proof( - &self, - id: &BlockId, - storage_key: &[u8], - child_info: ChildInfo, - keys: &mut dyn Iterator, - ) -> sp_blockchain::Result; + /// Reads child storage value at a given block + storage_key + key, returning + /// read proof. + fn read_child_proof( + &self, + id: &BlockId, + storage_key: &[u8], + child_info: ChildInfo, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result; - /// Execute a call to a contract on top of state in a block of given hash - /// AND returning execution proof. - /// - /// No changes are made. - fn execution_proof( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - ) -> sp_blockchain::Result<(Vec, StorageProof)>; - /// Reads given header and generates CHT-based header proof. - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)>; + /// Execute a call to a contract on top of state in a block of given hash + /// AND returning execution proof. + /// + /// No changes are made. + fn execution_proof( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> sp_blockchain::Result<(Vec, StorageProof)>; + /// Reads given header and generates CHT-based header proof. + fn header_proof( + &self, + id: &BlockId, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)>; - /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. - /// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using - /// changes tries from ascendants of this block, we should provide proofs for changes tries roots - /// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants - /// of this block. - /// Works only for runtimes that are supporting changes tries. 
- fn key_changes_proof( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - storage_key: Option<&StorageKey>, - key: &StorageKey, - ) -> sp_blockchain::Result>; + /// Get proof for computation of (block, extrinsic) pairs where key has been changed at given blocks range. + /// `min` is the hash of the first block, which changes trie root is known to the requester - when we're using + /// changes tries from ascendants of this block, we should provide proofs for changes tries roots + /// `max` is the hash of the last block known to the requester - we can't use changes tries from descendants + /// of this block. + /// Works only for runtimes that are supporting changes tries. + fn key_changes_proof( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + storage_key: Option<&StorageKey>, + key: &StorageKey, + ) -> sp_blockchain::Result>; } diff --git a/client/authority-discovery/build.rs b/client/authority-discovery/build.rs index ed632575f3..36e0103d15 100644 --- a/client/authority-discovery/build.rs +++ b/client/authority-discovery/build.rs @@ -1,3 +1,3 @@ fn main() { - prost_build::compile_protos(&["src/schema/dht.proto"], &["src/schema"]).unwrap(); + prost_build::compile_protos(&["src/schema/dht.proto"], &["src/schema"]).unwrap(); } diff --git a/client/authority-discovery/src/addr_cache.rs b/client/authority-discovery/src/addr_cache.rs index 96f589c5d3..9548e7d1d1 100644 --- a/client/authority-discovery/src/addr_cache.rs +++ b/client/authority-discovery/src/addr_cache.rs @@ -16,11 +16,11 @@ use rand::{rngs::StdRng, seq::SliceRandom, Rng, SeedableRng}; use std::{ - clone::Clone, - cmp::{Eq, Ord, PartialEq}, - collections::BTreeMap, - convert::AsRef, - hash::Hash, + clone::Clone, + cmp::{Eq, Ord, PartialEq}, + collections::BTreeMap, + convert::AsRef, + hash::Hash, }; /// The maximum number of authority connections initialized through the authority discovery module. 
@@ -36,165 +36,165 @@ const MAX_NUM_AUTHORITY_CONN: usize = 10; // group. To ensure this map doesn't grow indefinitely `purge_old_authorities_from_cache` // function is called each time we add a new entry. pub(super) struct AddrCache { - cache: BTreeMap>, - - /// Random number to seed address selection RNG. - /// - /// A node should only try to connect to a subset of all authorities. To choose this subset one - /// uses randomness. The choice should differ between nodes to prevent hot spots, but not within - /// each node between each update to prevent connection churn. Thus before each selection we - /// seed an RNG with the same seed. - rand_addr_selection_seed: u64, + cache: BTreeMap>, + + /// Random number to seed address selection RNG. + /// + /// A node should only try to connect to a subset of all authorities. To choose this subset one + /// uses randomness. The choice should differ between nodes to prevent hot spots, but not within + /// each node between each update to prevent connection churn. Thus before each selection we + /// seed an RNG with the same seed. + rand_addr_selection_seed: u64, } impl AddrCache where - Id: Clone + Eq + Hash + Ord, - Addr: Clone + PartialEq + AsRef<[u8]>, + Id: Clone + Eq + Hash + Ord, + Addr: Clone + PartialEq + AsRef<[u8]>, { - pub fn new() -> Self { - AddrCache { - cache: BTreeMap::new(), - rand_addr_selection_seed: rand::thread_rng().gen(), - } - } - - pub fn insert(&mut self, id: Id, mut addresses: Vec) { - if addresses.is_empty() { - return; - } - - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); - self.cache.insert(id, addresses); - } - - // Each node should connect to a subset of all authorities. In order to prevent hot spots, this - // selection is based on randomness. Selecting randomly each time we alter the address cache - // would result in connection churn. To reduce this churn a node generates a seed on startup and - // uses this seed for a new rng on each update. 
(One could as well use ones peer id as a seed. - // Given that the peer id is publicly known, it would make this process predictable by others, - // which might be used as an attack.) - pub fn get_subset(&self) -> Vec { - let mut rng = StdRng::seed_from_u64(self.rand_addr_selection_seed); - - let mut addresses = self - .cache - .iter() - .map(|(_peer_id, addresses)| { - addresses - .choose(&mut rng) - .expect("an empty address vector is never inserted into the cache") - }) - .cloned() - .collect::>(); - - addresses.dedup(); - addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); - - addresses - .choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN) - .cloned() - .collect() - } - - pub fn retain_ids(&mut self, ids: &Vec) { - let to_remove = self - .cache - .iter() - .filter(|(id, _addresses)| !ids.contains(id)) - .map(|entry| entry.0) - .cloned() - .collect::>(); - - for key in to_remove { - self.cache.remove(&key); - } - } + pub fn new() -> Self { + AddrCache { + cache: BTreeMap::new(), + rand_addr_selection_seed: rand::thread_rng().gen(), + } + } + + pub fn insert(&mut self, id: Id, mut addresses: Vec) { + if addresses.is_empty() { + return; + } + + addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + self.cache.insert(id, addresses); + } + + // Each node should connect to a subset of all authorities. In order to prevent hot spots, this + // selection is based on randomness. Selecting randomly each time we alter the address cache + // would result in connection churn. To reduce this churn a node generates a seed on startup and + // uses this seed for a new rng on each update. (One could as well use ones peer id as a seed. + // Given that the peer id is publicly known, it would make this process predictable by others, + // which might be used as an attack.) 
+ pub fn get_subset(&self) -> Vec { + let mut rng = StdRng::seed_from_u64(self.rand_addr_selection_seed); + + let mut addresses = self + .cache + .iter() + .map(|(_peer_id, addresses)| { + addresses + .choose(&mut rng) + .expect("an empty address vector is never inserted into the cache") + }) + .cloned() + .collect::>(); + + addresses.dedup(); + addresses.sort_unstable_by(|a, b| a.as_ref().cmp(b.as_ref())); + + addresses + .choose_multiple(&mut rng, MAX_NUM_AUTHORITY_CONN) + .cloned() + .collect() + } + + pub fn retain_ids(&mut self, ids: &Vec) { + let to_remove = self + .cache + .iter() + .filter(|(id, _addresses)| !ids.contains(id)) + .map(|entry| entry.0) + .cloned() + .collect::>(); + + for key in to_remove { + self.cache.remove(&key); + } + } } #[cfg(test)] mod tests { - use super::*; - use quickcheck::{QuickCheck, TestResult}; - - #[test] - fn returns_addresses_of_same_authorities_on_repeated_calls() { - fn property(input: Vec<(u32, Vec)>) -> TestResult { - // Expect less than 1000 authorities. - if input.len() > 1000 { - return TestResult::discard(); - } - - // Expect less than 100 addresses per authority. - for i in &input { - if i.1.len() > 100 { - return TestResult::discard(); - } - } - - let mut c = AddrCache::new(); - - for (id, addresses) in input { - c.insert(id, addresses); - } - - let result = c.get_subset(); - assert!(result.len() <= MAX_NUM_AUTHORITY_CONN); - - for _ in 1..100 { - assert_eq!(c.get_subset(), result); - } - - TestResult::passed() - } - - QuickCheck::new() - .max_tests(10) - .quickcheck(property as fn(Vec<(u32, Vec)>) -> TestResult) - } - - #[test] - fn returns_same_addresses_of_first_authority_when_second_authority_changes() { - let mut c = AddrCache::new(); - - // Insert addresses of first authority. - let addresses = (1..100) - .map(|i| format!("{:?}", i)) - .collect::>(); - c.insert(1, addresses); - let first_subset = c.get_subset(); - assert_eq!(1, first_subset.len()); - - // Insert address of second authority. 
- c.insert(2, vec!["a".to_string()]); - let second_subset = c.get_subset(); - assert_eq!(2, second_subset.len()); - - // Expect same address of first authority. - assert!(second_subset.contains(&first_subset[0])); - - // Alter address of second authority. - c.insert(2, vec!["b".to_string()]); - let second_subset = c.get_subset(); - assert_eq!(2, second_subset.len()); - - // Expect same address of first authority. - assert!(second_subset.contains(&first_subset[0])); - } - - #[test] - fn retains_only_entries_of_provided_ids() { - let mut cache = AddrCache::new(); - - cache.insert(1, vec![vec![10]]); - cache.insert(2, vec![vec![20]]); - cache.insert(3, vec![vec![30]]); - - cache.retain_ids(&vec![1, 3]); - - let mut subset = cache.get_subset(); - subset.sort(); - - assert_eq!(vec![vec![10], vec![30]], subset); - } + use super::*; + use quickcheck::{QuickCheck, TestResult}; + + #[test] + fn returns_addresses_of_same_authorities_on_repeated_calls() { + fn property(input: Vec<(u32, Vec)>) -> TestResult { + // Expect less than 1000 authorities. + if input.len() > 1000 { + return TestResult::discard(); + } + + // Expect less than 100 addresses per authority. + for i in &input { + if i.1.len() > 100 { + return TestResult::discard(); + } + } + + let mut c = AddrCache::new(); + + for (id, addresses) in input { + c.insert(id, addresses); + } + + let result = c.get_subset(); + assert!(result.len() <= MAX_NUM_AUTHORITY_CONN); + + for _ in 1..100 { + assert_eq!(c.get_subset(), result); + } + + TestResult::passed() + } + + QuickCheck::new() + .max_tests(10) + .quickcheck(property as fn(Vec<(u32, Vec)>) -> TestResult) + } + + #[test] + fn returns_same_addresses_of_first_authority_when_second_authority_changes() { + let mut c = AddrCache::new(); + + // Insert addresses of first authority. 
+ let addresses = (1..100) + .map(|i| format!("{:?}", i)) + .collect::>(); + c.insert(1, addresses); + let first_subset = c.get_subset(); + assert_eq!(1, first_subset.len()); + + // Insert address of second authority. + c.insert(2, vec!["a".to_string()]); + let second_subset = c.get_subset(); + assert_eq!(2, second_subset.len()); + + // Expect same address of first authority. + assert!(second_subset.contains(&first_subset[0])); + + // Alter address of second authority. + c.insert(2, vec!["b".to_string()]); + let second_subset = c.get_subset(); + assert_eq!(2, second_subset.len()); + + // Expect same address of first authority. + assert!(second_subset.contains(&first_subset[0])); + } + + #[test] + fn retains_only_entries_of_provided_ids() { + let mut cache = AddrCache::new(); + + cache.insert(1, vec![vec![10]]); + cache.insert(2, vec![vec![20]]); + cache.insert(3, vec![vec![30]]); + + cache.retain_ids(&vec![1, 3]); + + let mut subset = cache.get_subset(); + subset.sort(); + + assert_eq!(vec![vec![10], vec![30]], subset); + } } diff --git a/client/authority-discovery/src/error.rs b/client/authority-discovery/src/error.rs index b1358485c3..4bbf5b6315 100644 --- a/client/authority-discovery/src/error.rs +++ b/client/authority-discovery/src/error.rs @@ -24,34 +24,34 @@ pub type Result = std::result::Result; /// Error type for the authority discovery module. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Received dht value found event with records with different keys. - ReceivingDhtValueFoundEventWithDifferentKeys, - /// Received dht value found event with no records. - ReceivingDhtValueFoundEventWithNoRecords, - /// Failed to verify a dht payload with the given signature. - VerifyingDhtPayload, - /// Failed to hash the authority id to be used as a dht key. - HashingAuthorityId(libp2p::core::multiaddr::multihash::EncodeError), - /// Failed calling into the Substrate runtime. 
- CallingRuntime(sp_blockchain::Error), - /// From the Dht we only get the hashed authority id. In order to retrieve the actual authority id and to ensure it - /// is actually an authority, we match the hash against the hash of the authority id of all other authorities. This - /// error is the result of the above failing. - MatchingHashedAuthorityIdWithAuthorityId, - /// Failed to set the authority discovery peerset priority group in the peerset module. - SettingPeersetPriorityGroup(String), - /// Failed to encode a protobuf payload. - EncodingProto(prost::EncodeError), - /// Failed to decode a protobuf payload. - DecodingProto(prost::DecodeError), - /// Failed to encode or decode scale payload. - EncodingDecodingScale(codec::Error), - /// Failed to parse a libp2p multi address. - ParsingMultiaddress(libp2p::core::multiaddr::Error), - /// Failed to sign using a specific public key. - MissingSignature(CryptoTypePublicPair), - /// Failed to sign using all public keys. - Signing, - /// Failed to register Prometheus metric. - Prometheus(prometheus_endpoint::PrometheusError), + /// Received dht value found event with records with different keys. + ReceivingDhtValueFoundEventWithDifferentKeys, + /// Received dht value found event with no records. + ReceivingDhtValueFoundEventWithNoRecords, + /// Failed to verify a dht payload with the given signature. + VerifyingDhtPayload, + /// Failed to hash the authority id to be used as a dht key. + HashingAuthorityId(libp2p::core::multiaddr::multihash::EncodeError), + /// Failed calling into the Substrate runtime. + CallingRuntime(sp_blockchain::Error), + /// From the Dht we only get the hashed authority id. In order to retrieve the actual authority id and to ensure it + /// is actually an authority, we match the hash against the hash of the authority id of all other authorities. This + /// error is the result of the above failing. 
+ MatchingHashedAuthorityIdWithAuthorityId, + /// Failed to set the authority discovery peerset priority group in the peerset module. + SettingPeersetPriorityGroup(String), + /// Failed to encode a protobuf payload. + EncodingProto(prost::EncodeError), + /// Failed to decode a protobuf payload. + DecodingProto(prost::DecodeError), + /// Failed to encode or decode scale payload. + EncodingDecodingScale(codec::Error), + /// Failed to parse a libp2p multi address. + ParsingMultiaddress(libp2p::core::multiaddr::Error), + /// Failed to sign using a specific public key. + MissingSignature(CryptoTypePublicPair), + /// Failed to sign using all public keys. + Signing, + /// Failed to register Prometheus metric. + Prometheus(prometheus_endpoint::PrometheusError), } diff --git a/client/authority-discovery/src/lib.rs b/client/authority-discovery/src/lib.rs index 2cf455f17b..39f3ade231 100644 --- a/client/authority-discovery/src/lib.rs +++ b/client/authority-discovery/src/lib.rs @@ -55,31 +55,33 @@ use std::sync::Arc; use std::time::{Duration, Instant}; use futures::task::{Context, Poll}; -use futures::{Future, FutureExt, ready, Stream, StreamExt}; +use futures::{ready, Future, FutureExt, Stream, StreamExt}; use futures_timer::Delay; +use addr_cache::AddrCache; use codec::{Decode, Encode}; use error::{Error, Result}; use log::{debug, error, log_enabled, warn}; -use prometheus_endpoint::{Counter, CounterVec, Gauge, Opts, U64, register}; +use prometheus_endpoint::{register, Counter, CounterVec, Gauge, Opts, U64}; use prost::Message; use sc_client_api::blockchain::HeaderBackend; -use sc_network::{Multiaddr, config::MultiaddrWithPeerId, DhtEvent, ExHashT, NetworkStateInfo}; -use sp_authority_discovery::{AuthorityDiscoveryApi, AuthorityId, AuthoritySignature, AuthorityPair}; +use sc_network::{config::MultiaddrWithPeerId, DhtEvent, ExHashT, Multiaddr, NetworkStateInfo}; +use sp_api::ProvideRuntimeApi; +use sp_authority_discovery::{ + AuthorityDiscoveryApi, AuthorityId, AuthorityPair, 
AuthoritySignature, +}; use sp_core::crypto::{key_types, Pair}; use sp_core::traits::BareCryptoStorePtr; -use sp_runtime::{traits::Block as BlockT, generic::BlockId}; -use sp_api::ProvideRuntimeApi; -use addr_cache::AddrCache; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; #[cfg(test)] mod tests; -mod error; mod addr_cache; +mod error; /// Dht payload schemas generated from Protobuf definitions via Prost crate in build.rs. mod schema { - include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); + include!(concat!(env!("OUT_DIR"), "/authority_discovery.rs")); } type Interval = Box + Unpin + Send + Sync>; @@ -95,548 +97,576 @@ const AUTHORITIES_PRIORITY_GROUP_NAME: &'static str = "authorities"; /// Role an authority discovery module can run as. pub enum Role { - /// Actual authority as well as a reference to its key store. - Authority(BareCryptoStorePtr), - /// Sentry node that guards an authority. - /// - /// No reference to its key store needed, as sentry nodes don't have an identity to sign - /// addresses with in the first place. - Sentry, + /// Actual authority as well as a reference to its key store. + Authority(BareCryptoStorePtr), + /// Sentry node that guards an authority. + /// + /// No reference to its key store needed, as sentry nodes don't have an identity to sign + /// addresses with in the first place. + Sentry, } /// An `AuthorityDiscovery` makes a given authority discoverable and discovers other authorities. pub struct AuthorityDiscovery where - Block: BlockT + 'static, - Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: AuthorityDiscoveryApi, + Block: BlockT + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: AuthorityDiscoveryApi, { - client: Arc, + client: Arc, - network: Arc, - /// List of sentry node public addresses. - // - // There are 3 states: - // - None: No addresses were specified. 
- // - Some(vec![]): Addresses were specified, but none could be parsed as proper - // Multiaddresses. - // - Some(vec![a, b, c, ...]): Valid addresses were specified. - sentry_nodes: Option>, - /// Channel we receive Dht events on. - dht_event_rx: Pin + Send>>, + network: Arc, + /// List of sentry node public addresses. + // + // There are 3 states: + // - None: No addresses were specified. + // - Some(vec![]): Addresses were specified, but none could be parsed as proper + // Multiaddresses. + // - Some(vec![a, b, c, ...]): Valid addresses were specified. + sentry_nodes: Option>, + /// Channel we receive Dht events on. + dht_event_rx: Pin + Send>>, - /// Interval to be proactive, publishing own addresses. - publish_interval: Interval, - /// Interval on which to query for addresses of other authorities. - query_interval: Interval, + /// Interval to be proactive, publishing own addresses. + publish_interval: Interval, + /// Interval on which to query for addresses of other authorities. + query_interval: Interval, - addr_cache: addr_cache::AddrCache, + addr_cache: addr_cache::AddrCache, - metrics: Option, + metrics: Option, - role: Role, + role: Role, - phantom: PhantomData, + phantom: PhantomData, } impl AuthorityDiscovery where - Block: BlockT + Unpin + 'static, - Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: - AuthorityDiscoveryApi, - Self: Future, + Block: BlockT + Unpin + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: + AuthorityDiscoveryApi, + Self: Future, { - /// Return a new authority discovery. - /// - /// Note: When specifying `sentry_nodes` this module will not advertise the public addresses of - /// the node itself but only the public addresses of its sentry nodes. 
- pub fn new( - client: Arc, - network: Arc, - sentry_nodes: Vec, - dht_event_rx: Pin + Send>>, - role: Role, - prometheus_registry: Option, - ) -> Self { - // Kademlia's default time-to-live for Dht records is 36h, republishing records every 24h. - // Given that a node could restart at any point in time, one can not depend on the - // republishing process, thus publishing own external addresses should happen on an interval - // < 36h. - let publish_interval = interval_at( - Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, - Duration::from_secs(12 * 60 * 60), - ); - - // External addresses of other authorities can change at any given point in time. The - // interval on which to query for external addresses of other authorities is a trade off - // between efficiency and performance. - let query_interval = interval_at( - Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, - Duration::from_secs(10 * 60), - ); - - let sentry_nodes = if !sentry_nodes.is_empty() { - Some(sentry_nodes.into_iter().map(|ma| ma.concat()).collect::>()) - } else { - None - }; - - let addr_cache = AddrCache::new(); - - let metrics = match prometheus_registry { - Some(registry) => { - match Metrics::register(®istry) { - Ok(metrics) => Some(metrics), - Err(e) => { - error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); - None - }, - } - }, - None => None, - }; - - AuthorityDiscovery { - client, - network, - sentry_nodes, - dht_event_rx, - publish_interval, - query_interval, - addr_cache, - role, - metrics, - phantom: PhantomData, - } - } - - /// Publish either our own or if specified the public addresses of our sentry nodes. - fn publish_ext_addresses(&mut self) -> Result<()> { - let key_store = match &self.role { - Role::Authority(key_store) => key_store, - // Only authority nodes can put addresses (their own or the ones of their sentry nodes) - // on the Dht. Sentry nodes don't have a known identity to authenticate such addresses, - // thus `publish_ext_addresses` becomes a no-op. 
- Role::Sentry => return Ok(()), - }; - - if let Some(metrics) = &self.metrics { - metrics.publish.inc() - } - - let addresses: Vec<_> = match &self.sentry_nodes { - Some(addrs) => addrs.clone().into_iter() - .map(|a| a.to_vec()) - .collect(), - None => self.network.external_addresses() - .into_iter() - .map(|a| a.with(libp2p::core::multiaddr::Protocol::P2p( - self.network.local_peer_id().into(), - ))) - .map(|a| a.to_vec()) - .collect(), - }; - - if let Some(metrics) = &self.metrics { - metrics.amount_last_published.set(addresses.len() as u64); - } - - let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { addresses } - .encode(&mut serialized_addresses) - .map_err(Error::EncodingProto)?; - - let keys = AuthorityDiscovery::get_own_public_keys_within_authority_set( - &key_store, - &self.client, - )?.into_iter().map(Into::into).collect::>(); - - let signatures = key_store.read() - .sign_with_all( - key_types::AUTHORITY_DISCOVERY, - keys.clone(), - serialized_addresses.as_slice(), - ) - .map_err(|_| Error::Signing)?; - - for (sign_result, key) in signatures.iter().zip(keys) { - let mut signed_addresses = vec![]; - - // sign_with_all returns Result signature - // is generated for a public key that is supported. - // Verify that all signatures exist for all provided keys. 
- let signature = sign_result.as_ref().map_err(|_| Error::MissingSignature(key.clone()))?; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses.clone(), - signature: Encode::encode(&signature), - } - .encode(&mut signed_addresses) - .map_err(Error::EncodingProto)?; - - self.network.put_value( - hash_authority_id(key.1.as_ref()), - signed_addresses, - ); - } - - Ok(()) - } - - fn request_addresses_of_others(&mut self) -> Result<()> { - let id = BlockId::hash(self.client.info().best_hash); - - let authorities = self - .client - .runtime_api() - .authorities(&id) - .map_err(Error::CallingRuntime)?; - - for authority_id in authorities.iter() { - if let Some(metrics) = &self.metrics { - metrics.request.inc(); - } - - self.network - .get_value(&hash_authority_id(authority_id.as_ref())); - } - - Ok(()) - } - - /// Handle incoming Dht events. - /// - /// Returns either: - /// - Poll::Pending when there are no more events to handle or - /// - Poll::Ready(()) when the dht event stream terminated. 
- fn handle_dht_events(&mut self, cx: &mut Context) -> Poll<()>{ - loop { - match ready!(self.dht_event_rx.poll_next_unpin(cx)) { - Some(DhtEvent::ValueFound(v)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_found"]).inc(); - } - - if log_enabled!(log::Level::Debug) { - let hashes = v.iter().map(|(hash, _value)| hash.clone()); - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' found on Dht.", hashes, - ); - } - - if let Err(e) = self.handle_dht_value_found_event(v) { - error!( - target: LOG_TARGET, - "Failed to handle Dht value found event: {:?}", e, - ); - } - } - Some(DhtEvent::ValueNotFound(hash)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_not_found"]).inc(); - } - - debug!( - target: LOG_TARGET, - "Value for hash '{:?}' not found on Dht.", hash - ) - }, - Some(DhtEvent::ValuePut(hash)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_put"]).inc(); - } - - debug!( - target: LOG_TARGET, - "Successfully put hash '{:?}' on Dht.", hash, - ) - }, - Some(DhtEvent::ValuePutFailed(hash)) => { - if let Some(metrics) = &self.metrics { - metrics.dht_event_received.with_label_values(&["value_put_failed"]).inc(); - } - - warn!( - target: LOG_TARGET, - "Failed to put hash '{:?}' on Dht.", hash - ) - }, - None => { - debug!(target: LOG_TARGET, "Dht event stream terminated."); - return Poll::Ready(()); - }, - } - } - } - - fn handle_dht_value_found_event( - &mut self, - values: Vec<(libp2p::kad::record::Key, Vec)>, - ) -> Result<()> { - // Ensure `values` is not empty and all its keys equal. 
- let remote_key = values.iter().fold(Ok(None), |acc, (key, _)| { - match acc { - Ok(None) => Ok(Some(key.clone())), - Ok(Some(ref prev_key)) if prev_key != key => Err( - Error::ReceivingDhtValueFoundEventWithDifferentKeys - ), - x @ Ok(_) => x, - Err(e) => Err(e), - } - })?.ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; - - let authorities = { - let block_id = BlockId::hash(self.client.info().best_hash); - // From the Dht we only get the hashed authority id. In order to retrieve the actual - // authority id and to ensure it is actually an authority, we match the hash against the - // hash of the authority id of all other authorities. - let authorities = self.client.runtime_api().authorities(&block_id)?; - self.addr_cache.retain_ids(&authorities); - authorities - .into_iter() - .map(|id| (hash_authority_id(id.as_ref()), id)) - .collect::>() - }; - - // Check if the event origins from an authority in the current authority set. - let authority_id: &AuthorityId = authorities - .get(&remote_key) - .ok_or(Error::MatchingHashedAuthorityIdWithAuthorityId)?; - - let remote_addresses: Vec = values.into_iter() - .map(|(_k, v)| { - let schema::SignedAuthorityAddresses { signature, addresses } = - schema::SignedAuthorityAddresses::decode(v.as_slice()) - .map_err(Error::DecodingProto)?; - - let signature = AuthoritySignature::decode(&mut &signature[..]) - .map_err(Error::EncodingDecodingScale)?; - - if !AuthorityPair::verify(&signature, &addresses, authority_id) { - return Err(Error::VerifyingDhtPayload); - } - - let addresses = schema::AuthorityAddresses::decode(addresses.as_slice()) - .map(|a| a.addresses) - .map_err(Error::DecodingProto)? - .into_iter() - .map(|a| a.try_into()) - .collect::>() - .map_err(Error::ParsingMultiaddress)?; - - Ok(addresses) - }) - .collect::>>>()? 
- .into_iter().flatten().collect(); - - if !remote_addresses.is_empty() { - self.addr_cache.insert(authority_id.clone(), remote_addresses); - self.update_peer_set_priority_group()?; - } - - Ok(()) - } - - /// Retrieve our public keys within the current authority set. - // - // A node might have multiple authority discovery keys within its keystore, e.g. an old one and - // one for the upcoming session. In addition it could be participating in the current authority - // set with two keys. The function does not return all of the local authority discovery public - // keys, but only the ones intersecting with the current authority set. - fn get_own_public_keys_within_authority_set( - key_store: &BareCryptoStorePtr, - client: &Client, - ) -> Result> { - let local_pub_keys = key_store.read() - .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) - .into_iter() - .collect::>(); - - let id = BlockId::hash(client.info().best_hash); - let current_authorities = client.runtime_api() - .authorities(&id) - .map_err(Error::CallingRuntime)? - .into_iter() - .map(std::convert::Into::into) - .collect::>(); - - let intersection = local_pub_keys.intersection(¤t_authorities) - .cloned() - .map(std::convert::Into::into) - .collect(); - - Ok(intersection) - } - - /// Update the peer set 'authority' priority group. - fn update_peer_set_priority_group(&self) -> Result<()> { - let addresses = self.addr_cache.get_subset(); - - debug!( - target: LOG_TARGET, - "Applying priority group {:?} to peerset.", addresses, - ); - self.network - .set_priority_group( - AUTHORITIES_PRIORITY_GROUP_NAME.to_string(), - addresses.into_iter().collect(), - ) - .map_err(Error::SettingPeersetPriorityGroup)?; - - Ok(()) - } + /// Return a new authority discovery. + /// + /// Note: When specifying `sentry_nodes` this module will not advertise the public addresses of + /// the node itself but only the public addresses of its sentry nodes. 
+ pub fn new( + client: Arc, + network: Arc, + sentry_nodes: Vec, + dht_event_rx: Pin + Send>>, + role: Role, + prometheus_registry: Option, + ) -> Self { + // Kademlia's default time-to-live for Dht records is 36h, republishing records every 24h. + // Given that a node could restart at any point in time, one can not depend on the + // republishing process, thus publishing own external addresses should happen on an interval + // < 36h. + let publish_interval = interval_at( + Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, + Duration::from_secs(12 * 60 * 60), + ); + + // External addresses of other authorities can change at any given point in time. The + // interval on which to query for external addresses of other authorities is a trade off + // between efficiency and performance. + let query_interval = interval_at( + Instant::now() + LIBP2P_KADEMLIA_BOOTSTRAP_TIME, + Duration::from_secs(10 * 60), + ); + + let sentry_nodes = if !sentry_nodes.is_empty() { + Some( + sentry_nodes + .into_iter() + .map(|ma| ma.concat()) + .collect::>(), + ) + } else { + None + }; + + let addr_cache = AddrCache::new(); + + let metrics = match prometheus_registry { + Some(registry) => match Metrics::register(®istry) { + Ok(metrics) => Some(metrics), + Err(e) => { + error!(target: LOG_TARGET, "Failed to register metrics: {:?}", e); + None + } + }, + None => None, + }; + + AuthorityDiscovery { + client, + network, + sentry_nodes, + dht_event_rx, + publish_interval, + query_interval, + addr_cache, + role, + metrics, + phantom: PhantomData, + } + } + + /// Publish either our own or if specified the public addresses of our sentry nodes. + fn publish_ext_addresses(&mut self) -> Result<()> { + let key_store = match &self.role { + Role::Authority(key_store) => key_store, + // Only authority nodes can put addresses (their own or the ones of their sentry nodes) + // on the Dht. 
Sentry nodes don't have a known identity to authenticate such addresses, + // thus `publish_ext_addresses` becomes a no-op. + Role::Sentry => return Ok(()), + }; + + if let Some(metrics) = &self.metrics { + metrics.publish.inc() + } + + let addresses: Vec<_> = match &self.sentry_nodes { + Some(addrs) => addrs.clone().into_iter().map(|a| a.to_vec()).collect(), + None => self + .network + .external_addresses() + .into_iter() + .map(|a| { + a.with(libp2p::core::multiaddr::Protocol::P2p( + self.network.local_peer_id().into(), + )) + }) + .map(|a| a.to_vec()) + .collect(), + }; + + if let Some(metrics) = &self.metrics { + metrics.amount_last_published.set(addresses.len() as u64); + } + + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { addresses } + .encode(&mut serialized_addresses) + .map_err(Error::EncodingProto)?; + + let keys = + AuthorityDiscovery::get_own_public_keys_within_authority_set(&key_store, &self.client)? + .into_iter() + .map(Into::into) + .collect::>(); + + let signatures = key_store + .read() + .sign_with_all( + key_types::AUTHORITY_DISCOVERY, + keys.clone(), + serialized_addresses.as_slice(), + ) + .map_err(|_| Error::Signing)?; + + for (sign_result, key) in signatures.iter().zip(keys) { + let mut signed_addresses = vec![]; + + // sign_with_all returns Result signature + // is generated for a public key that is supported. + // Verify that all signatures exist for all provided keys. 
+ let signature = sign_result + .as_ref() + .map_err(|_| Error::MissingSignature(key.clone()))?; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses.clone(), + signature: Encode::encode(&signature), + } + .encode(&mut signed_addresses) + .map_err(Error::EncodingProto)?; + + self.network + .put_value(hash_authority_id(key.1.as_ref()), signed_addresses); + } + + Ok(()) + } + + fn request_addresses_of_others(&mut self) -> Result<()> { + let id = BlockId::hash(self.client.info().best_hash); + + let authorities = self + .client + .runtime_api() + .authorities(&id) + .map_err(Error::CallingRuntime)?; + + for authority_id in authorities.iter() { + if let Some(metrics) = &self.metrics { + metrics.request.inc(); + } + + self.network + .get_value(&hash_authority_id(authority_id.as_ref())); + } + + Ok(()) + } + + /// Handle incoming Dht events. + /// + /// Returns either: + /// - Poll::Pending when there are no more events to handle or + /// - Poll::Ready(()) when the dht event stream terminated. 
+ fn handle_dht_events(&mut self, cx: &mut Context) -> Poll<()> { + loop { + match ready!(self.dht_event_rx.poll_next_unpin(cx)) { + Some(DhtEvent::ValueFound(v)) => { + if let Some(metrics) = &self.metrics { + metrics + .dht_event_received + .with_label_values(&["value_found"]) + .inc(); + } + + if log_enabled!(log::Level::Debug) { + let hashes = v.iter().map(|(hash, _value)| hash.clone()); + debug!( + target: LOG_TARGET, + "Value for hash '{:?}' found on Dht.", hashes, + ); + } + + if let Err(e) = self.handle_dht_value_found_event(v) { + error!( + target: LOG_TARGET, + "Failed to handle Dht value found event: {:?}", e, + ); + } + } + Some(DhtEvent::ValueNotFound(hash)) => { + if let Some(metrics) = &self.metrics { + metrics + .dht_event_received + .with_label_values(&["value_not_found"]) + .inc(); + } + + debug!( + target: LOG_TARGET, + "Value for hash '{:?}' not found on Dht.", hash + ) + } + Some(DhtEvent::ValuePut(hash)) => { + if let Some(metrics) = &self.metrics { + metrics + .dht_event_received + .with_label_values(&["value_put"]) + .inc(); + } + + debug!( + target: LOG_TARGET, + "Successfully put hash '{:?}' on Dht.", hash, + ) + } + Some(DhtEvent::ValuePutFailed(hash)) => { + if let Some(metrics) = &self.metrics { + metrics + .dht_event_received + .with_label_values(&["value_put_failed"]) + .inc(); + } + + warn!( + target: LOG_TARGET, + "Failed to put hash '{:?}' on Dht.", hash + ) + } + None => { + debug!(target: LOG_TARGET, "Dht event stream terminated."); + return Poll::Ready(()); + } + } + } + } + + fn handle_dht_value_found_event( + &mut self, + values: Vec<(libp2p::kad::record::Key, Vec)>, + ) -> Result<()> { + // Ensure `values` is not empty and all its keys equal. + let remote_key = values + .iter() + .fold(Ok(None), |acc, (key, _)| match acc { + Ok(None) => Ok(Some(key.clone())), + Ok(Some(ref prev_key)) if prev_key != key => { + Err(Error::ReceivingDhtValueFoundEventWithDifferentKeys) + } + x @ Ok(_) => x, + Err(e) => Err(e), + })? 
+ .ok_or(Error::ReceivingDhtValueFoundEventWithNoRecords)?; + + let authorities = { + let block_id = BlockId::hash(self.client.info().best_hash); + // From the Dht we only get the hashed authority id. In order to retrieve the actual + // authority id and to ensure it is actually an authority, we match the hash against the + // hash of the authority id of all other authorities. + let authorities = self.client.runtime_api().authorities(&block_id)?; + self.addr_cache.retain_ids(&authorities); + authorities + .into_iter() + .map(|id| (hash_authority_id(id.as_ref()), id)) + .collect::>() + }; + + // Check if the event origins from an authority in the current authority set. + let authority_id: &AuthorityId = authorities + .get(&remote_key) + .ok_or(Error::MatchingHashedAuthorityIdWithAuthorityId)?; + + let remote_addresses: Vec = values + .into_iter() + .map(|(_k, v)| { + let schema::SignedAuthorityAddresses { + signature, + addresses, + } = schema::SignedAuthorityAddresses::decode(v.as_slice()) + .map_err(Error::DecodingProto)?; + + let signature = AuthoritySignature::decode(&mut &signature[..]) + .map_err(Error::EncodingDecodingScale)?; + + if !AuthorityPair::verify(&signature, &addresses, authority_id) { + return Err(Error::VerifyingDhtPayload); + } + + let addresses = schema::AuthorityAddresses::decode(addresses.as_slice()) + .map(|a| a.addresses) + .map_err(Error::DecodingProto)? + .into_iter() + .map(|a| a.try_into()) + .collect::>() + .map_err(Error::ParsingMultiaddress)?; + + Ok(addresses) + }) + .collect::>>>()? + .into_iter() + .flatten() + .collect(); + + if !remote_addresses.is_empty() { + self.addr_cache + .insert(authority_id.clone(), remote_addresses); + self.update_peer_set_priority_group()?; + } + + Ok(()) + } + + /// Retrieve our public keys within the current authority set. + // + // A node might have multiple authority discovery keys within its keystore, e.g. an old one and + // one for the upcoming session. 
In addition it could be participating in the current authority + // set with two keys. The function does not return all of the local authority discovery public + // keys, but only the ones intersecting with the current authority set. + fn get_own_public_keys_within_authority_set( + key_store: &BareCryptoStorePtr, + client: &Client, + ) -> Result> { + let local_pub_keys = key_store + .read() + .sr25519_public_keys(key_types::AUTHORITY_DISCOVERY) + .into_iter() + .collect::>(); + + let id = BlockId::hash(client.info().best_hash); + let current_authorities = client + .runtime_api() + .authorities(&id) + .map_err(Error::CallingRuntime)? + .into_iter() + .map(std::convert::Into::into) + .collect::>(); + + let intersection = local_pub_keys + .intersection(¤t_authorities) + .cloned() + .map(std::convert::Into::into) + .collect(); + + Ok(intersection) + } + + /// Update the peer set 'authority' priority group. + fn update_peer_set_priority_group(&self) -> Result<()> { + let addresses = self.addr_cache.get_subset(); + + debug!( + target: LOG_TARGET, + "Applying priority group {:?} to peerset.", addresses, + ); + self.network + .set_priority_group( + AUTHORITIES_PRIORITY_GROUP_NAME.to_string(), + addresses.into_iter().collect(), + ) + .map_err(Error::SettingPeersetPriorityGroup)?; + + Ok(()) + } } impl Future for AuthorityDiscovery where - Block: BlockT + Unpin + 'static, - Network: NetworkProvider, - Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, - >::Api: - AuthorityDiscoveryApi, + Block: BlockT + Unpin + 'static, + Network: NetworkProvider, + Client: ProvideRuntimeApi + Send + Sync + 'static + HeaderBackend, + >::Api: + AuthorityDiscoveryApi, { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - // Process incoming events. - if let Poll::Ready(()) = self.handle_dht_events(cx) { - // `handle_dht_events` returns `Poll::Ready(())` when the Dht event stream terminated. 
- // Termination of the Dht event stream implies that the underlying network terminated, - // thus authority discovery should terminate as well. - return Poll::Ready(()); - } - - - // Publish own addresses. - if let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) { - // Register waker of underlying task for next interval. - while let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) {} - - if let Err(e) = self.publish_ext_addresses() { - error!( - target: LOG_TARGET, - "Failed to publish external addresses: {:?}", e, - ); - } - } - - // Request addresses of authorities. - if let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) { - // Register waker of underlying task for next interval. - while let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) {} - - if let Err(e) = self.request_addresses_of_others() { - error!( - target: LOG_TARGET, - "Failed to request addresses of authorities: {:?}", e, - ); - } - } - - Poll::Pending - } + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + // Process incoming events. + if let Poll::Ready(()) = self.handle_dht_events(cx) { + // `handle_dht_events` returns `Poll::Ready(())` when the Dht event stream terminated. + // Termination of the Dht event stream implies that the underlying network terminated, + // thus authority discovery should terminate as well. + return Poll::Ready(()); + } + + // Publish own addresses. + if let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) { + // Register waker of underlying task for next interval. + while let Poll::Ready(_) = self.publish_interval.poll_next_unpin(cx) {} + + if let Err(e) = self.publish_ext_addresses() { + error!( + target: LOG_TARGET, + "Failed to publish external addresses: {:?}", e, + ); + } + } + + // Request addresses of authorities. + if let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) { + // Register waker of underlying task for next interval. 
+ while let Poll::Ready(_) = self.query_interval.poll_next_unpin(cx) {} + + if let Err(e) = self.request_addresses_of_others() { + error!( + target: LOG_TARGET, + "Failed to request addresses of authorities: {:?}", e, + ); + } + } + + Poll::Pending + } } /// NetworkProvider provides AuthorityDiscovery with all necessary hooks into the underlying /// Substrate networking. Using this trait abstraction instead of NetworkService directly is /// necessary to unit test AuthorityDiscovery. pub trait NetworkProvider: NetworkStateInfo { - /// Modify a peerset priority group. - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String>; - - /// Start putting a value in the Dht. - fn put_value(&self, key: libp2p::kad::record::Key, value: Vec); - - /// Start getting a value from the Dht. - fn get_value(&self, key: &libp2p::kad::record::Key); + /// Modify a peerset priority group. + fn set_priority_group( + &self, + group_id: String, + peers: HashSet, + ) -> std::result::Result<(), String>; + + /// Start putting a value in the Dht. + fn put_value(&self, key: libp2p::kad::record::Key, value: Vec); + + /// Start getting a value from the Dht. 
+ fn get_value(&self, key: &libp2p::kad::record::Key); } impl NetworkProvider for sc_network::NetworkService where - B: BlockT + 'static, - H: ExHashT, + B: BlockT + 'static, + H: ExHashT, { - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group(group_id, peers) - } - fn put_value(&self, key: libp2p::kad::record::Key, value: Vec) { - self.put_value(key, value) - } - fn get_value(&self, key: &libp2p::kad::record::Key) { - self.get_value(key) - } + fn set_priority_group( + &self, + group_id: String, + peers: HashSet, + ) -> std::result::Result<(), String> { + self.set_priority_group(group_id, peers) + } + fn put_value(&self, key: libp2p::kad::record::Key, value: Vec) { + self.put_value(key, value) + } + fn get_value(&self, key: &libp2p::kad::record::Key) { + self.get_value(key) + } } fn hash_authority_id(id: &[u8]) -> libp2p::kad::record::Key { - libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) + libp2p::kad::record::Key::new(&libp2p::multihash::Sha2_256::digest(id)) } fn interval_at(start: Instant, duration: Duration) -> Interval { - let stream = futures::stream::unfold(start, move |next| { - let time_until_next = next.saturating_duration_since(Instant::now()); + let stream = futures::stream::unfold(start, move |next| { + let time_until_next = next.saturating_duration_since(Instant::now()); - Delay::new(time_until_next).map(move |_| Some(((), next + duration))) - }); + Delay::new(time_until_next).map(move |_| Some(((), next + duration))) + }); - Box::new(stream) + Box::new(stream) } /// Prometheus metrics for an `AuthorityDiscovery`. 
#[derive(Clone)] pub(crate) struct Metrics { - publish: Counter, - amount_last_published: Gauge, - request: Counter, - dht_event_received: CounterVec, + publish: Counter, + amount_last_published: Gauge, + request: Counter, + dht_event_received: CounterVec, } impl Metrics { - pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { - Ok(Self { - publish: register( - Counter::new( - "authority_discovery_times_published_total", - "Number of times authority discovery has published external addresses." - )?, - registry, - )?, - amount_last_published: register( - Gauge::new( - "authority_discovery_amount_external_addresses_last_published", - "Number of external addresses published when authority discovery last \ - published addresses." - )?, - registry, - )?, - request: register( - Counter::new( - "authority_discovery_authority_addresses_requested_total", - "Number of times authority discovery has requested external addresses of a \ - single authority." - )?, - registry, - )?, - dht_event_received: register( - CounterVec::new( - Opts::new( - "authority_discovery_dht_event_received", - "Number of dht events received by authority discovery." 
- ), - &["name"], - )?, - registry, - )?, - }) - } + pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { + Ok(Self { + publish: register( + Counter::new( + "authority_discovery_times_published_total", + "Number of times authority discovery has published external addresses.", + )?, + registry, + )?, + amount_last_published: register( + Gauge::new( + "authority_discovery_amount_external_addresses_last_published", + "Number of external addresses published when authority discovery last \ + published addresses.", + )?, + registry, + )?, + request: register( + Counter::new( + "authority_discovery_authority_addresses_requested_total", + "Number of times authority discovery has requested external addresses of a \ + single authority.", + )?, + registry, + )?, + dht_event_received: register( + CounterVec::new( + Opts::new( + "authority_discovery_dht_event_received", + "Number of dht events received by authority discovery.", + ), + &["name"], + )?, + registry, + )?, + }) + } } diff --git a/client/authority-discovery/src/tests.rs b/client/authority-discovery/src/tests.rs index c9b5e392d8..626d5aca8d 100644 --- a/client/authority-discovery/src/tests.rs +++ b/client/authority-discovery/src/tests.rs @@ -14,445 +14,459 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::{iter::FromIterator, sync::{Arc, Mutex}}; +use std::{ + iter::FromIterator, + sync::{Arc, Mutex}, +}; use futures::channel::mpsc::channel; use futures::executor::{block_on, LocalPool}; use futures::future::{poll_fn, FutureExt}; +use futures::poll; use futures::sink::SinkExt; use futures::task::LocalSpawn; -use futures::poll; use libp2p::{kad, PeerId}; -use sp_api::{ProvideRuntimeApi, ApiRef}; +use sp_api::{ApiRef, ProvideRuntimeApi}; use sp_core::testing::KeyStore; -use sp_runtime::traits::{Zero, Block as BlockT, NumberFor}; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use substrate_test_runtime_client::runtime::Block; use super::*; #[test] fn interval_at_with_start_now() { - let start = Instant::now(); + let start = Instant::now(); - let mut interval = interval_at( - std::time::Instant::now(), - std::time::Duration::from_secs(10), - ); + let mut interval = interval_at( + std::time::Instant::now(), + std::time::Duration::from_secs(10), + ); - futures::executor::block_on(async { - interval.next().await; - }); + futures::executor::block_on(async { + interval.next().await; + }); - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_secs(1), - "Expected low resolution instant interval to fire within less than a second.", - ); + assert!( + Instant::now().saturating_duration_since(start) < Duration::from_secs(1), + "Expected low resolution instant interval to fire within less than a second.", + ); } #[test] fn interval_at_is_queuing_ticks() { - let start = Instant::now(); + let start = Instant::now(); - let interval = interval_at(start, std::time::Duration::from_millis(100)); + let interval = interval_at(start, std::time::Duration::from_millis(100)); - // Let's wait for 200ms, thus 3 elements should be queued up (1st at 0ms, 2nd at 100ms, 3rd - // at 200ms). - std::thread::sleep(Duration::from_millis(200)); + // Let's wait for 200ms, thus 3 elements should be queued up (1st at 0ms, 2nd at 100ms, 3rd + // at 200ms). 
+ std::thread::sleep(Duration::from_millis(200)); - futures::executor::block_on(async { - interval.take(3).collect::>().await; - }); + futures::executor::block_on(async { + interval.take(3).collect::>().await; + }); - // Make sure we did not wait for more than 300 ms, which would imply that `at_interval` is - // not queuing ticks. - assert!( - Instant::now().saturating_duration_since(start) < Duration::from_millis(300), - "Expect interval to /queue/ events when not polled for a while.", - ); + // Make sure we did not wait for more than 300 ms, which would imply that `at_interval` is + // not queuing ticks. + assert!( + Instant::now().saturating_duration_since(start) < Duration::from_millis(300), + "Expect interval to /queue/ events when not polled for a while.", + ); } #[test] fn interval_at_with_initial_delay() { - let start = Instant::now(); + let start = Instant::now(); - let mut interval = interval_at( - std::time::Instant::now() + Duration::from_millis(100), - std::time::Duration::from_secs(10), - ); + let mut interval = interval_at( + std::time::Instant::now() + Duration::from_millis(100), + std::time::Duration::from_secs(10), + ); - futures::executor::block_on(async { - interval.next().await; - }); + futures::executor::block_on(async { + interval.next().await; + }); - assert!( - Instant::now().saturating_duration_since(start) > Duration::from_millis(100), - "Expected interval with initial delay not to fire right away.", - ); + assert!( + Instant::now().saturating_duration_since(start) > Duration::from_millis(100), + "Expected interval with initial delay not to fire right away.", + ); } #[derive(Clone)] struct TestApi { - authorities: Vec, + authorities: Vec, } impl ProvideRuntimeApi for TestApi { - type Api = RuntimeApi; - - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RuntimeApi { - authorities: self.authorities.clone(), - }.into() - } + type Api = RuntimeApi; + + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + RuntimeApi { + 
authorities: self.authorities.clone(), + } + .into() + } } /// Blockchain database header backend. Does not perform any validation. impl HeaderBackend for TestApi { - fn header( - &self, - _id: BlockId, - ) -> std::result::Result, sp_blockchain::Error> { - Ok(None) - } - - fn info(&self) -> sc_client_api::blockchain::Info { - sc_client_api::blockchain::Info { - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - number_leaves: Default::default(), - } - } - - fn status( - &self, - _id: BlockId, - ) -> std::result::Result { - Ok(sc_client_api::blockchain::BlockStatus::Unknown) - } - - fn number( - &self, - _hash: Block::Hash, - ) -> std::result::Result>, sp_blockchain::Error> { - Ok(None) - } - - fn hash( - &self, - _number: NumberFor, - ) -> std::result::Result, sp_blockchain::Error> { - Ok(None) - } + fn header( + &self, + _id: BlockId, + ) -> std::result::Result, sp_blockchain::Error> { + Ok(None) + } + + fn info(&self) -> sc_client_api::blockchain::Info { + sc_client_api::blockchain::Info { + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + number_leaves: Default::default(), + } + } + + fn status( + &self, + _id: BlockId, + ) -> std::result::Result { + Ok(sc_client_api::blockchain::BlockStatus::Unknown) + } + + fn number( + &self, + _hash: Block::Hash, + ) -> std::result::Result>, sp_blockchain::Error> { + Ok(None) + } + + fn hash( + &self, + _number: NumberFor, + ) -> std::result::Result, sp_blockchain::Error> { + Ok(None) + } } struct RuntimeApi { - authorities: Vec, + authorities: Vec, } sp_api::mock_impl_runtime_apis! 
{ - impl AuthorityDiscoveryApi for RuntimeApi { - type Error = sp_blockchain::Error; + impl AuthorityDiscoveryApi for RuntimeApi { + type Error = sp_blockchain::Error; - fn authorities(&self) -> Vec { - self.authorities.clone() - } - } + fn authorities(&self) -> Vec { + self.authorities.clone() + } + } } #[derive(Default)] struct TestNetwork { - // Whenever functions on `TestNetwork` are called, the function arguments are added to the - // vectors below. - pub put_value_call: Arc)>>>, - pub get_value_call: Arc>>, - pub set_priority_group_call: Arc)>>>, + // Whenever functions on `TestNetwork` are called, the function arguments are added to the + // vectors below. + pub put_value_call: Arc)>>>, + pub get_value_call: Arc>>, + pub set_priority_group_call: Arc)>>>, } impl NetworkProvider for TestNetwork { - fn set_priority_group( - &self, - group_id: String, - peers: HashSet, - ) -> std::result::Result<(), String> { - self.set_priority_group_call - .lock() - .unwrap() - .push((group_id, peers)); - Ok(()) - } - fn put_value(&self, key: kad::record::Key, value: Vec) { - self.put_value_call.lock().unwrap().push((key, value)); - } - fn get_value(&self, key: &kad::record::Key) { - self.get_value_call.lock().unwrap().push(key.clone()); - } + fn set_priority_group( + &self, + group_id: String, + peers: HashSet, + ) -> std::result::Result<(), String> { + self.set_priority_group_call + .lock() + .unwrap() + .push((group_id, peers)); + Ok(()) + } + fn put_value(&self, key: kad::record::Key, value: Vec) { + self.put_value_call.lock().unwrap().push((key, value)); + } + fn get_value(&self, key: &kad::record::Key) { + self.get_value_call.lock().unwrap().push(key.clone()); + } } impl NetworkStateInfo for TestNetwork { - fn local_peer_id(&self) -> PeerId { - PeerId::random() - } + fn local_peer_id(&self) -> PeerId { + PeerId::random() + } - fn external_addresses(&self) -> Vec { - vec![] - } + fn external_addresses(&self) -> Vec { + vec![] + } } #[test] fn new_registers_metrics() { - 
let (_dht_event_tx, dht_event_rx) = channel(1000); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); - - let registry = prometheus_endpoint::Registry::new(); - - AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - Some(registry.clone()), - ); - - assert!(registry.gather().len() > 0); + let (_dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + + let registry = prometheus_endpoint::Registry::new(); + + AuthorityDiscovery::new( + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + Some(registry.clone()), + ); + + assert!(registry.gather().len() > 0); } #[test] fn publish_ext_addresses_puts_record_on_dht() { - let (_dht_event_tx, dht_event_rx) = channel(1000); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let public = key_store - .write() - .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) - .unwrap(); - let test_api = Arc::new(TestApi { - authorities: vec![public.into()], - }); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); - - authority_discovery.publish_ext_addresses().unwrap(); - - // Expect authority discovery to put a new record onto the dht. 
- assert_eq!(network.put_value_call.lock().unwrap().len(), 1); + let (_dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let public = key_store + .write() + .sr25519_generate_new(key_types::AUTHORITY_DISCOVERY, None) + .unwrap(); + let test_api = Arc::new(TestApi { + authorities: vec![public.into()], + }); + + let mut authority_discovery = AuthorityDiscovery::new( + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + authority_discovery.publish_ext_addresses().unwrap(); + + // Expect authority discovery to put a new record onto the dht. + assert_eq!(network.put_value_call.lock().unwrap().len(), 1); } #[test] fn request_addresses_of_others_triggers_dht_get_query() { - let _ = ::env_logger::try_init(); - let (_dht_event_tx, dht_event_rx) = channel(1000); + let _ = ::env_logger::try_init(); + let (_dht_event_tx, dht_event_rx) = channel(1000); - // Generate authority keys - let authority_1_key_pair = AuthorityPair::from_seed_slice(&[1; 32]).unwrap(); - let authority_2_key_pair = AuthorityPair::from_seed_slice(&[2; 32]).unwrap(); + // Generate authority keys + let authority_1_key_pair = AuthorityPair::from_seed_slice(&[1; 32]).unwrap(); + let authority_2_key_pair = AuthorityPair::from_seed_slice(&[2; 32]).unwrap(); - let test_api = Arc::new(TestApi { - authorities: vec![authority_1_key_pair.public(), authority_2_key_pair.public()], - }); + let test_api = Arc::new(TestApi { + authorities: vec![authority_1_key_pair.public(), authority_2_key_pair.public()], + }); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); + let mut authority_discovery = 
AuthorityDiscovery::new( + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); - authority_discovery.request_addresses_of_others().unwrap(); + authority_discovery.request_addresses_of_others().unwrap(); - // Expect authority discovery to request new records from the dht. - assert_eq!(network.get_value_call.lock().unwrap().len(), 2); + // Expect authority discovery to request new records from the dht. + assert_eq!(network.get_value_call.lock().unwrap().len(), 2); } #[test] fn handle_dht_events_with_value_found_should_call_set_priority_group() { - let _ = ::env_logger::try_init(); - - // Create authority discovery. - - let (mut dht_event_tx, dht_event_rx) = channel(1000); - let key_pair = AuthorityPair::from_seed_slice(&[1; 32]).unwrap(); - let test_api = Arc::new(TestApi { - authorities: vec![key_pair.public()], - }); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); - - // Create sample dht event. - - let authority_id_1 = hash_authority_id(key_pair.public().as_ref()); - let address_1: Multiaddr = "/ip6/2001:db8::".parse().unwrap(); - - let mut serialized_addresses = vec![]; - schema::AuthorityAddresses { - addresses: vec![address_1.to_vec()], - } - .encode(&mut serialized_addresses) - .unwrap(); - - let signature = key_pair.sign(serialized_addresses.as_ref()).encode(); - let mut signed_addresses = vec![]; - schema::SignedAuthorityAddresses { - addresses: serialized_addresses, - signature, - } - .encode(&mut signed_addresses) - .unwrap(); - - let dht_event = sc_network::DhtEvent::ValueFound(vec![(authority_id_1, signed_addresses)]); - dht_event_tx.try_send(dht_event).unwrap(); - - // Make authority discovery handle the event. 
- let f = |cx: &mut Context<'_>| -> Poll<()> { - if let Poll::Ready(e) = authority_discovery.handle_dht_events(cx) { - panic!("Unexpected error: {:?}", e); - } - - // Expect authority discovery to set the priority set. - assert_eq!(network.set_priority_group_call.lock().unwrap().len(), 1); - - assert_eq!( - network.set_priority_group_call.lock().unwrap()[0], - ( - "authorities".to_string(), - HashSet::from_iter(vec![address_1.clone()].into_iter()) - ) - ); - - Poll::Ready(()) - }; - - let _ = block_on(poll_fn(f)); + let _ = ::env_logger::try_init(); + + // Create authority discovery. + + let (mut dht_event_tx, dht_event_rx) = channel(1000); + let key_pair = AuthorityPair::from_seed_slice(&[1; 32]).unwrap(); + let test_api = Arc::new(TestApi { + authorities: vec![key_pair.public()], + }); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + + let mut authority_discovery = AuthorityDiscovery::new( + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + // Create sample dht event. + + let authority_id_1 = hash_authority_id(key_pair.public().as_ref()); + let address_1: Multiaddr = "/ip6/2001:db8::".parse().unwrap(); + + let mut serialized_addresses = vec![]; + schema::AuthorityAddresses { + addresses: vec![address_1.to_vec()], + } + .encode(&mut serialized_addresses) + .unwrap(); + + let signature = key_pair.sign(serialized_addresses.as_ref()).encode(); + let mut signed_addresses = vec![]; + schema::SignedAuthorityAddresses { + addresses: serialized_addresses, + signature, + } + .encode(&mut signed_addresses) + .unwrap(); + + let dht_event = sc_network::DhtEvent::ValueFound(vec![(authority_id_1, signed_addresses)]); + dht_event_tx.try_send(dht_event).unwrap(); + + // Make authority discovery handle the event. 
+ let f = |cx: &mut Context<'_>| -> Poll<()> { + if let Poll::Ready(e) = authority_discovery.handle_dht_events(cx) { + panic!("Unexpected error: {:?}", e); + } + + // Expect authority discovery to set the priority set. + assert_eq!(network.set_priority_group_call.lock().unwrap().len(), 1); + + assert_eq!( + network.set_priority_group_call.lock().unwrap()[0], + ( + "authorities".to_string(), + HashSet::from_iter(vec![address_1.clone()].into_iter()) + ) + ); + + Poll::Ready(()) + }; + + let _ = block_on(poll_fn(f)); } #[test] fn terminate_when_event_stream_terminates() { - let (dht_event_tx, dht_event_rx) = channel(1000); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); - - block_on(async { - assert_eq!(Poll::Pending, poll!(&mut authority_discovery)); - - // Simulate termination of the network through dropping the sender side of the dht event - // channel. - drop(dht_event_tx); - - assert_eq!( - Poll::Ready(()), poll!(&mut authority_discovery), - "Expect the authority discovery module to terminate once the sending side of the dht \ + let (dht_event_tx, dht_event_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + + let mut authority_discovery = AuthorityDiscovery::new( + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + block_on(async { + assert_eq!(Poll::Pending, poll!(&mut authority_discovery)); + + // Simulate termination of the network through dropping the sender side of the dht event + // channel. 
+ drop(dht_event_tx); + + assert_eq!( + Poll::Ready(()), + poll!(&mut authority_discovery), + "Expect the authority discovery module to terminate once the sending side of the dht \ event channel is terminated.", - ); - }); + ); + }); } #[test] fn dont_stop_polling_when_error_is_returned() { - #[derive(PartialEq, Debug)] - enum Event { - Processed, - End, - }; - - let (mut dht_event_tx, dht_event_rx) = channel(1000); - let (mut discovery_update_tx, mut discovery_update_rx) = channel(1000); - let network: Arc = Arc::new(Default::default()); - let key_store = KeyStore::new(); - let test_api = Arc::new(TestApi { - authorities: vec![], - }); - let mut pool = LocalPool::new(); - - let mut authority_discovery = AuthorityDiscovery::new( - test_api, - network.clone(), - vec![], - dht_event_rx.boxed(), - Role::Authority(key_store), - None, - ); - - // Spawn the authority discovery to make sure it is polled independently. - // - // As this is a local pool, only one future at a time will have the CPU and - // can make progress until the future returns `Pending`. - pool.spawner().spawn_local_obj( - futures::future::poll_fn(move |ctx| { - match std::pin::Pin::new(&mut authority_discovery).poll(ctx) { - Poll::Ready(()) => {}, - Poll::Pending => { - discovery_update_tx.send(Event::Processed).now_or_never(); - return Poll::Pending; - }, - } - let _ = discovery_update_tx.send(Event::End).now_or_never().unwrap(); - Poll::Ready(()) - }).boxed_local().into(), - ).expect("Spawns authority discovery"); - - pool.run_until( - // The future that drives the event stream - async { - // Send an event that should generate an error - let _ = dht_event_tx.send(DhtEvent::ValueFound(Default::default())).now_or_never(); - // Send the same event again to make sure that the event stream needs to be polled twice - // to be woken up again. 
- let _ = dht_event_tx.send(DhtEvent::ValueFound(Default::default())).now_or_never(); - - // Now we call `await` and give the control to the authority discovery future. - assert_eq!(Some(Event::Processed), discovery_update_rx.next().await); - - // Drop the event rx to stop the authority discovery. If it was polled correctly, it - // should end properly. - drop(dht_event_tx); - - assert!( - discovery_update_rx.collect::>() - .await - .into_iter() - .any(|evt| evt == Event::End), - "The authority discovery should have ended", - ); - } - ); + #[derive(PartialEq, Debug)] + enum Event { + Processed, + End, + }; + + let (mut dht_event_tx, dht_event_rx) = channel(1000); + let (mut discovery_update_tx, mut discovery_update_rx) = channel(1000); + let network: Arc = Arc::new(Default::default()); + let key_store = KeyStore::new(); + let test_api = Arc::new(TestApi { + authorities: vec![], + }); + let mut pool = LocalPool::new(); + + let mut authority_discovery = AuthorityDiscovery::new( + test_api, + network.clone(), + vec![], + dht_event_rx.boxed(), + Role::Authority(key_store), + None, + ); + + // Spawn the authority discovery to make sure it is polled independently. + // + // As this is a local pool, only one future at a time will have the CPU and + // can make progress until the future returns `Pending`. 
+ pool.spawner() + .spawn_local_obj( + futures::future::poll_fn(move |ctx| { + match std::pin::Pin::new(&mut authority_discovery).poll(ctx) { + Poll::Ready(()) => {} + Poll::Pending => { + discovery_update_tx.send(Event::Processed).now_or_never(); + return Poll::Pending; + } + } + let _ = discovery_update_tx.send(Event::End).now_or_never().unwrap(); + Poll::Ready(()) + }) + .boxed_local() + .into(), + ) + .expect("Spawns authority discovery"); + + pool.run_until( + // The future that drives the event stream + async { + // Send an event that should generate an error + let _ = dht_event_tx + .send(DhtEvent::ValueFound(Default::default())) + .now_or_never(); + // Send the same event again to make sure that the event stream needs to be polled twice + // to be woken up again. + let _ = dht_event_tx + .send(DhtEvent::ValueFound(Default::default())) + .now_or_never(); + + // Now we call `await` and give the control to the authority discovery future. + assert_eq!(Some(Event::Processed), discovery_update_rx.next().await); + + // Drop the event rx to stop the authority discovery. If it was polled correctly, it + // should end properly. 
+ drop(dht_event_tx); + + assert!( + discovery_update_rx + .collect::>() + .await + .into_iter() + .any(|evt| evt == Event::End), + "The authority discovery should have ended", + ); + }, + ); } diff --git a/client/basic-authorship/src/basic_authorship.rs b/client/basic-authorship/src/basic_authorship.rs index e1e99938e3..5342f45ea4 100644 --- a/client/basic-authorship/src/basic_authorship.rs +++ b/client/basic-authorship/src/basic_authorship.rs @@ -18,277 +18,296 @@ // FIXME #1021 move this into sp-consensus -use std::{time, sync::Arc}; -use sc_client_api::backend; use codec::Decode; +use futures::{executor, future, future::Either}; +use log::{debug, error, info, trace, warn}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use sc_client_api::backend; +use sc_telemetry::{telemetry, CONSENSUS_INFO}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_blockchain::{ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed, HeaderBackend}; use sp_consensus::{evaluation, Proposal, RecordProof}; -use sp_inherents::InherentData; -use log::{error, info, debug, trace, warn}; use sp_core::ExecutionContext; +use sp_inherents::InherentData; use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Hash as HashT, Header as HeaderT, DigestFor, BlakeTwo256}, + generic::BlockId, + traits::{BlakeTwo256, Block as BlockT, DigestFor, Hash as HashT, Header as HeaderT}, }; -use sp_transaction_pool::{TransactionPool, InPoolTransaction}; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; -use sp_api::{ProvideRuntimeApi, ApiExt}; -use futures::{executor, future, future::Either}; -use sp_blockchain::{HeaderBackend, ApplyExtrinsicFailed::Validity, Error::ApplyExtrinsicFailed}; +use sp_transaction_pool::{InPoolTransaction, TransactionPool}; use std::marker::PhantomData; +use std::{sync::Arc, time}; /// Proposer factory. pub struct ProposerFactory { - /// The client instance. 
- client: Arc, - /// The transaction pool. - transaction_pool: Arc, - /// phantom member to pin the `Backend` type. - _phantom: PhantomData, + /// The client instance. + client: Arc, + /// The transaction pool. + transaction_pool: Arc, + /// phantom member to pin the `Backend` type. + _phantom: PhantomData, } impl ProposerFactory { - pub fn new(client: Arc, transaction_pool: Arc) -> Self { - ProposerFactory { - client, - transaction_pool, - _phantom: PhantomData, - } - } + pub fn new(client: Arc, transaction_pool: Arc) -> Self { + ProposerFactory { + client, + transaction_pool, + _phantom: PhantomData, + } + } } impl ProposerFactory - where - A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, +where + A: TransactionPool + 'static, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { - pub fn init_with_now( - &mut self, - parent_header: &::Header, - now: Box time::Instant + Send + Sync>, - ) -> Proposer { - let parent_hash = parent_header.hash(); - - let id = BlockId::hash(parent_hash); - - info!("🙌 Starting consensus session on top of parent {:?}", parent_hash); - - let proposer = Proposer { - inner: Arc::new(ProposerInner { - client: self.client.clone(), - parent_hash, - parent_id: id, - parent_number: *parent_header.number(), - transaction_pool: self.transaction_pool.clone(), - now, - _phantom: PhantomData, - }), - }; - - proposer - } + pub fn init_with_now( + &mut self, + parent_header: &::Header, + now: Box time::Instant + Send + Sync>, + ) -> Proposer { + let parent_hash = parent_header.hash(); + + let id = BlockId::hash(parent_hash); + + info!( + "🙌 Starting consensus session on top of parent {:?}", + parent_hash + ); + + let proposer = Proposer 
{ + inner: Arc::new(ProposerInner { + client: self.client.clone(), + parent_hash, + parent_id: id, + parent_number: *parent_header.number(), + transaction_pool: self.transaction_pool.clone(), + now, + _phantom: PhantomData, + }), + }; + + proposer + } } -impl sp_consensus::Environment for - ProposerFactory - where - A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, +impl sp_consensus::Environment for ProposerFactory +where + A: TransactionPool + 'static, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { - type CreateProposer = future::Ready>; - type Proposer = Proposer; - type Error = sp_blockchain::Error; - - fn init( - &mut self, - parent_header: &::Header, - ) -> Self::CreateProposer { - future::ready(Ok(self.init_with_now(parent_header, Box::new(time::Instant::now)))) - } + type CreateProposer = future::Ready>; + type Proposer = Proposer; + type Error = sp_blockchain::Error; + + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { + future::ready(Ok( + self.init_with_now(parent_header, Box::new(time::Instant::now)) + )) + } } /// The proposer logic. pub struct Proposer { - inner: Arc>, + inner: Arc>, } /// Proposer inner, to wrap parameters under Arc. 
struct ProposerInner { - client: Arc, - parent_hash: ::Hash, - parent_id: BlockId, - parent_number: <::Header as HeaderT>::Number, - transaction_pool: Arc, - now: Box time::Instant + Send + Sync>, - _phantom: PhantomData, + client: Arc, + parent_hash: ::Hash, + parent_id: BlockId, + parent_number: <::Header as HeaderT>::Number, + transaction_pool: Arc, + now: Box time::Instant + Send + Sync>, + _phantom: PhantomData, } -impl sp_consensus::Proposer for - Proposer - where - A: TransactionPool + 'static, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, +impl sp_consensus::Proposer for Proposer +where + A: TransactionPool + 'static, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { - type Transaction = backend::TransactionFor; - type Proposal = tokio_executor::blocking::Blocking< - Result, Self::Error> - >; - type Error = sp_blockchain::Error; - - fn propose( - &mut self, - inherent_data: InherentData, - inherent_digests: DigestFor, - max_duration: time::Duration, - record_proof: RecordProof, - ) -> Self::Proposal { - let inner = self.inner.clone(); - tokio_executor::blocking::run(move || { - // leave some time for evaluation and block finalization (33%) - let deadline = (inner.now)() + max_duration - max_duration / 3; - inner.propose_with(inherent_data, inherent_digests, deadline, record_proof) - }) - } + type Transaction = backend::TransactionFor; + type Proposal = + tokio_executor::blocking::Blocking, Self::Error>>; + type Error = sp_blockchain::Error; + + fn propose( + &mut self, + inherent_data: InherentData, + inherent_digests: DigestFor, + max_duration: time::Duration, + record_proof: RecordProof, + ) -> Self::Proposal { + let inner = self.inner.clone(); + 
tokio_executor::blocking::run(move || { + // leave some time for evaluation and block finalization (33%) + let deadline = (inner.now)() + max_duration - max_duration / 3; + inner.propose_with(inherent_data, inherent_digests, deadline, record_proof) + }) + } } impl ProposerInner - where - A: TransactionPool, - B: backend::Backend + Send + Sync + 'static, - Block: BlockT, - C: BlockBuilderProvider + HeaderBackend + ProvideRuntimeApi - + Send + Sync + 'static, - C::Api: ApiExt> - + BlockBuilderApi, +where + A: TransactionPool, + B: backend::Backend + Send + Sync + 'static, + Block: BlockT, + C: BlockBuilderProvider + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + 'static, + C::Api: ApiExt> + + BlockBuilderApi, { - fn propose_with( - &self, - inherent_data: InherentData, - inherent_digests: DigestFor, - deadline: time::Instant, - record_proof: RecordProof, - ) -> Result>, sp_blockchain::Error> { - /// If the block is full we will attempt to push at most - /// this number of transactions before quitting for real. - /// It allows us to increase block utilization. - const MAX_SKIPPED_TRANSACTIONS: usize = 8; - - let mut block_builder = self.client.new_block_at( - &self.parent_id, - inherent_digests, - record_proof, - )?; - - // We don't check the API versions any further here since the dispatch compatibility - // check should be enough. - for inherent in self.client.runtime_api() - .inherent_extrinsics_with_context( - &self.parent_id, - ExecutionContext::BlockConstruction, - inherent_data - )? - { - match block_builder.push(inherent) { - Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => - warn!("⚠️ Dropping non-mandatory inherent from overweight block."), - Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => { - error!("❌️ Mandatory inherent extrinsic returned error. Block cannot be produced."); - Err(ApplyExtrinsicFailed(Validity(e)))? - } - Err(e) => { - warn!("❗️ Inherent extrinsic returned unexpected error: {}. 
Dropping.", e); - } - Ok(_) => {} - } - } - - // proceed with transactions - let mut is_first = true; - let mut skipped = 0; - let mut unqueue_invalid = Vec::new(); - let pending_iterator = match executor::block_on(future::select( - self.transaction_pool.ready_at(self.parent_number), - futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8), - )) { - Either::Left((iterator, _)) => iterator, - Either::Right(_) => { - log::warn!( + fn propose_with( + &self, + inherent_data: InherentData, + inherent_digests: DigestFor, + deadline: time::Instant, + record_proof: RecordProof, + ) -> Result>, sp_blockchain::Error> { + /// If the block is full we will attempt to push at most + /// this number of transactions before quitting for real. + /// It allows us to increase block utilization. + const MAX_SKIPPED_TRANSACTIONS: usize = 8; + + let mut block_builder = + self.client + .new_block_at(&self.parent_id, inherent_digests, record_proof)?; + + // We don't check the API versions any further here since the dispatch compatibility + // check should be enough. + for inherent in self.client.runtime_api().inherent_extrinsics_with_context( + &self.parent_id, + ExecutionContext::BlockConstruction, + inherent_data, + )? { + match block_builder.push(inherent) { + Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { + warn!("⚠️ Dropping non-mandatory inherent from overweight block.") + } + Err(ApplyExtrinsicFailed(Validity(e))) if e.was_mandatory() => { + error!( + "❌️ Mandatory inherent extrinsic returned error. Block cannot be produced." + ); + Err(ApplyExtrinsicFailed(Validity(e)))? + } + Err(e) => { + warn!( + "❗️ Inherent extrinsic returned unexpected error: {}. 
Dropping.", + e + ); + } + Ok(_) => {} + } + } + + // proceed with transactions + let mut is_first = true; + let mut skipped = 0; + let mut unqueue_invalid = Vec::new(); + let pending_iterator = match executor::block_on(future::select( + self.transaction_pool.ready_at(self.parent_number), + futures_timer::Delay::new(deadline.saturating_duration_since((self.now)()) / 8), + )) { + Either::Left((iterator, _)) => iterator, + Either::Right(_) => { + log::warn!( "Timeout fired waiting for transaction pool to be ready. Proceeding to block production anyway.", ); - self.transaction_pool.ready() - } - }; - - debug!("Attempting to push transactions from the pool."); - debug!("Pool status: {:?}", self.transaction_pool.status()); - for pending_tx in pending_iterator { - if (self.now)() > deadline { - debug!( - "Consensus deadline reached when pushing block transactions, \ + self.transaction_pool.ready() + } + }; + + debug!("Attempting to push transactions from the pool."); + debug!("Pool status: {:?}", self.transaction_pool.status()); + for pending_tx in pending_iterator { + if (self.now)() > deadline { + debug!( + "Consensus deadline reached when pushing block transactions, \ proceeding with proposing." 
- ); - break; - } - - let pending_tx_data = pending_tx.data().clone(); - let pending_tx_hash = pending_tx.hash().clone(); - trace!("[{:?}] Pushing to the block.", pending_tx_hash); - match sc_block_builder::BlockBuilder::push(&mut block_builder, pending_tx_data) { - Ok(()) => { - debug!("[{:?}] Pushed to the block.", pending_tx_hash); - } - Err(ApplyExtrinsicFailed(Validity(e))) - if e.exhausted_resources() => { - if is_first { - debug!("[{:?}] Invalid transaction: FullBlock on empty block", pending_tx_hash); - unqueue_invalid.push(pending_tx_hash); - } else if skipped < MAX_SKIPPED_TRANSACTIONS { - skipped += 1; - debug!( - "Block seems full, but will try {} more transactions before quitting.", - MAX_SKIPPED_TRANSACTIONS - skipped, - ); - } else { - debug!("Block is full, proceed with proposing."); - break; - } - } - Err(e) if skipped > 0 => { - trace!( - "[{:?}] Ignoring invalid transaction when skipping: {}", - pending_tx_hash, - e - ); - } - Err(e) => { - debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); - unqueue_invalid.push(pending_tx_hash); - } - } - - is_first = false; - } - - self.transaction_pool.remove_invalid(&unqueue_invalid); - - let (block, storage_changes, proof) = block_builder.build()?.into_inner(); - - info!("🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", + ); + break; + } + + let pending_tx_data = pending_tx.data().clone(); + let pending_tx_hash = pending_tx.hash().clone(); + trace!("[{:?}] Pushing to the block.", pending_tx_hash); + match sc_block_builder::BlockBuilder::push(&mut block_builder, pending_tx_data) { + Ok(()) => { + debug!("[{:?}] Pushed to the block.", pending_tx_hash); + } + Err(ApplyExtrinsicFailed(Validity(e))) if e.exhausted_resources() => { + if is_first { + debug!( + "[{:?}] Invalid transaction: FullBlock on empty block", + pending_tx_hash + ); + unqueue_invalid.push(pending_tx_hash); + } else if skipped < MAX_SKIPPED_TRANSACTIONS { + skipped += 1; + debug!( + "Block 
seems full, but will try {} more transactions before quitting.", + MAX_SKIPPED_TRANSACTIONS - skipped, + ); + } else { + debug!("Block is full, proceed with proposing."); + break; + } + } + Err(e) if skipped > 0 => { + trace!( + "[{:?}] Ignoring invalid transaction when skipping: {}", + pending_tx_hash, + e + ); + } + Err(e) => { + debug!("[{:?}] Invalid transaction: {}", pending_tx_hash, e); + unqueue_invalid.push(pending_tx_hash); + } + } + + is_first = false; + } + + self.transaction_pool.remove_invalid(&unqueue_invalid); + + let (block, storage_changes, proof) = block_builder.build()?.into_inner(); + + info!("🎁 Prepared block for proposing at {} [hash: {:?}; parent_hash: {}; extrinsics ({}): [{}]]", block.header().number(), ::Hash::from(block.header().hash()), block.header().parent_hash(), @@ -299,222 +318,262 @@ impl ProposerInner .collect::>() .join(", ") ); - telemetry!(CONSENSUS_INFO; "prepared_block_for_proposing"; - "number" => ?block.header().number(), - "hash" => ?::Hash::from(block.header().hash()), - ); - - if Decode::decode(&mut block.encode().as_slice()).as_ref() != Ok(&block) { - error!("Failed to verify block encoding/decoding"); - } - - if let Err(err) = evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) { - error!("Failed to evaluate authored block: {:?}", err); - } - - Ok(Proposal { block, proof, storage_changes }) - } + telemetry!(CONSENSUS_INFO; "prepared_block_for_proposing"; + "number" => ?block.header().number(), + "hash" => ?::Hash::from(block.header().hash()), + ); + + if Decode::decode(&mut block.encode().as_slice()).as_ref() != Ok(&block) { + error!("Failed to verify block encoding/decoding"); + } + + if let Err(err) = + evaluation::evaluate_initial(&block, &self.parent_hash, self.parent_number) + { + error!("Failed to evaluate authored block: {:?}", err); + } + + Ok(Proposal { + block, + proof, + storage_changes, + }) + } } #[cfg(test)] mod tests { - use super::*; - - use parking_lot::Mutex; - use 
sp_consensus::{BlockOrigin, Proposer}; - use substrate_test_runtime_client::{ - prelude::*, - runtime::{Extrinsic, Transfer}, - }; - use sp_transaction_pool::{ChainEvent, MaintainedTransactionPool, TransactionSource}; - use sc_transaction_pool::{BasicPool, FullChainApi}; - use sp_api::Core; - use backend::Backend; - use sp_blockchain::HeaderBackend; - use sp_runtime::traits::NumberFor; - - const SOURCE: TransactionSource = TransactionSource::External; - - fn extrinsic(nonce: u64) -> Extrinsic { - Transfer { - amount: Default::default(), - nonce, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx() - } - - fn chain_event(block_number: u64, header: B::Header) -> ChainEvent - where NumberFor: From - { - ChainEvent::NewBlock { - id: BlockId::Number(block_number.into()), - retracted: vec![], - is_new_best: true, - header, - } - } - - #[test] - fn should_cease_building_block_when_deadline_is_reached() { - // given - let client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 - ); - - futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0), extrinsic(1)]) - ).unwrap(); - - futures::executor::block_on( - txpool.maintain(chain_event( - 0, - client.header(&BlockId::Number(0u64)).expect("header get error").expect("there should be header") - )) - ); - - let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); - - let cell = Mutex::new((false, time::Instant::now())); - let mut proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(0)).unwrap().unwrap(), - Box::new(move || { - let mut value = cell.lock(); - if !value.0 { - value.0 = true; - return value.1; - } - let old = value.1; - let new = old + time::Duration::from_secs(2); - *value = (true, new); - old - }) - ); - - // when - let deadline = time::Duration::from_secs(3); - let block = 
futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) - ).map(|r| r.block).unwrap(); - - // then - // block should have some extrinsics although we have some more in the pool. - assert_eq!(block.extrinsics().len(), 1); - assert_eq!(txpool.ready().count(), 2); - } - - #[test] - fn should_not_panic_when_deadline_is_reached() { - let client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 - ); - - let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); - - let cell = Mutex::new((false, time::Instant::now())); - let mut proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(0)).unwrap().unwrap(), - Box::new(move || { - let mut value = cell.lock(); - if !value.0 { - value.0 = true; - return value.1; - } - let new = value.1 + time::Duration::from_secs(160); - *value = (true, new); - new - }) - ); - - let deadline = time::Duration::from_secs(1); - futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) - ).map(|r| r.block).unwrap(); - } - - #[test] - fn proposed_storage_changes_should_match_execute_block_storage_changes() { - let (client, backend) = substrate_test_runtime_client::TestClientBuilder::new() - .build_with_backend(); - let client = Arc::new(client); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 - ); - - let genesis_hash = client.info().best_hash; - let block_id = BlockId::Hash(genesis_hash); - - futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![extrinsic(0)]), - ).unwrap(); - - futures::executor::block_on( - txpool.maintain(chain_event( - 0, - client.header(&BlockId::Number(0u64)).expect("header get error").expect("there should be header") - )) - ); - - let mut 
proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); - - let mut proposer = proposer_factory.init_with_now( - &client.header(&block_id).unwrap().unwrap(), - Box::new(move || time::Instant::now()), - ); - - let deadline = time::Duration::from_secs(9); - let proposal = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No), - ).unwrap(); - - assert_eq!(proposal.block.extrinsics().len(), 1); - - let api = client.runtime_api(); - api.execute_block(&block_id, proposal.block).unwrap(); - - let state = backend.state_at(block_id).unwrap(); - let changes_trie_state = backend::changes_tries_state_at_block( - &block_id, - backend.changes_trie_storage(), - ).unwrap(); - - let storage_changes = api.into_storage_changes(&state, changes_trie_state.as_ref(), genesis_hash) - .unwrap(); - - assert_eq!( - proposal.storage_changes.transaction_storage_root, - storage_changes.transaction_storage_root, - ); - } - - #[test] - fn should_not_remove_invalid_transactions_when_skipping() { - // given - let mut client = Arc::new(substrate_test_runtime_client::new()); - let txpool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 - ); - - futures::executor::block_on( - txpool.submit_at(&BlockId::number(0), SOURCE, vec![ + use super::*; + + use backend::Backend; + use parking_lot::Mutex; + use sc_transaction_pool::{BasicPool, FullChainApi}; + use sp_api::Core; + use sp_blockchain::HeaderBackend; + use sp_consensus::{BlockOrigin, Proposer}; + use sp_runtime::traits::NumberFor; + use sp_transaction_pool::{ChainEvent, MaintainedTransactionPool, TransactionSource}; + use substrate_test_runtime_client::{ + prelude::*, + runtime::{Extrinsic, Transfer}, + }; + + const SOURCE: TransactionSource = TransactionSource::External; + + fn extrinsic(nonce: u64) -> Extrinsic { + Transfer { + amount: Default::default(), + nonce, + from: AccountKeyring::Alice.into(), + to: 
Default::default(), + } + .into_signed_tx() + } + + fn chain_event(block_number: u64, header: B::Header) -> ChainEvent + where + NumberFor: From, + { + ChainEvent::NewBlock { + id: BlockId::Number(block_number.into()), + retracted: vec![], + is_new_best: true, + header, + } + } + + #[test] + fn should_cease_building_block_when_deadline_is_reached() { + // given + let client = Arc::new(substrate_test_runtime_client::new()); + let txpool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + ); + + futures::executor::block_on(txpool.submit_at( + &BlockId::number(0), + SOURCE, + vec![extrinsic(0), extrinsic(1)], + )) + .unwrap(); + + futures::executor::block_on( + txpool.maintain(chain_event( + 0, + client + .header(&BlockId::Number(0u64)) + .expect("header get error") + .expect("there should be header"), + )), + ); + + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); + + let cell = Mutex::new((false, time::Instant::now())); + let mut proposer = proposer_factory.init_with_now( + &client.header(&BlockId::number(0)).unwrap().unwrap(), + Box::new(move || { + let mut value = cell.lock(); + if !value.0 { + value.0 = true; + return value.1; + } + let old = value.1; + let new = old + time::Duration::from_secs(2); + *value = (true, new); + old + }), + ); + + // when + let deadline = time::Duration::from_secs(3); + let block = futures::executor::block_on(proposer.propose( + Default::default(), + Default::default(), + deadline, + RecordProof::No, + )) + .map(|r| r.block) + .unwrap(); + + // then + // block should have some extrinsics although we have some more in the pool. 
+ assert_eq!(block.extrinsics().len(), 1); + assert_eq!(txpool.ready().count(), 2); + } + + #[test] + fn should_not_panic_when_deadline_is_reached() { + let client = Arc::new(substrate_test_runtime_client::new()); + let txpool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + ); + + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); + + let cell = Mutex::new((false, time::Instant::now())); + let mut proposer = proposer_factory.init_with_now( + &client.header(&BlockId::number(0)).unwrap().unwrap(), + Box::new(move || { + let mut value = cell.lock(); + if !value.0 { + value.0 = true; + return value.1; + } + let new = value.1 + time::Duration::from_secs(160); + *value = (true, new); + new + }), + ); + + let deadline = time::Duration::from_secs(1); + futures::executor::block_on(proposer.propose( + Default::default(), + Default::default(), + deadline, + RecordProof::No, + )) + .map(|r| r.block) + .unwrap(); + } + + #[test] + fn proposed_storage_changes_should_match_execute_block_storage_changes() { + let (client, backend) = + substrate_test_runtime_client::TestClientBuilder::new().build_with_backend(); + let client = Arc::new(client); + let txpool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + ); + + let genesis_hash = client.info().best_hash; + let block_id = BlockId::Hash(genesis_hash); + + futures::executor::block_on(txpool.submit_at( + &BlockId::number(0), + SOURCE, + vec![extrinsic(0)], + )) + .unwrap(); + + futures::executor::block_on( + txpool.maintain(chain_event( + 0, + client + .header(&BlockId::Number(0u64)) + .expect("header get error") + .expect("there should be header"), + )), + ); + + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); + + let mut proposer = proposer_factory.init_with_now( + &client.header(&block_id).unwrap().unwrap(), + Box::new(move || 
time::Instant::now()), + ); + + let deadline = time::Duration::from_secs(9); + let proposal = futures::executor::block_on(proposer.propose( + Default::default(), + Default::default(), + deadline, + RecordProof::No, + )) + .unwrap(); + + assert_eq!(proposal.block.extrinsics().len(), 1); + + let api = client.runtime_api(); + api.execute_block(&block_id, proposal.block).unwrap(); + + let state = backend.state_at(block_id).unwrap(); + let changes_trie_state = + backend::changes_tries_state_at_block(&block_id, backend.changes_trie_storage()) + .unwrap(); + + let storage_changes = api + .into_storage_changes(&state, changes_trie_state.as_ref(), genesis_hash) + .unwrap(); + + assert_eq!( + proposal.storage_changes.transaction_storage_root, + storage_changes.transaction_storage_root, + ); + } + + #[test] + fn should_not_remove_invalid_transactions_when_skipping() { + // given + let mut client = Arc::new(substrate_test_runtime_client::new()); + let txpool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + ); + + futures::executor::block_on(txpool.submit_at( + &BlockId::number(0), + SOURCE, + vec![ extrinsic(0), extrinsic(1), Transfer { @@ -532,55 +591,63 @@ mod tests { }.into_resources_exhausting_tx(), extrinsic(5), extrinsic(6), - ]) - ).unwrap(); - - let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); - let mut propose_block = | - client: &TestClient, - number, - expected_block_extrinsics, - expected_pool_transactions, - | { - let mut proposer = proposer_factory.init_with_now( - &client.header(&BlockId::number(number)).unwrap().unwrap(), - Box::new(move || time::Instant::now()), - ); - - // when - let deadline = time::Duration::from_secs(9); - let block = futures::executor::block_on( - proposer.propose(Default::default(), Default::default(), deadline, RecordProof::No) - ).map(|r| r.block).unwrap(); - - // then - // block should have some extrinsics although we have some more 
in the pool. - assert_eq!(block.extrinsics().len(), expected_block_extrinsics); - assert_eq!(txpool.ready().count(), expected_pool_transactions); - - block - }; - - futures::executor::block_on( - txpool.maintain(chain_event( - 0, - client.header(&BlockId::Number(0u64)).expect("header get error").expect("there should be header") - )) - ); - - // let's create one block and import it - let block = propose_block(&client, 0, 2, 7); - client.import(BlockOrigin::Own, block).unwrap(); - - futures::executor::block_on( - txpool.maintain(chain_event( - 1, - client.header(&BlockId::Number(1)).expect("header get error").expect("there should be header") - )) - ); - - // now let's make sure that we can still make some progress - let block = propose_block(&client, 1, 2, 5); - client.import(BlockOrigin::Own, block).unwrap(); - } + ], + )) + .unwrap(); + + let mut proposer_factory = ProposerFactory::new(client.clone(), txpool.clone()); + let mut propose_block = + |client: &TestClient, number, expected_block_extrinsics, expected_pool_transactions| { + let mut proposer = proposer_factory.init_with_now( + &client.header(&BlockId::number(number)).unwrap().unwrap(), + Box::new(move || time::Instant::now()), + ); + + // when + let deadline = time::Duration::from_secs(9); + let block = futures::executor::block_on(proposer.propose( + Default::default(), + Default::default(), + deadline, + RecordProof::No, + )) + .map(|r| r.block) + .unwrap(); + + // then + // block should have some extrinsics although we have some more in the pool. 
+ assert_eq!(block.extrinsics().len(), expected_block_extrinsics); + assert_eq!(txpool.ready().count(), expected_pool_transactions); + + block + }; + + futures::executor::block_on( + txpool.maintain(chain_event( + 0, + client + .header(&BlockId::Number(0u64)) + .expect("header get error") + .expect("there should be header"), + )), + ); + + // let's create one block and import it + let block = propose_block(&client, 0, 2, 7); + client.import(BlockOrigin::Own, block).unwrap(); + + futures::executor::block_on( + txpool.maintain(chain_event( + 1, + client + .header(&BlockId::Number(1)) + .expect("header get error") + .expect("there should be header"), + )), + ); + + // now let's make sure that we can still make some progress + let block = propose_block(&client, 1, 2, 5); + client.import(BlockOrigin::Own, block).unwrap(); + } } diff --git a/client/basic-authorship/src/lib.rs b/client/basic-authorship/src/lib.rs index 5eb60f1cd5..80274e9ff9 100644 --- a/client/basic-authorship/src/lib.rs +++ b/client/basic-authorship/src/lib.rs @@ -55,4 +55,4 @@ mod basic_authorship; -pub use crate::basic_authorship::{ProposerFactory, Proposer}; +pub use crate::basic_authorship::{Proposer, ProposerFactory}; diff --git a/client/block-builder/src/lib.rs b/client/block-builder/src/lib.rs index 480a759e30..aa6fb892ef 100644 --- a/client/block-builder/src/lib.rs +++ b/client/block-builder/src/lib.rs @@ -26,14 +26,14 @@ use codec::Encode; -use sp_runtime::{ - generic::BlockId, - traits::{Header as HeaderT, Hash, Block as BlockT, HashFor, DigestFor, NumberFor, One}, -}; +use sp_api::{ApiErrorFor, ApiExt, ApiRef, Core, ProvideRuntimeApi, StorageChanges, StorageProof}; use sp_blockchain::{ApplyExtrinsicFailed, Error}; -use sp_core::ExecutionContext; -use sp_api::{Core, ApiExt, ApiErrorFor, ApiRef, ProvideRuntimeApi, StorageChanges, StorageProof}; use sp_consensus::RecordProof; +use sp_core::ExecutionContext; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestFor, Hash, 
HashFor, Header as HeaderT, NumberFor, One}, +}; pub use sp_block_builder::BlockBuilder as BlockBuilderApi; @@ -46,206 +46,212 @@ use sc_client_api::backend; /// can be used to proof that the build block contains the expected data. The `proof` will /// only be set when proof recording was activated. pub struct BuiltBlock>> { - /// The actual block that was build. - pub block: Block, - /// The changes that need to be applied to the backend to get the state of the build block. - pub storage_changes: StorageChanges, - /// An optional proof that was recorded while building the block. - pub proof: Option, + /// The actual block that was build. + pub block: Block, + /// The changes that need to be applied to the backend to get the state of the build block. + pub storage_changes: StorageChanges, + /// An optional proof that was recorded while building the block. + pub proof: Option, } -impl>> BuiltBlock { - /// Convert into the inner values. - pub fn into_inner(self) -> (Block, StorageChanges, Option) { - (self.block, self.storage_changes, self.proof) - } +impl>> + BuiltBlock +{ + /// Convert into the inner values. + pub fn into_inner( + self, + ) -> ( + Block, + StorageChanges, + Option, + ) { + (self.block, self.storage_changes, self.proof) + } } /// Block builder provider pub trait BlockBuilderProvider - where - Block: BlockT, - B: backend::Backend, - Self: Sized, - RA: ProvideRuntimeApi, +where + Block: BlockT, + B: backend::Backend, + Self: Sized, + RA: ProvideRuntimeApi, { - /// Create a new block, built on top of `parent`. - /// - /// When proof recording is enabled, all accessed trie nodes are saved. - /// These recorded trie nodes can be used by a third party to proof the - /// output of this block builder without having access to the full storage. - fn new_block_at>( - &self, - parent: &BlockId, - inherent_digests: DigestFor, - record_proof: R, - ) -> sp_blockchain::Result>; - - /// Create a new block, built on the head of the chain. 
- fn new_block( - &self, - inherent_digests: DigestFor, - ) -> sp_blockchain::Result>; + /// Create a new block, built on top of `parent`. + /// + /// When proof recording is enabled, all accessed trie nodes are saved. + /// These recorded trie nodes can be used by a third party to proof the + /// output of this block builder without having access to the full storage. + fn new_block_at>( + &self, + parent: &BlockId, + inherent_digests: DigestFor, + record_proof: R, + ) -> sp_blockchain::Result>; + + /// Create a new block, built on the head of the chain. + fn new_block( + &self, + inherent_digests: DigestFor, + ) -> sp_blockchain::Result>; } /// Utility for building new (valid) blocks from a stream of extrinsics. pub struct BlockBuilder<'a, Block: BlockT, A: ProvideRuntimeApi, B> { - extrinsics: Vec, - api: ApiRef<'a, A::Api>, - block_id: BlockId, - parent_hash: Block::Hash, - backend: &'a B, + extrinsics: Vec, + api: ApiRef<'a, A::Api>, + block_id: BlockId, + parent_hash: Block::Hash, + backend: &'a B, } impl<'a, Block, A, B> BlockBuilder<'a, Block, A, B> where - Block: BlockT, - A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + - ApiExt>, - B: backend::Backend, + Block: BlockT, + A: ProvideRuntimeApi + 'a, + A::Api: BlockBuilderApi + + ApiExt>, + B: backend::Backend, { - /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. - /// - /// While proof recording is enabled, all accessed trie nodes are saved. - /// These recorded trie nodes can be used by a third party to prove the - /// output of this block builder without having access to the full storage. 
- pub fn new( - api: &'a A, - parent_hash: Block::Hash, - parent_number: NumberFor, - record_proof: RecordProof, - inherent_digests: DigestFor, - backend: &'a B, - ) -> Result> { - let header = <::Header as HeaderT>::new( - parent_number + One::one(), - Default::default(), - Default::default(), - parent_hash, - inherent_digests, - ); - - let mut api = api.runtime_api(); - - if record_proof.yes() { - api.record_proof(); - } - - let block_id = BlockId::Hash(parent_hash); - - api.initialize_block_with_context( - &block_id, ExecutionContext::BlockConstruction, &header, - )?; - - Ok(Self { - parent_hash, - extrinsics: Vec::new(), - api, - block_id, - backend, - }) - } - - /// Push onto the block's list of extrinsics. - /// - /// This will ensure the extrinsic can be validly executed (by executing it). - pub fn push(&mut self, xt: ::Extrinsic) -> Result<(), ApiErrorFor> { - let block_id = &self.block_id; - let extrinsics = &mut self.extrinsics; - - self.api.map_api_result(|api| { - match api.apply_extrinsic_with_context( - block_id, - ExecutionContext::BlockConstruction, - xt.clone(), - )? { - Ok(_) => { - extrinsics.push(xt); - Ok(()) - } - Err(tx_validity) => Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), - } - }) - } - - /// Consume the builder to build a valid `Block` containing all pushed extrinsics. - /// - /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` - /// supplied by `self.api`, combined as [`BuiltBlock`]. - /// The storage proof will be `Some(_)` when proof recording was enabled. 
- pub fn build(mut self) -> Result< - BuiltBlock>, - ApiErrorFor - > { - let header = self.api.finalize_block_with_context( - &self.block_id, ExecutionContext::BlockConstruction - )?; - - debug_assert_eq!( - header.extrinsics_root().clone(), - HashFor::::ordered_trie_root( - self.extrinsics.iter().map(Encode::encode).collect(), - ), - ); - - let proof = self.api.extract_proof(); - - let state = self.backend.state_at(self.block_id)?; - let changes_trie_state = backend::changes_tries_state_at_block( - &self.block_id, - self.backend.changes_trie_storage(), - )?; - let parent_hash = self.parent_hash; - - let storage_changes = self.api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - parent_hash, - )?; - - Ok(BuiltBlock { - block: ::new(header, self.extrinsics), - storage_changes, - proof, - }) - } + /// Create a new instance of builder based on the given `parent_hash` and `parent_number`. + /// + /// While proof recording is enabled, all accessed trie nodes are saved. + /// These recorded trie nodes can be used by a third party to prove the + /// output of this block builder without having access to the full storage. + pub fn new( + api: &'a A, + parent_hash: Block::Hash, + parent_number: NumberFor, + record_proof: RecordProof, + inherent_digests: DigestFor, + backend: &'a B, + ) -> Result> { + let header = <::Header as HeaderT>::new( + parent_number + One::one(), + Default::default(), + Default::default(), + parent_hash, + inherent_digests, + ); + + let mut api = api.runtime_api(); + + if record_proof.yes() { + api.record_proof(); + } + + let block_id = BlockId::Hash(parent_hash); + + api.initialize_block_with_context(&block_id, ExecutionContext::BlockConstruction, &header)?; + + Ok(Self { + parent_hash, + extrinsics: Vec::new(), + api, + block_id, + backend, + }) + } + + /// Push onto the block's list of extrinsics. + /// + /// This will ensure the extrinsic can be validly executed (by executing it). 
+ pub fn push(&mut self, xt: ::Extrinsic) -> Result<(), ApiErrorFor> { + let block_id = &self.block_id; + let extrinsics = &mut self.extrinsics; + + self.api.map_api_result(|api| { + match api.apply_extrinsic_with_context( + block_id, + ExecutionContext::BlockConstruction, + xt.clone(), + )? { + Ok(_) => { + extrinsics.push(xt); + Ok(()) + } + Err(tx_validity) => Err(ApplyExtrinsicFailed::Validity(tx_validity).into()), + } + }) + } + + /// Consume the builder to build a valid `Block` containing all pushed extrinsics. + /// + /// Returns the build `Block`, the changes to the storage and an optional `StorageProof` + /// supplied by `self.api`, combined as [`BuiltBlock`]. + /// The storage proof will be `Some(_)` when proof recording was enabled. + pub fn build( + mut self, + ) -> Result>, ApiErrorFor> { + let header = self + .api + .finalize_block_with_context(&self.block_id, ExecutionContext::BlockConstruction)?; + + debug_assert_eq!( + header.extrinsics_root().clone(), + HashFor::::ordered_trie_root( + self.extrinsics.iter().map(Encode::encode).collect(), + ), + ); + + let proof = self.api.extract_proof(); + + let state = self.backend.state_at(self.block_id)?; + let changes_trie_state = backend::changes_tries_state_at_block( + &self.block_id, + self.backend.changes_trie_storage(), + )?; + let parent_hash = self.parent_hash; + + let storage_changes = + self.api + .into_storage_changes(&state, changes_trie_state.as_ref(), parent_hash)?; + + Ok(BuiltBlock { + block: ::new(header, self.extrinsics), + storage_changes, + proof, + }) + } } #[cfg(test)] mod tests { - use super::*; - use sp_blockchain::HeaderBackend; - use sp_core::Blake2Hasher; - use sp_state_machine::Backend; - use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; - - #[test] - fn block_building_storage_proof_does_not_include_runtime_by_default() { - let builder = substrate_test_runtime_client::TestClientBuilder::new(); - let backend = builder.backend(); - let client = 
builder.build(); - - let block = BlockBuilder::new( - &client, - client.info().best_hash, - client.info().best_number, - RecordProof::Yes, - Default::default(), - &*backend, - ).unwrap().build().unwrap(); - - let proof = block.proof.expect("Proof is build on request"); - - let backend = sp_state_machine::create_proof_check_backend::( - block.storage_changes.transaction_storage_root, - proof, - ).unwrap(); - - assert!( - backend.storage(&sp_core::storage::well_known_keys::CODE) - .unwrap_err() - .contains("Database missing expected key"), - ); - } + use super::*; + use sp_blockchain::HeaderBackend; + use sp_core::Blake2Hasher; + use sp_state_machine::Backend; + use substrate_test_runtime_client::{DefaultTestClientBuilderExt, TestClientBuilderExt}; + + #[test] + fn block_building_storage_proof_does_not_include_runtime_by_default() { + let builder = substrate_test_runtime_client::TestClientBuilder::new(); + let backend = builder.backend(); + let client = builder.build(); + + let block = BlockBuilder::new( + &client, + client.info().best_hash, + client.info().best_number, + RecordProof::Yes, + Default::default(), + &*backend, + ) + .unwrap() + .build() + .unwrap(); + + let proof = block.proof.expect("Proof is build on request"); + + let backend = sp_state_machine::create_proof_check_backend::( + block.storage_changes.transaction_storage_root, + proof, + ) + .unwrap(); + + assert!(backend + .storage(&sp_core::storage::well_known_keys::CODE) + .unwrap_err() + .contains("Database missing expected key"),); + } } diff --git a/client/chain-spec/derive/src/impls.rs b/client/chain-spec/derive/src/impls.rs index 1b22f16581..f518e4b29a 100644 --- a/client/chain-spec/derive/src/impls.rs +++ b/client/chain-spec/derive/src/impls.rs @@ -15,9 +15,9 @@ // along with Substrate. If not, see . 
use proc_macro2::{Span, TokenStream}; -use quote::quote; -use syn::{DeriveInput, Ident, Error}; use proc_macro_crate::crate_name; +use quote::quote; +use syn::{DeriveInput, Error, Ident}; const CRATE_NAME: &str = "sc-chain-spec"; const ATTRIBUTE_NAME: &str = "forks"; @@ -27,173 +27,193 @@ const ATTRIBUTE_NAME: &str = "forks"; /// The struct that derives this implementation will be usable within the `ChainSpec` file. /// The derive implements a by-type accessor method. pub fn extension_derive(ast: &DeriveInput) -> proc_macro::TokenStream { - derive(ast, |crate_name, name, generics: &syn::Generics, field_names, field_types, fields| { - let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - let forks = fields.named.iter().find_map(|f| { - if f.attrs.iter().any(|attr| attr.path.is_ident(ATTRIBUTE_NAME)) { - let typ = &f.ty; - Some(quote! { #typ }) - } else { - None - } - }).unwrap_or_else(|| quote! { #crate_name::NoExtension }); - - quote! { - impl #impl_generics #crate_name::Extension for #name #ty_generics #where_clause { - type Forks = #forks; - - fn get(&self) -> Option<&T> { - use std::any::{Any, TypeId}; - - match TypeId::of::() { - #( x if x == TypeId::of::<#field_types>() => Any::downcast_ref(&self.#field_names) ),*, - _ => None, - } - } - - fn get_any(&self, t: std::any::TypeId) -> &dyn std::any::Any { - use std::any::{Any, TypeId}; - - match t { - #( x if x == TypeId::of::<#field_types>() => &self.#field_names ),*, - _ => self, - } - } - } - } - }) + derive( + ast, + |crate_name, name, generics: &syn::Generics, field_names, field_types, fields| { + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + let forks = fields + .named + .iter() + .find_map(|f| { + if f.attrs + .iter() + .any(|attr| attr.path.is_ident(ATTRIBUTE_NAME)) + { + let typ = &f.ty; + Some(quote! { #typ }) + } else { + None + } + }) + .unwrap_or_else(|| quote! { #crate_name::NoExtension }); + + quote! 
{ + impl #impl_generics #crate_name::Extension for #name #ty_generics #where_clause { + type Forks = #forks; + + fn get(&self) -> Option<&T> { + use std::any::{Any, TypeId}; + + match TypeId::of::() { + #( x if x == TypeId::of::<#field_types>() => Any::downcast_ref(&self.#field_names) ),*, + _ => None, + } + } + + fn get_any(&self, t: std::any::TypeId) -> &dyn std::any::Any { + use std::any::{Any, TypeId}; + + match t { + #( x if x == TypeId::of::<#field_types>() => &self.#field_names ),*, + _ => self, + } + } + } + } + }, + ) } - /// Implements required traits and creates `Fork` structs for `ChainSpec` custom parameter group. pub fn group_derive(ast: &DeriveInput) -> proc_macro::TokenStream { - derive(ast, |crate_name, name, generics: &syn::Generics, field_names, field_types, _fields| { - let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); - let fork_name = Ident::new(&format!("{}Fork", name), Span::call_site()); - - let fork_fields = generate_fork_fields(&crate_name, &field_names, &field_types); - let to_fork = generate_base_to_fork(&fork_name, &field_names); - let combine_with = generate_combine_with(&field_names); - let to_base = generate_fork_to_base(name, &field_names); - - quote! 
{ - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecExtension)] - pub struct #fork_name #ty_generics #where_clause { - #fork_fields - } - - impl #impl_generics #crate_name::Group for #name #ty_generics #where_clause { - type Fork = #fork_name #ty_generics; - - fn to_fork(self) -> Self::Fork { - use #crate_name::Group; - #to_fork - } - } - - impl #impl_generics #crate_name::Fork for #fork_name #ty_generics #where_clause { - type Base = #name #ty_generics; - - fn combine_with(&mut self, other: Self) { - use #crate_name::Fork; - #combine_with - } - - fn to_base(self) -> Option { - use #crate_name::Fork; - #to_base - } - } - } - }) + derive( + ast, + |crate_name, name, generics: &syn::Generics, field_names, field_types, _fields| { + let (impl_generics, ty_generics, where_clause) = generics.split_for_impl(); + let fork_name = Ident::new(&format!("{}Fork", name), Span::call_site()); + + let fork_fields = generate_fork_fields(&crate_name, &field_names, &field_types); + let to_fork = generate_base_to_fork(&fork_name, &field_names); + let combine_with = generate_combine_with(&field_names); + let to_base = generate_fork_to_base(name, &field_names); + + quote! 
{ + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecExtension)] + pub struct #fork_name #ty_generics #where_clause { + #fork_fields + } + + impl #impl_generics #crate_name::Group for #name #ty_generics #where_clause { + type Fork = #fork_name #ty_generics; + + fn to_fork(self) -> Self::Fork { + use #crate_name::Group; + #to_fork + } + } + + impl #impl_generics #crate_name::Fork for #fork_name #ty_generics #where_clause { + type Base = #name #ty_generics; + + fn combine_with(&mut self, other: Self) { + use #crate_name::Fork; + #combine_with + } + + fn to_base(self) -> Option { + use #crate_name::Fork; + #to_base + } + } + } + }, + ) } pub fn derive( - ast: &DeriveInput, - derive: impl Fn( - &Ident, &Ident, &syn::Generics, Vec<&Ident>, Vec<&syn::Type>, &syn::FieldsNamed, - ) -> TokenStream, + ast: &DeriveInput, + derive: impl Fn( + &Ident, + &Ident, + &syn::Generics, + Vec<&Ident>, + Vec<&syn::Type>, + &syn::FieldsNamed, + ) -> TokenStream, ) -> proc_macro::TokenStream { - let err = || { - let err = Error::new( - Span::call_site(), - "ChainSpecGroup is only available for structs with named fields." - ).to_compile_error(); - quote!( #err ).into() - }; - - let data = match &ast.data { - syn::Data::Struct(ref data) => data, - _ => return err(), - }; - - let fields = match &data.fields { - syn::Fields::Named(ref named) => named, - _ => return err(), - }; - - const PROOF: &str = "CARGO_PKG_NAME always defined when compiling; qed"; - let name = &ast.ident; - let crate_name = match crate_name(CRATE_NAME) { - Ok(chain_spec_name) => chain_spec_name, - Err(e) => if std::env::var("CARGO_PKG_NAME").expect(PROOF) == CRATE_NAME { - // we return the name of the crate here instead of `crate` to support doc tests. 
- CRATE_NAME.replace("-", "_") - } else { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - return quote!( #err ).into() - }, - }; - let crate_name = Ident::new(&crate_name, Span::call_site()); - let field_names = fields.named.iter().flat_map(|x| x.ident.as_ref()).collect::>(); - let field_types = fields.named.iter().map(|x| &x.ty).collect::>(); - - derive(&crate_name, name, &ast.generics, field_names, field_types, fields).into() + let err = || { + let err = Error::new( + Span::call_site(), + "ChainSpecGroup is only available for structs with named fields.", + ) + .to_compile_error(); + quote!( #err ).into() + }; + + let data = match &ast.data { + syn::Data::Struct(ref data) => data, + _ => return err(), + }; + + let fields = match &data.fields { + syn::Fields::Named(ref named) => named, + _ => return err(), + }; + + const PROOF: &str = "CARGO_PKG_NAME always defined when compiling; qed"; + let name = &ast.ident; + let crate_name = match crate_name(CRATE_NAME) { + Ok(chain_spec_name) => chain_spec_name, + Err(e) => { + if std::env::var("CARGO_PKG_NAME").expect(PROOF) == CRATE_NAME { + // we return the name of the crate here instead of `crate` to support doc tests. + CRATE_NAME.replace("-", "_") + } else { + let err = Error::new(Span::call_site(), &e).to_compile_error(); + return quote!( #err ).into(); + } + } + }; + let crate_name = Ident::new(&crate_name, Span::call_site()); + let field_names = fields + .named + .iter() + .flat_map(|x| x.ident.as_ref()) + .collect::>(); + let field_types = fields.named.iter().map(|x| &x.ty).collect::>(); + + derive( + &crate_name, + name, + &ast.generics, + field_names, + field_types, + fields, + ) + .into() } -fn generate_fork_fields( - crate_name: &Ident, - names: &[&Ident], - types: &[&syn::Type], -) -> TokenStream { - let crate_name = std::iter::repeat(crate_name); - quote! 
{ - #( pub #names: Option<<#types as #crate_name::Group>::Fork>, )* - } +fn generate_fork_fields(crate_name: &Ident, names: &[&Ident], types: &[&syn::Type]) -> TokenStream { + let crate_name = std::iter::repeat(crate_name); + quote! { + #( pub #names: Option<<#types as #crate_name::Group>::Fork>, )* + } } -fn generate_base_to_fork( - fork_name: &Ident, - names: &[&Ident], -) -> TokenStream { - let names2 = names.to_vec(); - - quote!{ - #fork_name { - #( #names: Some(self.#names2.to_fork()), )* - } - } +fn generate_base_to_fork(fork_name: &Ident, names: &[&Ident]) -> TokenStream { + let names2 = names.to_vec(); + + quote! { + #fork_name { + #( #names: Some(self.#names2.to_fork()), )* + } + } } -fn generate_combine_with( - names: &[&Ident], -) -> TokenStream { - let names2 = names.to_vec(); +fn generate_combine_with(names: &[&Ident]) -> TokenStream { + let names2 = names.to_vec(); - quote!{ - #( self.#names.combine_with(other.#names2); )* - } + quote! { + #( self.#names.combine_with(other.#names2); )* + } } -fn generate_fork_to_base( - fork: &Ident, - names: &[&Ident], -) -> TokenStream { - let names2 = names.to_vec(); - - quote!{ - Some(#fork { - #( #names: self.#names2?.to_base()?, )* - }) - } +fn generate_fork_to_base(fork: &Ident, names: &[&Ident]) -> TokenStream { + let names2 = names.to_vec(); + + quote! 
{ + Some(#fork { + #( #names: self.#names2?.to_base()?, )* + }) + } } diff --git a/client/chain-spec/derive/src/lib.rs b/client/chain-spec/derive/src/lib.rs index 0dc053f7e3..5a23c7703c 100644 --- a/client/chain-spec/derive/src/lib.rs +++ b/client/chain-spec/derive/src/lib.rs @@ -22,16 +22,16 @@ use proc_macro::TokenStream; #[proc_macro_derive(ChainSpecGroup)] pub fn group_derive(input: TokenStream) -> TokenStream { - match syn::parse(input) { - Ok(ast) => impls::group_derive(&ast), - Err(e) => e.to_compile_error().into(), - } + match syn::parse(input) { + Ok(ast) => impls::group_derive(&ast), + Err(e) => e.to_compile_error().into(), + } } #[proc_macro_derive(ChainSpecExtension, attributes(forks))] pub fn extensions_derive(input: TokenStream) -> TokenStream { - match syn::parse(input) { - Ok(ast) => impls::extension_derive(&ast), - Err(e) => e.to_compile_error().into(), - } + match syn::parse(input) { + Ok(ast) => impls::extension_derive(&ast), + Err(e) => e.to_compile_error().into(), + } } diff --git a/client/chain-spec/src/chain_spec.rs b/client/chain-spec/src/chain_spec.rs index 9f3a10ee89..c01a41ba3f 100644 --- a/client/chain-spec/src/chain_spec.rs +++ b/client/chain-spec/src/chain_spec.rs @@ -16,89 +16,98 @@ //! Substrate chain configurations. 
+use crate::{extension::GetExtension, ChainType, Properties, RuntimeGenesis}; +use sc_network::config::MultiaddrWithPeerId; +use sc_telemetry::TelemetryEndpoints; +use serde::{Deserialize, Serialize}; +use serde_json as json; +use sp_core::storage::{ChildInfo, Storage, StorageChild, StorageData, StorageKey}; +use sp_runtime::BuildStorage; use std::borrow::Cow; use std::collections::HashMap; use std::fs::File; use std::path::PathBuf; use std::sync::Arc; -use serde::{Serialize, Deserialize}; -use sp_core::storage::{StorageKey, StorageData, ChildInfo, Storage, StorageChild}; -use sp_runtime::BuildStorage; -use serde_json as json; -use crate::{RuntimeGenesis, ChainType, extension::GetExtension, Properties}; -use sc_network::config::MultiaddrWithPeerId; -use sc_telemetry::TelemetryEndpoints; enum GenesisSource { - File(PathBuf), - Binary(Cow<'static, [u8]>), - Factory(Arc G + Send + Sync>), + File(PathBuf), + Binary(Cow<'static, [u8]>), + Factory(Arc G + Send + Sync>), } impl Clone for GenesisSource { - fn clone(&self) -> Self { - match *self { - GenesisSource::File(ref path) => GenesisSource::File(path.clone()), - GenesisSource::Binary(ref d) => GenesisSource::Binary(d.clone()), - GenesisSource::Factory(ref f) => GenesisSource::Factory(f.clone()), - } - } + fn clone(&self) -> Self { + match *self { + GenesisSource::File(ref path) => GenesisSource::File(path.clone()), + GenesisSource::Binary(ref d) => GenesisSource::Binary(d.clone()), + GenesisSource::Factory(ref f) => GenesisSource::Factory(f.clone()), + } + } } impl GenesisSource { - fn resolve(&self) -> Result, String> { - #[derive(Serialize, Deserialize)] - struct GenesisContainer { - genesis: Genesis, - } - - match self { - GenesisSource::File(path) => { - let file = File::open(path) - .map_err(|e| format!("Error opening spec file: {}", e))?; - let genesis: GenesisContainer = json::from_reader(file) - .map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(genesis.genesis) - }, - GenesisSource::Binary(buf) 
=> { - let genesis: GenesisContainer = json::from_reader(buf.as_ref()) - .map_err(|e| format!("Error parsing embedded file: {}", e))?; - Ok(genesis.genesis) - }, - GenesisSource::Factory(f) => Ok(Genesis::Runtime(f())), - } - } + fn resolve(&self) -> Result, String> { + #[derive(Serialize, Deserialize)] + struct GenesisContainer { + genesis: Genesis, + } + + match self { + GenesisSource::File(path) => { + let file = + File::open(path).map_err(|e| format!("Error opening spec file: {}", e))?; + let genesis: GenesisContainer = json::from_reader(file) + .map_err(|e| format!("Error parsing spec file: {}", e))?; + Ok(genesis.genesis) + } + GenesisSource::Binary(buf) => { + let genesis: GenesisContainer = json::from_reader(buf.as_ref()) + .map_err(|e| format!("Error parsing embedded file: {}", e))?; + Ok(genesis.genesis) + } + GenesisSource::Factory(f) => Ok(Genesis::Runtime(f())), + } + } } impl BuildStorage for ChainSpec { - fn build_storage(&self) -> Result { - match self.genesis.resolve()? { - Genesis::Runtime(gc) => gc.build_storage(), - Genesis::Raw(RawGenesis { top: map, children: children_map }) => Ok(Storage { - top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - children: children_map.into_iter().map(|(sk, child_content)| { - let child_info = ChildInfo::resolve_child_info( - child_content.child_type, - child_content.child_info.as_slice(), - ).expect("chain spec contains correct content").to_owned(); - ( - sk.0, - StorageChild { - data: child_content.data.into_iter().map(|(k, v)| (k.0, v.0)).collect(), - child_info, - }, - ) - }).collect(), - }), - } - } - - fn assimilate_storage( - &self, - _: &mut Storage, - ) -> Result<(), String> { - Err("`assimilate_storage` not implemented for `ChainSpec`.".into()) - } + fn build_storage(&self) -> Result { + match self.genesis.resolve()? 
{ + Genesis::Runtime(gc) => gc.build_storage(), + Genesis::Raw(RawGenesis { + top: map, + children: children_map, + }) => Ok(Storage { + top: map.into_iter().map(|(k, v)| (k.0, v.0)).collect(), + children: children_map + .into_iter() + .map(|(sk, child_content)| { + let child_info = ChildInfo::resolve_child_info( + child_content.child_type, + child_content.child_info.as_slice(), + ) + .expect("chain spec contains correct content") + .to_owned(); + ( + sk.0, + StorageChild { + data: child_content + .data + .into_iter() + .map(|(k, v)| (k.0, v.0)) + .collect(), + child_info, + }, + ) + }) + .collect(), + }), + } + } + + fn assimilate_storage(&self, _: &mut Storage) -> Result<(), String> { + Err("`assimilate_storage` not implemented for `ChainSpec`.".into()) + } } type GenesisStorage = HashMap; @@ -107,9 +116,9 @@ type GenesisStorage = HashMap; #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] struct ChildRawStorage { - data: GenesisStorage, - child_info: Vec, - child_type: u32, + data: GenesisStorage, + child_info: Vec, + child_type: u32, } #[derive(Serialize, Deserialize)] @@ -117,16 +126,16 @@ struct ChildRawStorage { #[serde(deny_unknown_fields)] /// Storage content for genesis block. struct RawGenesis { - top: GenesisStorage, - children: HashMap, + top: GenesisStorage, + children: HashMap, } #[derive(Serialize, Deserialize)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] enum Genesis { - Runtime(G), - Raw(RawGenesis), + Runtime(G), + Raw(RawGenesis), } /// A configuration of a client. Does not include runtime storage initialization. @@ -134,20 +143,20 @@ enum Genesis { #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] struct ClientSpec { - name: String, - id: String, - #[serde(default)] - chain_type: ChainType, - boot_nodes: Vec, - telemetry_endpoints: Option, - protocol_id: Option, - properties: Option, - #[serde(flatten)] - extensions: E, - // Never used, left only for backward compatibility. 
- consensus_engine: (), - #[serde(skip_serializing)] - genesis: serde::de::IgnoredAny, + name: String, + id: String, + #[serde(default)] + chain_type: ChainType, + boot_nodes: Vec, + telemetry_endpoints: Option, + protocol_id: Option, + properties: Option, + #[serde(flatten)] + extensions: E, + // Never used, left only for backward compatibility. + consensus_engine: (), + #[serde(skip_serializing)] + genesis: serde::de::IgnoredAny, } /// A type denoting empty extensions. @@ -157,267 +166,274 @@ pub type NoExtension = Option<()>; /// A configuration of a chain. Can be used to build a genesis block. pub struct ChainSpec { - client_spec: ClientSpec, - genesis: GenesisSource, + client_spec: ClientSpec, + genesis: GenesisSource, } impl Clone for ChainSpec { - fn clone(&self) -> Self { - ChainSpec { - client_spec: self.client_spec.clone(), - genesis: self.genesis.clone(), - } - } + fn clone(&self) -> Self { + ChainSpec { + client_spec: self.client_spec.clone(), + genesis: self.genesis.clone(), + } + } } impl ChainSpec { - /// A list of bootnode addresses. - pub fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { - &self.client_spec.boot_nodes - } - - /// Spec name. - pub fn name(&self) -> &str { - &self.client_spec.name - } - - /// Spec id. - pub fn id(&self) -> &str { - &self.client_spec.id - } - - /// Telemetry endpoints (if any) - pub fn telemetry_endpoints(&self) -> &Option { - &self.client_spec.telemetry_endpoints - } - - /// Network protocol id. - pub fn protocol_id(&self) -> Option<&str> { - self.client_spec.protocol_id.as_ref().map(String::as_str) - } - - /// Additional loosly-typed properties of the chain. - /// - /// Returns an empty JSON object if 'properties' not defined in config - pub fn properties(&self) -> Properties { - self.client_spec.properties.as_ref().unwrap_or(&json::map::Map::new()).clone() - } - - /// Add a bootnode to the list. 
- pub fn add_boot_node(&mut self, addr: MultiaddrWithPeerId) { - self.client_spec.boot_nodes.push(addr) - } - - /// Returns a reference to defined chain spec extensions. - pub fn extensions(&self) -> &E { - &self.client_spec.extensions - } - - /// Create hardcoded spec. - pub fn from_genesis G + 'static + Send + Sync>( - name: &str, - id: &str, - chain_type: ChainType, - constructor: F, - boot_nodes: Vec, - telemetry_endpoints: Option, - protocol_id: Option<&str>, - properties: Option, - extensions: E, - ) -> Self { - let client_spec = ClientSpec { - name: name.to_owned(), - id: id.to_owned(), - chain_type, - boot_nodes, - telemetry_endpoints, - protocol_id: protocol_id.map(str::to_owned), - properties, - extensions, - consensus_engine: (), - genesis: Default::default(), - }; - - ChainSpec { - client_spec, - genesis: GenesisSource::Factory(Arc::new(constructor)), - } - } - - /// Type of the chain. - fn chain_type(&self) -> ChainType { - self.client_spec.chain_type.clone() - } + /// A list of bootnode addresses. + pub fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { + &self.client_spec.boot_nodes + } + + /// Spec name. + pub fn name(&self) -> &str { + &self.client_spec.name + } + + /// Spec id. + pub fn id(&self) -> &str { + &self.client_spec.id + } + + /// Telemetry endpoints (if any) + pub fn telemetry_endpoints(&self) -> &Option { + &self.client_spec.telemetry_endpoints + } + + /// Network protocol id. + pub fn protocol_id(&self) -> Option<&str> { + self.client_spec.protocol_id.as_ref().map(String::as_str) + } + + /// Additional loosly-typed properties of the chain. + /// + /// Returns an empty JSON object if 'properties' not defined in config + pub fn properties(&self) -> Properties { + self.client_spec + .properties + .as_ref() + .unwrap_or(&json::map::Map::new()) + .clone() + } + + /// Add a bootnode to the list. 
+ pub fn add_boot_node(&mut self, addr: MultiaddrWithPeerId) { + self.client_spec.boot_nodes.push(addr) + } + + /// Returns a reference to defined chain spec extensions. + pub fn extensions(&self) -> &E { + &self.client_spec.extensions + } + + /// Create hardcoded spec. + pub fn from_genesis G + 'static + Send + Sync>( + name: &str, + id: &str, + chain_type: ChainType, + constructor: F, + boot_nodes: Vec, + telemetry_endpoints: Option, + protocol_id: Option<&str>, + properties: Option, + extensions: E, + ) -> Self { + let client_spec = ClientSpec { + name: name.to_owned(), + id: id.to_owned(), + chain_type, + boot_nodes, + telemetry_endpoints, + protocol_id: protocol_id.map(str::to_owned), + properties, + extensions, + consensus_engine: (), + genesis: Default::default(), + }; + + ChainSpec { + client_spec, + genesis: GenesisSource::Factory(Arc::new(constructor)), + } + } + + /// Type of the chain. + fn chain_type(&self) -> ChainType { + self.client_spec.chain_type.clone() + } } impl ChainSpec { - /// Parse json content into a `ChainSpec` - pub fn from_json_bytes(json: impl Into>) -> Result { - let json = json.into(); - let client_spec = json::from_slice(json.as_ref()) - .map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { - client_spec, - genesis: GenesisSource::Binary(json), - }) - } - - /// Parse json file into a `ChainSpec` - pub fn from_json_file(path: PathBuf) -> Result { - let file = File::open(&path) - .map_err(|e| format!("Error opening spec file: {}", e))?; - let client_spec = json::from_reader(file) - .map_err(|e| format!("Error parsing spec file: {}", e))?; - Ok(ChainSpec { - client_spec, - genesis: GenesisSource::File(path), - }) - } + /// Parse json content into a `ChainSpec` + pub fn from_json_bytes(json: impl Into>) -> Result { + let json = json.into(); + let client_spec = json::from_slice(json.as_ref()) + .map_err(|e| format!("Error parsing spec file: {}", e))?; + Ok(ChainSpec { + client_spec, + genesis: 
GenesisSource::Binary(json), + }) + } + + /// Parse json file into a `ChainSpec` + pub fn from_json_file(path: PathBuf) -> Result { + let file = File::open(&path).map_err(|e| format!("Error opening spec file: {}", e))?; + let client_spec = + json::from_reader(file).map_err(|e| format!("Error parsing spec file: {}", e))?; + Ok(ChainSpec { + client_spec, + genesis: GenesisSource::File(path), + }) + } } impl ChainSpec { - /// Dump to json string. - pub fn as_json(&self, raw: bool) -> Result { - #[derive(Serialize, Deserialize)] - struct Container { - #[serde(flatten)] - client_spec: ClientSpec, - genesis: Genesis, - - }; - let genesis = match (raw, self.genesis.resolve()?) { - (true, Genesis::Runtime(g)) => { - let storage = g.build_storage()?; - let top = storage.top.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(); - let children = storage.children.into_iter() - .map(|(sk, child)| { - let info = child.child_info.as_ref(); - let (info, ci_type) = info.info(); - ( - StorageKey(sk), - ChildRawStorage { - data: child.data.into_iter() - .map(|(k, v)| (StorageKey(k), StorageData(v))) - .collect(), - child_info: info.to_vec(), - child_type: ci_type, - }, - )}) - .collect(); - - Genesis::Raw(RawGenesis { top, children }) - }, - (_, genesis) => genesis, - }; - let container = Container { - client_spec: self.client_spec.clone(), - genesis, - }; - json::to_string_pretty(&container) - .map_err(|e| format!("Error generating spec json: {}", e)) - } + /// Dump to json string. + pub fn as_json(&self, raw: bool) -> Result { + #[derive(Serialize, Deserialize)] + struct Container { + #[serde(flatten)] + client_spec: ClientSpec, + genesis: Genesis, + }; + let genesis = match (raw, self.genesis.resolve()?) 
{ + (true, Genesis::Runtime(g)) => { + let storage = g.build_storage()?; + let top = storage + .top + .into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(); + let children = storage + .children + .into_iter() + .map(|(sk, child)| { + let info = child.child_info.as_ref(); + let (info, ci_type) = info.info(); + ( + StorageKey(sk), + ChildRawStorage { + data: child + .data + .into_iter() + .map(|(k, v)| (StorageKey(k), StorageData(v))) + .collect(), + child_info: info.to_vec(), + child_type: ci_type, + }, + ) + }) + .collect(); + + Genesis::Raw(RawGenesis { top, children }) + } + (_, genesis) => genesis, + }; + let container = Container { + client_spec: self.client_spec.clone(), + genesis, + }; + json::to_string_pretty(&container).map_err(|e| format!("Error generating spec json: {}", e)) + } } impl crate::ChainSpec for ChainSpec where - G: RuntimeGenesis, - E: GetExtension + serde::Serialize + Clone + Send, + G: RuntimeGenesis, + E: GetExtension + serde::Serialize + Clone + Send, { - fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { - ChainSpec::boot_nodes(self) - } + fn boot_nodes(&self) -> &[MultiaddrWithPeerId] { + ChainSpec::boot_nodes(self) + } - fn name(&self) -> &str { - ChainSpec::name(self) - } + fn name(&self) -> &str { + ChainSpec::name(self) + } - fn id(&self) -> &str { - ChainSpec::id(self) - } + fn id(&self) -> &str { + ChainSpec::id(self) + } - fn chain_type(&self) -> ChainType { - ChainSpec::chain_type(self) - } + fn chain_type(&self) -> ChainType { + ChainSpec::chain_type(self) + } - fn telemetry_endpoints(&self) -> &Option { - ChainSpec::telemetry_endpoints(self) - } + fn telemetry_endpoints(&self) -> &Option { + ChainSpec::telemetry_endpoints(self) + } - fn protocol_id(&self) -> Option<&str> { - ChainSpec::protocol_id(self) - } + fn protocol_id(&self) -> Option<&str> { + ChainSpec::protocol_id(self) + } - fn properties(&self) -> Properties { - ChainSpec::properties(self) - } + fn properties(&self) -> Properties { + 
ChainSpec::properties(self) + } - fn add_boot_node(&mut self, addr: MultiaddrWithPeerId) { - ChainSpec::add_boot_node(self, addr) - } + fn add_boot_node(&mut self, addr: MultiaddrWithPeerId) { + ChainSpec::add_boot_node(self, addr) + } - fn extensions(&self) -> &dyn GetExtension { - ChainSpec::extensions(self) as &dyn GetExtension - } + fn extensions(&self) -> &dyn GetExtension { + ChainSpec::extensions(self) as &dyn GetExtension + } - fn as_json(&self, raw: bool) -> Result { - ChainSpec::as_json(self, raw) - } + fn as_json(&self, raw: bool) -> Result { + ChainSpec::as_json(self, raw) + } - fn as_storage_builder(&self) -> &dyn BuildStorage { - self - } + fn as_storage_builder(&self) -> &dyn BuildStorage { + self + } } #[cfg(test)] mod tests { - use super::*; - - #[derive(Debug, Serialize, Deserialize)] - struct Genesis(HashMap); - - impl BuildStorage for Genesis { - fn assimilate_storage( - &self, - storage: &mut Storage, - ) -> Result<(), String> { - storage.top.extend( - self.0.iter().map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())) - ); - Ok(()) - } - } - - type TestSpec = ChainSpec; - - #[test] - fn should_deserialize_example_chain_spec() { - let spec1 = TestSpec::from_json_bytes(Cow::Owned( - include_bytes!("../res/chain_spec.json").to_vec() - )).unwrap(); - let spec2 = TestSpec::from_json_file( - PathBuf::from("./res/chain_spec.json") - ).unwrap(); - - assert_eq!(spec1.as_json(false), spec2.as_json(false)); - assert_eq!(spec2.chain_type(), ChainType::Live) - } - - #[derive(Debug, Serialize, Deserialize)] - #[serde(rename_all = "camelCase")] - struct Extension1 { - my_property: String, - } - - type TestSpec2 = ChainSpec; - - #[test] - fn should_deserialize_chain_spec_with_extensions() { - let spec = TestSpec2::from_json_bytes(Cow::Owned( - include_bytes!("../res/chain_spec2.json").to_vec() - )).unwrap(); - - assert_eq!(spec.extensions().my_property, "Test Extension"); - } + use super::*; + + #[derive(Debug, Serialize, Deserialize)] + struct 
Genesis(HashMap); + + impl BuildStorage for Genesis { + fn assimilate_storage(&self, storage: &mut Storage) -> Result<(), String> { + storage.top.extend( + self.0 + .iter() + .map(|(a, b)| (a.clone().into_bytes(), b.clone().into_bytes())), + ); + Ok(()) + } + } + + type TestSpec = ChainSpec; + + #[test] + fn should_deserialize_example_chain_spec() { + let spec1 = TestSpec::from_json_bytes(Cow::Owned( + include_bytes!("../res/chain_spec.json").to_vec(), + )) + .unwrap(); + let spec2 = TestSpec::from_json_file(PathBuf::from("./res/chain_spec.json")).unwrap(); + + assert_eq!(spec1.as_json(false), spec2.as_json(false)); + assert_eq!(spec2.chain_type(), ChainType::Live) + } + + #[derive(Debug, Serialize, Deserialize)] + #[serde(rename_all = "camelCase")] + struct Extension1 { + my_property: String, + } + + type TestSpec2 = ChainSpec; + + #[test] + fn should_deserialize_chain_spec_with_extensions() { + let spec = TestSpec2::from_json_bytes(Cow::Owned( + include_bytes!("../res/chain_spec2.json").to_vec(), + )) + .unwrap(); + + assert_eq!(spec.extensions().my_property, "Test Extension"); + } } diff --git a/client/chain-spec/src/extension.rs b/client/chain-spec/src/extension.rs index c0338203eb..cc2eb2a787 100644 --- a/client/chain-spec/src/extension.rs +++ b/client/chain-spec/src/extension.rs @@ -16,22 +16,22 @@ //! Chain Spec extensions helpers. +use std::any::{Any, TypeId}; use std::fmt::Debug; -use std::any::{TypeId, Any}; use std::collections::BTreeMap; -use serde::{Serialize, Deserialize, de::DeserializeOwned}; +use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// A `ChainSpec` extension. /// /// This trait is implemented automatically by `ChainSpecGroup` macro. pub trait Group: Clone + Sized { - /// An associated type containing fork definition. - type Fork: Fork; + /// An associated type containing fork definition. + type Fork: Fork; - /// Convert to fork type. - fn to_fork(self) -> Self::Fork; + /// Convert to fork type. 
+ fn to_fork(self) -> Self::Fork; } /// A `ChainSpec` extension fork definition. @@ -42,17 +42,17 @@ pub trait Group: Clone + Sized { /// The forks can be combined (summed up) to specify /// a complete set of parameters pub trait Fork: Serialize + DeserializeOwned + Clone + Sized { - /// A base `Group` type. - type Base: Group; + /// A base `Group` type. + type Base: Group; - /// Combine with another struct. - /// - /// All parameters set in `other` should override the - /// ones in the current struct. - fn combine_with(&mut self, other: Self); + /// Combine with another struct. + /// + /// All parameters set in `other` should override the + /// ones in the current struct. + fn combine_with(&mut self, other: Self); - /// Attempt to convert to the base type if all parameters are set. - fn to_base(self) -> Option; + /// Attempt to convert to the base type if all parameters are set. + fn to_base(self) -> Option; } macro_rules! impl_trivial { @@ -88,29 +88,29 @@ macro_rules! impl_trivial { impl_trivial!((), u8, u16, u32, u64, usize, String, Vec); impl Group for Option { - type Fork = Option; + type Fork = Option; - fn to_fork(self) -> Self::Fork { - self.map(|a| a.to_fork()) - } + fn to_fork(self) -> Self::Fork { + self.map(|a| a.to_fork()) + } } impl Fork for Option { - type Base = Option; - - fn combine_with(&mut self, other: Self) { - *self = match (self.take(), other) { - (Some(mut a), Some(b)) => { - a.combine_with(b); - Some(a) - }, - (a, b) => a.or(b), - }; - } - - fn to_base(self) -> Option { - self.map(|x| x.to_base()) - } + type Base = Option; + + fn combine_with(&mut self, other: Self) { + *self = match (self.take(), other) { + (Some(mut a), Some(b)) => { + a.combine_with(b); + Some(a) + } + (a, b) => a.or(b), + }; + } + + fn to_base(self) -> Option { + self.map(|x| x.to_base()) + } } /// A collection of `ChainSpec` extensions. 
@@ -118,206 +118,219 @@ impl Fork for Option { /// This type can be passed around and allows the core /// modules to request a strongly-typed, but optional configuration. pub trait Extension: Serialize + DeserializeOwned + Clone { - type Forks: IsForks; - - /// Get an extension of specific type. - fn get(&self) -> Option<&T>; - /// Get an extension of specific type as refernce to `Any` - fn get_any(&self, t: TypeId) -> &dyn Any; - - /// Get forkable extensions of specific type. - fn forks(&self) -> Option> where - BlockNumber: Ord + Clone + 'static, - T: Group + 'static, - ::Extension: Extension, - <::Extension as Group>::Fork: Extension, - { - self.get::::Extension>>()? - .for_type() - } + type Forks: IsForks; + + /// Get an extension of specific type. + fn get(&self) -> Option<&T>; + /// Get an extension of specific type as refernce to `Any` + fn get_any(&self, t: TypeId) -> &dyn Any; + + /// Get forkable extensions of specific type. + fn forks(&self) -> Option> + where + BlockNumber: Ord + Clone + 'static, + T: Group + 'static, + ::Extension: Extension, + <::Extension as Group>::Fork: Extension, + { + self.get::::Extension>>()? 
+ .for_type() + } } impl Extension for crate::NoExtension { - type Forks = Self; - - fn get(&self) -> Option<&T> { None } - fn get_any(&self, _t: TypeId) -> &dyn Any { self } + type Forks = Self; + + fn get(&self) -> Option<&T> { + None + } + fn get_any(&self, _t: TypeId) -> &dyn Any { + self + } } pub trait IsForks { - type BlockNumber: Ord + 'static; - type Extension: Group + 'static; + type BlockNumber: Ord + 'static; + type Extension: Group + 'static; } impl IsForks for Option<()> { - type BlockNumber = u64; - type Extension = Self; + type BlockNumber = u64; + type Extension = Self; } #[derive(Clone, Debug, Serialize, Deserialize, PartialEq)] #[serde(deny_unknown_fields)] pub struct Forks { - forks: BTreeMap, - #[serde(flatten)] - base: T, + forks: BTreeMap, + #[serde(flatten)] + base: T, } impl Default for Forks { - fn default() -> Self { - Self { - base: Default::default(), - forks: Default::default(), - } - } + fn default() -> Self { + Self { + base: Default::default(), + forks: Default::default(), + } + } } -impl Forks where - T::Fork: Debug, +impl Forks +where + T::Fork: Debug, { - /// Create new fork definition given the base and the forks. - pub fn new(base: T, forks: BTreeMap) -> Self { - Self { base, forks } - } - - /// Return a set of parameters for `Group` including all forks up to `block` (inclusive). - pub fn at_block(&self, block: B) -> T { - let mut start = self.base.clone().to_fork(); - - for (_, fork) in self.forks.range(..=block) { - start.combine_with(fork.clone()); - } - - start - .to_base() - .expect("We start from the `base` object, so it's always fully initialized; qed") - } + /// Create new fork definition given the base and the forks. + pub fn new(base: T, forks: BTreeMap) -> Self { + Self { base, forks } + } + + /// Return a set of parameters for `Group` including all forks up to `block` (inclusive). 
+ pub fn at_block(&self, block: B) -> T { + let mut start = self.base.clone().to_fork(); + + for (_, fork) in self.forks.range(..=block) { + start.combine_with(fork.clone()); + } + + start + .to_base() + .expect("We start from the `base` object, so it's always fully initialized; qed") + } } -impl IsForks for Forks where - B: Ord + 'static, - T: Group + 'static, +impl IsForks for Forks +where + B: Ord + 'static, + T: Group + 'static, { - type BlockNumber = B; - type Extension = T; + type BlockNumber = B; + type Extension = T; } -impl Forks where - T::Fork: Extension, +impl Forks +where + T::Fork: Extension, { - /// Get forks definition for a subset of this extension. - /// - /// Returns the `Forks` struct, but limited to a particular type - /// within the extension. - pub fn for_type(&self) -> Option> where - X: Group + 'static, - { - let base = self.base.get::()?.clone(); - let forks = self.forks.iter().filter_map(|(k, v)| { - Some((k.clone(), v.get::>()?.clone()?)) - }).collect(); - - Some(Forks { - base, - forks, - }) - } + /// Get forks definition for a subset of this extension. + /// + /// Returns the `Forks` struct, but limited to a particular type + /// within the extension. 
+ pub fn for_type(&self) -> Option> + where + X: Group + 'static, + { + let base = self.base.get::()?.clone(); + let forks = self + .forks + .iter() + .filter_map(|(k, v)| Some((k.clone(), v.get::>()?.clone()?))) + .collect(); + + Some(Forks { base, forks }) + } } -impl Extension for Forks where - B: Serialize + DeserializeOwned + Ord + Clone + 'static, - E: Extension + Group + 'static, +impl Extension for Forks +where + B: Serialize + DeserializeOwned + Ord + Clone + 'static, + E: Extension + Group + 'static, { - type Forks = Self; - - fn get(&self) -> Option<&T> { - match TypeId::of::() { - x if x == TypeId::of::() => Any::downcast_ref(&self.base), - _ => self.base.get(), - } - } - - fn get_any(&self, t: TypeId) -> &dyn Any { - match t { - x if x == TypeId::of::() => &self.base, - _ => self.base.get_any(t), - } - } - - fn forks(&self) -> Option> where - BlockNumber: Ord + Clone + 'static, - T: Group + 'static, - ::Extension: Extension, - <::Extension as Group>::Fork: Extension, - { - if TypeId::of::() == TypeId::of::() { - Any::downcast_ref(&self.for_type::()?).cloned() - } else { - self.get::::Extension>>()? - .for_type() - } - } + type Forks = Self; + + fn get(&self) -> Option<&T> { + match TypeId::of::() { + x if x == TypeId::of::() => Any::downcast_ref(&self.base), + _ => self.base.get(), + } + } + + fn get_any(&self, t: TypeId) -> &dyn Any { + match t { + x if x == TypeId::of::() => &self.base, + _ => self.base.get_any(t), + } + } + + fn forks(&self) -> Option> + where + BlockNumber: Ord + Clone + 'static, + T: Group + 'static, + ::Extension: Extension, + <::Extension as Group>::Fork: Extension, + { + if TypeId::of::() == TypeId::of::() { + Any::downcast_ref(&self.for_type::()?).cloned() + } else { + self.get::::Extension>>()? + .for_type() + } + } } /// A subset if the `Extension` trait that only allows for quering extensions. pub trait GetExtension { - /// Get an extension of specific type. 
- fn get_any(&self, t: TypeId) -> &dyn Any; + /// Get an extension of specific type. + fn get_any(&self, t: TypeId) -> &dyn Any; } -impl GetExtension for E { - fn get_any(&self, t: TypeId) -> &dyn Any { - Extension::get_any(self, t) - } +impl GetExtension for E { + fn get_any(&self, t: TypeId) -> &dyn Any { + Extension::get_any(self, t) + } } /// Helper function that queries an extension by type from `GetExtension` /// trait object. pub fn get_extension(e: &dyn GetExtension) -> Option<&T> { - Any::downcast_ref(GetExtension::get_any(e, TypeId::of::())) + Any::downcast_ref(GetExtension::get_any(e, TypeId::of::())) } #[cfg(test)] mod tests { - use super::*; - use sc_chain_spec_derive::{ChainSpecGroup, ChainSpecExtension}; - // Make the proc macro work for tests and doc tests. - use crate as sc_chain_spec; - - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup)] - #[serde(deny_unknown_fields)] - pub struct Extension1 { - pub test: u64, - } - - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup)] - #[serde(deny_unknown_fields)] - pub struct Extension2 { - pub test: u8, - } - - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension)] - #[serde(deny_unknown_fields)] - pub struct Extensions { - pub ext1: Extension1, - pub ext2: Extension2, - } - - #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecExtension)] - #[serde(deny_unknown_fields)] - pub struct Ext2 { - #[serde(flatten)] - ext1: Extension1, - #[forks] - forkable: Forks, - } - - #[test] - fn forks_should_work_correctly() { - use super::Extension as _ ; - - // We first need to deserialize into a `Value` because of the following bug: - // https://github.com/serde-rs/json/issues/505 - let ext_val: serde_json::Value = serde_json::from_str(r#" + use super::*; + use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; + // Make the proc macro work for tests and doc tests. 
+ use crate as sc_chain_spec; + + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup)] + #[serde(deny_unknown_fields)] + pub struct Extension1 { + pub test: u64, + } + + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup)] + #[serde(deny_unknown_fields)] + pub struct Extension2 { + pub test: u8, + } + + #[derive( + Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecGroup, ChainSpecExtension, + )] + #[serde(deny_unknown_fields)] + pub struct Extensions { + pub ext1: Extension1, + pub ext2: Extension2, + } + + #[derive(Debug, Clone, PartialEq, Serialize, Deserialize, ChainSpecExtension)] + #[serde(deny_unknown_fields)] + pub struct Ext2 { + #[serde(flatten)] + ext1: Extension1, + #[forks] + forkable: Forks, + } + + #[test] + fn forks_should_work_correctly() { + use super::Extension as _; + + // We first need to deserialize into a `Value` because of the following bug: + // https://github.com/serde-rs/json/issues/505 + let ext_val: serde_json::Value = serde_json::from_str( + r#" { "test": 11, "forkable": { @@ -340,54 +353,72 @@ mod tests { } } } - "#).unwrap(); - - let ext: Ext2 = serde_json::from_value(ext_val).unwrap(); - - assert_eq!(ext.get::(), Some(&Extension1 { - test: 11 - })); - - // get forks definition - let forks = ext.get::>().unwrap(); - assert_eq!(forks.at_block(0), Extensions { - ext1: Extension1 { test: 15 }, - ext2: Extension2 { test: 123 }, - }); - assert_eq!(forks.at_block(1), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 123 }, - }); - assert_eq!(forks.at_block(2), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 5 }, - }); - assert_eq!(forks.at_block(4), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 5 }, - }); - assert_eq!(forks.at_block(5), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { test: 1 }, - }); - assert_eq!(forks.at_block(10), Extensions { - ext1: Extension1 { test: 5 }, - ext2: Extension2 { 
test: 1 }, - }); - assert!(forks.at_block(10).get::().is_some()); - - // filter forks for `Extension2` - let ext2 = forks.for_type::().unwrap(); - assert_eq!(ext2.at_block(0), Extension2 { test: 123 }); - assert_eq!(ext2.at_block(2), Extension2 { test: 5 }); - assert_eq!(ext2.at_block(10), Extension2 { test: 1 }); - - // make sure that it can return forks correctly - let ext2_2 = forks.forks::().unwrap(); - assert_eq!(ext2, ext2_2); - - // also ext should be able to return forks correctly: - let ext2_3 = ext.forks::().unwrap(); - assert_eq!(ext2_2, ext2_3); - } + "#, + ) + .unwrap(); + + let ext: Ext2 = serde_json::from_value(ext_val).unwrap(); + + assert_eq!(ext.get::(), Some(&Extension1 { test: 11 })); + + // get forks definition + let forks = ext.get::>().unwrap(); + assert_eq!( + forks.at_block(0), + Extensions { + ext1: Extension1 { test: 15 }, + ext2: Extension2 { test: 123 }, + } + ); + assert_eq!( + forks.at_block(1), + Extensions { + ext1: Extension1 { test: 5 }, + ext2: Extension2 { test: 123 }, + } + ); + assert_eq!( + forks.at_block(2), + Extensions { + ext1: Extension1 { test: 5 }, + ext2: Extension2 { test: 5 }, + } + ); + assert_eq!( + forks.at_block(4), + Extensions { + ext1: Extension1 { test: 5 }, + ext2: Extension2 { test: 5 }, + } + ); + assert_eq!( + forks.at_block(5), + Extensions { + ext1: Extension1 { test: 5 }, + ext2: Extension2 { test: 1 }, + } + ); + assert_eq!( + forks.at_block(10), + Extensions { + ext1: Extension1 { test: 5 }, + ext2: Extension2 { test: 1 }, + } + ); + assert!(forks.at_block(10).get::().is_some()); + + // filter forks for `Extension2` + let ext2 = forks.for_type::().unwrap(); + assert_eq!(ext2.at_block(0), Extension2 { test: 123 }); + assert_eq!(ext2.at_block(2), Extension2 { test: 5 }); + assert_eq!(ext2.at_block(10), Extension2 { test: 1 }); + + // make sure that it can return forks correctly + let ext2_2 = forks.forks::().unwrap(); + assert_eq!(ext2, ext2_2); + + // also ext should be able to return forks 
correctly: + let ext2_3 = ext.forks::().unwrap(); + assert_eq!(ext2_2, ext2_3); + } } diff --git a/client/chain-spec/src/lib.rs b/client/chain-spec/src/lib.rs index de83e170e0..c28e6b0f8d 100644 --- a/client/chain-spec/src/lib.rs +++ b/client/chain-spec/src/lib.rs @@ -111,14 +111,14 @@ mod chain_spec; mod extension; pub use chain_spec::{ChainSpec as GenericChainSpec, NoExtension}; -pub use extension::{Group, Fork, Forks, Extension, GetExtension, get_extension}; +pub use extension::{get_extension, Extension, Fork, Forks, GetExtension, Group}; pub use sc_chain_spec_derive::{ChainSpecExtension, ChainSpecGroup}; -pub use sp_chain_spec::{Properties, ChainType}; +pub use sp_chain_spec::{ChainType, Properties}; -use serde::{Serialize, de::DeserializeOwned}; -use sp_runtime::BuildStorage; use sc_network::config::MultiaddrWithPeerId; use sc_telemetry::TelemetryEndpoints; +use serde::{de::DeserializeOwned, Serialize}; +use sp_runtime::BuildStorage; /// A set of traits for the runtime genesis config. pub trait RuntimeGenesis: Serialize + DeserializeOwned + BuildStorage {} @@ -126,28 +126,28 @@ impl RuntimeGenesis for T {} /// Common interface of a chain specification. pub trait ChainSpec: BuildStorage + Send { - /// Spec name. - fn name(&self) -> &str; - /// Spec id. - fn id(&self) -> &str; - /// Type of the chain. - fn chain_type(&self) -> ChainType; - /// A list of bootnode addresses. - fn boot_nodes(&self) -> &[MultiaddrWithPeerId]; - /// Telemetry endpoints (if any) - fn telemetry_endpoints(&self) -> &Option; - /// Network protocol id. - fn protocol_id(&self) -> Option<&str>; - /// Additional loosly-typed properties of the chain. - /// - /// Returns an empty JSON object if 'properties' not defined in config - fn properties(&self) -> Properties; - /// Returns a reference to defined chain spec extensions. - fn extensions(&self) -> &dyn GetExtension; - /// Add a bootnode to the list. - fn add_boot_node(&mut self, addr: MultiaddrWithPeerId); - /// Return spec as JSON. 
- fn as_json(&self, raw: bool) -> Result; - /// Return StorageBuilder for this spec. - fn as_storage_builder(&self) -> &dyn BuildStorage; + /// Spec name. + fn name(&self) -> &str; + /// Spec id. + fn id(&self) -> &str; + /// Type of the chain. + fn chain_type(&self) -> ChainType; + /// A list of bootnode addresses. + fn boot_nodes(&self) -> &[MultiaddrWithPeerId]; + /// Telemetry endpoints (if any) + fn telemetry_endpoints(&self) -> &Option; + /// Network protocol id. + fn protocol_id(&self) -> Option<&str>; + /// Additional loosly-typed properties of the chain. + /// + /// Returns an empty JSON object if 'properties' not defined in config + fn properties(&self) -> Properties; + /// Returns a reference to defined chain spec extensions. + fn extensions(&self) -> &dyn GetExtension; + /// Add a bootnode to the list. + fn add_boot_node(&mut self, addr: MultiaddrWithPeerId); + /// Return spec as JSON. + fn as_json(&self, raw: bool) -> Result; + /// Return StorageBuilder for this spec. + fn as_storage_builder(&self) -> &dyn BuildStorage; } diff --git a/client/cli/src/arg_enums.rs b/client/cli/src/arg_enums.rs index 2adccdd879..73dd495341 100644 --- a/client/cli/src/arg_enums.rs +++ b/client/cli/src/arg_enums.rs @@ -20,120 +20,120 @@ use structopt::clap::arg_enum; arg_enum! { - /// How to execute Wasm runtime code - #[allow(missing_docs)] - #[derive(Debug, Clone, Copy)] - pub enum WasmExecutionMethod { - // Uses an interpreter. - Interpreted, - // Uses a compiled runtime. - Compiled, - } + /// How to execute Wasm runtime code + #[allow(missing_docs)] + #[derive(Debug, Clone, Copy)] + pub enum WasmExecutionMethod { + // Uses an interpreter. + Interpreted, + // Uses a compiled runtime. + Compiled, + } } impl WasmExecutionMethod { - /// Returns list of variants that are not disabled by feature flags. 
- pub fn enabled_variants() -> Vec<&'static str> { - Self::variants() - .iter() - .cloned() - .filter(|&name| cfg!(feature = "wasmtime") || name != "Compiled") - .collect() - } + /// Returns list of variants that are not disabled by feature flags. + pub fn enabled_variants() -> Vec<&'static str> { + Self::variants() + .iter() + .cloned() + .filter(|&name| cfg!(feature = "wasmtime") || name != "Compiled") + .collect() + } } impl Into for WasmExecutionMethod { - fn into(self) -> sc_service::config::WasmExecutionMethod { - match self { - WasmExecutionMethod::Interpreted => { - sc_service::config::WasmExecutionMethod::Interpreted - } - #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled, - #[cfg(not(feature = "wasmtime"))] - WasmExecutionMethod::Compiled => panic!( - "Substrate must be compiled with \"wasmtime\" feature for compiled Wasm execution" - ), - } - } + fn into(self) -> sc_service::config::WasmExecutionMethod { + match self { + WasmExecutionMethod::Interpreted => { + sc_service::config::WasmExecutionMethod::Interpreted + } + #[cfg(feature = "wasmtime")] + WasmExecutionMethod::Compiled => sc_service::config::WasmExecutionMethod::Compiled, + #[cfg(not(feature = "wasmtime"))] + WasmExecutionMethod::Compiled => panic!( + "Substrate must be compiled with \"wasmtime\" feature for compiled Wasm execution" + ), + } + } } arg_enum! 
{ - #[allow(missing_docs)] - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - pub enum TracingReceiver { - Log, - Telemetry, - } + #[allow(missing_docs)] + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum TracingReceiver { + Log, + Telemetry, + } } impl Into for TracingReceiver { - fn into(self) -> sc_tracing::TracingReceiver { - match self { - TracingReceiver::Log => sc_tracing::TracingReceiver::Log, - TracingReceiver::Telemetry => sc_tracing::TracingReceiver::Telemetry, - } - } + fn into(self) -> sc_tracing::TracingReceiver { + match self { + TracingReceiver::Log => sc_tracing::TracingReceiver::Log, + TracingReceiver::Telemetry => sc_tracing::TracingReceiver::Telemetry, + } + } } arg_enum! { - #[allow(missing_docs)] - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - pub enum NodeKeyType { - Ed25519 - } + #[allow(missing_docs)] + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + pub enum NodeKeyType { + Ed25519 + } } arg_enum! { - /// How to execute blocks - #[derive(Debug, Clone, Copy, PartialEq, Eq)] - pub enum ExecutionStrategy { - // Execute with native build (if available, WebAssembly otherwise). - Native, - // Only execute with the WebAssembly build. - Wasm, - // Execute with both native (where available) and WebAssembly builds. - Both, - // Execute with the native build if possible; if it fails, then execute with WebAssembly. - NativeElseWasm, - } + /// How to execute blocks + #[derive(Debug, Clone, Copy, PartialEq, Eq)] + pub enum ExecutionStrategy { + // Execute with native build (if available, WebAssembly otherwise). + Native, + // Only execute with the WebAssembly build. + Wasm, + // Execute with both native (where available) and WebAssembly builds. + Both, + // Execute with the native build if possible; if it fails, then execute with WebAssembly. 
+ NativeElseWasm, + } } impl Into for ExecutionStrategy { - fn into(self) -> sc_client_api::ExecutionStrategy { - match self { - ExecutionStrategy::Native => sc_client_api::ExecutionStrategy::NativeWhenPossible, - ExecutionStrategy::Wasm => sc_client_api::ExecutionStrategy::AlwaysWasm, - ExecutionStrategy::Both => sc_client_api::ExecutionStrategy::Both, - ExecutionStrategy::NativeElseWasm => sc_client_api::ExecutionStrategy::NativeElseWasm, - } - } + fn into(self) -> sc_client_api::ExecutionStrategy { + match self { + ExecutionStrategy::Native => sc_client_api::ExecutionStrategy::NativeWhenPossible, + ExecutionStrategy::Wasm => sc_client_api::ExecutionStrategy::AlwaysWasm, + ExecutionStrategy::Both => sc_client_api::ExecutionStrategy::Both, + ExecutionStrategy::NativeElseWasm => sc_client_api::ExecutionStrategy::NativeElseWasm, + } + } } impl ExecutionStrategy { - /// Returns the variant as `'&static str`. - pub fn as_str(&self) -> &'static str { - match self { - Self::Native => "Native", - Self::Wasm => "Wasm", - Self::Both => "Both", - Self::NativeElseWasm => "NativeElseWasm", - } - } + /// Returns the variant as `'&static str`. + pub fn as_str(&self) -> &'static str { + match self { + Self::Native => "Native", + Self::Wasm => "Wasm", + Self::Both => "Both", + Self::NativeElseWasm => "NativeElseWasm", + } + } } arg_enum! { - /// Database backend - #[allow(missing_docs)] - #[derive(Debug, Clone, Copy)] - pub enum Database { - // Facebooks RocksDB - RocksDb, - // Subdb. https://github.com/paritytech/subdb/ - SubDb, - // ParityDb. https://github.com/paritytech/parity-db/ - ParityDb, - } + /// Database backend + #[allow(missing_docs)] + #[derive(Debug, Clone, Copy)] + pub enum Database { + // Facebooks RocksDB + RocksDb, + // Subdb. https://github.com/paritytech/subdb/ + SubDb, + // ParityDb. https://github.com/paritytech/parity-db/ + ParityDb, + } } /// Default value for the `--execution-syncing` parameter. 
diff --git a/client/cli/src/commands/build_spec_cmd.rs b/client/cli/src/commands/build_spec_cmd.rs index a01101fa79..616e504b02 100644 --- a/client/cli/src/commands/build_spec_cmd.rs +++ b/client/cli/src/commands/build_spec_cmd.rs @@ -26,57 +26,57 @@ use structopt::StructOpt; /// The `build-spec` command used to build a specification. #[derive(Debug, StructOpt, Clone)] pub struct BuildSpecCmd { - /// Force raw genesis storage output. - #[structopt(long = "raw")] - pub raw: bool, + /// Force raw genesis storage output. + #[structopt(long = "raw")] + pub raw: bool, - /// Disable adding the default bootnode to the specification. - /// - /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the - /// specification when no bootnode exists. - #[structopt(long = "disable-default-bootnode")] - pub disable_default_bootnode: bool, + /// Disable adding the default bootnode to the specification. + /// + /// By default the `/ip4/127.0.0.1/tcp/30333/p2p/NODE_PEER_ID` bootnode is added to the + /// specification when no bootnode exists. 
+ #[structopt(long = "disable-default-bootnode")] + pub disable_default_bootnode: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub node_key_params: NodeKeyParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub node_key_params: NodeKeyParams, } impl BuildSpecCmd { - /// Run the build-spec command - pub fn run(&self, config: Configuration) -> error::Result<()> { - info!("Building chain spec"); - let mut spec = config.chain_spec; - let raw_output = self.raw; + /// Run the build-spec command + pub fn run(&self, config: Configuration) -> error::Result<()> { + info!("Building chain spec"); + let mut spec = config.chain_spec; + let raw_output = self.raw; - if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { - let keys = config.network.node_key.into_keypair()?; - let peer_id = keys.public().into_peer_id(); - let addr = MultiaddrWithPeerId { - multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)], - peer_id, - }; - spec.add_boot_node(addr) - } + if spec.boot_nodes().is_empty() && !self.disable_default_bootnode { + let keys = config.network.node_key.into_keypair()?; + let peer_id = keys.public().into_peer_id(); + let addr = MultiaddrWithPeerId { + multiaddr: build_multiaddr![Ip4([127, 0, 0, 1]), Tcp(30333u16)], + peer_id, + }; + spec.add_boot_node(addr) + } - let json = sc_service::chain_ops::build_spec(&*spec, raw_output)?; + let json = sc_service::chain_ops::build_spec(&*spec, raw_output)?; - print!("{}", json); + print!("{}", json); - Ok(()) - } + Ok(()) + } } impl CliConfiguration for BuildSpecCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - fn node_key_params(&self) -> Option<&NodeKeyParams> { - Some(&self.node_key_params) - } + fn node_key_params(&self) -> 
Option<&NodeKeyParams> { + Some(&self.node_key_params) + } } diff --git a/client/cli/src/commands/check_block_cmd.rs b/client/cli/src/commands/check_block_cmd.rs index ac4fe63da9..292c1ea021 100644 --- a/client/cli/src/commands/check_block_cmd.rs +++ b/client/cli/src/commands/check_block_cmd.rs @@ -28,70 +28,66 @@ use structopt::StructOpt; /// The `check-block` command used to validate blocks. #[derive(Debug, StructOpt, Clone)] pub struct CheckBlockCmd { - /// Block hash or number - #[structopt(value_name = "HASH or NUMBER")] - pub input: String, + /// Block hash or number + #[structopt(value_name = "HASH or NUMBER")] + pub input: String, - /// The default number of 64KB pages to ever allocate for Wasm execution. - /// - /// Don't alter this unless you know what you're doing. - #[structopt(long = "default-heap-pages", value_name = "COUNT")] - pub default_heap_pages: Option, + /// The default number of 64KB pages to ever allocate for Wasm execution. + /// + /// Don't alter this unless you know what you're doing. + #[structopt(long = "default-heap-pages", value_name = "COUNT")] + pub default_heap_pages: Option, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, } impl CheckBlockCmd { - /// Run the check-block command - pub async fn run( - &self, - config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - let input = if self.input.starts_with("0x") { - &self.input[2..] - } else { - &self.input[..] 
- }; - let block_id = match FromStr::from_str(input) { - Ok(hash) => BlockId::hash(hash), - Err(_) => match self.input.parse::() { - Ok(n) => BlockId::number((n as u32).into()), - Err(_) => { - return Err(error::Error::Input( - "Invalid hash or number specified".into(), - )) - } - }, - }; + /// Run the check-block command + pub async fn run(&self, config: Configuration, builder: B) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + let input = if self.input.starts_with("0x") { + &self.input[2..] + } else { + &self.input[..] + }; + let block_id = match FromStr::from_str(input) { + Ok(hash) => BlockId::hash(hash), + Err(_) => match self.input.parse::() { + Ok(n) => BlockId::number((n as u32).into()), + Err(_) => { + return Err(error::Error::Input( + "Invalid hash or number specified".into(), + )) + } + }, + }; - let start = std::time::Instant::now(); - builder(config)?.check_block(block_id).await?; - println!("Completed in {} ms.", start.elapsed().as_millis()); + let start = std::time::Instant::now(); + builder(config)?.check_block(block_id).await?; + println!("Completed in {} ms.", start.elapsed().as_millis()); - Ok(()) - } + Ok(()) + } } impl CliConfiguration for CheckBlockCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - fn import_params(&self) -> Option<&ImportParams> { - Some(&self.import_params) - } + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } } diff --git a/client/cli/src/commands/export_blocks_cmd.rs b/client/cli/src/commands/export_blocks_cmd.rs index 297d83506b..e2336bf37c 100644 --- a/client/cli/src/commands/export_blocks_cmd.rs +++ b/client/cli/src/commands/export_blocks_cmd.rs @@ -18,9 +18,7 @@ use crate::error; 
use crate::params::{BlockNumber, PruningParams, SharedParams}; use crate::CliConfiguration; use log::info; -use sc_service::{ - config::DatabaseConfig, Configuration, ServiceBuilderCommand, -}; +use sc_service::{config::DatabaseConfig, Configuration, ServiceBuilderCommand}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; use std::fmt::Debug; use std::fs; @@ -31,76 +29,77 @@ use structopt::StructOpt; /// The `export-blocks` command used to export blocks. #[derive(Debug, StructOpt, Clone)] pub struct ExportBlocksCmd { - /// Output file name or stdout if unspecified. - #[structopt(parse(from_os_str))] - pub output: Option, - - /// Specify starting block number. - /// - /// Default is 1. - #[structopt(long = "from", value_name = "BLOCK")] - pub from: Option, - - /// Specify last block number. - /// - /// Default is best block. - #[structopt(long = "to", value_name = "BLOCK")] - pub to: Option, - - /// Use binary output rather than JSON. - #[structopt(long = "binary", value_name = "BOOL", parse(try_from_str), default_value("false"))] - pub binary: bool, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub pruning_params: PruningParams, + /// Output file name or stdout if unspecified. + #[structopt(parse(from_os_str))] + pub output: Option, + + /// Specify starting block number. + /// + /// Default is 1. + #[structopt(long = "from", value_name = "BLOCK")] + pub from: Option, + + /// Specify last block number. + /// + /// Default is best block. + #[structopt(long = "to", value_name = "BLOCK")] + pub to: Option, + + /// Use binary output rather than JSON. 
+ #[structopt( + long = "binary", + value_name = "BOOL", + parse(try_from_str), + default_value("false") + )] + pub binary: bool, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub pruning_params: PruningParams, } impl ExportBlocksCmd { - /// Run the export-blocks command - pub async fn run( - &self, - config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - if let DatabaseConfig::RocksDb { ref path, .. } = &config.database { - info!("DB path: {}", path.display()); - } - - let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1); - let to = self.to.as_ref().and_then(|t| t.parse().ok()); - - let binary = self.binary; - - let file: Box = match &self.output { - Some(filename) => Box::new(fs::File::create(filename)?), - None => Box::new(io::stdout()), - }; - - builder(config)? - .export_blocks(file, from.into(), to, binary) - .await - .map_err(Into::into) - } + /// Run the export-blocks command + pub async fn run(&self, config: Configuration, builder: B) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + if let DatabaseConfig::RocksDb { ref path, .. 
} = &config.database { + info!("DB path: {}", path.display()); + } + + let from = self.from.as_ref().and_then(|f| f.parse().ok()).unwrap_or(1); + let to = self.to.as_ref().and_then(|t| t.parse().ok()); + + let binary = self.binary; + + let file: Box = match &self.output { + Some(filename) => Box::new(fs::File::create(filename)?), + None => Box::new(io::stdout()), + }; + + builder(config)? + .export_blocks(file, from.into(), to, binary) + .await + .map_err(Into::into) + } } impl CliConfiguration for ExportBlocksCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - fn pruning_params(&self) -> Option<&PruningParams> { - Some(&self.pruning_params) - } + fn pruning_params(&self) -> Option<&PruningParams> { + Some(&self.pruning_params) + } } diff --git a/client/cli/src/commands/import_blocks_cmd.rs b/client/cli/src/commands/import_blocks_cmd.rs index ce95640f46..7ea3e203d1 100644 --- a/client/cli/src/commands/import_blocks_cmd.rs +++ b/client/cli/src/commands/import_blocks_cmd.rs @@ -29,23 +29,23 @@ use structopt::StructOpt; /// The `import-blocks` command used to import blocks. #[derive(Debug, StructOpt, Clone)] pub struct ImportBlocksCmd { - /// Input file or stdin if unspecified. - #[structopt(parse(from_os_str))] - pub input: Option, + /// Input file or stdin if unspecified. + #[structopt(parse(from_os_str))] + pub input: Option, - /// The default number of 64KB pages to ever allocate for Wasm execution. - /// - /// Don't alter this unless you know what you're doing. - #[structopt(long = "default-heap-pages", value_name = "COUNT")] - pub default_heap_pages: Option, + /// The default number of 64KB pages to ever allocate for Wasm execution. + /// + /// Don't alter this unless you know what you're doing. 
+ #[structopt(long = "default-heap-pages", value_name = "COUNT")] + pub default_heap_pages: Option, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, } /// Internal trait used to cast to a dynamic type that implements Read and Seek. @@ -54,41 +54,37 @@ trait ReadPlusSeek: Read + Seek {} impl ReadPlusSeek for T {} impl ImportBlocksCmd { - /// Run the import-blocks command - pub async fn run( - &self, - config: Configuration, - builder: B, - ) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - let file: Box = match &self.input { - Some(filename) => Box::new(fs::File::open(filename)?), - None => { - let mut buffer = Vec::new(); - io::stdin().read_to_end(&mut buffer)?; - Box::new(io::Cursor::new(buffer)) - } - }; + /// Run the import-blocks command + pub async fn run(&self, config: Configuration, builder: B) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + let file: Box = match &self.input { + Some(filename) => Box::new(fs::File::open(filename)?), + None => { + let mut buffer = Vec::new(); + io::stdin().read_to_end(&mut buffer)?; + Box::new(io::Cursor::new(buffer)) + } + }; - builder(config)? - .import_blocks(file, false) - .await - .map_err(Into::into) - } + builder(config)? 
+ .import_blocks(file, false) + .await + .map_err(Into::into) + } } impl CliConfiguration for ImportBlocksCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - fn import_params(&self) -> Option<&ImportParams> { - Some(&self.import_params) - } + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } } diff --git a/client/cli/src/commands/mod.rs b/client/cli/src/commands/mod.rs index ae2fe55467..f78fee996b 100644 --- a/client/cli/src/commands/mod.rs +++ b/client/cli/src/commands/mod.rs @@ -39,23 +39,23 @@ use structopt::StructOpt; /// `Run` are exported as main executable parameters. #[derive(Debug, Clone, StructOpt)] pub enum Subcommand { - /// Build a spec.json file, outputs to stdout. - BuildSpec(BuildSpecCmd), + /// Build a spec.json file, outputs to stdout. + BuildSpec(BuildSpecCmd), - /// Export blocks to a file. - ExportBlocks(ExportBlocksCmd), + /// Export blocks to a file. + ExportBlocks(ExportBlocksCmd), - /// Import blocks from file. - ImportBlocks(ImportBlocksCmd), + /// Import blocks from file. + ImportBlocks(ImportBlocksCmd), - /// Validate a single block. - CheckBlock(CheckBlockCmd), + /// Validate a single block. + CheckBlock(CheckBlockCmd), - /// Revert chain to the previous state. - Revert(RevertCmd), + /// Revert chain to the previous state. + Revert(RevertCmd), - /// Remove the whole chain data. - PurgeChain(PurgeChainCmd), + /// Remove the whole chain data. + PurgeChain(PurgeChainCmd), } // TODO: move to config.rs? diff --git a/client/cli/src/commands/purge_chain_cmd.rs b/client/cli/src/commands/purge_chain_cmd.rs index 3be2883bd5..37f1e41e0a 100644 --- a/client/cli/src/commands/purge_chain_cmd.rs +++ b/client/cli/src/commands/purge_chain_cmd.rs @@ -26,59 +26,59 @@ use structopt::StructOpt; /// The `purge-chain` command used to remove the whole chain. 
#[derive(Debug, StructOpt, Clone)] pub struct PurgeChainCmd { - /// Skip interactive prompt by answering yes automatically. - #[structopt(short = "y")] - pub yes: bool, + /// Skip interactive prompt by answering yes automatically. + #[structopt(short = "y")] + pub yes: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, } impl PurgeChainCmd { - /// Run the purge command - pub fn run(&self, config: Configuration) -> error::Result<()> { - let db_path = match &config.database { - DatabaseConfig::RocksDb { path, .. } => path, - _ => { - eprintln!("Cannot purge custom database implementation"); - return Ok(()); - } - }; + /// Run the purge command + pub fn run(&self, config: Configuration) -> error::Result<()> { + let db_path = match &config.database { + DatabaseConfig::RocksDb { path, .. } => path, + _ => { + eprintln!("Cannot purge custom database implementation"); + return Ok(()); + } + }; - if !self.yes { - print!("Are you sure to remove {:?}? [y/N]: ", &db_path); - io::stdout().flush().expect("failed to flush stdout"); + if !self.yes { + print!("Are you sure to remove {:?}? 
[y/N]: ", &db_path); + io::stdout().flush().expect("failed to flush stdout"); - let mut input = String::new(); - io::stdin().read_line(&mut input)?; - let input = input.trim(); + let mut input = String::new(); + io::stdin().read_line(&mut input)?; + let input = input.trim(); - match input.chars().nth(0) { - Some('y') | Some('Y') => {}, - _ => { - println!("Aborted"); - return Ok(()); - }, - } - } + match input.chars().nth(0) { + Some('y') | Some('Y') => {} + _ => { + println!("Aborted"); + return Ok(()); + } + } + } - match fs::remove_dir_all(&db_path) { - Ok(_) => { - println!("{:?} removed.", &db_path); - Ok(()) - }, - Err(ref err) if err.kind() == io::ErrorKind::NotFound => { - eprintln!("{:?} did not exist.", &db_path); - Ok(()) - }, - Err(err) => Result::Err(err.into()), - } - } + match fs::remove_dir_all(&db_path) { + Ok(_) => { + println!("{:?} removed.", &db_path); + Ok(()) + } + Err(ref err) if err.kind() == io::ErrorKind::NotFound => { + eprintln!("{:?} did not exist.", &db_path); + Ok(()) + } + Err(err) => Result::Err(err.into()), + } + } } impl CliConfiguration for PurgeChainCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } } diff --git a/client/cli/src/commands/revert_cmd.rs b/client/cli/src/commands/revert_cmd.rs index f7629ff2f6..cef01d6dad 100644 --- a/client/cli/src/commands/revert_cmd.rs +++ b/client/cli/src/commands/revert_cmd.rs @@ -25,42 +25,42 @@ use structopt::StructOpt; /// The `revert` command used revert the chain to a previous state. #[derive(Debug, StructOpt, Clone)] pub struct RevertCmd { - /// Number of blocks to revert. - #[structopt(default_value = "256")] - pub num: BlockNumber, + /// Number of blocks to revert. 
+ #[structopt(default_value = "256")] + pub num: BlockNumber, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub pruning_params: PruningParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub pruning_params: PruningParams, } impl RevertCmd { - /// Run the revert command - pub fn run(&self, config: Configuration, builder: B) -> error::Result<()> - where - B: FnOnce(Configuration) -> Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - { - let blocks = self.num.parse()?; - builder(config)?.revert_chain(blocks)?; + /// Run the revert command + pub fn run(&self, config: Configuration, builder: B) -> error::Result<()> + where + B: FnOnce(Configuration) -> Result, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + { + let blocks = self.num.parse()?; + builder(config)?.revert_chain(blocks)?; - Ok(()) - } + Ok(()) + } } impl CliConfiguration for RevertCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } - fn pruning_params(&self) -> Option<&PruningParams> { - Some(&self.pruning_params) - } + fn pruning_params(&self) -> Option<&PruningParams> { + Some(&self.pruning_params) + } } diff --git a/client/cli/src/commands/runcmd.rs b/client/cli/src/commands/runcmd.rs index b3ce6ce6d1..a599a07e87 100644 --- a/client/cli/src/commands/runcmd.rs +++ b/client/cli/src/commands/runcmd.rs @@ -23,517 +23,517 @@ use crate::params::TransactionPoolParams; use crate::CliConfiguration; use regex::Regex; use sc_service::{ - config::{MultiaddrWithPeerId, 
PrometheusConfig, TransactionPoolOptions}, - ChainSpec, Role, + config::{MultiaddrWithPeerId, PrometheusConfig, TransactionPoolOptions}, + ChainSpec, Role, }; use sc_telemetry::TelemetryEndpoints; use std::net::SocketAddr; use structopt::{clap::arg_enum, StructOpt}; arg_enum! { - /// Whether off-chain workers are enabled. - #[allow(missing_docs)] - #[derive(Debug, Clone)] - pub enum OffchainWorkerEnabled { - Always, - Never, - WhenValidating, - } + /// Whether off-chain workers are enabled. + #[allow(missing_docs)] + #[derive(Debug, Clone)] + pub enum OffchainWorkerEnabled { + Always, + Never, + WhenValidating, + } } /// The `run` command used to run a node. #[derive(Debug, StructOpt, Clone)] pub struct RunCmd { - /// Enable validator mode. - /// - /// The node will be started with the authority role and actively - /// participate in any consensus task that it can (e.g. depending on - /// availability of local keys). - #[structopt( + /// Enable validator mode. + /// + /// The node will be started with the authority role and actively + /// participate in any consensus task that it can (e.g. depending on + /// availability of local keys). + #[structopt( long = "validator", conflicts_with_all = &[ "sentry" ] )] - pub validator: bool, - - /// Enable sentry mode. - /// - /// The node will be started with the authority role and participate in - /// consensus tasks as an "observer", it will never actively participate - /// regardless of whether it could (e.g. keys are available locally). This - /// mode is useful as a secure proxy for validators (which would run - /// detached from the network), since we want this node to participate in - /// the full consensus protocols in order to have all needed consensus data - /// available to relay to private nodes. - #[structopt( + pub validator: bool, + + /// Enable sentry mode. 
+ /// + /// The node will be started with the authority role and participate in + /// consensus tasks as an "observer", it will never actively participate + /// regardless of whether it could (e.g. keys are available locally). This + /// mode is useful as a secure proxy for validators (which would run + /// detached from the network), since we want this node to participate in + /// the full consensus protocols in order to have all needed consensus data + /// available to relay to private nodes. + #[structopt( long = "sentry", conflicts_with_all = &[ "validator", "light" ], parse(try_from_str) )] - pub sentry: Vec, - - /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA observer. - #[structopt(long = "no-grandpa")] - pub no_grandpa: bool, - - /// Experimental: Run in light client mode. - #[structopt(long = "light", conflicts_with = "sentry")] - pub light: bool, - - /// Listen to all RPC interfaces. - /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. - /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. - #[structopt(long = "rpc-external")] - pub rpc_external: bool, - - /// Listen to all RPC interfaces. - /// - /// Same as `--rpc-external`. - #[structopt(long = "unsafe-rpc-external")] - pub unsafe_rpc_external: bool, - - /// Listen to all Websocket interfaces. - /// - /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy - /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. - /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. - #[structopt(long = "ws-external")] - pub ws_external: bool, - - /// Listen to all Websocket interfaces. 
- /// - /// Same as `--ws-external` but doesn't warn you about it. - #[structopt(long = "unsafe-ws-external")] - pub unsafe_ws_external: bool, - - /// Listen to all Prometheus data source interfaces. - /// - /// Default is local. - #[structopt(long = "prometheus-external")] - pub prometheus_external: bool, - - /// Specify HTTP RPC server TCP port. - #[structopt(long = "rpc-port", value_name = "PORT")] - pub rpc_port: Option, - - /// Specify WebSockets RPC server TCP port. - #[structopt(long = "ws-port", value_name = "PORT")] - pub ws_port: Option, - - /// Maximum number of WS RPC server connections. - #[structopt(long = "ws-max-connections", value_name = "COUNT")] - pub ws_max_connections: Option, - - /// Specify browser Origins allowed to access the HTTP & WS RPC servers. - /// - /// A comma-separated list of origins (protocol://domain or special `null` - /// value). Value of `all` will disable origin validation. Default is to - /// allow localhost and https://polkadot.js.org origins. When running in - /// --dev mode the default is to allow all origins. - #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] - pub rpc_cors: Option, - - /// Specify Prometheus data source server TCP Port. - #[structopt(long = "prometheus-port", value_name = "PORT")] - pub prometheus_port: Option, - - /// Do not expose a Prometheus metric endpoint. - /// - /// Prometheus metric endpoint is enabled by default. - #[structopt(long = "no-prometheus")] - pub no_prometheus: bool, - - /// The human-readable name for this node. - /// - /// The node name will be reported to the telemetry server, if enabled. - #[structopt(long = "name", value_name = "NAME")] - pub name: Option, - - /// Disable connecting to the Substrate telemetry server. - /// - /// Telemetry is on by default on global chains. - #[structopt(long = "no-telemetry")] - pub no_telemetry: bool, - - /// The URL of the telemetry server to connect to. 
- /// - /// This flag can be passed multiple times as a means to specify multiple - /// telemetry endpoints. Verbosity levels range from 0-9, with 0 denoting - /// the least verbosity. - /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. - #[structopt(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = parse_telemetry_endpoints))] - pub telemetry_endpoints: Vec<(String, u8)>, - - /// Should execute offchain workers on every block. - /// - /// By default it's only enabled for nodes that are authoring new blocks. - #[structopt( + pub sentry: Vec, + + /// Disable GRANDPA voter when running in validator mode, otherwise disable the GRANDPA observer. + #[structopt(long = "no-grandpa")] + pub no_grandpa: bool, + + /// Experimental: Run in light client mode. + #[structopt(long = "light", conflicts_with = "sentry")] + pub light: bool, + + /// Listen to all RPC interfaces. + /// + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy + /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// Use `--unsafe-rpc-external` to suppress the warning if you understand the risks. + #[structopt(long = "rpc-external")] + pub rpc_external: bool, + + /// Listen to all RPC interfaces. + /// + /// Same as `--rpc-external`. + #[structopt(long = "unsafe-rpc-external")] + pub unsafe_rpc_external: bool, + + /// Listen to all Websocket interfaces. + /// + /// Default is local. Note: not all RPC methods are safe to be exposed publicly. Use an RPC proxy + /// server to filter out dangerous methods. More details: https://github.com/paritytech/substrate/wiki/Public-RPC. + /// Use `--unsafe-ws-external` to suppress the warning if you understand the risks. + #[structopt(long = "ws-external")] + pub ws_external: bool, + + /// Listen to all Websocket interfaces. + /// + /// Same as `--ws-external` but doesn't warn you about it. 
+ #[structopt(long = "unsafe-ws-external")] + pub unsafe_ws_external: bool, + + /// Listen to all Prometheus data source interfaces. + /// + /// Default is local. + #[structopt(long = "prometheus-external")] + pub prometheus_external: bool, + + /// Specify HTTP RPC server TCP port. + #[structopt(long = "rpc-port", value_name = "PORT")] + pub rpc_port: Option, + + /// Specify WebSockets RPC server TCP port. + #[structopt(long = "ws-port", value_name = "PORT")] + pub ws_port: Option, + + /// Maximum number of WS RPC server connections. + #[structopt(long = "ws-max-connections", value_name = "COUNT")] + pub ws_max_connections: Option, + + /// Specify browser Origins allowed to access the HTTP & WS RPC servers. + /// + /// A comma-separated list of origins (protocol://domain or special `null` + /// value). Value of `all` will disable origin validation. Default is to + /// allow localhost and https://polkadot.js.org origins. When running in + /// --dev mode the default is to allow all origins. + #[structopt(long = "rpc-cors", value_name = "ORIGINS", parse(try_from_str = parse_cors))] + pub rpc_cors: Option, + + /// Specify Prometheus data source server TCP Port. + #[structopt(long = "prometheus-port", value_name = "PORT")] + pub prometheus_port: Option, + + /// Do not expose a Prometheus metric endpoint. + /// + /// Prometheus metric endpoint is enabled by default. + #[structopt(long = "no-prometheus")] + pub no_prometheus: bool, + + /// The human-readable name for this node. + /// + /// The node name will be reported to the telemetry server, if enabled. + #[structopt(long = "name", value_name = "NAME")] + pub name: Option, + + /// Disable connecting to the Substrate telemetry server. + /// + /// Telemetry is on by default on global chains. + #[structopt(long = "no-telemetry")] + pub no_telemetry: bool, + + /// The URL of the telemetry server to connect to. + /// + /// This flag can be passed multiple times as a means to specify multiple + /// telemetry endpoints. 
Verbosity levels range from 0-9, with 0 denoting + /// the least verbosity. + /// Expected format is 'URL VERBOSITY', e.g. `--telemetry-url 'wss://foo/bar 0'`. + #[structopt(long = "telemetry-url", value_name = "URL VERBOSITY", parse(try_from_str = parse_telemetry_endpoints))] + pub telemetry_endpoints: Vec<(String, u8)>, + + /// Should execute offchain workers on every block. + /// + /// By default it's only enabled for nodes that are authoring new blocks. + #[structopt( long = "offchain-worker", value_name = "ENABLED", possible_values = &OffchainWorkerEnabled::variants(), case_insensitive = true, default_value = "WhenValidating" )] - pub offchain_worker: OffchainWorkerEnabled, + pub offchain_worker: OffchainWorkerEnabled, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: SharedParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub import_params: ImportParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub import_params: ImportParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub network_params: NetworkParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub network_params: NetworkParams, - #[allow(missing_docs)] - #[structopt(flatten)] - pub pool_config: TransactionPoolParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub pool_config: TransactionPoolParams, - /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. - #[structopt(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] - pub alice: bool, + /// Shortcut for `--name Alice --validator` with session keys for `Alice` added to keystore. + #[structopt(long, conflicts_with_all = &["bob", "charlie", "dave", "eve", "ferdie", "one", "two"])] + pub alice: bool, - /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. 
- #[structopt(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] - pub bob: bool, + /// Shortcut for `--name Bob --validator` with session keys for `Bob` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "charlie", "dave", "eve", "ferdie", "one", "two"])] + pub bob: bool, - /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] - pub charlie: bool, + /// Shortcut for `--name Charlie --validator` with session keys for `Charlie` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "dave", "eve", "ferdie", "one", "two"])] + pub charlie: bool, - /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] - pub dave: bool, + /// Shortcut for `--name Dave --validator` with session keys for `Dave` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "eve", "ferdie", "one", "two"])] + pub dave: bool, - /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] - pub eve: bool, + /// Shortcut for `--name Eve --validator` with session keys for `Eve` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "ferdie", "one", "two"])] + pub eve: bool, - /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] - pub ferdie: bool, + /// Shortcut for `--name Ferdie --validator` with session keys for `Ferdie` added to keystore. 
+ #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "one", "two"])] + pub ferdie: bool, - /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] - pub one: bool, + /// Shortcut for `--name One --validator` with session keys for `One` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "two"])] + pub one: bool, - /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. - #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] - pub two: bool, + /// Shortcut for `--name Two --validator` with session keys for `Two` added to keystore. + #[structopt(long, conflicts_with_all = &["alice", "bob", "charlie", "dave", "eve", "ferdie", "one"])] + pub two: bool, - /// Enable authoring even when offline. - #[structopt(long = "force-authoring")] - pub force_authoring: bool, + /// Enable authoring even when offline. + #[structopt(long = "force-authoring")] + pub force_authoring: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub keystore_params: KeystoreParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub keystore_params: KeystoreParams, - /// The size of the instances cache for each runtime. - /// - /// The default value is 8 and the values higher than 256 are ignored. - #[structopt(long)] - pub max_runtime_instances: Option, + /// The size of the instances cache for each runtime. + /// + /// The default value is 8 and the values higher than 256 are ignored. + #[structopt(long)] + pub max_runtime_instances: Option, - /// Specify a list of sentry node public addresses. - #[structopt( + /// Specify a list of sentry node public addresses. 
+ #[structopt( long = "sentry-nodes", value_name = "ADDR", conflicts_with_all = &[ "sentry" ] )] - pub sentry_nodes: Vec, + pub sentry_nodes: Vec, } impl RunCmd { - /// Get the `Sr25519Keyring` matching one of the flag. - pub fn get_keyring(&self) -> Option { - use sp_keyring::Sr25519Keyring::*; - - if self.alice { - Some(Alice) - } else if self.bob { - Some(Bob) - } else if self.charlie { - Some(Charlie) - } else if self.dave { - Some(Dave) - } else if self.eve { - Some(Eve) - } else if self.ferdie { - Some(Ferdie) - } else if self.one { - Some(One) - } else if self.two { - Some(Two) - } else { - None - } - } + /// Get the `Sr25519Keyring` matching one of the flag. + pub fn get_keyring(&self) -> Option { + use sp_keyring::Sr25519Keyring::*; + + if self.alice { + Some(Alice) + } else if self.bob { + Some(Bob) + } else if self.charlie { + Some(Charlie) + } else if self.dave { + Some(Dave) + } else if self.eve { + Some(Eve) + } else if self.ferdie { + Some(Ferdie) + } else if self.one { + Some(One) + } else if self.two { + Some(Two) + } else { + None + } + } } impl CliConfiguration for RunCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } - - fn import_params(&self) -> Option<&ImportParams> { - Some(&self.import_params) - } - - fn network_params(&self) -> Option<&NetworkParams> { - Some(&self.network_params) - } - - fn keystore_params(&self) -> Option<&KeystoreParams> { - Some(&self.keystore_params) - } - - fn node_name(&self) -> Result { - let name: String = match (self.name.as_ref(), self.get_keyring()) { - (Some(name), _) => name.to_string(), - (_, Some(keyring)) => keyring.to_string(), - (None, None) => crate::generate_node_name(), - }; - - is_node_name_valid(&name).map_err(|msg| { - Error::Input(format!( - "Invalid node name '{}'. Reason: {}. 
If unsure, use none.", - name, msg - )); - })?; - - Ok(name) - } - - fn dev_key_seed(&self, is_dev: bool) -> Result> { - Ok(self.get_keyring().map(|a| format!("//{}", a)).or_else(|| { - if is_dev && !self.light { - Some("//Alice".into()) - } else { - None - } - })) - } - - fn telemetry_endpoints( - &self, - chain_spec: &Box, - ) -> Result> { - Ok(if self.no_telemetry { - None - } else if !self.telemetry_endpoints.is_empty() { - Some( - TelemetryEndpoints::new(self.telemetry_endpoints.clone()) - .map_err(|e| e.to_string())?, - ) - } else { - chain_spec.telemetry_endpoints().clone() - }) - } - - fn role(&self, is_dev: bool) -> Result { - let keyring = self.get_keyring(); - let is_light = self.light; - let is_authority = (self.validator || is_dev || keyring.is_some()) && !is_light; - - Ok(if is_light { - sc_service::Role::Light - } else if is_authority { - sc_service::Role::Authority { - sentry_nodes: self.sentry_nodes.clone(), - } - } else if !self.sentry.is_empty() { - sc_service::Role::Sentry { - validators: self.sentry.clone(), - } - } else { - sc_service::Role::Full - }) - } - - fn force_authoring(&self) -> Result { - // Imply forced authoring on --dev - Ok(self.shared_params.dev || self.force_authoring) - } - - fn prometheus_config(&self) -> Result> { - if self.no_prometheus { - Ok(None) - } else { - let prometheus_interface: &str = if self.prometheus_external { - "0.0.0.0" - } else { - "127.0.0.1" - }; - - Ok(Some(PrometheusConfig::new_with_default_registry( - parse_address( - &format!("{}:{}", prometheus_interface, 9615), - self.prometheus_port, - )?, - ))) - } - } - - fn disable_grandpa(&self) -> Result { - Ok(self.no_grandpa) - } - - fn rpc_ws_max_connections(&self) -> Result> { - Ok(self.ws_max_connections) - } - - fn rpc_cors(&self, is_dev: bool) -> Result>> { - Ok(self - .rpc_cors - .clone() - .unwrap_or_else(|| { - if is_dev { - log::warn!("Running in --dev mode, RPC CORS has been disabled."); - Cors::All - } else { - Cors::List(vec![ - 
"http://localhost:*".into(), - "http://127.0.0.1:*".into(), - "https://localhost:*".into(), - "https://127.0.0.1:*".into(), - "https://polkadot.js.org".into(), - ]) - } - }) - .into()) - } - - fn rpc_http(&self) -> Result> { - let rpc_interface: &str = - interface_str(self.rpc_external, self.unsafe_rpc_external, self.validator)?; - - Ok(Some(parse_address( - &format!("{}:{}", rpc_interface, 9933), - self.rpc_port, - )?)) - } - - fn rpc_ws(&self) -> Result> { - let ws_interface: &str = - interface_str(self.ws_external, self.unsafe_ws_external, self.validator)?; - - Ok(Some(parse_address( - &format!("{}:{}", ws_interface, 9944), - self.ws_port, - )?)) - } - - fn offchain_worker(&self, role: &Role) -> Result { - Ok(match (&self.offchain_worker, role) { - (OffchainWorkerEnabled::WhenValidating, Role::Authority { .. }) => true, - (OffchainWorkerEnabled::Always, _) => true, - (OffchainWorkerEnabled::Never, _) => false, - (OffchainWorkerEnabled::WhenValidating, _) => false, - }) - } - - fn transaction_pool(&self) -> Result { - Ok(self.pool_config.transaction_pool()) - } - - fn max_runtime_instances(&self) -> Result> { - Ok(self.max_runtime_instances.map(|x| x.min(256))) - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } + + fn import_params(&self) -> Option<&ImportParams> { + Some(&self.import_params) + } + + fn network_params(&self) -> Option<&NetworkParams> { + Some(&self.network_params) + } + + fn keystore_params(&self) -> Option<&KeystoreParams> { + Some(&self.keystore_params) + } + + fn node_name(&self) -> Result { + let name: String = match (self.name.as_ref(), self.get_keyring()) { + (Some(name), _) => name.to_string(), + (_, Some(keyring)) => keyring.to_string(), + (None, None) => crate::generate_node_name(), + }; + + is_node_name_valid(&name).map_err(|msg| { + Error::Input(format!( + "Invalid node name '{}'. Reason: {}. 
If unsure, use none.", + name, msg + )); + })?; + + Ok(name) + } + + fn dev_key_seed(&self, is_dev: bool) -> Result> { + Ok(self.get_keyring().map(|a| format!("//{}", a)).or_else(|| { + if is_dev && !self.light { + Some("//Alice".into()) + } else { + None + } + })) + } + + fn telemetry_endpoints( + &self, + chain_spec: &Box, + ) -> Result> { + Ok(if self.no_telemetry { + None + } else if !self.telemetry_endpoints.is_empty() { + Some( + TelemetryEndpoints::new(self.telemetry_endpoints.clone()) + .map_err(|e| e.to_string())?, + ) + } else { + chain_spec.telemetry_endpoints().clone() + }) + } + + fn role(&self, is_dev: bool) -> Result { + let keyring = self.get_keyring(); + let is_light = self.light; + let is_authority = (self.validator || is_dev || keyring.is_some()) && !is_light; + + Ok(if is_light { + sc_service::Role::Light + } else if is_authority { + sc_service::Role::Authority { + sentry_nodes: self.sentry_nodes.clone(), + } + } else if !self.sentry.is_empty() { + sc_service::Role::Sentry { + validators: self.sentry.clone(), + } + } else { + sc_service::Role::Full + }) + } + + fn force_authoring(&self) -> Result { + // Imply forced authoring on --dev + Ok(self.shared_params.dev || self.force_authoring) + } + + fn prometheus_config(&self) -> Result> { + if self.no_prometheus { + Ok(None) + } else { + let prometheus_interface: &str = if self.prometheus_external { + "0.0.0.0" + } else { + "127.0.0.1" + }; + + Ok(Some(PrometheusConfig::new_with_default_registry( + parse_address( + &format!("{}:{}", prometheus_interface, 9615), + self.prometheus_port, + )?, + ))) + } + } + + fn disable_grandpa(&self) -> Result { + Ok(self.no_grandpa) + } + + fn rpc_ws_max_connections(&self) -> Result> { + Ok(self.ws_max_connections) + } + + fn rpc_cors(&self, is_dev: bool) -> Result>> { + Ok(self + .rpc_cors + .clone() + .unwrap_or_else(|| { + if is_dev { + log::warn!("Running in --dev mode, RPC CORS has been disabled."); + Cors::All + } else { + Cors::List(vec![ + 
"http://localhost:*".into(), + "http://127.0.0.1:*".into(), + "https://localhost:*".into(), + "https://127.0.0.1:*".into(), + "https://polkadot.js.org".into(), + ]) + } + }) + .into()) + } + + fn rpc_http(&self) -> Result> { + let rpc_interface: &str = + interface_str(self.rpc_external, self.unsafe_rpc_external, self.validator)?; + + Ok(Some(parse_address( + &format!("{}:{}", rpc_interface, 9933), + self.rpc_port, + )?)) + } + + fn rpc_ws(&self) -> Result> { + let ws_interface: &str = + interface_str(self.ws_external, self.unsafe_ws_external, self.validator)?; + + Ok(Some(parse_address( + &format!("{}:{}", ws_interface, 9944), + self.ws_port, + )?)) + } + + fn offchain_worker(&self, role: &Role) -> Result { + Ok(match (&self.offchain_worker, role) { + (OffchainWorkerEnabled::WhenValidating, Role::Authority { .. }) => true, + (OffchainWorkerEnabled::Always, _) => true, + (OffchainWorkerEnabled::Never, _) => false, + (OffchainWorkerEnabled::WhenValidating, _) => false, + }) + } + + fn transaction_pool(&self) -> Result { + Ok(self.pool_config.transaction_pool()) + } + + fn max_runtime_instances(&self) -> Result> { + Ok(self.max_runtime_instances.map(|x| x.min(256))) + } } /// Check whether a node name is considered as valid. pub fn is_node_name_valid(_name: &str) -> std::result::Result<(), &str> { - let name = _name.to_string(); - if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { - return Err("Node name too long"); - } - - let invalid_chars = r"[\\.@]"; - let re = Regex::new(invalid_chars).unwrap(); - if re.is_match(&name) { - return Err("Node name should not contain invalid chars such as '.' 
and '@'"); - } - - let invalid_patterns = r"(https?:\\/+)?(www)+"; - let re = Regex::new(invalid_patterns).unwrap(); - if re.is_match(&name) { - return Err("Node name should not contain urls"); - } - - Ok(()) + let name = _name.to_string(); + if name.chars().count() >= crate::NODE_NAME_MAX_LENGTH { + return Err("Node name too long"); + } + + let invalid_chars = r"[\\.@]"; + let re = Regex::new(invalid_chars).unwrap(); + if re.is_match(&name) { + return Err("Node name should not contain invalid chars such as '.' and '@'"); + } + + let invalid_patterns = r"(https?:\\/+)?(www)+"; + let re = Regex::new(invalid_patterns).unwrap(); + if re.is_match(&name) { + return Err("Node name should not contain urls"); + } + + Ok(()) } fn parse_address(address: &str, port: Option) -> std::result::Result { - let mut address: SocketAddr = address - .parse() - .map_err(|_| format!("Invalid address: {}", address))?; - if let Some(port) = port { - address.set_port(port); - } - - Ok(address) + let mut address: SocketAddr = address + .parse() + .map_err(|_| format!("Invalid address: {}", address))?; + if let Some(port) = port { + address.set_port(port); + } + + Ok(address) } fn interface_str( - is_external: bool, - is_unsafe_external: bool, - is_validator: bool, + is_external: bool, + is_unsafe_external: bool, + is_validator: bool, ) -> Result<&'static str> { - if is_external && is_validator { - return Err(Error::Input( - "--rpc-external and --ws-external options shouldn't be \ + if is_external && is_validator { + return Err(Error::Input( + "--rpc-external and --ws-external options shouldn't be \ used if the node is running as a validator. Use `--unsafe-rpc-external` if you understand \ the risks. See the options description for more information." 
- .to_owned(), - )); - } + .to_owned(), + )); + } - if is_external || is_unsafe_external { - log::warn!( - "It isn't safe to expose RPC publicly without a proxy server that filters \ + if is_external || is_unsafe_external { + log::warn!( + "It isn't safe to expose RPC publicly without a proxy server that filters \ available set of RPC methods." - ); + ); - Ok("0.0.0.0") - } else { - Ok("127.0.0.1") - } + Ok("0.0.0.0") + } else { + Ok("127.0.0.1") + } } #[derive(Debug)] enum TelemetryParsingError { - MissingVerbosity, - VerbosityParsingError(std::num::ParseIntError), + MissingVerbosity, + VerbosityParsingError(std::num::ParseIntError), } impl std::error::Error for TelemetryParsingError {} impl std::fmt::Display for TelemetryParsingError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match &*self { - TelemetryParsingError::MissingVerbosity => write!(f, "Verbosity level missing"), - TelemetryParsingError::VerbosityParsingError(e) => write!(f, "{}", e), - } - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match &*self { + TelemetryParsingError::MissingVerbosity => write!(f, "Verbosity level missing"), + TelemetryParsingError::VerbosityParsingError(e) => write!(f, "{}", e), + } + } } fn parse_telemetry_endpoints(s: &str) -> std::result::Result<(String, u8), TelemetryParsingError> { - let pos = s.find(' '); - match pos { - None => Err(TelemetryParsingError::MissingVerbosity), - Some(pos_) => { - let url = s[..pos_].to_string(); - let verbosity = s[pos_ + 1..] - .parse() - .map_err(TelemetryParsingError::VerbosityParsingError)?; - Ok((url, verbosity)) - } - } + let pos = s.find(' '); + match pos { + None => Err(TelemetryParsingError::MissingVerbosity), + Some(pos_) => { + let url = s[..pos_].to_string(); + let verbosity = s[pos_ + 1..] 
+ .parse() + .map_err(TelemetryParsingError::VerbosityParsingError)?; + Ok((url, verbosity)) + } + } } /// CORS setting @@ -542,58 +542,58 @@ fn parse_telemetry_endpoints(s: &str) -> std::result::Result<(String, u8), Telem /// handling of `structopt`. #[derive(Clone, Debug)] pub enum Cors { - /// All hosts allowed. - All, - /// Only hosts on the list are allowed. - List(Vec), + /// All hosts allowed. + All, + /// Only hosts on the list are allowed. + List(Vec), } impl From for Option> { - fn from(cors: Cors) -> Self { - match cors { - Cors::All => None, - Cors::List(list) => Some(list), - } - } + fn from(cors: Cors) -> Self { + match cors { + Cors::All => None, + Cors::List(list) => Some(list), + } + } } /// Parse cors origins. fn parse_cors(s: &str) -> std::result::Result> { - let mut is_all = false; - let mut origins = Vec::new(); - for part in s.split(',') { - match part { - "all" | "*" => { - is_all = true; - break; - } - other => origins.push(other.to_owned()), - } - } - - Ok(if is_all { - Cors::All - } else { - Cors::List(origins) - }) + let mut is_all = false; + let mut origins = Vec::new(); + for part in s.split(',') { + match part { + "all" | "*" => { + is_all = true; + break; + } + other => origins.push(other.to_owned()), + } + } + + Ok(if is_all { + Cors::All + } else { + Cors::List(origins) + }) } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn tests_node_name_good() { - assert!(is_node_name_valid("short name").is_ok()); - } - - #[test] - fn tests_node_name_bad() { - assert!(is_node_name_valid("long names are not very cool for the ui").is_err()); - assert!(is_node_name_valid("Dots.not.Ok").is_err()); - assert!(is_node_name_valid("http://visit.me").is_err()); - assert!(is_node_name_valid("https://visit.me").is_err()); - assert!(is_node_name_valid("www.visit.me").is_err()); - assert!(is_node_name_valid("email@domain").is_err()); - } + use super::*; + + #[test] + fn tests_node_name_good() { + assert!(is_node_name_valid("short name").is_ok()); + } 
+ + #[test] + fn tests_node_name_bad() { + assert!(is_node_name_valid("long names are not very cool for the ui").is_err()); + assert!(is_node_name_valid("Dots.not.Ok").is_err()); + assert!(is_node_name_valid("http://visit.me").is_err()); + assert!(is_node_name_valid("https://visit.me").is_err()); + assert!(is_node_name_valid("www.visit.me").is_err()); + assert!(is_node_name_valid("email@domain").is_err()); + } } diff --git a/client/cli/src/config.rs b/client/cli/src/config.rs index 04a6647402..a0d86d3dbc 100644 --- a/client/cli/src/config.rs +++ b/client/cli/src/config.rs @@ -16,18 +16,18 @@ //! Configuration trait for a CLI based on substrate +use crate::arg_enums::Database; use crate::error::Result; use crate::{ - init_logger, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, - PruningParams, SharedParams, SubstrateCli, + init_logger, ImportParams, KeystoreParams, NetworkParams, NodeKeyParams, PruningParams, + SharedParams, SubstrateCli, }; -use crate::arg_enums::Database; use app_dirs::{AppDataType, AppInfo}; use names::{Generator, Name}; use sc_service::config::{ - Configuration, DatabaseConfig, ExecutionStrategies, ExtTransport, KeystoreConfig, - NetworkConfiguration, NodeKeyConfig, PrometheusConfig, PruningMode, Role, TelemetryEndpoints, - TransactionPoolOptions, WasmExecutionMethod, + Configuration, DatabaseConfig, ExecutionStrategies, ExtTransport, KeystoreConfig, + NetworkConfiguration, NodeKeyConfig, PrometheusConfig, PruningMode, Role, TelemetryEndpoints, + TransactionPoolOptions, WasmExecutionMethod, }; use sc_service::{ChainSpec, TracingReceiver}; use std::future::Future; @@ -44,436 +44,435 @@ pub(crate) const DEFAULT_NETWORK_CONFIG_PATH: &'static str = "network"; /// A trait that allows converting an object to a Configuration pub trait CliConfiguration: Sized { - /// Get the SharedParams for this object - fn shared_params(&self) -> &SharedParams; - - /// Get the ImportParams for this object - fn import_params(&self) -> Option<&ImportParams> 
{ - None - } - - /// Get the PruningParams for this object - fn pruning_params(&self) -> Option<&PruningParams> { - self.import_params().map(|x| &x.pruning_params) - } - - /// Get the KeystoreParams for this object - fn keystore_params(&self) -> Option<&KeystoreParams> { - None - } - - /// Get the NetworkParams for this object - fn network_params(&self) -> Option<&NetworkParams> { - None - } - - /// Get the NodeKeyParams for this object - fn node_key_params(&self) -> Option<&NodeKeyParams> { - self.network_params() - .map(|x| &x.node_key_params) - } - - /// Get the base path of the configuration (if any) - /// - /// By default this is retrieved from `SharedParams`. - fn base_path(&self) -> Result> { - Ok(self.shared_params().base_path()) - } - - /// Returns `true` if the node is for development or not - /// - /// By default this is retrieved from `SharedParams`. - fn is_dev(&self) -> Result { - Ok(self.shared_params().is_dev()) - } - - /// Gets the role - /// - /// By default this is `Role::Full`. - fn role(&self, _is_dev: bool) -> Result { - Ok(Role::Full) - } - - /// Get the transaction pool options - /// - /// By default this is `TransactionPoolOptions::default()`. - fn transaction_pool(&self) -> Result { - Ok(Default::default()) - } - - /// Get the network configuration - /// - /// By default this is retrieved from `NetworkParams` if it is available otherwise it creates - /// a default `NetworkConfiguration` based on `node_name`, `client_id`, `node_key` and - /// `net_config_dir`. 
- fn network_config( - &self, - chain_spec: &Box, - is_dev: bool, - net_config_dir: PathBuf, - client_id: &str, - node_name: &str, - node_key: NodeKeyConfig, - ) -> Result { - Ok(if let Some(network_params) = self.network_params() { - network_params.network_config( - chain_spec, - is_dev, - Some(net_config_dir), - client_id, - node_name, - node_key, - ) - } else { - NetworkConfiguration::new( - node_name, - client_id, - node_key, - Some(net_config_dir), - ) - }) - } - - /// Get the keystore configuration. - /// - /// Bu default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses - /// `KeystoreConfig::InMemory`. - fn keystore_config(&self, base_path: &PathBuf) -> Result { - self.keystore_params() - .map(|x| x.keystore_config(base_path)) - .unwrap_or(Ok(KeystoreConfig::InMemory)) - } - - /// Get the database cache size. - /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `None`. - fn database_cache_size(&self) -> Result> { - Ok(self.import_params() - .map(|x| x.database_cache_size()) - .unwrap_or(Default::default())) - } - - /// Get the database backend variant. - /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `None`. - fn database(&self) -> Result> { - Ok(self.import_params().map(|x| x.database())) - } - - /// Get the database configuration. - /// - /// By default this is retrieved from `SharedParams` - fn database_config(&self, - base_path: &PathBuf, - cache_size: usize, - database: Database, - ) -> Result { - Ok(self.shared_params().database_config( - base_path, - cache_size, - database, - )) - } - - /// Get the state cache size. - /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. - fn state_cache_size(&self) -> Result { - Ok(self.import_params() - .map(|x| x.state_cache_size()) - .unwrap_or(Default::default())) - } - - /// Get the state cache child ratio (if any). - /// - /// By default this is `None`. 
- fn state_cache_child_ratio(&self) -> Result> { - Ok(Default::default()) - } - - /// Get the pruning mode. - /// - /// By default this is retrieved from `PruningMode` if it is available. Otherwise its - /// `PruningMode::default()`. - fn pruning(&self, is_dev: bool, role: &Role) -> Result { - self.pruning_params() - .map(|x| x.pruning(is_dev, role)) - .unwrap_or(Ok(Default::default())) - } - - /// Get the chain ID (string). - /// - /// By default this is retrieved from `SharedParams`. - fn chain_id(&self, is_dev: bool) -> Result { - Ok(self.shared_params().chain_id(is_dev)) - } - - /// Get the name of the node. - /// - /// By default a random name is generated. - fn node_name(&self) -> Result { - Ok(generate_node_name()) - } - - /// Get the WASM execution method. - /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its - /// `WasmExecutionMethod::default()`. - fn wasm_method(&self) -> Result { - Ok(self.import_params() - .map(|x| x.wasm_method()) - .unwrap_or(Default::default())) - } - - /// Get the execution strategies. - /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its - /// `ExecutionStrategies::default()`. - fn execution_strategies(&self, is_dev: bool) -> Result { - Ok(self.import_params() - .map(|x| x.execution_strategies(is_dev)) - .unwrap_or(Default::default())) - } - - /// Get the RPC HTTP address (`None` if disabled). - /// - /// By default this is `None`. - fn rpc_http(&self) -> Result> { - Ok(Default::default()) - } - - /// Get the RPC websocket address (`None` if disabled). - /// - /// By default this is `None`. - fn rpc_ws(&self) -> Result> { - Ok(Default::default()) - } - - /// Get the RPC websockets maximum connections (`None` if unlimited). - /// - /// By default this is `None`. - fn rpc_ws_max_connections(&self) -> Result> { - Ok(Default::default()) - } - - /// Get the RPC cors (`None` if disabled) - /// - /// By default this is `None`. 
- fn rpc_cors(&self, _is_dev: bool) -> Result>> { - Ok(Some(Vec::new())) - } - - /// Get the prometheus configuration (`None` if disabled) - /// - /// By default this is `None`. - fn prometheus_config(&self) -> Result> { - Ok(Default::default()) - } - - /// Get the telemetry endpoints (if any) - /// - /// By default this is retrieved from the chain spec loaded by `load_spec`. - fn telemetry_endpoints( - &self, - chain_spec: &Box, - ) -> Result> { - Ok(chain_spec.telemetry_endpoints().clone()) - } - - /// Get the telemetry external transport - /// - /// By default this is `None`. - fn telemetry_external_transport(&self) -> Result> { - Ok(Default::default()) - } - - /// Get the default value for heap pages - /// - /// By default this is `None`. - fn default_heap_pages(&self) -> Result> { - Ok(Default::default()) - } - - /// Returns `Ok(true)` if offchain worker should be used - /// - /// By default this is `false`. - fn offchain_worker(&self, _role: &Role) -> Result { - Ok(Default::default()) - } - - /// Returns `Ok(true)` if authoring should be forced - /// - /// By default this is `false`. - fn force_authoring(&self) -> Result { - Ok(Default::default()) - } - - /// Returns `Ok(true)` if grandpa should be disabled - /// - /// By default this is `false`. - fn disable_grandpa(&self) -> Result { - Ok(Default::default()) - } - - /// Get the development key seed from the current object - /// - /// By default this is `None`. - fn dev_key_seed(&self, _is_dev: bool) -> Result> { - Ok(Default::default()) - } - - /// Get the tracing targets from the current object (if any) - /// - /// By default this is retrieved from `ImportParams` if it is available. Otherwise its - /// `None`. - fn tracing_targets(&self) -> Result> { - Ok(self.import_params() - .map(|x| x.tracing_targets()) - .unwrap_or(Default::default())) - } - - /// Get the TracingReceiver value from the current object - /// - /// By default this is retrieved from `ImportParams` if it is available. 
Otherwise its - /// `TracingReceiver::default()`. - fn tracing_receiver(&self) -> Result { - Ok(self.import_params() - .map(|x| x.tracing_receiver()) - .unwrap_or(Default::default())) - } - - /// Get the node key from the current object - /// - /// By default this is retrieved from `NodeKeyParams` if it is available. Otherwise its - /// `NodeKeyConfig::default()`. - fn node_key(&self, net_config_dir: &PathBuf) -> Result { - self.node_key_params() - .map(|x| x.node_key(net_config_dir)) - .unwrap_or(Ok(Default::default())) - } - - /// Get maximum runtime instances - /// - /// By default this is `None`. - fn max_runtime_instances(&self) -> Result> { - Ok(Default::default()) - } - - /// Activate or not the automatic announcing of blocks after import - /// - /// By default this is `false`. - fn announce_block(&self) -> Result { - Ok(true) - } - - /// Create a Configuration object from the current object - fn create_configuration( - &self, - cli: &C, - task_executor: Arc + Send>>) + Send + Sync>, - ) -> Result { - let is_dev = self.is_dev()?; - let chain_id = self.chain_id(is_dev)?; - let chain_spec = cli.load_spec(chain_id.as_str())?; - let config_dir = self - .base_path()? 
- .unwrap_or_else(|| { - app_dirs::get_app_root( - AppDataType::UserData, - &AppInfo { - name: C::executable_name(), - author: C::author(), - }, - ) - .expect("app directories exist on all supported platforms; qed") - }) - .join("chains") - .join(chain_spec.id()); - let net_config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); - let client_id = C::client_id(); - let database_cache_size = self.database_cache_size()?.unwrap_or(128); - let database = self.database()?.unwrap_or(Database::RocksDb); - let node_key = self.node_key(&net_config_dir)?; - let role = self.role(is_dev)?; - let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); - - Ok(Configuration { - impl_name: C::impl_name(), - impl_version: C::impl_version(), - task_executor, - transaction_pool: self.transaction_pool()?, - network: self.network_config( - &chain_spec, - is_dev, - net_config_dir, - client_id.as_str(), - self.node_name()?.as_str(), - node_key, - )?, - keystore: self.keystore_config(&config_dir)?, - database: self.database_config(&config_dir, database_cache_size, database)?, - state_cache_size: self.state_cache_size()?, - state_cache_child_ratio: self.state_cache_child_ratio()?, - pruning: self.pruning(is_dev, &role)?, - wasm_method: self.wasm_method()?, - execution_strategies: self.execution_strategies(is_dev)?, - rpc_http: self.rpc_http()?, - rpc_ws: self.rpc_ws()?, - rpc_ws_max_connections: self.rpc_ws_max_connections()?, - rpc_cors: self.rpc_cors(is_dev)?, - prometheus_config: self.prometheus_config()?, - telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?, - telemetry_external_transport: self.telemetry_external_transport()?, - default_heap_pages: self.default_heap_pages()?, - offchain_worker: self.offchain_worker(&role)?, - force_authoring: self.force_authoring()?, - disable_grandpa: self.disable_grandpa()?, - dev_key_seed: self.dev_key_seed(is_dev)?, - tracing_targets: self.tracing_targets()?, - tracing_receiver: self.tracing_receiver()?, - chain_spec, - 
max_runtime_instances, - announce_block: self.announce_block()?, - role, - }) - } - - /// Get the filters for the logging. - /// - /// By default this is retrieved from `SharedParams`. - fn log_filters(&self) -> Result> { - Ok(self.shared_params().log_filters()) - } - - /// Initialize substrate. This must be done only once. - /// - /// This method: - /// - /// 1. Set the panic handler - /// 2. Raise the FD limit - /// 3. Initialize the logger - fn init(&self) -> Result<()> { - let logger_pattern = self.log_filters()?.unwrap_or_default(); - - sp_panic_handler::set(C::support_url(), C::impl_version()); - - fdlimit::raise_fd_limit(); - init_logger(logger_pattern.as_str()); - - Ok(()) - } + /// Get the SharedParams for this object + fn shared_params(&self) -> &SharedParams; + + /// Get the ImportParams for this object + fn import_params(&self) -> Option<&ImportParams> { + None + } + + /// Get the PruningParams for this object + fn pruning_params(&self) -> Option<&PruningParams> { + self.import_params().map(|x| &x.pruning_params) + } + + /// Get the KeystoreParams for this object + fn keystore_params(&self) -> Option<&KeystoreParams> { + None + } + + /// Get the NetworkParams for this object + fn network_params(&self) -> Option<&NetworkParams> { + None + } + + /// Get the NodeKeyParams for this object + fn node_key_params(&self) -> Option<&NodeKeyParams> { + self.network_params().map(|x| &x.node_key_params) + } + + /// Get the base path of the configuration (if any) + /// + /// By default this is retrieved from `SharedParams`. + fn base_path(&self) -> Result> { + Ok(self.shared_params().base_path()) + } + + /// Returns `true` if the node is for development or not + /// + /// By default this is retrieved from `SharedParams`. + fn is_dev(&self) -> Result { + Ok(self.shared_params().is_dev()) + } + + /// Gets the role + /// + /// By default this is `Role::Full`. 
+ fn role(&self, _is_dev: bool) -> Result { + Ok(Role::Full) + } + + /// Get the transaction pool options + /// + /// By default this is `TransactionPoolOptions::default()`. + fn transaction_pool(&self) -> Result { + Ok(Default::default()) + } + + /// Get the network configuration + /// + /// By default this is retrieved from `NetworkParams` if it is available otherwise it creates + /// a default `NetworkConfiguration` based on `node_name`, `client_id`, `node_key` and + /// `net_config_dir`. + fn network_config( + &self, + chain_spec: &Box, + is_dev: bool, + net_config_dir: PathBuf, + client_id: &str, + node_name: &str, + node_key: NodeKeyConfig, + ) -> Result { + Ok(if let Some(network_params) = self.network_params() { + network_params.network_config( + chain_spec, + is_dev, + Some(net_config_dir), + client_id, + node_name, + node_key, + ) + } else { + NetworkConfiguration::new(node_name, client_id, node_key, Some(net_config_dir)) + }) + } + + /// Get the keystore configuration. + /// + /// Bu default this is retrieved from `KeystoreParams` if it is available. Otherwise it uses + /// `KeystoreConfig::InMemory`. + fn keystore_config(&self, base_path: &PathBuf) -> Result { + self.keystore_params() + .map(|x| x.keystore_config(base_path)) + .unwrap_or(Ok(KeystoreConfig::InMemory)) + } + + /// Get the database cache size. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `None`. + fn database_cache_size(&self) -> Result> { + Ok(self + .import_params() + .map(|x| x.database_cache_size()) + .unwrap_or(Default::default())) + } + + /// Get the database backend variant. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `None`. + fn database(&self) -> Result> { + Ok(self.import_params().map(|x| x.database())) + } + + /// Get the database configuration. 
+ /// + /// By default this is retrieved from `SharedParams` + fn database_config( + &self, + base_path: &PathBuf, + cache_size: usize, + database: Database, + ) -> Result { + Ok(self + .shared_params() + .database_config(base_path, cache_size, database)) + } + + /// Get the state cache size. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its `0`. + fn state_cache_size(&self) -> Result { + Ok(self + .import_params() + .map(|x| x.state_cache_size()) + .unwrap_or(Default::default())) + } + + /// Get the state cache child ratio (if any). + /// + /// By default this is `None`. + fn state_cache_child_ratio(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the pruning mode. + /// + /// By default this is retrieved from `PruningMode` if it is available. Otherwise its + /// `PruningMode::default()`. + fn pruning(&self, is_dev: bool, role: &Role) -> Result { + self.pruning_params() + .map(|x| x.pruning(is_dev, role)) + .unwrap_or(Ok(Default::default())) + } + + /// Get the chain ID (string). + /// + /// By default this is retrieved from `SharedParams`. + fn chain_id(&self, is_dev: bool) -> Result { + Ok(self.shared_params().chain_id(is_dev)) + } + + /// Get the name of the node. + /// + /// By default a random name is generated. + fn node_name(&self) -> Result { + Ok(generate_node_name()) + } + + /// Get the WASM execution method. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `WasmExecutionMethod::default()`. + fn wasm_method(&self) -> Result { + Ok(self + .import_params() + .map(|x| x.wasm_method()) + .unwrap_or(Default::default())) + } + + /// Get the execution strategies. + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `ExecutionStrategies::default()`. 
+ fn execution_strategies(&self, is_dev: bool) -> Result { + Ok(self + .import_params() + .map(|x| x.execution_strategies(is_dev)) + .unwrap_or(Default::default())) + } + + /// Get the RPC HTTP address (`None` if disabled). + /// + /// By default this is `None`. + fn rpc_http(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the RPC websocket address (`None` if disabled). + /// + /// By default this is `None`. + fn rpc_ws(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the RPC websockets maximum connections (`None` if unlimited). + /// + /// By default this is `None`. + fn rpc_ws_max_connections(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the RPC cors (`None` if disabled) + /// + /// By default this is `None`. + fn rpc_cors(&self, _is_dev: bool) -> Result>> { + Ok(Some(Vec::new())) + } + + /// Get the prometheus configuration (`None` if disabled) + /// + /// By default this is `None`. + fn prometheus_config(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the telemetry endpoints (if any) + /// + /// By default this is retrieved from the chain spec loaded by `load_spec`. + fn telemetry_endpoints( + &self, + chain_spec: &Box, + ) -> Result> { + Ok(chain_spec.telemetry_endpoints().clone()) + } + + /// Get the telemetry external transport + /// + /// By default this is `None`. + fn telemetry_external_transport(&self) -> Result> { + Ok(Default::default()) + } + + /// Get the default value for heap pages + /// + /// By default this is `None`. + fn default_heap_pages(&self) -> Result> { + Ok(Default::default()) + } + + /// Returns `Ok(true)` if offchain worker should be used + /// + /// By default this is `false`. + fn offchain_worker(&self, _role: &Role) -> Result { + Ok(Default::default()) + } + + /// Returns `Ok(true)` if authoring should be forced + /// + /// By default this is `false`. 
+ fn force_authoring(&self) -> Result { + Ok(Default::default()) + } + + /// Returns `Ok(true)` if grandpa should be disabled + /// + /// By default this is `false`. + fn disable_grandpa(&self) -> Result { + Ok(Default::default()) + } + + /// Get the development key seed from the current object + /// + /// By default this is `None`. + fn dev_key_seed(&self, _is_dev: bool) -> Result> { + Ok(Default::default()) + } + + /// Get the tracing targets from the current object (if any) + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `None`. + fn tracing_targets(&self) -> Result> { + Ok(self + .import_params() + .map(|x| x.tracing_targets()) + .unwrap_or(Default::default())) + } + + /// Get the TracingReceiver value from the current object + /// + /// By default this is retrieved from `ImportParams` if it is available. Otherwise its + /// `TracingReceiver::default()`. + fn tracing_receiver(&self) -> Result { + Ok(self + .import_params() + .map(|x| x.tracing_receiver()) + .unwrap_or(Default::default())) + } + + /// Get the node key from the current object + /// + /// By default this is retrieved from `NodeKeyParams` if it is available. Otherwise its + /// `NodeKeyConfig::default()`. + fn node_key(&self, net_config_dir: &PathBuf) -> Result { + self.node_key_params() + .map(|x| x.node_key(net_config_dir)) + .unwrap_or(Ok(Default::default())) + } + + /// Get maximum runtime instances + /// + /// By default this is `None`. + fn max_runtime_instances(&self) -> Result> { + Ok(Default::default()) + } + + /// Activate or not the automatic announcing of blocks after import + /// + /// By default this is `false`. 
+ fn announce_block(&self) -> Result { + Ok(true) + } + + /// Create a Configuration object from the current object + fn create_configuration( + &self, + cli: &C, + task_executor: Arc + Send>>) + Send + Sync>, + ) -> Result { + let is_dev = self.is_dev()?; + let chain_id = self.chain_id(is_dev)?; + let chain_spec = cli.load_spec(chain_id.as_str())?; + let config_dir = self + .base_path()? + .unwrap_or_else(|| { + app_dirs::get_app_root( + AppDataType::UserData, + &AppInfo { + name: C::executable_name(), + author: C::author(), + }, + ) + .expect("app directories exist on all supported platforms; qed") + }) + .join("chains") + .join(chain_spec.id()); + let net_config_dir = config_dir.join(DEFAULT_NETWORK_CONFIG_PATH); + let client_id = C::client_id(); + let database_cache_size = self.database_cache_size()?.unwrap_or(128); + let database = self.database()?.unwrap_or(Database::RocksDb); + let node_key = self.node_key(&net_config_dir)?; + let role = self.role(is_dev)?; + let max_runtime_instances = self.max_runtime_instances()?.unwrap_or(8); + + Ok(Configuration { + impl_name: C::impl_name(), + impl_version: C::impl_version(), + task_executor, + transaction_pool: self.transaction_pool()?, + network: self.network_config( + &chain_spec, + is_dev, + net_config_dir, + client_id.as_str(), + self.node_name()?.as_str(), + node_key, + )?, + keystore: self.keystore_config(&config_dir)?, + database: self.database_config(&config_dir, database_cache_size, database)?, + state_cache_size: self.state_cache_size()?, + state_cache_child_ratio: self.state_cache_child_ratio()?, + pruning: self.pruning(is_dev, &role)?, + wasm_method: self.wasm_method()?, + execution_strategies: self.execution_strategies(is_dev)?, + rpc_http: self.rpc_http()?, + rpc_ws: self.rpc_ws()?, + rpc_ws_max_connections: self.rpc_ws_max_connections()?, + rpc_cors: self.rpc_cors(is_dev)?, + prometheus_config: self.prometheus_config()?, + telemetry_endpoints: self.telemetry_endpoints(&chain_spec)?, + 
telemetry_external_transport: self.telemetry_external_transport()?, + default_heap_pages: self.default_heap_pages()?, + offchain_worker: self.offchain_worker(&role)?, + force_authoring: self.force_authoring()?, + disable_grandpa: self.disable_grandpa()?, + dev_key_seed: self.dev_key_seed(is_dev)?, + tracing_targets: self.tracing_targets()?, + tracing_receiver: self.tracing_receiver()?, + chain_spec, + max_runtime_instances, + announce_block: self.announce_block()?, + role, + }) + } + + /// Get the filters for the logging. + /// + /// By default this is retrieved from `SharedParams`. + fn log_filters(&self) -> Result> { + Ok(self.shared_params().log_filters()) + } + + /// Initialize substrate. This must be done only once. + /// + /// This method: + /// + /// 1. Set the panic handler + /// 2. Raise the FD limit + /// 3. Initialize the logger + fn init(&self) -> Result<()> { + let logger_pattern = self.log_filters()?.unwrap_or_default(); + + sp_panic_handler::set(C::support_url(), C::impl_version()); + + fdlimit::raise_fd_limit(); + init_logger(logger_pattern.as_str()); + + Ok(()) + } } /// Generate a valid random name for the node pub fn generate_node_name() -> String { - loop { - let node_name = Generator::with_naming(Name::Numbered) - .next() - .expect("RNG is available on all supported platforms; qed"); - let count = node_name.chars().count(); - - if count < NODE_NAME_MAX_LENGTH { - return node_name; - } - }; + loop { + let node_name = Generator::with_naming(Name::Numbered) + .next() + .expect("RNG is available on all supported platforms; qed"); + let count = node_name.chars().count(); + + if count < NODE_NAME_MAX_LENGTH { + return node_name; + } + } } diff --git a/client/cli/src/error.rs b/client/cli/src/error.rs index edc1adecc7..5b0afe031f 100644 --- a/client/cli/src/error.rs +++ b/client/cli/src/error.rs @@ -22,49 +22,49 @@ pub type Result = std::result::Result; /// Error type for the CLI. 
#[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Io error - Io(std::io::Error), - /// Cli error - Cli(clap::Error), - /// Service error - Service(sc_service::Error), - /// Client error - Client(sp_blockchain::Error), - /// Input error - #[from(ignore)] - Input(String), - /// Invalid listen multiaddress - #[display(fmt="Invalid listen multiaddress")] - InvalidListenMultiaddress, - /// Other uncategorized error. - #[from(ignore)] - Other(String), + /// Io error + Io(std::io::Error), + /// Cli error + Cli(clap::Error), + /// Service error + Service(sc_service::Error), + /// Client error + Client(sp_blockchain::Error), + /// Input error + #[from(ignore)] + Input(String), + /// Invalid listen multiaddress + #[display(fmt = "Invalid listen multiaddress")] + InvalidListenMultiaddress, + /// Other uncategorized error. + #[from(ignore)] + Other(String), } /// Must be implemented explicitly because `derive_more` won't generate this /// case due to conflicting derive for `Other(String)`. 
impl std::convert::From for Error { - fn from(s: String) -> Error { - Error::Input(s) - } + fn from(s: String) -> Error { + Error::Input(s) + } } impl std::convert::From<&str> for Error { - fn from(s: &str) -> Error { - Error::Input(s.to_string()) - } + fn from(s: &str) -> Error { + Error::Input(s.to_string()) + } } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Io(ref err) => Some(err), - Error::Cli(ref err) => Some(err), - Error::Service(ref err) => Some(err), - Error::Client(ref err) => Some(err), - Error::Input(_) => None, - Error::InvalidListenMultiaddress => None, - Error::Other(_) => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Io(ref err) => Some(err), + Error::Cli(ref err) => Some(err), + Error::Service(ref err) => Some(err), + Error::Client(ref err) => Some(err), + Error::Input(_) => None, + Error::InvalidListenMultiaddress => None, + Error::Other(_) => None, + } + } } diff --git a/client/cli/src/lib.rs b/client/cli/src/lib.rs index 25b71059b1..da073bf34d 100644 --- a/client/cli/src/lib.rs +++ b/client/cli/src/lib.rs @@ -42,8 +42,8 @@ use std::pin::Pin; use std::sync::Arc; pub use structopt; use structopt::{ - clap::{self, AppSettings}, - StructOpt, + clap::{self, AppSettings}, + StructOpt, }; /// Substrate client CLI @@ -55,214 +55,216 @@ use structopt::{ /// StructOpt must not be in scope to use from_args (or the similar methods). This trait provides /// its own implementation that will fill the necessary field based on the trait's functions. pub trait SubstrateCli: Sized { - /// Implementation name. - fn impl_name() -> &'static str; - - /// Implementation version. - /// - /// By default this will look like this: 2.0.0-b950f731c-x86_64-linux-gnu where the hash is the - /// short commit hash of the commit of in the Git repository. - fn impl_version() -> &'static str; - - /// Executable file name. 
- fn executable_name() -> &'static str; - - /// Executable file description. - fn description() -> &'static str; - - /// Executable file author. - fn author() -> &'static str; - - /// Support URL. - fn support_url() -> &'static str; - - /// Copyright starting year (x-current year) - fn copyright_start_year() -> i32; - - /// Chain spec factory - fn load_spec(&self, id: &str) -> std::result::Result, String>; - - /// Helper function used to parse the command line arguments. This is the equivalent of - /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of - /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. - /// - /// To allow running the node without subcommand, tt also sets a few more settings: - /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. - /// - /// Gets the struct from the command line arguments. Print the - /// error message and quit the program in case of failure. - fn from_args() -> Self - where - Self: StructOpt + Sized, - { - ::from_iter(&mut std::env::args_os()) - } - - /// Helper function used to parse the command line arguments. This is the equivalent of - /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of - /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. - /// - /// To allow running the node without subcommand, it also sets a few more settings: - /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. - /// - /// Gets the struct from any iterator such as a `Vec` of your making. - /// Print the error message and quit the program in case of failure. 
- fn from_iter(iter: I) -> Self - where - Self: StructOpt + Sized, - I: IntoIterator, - I::Item: Into + Clone, - { - let app = ::clap(); - - let mut full_version = Self::impl_version().to_string(); - full_version.push_str("\n"); - - let app = app - .name(Self::executable_name()) - .author(Self::author()) - .about(Self::description()) - .version(full_version.as_str()) - .settings(&[ - AppSettings::GlobalVersion, - AppSettings::ArgsNegateSubcommands, - AppSettings::SubcommandsNegateReqs, - ]); - - ::from_clap(&app.get_matches_from(iter)) - } - - /// Helper function used to parse the command line arguments. This is the equivalent of - /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of - /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. - /// - /// To allow running the node without subcommand, it also sets a few more settings: - /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. - /// - /// Gets the struct from any iterator such as a `Vec` of your making. - /// Print the error message and quit the program in case of failure. - /// - /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are - /// used. It will return a [`clap::Error`], where the [`kind`] is a - /// [`ErrorKind::HelpDisplayed`] or [`ErrorKind::VersionDisplayed`] respectively. You must call - /// [`Error::exit`] or perform a [`std::process::exit`]. 
- fn try_from_iter(iter: I) -> clap::Result - where - Self: StructOpt + Sized, - I: IntoIterator, - I::Item: Into + Clone, - { - let app = ::clap(); - - let mut full_version = Self::impl_version().to_string(); - full_version.push_str("\n"); - - let app = app - .name(Self::executable_name()) - .author(Self::author()) - .about(Self::description()) - .version(full_version.as_str()); - - let matches = app.get_matches_from_safe(iter)?; - - Ok(::from_clap(&matches)) - } - - /// Returns the client ID: `{impl_name}/v{impl_version}` - fn client_id() -> String { - format!("{}/v{}", Self::impl_name(), Self::impl_version()) - } - - /// Only create a Configuration for the command provided in argument - fn create_configuration( - &self, - command: &T, - task_executor: Arc + Send>>) + Send + Sync>, - ) -> error::Result { - command.create_configuration(self, task_executor) - } - - /// Create a runner for the command provided in argument. This will create a Configuration and - /// a tokio runtime - fn create_runner(&self, command: &T) -> error::Result> { - command.init::()?; - Runner::new(self, command) - } + /// Implementation name. + fn impl_name() -> &'static str; + + /// Implementation version. + /// + /// By default this will look like this: 2.0.0-b950f731c-x86_64-linux-gnu where the hash is the + /// short commit hash of the commit of in the Git repository. + fn impl_version() -> &'static str; + + /// Executable file name. + fn executable_name() -> &'static str; + + /// Executable file description. + fn description() -> &'static str; + + /// Executable file author. + fn author() -> &'static str; + + /// Support URL. + fn support_url() -> &'static str; + + /// Copyright starting year (x-current year) + fn copyright_start_year() -> i32; + + /// Chain spec factory + fn load_spec(&self, id: &str) -> std::result::Result, String>; + + /// Helper function used to parse the command line arguments. 
This is the equivalent of + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of + /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// + /// To allow running the node without subcommand, tt also sets a few more settings: + /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. + /// + /// Gets the struct from the command line arguments. Print the + /// error message and quit the program in case of failure. + fn from_args() -> Self + where + Self: StructOpt + Sized, + { + ::from_iter(&mut std::env::args_os()) + } + + /// Helper function used to parse the command line arguments. This is the equivalent of + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of + /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// + /// To allow running the node without subcommand, it also sets a few more settings: + /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. + /// + /// Gets the struct from any iterator such as a `Vec` of your making. + /// Print the error message and quit the program in case of failure. + fn from_iter(iter: I) -> Self + where + Self: StructOpt + Sized, + I: IntoIterator, + I::Item: Into + Clone, + { + let app = ::clap(); + + let mut full_version = Self::impl_version().to_string(); + full_version.push_str("\n"); + + let app = app + .name(Self::executable_name()) + .author(Self::author()) + .about(Self::description()) + .version(full_version.as_str()) + .settings(&[ + AppSettings::GlobalVersion, + AppSettings::ArgsNegateSubcommands, + AppSettings::SubcommandsNegateReqs, + ]); + + ::from_clap(&app.get_matches_from(iter)) + } + + /// Helper function used to parse the command line arguments. 
This is the equivalent of + /// `structopt`'s `from_iter()` except that it takes a `VersionInfo` argument to provide the name of + /// the application, author, "about" and version. It will also set `AppSettings::GlobalVersion`. + /// + /// To allow running the node without subcommand, it also sets a few more settings: + /// `AppSettings::ArgsNegateSubcommands` and `AppSettings::SubcommandsNegateReqs`. + /// + /// Gets the struct from any iterator such as a `Vec` of your making. + /// Print the error message and quit the program in case of failure. + /// + /// **NOTE:** This method WILL NOT exit when `--help` or `--version` (or short versions) are + /// used. It will return a [`clap::Error`], where the [`kind`] is a + /// [`ErrorKind::HelpDisplayed`] or [`ErrorKind::VersionDisplayed`] respectively. You must call + /// [`Error::exit`] or perform a [`std::process::exit`]. + fn try_from_iter(iter: I) -> clap::Result + where + Self: StructOpt + Sized, + I: IntoIterator, + I::Item: Into + Clone, + { + let app = ::clap(); + + let mut full_version = Self::impl_version().to_string(); + full_version.push_str("\n"); + + let app = app + .name(Self::executable_name()) + .author(Self::author()) + .about(Self::description()) + .version(full_version.as_str()); + + let matches = app.get_matches_from_safe(iter)?; + + Ok(::from_clap(&matches)) + } + + /// Returns the client ID: `{impl_name}/v{impl_version}` + fn client_id() -> String { + format!("{}/v{}", Self::impl_name(), Self::impl_version()) + } + + /// Only create a Configuration for the command provided in argument + fn create_configuration( + &self, + command: &T, + task_executor: Arc + Send>>) + Send + Sync>, + ) -> error::Result { + command.create_configuration(self, task_executor) + } + + /// Create a runner for the command provided in argument. 
This will create a Configuration and + /// a tokio runtime + fn create_runner(&self, command: &T) -> error::Result> { + command.init::()?; + Runner::new(self, command) + } } /// Initialize the logger pub fn init_logger(pattern: &str) { - use ansi_term::Colour; - - let mut builder = env_logger::Builder::new(); - // Disable info logging by default for some modules: - builder.filter(Some("ws"), log::LevelFilter::Off); - builder.filter(Some("hyper"), log::LevelFilter::Warn); - builder.filter(Some("cranelift_wasm"), log::LevelFilter::Warn); - // Always log the special target `sc_tracing`, overrides global level - builder.filter(Some("sc_tracing"), log::LevelFilter::Info); - // Enable info for others. - builder.filter(None, log::LevelFilter::Info); - - if let Ok(lvl) = std::env::var("RUST_LOG") { - builder.parse_filters(&lvl); - } - - builder.parse_filters(pattern); - let isatty = atty::is(atty::Stream::Stderr); - let enable_color = isatty; - - builder.format(move |buf, record| { - let now = time::now(); - let timestamp = - time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); - - let mut output = if log::max_level() <= log::LevelFilter::Info { - format!( - "{} {}", - Colour::Black.bold().paint(timestamp), - record.args(), - ) - } else { - let name = ::std::thread::current() - .name() - .map_or_else(Default::default, |x| { - format!("{}", Colour::Blue.bold().paint(x)) - }); - let millis = (now.tm_nsec as f32 / 1000000.0).floor() as usize; - let timestamp = format!("{}.{}", timestamp, millis); - format!( - "{} {} {} {} {}", - Colour::Black.bold().paint(timestamp), - name, - record.level(), - record.target(), - record.args() - ) - }; - - if !isatty && record.level() <= log::Level::Info && atty::is(atty::Stream::Stdout) { - // duplicate INFO/WARN output to console - println!("{}", output); - } - - if !enable_color { - output = kill_color(output.as_ref()); - } - - writeln!(buf, "{}", output) - }); - - if builder.try_init().is_err() { - info!("💬 
Not registering Substrate logger, as there is already a global logger registered!"); - } + use ansi_term::Colour; + + let mut builder = env_logger::Builder::new(); + // Disable info logging by default for some modules: + builder.filter(Some("ws"), log::LevelFilter::Off); + builder.filter(Some("hyper"), log::LevelFilter::Warn); + builder.filter(Some("cranelift_wasm"), log::LevelFilter::Warn); + // Always log the special target `sc_tracing`, overrides global level + builder.filter(Some("sc_tracing"), log::LevelFilter::Info); + // Enable info for others. + builder.filter(None, log::LevelFilter::Info); + + if let Ok(lvl) = std::env::var("RUST_LOG") { + builder.parse_filters(&lvl); + } + + builder.parse_filters(pattern); + let isatty = atty::is(atty::Stream::Stderr); + let enable_color = isatty; + + builder.format(move |buf, record| { + let now = time::now(); + let timestamp = + time::strftime("%Y-%m-%d %H:%M:%S", &now).expect("Error formatting log timestamp"); + + let mut output = if log::max_level() <= log::LevelFilter::Info { + format!( + "{} {}", + Colour::Black.bold().paint(timestamp), + record.args(), + ) + } else { + let name = ::std::thread::current() + .name() + .map_or_else(Default::default, |x| { + format!("{}", Colour::Blue.bold().paint(x)) + }); + let millis = (now.tm_nsec as f32 / 1000000.0).floor() as usize; + let timestamp = format!("{}.{}", timestamp, millis); + format!( + "{} {} {} {} {}", + Colour::Black.bold().paint(timestamp), + name, + record.level(), + record.target(), + record.args() + ) + }; + + if !isatty && record.level() <= log::Level::Info && atty::is(atty::Stream::Stdout) { + // duplicate INFO/WARN output to console + println!("{}", output); + } + + if !enable_color { + output = kill_color(output.as_ref()); + } + + writeln!(buf, "{}", output) + }); + + if builder.try_init().is_err() { + info!( + "💬 Not registering Substrate logger, as there is already a global logger registered!" + ); + } } fn kill_color(s: &str) -> String { - lazy_static! 
{ - static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); - } - RE.replace_all(s, "").to_string() + lazy_static! { + static ref RE: Regex = Regex::new("\x1b\\[[^m]+m").expect("Error initializing color regex"); + } + RE.replace_all(s, "").to_string() } /// Reset the signal pipe (`SIGPIPE`) handler to the default one provided by the system. @@ -270,15 +272,15 @@ fn kill_color(s: &str) -> String { /// /// This should be called before calling any cli method or printing any output. pub fn reset_signal_pipe_handler() -> Result<()> { - #[cfg(target_family = "unix")] - { - use nix::sys::signal; + #[cfg(target_family = "unix")] + { + use nix::sys::signal; - unsafe { - signal::signal(signal::Signal::SIGPIPE, signal::SigHandler::SigDfl) - .map_err(|e| Error::Other(e.to_string()))?; - } - } + unsafe { + signal::signal(signal::Signal::SIGPIPE, signal::SigHandler::SigDfl) + .map_err(|e| Error::Other(e.to_string()))?; + } + } - Ok(()) + Ok(()) } diff --git a/client/cli/src/params/import_params.rs b/client/cli/src/params/import_params.rs index 95b04b039a..12b0c18ad8 100644 --- a/client/cli/src/params/import_params.rs +++ b/client/cli/src/params/import_params.rs @@ -15,9 +15,9 @@ // along with Substrate. If not, see . use crate::arg_enums::{ - ExecutionStrategy, TracingReceiver, WasmExecutionMethod, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, - DEFAULT_EXECUTION_IMPORT_BLOCK, DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, - DEFAULT_EXECUTION_SYNCING, Database, + Database, ExecutionStrategy, TracingReceiver, WasmExecutionMethod, + DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, DEFAULT_EXECUTION_IMPORT_BLOCK, + DEFAULT_EXECUTION_OFFCHAIN_WORKER, DEFAULT_EXECUTION_OTHER, DEFAULT_EXECUTION_SYNCING, }; use crate::params::PruningParams; use crate::Result; @@ -28,182 +28,189 @@ use structopt::StructOpt; /// Parameters for block import. 
#[derive(Debug, StructOpt, Clone)] pub struct ImportParams { - #[allow(missing_docs)] - #[structopt(flatten)] - pub pruning_params: PruningParams, - - /// Force start with unsafe pruning settings. - /// - /// When running as a validator it is highly recommended to disable state - /// pruning (i.e. 'archive') which is the default. The node will refuse to - /// start as a validator if pruning is enabled unless this option is set. - #[structopt(long = "unsafe-pruning")] - pub unsafe_pruning: bool, - - /// Method for executing Wasm runtime code. - #[structopt( + #[allow(missing_docs)] + #[structopt(flatten)] + pub pruning_params: PruningParams, + + /// Force start with unsafe pruning settings. + /// + /// When running as a validator it is highly recommended to disable state + /// pruning (i.e. 'archive') which is the default. The node will refuse to + /// start as a validator if pruning is enabled unless this option is set. + #[structopt(long = "unsafe-pruning")] + pub unsafe_pruning: bool, + + /// Method for executing Wasm runtime code. + #[structopt( long = "wasm-execution", value_name = "METHOD", possible_values = &WasmExecutionMethod::enabled_variants(), case_insensitive = true, default_value = "Interpreted" )] - pub wasm_method: WasmExecutionMethod, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub execution_strategies: ExecutionStrategiesParams, - - /// Select database backend to use. - #[structopt( - long = "database", - alias = "db", - value_name = "DB", - case_insensitive = true, - default_value = "RocksDb" - )] - pub database: Database, - - /// Limit the memory the database cache can use. - #[structopt(long = "db-cache", value_name = "MiB")] - pub database_cache_size: Option, - - /// Specify the state cache size. - #[structopt(long = "state-cache-size", value_name = "Bytes", default_value = "67108864")] - pub state_cache_size: usize, - - /// Comma separated list of targets for tracing. 
- #[structopt(long = "tracing-targets", value_name = "TARGETS")] - pub tracing_targets: Option, - - /// Receiver to process tracing messages. - #[structopt( + pub wasm_method: WasmExecutionMethod, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub execution_strategies: ExecutionStrategiesParams, + + /// Select database backend to use. + #[structopt( + long = "database", + alias = "db", + value_name = "DB", + case_insensitive = true, + default_value = "RocksDb" + )] + pub database: Database, + + /// Limit the memory the database cache can use. + #[structopt(long = "db-cache", value_name = "MiB")] + pub database_cache_size: Option, + + /// Specify the state cache size. + #[structopt( + long = "state-cache-size", + value_name = "Bytes", + default_value = "67108864" + )] + pub state_cache_size: usize, + + /// Comma separated list of targets for tracing. + #[structopt(long = "tracing-targets", value_name = "TARGETS")] + pub tracing_targets: Option, + + /// Receiver to process tracing messages. + #[structopt( long = "tracing-receiver", value_name = "RECEIVER", possible_values = &TracingReceiver::variants(), case_insensitive = true, default_value = "Log" )] - pub tracing_receiver: TracingReceiver, + pub tracing_receiver: TracingReceiver, } impl ImportParams { - /// Receiver to process tracing messages. - pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { - self.tracing_receiver.clone().into() - } - - /// Comma separated list of targets for tracing. - pub fn tracing_targets(&self) -> Option { - self.tracing_targets.clone() - } - - /// Specify the state cache size. 
- pub fn state_cache_size(&self) -> usize { - self.state_cache_size - } - - /// Get the WASM execution method from the parameters - pub fn wasm_method(&self) -> sc_service::config::WasmExecutionMethod { - self.wasm_method.into() - } - - /// Get execution strategies for the parameters - pub fn execution_strategies( - &self, - is_dev: bool, - ) -> ExecutionStrategies { - let exec = &self.execution_strategies; - let exec_all_or = |strat: ExecutionStrategy, default: ExecutionStrategy| { - exec.execution.unwrap_or(if strat == default && is_dev { - ExecutionStrategy::Native - } else { - strat - }).into() - }; - - ExecutionStrategies { - syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), - importing: exec_all_or(exec.execution_import_block, DEFAULT_EXECUTION_IMPORT_BLOCK), - block_construction: - exec_all_or(exec.execution_block_construction, DEFAULT_EXECUTION_BLOCK_CONSTRUCTION), - offchain_worker: - exec_all_or(exec.execution_offchain_worker, DEFAULT_EXECUTION_OFFCHAIN_WORKER), - other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), - } - } - - /// Get the pruning mode from the parameters - pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> Result { - self.pruning_params.pruning(unsafe_pruning, role) - } - - /// Limit the memory the database cache can use. - pub fn database_cache_size(&self) -> Option { - self.database_cache_size - } - - /// Limit the memory the database cache can use. - pub fn database(&self) -> Database { - self.database - } + /// Receiver to process tracing messages. + pub fn tracing_receiver(&self) -> sc_service::TracingReceiver { + self.tracing_receiver.clone().into() + } + + /// Comma separated list of targets for tracing. + pub fn tracing_targets(&self) -> Option { + self.tracing_targets.clone() + } + + /// Specify the state cache size. 
+ pub fn state_cache_size(&self) -> usize { + self.state_cache_size + } + + /// Get the WASM execution method from the parameters + pub fn wasm_method(&self) -> sc_service::config::WasmExecutionMethod { + self.wasm_method.into() + } + + /// Get execution strategies for the parameters + pub fn execution_strategies(&self, is_dev: bool) -> ExecutionStrategies { + let exec = &self.execution_strategies; + let exec_all_or = |strat: ExecutionStrategy, default: ExecutionStrategy| { + exec.execution + .unwrap_or(if strat == default && is_dev { + ExecutionStrategy::Native + } else { + strat + }) + .into() + }; + + ExecutionStrategies { + syncing: exec_all_or(exec.execution_syncing, DEFAULT_EXECUTION_SYNCING), + importing: exec_all_or(exec.execution_import_block, DEFAULT_EXECUTION_IMPORT_BLOCK), + block_construction: exec_all_or( + exec.execution_block_construction, + DEFAULT_EXECUTION_BLOCK_CONSTRUCTION, + ), + offchain_worker: exec_all_or( + exec.execution_offchain_worker, + DEFAULT_EXECUTION_OFFCHAIN_WORKER, + ), + other: exec_all_or(exec.execution_other, DEFAULT_EXECUTION_OTHER), + } + } + + /// Get the pruning mode from the parameters + pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> Result { + self.pruning_params.pruning(unsafe_pruning, role) + } + + /// Limit the memory the database cache can use. + pub fn database_cache_size(&self) -> Option { + self.database_cache_size + } + + /// Limit the memory the database cache can use. + pub fn database(&self) -> Database { + self.database + } } /// Execution strategies parameters. #[derive(Debug, StructOpt, Clone)] pub struct ExecutionStrategiesParams { - /// The means of execution used when calling into the runtime while syncing blocks. - #[structopt( + /// The means of execution used when calling into the runtime while syncing blocks. 
+ #[structopt( long = "execution-syncing", value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), case_insensitive = true, default_value = DEFAULT_EXECUTION_SYNCING.as_str(), )] - pub execution_syncing: ExecutionStrategy, + pub execution_syncing: ExecutionStrategy, - /// The means of execution used when calling into the runtime while importing blocks. - #[structopt( + /// The means of execution used when calling into the runtime while importing blocks. + #[structopt( long = "execution-import-block", value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), case_insensitive = true, default_value = DEFAULT_EXECUTION_IMPORT_BLOCK.as_str(), )] - pub execution_import_block: ExecutionStrategy, + pub execution_import_block: ExecutionStrategy, - /// The means of execution used when calling into the runtime while constructing blocks. - #[structopt( + /// The means of execution used when calling into the runtime while constructing blocks. + #[structopt( long = "execution-block-construction", value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), case_insensitive = true, default_value = DEFAULT_EXECUTION_BLOCK_CONSTRUCTION.as_str(), )] - pub execution_block_construction: ExecutionStrategy, + pub execution_block_construction: ExecutionStrategy, - /// The means of execution used when calling into the runtime while using an off-chain worker. - #[structopt( + /// The means of execution used when calling into the runtime while using an off-chain worker. + #[structopt( long = "execution-offchain-worker", value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), case_insensitive = true, default_value = DEFAULT_EXECUTION_OFFCHAIN_WORKER.as_str(), )] - pub execution_offchain_worker: ExecutionStrategy, + pub execution_offchain_worker: ExecutionStrategy, - /// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks. 
- #[structopt( + /// The means of execution used when calling into the runtime while not syncing, importing or constructing blocks. + #[structopt( long = "execution-other", value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), case_insensitive = true, default_value = DEFAULT_EXECUTION_OTHER.as_str(), )] - pub execution_other: ExecutionStrategy, + pub execution_other: ExecutionStrategy, - /// The execution strategy that should be used by all execution contexts. - #[structopt( + /// The execution strategy that should be used by all execution contexts. + #[structopt( long = "execution", value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), @@ -216,5 +223,5 @@ pub struct ExecutionStrategiesParams { "execution-syncing", ] )] - pub execution: Option, + pub execution: Option, } diff --git a/client/cli/src/params/keystore_params.rs b/client/cli/src/params/keystore_params.rs index c6131c2f64..9306ac132a 100644 --- a/client/cli/src/params/keystore_params.rs +++ b/client/cli/src/params/keystore_params.rs @@ -26,67 +26,67 @@ const DEFAULT_KEYSTORE_CONFIG_PATH: &'static str = "keystore"; /// Parameters of the keystore #[derive(Debug, StructOpt, Clone)] pub struct KeystoreParams { - /// Specify custom keystore path. - #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] - pub keystore_path: Option, + /// Specify custom keystore path. + #[structopt(long = "keystore-path", value_name = "PATH", parse(from_os_str))] + pub keystore_path: Option, - /// Use interactive shell for entering the password used by the keystore. - #[structopt( + /// Use interactive shell for entering the password used by the keystore. + #[structopt( long = "password-interactive", conflicts_with_all = &[ "password", "password-filename" ] )] - pub password_interactive: bool, + pub password_interactive: bool, - /// Password used by the keystore. - #[structopt( + /// Password used by the keystore. 
+ #[structopt( long = "password", conflicts_with_all = &[ "password-interactive", "password-filename" ] )] - pub password: Option, + pub password: Option, - /// File that contains the password used by the keystore. - #[structopt( + /// File that contains the password used by the keystore. + #[structopt( long = "password-filename", value_name = "PATH", parse(from_os_str), conflicts_with_all = &[ "password-interactive", "password" ] )] - pub password_filename: Option, + pub password_filename: Option, } impl KeystoreParams { - /// Get the keystore configuration for the parameters - pub fn keystore_config(&self, base_path: &PathBuf) -> Result { - let password = if self.password_interactive { - #[cfg(not(target_os = "unknown"))] - { - Some(input_keystore_password()?.into()) - } - #[cfg(target_os = "unknown")] - None - } else if let Some(ref file) = self.password_filename { - Some( - fs::read_to_string(file) - .map_err(|e| format!("{}", e))? - .into(), - ) - } else if let Some(ref password) = self.password { - Some(password.clone().into()) - } else { - None - }; + /// Get the keystore configuration for the parameters + pub fn keystore_config(&self, base_path: &PathBuf) -> Result { + let password = if self.password_interactive { + #[cfg(not(target_os = "unknown"))] + { + Some(input_keystore_password()?.into()) + } + #[cfg(target_os = "unknown")] + None + } else if let Some(ref file) = self.password_filename { + Some( + fs::read_to_string(file) + .map_err(|e| format!("{}", e))? 
+ .into(), + ) + } else if let Some(ref password) = self.password { + Some(password.clone().into()) + } else { + None + }; - let path = self - .keystore_path - .clone() - .unwrap_or(base_path.join(DEFAULT_KEYSTORE_CONFIG_PATH)); + let path = self + .keystore_path + .clone() + .unwrap_or(base_path.join(DEFAULT_KEYSTORE_CONFIG_PATH)); - Ok(KeystoreConfig::Path { path, password }) - } + Ok(KeystoreConfig::Path { path, password }) + } } #[cfg(not(target_os = "unknown"))] fn input_keystore_password() -> Result { - rpassword::read_password_from_tty(Some("Keystore password: ")) - .map_err(|e| format!("{:?}", e).into()) + rpassword::read_password_from_tty(Some("Keystore password: ")) + .map_err(|e| format!("{:?}", e).into()) } diff --git a/client/cli/src/params/mod.rs b/client/cli/src/params/mod.rs index 9097bf8589..8ff87c9c62 100644 --- a/client/cli/src/params/mod.rs +++ b/client/cli/src/params/mod.rs @@ -38,32 +38,32 @@ pub use crate::params::transaction_pool_params::*; pub struct BlockNumber(String); impl FromStr for BlockNumber { - type Err = String; + type Err = String; - fn from_str(block_number: &str) -> Result { - if block_number.chars().any(|d| !d.is_digit(10)) { - Err(format!( - "Invalid block number: {}, expected decimal formatted unsigned integer", - block_number, - )) - } else { - Ok(Self(block_number.to_owned())) - } - } + fn from_str(block_number: &str) -> Result { + if block_number.chars().any(|d| !d.is_digit(10)) { + Err(format!( + "Invalid block number: {}, expected decimal formatted unsigned integer", + block_number, + )) + } else { + Ok(Self(block_number.to_owned())) + } + } } impl BlockNumber { - /// Wrapper on top of `std::str::parse` but with `Error` as a `String` - /// - /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate - /// documentation. 
- pub fn parse(&self) -> Result - where - N: FromStr, - N::Err: std::fmt::Debug, - { - self.0 - .parse() - .map_err(|e| format!("BlockNumber: {} parsing failed because of {:?}", self.0, e)) - } + /// Wrapper on top of `std::str::parse` but with `Error` as a `String` + /// + /// See `https://doc.rust-lang.org/std/primitive.str.html#method.parse` for more elaborate + /// documentation. + pub fn parse(&self) -> Result + where + N: FromStr, + N::Err: std::fmt::Debug, + { + self.0 + .parse() + .map_err(|e| format!("BlockNumber: {} parsing failed because of {:?}", self.0, e)) + } } diff --git a/client/cli/src/params/network_params.rs b/client/cli/src/params/network_params.rs index 635fc51cfd..2965e130d9 100644 --- a/client/cli/src/params/network_params.rs +++ b/client/cli/src/params/network_params.rs @@ -16,130 +16,131 @@ use crate::params::node_key_params::NodeKeyParams; use sc_network::{ - config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, TransportConfig}, - multiaddr::Protocol, + config::{NetworkConfiguration, NodeKeyConfig, NonReservedPeerMode, TransportConfig}, + multiaddr::Protocol, +}; +use sc_service::{ + config::{Multiaddr, MultiaddrWithPeerId}, + ChainSpec, }; -use sc_service::{ChainSpec, config::{Multiaddr, MultiaddrWithPeerId}}; use std::path::PathBuf; use structopt::StructOpt; /// Parameters used to create the network configuration. #[derive(Debug, StructOpt, Clone)] pub struct NetworkParams { - /// Specify a list of bootnodes. - #[structopt(long = "bootnodes", value_name = "ADDR")] - pub bootnodes: Vec, - - /// Specify a list of reserved node addresses. - #[structopt(long = "reserved-nodes", value_name = "ADDR")] - pub reserved_nodes: Vec, - - /// Whether to only allow connections to/from reserved nodes. - /// - /// If you are a validator your node might still connect to other validator - /// nodes regardless of whether they are defined as reserved nodes. 
- #[structopt(long = "reserved-only")] - pub reserved_only: bool, - - /// Listen on this multiaddress. - #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] - pub listen_addr: Vec, - - /// Specify p2p protocol TCP port. - #[structopt(long = "port", value_name = "PORT", conflicts_with_all = &[ "listen-addr" ])] - pub port: Option, - - /// Forbid connecting to private IPv4 addresses (as specified in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with - /// `--reserved-nodes` or `--bootnodes`. - #[structopt(long = "no-private-ipv4")] - pub no_private_ipv4: bool, - - /// Specify the number of outgoing connections we're trying to maintain. - #[structopt(long = "out-peers", value_name = "COUNT", default_value = "25")] - pub out_peers: u32, - - /// Specify the maximum number of incoming connections we're accepting. - #[structopt(long = "in-peers", value_name = "COUNT", default_value = "25")] - pub in_peers: u32, - - /// Disable mDNS discovery. - /// - /// By default, the network will use mDNS to discover other nodes on the - /// local network. This disables it. Automatically implied when using --dev. - #[structopt(long = "no-mdns")] - pub no_mdns: bool, - - /// Maximum number of peers from which to ask for the same blocks in parallel. - /// - /// This allows downloading announced blocks from multiple peers. Decrease to save - /// traffic and risk increased latency. - #[structopt( - long = "max-parallel-downloads", - value_name = "COUNT", - default_value = "5" - )] - pub max_parallel_downloads: u32, - - #[allow(missing_docs)] - #[structopt(flatten)] - pub node_key_params: NodeKeyParams, - - /// Experimental feature flag. - #[structopt(long = "use-yamux-flow-control")] - pub use_yamux_flow_control: bool, + /// Specify a list of bootnodes. + #[structopt(long = "bootnodes", value_name = "ADDR")] + pub bootnodes: Vec, + + /// Specify a list of reserved node addresses. 
+ #[structopt(long = "reserved-nodes", value_name = "ADDR")] + pub reserved_nodes: Vec, + + /// Whether to only allow connections to/from reserved nodes. + /// + /// If you are a validator your node might still connect to other validator + /// nodes regardless of whether they are defined as reserved nodes. + #[structopt(long = "reserved-only")] + pub reserved_only: bool, + + /// Listen on this multiaddress. + #[structopt(long = "listen-addr", value_name = "LISTEN_ADDR")] + pub listen_addr: Vec, + + /// Specify p2p protocol TCP port. + #[structopt(long = "port", value_name = "PORT", conflicts_with_all = &[ "listen-addr" ])] + pub port: Option, + + /// Forbid connecting to private IPv4 addresses (as specified in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)), unless the address was passed with + /// `--reserved-nodes` or `--bootnodes`. + #[structopt(long = "no-private-ipv4")] + pub no_private_ipv4: bool, + + /// Specify the number of outgoing connections we're trying to maintain. + #[structopt(long = "out-peers", value_name = "COUNT", default_value = "25")] + pub out_peers: u32, + + /// Specify the maximum number of incoming connections we're accepting. + #[structopt(long = "in-peers", value_name = "COUNT", default_value = "25")] + pub in_peers: u32, + + /// Disable mDNS discovery. + /// + /// By default, the network will use mDNS to discover other nodes on the + /// local network. This disables it. Automatically implied when using --dev. + #[structopt(long = "no-mdns")] + pub no_mdns: bool, + + /// Maximum number of peers from which to ask for the same blocks in parallel. + /// + /// This allows downloading announced blocks from multiple peers. Decrease to save + /// traffic and risk increased latency. + #[structopt( + long = "max-parallel-downloads", + value_name = "COUNT", + default_value = "5" + )] + pub max_parallel_downloads: u32, + + #[allow(missing_docs)] + #[structopt(flatten)] + pub node_key_params: NodeKeyParams, + + /// Experimental feature flag. 
+ #[structopt(long = "use-yamux-flow-control")] + pub use_yamux_flow_control: bool, } impl NetworkParams { - /// Fill the given `NetworkConfiguration` by looking at the cli parameters. - pub fn network_config( - &self, - chain_spec: &Box, - is_dev: bool, - net_config_path: Option, - client_id: &str, - node_name: &str, - node_key: NodeKeyConfig, - ) -> NetworkConfiguration { - let port = self.port.unwrap_or(30333); - - let listen_addresses = if self.listen_addr.is_empty() { - vec![ - Multiaddr::empty() - .with(Protocol::Ip4([0, 0, 0, 0].into())) - .with(Protocol::Tcp(port)), - ] - } else { - self.listen_addr.clone() - }; - - let mut boot_nodes = chain_spec.boot_nodes().to_vec(); - boot_nodes.extend(self.bootnodes.clone()); - - NetworkConfiguration { - boot_nodes, - net_config_path, - reserved_nodes: self.reserved_nodes.clone(), - non_reserved_mode: if self.reserved_only { - NonReservedPeerMode::Deny - } else { - NonReservedPeerMode::Accept - }, - listen_addresses, - public_addresses: Vec::new(), - notifications_protocols: Vec::new(), - node_key, - node_name: node_name.to_string(), - client_version: client_id.to_string(), - in_peers: self.in_peers, - out_peers: self.out_peers, - transport: TransportConfig::Normal { - enable_mdns: !is_dev && !self.no_mdns, - allow_private_ipv4: !self.no_private_ipv4, - wasm_external_transport: None, - use_yamux_flow_control: self.use_yamux_flow_control, - }, - max_parallel_downloads: self.max_parallel_downloads, - } - } + /// Fill the given `NetworkConfiguration` by looking at the cli parameters. 
+ pub fn network_config( + &self, + chain_spec: &Box, + is_dev: bool, + net_config_path: Option, + client_id: &str, + node_name: &str, + node_key: NodeKeyConfig, + ) -> NetworkConfiguration { + let port = self.port.unwrap_or(30333); + + let listen_addresses = if self.listen_addr.is_empty() { + vec![Multiaddr::empty() + .with(Protocol::Ip4([0, 0, 0, 0].into())) + .with(Protocol::Tcp(port))] + } else { + self.listen_addr.clone() + }; + + let mut boot_nodes = chain_spec.boot_nodes().to_vec(); + boot_nodes.extend(self.bootnodes.clone()); + + NetworkConfiguration { + boot_nodes, + net_config_path, + reserved_nodes: self.reserved_nodes.clone(), + non_reserved_mode: if self.reserved_only { + NonReservedPeerMode::Deny + } else { + NonReservedPeerMode::Accept + }, + listen_addresses, + public_addresses: Vec::new(), + notifications_protocols: Vec::new(), + node_key, + node_name: node_name.to_string(), + client_version: client_id.to_string(), + in_peers: self.in_peers, + out_peers: self.out_peers, + transport: TransportConfig::Normal { + enable_mdns: !is_dev && !self.no_mdns, + allow_private_ipv4: !self.no_private_ipv4, + wasm_external_transport: None, + use_yamux_flow_control: self.use_yamux_flow_control, + }, + max_parallel_downloads: self.max_parallel_downloads, + } + } } diff --git a/client/cli/src/params/node_key_params.rs b/client/cli/src/params/node_key_params.rs index 2913ff2c10..f152cf58de 100644 --- a/client/cli/src/params/node_key_params.rs +++ b/client/cli/src/params/node_key_params.rs @@ -31,195 +31,192 @@ const NODE_KEY_ED25519_FILE: &str = "secret_ed25519"; /// used for libp2p networking. #[derive(Debug, StructOpt, Clone)] pub struct NodeKeyParams { - /// The secret key to use for libp2p networking. - /// - /// The value is a string that is parsed according to the choice of - /// `--node-key-type` as follows: - /// - /// `ed25519`: - /// The value is parsed as a hex-encoded Ed25519 32 byte secret key, - /// i.e. 64 hex characters. 
- /// - /// The value of this option takes precedence over `--node-key-file`. - /// - /// WARNING: Secrets provided as command-line arguments are easily exposed. - /// Use of this option should be limited to development and testing. To use - /// an externally managed secret key, use `--node-key-file` instead. - #[structopt(long = "node-key", value_name = "KEY")] - pub node_key: Option, - - /// The type of secret key to use for libp2p networking. - /// - /// The secret key of the node is obtained as follows: - /// - /// * If the `--node-key` option is given, the value is parsed as a secret key - /// according to the type. See the documentation for `--node-key`. - /// - /// * If the `--node-key-file` option is given, the secret key is read from the - /// specified file. See the documentation for `--node-key-file`. - /// - /// * Otherwise, the secret key is read from a file with a predetermined, - /// type-specific name from the chain-specific network config directory - /// inside the base directory specified by `--base-dir`. If this file does - /// not exist, it is created with a newly generated secret key of the - /// chosen type. - /// - /// The node's secret key determines the corresponding public key and hence the - /// node's peer ID in the context of libp2p. - #[structopt( + /// The secret key to use for libp2p networking. + /// + /// The value is a string that is parsed according to the choice of + /// `--node-key-type` as follows: + /// + /// `ed25519`: + /// The value is parsed as a hex-encoded Ed25519 32 byte secret key, + /// i.e. 64 hex characters. + /// + /// The value of this option takes precedence over `--node-key-file`. + /// + /// WARNING: Secrets provided as command-line arguments are easily exposed. + /// Use of this option should be limited to development and testing. To use + /// an externally managed secret key, use `--node-key-file` instead. 
+ #[structopt(long = "node-key", value_name = "KEY")] + pub node_key: Option, + + /// The type of secret key to use for libp2p networking. + /// + /// The secret key of the node is obtained as follows: + /// + /// * If the `--node-key` option is given, the value is parsed as a secret key + /// according to the type. See the documentation for `--node-key`. + /// + /// * If the `--node-key-file` option is given, the secret key is read from the + /// specified file. See the documentation for `--node-key-file`. + /// + /// * Otherwise, the secret key is read from a file with a predetermined, + /// type-specific name from the chain-specific network config directory + /// inside the base directory specified by `--base-dir`. If this file does + /// not exist, it is created with a newly generated secret key of the + /// chosen type. + /// + /// The node's secret key determines the corresponding public key and hence the + /// node's peer ID in the context of libp2p. + #[structopt( long = "node-key-type", value_name = "TYPE", possible_values = &NodeKeyType::variants(), case_insensitive = true, default_value = "Ed25519" )] - pub node_key_type: NodeKeyType, - - /// The file from which to read the node's secret key to use for libp2p networking. - /// - /// The contents of the file are parsed according to the choice of `--node-key-type` - /// as follows: - /// - /// `ed25519`: - /// The file must contain an unencoded 32 byte Ed25519 secret key. - /// - /// If the file does not exist, it is created with a newly generated secret key of - /// the chosen type. - #[structopt(long = "node-key-file", value_name = "FILE")] - pub node_key_file: Option, + pub node_key_type: NodeKeyType, + + /// The file from which to read the node's secret key to use for libp2p networking. + /// + /// The contents of the file are parsed according to the choice of `--node-key-type` + /// as follows: + /// + /// `ed25519`: + /// The file must contain an unencoded 32 byte Ed25519 secret key. 
+ /// + /// If the file does not exist, it is created with a newly generated secret key of + /// the chosen type. + #[structopt(long = "node-key-file", value_name = "FILE")] + pub node_key_file: Option, } impl NodeKeyParams { - /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context - /// of an optional network config storage directory. - pub fn node_key(&self, net_config_dir: &PathBuf) -> error::Result { - Ok(match self.node_key_type { - NodeKeyType::Ed25519 => { - let secret = if let Some(node_key) = self.node_key.as_ref() { - parse_ed25519_secret(node_key)? - } else { - let path = self - .node_key_file - .clone() - .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)); - - sc_network::config::Secret::File(path) - }; - - NodeKeyConfig::Ed25519(secret) - } - }) - } + /// Create a `NodeKeyConfig` from the given `NodeKeyParams` in the context + /// of an optional network config storage directory. + pub fn node_key(&self, net_config_dir: &PathBuf) -> error::Result { + Ok(match self.node_key_type { + NodeKeyType::Ed25519 => { + let secret = if let Some(node_key) = self.node_key.as_ref() { + parse_ed25519_secret(node_key)? + } else { + let path = self + .node_key_file + .clone() + .unwrap_or_else(|| net_config_dir.join(NODE_KEY_ED25519_FILE)); + + sc_network::config::Secret::File(path) + }; + + NodeKeyConfig::Ed25519(secret) + } + }) + } } /// Create an error caused by an invalid node key argument. fn invalid_node_key(e: impl std::fmt::Display) -> error::Error { - error::Error::Input(format!("Invalid node key: {}", e)) + error::Error::Input(format!("Invalid node key: {}", e)) } /// Parse a Ed25519 secret key from a hex string into a `sc_network::Secret`. 
fn parse_ed25519_secret(hex: &str) -> error::Result { - H256::from_str(&hex) - .map_err(invalid_node_key) - .and_then(|bytes| { - sc_network::config::identity::ed25519::SecretKey::from_bytes(bytes) - .map(sc_network::config::Secret::Input) - .map_err(invalid_node_key) - }) + H256::from_str(&hex) + .map_err(invalid_node_key) + .and_then(|bytes| { + sc_network::config::identity::ed25519::SecretKey::from_bytes(bytes) + .map(sc_network::config::Secret::Input) + .map_err(invalid_node_key) + }) } #[cfg(test)] mod tests { - use super::*; - use sc_network::config::identity::ed25519; - - #[test] - fn test_node_key_config_input() { - fn secret_input(net_config_dir: &PathBuf) -> error::Result<()> { - NodeKeyType::variants().iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - let sk = match node_key_type { - NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec(), - }; - let params = NodeKeyParams { - node_key_type, - node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), - node_key_file: None, - }; - params.node_key(net_config_dir).and_then(|c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) - if node_key_type == NodeKeyType::Ed25519 && &sk[..] 
== ski.as_ref() => - { - Ok(()) - } - _ => Err(error::Error::Input("Unexpected node key config".into())), - }) - }) - } - - assert!(secret_input(&PathBuf::from_str("x").unwrap()).is_ok()); - } - - #[test] - fn test_node_key_config_file() { - fn secret_file(net_config_dir: &PathBuf) -> error::Result<()> { - NodeKeyType::variants().iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - let tmp = tempfile::Builder::new().prefix("alice").tempdir()?; - let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); - let params = NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: Some(file.clone()), - }; - params.node_key(net_config_dir).and_then(|c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if node_key_type == NodeKeyType::Ed25519 && f == &file => - { - Ok(()) - } - _ => Err(error::Error::Input("Unexpected node key config".into())), - }) - }) - } - - assert!(secret_file(&PathBuf::from_str("x").unwrap()).is_ok()); - } - - #[test] - fn test_node_key_config_default() { - fn with_def_params(f: F) -> error::Result<()> - where - F: Fn(NodeKeyParams) -> error::Result<()>, - { - NodeKeyType::variants().iter().try_for_each(|t| { - let node_key_type = NodeKeyType::from_str(t).unwrap(); - f(NodeKeyParams { - node_key_type, - node_key: None, - node_key_file: None, - }) - }) - } - - fn some_config_dir(net_config_dir: &PathBuf) -> error::Result<()> { - with_def_params(|params| { - let dir = PathBuf::from(net_config_dir.clone()); - let typ = params.node_key_type; - params - .node_key(net_config_dir) - .and_then(move |c| match c { - NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) - if typ == NodeKeyType::Ed25519 - && f == &dir.join(NODE_KEY_ED25519_FILE) => - { - Ok(()) - } - _ => Err(error::Error::Input("Unexpected node key config".into())), - }) - }) - } - - assert!(some_config_dir(&PathBuf::from_str("x").unwrap()).is_ok()); - } + use super::*; + use 
sc_network::config::identity::ed25519; + + #[test] + fn test_node_key_config_input() { + fn secret_input(net_config_dir: &PathBuf) -> error::Result<()> { + NodeKeyType::variants().iter().try_for_each(|t| { + let node_key_type = NodeKeyType::from_str(t).unwrap(); + let sk = match node_key_type { + NodeKeyType::Ed25519 => ed25519::SecretKey::generate().as_ref().to_vec(), + }; + let params = NodeKeyParams { + node_key_type, + node_key: Some(format!("{:x}", H256::from_slice(sk.as_ref()))), + node_key_file: None, + }; + params.node_key(net_config_dir).and_then(|c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::Input(ref ski)) + if node_key_type == NodeKeyType::Ed25519 && &sk[..] == ski.as_ref() => + { + Ok(()) + } + _ => Err(error::Error::Input("Unexpected node key config".into())), + }) + }) + } + + assert!(secret_input(&PathBuf::from_str("x").unwrap()).is_ok()); + } + + #[test] + fn test_node_key_config_file() { + fn secret_file(net_config_dir: &PathBuf) -> error::Result<()> { + NodeKeyType::variants().iter().try_for_each(|t| { + let node_key_type = NodeKeyType::from_str(t).unwrap(); + let tmp = tempfile::Builder::new().prefix("alice").tempdir()?; + let file = tmp.path().join(format!("{}_mysecret", t)).to_path_buf(); + let params = NodeKeyParams { + node_key_type, + node_key: None, + node_key_file: Some(file.clone()), + }; + params.node_key(net_config_dir).and_then(|c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + if node_key_type == NodeKeyType::Ed25519 && f == &file => + { + Ok(()) + } + _ => Err(error::Error::Input("Unexpected node key config".into())), + }) + }) + } + + assert!(secret_file(&PathBuf::from_str("x").unwrap()).is_ok()); + } + + #[test] + fn test_node_key_config_default() { + fn with_def_params(f: F) -> error::Result<()> + where + F: Fn(NodeKeyParams) -> error::Result<()>, + { + NodeKeyType::variants().iter().try_for_each(|t| { + let node_key_type = NodeKeyType::from_str(t).unwrap(); + f(NodeKeyParams { + 
node_key_type, + node_key: None, + node_key_file: None, + }) + }) + } + + fn some_config_dir(net_config_dir: &PathBuf) -> error::Result<()> { + with_def_params(|params| { + let dir = PathBuf::from(net_config_dir.clone()); + let typ = params.node_key_type; + params.node_key(net_config_dir).and_then(move |c| match c { + NodeKeyConfig::Ed25519(sc_network::config::Secret::File(ref f)) + if typ == NodeKeyType::Ed25519 && f == &dir.join(NODE_KEY_ED25519_FILE) => + { + Ok(()) + } + _ => Err(error::Error::Input("Unexpected node key config".into())), + }) + }) + } + + assert!(some_config_dir(&PathBuf::from_str("x").unwrap()).is_ok()); + } } diff --git a/client/cli/src/params/pruning_params.rs b/client/cli/src/params/pruning_params.rs index ed8f7ab168..b25438b014 100644 --- a/client/cli/src/params/pruning_params.rs +++ b/client/cli/src/params/pruning_params.rs @@ -21,39 +21,39 @@ use structopt::StructOpt; /// Parameters to define the pruning mode #[derive(Debug, StructOpt, Clone)] pub struct PruningParams { - /// Specify the state pruning mode, a number of blocks to keep or 'archive'. - /// - /// Default is to keep all block states if the node is running as a - /// validator (i.e. 'archive'), otherwise state is only kept for the last - /// 256 blocks. - #[structopt(long = "pruning", value_name = "PRUNING_MODE")] - pub pruning: Option, + /// Specify the state pruning mode, a number of blocks to keep or 'archive'. + /// + /// Default is to keep all block states if the node is running as a + /// validator (i.e. 'archive'), otherwise state is only kept for the last + /// 256 blocks. + #[structopt(long = "pruning", value_name = "PRUNING_MODE")] + pub pruning: Option, } impl PruningParams { - /// Get the pruning value from the parameters - pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result { - // by default we disable pruning if the node is an authority (i.e. - // `ArchiveAll`), otherwise we keep state for the last 256 blocks. 
if the - // node is an authority and pruning is enabled explicitly, then we error - // unless `unsafe_pruning` is set. - Ok(match &self.pruning { - Some(ref s) if s == "archive" => PruningMode::ArchiveAll, - None if role.is_network_authority() => PruningMode::ArchiveAll, - None => PruningMode::default(), - Some(s) => { - if role.is_network_authority() && !unsafe_pruning { - return Err(error::Error::Input( - "Validators should run with state pruning disabled (i.e. archive). \ + /// Get the pruning value from the parameters + pub fn pruning(&self, unsafe_pruning: bool, role: &Role) -> error::Result { + // by default we disable pruning if the node is an authority (i.e. + // `ArchiveAll`), otherwise we keep state for the last 256 blocks. if the + // node is an authority and pruning is enabled explicitly, then we error + // unless `unsafe_pruning` is set. + Ok(match &self.pruning { + Some(ref s) if s == "archive" => PruningMode::ArchiveAll, + None if role.is_network_authority() => PruningMode::ArchiveAll, + None => PruningMode::default(), + Some(s) => { + if role.is_network_authority() && !unsafe_pruning { + return Err(error::Error::Input( + "Validators should run with state pruning disabled (i.e. archive). \ You can ignore this check with `--unsafe-pruning`." - .to_string(), - )); - } + .to_string(), + )); + } - PruningMode::keep_blocks(s.parse().map_err(|_| { - error::Error::Input("Invalid pruning mode specified".to_string()) - })?) - } - }) - } + PruningMode::keep_blocks(s.parse().map_err(|_| { + error::Error::Input("Invalid pruning mode specified".to_string()) + })?) + } + }) + } } diff --git a/client/cli/src/params/shared_params.rs b/client/cli/src/params/shared_params.rs index d6dd1bd9c1..d1858d6484 100644 --- a/client/cli/src/params/shared_params.rs +++ b/client/cli/src/params/shared_params.rs @@ -14,87 +14,87 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use crate::arg_enums::Database; use sc_service::config::DatabaseConfig; use std::path::PathBuf; use structopt::StructOpt; -use crate::arg_enums::Database; /// Shared parameters used by all `CoreParams`. #[derive(Debug, StructOpt, Clone)] pub struct SharedParams { - /// Specify the chain specification (one of dev, local, or staging). - #[structopt(long = "chain", value_name = "CHAIN_SPEC")] - pub chain: Option, + /// Specify the chain specification (one of dev, local, or staging). + #[structopt(long = "chain", value_name = "CHAIN_SPEC")] + pub chain: Option, - /// Specify the development chain. - #[structopt(long = "dev")] - pub dev: bool, + /// Specify the development chain. + #[structopt(long = "dev")] + pub dev: bool, - /// Specify custom base path. - #[structopt( - long = "base-path", - short = "d", - value_name = "PATH", - parse(from_os_str) - )] - pub base_path: Option, + /// Specify custom base path. + #[structopt( + long = "base-path", + short = "d", + value_name = "PATH", + parse(from_os_str) + )] + pub base_path: Option, - /// Sets a custom logging filter. Syntax is =, e.g. -lsync=debug. - /// - /// Log levels (least to most verbose) are error, warn, info, debug, and trace. - /// By default, all targets log `info`. The global log level can be set with -l. - #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] - pub log: Option, + /// Sets a custom logging filter. Syntax is =, e.g. -lsync=debug. + /// + /// Log levels (least to most verbose) are error, warn, info, debug, and trace. + /// By default, all targets log `info`. The global log level can be set with -l. + #[structopt(short = "l", long = "log", value_name = "LOG_PATTERN")] + pub log: Option, } impl SharedParams { - /// Specify custom base path. - pub fn base_path(&self) -> Option { - self.base_path.clone() - } + /// Specify custom base path. + pub fn base_path(&self) -> Option { + self.base_path.clone() + } - /// Specify the development chain. 
- pub fn is_dev(&self) -> bool { - self.dev - } + /// Specify the development chain. + pub fn is_dev(&self) -> bool { + self.dev + } - /// Get the chain spec for the parameters provided - pub fn chain_id(&self, is_dev: bool) -> String { - match self.chain { - Some(ref chain) => chain.clone(), - None => { - if is_dev { - "dev".into() - } else { - "".into() - } - } - } - } + /// Get the chain spec for the parameters provided + pub fn chain_id(&self, is_dev: bool) -> String { + match self.chain { + Some(ref chain) => chain.clone(), + None => { + if is_dev { + "dev".into() + } else { + "".into() + } + } + } + } - /// Get the database configuration object for the parameters provided - pub fn database_config( - &self, - base_path: &PathBuf, - cache_size: usize, - database: Database, - ) -> DatabaseConfig { - match database { - Database::RocksDb => DatabaseConfig::RocksDb { - path: base_path.join("db"), - cache_size, - }, - Database::SubDb => DatabaseConfig::SubDb { - path: base_path.join("subdb"), - }, - Database::ParityDb => DatabaseConfig::ParityDb { - path: base_path.join("paritydb"), - }, - } - } + /// Get the database configuration object for the parameters provided + pub fn database_config( + &self, + base_path: &PathBuf, + cache_size: usize, + database: Database, + ) -> DatabaseConfig { + match database { + Database::RocksDb => DatabaseConfig::RocksDb { + path: base_path.join("db"), + cache_size, + }, + Database::SubDb => DatabaseConfig::SubDb { + path: base_path.join("subdb"), + }, + Database::ParityDb => DatabaseConfig::ParityDb { + path: base_path.join("paritydb"), + }, + } + } - /// Get the filters for the logging - pub fn log_filters(&self) -> Option { - self.log.clone() - } + /// Get the filters for the logging + pub fn log_filters(&self) -> Option { + self.log.clone() + } } diff --git a/client/cli/src/params/transaction_pool_params.rs b/client/cli/src/params/transaction_pool_params.rs index dfcdf9af70..941a8a6aa4 100644 --- 
a/client/cli/src/params/transaction_pool_params.rs +++ b/client/cli/src/params/transaction_pool_params.rs @@ -20,29 +20,29 @@ use structopt::StructOpt; /// Parameters used to create the pool configuration. #[derive(Debug, StructOpt, Clone)] pub struct TransactionPoolParams { - /// Maximum number of transactions in the transaction pool. - #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "8192")] - pub pool_limit: usize, + /// Maximum number of transactions in the transaction pool. + #[structopt(long = "pool-limit", value_name = "COUNT", default_value = "8192")] + pub pool_limit: usize, - /// Maximum number of kilobytes of all transactions stored in the pool. - #[structopt(long = "pool-kbytes", value_name = "COUNT", default_value = "20480")] - pub pool_kbytes: usize, + /// Maximum number of kilobytes of all transactions stored in the pool. + #[structopt(long = "pool-kbytes", value_name = "COUNT", default_value = "20480")] + pub pool_kbytes: usize, } impl TransactionPoolParams { - /// Fill the given `PoolConfiguration` by looking at the cli parameters. - pub fn transaction_pool(&self) -> TransactionPoolOptions { - let mut opts = TransactionPoolOptions::default(); + /// Fill the given `PoolConfiguration` by looking at the cli parameters. 
+ pub fn transaction_pool(&self) -> TransactionPoolOptions { + let mut opts = TransactionPoolOptions::default(); - // ready queue - opts.ready.count = self.pool_limit; - opts.ready.total_bytes = self.pool_kbytes * 1024; + // ready queue + opts.ready.count = self.pool_limit; + opts.ready.total_bytes = self.pool_kbytes * 1024; - // future queue - let factor = 10; - opts.future.count = self.pool_limit / factor; - opts.future.total_bytes = self.pool_kbytes * 1024 / factor; + // future queue + let factor = 10; + opts.future.count = self.pool_limit / factor; + opts.future.total_bytes = self.pool_kbytes * 1024 / factor; - opts - } + opts + } } diff --git a/client/cli/src/runner.rs b/client/cli/src/runner.rs index 6ebe84f9c5..0ebddc5b2b 100644 --- a/client/cli/src/runner.rs +++ b/client/cli/src/runner.rs @@ -16,8 +16,8 @@ use crate::CliConfiguration; use crate::Result; -use crate::SubstrateCli; use crate::Subcommand; +use crate::SubstrateCli; use chrono::prelude::*; use futures::pin_mut; use futures::select; @@ -33,209 +33,210 @@ use std::sync::Arc; #[cfg(target_family = "unix")] async fn main(func: F) -> std::result::Result<(), Box> where - F: Future> + future::FusedFuture, - E: 'static + std::error::Error, + F: Future> + future::FusedFuture, + E: 'static + std::error::Error, { - use tokio::signal::unix::{signal, SignalKind}; + use tokio::signal::unix::{signal, SignalKind}; - let mut stream_int = signal(SignalKind::interrupt())?; - let mut stream_term = signal(SignalKind::terminate())?; + let mut stream_int = signal(SignalKind::interrupt())?; + let mut stream_term = signal(SignalKind::terminate())?; - let t1 = stream_int.recv().fuse(); - let t2 = stream_term.recv().fuse(); - let t3 = func; + let t1 = stream_int.recv().fuse(); + let t2 = stream_term.recv().fuse(); + let t3 = func; - pin_mut!(t1, t2, t3); + pin_mut!(t1, t2, t3); - select! { - _ = t1 => {}, - _ = t2 => {}, - res = t3 => res?, - } + select! 
{ + _ = t1 => {}, + _ = t2 => {}, + res = t3 => res?, + } - Ok(()) + Ok(()) } #[cfg(not(unix))] async fn main(func: F) -> std::result::Result<(), Box> where - F: Future> + future::FusedFuture, - E: 'static + std::error::Error, + F: Future> + future::FusedFuture, + E: 'static + std::error::Error, { - use tokio::signal::ctrl_c; + use tokio::signal::ctrl_c; - let t1 = ctrl_c().fuse(); - let t2 = func; + let t1 = ctrl_c().fuse(); + let t2 = func; - pin_mut!(t1, t2); + pin_mut!(t1, t2); - select! { - _ = t1 => {}, - res = t2 => res?, - } + select! { + _ = t1 => {}, + res = t2 => res?, + } - Ok(()) + Ok(()) } /// Build a tokio runtime with all features pub fn build_runtime() -> std::result::Result { - tokio::runtime::Builder::new() - .threaded_scheduler() - .on_thread_start(||{ - TOKIO_THREADS_ALIVE.inc(); - TOKIO_THREADS_TOTAL.inc(); - }) - .on_thread_stop(||{ - TOKIO_THREADS_ALIVE.dec(); - }) - .enable_all() - .build() + tokio::runtime::Builder::new() + .threaded_scheduler() + .on_thread_start(|| { + TOKIO_THREADS_ALIVE.inc(); + TOKIO_THREADS_TOTAL.inc(); + }) + .on_thread_stop(|| { + TOKIO_THREADS_ALIVE.dec(); + }) + .enable_all() + .build() } fn run_until_exit(mut tokio_runtime: tokio::runtime::Runtime, future: FUT) -> Result<()> where - FUT: Future> + future::Future, - ERR: 'static + std::error::Error, + FUT: Future> + future::Future, + ERR: 'static + std::error::Error, { - let f = future.fuse(); - pin_mut!(f); + let f = future.fuse(); + pin_mut!(f); - tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; + tokio_runtime.block_on(main(f)).map_err(|e| e.to_string())?; - Ok(()) + Ok(()) } /// A Substrate CLI runtime that can be used to run a node or a command pub struct Runner { - config: Configuration, - tokio_runtime: tokio::runtime::Runtime, - phantom: PhantomData, + config: Configuration, + tokio_runtime: tokio::runtime::Runtime, + phantom: PhantomData, } impl Runner { - /// Create a new runtime with the command provided in argument - pub fn new(cli: &C, 
command: &T) -> Result> { - let tokio_runtime = build_runtime()?; - - let task_executor = { - let runtime_handle = tokio_runtime.handle().clone(); - Arc::new(move |fut| { - runtime_handle.spawn(fut); - }) - }; - - Ok(Runner { - config: command.create_configuration(cli, task_executor)?, - tokio_runtime, - phantom: PhantomData, - }) - } - - /// A helper function that runs an `AbstractService` with tokio and stops if the process receives - /// the signal `SIGTERM` or `SIGINT`. - pub fn run_node( - self, - new_light: FNL, - new_full: FNF, - runtime_version: sp_version::RuntimeVersion, - ) -> Result<()> where - FNL: FnOnce(Configuration) -> sc_service::error::Result, - FNF: FnOnce(Configuration) -> sc_service::error::Result, - SL: AbstractService + Unpin, - SF: AbstractService + Unpin, - { - info!("{}", C::impl_name()); - info!("✌️ version {}", C::impl_version()); - info!( - "❤️ by {}, {}-{}", - C::author(), - C::copyright_start_year(), - Local::today().year(), - ); - info!("📋 Chain specification: {}", self.config.chain_spec.name()); - info!("🏷 Node name: {}", self.config.network.node_name); - info!("👤 Role: {}", self.config.display_role()); - info!("⛓ Native runtime: {}", runtime_version); - - match self.config.role { - Role::Light => self.run_service_until_exit(new_light), - _ => self.run_service_until_exit(new_full), - } - } - - /// A helper function that runs a future with tokio and stops if the process receives the signal - /// `SIGTERM` or `SIGINT`. 
- pub fn run_subcommand(self, subcommand: &Subcommand, builder: B) -> Result<()> - where - B: FnOnce(Configuration) -> sc_service::error::Result, - BC: ServiceBuilderCommand + Unpin, - BB: sp_runtime::traits::Block + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: Debug, - ::Hash: std::str::FromStr, - { - match subcommand { - Subcommand::BuildSpec(cmd) => cmd.run(self.config), - Subcommand::ExportBlocks(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) - } - Subcommand::ImportBlocks(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) - } - Subcommand::CheckBlock(cmd) => { - run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) - } - Subcommand::Revert(cmd) => cmd.run(self.config, builder), - Subcommand::PurgeChain(cmd) => cmd.run(self.config), - } - } - - fn run_service_until_exit(mut self, service_builder: F) -> Result<()> - where - F: FnOnce(Configuration) -> std::result::Result, - T: AbstractService + Unpin, - { - let service = service_builder(self.config)?; - - let informant_future = sc_informant::build(&service, sc_informant::OutputFormat::Coloured); - let _informant_handle = self.tokio_runtime.spawn(informant_future); - - // we eagerly drop the service so that the internal exit future is fired, - // but we need to keep holding a reference to the global telemetry guard - // and drop the runtime first. 
- let _telemetry = service.telemetry(); - - let f = service.fuse(); - pin_mut!(f); - - self.tokio_runtime - .block_on(main(f)) - .map_err(|e| e.to_string())?; - drop(self.tokio_runtime); - - Ok(()) - } - - /// A helper function that runs a command with the configuration of this node - pub fn sync_run(self, runner: impl FnOnce(Configuration) -> Result<()>) -> Result<()> { - runner(self.config) - } - - /// A helper function that runs a future with tokio and stops if the process receives - /// the signal SIGTERM or SIGINT - pub fn async_run(self, runner: impl FnOnce(Configuration) -> FUT) -> Result<()> - where - FUT: Future>, - { - run_until_exit(self.tokio_runtime, runner(self.config)) - } - - /// Get an immutable reference to the node Configuration - pub fn config(&self) -> &Configuration { - &self.config - } - - /// Get a mutable reference to the node Configuration - pub fn config_mut(&mut self) -> &Configuration { - &mut self.config - } + /// Create a new runtime with the command provided in argument + pub fn new(cli: &C, command: &T) -> Result> { + let tokio_runtime = build_runtime()?; + + let task_executor = { + let runtime_handle = tokio_runtime.handle().clone(); + Arc::new(move |fut| { + runtime_handle.spawn(fut); + }) + }; + + Ok(Runner { + config: command.create_configuration(cli, task_executor)?, + tokio_runtime, + phantom: PhantomData, + }) + } + + /// A helper function that runs an `AbstractService` with tokio and stops if the process receives + /// the signal `SIGTERM` or `SIGINT`. 
+ pub fn run_node( + self, + new_light: FNL, + new_full: FNF, + runtime_version: sp_version::RuntimeVersion, + ) -> Result<()> + where + FNL: FnOnce(Configuration) -> sc_service::error::Result, + FNF: FnOnce(Configuration) -> sc_service::error::Result, + SL: AbstractService + Unpin, + SF: AbstractService + Unpin, + { + info!("{}", C::impl_name()); + info!("✌️ version {}", C::impl_version()); + info!( + "❤️ by {}, {}-{}", + C::author(), + C::copyright_start_year(), + Local::today().year(), + ); + info!("📋 Chain specification: {}", self.config.chain_spec.name()); + info!("🏷 Node name: {}", self.config.network.node_name); + info!("👤 Role: {}", self.config.display_role()); + info!("⛓ Native runtime: {}", runtime_version); + + match self.config.role { + Role::Light => self.run_service_until_exit(new_light), + _ => self.run_service_until_exit(new_full), + } + } + + /// A helper function that runs a future with tokio and stops if the process receives the signal + /// `SIGTERM` or `SIGINT`. + pub fn run_subcommand(self, subcommand: &Subcommand, builder: B) -> Result<()> + where + B: FnOnce(Configuration) -> sc_service::error::Result, + BC: ServiceBuilderCommand + Unpin, + BB: sp_runtime::traits::Block + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: Debug, + ::Hash: std::str::FromStr, + { + match subcommand { + Subcommand::BuildSpec(cmd) => cmd.run(self.config), + Subcommand::ExportBlocks(cmd) => { + run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + } + Subcommand::ImportBlocks(cmd) => { + run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + } + Subcommand::CheckBlock(cmd) => { + run_until_exit(self.tokio_runtime, cmd.run(self.config, builder)) + } + Subcommand::Revert(cmd) => cmd.run(self.config, builder), + Subcommand::PurgeChain(cmd) => cmd.run(self.config), + } + } + + fn run_service_until_exit(mut self, service_builder: F) -> Result<()> + where + F: FnOnce(Configuration) -> std::result::Result, + T: AbstractService 
+ Unpin, + { + let service = service_builder(self.config)?; + + let informant_future = sc_informant::build(&service, sc_informant::OutputFormat::Coloured); + let _informant_handle = self.tokio_runtime.spawn(informant_future); + + // we eagerly drop the service so that the internal exit future is fired, + // but we need to keep holding a reference to the global telemetry guard + // and drop the runtime first. + let _telemetry = service.telemetry(); + + let f = service.fuse(); + pin_mut!(f); + + self.tokio_runtime + .block_on(main(f)) + .map_err(|e| e.to_string())?; + drop(self.tokio_runtime); + + Ok(()) + } + + /// A helper function that runs a command with the configuration of this node + pub fn sync_run(self, runner: impl FnOnce(Configuration) -> Result<()>) -> Result<()> { + runner(self.config) + } + + /// A helper function that runs a future with tokio and stops if the process receives + /// the signal SIGTERM or SIGINT + pub fn async_run(self, runner: impl FnOnce(Configuration) -> FUT) -> Result<()> + where + FUT: Future>, + { + run_until_exit(self.tokio_runtime, runner(self.config)) + } + + /// Get an immutable reference to the node Configuration + pub fn config(&self) -> &Configuration { + &self.config + } + + /// Get a mutable reference to the node Configuration + pub fn config_mut(&mut self) -> &Configuration { + &mut self.config + } } diff --git a/client/consensus/aura/src/digests.rs b/client/consensus/aura/src/digests.rs index 8dd42fc01d..2962432645 100644 --- a/client/consensus/aura/src/digests.rs +++ b/client/consensus/aura/src/digests.rs @@ -19,47 +19,48 @@ //! This implements the digests for AuRa, to allow the private //! `CompatibleDigestItem` trait to appear in public interfaces. -use sp_core::Pair; +use codec::{Codec, Encode}; use sp_consensus_aura::AURA_ENGINE_ID; +use sp_core::Pair; use sp_runtime::generic::{DigestItem, OpaqueDigestItemId}; -use codec::{Encode, Codec}; use std::fmt::Debug; type Signature

=

::Signature; /// A digest item which is usable with aura consensus. pub trait CompatibleDigestItem: Sized { - /// Construct a digest item which contains a signature on the hash. - fn aura_seal(signature: Signature

) -> Self; + /// Construct a digest item which contains a signature on the hash. + fn aura_seal(signature: Signature

) -> Self; - /// If this item is an Aura seal, return the signature. - fn as_aura_seal(&self) -> Option>; + /// If this item is an Aura seal, return the signature. + fn as_aura_seal(&self) -> Option>; - /// Construct a digest item which contains the slot number - fn aura_pre_digest(slot_num: u64) -> Self; + /// Construct a digest item which contains the slot number + fn aura_pre_digest(slot_num: u64) -> Self; - /// If this item is an AuRa pre-digest, return the slot number - fn as_aura_pre_digest(&self) -> Option; + /// If this item is an AuRa pre-digest, return the slot number + fn as_aura_pre_digest(&self) -> Option; } -impl CompatibleDigestItem

for DigestItem where - P: Pair, - Signature

: Codec, - Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static +impl CompatibleDigestItem

for DigestItem +where + P: Pair, + Signature

: Codec, + Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static, { - fn aura_seal(signature: Signature

) -> Self { - DigestItem::Seal(AURA_ENGINE_ID, signature.encode()) - } + fn aura_seal(signature: Signature

) -> Self { + DigestItem::Seal(AURA_ENGINE_ID, signature.encode()) + } - fn as_aura_seal(&self) -> Option> { - self.try_to(OpaqueDigestItemId::Seal(&AURA_ENGINE_ID)) - } + fn as_aura_seal(&self) -> Option> { + self.try_to(OpaqueDigestItemId::Seal(&AURA_ENGINE_ID)) + } - fn aura_pre_digest(slot_num: u64) -> Self { - DigestItem::PreRuntime(AURA_ENGINE_ID, slot_num.encode()) - } + fn aura_pre_digest(slot_num: u64) -> Self { + DigestItem::PreRuntime(AURA_ENGINE_ID, slot_num.encode()) + } - fn as_aura_pre_digest(&self) -> Option { - self.try_to(OpaqueDigestItemId::PreRuntime(&AURA_ENGINE_ID)) - } + fn as_aura_pre_digest(&self) -> Option { + self.try_to(OpaqueDigestItemId::PreRuntime(&AURA_ENGINE_ID)) + } } diff --git a/client/consensus/aura/src/lib.rs b/client/consensus/aura/src/lib.rs index 56674546d3..f31431dee0 100644 --- a/client/consensus/aura/src/lib.rs +++ b/client/consensus/aura/src/lib.rs @@ -29,57 +29,61 @@ //! NOTE: Aura itself is designed to be generic over the crypto used. #![forbid(missing_docs, unsafe_code)] use std::{ - sync::Arc, time::Duration, thread, marker::PhantomData, hash::Hash, fmt::Debug, pin::Pin, - collections::HashMap + collections::HashMap, fmt::Debug, hash::Hash, marker::PhantomData, pin::Pin, sync::Arc, thread, + time::Duration, }; use futures::prelude::*; -use parking_lot::Mutex; use log::{debug, info, trace}; +use parking_lot::Mutex; -use codec::{Encode, Decode, Codec}; +use codec::{Codec, Decode, Encode}; -use sp_consensus::{ - self, BlockImport, Environment, Proposer, CanAuthorWith, ForkChoiceStrategy, BlockImportParams, - BlockOrigin, Error as ConsensusError, SelectChain, SlotData, BlockCheckParams, ImportResult +use sc_client::BlockOf; +use sc_client_api::backend::AuxStore; +use sp_api::ProvideRuntimeApi; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{ + self, + well_known_cache_keys::{self, Id as CacheKeyId}, + HeaderBackend, ProvideCache, Result as CResult, }; use sp_consensus::import_queue::{ - 
Verifier, BasicQueue, BoxJustificationImport, BoxFinalityProofImport, + BasicQueue, BoxFinalityProofImport, BoxJustificationImport, Verifier, }; -use sc_client_api::backend::AuxStore; -use sc_client::BlockOf; -use sp_blockchain::{ - self, Result as CResult, well_known_cache_keys::{self, Id as CacheKeyId}, - ProvideCache, HeaderBackend, +use sp_consensus::{ + self, BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, + Environment, Error as ConsensusError, ForkChoiceStrategy, ImportResult, Proposer, SelectChain, + SlotData, +}; +use sp_runtime::traits::{Block as BlockT, DigestItemFor, Header, Member, Zero}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + Justification, }; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_runtime::{generic::{BlockId, OpaqueDigestItemId}, Justification}; -use sp_runtime::traits::{Block as BlockT, Header, DigestItemFor, Zero, Member}; -use sp_api::ProvideRuntimeApi; +use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_TRACE}; use sp_core::crypto::Pair; -use sp_inherents::{InherentDataProviders, InherentData}; +use sp_inherents::{InherentData, InherentDataProviders}; use sp_timestamp::{ - TimestampInherentData, InherentType as TimestampInherent, InherentError as TIError + InherentError as TIError, InherentType as TimestampInherent, TimestampInherentData, }; -use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sc_consensus_slots::{ - CheckedHeader, SlotWorker, SlotInfo, SlotCompatible, StorageChanges, check_equivocation, + check_equivocation, CheckedHeader, SlotCompatible, SlotInfo, SlotWorker, StorageChanges, }; use sc_keystore::KeyStorePtr; use sp_api::ApiExt; +pub use digests::CompatibleDigestItem; +pub use sp_consensus::SyncOracle; pub use sp_consensus_aura::{ - ConsensusLog, AuraApi, AURA_ENGINE_ID, - inherents::{ - InherentType as AuraInherent, - AuraInherentData, INHERENT_IDENTIFIER, InherentDataProvider, - }, + inherents::{ 
+ AuraInherentData, InherentDataProvider, InherentType as AuraInherent, INHERENT_IDENTIFIER, + }, + AuraApi, ConsensusLog, AURA_ENGINE_ID, }; -pub use sp_consensus::SyncOracle; -pub use digests::CompatibleDigestItem; mod digests; @@ -89,315 +93,340 @@ type AuthorityId

=

::Public; pub type SlotDuration = sc_consensus_slots::SlotDuration; /// Get type of `SlotDuration` for Aura. -pub fn slot_duration(client: &C) -> CResult where - A: Codec, - B: BlockT, - C: AuxStore + ProvideRuntimeApi, - C::Api: AuraApi, +pub fn slot_duration(client: &C) -> CResult +where + A: Codec, + B: BlockT, + C: AuxStore + ProvideRuntimeApi, + C::Api: AuraApi, { - SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b)) + SlotDuration::get_or_compute(client, |a, b| a.slot_duration(b)) } /// Get slot author for given block along with authorities. fn slot_author(slot_num: u64, authorities: &[AuthorityId

]) -> Option<&AuthorityId

> { - if authorities.is_empty() { return None } - - let idx = slot_num % (authorities.len() as u64); - assert!( - idx <= usize::max_value() as u64, - "It is impossible to have a vector with length beyond the address space; qed", - ); - - let current_author = authorities.get(idx as usize) - .expect("authorities not empty; index constrained to list length;\ - this is a valid index; qed"); - - Some(current_author) + if authorities.is_empty() { + return None; + } + + let idx = slot_num % (authorities.len() as u64); + assert!( + idx <= usize::max_value() as u64, + "It is impossible to have a vector with length beyond the address space; qed", + ); + + let current_author = authorities.get(idx as usize).expect( + "authorities not empty; index constrained to list length;\ + this is a valid index; qed", + ); + + Some(current_author) } #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)] struct AuraSlotCompatible; impl SlotCompatible for AuraSlotCompatible { - fn extract_timestamp_and_slot( - &self, - data: &InherentData - ) -> Result<(TimestampInherent, AuraInherent, std::time::Duration), sp_consensus::Error> { - data.timestamp_inherent_data() - .and_then(|t| data.aura_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (x, y, Default::default())) - } + fn extract_timestamp_and_slot( + &self, + data: &InherentData, + ) -> Result<(TimestampInherent, AuraInherent, std::time::Duration), sp_consensus::Error> { + data.timestamp_inherent_data() + .and_then(|t| data.aura_inherent_data().map(|a| (t, a))) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + .map(|(x, y)| (x, y, Default::default())) + } } /// Start the aura worker. The returned future should be run in a futures executor. 
pub fn start_aura( - slot_duration: SlotDuration, - client: Arc, - select_chain: SC, - block_import: I, - env: E, - sync_oracle: SO, - inherent_data_providers: InherentDataProviders, - force_authoring: bool, - keystore: KeyStorePtr, - can_author_with: CAW, -) -> Result, sp_consensus::Error> where - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + Send + Sync, - C::Api: AuraApi>, - SC: SelectChain, - E: Environment + Send + Sync + 'static, - E::Proposer: Proposer>, - P: Pair + Send + Sync, - P::Public: Hash + Member + Encode + Decode, - P::Signature: Hash + Member + Encode + Decode, - I: BlockImport> + Send + Sync + 'static, - Error: std::error::Error + Send + From + 'static, - SO: SyncOracle + Send + Sync + Clone, - CAW: CanAuthorWith + Send, + slot_duration: SlotDuration, + client: Arc, + select_chain: SC, + block_import: I, + env: E, + sync_oracle: SO, + inherent_data_providers: InherentDataProviders, + force_authoring: bool, + keystore: KeyStorePtr, + can_author_with: CAW, +) -> Result, sp_consensus::Error> +where + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache + AuxStore + Send + Sync, + C::Api: AuraApi>, + SC: SelectChain, + E: Environment + Send + Sync + 'static, + E::Proposer: Proposer>, + P: Pair + Send + Sync, + P::Public: Hash + Member + Encode + Decode, + P::Signature: Hash + Member + Encode + Decode, + I: BlockImport> + Send + Sync + 'static, + Error: std::error::Error + Send + From + 'static, + SO: SyncOracle + Send + Sync + Clone, + CAW: CanAuthorWith + Send, { - let worker = AuraWorker { - client: client.clone(), - block_import: Arc::new(Mutex::new(block_import)), - env, - keystore, - sync_oracle: sync_oracle.clone(), - force_authoring, - _key_type: PhantomData::

, - }; - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.slot_duration() - )?; - Ok(sc_consensus_slots::start_slot_worker::<_, _, _, _, _, AuraSlotCompatible, _>( - slot_duration, - select_chain, - worker, - sync_oracle, - inherent_data_providers, - AuraSlotCompatible, - can_author_with, - )) + let worker = AuraWorker { + client: client.clone(), + block_import: Arc::new(Mutex::new(block_import)), + env, + keystore, + sync_oracle: sync_oracle.clone(), + force_authoring, + _key_type: PhantomData::

, + }; + register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.slot_duration())?; + Ok(sc_consensus_slots::start_slot_worker::< + _, + _, + _, + _, + _, + AuraSlotCompatible, + _, + >( + slot_duration, + select_chain, + worker, + sync_oracle, + inherent_data_providers, + AuraSlotCompatible, + can_author_with, + )) } struct AuraWorker { - client: Arc, - block_import: Arc>, - env: E, - keystore: KeyStorePtr, - sync_oracle: SO, - force_authoring: bool, - _key_type: PhantomData

, + client: Arc, + block_import: Arc>, + env: E, + keystore: KeyStorePtr, + sync_oracle: SO, + force_authoring: bool, + _key_type: PhantomData

, } -impl sc_consensus_slots::SimpleSlotWorker for AuraWorker where - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync, - C::Api: AuraApi>, - E: Environment, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - P: Pair + Send + Sync, - P::Public: Member + Encode + Decode + Hash, - P::Signature: Member + Encode + Decode + Hash + Debug, - SO: SyncOracle + Send + Clone, - Error: std::error::Error + Send + From + 'static, +impl sc_consensus_slots::SimpleSlotWorker + for AuraWorker +where + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync, + C::Api: AuraApi>, + E: Environment, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + P: Pair + Send + Sync, + P::Public: Member + Encode + Decode + Hash, + P::Signature: Member + Encode + Decode + Hash + Debug, + SO: SyncOracle + Send + Clone, + Error: std::error::Error + Send + From + 'static, { - type BlockImport = I; - type SyncOracle = SO; - type CreateProposer = Pin> + Send + 'static - >>; - type Proposer = E::Proposer; - type Claim = P; - type EpochData = Vec>; - - fn logging_target(&self) -> &'static str { - "aura" - } - - fn block_import(&self) -> Arc> { - self.block_import.clone() - } - - fn epoch_data( - &self, - header: &B::Header, - _slot_number: u64, - ) -> Result { - authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) - } - - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { - Some(epoch_data.len()) - } - - fn claim_slot( - &self, - _header: &B::Header, - slot_number: u64, - epoch_data: &Self::EpochData, - ) -> Option { - let expected_author = slot_author::

(slot_number, epoch_data); - - expected_author.and_then(|p| { - self.keystore.read() - .key_pair_by_type::

(&p, sp_application_crypto::key_types::AURA).ok() - }) - } - - fn pre_digest_data( - &self, - slot_number: u64, - _claim: &Self::Claim, - ) -> Vec> { - vec![ - as CompatibleDigestItem

>::aura_pre_digest(slot_number), - ] - } - - fn block_import_params(&self) -> Box, - StorageChanges, B>, - Self::Claim, - Self::EpochData, - ) -> sp_consensus::BlockImportParams> + Send> { - Box::new(|header, header_hash, body, storage_changes, pair, _epoch| { - // sign the pre-sealed hash of the block and then - // add it to a digest item. - let signature = pair.sign(header_hash.as_ref()); - let signature_digest_item = as CompatibleDigestItem

>::aura_seal(signature); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(signature_digest_item); - import_block.body = Some(body); - import_block.storage_changes = Some(storage_changes); - import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - import_block - }) - } - - fn force_authoring(&self) -> bool { - self.force_authoring - } - - fn sync_oracle(&mut self) -> &mut Self::SyncOracle { - &mut self.sync_oracle - } - - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin(self.env.init(block).map_err(|e| { - sp_consensus::Error::ClientImport(format!("{:?}", e)).into() - })) - } - - fn proposing_remaining_duration( - &self, - head: &B::Header, - slot_info: &SlotInfo - ) -> Option { - // never give more than 20 times more lenience. - const BACKOFF_CAP: u64 = 20; - - let slot_remaining = self.slot_remaining_duration(slot_info); - let parent_slot = match find_pre_digest::(head) { - Err(_) => return Some(slot_remaining), - Ok(d) => d, - }; - - // we allow a lenience of the number of slots since the head of the - // chain was produced, minus 1 (since there is always a difference of at least 1) - // - // linear back-off. - // in normal cases we only attempt to issue blocks up to the end of the slot. - // when the chain has been stalled for a few slots, we give more lenience. 
- let slot_lenience = slot_info.number.saturating_sub(parent_slot + 1); - let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP); - let slot_lenience = Duration::from_secs(slot_lenience * slot_info.duration); - Some(slot_lenience + slot_remaining) - } + type BlockImport = I; + type SyncOracle = SO; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type Claim = P; + type EpochData = Vec>; + + fn logging_target(&self) -> &'static str { + "aura" + } + + fn block_import(&self) -> Arc> { + self.block_import.clone() + } + + fn epoch_data( + &self, + header: &B::Header, + _slot_number: u64, + ) -> Result { + authorities(self.client.as_ref(), &BlockId::Hash(header.hash())) + } + + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option { + Some(epoch_data.len()) + } + + fn claim_slot( + &self, + _header: &B::Header, + slot_number: u64, + epoch_data: &Self::EpochData, + ) -> Option { + let expected_author = slot_author::

(slot_number, epoch_data); + + expected_author.and_then(|p| { + self.keystore + .read() + .key_pair_by_type::

(&p, sp_application_crypto::key_types::AURA) + .ok() + }) + } + + fn pre_digest_data( + &self, + slot_number: u64, + _claim: &Self::Claim, + ) -> Vec> { + vec![ as CompatibleDigestItem

>::aura_pre_digest(slot_number)] + } + + fn block_import_params( + &self, + ) -> Box< + dyn Fn( + B::Header, + &B::Hash, + Vec, + StorageChanges, B>, + Self::Claim, + Self::EpochData, + ) -> sp_consensus::BlockImportParams> + + Send, + > { + Box::new(|header, header_hash, body, storage_changes, pair, _epoch| { + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let signature = pair.sign(header_hash.as_ref()); + let signature_digest_item = + as CompatibleDigestItem

>::aura_seal(signature); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(signature_digest_item); + import_block.body = Some(body); + import_block.storage_changes = Some(storage_changes); + import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + import_block + }) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)).into()), + ) + } + + fn proposing_remaining_duration( + &self, + head: &B::Header, + slot_info: &SlotInfo, + ) -> Option { + // never give more than 20 times more lenience. + const BACKOFF_CAP: u64 = 20; + + let slot_remaining = self.slot_remaining_duration(slot_info); + let parent_slot = match find_pre_digest::(head) { + Err(_) => return Some(slot_remaining), + Ok(d) => d, + }; + + // we allow a lenience of the number of slots since the head of the + // chain was produced, minus 1 (since there is always a difference of at least 1) + // + // linear back-off. + // in normal cases we only attempt to issue blocks up to the end of the slot. + // when the chain has been stalled for a few slots, we give more lenience. 
+ let slot_lenience = slot_info.number.saturating_sub(parent_slot + 1); + let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP); + let slot_lenience = Duration::from_secs(slot_lenience * slot_info.duration); + Some(slot_lenience + slot_remaining) + } } -impl SlotWorker for AuraWorker where - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync + Send, - C::Api: AuraApi>, - E: Environment + Send + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - P: Pair + Send + Sync, - P::Public: Member + Encode + Decode + Hash, - P::Signature: Member + Encode + Decode + Hash + Debug, - SO: SyncOracle + Send + Sync + Clone, - Error: std::error::Error + Send + From + 'static, +impl SlotWorker for AuraWorker +where + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache + Sync + Send, + C::Api: AuraApi>, + E: Environment + Send + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + P: Pair + Send + Sync, + P::Public: Member + Encode + Decode + Hash, + P::Signature: Member + Encode + Decode + Hash + Debug, + SO: SyncOracle + Send + Sync + Clone, + Error: std::error::Error + Send + From + 'static, { - type OnSlot = Pin> + Send>>; + type OnSlot = Pin> + Send>>; - fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { - >::on_slot(self, chain_head, slot_info) - } + fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { + >::on_slot(self, chain_head, slot_info) + } } fn aura_err(error: Error) -> Error { - debug!(target: "aura", "{}", error); - error + debug!(target: "aura", "{}", error); + error } #[derive(derive_more::Display, Debug)] enum Error { - #[display(fmt = "Multiple Aura pre-runtime headers")] - MultipleHeaders, - #[display(fmt = "No Aura pre-runtime digest found")] - NoDigestFound, - #[display(fmt = "Header {:?} is unsealed", _0)] - HeaderUnsealed(B::Hash), - #[display(fmt = "Header {:?} has a bad seal", _0)] - HeaderBadSeal(B::Hash), - 
#[display(fmt = "Slot Author not found")] - SlotAuthorNotFound, - #[display(fmt = "Bad signature on {:?}", _0)] - BadSignature(B::Hash), - #[display(fmt = "Rejecting block too far in future")] - TooFarInFuture, - Client(sp_blockchain::Error), - DataProvider(String), - Runtime(String), - #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] - SlotNumberMustIncrease(u64, u64), - #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] - ParentUnavailable(B::Hash, B::Hash), + #[display(fmt = "Multiple Aura pre-runtime headers")] + MultipleHeaders, + #[display(fmt = "No Aura pre-runtime digest found")] + NoDigestFound, + #[display(fmt = "Header {:?} is unsealed", _0)] + HeaderUnsealed(B::Hash), + #[display(fmt = "Header {:?} has a bad seal", _0)] + HeaderBadSeal(B::Hash), + #[display(fmt = "Slot Author not found")] + SlotAuthorNotFound, + #[display(fmt = "Bad signature on {:?}", _0)] + BadSignature(B::Hash), + #[display(fmt = "Rejecting block too far in future")] + TooFarInFuture, + Client(sp_blockchain::Error), + DataProvider(String), + Runtime(String), + #[display( + fmt = "Slot number must increase: parent slot: {}, this slot: {}", + _0, + _1 + )] + SlotNumberMustIncrease(u64, u64), + #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] + ParentUnavailable(B::Hash, B::Hash), } impl std::convert::From> for String { - fn from(error: Error) -> String { - error.to_string() - } + fn from(error: Error) -> String { + error.to_string() + } } fn find_pre_digest(header: &B::Header) -> Result> - where DigestItemFor: CompatibleDigestItem

, - P::Signature: Decode, - P::Public: Encode + Decode + PartialEq + Clone, +where + DigestItemFor: CompatibleDigestItem

, + P::Signature: Decode, + P::Public: Encode + Decode + PartialEq + Clone, { - if header.number().is_zero() { - return Ok(0); - } - - let mut pre_digest: Option = None; - for log in header.digest().logs() { - trace!(target: "aura", "Checking log {:?}", log); - match (log.as_aura_pre_digest(), pre_digest.is_some()) { - (Some(_), true) => Err(aura_err(Error::MultipleHeaders))?, - (None, _) => trace!(target: "aura", "Ignoring digest not meant for us"), - (s, false) => pre_digest = s, - } - } - pre_digest.ok_or_else(|| aura_err(Error::NoDigestFound)) + if header.number().is_zero() { + return Ok(0); + } + + let mut pre_digest: Option = None; + for log in header.digest().logs() { + trace!(target: "aura", "Checking log {:?}", log); + match (log.as_aura_pre_digest(), pre_digest.is_some()) { + (Some(_), true) => Err(aura_err(Error::MultipleHeaders))?, + (None, _) => trace!(target: "aura", "Ignoring digest not meant for us"), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| aura_err(Error::NoDigestFound)) } /// check a header has been signed by the right key. If the slot is too far in the future, an error will be returned. @@ -406,284 +435,298 @@ fn find_pre_digest(header: &B::Header) -> Result( - client: &C, - slot_now: u64, - mut header: B::Header, - hash: B::Hash, - authorities: &[AuthorityId

], -) -> Result)>, Error> where - DigestItemFor: CompatibleDigestItem

, - P::Signature: Decode, - C: sc_client_api::backend::AuxStore, - P::Public: Encode + Decode + PartialEq + Clone, + client: &C, + slot_now: u64, + mut header: B::Header, + hash: B::Hash, + authorities: &[AuthorityId

], +) -> Result)>, Error> +where + DigestItemFor: CompatibleDigestItem

, + P::Signature: Decode, + C: sc_client_api::backend::AuxStore, + P::Public: Encode + Decode + PartialEq + Clone, { - let seal = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(Error::HeaderUnsealed(hash)), - }; - - let sig = seal.as_aura_seal().ok_or_else(|| { - aura_err(Error::HeaderBadSeal(hash)) - })?; - - let slot_num = find_pre_digest::(&header)?; - - if slot_num > slot_now { - header.digest_mut().push(seal); - Ok(CheckedHeader::Deferred(header, slot_num)) - } else { - // check the signature is valid under the expected authority and - // chain state. - let expected_author = match slot_author::

(slot_num, &authorities) { - None => return Err(Error::SlotAuthorNotFound), - Some(author) => author, - }; - - let pre_hash = header.hash(); - - if P::verify(&sig, pre_hash.as_ref(), expected_author) { - if let Some(equivocation_proof) = check_equivocation( - client, - slot_now, - slot_num, - &header, - expected_author, - ).map_err(Error::Client)? { - info!( - "Slot author is equivocating at slot {} with headers {:?} and {:?}", - slot_num, - equivocation_proof.fst_header().hash(), - equivocation_proof.snd_header().hash(), - ); - } - - Ok(CheckedHeader::Checked(header, (slot_num, seal))) - } else { - Err(Error::BadSignature(hash)) - } - } + let seal = match header.digest_mut().pop() { + Some(x) => x, + None => return Err(Error::HeaderUnsealed(hash)), + }; + + let sig = seal + .as_aura_seal() + .ok_or_else(|| aura_err(Error::HeaderBadSeal(hash)))?; + + let slot_num = find_pre_digest::(&header)?; + + if slot_num > slot_now { + header.digest_mut().push(seal); + Ok(CheckedHeader::Deferred(header, slot_num)) + } else { + // check the signature is valid under the expected authority and + // chain state. + let expected_author = match slot_author::

(slot_num, &authorities) { + None => return Err(Error::SlotAuthorNotFound), + Some(author) => author, + }; + + let pre_hash = header.hash(); + + if P::verify(&sig, pre_hash.as_ref(), expected_author) { + if let Some(equivocation_proof) = + check_equivocation(client, slot_now, slot_num, &header, expected_author) + .map_err(Error::Client)? + { + info!( + "Slot author is equivocating at slot {} with headers {:?} and {:?}", + slot_num, + equivocation_proof.fst_header().hash(), + equivocation_proof.snd_header().hash(), + ); + } + + Ok(CheckedHeader::Checked(header, (slot_num, seal))) + } else { + Err(Error::BadSignature(hash)) + } + } } /// A verifier for Aura blocks. pub struct AuraVerifier { - client: Arc, - phantom: PhantomData

, - inherent_data_providers: sp_inherents::InherentDataProviders, + client: Arc, + phantom: PhantomData

, + inherent_data_providers: sp_inherents::InherentDataProviders, } impl AuraVerifier - where P: Send + Sync + 'static +where + P: Send + Sync + 'static, { - fn check_inherents( - &self, - block: B, - block_id: BlockId, - inherent_data: InherentData, - timestamp_now: u64, - ) -> Result<(), Error> - where C: ProvideRuntimeApi, C::Api: BlockBuilderApi - { - const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; - - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(Error::Client)?; - - if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { - Some(TIError::ValidAtTimestamp(timestamp)) => { - // halt import until timestamp is valid. - // reject when too far ahead. - if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { - return Err(Error::TooFarInFuture); - } - - let diff = timestamp.saturating_sub(timestamp_now); - info!( - target: "aura", - "halting for block {} seconds in the future", - diff - ); - telemetry!(CONSENSUS_INFO; "aura.halting_for_future_block"; - "diff" => ?diff - ); - thread::sleep(Duration::from_secs(diff)); - Ok(()) - }, - Some(TIError::Other(e)) => Err(Error::Runtime(e.into())), - None => Err(Error::DataProvider( - self.inherent_data_providers.error_to_string(&i, &e) - )), - }) - } else { - Ok(()) - } - } + fn check_inherents( + &self, + block: B, + block_id: BlockId, + inherent_data: InherentData, + timestamp_now: u64, + ) -> Result<(), Error> + where + C: ProvideRuntimeApi, + C::Api: BlockBuilderApi, + { + const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; + + let inherent_res = self + .client + .runtime_api() + .check_inherents(&block_id, block, inherent_data) + .map_err(Error::Client)?; + + if !inherent_res.ok() { + inherent_res + .into_errors() + .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { + Some(TIError::ValidAtTimestamp(timestamp)) => { + // halt import until timestamp is valid. + // reject when too far ahead. 
+ if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { + return Err(Error::TooFarInFuture); + } + + let diff = timestamp.saturating_sub(timestamp_now); + info!( + target: "aura", + "halting for block {} seconds in the future", + diff + ); + telemetry!(CONSENSUS_INFO; "aura.halting_for_future_block"; + "diff" => ?diff + ); + thread::sleep(Duration::from_secs(diff)); + Ok(()) + } + Some(TIError::Other(e)) => Err(Error::Runtime(e.into())), + None => Err(Error::DataProvider( + self.inherent_data_providers.error_to_string(&i, &e), + )), + }) + } else { + Ok(()) + } + } } #[forbid(deprecated)] -impl Verifier for AuraVerifier where - C: ProvideRuntimeApi + - Send + - Sync + - sc_client_api::backend::AuxStore + - ProvideCache + - BlockOf, - C::Api: BlockBuilderApi + AuraApi> + ApiExt, - DigestItemFor: CompatibleDigestItem

, - P: Pair + Send + Sync + 'static, - P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, - P::Signature: Encode + Decode, +impl Verifier for AuraVerifier +where + C: ProvideRuntimeApi + + Send + + Sync + + sc_client_api::backend::AuxStore + + ProvideCache + + BlockOf, + C::Api: + BlockBuilderApi + AuraApi> + ApiExt, + DigestItemFor: CompatibleDigestItem

, + P: Pair + Send + Sync + 'static, + P::Public: Send + Sync + Hash + Eq + Clone + Decode + Encode + Debug + 'static, + P::Signature: Encode + Decode, { - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - let mut inherent_data = self.inherent_data_providers - .create_inherent_data() - .map_err(|e| e.into_string())?; - let (timestamp_now, slot_now, _) = AuraSlotCompatible.extract_timestamp_and_slot(&inherent_data) - .map_err(|e| format!("Could not extract timestamp and slot: {:?}", e))?; - let hash = header.hash(); - let parent_hash = *header.parent_hash(); - let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) - .map_err(|e| format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; - - // we add one to allow for some small drift. - // FIXME #1019 in the future, alter this queue to allow deferring of - // headers - let checked_header = check_header::( - &self.client, - slot_now + 1, - header, - hash, - &authorities[..], - ).map_err(|e| e.to_string())?; - match checked_header { - CheckedHeader::Checked(pre_header, (slot_num, seal)) => { - // if the body is passed through, we need to use the runtime - // to check that the internally-set timestamp in the inherents - // actually matches the slot set in the seal. - if let Some(inner_body) = body.take() { - inherent_data.aura_replace_inherent_data(slot_num); - let block = B::new(pre_header.clone(), inner_body); - - // skip the inherents verification if the runtime API is old. - if self.client - .runtime_api() - .has_api_with::, _>( - &BlockId::Hash(parent_hash), - |v| v >= 2, - ) - .map_err(|e| format!("{:?}", e))? 
- { - self.check_inherents( - block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - timestamp_now, - ).map_err(|e| e.to_string())?; - } - - let (_, inner_body) = block.deconstruct(); - body = Some(inner_body); - } - - trace!(target: "aura", "Checked {:?}; importing.", pre_header); - telemetry!(CONSENSUS_TRACE; "aura.checked_and_importing"; "pre_header" => ?pre_header); - - // Look for an authorities-change log. - let maybe_keys = pre_header.digest() - .logs() - .iter() - .filter_map(|l| l.try_to::>>( - OpaqueDigestItemId::Consensus(&AURA_ENGINE_ID) - )) - .find_map(|l| match l { - ConsensusLog::AuthoritiesChange(a) => Some( - vec![(well_known_cache_keys::AUTHORITIES, a.encode())] - ), - _ => None, - }); - - let mut import_block = BlockImportParams::new(origin, pre_header); - import_block.post_digests.push(seal); - import_block.body = body; - import_block.justification = justification; - import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - import_block.post_hash = Some(hash); - - Ok((import_block, maybe_keys)) - } - CheckedHeader::Deferred(a, b) => { - debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!(CONSENSUS_DEBUG; "aura.header_too_far_in_future"; - "hash" => ?hash, "a" => ?a, "b" => ?b - ); - Err(format!("Header {:?} rejected: too far in the future", hash)) - } - } - } + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + mut body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + let mut inherent_data = self + .inherent_data_providers + .create_inherent_data() + .map_err(|e| e.into_string())?; + let (timestamp_now, slot_now, _) = AuraSlotCompatible + .extract_timestamp_and_slot(&inherent_data) + .map_err(|e| format!("Could not extract timestamp and slot: {:?}", e))?; + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + let authorities = authorities(self.client.as_ref(), &BlockId::Hash(parent_hash)) + .map_err(|e| 
format!("Could not fetch authorities at {:?}: {:?}", parent_hash, e))?; + + // we add one to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of + // headers + let checked_header = + check_header::(&self.client, slot_now + 1, header, hash, &authorities[..]) + .map_err(|e| e.to_string())?; + match checked_header { + CheckedHeader::Checked(pre_header, (slot_num, seal)) => { + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. + if let Some(inner_body) = body.take() { + inherent_data.aura_replace_inherent_data(slot_num); + let block = B::new(pre_header.clone(), inner_body); + + // skip the inherents verification if the runtime API is old. + if self + .client + .runtime_api() + .has_api_with::, _>( + &BlockId::Hash(parent_hash), + |v| v >= 2, + ) + .map_err(|e| format!("{:?}", e))? + { + self.check_inherents( + block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + timestamp_now, + ) + .map_err(|e| e.to_string())?; + } + + let (_, inner_body) = block.deconstruct(); + body = Some(inner_body); + } + + trace!(target: "aura", "Checked {:?}; importing.", pre_header); + telemetry!(CONSENSUS_TRACE; "aura.checked_and_importing"; "pre_header" => ?pre_header); + + // Look for an authorities-change log. 
+ let maybe_keys = pre_header + .digest() + .logs() + .iter() + .filter_map(|l| { + l.try_to::>>(OpaqueDigestItemId::Consensus( + &AURA_ENGINE_ID, + )) + }) + .find_map(|l| match l { + ConsensusLog::AuthoritiesChange(a) => { + Some(vec![(well_known_cache_keys::AUTHORITIES, a.encode())]) + } + _ => None, + }); + + let mut import_block = BlockImportParams::new(origin, pre_header); + import_block.post_digests.push(seal); + import_block.body = body; + import_block.justification = justification; + import_block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + import_block.post_hash = Some(hash); + + Ok((import_block, maybe_keys)) + } + CheckedHeader::Deferred(a, b) => { + debug!(target: "aura", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!(CONSENSUS_DEBUG; "aura.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(format!("Header {:?} rejected: too far in the future", hash)) + } + } + } } -fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> where - A: Codec, - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache, - C::Api: AuraApi, +fn initialize_authorities_cache(client: &C) -> Result<(), ConsensusError> +where + A: Codec, + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache, + C::Api: AuraApi, { - // no cache => no initialization - let cache = match client.cache() { - Some(cache) => cache, - None => return Ok(()), - }; - - // check if we already have initialized the cache - let map_err = |error| sp_consensus::Error::from(sp_consensus::Error::ClientImport( - format!( - "Error initializing authorities cache: {}", - error, - ))); - - let genesis_id = BlockId::Number(Zero::zero()); - let genesis_authorities: Option> = cache - .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); - if genesis_authorities.is_some() { - return Ok(()); - } - - let genesis_authorities = authorities(client, &genesis_id)?; - 
cache.initialize(&well_known_cache_keys::AUTHORITIES, genesis_authorities.encode()) - .map_err(map_err)?; - - Ok(()) + // no cache => no initialization + let cache = match client.cache() { + Some(cache) => cache, + None => return Ok(()), + }; + + // check if we already have initialized the cache + let map_err = |error| { + sp_consensus::Error::from(sp_consensus::Error::ClientImport(format!( + "Error initializing authorities cache: {}", + error, + ))) + }; + + let genesis_id = BlockId::Number(Zero::zero()); + let genesis_authorities: Option> = cache + .get_at(&well_known_cache_keys::AUTHORITIES, &genesis_id) + .unwrap_or(None) + .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()); + if genesis_authorities.is_some() { + return Ok(()); + } + + let genesis_authorities = authorities(client, &genesis_id)?; + cache + .initialize( + &well_known_cache_keys::AUTHORITIES, + genesis_authorities.encode(), + ) + .map_err(map_err)?; + + Ok(()) } #[allow(deprecated)] -fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> where - A: Codec, - B: BlockT, - C: ProvideRuntimeApi + BlockOf + ProvideCache, - C::Api: AuraApi, +fn authorities(client: &C, at: &BlockId) -> Result, ConsensusError> +where + A: Codec, + B: BlockT, + C: ProvideRuntimeApi + BlockOf + ProvideCache, + C::Api: AuraApi, { - client - .cache() - .and_then(|cache| cache - .get_at(&well_known_cache_keys::AUTHORITIES, at) - .unwrap_or(None) - .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()) - ) - .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) - .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet.into()) + client + .cache() + .and_then(|cache| { + cache + .get_at(&well_known_cache_keys::AUTHORITIES, at) + .unwrap_or(None) + .and_then(|(_, _, v)| Decode::decode(&mut &v[..]).ok()) + }) + .or_else(|| AuraApi::authorities(&*client.runtime_api(), at).ok()) + .ok_or_else(|| sp_consensus::Error::InvalidAuthoritiesSet.into()) } /// The Aura import queue type. 
@@ -691,330 +734,365 @@ pub type AuraImportQueue = BasicQueue; /// Register the aura inherent data provider, if not registered already. fn register_aura_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, - slot_duration: u64, + inherent_data_providers: &InherentDataProviders, + slot_duration: u64, ) -> Result<(), sp_consensus::Error> { - if !inherent_data_providers.has_provider(&INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(InherentDataProvider::new(slot_duration)) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } + if !inherent_data_providers.has_provider(&INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(InherentDataProvider::new(slot_duration)) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + } else { + Ok(()) + } } /// A block-import handler for Aura. pub struct AuraBlockImport, P> { - inner: I, - client: Arc, - _phantom: PhantomData<(Block, P)>, + inner: I, + client: Arc, + _phantom: PhantomData<(Block, P)>, } impl, P> Clone for AuraBlockImport { - fn clone(&self) -> Self { - AuraBlockImport { - inner: self.inner.clone(), - client: self.client.clone(), - _phantom: PhantomData, - } - } + fn clone(&self) -> Self { + AuraBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + _phantom: PhantomData, + } + } } impl, P> AuraBlockImport { - /// New aura block import. - pub fn new( - inner: I, - client: Arc, - ) -> Self { - Self { - inner, - client, - _phantom: PhantomData, - } - } + /// New aura block import. 
+ pub fn new(inner: I, client: Arc) -> Self { + Self { + inner, + client, + _phantom: PhantomData, + } + } } -impl BlockImport for AuraBlockImport where - I: BlockImport> + Send + Sync, - I::Error: Into, - C: HeaderBackend + ProvideRuntimeApi, - P: Pair + Send + Sync + 'static, - P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, - P::Signature: Encode + Decode, +impl BlockImport for AuraBlockImport +where + I: BlockImport> + Send + Sync, + I::Error: Into, + C: HeaderBackend + ProvideRuntimeApi, + P: Pair + Send + Sync + 'static, + P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, + P::Signature: Encode + Decode, { - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).map_err(Into::into) - } - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let hash = block.post_hash(); - let slot_number = find_pre_digest::(&block.header) - .expect("valid Aura headers must contain a predigest; \ - header has been already verified; qed"); - - let parent_hash = *block.header.parent_hash(); - let parent_header = self.client.header(BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
- .ok_or_else(|| ConsensusError::ChainLookup(aura_err( - Error::::ParentUnavailable(parent_hash, hash) - ).into()))?; - - let parent_slot = find_pre_digest::(&parent_header) - .expect("valid Aura headers contain a pre-digest; \ - parent header has already been verified; qed"); - - // make sure that slot number is strictly increasing - if slot_number <= parent_slot { - return Err( - ConsensusError::ClientImport(aura_err( - Error::::SlotNumberMustIncrease(parent_slot, slot_number) - ).into()) - ); - } - - self.inner.import_block(block, new_cache).map_err(Into::into) - } + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + self.inner.check_block(block).map_err(Into::into) + } + + fn import_block( + &mut self, + block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let slot_number = find_pre_digest::(&block.header).expect( + "valid Aura headers must contain a predigest; \ + header has been already verified; qed", + ); + + let parent_hash = *block.header.parent_hash(); + let parent_header = self + .client + .header(BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ChainLookup( + aura_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + + let parent_slot = find_pre_digest::(&parent_header).expect( + "valid Aura headers contain a pre-digest; \ + parent header has already been verified; qed", + ); + + // make sure that slot number is strictly increasing + if slot_number <= parent_slot { + return Err(ConsensusError::ClientImport( + aura_err(Error::::SlotNumberMustIncrease( + parent_slot, + slot_number, + )) + .into(), + )); + } + + self.inner + .import_block(block, new_cache) + .map_err(Into::into) + } } /// Start an import queue for the Aura consensus algorithm. 
pub fn import_queue( - slot_duration: SlotDuration, - block_import: I, - justification_import: Option>, - finality_proof_import: Option>, - client: Arc, - inherent_data_providers: InherentDataProviders, -) -> Result>, sp_consensus::Error> where - B: BlockT, - C::Api: BlockBuilderApi + AuraApi> + ApiExt, - C: 'static + ProvideRuntimeApi + BlockOf + ProvideCache + Send + Sync + AuxStore + HeaderBackend, - I: BlockImport> + Send + Sync + 'static, - DigestItemFor: CompatibleDigestItem

, - P: Pair + Send + Sync + 'static, - P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, - P::Signature: Encode + Decode, + slot_duration: SlotDuration, + block_import: I, + justification_import: Option>, + finality_proof_import: Option>, + client: Arc, + inherent_data_providers: InherentDataProviders, +) -> Result>, sp_consensus::Error> +where + B: BlockT, + C::Api: + BlockBuilderApi + AuraApi> + ApiExt, + C: 'static + + ProvideRuntimeApi + + BlockOf + + ProvideCache + + Send + + Sync + + AuxStore + + HeaderBackend, + I: BlockImport> + + Send + + Sync + + 'static, + DigestItemFor: CompatibleDigestItem

, + P: Pair + Send + Sync + 'static, + P::Public: Clone + Eq + Send + Sync + Hash + Debug + Encode + Decode, + P::Signature: Encode + Decode, { - register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; - initialize_authorities_cache(&*client)?; - - let verifier = AuraVerifier { - client: client.clone(), - inherent_data_providers, - phantom: PhantomData, - }; - Ok(BasicQueue::new( - verifier, - Box::new(block_import), - justification_import, - finality_proof_import, - )) + register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get())?; + initialize_authorities_cache(&*client)?; + + let verifier = AuraVerifier { + client: client.clone(), + inherent_data_providers, + phantom: PhantomData, + }; + Ok(BasicQueue::new( + verifier, + Box::new(block_import), + justification_import, + finality_proof_import, + )) } #[cfg(test)] mod tests { - use super::*; - use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof}; - use sc_network_test::{Block as TestBlock, *}; - use sp_runtime::traits::{Block as BlockT, DigestFor}; - use sc_network::config::ProtocolConfig; - use parking_lot::Mutex; - use sp_keyring::sr25519::Keyring; - use sc_client::BlockchainEvents; - use sp_consensus_aura::sr25519::AuthorityPair; - use std::task::Poll; - use sc_block_builder::BlockBuilderProvider; - - type Error = sp_blockchain::Error; - - type TestClient = sc_client::Client< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, - TestBlock, - substrate_test_runtime_client::runtime::RuntimeApi - >; - - struct DummyFactory(Arc); - struct DummyProposer(u64, Arc); - - impl Environment for DummyFactory { - type Proposer = DummyProposer; - type CreateProposer = futures::future::Ready>; - type Error = Error; - - fn init(&mut self, parent_header: &::Header) - -> Self::CreateProposer - { - futures::future::ready(Ok(DummyProposer(parent_header.number + 1, self.0.clone()))) - } - } - - impl Proposer for DummyProposer { - 
type Error = Error; - type Transaction = sc_client_api::TransactionFor< - substrate_test_runtime_client::Backend, - TestBlock - >; - type Proposal = future::Ready, Error>>; - - fn propose( - &mut self, - _: InherentData, - digests: DigestFor, - _: Duration, - _: RecordProof, - ) -> Self::Proposal { - let r = self.1.new_block(digests).unwrap().build().map_err(|e| e.into()); - - future::ready(r.map(|b| Proposal { - block: b.block, - proof: b.proof, - storage_changes: b.storage_changes, - })) - } - } - - const SLOT_DURATION: u64 = 1000; - - pub struct AuraTestNet { - peers: Vec>, - } - - impl TestNetFactory for AuraTestNet { - type Verifier = AuraVerifier; - type PeerData = (); - - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - AuraTestNet { - peers: Vec::new(), - } - } - - fn make_verifier(&self, client: PeersClient, _cfg: &ProtocolConfig, _peer_data: &()) - -> Self::Verifier - { - match client { - PeersClient::Full(client, _) => { - let slot_duration = slot_duration(&*client).expect("slot duration available"); - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, - slot_duration.get() - ).expect("Registers aura inherent data provider"); - - assert_eq!(slot_duration.get(), SLOT_DURATION); - AuraVerifier { - client, - inherent_data_providers, - phantom: Default::default(), - } - }, - PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"), - } - } - - fn peer(&mut self, i: usize) -> &mut Peer { - &mut self.peers[i] - } - - fn peers(&self) -> &Vec> { - &self.peers - } - - fn mut_peers>)>(&mut self, closure: F) { - closure(&mut self.peers); - } - } - - #[test] - #[allow(deprecated)] - fn authoring_blocks() { - let _ = env_logger::try_init(); - let net = AuraTestNet::new(3); - - let peers = &[ - (0, Keyring::Alice), - (1, Keyring::Bob), - (2, Keyring::Charlie), - ]; - - let net = Arc::new(Mutex::new(net)); 
- let mut import_notifications = Vec::new(); - let mut aura_futures = Vec::new(); - - let mut keystore_paths = Vec::new(); - for (peer_id, key) in peers { - let mut net = net.lock(); - let peer = net.peer(*peer_id); - let client = peer.client().as_full().expect("full clients are created").clone(); - let select_chain = peer.select_chain().expect("full client has a select chain"); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore."); - - keystore.write().insert_ephemeral_from_seed::(&key.to_seed()) - .expect("Creates authority key"); - keystore_paths.push(keystore_path); - - let environ = DummyFactory(client.clone()); - import_notifications.push( - client.import_notification_stream() - .take_while(|n| future::ready(!(n.origin != BlockOrigin::Own && n.header.number() < &5))) - .for_each(move |_| future::ready(())) - ); - - let slot_duration = slot_duration(&*client).expect("slot duration available"); - - let inherent_data_providers = InherentDataProviders::new(); - register_aura_inherent_data_provider( - &inherent_data_providers, slot_duration.get() - ).expect("Registers aura inherent data provider"); - - aura_futures.push(start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>( - slot_duration, - client.clone(), - select_chain, - client, - environ, - DummyOracle, - inherent_data_providers, - false, - keystore, - sp_consensus::AlwaysCanAuthor, - ).expect("Starts aura")); - } - - futures::executor::block_on(future::select( - future::poll_fn(move |cx| { - net.lock().poll(cx); - Poll::<()>::Pending - }), - future::select( - future::join_all(aura_futures), - future::join_all(import_notifications) - ) - )); - } - - #[test] - fn authorities_call_works() { - let client = substrate_test_runtime_client::new(); - - assert_eq!(client.chain_info().best_number, 0); - assert_eq!(authorities(&client, &BlockId::Number(0)).unwrap(), vec![ - 
Keyring::Alice.public().into(), - Keyring::Bob.public().into(), - Keyring::Charlie.public().into() - ]); - } + use super::*; + use parking_lot::Mutex; + use sc_block_builder::BlockBuilderProvider; + use sc_client::BlockchainEvents; + use sc_network::config::ProtocolConfig; + use sc_network_test::{Block as TestBlock, *}; + use sp_consensus::{NoNetwork as DummyOracle, Proposal, RecordProof}; + use sp_consensus_aura::sr25519::AuthorityPair; + use sp_keyring::sr25519::Keyring; + use sp_runtime::traits::{Block as BlockT, DigestFor}; + use std::task::Poll; + + type Error = sp_blockchain::Error; + + type TestClient = sc_client::Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + TestBlock, + substrate_test_runtime_client::runtime::RuntimeApi, + >; + + struct DummyFactory(Arc); + struct DummyProposer(u64, Arc); + + impl Environment for DummyFactory { + type Proposer = DummyProposer; + type CreateProposer = futures::future::Ready>; + type Error = Error; + + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { + futures::future::ready(Ok(DummyProposer(parent_header.number + 1, self.0.clone()))) + } + } + + impl Proposer for DummyProposer { + type Error = Error; + type Transaction = + sc_client_api::TransactionFor; + type Proposal = future::Ready, Error>>; + + fn propose( + &mut self, + _: InherentData, + digests: DigestFor, + _: Duration, + _: RecordProof, + ) -> Self::Proposal { + let r = self + .1 + .new_block(digests) + .unwrap() + .build() + .map_err(|e| e.into()); + + future::ready(r.map(|b| Proposal { + block: b.block, + proof: b.proof, + storage_changes: b.storage_changes, + })) + } + } + + const SLOT_DURATION: u64 = 1000; + + pub struct AuraTestNet { + peers: Vec>, + } + + impl TestNetFactory for AuraTestNet { + type Verifier = AuraVerifier; + type PeerData = (); + + /// Create new test network with peers and given config. 
+ fn from_config(_config: &ProtocolConfig) -> Self { + AuraTestNet { peers: Vec::new() } + } + + fn make_verifier( + &self, + client: PeersClient, + _cfg: &ProtocolConfig, + _peer_data: &(), + ) -> Self::Verifier { + match client { + PeersClient::Full(client, _) => { + let slot_duration = slot_duration(&*client).expect("slot duration available"); + let inherent_data_providers = InherentDataProviders::new(); + register_aura_inherent_data_provider( + &inherent_data_providers, + slot_duration.get(), + ) + .expect("Registers aura inherent data provider"); + + assert_eq!(slot_duration.get(), SLOT_DURATION); + AuraVerifier { + client, + inherent_data_providers, + phantom: Default::default(), + } + } + PeersClient::Light(_, _) => unreachable!("No (yet) tests for light client + Aura"), + } + } + + fn peer(&mut self, i: usize) -> &mut Peer { + &mut self.peers[i] + } + + fn peers(&self) -> &Vec> { + &self.peers + } + + fn mut_peers>)>(&mut self, closure: F) { + closure(&mut self.peers); + } + } + + #[test] + #[allow(deprecated)] + fn authoring_blocks() { + let _ = env_logger::try_init(); + let net = AuraTestNet::new(3); + + let peers = &[ + (0, Keyring::Alice), + (1, Keyring::Bob), + (2, Keyring::Charlie), + ]; + + let net = Arc::new(Mutex::new(net)); + let mut import_notifications = Vec::new(); + let mut aura_futures = Vec::new(); + + let mut keystore_paths = Vec::new(); + for (peer_id, key) in peers { + let mut net = net.lock(); + let peer = net.peer(*peer_id); + let client = peer + .client() + .as_full() + .expect("full clients are created") + .clone(); + let select_chain = peer.select_chain().expect("full client has a select chain"); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = + sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore."); + + keystore + .write() + .insert_ephemeral_from_seed::(&key.to_seed()) + .expect("Creates authority key"); + keystore_paths.push(keystore_path); + + let environ = 
DummyFactory(client.clone()); + import_notifications.push( + client + .import_notification_stream() + .take_while(|n| { + future::ready(!(n.origin != BlockOrigin::Own && n.header.number() < &5)) + }) + .for_each(move |_| future::ready(())), + ); + + let slot_duration = slot_duration(&*client).expect("slot duration available"); + + let inherent_data_providers = InherentDataProviders::new(); + register_aura_inherent_data_provider(&inherent_data_providers, slot_duration.get()) + .expect("Registers aura inherent data provider"); + + aura_futures.push( + start_aura::<_, _, _, _, _, AuthorityPair, _, _, _>( + slot_duration, + client.clone(), + select_chain, + client, + environ, + DummyOracle, + inherent_data_providers, + false, + keystore, + sp_consensus::AlwaysCanAuthor, + ) + .expect("Starts aura"), + ); + } + + futures::executor::block_on(future::select( + future::poll_fn(move |cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }), + future::select( + future::join_all(aura_futures), + future::join_all(import_notifications), + ), + )); + } + + #[test] + fn authorities_call_works() { + let client = substrate_test_runtime_client::new(); + + assert_eq!(client.chain_info().best_number, 0); + assert_eq!( + authorities(&client, &BlockId::Number(0)).unwrap(), + vec![ + Keyring::Alice.public().into(), + Keyring::Bob.public().into(), + Keyring::Charlie.public().into() + ] + ); + } } diff --git a/client/consensus/babe/rpc/src/lib.rs b/client/consensus/babe/rpc/src/lib.rs index cb78504b1f..fc1a9bbb5b 100644 --- a/client/consensus/babe/rpc/src/lib.rs +++ b/client/consensus/babe/rpc/src/lib.rs @@ -16,26 +16,19 @@ //! RPC api for babe. 
-use sc_consensus_babe::{Epoch, authorship, Config}; use futures::{FutureExt as _, TryFutureExt as _}; -use jsonrpc_core::{ - Error as RpcError, - futures::future as rpc_future, -}; +use jsonrpc_core::{futures::future as rpc_future, Error as RpcError}; use jsonrpc_derive::rpc; +use sc_consensus_babe::{authorship, Config, Epoch}; use sc_consensus_epochs::{descendent_query, Epoch as EpochT, SharedEpochChanges}; -use sp_consensus_babe::{ - AuthorityId, - BabeApi as BabeRuntimeApi, - digests::PreDigest, -}; -use serde::{Deserialize, Serialize}; use sc_keystore::KeyStorePtr; -use sp_api::{ProvideRuntimeApi, BlockId}; +use serde::{Deserialize, Serialize}; +use sp_api::{BlockId, ProvideRuntimeApi}; +use sp_blockchain::{Error as BlockChainError, HeaderBackend, HeaderMetadata}; +use sp_consensus::{Error as ConsensusError, SelectChain}; +use sp_consensus_babe::{digests::PreDigest, AuthorityId, BabeApi as BabeRuntimeApi}; use sp_core::crypto::Pair; use sp_runtime::traits::{Block as BlockT, Header as _}; -use sp_consensus::{SelectChain, Error as ConsensusError}; -use sp_blockchain::{HeaderBackend, HeaderMetadata, Error as BlockChainError}; use std::{collections::HashMap, fmt, sync::Arc}; type FutureResult = Box + Send>; @@ -43,201 +36,227 @@ type FutureResult = Box + /// Provides rpc methods for interacting with Babe. #[rpc] pub trait BabeApi { - /// Returns data about which slots (primary or secondary) can be claimed in the current epoch - /// with the keys in the keystore. - #[rpc(name = "babe_epochAuthorship")] - fn epoch_authorship(&self) -> FutureResult>; + /// Returns data about which slots (primary or secondary) can be claimed in the current epoch + /// with the keys in the keystore. + #[rpc(name = "babe_epochAuthorship")] + fn epoch_authorship(&self) -> FutureResult>; } /// Implements the BabeRPC trait for interacting with Babe. pub struct BabeRPCHandler { - /// shared reference to the client. 
- client: Arc, - /// shared reference to EpochChanges - shared_epoch_changes: SharedEpochChanges, - /// shared reference to the Keystore - keystore: KeyStorePtr, - /// config (actually holds the slot duration) - babe_config: Config, - /// The SelectChain strategy - select_chain: SC, + /// shared reference to the client. + client: Arc, + /// shared reference to EpochChanges + shared_epoch_changes: SharedEpochChanges, + /// shared reference to the Keystore + keystore: KeyStorePtr, + /// config (actually holds the slot duration) + babe_config: Config, + /// The SelectChain strategy + select_chain: SC, } impl BabeRPCHandler { - /// Creates a new instance of the BabeRpc handler. - pub fn new( - client: Arc, - shared_epoch_changes: SharedEpochChanges, - keystore: KeyStorePtr, - babe_config: Config, - select_chain: SC, - ) -> Self { - Self { - client, - shared_epoch_changes, - keystore, - babe_config, - select_chain, - } - } + /// Creates a new instance of the BabeRpc handler. + pub fn new( + client: Arc, + shared_epoch_changes: SharedEpochChanges, + keystore: KeyStorePtr, + babe_config: Config, + select_chain: SC, + ) -> Self { + Self { + client, + shared_epoch_changes, + keystore, + babe_config, + select_chain, + } + } } impl BabeApi for BabeRPCHandler - where - B: BlockT, - C: ProvideRuntimeApi + HeaderBackend + HeaderMetadata + 'static, - C::Api: BabeRuntimeApi, - ::Error: fmt::Debug, - SC: SelectChain + Clone + 'static, +where + B: BlockT, + C: ProvideRuntimeApi + + HeaderBackend + + HeaderMetadata + + 'static, + C::Api: BabeRuntimeApi, + ::Error: fmt::Debug, + SC: SelectChain + Clone + 'static, { - fn epoch_authorship(&self) -> FutureResult> { - let ( - babe_config, - keystore, - shared_epoch, - client, - select_chain, - ) = ( - self.babe_config.clone(), - self.keystore.clone(), - self.shared_epoch_changes.clone(), - self.client.clone(), - self.select_chain.clone(), - ); - let future = async move { - let header = select_chain.best_chain().map_err(Error::Consensus)?; 
- let epoch_start = client.runtime_api() - .current_epoch_start(&BlockId::Hash(header.hash())) - .map_err(|err| { - Error::StringError(format!("{:?}", err)) - })?; - let epoch = epoch_data(&shared_epoch, &client, &babe_config, epoch_start, &select_chain)?; - let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); - - let mut claims: HashMap = HashMap::new(); - - for slot_number in epoch_start..epoch_end { - let epoch = epoch_data(&shared_epoch, &client, &babe_config, slot_number, &select_chain)?; - if let Some((claim, key)) = authorship::claim_slot(slot_number, &epoch, &babe_config, &keystore) { - match claim { - PreDigest::Primary { .. } => { - claims.entry(key.public()).or_default().primary.push(slot_number); - } - PreDigest::Secondary { .. } => { - claims.entry(key.public()).or_default().secondary.push(slot_number); - } - }; - } - } - - Ok(claims) - }.boxed(); - - Box::new(future.compat()) - } + fn epoch_authorship(&self) -> FutureResult> { + let (babe_config, keystore, shared_epoch, client, select_chain) = ( + self.babe_config.clone(), + self.keystore.clone(), + self.shared_epoch_changes.clone(), + self.client.clone(), + self.select_chain.clone(), + ); + let future = async move { + let header = select_chain.best_chain().map_err(Error::Consensus)?; + let epoch_start = client + .runtime_api() + .current_epoch_start(&BlockId::Hash(header.hash())) + .map_err(|err| Error::StringError(format!("{:?}", err)))?; + let epoch = epoch_data( + &shared_epoch, + &client, + &babe_config, + epoch_start, + &select_chain, + )?; + let (epoch_start, epoch_end) = (epoch.start_slot(), epoch.end_slot()); + + let mut claims: HashMap = HashMap::new(); + + for slot_number in epoch_start..epoch_end { + let epoch = epoch_data( + &shared_epoch, + &client, + &babe_config, + slot_number, + &select_chain, + )?; + if let Some((claim, key)) = + authorship::claim_slot(slot_number, &epoch, &babe_config, &keystore) + { + match claim { + PreDigest::Primary { .. 
} => { + claims + .entry(key.public()) + .or_default() + .primary + .push(slot_number); + } + PreDigest::Secondary { .. } => { + claims + .entry(key.public()) + .or_default() + .secondary + .push(slot_number); + } + }; + } + } + + Ok(claims) + } + .boxed(); + + Box::new(future.compat()) + } } /// Holds information about the `slot_number`'s that can be claimed by a given key. #[derive(Default, Debug, Deserialize, Serialize)] pub struct EpochAuthorship { - /// the array of primary slots that can be claimed - primary: Vec, - /// the array of secondary slots that can be claimed - secondary: Vec, + /// the array of primary slots that can be claimed + primary: Vec, + /// the array of secondary slots that can be claimed + secondary: Vec, } /// Errors encountered by the RPC #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Consensus error - Consensus(ConsensusError), - /// Errors that can be formatted as a String - StringError(String) + /// Consensus error + Consensus(ConsensusError), + /// Errors that can be formatted as a String + StringError(String), } impl From for jsonrpc_core::Error { - fn from(error: Error) -> Self { - jsonrpc_core::Error { - message: format!("{}", error).into(), - code: jsonrpc_core::ErrorCode::ServerError(1234), - data: None, - } - } + fn from(error: Error) -> Self { + jsonrpc_core::Error { + message: format!("{}", error).into(), + code: jsonrpc_core::ErrorCode::ServerError(1234), + data: None, + } + } } /// fetches the epoch data for a given slot_number. 
fn epoch_data( - epoch_changes: &SharedEpochChanges, - client: &Arc, - babe_config: &Config, - slot_number: u64, - select_chain: &SC, + epoch_changes: &SharedEpochChanges, + client: &Arc, + babe_config: &Config, + slot_number: u64, + select_chain: &SC, ) -> Result - where - B: BlockT, - C: HeaderBackend + HeaderMetadata + 'static, - SC: SelectChain, +where + B: BlockT, + C: HeaderBackend + HeaderMetadata + 'static, + SC: SelectChain, { - let parent = select_chain.best_chain()?; - epoch_changes.lock().epoch_data_for_child_of( - descendent_query(&**client), - &parent.hash(), - parent.number().clone(), - slot_number, - |slot| babe_config.genesis_epoch(slot), - ) - .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? - .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) + let parent = select_chain.best_chain()?; + epoch_changes + .lock() + .epoch_data_for_child_of( + descendent_query(&**client), + &parent.hash(), + parent.number().clone(), + slot_number, + |slot| babe_config.genesis_epoch(slot), + ) + .map_err(|e| Error::Consensus(ConsensusError::ChainLookup(format!("{:?}", e))))? + .ok_or(Error::Consensus(ConsensusError::InvalidAuthoritiesSet)) } #[cfg(test)] mod tests { - use super::*; - use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, - TestClientBuilderExt, - TestClientBuilder, - }; - use sp_application_crypto::AppPair; - use sp_keyring::Ed25519Keyring; - use sc_keystore::Store; - - use std::sync::Arc; - use sc_consensus_babe::{Config, block_import, AuthorityPair}; - use jsonrpc_core::IoHandler; - - /// creates keystore backed by a temp file - fn create_temp_keystore(authority: Ed25519Keyring) -> (KeyStorePtr, tempfile::TempDir) { - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = Store::open(keystore_path.path(), None).expect("Creates keystore"); - keystore.write().insert_ephemeral_from_seed::

(&authority.to_seed()) - .expect("Creates authority key"); - - (keystore, keystore_path) - } - - #[test] - fn rpc() { - let builder = TestClientBuilder::new(); - let (client, longest_chain) = builder.build_with_longest_chain(); - let client = Arc::new(client); - let config = Config::get_or_compute(&*client).expect("config available"); - let (_, link) = block_import( - config.clone(), - client.clone(), - client.clone(), - ).expect("can initialize block-import"); - - let epoch_changes = link.epoch_changes().clone(); - let select_chain = longest_chain; - let keystore = create_temp_keystore::(Ed25519Keyring::Alice).0; - let handler = BabeRPCHandler::new(client.clone(), epoch_changes, keystore, config, select_chain); - let mut io = IoHandler::new(); - - io.extend_with(BabeApi::to_delegate(handler)); - let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; - let response = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4]}},"id":1}"#; - - assert_eq!(Some(response.into()), io.handle_request_sync(request)); - } + use super::*; + use sc_keystore::Store; + use sp_application_crypto::AppPair; + use sp_keyring::Ed25519Keyring; + use substrate_test_runtime_client::{ + DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + + use jsonrpc_core::IoHandler; + use sc_consensus_babe::{block_import, AuthorityPair, Config}; + use std::sync::Arc; + + /// creates keystore backed by a temp file + fn create_temp_keystore( + authority: Ed25519Keyring, + ) -> (KeyStorePtr, tempfile::TempDir) { + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = Store::open(keystore_path.path(), None).expect("Creates keystore"); + keystore + .write() + .insert_ephemeral_from_seed::

(&authority.to_seed()) + .expect("Creates authority key"); + + (keystore, keystore_path) + } + + #[test] + fn rpc() { + let builder = TestClientBuilder::new(); + let (client, longest_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let config = Config::get_or_compute(&*client).expect("config available"); + let (_, link) = block_import(config.clone(), client.clone(), client.clone()) + .expect("can initialize block-import"); + + let epoch_changes = link.epoch_changes().clone(); + let select_chain = longest_chain; + let keystore = create_temp_keystore::(Ed25519Keyring::Alice).0; + let handler = BabeRPCHandler::new( + client.clone(), + epoch_changes, + keystore, + config, + select_chain, + ); + let mut io = IoHandler::new(); + + io.extend_with(BabeApi::to_delegate(handler)); + let request = r#"{"jsonrpc":"2.0","method":"babe_epochAuthorship","params": [],"id":1}"#; + let response = r#"{"jsonrpc":"2.0","result":{"5GrwvaEF5zXb26Fz9rcQpDWS57CtERHpNehXCPcNoHGKutQY":{"primary":[0],"secondary":[1,2,4]}},"id":1}"#; + + assert_eq!(Some(response.into()), io.handle_request_sync(request)); + } } diff --git a/client/consensus/babe/src/authorship.rs b/client/consensus/babe/src/authorship.rs index 074e582bff..5ffc748d4d 100644 --- a/client/consensus/babe/src/authorship.rs +++ b/client/consensus/babe/src/authorship.rs @@ -16,128 +16,120 @@ //! BABE authority selection and slot claiming. 
+use super::Epoch; +use codec::Encode; use merlin::Transcript; +use sc_keystore::KeyStorePtr; +use schnorrkel::vrf::VRFInOut; +use sp_consensus_babe::digests::{PreDigest, PrimaryPreDigest, SecondaryPreDigest}; use sp_consensus_babe::{ - AuthorityId, BabeAuthorityWeight, BABE_ENGINE_ID, BABE_VRF_PREFIX, - SlotNumber, AuthorityPair, BabeConfiguration + AuthorityId, AuthorityPair, BabeAuthorityWeight, BabeConfiguration, SlotNumber, BABE_ENGINE_ID, + BABE_VRF_PREFIX, }; -use sp_consensus_babe::digests::{PreDigest, PrimaryPreDigest, SecondaryPreDigest}; use sp_consensus_vrf::schnorrkel::{VRFOutput, VRFProof}; -use sp_core::{U256, blake2_256}; -use codec::Encode; -use schnorrkel::vrf::VRFInOut; use sp_core::Pair; -use sc_keystore::KeyStorePtr; -use super::Epoch; +use sp_core::{blake2_256, U256}; /// Calculates the primary selection threshold for a given authority, taking /// into account `c` (`1 - c` represents the probability of a slot being empty). pub(super) fn calculate_primary_threshold( - c: (u64, u64), - authorities: &[(AuthorityId, BabeAuthorityWeight)], - authority_index: usize, + c: (u64, u64), + authorities: &[(AuthorityId, BabeAuthorityWeight)], + authority_index: usize, ) -> u128 { - use num_bigint::BigUint; - use num_rational::BigRational; - use num_traits::{cast::ToPrimitive, identities::One}; + use num_bigint::BigUint; + use num_rational::BigRational; + use num_traits::{cast::ToPrimitive, identities::One}; - let c = c.0 as f64 / c.1 as f64; + let c = c.0 as f64 / c.1 as f64; - let theta = - authorities[authority_index].1 as f64 / - authorities.iter().map(|(_, weight)| weight).sum::() as f64; + let theta = authorities[authority_index].1 as f64 + / authorities.iter().map(|(_, weight)| weight).sum::() as f64; - let calc = || { - let p = BigRational::from_float(1f64 - (1f64 - c).powf(theta))?; - let numer = p.numer().to_biguint()?; - let denom = p.denom().to_biguint()?; - ((BigUint::one() << 128) * numer / denom).to_u128() - }; + let calc = || { + let p = 
BigRational::from_float(1f64 - (1f64 - c).powf(theta))?; + let numer = p.numer().to_biguint()?; + let denom = p.denom().to_biguint()?; + ((BigUint::one() << 128) * numer / denom).to_u128() + }; - calc().unwrap_or(u128::max_value()) + calc().unwrap_or(u128::max_value()) } /// Returns true if the given VRF output is lower than the given threshold, /// false otherwise. pub(super) fn check_primary_threshold(inout: &VRFInOut, threshold: u128) -> bool { - u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(BABE_VRF_PREFIX)) < threshold + u128::from_le_bytes(inout.make_bytes::<[u8; 16]>(BABE_VRF_PREFIX)) < threshold } /// Get the expected secondary author for the given slot and with given /// authorities. This should always assign the slot to some authority unless the /// authorities list is empty. pub(super) fn secondary_slot_author( - slot_number: u64, - authorities: &[(AuthorityId, BabeAuthorityWeight)], - randomness: [u8; 32], + slot_number: u64, + authorities: &[(AuthorityId, BabeAuthorityWeight)], + randomness: [u8; 32], ) -> Option<&AuthorityId> { - if authorities.is_empty() { - return None; - } + if authorities.is_empty() { + return None; + } - let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); + let rand = U256::from((randomness, slot_number).using_encoded(blake2_256)); - let authorities_len = U256::from(authorities.len()); - let idx = rand % authorities_len; + let authorities_len = U256::from(authorities.len()); + let idx = rand % authorities_len; - let expected_author = authorities.get(idx.as_u32() as usize) - .expect("authorities not empty; index constrained to list length; \ - this is a valid index; qed"); + let expected_author = authorities.get(idx.as_u32() as usize).expect( + "authorities not empty; index constrained to list length; \ + this is a valid index; qed", + ); - Some(&expected_author.0) + Some(&expected_author.0) } -pub(super) fn make_transcript( - randomness: &[u8], - slot_number: u64, - epoch: u64, -) -> Transcript { - let 
mut transcript = Transcript::new(&BABE_ENGINE_ID); - transcript.append_u64(b"slot number", slot_number); - transcript.append_u64(b"current epoch", epoch); - transcript.append_message(b"chain randomness", randomness); - transcript +pub(super) fn make_transcript(randomness: &[u8], slot_number: u64, epoch: u64) -> Transcript { + let mut transcript = Transcript::new(&BABE_ENGINE_ID); + transcript.append_u64(b"slot number", slot_number); + transcript.append_u64(b"current epoch", epoch); + transcript.append_message(b"chain randomness", randomness); + transcript } - /// Claim a secondary slot if it is our turn to propose, returning the /// pre-digest to use when authoring the block, or `None` if it is not our turn /// to propose. fn claim_secondary_slot( - slot_number: SlotNumber, - authorities: &[(AuthorityId, BabeAuthorityWeight)], - keystore: &KeyStorePtr, - randomness: [u8; 32], + slot_number: SlotNumber, + authorities: &[(AuthorityId, BabeAuthorityWeight)], + keystore: &KeyStorePtr, + randomness: [u8; 32], ) -> Option<(PreDigest, AuthorityPair)> { - if authorities.is_empty() { - return None; - } - - let expected_author = super::authorship::secondary_slot_author( - slot_number, - authorities, - randomness, - )?; - - let keystore = keystore.read(); - - for (pair, authority_index) in authorities.iter() - .enumerate() - .flat_map(|(i, a)| { - keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) - }) - { - if pair.public() == *expected_author { - let pre_digest = PreDigest::Secondary(SecondaryPreDigest { - slot_number, - authority_index: authority_index as u32, - }); - - return Some((pre_digest, pair)); - } - } - - None + if authorities.is_empty() { + return None; + } + + let expected_author = + super::authorship::secondary_slot_author(slot_number, authorities, randomness)?; + + let keystore = keystore.read(); + + for (pair, authority_index) in authorities.iter().enumerate().flat_map(|(i, a)| { + keystore + .key_pair::(&a.0) + .ok() + .map(|kp| (kp, i)) + }) { + if 
pair.public() == *expected_author { + let pre_digest = PreDigest::Secondary(SecondaryPreDigest { + slot_number, + authority_index: authority_index as u32, + }); + + return Some((pre_digest, pair)); + } + } + + None } /// Tries to claim the given slot number. This method starts by trying to claim @@ -145,29 +137,23 @@ fn claim_secondary_slot( /// secondary slots enabled for the given epoch, we will fallback to trying to /// claim a secondary slot. pub fn claim_slot( - slot_number: SlotNumber, - epoch: &Epoch, - config: &BabeConfiguration, - keystore: &KeyStorePtr, + slot_number: SlotNumber, + epoch: &Epoch, + config: &BabeConfiguration, + keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - claim_primary_slot(slot_number, epoch, config.c, keystore) - .or_else(|| { - if config.secondary_slots { - claim_secondary_slot( - slot_number, - &epoch.authorities, - keystore, - epoch.randomness, - ) - } else { - None - } - }) + claim_primary_slot(slot_number, epoch, config.c, keystore).or_else(|| { + if config.secondary_slots { + claim_secondary_slot(slot_number, &epoch.authorities, keystore, epoch.randomness) + } else { + None + } + }) } fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { - use sp_core::crypto::IsWrappedBy; - sp_core::sr25519::Pair::from_ref(q).as_ref() + use sp_core::crypto::IsWrappedBy; + sp_core::sr25519::Pair::from_ref(q).as_ref() } /// Claim a primary slot if it is our turn. Returns `None` if it is not our turn. @@ -175,44 +161,52 @@ fn get_keypair(q: &AuthorityPair) -> &schnorrkel::Keypair { /// the VRF. If the VRF produces a value less than `threshold`, it is our turn, /// so it returns `Some(_)`. Otherwise, it returns `None`. fn claim_primary_slot( - slot_number: SlotNumber, - epoch: &Epoch, - c: (u64, u64), - keystore: &KeyStorePtr, + slot_number: SlotNumber, + epoch: &Epoch, + c: (u64, u64), + keystore: &KeyStorePtr, ) -> Option<(PreDigest, AuthorityPair)> { - let Epoch { authorities, randomness, epoch_index, .. 
} = epoch; - let keystore = keystore.read(); - - for (pair, authority_index) in authorities.iter() - .enumerate() - .flat_map(|(i, a)| { - keystore.key_pair::(&a.0).ok().map(|kp| (kp, i)) - }) - { - let transcript = super::authorship::make_transcript(randomness, slot_number, *epoch_index); - - // Compute the threshold we will use. - // - // We already checked that authorities contains `key.public()`, so it can't - // be empty. Therefore, this division in `calculate_threshold` is safe. - let threshold = super::authorship::calculate_primary_threshold(c, authorities, authority_index); - - let pre_digest = get_keypair(&pair) - .vrf_sign_after_check(transcript, |inout| super::authorship::check_primary_threshold(inout, threshold)) - .map(|s| { - PreDigest::Primary(PrimaryPreDigest { - slot_number, - vrf_output: VRFOutput(s.0.to_output()), - vrf_proof: VRFProof(s.1), - authority_index: authority_index as u32, - }) - }); - - // early exit on first successful claim - if let Some(pre_digest) = pre_digest { - return Some((pre_digest, pair)); - } - } - - None + let Epoch { + authorities, + randomness, + epoch_index, + .. + } = epoch; + let keystore = keystore.read(); + + for (pair, authority_index) in authorities.iter().enumerate().flat_map(|(i, a)| { + keystore + .key_pair::(&a.0) + .ok() + .map(|kp| (kp, i)) + }) { + let transcript = super::authorship::make_transcript(randomness, slot_number, *epoch_index); + + // Compute the threshold we will use. + // + // We already checked that authorities contains `key.public()`, so it can't + // be empty. Therefore, this division in `calculate_threshold` is safe. 
+ let threshold = + super::authorship::calculate_primary_threshold(c, authorities, authority_index); + + let pre_digest = get_keypair(&pair) + .vrf_sign_after_check(transcript, |inout| { + super::authorship::check_primary_threshold(inout, threshold) + }) + .map(|s| { + PreDigest::Primary(PrimaryPreDigest { + slot_number, + vrf_output: VRFOutput(s.0.to_output()), + vrf_proof: VRFProof(s.1), + authority_index: authority_index as u32, + }) + }); + + // early exit on first successful claim + if let Some(pre_digest) = pre_digest { + return Some((pre_digest, pair)); + } + } + + None } diff --git a/client/consensus/babe/src/aux_schema.rs b/client/consensus/babe/src/aux_schema.rs index e014c8975a..679d9eda1b 100644 --- a/client/consensus/babe/src/aux_schema.rs +++ b/client/consensus/babe/src/aux_schema.rs @@ -16,183 +16,183 @@ //! Schema for BABE epoch changes in the aux-db. -use std::sync::Arc; -use parking_lot::Mutex; -use log::info; use codec::{Decode, Encode}; +use log::info; +use parking_lot::Mutex; +use std::sync::Arc; +use crate::Epoch; use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use sp_runtime::traits::Block as BlockT; +use sc_consensus_epochs::{migration::EpochChangesForV0, EpochChangesFor, SharedEpochChanges}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_consensus_babe::BabeBlockWeight; -use sc_consensus_epochs::{EpochChangesFor, SharedEpochChanges, migration::EpochChangesForV0}; -use crate::Epoch; +use sp_runtime::traits::Block as BlockT; const BABE_EPOCH_CHANGES_VERSION: &[u8] = b"babe_epoch_changes_version"; const BABE_EPOCH_CHANGES_KEY: &[u8] = b"babe_epoch_changes"; const BABE_EPOCH_CHANGES_CURRENT_VERSION: u32 = 1; fn block_weight_key(block_hash: H) -> Vec { - (b"block_weight", block_hash).encode() + (b"block_weight", block_hash).encode() } fn load_decode(backend: &B, key: &[u8]) -> ClientResult> - where - B: AuxStore, - T: Decode, +where + B: AuxStore, + T: Decode, 
{ - let corrupt = |e: codec::Error| { - ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e.what())) - }; - match backend.get_aux(key)? { - None => Ok(None), - Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt) - } + let corrupt = |e: codec::Error| { + ClientError::Backend(format!("BABE DB is corrupted. Decode error: {}", e.what())) + }; + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]).map(Some).map_err(corrupt), + } } /// Load or initialize persistent epoch change data from backend. pub(crate) fn load_epoch_changes( - backend: &B, + backend: &B, ) -> ClientResult> { - let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; - - let maybe_epoch_changes = match version { - None => load_decode::<_, EpochChangesForV0>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?.map(|v0| v0.migrate()), - Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => load_decode::<_, EpochChangesFor>( - backend, - BABE_EPOCH_CHANGES_KEY, - )?, - Some(other) => { - return Err(ClientError::Backend( - format!("Unsupported BABE DB version: {:?}", other) - )) - }, - }; - - let epoch_changes = Arc::new(Mutex::new(maybe_epoch_changes.unwrap_or_else(|| { - info!(target: "babe", - "👶 Creating empty BABE epoch changes on what appears to be first startup." - ); - EpochChangesFor::::default() - }))); - - // rebalance the tree after deserialization. this isn't strictly necessary - // since the tree is now rebalanced on every update operation. but since the - // tree wasn't rebalanced initially it's useful to temporarily leave it here - // to avoid having to wait until an import for rebalancing. - epoch_changes.lock().rebalance(); - - Ok(epoch_changes) + let version = load_decode::<_, u32>(backend, BABE_EPOCH_CHANGES_VERSION)?; + + let maybe_epoch_changes = match version { + None => load_decode::<_, EpochChangesForV0>(backend, BABE_EPOCH_CHANGES_KEY)? 
+ .map(|v0| v0.migrate()), + Some(BABE_EPOCH_CHANGES_CURRENT_VERSION) => { + load_decode::<_, EpochChangesFor>(backend, BABE_EPOCH_CHANGES_KEY)? + } + Some(other) => { + return Err(ClientError::Backend(format!( + "Unsupported BABE DB version: {:?}", + other + ))) + } + }; + + let epoch_changes = Arc::new(Mutex::new(maybe_epoch_changes.unwrap_or_else(|| { + info!(target: "babe", + "👶 Creating empty BABE epoch changes on what appears to be first startup." + ); + EpochChangesFor::::default() + }))); + + // rebalance the tree after deserialization. this isn't strictly necessary + // since the tree is now rebalanced on every update operation. but since the + // tree wasn't rebalanced initially it's useful to temporarily leave it here + // to avoid having to wait until an import for rebalancing. + epoch_changes.lock().rebalance(); + + Ok(epoch_changes) } /// Update the epoch changes on disk after a change. pub(crate) fn write_epoch_changes( - epoch_changes: &EpochChangesFor, - write_aux: F, -) -> R where - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, + epoch_changes: &EpochChangesFor, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { - BABE_EPOCH_CHANGES_CURRENT_VERSION.using_encoded(|version| { - let encoded_epoch_changes = epoch_changes.encode(); - write_aux( - &[(BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), - (BABE_EPOCH_CHANGES_VERSION, version)], - ) - }) + BABE_EPOCH_CHANGES_CURRENT_VERSION.using_encoded(|version| { + let encoded_epoch_changes = epoch_changes.encode(); + write_aux(&[ + (BABE_EPOCH_CHANGES_KEY, encoded_epoch_changes.as_slice()), + (BABE_EPOCH_CHANGES_VERSION, version), + ]) + }) } /// Write the cumulative chain-weight of a block ot aux storage. 
pub(crate) fn write_block_weight( - block_hash: H, - block_weight: &BabeBlockWeight, - write_aux: F, -) -> R where - F: FnOnce(&[(Vec, &[u8])]) -> R, + block_hash: H, + block_weight: &BabeBlockWeight, + write_aux: F, +) -> R +where + F: FnOnce(&[(Vec, &[u8])]) -> R, { - let key = block_weight_key(block_hash); - block_weight.using_encoded(|s| - write_aux( - &[(key, s)], - ) - ) + let key = block_weight_key(block_hash); + block_weight.using_encoded(|s| write_aux(&[(key, s)])) } /// Load the cumulative chain-weight associated with a block. pub(crate) fn load_block_weight( - backend: &B, - block_hash: H, + backend: &B, + block_hash: H, ) -> ClientResult> { - load_decode(backend, block_weight_key(block_hash).as_slice()) + load_decode(backend, block_weight_key(block_hash).as_slice()) } #[cfg(test)] mod test { - use super::*; - use crate::Epoch; - use fork_tree::ForkTree; - use substrate_test_runtime_client; - use sp_core::H256; - use sp_runtime::traits::NumberFor; - use sc_consensus_epochs::{PersistedEpoch, PersistedEpochHeader, EpochHeader}; - use sp_consensus::Error as ConsensusError; - use sc_network_test::Block as TestBlock; - - #[test] - fn load_decode_from_v0_epoch_changes() { - let epoch = Epoch { - start_slot: 0, - authorities: vec![], - randomness: [0; 32], - epoch_index: 1, - duration: 100, - }; - let client = substrate_test_runtime_client::new(); - let mut v0_tree = ForkTree::, _>::new(); - v0_tree.import::<_, ConsensusError>( - Default::default(), - Default::default(), - PersistedEpoch::Regular(epoch), - &|_, _| Ok(false), // Test is single item only so this can be set to false. 
- ).unwrap(); - - client.insert_aux( - &[(BABE_EPOCH_CHANGES_KEY, - &EpochChangesForV0::::from_raw(v0_tree).encode()[..])], - &[], - ).unwrap(); - - assert_eq!( - load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), - None, - ); - - let epoch_changes = load_epoch_changes::(&client).unwrap(); - - assert!( - epoch_changes.lock() - .tree() - .iter() - .map(|(_, _, epoch)| epoch.clone()) - .collect::>() == - vec![PersistedEpochHeader::Regular(EpochHeader { - start_slot: 0, - end_slot: 100, - })], - ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. - - write_epoch_changes::( - &epoch_changes.lock(), - |values| { - client.insert_aux(values, &[]).unwrap(); - }, - ); - - assert_eq!( - load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), - Some(1), - ); - } + use super::*; + use crate::Epoch; + use fork_tree::ForkTree; + use sc_consensus_epochs::{EpochHeader, PersistedEpoch, PersistedEpochHeader}; + use sc_network_test::Block as TestBlock; + use sp_consensus::Error as ConsensusError; + use sp_core::H256; + use sp_runtime::traits::NumberFor; + use substrate_test_runtime_client; + + #[test] + fn load_decode_from_v0_epoch_changes() { + let epoch = Epoch { + start_slot: 0, + authorities: vec![], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + }; + let client = substrate_test_runtime_client::new(); + let mut v0_tree = ForkTree::, _>::new(); + v0_tree + .import::<_, ConsensusError>( + Default::default(), + Default::default(), + PersistedEpoch::Regular(epoch), + &|_, _| Ok(false), // Test is single item only so this can be set to false. 
+ ) + .unwrap(); + + client + .insert_aux( + &[( + BABE_EPOCH_CHANGES_KEY, + &EpochChangesForV0::::from_raw(v0_tree).encode()[..], + )], + &[], + ) + .unwrap(); + + assert_eq!( + load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), + None, + ); + + let epoch_changes = load_epoch_changes::(&client).unwrap(); + + assert!( + epoch_changes + .lock() + .tree() + .iter() + .map(|(_, _, epoch)| epoch.clone()) + .collect::>() + == vec![PersistedEpochHeader::Regular(EpochHeader { + start_slot: 0, + end_slot: 100, + })], + ); // PersistedEpochHeader does not implement Debug, so we use assert! directly. + + write_epoch_changes::(&epoch_changes.lock(), |values| { + client.insert_aux(values, &[]).unwrap(); + }); + + assert_eq!( + load_decode::<_, u32>(&client, BABE_EPOCH_CHANGES_VERSION).unwrap(), + Some(1), + ); + } } diff --git a/client/consensus/babe/src/lib.rs b/client/consensus/babe/src/lib.rs index 092bf8153b..74356eb4b9 100644 --- a/client/consensus/babe/src/lib.rs +++ b/client/consensus/babe/src/lib.rs @@ -58,174 +58,184 @@ #![forbid(unsafe_code)] #![warn(missing_docs)] -pub use sp_consensus_babe::{ - BabeApi, ConsensusLog, BABE_ENGINE_ID, SlotNumber, BabeConfiguration, - AuthorityId, AuthorityPair, AuthoritySignature, - BabeAuthorityWeight, VRF_OUTPUT_LENGTH, - digests::{ - CompatibleDigestItem, NextEpochDescriptor, PreDigest, PrimaryPreDigest, SecondaryPreDigest, - }, -}; +use parking_lot::Mutex; +use sc_client_api::{backend::AuxStore, BlockchainEvents, ProvideUncles}; +use sc_keystore::KeyStorePtr; +use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_TRACE}; +use sp_api::{NumberFor, ProvideRuntimeApi}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_consensus::import_queue::{BasicQueue, CacheKeyId, Verifier}; +use sp_consensus::import_queue::{BoxFinalityProofImport, BoxJustificationImport}; pub use sp_consensus::SyncOracle; -use std::{ - collections::HashMap, sync::Arc, u64, pin::Pin, time::{Instant, Duration}, - any::Any, 
borrow::Cow +use sp_consensus::{ + self, BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Environment, + Error as ConsensusError, ForkChoiceStrategy, Proposer, SelectChain, SlotData, }; +use sp_consensus::{CanAuthorWith, ImportResult}; use sp_consensus_babe; -use sp_consensus::{ImportResult, CanAuthorWith}; -use sp_consensus::import_queue::{ - BoxJustificationImport, BoxFinalityProofImport, -}; -use sp_runtime::{ - generic::{BlockId, OpaqueDigestItemId}, Justification, - traits::{Block as BlockT, Header, DigestItemFor, Zero}, +use sp_consensus_babe::inherents::BabeInherentData; +pub use sp_consensus_babe::{ + digests::{ + CompatibleDigestItem, NextEpochDescriptor, PreDigest, PrimaryPreDigest, SecondaryPreDigest, + }, + AuthorityId, AuthorityPair, AuthoritySignature, BabeApi, BabeAuthorityWeight, + BabeConfiguration, ConsensusLog, SlotNumber, BABE_ENGINE_ID, VRF_OUTPUT_LENGTH, }; -use sp_api::{ProvideRuntimeApi, NumberFor}; -use sc_keystore::KeyStorePtr; -use parking_lot::Mutex; use sp_core::Pair; -use sp_inherents::{InherentDataProviders, InherentData}; -use sc_telemetry::{telemetry, CONSENSUS_TRACE, CONSENSUS_DEBUG}; -use sp_consensus::{ - self, BlockImport, Environment, Proposer, BlockCheckParams, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, Error as ConsensusError, - SelectChain, SlotData, +use sp_inherents::{InherentData, InherentDataProviders}; +use sp_runtime::{ + generic::{BlockId, OpaqueDigestItemId}, + traits::{Block as BlockT, DigestItemFor, Header, Zero}, + Justification, }; -use sp_consensus_babe::inherents::BabeInherentData; -use sp_timestamp::{TimestampInherentData, InherentType as TimestampInherent}; -use sp_consensus::import_queue::{Verifier, BasicQueue, CacheKeyId}; -use sc_client_api::{ - backend::AuxStore, - BlockchainEvents, ProvideUncles, +use sp_timestamp::{InherentType as TimestampInherent, TimestampInherentData}; +use std::{ + any::Any, + borrow::Cow, + collections::HashMap, + pin::Pin, + sync::Arc, + time::{Duration, 
Instant}, + u64, }; -use sp_block_builder::BlockBuilder as BlockBuilderApi; +use codec::{Decode, Encode}; use futures::prelude::*; use log::{debug, info, log, trace, warn}; -use sc_consensus_slots::{ - SlotWorker, SlotInfo, SlotCompatible, StorageChanges, CheckedHeader, check_equivocation, -}; use sc_consensus_epochs::{ - descendent_query, SharedEpochChanges, EpochChangesFor, Epoch as EpochT, ViableEpochDescriptor, + descendent_query, Epoch as EpochT, EpochChangesFor, SharedEpochChanges, ViableEpochDescriptor, }; -use sp_blockchain::{ - Result as ClientResult, Error as ClientError, - HeaderBackend, ProvideCache, HeaderMetadata +use sc_consensus_slots::{ + check_equivocation, CheckedHeader, SlotCompatible, SlotInfo, SlotWorker, StorageChanges, }; use schnorrkel::SignatureError; -use codec::{Encode, Decode}; use sp_api::ApiExt; +use sp_blockchain::{ + Error as ClientError, HeaderBackend, HeaderMetadata, ProvideCache, Result as ClientResult, +}; -mod aux_schema; -mod verification; pub mod authorship; +mod aux_schema; #[cfg(test)] mod tests; +mod verification; /// BABE epoch information #[derive(Decode, Encode, Default, PartialEq, Eq, Clone, Debug)] pub struct Epoch { - /// The epoch index - pub epoch_index: u64, - /// The starting slot of the epoch, - pub start_slot: SlotNumber, - /// The duration of this epoch - pub duration: SlotNumber, - /// The authorities and their weights - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - /// Randomness for this epoch - pub randomness: [u8; VRF_OUTPUT_LENGTH], + /// The epoch index + pub epoch_index: u64, + /// The starting slot of the epoch, + pub start_slot: SlotNumber, + /// The duration of this epoch + pub duration: SlotNumber, + /// The authorities and their weights + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// Randomness for this epoch + pub randomness: [u8; VRF_OUTPUT_LENGTH], } impl EpochT for Epoch { - type NextEpochDescriptor = NextEpochDescriptor; - type SlotNumber = SlotNumber; - - fn 
increment(&self, descriptor: NextEpochDescriptor) -> Epoch { - Epoch { - epoch_index: self.epoch_index + 1, - start_slot: self.start_slot + self.duration, - duration: self.duration, - authorities: descriptor.authorities, - randomness: descriptor.randomness, - } - } - - fn start_slot(&self) -> SlotNumber { - self.start_slot - } - - fn end_slot(&self) -> SlotNumber { - self.start_slot + self.duration - } + type NextEpochDescriptor = NextEpochDescriptor; + type SlotNumber = SlotNumber; + + fn increment(&self, descriptor: NextEpochDescriptor) -> Epoch { + Epoch { + epoch_index: self.epoch_index + 1, + start_slot: self.start_slot + self.duration, + duration: self.duration, + authorities: descriptor.authorities, + randomness: descriptor.randomness, + } + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + self.duration + } } #[derive(derive_more::Display, Debug)] enum Error { - #[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")] - MultiplePreRuntimeDigests, - #[display(fmt = "No BABE pre-runtime digest found")] - NoPreRuntimeDigest, - #[display(fmt = "Multiple BABE epoch change digests, rejecting!")] - MultipleEpochChangeDigests, - #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] - Extraction(sp_consensus::Error), - #[display(fmt = "Could not fetch epoch at {:?}", _0)] - FetchEpoch(B::Hash), - #[display(fmt = "Header {:?} rejected: too far in the future", _0)] - TooFarInFuture(B::Hash), - #[display(fmt = "Parent ({}) of {} unavailable. 
Cannot import", _0, _1)] - ParentUnavailable(B::Hash, B::Hash), - #[display(fmt = "Slot number must increase: parent slot: {}, this slot: {}", _0, _1)] - SlotNumberMustIncrease(u64, u64), - #[display(fmt = "Header {:?} has a bad seal", _0)] - HeaderBadSeal(B::Hash), - #[display(fmt = "Header {:?} is unsealed", _0)] - HeaderUnsealed(B::Hash), - #[display(fmt = "Slot author not found")] - SlotAuthorNotFound, - #[display(fmt = "Secondary slot assignments are disabled for the current epoch.")] - SecondarySlotAssignmentsDisabled, - #[display(fmt = "Bad signature on {:?}", _0)] - BadSignature(B::Hash), - #[display(fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", _0, _1)] - InvalidAuthor(AuthorityId, AuthorityId), - #[display(fmt = "No secondary author expected.")] - NoSecondaryAuthorExpected, - #[display(fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", _0, _1)] - VRFVerificationOfBlockFailed(AuthorityId, u128), - #[display(fmt = "VRF verification failed: {:?}", _0)] - VRFVerificationFailed(SignatureError), - #[display(fmt = "Could not fetch parent header: {:?}", _0)] - FetchParentHeader(sp_blockchain::Error), - #[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)] - ExpectedEpochChange(B::Hash, u64), - #[display(fmt = "Unexpected epoch change")] - UnexpectedEpochChange, - #[display(fmt = "Parent block of {} has no associated weight", _0)] - ParentBlockNoAssociatedWeight(B::Hash), - #[display(fmt = "Checking inherents failed: {}", _0)] - CheckInherents(String), - Client(sp_blockchain::Error), - Runtime(sp_inherents::Error), - ForkTree(Box>), + #[display(fmt = "Multiple BABE pre-runtime digests, rejecting!")] + MultiplePreRuntimeDigests, + #[display(fmt = "No BABE pre-runtime digest found")] + NoPreRuntimeDigest, + #[display(fmt = "Multiple BABE epoch change digests, rejecting!")] + MultipleEpochChangeDigests, + #[display(fmt = "Could not extract timestamp and slot: {:?}", _0)] + 
Extraction(sp_consensus::Error), + #[display(fmt = "Could not fetch epoch at {:?}", _0)] + FetchEpoch(B::Hash), + #[display(fmt = "Header {:?} rejected: too far in the future", _0)] + TooFarInFuture(B::Hash), + #[display(fmt = "Parent ({}) of {} unavailable. Cannot import", _0, _1)] + ParentUnavailable(B::Hash, B::Hash), + #[display( + fmt = "Slot number must increase: parent slot: {}, this slot: {}", + _0, + _1 + )] + SlotNumberMustIncrease(u64, u64), + #[display(fmt = "Header {:?} has a bad seal", _0)] + HeaderBadSeal(B::Hash), + #[display(fmt = "Header {:?} is unsealed", _0)] + HeaderUnsealed(B::Hash), + #[display(fmt = "Slot author not found")] + SlotAuthorNotFound, + #[display(fmt = "Secondary slot assignments are disabled for the current epoch.")] + SecondarySlotAssignmentsDisabled, + #[display(fmt = "Bad signature on {:?}", _0)] + BadSignature(B::Hash), + #[display( + fmt = "Invalid author: Expected secondary author: {:?}, got: {:?}.", + _0, + _1 + )] + InvalidAuthor(AuthorityId, AuthorityId), + #[display(fmt = "No secondary author expected.")] + NoSecondaryAuthorExpected, + #[display( + fmt = "VRF verification of block by author {:?} failed: threshold {} exceeded", + _0, + _1 + )] + VRFVerificationOfBlockFailed(AuthorityId, u128), + #[display(fmt = "VRF verification failed: {:?}", _0)] + VRFVerificationFailed(SignatureError), + #[display(fmt = "Could not fetch parent header: {:?}", _0)] + FetchParentHeader(sp_blockchain::Error), + #[display(fmt = "Expected epoch change to happen at {:?}, s{}", _0, _1)] + ExpectedEpochChange(B::Hash, u64), + #[display(fmt = "Unexpected epoch change")] + UnexpectedEpochChange, + #[display(fmt = "Parent block of {} has no associated weight", _0)] + ParentBlockNoAssociatedWeight(B::Hash), + #[display(fmt = "Checking inherents failed: {}", _0)] + CheckInherents(String), + Client(sp_blockchain::Error), + Runtime(sp_inherents::Error), + ForkTree(Box>), } impl std::convert::From> for String { - fn from(error: Error) -> String { - 
error.to_string() - } + fn from(error: Error) -> String { + error.to_string() + } } fn babe_err(error: Error) -> Error { - debug!(target: "babe", "{}", error); - error + debug!(target: "babe", "{}", error); + error } /// Intermediate value passed to block importer. pub struct BabeIntermediate { - /// The epoch descriptor. - pub epoch_descriptor: ViableEpochDescriptor, Epoch>, + /// The epoch descriptor. + pub epoch_descriptor: ViableEpochDescriptor, Epoch>, } /// Intermediate key for Babe engine. @@ -239,587 +249,633 @@ pub static INTERMEDIATE_KEY: &[u8] = b"babe1"; pub struct Config(sc_consensus_slots::SlotDuration); impl Config { - /// Either fetch the slot duration from disk or compute it from the genesis - /// state. - pub fn get_or_compute(client: &C) -> ClientResult where - C: AuxStore + ProvideRuntimeApi, C::Api: BabeApi, - { - trace!(target: "babe", "Getting slot duration"); - match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| a.configuration(b)).map(Self) { - Ok(s) => Ok(s), - Err(s) => { - warn!(target: "babe", "Failed to get slot duration"); - Err(s) - } - } - } - - /// Create the genesis epoch (epoch #0). This is defined to start at the slot of - /// the first block, so that has to be provided. - pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { - Epoch { - epoch_index: 0, - start_slot: slot_number, - duration: self.epoch_length, - authorities: self.genesis_authorities.clone(), - randomness: self.randomness.clone(), - } - } + /// Either fetch the slot duration from disk or compute it from the genesis + /// state. 
+ pub fn get_or_compute(client: &C) -> ClientResult + where + C: AuxStore + ProvideRuntimeApi, + C::Api: BabeApi, + { + trace!(target: "babe", "Getting slot duration"); + match sc_consensus_slots::SlotDuration::get_or_compute(client, |a, b| a.configuration(b)) + .map(Self) + { + Ok(s) => Ok(s), + Err(s) => { + warn!(target: "babe", "Failed to get slot duration"); + Err(s) + } + } + } + + /// Create the genesis epoch (epoch #0). This is defined to start at the slot of + /// the first block, so that has to be provided. + pub fn genesis_epoch(&self, slot_number: SlotNumber) -> Epoch { + Epoch { + epoch_index: 0, + start_slot: slot_number, + duration: self.epoch_length, + authorities: self.genesis_authorities.clone(), + randomness: self.randomness.clone(), + } + } } impl std::ops::Deref for Config { - type Target = BabeConfiguration; + type Target = BabeConfiguration; - fn deref(&self) -> &BabeConfiguration { - &*self.0 - } + fn deref(&self) -> &BabeConfiguration { + &*self.0 + } } /// Parameters for BABE. pub struct BabeParams { - /// The keystore that manages the keys of the node. - pub keystore: KeyStorePtr, + /// The keystore that manages the keys of the node. + pub keystore: KeyStorePtr, - /// The client to use - pub client: Arc, + /// The client to use + pub client: Arc, - /// The SelectChain Strategy - pub select_chain: SC, + /// The SelectChain Strategy + pub select_chain: SC, - /// The environment we are producing blocks for. - pub env: E, + /// The environment we are producing blocks for. + pub env: E, - /// The underlying block-import object to supply our produced blocks to. - /// This must be a `BabeBlockImport` or a wrapper of it, otherwise - /// critical consensus logic will be omitted. - pub block_import: I, + /// The underlying block-import object to supply our produced blocks to. + /// This must be a `BabeBlockImport` or a wrapper of it, otherwise + /// critical consensus logic will be omitted. 
+ pub block_import: I, - /// A sync oracle - pub sync_oracle: SO, + /// A sync oracle + pub sync_oracle: SO, - /// Providers for inherent data. - pub inherent_data_providers: InherentDataProviders, + /// Providers for inherent data. + pub inherent_data_providers: InherentDataProviders, - /// Force authoring of blocks even if we are offline - pub force_authoring: bool, + /// Force authoring of blocks even if we are offline + pub force_authoring: bool, - /// The source of timestamps for relative slots - pub babe_link: BabeLink, + /// The source of timestamps for relative slots + pub babe_link: BabeLink, - /// Checks if the current native implementation can author with a runtime at a given block. - pub can_author_with: CAW, + /// Checks if the current native implementation can author with a runtime at a given block. + pub can_author_with: CAW, } /// Start the babe worker. -pub fn start_babe(BabeParams { - keystore, - client, - select_chain, - env, - block_import, - sync_oracle, - inherent_data_providers, - force_authoring, - babe_link, - can_author_with, -}: BabeParams) -> Result< - impl futures::Future, - sp_consensus::Error, -> where - B: BlockT, - C: ProvideRuntimeApi + ProvideCache + ProvideUncles + BlockchainEvents - + HeaderBackend + HeaderMetadata + Send + Sync + 'static, - C::Api: BabeApi, - SC: SelectChain + 'static, - E: Environment + Send + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send - + Sync + 'static, - Error: std::error::Error + Send + From + From + 'static, - SO: SyncOracle + Send + Sync + Clone, - CAW: CanAuthorWith + Send, +pub fn start_babe( + BabeParams { + keystore, + client, + select_chain, + env, + block_import, + sync_oracle, + inherent_data_providers, + force_authoring, + babe_link, + can_author_with, + }: BabeParams, +) -> Result, sp_consensus::Error> +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + ProvideUncles + + BlockchainEvents + + HeaderBackend + + HeaderMetadata + + Send + + Sync + + 'static, + C::Api: 
BabeApi, + SC: SelectChain + 'static, + E: Environment + Send + Sync, + E::Proposer: Proposer>, + I: BlockImport> + + Send + + Sync + + 'static, + Error: std::error::Error + Send + From + From + 'static, + SO: SyncOracle + Send + Sync + Clone, + CAW: CanAuthorWith + Send, { - let config = babe_link.config; - let worker = BabeWorker { - client: client.clone(), - block_import: Arc::new(Mutex::new(block_import)), - env, - sync_oracle: sync_oracle.clone(), - force_authoring, - keystore, - epoch_changes: babe_link.epoch_changes.clone(), - config: config.clone(), - }; - - register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; - sc_consensus_uncles::register_uncles_inherent_data_provider( - client.clone(), - select_chain.clone(), - &inherent_data_providers, - )?; - - info!(target: "babe", "👶 Starting BABE Authorship worker"); - Ok(sc_consensus_slots::start_slot_worker( - config.0, - select_chain, - worker, - sync_oracle, - inherent_data_providers, - babe_link.time_source, - can_author_with, - )) + let config = babe_link.config; + let worker = BabeWorker { + client: client.clone(), + block_import: Arc::new(Mutex::new(block_import)), + env, + sync_oracle: sync_oracle.clone(), + force_authoring, + keystore, + epoch_changes: babe_link.epoch_changes.clone(), + config: config.clone(), + }; + + register_babe_inherent_data_provider(&inherent_data_providers, config.slot_duration())?; + sc_consensus_uncles::register_uncles_inherent_data_provider( + client.clone(), + select_chain.clone(), + &inherent_data_providers, + )?; + + info!(target: "babe", "👶 Starting BABE Authorship worker"); + Ok(sc_consensus_slots::start_slot_worker( + config.0, + select_chain, + worker, + sync_oracle, + inherent_data_providers, + babe_link.time_source, + can_author_with, + )) } struct BabeWorker { - client: Arc, - block_import: Arc>, - env: E, - sync_oracle: SO, - force_authoring: bool, - keystore: KeyStorePtr, - epoch_changes: SharedEpochChanges, - config: Config, + 
client: Arc, + block_import: Arc>, + env: E, + sync_oracle: SO, + force_authoring: bool, + keystore: KeyStorePtr, + epoch_changes: SharedEpochChanges, + config: Config, } -impl sc_consensus_slots::SimpleSlotWorker for BabeWorker where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, - C::Api: BabeApi, - E: Environment, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - SO: SyncOracle + Send + Clone, - Error: std::error::Error + Send + From + From + 'static, +impl sc_consensus_slots::SimpleSlotWorker for BabeWorker +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, + C::Api: BabeApi, + E: Environment, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Clone, + Error: std::error::Error + Send + From + From + 'static, { - type EpochData = ViableEpochDescriptor, Epoch>; - type Claim = (PreDigest, AuthorityPair); - type SyncOracle = SO; - type CreateProposer = Pin> + Send + 'static - >>; - type Proposer = E::Proposer; - type BlockImport = I; - - fn logging_target(&self) -> &'static str { - "babe" - } - - fn block_import(&self) -> Arc> { - self.block_import.clone() - } - - fn epoch_data( - &self, - parent: &B::Header, - slot_number: u64, - ) -> Result { - self.epoch_changes.lock().epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent.hash(), - parent.number().clone(), - slot_number, - ) - .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? 
- .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) - } - - fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { - self.epoch_changes.lock() - .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) - .map(|epoch| epoch.as_ref().authorities.len()) - } - - fn claim_slot( - &self, - _parent_header: &B::Header, - slot_number: SlotNumber, - epoch_descriptor: &ViableEpochDescriptor, Epoch>, - ) -> Option { - debug!(target: "babe", "Attempting to claim slot {}", slot_number); - let s = authorship::claim_slot( - slot_number, - self.epoch_changes.lock().viable_epoch( - &epoch_descriptor, - |slot| self.config.genesis_epoch(slot) - )?.as_ref(), - &*self.config, - &self.keystore, - ); - - if let Some(_) = s { - debug!(target: "babe", "Claimed slot {}", slot_number); - } - - s - } - - fn pre_digest_data( - &self, - _slot_number: u64, - claim: &Self::Claim, - ) -> Vec> { - vec![ - as CompatibleDigestItem>::babe_pre_digest(claim.0.clone()), - ] - } - - fn block_import_params(&self) -> Box, - StorageChanges, - Self::Claim, - Self::EpochData, - ) -> sp_consensus::BlockImportParams + Send> { - Box::new(|header, header_hash, body, storage_changes, (_, pair), epoch_descriptor| { - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let signature = pair.sign(header_hash.as_ref()); - let digest_item = as CompatibleDigestItem>::babe_seal(signature); - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(digest_item); - import_block.body = Some(body); - import_block.storage_changes = Some(storage_changes); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, - ); - - import_block - }) - } - - fn force_authoring(&self) -> bool { - self.force_authoring - } - - fn sync_oracle(&mut self) -> &mut Self::SyncOracle { - &mut self.sync_oracle - } - - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { - Box::pin(self.env.init(block).map_err(|e| { - sp_consensus::Error::ClientImport(format!("{:?}", e)) - })) - } - - fn proposing_remaining_duration( - &self, - head: &B::Header, - slot_info: &SlotInfo - ) -> Option { - // never give more than 2^this times the lenience. - const BACKOFF_CAP: u64 = 8; - - // how many slots it takes before we double the lenience. - const BACKOFF_STEP: u64 = 2; - - let slot_remaining = self.slot_remaining_duration(slot_info); - let parent_slot = match find_pre_digest::(head) { - Err(_) => return Some(slot_remaining), - Ok(d) => d.slot_number(), - }; - - // we allow a lenience of the number of slots since the head of the - // chain was produced, minus 1 (since there is always a difference of at least 1) - // - // exponential back-off. - // in normal cases we only attempt to issue blocks up to the end of the slot. - // when the chain has been stalled for a few slots, we give more lenience. - let slot_lenience = slot_info.number.saturating_sub(parent_slot + 1); - - let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP); - let slot_duration = slot_info.duration << (slot_lenience / BACKOFF_STEP); - - if slot_lenience >= 1 { - debug!(target: "babe", "No block for {} slots. 
Applying 2^({}/{}) lenience", + type EpochData = ViableEpochDescriptor, Epoch>; + type Claim = (PreDigest, AuthorityPair); + type SyncOracle = SO; + type CreateProposer = + Pin> + Send + 'static>>; + type Proposer = E::Proposer; + type BlockImport = I; + + fn logging_target(&self) -> &'static str { + "babe" + } + + fn block_import(&self) -> Arc> { + self.block_import.clone() + } + + fn epoch_data( + &self, + parent: &B::Header, + slot_number: u64, + ) -> Result { + self.epoch_changes + .lock() + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent.hash(), + parent.number().clone(), + slot_number, + ) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? + .ok_or(sp_consensus::Error::InvalidAuthoritiesSet) + } + + fn authorities_len(&self, epoch_descriptor: &Self::EpochData) -> Option { + self.epoch_changes + .lock() + .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) + .map(|epoch| epoch.as_ref().authorities.len()) + } + + fn claim_slot( + &self, + _parent_header: &B::Header, + slot_number: SlotNumber, + epoch_descriptor: &ViableEpochDescriptor, Epoch>, + ) -> Option { + debug!(target: "babe", "Attempting to claim slot {}", slot_number); + let s = authorship::claim_slot( + slot_number, + self.epoch_changes + .lock() + .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot))? 
+ .as_ref(), + &*self.config, + &self.keystore, + ); + + if let Some(_) = s { + debug!(target: "babe", "Claimed slot {}", slot_number); + } + + s + } + + fn pre_digest_data( + &self, + _slot_number: u64, + claim: &Self::Claim, + ) -> Vec> { + vec![ as CompatibleDigestItem>::babe_pre_digest( + claim.0.clone(), + )] + } + + fn block_import_params( + &self, + ) -> Box< + dyn Fn( + B::Header, + &B::Hash, + Vec, + StorageChanges, + Self::Claim, + Self::EpochData, + ) -> sp_consensus::BlockImportParams + + Send, + > { + Box::new( + |header, header_hash, body, storage_changes, (_, pair), epoch_descriptor| { + // sign the pre-sealed hash of the block and then + // add it to a digest item. + let signature = pair.sign(header_hash.as_ref()); + let digest_item = as CompatibleDigestItem>::babe_seal(signature); + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(digest_item); + import_block.body = Some(body); + import_block.storage_changes = Some(storage_changes); + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + + import_block + }, + ) + } + + fn force_authoring(&self) -> bool { + self.force_authoring + } + + fn sync_oracle(&mut self) -> &mut Self::SyncOracle { + &mut self.sync_oracle + } + + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer { + Box::pin( + self.env + .init(block) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))), + ) + } + + fn proposing_remaining_duration( + &self, + head: &B::Header, + slot_info: &SlotInfo, + ) -> Option { + // never give more than 2^this times the lenience. + const BACKOFF_CAP: u64 = 8; + + // how many slots it takes before we double the lenience. 
+ const BACKOFF_STEP: u64 = 2; + + let slot_remaining = self.slot_remaining_duration(slot_info); + let parent_slot = match find_pre_digest::(head) { + Err(_) => return Some(slot_remaining), + Ok(d) => d.slot_number(), + }; + + // we allow a lenience of the number of slots since the head of the + // chain was produced, minus 1 (since there is always a difference of at least 1) + // + // exponential back-off. + // in normal cases we only attempt to issue blocks up to the end of the slot. + // when the chain has been stalled for a few slots, we give more lenience. + let slot_lenience = slot_info.number.saturating_sub(parent_slot + 1); + + let slot_lenience = std::cmp::min(slot_lenience, BACKOFF_CAP); + let slot_duration = slot_info.duration << (slot_lenience / BACKOFF_STEP); + + if slot_lenience >= 1 { + debug!(target: "babe", "No block for {} slots. Applying 2^({}/{}) lenience", slot_lenience, slot_lenience, BACKOFF_STEP); - } + } - let slot_lenience = Duration::from_secs(slot_duration); - Some(slot_lenience + slot_remaining) - } + let slot_lenience = Duration::from_secs(slot_duration); + Some(slot_lenience + slot_remaining) + } } -impl SlotWorker for BabeWorker where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata + Send + Sync, - C::Api: BabeApi, - E: Environment + Send + Sync, - E::Proposer: Proposer>, - I: BlockImport> + Send + Sync + 'static, - SO: SyncOracle + Send + Sync + Clone, - Error: std::error::Error + Send + From + From + 'static, +impl SlotWorker for BabeWorker +where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata + + Send + + Sync, + C::Api: BabeApi, + E: Environment + Send + Sync, + E::Proposer: Proposer>, + I: BlockImport> + Send + Sync + 'static, + SO: SyncOracle + Send + Sync + Clone, + Error: std::error::Error + Send + From + From + 'static, { - type OnSlot = Pin> + Send>>; + type OnSlot = Pin> + Send>>; - fn on_slot(&mut self, chain_head: B::Header, slot_info: 
SlotInfo) -> Self::OnSlot { - >::on_slot(self, chain_head, slot_info) - } + fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot { + >::on_slot(self, chain_head, slot_info) + } } /// Extract the BABE pre digest from the given header. Pre-runtime digests are /// mandatory, the function will return `Err` if none is found. -fn find_pre_digest(header: &B::Header) -> Result> -{ - // genesis block doesn't contain a pre digest so let's generate a - // dummy one to not break any invariants in the rest of the code - if header.number().is_zero() { - return Ok(PreDigest::Secondary(SecondaryPreDigest { - slot_number: 0, - authority_index: 0, - })); - } - - let mut pre_digest: Option<_> = None; - for log in header.digest().logs() { - trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log); - match (log.as_babe_pre_digest(), pre_digest.is_some()) { - (Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)), - (None, _) => trace!(target: "babe", "Ignoring digest not meant for us"), - (s, false) => pre_digest = s, - } - } - pre_digest.ok_or_else(|| babe_err(Error::NoPreRuntimeDigest)) +fn find_pre_digest(header: &B::Header) -> Result> { + // genesis block doesn't contain a pre digest so let's generate a + // dummy one to not break any invariants in the rest of the code + if header.number().is_zero() { + return Ok(PreDigest::Secondary(SecondaryPreDigest { + slot_number: 0, + authority_index: 0, + })); + } + + let mut pre_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "babe", "Checking log {:?}, looking for pre runtime digest", log); + match (log.as_babe_pre_digest(), pre_digest.is_some()) { + (Some(_), true) => return Err(babe_err(Error::MultiplePreRuntimeDigests)), + (None, _) => trace!(target: "babe", "Ignoring digest not meant for us"), + (s, false) => pre_digest = s, + } + } + pre_digest.ok_or_else(|| babe_err(Error::NoPreRuntimeDigest)) } /// Extract the BABE epoch change 
digest from the given header, if it exists. -fn find_next_epoch_digest(header: &B::Header) - -> Result, Error> - where DigestItemFor: CompatibleDigestItem, +fn find_next_epoch_digest( + header: &B::Header, +) -> Result, Error> +where + DigestItemFor: CompatibleDigestItem, { - let mut epoch_digest: Option<_> = None; - for log in header.digest().logs() { - trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); - let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); - match (log, epoch_digest.is_some()) { - (Some(ConsensusLog::NextEpochData(_)), true) => return Err(babe_err(Error::MultipleEpochChangeDigests)), - (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), - _ => trace!(target: "babe", "Ignoring digest not meant for us"), - } - } - - Ok(epoch_digest) + let mut epoch_digest: Option<_> = None; + for log in header.digest().logs() { + trace!(target: "babe", "Checking log {:?}, looking for epoch change digest.", log); + let log = log.try_to::(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)); + match (log, epoch_digest.is_some()) { + (Some(ConsensusLog::NextEpochData(_)), true) => { + return Err(babe_err(Error::MultipleEpochChangeDigests)) + } + (Some(ConsensusLog::NextEpochData(epoch)), false) => epoch_digest = Some(epoch), + _ => trace!(target: "babe", "Ignoring digest not meant for us"), + } + } + + Ok(epoch_digest) } - #[derive(Default, Clone)] struct TimeSource(Arc, Vec<(Instant, u64)>)>>); impl SlotCompatible for TimeSource { - fn extract_timestamp_and_slot( - &self, - data: &InherentData, - ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { - trace!(target: "babe", "extract timestamp"); - data.timestamp_inherent_data() - .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) - } + fn extract_timestamp_and_slot( + &self, 
+ data: &InherentData, + ) -> Result<(TimestampInherent, u64, std::time::Duration), sp_consensus::Error> { + trace!(target: "babe", "extract timestamp"); + data.timestamp_inherent_data() + .and_then(|t| data.babe_inherent_data().map(|a| (t, a))) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + .map(|(x, y)| (x, y, self.0.lock().0.take().unwrap_or_default())) + } } /// State that must be shared between the import queue and the authoring logic. #[derive(Clone)] pub struct BabeLink { - time_source: TimeSource, - epoch_changes: SharedEpochChanges, - config: Config, + time_source: TimeSource, + epoch_changes: SharedEpochChanges, + config: Config, } impl BabeLink { - /// Get the epoch changes of this link. - pub fn epoch_changes(&self) -> &SharedEpochChanges { - &self.epoch_changes - } - - /// Get the config of this link. - pub fn config(&self) -> &Config { - &self.config - } + /// Get the epoch changes of this link. + pub fn epoch_changes(&self) -> &SharedEpochChanges { + &self.epoch_changes + } + + /// Get the config of this link. + pub fn config(&self) -> &Config { + &self.config + } } /// A verifier for Babe blocks. 
pub struct BabeVerifier { - client: Arc, - inherent_data_providers: sp_inherents::InherentDataProviders, - config: Config, - epoch_changes: SharedEpochChanges, - time_source: TimeSource, + client: Arc, + inherent_data_providers: sp_inherents::InherentDataProviders, + config: Config, + epoch_changes: SharedEpochChanges, + time_source: TimeSource, } impl BabeVerifier - where - Block: BlockT, - Client: HeaderBackend + HeaderMetadata + ProvideRuntimeApi, - Client::Api: BlockBuilderApi, +where + Block: BlockT, + Client: HeaderBackend + HeaderMetadata + ProvideRuntimeApi, + Client::Api: BlockBuilderApi, { - fn check_inherents( - &self, - block: Block, - block_id: BlockId, - inherent_data: InherentData, - ) -> Result<(), Error> - { - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(Error::Client)?; - - if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| { - Err(Error::CheckInherents(self.inherent_data_providers.error_to_string(&i, &e))) - }) - } else { - Ok(()) - } - } + fn check_inherents( + &self, + block: Block, + block_id: BlockId, + inherent_data: InherentData, + ) -> Result<(), Error> { + let inherent_res = self + .client + .runtime_api() + .check_inherents(&block_id, block, inherent_data) + .map_err(Error::Client)?; + + if !inherent_res.ok() { + inherent_res.into_errors().try_for_each(|(i, e)| { + Err(Error::CheckInherents( + self.inherent_data_providers.error_to_string(&i, &e), + )) + }) + } else { + Ok(()) + } + } } -impl Verifier for BabeVerifier where - Block: BlockT, - Client: HeaderMetadata + HeaderBackend + ProvideRuntimeApi - + Send + Sync + AuxStore + ProvideCache, - Client::Api: BlockBuilderApi + BabeApi, +impl Verifier for BabeVerifier +where + Block: BlockT, + Client: HeaderMetadata + + HeaderBackend + + ProvideRuntimeApi + + Send + + Sync + + AuxStore + + ProvideCache, + Client::Api: BlockBuilderApi + + BabeApi, { - fn verify( - &mut self, - origin: 
BlockOrigin, - header: Block::Header, - justification: Option, - mut body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - trace!( - target: "babe", - "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", - origin, - header, - justification, - body, - ); - - debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len()); - let mut inherent_data = self - .inherent_data_providers - .create_inherent_data() - .map_err(Error::::Runtime)?; - - let (_, slot_now, _) = self.time_source.extract_timestamp_and_slot(&inherent_data) - .map_err(Error::::Extraction)?; - - let hash = header.hash(); - let parent_hash = *header.parent_hash(); - - let parent_header_metadata = self.client.header_metadata(parent_hash) - .map_err(Error::::FetchParentHeader)?; - - let pre_digest = find_pre_digest::(&header)?; - let epoch_changes = self.epoch_changes.lock(); - let epoch_descriptor = epoch_changes.epoch_descriptor_for_child_of( - descendent_query(&*self.client), - &parent_hash, - parent_header_metadata.number, - pre_digest.slot_number(), - ) - .map_err(|e| Error::::ForkTree(Box::new(e)))? - .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| self.config.genesis_epoch(slot) - ).ok_or_else(|| Error::::FetchEpoch(parent_hash))?; - - // We add one to the current slot to allow for some small drift. - // FIXME #1019 in the future, alter this queue to allow deferring of headers - let v_params = verification::VerificationParams { - header: header.clone(), - pre_digest: Some(pre_digest.clone()), - slot_now: slot_now + 1, - epoch: viable_epoch.as_ref(), - config: &self.config, - }; - - match verification::check_header::(v_params)? 
{ - CheckedHeader::Checked(pre_header, verified_info) => { - let babe_pre_digest = verified_info.pre_digest.as_babe_pre_digest() - .expect("check_header always returns a pre-digest digest item; qed"); - - let slot_number = babe_pre_digest.slot_number(); - - let author = verified_info.author; - - // the header is valid but let's check if there was something else already - // proposed at the same slot by the given author - if let Some(equivocation_proof) = check_equivocation( - &*self.client, - slot_now, - babe_pre_digest.slot_number(), - &header, - &author, - ).map_err(|e| e.to_string())? { - info!( - "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", - author, - babe_pre_digest.slot_number(), - equivocation_proof.fst_header().hash(), - equivocation_proof.snd_header().hash(), - ); - } - - // if the body is passed through, we need to use the runtime - // to check that the internally-set timestamp in the inherents - // actually matches the slot set in the seal. - if let Some(inner_body) = body.take() { - inherent_data.babe_replace_inherent_data(slot_number); - let block = Block::new(pre_header.clone(), inner_body); - - self.check_inherents( - block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - )?; - - let (_, inner_body) = block.deconstruct(); - body = Some(inner_body); - } - - trace!(target: "babe", "Checked {:?}; importing.", pre_header); - telemetry!( + fn verify( + &mut self, + origin: BlockOrigin, + header: Block::Header, + justification: Option, + mut body: Option>, + ) -> Result< + ( + BlockImportParams, + Option)>>, + ), + String, + > { + trace!( + target: "babe", + "Verifying origin: {:?} header: {:?} justification: {:?} body: {:?}", + origin, + header, + justification, + body, + ); + + debug!(target: "babe", "We have {:?} logs in this header", header.digest().logs().len()); + let mut inherent_data = self + .inherent_data_providers + .create_inherent_data() + .map_err(Error::::Runtime)?; + + let (_, slot_now, _) = self + 
.time_source + .extract_timestamp_and_slot(&inherent_data) + .map_err(Error::::Extraction)?; + + let hash = header.hash(); + let parent_hash = *header.parent_hash(); + + let parent_header_metadata = self + .client + .header_metadata(parent_hash) + .map_err(Error::::FetchParentHeader)?; + + let pre_digest = find_pre_digest::(&header)?; + let epoch_changes = self.epoch_changes.lock(); + let epoch_descriptor = epoch_changes + .epoch_descriptor_for_child_of( + descendent_query(&*self.client), + &parent_hash, + parent_header_metadata.number, + pre_digest.slot_number(), + ) + .map_err(|e| Error::::ForkTree(Box::new(e)))? + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) + .ok_or_else(|| Error::::FetchEpoch(parent_hash))?; + + // We add one to the current slot to allow for some small drift. + // FIXME #1019 in the future, alter this queue to allow deferring of headers + let v_params = verification::VerificationParams { + header: header.clone(), + pre_digest: Some(pre_digest.clone()), + slot_now: slot_now + 1, + epoch: viable_epoch.as_ref(), + config: &self.config, + }; + + match verification::check_header::(v_params)? { + CheckedHeader::Checked(pre_header, verified_info) => { + let babe_pre_digest = verified_info + .pre_digest + .as_babe_pre_digest() + .expect("check_header always returns a pre-digest digest item; qed"); + + let slot_number = babe_pre_digest.slot_number(); + + let author = verified_info.author; + + // the header is valid but let's check if there was something else already + // proposed at the same slot by the given author + if let Some(equivocation_proof) = check_equivocation( + &*self.client, + slot_now, + babe_pre_digest.slot_number(), + &header, + &author, + ) + .map_err(|e| e.to_string())? 
+ { + info!( + "Slot author {:?} is equivocating at slot {} with headers {:?} and {:?}", + author, + babe_pre_digest.slot_number(), + equivocation_proof.fst_header().hash(), + equivocation_proof.snd_header().hash(), + ); + } + + // if the body is passed through, we need to use the runtime + // to check that the internally-set timestamp in the inherents + // actually matches the slot set in the seal. + if let Some(inner_body) = body.take() { + inherent_data.babe_replace_inherent_data(slot_number); + let block = Block::new(pre_header.clone(), inner_body); + + self.check_inherents(block.clone(), BlockId::Hash(parent_hash), inherent_data)?; + + let (_, inner_body) = block.deconstruct(); + body = Some(inner_body); + } + + trace!(target: "babe", "Checked {:?}; importing.", pre_header); + telemetry!( CONSENSUS_TRACE; "babe.checked_and_importing"; "pre_header" => ?pre_header); - let mut import_block = BlockImportParams::new(origin, pre_header); - import_block.post_digests.push(verified_info.seal); - import_block.body = body; - import_block.justification = justification; - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, - ); - import_block.post_hash = Some(hash); - - Ok((import_block, Default::default())) - } - CheckedHeader::Deferred(a, b) => { - debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); - telemetry!(CONSENSUS_DEBUG; "babe.header_too_far_in_future"; - "hash" => ?hash, "a" => ?a, "b" => ?b - ); - Err(Error::::TooFarInFuture(hash).into()) - } - } - } + let mut import_block = BlockImportParams::new(origin, pre_header); + import_block.post_digests.push(verified_info.seal); + import_block.body = body; + import_block.justification = justification; + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + import_block.post_hash = Some(hash); + + Ok((import_block, Default::default())) + } + 
CheckedHeader::Deferred(a, b) => { + debug!(target: "babe", "Checking {:?} failed; {:?}, {:?}.", hash, a, b); + telemetry!(CONSENSUS_DEBUG; "babe.header_too_far_in_future"; + "hash" => ?hash, "a" => ?a, "b" => ?b + ); + Err(Error::::TooFarInFuture(hash).into()) + } + } + } } /// The BABE import queue type. @@ -827,18 +883,20 @@ pub type BabeImportQueue = BasicQueue; /// Register the babe inherent data provider, if not registered already. fn register_babe_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, - slot_duration: u64, + inherent_data_providers: &InherentDataProviders, + slot_duration: u64, ) -> Result<(), sp_consensus::Error> { - debug!(target: "babe", "Registering"); - if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_consensus_babe::inherents::InherentDataProvider::new(slot_duration)) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } + debug!(target: "babe", "Registering"); + if !inherent_data_providers.has_provider(&sp_consensus_babe::inherents::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(sp_consensus_babe::inherents::InherentDataProvider::new( + slot_duration, + )) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + } else { + Ok(()) + } } /// A block-import handler for BABE. @@ -850,304 +908,322 @@ fn register_babe_inherent_data_provider( /// /// The epoch change tree should be pruned as blocks are finalized. 
pub struct BabeBlockImport { - inner: I, - client: Arc, - epoch_changes: SharedEpochChanges, - config: Config, + inner: I, + client: Arc, + epoch_changes: SharedEpochChanges, + config: Config, } impl Clone for BabeBlockImport { - fn clone(&self) -> Self { - BabeBlockImport { - inner: self.inner.clone(), - client: self.client.clone(), - epoch_changes: self.epoch_changes.clone(), - config: self.config.clone(), - } - } + fn clone(&self) -> Self { + BabeBlockImport { + inner: self.inner.clone(), + client: self.client.clone(), + epoch_changes: self.epoch_changes.clone(), + config: self.config.clone(), + } + } } impl BabeBlockImport { - fn new( - client: Arc, - epoch_changes: SharedEpochChanges, - block_import: I, - config: Config, - ) -> Self { - BabeBlockImport { - client, - inner: block_import, - epoch_changes, - config, - } - } + fn new( + client: Arc, + epoch_changes: SharedEpochChanges, + block_import: I, + config: Config, + ) -> Self { + BabeBlockImport { + client, + inner: block_import, + epoch_changes, + config, + } + } } -impl BlockImport for BabeBlockImport where - Block: BlockT, - Inner: BlockImport> + Send + Sync, - Inner::Error: Into, - Client: HeaderBackend + HeaderMetadata - + AuxStore + ProvideRuntimeApi + ProvideCache + Send + Sync, - Client::Api: BabeApi + ApiExt, +impl BlockImport for BabeBlockImport +where + Block: BlockT, + Inner: BlockImport> + Send + Sync, + Inner::Error: Into, + Client: HeaderBackend + + HeaderMetadata + + AuxStore + + ProvideRuntimeApi + + ProvideCache + + Send + + Sync, + Client::Api: BabeApi + ApiExt, { - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let hash = block.post_hash(); - let number = block.header.number().clone(); - - // early exit if block already in chain, otherwise the check for - // epoch changes will error when trying to re-import an epoch change - match 
self.client.status(BlockId::Hash(hash)) { - Ok(sp_blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), - Ok(sp_blockchain::BlockStatus::Unknown) => {}, - Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), - } - - let pre_digest = find_pre_digest::(&block.header) - .expect("valid babe headers must contain a predigest; \ - header has been already verified; qed"); - let slot_number = pre_digest.slot_number(); - - let parent_hash = *block.header.parent_hash(); - let parent_header = self.client.header(BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? - .ok_or_else(|| ConsensusError::ChainLookup(babe_err( - Error::::ParentUnavailable(parent_hash, hash) - ).into()))?; - - let parent_slot = find_pre_digest::(&parent_header) - .map(|d| d.slot_number()) - .expect("parent is non-genesis; valid BABE headers contain a pre-digest; \ - header has already been verified; qed"); - - // make sure that slot number is strictly increasing - if slot_number <= parent_slot { - return Err( - ConsensusError::ClientImport(babe_err( - Error::::SlotNumberMustIncrease(parent_slot, slot_number) - ).into()) - ); - } - - let mut epoch_changes = self.epoch_changes.lock(); - - // check if there's any epoch change expected to happen at this slot. - // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true - // if this is the first block in its chain for that epoch. - // - // also provides the total weight of the chain, including the imported block. - let (epoch_descriptor, first_in_epoch, parent_weight) = { - let parent_weight = if *parent_header.number() == Zero::zero() { - 0 - } else { - aux_schema::load_block_weight(&*self.client, parent_hash) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - .ok_or_else(|| ConsensusError::ClientImport( - babe_err(Error::::ParentBlockNoAssociatedWeight(hash)).into() - ))? 
- }; - - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; - - let epoch_descriptor = intermediate.epoch_descriptor; - let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); - (epoch_descriptor, first_in_epoch, parent_weight) - }; - - let total_weight = parent_weight + pre_digest.added_weight(); - - // search for this all the time so we can reject unexpected announcements. - let next_epoch_digest = find_next_epoch_digest::(&block.header) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - match (first_in_epoch, next_epoch_digest.is_some()) { - (true, true) => {}, - (false, false) => {}, - (true, false) => { - return Err( - ConsensusError::ClientImport( - babe_err(Error::::ExpectedEpochChange(hash, slot_number)).into(), - ) - ); - }, - (false, true) => { - return Err(ConsensusError::ClientImport(Error::::UnexpectedEpochChange.into())); - }, - } - - // if there's a pending epoch we'll save the previous epoch changes here - // this way we can revert it if there's any error - let mut old_epoch_changes = None; - - let info = self.client.info(); - - if let Some(next_epoch_descriptor) = next_epoch_digest { - old_epoch_changes = Some(epoch_changes.clone()); - - let viable_epoch = epoch_changes.viable_epoch( - &epoch_descriptor, - |slot| self.config.genesis_epoch(slot), - ).ok_or_else(|| { - ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) - })?; - - // restrict info logging during initial sync to avoid spam - let log_level = if block.origin == BlockOrigin::NetworkInitialSync { - log::Level::Debug - } else { - log::Level::Info - }; - - log!(target: "babe", - log_level, - "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", - viable_epoch.as_ref().epoch_index, - hash, - slot_number, - viable_epoch.as_ref().start_slot, - ); - - let next_epoch = viable_epoch.increment(next_epoch_descriptor); - - log!(target: "babe", - log_level, - "👶 Next epoch starts at slot {}", - 
next_epoch.as_ref().start_slot, - ); - - // prune the tree of epochs not part of the finalized chain or - // that are not live anymore, and then track the given epoch change - // in the tree. - // NOTE: it is important that these operations are done in this - // order, otherwise if pruning after import the `is_descendent_of` - // used by pruning may not know about the block that is being - // imported. - let prune_and_import = || { - prune_finalized( - self.client.clone(), - &mut epoch_changes, - )?; - - epoch_changes.import( - descendent_query(&*self.client), - hash, - number, - *block.header.parent_hash(), - next_epoch, - ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; - - Ok(()) - }; - - if let Err(e) = prune_and_import() { - debug!(target: "babe", "Failed to launch next epoch: {:?}", e); - *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); - return Err(e); - } - - crate::aux_schema::write_epoch_changes::( - &*epoch_changes, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) - ); - } - - aux_schema::write_block_weight( - hash, - &total_weight, - |values| block.auxiliary.extend( - values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ), - ); - - // The fork choice rule is that we pick the heaviest chain (i.e. - // more primary blocks), if there's a tie we go with the longest - // chain. - block.fork_choice = { - let (last_best, last_best_number) = (info.best_hash, info.best_number); - - let last_best_weight = if &last_best == block.header.parent_hash() { - // the parent=genesis case is already covered for loading parent weight, - // so we don't need to cover again here. - parent_weight - } else { - aux_schema::load_block_weight(&*self.client, last_best) - .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? - .ok_or_else( - || ConsensusError::ChainLookup(format!("No block weight for parent header.")) - )? 
- }; - - Some(ForkChoiceStrategy::Custom(if total_weight > last_best_weight { - true - } else if total_weight == last_best_weight { - number > last_best_number - } else { - false - })) - }; - - let import_result = self.inner.import_block(block, new_cache); - - // revert to the original epoch changes in case there's an error - // importing the block - if let Err(_) = import_result { - if let Some(old_epoch_changes) = old_epoch_changes { - *epoch_changes = old_epoch_changes; - } - } - - import_result.map_err(Into::into) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).map_err(Into::into) - } + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = block.header.number().clone(); + + // early exit if block already in chain, otherwise the check for + // epoch changes will error when trying to re-import an epoch change + match self.client.status(BlockId::Hash(hash)) { + Ok(sp_blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + Ok(sp_blockchain::BlockStatus::Unknown) => {} + Err(e) => return Err(ConsensusError::ClientImport(e.to_string())), + } + + let pre_digest = find_pre_digest::(&block.header).expect( + "valid babe headers must contain a predigest; \ + header has been already verified; qed", + ); + let slot_number = pre_digest.slot_number(); + + let parent_hash = *block.header.parent_hash(); + let parent_header = self + .client + .header(BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()))? 
+ .ok_or_else(|| { + ConsensusError::ChainLookup( + babe_err(Error::::ParentUnavailable(parent_hash, hash)).into(), + ) + })?; + + let parent_slot = find_pre_digest::(&parent_header) + .map(|d| d.slot_number()) + .expect( + "parent is non-genesis; valid BABE headers contain a pre-digest; \ + header has already been verified; qed", + ); + + // make sure that slot number is strictly increasing + if slot_number <= parent_slot { + return Err(ConsensusError::ClientImport( + babe_err(Error::::SlotNumberMustIncrease( + parent_slot, + slot_number, + )) + .into(), + )); + } + + let mut epoch_changes = self.epoch_changes.lock(); + + // check if there's any epoch change expected to happen at this slot. + // `epoch` is the epoch to verify the block under, and `first_in_epoch` is true + // if this is the first block in its chain for that epoch. + // + // also provides the total weight of the chain, including the imported block. + let (epoch_descriptor, first_in_epoch, parent_weight) = { + let parent_weight = if *parent_header.number() == Zero::zero() { + 0 + } else { + aux_schema::load_block_weight(&*self.client, parent_hash) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + .ok_or_else(|| { + ConsensusError::ClientImport( + babe_err(Error::::ParentBlockNoAssociatedWeight(hash)).into(), + ) + })? + }; + + let intermediate = + block.take_intermediate::>(INTERMEDIATE_KEY)?; + + let epoch_descriptor = intermediate.epoch_descriptor; + let first_in_epoch = parent_slot < epoch_descriptor.start_slot(); + (epoch_descriptor, first_in_epoch, parent_weight) + }; + + let total_weight = parent_weight + pre_digest.added_weight(); + + // search for this all the time so we can reject unexpected announcements. 
+ let next_epoch_digest = find_next_epoch_digest::(&block.header) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + match (first_in_epoch, next_epoch_digest.is_some()) { + (true, true) => {} + (false, false) => {} + (true, false) => { + return Err(ConsensusError::ClientImport( + babe_err(Error::::ExpectedEpochChange(hash, slot_number)).into(), + )); + } + (false, true) => { + return Err(ConsensusError::ClientImport( + Error::::UnexpectedEpochChange.into(), + )); + } + } + + // if there's a pending epoch we'll save the previous epoch changes here + // this way we can revert it if there's any error + let mut old_epoch_changes = None; + + let info = self.client.info(); + + if let Some(next_epoch_descriptor) = next_epoch_digest { + old_epoch_changes = Some(epoch_changes.clone()); + + let viable_epoch = epoch_changes + .viable_epoch(&epoch_descriptor, |slot| self.config.genesis_epoch(slot)) + .ok_or_else(|| { + ConsensusError::ClientImport(Error::::FetchEpoch(parent_hash).into()) + })?; + + // restrict info logging during initial sync to avoid spam + let log_level = if block.origin == BlockOrigin::NetworkInitialSync { + log::Level::Debug + } else { + log::Level::Info + }; + + log!(target: "babe", + log_level, + "👶 New epoch {} launching at block {} (block slot {} >= start slot {}).", + viable_epoch.as_ref().epoch_index, + hash, + slot_number, + viable_epoch.as_ref().start_slot, + ); + + let next_epoch = viable_epoch.increment(next_epoch_descriptor); + + log!(target: "babe", + log_level, + "👶 Next epoch starts at slot {}", + next_epoch.as_ref().start_slot, + ); + + // prune the tree of epochs not part of the finalized chain or + // that are not live anymore, and then track the given epoch change + // in the tree. + // NOTE: it is important that these operations are done in this + // order, otherwise if pruning after import the `is_descendent_of` + // used by pruning may not know about the block that is being + // imported. 
+ let prune_and_import = || { + prune_finalized(self.client.clone(), &mut epoch_changes)?; + + epoch_changes + .import( + descendent_query(&*self.client), + hash, + number, + *block.header.parent_hash(), + next_epoch, + ) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + + Ok(()) + }; + + if let Err(e) = prune_and_import() { + debug!(target: "babe", "Failed to launch next epoch: {:?}", e); + *epoch_changes = old_epoch_changes.expect("set `Some` above and not taken; qed"); + return Err(e); + } + + crate::aux_schema::write_epoch_changes::(&*epoch_changes, |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + } + + aux_schema::write_block_weight(hash, &total_weight, |values| { + block + .auxiliary + .extend(values.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }); + + // The fork choice rule is that we pick the heaviest chain (i.e. + // more primary blocks), if there's a tie we go with the longest + // chain. + block.fork_choice = { + let (last_best, last_best_number) = (info.best_hash, info.best_number); + + let last_best_weight = if &last_best == block.header.parent_hash() { + // the parent=genesis case is already covered for loading parent weight, + // so we don't need to cover again here. + parent_weight + } else { + aux_schema::load_block_weight(&*self.client, last_best) + .map_err(|e| ConsensusError::ChainLookup(format!("{:?}", e)))? + .ok_or_else(|| { + ConsensusError::ChainLookup(format!("No block weight for parent header.")) + })? 
+ }; + + Some(ForkChoiceStrategy::Custom( + if total_weight > last_best_weight { + true + } else if total_weight == last_best_weight { + number > last_best_number + } else { + false + }, + )) + }; + + let import_result = self.inner.import_block(block, new_cache); + + // revert to the original epoch changes in case there's an error + // importing the block + if let Err(_) = import_result { + if let Some(old_epoch_changes) = old_epoch_changes { + *epoch_changes = old_epoch_changes; + } + } + + import_result.map_err(Into::into) + } + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + self.inner.check_block(block).map_err(Into::into) + } } /// Gets the best finalized block and its slot, and prunes the given epoch tree. fn prune_finalized( - client: Arc, - epoch_changes: &mut EpochChangesFor, -) -> Result<(), ConsensusError> where - Block: BlockT, - Client: HeaderBackend + HeaderMetadata, + client: Arc, + epoch_changes: &mut EpochChangesFor, +) -> Result<(), ConsensusError> +where + Block: BlockT, + Client: HeaderBackend + HeaderMetadata, { - let info = client.info(); - - let finalized_slot = { - let finalized_header = client.header(BlockId::Hash(info.finalized_hash)) - .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))? - .expect("best finalized hash was given by client; \ - finalized headers must exist in db; qed"); - - find_pre_digest::(&finalized_header) - .expect("finalized header must be valid; \ - valid blocks have a pre-digest; qed") - .slot_number() - }; - - epoch_changes.prune_finalized( - descendent_query(&*client), - &info.finalized_hash, - info.finalized_number, - finalized_slot, - ).map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; - - Ok(()) + let info = client.info(); + + let finalized_slot = { + let finalized_header = client + .header(BlockId::Hash(info.finalized_hash)) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))? 
+ .expect( + "best finalized hash was given by client; \ + finalized headers must exist in db; qed", + ); + + find_pre_digest::(&finalized_header) + .expect( + "finalized header must be valid; \ + valid blocks have a pre-digest; qed", + ) + .slot_number() + }; + + epoch_changes + .prune_finalized( + descendent_query(&*client), + &info.finalized_hash, + info.finalized_number, + finalized_slot, + ) + .map_err(|e| ConsensusError::ClientImport(format!("{:?}", e)))?; + + Ok(()) } /// Produce a BABE block-import object to be used later on in the construction of @@ -1156,35 +1232,28 @@ fn prune_finalized( /// Also returns a link object used to correctly instantiate the import queue /// and background worker. pub fn block_import( - config: Config, - wrapped_block_import: I, - client: Arc, -) -> ClientResult<(BabeBlockImport, BabeLink)> where - Client: AuxStore + HeaderBackend + HeaderMetadata, + config: Config, + wrapped_block_import: I, + client: Arc, +) -> ClientResult<(BabeBlockImport, BabeLink)> +where + Client: AuxStore + HeaderBackend + HeaderMetadata, { - let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; - let link = BabeLink { - epoch_changes: epoch_changes.clone(), - time_source: Default::default(), - config: config.clone(), - }; - - // NOTE: this isn't entirely necessary, but since we didn't use to prune the - // epoch tree it is useful as a migration, so that nodes prune long trees on - // startup rather than waiting until importing the next epoch change block. 
- prune_finalized( - client.clone(), - &mut epoch_changes.lock(), - )?; - - let import = BabeBlockImport::new( - client, - epoch_changes, - wrapped_block_import, - config, - ); - - Ok((import, link)) + let epoch_changes = aux_schema::load_epoch_changes::(&*client)?; + let link = BabeLink { + epoch_changes: epoch_changes.clone(), + time_source: Default::default(), + config: config.clone(), + }; + + // NOTE: this isn't entirely necessary, but since we didn't use to prune the + // epoch tree it is useful as a migration, so that nodes prune long trees on + // startup rather than waiting until importing the next epoch change block. + prune_finalized(client.clone(), &mut epoch_changes.lock())?; + + let import = BabeBlockImport::new(client, epoch_changes, wrapped_block_import, config); + + Ok((import, link)) } /// Start an import queue for the BABE consensus algorithm. @@ -1197,72 +1266,79 @@ pub fn block_import( /// The block import object provided must be the `BabeBlockImport` or a wrapper /// of it, otherwise crucial import logic will be omitted. 
pub fn import_queue( - babe_link: BabeLink, - block_import: Inner, - justification_import: Option>, - finality_proof_import: Option>, - client: Arc, - inherent_data_providers: InherentDataProviders, -) -> ClientResult>> where - Inner: BlockImport> - + Send + Sync + 'static, - Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, - Client: HeaderBackend + HeaderMetadata, - Client::Api: BlockBuilderApi + BabeApi + ApiExt, + babe_link: BabeLink, + block_import: Inner, + justification_import: Option>, + finality_proof_import: Option>, + client: Arc, + inherent_data_providers: InherentDataProviders, +) -> ClientResult>> +where + Inner: BlockImport< + Block, + Error = ConsensusError, + Transaction = sp_api::TransactionFor, + > + Send + + Sync + + 'static, + Client: ProvideRuntimeApi + ProvideCache + Send + Sync + AuxStore + 'static, + Client: HeaderBackend + HeaderMetadata, + Client::Api: + BlockBuilderApi + BabeApi + ApiExt, { - register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; - - let verifier = BabeVerifier { - client: client.clone(), - inherent_data_providers, - config: babe_link.config, - epoch_changes: babe_link.epoch_changes, - time_source: babe_link.time_source, - }; - - Ok(BasicQueue::new( - verifier, - Box::new(block_import), - justification_import, - finality_proof_import, - )) + register_babe_inherent_data_provider(&inherent_data_providers, babe_link.config.slot_duration)?; + + let verifier = BabeVerifier { + client: client.clone(), + inherent_data_providers, + config: babe_link.config, + epoch_changes: babe_link.epoch_changes, + time_source: babe_link.time_source, + }; + + Ok(BasicQueue::new( + verifier, + Box::new(block_import), + justification_import, + finality_proof_import, + )) } /// BABE test helpers. Utility methods for manually authoring blocks. 
#[cfg(feature = "test-helpers")] pub mod test_helpers { - use super::*; - - /// Try to claim the given slot and return a `BabePreDigest` if - /// successful. - pub fn claim_slot( - slot_number: u64, - parent: &B::Header, - client: &C, - keystore: &KeyStorePtr, - link: &BabeLink, - ) -> Option where - B: BlockT, - C: ProvideRuntimeApi + - ProvideCache + - HeaderBackend + - HeaderMetadata, - C::Api: BabeApi, - { - let epoch_changes = link.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(client), - &parent.hash(), - parent.number().clone(), - slot_number, - |slot| link.config.genesis_epoch(slot), - ).unwrap().unwrap(); - - authorship::claim_slot( - slot_number, - &epoch, - &link.config, - keystore, - ).map(|(digest, _)| digest) - } + use super::*; + + /// Try to claim the given slot and return a `BabePreDigest` if + /// successful. + pub fn claim_slot( + slot_number: u64, + parent: &B::Header, + client: &C, + keystore: &KeyStorePtr, + link: &BabeLink, + ) -> Option + where + B: BlockT, + C: ProvideRuntimeApi + + ProvideCache + + HeaderBackend + + HeaderMetadata, + C::Api: BabeApi, + { + let epoch_changes = link.epoch_changes.lock(); + let epoch = epoch_changes + .epoch_data_for_child_of( + descendent_query(client), + &parent.hash(), + parent.number().clone(), + slot_number, + |slot| link.config.genesis_epoch(slot), + ) + .unwrap() + .unwrap(); + + authorship::claim_slot(slot_number, &epoch, &link.config, keystore) + .map(|(digest, _)| digest) + } } diff --git a/client/consensus/babe/src/tests.rs b/client/consensus/babe/src/tests.rs index 20b924669d..b75ae4fcd2 100644 --- a/client/consensus/babe/src/tests.rs +++ b/client/consensus/babe/src/tests.rs @@ -22,773 +22,862 @@ use super::*; use authorship::claim_slot; -use sp_consensus_babe::{AuthorityPair, SlotNumber}; +use log::debug; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sp_consensus::{ - NoNetwork as DummyOracle, Proposal, RecordProof, - 
import_queue::{BoxBlockImport, BoxJustificationImport, BoxFinalityProofImport}, -}; +use sc_client_api::{backend::TransactionFor, BlockchainEvents}; +use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; use sc_network_test::*; use sc_network_test::{Block as TestBlock, PeersClient}; -use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; -use sp_runtime::{generic::DigestItem, traits::{Block as BlockT, DigestFor}}; -use sc_client_api::{BlockchainEvents, backend::TransactionFor}; -use log::debug; -use std::{time::Duration, cell::RefCell, task::Poll}; +use sp_consensus::{ + import_queue::{BoxBlockImport, BoxFinalityProofImport, BoxJustificationImport}, + NoNetwork as DummyOracle, Proposal, RecordProof, +}; +use sp_consensus_babe::{AuthorityPair, SlotNumber}; +use sp_runtime::{ + generic::DigestItem, + traits::{Block as BlockT, DigestFor}, +}; +use std::{cell::RefCell, task::Poll, time::Duration}; type Item = DigestItem; type Error = sp_blockchain::Error; type TestClient = sc_client::Client< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, - TestBlock, - substrate_test_runtime_client::runtime::RuntimeApi, + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + TestBlock, + substrate_test_runtime_client::runtime::RuntimeApi, >; #[derive(Copy, Clone, PartialEq)] enum Stage { - PreSeal, - PostSeal, + PreSeal, + PostSeal, } type Mutator = Arc; #[derive(Clone)] struct DummyFactory { - client: Arc, - epoch_changes: SharedEpochChanges, - config: Config, - mutator: Mutator, + client: Arc, + epoch_changes: SharedEpochChanges, + config: Config, + mutator: Mutator, } struct DummyProposer { - factory: DummyFactory, - parent_hash: Hash, - parent_number: u64, - parent_slot: SlotNumber, + factory: DummyFactory, + parent_hash: Hash, + parent_number: u64, + parent_slot: SlotNumber, } impl Environment for DummyFactory { - type CreateProposer = future::Ready>; - type Proposer = 
DummyProposer; - type Error = Error; - - fn init(&mut self, parent_header: &::Header) - -> Self::CreateProposer - { - - let parent_slot = crate::find_pre_digest::(parent_header) - .expect("parent header has a pre-digest") - .slot_number(); - - future::ready(Ok(DummyProposer { - factory: self.clone(), - parent_hash: parent_header.hash(), - parent_number: *parent_header.number(), - parent_slot, - })) - } + type CreateProposer = future::Ready>; + type Proposer = DummyProposer; + type Error = Error; + + fn init(&mut self, parent_header: &::Header) -> Self::CreateProposer { + let parent_slot = crate::find_pre_digest::(parent_header) + .expect("parent header has a pre-digest") + .slot_number(); + + future::ready(Ok(DummyProposer { + factory: self.clone(), + parent_hash: parent_header.hash(), + parent_number: *parent_header.number(), + parent_slot, + })) + } } impl DummyProposer { - fn propose_with(&mut self, pre_digests: DigestFor) - -> future::Ready< - Result< - Proposal< - TestBlock, - sc_client_api::TransactionFor - >, - Error - > - > - { - let block_builder = self.factory.client.new_block_at( - &BlockId::Hash(self.parent_hash), - pre_digests, - false, - ).unwrap(); - - let mut block = match block_builder.build().map_err(|e| e.into()) { - Ok(b) => b.block, - Err(e) => return future::ready(Err(e)), - }; - - let this_slot = crate::find_pre_digest::(block.header()) - .expect("baked block has valid pre-digest") - .slot_number(); - - // figure out if we should add a consensus digest, since the test runtime - // doesn't. 
- let epoch_changes = self.factory.epoch_changes.lock(); - let epoch = epoch_changes.epoch_data_for_child_of( - descendent_query(&*self.factory.client), - &self.parent_hash, - self.parent_number, - this_slot, - |slot| self.factory.config.genesis_epoch(slot), - ) - .expect("client has data to find epoch") - .expect("can compute epoch for baked block"); - - let first_in_epoch = self.parent_slot < epoch.start_slot; - if first_in_epoch { - // push a `Consensus` digest signalling next change. - // we just reuse the same randomness and authorities as the prior - // epoch. this will break when we add light client support, since - // that will re-check the randomness logic off-chain. - let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { - authorities: epoch.authorities.clone(), - randomness: epoch.randomness.clone(), - }).encode(); - let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); - block.header.digest_mut().push(digest) - } - - // mutate the block header according to the mutator. - (self.factory.mutator)(&mut block.header, Stage::PreSeal); - - future::ready(Ok(Proposal { block, proof: None, storage_changes: Default::default() })) - } + fn propose_with( + &mut self, + pre_digests: DigestFor, + ) -> future::Ready< + Result< + Proposal< + TestBlock, + sc_client_api::TransactionFor, + >, + Error, + >, + > { + let block_builder = self + .factory + .client + .new_block_at(&BlockId::Hash(self.parent_hash), pre_digests, false) + .unwrap(); + + let mut block = match block_builder.build().map_err(|e| e.into()) { + Ok(b) => b.block, + Err(e) => return future::ready(Err(e)), + }; + + let this_slot = crate::find_pre_digest::(block.header()) + .expect("baked block has valid pre-digest") + .slot_number(); + + // figure out if we should add a consensus digest, since the test runtime + // doesn't. 
+ let epoch_changes = self.factory.epoch_changes.lock(); + let epoch = epoch_changes + .epoch_data_for_child_of( + descendent_query(&*self.factory.client), + &self.parent_hash, + self.parent_number, + this_slot, + |slot| self.factory.config.genesis_epoch(slot), + ) + .expect("client has data to find epoch") + .expect("can compute epoch for baked block"); + + let first_in_epoch = self.parent_slot < epoch.start_slot; + if first_in_epoch { + // push a `Consensus` digest signalling next change. + // we just reuse the same randomness and authorities as the prior + // epoch. this will break when we add light client support, since + // that will re-check the randomness logic off-chain. + let digest_data = ConsensusLog::NextEpochData(NextEpochDescriptor { + authorities: epoch.authorities.clone(), + randomness: epoch.randomness.clone(), + }) + .encode(); + let digest = DigestItem::Consensus(BABE_ENGINE_ID, digest_data); + block.header.digest_mut().push(digest) + } + + // mutate the block header according to the mutator. + (self.factory.mutator)(&mut block.header, Stage::PreSeal); + + future::ready(Ok(Proposal { + block, + proof: None, + storage_changes: Default::default(), + })) + } } impl Proposer for DummyProposer { - type Error = Error; - type Transaction = sc_client_api::TransactionFor; - type Proposal = future::Ready, Error>>; - - fn propose( - &mut self, - _: InherentData, - pre_digests: DigestFor, - _: Duration, - _: RecordProof, - ) -> Self::Proposal { - self.propose_with(pre_digests) - } + type Error = Error; + type Transaction = + sc_client_api::TransactionFor; + type Proposal = future::Ready, Error>>; + + fn propose( + &mut self, + _: InherentData, + pre_digests: DigestFor, + _: Duration, + _: RecordProof, + ) -> Self::Proposal { + self.propose_with(pre_digests) + } } thread_local! 
{ - static MUTATOR: RefCell = RefCell::new(Arc::new(|_, _|())); + static MUTATOR: RefCell = RefCell::new(Arc::new(|_, _|())); } #[derive(Clone)] struct PanickingBlockImport(B); impl> BlockImport for PanickingBlockImport { - type Error = B::Error; - type Transaction = B::Transaction; - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - Ok(self.0.import_block(block, new_cache).expect("importing block failed")) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - Ok(self.0.check_block(block).expect("checking block failed")) - } + type Error = B::Error; + type Transaction = B::Transaction; + + fn import_block( + &mut self, + block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + Ok(self + .0 + .import_block(block, new_cache) + .expect("importing block failed")) + } + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + Ok(self.0.check_block(block).expect("checking block failed")) + } } pub struct BabeTestNet { - peers: Vec>>, + peers: Vec>>, } type TestHeader = ::Header; type TestExtrinsic = ::Extrinsic; pub struct TestVerifier { - inner: BabeVerifier, - mutator: Mutator, + inner: BabeVerifier, + mutator: Mutator, } impl Verifier for TestVerifier { - /// Verify the given data and return the BlockImportParams and an optional - /// new set of validators to import. If not, err with an Error-Message - /// presented to the User in the logs. - fn verify( - &mut self, - origin: BlockOrigin, - mut header: TestHeader, - justification: Option, - body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - // apply post-sealing mutations (i.e. stripping seal, if desired). - (self.mutator)(&mut header, Stage::PostSeal); - self.inner.verify(origin, header, justification, body) - } + /// Verify the given data and return the BlockImportParams and an optional + /// new set of validators to import. 
If not, err with an Error-Message + /// presented to the User in the logs. + fn verify( + &mut self, + origin: BlockOrigin, + mut header: TestHeader, + justification: Option, + body: Option>, + ) -> Result< + ( + BlockImportParams, + Option)>>, + ), + String, + > { + // apply post-sealing mutations (i.e. stripping seal, if desired). + (self.mutator)(&mut header, Stage::PostSeal); + self.inner.verify(origin, header, justification, body) + } } pub struct PeerData { - link: BabeLink, - inherent_data_providers: InherentDataProviders, - block_import: Mutex< - Option>> - >, + link: BabeLink, + inherent_data_providers: InherentDataProviders, + block_import: Mutex< + Option< + BoxBlockImport< + TestBlock, + TransactionFor, + >, + >, + >, } impl TestNetFactory for BabeTestNet { - type Verifier = TestVerifier; - type PeerData = Option; - - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - debug!(target: "babe", "Creating test network from config"); - BabeTestNet { - peers: Vec::new(), - } - } - - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Option, - ) - { - let client = client.as_full().expect("only full clients are tested"); - let inherent_data_providers = InherentDataProviders::new(); - - let config = Config::get_or_compute(&*client).expect("config available"); - let (block_import, link) = crate::block_import( - config, - client.clone(), - client.clone(), - ).expect("can initialize block-import"); - - let block_import = PanickingBlockImport(block_import); - - let data_block_import = Mutex::new( - Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>) - ); - ( - BlockImportAdapter::new_full(block_import), - None, - None, - None, - Some(PeerData { link, inherent_data_providers, block_import: data_block_import }), - ) - } - - fn make_verifier( - &self, - client: PeersClient, - _cfg: &ProtocolConfig, - maybe_link: &Option, - ) - -> 
Self::Verifier - { - let client = client.as_full().expect("only full clients are used in test"); - trace!(target: "babe", "Creating a verifier"); - - // ensure block import and verifier are linked correctly. - let data = maybe_link.as_ref().expect("babe link always provided to verifier instantiation"); - - TestVerifier { - inner: BabeVerifier { - client: client.clone(), - inherent_data_providers: data.inherent_data_providers.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - time_source: data.link.time_source.clone(), - }, - mutator: MUTATOR.with(|m| m.borrow().clone()), - } - } - - fn peer(&mut self, i: usize) -> &mut Peer { - trace!(target: "babe", "Retrieving a peer"); - &mut self.peers[i] - } - - fn peers(&self) -> &Vec> { - trace!(target: "babe", "Retrieving peers"); - &self.peers - } - - fn mut_peers>)>( - &mut self, - closure: F, - ) { - closure(&mut self.peers); - } + type Verifier = TestVerifier; + type PeerData = Option; + + /// Create new test network with peers and given config. 
+ fn from_config(_config: &ProtocolConfig) -> Self { + debug!(target: "babe", "Creating test network from config"); + BabeTestNet { peers: Vec::new() } + } + + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + Option, + ) { + let client = client.as_full().expect("only full clients are tested"); + let inherent_data_providers = InherentDataProviders::new(); + + let config = Config::get_or_compute(&*client).expect("config available"); + let (block_import, link) = crate::block_import(config, client.clone(), client.clone()) + .expect("can initialize block-import"); + + let block_import = PanickingBlockImport(block_import); + + let data_block_import = + Mutex::new(Some(Box::new(block_import.clone()) as BoxBlockImport<_, _>)); + ( + BlockImportAdapter::new_full(block_import), + None, + None, + None, + Some(PeerData { + link, + inherent_data_providers, + block_import: data_block_import, + }), + ) + } + + fn make_verifier( + &self, + client: PeersClient, + _cfg: &ProtocolConfig, + maybe_link: &Option, + ) -> Self::Verifier { + let client = client + .as_full() + .expect("only full clients are used in test"); + trace!(target: "babe", "Creating a verifier"); + + // ensure block import and verifier are linked correctly. 
+ let data = maybe_link + .as_ref() + .expect("babe link always provided to verifier instantiation"); + + TestVerifier { + inner: BabeVerifier { + client: client.clone(), + inherent_data_providers: data.inherent_data_providers.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + time_source: data.link.time_source.clone(), + }, + mutator: MUTATOR.with(|m| m.borrow().clone()), + } + } + + fn peer(&mut self, i: usize) -> &mut Peer { + trace!(target: "babe", "Retrieving a peer"); + &mut self.peers[i] + } + + fn peers(&self) -> &Vec> { + trace!(target: "babe", "Retrieving peers"); + &self.peers + } + + fn mut_peers>)>(&mut self, closure: F) { + closure(&mut self.peers); + } } #[test] #[should_panic] fn rejects_empty_block() { - env_logger::try_init().unwrap(); - let mut net = BabeTestNet::new(3); - let block_builder = |builder: BlockBuilder<_, _, _>| { - builder.build().unwrap().block - }; - net.mut_peers(|peer| { - peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); - }) + env_logger::try_init().unwrap(); + let mut net = BabeTestNet::new(3); + let block_builder = |builder: BlockBuilder<_, _, _>| builder.build().unwrap().block; + net.mut_peers(|peer| { + peer[0].generate_blocks(1, BlockOrigin::NetworkInitialSync, block_builder); + }) } -fn run_one_test( - mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static, -) { - let _ = env_logger::try_init(); - let mutator = Arc::new(mutator) as Mutator; - - MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); - let net = BabeTestNet::new(3); - - let peers = &[ - (0, "//Alice"), - (1, "//Bob"), - (2, "//Charlie"), - ]; - - let net = Arc::new(Mutex::new(net)); - let mut import_notifications = Vec::new(); - let mut babe_futures = Vec::new(); - let mut keystore_paths = Vec::new(); - - for (peer_id, seed) in peers { - let mut net = net.lock(); - let peer = net.peer(*peer_id); - let client = peer.client().as_full().expect("Only full clients are used in 
tests").clone(); - let select_chain = peer.select_chain().expect("Full client has select_chain"); - - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); - keystore.write().insert_ephemeral_from_seed::(seed).expect("Generates authority key"); - keystore_paths.push(keystore_path); - - let mut got_own = false; - let mut got_other = false; - - let data = peer.data.as_ref().expect("babe link set up during initialization"); - - let environ = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: mutator.clone(), - }; - - import_notifications.push( - // run each future until we get one of our own blocks with number higher than 5 - // that was produced locally. - client.import_notification_stream() - .take_while(move |n| future::ready(n.header.number() < &5 || { - if n.origin == BlockOrigin::Own { - got_own = true; - } else { - got_other = true; - } - - // continue until we have at least one block of our own - // and one of another peer. 
- !(got_own && got_other) - })) - .for_each(|_| future::ready(()) ) - ); - - - babe_futures.push(start_babe(BabeParams { - block_import: data.block_import.lock().take().expect("import set up during init"), - select_chain, - client, - env: environ, - sync_oracle: DummyOracle, - inherent_data_providers: data.inherent_data_providers.clone(), - force_authoring: false, - babe_link: data.link.clone(), - keystore, - can_author_with: sp_consensus::AlwaysCanAuthor, - }).expect("Starts babe")); - } - - futures::executor::block_on(future::select( - futures::future::poll_fn(move |cx| { - let mut net = net.lock(); - net.poll(cx); - for p in net.peers() { - for (h, e) in p.failed_verifications() { - panic!("Verification failed for {:?}: {}", h, e); - } - } - - Poll::<()>::Pending - }), - future::select(future::join_all(import_notifications), future::join_all(babe_futures)) - )); +fn run_one_test(mutator: impl Fn(&mut TestHeader, Stage) + Send + Sync + 'static) { + let _ = env_logger::try_init(); + let mutator = Arc::new(mutator) as Mutator; + + MUTATOR.with(|m| *m.borrow_mut() = mutator.clone()); + let net = BabeTestNet::new(3); + + let peers = &[(0, "//Alice"), (1, "//Bob"), (2, "//Charlie")]; + + let net = Arc::new(Mutex::new(net)); + let mut import_notifications = Vec::new(); + let mut babe_futures = Vec::new(); + let mut keystore_paths = Vec::new(); + + for (peer_id, seed) in peers { + let mut net = net.lock(); + let peer = net.peer(*peer_id); + let client = peer + .client() + .as_full() + .expect("Only full clients are used in tests") + .clone(); + let select_chain = peer.select_chain().expect("Full client has select_chain"); + + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = + sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + keystore + .write() + .insert_ephemeral_from_seed::(seed) + .expect("Generates authority key"); + keystore_paths.push(keystore_path); + + let mut got_own = false; + let mut 
got_other = false; + + let data = peer + .data + .as_ref() + .expect("babe link set up during initialization"); + + let environ = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: mutator.clone(), + }; + + import_notifications.push( + // run each future until we get one of our own blocks with number higher than 5 + // that was produced locally. + client + .import_notification_stream() + .take_while(move |n| { + future::ready( + n.header.number() < &5 || { + if n.origin == BlockOrigin::Own { + got_own = true; + } else { + got_other = true; + } + + // continue until we have at least one block of our own + // and one of another peer. + !(got_own && got_other) + }, + ) + }) + .for_each(|_| future::ready(())), + ); + + babe_futures.push( + start_babe(BabeParams { + block_import: data + .block_import + .lock() + .take() + .expect("import set up during init"), + select_chain, + client, + env: environ, + sync_oracle: DummyOracle, + inherent_data_providers: data.inherent_data_providers.clone(), + force_authoring: false, + babe_link: data.link.clone(), + keystore, + can_author_with: sp_consensus::AlwaysCanAuthor, + }) + .expect("Starts babe"), + ); + } + + futures::executor::block_on(future::select( + futures::future::poll_fn(move |cx| { + let mut net = net.lock(); + net.poll(cx); + for p in net.peers() { + for (h, e) in p.failed_verifications() { + panic!("Verification failed for {:?}: {}", h, e); + } + } + + Poll::<()>::Pending + }), + future::select( + future::join_all(import_notifications), + future::join_all(babe_futures), + ), + )); } #[test] fn authoring_blocks() { - run_one_test(|_, _| ()) + run_one_test(|_, _| ()) } #[test] #[should_panic] fn rejects_missing_inherent_digest() { - run_one_test(|header: &mut TestHeader, stage| { - let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); - header.digest_mut().logs = v.into_iter() - .filter(|v| stage == Stage::PostSeal || 
v.as_babe_pre_digest().is_none()) - .collect() - }) + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); + header.digest_mut().logs = v + .into_iter() + .filter(|v| stage == Stage::PostSeal || v.as_babe_pre_digest().is_none()) + .collect() + }) } #[test] #[should_panic] fn rejects_missing_seals() { - run_one_test(|header: &mut TestHeader, stage| { - let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); - header.digest_mut().logs = v.into_iter() - .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) - .collect() - }) + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); + header.digest_mut().logs = v + .into_iter() + .filter(|v| stage == Stage::PreSeal || v.as_babe_seal().is_none()) + .collect() + }) } #[test] #[should_panic] fn rejects_missing_consensus_digests() { - run_one_test(|header: &mut TestHeader, stage| { - let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); - header.digest_mut().logs = v.into_iter() - .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) - .collect() - }); + run_one_test(|header: &mut TestHeader, stage| { + let v = std::mem::replace(&mut header.digest_mut().logs, vec![]); + header.digest_mut().logs = v + .into_iter() + .filter(|v| stage == Stage::PostSeal || v.as_next_epoch_descriptor().is_none()) + .collect() + }); } #[test] fn wrong_consensus_engine_id_rejected() { - let _ = env_logger::try_init(); - let sig = AuthorityPair::generate().0.sign(b""); - let bad_seal: Item = DigestItem::Seal([0; 4], sig.to_vec()); - assert!(bad_seal.as_babe_pre_digest().is_none()); - assert!(bad_seal.as_babe_seal().is_none()) + let _ = env_logger::try_init(); + let sig = AuthorityPair::generate().0.sign(b""); + let bad_seal: Item = DigestItem::Seal([0; 4], sig.to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); + assert!(bad_seal.as_babe_seal().is_none()) 
} #[test] fn malformed_pre_digest_rejected() { - let _ = env_logger::try_init(); - let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, [0; 64].to_vec()); - assert!(bad_seal.as_babe_pre_digest().is_none()); + let _ = env_logger::try_init(); + let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, [0; 64].to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); } #[test] fn sig_is_not_pre_digest() { - let _ = env_logger::try_init(); - let sig = AuthorityPair::generate().0.sign(b""); - let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, sig.to_vec()); - assert!(bad_seal.as_babe_pre_digest().is_none()); - assert!(bad_seal.as_babe_seal().is_some()) + let _ = env_logger::try_init(); + let sig = AuthorityPair::generate().0.sign(b""); + let bad_seal: Item = DigestItem::Seal(BABE_ENGINE_ID, sig.to_vec()); + assert!(bad_seal.as_babe_pre_digest().is_none()); + assert!(bad_seal.as_babe_seal().is_some()) } #[test] fn can_author_block() { - let _ = env_logger::try_init(); - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); - let pair = keystore.write().insert_ephemeral_from_seed::("//Alice") - .expect("Generates authority pair"); - - let mut i = 0; - let epoch = Epoch { - start_slot: 0, - authorities: vec![(pair.public(), 1)], - randomness: [0; 32], - epoch_index: 1, - duration: 100, - }; - - let mut config = crate::BabeConfiguration { - slot_duration: 1000, - epoch_length: 100, - c: (3, 10), - genesis_authorities: Vec::new(), - randomness: [0; 32], - secondary_slots: true, - }; - - // with secondary slots enabled it should never be empty - match claim_slot(i, &epoch, &config, &keystore) { - None => i += 1, - Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), - } - - // otherwise with only vrf-based primary slots we might need to try a couple - // of times. 
- config.secondary_slots = false; - loop { - match claim_slot(i, &epoch, &config, &keystore) { - None => i += 1, - Some(s) => { - debug!(target: "babe", "Authored block {:?}", s.0); - break; - } - } - } + let _ = env_logger::try_init(); + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + let pair = keystore + .write() + .insert_ephemeral_from_seed::("//Alice") + .expect("Generates authority pair"); + + let mut i = 0; + let epoch = Epoch { + start_slot: 0, + authorities: vec![(pair.public(), 1)], + randomness: [0; 32], + epoch_index: 1, + duration: 100, + }; + + let mut config = crate::BabeConfiguration { + slot_duration: 1000, + epoch_length: 100, + c: (3, 10), + genesis_authorities: Vec::new(), + randomness: [0; 32], + secondary_slots: true, + }; + + // with secondary slots enabled it should never be empty + match claim_slot(i, &epoch, &config, &keystore) { + None => i += 1, + Some(s) => debug!(target: "babe", "Authored block {:?}", s.0), + } + + // otherwise with only vrf-based primary slots we might need to try a couple + // of times. + config.secondary_slots = false; + loop { + match claim_slot(i, &epoch, &config, &keystore) { + None => i += 1, + Some(s) => { + debug!(target: "babe", "Authored block {:?}", s.0); + break; + } + } + } } // Propose and import a new BABE block on top of the given parent. 
fn propose_and_import_block( - parent: &TestHeader, - slot_number: Option, - proposer_factory: &mut DummyFactory, - block_import: &mut BoxBlockImport, + parent: &TestHeader, + slot_number: Option, + proposer_factory: &mut DummyFactory, + block_import: &mut BoxBlockImport, ) -> sp_core::H256 { - let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); - - let slot_number = slot_number.unwrap_or_else(|| { - let parent_pre_digest = find_pre_digest::(parent).unwrap(); - parent_pre_digest.slot_number() + 1 - }); - - let pre_digest = sp_runtime::generic::Digest { - logs: vec![ - Item::babe_pre_digest( - PreDigest::Secondary(SecondaryPreDigest { - authority_index: 0, - slot_number, - }), - ), - ], - }; - - let parent_hash = parent.hash(); - - let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)).unwrap().block; - - let epoch_descriptor = proposer_factory.epoch_changes.lock().epoch_descriptor_for_child_of( - descendent_query(&*proposer_factory.client), - &parent_hash, - *parent.number(), - slot_number, - ).unwrap().unwrap(); - - let seal = { - // sign the pre-sealed hash of the block and then - // add it to a digest item. 
- let pair = AuthorityPair::from_seed(&[1; 32]); - let pre_hash = block.header.hash(); - let signature = pair.sign(pre_hash.as_ref()); - Item::babe_seal(signature) - }; - - let post_hash = { - block.header.digest_mut().push(seal.clone()); - let h = block.header.hash(); - block.header.digest_mut().pop(); - h - }; - - let mut import = BlockImportParams::new(BlockOrigin::Own, block.header); - import.post_digests.push(seal); - import.body = Some(block.extrinsics); - import.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, - ); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - let import_result = block_import.import_block(import, Default::default()).unwrap(); - - match import_result { - ImportResult::Imported(_) => {}, - _ => panic!("expected block to be imported"), - } - - post_hash + let mut proposer = futures::executor::block_on(proposer_factory.init(parent)).unwrap(); + + let slot_number = slot_number.unwrap_or_else(|| { + let parent_pre_digest = find_pre_digest::(parent).unwrap(); + parent_pre_digest.slot_number() + 1 + }); + + let pre_digest = sp_runtime::generic::Digest { + logs: vec![Item::babe_pre_digest(PreDigest::Secondary( + SecondaryPreDigest { + authority_index: 0, + slot_number, + }, + ))], + }; + + let parent_hash = parent.hash(); + + let mut block = futures::executor::block_on(proposer.propose_with(pre_digest)) + .unwrap() + .block; + + let epoch_descriptor = proposer_factory + .epoch_changes + .lock() + .epoch_descriptor_for_child_of( + descendent_query(&*proposer_factory.client), + &parent_hash, + *parent.number(), + slot_number, + ) + .unwrap() + .unwrap(); + + let seal = { + // sign the pre-sealed hash of the block and then + // add it to a digest item. 
+ let pair = AuthorityPair::from_seed(&[1; 32]); + let pre_hash = block.header.hash(); + let signature = pair.sign(pre_hash.as_ref()); + Item::babe_seal(signature) + }; + + let post_hash = { + block.header.digest_mut().push(seal.clone()); + let h = block.header.hash(); + block.header.digest_mut().pop(); + h + }; + + let mut import = BlockImportParams::new(BlockOrigin::Own, block.header); + import.post_digests.push(seal); + import.body = Some(block.extrinsics); + import.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(BabeIntermediate:: { epoch_descriptor }) as Box, + ); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + let import_result = block_import + .import_block(import, Default::default()) + .unwrap(); + + match import_result { + ImportResult::Imported(_) => {} + _ => panic!("expected block to be imported"), + } + + post_hash } #[test] fn importing_block_one_sets_genesis_epoch() { - let mut net = BabeTestNet::new(1); - - let peer = net.peer(0); - let data = peer.data.as_ref().expect("babe link set up during initialization"); - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); - - let mut proposer_factory = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: Arc::new(|_, _| ()), - }; - - let mut block_import = data.block_import.lock().take().expect("import set up during init"); - - let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); - - let block_hash = propose_and_import_block( - &genesis_header, - Some(999), - &mut proposer_factory, - &mut block_import, - ); - - let genesis_epoch = data.link.config.genesis_epoch(999); - - let epoch_changes = data.link.epoch_changes.lock(); - let epoch_for_second_block = epoch_changes.epoch_data_for_child_of( - descendent_query(&*client), - &block_hash, - 1, - 1000, - |slot| data.link.config.genesis_epoch(slot), - ).unwrap().unwrap(); - - 
assert_eq!(epoch_for_second_block, genesis_epoch); + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer + .data + .as_ref() + .expect("babe link set up during initialization"); + let client = peer + .client() + .as_full() + .expect("Only full clients are used in tests") + .clone(); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let mut block_import = data + .block_import + .lock() + .take() + .expect("import set up during init"); + + let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + let block_hash = propose_and_import_block( + &genesis_header, + Some(999), + &mut proposer_factory, + &mut block_import, + ); + + let genesis_epoch = data.link.config.genesis_epoch(999); + + let epoch_changes = data.link.epoch_changes.lock(); + let epoch_for_second_block = epoch_changes + .epoch_data_for_child_of(descendent_query(&*client), &block_hash, 1, 1000, |slot| { + data.link.config.genesis_epoch(slot) + }) + .unwrap() + .unwrap(); + + assert_eq!(epoch_for_second_block, genesis_epoch); } #[test] fn importing_epoch_change_block_prunes_tree() { - use sc_client_api::Finalizer; - - let mut net = BabeTestNet::new(1); - - let peer = net.peer(0); - let data = peer.data.as_ref().expect("babe link set up during initialization"); - - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); - let mut block_import = data.block_import.lock().take().expect("import set up during init"); - let epoch_changes = data.link.epoch_changes.clone(); - - let mut proposer_factory = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: Arc::new(|_, _| ()), - }; - - // This is just boilerplate code for proposing and importing n valid BABE - // blocks that are built on top of the 
given parent. The proposer takes care - // of producing epoch change digests according to the epoch duration (which - // is set to 6 slots in the test runtime). - let mut propose_and_import_blocks = |parent_id, n| { - let mut hashes = Vec::new(); - let mut parent_header = client.header(&parent_id).unwrap().unwrap(); - - for _ in 0..n { - let block_hash = propose_and_import_block( - &parent_header, - None, - &mut proposer_factory, - &mut block_import, - ); - hashes.push(block_hash); - parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap(); - } - - hashes - }; - - // This is the block tree that we're going to use in this test. Each node - // represents an epoch change block, the epoch duration is 6 slots. - // - // *---- F (#7) - // / *------ G (#19) - H (#25) - // / / - // A (#1) - B (#7) - C (#13) - D (#19) - E (#25) - // \ - // *------ I (#25) - - // Create and import the canon chain and keep track of fork blocks (A, C, D) - // from the diagram above. - let canon_hashes = propose_and_import_blocks(BlockId::Number(0), 30); - - // Create the forks - let fork_1 = propose_and_import_blocks(BlockId::Hash(canon_hashes[0]), 10); - let fork_2 = propose_and_import_blocks(BlockId::Hash(canon_hashes[12]), 15); - let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10); - - // We should be tracking a total of 9 epochs in the fork tree - assert_eq!( - epoch_changes.lock().tree().iter().count(), - 9, - ); - - // And only one root - assert_eq!( - epoch_changes.lock().tree().roots().count(), - 1, - ); - - // We finalize block #13 from the canon chain, so on the next epoch - // change the tree should be pruned, to not contain F (#7). 
- client.finalize_block(BlockId::Hash(canon_hashes[12]), None, false).unwrap(); - propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7); - - // at this point no hashes from the first fork must exist on the tree - assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_1.contains(h)), - ); - - // but the epoch changes from the other forks must still exist - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)) - ); - - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); - - // finalizing block #25 from the canon chain should prune out the second fork - client.finalize_block(BlockId::Hash(canon_hashes[24]), None, false).unwrap(); - propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8); - - // at this point no hashes from the second fork must exist on the tree - assert!( - !epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_2.contains(h)), - ); - - // while epoch changes from the last fork should still exist - assert!( - epoch_changes.lock().tree().iter().map(|(h, _, _)| h).any(|h| fork_3.contains(h)), - ); + use sc_client_api::Finalizer; + + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer + .data + .as_ref() + .expect("babe link set up during initialization"); + + let client = peer + .client() + .as_full() + .expect("Only full clients are used in tests") + .clone(); + let mut block_import = data + .block_import + .lock() + .take() + .expect("import set up during init"); + let epoch_changes = data.link.epoch_changes.clone(); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + // This is just boilerplate code for proposing and importing n valid BABE + // blocks that are built on top of the given parent. 
The proposer takes care + // of producing epoch change digests according to the epoch duration (which + // is set to 6 slots in the test runtime). + let mut propose_and_import_blocks = |parent_id, n| { + let mut hashes = Vec::new(); + let mut parent_header = client.header(&parent_id).unwrap().unwrap(); + + for _ in 0..n { + let block_hash = propose_and_import_block( + &parent_header, + None, + &mut proposer_factory, + &mut block_import, + ); + hashes.push(block_hash); + parent_header = client.header(&BlockId::Hash(block_hash)).unwrap().unwrap(); + } + + hashes + }; + + // This is the block tree that we're going to use in this test. Each node + // represents an epoch change block, the epoch duration is 6 slots. + // + // *---- F (#7) + // / *------ G (#19) - H (#25) + // / / + // A (#1) - B (#7) - C (#13) - D (#19) - E (#25) + // \ + // *------ I (#25) + + // Create and import the canon chain and keep track of fork blocks (A, C, D) + // from the diagram above. + let canon_hashes = propose_and_import_blocks(BlockId::Number(0), 30); + + // Create the forks + let fork_1 = propose_and_import_blocks(BlockId::Hash(canon_hashes[0]), 10); + let fork_2 = propose_and_import_blocks(BlockId::Hash(canon_hashes[12]), 15); + let fork_3 = propose_and_import_blocks(BlockId::Hash(canon_hashes[18]), 10); + + // We should be tracking a total of 9 epochs in the fork tree + assert_eq!(epoch_changes.lock().tree().iter().count(), 9,); + + // And only one root + assert_eq!(epoch_changes.lock().tree().roots().count(), 1,); + + // We finalize block #13 from the canon chain, so on the next epoch + // change the tree should be pruned, to not contain F (#7). 
+ client + .finalize_block(BlockId::Hash(canon_hashes[12]), None, false) + .unwrap(); + propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 7); + + // at this point no hashes from the first fork must exist on the tree + assert!(!epoch_changes + .lock() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_1.contains(h)),); + + // but the epoch changes from the other forks must still exist + assert!(epoch_changes + .lock() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h))); + + assert!(epoch_changes + .lock() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); + + // finalizing block #25 from the canon chain should prune out the second fork + client + .finalize_block(BlockId::Hash(canon_hashes[24]), None, false) + .unwrap(); + propose_and_import_blocks(BlockId::Hash(client.chain_info().best_hash), 8); + + // at this point no hashes from the second fork must exist on the tree + assert!(!epoch_changes + .lock() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_2.contains(h)),); + + // while epoch changes from the last fork should still exist + assert!(epoch_changes + .lock() + .tree() + .iter() + .map(|(h, _, _)| h) + .any(|h| fork_3.contains(h)),); } #[test] #[should_panic] fn verify_slots_are_strictly_increasing() { - let mut net = BabeTestNet::new(1); - - let peer = net.peer(0); - let data = peer.data.as_ref().expect("babe link set up during initialization"); - - let client = peer.client().as_full().expect("Only full clients are used in tests").clone(); - let mut block_import = data.block_import.lock().take().expect("import set up during init"); - - let mut proposer_factory = DummyFactory { - client: client.clone(), - config: data.link.config.clone(), - epoch_changes: data.link.epoch_changes.clone(), - mutator: Arc::new(|_, _| ()), - }; - - let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); - - // we should have no issue importing this block - let b1 = 
propose_and_import_block( - &genesis_header, - Some(999), - &mut proposer_factory, - &mut block_import, - ); - - let b1 = client.header(&BlockId::Hash(b1)).unwrap().unwrap(); - - // we should fail to import this block since the slot number didn't increase. - // we will panic due to the `PanickingBlockImport` defined above. - propose_and_import_block( - &b1, - Some(999), - &mut proposer_factory, - &mut block_import, - ); + let mut net = BabeTestNet::new(1); + + let peer = net.peer(0); + let data = peer + .data + .as_ref() + .expect("babe link set up during initialization"); + + let client = peer + .client() + .as_full() + .expect("Only full clients are used in tests") + .clone(); + let mut block_import = data + .block_import + .lock() + .take() + .expect("import set up during init"); + + let mut proposer_factory = DummyFactory { + client: client.clone(), + config: data.link.config.clone(), + epoch_changes: data.link.epoch_changes.clone(), + mutator: Arc::new(|_, _| ()), + }; + + let genesis_header = client.header(&BlockId::Number(0)).unwrap().unwrap(); + + // we should have no issue importing this block + let b1 = propose_and_import_block( + &genesis_header, + Some(999), + &mut proposer_factory, + &mut block_import, + ); + + let b1 = client.header(&BlockId::Hash(b1)).unwrap().unwrap(); + + // we should fail to import this block since the slot number didn't increase. + // we will panic due to the `PanickingBlockImport` defined above. + propose_and_import_block(&b1, Some(999), &mut proposer_factory, &mut block_import); } diff --git a/client/consensus/babe/src/verification.rs b/client/consensus/babe/src/verification.rs index 2fd37280b3..f4f83bb598 100644 --- a/client/consensus/babe/src/verification.rs +++ b/client/consensus/babe/src/verification.rs @@ -15,31 +15,33 @@ // along with Substrate. If not, see . //! Verification for BABE headers. 
-use sp_runtime::{traits::Header, traits::DigestItemFor}; -use sp_core::{Pair, Public}; -use sp_consensus_babe::{AuthoritySignature, SlotNumber, AuthorityPair, AuthorityId}; -use sp_consensus_babe::digests::{ - PreDigest, PrimaryPreDigest, SecondaryPreDigest, CompatibleDigestItem +use super::authorship::{ + calculate_primary_threshold, check_primary_threshold, make_transcript, secondary_slot_author, }; -use sc_consensus_slots::CheckedHeader; +use super::{babe_err, find_pre_digest, BlockT, Epoch, Error}; use log::{debug, trace}; -use super::{find_pre_digest, babe_err, Epoch, BlockT, Error}; -use super::authorship::{make_transcript, calculate_primary_threshold, check_primary_threshold, secondary_slot_author}; +use sc_consensus_slots::CheckedHeader; +use sp_consensus_babe::digests::{ + CompatibleDigestItem, PreDigest, PrimaryPreDigest, SecondaryPreDigest, +}; +use sp_consensus_babe::{AuthorityId, AuthorityPair, AuthoritySignature, SlotNumber}; +use sp_core::{Pair, Public}; +use sp_runtime::{traits::DigestItemFor, traits::Header}; /// BABE verification parameters pub(super) struct VerificationParams<'a, B: 'a + BlockT> { - /// the header being verified. - pub(super) header: B::Header, - /// the pre-digest of the header being verified. this is optional - if prior - /// verification code had to read it, it can be included here to avoid duplicate - /// work. - pub(super) pre_digest: Option, - /// the slot number of the current time. - pub(super) slot_now: SlotNumber, - /// epoch descriptor of the epoch this block _should_ be under, if it's valid. - pub(super) epoch: &'a Epoch, - /// genesis config of this BABE chain. - pub(super) config: &'a super::Config, + /// the header being verified. + pub(super) header: B::Header, + /// the pre-digest of the header being verified. this is optional - if prior + /// verification code had to read it, it can be included here to avoid duplicate + /// work. + pub(super) pre_digest: Option, + /// the slot number of the current time. 
+ pub(super) slot_now: SlotNumber, + /// epoch descriptor of the epoch this block _should_ be under, if it's valid. + pub(super) epoch: &'a Epoch, + /// genesis config of this BABE chain. + pub(super) config: &'a super::Config, } /// Check a header has been signed by the right key. If the slot is too far in @@ -54,84 +56,76 @@ pub(super) struct VerificationParams<'a, B: 'a + BlockT> { /// The given header can either be from a primary or secondary slot assignment, /// with each having different validation logic. pub(super) fn check_header( - params: VerificationParams, -) -> Result>, Error> where - DigestItemFor: CompatibleDigestItem, + params: VerificationParams, +) -> Result>, Error> +where + DigestItemFor: CompatibleDigestItem, { - let VerificationParams { - mut header, - pre_digest, - slot_now, - epoch, - config, - } = params; - - let authorities = &epoch.authorities; - let pre_digest = pre_digest.map(Ok).unwrap_or_else(|| find_pre_digest::(&header))?; - - trace!(target: "babe", "Checking header"); - let seal = match header.digest_mut().pop() { - Some(x) => x, - None => return Err(babe_err(Error::HeaderUnsealed(header.hash()))), - }; - - let sig = seal.as_babe_seal().ok_or_else(|| { - babe_err(Error::HeaderBadSeal(header.hash())) - })?; - - // the pre-hash of the header doesn't include the seal - // and that's what we sign - let pre_hash = header.hash(); - - if pre_digest.slot_number() > slot_now { - header.digest_mut().push(seal); - return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); - } - - let author = match authorities.get(pre_digest.authority_index() as usize) { - Some(author) => author.0.clone(), - None => return Err(babe_err(Error::SlotAuthorNotFound)), - }; - - match &pre_digest { - PreDigest::Primary(primary) => { - debug!(target: "babe", "Verifying Primary block"); - - check_primary_header::( - pre_hash, - primary, - sig, - &epoch, - config.c, - )?; - }, - PreDigest::Secondary(secondary) if config.secondary_slots => { - debug!(target: 
"babe", "Verifying Secondary block"); - - check_secondary_header::( - pre_hash, - secondary, - sig, - &epoch, - )?; - }, - _ => { - return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)); - } - } - - let info = VerifiedHeaderInfo { - pre_digest: CompatibleDigestItem::babe_pre_digest(pre_digest), - seal, - author, - }; - Ok(CheckedHeader::Checked(header, info)) + let VerificationParams { + mut header, + pre_digest, + slot_now, + epoch, + config, + } = params; + + let authorities = &epoch.authorities; + let pre_digest = pre_digest + .map(Ok) + .unwrap_or_else(|| find_pre_digest::(&header))?; + + trace!(target: "babe", "Checking header"); + let seal = match header.digest_mut().pop() { + Some(x) => x, + None => return Err(babe_err(Error::HeaderUnsealed(header.hash()))), + }; + + let sig = seal + .as_babe_seal() + .ok_or_else(|| babe_err(Error::HeaderBadSeal(header.hash())))?; + + // the pre-hash of the header doesn't include the seal + // and that's what we sign + let pre_hash = header.hash(); + + if pre_digest.slot_number() > slot_now { + header.digest_mut().push(seal); + return Ok(CheckedHeader::Deferred(header, pre_digest.slot_number())); + } + + let author = match authorities.get(pre_digest.authority_index() as usize) { + Some(author) => author.0.clone(), + None => return Err(babe_err(Error::SlotAuthorNotFound)), + }; + + match &pre_digest { + PreDigest::Primary(primary) => { + debug!(target: "babe", "Verifying Primary block"); + + check_primary_header::(pre_hash, primary, sig, &epoch, config.c)?; + } + PreDigest::Secondary(secondary) if config.secondary_slots => { + debug!(target: "babe", "Verifying Secondary block"); + + check_secondary_header::(pre_hash, secondary, sig, &epoch)?; + } + _ => { + return Err(babe_err(Error::SecondarySlotAssignmentsDisabled)); + } + } + + let info = VerifiedHeaderInfo { + pre_digest: CompatibleDigestItem::babe_pre_digest(pre_digest), + seal, + author, + }; + Ok(CheckedHeader::Checked(header, info)) } pub(super) struct 
VerifiedHeaderInfo { - pub(super) pre_digest: DigestItemFor, - pub(super) seal: DigestItemFor, - pub(super) author: AuthorityId, + pub(super) pre_digest: DigestItemFor, + pub(super) seal: DigestItemFor, + pub(super) author: AuthorityId, } /// Check a primary slot proposal header. We validate that the given header is @@ -139,43 +133,40 @@ pub(super) struct VerifiedHeaderInfo { /// is valid. Additionally, the weight of this block must increase compared to /// its parent since it is a primary block. fn check_primary_header( - pre_hash: B::Hash, - pre_digest: &PrimaryPreDigest, - signature: AuthoritySignature, - epoch: &Epoch, - c: (u64, u64), + pre_hash: B::Hash, + pre_digest: &PrimaryPreDigest, + signature: AuthoritySignature, + epoch: &Epoch, + c: (u64, u64), ) -> Result<(), Error> { - let author = &epoch.authorities[pre_digest.authority_index as usize].0; - - if AuthorityPair::verify(&signature, pre_hash, &author) { - let (inout, _) = { - let transcript = make_transcript( - &epoch.randomness, - pre_digest.slot_number, - epoch.epoch_index, - ); - - schnorrkel::PublicKey::from_bytes(author.as_slice()).and_then(|p| { - p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) - }).map_err(|s| { - babe_err(Error::VRFVerificationFailed(s)) - })? 
- }; - - let threshold = calculate_primary_threshold( - c, - &epoch.authorities, - pre_digest.authority_index as usize, - ); - - if !check_primary_threshold(&inout, threshold) { - return Err(babe_err(Error::VRFVerificationOfBlockFailed(author.clone(), threshold))); - } - - Ok(()) - } else { - Err(babe_err(Error::BadSignature(pre_hash))) - } + let author = &epoch.authorities[pre_digest.authority_index as usize].0; + + if AuthorityPair::verify(&signature, pre_hash, &author) { + let (inout, _) = { + let transcript = + make_transcript(&epoch.randomness, pre_digest.slot_number, epoch.epoch_index); + + schnorrkel::PublicKey::from_bytes(author.as_slice()) + .and_then(|p| { + p.vrf_verify(transcript, &pre_digest.vrf_output, &pre_digest.vrf_proof) + }) + .map_err(|s| babe_err(Error::VRFVerificationFailed(s)))? + }; + + let threshold = + calculate_primary_threshold(c, &epoch.authorities, pre_digest.authority_index as usize); + + if !check_primary_threshold(&inout, threshold) { + return Err(babe_err(Error::VRFVerificationOfBlockFailed( + author.clone(), + threshold, + ))); + } + + Ok(()) + } else { + Err(babe_err(Error::BadSignature(pre_hash))) + } } /// Check a secondary slot proposal header. We validate that the given header is @@ -183,28 +174,29 @@ fn check_primary_header( /// of computing. Additionally, the weight of this block must stay the same /// compared to its parent since it is a secondary block. fn check_secondary_header( - pre_hash: B::Hash, - pre_digest: &SecondaryPreDigest, - signature: AuthoritySignature, - epoch: &Epoch, + pre_hash: B::Hash, + pre_digest: &SecondaryPreDigest, + signature: AuthoritySignature, + epoch: &Epoch, ) -> Result<(), Error> { - // check the signature is valid under the expected authority and - // chain state. 
- let expected_author = secondary_slot_author( - pre_digest.slot_number, - &epoch.authorities, - epoch.randomness, - ).ok_or_else(|| Error::NoSecondaryAuthorExpected)?; - - let author = &epoch.authorities[pre_digest.authority_index as usize].0; - - if expected_author != author { - return Err(Error::InvalidAuthor(expected_author.clone(), author.clone())); - } - - if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { - Ok(()) - } else { - Err(Error::BadSignature(pre_hash)) - } + // check the signature is valid under the expected authority and + // chain state. + let expected_author = + secondary_slot_author(pre_digest.slot_number, &epoch.authorities, epoch.randomness) + .ok_or_else(|| Error::NoSecondaryAuthorExpected)?; + + let author = &epoch.authorities[pre_digest.authority_index as usize].0; + + if expected_author != author { + return Err(Error::InvalidAuthor( + expected_author.clone(), + author.clone(), + )); + } + + if AuthorityPair::verify(&signature, pre_hash.as_ref(), author) { + Ok(()) + } else { + Err(Error::BadSignature(pre_hash)) + } } diff --git a/client/consensus/epochs/src/lib.rs b/client/consensus/epochs/src/lib.rs index 001c172b34..d8e90ebbbf 100644 --- a/client/consensus/epochs/src/lib.rs +++ b/client/consensus/epochs/src/lib.rs @@ -18,35 +18,39 @@ pub mod migration; -use std::{sync::Arc, ops::Add, collections::BTreeMap, borrow::{Borrow, BorrowMut}}; -use parking_lot::Mutex; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use fork_tree::ForkTree; +use parking_lot::Mutex; use sc_client_api::utils::is_descendent_of; -use sp_blockchain::{HeaderMetadata, HeaderBackend, Error as ClientError}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, NumberFor, One, Zero}; +use std::{ + borrow::{Borrow, BorrowMut}, + collections::BTreeMap, + ops::Add, + sync::Arc, +}; /// A builder for `is_descendent_of` functions. 
pub trait IsDescendentOfBuilder { - /// The error returned by the function. - type Error: std::error::Error; - /// A function that can tell you if the second parameter is a descendent of - /// the first. - type IsDescendentOf: Fn(&Hash, &Hash) -> Result; - - /// Build an `is_descendent_of` function. - /// - /// The `current` parameter can be `Some` with the details a fresh block whose - /// details aren't yet stored, but its parent is. - /// - /// The format of `current` when `Some` is `(current, current_parent)`. - fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) - -> Self::IsDescendentOf; + /// The error returned by the function. + type Error: std::error::Error; + /// A function that can tell you if the second parameter is a descendent of + /// the first. + type IsDescendentOf: Fn(&Hash, &Hash) -> Result; + + /// Build an `is_descendent_of` function. + /// + /// The `current` parameter can be `Some` with the details a fresh block whose + /// details aren't yet stored, but its parent is. + /// + /// The format of `current` when `Some` is `(current, current_parent)`. + fn build_is_descendent_of(&self, current: Option<(Hash, Hash)>) -> Self::IsDescendentOf; } /// Produce a descendent query object given the client. 
pub fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder<&H, Block> { - HeaderBackendDescendentBuilder(client, std::marker::PhantomData) + HeaderBackendDescendentBuilder(client, std::marker::PhantomData) } /// Wrapper to get around unconstrained type errors when implementing @@ -54,18 +58,20 @@ pub fn descendent_query(client: &H) -> HeaderBackendDescendentBuilder< pub struct HeaderBackendDescendentBuilder(H, std::marker::PhantomData); impl<'a, H, Block> IsDescendentOfBuilder - for HeaderBackendDescendentBuilder<&'a H, Block> where - H: HeaderBackend + HeaderMetadata, - Block: BlockT, + for HeaderBackendDescendentBuilder<&'a H, Block> +where + H: HeaderBackend + HeaderMetadata, + Block: BlockT, { - type Error = ClientError; - type IsDescendentOf = Box Result + 'a>; - - fn build_is_descendent_of(&self, current: Option<(Block::Hash, Block::Hash)>) - -> Self::IsDescendentOf - { - Box::new(is_descendent_of(self.0, current)) - } + type Error = ClientError; + type IsDescendentOf = Box Result + 'a>; + + fn build_is_descendent_of( + &self, + current: Option<(Block::Hash, Block::Hash)>, + ) -> Self::IsDescendentOf { + Box::new(is_descendent_of(self.0, current)) + } } /// Epoch data, distinguish whether it is genesis or not. @@ -73,68 +79,68 @@ impl<'a, H, Block> IsDescendentOfBuilder /// Once an epoch is created, it must have a known `start_slot` and `end_slot`, which cannot be /// changed. Consensus engine may modify any other data in the epoch, if needed. pub trait Epoch { - /// Descriptor for the next epoch. - type NextEpochDescriptor; - /// Type of the slot number. - type SlotNumber: Ord + Copy; - - /// The starting slot of the epoch. - fn start_slot(&self) -> Self::SlotNumber; - /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, - /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. - fn end_slot(&self) -> Self::SlotNumber; - /// Increment the epoch data, using the next epoch descriptor. 
- fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; + /// Descriptor for the next epoch. + type NextEpochDescriptor; + /// Type of the slot number. + type SlotNumber: Ord + Copy; + + /// The starting slot of the epoch. + fn start_slot(&self) -> Self::SlotNumber; + /// Produce the "end slot" of the epoch. This is NOT inclusive to the epoch, + /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. + fn end_slot(&self) -> Self::SlotNumber; + /// Increment the epoch data, using the next epoch descriptor. + fn increment(&self, descriptor: Self::NextEpochDescriptor) -> Self; } impl<'a, E: Epoch> From<&'a E> for EpochHeader { - fn from(epoch: &'a E) -> EpochHeader { - Self { - start_slot: epoch.start_slot(), - end_slot: epoch.end_slot(), - } - } + fn from(epoch: &'a E) -> EpochHeader { + Self { + start_slot: epoch.start_slot(), + end_slot: epoch.end_slot(), + } + } } /// Header of epoch data, consisting of start and end slot. #[derive(Eq, PartialEq, Encode, Decode, Debug)] pub struct EpochHeader { - /// The starting slot of the epoch. - pub start_slot: E::SlotNumber, - /// The end slot of the epoch. This is NOT inclusive to the epoch, - /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. - pub end_slot: E::SlotNumber, + /// The starting slot of the epoch. + pub start_slot: E::SlotNumber, + /// The end slot of the epoch. This is NOT inclusive to the epoch, + /// i.e. the slots covered by the epoch are `self.start_slot() .. self.end_slot()`. + pub end_slot: E::SlotNumber, } impl Clone for EpochHeader { - fn clone(&self) -> Self { - Self { - start_slot: self.start_slot, - end_slot: self.end_slot, - } - } + fn clone(&self) -> Self { + Self { + start_slot: self.start_slot, + end_slot: self.end_slot, + } + } } /// Position of the epoch identifier. #[derive(PartialEq, Eq, PartialOrd, Ord, Copy, Clone, Debug)] pub enum EpochIdentifierPosition { - /// The identifier points to a genesis epoch `epoch_0`. 
- Genesis0, - /// The identifier points to a genesis epoch `epoch_1`. - Genesis1, - /// The identifier points to a regular epoch. - Regular, + /// The identifier points to a genesis epoch `epoch_0`. + Genesis0, + /// The identifier points to a genesis epoch `epoch_1`. + Genesis1, + /// The identifier points to a regular epoch. + Regular, } /// Epoch identifier. #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Debug)] pub struct EpochIdentifier { - /// Location of the epoch. - pub position: EpochIdentifierPosition, - /// Hash of the block when the epoch is signaled. - pub hash: Hash, - /// Number of the block when the epoch is signaled. - pub number: Number, + /// Location of the epoch. + pub position: EpochIdentifierPosition, + /// Hash of the block when the epoch is signaled. + pub hash: Hash, + /// Number of the block when the epoch is signaled. + pub number: Number, } /// The viable epoch under which a block can be verified. @@ -142,128 +148,128 @@ pub struct EpochIdentifier { /// If this is the first non-genesis block in the chain, then it will /// hold an `UnimportedGenesis` epoch. pub enum ViableEpoch { - /// Unimported genesis viable epoch data. - UnimportedGenesis(E), - /// Regular viable epoch data. - Signaled(ERef), + /// Unimported genesis viable epoch data. + UnimportedGenesis(E), + /// Regular viable epoch data. 
+ Signaled(ERef), } -impl AsRef for ViableEpoch where - ERef: Borrow, +impl AsRef for ViableEpoch +where + ERef: Borrow, { - fn as_ref(&self) -> &E { - match *self { - ViableEpoch::UnimportedGenesis(ref e) => e, - ViableEpoch::Signaled(ref e) => e.borrow(), - } - } + fn as_ref(&self) -> &E { + match *self { + ViableEpoch::UnimportedGenesis(ref e) => e, + ViableEpoch::Signaled(ref e) => e.borrow(), + } + } } -impl AsMut for ViableEpoch where - ERef: BorrowMut, +impl AsMut for ViableEpoch +where + ERef: BorrowMut, { - fn as_mut(&mut self) -> &mut E { - match *self { - ViableEpoch::UnimportedGenesis(ref mut e) => e, - ViableEpoch::Signaled(ref mut e) => e.borrow_mut(), - } - } + fn as_mut(&mut self) -> &mut E { + match *self { + ViableEpoch::UnimportedGenesis(ref mut e) => e, + ViableEpoch::Signaled(ref mut e) => e.borrow_mut(), + } + } } -impl ViableEpoch where - E: Epoch + Clone, - ERef: Borrow, +impl ViableEpoch +where + E: Epoch + Clone, + ERef: Borrow, { - /// Extract the underlying epoch, disregarding the fact that a genesis - /// epoch may be unimported. - pub fn into_cloned_inner(self) -> E { - match self { - ViableEpoch::UnimportedGenesis(e) => e, - ViableEpoch::Signaled(e) => e.borrow().clone(), - } - } - - /// Get cloned value for the viable epoch. - pub fn into_cloned(self) -> ViableEpoch { - match self { - ViableEpoch::UnimportedGenesis(e) => - ViableEpoch::UnimportedGenesis(e), - ViableEpoch::Signaled(e) => ViableEpoch::Signaled(e.borrow().clone()), - } - } - - /// Increment the epoch, yielding an `IncrementedEpoch` to be imported - /// into the fork-tree. 
- pub fn increment( - &self, - next_descriptor: E::NextEpochDescriptor - ) -> IncrementedEpoch { - let next = self.as_ref().increment(next_descriptor); - let to_persist = match *self { - ViableEpoch::UnimportedGenesis(ref epoch_0) => - PersistedEpoch::Genesis(epoch_0.clone(), next), - ViableEpoch::Signaled(_) => PersistedEpoch::Regular(next), - }; - - IncrementedEpoch(to_persist) - } + /// Extract the underlying epoch, disregarding the fact that a genesis + /// epoch may be unimported. + pub fn into_cloned_inner(self) -> E { + match self { + ViableEpoch::UnimportedGenesis(e) => e, + ViableEpoch::Signaled(e) => e.borrow().clone(), + } + } + + /// Get cloned value for the viable epoch. + pub fn into_cloned(self) -> ViableEpoch { + match self { + ViableEpoch::UnimportedGenesis(e) => ViableEpoch::UnimportedGenesis(e), + ViableEpoch::Signaled(e) => ViableEpoch::Signaled(e.borrow().clone()), + } + } + + /// Increment the epoch, yielding an `IncrementedEpoch` to be imported + /// into the fork-tree. + pub fn increment(&self, next_descriptor: E::NextEpochDescriptor) -> IncrementedEpoch { + let next = self.as_ref().increment(next_descriptor); + let to_persist = match *self { + ViableEpoch::UnimportedGenesis(ref epoch_0) => { + PersistedEpoch::Genesis(epoch_0.clone(), next) + } + ViableEpoch::Signaled(_) => PersistedEpoch::Regular(next), + }; + + IncrementedEpoch(to_persist) + } } /// Descriptor for a viable epoch. #[derive(PartialEq, Eq, Clone, Debug)] pub enum ViableEpochDescriptor { - /// The epoch is an unimported genesis, with given start slot number. - UnimportedGenesis(E::SlotNumber), - /// The epoch is signaled and has been imported, with given identifier and header. - Signaled(EpochIdentifier, EpochHeader) + /// The epoch is an unimported genesis, with given start slot number. + UnimportedGenesis(E::SlotNumber), + /// The epoch is signaled and has been imported, with given identifier and header. 
+ Signaled(EpochIdentifier, EpochHeader), } impl ViableEpochDescriptor { - /// Start slot of the descriptor. - pub fn start_slot(&self) -> E::SlotNumber { - match self { - Self::UnimportedGenesis(start_slot) => *start_slot, - Self::Signaled(_, header) => header.start_slot, - } - } + /// Start slot of the descriptor. + pub fn start_slot(&self) -> E::SlotNumber { + match self { + Self::UnimportedGenesis(start_slot) => *start_slot, + Self::Signaled(_, header) => header.start_slot, + } + } } /// Persisted epoch stored in EpochChanges. #[derive(Clone, Encode, Decode, Debug)] pub enum PersistedEpoch { - /// Genesis persisted epoch data. epoch_0, epoch_1. - Genesis(E, E), - /// Regular persisted epoch data. epoch_n. - Regular(E), + /// Genesis persisted epoch data. epoch_0, epoch_1. + Genesis(E, E), + /// Regular persisted epoch data. epoch_n. + Regular(E), } impl<'a, E: Epoch> From<&'a PersistedEpoch> for PersistedEpochHeader { - fn from(epoch: &'a PersistedEpoch) -> Self { - match epoch { - PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => - PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()), - PersistedEpoch::Regular(ref epoch_n) => - PersistedEpochHeader::Regular(epoch_n.into()), - } - } + fn from(epoch: &'a PersistedEpoch) -> Self { + match epoch { + PersistedEpoch::Genesis(ref epoch_0, ref epoch_1) => { + PersistedEpochHeader::Genesis(epoch_0.into(), epoch_1.into()) + } + PersistedEpoch::Regular(ref epoch_n) => PersistedEpochHeader::Regular(epoch_n.into()), + } + } } /// Persisted epoch header stored in ForkTree. #[derive(Encode, Decode, PartialEq, Eq)] pub enum PersistedEpochHeader { - /// Genesis persisted epoch header. epoch_0, epoch_1. - Genesis(EpochHeader, EpochHeader), - /// Regular persisted epoch header. epoch_n. - Regular(EpochHeader), + /// Genesis persisted epoch header. epoch_0, epoch_1. + Genesis(EpochHeader, EpochHeader), + /// Regular persisted epoch header. epoch_n. 
+ Regular(EpochHeader), } impl Clone for PersistedEpochHeader { - fn clone(&self) -> Self { - match self { - Self::Genesis(epoch_0, epoch_1) => Self::Genesis(epoch_0.clone(), epoch_1.clone()), - Self::Regular(epoch_n) => Self::Regular(epoch_n.clone()), - } - } + fn clone(&self) -> Self { + match self { + Self::Genesis(epoch_0, epoch_1) => Self::Genesis(epoch_0.clone(), epoch_1.clone()), + Self::Regular(epoch_n) => Self::Regular(epoch_n.clone()), + } + } } /// A fresh, incremented epoch to import into the underlying fork-tree. @@ -273,12 +279,12 @@ impl Clone for PersistedEpochHeader { pub struct IncrementedEpoch(PersistedEpoch); impl AsRef for IncrementedEpoch { - fn as_ref(&self) -> &E { - match self.0 { - PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, - PersistedEpoch::Regular(ref epoch_n) => epoch_n, - } - } + fn as_ref(&self) -> &E { + match self.0 { + PersistedEpoch::Genesis(_, ref epoch_1) => epoch_1, + PersistedEpoch::Regular(ref epoch_n) => epoch_n, + } + } } /// Tree of all epoch changes across all *seen* forks. Data stored in tree is @@ -298,624 +304,613 @@ impl AsRef for IncrementedEpoch { /// Further epochs (epoch_2, ..., epoch_n) each get their own entry. #[derive(Clone, Encode, Decode)] pub struct EpochChanges { - inner: ForkTree>, - epochs: BTreeMap<(Hash, Number), PersistedEpoch>, + inner: ForkTree>, + epochs: BTreeMap<(Hash, Number), PersistedEpoch>, } // create a fake header hash which hasn't been included in the chain. fn fake_head_hash + AsMut<[u8]> + Clone>(parent_hash: &H) -> H { - let mut h = parent_hash.clone(); - // dirty trick: flip the first bit of the parent hash to create a hash - // which has not been in the chain before (assuming a strong hash function). - h.as_mut()[0] ^= 0b10000000; - h + let mut h = parent_hash.clone(); + // dirty trick: flip the first bit of the parent hash to create a hash + // which has not been in the chain before (assuming a strong hash function). 
+ h.as_mut()[0] ^= 0b10000000; + h } -impl Default for EpochChanges where - Hash: PartialEq + Ord, - Number: Ord, +impl Default for EpochChanges +where + Hash: PartialEq + Ord, + Number: Ord, { - fn default() -> Self { - EpochChanges { inner: ForkTree::new(), epochs: BTreeMap::new() } - } + fn default() -> Self { + EpochChanges { + inner: ForkTree::new(), + epochs: BTreeMap::new(), + } + } } -impl EpochChanges where - Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, - Number: Ord + One + Zero + Add + Copy, +impl EpochChanges +where + Hash: PartialEq + Ord + AsRef<[u8]> + AsMut<[u8]> + Copy, + Number: Ord + One + Zero + Add + Copy, { - /// Create a new epoch change. - pub fn new() -> Self { - Self::default() - } - - /// Rebalances the tree of epoch changes so that it is sorted by length of - /// fork (longest fork first). - pub fn rebalance(&mut self) { - self.inner.rebalance() - } - - /// Prune out finalized epochs, except for the ancestor of the finalized - /// block. The given slot should be the slot number at which the finalized - /// block was authored. - pub fn prune_finalized>( - &mut self, - descendent_of_builder: D, - hash: &Hash, - number: Number, - slot: E::SlotNumber, - ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(None); - - let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(_, ref epoch_1) => - slot >= epoch_1.end_slot, - PersistedEpochHeader::Regular(ref epoch_n) => - slot >= epoch_n.end_slot, - }; - - // prune any epochs which could not be _live_ as of the children of the - // finalized block, i.e. 
re-root the fork tree to the oldest ancestor of - // (hash, number) where epoch.end_slot() >= finalized_slot - let removed = self.inner.prune( - hash, - &number, - &is_descendent_of, - &predicate, - )?; - - for (hash, number, _) in removed { - self.epochs.remove(&(hash, number)); - } - - Ok(()) - } - - /// Get a reference to an epoch with given identifier. - pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { - self.epochs.get(&(id.hash, id.number)) - .and_then(|v| { - match v { - PersistedEpoch::Genesis(ref epoch_0, _) - if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), - PersistedEpoch::Genesis(_, ref epoch_1) - if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), - PersistedEpoch::Regular(ref epoch_n) - if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), - _ => None, - } - }) - } - - /// Get a reference to a viable epoch with given descriptor. - pub fn viable_epoch( - &self, - descriptor: &ViableEpochDescriptor, - make_genesis: G, - ) -> Option> where - G: FnOnce(E::SlotNumber) -> E - { - match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch(&identifier).map(ViableEpoch::Signaled) - }, - } - } - - /// Get a mutable reference to an epoch with given identifier. 
- pub fn epoch_mut(&mut self, id: &EpochIdentifier) -> Option<&mut E> { - self.epochs.get_mut(&(id.hash, id.number)) - .and_then(|v| { - match v { - PersistedEpoch::Genesis(ref mut epoch_0, _) - if id.position == EpochIdentifierPosition::Genesis0 => Some(epoch_0), - PersistedEpoch::Genesis(_, ref mut epoch_1) - if id.position == EpochIdentifierPosition::Genesis1 => Some(epoch_1), - PersistedEpoch::Regular(ref mut epoch_n) - if id.position == EpochIdentifierPosition::Regular => Some(epoch_n), - _ => None, - } - }) - } - - /// Get a mutable reference to a viable epoch with given descriptor. - pub fn viable_epoch_mut( - &mut self, - descriptor: &ViableEpochDescriptor, - make_genesis: G, - ) -> Option> where - G: FnOnce(E::SlotNumber) -> E - { - match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch_mut(&identifier).map(ViableEpoch::Signaled) - }, - } - } - - /// Get the epoch data from an epoch descriptor. - /// - /// Note that this function ignores the fact that an genesis epoch might need to be imported. - /// Mostly useful for testing. - pub fn epoch_data( - &self, - descriptor: &ViableEpochDescriptor, - make_genesis: G - ) -> Option where - G: FnOnce(E::SlotNumber) -> E, - E: Clone, - { - match descriptor { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - Some(make_genesis(*slot_number)) - }, - ViableEpochDescriptor::Signaled(identifier, _) => { - self.epoch(&identifier).cloned() - }, - } - } - - /// Finds the epoch data for a child of the given block. Similar to - /// `epoch_descriptor_for_child_of` but returns the full data. - /// - /// Note that this function ignores the fact that an genesis epoch might need to be imported. - /// Mostly useful for testing. 
- pub fn epoch_data_for_child_of, G>( - &self, - descendent_of_builder: D, - parent_hash: &Hash, - parent_number: Number, - slot_number: E::SlotNumber, - make_genesis: G, - ) -> Result, fork_tree::Error> where - G: FnOnce(E::SlotNumber) -> E, - E: Clone, - { - let descriptor = self.epoch_descriptor_for_child_of( - descendent_of_builder, - parent_hash, - parent_number, - slot_number - )?; - - Ok(descriptor.and_then(|des| self.epoch_data(&des, make_genesis))) - } - - /// Finds the epoch for a child of the given block, assuming the given slot number. - /// - /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the - /// tree. - pub fn epoch_descriptor_for_child_of>( - &self, - descendent_of_builder: D, - parent_hash: &Hash, - parent_number: Number, - slot_number: E::SlotNumber, - ) -> Result>, fork_tree::Error> { - // find_node_where will give you the node in the fork-tree which is an ancestor - // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, - // then it won't be returned. we need to create a new fake chain head hash which - // "descends" from our parent-hash. - let fake_head_hash = fake_head_hash(parent_hash); - - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((fake_head_hash, *parent_hash))); - - if parent_number == Zero::zero() { - // need to insert the genesis epoch. - return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot_number))) - } - - // We want to find the deepest node in the tree which is an ancestor - // of our block and where the start slot of the epoch was before the - // slot of our block. The genesis special-case doesn't need to look - // at epoch_1 -- all we're doing here is figuring out which node - // we need. 
- let predicate = |epoch: &PersistedEpochHeader| match *epoch { - PersistedEpochHeader::Genesis(ref epoch_0, _) => - epoch_0.start_slot <= slot_number, - PersistedEpochHeader::Regular(ref epoch_n) => - epoch_n.start_slot <= slot_number, - }; - - self.inner.find_node_where( - &fake_head_hash, - &(parent_number + One::one()), - &is_descendent_of, - &predicate, - ) - .map(|n| { - n.map(|node| (match node.data { - // Ok, we found our node. - // and here we figure out which of the internal epochs - // of a genesis node to use based on their start slot. - PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => - if epoch_1.start_slot <= slot_number { - (EpochIdentifierPosition::Genesis1, epoch_1.clone()) - } else { - (EpochIdentifierPosition::Genesis0, epoch_0.clone()) - }, - PersistedEpochHeader::Regular(ref epoch_n) => - (EpochIdentifierPosition::Regular, epoch_n.clone()), - }, node)).map(|((position, header), node)| { - ViableEpochDescriptor::Signaled(EpochIdentifier { - position, - hash: node.hash, - number: node.number - }, header) - }) - }) - } - - /// Import a new epoch-change, signalled at the given block. - /// - /// This assumes that the given block is prospective (i.e. has not been - /// imported yet), but its parent has. This is why the parent hash needs - /// to be provided. - pub fn import>( - &mut self, - descendent_of_builder: D, - hash: Hash, - number: Number, - parent_hash: Hash, - epoch: IncrementedEpoch, - ) -> Result<(), fork_tree::Error> { - let is_descendent_of = descendent_of_builder - .build_is_descendent_of(Some((hash, parent_hash))); - let header = PersistedEpochHeader::::from(&epoch.0); - - let res = self.inner.import( - hash, - number, - header, - &is_descendent_of, - ); - - match res { - Ok(_) | Err(fork_tree::Error::Duplicate) => { - self.epochs.insert((hash, number), epoch.0); - Ok(()) - }, - Err(e) => Err(e), - } - } - - /// Return the inner fork tree. 
- pub fn tree(&self) -> &ForkTree> { - &self.inner - } + /// Create a new epoch change. + pub fn new() -> Self { + Self::default() + } + + /// Rebalances the tree of epoch changes so that it is sorted by length of + /// fork (longest fork first). + pub fn rebalance(&mut self) { + self.inner.rebalance() + } + + /// Prune out finalized epochs, except for the ancestor of the finalized + /// block. The given slot should be the slot number at which the finalized + /// block was authored. + pub fn prune_finalized>( + &mut self, + descendent_of_builder: D, + hash: &Hash, + number: Number, + slot: E::SlotNumber, + ) -> Result<(), fork_tree::Error> { + let is_descendent_of = descendent_of_builder.build_is_descendent_of(None); + + let predicate = |epoch: &PersistedEpochHeader| match *epoch { + PersistedEpochHeader::Genesis(_, ref epoch_1) => slot >= epoch_1.end_slot, + PersistedEpochHeader::Regular(ref epoch_n) => slot >= epoch_n.end_slot, + }; + + // prune any epochs which could not be _live_ as of the children of the + // finalized block, i.e. re-root the fork tree to the oldest ancestor of + // (hash, number) where epoch.end_slot() >= finalized_slot + let removed = self + .inner + .prune(hash, &number, &is_descendent_of, &predicate)?; + + for (hash, number, _) in removed { + self.epochs.remove(&(hash, number)); + } + + Ok(()) + } + + /// Get a reference to an epoch with given identifier. + pub fn epoch(&self, id: &EpochIdentifier) -> Option<&E> { + self.epochs + .get(&(id.hash, id.number)) + .and_then(|v| match v { + PersistedEpoch::Genesis(ref epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => + { + Some(epoch_0) + } + PersistedEpoch::Genesis(_, ref epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => + { + Some(epoch_1) + } + PersistedEpoch::Regular(ref epoch_n) + if id.position == EpochIdentifierPosition::Regular => + { + Some(epoch_n) + } + _ => None, + }) + } + + /// Get a reference to a viable epoch with given descriptor. 
+ pub fn viable_epoch( + &self, + descriptor: &ViableEpochDescriptor, + make_genesis: G, + ) -> Option> + where + G: FnOnce(E::SlotNumber) -> E, + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) + } + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch(&identifier).map(ViableEpoch::Signaled) + } + } + } + + /// Get a mutable reference to an epoch with given identifier. + pub fn epoch_mut(&mut self, id: &EpochIdentifier) -> Option<&mut E> { + self.epochs + .get_mut(&(id.hash, id.number)) + .and_then(|v| match v { + PersistedEpoch::Genesis(ref mut epoch_0, _) + if id.position == EpochIdentifierPosition::Genesis0 => + { + Some(epoch_0) + } + PersistedEpoch::Genesis(_, ref mut epoch_1) + if id.position == EpochIdentifierPosition::Genesis1 => + { + Some(epoch_1) + } + PersistedEpoch::Regular(ref mut epoch_n) + if id.position == EpochIdentifierPosition::Regular => + { + Some(epoch_n) + } + _ => None, + }) + } + + /// Get a mutable reference to a viable epoch with given descriptor. + pub fn viable_epoch_mut( + &mut self, + descriptor: &ViableEpochDescriptor, + make_genesis: G, + ) -> Option> + where + G: FnOnce(E::SlotNumber) -> E, + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(ViableEpoch::UnimportedGenesis(make_genesis(*slot_number))) + } + ViableEpochDescriptor::Signaled(identifier, _) => { + self.epoch_mut(&identifier).map(ViableEpoch::Signaled) + } + } + } + + /// Get the epoch data from an epoch descriptor. + /// + /// Note that this function ignores the fact that an genesis epoch might need to be imported. + /// Mostly useful for testing. 
+ pub fn epoch_data( + &self, + descriptor: &ViableEpochDescriptor, + make_genesis: G, + ) -> Option + where + G: FnOnce(E::SlotNumber) -> E, + E: Clone, + { + match descriptor { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + Some(make_genesis(*slot_number)) + } + ViableEpochDescriptor::Signaled(identifier, _) => self.epoch(&identifier).cloned(), + } + } + + /// Finds the epoch data for a child of the given block. Similar to + /// `epoch_descriptor_for_child_of` but returns the full data. + /// + /// Note that this function ignores the fact that an genesis epoch might need to be imported. + /// Mostly useful for testing. + pub fn epoch_data_for_child_of, G>( + &self, + descendent_of_builder: D, + parent_hash: &Hash, + parent_number: Number, + slot_number: E::SlotNumber, + make_genesis: G, + ) -> Result, fork_tree::Error> + where + G: FnOnce(E::SlotNumber) -> E, + E: Clone, + { + let descriptor = self.epoch_descriptor_for_child_of( + descendent_of_builder, + parent_hash, + parent_number, + slot_number, + )?; + + Ok(descriptor.and_then(|des| self.epoch_data(&des, make_genesis))) + } + + /// Finds the epoch for a child of the given block, assuming the given slot number. + /// + /// If the returned epoch is an `UnimportedGenesis` epoch, it should be imported into the + /// tree. + pub fn epoch_descriptor_for_child_of>( + &self, + descendent_of_builder: D, + parent_hash: &Hash, + parent_number: Number, + slot_number: E::SlotNumber, + ) -> Result>, fork_tree::Error> { + // find_node_where will give you the node in the fork-tree which is an ancestor + // of the `parent_hash` by default. if the last epoch was signalled at the parent_hash, + // then it won't be returned. we need to create a new fake chain head hash which + // "descends" from our parent-hash. 
+ let fake_head_hash = fake_head_hash(parent_hash); + + let is_descendent_of = + descendent_of_builder.build_is_descendent_of(Some((fake_head_hash, *parent_hash))); + + if parent_number == Zero::zero() { + // need to insert the genesis epoch. + return Ok(Some(ViableEpochDescriptor::UnimportedGenesis(slot_number))); + } + + // We want to find the deepest node in the tree which is an ancestor + // of our block and where the start slot of the epoch was before the + // slot of our block. The genesis special-case doesn't need to look + // at epoch_1 -- all we're doing here is figuring out which node + // we need. + let predicate = |epoch: &PersistedEpochHeader| match *epoch { + PersistedEpochHeader::Genesis(ref epoch_0, _) => epoch_0.start_slot <= slot_number, + PersistedEpochHeader::Regular(ref epoch_n) => epoch_n.start_slot <= slot_number, + }; + + self.inner + .find_node_where( + &fake_head_hash, + &(parent_number + One::one()), + &is_descendent_of, + &predicate, + ) + .map(|n| { + n.map(|node| { + ( + match node.data { + // Ok, we found our node. + // and here we figure out which of the internal epochs + // of a genesis node to use based on their start slot. + PersistedEpochHeader::Genesis(ref epoch_0, ref epoch_1) => { + if epoch_1.start_slot <= slot_number { + (EpochIdentifierPosition::Genesis1, epoch_1.clone()) + } else { + (EpochIdentifierPosition::Genesis0, epoch_0.clone()) + } + } + PersistedEpochHeader::Regular(ref epoch_n) => { + (EpochIdentifierPosition::Regular, epoch_n.clone()) + } + }, + node, + ) + }) + .map(|((position, header), node)| { + ViableEpochDescriptor::Signaled( + EpochIdentifier { + position, + hash: node.hash, + number: node.number, + }, + header, + ) + }) + }) + } + + /// Import a new epoch-change, signalled at the given block. + /// + /// This assumes that the given block is prospective (i.e. has not been + /// imported yet), but its parent has. This is why the parent hash needs + /// to be provided. 
+ pub fn import>( + &mut self, + descendent_of_builder: D, + hash: Hash, + number: Number, + parent_hash: Hash, + epoch: IncrementedEpoch, + ) -> Result<(), fork_tree::Error> { + let is_descendent_of = + descendent_of_builder.build_is_descendent_of(Some((hash, parent_hash))); + let header = PersistedEpochHeader::::from(&epoch.0); + + let res = self.inner.import(hash, number, header, &is_descendent_of); + + match res { + Ok(_) | Err(fork_tree::Error::Duplicate) => { + self.epochs.insert((hash, number), epoch.0); + Ok(()) + } + Err(e) => Err(e), + } + } + + /// Return the inner fork tree. + pub fn tree(&self) -> &ForkTree> { + &self.inner + } } /// Type alias to produce the epoch-changes tree from a block type. -pub type EpochChangesFor = EpochChanges<::Hash, NumberFor, Epoch>; +pub type EpochChangesFor = + EpochChanges<::Hash, NumberFor, Epoch>; /// A shared epoch changes tree. pub type SharedEpochChanges = Arc>>; #[cfg(test)] mod tests { - use super::*; - use super::Epoch as EpochT; - - #[derive(Debug, PartialEq)] - pub struct TestError; - - impl std::fmt::Display for TestError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "TestError") - } - } - - impl std::error::Error for TestError {} - - impl<'a, F: 'a , H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F - where F: Fn(&H, &H) -> Result - { - type Error = TestError; - type IsDescendentOf = Box Result + 'a>; - - fn build_is_descendent_of(&self, current: Option<(H, H)>) - -> Self::IsDescendentOf - { - let f = *self; - Box::new(move |base, head| { - let mut head = head; - - if let Some((ref c_head, ref c_parent)) = current { - if head == c_head { - if base == c_parent { - return Ok(true); - } else { - head = c_parent; - } - } - } - - f(base, head) - }) - } - } - - type Hash = [u8; 1]; - type SlotNumber = u64; - - #[derive(Debug, Clone, Eq, PartialEq)] - struct Epoch { - start_slot: SlotNumber, - duration: SlotNumber, - } - - impl EpochT for Epoch { - type 
NextEpochDescriptor = (); - type SlotNumber = SlotNumber; - - fn increment(&self, _: ()) -> Self { - Epoch { - start_slot: self.start_slot + self.duration, - duration: self.duration, - } - } - - fn end_slot(&self) -> SlotNumber { - self.start_slot + self.duration - } - - fn start_slot(&self) -> SlotNumber { - self.start_slot - } - } - - #[test] - fn genesis_epoch_is_created_but_not_imported() { - // - // A - B - // \ - // — C - // - let is_descendent_of = |base: &Hash, block: &Hash| -> Result { - match (base, *block) { - (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), - (b"B", b) | (b"C", b) => Ok(b == *b"D"), - (b"0", _) => Ok(true), - _ => Ok(false), - } - }; - - let epoch_changes = EpochChanges::<_, _, Epoch>::new(); - let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 10101, - ).unwrap().unwrap(); - - match genesis_epoch { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - assert_eq!(slot_number, 10101u64); - }, - _ => panic!("should be unimported genesis"), - }; - - let genesis_epoch_2 = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 10102, - ).unwrap().unwrap(); - - match genesis_epoch_2 { - ViableEpochDescriptor::UnimportedGenesis(slot_number) => { - assert_eq!(slot_number, 10102u64); - }, - _ => panic!("should be unimported genesis"), - }; - } - - #[test] - fn epoch_changes_between_blocks() { - // - // A - B - // \ - // — C - // - let is_descendent_of = |base: &Hash, block: &Hash| -> Result { - match (base, *block) { - (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), - (b"B", b) | (b"C", b) => Ok(b == *b"D"), - (b"0", _) => Ok(true), - _ => Ok(false), - } - }; - - let make_genesis = |slot| Epoch { - start_slot: slot, - duration: 100, - }; - - let mut epoch_changes = EpochChanges::<_, _, Epoch>::new(); - let genesis_epoch = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - ).unwrap().unwrap(); - - 
assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); - - let import_epoch_1 = epoch_changes - .viable_epoch(&genesis_epoch, &make_genesis) - .unwrap() - .increment(()); - let epoch_1 = import_epoch_1.as_ref().clone(); - - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - import_epoch_1, - ).unwrap(); - let genesis_epoch = epoch_changes.epoch_data(&genesis_epoch, &make_genesis).unwrap(); - - assert!(is_descendent_of(b"0", b"A").unwrap()); - - let end_slot = genesis_epoch.end_slot(); - assert_eq!(end_slot, epoch_1.start_slot); - - { - // x is still within the genesis epoch. - let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot - 1, - &make_genesis, - ).unwrap().unwrap(); - - assert_eq!(x, genesis_epoch); - } - - { - // x is now at the next epoch, because the block is now at the - // start slot of epoch 1. - let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - end_slot, - &make_genesis, - ).unwrap().unwrap(); - - assert_eq!(x, epoch_1); - } - - { - // x is now at the next epoch, because the block is now after - // start slot of epoch 1. 
- let x = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - epoch_1.end_slot() - 1, - &make_genesis, - ).unwrap().unwrap(); - - assert_eq!(x, epoch_1); - } - } - - #[test] - fn two_block_ones_dont_conflict() { - // X - Y - // / - // 0 - A - B - // - let is_descendent_of = |base: &Hash, block: &Hash| -> Result { - match (base, *block) { - (b"A", b) => Ok(b == *b"B"), - (b"X", b) => Ok(b == *b"Y"), - (b"0", _) => Ok(true), - _ => Ok(false), - } - }; - - let duration = 100; - - let make_genesis = |slot| Epoch { - start_slot: slot, - duration, - }; - - let mut epoch_changes = EpochChanges::new(); - let next_descriptor = (); - - // insert genesis epoch for A - { - let genesis_epoch_a_descriptor = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 100, - ).unwrap().unwrap(); - - let incremented_epoch = epoch_changes - .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) - .unwrap() - .increment(next_descriptor.clone()); - - epoch_changes.import( - &is_descendent_of, - *b"A", - 1, - *b"0", - incremented_epoch, - ).unwrap(); - } - - // insert genesis epoch for X - { - let genesis_epoch_x_descriptor = epoch_changes.epoch_descriptor_for_child_of( - &is_descendent_of, - b"0", - 0, - 1000, - ).unwrap().unwrap(); - - let incremented_epoch = epoch_changes - .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) - .unwrap() - .increment(next_descriptor.clone()); - - epoch_changes.import( - &is_descendent_of, - *b"X", - 1, - *b"0", - incremented_epoch, - ).unwrap(); - } - - // now check that the genesis epochs for our respective block 1s - // respect the chain structure. 
- { - let epoch_for_a_child = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"A", - 1, - 101, - &make_genesis, - ).unwrap().unwrap(); - - assert_eq!(epoch_for_a_child, make_genesis(100)); - - let epoch_for_x_child = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"X", - 1, - 1001, - &make_genesis, - ).unwrap().unwrap(); - - assert_eq!(epoch_for_x_child, make_genesis(1000)); - - let epoch_for_x_child_before_genesis = epoch_changes.epoch_data_for_child_of( - &is_descendent_of, - b"X", - 1, - 101, - &make_genesis, - ).unwrap(); - - // even though there is a genesis epoch at that slot, it's not in - // this chain. - assert!(epoch_for_x_child_before_genesis.is_none()); - } - } + use super::Epoch as EpochT; + use super::*; + + #[derive(Debug, PartialEq)] + pub struct TestError; + + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TestError") + } + } + + impl std::error::Error for TestError {} + + impl<'a, F: 'a, H: 'a + PartialEq + std::fmt::Debug> IsDescendentOfBuilder for &'a F + where + F: Fn(&H, &H) -> Result, + { + type Error = TestError; + type IsDescendentOf = Box Result + 'a>; + + fn build_is_descendent_of(&self, current: Option<(H, H)>) -> Self::IsDescendentOf { + let f = *self; + Box::new(move |base, head| { + let mut head = head; + + if let Some((ref c_head, ref c_parent)) = current { + if head == c_head { + if base == c_parent { + return Ok(true); + } else { + head = c_parent; + } + } + } + + f(base, head) + }) + } + } + + type Hash = [u8; 1]; + type SlotNumber = u64; + + #[derive(Debug, Clone, Eq, PartialEq)] + struct Epoch { + start_slot: SlotNumber, + duration: SlotNumber, + } + + impl EpochT for Epoch { + type NextEpochDescriptor = (); + type SlotNumber = SlotNumber; + + fn increment(&self, _: ()) -> Self { + Epoch { + start_slot: self.start_slot + self.duration, + duration: self.duration, + } + } + + fn end_slot(&self) -> SlotNumber { + self.start_slot + 
self.duration + } + + fn start_slot(&self) -> SlotNumber { + self.start_slot + } + } + + #[test] + fn genesis_epoch_is_created_but_not_imported() { + // + // A - B + // \ + // — C + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), + (b"B", b) | (b"C", b) => Ok(b == *b"D"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let epoch_changes = EpochChanges::<_, _, Epoch>::new(); + let genesis_epoch = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 10101) + .unwrap() + .unwrap(); + + match genesis_epoch { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + assert_eq!(slot_number, 10101u64); + } + _ => panic!("should be unimported genesis"), + }; + + let genesis_epoch_2 = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 10102) + .unwrap() + .unwrap(); + + match genesis_epoch_2 { + ViableEpochDescriptor::UnimportedGenesis(slot_number) => { + assert_eq!(slot_number, 10102u64); + } + _ => panic!("should be unimported genesis"), + }; + } + + #[test] + fn epoch_changes_between_blocks() { + // + // A - B + // \ + // — C + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B" || b == *b"C" || b == *b"D"), + (b"B", b) | (b"C", b) => Ok(b == *b"D"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let make_genesis = |slot| Epoch { + start_slot: slot, + duration: 100, + }; + + let mut epoch_changes = EpochChanges::<_, _, Epoch>::new(); + let genesis_epoch = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); + + assert_eq!(genesis_epoch, ViableEpochDescriptor::UnimportedGenesis(100)); + + let import_epoch_1 = epoch_changes + .viable_epoch(&genesis_epoch, &make_genesis) + .unwrap() + .increment(()); + let epoch_1 = import_epoch_1.as_ref().clone(); + + epoch_changes + 
.import(&is_descendent_of, *b"A", 1, *b"0", import_epoch_1) + .unwrap(); + let genesis_epoch = epoch_changes + .epoch_data(&genesis_epoch, &make_genesis) + .unwrap(); + + assert!(is_descendent_of(b"0", b"A").unwrap()); + + let end_slot = genesis_epoch.end_slot(); + assert_eq!(end_slot, epoch_1.start_slot); + + { + // x is still within the genesis epoch. + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, end_slot - 1, &make_genesis) + .unwrap() + .unwrap(); + + assert_eq!(x, genesis_epoch); + } + + { + // x is now at the next epoch, because the block is now at the + // start slot of epoch 1. + let x = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, end_slot, &make_genesis) + .unwrap() + .unwrap(); + + assert_eq!(x, epoch_1); + } + + { + // x is now at the next epoch, because the block is now after + // start slot of epoch 1. + let x = epoch_changes + .epoch_data_for_child_of( + &is_descendent_of, + b"A", + 1, + epoch_1.end_slot() - 1, + &make_genesis, + ) + .unwrap() + .unwrap(); + + assert_eq!(x, epoch_1); + } + } + + #[test] + fn two_block_ones_dont_conflict() { + // X - Y + // / + // 0 - A - B + // + let is_descendent_of = |base: &Hash, block: &Hash| -> Result { + match (base, *block) { + (b"A", b) => Ok(b == *b"B"), + (b"X", b) => Ok(b == *b"Y"), + (b"0", _) => Ok(true), + _ => Ok(false), + } + }; + + let duration = 100; + + let make_genesis = |slot| Epoch { + start_slot: slot, + duration, + }; + + let mut epoch_changes = EpochChanges::new(); + let next_descriptor = (); + + // insert genesis epoch for A + { + let genesis_epoch_a_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 100) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_a_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"A", 1, *b"0", incremented_epoch) + .unwrap(); + } + + // insert genesis 
epoch for X + { + let genesis_epoch_x_descriptor = epoch_changes + .epoch_descriptor_for_child_of(&is_descendent_of, b"0", 0, 1000) + .unwrap() + .unwrap(); + + let incremented_epoch = epoch_changes + .viable_epoch(&genesis_epoch_x_descriptor, &make_genesis) + .unwrap() + .increment(next_descriptor.clone()); + + epoch_changes + .import(&is_descendent_of, *b"X", 1, *b"0", incremented_epoch) + .unwrap(); + } + + // now check that the genesis epochs for our respective block 1s + // respect the chain structure. + { + let epoch_for_a_child = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"A", 1, 101, &make_genesis) + .unwrap() + .unwrap(); + + assert_eq!(epoch_for_a_child, make_genesis(100)); + + let epoch_for_x_child = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 1001, &make_genesis) + .unwrap() + .unwrap(); + + assert_eq!(epoch_for_x_child, make_genesis(1000)); + + let epoch_for_x_child_before_genesis = epoch_changes + .epoch_data_for_child_of(&is_descendent_of, b"X", 1, 101, &make_genesis) + .unwrap(); + + // even though there is a genesis epoch at that slot, it's not in + // this chain. + assert!(epoch_for_x_child_before_genesis.is_none()); + } + } } diff --git a/client/consensus/epochs/src/migration.rs b/client/consensus/epochs/src/migration.rs index e4717b5584..20eeef5449 100644 --- a/client/consensus/epochs/src/migration.rs +++ b/client/consensus/epochs/src/migration.rs @@ -16,40 +16,42 @@ //! Migration types for epoch changes. -use std::collections::BTreeMap; -use codec::{Encode, Decode}; +use crate::{Epoch, EpochChanges, PersistedEpoch, PersistedEpochHeader}; +use codec::{Decode, Encode}; use fork_tree::ForkTree; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use crate::{Epoch, EpochChanges, PersistedEpoch, PersistedEpochHeader}; +use std::collections::BTreeMap; /// Legacy definition of epoch changes. 
#[derive(Clone, Encode, Decode)] pub struct EpochChangesV0 { - inner: ForkTree>, + inner: ForkTree>, } /// Type alias for legacy definition of epoch changes. -pub type EpochChangesForV0 = EpochChangesV0<::Hash, NumberFor, Epoch>; +pub type EpochChangesForV0 = + EpochChangesV0<::Hash, NumberFor, Epoch>; -impl EpochChangesV0 where - Hash: PartialEq + Ord + Copy, - Number: Ord + Copy, +impl EpochChangesV0 +where + Hash: PartialEq + Ord + Copy, + Number: Ord + Copy, { - /// Create a new value of this type from raw. - pub fn from_raw(inner: ForkTree>) -> Self { - Self { inner } - } - - /// Migrate the type into current epoch changes definition. - pub fn migrate(self) -> EpochChanges { - let mut epochs = BTreeMap::new(); - - let inner = self.inner.map(&mut |hash, number, data| { - let header = PersistedEpochHeader::from(&data); - epochs.insert((*hash, *number), data); - header - }); - - EpochChanges { inner, epochs } - } + /// Create a new value of this type from raw. + pub fn from_raw(inner: ForkTree>) -> Self { + Self { inner } + } + + /// Migrate the type into current epoch changes definition. + pub fn migrate(self) -> EpochChanges { + let mut epochs = BTreeMap::new(); + + let inner = self.inner.map(&mut |hash, number, data| { + let header = PersistedEpochHeader::from(&data); + epochs.insert((*hash, *number), data); + header + }); + + EpochChanges { inner, epochs } + } } diff --git a/client/consensus/manual-seal/src/error.rs b/client/consensus/manual-seal/src/error.rs index d6ee9f1767..07d4ea114f 100644 --- a/client/consensus/manual-seal/src/error.rs +++ b/client/consensus/manual-seal/src/error.rs @@ -16,83 +16,83 @@ //! A manual sealing engine: the engine listens for rpc calls to seal blocks and create forks. //! This is suitable for a testing environment. 
-use sp_consensus::{Error as ConsensusError, ImportResult}; +use futures::channel::{mpsc::SendError, oneshot}; use sp_blockchain::Error as BlockchainError; +use sp_consensus::{Error as ConsensusError, ImportResult}; use sp_inherents::Error as InherentsError; -use futures::channel::{oneshot, mpsc::SendError}; /// Error code for rpc mod codes { - pub const SERVER_SHUTTING_DOWN: i64 = 10_000; - pub const BLOCK_IMPORT_FAILED: i64 = 11_000; - pub const EMPTY_TRANSACTION_POOL: i64 = 12_000; - pub const BLOCK_NOT_FOUND: i64 = 13_000; - pub const CONSENSUS_ERROR: i64 = 14_000; - pub const INHERENTS_ERROR: i64 = 15_000; - pub const BLOCKCHAIN_ERROR: i64 = 16_000; - pub const UNKNOWN_ERROR: i64 = 20_000; + pub const SERVER_SHUTTING_DOWN: i64 = 10_000; + pub const BLOCK_IMPORT_FAILED: i64 = 11_000; + pub const EMPTY_TRANSACTION_POOL: i64 = 12_000; + pub const BLOCK_NOT_FOUND: i64 = 13_000; + pub const CONSENSUS_ERROR: i64 = 14_000; + pub const INHERENTS_ERROR: i64 = 15_000; + pub const BLOCKCHAIN_ERROR: i64 = 16_000; + pub const UNKNOWN_ERROR: i64 = 20_000; } /// errors encountered by background block authorship task #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// An error occurred while importing the block - #[display(fmt = "Block import failed: {:?}", _0)] - BlockImportError(ImportResult), - /// Transaction pool is empty, cannot create a block - #[display(fmt = "Transaction pool is empty, set create_empty to true,\ + /// An error occurred while importing the block + #[display(fmt = "Block import failed: {:?}", _0)] + BlockImportError(ImportResult), + /// Transaction pool is empty, cannot create a block + #[display(fmt = "Transaction pool is empty, set create_empty to true,\ if you want to create empty blocks")] - EmptyTransactionPool, - /// encountered during creation of Proposer. 
- #[display(fmt = "Consensus Error: {}", _0)] - ConsensusError(ConsensusError), - /// Failed to create Inherents data - #[display(fmt = "Inherents Error: {}", _0)] - InherentError(InherentsError), - /// error encountered during finalization - #[display(fmt = "Finalization Error: {}", _0)] - BlockchainError(BlockchainError), - /// Supplied parent_hash doesn't exist in chain - #[display(fmt = "Supplied parent_hash: {} doesn't exist in chain", _0)] - #[from(ignore)] - BlockNotFound(String), - /// Some string error - #[display(fmt = "{}", _0)] - #[from(ignore)] - StringError(String), - ///send error - #[display(fmt = "Consensus process is terminating")] - Canceled(oneshot::Canceled), - ///send error - #[display(fmt = "Consensus process is terminating")] - SendError(SendError), - /// Some other error. - #[display(fmt="Other error: {}", _0)] - Other(Box), + EmptyTransactionPool, + /// encountered during creation of Proposer. + #[display(fmt = "Consensus Error: {}", _0)] + ConsensusError(ConsensusError), + /// Failed to create Inherents data + #[display(fmt = "Inherents Error: {}", _0)] + InherentError(InherentsError), + /// error encountered during finalization + #[display(fmt = "Finalization Error: {}", _0)] + BlockchainError(BlockchainError), + /// Supplied parent_hash doesn't exist in chain + #[display(fmt = "Supplied parent_hash: {} doesn't exist in chain", _0)] + #[from(ignore)] + BlockNotFound(String), + /// Some string error + #[display(fmt = "{}", _0)] + #[from(ignore)] + StringError(String), + ///send error + #[display(fmt = "Consensus process is terminating")] + Canceled(oneshot::Canceled), + ///send error + #[display(fmt = "Consensus process is terminating")] + SendError(SendError), + /// Some other error. 
+ #[display(fmt = "Other error: {}", _0)] + Other(Box), } impl Error { - fn to_code(&self) -> i64 { - use Error::*; - match self { - BlockImportError(_) => codes::BLOCK_IMPORT_FAILED, - BlockNotFound(_) => codes::BLOCK_NOT_FOUND, - EmptyTransactionPool => codes::EMPTY_TRANSACTION_POOL, - ConsensusError(_) => codes::CONSENSUS_ERROR, - InherentError(_) => codes::INHERENTS_ERROR, - BlockchainError(_) => codes::BLOCKCHAIN_ERROR, - SendError(_) | Canceled(_) => codes::SERVER_SHUTTING_DOWN, - _ => codes::UNKNOWN_ERROR - } - } + fn to_code(&self) -> i64 { + use Error::*; + match self { + BlockImportError(_) => codes::BLOCK_IMPORT_FAILED, + BlockNotFound(_) => codes::BLOCK_NOT_FOUND, + EmptyTransactionPool => codes::EMPTY_TRANSACTION_POOL, + ConsensusError(_) => codes::CONSENSUS_ERROR, + InherentError(_) => codes::INHERENTS_ERROR, + BlockchainError(_) => codes::BLOCKCHAIN_ERROR, + SendError(_) | Canceled(_) => codes::SERVER_SHUTTING_DOWN, + _ => codes::UNKNOWN_ERROR, + } + } } impl std::convert::From for jsonrpc_core::Error { - fn from(error: Error) -> Self { - jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::ServerError(error.to_code()), - message: format!("{}", error), - data: None - } - } + fn from(error: Error) -> Self { + jsonrpc_core::Error { + code: jsonrpc_core::ErrorCode::ServerError(error.to_code()), + message: format!("{}", error), + data: None, + } + } } diff --git a/client/consensus/manual-seal/src/finalize_block.rs b/client/consensus/manual-seal/src/finalize_block.rs index 5780a25f97..05095eb07b 100644 --- a/client/consensus/manual-seal/src/finalize_block.rs +++ b/client/consensus/manual-seal/src/finalize_block.rs @@ -17,53 +17,48 @@ //! 
Block finalization utilities use crate::rpc; -use sp_runtime::{ - Justification, - traits::Block as BlockT, - generic::BlockId, -}; -use std::sync::Arc; use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT, Justification}; use std::marker::PhantomData; +use std::sync::Arc; /// params for block finalization. pub struct FinalizeBlockParams { - /// hash of the block - pub hash: ::Hash, - /// sender to report errors/success to the rpc. - pub sender: rpc::Sender<()>, - /// finalization justification - pub justification: Option, - /// Finalizer trait object. - pub finalizer: Arc, - /// phantom type to pin the Backend type - pub _phantom: PhantomData, + /// hash of the block + pub hash: ::Hash, + /// sender to report errors/success to the rpc. + pub sender: rpc::Sender<()>, + /// finalization justification + pub justification: Option, + /// Finalizer trait object. + pub finalizer: Arc, + /// phantom type to pin the Backend type + pub _phantom: PhantomData, } - /// finalizes a block in the backend with the given params. pub async fn finalize_block(params: FinalizeBlockParams) - where - B: BlockT, - F: Finalizer, - CB: ClientBackend, +where + B: BlockT, + F: Finalizer, + CB: ClientBackend, { - let FinalizeBlockParams { - hash, - mut sender, - justification, - finalizer, - .. - } = params; + let FinalizeBlockParams { + hash, + mut sender, + justification, + finalizer, + .. 
+ } = params; - match finalizer.finalize_block(BlockId::Hash(hash), justification, true) { - Err(e) => { - log::warn!("Failed to finalize block {:?}", e); - rpc::send_result(&mut sender, Err(e.into())) - } - Ok(()) => { - log::info!("✅ Successfully finalized block: {}", hash); - rpc::send_result(&mut sender, Ok(())) - } - } + match finalizer.finalize_block(BlockId::Hash(hash), justification, true) { + Err(e) => { + log::warn!("Failed to finalize block {:?}", e); + rpc::send_result(&mut sender, Err(e.into())) + } + Ok(()) => { + log::info!("✅ Successfully finalized block: {}", hash); + rpc::send_result(&mut sender, Ok(())) + } + } } diff --git a/client/consensus/manual-seal/src/lib.rs b/client/consensus/manual-seal/src/lib.rs index 687d072aaa..ac0e0eb0a5 100644 --- a/client/consensus/manual-seal/src/lib.rs +++ b/client/consensus/manual-seal/src/lib.rs @@ -18,423 +18,434 @@ //! This is suitable for a testing environment. use futures::prelude::*; +use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; +use sc_transaction_pool::txpool; +use sp_blockchain::HeaderBackend; use sp_consensus::{ - Environment, Proposer, ForkChoiceStrategy, BlockImportParams, BlockOrigin, SelectChain, - import_queue::{BasicQueue, CacheKeyId, Verifier, BoxBlockImport}, + import_queue::{BasicQueue, BoxBlockImport, CacheKeyId, Verifier}, + BlockImportParams, BlockOrigin, Environment, ForkChoiceStrategy, Proposer, SelectChain, }; -use sp_blockchain::HeaderBackend; use sp_inherents::InherentDataProviders; use sp_runtime::{traits::Block as BlockT, Justification}; -use sc_client_api::backend::{Backend as ClientBackend, Finalizer}; -use sc_transaction_pool::txpool; -use std::{sync::Arc, marker::PhantomData}; +use std::{marker::PhantomData, sync::Arc}; mod error; mod finalize_block; -mod seal_new_block; pub mod rpc; +mod seal_new_block; -use self::{ - finalize_block::{finalize_block, FinalizeBlockParams}, - seal_new_block::{seal_new_block, SealBlockParams}, -}; pub use self::{ - 
error::Error, - rpc::{EngineCommand, CreatedBlock}, + error::Error, + rpc::{CreatedBlock, EngineCommand}, +}; +use self::{ + finalize_block::{finalize_block, FinalizeBlockParams}, + seal_new_block::{seal_new_block, SealBlockParams}, }; -use sc_client_api::{TransactionFor, Backend}; +use sc_client_api::{Backend, TransactionFor}; /// The verifier for the manual seal engine; instantly finalizes. struct ManualSealVerifier; impl Verifier for ManualSealVerifier { - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - let mut import_params = BlockImportParams::new(origin, header); - import_params.justification = justification; - import_params.body = body; - import_params.finalized = false; - import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + let mut import_params = BlockImportParams::new(origin, header); + import_params.justification = justification; + import_params.body = body; + import_params.finalized = false; + import_params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - Ok((import_params, None)) - } + Ok((import_params, None)) + } } /// Instantiate the import queue for the manual seal consensus engine. pub fn import_queue( - block_import: BoxBlockImport> + block_import: BoxBlockImport>, ) -> BasicQueue> - where - Block: BlockT, - B: Backend + 'static, +where + Block: BlockT, + B: Backend + 'static, { - BasicQueue::new( - ManualSealVerifier, - Box::new(block_import), - None, - None, - ) + BasicQueue::new(ManualSealVerifier, Box::new(block_import), None, None) } /// Creates the background authorship task for the manual seal engine. 
pub async fn run_manual_seal( - mut block_import: BoxBlockImport, - mut env: E, - client: Arc, - pool: Arc>, - mut commands_stream: S, - select_chain: SC, - inherent_data_providers: InherentDataProviders, -) - where - A: txpool::ChainApi::Hash> + 'static, - B: BlockT + 'static, - C: HeaderBackend + Finalizer + 'static, - CB: ClientBackend + 'static, - E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, - S: Stream::Hash>> + Unpin + 'static, - SC: SelectChain + 'static, + mut block_import: BoxBlockImport, + mut env: E, + client: Arc, + pool: Arc>, + mut commands_stream: S, + select_chain: SC, + inherent_data_providers: InherentDataProviders, +) where + A: txpool::ChainApi::Hash> + 'static, + B: BlockT + 'static, + C: HeaderBackend + Finalizer + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Error: std::fmt::Display, + >::Error: std::fmt::Display, + S: Stream::Hash>> + Unpin + 'static, + SC: SelectChain + 'static, { - while let Some(command) = commands_stream.next().await { - match command { - EngineCommand::SealNewBlock { - create_empty, - finalize, - parent_hash, - sender, - } => { - seal_new_block( - SealBlockParams { - sender, - parent_hash, - finalize, - create_empty, - env: &mut env, - select_chain: &select_chain, - block_import: &mut block_import, - inherent_data_provider: &inherent_data_providers, - pool: pool.clone(), - client: client.clone(), - } - ).await; - } - EngineCommand::FinalizeBlock { hash, sender, justification } => { - finalize_block( - FinalizeBlockParams { - hash, - sender, - justification, - finalizer: client.clone(), - _phantom: PhantomData, - } - ).await - } - } - } + while let Some(command) = commands_stream.next().await { + match command { + EngineCommand::SealNewBlock { + create_empty, + finalize, + parent_hash, + sender, + } => { + seal_new_block(SealBlockParams { + sender, + parent_hash, + finalize, + create_empty, + env: &mut env, + select_chain: &select_chain, + 
block_import: &mut block_import, + inherent_data_provider: &inherent_data_providers, + pool: pool.clone(), + client: client.clone(), + }) + .await; + } + EngineCommand::FinalizeBlock { + hash, + sender, + justification, + } => { + finalize_block(FinalizeBlockParams { + hash, + sender, + justification, + finalizer: client.clone(), + _phantom: PhantomData, + }) + .await + } + } + } } /// runs the background authorship task for the instant seal engine. /// instant-seal creates a new block for every transaction imported into /// the transaction pool. pub async fn run_instant_seal( - block_import: BoxBlockImport, - env: E, - client: Arc, - pool: Arc>, - select_chain: SC, - inherent_data_providers: InherentDataProviders, -) - where - A: txpool::ChainApi::Hash> + 'static, - B: BlockT + 'static, - C: HeaderBackend + Finalizer + 'static, - CB: ClientBackend + 'static, - E: Environment + 'static, - E::Error: std::fmt::Display, - >::Error: std::fmt::Display, - SC: SelectChain + 'static + block_import: BoxBlockImport, + env: E, + client: Arc, + pool: Arc>, + select_chain: SC, + inherent_data_providers: InherentDataProviders, +) where + A: txpool::ChainApi::Hash> + 'static, + B: BlockT + 'static, + C: HeaderBackend + Finalizer + 'static, + CB: ClientBackend + 'static, + E: Environment + 'static, + E::Error: std::fmt::Display, + >::Error: std::fmt::Display, + SC: SelectChain + 'static, { - // instant-seal creates blocks as soon as transactions are imported - // into the transaction pool. - let commands_stream = pool.validated_pool() - .import_notification_stream() - .map(|_| { - EngineCommand::SealNewBlock { - create_empty: false, - finalize: false, - parent_hash: None, - sender: None, - } - }); + // instant-seal creates blocks as soon as transactions are imported + // into the transaction pool. 
+ let commands_stream = + pool.validated_pool() + .import_notification_stream() + .map(|_| EngineCommand::SealNewBlock { + create_empty: false, + finalize: false, + parent_hash: None, + sender: None, + }); - run_manual_seal( - block_import, - env, - client, - pool, - commands_stream, - select_chain, - inherent_data_providers, - ).await + run_manual_seal( + block_import, + env, + client, + pool, + commands_stream, + select_chain, + inherent_data_providers, + ) + .await } #[cfg(test)] mod tests { - use super::*; - use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, - TestClientBuilderExt, - AccountKeyring::*, - TestClientBuilder, - }; - use sc_transaction_pool::{ - BasicPool, - txpool::Options, - }; - use substrate_test_runtime_transaction_pool::{TestApi, uxt}; - use sp_transaction_pool::{TransactionPool, MaintainedTransactionPool, TransactionSource}; - use sp_runtime::generic::BlockId; - use sp_consensus::ImportedAux; - use sp_inherents::InherentDataProviders; - use sc_basic_authorship::ProposerFactory; + use super::*; + use sc_basic_authorship::ProposerFactory; + use sc_transaction_pool::{txpool::Options, BasicPool}; + use sp_consensus::ImportedAux; + use sp_inherents::InherentDataProviders; + use sp_runtime::generic::BlockId; + use sp_transaction_pool::{MaintainedTransactionPool, TransactionPool, TransactionSource}; + use substrate_test_runtime_client::{ + AccountKeyring::*, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + use substrate_test_runtime_transaction_pool::{uxt, TestApi}; - fn api() -> Arc { - Arc::new(TestApi::empty()) - } + fn api() -> Arc { + Arc::new(TestApi::empty()) + } - const SOURCE: TransactionSource = TransactionSource::External; + const SOURCE: TransactionSource = TransactionSource::External; - #[tokio::test] - async fn instant_seal() { - let builder = TestClientBuilder::new(); - let (client, select_chain) = builder.build_with_longest_chain(); - let client = Arc::new(client); - let 
inherent_data_providers = InherentDataProviders::new(); - let pool = Arc::new(BasicPool::new(Options::default(), api(), None).0); - let env = ProposerFactory::new( - client.clone(), - pool.clone() - ); - // this test checks that blocks are created as soon as transactions are imported into the pool. - let (sender, receiver) = futures::channel::oneshot::channel(); - let mut sender = Arc::new(Some(sender)); - let stream = pool.pool().validated_pool().import_notification_stream() - .map(move |_| { - // we're only going to submit one tx so this fn will only be called once. - let mut_sender = Arc::get_mut(&mut sender).unwrap(); - let sender = std::mem::replace(mut_sender, None); - EngineCommand::SealNewBlock { - create_empty: false, - finalize: true, - parent_hash: None, - sender - } - }); - let future = run_manual_seal( - Box::new(client.clone()), - env, - client.clone(), - pool.pool().clone(), - stream, - select_chain, - inherent_data_providers, - ); - std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); - // submit a transaction to pool. - let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; - // assert that it was successfully imported - assert!(result.is_ok()); - // assert that the background task returns ok - let created_block = receiver.await.unwrap().unwrap(); - assert_eq!( - created_block, - CreatedBlock { - hash: created_block.hash.clone(), - aux: ImportedAux { - header_only: false, - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - } - } - ); - // assert that there's a new block in the db. 
- assert!(client.header(&BlockId::Number(1)).unwrap().is_some()) - } + #[tokio::test] + async fn instant_seal() { + let builder = TestClientBuilder::new(); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let inherent_data_providers = InherentDataProviders::new(); + let pool = Arc::new(BasicPool::new(Options::default(), api(), None).0); + let env = ProposerFactory::new(client.clone(), pool.clone()); + // this test checks that blocks are created as soon as transactions are imported into the pool. + let (sender, receiver) = futures::channel::oneshot::channel(); + let mut sender = Arc::new(Some(sender)); + let stream = pool + .pool() + .validated_pool() + .import_notification_stream() + .map(move |_| { + // we're only going to submit one tx so this fn will only be called once. + let mut_sender = Arc::get_mut(&mut sender).unwrap(); + let sender = std::mem::replace(mut_sender, None); + EngineCommand::SealNewBlock { + create_empty: false, + finalize: true, + parent_hash: None, + sender, + } + }); + let future = run_manual_seal( + Box::new(client.clone()), + env, + client.clone(), + pool.pool().clone(), + stream, + select_chain, + inherent_data_providers, + ); + std::thread::spawn(|| { + let mut rt = tokio::runtime::Runtime::new().unwrap(); + // spawn the background authorship task + rt.block_on(future); + }); + // submit a transaction to pool. 
+ let result = pool + .submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)) + .await; + // assert that it was successfully imported + assert!(result.is_ok()); + // assert that the background task returns ok + let created_block = receiver.await.unwrap().unwrap(); + assert_eq!( + created_block, + CreatedBlock { + hash: created_block.hash.clone(), + aux: ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + is_new_best: true, + } + } + ); + // assert that there's a new block in the db. + assert!(client.header(&BlockId::Number(1)).unwrap().is_some()) + } - #[tokio::test] - async fn manual_seal_and_finalization() { - let builder = TestClientBuilder::new(); - let (client, select_chain) = builder.build_with_longest_chain(); - let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); - let pool = Arc::new(BasicPool::new(Options::default(), api(), None).0); - let env = ProposerFactory::new( - client.clone(), - pool.clone() - ); - // this test checks that blocks are created as soon as an engine command is sent over the stream. - let (mut sink, stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal( - Box::new(client.clone()), - env, - client.clone(), - pool.pool().clone(), - stream, - select_chain, - inherent_data_providers, - ); - std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); - // submit a transaction to pool. 
- let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; - // assert that it was successfully imported - assert!(result.is_ok()); - let (tx, rx) = futures::channel::oneshot::channel(); - sink.send(EngineCommand::SealNewBlock { - parent_hash: None, - sender: Some(tx), - create_empty: false, - finalize: false, - }).await.unwrap(); - let created_block = rx.await.unwrap().unwrap(); + #[tokio::test] + async fn manual_seal_and_finalization() { + let builder = TestClientBuilder::new(); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let inherent_data_providers = InherentDataProviders::new(); + let pool = Arc::new(BasicPool::new(Options::default(), api(), None).0); + let env = ProposerFactory::new(client.clone(), pool.clone()); + // this test checks that blocks are created as soon as an engine command is sent over the stream. + let (mut sink, stream) = futures::channel::mpsc::channel(1024); + let future = run_manual_seal( + Box::new(client.clone()), + env, + client.clone(), + pool.pool().clone(), + stream, + select_chain, + inherent_data_providers, + ); + std::thread::spawn(|| { + let mut rt = tokio::runtime::Runtime::new().unwrap(); + // spawn the background authorship task + rt.block_on(future); + }); + // submit a transaction to pool. 
+ let result = pool + .submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)) + .await; + // assert that it was successfully imported + assert!(result.is_ok()); + let (tx, rx) = futures::channel::oneshot::channel(); + sink.send(EngineCommand::SealNewBlock { + parent_hash: None, + sender: Some(tx), + create_empty: false, + finalize: false, + }) + .await + .unwrap(); + let created_block = rx.await.unwrap().unwrap(); - // assert that the background task returns ok - assert_eq!( - created_block, - CreatedBlock { - hash: created_block.hash.clone(), - aux: ImportedAux { - header_only: false, - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - } - } - ); - // assert that there's a new block in the db. - let header = client.header(&BlockId::Number(1)).unwrap().unwrap(); - let (tx, rx) = futures::channel::oneshot::channel(); - sink.send(EngineCommand::FinalizeBlock { - sender: Some(tx), - hash: header.hash(), - justification: None - }).await.unwrap(); - // assert that the background task returns ok - assert_eq!(rx.await.unwrap().unwrap(), ()); - } + // assert that the background task returns ok + assert_eq!( + created_block, + CreatedBlock { + hash: created_block.hash.clone(), + aux: ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + is_new_best: true, + } + } + ); + // assert that there's a new block in the db. 
+ let header = client.header(&BlockId::Number(1)).unwrap().unwrap(); + let (tx, rx) = futures::channel::oneshot::channel(); + sink.send(EngineCommand::FinalizeBlock { + sender: Some(tx), + hash: header.hash(), + justification: None, + }) + .await + .unwrap(); + // assert that the background task returns ok + assert_eq!(rx.await.unwrap().unwrap(), ()); + } - #[tokio::test] - async fn manual_seal_fork_blocks() { - let builder = TestClientBuilder::new(); - let (client, select_chain) = builder.build_with_longest_chain(); - let client = Arc::new(client); - let inherent_data_providers = InherentDataProviders::new(); - let pool_api = api(); - let pool = Arc::new(BasicPool::new(Options::default(), pool_api.clone(), None).0); - let env = ProposerFactory::new( - client.clone(), - pool.clone(), - ); - // this test checks that blocks are created as soon as an engine command is sent over the stream. - let (mut sink, stream) = futures::channel::mpsc::channel(1024); - let future = run_manual_seal( - Box::new(client.clone()), - env, - client.clone(), - pool.pool().clone(), - stream, - select_chain, - inherent_data_providers, - ); - std::thread::spawn(|| { - let mut rt = tokio::runtime::Runtime::new().unwrap(); - // spawn the background authorship task - rt.block_on(future); - }); - // submit a transaction to pool. 
- let result = pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)).await; - // assert that it was successfully imported - assert!(result.is_ok()); + #[tokio::test] + async fn manual_seal_fork_blocks() { + let builder = TestClientBuilder::new(); + let (client, select_chain) = builder.build_with_longest_chain(); + let client = Arc::new(client); + let inherent_data_providers = InherentDataProviders::new(); + let pool_api = api(); + let pool = Arc::new(BasicPool::new(Options::default(), pool_api.clone(), None).0); + let env = ProposerFactory::new(client.clone(), pool.clone()); + // this test checks that blocks are created as soon as an engine command is sent over the stream. + let (mut sink, stream) = futures::channel::mpsc::channel(1024); + let future = run_manual_seal( + Box::new(client.clone()), + env, + client.clone(), + pool.pool().clone(), + stream, + select_chain, + inherent_data_providers, + ); + std::thread::spawn(|| { + let mut rt = tokio::runtime::Runtime::new().unwrap(); + // spawn the background authorship task + rt.block_on(future); + }); + // submit a transaction to pool. 
+ let result = pool + .submit_one(&BlockId::Number(0), SOURCE, uxt(Alice, 0)) + .await; + // assert that it was successfully imported + assert!(result.is_ok()); - let (tx, rx) = futures::channel::oneshot::channel(); - sink.send(EngineCommand::SealNewBlock { - parent_hash: None, - sender: Some(tx), - create_empty: false, - finalize: false, - }).await.unwrap(); - let created_block = rx.await.unwrap().unwrap(); - pool_api.increment_nonce(Alice.into()); + let (tx, rx) = futures::channel::oneshot::channel(); + sink.send(EngineCommand::SealNewBlock { + parent_hash: None, + sender: Some(tx), + create_empty: false, + finalize: false, + }) + .await + .unwrap(); + let created_block = rx.await.unwrap().unwrap(); + pool_api.increment_nonce(Alice.into()); - // assert that the background task returns ok - assert_eq!( - created_block, - CreatedBlock { - hash: created_block.hash.clone(), - aux: ImportedAux { - header_only: false, - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true - } - } - ); - // assert that there's a new block in the db. - assert!(client.header(&BlockId::Number(0)).unwrap().is_some()); - assert!(pool.submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)).await.is_ok()); + // assert that the background task returns ok + assert_eq!( + created_block, + CreatedBlock { + hash: created_block.hash.clone(), + aux: ImportedAux { + header_only: false, + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + is_new_best: true + } + } + ); + // assert that there's a new block in the db. 
+ assert!(client.header(&BlockId::Number(0)).unwrap().is_some()); + assert!(pool + .submit_one(&BlockId::Number(1), SOURCE, uxt(Alice, 1)) + .await + .is_ok()); - pool.maintain(sp_transaction_pool::ChainEvent::NewBlock { - id: BlockId::Number(1), - header: client.header(&BlockId::Number(1)).expect("db error").expect("imported above"), - is_new_best: true, - retracted: vec![], - }).await; + pool.maintain(sp_transaction_pool::ChainEvent::NewBlock { + id: BlockId::Number(1), + header: client + .header(&BlockId::Number(1)) + .expect("db error") + .expect("imported above"), + is_new_best: true, + retracted: vec![], + }) + .await; - let (tx1, rx1) = futures::channel::oneshot::channel(); - assert!(sink.send(EngineCommand::SealNewBlock { - parent_hash: Some(created_block.hash), - sender: Some(tx1), - create_empty: false, - finalize: false, - }).await.is_ok()); - assert_matches::assert_matches!( - rx1.await.expect("should be no error receiving"), - Ok(_) - ); - assert!(client.header(&BlockId::Number(1)).unwrap().is_some()); - pool_api.increment_nonce(Alice.into()); + let (tx1, rx1) = futures::channel::oneshot::channel(); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx1), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); + assert_matches::assert_matches!(rx1.await.expect("should be no error receiving"), Ok(_)); + assert!(client.header(&BlockId::Number(1)).unwrap().is_some()); + pool_api.increment_nonce(Alice.into()); - assert!(pool.submit_one(&BlockId::Number(2), SOURCE, uxt(Alice, 2)).await.is_ok()); - let (tx2, rx2) = futures::channel::oneshot::channel(); - assert!(sink.send(EngineCommand::SealNewBlock { - parent_hash: Some(created_block.hash), - sender: Some(tx2), - create_empty: false, - finalize: false, - }).await.is_ok()); - let imported = rx2.await.unwrap().unwrap(); - // assert that fork block is in the db - assert!(client.header(&BlockId::Hash(imported.hash)).unwrap().is_some()) - } + 
assert!(pool + .submit_one(&BlockId::Number(2), SOURCE, uxt(Alice, 2)) + .await + .is_ok()); + let (tx2, rx2) = futures::channel::oneshot::channel(); + assert!(sink + .send(EngineCommand::SealNewBlock { + parent_hash: Some(created_block.hash), + sender: Some(tx2), + create_empty: false, + finalize: false, + }) + .await + .is_ok()); + let imported = rx2.await.unwrap().unwrap(); + // assert that fork block is in the db + assert!(client + .header(&BlockId::Hash(imported.hash)) + .unwrap() + .is_some()) + } } diff --git a/client/consensus/manual-seal/src/rpc.rs b/client/consensus/manual-seal/src/rpc.rs index f3f0fe4a12..70a58bac52 100644 --- a/client/consensus/manual-seal/src/rpc.rs +++ b/client/consensus/manual-seal/src/rpc.rs @@ -15,18 +15,16 @@ // along with Substrate. If not, see . //! RPC interface for the ManualSeal Engine. -use sp_consensus::ImportedAux; -use jsonrpc_core::Error; -use jsonrpc_derive::rpc; +pub use self::gen_client::Client as ManualSealClient; use futures::{ - channel::{mpsc, oneshot}, - TryFutureExt, - FutureExt, - SinkExt + channel::{mpsc, oneshot}, + FutureExt, SinkExt, TryFutureExt, }; +use jsonrpc_core::Error; +use jsonrpc_derive::rpc; use serde::{Deserialize, Serialize}; +use sp_consensus::ImportedAux; use sp_runtime::Justification; -pub use self::gen_client::Client as ManualSealClient; /// Future's type for jsonrpc type FutureResult = Box + Send>; @@ -35,129 +33,139 @@ pub type Sender = Option /// Message sent to the background authorship task, usually by RPC. pub enum EngineCommand { - /// Tells the engine to propose a new block - /// - /// if create_empty == true, it will create empty blocks if there are no transactions - /// in the transaction pool. - /// - /// if finalize == true, the block will be instantly finalized. - SealNewBlock { - /// if true, empty blocks(without extrinsics) will be created. - /// otherwise, will return Error::EmptyTransactionPool. - create_empty: bool, - /// instantly finalize this block? 
- finalize: bool, - /// specify the parent hash of the about-to-created block - parent_hash: Option, - /// sender to report errors/success to the rpc. - sender: Sender>, - }, - /// Tells the engine to finalize the block with the supplied hash - FinalizeBlock { - /// hash of the block - hash: Hash, - /// sender to report errors/success to the rpc. - sender: Sender<()>, - /// finalization justification - justification: Option, - } + /// Tells the engine to propose a new block + /// + /// if create_empty == true, it will create empty blocks if there are no transactions + /// in the transaction pool. + /// + /// if finalize == true, the block will be instantly finalized. + SealNewBlock { + /// if true, empty blocks(without extrinsics) will be created. + /// otherwise, will return Error::EmptyTransactionPool. + create_empty: bool, + /// instantly finalize this block? + finalize: bool, + /// specify the parent hash of the about-to-created block + parent_hash: Option, + /// sender to report errors/success to the rpc. + sender: Sender>, + }, + /// Tells the engine to finalize the block with the supplied hash + FinalizeBlock { + /// hash of the block + hash: Hash, + /// sender to report errors/success to the rpc. + sender: Sender<()>, + /// finalization justification + justification: Option, + }, } /// RPC trait that provides methods for interacting with the manual-seal authorship task over rpc. 
#[rpc] pub trait ManualSealApi { - /// Instructs the manual-seal authorship task to create a new block - #[rpc(name = "engine_createBlock")] - fn create_block( - &self, - create_empty: bool, - finalize: bool, - parent_hash: Option - ) -> FutureResult>; - - /// Instructs the manual-seal authorship task to finalize a block - #[rpc(name = "engine_finalizeBlock")] - fn finalize_block( - &self, - hash: Hash, - justification: Option - ) -> FutureResult; + /// Instructs the manual-seal authorship task to create a new block + #[rpc(name = "engine_createBlock")] + fn create_block( + &self, + create_empty: bool, + finalize: bool, + parent_hash: Option, + ) -> FutureResult>; + + /// Instructs the manual-seal authorship task to finalize a block + #[rpc(name = "engine_finalizeBlock")] + fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> FutureResult; } /// A struct that implements the [`ManualSealApi`]. pub struct ManualSeal { - import_block_channel: mpsc::Sender>, + import_block_channel: mpsc::Sender>, } /// return type of `engine_createBlock` #[derive(Debug, Deserialize, Serialize, PartialEq, Eq)] pub struct CreatedBlock { - /// hash of the created block. - pub hash: Hash, - /// some extra details about the import operation - pub aux: ImportedAux + /// hash of the created block. + pub hash: Hash, + /// some extra details about the import operation + pub aux: ImportedAux, } impl ManualSeal { - /// Create new `ManualSeal` with the given reference to the client. - pub fn new(import_block_channel: mpsc::Sender>) -> Self { - Self { import_block_channel } - } + /// Create new `ManualSeal` with the given reference to the client. 
+ pub fn new(import_block_channel: mpsc::Sender>) -> Self { + Self { + import_block_channel, + } + } } impl ManualSealApi for ManualSeal { - fn create_block( - &self, - create_empty: bool, - finalize: bool, - parent_hash: Option - ) -> FutureResult> { - let mut sink = self.import_block_channel.clone(); - let future = async move { - let (sender, receiver) = oneshot::channel(); - let command = EngineCommand::SealNewBlock { - create_empty, - finalize, - parent_hash, - sender: Some(sender), - }; - sink.send(command).await?; - receiver.await? - }.boxed(); - - Box::new(future.map_err(Error::from).compat()) - } - - fn finalize_block(&self, hash: Hash, justification: Option) -> FutureResult { - let mut sink = self.import_block_channel.clone(); - let future = async move { - let (sender, receiver) = oneshot::channel(); - sink.send( - EngineCommand::FinalizeBlock { hash, sender: Some(sender), justification } - ).await?; - - receiver.await?.map(|_| true) - }; - - Box::new(future.boxed().map_err(Error::from).compat()) - } + fn create_block( + &self, + create_empty: bool, + finalize: bool, + parent_hash: Option, + ) -> FutureResult> { + let mut sink = self.import_block_channel.clone(); + let future = async move { + let (sender, receiver) = oneshot::channel(); + let command = EngineCommand::SealNewBlock { + create_empty, + finalize, + parent_hash, + sender: Some(sender), + }; + sink.send(command).await?; + receiver.await? 
+ } + .boxed(); + + Box::new(future.map_err(Error::from).compat()) + } + + fn finalize_block( + &self, + hash: Hash, + justification: Option, + ) -> FutureResult { + let mut sink = self.import_block_channel.clone(); + let future = async move { + let (sender, receiver) = oneshot::channel(); + sink.send(EngineCommand::FinalizeBlock { + hash, + sender: Some(sender), + justification, + }) + .await?; + + receiver.await?.map(|_| true) + }; + + Box::new(future.boxed().map_err(Error::from).compat()) + } } /// report any errors or successes encountered by the authorship task back /// to the rpc pub fn send_result( - sender: &mut Sender, - result: std::result::Result + sender: &mut Sender, + result: std::result::Result, ) { - if let Some(sender) = sender.take() { - if let Err(err) = sender.send(result) { - log::warn!("Server is shutting down: {:?}", err) - } - } else { - // instant seal doesn't report errors over rpc, simply log them. - match result { - Ok(r) => log::info!("Instant Seal success: {:?}", r), - Err(e) => log::error!("Instant Seal encountered an error: {}", e) - } - } + if let Some(sender) = sender.take() { + if let Err(err) = sender.send(result) { + log::warn!("Server is shutting down: {:?}", err) + } + } else { + // instant seal doesn't report errors over rpc, simply log them. + match result { + Ok(r) => log::info!("Instant Seal success: {:?}", r), + Err(e) => log::error!("Instant Seal encountered an error: {}", e), + } + } } diff --git a/client/consensus/manual-seal/src/seal_new_block.rs b/client/consensus/manual-seal/src/seal_new_block.rs index 88b58ef4cc..2d60744fa3 100644 --- a/client/consensus/manual-seal/src/seal_new_block.rs +++ b/client/consensus/manual-seal/src/seal_new_block.rs @@ -16,122 +16,127 @@ //! 
Block sealing utilities -use crate::{Error, rpc}; -use std::sync::Arc; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT}, - generic::BlockId, -}; +use crate::{rpc, Error}; use futures::prelude::*; -use sc_transaction_pool::txpool; use rpc::CreatedBlock; +use sc_transaction_pool::txpool; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, +}; +use std::sync::Arc; +use sp_blockchain::HeaderBackend; use sp_consensus::{ - self, BlockImport, Environment, Proposer, - ForkChoiceStrategy, BlockImportParams, BlockOrigin, - ImportResult, SelectChain, - import_queue::BoxBlockImport, + self, import_queue::BoxBlockImport, BlockImport, BlockImportParams, BlockOrigin, Environment, + ForkChoiceStrategy, ImportResult, Proposer, SelectChain, }; -use sp_blockchain::HeaderBackend; +use sp_inherents::InherentDataProviders; use std::collections::HashMap; use std::time::Duration; -use sp_inherents::InherentDataProviders; /// max duration for creating a proposal in secs const MAX_PROPOSAL_DURATION: u64 = 10; /// params for sealing a new block pub struct SealBlockParams<'a, B: BlockT, SC, HB, E, T, P: txpool::ChainApi> { - /// if true, empty blocks(without extrinsics) will be created. - /// otherwise, will return Error::EmptyTransactionPool. - pub create_empty: bool, - /// instantly finalize this block? - pub finalize: bool, - /// specify the parent hash of the about-to-created block - pub parent_hash: Option<::Hash>, - /// sender to report errors/success to the rpc. - pub sender: rpc::Sender::Hash>>, - /// transaction pool - pub pool: Arc>, - /// header backend - pub client: Arc, - /// Environment trait object for creating a proposer - pub env: &'a mut E, - /// SelectChain object - pub select_chain: &'a SC, - /// block import object - pub block_import: &'a mut BoxBlockImport, - /// inherent data provider - pub inherent_data_provider: &'a InherentDataProviders, + /// if true, empty blocks(without extrinsics) will be created. 
+ /// otherwise, will return Error::EmptyTransactionPool. + pub create_empty: bool, + /// instantly finalize this block? + pub finalize: bool, + /// specify the parent hash of the about-to-created block + pub parent_hash: Option<::Hash>, + /// sender to report errors/success to the rpc. + pub sender: rpc::Sender::Hash>>, + /// transaction pool + pub pool: Arc>, + /// header backend + pub client: Arc, + /// Environment trait object for creating a proposer + pub env: &'a mut E, + /// SelectChain object + pub select_chain: &'a SC, + /// block import object + pub block_import: &'a mut BoxBlockImport, + /// inherent data provider + pub inherent_data_provider: &'a InherentDataProviders, } /// seals a new block with the given params pub async fn seal_new_block( - SealBlockParams { - create_empty, - finalize, - pool, - parent_hash, - client, - select_chain, - block_import, - env, - inherent_data_provider, - mut sender, - .. - }: SealBlockParams<'_, B, SC, HB, E, T, P> -) - where - B: BlockT, - HB: HeaderBackend, - E: Environment, - >::Error: std::fmt::Display, - >::Error: std::fmt::Display, - P: txpool::ChainApi::Hash>, - SC: SelectChain, + SealBlockParams { + create_empty, + finalize, + pool, + parent_hash, + client, + select_chain, + block_import, + env, + inherent_data_provider, + mut sender, + .. + }: SealBlockParams<'_, B, SC, HB, E, T, P>, +) where + B: BlockT, + HB: HeaderBackend, + E: Environment, + >::Error: std::fmt::Display, + >::Error: std::fmt::Display, + P: txpool::ChainApi::Hash>, + SC: SelectChain, { - let future = async { - if pool.validated_pool().status().ready == 0 && !create_empty { - return Err(Error::EmptyTransactionPool) - } + let future = async { + if pool.validated_pool().status().ready == 0 && !create_empty { + return Err(Error::EmptyTransactionPool); + } - // get the header to build this new block on. - // use the parent_hash supplied via `EngineCommand` - // or fetch the best_block. 
- let header = match parent_hash { - Some(hash) => { - match client.header(BlockId::Hash(hash))? { - Some(header) => header, - None => return Err(Error::BlockNotFound(format!("{}", hash))), - } - } - None => select_chain.best_chain()? - }; + // get the header to build this new block on. + // use the parent_hash supplied via `EngineCommand` + // or fetch the best_block. + let header = match parent_hash { + Some(hash) => match client.header(BlockId::Hash(hash))? { + Some(header) => header, + None => return Err(Error::BlockNotFound(format!("{}", hash))), + }, + None => select_chain.best_chain()?, + }; - let mut proposer = env.init(&header) - .map_err(|err| Error::StringError(format!("{}", err))).await?; - let id = inherent_data_provider.create_inherent_data()?; - let inherents_len = id.len(); - let proposal = proposer.propose(id, Default::default(), Duration::from_secs(MAX_PROPOSAL_DURATION), false.into()) - .map_err(|err| Error::StringError(format!("{}", err))).await?; + let mut proposer = env + .init(&header) + .map_err(|err| Error::StringError(format!("{}", err))) + .await?; + let id = inherent_data_provider.create_inherent_data()?; + let inherents_len = id.len(); + let proposal = proposer + .propose( + id, + Default::default(), + Duration::from_secs(MAX_PROPOSAL_DURATION), + false.into(), + ) + .map_err(|err| Error::StringError(format!("{}", err))) + .await?; - if proposal.block.extrinsics().len() == inherents_len && !create_empty { - return Err(Error::EmptyTransactionPool) - } + if proposal.block.extrinsics().len() == inherents_len && !create_empty { + return Err(Error::EmptyTransactionPool); + } - let (header, body) = proposal.block.deconstruct(); - let mut params = BlockImportParams::new(BlockOrigin::Own, header.clone()); - params.body = Some(body); - params.finalized = finalize; - params.fork_choice = Some(ForkChoiceStrategy::LongestChain); + let (header, body) = proposal.block.deconstruct(); + let mut params = BlockImportParams::new(BlockOrigin::Own, 
header.clone()); + params.body = Some(body); + params.finalized = finalize; + params.fork_choice = Some(ForkChoiceStrategy::LongestChain); - match block_import.import_block(params, HashMap::new())? { - ImportResult::Imported(aux) => { - Ok(CreatedBlock { hash: ::Header::hash(&header), aux }) - }, - other => Err(other.into()), - } - }; + match block_import.import_block(params, HashMap::new())? { + ImportResult::Imported(aux) => Ok(CreatedBlock { + hash: ::Header::hash(&header), + aux, + }), + other => Err(other.into()), + } + }; - rpc::send_result(&mut sender, future.await) + rpc::send_result(&mut sender, future.await) } diff --git a/client/consensus/pow/src/lib.rs b/client/consensus/pow/src/lib.rs index de41ea7bd2..8341c9f584 100644 --- a/client/consensus/pow/src/lib.rs +++ b/client/consensus/pow/src/lib.rs @@ -29,76 +29,76 @@ //! as the storage, but it is not recommended as it won't work well with light //! clients. -use std::sync::Arc; -use std::any::Any; -use std::borrow::Cow; -use std::thread; -use std::collections::HashMap; -use std::marker::PhantomData; -use sc_client_api::{BlockOf, backend::AuxStore}; -use sp_blockchain::{HeaderBackend, ProvideCache, well_known_cache_keys::Id as CacheKeyId}; -use sp_block_builder::BlockBuilder as BlockBuilderApi; -use sp_runtime::{Justification, RuntimeString}; -use sp_runtime::generic::{BlockId, Digest, DigestItem}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use codec::{Decode, Encode}; +use log::*; +use sc_client_api; +use sc_client_api::{backend::AuxStore, BlockOf}; use sp_api::ProvideRuntimeApi; -use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; -use sp_inherents::{InherentDataProviders, InherentData}; +use sp_block_builder::BlockBuilder as BlockBuilderApi; +use sp_blockchain::{well_known_cache_keys::Id as CacheKeyId, HeaderBackend, ProvideCache}; +use sp_consensus::import_queue::{BasicQueue, BoxBlockImport, Verifier}; use sp_consensus::{ - BlockImportParams, BlockOrigin, 
ForkChoiceStrategy, SyncOracle, Environment, Proposer, - SelectChain, Error as ConsensusError, CanAuthorWith, RecordProof, BlockImport, - BlockCheckParams, ImportResult, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, CanAuthorWith, Environment, + Error as ConsensusError, ForkChoiceStrategy, ImportResult, Proposer, RecordProof, SelectChain, + SyncOracle, }; -use sp_consensus::import_queue::{BoxBlockImport, BasicQueue, Verifier}; -use codec::{Encode, Decode}; -use sc_client_api; -use log::*; +use sp_consensus_pow::{Seal, TotalDifficulty, POW_ENGINE_ID}; +use sp_inherents::{InherentData, InherentDataProviders}; +use sp_runtime::generic::{BlockId, Digest, DigestItem}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT}; +use sp_runtime::{Justification, RuntimeString}; use sp_timestamp::{InherentError as TIError, TimestampInherentData}; +use std::any::Any; +use std::borrow::Cow; +use std::collections::HashMap; +use std::marker::PhantomData; +use std::sync::Arc; +use std::thread; #[derive(derive_more::Display, Debug)] pub enum Error { - #[display(fmt = "Header uses the wrong engine {:?}", _0)] - WrongEngine([u8; 4]), - #[display(fmt = "Header {:?} is unsealed", _0)] - HeaderUnsealed(B::Hash), - #[display(fmt = "PoW validation error: invalid seal")] - InvalidSeal, - #[display(fmt = "PoW validation error: preliminary verification failed")] - FailedPreliminaryVerify, - #[display(fmt = "Rejecting block too far in future")] - TooFarInFuture, - #[display(fmt = "Fetching best header failed using select chain: {:?}", _0)] - BestHeaderSelectChain(ConsensusError), - #[display(fmt = "Fetching best header failed: {:?}", _0)] - BestHeader(sp_blockchain::Error), - #[display(fmt = "Best header does not exist")] - NoBestHeader, - #[display(fmt = "Block proposing error: {:?}", _0)] - BlockProposingError(String), - #[display(fmt = "Fetch best hash failed via select chain: {:?}", _0)] - BestHashSelectChain(ConsensusError), - #[display(fmt = "Error with block 
built on {:?}: {:?}", _0, _1)] - BlockBuiltError(B::Hash, ConsensusError), - #[display(fmt = "Creating inherents failed: {}", _0)] - CreateInherents(sp_inherents::Error), - #[display(fmt = "Checking inherents failed: {}", _0)] - CheckInherents(String), - Client(sp_blockchain::Error), - Codec(codec::Error), - Environment(String), - Runtime(RuntimeString) + #[display(fmt = "Header uses the wrong engine {:?}", _0)] + WrongEngine([u8; 4]), + #[display(fmt = "Header {:?} is unsealed", _0)] + HeaderUnsealed(B::Hash), + #[display(fmt = "PoW validation error: invalid seal")] + InvalidSeal, + #[display(fmt = "PoW validation error: preliminary verification failed")] + FailedPreliminaryVerify, + #[display(fmt = "Rejecting block too far in future")] + TooFarInFuture, + #[display(fmt = "Fetching best header failed using select chain: {:?}", _0)] + BestHeaderSelectChain(ConsensusError), + #[display(fmt = "Fetching best header failed: {:?}", _0)] + BestHeader(sp_blockchain::Error), + #[display(fmt = "Best header does not exist")] + NoBestHeader, + #[display(fmt = "Block proposing error: {:?}", _0)] + BlockProposingError(String), + #[display(fmt = "Fetch best hash failed via select chain: {:?}", _0)] + BestHashSelectChain(ConsensusError), + #[display(fmt = "Error with block built on {:?}: {:?}", _0, _1)] + BlockBuiltError(B::Hash, ConsensusError), + #[display(fmt = "Creating inherents failed: {}", _0)] + CreateInherents(sp_inherents::Error), + #[display(fmt = "Checking inherents failed: {}", _0)] + CheckInherents(String), + Client(sp_blockchain::Error), + Codec(codec::Error), + Environment(String), + Runtime(RuntimeString), } impl std::convert::From> for String { - fn from(error: Error) -> String { - error.to_string() - } + fn from(error: Error) -> String { + error.to_string() + } } impl std::convert::From> for ConsensusError { - fn from(error: Error) -> ConsensusError { - ConsensusError::ClientImport(error.to_string()) - } + fn from(error: Error) -> ConsensusError { + 
ConsensusError::ClientImport(error.to_string()) + } } /// Auxiliary storage prefix for PoW engine. @@ -106,14 +106,18 @@ pub const POW_AUX_PREFIX: [u8; 4] = *b"PoW:"; /// Get the auxiliary storage key used by engine to store total difficulty. fn aux_key>(hash: &T) -> Vec { - POW_AUX_PREFIX.iter().chain(hash.as_ref()).copied().collect() + POW_AUX_PREFIX + .iter() + .chain(hash.as_ref()) + .copied() + .collect() } /// Intermediate value passed to block importer. #[derive(Encode, Decode, Clone, Debug, Default)] pub struct PowIntermediate { - /// Difficulty of the block, if known. - pub difficulty: Option, + /// Difficulty of the block, if known. + pub difficulty: Option, } /// Intermediate key for PoW engine. @@ -122,333 +126,352 @@ pub static INTERMEDIATE_KEY: &[u8] = b"pow1"; /// Auxiliary storage data for PoW. #[derive(Encode, Decode, Clone, Debug, Default)] pub struct PowAux { - /// Difficulty of the current block. - pub difficulty: Difficulty, - /// Total difficulty up to current block. - pub total_difficulty: Difficulty, + /// Difficulty of the current block. + pub difficulty: Difficulty, + /// Total difficulty up to current block. + pub total_difficulty: Difficulty, } -impl PowAux where - Difficulty: Decode + Default, +impl PowAux +where + Difficulty: Decode + Default, { - /// Read the auxiliary from client. - pub fn read(client: &C, hash: &B::Hash) -> Result> { - let key = aux_key(&hash); - - match client.get_aux(&key).map_err(Error::Client)? { - Some(bytes) => Self::decode(&mut &bytes[..]).map_err(Error::Codec), - None => Ok(Self::default()), - } - } + /// Read the auxiliary from client. + pub fn read(client: &C, hash: &B::Hash) -> Result> { + let key = aux_key(&hash); + + match client.get_aux(&key).map_err(Error::Client)? { + Some(bytes) => Self::decode(&mut &bytes[..]).map_err(Error::Codec), + None => Ok(Self::default()), + } + } } /// Algorithm used for proof of work. pub trait PowAlgorithm { - /// Difficulty for the algorithm. 
- type Difficulty: TotalDifficulty + Default + Encode + Decode + Ord + Clone + Copy; - - /// Get the next block's difficulty. - /// - /// This function will be called twice during the import process, so the implementation - /// should be properly cached. - fn difficulty(&self, parent: B::Hash) -> Result>; - /// Verify that the seal is valid against given pre hash when parent block is not yet imported. - /// - /// None means that preliminary verify is not available for this algorithm. - fn preliminary_verify( - &self, - _pre_hash: &B::Hash, - _seal: &Seal, - ) -> Result, Error> { - Ok(None) - } - /// Verify that the difficulty is valid against given seal. - fn verify( - &self, - parent: &BlockId, - pre_hash: &B::Hash, - seal: &Seal, - difficulty: Self::Difficulty, - ) -> Result>; - /// Mine a seal that satisfies the given difficulty. - fn mine( - &self, - parent: &BlockId, - pre_hash: &B::Hash, - difficulty: Self::Difficulty, - round: u32, - ) -> Result, Error>; + /// Difficulty for the algorithm. + type Difficulty: TotalDifficulty + Default + Encode + Decode + Ord + Clone + Copy; + + /// Get the next block's difficulty. + /// + /// This function will be called twice during the import process, so the implementation + /// should be properly cached. + fn difficulty(&self, parent: B::Hash) -> Result>; + /// Verify that the seal is valid against given pre hash when parent block is not yet imported. + /// + /// None means that preliminary verify is not available for this algorithm. + fn preliminary_verify( + &self, + _pre_hash: &B::Hash, + _seal: &Seal, + ) -> Result, Error> { + Ok(None) + } + /// Verify that the difficulty is valid against given seal. + fn verify( + &self, + parent: &BlockId, + pre_hash: &B::Hash, + seal: &Seal, + difficulty: Self::Difficulty, + ) -> Result>; + /// Mine a seal that satisfies the given difficulty. 
+ fn mine( + &self, + parent: &BlockId, + pre_hash: &B::Hash, + difficulty: Self::Difficulty, + round: u32, + ) -> Result, Error>; } /// A block importer for PoW. pub struct PowBlockImport { - algorithm: Algorithm, - inner: I, - select_chain: Option, - client: Arc, - inherent_data_providers: sp_inherents::InherentDataProviders, - check_inherents_after: <::Header as HeaderT>::Number, + algorithm: Algorithm, + inner: I, + select_chain: Option, + client: Arc, + inherent_data_providers: sp_inherents::InherentDataProviders, + check_inherents_after: <::Header as HeaderT>::Number, } -impl Clone for PowBlockImport { - fn clone(&self) -> Self { - Self { - algorithm: self.algorithm.clone(), - inner: self.inner.clone(), - select_chain: self.select_chain.clone(), - client: self.client.clone(), - inherent_data_providers: self.inherent_data_providers.clone(), - check_inherents_after: self.check_inherents_after.clone(), - } - } +impl Clone + for PowBlockImport +{ + fn clone(&self) -> Self { + Self { + algorithm: self.algorithm.clone(), + inner: self.inner.clone(), + select_chain: self.select_chain.clone(), + client: self.client.clone(), + inherent_data_providers: self.inherent_data_providers.clone(), + check_inherents_after: self.check_inherents_after.clone(), + } + } } -impl PowBlockImport where - B: BlockT, - I: BlockImport> + Send + Sync, - I::Error: Into, - C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, - C::Api: BlockBuilderApi, - Algorithm: PowAlgorithm, +impl PowBlockImport +where + B: BlockT, + I: BlockImport> + Send + Sync, + I::Error: Into, + C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, + C::Api: BlockBuilderApi, + Algorithm: PowAlgorithm, { - /// Create a new block import suitable to be used in PoW - pub fn new( - inner: I, - client: Arc, - algorithm: Algorithm, - check_inherents_after: <::Header as HeaderT>::Number, - select_chain: Option, - inherent_data_providers: 
sp_inherents::InherentDataProviders, - ) -> Self { - Self { inner, client, algorithm, check_inherents_after, - select_chain, inherent_data_providers } - } - - fn check_inherents( - &self, - block: B, - block_id: BlockId, - inherent_data: InherentData, - timestamp_now: u64, - ) -> Result<(), Error> { - const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; - - if *block.header().number() < self.check_inherents_after { - return Ok(()) - } - - let inherent_res = self.client.runtime_api().check_inherents( - &block_id, - block, - inherent_data, - ).map_err(Error::Client)?; - - if !inherent_res.ok() { - inherent_res - .into_errors() - .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { - Some(TIError::ValidAtTimestamp(timestamp)) => { - if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { - return Err(Error::TooFarInFuture); - } - - Ok(()) - }, - Some(TIError::Other(e)) => Err(Error::Runtime(e)), - None => Err(Error::CheckInherents( - self.inherent_data_providers.error_to_string(&i, &e) - )), - }) - } else { - Ok(()) - } - } + /// Create a new block import suitable to be used in PoW + pub fn new( + inner: I, + client: Arc, + algorithm: Algorithm, + check_inherents_after: <::Header as HeaderT>::Number, + select_chain: Option, + inherent_data_providers: sp_inherents::InherentDataProviders, + ) -> Self { + Self { + inner, + client, + algorithm, + check_inherents_after, + select_chain, + inherent_data_providers, + } + } + + fn check_inherents( + &self, + block: B, + block_id: BlockId, + inherent_data: InherentData, + timestamp_now: u64, + ) -> Result<(), Error> { + const MAX_TIMESTAMP_DRIFT_SECS: u64 = 60; + + if *block.header().number() < self.check_inherents_after { + return Ok(()); + } + + let inherent_res = self + .client + .runtime_api() + .check_inherents(&block_id, block, inherent_data) + .map_err(Error::Client)?; + + if !inherent_res.ok() { + inherent_res + .into_errors() + .try_for_each(|(i, e)| match TIError::try_from(&i, &e) { + 
Some(TIError::ValidAtTimestamp(timestamp)) => { + if timestamp > timestamp_now + MAX_TIMESTAMP_DRIFT_SECS { + return Err(Error::TooFarInFuture); + } + + Ok(()) + } + Some(TIError::Other(e)) => Err(Error::Runtime(e)), + None => Err(Error::CheckInherents( + self.inherent_data_providers.error_to_string(&i, &e), + )), + }) + } else { + Ok(()) + } + } } -impl BlockImport for PowBlockImport where - B: BlockT, - I: BlockImport> + Send + Sync, - I::Error: Into, - S: SelectChain, - C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, - C::Api: BlockBuilderApi, - Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, +impl BlockImport for PowBlockImport +where + B: BlockT, + I: BlockImport> + Send + Sync, + I::Error: Into, + S: SelectChain, + C: ProvideRuntimeApi + Send + Sync + HeaderBackend + AuxStore + ProvideCache + BlockOf, + C::Api: BlockBuilderApi, + Algorithm: PowAlgorithm, + Algorithm::Difficulty: 'static, { - type Error = ConsensusError; - type Transaction = sp_api::TransactionFor; - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block).map_err(Into::into) - } - - fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let best_hash = match self.select_chain.as_ref() { - Some(select_chain) => select_chain.best_chain() - .map_err(|e| format!("Fetch best chain failed via select chain: {:?}", e))? 
- .hash(), - None => self.client.info().best_hash, - }; - - let parent_hash = *block.header.parent_hash(); - let best_aux = PowAux::read::<_, B>(self.client.as_ref(), &best_hash)?; - let mut aux = PowAux::read::<_, B>(self.client.as_ref(), &parent_hash)?; - - if let Some(inner_body) = block.body.take() { - let inherent_data = self.inherent_data_providers - .create_inherent_data().map_err(|e| e.into_string())?; - let timestamp_now = inherent_data.timestamp_inherent_data().map_err(|e| e.into_string())?; - - let check_block = B::new(block.header.clone(), inner_body); - - self.check_inherents( - check_block.clone(), - BlockId::Hash(parent_hash), - inherent_data, - timestamp_now - )?; - - block.body = Some(check_block.deconstruct().1); - } - - let inner_seal = match block.post_digests.last() { - Some(DigestItem::Seal(id, seal)) => { - if id == &POW_ENGINE_ID { - seal.clone() - } else { - return Err(Error::::WrongEngine(*id).into()) - } - }, - _ => return Err(Error::::HeaderUnsealed(block.header.hash()).into()), - }; - - let intermediate = block.take_intermediate::>( - INTERMEDIATE_KEY - )?; - - let difficulty = match intermediate.difficulty { - Some(difficulty) => difficulty, - None => self.algorithm.difficulty(parent_hash)?, - }; - - let pre_hash = block.header.hash(); - if !self.algorithm.verify( - &BlockId::hash(parent_hash), - &pre_hash, - &inner_seal, - difficulty, - )? 
{ - return Err(Error::::InvalidSeal.into()) - } - - aux.difficulty = difficulty; - aux.total_difficulty.increment(difficulty); - - let key = aux_key(&block.post_hash()); - block.auxiliary.push((key, Some(aux.encode()))); - if block.fork_choice.is_none() { - block.fork_choice = Some(ForkChoiceStrategy::Custom( - aux.total_difficulty > best_aux.total_difficulty - )); - } - - self.inner.import_block(block, new_cache).map_err(Into::into) - } + type Error = ConsensusError; + type Transaction = sp_api::TransactionFor; + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + self.inner.check_block(block).map_err(Into::into) + } + + fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let best_hash = match self.select_chain.as_ref() { + Some(select_chain) => select_chain + .best_chain() + .map_err(|e| format!("Fetch best chain failed via select chain: {:?}", e))? + .hash(), + None => self.client.info().best_hash, + }; + + let parent_hash = *block.header.parent_hash(); + let best_aux = PowAux::read::<_, B>(self.client.as_ref(), &best_hash)?; + let mut aux = PowAux::read::<_, B>(self.client.as_ref(), &parent_hash)?; + + if let Some(inner_body) = block.body.take() { + let inherent_data = self + .inherent_data_providers + .create_inherent_data() + .map_err(|e| e.into_string())?; + let timestamp_now = inherent_data + .timestamp_inherent_data() + .map_err(|e| e.into_string())?; + + let check_block = B::new(block.header.clone(), inner_body); + + self.check_inherents( + check_block.clone(), + BlockId::Hash(parent_hash), + inherent_data, + timestamp_now, + )?; + + block.body = Some(check_block.deconstruct().1); + } + + let inner_seal = match block.post_digests.last() { + Some(DigestItem::Seal(id, seal)) => { + if id == &POW_ENGINE_ID { + seal.clone() + } else { + return Err(Error::::WrongEngine(*id).into()); + } + } + _ => return Err(Error::::HeaderUnsealed(block.header.hash()).into()), + }; + + let intermediate = + 
block.take_intermediate::>(INTERMEDIATE_KEY)?; + + let difficulty = match intermediate.difficulty { + Some(difficulty) => difficulty, + None => self.algorithm.difficulty(parent_hash)?, + }; + + let pre_hash = block.header.hash(); + if !self.algorithm.verify( + &BlockId::hash(parent_hash), + &pre_hash, + &inner_seal, + difficulty, + )? { + return Err(Error::::InvalidSeal.into()); + } + + aux.difficulty = difficulty; + aux.total_difficulty.increment(difficulty); + + let key = aux_key(&block.post_hash()); + block.auxiliary.push((key, Some(aux.encode()))); + if block.fork_choice.is_none() { + block.fork_choice = Some(ForkChoiceStrategy::Custom( + aux.total_difficulty > best_aux.total_difficulty, + )); + } + + self.inner + .import_block(block, new_cache) + .map_err(Into::into) + } } /// A verifier for PoW blocks. pub struct PowVerifier { - algorithm: Algorithm, - _marker: PhantomData, + algorithm: Algorithm, + _marker: PhantomData, } impl PowVerifier { - pub fn new( - algorithm: Algorithm, - ) -> Self { - Self { algorithm, _marker: PhantomData } - } - - fn check_header( - &self, - mut header: B::Header, - ) -> Result<(B::Header, DigestItem), Error> where - Algorithm: PowAlgorithm, - { - let hash = header.hash(); - - let (seal, inner_seal) = match header.digest_mut().pop() { - Some(DigestItem::Seal(id, seal)) => { - if id == POW_ENGINE_ID { - (DigestItem::Seal(id, seal.clone()), seal) - } else { - return Err(Error::WrongEngine(id)) - } - }, - _ => return Err(Error::HeaderUnsealed(hash)), - }; - - let pre_hash = header.hash(); - - if !self.algorithm.preliminary_verify(&pre_hash, &inner_seal)?.unwrap_or(true) { - return Err(Error::FailedPreliminaryVerify); - } - - Ok((header, seal)) - } + pub fn new(algorithm: Algorithm) -> Self { + Self { + algorithm, + _marker: PhantomData, + } + } + + fn check_header( + &self, + mut header: B::Header, + ) -> Result<(B::Header, DigestItem), Error> + where + Algorithm: PowAlgorithm, + { + let hash = header.hash(); + + let (seal, 
inner_seal) = match header.digest_mut().pop() { + Some(DigestItem::Seal(id, seal)) => { + if id == POW_ENGINE_ID { + (DigestItem::Seal(id, seal.clone()), seal) + } else { + return Err(Error::WrongEngine(id)); + } + } + _ => return Err(Error::HeaderUnsealed(hash)), + }; + + let pre_hash = header.hash(); + + if !self + .algorithm + .preliminary_verify(&pre_hash, &inner_seal)? + .unwrap_or(true) + { + return Err(Error::FailedPreliminaryVerify); + } + + Ok((header, seal)) + } } -impl Verifier for PowVerifier where - Algorithm: PowAlgorithm + Send + Sync, - Algorithm::Difficulty: 'static, +impl Verifier for PowVerifier +where + Algorithm: PowAlgorithm + Send + Sync, + Algorithm::Difficulty: 'static, { - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - let (checked_header, seal) = self.check_header(header)?; - - let intermediate = PowIntermediate:: { - difficulty: None, - }; - - let mut import_block = BlockImportParams::new(origin, checked_header); - import_block.post_digests.push(seal); - import_block.body = body; - import_block.justification = justification; - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box - ); - import_block.post_hash = Some(hash); - - Ok((import_block, None)) - } + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + let hash = header.hash(); + let (checked_header, seal) = self.check_header(header)?; + + let intermediate = PowIntermediate:: { difficulty: None }; + + let mut import_block = BlockImportParams::new(origin, checked_header); + import_block.post_digests.push(seal); + import_block.body = body; + import_block.justification = justification; + import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(intermediate) as 
Box, + ); + import_block.post_hash = Some(hash); + + Ok((import_block, None)) + } } /// Register the PoW inherent data provider, if not registered already. pub fn register_pow_inherent_data_provider( - inherent_data_providers: &InherentDataProviders, + inherent_data_providers: &InherentDataProviders, ) -> Result<(), sp_consensus::Error> { - if !inherent_data_providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_timestamp::InherentDataProvider) - .map_err(Into::into) - .map_err(sp_consensus::Error::InherentData) - } else { - Ok(()) - } + if !inherent_data_providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(sp_timestamp::InherentDataProvider) + .map_err(Into::into) + .map_err(sp_consensus::Error::InherentData) + } else { + Ok(()) + } } /// The PoW import queue type. @@ -456,27 +479,20 @@ pub type PowImportQueue = BasicQueue; /// Import queue for PoW engine. pub fn import_queue( - block_import: BoxBlockImport, - algorithm: Algorithm, - inherent_data_providers: InherentDataProviders, -) -> Result< - PowImportQueue, - sp_consensus::Error -> where - B: BlockT, - Transaction: Send + Sync + 'static, - Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, + block_import: BoxBlockImport, + algorithm: Algorithm, + inherent_data_providers: InherentDataProviders, +) -> Result, sp_consensus::Error> +where + B: BlockT, + Transaction: Send + Sync + 'static, + Algorithm: PowAlgorithm + Clone + Send + Sync + 'static, { - register_pow_inherent_data_provider(&inherent_data_providers)?; + register_pow_inherent_data_provider(&inherent_data_providers)?; - let verifier = PowVerifier::new(algorithm); + let verifier = PowVerifier::new(algorithm); - Ok(BasicQueue::new( - verifier, - block_import, - None, - None - )) + Ok(BasicQueue::new(verifier, block_import, None, None)) } /// Start the background mining thread for PoW. 
Note that because PoW mining @@ -490,178 +506,180 @@ pub fn import_queue( /// CPU miner runs each time. This parameter should be tweaked so that each /// mining round is within sub-second time. pub fn start_mine( - mut block_import: BoxBlockImport>, - client: Arc, - algorithm: Algorithm, - mut env: E, - preruntime: Option>, - round: u32, - mut sync_oracle: SO, - build_time: std::time::Duration, - select_chain: Option, - inherent_data_providers: sp_inherents::InherentDataProviders, - can_author_with: CAW, + mut block_import: BoxBlockImport>, + client: Arc, + algorithm: Algorithm, + mut env: E, + preruntime: Option>, + round: u32, + mut sync_oracle: SO, + build_time: std::time::Duration, + select_chain: Option, + inherent_data_providers: sp_inherents::InherentDataProviders, + can_author_with: CAW, ) where - C: HeaderBackend + AuxStore + ProvideRuntimeApi + 'static, - Algorithm: PowAlgorithm + Send + Sync + 'static, - E: Environment + Send + Sync + 'static, - E::Error: std::fmt::Debug, - E::Proposer: Proposer>, - SO: SyncOracle + Send + Sync + 'static, - S: SelectChain + 'static, - CAW: CanAuthorWith + Send + 'static, + C: HeaderBackend + AuxStore + ProvideRuntimeApi + 'static, + Algorithm: PowAlgorithm + Send + Sync + 'static, + E: Environment + Send + Sync + 'static, + E::Error: std::fmt::Debug, + E::Proposer: Proposer>, + SO: SyncOracle + Send + Sync + 'static, + S: SelectChain + 'static, + CAW: CanAuthorWith + Send + 'static, { - if let Err(_) = register_pow_inherent_data_provider(&inherent_data_providers) { - warn!("Registering inherent data provider for timestamp failed"); - } - - thread::spawn(move || { - loop { - match mine_loop( - &mut block_import, - client.as_ref(), - &algorithm, - &mut env, - preruntime.as_ref(), - round, - &mut sync_oracle, - build_time.clone(), - select_chain.as_ref(), - &inherent_data_providers, - &can_author_with, - ) { - Ok(()) => (), - Err(e) => error!( - "Mining block failed with {:?}. 
Sleep for 1 second before restarting...", - e - ), - } - std::thread::sleep(std::time::Duration::new(1, 0)); - } - }); + if let Err(_) = register_pow_inherent_data_provider(&inherent_data_providers) { + warn!("Registering inherent data provider for timestamp failed"); + } + + thread::spawn(move || loop { + match mine_loop( + &mut block_import, + client.as_ref(), + &algorithm, + &mut env, + preruntime.as_ref(), + round, + &mut sync_oracle, + build_time.clone(), + select_chain.as_ref(), + &inherent_data_providers, + &can_author_with, + ) { + Ok(()) => (), + Err(e) => error!( + "Mining block failed with {:?}. Sleep for 1 second before restarting...", + e + ), + } + std::thread::sleep(std::time::Duration::new(1, 0)); + }); } fn mine_loop( - block_import: &mut BoxBlockImport>, - client: &C, - algorithm: &Algorithm, - env: &mut E, - preruntime: Option<&Vec>, - round: u32, - sync_oracle: &mut SO, - build_time: std::time::Duration, - select_chain: Option<&S>, - inherent_data_providers: &sp_inherents::InherentDataProviders, - can_author_with: &CAW, -) -> Result<(), Error> where - C: HeaderBackend + AuxStore + ProvideRuntimeApi, - Algorithm: PowAlgorithm, - Algorithm::Difficulty: 'static, - E: Environment, - E::Proposer: Proposer>, - E::Error: std::fmt::Debug, - SO: SyncOracle, - S: SelectChain, - sp_api::TransactionFor: 'static, - CAW: CanAuthorWith, + block_import: &mut BoxBlockImport>, + client: &C, + algorithm: &Algorithm, + env: &mut E, + preruntime: Option<&Vec>, + round: u32, + sync_oracle: &mut SO, + build_time: std::time::Duration, + select_chain: Option<&S>, + inherent_data_providers: &sp_inherents::InherentDataProviders, + can_author_with: &CAW, +) -> Result<(), Error> +where + C: HeaderBackend + AuxStore + ProvideRuntimeApi, + Algorithm: PowAlgorithm, + Algorithm::Difficulty: 'static, + E: Environment, + E::Proposer: Proposer>, + E::Error: std::fmt::Debug, + SO: SyncOracle, + S: SelectChain, + sp_api::TransactionFor: 'static, + CAW: CanAuthorWith, { - 'outer: 
loop { - if sync_oracle.is_major_syncing() { - debug!(target: "pow", "Skipping proposal due to sync."); - std::thread::sleep(std::time::Duration::new(1, 0)); - continue 'outer - } - - let (best_hash, best_header) = match select_chain { - Some(select_chain) => { - let header = select_chain.best_chain() - .map_err(Error::BestHeaderSelectChain)?; - let hash = header.hash(); - (hash, header) - }, - None => { - let hash = client.info().best_hash; - let header = client.header(BlockId::Hash(hash)) - .map_err(Error::BestHeader)? - .ok_or(Error::NoBestHeader)?; - (hash, header) - }, - }; - - if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { - warn!( - target: "pow", - "Skipping proposal `can_author_with` returned: {} \ - Probably a node update is required!", - err, - ); - std::thread::sleep(std::time::Duration::from_secs(1)); - continue 'outer - } - - let mut proposer = futures::executor::block_on(env.init(&best_header)) - .map_err(|e| Error::Environment(format!("{:?}", e)))?; - - let inherent_data = inherent_data_providers - .create_inherent_data().map_err(Error::CreateInherents)?; - let mut inherent_digest = Digest::default(); - if let Some(preruntime) = &preruntime { - inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, preruntime.to_vec())); - } - let proposal = futures::executor::block_on(proposer.propose( - inherent_data, - inherent_digest, - build_time.clone(), - RecordProof::No, - )).map_err(|e| Error::BlockProposingError(format!("{:?}", e)))?; - - let (header, body) = proposal.block.deconstruct(); - let (difficulty, seal) = { - let difficulty = algorithm.difficulty(best_hash)?; - - loop { - let seal = algorithm.mine( - &BlockId::Hash(best_hash), - &header.hash(), - difficulty, - round, - )?; - - if let Some(seal) = seal { - break (difficulty, seal) - } - - if best_hash != client.info().best_hash { - continue 'outer - } - } - }; - - let (hash, seal) = { - let seal = DigestItem::Seal(POW_ENGINE_ID, seal); - let mut header = 
header.clone(); - header.digest_mut().push(seal); - let hash = header.hash(); - let seal = header.digest_mut().pop() - .expect("Pushed one seal above; length greater than zero; qed"); - (hash, seal) - }; - - let intermediate = PowIntermediate:: { - difficulty: Some(difficulty), - }; - - let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); - import_block.post_digests.push(seal); - import_block.body = Some(body); - import_block.storage_changes = Some(proposal.storage_changes); - import_block.intermediates.insert( - Cow::from(INTERMEDIATE_KEY), - Box::new(intermediate) as Box - ); - import_block.post_hash = Some(hash); - - block_import.import_block(import_block, HashMap::default()) - .map_err(|e| Error::BlockBuiltError(best_hash, e))?; - } + 'outer: loop { + if sync_oracle.is_major_syncing() { + debug!(target: "pow", "Skipping proposal due to sync."); + std::thread::sleep(std::time::Duration::new(1, 0)); + continue 'outer; + } + + let (best_hash, best_header) = match select_chain { + Some(select_chain) => { + let header = select_chain + .best_chain() + .map_err(Error::BestHeaderSelectChain)?; + let hash = header.hash(); + (hash, header) + } + None => { + let hash = client.info().best_hash; + let header = client + .header(BlockId::Hash(hash)) + .map_err(Error::BestHeader)? 
+ .ok_or(Error::NoBestHeader)?; + (hash, header) + } + }; + + if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(best_hash)) { + warn!( + target: "pow", + "Skipping proposal `can_author_with` returned: {} \ + Probably a node update is required!", + err, + ); + std::thread::sleep(std::time::Duration::from_secs(1)); + continue 'outer; + } + + let mut proposer = futures::executor::block_on(env.init(&best_header)) + .map_err(|e| Error::Environment(format!("{:?}", e)))?; + + let inherent_data = inherent_data_providers + .create_inherent_data() + .map_err(Error::CreateInherents)?; + let mut inherent_digest = Digest::default(); + if let Some(preruntime) = &preruntime { + inherent_digest.push(DigestItem::PreRuntime(POW_ENGINE_ID, preruntime.to_vec())); + } + let proposal = futures::executor::block_on(proposer.propose( + inherent_data, + inherent_digest, + build_time.clone(), + RecordProof::No, + )) + .map_err(|e| Error::BlockProposingError(format!("{:?}", e)))?; + + let (header, body) = proposal.block.deconstruct(); + let (difficulty, seal) = { + let difficulty = algorithm.difficulty(best_hash)?; + + loop { + let seal = + algorithm.mine(&BlockId::Hash(best_hash), &header.hash(), difficulty, round)?; + + if let Some(seal) = seal { + break (difficulty, seal); + } + + if best_hash != client.info().best_hash { + continue 'outer; + } + } + }; + + let (hash, seal) = { + let seal = DigestItem::Seal(POW_ENGINE_ID, seal); + let mut header = header.clone(); + header.digest_mut().push(seal); + let hash = header.hash(); + let seal = header + .digest_mut() + .pop() + .expect("Pushed one seal above; length greater than zero; qed"); + (hash, seal) + }; + + let intermediate = PowIntermediate:: { + difficulty: Some(difficulty), + }; + + let mut import_block = BlockImportParams::new(BlockOrigin::Own, header); + import_block.post_digests.push(seal); + import_block.body = Some(body); + import_block.storage_changes = Some(proposal.storage_changes); + 
import_block.intermediates.insert( + Cow::from(INTERMEDIATE_KEY), + Box::new(intermediate) as Box, + ); + import_block.post_hash = Some(hash); + + block_import + .import_block(import_block, HashMap::default()) + .map_err(|e| Error::BlockBuiltError(best_hash, e))?; + } } diff --git a/client/consensus/slots/build.rs b/client/consensus/slots/build.rs index 513cc234d4..4b546b4a43 100644 --- a/client/consensus/slots/build.rs +++ b/client/consensus/slots/build.rs @@ -17,7 +17,7 @@ use std::env; fn main() { - if let Ok(profile) = env::var("PROFILE") { - println!("cargo:rustc-cfg=build_type=\"{}\"", profile); - } + if let Ok(profile) = env::var("PROFILE") { + println!("cargo:rustc-cfg=build_type=\"{}\"", profile); + } } diff --git a/client/consensus/slots/src/aux_schema.rs b/client/consensus/slots/src/aux_schema.rs index df4772a8e9..49b00e4794 100644 --- a/client/consensus/slots/src/aux_schema.rs +++ b/client/consensus/slots/src/aux_schema.rs @@ -16,9 +16,9 @@ //! Schema for slots in the aux-db. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_runtime::traits::Header; const SLOT_HEADER_MAP_KEY: &[u8] = b"slot_header_map"; @@ -30,236 +30,213 @@ pub const MAX_SLOT_CAPACITY: u64 = 1000; pub const PRUNING_BOUND: u64 = 2 * MAX_SLOT_CAPACITY; fn load_decode(backend: &C, key: &[u8]) -> ClientResult> - where - C: AuxStore, - T: Decode, +where + C: AuxStore, + T: Decode, { - match backend.get_aux(key)? { - None => Ok(None), - Some(t) => T::decode(&mut &t[..]) - .map_err( - |e| ClientError::Backend(format!("Slots DB is corrupted. Decode error: {}", e.what())), - ) - .map(Some) - } + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]) + .map_err(|e| { + ClientError::Backend(format!("Slots DB is corrupted. 
Decode error: {}", e.what())) + }) + .map(Some), + } } /// Represents an equivocation proof. #[derive(Debug, Clone)] pub struct EquivocationProof { - slot: u64, - fst_header: H, - snd_header: H, + slot: u64, + fst_header: H, + snd_header: H, } impl EquivocationProof { - /// Get the slot number where the equivocation happened. - pub fn slot(&self) -> u64 { - self.slot - } - - /// Get the first header involved in the equivocation. - pub fn fst_header(&self) -> &H { - &self.fst_header - } - - /// Get the second header involved in the equivocation. - pub fn snd_header(&self) -> &H { - &self.snd_header - } + /// Get the slot number where the equivocation happened. + pub fn slot(&self) -> u64 { + self.slot + } + + /// Get the first header involved in the equivocation. + pub fn fst_header(&self) -> &H { + &self.fst_header + } + + /// Get the second header involved in the equivocation. + pub fn snd_header(&self) -> &H { + &self.snd_header + } } /// Checks if the header is an equivocation and returns the proof in that case. /// /// Note: it detects equivocations only when slot_now - slot <= MAX_SLOT_CAPACITY. pub fn check_equivocation( - backend: &C, - slot_now: u64, - slot: u64, - header: &H, - signer: &P, + backend: &C, + slot_now: u64, + slot: u64, + header: &H, + signer: &P, ) -> ClientResult>> - where - H: Header, - C: AuxStore, - P: Clone + Encode + Decode + PartialEq, +where + H: Header, + C: AuxStore, + P: Clone + Encode + Decode + PartialEq, { - // We don't check equivocations for old headers out of our capacity. - if slot_now - slot > MAX_SLOT_CAPACITY { - return Ok(None) - } - - // Key for this slot. - let mut curr_slot_key = SLOT_HEADER_MAP_KEY.to_vec(); - slot.using_encoded(|s| curr_slot_key.extend(s)); - - // Get headers of this slot. - let mut headers_with_sig = load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])? - .unwrap_or_else(Vec::new); - - // Get first slot saved. 
- let slot_header_start = SLOT_HEADER_START.to_vec(); - let first_saved_slot = load_decode::<_, u64>(backend, &slot_header_start[..])? - .unwrap_or(slot); - - for (prev_header, prev_signer) in headers_with_sig.iter() { - // A proof of equivocation consists of two headers: - // 1) signed by the same voter, - if prev_signer == signer { - // 2) with different hash - if header.hash() != prev_header.hash() { - return Ok(Some(EquivocationProof { - slot, // 3) and mentioning the same slot. - fst_header: prev_header.clone(), - snd_header: header.clone(), - })); - } else { - // We don't need to continue in case of duplicated header, - // since it's already saved and a possible equivocation - // would have been detected before. - return Ok(None) - } - } - } - - let mut keys_to_delete = vec![]; - let mut new_first_saved_slot = first_saved_slot; - - if slot_now - first_saved_slot >= PRUNING_BOUND { - let prefix = SLOT_HEADER_MAP_KEY.to_vec(); - new_first_saved_slot = slot_now.saturating_sub(MAX_SLOT_CAPACITY); - - for s in first_saved_slot..new_first_saved_slot { - let mut p = prefix.clone(); - s.using_encoded(|s| p.extend(s)); - keys_to_delete.push(p); - } - } - - headers_with_sig.push((header.clone(), signer.clone())); - - backend.insert_aux( - &[ - (&curr_slot_key[..], headers_with_sig.encode().as_slice()), - (&slot_header_start[..], new_first_saved_slot.encode().as_slice()), - ], - &keys_to_delete.iter().map(|k| &k[..]).collect::>()[..], - )?; - - Ok(None) + // We don't check equivocations for old headers out of our capacity. + if slot_now - slot > MAX_SLOT_CAPACITY { + return Ok(None); + } + + // Key for this slot. + let mut curr_slot_key = SLOT_HEADER_MAP_KEY.to_vec(); + slot.using_encoded(|s| curr_slot_key.extend(s)); + + // Get headers of this slot. + let mut headers_with_sig = + load_decode::<_, Vec<(H, P)>>(backend, &curr_slot_key[..])?.unwrap_or_else(Vec::new); + + // Get first slot saved. 
+ let slot_header_start = SLOT_HEADER_START.to_vec(); + let first_saved_slot = load_decode::<_, u64>(backend, &slot_header_start[..])?.unwrap_or(slot); + + for (prev_header, prev_signer) in headers_with_sig.iter() { + // A proof of equivocation consists of two headers: + // 1) signed by the same voter, + if prev_signer == signer { + // 2) with different hash + if header.hash() != prev_header.hash() { + return Ok(Some(EquivocationProof { + slot, // 3) and mentioning the same slot. + fst_header: prev_header.clone(), + snd_header: header.clone(), + })); + } else { + // We don't need to continue in case of duplicated header, + // since it's already saved and a possible equivocation + // would have been detected before. + return Ok(None); + } + } + } + + let mut keys_to_delete = vec![]; + let mut new_first_saved_slot = first_saved_slot; + + if slot_now - first_saved_slot >= PRUNING_BOUND { + let prefix = SLOT_HEADER_MAP_KEY.to_vec(); + new_first_saved_slot = slot_now.saturating_sub(MAX_SLOT_CAPACITY); + + for s in first_saved_slot..new_first_saved_slot { + let mut p = prefix.clone(); + s.using_encoded(|s| p.extend(s)); + keys_to_delete.push(p); + } + } + + headers_with_sig.push((header.clone(), signer.clone())); + + backend.insert_aux( + &[ + (&curr_slot_key[..], headers_with_sig.encode().as_slice()), + ( + &slot_header_start[..], + new_first_saved_slot.encode().as_slice(), + ), + ], + &keys_to_delete + .iter() + .map(|k| &k[..]) + .collect::>()[..], + )?; + + Ok(None) } #[cfg(test)] mod test { - use sp_core::{sr25519, Pair}; - use sp_core::hash::H256; - use sp_runtime::testing::{Header as HeaderTest, Digest as DigestTest}; - use substrate_test_runtime_client; - - use super::{MAX_SLOT_CAPACITY, PRUNING_BOUND, check_equivocation}; - - fn create_header(number: u64) -> HeaderTest { - // so that different headers for the same number get different hashes - let parent_hash = H256::random(); - - let header = HeaderTest { - parent_hash, - number, - state_root: 
Default::default(), - extrinsics_root: Default::default(), - digest: DigestTest { logs: vec![], }, - }; - - header - } - - #[test] - fn check_equivocation_works() { - let client = substrate_test_runtime_client::new(); - let (pair, _seed) = sr25519::Pair::generate(); - let public = pair.public(); - - let header1 = create_header(1); // @ slot 2 - let header2 = create_header(2); // @ slot 2 - let header3 = create_header(2); // @ slot 4 - let header4 = create_header(3); // @ slot MAX_SLOT_CAPACITY + 4 - let header5 = create_header(4); // @ slot MAX_SLOT_CAPACITY + 4 - let header6 = create_header(3); // @ slot 4 - - // It's ok to sign same headers. - assert!( - check_equivocation( - &client, - 2, - 2, - &header1, - &public, - ).unwrap().is_none(), - ); - - assert!( - check_equivocation( - &client, - 3, - 2, - &header1, - &public, - ).unwrap().is_none(), - ); - - // But not two different headers at the same slot. - assert!( - check_equivocation( - &client, - 4, - 2, - &header2, - &public, - ).unwrap().is_some(), - ); - - // Different slot is ok. - assert!( - check_equivocation( - &client, - 5, - 4, - &header3, - &public, - ).unwrap().is_none(), - ); - - // Here we trigger pruning and save header 4. - assert!( - check_equivocation( - &client, - PRUNING_BOUND + 2, - MAX_SLOT_CAPACITY + 4, - &header4, - &public, - ).unwrap().is_none(), - ); - - // This fails because header 5 is an equivocation of header 4. - assert!( - check_equivocation( - &client, - PRUNING_BOUND + 3, - MAX_SLOT_CAPACITY + 4, - &header5, - &public, - ).unwrap().is_some(), - ); - - // This is ok because we pruned the corresponding header. Shows that we are pruning. 
- assert!( - check_equivocation( - &client, - PRUNING_BOUND + 4, - 4, - &header6, - &public, - ).unwrap().is_none(), - ); - } + use sp_core::hash::H256; + use sp_core::{sr25519, Pair}; + use sp_runtime::testing::{Digest as DigestTest, Header as HeaderTest}; + use substrate_test_runtime_client; + + use super::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; + + fn create_header(number: u64) -> HeaderTest { + // so that different headers for the same number get different hashes + let parent_hash = H256::random(); + + let header = HeaderTest { + parent_hash, + number, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: DigestTest { logs: vec![] }, + }; + + header + } + + #[test] + fn check_equivocation_works() { + let client = substrate_test_runtime_client::new(); + let (pair, _seed) = sr25519::Pair::generate(); + let public = pair.public(); + + let header1 = create_header(1); // @ slot 2 + let header2 = create_header(2); // @ slot 2 + let header3 = create_header(2); // @ slot 4 + let header4 = create_header(3); // @ slot MAX_SLOT_CAPACITY + 4 + let header5 = create_header(4); // @ slot MAX_SLOT_CAPACITY + 4 + let header6 = create_header(3); // @ slot 4 + + // It's ok to sign same headers. + assert!(check_equivocation(&client, 2, 2, &header1, &public,) + .unwrap() + .is_none(),); + + assert!(check_equivocation(&client, 3, 2, &header1, &public,) + .unwrap() + .is_none(),); + + // But not two different headers at the same slot. + assert!(check_equivocation(&client, 4, 2, &header2, &public,) + .unwrap() + .is_some(),); + + // Different slot is ok. + assert!(check_equivocation(&client, 5, 4, &header3, &public,) + .unwrap() + .is_none(),); + + // Here we trigger pruning and save header 4. + assert!(check_equivocation( + &client, + PRUNING_BOUND + 2, + MAX_SLOT_CAPACITY + 4, + &header4, + &public, + ) + .unwrap() + .is_none(),); + + // This fails because header 5 is an equivocation of header 4. 
+ assert!(check_equivocation( + &client, + PRUNING_BOUND + 3, + MAX_SLOT_CAPACITY + 4, + &header5, + &public, + ) + .unwrap() + .is_some(),); + + // This is ok because we pruned the corresponding header. Shows that we are pruning. + assert!( + check_equivocation(&client, PRUNING_BOUND + 4, 4, &header6, &public,) + .unwrap() + .is_none(), + ); + } } diff --git a/client/consensus/slots/src/lib.rs b/client/consensus/slots/src/lib.rs index 2a0739a831..33b9d39ba0 100644 --- a/client/consensus/slots/src/lib.rs +++ b/client/consensus/slots/src/lib.rs @@ -23,310 +23,354 @@ #![deny(warnings)] #![forbid(unsafe_code, missing_docs)] -mod slots; mod aux_schema; +mod slots; -pub use slots::{SignedDuration, SlotInfo}; -use slots::Slots; pub use aux_schema::{check_equivocation, MAX_SLOT_CAPACITY, PRUNING_BOUND}; +use slots::Slots; +pub use slots::{SignedDuration, SlotInfo}; use codec::{Decode, Encode}; -use sp_consensus::{BlockImport, Proposer, SyncOracle, SelectChain, CanAuthorWith, SlotData, RecordProof}; -use futures::{prelude::*, future::{self, Either}}; +use futures::{ + future::{self, Either}, + prelude::*, +}; use futures_timer::Delay; -use sp_inherents::{InherentData, InherentDataProviders}; use log::{debug, error, info, warn}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; -use sp_api::{ProvideRuntimeApi, ApiRef}; -use std::{fmt::Debug, ops::Deref, pin::Pin, sync::Arc, time::{Instant, Duration}}; -use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_WARN, CONSENSUS_INFO}; use parking_lot::Mutex; +use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO, CONSENSUS_WARN}; +use sp_api::{ApiRef, ProvideRuntimeApi}; +use sp_consensus::{ + BlockImport, CanAuthorWith, Proposer, RecordProof, SelectChain, SlotData, SyncOracle, +}; +use sp_inherents::{InherentData, InherentDataProviders}; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor}; +use std::{ + 
fmt::Debug, + ops::Deref, + pin::Pin, + sync::Arc, + time::{Duration, Instant}, +}; /// The changes that need to applied to the storage to create the state for a block. /// /// See [`sp_state_machine::StorageChanges`] for more information. pub type StorageChanges = - sp_state_machine::StorageChanges, NumberFor>; + sp_state_machine::StorageChanges, NumberFor>; /// A worker that should be invoked at every new slot. pub trait SlotWorker { - /// The type of the future that will be returned when a new slot is - /// triggered. - type OnSlot: Future>; + /// The type of the future that will be returned when a new slot is + /// triggered. + type OnSlot: Future>; - /// Called when a new slot is triggered. - fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot; + /// Called when a new slot is triggered. + fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) -> Self::OnSlot; } /// A skeleton implementation for `SlotWorker` which tries to claim a slot at /// its beginning and tries to produce a block if successfully claimed, timing /// out if block production takes too long. pub trait SimpleSlotWorker { - /// A handle to a `BlockImport`. - type BlockImport: BlockImport>::Transaction> - + Send + 'static; - - /// A handle to a `SyncOracle`. - type SyncOracle: SyncOracle; - - /// The type of future resolving to the proposer. - type CreateProposer: Future> - + Send + Unpin + 'static; - - /// The type of proposer to use to build blocks. - type Proposer: Proposer; - - /// Data associated with a slot claim. - type Claim: Send + 'static; - - /// Epoch data necessary for authoring. - type EpochData: Send + 'static; - - /// The logging target to use when logging messages. - fn logging_target(&self) -> &'static str; - - /// A handle to a `BlockImport`. - fn block_import(&self) -> Arc>; - - /// Returns the epoch data necessary for authoring. For time-dependent epochs, - /// use the provided slot number as a canonical source of time. 
- fn epoch_data(&self, header: &B::Header, slot_number: u64) -> Result; - - /// Returns the number of authorities given the epoch data. - /// None indicate that the authorities information is incomplete. - fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; - - /// Tries to claim the given slot, returning an object with claim data if successful. - fn claim_slot( - &self, - header: &B::Header, - slot_number: u64, - epoch_data: &Self::EpochData, - ) -> Option; - - /// Return the pre digest data to include in a block authored with the given claim. - fn pre_digest_data( - &self, - slot_number: u64, - claim: &Self::Claim, - ) -> Vec>; - - /// Returns a function which produces a `BlockImportParams`. - fn block_import_params(&self) -> Box< - dyn Fn( - B::Header, - &B::Hash, - Vec, - StorageChanges<>::Transaction, B>, - Self::Claim, - Self::EpochData, - ) -> sp_consensus::BlockImportParams< - B, - >::Transaction - > - + Send - >; - - /// Whether to force authoring if offline. - fn force_authoring(&self) -> bool; - - /// Returns a handle to a `SyncOracle`. - fn sync_oracle(&mut self) -> &mut Self::SyncOracle; - - /// Returns a `Proposer` to author on top of the given block. - fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer; - - /// Remaining duration of the slot. - fn slot_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { - let now = Instant::now(); - if now < slot_info.ends_at { - slot_info.ends_at.duration_since(now) - } else { - Duration::from_millis(0) - } - } - - /// Remaining duration for proposing. None means unlimited. - fn proposing_remaining_duration( - &self, - _head: &B::Header, - slot_info: &SlotInfo - ) -> Option { - Some(self.slot_remaining_duration(slot_info)) - } - - /// Implements the `on_slot` functionality from `SlotWorker`. 
- fn on_slot(&mut self, chain_head: B::Header, slot_info: SlotInfo) - -> Pin> + Send>> where - Self: Send + Sync, - >::Proposal: Unpin + Send + 'static, - { - let (timestamp, slot_number, slot_duration) = - (slot_info.timestamp, slot_info.number, slot_info.duration); - - { - let slot_now = SignedDuration::default().slot_now(slot_duration); - if slot_now > slot_number { - // if this is behind, return. - debug!(target: self.logging_target(), - "Skipping proposal slot {} since our current view is {}", - slot_number, slot_now, - ); - - return Box::pin(future::ready(Ok(()))); - } - } - - let epoch_data = match self.epoch_data(&chain_head, slot_number) { - Ok(epoch_data) => epoch_data, - Err(err) => { - warn!("Unable to fetch epoch data at block {:?}: {:?}", chain_head.hash(), err); - - telemetry!( - CONSENSUS_WARN; "slots.unable_fetching_authorities"; - "slot" => ?chain_head.hash(), - "err" => ?err, - ); - - return Box::pin(future::ready(Ok(()))); - } - }; - - let authorities_len = self.authorities_len(&epoch_data); - - if !self.force_authoring() && - self.sync_oracle().is_offline() && - authorities_len.map(|a| a > 1).unwrap_or(false) - { - debug!(target: self.logging_target(), "Skipping proposal slot. 
Waiting for the network."); - telemetry!( - CONSENSUS_DEBUG; - "slots.skipping_proposal_slot"; - "authorities_len" => authorities_len, - ); - - return Box::pin(future::ready(Ok(()))); - } - - let claim = match self.claim_slot(&chain_head, slot_number, &epoch_data) { - None => return Box::pin(future::ready(Ok(()))), - Some(claim) => claim, - }; - - debug!( - target: self.logging_target(), "Starting authorship at slot {}; timestamp = {}", - slot_number, - timestamp, - ); - - telemetry!(CONSENSUS_DEBUG; "slots.starting_authorship"; - "slot_num" => slot_number, - "timestamp" => timestamp, - ); - - let awaiting_proposer = self.proposer(&chain_head).map_err(move |err| { - warn!("Unable to author block in slot {:?}: {:?}", slot_number, err); - - telemetry!(CONSENSUS_WARN; "slots.unable_authoring_block"; - "slot" => slot_number, "err" => ?err - ); - - err - }); - - let slot_remaining_duration = self.slot_remaining_duration(&slot_info); - let proposing_remaining_duration = self.proposing_remaining_duration(&chain_head, &slot_info); - let logs = self.pre_digest_data(slot_number, &claim); - - // deadline our production to approx. the end of the slot - let proposing = awaiting_proposer.and_then(move |mut proposer| proposer.propose( - slot_info.inherent_data, - sp_runtime::generic::Digest { - logs, - }, - slot_remaining_duration, - RecordProof::No, - ).map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e)))); - - let delay: Box + Unpin + Send> = match proposing_remaining_duration { - Some(r) => Box::new(Delay::new(r)), - None => Box::new(future::pending()), - }; - - let proposal_work = - Box::new(futures::future::select(proposing, delay).map(move |v| match v { - futures::future::Either::Left((b, _)) => b.map(|b| (b, claim)), - futures::future::Either::Right(_) => { - info!("⌛️ Discarding proposal for slot {}; block production took too long", slot_number); - // If the node was compiled with debug, tell the user to use release optimizations. 
- #[cfg(build_type="debug")] - info!("👉 Recompile your node in `--release` mode to mitigate this problem."); - telemetry!(CONSENSUS_INFO; "slots.discarding_proposal_took_too_long"; - "slot" => slot_number, - ); - Err(sp_consensus::Error::ClientImport("Timeout in the Slots proposer".into())) - }, - })); - - let block_import_params_maker = self.block_import_params(); - let block_import = self.block_import(); - let logging_target = self.logging_target(); - - Box::pin(proposal_work.map_ok(move |(proposal, claim)| { - let (header, body) = proposal.block.deconstruct(); - let header_num = *header.number(); - let header_hash = header.hash(); - let parent_hash = *header.parent_hash(); - - let block_import_params = block_import_params_maker( - header, - &header_hash, - body, - proposal.storage_changes, - claim, - epoch_data, - ); - - info!( - "🔖 Pre-sealed block for proposal at {}. Hash now {:?}, previously {:?}.", - header_num, - block_import_params.post_hash(), - header_hash, - ); - - telemetry!(CONSENSUS_INFO; "slots.pre_sealed_block"; - "header_num" => ?header_num, - "hash_now" => ?block_import_params.post_hash(), - "hash_previously" => ?header_hash, - ); - - if let Err(err) = block_import.lock().import_block(block_import_params, Default::default()) { - warn!(target: logging_target, - "Error with block built on {:?}: {:?}", - parent_hash, - err, - ); - - telemetry!(CONSENSUS_WARN; "slots.err_with_block_built_on"; - "hash" => ?parent_hash, "err" => ?err, - ); - } - })) - } + /// A handle to a `BlockImport`. + type BlockImport: BlockImport>::Transaction> + + Send + + 'static; + + /// A handle to a `SyncOracle`. + type SyncOracle: SyncOracle; + + /// The type of future resolving to the proposer. + type CreateProposer: Future> + + Send + + Unpin + + 'static; + + /// The type of proposer to use to build blocks. + type Proposer: Proposer; + + /// Data associated with a slot claim. + type Claim: Send + 'static; + + /// Epoch data necessary for authoring. 
+ type EpochData: Send + 'static; + + /// The logging target to use when logging messages. + fn logging_target(&self) -> &'static str; + + /// A handle to a `BlockImport`. + fn block_import(&self) -> Arc>; + + /// Returns the epoch data necessary for authoring. For time-dependent epochs, + /// use the provided slot number as a canonical source of time. + fn epoch_data( + &self, + header: &B::Header, + slot_number: u64, + ) -> Result; + + /// Returns the number of authorities given the epoch data. + /// None indicates that the authorities information is incomplete. + fn authorities_len(&self, epoch_data: &Self::EpochData) -> Option; + + /// Tries to claim the given slot, returning an object with claim data if successful. + fn claim_slot( + &self, + header: &B::Header, + slot_number: u64, + epoch_data: &Self::EpochData, + ) -> Option; + + /// Return the pre digest data to include in a block authored with the given claim. + fn pre_digest_data( + &self, + slot_number: u64, + claim: &Self::Claim, + ) -> Vec>; + + /// Returns a function which produces a `BlockImportParams`. + fn block_import_params( + &self, + ) -> Box< + dyn Fn( + B::Header, + &B::Hash, + Vec, + StorageChanges<>::Transaction, B>, + Self::Claim, + Self::EpochData, + ) -> sp_consensus::BlockImportParams< + B, + >::Transaction, + > + Send, + >; + + /// Whether to force authoring if offline. + fn force_authoring(&self) -> bool; + + /// Returns a handle to a `SyncOracle`. + fn sync_oracle(&mut self) -> &mut Self::SyncOracle; + + /// Returns a `Proposer` to author on top of the given block. + fn proposer(&mut self, block: &B::Header) -> Self::CreateProposer; + + /// Remaining duration of the slot. + fn slot_remaining_duration(&self, slot_info: &SlotInfo) -> Duration { + let now = Instant::now(); + if now < slot_info.ends_at { + slot_info.ends_at.duration_since(now) + } else { + Duration::from_millis(0) + } + } + + /// Remaining duration for proposing. None means unlimited. 
+ fn proposing_remaining_duration( + &self, + _head: &B::Header, + slot_info: &SlotInfo, + ) -> Option { + Some(self.slot_remaining_duration(slot_info)) + } + + /// Implements the `on_slot` functionality from `SlotWorker`. + fn on_slot( + &mut self, + chain_head: B::Header, + slot_info: SlotInfo, + ) -> Pin> + Send>> + where + Self: Send + Sync, + >::Proposal: Unpin + Send + 'static, + { + let (timestamp, slot_number, slot_duration) = + (slot_info.timestamp, slot_info.number, slot_info.duration); + + { + let slot_now = SignedDuration::default().slot_now(slot_duration); + if slot_now > slot_number { + // if this is behind, return. + debug!(target: self.logging_target(), + "Skipping proposal slot {} since our current view is {}", + slot_number, slot_now, + ); + + return Box::pin(future::ready(Ok(()))); + } + } + + let epoch_data = match self.epoch_data(&chain_head, slot_number) { + Ok(epoch_data) => epoch_data, + Err(err) => { + warn!( + "Unable to fetch epoch data at block {:?}: {:?}", + chain_head.hash(), + err + ); + + telemetry!( + CONSENSUS_WARN; "slots.unable_fetching_authorities"; + "slot" => ?chain_head.hash(), + "err" => ?err, + ); + + return Box::pin(future::ready(Ok(()))); + } + }; + + let authorities_len = self.authorities_len(&epoch_data); + + if !self.force_authoring() + && self.sync_oracle().is_offline() + && authorities_len.map(|a| a > 1).unwrap_or(false) + { + debug!(target: self.logging_target(), "Skipping proposal slot. 
Waiting for the network."); + telemetry!( + CONSENSUS_DEBUG; + "slots.skipping_proposal_slot"; + "authorities_len" => authorities_len, + ); + + return Box::pin(future::ready(Ok(()))); + } + + let claim = match self.claim_slot(&chain_head, slot_number, &epoch_data) { + None => return Box::pin(future::ready(Ok(()))), + Some(claim) => claim, + }; + + debug!( + target: self.logging_target(), "Starting authorship at slot {}; timestamp = {}", + slot_number, + timestamp, + ); + + telemetry!(CONSENSUS_DEBUG; "slots.starting_authorship"; + "slot_num" => slot_number, + "timestamp" => timestamp, + ); + + let awaiting_proposer = self.proposer(&chain_head).map_err(move |err| { + warn!( + "Unable to author block in slot {:?}: {:?}", + slot_number, err + ); + + telemetry!(CONSENSUS_WARN; "slots.unable_authoring_block"; + "slot" => slot_number, "err" => ?err + ); + + err + }); + + let slot_remaining_duration = self.slot_remaining_duration(&slot_info); + let proposing_remaining_duration = + self.proposing_remaining_duration(&chain_head, &slot_info); + let logs = self.pre_digest_data(slot_number, &claim); + + // deadline our production to approx. 
the end of the slot + let proposing = awaiting_proposer.and_then(move |mut proposer| { + proposer + .propose( + slot_info.inherent_data, + sp_runtime::generic::Digest { logs }, + slot_remaining_duration, + RecordProof::No, + ) + .map_err(|e| sp_consensus::Error::ClientImport(format!("{:?}", e))) + }); + + let delay: Box + Unpin + Send> = match proposing_remaining_duration + { + Some(r) => Box::new(Delay::new(r)), + None => Box::new(future::pending()), + }; + + let proposal_work = Box::new(futures::future::select(proposing, delay).map( + move |v| match v { + futures::future::Either::Left((b, _)) => b.map(|b| (b, claim)), + futures::future::Either::Right(_) => { + info!( + "⌛️ Discarding proposal for slot {}; block production took too long", + slot_number + ); + // If the node was compiled with debug, tell the user to use release optimizations. + #[cfg(build_type = "debug")] + info!("👉 Recompile your node in `--release` mode to mitigate this problem."); + telemetry!(CONSENSUS_INFO; "slots.discarding_proposal_took_too_long"; + "slot" => slot_number, + ); + Err(sp_consensus::Error::ClientImport( + "Timeout in the Slots proposer".into(), + )) + } + }, + )); + + let block_import_params_maker = self.block_import_params(); + let block_import = self.block_import(); + let logging_target = self.logging_target(); + + Box::pin(proposal_work.map_ok(move |(proposal, claim)| { + let (header, body) = proposal.block.deconstruct(); + let header_num = *header.number(); + let header_hash = header.hash(); + let parent_hash = *header.parent_hash(); + + let block_import_params = block_import_params_maker( + header, + &header_hash, + body, + proposal.storage_changes, + claim, + epoch_data, + ); + + info!( + "🔖 Pre-sealed block for proposal at {}. 
Hash now {:?}, previously {:?}.", + header_num, + block_import_params.post_hash(), + header_hash, + ); + + telemetry!(CONSENSUS_INFO; "slots.pre_sealed_block"; + "header_num" => ?header_num, + "hash_now" => ?block_import_params.post_hash(), + "hash_previously" => ?header_hash, + ); + + if let Err(err) = block_import + .lock() + .import_block(block_import_params, Default::default()) + { + warn!( + target: logging_target, + "Error with block built on {:?}: {:?}", parent_hash, err, + ); + + telemetry!(CONSENSUS_WARN; "slots.err_with_block_built_on"; + "hash" => ?parent_hash, "err" => ?err, + ); + } + })) + } } /// Slot compatible inherent data. pub trait SlotCompatible { - /// Extract timestamp and slot from inherent data. - fn extract_timestamp_and_slot( - &self, - inherent: &InherentData, - ) -> Result<(u64, u64, std::time::Duration), sp_consensus::Error>; - - /// Get the difference between chain time and local time. Defaults to - /// always returning zero. - fn time_offset() -> SignedDuration { Default::default() } + /// Extract timestamp and slot from inherent data. + fn extract_timestamp_and_slot( + &self, + inherent: &InherentData, + ) -> Result<(u64, u64, std::time::Duration), sp_consensus::Error>; + + /// Get the difference between chain time and local time. Defaults to + /// always returning zero. + fn time_offset() -> SignedDuration { + Default::default() + } } /// Start a new slot worker. @@ -334,85 +378,88 @@ pub trait SlotCompatible { /// Every time a new slot is triggered, `worker.on_slot` is called and the future it returns is /// polled until completion, unless we are major syncing. 
pub fn start_slot_worker( - slot_duration: SlotDuration, - client: C, - mut worker: W, - mut sync_oracle: SO, - inherent_data_providers: InherentDataProviders, - timestamp_extractor: SC, - can_author_with: CAW, + slot_duration: SlotDuration, + client: C, + mut worker: W, + mut sync_oracle: SO, + inherent_data_providers: InherentDataProviders, + timestamp_extractor: SC, + can_author_with: CAW, ) -> impl Future where - B: BlockT, - C: SelectChain, - W: SlotWorker, - W::OnSlot: Unpin, - SO: SyncOracle + Send, - SC: SlotCompatible + Unpin, - T: SlotData + Clone, - CAW: CanAuthorWith + Send, + B: BlockT, + C: SelectChain, + W: SlotWorker, + W::OnSlot: Unpin, + SO: SyncOracle + Send, + SC: SlotCompatible + Unpin, + T: SlotData + Clone, + CAW: CanAuthorWith + Send, { - let SlotDuration(slot_duration) = slot_duration; - - // rather than use a timer interval, we schedule our waits ourselves - Slots::::new( - slot_duration.slot_duration(), - inherent_data_providers, - timestamp_extractor, - ).inspect_err(|e| debug!(target: "slots", "Faulty timer: {:?}", e)) - .try_for_each(move |slot_info| { - // only propose when we are not syncing. - if sync_oracle.is_major_syncing() { - debug!(target: "slots", "Skipping proposal slot due to sync."); - return Either::Right(future::ready(Ok(()))); - } - - let slot_num = slot_info.number; - let chain_head = match client.best_chain() { - Ok(x) => x, - Err(e) => { - warn!(target: "slots", "Unable to author block in slot {}. \ + let SlotDuration(slot_duration) = slot_duration; + + // rather than use a timer interval, we schedule our waits ourselves + Slots::::new( + slot_duration.slot_duration(), + inherent_data_providers, + timestamp_extractor, + ) + .inspect_err(|e| debug!(target: "slots", "Faulty timer: {:?}", e)) + .try_for_each(move |slot_info| { + // only propose when we are not syncing. 
+ if sync_oracle.is_major_syncing() { + debug!(target: "slots", "Skipping proposal slot due to sync."); + return Either::Right(future::ready(Ok(()))); + } + + let slot_num = slot_info.number; + let chain_head = match client.best_chain() { + Ok(x) => x, + Err(e) => { + warn!(target: "slots", "Unable to author block in slot {}. \ no best block header: {:?}", slot_num, e); - return Either::Right(future::ready(Ok(()))); - } - }; - - if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(chain_head.hash())) { - warn!( - target: "slots", - "Unable to author block in slot {},. `can_author_with` returned: {} \ - Probably a node update is required!", - slot_num, - err, - ); - Either::Right(future::ready(Ok(()))) - } else { - Either::Left( - worker.on_slot(chain_head, slot_info) - .map_err(|e| { - warn!(target: "slots", "Encountered consensus error: {:?}", e); - }) - .or_else(|_| future::ready(Ok(()))) - ) - } - }).then(|res| { - if let Err(err) = res { - warn!(target: "slots", "Slots stream terminated with an error: {:?}", err); - } - future::ready(()) - }) + return Either::Right(future::ready(Ok(()))); + } + }; + + if let Err(err) = can_author_with.can_author_with(&BlockId::Hash(chain_head.hash())) { + warn!( + target: "slots", + "Unable to author block in slot {},. `can_author_with` returned: {} \ + Probably a node update is required!", + slot_num, + err, + ); + Either::Right(future::ready(Ok(()))) + } else { + Either::Left( + worker + .on_slot(chain_head, slot_info) + .map_err(|e| { + warn!(target: "slots", "Encountered consensus error: {:?}", e); + }) + .or_else(|_| future::ready(Ok(()))), + ) + } + }) + .then(|res| { + if let Err(err) = res { + warn!(target: "slots", "Slots stream terminated with an error: {:?}", err); + } + future::ready(()) + }) } /// A header which has been checked pub enum CheckedHeader { - /// A header which has slot in the future. this is the full header (not stripped) - /// and the slot in which it should be processed. 
- Deferred(H, u64), - /// A header which is fully checked, including signature. This is the pre-header - /// accompanied by the seal components. - /// - /// Includes the digest item that encoded the seal. - Checked(H, S), + /// A header which has slot in the future. this is the full header (not stripped) + /// and the slot in which it should be processed. + Deferred(H, u64), + /// A header which is fully checked, including signature. This is the pre-header + /// accompanied by the seal components. + /// + /// Includes the digest item that encoded the seal. + Checked(H, S), } /// A slot duration. Create with `get_or_compute`. @@ -422,64 +469,66 @@ pub enum CheckedHeader { pub struct SlotDuration(T); impl Deref for SlotDuration { - type Target = T; - fn deref(&self) -> &T { - &self.0 - } + type Target = T; + fn deref(&self) -> &T { + &self.0 + } } impl SlotData for SlotDuration { - /// Get the slot duration in milliseconds. - fn slot_duration(&self) -> u64 - where T: SlotData, - { - self.0.slot_duration() - } - - const SLOT_KEY: &'static [u8] = T::SLOT_KEY; + /// Get the slot duration in milliseconds. + fn slot_duration(&self) -> u64 + where + T: SlotData, + { + self.0.slot_duration() + } + + const SLOT_KEY: &'static [u8] = T::SLOT_KEY; } impl SlotDuration { - /// Either fetch the slot duration from disk or compute it from the - /// genesis state. - /// - /// `slot_key` is marked as `'static`, as it should really be a - /// compile-time constant. - pub fn get_or_compute(client: &C, cb: CB) -> sp_blockchain::Result where - C: sc_client_api::backend::AuxStore, - C: ProvideRuntimeApi, - CB: FnOnce(ApiRef, &BlockId) -> sp_blockchain::Result, - T: SlotData + Encode + Decode + Debug, - { - match client.get_aux(T::SLOT_KEY)? 
{ - Some(v) => ::decode(&mut &v[..]) - .map(SlotDuration) - .map_err(|_| { - sp_blockchain::Error::Backend({ - error!(target: "slots", "slot duration kept in invalid format"); - "slot duration kept in invalid format".to_string() - }) - }), - None => { - use sp_runtime::traits::Zero; - let genesis_slot_duration = - cb(client.runtime_api(), &BlockId::number(Zero::zero()))?; - - info!( - "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", - genesis_slot_duration - ); - - genesis_slot_duration - .using_encoded(|s| client.insert_aux(&[(T::SLOT_KEY, &s[..])], &[]))?; - - Ok(SlotDuration(genesis_slot_duration)) - } - } - } - - /// Returns slot data value. - pub fn get(&self) -> T { - self.0.clone() - } + /// Either fetch the slot duration from disk or compute it from the + /// genesis state. + /// + /// `slot_key` is marked as `'static`, as it should really be a + /// compile-time constant. + pub fn get_or_compute(client: &C, cb: CB) -> sp_blockchain::Result + where + C: sc_client_api::backend::AuxStore, + C: ProvideRuntimeApi, + CB: FnOnce(ApiRef, &BlockId) -> sp_blockchain::Result, + T: SlotData + Encode + Decode + Debug, + { + match client.get_aux(T::SLOT_KEY)? { + Some(v) => ::decode(&mut &v[..]) + .map(SlotDuration) + .map_err(|_| { + sp_blockchain::Error::Backend({ + error!(target: "slots", "slot duration kept in invalid format"); + "slot duration kept in invalid format".to_string() + }) + }), + None => { + use sp_runtime::traits::Zero; + let genesis_slot_duration = + cb(client.runtime_api(), &BlockId::number(Zero::zero()))?; + + info!( + "⏱ Loaded block-time = {:?} milliseconds from genesis on first-launch", + genesis_slot_duration + ); + + genesis_slot_duration + .using_encoded(|s| client.insert_aux(&[(T::SLOT_KEY, &s[..])], &[]))?; + + Ok(SlotDuration(genesis_slot_duration)) + } + } + } + + /// Returns slot data value. 
+ pub fn get(&self) -> T { + self.0.clone() + } } diff --git a/client/consensus/slots/src/slots.rs b/client/consensus/slots/src/slots.rs index 17a931b7c4..50c29b1673 100644 --- a/client/consensus/slots/src/slots.rs +++ b/client/consensus/slots/src/slots.rs @@ -19,149 +19,159 @@ //! This is used instead of `futures_timer::Interval` because it was unreliable. use super::SlotCompatible; -use sp_consensus::Error; use futures::{prelude::*, task::Context, task::Poll}; +use sp_consensus::Error; use sp_inherents::{InherentData, InherentDataProviders}; -use std::{pin::Pin, time::{Duration, Instant}}; use futures_timer::Delay; +use std::{ + pin::Pin, + time::{Duration, Instant}, +}; /// Returns current duration since unix epoch. pub fn duration_now() -> Duration { - use std::time::SystemTime; - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_else(|e| panic!( - "Current time {:?} is before unix epoch. Something is wrong: {:?}", - now, - e, - )) + use std::time::SystemTime; + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .unwrap_or_else(|e| { + panic!( + "Current time {:?} is before unix epoch. Something is wrong: {:?}", + now, e, + ) + }) } - /// A `Duration` with a sign (before or after). Immutable. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] pub struct SignedDuration { - offset: Duration, - is_positive: bool, + offset: Duration, + is_positive: bool, } impl SignedDuration { - /// Construct a `SignedDuration` - pub fn new(offset: Duration, is_positive: bool) -> Self { - Self { offset, is_positive } - } - - /// Get the slot for now. Panics if `slot_duration` is 0. 
- pub fn slot_now(&self, slot_duration: u64) -> u64 { - (if self.is_positive { - duration_now() + self.offset - } else { - duration_now() - self.offset - }.as_millis() as u64) / slot_duration - } + /// Construct a `SignedDuration` + pub fn new(offset: Duration, is_positive: bool) -> Self { + Self { + offset, + is_positive, + } + } + + /// Get the slot for now. Panics if `slot_duration` is 0. + pub fn slot_now(&self, slot_duration: u64) -> u64 { + (if self.is_positive { + duration_now() + self.offset + } else { + duration_now() - self.offset + } + .as_millis() as u64) + / slot_duration + } } /// Returns the duration until the next slot, based on current duration since pub fn time_until_next(now: Duration, slot_duration: u64) -> Duration { - let remaining_full_millis = slot_duration - (now.as_millis() as u64 % slot_duration) - 1; - Duration::from_millis(remaining_full_millis) + let remaining_full_millis = slot_duration - (now.as_millis() as u64 % slot_duration) - 1; + Duration::from_millis(remaining_full_millis) } /// Information about a slot. pub struct SlotInfo { - /// The slot number. - pub number: u64, - /// The last slot number produced. - pub last_number: u64, - /// Current timestamp. - pub timestamp: u64, - /// The instant at which the slot ends. - pub ends_at: Instant, - /// The inherent data. - pub inherent_data: InherentData, - /// Slot duration. - pub duration: u64, + /// The slot number. + pub number: u64, + /// The last slot number produced. + pub last_number: u64, + /// Current timestamp. + pub timestamp: u64, + /// The instant at which the slot ends. + pub ends_at: Instant, + /// The inherent data. + pub inherent_data: InherentData, + /// Slot duration. + pub duration: u64, } /// A stream that returns every time there is a new slot. 
pub(crate) struct Slots { - last_slot: u64, - slot_duration: u64, - inner_delay: Option, - inherent_data_providers: InherentDataProviders, - timestamp_extractor: SC, + last_slot: u64, + slot_duration: u64, + inner_delay: Option, + inherent_data_providers: InherentDataProviders, + timestamp_extractor: SC, } impl Slots { - /// Create a new `Slots` stream. - pub fn new( - slot_duration: u64, - inherent_data_providers: InherentDataProviders, - timestamp_extractor: SC, - ) -> Self { - Slots { - last_slot: 0, - slot_duration, - inner_delay: None, - inherent_data_providers, - timestamp_extractor, - } - } + /// Create a new `Slots` stream. + pub fn new( + slot_duration: u64, + inherent_data_providers: InherentDataProviders, + timestamp_extractor: SC, + ) -> Self { + Slots { + last_slot: 0, + slot_duration, + inner_delay: None, + inherent_data_providers, + timestamp_extractor, + } + } } impl Stream for Slots { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - loop { - let slot_duration = self.slot_duration; - self.inner_delay = match self.inner_delay.take() { - None => { - // schedule wait. - let wait_dur = time_until_next(duration_now(), slot_duration); - Some(Delay::new(wait_dur)) - } - Some(d) => Some(d), - }; - - if let Some(ref mut inner_delay) = self.inner_delay { - match Future::poll(Pin::new(inner_delay), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(()) => {} - } - } - - // timeout has fired. - - let inherent_data = match self.inherent_data_providers.create_inherent_data() { - Ok(id) => id, - Err(err) => return Poll::Ready(Some(Err(sp_consensus::Error::InherentData(err)))), - }; - let result = self.timestamp_extractor.extract_timestamp_and_slot(&inherent_data); - let (timestamp, slot_num, offset) = match result { - Ok(v) => v, - Err(err) => return Poll::Ready(Some(Err(err))), - }; - // reschedule delay for next slot. 
- let ends_in = offset + - time_until_next(Duration::from_millis(timestamp), slot_duration); - let ends_at = Instant::now() + ends_in; - self.inner_delay = Some(Delay::new(ends_in)); - - // never yield the same slot twice. - if slot_num > self.last_slot { - let last_slot = self.last_slot; - self.last_slot = slot_num; - - break Poll::Ready(Some(Ok(SlotInfo { - number: slot_num, - duration: self.slot_duration, - last_number: last_slot, - timestamp, - ends_at, - inherent_data, - }))) - } - } - } + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + loop { + let slot_duration = self.slot_duration; + self.inner_delay = match self.inner_delay.take() { + None => { + // schedule wait. + let wait_dur = time_until_next(duration_now(), slot_duration); + Some(Delay::new(wait_dur)) + } + Some(d) => Some(d), + }; + + if let Some(ref mut inner_delay) = self.inner_delay { + match Future::poll(Pin::new(inner_delay), cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(()) => {} + } + } + + // timeout has fired. + + let inherent_data = match self.inherent_data_providers.create_inherent_data() { + Ok(id) => id, + Err(err) => return Poll::Ready(Some(Err(sp_consensus::Error::InherentData(err)))), + }; + let result = self + .timestamp_extractor + .extract_timestamp_and_slot(&inherent_data); + let (timestamp, slot_num, offset) = match result { + Ok(v) => v, + Err(err) => return Poll::Ready(Some(Err(err))), + }; + // reschedule delay for next slot. + let ends_in = offset + time_until_next(Duration::from_millis(timestamp), slot_duration); + let ends_at = Instant::now() + ends_in; + self.inner_delay = Some(Delay::new(ends_in)); + + // never yield the same slot twice. 
+ if slot_num > self.last_slot { + let last_slot = self.last_slot; + self.last_slot = slot_num; + + break Poll::Ready(Some(Ok(SlotInfo { + number: slot_num, + duration: self.slot_duration, + last_number: last_slot, + timestamp, + ends_at, + inherent_data, + }))); + } + } + } } diff --git a/client/consensus/uncles/src/lib.rs b/client/consensus/uncles/src/lib.rs index 2a129b2000..94db1638c6 100644 --- a/client/consensus/uncles/src/lib.rs +++ b/client/consensus/uncles/src/lib.rs @@ -17,49 +17,47 @@ //! Uncles functionality for Substrate. #![forbid(unsafe_code, missing_docs)] -use sp_consensus::SelectChain; -use sp_inherents::{InherentDataProviders}; use log::warn; use sc_client_api::ProvideUncles; +use sp_authorship; +use sp_consensus::SelectChain; +use sp_inherents::InherentDataProviders; use sp_runtime::traits::{Block as BlockT, Header}; use std::sync::Arc; -use sp_authorship; /// Maximum uncles generations we may provide to the runtime. const MAX_UNCLE_GENERATIONS: u32 = 8; /// Register uncles inherent data provider, if not registered already. 
pub fn register_uncles_inherent_data_provider( - client: Arc, - select_chain: SC, - inherent_data_providers: &InherentDataProviders, -) -> Result<(), sp_consensus::Error> where - B: BlockT, - C: ProvideUncles + Send + Sync + 'static, - SC: SelectChain + 'static, + client: Arc, + select_chain: SC, + inherent_data_providers: &InherentDataProviders, +) -> Result<(), sp_consensus::Error> +where + B: BlockT, + C: ProvideUncles + Send + Sync + 'static, + SC: SelectChain + 'static, { - if !inherent_data_providers.has_provider(&sp_authorship::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_authorship::InherentDataProvider::new(move || { - { - let chain_head = match select_chain.best_chain() { - Ok(x) => x, - Err(e) => { - warn!(target: "uncles", "Unable to get chain head: {:?}", e); - return Vec::new(); - } - }; - match client.uncles(chain_head.hash(), MAX_UNCLE_GENERATIONS.into()) { - Ok(uncles) => uncles, - Err(e) => { - warn!(target: "uncles", "Unable to get uncles: {:?}", e); - Vec::new() - } - } - } - })) - .map_err(|err| sp_consensus::Error::InherentData(err.into()))?; - } - Ok(()) + if !inherent_data_providers.has_provider(&sp_authorship::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(sp_authorship::InherentDataProvider::new(move || { + let chain_head = match select_chain.best_chain() { + Ok(x) => x, + Err(e) => { + warn!(target: "uncles", "Unable to get chain head: {:?}", e); + return Vec::new(); + } + }; + match client.uncles(chain_head.hash(), MAX_UNCLE_GENERATIONS.into()) { + Ok(uncles) => uncles, + Err(e) => { + warn!(target: "uncles", "Unable to get uncles: {:?}", e); + Vec::new() + } + } + })) + .map_err(|err| sp_consensus::Error::InherentData(err.into()))?; + } + Ok(()) } - diff --git a/client/db/src/bench.rs b/client/db/src/bench.rs index ddac2109d7..37dac8b1ee 100644 --- a/client/db/src/bench.rs +++ b/client/db/src/bench.rs @@ -16,283 +16,350 @@ //! 
State backend that's useful for benchmarking -use std::sync::Arc; use std::cell::{Cell, RefCell}; use std::collections::HashMap; +use std::sync::Arc; -use hash_db::{Prefix, Hasher}; -use sp_trie::{MemoryDB, prefixed_key}; +use crate::storage_cache::{new_shared_cache, CachingState, SharedCache}; +use hash_db::{Hasher, Prefix}; +use kvdb::{DBTransaction, KeyValueDB}; use sp_core::storage::ChildInfo; use sp_runtime::traits::{Block as BlockT, HashFor}; use sp_runtime::Storage; -use sp_state_machine::{DBValue, backend::Backend as StateBackend}; -use kvdb::{KeyValueDB, DBTransaction}; -use crate::storage_cache::{CachingState, SharedCache, new_shared_cache}; +use sp_state_machine::{backend::Backend as StateBackend, DBValue}; +use sp_trie::{prefixed_key, MemoryDB}; -type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor ->; +type DbState = + sp_state_machine::TrieBackend>>, HashFor>; type State = CachingState, B>; struct StorageDb { - db: Arc, - _block: std::marker::PhantomData, + db: Arc, + _block: std::marker::PhantomData, } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); - self.db.get(0, &key) - .map_err(|e| format!("Database backend error: {:?}", e)) - } + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + let key = prefixed_key::>(key, prefix); + self.db + .get(0, &key) + .map_err(|e| format!("Database backend error: {:?}", e)) + } } /// State that manages the backend database reference. Allows runtime to control the database. 
pub struct BenchmarkingState { - root: Cell, - genesis_root: B::Hash, - state: RefCell>>, - db: Cell>>, - genesis: HashMap, (Vec, i32)>, - record: Cell>>, - shared_cache: SharedCache, // shared cache is always empty + root: Cell, + genesis_root: B::Hash, + state: RefCell>>, + db: Cell>>, + genesis: HashMap, (Vec, i32)>, + record: Cell>>, + shared_cache: SharedCache, // shared cache is always empty } impl BenchmarkingState { - /// Create a new instance that creates a database in a temporary dir. - pub fn new(genesis: Storage, _cache_size_mb: Option) -> Result { - let mut root = B::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); - - let mut state = BenchmarkingState { - state: RefCell::new(None), - db: Cell::new(None), - root: Cell::new(root), - genesis: Default::default(), - genesis_root: Default::default(), - record: Default::default(), - shared_cache: new_shared_cache(0, (1, 10)), - }; - - state.reopen()?; - let child_delta = genesis.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, - child_content.data.into_iter().map(|(k, v)| (k, Some(v))), - child_content.child_info - )); - let (root, transaction): (B::Hash, _) = state.state.borrow_mut().as_mut().unwrap().full_storage_root( - genesis.top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta, - ); - state.genesis = transaction.clone().drain(); - state.genesis_root = root.clone(); - state.commit(root, transaction)?; - state.record.take(); - Ok(state) - } - - fn reopen(&self) -> Result<(), String> { - *self.state.borrow_mut() = None; - let db = match self.db.take() { - Some(db) => db, - None => Arc::new(::kvdb_memorydb::create(1)), - }; - self.db.set(Some(db.clone())); - let storage_db = Arc::new(StorageDb:: { db, _block: Default::default() }); - *self.state.borrow_mut() = Some(State::new( - DbState::::new(storage_db, self.root.get()), - self.shared_cache.clone(), - None - )); - Ok(()) - } + /// Create a new instance that 
creates a database in a temporary dir. + pub fn new(genesis: Storage, _cache_size_mb: Option) -> Result { + let mut root = B::Hash::default(); + let mut mdb = MemoryDB::>::default(); + sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); + + let mut state = BenchmarkingState { + state: RefCell::new(None), + db: Cell::new(None), + root: Cell::new(root), + genesis: Default::default(), + genesis_root: Default::default(), + record: Default::default(), + shared_cache: new_shared_cache(0, (1, 10)), + }; + + state.reopen()?; + let child_delta = genesis + .children + .into_iter() + .map(|(storage_key, child_content)| { + ( + storage_key, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), + child_content.child_info, + ) + }); + let (root, transaction): (B::Hash, _) = state + .state + .borrow_mut() + .as_mut() + .unwrap() + .full_storage_root( + genesis.top.into_iter().map(|(k, v)| (k, Some(v))), + child_delta, + ); + state.genesis = transaction.clone().drain(); + state.genesis_root = root.clone(); + state.commit(root, transaction)?; + state.record.take(); + Ok(state) + } + + fn reopen(&self) -> Result<(), String> { + *self.state.borrow_mut() = None; + let db = match self.db.take() { + Some(db) => db, + None => Arc::new(::kvdb_memorydb::create(1)), + }; + self.db.set(Some(db.clone())); + let storage_db = Arc::new(StorageDb:: { + db, + _block: Default::default(), + }); + *self.state.borrow_mut() = Some(State::new( + DbState::::new(storage_db, self.root.get()), + self.shared_cache.clone(), + None, + )); + Ok(()) + } } fn state_err() -> String { - "State is not open".into() + "State is not open".into() } impl StateBackend> for BenchmarkingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.storage(key) - } - - fn storage_hash(&self, 
key: &[u8]) -> Result, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.storage_hash(key) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.child_storage(storage_key, child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_storage(key) - } - - fn exists_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result { - self.state.borrow().as_ref().ok_or_else(state_err)?.exists_child_storage(storage_key, child_info, key) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.borrow().as_ref().ok_or_else(state_err)?.next_child_storage_key(storage_key, child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - if let Some(ref state) = *self.state.borrow() { - state.for_keys_with_prefix(prefix, f) - } - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - if let Some(ref state) = *self.state.borrow() { - state.for_key_values_with_prefix(prefix, f) - } - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ) { - if let Some(ref state) = *self.state.borrow() { - state.for_keys_in_child_storage(storage_key, child_info, f) - } - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - if let Some(ref state) = *self.state.borrow() { - state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) - } - } - - fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) where - I: IntoIterator, Option>)> - { - 
self.state.borrow().as_ref().map_or(Default::default(), |s| s.storage_root(delta)) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (B::Hash, bool, Self::Transaction) where - I: IntoIterator, Option>)>, - { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_storage_root(storage_key, child_info, delta)) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.pairs()) - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.keys(prefix)) - } - - fn child_keys( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) -> Vec> { - self.state.borrow().as_ref().map_or(Default::default(), |s| s.child_keys(storage_key, child_info, prefix)) - } - - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { - None - } - - fn commit(&self, storage_root: as Hasher>::Out, mut transaction: Self::Transaction) - -> Result<(), Self::Error> - { - if let Some(db) = self.db.take() { - let mut db_transaction = DBTransaction::new(); - let changes = transaction.drain(); - let mut keys = Vec::with_capacity(changes.len()); - for (key, (val, rc)) in changes { - if rc > 0 { - db_transaction.put(0, &key, &val); - } else if rc < 0 { - db_transaction.delete(0, &key); - } - keys.push(key); - } - self.record.set(keys); - db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; - self.root.set(storage_root); - self.db.set(Some(db)) - } else { - return Err("Trying to commit to a closed db".into()) - } - self.reopen() - } - - fn wipe(&self) -> Result<(), Self::Error> { - // Restore to genesis - let record = self.record.take(); - if let Some(db) = self.db.take() { - let mut db_transaction = DBTransaction::new(); - for key in record { - match self.genesis.get(&key) { - Some((v, _)) => db_transaction.put(0, &key, v), - None => db_transaction.delete(0, 
&key), - } - } - db.write(db_transaction).map_err(|_| String::from("Error committing transaction"))?; - self.db.set(Some(db)); - } - - self.root.set(self.genesis_root.clone()); - self.reopen()?; - Ok(()) - } - - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { - self.state.borrow_mut().as_mut().map(|s| s.register_overlay_stats(stats)); - } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - self.state.borrow().as_ref().map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) - } + type Error = as StateBackend>>::Error; + type Transaction = as StateBackend>>::Transaction; + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .storage(key) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .storage_hash(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .child_storage(storage_key, child_info, key) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .exists_storage(key) + } + + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .exists_child_storage(storage_key, child_info, key) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? + .next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state + .borrow() + .as_ref() + .ok_or_else(state_err)? 
+ .next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + if let Some(ref state) = *self.state.borrow() { + state.for_keys_with_prefix(prefix, f) + } + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + if let Some(ref state) = *self.state.borrow() { + state.for_key_values_with_prefix(prefix, f) + } + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + if let Some(ref state) = *self.state.borrow() { + state.for_keys_in_child_storage(storage_key, child_info, f) + } + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + if let Some(ref state) = *self.state.borrow() { + state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + } + + fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.storage_root(delta)) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (B::Hash, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| { + s.child_storage_root(storage_key, child_info, delta) + }) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.pairs()) + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| s.keys(prefix)) + } + + fn child_keys(&self, storage_key: &[u8], child_info: ChildInfo, prefix: &[u8]) -> Vec> { + self.state + .borrow() + .as_ref() + .map_or(Default::default(), |s| { + s.child_keys(storage_key, child_info, prefix) + }) + } + + fn as_trie_backend( + &mut self, + ) -> Option<&sp_state_machine::TrieBackend>> { + None + } + + fn commit( + &self, + 
storage_root: as Hasher>::Out, + mut transaction: Self::Transaction, + ) -> Result<(), Self::Error> { + if let Some(db) = self.db.take() { + let mut db_transaction = DBTransaction::new(); + let changes = transaction.drain(); + let mut keys = Vec::with_capacity(changes.len()); + for (key, (val, rc)) in changes { + if rc > 0 { + db_transaction.put(0, &key, &val); + } else if rc < 0 { + db_transaction.delete(0, &key); + } + keys.push(key); + } + self.record.set(keys); + db.write(db_transaction) + .map_err(|_| String::from("Error committing transaction"))?; + self.root.set(storage_root); + self.db.set(Some(db)) + } else { + return Err("Trying to commit to a closed db".into()); + } + self.reopen() + } + + fn wipe(&self) -> Result<(), Self::Error> { + // Restore to genesis + let record = self.record.take(); + if let Some(db) = self.db.take() { + let mut db_transaction = DBTransaction::new(); + for key in record { + match self.genesis.get(&key) { + Some((v, _)) => db_transaction.put(0, &key, v), + None => db_transaction.delete(0, &key), + } + } + db.write(db_transaction) + .map_err(|_| String::from("Error committing transaction"))?; + self.db.set(Some(db)); + } + + self.root.set(self.genesis_root.clone()); + self.reopen()?; + Ok(()) + } + + fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + self.state + .borrow_mut() + .as_mut() + .map(|s| s.register_overlay_stats(stats)); + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + self.state + .borrow() + .as_ref() + .map_or(sp_state_machine::UsageInfo::empty(), |s| s.usage_info()) + } } impl std::fmt::Debug for BenchmarkingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Bench DB") - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Bench DB") + } } diff --git a/client/db/src/cache/list_cache.rs b/client/db/src/cache/list_cache.rs index f3a8171342..c121aa243a 100644 --- 
a/client/db/src/cache/list_cache.rs +++ b/client/db/src/cache/list_cache.rs @@ -39,1739 +39,2969 @@ //! Finalized entry E1 is pruned when block B is finalized so that: //! EntryAt(B.number - prune_depth).points_to(E1) -use std::collections::{BTreeSet, BTreeMap}; +use std::collections::{BTreeMap, BTreeSet}; use log::warn; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, Zero, Bounded, CheckedSub -}; +use sp_runtime::traits::{Block as BlockT, Bounded, CheckedSub, NumberFor, Zero}; -use crate::cache::{CacheItemT, ComplexBlockId, EntryType}; use crate::cache::list_entry::{Entry, StorageEntry}; -use crate::cache::list_storage::{Storage, StorageTransaction, Metadata}; +use crate::cache::list_storage::{Metadata, Storage, StorageTransaction}; +use crate::cache::{CacheItemT, ComplexBlockId, EntryType}; /// Pruning strategy. #[derive(Debug, Clone, Copy)] pub enum PruningStrategy { - /// Prune entries when they're too far behind best finalized block. - ByDepth(N), - /// Do not prune old entries at all. - NeverPrune, + /// Prune entries when they're too far behind best finalized block. + ByDepth(N), + /// Do not prune old entries at all. + NeverPrune, } /// List-based cache. pub struct ListCache> { - /// Cache storage. - storage: S, - /// Pruning strategy. - pruning_strategy: PruningStrategy>, - /// Best finalized block. - best_finalized_block: ComplexBlockId, - /// Best finalized entry (if exists). - best_finalized_entry: Option>, - /// All unfinalized 'forks'. - unfinalized: Vec>, + /// Cache storage. + storage: S, + /// Pruning strategy. + pruning_strategy: PruningStrategy>, + /// Best finalized block. + best_finalized_block: ComplexBlockId, + /// Best finalized entry (if exists). + best_finalized_entry: Option>, + /// All unfinalized 'forks'. + unfinalized: Vec>, } /// All possible list cache operations that could be performed after transaction is committed. 
#[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub enum CommitOperation { - /// New block is appended to the fork without changing the cached value. - AppendNewBlock(usize, ComplexBlockId), - /// New block is appended to the fork with the different value. - AppendNewEntry(usize, Entry), - /// New fork is added with the given head entry. - AddNewFork(Entry), - /// New block is finalized and possibly: - /// - new entry is finalized AND/OR - /// - some forks are destroyed - BlockFinalized(ComplexBlockId, Option>, BTreeSet), - /// When best block is reverted - contains the forks that have to be updated - /// (they're either destroyed, or their best entry is updated to earlier block). - BlockReverted(BTreeMap>>), + /// New block is appended to the fork without changing the cached value. + AppendNewBlock(usize, ComplexBlockId), + /// New block is appended to the fork with the different value. + AppendNewEntry(usize, Entry), + /// New fork is added with the given head entry. + AddNewFork(Entry), + /// New block is finalized and possibly: + /// - new entry is finalized AND/OR + /// - some forks are destroyed + BlockFinalized( + ComplexBlockId, + Option>, + BTreeSet, + ), + /// When best block is reverted - contains the forks that have to be updated + /// (they're either destroyed, or their best entry is updated to earlier block). + BlockReverted(BTreeMap>>), } /// A set of commit operations. #[derive(Debug)] pub struct CommitOperations { - operations: Vec>, + operations: Vec>, } /// Single fork of list-based cache. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub struct Fork { - /// The best block of this fork. We do not save this field in the database to avoid - /// extra updates => it could be None after restart. It will be either filled when - /// the block is appended to this fork, or the whole fork will be abandoned when the - /// block from the other fork is finalized - best_block: Option>, - /// The head entry of this fork. 
- head: Entry, + /// The best block of this fork. We do not save this field in the database to avoid + /// extra updates => it could be None after restart. It will be either filled when + /// the block is appended to this fork, or the whole fork will be abandoned when the + /// block from the other fork is finalized + best_block: Option>, + /// The head entry of this fork. + head: Entry, } /// Outcome of Fork::try_append_or_fork. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub enum ForkAppendResult { - /// New entry should be appended to the end of the fork. - Append, - /// New entry should be forked from the fork, starting with entry at given block. - Fork(ComplexBlockId), + /// New entry should be appended to the end of the fork. + Append, + /// New entry should be forked from the fork, starting with entry at given block. + Fork(ComplexBlockId), } impl> ListCache { - /// Create new db list cache entry. - pub fn new( - storage: S, - pruning_strategy: PruningStrategy>, - best_finalized_block: ComplexBlockId, - ) -> ClientResult { - let (best_finalized_entry, unfinalized) = storage.read_meta() - .and_then(|meta| read_forks(&storage, meta))?; - - Ok(ListCache { - storage, - pruning_strategy, - best_finalized_block, - best_finalized_entry, - unfinalized, - }) - } - - /// Get reference to the storage. - pub fn storage(&self) -> &S { - &self.storage - } - - /// Get unfinalized forks reference. - #[cfg(test)] - pub fn unfinalized(&self) -> &[Fork] { - &self.unfinalized - } - - /// Get value valid at block. 
- pub fn value_at_block( - &self, - at: &ComplexBlockId, - ) -> ClientResult, Option>, T)>> { - let head = if at.number <= self.best_finalized_block.number { - // if the block is older than the best known finalized block - // => we're should search for the finalized value - - // BUT since we're not guaranteeing to provide correct values for forks - // behind the finalized block, check if the block is finalized first - if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? { - return Err(ClientError::NotInFinalizedChain); - } - - self.best_finalized_entry.as_ref() - } else if self.unfinalized.is_empty() { - // there are no unfinalized entries - // => we should search for the finalized value - self.best_finalized_entry.as_ref() - } else { - // there are unfinalized entries - // => find the fork containing given block and read from this fork - // IF there's no matching fork, ensure that this isn't a block from a fork that has forked - // behind the best finalized block and search at finalized fork - - match self.find_unfinalized_fork(&at)? { - Some(fork) => Some(&fork.head), - None => match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) if chain::is_connected_to_block( - &self.storage, - &at, - &best_finalized_entry.valid_from, - )? => Some(best_finalized_entry), - _ => None, - }, - } - }; - - match head { - Some(head) => head.search_best_before(&self.storage, at.number) - .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), - None => Ok(None), - } - } - - /// When new block is inserted into database. - /// - /// None passed as value means that the value has not changed since previous block. 
- pub fn on_block_insert>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - value: Option, - entry_type: EntryType, - operations: &mut CommitOperations, - ) -> ClientResult<()> { - Ok(operations.append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) - } - - /// When previously inserted block is finalized. - pub fn on_block_finalize>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - operations: &mut CommitOperations, - ) -> ClientResult<()> { - Ok(operations.append(self.do_on_block_finalize(tx, parent, block, operations)?)) - } - - /// When block is reverted. - pub fn on_block_revert>( - &self, - tx: &mut Tx, - reverted_block: &ComplexBlockId, - operations: &mut CommitOperations, - ) -> ClientResult<()> { - Ok(operations.append(Some(self.do_on_block_revert(tx, reverted_block)?))) - } - - /// When transaction is committed. - pub fn on_transaction_commit(&mut self, ops: CommitOperations) { - for op in ops.operations { - match op { - CommitOperation::AppendNewBlock(index, best_block) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; + /// Create new db list cache entry. + pub fn new( + storage: S, + pruning_strategy: PruningStrategy>, + best_finalized_block: ComplexBlockId, + ) -> ClientResult { + let (best_finalized_entry, unfinalized) = storage + .read_meta() + .and_then(|meta| read_forks(&storage, meta))?; + + Ok(ListCache { + storage, + pruning_strategy, + best_finalized_block, + best_finalized_entry, + unfinalized, + }) + } + + /// Get reference to the storage. + pub fn storage(&self) -> &S { + &self.storage + } + + /// Get unfinalized forks reference. + #[cfg(test)] + pub fn unfinalized(&self) -> &[Fork] { + &self.unfinalized + } + + /// Get value valid at block. 
+ pub fn value_at_block( + &self, + at: &ComplexBlockId, + ) -> ClientResult, Option>, T)>> { + let head = if at.number <= self.best_finalized_block.number { + // if the block is older than the best known finalized block + // => we're should search for the finalized value + + // BUT since we're not guaranteeing to provide correct values for forks + // behind the finalized block, check if the block is finalized first + if !chain::is_finalized_block(&self.storage, &at, Bounded::max_value())? { + return Err(ClientError::NotInFinalizedChain); + } + + self.best_finalized_entry.as_ref() + } else if self.unfinalized.is_empty() { + // there are no unfinalized entries + // => we should search for the finalized value + self.best_finalized_entry.as_ref() + } else { + // there are unfinalized entries + // => find the fork containing given block and read from this fork + // IF there's no matching fork, ensure that this isn't a block from a fork that has forked + // behind the best finalized block and search at finalized fork + + match self.find_unfinalized_fork(&at)? { + Some(fork) => Some(&fork.head), + None => match self.best_finalized_entry.as_ref() { + Some(best_finalized_entry) + if chain::is_connected_to_block( + &self.storage, + &at, + &best_finalized_entry.valid_from, + )? => + { + Some(best_finalized_entry) + } + _ => None, + }, + } + }; + + match head { + Some(head) => head + .search_best_before(&self.storage, at.number) + .map(|e| e.map(|e| (e.0.valid_from, e.1, e.0.value))), + None => Ok(None), + } + } + + /// When new block is inserted into database. + /// + /// None passed as value means that the value has not changed since previous block. 
+ pub fn on_block_insert>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + value: Option, + entry_type: EntryType, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations + .append(self.do_on_block_insert(tx, parent, block, value, entry_type, operations)?)) + } + + /// When previously inserted block is finalized. + pub fn on_block_finalize>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(self.do_on_block_finalize(tx, parent, block, operations)?)) + } + + /// When block is reverted. + pub fn on_block_revert>( + &self, + tx: &mut Tx, + reverted_block: &ComplexBlockId, + operations: &mut CommitOperations, + ) -> ClientResult<()> { + Ok(operations.append(Some(self.do_on_block_revert(tx, reverted_block)?))) + } + + /// When transaction is committed. + pub fn on_transaction_commit(&mut self, ops: CommitOperations) { + for op in ops.operations { + match op { + CommitOperation::AppendNewBlock(index, best_block) => { + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(best_block); - }, - CommitOperation::AppendNewEntry(index, entry) => { - let mut fork = self.unfinalized.get_mut(index) - .expect("ListCache is a crate-private type; + CommitOperation holds valid references while cache is locked; qed", + ); + fork.best_block = Some(best_block); + } + CommitOperation::AppendNewEntry(index, entry) => { + let mut fork = self.unfinalized.get_mut(index).expect( + "ListCache is a crate-private type; internal clients of ListCache are committing transaction while cache is locked; - CommitOperation holds valid references while cache is locked; qed"); - fork.best_block = Some(entry.valid_from.clone()); - 
fork.head = entry; - }, - CommitOperation::AddNewFork(entry) => { - self.unfinalized.push(Fork { - best_block: Some(entry.valid_from.clone()), - head: entry, - }); - }, - CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { - self.best_finalized_block = block; - if let Some(finalizing_entry) = finalizing_entry { - self.best_finalized_entry = Some(finalizing_entry); - } - for fork_index in forks.iter().rev() { - self.unfinalized.remove(*fork_index); - } - }, - CommitOperation::BlockReverted(forks) => { - for (fork_index, updated_fork) in forks.into_iter().rev() { - match updated_fork { - Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, - None => { self.unfinalized.remove(fork_index); }, - } - } - }, - } - } - } - - fn do_on_block_insert>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - value: Option, - entry_type: EntryType, - operations: &CommitOperations, - ) -> ClientResult>> { - // this guarantee is currently provided by LightStorage && we're relying on it here - let prev_operation = operations.operations.last(); - debug_assert!( - entry_type != EntryType::Final || - self.best_finalized_block.hash == parent.hash || - match prev_operation { - Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) - => best_finalized_block.hash == parent.hash, - _ => false, - } - ); - - // we do not store any values behind finalized - if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { - return Ok(None); - } - - // if the block is not final, it is possibly appended to/forking from existing unfinalized fork - let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis; - if !is_final { - let mut fork_and_action = None; - - // when value hasn't changed and block isn't final, there's nothing we need to do - if value.is_none() { - return Ok(None); - } - - // first: try to find fork that is known to has the best block we're appending to - for (index, 
fork) in self.unfinalized.iter().enumerate() { - if fork.try_append(&parent) { - fork_and_action = Some((index, ForkAppendResult::Append)); - break; - } - } - - // if not found, check cases: - // - we're appending to the fork for the first time after restart; - // - we're forking existing unfinalized fork from the middle; - if fork_and_action.is_none() { - let best_finalized_entry_block = self.best_finalized_entry.as_ref().map(|f| f.valid_from.number); - for (index, fork) in self.unfinalized.iter().enumerate() { - if let Some(action) = fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? { - fork_and_action = Some((index, action)); - break; - } - } - } - - // if we have found matching unfinalized fork => early exit - match fork_and_action { - // append to unfinalized fork - Some((index, ForkAppendResult::Append)) => { - let new_storage_entry = match self.unfinalized[index].head.try_update(value) { - Some(new_storage_entry) => new_storage_entry, - None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))), - }; - - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); - }, - // fork from the middle of unfinalized fork - Some((_, ForkAppendResult::Fork(prev_valid_from))) => { - // it is possible that we're inserting extra (but still required) fork here - let new_storage_entry = StorageEntry { - prev_valid_from: Some(prev_valid_from), - value: value.expect("checked above that !value.is_none(); qed"), - }; - - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - return Ok(Some(operation)); - }, - None => (), - } - } - - // if we're here, then one of following 
is true: - // - either we're inserting final block => all ancestors are already finalized AND the only thing we can do - // is to try to update last finalized entry - // - either we're inserting non-final blocks that has no ancestors in any known unfinalized forks - - let new_storage_entry = match self.best_finalized_entry.as_ref() { - Some(best_finalized_entry) => best_finalized_entry.try_update(value), - None if value.is_some() => Some(StorageEntry { - prev_valid_from: None, - value: value.expect("value.is_some(); qed"), - }), - None => None, - }; - - if !is_final { - return Ok(match new_storage_entry { - Some(new_storage_entry) => { - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - Some(operation) - }, - None => None, - }); - } - - // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); - self.prune_finalized_entries(tx, &block); - - match new_storage_entry { - Some(new_storage_entry) => { - tx.insert_storage_entry(&block, &new_storage_entry); - let operation = CommitOperation::BlockFinalized(block.clone(), Some(new_storage_entry.into_entry(block)), abandoned_forks); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - Ok(Some(operation)) - }, - None => Ok(Some(CommitOperation::BlockFinalized(block, None, abandoned_forks))), - } - } - - fn do_on_block_finalize>( - &self, - tx: &mut Tx, - parent: ComplexBlockId, - block: ComplexBlockId, - operations: &CommitOperations, - ) -> ClientResult>> { - // this guarantee is currently provided by db backend && we're relying on it here - let prev_operation = operations.operations.last(); - debug_assert!( - self.best_finalized_block.hash == parent.hash || - match prev_operation { - 
Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) - => best_finalized_block.hash == parent.hash, - _ => false, - } - ); - - // there could be at most one entry that is finalizing - let finalizing_entry = self.storage.read_entry(&block)? - .map(|entry| entry.into_entry(block.clone())); - - // cleanup database from abandoned unfinalized forks and obsolete finalized entries - let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); - self.prune_finalized_entries(tx, &block); - - let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - - Ok(Some(operation)) - } - - fn do_on_block_revert>( - &self, - tx: &mut Tx, - reverted_block: &ComplexBlockId, - ) -> ClientResult> { - // can't revert finalized blocks - debug_assert!(self.best_finalized_block.number < reverted_block.number); - - // iterate all unfinalized forks and truncate/destroy if required - let mut updated = BTreeMap::new(); - for (index, fork) in self.unfinalized.iter().enumerate() { - // we only need to truncate fork if its head is ancestor of truncated block - if fork.head.valid_from.number < reverted_block.number { - continue; - } - - // we only need to truncate fork if its head is connected to truncated block - if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)? { - continue; - } - - let updated_fork = fork.truncate( - &self.storage, - tx, - reverted_block.number, - self.best_finalized_block.number, - )?; - updated.insert(index, updated_fork); - } - - // schedule commit operation and update meta - let operation = CommitOperation::BlockReverted(updated); - tx.update_meta(self.best_finalized_entry.as_ref(), &self.unfinalized, &operation); - - Ok(operation) - } - - /// Prune old finalized entries. 
- fn prune_finalized_entries>( - &self, - tx: &mut Tx, - block: &ComplexBlockId - ) { - let prune_depth = match self.pruning_strategy { - PruningStrategy::ByDepth(prune_depth) => prune_depth, - PruningStrategy::NeverPrune => return, - }; - - let mut do_pruning = || -> ClientResult<()> { - // calculate last ancient block number - let ancient_block = match block.number.checked_sub(&prune_depth) { - Some(number) => match self.storage.read_id(number)? { - Some(hash) => ComplexBlockId::new(hash, number), - None => return Ok(()), - }, - None => return Ok(()), - }; - - // if there's an entry at this block: - // - remove reference from this entry to the previous entry - // - destroy fork starting with previous entry - let current_entry = match self.storage.read_entry(&ancient_block)? { - Some(current_entry) => current_entry, - None => return Ok(()), - }; - let first_entry_to_truncate = match current_entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(()), - }; - - // truncate ancient entry - tx.insert_storage_entry(&ancient_block, &StorageEntry { - prev_valid_from: None, - value: current_entry.value, - }); - - // destroy 'fork' ending with previous entry - destroy_fork( - first_entry_to_truncate, - &self.storage, - tx, - None, - ) - }; - - if let Err(error) = do_pruning() { - warn!(target: "db", "Failed to prune ancient cache entries: {}", error); - } - } - - /// Try to destroy abandoned forks (forked before best finalized block) when block is finalized. 
- fn destroy_abandoned_forks>( - &self, - tx: &mut Tx, - block: &ComplexBlockId, - prev_operation: Option<&CommitOperation>, - ) -> BTreeSet { - // if some block has been finalized already => take it into account - let prev_abandoned_forks = match prev_operation { - Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => Some(abandoned_forks), - _ => None, - }; - - let mut destroyed = prev_abandoned_forks.cloned().unwrap_or_else(|| BTreeSet::new()); - let live_unfinalized = self.unfinalized.iter() - .enumerate() - .filter(|(idx, _)| prev_abandoned_forks - .map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx)) - .unwrap_or(true)); - for (index, fork) in live_unfinalized { - if fork.head.valid_from.number == block.number { - destroyed.insert(index); - if fork.head.valid_from.hash != block.hash { - if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) { - warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error); - } - } - } - } - - destroyed - } - - /// Search unfinalized fork where given block belongs. - fn find_unfinalized_fork( - &self, - block: &ComplexBlockId, - ) -> ClientResult>> { - for unfinalized in &self.unfinalized { - if unfinalized.matches(&self.storage, block)? 
{ - return Ok(Some(&unfinalized)); - } - } - - Ok(None) - } + CommitOperation holds valid references while cache is locked; qed", + ); + fork.best_block = Some(entry.valid_from.clone()); + fork.head = entry; + } + CommitOperation::AddNewFork(entry) => { + self.unfinalized.push(Fork { + best_block: Some(entry.valid_from.clone()), + head: entry, + }); + } + CommitOperation::BlockFinalized(block, finalizing_entry, forks) => { + self.best_finalized_block = block; + if let Some(finalizing_entry) = finalizing_entry { + self.best_finalized_entry = Some(finalizing_entry); + } + for fork_index in forks.iter().rev() { + self.unfinalized.remove(*fork_index); + } + } + CommitOperation::BlockReverted(forks) => { + for (fork_index, updated_fork) in forks.into_iter().rev() { + match updated_fork { + Some(updated_fork) => self.unfinalized[fork_index] = updated_fork, + None => { + self.unfinalized.remove(fork_index); + } + } + } + } + } + } + } + + fn do_on_block_insert>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + value: Option, + entry_type: EntryType, + operations: &CommitOperations, + ) -> ClientResult>> { + // this guarantee is currently provided by LightStorage && we're relying on it here + let prev_operation = operations.operations.last(); + debug_assert!( + entry_type != EntryType::Final + || self.best_finalized_block.hash == parent.hash + || match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => + best_finalized_block.hash == parent.hash, + _ => false, + } + ); + + // we do not store any values behind finalized + if block.number != Zero::zero() && self.best_finalized_block.number >= block.number { + return Ok(None); + } + + // if the block is not final, it is possibly appended to/forking from existing unfinalized fork + let is_final = entry_type == EntryType::Final || entry_type == EntryType::Genesis; + if !is_final { + let mut fork_and_action = None; + + // when value hasn't changed and block 
isn't final, there's nothing we need to do + if value.is_none() { + return Ok(None); + } + + // first: try to find fork that is known to has the best block we're appending to + for (index, fork) in self.unfinalized.iter().enumerate() { + if fork.try_append(&parent) { + fork_and_action = Some((index, ForkAppendResult::Append)); + break; + } + } + + // if not found, check cases: + // - we're appending to the fork for the first time after restart; + // - we're forking existing unfinalized fork from the middle; + if fork_and_action.is_none() { + let best_finalized_entry_block = self + .best_finalized_entry + .as_ref() + .map(|f| f.valid_from.number); + for (index, fork) in self.unfinalized.iter().enumerate() { + if let Some(action) = + fork.try_append_or_fork(&self.storage, &parent, best_finalized_entry_block)? + { + fork_and_action = Some((index, action)); + break; + } + } + } + + // if we have found matching unfinalized fork => early exit + match fork_and_action { + // append to unfinalized fork + Some((index, ForkAppendResult::Append)) => { + let new_storage_entry = match self.unfinalized[index].head.try_update(value) { + Some(new_storage_entry) => new_storage_entry, + None => return Ok(Some(CommitOperation::AppendNewBlock(index, block))), + }; + + tx.insert_storage_entry(&block, &new_storage_entry); + let operation = + CommitOperation::AppendNewEntry(index, new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + return Ok(Some(operation)); + } + // fork from the middle of unfinalized fork + Some((_, ForkAppendResult::Fork(prev_valid_from))) => { + // it is possible that we're inserting extra (but still required) fork here + let new_storage_entry = StorageEntry { + prev_valid_from: Some(prev_valid_from), + value: value.expect("checked above that !value.is_none(); qed"), + }; + + tx.insert_storage_entry(&block, &new_storage_entry); + let operation = + 
CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + return Ok(Some(operation)); + } + None => (), + } + } + + // if we're here, then one of following is true: + // - either we're inserting final block => all ancestors are already finalized AND the only thing we can do + // is to try to update last finalized entry + // - either we're inserting non-final blocks that has no ancestors in any known unfinalized forks + + let new_storage_entry = match self.best_finalized_entry.as_ref() { + Some(best_finalized_entry) => best_finalized_entry.try_update(value), + None if value.is_some() => Some(StorageEntry { + prev_valid_from: None, + value: value.expect("value.is_some(); qed"), + }), + None => None, + }; + + if !is_final { + return Ok(match new_storage_entry { + Some(new_storage_entry) => { + tx.insert_storage_entry(&block, &new_storage_entry); + let operation = + CommitOperation::AddNewFork(new_storage_entry.into_entry(block)); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + Some(operation) + } + None => None, + }); + } + + // cleanup database from abandoned unfinalized forks and obsolete finalized entries + let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); + self.prune_finalized_entries(tx, &block); + + match new_storage_entry { + Some(new_storage_entry) => { + tx.insert_storage_entry(&block, &new_storage_entry); + let operation = CommitOperation::BlockFinalized( + block.clone(), + Some(new_storage_entry.into_entry(block)), + abandoned_forks, + ); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + Ok(Some(operation)) + } + None => Ok(Some(CommitOperation::BlockFinalized( + block, + None, + abandoned_forks, + ))), + } + } + + fn do_on_block_finalize>( + &self, + tx: &mut Tx, + parent: ComplexBlockId, + block: ComplexBlockId, + 
operations: &CommitOperations, + ) -> ClientResult>> { + // this guarantee is currently provided by db backend && we're relying on it here + let prev_operation = operations.operations.last(); + debug_assert!( + self.best_finalized_block.hash == parent.hash + || match prev_operation { + Some(&CommitOperation::BlockFinalized(ref best_finalized_block, _, _)) => + best_finalized_block.hash == parent.hash, + _ => false, + } + ); + + // there could be at most one entry that is finalizing + let finalizing_entry = self + .storage + .read_entry(&block)? + .map(|entry| entry.into_entry(block.clone())); + + // cleanup database from abandoned unfinalized forks and obsolete finalized entries + let abandoned_forks = self.destroy_abandoned_forks(tx, &block, prev_operation); + self.prune_finalized_entries(tx, &block); + + let operation = CommitOperation::BlockFinalized(block, finalizing_entry, abandoned_forks); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + + Ok(Some(operation)) + } + + fn do_on_block_revert>( + &self, + tx: &mut Tx, + reverted_block: &ComplexBlockId, + ) -> ClientResult> { + // can't revert finalized blocks + debug_assert!(self.best_finalized_block.number < reverted_block.number); + + // iterate all unfinalized forks and truncate/destroy if required + let mut updated = BTreeMap::new(); + for (index, fork) in self.unfinalized.iter().enumerate() { + // we only need to truncate fork if its head is ancestor of truncated block + if fork.head.valid_from.number < reverted_block.number { + continue; + } + + // we only need to truncate fork if its head is connected to truncated block + if !chain::is_connected_to_block(&self.storage, reverted_block, &fork.head.valid_from)? 
+ { + continue; + } + + let updated_fork = fork.truncate( + &self.storage, + tx, + reverted_block.number, + self.best_finalized_block.number, + )?; + updated.insert(index, updated_fork); + } + + // schedule commit operation and update meta + let operation = CommitOperation::BlockReverted(updated); + tx.update_meta( + self.best_finalized_entry.as_ref(), + &self.unfinalized, + &operation, + ); + + Ok(operation) + } + + /// Prune old finalized entries. + fn prune_finalized_entries>( + &self, + tx: &mut Tx, + block: &ComplexBlockId, + ) { + let prune_depth = match self.pruning_strategy { + PruningStrategy::ByDepth(prune_depth) => prune_depth, + PruningStrategy::NeverPrune => return, + }; + + let mut do_pruning = || -> ClientResult<()> { + // calculate last ancient block number + let ancient_block = match block.number.checked_sub(&prune_depth) { + Some(number) => match self.storage.read_id(number)? { + Some(hash) => ComplexBlockId::new(hash, number), + None => return Ok(()), + }, + None => return Ok(()), + }; + + // if there's an entry at this block: + // - remove reference from this entry to the previous entry + // - destroy fork starting with previous entry + let current_entry = match self.storage.read_entry(&ancient_block)? { + Some(current_entry) => current_entry, + None => return Ok(()), + }; + let first_entry_to_truncate = match current_entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(()), + }; + + // truncate ancient entry + tx.insert_storage_entry( + &ancient_block, + &StorageEntry { + prev_valid_from: None, + value: current_entry.value, + }, + ); + + // destroy 'fork' ending with previous entry + destroy_fork(first_entry_to_truncate, &self.storage, tx, None) + }; + + if let Err(error) = do_pruning() { + warn!(target: "db", "Failed to prune ancient cache entries: {}", error); + } + } + + /// Try to destroy abandoned forks (forked before best finalized block) when block is finalized. 
+ fn destroy_abandoned_forks>( + &self, + tx: &mut Tx, + block: &ComplexBlockId, + prev_operation: Option<&CommitOperation>, + ) -> BTreeSet { + // if some block has been finalized already => take it into account + let prev_abandoned_forks = match prev_operation { + Some(&CommitOperation::BlockFinalized(_, _, ref abandoned_forks)) => { + Some(abandoned_forks) + } + _ => None, + }; + + let mut destroyed = prev_abandoned_forks + .cloned() + .unwrap_or_else(|| BTreeSet::new()); + let live_unfinalized = self.unfinalized.iter().enumerate().filter(|(idx, _)| { + prev_abandoned_forks + .map(|prev_abandoned_forks| !prev_abandoned_forks.contains(idx)) + .unwrap_or(true) + }); + for (index, fork) in live_unfinalized { + if fork.head.valid_from.number == block.number { + destroyed.insert(index); + if fork.head.valid_from.hash != block.hash { + if let Err(error) = fork.destroy(&self.storage, tx, Some(block.number)) { + warn!(target: "db", "Failed to destroy abandoned unfinalized cache fork: {}", error); + } + } + } + } + + destroyed + } + + /// Search unfinalized fork where given block belongs. + fn find_unfinalized_fork( + &self, + block: &ComplexBlockId, + ) -> ClientResult>> { + for unfinalized in &self.unfinalized { + if unfinalized.matches(&self.storage, block)? { + return Ok(Some(&unfinalized)); + } + } + + Ok(None) + } } impl Fork { - /// Get reference to the head entry of this fork. - pub fn head(&self) -> &Entry { - &self.head - } - - /// Check if the block is the part of the fork. - pub fn matches>( - &self, - storage: &S, - block: &ComplexBlockId, - ) -> ClientResult { - let range = self.head.search_best_range_before(storage, block.number)?; - match range { - None => Ok(false), - Some((begin, end)) => chain::is_connected_to_range(storage, block, (&begin, end.as_ref())), - } - } - - /// Try to append NEW block to the fork. This method will only 'work' (return true) when block - /// is actually appended to the fork AND the best known block of the fork is known (i.e. 
some - /// block has been already appended to this fork after last restart). - pub fn try_append(&self, parent: &ComplexBlockId) -> bool { - // when the best block of the fork is known, the check is trivial - // - // most of calls will hopefully end here, because best_block is only unknown - // after restart and until new block is appended to the fork - self.best_block.as_ref() == Some(parent) - } - - /// Try to append new block to the fork OR fork it. - pub fn try_append_or_fork>( - &self, - storage: &S, - parent: &ComplexBlockId, - best_finalized_entry_block: Option>, - ) -> ClientResult>> { - // try to find entries that are (possibly) surrounding the parent block - let range = self.head.search_best_range_before(storage, parent.number)?; - let begin = match range { - Some((begin, _)) => begin, - None => return Ok(None), - }; - - // check if the parent is connected to the beginning of the range - if !chain::is_connected_to_block(storage, parent, &begin)? { - return Ok(None); - } - - // the block is connected to the begin-entry. If begin is the head entry - // => we need to append new block to the fork - if begin == self.head.valid_from { - return Ok(Some(ForkAppendResult::Append)); - } - - // the parent block belongs to this fork AND it is located after last finalized entry - // => we need to make a new fork - if best_finalized_entry_block.map(|f| begin.number > f).unwrap_or(true) { - return Ok(Some(ForkAppendResult::Fork(begin))); - } - - Ok(None) - } - - /// Destroy fork by deleting all unfinalized entries. - pub fn destroy, Tx: StorageTransaction>( - &self, - storage: &S, - tx: &mut Tx, - best_finalized_block: Option>, - ) -> ClientResult<()> { - destroy_fork( - self.head.valid_from.clone(), - storage, - tx, - best_finalized_block, - ) - } - - /// Truncate fork by deleting all entries that are descendants of given block. 
- pub fn truncate, Tx: StorageTransaction>( - &self, - storage: &S, - tx: &mut Tx, - reverting_block: NumberFor, - best_finalized_block: NumberFor, - ) -> ClientResult>> { - let mut current = self.head.valid_from.clone(); - loop { - // read pointer to previous entry - let entry = storage.require_entry(¤t)?; - - // truncation stops when we have reached the ancestor of truncated block - if current.number < reverting_block { - // if we have reached finalized block => destroy fork - if chain::is_finalized_block(storage, ¤t, best_finalized_block)? { - return Ok(None); - } - - // else fork needs to be updated - return Ok(Some(Fork { - best_block: None, - head: entry.into_entry(current), - })); - } - - tx.remove_storage_entry(¤t); - - // truncation also stops when there are no more entries in the list - current = match entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(None), - }; - } - } + /// Get reference to the head entry of this fork. + pub fn head(&self) -> &Entry { + &self.head + } + + /// Check if the block is the part of the fork. + pub fn matches>( + &self, + storage: &S, + block: &ComplexBlockId, + ) -> ClientResult { + let range = self.head.search_best_range_before(storage, block.number)?; + match range { + None => Ok(false), + Some((begin, end)) => { + chain::is_connected_to_range(storage, block, (&begin, end.as_ref())) + } + } + } + + /// Try to append NEW block to the fork. This method will only 'work' (return true) when block + /// is actually appended to the fork AND the best known block of the fork is known (i.e. some + /// block has been already appended to this fork after last restart). 
+ pub fn try_append(&self, parent: &ComplexBlockId) -> bool { + // when the best block of the fork is known, the check is trivial + // + // most of calls will hopefully end here, because best_block is only unknown + // after restart and until new block is appended to the fork + self.best_block.as_ref() == Some(parent) + } + + /// Try to append new block to the fork OR fork it. + pub fn try_append_or_fork>( + &self, + storage: &S, + parent: &ComplexBlockId, + best_finalized_entry_block: Option>, + ) -> ClientResult>> { + // try to find entries that are (possibly) surrounding the parent block + let range = self.head.search_best_range_before(storage, parent.number)?; + let begin = match range { + Some((begin, _)) => begin, + None => return Ok(None), + }; + + // check if the parent is connected to the beginning of the range + if !chain::is_connected_to_block(storage, parent, &begin)? { + return Ok(None); + } + + // the block is connected to the begin-entry. If begin is the head entry + // => we need to append new block to the fork + if begin == self.head.valid_from { + return Ok(Some(ForkAppendResult::Append)); + } + + // the parent block belongs to this fork AND it is located after last finalized entry + // => we need to make a new fork + if best_finalized_entry_block + .map(|f| begin.number > f) + .unwrap_or(true) + { + return Ok(Some(ForkAppendResult::Fork(begin))); + } + + Ok(None) + } + + /// Destroy fork by deleting all unfinalized entries. + pub fn destroy, Tx: StorageTransaction>( + &self, + storage: &S, + tx: &mut Tx, + best_finalized_block: Option>, + ) -> ClientResult<()> { + destroy_fork( + self.head.valid_from.clone(), + storage, + tx, + best_finalized_block, + ) + } + + /// Truncate fork by deleting all entries that are descendants of given block. 
+ pub fn truncate, Tx: StorageTransaction>( + &self, + storage: &S, + tx: &mut Tx, + reverting_block: NumberFor, + best_finalized_block: NumberFor, + ) -> ClientResult>> { + let mut current = self.head.valid_from.clone(); + loop { + // read pointer to previous entry + let entry = storage.require_entry(¤t)?; + + // truncation stops when we have reached the ancestor of truncated block + if current.number < reverting_block { + // if we have reached finalized block => destroy fork + if chain::is_finalized_block(storage, ¤t, best_finalized_block)? { + return Ok(None); + } + + // else fork needs to be updated + return Ok(Some(Fork { + best_block: None, + head: entry.into_entry(current), + })); + } + + tx.remove_storage_entry(¤t); + + // truncation also stops when there are no more entries in the list + current = match entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(None), + }; + } + } } impl Default for CommitOperations { - fn default() -> Self { - CommitOperations { operations: Vec::new() } - } + fn default() -> Self { + CommitOperations { + operations: Vec::new(), + } + } } // This should never be allowed for non-test code to avoid revealing its internals. #[cfg(test)] -impl From>> for CommitOperations { - fn from(operations: Vec>) -> Self { - CommitOperations { operations } - } +impl From>> + for CommitOperations +{ + fn from(operations: Vec>) -> Self { + CommitOperations { operations } + } } impl CommitOperations { - /// Append operation to the set. 
- fn append(&mut self, new_operation: Option>) { - let new_operation = match new_operation { - Some(new_operation) => new_operation, - None => return, - }; - - let last_operation = match self.operations.pop() { - Some(last_operation) => last_operation, - None => { - self.operations.push(new_operation); - return; - }, - }; - - // we are able (and obliged to) to merge two consequent block finalization operations - match last_operation { - CommitOperation::BlockFinalized(old_finalized_block, old_finalized_entry, old_abandoned_forks) => { - match new_operation { - CommitOperation::BlockFinalized(new_finalized_block, new_finalized_entry, new_abandoned_forks) => { - self.operations.push(CommitOperation::BlockFinalized( - new_finalized_block, - new_finalized_entry, - new_abandoned_forks, - )); - }, - _ => { - self.operations.push(CommitOperation::BlockFinalized( - old_finalized_block, - old_finalized_entry, - old_abandoned_forks, - )); - self.operations.push(new_operation); - }, - } - }, - _ => { - self.operations.push(last_operation); - self.operations.push(new_operation); - }, - } - } + /// Append operation to the set. 
+ fn append(&mut self, new_operation: Option>) { + let new_operation = match new_operation { + Some(new_operation) => new_operation, + None => return, + }; + + let last_operation = match self.operations.pop() { + Some(last_operation) => last_operation, + None => { + self.operations.push(new_operation); + return; + } + }; + + // we are able (and obliged to) to merge two consequent block finalization operations + match last_operation { + CommitOperation::BlockFinalized( + old_finalized_block, + old_finalized_entry, + old_abandoned_forks, + ) => match new_operation { + CommitOperation::BlockFinalized( + new_finalized_block, + new_finalized_entry, + new_abandoned_forks, + ) => { + self.operations.push(CommitOperation::BlockFinalized( + new_finalized_block, + new_finalized_entry, + new_abandoned_forks, + )); + } + _ => { + self.operations.push(CommitOperation::BlockFinalized( + old_finalized_block, + old_finalized_entry, + old_abandoned_forks, + )); + self.operations.push(new_operation); + } + }, + _ => { + self.operations.push(last_operation); + self.operations.push(new_operation); + } + } + } } /// Destroy fork by deleting all unfinalized entries. -pub fn destroy_fork, Tx: StorageTransaction>( - head_valid_from: ComplexBlockId, - storage: &S, - tx: &mut Tx, - best_finalized_block: Option>, +pub fn destroy_fork< + Block: BlockT, + T: CacheItemT, + S: Storage, + Tx: StorageTransaction, +>( + head_valid_from: ComplexBlockId, + storage: &S, + tx: &mut Tx, + best_finalized_block: Option>, ) -> ClientResult<()> { - let mut current = head_valid_from; - loop { - // optionally: deletion stops when we found entry at finalized block - if let Some(best_finalized_block) = best_finalized_block { - if chain::is_finalized_block(storage, ¤t, best_finalized_block)? 
{ - return Ok(()); - } - } - - // read pointer to previous entry - let entry = storage.require_entry(¤t)?; - tx.remove_storage_entry(¤t); - - // deletion stops when there are no more entries in the list - current = match entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(()), - }; - } + let mut current = head_valid_from; + loop { + // optionally: deletion stops when we found entry at finalized block + if let Some(best_finalized_block) = best_finalized_block { + if chain::is_finalized_block(storage, ¤t, best_finalized_block)? { + return Ok(()); + } + } + + // read pointer to previous entry + let entry = storage.require_entry(¤t)?; + tx.remove_storage_entry(¤t); + + // deletion stops when there are no more entries in the list + current = match entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(()), + }; + } } /// Blockchain related functions. mod chain { - use sp_runtime::traits::Header as HeaderT; - use super::*; - - /// Is the block1 connected both ends of the range. - pub fn is_connected_to_range>( - storage: &S, - block: &ComplexBlockId, - range: (&ComplexBlockId, Option<&ComplexBlockId>), - ) -> ClientResult { - let (begin, end) = range; - Ok(is_connected_to_block(storage, block, begin)? - && match end { - Some(end) => is_connected_to_block(storage, block, end)?, - None => true, - }) - } - - /// Is the block1 directly connected (i.e. part of the same fork) to block2? - pub fn is_connected_to_block>( - storage: &S, - block1: &ComplexBlockId, - block2: &ComplexBlockId, - ) -> ClientResult { - let (begin, end) = if *block1 > *block2 { (block2, block1) } else { (block1, block2) }; - let mut current = storage.read_header(&end.hash)? - .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; - while *current.number() > begin.number { - current = storage.read_header(current.parent_hash())? 
- .ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?; - } - - Ok(begin.hash == current.hash()) - } - - /// Returns true if the given block is finalized. - pub fn is_finalized_block>( - storage: &S, - block: &ComplexBlockId, - best_finalized_block: NumberFor, - ) -> ClientResult { - if block.number > best_finalized_block { - return Ok(false); - } - - storage.read_id(block.number) - .map(|hash| hash.as_ref() == Some(&block.hash)) - } + use super::*; + use sp_runtime::traits::Header as HeaderT; + + /// Is the block1 connected both ends of the range. + pub fn is_connected_to_range>( + storage: &S, + block: &ComplexBlockId, + range: (&ComplexBlockId, Option<&ComplexBlockId>), + ) -> ClientResult { + let (begin, end) = range; + Ok(is_connected_to_block(storage, block, begin)? + && match end { + Some(end) => is_connected_to_block(storage, block, end)?, + None => true, + }) + } + + /// Is the block1 directly connected (i.e. part of the same fork) to block2? + pub fn is_connected_to_block>( + storage: &S, + block1: &ComplexBlockId, + block2: &ComplexBlockId, + ) -> ClientResult { + let (begin, end) = if *block1 > *block2 { + (block2, block1) + } else { + (block1, block2) + }; + let mut current = storage + .read_header(&end.hash)? + .ok_or_else(|| ClientError::UnknownBlock(format!("{}", end.hash)))?; + while *current.number() > begin.number { + current = storage + .read_header(current.parent_hash())? + .ok_or_else(|| ClientError::UnknownBlock(format!("{}", current.parent_hash())))?; + } + + Ok(begin.hash == current.hash()) + } + + /// Returns true if the given block is finalized. + pub fn is_finalized_block>( + storage: &S, + block: &ComplexBlockId, + best_finalized_block: NumberFor, + ) -> ClientResult { + if block.number > best_finalized_block { + return Ok(false); + } + + storage + .read_id(block.number) + .map(|hash| hash.as_ref() == Some(&block.hash)) + } } /// Read list cache forks at blocks IDs. 
fn read_forks>( - storage: &S, - meta: Metadata, + storage: &S, + meta: Metadata, ) -> ClientResult<(Option>, Vec>)> { - let finalized = match meta.finalized { - Some(finalized) => Some(storage.require_entry(&finalized)? - .into_entry(finalized)), - None => None, - }; - - let unfinalized = meta.unfinalized.into_iter() - .map(|unfinalized| storage.require_entry(&unfinalized) - .map(|storage_entry| Fork { - best_block: None, - head: storage_entry.into_entry(unfinalized), - })) - .collect::>()?; - - Ok((finalized, unfinalized)) + let finalized = match meta.finalized { + Some(finalized) => Some(storage.require_entry(&finalized)?.into_entry(finalized)), + None => None, + }; + + let unfinalized = meta + .unfinalized + .into_iter() + .map(|unfinalized| { + storage + .require_entry(&unfinalized) + .map(|storage_entry| Fork { + best_block: None, + head: storage_entry.into_entry(unfinalized), + }) + }) + .collect::>()?; + + Ok((finalized, unfinalized)) } #[cfg(test)] pub mod tests { - use substrate_test_runtime_client::runtime::H256; - use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; - use sp_runtime::traits::Header as HeaderT; - use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage, DummyTransaction}; - use super::*; - - type Block = RawBlock>; - - pub fn test_id(number: u64) -> ComplexBlockId { - ComplexBlockId::new(H256::from_low_u64_be(number), number) - } - - fn correct_id(number: u64) -> ComplexBlockId { - ComplexBlockId::new(test_header(number).hash(), number) - } - - fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId { - ComplexBlockId::new(fork_header(fork_nonce, fork_from, number).hash(), number) - } - - fn test_header(number: u64) -> Header { - Header { - parent_hash: if number == 0 { Default::default() } else { test_header(number - 1).hash() }, - number, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - fn fork_header(fork_nonce: u64, 
fork_from: u64, number: u64) -> Header { - if fork_from == number { - test_header(number) - } else { - Header { - parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(), - number, - state_root: H256::from_low_u64_be(1 + fork_nonce), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - } - - #[test] - fn list_value_at_block_works() { - // when block is earlier than best finalized block AND it is not finalized - // --- 50 --- - // ----------> [100] - assert!(ListCache::<_, u64, _>::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)) - .unwrap().value_at_block(&test_id(50)).is_err()); - // when block is earlier than best finalized block AND it is finalized AND value is some - // [30] ---- 50 ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(50)).unwrap(), Some((test_id(30), Some(test_id(100)), 30))); - // when block is the best finalized block AND value is some - // ---> [100] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(100, H256::from_low_u64_be(100)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(100)).unwrap(), Some((test_id(100), None, 100))); - // when block is parallel to the best finalized block - // ---- 100 - // ---> [100] - assert!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), 
StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 30 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)).is_err()); - - // when block is later than last finalized block AND there are no forks AND finalized value is Some - // ---> [100] --- 200 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(test_id(100)), Vec::new()) - .with_id(50, H256::from_low_u64_be(50)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(30)), value: 100 }), - PruningStrategy::ByDepth(1024), test_id(100) - ).unwrap().value_at_block(&test_id(200)).unwrap(), Some((test_id(100), None, 100))); - - // when block is later than last finalized block AND there are no matching forks - // AND block is connected to finalized block AND finalized value is Some - // --- 3 - // ---> [2] /---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); - // when block is later than last finalized block AND there are no matching forks - // AND block is not connected to finalized block - // --- 2 --- 3 - // 1 /---> [2] ---------> [4] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - 
.with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 2)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 1, 3)).unwrap(), None); - - // when block is later than last finalized block AND it appends to unfinalized fork from the end - // AND unfinalized value is Some - // ---> [2] ---> [4] ---> 5 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&correct_id(5)).unwrap(), Some((correct_id(4), None, 4))); - // when block is later than last finalized block AND it does not fits unfinalized fork - // AND it is connected to the finalized block AND finalized value is Some - // ---> [2] ----------> [4] - // \--- 3 - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(fork_header(0, 2, 3)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap().value_at_block(&fork_id(0, 2, 3)).unwrap(), Some((correct_id(2), None, 2))); - } - - #[test] - fn list_on_block_insert_works() { - let nfin = EntryType::NonFinal; - let fin = EntryType::Final; - - // when trying to insert block < finalized number - let mut ops = Default::default(); - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), 
test_id(100)).unwrap() - .do_on_block_insert( - &mut DummyTransaction::new(), - test_id(49), - test_id(50), - Some(50), - nfin, - &mut ops, - ).unwrap().is_none()); - // when trying to insert block @ finalized number - assert!(ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), test_id(100)).unwrap() - .do_on_block_insert( - &mut DummyTransaction::new(), - test_id(99), - test_id(100), - Some(100), - nfin, - &Default::default(), - ).unwrap().is_none()); - - // when trying to insert non-final block AND it appends to the best block of unfinalized fork - // AND new value is the same as in the fork' best block - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![test_id(4)]) - .with_entry(test_id(4), StorageEntry { prev_valid_from: None, value: 4 }), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap(); - cache.unfinalized[0].best_block = Some(test_id(4)); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(4), nfin, &Default::default()).unwrap(), - Some(CommitOperation::AppendNewBlock(0, test_id(5))), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it appends to the best block of unfinalized fork - // AND new value is the same as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, test_id(4), test_id(5), Some(5), nfin, &Default::default()).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: test_id(5), value: 5 })), - ); - assert_eq!(*tx.inserted_entries(), vec![test_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![test_id(5)] })); - - // when trying to insert non-final block AND it is the first block that appends to the best block 
of unfinalized fork - // AND new value is the same as in the fork' best block - let cache = ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: None, value: 4 }) - .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), test_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(4), - nfin, - &Default::default(), - ).unwrap(), - Some(CommitOperation::AppendNewBlock(0, correct_id(5))), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork - // AND new value is the same as in the fork' best block - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert( - &mut tx, - correct_id(4), - correct_id(5), - Some(5), - nfin, - &Default::default(), - ).unwrap(), - Some(CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(5), value: 5 })), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(5).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: None, unfinalized: vec![correct_id(5)] })); - - // when trying to insert non-final block AND it forks unfinalized fork - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(4)]) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 4 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - 
cache.do_on_block_insert(&mut tx, correct_id(3), fork_id(0, 3, 4), Some(14), nfin, &Default::default()) - .unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: fork_id(0, 3, 4), value: 14 })), - ); - assert_eq!(*tx.inserted_entries(), vec![fork_id(0, 3, 4).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] })); - - // when trying to insert non-final block AND there are no unfinalized forks - // AND value is the same as last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), nfin, &Default::default()) - .unwrap(), - None, - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when trying to insert non-final block AND there are no unfinalized forks - // AND value differs from last finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), nfin, &Default::default()) - .unwrap(), - Some(CommitOperation::AddNewFork(Entry { valid_from: correct_id(3), value: 3 })), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(2)), unfinalized: 
vec![correct_id(3)] })); - - // when inserting finalized entry AND there are no previous finalized entries - let cache = ListCache::new(DummyStorage::new(), PruningStrategy::ByDepth(1024), correct_id(2)).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()) - .unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - Some(Entry { valid_from: correct_id(3), value: 3 }), - Default::default(), - )), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); - // when inserting finalized entry AND value is the same as in previous finalized - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert!(tx.updated_meta().is_none()); - // when inserting finalized entry AND value differs from previous finalized - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(3), fin, &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(3), - Some(Entry { valid_from: correct_id(3), value: 3 }), - Default::default(), - )), - ); - assert_eq!(*tx.inserted_entries(), vec![correct_id(3).hash].into_iter().collect()); - assert!(tx.removed_entries().is_empty()); - 
assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(3)), unfinalized: vec![] })); - - // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_insert(&mut tx, correct_id(2), correct_id(3), Some(2), fin, &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), - ); - } - - #[test] - fn list_on_block_finalized_works() { - // finalization does not finalizes entry if it does not exists - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, Default::default())), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert_eq!( - *tx.updated_meta(), - Some(Metadata { finalized: Some(correct_id(2)), unfinalized: vec![correct_id(5)] }), - ); - // finalization finalizes entry - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: 
Some(correct_id(2)), value: 5 }), - PruningStrategy::ByDepth(1024), correct_id(4) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized( - correct_id(5), - Some(Entry { valid_from: correct_id(5), value: 5 }), - vec![0].into_iter().collect(), - )), - ); - assert!(tx.inserted_entries().is_empty()); - assert!(tx.removed_entries().is_empty()); - assert_eq!(*tx.updated_meta(), Some(Metadata { finalized: Some(correct_id(5)), unfinalized: vec![] })); - // finalization removes abandoned forks - let cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: None, value: 13 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - let mut tx = DummyTransaction::new(); - assert_eq!( - cache.do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()).unwrap(), - Some(CommitOperation::BlockFinalized(correct_id(3), None, vec![0].into_iter().collect())), - ); - } - - #[test] - fn list_transaction_commit_works() { - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(5)), value: 6 }), - PruningStrategy::ByDepth(1024), correct_id(2) - ).unwrap(); - - // when new block is appended to unfinalized fork - cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); - // when new entry is appended to 
unfinalized fork - cache.on_transaction_commit(vec![ - CommitOperation::AppendNewEntry(0, Entry { valid_from: correct_id(7), value: 7 }), - ].into()); - assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); - assert_eq!(cache.unfinalized[0].head, Entry { valid_from: correct_id(7), value: 7 }); - // when new fork is added - cache.on_transaction_commit(vec![ - CommitOperation::AddNewFork(Entry { valid_from: correct_id(10), value: 10 }), - ].into()); - assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); - assert_eq!(cache.unfinalized[2].head, Entry { valid_from: correct_id(10), value: 10 }); - // when block is finalized + entry is finalized + unfinalized forks are deleted - cache.on_transaction_commit(vec![CommitOperation::BlockFinalized( - correct_id(20), - Some(Entry { valid_from: correct_id(20), value: 20 }), - vec![0, 1, 2].into_iter().collect(), - )].into()); - assert_eq!(cache.best_finalized_block, correct_id(20)); - assert_eq!(cache.best_finalized_entry, Some(Entry { valid_from: correct_id(20), value: 20 })); - assert!(cache.unfinalized.is_empty()); - } - - #[test] - fn list_find_unfinalized_fork_works() { - // ----------> [3] - // --- [2] ---------> 4 ---> [5] - assert_eq!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: None, value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)), - PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap().find_unfinalized_fork((&correct_id(4)).into()).unwrap().unwrap().head.valid_from, correct_id(5)); - // --- [2] ---------------> [5] - // ----------> [3] ---> 4 - assert_eq!(ListCache::new( - DummyStorage::new() - 
.with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 2)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)), - PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap() - .find_unfinalized_fork((&fork_id(0, 1, 4)).into()).unwrap().unwrap().head.valid_from, fork_id(0, 1, 3)); - // --- [2] ---------------> [5] - // ----------> [3] - // -----------------> 4 - assert!(ListCache::new( - DummyStorage::new() - .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) - .with_entry(fork_id(0, 1, 3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 13 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(2)), value: 5 }) - .with_entry(correct_id(2), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 2 }) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(0, 1, 3)) - .with_header(fork_header(0, 1, 4)) - .with_header(fork_header(1, 1, 2)) - .with_header(fork_header(1, 1, 3)) - .with_header(fork_header(1, 1, 4)), - PruningStrategy::ByDepth(1024), correct_id(0) - ).unwrap().find_unfinalized_fork((&fork_id(1, 1, 4)).into()).unwrap().is_none()); - } - - #[test] - fn fork_matches_works() { - // when block is not within list range - let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!(Fork::<_, u64> { best_block: 
None, head: Entry { valid_from: test_id(100), value: 0 } } - .matches(&storage, (&test_id(20)).into()).unwrap(), false); - // when block is not connected to the begin block - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&fork_id(0, 2, 4)).into()).unwrap(), false); - // when block is not connected to the end block - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 3, 4)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&fork_id(0, 3, 4)).into()).unwrap(), false); - // when block is connected to the begin block AND end is open - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: None, value: 100 }) - .with_header(test_header(5)) - .with_header(test_header(6)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&correct_id(6)).into()).unwrap(), true); - // when block is connected to the begin block AND to the end block - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - 
.with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .matches(&storage, (&correct_id(4)).into()).unwrap(), true); - } - - #[test] - fn fork_try_append_works() { - // when best block is unknown - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), false); - // when best block is known but different - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(101)), false); - // when best block is known and the same - assert_eq!(Fork::<_, u64> { best_block: Some(test_id(100)), head: Entry { valid_from: test_id(100), value: 0 } } - .try_append(&test_id(100)), true); - } - - #[test] - fn fork_try_append_or_fork_works() { - // when there's no entry before parent - let storage = DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .try_append_or_fork(&storage, &test_id(30), None).unwrap(), None); - // when parent does not belong to the fork - let storage = DummyStorage::new() - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 2, 4)) - .with_header(fork_header(0, 2, 3)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .try_append_or_fork(&storage, &fork_id(0, 2, 4), None).unwrap(), None); - // when the entry before parent is the head entry - let storage 
= DummyStorage::new() - .with_entry( - ComplexBlockId::new(test_header(5).hash(), 5), - StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }, - ) - .with_header(test_header(6)) - .with_header(test_header(5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(5), value: 100 } } - .try_append_or_fork(&storage, &correct_id(6), None).unwrap(), Some(ForkAppendResult::Append)); - // when the parent located after last finalized entry - let storage = DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), None).unwrap(), Some(ForkAppendResult::Fork(ComplexBlockId::new(test_header(3).hash(), 3)))); - // when the parent located before last finalized entry - let storage = DummyStorage::new() - .with_entry(correct_id(6), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 100 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: None, value: 200 }) - .with_header(test_header(6)) - .with_header(test_header(5)) - .with_header(test_header(4)) - .with_header(test_header(3)) - .with_header(fork_header(0, 4, 5)); - assert_eq!(Fork::<_, u64> { best_block: None, head: Entry { valid_from: correct_id(6), value: 100 } } - .try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)).unwrap(), None); - } - - #[test] - fn fork_destroy_works() { - // when we reached finalized entry without iterations - let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: 
test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert!(tx.removed_entries().is_empty()); - // when we reach finalized entry with iterations - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(20)), value: 50 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(5)), value: 10 }) - .with_entry(test_id(5), StorageEntry { prev_valid_from: Some(test_id(3)), value: 5 }) - .with_entry(test_id(3), StorageEntry { prev_valid_from: None, value: 0 }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert_eq!(*tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash, test_id(20).hash].into_iter().collect()); - // when we reach beginning of fork before finalized block - let storage = DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }); - let mut tx = DummyTransaction::new(); - Fork::<_, u64> { best_block: None, head: Entry { valid_from: test_id(100), value: 0 } } - .destroy(&storage, &mut tx, Some(200)).unwrap(); - assert_eq!(*tx.removed_entries(), - vec![test_id(100).hash, test_id(50).hash].into_iter().collect()); - } - - #[test] - fn is_connected_to_block_fails() { - // when storage returns error - assert!( - chain::is_connected_to_block::<_, u64, _>( - &FaultyStorage, - (&test_id(1)).into(), - &test_id(100), - ).is_err(), - ); - // when there's no header in the storage - assert!( - chain::is_connected_to_block::<_, u64, _>( - 
&DummyStorage::new(), - (&test_id(1)).into(), - &test_id(100), - ).is_err(), - ); - } - - #[test] - fn is_connected_to_block_works() { - // when without iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(1)), - (&test_id(1)).into(), &correct_id(1)).unwrap(), false); - // when with ASC iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&test_id(0)).into(), &correct_id(2)).unwrap(), false); - // when with DESC iterations we end up with different block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), &test_id(0)).unwrap(), false); - // when without iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(1)), - (&correct_id(1)).into(), &correct_id(1)).unwrap(), true); - // when with ASC iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(0)).into(), &correct_id(2)).unwrap(), true); - // when with DESC iterations we end up with the same block - assert_eq!(chain::is_connected_to_block::<_, u64, _>(&DummyStorage::new() - .with_header(test_header(0)) - .with_header(test_header(1)) - .with_header(test_header(2)), - (&correct_id(2)).into(), &correct_id(0)).unwrap(), true); - } - - #[test] - fn is_finalized_block_fails() { - // when storage returns error - assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err()); - - } - - #[test] - fn is_finalized_block_works() { - // when number of 
block is larger than last finalized block - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), false); - // when there's no hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), false); - // when there's different hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new() - .with_id(1, H256::from_low_u64_be(2)), &test_id(1), 100).unwrap(), false); - // when there's the same hash for this block number in the database - assert_eq!(chain::is_finalized_block::<_, u64, _>(&DummyStorage::new() - .with_id(1, H256::from_low_u64_be(1)), &test_id(1), 100).unwrap(), true); - } - - #[test] - fn read_forks_fails() { - // when storage returns error during finalized entry read - assert!(read_forks::(&FaultyStorage, Metadata { - finalized: Some(test_id(1)), - unfinalized: vec![], - }).is_err()); - // when storage returns error during unfinalized entry read - assert!(read_forks::(&FaultyStorage, Metadata { - finalized: None, - unfinalized: vec![test_id(1)], - }).is_err()); - // when finalized entry is not found - assert!(read_forks::(&DummyStorage::new(), Metadata { - finalized: Some(test_id(1)), - unfinalized: vec![], - }).is_err()); - // when unfinalized entry is not found - assert!(read_forks::(&DummyStorage::new(), Metadata { - finalized: None, - unfinalized: vec![test_id(1)], - }).is_err()); - } - - #[test] - fn read_forks_works() { - let storage = DummyStorage::new() - .with_entry(test_id(10), StorageEntry { prev_valid_from: Some(test_id(1)), value: 11 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(2)), value: 0 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: None, value: 33 }); - let expected = ( - Some(Entry { valid_from: test_id(10), value: 11 }), - vec![ - Fork { best_block: None, head: Entry { valid_from: test_id(20), value: 0 } }, - 
Fork { best_block: None, head: Entry { valid_from: test_id(30), value: 33 } }, - ], - ); - - assert_eq!(expected, read_forks(&storage, Metadata { - finalized: Some(test_id(10)), - unfinalized: vec![test_id(20), test_id(30)], - }).unwrap()); - } - - #[test] - fn ancient_entries_are_pruned_when_pruning_enabled() { - fn do_test(strategy: PruningStrategy) { - let cache = ListCache::new(DummyStorage::new() - .with_id(10, H256::from_low_u64_be(10)) - .with_id(20, H256::from_low_u64_be(20)) - .with_id(30, H256::from_low_u64_be(30)) - .with_entry(test_id(10), StorageEntry { prev_valid_from: None, value: 10 }) - .with_entry(test_id(20), StorageEntry { prev_valid_from: Some(test_id(10)), value: 20 }) - .with_entry(test_id(30), StorageEntry { prev_valid_from: Some(test_id(20)), value: 30 }), - strategy, test_id(9)).unwrap(); - let mut tx = DummyTransaction::new(); - - // when finalizing entry #10: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(10)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #19: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(19)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #20: no entries pruned - cache.prune_finalized_entries(&mut tx, &test_id(20)); - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - // when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is enabled) - cache.prune_finalized_entries(&mut tx, &test_id(30)); - match strategy { - PruningStrategy::NeverPrune => { - assert!(tx.removed_entries().is_empty()); - assert!(tx.inserted_entries().is_empty()); - }, - PruningStrategy::ByDepth(_) => { - assert_eq!(*tx.removed_entries(), vec![test_id(10).hash].into_iter().collect()); - assert_eq!(*tx.inserted_entries(), vec![test_id(20).hash].into_iter().collect()); - }, - } - } - - 
do_test(PruningStrategy::ByDepth(10)); - do_test(PruningStrategy::NeverPrune) - } - - #[test] - fn revert_block_works() { - // 1 -> (2) -> 3 -> 4 -> 5 - // \ - // -> 5'' - // \ - // -> (3') -> 4' -> 5' - let mut cache = ListCache::new( - DummyStorage::new() - .with_meta(Some(correct_id(1)), vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)]) - .with_id(1, correct_id(1).hash) - .with_entry(correct_id(1), StorageEntry { prev_valid_from: None, value: 1 }) - .with_entry(correct_id(3), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 3 }) - .with_entry(correct_id(4), StorageEntry { prev_valid_from: Some(correct_id(3)), value: 4 }) - .with_entry(correct_id(5), StorageEntry { prev_valid_from: Some(correct_id(4)), value: 5 }) - .with_entry(fork_id(1, 2, 4), StorageEntry { prev_valid_from: Some(correct_id(1)), value: 14 }) - .with_entry(fork_id(1, 2, 5), StorageEntry { prev_valid_from: Some(fork_id(1, 2, 4)), value: 15 }) - .with_entry(fork_id(2, 4, 5), StorageEntry { prev_valid_from: Some(correct_id(4)), value: 25 }) - .with_header(test_header(1)) - .with_header(test_header(2)) - .with_header(test_header(3)) - .with_header(test_header(4)) - .with_header(test_header(5)) - .with_header(fork_header(1, 2, 3)) - .with_header(fork_header(1, 2, 4)) - .with_header(fork_header(1, 2, 5)) - .with_header(fork_header(2, 4, 5)), - PruningStrategy::ByDepth(1024), correct_id(1) - ).unwrap(); - - // when 5 is reverted: entry 5 is truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![ - (0, Some(Fork { best_block: None, head: Entry { valid_from: correct_id(4), value: 4 } })), - ].into_iter().collect())); - cache.on_transaction_commit(vec![op].into()); - - // when 3 is reverted: entries 4+5' are truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![ - (0, None), - (2, None), - 
].into_iter().collect())); - cache.on_transaction_commit(vec![op].into()); - - // when 2 is reverted: entries 4'+5' are truncated - let op = cache.do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)).unwrap(); - assert_eq!(op, CommitOperation::BlockReverted(vec![ - (0, None), - ].into_iter().collect())); - cache.on_transaction_commit(vec![op].into()); - } - - #[test] - fn append_commit_operation_works() { - let mut ops = CommitOperations::default(); - ops.append(None); - assert_eq!(ops.operations, Vec::new()); - - ops.append(Some(CommitOperation::BlockFinalized( - test_id(10), - Some(Entry { valid_from: test_id(10), value: 10 }), - vec![5].into_iter().collect(), - ))); - assert_eq!( - ops.operations, - vec![CommitOperation::BlockFinalized( - test_id(10), - Some(Entry { valid_from: test_id(10), value: 10 }), - vec![5].into_iter().collect(), - )], - ); - - ops.append(Some(CommitOperation::BlockFinalized( - test_id(20), - Some(Entry { valid_from: test_id(20), value: 20 }), - vec![5, 6].into_iter().collect(), - ))); - - assert_eq!( - ops.operations, - vec![CommitOperation::BlockFinalized( - test_id(20), - Some(Entry { valid_from: test_id(20), value: 20 }), - vec![5, 6].into_iter().collect(), - )], - ); - } + use super::*; + use crate::cache::list_storage::tests::{DummyStorage, DummyTransaction, FaultyStorage}; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, Header}; + use sp_runtime::traits::Header as HeaderT; + use substrate_test_runtime_client::runtime::H256; + + type Block = RawBlock>; + + pub fn test_id(number: u64) -> ComplexBlockId { + ComplexBlockId::new(H256::from_low_u64_be(number), number) + } + + fn correct_id(number: u64) -> ComplexBlockId { + ComplexBlockId::new(test_header(number).hash(), number) + } + + fn fork_id(fork_nonce: u64, fork_from: u64, number: u64) -> ComplexBlockId { + ComplexBlockId::new(fork_header(fork_nonce, fork_from, number).hash(), number) + } + + fn test_header(number: u64) -> Header { + Header { + 
parent_hash: if number == 0 { + Default::default() + } else { + test_header(number - 1).hash() + }, + number, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + + fn fork_header(fork_nonce: u64, fork_from: u64, number: u64) -> Header { + if fork_from == number { + test_header(number) + } else { + Header { + parent_hash: fork_header(fork_nonce, fork_from, number - 1).hash(), + number, + state_root: H256::from_low_u64_be(1 + fork_nonce), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + } + + #[test] + fn list_value_at_block_works() { + // when block is earlier than best finalized block AND it is not finalized + // --- 50 --- + // ----------> [100] + assert!(ListCache::<_, u64, _>::new( + DummyStorage::new(), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(50)) + .is_err()); + // when block is earlier than best finalized block AND it is finalized AND value is some + // [30] ---- 50 ---> [100] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: 100 + } + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: 30 + } + ), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(50)) + .unwrap(), + Some((test_id(30), Some(test_id(100)), 30)) + ); + // when block is the best finalized block AND value is some + // ---> [100] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(100, H256::from_low_u64_be(100)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: 100 + } + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: 30 + } + ), + PruningStrategy::ByDepth(1024), + 
test_id(100) + ) + .unwrap() + .value_at_block(&test_id(100)) + .unwrap(), + Some((test_id(100), None, 100)) + ); + // when block is parallel to the best finalized block + // ---- 100 + // ---> [100] + assert!(ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: 100 + } + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: 30 + } + ), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&ComplexBlockId::new(H256::from_low_u64_be(2), 100)) + .is_err()); + + // when block is later than last finalized block AND there are no forks AND finalized value is Some + // ---> [100] --- 200 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(test_id(100)), Vec::new()) + .with_id(50, H256::from_low_u64_be(50)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: 100 + } + ), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .value_at_block(&test_id(200)) + .unwrap(), + Some((test_id(100), None, 100)) + ); + + // when block is later than last finalized block AND there are no matching forks + // AND block is connected to finalized block AND finalized value is Some + // --- 3 + // ---> [2] /---------> [4] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2 + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 4 + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some((correct_id(2), None, 2)) 
+ ); + // when block is later than last finalized block AND there are no matching forks + // AND block is not connected to finalized block + // --- 2 --- 3 + // 1 /---> [2] ---------> [4] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2 + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 4 + } + ) + .with_header(test_header(1)) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 2)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 1, 3)) + .unwrap(), + None + ); + + // when block is later than last finalized block AND it appends to unfinalized fork from the end + // AND unfinalized value is Some + // ---> [2] ---> [4] ---> 5 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2 + } + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 4 + } + ) + .with_header(test_header(4)) + .with_header(test_header(5)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&correct_id(5)) + .unwrap(), + Some((correct_id(4), None, 4)) + ); + // when block is later than last finalized block AND it does not fits unfinalized fork + // AND it is connected to the finalized block AND finalized value is Some + // ---> [2] ----------> [4] + // \--- 3 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 4 + } + ) + .with_entry( + correct_id(2), + StorageEntry { + 
prev_valid_from: None, + value: 2 + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(fork_header(0, 2, 3)), + PruningStrategy::ByDepth(1024), + test_id(2) + ) + .unwrap() + .value_at_block(&fork_id(0, 2, 3)) + .unwrap(), + Some((correct_id(2), None, 2)) + ); + } + + #[test] + fn list_on_block_insert_works() { + let nfin = EntryType::NonFinal; + let fin = EntryType::Final; + + // when trying to insert block < finalized number + let mut ops = Default::default(); + assert!(ListCache::new( + DummyStorage::new(), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .do_on_block_insert( + &mut DummyTransaction::new(), + test_id(49), + test_id(50), + Some(50), + nfin, + &mut ops, + ) + .unwrap() + .is_none()); + // when trying to insert block @ finalized number + assert!(ListCache::new( + DummyStorage::new(), + PruningStrategy::ByDepth(1024), + test_id(100) + ) + .unwrap() + .do_on_block_insert( + &mut DummyTransaction::new(), + test_id(99), + test_id(100), + Some(100), + nfin, + &Default::default(), + ) + .unwrap() + .is_none()); + + // when trying to insert non-final block AND it appends to the best block of unfinalized fork + // AND new value is the same as in the fork' best block + let mut cache = ListCache::new( + DummyStorage::new() + .with_meta(None, vec![test_id(4)]) + .with_entry( + test_id(4), + StorageEntry { + prev_valid_from: None, + value: 4, + }, + ), + PruningStrategy::ByDepth(1024), + test_id(2), + ) + .unwrap(); + cache.unfinalized[0].best_block = Some(test_id(4)); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + test_id(4), + test_id(5), + Some(4), + nfin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::AppendNewBlock(0, test_id(5))), + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when trying to insert non-final block AND 
it appends to the best block of unfinalized fork + // AND new value is the same as in the fork' best block + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + test_id(4), + test_id(5), + Some(5), + nfin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::AppendNewEntry( + 0, + Entry { + valid_from: test_id(5), + value: 5 + } + )), + ); + assert_eq!( + *tx.inserted_entries(), + vec![test_id(5).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: None, + unfinalized: vec![test_id(5)] + }) + ); + + // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork + // AND new value is the same as in the fork' best block + let cache = ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: None, + value: 4, + }, + ) + .with_header(test_header(4)), + PruningStrategy::ByDepth(1024), + test_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(4), + nfin, + &Default::default(), + ) + .unwrap(), + Some(CommitOperation::AppendNewBlock(0, correct_id(5))), + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when trying to insert non-final block AND it is the first block that appends to the best block of unfinalized fork + // AND new value is the same as in the fork' best block + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(4), + correct_id(5), + Some(5), + nfin, + &Default::default(), + ) + .unwrap(), + Some(CommitOperation::AppendNewEntry( + 0, + Entry { + valid_from: correct_id(5), + value: 5 + } + )), + ); + assert_eq!( + 
*tx.inserted_entries(), + vec![correct_id(5).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: None, + unfinalized: vec![correct_id(5)] + }) + ); + + // when trying to insert non-final block AND it forks unfinalized fork + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(4)]) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 4, + }, + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(3), + fork_id(0, 3, 4), + Some(14), + nfin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { + valid_from: fork_id(0, 3, 4), + value: 14 + })), + ); + assert_eq!( + *tx.inserted_entries(), + vec![fork_id(0, 3, 4).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(2)), + unfinalized: vec![correct_id(4), fork_id(0, 3, 4)] + }) + ); + + // when trying to insert non-final block AND there are no unfinalized forks + // AND value is the same as last finalized + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + nfin, + &Default::default() + ) + .unwrap(), + None, + ); + 
assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when trying to insert non-final block AND there are no unfinalized forks + // AND value differs from last finalized + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + nfin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::AddNewFork(Entry { + valid_from: correct_id(3), + value: 3 + })), + ); + assert_eq!( + *tx.inserted_entries(), + vec![correct_id(3).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(2)), + unfinalized: vec![correct_id(3)] + }) + ); + + // when inserting finalized entry AND there are no previous finalized entries + let cache = ListCache::new( + DummyStorage::new(), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + fin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + Some(Entry { + valid_from: correct_id(3), + value: 3 + }), + Default::default(), + )), + ); + assert_eq!( + *tx.inserted_entries(), + vec![correct_id(3).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(3)), + unfinalized: vec![] + }) + ); + // when inserting finalized entry AND value is the same as in previous finalized + let cache = ListCache::new( + DummyStorage::new() + 
.with_meta(Some(correct_id(2)), vec![]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + fin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + Default::default() + )), + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert!(tx.updated_meta().is_none()); + // when inserting finalized entry AND value differs from previous finalized + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(3), + fin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + Some(Entry { + valid_from: correct_id(3), + value: 3 + }), + Default::default(), + )), + ); + assert_eq!( + *tx.inserted_entries(), + vec![correct_id(3).hash].into_iter().collect() + ); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(3)), + unfinalized: vec![] + }) + ); + + // inserting finalized entry removes abandoned fork EVEN if new entry is not inserted + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: None, + value: 13, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_insert( + &mut tx, + correct_id(2), + correct_id(3), + Some(2), + fin, + &Default::default() + ) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + 
None, + vec![0].into_iter().collect() + )), + ); + } + + #[test] + fn list_on_block_finalized_works() { + // finalization does not finalizes entry if it does not exists + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(5)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 5, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + Default::default() + )), + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(2)), + unfinalized: vec![correct_id(5)] + }), + ); + // finalization finalizes entry + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(5)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 5, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(4), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_finalize(&mut tx, correct_id(4), correct_id(5), &Default::default()) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(5), + Some(Entry { + valid_from: correct_id(5), + value: 5 + }), + vec![0].into_iter().collect(), + )), + ); + assert!(tx.inserted_entries().is_empty()); + assert!(tx.removed_entries().is_empty()); + assert_eq!( + *tx.updated_meta(), + Some(Metadata { + finalized: Some(correct_id(5)), + unfinalized: vec![] + }) + ); + // finalization removes 
abandoned forks + let cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![fork_id(0, 1, 3)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: None, + value: 13, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + assert_eq!( + cache + .do_on_block_finalize(&mut tx, correct_id(2), correct_id(3), &Default::default()) + .unwrap(), + Some(CommitOperation::BlockFinalized( + correct_id(3), + None, + vec![0].into_iter().collect() + )), + ); + } + + #[test] + fn list_transaction_commit_works() { + let mut cache = ListCache::new( + DummyStorage::new() + .with_meta(Some(correct_id(2)), vec![correct_id(5), correct_id(6)]) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2, + }, + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 5, + }, + ) + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(5)), + value: 6, + }, + ), + PruningStrategy::ByDepth(1024), + correct_id(2), + ) + .unwrap(); + + // when new block is appended to unfinalized fork + cache.on_transaction_commit(vec![CommitOperation::AppendNewBlock(0, correct_id(6))].into()); + assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(6))); + // when new entry is appended to unfinalized fork + cache.on_transaction_commit( + vec![CommitOperation::AppendNewEntry( + 0, + Entry { + valid_from: correct_id(7), + value: 7, + }, + )] + .into(), + ); + assert_eq!(cache.unfinalized[0].best_block, Some(correct_id(7))); + assert_eq!( + cache.unfinalized[0].head, + Entry { + valid_from: correct_id(7), + value: 7 + } + ); + // when new fork is added + cache.on_transaction_commit( + vec![CommitOperation::AddNewFork(Entry { + valid_from: correct_id(10), + value: 10, + })] + .into(), + ); + 
assert_eq!(cache.unfinalized[2].best_block, Some(correct_id(10))); + assert_eq!( + cache.unfinalized[2].head, + Entry { + valid_from: correct_id(10), + value: 10 + } + ); + // when block is finalized + entry is finalized + unfinalized forks are deleted + cache.on_transaction_commit( + vec![CommitOperation::BlockFinalized( + correct_id(20), + Some(Entry { + valid_from: correct_id(20), + value: 20, + }), + vec![0, 1, 2].into_iter().collect(), + )] + .into(), + ); + assert_eq!(cache.best_finalized_block, correct_id(20)); + assert_eq!( + cache.best_finalized_entry, + Some(Entry { + valid_from: correct_id(20), + value: 20 + }) + ); + assert!(cache.unfinalized.is_empty()); + } + + #[test] + fn list_find_unfinalized_fork_works() { + // ----------> [3] + // --- [2] ---------> 4 ---> [5] + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![fork_id(0, 1, 3), correct_id(5)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: 13 + } + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 5 + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: None, + value: 2 + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)), + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&correct_id(4)).into()) + .unwrap() + .unwrap() + .head + .valid_from, + correct_id(5) + ); + // --- [2] ---------------> [5] + // ----------> [3] ---> 4 + assert_eq!( + ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: 13 + } + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 5 + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: 
Some(correct_id(1)), + value: 2 + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(fork_header(0, 1, 2)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 4)), + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&fork_id(0, 1, 4)).into()) + .unwrap() + .unwrap() + .head + .valid_from, + fork_id(0, 1, 3) + ); + // --- [2] ---------------> [5] + // ----------> [3] + // -----------------> 4 + assert!(ListCache::new( + DummyStorage::new() + .with_meta(None, vec![correct_id(5), fork_id(0, 1, 3)]) + .with_entry( + fork_id(0, 1, 3), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: 13 + } + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(2)), + value: 5 + } + ) + .with_entry( + correct_id(2), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: 2 + } + ) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(fork_header(0, 1, 3)) + .with_header(fork_header(0, 1, 4)) + .with_header(fork_header(1, 1, 2)) + .with_header(fork_header(1, 1, 3)) + .with_header(fork_header(1, 1, 4)), + PruningStrategy::ByDepth(1024), + correct_id(0) + ) + .unwrap() + .find_unfinalized_fork((&fork_id(1, 1, 4)).into()) + .unwrap() + .is_none()); + } + + #[test] + fn fork_matches_works() { + // when block is not within list range + let storage = DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: 100, + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: 50, + }, + ); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: 0 + } + } + .matches(&storage, (&test_id(20)).into()) + .unwrap(), + false + ); + // when block is not connected to the begin block 
+ let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 100, + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: 200, + }, + ) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 2, 4)) + .with_header(fork_header(0, 2, 3)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: 100 + } + } + .matches(&storage, (&fork_id(0, 2, 4)).into()) + .unwrap(), + false + ); + // when block is not connected to the end block + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 100, + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: 200, + }, + ) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 3, 4)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: 100 + } + } + .matches(&storage, (&fork_id(0, 3, 4)).into()) + .unwrap(), + false + ); + // when block is connected to the begin block AND end is open + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: None, + value: 100, + }, + ) + .with_header(test_header(5)) + .with_header(test_header(6)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: 100 + } + } + .matches(&storage, (&correct_id(6)).into()) + .unwrap(), + true + ); + // when block is connected to the begin block AND to the end block + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 100, + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: 200, + }, + ) + 
.with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: 100 + } + } + .matches(&storage, (&correct_id(4)).into()) + .unwrap(), + true + ); + } + + #[test] + fn fork_try_append_works() { + // when best block is unknown + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: 0 + } + } + .try_append(&test_id(100)), + false + ); + // when best block is known but different + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: 0 + } + } + .try_append(&test_id(101)), + false + ); + // when best block is known and the same + assert_eq!( + Fork::<_, u64> { + best_block: Some(test_id(100)), + head: Entry { + valid_from: test_id(100), + value: 0 + } + } + .try_append(&test_id(100)), + true + ); + } + + #[test] + fn fork_try_append_or_fork_works() { + // when there's no entry before parent + let storage = DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: 100, + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: 50, + }, + ); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: 0 + } + } + .try_append_or_fork(&storage, &test_id(30), None) + .unwrap(), + None + ); + // when parent does not belong to the fork + let storage = DummyStorage::new() + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 100, + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: 200, + }, + ) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 2, 4)) + .with_header(fork_header(0, 2, 3)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + 
valid_from: correct_id(5), + value: 100 + } + } + .try_append_or_fork(&storage, &fork_id(0, 2, 4), None) + .unwrap(), + None + ); + // when the entry before parent is the head entry + let storage = DummyStorage::new() + .with_entry( + ComplexBlockId::new(test_header(5).hash(), 5), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 100, + }, + ) + .with_header(test_header(6)) + .with_header(test_header(5)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(5), + value: 100 + } + } + .try_append_or_fork(&storage, &correct_id(6), None) + .unwrap(), + Some(ForkAppendResult::Append) + ); + // when the parent located after last finalized entry + let storage = DummyStorage::new() + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 100, + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: 200, + }, + ) + .with_header(test_header(6)) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 4, 5)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(6), + value: 100 + } + } + .try_append_or_fork(&storage, &fork_id(0, 4, 5), None) + .unwrap(), + Some(ForkAppendResult::Fork(ComplexBlockId::new( + test_header(3).hash(), + 3 + ))) + ); + // when the parent located before last finalized entry + let storage = DummyStorage::new() + .with_entry( + correct_id(6), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 100, + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: None, + value: 200, + }, + ) + .with_header(test_header(6)) + .with_header(test_header(5)) + .with_header(test_header(4)) + .with_header(test_header(3)) + .with_header(fork_header(0, 4, 5)); + assert_eq!( + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: correct_id(6), + value: 100 + } + } + 
.try_append_or_fork(&storage, &fork_id(0, 4, 5), Some(3)) + .unwrap(), + None + ); + } + + #[test] + fn fork_destroy_works() { + // when we reached finalized entry without iterations + let storage = DummyStorage::new().with_id(100, H256::from_low_u64_be(100)); + let mut tx = DummyTransaction::new(); + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: 0, + }, + } + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert!(tx.removed_entries().is_empty()); + // when we reach finalized entry with iterations + let storage = DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: 100, + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: Some(test_id(20)), + value: 50, + }, + ) + .with_entry( + test_id(20), + StorageEntry { + prev_valid_from: Some(test_id(10)), + value: 20, + }, + ) + .with_entry( + test_id(10), + StorageEntry { + prev_valid_from: Some(test_id(5)), + value: 10, + }, + ) + .with_entry( + test_id(5), + StorageEntry { + prev_valid_from: Some(test_id(3)), + value: 5, + }, + ) + .with_entry( + test_id(3), + StorageEntry { + prev_valid_from: None, + value: 0, + }, + ); + let mut tx = DummyTransaction::new(); + Fork::<_, u64> { + best_block: None, + head: Entry { + valid_from: test_id(100), + value: 0, + }, + } + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert_eq!( + *tx.removed_entries(), + vec![test_id(100).hash, test_id(50).hash, test_id(20).hash] + .into_iter() + .collect() + ); + // when we reach beginning of fork before finalized block + let storage = DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: 100, + }, + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: 50, + }, + ); + let mut tx = DummyTransaction::new(); + Fork::<_, u64> { + 
best_block: None, + head: Entry { + valid_from: test_id(100), + value: 0, + }, + } + .destroy(&storage, &mut tx, Some(200)) + .unwrap(); + assert_eq!( + *tx.removed_entries(), + vec![test_id(100).hash, test_id(50).hash] + .into_iter() + .collect() + ); + } + + #[test] + fn is_connected_to_block_fails() { + // when storage returns error + assert!(chain::is_connected_to_block::<_, u64, _>( + &FaultyStorage, + (&test_id(1)).into(), + &test_id(100), + ) + .is_err(),); + // when there's no header in the storage + assert!(chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new(), + (&test_id(1)).into(), + &test_id(100), + ) + .is_err(),); + } + + #[test] + fn is_connected_to_block_works() { + // when without iterations we end up with different block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new().with_header(test_header(1)), + (&test_id(1)).into(), + &correct_id(1) + ) + .unwrap(), + false + ); + // when with ASC iterations we end up with different block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + (&test_id(0)).into(), + &correct_id(2) + ) + .unwrap(), + false + ); + // when with DESC iterations we end up with different block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + (&correct_id(2)).into(), + &test_id(0) + ) + .unwrap(), + false + ); + // when without iterations we end up with the same block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new().with_header(test_header(1)), + (&correct_id(1)).into(), + &correct_id(1) + ) + .unwrap(), + true + ); + // when with ASC iterations we end up with the same block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + 
.with_header(test_header(1)) + .with_header(test_header(2)), + (&correct_id(0)).into(), + &correct_id(2) + ) + .unwrap(), + true + ); + // when with DESC iterations we end up with the same block + assert_eq!( + chain::is_connected_to_block::<_, u64, _>( + &DummyStorage::new() + .with_header(test_header(0)) + .with_header(test_header(1)) + .with_header(test_header(2)), + (&correct_id(2)).into(), + &correct_id(0) + ) + .unwrap(), + true + ); + } + + #[test] + fn is_finalized_block_fails() { + // when storage returns error + assert!(chain::is_finalized_block::<_, u64, _>(&FaultyStorage, &test_id(1), 100).is_err()); + } + + #[test] + fn is_finalized_block_works() { + // when number of block is larger than last finalized block + assert_eq!( + chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(100), 1).unwrap(), + false + ); + // when there's no hash for this block number in the database + assert_eq!( + chain::is_finalized_block::<_, u64, _>(&DummyStorage::new(), &test_id(1), 100).unwrap(), + false + ); + // when there's different hash for this block number in the database + assert_eq!( + chain::is_finalized_block::<_, u64, _>( + &DummyStorage::new().with_id(1, H256::from_low_u64_be(2)), + &test_id(1), + 100 + ) + .unwrap(), + false + ); + // when there's the same hash for this block number in the database + assert_eq!( + chain::is_finalized_block::<_, u64, _>( + &DummyStorage::new().with_id(1, H256::from_low_u64_be(1)), + &test_id(1), + 100 + ) + .unwrap(), + true + ); + } + + #[test] + fn read_forks_fails() { + // when storage returns error during finalized entry read + assert!(read_forks::( + &FaultyStorage, + Metadata { + finalized: Some(test_id(1)), + unfinalized: vec![], + } + ) + .is_err()); + // when storage returns error during unfinalized entry read + assert!(read_forks::( + &FaultyStorage, + Metadata { + finalized: None, + unfinalized: vec![test_id(1)], + } + ) + .is_err()); + // when finalized entry is not found + assert!(read_forks::( + 
&DummyStorage::new(), + Metadata { + finalized: Some(test_id(1)), + unfinalized: vec![], + } + ) + .is_err()); + // when unfinalized entry is not found + assert!(read_forks::( + &DummyStorage::new(), + Metadata { + finalized: None, + unfinalized: vec![test_id(1)], + } + ) + .is_err()); + } + + #[test] + fn read_forks_works() { + let storage = DummyStorage::new() + .with_entry( + test_id(10), + StorageEntry { + prev_valid_from: Some(test_id(1)), + value: 11, + }, + ) + .with_entry( + test_id(20), + StorageEntry { + prev_valid_from: Some(test_id(2)), + value: 0, + }, + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: None, + value: 33, + }, + ); + let expected = ( + Some(Entry { + valid_from: test_id(10), + value: 11, + }), + vec![ + Fork { + best_block: None, + head: Entry { + valid_from: test_id(20), + value: 0, + }, + }, + Fork { + best_block: None, + head: Entry { + valid_from: test_id(30), + value: 33, + }, + }, + ], + ); + + assert_eq!( + expected, + read_forks( + &storage, + Metadata { + finalized: Some(test_id(10)), + unfinalized: vec![test_id(20), test_id(30)], + } + ) + .unwrap() + ); + } + + #[test] + fn ancient_entries_are_pruned_when_pruning_enabled() { + fn do_test(strategy: PruningStrategy) { + let cache = ListCache::new( + DummyStorage::new() + .with_id(10, H256::from_low_u64_be(10)) + .with_id(20, H256::from_low_u64_be(20)) + .with_id(30, H256::from_low_u64_be(30)) + .with_entry( + test_id(10), + StorageEntry { + prev_valid_from: None, + value: 10, + }, + ) + .with_entry( + test_id(20), + StorageEntry { + prev_valid_from: Some(test_id(10)), + value: 20, + }, + ) + .with_entry( + test_id(30), + StorageEntry { + prev_valid_from: Some(test_id(20)), + value: 30, + }, + ), + strategy, + test_id(9), + ) + .unwrap(); + let mut tx = DummyTransaction::new(); + + // when finalizing entry #10: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(10)); + assert!(tx.removed_entries().is_empty()); + 
assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #19: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(19)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #20: no entries pruned + cache.prune_finalized_entries(&mut tx, &test_id(20)); + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + // when finalizing entry #30: entry 10 pruned + entry 20 is truncated (if pruning is enabled) + cache.prune_finalized_entries(&mut tx, &test_id(30)); + match strategy { + PruningStrategy::NeverPrune => { + assert!(tx.removed_entries().is_empty()); + assert!(tx.inserted_entries().is_empty()); + } + PruningStrategy::ByDepth(_) => { + assert_eq!( + *tx.removed_entries(), + vec![test_id(10).hash].into_iter().collect() + ); + assert_eq!( + *tx.inserted_entries(), + vec![test_id(20).hash].into_iter().collect() + ); + } + } + } + + do_test(PruningStrategy::ByDepth(10)); + do_test(PruningStrategy::NeverPrune) + } + + #[test] + fn revert_block_works() { + // 1 -> (2) -> 3 -> 4 -> 5 + // \ + // -> 5'' + // \ + // -> (3') -> 4' -> 5' + let mut cache = ListCache::new( + DummyStorage::new() + .with_meta( + Some(correct_id(1)), + vec![correct_id(5), fork_id(1, 2, 5), fork_id(2, 4, 5)], + ) + .with_id(1, correct_id(1).hash) + .with_entry( + correct_id(1), + StorageEntry { + prev_valid_from: None, + value: 1, + }, + ) + .with_entry( + correct_id(3), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: 3, + }, + ) + .with_entry( + correct_id(4), + StorageEntry { + prev_valid_from: Some(correct_id(3)), + value: 4, + }, + ) + .with_entry( + correct_id(5), + StorageEntry { + prev_valid_from: Some(correct_id(4)), + value: 5, + }, + ) + .with_entry( + fork_id(1, 2, 4), + StorageEntry { + prev_valid_from: Some(correct_id(1)), + value: 14, + }, + ) + .with_entry( + fork_id(1, 2, 5), + StorageEntry { + prev_valid_from: Some(fork_id(1, 2, 4)), + 
value: 15, + }, + ) + .with_entry( + fork_id(2, 4, 5), + StorageEntry { + prev_valid_from: Some(correct_id(4)), + value: 25, + }, + ) + .with_header(test_header(1)) + .with_header(test_header(2)) + .with_header(test_header(3)) + .with_header(test_header(4)) + .with_header(test_header(5)) + .with_header(fork_header(1, 2, 3)) + .with_header(fork_header(1, 2, 4)) + .with_header(fork_header(1, 2, 5)) + .with_header(fork_header(2, 4, 5)), + PruningStrategy::ByDepth(1024), + correct_id(1), + ) + .unwrap(); + + // when 5 is reverted: entry 5 is truncated + let op = cache + .do_on_block_revert(&mut DummyTransaction::new(), &correct_id(5)) + .unwrap(); + assert_eq!( + op, + CommitOperation::BlockReverted( + vec![( + 0, + Some(Fork { + best_block: None, + head: Entry { + valid_from: correct_id(4), + value: 4 + } + }) + ),] + .into_iter() + .collect() + ) + ); + cache.on_transaction_commit(vec![op].into()); + + // when 3 is reverted: entries 4+5' are truncated + let op = cache + .do_on_block_revert(&mut DummyTransaction::new(), &correct_id(3)) + .unwrap(); + assert_eq!( + op, + CommitOperation::BlockReverted(vec![(0, None), (2, None),].into_iter().collect()) + ); + cache.on_transaction_commit(vec![op].into()); + + // when 2 is reverted: entries 4'+5' are truncated + let op = cache + .do_on_block_revert(&mut DummyTransaction::new(), &correct_id(2)) + .unwrap(); + assert_eq!( + op, + CommitOperation::BlockReverted(vec![(0, None),].into_iter().collect()) + ); + cache.on_transaction_commit(vec![op].into()); + } + + #[test] + fn append_commit_operation_works() { + let mut ops = CommitOperations::default(); + ops.append(None); + assert_eq!(ops.operations, Vec::new()); + + ops.append(Some(CommitOperation::BlockFinalized( + test_id(10), + Some(Entry { + valid_from: test_id(10), + value: 10, + }), + vec![5].into_iter().collect(), + ))); + assert_eq!( + ops.operations, + vec![CommitOperation::BlockFinalized( + test_id(10), + Some(Entry { + valid_from: test_id(10), + value: 10 + }), + 
vec![5].into_iter().collect(), + )], + ); + + ops.append(Some(CommitOperation::BlockFinalized( + test_id(20), + Some(Entry { + valid_from: test_id(20), + value: 20, + }), + vec![5, 6].into_iter().collect(), + ))); + + assert_eq!( + ops.operations, + vec![CommitOperation::BlockFinalized( + test_id(20), + Some(Entry { + valid_from: test_id(20), + value: 20 + }), + vec![5, 6].into_iter().collect(), + )], + ); + } } diff --git a/client/db/src/cache/list_entry.rs b/client/db/src/cache/list_entry.rs index e184343290..acc3d85b03 100644 --- a/client/db/src/cache/list_entry.rs +++ b/client/db/src/cache/list_entry.rs @@ -16,21 +16,21 @@ //! List-cache storage entries. +use codec::{Decode, Encode}; use sp_blockchain::Result as ClientResult; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use codec::{Encode, Decode}; +use crate::cache::list_storage::Storage; use crate::cache::{CacheItemT, ComplexBlockId}; -use crate::cache::list_storage::{Storage}; /// Single list-based cache entry. #[derive(Debug)] #[cfg_attr(test, derive(PartialEq))] pub struct Entry { - /// first block, when this value became actual. - pub valid_from: ComplexBlockId, - /// Value stored at this entry. - pub value: T, + /// first block, when this value became actual. + pub valid_from: ComplexBlockId, + /// Value stored at this entry. + pub value: T, } /// Internal representation of the single list-based cache entry. The entry points to the @@ -38,124 +38,222 @@ pub struct Entry { #[derive(Debug, Encode, Decode)] #[cfg_attr(test, derive(Clone, PartialEq))] pub struct StorageEntry { - /// None if valid from the beginning. - pub prev_valid_from: Option>, - /// Value stored at this entry. - pub value: T, + /// None if valid from the beginning. + pub prev_valid_from: Option>, + /// Value stored at this entry. + pub value: T, } impl Entry { - /// Returns Some if the entry should be updated with the new value. 
- pub fn try_update(&self, value: Option) -> Option> { - match value { - Some(value) => match self.value == value { - true => None, - false => Some(StorageEntry { - prev_valid_from: Some(self.valid_from.clone()), - value, - }), - }, - None => None, - } - } - - /// Wrapper that calls search_before to get range where the given block fits. - pub fn search_best_range_before>( - &self, - storage: &S, - block: NumberFor, - ) -> ClientResult, Option>)>> { - Ok(self.search_best_before(storage, block)? - .map(|(entry, next)| (entry.valid_from, next))) - } - - /// Searches the list, ending with THIS entry for the best entry preceding (or at) - /// given block number. - /// If the entry is found, result is the entry and the block id of next entry (if exists). - /// NOTE that this function does not check that the passed block is actually linked to - /// the blocks it found. - pub fn search_best_before>( - &self, - storage: &S, - block: NumberFor, - ) -> ClientResult, Option>)>> { - // we're looking for the best value - let mut next = None; - let mut current = self.valid_from.clone(); - if block >= self.valid_from.number { - let value = self.value.clone(); - return Ok(Some((Entry { valid_from: current, value }, next))); - } - - // else - travel back in time - loop { - let entry = storage.require_entry(¤t)?; - if block >= current.number { - return Ok(Some((Entry { valid_from: current, value: entry.value }, next))); - } - - next = Some(current); - current = match entry.prev_valid_from { - Some(prev_valid_from) => prev_valid_from, - None => return Ok(None), - }; - } - } + /// Returns Some if the entry should be updated with the new value. + pub fn try_update(&self, value: Option) -> Option> { + match value { + Some(value) => match self.value == value { + true => None, + false => Some(StorageEntry { + prev_valid_from: Some(self.valid_from.clone()), + value, + }), + }, + None => None, + } + } + + /// Wrapper that calls search_before to get range where the given block fits. 
+ pub fn search_best_range_before>( + &self, + storage: &S, + block: NumberFor, + ) -> ClientResult, Option>)>> { + Ok(self + .search_best_before(storage, block)? + .map(|(entry, next)| (entry.valid_from, next))) + } + + /// Searches the list, ending with THIS entry for the best entry preceding (or at) + /// given block number. + /// If the entry is found, result is the entry and the block id of next entry (if exists). + /// NOTE that this function does not check that the passed block is actually linked to + /// the blocks it found. + pub fn search_best_before>( + &self, + storage: &S, + block: NumberFor, + ) -> ClientResult, Option>)>> { + // we're looking for the best value + let mut next = None; + let mut current = self.valid_from.clone(); + if block >= self.valid_from.number { + let value = self.value.clone(); + return Ok(Some(( + Entry { + valid_from: current, + value, + }, + next, + ))); + } + + // else - travel back in time + loop { + let entry = storage.require_entry(¤t)?; + if block >= current.number { + return Ok(Some(( + Entry { + valid_from: current, + value: entry.value, + }, + next, + ))); + } + + next = Some(current); + current = match entry.prev_valid_from { + Some(prev_valid_from) => prev_valid_from, + None => return Ok(None), + }; + } + } } impl StorageEntry { - /// Converts storage entry into an entry, valid from given block. - pub fn into_entry(self, valid_from: ComplexBlockId) -> Entry { - Entry { - valid_from, - value: self.value, - } - } + /// Converts storage entry into an entry, valid from given block. 
+ pub fn into_entry(self, valid_from: ComplexBlockId) -> Entry { + Entry { + valid_from, + value: self.value, + } + } } #[cfg(test)] mod tests { - use crate::cache::list_cache::tests::test_id; - use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage}; - use super::*; - - #[test] - fn entry_try_update_works() { - // when trying to update with None value - assert_eq!(Entry::<_, u64> { valid_from: test_id(1), value: 42 }.try_update(None), None); - // when trying to update with the same Some value - assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(1)), None); - // when trying to update with different Some value - assert_eq!(Entry { valid_from: test_id(1), value: 1 }.try_update(Some(2)), - Some(StorageEntry { prev_valid_from: Some(test_id(1)), value: 2 })); - } - - #[test] - fn entry_search_best_before_fails() { - // when storage returns error - assert!(Entry::<_, u64> { valid_from: test_id(100), value: 42 } - .search_best_before(&FaultyStorage, 50).is_err()); - } - - #[test] - fn entry_search_best_before_works() { - // when block is better than our best block - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } - .search_best_before(&DummyStorage::new(), 150).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(100), value: 100 }, None))); - // when block is found between two entries - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } - .search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - .with_entry(test_id(50), StorageEntry { prev_valid_from: Some(test_id(30)), value: 50 }), - 75).unwrap(), - Some((Entry::<_, u64> { valid_from: test_id(50), value: 50 }, Some(test_id(100))))); - // when block is not found - assert_eq!(Entry::<_, u64> { valid_from: test_id(100), value: 100 } - .search_best_before(&DummyStorage::new() - .with_entry(test_id(100), StorageEntry { prev_valid_from: Some(test_id(50)), value: 100 }) - 
.with_entry(test_id(50), StorageEntry { prev_valid_from: None, value: 50 }), - 30).unwrap(), - None); - } + use super::*; + use crate::cache::list_cache::tests::test_id; + use crate::cache::list_storage::tests::{DummyStorage, FaultyStorage}; + + #[test] + fn entry_try_update_works() { + // when trying to update with None value + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(1), + value: 42 + } + .try_update(None), + None + ); + // when trying to update with the same Some value + assert_eq!( + Entry { + valid_from: test_id(1), + value: 1 + } + .try_update(Some(1)), + None + ); + // when trying to update with different Some value + assert_eq!( + Entry { + valid_from: test_id(1), + value: 1 + } + .try_update(Some(2)), + Some(StorageEntry { + prev_valid_from: Some(test_id(1)), + value: 2 + }) + ); + } + + #[test] + fn entry_search_best_before_fails() { + // when storage returns error + assert!(Entry::<_, u64> { + valid_from: test_id(100), + value: 42 + } + .search_best_before(&FaultyStorage, 50) + .is_err()); + } + + #[test] + fn entry_search_best_before_works() { + // when block is better than our best block + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(100), + value: 100 + } + .search_best_before(&DummyStorage::new(), 150) + .unwrap(), + Some(( + Entry::<_, u64> { + valid_from: test_id(100), + value: 100 + }, + None + )) + ); + // when block is found between two entries + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(100), + value: 100 + } + .search_best_before( + &DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: 100 + } + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: Some(test_id(30)), + value: 50 + } + ), + 75 + ) + .unwrap(), + Some(( + Entry::<_, u64> { + valid_from: test_id(50), + value: 50 + }, + Some(test_id(100)) + )) + ); + // when block is not found + assert_eq!( + Entry::<_, u64> { + valid_from: test_id(100), + value: 100 + } + 
.search_best_before( + &DummyStorage::new() + .with_entry( + test_id(100), + StorageEntry { + prev_valid_from: Some(test_id(50)), + value: 100 + } + ) + .with_entry( + test_id(50), + StorageEntry { + prev_valid_from: None, + value: 50 + } + ), + 30 + ) + .unwrap(), + None + ); + } } diff --git a/client/db/src/cache/list_storage.rs b/client/db/src/cache/list_storage.rs index 07cd9fb866..4def9c76e0 100644 --- a/client/db/src/cache/list_storage.rs +++ b/client/db/src/cache/list_storage.rs @@ -18,367 +18,436 @@ use std::sync::Arc; +use crate::utils::{self, meta_keys}; +use codec::{Decode, Encode}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use codec::{Encode, Decode}; +use sp_database::{Database, Transaction}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_database::{Database, Transaction}; -use crate::utils::{self, meta_keys}; -use crate::cache::{CacheItemT, ComplexBlockId}; use crate::cache::list_cache::{CommitOperation, Fork}; use crate::cache::list_entry::{Entry, StorageEntry}; +use crate::cache::{CacheItemT, ComplexBlockId}; use crate::DbHash; /// Single list-cache metadata. #[derive(Debug)] #[cfg_attr(test, derive(Clone, PartialEq))] pub struct Metadata { - /// Block at which best finalized entry is stored. - pub finalized: Option>, - /// A set of blocks at which best unfinalized entries are stored. - pub unfinalized: Vec>, + /// Block at which best finalized entry is stored. + pub finalized: Option>, + /// A set of blocks at which best unfinalized entries are stored. + pub unfinalized: Vec>, } /// Readonly list-cache storage trait. pub trait Storage { - /// Reads hash of the block at given number. - fn read_id(&self, at: NumberFor) -> ClientResult>; - - /// Reads header of the block with given hash. - fn read_header(&self, at: &Block::Hash) -> ClientResult>; - - /// Reads cache metadata: best finalized entry (if some) and the list. 
- fn read_meta(&self) -> ClientResult>; - - /// Reads cache entry from the storage. - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>>; - - /// Reads referenced (and thus existing) cache entry from the storage. - fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> { - self.read_entry(at) - .and_then(|entry| entry - .ok_or_else(|| ClientError::from( - ClientError::Backend(format!("Referenced cache entry at {:?} is not found", at))))) - } + /// Reads hash of the block at given number. + fn read_id(&self, at: NumberFor) -> ClientResult>; + + /// Reads header of the block with given hash. + fn read_header(&self, at: &Block::Hash) -> ClientResult>; + + /// Reads cache metadata: best finalized entry (if some) and the list. + fn read_meta(&self) -> ClientResult>; + + /// Reads cache entry from the storage. + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>>; + + /// Reads referenced (and thus existing) cache entry from the storage. + fn require_entry(&self, at: &ComplexBlockId) -> ClientResult> { + self.read_entry(at).and_then(|entry| { + entry.ok_or_else(|| { + ClientError::from(ClientError::Backend(format!( + "Referenced cache entry at {:?} is not found", + at + ))) + }) + }) + } } /// List-cache storage transaction. pub trait StorageTransaction { - /// Insert storage entry at given block. - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry); - - /// Delete storage entry at given block. - fn remove_storage_entry(&mut self, at: &ComplexBlockId); - - /// Update metadata of the cache. - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ); + /// Insert storage entry at given block. + fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry); + + /// Delete storage entry at given block. + fn remove_storage_entry(&mut self, at: &ComplexBlockId); + + /// Update metadata of the cache. 
+ fn update_meta( + &mut self, + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + operation: &CommitOperation, + ); } /// A set of columns used by the DbStorage. #[derive(Debug)] pub struct DbColumns { - /// Column holding cache meta. - pub meta: u32, - /// Column holding the mapping of { block number => block hash } for blocks of the best chain. - pub key_lookup: u32, - /// Column holding the mapping of { block hash => block header }. - pub header: u32, - /// Column holding cache entries. - pub cache: u32, + /// Column holding cache meta. + pub meta: u32, + /// Column holding the mapping of { block number => block hash } for blocks of the best chain. + pub key_lookup: u32, + /// Column holding the mapping of { block hash => block header }. + pub header: u32, + /// Column holding cache entries. + pub cache: u32, } /// Database-backed list cache storage. pub struct DbStorage { - name: Vec, - meta_key: Vec, - db: Arc>, - columns: DbColumns, + name: Vec, + meta_key: Vec, + db: Arc>, + columns: DbColumns, } impl DbStorage { - /// Create new database-backed list cache storage. - pub fn new(name: Vec, db: Arc>, columns: DbColumns) -> Self { - let meta_key = meta::key(&name); - DbStorage { name, meta_key, db, columns } - } - - /// Get reference to the database. - pub fn db(&self) -> &Arc> { &self.db } - - /// Get reference to the database columns. - pub fn columns(&self) -> &DbColumns { &self.columns } - - /// Encode block id for storing as a key in cache column. - /// We append prefix to the actual encoding to allow several caches - /// store entries in the same column. - pub fn encode_block_id(&self, block: &ComplexBlockId) -> Vec { - let mut encoded = self.name.clone(); - encoded.extend(block.hash.as_ref()); - encoded - } + /// Create new database-backed list cache storage. 
+ pub fn new(name: Vec, db: Arc>, columns: DbColumns) -> Self { + let meta_key = meta::key(&name); + DbStorage { + name, + meta_key, + db, + columns, + } + } + + /// Get reference to the database. + pub fn db(&self) -> &Arc> { + &self.db + } + + /// Get reference to the database columns. + pub fn columns(&self) -> &DbColumns { + &self.columns + } + + /// Encode block id for storing as a key in cache column. + /// We append prefix to the actual encoding to allow several caches + /// store entries in the same column. + pub fn encode_block_id(&self, block: &ComplexBlockId) -> Vec { + let mut encoded = self.name.clone(); + encoded.extend(block.hash.as_ref()); + encoded + } } impl Storage for DbStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - utils::read_header::(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Number(at)) - .map(|maybe_header| maybe_header.map(|header| header.hash())) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - utils::read_header::(&*self.db, self.columns.key_lookup, self.columns.header, BlockId::Hash(*at)) - } - - fn read_meta(&self) -> ClientResult> { - match self.db.get(self.columns.meta, &self.meta_key) { - Some(meta) => meta::decode(&*meta), - None => Ok(Metadata { - finalized: None, - unfinalized: Vec::new(), - }) - } - } - - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> { - match self.db.get(self.columns.cache, &self.encode_block_id(at)) { - Some(entry) => StorageEntry::::decode(&mut &entry[..]) - .map_err(|_| ClientError::Backend("Failed to decode cache entry".into())) - .map(Some), - None => Ok(None), - } - } + fn read_id(&self, at: NumberFor) -> ClientResult> { + utils::read_header::( + &*self.db, + self.columns.key_lookup, + self.columns.header, + BlockId::Number(at), + ) + .map(|maybe_header| maybe_header.map(|header| header.hash())) + } + + fn read_header(&self, at: &Block::Hash) -> ClientResult> { + utils::read_header::( + &*self.db, + self.columns.key_lookup, + 
self.columns.header, + BlockId::Hash(*at), + ) + } + + fn read_meta(&self) -> ClientResult> { + match self.db.get(self.columns.meta, &self.meta_key) { + Some(meta) => meta::decode(&*meta), + None => Ok(Metadata { + finalized: None, + unfinalized: Vec::new(), + }), + } + } + + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>> { + match self.db.get(self.columns.cache, &self.encode_block_id(at)) { + Some(entry) => StorageEntry::::decode(&mut &entry[..]) + .map_err(|_| ClientError::Backend("Failed to decode cache entry".into())) + .map(Some), + None => Ok(None), + } + } } /// Database-backed list cache storage transaction. pub struct DbStorageTransaction<'a> { - storage: &'a DbStorage, - tx: &'a mut Transaction, + storage: &'a DbStorage, + tx: &'a mut Transaction, } impl<'a> DbStorageTransaction<'a> { - /// Create new database transaction. - pub fn new(storage: &'a DbStorage, tx: &'a mut Transaction) -> Self { - DbStorageTransaction { storage, tx } - } + /// Create new database transaction. 
+ pub fn new(storage: &'a DbStorage, tx: &'a mut Transaction) -> Self { + DbStorageTransaction { storage, tx } + } } impl<'a, Block: BlockT, T: CacheItemT> StorageTransaction for DbStorageTransaction<'a> { - fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { - self.tx.set_from_vec(self.storage.columns.cache, &self.storage.encode_block_id(at), entry.encode()); - } - - fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.tx.remove(self.storage.columns.cache, &self.storage.encode_block_id(at)); - } - - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ) { - self.tx.set_from_vec( - self.storage.columns.meta, - &self.storage.meta_key, - meta::encode(best_finalized_entry, unfinalized, operation)); - } + fn insert_storage_entry(&mut self, at: &ComplexBlockId, entry: &StorageEntry) { + self.tx.set_from_vec( + self.storage.columns.cache, + &self.storage.encode_block_id(at), + entry.encode(), + ); + } + + fn remove_storage_entry(&mut self, at: &ComplexBlockId) { + self.tx.remove( + self.storage.columns.cache, + &self.storage.encode_block_id(at), + ); + } + + fn update_meta( + &mut self, + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + operation: &CommitOperation, + ) { + self.tx.set_from_vec( + self.storage.columns.meta, + &self.storage.meta_key, + meta::encode(best_finalized_entry, unfinalized, operation), + ); + } } /// Metadata related functions. mod meta { - use super::*; - - /// Convert cache name into cache metadata key. - pub fn key(name: &[u8]) -> Vec { - let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec(); - key_name.extend_from_slice(name); - key_name - } - - /// Encode cache metadata 'applying' commit operation before encoding. 
- pub fn encode( - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - op: &CommitOperation - ) -> Vec { - let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from); - let mut unfinalized = unfinalized.iter().map(|fork| &fork.head().valid_from).collect::>(); - - match op { - CommitOperation::AppendNewBlock(_, _) => (), - CommitOperation::AppendNewEntry(index, ref entry) => { - unfinalized[*index] = &entry.valid_from; - }, - CommitOperation::AddNewFork(ref entry) => { - unfinalized.push(&entry.valid_from); - }, - CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => { - if let Some(finalizing_entry) = finalizing_entry.as_ref() { - finalized = Some(&finalizing_entry.valid_from); - } - for fork_index in forks.iter().rev() { - unfinalized.remove(*fork_index); - } - }, - CommitOperation::BlockReverted(ref forks) => { - for (fork_index, updated_fork) in forks.iter().rev() { - match updated_fork { - Some(updated_fork) => unfinalized[*fork_index] = &updated_fork.head().valid_from, - None => { unfinalized.remove(*fork_index); }, - } - } - }, - } - - (finalized, unfinalized).encode() - } - - /// Decode meta information. - pub fn decode(encoded: &[u8]) -> ClientResult> { - let input = &mut &*encoded; - let finalized: Option> = Decode::decode(input) - .map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?; - let unfinalized: Vec> = Decode::decode(input) - .map_err(|_| ClientError::from(ClientError::Backend("Error decoding cache meta".into())))?; - - Ok(Metadata { finalized, unfinalized }) - } + use super::*; + + /// Convert cache name into cache metadata key. + pub fn key(name: &[u8]) -> Vec { + let mut key_name = meta_keys::CACHE_META_PREFIX.to_vec(); + key_name.extend_from_slice(name); + key_name + } + + /// Encode cache metadata 'applying' commit operation before encoding. 
+ pub fn encode( + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + op: &CommitOperation, + ) -> Vec { + let mut finalized = best_finalized_entry.as_ref().map(|entry| &entry.valid_from); + let mut unfinalized = unfinalized + .iter() + .map(|fork| &fork.head().valid_from) + .collect::>(); + + match op { + CommitOperation::AppendNewBlock(_, _) => (), + CommitOperation::AppendNewEntry(index, ref entry) => { + unfinalized[*index] = &entry.valid_from; + } + CommitOperation::AddNewFork(ref entry) => { + unfinalized.push(&entry.valid_from); + } + CommitOperation::BlockFinalized(_, ref finalizing_entry, ref forks) => { + if let Some(finalizing_entry) = finalizing_entry.as_ref() { + finalized = Some(&finalizing_entry.valid_from); + } + for fork_index in forks.iter().rev() { + unfinalized.remove(*fork_index); + } + } + CommitOperation::BlockReverted(ref forks) => { + for (fork_index, updated_fork) in forks.iter().rev() { + match updated_fork { + Some(updated_fork) => { + unfinalized[*fork_index] = &updated_fork.head().valid_from + } + None => { + unfinalized.remove(*fork_index); + } + } + } + } + } + + (finalized, unfinalized).encode() + } + + /// Decode meta information. 
+ pub fn decode(encoded: &[u8]) -> ClientResult> { + let input = &mut &*encoded; + let finalized: Option> = Decode::decode(input).map_err(|_| { + ClientError::from(ClientError::Backend("Error decoding cache meta".into())) + })?; + let unfinalized: Vec> = Decode::decode(input).map_err(|_| { + ClientError::from(ClientError::Backend("Error decoding cache meta".into())) + })?; + + Ok(Metadata { + finalized, + unfinalized, + }) + } } #[cfg(test)] pub mod tests { - use std::collections::{HashMap, HashSet}; - use super::*; - - pub struct FaultyStorage; - - impl Storage for FaultyStorage { - fn read_id(&self, _at: NumberFor) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_header(&self, _at: &Block::Hash) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_meta(&self) -> ClientResult> { - Err(ClientError::Backend("TestError".into())) - } - - fn read_entry(&self, _at: &ComplexBlockId) -> ClientResult>> { - Err(ClientError::Backend("TestError".into())) - } - } - - pub struct DummyStorage { - meta: Metadata, - ids: HashMap, Block::Hash>, - headers: HashMap, - entries: HashMap>, - } - - impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - meta: Metadata { - finalized: None, - unfinalized: Vec::new(), - }, - ids: HashMap::new(), - headers: HashMap::new(), - entries: HashMap::new(), - } - } - - pub fn with_meta(mut self, finalized: Option>, unfinalized: Vec>) -> Self { - self.meta.finalized = finalized; - self.meta.unfinalized = unfinalized; - self - } - - pub fn with_id(mut self, at: NumberFor, id: Block::Hash) -> Self { - self.ids.insert(at, id); - self - } - - pub fn with_header(mut self, header: Block::Header) -> Self { - self.headers.insert(header.hash(), header); - self - } - - pub fn with_entry(mut self, at: ComplexBlockId, entry: StorageEntry) -> Self { - self.entries.insert(at.hash, entry); - self - } - } - - impl Storage for DummyStorage { - fn read_id(&self, at: NumberFor) -> ClientResult> { - 
Ok(self.ids.get(&at).cloned()) - } - - fn read_header(&self, at: &Block::Hash) -> ClientResult> { - Ok(self.headers.get(&at).cloned()) - } - - fn read_meta(&self) -> ClientResult> { - Ok(self.meta.clone()) - } - - fn read_entry(&self, at: &ComplexBlockId) -> ClientResult>> { - Ok(self.entries.get(&at.hash).cloned()) - } - } - - pub struct DummyTransaction { - updated_meta: Option>, - inserted_entries: HashSet, - removed_entries: HashSet, - } - - impl DummyTransaction { - pub fn new() -> Self { - DummyTransaction { - updated_meta: None, - inserted_entries: HashSet::new(), - removed_entries: HashSet::new(), - } - } - - pub fn inserted_entries(&self) -> &HashSet { - &self.inserted_entries - } - - pub fn removed_entries(&self) -> &HashSet { - &self.removed_entries - } - - pub fn updated_meta(&self) -> &Option> { - &self.updated_meta - } - } - - impl StorageTransaction for DummyTransaction { - fn insert_storage_entry(&mut self, at: &ComplexBlockId, _entry: &StorageEntry) { - self.inserted_entries.insert(at.hash); - } - - fn remove_storage_entry(&mut self, at: &ComplexBlockId) { - self.removed_entries.insert(at.hash); - } - - fn update_meta( - &mut self, - best_finalized_entry: Option<&Entry>, - unfinalized: &[Fork], - operation: &CommitOperation, - ) { - self.updated_meta = Some(meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap()); - } - } + use super::*; + use std::collections::{HashMap, HashSet}; + + pub struct FaultyStorage; + + impl Storage for FaultyStorage { + fn read_id(&self, _at: NumberFor) -> ClientResult> { + Err(ClientError::Backend("TestError".into())) + } + + fn read_header(&self, _at: &Block::Hash) -> ClientResult> { + Err(ClientError::Backend("TestError".into())) + } + + fn read_meta(&self) -> ClientResult> { + Err(ClientError::Backend("TestError".into())) + } + + fn read_entry( + &self, + _at: &ComplexBlockId, + ) -> ClientResult>> { + Err(ClientError::Backend("TestError".into())) + } + } + + pub struct DummyStorage { + 
meta: Metadata, + ids: HashMap, Block::Hash>, + headers: HashMap, + entries: HashMap>, + } + + impl DummyStorage { + pub fn new() -> Self { + DummyStorage { + meta: Metadata { + finalized: None, + unfinalized: Vec::new(), + }, + ids: HashMap::new(), + headers: HashMap::new(), + entries: HashMap::new(), + } + } + + pub fn with_meta( + mut self, + finalized: Option>, + unfinalized: Vec>, + ) -> Self { + self.meta.finalized = finalized; + self.meta.unfinalized = unfinalized; + self + } + + pub fn with_id(mut self, at: NumberFor, id: Block::Hash) -> Self { + self.ids.insert(at, id); + self + } + + pub fn with_header(mut self, header: Block::Header) -> Self { + self.headers.insert(header.hash(), header); + self + } + + pub fn with_entry( + mut self, + at: ComplexBlockId, + entry: StorageEntry, + ) -> Self { + self.entries.insert(at.hash, entry); + self + } + } + + impl Storage for DummyStorage { + fn read_id(&self, at: NumberFor) -> ClientResult> { + Ok(self.ids.get(&at).cloned()) + } + + fn read_header(&self, at: &Block::Hash) -> ClientResult> { + Ok(self.headers.get(&at).cloned()) + } + + fn read_meta(&self) -> ClientResult> { + Ok(self.meta.clone()) + } + + fn read_entry( + &self, + at: &ComplexBlockId, + ) -> ClientResult>> { + Ok(self.entries.get(&at.hash).cloned()) + } + } + + pub struct DummyTransaction { + updated_meta: Option>, + inserted_entries: HashSet, + removed_entries: HashSet, + } + + impl DummyTransaction { + pub fn new() -> Self { + DummyTransaction { + updated_meta: None, + inserted_entries: HashSet::new(), + removed_entries: HashSet::new(), + } + } + + pub fn inserted_entries(&self) -> &HashSet { + &self.inserted_entries + } + + pub fn removed_entries(&self) -> &HashSet { + &self.removed_entries + } + + pub fn updated_meta(&self) -> &Option> { + &self.updated_meta + } + } + + impl StorageTransaction for DummyTransaction { + fn insert_storage_entry( + &mut self, + at: &ComplexBlockId, + _entry: &StorageEntry, + ) { + 
self.inserted_entries.insert(at.hash); + } + + fn remove_storage_entry(&mut self, at: &ComplexBlockId) { + self.removed_entries.insert(at.hash); + } + + fn update_meta( + &mut self, + best_finalized_entry: Option<&Entry>, + unfinalized: &[Fork], + operation: &CommitOperation, + ) { + self.updated_meta = Some( + meta::decode(&meta::encode(best_finalized_entry, unfinalized, operation)).unwrap(), + ); + } + } } diff --git a/client/db/src/cache/mod.rs b/client/db/src/cache/mod.rs index 8d3e1f358b..f411d3b899 100644 --- a/client/db/src/cache/mod.rs +++ b/client/db/src/cache/mod.rs @@ -16,17 +16,23 @@ //! DB-backed cache of blockchain data. -use std::{sync::Arc, collections::{HashMap, hash_map::Entry}}; use parking_lot::RwLock; +use std::{ + collections::{hash_map::Entry, HashMap}, + sync::Arc, +}; -use sc_client_api::blockchain::{well_known_cache_keys::{self, Id as CacheKeyId}, Cache as BlockchainCache}; +use crate::utils::{self, COLUMN_META}; +use crate::DbHash; +use codec::{Decode, Encode}; +use sc_client_api::blockchain::{ + well_known_cache_keys::{self, Id as CacheKeyId}, + Cache as BlockchainCache, +}; use sp_blockchain::Result as ClientResult; use sp_database::{Database, Transaction}; -use codec::{Encode, Decode}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; -use crate::utils::{self, COLUMN_META}; -use crate::DbHash; use self::list_cache::{ListCache, PruningStrategy}; @@ -40,34 +46,34 @@ const PRUNE_DEPTH: u32 = 1024; /// The type of entry that is inserted to the cache. #[derive(Clone, Copy, Debug, PartialEq)] pub enum EntryType { - /// Non-final entry. - NonFinal, - /// Final entry. - Final, - /// Genesis entry (inserted during cache initialization). - Genesis, + /// Non-final entry. + NonFinal, + /// Final entry. + Final, + /// Genesis entry (inserted during cache initialization). + Genesis, } /// Block identifier that holds both hash and number. 
#[derive(Clone, Debug, Encode, Decode, PartialEq)] pub struct ComplexBlockId { - /// Hash of the block. - pub(crate) hash: Block::Hash, - /// Number of the block. - pub(crate) number: NumberFor, + /// Hash of the block. + pub(crate) hash: Block::Hash, + /// Number of the block. + pub(crate) number: NumberFor, } impl ComplexBlockId { - /// Create new complex block id. - pub fn new(hash: Block::Hash, number: NumberFor) -> Self { - ComplexBlockId { hash, number } - } + /// Create new complex block id. + pub fn new(hash: Block::Hash, number: NumberFor) -> Self { + ComplexBlockId { hash, number } + } } impl ::std::cmp::PartialOrd for ComplexBlockId { - fn partial_cmp(&self, other: &ComplexBlockId) -> Option<::std::cmp::Ordering> { - self.number.partial_cmp(&other.number) - } + fn partial_cmp(&self, other: &ComplexBlockId) -> Option<::std::cmp::Ordering> { + self.number.partial_cmp(&other.number) + } } /// All cache items must implement this trait. @@ -77,321 +83,333 @@ impl CacheItemT for T where T: Clone + Decode + Encode + PartialEq {} /// Database-backed blockchain data cache. pub struct DbCache { - cache_at: HashMap, self::list_storage::DbStorage>>, - db: Arc>, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - genesis_hash: Block::Hash, - best_finalized_block: ComplexBlockId, + cache_at: HashMap, self::list_storage::DbStorage>>, + db: Arc>, + key_lookup_column: u32, + header_column: u32, + cache_column: u32, + genesis_hash: Block::Hash, + best_finalized_block: ComplexBlockId, } impl DbCache { - /// Create new cache. - pub fn new( - db: Arc>, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - genesis_hash: Block::Hash, - best_finalized_block: ComplexBlockId, - ) -> Self { - Self { - cache_at: HashMap::new(), - db, - key_lookup_column, - header_column, - cache_column, - genesis_hash, - best_finalized_block, - } - } - - /// Set genesis block hash. 
- pub fn set_genesis_hash(&mut self, genesis_hash: Block::Hash) { - self.genesis_hash = genesis_hash; - } - - /// Begin cache transaction. - pub fn transaction<'a>(&'a mut self, tx: &'a mut Transaction) -> DbCacheTransaction<'a, Block> { - DbCacheTransaction { - cache: self, - tx, - cache_at_ops: HashMap::new(), - best_finalized_block: None, - } - } - - /// Begin cache transaction with given ops. - pub fn transaction_with_ops<'a>( - &'a mut self, - tx: &'a mut Transaction, - ops: DbCacheTransactionOps, - ) -> DbCacheTransaction<'a, Block> { - DbCacheTransaction { - cache: self, - tx, - cache_at_ops: ops.cache_at_ops, - best_finalized_block: ops.best_finalized_block, - } - } - - /// Run post-commit cache operations. - pub fn commit(&mut self, ops: DbCacheTransactionOps) -> ClientResult<()> { - for (name, ops) in ops.cache_at_ops.into_iter() { - self.get_cache(name)?.on_transaction_commit(ops); - } - if let Some(best_finalized_block) = ops.best_finalized_block { - self.best_finalized_block = best_finalized_block; - } - Ok(()) - } - - /// Creates `ListCache` with the given name or returns a reference to the existing. - pub(crate) fn get_cache( - &mut self, - name: CacheKeyId, - ) -> ClientResult<&mut ListCache, self::list_storage::DbStorage>> { - get_cache_helper( - &mut self.cache_at, - name, - &self.db, - self.key_lookup_column, - self.header_column, - self.cache_column, - &self.best_finalized_block - ) - } + /// Create new cache. + pub fn new( + db: Arc>, + key_lookup_column: u32, + header_column: u32, + cache_column: u32, + genesis_hash: Block::Hash, + best_finalized_block: ComplexBlockId, + ) -> Self { + Self { + cache_at: HashMap::new(), + db, + key_lookup_column, + header_column, + cache_column, + genesis_hash, + best_finalized_block, + } + } + + /// Set genesis block hash. + pub fn set_genesis_hash(&mut self, genesis_hash: Block::Hash) { + self.genesis_hash = genesis_hash; + } + + /// Begin cache transaction. 
+ pub fn transaction<'a>( + &'a mut self, + tx: &'a mut Transaction, + ) -> DbCacheTransaction<'a, Block> { + DbCacheTransaction { + cache: self, + tx, + cache_at_ops: HashMap::new(), + best_finalized_block: None, + } + } + + /// Begin cache transaction with given ops. + pub fn transaction_with_ops<'a>( + &'a mut self, + tx: &'a mut Transaction, + ops: DbCacheTransactionOps, + ) -> DbCacheTransaction<'a, Block> { + DbCacheTransaction { + cache: self, + tx, + cache_at_ops: ops.cache_at_ops, + best_finalized_block: ops.best_finalized_block, + } + } + + /// Run post-commit cache operations. + pub fn commit(&mut self, ops: DbCacheTransactionOps) -> ClientResult<()> { + for (name, ops) in ops.cache_at_ops.into_iter() { + self.get_cache(name)?.on_transaction_commit(ops); + } + if let Some(best_finalized_block) = ops.best_finalized_block { + self.best_finalized_block = best_finalized_block; + } + Ok(()) + } + + /// Creates `ListCache` with the given name or returns a reference to the existing. + pub(crate) fn get_cache( + &mut self, + name: CacheKeyId, + ) -> ClientResult<&mut ListCache, self::list_storage::DbStorage>> { + get_cache_helper( + &mut self.cache_at, + name, + &self.db, + self.key_lookup_column, + self.header_column, + self.cache_column, + &self.best_finalized_block, + ) + } } // This helper is needed because otherwise the borrow checker will require to // clone all parameters outside of the closure. 
fn get_cache_helper<'a, Block: BlockT>( - cache_at: &'a mut HashMap, self::list_storage::DbStorage>>, - name: CacheKeyId, - db: &Arc>, - key_lookup: u32, - header: u32, - cache: u32, - best_finalized_block: &ComplexBlockId, + cache_at: &'a mut HashMap, self::list_storage::DbStorage>>, + name: CacheKeyId, + db: &Arc>, + key_lookup: u32, + header: u32, + cache: u32, + best_finalized_block: &ComplexBlockId, ) -> ClientResult<&'a mut ListCache, self::list_storage::DbStorage>> { - match cache_at.entry(name) { - Entry::Occupied(entry) => Ok(entry.into_mut()), - Entry::Vacant(entry) => { - let cache = ListCache::new( - self::list_storage::DbStorage::new(name.to_vec(), db.clone(), - self::list_storage::DbColumns { - meta: COLUMN_META, - key_lookup, - header, - cache, - }, - ), - cache_pruning_strategy(name), - best_finalized_block.clone(), - )?; - Ok(entry.insert(cache)) - } - } + match cache_at.entry(name) { + Entry::Occupied(entry) => Ok(entry.into_mut()), + Entry::Vacant(entry) => { + let cache = ListCache::new( + self::list_storage::DbStorage::new( + name.to_vec(), + db.clone(), + self::list_storage::DbColumns { + meta: COLUMN_META, + key_lookup, + header, + cache, + }, + ), + cache_pruning_strategy(name), + best_finalized_block.clone(), + )?; + Ok(entry.insert(cache)) + } + } } /// Cache operations that are to be committed after database transaction is committed. #[derive(Default)] pub struct DbCacheTransactionOps { - cache_at_ops: HashMap>>, - best_finalized_block: Option>, + cache_at_ops: HashMap>>, + best_finalized_block: Option>, } impl DbCacheTransactionOps { - /// Empty transaction ops. - pub fn empty() -> DbCacheTransactionOps { - DbCacheTransactionOps { - cache_at_ops: HashMap::new(), - best_finalized_block: None, - } - } + /// Empty transaction ops. 
+ pub fn empty() -> DbCacheTransactionOps { + DbCacheTransactionOps { + cache_at_ops: HashMap::new(), + best_finalized_block: None, + } + } } /// Database-backed blockchain data cache transaction valid for single block import. pub struct DbCacheTransaction<'a, Block: BlockT> { - cache: &'a mut DbCache, - tx: &'a mut Transaction, - cache_at_ops: HashMap>>, - best_finalized_block: Option>, + cache: &'a mut DbCache, + tx: &'a mut Transaction, + cache_at_ops: HashMap>>, + best_finalized_block: Option>, } impl<'a, Block: BlockT> DbCacheTransaction<'a, Block> { - /// Convert transaction into post-commit operations set. - pub fn into_ops(self) -> DbCacheTransactionOps { - DbCacheTransactionOps { - cache_at_ops: self.cache_at_ops, - best_finalized_block: self.best_finalized_block, - } - } - - /// When new block is inserted into database. - pub fn on_block_insert( - mut self, - parent: ComplexBlockId, - block: ComplexBlockId, - data_at: HashMap>, - entry_type: EntryType, - ) -> ClientResult { - // prepare list of caches that are not update - // (we might still need to do some cache maintenance in this case) - let missed_caches = self.cache.cache_at.keys() - .filter(|cache| !data_at.contains_key(*cache)) - .cloned() - .collect::>(); - - let mut insert_op = |name: CacheKeyId, value: Option>| -> Result<(), sp_blockchain::Error> { - let cache = self.cache.get_cache(name)?; - let cache_ops = self.cache_at_ops.entry(name).or_default(); - cache.on_block_insert( - &mut self::list_storage::DbStorageTransaction::new( - cache.storage(), - &mut self.tx, - ), - parent.clone(), - block.clone(), - value, - entry_type, - cache_ops, - )?; - - Ok(()) - }; - - data_at.into_iter().try_for_each(|(name, data)| insert_op(name, Some(data)))?; - missed_caches.into_iter().try_for_each(|name| insert_op(name, None))?; - - match entry_type { - EntryType::Final | EntryType::Genesis => - self.best_finalized_block = Some(block), - EntryType::NonFinal => (), - } - - Ok(self) - } - - /// When previously 
inserted block is finalized. - pub fn on_block_finalize( - mut self, - parent: ComplexBlockId, - block: ComplexBlockId, - ) -> ClientResult { - for (name, cache) in self.cache.cache_at.iter() { - let cache_ops = self.cache_at_ops.entry(*name).or_default(); - cache.on_block_finalize( - &mut self::list_storage::DbStorageTransaction::new( - cache.storage(), - &mut self.tx - ), - parent.clone(), - block.clone(), - cache_ops, - )?; - } - - self.best_finalized_block = Some(block); - - Ok(self) - } - - /// When block is reverted. - pub fn on_block_revert( - mut self, - reverted_block: &ComplexBlockId, - ) -> ClientResult { - for (name, cache) in self.cache.cache_at.iter() { - let cache_ops = self.cache_at_ops.entry(*name).or_default(); - cache.on_block_revert( - &mut self::list_storage::DbStorageTransaction::new( - cache.storage(), - &mut self.tx - ), - reverted_block, - cache_ops, - )?; - } - - Ok(self) - } + /// Convert transaction into post-commit operations set. + pub fn into_ops(self) -> DbCacheTransactionOps { + DbCacheTransactionOps { + cache_at_ops: self.cache_at_ops, + best_finalized_block: self.best_finalized_block, + } + } + + /// When new block is inserted into database. 
+ pub fn on_block_insert( + mut self, + parent: ComplexBlockId, + block: ComplexBlockId, + data_at: HashMap>, + entry_type: EntryType, + ) -> ClientResult { + // prepare list of caches that are not update + // (we might still need to do some cache maintenance in this case) + let missed_caches = self + .cache + .cache_at + .keys() + .filter(|cache| !data_at.contains_key(*cache)) + .cloned() + .collect::>(); + + let mut insert_op = |name: CacheKeyId, + value: Option>| + -> Result<(), sp_blockchain::Error> { + let cache = self.cache.get_cache(name)?; + let cache_ops = self.cache_at_ops.entry(name).or_default(); + cache.on_block_insert( + &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), + parent.clone(), + block.clone(), + value, + entry_type, + cache_ops, + )?; + + Ok(()) + }; + + data_at + .into_iter() + .try_for_each(|(name, data)| insert_op(name, Some(data)))?; + missed_caches + .into_iter() + .try_for_each(|name| insert_op(name, None))?; + + match entry_type { + EntryType::Final | EntryType::Genesis => self.best_finalized_block = Some(block), + EntryType::NonFinal => (), + } + + Ok(self) + } + + /// When previously inserted block is finalized. + pub fn on_block_finalize( + mut self, + parent: ComplexBlockId, + block: ComplexBlockId, + ) -> ClientResult { + for (name, cache) in self.cache.cache_at.iter() { + let cache_ops = self.cache_at_ops.entry(*name).or_default(); + cache.on_block_finalize( + &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), + parent.clone(), + block.clone(), + cache_ops, + )?; + } + + self.best_finalized_block = Some(block); + + Ok(self) + } + + /// When block is reverted. 
+ pub fn on_block_revert(mut self, reverted_block: &ComplexBlockId) -> ClientResult { + for (name, cache) in self.cache.cache_at.iter() { + let cache_ops = self.cache_at_ops.entry(*name).or_default(); + cache.on_block_revert( + &mut self::list_storage::DbStorageTransaction::new(cache.storage(), &mut self.tx), + reverted_block, + cache_ops, + )?; + } + + Ok(self) + } } /// Synchronous implementation of database-backed blockchain data cache. pub struct DbCacheSync(pub RwLock>); impl BlockchainCache for DbCacheSync { - fn initialize(&self, key: &CacheKeyId, data: Vec) -> ClientResult<()> { - let mut cache = self.0.write(); - let genesis_hash = cache.genesis_hash; - let cache_contents = vec![(*key, data)].into_iter().collect(); - let db = cache.db.clone(); - let mut dbtx = Transaction::new(); - let tx = cache.transaction(&mut dbtx); - let tx = tx.on_block_insert( - ComplexBlockId::new(Default::default(), Zero::zero()), - ComplexBlockId::new(genesis_hash, Zero::zero()), - cache_contents, - EntryType::Genesis, - )?; - let tx_ops = tx.into_ops(); - db.commit(dbtx); - cache.commit(tx_ops)?; - Ok(()) - } - - fn get_at( - &self, - key: &CacheKeyId, - at: &BlockId, - ) -> ClientResult, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>> { - let mut cache = self.0.write(); - let cache = cache.get_cache(*key)?; - let storage = cache.storage(); - let db = storage.db(); - let columns = storage.columns(); - let at = match *at { - BlockId::Hash(hash) => { - let header = utils::require_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Hash(hash.clone()))?; - ComplexBlockId::new(hash, *header.number()) - }, - BlockId::Number(number) => { - let hash = utils::require_header::( - &**db, - columns.key_lookup, - columns.header, - BlockId::Number(number.clone()))?.hash(); - ComplexBlockId::new(hash, number) - }, - }; - - cache.value_at_block(&at) - .map(|block_and_value| block_and_value.map(|(begin_block, end_block, value)| - ( - (begin_block.number, 
begin_block.hash), - end_block.map(|end_block| (end_block.number, end_block.hash)), - value, - ))) - } + fn initialize(&self, key: &CacheKeyId, data: Vec) -> ClientResult<()> { + let mut cache = self.0.write(); + let genesis_hash = cache.genesis_hash; + let cache_contents = vec![(*key, data)].into_iter().collect(); + let db = cache.db.clone(); + let mut dbtx = Transaction::new(); + let tx = cache.transaction(&mut dbtx); + let tx = tx.on_block_insert( + ComplexBlockId::new(Default::default(), Zero::zero()), + ComplexBlockId::new(genesis_hash, Zero::zero()), + cache_contents, + EntryType::Genesis, + )?; + let tx_ops = tx.into_ops(); + db.commit(dbtx); + cache.commit(tx_ops)?; + Ok(()) + } + + fn get_at( + &self, + key: &CacheKeyId, + at: &BlockId, + ) -> ClientResult< + Option<( + (NumberFor, Block::Hash), + Option<(NumberFor, Block::Hash)>, + Vec, + )>, + > { + let mut cache = self.0.write(); + let cache = cache.get_cache(*key)?; + let storage = cache.storage(); + let db = storage.db(); + let columns = storage.columns(); + let at = match *at { + BlockId::Hash(hash) => { + let header = utils::require_header::( + &**db, + columns.key_lookup, + columns.header, + BlockId::Hash(hash.clone()), + )?; + ComplexBlockId::new(hash, *header.number()) + } + BlockId::Number(number) => { + let hash = utils::require_header::( + &**db, + columns.key_lookup, + columns.header, + BlockId::Number(number.clone()), + )? + .hash(); + ComplexBlockId::new(hash, number) + } + }; + + cache.value_at_block(&at).map(|block_and_value| { + block_and_value.map(|(begin_block, end_block, value)| { + ( + (begin_block.number, begin_block.hash), + end_block.map(|end_block| (end_block.number, end_block.hash)), + value, + ) + }) + }) + } } /// Get pruning strategy for given cache. 
fn cache_pruning_strategy>(cache: CacheKeyId) -> PruningStrategy { - // the cache is mostly used to store data from consensus engines - // this kind of data is only required for non-finalized blocks - // => by default we prune finalized cached entries - - match cache { - // we need to keep changes tries configurations forever (or at least until changes tries, - // that were built using this configuration, are pruned) to make it possible to refer - // to old changes tries - well_known_cache_keys::CHANGES_TRIE_CONFIG => PruningStrategy::NeverPrune, - _ => PruningStrategy::ByDepth(PRUNE_DEPTH.into()), - } + // the cache is mostly used to store data from consensus engines + // this kind of data is only required for non-finalized blocks + // => by default we prune finalized cached entries + + match cache { + // we need to keep changes tries configurations forever (or at least until changes tries, + // that were built using this configuration, are pruned) to make it possible to refer + // to old changes tries + well_known_cache_keys::CHANGES_TRIE_CONFIG => PruningStrategy::NeverPrune, + _ => PruningStrategy::ByDepth(PRUNE_DEPTH.into()), + } } diff --git a/client/db/src/changes_tries_storage.rs b/client/db/src/changes_tries_storage.rs index 5447e8b725..831cd0bca3 100644 --- a/client/db/src/changes_tries_storage.rs +++ b/client/db/src/changes_tries_storage.rs @@ -16,60 +16,62 @@ //! DB-backed changes tries storage. 
-use std::collections::{HashMap, HashSet}; -use std::sync::Arc; -use hash_db::Prefix; +use crate::cache::{ + ComplexBlockId, DbCache, DbCacheSync, DbCacheTransactionOps, EntryType as CacheEntryType, +}; +use crate::utils::{self, meta_keys, Meta}; +use crate::{Database, DbHash}; use codec::{Decode, Encode}; +use hash_db::Prefix; use parking_lot::RwLock; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sp_trie::MemoryDB; use sc_client_api::backend::PrunableStateChangesTrieStorage; use sp_blockchain::{well_known_cache_keys, Cache as BlockchainCache}; -use sp_core::{ChangesTrieConfiguration, ChangesTrieConfigurationRange, convert_hash}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_core::{convert_hash, ChangesTrieConfiguration, ChangesTrieConfigurationRange}; use sp_database::Transaction; +use sp_runtime::generic::{BlockId, ChangesTrieSignal, DigestItem}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, HashFor, NumberFor, One, Zero, CheckedSub, + Block as BlockT, CheckedSub, HashFor, Header as HeaderT, NumberFor, One, Zero, }; -use sp_runtime::generic::{BlockId, DigestItem, ChangesTrieSignal}; use sp_state_machine::{ChangesTrieBuildCache, ChangesTrieCacheAction}; -use crate::{Database, DbHash}; -use crate::utils::{self, Meta, meta_keys}; -use crate::cache::{ - DbCacheSync, DbCache, DbCacheTransactionOps, - ComplexBlockId, EntryType as CacheEntryType, -}; +use sp_trie::MemoryDB; +use std::collections::{HashMap, HashSet}; +use std::sync::Arc; /// Extract new changes trie configuration (if available) from the header. 
-pub fn extract_new_configuration(header: &Header) -> Option<&Option> { - header.digest() - .log(DigestItem::as_changes_trie_signal) - .and_then(ChangesTrieSignal::as_new_configuration) +pub fn extract_new_configuration( + header: &Header, +) -> Option<&Option> { + header + .digest() + .log(DigestItem::as_changes_trie_signal) + .and_then(ChangesTrieSignal::as_new_configuration) } /// Opaque configuration cache transaction. During its lifetime, no-one should modify cache. This is currently /// guaranteed because import lock is held during block import/finalization. pub struct DbChangesTrieStorageTransaction { - /// Cache operations that must be performed after db transaction is committed. - cache_ops: DbCacheTransactionOps, - /// New configuration (if changed at current block). - new_config: Option>, + /// Cache operations that must be performed after db transaction is committed. + cache_ops: DbCacheTransactionOps, + /// New configuration (if changed at current block). + new_config: Option>, } impl DbChangesTrieStorageTransaction { - /// Consume self and return transaction with given new configuration. - pub fn with_new_config(mut self, new_config: Option>) -> Self { - self.new_config = new_config; - self - } + /// Consume self and return transaction with given new configuration. + pub fn with_new_config(mut self, new_config: Option>) -> Self { + self.new_config = new_config; + self + } } impl From> for DbChangesTrieStorageTransaction { - fn from(cache_ops: DbCacheTransactionOps) -> Self { - DbChangesTrieStorageTransaction { - cache_ops, - new_config: None, - } - } + fn from(cache_ops: DbCacheTransactionOps) -> Self { + DbChangesTrieStorageTransaction { + cache_ops, + new_config: None, + } + } } /// Changes tries storage. @@ -77,939 +79,1249 @@ impl From> for DbChangesTrieStorageT /// Stores all tries in separate DB column. /// Lock order: meta, tries_meta, cache, build_cache. 
pub struct DbChangesTrieStorage { - db: Arc>, - meta_column: u32, - changes_tries_column: u32, - key_lookup_column: u32, - header_column: u32, - meta: Arc, Block::Hash>>>, - tries_meta: RwLock>, - min_blocks_to_keep: Option, - /// The cache stores all ever existing changes tries configurations. - cache: DbCacheSync, - /// Build cache is a map of block => set of storage keys changed at this block. - /// They're used to build digest blocks - instead of reading+parsing tries from db - /// we just use keys sets from the cache. - build_cache: RwLock>>, + db: Arc>, + meta_column: u32, + changes_tries_column: u32, + key_lookup_column: u32, + header_column: u32, + meta: Arc, Block::Hash>>>, + tries_meta: RwLock>, + min_blocks_to_keep: Option, + /// The cache stores all ever existing changes tries configurations. + cache: DbCacheSync, + /// Build cache is a map of block => set of storage keys changed at this block. + /// They're used to build digest blocks - instead of reading+parsing tries from db + /// we just use keys sets from the cache. + build_cache: RwLock>>, } /// Persistent struct that contains all the changes tries metadata. #[derive(Decode, Encode, Debug)] struct ChangesTriesMeta { - /// Oldest unpruned max-level (or skewed) digest trie blocks range. - /// The range is inclusive from both sides. - /// Is None only if: - /// 1) we haven't yet finalized any blocks (except genesis) - /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled - /// 3) changes tries pruning is disabled - pub oldest_digest_range: Option<(NumberFor, NumberFor)>, - /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. - /// It is guaranteed that we have no any changes tries before (and including) this block. - /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). 
- pub oldest_pruned_digest_range_end: NumberFor, + /// Oldest unpruned max-level (or skewed) digest trie blocks range. + /// The range is inclusive from both sides. + /// Is None only if: + /// 1) we haven't yet finalized any blocks (except genesis) + /// 2) if best_finalized_block - min_blocks_to_keep points to the range where changes tries are disabled + /// 3) changes tries pruning is disabled + pub oldest_digest_range: Option<(NumberFor, NumberFor)>, + /// End block (inclusive) of oldest pruned max-level (or skewed) digest trie blocks range. + /// It is guaranteed that we have no any changes tries before (and including) this block. + /// It is guaranteed that all existing changes tries after this block are not yet pruned (if created). + pub oldest_pruned_digest_range_end: NumberFor, } impl DbChangesTrieStorage { - /// Create new changes trie storage. - pub fn new( - db: Arc>, - meta_column: u32, - changes_tries_column: u32, - key_lookup_column: u32, - header_column: u32, - cache_column: u32, - meta: Arc, Block::Hash>>>, - min_blocks_to_keep: Option, - ) -> ClientResult { - let (finalized_hash, finalized_number, genesis_hash) = { - let meta = meta.read(); - (meta.finalized_hash, meta.finalized_number, meta.genesis_hash) - }; - let tries_meta = read_tries_meta(&*db, meta_column)?; - Ok(Self { - db: db.clone(), - meta_column, - changes_tries_column, - key_lookup_column, - header_column, - meta, - min_blocks_to_keep, - cache: DbCacheSync(RwLock::new(DbCache::new( - db.clone(), - key_lookup_column, - header_column, - cache_column, - genesis_hash, - ComplexBlockId::new(finalized_hash, finalized_number), - ))), - build_cache: RwLock::new(ChangesTrieBuildCache::new()), - tries_meta: RwLock::new(tries_meta), - }) - } - - /// Commit new changes trie. 
- pub fn commit( - &self, - tx: &mut Transaction, - mut changes_trie: MemoryDB>, - parent_block: ComplexBlockId, - block: ComplexBlockId, - new_header: &Block::Header, - finalized: bool, - new_configuration: Option>, - cache_tx: Option>, - ) -> ClientResult> { - // insert changes trie, associated with block, into DB - for (key, (val, _)) in changes_trie.drain() { - tx.set(self.changes_tries_column, key.as_ref(), &val); - } - - // if configuration has not been changed AND block is not finalized => nothing to do here - let new_configuration = match new_configuration { - Some(new_configuration) => new_configuration, - None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), - None => return self.finalize( - tx, - parent_block.hash, - block.hash, - block.number, - Some(new_header), - cache_tx, - ), - }; - - // update configuration cache - let mut cache_at = HashMap::new(); - cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - Ok(DbChangesTrieStorageTransaction::from(match cache_tx { - Some(cache_tx) => self.cache.0.write() - .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(), - None => self.cache.0.write() - .transaction(tx) - .on_block_insert( - parent_block, - block, - cache_at, - if finalized { CacheEntryType::Final } else { CacheEntryType::NonFinal }, - )? - .into_ops(), - }).with_new_config(Some(new_configuration))) - } - - /// Called when block is finalized. 
- pub fn finalize( - &self, - tx: &mut Transaction, - parent_block_hash: Block::Hash, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option>, - ) -> ClientResult> { - // prune obsolete changes tries - self.prune(tx, block_hash, block_num, new_header.clone(), cache_tx.as_ref())?; - - // if we have inserted the block that we're finalizing in the same transaction - // => then we have already finalized it from the commit() call - if cache_tx.is_some() { - if let Some(new_header) = new_header { - if new_header.hash() == block_hash { - return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")); - } - } - } - - // and finalize configuration cache entries - let block = ComplexBlockId::new(block_hash, block_num); - let parent_block_num = block_num.checked_sub(&One::one()).unwrap_or_else(|| Zero::zero()); - let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); - Ok(match cache_tx { - Some(cache_tx) => DbChangesTrieStorageTransaction::from( - self.cache.0.write() - .transaction_with_ops(tx, cache_tx.cache_ops) - .on_block_finalize( - parent_block, - block, - )? - .into_ops() - ).with_new_config(cache_tx.new_config), - None => DbChangesTrieStorageTransaction::from( - self.cache.0.write() - .transaction(tx) - .on_block_finalize( - parent_block, - block, - )? - .into_ops() - ), - }) - } - - /// When block is reverted. - pub fn revert( - &self, - tx: &mut Transaction, - block: &ComplexBlockId, - ) -> ClientResult> { - Ok(self.cache.0.write().transaction(tx) - .on_block_revert(block)? - .into_ops() - .into()) - } - - /// When transaction has been committed. - pub fn post_commit(&self, tx: Option>) { - if let Some(tx) = tx { - self.cache.0.write().commit(tx.cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there is tx; qed"); - } - } - - /// Commit changes into changes trie build cache. 
- pub fn commit_build_cache(&self, cache_update: ChangesTrieCacheAction>) { - self.build_cache.write().perform(cache_update); - } - - /// Prune obsolete changes tries. - fn prune( - &self, - tx: &mut Transaction, - block_hash: Block::Hash, - block_num: NumberFor, - new_header: Option<&Block::Header>, - cache_tx: Option<&DbChangesTrieStorageTransaction>, - ) -> ClientResult<()> { - // never prune on archive nodes - let min_blocks_to_keep = match self.min_blocks_to_keep { - Some(min_blocks_to_keep) => min_blocks_to_keep, - None => return Ok(()), - }; - - let mut tries_meta = self.tries_meta.write(); - let mut next_digest_range_start = block_num; - loop { - // prune oldest digest if it is known - // it could be unknown if: - // 1) either we're finalizing block#1 - // 2) or we are (or were) in period where changes tries are disabled - if let Some((begin, end)) = tries_meta.oldest_digest_range { - if block_num <= end || block_num - end <= min_blocks_to_keep.into() { - break; - } - - tries_meta.oldest_pruned_digest_range_end = end; - sp_state_machine::prune_changes_tries( - &*self, - begin, - end, - &sp_state_machine::ChangesTrieAnchorBlockId { - hash: convert_hash(&block_hash), - number: block_num, - }, - |node| tx.remove(self.changes_tries_column, node.as_ref()), - ); - - next_digest_range_start = end + One::one(); - } - - // proceed to the next configuration range - let next_digest_range_start_hash = match block_num == next_digest_range_start { - true => block_hash, - false => utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - BlockId::Number(next_digest_range_start), - )?.hash(), - }; - - let config_for_new_block = new_header - .map(|header| *header.number() == next_digest_range_start) - .unwrap_or(false); - let next_config = match cache_tx { - Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { - let config = cache_tx - .new_config - .clone() - .expect("guarded by is_some(); qed"); - 
ChangesTrieConfigurationRange { - zero: (block_num, block_hash), - end: None, - config, - } - }, - _ if config_for_new_block => { - self.configuration_at(&BlockId::Hash(*new_header.expect( - "config_for_new_block is only true when new_header is passed; qed" - ).parent_hash()))? - }, - _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?, - }; - if let Some(config) = next_config.config { - let mut oldest_digest_range = config - .next_max_level_digest_range(next_config.zero.0, next_digest_range_start) - .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start)); - - if let Some(end) = next_config.end { - if end.0 < oldest_digest_range.1 { - oldest_digest_range.1 = end.0; - } - } - - tries_meta.oldest_digest_range = Some(oldest_digest_range); - continue; - } - - tries_meta.oldest_digest_range = None; - break; - } - - write_tries_meta(tx, self.meta_column, &*tries_meta); - Ok(()) - } + /// Create new changes trie storage. + pub fn new( + db: Arc>, + meta_column: u32, + changes_tries_column: u32, + key_lookup_column: u32, + header_column: u32, + cache_column: u32, + meta: Arc, Block::Hash>>>, + min_blocks_to_keep: Option, + ) -> ClientResult { + let (finalized_hash, finalized_number, genesis_hash) = { + let meta = meta.read(); + ( + meta.finalized_hash, + meta.finalized_number, + meta.genesis_hash, + ) + }; + let tries_meta = read_tries_meta(&*db, meta_column)?; + Ok(Self { + db: db.clone(), + meta_column, + changes_tries_column, + key_lookup_column, + header_column, + meta, + min_blocks_to_keep, + cache: DbCacheSync(RwLock::new(DbCache::new( + db.clone(), + key_lookup_column, + header_column, + cache_column, + genesis_hash, + ComplexBlockId::new(finalized_hash, finalized_number), + ))), + build_cache: RwLock::new(ChangesTrieBuildCache::new()), + tries_meta: RwLock::new(tries_meta), + }) + } + + /// Commit new changes trie. 
+ pub fn commit( + &self, + tx: &mut Transaction, + mut changes_trie: MemoryDB>, + parent_block: ComplexBlockId, + block: ComplexBlockId, + new_header: &Block::Header, + finalized: bool, + new_configuration: Option>, + cache_tx: Option>, + ) -> ClientResult> { + // insert changes trie, associated with block, into DB + for (key, (val, _)) in changes_trie.drain() { + tx.set(self.changes_tries_column, key.as_ref(), &val); + } + + // if configuration has not been changed AND block is not finalized => nothing to do here + let new_configuration = match new_configuration { + Some(new_configuration) => new_configuration, + None if !finalized => return Ok(DbCacheTransactionOps::empty().into()), + None => { + return self.finalize( + tx, + parent_block.hash, + block.hash, + block.number, + Some(new_header), + cache_tx, + ) + } + }; + + // update configuration cache + let mut cache_at = HashMap::new(); + cache_at.insert( + well_known_cache_keys::CHANGES_TRIE_CONFIG, + new_configuration.encode(), + ); + Ok(DbChangesTrieStorageTransaction::from(match cache_tx { + Some(cache_tx) => self + .cache + .0 + .write() + .transaction_with_ops(tx, cache_tx.cache_ops) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { + CacheEntryType::Final + } else { + CacheEntryType::NonFinal + }, + )? + .into_ops(), + None => self + .cache + .0 + .write() + .transaction(tx) + .on_block_insert( + parent_block, + block, + cache_at, + if finalized { + CacheEntryType::Final + } else { + CacheEntryType::NonFinal + }, + )? + .into_ops(), + }) + .with_new_config(Some(new_configuration))) + } + + /// Called when block is finalized. 
+ pub fn finalize( + &self, + tx: &mut Transaction, + parent_block_hash: Block::Hash, + block_hash: Block::Hash, + block_num: NumberFor, + new_header: Option<&Block::Header>, + cache_tx: Option>, + ) -> ClientResult> { + // prune obsolete changes tries + self.prune( + tx, + block_hash, + block_num, + new_header.clone(), + cache_tx.as_ref(), + )?; + + // if we have inserted the block that we're finalizing in the same transaction + // => then we have already finalized it from the commit() call + if cache_tx.is_some() { + if let Some(new_header) = new_header { + if new_header.hash() == block_hash { + return Ok(cache_tx.expect("guarded by cache_tx.is_some(); qed")); + } + } + } + + // and finalize configuration cache entries + let block = ComplexBlockId::new(block_hash, block_num); + let parent_block_num = block_num + .checked_sub(&One::one()) + .unwrap_or_else(|| Zero::zero()); + let parent_block = ComplexBlockId::new(parent_block_hash, parent_block_num); + Ok(match cache_tx { + Some(cache_tx) => DbChangesTrieStorageTransaction::from( + self.cache + .0 + .write() + .transaction_with_ops(tx, cache_tx.cache_ops) + .on_block_finalize(parent_block, block)? + .into_ops(), + ) + .with_new_config(cache_tx.new_config), + None => DbChangesTrieStorageTransaction::from( + self.cache + .0 + .write() + .transaction(tx) + .on_block_finalize(parent_block, block)? + .into_ops(), + ), + }) + } + + /// When block is reverted. + pub fn revert( + &self, + tx: &mut Transaction, + block: &ComplexBlockId, + ) -> ClientResult> { + Ok(self + .cache + .0 + .write() + .transaction(tx) + .on_block_revert(block)? + .into_ops() + .into()) + } + + /// When transaction has been committed. + pub fn post_commit(&self, tx: Option>) { + if let Some(tx) = tx { + self.cache.0.write().commit(tx.cache_ops).expect( + "only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there is tx; qed", + ); + } + } + + /// Commit changes into changes trie build cache. 
+ pub fn commit_build_cache( + &self, + cache_update: ChangesTrieCacheAction>, + ) { + self.build_cache.write().perform(cache_update); + } + + /// Prune obsolete changes tries. + fn prune( + &self, + tx: &mut Transaction, + block_hash: Block::Hash, + block_num: NumberFor, + new_header: Option<&Block::Header>, + cache_tx: Option<&DbChangesTrieStorageTransaction>, + ) -> ClientResult<()> { + // never prune on archive nodes + let min_blocks_to_keep = match self.min_blocks_to_keep { + Some(min_blocks_to_keep) => min_blocks_to_keep, + None => return Ok(()), + }; + + let mut tries_meta = self.tries_meta.write(); + let mut next_digest_range_start = block_num; + loop { + // prune oldest digest if it is known + // it could be unknown if: + // 1) either we're finalizing block#1 + // 2) or we are (or were) in period where changes tries are disabled + if let Some((begin, end)) = tries_meta.oldest_digest_range { + if block_num <= end || block_num - end <= min_blocks_to_keep.into() { + break; + } + + tries_meta.oldest_pruned_digest_range_end = end; + sp_state_machine::prune_changes_tries( + &*self, + begin, + end, + &sp_state_machine::ChangesTrieAnchorBlockId { + hash: convert_hash(&block_hash), + number: block_num, + }, + |node| tx.remove(self.changes_tries_column, node.as_ref()), + ); + + next_digest_range_start = end + One::one(); + } + + // proceed to the next configuration range + let next_digest_range_start_hash = match block_num == next_digest_range_start { + true => block_hash, + false => utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Number(next_digest_range_start), + )? 
+ .hash(), + }; + + let config_for_new_block = new_header + .map(|header| *header.number() == next_digest_range_start) + .unwrap_or(false); + let next_config = match cache_tx { + Some(cache_tx) if config_for_new_block && cache_tx.new_config.is_some() => { + let config = cache_tx + .new_config + .clone() + .expect("guarded by is_some(); qed"); + ChangesTrieConfigurationRange { + zero: (block_num, block_hash), + end: None, + config, + } + } + _ if config_for_new_block => self.configuration_at(&BlockId::Hash( + *new_header + .expect("config_for_new_block is only true when new_header is passed; qed") + .parent_hash(), + ))?, + _ => self.configuration_at(&BlockId::Hash(next_digest_range_start_hash))?, + }; + if let Some(config) = next_config.config { + let mut oldest_digest_range = config + .next_max_level_digest_range(next_config.zero.0, next_digest_range_start) + .unwrap_or_else(|| (next_digest_range_start, next_digest_range_start)); + + if let Some(end) = next_config.end { + if end.0 < oldest_digest_range.1 { + oldest_digest_range.1 = end.0; + } + } + + tries_meta.oldest_digest_range = Some(oldest_digest_range); + continue; + } + + tries_meta.oldest_digest_range = None; + break; + } + + write_tries_meta(tx, self.meta_column, &*tries_meta); + Ok(()) + } } impl PrunableStateChangesTrieStorage for DbChangesTrieStorage { - fn storage(&self) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { - self - } - - fn configuration_at(&self, at: &BlockId) -> ClientResult< - ChangesTrieConfigurationRange, Block::Hash> - > { - self.cache - .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? 
- .and_then(|(zero, end, encoded)| Decode::decode(&mut &encoded[..]).ok() - .map(|config| ChangesTrieConfigurationRange { zero, end, config })) - .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) - } - - fn oldest_pruned_digest_range_end(&self) -> NumberFor { - self.tries_meta.read().oldest_pruned_digest_range_end - } + fn storage( + &self, + ) -> &dyn sp_state_machine::ChangesTrieStorage, NumberFor> { + self + } + + fn configuration_at( + &self, + at: &BlockId, + ) -> ClientResult, Block::Hash>> { + self.cache + .get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, at)? + .and_then(|(zero, end, encoded)| { + Decode::decode(&mut &encoded[..]) + .ok() + .map(|config| ChangesTrieConfigurationRange { zero, end, config }) + }) + .ok_or_else(|| ClientError::ErrorReadingChangesTriesConfig) + } + + fn oldest_pruned_digest_range_end(&self) -> NumberFor { + self.tries_meta.read().oldest_pruned_digest_range_end + } } impl sp_state_machine::ChangesTrieRootsStorage, NumberFor> - for DbChangesTrieStorage + for DbChangesTrieStorage { - fn build_anchor( - &self, - hash: Block::Hash, - ) -> Result>, String> { - utils::read_header::(&*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(hash)) - .map_err(|e| e.to_string()) - .and_then(|maybe_header| maybe_header.map(|header| - sp_state_machine::ChangesTrieAnchorBlockId { - hash, - number: *header.number(), - } - ).ok_or_else(|| format!("Unknown header: {}", hash))) - } - - fn root( - &self, - anchor: &sp_state_machine::ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - // check API requirement: we can't get NEXT block(s) based on anchor - if block > anchor.number { - return Err(format!("Can't get changes trie root at {} using anchor at {}", block, anchor.number)); - } - - // we need to get hash of the block to resolve changes trie root - let block_id = if block <= self.meta.read().finalized_number { - // if block is finalized, we could just read canonical hash - BlockId::Number(block) - 
} else { - // the block is not finalized - let mut current_num = anchor.number; - let mut current_hash: Block::Hash = convert_hash(&anchor.hash); - let maybe_anchor_header: Block::Header = utils::require_header::( - &*self.db, self.key_lookup_column, self.header_column, BlockId::Number(current_num) - ).map_err(|e| e.to_string())?; - if maybe_anchor_header.hash() == current_hash { - // if anchor is canonicalized, then the block is also canonicalized - BlockId::Number(block) - } else { - // else (block is not finalized + anchor is not canonicalized): - // => we should find the required block hash by traversing - // back from the anchor to the block with given number - while current_num != block { - let current_header: Block::Header = utils::require_header::( - &*self.db, self.key_lookup_column, self.header_column, BlockId::Hash(current_hash) - ).map_err(|e| e.to_string())?; - - current_hash = *current_header.parent_hash(); - current_num = current_num - One::one(); - } - - BlockId::Hash(current_hash) - } - }; - - Ok( - utils::require_header::( - &*self.db, - self.key_lookup_column, - self.header_column, - block_id, - ) - .map_err(|e| e.to_string())? 
- .digest() - .log(DigestItem::as_changes_trie_root) - .cloned() - ) - } + fn build_anchor( + &self, + hash: Block::Hash, + ) -> Result>, String> + { + utils::read_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Hash(hash), + ) + .map_err(|e| e.to_string()) + .and_then(|maybe_header| { + maybe_header + .map(|header| sp_state_machine::ChangesTrieAnchorBlockId { + hash, + number: *header.number(), + }) + .ok_or_else(|| format!("Unknown header: {}", hash)) + }) + } + + fn root( + &self, + anchor: &sp_state_machine::ChangesTrieAnchorBlockId>, + block: NumberFor, + ) -> Result, String> { + // check API requirement: we can't get NEXT block(s) based on anchor + if block > anchor.number { + return Err(format!( + "Can't get changes trie root at {} using anchor at {}", + block, anchor.number + )); + } + + // we need to get hash of the block to resolve changes trie root + let block_id = if block <= self.meta.read().finalized_number { + // if block is finalized, we could just read canonical hash + BlockId::Number(block) + } else { + // the block is not finalized + let mut current_num = anchor.number; + let mut current_hash: Block::Hash = convert_hash(&anchor.hash); + let maybe_anchor_header: Block::Header = utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Number(current_num), + ) + .map_err(|e| e.to_string())?; + if maybe_anchor_header.hash() == current_hash { + // if anchor is canonicalized, then the block is also canonicalized + BlockId::Number(block) + } else { + // else (block is not finalized + anchor is not canonicalized): + // => we should find the required block hash by traversing + // back from the anchor to the block with given number + while current_num != block { + let current_header: Block::Header = utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + BlockId::Hash(current_hash), + ) + .map_err(|e| e.to_string())?; + + current_hash = 
*current_header.parent_hash(); + current_num = current_num - One::one(); + } + + BlockId::Hash(current_hash) + } + }; + + Ok(utils::require_header::( + &*self.db, + self.key_lookup_column, + self.header_column, + block_id, + ) + .map_err(|e| e.to_string())? + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned()) + } } impl sp_state_machine::ChangesTrieStorage, NumberFor> - for DbChangesTrieStorage + for DbChangesTrieStorage where - Block: BlockT, + Block: BlockT, { - fn as_roots_storage(&self) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { - self - } - - fn with_cached_changed_keys( - &self, - root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), - ) -> bool { - self.build_cache.read().with_changed_keys(root, functor) - } - - fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result>, String> { - Ok(self.db.get(self.changes_tries_column, key.as_ref())) - } + fn as_roots_storage( + &self, + ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> { + self + } + + fn with_cached_changed_keys( + &self, + root: &Block::Hash, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + self.build_cache.read().with_changed_keys(root, functor) + } + + fn get(&self, key: &Block::Hash, _prefix: Prefix) -> Result>, String> { + Ok(self.db.get(self.changes_tries_column, key.as_ref())) + } } /// Read changes tries metadata from database. 
fn read_tries_meta( - db: &dyn Database, - meta_column: u32, + db: &dyn Database, + meta_column: u32, ) -> ClientResult> { - match db.get(meta_column, meta_keys::CHANGES_TRIES_META) { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => Ok(h), - Err(err) => Err(ClientError::Backend(format!("Error decoding changes tries metadata: {}", err))), - }, - None => Ok(ChangesTriesMeta { - oldest_digest_range: None, - oldest_pruned_digest_range_end: Zero::zero(), - }), - } + match db.get(meta_column, meta_keys::CHANGES_TRIES_META) { + Some(h) => match Decode::decode(&mut &h[..]) { + Ok(h) => Ok(h), + Err(err) => Err(ClientError::Backend(format!( + "Error decoding changes tries metadata: {}", + err + ))), + }, + None => Ok(ChangesTriesMeta { + oldest_digest_range: None, + oldest_pruned_digest_range_end: Zero::zero(), + }), + } } /// Write changes tries metadata from database. fn write_tries_meta( - tx: &mut Transaction, - meta_column: u32, - meta: &ChangesTriesMeta, + tx: &mut Transaction, + meta_column: u32, + meta: &ChangesTriesMeta, ) { - tx.set_from_vec(meta_column, meta_keys::CHANGES_TRIES_META, meta.encode()); + tx.set_from_vec(meta_column, meta_keys::CHANGES_TRIES_META, meta.encode()); } #[cfg(test)] mod tests { - use hash_db::EMPTY_PREFIX; - use sc_client_api::backend::{ - Backend as ClientBackend, NewBlockState, BlockImportOperation, PrunableStateChangesTrieStorage, - }; - use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; - use sp_core::H256; - use sp_runtime::testing::{Digest, Header}; - use sp_runtime::traits::{Hash, BlakeTwo256}; - use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; - use crate::Backend; - use crate::tests::{Block, insert_header, prepare_changes}; - use super::*; - - fn changes(number: u64) -> Option, Vec)>> { - Some(vec![(number.to_le_bytes().to_vec(), number.to_le_bytes().to_vec())]) - } - - fn insert_header_with_configuration_change( - backend: &Backend, - number: u64, - parent_hash: H256, - changes: Option, 
Vec)>>, - new_configuration: Option, - ) -> H256 { - let mut digest = Digest::default(); - let mut changes_trie_update = Default::default(); - if let Some(changes) = changes { - let (root, update) = prepare_changes(changes); - digest.push(DigestItem::ChangesTrieRoot(root)); - changes_trie_update = update; - } - digest.push(DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_configuration))); - - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new()), - digest, - extrinsics_root: Default::default(), - }; - let header_hash = header.hash(); - - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, None, None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); - backend.commit_operation(op).unwrap(); - - header_hash - } - - #[test] - fn changes_trie_storage_works() { - let backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.meta.write().finalized_number = 1000; - - let check_changes = |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { - let (changes_root, mut changes_trie_update) = prepare_changes(changes); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { - hash: backend.blockchain().header(BlockId::Number(block)).unwrap().unwrap().hash(), - number: block - }; - assert_eq!(backend.changes_tries_storage.root(&anchor, block), Ok(Some(changes_root))); - - let storage = backend.changes_tries_storage.storage(); - for (key, (val, _)) in changes_trie_update.drain() { - assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); - } - }; - - let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; - let changes1 = vec![ - (b"key_at_1".to_vec(), b"val_at_1".to_vec()), - (b"another_key_at_1".to_vec(), 
b"another_val_at_1".to_vec()), - ]; - let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; - - let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); - let _ = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - // check that the storage contains tries for all blocks - check_changes(&backend, 0, changes0); - check_changes(&backend, 1, changes1); - check_changes(&backend, 2, changes2); - } - - #[test] - fn changes_trie_storage_works_with_forks() { - let backend = Backend::::new_test(1000, 100); - - let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; - let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; - let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; - let block0 = insert_header(&backend, 0, Default::default(), Some(changes0.clone()), Default::default()); - let block1 = insert_header(&backend, 1, block0, Some(changes1.clone()), Default::default()); - let block2 = insert_header(&backend, 2, block1, Some(changes2.clone()), Default::default()); - - let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; - let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; - let block2_1_0 = insert_header(&backend, 3, block2, Some(changes2_1_0.clone()), Default::default()); - let block2_1_1 = insert_header(&backend, 4, block2_1_0, Some(changes2_1_1.clone()), Default::default()); - - let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; - let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; - let block2_2_0 = insert_header(&backend, 3, block2, Some(changes2_2_0.clone()), Default::default()); - let block2_2_1 = insert_header(&backend, 4, block2_2_0, Some(changes2_2_1.clone()), Default::default()); - - // finalize block1 - backend.changes_tries_storage.meta.write().finalized_number = 1; - - // branch1: when asking for finalized block hash - let (changes1_root, _) = 
prepare_changes(changes1); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch2: when asking for finalized block hash - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 1), Ok(Some(changes1_root))); - - // branch1: when asking for non-finalized block hash (search by traversal) - let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_1_0_root))); - - // branch2: when asking for non-finalized block hash (search using canonicalized hint) - let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_2_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // finalize first block of branch2 (block2_2_0) - backend.changes_tries_storage.meta.write().finalized_number = 3; - - // branch2: when asking for finalized block of this branch - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - - // branch1: when asking for finalized block of other branch - // => result is incorrect (returned for the block of branch1), but this is expected, - // because the other fork is abandoned (forked before finalized header) - let anchor = sp_state_machine::ChangesTrieAnchorBlockId { hash: block2_1_1, number: 4 }; - assert_eq!(backend.changes_tries_storage.root(&anchor, 3), Ok(Some(changes2_2_0_root))); - } - - #[test] - fn changes_tries_are_pruned_on_finalization() { - let mut backend = Backend::::new_test(1000, 100); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let parent_hash = |number| { - if number 
== 0 { - Default::default() - } else { - backend.blockchain().header(BlockId::Number(number - 1)).unwrap().unwrap().hash() - } - }; - - let insert_regular_header = |with_changes, number| { - insert_header( - &backend, - number, - parent_hash(number), - if with_changes { changes(number) } else { None }, - Default::default(), - ); - }; - - let is_pruned = |number| { - let trie_root = backend - .blockchain() - .header(BlockId::Number(number)) - .unwrap().unwrap() - .digest() - .log(DigestItem::as_changes_trie_root) - .cloned(); - match trie_root { - Some(trie_root) => backend.changes_tries_storage.get(&trie_root, EMPTY_PREFIX).unwrap().is_none(), - None => true, - } - }; - - let finalize_block = |number| { - let header = backend.blockchain().header(BlockId::Number(number)).unwrap().unwrap(); - let mut tx = Transaction::new(); - let cache_ops = backend.changes_tries_storage.finalize( - &mut tx, - *header.parent_hash(), - header.hash(), - number, - None, - None, - ).unwrap(); - backend.storage.db.commit(tx); - backend.changes_tries_storage.post_commit(Some(cache_ops)); - }; - - // configuration ranges: - // (0; 6] - None - // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17 - // [18; 21] - None - // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32 - // [33; ... 
- Some(1) - let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2)); - let config_at_17 = None; - let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1)); - let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0)); - - (0..6).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); - (7..17).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change(&backend, 17, parent_hash(17), changes(17), config_at_17); - (18..21).for_each(|number| insert_regular_header(false, number)); - insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); - (22..32).for_each(|number| insert_regular_header(true, number)); - insert_header_with_configuration_change(&backend, 32, parent_hash(32), changes(32), config_at_32); - (33..50).for_each(|number| insert_regular_header(true, number)); - - // when only genesis is finalized, nothing is pruned - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [1; 18] are finalized, nothing is pruned - (1..=18).for_each(|number| finalize_block(number)); - (0..=6).for_each(|number| assert!(is_pruned(number))); - (7..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 19 is finalized, changes tries for blocks [7; 10] are pruned - finalize_block(19); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [20; 22] are finalized, nothing is pruned - 
(20..=22).for_each(|number| finalize_block(number)); - (0..=10).for_each(|number| assert!(is_pruned(number))); - (11..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 23 is finalized, changes tries for blocks [11; 14] are pruned - finalize_block(23); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [24; 25] are finalized, nothing is pruned - (24..=25).for_each(|number| finalize_block(number)); - (0..=14).for_each(|number| assert!(is_pruned(number))); - (15..=17).for_each(|number| assert!(!is_pruned(number))); - (18..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 26 is finalized, changes tries for blocks [15; 17] are pruned - finalize_block(26); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [27; 37] are finalized, nothing is pruned - (27..=37).for_each(|number| finalize_block(number)); - (0..=21).for_each(|number| assert!(is_pruned(number))); - (22..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 38 is finalized, changes tries for blocks [22; 29] are pruned - finalize_block(38); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when blocks [39; 40] are finalized, nothing is pruned - (39..=40).for_each(|number| finalize_block(number)); - (0..=29).for_each(|number| assert!(is_pruned(number))); - (30..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 41 is finalized, changes tries for blocks [30; 32] are pruned - finalize_block(41); - 
(0..=32).for_each(|number| assert!(is_pruned(number))); - (33..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 42 is finalized, changes trie for block 33 is pruned - finalize_block(42); - (0..=33).for_each(|number| assert!(is_pruned(number))); - (34..50).for_each(|number| assert!(!is_pruned(number))); - - // when block 43 is finalized, changes trie for block 34 is pruned - finalize_block(43); - (0..=34).for_each(|number| assert!(is_pruned(number))); - (35..50).for_each(|number| assert!(!is_pruned(number))); - } - - #[test] - fn changes_tries_configuration_is_updated_on_block_insert() { - let backend = Backend::::new_test(1000, 100); - - // configurations at blocks - let config_at_1 = Some(ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - }); - let config_at_3 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); - let config_at_5 = None; - let config_at_7 = Some(ChangesTrieConfiguration { - digest_interval: 8, - digest_levels: 1, - }); - - // insert some blocks - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); - let block4 = insert_header(&backend, 4, block3, None, Default::default()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); - let block6 = insert_header(&backend, 6, block5, None, Default::default()); - let block7 = insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); - - // test configuration cache - let storage = &backend.changes_tries_storage; - assert_eq!( - storage.configuration_at(&BlockId::Hash(block1)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - 
storage.configuration_at(&BlockId::Hash(block2)).unwrap().config, - config_at_1.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block3)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block4)).unwrap().config, - config_at_3.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block5)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block6)).unwrap().config, - config_at_5.clone(), - ); - assert_eq!( - storage.configuration_at(&BlockId::Hash(block7)).unwrap().config, - config_at_7.clone(), - ); - } - - #[test] - fn test_finalize_several_configuration_change_blocks_in_single_operation() { - let mut backend = Backend::::new_test(10, 10); - backend.changes_tries_storage.min_blocks_to_keep = Some(8); - - let configs = (0..=7).map(|i| Some(ChangesTrieConfiguration::new(2, i))).collect::>(); - - // insert unfinalized headers - let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, configs[0].clone()); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(1), configs[1].clone()); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(2), configs[2].clone()); - - let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); - let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); - let block2_1 = insert_header_with_configuration_change(&backend, 2, block1, changes(8), side_config2_1.clone()); - let _ = insert_header_with_configuration_change(&backend, 3, block2_1, changes(9), side_config2_2.clone()); - - // insert finalized header => 4 headers are finalized at once - let header3 = Header { - number: 3, - parent_hash: block2, - state_root: Default::default(), - digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[3].clone())), - ], - }, - extrinsics_root: Default::default(), 
- }; - let block3 = header3.hash(); - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); - op.mark_finalized(BlockId::Hash(block1), None).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - op.set_block_data(header3, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - - // insert more unfinalized headers - let block4 = insert_header_with_configuration_change(&backend, 4, block3, changes(4), configs[4].clone()); - let block5 = insert_header_with_configuration_change(&backend, 5, block4, changes(5), configs[5].clone()); - let block6 = insert_header_with_configuration_change(&backend, 6, block5, changes(6), configs[6].clone()); - - // insert finalized header => 4 headers are finalized at once - let header7 = Header { - number: 7, - parent_hash: block6, - state_root: Default::default(), - digest: Digest { - logs: vec![ - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(configs[7].clone())), - ], - }, - extrinsics_root: Default::default(), - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block6)).unwrap(); - op.mark_finalized(BlockId::Hash(block4), None).unwrap(); - op.mark_finalized(BlockId::Hash(block5), None).unwrap(); - op.mark_finalized(BlockId::Hash(block6), None).unwrap(); - op.set_block_data(header7, None, None, NewBlockState::Final).unwrap(); - backend.commit_operation(op).unwrap(); - } - - #[test] - fn changes_tries_configuration_is_reverted() { - let backend = Backend::::new_test(10, 10); - - let config0 = Some(ChangesTrieConfiguration::new(2, 5)); - let block0 = insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); - let config1 = Some(ChangesTrieConfiguration::new(2, 6)); - let block1 = insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); - backend.finalize_block(BlockId::Number(1), 
Some(vec![42])).unwrap(); - let config2 = Some(ChangesTrieConfiguration::new(2, 7)); - let block2 = insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); - let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); - let _ = insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); - let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); - let block2_2 = insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); - let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); - let _ = insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); - - // before truncate there are 2 unfinalized forks - block2_1+block2_3 - assert_eq!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 4], - ); - - // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 - backend.revert(1, false).unwrap(); - assert_eq!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 3], - ); - - // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics), - // the 1st one points to the block #3 because it isn't truncated - backend.revert(1, false).unwrap(); - assert_eq!( - backend.changes_tries_storage.cache.0.write() - .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>(), - vec![3, 2], - ); - - // after truncating block2 - there are no unfinalized forks - backend.revert(1, false).unwrap(); - assert!( - backend.changes_tries_storage.cache.0.write() - 
.get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) - .unwrap() - .unfinalized() - .iter() - .map(|fork| fork.head().valid_from.number) - .collect::>() - .is_empty(), - ); - } + use super::*; + use crate::tests::{insert_header, prepare_changes, Block}; + use crate::Backend; + use hash_db::EMPTY_PREFIX; + use sc_client_api::backend::{ + Backend as ClientBackend, BlockImportOperation, NewBlockState, + PrunableStateChangesTrieStorage, + }; + use sp_blockchain::HeaderBackend as BlockchainHeaderBackend; + use sp_core::H256; + use sp_runtime::testing::{Digest, Header}; + use sp_runtime::traits::{BlakeTwo256, Hash}; + use sp_state_machine::{ChangesTrieRootsStorage, ChangesTrieStorage}; + + fn changes(number: u64) -> Option, Vec)>> { + Some(vec![( + number.to_le_bytes().to_vec(), + number.to_le_bytes().to_vec(), + )]) + } + + fn insert_header_with_configuration_change( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Option, Vec)>>, + new_configuration: Option, + ) -> H256 { + let mut digest = Digest::default(); + let mut changes_trie_update = Default::default(); + if let Some(changes) = changes { + let (root, update) = prepare_changes(changes); + digest.push(DigestItem::ChangesTrieRoot(root)); + changes_trie_update = update; + } + digest.push(DigestItem::ChangesTrieSignal( + ChangesTrieSignal::NewConfiguration(new_configuration), + )); + + let header = Header { + number, + parent_hash, + state_root: BlakeTwo256::trie_root(Vec::new()), + digest, + extrinsics_root: Default::default(), + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, block_id).unwrap(); + op.set_block_data(header, None, None, NewBlockState::Best) + .unwrap(); + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)) + .unwrap(); + backend.commit_operation(op).unwrap(); 
+ + header_hash + } + + #[test] + fn changes_trie_storage_works() { + let backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.meta.write().finalized_number = 1000; + + let check_changes = + |backend: &Backend, block: u64, changes: Vec<(Vec, Vec)>| { + let (changes_root, mut changes_trie_update) = prepare_changes(changes); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { + hash: backend + .blockchain() + .header(BlockId::Number(block)) + .unwrap() + .unwrap() + .hash(), + number: block, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, block), + Ok(Some(changes_root)) + ); + + let storage = backend.changes_tries_storage.storage(); + for (key, (val, _)) in changes_trie_update.drain() { + assert_eq!(storage.get(&key, EMPTY_PREFIX), Ok(Some(val))); + } + }; + + let changes0 = vec![(b"key_at_0".to_vec(), b"val_at_0".to_vec())]; + let changes1 = vec![ + (b"key_at_1".to_vec(), b"val_at_1".to_vec()), + (b"another_key_at_1".to_vec(), b"another_val_at_1".to_vec()), + ]; + let changes2 = vec![(b"key_at_2".to_vec(), b"val_at_2".to_vec())]; + + let block0 = insert_header( + &backend, + 0, + Default::default(), + Some(changes0.clone()), + Default::default(), + ); + let block1 = insert_header( + &backend, + 1, + block0, + Some(changes1.clone()), + Default::default(), + ); + let _ = insert_header( + &backend, + 2, + block1, + Some(changes2.clone()), + Default::default(), + ); + + // check that the storage contains tries for all blocks + check_changes(&backend, 0, changes0); + check_changes(&backend, 1, changes1); + check_changes(&backend, 2, changes2); + } + + #[test] + fn changes_trie_storage_works_with_forks() { + let backend = Backend::::new_test(1000, 100); + + let changes0 = vec![(b"k0".to_vec(), b"v0".to_vec())]; + let changes1 = vec![(b"k1".to_vec(), b"v1".to_vec())]; + let changes2 = vec![(b"k2".to_vec(), b"v2".to_vec())]; + let block0 = insert_header( + &backend, + 0, + Default::default(), + Some(changes0.clone()), + 
Default::default(), + ); + let block1 = insert_header( + &backend, + 1, + block0, + Some(changes1.clone()), + Default::default(), + ); + let block2 = insert_header( + &backend, + 2, + block1, + Some(changes2.clone()), + Default::default(), + ); + + let changes2_1_0 = vec![(b"k3".to_vec(), b"v3".to_vec())]; + let changes2_1_1 = vec![(b"k4".to_vec(), b"v4".to_vec())]; + let block2_1_0 = insert_header( + &backend, + 3, + block2, + Some(changes2_1_0.clone()), + Default::default(), + ); + let block2_1_1 = insert_header( + &backend, + 4, + block2_1_0, + Some(changes2_1_1.clone()), + Default::default(), + ); + + let changes2_2_0 = vec![(b"k5".to_vec(), b"v5".to_vec())]; + let changes2_2_1 = vec![(b"k6".to_vec(), b"v6".to_vec())]; + let block2_2_0 = insert_header( + &backend, + 3, + block2, + Some(changes2_2_0.clone()), + Default::default(), + ); + let block2_2_1 = insert_header( + &backend, + 4, + block2_2_0, + Some(changes2_2_1.clone()), + Default::default(), + ); + + // finalize block1 + backend.changes_tries_storage.meta.write().finalized_number = 1; + + // branch1: when asking for finalized block hash + let (changes1_root, _) = prepare_changes(changes1); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { + hash: block2_1_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 1), + Ok(Some(changes1_root)) + ); + + // branch2: when asking for finalized block hash + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { + hash: block2_2_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 1), + Ok(Some(changes1_root)) + ); + + // branch1: when asking for non-finalized block hash (search by traversal) + let (changes2_1_0_root, _) = prepare_changes(changes2_1_0); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { + hash: block2_1_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_1_0_root)) + ); + + // branch2: when asking for non-finalized block 
hash (search using canonicalized hint) + let (changes2_2_0_root, _) = prepare_changes(changes2_2_0); + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { + hash: block2_2_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_2_0_root)) + ); + + // finalize first block of branch2 (block2_2_0) + backend.changes_tries_storage.meta.write().finalized_number = 3; + + // branch2: when asking for finalized block of this branch + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_2_0_root)) + ); + + // branch1: when asking for finalized block of other branch + // => result is incorrect (returned for the block of branch1), but this is expected, + // because the other fork is abandoned (forked before finalized header) + let anchor = sp_state_machine::ChangesTrieAnchorBlockId { + hash: block2_1_1, + number: 4, + }; + assert_eq!( + backend.changes_tries_storage.root(&anchor, 3), + Ok(Some(changes2_2_0_root)) + ); + } + + #[test] + fn changes_tries_are_pruned_on_finalization() { + let mut backend = Backend::::new_test(1000, 100); + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + + let parent_hash = |number| { + if number == 0 { + Default::default() + } else { + backend + .blockchain() + .header(BlockId::Number(number - 1)) + .unwrap() + .unwrap() + .hash() + } + }; + + let insert_regular_header = |with_changes, number| { + insert_header( + &backend, + number, + parent_hash(number), + if with_changes { changes(number) } else { None }, + Default::default(), + ); + }; + + let is_pruned = |number| { + let trie_root = backend + .blockchain() + .header(BlockId::Number(number)) + .unwrap() + .unwrap() + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned(); + match trie_root { + Some(trie_root) => backend + .changes_tries_storage + .get(&trie_root, EMPTY_PREFIX) + .unwrap() + .is_none(), + None => true, + } + }; + + let finalize_block = |number| { + let header = backend + 
.blockchain() + .header(BlockId::Number(number)) + .unwrap() + .unwrap(); + let mut tx = Transaction::new(); + let cache_ops = backend + .changes_tries_storage + .finalize( + &mut tx, + *header.parent_hash(), + header.hash(), + number, + None, + None, + ) + .unwrap(); + backend.storage.db.commit(tx); + backend.changes_tries_storage.post_commit(Some(cache_ops)); + }; + + // configuration ranges: + // (0; 6] - None + // [7; 17] - Some(2^2): D2 is built at #10, #14; SD is built at #17 + // [18; 21] - None + // [22; 32] - Some(8^1): D1 is built at #29; SD is built at #32 + // [33; ... - Some(1) + let config_at_6 = Some(ChangesTrieConfiguration::new(2, 2)); + let config_at_17 = None; + let config_at_21 = Some(ChangesTrieConfiguration::new(8, 1)); + let config_at_32 = Some(ChangesTrieConfiguration::new(1, 0)); + + (0..6).for_each(|number| insert_regular_header(false, number)); + insert_header_with_configuration_change(&backend, 6, parent_hash(6), None, config_at_6); + (7..17).for_each(|number| insert_regular_header(true, number)); + insert_header_with_configuration_change( + &backend, + 17, + parent_hash(17), + changes(17), + config_at_17, + ); + (18..21).for_each(|number| insert_regular_header(false, number)); + insert_header_with_configuration_change(&backend, 21, parent_hash(21), None, config_at_21); + (22..32).for_each(|number| insert_regular_header(true, number)); + insert_header_with_configuration_change( + &backend, + 32, + parent_hash(32), + changes(32), + config_at_32, + ); + (33..50).for_each(|number| insert_regular_header(true, number)); + + // when only genesis is finalized, nothing is pruned + (0..=6).for_each(|number| assert!(is_pruned(number))); + (7..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [1; 18] are finalized, nothing is pruned + (1..=18).for_each(|number| finalize_block(number)); + 
(0..=6).for_each(|number| assert!(is_pruned(number))); + (7..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 19 is finalized, changes tries for blocks [7; 10] are pruned + finalize_block(19); + (0..=10).for_each(|number| assert!(is_pruned(number))); + (11..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [20; 22] are finalized, nothing is pruned + (20..=22).for_each(|number| finalize_block(number)); + (0..=10).for_each(|number| assert!(is_pruned(number))); + (11..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 23 is finalized, changes tries for blocks [11; 14] are pruned + finalize_block(23); + (0..=14).for_each(|number| assert!(is_pruned(number))); + (15..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [24; 25] are finalized, nothing is pruned + (24..=25).for_each(|number| finalize_block(number)); + (0..=14).for_each(|number| assert!(is_pruned(number))); + (15..=17).for_each(|number| assert!(!is_pruned(number))); + (18..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 26 is finalized, changes tries for blocks [15; 17] are pruned + finalize_block(26); + (0..=21).for_each(|number| assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [27; 37] are finalized, nothing is pruned + (27..=37).for_each(|number| finalize_block(number)); + (0..=21).for_each(|number| 
assert!(is_pruned(number))); + (22..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 38 is finalized, changes tries for blocks [22; 29] are pruned + finalize_block(38); + (0..=29).for_each(|number| assert!(is_pruned(number))); + (30..50).for_each(|number| assert!(!is_pruned(number))); + + // when blocks [39; 40] are finalized, nothing is pruned + (39..=40).for_each(|number| finalize_block(number)); + (0..=29).for_each(|number| assert!(is_pruned(number))); + (30..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 41 is finalized, changes tries for blocks [30; 32] are pruned + finalize_block(41); + (0..=32).for_each(|number| assert!(is_pruned(number))); + (33..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 42 is finalized, changes trie for block 33 is pruned + finalize_block(42); + (0..=33).for_each(|number| assert!(is_pruned(number))); + (34..50).for_each(|number| assert!(!is_pruned(number))); + + // when block 43 is finalized, changes trie for block 34 is pruned + finalize_block(43); + (0..=34).for_each(|number| assert!(is_pruned(number))); + (35..50).for_each(|number| assert!(!is_pruned(number))); + } + + #[test] + fn changes_tries_configuration_is_updated_on_block_insert() { + let backend = Backend::::new_test(1000, 100); + + // configurations at blocks + let config_at_1 = Some(ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + }); + let config_at_3 = Some(ChangesTrieConfiguration { + digest_interval: 8, + digest_levels: 1, + }); + let config_at_5 = None; + let config_at_7 = Some(ChangesTrieConfiguration { + digest_interval: 8, + digest_levels: 1, + }); + + // insert some blocks + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = + insert_header_with_configuration_change(&backend, 1, block0, None, config_at_1.clone()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + let block3 = + 
insert_header_with_configuration_change(&backend, 3, block2, None, config_at_3.clone()); + let block4 = insert_header(&backend, 4, block3, None, Default::default()); + let block5 = + insert_header_with_configuration_change(&backend, 5, block4, None, config_at_5.clone()); + let block6 = insert_header(&backend, 6, block5, None, Default::default()); + let block7 = + insert_header_with_configuration_change(&backend, 7, block6, None, config_at_7.clone()); + + // test configuration cache + let storage = &backend.changes_tries_storage; + assert_eq!( + storage + .configuration_at(&BlockId::Hash(block1)) + .unwrap() + .config, + config_at_1.clone(), + ); + assert_eq!( + storage + .configuration_at(&BlockId::Hash(block2)) + .unwrap() + .config, + config_at_1.clone(), + ); + assert_eq!( + storage + .configuration_at(&BlockId::Hash(block3)) + .unwrap() + .config, + config_at_3.clone(), + ); + assert_eq!( + storage + .configuration_at(&BlockId::Hash(block4)) + .unwrap() + .config, + config_at_3.clone(), + ); + assert_eq!( + storage + .configuration_at(&BlockId::Hash(block5)) + .unwrap() + .config, + config_at_5.clone(), + ); + assert_eq!( + storage + .configuration_at(&BlockId::Hash(block6)) + .unwrap() + .config, + config_at_5.clone(), + ); + assert_eq!( + storage + .configuration_at(&BlockId::Hash(block7)) + .unwrap() + .config, + config_at_7.clone(), + ); + } + + #[test] + fn test_finalize_several_configuration_change_blocks_in_single_operation() { + let mut backend = Backend::::new_test(10, 10); + backend.changes_tries_storage.min_blocks_to_keep = Some(8); + + let configs = (0..=7) + .map(|i| Some(ChangesTrieConfiguration::new(2, i))) + .collect::>(); + + // insert unfinalized headers + let block0 = insert_header_with_configuration_change( + &backend, + 0, + Default::default(), + None, + configs[0].clone(), + ); + let block1 = insert_header_with_configuration_change( + &backend, + 1, + block0, + changes(1), + configs[1].clone(), + ); + let block2 = 
insert_header_with_configuration_change( + &backend, + 2, + block1, + changes(2), + configs[2].clone(), + ); + + let side_config2_1 = Some(ChangesTrieConfiguration::new(3, 2)); + let side_config2_2 = Some(ChangesTrieConfiguration::new(3, 3)); + let block2_1 = insert_header_with_configuration_change( + &backend, + 2, + block1, + changes(8), + side_config2_1.clone(), + ); + let _ = insert_header_with_configuration_change( + &backend, + 3, + block2_1, + changes(9), + side_config2_2.clone(), + ); + + // insert finalized header => 4 headers are finalized at once + let header3 = Header { + number: 3, + parent_hash: block2, + state_root: Default::default(), + digest: Digest { + logs: vec![DigestItem::ChangesTrieSignal( + ChangesTrieSignal::NewConfiguration(configs[3].clone()), + )], + }, + extrinsics_root: Default::default(), + }; + let block3 = header3.hash(); + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(block2)) + .unwrap(); + op.mark_finalized(BlockId::Hash(block1), None).unwrap(); + op.mark_finalized(BlockId::Hash(block2), None).unwrap(); + op.set_block_data(header3, None, None, NewBlockState::Final) + .unwrap(); + backend.commit_operation(op).unwrap(); + + // insert more unfinalized headers + let block4 = insert_header_with_configuration_change( + &backend, + 4, + block3, + changes(4), + configs[4].clone(), + ); + let block5 = insert_header_with_configuration_change( + &backend, + 5, + block4, + changes(5), + configs[5].clone(), + ); + let block6 = insert_header_with_configuration_change( + &backend, + 6, + block5, + changes(6), + configs[6].clone(), + ); + + // insert finalized header => 4 headers are finalized at once + let header7 = Header { + number: 7, + parent_hash: block6, + state_root: Default::default(), + digest: Digest { + logs: vec![DigestItem::ChangesTrieSignal( + ChangesTrieSignal::NewConfiguration(configs[7].clone()), + )], + }, + extrinsics_root: Default::default(), + }; + let mut op = 
backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(block6)) + .unwrap(); + op.mark_finalized(BlockId::Hash(block4), None).unwrap(); + op.mark_finalized(BlockId::Hash(block5), None).unwrap(); + op.mark_finalized(BlockId::Hash(block6), None).unwrap(); + op.set_block_data(header7, None, None, NewBlockState::Final) + .unwrap(); + backend.commit_operation(op).unwrap(); + } + + #[test] + fn changes_tries_configuration_is_reverted() { + let backend = Backend::::new_test(10, 10); + + let config0 = Some(ChangesTrieConfiguration::new(2, 5)); + let block0 = + insert_header_with_configuration_change(&backend, 0, Default::default(), None, config0); + let config1 = Some(ChangesTrieConfiguration::new(2, 6)); + let block1 = + insert_header_with_configuration_change(&backend, 1, block0, changes(0), config1); + backend + .finalize_block(BlockId::Number(1), Some(vec![42])) + .unwrap(); + let config2 = Some(ChangesTrieConfiguration::new(2, 7)); + let block2 = + insert_header_with_configuration_change(&backend, 2, block1, changes(1), config2); + let config2_1 = Some(ChangesTrieConfiguration::new(2, 8)); + let _ = + insert_header_with_configuration_change(&backend, 3, block2, changes(10), config2_1); + let config2_2 = Some(ChangesTrieConfiguration::new(2, 9)); + let block2_2 = + insert_header_with_configuration_change(&backend, 3, block2, changes(20), config2_2); + let config2_3 = Some(ChangesTrieConfiguration::new(2, 10)); + let _ = + insert_header_with_configuration_change(&backend, 4, block2_2, changes(30), config2_3); + + // before truncate there are 2 unfinalized forks - block2_1+block2_3 + assert_eq!( + backend + .changes_tries_storage + .cache + .0 + .write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 4], + ); + + // after truncating block2_3 - there are 2 unfinalized forks - block2_1+block2_2 + backend.revert(1, 
false).unwrap(); + assert_eq!( + backend + .changes_tries_storage + .cache + .0 + .write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 3], + ); + + // after truncating block2_1 && block2_2 - there are still two unfinalized forks (cache impl specifics), + // the 1st one points to the block #3 because it isn't truncated + backend.revert(1, false).unwrap(); + assert_eq!( + backend + .changes_tries_storage + .cache + .0 + .write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>(), + vec![3, 2], + ); + + // after truncating block2 - there are no unfinalized forks + backend.revert(1, false).unwrap(); + assert!(backend + .changes_tries_storage + .cache + .0 + .write() + .get_cache(well_known_cache_keys::CHANGES_TRIE_CONFIG) + .unwrap() + .unfinalized() + .iter() + .map(|fork| fork.head().valid_from.number) + .collect::>() + .is_empty(),); + } } diff --git a/client/db/src/children.rs b/client/db/src/children.rs index 3916321f17..f493884d83 100644 --- a/client/db/src/children.rs +++ b/client/db/src/children.rs @@ -16,104 +16,110 @@ //! Functionality for reading and storing children hashes from db. -use codec::{Encode, Decode}; +use crate::DbHash; +use codec::{Decode, Encode}; use sp_blockchain; -use std::hash::Hash; use sp_database::{Database, Transaction}; -use crate::DbHash; +use std::hash::Hash; /// Returns the hashes of the children blocks of the block with `parent_hash`. 
pub fn read_children< - K: Eq + Hash + Clone + Encode + Decode, - V: Eq + Hash + Clone + Encode + Decode, ->(db: &dyn Database, column: u32, prefix: &[u8], parent_hash: K) -> sp_blockchain::Result> { - let mut buf = prefix.to_vec(); - parent_hash.using_encoded(|s| buf.extend(s)); - - let raw_val_opt = db.get(column, &buf[..]); - - let raw_val = match raw_val_opt { - Some(val) => val, - None => return Ok(Vec::new()), - }; - - let children: Vec = match Decode::decode(&mut &raw_val[..]) { - Ok(children) => children, - Err(_) => return Err(sp_blockchain::Error::Backend("Error decoding children".into())), - }; - - Ok(children) + K: Eq + Hash + Clone + Encode + Decode, + V: Eq + Hash + Clone + Encode + Decode, +>( + db: &dyn Database, + column: u32, + prefix: &[u8], + parent_hash: K, +) -> sp_blockchain::Result> { + let mut buf = prefix.to_vec(); + parent_hash.using_encoded(|s| buf.extend(s)); + + let raw_val_opt = db.get(column, &buf[..]); + + let raw_val = match raw_val_opt { + Some(val) => val, + None => return Ok(Vec::new()), + }; + + let children: Vec = match Decode::decode(&mut &raw_val[..]) { + Ok(children) => children, + Err(_) => { + return Err(sp_blockchain::Error::Backend( + "Error decoding children".into(), + )) + } + }; + + Ok(children) } /// Insert the key-value pair (`parent_hash`, `children_hashes`) in the transaction. /// Any existing value is overwritten upon write. 
pub fn write_children< - K: Eq + Hash + Clone + Encode + Decode, - V: Eq + Hash + Clone + Encode + Decode, + K: Eq + Hash + Clone + Encode + Decode, + V: Eq + Hash + Clone + Encode + Decode, >( - tx: &mut Transaction, - column: u32, - prefix: &[u8], - parent_hash: K, - children_hashes: V, + tx: &mut Transaction, + column: u32, + prefix: &[u8], + parent_hash: K, + children_hashes: V, ) { - let mut key = prefix.to_vec(); - parent_hash.using_encoded(|s| key.extend(s)); - tx.set_from_vec(column, &key[..], children_hashes.encode()); + let mut key = prefix.to_vec(); + parent_hash.using_encoded(|s| key.extend(s)); + tx.set_from_vec(column, &key[..], children_hashes.encode()); } /// Prepare transaction to remove the children of `parent_hash`. -pub fn remove_children< - K: Eq + Hash + Clone + Encode + Decode, ->( - tx: &mut Transaction, - column: u32, - prefix: &[u8], - parent_hash: K, +pub fn remove_children( + tx: &mut Transaction, + column: u32, + prefix: &[u8], + parent_hash: K, ) { - let mut key = prefix.to_vec(); - parent_hash.using_encoded(|s| key.extend(s)); - tx.remove(column, &key); + let mut key = prefix.to_vec(); + parent_hash.using_encoded(|s| key.extend(s)); + tx.remove(column, &key); } - #[cfg(test)] mod tests { - use super::*; - use std::sync::Arc; + use super::*; + use std::sync::Arc; - #[test] - fn children_write_read_remove() { - const PREFIX: &[u8] = b"children"; - let db = Arc::new(sp_database::MemDb::default()); + #[test] + fn children_write_read_remove() { + const PREFIX: &[u8] = b"children"; + let db = Arc::new(sp_database::MemDb::default()); - let mut tx = Transaction::new(); + let mut tx = Transaction::new(); - let mut children1 = Vec::new(); - children1.push(1_3); - children1.push(1_5); - write_children(&mut tx, 0, PREFIX, 1_1, children1); + let mut children1 = Vec::new(); + children1.push(1_3); + children1.push(1_5); + write_children(&mut tx, 0, PREFIX, 1_1, children1); - let mut children2 = Vec::new(); - children2.push(1_4); - 
children2.push(1_6); - write_children(&mut tx, 0, PREFIX, 1_2, children2); + let mut children2 = Vec::new(); + children2.push(1_4); + children2.push(1_6); + write_children(&mut tx, 0, PREFIX, 1_2, children2); - db.commit(tx.clone()); + db.commit(tx.clone()); - let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed"); - let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed"); + let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(1) Getting r1 failed"); + let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(1) Getting r2 failed"); - assert_eq!(r1, vec![1_3, 1_5]); - assert_eq!(r2, vec![1_4, 1_6]); + assert_eq!(r1, vec![1_3, 1_5]); + assert_eq!(r2, vec![1_4, 1_6]); - remove_children(&mut tx, 0, PREFIX, 1_2); - db.commit(tx); + remove_children(&mut tx, 0, PREFIX, 1_2); + db.commit(tx); - let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(2) Getting r1 failed"); - let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(2) Getting r2 failed"); + let r1: Vec = read_children(&*db, 0, PREFIX, 1_1).expect("(2) Getting r1 failed"); + let r2: Vec = read_children(&*db, 0, PREFIX, 1_2).expect("(2) Getting r2 failed"); - assert_eq!(r1, vec![1_3, 1_5]); - assert_eq!(r2.len(), 0); - } + assert_eq!(r1, vec![1_3, 1_5]); + assert_eq!(r2.len(), 0); + } } diff --git a/client/db/src/lib.rs b/client/db/src/lib.rs index 782e0f6db2..01b5488629 100644 --- a/client/db/src/lib.rs +++ b/client/db/src/lib.rs @@ -32,66 +32,62 @@ pub mod offchain; #[cfg(any(feature = "kvdb-rocksdb", test))] pub mod bench; -mod children; mod cache; mod changes_tries_storage; -mod storage_cache; -#[cfg(any(feature = "kvdb-rocksdb", test))] -mod upgrade; -mod utils; -mod stats; +mod children; #[cfg(feature = "parity-db")] mod parity_db; +mod stats; +mod storage_cache; #[cfg(feature = "subdb")] mod subdb; +#[cfg(any(feature = "kvdb-rocksdb", test))] +mod upgrade; +mod utils; -use std::sync::Arc; -use std::path::{Path, PathBuf}; -use std::io; 
use std::collections::HashMap; +use std::io; +use std::path::{Path, PathBuf}; +use std::sync::Arc; +use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; +use crate::stats::StateUsageStats; +use crate::storage_cache::{new_shared_cache, CachingState, SharedCache, SyncingCachingState}; +use crate::utils::{meta_keys, read_db, read_meta, DatabaseType, Meta}; +use codec::{Decode, Encode}; +use hash_db::Prefix; +use log::{debug, trace, warn}; +use parking_lot::RwLock; +use prometheus_endpoint::Registry; +use sc_client::leaves::{FinalizationDisplaced, LeafSet}; use sc_client_api::{ - ForkBlocks, UsageInfo, MemoryInfo, BadBlocks, IoInfo, MemorySize, CloneableSpawn, - execution_extensions::ExecutionExtensions, - backend::{NewBlockState, PrunableStateChangesTrieStorage}, + backend::{NewBlockState, PrunableStateChangesTrieStorage}, + execution_extensions::ExecutionExtensions, + BadBlocks, CloneableSpawn, ForkBlocks, IoInfo, MemoryInfo, MemorySize, UsageInfo, }; +use sc_executor::RuntimeInfo; +use sc_state_db::StateDb; use sp_blockchain::{ - Result as ClientResult, Error as ClientError, - well_known_cache_keys, HeaderBackend, + well_known_cache_keys, Error as ClientError, HeaderBackend, Result as ClientResult, }; -use codec::{Decode, Encode}; -use hash_db::Prefix; -use sp_trie::{MemoryDB, PrefixedMemoryDB, prefixed_key}; -use sp_database::Transaction; -use parking_lot::RwLock; -use sp_core::{ChangesTrieConfiguration, traits::CodeExecutor}; +use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_runtime::{ - generic::BlockId, Justification, Storage, - BuildStorage, -}; +use sp_core::{traits::CodeExecutor, ChangesTrieConfiguration}; +use sp_database::Transaction; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, Zero, One, SaturatedConversion, HashFor, + Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, SaturatedConversion, 
Zero, }; -use sc_executor::RuntimeInfo; +use sp_runtime::{generic::BlockId, BuildStorage, Justification, Storage}; use sp_state_machine::{ - DBValue, ChangesTrieTransaction, ChangesTrieCacheAction, UsageInfo as StateUsageInfo, - StorageCollection, ChildStorageCollection, - backend::Backend as StateBackend, StateMachineStats, + backend::Backend as StateBackend, ChangesTrieCacheAction, ChangesTrieTransaction, + ChildStorageCollection, DBValue, StateMachineStats, StorageCollection, + UsageInfo as StateUsageInfo, }; -use crate::utils::{DatabaseType, Meta, meta_keys, read_db, read_meta}; -use crate::changes_tries_storage::{DbChangesTrieStorage, DbChangesTrieStorageTransaction}; -use sc_client::leaves::{LeafSet, FinalizationDisplaced}; -use sc_state_db::StateDb; -use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache}; -use crate::storage_cache::{CachingState, SyncingCachingState, SharedCache, new_shared_cache}; -use crate::stats::StateUsageStats; -use log::{trace, debug, warn}; -use prometheus_endpoint::Registry; +use sp_trie::{prefixed_key, MemoryDB, PrefixedMemoryDB}; // Re-export the Database trait so that one can pass an implementation of it. -pub use sp_database::Database; pub use sc_state_db::PruningMode; +pub use sp_database::Database; #[cfg(any(feature = "kvdb-rocksdb", test))] pub use bench::BenchmarkingState; @@ -103,9 +99,8 @@ const MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR: u32 = 32768; const DEFAULT_CHILD_RATIO: (usize, usize) = (1, 10); /// DB-backed patricia trie state, transaction type is an overlay of changes to commit. -pub type DbState = sp_state_machine::TrieBackend< - Arc>>, HashFor ->; +pub type DbState = + sp_state_machine::TrieBackend>>, HashFor>; /// Hash type that this backend uses for the database. pub type DbHash = [u8; 32]; @@ -115,640 +110,680 @@ pub type DbHash = [u8; 32]; /// It makes sure that the hash we are using stays pinned in storage /// until this structure is dropped. 
pub struct RefTrackingState { - state: DbState, - storage: Arc>, - parent_hash: Option, + state: DbState, + storage: Arc>, + parent_hash: Option, } impl RefTrackingState { - fn new(state: DbState, storage: Arc>, parent_hash: Option) -> Self { - RefTrackingState { - state, - parent_hash, - storage, - } - } + fn new(state: DbState, storage: Arc>, parent_hash: Option) -> Self { + RefTrackingState { + state, + parent_hash, + storage, + } + } } impl Drop for RefTrackingState { - fn drop(&mut self) { - if let Some(hash) = &self.parent_hash { - self.storage.state_db.unpin(hash); - } - } + fn drop(&mut self) { + if let Some(hash) = &self.parent_hash { + self.storage.state_db.unpin(hash); + } + } } impl std::fmt::Debug for RefTrackingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Block {:?}", self.parent_hash) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Block {:?}", self.parent_hash) + } } impl StateBackend> for RefTrackingState { - type Error = as StateBackend>>::Error; - type Transaction = as StateBackend>>::Transaction; - type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.storage(key) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.state.storage_hash(key) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.child_storage(storage_key, child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.state.exists_storage(key) - } - - fn exists_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - storage_key: 
&[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) - } - - fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) - where - I: IntoIterator, Option>)> - { - self.state.storage_root(delta) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (B::Hash, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - { - self.state.child_storage_root(storage_key, child_info, delta) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.keys(prefix) - } - - fn child_keys( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) - } - - fn as_trie_backend(&mut self) - -> Option<&sp_state_machine::TrieBackend>> - { - self.state.as_trie_backend() - } - - fn register_overlay_stats(&mut self, stats: &StateMachineStats) { - self.state.register_overlay_stats(stats); - } - - fn usage_info(&self) -> StateUsageInfo { - self.state.usage_info() - } + type Error = as StateBackend>>::Error; + type Transaction = as StateBackend>>::Transaction; + type TrieBackendStorage = as StateBackend>>::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + 
self.state.storage(key) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.state.storage_hash(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state.child_storage(storage_key, child_info, key) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + self.state.exists_storage(key) + } + + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + self.state + .exists_child_storage(storage_key, child_info, key) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.state.next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state + .next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_key_values_with_prefix(prefix, f) + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.state + .for_keys_in_child_storage(storage_key, child_info, f) + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.state + .for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + + fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.state.storage_root(delta) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (B::Hash, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.state + .child_storage_root(storage_key, child_info, delta) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.state.pairs() + } + + fn keys(&self, 
prefix: &[u8]) -> Vec> { + self.state.keys(prefix) + } + + fn child_keys(&self, storage_key: &[u8], child_info: ChildInfo, prefix: &[u8]) -> Vec> { + self.state.child_keys(storage_key, child_info, prefix) + } + + fn as_trie_backend( + &mut self, + ) -> Option<&sp_state_machine::TrieBackend>> { + self.state.as_trie_backend() + } + + fn register_overlay_stats(&mut self, stats: &StateMachineStats) { + self.state.register_overlay_stats(stats); + } + + fn usage_info(&self) -> StateUsageInfo { + self.state.usage_info() + } } /// Database settings. pub struct DatabaseSettings { - /// State cache size. - pub state_cache_size: usize, - /// Ratio of cache size dedicated to child tries. - pub state_cache_child_ratio: Option<(usize, usize)>, - /// Pruning mode. - pub pruning: PruningMode, - /// Where to find the database. - pub source: DatabaseSettingsSrc, + /// State cache size. + pub state_cache_size: usize, + /// Ratio of cache size dedicated to child tries. + pub state_cache_child_ratio: Option<(usize, usize)>, + /// Pruning mode. + pub pruning: PruningMode, + /// Where to find the database. + pub source: DatabaseSettingsSrc, } /// Where to find the database.. #[derive(Clone)] pub enum DatabaseSettingsSrc { - /// Load a RocksDB database from a given path. Recommended for most uses. - RocksDb { - /// Path to the database. - path: PathBuf, - /// Cache size in MiB. - cache_size: usize, - }, - - /// Load a ParityDb database from a given path. - ParityDb { - /// Path to the database. - path: PathBuf, - }, - - /// Load a Subdb database from a given path. - SubDb { - /// Path to the database. - path: PathBuf, - }, - - /// Use a custom already-open database. - Custom(Arc>), + /// Load a RocksDB database from a given path. Recommended for most uses. + RocksDb { + /// Path to the database. + path: PathBuf, + /// Cache size in MiB. + cache_size: usize, + }, + + /// Load a ParityDb database from a given path. + ParityDb { + /// Path to the database. 
+ path: PathBuf, + }, + + /// Load a Subdb database from a given path. + SubDb { + /// Path to the database. + path: PathBuf, + }, + + /// Use a custom already-open database. + Custom(Arc>), } impl DatabaseSettingsSrc { - /// Return dabase path for databases that are on the disk. - pub fn path(&self) -> Option<&Path> { - match self { - DatabaseSettingsSrc::RocksDb { path, .. } => Some(path.as_path()), - DatabaseSettingsSrc::ParityDb { path, .. } => Some(path.as_path()), - DatabaseSettingsSrc::SubDb { path, .. } => Some(path.as_path()), - DatabaseSettingsSrc::Custom(_) => None, - } - } + /// Return dabase path for databases that are on the disk. + pub fn path(&self) -> Option<&Path> { + match self { + DatabaseSettingsSrc::RocksDb { path, .. } => Some(path.as_path()), + DatabaseSettingsSrc::ParityDb { path, .. } => Some(path.as_path()), + DatabaseSettingsSrc::SubDb { path, .. } => Some(path.as_path()), + DatabaseSettingsSrc::Custom(_) => None, + } + } } /// Create an instance of db-backed client. 
pub fn new_client( - settings: DatabaseSettings, - executor: E, - genesis_storage: &dyn BuildStorage, - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, - execution_extensions: ExecutionExtensions, - spawn_handle: Box, - prometheus_registry: Option, -) -> Result<( - sc_client::Client< - Backend, - sc_client::LocalCallExecutor, E>, - Block, - RA, - >, - Arc>, - ), - sp_blockchain::Error, + settings: DatabaseSettings, + executor: E, + genesis_storage: &dyn BuildStorage, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + execution_extensions: ExecutionExtensions, + spawn_handle: Box, + prometheus_registry: Option, +) -> Result< + ( + sc_client::Client< + Backend, + sc_client::LocalCallExecutor, E>, + Block, + RA, + >, + Arc>, + ), + sp_blockchain::Error, > - where - Block: BlockT, - E: CodeExecutor + RuntimeInfo, +where + Block: BlockT, + E: CodeExecutor + RuntimeInfo, { - let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); - let executor = sc_client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle); - Ok(( - sc_client::Client::new( - backend.clone(), - executor, - genesis_storage, - fork_blocks, - bad_blocks, - execution_extensions, - prometheus_registry, - )?, - backend, - )) + let backend = Arc::new(Backend::new(settings, CANONICALIZATION_DELAY)?); + let executor = sc_client::LocalCallExecutor::new(backend.clone(), executor, spawn_handle); + Ok(( + sc_client::Client::new( + backend.clone(), + executor, + genesis_storage, + fork_blocks, + bad_blocks, + execution_extensions, + prometheus_registry, + )?, + backend, + )) } pub(crate) mod columns { - pub const META: u32 = crate::utils::COLUMN_META; - pub const STATE: u32 = 1; - pub const STATE_META: u32 = 2; - /// maps hashes to lookup keys and numbers to canon hashes. 
- pub const KEY_LOOKUP: u32 = 3; - pub const HEADER: u32 = 4; - pub const BODY: u32 = 5; - pub const JUSTIFICATION: u32 = 6; - pub const CHANGES_TRIE: u32 = 7; - pub const AUX: u32 = 8; - /// Offchain workers local storage - pub const OFFCHAIN: u32 = 9; - pub const CACHE: u32 = 10; + pub const META: u32 = crate::utils::COLUMN_META; + pub const STATE: u32 = 1; + pub const STATE_META: u32 = 2; + /// maps hashes to lookup keys and numbers to canon hashes. + pub const KEY_LOOKUP: u32 = 3; + pub const HEADER: u32 = 4; + pub const BODY: u32 = 5; + pub const JUSTIFICATION: u32 = 6; + pub const CHANGES_TRIE: u32 = 7; + pub const AUX: u32 = 8; + /// Offchain workers local storage + pub const OFFCHAIN: u32 = 9; + pub const CACHE: u32 = 10; } struct PendingBlock { - header: Block::Header, - justification: Option, - body: Option>, - leaf_state: NewBlockState, + header: Block::Header, + justification: Option, + body: Option>, + leaf_state: NewBlockState, } // wrapper that implements trait required for state_db struct StateMetaDb<'a>(&'a dyn Database); impl<'a> sc_state_db::MetaDb for StateMetaDb<'a> { - type Error = io::Error; + type Error = io::Error; - fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { - Ok(self.0.get(columns::STATE_META, key)) - } + fn get_meta(&self, key: &[u8]) -> Result>, Self::Error> { + Ok(self.0.get(columns::STATE_META, key)) + } } /// Block database pub struct BlockchainDb { - db: Arc>, - meta: Arc, Block::Hash>>>, - leaves: RwLock>>, - header_metadata_cache: HeaderMetadataCache, + db: Arc>, + meta: Arc, Block::Hash>>>, + leaves: RwLock>>, + header_metadata_cache: HeaderMetadataCache, } impl BlockchainDb { - fn new(db: Arc>) -> ClientResult { - let meta = read_meta::(&*db, columns::HEADER)?; - let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; - Ok(BlockchainDb { - db, - leaves: RwLock::new(leaves), - meta: Arc::new(RwLock::new(meta)), - header_metadata_cache: HeaderMetadataCache::default(), - }) - } - - fn 
update_meta( - &self, - hash: Block::Hash, - number: ::Number, - is_best: bool, - is_finalized: bool - ) { - let mut meta = self.meta.write(); - if number.is_zero() { - meta.genesis_hash = hash; - meta.finalized_hash = hash; - } - - if is_best { - meta.best_number = number; - meta.best_hash = hash; - } - - if is_finalized { - meta.finalized_number = number; - meta.finalized_hash = hash; - } - } + fn new(db: Arc>) -> ClientResult { + let meta = read_meta::(&*db, columns::HEADER)?; + let leaves = LeafSet::read_from_db(&*db, columns::META, meta_keys::LEAF_PREFIX)?; + Ok(BlockchainDb { + db, + leaves: RwLock::new(leaves), + meta: Arc::new(RwLock::new(meta)), + header_metadata_cache: HeaderMetadataCache::default(), + }) + } + + fn update_meta( + &self, + hash: Block::Hash, + number: ::Number, + is_best: bool, + is_finalized: bool, + ) { + let mut meta = self.meta.write(); + if number.is_zero() { + meta.genesis_hash = hash; + meta.finalized_hash = hash; + } + + if is_best { + meta.best_number = number; + meta.best_hash = hash; + } + + if is_finalized { + meta.finalized_number = number; + meta.finalized_hash = hash; + } + } } impl sc_client::blockchain::HeaderBackend for BlockchainDb { - fn header(&self, id: BlockId) -> ClientResult> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) - } - - fn info(&self) -> sc_client::blockchain::Info { - let meta = self.meta.read(); - sc_client::blockchain::Info { - best_hash: meta.best_hash, - best_number: meta.best_number, - genesis_hash: meta.genesis_hash, - finalized_hash: meta.finalized_hash, - finalized_number: meta.finalized_number, - number_leaves: self.leaves.read().count(), - } - } - - fn status(&self, id: BlockId) -> ClientResult { - let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), - BlockId::Number(n) => n <= self.meta.read().best_number, - }; - match exists { - true => Ok(sc_client::blockchain::BlockStatus::InChain), 
- false => Ok(sc_client::blockchain::BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - Ok(self.header_metadata(hash).ok().map(|header_metadata| header_metadata.number)) - } - - fn hash(&self, number: NumberFor) -> ClientResult> { - self.header(BlockId::Number(number)).and_then(|maybe_header| match maybe_header { - Some(header) => Ok(Some(header.hash().clone())), - None => Ok(None), - }) - } + fn header(&self, id: BlockId) -> ClientResult> { + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + } + + fn info(&self) -> sc_client::blockchain::Info { + let meta = self.meta.read(); + sc_client::blockchain::Info { + best_hash: meta.best_hash, + best_number: meta.best_number, + genesis_hash: meta.genesis_hash, + finalized_hash: meta.finalized_hash, + finalized_number: meta.finalized_number, + number_leaves: self.leaves.read().count(), + } + } + + fn status(&self, id: BlockId) -> ClientResult { + let exists = match id { + BlockId::Hash(_) => { + read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some() + } + BlockId::Number(n) => n <= self.meta.read().best_number, + }; + match exists { + true => Ok(sc_client::blockchain::BlockStatus::InChain), + false => Ok(sc_client::blockchain::BlockStatus::Unknown), + } + } + + fn number(&self, hash: Block::Hash) -> ClientResult>> { + Ok(self + .header_metadata(hash) + .ok() + .map(|header_metadata| header_metadata.number)) + } + + fn hash(&self, number: NumberFor) -> ClientResult> { + self.header(BlockId::Number(number)) + .and_then(|maybe_header| match maybe_header { + Some(header) => Ok(Some(header.hash().clone())), + None => Ok(None), + }) + } } impl sc_client::blockchain::Backend for BlockchainDb { - fn body(&self, id: BlockId) -> ClientResult>> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? 
{ - Some(body) => match Decode::decode(&mut &body[..]) { - Ok(body) => Ok(Some(body)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding body: {}", err) - )), - } - None => Ok(None), - } - } - - fn justification(&self, id: BlockId) -> ClientResult> { - match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATION, id)? { - Some(justification) => match Decode::decode(&mut &justification[..]) { - Ok(justification) => Ok(Some(justification)), - Err(err) => return Err(sp_blockchain::Error::Backend( - format!("Error decoding justification: {}", err) - )), - } - None => Ok(None), - } - } - - fn last_finalized(&self) -> ClientResult { - Ok(self.meta.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - None - } - - fn leaves(&self) -> ClientResult> { - Ok(self.leaves.read().hashes()) - } - - fn children(&self, parent_hash: Block::Hash) -> ClientResult> { - children::read_children(&*self.db, columns::META, meta_keys::CHILDREN_PREFIX, parent_hash) - } + fn body(&self, id: BlockId) -> ClientResult>> { + match read_db(&*self.db, columns::KEY_LOOKUP, columns::BODY, id)? { + Some(body) => match Decode::decode(&mut &body[..]) { + Ok(body) => Ok(Some(body)), + Err(err) => { + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding body: {}", + err + ))) + } + }, + None => Ok(None), + } + } + + fn justification(&self, id: BlockId) -> ClientResult> { + match read_db(&*self.db, columns::KEY_LOOKUP, columns::JUSTIFICATION, id)? 
{ + Some(justification) => match Decode::decode(&mut &justification[..]) { + Ok(justification) => Ok(Some(justification)), + Err(err) => { + return Err(sp_blockchain::Error::Backend(format!( + "Error decoding justification: {}", + err + ))) + } + }, + None => Ok(None), + } + } + + fn last_finalized(&self) -> ClientResult { + Ok(self.meta.read().finalized_hash.clone()) + } + + fn cache(&self) -> Option>> { + None + } + + fn leaves(&self) -> ClientResult> { + Ok(self.leaves.read().hashes()) + } + + fn children(&self, parent_hash: Block::Hash) -> ClientResult> { + children::read_children( + &*self.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + ) + } } impl sc_client::blockchain::ProvideCache for BlockchainDb { - fn cache(&self) -> Option>> { - None - } + fn cache(&self) -> Option>> { + None + } } impl HeaderMetadata for BlockchainDb { - type Error = sp_blockchain::Error; - - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).or_else(|_| { - self.header(BlockId::hash(hash))?.map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header_metadata.hash, - header_metadata.clone(), - ); - header_metadata - }).ok_or(ClientError::UnknownBlock(format!("header not found in db: {}", hash))) - }) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.header_metadata_cache.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.header_metadata_cache.remove_header_metadata(hash); - } + type Error = sp_blockchain::Error; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header_metadata_cache + .header_metadata(hash) + .or_else(|_| { + self.header(BlockId::hash(hash))? 
+ .map(|header| { + let header_metadata = CachedHeaderMetadata::from(&header); + self.header_metadata_cache + .insert_header_metadata(header_metadata.hash, header_metadata.clone()); + header_metadata + }) + .ok_or(ClientError::UnknownBlock(format!( + "header not found in db: {}", + hash + ))) + }) + } + + fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { + self.header_metadata_cache + .insert_header_metadata(hash, metadata) + } + + fn remove_header_metadata(&self, hash: Block::Hash) { + self.header_metadata_cache.remove_header_metadata(hash); + } } /// Database transaction pub struct BlockImportOperation { - old_state: SyncingCachingState, Block>, - db_updates: PrefixedMemoryDB>, - storage_updates: StorageCollection, - child_storage_updates: ChildStorageCollection, - changes_trie_updates: MemoryDB>, - changes_trie_build_cache_update: Option>>, - changes_trie_config_update: Option>, - pending_block: Option>, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(BlockId, Option)>, - set_head: Option>, - commit_state: bool, + old_state: SyncingCachingState, Block>, + db_updates: PrefixedMemoryDB>, + storage_updates: StorageCollection, + child_storage_updates: ChildStorageCollection, + changes_trie_updates: MemoryDB>, + changes_trie_build_cache_update: Option>>, + changes_trie_config_update: Option>, + pending_block: Option>, + aux_ops: Vec<(Vec, Option>)>, + finalized_blocks: Vec<(BlockId, Option)>, + set_head: Option>, + commit_state: bool, } impl BlockImportOperation { - fn apply_aux(&mut self, transaction: &mut Transaction) { - for (key, maybe_val) in self.aux_ops.drain(..) { - match maybe_val { - Some(val) => transaction.set_from_vec(columns::AUX, &key, val), - None => transaction.remove(columns::AUX, &key), - } - } - } + fn apply_aux(&mut self, transaction: &mut Transaction) { + for (key, maybe_val) in self.aux_ops.drain(..) 
{ + match maybe_val { + Some(val) => transaction.set_from_vec(columns::AUX, &key, val), + None => transaction.remove(columns::AUX, &key), + } + } + } } -impl sc_client_api::backend::BlockImportOperation for BlockImportOperation { - type State = SyncingCachingState, Block>; - - fn state(&self) -> ClientResult> { - Ok(Some(&self.old_state)) - } - - fn set_block_data( - &mut self, - header: Block::Header, - body: Option>, - justification: Option, - leaf_state: NewBlockState, - ) -> ClientResult<()> { - assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - if let Some(changes_trie_config_update) = changes_tries_storage::extract_new_configuration(&header) { - self.changes_trie_config_update = Some(changes_trie_config_update.clone()); - } - self.pending_block = Some(PendingBlock { - header, - body, - justification, - leaf_state, - }); - Ok(()) - } - - fn update_cache(&mut self, _cache: HashMap>) { - // Currently cache isn't implemented on full nodes. - } - - fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { - self.db_updates = update; - Ok(()) - } - - fn reset_storage( - &mut self, - storage: Storage, - ) -> ClientResult { - - if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - - for child_key in storage.children.keys() { - if !well_known_keys::is_child_storage_key(&child_key) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - } - - let child_delta = storage.children.into_iter().map(|(storage_key, child_content)| ( - storage_key, - child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info), - ); - - let mut changes_trie_config: Option = None; - let (root, transaction) = self.old_state.full_storage_root( - storage.top.into_iter().map(|(k, v)| { - if k == well_known_keys::CHANGES_TRIE_CONFIG { - changes_trie_config = Some( - Decode::decode(&mut &v[..]) - .expect("changes trie 
configuration is encoded properly at genesis") - ); - } - (k, Some(v)) - }), - child_delta - ); - - self.db_updates = transaction; - self.changes_trie_config_update = Some(changes_trie_config); - self.commit_state = true; - Ok(root) - } - - fn update_changes_trie( - &mut self, - update: ChangesTrieTransaction, NumberFor>, - ) -> ClientResult<()> { - self.changes_trie_updates = update.0; - self.changes_trie_build_cache_update = Some(update.1); - Ok(()) - } - - fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where I: IntoIterator, Option>)> - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage( - &mut self, - update: StorageCollection, - child_update: ChildStorageCollection, - ) -> ClientResult<()> { - self.storage_updates = update; - self.child_storage_updates = child_update; - Ok(()) - } - - fn mark_finalized( - &mut self, - block: BlockId, - justification: Option, - ) -> ClientResult<()> { - self.finalized_blocks.push((block, justification)); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { - assert!(self.set_head.is_none(), "Only one set head per operation is allowed"); - self.set_head = Some(block); - Ok(()) - } +impl sc_client_api::backend::BlockImportOperation + for BlockImportOperation +{ + type State = SyncingCachingState, Block>; + + fn state(&self) -> ClientResult> { + Ok(Some(&self.old_state)) + } + + fn set_block_data( + &mut self, + header: Block::Header, + body: Option>, + justification: Option, + leaf_state: NewBlockState, + ) -> ClientResult<()> { + assert!( + self.pending_block.is_none(), + "Only one block per operation is allowed" + ); + if let Some(changes_trie_config_update) = + changes_tries_storage::extract_new_configuration(&header) + { + self.changes_trie_config_update = Some(changes_trie_config_update.clone()); + } + self.pending_block = Some(PendingBlock { + header, + body, + justification, + leaf_state, + }); + Ok(()) + } + + fn update_cache(&mut self, _cache: 
HashMap>) { + // Currently cache isn't implemented on full nodes. + } + + fn update_db_storage(&mut self, update: PrefixedMemoryDB>) -> ClientResult<()> { + self.db_updates = update; + Ok(()) + } + + fn reset_storage(&mut self, storage: Storage) -> ClientResult { + if storage + .top + .iter() + .any(|(k, _)| well_known_keys::is_child_storage_key(k)) + { + return Err(sp_blockchain::Error::GenesisInvalid.into()); + } + + for child_key in storage.children.keys() { + if !well_known_keys::is_child_storage_key(&child_key) { + return Err(sp_blockchain::Error::GenesisInvalid.into()); + } + } + + let child_delta = storage + .children + .into_iter() + .map(|(storage_key, child_content)| { + ( + storage_key, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), + child_content.child_info, + ) + }); + + let mut changes_trie_config: Option = None; + let (root, transaction) = self.old_state.full_storage_root( + storage.top.into_iter().map(|(k, v)| { + if k == well_known_keys::CHANGES_TRIE_CONFIG { + changes_trie_config = Some( + Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis"), + ); + } + (k, Some(v)) + }), + child_delta, + ); + + self.db_updates = transaction; + self.changes_trie_config_update = Some(changes_trie_config); + self.commit_state = true; + Ok(root) + } + + fn update_changes_trie( + &mut self, + update: ChangesTrieTransaction, NumberFor>, + ) -> ClientResult<()> { + self.changes_trie_updates = update.0; + self.changes_trie_build_cache_update = Some(update.1); + Ok(()) + } + + fn insert_aux(&mut self, ops: I) -> ClientResult<()> + where + I: IntoIterator, Option>)>, + { + self.aux_ops.append(&mut ops.into_iter().collect()); + Ok(()) + } + + fn update_storage( + &mut self, + update: StorageCollection, + child_update: ChildStorageCollection, + ) -> ClientResult<()> { + self.storage_updates = update; + self.child_storage_updates = child_update; + Ok(()) + } + + fn mark_finalized( + &mut self, + block: BlockId, + 
justification: Option, + ) -> ClientResult<()> { + self.finalized_blocks.push((block, justification)); + Ok(()) + } + + fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { + assert!( + self.set_head.is_none(), + "Only one set head per operation is allowed" + ); + self.set_head = Some(block); + Ok(()) + } } struct StorageDb { - pub db: Arc>, - pub state_db: StateDb>, + pub db: Arc>, + pub state_db: StateDb>, } impl sp_state_machine::Storage> for StorageDb { - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - let key = prefixed_key::>(key, prefix); - self.state_db.get(&key, self) - .map_err(|e| format!("Database backend error: {:?}", e)) - } + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + let key = prefixed_key::>(key, prefix); + self.state_db + .get(&key, self) + .map_err(|e| format!("Database backend error: {:?}", e)) + } } impl sc_state_db::NodeDb for StorageDb { - type Error = io::Error; - type Key = [u8]; + type Error = io::Error; + type Key = [u8]; - fn get(&self, key: &[u8]) -> Result>, Self::Error> { - Ok(self.db.get(columns::STATE, key)) - } + fn get(&self, key: &[u8]) -> Result>, Self::Error> { + Ok(self.db.get(columns::STATE, key)) + } } struct DbGenesisStorage(pub Block::Hash); impl DbGenesisStorage { - pub fn new() -> Self { - let mut root = Block::Hash::default(); - let mut mdb = MemoryDB::>::default(); - sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); - DbGenesisStorage(root) - } + pub fn new() -> Self { + let mut root = Block::Hash::default(); + let mut mdb = MemoryDB::>::default(); + sp_state_machine::TrieDBMut::>::new(&mut mdb, &mut root); + DbGenesisStorage(root) + } } impl sp_state_machine::Storage> for DbGenesisStorage { - fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { - Ok(None) - } + fn get(&self, _key: &Block::Hash, _prefix: Prefix) -> Result, String> { + Ok(None) + } } /// Frozen `value` at time `at`. 
/// /// Used as inner structure under lock in `FrozenForDuration`. struct Frozen { - at: std::time::Instant, - value: Option, + at: std::time::Instant, + value: Option, } /// Some value frozen for period of time. @@ -757,29 +792,39 @@ struct Frozen { /// current frozen value is returned. Otherwise, you have to provide /// a new value which will be again frozen for `duration`. pub(crate) struct FrozenForDuration { - duration: std::time::Duration, - value: parking_lot::Mutex>, + duration: std::time::Duration, + value: parking_lot::Mutex>, } impl FrozenForDuration { - fn new(duration: std::time::Duration) -> Self { - Self { - duration, - value: Frozen { at: std::time::Instant::now(), value: None }.into(), - } - } - - fn take_or_else(&self, f: F) -> T where F: FnOnce() -> T { - let mut lock = self.value.lock(); - if lock.at.elapsed() > self.duration || lock.value.is_none() { - let new_value = f(); - lock.at = std::time::Instant::now(); - lock.value = Some(new_value.clone()); - new_value - } else { - lock.value.as_ref().expect("checked with lock above").clone() - } - } + fn new(duration: std::time::Duration) -> Self { + Self { + duration, + value: Frozen { + at: std::time::Instant::now(), + value: None, + } + .into(), + } + } + + fn take_or_else(&self, f: F) -> T + where + F: FnOnce() -> T, + { + let mut lock = self.value.lock(); + if lock.at.elapsed() > self.duration || lock.value.is_none() { + let new_value = f(); + lock.at = std::time::Instant::now(); + lock.value = Some(new_value.clone()); + new_value + } else { + lock.value + .as_ref() + .expect("checked with lock above") + .clone() + } + } } /// Disk backend. @@ -787,1513 +832,1646 @@ impl FrozenForDuration { /// Disk backend keeps data in a key-value store. In archive mode, trie nodes are kept from all blocks. /// Otherwise, trie nodes are kept only from some recent blocks. 
pub struct Backend { - storage: Arc>, - offchain_storage: offchain::LocalStorage, - changes_tries_storage: DbChangesTrieStorage, - blockchain: BlockchainDb, - canonicalization_delay: u64, - shared_cache: SharedCache, - import_lock: Arc>, - is_archive: bool, - io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, - state_usage: Arc, + storage: Arc>, + offchain_storage: offchain::LocalStorage, + changes_tries_storage: DbChangesTrieStorage, + blockchain: BlockchainDb, + canonicalization_delay: u64, + shared_cache: SharedCache, + import_lock: Arc>, + is_archive: bool, + io_stats: FrozenForDuration<(kvdb::IoStats, StateUsageInfo)>, + state_usage: Arc, } impl Backend { - /// Create a new instance of database backend. - /// - /// The pruning window is how old a block must be before the state is pruned. - pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { - let db = crate::utils::open_database::(&config, DatabaseType::Full)?; - Self::from_database(db as Arc<_>, canonicalization_delay, &config) - } - - /// Create new memory-backed client backend for tests. 
- #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { - let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); - let db = sp_database::as_database(db); - let db_setting = DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), - pruning: PruningMode::keep_blocks(keep_blocks), - source: DatabaseSettingsSrc::Custom(db), - }; - - Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") - } - - fn from_database( - db: Arc>, - canonicalization_delay: u64, - config: &DatabaseSettings, - ) -> ClientResult { - let is_archive_pruning = config.pruning.is_archive(); - let blockchain = BlockchainDb::new(db.clone())?; - let meta = blockchain.meta.clone(); - let map_e = |e: sc_state_db::Error| sp_blockchain::Error::from( - format!("State database error: {:?}", e) - ); - let state_db: StateDb<_, _> = StateDb::new(config.pruning.clone(), &StateMetaDb(&*db)) - .map_err(map_e)?; - let storage_db = StorageDb { - db: db.clone(), - state_db, - }; - let offchain_storage = offchain::LocalStorage::new(db.clone()); - let changes_tries_storage = DbChangesTrieStorage::new( - db, - columns::META, - columns::CHANGES_TRIE, - columns::KEY_LOOKUP, - columns::HEADER, - columns::CACHE, - meta, - if is_archive_pruning { - None - } else { - Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) - }, - )?; - - Ok(Backend { - storage: Arc::new(storage_db), - offchain_storage, - changes_tries_storage, - blockchain, - canonicalization_delay, - shared_cache: new_shared_cache( - config.state_cache_size, - config.state_cache_child_ratio.unwrap_or(DEFAULT_CHILD_RATIO), - ), - import_lock: Default::default(), - is_archive: is_archive_pruning, - io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), - state_usage: Arc::new(StateUsageStats::new()), - }) - } - - /// Handle setting head within a transaction. `route_to` should be the last - /// block that existed in the database. 
`best_to` should be the best block - /// to be set. - /// - /// In the case where the new best block is a block to be imported, `route_to` - /// should be the parent of `best_to`. In the case where we set an existing block - /// to be best, `route_to` should equal to `best_to`. - fn set_head_with_transaction( - &self, - transaction: &mut Transaction, - route_to: Block::Hash, - best_to: (NumberFor, Block::Hash), - ) -> ClientResult<(Vec, Vec)> { - let mut enacted = Vec::default(); - let mut retracted = Vec::default(); - - let meta = self.blockchain.meta.read(); - - // cannot find tree route with empty DB. - if meta.best_hash != Default::default() { - let tree_route = sp_blockchain::tree_route( - &self.blockchain, - meta.best_hash, - route_to, - )?; - - // uncanonicalize: check safety violations and ensure the numbers no longer - // point to these block hashes in the key mapping. - for r in tree_route.retracted() { - if r.hash == meta.finalized_hash { - warn!( - "Potential safety failure: reverting finalized block {:?}", - (&r.number, &r.hash) - ); - - return Err(::sp_blockchain::Error::NotInFinalizedChain.into()); - } - - retracted.push(r.hash.clone()); - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - r.number - )?; - } - - // canonicalize: set the number lookup to map to this block's hash. 
- for e in tree_route.enacted() { - enacted.push(e.hash.clone()); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - e.number, - e.hash - )?; - } - } - - let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - best_to.0, - best_to.1, - )?; - - Ok((enacted, retracted)) - } - - fn ensure_sequential_finalization( - &self, - header: &Block::Header, - last_finalized: Option, - ) -> ClientResult<()> { - let last_finalized = last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); - if *header.parent_hash() != last_finalized { - return Err(::sp_blockchain::Error::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", last_finalized, header.hash()), - ).into()); - } - Ok(()) - } - - fn finalize_block_with_transaction( - &self, - transaction: &mut Transaction, - hash: &Block::Hash, - header: &Block::Header, - last_finalized: Option, - justification: Option, - changes_trie_cache_ops: &mut Option>, - finalization_displaced: &mut Option>>, - ) -> ClientResult<(Block::Hash, ::Number, bool, bool)> { - // TODO: ensure best chain contains this block. - let number = *header.number(); - self.ensure_sequential_finalization(header, last_finalized)?; - self.note_finalized( - transaction, - false, - header, - *hash, - changes_trie_cache_ops, - finalization_displaced, - )?; - - if let Some(justification) = justification { - transaction.set_from_vec( - columns::JUSTIFICATION, - &utils::number_and_hash_to_lookup_key(number, hash)?, - justification.encode(), - ); - } - Ok((*hash, number, false, true)) - } - - // performs forced canonicalization with a delay after importing a non-finalized block. 
- fn force_delayed_canonicalize( - &self, - transaction: &mut Transaction, - hash: Block::Hash, - number: NumberFor, - ) - -> ClientResult<()> - { - let number_u64 = number.saturated_into::(); - if number_u64 > self.canonicalization_delay { - let new_canonical = number_u64 - self.canonicalization_delay; - - if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { - return Ok(()) - } - - let hash = if new_canonical == number_u64 { - hash - } else { - ::sc_client::blockchain::HeaderBackend::hash(&self.blockchain, new_canonical.saturated_into())? - .expect("existence of block with number `new_canonical` \ - implies existence of blocks with all numbers before it; qed") - }; - - trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); - let commit = self.storage.state_db.canonicalize_block(&hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(transaction, commit); - }; - - Ok(()) - } - - fn try_commit_operation(&self, mut operation: BlockImportOperation) - -> ClientResult<()> - { - let mut transaction = Transaction::new(); - let mut finalization_displaced_leaves = None; - - operation.apply_aux(&mut transaction); - - let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len()); - let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; - - let mut changes_trie_cache_ops = None; - for (block, justification) in operation.finalized_blocks { - let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; - let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; - - meta_updates.push(self.finalize_block_with_transaction( - &mut transaction, - &block_hash, - &block_header, - Some(last_finalized_hash), - justification, - &mut changes_trie_cache_ops, - &mut finalization_displaced_leaves, - )?); - last_finalized_hash = block_hash; - } - - let imported = if let Some(pending_block) = operation.pending_block 
{ - let hash = pending_block.header.hash(); - let parent_hash = *pending_block.header.parent_hash(); - let number = pending_block.header.number().clone(); - - // blocks are keyed by number + hash. - let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; - - let (enacted, retracted) = if pending_block.leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))? - } else { - (Default::default(), Default::default()) - }; - - utils::insert_hash_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; - - let header_metadata = CachedHeaderMetadata::from(&pending_block.header); - self.blockchain.insert_header_metadata( - header_metadata.hash, - header_metadata, - ); - - transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); - if let Some(body) = &pending_block.body { - transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); - } - if let Some(justification) = pending_block.justification { - transaction.set_from_vec(columns::JUSTIFICATION, &lookup_key, justification.encode()); - } - - if number.is_zero() { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - - // for tests, because config is set from within the reset_storage - if operation.changes_trie_config_update.is_none() { - operation.changes_trie_config_update = Some(None); - } - } - - let finalized = if operation.commit_state { - let mut changeset: sc_state_db::ChangeSet> = sc_state_db::ChangeSet::default(); - let mut ops: u64 = 0; - let mut bytes: u64 = 0; - let mut removal: u64 = 0; - let mut bytes_removal: u64 = 0; - for (key, (val, rc)) in operation.db_updates.drain() { - if rc > 0 { - ops += 1; - bytes += key.len() as u64 + val.len() as u64; - - changeset.inserted.push((key, val.to_vec())); - } else if rc < 0 { - removal += 1; - bytes_removal += key.len() as u64; - - 
changeset.deleted.push(key); - } - } - self.state_usage.tally_writes_nodes(ops, bytes); - self.state_usage.tally_removed_nodes(removal, bytes_removal); - - let mut ops: u64 = 0; - let mut bytes: u64 = 0; - for (key, value) in operation.storage_updates.iter() - .chain(operation.child_storage_updates.iter().flat_map(|(_, s)| s.iter())) { - ops += 1; - bytes += key.len() as u64; - if let Some(v) = value.as_ref() { - bytes += v.len() as u64; - } - } - self.state_usage.tally_writes(ops, bytes); - let number_u64 = number.saturated_into::(); - let commit = self.storage.state_db.insert_block( - &hash, - number_u64, - &pending_block.header.parent_hash(), - changeset, - ).map_err(|e: sc_state_db::Error| - sp_blockchain::Error::from(format!("State database error: {:?}", e)) - )?; - apply_state_commit(&mut transaction, commit); - - // Check if need to finalize. Genesis is always finalized instantly. - let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); - finalized - } else { - false - }; - - let header = &pending_block.header; - let is_best = pending_block.leaf_state.is_best(); - let changes_trie_updates = operation.changes_trie_updates; - let changes_trie_config_update = operation.changes_trie_config_update; - changes_trie_cache_ops = Some(self.changes_tries_storage.commit( - &mut transaction, - changes_trie_updates, - cache::ComplexBlockId::new( - *header.parent_hash(), - if number.is_zero() { Zero::zero() } else { number - One::one() }, - ), - cache::ComplexBlockId::new(hash, number), - header, - finalized, - changes_trie_config_update, - changes_trie_cache_ops, - )?); - self.state_usage.merge_sm(operation.old_state.usage_info()); - // release state reference so that it can be finalized - let cache = operation.old_state.into_cache_changes(); - - if finalized { - // TODO: ensure best chain contains this block. 
- self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; - self.note_finalized( - &mut transaction, - true, - header, - hash, - &mut changes_trie_cache_ops, - &mut finalization_displaced_leaves, - )?; - } else { - // canonicalize blocks which are old enough, regardless of finality. - self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? - } - - debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best); - - let displaced_leaf = { - let mut leaves = self.blockchain.leaves.write(); - let displaced_leaf = leaves.import(hash, number, parent_hash); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - - displaced_leaf - }; - - let mut children = children::read_children( - &*self.storage.db, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - )?; - children.push(hash); - children::write_children( - &mut transaction, - columns::META, - meta_keys::CHILDREN_PREFIX, - parent_hash, - children, - ); - - meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); - - Some((number, hash, enacted, retracted, displaced_leaf, is_best, cache)) - } else { - None - }; - - let cache_update = if let Some(set_head) = operation.set_head { - if let Some(header) = sc_client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? 
{ - let number = header.number(); - let hash = header.hash(); - - let (enacted, retracted) = self.set_head_with_transaction( - &mut transaction, - hash.clone(), - (number.clone(), hash.clone()) - )?; - meta_updates.push((hash, *number, true, false)); - Some((enacted, retracted)) - } else { - return Err(sp_blockchain::Error::UnknownBlock(format!("Cannot set head {:?}", set_head))) - } - } else { - None - }; - - self.storage.db.commit(transaction); - - if let Some(( - number, - hash, - enacted, - retracted, - _displaced_leaf, - is_best, - mut cache, - )) = imported { - cache.sync_cache( - &enacted, - &retracted, - operation.storage_updates, - operation.child_storage_updates, - Some(hash), - Some(number), - is_best, - ); - } - - if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { - self.changes_tries_storage.commit_build_cache(changes_trie_build_cache_update); - } - self.changes_tries_storage.post_commit(changes_trie_cache_ops); - - if let Some((enacted, retracted)) = cache_update { - self.shared_cache.lock().sync(&enacted, &retracted); - } - - for (hash, number, is_best, is_finalized) in meta_updates { - self.blockchain.update_meta(hash, number, is_best, is_finalized); - } - - Ok(()) - } - - // write stuff to a transaction after a new block is finalized. - // this canonicalizes finalized blocks. Fails if called with a block which - // was not a child of the last finalized block. 
- fn note_finalized( - &self, - transaction: &mut Transaction, - is_inserted: bool, - f_header: &Block::Header, - f_hash: Block::Hash, - changes_trie_cache_ops: &mut Option>, - displaced: &mut Option>> - ) -> ClientResult<()> { - let f_num = f_header.number().clone(); - - if self.storage.state_db.best_canonical().map(|c| f_num.saturated_into::() > c).unwrap_or(true) { - let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - - let commit = self.storage.state_db.canonicalize_block(&f_hash) - .map_err(|e: sc_state_db::Error| sp_blockchain::Error::from(format!("State database error: {:?}", e)))?; - apply_state_commit(transaction, commit); - - if !f_num.is_zero() { - let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( - transaction, - *f_header.parent_hash(), - f_hash, - f_num, - if is_inserted { Some(&f_header) } else { None }, - changes_trie_cache_ops.take(), - )?; - *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); - } - } - - let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); - match displaced { - x @ &mut None => *x = Some(new_displaced), - &mut Some(ref mut displaced) => displaced.merge(new_displaced), - } - - Ok(()) - } + /// Create a new instance of database backend. + /// + /// The pruning window is how old a block must be before the state is pruned. + pub fn new(config: DatabaseSettings, canonicalization_delay: u64) -> ClientResult { + let db = crate::utils::open_database::(&config, DatabaseType::Full)?; + Self::from_database(db as Arc<_>, canonicalization_delay, &config) + } + + /// Create new memory-backed client backend for tests. 
+ #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test(keep_blocks: u32, canonicalization_delay: u64) -> Self { + let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); + let db = sp_database::as_database(db); + let db_setting = DatabaseSettings { + state_cache_size: 16777216, + state_cache_child_ratio: Some((50, 100)), + pruning: PruningMode::keep_blocks(keep_blocks), + source: DatabaseSettingsSrc::Custom(db), + }; + + Self::new(db_setting, canonicalization_delay).expect("failed to create test-db") + } + + fn from_database( + db: Arc>, + canonicalization_delay: u64, + config: &DatabaseSettings, + ) -> ClientResult { + let is_archive_pruning = config.pruning.is_archive(); + let blockchain = BlockchainDb::new(db.clone())?; + let meta = blockchain.meta.clone(); + let map_e = |e: sc_state_db::Error| { + sp_blockchain::Error::from(format!("State database error: {:?}", e)) + }; + let state_db: StateDb<_, _> = + StateDb::new(config.pruning.clone(), &StateMetaDb(&*db)).map_err(map_e)?; + let storage_db = StorageDb { + db: db.clone(), + state_db, + }; + let offchain_storage = offchain::LocalStorage::new(db.clone()); + let changes_tries_storage = DbChangesTrieStorage::new( + db, + columns::META, + columns::CHANGES_TRIE, + columns::KEY_LOOKUP, + columns::HEADER, + columns::CACHE, + meta, + if is_archive_pruning { + None + } else { + Some(MIN_BLOCKS_TO_KEEP_CHANGES_TRIES_FOR) + }, + )?; + + Ok(Backend { + storage: Arc::new(storage_db), + offchain_storage, + changes_tries_storage, + blockchain, + canonicalization_delay, + shared_cache: new_shared_cache( + config.state_cache_size, + config + .state_cache_child_ratio + .unwrap_or(DEFAULT_CHILD_RATIO), + ), + import_lock: Default::default(), + is_archive: is_archive_pruning, + io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), + state_usage: Arc::new(StateUsageStats::new()), + }) + } + + /// Handle setting head within a transaction. 
`route_to` should be the last + /// block that existed in the database. `best_to` should be the best block + /// to be set. + /// + /// In the case where the new best block is a block to be imported, `route_to` + /// should be the parent of `best_to`. In the case where we set an existing block + /// to be best, `route_to` should equal to `best_to`. + fn set_head_with_transaction( + &self, + transaction: &mut Transaction, + route_to: Block::Hash, + best_to: (NumberFor, Block::Hash), + ) -> ClientResult<(Vec, Vec)> { + let mut enacted = Vec::default(); + let mut retracted = Vec::default(); + + let meta = self.blockchain.meta.read(); + + // cannot find tree route with empty DB. + if meta.best_hash != Default::default() { + let tree_route = sp_blockchain::tree_route(&self.blockchain, meta.best_hash, route_to)?; + + // uncanonicalize: check safety violations and ensure the numbers no longer + // point to these block hashes in the key mapping. + for r in tree_route.retracted() { + if r.hash == meta.finalized_hash { + warn!( + "Potential safety failure: reverting finalized block {:?}", + (&r.number, &r.hash) + ); + + return Err(::sp_blockchain::Error::NotInFinalizedChain.into()); + } + + retracted.push(r.hash.clone()); + utils::remove_number_to_key_mapping(transaction, columns::KEY_LOOKUP, r.number)?; + } + + // canonicalize: set the number lookup to map to this block's hash. 
+ for e in tree_route.enacted() { + enacted.push(e.hash.clone()); + utils::insert_number_to_key_mapping( + transaction, + columns::KEY_LOOKUP, + e.number, + e.hash, + )?; + } + } + + let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; + transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); + utils::insert_number_to_key_mapping( + transaction, + columns::KEY_LOOKUP, + best_to.0, + best_to.1, + )?; + + Ok((enacted, retracted)) + } + + fn ensure_sequential_finalization( + &self, + header: &Block::Header, + last_finalized: Option, + ) -> ClientResult<()> { + let last_finalized = + last_finalized.unwrap_or_else(|| self.blockchain.meta.read().finalized_hash); + if *header.parent_hash() != last_finalized { + return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( + "Last finalized {:?} not parent of {:?}", + last_finalized, + header.hash() + )) + .into()); + } + Ok(()) + } + + fn finalize_block_with_transaction( + &self, + transaction: &mut Transaction, + hash: &Block::Hash, + header: &Block::Header, + last_finalized: Option, + justification: Option, + changes_trie_cache_ops: &mut Option>, + finalization_displaced: &mut Option>>, + ) -> ClientResult<(Block::Hash, ::Number, bool, bool)> { + // TODO: ensure best chain contains this block. + let number = *header.number(); + self.ensure_sequential_finalization(header, last_finalized)?; + self.note_finalized( + transaction, + false, + header, + *hash, + changes_trie_cache_ops, + finalization_displaced, + )?; + + if let Some(justification) = justification { + transaction.set_from_vec( + columns::JUSTIFICATION, + &utils::number_and_hash_to_lookup_key(number, hash)?, + justification.encode(), + ); + } + Ok((*hash, number, false, true)) + } + + // performs forced canonicalization with a delay after importing a non-finalized block. 
+ fn force_delayed_canonicalize( + &self, + transaction: &mut Transaction, + hash: Block::Hash, + number: NumberFor, + ) -> ClientResult<()> { + let number_u64 = number.saturated_into::(); + if number_u64 > self.canonicalization_delay { + let new_canonical = number_u64 - self.canonicalization_delay; + + if new_canonical <= self.storage.state_db.best_canonical().unwrap_or(0) { + return Ok(()); + } + + let hash = if new_canonical == number_u64 { + hash + } else { + ::sc_client::blockchain::HeaderBackend::hash( + &self.blockchain, + new_canonical.saturated_into(), + )? + .expect( + "existence of block with number `new_canonical` \ + implies existence of blocks with all numbers before it; qed", + ) + }; + + trace!(target: "db", "Canonicalize block #{} ({:?})", new_canonical, hash); + let commit = self.storage.state_db.canonicalize_block(&hash).map_err( + |e: sc_state_db::Error| { + sp_blockchain::Error::from(format!("State database error: {:?}", e)) + }, + )?; + apply_state_commit(transaction, commit); + }; + + Ok(()) + } + + fn try_commit_operation(&self, mut operation: BlockImportOperation) -> ClientResult<()> { + let mut transaction = Transaction::new(); + let mut finalization_displaced_leaves = None; + + operation.apply_aux(&mut transaction); + + let mut meta_updates = Vec::with_capacity(operation.finalized_blocks.len()); + let mut last_finalized_hash = self.blockchain.meta.read().finalized_hash; + + let mut changes_trie_cache_ops = None; + for (block, justification) in operation.finalized_blocks { + let block_hash = self.blockchain.expect_block_hash_from_id(&block)?; + let block_header = self.blockchain.expect_header(BlockId::Hash(block_hash))?; + + meta_updates.push(self.finalize_block_with_transaction( + &mut transaction, + &block_hash, + &block_header, + Some(last_finalized_hash), + justification, + &mut changes_trie_cache_ops, + &mut finalization_displaced_leaves, + )?); + last_finalized_hash = block_hash; + } + + let imported = if let Some(pending_block) = 
operation.pending_block { + let hash = pending_block.header.hash(); + let parent_hash = *pending_block.header.parent_hash(); + let number = pending_block.header.number().clone(); + + // blocks are keyed by number + hash. + let lookup_key = utils::number_and_hash_to_lookup_key(number, hash)?; + + let (enacted, retracted) = if pending_block.leaf_state.is_best() { + self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))? + } else { + (Default::default(), Default::default()) + }; + + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; + + let header_metadata = CachedHeaderMetadata::from(&pending_block.header); + self.blockchain + .insert_header_metadata(header_metadata.hash, header_metadata); + + transaction.set_from_vec(columns::HEADER, &lookup_key, pending_block.header.encode()); + if let Some(body) = &pending_block.body { + transaction.set_from_vec(columns::BODY, &lookup_key, body.encode()); + } + if let Some(justification) = pending_block.justification { + transaction.set_from_vec( + columns::JUSTIFICATION, + &lookup_key, + justification.encode(), + ); + } + + if number.is_zero() { + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + + // for tests, because config is set from within the reset_storage + if operation.changes_trie_config_update.is_none() { + operation.changes_trie_config_update = Some(None); + } + } + + let finalized = if operation.commit_state { + let mut changeset: sc_state_db::ChangeSet> = + sc_state_db::ChangeSet::default(); + let mut ops: u64 = 0; + let mut bytes: u64 = 0; + let mut removal: u64 = 0; + let mut bytes_removal: u64 = 0; + for (key, (val, rc)) in operation.db_updates.drain() { + if rc > 0 { + ops += 1; + bytes += key.len() as u64 + val.len() as u64; + + changeset.inserted.push((key, val.to_vec())); + } else if rc < 0 { + removal += 1; + bytes_removal += key.len() as u64; 
+ + changeset.deleted.push(key); + } + } + self.state_usage.tally_writes_nodes(ops, bytes); + self.state_usage.tally_removed_nodes(removal, bytes_removal); + + let mut ops: u64 = 0; + let mut bytes: u64 = 0; + for (key, value) in operation.storage_updates.iter().chain( + operation + .child_storage_updates + .iter() + .flat_map(|(_, s)| s.iter()), + ) { + ops += 1; + bytes += key.len() as u64; + if let Some(v) = value.as_ref() { + bytes += v.len() as u64; + } + } + self.state_usage.tally_writes(ops, bytes); + let number_u64 = number.saturated_into::(); + let commit = self + .storage + .state_db + .insert_block( + &hash, + number_u64, + &pending_block.header.parent_hash(), + changeset, + ) + .map_err(|e: sc_state_db::Error| { + sp_blockchain::Error::from(format!("State database error: {:?}", e)) + })?; + apply_state_commit(&mut transaction, commit); + + // Check if need to finalize. Genesis is always finalized instantly. + let finalized = number_u64 == 0 || pending_block.leaf_state.is_final(); + finalized + } else { + false + }; + + let header = &pending_block.header; + let is_best = pending_block.leaf_state.is_best(); + let changes_trie_updates = operation.changes_trie_updates; + let changes_trie_config_update = operation.changes_trie_config_update; + changes_trie_cache_ops = Some(self.changes_tries_storage.commit( + &mut transaction, + changes_trie_updates, + cache::ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { + Zero::zero() + } else { + number - One::one() + }, + ), + cache::ComplexBlockId::new(hash, number), + header, + finalized, + changes_trie_config_update, + changes_trie_cache_ops, + )?); + self.state_usage.merge_sm(operation.old_state.usage_info()); + // release state reference so that it can be finalized + let cache = operation.old_state.into_cache_changes(); + + if finalized { + // TODO: ensure best chain contains this block. 
+ self.ensure_sequential_finalization(header, Some(last_finalized_hash))?; + self.note_finalized( + &mut transaction, + true, + header, + hash, + &mut changes_trie_cache_ops, + &mut finalization_displaced_leaves, + )?; + } else { + // canonicalize blocks which are old enough, regardless of finality. + self.force_delayed_canonicalize(&mut transaction, hash, *header.number())? + } + + debug!(target: "db", "DB Commit {:?} ({}), best = {}", hash, number, is_best); + + let displaced_leaf = { + let mut leaves = self.blockchain.leaves.write(); + let displaced_leaf = leaves.import(hash, number, parent_hash); + leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); + + displaced_leaf + }; + + let mut children = children::read_children( + &*self.storage.db, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + )?; + children.push(hash); + children::write_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + parent_hash, + children, + ); + + meta_updates.push((hash, number, pending_block.leaf_state.is_best(), finalized)); + + Some(( + number, + hash, + enacted, + retracted, + displaced_leaf, + is_best, + cache, + )) + } else { + None + }; + + let cache_update = if let Some(set_head) = operation.set_head { + if let Some(header) = + sc_client::blockchain::HeaderBackend::header(&self.blockchain, set_head)? 
+ { + let number = header.number(); + let hash = header.hash(); + + let (enacted, retracted) = self.set_head_with_transaction( + &mut transaction, + hash.clone(), + (number.clone(), hash.clone()), + )?; + meta_updates.push((hash, *number, true, false)); + Some((enacted, retracted)) + } else { + return Err(sp_blockchain::Error::UnknownBlock(format!( + "Cannot set head {:?}", + set_head + ))); + } + } else { + None + }; + + self.storage.db.commit(transaction); + + if let Some((number, hash, enacted, retracted, _displaced_leaf, is_best, mut cache)) = + imported + { + cache.sync_cache( + &enacted, + &retracted, + operation.storage_updates, + operation.child_storage_updates, + Some(hash), + Some(number), + is_best, + ); + } + + if let Some(changes_trie_build_cache_update) = operation.changes_trie_build_cache_update { + self.changes_tries_storage + .commit_build_cache(changes_trie_build_cache_update); + } + self.changes_tries_storage + .post_commit(changes_trie_cache_ops); + + if let Some((enacted, retracted)) = cache_update { + self.shared_cache.lock().sync(&enacted, &retracted); + } + + for (hash, number, is_best, is_finalized) in meta_updates { + self.blockchain + .update_meta(hash, number, is_best, is_finalized); + } + + Ok(()) + } + + // write stuff to a transaction after a new block is finalized. + // this canonicalizes finalized blocks. Fails if called with a block which + // was not a child of the last finalized block. 
+ fn note_finalized( + &self, + transaction: &mut Transaction, + is_inserted: bool, + f_header: &Block::Header, + f_hash: Block::Hash, + changes_trie_cache_ops: &mut Option>, + displaced: &mut Option>>, + ) -> ClientResult<()> { + let f_num = f_header.number().clone(); + + if self + .storage + .state_db + .best_canonical() + .map(|c| f_num.saturated_into::() > c) + .unwrap_or(true) + { + let lookup_key = utils::number_and_hash_to_lookup_key(f_num, f_hash.clone())?; + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + + let commit = self.storage.state_db.canonicalize_block(&f_hash).map_err( + |e: sc_state_db::Error| { + sp_blockchain::Error::from(format!("State database error: {:?}", e)) + }, + )?; + apply_state_commit(transaction, commit); + + if !f_num.is_zero() { + let new_changes_trie_cache_ops = self.changes_tries_storage.finalize( + transaction, + *f_header.parent_hash(), + f_hash, + f_num, + if is_inserted { Some(&f_header) } else { None }, + changes_trie_cache_ops.take(), + )?; + *changes_trie_cache_ops = Some(new_changes_trie_cache_ops); + } + } + + let new_displaced = self.blockchain.leaves.write().finalize_height(f_num); + match displaced { + x @ &mut None => *x = Some(new_displaced), + &mut Some(ref mut displaced) => displaced.merge(new_displaced), + } + + Ok(()) + } } -fn apply_state_commit(transaction: &mut Transaction, commit: sc_state_db::CommitSet>) { - for (key, val) in commit.data.inserted.into_iter() { - transaction.set_from_vec(columns::STATE, &key[..], val); - } - for key in commit.data.deleted.into_iter() { - transaction.remove(columns::STATE, &key[..]); - } - for (key, val) in commit.meta.inserted.into_iter() { - transaction.set_from_vec(columns::STATE_META, &key[..], val); - } - for key in commit.meta.deleted.into_iter() { - transaction.remove(columns::STATE_META, &key[..]); - } +fn apply_state_commit( + transaction: &mut Transaction, + commit: sc_state_db::CommitSet>, +) { + for (key, val) in 
commit.data.inserted.into_iter() { + transaction.set_from_vec(columns::STATE, &key[..], val); + } + for key in commit.data.deleted.into_iter() { + transaction.remove(columns::STATE, &key[..]); + } + for (key, val) in commit.meta.inserted.into_iter() { + transaction.set_from_vec(columns::STATE_META, &key[..], val); + } + for key in commit.meta.deleted.into_iter() { + transaction.remove(columns::STATE_META, &key[..]); + } } -impl sc_client_api::backend::AuxStore for Backend where Block: BlockT { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { - let mut transaction = Transaction::new(); - for (k, v) in insert { - transaction.set(columns::AUX, k, v); - } - for k in delete { - transaction.remove(columns::AUX, k); - } - self.storage.db.commit(transaction); - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.storage.db.get(columns::AUX, key)) - } +impl sc_client_api::backend::AuxStore for Backend +where + Block: BlockT, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { + let mut transaction = Transaction::new(); + for (k, v) in insert { + transaction.set(columns::AUX, k, v); + } + for k in delete { + transaction.remove(columns::AUX, k); + } + self.storage.db.commit(transaction); + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + Ok(self.storage.db.get(columns::AUX, key)) + } } impl sc_client_api::backend::Backend for Backend { - type BlockImportOperation = BlockImportOperation; - type Blockchain = BlockchainDb; - type State = SyncingCachingState, Block>; - type OffchainStorage = offchain::LocalStorage; - - fn begin_operation(&self) -> ClientResult { - let mut old_state = self.state_at(BlockId::Hash(Default::default()))?; - old_state.disable_syncing(); - - Ok(BlockImportOperation { - pending_block: None, - old_state, - db_updates: 
PrefixedMemoryDB::default(), - storage_updates: Default::default(), - child_storage_updates: Default::default(), - changes_trie_config_update: None, - changes_trie_updates: MemoryDB::default(), - changes_trie_build_cache_update: None, - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - commit_state: false, - }) - } - - fn begin_state_operation( - &self, - operation: &mut Self::BlockImportOperation, - block: BlockId, - ) -> ClientResult<()> { - operation.old_state = self.state_at(block)?; - operation.old_state.disable_syncing(); - - operation.commit_state = true; - Ok(()) - } - - fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { - let usage = operation.old_state.usage_info(); - self.state_usage.merge_sm(usage); - - match self.try_commit_operation(operation) { - Ok(_) => { - self.storage.state_db.apply_pending(); - Ok(()) - }, - e @ Err(_) => { - self.storage.state_db.revert_pending(); - e - } - } - } - - fn finalize_block(&self, block: BlockId, justification: Option) - -> ClientResult<()> - { - let mut transaction = Transaction::new(); - let hash = self.blockchain.expect_block_hash_from_id(&block)?; - let header = self.blockchain.expect_header(block)?; - let mut displaced = None; - - let mut changes_trie_cache_ops = None; - let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( - &mut transaction, - &hash, - &header, - None, - justification, - &mut changes_trie_cache_ops, - &mut displaced, - )?; - self.storage.db.commit(transaction); - self.blockchain.update_meta(hash, number, is_best, is_finalized); - self.changes_tries_storage.post_commit(changes_trie_cache_ops); - Ok(()) - } - - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { - Some(&self.changes_tries_storage) - } - - fn offchain_storage(&self) -> Option { - Some(self.offchain_storage.clone()) - } - - fn usage_info(&self) -> Option { - let (io_stats, state_stats) = 
self.io_stats.take_or_else(|| - ( - // TODO: implement DB stats and cache size retrieval - kvdb::IoStats::empty(), - self.state_usage.take(), - ) - ); - let database_cache = MemorySize::from_bytes(0); - let state_cache = MemorySize::from_bytes( - (*&self.shared_cache).lock().used_storage_cache_size(), - ); - let state_db = self.storage.state_db.memory_info(); - - Some(UsageInfo { - memory: MemoryInfo { - state_cache, - database_cache, - state_db, - }, - io: IoInfo { - transactions: io_stats.transactions, - bytes_read: io_stats.bytes_read, - bytes_written: io_stats.bytes_written, - writes: io_stats.writes, - reads: io_stats.reads, - average_transaction_size: io_stats.avg_transaction_size() as u64, - state_reads: state_stats.reads.ops, - state_writes: state_stats.writes.ops, - state_writes_cache: state_stats.overlay_writes.ops, - state_reads_cache: state_stats.cache_reads.ops, - state_writes_nodes: state_stats.nodes_writes.ops, - }, - }) - } - - fn revert(&self, n: NumberFor, revert_finalized: bool) -> ClientResult> { - let mut best_number = self.blockchain.info().best_number; - let mut best_hash = self.blockchain.info().best_hash; - - let finalized = self.blockchain.info().finalized_number; - - let revertible = best_number - finalized; - let n = if !revert_finalized && revertible < n { - revertible - } else { - n - }; - - let mut revert_blocks = || -> ClientResult> { - for c in 0 .. n.saturated_into::() { - if best_number.is_zero() { - return Ok(c.saturated_into::>()) - } - let mut transaction = Transaction::new(); - match self.storage.state_db.revert_one() { - Some(commit) => { - apply_state_commit(&mut transaction, commit); - let removed_number = best_number; - let removed = self.blockchain.header(BlockId::Number(best_number))?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. 
Block hash not found.", best_number)))?; - - best_number -= One::one(); // prev block - best_hash = self.blockchain.hash(best_number)?.ok_or_else( - || sp_blockchain::Error::UnknownBlock( - format!("Error reverting to {}. Block hash not found.", best_number)))?; - - let update_finalized = best_number < finalized; - - let key = utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; - let changes_trie_cache_ops = self.changes_tries_storage.revert( - &mut transaction, - &cache::ComplexBlockId::new( - removed.hash(), - removed_number, - ), - )?; - if update_finalized { - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, key.clone()); - } - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); - transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); - children::remove_children(&mut transaction, columns::META, meta_keys::CHILDREN_PREFIX, best_hash); - self.storage.db.commit(transaction); - self.changes_tries_storage.post_commit(Some(changes_trie_cache_ops)); - self.blockchain.update_meta(best_hash, best_number, true, update_finalized); - } - None => return Ok(c.saturated_into::>()) - } - } - - Ok(n) - }; - - let reverted = revert_blocks()?; - - let revert_leaves = || -> ClientResult<()> { - let mut transaction = Transaction::new(); - let mut leaves = self.blockchain.leaves.write(); - - leaves.revert(best_hash, best_number); - leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); - self.storage.db.commit(transaction); - - Ok(()) - }; - - revert_leaves()?; - - Ok(reverted) - } - - fn blockchain(&self) -> &BlockchainDb { - &self.blockchain - } - - fn state_at(&self, block: BlockId) -> ClientResult { - use sc_client::blockchain::HeaderBackend as BcHeaderBackend; - - // special case for genesis initialization - match block { - BlockId::Hash(h) if h == Default::default() => { - let genesis_storage = DbGenesisStorage::::new(); - let root = genesis_storage.0.clone(); - let db_state = 
DbState::::new(Arc::new(genesis_storage), root); - let state = RefTrackingState::new(db_state, self.storage.clone(), None); - let caching_state = CachingState::new( - state, - self.shared_cache.clone(), - None, - ); - return Ok(SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - )); - }, - _ => {} - } - - match self.blockchain.header(block) { - Ok(Some(ref hdr)) => { - let hash = hdr.hash(); - if !self.have_state_at(&hash, *hdr.number()) { - return Err( - sp_blockchain::Error::UnknownBlock( - format!("State already discarded for {:?}", block) - ) - ) - } - if let Ok(()) = self.storage.state_db.pin(&hash) { - let root = hdr.state_root(); - let db_state = DbState::::new(self.storage.clone(), *root); - let state = RefTrackingState::new( - db_state, - self.storage.clone(), - Some(hash.clone()), - ); - let caching_state = CachingState::new( - state, - self.shared_cache.clone(), - Some(hash), - ); - Ok(SyncingCachingState::new( - caching_state, - self.state_usage.clone(), - self.blockchain.meta.clone(), - self.import_lock.clone(), - )) - } else { - Err( - sp_blockchain::Error::UnknownBlock( - format!("State already discarded for {:?}", block) - ) - ) - } - }, - Ok(None) => Err( - sp_blockchain::Error::UnknownBlock( - format!("Unknown state for block {:?}", block) - ) - ), - Err(e) => Err(e), - } - } - - fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { - if self.is_archive { - match self.blockchain.header(BlockId::Hash(hash.clone())) { - Ok(Some(header)) => { - sp_state_machine::Storage::get( - self.storage.as_ref(), - &header.state_root(), - (&[], None), - ).unwrap_or(None).is_some() - }, - _ => false, - } - } else { - !self.storage.state_db.is_pruned(hash, number.saturated_into::()) - } - } - - fn get_import_lock(&self) -> &RwLock<()> { - &*self.import_lock - } + type BlockImportOperation = BlockImportOperation; + type Blockchain = BlockchainDb; + type State = 
SyncingCachingState, Block>; + type OffchainStorage = offchain::LocalStorage; + + fn begin_operation(&self) -> ClientResult { + let mut old_state = self.state_at(BlockId::Hash(Default::default()))?; + old_state.disable_syncing(); + + Ok(BlockImportOperation { + pending_block: None, + old_state, + db_updates: PrefixedMemoryDB::default(), + storage_updates: Default::default(), + child_storage_updates: Default::default(), + changes_trie_config_update: None, + changes_trie_updates: MemoryDB::default(), + changes_trie_build_cache_update: None, + aux_ops: Vec::new(), + finalized_blocks: Vec::new(), + set_head: None, + commit_state: false, + }) + } + + fn begin_state_operation( + &self, + operation: &mut Self::BlockImportOperation, + block: BlockId, + ) -> ClientResult<()> { + operation.old_state = self.state_at(block)?; + operation.old_state.disable_syncing(); + + operation.commit_state = true; + Ok(()) + } + + fn commit_operation(&self, operation: Self::BlockImportOperation) -> ClientResult<()> { + let usage = operation.old_state.usage_info(); + self.state_usage.merge_sm(usage); + + match self.try_commit_operation(operation) { + Ok(_) => { + self.storage.state_db.apply_pending(); + Ok(()) + } + e @ Err(_) => { + self.storage.state_db.revert_pending(); + e + } + } + } + + fn finalize_block( + &self, + block: BlockId, + justification: Option, + ) -> ClientResult<()> { + let mut transaction = Transaction::new(); + let hash = self.blockchain.expect_block_hash_from_id(&block)?; + let header = self.blockchain.expect_header(block)?; + let mut displaced = None; + + let mut changes_trie_cache_ops = None; + let (hash, number, is_best, is_finalized) = self.finalize_block_with_transaction( + &mut transaction, + &hash, + &header, + None, + justification, + &mut changes_trie_cache_ops, + &mut displaced, + )?; + self.storage.db.commit(transaction); + self.blockchain + .update_meta(hash, number, is_best, is_finalized); + self.changes_tries_storage + 
.post_commit(changes_trie_cache_ops); + Ok(()) + } + + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { + Some(&self.changes_tries_storage) + } + + fn offchain_storage(&self) -> Option { + Some(self.offchain_storage.clone()) + } + + fn usage_info(&self) -> Option { + let (io_stats, state_stats) = self.io_stats.take_or_else(|| { + ( + // TODO: implement DB stats and cache size retrieval + kvdb::IoStats::empty(), + self.state_usage.take(), + ) + }); + let database_cache = MemorySize::from_bytes(0); + let state_cache = + MemorySize::from_bytes((*&self.shared_cache).lock().used_storage_cache_size()); + let state_db = self.storage.state_db.memory_info(); + + Some(UsageInfo { + memory: MemoryInfo { + state_cache, + database_cache, + state_db, + }, + io: IoInfo { + transactions: io_stats.transactions, + bytes_read: io_stats.bytes_read, + bytes_written: io_stats.bytes_written, + writes: io_stats.writes, + reads: io_stats.reads, + average_transaction_size: io_stats.avg_transaction_size() as u64, + state_reads: state_stats.reads.ops, + state_writes: state_stats.writes.ops, + state_writes_cache: state_stats.overlay_writes.ops, + state_reads_cache: state_stats.cache_reads.ops, + state_writes_nodes: state_stats.nodes_writes.ops, + }, + }) + } + + fn revert( + &self, + n: NumberFor, + revert_finalized: bool, + ) -> ClientResult> { + let mut best_number = self.blockchain.info().best_number; + let mut best_hash = self.blockchain.info().best_hash; + + let finalized = self.blockchain.info().finalized_number; + + let revertible = best_number - finalized; + let n = if !revert_finalized && revertible < n { + revertible + } else { + n + }; + + let mut revert_blocks = || -> ClientResult> { + for c in 0..n.saturated_into::() { + if best_number.is_zero() { + return Ok(c.saturated_into::>()); + } + let mut transaction = Transaction::new(); + match self.storage.state_db.revert_one() { + Some(commit) => { + apply_state_commit(&mut transaction, commit); + let 
removed_number = best_number; + let removed = self + .blockchain + .header(BlockId::Number(best_number))? + .ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best_number + )) + })?; + + best_number -= One::one(); // prev block + best_hash = self.blockchain.hash(best_number)?.ok_or_else(|| { + sp_blockchain::Error::UnknownBlock(format!( + "Error reverting to {}. Block hash not found.", + best_number + )) + })?; + + let update_finalized = best_number < finalized; + + let key = + utils::number_and_hash_to_lookup_key(best_number.clone(), &best_hash)?; + let changes_trie_cache_ops = self.changes_tries_storage.revert( + &mut transaction, + &cache::ComplexBlockId::new(removed.hash(), removed_number), + )?; + if update_finalized { + transaction.set_from_vec( + columns::META, + meta_keys::FINALIZED_BLOCK, + key.clone(), + ); + } + transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, key); + transaction.remove(columns::KEY_LOOKUP, removed.hash().as_ref()); + children::remove_children( + &mut transaction, + columns::META, + meta_keys::CHILDREN_PREFIX, + best_hash, + ); + self.storage.db.commit(transaction); + self.changes_tries_storage + .post_commit(Some(changes_trie_cache_ops)); + self.blockchain + .update_meta(best_hash, best_number, true, update_finalized); + } + None => return Ok(c.saturated_into::>()), + } + } + + Ok(n) + }; + + let reverted = revert_blocks()?; + + let revert_leaves = || -> ClientResult<()> { + let mut transaction = Transaction::new(); + let mut leaves = self.blockchain.leaves.write(); + + leaves.revert(best_hash, best_number); + leaves.prepare_transaction(&mut transaction, columns::META, meta_keys::LEAF_PREFIX); + self.storage.db.commit(transaction); + + Ok(()) + }; + + revert_leaves()?; + + Ok(reverted) + } + + fn blockchain(&self) -> &BlockchainDb { + &self.blockchain + } + + fn state_at(&self, block: BlockId) -> ClientResult { + use sc_client::blockchain::HeaderBackend as 
BcHeaderBackend; + + // special case for genesis initialization + match block { + BlockId::Hash(h) if h == Default::default() => { + let genesis_storage = DbGenesisStorage::::new(); + let root = genesis_storage.0.clone(); + let db_state = DbState::::new(Arc::new(genesis_storage), root); + let state = RefTrackingState::new(db_state, self.storage.clone(), None); + let caching_state = CachingState::new(state, self.shared_cache.clone(), None); + return Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )); + } + _ => {} + } + + match self.blockchain.header(block) { + Ok(Some(ref hdr)) => { + let hash = hdr.hash(); + if !self.have_state_at(&hash, *hdr.number()) { + return Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + block + ))); + } + if let Ok(()) = self.storage.state_db.pin(&hash) { + let root = hdr.state_root(); + let db_state = DbState::::new(self.storage.clone(), *root); + let state = + RefTrackingState::new(db_state, self.storage.clone(), Some(hash.clone())); + let caching_state = + CachingState::new(state, self.shared_cache.clone(), Some(hash)); + Ok(SyncingCachingState::new( + caching_state, + self.state_usage.clone(), + self.blockchain.meta.clone(), + self.import_lock.clone(), + )) + } else { + Err(sp_blockchain::Error::UnknownBlock(format!( + "State already discarded for {:?}", + block + ))) + } + } + Ok(None) => Err(sp_blockchain::Error::UnknownBlock(format!( + "Unknown state for block {:?}", + block + ))), + Err(e) => Err(e), + } + } + + fn have_state_at(&self, hash: &Block::Hash, number: NumberFor) -> bool { + if self.is_archive { + match self.blockchain.header(BlockId::Hash(hash.clone())) { + Ok(Some(header)) => sp_state_machine::Storage::get( + self.storage.as_ref(), + &header.state_root(), + (&[], None), + ) + .unwrap_or(None) + .is_some(), + _ => false, + } + } else { + !self + .storage + .state_db + .is_pruned(hash, 
number.saturated_into::()) + } + } + + fn get_import_lock(&self) -> &RwLock<()> { + &*self.import_lock + } } impl sc_client_api::backend::LocalBackend for Backend {} #[cfg(test)] pub(crate) mod tests { - use hash_db::{HashDB, EMPTY_PREFIX}; - use super::*; - use crate::columns; - use sp_core::H256; - use sc_client_api::backend::{Backend as BTrait, BlockImportOperation as Op}; - use sc_client::blockchain::Backend as BLBTrait; - use sp_runtime::testing::{Header, Block as RawBlock, ExtrinsicWrapper}; - use sp_runtime::traits::{Hash, BlakeTwo256}; - use sp_runtime::generic::DigestItem; - use sp_state_machine::{TrieMut, TrieDBMut}; - use sp_blockchain::{lowest_common_ancestor, tree_route}; - - pub(crate) type Block = RawBlock>; - - pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { - let mut changes_root = H256::default(); - let mut changes_trie_update = MemoryDB::::default(); - { - let mut trie = TrieDBMut::::new( - &mut changes_trie_update, - &mut changes_root - ); - for (key, value) in changes { - trie.insert(&key, &value).unwrap(); - } - } - - (changes_root, changes_trie_update) - } - - pub fn insert_header( - backend: &Backend, - number: u64, - parent_hash: H256, - changes: Option, Vec)>>, - extrinsics_root: H256, - ) -> H256 { - use sp_runtime::testing::Digest; - - let mut digest = Digest::default(); - let mut changes_trie_update = Default::default(); - if let Some(changes) = changes { - let (root, update) = prepare_changes(changes); - digest.push(DigestItem::ChangesTrieRoot(root)); - changes_trie_update = update; - } - let header = Header { - number, - parent_hash, - state_root: BlakeTwo256::trie_root(Vec::new()), - digest, - extrinsics_root, - }; - let header_hash = header.hash(); - - let block_id = if number == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(number - 1) - }; - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, block_id).unwrap(); - op.set_block_data(header, 
Some(Vec::new()), None, NewBlockState::Best).unwrap(); - op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)).unwrap(); - backend.commit_operation(op).unwrap(); - - header_hash - } - - #[test] - fn block_hash_inserted_correctly() { - let backing = { - let db = Backend::::new_test(1, 0); - for i in 0..10 { - assert!(db.blockchain().hash(i).unwrap().is_none()); - - { - let id = if i == 0 { - BlockId::Hash(Default::default()) - } else { - BlockId::Number(i - 1) - }; - - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, id).unwrap(); - let header = Header { - number: i, - parent_hash: if i == 0 { - Default::default() - } else { - db.blockchain.hash(i - 1).unwrap().unwrap() - }, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - db.commit_operation(op).unwrap(); - } - - assert!(db.blockchain().hash(i).unwrap().is_some()) - } - db.storage.db.clone() - }; - - let backend = Backend::::new(DatabaseSettings { - state_cache_size: 16777216, - state_cache_child_ratio: Some((50, 100)), - pruning: PruningMode::keep_blocks(1), - source: DatabaseSettingsSrc::Custom(backing), - }, 0).unwrap(); - assert_eq!(backend.blockchain().info().best_number, 9); - for i in 0..10 { - assert!(backend.blockchain().hash(i).unwrap().is_some()) - } - } - - #[test] - fn set_state_data() { - let db = Backend::::new_test(2, 0); - let hash = { - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![ - (vec![1, 3, 5], vec![2, 4, 6]), - (vec![1, 2, 3], vec![9, 9, 9]), - ]; - - header.state_root = op.old_state.storage_root(storage 
- .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.reset_storage(Storage { - top: storage.iter().cloned().collect(), - children: Default::default(), - }).unwrap(); - op.set_block_data( - header.clone(), - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - db.commit_operation(op).unwrap(); - - let state = db.state_at(BlockId::Number(0)).unwrap(); - - assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); - assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); - assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); - - hash - }; - - { - let mut op = db.begin_operation().unwrap(); - db.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); - let mut header = Header { - number: 1, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage = vec![ - (vec![1, 3, 5], None), - (vec![5, 5, 5], Some(vec![4, 5, 6])), - ]; - - let (root, overlay) = op.old_state.storage_root(storage.iter().cloned()); - op.update_db_storage(overlay).unwrap(); - header.state_root = root.into(); - - op.update_storage(storage, Vec::new()).unwrap(); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - db.commit_operation(op).unwrap(); - - let state = db.state_at(BlockId::Number(1)).unwrap(); - - assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); - assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); - assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); - } - } - - #[test] - fn delete_only_when_negative_rc() { - let _ = ::env_logger::try_init(); - let key; - let backend = Backend::::new_test(1, 0); - - let hash = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default())).unwrap(); - let mut header = Header { - number: 0, - parent_hash: Default::default(), - state_root: 
Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.reset_storage(Storage { - top: storage.iter().cloned().collect(), - children: Default::default(), - }).unwrap(); - - key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap(), &b"hello"[..]); - hash - }; - - let hash = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(0)).unwrap(); - let mut header = Header { - number: 1, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.db_updates.insert(EMPTY_PREFIX, b"hello"); - op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - assert_eq!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).unwrap(), &b"hello"[..]); - hash - }; - - let hash = { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(1)).unwrap(); - let mut header = Header { - number: 2, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = 
op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - let hash = header.hash(); - - op.db_updates.remove(&key, EMPTY_PREFIX); - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - - - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).is_some()); - hash - }; - - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Number(2)).unwrap(); - let mut header = Header { - number: 3, - parent_hash: hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }; - - let storage: Vec<(_, _)> = vec![]; - - header.state_root = op.old_state.storage_root(storage - .iter() - .cloned() - .map(|(x, y)| (x, Some(y))) - ).0.into(); - - op.set_block_data( - header, - Some(vec![]), - None, - NewBlockState::Best, - ).unwrap(); - - backend.commit_operation(op).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).is_none()); - } - - backend.finalize_block(BlockId::Number(1), None).unwrap(); - backend.finalize_block(BlockId::Number(2), None).unwrap(); - backend.finalize_block(BlockId::Number(3), None).unwrap(); - assert!(backend.storage.db.get( - columns::STATE, - &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) - ).is_none()); - } - - #[test] - fn tree_route_works() { - let backend = Backend::::new_test(1000, 100); - let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - - // fork from genesis: 3 prong. - let a1 = insert_header(&backend, 1, block0, None, Default::default()); - let a2 = insert_header(&backend, 2, a1, None, Default::default()); - let a3 = insert_header(&backend, 3, a2, None, Default::default()); - - // fork from genesis: 2 prong. 
- let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, None, Default::default()); - - { - let tree_route = tree_route(blockchain, a3, b2).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); - } - - { - let tree_route = tree_route(blockchain, a1, a3).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); - } - - { - let tree_route = tree_route(blockchain, a3, a1).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); - assert!(tree_route.enacted().is_empty()); - } - - { - let tree_route = tree_route(blockchain, a2, a2).unwrap(); - - assert_eq!(tree_route.common_block().hash, a2); - assert!(tree_route.retracted().is_empty()); - assert!(tree_route.enacted().is_empty()); - } - } - - #[test] - fn tree_route_child() { - let backend = Backend::::new_test(1000, 100); - let blockchain = backend.blockchain(); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - - { - let tree_route = tree_route(blockchain, block0, block1).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![block1]); - } - } - - #[test] - fn lowest_common_ancestor_works() { - let backend = Backend::::new_test(1000, 100); - let blockchain = backend.blockchain(); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - - // fork from genesis: 3 prong. 
- let a1 = insert_header(&backend, 1, block0, None, Default::default()); - let a2 = insert_header(&backend, 2, a1, None, Default::default()); - let a3 = insert_header(&backend, 3, a2, None, Default::default()); - - // fork from genesis: 2 prong. - let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); - let b2 = insert_header(&backend, 2, b1, None, Default::default()); - - { - let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap(); - - assert_eq!(lca.hash, block0); - assert_eq!(lca.number, 0); - } - - { - let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - - { - let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - } - - #[test] - fn test_tree_route_regression() { - // NOTE: this is a test for a regression introduced in #3665, the result - // of tree_route would be erroneously computed, since it was taking into - // account the `ancestor` in `CachedHeaderMetadata` for the comparison. - // in this test we simulate the same behavior with the side-effect - // triggering the issue being eviction of a previously fetched record - // from the cache, therefore this test is dependent on the LRU cache - // size for header metadata, which is currently set to 5000 elements. 
- let backend = Backend::::new_test(10000, 10000); - let blockchain = backend.blockchain(); - - let genesis = insert_header(&backend, 0, Default::default(), None, Default::default()); - - let block100 = (1..=100).fold(genesis, |parent, n| { - insert_header(&backend, n, parent, None, Default::default()) - }); - - let block7000 = (101..=7000).fold(block100, |parent, n| { - insert_header(&backend, n, parent, None, Default::default()) - }); - - // This will cause the ancestor of `block100` to be set to `genesis` as a side-effect. - lowest_common_ancestor(blockchain, genesis, block100).unwrap(); - - // While traversing the tree we will have to do 6900 calls to - // `header_metadata`, which will make sure we will exhaust our cache - // which only takes 5000 elements. In particular, the `CachedHeaderMetadata` struct for - // block #100 will be evicted and will get a new value (with ancestor set to its parent). - let tree_route = tree_route(blockchain, block100, block7000).unwrap(); - - assert!(tree_route.retracted().is_empty()); - } - - #[test] - fn test_leaves_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); - } - - #[test] - fn test_children_with_complex_block_tree() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_children_for_backend(backend); - } - - #[test] - fn test_blockchain_query_by_number_gets_canonical() { - let backend: Arc> = Arc::new(Backend::new_test(20, 20)); - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); - } - - #[test] - fn test_leaves_pruned_on_finality() { - let backend: Backend = Backend::new_test(10, 10); - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - - let block1_a = insert_header(&backend, 1, block0, None, Default::default()); - let block1_b = insert_header(&backend, 1, 
block0, None, [1; 32].into()); - let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into()); - - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block1_a, block1_b, block1_c]); - - let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); - let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); - let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); - - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c, block1_c]); - - backend.finalize_block(BlockId::hash(block1_a), None).unwrap(); - backend.finalize_block(BlockId::hash(block2_a), None).unwrap(); - - // leaves at same height stay. Leaves at lower heights pruned. - assert_eq!(backend.blockchain().leaves().unwrap(), vec![block2_a, block2_b, block2_c]); - } - - #[test] - fn test_aux() { - let backend: Backend = Backend::new_test(0, 0); - assert!(backend.get_aux(b"test").unwrap().is_none()); - backend.insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]).unwrap(); - assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]); - backend.insert_aux(&[], &[&b"test"[..]]).unwrap(); - assert!(backend.get_aux(b"test").unwrap().is_none()); - } - - #[test] - fn test_finalize_block_with_justification() { - use sc_client::blockchain::{Backend as BlockChainBackend}; - - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let _ = insert_header(&backend, 1, block0, None, Default::default()); - - let justification = Some(vec![1, 2, 3]); - backend.finalize_block(BlockId::Number(1), justification.clone()).unwrap(); - - assert_eq!( - backend.blockchain().justification(BlockId::Number(1)).unwrap(), - justification, - ); - } - - #[test] - fn test_finalize_multiple_blocks_in_single_op() { - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let 
block1 = insert_header(&backend, 1, block0, None, Default::default()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - let block3 = insert_header(&backend, 3, block2, None, Default::default()); - let block4 = insert_header(&backend, 4, block3, None, Default::default()); - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); - op.mark_finalized(BlockId::Hash(block1), None).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - backend.commit_operation(op).unwrap(); - } - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block2)).unwrap(); - op.mark_finalized(BlockId::Hash(block3), None).unwrap(); - op.mark_finalized(BlockId::Hash(block4), None).unwrap(); - backend.commit_operation(op).unwrap(); - } - } - - #[test] - fn test_finalize_non_sequential() { - let backend = Backend::::new_test(10, 10); - - let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); - let block1 = insert_header(&backend, 1, block0, None, Default::default()); - let block2 = insert_header(&backend, 2, block1, None, Default::default()); - { - let mut op = backend.begin_operation().unwrap(); - backend.begin_state_operation(&mut op, BlockId::Hash(block0)).unwrap(); - op.mark_finalized(BlockId::Hash(block2), None).unwrap(); - backend.commit_operation(op).unwrap_err(); - } - } + use super::*; + use crate::columns; + use hash_db::{HashDB, EMPTY_PREFIX}; + use sc_client::blockchain::Backend as BLBTrait; + use sc_client_api::backend::{Backend as BTrait, BlockImportOperation as Op}; + use sp_blockchain::{lowest_common_ancestor, tree_route}; + use sp_core::H256; + use sp_runtime::generic::DigestItem; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, Header}; + use sp_runtime::traits::{BlakeTwo256, Hash}; + use sp_state_machine::{TrieDBMut, TrieMut}; + + pub(crate) type Block = 
RawBlock>; + + pub fn prepare_changes(changes: Vec<(Vec, Vec)>) -> (H256, MemoryDB) { + let mut changes_root = H256::default(); + let mut changes_trie_update = MemoryDB::::default(); + { + let mut trie = + TrieDBMut::::new(&mut changes_trie_update, &mut changes_root); + for (key, value) in changes { + trie.insert(&key, &value).unwrap(); + } + } + + (changes_root, changes_trie_update) + } + + pub fn insert_header( + backend: &Backend, + number: u64, + parent_hash: H256, + changes: Option, Vec)>>, + extrinsics_root: H256, + ) -> H256 { + use sp_runtime::testing::Digest; + + let mut digest = Digest::default(); + let mut changes_trie_update = Default::default(); + if let Some(changes) = changes { + let (root, update) = prepare_changes(changes); + digest.push(DigestItem::ChangesTrieRoot(root)); + changes_trie_update = update; + } + let header = Header { + number, + parent_hash, + state_root: BlakeTwo256::trie_root(Vec::new()), + digest, + extrinsics_root, + }; + let header_hash = header.hash(); + + let block_id = if number == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(number - 1) + }; + let mut op = backend.begin_operation().unwrap(); + backend.begin_state_operation(&mut op, block_id).unwrap(); + op.set_block_data(header, Some(Vec::new()), None, NewBlockState::Best) + .unwrap(); + op.update_changes_trie((changes_trie_update, ChangesTrieCacheAction::Clear)) + .unwrap(); + backend.commit_operation(op).unwrap(); + + header_hash + } + + #[test] + fn block_hash_inserted_correctly() { + let backing = { + let db = Backend::::new_test(1, 0); + for i in 0..10 { + assert!(db.blockchain().hash(i).unwrap().is_none()); + + { + let id = if i == 0 { + BlockId::Hash(Default::default()) + } else { + BlockId::Number(i - 1) + }; + + let mut op = db.begin_operation().unwrap(); + db.begin_state_operation(&mut op, id).unwrap(); + let header = Header { + number: i, + parent_hash: if i == 0 { + Default::default() + } else { + db.blockchain.hash(i - 
1).unwrap().unwrap() + }, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + db.commit_operation(op).unwrap(); + } + + assert!(db.blockchain().hash(i).unwrap().is_some()) + } + db.storage.db.clone() + }; + + let backend = Backend::::new( + DatabaseSettings { + state_cache_size: 16777216, + state_cache_child_ratio: Some((50, 100)), + pruning: PruningMode::keep_blocks(1), + source: DatabaseSettingsSrc::Custom(backing), + }, + 0, + ) + .unwrap(); + assert_eq!(backend.blockchain().info().best_number, 9); + for i in 0..10 { + assert!(backend.blockchain().hash(i).unwrap().is_some()) + } + } + + #[test] + fn set_state_data() { + let db = Backend::::new_test(2, 0); + let hash = { + let mut op = db.begin_operation().unwrap(); + db.begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![ + (vec![1, 3, 5], vec![2, 4, 6]), + (vec![1, 2, 3], vec![9, 9, 9]), + ]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.reset_storage(Storage { + top: storage.iter().cloned().collect(), + children: Default::default(), + }) + .unwrap(); + op.set_block_data(header.clone(), Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + let state = db.state_at(BlockId::Number(0)).unwrap(); + + assert_eq!(state.storage(&[1, 3, 5]).unwrap(), Some(vec![2, 4, 6])); + assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); + assert_eq!(state.storage(&[5, 5, 5]).unwrap(), None); + + hash + }; + + { + let mut op = db.begin_operation().unwrap(); + db.begin_state_operation(&mut op, 
BlockId::Number(0)) + .unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage = vec![(vec![1, 3, 5], None), (vec![5, 5, 5], Some(vec![4, 5, 6]))]; + + let (root, overlay) = op.old_state.storage_root(storage.iter().cloned()); + op.update_db_storage(overlay).unwrap(); + header.state_root = root.into(); + + op.update_storage(storage, Vec::new()).unwrap(); + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + db.commit_operation(op).unwrap(); + + let state = db.state_at(BlockId::Number(1)).unwrap(); + + assert_eq!(state.storage(&[1, 3, 5]).unwrap(), None); + assert_eq!(state.storage(&[1, 2, 3]).unwrap(), Some(vec![9, 9, 9])); + assert_eq!(state.storage(&[5, 5, 5]).unwrap(), Some(vec![4, 5, 6])); + } + } + + #[test] + fn delete_only_when_negative_rc() { + let _ = ::env_logger::try_init(); + let key; + let backend = Backend::::new_test(1, 0); + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(Default::default())) + .unwrap(); + let mut header = Header { + number: 0, + parent_hash: Default::default(), + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.reset_storage(Storage { + top: storage.iter().cloned().collect(), + children: Default::default(), + }) + .unwrap(); + + key = op.db_updates.insert(EMPTY_PREFIX, b"hello"); + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + assert_eq!( + backend + .storage + .db + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + ) + .unwrap(), + 
&b"hello"[..] + ); + hash + }; + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Number(0)) + .unwrap(); + let mut header = Header { + number: 1, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.db_updates.insert(EMPTY_PREFIX, b"hello"); + op.db_updates.remove(&key, EMPTY_PREFIX); + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + assert_eq!( + backend + .storage + .db + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + ) + .unwrap(), + &b"hello"[..] + ); + hash + }; + + let hash = { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Number(1)) + .unwrap(); + let mut header = Header { + number: 2, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + let hash = header.hash(); + + op.db_updates.remove(&key, EMPTY_PREFIX); + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + + assert!(backend + .storage + .db + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + ) + .is_some()); + hash + }; + + { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Number(2)) + .unwrap(); + let mut header = Header { + number: 3, + parent_hash: hash, + state_root: Default::default(), + digest: Default::default(), + 
extrinsics_root: Default::default(), + }; + + let storage: Vec<(_, _)> = vec![]; + + header.state_root = op + .old_state + .storage_root(storage.iter().cloned().map(|(x, y)| (x, Some(y)))) + .0 + .into(); + + op.set_block_data(header, Some(vec![]), None, NewBlockState::Best) + .unwrap(); + + backend.commit_operation(op).unwrap(); + assert!(backend + .storage + .db + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + ) + .is_none()); + } + + backend.finalize_block(BlockId::Number(1), None).unwrap(); + backend.finalize_block(BlockId::Number(2), None).unwrap(); + backend.finalize_block(BlockId::Number(3), None).unwrap(); + assert!(backend + .storage + .db + .get( + columns::STATE, + &sp_trie::prefixed_key::(&key, EMPTY_PREFIX) + ) + .is_none()); + } + + #[test] + fn tree_route_works() { + let backend = Backend::::new_test(1000, 100); + let blockchain = backend.blockchain(); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + + // fork from genesis: 3 prong. + let a1 = insert_header(&backend, 1, block0, None, Default::default()); + let a2 = insert_header(&backend, 2, a1, None, Default::default()); + let a3 = insert_header(&backend, 3, a2, None, Default::default()); + + // fork from genesis: 2 prong. 
+ let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); + let b2 = insert_header(&backend, 2, b1, None, Default::default()); + + { + let tree_route = tree_route(blockchain, a3, b2).unwrap(); + + assert_eq!(tree_route.common_block().hash, block0); + assert_eq!( + tree_route + .retracted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a3, a2, a1] + ); + assert_eq!( + tree_route + .enacted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![b1, b2] + ); + } + + { + let tree_route = tree_route(blockchain, a1, a3).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert!(tree_route.retracted().is_empty()); + assert_eq!( + tree_route + .enacted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a2, a3] + ); + } + + { + let tree_route = tree_route(blockchain, a3, a1).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert_eq!( + tree_route + .retracted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a3, a2] + ); + assert!(tree_route.enacted().is_empty()); + } + + { + let tree_route = tree_route(blockchain, a2, a2).unwrap(); + + assert_eq!(tree_route.common_block().hash, a2); + assert!(tree_route.retracted().is_empty()); + assert!(tree_route.enacted().is_empty()); + } + } + + #[test] + fn tree_route_child() { + let backend = Backend::::new_test(1000, 100); + let blockchain = backend.blockchain(); + + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + + { + let tree_route = tree_route(blockchain, block0, block1).unwrap(); + + assert_eq!(tree_route.common_block().hash, block0); + assert!(tree_route.retracted().is_empty()); + assert_eq!( + tree_route + .enacted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![block1] + ); + } + } + + #[test] + fn lowest_common_ancestor_works() { + let backend = Backend::::new_test(1000, 100); + let blockchain = backend.blockchain(); + let block0 = 
insert_header(&backend, 0, Default::default(), None, Default::default()); + + // fork from genesis: 3 prong. + let a1 = insert_header(&backend, 1, block0, None, Default::default()); + let a2 = insert_header(&backend, 2, a1, None, Default::default()); + let a3 = insert_header(&backend, 3, a2, None, Default::default()); + + // fork from genesis: 2 prong. + let b1 = insert_header(&backend, 1, block0, None, H256::from([1; 32])); + let b2 = insert_header(&backend, 2, b1, None, Default::default()); + + { + let lca = lowest_common_ancestor(blockchain, a3, b2).unwrap(); + + assert_eq!(lca.hash, block0); + assert_eq!(lca.number, 0); + } + + { + let lca = lowest_common_ancestor(blockchain, a1, a3).unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor(blockchain, a3, a1).unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor(blockchain, a2, a3).unwrap(); + + assert_eq!(lca.hash, a2); + assert_eq!(lca.number, 2); + } + + { + let lca = lowest_common_ancestor(blockchain, a2, a1).unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor(blockchain, a2, a2).unwrap(); + + assert_eq!(lca.hash, a2); + assert_eq!(lca.number, 2); + } + } + + #[test] + fn test_tree_route_regression() { + // NOTE: this is a test for a regression introduced in #3665, the result + // of tree_route would be erroneously computed, since it was taking into + // account the `ancestor` in `CachedHeaderMetadata` for the comparison. + // in this test we simulate the same behavior with the side-effect + // triggering the issue being eviction of a previously fetched record + // from the cache, therefore this test is dependent on the LRU cache + // size for header metadata, which is currently set to 5000 elements. 
+ let backend = Backend::::new_test(10000, 10000); + let blockchain = backend.blockchain(); + + let genesis = insert_header(&backend, 0, Default::default(), None, Default::default()); + + let block100 = (1..=100).fold(genesis, |parent, n| { + insert_header(&backend, n, parent, None, Default::default()) + }); + + let block7000 = (101..=7000).fold(block100, |parent, n| { + insert_header(&backend, n, parent, None, Default::default()) + }); + + // This will cause the ancestor of `block100` to be set to `genesis` as a side-effect. + lowest_common_ancestor(blockchain, genesis, block100).unwrap(); + + // While traversing the tree we will have to do 6900 calls to + // `header_metadata`, which will make sure we will exhaust our cache + // which only takes 5000 elements. In particular, the `CachedHeaderMetadata` struct for + // block #100 will be evicted and will get a new value (with ancestor set to its parent). + let tree_route = tree_route(blockchain, block100, block7000).unwrap(); + + assert!(tree_route.retracted().is_empty()); + } + + #[test] + fn test_leaves_with_complex_block_tree() { + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); + substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); + } + + #[test] + fn test_children_with_complex_block_tree() { + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); + substrate_test_runtime_client::trait_tests::test_children_for_backend(backend); + } + + #[test] + fn test_blockchain_query_by_number_gets_canonical() { + let backend: Arc> = + Arc::new(Backend::new_test(20, 20)); + substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical( + backend, + ); + } + + #[test] + fn test_leaves_pruned_on_finality() { + let backend: Backend = Backend::new_test(10, 10); + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + + let block1_a = insert_header(&backend, 1, block0, None, Default::default()); + let block1_b = 
insert_header(&backend, 1, block0, None, [1; 32].into()); + let block1_c = insert_header(&backend, 1, block0, None, [2; 32].into()); + + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![block1_a, block1_b, block1_c] + ); + + let block2_a = insert_header(&backend, 2, block1_a, None, Default::default()); + let block2_b = insert_header(&backend, 2, block1_b, None, Default::default()); + let block2_c = insert_header(&backend, 2, block1_b, None, [1; 32].into()); + + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![block2_a, block2_b, block2_c, block1_c] + ); + + backend + .finalize_block(BlockId::hash(block1_a), None) + .unwrap(); + backend + .finalize_block(BlockId::hash(block2_a), None) + .unwrap(); + + // leaves at same height stay. Leaves at lower heights pruned. + assert_eq!( + backend.blockchain().leaves().unwrap(), + vec![block2_a, block2_b, block2_c] + ); + } + + #[test] + fn test_aux() { + let backend: Backend = + Backend::new_test(0, 0); + assert!(backend.get_aux(b"test").unwrap().is_none()); + backend + .insert_aux(&[(&b"test"[..], &b"hello"[..])], &[]) + .unwrap(); + assert_eq!(b"hello", &backend.get_aux(b"test").unwrap().unwrap()[..]); + backend.insert_aux(&[], &[&b"test"[..]]).unwrap(); + assert!(backend.get_aux(b"test").unwrap().is_none()); + } + + #[test] + fn test_finalize_block_with_justification() { + use sc_client::blockchain::Backend as BlockChainBackend; + + let backend = Backend::::new_test(10, 10); + + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let _ = insert_header(&backend, 1, block0, None, Default::default()); + + let justification = Some(vec![1, 2, 3]); + backend + .finalize_block(BlockId::Number(1), justification.clone()) + .unwrap(); + + assert_eq!( + backend + .blockchain() + .justification(BlockId::Number(1)) + .unwrap(), + justification, + ); + } + + #[test] + fn test_finalize_multiple_blocks_in_single_op() { + let backend = Backend::::new_test(10, 10); + + let block0 
= insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + let block3 = insert_header(&backend, 3, block2, None, Default::default()); + let block4 = insert_header(&backend, 4, block3, None, Default::default()); + { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(block0)) + .unwrap(); + op.mark_finalized(BlockId::Hash(block1), None).unwrap(); + op.mark_finalized(BlockId::Hash(block2), None).unwrap(); + backend.commit_operation(op).unwrap(); + } + { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(block2)) + .unwrap(); + op.mark_finalized(BlockId::Hash(block3), None).unwrap(); + op.mark_finalized(BlockId::Hash(block4), None).unwrap(); + backend.commit_operation(op).unwrap(); + } + } + + #[test] + fn test_finalize_non_sequential() { + let backend = Backend::::new_test(10, 10); + + let block0 = insert_header(&backend, 0, Default::default(), None, Default::default()); + let block1 = insert_header(&backend, 1, block0, None, Default::default()); + let block2 = insert_header(&backend, 2, block1, None, Default::default()); + { + let mut op = backend.begin_operation().unwrap(); + backend + .begin_state_operation(&mut op, BlockId::Hash(block0)) + .unwrap(); + op.mark_finalized(BlockId::Hash(block2), None).unwrap(); + backend.commit_operation(op).unwrap_err(); + } + } } diff --git a/client/db/src/light.rs b/client/db/src/light.rs index c87388a954..11358e5e16 100644 --- a/client/db/src/light.rs +++ b/client/db/src/light.rs @@ -16,38 +16,40 @@ //! RocksDB-based light client blockchain storage. 
-use std::{sync::Arc, collections::HashMap}; -use std::convert::TryInto; use parking_lot::RwLock; +use std::convert::TryInto; +use std::{collections::HashMap, sync::Arc}; -use sc_client_api::{backend::{AuxStore, NewBlockState}, UsageInfo}; -use sc_client::blockchain::{ - BlockStatus, Cache as BlockchainCache,Info as BlockchainInfo, +use crate::cache::{ComplexBlockId, DbCache, DbCacheSync, EntryType as CacheEntryType}; +use crate::utils::{ + self, block_id_to_lookup_key, meta_keys, read_db, read_meta, DatabaseType, Meta, }; +use crate::{DatabaseSettings, DbHash, FrozenForDuration}; +use codec::{Decode, Encode}; +use log::{debug, trace, warn}; +use sc_client::blockchain::{BlockStatus, Cache as BlockchainCache, Info as BlockchainInfo}; use sc_client::cht; +use sc_client::light::blockchain::Storage as LightBlockchainStorage; +use sc_client_api::{ + backend::{AuxStore, NewBlockState}, + UsageInfo, +}; use sp_blockchain::{ - CachedHeaderMetadata, HeaderMetadata, HeaderMetadataCache, - Error as ClientError, Result as ClientResult, - HeaderBackend as BlockchainHeaderBackend, - well_known_cache_keys, + well_known_cache_keys, CachedHeaderMetadata, Error as ClientError, + HeaderBackend as BlockchainHeaderBackend, HeaderMetadata, HeaderMetadataCache, + Result as ClientResult, }; use sp_database::{Database, Transaction}; -use sc_client::light::blockchain::Storage as LightBlockchainStorage; -use codec::{Decode, Encode}; -use sp_runtime::generic::{DigestItem, BlockId}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, One, NumberFor, HashFor}; -use crate::cache::{DbCacheSync, DbCache, ComplexBlockId, EntryType as CacheEntryType}; -use crate::utils::{self, meta_keys, DatabaseType, Meta, read_db, block_id_to_lookup_key, read_meta}; -use crate::{DatabaseSettings, FrozenForDuration, DbHash}; -use log::{trace, warn, debug}; +use sp_runtime::generic::{BlockId, DigestItem}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, One, Zero}; 
pub(crate) mod columns { - pub const META: u32 = crate::utils::COLUMN_META; - pub const KEY_LOOKUP: u32 = 1; - pub const HEADER: u32 = 2; - pub const CACHE: u32 = 3; - pub const CHT: u32 = 4; - pub const AUX: u32 = 5; + pub const META: u32 = crate::utils::COLUMN_META; + pub const KEY_LOOKUP: u32 = 1; + pub const HEADER: u32 = 2; + pub const CACHE: u32 = 3; + pub const CHT: u32 = 4; + pub const AUX: u32 = 5; } /// Prefix for headers CHT. @@ -58,1173 +60,1492 @@ const CHANGES_TRIE_CHT_PREFIX: u8 = 1; /// Light blockchain storage. Stores most recent headers + CHTs for older headers. /// Locks order: meta, cache. pub struct LightStorage { - db: Arc>, - meta: RwLock, Block::Hash>>, - cache: Arc>, - header_metadata_cache: HeaderMetadataCache, + db: Arc>, + meta: RwLock, Block::Hash>>, + cache: Arc>, + header_metadata_cache: HeaderMetadataCache, - #[cfg(not(target_os = "unknown"))] - io_stats: FrozenForDuration, + #[cfg(not(target_os = "unknown"))] + io_stats: FrozenForDuration, } impl LightStorage { - /// Create new storage with given settings. - pub fn new(config: DatabaseSettings) -> ClientResult { - let db = crate::utils::open_database::(&config, DatabaseType::Light)?; - Self::from_kvdb(db as Arc<_>) - } - - /// Create new memory-backed `LightStorage` for tests. 
- #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test() -> Self { - let db = Arc::new(sp_database::MemDb::default()); - Self::from_kvdb(db as Arc<_>).expect("failed to create test-db") - } - - fn from_kvdb(db: Arc>) -> ClientResult { - let meta = read_meta::(&*db, columns::HEADER)?; - let cache = DbCache::new( - db.clone(), - columns::KEY_LOOKUP, - columns::HEADER, - columns::CACHE, - meta.genesis_hash, - ComplexBlockId::new(meta.finalized_hash, meta.finalized_number), - ); - - Ok(LightStorage { - db, - meta: RwLock::new(meta), - cache: Arc::new(DbCacheSync(RwLock::new(cache))), - header_metadata_cache: HeaderMetadataCache::default(), - #[cfg(not(target_os = "unknown"))] - io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), - }) - } - - #[cfg(test)] - pub(crate) fn cache(&self) -> &DbCacheSync { - &self.cache - } - - fn update_meta( - &self, - hash: Block::Hash, - number: NumberFor, - is_best: bool, - is_finalized: bool, - ) { - let mut meta = self.meta.write(); - - if number.is_zero() { - meta.genesis_hash = hash; - meta.finalized_hash = hash; - } - - if is_best { - meta.best_number = number; - meta.best_hash = hash; - } - - if is_finalized { - meta.finalized_number = number; - meta.finalized_hash = hash; - } - } + /// Create new storage with given settings. + pub fn new(config: DatabaseSettings) -> ClientResult { + let db = crate::utils::open_database::(&config, DatabaseType::Light)?; + Self::from_kvdb(db as Arc<_>) + } + + /// Create new memory-backed `LightStorage` for tests. 
+ #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test() -> Self { + let db = Arc::new(sp_database::MemDb::default()); + Self::from_kvdb(db as Arc<_>).expect("failed to create test-db") + } + + fn from_kvdb(db: Arc>) -> ClientResult { + let meta = read_meta::(&*db, columns::HEADER)?; + let cache = DbCache::new( + db.clone(), + columns::KEY_LOOKUP, + columns::HEADER, + columns::CACHE, + meta.genesis_hash, + ComplexBlockId::new(meta.finalized_hash, meta.finalized_number), + ); + + Ok(LightStorage { + db, + meta: RwLock::new(meta), + cache: Arc::new(DbCacheSync(RwLock::new(cache))), + header_metadata_cache: HeaderMetadataCache::default(), + #[cfg(not(target_os = "unknown"))] + io_stats: FrozenForDuration::new(std::time::Duration::from_secs(1)), + }) + } + + #[cfg(test)] + pub(crate) fn cache(&self) -> &DbCacheSync { + &self.cache + } + + fn update_meta( + &self, + hash: Block::Hash, + number: NumberFor, + is_best: bool, + is_finalized: bool, + ) { + let mut meta = self.meta.write(); + + if number.is_zero() { + meta.genesis_hash = hash; + meta.finalized_hash = hash; + } + + if is_best { + meta.best_number = number; + meta.best_hash = hash; + } + + if is_finalized { + meta.finalized_number = number; + meta.finalized_hash = hash; + } + } } impl BlockchainHeaderBackend for LightStorage - where - Block: BlockT, +where + Block: BlockT, { - fn header(&self, id: BlockId) -> ClientResult> { - utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) - } - - fn info(&self) -> BlockchainInfo { - let meta = self.meta.read(); - BlockchainInfo { - best_hash: meta.best_hash, - best_number: meta.best_number, - genesis_hash: meta.genesis_hash, - finalized_hash: meta.finalized_hash, - finalized_number: meta.finalized_number, - number_leaves: 1, - } - } - - fn status(&self, id: BlockId) -> ClientResult { - let exists = match id { - BlockId::Hash(_) => read_db( - &*self.db, - columns::KEY_LOOKUP, - columns::HEADER, - id - )?.is_some(), - BlockId::Number(n) => n <= 
self.meta.read().best_number, - }; - match exists { - true => Ok(BlockStatus::InChain), - false => Ok(BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - if let Some(lookup_key) = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? { - let number = utils::lookup_key_to_number(&lookup_key)?; - Ok(Some(number)) - } else { - Ok(None) - } - } - - fn hash(&self, number: NumberFor) -> ClientResult> { - Ok(self.header(BlockId::Number(number))?.map(|header| header.hash().clone())) - } + fn header(&self, id: BlockId) -> ClientResult> { + utils::read_header(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id) + } + + fn info(&self) -> BlockchainInfo { + let meta = self.meta.read(); + BlockchainInfo { + best_hash: meta.best_hash, + best_number: meta.best_number, + genesis_hash: meta.genesis_hash, + finalized_hash: meta.finalized_hash, + finalized_number: meta.finalized_number, + number_leaves: 1, + } + } + + fn status(&self, id: BlockId) -> ClientResult { + let exists = match id { + BlockId::Hash(_) => { + read_db(&*self.db, columns::KEY_LOOKUP, columns::HEADER, id)?.is_some() + } + BlockId::Number(n) => n <= self.meta.read().best_number, + }; + match exists { + true => Ok(BlockStatus::InChain), + false => Ok(BlockStatus::Unknown), + } + } + + fn number(&self, hash: Block::Hash) -> ClientResult>> { + if let Some(lookup_key) = + block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Hash(hash))? + { + let number = utils::lookup_key_to_number(&lookup_key)?; + Ok(Some(number)) + } else { + Ok(None) + } + } + + fn hash(&self, number: NumberFor) -> ClientResult> { + Ok(self + .header(BlockId::Number(number))? 
+ .map(|header| header.hash().clone())) + } } impl HeaderMetadata for LightStorage { - type Error = ClientError; - - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header_metadata_cache.header_metadata(hash).or_else(|_| { - self.header(BlockId::hash(hash))?.map(|header| { - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header_metadata.hash, - header_metadata.clone(), - ); - header_metadata - }).ok_or(ClientError::UnknownBlock(format!("header not found in db: {}", hash))) - }) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.header_metadata_cache.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.header_metadata_cache.remove_header_metadata(hash); - } + type Error = ClientError; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header_metadata_cache + .header_metadata(hash) + .or_else(|_| { + self.header(BlockId::hash(hash))? + .map(|header| { + let header_metadata = CachedHeaderMetadata::from(&header); + self.header_metadata_cache + .insert_header_metadata(header_metadata.hash, header_metadata.clone()); + header_metadata + }) + .ok_or(ClientError::UnknownBlock(format!( + "header not found in db: {}", + hash + ))) + }) + } + + fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { + self.header_metadata_cache + .insert_header_metadata(hash, metadata) + } + + fn remove_header_metadata(&self, hash: Block::Hash) { + self.header_metadata_cache.remove_header_metadata(hash); + } } impl LightStorage { - // Get block changes trie root, if available. - fn changes_trie_root(&self, block: BlockId) -> ClientResult> { - self.header(block) - .map(|header| header.and_then(|header| - header.digest().log(DigestItem::as_changes_trie_root) - .cloned())) - } - - /// Handle setting head within a transaction. 
`route_to` should be the last - /// block that existed in the database. `best_to` should be the best block - /// to be set. - /// - /// In the case where the new best block is a block to be imported, `route_to` - /// should be the parent of `best_to`. In the case where we set an existing block - /// to be best, `route_to` should equal to `best_to`. - fn set_head_with_transaction( - &self, - transaction: &mut Transaction, - route_to: Block::Hash, - best_to: (NumberFor, Block::Hash), - ) -> ClientResult<()> { - let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; - - // handle reorg. - let meta = self.meta.read(); - if meta.best_hash != Default::default() { - let tree_route = sp_blockchain::tree_route(self, meta.best_hash, route_to)?; - - // update block number to hash lookup entries. - for retracted in tree_route.retracted() { - if retracted.hash == meta.finalized_hash { - // TODO: can we recover here? - warn!("Safety failure: reverting finalized block {:?}", - (&retracted.number, &retracted.hash)); - } - - utils::remove_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - retracted.number - )?; - } - - for enacted in tree_route.enacted() { - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - enacted.number, - enacted.hash - )?; - } - } - - transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); - utils::insert_number_to_key_mapping( - transaction, - columns::KEY_LOOKUP, - best_to.0, - best_to.1, - )?; - - Ok(()) - } - - // Note that a block is finalized. Only call with child of last finalized block. 
- fn note_finalized( - &self, - transaction: &mut Transaction, - header: &Block::Header, - hash: Block::Hash, - ) -> ClientResult<()> { - let meta = self.meta.read(); - if &meta.finalized_hash != header.parent_hash() { - return Err(::sp_blockchain::Error::NonSequentialFinalization( - format!("Last finalized {:?} not parent of {:?}", - meta.finalized_hash, hash), - ).into()) - } - - let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?; - transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); - - // build new CHT(s) if required - if let Some(new_cht_number) = cht::is_build_required(cht::size(), *header.number()) { - let new_cht_start: NumberFor = cht::start_number(cht::size(), new_cht_number); - - let mut current_num = new_cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - - let new_header_cht_root = cht::compute_root::, _>( - cht::size(), new_cht_number, cht_range.map(|num| self.hash(num)) - )?; - transaction.set( - columns::CHT, - &cht_key(HEADER_CHT_PREFIX, new_cht_start)?, - new_header_cht_root.as_ref() - ); - - // if the header includes changes trie root, let's build a changes tries roots CHT - if header.digest().log(DigestItem::as_changes_trie_root).is_some() { - let mut current_num = new_cht_start; - let cht_range = std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - let new_changes_trie_cht_root = cht::compute_root::, _>( - cht::size(), new_cht_number, cht_range - .map(|num| self.changes_trie_root(BlockId::Number(num))) - )?; - transaction.set( - columns::CHT, - &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start)?, - new_changes_trie_cht_root.as_ref() - ); - } - - // prune headers that are replaced with CHT - let mut prune_block = new_cht_start; - let new_cht_end = cht::end_number(cht::size(), new_cht_number); 
- trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", + // Get block changes trie root, if available. + fn changes_trie_root(&self, block: BlockId) -> ClientResult> { + self.header(block).map(|header| { + header.and_then(|header| { + header + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned() + }) + }) + } + + /// Handle setting head within a transaction. `route_to` should be the last + /// block that existed in the database. `best_to` should be the best block + /// to be set. + /// + /// In the case where the new best block is a block to be imported, `route_to` + /// should be the parent of `best_to`. In the case where we set an existing block + /// to be best, `route_to` should equal to `best_to`. + fn set_head_with_transaction( + &self, + transaction: &mut Transaction, + route_to: Block::Hash, + best_to: (NumberFor, Block::Hash), + ) -> ClientResult<()> { + let lookup_key = utils::number_and_hash_to_lookup_key(best_to.0, &best_to.1)?; + + // handle reorg. + let meta = self.meta.read(); + if meta.best_hash != Default::default() { + let tree_route = sp_blockchain::tree_route(self, meta.best_hash, route_to)?; + + // update block number to hash lookup entries. + for retracted in tree_route.retracted() { + if retracted.hash == meta.finalized_hash { + // TODO: can we recover here? + warn!( + "Safety failure: reverting finalized block {:?}", + (&retracted.number, &retracted.hash) + ); + } + + utils::remove_number_to_key_mapping( + transaction, + columns::KEY_LOOKUP, + retracted.number, + )?; + } + + for enacted in tree_route.enacted() { + utils::insert_number_to_key_mapping( + transaction, + columns::KEY_LOOKUP, + enacted.number, + enacted.hash, + )?; + } + } + + transaction.set_from_vec(columns::META, meta_keys::BEST_BLOCK, lookup_key); + utils::insert_number_to_key_mapping( + transaction, + columns::KEY_LOOKUP, + best_to.0, + best_to.1, + )?; + + Ok(()) + } + + // Note that a block is finalized. Only call with child of last finalized block. 
+ fn note_finalized( + &self, + transaction: &mut Transaction, + header: &Block::Header, + hash: Block::Hash, + ) -> ClientResult<()> { + let meta = self.meta.read(); + if &meta.finalized_hash != header.parent_hash() { + return Err(::sp_blockchain::Error::NonSequentialFinalization(format!( + "Last finalized {:?} not parent of {:?}", + meta.finalized_hash, hash + )) + .into()); + } + + let lookup_key = utils::number_and_hash_to_lookup_key(header.number().clone(), hash)?; + transaction.set_from_vec(columns::META, meta_keys::FINALIZED_BLOCK, lookup_key); + + // build new CHT(s) if required + if let Some(new_cht_number) = cht::is_build_required(cht::size(), *header.number()) { + let new_cht_start: NumberFor = cht::start_number(cht::size(), new_cht_number); + + let mut current_num = new_cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + + let new_header_cht_root = cht::compute_root::, _>( + cht::size(), + new_cht_number, + cht_range.map(|num| self.hash(num)), + )?; + transaction.set( + columns::CHT, + &cht_key(HEADER_CHT_PREFIX, new_cht_start)?, + new_header_cht_root.as_ref(), + ); + + // if the header includes changes trie root, let's build a changes tries roots CHT + if header + .digest() + .log(DigestItem::as_changes_trie_root) + .is_some() + { + let mut current_num = new_cht_start; + let cht_range = std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + let new_changes_trie_cht_root = + cht::compute_root::, _>( + cht::size(), + new_cht_number, + cht_range.map(|num| self.changes_trie_root(BlockId::Number(num))), + )?; + transaction.set( + columns::CHT, + &cht_key(CHANGES_TRIE_CHT_PREFIX, new_cht_start)?, + new_changes_trie_cht_root.as_ref(), + ); + } + + // prune headers that are replaced with CHT + let mut prune_block = new_cht_start; + let new_cht_end = 
cht::end_number(cht::size(), new_cht_number); + trace!(target: "db", "Replacing blocks [{}..{}] with CHT#{}", new_cht_start, new_cht_end, new_cht_number); - while prune_block <= new_cht_end { - if let Some(hash) = self.hash(prune_block)? { - let lookup_key = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Number(prune_block))? + while prune_block <= new_cht_end { + if let Some(hash) = self.hash(prune_block)? { + let lookup_key = block_id_to_lookup_key::(&*self.db, columns::KEY_LOOKUP, BlockId::Number(prune_block))? .expect("retrieved hash for `prune_block` right above. therefore retrieving lookup key must succeed. q.e.d."); - utils::remove_key_mappings( - transaction, - columns::KEY_LOOKUP, - prune_block, - hash - )?; - transaction.remove(columns::HEADER, &lookup_key); - } - prune_block += One::one(); - } - } - - Ok(()) - } - - /// Read CHT root of given type for the block. - fn read_cht_root( - &self, - cht_type: u8, - cht_size: NumberFor, - block: NumberFor - ) -> ClientResult> { - let no_cht_for_block = || ClientError::Backend(format!("Missing CHT for block {}", block)); - - let meta = self.meta.read(); - let max_cht_number = cht::max_cht_number(cht_size, meta.finalized_number); - let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?; - match max_cht_number { - Some(max_cht_number) if cht_number <= max_cht_number => (), - _ => return Ok(None), - } - - let cht_start = cht::start_number(cht_size, cht_number); - self.db.get(columns::CHT, &cht_key(cht_type, cht_start)?) - .ok_or_else(no_cht_for_block) - .and_then(|hash| Block::Hash::decode(&mut &*hash).map_err(|_| no_cht_for_block())) - .map(Some) - } + utils::remove_key_mappings( + transaction, + columns::KEY_LOOKUP, + prune_block, + hash, + )?; + transaction.remove(columns::HEADER, &lookup_key); + } + prune_block += One::one(); + } + } + + Ok(()) + } + + /// Read CHT root of given type for the block. 
+ fn read_cht_root( + &self, + cht_type: u8, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult> { + let no_cht_for_block = || ClientError::Backend(format!("Missing CHT for block {}", block)); + + let meta = self.meta.read(); + let max_cht_number = cht::max_cht_number(cht_size, meta.finalized_number); + let cht_number = cht::block_to_cht_number(cht_size, block).ok_or_else(no_cht_for_block)?; + match max_cht_number { + Some(max_cht_number) if cht_number <= max_cht_number => (), + _ => return Ok(None), + } + + let cht_start = cht::start_number(cht_size, cht_number); + self.db + .get(columns::CHT, &cht_key(cht_type, cht_start)?) + .ok_or_else(no_cht_for_block) + .and_then(|hash| Block::Hash::decode(&mut &*hash).map_err(|_| no_cht_for_block())) + .map(Some) + } } impl AuxStore for LightStorage - where Block: BlockT, +where + Block: BlockT, { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { - let mut transaction = Transaction::new(); - for (k, v) in insert { - transaction.set(columns::AUX, k, v); - } - for k in delete { - transaction.remove(columns::AUX, k); - } - self.db.commit(transaction); - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.db.get(columns::AUX, key)) - } + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { + let mut transaction = Transaction::new(); + for (k, v) in insert { + transaction.set(columns::AUX, k, v); + } + for k in delete { + transaction.remove(columns::AUX, k); + } + self.db.commit(transaction); + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + Ok(self.db.get(columns::AUX, key)) + } } impl LightBlockchainStorage for LightStorage - where Block: BlockT, +where + Block: BlockT, { - fn import_header( - &self, - header: Block::Header, - mut cache_at: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, 
Option>)>, - ) -> ClientResult<()> { - let mut transaction = Transaction::new(); - - let hash = header.hash(); - let number = *header.number(); - let parent_hash = *header.parent_hash(); - - for (key, maybe_val) in aux_ops { - match maybe_val { - Some(val) => transaction.set_from_vec(columns::AUX, &key, val), - None => transaction.remove(columns::AUX, &key), - } - } - - // blocks are keyed by number + hash. - let lookup_key = utils::number_and_hash_to_lookup_key(number, &hash)?; - - if leaf_state.is_best() { - self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; - } - - utils::insert_hash_to_key_mapping( - &mut transaction, - columns::KEY_LOOKUP, - number, - hash, - )?; - transaction.set_from_vec(columns::HEADER, &lookup_key, header.encode()); - - let header_metadata = CachedHeaderMetadata::from(&header); - self.header_metadata_cache.insert_header_metadata( - header.hash().clone(), - header_metadata, - ); - - let is_genesis = number.is_zero(); - if is_genesis { - self.cache.0.write().set_genesis_hash(hash); - transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); - } - - let finalized = match leaf_state { - _ if is_genesis => true, - NewBlockState::Final => true, - _ => false, - }; - - if finalized { - self.note_finalized( - &mut transaction, - &header, - hash, - )?; - } - - // update changes trie configuration cache - if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { - if let Some(new_configuration) = crate::changes_tries_storage::extract_new_configuration(&header) { - cache_at.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_configuration.encode()); - } - } - - { - let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(&mut transaction) - .on_block_insert( - ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), - ComplexBlockId::new(hash, number), - cache_at, - if finalized { CacheEntryType::Final } else { 
CacheEntryType::NonFinal }, - )? - .into_ops(); - - debug!("Light DB Commit {:?} ({})", hash, number); - - self.db.commit(transaction); - cache.commit(cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there are cache_ops; qed"); - } - - self.update_meta(hash, number, leaf_state.is_best(), finalized); - - Ok(()) - } - - fn set_head(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? { - let hash = header.hash(); - let number = header.number(); - - let mut transaction = Transaction::new(); - self.set_head_with_transaction(&mut transaction, hash.clone(), (number.clone(), hash.clone()))?; - self.db.commit(transaction); - self.update_meta(hash, header.number().clone(), true, false); - Ok(()) - } else { - Err(ClientError::UnknownBlock(format!("Cannot set head {:?}", id))) - } - } - - fn header_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) - } - - fn changes_trie_cht_root( - &self, - cht_size: NumberFor, - block: NumberFor, - ) -> ClientResult> { - self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) - } - - fn finalize_header(&self, id: BlockId) -> ClientResult<()> { - if let Some(header) = self.header(id)? { - let mut transaction = Transaction::new(); - let hash = header.hash(); - let number = *header.number(); - self.note_finalized(&mut transaction, &header, hash.clone())?; - { - let mut cache = self.cache.0.write(); - let cache_ops = cache.transaction(&mut transaction) - .on_block_finalize( - ComplexBlockId::new(*header.parent_hash(), if number.is_zero() { Zero::zero() } else { number - One::one() }), - ComplexBlockId::new(hash, number) - )? 
- .into_ops(); - - self.db.commit(transaction); - cache.commit(cache_ops) - .expect("only fails if cache with given name isn't loaded yet;\ - cache is already loaded because there are cache_ops; qed"); - } - self.update_meta(hash, header.number().clone(), false, true); - - Ok(()) - } else { - Err(ClientError::UnknownBlock(format!("Cannot finalize block {:?}", id))) - } - } - - fn last_finalized(&self) -> ClientResult { - Ok(self.meta.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - Some(self.cache.clone()) - } - - #[cfg(not(target_os = "unknown"))] - fn usage_info(&self) -> Option { - use sc_client_api::{MemoryInfo, IoInfo, MemorySize}; - - // TODO: reimplement IO stats - let database_cache = MemorySize::from_bytes(0); - let io_stats = self.io_stats.take_or_else(|| kvdb::IoStats::empty()); - - Some(UsageInfo { - memory: MemoryInfo { - database_cache, - state_cache: Default::default(), - state_db: Default::default(), - }, - io: IoInfo { - transactions: io_stats.transactions, - bytes_read: io_stats.bytes_read, - bytes_written: io_stats.bytes_written, - writes: io_stats.writes, - reads: io_stats.reads, - average_transaction_size: io_stats.avg_transaction_size() as u64, - // Light client does not track those - state_reads: 0, - state_writes: 0, - state_reads_cache: 0, - state_writes_cache: 0, - state_writes_nodes: 0, - } - }) - } - - #[cfg(target_os = "unknown")] - fn usage_info(&self) -> Option { - None - } + fn import_header( + &self, + header: Block::Header, + mut cache_at: HashMap>, + leaf_state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()> { + let mut transaction = Transaction::new(); + + let hash = header.hash(); + let number = *header.number(); + let parent_hash = *header.parent_hash(); + + for (key, maybe_val) in aux_ops { + match maybe_val { + Some(val) => transaction.set_from_vec(columns::AUX, &key, val), + None => transaction.remove(columns::AUX, &key), + } + } + + // blocks are keyed by number + hash. 
+ let lookup_key = utils::number_and_hash_to_lookup_key(number, &hash)?; + + if leaf_state.is_best() { + self.set_head_with_transaction(&mut transaction, parent_hash, (number, hash))?; + } + + utils::insert_hash_to_key_mapping(&mut transaction, columns::KEY_LOOKUP, number, hash)?; + transaction.set_from_vec(columns::HEADER, &lookup_key, header.encode()); + + let header_metadata = CachedHeaderMetadata::from(&header); + self.header_metadata_cache + .insert_header_metadata(header.hash().clone(), header_metadata); + + let is_genesis = number.is_zero(); + if is_genesis { + self.cache.0.write().set_genesis_hash(hash); + transaction.set(columns::META, meta_keys::GENESIS_HASH, hash.as_ref()); + } + + let finalized = match leaf_state { + _ if is_genesis => true, + NewBlockState::Final => true, + _ => false, + }; + + if finalized { + self.note_finalized(&mut transaction, &header, hash)?; + } + + // update changes trie configuration cache + if !cache_at.contains_key(&well_known_cache_keys::CHANGES_TRIE_CONFIG) { + if let Some(new_configuration) = + crate::changes_tries_storage::extract_new_configuration(&header) + { + cache_at.insert( + well_known_cache_keys::CHANGES_TRIE_CONFIG, + new_configuration.encode(), + ); + } + } + + { + let mut cache = self.cache.0.write(); + let cache_ops = cache + .transaction(&mut transaction) + .on_block_insert( + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { + Zero::zero() + } else { + number - One::one() + }, + ), + ComplexBlockId::new(hash, number), + cache_at, + if finalized { + CacheEntryType::Final + } else { + CacheEntryType::NonFinal + }, + )? 
+ .into_ops(); + + debug!("Light DB Commit {:?} ({})", hash, number); + + self.db.commit(transaction); + cache.commit(cache_ops).expect( + "only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed", + ); + } + + self.update_meta(hash, number, leaf_state.is_best(), finalized); + + Ok(()) + } + + fn set_head(&self, id: BlockId) -> ClientResult<()> { + if let Some(header) = self.header(id)? { + let hash = header.hash(); + let number = header.number(); + + let mut transaction = Transaction::new(); + self.set_head_with_transaction( + &mut transaction, + hash.clone(), + (number.clone(), hash.clone()), + )?; + self.db.commit(transaction); + self.update_meta(hash, header.number().clone(), true, false); + Ok(()) + } else { + Err(ClientError::UnknownBlock(format!( + "Cannot set head {:?}", + id + ))) + } + } + + fn header_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult> { + self.read_cht_root(HEADER_CHT_PREFIX, cht_size, block) + } + + fn changes_trie_cht_root( + &self, + cht_size: NumberFor, + block: NumberFor, + ) -> ClientResult> { + self.read_cht_root(CHANGES_TRIE_CHT_PREFIX, cht_size, block) + } + + fn finalize_header(&self, id: BlockId) -> ClientResult<()> { + if let Some(header) = self.header(id)? { + let mut transaction = Transaction::new(); + let hash = header.hash(); + let number = *header.number(); + self.note_finalized(&mut transaction, &header, hash.clone())?; + { + let mut cache = self.cache.0.write(); + let cache_ops = cache + .transaction(&mut transaction) + .on_block_finalize( + ComplexBlockId::new( + *header.parent_hash(), + if number.is_zero() { + Zero::zero() + } else { + number - One::one() + }, + ), + ComplexBlockId::new(hash, number), + )? 
+ .into_ops(); + + self.db.commit(transaction); + cache.commit(cache_ops).expect( + "only fails if cache with given name isn't loaded yet;\ + cache is already loaded because there are cache_ops; qed", + ); + } + self.update_meta(hash, header.number().clone(), false, true); + + Ok(()) + } else { + Err(ClientError::UnknownBlock(format!( + "Cannot finalize block {:?}", + id + ))) + } + } + + fn last_finalized(&self) -> ClientResult { + Ok(self.meta.read().finalized_hash.clone()) + } + + fn cache(&self) -> Option>> { + Some(self.cache.clone()) + } + + #[cfg(not(target_os = "unknown"))] + fn usage_info(&self) -> Option { + use sc_client_api::{IoInfo, MemoryInfo, MemorySize}; + + // TODO: reimplement IO stats + let database_cache = MemorySize::from_bytes(0); + let io_stats = self.io_stats.take_or_else(|| kvdb::IoStats::empty()); + + Some(UsageInfo { + memory: MemoryInfo { + database_cache, + state_cache: Default::default(), + state_db: Default::default(), + }, + io: IoInfo { + transactions: io_stats.transactions, + bytes_read: io_stats.bytes_read, + bytes_written: io_stats.bytes_written, + writes: io_stats.writes, + reads: io_stats.reads, + average_transaction_size: io_stats.avg_transaction_size() as u64, + // Light client does not track those + state_reads: 0, + state_writes: 0, + state_reads_cache: 0, + state_writes_cache: 0, + state_writes_nodes: 0, + }, + }) + } + + #[cfg(target_os = "unknown")] + fn usage_info(&self) -> Option { + None + } } /// Build the key for inserting header-CHT at given block. 
fn cht_key>(cht_type: u8, block: N) -> ClientResult<[u8; 5]> { - let mut key = [cht_type; 5]; - key[1..].copy_from_slice(&utils::number_index_key(block)?); - Ok(key) + let mut key = [cht_type; 5]; + key[1..].copy_from_slice(&utils::number_index_key(block)?); + Ok(key) } #[cfg(test)] pub(crate) mod tests { - use sc_client::cht; - use sp_core::ChangesTrieConfiguration; - use sp_runtime::generic::{DigestItem, ChangesTrieSignal}; - use sp_runtime::testing::{H256 as Hash, Header, Block as RawBlock, ExtrinsicWrapper}; - use sp_blockchain::{lowest_common_ancestor, tree_route}; - use super::*; - - type Block = RawBlock>; - type AuthorityId = sp_core::ed25519::Public; - - pub fn default_header(parent: &Hash, number: u64) -> Header { - Header { - number: number.into(), - parent_hash: *parent, - state_root: Hash::random(), - digest: Default::default(), - extrinsics_root: Default::default(), - } - } - - fn header_with_changes_trie(parent: &Hash, number: u64) -> Header { - let mut header = default_header(parent, number); - header.digest.logs.push(DigestItem::ChangesTrieRoot([(number % 256) as u8; 32].into())); - header - } - - fn header_with_extrinsics_root(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header { - let mut header = default_header(parent, number); - header.extrinsics_root = extrinsics_root; - header - } - - pub fn insert_block Header>( - db: &LightStorage, - cache: HashMap>, - mut header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Best, Vec::new()).unwrap(); - hash - } - - fn insert_final_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - db.import_header(header, cache, NewBlockState::Final, Vec::new()).unwrap(); - hash - } - - fn insert_non_best_block Header>( - db: &LightStorage, - cache: HashMap>, - header: F, - ) -> Hash { - let header = header(); - let hash = header.hash(); - 
db.import_header(header, cache, NewBlockState::Normal, Vec::new()).unwrap(); - hash - } - - #[test] - fn returns_known_header() { - let db = LightStorage::new_test(); - let known_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap(); - let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap(); - assert_eq!(header_by_hash, header_by_number); - } - - #[test] - fn does_not_return_unknown_header() { - let db = LightStorage::::new_test(); - assert!(db.header(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap().is_none()); - assert!(db.header(BlockId::Number(0)).unwrap().is_none()); - } - - #[test] - fn returns_info() { - let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - let info = db.info(); - assert_eq!(info.best_hash, genesis_hash); - assert_eq!(info.best_number, 0); - assert_eq!(info.genesis_hash, genesis_hash); - let best_hash = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - let info = db.info(); - assert_eq!(info.best_hash, best_hash); - assert_eq!(info.best_number, 1); - assert_eq!(info.genesis_hash, genesis_hash); - } - - #[test] - fn returns_block_status() { - let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.status(BlockId::Hash(genesis_hash)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Number(0)).unwrap(), BlockStatus::InChain); - assert_eq!(db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), BlockStatus::Unknown); - assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); - } - - #[test] - fn returns_block_hash() { - let db = LightStorage::new_test(); - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.hash(0).unwrap(), 
Some(genesis_hash)); - assert_eq!(db.hash(1).unwrap(), None); - } - - #[test] - fn import_header_works() { - let raw_db = Arc::new(sp_database::MemDb::default()); - let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - - let genesis_hash = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(raw_db.count(columns::HEADER), 1); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), 2); - - let _ = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); - assert_eq!(raw_db.count(columns::HEADER), 2); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), 4); - } - - #[test] - fn finalized_ancient_headers_are_replaced_with_cht() { - fn insert_headers Header>(header_producer: F) -> - (Arc>, LightStorage) - { - let raw_db = Arc::new(sp_database::MemDb::default()); - let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); - let cht_size: u64 = cht::size(); - let ucht_size: usize = cht_size as _; - - // insert genesis block header (never pruned) - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_producer(&Default::default(), 0)); - - // insert SIZE blocks && ensure that nothing is pruned - - for number in 0..cht::size() { - prev_hash = insert_block(&db, HashMap::new(), || header_producer(&prev_hash, 1 + number)); - } - assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // insert next SIZE blocks && ensure that nothing is pruned - for number in 0..(cht_size as _) { - prev_hash = insert_block( - &db, - HashMap::new(), - || header_producer(&prev_hash, 1 + cht_size + number), - ); - } - assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of this CHT are pruned - // nothing is yet finalized, so nothing is pruned. 
- prev_hash = insert_block( - &db, - HashMap::new(), - || header_producer(&prev_hash, 1 + cht_size + cht_size), - ); - assert_eq!(raw_db.count(columns::HEADER), 2 + ucht_size + ucht_size); - assert_eq!(raw_db.count(columns::CHT), 0); - - // now finalize the block. - for i in (0..(ucht_size + ucht_size)).map(|i| i + 1) { - db.finalize_header(BlockId::Number(i as _)).unwrap(); - } - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - (raw_db, db) - } - - // when headers are created without changes tries roots - let (raw_db, db) = insert_headers(default_header); - let cht_size: u64 = cht::size(); - assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); - assert_eq!(raw_db.count(columns::KEY_LOOKUP), (2 * (1 + cht_size + 1)) as usize); - assert_eq!(raw_db.count(columns::CHT), 1); - assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - assert!(db.changes_trie_cht_root(cht_size, cht_size / 2).is_err()); - assert!(db.changes_trie_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - - // when headers are created with changes tries roots - let (raw_db, db) = insert_headers(header_with_changes_trie); - assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); - assert_eq!(raw_db.count(columns::CHT), 2); - assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); - assert!(db.header_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.header_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - assert!(db.changes_trie_cht_root(cht_size, cht_size / 2).unwrap().is_some()); - assert!(db.changes_trie_cht_root(cht_size, cht_size + cht_size / 2).unwrap().is_none()); - } - - #[test] - fn get_cht_fails_for_genesis_block() { - 
assert!(LightStorage::::new_test().header_cht_root(cht::size(), 0).is_err()); - } - - #[test] - fn get_cht_fails_for_non_existent_cht() { - let cht_size: u64 = cht::size(); - assert!(LightStorage::::new_test().header_cht_root(cht_size, cht_size / 2).unwrap().is_none()); - } - - #[test] - fn get_cht_works() { - let db = LightStorage::new_test(); - - // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created - let mut prev_hash = insert_final_block(&db, HashMap::new(), || header_with_changes_trie(&Default::default(), 0)); - let cht_size: u64 = cht::size(); - let ucht_size: usize = cht_size as _; - for i in 1..1 + ucht_size + ucht_size + 1 { - prev_hash = insert_block(&db, HashMap::new(), || header_with_changes_trie(&prev_hash, i as u64)); - db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); - } - - let cht_root_1 = db.header_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db.header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2).unwrap().unwrap(); - let cht_root_3 = db.header_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - - let cht_root_1 = db.changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)).unwrap().unwrap(); - let cht_root_2 = db.changes_trie_cht_root( - cht_size, - cht::start_number(cht_size, 0) + cht_size / 2, - ).unwrap().unwrap(); - let cht_root_3 = db.changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)).unwrap().unwrap(); - assert_eq!(cht_root_1, cht_root_2); - assert_eq!(cht_root_2, cht_root_3); - } - - #[test] - fn tree_route_works() { - let db = LightStorage::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - // fork from genesis: 3 prong. 
- let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); - let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); - let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); - - // fork from genesis: 2 prong. - let b1 = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, Hash::from([1; 32]))); - let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); - - { - let tree_route = tree_route(&db, a3, b2).unwrap(); - - assert_eq!(tree_route.common_block().hash, block0); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2, a1]); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![b1, b2]); - } - - { - let tree_route = tree_route(&db, a1, a3).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert!(tree_route.retracted().is_empty()); - assert_eq!(tree_route.enacted().iter().map(|r| r.hash).collect::>(), vec![a2, a3]); - } - - { - let tree_route = tree_route(&db, a3, a1).unwrap(); - - assert_eq!(tree_route.common_block().hash, a1); - assert_eq!(tree_route.retracted().iter().map(|r| r.hash).collect::>(), vec![a3, a2]); - assert!(tree_route.enacted().is_empty()); - } - - { - let tree_route = tree_route(&db, a2, a2).unwrap(); - - assert_eq!(tree_route.common_block().hash, a2); - assert!(tree_route.retracted().is_empty()); - assert!(tree_route.enacted().is_empty()); - } - } - - #[test] - fn lowest_common_ancestor_works() { - let db = LightStorage::new_test(); - let block0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - - // fork from genesis: 3 prong. - let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); - let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); - let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); - - // fork from genesis: 2 prong. 
- let b1 = insert_block(&db, HashMap::new(), || header_with_extrinsics_root(&block0, 1, Hash::from([1; 32]))); - let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); - - { - let lca = lowest_common_ancestor(&db, a3, b2).unwrap(); - - assert_eq!(lca.hash, block0); - assert_eq!(lca.number, 0); - } - - { - let lca = lowest_common_ancestor(&db, a1, a3).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a3, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a2, a3).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - - { - let lca = lowest_common_ancestor(&db, a2, a1).unwrap(); - - assert_eq!(lca.hash, a1); - assert_eq!(lca.number, 1); - } - - { - let lca = lowest_common_ancestor(&db, a2, a2).unwrap(); - - assert_eq!(lca.hash, a2); - assert_eq!(lca.number, 2); - } - } - - #[test] - fn authorities_are_cached() { - let db = LightStorage::new_test(); - - fn run_checks(db: &LightStorage, max: u64, checks: &[(u64, Option>)]) { - for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { - let actual = authorities(db.cache(), BlockId::Number(*at)); - assert_eq!(*expected, actual); - } - } - - fn same_authorities() -> HashMap> { - HashMap::new() - } - - fn make_authorities(authorities: Vec) -> HashMap> { - let mut map = HashMap::new(); - map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - map - } - - fn authorities(cache: &dyn BlockchainCache, at: BlockId) -> Option> { - cache.get_at(&well_known_cache_keys::AUTHORITIES, &at).unwrap_or(None) - .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) - } - - let auth1 = || AuthorityId::from_raw([1u8; 32]); - let auth2 = || AuthorityId::from_raw([2u8; 32]); - let auth3 = || AuthorityId::from_raw([3u8; 32]); - let auth4 = || AuthorityId::from_raw([4u8; 32]); - let auth5 = || AuthorityId::from_raw([5u8; 32]); - let auth6 
= || AuthorityId::from_raw([6u8; 32]); - - let (hash2, hash6) = { - // first few blocks are instantly finalized - // B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(1, 2) - let checks = vec![ - (0, None), - (1, None), - (2, Some(vec![auth1()])), - (3, Some(vec![auth1()])), - (4, Some(vec![auth1(), auth2()])), - (5, Some(vec![auth1(), auth2()])), - (6, Some(vec![auth1(), auth2()])), - ]; - - let hash0 = insert_final_block(&db, same_authorities(), || default_header(&Default::default(), 0)); - run_checks(&db, 0, &checks); - let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); - run_checks(&db, 1, &checks); - let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash1, 2)); - run_checks(&db, 2, &checks); - let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); - run_checks(&db, 3, &checks); - let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash3, 4)); - run_checks(&db, 4, &checks); - let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash4, 5)); - run_checks(&db, 5, &checks); - let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); - run_checks(&db, 6, &checks); - - (hash2, hash6) - }; - - { - // some older non-best blocks are inserted - // ... 
-> B2(1) -> B2_1(1) -> B2_2(2) - // => the cache ignores all writes before best finalized block - let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || default_header(&hash2, 3)); - assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_1))); - let hash2_2 = insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || default_header(&hash2_1, 4)); - assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_2))); - } - - let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = { - // inserting non-finalized blocks - // B6(None) -> B7(3) -> B8(3) - // \> B6_1(4) -> B6_2(4) - // \> B6_1_1(5) - // \> B6_1_2(6) -> B6_1_3(7) - - let hash7 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash6, 7)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - let hash8 = insert_block(&db, make_authorities(vec![auth3()]), || default_header(&hash7, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - let hash6_1 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6, 7)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - 
assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - let hash6_2 = insert_block(&db, make_authorities(vec![auth4()]), || default_header(&hash6_1, 8)); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), Some(vec![auth3()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - - (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) - }; - - { - // finalize block hash6_1 - db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); - 
assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), Some(vec![auth5()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), Some(vec![auth6()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - // finalize block hash6_2 - db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); - assert_eq!( - authorities(db.cache(), BlockId::Hash(hash6)), - Some(vec![auth1(), auth2()]), - ); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1)), Some(vec![auth4()])); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); - assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_2)), Some(vec![auth4()])); - } - } - - #[test] - fn database_is_reopened() { - let db = LightStorage::new_test(); - let hash0 = insert_final_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!(db.info().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), hash0); - - let db = db.db; - let db = LightStorage::from_kvdb(db).unwrap(); - assert_eq!(db.info().best_hash, hash0); - assert_eq!(db.header(BlockId::Hash::(hash0)).unwrap().unwrap().hash(), hash0); - } - - #[test] - fn aux_store_works() { - let db = LightStorage::::new_test(); - - // insert aux1 + aux2 using direct store access - db.insert_aux(&[(&[1][..], &[101][..]), (&[2][..], &[102][..])], ::std::iter::empty()).unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), None); - - // delete aux1 + insert aux3 using import 
operation - db.import_header(default_header(&Default::default(), 0), HashMap::new(), NewBlockState::Best, vec![ - (vec![3], Some(vec![103])), - (vec![1], None), - ]).unwrap(); - - // check aux values - assert_eq!(db.get_aux(&[1]).unwrap(), None); - assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); - assert_eq!(db.get_aux(&[3]).unwrap(), Some(vec![103])); - } - - #[test] - fn cache_can_be_initialized_after_genesis_inserted() { - let (genesis_hash, storage) = { - let db = LightStorage::::new_test(); - - // before cache is initialized => Err - assert!(db.cache().get_at(b"test", &BlockId::Number(0)).is_err()); - - // insert genesis block (no value for cache is provided) - let mut genesis_hash = None; - insert_block(&db, HashMap::new(), || { - let header = default_header(&Default::default(), 0); - genesis_hash = Some(header.hash()); - header - }); - - // after genesis is inserted => None - assert_eq!(db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), None); - - // initialize cache - db.cache().initialize(b"test", vec![42]).unwrap(); - - // after genesis is inserted + cache is initialized => Some - assert_eq!( - db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), - Some(((0, genesis_hash.unwrap()), None, vec![42])), - ); - - (genesis_hash, db.db) - }; - - // restart && check that after restart value is read from the cache - let db = LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); - assert_eq!( - db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), - Some(((0, genesis_hash.unwrap()), None, vec![42])), - ); - } - - #[test] - fn changes_trie_configuration_is_tracked_on_light_client() { - let db = LightStorage::::new_test(); - - let new_config = Some(ChangesTrieConfiguration::new(2, 2)); - - // insert block#0 && block#1 (no value for cache is provided) - let hash0 = insert_block(&db, HashMap::new(), || default_header(&Default::default(), 0)); - assert_eq!( - 
db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(0)).unwrap() - .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), - None, - ); - - // insert configuration at block#1 (starts from block#2) - insert_block(&db, HashMap::new(), || { - let mut header = default_header(&hash0, 1); - header.digest_mut().push( - DigestItem::ChangesTrieSignal(ChangesTrieSignal::NewConfiguration(new_config.clone())) - ); - header - }); - assert_eq!( - db.cache().get_at(&well_known_cache_keys::CHANGES_TRIE_CONFIG, &BlockId::Number(1)).unwrap() - .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), - Some(new_config), - ); - } + use super::*; + use sc_client::cht; + use sp_blockchain::{lowest_common_ancestor, tree_route}; + use sp_core::ChangesTrieConfiguration; + use sp_runtime::generic::{ChangesTrieSignal, DigestItem}; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, Header, H256 as Hash}; + + type Block = RawBlock>; + type AuthorityId = sp_core::ed25519::Public; + + pub fn default_header(parent: &Hash, number: u64) -> Header { + Header { + number: number.into(), + parent_hash: *parent, + state_root: Hash::random(), + digest: Default::default(), + extrinsics_root: Default::default(), + } + } + + fn header_with_changes_trie(parent: &Hash, number: u64) -> Header { + let mut header = default_header(parent, number); + header.digest.logs.push(DigestItem::ChangesTrieRoot( + [(number % 256) as u8; 32].into(), + )); + header + } + + fn header_with_extrinsics_root(parent: &Hash, number: u64, extrinsics_root: Hash) -> Header { + let mut header = default_header(parent, number); + header.extrinsics_root = extrinsics_root; + header + } + + pub fn insert_block Header>( + db: &LightStorage, + cache: HashMap>, + mut header: F, + ) -> Hash { + let header = header(); + let hash = header.hash(); + db.import_header(header, cache, NewBlockState::Best, Vec::new()) + .unwrap(); + hash + } + + fn insert_final_block Header>( + db: 
&LightStorage, + cache: HashMap>, + header: F, + ) -> Hash { + let header = header(); + let hash = header.hash(); + db.import_header(header, cache, NewBlockState::Final, Vec::new()) + .unwrap(); + hash + } + + fn insert_non_best_block Header>( + db: &LightStorage, + cache: HashMap>, + header: F, + ) -> Hash { + let header = header(); + let hash = header.hash(); + db.import_header(header, cache, NewBlockState::Normal, Vec::new()) + .unwrap(); + hash + } + + #[test] + fn returns_known_header() { + let db = LightStorage::new_test(); + let known_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + let header_by_hash = db.header(BlockId::Hash(known_hash)).unwrap().unwrap(); + let header_by_number = db.header(BlockId::Number(0)).unwrap().unwrap(); + assert_eq!(header_by_hash, header_by_number); + } + + #[test] + fn does_not_return_unknown_header() { + let db = LightStorage::::new_test(); + assert!(db + .header(BlockId::Hash(Hash::from_low_u64_be(1))) + .unwrap() + .is_none()); + assert!(db.header(BlockId::Number(0)).unwrap().is_none()); + } + + #[test] + fn returns_info() { + let db = LightStorage::new_test(); + let genesis_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + let info = db.info(); + assert_eq!(info.best_hash, genesis_hash); + assert_eq!(info.best_number, 0); + assert_eq!(info.genesis_hash, genesis_hash); + let best_hash = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); + let info = db.info(); + assert_eq!(info.best_hash, best_hash); + assert_eq!(info.best_number, 1); + assert_eq!(info.genesis_hash, genesis_hash); + } + + #[test] + fn returns_block_status() { + let db = LightStorage::new_test(); + let genesis_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!( + db.status(BlockId::Hash(genesis_hash)).unwrap(), + BlockStatus::InChain + ); + assert_eq!(db.status(BlockId::Number(0)).unwrap(), 
BlockStatus::InChain); + assert_eq!( + db.status(BlockId::Hash(Hash::from_low_u64_be(1))).unwrap(), + BlockStatus::Unknown + ); + assert_eq!(db.status(BlockId::Number(1)).unwrap(), BlockStatus::Unknown); + } + + #[test] + fn returns_block_hash() { + let db = LightStorage::new_test(); + let genesis_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!(db.hash(0).unwrap(), Some(genesis_hash)); + assert_eq!(db.hash(1).unwrap(), None); + } + + #[test] + fn import_header_works() { + let raw_db = Arc::new(sp_database::MemDb::default()); + let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); + + let genesis_hash = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!(raw_db.count(columns::HEADER), 1); + assert_eq!(raw_db.count(columns::KEY_LOOKUP), 2); + + let _ = insert_block(&db, HashMap::new(), || default_header(&genesis_hash, 1)); + assert_eq!(raw_db.count(columns::HEADER), 2); + assert_eq!(raw_db.count(columns::KEY_LOOKUP), 4); + } + + #[test] + fn finalized_ancient_headers_are_replaced_with_cht() { + fn insert_headers Header>( + header_producer: F, + ) -> (Arc>, LightStorage) { + let raw_db = Arc::new(sp_database::MemDb::default()); + let db = LightStorage::from_kvdb(raw_db.clone()).unwrap(); + let cht_size: u64 = cht::size(); + let ucht_size: usize = cht_size as _; + + // insert genesis block header (never pruned) + let mut prev_hash = insert_final_block(&db, HashMap::new(), || { + header_producer(&Default::default(), 0) + }); + + // insert SIZE blocks && ensure that nothing is pruned + + for number in 0..cht::size() { + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + number) + }); + } + assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size); + assert_eq!(raw_db.count(columns::CHT), 0); + + // insert next SIZE blocks && ensure that nothing is pruned + for number in 0..(cht_size as _) { + prev_hash = insert_block(&db, 
HashMap::new(), || { + header_producer(&prev_hash, 1 + cht_size + number) + }); + } + assert_eq!(raw_db.count(columns::HEADER), 1 + ucht_size + ucht_size); + assert_eq!(raw_db.count(columns::CHT), 0); + + // insert block #{2 * cht::size() + 1} && check that new CHT is created + headers of this CHT are pruned + // nothing is yet finalized, so nothing is pruned. + prev_hash = insert_block(&db, HashMap::new(), || { + header_producer(&prev_hash, 1 + cht_size + cht_size) + }); + assert_eq!(raw_db.count(columns::HEADER), 2 + ucht_size + ucht_size); + assert_eq!(raw_db.count(columns::CHT), 0); + + // now finalize the block. + for i in (0..(ucht_size + ucht_size)).map(|i| i + 1) { + db.finalize_header(BlockId::Number(i as _)).unwrap(); + } + db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); + (raw_db, db) + } + + // when headers are created without changes tries roots + let (raw_db, db) = insert_headers(default_header); + let cht_size: u64 = cht::size(); + assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); + assert_eq!( + raw_db.count(columns::KEY_LOOKUP), + (2 * (1 + cht_size + 1)) as usize + ); + assert_eq!(raw_db.count(columns::CHT), 1); + assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); + assert!(db + .header_cht_root(cht_size, cht_size / 2) + .unwrap() + .is_some()); + assert!(db + .header_cht_root(cht_size, cht_size + cht_size / 2) + .unwrap() + .is_none()); + assert!(db.changes_trie_cht_root(cht_size, cht_size / 2).is_err()); + assert!(db + .changes_trie_cht_root(cht_size, cht_size + cht_size / 2) + .unwrap() + .is_none()); + + // when headers are created with changes tries roots + let (raw_db, db) = insert_headers(header_with_changes_trie); + assert_eq!(raw_db.count(columns::HEADER), (1 + cht_size + 1) as usize); + assert_eq!(raw_db.count(columns::CHT), 2); + assert!((0..cht_size as _).all(|i| db.header(BlockId::Number(1 + i)).unwrap().is_none())); + assert!(db + .header_cht_root(cht_size, cht_size 
/ 2) + .unwrap() + .is_some()); + assert!(db + .header_cht_root(cht_size, cht_size + cht_size / 2) + .unwrap() + .is_none()); + assert!(db + .changes_trie_cht_root(cht_size, cht_size / 2) + .unwrap() + .is_some()); + assert!(db + .changes_trie_cht_root(cht_size, cht_size + cht_size / 2) + .unwrap() + .is_none()); + } + + #[test] + fn get_cht_fails_for_genesis_block() { + assert!(LightStorage::::new_test() + .header_cht_root(cht::size(), 0) + .is_err()); + } + + #[test] + fn get_cht_fails_for_non_existent_cht() { + let cht_size: u64 = cht::size(); + assert!(LightStorage::::new_test() + .header_cht_root(cht_size, cht_size / 2) + .unwrap() + .is_none()); + } + + #[test] + fn get_cht_works() { + let db = LightStorage::new_test(); + + // insert 1 + SIZE + SIZE + 1 blocks so that CHT#0 is created + let mut prev_hash = insert_final_block(&db, HashMap::new(), || { + header_with_changes_trie(&Default::default(), 0) + }); + let cht_size: u64 = cht::size(); + let ucht_size: usize = cht_size as _; + for i in 1..1 + ucht_size + ucht_size + 1 { + prev_hash = insert_block(&db, HashMap::new(), || { + header_with_changes_trie(&prev_hash, i as u64) + }); + db.finalize_header(BlockId::Hash(prev_hash)).unwrap(); + } + + let cht_root_1 = db + .header_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap() + .unwrap(); + let cht_root_2 = db + .header_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = db + .header_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap() + .unwrap(); + assert_eq!(cht_root_1, cht_root_2); + assert_eq!(cht_root_2, cht_root_3); + + let cht_root_1 = db + .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0)) + .unwrap() + .unwrap(); + let cht_root_2 = db + .changes_trie_cht_root(cht_size, cht::start_number(cht_size, 0) + cht_size / 2) + .unwrap() + .unwrap(); + let cht_root_3 = db + .changes_trie_cht_root(cht_size, cht::end_number(cht_size, 0)) + .unwrap() + .unwrap(); + 
assert_eq!(cht_root_1, cht_root_2); + assert_eq!(cht_root_2, cht_root_3); + } + + #[test] + fn tree_route_works() { + let db = LightStorage::new_test(); + let block0 = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + + // fork from genesis: 3 prong. + let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); + let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); + let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); + + // fork from genesis: 2 prong. + let b1 = insert_block(&db, HashMap::new(), || { + header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) + }); + let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); + + { + let tree_route = tree_route(&db, a3, b2).unwrap(); + + assert_eq!(tree_route.common_block().hash, block0); + assert_eq!( + tree_route + .retracted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a3, a2, a1] + ); + assert_eq!( + tree_route + .enacted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![b1, b2] + ); + } + + { + let tree_route = tree_route(&db, a1, a3).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert!(tree_route.retracted().is_empty()); + assert_eq!( + tree_route + .enacted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a2, a3] + ); + } + + { + let tree_route = tree_route(&db, a3, a1).unwrap(); + + assert_eq!(tree_route.common_block().hash, a1); + assert_eq!( + tree_route + .retracted() + .iter() + .map(|r| r.hash) + .collect::>(), + vec![a3, a2] + ); + assert!(tree_route.enacted().is_empty()); + } + + { + let tree_route = tree_route(&db, a2, a2).unwrap(); + + assert_eq!(tree_route.common_block().hash, a2); + assert!(tree_route.retracted().is_empty()); + assert!(tree_route.enacted().is_empty()); + } + } + + #[test] + fn lowest_common_ancestor_works() { + let db = LightStorage::new_test(); + let block0 = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 
0) + }); + + // fork from genesis: 3 prong. + let a1 = insert_block(&db, HashMap::new(), || default_header(&block0, 1)); + let a2 = insert_block(&db, HashMap::new(), || default_header(&a1, 2)); + let a3 = insert_block(&db, HashMap::new(), || default_header(&a2, 3)); + + // fork from genesis: 2 prong. + let b1 = insert_block(&db, HashMap::new(), || { + header_with_extrinsics_root(&block0, 1, Hash::from([1; 32])) + }); + let b2 = insert_block(&db, HashMap::new(), || default_header(&b1, 2)); + + { + let lca = lowest_common_ancestor(&db, a3, b2).unwrap(); + + assert_eq!(lca.hash, block0); + assert_eq!(lca.number, 0); + } + + { + let lca = lowest_common_ancestor(&db, a1, a3).unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor(&db, a3, a1).unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor(&db, a2, a3).unwrap(); + + assert_eq!(lca.hash, a2); + assert_eq!(lca.number, 2); + } + + { + let lca = lowest_common_ancestor(&db, a2, a1).unwrap(); + + assert_eq!(lca.hash, a1); + assert_eq!(lca.number, 1); + } + + { + let lca = lowest_common_ancestor(&db, a2, a2).unwrap(); + + assert_eq!(lca.hash, a2); + assert_eq!(lca.number, 2); + } + } + + #[test] + fn authorities_are_cached() { + let db = LightStorage::new_test(); + + fn run_checks( + db: &LightStorage, + max: u64, + checks: &[(u64, Option>)], + ) { + for (at, expected) in checks.iter().take_while(|(at, _)| *at <= max) { + let actual = authorities(db.cache(), BlockId::Number(*at)); + assert_eq!(*expected, actual); + } + } + + fn same_authorities() -> HashMap> { + HashMap::new() + } + + fn make_authorities( + authorities: Vec, + ) -> HashMap> { + let mut map = HashMap::new(); + map.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); + map + } + + fn authorities( + cache: &dyn BlockchainCache, + at: BlockId, + ) -> Option> { + cache + .get_at(&well_known_cache_keys::AUTHORITIES, &at) + 
.unwrap_or(None) + .and_then(|(_, _, val)| Decode::decode(&mut &val[..]).ok()) + } + + let auth1 = || AuthorityId::from_raw([1u8; 32]); + let auth2 = || AuthorityId::from_raw([2u8; 32]); + let auth3 = || AuthorityId::from_raw([3u8; 32]); + let auth4 = || AuthorityId::from_raw([4u8; 32]); + let auth5 = || AuthorityId::from_raw([5u8; 32]); + let auth6 = || AuthorityId::from_raw([6u8; 32]); + + let (hash2, hash6) = { + // first few blocks are instantly finalized + // B0(None) -> B1(None) -> B2(1) -> B3(1) -> B4(1, 2) -> B5(1, 2) -> B6(1, 2) + let checks = vec![ + (0, None), + (1, None), + (2, Some(vec![auth1()])), + (3, Some(vec![auth1()])), + (4, Some(vec![auth1(), auth2()])), + (5, Some(vec![auth1(), auth2()])), + (6, Some(vec![auth1(), auth2()])), + ]; + + let hash0 = insert_final_block(&db, same_authorities(), || { + default_header(&Default::default(), 0) + }); + run_checks(&db, 0, &checks); + let hash1 = insert_final_block(&db, same_authorities(), || default_header(&hash0, 1)); + run_checks(&db, 1, &checks); + let hash2 = insert_final_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash1, 2) + }); + run_checks(&db, 2, &checks); + let hash3 = insert_final_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); + run_checks(&db, 3, &checks); + let hash4 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash3, 4) + }); + run_checks(&db, 4, &checks); + let hash5 = insert_final_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash4, 5) + }); + run_checks(&db, 5, &checks); + let hash6 = insert_final_block(&db, same_authorities(), || default_header(&hash5, 6)); + run_checks(&db, 6, &checks); + + (hash2, hash6) + }; + + { + // some older non-best blocks are inserted + // ... 
-> B2(1) -> B2_1(1) -> B2_2(2) + // => the cache ignores all writes before best finalized block + let hash2_1 = insert_non_best_block(&db, make_authorities(vec![auth1()]), || { + default_header(&hash2, 3) + }); + assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_1))); + let hash2_2 = + insert_non_best_block(&db, make_authorities(vec![auth1(), auth2()]), || { + default_header(&hash2_1, 4) + }); + assert_eq!(None, authorities(db.cache(), BlockId::Hash(hash2_2))); + } + + let (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) = { + // inserting non-finalized blocks + // B6(None) -> B7(3) -> B8(3) + // \> B6_1(4) -> B6_2(4) + // \> B6_1_1(5) + // \> B6_1_2(6) -> B6_1_3(7) + + let hash7 = insert_block(&db, make_authorities(vec![auth3()]), || { + default_header(&hash6, 7) + }); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + let hash8 = insert_block(&db, make_authorities(vec![auth3()]), || { + default_header(&hash7, 8) + }); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + let hash6_1 = insert_block(&db, make_authorities(vec![auth4()]), || { + default_header(&hash6, 7) + }); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + let hash6_1_1 = insert_non_best_block(&db, make_authorities(vec![auth5()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!( + 
authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + let hash6_1_2 = insert_non_best_block(&db, make_authorities(vec![auth6()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1_2)), + Some(vec![auth6()]) + ); + let hash6_2 = insert_block(&db, make_authorities(vec![auth4()]), || { + default_header(&hash6_1, 8) + }); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash7)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash8)), + Some(vec![auth3()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1_2)), + Some(vec![auth6()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_2)), + Some(vec![auth4()]) + ); + + (hash7, hash8, hash6_1, hash6_2, hash6_1_1, hash6_1_2) + }; + + { + // finalize block hash6_1 + 
db.finalize_header(BlockId::Hash(hash6_1)).unwrap(); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1_1)), + Some(vec![auth5()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1_2)), + Some(vec![auth6()]) + ); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_2)), + Some(vec![auth4()]) + ); + // finalize block hash6_2 + db.finalize_header(BlockId::Hash(hash6_2)).unwrap(); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6)), + Some(vec![auth1(), auth2()]), + ); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash7)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash8)), None); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_1)), + Some(vec![auth4()]) + ); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_1)), None); + assert_eq!(authorities(db.cache(), BlockId::Hash(hash6_1_2)), None); + assert_eq!( + authorities(db.cache(), BlockId::Hash(hash6_2)), + Some(vec![auth4()]) + ); + } + } + + #[test] + fn database_is_reopened() { + let db = LightStorage::new_test(); + let hash0 = insert_final_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!(db.info().best_hash, hash0); + assert_eq!( + db.header(BlockId::Hash(hash0)).unwrap().unwrap().hash(), + hash0 + ); + + let db = db.db; + let db = LightStorage::from_kvdb(db).unwrap(); + assert_eq!(db.info().best_hash, hash0); + assert_eq!( + db.header(BlockId::Hash::(hash0)) + .unwrap() + .unwrap() + .hash(), + hash0 + ); + } + + #[test] + fn aux_store_works() { + let db = LightStorage::::new_test(); + + // insert aux1 + aux2 using direct store access + db.insert_aux( + &[(&[1][..], 
&[101][..]), (&[2][..], &[102][..])], + ::std::iter::empty(), + ) + .unwrap(); + + // check aux values + assert_eq!(db.get_aux(&[1]).unwrap(), Some(vec![101])); + assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); + assert_eq!(db.get_aux(&[3]).unwrap(), None); + + // delete aux1 + insert aux3 using import operation + db.import_header( + default_header(&Default::default(), 0), + HashMap::new(), + NewBlockState::Best, + vec![(vec![3], Some(vec![103])), (vec![1], None)], + ) + .unwrap(); + + // check aux values + assert_eq!(db.get_aux(&[1]).unwrap(), None); + assert_eq!(db.get_aux(&[2]).unwrap(), Some(vec![102])); + assert_eq!(db.get_aux(&[3]).unwrap(), Some(vec![103])); + } + + #[test] + fn cache_can_be_initialized_after_genesis_inserted() { + let (genesis_hash, storage) = { + let db = LightStorage::::new_test(); + + // before cache is initialized => Err + assert!(db.cache().get_at(b"test", &BlockId::Number(0)).is_err()); + + // insert genesis block (no value for cache is provided) + let mut genesis_hash = None; + insert_block(&db, HashMap::new(), || { + let header = default_header(&Default::default(), 0); + genesis_hash = Some(header.hash()); + header + }); + + // after genesis is inserted => None + assert_eq!( + db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), + None + ); + + // initialize cache + db.cache().initialize(b"test", vec![42]).unwrap(); + + // after genesis is inserted + cache is initialized => Some + assert_eq!( + db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), + Some(((0, genesis_hash.unwrap()), None, vec![42])), + ); + + (genesis_hash, db.db) + }; + + // restart && check that after restart value is read from the cache + let db = + LightStorage::::from_kvdb(storage as Arc<_>).expect("failed to create test-db"); + assert_eq!( + db.cache().get_at(b"test", &BlockId::Number(0)).unwrap(), + Some(((0, genesis_hash.unwrap()), None, vec![42])), + ); + } + + #[test] + fn changes_trie_configuration_is_tracked_on_light_client() { + let db 
= LightStorage::::new_test(); + + let new_config = Some(ChangesTrieConfiguration::new(2, 2)); + + // insert block#0 && block#1 (no value for cache is provided) + let hash0 = insert_block(&db, HashMap::new(), || { + default_header(&Default::default(), 0) + }); + assert_eq!( + db.cache() + .get_at( + &well_known_cache_keys::CHANGES_TRIE_CONFIG, + &BlockId::Number(0) + ) + .unwrap() + .map(|(_, _, v)| ChangesTrieConfiguration::decode(&mut &v[..]).unwrap()), + None, + ); + + // insert configuration at block#1 (starts from block#2) + insert_block(&db, HashMap::new(), || { + let mut header = default_header(&hash0, 1); + header.digest_mut().push(DigestItem::ChangesTrieSignal( + ChangesTrieSignal::NewConfiguration(new_config.clone()), + )); + header + }); + assert_eq!( + db.cache() + .get_at( + &well_known_cache_keys::CHANGES_TRIE_CONFIG, + &BlockId::Number(1) + ) + .unwrap() + .map(|(_, _, v)| Option::::decode(&mut &v[..]).unwrap()), + Some(new_config), + ); + } } diff --git a/client/db/src/offchain.rs b/client/db/src/offchain.rs index 8c58d5f42c..3ec344edac 100644 --- a/client/db/src/offchain.rs +++ b/client/db/src/offchain.rs @@ -16,10 +16,7 @@ //! RocksDB-based offchain workers local storage. 
-use std::{ - collections::HashMap, - sync::Arc, -}; +use std::{collections::HashMap, sync::Arc}; use crate::{columns, Database, DbHash, Transaction}; use parking_lot::Mutex; @@ -27,115 +24,122 @@ use parking_lot::Mutex; /// Offchain local storage #[derive(Clone)] pub struct LocalStorage { - db: Arc>, - locks: Arc, Arc>>>>, + db: Arc>, + locks: Arc, Arc>>>>, } impl std::fmt::Debug for LocalStorage { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("LocalStorage") - .finish() - } + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_struct("LocalStorage").finish() + } } impl LocalStorage { - /// Create new offchain storage for tests (backed by memorydb) - #[cfg(any(test, feature = "test-helpers"))] - pub fn new_test() -> Self { - let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); - let db = sp_database::as_database(db); - Self::new(db as _) - } - - /// Create offchain local storage with given `KeyValueDB` backend. - pub fn new(db: Arc>) -> Self { - Self { - db, - locks: Default::default(), - } - } + /// Create new offchain storage for tests (backed by memorydb) + #[cfg(any(test, feature = "test-helpers"))] + pub fn new_test() -> Self { + let db = kvdb_memorydb::create(crate::utils::NUM_COLUMNS); + let db = sp_database::as_database(db); + Self::new(db as _) + } + + /// Create offchain local storage with given `KeyValueDB` backend. 
+ pub fn new(db: Arc>) -> Self { + Self { + db, + locks: Default::default(), + } + } } impl sp_core::offchain::OffchainStorage for LocalStorage { - fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let key: Vec = prefix.iter().chain(key).cloned().collect(); - let mut tx = Transaction::new(); - tx.set(columns::OFFCHAIN, &key, value); - - self.db.commit(tx); - } - - fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { - let key: Vec = prefix.iter().chain(key).cloned().collect(); - self.db.get(columns::OFFCHAIN, &key) - } - - fn compare_and_set( - &mut self, - prefix: &[u8], - item_key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - let key: Vec = prefix.iter().chain(item_key).cloned().collect(); - let key_lock = { - let mut locks = self.locks.lock(); - locks.entry(key.clone()).or_default().clone() - }; - - let is_set; - { - let _key_guard = key_lock.lock(); - let val = self.db.get(columns::OFFCHAIN, &key); - is_set = val.as_ref().map(|x| &**x) == old_value; - - if is_set { - self.set(prefix, item_key, new_value) - } - } - - // clean the lock map if we're the only entry - let mut locks = self.locks.lock(); - { - drop(key_lock); - let key_lock = locks.get_mut(&key); - if let Some(_) = key_lock.and_then(Arc::get_mut) { - locks.remove(&key); - } - } - is_set - } + fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { + let key: Vec = prefix.iter().chain(key).cloned().collect(); + let mut tx = Transaction::new(); + tx.set(columns::OFFCHAIN, &key, value); + + self.db.commit(tx); + } + + fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { + let key: Vec = prefix.iter().chain(key).cloned().collect(); + self.db.get(columns::OFFCHAIN, &key) + } + + fn compare_and_set( + &mut self, + prefix: &[u8], + item_key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + let key: Vec = prefix.iter().chain(item_key).cloned().collect(); + let key_lock = { + let mut locks = self.locks.lock(); + 
locks.entry(key.clone()).or_default().clone() + }; + + let is_set; + { + let _key_guard = key_lock.lock(); + let val = self.db.get(columns::OFFCHAIN, &key); + is_set = val.as_ref().map(|x| &**x) == old_value; + + if is_set { + self.set(prefix, item_key, new_value) + } + } + + // clean the lock map if we're the only entry + let mut locks = self.locks.lock(); + { + drop(key_lock); + let key_lock = locks.get_mut(&key); + if let Some(_) = key_lock.and_then(Arc::get_mut) { + locks.remove(&key); + } + } + is_set + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::offchain::OffchainStorage; - - #[test] - fn should_compare_and_set_and_clear_the_locks_map() { - let mut storage = LocalStorage::new_test(); - let prefix = b"prefix"; - let key = b"key"; - let value = b"value"; - - storage.set(prefix, key, value); - assert_eq!(storage.get(prefix, key), Some(value.to_vec())); - - assert_eq!(storage.compare_and_set(prefix, key, Some(value), b"asd"), true); - assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); - assert!(storage.locks.lock().is_empty(), "Locks map should be empty!"); - } - - #[test] - fn should_compare_and_set_on_empty_field() { - let mut storage = LocalStorage::new_test(); - let prefix = b"prefix"; - let key = b"key"; - - assert_eq!(storage.compare_and_set(prefix, key, None, b"asd"), true); - assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); - assert!(storage.locks.lock().is_empty(), "Locks map should be empty!"); - } - + use super::*; + use sp_core::offchain::OffchainStorage; + + #[test] + fn should_compare_and_set_and_clear_the_locks_map() { + let mut storage = LocalStorage::new_test(); + let prefix = b"prefix"; + let key = b"key"; + let value = b"value"; + + storage.set(prefix, key, value); + assert_eq!(storage.get(prefix, key), Some(value.to_vec())); + + assert_eq!( + storage.compare_and_set(prefix, key, Some(value), b"asd"), + true + ); + assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); + assert!( + 
storage.locks.lock().is_empty(), + "Locks map should be empty!" + ); + } + + #[test] + fn should_compare_and_set_on_empty_field() { + let mut storage = LocalStorage::new_test(); + let prefix = b"prefix"; + let key = b"key"; + + assert_eq!(storage.compare_and_set(prefix, key, None, b"asd"), true); + assert_eq!(storage.get(prefix, key), Some(b"asd".to_vec())); + assert!( + storage.locks.lock().is_empty(), + "Locks map should be empty!" + ); + } } diff --git a/client/db/src/parity_db.rs b/client/db/src/parity_db.rs index a4e64d310b..d3643107a0 100644 --- a/client/db/src/parity_db.rs +++ b/client/db/src/parity_db.rs @@ -15,42 +15,45 @@ // along with Substrate. If not, see . /// A `Database` adapter for parity-db. - -use sp_database::{Database, Change, Transaction, ColumnId}; +use sp_database::{Change, ColumnId, Database, Transaction}; struct DbAdapter(parity_db::Db); fn handle_err(result: parity_db::Result) -> T { - match result { - Ok(r) => r, - Err(e) => { - panic!("Critical database eror: {:?}", e); - } - } + match result { + Ok(r) => r, + Err(e) => { + panic!("Critical database eror: {:?}", e); + } + } } /// Wrap RocksDb database into a trait object that implements `sp_database::Database` -pub fn open(path: &std::path::Path, num_columns: u32) -> parity_db::Result>> { - let db = parity_db::Db::with_columns(path, num_columns as u8)?; - Ok(std::sync::Arc::new(DbAdapter(db))) +pub fn open( + path: &std::path::Path, + num_columns: u32, +) -> parity_db::Result>> { + let db = parity_db::Db::with_columns(path, num_columns as u8)?; + Ok(std::sync::Arc::new(DbAdapter(db))) } impl Database for DbAdapter { - fn commit(&self, transaction: Transaction) { - handle_err(self.0.commit(transaction.0.into_iter().map(|change| - match change { - Change::Set(col, key, value) => (col as u8, key, Some(value)), - Change::Remove(col, key) => (col as u8, key, None), - _ => unimplemented!(), - })) - ); - } - - fn get(&self, col: ColumnId, key: &[u8]) -> Option> { - handle_err(self.0.get(col as 
u8, key)) - } - - fn lookup(&self, _hash: &H) -> Option> { - unimplemented!(); - } + fn commit(&self, transaction: Transaction) { + handle_err( + self.0 + .commit(transaction.0.into_iter().map(|change| match change { + Change::Set(col, key, value) => (col as u8, key, Some(value)), + Change::Remove(col, key) => (col as u8, key, None), + _ => unimplemented!(), + })), + ); + } + + fn get(&self, col: ColumnId, key: &[u8]) -> Option> { + handle_err(self.0.get(col as u8, key)) + } + + fn lookup(&self, _hash: &H) -> Option> { + unimplemented!(); + } } diff --git a/client/db/src/stats.rs b/client/db/src/stats.rs index 8bc93b5b64..034062225e 100644 --- a/client/db/src/stats.rs +++ b/client/db/src/stats.rs @@ -20,119 +20,135 @@ use std::sync::atomic::{AtomicU64, Ordering as AtomicOrdering}; /// Accumulated usage statistics for state queries. pub struct StateUsageStats { - started: std::time::Instant, - reads: AtomicU64, - bytes_read: AtomicU64, - writes: AtomicU64, - bytes_written: AtomicU64, - writes_nodes: AtomicU64, - bytes_written_nodes: AtomicU64, - removed_nodes: AtomicU64, - bytes_removed_nodes: AtomicU64, - reads_cache: AtomicU64, - bytes_read_cache: AtomicU64, + started: std::time::Instant, + reads: AtomicU64, + bytes_read: AtomicU64, + writes: AtomicU64, + bytes_written: AtomicU64, + writes_nodes: AtomicU64, + bytes_written_nodes: AtomicU64, + removed_nodes: AtomicU64, + bytes_removed_nodes: AtomicU64, + reads_cache: AtomicU64, + bytes_read_cache: AtomicU64, } impl StateUsageStats { - /// New empty usage stats. - pub fn new() -> Self { - Self { - started: std::time::Instant::now(), - reads: 0.into(), - bytes_read: 0.into(), - writes: 0.into(), - bytes_written: 0.into(), - writes_nodes: 0.into(), - bytes_written_nodes: 0.into(), - removed_nodes: 0.into(), - bytes_removed_nodes: 0.into(), - reads_cache: 0.into(), - bytes_read_cache: 0.into(), - } - } - - /// Tally one read operation, of some length. 
- pub fn tally_read(&self, data_bytes: u64, cache: bool) { - self.reads.fetch_add(1, AtomicOrdering::Relaxed); - self.bytes_read.fetch_add(data_bytes, AtomicOrdering::Relaxed); - if cache { - self.reads_cache.fetch_add(1, AtomicOrdering::Relaxed); - self.bytes_read_cache.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - } - - /// Tally one key read. - pub fn tally_key_read(&self, key: &[u8], val: Option<&Vec>, cache: bool) { - self.tally_read(key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), cache); - } - - /// Tally one child key read. - pub fn tally_child_key_read( - &self, - key: &(Vec, Vec), - val: Option>, - cache: bool, - ) -> Option> { - let bytes = key.0.len() + key.1.len() + val.as_ref().map(|x| x.len()).unwrap_or(0); - self.tally_read(bytes as u64, cache); - val - } - - /// Tally some write trie nodes operations, including their byte count. - pub fn tally_writes_nodes(&self, ops: u64, data_bytes: u64) { - self.writes_nodes.fetch_add(ops, AtomicOrdering::Relaxed); - self.bytes_written_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - - /// Tally some removed trie nodes operations, including their byte count. - pub fn tally_removed_nodes(&self, ops: u64, data_bytes: u64) { - self.removed_nodes.fetch_add(ops, AtomicOrdering::Relaxed); - self.bytes_removed_nodes.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - - /// Tally some write trie nodes operations, including their byte count. - pub fn tally_writes(&self, ops: u64, data_bytes: u64) { - self.writes.fetch_add(ops, AtomicOrdering::Relaxed); - self.bytes_written.fetch_add(data_bytes, AtomicOrdering::Relaxed); - } - - /// Merge state machine usage info. 
- pub fn merge_sm(&self, info: sp_state_machine::UsageInfo) { - self.reads.fetch_add(info.reads.ops, AtomicOrdering::Relaxed); - self.bytes_read.fetch_add(info.reads.bytes, AtomicOrdering::Relaxed); - self.writes_nodes.fetch_add(info.nodes_writes.ops, AtomicOrdering::Relaxed); - self.bytes_written_nodes.fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); - self.removed_nodes.fetch_add(info.removed_nodes.ops, AtomicOrdering::Relaxed); - self.bytes_removed_nodes.fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); - self.reads_cache.fetch_add(info.cache_reads.ops, AtomicOrdering::Relaxed); - self.bytes_read_cache.fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed); - } - - /// Returns the collected `UsageInfo` and resets the internal state. - pub fn take(&self) -> sp_state_machine::UsageInfo { - use sp_state_machine::UsageUnit; - - fn unit(ops: &AtomicU64, bytes: &AtomicU64) -> UsageUnit { - UsageUnit { - ops: ops.swap(0, AtomicOrdering::Relaxed), - bytes: bytes.swap(0, AtomicOrdering::Relaxed), - } - } - - sp_state_machine::UsageInfo { - reads: unit(&self.reads, &self.bytes_read), - writes: unit(&self.writes, &self.bytes_written), - nodes_writes: unit(&self.writes_nodes, &self.bytes_written_nodes), - removed_nodes: unit(&self.removed_nodes, &self.bytes_removed_nodes), - cache_reads: unit(&self.reads_cache, &self.bytes_read_cache), - modified_reads: Default::default(), - overlay_writes: Default::default(), - // TODO: Proper tracking state of memory footprint here requires - // imposing `MallocSizeOf` requirement on half of the codebase, - // so it is an open question how to do it better - memory: 0, - started: self.started, - span: self.started.elapsed(), - } - } + /// New empty usage stats. 
+ pub fn new() -> Self { + Self { + started: std::time::Instant::now(), + reads: 0.into(), + bytes_read: 0.into(), + writes: 0.into(), + bytes_written: 0.into(), + writes_nodes: 0.into(), + bytes_written_nodes: 0.into(), + removed_nodes: 0.into(), + bytes_removed_nodes: 0.into(), + reads_cache: 0.into(), + bytes_read_cache: 0.into(), + } + } + + /// Tally one read operation, of some length. + pub fn tally_read(&self, data_bytes: u64, cache: bool) { + self.reads.fetch_add(1, AtomicOrdering::Relaxed); + self.bytes_read + .fetch_add(data_bytes, AtomicOrdering::Relaxed); + if cache { + self.reads_cache.fetch_add(1, AtomicOrdering::Relaxed); + self.bytes_read_cache + .fetch_add(data_bytes, AtomicOrdering::Relaxed); + } + } + + /// Tally one key read. + pub fn tally_key_read(&self, key: &[u8], val: Option<&Vec>, cache: bool) { + self.tally_read( + key.len() as u64 + val.as_ref().map(|x| x.len() as u64).unwrap_or(0), + cache, + ); + } + + /// Tally one child key read. + pub fn tally_child_key_read( + &self, + key: &(Vec, Vec), + val: Option>, + cache: bool, + ) -> Option> { + let bytes = key.0.len() + key.1.len() + val.as_ref().map(|x| x.len()).unwrap_or(0); + self.tally_read(bytes as u64, cache); + val + } + + /// Tally some write trie nodes operations, including their byte count. + pub fn tally_writes_nodes(&self, ops: u64, data_bytes: u64) { + self.writes_nodes.fetch_add(ops, AtomicOrdering::Relaxed); + self.bytes_written_nodes + .fetch_add(data_bytes, AtomicOrdering::Relaxed); + } + + /// Tally some removed trie nodes operations, including their byte count. + pub fn tally_removed_nodes(&self, ops: u64, data_bytes: u64) { + self.removed_nodes.fetch_add(ops, AtomicOrdering::Relaxed); + self.bytes_removed_nodes + .fetch_add(data_bytes, AtomicOrdering::Relaxed); + } + + /// Tally some write trie nodes operations, including their byte count. 
+ pub fn tally_writes(&self, ops: u64, data_bytes: u64) { + self.writes.fetch_add(ops, AtomicOrdering::Relaxed); + self.bytes_written + .fetch_add(data_bytes, AtomicOrdering::Relaxed); + } + + /// Merge state machine usage info. + pub fn merge_sm(&self, info: sp_state_machine::UsageInfo) { + self.reads + .fetch_add(info.reads.ops, AtomicOrdering::Relaxed); + self.bytes_read + .fetch_add(info.reads.bytes, AtomicOrdering::Relaxed); + self.writes_nodes + .fetch_add(info.nodes_writes.ops, AtomicOrdering::Relaxed); + self.bytes_written_nodes + .fetch_add(info.nodes_writes.bytes, AtomicOrdering::Relaxed); + self.removed_nodes + .fetch_add(info.removed_nodes.ops, AtomicOrdering::Relaxed); + self.bytes_removed_nodes + .fetch_add(info.removed_nodes.bytes, AtomicOrdering::Relaxed); + self.reads_cache + .fetch_add(info.cache_reads.ops, AtomicOrdering::Relaxed); + self.bytes_read_cache + .fetch_add(info.cache_reads.bytes, AtomicOrdering::Relaxed); + } + + /// Returns the collected `UsageInfo` and resets the internal state. 
+ pub fn take(&self) -> sp_state_machine::UsageInfo { + use sp_state_machine::UsageUnit; + + fn unit(ops: &AtomicU64, bytes: &AtomicU64) -> UsageUnit { + UsageUnit { + ops: ops.swap(0, AtomicOrdering::Relaxed), + bytes: bytes.swap(0, AtomicOrdering::Relaxed), + } + } + + sp_state_machine::UsageInfo { + reads: unit(&self.reads, &self.bytes_read), + writes: unit(&self.writes, &self.bytes_written), + nodes_writes: unit(&self.writes_nodes, &self.bytes_written_nodes), + removed_nodes: unit(&self.removed_nodes, &self.bytes_removed_nodes), + cache_reads: unit(&self.reads_cache, &self.bytes_read_cache), + modified_reads: Default::default(), + overlay_writes: Default::default(), + // TODO: Proper tracking state of memory footprint here requires + // imposing `MallocSizeOf` requirement on half of the codebase, + // so it is an open question how to do it better + memory: 0, + started: self.started, + span: self.started.elapsed(), + } + } } diff --git a/client/db/src/storage_cache.rs b/client/db/src/storage_cache.rs index 6326899263..ac162f8c97 100644 --- a/client/db/src/storage_cache.rs +++ b/client/db/src/storage_cache.rs @@ -16,21 +16,21 @@ //! Global cache state. 
-use std::collections::{VecDeque, HashSet, HashMap}; -use std::sync::Arc; -use std::hash::Hash as StdHash; -use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; -use linked_hash_map::{LinkedHashMap, Entry}; +use crate::{stats::StateUsageStats, utils::Meta}; use hash_db::Hasher; -use sp_runtime::traits::{Block as BlockT, Header, HashFor, NumberFor}; +use linked_hash_map::{Entry, LinkedHashMap}; +use log::trace; +use parking_lot::{Mutex, RwLock, RwLockUpgradableReadGuard}; use sp_core::hexdisplay::HexDisplay; use sp_core::storage::ChildInfo; +use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor}; use sp_state_machine::{ - backend::Backend as StateBackend, TrieBackend, StorageKey, StorageValue, - StorageCollection, ChildStorageCollection, + backend::Backend as StateBackend, ChildStorageCollection, StorageCollection, StorageKey, + StorageValue, TrieBackend, }; -use log::trace; -use crate::{utils::Meta, stats::StateUsageStats}; +use std::collections::{HashMap, HashSet, VecDeque}; +use std::hash::Hash as StdHash; +use std::sync::Arc; const STATE_CACHE_BLOCKS: usize = 12; @@ -38,15 +38,15 @@ type ChildStorageKey = (Vec, Vec); /// Shared canonical state cache. pub struct Cache { - /// Storage cache. `None` indicates that key is known to be missing. - lru_storage: LRUMap>, - /// Storage hashes cache. `None` indicates that key is known to be missing. - lru_hashes: LRUMap>, - /// Storage cache for child trie. `None` indicates that key is known to be missing. - lru_child_storage: LRUMap>, - /// Information on the modifications in recently committed blocks; specifically which keys - /// changed in which block. Ordered by block number. - modifications: VecDeque>, + /// Storage cache. `None` indicates that key is known to be missing. + lru_storage: LRUMap>, + /// Storage hashes cache. `None` indicates that key is known to be missing. + lru_hashes: LRUMap>, + /// Storage cache for child trie. `None` indicates that key is known to be missing. 
+ lru_child_storage: LRUMap>, + /// Information on the modifications in recently committed blocks; specifically which keys + /// changed in which block. Ordered by block number. + modifications: VecDeque>, } struct LRUMap(LinkedHashMap, usize, usize); @@ -58,162 +58,166 @@ struct LRUMap(LinkedHashMap, usize, usize); /// detail trait. If it need to become public please /// consider using `malloc_size_of`. trait EstimateSize { - /// Return a size estimation of additional size needed - /// to cache this struct (in bytes). - fn estimate_size(&self) -> usize; + /// Return a size estimation of additional size needed + /// to cache this struct (in bytes). + fn estimate_size(&self) -> usize; } impl EstimateSize for Vec { - fn estimate_size(&self) -> usize { - self.capacity() - } + fn estimate_size(&self) -> usize { + self.capacity() + } } impl EstimateSize for Option> { - fn estimate_size(&self) -> usize { - self.as_ref().map(|v|v.capacity()).unwrap_or(0) - } + fn estimate_size(&self) -> usize { + self.as_ref().map(|v| v.capacity()).unwrap_or(0) + } } struct OptionHOut>(Option); impl> EstimateSize for OptionHOut { - fn estimate_size(&self) -> usize { - // capacity would be better - self.0.as_ref().map(|v|v.as_ref().len()).unwrap_or(0) - } + fn estimate_size(&self) -> usize { + // capacity would be better + self.0.as_ref().map(|v| v.as_ref().len()).unwrap_or(0) + } } impl EstimateSize for (T, T) { - fn estimate_size(&self) -> usize { - self.0.estimate_size() + self.1.estimate_size() - } + fn estimate_size(&self) -> usize { + self.0.estimate_size() + self.1.estimate_size() + } } impl LRUMap { - fn remove(&mut self, k: &K) { - let map = &mut self.0; - let storage_used_size = &mut self.1; - if let Some(v) = map.remove(k) { - *storage_used_size -= k.estimate_size(); - *storage_used_size -= v.estimate_size(); - } - } - - fn add(&mut self, k: K, v: V) { - let lmap = &mut self.0; - let storage_used_size = &mut self.1; - let limit = self.2; - let klen = k.estimate_size(); - 
*storage_used_size += v.estimate_size(); - // TODO assert k v size fit into limit?? to avoid insert remove? - match lmap.entry(k) { - Entry::Occupied(mut entry) => { - // note that in this case we are not running pure lru as - // it would require to remove first - *storage_used_size -= entry.get().estimate_size(); - entry.insert(v); - }, - Entry::Vacant(entry) => { - *storage_used_size += klen; - entry.insert(v); - }, - }; - - while *storage_used_size > limit { - if let Some((k,v)) = lmap.pop_front() { - *storage_used_size -= k.estimate_size(); - *storage_used_size -= v.estimate_size(); - } else { - // can happen fairly often as we get value from multiple lru - // and only remove from a single lru - break; - } - } - } - - fn get(&mut self, k: &Q) -> Option<&mut V> - where K: std::borrow::Borrow, - Q: StdHash + Eq { - self.0.get_refresh(k) - } - - fn used_size(&self) -> usize { - self.1 - } - fn clear(&mut self) { - self.0.clear(); - self.1 = 0; - } - + fn remove(&mut self, k: &K) { + let map = &mut self.0; + let storage_used_size = &mut self.1; + if let Some(v) = map.remove(k) { + *storage_used_size -= k.estimate_size(); + *storage_used_size -= v.estimate_size(); + } + } + + fn add(&mut self, k: K, v: V) { + let lmap = &mut self.0; + let storage_used_size = &mut self.1; + let limit = self.2; + let klen = k.estimate_size(); + *storage_used_size += v.estimate_size(); + // TODO assert k v size fit into limit?? to avoid insert remove? 
+ match lmap.entry(k) { + Entry::Occupied(mut entry) => { + // note that in this case we are not running pure lru as + // it would require to remove first + *storage_used_size -= entry.get().estimate_size(); + entry.insert(v); + } + Entry::Vacant(entry) => { + *storage_used_size += klen; + entry.insert(v); + } + }; + + while *storage_used_size > limit { + if let Some((k, v)) = lmap.pop_front() { + *storage_used_size -= k.estimate_size(); + *storage_used_size -= v.estimate_size(); + } else { + // can happen fairly often as we get value from multiple lru + // and only remove from a single lru + break; + } + } + } + + fn get(&mut self, k: &Q) -> Option<&mut V> + where + K: std::borrow::Borrow, + Q: StdHash + Eq, + { + self.0.get_refresh(k) + } + + fn used_size(&self) -> usize { + self.1 + } + fn clear(&mut self) { + self.0.clear(); + self.1 = 0; + } } impl Cache { - /// Returns the used memory size of the storage cache in bytes. - pub fn used_storage_cache_size(&self) -> usize { - self.lru_storage.used_size() - + self.lru_child_storage.used_size() - // ignore small hashes storage and self.lru_hashes.used_size() - } - - /// Synchronize the shared cache with the best block state. - /// - /// This function updates the shared cache by removing entries - /// that are invalidated by chain reorganization. It should be called - /// externally when chain reorg happens without importing a new block. - pub fn sync(&mut self, enacted: &[B::Hash], retracted: &[B::Hash]) { - trace!("Syncing shared cache, enacted = {:?}, retracted = {:?}", enacted, retracted); - - // Purge changes from re-enacted and retracted blocks. 
- let mut clear = false; - for block in enacted { - clear = clear || { - if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Reverting enacted block {:?}", block); - m.is_canon = true; - for a in &m.storage { - trace!("Reverting enacted key {:?}", HexDisplay::from(a)); - self.lru_storage.remove(a); - } - for a in &m.child_storage { - trace!("Reverting enacted child key {:?}", a); - self.lru_child_storage.remove(a); - } - false - } else { - true - } - }; - } - - for block in retracted { - clear = clear || { - if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) { - trace!("Retracting block {:?}", block); - m.is_canon = false; - for a in &m.storage { - trace!("Retracted key {:?}", HexDisplay::from(a)); - self.lru_storage.remove(a); - } - for a in &m.child_storage { - trace!("Retracted child key {:?}", a); - self.lru_child_storage.remove(a); - } - false - } else { - true - } - }; - } - if clear { - // We don't know anything about the block; clear everything - trace!("Wiping cache"); - self.lru_storage.clear(); - self.lru_child_storage.clear(); - self.lru_hashes.clear(); - self.modifications.clear(); - } - } + /// Returns the used memory size of the storage cache in bytes. + pub fn used_storage_cache_size(&self) -> usize { + self.lru_storage.used_size() + self.lru_child_storage.used_size() + // ignore small hashes storage and self.lru_hashes.used_size() + } + + /// Synchronize the shared cache with the best block state. + /// + /// This function updates the shared cache by removing entries + /// that are invalidated by chain reorganization. It should be called + /// externally when chain reorg happens without importing a new block. + pub fn sync(&mut self, enacted: &[B::Hash], retracted: &[B::Hash]) { + trace!( + "Syncing shared cache, enacted = {:?}, retracted = {:?}", + enacted, + retracted + ); + + // Purge changes from re-enacted and retracted blocks. 
+ let mut clear = false; + for block in enacted { + clear = clear || { + if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Reverting enacted block {:?}", block); + m.is_canon = true; + for a in &m.storage { + trace!("Reverting enacted key {:?}", HexDisplay::from(a)); + self.lru_storage.remove(a); + } + for a in &m.child_storage { + trace!("Reverting enacted child key {:?}", a); + self.lru_child_storage.remove(a); + } + false + } else { + true + } + }; + } + + for block in retracted { + clear = clear || { + if let Some(m) = self.modifications.iter_mut().find(|m| &m.hash == block) { + trace!("Retracting block {:?}", block); + m.is_canon = false; + for a in &m.storage { + trace!("Retracted key {:?}", HexDisplay::from(a)); + self.lru_storage.remove(a); + } + for a in &m.child_storage { + trace!("Retracted child key {:?}", a); + self.lru_child_storage.remove(a); + } + false + } else { + true + } + }; + } + if clear { + // We don't know anything about the block; clear everything + trace!("Wiping cache"); + self.lru_storage.clear(); + self.lru_child_storage.clear(); + self.lru_hashes.clear(); + self.modifications.clear(); + } + } } pub type SharedCache = Arc>>; @@ -223,68 +227,68 @@ const FIX_LRU_HASH_SIZE: usize = 65_536; /// Create a new shared cache instance with given max memory usage. 
pub fn new_shared_cache( - shared_cache_size: usize, - child_ratio: (usize, usize), + shared_cache_size: usize, + child_ratio: (usize, usize), ) -> SharedCache { - let top = child_ratio.1.saturating_sub(child_ratio.0); - Arc::new( - Mutex::new( - Cache { - lru_storage: LRUMap( - LinkedHashMap::new(), 0, shared_cache_size * top / child_ratio.1 - ), - lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE), - lru_child_storage: LRUMap( - LinkedHashMap::new(), 0, shared_cache_size * child_ratio.0 / child_ratio.1 - ), - modifications: VecDeque::new(), - } - ) - ) + let top = child_ratio.1.saturating_sub(child_ratio.0); + Arc::new(Mutex::new(Cache { + lru_storage: LRUMap( + LinkedHashMap::new(), + 0, + shared_cache_size * top / child_ratio.1, + ), + lru_hashes: LRUMap(LinkedHashMap::new(), 0, FIX_LRU_HASH_SIZE), + lru_child_storage: LRUMap( + LinkedHashMap::new(), + 0, + shared_cache_size * child_ratio.0 / child_ratio.1, + ), + modifications: VecDeque::new(), + })) } #[derive(Debug)] /// Accumulates a list of storage changed in a block. struct BlockChanges { - /// Block number. - number: B::Number, - /// Block hash. - hash: B::Hash, - /// Parent block hash. - parent: B::Hash, - /// A set of modified storage keys. - storage: HashSet, - /// A set of modified child storage keys. - child_storage: HashSet, - /// Block is part of the canonical chain. - is_canon: bool, + /// Block number. + number: B::Number, + /// Block hash. + hash: B::Hash, + /// Parent block hash. + parent: B::Hash, + /// A set of modified storage keys. + storage: HashSet, + /// A set of modified child storage keys. + child_storage: HashSet, + /// Block is part of the canonical chain. + is_canon: bool, } /// Cached values specific to a state. struct LocalCache { - /// Storage cache. - /// - /// `None` indicates that key is known to be missing. - storage: HashMap>, - /// Storage hashes cache. - /// - /// `None` indicates that key is known to be missing. - hashes: HashMap>, - /// Child storage cache. 
- /// - /// `None` indicates that key is known to be missing. - child_storage: HashMap>, + /// Storage cache. + /// + /// `None` indicates that key is known to be missing. + storage: HashMap>, + /// Storage hashes cache. + /// + /// `None` indicates that key is known to be missing. + hashes: HashMap>, + /// Child storage cache. + /// + /// `None` indicates that key is known to be missing. + child_storage: HashMap>, } /// Cache changes. pub struct CacheChanges { - /// Shared canonical state cache. - shared_cache: SharedCache, - /// Local cache of values for this state. - local_cache: RwLock>>, - /// Hash of the block on top of which this instance was created or - /// `None` if cache is disabled - pub parent_hash: Option, + /// Shared canonical state cache. + shared_cache: SharedCache, + /// Local cache of values for this state. + local_cache: RwLock>>, + /// Hash of the block on top of which this instance was created or + /// `None` if cache is disabled + pub parent_hash: Option, } /// State cache abstraction. @@ -297,1590 +301,1791 @@ pub struct CacheChanges { /// in `sync_cache` along with the change overlay. /// For non-canonical clones local cache and changes are dropped. pub struct CachingState { - /// Usage statistics - usage: StateUsageStats, - /// State machine registered stats - overlay_stats: sp_state_machine::StateMachineStats, - /// Backing state. - state: S, - /// Cache data. - cache: CacheChanges, + /// Usage statistics + usage: StateUsageStats, + /// State machine registered stats + overlay_stats: sp_state_machine::StateMachineStats, + /// Backing state. + state: S, + /// Cache data. 
+ cache: CacheChanges, } impl std::fmt::Debug for CachingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "Block {:?}", self.cache.parent_hash) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "Block {:?}", self.cache.parent_hash) + } } impl CacheChanges { - /// Propagate local cache into the shared cache and synchronize - /// the shared cache with the best block state. - /// - /// This function updates the shared cache by removing entries - /// that are invalidated by chain reorganization. `sync_cache` - /// should be called after the block has been committed and the - /// blockchain route has been calculated. - pub fn sync_cache( - &mut self, - enacted: &[B::Hash], - retracted: &[B::Hash], - changes: StorageCollection, - child_changes: ChildStorageCollection, - commit_hash: Option, - commit_number: Option>, - is_best: bool, - ) { - let mut cache = self.shared_cache.lock(); - trace!( - "Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}", - commit_number, - commit_hash, - self.parent_hash, - is_best, - ); - let cache = &mut *cache; - // Filter out committing block if any. - let enacted: Vec<_> = enacted - .iter() - .filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) - .cloned() - .collect(); - cache.sync(&enacted, retracted); - // Propagate cache only if committing on top of the latest canonical state - // blocks are ordered by number and only one block with a given number is marked as canonical - // (contributed to canonical state cache) - if let Some(_) = self.parent_hash { - let mut local_cache = self.local_cache.write(); - if is_best { - trace!( + /// Propagate local cache into the shared cache and synchronize + /// the shared cache with the best block state. + /// + /// This function updates the shared cache by removing entries + /// that are invalidated by chain reorganization. 
`sync_cache` + /// should be called after the block has been committed and the + /// blockchain route has been calculated. + pub fn sync_cache( + &mut self, + enacted: &[B::Hash], + retracted: &[B::Hash], + changes: StorageCollection, + child_changes: ChildStorageCollection, + commit_hash: Option, + commit_number: Option>, + is_best: bool, + ) { + let mut cache = self.shared_cache.lock(); + trace!( + "Syncing cache, id = (#{:?}, {:?}), parent={:?}, best={}", + commit_number, + commit_hash, + self.parent_hash, + is_best, + ); + let cache = &mut *cache; + // Filter out committing block if any. + let enacted: Vec<_> = enacted + .iter() + .filter(|h| commit_hash.as_ref().map_or(true, |p| *h != p)) + .cloned() + .collect(); + cache.sync(&enacted, retracted); + // Propagate cache only if committing on top of the latest canonical state + // blocks are ordered by number and only one block with a given number is marked as canonical + // (contributed to canonical state cache) + if let Some(_) = self.parent_hash { + let mut local_cache = self.local_cache.write(); + if is_best { + trace!( "Committing {} local, {} hashes, {} modified root entries, {} modified child entries", local_cache.storage.len(), local_cache.hashes.len(), changes.len(), child_changes.iter().map(|v|v.1.len()).sum::(), ); - for (k, v) in local_cache.storage.drain() { - cache.lru_storage.add(k, v); - } - for (k, v) in local_cache.child_storage.drain() { - cache.lru_child_storage.add(k, v); - } - for (k, v) in local_cache.hashes.drain() { - cache.lru_hashes.add(k, OptionHOut(v)); - } - } - } - - if let ( - Some(ref number), Some(ref hash), Some(ref parent)) - = (commit_number, commit_hash, self.parent_hash) - { - if cache.modifications.len() == STATE_CACHE_BLOCKS { - cache.modifications.pop_back(); - } - let mut modifications = HashSet::new(); - let mut child_modifications = HashSet::new(); - child_changes.into_iter().for_each(|(sk, changes)| - for (k, v) in changes.into_iter() { - let k = (sk.clone(), k); - 
if is_best { - cache.lru_child_storage.add(k.clone(), v); - } - child_modifications.insert(k); - } - ); - for (k, v) in changes.into_iter() { - if is_best { - cache.lru_hashes.remove(&k); - cache.lru_storage.add(k.clone(), v); - } - modifications.insert(k); - } - - // Save modified storage. These are ordered by the block number in reverse. - let block_changes = BlockChanges { - storage: modifications, - child_storage: child_modifications, - number: *number, - hash: hash.clone(), - is_canon: is_best, - parent: parent.clone(), - }; - let insert_at = cache.modifications.iter() - .enumerate() - .find(|(_, m)| m.number < *number) - .map(|(i, _)| i); - trace!("Inserting modifications at {:?}", insert_at); - if let Some(insert_at) = insert_at { - cache.modifications.insert(insert_at, block_changes); - } else { - cache.modifications.push_back(block_changes); - } - } - } + for (k, v) in local_cache.storage.drain() { + cache.lru_storage.add(k, v); + } + for (k, v) in local_cache.child_storage.drain() { + cache.lru_child_storage.add(k, v); + } + for (k, v) in local_cache.hashes.drain() { + cache.lru_hashes.add(k, OptionHOut(v)); + } + } + } + + if let (Some(ref number), Some(ref hash), Some(ref parent)) = + (commit_number, commit_hash, self.parent_hash) + { + if cache.modifications.len() == STATE_CACHE_BLOCKS { + cache.modifications.pop_back(); + } + let mut modifications = HashSet::new(); + let mut child_modifications = HashSet::new(); + child_changes.into_iter().for_each(|(sk, changes)| { + for (k, v) in changes.into_iter() { + let k = (sk.clone(), k); + if is_best { + cache.lru_child_storage.add(k.clone(), v); + } + child_modifications.insert(k); + } + }); + for (k, v) in changes.into_iter() { + if is_best { + cache.lru_hashes.remove(&k); + cache.lru_storage.add(k.clone(), v); + } + modifications.insert(k); + } + + // Save modified storage. These are ordered by the block number in reverse. 
+ let block_changes = BlockChanges { + storage: modifications, + child_storage: child_modifications, + number: *number, + hash: hash.clone(), + is_canon: is_best, + parent: parent.clone(), + }; + let insert_at = cache + .modifications + .iter() + .enumerate() + .find(|(_, m)| m.number < *number) + .map(|(i, _)| i); + trace!("Inserting modifications at {:?}", insert_at); + if let Some(insert_at) = insert_at { + cache.modifications.insert(insert_at, block_changes); + } else { + cache.modifications.push_back(block_changes); + } + } + } } impl>, B: BlockT> CachingState { - /// Create a new instance wrapping generic State and shared cache. - pub(crate) fn new( - state: S, - shared_cache: SharedCache, - parent_hash: Option, - ) -> Self { - CachingState { - usage: StateUsageStats::new(), - overlay_stats: sp_state_machine::StateMachineStats::default(), - state, - cache: CacheChanges { - shared_cache, - local_cache: RwLock::new(LocalCache { - storage: Default::default(), - hashes: Default::default(), - child_storage: Default::default(), - }), - parent_hash, - }, - } - } - - /// Check if the key can be returned from cache by matching current block parent hash against canonical - /// state and filtering out entries modified in later blocks. - fn is_allowed( - key: Option<&[u8]>, - child_key: Option<&ChildStorageKey>, - parent_hash: &Option, - modifications: &VecDeque> - ) -> bool { - let mut parent = match *parent_hash { - None => { - trace!("Cache lookup skipped for {:?}: no parent hash", key.as_ref().map(HexDisplay::from)); - return false; - } - Some(ref parent) => parent, - }; - // Ignore all storage entries modified in later blocks. - // Modifications contains block ordered by the number - // We search for our parent in that list first and then for - // all its parents until we hit the canonical block, - // checking against all the intermediate modifications. 
- for m in modifications { - if &m.hash == parent { - if m.is_canon { - return true; - } - parent = &m.parent; - } - if let Some(key) = key { - if m.storage.contains(key) { - trace!("Cache lookup skipped for {:?}: modified in a later block", HexDisplay::from(&key)); - return false; - } - } - if let Some(child_key) = child_key { - if m.child_storage.contains(child_key) { - trace!("Cache lookup skipped for {:?}: modified in a later block", child_key); - return false; - } - } - } - trace!( - "Cache lookup skipped for {:?}: parent hash is unknown", - key.as_ref().map(HexDisplay::from), - ); - false - } + /// Create a new instance wrapping generic State and shared cache. + pub(crate) fn new( + state: S, + shared_cache: SharedCache, + parent_hash: Option, + ) -> Self { + CachingState { + usage: StateUsageStats::new(), + overlay_stats: sp_state_machine::StateMachineStats::default(), + state, + cache: CacheChanges { + shared_cache, + local_cache: RwLock::new(LocalCache { + storage: Default::default(), + hashes: Default::default(), + child_storage: Default::default(), + }), + parent_hash, + }, + } + } + + /// Check if the key can be returned from cache by matching current block parent hash against canonical + /// state and filtering out entries modified in later blocks. + fn is_allowed( + key: Option<&[u8]>, + child_key: Option<&ChildStorageKey>, + parent_hash: &Option, + modifications: &VecDeque>, + ) -> bool { + let mut parent = match *parent_hash { + None => { + trace!( + "Cache lookup skipped for {:?}: no parent hash", + key.as_ref().map(HexDisplay::from) + ); + return false; + } + Some(ref parent) => parent, + }; + // Ignore all storage entries modified in later blocks. + // Modifications contains block ordered by the number + // We search for our parent in that list first and then for + // all its parents until we hit the canonical block, + // checking against all the intermediate modifications. 
+ for m in modifications { + if &m.hash == parent { + if m.is_canon { + return true; + } + parent = &m.parent; + } + if let Some(key) = key { + if m.storage.contains(key) { + trace!( + "Cache lookup skipped for {:?}: modified in a later block", + HexDisplay::from(&key) + ); + return false; + } + } + if let Some(child_key) = child_key { + if m.child_storage.contains(child_key) { + trace!( + "Cache lookup skipped for {:?}: modified in a later block", + child_key + ); + return false; + } + } + } + trace!( + "Cache lookup skipped for {:?}: parent hash is unknown", + key.as_ref().map(HexDisplay::from), + ); + false + } } impl>, B: BlockT> StateBackend> for CachingState { - type Error = S::Error; - type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - let local_cache = self.cache.local_cache.upgradable_read(); - // Note that local cache makes that lru is not refreshed - if let Some(entry) = local_cache.storage.get(key).cloned() { - trace!("Found in local cache: {:?}", HexDisplay::from(&key)); - self.usage.tally_key_read(key, entry.as_ref(), true); - - return Ok(entry) - } - let mut cache = self.cache.shared_cache.lock(); - if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { - if let Some(entry) = cache.lru_storage.get(key).map(|a| a.clone()) { - trace!("Found in shared cache: {:?}", HexDisplay::from(&key)); - self.usage.tally_key_read(key, entry.as_ref(), true); - return Ok(entry) - } - } - trace!("Cache miss: {:?}", HexDisplay::from(&key)); - let value = self.state.storage(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache).storage.insert(key.to_vec(), value.clone()); - self.usage.tally_key_read(key, value.as_ref(), false); - Ok(value) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - let local_cache = self.cache.local_cache.upgradable_read(); - if let Some(entry) = local_cache.hashes.get(key).cloned() { - 
trace!("Found hash in local cache: {:?}", HexDisplay::from(&key)); - return Ok(entry) - } - let mut cache = self.cache.shared_cache.lock(); - if Self::is_allowed(Some(key), None, &self.cache.parent_hash, &cache.modifications) { - if let Some(entry) = cache.lru_hashes.get(key).map(|a| a.0.clone()) { - trace!("Found hash in shared cache: {:?}", HexDisplay::from(&key)); - return Ok(entry) - } - } - trace!("Cache hash miss: {:?}", HexDisplay::from(&key)); - let hash = self.state.storage_hash(key)?; - RwLockUpgradableReadGuard::upgrade(local_cache).hashes.insert(key.to_vec(), hash); - Ok(hash) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - let key = (storage_key.to_vec(), key.to_vec()); - let local_cache = self.cache.local_cache.upgradable_read(); - if let Some(entry) = local_cache.child_storage.get(&key).cloned() { - trace!("Found in local cache: {:?}", key); - return Ok( - self.usage.tally_child_key_read(&key, entry, true) - ) - } - let mut cache = self.cache.shared_cache.lock(); - if Self::is_allowed(None, Some(&key), &self.cache.parent_hash, &cache.modifications) { - if let Some(entry) = cache.lru_child_storage.get(&key).map(|a| a.clone()) { - trace!("Found in shared cache: {:?}", key); - return Ok( - self.usage.tally_child_key_read(&key, entry, true) - ) - } - } - trace!("Cache miss: {:?}", key); - let value = self.state.child_storage(storage_key, child_info, &key.1[..])?; - - // just pass it through the usage counter - let value = self.usage.tally_child_key_read(&key, value, false); - - RwLockUpgradableReadGuard::upgrade(local_cache).child_storage.insert(key, value.clone()); - Ok(value) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.storage(key)?.is_some()) - } - - fn exists_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result { - self.state.exists_child_storage(storage_key, child_info, key) - } - - fn 
for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ) { - self.state.for_keys_in_child_storage(storage_key, child_info, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.state.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.state.next_child_storage_key(storage_key, child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.state.for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - self.state.for_child_keys_with_prefix(storage_key, child_info, prefix, f) - } - - fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) - where - I: IntoIterator, Option>)>, - { - self.state.storage_root(delta) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (B::Hash, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - { - self.state.child_storage_root(storage_key, child_info, delta) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.state.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.state.keys(prefix) - } - - fn child_keys( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) -> Vec> { - self.state.child_keys(storage_key, child_info, prefix) - } - - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { - self.state.as_trie_backend() - } - - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { - self.overlay_stats.add(stats); - } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - let mut info = self.usage.take(); - info.include_state_machine_states(&self.overlay_stats); - info - } + type 
Error = S::Error; + type Transaction = S::Transaction; + type TrieBackendStorage = S::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + let local_cache = self.cache.local_cache.upgradable_read(); + // Note that local cache makes that lru is not refreshed + if let Some(entry) = local_cache.storage.get(key).cloned() { + trace!("Found in local cache: {:?}", HexDisplay::from(&key)); + self.usage.tally_key_read(key, entry.as_ref(), true); + + return Ok(entry); + } + let mut cache = self.cache.shared_cache.lock(); + if Self::is_allowed( + Some(key), + None, + &self.cache.parent_hash, + &cache.modifications, + ) { + if let Some(entry) = cache.lru_storage.get(key).map(|a| a.clone()) { + trace!("Found in shared cache: {:?}", HexDisplay::from(&key)); + self.usage.tally_key_read(key, entry.as_ref(), true); + return Ok(entry); + } + } + trace!("Cache miss: {:?}", HexDisplay::from(&key)); + let value = self.state.storage(key)?; + RwLockUpgradableReadGuard::upgrade(local_cache) + .storage + .insert(key.to_vec(), value.clone()); + self.usage.tally_key_read(key, value.as_ref(), false); + Ok(value) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + let local_cache = self.cache.local_cache.upgradable_read(); + if let Some(entry) = local_cache.hashes.get(key).cloned() { + trace!("Found hash in local cache: {:?}", HexDisplay::from(&key)); + return Ok(entry); + } + let mut cache = self.cache.shared_cache.lock(); + if Self::is_allowed( + Some(key), + None, + &self.cache.parent_hash, + &cache.modifications, + ) { + if let Some(entry) = cache.lru_hashes.get(key).map(|a| a.0.clone()) { + trace!("Found hash in shared cache: {:?}", HexDisplay::from(&key)); + return Ok(entry); + } + } + trace!("Cache hash miss: {:?}", HexDisplay::from(&key)); + let hash = self.state.storage_hash(key)?; + RwLockUpgradableReadGuard::upgrade(local_cache) + .hashes + .insert(key.to_vec(), hash); + Ok(hash) + } + + fn child_storage( + &self, + storage_key: 
&[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + let key = (storage_key.to_vec(), key.to_vec()); + let local_cache = self.cache.local_cache.upgradable_read(); + if let Some(entry) = local_cache.child_storage.get(&key).cloned() { + trace!("Found in local cache: {:?}", key); + return Ok(self.usage.tally_child_key_read(&key, entry, true)); + } + let mut cache = self.cache.shared_cache.lock(); + if Self::is_allowed( + None, + Some(&key), + &self.cache.parent_hash, + &cache.modifications, + ) { + if let Some(entry) = cache.lru_child_storage.get(&key).map(|a| a.clone()) { + trace!("Found in shared cache: {:?}", key); + return Ok(self.usage.tally_child_key_read(&key, entry, true)); + } + } + trace!("Cache miss: {:?}", key); + let value = self + .state + .child_storage(storage_key, child_info, &key.1[..])?; + + // just pass it through the usage counter + let value = self.usage.tally_child_key_read(&key, value, false); + + RwLockUpgradableReadGuard::upgrade(local_cache) + .child_storage + .insert(key, value.clone()); + Ok(value) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + Ok(self.storage(key)?.is_some()) + } + + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + self.state + .exists_child_storage(storage_key, child_info, key) + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.state + .for_keys_in_child_storage(storage_key, child_info, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.state.next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.state + .next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.state.for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: 
&[u8], f: F) { + self.state.for_key_values_with_prefix(prefix, f) + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.state + .for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + + fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.state.storage_root(delta) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (B::Hash, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.state + .child_storage_root(storage_key, child_info, delta) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.state.pairs() + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.state.keys(prefix) + } + + fn child_keys(&self, storage_key: &[u8], child_info: ChildInfo, prefix: &[u8]) -> Vec> { + self.state.child_keys(storage_key, child_info, prefix) + } + + fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + self.state.as_trie_backend() + } + + fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + self.overlay_stats.add(stats); + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + let mut info = self.usage.take(); + info.include_state_machine_states(&self.overlay_stats); + info + } } /// Extended [`CachingState`] that will sync the caches on drop. pub struct SyncingCachingState { - /// The usage statistics of the backend. These will be updated on drop. - state_usage: Arc, - /// Reference to the meta db. - meta: Arc, Block::Hash>>>, - /// Mutex to lock get exlusive access to the backend. - lock: Arc>, - /// The wrapped caching state. - /// - /// This is required to be a `Option`, because sometimes we want to extract - /// the cache changes and Rust does not allow to move fields from types that - /// implement `Drop`. - caching_state: Option>, - /// Disable syncing of the cache. This is by default always `false`. 
However, - /// we need to disable syncing when this is a state in a - /// [`BlockImportOperation`](crate::BlockImportOperation). The import operation - /// takes care to sync the cache and more importantly we want to prevent a dead - /// lock. - disable_syncing: bool, + /// The usage statistics of the backend. These will be updated on drop. + state_usage: Arc, + /// Reference to the meta db. + meta: Arc, Block::Hash>>>, + /// Mutex to lock get exlusive access to the backend. + lock: Arc>, + /// The wrapped caching state. + /// + /// This is required to be a `Option`, because sometimes we want to extract + /// the cache changes and Rust does not allow to move fields from types that + /// implement `Drop`. + caching_state: Option>, + /// Disable syncing of the cache. This is by default always `false`. However, + /// we need to disable syncing when this is a state in a + /// [`BlockImportOperation`](crate::BlockImportOperation). The import operation + /// takes care to sync the cache and more importantly we want to prevent a dead + /// lock. + disable_syncing: bool, } impl SyncingCachingState { - /// Create new automatic syncing state. - pub fn new( - caching_state: CachingState, - state_usage: Arc, - meta: Arc, B::Hash>>>, - lock: Arc>, - ) -> Self { - Self { - caching_state: Some(caching_state), - state_usage, - meta, - lock, - disable_syncing: false, - } - } - - /// Returns the reference to the internal [`CachingState`]. - fn caching_state(&self) -> &CachingState { - self.caching_state - .as_ref() - .expect("`caching_state` is always valid for the lifetime of the object; qed") - } - - /// Convert `Self` into the cache changes. - pub fn into_cache_changes(mut self) -> CacheChanges { - self.caching_state - .take() - .expect("`caching_state` is always valid for the lifetime of the object; qed") - .cache - } - - /// Disable syncing the cache on drop. - pub fn disable_syncing(&mut self) { - self.disable_syncing = true; - } + /// Create new automatic syncing state. 
+ pub fn new( + caching_state: CachingState, + state_usage: Arc, + meta: Arc, B::Hash>>>, + lock: Arc>, + ) -> Self { + Self { + caching_state: Some(caching_state), + state_usage, + meta, + lock, + disable_syncing: false, + } + } + + /// Returns the reference to the internal [`CachingState`]. + fn caching_state(&self) -> &CachingState { + self.caching_state + .as_ref() + .expect("`caching_state` is always valid for the lifetime of the object; qed") + } + + /// Convert `Self` into the cache changes. + pub fn into_cache_changes(mut self) -> CacheChanges { + self.caching_state + .take() + .expect("`caching_state` is always valid for the lifetime of the object; qed") + .cache + } + + /// Disable syncing the cache on drop. + pub fn disable_syncing(&mut self) { + self.disable_syncing = true; + } } impl std::fmt::Debug for SyncingCachingState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.caching_state().fmt(f) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.caching_state().fmt(f) + } } -impl>, B: BlockT> StateBackend> for SyncingCachingState { - type Error = S::Error; - type Transaction = S::Transaction; - type TrieBackendStorage = S::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.caching_state().storage(key) - } - - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.caching_state().storage_hash(key) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.caching_state().child_storage(storage_key, child_info, key) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - self.caching_state().exists_storage(key) - } - - fn exists_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result { - self.caching_state().exists_child_storage(storage_key, child_info, key) - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - 
child_info: ChildInfo, - f: F, - ) { - self.caching_state().for_keys_in_child_storage(storage_key, child_info, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.caching_state().next_storage_key(key) - } - - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.caching_state().next_child_storage_key(storage_key, child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.caching_state().for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.caching_state().for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - self.caching_state().for_child_keys_with_prefix(storage_key, child_info, prefix, f) - } - - fn storage_root(&self, delta: I) -> (B::Hash, Self::Transaction) - where - I: IntoIterator, Option>)>, - { - self.caching_state().storage_root(delta) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (B::Hash, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - { - self.caching_state().child_storage_root(storage_key, child_info, delta) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.caching_state().pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.caching_state().keys(prefix) - } - - fn child_keys( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) -> Vec> { - self.caching_state().child_keys(storage_key, child_info, prefix) - } - - fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { - self.caching_state - .as_mut() - .expect("`caching_state` is valid for the lifetime of the object; qed") - .as_trie_backend() - } - - fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { - self.caching_state().register_overlay_stats(stats); - } - 
- fn usage_info(&self) -> sp_state_machine::UsageInfo { - self.caching_state().usage_info() - } +impl>, B: BlockT> StateBackend> + for SyncingCachingState +{ + type Error = S::Error; + type Transaction = S::Transaction; + type TrieBackendStorage = S::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.caching_state().storage(key) + } + + fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.caching_state().storage_hash(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.caching_state() + .child_storage(storage_key, child_info, key) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + self.caching_state().exists_storage(key) + } + + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + self.caching_state() + .exists_child_storage(storage_key, child_info, key) + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.caching_state() + .for_keys_in_child_storage(storage_key, child_info, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.caching_state().next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.caching_state() + .next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.caching_state().for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.caching_state().for_key_values_with_prefix(prefix, f) + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.caching_state() + .for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + + fn storage_root(&self, delta: I) -> 
(B::Hash, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.caching_state().storage_root(delta) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (B::Hash, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + self.caching_state() + .child_storage_root(storage_key, child_info, delta) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.caching_state().pairs() + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.caching_state().keys(prefix) + } + + fn child_keys(&self, storage_key: &[u8], child_info: ChildInfo, prefix: &[u8]) -> Vec> { + self.caching_state() + .child_keys(storage_key, child_info, prefix) + } + + fn as_trie_backend(&mut self) -> Option<&TrieBackend>> { + self.caching_state + .as_mut() + .expect("`caching_state` is valid for the lifetime of the object; qed") + .as_trie_backend() + } + + fn register_overlay_stats(&mut self, stats: &sp_state_machine::StateMachineStats) { + self.caching_state().register_overlay_stats(stats); + } + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + self.caching_state().usage_info() + } } impl Drop for SyncingCachingState { - fn drop(&mut self) { - if self.disable_syncing { - return; - } - - if let Some(mut caching_state) = self.caching_state.take() { - let _lock = self.lock.read(); - - self.state_usage.merge_sm(caching_state.usage.take()); - if let Some(hash) = caching_state.cache.parent_hash.clone() { - let is_best = self.meta.read().best_hash == hash; - caching_state.cache.sync_cache(&[], &[], vec![], vec![], None, None, is_best); - } - } - } + fn drop(&mut self) { + if self.disable_syncing { + return; + } + + if let Some(mut caching_state) = self.caching_state.take() { + let _lock = self.lock.read(); + + self.state_usage.merge_sm(caching_state.usage.take()); + if let Some(hash) = caching_state.cache.parent_hash.clone() { + let is_best = self.meta.read().best_hash == hash; + caching_state + .cache + .sync_cache(&[], &[], 
vec![], vec![], None, None, is_best); + } + } + } } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::{ - traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, - }; - use sp_state_machine::InMemoryBackend; - - type Block = RawBlock>; - - #[test] - fn smoke() { - //init_log(); - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - - // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] - // state [ 5 5 4 3 2 2 ] - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h0), - Some(0), - true, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h1b), - Some(1), - false, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1b), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![4]))], - vec![], - Some(h2b), - Some(2), - false, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1a), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![5]))], - vec![], - Some(h2a), - Some(2), - true, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); - - let s = CachingState::new( - 
InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); - assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); - - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1a), - ); - assert!(s.storage(&key).unwrap().is_none()); - - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); - assert!(s.storage(&key).unwrap().is_none()); - - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1b), - ); - assert!(s.storage(&key).unwrap().is_none()); - - // reorg to 3b - // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); - s.cache.sync_cache( - &[h1b, h2b, h3b], - &[h1a, h2a, h3a], - vec![], - vec![], - Some(h3b), - Some(3), - true, - ); - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); - assert!(s.storage(&key).unwrap().is_none()); - } - - #[test] - fn simple_fork() { - let _ = ::env_logger::try_init(); - - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3b = H256::random(); - - let shared = new_shared_cache::(256*1024, (0,1)); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h1), - Some(1), - true, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h2b), - Some(2), - false, - ); - - let mut s = CachingState::new( - 
InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h3b), - Some(2), - false, - ); - - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); - assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); - } - - #[test] - fn double_fork() { - let root_parent = H256::random(); - let key = H256::random()[..].to_vec(); - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let shared = new_shared_cache::(256*1024, (0,1)); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2a), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![2]))], - vec![], - Some(h3a), - Some(3), - true, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); - s.cache.sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h2b), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h3b), - Some(3), - false, - ); - - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h3a), - ); - assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); - } - - #[test] - fn should_track_used_size_correctly() { - let root_parent = H256::random(); - let shared = new_shared_cache::(109, ((109-36), 109)); - let h0 = H256::random(); - - let mut s = 
CachingState::new( - InMemoryBackend::::default(), shared.clone(), Some(root_parent.clone()), - ); - - let key = H256::random()[..].to_vec(); - let s_key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![1, 2, 3]))], - vec![], - Some(h0), - Some(0), - true, - ); - // 32 key, 3 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 35 /* bytes */); - - let key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![], - vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])], - Some(h0), - Some(0), - true, - ); - // 35 + (2 * 32) key, 2 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 101 /* bytes */); - } - - #[test] - fn should_remove_lru_items_based_on_tracking_used_size() { - let root_parent = H256::random(); - let shared = new_shared_cache::(36*3, (2,3)); - let h0 = H256::random(); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent), - ); - - let key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![1, 2, 3, 4]))], - vec![], - Some(h0), - Some(0), - true, - ); - // 32 key, 4 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 36 /* bytes */); - - let key = H256::random()[..].to_vec(); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![1, 2]))], - vec![], - Some(h0), - Some(0), - true, - ); - // 32 key, 2 byte size - assert_eq!(shared.lock().used_storage_cache_size(), 34 /* bytes */); - } - - #[test] - fn fix_storage_mismatch_issue() { - let _ = ::env_logger::try_init(); - let root_parent = H256::random(); - - let key = H256::random()[..].to_vec(); - - let h0 = H256::random(); - let h1 = H256::random(); - - let shared = new_shared_cache::(256 * 1024, (0, 1)); - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(root_parent.clone()), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), 
Some(vec![2]))], - vec![], - Some(h0.clone()), - Some(0), - true, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h0), - ); - s.cache.sync_cache( - &[], - &[], - vec![(key.clone(), Some(vec![3]))], - vec![], - Some(h1), - Some(1), - true, - ); - - let mut s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); - assert_eq!(s.storage(&key).unwrap(), Some(vec![3])); - - // Restart (or unknown block?), clear caches. - { - let mut cache = s.cache.shared_cache.lock(); - let cache = &mut *cache; - cache.lru_storage.clear(); - cache.lru_hashes.clear(); - cache.lru_child_storage.clear(); - cache.modifications.clear(); - } - - // New value is written because of cache miss. - s.cache.local_cache.write().storage.insert(key.clone(), Some(vec![42])); - - // New value is propagated. - s.cache.sync_cache(&[], &[], vec![], vec![], None, None, true); - - let s = CachingState::new( - InMemoryBackend::::default(), - shared.clone(), - Some(h1), - ); - assert_eq!(s.storage(&key).unwrap(), None); - } + use super::*; + use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + traits::BlakeTwo256, + }; + use sp_state_machine::InMemoryBackend; + + type Block = RawBlock>; + + #[test] + fn smoke() { + //init_log(); + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + + let shared = new_shared_cache::(256 * 1024, (0, 1)); + + // blocks [ 3a(c) 2a(c) 2b 1b 1a(c) 0 ] + // state [ 5 5 4 3 2 2 ] + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h0), + Some(0), + true, + ); + + let mut s = CachingState::new( + 
InMemoryBackend::::default(), + shared.clone(), + Some(h0), + ); + s.cache + .sync_cache(&[], &[], vec![], vec![], Some(h1a), Some(1), true); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h0), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![3]))], + vec![], + Some(h1b), + Some(1), + false, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1b), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![4]))], + vec![], + Some(h2b), + Some(2), + false, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1a), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![5]))], + vec![], + Some(h2a), + Some(2), + true, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h2a), + ); + s.cache + .sync_cache(&[], &[], vec![], vec![], Some(h3a), Some(3), true); + + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h3a), + ); + assert_eq!(s.storage(&key).unwrap().unwrap(), vec![5]); + + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1a), + ); + assert!(s.storage(&key).unwrap().is_none()); + + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h2b), + ); + assert!(s.storage(&key).unwrap().is_none()); + + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1b), + ); + assert!(s.storage(&key).unwrap().is_none()); + + // reorg to 3b + // blocks [ 3b(c) 3a 2a 2b(c) 1b 1a 0 ] + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h2b), + ); + s.cache.sync_cache( + &[h1b, h2b, h3b], + &[h1a, h2a, h3a], + vec![], + vec![], + Some(h3b), + Some(3), + true, + ); + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h3a), + ); + 
assert!(s.storage(&key).unwrap().is_none()); + } + + #[test] + fn simple_fork() { + let _ = ::env_logger::try_init(); + + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h1 = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3b = H256::random(); + + let shared = new_shared_cache::(256 * 1024, (0, 1)); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h1), + Some(1), + true, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + s.cache + .sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![3]))], + vec![], + Some(h2b), + Some(2), + false, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h2b), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![3]))], + vec![], + Some(h3b), + Some(2), + false, + ); + + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h2a), + ); + assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); + } + + #[test] + fn double_fork() { + let root_parent = H256::random(); + let key = H256::random()[..].to_vec(); + let h1 = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + + let shared = new_shared_cache::(256 * 1024, (0, 1)); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent), + ); + s.cache + .sync_cache(&[], &[], vec![], vec![], Some(h1), Some(1), true); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + s.cache + 
.sync_cache(&[], &[], vec![], vec![], Some(h2a), Some(2), true); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h2a), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h3a), + Some(3), + true, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + s.cache + .sync_cache(&[], &[], vec![], vec![], Some(h2b), Some(2), false); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h2b), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![3]))], + vec![], + Some(h3b), + Some(3), + false, + ); + + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h3a), + ); + assert_eq!(s.storage(&key).unwrap().unwrap(), vec![2]); + } + + #[test] + fn should_track_used_size_correctly() { + let root_parent = H256::random(); + let shared = new_shared_cache::(109, ((109 - 36), 109)); + let h0 = H256::random(); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent.clone()), + ); + + let key = H256::random()[..].to_vec(); + let s_key = H256::random()[..].to_vec(); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![1, 2, 3]))], + vec![], + Some(h0), + Some(0), + true, + ); + // 32 key, 3 byte size + assert_eq!(shared.lock().used_storage_cache_size(), 35 /* bytes */); + + let key = H256::random()[..].to_vec(); + s.cache.sync_cache( + &[], + &[], + vec![], + vec![(s_key.clone(), vec![(key.clone(), Some(vec![1, 2]))])], + Some(h0), + Some(0), + true, + ); + // 35 + (2 * 32) key, 2 byte size + assert_eq!( + shared.lock().used_storage_cache_size(), + 101 /* bytes */ + ); + } + + #[test] + fn should_remove_lru_items_based_on_tracking_used_size() { + let root_parent = H256::random(); + let shared = new_shared_cache::(36 * 3, (2, 3)); + let h0 = H256::random(); + + let mut s = CachingState::new( + 
InMemoryBackend::::default(), + shared.clone(), + Some(root_parent), + ); + + let key = H256::random()[..].to_vec(); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![1, 2, 3, 4]))], + vec![], + Some(h0), + Some(0), + true, + ); + // 32 key, 4 byte size + assert_eq!(shared.lock().used_storage_cache_size(), 36 /* bytes */); + + let key = H256::random()[..].to_vec(); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![1, 2]))], + vec![], + Some(h0), + Some(0), + true, + ); + // 32 key, 2 byte size + assert_eq!(shared.lock().used_storage_cache_size(), 34 /* bytes */); + } + + #[test] + fn fix_storage_mismatch_issue() { + let _ = ::env_logger::try_init(); + let root_parent = H256::random(); + + let key = H256::random()[..].to_vec(); + + let h0 = H256::random(); + let h1 = H256::random(); + + let shared = new_shared_cache::(256 * 1024, (0, 1)); + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(root_parent.clone()), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![2]))], + vec![], + Some(h0.clone()), + Some(0), + true, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h0), + ); + s.cache.sync_cache( + &[], + &[], + vec![(key.clone(), Some(vec![3]))], + vec![], + Some(h1), + Some(1), + true, + ); + + let mut s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + assert_eq!(s.storage(&key).unwrap(), Some(vec![3])); + + // Restart (or unknown block?), clear caches. + { + let mut cache = s.cache.shared_cache.lock(); + let cache = &mut *cache; + cache.lru_storage.clear(); + cache.lru_hashes.clear(); + cache.lru_child_storage.clear(); + cache.modifications.clear(); + } + + // New value is written because of cache miss. + s.cache + .local_cache + .write() + .storage + .insert(key.clone(), Some(vec![42])); + + // New value is propagated. 
+ s.cache + .sync_cache(&[], &[], vec![], vec![], None, None, true); + + let s = CachingState::new( + InMemoryBackend::::default(), + shared.clone(), + Some(h1), + ); + assert_eq!(s.storage(&key).unwrap(), None); + } } #[cfg(test)] mod qc { - use std::collections::{HashMap, hash_map::Entry}; - - use quickcheck::{quickcheck, TestResult, Arbitrary}; - - use super::*; - use sp_runtime::{ - traits::BlakeTwo256, - testing::{H256, Block as RawBlock, ExtrinsicWrapper}, - }; - use sp_state_machine::InMemoryBackend; - - type Block = RawBlock>; - - type KeySet = Vec<(Vec, Option>)>; - - type KeyMap = HashMap, Option>>; - - #[derive(Debug, Clone)] - struct Node { - hash: H256, - parent: H256, - state: KeyMap, - changes: KeySet, - } - - impl Node { - fn new_next(&self, hash: H256, changes: KeySet) -> Self { - let mut state = self.state.clone(); - - for (k, v) in self.state.iter() { state.insert(k.clone(), v.clone()); } - for (k, v) in changes.clone().into_iter() { state.insert(k, v); } - - Self { - hash, - parent: self.hash, - changes, - state, - } - } - - fn new(hash: H256, parent: H256, changes: KeySet) -> Self { - let mut state = KeyMap::new(); - - for (k, v) in changes.clone().into_iter() { state.insert(k, v); } - - Self { - hash, - parent, - state, - changes, - } - } - - fn purge(&mut self, other_changes: &KeySet) { - for (k, _) in other_changes.iter() { - self.state.remove(k); - } - } - } - - #[derive(Debug, Clone)] - enum Action { - Next { hash: H256, changes: KeySet }, - Fork { depth: usize, hash: H256, changes: KeySet }, - ReorgWithImport { depth: usize, hash: H256 }, - FinalizationReorg { fork_depth: usize, depth: usize }, - } - - impl Arbitrary for Action { - fn arbitrary(gen: &mut G) -> Self { - let path = gen.next_u32() as u8; - let mut buf = [0u8; 32]; - - match path { - 0..=175 => { - gen.fill_bytes(&mut buf[..]); - Action::Next { - hash: H256::from(&buf), - changes: { - let mut set = Vec::new(); - for _ in 0..gen.next_u32()/(64*256*256*256) { - 
set.push((vec![gen.next_u32() as u8], Some(vec![gen.next_u32() as u8]))); - } - set - } - } - }, - 176..=220 => { - gen.fill_bytes(&mut buf[..]); - Action::Fork { - hash: H256::from(&buf), - depth: ((gen.next_u32() as u8) / 32) as usize, - changes: { - let mut set = Vec::new(); - for _ in 0..gen.next_u32()/(64*256*256*256) { - set.push((vec![gen.next_u32() as u8], Some(vec![gen.next_u32() as u8]))); - } - set - } - } - }, - 221..=240 => { - gen.fill_bytes(&mut buf[..]); - Action::ReorgWithImport { - hash: H256::from(&buf), - depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 - } - }, - _ => { - gen.fill_bytes(&mut buf[..]); - Action::FinalizationReorg { - fork_depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 - depth: ((gen.next_u32() as u8) / 64) as usize, // 0-3 - } - }, - } - } - } - - struct Mutator { - shared: SharedCache, - canon: Vec, - forks: HashMap>, - } - - impl Mutator { - fn new_empty() -> Self { - let shared = new_shared_cache::(256*1024, (0,1)); - - Self { - shared, - canon: vec![], - forks: HashMap::new(), - } - } - - fn head_state(&self, hash: H256) -> CachingState, Block> { - CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(hash), - ) - } - - fn canon_head_state(&self) -> CachingState, Block> { - self.head_state(self.canon.last().expect("Expected to be one commit").hash) - } - - fn mutate_static( - &mut self, - action: Action, - ) -> CachingState, Block> { - self.mutate(action).expect("Expected to provide only valid actions to the mutate_static") - } - - fn canon_len(&self) -> usize { - return self.canon.len(); - } - - fn head_storage_ref(&self) -> &KeyMap { - &self.canon.last().expect("Expected to be one commit").state - } - - fn key_permutations() -> Vec> { - (0u8..255).map(|x| vec![x]).collect() - } - - fn mutate( - &mut self, - action: Action, - ) -> Result, Block>, ()> { - let state = match action { - Action::Fork { depth, hash, changes } => { - let pos = self.canon.len() as isize - depth as isize; - if 
pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len()-1) as isize - // no fork on top also, thus len-1 - { - return Err(()); - } - - let pos = pos as usize; - - let fork_at = self.canon[pos].hash; - - let (total_h, parent) = match self.forks.entry(fork_at) { - Entry::Occupied(occupied) => { - let chain = occupied.into_mut(); - let parent = chain.last().expect("No empty forks are ever created").clone(); - let mut node = parent.new_next(hash, changes.clone()); - - for earlier in chain.iter() { - node.purge(&earlier.changes.clone()); - } - - chain.push(node); - - (pos + chain.len(), parent.hash) - }, - Entry::Vacant(vacant) => { - let canon_parent = &self.canon[pos]; - vacant.insert(vec![canon_parent.new_next(hash, changes.clone())]); - - (pos + 1, fork_at) - } - }; - - let mut state = CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(parent), - ); - - state.cache.sync_cache( - &[], - &[], - changes, - vec![], - Some(hash), - Some(total_h as u64), - false, - ); - - state - }, - Action::Next { hash, changes } => { - let (next, parent_hash) = match self.canon.last() { - None => { - let parent_hash = H256::from(&[0u8; 32]); - (Node::new(hash, parent_hash, changes.clone()), parent_hash) - }, - Some(parent) => { - (parent.new_next(hash, changes.clone()), parent.hash) - } - }; - - // delete cache entries for earlier - for node in self.canon.iter_mut() { - node.purge(&next.changes); - if let Some(fork) = self.forks.get_mut(&node.hash) { - for node in fork.iter_mut() { - node.purge(&next.changes); - } - } - } - - let mut state = CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(parent_hash), - ); - - state.cache.sync_cache( - &[], - &[], - next.changes.clone(), - vec![], - Some(hash), - Some(self.canon.len() as u64 + 1), - true, - ); - - self.canon.push(next); - - state - }, - Action::ReorgWithImport { depth, hash } => { - let pos = self.canon.len() as isize - depth as isize; - if pos < 0 || pos+1 >= 
self.canon.len() as isize { return Err(()); } - let fork_at = self.canon[pos as usize].hash; - let pos = pos as usize; - - match self.forks.get_mut(&fork_at) { - Some(chain) => { - let mut new_fork = self.canon.drain(pos+1..).collect::>(); - - let retracted: Vec = new_fork.iter().map(|node| node.hash).collect(); - let enacted: Vec = chain.iter().map(|node| node.hash).collect(); - - std::mem::swap(chain, &mut new_fork); - - let mut node = new_fork.last().map( - |node| node.new_next(hash, vec![]) - ).expect("No empty fork ever created!"); - - for invalidators in chain.iter().chain(new_fork.iter()) { - node.purge(&invalidators.changes); - } - - self.canon.extend(new_fork.into_iter()); - - self.canon.push(node); - - let mut state = CachingState::new( - InMemoryBackend::::default(), - self.shared.clone(), - Some(fork_at), - ); - - let height = pos as u64 + enacted.len() as u64 + 2; - state.cache.sync_cache( - &enacted[..], - &retracted[..], - vec![], - vec![], - Some(hash), - Some(height), - true, - ); - - state - } - None => { - return Err(()); // no reorg without a fork atm! 
- }, - } - }, - Action::FinalizationReorg { fork_depth, depth } => { - let pos = self.canon.len() as isize - fork_depth as isize; - if pos < 0 || pos+1 >= self.canon.len() as isize { return Err(()); } - let fork_at = self.canon[pos as usize].hash; - let pos = pos as usize; - - match self.forks.get_mut(&fork_at) { - Some(fork_chain) => { - let sync_pos = fork_chain.len() as isize - fork_chain.len() as isize - depth as isize; - if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { return Err (()); } - let sync_pos = sync_pos as usize; - - let mut new_fork = self.canon.drain(pos+1..).collect::>(); - - let retracted: Vec = new_fork.iter().map(|node| node.hash).collect(); - let enacted: Vec = fork_chain.iter().take(sync_pos+1).map(|node| node.hash).collect(); - - std::mem::swap(fork_chain, &mut new_fork); - - self.shared.lock().sync(&retracted, &enacted); - - self.head_state( - self.canon.last() - .expect("wasn't forking to emptiness so there should be one!") - .hash - ) - }, - None => { - return Err(()); // no reorg to nothing pls! 
- } - } - - }, - }; - - Ok(state) - } - } - - #[test] - fn smoke() { - let key = H256::random()[..].to_vec(); - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h0, changes: vec![(key.clone(), Some(vec![2]))] }); - mutator.mutate_static(Action::Next { hash: h1a, changes: vec![] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: vec![(key.clone(), Some(vec![3]))] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: vec![(key.clone(), Some(vec![4]))] }); - mutator.mutate_static(Action::Next { hash: h2a, changes: vec![(key.clone(), Some(vec![5]))] }); - mutator.mutate_static(Action::Next { hash: h3a, changes: vec![] }); - - assert_eq!(mutator.head_state(h3a).storage(&key).unwrap().expect("there should be a value"), vec![5]); - assert!(mutator.head_state(h1a).storage(&key).unwrap().is_none()); - assert!(mutator.head_state(h2b).storage(&key).unwrap().is_none()); - assert!(mutator.head_state(h1b).storage(&key).unwrap().is_none()); - - mutator.mutate_static(Action::ReorgWithImport { depth: 4, hash: h3b }); - assert!(mutator.head_state(h3a).storage(&key).unwrap().is_none()); - } - - fn is_head_match(mutator: &Mutator) -> bool { - let head_state = mutator.canon_head_state(); - - for key in Mutator::key_permutations() { - match (head_state.storage(&key).unwrap(), mutator.head_storage_ref().get(&key)) { - (Some(x), Some(y)) => { - if Some(&x) != y.as_ref() { - eprintln!("{:?} != {:?}", x, y); - return false; - } - }, - (None, Some(_y)) => { - // TODO: cache miss is not tracked atm - }, - (Some(x), None) => { - eprintln!("{:?} != ", x); - return false; - }, - _ => continue, - } - } - true - } - - fn is_canon_match(mutator: &Mutator) -> bool { - for node in mutator.canon.iter() { - let head_state = 
mutator.head_state(node.hash); - for key in Mutator::key_permutations() { - match (head_state.storage(&key).unwrap(), node.state.get(&key)) { - (Some(x), Some(y)) => { - if Some(&x) != y.as_ref() { - eprintln!("at [{}]: {:?} != {:?}", node.hash, x, y); - return false; - } - }, - (None, Some(_y)) => { - // cache miss is not tracked atm - }, - (Some(x), None) => { - eprintln!("at [{}]: {:?} != ", node.hash, x); - return false; - }, - _ => continue, - } - } - } - true - } - - #[test] - fn reorg() { - let key = H256::random()[..].to_vec(); - let h0 = H256::random(); - let h1 = H256::random(); - let h2 = H256::random(); - let h1b = H256::random(); - let h2b = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h0, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h1, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h2, changes: vec![(key.clone(), Some(vec![2]))] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: vec![(key.clone(), Some(vec![3]))] }); - mutator.mutate_static(Action::ReorgWithImport { depth: 2, hash: h2b }); - - assert!(is_head_match(&mutator)) - } - - fn key(k: u8) -> Vec { vec![k] } - fn val(v: u8) -> Option> { Some(vec![v]) } - fn keyval(k: u8, v: u8) -> KeySet { vec![(key(k), val(v))] } - - #[test] - fn reorg2() { - let h0 = H256::random(); - let h1a = H256::random(); - let h1b = H256::random(); - let h2b = H256::random(); - let h2a = H256::random(); - let h3a = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h0, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Next { hash: h1a, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h1b, changes: keyval(2, 2 ) }); - - mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(3, 3) }); - mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(4, 4) }); - 
mutator.mutate_static(Action::ReorgWithImport { depth: 4, hash: h2b }); - - assert!(is_head_match(&mutator)) - } - - #[test] - fn fork2() { - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - let h3b = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h1, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h2a, changes: vec![] }); - mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(1, 1) }); - - mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: vec![] }); - mutator.mutate_static(Action::Fork { depth: 2, hash: h3b, changes: keyval(1, 2) }); - - assert!(is_head_match(&mutator)) - } - - #[test] - fn fork3() { - let h1 = H256::random(); - let h2a = H256::random(); - let h2b = H256::random(); - let h3a = H256::random(); - - let mut mutator = Mutator::new_empty(); - mutator.mutate_static(Action::Next { hash: h1, changes: keyval(1, 1) }); - mutator.mutate_static(Action::Next { hash: h2a, changes: keyval(2, 2) }); - mutator.mutate_static(Action::Next { hash: h3a, changes: keyval(3, 3) }); - - mutator.mutate_static(Action::Fork { depth: 2, hash: h2b, changes: keyval(1, 3) }); - - assert!(is_canon_match(&mutator)) - } - - quickcheck! 
{ - fn head_complete(actions: Vec) -> TestResult { - let mut mutator = Mutator::new_empty(); - - for action in actions.into_iter() { - if let Err(_) = mutator.mutate(action) { - return TestResult::discard(); - } - } - - if mutator.canon_len() == 0 { - return TestResult::discard(); - } - - TestResult::from_bool(is_head_match(&mutator)) - } - - fn canon_complete(actions: Vec) -> TestResult { - let mut mutator = Mutator::new_empty(); - - for action in actions.into_iter() { - if let Err(_) = mutator.mutate(action) { - return TestResult::discard(); - } - } - - if mutator.canon_len() == 0 { - return TestResult::discard(); - } - - TestResult::from_bool(is_canon_match(&mutator)) - } - } + use std::collections::{hash_map::Entry, HashMap}; + + use quickcheck::{quickcheck, Arbitrary, TestResult}; + + use super::*; + use sp_runtime::{ + testing::{Block as RawBlock, ExtrinsicWrapper, H256}, + traits::BlakeTwo256, + }; + use sp_state_machine::InMemoryBackend; + + type Block = RawBlock>; + + type KeySet = Vec<(Vec, Option>)>; + + type KeyMap = HashMap, Option>>; + + #[derive(Debug, Clone)] + struct Node { + hash: H256, + parent: H256, + state: KeyMap, + changes: KeySet, + } + + impl Node { + fn new_next(&self, hash: H256, changes: KeySet) -> Self { + let mut state = self.state.clone(); + + for (k, v) in self.state.iter() { + state.insert(k.clone(), v.clone()); + } + for (k, v) in changes.clone().into_iter() { + state.insert(k, v); + } + + Self { + hash, + parent: self.hash, + changes, + state, + } + } + + fn new(hash: H256, parent: H256, changes: KeySet) -> Self { + let mut state = KeyMap::new(); + + for (k, v) in changes.clone().into_iter() { + state.insert(k, v); + } + + Self { + hash, + parent, + state, + changes, + } + } + + fn purge(&mut self, other_changes: &KeySet) { + for (k, _) in other_changes.iter() { + self.state.remove(k); + } + } + } + + #[derive(Debug, Clone)] + enum Action { + Next { + hash: H256, + changes: KeySet, + }, + Fork { + depth: usize, + hash: H256, + 
changes: KeySet, + }, + ReorgWithImport { + depth: usize, + hash: H256, + }, + FinalizationReorg { + fork_depth: usize, + depth: usize, + }, + } + + impl Arbitrary for Action { + fn arbitrary(gen: &mut G) -> Self { + let path = gen.next_u32() as u8; + let mut buf = [0u8; 32]; + + match path { + 0..=175 => { + gen.fill_bytes(&mut buf[..]); + Action::Next { + hash: H256::from(&buf), + changes: { + let mut set = Vec::new(); + for _ in 0..gen.next_u32() / (64 * 256 * 256 * 256) { + set.push(( + vec![gen.next_u32() as u8], + Some(vec![gen.next_u32() as u8]), + )); + } + set + }, + } + } + 176..=220 => { + gen.fill_bytes(&mut buf[..]); + Action::Fork { + hash: H256::from(&buf), + depth: ((gen.next_u32() as u8) / 32) as usize, + changes: { + let mut set = Vec::new(); + for _ in 0..gen.next_u32() / (64 * 256 * 256 * 256) { + set.push(( + vec![gen.next_u32() as u8], + Some(vec![gen.next_u32() as u8]), + )); + } + set + }, + } + } + 221..=240 => { + gen.fill_bytes(&mut buf[..]); + Action::ReorgWithImport { + hash: H256::from(&buf), + depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 + } + } + _ => { + gen.fill_bytes(&mut buf[..]); + Action::FinalizationReorg { + fork_depth: ((gen.next_u32() as u8) / 32) as usize, // 0-7 + depth: ((gen.next_u32() as u8) / 64) as usize, // 0-3 + } + } + } + } + } + + struct Mutator { + shared: SharedCache, + canon: Vec, + forks: HashMap>, + } + + impl Mutator { + fn new_empty() -> Self { + let shared = new_shared_cache::(256 * 1024, (0, 1)); + + Self { + shared, + canon: vec![], + forks: HashMap::new(), + } + } + + fn head_state(&self, hash: H256) -> CachingState, Block> { + CachingState::new( + InMemoryBackend::::default(), + self.shared.clone(), + Some(hash), + ) + } + + fn canon_head_state(&self) -> CachingState, Block> { + self.head_state(self.canon.last().expect("Expected to be one commit").hash) + } + + fn mutate_static( + &mut self, + action: Action, + ) -> CachingState, Block> { + self.mutate(action) + .expect("Expected to provide 
only valid actions to the mutate_static") + } + + fn canon_len(&self) -> usize { + return self.canon.len(); + } + + fn head_storage_ref(&self) -> &KeyMap { + &self.canon.last().expect("Expected to be one commit").state + } + + fn key_permutations() -> Vec> { + (0u8..255).map(|x| vec![x]).collect() + } + + fn mutate( + &mut self, + action: Action, + ) -> Result, Block>, ()> { + let state = match action { + Action::Fork { + depth, + hash, + changes, + } => { + let pos = self.canon.len() as isize - depth as isize; + if pos < 0 || self.canon.len() == 0 || pos >= (self.canon.len() - 1) as isize + // no fork on top also, thus len-1 + { + return Err(()); + } + + let pos = pos as usize; + + let fork_at = self.canon[pos].hash; + + let (total_h, parent) = match self.forks.entry(fork_at) { + Entry::Occupied(occupied) => { + let chain = occupied.into_mut(); + let parent = chain + .last() + .expect("No empty forks are ever created") + .clone(); + let mut node = parent.new_next(hash, changes.clone()); + + for earlier in chain.iter() { + node.purge(&earlier.changes.clone()); + } + + chain.push(node); + + (pos + chain.len(), parent.hash) + } + Entry::Vacant(vacant) => { + let canon_parent = &self.canon[pos]; + vacant.insert(vec![canon_parent.new_next(hash, changes.clone())]); + + (pos + 1, fork_at) + } + }; + + let mut state = CachingState::new( + InMemoryBackend::::default(), + self.shared.clone(), + Some(parent), + ); + + state.cache.sync_cache( + &[], + &[], + changes, + vec![], + Some(hash), + Some(total_h as u64), + false, + ); + + state + } + Action::Next { hash, changes } => { + let (next, parent_hash) = match self.canon.last() { + None => { + let parent_hash = H256::from(&[0u8; 32]); + (Node::new(hash, parent_hash, changes.clone()), parent_hash) + } + Some(parent) => (parent.new_next(hash, changes.clone()), parent.hash), + }; + + // delete cache entries for earlier + for node in self.canon.iter_mut() { + node.purge(&next.changes); + if let Some(fork) = 
self.forks.get_mut(&node.hash) { + for node in fork.iter_mut() { + node.purge(&next.changes); + } + } + } + + let mut state = CachingState::new( + InMemoryBackend::::default(), + self.shared.clone(), + Some(parent_hash), + ); + + state.cache.sync_cache( + &[], + &[], + next.changes.clone(), + vec![], + Some(hash), + Some(self.canon.len() as u64 + 1), + true, + ); + + self.canon.push(next); + + state + } + Action::ReorgWithImport { depth, hash } => { + let pos = self.canon.len() as isize - depth as isize; + if pos < 0 || pos + 1 >= self.canon.len() as isize { + return Err(()); + } + let fork_at = self.canon[pos as usize].hash; + let pos = pos as usize; + + match self.forks.get_mut(&fork_at) { + Some(chain) => { + let mut new_fork = self.canon.drain(pos + 1..).collect::>(); + + let retracted: Vec = + new_fork.iter().map(|node| node.hash).collect(); + let enacted: Vec = chain.iter().map(|node| node.hash).collect(); + + std::mem::swap(chain, &mut new_fork); + + let mut node = new_fork + .last() + .map(|node| node.new_next(hash, vec![])) + .expect("No empty fork ever created!"); + + for invalidators in chain.iter().chain(new_fork.iter()) { + node.purge(&invalidators.changes); + } + + self.canon.extend(new_fork.into_iter()); + + self.canon.push(node); + + let mut state = CachingState::new( + InMemoryBackend::::default(), + self.shared.clone(), + Some(fork_at), + ); + + let height = pos as u64 + enacted.len() as u64 + 2; + state.cache.sync_cache( + &enacted[..], + &retracted[..], + vec![], + vec![], + Some(hash), + Some(height), + true, + ); + + state + } + None => { + return Err(()); // no reorg without a fork atm! 
+ } + } + } + Action::FinalizationReorg { fork_depth, depth } => { + let pos = self.canon.len() as isize - fork_depth as isize; + if pos < 0 || pos + 1 >= self.canon.len() as isize { + return Err(()); + } + let fork_at = self.canon[pos as usize].hash; + let pos = pos as usize; + + match self.forks.get_mut(&fork_at) { + Some(fork_chain) => { + let sync_pos = fork_chain.len() as isize + - fork_chain.len() as isize + - depth as isize; + if sync_pos < 0 || sync_pos >= fork_chain.len() as isize { + return Err(()); + } + let sync_pos = sync_pos as usize; + + let mut new_fork = self.canon.drain(pos + 1..).collect::>(); + + let retracted: Vec = + new_fork.iter().map(|node| node.hash).collect(); + let enacted: Vec = fork_chain + .iter() + .take(sync_pos + 1) + .map(|node| node.hash) + .collect(); + + std::mem::swap(fork_chain, &mut new_fork); + + self.shared.lock().sync(&retracted, &enacted); + + self.head_state( + self.canon + .last() + .expect("wasn't forking to emptiness so there should be one!") + .hash, + ) + } + None => { + return Err(()); // no reorg to nothing pls! 
+ } + } + } + }; + + Ok(state) + } + } + + #[test] + fn smoke() { + let key = H256::random()[..].to_vec(); + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + + let mut mutator = Mutator::new_empty(); + mutator.mutate_static(Action::Next { + hash: h0, + changes: vec![(key.clone(), Some(vec![2]))], + }); + mutator.mutate_static(Action::Next { + hash: h1a, + changes: vec![], + }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h1b, + changes: vec![(key.clone(), Some(vec![3]))], + }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h2b, + changes: vec![(key.clone(), Some(vec![4]))], + }); + mutator.mutate_static(Action::Next { + hash: h2a, + changes: vec![(key.clone(), Some(vec![5]))], + }); + mutator.mutate_static(Action::Next { + hash: h3a, + changes: vec![], + }); + + assert_eq!( + mutator + .head_state(h3a) + .storage(&key) + .unwrap() + .expect("there should be a value"), + vec![5] + ); + assert!(mutator.head_state(h1a).storage(&key).unwrap().is_none()); + assert!(mutator.head_state(h2b).storage(&key).unwrap().is_none()); + assert!(mutator.head_state(h1b).storage(&key).unwrap().is_none()); + + mutator.mutate_static(Action::ReorgWithImport { + depth: 4, + hash: h3b, + }); + assert!(mutator.head_state(h3a).storage(&key).unwrap().is_none()); + } + + fn is_head_match(mutator: &Mutator) -> bool { + let head_state = mutator.canon_head_state(); + + for key in Mutator::key_permutations() { + match ( + head_state.storage(&key).unwrap(), + mutator.head_storage_ref().get(&key), + ) { + (Some(x), Some(y)) => { + if Some(&x) != y.as_ref() { + eprintln!("{:?} != {:?}", x, y); + return false; + } + } + (None, Some(_y)) => { + // TODO: cache miss is not tracked atm + } + (Some(x), None) => { + eprintln!("{:?} != ", x); + return false; + } + _ => continue, + } + } + true + } + + fn is_canon_match(mutator: &Mutator) 
-> bool { + for node in mutator.canon.iter() { + let head_state = mutator.head_state(node.hash); + for key in Mutator::key_permutations() { + match (head_state.storage(&key).unwrap(), node.state.get(&key)) { + (Some(x), Some(y)) => { + if Some(&x) != y.as_ref() { + eprintln!("at [{}]: {:?} != {:?}", node.hash, x, y); + return false; + } + } + (None, Some(_y)) => { + // cache miss is not tracked atm + } + (Some(x), None) => { + eprintln!("at [{}]: {:?} != ", node.hash, x); + return false; + } + _ => continue, + } + } + } + true + } + + #[test] + fn reorg() { + let key = H256::random()[..].to_vec(); + let h0 = H256::random(); + let h1 = H256::random(); + let h2 = H256::random(); + let h1b = H256::random(); + let h2b = H256::random(); + + let mut mutator = Mutator::new_empty(); + mutator.mutate_static(Action::Next { + hash: h0, + changes: vec![], + }); + mutator.mutate_static(Action::Next { + hash: h1, + changes: vec![], + }); + mutator.mutate_static(Action::Next { + hash: h2, + changes: vec![(key.clone(), Some(vec![2]))], + }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h1b, + changes: vec![(key.clone(), Some(vec![3]))], + }); + mutator.mutate_static(Action::ReorgWithImport { + depth: 2, + hash: h2b, + }); + + assert!(is_head_match(&mutator)) + } + + fn key(k: u8) -> Vec { + vec![k] + } + fn val(v: u8) -> Option> { + Some(vec![v]) + } + fn keyval(k: u8, v: u8) -> KeySet { + vec![(key(k), val(v))] + } + + #[test] + fn reorg2() { + let h0 = H256::random(); + let h1a = H256::random(); + let h1b = H256::random(); + let h2b = H256::random(); + let h2a = H256::random(); + let h3a = H256::random(); + + let mut mutator = Mutator::new_empty(); + mutator.mutate_static(Action::Next { + hash: h0, + changes: keyval(1, 1), + }); + mutator.mutate_static(Action::Next { + hash: h1a, + changes: keyval(1, 1), + }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h1b, + changes: keyval(2, 2), + }); + + mutator.mutate_static(Action::Next { + hash: h2a, + 
changes: keyval(3, 3), + }); + mutator.mutate_static(Action::Next { + hash: h3a, + changes: keyval(4, 4), + }); + mutator.mutate_static(Action::ReorgWithImport { + depth: 4, + hash: h2b, + }); + + assert!(is_head_match(&mutator)) + } + + #[test] + fn fork2() { + let h1 = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + let h3b = H256::random(); + + let mut mutator = Mutator::new_empty(); + mutator.mutate_static(Action::Next { + hash: h1, + changes: vec![], + }); + mutator.mutate_static(Action::Next { + hash: h2a, + changes: vec![], + }); + mutator.mutate_static(Action::Next { + hash: h3a, + changes: keyval(1, 1), + }); + + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h2b, + changes: vec![], + }); + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h3b, + changes: keyval(1, 2), + }); + + assert!(is_head_match(&mutator)) + } + + #[test] + fn fork3() { + let h1 = H256::random(); + let h2a = H256::random(); + let h2b = H256::random(); + let h3a = H256::random(); + + let mut mutator = Mutator::new_empty(); + mutator.mutate_static(Action::Next { + hash: h1, + changes: keyval(1, 1), + }); + mutator.mutate_static(Action::Next { + hash: h2a, + changes: keyval(2, 2), + }); + mutator.mutate_static(Action::Next { + hash: h3a, + changes: keyval(3, 3), + }); + + mutator.mutate_static(Action::Fork { + depth: 2, + hash: h2b, + changes: keyval(1, 3), + }); + + assert!(is_canon_match(&mutator)) + } + + quickcheck! 
{ + fn head_complete(actions: Vec) -> TestResult { + let mut mutator = Mutator::new_empty(); + + for action in actions.into_iter() { + if let Err(_) = mutator.mutate(action) { + return TestResult::discard(); + } + } + + if mutator.canon_len() == 0 { + return TestResult::discard(); + } + + TestResult::from_bool(is_head_match(&mutator)) + } + + fn canon_complete(actions: Vec) -> TestResult { + let mut mutator = Mutator::new_empty(); + + for action in actions.into_iter() { + if let Err(_) = mutator.mutate(action) { + return TestResult::discard(); + } + } + + if mutator.canon_len() == 0 { + return TestResult::discard(); + } + + TestResult::from_bool(is_canon_match(&mutator)) + } + } } diff --git a/client/db/src/subdb.rs b/client/db/src/subdb.rs index 2e436aa2c9..55de5da9f0 100644 --- a/client/db/src/subdb.rs +++ b/client/db/src/subdb.rs @@ -14,12 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -/// A `Database` adapter for subdb. - -use sp_database::{self, ColumnId}; -use parking_lot::RwLock; use blake2_rfc::blake2b::blake2b; use codec::Encode; +use parking_lot::RwLock; +/// A `Database` adapter for subdb. +use sp_database::{self, ColumnId}; use subdb::{Database, KeyType}; /// A database hidden behind an RwLock, so that it implements Send + Sync. 
@@ -29,59 +28,63 @@ pub struct DbAdapter(RwLock>); /// Wrap RocksDb database into a trait object that implements `sp_database::Database` pub fn open( - path: &std::path::Path, - _num_columns: u32, + path: &std::path::Path, + _num_columns: u32, ) -> Result>, subdb::Error> { - let db = subdb::Options::from_path(path.into()).open()?; - Ok(std::sync::Arc::new(DbAdapter(RwLock::new(db)))) + let db = subdb::Options::from_path(path.into()).open()?; + Ok(std::sync::Arc::new(DbAdapter(RwLock::new(db)))) } impl sp_database::Database for DbAdapter { - fn get(&self, col: ColumnId, key: &[u8]) -> Option> { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - self.0.read().get(&hash) - } + fn get(&self, col: ColumnId, key: &[u8]) -> Option> { + let mut hash = H::default(); + (col, key).using_encoded(|d| { + hash.as_mut() + .copy_from_slice(blake2b(32, &[], d).as_bytes()) + }); + self.0.read().get(&hash) + } - fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - let _ = self.0.read().get_ref(&hash).map(|d| f(d.as_ref())); - } + fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { + let mut hash = H::default(); + (col, key).using_encoded(|d| { + hash.as_mut() + .copy_from_slice(blake2b(32, &[], d).as_bytes()) + }); + let _ = self.0.read().get_ref(&hash).map(|d| f(d.as_ref())); + } - fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - self.0.write().insert(&value, &hash); - } + fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) { + let mut hash = H::default(); + (col, key).using_encoded(|d| { + hash.as_mut() + .copy_from_slice(blake2b(32, &[], d).as_bytes()) + }); + 
self.0.write().insert(&value, &hash); + } - fn remove(&self, col: ColumnId, key: &[u8]) { - let mut hash = H::default(); - (col, key).using_encoded(|d| - hash.as_mut().copy_from_slice(blake2b(32, &[], d).as_bytes()) - ); - let _ = self.0.write().remove(&hash); - } + fn remove(&self, col: ColumnId, key: &[u8]) { + let mut hash = H::default(); + (col, key).using_encoded(|d| { + hash.as_mut() + .copy_from_slice(blake2b(32, &[], d).as_bytes()) + }); + let _ = self.0.write().remove(&hash); + } - fn lookup(&self, hash: &H) -> Option> { - self.0.read().get(hash) - } + fn lookup(&self, hash: &H) -> Option> { + self.0.read().get(hash) + } - fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { - let _ = self.0.read().get_ref(hash).map(|d| f(d.as_ref())); - } + fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { + let _ = self.0.read().get_ref(hash).map(|d| f(d.as_ref())); + } - fn store(&self, hash: &H, preimage: &[u8]) { - self.0.write().insert(preimage, hash); - } + fn store(&self, hash: &H, preimage: &[u8]) { + self.0.write().insert(preimage, hash); + } - fn release(&self, hash: &H) { - let _ = self.0.write().remove(hash); - } + fn release(&self, hash: &H) { + let _ = self.0.write().remove(hash); + } } diff --git a/client/db/src/upgrade.rs b/client/db/src/upgrade.rs index 95592d071f..7c272178b3 100644 --- a/client/db/src/upgrade.rs +++ b/client/db/src/upgrade.rs @@ -17,11 +17,11 @@ //! Database upgrade logic. use std::fs; -use std::io::{Read, Write, ErrorKind}; +use std::io::{ErrorKind, Read, Write}; use std::path::{Path, PathBuf}; -use sp_runtime::traits::Block as BlockT; use crate::utils::DatabaseType; +use sp_runtime::traits::Block as BlockT; /// Version file name. const VERSION_FILE_NAME: &'static str = "db_version"; @@ -30,94 +30,111 @@ const VERSION_FILE_NAME: &'static str = "db_version"; const CURRENT_VERSION: u32 = 1; /// Upgrade database to current version. 
-pub fn upgrade_db(db_path: &Path, _db_type: DatabaseType) -> sp_blockchain::Result<()> { - let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none()); - if !is_empty { - let db_version = current_version(db_path)?; - match db_version { - 0 => Err(sp_blockchain::Error::Backend(format!("Unsupported database version: {}", db_version)))?, - 1 => (), - _ => Err(sp_blockchain::Error::Backend(format!("Future database version: {}", db_version)))?, - } - } - - update_version(db_path) +pub fn upgrade_db( + db_path: &Path, + _db_type: DatabaseType, +) -> sp_blockchain::Result<()> { + let is_empty = db_path.read_dir().map_or(true, |mut d| d.next().is_none()); + if !is_empty { + let db_version = current_version(db_path)?; + match db_version { + 0 => Err(sp_blockchain::Error::Backend(format!( + "Unsupported database version: {}", + db_version + )))?, + 1 => (), + _ => Err(sp_blockchain::Error::Backend(format!( + "Future database version: {}", + db_version + )))?, + } + } + + update_version(db_path) } - /// Reads current database version from the file at given path. /// If the file does not exist returns 0. 
fn current_version(path: &Path) -> sp_blockchain::Result { - let unknown_version_err = || sp_blockchain::Error::Backend("Unknown database version".into()); - - match fs::File::open(version_file_path(path)) { - Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(0), - Err(_) => Err(unknown_version_err()), - Ok(mut file) => { - let mut s = String::new(); - file.read_to_string(&mut s).map_err(|_| unknown_version_err())?; - u32::from_str_radix(&s, 10).map_err(|_| unknown_version_err()) - }, - } + let unknown_version_err = || sp_blockchain::Error::Backend("Unknown database version".into()); + + match fs::File::open(version_file_path(path)) { + Err(ref err) if err.kind() == ErrorKind::NotFound => Ok(0), + Err(_) => Err(unknown_version_err()), + Ok(mut file) => { + let mut s = String::new(); + file.read_to_string(&mut s) + .map_err(|_| unknown_version_err())?; + u32::from_str_radix(&s, 10).map_err(|_| unknown_version_err()) + } + } } /// Maps database error to client error fn db_err(err: std::io::Error) -> sp_blockchain::Error { - sp_blockchain::Error::Backend(format!("{}", err)) + sp_blockchain::Error::Backend(format!("{}", err)) } /// Writes current database version to the file. /// Creates a new file if the version file does not exist yet. fn update_version(path: &Path) -> sp_blockchain::Result<()> { - fs::create_dir_all(path).map_err(db_err)?; - let mut file = fs::File::create(version_file_path(path)).map_err(db_err)?; - file.write_all(format!("{}", CURRENT_VERSION).as_bytes()).map_err(db_err)?; - Ok(()) + fs::create_dir_all(path).map_err(db_err)?; + let mut file = fs::File::create(version_file_path(path)).map_err(db_err)?; + file.write_all(format!("{}", CURRENT_VERSION).as_bytes()) + .map_err(db_err)?; + Ok(()) } /// Returns the version file path. 
fn version_file_path(path: &Path) -> PathBuf { - let mut file_path = path.to_owned(); - file_path.push(VERSION_FILE_NAME); - file_path + let mut file_path = path.to_owned(); + file_path.push(VERSION_FILE_NAME); + file_path } #[cfg(test)] mod tests { - use sc_state_db::PruningMode; - use crate::{DatabaseSettings, DatabaseSettingsSrc}; - use crate::tests::Block; - use super::*; - - fn create_db(db_path: &Path, version: Option) { - if let Some(version) = version { - fs::create_dir_all(db_path).unwrap(); - let mut file = fs::File::create(version_file_path(db_path)).unwrap(); - file.write_all(format!("{}", version).as_bytes()).unwrap(); - } - } - - fn open_database(db_path: &Path) -> sp_blockchain::Result<()> { - crate::utils::open_database::(&DatabaseSettings { - state_cache_size: 0, - state_cache_child_ratio: None, - pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::RocksDb { path: db_path.to_owned(), cache_size: 128 }, - }, DatabaseType::Full).map(|_| ()) - } - - #[test] - fn downgrade_never_happens() { - let db_dir = tempfile::TempDir::new().unwrap(); - create_db(db_dir.path(), Some(CURRENT_VERSION + 1)); - assert!(open_database(db_dir.path()).is_err()); - } - - #[test] - fn open_empty_database_works() { - let db_dir = tempfile::TempDir::new().unwrap(); - open_database(db_dir.path()).unwrap(); - open_database(db_dir.path()).unwrap(); - assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION); - } + use super::*; + use crate::tests::Block; + use crate::{DatabaseSettings, DatabaseSettingsSrc}; + use sc_state_db::PruningMode; + + fn create_db(db_path: &Path, version: Option) { + if let Some(version) = version { + fs::create_dir_all(db_path).unwrap(); + let mut file = fs::File::create(version_file_path(db_path)).unwrap(); + file.write_all(format!("{}", version).as_bytes()).unwrap(); + } + } + + fn open_database(db_path: &Path) -> sp_blockchain::Result<()> { + crate::utils::open_database::( + &DatabaseSettings { + state_cache_size: 0, + 
state_cache_child_ratio: None, + pruning: PruningMode::ArchiveAll, + source: DatabaseSettingsSrc::RocksDb { + path: db_path.to_owned(), + cache_size: 128, + }, + }, + DatabaseType::Full, + ) + .map(|_| ()) + } + + #[test] + fn downgrade_never_happens() { + let db_dir = tempfile::TempDir::new().unwrap(); + create_db(db_dir.path(), Some(CURRENT_VERSION + 1)); + assert!(open_database(db_dir.path()).is_err()); + } + + #[test] + fn open_empty_database_works() { + let db_dir = tempfile::TempDir::new().unwrap(); + open_database(db_dir.path()).unwrap(); + open_database(db_dir.path()).unwrap(); + assert_eq!(current_version(db_dir.path()).unwrap(), CURRENT_VERSION); + } } diff --git a/client/db/src/utils.rs b/client/db/src/utils.rs index 9506dc4e7f..91df8a4bb0 100644 --- a/client/db/src/utils.rs +++ b/client/db/src/utils.rs @@ -17,20 +17,19 @@ //! Db-based backend utility structures and functions, used by both //! full and light storages. -use std::sync::Arc; use std::convert::TryInto; +use std::sync::Arc; use log::debug; +use crate::{Database, DatabaseSettings, DatabaseSettingsSrc, DbHash}; use codec::Decode; -use sp_trie::DBValue; use sp_database::Transaction; use sp_runtime::generic::BlockId; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, Zero, - UniqueSaturatedFrom, UniqueSaturatedInto, + Block as BlockT, Header as HeaderT, UniqueSaturatedFrom, UniqueSaturatedInto, Zero, }; -use crate::{DatabaseSettings, DatabaseSettingsSrc, Database, DbHash}; +use sp_trie::DBValue; /// Number of columns in the db. Must be the same for both full && light dbs. /// Otherwise RocksDb will fail to open database && check its type. @@ -41,37 +40,37 @@ pub const COLUMN_META: u32 = 0; /// Keys of entries in COLUMN_META. pub mod meta_keys { - /// Type of storage (full or light). - pub const TYPE: &[u8; 4] = b"type"; - /// Best block key. - pub const BEST_BLOCK: &[u8; 4] = b"best"; - /// Last finalized block key. 
- pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; - /// Meta information prefix for list-based caches. - pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; - /// Meta information for changes tries key. - pub const CHANGES_TRIES_META: &[u8; 5] = b"ctrie"; - /// Genesis block hash. - pub const GENESIS_HASH: &[u8; 3] = b"gen"; - /// Leaves prefix list key. - pub const LEAF_PREFIX: &[u8; 4] = b"leaf"; - /// Children prefix list key. - pub const CHILDREN_PREFIX: &[u8; 8] = b"children"; + /// Type of storage (full or light). + pub const TYPE: &[u8; 4] = b"type"; + /// Best block key. + pub const BEST_BLOCK: &[u8; 4] = b"best"; + /// Last finalized block key. + pub const FINALIZED_BLOCK: &[u8; 5] = b"final"; + /// Meta information prefix for list-based caches. + pub const CACHE_META_PREFIX: &[u8; 5] = b"cache"; + /// Meta information for changes tries key. + pub const CHANGES_TRIES_META: &[u8; 5] = b"ctrie"; + /// Genesis block hash. + pub const GENESIS_HASH: &[u8; 3] = b"gen"; + /// Leaves prefix list key. + pub const LEAF_PREFIX: &[u8; 4] = b"leaf"; + /// Children prefix list key. + pub const CHILDREN_PREFIX: &[u8; 8] = b"children"; } /// Database metadata. #[derive(Debug)] pub struct Meta { - /// Hash of the best known block. - pub best_hash: H, - /// Number of the best known block. - pub best_number: N, - /// Hash of the best finalized block. - pub finalized_hash: H, - /// Number of the best finalized block. - pub finalized_number: N, - /// Hash of the genesis block. - pub genesis_hash: H, + /// Hash of the best known block. + pub best_hash: H, + /// Number of the best known block. + pub best_number: N, + /// Hash of the best finalized block. + pub finalized_hash: H, + /// Number of the best finalized block. + pub finalized_number: N, + /// Hash of the genesis block. + pub genesis_hash: H, } /// A block lookup key: used for canonical lookup from block number to hash @@ -80,10 +79,10 @@ pub type NumberIndexKey = [u8; 4]; /// Database type. 
#[derive(Clone, Copy, Debug, PartialEq)] pub enum DatabaseType { - /// Full node database. - Full, - /// Light node database. - Light, + /// Full node database. + Full, + /// Light node database. + Light, } /// Convert block number into short lookup key (LE representation) for @@ -92,335 +91,345 @@ pub enum DatabaseType { /// In the current database schema, this kind of key is only used for /// lookups into an index, NOT for storing header data or others. pub fn number_index_key>(n: N) -> sp_blockchain::Result { - let n = n.try_into().map_err(|_| - sp_blockchain::Error::Backend("Block number cannot be converted to u32".into()) - )?; - - Ok([ - (n >> 24) as u8, - ((n >> 16) & 0xff) as u8, - ((n >> 8) & 0xff) as u8, - (n & 0xff) as u8 - ]) + let n = n.try_into().map_err(|_| { + sp_blockchain::Error::Backend("Block number cannot be converted to u32".into()) + })?; + + Ok([ + (n >> 24) as u8, + ((n >> 16) & 0xff) as u8, + ((n >> 8) & 0xff) as u8, + (n & 0xff) as u8, + ]) } /// Convert number and hash into long lookup key for blocks that are /// not in the canonical chain. -pub fn number_and_hash_to_lookup_key( - number: N, - hash: H, -) -> sp_blockchain::Result> where - N: TryInto, - H: AsRef<[u8]>, +pub fn number_and_hash_to_lookup_key(number: N, hash: H) -> sp_blockchain::Result> +where + N: TryInto, + H: AsRef<[u8]>, { - let mut lookup_key = number_index_key(number)?.to_vec(); - lookup_key.extend_from_slice(hash.as_ref()); - Ok(lookup_key) + let mut lookup_key = number_index_key(number)?.to_vec(); + lookup_key.extend_from_slice(hash.as_ref()); + Ok(lookup_key) } /// Convert block lookup key into block number. /// all block lookup keys start with the block number. 
-pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result where - N: From +pub fn lookup_key_to_number(key: &[u8]) -> sp_blockchain::Result +where + N: From, { - if key.len() < 4 { - return Err(sp_blockchain::Error::Backend("Invalid block key".into())); - } - Ok((key[0] as u32) << 24 - | (key[1] as u32) << 16 - | (key[2] as u32) << 8 - | (key[3] as u32)).map(Into::into) + if key.len() < 4 { + return Err(sp_blockchain::Error::Backend("Invalid block key".into())); + } + Ok((key[0] as u32) << 24 | (key[1] as u32) << 16 | (key[2] as u32) << 8 | (key[3] as u32)) + .map(Into::into) } /// Delete number to hash mapping in DB transaction. pub fn remove_number_to_key_mapping>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, + transaction: &mut Transaction, + key_lookup_col: u32, + number: N, ) -> sp_blockchain::Result<()> { - transaction.remove(key_lookup_col, number_index_key(number)?.as_ref()); - Ok(()) + transaction.remove(key_lookup_col, number_index_key(number)?.as_ref()); + Ok(()) } /// Remove key mappings. pub fn remove_key_mappings, H: AsRef<[u8]>>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, - hash: H, + transaction: &mut Transaction, + key_lookup_col: u32, + number: N, + hash: H, ) -> sp_blockchain::Result<()> { - remove_number_to_key_mapping(transaction, key_lookup_col, number)?; - transaction.remove(key_lookup_col, hash.as_ref()); - Ok(()) + remove_number_to_key_mapping(transaction, key_lookup_col, number)?; + transaction.remove(key_lookup_col, hash.as_ref()); + Ok(()) } /// Place a number mapping into the database. This maps number to current perceived /// block hash at that position. 
pub fn insert_number_to_key_mapping + Clone, H: AsRef<[u8]>>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, - hash: H, + transaction: &mut Transaction, + key_lookup_col: u32, + number: N, + hash: H, ) -> sp_blockchain::Result<()> { - transaction.set_from_vec( - key_lookup_col, - number_index_key(number.clone())?.as_ref(), - number_and_hash_to_lookup_key(number, hash)?, - ); - Ok(()) + transaction.set_from_vec( + key_lookup_col, + number_index_key(number.clone())?.as_ref(), + number_and_hash_to_lookup_key(number, hash)?, + ); + Ok(()) } /// Insert a hash to key mapping in the database. pub fn insert_hash_to_key_mapping, H: AsRef<[u8]> + Clone>( - transaction: &mut Transaction, - key_lookup_col: u32, - number: N, - hash: H, + transaction: &mut Transaction, + key_lookup_col: u32, + number: N, + hash: H, ) -> sp_blockchain::Result<()> { - transaction.set_from_vec( - key_lookup_col, - hash.clone().as_ref(), - number_and_hash_to_lookup_key(number, hash)?, - ); - Ok(()) + transaction.set_from_vec( + key_lookup_col, + hash.clone().as_ref(), + number_and_hash_to_lookup_key(number, hash)?, + ); + Ok(()) } /// Convert block id to block lookup key. /// block lookup key is the DB-key header, block and justification are stored under. /// looks up lookup key by hash from DB as necessary. 
pub fn block_id_to_lookup_key( - db: &dyn Database, - key_lookup_col: u32, - id: BlockId -) -> Result>, sp_blockchain::Error> where - Block: BlockT, - ::sp_runtime::traits::NumberFor: UniqueSaturatedFrom + UniqueSaturatedInto, + db: &dyn Database, + key_lookup_col: u32, + id: BlockId, +) -> Result>, sp_blockchain::Error> +where + Block: BlockT, + ::sp_runtime::traits::NumberFor: UniqueSaturatedFrom + UniqueSaturatedInto, { - Ok(match id { - BlockId::Number(n) => db.get( - key_lookup_col, - number_index_key(n)?.as_ref(), - ), - BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()) - }) + Ok(match id { + BlockId::Number(n) => db.get(key_lookup_col, number_index_key(n)?.as_ref()), + BlockId::Hash(h) => db.get(key_lookup_col, h.as_ref()), + }) } /// Opens the configured database. pub fn open_database( - config: &DatabaseSettings, - db_type: DatabaseType, + config: &DatabaseSettings, + db_type: DatabaseType, ) -> sp_blockchain::Result>> { - let db: Arc> = match &config.source { - #[cfg(any(feature = "kvdb-rocksdb", test))] - DatabaseSettingsSrc::RocksDb { path, cache_size } => { - // first upgrade database to required version - crate::upgrade::upgrade_db::(&path, db_type)?; - - // and now open database assuming that it has the latest version - let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); - let state_col_budget = (*cache_size as f64 * 0.9) as usize; - let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); - let mut memory_budget = std::collections::HashMap::new(); - let path = path.to_str() - .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; - - for i in 0..NUM_COLUMNS { - if i == crate::columns::STATE { - memory_budget.insert(i, state_col_budget); - } else { - memory_budget.insert(i, other_col_budget); - } - } - - db_config.memory_budget = memory_budget; - - log::trace!( - target: "db", - "Open RocksDB database at {}, state column budget: {} MiB, others({}) column cache: {} 
MiB", - path, - state_col_budget, - NUM_COLUMNS, - other_col_budget, - ); - - let db = kvdb_rocksdb::Database::open(&db_config, &path) - .map_err(|err| sp_blockchain::Error::Backend(format!("{}", err)))?; - sp_database::as_database(db) - }, - #[cfg(feature = "subdb")] - DatabaseSettingsSrc::SubDb { path } => { - crate::subdb::open(&path, NUM_COLUMNS) - .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? - }, - #[cfg(feature = "parity-db")] - DatabaseSettingsSrc::ParityDb { path } => { - crate::parity_db::open(&path, NUM_COLUMNS) - .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))? - }, - DatabaseSettingsSrc::Custom(db) => db.clone(), - _ => { - let msg = "Trying to open a unsupported database".into(); - return Err(sp_blockchain::Error::Backend(msg)); - }, - }; - - check_database_type(&*db, db_type)?; - - Ok(db) + let db: Arc> = match &config.source { + #[cfg(any(feature = "kvdb-rocksdb", test))] + DatabaseSettingsSrc::RocksDb { path, cache_size } => { + // first upgrade database to required version + crate::upgrade::upgrade_db::(&path, db_type)?; + + // and now open database assuming that it has the latest version + let mut db_config = kvdb_rocksdb::DatabaseConfig::with_columns(NUM_COLUMNS); + let state_col_budget = (*cache_size as f64 * 0.9) as usize; + let other_col_budget = (cache_size - state_col_budget) / (NUM_COLUMNS as usize - 1); + let mut memory_budget = std::collections::HashMap::new(); + let path = path + .to_str() + .ok_or_else(|| sp_blockchain::Error::Backend("Invalid database path".into()))?; + + for i in 0..NUM_COLUMNS { + if i == crate::columns::STATE { + memory_budget.insert(i, state_col_budget); + } else { + memory_budget.insert(i, other_col_budget); + } + } + + db_config.memory_budget = memory_budget; + + log::trace!( + target: "db", + "Open RocksDB database at {}, state column budget: {} MiB, others({}) column cache: {} MiB", + path, + state_col_budget, + NUM_COLUMNS, + other_col_budget, + ); + + let db = 
kvdb_rocksdb::Database::open(&db_config, &path) + .map_err(|err| sp_blockchain::Error::Backend(format!("{}", err)))?; + sp_database::as_database(db) + } + #[cfg(feature = "subdb")] + DatabaseSettingsSrc::SubDb { path } => crate::subdb::open(&path, NUM_COLUMNS) + .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))?, + #[cfg(feature = "parity-db")] + DatabaseSettingsSrc::ParityDb { path } => crate::parity_db::open(&path, NUM_COLUMNS) + .map_err(|e| sp_blockchain::Error::Backend(format!("{:?}", e)))?, + DatabaseSettingsSrc::Custom(db) => db.clone(), + _ => { + let msg = "Trying to open a unsupported database".into(); + return Err(sp_blockchain::Error::Backend(msg)); + } + }; + + check_database_type(&*db, db_type)?; + + Ok(db) } /// Check database type. -pub fn check_database_type(db: &dyn Database, db_type: DatabaseType) -> sp_blockchain::Result<()> { - match db.get(COLUMN_META, meta_keys::TYPE) { - Some(stored_type) => { - if db_type.as_str().as_bytes() != &*stored_type { - return Err(sp_blockchain::Error::Backend( - format!("Unexpected database type. Expected: {}", db_type.as_str())).into()); - } - }, - None => { - let mut transaction = Transaction::new(); - transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); - db.commit(transaction) - }, - } - - Ok(()) +pub fn check_database_type( + db: &dyn Database, + db_type: DatabaseType, +) -> sp_blockchain::Result<()> { + match db.get(COLUMN_META, meta_keys::TYPE) { + Some(stored_type) => { + if db_type.as_str().as_bytes() != &*stored_type { + return Err(sp_blockchain::Error::Backend(format!( + "Unexpected database type. Expected: {}", + db_type.as_str() + )) + .into()); + } + } + None => { + let mut transaction = Transaction::new(); + transaction.set(COLUMN_META, meta_keys::TYPE, db_type.as_str().as_bytes()); + db.commit(transaction) + } + } + + Ok(()) } /// Read database column entry for the given block. 
pub fn read_db( - db: &dyn Database, - col_index: u32, - col: u32, - id: BlockId + db: &dyn Database, + col_index: u32, + col: u32, + id: BlockId, ) -> sp_blockchain::Result> - where - Block: BlockT, +where + Block: BlockT, { - block_id_to_lookup_key(db, col_index, id).and_then(|key| match key { - Some(key) => Ok(db.get(col, key.as_ref())), - None => Ok(None), - }) + block_id_to_lookup_key(db, col_index, id).and_then(|key| match key { + Some(key) => Ok(db.get(col, key.as_ref())), + None => Ok(None), + }) } /// Read a header from the database. pub fn read_header( - db: &dyn Database, - col_index: u32, - col: u32, - id: BlockId, + db: &dyn Database, + col_index: u32, + col: u32, + id: BlockId, ) -> sp_blockchain::Result> { - match read_db(db, col_index, col, id)? { - Some(header) => match Block::Header::decode(&mut &header[..]) { - Ok(header) => Ok(Some(header)), - Err(_) => return Err( - sp_blockchain::Error::Backend("Error decoding header".into()) - ), - } - None => Ok(None), - } + match read_db(db, col_index, col, id)? { + Some(header) => match Block::Header::decode(&mut &header[..]) { + Ok(header) => Ok(Some(header)), + Err(_) => { + return Err(sp_blockchain::Error::Backend( + "Error decoding header".into(), + )) + } + }, + None => Ok(None), + } } /// Required header from the database. pub fn require_header( - db: &dyn Database, - col_index: u32, - col: u32, - id: BlockId, + db: &dyn Database, + col_index: u32, + col: u32, + id: BlockId, ) -> sp_blockchain::Result { - read_header(db, col_index, col, id) - .and_then(|header| header.ok_or_else(|| - sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id)) - )) + read_header(db, col_index, col, id).and_then(|header| { + header.ok_or_else(|| sp_blockchain::Error::UnknownBlock(format!("Require header: {}", id))) + }) } /// Read meta from the database. 
-pub fn read_meta(db: &dyn Database, col_header: u32) -> Result< - Meta<<::Header as HeaderT>::Number, Block::Hash>, - sp_blockchain::Error, -> - where - Block: BlockT, +pub fn read_meta( + db: &dyn Database, + col_header: u32, +) -> Result::Header as HeaderT>::Number, Block::Hash>, sp_blockchain::Error> +where + Block: BlockT, { - let genesis_hash: Block::Hash = match read_genesis_hash(db)? { - Some(genesis_hash) => genesis_hash, - None => return Ok(Meta { - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - }), - }; - - let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { - if let Some(Some(header)) = match db.get(COLUMN_META, key) { - Some(id) => db.get(col_header, &id).map(|b| Block::Header::decode(&mut &b[..]).ok()), - None => None, - } - { - let hash = header.hash(); - debug!("DB Opened blockchain db, fetched {} = {:?} ({})", desc, hash, header.number()); - Ok((hash, *header.number())) - } else { - Ok((genesis_hash.clone(), Zero::zero())) - } - }; - - let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; - let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; - - Ok(Meta { - best_hash, - best_number, - finalized_hash, - finalized_number, - genesis_hash, - }) + let genesis_hash: Block::Hash = match read_genesis_hash(db)? 
{ + Some(genesis_hash) => genesis_hash, + None => { + return Ok(Meta { + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + }) + } + }; + + let load_meta_block = |desc, key| -> Result<_, sp_blockchain::Error> { + if let Some(Some(header)) = match db.get(COLUMN_META, key) { + Some(id) => db + .get(col_header, &id) + .map(|b| Block::Header::decode(&mut &b[..]).ok()), + None => None, + } { + let hash = header.hash(); + debug!( + "DB Opened blockchain db, fetched {} = {:?} ({})", + desc, + hash, + header.number() + ); + Ok((hash, *header.number())) + } else { + Ok((genesis_hash.clone(), Zero::zero())) + } + }; + + let (best_hash, best_number) = load_meta_block("best", meta_keys::BEST_BLOCK)?; + let (finalized_hash, finalized_number) = load_meta_block("final", meta_keys::FINALIZED_BLOCK)?; + + Ok(Meta { + best_hash, + best_number, + finalized_hash, + finalized_number, + genesis_hash, + }) } /// Read genesis hash from database. -pub fn read_genesis_hash(db: &dyn Database) -> sp_blockchain::Result> { - match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { - Some(h) => match Decode::decode(&mut &h[..]) { - Ok(h) => Ok(Some(h)), - Err(err) => Err(sp_blockchain::Error::Backend( - format!("Error decoding genesis hash: {}", err) - )), - }, - None => Ok(None), - } +pub fn read_genesis_hash( + db: &dyn Database, +) -> sp_blockchain::Result> { + match db.get(COLUMN_META, meta_keys::GENESIS_HASH) { + Some(h) => match Decode::decode(&mut &h[..]) { + Ok(h) => Ok(Some(h)), + Err(err) => Err(sp_blockchain::Error::Backend(format!( + "Error decoding genesis hash: {}", + err + ))), + }, + None => Ok(None), + } } impl DatabaseType { - /// Returns str representation of the type. - pub fn as_str(&self) -> &'static str { - match *self { - DatabaseType::Full => "full", - DatabaseType::Light => "light", - } - } + /// Returns str representation of the type. 
+ pub fn as_str(&self) -> &'static str { + match *self { + DatabaseType::Full => "full", + DatabaseType::Light => "light", + } + } } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; - type Block = RawBlock>; - - #[test] - fn number_index_key_doesnt_panic() { - let id = BlockId::::Number(72340207214430721); - match id { - BlockId::Number(n) => number_index_key(n).expect_err("number should overflow u32"), - _ => unreachable!(), - }; - } - - #[test] - fn database_type_as_str_works() { - assert_eq!(DatabaseType::Full.as_str(), "full"); - assert_eq!(DatabaseType::Light.as_str(), "light"); - } + use super::*; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + type Block = RawBlock>; + + #[test] + fn number_index_key_doesnt_panic() { + let id = BlockId::::Number(72340207214430721); + match id { + BlockId::Number(n) => number_index_key(n).expect_err("number should overflow u32"), + _ => unreachable!(), + }; + } + + #[test] + fn database_type_as_str_works() { + assert_eq!(DatabaseType::Full.as_str(), "full"); + assert_eq!(DatabaseType::Light.as_str(), "light"); + } } diff --git a/client/executor/common/src/error.rs b/client/executor/common/src/error.rs index 66d520e942..1b215420a8 100644 --- a/client/executor/common/src/error.rs +++ b/client/executor/common/src/error.rs @@ -25,108 +25,108 @@ pub type Result = std::result::Result; /// Error type. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Unserializable Data - InvalidData(sp_serializer::Error), - /// Trap occurred during execution - Trap(wasmi::Trap), - /// Wasmi loading/instantiating error - Wasmi(wasmi::Error), - /// Error in the API. Parameter is an error message. 
- #[from(ignore)] - ApiError(String), - /// Method is not found - #[display(fmt="Method not found: '{}'", _0)] - #[from(ignore)] - MethodNotFound(String), - /// Code is invalid (expected single byte) - #[display(fmt="Invalid Code: {}", _0)] - #[from(ignore)] - InvalidCode(String), - /// Could not get runtime version. - #[display(fmt="On-chain runtime does not specify version")] - VersionInvalid, - /// Externalities have failed. - #[display(fmt="Externalities error")] - Externalities, - /// Invalid index. - #[display(fmt="Invalid index provided")] - InvalidIndex, - /// Invalid return type. - #[display(fmt="Invalid type returned (should be u64)")] - InvalidReturn, - /// Runtime failed. - #[display(fmt="Runtime error")] - Runtime, - /// Runtime panicked. - #[display(fmt="Runtime panicked: {}", _0)] - #[from(ignore)] - RuntimePanicked(String), - /// Invalid memory reference. - #[display(fmt="Invalid memory reference")] - InvalidMemoryReference, - /// The runtime must provide a global named `__heap_base` of type i32 for specifying where the - /// allocator is allowed to place its data. - #[display(fmt="The runtime doesn't provide a global named `__heap_base`")] - HeapBaseNotFoundOrInvalid, - /// The runtime WebAssembly module is not allowed to have the `start` function. - #[display(fmt="The runtime has the `start` function")] - RuntimeHasStartFn, - /// Some other error occurred - Other(String), - /// Some error occurred in the allocator - #[display(fmt="Error in allocator: {}", _0)] - Allocator(sp_allocator::Error), - /// Execution of a host function failed. - #[display(fmt="Host function {} execution failed with: {}", _0, _1)] - FunctionExecution(String, String), + /// Unserializable Data + InvalidData(sp_serializer::Error), + /// Trap occurred during execution + Trap(wasmi::Trap), + /// Wasmi loading/instantiating error + Wasmi(wasmi::Error), + /// Error in the API. Parameter is an error message. 
+ #[from(ignore)] + ApiError(String), + /// Method is not found + #[display(fmt = "Method not found: '{}'", _0)] + #[from(ignore)] + MethodNotFound(String), + /// Code is invalid (expected single byte) + #[display(fmt = "Invalid Code: {}", _0)] + #[from(ignore)] + InvalidCode(String), + /// Could not get runtime version. + #[display(fmt = "On-chain runtime does not specify version")] + VersionInvalid, + /// Externalities have failed. + #[display(fmt = "Externalities error")] + Externalities, + /// Invalid index. + #[display(fmt = "Invalid index provided")] + InvalidIndex, + /// Invalid return type. + #[display(fmt = "Invalid type returned (should be u64)")] + InvalidReturn, + /// Runtime failed. + #[display(fmt = "Runtime error")] + Runtime, + /// Runtime panicked. + #[display(fmt = "Runtime panicked: {}", _0)] + #[from(ignore)] + RuntimePanicked(String), + /// Invalid memory reference. + #[display(fmt = "Invalid memory reference")] + InvalidMemoryReference, + /// The runtime must provide a global named `__heap_base` of type i32 for specifying where the + /// allocator is allowed to place its data. + #[display(fmt = "The runtime doesn't provide a global named `__heap_base`")] + HeapBaseNotFoundOrInvalid, + /// The runtime WebAssembly module is not allowed to have the `start` function. + #[display(fmt = "The runtime has the `start` function")] + RuntimeHasStartFn, + /// Some other error occurred + Other(String), + /// Some error occurred in the allocator + #[display(fmt = "Error in allocator: {}", _0)] + Allocator(sp_allocator::Error), + /// Execution of a host function failed. 
+ #[display(fmt = "Host function {} execution failed with: {}", _0, _1)] + FunctionExecution(String, String), } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::InvalidData(ref err) => Some(err), - Error::Trap(ref err) => Some(err), - Error::Wasmi(ref err) => Some(err), - _ => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::InvalidData(ref err) => Some(err), + Error::Trap(ref err) => Some(err), + Error::Wasmi(ref err) => Some(err), + _ => None, + } + } } impl wasmi::HostError for Error {} impl From<&'static str> for Error { - fn from(err: &'static str) -> Error { - Error::Other(err.into()) - } + fn from(err: &'static str) -> Error { + Error::Other(err.into()) + } } impl From for Error { - fn from(err: WasmError) -> Error { - Error::Other(err.to_string()) - } + fn from(err: WasmError) -> Error { + Error::Other(err.to_string()) + } } /// Type for errors occurring during Wasm runtime construction. #[derive(Debug, derive_more::Display)] pub enum WasmError { - /// Code could not be read from the state. - CodeNotFound, - /// Failure to reinitialize runtime instance from snapshot. - ApplySnapshotFailed, - /// Failure to erase the wasm memory. - /// - /// Depending on the implementation might mean failure of allocating memory. - ErasingFailed(String), - /// Wasm code failed validation. - InvalidModule, - /// Wasm code could not be deserialized. - CantDeserializeWasm, - /// The module does not export a linear memory named `memory`. - InvalidMemory, - /// The number of heap pages requested is disallowed by the module. - InvalidHeapPages, - /// Instantiation error. - Instantiation(String), - /// Other error happenend. - Other(String), + /// Code could not be read from the state. + CodeNotFound, + /// Failure to reinitialize runtime instance from snapshot. + ApplySnapshotFailed, + /// Failure to erase the wasm memory. 
+ /// + /// Depending on the implementation might mean failure of allocating memory. + ErasingFailed(String), + /// Wasm code failed validation. + InvalidModule, + /// Wasm code could not be deserialized. + CantDeserializeWasm, + /// The module does not export a linear memory named `memory`. + InvalidMemory, + /// The number of heap pages requested is disallowed by the module. + InvalidHeapPages, + /// Instantiation error. + Instantiation(String), + /// Other error happenend. + Other(String), } diff --git a/client/executor/common/src/sandbox.rs b/client/executor/common/src/sandbox.rs index ccfdc2f3e0..fce16fbd19 100644 --- a/client/executor/common/src/sandbox.rs +++ b/client/executor/common/src/sandbox.rs @@ -19,15 +19,15 @@ //! Sandboxing is baked by wasmi at the moment. In future, however, we would like to add/switch to //! a compiled execution engine. -use crate::error::{Result, Error}; -use std::{collections::HashMap, rc::Rc}; +use crate::error::{Error, Result}; use codec::{Decode, Encode}; use sp_core::sandbox as sandbox_primitives; +use sp_wasm_interface::{FunctionContext, Pointer, WordSize}; +use std::{collections::HashMap, rc::Rc}; use wasmi::{ - Externals, ImportResolver, MemoryInstance, MemoryRef, Module, ModuleInstance, - ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, memory_units::Pages, + memory_units::Pages, Externals, ImportResolver, MemoryInstance, MemoryRef, Module, + ModuleInstance, ModuleRef, RuntimeArgs, RuntimeValue, Trap, TrapKind, }; -use sp_wasm_interface::{FunctionContext, Pointer, WordSize}; /// Index of a function inside the supervisor. /// @@ -37,9 +37,9 @@ use sp_wasm_interface::{FunctionContext, Pointer, WordSize}; pub struct SupervisorFuncIndex(usize); impl From for usize { - fn from(index: SupervisorFuncIndex) -> Self { - index.0 - } + fn from(index: SupervisorFuncIndex) -> Self { + index.0 + } } /// Index of a function within guest index space. 
@@ -50,125 +50,123 @@ struct GuestFuncIndex(usize); /// This struct holds a mapping from guest index space to supervisor. struct GuestToSupervisorFunctionMapping { - funcs: Vec, + funcs: Vec, } impl GuestToSupervisorFunctionMapping { - fn new() -> GuestToSupervisorFunctionMapping { - GuestToSupervisorFunctionMapping { funcs: Vec::new() } - } - - fn define(&mut self, supervisor_func: SupervisorFuncIndex) -> GuestFuncIndex { - let idx = self.funcs.len(); - self.funcs.push(supervisor_func); - GuestFuncIndex(idx) - } - - fn func_by_guest_index(&self, guest_func_idx: GuestFuncIndex) -> Option { - self.funcs.get(guest_func_idx.0).cloned() - } + fn new() -> GuestToSupervisorFunctionMapping { + GuestToSupervisorFunctionMapping { funcs: Vec::new() } + } + + fn define(&mut self, supervisor_func: SupervisorFuncIndex) -> GuestFuncIndex { + let idx = self.funcs.len(); + self.funcs.push(supervisor_func); + GuestFuncIndex(idx) + } + + fn func_by_guest_index(&self, guest_func_idx: GuestFuncIndex) -> Option { + self.funcs.get(guest_func_idx.0).cloned() + } } struct Imports { - func_map: HashMap<(Vec, Vec), GuestFuncIndex>, - memories_map: HashMap<(Vec, Vec), MemoryRef>, + func_map: HashMap<(Vec, Vec), GuestFuncIndex>, + memories_map: HashMap<(Vec, Vec), MemoryRef>, } impl ImportResolver for Imports { - fn resolve_func( - &self, - module_name: &str, - field_name: &str, - signature: &::wasmi::Signature, - ) -> std::result::Result { - let key = ( - module_name.as_bytes().to_owned(), - field_name.as_bytes().to_owned(), - ); - let idx = *self.func_map.get(&key).ok_or_else(|| { - wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - )) - })?; - Ok(wasmi::FuncInstance::alloc_host(signature.clone(), idx.0)) - } - - fn resolve_memory( - &self, - module_name: &str, - field_name: &str, - _memory_type: &::wasmi::MemoryDescriptor, - ) -> std::result::Result { - let key = ( - module_name.as_bytes().to_vec(), - field_name.as_bytes().to_vec(), - ); - let mem 
= self.memories_map - .get(&key) - .ok_or_else(|| { - wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - )) - })? - .clone(); - Ok(mem) - } - - fn resolve_global( - &self, - module_name: &str, - field_name: &str, - _global_type: &::wasmi::GlobalDescriptor, - ) -> std::result::Result { - Err(wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - ))) - } - - fn resolve_table( - &self, - module_name: &str, - field_name: &str, - _table_type: &::wasmi::TableDescriptor, - ) -> std::result::Result { - Err(wasmi::Error::Instantiation(format!( - "Export {}:{} not found", - module_name, field_name - ))) - } + fn resolve_func( + &self, + module_name: &str, + field_name: &str, + signature: &::wasmi::Signature, + ) -> std::result::Result { + let key = ( + module_name.as_bytes().to_owned(), + field_name.as_bytes().to_owned(), + ); + let idx = *self.func_map.get(&key).ok_or_else(|| { + wasmi::Error::Instantiation(format!("Export {}:{} not found", module_name, field_name)) + })?; + Ok(wasmi::FuncInstance::alloc_host(signature.clone(), idx.0)) + } + + fn resolve_memory( + &self, + module_name: &str, + field_name: &str, + _memory_type: &::wasmi::MemoryDescriptor, + ) -> std::result::Result { + let key = ( + module_name.as_bytes().to_vec(), + field_name.as_bytes().to_vec(), + ); + let mem = self + .memories_map + .get(&key) + .ok_or_else(|| { + wasmi::Error::Instantiation(format!( + "Export {}:{} not found", + module_name, field_name + )) + })? 
+ .clone(); + Ok(mem) + } + + fn resolve_global( + &self, + module_name: &str, + field_name: &str, + _global_type: &::wasmi::GlobalDescriptor, + ) -> std::result::Result { + Err(wasmi::Error::Instantiation(format!( + "Export {}:{} not found", + module_name, field_name + ))) + } + + fn resolve_table( + &self, + module_name: &str, + field_name: &str, + _table_type: &::wasmi::TableDescriptor, + ) -> std::result::Result { + Err(wasmi::Error::Instantiation(format!( + "Export {}:{} not found", + module_name, field_name + ))) + } } /// This trait encapsulates sandboxing capabilities. /// /// Note that this functions are only called in the `supervisor` context. pub trait SandboxCapabilities: FunctionContext { - /// Represents a function reference into the supervisor environment. - type SupervisorFuncRef; - - /// Invoke a function in the supervisor environment. - /// - /// This first invokes the dispatch_thunk function, passing in the function index of the - /// desired function to call and serialized arguments. The thunk calls the desired function - /// with the deserialized arguments, then serializes the result into memory and returns - /// reference. The pointer to and length of the result in linear memory is encoded into an i64, - /// with the upper 32 bits representing the pointer and the lower 32 bits representing the - /// length. - /// - /// # Errors - /// - /// Returns `Err` if the dispatch_thunk function has an incorrect signature or traps during - /// execution. - fn invoke( - &mut self, - dispatch_thunk: &Self::SupervisorFuncRef, - invoke_args_ptr: Pointer, - invoke_args_len: WordSize, - state: u32, - func_idx: SupervisorFuncIndex, - ) -> Result; + /// Represents a function reference into the supervisor environment. + type SupervisorFuncRef; + + /// Invoke a function in the supervisor environment. + /// + /// This first invokes the dispatch_thunk function, passing in the function index of the + /// desired function to call and serialized arguments. 
The thunk calls the desired function + /// with the deserialized arguments, then serializes the result into memory and returns + /// reference. The pointer to and length of the result in linear memory is encoded into an i64, + /// with the upper 32 bits representing the pointer and the lower 32 bits representing the + /// length. + /// + /// # Errors + /// + /// Returns `Err` if the dispatch_thunk function has an incorrect signature or traps during + /// execution. + fn invoke( + &mut self, + dispatch_thunk: &Self::SupervisorFuncRef, + invoke_args_ptr: Pointer, + invoke_args_len: WordSize, + state: u32, + func_idx: SupervisorFuncIndex, + ) -> Result; } /// Implementation of [`Externals`] that allows execution of guest module with @@ -176,40 +174,41 @@ pub trait SandboxCapabilities: FunctionContext { /// /// [`Externals`]: ../wasmi/trait.Externals.html pub struct GuestExternals<'a, FE: SandboxCapabilities + 'a> { - supervisor_externals: &'a mut FE, - sandbox_instance: &'a SandboxInstance, - state: u32, + supervisor_externals: &'a mut FE, + sandbox_instance: &'a SandboxInstance, + state: u32, } fn trap(msg: &'static str) -> Trap { - TrapKind::Host(Box::new(Error::Other(msg.into()))).into() + TrapKind::Host(Box::new(Error::Other(msg.into()))).into() } fn deserialize_result(serialized_result: &[u8]) -> std::result::Result, Trap> { - use self::sandbox_primitives::HostError; - use sp_wasm_interface::ReturnValue; - let result_val = std::result::Result::::decode(&mut &serialized_result[..]) - .map_err(|_| trap("Decoding Result failed!"))?; - - match result_val { - Ok(return_value) => Ok(match return_value { - ReturnValue::Unit => None, - ReturnValue::Value(typed_value) => Some(RuntimeValue::from(typed_value)), - }), - Err(HostError) => Err(trap("Supervisor function returned sandbox::HostError")), - } + use self::sandbox_primitives::HostError; + use sp_wasm_interface::ReturnValue; + let result_val = + std::result::Result::::decode(&mut &serialized_result[..]) + .map_err(|_| 
trap("Decoding Result failed!"))?; + + match result_val { + Ok(return_value) => Ok(match return_value { + ReturnValue::Unit => None, + ReturnValue::Value(typed_value) => Some(RuntimeValue::from(typed_value)), + }), + Err(HostError) => Err(trap("Supervisor function returned sandbox::HostError")), + } } impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { - fn invoke_index( - &mut self, - index: usize, - args: RuntimeArgs, - ) -> std::result::Result, Trap> { - // Make `index` typesafe again. - let index = GuestFuncIndex(index); - - let func_idx = self.sandbox_instance + fn invoke_index( + &mut self, + index: usize, + args: RuntimeArgs, + ) -> std::result::Result, Trap> { + // Make `index` typesafe again. + let index = GuestFuncIndex(index); + + let func_idx = self.sandbox_instance .guest_to_supervisor_mapping .func_by_guest_index(index) .expect( @@ -219,76 +218,76 @@ impl<'a, FE: SandboxCapabilities + 'a> Externals for GuestExternals<'a, FE> { qed" ); - // Serialize arguments into a byte vector. - let invoke_args_data: Vec = args.as_ref() - .iter() - .cloned() - .map(sp_wasm_interface::Value::from) - .collect::>() - .encode(); - - let state = self.state; - - // Move serialized arguments inside the memory and invoke dispatch thunk and - // then free allocated memory. 
- let invoke_args_len = invoke_args_data.len() as WordSize; - let invoke_args_ptr = self - .supervisor_externals - .allocate_memory(invoke_args_len) - .map_err(|_| trap("Can't allocate memory in supervisor for the arguments"))?; - self - .supervisor_externals - .write_memory(invoke_args_ptr, &invoke_args_data) - .map_err(|_| trap("Can't write invoke args into memory"))?; - let result = self.supervisor_externals.invoke( - &self.sandbox_instance.dispatch_thunk, - invoke_args_ptr, - invoke_args_len, - state, - func_idx, - )?; - self - .supervisor_externals - .deallocate_memory(invoke_args_ptr) - .map_err(|_| trap("Can't deallocate memory for dispatch thunk's invoke arguments"))?; - - // dispatch_thunk returns pointer to serialized arguments. - // Unpack pointer and len of the serialized result data. - let (serialized_result_val_ptr, serialized_result_val_len) = { - // Cast to u64 to use zero-extension. - let v = result as u64; - let ptr = (v as u64 >> 32) as u32; - let len = (v & 0xFFFFFFFF) as u32; - (Pointer::new(ptr), len) - }; - - let serialized_result_val = self.supervisor_externals - .read_memory(serialized_result_val_ptr, serialized_result_val_len) - .map_err(|_| trap("Can't read the serialized result from dispatch thunk"))?; - self.supervisor_externals - .deallocate_memory(serialized_result_val_ptr) - .map_err(|_| trap("Can't deallocate memory for dispatch thunk's result"))?; - - deserialize_result(&serialized_result_val) - } + // Serialize arguments into a byte vector. + let invoke_args_data: Vec = args + .as_ref() + .iter() + .cloned() + .map(sp_wasm_interface::Value::from) + .collect::>() + .encode(); + + let state = self.state; + + // Move serialized arguments inside the memory and invoke dispatch thunk and + // then free allocated memory. 
+ let invoke_args_len = invoke_args_data.len() as WordSize; + let invoke_args_ptr = self + .supervisor_externals + .allocate_memory(invoke_args_len) + .map_err(|_| trap("Can't allocate memory in supervisor for the arguments"))?; + self.supervisor_externals + .write_memory(invoke_args_ptr, &invoke_args_data) + .map_err(|_| trap("Can't write invoke args into memory"))?; + let result = self.supervisor_externals.invoke( + &self.sandbox_instance.dispatch_thunk, + invoke_args_ptr, + invoke_args_len, + state, + func_idx, + )?; + self.supervisor_externals + .deallocate_memory(invoke_args_ptr) + .map_err(|_| trap("Can't deallocate memory for dispatch thunk's invoke arguments"))?; + + // dispatch_thunk returns pointer to serialized arguments. + // Unpack pointer and len of the serialized result data. + let (serialized_result_val_ptr, serialized_result_val_len) = { + // Cast to u64 to use zero-extension. + let v = result as u64; + let ptr = (v as u64 >> 32) as u32; + let len = (v & 0xFFFFFFFF) as u32; + (Pointer::new(ptr), len) + }; + + let serialized_result_val = self + .supervisor_externals + .read_memory(serialized_result_val_ptr, serialized_result_val_len) + .map_err(|_| trap("Can't read the serialized result from dispatch thunk"))?; + self.supervisor_externals + .deallocate_memory(serialized_result_val_ptr) + .map_err(|_| trap("Can't deallocate memory for dispatch thunk's result"))?; + + deserialize_result(&serialized_result_val) + } } fn with_guest_externals( - supervisor_externals: &mut FE, - sandbox_instance: &SandboxInstance, - state: u32, - f: F, + supervisor_externals: &mut FE, + sandbox_instance: &SandboxInstance, + state: u32, + f: F, ) -> R where - FE: SandboxCapabilities, - F: FnOnce(&mut GuestExternals) -> R, + FE: SandboxCapabilities, + F: FnOnce(&mut GuestExternals) -> R, { - let mut guest_externals = GuestExternals { - supervisor_externals, - sandbox_instance, - state, - }; - f(&mut guest_externals) + let mut guest_externals = GuestExternals { + 
supervisor_externals, + sandbox_instance, + state, + }; + f(&mut guest_externals) } /// Sandboxed instance of a wasm module. @@ -306,127 +305,119 @@ where /// /// [`invoke`]: #method.invoke pub struct SandboxInstance { - instance: ModuleRef, - dispatch_thunk: FR, - guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping, + instance: ModuleRef, + dispatch_thunk: FR, + guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping, } impl SandboxInstance { - /// Invoke an exported function by a name. - /// - /// `supervisor_externals` is required to execute the implementations - /// of the syscalls that published to a sandboxed module instance. - /// - /// The `state` parameter can be used to provide custom data for - /// these syscall implementations. - pub fn invoke>( - &self, - export_name: &str, - args: &[RuntimeValue], - supervisor_externals: &mut FE, - state: u32, - ) -> std::result::Result, wasmi::Error> { - with_guest_externals( - supervisor_externals, - self, - state, - |guest_externals| { - self.instance - .invoke_export(export_name, args, guest_externals) - }, - ) - } - - /// Get the value from a global with the given `name`. - /// - /// Returns `Some(_)` if the global could be found. - pub fn get_global_val(&self, name: &str) -> Option { - let global = self.instance - .export_by_name(name)? - .as_global()? - .get(); - - Some(global.into()) - } + /// Invoke an exported function by a name. + /// + /// `supervisor_externals` is required to execute the implementations + /// of the syscalls that published to a sandboxed module instance. + /// + /// The `state` parameter can be used to provide custom data for + /// these syscall implementations. 
+ pub fn invoke>( + &self, + export_name: &str, + args: &[RuntimeValue], + supervisor_externals: &mut FE, + state: u32, + ) -> std::result::Result, wasmi::Error> { + with_guest_externals(supervisor_externals, self, state, |guest_externals| { + self.instance + .invoke_export(export_name, args, guest_externals) + }) + } + + /// Get the value from a global with the given `name`. + /// + /// Returns `Some(_)` if the global could be found. + pub fn get_global_val(&self, name: &str) -> Option { + let global = self.instance.export_by_name(name)?.as_global()?.get(); + + Some(global.into()) + } } /// Error occurred during instantiation of a sandboxed module. pub enum InstantiationError { - /// Something wrong with the environment definition. It either can't - /// be decoded, have a reference to a non-existent or torn down memory instance. - EnvironmentDefinitionCorrupted, - /// Provided module isn't recognized as a valid webassembly binary. - ModuleDecoding, - /// Module is a well-formed webassembly binary but could not be instantiated. This could - /// happen because, e.g. the module imports entries not provided by the environment. - Instantiation, - /// Module is well-formed, instantiated and linked, but while executing the start function - /// a trap was generated. - StartTrapped, + /// Something wrong with the environment definition. It either can't + /// be decoded, have a reference to a non-existent or torn down memory instance. + EnvironmentDefinitionCorrupted, + /// Provided module isn't recognized as a valid webassembly binary. + ModuleDecoding, + /// Module is a well-formed webassembly binary but could not be instantiated. This could + /// happen because, e.g. the module imports entries not provided by the environment. + Instantiation, + /// Module is well-formed, instantiated and linked, but while executing the start function + /// a trap was generated. 
+ StartTrapped, } fn decode_environment_definition( - raw_env_def: &[u8], - memories: &[Option], + raw_env_def: &[u8], + memories: &[Option], ) -> std::result::Result<(Imports, GuestToSupervisorFunctionMapping), InstantiationError> { - let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..]) - .map_err(|_| InstantiationError::EnvironmentDefinitionCorrupted)?; - - let mut func_map = HashMap::new(); - let mut memories_map = HashMap::new(); - let mut guest_to_supervisor_mapping = GuestToSupervisorFunctionMapping::new(); - - for entry in &env_def.entries { - let module = entry.module_name.clone(); - let field = entry.field_name.clone(); - - match entry.entity { - sandbox_primitives::ExternEntity::Function(func_idx) => { - let externals_idx = - guest_to_supervisor_mapping.define(SupervisorFuncIndex(func_idx as usize)); - func_map.insert((module, field), externals_idx); - } - sandbox_primitives::ExternEntity::Memory(memory_idx) => { - let memory_ref = memories - .get(memory_idx as usize) - .cloned() - .ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)? 
- .ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)?; - memories_map.insert((module, field), memory_ref); - } - } - } - - Ok(( - Imports { - func_map, - memories_map, - }, - guest_to_supervisor_mapping, - )) + let env_def = sandbox_primitives::EnvironmentDefinition::decode(&mut &raw_env_def[..]) + .map_err(|_| InstantiationError::EnvironmentDefinitionCorrupted)?; + + let mut func_map = HashMap::new(); + let mut memories_map = HashMap::new(); + let mut guest_to_supervisor_mapping = GuestToSupervisorFunctionMapping::new(); + + for entry in &env_def.entries { + let module = entry.module_name.clone(); + let field = entry.field_name.clone(); + + match entry.entity { + sandbox_primitives::ExternEntity::Function(func_idx) => { + let externals_idx = + guest_to_supervisor_mapping.define(SupervisorFuncIndex(func_idx as usize)); + func_map.insert((module, field), externals_idx); + } + sandbox_primitives::ExternEntity::Memory(memory_idx) => { + let memory_ref = memories + .get(memory_idx as usize) + .cloned() + .ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)? + .ok_or_else(|| InstantiationError::EnvironmentDefinitionCorrupted)?; + memories_map.insert((module, field), memory_ref); + } + } + } + + Ok(( + Imports { + func_map, + memories_map, + }, + guest_to_supervisor_mapping, + )) } /// An environment in which the guest module is instantiated. pub struct GuestEnvironment { - imports: Imports, - guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping, + imports: Imports, + guest_to_supervisor_mapping: GuestToSupervisorFunctionMapping, } impl GuestEnvironment { - /// Decodes an environment definition from the given raw bytes. - /// - /// Returns `Err` if the definition cannot be decoded. 
- pub fn decode( - store: &Store, - raw_env_def: &[u8], - ) -> std::result::Result { - let (imports, guest_to_supervisor_mapping) = - decode_environment_definition(raw_env_def, &store.memories)?; - Ok(Self { - imports, - guest_to_supervisor_mapping, - }) - } + /// Decodes an environment definition from the given raw bytes. + /// + /// Returns `Err` if the definition cannot be decoded. + pub fn decode( + store: &Store, + raw_env_def: &[u8], + ) -> std::result::Result { + let (imports, guest_to_supervisor_mapping) = + decode_environment_definition(raw_env_def, &store.memories)?; + Ok(Self { + imports, + guest_to_supervisor_mapping, + }) + } } /// An unregistered sandboxed instance. @@ -434,16 +425,16 @@ impl GuestEnvironment { /// To finish off the instantiation the user must call `register`. #[must_use] pub struct UnregisteredInstance { - sandbox_instance: Rc>, + sandbox_instance: Rc>, } impl UnregisteredInstance { - /// Finalizes instantiation of this module. - pub fn register(self, store: &mut Store) -> u32 { - // At last, register the instance. - let instance_idx = store.register_sandbox_instance(self.sandbox_instance); - instance_idx - } + /// Finalizes instantiation of this module. + pub fn register(self, store: &mut Store) -> u32 { + // At last, register the instance. + let instance_idx = store.register_sandbox_instance(self.sandbox_instance); + instance_idx + } } /// Instantiate a guest module and return it's index in the store. 
@@ -461,145 +452,141 @@ impl UnregisteredInstance { /// /// [`EnvironmentDefinition`]: ../sandbox/struct.EnvironmentDefinition.html pub fn instantiate<'a, FE: SandboxCapabilities>( - supervisor_externals: &mut FE, - dispatch_thunk: FE::SupervisorFuncRef, - wasm: &[u8], - host_env: GuestEnvironment, - state: u32, + supervisor_externals: &mut FE, + dispatch_thunk: FE::SupervisorFuncRef, + wasm: &[u8], + host_env: GuestEnvironment, + state: u32, ) -> std::result::Result, InstantiationError> { - let module = Module::from_buffer(wasm).map_err(|_| InstantiationError::ModuleDecoding)?; - let instance = ModuleInstance::new(&module, &host_env.imports) - .map_err(|_| InstantiationError::Instantiation)?; - - let sandbox_instance = Rc::new(SandboxInstance { - // In general, it's not a very good idea to use `.not_started_instance()` for anything - // but for extracting memory and tables. But in this particular case, we are extracting - // for the purpose of running `start` function which should be ok. - instance: instance.not_started_instance().clone(), - dispatch_thunk, - guest_to_supervisor_mapping: host_env.guest_to_supervisor_mapping, - }); - - with_guest_externals( - supervisor_externals, - &sandbox_instance, - state, - |guest_externals| { - instance - .run_start(guest_externals) - .map_err(|_| InstantiationError::StartTrapped) - }, - )?; - - Ok(UnregisteredInstance { sandbox_instance }) + let module = Module::from_buffer(wasm).map_err(|_| InstantiationError::ModuleDecoding)?; + let instance = ModuleInstance::new(&module, &host_env.imports) + .map_err(|_| InstantiationError::Instantiation)?; + + let sandbox_instance = Rc::new(SandboxInstance { + // In general, it's not a very good idea to use `.not_started_instance()` for anything + // but for extracting memory and tables. But in this particular case, we are extracting + // for the purpose of running `start` function which should be ok. 
+ instance: instance.not_started_instance().clone(), + dispatch_thunk, + guest_to_supervisor_mapping: host_env.guest_to_supervisor_mapping, + }); + + with_guest_externals( + supervisor_externals, + &sandbox_instance, + state, + |guest_externals| { + instance + .run_start(guest_externals) + .map_err(|_| InstantiationError::StartTrapped) + }, + )?; + + Ok(UnregisteredInstance { sandbox_instance }) } /// This struct keeps track of all sandboxed components. /// /// This is generic over a supervisor function reference type. pub struct Store { - // Memories and instances are `Some` until torn down. - instances: Vec>>>, - memories: Vec>, + // Memories and instances are `Some` until torn down. + instances: Vec>>>, + memories: Vec>, } impl Store { - /// Create a new empty sandbox store. - pub fn new() -> Self { - Store { - instances: Vec::new(), - memories: Vec::new(), - } - } - - /// Create a new memory instance and return it's index. - /// - /// # Errors - /// - /// Returns `Err` if the memory couldn't be created. - /// Typically happens if `initial` is more than `maximum`. - pub fn new_memory(&mut self, initial: u32, maximum: u32) -> Result { - let maximum = match maximum { - sandbox_primitives::MEM_UNLIMITED => None, - specified_limit => Some(Pages(specified_limit as usize)), - }; - - let mem = - MemoryInstance::alloc( - Pages(initial as usize), - maximum, - )?; - - let mem_idx = self.memories.len(); - self.memories.push(Some(mem)); - Ok(mem_idx as u32) - } - - /// Returns `SandboxInstance` by `instance_idx`. - /// - /// # Errors - /// - /// Returns `Err` If `instance_idx` isn't a valid index of an instance or - /// instance is already torndown. - pub fn instance(&self, instance_idx: u32) -> Result>> { - self.instances - .get(instance_idx as usize) - .cloned() - .ok_or_else(|| "Trying to access a non-existent instance")? - .ok_or_else(|| "Trying to access a torndown instance".into()) - } - - /// Returns reference to a memory instance by `memory_idx`. 
- /// - /// # Errors - /// - /// Returns `Err` If `memory_idx` isn't a valid index of an memory or - /// if memory has been torn down. - pub fn memory(&self, memory_idx: u32) -> Result { - self.memories - .get(memory_idx as usize) - .cloned() - .ok_or_else(|| "Trying to access a non-existent sandboxed memory")? - .ok_or_else(|| "Trying to access a torndown sandboxed memory".into()) - } - - /// Tear down the memory at the specified index. - /// - /// # Errors - /// - /// Returns `Err` if `memory_idx` isn't a valid index of an memory or - /// if it has been torn down. - pub fn memory_teardown(&mut self, memory_idx: u32) -> Result<()> { - match self.memories.get_mut(memory_idx as usize) { - None => Err("Trying to teardown a non-existent sandboxed memory".into()), - Some(None) => Err("Double teardown of a sandboxed memory".into()), - Some(memory) => { - *memory = None; - Ok(()) - } - } - } - - /// Tear down the instance at the specified index. - /// - /// # Errors - /// - /// Returns `Err` if `instance_idx` isn't a valid index of an instance or - /// if it has been torn down. - pub fn instance_teardown(&mut self, instance_idx: u32) -> Result<()> { - match self.instances.get_mut(instance_idx as usize) { - None => Err("Trying to teardown a non-existent instance".into()), - Some(None) => Err("Double teardown of an instance".into()), - Some(instance) => { - *instance = None; - Ok(()) - } - } - } - - fn register_sandbox_instance(&mut self, sandbox_instance: Rc>) -> u32 { - let instance_idx = self.instances.len(); - self.instances.push(Some(sandbox_instance)); - instance_idx as u32 - } + /// Create a new empty sandbox store. + pub fn new() -> Self { + Store { + instances: Vec::new(), + memories: Vec::new(), + } + } + + /// Create a new memory instance and return it's index. + /// + /// # Errors + /// + /// Returns `Err` if the memory couldn't be created. + /// Typically happens if `initial` is more than `maximum`. 
+ pub fn new_memory(&mut self, initial: u32, maximum: u32) -> Result { + let maximum = match maximum { + sandbox_primitives::MEM_UNLIMITED => None, + specified_limit => Some(Pages(specified_limit as usize)), + }; + + let mem = MemoryInstance::alloc(Pages(initial as usize), maximum)?; + + let mem_idx = self.memories.len(); + self.memories.push(Some(mem)); + Ok(mem_idx as u32) + } + + /// Returns `SandboxInstance` by `instance_idx`. + /// + /// # Errors + /// + /// Returns `Err` If `instance_idx` isn't a valid index of an instance or + /// instance is already torndown. + pub fn instance(&self, instance_idx: u32) -> Result>> { + self.instances + .get(instance_idx as usize) + .cloned() + .ok_or_else(|| "Trying to access a non-existent instance")? + .ok_or_else(|| "Trying to access a torndown instance".into()) + } + + /// Returns reference to a memory instance by `memory_idx`. + /// + /// # Errors + /// + /// Returns `Err` If `memory_idx` isn't a valid index of an memory or + /// if memory has been torn down. + pub fn memory(&self, memory_idx: u32) -> Result { + self.memories + .get(memory_idx as usize) + .cloned() + .ok_or_else(|| "Trying to access a non-existent sandboxed memory")? + .ok_or_else(|| "Trying to access a torndown sandboxed memory".into()) + } + + /// Tear down the memory at the specified index. + /// + /// # Errors + /// + /// Returns `Err` if `memory_idx` isn't a valid index of an memory or + /// if it has been torn down. + pub fn memory_teardown(&mut self, memory_idx: u32) -> Result<()> { + match self.memories.get_mut(memory_idx as usize) { + None => Err("Trying to teardown a non-existent sandboxed memory".into()), + Some(None) => Err("Double teardown of a sandboxed memory".into()), + Some(memory) => { + *memory = None; + Ok(()) + } + } + } + + /// Tear down the instance at the specified index. + /// + /// # Errors + /// + /// Returns `Err` if `instance_idx` isn't a valid index of an instance or + /// if it has been torn down. 
+ pub fn instance_teardown(&mut self, instance_idx: u32) -> Result<()> { + match self.instances.get_mut(instance_idx as usize) { + None => Err("Trying to teardown a non-existent instance".into()), + Some(None) => Err("Double teardown of an instance".into()), + Some(instance) => { + *instance = None; + Ok(()) + } + } + } + + fn register_sandbox_instance(&mut self, sandbox_instance: Rc>) -> u32 { + let instance_idx = self.instances.len(); + self.instances.push(Some(sandbox_instance)); + instance_idx as u32 + } } diff --git a/client/executor/common/src/util.rs b/client/executor/common/src/util.rs index 149db13bc0..0f74b41bf3 100644 --- a/client/executor/common/src/util.rs +++ b/client/executor/common/src/util.rs @@ -17,49 +17,49 @@ //! A set of utilities for resetting a wasm instance to its initial state. use crate::error::{self, Error}; -use std::mem; use parity_wasm::elements::{deserialize_buffer, DataSegment, Instruction, Module as RawModule}; +use std::mem; /// A bunch of information collected from a WebAssembly module. pub struct WasmModuleInfo { - raw_module: RawModule, + raw_module: RawModule, } impl WasmModuleInfo { - /// Create `WasmModuleInfo` from the given wasm code. - /// - /// Returns `None` if the wasm code cannot be deserialized. - pub fn new(wasm_code: &[u8]) -> Option { - let raw_module: RawModule = deserialize_buffer(wasm_code).ok()?; - Some(Self { raw_module }) - } - - /// Extract the data segments from the given wasm code. - /// - /// Returns `Err` if the given wasm code cannot be deserialized. - fn data_segments(&self) -> Vec { - self.raw_module - .data_section() - .map(|ds| ds.entries()) - .unwrap_or(&[]) - .to_vec() - } - - /// The number of globals defined in locally in this module. - pub fn declared_globals_count(&self) -> u32 { - self.raw_module - .global_section() - .map(|gs| gs.entries().len() as u32) - .unwrap_or(0) - } - - /// The number of imports of globals. 
- pub fn imported_globals_count(&self) -> u32 { - self.raw_module - .import_section() - .map(|is| is.globals() as u32) - .unwrap_or(0) - } + /// Create `WasmModuleInfo` from the given wasm code. + /// + /// Returns `None` if the wasm code cannot be deserialized. + pub fn new(wasm_code: &[u8]) -> Option { + let raw_module: RawModule = deserialize_buffer(wasm_code).ok()?; + Some(Self { raw_module }) + } + + /// Extract the data segments from the given wasm code. + /// + /// Returns `Err` if the given wasm code cannot be deserialized. + fn data_segments(&self) -> Vec { + self.raw_module + .data_section() + .map(|ds| ds.entries()) + .unwrap_or(&[]) + .to_vec() + } + + /// The number of globals defined in locally in this module. + pub fn declared_globals_count(&self) -> u32 { + self.raw_module + .global_section() + .map(|gs| gs.entries().len() as u32) + .unwrap_or(0) + } + + /// The number of imports of globals. + pub fn imported_globals_count(&self) -> u32 { + self.raw_module + .import_section() + .map(|is| is.globals() as u32) + .unwrap_or(0) + } } /// This is a snapshot of data segments specialzied for a particular instantiation. @@ -67,72 +67,72 @@ impl WasmModuleInfo { /// Note that this assumes that no mutable globals are used. #[derive(Clone)] pub struct DataSegmentsSnapshot { - /// The list of data segments represented by (offset, contents). - data_segments: Vec<(u32, Vec)>, + /// The list of data segments represented by (offset, contents). + data_segments: Vec<(u32, Vec)>, } impl DataSegmentsSnapshot { - /// Create a snapshot from the data segments from the module. - pub fn take(module: &WasmModuleInfo) -> error::Result { - let data_segments = module - .data_segments() - .into_iter() - .map(|mut segment| { - // Just replace contents of the segment since the segments will be discarded later - // anyway. 
- let contents = mem::replace(segment.value_mut(), vec![]); - - let init_expr = match segment.offset() { - Some(offset) => offset.code(), - // Return if the segment is passive - None => return Err(Error::from("Shared memory is not supported".to_string())), - }; - - // [op, End] - if init_expr.len() != 2 { - return Err(Error::from( - "initializer expression can have only up to 2 expressions in wasm 1.0" - .to_string(), - )); - } - let offset = match &init_expr[0] { - Instruction::I32Const(v) => *v as u32, - Instruction::GetGlobal(_) => { - // In a valid wasm file, initializer expressions can only refer imported - // globals. - // - // At the moment of writing the Substrate Runtime Interface does not provide - // any globals. There is nothing that prevents us from supporting this - // if/when we gain those. - return Err(Error::from( - "Imported globals are not supported yet".to_string(), - )); - } - insn => { - return Err(Error::from(format!( - "{:?} is not supported as initializer expression in wasm 1.0", - insn - ))) - } - }; - - Ok((offset, contents)) - }) - .collect::>>()?; - - Ok(Self { data_segments }) - } - - /// Apply the given snapshot to a linear memory. - /// - /// Linear memory interface is represented by a closure `memory_set`. - pub fn apply( - &self, - mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>, - ) -> Result<(), E> { - for (offset, contents) in &self.data_segments { - memory_set(*offset, contents)?; - } - Ok(()) - } + /// Create a snapshot from the data segments from the module. + pub fn take(module: &WasmModuleInfo) -> error::Result { + let data_segments = module + .data_segments() + .into_iter() + .map(|mut segment| { + // Just replace contents of the segment since the segments will be discarded later + // anyway. 
+ let contents = mem::replace(segment.value_mut(), vec![]); + + let init_expr = match segment.offset() { + Some(offset) => offset.code(), + // Return if the segment is passive + None => return Err(Error::from("Shared memory is not supported".to_string())), + }; + + // [op, End] + if init_expr.len() != 2 { + return Err(Error::from( + "initializer expression can have only up to 2 expressions in wasm 1.0" + .to_string(), + )); + } + let offset = match &init_expr[0] { + Instruction::I32Const(v) => *v as u32, + Instruction::GetGlobal(_) => { + // In a valid wasm file, initializer expressions can only refer imported + // globals. + // + // At the moment of writing the Substrate Runtime Interface does not provide + // any globals. There is nothing that prevents us from supporting this + // if/when we gain those. + return Err(Error::from( + "Imported globals are not supported yet".to_string(), + )); + } + insn => { + return Err(Error::from(format!( + "{:?} is not supported as initializer expression in wasm 1.0", + insn + ))) + } + }; + + Ok((offset, contents)) + }) + .collect::>>()?; + + Ok(Self { data_segments }) + } + + /// Apply the given snapshot to a linear memory. + /// + /// Linear memory interface is represented by a closure `memory_set`. + pub fn apply( + &self, + mut memory_set: impl FnMut(u32, &[u8]) -> Result<(), E>, + ) -> Result<(), E> { + for (offset, contents) in &self.data_segments { + memory_set(*offset, contents)?; + } + Ok(()) + } } diff --git a/client/executor/common/src/wasm_runtime.rs b/client/executor/common/src/wasm_runtime.rs index b59ca8ba93..e54162119f 100644 --- a/client/executor/common/src/wasm_runtime.rs +++ b/client/executor/common/src/wasm_runtime.rs @@ -23,19 +23,19 @@ use sp_wasm_interface::Value; /// /// This can be implemented by an execution engine. pub trait WasmModule: Sync + Send { - /// Create a new instance. - fn new_instance(&self) -> Result, Error>; + /// Create a new instance. 
+ fn new_instance(&self) -> Result, Error>; } /// A trait that defines an abstract wasm module instance. /// /// This can be implemented by an execution engine. pub trait WasmInstance: Send { - /// Call a method on this WASM instance and reset it afterwards. - /// Returns the encoded result on success. - fn call(&self, method: &str, data: &[u8]) -> Result, Error>; + /// Call a method on this WASM instance and reset it afterwards. + /// Returns the encoded result on success. + fn call(&self, method: &str, data: &[u8]) -> Result, Error>; - /// Get the value from a global with the given `name`. - /// This method is only suitable for getting immutable globals. - fn get_global_const(&self, name: &str) -> Result, Error>; + /// Get the value from a global with the given `name`. + /// This method is only suitable for getting immutable globals. + fn get_global_const(&self, name: &str) -> Result, Error>; } diff --git a/client/executor/runtime-test/build.rs b/client/executor/runtime-test/build.rs index 647b476814..c54e86d8db 100644 --- a/client/executor/runtime-test/build.rs +++ b/client/executor/runtime-test/build.rs @@ -17,10 +17,10 @@ use wasm_builder_runner::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") - .export_heap_base() - .import_memory() - .build() + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") + .export_heap_base() + .import_memory() + .build() } diff --git a/client/executor/runtime-test/src/lib.rs b/client/executor/runtime-test/src/lib.rs index 38a16ae39e..a3cda033ff 100644 --- a/client/executor/runtime-test/src/lib.rs +++ b/client/executor/runtime-test/src/lib.rs @@ -6,26 +6,30 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); #[cfg(not(feature = "std"))] -use sp_std::{vec::Vec, vec}; +use sp_std::{vec, vec::Vec}; +#[cfg(not(feature = "std"))] +use sp_core::{ed25519, 
sr25519}; #[cfg(not(feature = "std"))] use sp_io::{ - storage, hashing::{blake2_128, blake2_256, sha2_256, twox_128, twox_256}, - crypto::{ed25519_verify, sr25519_verify}, + crypto::{ed25519_verify, sr25519_verify}, + hashing::{blake2_128, blake2_256, sha2_256, twox_128, twox_256}, + storage, }; #[cfg(not(feature = "std"))] -use sp_runtime::{print, traits::{BlakeTwo256, Hash}}; -#[cfg(not(feature = "std"))] -use sp_core::{ed25519, sr25519}; +use sp_runtime::{ + print, + traits::{BlakeTwo256, Hash}, +}; #[cfg(not(feature = "std"))] use sp_sandbox::Value; extern "C" { - #[allow(dead_code)] - fn missing_external(); + #[allow(dead_code)] + fn missing_external(); - #[allow(dead_code)] - fn yet_another_missing_external(); + #[allow(dead_code)] + fn yet_another_missing_external(); } #[cfg(not(feature = "std"))] @@ -34,318 +38,319 @@ extern "C" { static mut MUTABLE_STATIC: u64 = 32; sp_core::wasm_export_functions! { - fn test_calling_missing_external() { - unsafe { missing_external() } - } - - fn test_calling_yet_another_missing_external() { - unsafe { yet_another_missing_external() } - } - - fn test_data_in(input: Vec) -> Vec { - print("set_storage"); - storage::set(b"input", &input); - - print("storage"); - let foo = storage::get(b"foo").unwrap(); - - print("set_storage"); - storage::set(b"baz", &foo); - - print("finished!"); - b"all ok!".to_vec() - } - - fn test_clear_prefix(input: Vec) -> Vec { - storage::clear_prefix(&input); - b"all ok!".to_vec() - } - - fn test_empty_return() {} - - fn test_exhaust_heap() -> Vec { Vec::with_capacity(16777216) } - - fn test_panic() { panic!("test panic") } - - fn test_conditional_panic(input: Vec) -> Vec { - if input.len() > 0 { - panic!("test panic") - } - - input - } - - fn test_blake2_256(input: Vec) -> Vec { - blake2_256(&input).to_vec() - } - - fn test_blake2_128(input: Vec) -> Vec { - blake2_128(&input).to_vec() - } - - fn test_sha2_256(input: Vec) -> Vec { - sha2_256(&input).to_vec() - } - - fn test_twox_256(input: Vec) -> Vec 
{ - twox_256(&input).to_vec() - } - - fn test_twox_128(input: Vec) -> Vec { - twox_128(&input).to_vec() - } - - fn test_ed25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) - } - - fn test_sr25519_verify(input: Vec) -> bool { - let mut pubkey = [0; 32]; - let mut sig = [0; 64]; - - pubkey.copy_from_slice(&input[0..32]); - sig.copy_from_slice(&input[32..96]); - - let msg = b"all ok!"; - sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) - } - - fn test_ordered_trie_root() -> Vec { - BlakeTwo256::ordered_trie_root( - vec![ - b"zero"[..].into(), - b"one"[..].into(), - b"two"[..].into(), - ], - ).as_ref().to_vec() - } - - fn test_sandbox(code: Vec) -> bool { - execute_sandboxed(&code, &[]).is_ok() - } - - fn test_sandbox_args(code: Vec) -> bool { - execute_sandboxed( - &code, - &[ - Value::I32(0x12345678), - Value::I64(0x1234567887654321), - ], - ).is_ok() - } - - fn test_sandbox_return_val(code: Vec) -> bool { - let ok = match execute_sandboxed( - &code, - &[ - Value::I32(0x1336), - ] - ) { - Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true, - _ => false, - }; - - ok - } - - fn test_sandbox_instantiate(code: Vec) -> u8 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let code = match sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - Ok(_) => 0, - Err(sp_sandbox::Error::Module) => 1, - Err(sp_sandbox::Error::Execution) => 2, - Err(sp_sandbox::Error::OutOfBounds) => 3, - }; - - code - } - - - fn test_sandbox_get_global_val(code: Vec) -> i64 { - let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { - i - } else { - return 20; - }; - - match 
instance.get_global_val("test_global") { - Some(sp_sandbox::Value::I64(val)) => val, - None => 30, - val => 40, - } - } - - fn test_offchain_local_storage() -> bool { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - sp_io::offchain::local_storage_set(kind, b"test", b"asd"); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); - - let res = sp_io::offchain::local_storage_compare_and_set( - kind, - b"test", - Some(b"asd".to_vec()), - b"", - ); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); - res - } - - fn test_offchain_local_storage_with_none() { - let kind = sp_core::offchain::StorageKind::PERSISTENT; - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); - - let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); - assert_eq!(res, true); - assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); - } - - fn test_offchain_http() -> bool { - use sp_core::offchain::HttpRequestStatus; - let run = || -> Option<()> { - let id = sp_io::offchain::http_request_start( - "POST", - "http://localhost:12345", - &[], - ).ok()?; - sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; - sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; - sp_io::offchain::http_request_write_body(id, &[], None).ok()?; - let status = sp_io::offchain::http_response_wait(&[id], None); - assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); - let headers = sp_io::offchain::http_response_headers(id); - assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); - let mut buffer = vec![0; 64]; - let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 3); - assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); - let read = 
sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; - assert_eq!(read, 0); - - Some(()) - }; - - run().is_some() - } - - // Just some test to make sure that `sp-allocator` compiles on `no_std`. - fn test_sp_allocator_compiles() { - sp_allocator::FreeingBumpHeapAllocator::new(0); - } - - fn returns_mutable_static() -> u64 { - unsafe { - MUTABLE_STATIC += 1; - MUTABLE_STATIC - } - } - - fn allocates_huge_stack_array(trap: bool) -> Vec { - // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). - // This will just decrease (stacks in wasm32-u-u grow downwards) the stack - // pointer. This won't trap on the current compilers. - let mut data = [0u8; 1024 * 768]; - - // Then make sure we actually write something to it. - // - // If: - // 1. the stack area is placed at the beginning of the linear memory space, and - // 2. the stack pointer points to out-of-bounds area, and - // 3. a write is performed around the current stack pointer. - // - // then a trap should happen. - // - for (i, v) in data.iter_mut().enumerate() { - *v = i as u8; // deliberate truncation - } - - if trap { - // There is a small chance of this to be pulled up in theory. In practice - // the probability of that is rather low. - panic!() - } - - data.to_vec() - } - - // Check that the heap at `heap_base + offset` don't contains the test message. - // After the check succeeds the test message is written into the heap. - // - // It is expected that the given pointer is not allocated. 
- fn check_and_set_in_heap(heap_base: u32, offset: u32) { - let test_message = b"Hello invalid heap memory"; - let ptr = unsafe { (heap_base + offset) as *mut u8 }; - - let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; - - assert_ne!(test_message, message_slice); - message_slice.copy_from_slice(test_message); - } - } + fn test_calling_missing_external() { + unsafe { missing_external() } + } + + fn test_calling_yet_another_missing_external() { + unsafe { yet_another_missing_external() } + } + + fn test_data_in(input: Vec) -> Vec { + print("set_storage"); + storage::set(b"input", &input); + + print("storage"); + let foo = storage::get(b"foo").unwrap(); + + print("set_storage"); + storage::set(b"baz", &foo); + + print("finished!"); + b"all ok!".to_vec() + } + + fn test_clear_prefix(input: Vec) -> Vec { + storage::clear_prefix(&input); + b"all ok!".to_vec() + } + + fn test_empty_return() {} + + fn test_exhaust_heap() -> Vec { Vec::with_capacity(16777216) } + + fn test_panic() { panic!("test panic") } + + fn test_conditional_panic(input: Vec) -> Vec { + if input.len() > 0 { + panic!("test panic") + } + + input + } + + fn test_blake2_256(input: Vec) -> Vec { + blake2_256(&input).to_vec() + } + + fn test_blake2_128(input: Vec) -> Vec { + blake2_128(&input).to_vec() + } + + fn test_sha2_256(input: Vec) -> Vec { + sha2_256(&input).to_vec() + } + + fn test_twox_256(input: Vec) -> Vec { + twox_256(&input).to_vec() + } + + fn test_twox_128(input: Vec) -> Vec { + twox_128(&input).to_vec() + } + + fn test_ed25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + ed25519_verify(&ed25519::Signature(sig), &msg[..], &ed25519::Public(pubkey)) + } + + fn test_sr25519_verify(input: Vec) -> bool { + let mut pubkey = [0; 32]; + let mut sig = [0; 64]; + + pubkey.copy_from_slice(&input[0..32]); + 
sig.copy_from_slice(&input[32..96]); + + let msg = b"all ok!"; + sr25519_verify(&sr25519::Signature(sig), &msg[..], &sr25519::Public(pubkey)) + } + + fn test_ordered_trie_root() -> Vec { + BlakeTwo256::ordered_trie_root( + vec![ + b"zero"[..].into(), + b"one"[..].into(), + b"two"[..].into(), + ], + ).as_ref().to_vec() + } + + fn test_sandbox(code: Vec) -> bool { + execute_sandboxed(&code, &[]).is_ok() + } + + fn test_sandbox_args(code: Vec) -> bool { + execute_sandboxed( + &code, + &[ + Value::I32(0x12345678), + Value::I64(0x1234567887654321), + ], + ).is_ok() + } + + fn test_sandbox_return_val(code: Vec) -> bool { + let ok = match execute_sandboxed( + &code, + &[ + Value::I32(0x1336), + ] + ) { + Ok(sp_sandbox::ReturnValue::Value(Value::I32(0x1337))) => true, + _ => false, + }; + + ok + } + + fn test_sandbox_instantiate(code: Vec) -> u8 { + let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); + let code = match sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { + Ok(_) => 0, + Err(sp_sandbox::Error::Module) => 1, + Err(sp_sandbox::Error::Execution) => 2, + Err(sp_sandbox::Error::OutOfBounds) => 3, + }; + + code + } + + + fn test_sandbox_get_global_val(code: Vec) -> i64 { + let env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); + let instance = if let Ok(i) = sp_sandbox::Instance::new(&code, &env_builder, &mut ()) { + i + } else { + return 20; + }; + + match instance.get_global_val("test_global") { + Some(sp_sandbox::Value::I64(val)) => val, + None => 30, + val => 40, + } + } + + fn test_offchain_local_storage() -> bool { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + sp_io::offchain::local_storage_set(kind, b"test", b"asd"); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"asd".to_vec())); + + let res = sp_io::offchain::local_storage_compare_and_set( + kind, + b"test", + Some(b"asd".to_vec()), + b"", + ); + 
assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"".to_vec())); + res + } + + fn test_offchain_local_storage_with_none() { + let kind = sp_core::offchain::StorageKind::PERSISTENT; + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), None); + + let res = sp_io::offchain::local_storage_compare_and_set(kind, b"test", None, b"value"); + assert_eq!(res, true); + assert_eq!(sp_io::offchain::local_storage_get(kind, b"test"), Some(b"value".to_vec())); + } + + fn test_offchain_http() -> bool { + use sp_core::offchain::HttpRequestStatus; + let run = || -> Option<()> { + let id = sp_io::offchain::http_request_start( + "POST", + "http://localhost:12345", + &[], + ).ok()?; + sp_io::offchain::http_request_add_header(id, "X-Auth", "test").ok()?; + sp_io::offchain::http_request_write_body(id, &[1, 2, 3, 4], None).ok()?; + sp_io::offchain::http_request_write_body(id, &[], None).ok()?; + let status = sp_io::offchain::http_response_wait(&[id], None); + assert!(status == vec![HttpRequestStatus::Finished(200)], "Expected Finished(200) status."); + let headers = sp_io::offchain::http_response_headers(id); + assert_eq!(headers, vec![(b"X-Auth".to_vec(), b"hello".to_vec())]); + let mut buffer = vec![0; 64]; + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 3); + assert_eq!(&buffer[0..read as usize], &[1, 2, 3]); + let read = sp_io::offchain::http_response_read_body(id, &mut buffer, None).ok()?; + assert_eq!(read, 0); + + Some(()) + }; + + run().is_some() + } + + // Just some test to make sure that `sp-allocator` compiles on `no_std`. + fn test_sp_allocator_compiles() { + sp_allocator::FreeingBumpHeapAllocator::new(0); + } + + fn returns_mutable_static() -> u64 { + unsafe { + MUTABLE_STATIC += 1; + MUTABLE_STATIC + } + } + + fn allocates_huge_stack_array(trap: bool) -> Vec { + // Allocate a stack frame that is approx. 75% of the stack (assuming it is 1MB). 
+ // This will just decrease (stacks in wasm32-u-u grow downwards) the stack + // pointer. This won't trap on the current compilers. + let mut data = [0u8; 1024 * 768]; + + // Then make sure we actually write something to it. + // + // If: + // 1. the stack area is placed at the beginning of the linear memory space, and + // 2. the stack pointer points to out-of-bounds area, and + // 3. a write is performed around the current stack pointer. + // + // then a trap should happen. + // + for (i, v) in data.iter_mut().enumerate() { + *v = i as u8; // deliberate truncation + } + + if trap { + // There is a small chance of this to be pulled up in theory. In practice + // the probability of that is rather low. + panic!() + } + + data.to_vec() + } + + // Check that the heap at `heap_base + offset` don't contains the test message. + // After the check succeeds the test message is written into the heap. + // + // It is expected that the given pointer is not allocated. + fn check_and_set_in_heap(heap_base: u32, offset: u32) { + let test_message = b"Hello invalid heap memory"; + let ptr = unsafe { (heap_base + offset) as *mut u8 }; + + let message_slice = unsafe { sp_std::slice::from_raw_parts_mut(ptr, test_message.len()) }; + + assert_ne!(test_message, message_slice); + message_slice.copy_from_slice(test_message); + } +} #[cfg(not(feature = "std"))] fn execute_sandboxed( - code: &[u8], - args: &[Value], + code: &[u8], + args: &[Value], ) -> Result { - struct State { - counter: u32, - } - - fn env_assert( - _e: &mut State, - args: &[Value], - ) -> Result { - if args.len() != 1 { - return Err(sp_sandbox::HostError); - } - let condition = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; - if condition != 0 { - Ok(sp_sandbox::ReturnValue::Unit) - } else { - Err(sp_sandbox::HostError) - } - } - fn env_inc_counter( - e: &mut State, - args: &[Value], - ) -> Result { - if args.len() != 1 { - return Err(sp_sandbox::HostError); - } - let inc_by = args[0].as_i32().ok_or_else(|| 
sp_sandbox::HostError)?; - e.counter += inc_by as u32; - Ok(sp_sandbox::ReturnValue::Value(Value::I32(e.counter as i32))) - } - - let mut state = State { counter: 0 }; - - let env_builder = { - let mut env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); - env_builder.add_host_func("env", "assert", env_assert); - env_builder.add_host_func("env", "inc_counter", env_inc_counter); - let memory = match sp_sandbox::Memory::new(1, Some(16)) { - Ok(m) => m, - Err(_) => unreachable!(" + struct State { + counter: u32, + } + + fn env_assert( + _e: &mut State, + args: &[Value], + ) -> Result { + if args.len() != 1 { + return Err(sp_sandbox::HostError); + } + let condition = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; + if condition != 0 { + Ok(sp_sandbox::ReturnValue::Unit) + } else { + Err(sp_sandbox::HostError) + } + } + fn env_inc_counter( + e: &mut State, + args: &[Value], + ) -> Result { + if args.len() != 1 { + return Err(sp_sandbox::HostError); + } + let inc_by = args[0].as_i32().ok_or_else(|| sp_sandbox::HostError)?; + e.counter += inc_by as u32; + Ok(sp_sandbox::ReturnValue::Value(Value::I32(e.counter as i32))) + } + + let mut state = State { counter: 0 }; + + let env_builder = { + let mut env_builder = sp_sandbox::EnvironmentDefinitionBuilder::new(); + env_builder.add_host_func("env", "assert", env_assert); + env_builder.add_host_func("env", "inc_counter", env_inc_counter); + let memory = match sp_sandbox::Memory::new(1, Some(16)) { + Ok(m) => m, + Err(_) => unreachable!( + " Memory::new() can return Err only if parameters are borked; \ We passing params here explicitly and they're correct; \ Memory::new() can't return a Error qed" - ), - }; - env_builder.add_memory("env", "memory", memory.clone()); - env_builder - }; + ), + }; + env_builder.add_memory("env", "memory", memory.clone()); + env_builder + }; - let mut instance = sp_sandbox::Instance::new(code, &env_builder, &mut state)?; - let result = instance.invoke("call", args, &mut state); + 
let mut instance = sp_sandbox::Instance::new(code, &env_builder, &mut state)?; + let result = instance.invoke("call", args, &mut state); - result.map_err(|_| sp_sandbox::HostError) + result.map_err(|_| sp_sandbox::HostError) } diff --git a/client/executor/src/integration_tests/mod.rs b/client/executor/src/integration_tests/mod.rs index 72055b7788..ad419d372b 100644 --- a/client/executor/src/integration_tests/mod.rs +++ b/client/executor/src/integration_tests/mod.rs @@ -16,19 +16,21 @@ mod sandbox; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use hex_literal::hex; +use sc_runtime_test::WASM_BINARY; use sp_core::{ - blake2_128, blake2_256, ed25519, sr25519, map, Pair, - offchain::{OffchainExt, testing}, - traits::{Externalities, CallInWasm}, + blake2_128, blake2_256, ed25519, map, + offchain::{testing, OffchainExt}, + sr25519, + traits::{CallInWasm, Externalities}, + Pair, }; -use sc_runtime_test::WASM_BINARY; +use sp_runtime::traits::BlakeTwo256; use sp_state_machine::TestExternalities as CoreTestExternalities; -use test_case::test_case; -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; use sp_wasm_interface::HostFunctions as _; -use sp_runtime::traits::BlakeTwo256; +use test_case::test_case; use crate::WasmExecutionMethod; @@ -36,49 +38,38 @@ pub type TestExternalities = CoreTestExternalities; type HostFunctions = sp_io::SubstrateHostFunctions; fn call_in_wasm( - function: &str, - call_data: &[u8], - execution_method: WasmExecutionMethod, - ext: &mut E, + function: &str, + call_data: &[u8], + execution_method: WasmExecutionMethod, + ext: &mut E, ) -> Result, String> { - let executor = crate::WasmExecutor::new( - execution_method, - Some(1024), - HostFunctions::host_functions(), - true, - 8, - ); - executor.call_in_wasm( - &WASM_BINARY[..], - None, - function, - call_data, - ext, - ) + let executor = crate::WasmExecutor::new( + execution_method, + Some(1024), + 
HostFunctions::host_functions(), + true, + 8, + ); + executor.call_in_wasm(&WASM_BINARY[..], None, function, call_data, ext) } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn returning_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let output = call_in_wasm( - "test_empty_return", - &[], - wasm_method, - &mut ext, - ).unwrap(); - assert_eq!(output, vec![0u8; 0]); + let output = call_in_wasm("test_empty_return", &[], wasm_method, &mut ext).unwrap(); + assert_eq!(output, vec![0u8; 0]); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn call_not_existing_function(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - match call_in_wasm( + match call_in_wasm( "test_calling_missing_external", &[], wasm_method, @@ -104,10 +95,10 @@ fn call_not_existing_function(wasm_method: WasmExecutionMethod) { #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - match call_in_wasm( + match call_in_wasm( "test_calling_yet_another_missing_external", &[], wasm_method, @@ -133,416 +124,392 @@ fn call_yet_another_not_existing_function(wasm_method: WasmExecutionMethod) { #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn panicking_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = 
TestExternalities::default(); - let mut ext = ext.ext(); - - let output = call_in_wasm( - "test_panic", - &[], - wasm_method, - &mut ext, - ); - assert!(output.is_err()); - - let output = call_in_wasm( - "test_conditional_panic", - &[0], - wasm_method, - &mut ext, - ); - assert_eq!(Decode::decode(&mut &output.unwrap()[..]), Ok(Vec::::new())); - - let output = call_in_wasm( - "test_conditional_panic", - &vec![2].encode(), - wasm_method, - &mut ext, - ); - assert!(output.is_err()); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + let output = call_in_wasm("test_panic", &[], wasm_method, &mut ext); + assert!(output.is_err()); + + let output = call_in_wasm("test_conditional_panic", &[0], wasm_method, &mut ext); + assert_eq!( + Decode::decode(&mut &output.unwrap()[..]), + Ok(Vec::::new()) + ); + + let output = call_in_wasm( + "test_conditional_panic", + &vec![2].encode(), + wasm_method, + &mut ext, + ); + assert!(output.is_err()); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn storage_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - - { - let mut ext = ext.ext(); - ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); - - let output = call_in_wasm( - "test_data_in", - &b"Hello world".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); - - assert_eq!(output, b"all ok!".to_vec().encode()); - } - - let expected = TestExternalities::new(sp_core::storage::Storage { - top: map![ - b"input".to_vec() => b"Hello world".to_vec(), - b"foo".to_vec() => b"bar".to_vec(), - b"baz".to_vec() => b"bar".to_vec() - ], - children: map![], - }); - assert_eq!(ext, expected); + let mut ext = TestExternalities::default(); + + { + let mut ext = ext.ext(); + ext.set_storage(b"foo".to_vec(), b"bar".to_vec()); + + let output = call_in_wasm( + "test_data_in", + &b"Hello world".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(); + 
+ assert_eq!(output, b"all ok!".to_vec().encode()); + } + + let expected = TestExternalities::new(sp_core::storage::Storage { + top: map![ + b"input".to_vec() => b"Hello world".to_vec(), + b"foo".to_vec() => b"bar".to_vec(), + b"baz".to_vec() => b"bar".to_vec() + ], + children: map![], + }); + assert_eq!(ext, expected); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn clear_prefix_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - { - let mut ext = ext.ext(); - ext.set_storage(b"aaa".to_vec(), b"1".to_vec()); - ext.set_storage(b"aab".to_vec(), b"2".to_vec()); - ext.set_storage(b"aba".to_vec(), b"3".to_vec()); - ext.set_storage(b"abb".to_vec(), b"4".to_vec()); - ext.set_storage(b"bbb".to_vec(), b"5".to_vec()); - - // This will clear all entries which prefix is "ab". - let output = call_in_wasm( - "test_clear_prefix", - &b"ab".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(); - - assert_eq!(output, b"all ok!".to_vec().encode()); - } - - let expected = TestExternalities::new(sp_core::storage::Storage { - top: map![ - b"aaa".to_vec() => b"1".to_vec(), - b"aab".to_vec() => b"2".to_vec(), - b"bbb".to_vec() => b"5".to_vec() - ], - children: map![], - }); - assert_eq!(expected, ext); + let mut ext = TestExternalities::default(); + { + let mut ext = ext.ext(); + ext.set_storage(b"aaa".to_vec(), b"1".to_vec()); + ext.set_storage(b"aab".to_vec(), b"2".to_vec()); + ext.set_storage(b"aba".to_vec(), b"3".to_vec()); + ext.set_storage(b"abb".to_vec(), b"4".to_vec()); + ext.set_storage(b"bbb".to_vec(), b"5".to_vec()); + + // This will clear all entries which prefix is "ab". 
+ let output = call_in_wasm( + "test_clear_prefix", + &b"ab".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(); + + assert_eq!(output, b"all ok!".to_vec().encode()); + } + + let expected = TestExternalities::new(sp_core::storage::Storage { + top: map![ + b"aaa".to_vec() => b"1".to_vec(), + b"aab".to_vec() => b"2".to_vec(), + b"bbb".to_vec() => b"5".to_vec() + ], + children: map![], + }); + assert_eq!(expected, ext); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn blake2_256_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - assert_eq!( - call_in_wasm( - "test_blake2_256", - &[0], - wasm_method, - &mut ext, - ).unwrap(), - blake2_256(&b""[..]).to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_blake2_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), - blake2_256(&b"Hello world!"[..]).to_vec().encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + assert_eq!( + call_in_wasm("test_blake2_256", &[0], wasm_method, &mut ext,).unwrap(), + blake2_256(&b""[..]).to_vec().encode(), + ); + assert_eq!( + call_in_wasm( + "test_blake2_256", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + blake2_256(&b"Hello world!"[..]).to_vec().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn blake2_128_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - assert_eq!( - call_in_wasm( - "test_blake2_128", - &[0], - wasm_method, - &mut ext, - ).unwrap(), - blake2_128(&b""[..]).to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_blake2_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), - blake2_128(&b"Hello 
world!"[..]).to_vec().encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + assert_eq!( + call_in_wasm("test_blake2_128", &[0], wasm_method, &mut ext,).unwrap(), + blake2_128(&b""[..]).to_vec().encode(), + ); + assert_eq!( + call_in_wasm( + "test_blake2_128", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + blake2_128(&b"Hello world!"[..]).to_vec().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn sha2_256_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - assert_eq!( - call_in_wasm( - "test_sha2_256", - &[0], - wasm_method, - &mut ext, - ) - .unwrap(), - hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") - .to_vec() - .encode(), - ); - assert_eq!( - call_in_wasm( - "test_sha2_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ) - .unwrap(), - hex!("c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a") - .to_vec() - .encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + assert_eq!( + call_in_wasm("test_sha2_256", &[0], wasm_method, &mut ext,).unwrap(), + hex!("e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855") + .to_vec() + .encode(), + ); + assert_eq!( + call_in_wasm( + "test_sha2_256", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + hex!("c0535e4be2b79ffd93291305436bf889314e4a3faec05ecffcbb7df31ad9e51a") + .to_vec() + .encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn twox_256_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - assert_eq!( - call_in_wasm( - "test_twox_256", - &[0], - wasm_method, - &mut ext, - ).unwrap(), - 
hex!( - "99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a" - ).to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_twox_256", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), - hex!( - "b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74" - ).to_vec().encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + assert_eq!( + call_in_wasm("test_twox_256", &[0], wasm_method, &mut ext,).unwrap(), + hex!("99e9d85137db46ef4bbea33613baafd56f963c64b1f3685a4eb4abd67ff6203a") + .to_vec() + .encode(), + ); + assert_eq!( + call_in_wasm( + "test_twox_256", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + hex!("b27dfd7f223f177f2a13647b533599af0c07f68bda23d96d059da2b451a35a74") + .to_vec() + .encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn twox_128_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - assert_eq!( - call_in_wasm( - "test_twox_128", - &[0], - wasm_method, - &mut ext, - ).unwrap(), - hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), - ); - assert_eq!( - call_in_wasm( - "test_twox_128", - &b"Hello world!".to_vec().encode(), - wasm_method, - &mut ext, - ).unwrap(), - hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + assert_eq!( + call_in_wasm("test_twox_128", &[0], wasm_method, &mut ext,).unwrap(), + hex!("99e9d85137db46ef4bbea33613baafd5").to_vec().encode(), + ); + assert_eq!( + call_in_wasm( + "test_twox_128", + &b"Hello world!".to_vec().encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + hex!("b27dfd7f223f177f2a13647b533599af").to_vec().encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", 
test_case(WasmExecutionMethod::Compiled))] fn ed25519_verify_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - let key = ed25519::Pair::from_seed(&blake2_256(b"test")); - let sig = key.sign(b"all ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); - - let other_sig = key.sign(b"all is not ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(other_sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_ed25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), - false.encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + let key = ed25519::Pair::from_seed(&blake2_256(b"test")); + let sig = key.sign(b"all ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_ed25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + true.encode(), + ); + + let other_sig = key.sign(b"all is not ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(other_sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_ed25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + false.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn sr25519_verify_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - let key = sr25519::Pair::from_seed(&blake2_256(b"test")); - let sig = key.sign(b"all ok!"); - let 
mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); - - let other_sig = key.sign(b"all is not ok!"); - let mut calldata = vec![]; - calldata.extend_from_slice(key.public().as_ref()); - calldata.extend_from_slice(other_sig.as_ref()); - - assert_eq!( - call_in_wasm( - "test_sr25519_verify", - &calldata.encode(), - wasm_method, - &mut ext, - ).unwrap(), - false.encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + let key = sr25519::Pair::from_seed(&blake2_256(b"test")); + let sig = key.sign(b"all ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_sr25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + true.encode(), + ); + + let other_sig = key.sign(b"all is not ok!"); + let mut calldata = vec![]; + calldata.extend_from_slice(key.public().as_ref()); + calldata.extend_from_slice(other_sig.as_ref()); + + assert_eq!( + call_in_wasm( + "test_sr25519_verify", + &calldata.encode(), + wasm_method, + &mut ext, + ) + .unwrap(), + false.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn ordered_trie_root_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; - assert_eq!( - call_in_wasm( - "test_ordered_trie_root", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), - Layout::::ordered_trie_root(trie_input.iter()).as_bytes().encode(), - ); + let mut ext = TestExternalities::default(); + let trie_input = vec![b"zero".to_vec(), b"one".to_vec(), b"two".to_vec()]; + assert_eq!( + 
call_in_wasm("test_ordered_trie_root", &[0], wasm_method, &mut ext.ext(),).unwrap(), + Layout::::ordered_trie_root(trie_input.iter()) + .as_bytes() + .encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn offchain_local_storage_should_work(wasm_method: WasmExecutionMethod) { - use sp_core::offchain::OffchainStorage; - - let mut ext = TestExternalities::default(); - let (offchain, state) = testing::TestOffchainExt::new(); - ext.register_extension(OffchainExt::new(offchain)); - assert_eq!( - call_in_wasm( - "test_offchain_local_storage", - &[0], - wasm_method, - &mut ext.ext(), - ).unwrap(), - true.encode(), - ); - assert_eq!(state.read().persistent_storage.get(b"", b"test"), Some(vec![])); + use sp_core::offchain::OffchainStorage; + + let mut ext = TestExternalities::default(); + let (offchain, state) = testing::TestOffchainExt::new(); + ext.register_extension(OffchainExt::new(offchain)); + assert_eq!( + call_in_wasm( + "test_offchain_local_storage", + &[0], + wasm_method, + &mut ext.ext(), + ) + .unwrap(), + true.encode(), + ); + assert_eq!( + state.read().persistent_storage.get(b"", b"test"), + Some(vec![]) + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn offchain_http_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let (offchain, state) = testing::TestOffchainExt::new(); - ext.register_extension(OffchainExt::new(offchain)); - state.write().expect_request( - 0, - testing::PendingRequest { - method: "POST".into(), - uri: "http://localhost:12345".into(), - body: vec![1, 2, 3, 4], - headers: vec![("X-Auth".to_owned(), "test".to_owned())], - sent: true, - response: Some(vec![1, 2, 3]), - response_headers: vec![("X-Auth".to_owned(), "hello".to_owned())], - ..Default::default() - }, - ); - - assert_eq!( - call_in_wasm( - "test_offchain_http", - &[0], - 
wasm_method, - &mut ext.ext(), - ).unwrap(), - true.encode(), - ); + let mut ext = TestExternalities::default(); + let (offchain, state) = testing::TestOffchainExt::new(); + ext.register_extension(OffchainExt::new(offchain)); + state.write().expect_request( + 0, + testing::PendingRequest { + method: "POST".into(), + uri: "http://localhost:12345".into(), + body: vec![1, 2, 3, 4], + headers: vec![("X-Auth".to_owned(), "test".to_owned())], + sent: true, + response: Some(vec![1, 2, 3]), + response_headers: vec![("X-Auth".to_owned(), "hello".to_owned())], + ..Default::default() + }, + ); + + assert_eq!( + call_in_wasm("test_offchain_http", &[0], wasm_method, &mut ext.ext(),).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] #[should_panic(expected = "Allocator ran out of space")] fn should_trap_when_heap_exhausted(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - - let executor = crate::WasmExecutor::new( - wasm_method, - Some(17), // `17` is the initial number of pages compiled into the binary. - HostFunctions::host_functions(), - true, - 8, - ); - executor.call_in_wasm( - &WASM_BINARY[..], - None, - "test_exhaust_heap", - &[0], - &mut ext.ext(), - ).unwrap(); + let mut ext = TestExternalities::default(); + + let executor = crate::WasmExecutor::new( + wasm_method, + Some(17), // `17` is the initial number of pages compiled into the binary. 
+ HostFunctions::host_functions(), + true, + 8, + ); + executor + .call_in_wasm( + &WASM_BINARY[..], + None, + "test_exhaust_heap", + &[0], + &mut ext.ext(), + ) + .unwrap(); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn returns_mutable_static(wasm_method: WasmExecutionMethod) { - let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( - wasm_method, - 1024, - &WASM_BINARY[..], - HostFunctions::host_functions(), - true, - ).expect("Creates runtime"); - - let instance = runtime.new_instance().unwrap(); - let res = instance.call("returns_mutable_static", &[0]).unwrap(); - assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); - - // We expect that every invocation will need to return the initial - // value plus one. If the value increases more than that then it is - // a sign that the wasm runtime preserves the memory content. - let res = instance.call("returns_mutable_static", &[0]).unwrap(); - assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); + let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( + wasm_method, + 1024, + &WASM_BINARY[..], + HostFunctions::host_functions(), + true, + ) + .expect("Creates runtime"); + + let instance = runtime.new_instance().unwrap(); + let res = instance.call("returns_mutable_static", &[0]).unwrap(); + assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); + + // We expect that every invocation will need to return the initial + // value plus one. If the value increases more than that then it is + // a sign that the wasm runtime preserves the memory content. 
+ let res = instance.call("returns_mutable_static", &[0]).unwrap(); + assert_eq!(33, u64::decode(&mut &res[..]).unwrap()); } // If we didn't restore the wasm instance properly, on a trap the stack pointer would not be @@ -552,52 +519,55 @@ fn returns_mutable_static(wasm_method: WasmExecutionMethod) { #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn restoration_of_globals(wasm_method: WasmExecutionMethod) { - // Allocate 32 pages (of 65536 bytes) which gives the runtime 2048KB of heap to operate on - // (plus some additional space unused from the initial pages requested by the wasm runtime - // module). - // - // The fixture performs 2 allocations of 768KB and this theoretically gives 1536KB, however, due - // to our allocator algorithm there are inefficiencies. - const REQUIRED_MEMORY_PAGES: u64 = 32; - - let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( - wasm_method, - REQUIRED_MEMORY_PAGES, - &WASM_BINARY[..], - HostFunctions::host_functions(), - true, - ).expect("Creates runtime"); - let instance = runtime.new_instance().unwrap(); - - // On the first invocation we allocate approx. 768KB (75%) of stack and then trap. - let res = instance.call("allocates_huge_stack_array", &true.encode()); - assert!(res.is_err()); - - // On the second invocation we allocate yet another 768KB (75%) of stack - let res = instance.call("allocates_huge_stack_array", &false.encode()); - assert!(res.is_ok()); + // Allocate 32 pages (of 65536 bytes) which gives the runtime 2048KB of heap to operate on + // (plus some additional space unused from the initial pages requested by the wasm runtime + // module). + // + // The fixture performs 2 allocations of 768KB and this theoretically gives 1536KB, however, due + // to our allocator algorithm there are inefficiencies. 
+ const REQUIRED_MEMORY_PAGES: u64 = 32; + + let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( + wasm_method, + REQUIRED_MEMORY_PAGES, + &WASM_BINARY[..], + HostFunctions::host_functions(), + true, + ) + .expect("Creates runtime"); + let instance = runtime.new_instance().unwrap(); + + // On the first invocation we allocate approx. 768KB (75%) of stack and then trap. + let res = instance.call("allocates_huge_stack_array", &true.encode()); + assert!(res.is_err()); + + // On the second invocation we allocate yet another 768KB (75%) of stack + let res = instance.call("allocates_huge_stack_array", &false.encode()); + assert!(res.is_ok()); } #[test_case(WasmExecutionMethod::Interpreted)] fn heap_is_reset_between_calls(wasm_method: WasmExecutionMethod) { - let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( - wasm_method, - 1024, - &WASM_BINARY[..], - HostFunctions::host_functions(), - true, - ).expect("Creates runtime"); - let instance = runtime.new_instance().unwrap(); - - let heap_base = instance.get_global_const("__heap_base") - .expect("`__heap_base` is valid") - .expect("`__heap_base` exists") - .as_i32() - .expect("`__heap_base` is an `i32`"); - - let params = (heap_base as u32, 512u32 * 64 * 1024).encode(); - instance.call("check_and_set_in_heap", ¶ms).unwrap(); - - // Cal it a second time to check that the heap was freed. 
- instance.call("check_and_set_in_heap", ¶ms).unwrap(); + let runtime = crate::wasm_runtime::create_wasm_runtime_with_code( + wasm_method, + 1024, + &WASM_BINARY[..], + HostFunctions::host_functions(), + true, + ) + .expect("Creates runtime"); + let instance = runtime.new_instance().unwrap(); + + let heap_base = instance + .get_global_const("__heap_base") + .expect("`__heap_base` is valid") + .expect("`__heap_base` exists") + .as_i32() + .expect("`__heap_base` is an `i32`"); + + let params = (heap_base as u32, 512u32 * 64 * 1024).encode(); + instance.call("check_and_set_in_heap", ¶ms).unwrap(); + + // Cal it a second time to check that the heap was freed. + instance.call("check_and_set_in_heap", ¶ms).unwrap(); } diff --git a/client/executor/src/integration_tests/sandbox.rs b/client/executor/src/integration_tests/sandbox.rs index 8e8b7896cf..dceec8092e 100644 --- a/client/executor/src/integration_tests/sandbox.rs +++ b/client/executor/src/integration_tests/sandbox.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use super::{TestExternalities, call_in_wasm}; +use super::{call_in_wasm, TestExternalities}; use crate::WasmExecutionMethod; use codec::Encode; @@ -24,10 +24,11 @@ use wabt; #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn sandbox_should_work(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -46,26 +47,25 @@ fn sandbox_should_work(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox", &code, wasm_method, &mut ext,).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn sandbox_trap(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) (func (export "call") @@ -73,26 +73,24 @@ fn sandbox_trap(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap(); - - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - vec![0], - ); + "#, + ) + .unwrap(); + + assert_eq!( + call_in_wasm("test_sandbox", &code, wasm_method, &mut ext,).unwrap(), + vec![0], + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", 
test_case(WasmExecutionMethod::Compiled))] fn start_called(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) (import "env" "inc_counter" (func $inc_counter (param i32) (result i32))) @@ -117,26 +115,25 @@ fn start_called(wasm_method: WasmExecutionMethod) { call $assert ) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox", &code, wasm_method, &mut ext,).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn invoke_args(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "assert" (func $assert (param i32))) @@ -157,26 +154,25 @@ fn invoke_args(wasm_method: WasmExecutionMethod) { ) ) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_args", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox_args", &code, wasm_method, &mut ext,).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn return_val(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = 
wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (func (export "call") (param $x i32) (result i32) (i32.add @@ -185,72 +181,65 @@ fn return_val(wasm_method: WasmExecutionMethod) { ) ) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_return_val", - &code, - wasm_method, - &mut ext, - ).unwrap(), - true.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox_return_val", &code, wasm_method, &mut ext,).unwrap(), + true.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn unlinkable_module(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (import "env" "non-existent" (func)) (func (export "call") ) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), - 1u8.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), + 1u8.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn corrupted_module(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); - - // Corrupted wasm file - let code = vec![0u8, 0, 0, 0, 1, 0, 0, 0].encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), - 1u8.encode(), - ); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); + + // Corrupted wasm file + let code = vec![0u8, 0, 0, 0, 1, 0, 0, 0].encode(); + + assert_eq!( + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut 
ext,).unwrap(), + 1u8.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn start_fn_ok(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (func (export "call") ) @@ -260,26 +249,25 @@ fn start_fn_ok(wasm_method: WasmExecutionMethod) { (start $start) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), - 0u8.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), + 0u8.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn start_fn_traps(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (func (export "call") ) @@ -290,38 +278,35 @@ fn start_fn_traps(wasm_method: WasmExecutionMethod) { (start $start) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_instantiate", - &code, - wasm_method, - &mut ext, - ).unwrap(), - 2u8.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox_instantiate", &code, wasm_method, &mut ext,).unwrap(), + 2u8.encode(), + ); } #[test_case(WasmExecutionMethod::Interpreted)] #[cfg_attr(feature = "wasmtime", test_case(WasmExecutionMethod::Compiled))] fn get_global_val_works(wasm_method: WasmExecutionMethod) { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + let mut ext = TestExternalities::default(); + 
let mut ext = ext.ext(); - let code = wabt::wat2wasm(r#" + let code = wabt::wat2wasm( + r#" (module (global (export "test_global") i64 (i64.const 500)) ) - "#).unwrap().encode(); - - assert_eq!( - call_in_wasm( - "test_sandbox_get_global_val", - &code, - wasm_method, - &mut ext, - ).unwrap(), - 500i64.encode(), - ); + "#, + ) + .unwrap() + .encode(); + + assert_eq!( + call_in_wasm("test_sandbox_get_global_val", &code, wasm_method, &mut ext,).unwrap(), + 500i64.encode(), + ); } diff --git a/client/executor/src/lib.rs b/client/executor/src/lib.rs index c3b41bd199..f0f1f480b9 100644 --- a/client/executor/src/lib.rs +++ b/client/executor/src/lib.rs @@ -27,66 +27,64 @@ //! wasm engine used, instance cache. #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "128"] #[macro_use] mod native_executor; -mod wasm_runtime; #[cfg(test)] mod integration_tests; +mod wasm_runtime; -pub use wasmi; -pub use native_executor::{with_externalities_safe, NativeExecutor, WasmExecutor, NativeExecutionDispatch}; -pub use sp_version::{RuntimeVersion, NativeVersion}; pub use codec::Codec; +pub use native_executor::{ + with_externalities_safe, NativeExecutionDispatch, NativeExecutor, WasmExecutor, +}; #[doc(hidden)] -pub use sp_core::traits::{Externalities, CallInWasm}; +pub use sp_core::traits::{CallInWasm, Externalities}; +pub use sp_version::{NativeVersion, RuntimeVersion}; #[doc(hidden)] pub use sp_wasm_interface; pub use wasm_runtime::WasmExecutionMethod; +pub use wasmi; pub use sc_executor_common::{error, sandbox}; /// Provides runtime information. pub trait RuntimeInfo { - /// Native runtime information. - fn native_version(&self) -> &NativeVersion; + /// Native runtime information. + fn native_version(&self) -> &NativeVersion; - /// Extract [`RuntimeVersion`](sp_version::RuntimeVersion) of the given `runtime_code`. 
- fn runtime_version( - &self, - ext: &mut dyn Externalities, - runtime_code: &sp_core::traits::RuntimeCode, - ) -> error::Result; + /// Extract [`RuntimeVersion`](sp_version::RuntimeVersion) of the given `runtime_code`. + fn runtime_version( + &self, + ext: &mut dyn Externalities, + runtime_code: &sp_core::traits::RuntimeCode, + ) -> error::Result; } #[cfg(test)] mod tests { - use super::*; - use sc_runtime_test::WASM_BINARY; - use sp_io::TestExternalities; - use sp_wasm_interface::HostFunctions; - use sp_core::traits::CallInWasm; + use super::*; + use sc_runtime_test::WASM_BINARY; + use sp_core::traits::CallInWasm; + use sp_io::TestExternalities; + use sp_wasm_interface::HostFunctions; - #[test] - fn call_in_interpreted_wasm_works() { - let mut ext = TestExternalities::default(); - let mut ext = ext.ext(); + #[test] + fn call_in_interpreted_wasm_works() { + let mut ext = TestExternalities::default(); + let mut ext = ext.ext(); - let executor = WasmExecutor::new( - WasmExecutionMethod::Interpreted, - Some(8), - sp_io::SubstrateHostFunctions::host_functions(), - true, - 8, - ); - let res = executor.call_in_wasm( - &WASM_BINARY[..], - None, - "test_empty_return", - &[], - &mut ext, - ).unwrap(); - assert_eq!(res, vec![0u8; 0]); - } + let executor = WasmExecutor::new( + WasmExecutionMethod::Interpreted, + Some(8), + sp_io::SubstrateHostFunctions::host_functions(), + true, + 8, + ); + let res = executor + .call_in_wasm(&WASM_BINARY[..], None, "test_empty_return", &[], &mut ext) + .unwrap(); + assert_eq!(res, vec![0u8; 0]); + } } diff --git a/client/executor/src/native_executor.rs b/client/executor/src/native_executor.rs index 778bc80800..4980b4e7a9 100644 --- a/client/executor/src/native_executor.rs +++ b/client/executor/src/native_executor.rs @@ -15,16 +15,24 @@ // along with Substrate. If not, see . 
use crate::{ - RuntimeInfo, error::{Error, Result}, - wasm_runtime::{RuntimeCache, WasmExecutionMethod}, + error::{Error, Result}, + wasm_runtime::{RuntimeCache, WasmExecutionMethod}, + RuntimeInfo, }; -use sp_version::{NativeVersion, RuntimeVersion}; use codec::{Decode, Encode}; -use sp_core::{NativeOrEncoded, traits::{CodeExecutor, Externalities, RuntimeCode}}; use log::trace; -use std::{result, panic::{UnwindSafe, AssertUnwindSafe}, sync::Arc}; -use sp_wasm_interface::{HostFunctions, Function}; use sc_executor_common::wasm_runtime::WasmInstance; +use sp_core::{ + traits::{CodeExecutor, Externalities, RuntimeCode}, + NativeOrEncoded, +}; +use sp_version::{NativeVersion, RuntimeVersion}; +use sp_wasm_interface::{Function, HostFunctions}; +use std::{ + panic::{AssertUnwindSafe, UnwindSafe}, + result, + sync::Arc, +}; /// Default num of pages for the heap const DEFAULT_HEAP_PAGES: u64 = 1024; @@ -33,348 +41,337 @@ const DEFAULT_HEAP_PAGES: u64 = 1024; /// /// If the inner closure panics, it will be caught and return an error. pub fn with_externalities_safe(ext: &mut dyn Externalities, f: F) -> Result - where F: UnwindSafe + FnOnce() -> U +where + F: UnwindSafe + FnOnce() -> U, { - sp_externalities::set_and_run_with_externalities( - ext, - move || { - // Substrate uses custom panic hook that terminates process on panic. Disable - // termination for the native call. - let _guard = sp_panic_handler::AbortGuard::force_unwind(); - std::panic::catch_unwind(f).map_err(|e| { - if let Some(err) = e.downcast_ref::() { - Error::RuntimePanicked(err.clone()) - } else if let Some(err) = e.downcast_ref::<&'static str>() { - Error::RuntimePanicked(err.to_string()) - } else { - Error::RuntimePanicked("Unknown panic".into()) - } - }) - }, - ) + sp_externalities::set_and_run_with_externalities(ext, move || { + // Substrate uses custom panic hook that terminates process on panic. Disable + // termination for the native call. 
+ let _guard = sp_panic_handler::AbortGuard::force_unwind(); + std::panic::catch_unwind(f).map_err(|e| { + if let Some(err) = e.downcast_ref::() { + Error::RuntimePanicked(err.clone()) + } else if let Some(err) = e.downcast_ref::<&'static str>() { + Error::RuntimePanicked(err.to_string()) + } else { + Error::RuntimePanicked("Unknown panic".into()) + } + }) + }) } /// Delegate for dispatching a CodeExecutor call. /// /// By dispatching we mean that we execute a runtime function specified by it's name. pub trait NativeExecutionDispatch: Send + Sync { - /// Host functions for custom runtime interfaces that should be callable from within the runtime - /// besides the default Substrate runtime interfaces. - type ExtendHostFunctions: HostFunctions; + /// Host functions for custom runtime interfaces that should be callable from within the runtime + /// besides the default Substrate runtime interfaces. + type ExtendHostFunctions: HostFunctions; - /// Dispatch a method in the runtime. - /// - /// If the method with the specified name doesn't exist then `Err` is returned. - fn dispatch(ext: &mut dyn Externalities, method: &str, data: &[u8]) -> Result>; + /// Dispatch a method in the runtime. + /// + /// If the method with the specified name doesn't exist then `Err` is returned. + fn dispatch(ext: &mut dyn Externalities, method: &str, data: &[u8]) -> Result>; - /// Provide native runtime version. - fn native_version() -> NativeVersion; + /// Provide native runtime version. + fn native_version() -> NativeVersion; } /// An abstraction over Wasm code executor. Supports selecting execution backend and /// manages runtime cache. #[derive(Clone)] pub struct WasmExecutor { - /// Method used to execute fallback Wasm code. - method: WasmExecutionMethod, - /// The number of 64KB pages to allocate for Wasm execution. - default_heap_pages: u64, - /// The host functions registered with this instance. - host_functions: Arc>, - /// WASM runtime cache. 
- cache: Arc, - /// Allow missing function imports. - allow_missing_func_imports: bool, - /// The size of the instances cache. - max_runtime_instances: usize, + /// Method used to execute fallback Wasm code. + method: WasmExecutionMethod, + /// The number of 64KB pages to allocate for Wasm execution. + default_heap_pages: u64, + /// The host functions registered with this instance. + host_functions: Arc>, + /// WASM runtime cache. + cache: Arc, + /// Allow missing function imports. + allow_missing_func_imports: bool, + /// The size of the instances cache. + max_runtime_instances: usize, } impl WasmExecutor { - /// Create new instance. - /// - /// # Parameters - /// - /// `method` - Method used to execute Wasm code. - /// - /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. - /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. - pub fn new( - method: WasmExecutionMethod, - default_heap_pages: Option, - host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, - max_runtime_instances: usize, - ) -> Self { - WasmExecutor { - method, - default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), - host_functions: Arc::new(host_functions), - cache: Arc::new(RuntimeCache::new(max_runtime_instances)), - allow_missing_func_imports, - max_runtime_instances, - } - } - - /// Execute the given closure `f` with the latest runtime (based on `runtime_code`). - /// - /// The closure `f` is expected to return `Err(_)` when there happened a `panic!` in native code - /// while executing the runtime in Wasm. If a `panic!` occurred, the runtime is invalidated to - /// prevent any poisoned state. Native runtime execution does not need to report back - /// any `panic!`. - /// - /// # Safety - /// - /// `runtime` and `ext` are given as `AssertUnwindSafe` to the closure. As described above, the - /// runtime is invalidated on any `panic!` to prevent a poisoned state. 
`ext` is already - /// implicitly handled as unwind safe, as we store it in a global variable while executing the - /// native runtime. - fn with_instance( - &self, - runtime_code: &RuntimeCode, - ext: &mut dyn Externalities, - f: F, - ) -> Result - where F: FnOnce( - AssertUnwindSafe<&dyn WasmInstance>, - Option<&RuntimeVersion>, - AssertUnwindSafe<&mut dyn Externalities>, - ) -> Result>, - { - match self.cache.with_instance( - runtime_code, - ext, - self.method, - self.default_heap_pages, - &*self.host_functions, - self.allow_missing_func_imports, - |instance, version, ext| { - let instance = AssertUnwindSafe(instance); - let ext = AssertUnwindSafe(ext); - f(instance, version, ext) - } - )? { - Ok(r) => r, - Err(e) => Err(e), - } - } + /// Create new instance. + /// + /// # Parameters + /// + /// `method` - Method used to execute Wasm code. + /// + /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. + /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + pub fn new( + method: WasmExecutionMethod, + default_heap_pages: Option, + host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, + max_runtime_instances: usize, + ) -> Self { + WasmExecutor { + method, + default_heap_pages: default_heap_pages.unwrap_or(DEFAULT_HEAP_PAGES), + host_functions: Arc::new(host_functions), + cache: Arc::new(RuntimeCache::new(max_runtime_instances)), + allow_missing_func_imports, + max_runtime_instances, + } + } + + /// Execute the given closure `f` with the latest runtime (based on `runtime_code`). + /// + /// The closure `f` is expected to return `Err(_)` when there happened a `panic!` in native code + /// while executing the runtime in Wasm. If a `panic!` occurred, the runtime is invalidated to + /// prevent any poisoned state. Native runtime execution does not need to report back + /// any `panic!`. + /// + /// # Safety + /// + /// `runtime` and `ext` are given as `AssertUnwindSafe` to the closure. 
As described above, the + /// runtime is invalidated on any `panic!` to prevent a poisoned state. `ext` is already + /// implicitly handled as unwind safe, as we store it in a global variable while executing the + /// native runtime. + fn with_instance( + &self, + runtime_code: &RuntimeCode, + ext: &mut dyn Externalities, + f: F, + ) -> Result + where + F: FnOnce( + AssertUnwindSafe<&dyn WasmInstance>, + Option<&RuntimeVersion>, + AssertUnwindSafe<&mut dyn Externalities>, + ) -> Result>, + { + match self.cache.with_instance( + runtime_code, + ext, + self.method, + self.default_heap_pages, + &*self.host_functions, + self.allow_missing_func_imports, + |instance, version, ext| { + let instance = AssertUnwindSafe(instance); + let ext = AssertUnwindSafe(ext); + f(instance, version, ext) + }, + )? { + Ok(r) => r, + Err(e) => Err(e), + } + } } impl sp_core::traits::CallInWasm for WasmExecutor { - fn call_in_wasm( - &self, - wasm_code: &[u8], - code_hash: Option>, - method: &str, - call_data: &[u8], - ext: &mut dyn Externalities, - ) -> std::result::Result, String> { - if let Some(hash) = code_hash { - let code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_code.into()), - hash, - heap_pages: None, - }; - - self.with_instance(&code, ext, |instance, _, mut ext| { - with_externalities_safe( - &mut **ext, - move || instance.call(method, call_data), - ) - }).map_err(|e| e.to_string()) - } else { - let module = crate::wasm_runtime::create_wasm_runtime_with_code( - self.method, - self.default_heap_pages, - &wasm_code, - self.host_functions.to_vec(), - self.allow_missing_func_imports, - ) - .map_err(|e| format!("Failed to create module: {:?}", e))?; - - let instance = module.new_instance() - .map_err(|e| format!("Failed to create instance: {:?}", e))?; - - let instance = AssertUnwindSafe(instance); - let mut ext = AssertUnwindSafe(ext); - - with_externalities_safe( - &mut **ext, - move || instance.call(method, call_data), - ) - .and_then(|r| r) - 
.map_err(|e| e.to_string()) - } - } + fn call_in_wasm( + &self, + wasm_code: &[u8], + code_hash: Option>, + method: &str, + call_data: &[u8], + ext: &mut dyn Externalities, + ) -> std::result::Result, String> { + if let Some(hash) = code_hash { + let code = RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode(wasm_code.into()), + hash, + heap_pages: None, + }; + + self.with_instance(&code, ext, |instance, _, mut ext| { + with_externalities_safe(&mut **ext, move || instance.call(method, call_data)) + }) + .map_err(|e| e.to_string()) + } else { + let module = crate::wasm_runtime::create_wasm_runtime_with_code( + self.method, + self.default_heap_pages, + &wasm_code, + self.host_functions.to_vec(), + self.allow_missing_func_imports, + ) + .map_err(|e| format!("Failed to create module: {:?}", e))?; + + let instance = module + .new_instance() + .map_err(|e| format!("Failed to create instance: {:?}", e))?; + + let instance = AssertUnwindSafe(instance); + let mut ext = AssertUnwindSafe(ext); + + with_externalities_safe(&mut **ext, move || instance.call(method, call_data)) + .and_then(|r| r) + .map_err(|e| e.to_string()) + } + } } /// A generic `CodeExecutor` implementation that uses a delegate to determine wasm code equivalence /// and dispatch to native code when possible, falling back on `WasmExecutor` when not. pub struct NativeExecutor { - /// Dummy field to avoid the compiler complaining about us not using `D`. - _dummy: std::marker::PhantomData, - /// Native runtime version info. - native_version: NativeVersion, - /// Fallback wasm executor. - wasm: WasmExecutor, + /// Dummy field to avoid the compiler complaining about us not using `D`. + _dummy: std::marker::PhantomData, + /// Native runtime version info. + native_version: NativeVersion, + /// Fallback wasm executor. + wasm: WasmExecutor, } impl NativeExecutor { - /// Create new instance. - /// - /// # Parameters - /// - /// `fallback_method` - Method used to execute fallback Wasm code. 
- /// - /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. - /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. - pub fn new( - fallback_method: WasmExecutionMethod, - default_heap_pages: Option, - max_runtime_instances: usize, - ) -> Self { - let mut host_functions = sp_io::SubstrateHostFunctions::host_functions(); - - // Add the custom host functions provided by the user. - host_functions.extend(D::ExtendHostFunctions::host_functions()); - let wasm_executor = WasmExecutor::new( - fallback_method, - default_heap_pages, - host_functions, - false, - max_runtime_instances, - ); - - NativeExecutor { - _dummy: Default::default(), - native_version: D::native_version(), - wasm: wasm_executor, - } - } + /// Create new instance. + /// + /// # Parameters + /// + /// `fallback_method` - Method used to execute fallback Wasm code. + /// + /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. + /// Defaults to `DEFAULT_HEAP_PAGES` if `None` is provided. + pub fn new( + fallback_method: WasmExecutionMethod, + default_heap_pages: Option, + max_runtime_instances: usize, + ) -> Self { + let mut host_functions = sp_io::SubstrateHostFunctions::host_functions(); + + // Add the custom host functions provided by the user. 
+ host_functions.extend(D::ExtendHostFunctions::host_functions()); + let wasm_executor = WasmExecutor::new( + fallback_method, + default_heap_pages, + host_functions, + false, + max_runtime_instances, + ); + + NativeExecutor { + _dummy: Default::default(), + native_version: D::native_version(), + wasm: wasm_executor, + } + } } impl RuntimeInfo for NativeExecutor { - fn native_version(&self) -> &NativeVersion { - &self.native_version - } - - fn runtime_version( - &self, - ext: &mut dyn Externalities, - runtime_code: &RuntimeCode, - ) -> Result { - self.wasm.with_instance( - runtime_code, - ext, - |_instance, version, _ext| - Ok(version.cloned().ok_or_else(|| Error::ApiError("Unknown version".into()))) - ) - } + fn native_version(&self) -> &NativeVersion { + &self.native_version + } + + fn runtime_version( + &self, + ext: &mut dyn Externalities, + runtime_code: &RuntimeCode, + ) -> Result { + self.wasm + .with_instance(runtime_code, ext, |_instance, version, _ext| { + Ok(version + .cloned() + .ok_or_else(|| Error::ApiError("Unknown version".into()))) + }) + } } impl CodeExecutor for NativeExecutor { - type Error = Error; - - fn call< - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - ext: &mut dyn Externalities, - runtime_code: &RuntimeCode, - method: &str, - data: &[u8], - use_native: bool, - native_call: Option, - ) -> (Result>, bool) { - let mut used_native = false; - let result = self.wasm.with_instance( - runtime_code, - ext, - |instance, onchain_version, mut ext| { - let onchain_version = onchain_version.ok_or_else( - || Error::ApiError("Unknown version".into()) - )?; - match ( - use_native, - onchain_version.can_call_with(&self.native_version.runtime_version), - native_call, - ) { - (_, false, _) => { - trace!( - target: "executor", - "Request for native execution failed (native: {}, chain: {})", - self.native_version.runtime_version, - onchain_version, - ); - - with_externalities_safe( - &mut **ext, - move || 
instance.call(method, data).map(NativeOrEncoded::Encoded) - ) - } - (false, _, _) => { - with_externalities_safe( - &mut **ext, - move || instance.call(method, data).map(NativeOrEncoded::Encoded) - ) - }, - (true, true, Some(call)) => { - trace!( - target: "executor", - "Request for native execution with native call succeeded \ - (native: {}, chain: {}).", - self.native_version.runtime_version, - onchain_version, - ); - - used_native = true; - let res = with_externalities_safe(&mut **ext, move || (call)()) - .and_then(|r| r - .map(NativeOrEncoded::Native) - .map_err(|s| Error::ApiError(s.to_string())) - ); - - Ok(res) - } - _ => { - trace!( - target: "executor", - "Request for native execution succeeded (native: {}, chain: {})", - self.native_version.runtime_version, - onchain_version - ); - - used_native = true; - Ok(D::dispatch(&mut **ext, method, data).map(NativeOrEncoded::Encoded)) - } - } - } - ); - (result, used_native) - } + type Error = Error; + + fn call< + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + ext: &mut dyn Externalities, + runtime_code: &RuntimeCode, + method: &str, + data: &[u8], + use_native: bool, + native_call: Option, + ) -> (Result>, bool) { + let mut used_native = false; + let result = + self.wasm + .with_instance(runtime_code, ext, |instance, onchain_version, mut ext| { + let onchain_version = + onchain_version.ok_or_else(|| Error::ApiError("Unknown version".into()))?; + match ( + use_native, + onchain_version.can_call_with(&self.native_version.runtime_version), + native_call, + ) { + (_, false, _) => { + trace!( + target: "executor", + "Request for native execution failed (native: {}, chain: {})", + self.native_version.runtime_version, + onchain_version, + ); + + with_externalities_safe(&mut **ext, move || { + instance.call(method, data).map(NativeOrEncoded::Encoded) + }) + } + (false, _, _) => with_externalities_safe(&mut **ext, move || { + instance.call(method, 
data).map(NativeOrEncoded::Encoded) + }), + (true, true, Some(call)) => { + trace!( + target: "executor", + "Request for native execution with native call succeeded \ + (native: {}, chain: {}).", + self.native_version.runtime_version, + onchain_version, + ); + + used_native = true; + let res = with_externalities_safe(&mut **ext, move || (call)()) + .and_then(|r| { + r.map(NativeOrEncoded::Native) + .map_err(|s| Error::ApiError(s.to_string())) + }); + + Ok(res) + } + _ => { + trace!( + target: "executor", + "Request for native execution succeeded (native: {}, chain: {})", + self.native_version.runtime_version, + onchain_version + ); + + used_native = true; + Ok(D::dispatch(&mut **ext, method, data).map(NativeOrEncoded::Encoded)) + } + } + }); + (result, used_native) + } } impl Clone for NativeExecutor { - fn clone(&self) -> Self { - NativeExecutor { - _dummy: Default::default(), - native_version: D::native_version(), - wasm: self.wasm.clone(), - } - } + fn clone(&self) -> Self { + NativeExecutor { + _dummy: Default::default(), + native_version: D::native_version(), + wasm: self.wasm.clone(), + } + } } impl sp_core::traits::CallInWasm for NativeExecutor { - fn call_in_wasm( - &self, - wasm_blob: &[u8], - code_hash: Option>, - method: &str, - call_data: &[u8], - ext: &mut dyn Externalities, - ) -> std::result::Result, String> { - self.wasm.call_in_wasm(wasm_blob, code_hash, method, call_data, ext) - } + fn call_in_wasm( + &self, + wasm_blob: &[u8], + code_hash: Option>, + method: &str, + call_data: &[u8], + ext: &mut dyn Externalities, + ) -> std::result::Result, String> { + self.wasm + .call_in_wasm(wasm_blob, code_hash, method, call_data, ext) + } } /// Implements a `NativeExecutionDispatch` for provided parameters. @@ -453,37 +450,40 @@ macro_rules! 
native_executor_instance { #[cfg(test)] mod tests { - use super::*; - use sp_runtime_interface::runtime_interface; + use super::*; + use sp_runtime_interface::runtime_interface; - #[runtime_interface] - trait MyInterface { - fn say_hello_world(data: &str) { - println!("Hello world from: {}", data); - } - } + #[runtime_interface] + trait MyInterface { + fn say_hello_world(data: &str) { + println!("Hello world from: {}", data); + } + } - native_executor_instance!( + native_executor_instance!( pub MyExecutor, substrate_test_runtime::api::dispatch, substrate_test_runtime::native_version, (my_interface::HostFunctions, my_interface::HostFunctions), ); - #[test] - fn native_executor_registers_custom_interface() { - let executor = NativeExecutor::::new( - WasmExecutionMethod::Interpreted, - None, - 8, - ); - my_interface::HostFunctions::host_functions().iter().for_each(|function| { - assert_eq!( - executor.wasm.host_functions.iter().filter(|f| f == &function).count(), - 2, - ); - }); - - my_interface::say_hello_world("hey"); - } + #[test] + fn native_executor_registers_custom_interface() { + let executor = NativeExecutor::::new(WasmExecutionMethod::Interpreted, None, 8); + my_interface::HostFunctions::host_functions() + .iter() + .for_each(|function| { + assert_eq!( + executor + .wasm + .host_functions + .iter() + .filter(|f| f == &function) + .count(), + 2, + ); + }); + + my_interface::say_hello_world("hey"); + } } diff --git a/client/executor/src/wasm_runtime.rs b/client/executor/src/wasm_runtime.rs index 87a08f714d..07f79a31e5 100644 --- a/client/executor/src/wasm_runtime.rs +++ b/client/executor/src/wasm_runtime.rs @@ -19,114 +19,113 @@ //! The primary means of accessing the runtimes is through a cache which saves the reusable //! components of the runtime that are expensive to initialize. 
-use std::sync::Arc; use crate::error::{Error, WasmError}; -use parking_lot::Mutex; use codec::Decode; -use sp_core::traits::{Externalities, RuntimeCode, FetchRuntimeCode}; +use parking_lot::Mutex; +use sc_executor_common::wasm_runtime::{WasmInstance, WasmModule}; +use sp_core::traits::{Externalities, FetchRuntimeCode, RuntimeCode}; use sp_version::RuntimeVersion; use std::panic::AssertUnwindSafe; -use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance}; +use std::sync::Arc; use sp_wasm_interface::Function; /// Specification of different methods of executing the runtime Wasm code. #[derive(Debug, PartialEq, Eq, Hash, Copy, Clone)] pub enum WasmExecutionMethod { - /// Uses the Wasmi interpreter. - Interpreted, - /// Uses the Wasmtime compiled runtime. - #[cfg(feature = "wasmtime")] - Compiled, + /// Uses the Wasmi interpreter. + Interpreted, + /// Uses the Wasmtime compiled runtime. + #[cfg(feature = "wasmtime")] + Compiled, } impl Default for WasmExecutionMethod { - fn default() -> WasmExecutionMethod { - WasmExecutionMethod::Interpreted - } + fn default() -> WasmExecutionMethod { + WasmExecutionMethod::Interpreted + } } /// A Wasm runtime object along with its cached runtime version. struct VersionedRuntime { - /// Runtime code hash. - code_hash: Vec, - /// Wasm runtime type. - wasm_method: WasmExecutionMethod, - /// Shared runtime that can spawn instances. - module: Box, - /// The number of WebAssembly heap pages this instance was created with. - heap_pages: u64, - /// Runtime version according to `Core_version` if any. - version: Option, - /// Cached instance pool. - instances: Vec>>>, + /// Runtime code hash. + code_hash: Vec, + /// Wasm runtime type. + wasm_method: WasmExecutionMethod, + /// Shared runtime that can spawn instances. + module: Box, + /// The number of WebAssembly heap pages this instance was created with. + heap_pages: u64, + /// Runtime version according to `Core_version` if any. + version: Option, + /// Cached instance pool. 
+ instances: Vec>>>, } impl VersionedRuntime { - /// Run the given closure `f` with an instance of this runtime. - fn with_instance<'c, R, F>( - &self, - ext: &mut dyn Externalities, - f: F, - ) -> Result - where F: FnOnce( - &dyn WasmInstance, - Option<&RuntimeVersion>, - &mut dyn Externalities) - -> Result, - { - // Find a free instance - let instance = self.instances - .iter() - .enumerate() - .find_map(|(index, i)| i.try_lock().map(|i| (index, i))); - - match instance { - Some((index, mut locked)) => { - let (instance, new_inst) = locked.take() - .map(|r| Ok((r, false))) - .unwrap_or_else(|| self.module.new_instance().map(|i| (i, true)))?; - - let result = f(&*instance, self.version.as_ref(), ext); - if let Err(e) = &result { - if new_inst { - log::warn!( - target: "wasm-runtime", - "Fresh runtime instance failed with {:?}", - e, - ) - } else { - log::warn!( - target: "wasm-runtime", - "Evicting failed runtime instance: {:?}", - e, - ); - } - } else { - *locked = Some(instance); - - if new_inst { - log::debug!( - target: "wasm-runtime", - "Allocated WASM instance {}/{}", - index + 1, - self.instances.len(), - ); - } - } - - result - }, - None => { - log::warn!(target: "wasm-runtime", "Ran out of free WASM instances"); - - // Allocate a new instance - let instance = self.module.new_instance()?; - - f(&*instance, self.version.as_ref(), ext) - } - } - } + /// Run the given closure `f` with an instance of this runtime. 
+ fn with_instance<'c, R, F>(&self, ext: &mut dyn Externalities, f: F) -> Result + where + F: FnOnce( + &dyn WasmInstance, + Option<&RuntimeVersion>, + &mut dyn Externalities, + ) -> Result, + { + // Find a free instance + let instance = self + .instances + .iter() + .enumerate() + .find_map(|(index, i)| i.try_lock().map(|i| (index, i))); + + match instance { + Some((index, mut locked)) => { + let (instance, new_inst) = locked + .take() + .map(|r| Ok((r, false))) + .unwrap_or_else(|| self.module.new_instance().map(|i| (i, true)))?; + + let result = f(&*instance, self.version.as_ref(), ext); + if let Err(e) = &result { + if new_inst { + log::warn!( + target: "wasm-runtime", + "Fresh runtime instance failed with {:?}", + e, + ) + } else { + log::warn!( + target: "wasm-runtime", + "Evicting failed runtime instance: {:?}", + e, + ); + } + } else { + *locked = Some(instance); + + if new_inst { + log::debug!( + target: "wasm-runtime", + "Allocated WASM instance {}/{}", + index + 1, + self.instances.len(), + ); + } + } + + result + } + None => { + log::warn!(target: "wasm-runtime", "Ran out of free WASM instances"); + + // Allocate a new instance + let instance = self.module.new_instance()?; + + f(&*instance, self.version.as_ref(), ext) + } + } + } } const MAX_RUNTIMES: usize = 2; @@ -143,285 +142,289 @@ const MAX_RUNTIMES: usize = 2; /// /// The size of cache is equal to `MAX_RUNTIMES`. pub struct RuntimeCache { - /// A cache of runtimes along with metadata. - /// - /// Runtimes sorted by recent usage. The most recently used is at the front. - runtimes: Mutex<[Option>; MAX_RUNTIMES]>, - /// The size of the instances cache for each runtime. - max_runtime_instances: usize, + /// A cache of runtimes along with metadata. + /// + /// Runtimes sorted by recent usage. The most recently used is at the front. + runtimes: Mutex<[Option>; MAX_RUNTIMES]>, + /// The size of the instances cache for each runtime. 
+ max_runtime_instances: usize, } impl RuntimeCache { - /// Creates a new instance of a runtimes cache. - pub fn new(max_runtime_instances: usize) -> RuntimeCache { - RuntimeCache { - runtimes: Default::default(), - max_runtime_instances, - } - } - - /// Prepares a WASM module instance and executes given function for it. - /// - /// This uses internal cache to find avaiable instance or create a new one. - /// # Parameters - /// - /// `code` - Provides external code or tells the executor to fetch it from storage. - /// - /// `runtime_code` - The runtime wasm code used setup the runtime. - /// - /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. - /// - /// `wasm_method` - Type of WASM backend to use. - /// - /// `host_functions` - The host functions that should be registered for the Wasm runtime. - /// - /// `allow_missing_func_imports` - Ignore missing function imports. - /// - /// `max_runtime_instances` - The size of the instances cache. - /// - /// `f` - Function to execute. - /// - /// # Returns result of `f` wrapped in an additonal result. - /// In case of failure one of two errors can be returned: - /// - /// `Err::InvalidCode` is returned for runtime code issues. - /// - /// `Error::InvalidMemoryReference` is returned if no memory export with the - /// identifier `memory` can be found in the runtime. 
- pub fn with_instance<'c, R, F>( - &self, - runtime_code: &'c RuntimeCode<'c>, - ext: &mut dyn Externalities, - wasm_method: WasmExecutionMethod, - default_heap_pages: u64, - host_functions: &[&'static dyn Function], - allow_missing_func_imports: bool, - f: F, - ) -> Result, Error> - where F: FnOnce( - &dyn WasmInstance, - Option<&RuntimeVersion>, - &mut dyn Externalities) - -> Result, - { - let code_hash = &runtime_code.hash; - let heap_pages = runtime_code.heap_pages.unwrap_or(default_heap_pages); - - let mut runtimes = self.runtimes.lock(); // this must be released prior to calling f - let pos = runtimes.iter().position(|r| r.as_ref().map_or( - false, - |r| r.wasm_method == wasm_method && - r.code_hash == *code_hash && - r.heap_pages == heap_pages - )); - - let runtime = match pos { - Some(n) => runtimes[n] - .clone() - .expect("`position` only returns `Some` for entries that are `Some`"), - None => { - let code = runtime_code.fetch_runtime_code().ok_or(WasmError::CodeNotFound)?; - - let result = create_versioned_wasm_runtime( - &code, - code_hash.clone(), - ext, - wasm_method, - heap_pages, - host_functions.into(), - allow_missing_func_imports, - self.max_runtime_instances, - ); - if let Err(ref err) = result { - log::warn!(target: "wasm-runtime", "Cannot create a runtime: {:?}", err); - } - Arc::new(result?) - } - }; - - // Rearrange runtimes by last recently used. - match pos { - Some(0) => {}, - Some(n) => { - for i in (1 .. n + 1).rev() { - runtimes.swap(i, i - 1); - } - } - None => { - runtimes[MAX_RUNTIMES-1] = Some(runtime.clone()); - for i in (1 .. MAX_RUNTIMES).rev() { - runtimes.swap(i, i - 1); - } - } - } - drop(runtimes); - - Ok(runtime.with_instance(ext, f)) - } + /// Creates a new instance of a runtimes cache. + pub fn new(max_runtime_instances: usize) -> RuntimeCache { + RuntimeCache { + runtimes: Default::default(), + max_runtime_instances, + } + } + + /// Prepares a WASM module instance and executes given function for it. 
+ /// + /// This uses internal cache to find avaiable instance or create a new one. + /// # Parameters + /// + /// `code` - Provides external code or tells the executor to fetch it from storage. + /// + /// `runtime_code` - The runtime wasm code used setup the runtime. + /// + /// `default_heap_pages` - Number of 64KB pages to allocate for Wasm execution. + /// + /// `wasm_method` - Type of WASM backend to use. + /// + /// `host_functions` - The host functions that should be registered for the Wasm runtime. + /// + /// `allow_missing_func_imports` - Ignore missing function imports. + /// + /// `max_runtime_instances` - The size of the instances cache. + /// + /// `f` - Function to execute. + /// + /// # Returns result of `f` wrapped in an additonal result. + /// In case of failure one of two errors can be returned: + /// + /// `Err::InvalidCode` is returned for runtime code issues. + /// + /// `Error::InvalidMemoryReference` is returned if no memory export with the + /// identifier `memory` can be found in the runtime. 
+ pub fn with_instance<'c, R, F>( + &self, + runtime_code: &'c RuntimeCode<'c>, + ext: &mut dyn Externalities, + wasm_method: WasmExecutionMethod, + default_heap_pages: u64, + host_functions: &[&'static dyn Function], + allow_missing_func_imports: bool, + f: F, + ) -> Result, Error> + where + F: FnOnce( + &dyn WasmInstance, + Option<&RuntimeVersion>, + &mut dyn Externalities, + ) -> Result, + { + let code_hash = &runtime_code.hash; + let heap_pages = runtime_code.heap_pages.unwrap_or(default_heap_pages); + + let mut runtimes = self.runtimes.lock(); // this must be released prior to calling f + let pos = runtimes.iter().position(|r| { + r.as_ref().map_or(false, |r| { + r.wasm_method == wasm_method + && r.code_hash == *code_hash + && r.heap_pages == heap_pages + }) + }); + + let runtime = match pos { + Some(n) => runtimes[n] + .clone() + .expect("`position` only returns `Some` for entries that are `Some`"), + None => { + let code = runtime_code + .fetch_runtime_code() + .ok_or(WasmError::CodeNotFound)?; + + let result = create_versioned_wasm_runtime( + &code, + code_hash.clone(), + ext, + wasm_method, + heap_pages, + host_functions.into(), + allow_missing_func_imports, + self.max_runtime_instances, + ); + if let Err(ref err) = result { + log::warn!(target: "wasm-runtime", "Cannot create a runtime: {:?}", err); + } + Arc::new(result?) + } + }; + + // Rearrange runtimes by last recently used. + match pos { + Some(0) => {} + Some(n) => { + for i in (1..n + 1).rev() { + runtimes.swap(i, i - 1); + } + } + None => { + runtimes[MAX_RUNTIMES - 1] = Some(runtime.clone()); + for i in (1..MAX_RUNTIMES).rev() { + runtimes.swap(i, i - 1); + } + } + } + drop(runtimes); + + Ok(runtime.with_instance(ext, f)) + } } /// Create a wasm runtime with the given `code`. 
pub fn create_wasm_runtime_with_code( - wasm_method: WasmExecutionMethod, - heap_pages: u64, - code: &[u8], - host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, + wasm_method: WasmExecutionMethod, + heap_pages: u64, + code: &[u8], + host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, ) -> Result, WasmError> { - match wasm_method { - WasmExecutionMethod::Interpreted => - sc_executor_wasmi::create_runtime( - code, - heap_pages, - host_functions, - allow_missing_func_imports - ).map(|runtime| -> Box { Box::new(runtime) }), - #[cfg(feature = "wasmtime")] - WasmExecutionMethod::Compiled => - sc_executor_wasmtime::create_runtime( - code, - heap_pages, - host_functions, - allow_missing_func_imports - ).map(|runtime| -> Box { Box::new(runtime) }), - } + match wasm_method { + WasmExecutionMethod::Interpreted => sc_executor_wasmi::create_runtime( + code, + heap_pages, + host_functions, + allow_missing_func_imports, + ) + .map(|runtime| -> Box { Box::new(runtime) }), + #[cfg(feature = "wasmtime")] + WasmExecutionMethod::Compiled => sc_executor_wasmtime::create_runtime( + code, + heap_pages, + host_functions, + allow_missing_func_imports, + ) + .map(|runtime| -> Box { Box::new(runtime) }), + } } fn decode_version(version: &[u8]) -> Result { - let v: RuntimeVersion = sp_api::OldRuntimeVersion::decode(&mut &version[..]) - .map_err(|_| - WasmError::Instantiation( - "failed to decode \"Core_version\" result using old runtime version".into(), - ) - )?.into(); - - let core_api_id = sp_core::hashing::blake2_64(b"Core"); - if v.has_api_with(&core_api_id, |v| v >= 3) { - sp_api::RuntimeVersion::decode(&mut &version[..]) - .map_err(|_| - WasmError::Instantiation("failed to decode \"Core_version\" result".into()) - ) - } else { - Ok(v) - } + let v: RuntimeVersion = sp_api::OldRuntimeVersion::decode(&mut &version[..]) + .map_err(|_| { + WasmError::Instantiation( + "failed to decode \"Core_version\" result using old runtime 
version".into(), + ) + })? + .into(); + + let core_api_id = sp_core::hashing::blake2_64(b"Core"); + if v.has_api_with(&core_api_id, |v| v >= 3) { + sp_api::RuntimeVersion::decode(&mut &version[..]).map_err(|_| { + WasmError::Instantiation("failed to decode \"Core_version\" result".into()) + }) + } else { + Ok(v) + } } fn create_versioned_wasm_runtime( - code: &[u8], - code_hash: Vec, - ext: &mut dyn Externalities, - wasm_method: WasmExecutionMethod, - heap_pages: u64, - host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, - max_instances: usize, + code: &[u8], + code_hash: Vec, + ext: &mut dyn Externalities, + wasm_method: WasmExecutionMethod, + heap_pages: u64, + host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, + max_instances: usize, ) -> Result { - #[cfg(not(target_os = "unknown"))] - let time = std::time::Instant::now(); - let mut runtime = create_wasm_runtime_with_code( - wasm_method, - heap_pages, - &code, - host_functions, - allow_missing_func_imports, - )?; - - // Call to determine runtime version. - let version_result = { - // `ext` is already implicitly handled as unwind safe, as we store it in a global variable. - let mut ext = AssertUnwindSafe(ext); - - // The following unwind safety assertion is OK because if the method call panics, the - // runtime will be dropped. - let runtime = AssertUnwindSafe(runtime.as_mut()); - crate::native_executor::with_externalities_safe( - &mut **ext, - move || runtime.new_instance()?.call("Core_version", &[]) - ).map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? 
- }; - let version = match version_result { - Ok(version) => Some(decode_version(&version)?), - Err(_) => None, - }; - #[cfg(not(target_os = "unknown"))] - log::debug!( - target: "wasm-runtime", - "Prepared new runtime version {:?} in {} ms.", - version, - time.elapsed().as_millis(), - ); - - let mut instances = Vec::with_capacity(max_instances); - instances.resize_with(max_instances, || Mutex::new(None)); - - Ok(VersionedRuntime { - code_hash, - module: runtime, - version, - heap_pages, - wasm_method, - instances, - }) + #[cfg(not(target_os = "unknown"))] + let time = std::time::Instant::now(); + let mut runtime = create_wasm_runtime_with_code( + wasm_method, + heap_pages, + &code, + host_functions, + allow_missing_func_imports, + )?; + + // Call to determine runtime version. + let version_result = { + // `ext` is already implicitly handled as unwind safe, as we store it in a global variable. + let mut ext = AssertUnwindSafe(ext); + + // The following unwind safety assertion is OK because if the method call panics, the + // runtime will be dropped. + let runtime = AssertUnwindSafe(runtime.as_mut()); + crate::native_executor::with_externalities_safe(&mut **ext, move || { + runtime.new_instance()?.call("Core_version", &[]) + }) + .map_err(|_| WasmError::Instantiation("panic in call to get runtime version".into()))? 
+ }; + let version = match version_result { + Ok(version) => Some(decode_version(&version)?), + Err(_) => None, + }; + #[cfg(not(target_os = "unknown"))] + log::debug!( + target: "wasm-runtime", + "Prepared new runtime version {:?} in {} ms.", + version, + time.elapsed().as_millis(), + ); + + let mut instances = Vec::with_capacity(max_instances); + instances.resize_with(max_instances, || Mutex::new(None)); + + Ok(VersionedRuntime { + code_hash, + module: runtime, + version, + heap_pages, + wasm_method, + instances, + }) } #[cfg(test)] mod tests { - use super::*; - use sp_wasm_interface::HostFunctions; - use sp_api::{Core, RuntimeApiInfo}; - use substrate_test_runtime::Block; - use codec::Encode; - - #[test] - fn host_functions_are_equal() { - let host_functions = sp_io::SubstrateHostFunctions::host_functions(); - - let equal = &host_functions[..] == &host_functions[..]; - assert!(equal, "Host functions are not equal"); - } - - #[test] - fn old_runtime_version_decodes() { - let old_runtime_version = sp_api::OldRuntimeVersion { - spec_name: "test".into(), - impl_name: "test".into(), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 1)]), - }; - - let version = decode_version(&old_runtime_version.encode()).unwrap(); - assert_eq!(1, version.transaction_version); - } - - #[test] - fn old_runtime_version_decodes_fails_with_version_3() { - let old_runtime_version = sp_api::OldRuntimeVersion { - spec_name: "test".into(), - impl_name: "test".into(), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), - }; - - decode_version(&old_runtime_version.encode()).unwrap_err(); - } - - #[test] - fn new_runtime_version_decodes() { - let old_runtime_version = sp_api::RuntimeVersion { - spec_name: "test".into(), - impl_name: "test".into(), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), - 
transaction_version: 3, - }; - - let version = decode_version(&old_runtime_version.encode()).unwrap(); - assert_eq!(3, version.transaction_version); - } + use super::*; + use codec::Encode; + use sp_api::{Core, RuntimeApiInfo}; + use sp_wasm_interface::HostFunctions; + use substrate_test_runtime::Block; + + #[test] + fn host_functions_are_equal() { + let host_functions = sp_io::SubstrateHostFunctions::host_functions(); + + let equal = &host_functions[..] == &host_functions[..]; + assert!(equal, "Host functions are not equal"); + } + + #[test] + fn old_runtime_version_decodes() { + let old_runtime_version = sp_api::OldRuntimeVersion { + spec_name: "test".into(), + impl_name: "test".into(), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_api::create_apis_vec!([(Core::::ID, 1)]), + }; + + let version = decode_version(&old_runtime_version.encode()).unwrap(); + assert_eq!(1, version.transaction_version); + } + + #[test] + fn old_runtime_version_decodes_fails_with_version_3() { + let old_runtime_version = sp_api::OldRuntimeVersion { + spec_name: "test".into(), + impl_name: "test".into(), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), + }; + + decode_version(&old_runtime_version.encode()).unwrap_err(); + } + + #[test] + fn new_runtime_version_decodes() { + let old_runtime_version = sp_api::RuntimeVersion { + spec_name: "test".into(), + impl_name: "test".into(), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_api::create_apis_vec!([(Core::::ID, 3)]), + transaction_version: 3, + }; + + let version = decode_version(&old_runtime_version.encode()).unwrap(); + assert_eq!(3, version.transaction_version); + } } diff --git a/client/executor/wasmi/src/lib.rs b/client/executor/wasmi/src/lib.rs index e4b4aca409..42bf0bbf8d 100644 --- a/client/executor/wasmi/src/lib.rs +++ b/client/executor/wasmi/src/lib.rs @@ -16,518 +16,547 @@ //! 
This crate provides an implementation of `WasmModule` that is baked by wasmi. -use std::{str, cell::RefCell, sync::Arc}; -use wasmi::{ - Module, ModuleInstance, MemoryInstance, MemoryRef, TableRef, ImportsBuilder, ModuleRef, - memory_units::Pages, - RuntimeValue::{I32, I64, self}, +use codec::{Decode, Encode}; +use log::{debug, error, trace}; +use sc_executor_common::util::{DataSegmentsSnapshot, WasmModuleInfo}; +use sc_executor_common::wasm_runtime::{WasmInstance, WasmModule}; +use sc_executor_common::{ + error::{Error, WasmError}, + sandbox, }; -use codec::{Encode, Decode}; use sp_core::sandbox as sandbox_primitives; -use log::{error, trace, debug}; +use sp_runtime_interface::unpack_ptr_and_len; use sp_wasm_interface::{ - FunctionContext, Pointer, WordSize, Sandbox, MemoryId, Result as WResult, Function, + Function, FunctionContext, MemoryId, Pointer, Result as WResult, Sandbox, WordSize, }; -use sp_runtime_interface::unpack_ptr_and_len; -use sc_executor_common::wasm_runtime::{WasmModule, WasmInstance}; -use sc_executor_common::{ - error::{Error, WasmError}, - sandbox, +use std::{cell::RefCell, str, sync::Arc}; +use wasmi::{ + memory_units::Pages, + ImportsBuilder, MemoryInstance, MemoryRef, Module, ModuleInstance, ModuleRef, + RuntimeValue::{self, I32, I64}, + TableRef, }; -use sc_executor_common::util::{DataSegmentsSnapshot, WasmModuleInfo}; struct FunctionExecutor<'a> { - sandbox_store: sandbox::Store, - heap: sp_allocator::FreeingBumpHeapAllocator, - memory: MemoryRef, - table: Option, - host_functions: &'a [&'static dyn Function], - allow_missing_func_imports: bool, - missing_functions: &'a [String], + sandbox_store: sandbox::Store, + heap: sp_allocator::FreeingBumpHeapAllocator, + memory: MemoryRef, + table: Option, + host_functions: &'a [&'static dyn Function], + allow_missing_func_imports: bool, + missing_functions: &'a [String], } impl<'a> FunctionExecutor<'a> { - fn new( - m: MemoryRef, - heap_base: u32, - t: Option, - host_functions: &'a [&'static dyn 
Function], - allow_missing_func_imports: bool, - missing_functions: &'a [String], - ) -> Result { - Ok(FunctionExecutor { - sandbox_store: sandbox::Store::new(), - heap: sp_allocator::FreeingBumpHeapAllocator::new(heap_base), - memory: m, - table: t, - host_functions, - allow_missing_func_imports, - missing_functions, - }) - } + fn new( + m: MemoryRef, + heap_base: u32, + t: Option, + host_functions: &'a [&'static dyn Function], + allow_missing_func_imports: bool, + missing_functions: &'a [String], + ) -> Result { + Ok(FunctionExecutor { + sandbox_store: sandbox::Store::new(), + heap: sp_allocator::FreeingBumpHeapAllocator::new(heap_base), + memory: m, + table: t, + host_functions, + allow_missing_func_imports, + missing_functions, + }) + } } impl<'a> sandbox::SandboxCapabilities for FunctionExecutor<'a> { - type SupervisorFuncRef = wasmi::FuncRef; - - fn invoke( - &mut self, - dispatch_thunk: &Self::SupervisorFuncRef, - invoke_args_ptr: Pointer, - invoke_args_len: WordSize, - state: u32, - func_idx: sandbox::SupervisorFuncIndex, - ) -> Result { - let result = wasmi::FuncInstance::invoke( - dispatch_thunk, - &[ - RuntimeValue::I32(u32::from(invoke_args_ptr) as i32), - RuntimeValue::I32(invoke_args_len as i32), - RuntimeValue::I32(state as i32), - RuntimeValue::I32(usize::from(func_idx) as i32), - ], - self, - ); - match result { - Ok(Some(RuntimeValue::I64(val))) => Ok(val), - Ok(_) => return Err("Supervisor function returned unexpected result!".into()), - Err(err) => Err(Error::Trap(err)), - } - } + type SupervisorFuncRef = wasmi::FuncRef; + + fn invoke( + &mut self, + dispatch_thunk: &Self::SupervisorFuncRef, + invoke_args_ptr: Pointer, + invoke_args_len: WordSize, + state: u32, + func_idx: sandbox::SupervisorFuncIndex, + ) -> Result { + let result = wasmi::FuncInstance::invoke( + dispatch_thunk, + &[ + RuntimeValue::I32(u32::from(invoke_args_ptr) as i32), + RuntimeValue::I32(invoke_args_len as i32), + RuntimeValue::I32(state as i32), + 
RuntimeValue::I32(usize::from(func_idx) as i32), + ], + self, + ); + match result { + Ok(Some(RuntimeValue::I64(val))) => Ok(val), + Ok(_) => return Err("Supervisor function returned unexpected result!".into()), + Err(err) => Err(Error::Trap(err)), + } + } } impl<'a> FunctionContext for FunctionExecutor<'a> { - fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> WResult<()> { - self.memory.get_into(address.into(), dest).map_err(|e| e.to_string()) - } - - fn write_memory(&mut self, address: Pointer, data: &[u8]) -> WResult<()> { - self.memory.set(address.into(), data).map_err(|e| e.to_string()) - } - - fn allocate_memory(&mut self, size: WordSize) -> WResult> { - let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.allocate(mem, size).map_err(|e| e.to_string()) - }) - } - - fn deallocate_memory(&mut self, ptr: Pointer) -> WResult<()> { - let heap = &mut self.heap; - self.memory.with_direct_access_mut(|mem| { - heap.deallocate(mem, ptr).map_err(|e| e.to_string()) - }) - } - - fn sandbox(&mut self) -> &mut dyn Sandbox { - self - } + fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> WResult<()> { + self.memory + .get_into(address.into(), dest) + .map_err(|e| e.to_string()) + } + + fn write_memory(&mut self, address: Pointer, data: &[u8]) -> WResult<()> { + self.memory + .set(address.into(), data) + .map_err(|e| e.to_string()) + } + + fn allocate_memory(&mut self, size: WordSize) -> WResult> { + let heap = &mut self.heap; + self.memory + .with_direct_access_mut(|mem| heap.allocate(mem, size).map_err(|e| e.to_string())) + } + + fn deallocate_memory(&mut self, ptr: Pointer) -> WResult<()> { + let heap = &mut self.heap; + self.memory + .with_direct_access_mut(|mem| heap.deallocate(mem, ptr).map_err(|e| e.to_string())) + } + + fn sandbox(&mut self) -> &mut dyn Sandbox { + self + } } impl<'a> Sandbox for FunctionExecutor<'a> { - fn memory_get( - &mut self, - memory_id: MemoryId, - offset: WordSize, - buf_ptr: Pointer, - 
buf_len: WordSize, - ) -> WResult { - let sandboxed_memory = self.sandbox_store.memory(memory_id).map_err(|e| e.to_string())?; - - match MemoryInstance::transfer( - &sandboxed_memory, - offset as usize, - &self.memory, - buf_ptr.into(), - buf_len as usize, - ) { - Ok(()) => Ok(sandbox_primitives::ERR_OK), - Err(_) => Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - } - } - - fn memory_set( - &mut self, - memory_id: MemoryId, - offset: WordSize, - val_ptr: Pointer, - val_len: WordSize, - ) -> WResult { - let sandboxed_memory = self.sandbox_store.memory(memory_id).map_err(|e| e.to_string())?; - - match MemoryInstance::transfer( - &self.memory, - val_ptr.into(), - &sandboxed_memory, - offset as usize, - val_len as usize, - ) { - Ok(()) => Ok(sandbox_primitives::ERR_OK), - Err(_) => Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - } - } - - fn memory_teardown(&mut self, memory_id: MemoryId) -> WResult<()> { - self.sandbox_store.memory_teardown(memory_id).map_err(|e| e.to_string()) - } - - fn memory_new( - &mut self, - initial: u32, - maximum: u32, - ) -> WResult { - self.sandbox_store.new_memory(initial, maximum).map_err(|e| e.to_string()) - } - - fn invoke( - &mut self, - instance_id: u32, - export_name: &str, - args: &[u8], - return_val: Pointer, - return_val_len: WordSize, - state: u32, - ) -> WResult { - trace!(target: "sp-sandbox", "invoke, instance_idx={}", instance_id); - - // Deserialize arguments and convert them into wasmi types. - let args = Vec::::decode(&mut &args[..]) - .map_err(|_| "Can't decode serialized arguments for the invocation")? - .into_iter() - .map(Into::into) - .collect::>(); - - let instance = self.sandbox_store.instance(instance_id).map_err(|e| e.to_string())?; - let result = instance.invoke(export_name, &args, self, state); - - match result { - Ok(None) => Ok(sandbox_primitives::ERR_OK), - Ok(Some(val)) => { - // Serialize return value and write it back into the memory. 
- sp_wasm_interface::ReturnValue::Value(val.into()).using_encoded(|val| { - if val.len() > return_val_len as usize { - Err("Return value buffer is too small")?; - } - self.write_memory(return_val, val).map_err(|_| "Return value buffer is OOB")?; - Ok(sandbox_primitives::ERR_OK) - }) - } - Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), - } - } - - fn instance_teardown(&mut self, instance_id: u32) -> WResult<()> { - self.sandbox_store.instance_teardown(instance_id).map_err(|e| e.to_string()) - } - - fn instance_new( - &mut self, - dispatch_thunk_id: u32, - wasm: &[u8], - raw_env_def: &[u8], - state: u32, - ) -> WResult { - // Extract a dispatch thunk from instance's table by the specified index. - let dispatch_thunk = { - let table = self.table.as_ref() - .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")?; - table.get(dispatch_thunk_id) - .map_err(|_| "dispatch_thunk_idx is out of the table bounds")? - .ok_or_else(|| "dispatch_thunk_idx points on an empty table entry")? 
- .clone() - }; - - let guest_env = match sandbox::GuestEnvironment::decode(&self.sandbox_store, raw_env_def) { - Ok(guest_env) => guest_env, - Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), - }; - - let instance_idx_or_err_code = - match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) - .map(|i| i.register(&mut self.sandbox_store)) - { - Ok(instance_idx) => instance_idx, - Err(sandbox::InstantiationError::StartTrapped) => - sandbox_primitives::ERR_EXECUTION, - Err(_) => sandbox_primitives::ERR_MODULE, - }; - - Ok(instance_idx_or_err_code as u32) - } - - fn get_global_val( - &self, - instance_idx: u32, - name: &str, - ) -> WResult> { - self.sandbox_store - .instance(instance_idx) - .map(|i| i.get_global_val(name)) - .map_err(|e| e.to_string()) - } + fn memory_get( + &mut self, + memory_id: MemoryId, + offset: WordSize, + buf_ptr: Pointer, + buf_len: WordSize, + ) -> WResult { + let sandboxed_memory = self + .sandbox_store + .memory(memory_id) + .map_err(|e| e.to_string())?; + + match MemoryInstance::transfer( + &sandboxed_memory, + offset as usize, + &self.memory, + buf_ptr.into(), + buf_len as usize, + ) { + Ok(()) => Ok(sandbox_primitives::ERR_OK), + Err(_) => Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + } + } + + fn memory_set( + &mut self, + memory_id: MemoryId, + offset: WordSize, + val_ptr: Pointer, + val_len: WordSize, + ) -> WResult { + let sandboxed_memory = self + .sandbox_store + .memory(memory_id) + .map_err(|e| e.to_string())?; + + match MemoryInstance::transfer( + &self.memory, + val_ptr.into(), + &sandboxed_memory, + offset as usize, + val_len as usize, + ) { + Ok(()) => Ok(sandbox_primitives::ERR_OK), + Err(_) => Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + } + } + + fn memory_teardown(&mut self, memory_id: MemoryId) -> WResult<()> { + self.sandbox_store + .memory_teardown(memory_id) + .map_err(|e| e.to_string()) + } + + fn memory_new(&mut self, initial: u32, maximum: u32) -> WResult { + self.sandbox_store + 
.new_memory(initial, maximum) + .map_err(|e| e.to_string()) + } + + fn invoke( + &mut self, + instance_id: u32, + export_name: &str, + args: &[u8], + return_val: Pointer, + return_val_len: WordSize, + state: u32, + ) -> WResult { + trace!(target: "sp-sandbox", "invoke, instance_idx={}", instance_id); + + // Deserialize arguments and convert them into wasmi types. + let args = Vec::::decode(&mut &args[..]) + .map_err(|_| "Can't decode serialized arguments for the invocation")? + .into_iter() + .map(Into::into) + .collect::>(); + + let instance = self + .sandbox_store + .instance(instance_id) + .map_err(|e| e.to_string())?; + let result = instance.invoke(export_name, &args, self, state); + + match result { + Ok(None) => Ok(sandbox_primitives::ERR_OK), + Ok(Some(val)) => { + // Serialize return value and write it back into the memory. + sp_wasm_interface::ReturnValue::Value(val.into()).using_encoded(|val| { + if val.len() > return_val_len as usize { + Err("Return value buffer is too small")?; + } + self.write_memory(return_val, val) + .map_err(|_| "Return value buffer is OOB")?; + Ok(sandbox_primitives::ERR_OK) + }) + } + Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), + } + } + + fn instance_teardown(&mut self, instance_id: u32) -> WResult<()> { + self.sandbox_store + .instance_teardown(instance_id) + .map_err(|e| e.to_string()) + } + + fn instance_new( + &mut self, + dispatch_thunk_id: u32, + wasm: &[u8], + raw_env_def: &[u8], + state: u32, + ) -> WResult { + // Extract a dispatch thunk from instance's table by the specified index. + let dispatch_thunk = { + let table = self + .table + .as_ref() + .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")?; + table + .get(dispatch_thunk_id) + .map_err(|_| "dispatch_thunk_idx is out of the table bounds")? + .ok_or_else(|| "dispatch_thunk_idx points on an empty table entry")? 
+ .clone() + }; + + let guest_env = match sandbox::GuestEnvironment::decode(&self.sandbox_store, raw_env_def) { + Ok(guest_env) => guest_env, + Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), + }; + + let instance_idx_or_err_code = + match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) + .map(|i| i.register(&mut self.sandbox_store)) + { + Ok(instance_idx) => instance_idx, + Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, + Err(_) => sandbox_primitives::ERR_MODULE, + }; + + Ok(instance_idx_or_err_code as u32) + } + + fn get_global_val( + &self, + instance_idx: u32, + name: &str, + ) -> WResult> { + self.sandbox_store + .instance(instance_idx) + .map(|i| i.get_global_val(name)) + .map_err(|e| e.to_string()) + } } /// Will be used on initialization of a module to resolve function and memory imports. struct Resolver<'a> { - /// All the hot functions that we export for the WASM blob. - host_functions: &'a [&'static dyn Function], - /// Should we allow missing function imports? - /// - /// If `true`, we return a stub that will return an error when being called. - allow_missing_func_imports: bool, - /// All the names of functions for that we did not provide a host function. - missing_functions: RefCell>, - /// Will be used as initial and maximum size of the imported memory. - heap_pages: usize, - /// By default, runtimes should import memory and this is `Some(_)` after - /// resolving. However, to be backwards compatible, we also support memory - /// exported by the WASM blob (this will be `None` after resolving). - import_memory: RefCell>, + /// All the hot functions that we export for the WASM blob. + host_functions: &'a [&'static dyn Function], + /// Should we allow missing function imports? + /// + /// If `true`, we return a stub that will return an error when being called. + allow_missing_func_imports: bool, + /// All the names of functions for that we did not provide a host function. 
+ missing_functions: RefCell>, + /// Will be used as initial and maximum size of the imported memory. + heap_pages: usize, + /// By default, runtimes should import memory and this is `Some(_)` after + /// resolving. However, to be backwards compatible, we also support memory + /// exported by the WASM blob (this will be `None` after resolving). + import_memory: RefCell>, } impl<'a> Resolver<'a> { - fn new( - host_functions: &'a[&'static dyn Function], - allow_missing_func_imports: bool, - heap_pages: usize, - ) -> Resolver<'a> { - Resolver { - host_functions, - allow_missing_func_imports, - missing_functions: RefCell::new(Vec::new()), - heap_pages, - import_memory: Default::default(), - } - } + fn new( + host_functions: &'a [&'static dyn Function], + allow_missing_func_imports: bool, + heap_pages: usize, + ) -> Resolver<'a> { + Resolver { + host_functions, + allow_missing_func_imports, + missing_functions: RefCell::new(Vec::new()), + heap_pages, + import_memory: Default::default(), + } + } } impl<'a> wasmi::ModuleImportResolver for Resolver<'a> { - fn resolve_func(&self, name: &str, signature: &wasmi::Signature) - -> std::result::Result - { - let signature = sp_wasm_interface::Signature::from(signature); - for (function_index, function) in self.host_functions.iter().enumerate() { - if name == function.name() { - if signature == function.signature() { - return Ok( - wasmi::FuncInstance::alloc_host(signature.into(), function_index), - ) - } else { - return Err(wasmi::Error::Instantiation( - format!( - "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", - function.name(), - signature, - function.signature(), - ), - )) - } - } - } - - if self.allow_missing_func_imports { - trace!(target: "wasm-executor", "Could not find function `{}`, a stub will be provided instead.", name); - let id = self.missing_functions.borrow().len() + self.host_functions.len(); - self.missing_functions.borrow_mut().push(name.to_string()); - - 
Ok(wasmi::FuncInstance::alloc_host(signature.into(), id)) - } else { - Err(wasmi::Error::Instantiation( - format!("Export {} not found", name), - )) - } - } - - fn resolve_memory( - &self, - field_name: &str, - memory_type: &wasmi::MemoryDescriptor, - ) -> Result { - if field_name == "memory" { - match &mut *self.import_memory.borrow_mut() { - Some(_) => Err(wasmi::Error::Instantiation( - "Memory can not be imported twice!".into(), - )), - memory_ref @ None => { - if memory_type - .maximum() - .map(|m| m.saturating_sub(memory_type.initial())) - .map(|m| self.heap_pages > m as usize) - .unwrap_or(false) - { - Err(wasmi::Error::Instantiation(format!( - "Heap pages ({}) is greater than imported memory maximum ({}).", - self.heap_pages, - memory_type - .maximum() - .map(|m| m.saturating_sub(memory_type.initial())) - .expect("Maximum is set, checked above; qed"), - ))) - } else { - let memory = MemoryInstance::alloc( - Pages(memory_type.initial() as usize + self.heap_pages), - Some(Pages(memory_type.initial() as usize + self.heap_pages)), - )?; - *memory_ref = Some(memory.clone()); - Ok(memory) - } - } - } - } else { - Err(wasmi::Error::Instantiation( - format!("Unknown memory reference with name: {}", field_name), - )) - } - } + fn resolve_func( + &self, + name: &str, + signature: &wasmi::Signature, + ) -> std::result::Result { + let signature = sp_wasm_interface::Signature::from(signature); + for (function_index, function) in self.host_functions.iter().enumerate() { + if name == function.name() { + if signature == function.signature() { + return Ok(wasmi::FuncInstance::alloc_host( + signature.into(), + function_index, + )); + } else { + return Err(wasmi::Error::Instantiation(format!( + "Invalid signature for function `{}` expected `{:?}`, got `{:?}`", + function.name(), + signature, + function.signature(), + ))); + } + } + } + + if self.allow_missing_func_imports { + trace!(target: "wasm-executor", "Could not find function `{}`, a stub will be provided instead.", 
name); + let id = self.missing_functions.borrow().len() + self.host_functions.len(); + self.missing_functions.borrow_mut().push(name.to_string()); + + Ok(wasmi::FuncInstance::alloc_host(signature.into(), id)) + } else { + Err(wasmi::Error::Instantiation(format!( + "Export {} not found", + name + ))) + } + } + + fn resolve_memory( + &self, + field_name: &str, + memory_type: &wasmi::MemoryDescriptor, + ) -> Result { + if field_name == "memory" { + match &mut *self.import_memory.borrow_mut() { + Some(_) => Err(wasmi::Error::Instantiation( + "Memory can not be imported twice!".into(), + )), + memory_ref @ None => { + if memory_type + .maximum() + .map(|m| m.saturating_sub(memory_type.initial())) + .map(|m| self.heap_pages > m as usize) + .unwrap_or(false) + { + Err(wasmi::Error::Instantiation(format!( + "Heap pages ({}) is greater than imported memory maximum ({}).", + self.heap_pages, + memory_type + .maximum() + .map(|m| m.saturating_sub(memory_type.initial())) + .expect("Maximum is set, checked above; qed"), + ))) + } else { + let memory = MemoryInstance::alloc( + Pages(memory_type.initial() as usize + self.heap_pages), + Some(Pages(memory_type.initial() as usize + self.heap_pages)), + )?; + *memory_ref = Some(memory.clone()); + Ok(memory) + } + } + } + } else { + Err(wasmi::Error::Instantiation(format!( + "Unknown memory reference with name: {}", + field_name + ))) + } + } } impl<'a> wasmi::Externals for FunctionExecutor<'a> { - fn invoke_index(&mut self, index: usize, args: wasmi::RuntimeArgs) - -> Result, wasmi::Trap> - { - let mut args = args.as_ref().iter().copied().map(Into::into); - - if let Some(function) = self.host_functions.get(index) { - function.execute(self, &mut args) - .map_err(|msg| Error::FunctionExecution(function.name().to_string(), msg)) - .map_err(wasmi::Trap::from) - .map(|v| v.map(Into::into)) - } else if self.allow_missing_func_imports - && index >= self.host_functions.len() - && index < self.host_functions.len() + 
self.missing_functions.len() - { - Err(Error::from(format!( - "Function `{}` is only a stub. Calling a stub is not allowed.", - self.missing_functions[index - self.host_functions.len()], - )).into()) - } else { - Err(Error::from(format!("Could not find host function with index: {}", index)).into()) - } - } + fn invoke_index( + &mut self, + index: usize, + args: wasmi::RuntimeArgs, + ) -> Result, wasmi::Trap> { + let mut args = args.as_ref().iter().copied().map(Into::into); + + if let Some(function) = self.host_functions.get(index) { + function + .execute(self, &mut args) + .map_err(|msg| Error::FunctionExecution(function.name().to_string(), msg)) + .map_err(wasmi::Trap::from) + .map(|v| v.map(Into::into)) + } else if self.allow_missing_func_imports + && index >= self.host_functions.len() + && index < self.host_functions.len() + self.missing_functions.len() + { + Err(Error::from(format!( + "Function `{}` is only a stub. Calling a stub is not allowed.", + self.missing_functions[index - self.host_functions.len()], + )) + .into()) + } else { + Err(Error::from(format!( + "Could not find host function with index: {}", + index + )) + .into()) + } + } } fn get_mem_instance(module: &ModuleRef) -> Result { - Ok(module - .export_by_name("memory") - .ok_or_else(|| Error::InvalidMemoryReference)? - .as_memory() - .ok_or_else(|| Error::InvalidMemoryReference)? - .clone()) + Ok(module + .export_by_name("memory") + .ok_or_else(|| Error::InvalidMemoryReference)? + .as_memory() + .ok_or_else(|| Error::InvalidMemoryReference)? + .clone()) } /// Find the global named `__heap_base` in the given wasm module instance and /// tries to get its value. fn get_heap_base(module: &ModuleRef) -> Result { - let heap_base_val = module - .export_by_name("__heap_base") - .ok_or_else(|| Error::HeapBaseNotFoundOrInvalid)? - .as_global() - .ok_or_else(|| Error::HeapBaseNotFoundOrInvalid)? 
- .get(); - - match heap_base_val { - wasmi::RuntimeValue::I32(v) => Ok(v as u32), - _ => Err(Error::HeapBaseNotFoundOrInvalid), - } + let heap_base_val = module + .export_by_name("__heap_base") + .ok_or_else(|| Error::HeapBaseNotFoundOrInvalid)? + .as_global() + .ok_or_else(|| Error::HeapBaseNotFoundOrInvalid)? + .get(); + + match heap_base_val { + wasmi::RuntimeValue::I32(v) => Ok(v as u32), + _ => Err(Error::HeapBaseNotFoundOrInvalid), + } } /// Call a given method in the given wasm-module runtime. fn call_in_wasm_module( - module_instance: &ModuleRef, - memory: &MemoryRef, - method: &str, - data: &[u8], - host_functions: &[&'static dyn Function], - allow_missing_func_imports: bool, - missing_functions: &Vec, + module_instance: &ModuleRef, + memory: &MemoryRef, + method: &str, + data: &[u8], + host_functions: &[&'static dyn Function], + allow_missing_func_imports: bool, + missing_functions: &Vec, ) -> Result, Error> { - // Initialize FunctionExecutor. - let table: Option = module_instance - .export_by_name("__indirect_function_table") - .and_then(|e| e.as_table().cloned()); - let heap_base = get_heap_base(module_instance)?; - - let mut fec = FunctionExecutor::new( - memory.clone(), - heap_base, - table, - host_functions, - allow_missing_func_imports, - missing_functions, - )?; - - // Write the call data - let offset = fec.allocate_memory(data.len() as u32)?; - fec.write_memory(offset, data)?; - - let result = module_instance.invoke_export( - method, - &[I32(u32::from(offset) as i32), I32(data.len() as i32)], - &mut fec, - ); - - match result { - Ok(Some(I64(r))) => { - let (ptr, length) = unpack_ptr_and_len(r as u64); - memory.get(ptr.into(), length as usize).map_err(|_| Error::Runtime) - }, - Err(e) => { - trace!( - target: "wasm-executor", - "Failed to execute code with {} pages", - memory.current_size().0 - ); - Err(e.into()) - }, - _ => Err(Error::InvalidReturn), - } + // Initialize FunctionExecutor. 
+ let table: Option = module_instance + .export_by_name("__indirect_function_table") + .and_then(|e| e.as_table().cloned()); + let heap_base = get_heap_base(module_instance)?; + + let mut fec = FunctionExecutor::new( + memory.clone(), + heap_base, + table, + host_functions, + allow_missing_func_imports, + missing_functions, + )?; + + // Write the call data + let offset = fec.allocate_memory(data.len() as u32)?; + fec.write_memory(offset, data)?; + + let result = module_instance.invoke_export( + method, + &[I32(u32::from(offset) as i32), I32(data.len() as i32)], + &mut fec, + ); + + match result { + Ok(Some(I64(r))) => { + let (ptr, length) = unpack_ptr_and_len(r as u64); + memory + .get(ptr.into(), length as usize) + .map_err(|_| Error::Runtime) + } + Err(e) => { + trace!( + target: "wasm-executor", + "Failed to execute code with {} pages", + memory.current_size().0 + ); + Err(e.into()) + } + _ => Err(Error::InvalidReturn), + } } /// Prepare module instance fn instantiate_module( - heap_pages: usize, - module: &Module, - host_functions: &[&'static dyn Function], - allow_missing_func_imports: bool, + heap_pages: usize, + module: &Module, + host_functions: &[&'static dyn Function], + allow_missing_func_imports: bool, ) -> Result<(ModuleRef, Vec, MemoryRef), Error> { - let resolver = Resolver::new(host_functions, allow_missing_func_imports, heap_pages); - // start module instantiation. Don't run 'start' function yet. - let intermediate_instance = ModuleInstance::new( - module, - &ImportsBuilder::new().with_resolver("env", &resolver), - )?; - - // Verify that the module has the heap base global variable. - let _ = get_heap_base(intermediate_instance.not_started_instance())?; - - - // Get the memory reference. Runtimes should import memory, but to be backwards - // compatible we also support exported memory. 
- let memory = match resolver.import_memory.into_inner() { - Some(memory) => memory, - None => { - debug!( - target: "wasm-executor", - "WASM blob does not imports memory, falling back to exported memory", - ); - - let memory = get_mem_instance(intermediate_instance.not_started_instance())?; - memory.grow(Pages(heap_pages)).map_err(|_| Error::Runtime)?; - - memory - } - }; - - if intermediate_instance.has_start() { - // Runtime is not allowed to have the `start` function. - Err(Error::RuntimeHasStartFn) - } else { - Ok(( - intermediate_instance.assert_no_start(), - resolver.missing_functions.into_inner(), - memory, - )) - } + let resolver = Resolver::new(host_functions, allow_missing_func_imports, heap_pages); + // start module instantiation. Don't run 'start' function yet. + let intermediate_instance = ModuleInstance::new( + module, + &ImportsBuilder::new().with_resolver("env", &resolver), + )?; + + // Verify that the module has the heap base global variable. + let _ = get_heap_base(intermediate_instance.not_started_instance())?; + + // Get the memory reference. Runtimes should import memory, but to be backwards + // compatible we also support exported memory. + let memory = match resolver.import_memory.into_inner() { + Some(memory) => memory, + None => { + debug!( + target: "wasm-executor", + "WASM blob does not imports memory, falling back to exported memory", + ); + + let memory = get_mem_instance(intermediate_instance.not_started_instance())?; + memory.grow(Pages(heap_pages)).map_err(|_| Error::Runtime)?; + + memory + } + }; + + if intermediate_instance.has_start() { + // Runtime is not allowed to have the `start` function. + Err(Error::RuntimeHasStartFn) + } else { + Ok(( + intermediate_instance.assert_no_start(), + resolver.missing_functions.into_inner(), + memory, + )) + } } /// A state snapshot of an instance taken just after instantiation. @@ -535,190 +564,191 @@ fn instantiate_module( /// It is used for restoring the state of the module after execution. 
#[derive(Clone)] struct GlobalValsSnapshot { - /// The list of all global mutable variables of the module in their sequential order. - global_mut_values: Vec, + /// The list of all global mutable variables of the module in their sequential order. + global_mut_values: Vec, } impl GlobalValsSnapshot { - // Returns `None` if instance is not valid. - fn take(module_instance: &ModuleRef) -> Self { - // Collect all values of mutable globals. - let global_mut_values = module_instance - .globals() - .iter() - .filter(|g| g.is_mutable()) - .map(|g| g.get()) - .collect(); - Self { global_mut_values } - } - - /// Reset the runtime instance to the initial version by restoring - /// the preserved memory and globals. - /// - /// Returns `Err` if applying the snapshot is failed. - fn apply(&self, instance: &ModuleRef) -> Result<(), WasmError> { - for (global_ref, global_val) in instance - .globals() - .iter() - .filter(|g| g.is_mutable()) - .zip(self.global_mut_values.iter()) - { - // the instance should be the same as used for preserving and - // we iterate the same way it as we do it for preserving values that means that the - // types should be the same and all the values are mutable. So no error is expected/ - global_ref - .set(*global_val) - .map_err(|_| WasmError::ApplySnapshotFailed)?; - } - Ok(()) - } + // Returns `None` if instance is not valid. + fn take(module_instance: &ModuleRef) -> Self { + // Collect all values of mutable globals. + let global_mut_values = module_instance + .globals() + .iter() + .filter(|g| g.is_mutable()) + .map(|g| g.get()) + .collect(); + Self { global_mut_values } + } + + /// Reset the runtime instance to the initial version by restoring + /// the preserved memory and globals. + /// + /// Returns `Err` if applying the snapshot is failed. 
+ fn apply(&self, instance: &ModuleRef) -> Result<(), WasmError> { + for (global_ref, global_val) in instance + .globals() + .iter() + .filter(|g| g.is_mutable()) + .zip(self.global_mut_values.iter()) + { + // the instance should be the same as used for preserving and + // we iterate the same way it as we do it for preserving values that means that the + // types should be the same and all the values are mutable. So no error is expected/ + global_ref + .set(*global_val) + .map_err(|_| WasmError::ApplySnapshotFailed)?; + } + Ok(()) + } } /// A runtime along with initial copy of data segments. pub struct WasmiRuntime { - /// A wasm module. - module: Module, - /// The host functions registered for this instance. - host_functions: Arc>, - /// Enable stub generation for functions that are not available in `host_functions`. - /// These stubs will error when the wasm blob tries to call them. - allow_missing_func_imports: bool, - /// Numer of heap pages this runtime uses. - heap_pages: u64, - - global_vals_snapshot: GlobalValsSnapshot, - data_segments_snapshot: DataSegmentsSnapshot, + /// A wasm module. + module: Module, + /// The host functions registered for this instance. + host_functions: Arc>, + /// Enable stub generation for functions that are not available in `host_functions`. + /// These stubs will error when the wasm blob tries to call them. + allow_missing_func_imports: bool, + /// Numer of heap pages this runtime uses. + heap_pages: u64, + + global_vals_snapshot: GlobalValsSnapshot, + data_segments_snapshot: DataSegmentsSnapshot, } impl WasmModule for WasmiRuntime { - fn new_instance(&self) -> Result, Error> { - // Instantiate this module. 
- let (instance, missing_functions, memory) = instantiate_module( - self.heap_pages as usize, - &self.module, - &self.host_functions, - self.allow_missing_func_imports, - ).map_err(|e| WasmError::Instantiation(e.to_string()))?; - - Ok(Box::new(WasmiInstance { - instance, - memory, - global_vals_snapshot: self.global_vals_snapshot.clone(), - data_segments_snapshot: self.data_segments_snapshot.clone(), - host_functions: self.host_functions.clone(), - allow_missing_func_imports: self.allow_missing_func_imports, - missing_functions, - })) - } + fn new_instance(&self) -> Result, Error> { + // Instantiate this module. + let (instance, missing_functions, memory) = instantiate_module( + self.heap_pages as usize, + &self.module, + &self.host_functions, + self.allow_missing_func_imports, + ) + .map_err(|e| WasmError::Instantiation(e.to_string()))?; + + Ok(Box::new(WasmiInstance { + instance, + memory, + global_vals_snapshot: self.global_vals_snapshot.clone(), + data_segments_snapshot: self.data_segments_snapshot.clone(), + host_functions: self.host_functions.clone(), + allow_missing_func_imports: self.allow_missing_func_imports, + missing_functions, + })) + } } /// Create a new `WasmiRuntime` given the code. This function loads the module and /// stores it in the instance. pub fn create_runtime( - code: &[u8], - heap_pages: u64, - host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, + code: &[u8], + heap_pages: u64, + host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, ) -> Result { - let module = Module::from_buffer(&code).map_err(|_| WasmError::InvalidModule)?; - - // Extract the data segments from the wasm code. - // - // A return of this error actually indicates that there is a problem in logic, since - // we just loaded and validated the `module` above. 
- let (data_segments_snapshot, global_vals_snapshot) = { - let (instance, _, _) = instantiate_module( - heap_pages as usize, - &module, - &host_functions, - allow_missing_func_imports, - ) - .map_err(|e| WasmError::Instantiation(e.to_string()))?; - - let data_segments_snapshot = DataSegmentsSnapshot::take( - &WasmModuleInfo::new(code) - .ok_or_else(|| WasmError::Other("cannot deserialize module".to_string()))?, - ) - .map_err(|e| WasmError::Other(e.to_string()))?; - let global_vals_snapshot = GlobalValsSnapshot::take(&instance); - - (data_segments_snapshot, global_vals_snapshot) - }; - - Ok(WasmiRuntime { - module, - data_segments_snapshot, - global_vals_snapshot, - host_functions: Arc::new(host_functions), - allow_missing_func_imports, - heap_pages, - }) + let module = Module::from_buffer(&code).map_err(|_| WasmError::InvalidModule)?; + + // Extract the data segments from the wasm code. + // + // A return of this error actually indicates that there is a problem in logic, since + // we just loaded and validated the `module` above. + let (data_segments_snapshot, global_vals_snapshot) = { + let (instance, _, _) = instantiate_module( + heap_pages as usize, + &module, + &host_functions, + allow_missing_func_imports, + ) + .map_err(|e| WasmError::Instantiation(e.to_string()))?; + + let data_segments_snapshot = DataSegmentsSnapshot::take( + &WasmModuleInfo::new(code) + .ok_or_else(|| WasmError::Other("cannot deserialize module".to_string()))?, + ) + .map_err(|e| WasmError::Other(e.to_string()))?; + let global_vals_snapshot = GlobalValsSnapshot::take(&instance); + + (data_segments_snapshot, global_vals_snapshot) + }; + + Ok(WasmiRuntime { + module, + data_segments_snapshot, + global_vals_snapshot, + host_functions: Arc::new(host_functions), + allow_missing_func_imports, + heap_pages, + }) } /// Wasmi instance wrapper along with the state snapshot. pub struct WasmiInstance { - /// A wasm module instance. 
- instance: ModuleRef, - /// The memory instance of used by the wasm module. - memory: MemoryRef, - /// The snapshot of global variable values just after instantiation. - global_vals_snapshot: GlobalValsSnapshot, - /// The snapshot of data segments. - data_segments_snapshot: DataSegmentsSnapshot, - /// The host functions registered for this instance. - host_functions: Arc>, - /// Enable stub generation for functions that are not available in `host_functions`. - /// These stubs will error when the wasm blob trie to call them. - allow_missing_func_imports: bool, - /// List of missing functions detected during function resolution - missing_functions: Vec, + /// A wasm module instance. + instance: ModuleRef, + /// The memory instance of used by the wasm module. + memory: MemoryRef, + /// The snapshot of global variable values just after instantiation. + global_vals_snapshot: GlobalValsSnapshot, + /// The snapshot of data segments. + data_segments_snapshot: DataSegmentsSnapshot, + /// The host functions registered for this instance. + host_functions: Arc>, + /// Enable stub generation for functions that are not available in `host_functions`. + /// These stubs will error when the wasm blob trie to call them. + allow_missing_func_imports: bool, + /// List of missing functions detected during function resolution + missing_functions: Vec, } // This is safe because `WasmiInstance` does not leak any references to `self.memory` and `self.instance` unsafe impl Send for WasmiInstance {} impl WasmInstance for WasmiInstance { - fn call(&self, method: &str, data: &[u8]) -> Result, Error> { - // We reuse a single wasm instance for multiple calls and a previous call (if any) - // altered the state. Therefore, we need to restore the instance to original state. - - // First, zero initialize the linear memory. - self.memory.erase().map_err(|e| { - // Snapshot restoration failed. 
This is pretty unexpected since this can happen - // if some invariant is broken or if the system is under extreme memory pressure - // (so erasing fails). - error!(target: "wasm-executor", "snapshot restoration failed: {}", e); - WasmError::ErasingFailed(e.to_string()) - })?; - - // Second, reapply data segments into the linear memory. - self.data_segments_snapshot - .apply(|offset, contents| self.memory.set(offset, contents))?; - - // Third, restore the global variables to their initial values. - self.global_vals_snapshot.apply(&self.instance)?; - - call_in_wasm_module( - &self.instance, - &self.memory, - method, - data, - self.host_functions.as_ref(), - self.allow_missing_func_imports, - self.missing_functions.as_ref(), - ) - } - - fn get_global_const(&self, name: &str) -> Result, Error> { - match self.instance.export_by_name(name) { - Some(global) => Ok(Some( - global - .as_global() - .ok_or_else(|| format!("`{}` is not a global", name))? - .get() - .into() - )), - None => Ok(None), - } - } + fn call(&self, method: &str, data: &[u8]) -> Result, Error> { + // We reuse a single wasm instance for multiple calls and a previous call (if any) + // altered the state. Therefore, we need to restore the instance to original state. + + // First, zero initialize the linear memory. + self.memory.erase().map_err(|e| { + // Snapshot restoration failed. This is pretty unexpected since this can happen + // if some invariant is broken or if the system is under extreme memory pressure + // (so erasing fails). + error!(target: "wasm-executor", "snapshot restoration failed: {}", e); + WasmError::ErasingFailed(e.to_string()) + })?; + + // Second, reapply data segments into the linear memory. + self.data_segments_snapshot + .apply(|offset, contents| self.memory.set(offset, contents))?; + + // Third, restore the global variables to their initial values. 
+ self.global_vals_snapshot.apply(&self.instance)?; + + call_in_wasm_module( + &self.instance, + &self.memory, + method, + data, + self.host_functions.as_ref(), + self.allow_missing_func_imports, + self.missing_functions.as_ref(), + ) + } + + fn get_global_const(&self, name: &str) -> Result, Error> { + match self.instance.export_by_name(name) { + Some(global) => Ok(Some( + global + .as_global() + .ok_or_else(|| format!("`{}` is not a global", name))? + .get() + .into(), + )), + None => Ok(None), + } + } } diff --git a/client/executor/wasmtime/src/host.rs b/client/executor/wasmtime/src/host.rs index 29187ac663..e658a4ce1f 100644 --- a/client/executor/wasmtime/src/host.rs +++ b/client/executor/wasmtime/src/host.rs @@ -19,14 +19,14 @@ use crate::instance_wrapper::InstanceWrapper; use crate::util; -use std::{cell::RefCell, rc::Rc}; +use codec::{Decode, Encode}; use log::trace; -use codec::{Encode, Decode}; -use sp_allocator::FreeingBumpHeapAllocator; use sc_executor_common::error::Result; use sc_executor_common::sandbox::{self, SandboxCapabilities, SupervisorFuncIndex}; +use sp_allocator::FreeingBumpHeapAllocator; use sp_core::sandbox as sandbox_primitives; use sp_wasm_interface::{FunctionContext, MemoryId, Pointer, Sandbox, WordSize}; +use std::{cell::RefCell, rc::Rc}; use wasmtime::{Func, Val}; /// Wrapper type for pointer to a Wasm table entry. @@ -40,34 +40,34 @@ pub struct SupervisorFuncRef(Func); /// call, whereas the state is maintained for the duration of a Wasm runtime call, which may make /// many different host calls that must share state. pub struct HostState { - // We need some interior mutability here since the host state is shared between all host - // function handlers and the wasmtime backend's `impl WasmRuntime`. - // - // Furthermore, because of recursive calls (e.g. runtime can create and call an sandboxed - // instance which in turn can call the runtime back) we have to be very careful with borrowing - // those. 
- // - // Basically, most of the interactions should do temporary borrow immediately releasing the - // borrow after performing necessary queries/changes. - sandbox_store: RefCell>, - allocator: RefCell, - instance: Rc, + // We need some interior mutability here since the host state is shared between all host + // function handlers and the wasmtime backend's `impl WasmRuntime`. + // + // Furthermore, because of recursive calls (e.g. runtime can create and call an sandboxed + // instance which in turn can call the runtime back) we have to be very careful with borrowing + // those. + // + // Basically, most of the interactions should do temporary borrow immediately releasing the + // borrow after performing necessary queries/changes. + sandbox_store: RefCell>, + allocator: RefCell, + instance: Rc, } impl HostState { - /// Constructs a new `HostState`. - pub fn new(allocator: FreeingBumpHeapAllocator, instance: Rc) -> Self { - HostState { - sandbox_store: RefCell::new(sandbox::Store::new()), - allocator: RefCell::new(allocator), - instance, - } - } - - /// Materialize `HostContext` that can be used to invoke a substrate host `dyn Function`. - pub fn materialize<'a>(&'a self) -> HostContext<'a> { - HostContext(self) - } + /// Constructs a new `HostState`. + pub fn new(allocator: FreeingBumpHeapAllocator, instance: Rc) -> Self { + HostState { + sandbox_store: RefCell::new(sandbox::Store::new()), + allocator: RefCell::new(allocator), + instance, + } + } + + /// Materialize `HostContext` that can be used to invoke a substrate host `dyn Function`. 
+ pub fn materialize<'a>(&'a self) -> HostContext<'a> { + HostContext(self) + } } /// A `HostContext` implements `FunctionContext` for making host calls from a Wasmtime @@ -76,269 +76,269 @@ impl HostState { pub struct HostContext<'a>(&'a HostState); impl<'a> std::ops::Deref for HostContext<'a> { - type Target = HostState; - fn deref(&self) -> &HostState { - self.0 - } + type Target = HostState; + fn deref(&self) -> &HostState { + self.0 + } } impl<'a> SandboxCapabilities for HostContext<'a> { - type SupervisorFuncRef = SupervisorFuncRef; - - fn invoke( - &mut self, - dispatch_thunk: &Self::SupervisorFuncRef, - invoke_args_ptr: Pointer, - invoke_args_len: WordSize, - state: u32, - func_idx: SupervisorFuncIndex, - ) -> Result { - let result = dispatch_thunk.0.call(&[ - Val::I32(u32::from(invoke_args_ptr) as i32), - Val::I32(invoke_args_len as i32), - Val::I32(state as i32), - Val::I32(usize::from(func_idx) as i32), - ]); - match result { - Ok(ret_vals) => { - let ret_val = if ret_vals.len() != 1 { - return Err(format!( - "Supervisor function returned {} results, expected 1", - ret_vals.len() - ) - .into()); - } else { - &ret_vals[0] - }; - - if let Some(ret_val) = ret_val.i64() { - Ok(ret_val) - } else { - return Err("Supervisor function returned unexpected result!".into()); - } - } - Err(err) => Err(err.message().to_string().into()), - } - } + type SupervisorFuncRef = SupervisorFuncRef; + + fn invoke( + &mut self, + dispatch_thunk: &Self::SupervisorFuncRef, + invoke_args_ptr: Pointer, + invoke_args_len: WordSize, + state: u32, + func_idx: SupervisorFuncIndex, + ) -> Result { + let result = dispatch_thunk.0.call(&[ + Val::I32(u32::from(invoke_args_ptr) as i32), + Val::I32(invoke_args_len as i32), + Val::I32(state as i32), + Val::I32(usize::from(func_idx) as i32), + ]); + match result { + Ok(ret_vals) => { + let ret_val = if ret_vals.len() != 1 { + return Err(format!( + "Supervisor function returned {} results, expected 1", + ret_vals.len() + ) + .into()); + } else { 
+ &ret_vals[0] + }; + + if let Some(ret_val) = ret_val.i64() { + Ok(ret_val) + } else { + return Err("Supervisor function returned unexpected result!".into()); + } + } + Err(err) => Err(err.message().to_string().into()), + } + } } impl<'a> sp_wasm_interface::FunctionContext for HostContext<'a> { - fn read_memory_into( - &self, - address: Pointer, - dest: &mut [u8], - ) -> sp_wasm_interface::Result<()> { - self.instance - .read_memory_into(address, dest) - .map_err(|e| e.to_string()) - } - - fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { - self.instance - .write_memory_from(address, data) - .map_err(|e| e.to_string()) - } - - fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { - self.instance - .allocate(&mut *self.allocator.borrow_mut(), size) - .map_err(|e| e.to_string()) - } - - fn deallocate_memory(&mut self, ptr: Pointer) -> sp_wasm_interface::Result<()> { - self.instance - .deallocate(&mut *self.allocator.borrow_mut(), ptr) - .map_err(|e| e.to_string()) - } - - fn sandbox(&mut self) -> &mut dyn Sandbox { - self - } + fn read_memory_into( + &self, + address: Pointer, + dest: &mut [u8], + ) -> sp_wasm_interface::Result<()> { + self.instance + .read_memory_into(address, dest) + .map_err(|e| e.to_string()) + } + + fn write_memory(&mut self, address: Pointer, data: &[u8]) -> sp_wasm_interface::Result<()> { + self.instance + .write_memory_from(address, data) + .map_err(|e| e.to_string()) + } + + fn allocate_memory(&mut self, size: WordSize) -> sp_wasm_interface::Result> { + self.instance + .allocate(&mut *self.allocator.borrow_mut(), size) + .map_err(|e| e.to_string()) + } + + fn deallocate_memory(&mut self, ptr: Pointer) -> sp_wasm_interface::Result<()> { + self.instance + .deallocate(&mut *self.allocator.borrow_mut(), ptr) + .map_err(|e| e.to_string()) + } + + fn sandbox(&mut self) -> &mut dyn Sandbox { + self + } } impl<'a> Sandbox for HostContext<'a> { - fn memory_get( - &mut self, - 
memory_id: MemoryId, - offset: WordSize, - buf_ptr: Pointer, - buf_len: WordSize, - ) -> sp_wasm_interface::Result { - let sandboxed_memory = self - .sandbox_store - .borrow() - .memory(memory_id) - .map_err(|e| e.to_string())?; - sandboxed_memory.with_direct_access(|sandboxed_memory| { - let len = buf_len as usize; - let src_range = match util::checked_range(offset as usize, len, sandboxed_memory.len()) - { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - let supervisor_mem_size = self.instance.memory_size() as usize; - let dst_range = match util::checked_range(buf_ptr.into(), len, supervisor_mem_size) { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - self.instance - .write_memory_from( - Pointer::new(dst_range.start as u32), - &sandboxed_memory[src_range], - ) - .expect("ranges are checked above; write can't fail; qed"); - Ok(sandbox_primitives::ERR_OK) - }) - } - - fn memory_set( - &mut self, - memory_id: MemoryId, - offset: WordSize, - val_ptr: Pointer, - val_len: WordSize, - ) -> sp_wasm_interface::Result { - let sandboxed_memory = self - .sandbox_store - .borrow() - .memory(memory_id) - .map_err(|e| e.to_string())?; - sandboxed_memory.with_direct_access_mut(|sandboxed_memory| { - let len = val_len as usize; - let supervisor_mem_size = self.instance.memory_size() as usize; - let src_range = match util::checked_range(val_ptr.into(), len, supervisor_mem_size) { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - let dst_range = match util::checked_range(offset as usize, len, sandboxed_memory.len()) - { - Some(range) => range, - None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), - }; - self.instance - .read_memory_into( - Pointer::new(src_range.start as u32), - &mut sandboxed_memory[dst_range], - ) - .expect("ranges are checked above; read can't fail; qed"); - Ok(sandbox_primitives::ERR_OK) - }) - } - - fn memory_teardown(&mut self, 
memory_id: MemoryId) -> sp_wasm_interface::Result<()> { - self.sandbox_store - .borrow_mut() - .memory_teardown(memory_id) - .map_err(|e| e.to_string()) - } - - fn memory_new(&mut self, initial: u32, maximum: MemoryId) -> sp_wasm_interface::Result { - self.sandbox_store - .borrow_mut() - .new_memory(initial, maximum) - .map_err(|e| e.to_string()) - } - - fn invoke( - &mut self, - instance_id: u32, - export_name: &str, - args: &[u8], - return_val: Pointer, - return_val_len: u32, - state: u32, - ) -> sp_wasm_interface::Result { - trace!(target: "sp-sandbox", "invoke, instance_idx={}", instance_id); - - // Deserialize arguments and convert them into wasmi types. - let args = Vec::::decode(&mut &args[..]) - .map_err(|_| "Can't decode serialized arguments for the invocation")? - .into_iter() - .map(Into::into) - .collect::>(); - - let instance = self - .sandbox_store - .borrow() - .instance(instance_id) - .map_err(|e| e.to_string())?; - let result = instance.invoke(export_name, &args, self, state); - - match result { - Ok(None) => Ok(sandbox_primitives::ERR_OK), - Ok(Some(val)) => { - // Serialize return value and write it back into the memory. - sp_wasm_interface::ReturnValue::Value(val.into()).using_encoded(|val| { - if val.len() > return_val_len as usize { - Err("Return value buffer is too small")?; - } - ::write_memory(self, return_val, val) - .map_err(|_| "can't write return value")?; - Ok(sandbox_primitives::ERR_OK) - }) - } - Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), - } - } - - fn instance_teardown(&mut self, instance_id: u32) -> sp_wasm_interface::Result<()> { - self.sandbox_store - .borrow_mut() - .instance_teardown(instance_id) - .map_err(|e| e.to_string()) - } - - fn instance_new( - &mut self, - dispatch_thunk_id: u32, - wasm: &[u8], - raw_env_def: &[u8], - state: u32, - ) -> sp_wasm_interface::Result { - // Extract a dispatch thunk from the instance's table by the specified index. 
- let dispatch_thunk = { - let table_item = self - .instance - .table() - .as_ref() - .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")? - .get(dispatch_thunk_id); - - let func_ref = table_item - .ok_or_else(|| "dispatch_thunk_id is out of bounds")? - .funcref() - .ok_or_else(|| "dispatch_thunk_idx should be a funcref")? - .clone(); - SupervisorFuncRef(func_ref) - }; - - let guest_env = - match sandbox::GuestEnvironment::decode(&*self.sandbox_store.borrow(), raw_env_def) { - Ok(guest_env) => guest_env, - Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), - }; - - let instance_idx_or_err_code = - match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) - .map(|i| i.register(&mut *self.sandbox_store.borrow_mut())) - { - Ok(instance_idx) => instance_idx, - Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, - Err(_) => sandbox_primitives::ERR_MODULE, - }; - - Ok(instance_idx_or_err_code as u32) - } - - fn get_global_val( - &self, - instance_idx: u32, - name: &str, - ) -> sp_wasm_interface::Result> { - self.sandbox_store - .borrow() - .instance(instance_idx) - .map(|i| i.get_global_val(name)) - .map_err(|e| e.to_string()) - } + fn memory_get( + &mut self, + memory_id: MemoryId, + offset: WordSize, + buf_ptr: Pointer, + buf_len: WordSize, + ) -> sp_wasm_interface::Result { + let sandboxed_memory = self + .sandbox_store + .borrow() + .memory(memory_id) + .map_err(|e| e.to_string())?; + sandboxed_memory.with_direct_access(|sandboxed_memory| { + let len = buf_len as usize; + let src_range = match util::checked_range(offset as usize, len, sandboxed_memory.len()) + { + Some(range) => range, + None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + }; + let supervisor_mem_size = self.instance.memory_size() as usize; + let dst_range = match util::checked_range(buf_ptr.into(), len, supervisor_mem_size) { + Some(range) => range, + None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + }; + 
self.instance + .write_memory_from( + Pointer::new(dst_range.start as u32), + &sandboxed_memory[src_range], + ) + .expect("ranges are checked above; write can't fail; qed"); + Ok(sandbox_primitives::ERR_OK) + }) + } + + fn memory_set( + &mut self, + memory_id: MemoryId, + offset: WordSize, + val_ptr: Pointer, + val_len: WordSize, + ) -> sp_wasm_interface::Result { + let sandboxed_memory = self + .sandbox_store + .borrow() + .memory(memory_id) + .map_err(|e| e.to_string())?; + sandboxed_memory.with_direct_access_mut(|sandboxed_memory| { + let len = val_len as usize; + let supervisor_mem_size = self.instance.memory_size() as usize; + let src_range = match util::checked_range(val_ptr.into(), len, supervisor_mem_size) { + Some(range) => range, + None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + }; + let dst_range = match util::checked_range(offset as usize, len, sandboxed_memory.len()) + { + Some(range) => range, + None => return Ok(sandbox_primitives::ERR_OUT_OF_BOUNDS), + }; + self.instance + .read_memory_into( + Pointer::new(src_range.start as u32), + &mut sandboxed_memory[dst_range], + ) + .expect("ranges are checked above; read can't fail; qed"); + Ok(sandbox_primitives::ERR_OK) + }) + } + + fn memory_teardown(&mut self, memory_id: MemoryId) -> sp_wasm_interface::Result<()> { + self.sandbox_store + .borrow_mut() + .memory_teardown(memory_id) + .map_err(|e| e.to_string()) + } + + fn memory_new(&mut self, initial: u32, maximum: MemoryId) -> sp_wasm_interface::Result { + self.sandbox_store + .borrow_mut() + .new_memory(initial, maximum) + .map_err(|e| e.to_string()) + } + + fn invoke( + &mut self, + instance_id: u32, + export_name: &str, + args: &[u8], + return_val: Pointer, + return_val_len: u32, + state: u32, + ) -> sp_wasm_interface::Result { + trace!(target: "sp-sandbox", "invoke, instance_idx={}", instance_id); + + // Deserialize arguments and convert them into wasmi types. 
+ let args = Vec::::decode(&mut &args[..]) + .map_err(|_| "Can't decode serialized arguments for the invocation")? + .into_iter() + .map(Into::into) + .collect::>(); + + let instance = self + .sandbox_store + .borrow() + .instance(instance_id) + .map_err(|e| e.to_string())?; + let result = instance.invoke(export_name, &args, self, state); + + match result { + Ok(None) => Ok(sandbox_primitives::ERR_OK), + Ok(Some(val)) => { + // Serialize return value and write it back into the memory. + sp_wasm_interface::ReturnValue::Value(val.into()).using_encoded(|val| { + if val.len() > return_val_len as usize { + Err("Return value buffer is too small")?; + } + ::write_memory(self, return_val, val) + .map_err(|_| "can't write return value")?; + Ok(sandbox_primitives::ERR_OK) + }) + } + Err(_) => Ok(sandbox_primitives::ERR_EXECUTION), + } + } + + fn instance_teardown(&mut self, instance_id: u32) -> sp_wasm_interface::Result<()> { + self.sandbox_store + .borrow_mut() + .instance_teardown(instance_id) + .map_err(|e| e.to_string()) + } + + fn instance_new( + &mut self, + dispatch_thunk_id: u32, + wasm: &[u8], + raw_env_def: &[u8], + state: u32, + ) -> sp_wasm_interface::Result { + // Extract a dispatch thunk from the instance's table by the specified index. + let dispatch_thunk = { + let table_item = self + .instance + .table() + .as_ref() + .ok_or_else(|| "Runtime doesn't have a table; sandbox is unavailable")? + .get(dispatch_thunk_id); + + let func_ref = table_item + .ok_or_else(|| "dispatch_thunk_id is out of bounds")? + .funcref() + .ok_or_else(|| "dispatch_thunk_idx should be a funcref")? 
+ .clone(); + SupervisorFuncRef(func_ref) + }; + + let guest_env = + match sandbox::GuestEnvironment::decode(&*self.sandbox_store.borrow(), raw_env_def) { + Ok(guest_env) => guest_env, + Err(_) => return Ok(sandbox_primitives::ERR_MODULE as u32), + }; + + let instance_idx_or_err_code = + match sandbox::instantiate(self, dispatch_thunk, wasm, guest_env, state) + .map(|i| i.register(&mut *self.sandbox_store.borrow_mut())) + { + Ok(instance_idx) => instance_idx, + Err(sandbox::InstantiationError::StartTrapped) => sandbox_primitives::ERR_EXECUTION, + Err(_) => sandbox_primitives::ERR_MODULE, + }; + + Ok(instance_idx_or_err_code as u32) + } + + fn get_global_val( + &self, + instance_idx: u32, + name: &str, + ) -> sp_wasm_interface::Result> { + self.sandbox_store + .borrow() + .instance(instance_idx) + .map(|i| i.get_global_val(name)) + .map_err(|e| e.to_string()) + } } diff --git a/client/executor/wasmtime/src/imports.rs b/client/executor/wasmtime/src/imports.rs index 48299ffd62..019eb26e9b 100644 --- a/client/executor/wasmtime/src/imports.rs +++ b/client/executor/wasmtime/src/imports.rs @@ -20,309 +20,307 @@ use sp_wasm_interface::{Function, Value, ValueType}; use std::any::Any; use std::rc::Rc; use wasmtime::{ - Callable, Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, - Trap, Val, + Callable, Extern, ExternType, Func, FuncType, ImportType, Limits, Memory, MemoryType, Module, + Trap, Val, }; pub struct Imports { - /// Contains the index into `externs` where the memory import is stored if any. `None` if there - /// is none. - pub memory_import_index: Option, - pub externs: Vec, + /// Contains the index into `externs` where the memory import is stored if any. `None` if there + /// is none. + pub memory_import_index: Option, + pub externs: Vec, } /// Goes over all imports of a module and prepares a vector of `Extern`s that can be used for /// instantiation of the module. Returns an error if there are imports that cannot be satisfied. 
pub fn resolve_imports( - module: &Module, - host_functions: &[&'static dyn Function], - heap_pages: u32, - allow_missing_func_imports: bool, + module: &Module, + host_functions: &[&'static dyn Function], + heap_pages: u32, + allow_missing_func_imports: bool, ) -> Result { - let mut externs = vec![]; - let mut memory_import_index = None; - for import_ty in module.imports() { - if import_ty.module() != "env" { - return Err(WasmError::Other(format!( - "host doesn't provide any imports from non-env module: {}:{}", - import_ty.module(), - import_ty.name() - ))); - } + let mut externs = vec![]; + let mut memory_import_index = None; + for import_ty in module.imports() { + if import_ty.module() != "env" { + return Err(WasmError::Other(format!( + "host doesn't provide any imports from non-env module: {}:{}", + import_ty.module(), + import_ty.name() + ))); + } - let resolved = match import_ty.name() { - "memory" => { - memory_import_index = Some(externs.len()); - resolve_memory_import(module, import_ty, heap_pages)? - } - _ => resolve_func_import( - module, - import_ty, - host_functions, - allow_missing_func_imports, - )?, - }; - externs.push(resolved); - } - Ok(Imports { - memory_import_index, - externs, - }) + let resolved = match import_ty.name() { + "memory" => { + memory_import_index = Some(externs.len()); + resolve_memory_import(module, import_ty, heap_pages)? 
+ } + _ => resolve_func_import( + module, + import_ty, + host_functions, + allow_missing_func_imports, + )?, + }; + externs.push(resolved); + } + Ok(Imports { + memory_import_index, + externs, + }) } fn resolve_memory_import( - module: &Module, - import_ty: &ImportType, - heap_pages: u32, + module: &Module, + import_ty: &ImportType, + heap_pages: u32, ) -> Result { - let requested_memory_ty = match import_ty.ty() { - ExternType::Memory(memory_ty) => memory_ty, - _ => { - return Err(WasmError::Other(format!( - "this import must be of memory type: {}:{}", - import_ty.module(), - import_ty.name() - ))) - } - }; + let requested_memory_ty = match import_ty.ty() { + ExternType::Memory(memory_ty) => memory_ty, + _ => { + return Err(WasmError::Other(format!( + "this import must be of memory type: {}:{}", + import_ty.module(), + import_ty.name() + ))) + } + }; - // Increment the min (a.k.a initial) number of pages by `heap_pages` and check if it exceeds the - // maximum specified by the import. - let initial = requested_memory_ty - .limits() - .min() - .saturating_add(heap_pages); - if let Some(max) = requested_memory_ty.limits().max() { - if initial > max { - return Err(WasmError::Other(format!( + // Increment the min (a.k.a initial) number of pages by `heap_pages` and check if it exceeds the + // maximum specified by the import. 
+ let initial = requested_memory_ty + .limits() + .min() + .saturating_add(heap_pages); + if let Some(max) = requested_memory_ty.limits().max() { + if initial > max { + return Err(WasmError::Other(format!( "incremented number of pages by heap_pages (total={}) is more than maximum requested\ by the runtime wasm module {}", initial, max, ))); - } - } + } + } - let memory_ty = MemoryType::new(Limits::new(initial, requested_memory_ty.limits().max())); - let memory = Memory::new(module.store(), memory_ty); - Ok(Extern::Memory(memory)) + let memory_ty = MemoryType::new(Limits::new(initial, requested_memory_ty.limits().max())); + let memory = Memory::new(module.store(), memory_ty); + Ok(Extern::Memory(memory)) } fn resolve_func_import( - module: &Module, - import_ty: &ImportType, - host_functions: &[&'static dyn Function], - allow_missing_func_imports: bool, + module: &Module, + import_ty: &ImportType, + host_functions: &[&'static dyn Function], + allow_missing_func_imports: bool, ) -> Result { - let func_ty = match import_ty.ty() { - ExternType::Func(func_ty) => func_ty, - _ => { - return Err(WasmError::Other(format!( - "host doesn't provide any non function imports besides 'memory': {}:{}", - import_ty.module(), - import_ty.name() - ))); - } - }; + let func_ty = match import_ty.ty() { + ExternType::Func(func_ty) => func_ty, + _ => { + return Err(WasmError::Other(format!( + "host doesn't provide any non function imports besides 'memory': {}:{}", + import_ty.module(), + import_ty.name() + ))); + } + }; - let host_func = match host_functions - .iter() - .find(|host_func| host_func.name() == import_ty.name()) - { - Some(host_func) => host_func, - None if allow_missing_func_imports => { - return Ok(MissingHostFuncHandler::new(import_ty).into_extern(module, func_ty)); - } - None => { - return Err(WasmError::Other(format!( - "host doesn't provide such function: {}:{}", - import_ty.module(), - import_ty.name() - ))); - } - }; - if !signature_matches(&func_ty, 
&wasmtime_func_sig(*host_func)) { - return Err(WasmError::Other(format!( - "signature mismatch for: {}:{}", - import_ty.module(), - import_ty.name() - ))); - } + let host_func = match host_functions + .iter() + .find(|host_func| host_func.name() == import_ty.name()) + { + Some(host_func) => host_func, + None if allow_missing_func_imports => { + return Ok(MissingHostFuncHandler::new(import_ty).into_extern(module, func_ty)); + } + None => { + return Err(WasmError::Other(format!( + "host doesn't provide such function: {}:{}", + import_ty.module(), + import_ty.name() + ))); + } + }; + if !signature_matches(&func_ty, &wasmtime_func_sig(*host_func)) { + return Err(WasmError::Other(format!( + "signature mismatch for: {}:{}", + import_ty.module(), + import_ty.name() + ))); + } - Ok(HostFuncHandler::new(*host_func).into_extern(module)) + Ok(HostFuncHandler::new(*host_func).into_extern(module)) } /// Returns `true` if `lhs` and `rhs` represent the same signature. fn signature_matches(lhs: &wasmtime::FuncType, rhs: &wasmtime::FuncType) -> bool { - lhs.params() == rhs.params() && lhs.results() == rhs.results() + lhs.params() == rhs.params() && lhs.results() == rhs.results() } /// This structure implements `Callable` and acts as a bridge between wasmtime and /// substrate host functions. 
struct HostFuncHandler { - host_func: &'static dyn Function, + host_func: &'static dyn Function, } impl HostFuncHandler { - fn new(host_func: &'static dyn Function) -> Self { - Self { - host_func, - } - } + fn new(host_func: &'static dyn Function) -> Self { + Self { host_func } + } - fn into_extern(self, module: &Module) -> Extern { - let func_ty = wasmtime_func_sig(self.host_func); - let func = Func::new(module.store(), func_ty, Rc::new(self)); - Extern::Func(func) - } + fn into_extern(self, module: &Module) -> Extern { + let func_ty = wasmtime_func_sig(self.host_func); + let func = Func::new(module.store(), func_ty, Rc::new(self)); + Extern::Func(func) + } } impl Callable for HostFuncHandler { - fn call( - &self, - wasmtime_params: &[Val], - wasmtime_results: &mut [Val], - ) -> Result<(), wasmtime::Trap> { - let unwind_result = state_holder::with_context(|host_ctx| { - let mut host_ctx = host_ctx.expect( - "host functions can be called only from wasm instance; + fn call( + &self, + wasmtime_params: &[Val], + wasmtime_results: &mut [Val], + ) -> Result<(), wasmtime::Trap> { + let unwind_result = state_holder::with_context(|host_ctx| { + let mut host_ctx = host_ctx.expect( + "host functions can be called only from wasm instance; wasm instance is always called initializing context; therefore host_ctx cannot be None; qed ", - ); - // `into_value` panics if it encounters a value that doesn't fit into the values - // available in substrate. - // - // This, however, cannot happen since the signature of this function is created from - // a `dyn Function` signature of which cannot have a non substrate value by definition. - let mut params = wasmtime_params.iter().cloned().map(into_value); + ); + // `into_value` panics if it encounters a value that doesn't fit into the values + // available in substrate. 
+ // + // This, however, cannot happen since the signature of this function is created from + // a `dyn Function` signature of which cannot have a non substrate value by definition. + let mut params = wasmtime_params.iter().cloned().map(into_value); - std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { - self.host_func.execute(&mut host_ctx, &mut params) - })) - }); + std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { + self.host_func.execute(&mut host_ctx, &mut params) + })) + }); - let execution_result = match unwind_result { - Ok(execution_result) => execution_result, - Err(err) => return Err(Trap::new(stringify_panic_payload(err))), - }; + let execution_result = match unwind_result { + Ok(execution_result) => execution_result, + Err(err) => return Err(Trap::new(stringify_panic_payload(err))), + }; - match execution_result { - Ok(Some(ret_val)) => { - debug_assert!( - wasmtime_results.len() == 1, - "wasmtime function signature, therefore the number of results, should always \ + match execution_result { + Ok(Some(ret_val)) => { + debug_assert!( + wasmtime_results.len() == 1, + "wasmtime function signature, therefore the number of results, should always \ correspond to the number of results returned by the host function", - ); - wasmtime_results[0] = into_wasmtime_val(ret_val); - Ok(()) - } - Ok(None) => { - debug_assert!( - wasmtime_results.len() == 0, - "wasmtime function signature, therefore the number of results, should always \ + ); + wasmtime_results[0] = into_wasmtime_val(ret_val); + Ok(()) + } + Ok(None) => { + debug_assert!( + wasmtime_results.len() == 0, + "wasmtime function signature, therefore the number of results, should always \ correspond to the number of results returned by the host function", - ); - Ok(()) - } - Err(msg) => Err(Trap::new(msg)), - } - } + ); + Ok(()) + } + Err(msg) => Err(Trap::new(msg)), + } + } } /// A `Callable` handler for missing functions. 
struct MissingHostFuncHandler { - module: String, - name: String, + module: String, + name: String, } impl MissingHostFuncHandler { - fn new(import_ty: &ImportType) -> Self { - Self { - module: import_ty.module().to_string(), - name: import_ty.name().to_string(), - } - } + fn new(import_ty: &ImportType) -> Self { + Self { + module: import_ty.module().to_string(), + name: import_ty.name().to_string(), + } + } - fn into_extern(self, module: &Module, func_ty: &FuncType) -> Extern { - let func = Func::new(module.store(), func_ty.clone(), Rc::new(self)); - Extern::Func(func) - } + fn into_extern(self, module: &Module, func_ty: &FuncType) -> Extern { + let func = Func::new(module.store(), func_ty.clone(), Rc::new(self)); + Extern::Func(func) + } } impl Callable for MissingHostFuncHandler { - fn call( - &self, - _wasmtime_params: &[Val], - _wasmtime_results: &mut [Val], - ) -> Result<(), wasmtime::Trap> { - Err(Trap::new(format!( - "call to a missing function {}:{}", - self.module, self.name - ))) - } + fn call( + &self, + _wasmtime_params: &[Val], + _wasmtime_results: &mut [Val], + ) -> Result<(), wasmtime::Trap> { + Err(Trap::new(format!( + "call to a missing function {}:{}", + self.module, self.name + ))) + } } fn wasmtime_func_sig(func: &dyn Function) -> wasmtime::FuncType { - let params = func - .signature() - .args - .iter() - .cloned() - .map(into_wasmtime_val_type) - .collect::>() - .into_boxed_slice(); - let results = func - .signature() - .return_value - .iter() - .cloned() - .map(into_wasmtime_val_type) - .collect::>() - .into_boxed_slice(); - wasmtime::FuncType::new(params, results) + let params = func + .signature() + .args + .iter() + .cloned() + .map(into_wasmtime_val_type) + .collect::>() + .into_boxed_slice(); + let results = func + .signature() + .return_value + .iter() + .cloned() + .map(into_wasmtime_val_type) + .collect::>() + .into_boxed_slice(); + wasmtime::FuncType::new(params, results) } fn into_wasmtime_val_type(val_ty: ValueType) -> 
wasmtime::ValType { - match val_ty { - ValueType::I32 => wasmtime::ValType::I32, - ValueType::I64 => wasmtime::ValType::I64, - ValueType::F32 => wasmtime::ValType::F32, - ValueType::F64 => wasmtime::ValType::F64, - } + match val_ty { + ValueType::I32 => wasmtime::ValType::I32, + ValueType::I64 => wasmtime::ValType::I64, + ValueType::F32 => wasmtime::ValType::F32, + ValueType::F64 => wasmtime::ValType::F64, + } } /// Converts a `Val` into a substrate runtime interface `Value`. /// /// Panics if the given value doesn't have a corresponding variant in `Value`. fn into_value(val: Val) -> Value { - match val { - Val::I32(v) => Value::I32(v), - Val::I64(v) => Value::I64(v), - Val::F32(f_bits) => Value::F32(f_bits), - Val::F64(f_bits) => Value::F64(f_bits), - _ => panic!("Given value type is unsupported by substrate"), - } + match val { + Val::I32(v) => Value::I32(v), + Val::I64(v) => Value::I64(v), + Val::F32(f_bits) => Value::F32(f_bits), + Val::F64(f_bits) => Value::F64(f_bits), + _ => panic!("Given value type is unsupported by substrate"), + } } fn into_wasmtime_val(value: Value) -> wasmtime::Val { - match value { - Value::I32(v) => Val::I32(v), - Value::I64(v) => Val::I64(v), - Value::F32(f_bits) => Val::F32(f_bits), - Value::F64(f_bits) => Val::F64(f_bits), - } + match value { + Value::I32(v) => Val::I32(v), + Value::I64(v) => Val::I64(v), + Value::F32(f_bits) => Val::F32(f_bits), + Value::F64(f_bits) => Val::F64(f_bits), + } } /// Attempt to convert a opaque panic payload to a string. fn stringify_panic_payload(payload: Box) -> String { - match payload.downcast::<&'static str>() { - Ok(msg) => msg.to_string(), - Err(payload) => match payload.downcast::() { - Ok(msg) => *msg, - // At least we tried... - Err(_) => "Box".to_string(), - }, - } + match payload.downcast::<&'static str>() { + Ok(msg) => msg.to_string(), + Err(payload) => match payload.downcast::() { + Ok(msg) => *msg, + // At least we tried... 
+ Err(_) => "Box".to_string(), + }, + } } diff --git a/client/executor/wasmtime/src/instance_wrapper.rs b/client/executor/wasmtime/src/instance_wrapper.rs index 469668802f..c5620b2abb 100644 --- a/client/executor/wasmtime/src/instance_wrapper.rs +++ b/client/executor/wasmtime/src/instance_wrapper.rs @@ -17,57 +17,57 @@ //! Defines data and logic needed for interaction with an WebAssembly instance of a substrate //! runtime module. -use crate::util; use crate::imports::Imports; +use crate::util; -use std::{slice, marker}; use sc_executor_common::{ - error::{Error, Result}, - util::{WasmModuleInfo, DataSegmentsSnapshot}, + error::{Error, Result}, + util::{DataSegmentsSnapshot, WasmModuleInfo}, }; -use sp_wasm_interface::{Pointer, WordSize, Value}; -use wasmtime::{Store, Instance, Module, Memory, Table, Val}; +use sp_wasm_interface::{Pointer, Value, WordSize}; +use std::{marker, slice}; +use wasmtime::{Instance, Memory, Module, Store, Table, Val}; mod globals_snapshot; pub use globals_snapshot::GlobalsSnapshot; pub struct ModuleWrapper { - imported_globals_count: u32, - globals_count: u32, - module: Module, - data_segments_snapshot: DataSegmentsSnapshot, + imported_globals_count: u32, + globals_count: u32, + module: Module, + data_segments_snapshot: DataSegmentsSnapshot, } impl ModuleWrapper { - pub fn new(store: &Store, code: &[u8]) -> Result { - let module = Module::new(&store, code) - .map_err(|e| Error::from(format!("cannot create module: {}", e)))?; - - let module_info = WasmModuleInfo::new(code) - .ok_or_else(|| Error::from("cannot deserialize module".to_string()))?; - let declared_globals_count = module_info.declared_globals_count(); - let imported_globals_count = module_info.imported_globals_count(); - let globals_count = imported_globals_count + declared_globals_count; - - let data_segments_snapshot = DataSegmentsSnapshot::take(&module_info) - .map_err(|e| Error::from(format!("cannot take data segments snapshot: {}", e)))?; - - Ok(Self { - module, - 
imported_globals_count, - globals_count, - data_segments_snapshot, - }) - } - - pub fn module(&self) -> &Module { - &self.module - } - - pub fn data_segments_snapshot(&self) -> &DataSegmentsSnapshot { - &self.data_segments_snapshot - } + pub fn new(store: &Store, code: &[u8]) -> Result { + let module = Module::new(&store, code) + .map_err(|e| Error::from(format!("cannot create module: {}", e)))?; + + let module_info = WasmModuleInfo::new(code) + .ok_or_else(|| Error::from("cannot deserialize module".to_string()))?; + let declared_globals_count = module_info.declared_globals_count(); + let imported_globals_count = module_info.imported_globals_count(); + let globals_count = imported_globals_count + declared_globals_count; + + let data_segments_snapshot = DataSegmentsSnapshot::take(&module_info) + .map_err(|e| Error::from(format!("cannot take data segments snapshot: {}", e)))?; + + Ok(Self { + module, + imported_globals_count, + globals_count, + data_segments_snapshot, + }) + } + + pub fn module(&self) -> &Module { + &self.module + } + + pub fn data_segments_snapshot(&self) -> &DataSegmentsSnapshot { + &self.data_segments_snapshot + } } /// Wrap the given WebAssembly Instance of a wasm module with Substrate-runtime. @@ -75,250 +75,250 @@ impl ModuleWrapper { /// This struct is a handy wrapper around a wasmtime `Instance` that provides substrate specific /// routines. pub struct InstanceWrapper { - instance: Instance, - globals_count: u32, - imported_globals_count: u32, - // The memory instance of the `instance`. - // - // It is important to make sure that we don't make any copies of this to make it easier to proof - // See `memory_as_slice` and `memory_as_slice_mut`. - memory: Memory, - table: Option, - // Make this struct explicitly !Send & !Sync. - _not_send_nor_sync: marker::PhantomData<*const ()>, + instance: Instance, + globals_count: u32, + imported_globals_count: u32, + // The memory instance of the `instance`. 
+ // + // It is important to make sure that we don't make any copies of this to make it easier to proof + // See `memory_as_slice` and `memory_as_slice_mut`. + memory: Memory, + table: Option
, + // Make this struct explicitly !Send & !Sync. + _not_send_nor_sync: marker::PhantomData<*const ()>, } impl InstanceWrapper { - /// Create a new instance wrapper from the given wasm module. - pub fn new(module_wrapper: &ModuleWrapper, imports: &Imports, heap_pages: u32) -> Result { - let instance = Instance::new(&module_wrapper.module, &imports.externs) - .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; - - let memory = match imports.memory_import_index { - Some(memory_idx) => { - imports.externs[memory_idx] - .memory() - .expect("only memory can be at the `memory_idx`; qed") - .clone() - } - None => { - let memory = get_linear_memory(&instance)?; - if !memory.grow(heap_pages).is_ok() { - return Err("failed top increase the linear memory size".into()); - } - memory - }, - }; - - Ok(Self { - table: get_table(&instance), - instance, - globals_count: module_wrapper.globals_count, - imported_globals_count: module_wrapper.imported_globals_count, - memory, - _not_send_nor_sync: marker::PhantomData, - }) - } - - /// Resolves a substrate entrypoint by the given name. - /// - /// An entrypoint must have a signature `(i32, i32) -> i64`, otherwise this function will return - /// an error. - pub fn resolve_entrypoint(&self, name: &str) -> Result { - // Resolve the requested method and verify that it has a proper signature. - let export = self - .instance - .get_export(name) - .ok_or_else(|| Error::from(format!("Exported method {} is not found", name)))?; - let entrypoint = export - .func() - .ok_or_else(|| Error::from(format!("Export {} is not a function", name)))?; - match (entrypoint.ty().params(), entrypoint.ty().results()) { - (&[wasmtime::ValType::I32, wasmtime::ValType::I32], &[wasmtime::ValType::I64]) => {} - _ => { - return Err(Error::from(format!( - "method {} have an unsupported signature", - name - ))) - } - } - Ok(entrypoint.clone()) - } - - /// Returns an indirect function table of this instance. 
- pub fn table(&self) -> Option<&Table> { - self.table.as_ref() - } - - /// Returns the byte size of the linear memory instance attached to this instance. - pub fn memory_size(&self) -> u32 { - self.memory.data_size() as u32 - } - - /// Reads `__heap_base: i32` global variable and returns it. - /// - /// If it doesn't exist, not a global or of not i32 type returns an error. - pub fn extract_heap_base(&self) -> Result { - let heap_base_export = self - .instance - .get_export("__heap_base") - .ok_or_else(|| Error::from("__heap_base is not found"))?; - - let heap_base_global = heap_base_export - .global() - .ok_or_else(|| Error::from("__heap_base is not a global"))?; - - let heap_base = heap_base_global - .get() - .i32() - .ok_or_else(|| Error::from("__heap_base is not a i32"))?; - - Ok(heap_base as u32) - } - - /// Get the value from a global with the given `name`. - pub fn get_global_val(&self, name: &str) -> Result> { - let global = match self.instance.get_export(name) { - Some(global) => global, - None => return Ok(None), - }; - - let global = global.global().ok_or_else(|| format!("`{}` is not a global", name))?; - - match global.get() { - Val::I32(val) => Ok(Some(Value::I32(val))), - Val::I64(val) => Ok(Some(Value::I64(val))), - Val::F32(val) => Ok(Some(Value::F32(val))), - Val::F64(val) => Ok(Some(Value::F64(val))), - _ => Err("Unknown value type".into()), - } - } + /// Create a new instance wrapper from the given wasm module. 
+ pub fn new(module_wrapper: &ModuleWrapper, imports: &Imports, heap_pages: u32) -> Result { + let instance = Instance::new(&module_wrapper.module, &imports.externs) + .map_err(|e| Error::from(format!("cannot instantiate: {}", e)))?; + + let memory = match imports.memory_import_index { + Some(memory_idx) => imports.externs[memory_idx] + .memory() + .expect("only memory can be at the `memory_idx`; qed") + .clone(), + None => { + let memory = get_linear_memory(&instance)?; + if !memory.grow(heap_pages).is_ok() { + return Err("failed top increase the linear memory size".into()); + } + memory + } + }; + + Ok(Self { + table: get_table(&instance), + instance, + globals_count: module_wrapper.globals_count, + imported_globals_count: module_wrapper.imported_globals_count, + memory, + _not_send_nor_sync: marker::PhantomData, + }) + } + + /// Resolves a substrate entrypoint by the given name. + /// + /// An entrypoint must have a signature `(i32, i32) -> i64`, otherwise this function will return + /// an error. + pub fn resolve_entrypoint(&self, name: &str) -> Result { + // Resolve the requested method and verify that it has a proper signature. + let export = self + .instance + .get_export(name) + .ok_or_else(|| Error::from(format!("Exported method {} is not found", name)))?; + let entrypoint = export + .func() + .ok_or_else(|| Error::from(format!("Export {} is not a function", name)))?; + match (entrypoint.ty().params(), entrypoint.ty().results()) { + (&[wasmtime::ValType::I32, wasmtime::ValType::I32], &[wasmtime::ValType::I64]) => {} + _ => { + return Err(Error::from(format!( + "method {} have an unsupported signature", + name + ))) + } + } + Ok(entrypoint.clone()) + } + + /// Returns an indirect function table of this instance. + pub fn table(&self) -> Option<&Table> { + self.table.as_ref() + } + + /// Returns the byte size of the linear memory instance attached to this instance. 
+ pub fn memory_size(&self) -> u32 { + self.memory.data_size() as u32 + } + + /// Reads `__heap_base: i32` global variable and returns it. + /// + /// If it doesn't exist, not a global or of not i32 type returns an error. + pub fn extract_heap_base(&self) -> Result { + let heap_base_export = self + .instance + .get_export("__heap_base") + .ok_or_else(|| Error::from("__heap_base is not found"))?; + + let heap_base_global = heap_base_export + .global() + .ok_or_else(|| Error::from("__heap_base is not a global"))?; + + let heap_base = heap_base_global + .get() + .i32() + .ok_or_else(|| Error::from("__heap_base is not a i32"))?; + + Ok(heap_base as u32) + } + + /// Get the value from a global with the given `name`. + pub fn get_global_val(&self, name: &str) -> Result> { + let global = match self.instance.get_export(name) { + Some(global) => global, + None => return Ok(None), + }; + + let global = global + .global() + .ok_or_else(|| format!("`{}` is not a global", name))?; + + match global.get() { + Val::I32(val) => Ok(Some(Value::I32(val))), + Val::I64(val) => Ok(Some(Value::I64(val))), + Val::F32(val) => Ok(Some(Value::F32(val))), + Val::F64(val) => Ok(Some(Value::F64(val))), + _ => Err("Unknown value type".into()), + } + } } /// Extract linear memory instance from the given instance. fn get_linear_memory(instance: &Instance) -> Result { - let memory_export = instance - .get_export("memory") - .ok_or_else(|| Error::from("memory is not exported under `memory` name"))?; + let memory_export = instance + .get_export("memory") + .ok_or_else(|| Error::from("memory is not exported under `memory` name"))?; - let memory = memory_export - .memory() - .ok_or_else(|| Error::from("the `memory` export should have memory type"))? - .clone(); + let memory = memory_export + .memory() + .ok_or_else(|| Error::from("the `memory` export should have memory type"))? + .clone(); - Ok(memory) + Ok(memory) } /// Extract the table from the given instance if any. 
fn get_table(instance: &Instance) -> Option
{ - instance - .get_export("__indirect_function_table") - .and_then(|export| export.table()) - .cloned() + instance + .get_export("__indirect_function_table") + .and_then(|export| export.table()) + .cloned() } /// Functions realted to memory. impl InstanceWrapper { - /// Read data from a slice of memory into a destination buffer. - /// - /// Returns an error if the read would go out of the memory bounds. - pub fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice(); - - let range = util::checked_range(address.into(), dest.len(), memory.len()) - .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; - dest.copy_from_slice(&memory[range]); - Ok(()) - } - } - - /// Write data to a slice of memory. - /// - /// Returns an error if the write would go out of the memory bounds. - pub fn write_memory_from(&self, address: Pointer, data: &[u8]) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); - - let range = util::checked_range(address.into(), data.len(), memory.len()) - .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; - &mut memory[range].copy_from_slice(data); - Ok(()) - } - } - - /// Allocate some memory of the given size. Returns pointer to the allocated memory region. - /// - /// Returns `Err` in case memory cannot be allocated. Refer to the allocator documentation - /// to get more details. 
- pub fn allocate( - &self, - allocator: &mut sp_allocator::FreeingBumpHeapAllocator, - size: WordSize, - ) -> Result> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); - - allocator.allocate(memory, size).map_err(Into::into) - } - } - - /// Deallocate the memory pointed by the given pointer. - /// - /// Returns `Err` in case the given memory region cannot be deallocated. - pub fn deallocate( - &self, - allocator: &mut sp_allocator::FreeingBumpHeapAllocator, - ptr: Pointer, - ) -> Result<()> { - unsafe { - // This should be safe since we don't grow up memory while caching this reference and - // we give up the reference before returning from this function. - let memory = self.memory_as_slice_mut(); - - allocator.deallocate(memory, ptr).map_err(Into::into) - } - } - - /// Returns linear memory of the wasm instance as a slice. - /// - /// # Safety - /// - /// Wasmtime doesn't provide comprehensive documentation about the exact behavior of the data - /// pointer. If a dynamic style heap is used the base pointer of the heap can change. Since - /// growing, we cannot guarantee the lifetime of the returned slice reference. - unsafe fn memory_as_slice(&self) -> &[u8] { - let ptr = self.memory.data_ptr() as *const _; - let len = self.memory.data_size(); - - if len == 0 { - &[] - } else { - slice::from_raw_parts(ptr, len) - } - } - - /// Returns linear memory of the wasm instance as a slice. - /// - /// # Safety - /// - /// See `[memory_as_slice]`. In addition to those requirements, since a mutable reference is - /// returned it must be ensured that only one mutable and no shared references to memory exists - /// at the same time. 
- unsafe fn memory_as_slice_mut(&self) -> &mut [u8] { - let ptr = self.memory.data_ptr(); - let len = self.memory.data_size(); - - if len == 0 { - &mut [] - } else { - slice::from_raw_parts_mut(ptr, len) - } - } + /// Read data from a slice of memory into a destination buffer. + /// + /// Returns an error if the read would go out of the memory bounds. + pub fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> Result<()> { + unsafe { + // This should be safe since we don't grow up memory while caching this reference and + // we give up the reference before returning from this function. + let memory = self.memory_as_slice(); + + let range = util::checked_range(address.into(), dest.len(), memory.len()) + .ok_or_else(|| Error::Other("memory read is out of bounds".into()))?; + dest.copy_from_slice(&memory[range]); + Ok(()) + } + } + + /// Write data to a slice of memory. + /// + /// Returns an error if the write would go out of the memory bounds. + pub fn write_memory_from(&self, address: Pointer, data: &[u8]) -> Result<()> { + unsafe { + // This should be safe since we don't grow up memory while caching this reference and + // we give up the reference before returning from this function. + let memory = self.memory_as_slice_mut(); + + let range = util::checked_range(address.into(), data.len(), memory.len()) + .ok_or_else(|| Error::Other("memory write is out of bounds".into()))?; + &mut memory[range].copy_from_slice(data); + Ok(()) + } + } + + /// Allocate some memory of the given size. Returns pointer to the allocated memory region. + /// + /// Returns `Err` in case memory cannot be allocated. Refer to the allocator documentation + /// to get more details. + pub fn allocate( + &self, + allocator: &mut sp_allocator::FreeingBumpHeapAllocator, + size: WordSize, + ) -> Result> { + unsafe { + // This should be safe since we don't grow up memory while caching this reference and + // we give up the reference before returning from this function. 
+ let memory = self.memory_as_slice_mut(); + + allocator.allocate(memory, size).map_err(Into::into) + } + } + + /// Deallocate the memory pointed by the given pointer. + /// + /// Returns `Err` in case the given memory region cannot be deallocated. + pub fn deallocate( + &self, + allocator: &mut sp_allocator::FreeingBumpHeapAllocator, + ptr: Pointer, + ) -> Result<()> { + unsafe { + // This should be safe since we don't grow up memory while caching this reference and + // we give up the reference before returning from this function. + let memory = self.memory_as_slice_mut(); + + allocator.deallocate(memory, ptr).map_err(Into::into) + } + } + + /// Returns linear memory of the wasm instance as a slice. + /// + /// # Safety + /// + /// Wasmtime doesn't provide comprehensive documentation about the exact behavior of the data + /// pointer. If a dynamic style heap is used the base pointer of the heap can change. Since + /// growing, we cannot guarantee the lifetime of the returned slice reference. + unsafe fn memory_as_slice(&self) -> &[u8] { + let ptr = self.memory.data_ptr() as *const _; + let len = self.memory.data_size(); + + if len == 0 { + &[] + } else { + slice::from_raw_parts(ptr, len) + } + } + + /// Returns linear memory of the wasm instance as a slice. + /// + /// # Safety + /// + /// See `[memory_as_slice]`. In addition to those requirements, since a mutable reference is + /// returned it must be ensured that only one mutable and no shared references to memory exists + /// at the same time. 
+ unsafe fn memory_as_slice_mut(&self) -> &mut [u8] { + let ptr = self.memory.data_ptr(); + let len = self.memory.data_size(); + + if len == 0 { + &mut [] + } else { + slice::from_raw_parts_mut(ptr, len) + } + } } diff --git a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs index a6ab3fed60..a0b68d6e67 100644 --- a/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs +++ b/client/executor/wasmtime/src/instance_wrapper/globals_snapshot.rs @@ -15,12 +15,10 @@ // along with Substrate. If not, see . use super::InstanceWrapper; -use sc_executor_common::{ - error::{Error, Result}, -}; -use sp_wasm_interface::Value; use cranelift_codegen::ir; use cranelift_wasm::GlobalIndex; +use sc_executor_common::error::{Error, Result}; +use sp_wasm_interface::Value; /// A snapshot of a global variables values. This snapshot can be used later for restoring the /// values to the preserved state. @@ -28,103 +26,103 @@ use cranelift_wasm::GlobalIndex; /// Technically, a snapshot stores only values of mutable global variables. This is because /// immutable global variables always have the same values. pub struct GlobalsSnapshot { - handle: wasmtime_runtime::InstanceHandle, - preserved_mut_globals: Vec<(*mut wasmtime_runtime::VMGlobalDefinition, Value)>, + handle: wasmtime_runtime::InstanceHandle, + preserved_mut_globals: Vec<(*mut wasmtime_runtime::VMGlobalDefinition, Value)>, } impl GlobalsSnapshot { - /// Take a snapshot of global variables for a given instance. - pub fn take(instance_wrapper: &InstanceWrapper) -> Result { - // EVIL: - // Usage of an undocumented function. - let handle = instance_wrapper.instance.handle().clone(); + /// Take a snapshot of global variables for a given instance. + pub fn take(instance_wrapper: &InstanceWrapper) -> Result { + // EVIL: + // Usage of an undocumented function. 
+ let handle = instance_wrapper.instance.handle().clone(); - let mut preserved_mut_globals = vec![]; + let mut preserved_mut_globals = vec![]; - for global_idx in instance_wrapper.imported_globals_count..instance_wrapper.globals_count { - let (def, global) = match handle.lookup_by_declaration( - &wasmtime_environ::Export::Global(GlobalIndex::from_u32(global_idx)), - ) { - wasmtime_runtime::Export::Global { - definition, global, .. - } => (definition, global), - _ => unreachable!("only globals can be returned for a global request"), - }; + for global_idx in instance_wrapper.imported_globals_count..instance_wrapper.globals_count { + let (def, global) = match handle.lookup_by_declaration( + &wasmtime_environ::Export::Global(GlobalIndex::from_u32(global_idx)), + ) { + wasmtime_runtime::Export::Global { + definition, global, .. + } => (definition, global), + _ => unreachable!("only globals can be returned for a global request"), + }; - // skip immutable globals. - if !global.mutability { - continue; - } + // skip immutable globals. + if !global.mutability { + continue; + } - let value = unsafe { - // Safety of this function solely depends on the correctness of the reference and - // the type information of the global. - read_global(def, global.ty)? - }; - preserved_mut_globals.push((def, value)); - } + let value = unsafe { + // Safety of this function solely depends on the correctness of the reference and + // the type information of the global. + read_global(def, global.ty)? + }; + preserved_mut_globals.push((def, value)); + } - Ok(Self { - preserved_mut_globals, - handle, - }) - } + Ok(Self { + preserved_mut_globals, + handle, + }) + } - /// Apply the snapshot to the given instance. - /// - /// This instance must be the same that was used for creation of this snapshot. 
- pub fn apply(&self, instance_wrapper: &InstanceWrapper) -> Result<()> { - if instance_wrapper.instance.handle() != &self.handle { - return Err(Error::from("unexpected instance handle".to_string())); - } + /// Apply the snapshot to the given instance. + /// + /// This instance must be the same that was used for creation of this snapshot. + pub fn apply(&self, instance_wrapper: &InstanceWrapper) -> Result<()> { + if instance_wrapper.instance.handle() != &self.handle { + return Err(Error::from("unexpected instance handle".to_string())); + } - for (def, value) in &self.preserved_mut_globals { - unsafe { - // The following writes are safe if the precondition that this is the same instance - // this snapshot was created with: - // - // 1. These pointers must be still not-NULL and allocated. - // 2. The set of global variables is fixed for the lifetime of the same instance. - // 3. We obviously assume that the wasmtime references are correct in the first place. - // 4. We write the data with the same type it was read in the first place. - write_global(*def, *value)?; - } - } - Ok(()) - } + for (def, value) in &self.preserved_mut_globals { + unsafe { + // The following writes are safe if the precondition that this is the same instance + // this snapshot was created with: + // + // 1. These pointers must be still not-NULL and allocated. + // 2. The set of global variables is fixed for the lifetime of the same instance. + // 3. We obviously assume that the wasmtime references are correct in the first place. + // 4. We write the data with the same type it was read in the first place. 
+ write_global(*def, *value)?; + } + } + Ok(()) + } } unsafe fn read_global( - def: *const wasmtime_runtime::VMGlobalDefinition, - ty: ir::Type, + def: *const wasmtime_runtime::VMGlobalDefinition, + ty: ir::Type, ) -> Result { - let def = def - .as_ref() - .ok_or_else(|| Error::from("wasmtime global reference is null during read".to_string()))?; - let val = match ty { - ir::types::I32 => Value::I32(*def.as_i32()), - ir::types::I64 => Value::I64(*def.as_i64()), - ir::types::F32 => Value::F32(*def.as_u32()), - ir::types::F64 => Value::F64(*def.as_u64()), - _ => { - return Err(Error::from(format!( - "unsupported global variable type: {}", - ty - ))) - } - }; - Ok(val) + let def = def + .as_ref() + .ok_or_else(|| Error::from("wasmtime global reference is null during read".to_string()))?; + let val = match ty { + ir::types::I32 => Value::I32(*def.as_i32()), + ir::types::I64 => Value::I64(*def.as_i64()), + ir::types::F32 => Value::F32(*def.as_u32()), + ir::types::F64 => Value::F64(*def.as_u64()), + _ => { + return Err(Error::from(format!( + "unsupported global variable type: {}", + ty + ))) + } + }; + Ok(val) } unsafe fn write_global(def: *mut wasmtime_runtime::VMGlobalDefinition, value: Value) -> Result<()> { - let def = def - .as_mut() - .ok_or_else(|| Error::from("wasmtime global reference is null during write".to_string()))?; - match value { - Value::I32(v) => *def.as_i32_mut() = v, - Value::I64(v) => *def.as_i64_mut() = v, - Value::F32(v) => *def.as_u32_mut() = v, - Value::F64(v) => *def.as_u64_mut() = v, - } - Ok(()) + let def = def + .as_mut() + .ok_or_else(|| Error::from("wasmtime global reference is null during write".to_string()))?; + match value { + Value::I32(v) => *def.as_i32_mut() = v, + Value::I64(v) => *def.as_i64_mut() = v, + Value::F32(v) => *def.as_u32_mut() = v, + Value::F64(v) => *def.as_u64_mut() = v, + } + Ok(()) } diff --git a/client/executor/wasmtime/src/lib.rs b/client/executor/wasmtime/src/lib.rs index 66e4e08523..eaf0f45594 100644 --- 
a/client/executor/wasmtime/src/lib.rs +++ b/client/executor/wasmtime/src/lib.rs @@ -15,12 +15,11 @@ // along with Substrate. If not, see . ///! Defines a `WasmRuntime` that uses the Wasmtime JIT to execute. - mod host; -mod runtime; -mod state_holder; mod imports; mod instance_wrapper; +mod runtime; +mod state_holder; mod util; pub use runtime::create_runtime; diff --git a/client/executor/wasmtime/src/runtime.rs b/client/executor/wasmtime/src/runtime.rs index 0289188ba1..24ea9a71c8 100644 --- a/client/executor/wasmtime/src/runtime.rs +++ b/client/executor/wasmtime/src/runtime.rs @@ -17,66 +17,66 @@ //! Defines the compiled Wasm runtime that uses Wasmtime internally. use crate::host::HostState; -use crate::imports::{Imports, resolve_imports}; -use crate::instance_wrapper::{ModuleWrapper, InstanceWrapper, GlobalsSnapshot}; +use crate::imports::{resolve_imports, Imports}; +use crate::instance_wrapper::{GlobalsSnapshot, InstanceWrapper, ModuleWrapper}; use crate::state_holder; -use std::rc::Rc; -use std::sync::Arc; use sc_executor_common::{ - error::{Error, Result, WasmError}, - wasm_runtime::{WasmModule, WasmInstance}, + error::{Error, Result, WasmError}, + wasm_runtime::{WasmInstance, WasmModule}, }; use sp_allocator::FreeingBumpHeapAllocator; use sp_runtime_interface::unpack_ptr_and_len; -use sp_wasm_interface::{Function, Pointer, WordSize, Value}; +use sp_wasm_interface::{Function, Pointer, Value, WordSize}; +use std::rc::Rc; +use std::sync::Arc; use wasmtime::{Config, Engine, Store}; /// A `WasmModule` implementation using wasmtime to compile the runtime module to machine code /// and execute the compiled code. 
pub struct WasmtimeRuntime { - module_wrapper: Arc, - heap_pages: u32, - allow_missing_func_imports: bool, - host_functions: Vec<&'static dyn Function>, + module_wrapper: Arc, + heap_pages: u32, + allow_missing_func_imports: bool, + host_functions: Vec<&'static dyn Function>, } impl WasmModule for WasmtimeRuntime { - fn new_instance(&self) -> Result> { - // Scan all imports, find the matching host functions, and create stubs that adapt arguments - // and results. - let imports = resolve_imports( - self.module_wrapper.module(), - &self.host_functions, - self.heap_pages, - self.allow_missing_func_imports, - )?; - - let instance_wrapper = - InstanceWrapper::new(&self.module_wrapper, &imports, self.heap_pages)?; - let heap_base = instance_wrapper.extract_heap_base()?; - let globals_snapshot = GlobalsSnapshot::take(&instance_wrapper)?; - - Ok(Box::new(WasmtimeInstance { - instance_wrapper: Rc::new(instance_wrapper), - module_wrapper: Arc::clone(&self.module_wrapper), - imports, - globals_snapshot, - heap_pages: self.heap_pages, - heap_base, - })) - } + fn new_instance(&self) -> Result> { + // Scan all imports, find the matching host functions, and create stubs that adapt arguments + // and results. + let imports = resolve_imports( + self.module_wrapper.module(), + &self.host_functions, + self.heap_pages, + self.allow_missing_func_imports, + )?; + + let instance_wrapper = + InstanceWrapper::new(&self.module_wrapper, &imports, self.heap_pages)?; + let heap_base = instance_wrapper.extract_heap_base()?; + let globals_snapshot = GlobalsSnapshot::take(&instance_wrapper)?; + + Ok(Box::new(WasmtimeInstance { + instance_wrapper: Rc::new(instance_wrapper), + module_wrapper: Arc::clone(&self.module_wrapper), + imports, + globals_snapshot, + heap_pages: self.heap_pages, + heap_base, + })) + } } /// A `WasmInstance` implementation that reuses compiled module and spawns instances /// to execute the compiled code. 
pub struct WasmtimeInstance { - module_wrapper: Arc, - instance_wrapper: Rc, - globals_snapshot: GlobalsSnapshot, - imports: Imports, - heap_pages: u32, - heap_base: u32, + module_wrapper: Arc, + instance_wrapper: Rc, + globals_snapshot: GlobalsSnapshot, + imports: Imports, + heap_pages: u32, + heap_base: u32, } // This is safe because `WasmtimeInstance` does not leak reference to `self.imports` @@ -84,108 +84,108 @@ pub struct WasmtimeInstance { unsafe impl Send for WasmtimeInstance {} impl WasmInstance for WasmtimeInstance { - fn call(&self, method: &str, data: &[u8]) -> Result> { - let entrypoint = self.instance_wrapper.resolve_entrypoint(method)?; - let allocator = FreeingBumpHeapAllocator::new(self.heap_base); - - self.module_wrapper - .data_segments_snapshot() - .apply(|offset, contents| { - self.instance_wrapper - .write_memory_from(Pointer::new(offset), contents) - })?; - - self.globals_snapshot.apply(&*self.instance_wrapper)?; - - perform_call( - data, - Rc::clone(&self.instance_wrapper), - entrypoint, - allocator, - ) - } - - fn get_global_const(&self, name: &str) -> Result> { - let instance = InstanceWrapper::new(&self.module_wrapper, &self.imports, self.heap_pages)?; - instance.get_global_val(name) - } + fn call(&self, method: &str, data: &[u8]) -> Result> { + let entrypoint = self.instance_wrapper.resolve_entrypoint(method)?; + let allocator = FreeingBumpHeapAllocator::new(self.heap_base); + + self.module_wrapper + .data_segments_snapshot() + .apply(|offset, contents| { + self.instance_wrapper + .write_memory_from(Pointer::new(offset), contents) + })?; + + self.globals_snapshot.apply(&*self.instance_wrapper)?; + + perform_call( + data, + Rc::clone(&self.instance_wrapper), + entrypoint, + allocator, + ) + } + + fn get_global_const(&self, name: &str) -> Result> { + let instance = InstanceWrapper::new(&self.module_wrapper, &self.imports, self.heap_pages)?; + instance.get_global_val(name) + } } /// Create a new `WasmtimeRuntime` given the code. 
This function performs translation from Wasm to /// machine code, which can be computationally heavy. pub fn create_runtime( - code: &[u8], - heap_pages: u64, - host_functions: Vec<&'static dyn Function>, - allow_missing_func_imports: bool, + code: &[u8], + heap_pages: u64, + host_functions: Vec<&'static dyn Function>, + allow_missing_func_imports: bool, ) -> std::result::Result { - // Create the engine, store and finally the module from the given code. - let mut config = Config::new(); - config.cranelift_opt_level(wasmtime::OptLevel::SpeedAndSize); - - let engine = Engine::new(&config); - let store = Store::new(&engine); - - let module_wrapper = ModuleWrapper::new(&store, code) - .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; - - Ok(WasmtimeRuntime { - module_wrapper: Arc::new(module_wrapper), - heap_pages: heap_pages as u32, - allow_missing_func_imports, - host_functions, - }) + // Create the engine, store and finally the module from the given code. + let mut config = Config::new(); + config.cranelift_opt_level(wasmtime::OptLevel::SpeedAndSize); + + let engine = Engine::new(&config); + let store = Store::new(&engine); + + let module_wrapper = ModuleWrapper::new(&store, code) + .map_err(|e| WasmError::Other(format!("cannot create module: {}", e)))?; + + Ok(WasmtimeRuntime { + module_wrapper: Arc::new(module_wrapper), + heap_pages: heap_pages as u32, + allow_missing_func_imports, + host_functions, + }) } fn perform_call( - data: &[u8], - instance_wrapper: Rc, - entrypoint: wasmtime::Func, - mut allocator: FreeingBumpHeapAllocator, + data: &[u8], + instance_wrapper: Rc, + entrypoint: wasmtime::Func, + mut allocator: FreeingBumpHeapAllocator, ) -> Result> { - let (data_ptr, data_len) = inject_input_data(&instance_wrapper, &mut allocator, data)?; - - let host_state = HostState::new(allocator, instance_wrapper.clone()); - let ret = state_holder::with_initialized_state(&host_state, || { - match entrypoint.call(&[ - 
wasmtime::Val::I32(u32::from(data_ptr) as i32), - wasmtime::Val::I32(u32::from(data_len) as i32), - ]) { - Ok(results) => { - let retval = results[0].unwrap_i64() as u64; - Ok(unpack_ptr_and_len(retval)) - } - Err(trap) => { - return Err(Error::from(format!( - "Wasm execution trapped: {}", - trap.message() - ))); - } - } - }); - let (output_ptr, output_len) = ret?; - let output = extract_output_data(&instance_wrapper, output_ptr, output_len)?; - - Ok(output) + let (data_ptr, data_len) = inject_input_data(&instance_wrapper, &mut allocator, data)?; + + let host_state = HostState::new(allocator, instance_wrapper.clone()); + let ret = state_holder::with_initialized_state(&host_state, || { + match entrypoint.call(&[ + wasmtime::Val::I32(u32::from(data_ptr) as i32), + wasmtime::Val::I32(u32::from(data_len) as i32), + ]) { + Ok(results) => { + let retval = results[0].unwrap_i64() as u64; + Ok(unpack_ptr_and_len(retval)) + } + Err(trap) => { + return Err(Error::from(format!( + "Wasm execution trapped: {}", + trap.message() + ))); + } + } + }); + let (output_ptr, output_len) = ret?; + let output = extract_output_data(&instance_wrapper, output_ptr, output_len)?; + + Ok(output) } fn inject_input_data( - instance: &InstanceWrapper, - allocator: &mut FreeingBumpHeapAllocator, - data: &[u8], + instance: &InstanceWrapper, + allocator: &mut FreeingBumpHeapAllocator, + data: &[u8], ) -> Result<(Pointer, WordSize)> { - let data_len = data.len() as WordSize; - let data_ptr = instance.allocate(allocator, data_len)?; - instance.write_memory_from(data_ptr, data)?; - Ok((data_ptr, data_len)) + let data_len = data.len() as WordSize; + let data_ptr = instance.allocate(allocator, data_len)?; + instance.write_memory_from(data_ptr, data)?; + Ok((data_ptr, data_len)) } fn extract_output_data( - instance: &InstanceWrapper, - output_ptr: u32, - output_len: u32, + instance: &InstanceWrapper, + output_ptr: u32, + output_len: u32, ) -> Result> { - let mut output = vec![0; output_len as usize]; - 
instance.read_memory_into(Pointer::new(output_ptr), &mut output)?; - Ok(output) + let mut output = vec![0; output_len as usize]; + instance.read_memory_into(Pointer::new(output_ptr), &mut output)?; + Ok(output) } diff --git a/client/executor/wasmtime/src/state_holder.rs b/client/executor/wasmtime/src/state_holder.rs index 42cb79e7a3..d9b8dc71bd 100644 --- a/client/executor/wasmtime/src/state_holder.rs +++ b/client/executor/wasmtime/src/state_holder.rs @@ -23,9 +23,9 @@ scoped_tls::scoped_thread_local!(static HOST_STATE: HostState); /// During the execution of the provided function `with_context` will be callable. pub fn with_initialized_state(s: &HostState, f: F) -> R where - F: FnOnce() -> R, + F: FnOnce() -> R, { - HOST_STATE.set(s, f) + HOST_STATE.set(s, f) } /// Create a `HostContext` from the contained `HostState` and execute the given function `f`. @@ -34,10 +34,10 @@ where /// context will be `None`. pub fn with_context(f: F) -> R where - F: FnOnce(Option) -> R, + F: FnOnce(Option) -> R, { - if !HOST_STATE.is_set() { - return f(None) - } - HOST_STATE.with(|state| f(Some(state.materialize()))) + if !HOST_STATE.is_set() { + return f(None); + } + HOST_STATE.with(|state| f(Some(state.materialize()))) } diff --git a/client/executor/wasmtime/src/util.rs b/client/executor/wasmtime/src/util.rs index d2de95d4cc..9e8c5f4de7 100644 --- a/client/executor/wasmtime/src/util.rs +++ b/client/executor/wasmtime/src/util.rs @@ -19,10 +19,10 @@ use std::ops::Range; /// Construct a range from an offset to a data length after the offset. /// Returns None if the end of the range would exceed some maximum offset. 
pub fn checked_range(offset: usize, len: usize, max: usize) -> Option> { - let end = offset.checked_add(len)?; - if end <= max { - Some(offset..end) - } else { - None - } + let end = offset.checked_add(len)?; + if end <= max { + Some(offset..end) + } else { + None + } } diff --git a/client/finality-grandpa/src/authorities.rs b/client/finality-grandpa/src/authorities.rs index fe3f2dd19e..626d5e57bf 100644 --- a/client/finality-grandpa/src/authorities.rs +++ b/client/finality-grandpa/src/authorities.rs @@ -16,11 +16,11 @@ //! Utilities for dealing with authorities, authority sets, and handoffs. -use fork_tree::ForkTree; -use parking_lot::RwLock; use finality_grandpa::voter_set::VoterSet; -use parity_scale_codec::{Encode, Decode}; +use fork_tree::ForkTree; use log::debug; +use parity_scale_codec::{Decode, Encode}; +use parking_lot::RwLock; use sc_telemetry::{telemetry, CONSENSUS_INFO}; use sp_finality_grandpa::{AuthorityId, AuthorityList}; @@ -31,366 +31,387 @@ use std::sync::Arc; /// A shared authority set. pub(crate) struct SharedAuthoritySet { - inner: Arc>>, + inner: Arc>>, } impl Clone for SharedAuthoritySet { - fn clone(&self) -> Self { - SharedAuthoritySet { inner: self.inner.clone() } - } + fn clone(&self) -> Self { + SharedAuthoritySet { + inner: self.inner.clone(), + } + } } impl SharedAuthoritySet { - /// Acquire a reference to the inner read-write lock. - pub(crate) fn inner(&self) -> &RwLock> { - &*self.inner - } + /// Acquire a reference to the inner read-write lock. + pub(crate) fn inner(&self) -> &RwLock> { + &*self.inner + } } impl SharedAuthoritySet -where N: Add + Ord + Clone + Debug, - H: Clone + Debug +where + N: Add + Ord + Clone + Debug, + H: Clone + Debug, { - /// Get the earliest limit-block number that's higher or equal to the given - /// min number, if any. - pub(crate) fn current_limit(&self, min: N) -> Option { - self.inner.read().current_limit(min) - } - - /// Get the current set ID. This is incremented every time the set changes. 
- pub(crate) fn set_id(&self) -> u64 { - self.inner.read().set_id - } - - /// Get the current authorities and their weights (for the current set ID). - pub(crate) fn current_authorities(&self) -> VoterSet { - self.inner.read().current_authorities.iter().cloned().collect() - } + /// Get the earliest limit-block number that's higher or equal to the given + /// min number, if any. + pub(crate) fn current_limit(&self, min: N) -> Option { + self.inner.read().current_limit(min) + } + + /// Get the current set ID. This is incremented every time the set changes. + pub(crate) fn set_id(&self) -> u64 { + self.inner.read().set_id + } + + /// Get the current authorities and their weights (for the current set ID). + pub(crate) fn current_authorities(&self) -> VoterSet { + self.inner + .read() + .current_authorities + .iter() + .cloned() + .collect() + } } impl From> for SharedAuthoritySet { - fn from(set: AuthoritySet) -> Self { - SharedAuthoritySet { inner: Arc::new(RwLock::new(set)) } - } + fn from(set: AuthoritySet) -> Self { + SharedAuthoritySet { + inner: Arc::new(RwLock::new(set)), + } + } } /// Status of the set after changes were applied. #[derive(Debug)] pub(crate) struct Status { - /// Whether internal changes were made. - pub(crate) changed: bool, - /// `Some` when underlying authority set has changed, containing the - /// block where that set changed. - pub(crate) new_set_block: Option<(H, N)>, + /// Whether internal changes were made. + pub(crate) changed: bool, + /// `Some` when underlying authority set has changed, containing the + /// block where that set changed. + pub(crate) new_set_block: Option<(H, N)>, } /// A set of authorities. #[derive(Debug, Clone, Encode, Decode, PartialEq)] pub(crate) struct AuthoritySet { - pub(crate) current_authorities: AuthorityList, - pub(crate) set_id: u64, - // Tree of pending standard changes across forks. Standard changes are - // enacted on finality and must be enacted (i.e. 
finalized) in-order across - // a given branch - pub(crate) pending_standard_changes: ForkTree>, - // Pending forced changes across different forks (at most one per fork). - // Forced changes are enacted on block depth (not finality), for this reason - // only one forced change should exist per fork. - pub(crate) pending_forced_changes: Vec>, + pub(crate) current_authorities: AuthorityList, + pub(crate) set_id: u64, + // Tree of pending standard changes across forks. Standard changes are + // enacted on finality and must be enacted (i.e. finalized) in-order across + // a given branch + pub(crate) pending_standard_changes: ForkTree>, + // Pending forced changes across different forks (at most one per fork). + // Forced changes are enacted on block depth (not finality), for this reason + // only one forced change should exist per fork. + pub(crate) pending_forced_changes: Vec>, } impl AuthoritySet -where H: PartialEq, - N: Ord, +where + H: PartialEq, + N: Ord, { - /// Get a genesis set with given authorities. - pub(crate) fn genesis(initial: AuthorityList) -> Self { - AuthoritySet { - current_authorities: initial, - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - } - } - - /// Get the current set id and a reference to the current authority set. - pub(crate) fn current(&self) -> (u64, &[(AuthorityId, u64)]) { - (self.set_id, &self.current_authorities[..]) - } + /// Get a genesis set with given authorities. + pub(crate) fn genesis(initial: AuthorityList) -> Self { + AuthoritySet { + current_authorities: initial, + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + } + } + + /// Get the current set id and a reference to the current authority set. 
+ pub(crate) fn current(&self) -> (u64, &[(AuthorityId, u64)]) { + (self.set_id, &self.current_authorities[..]) + } } impl AuthoritySet where - N: Add + Ord + Clone + Debug, - H: Clone + Debug + N: Add + Ord + Clone + Debug, + H: Clone + Debug, { - fn add_standard_change( - &mut self, - pending: PendingChange, - is_descendent_of: &F, - ) -> Result<(), fork_tree::Error> where - F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - let hash = pending.canon_hash.clone(); - let number = pending.canon_height.clone(); - - debug!(target: "afg", "Inserting potential standard set change signaled at block {:?} \ + fn add_standard_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + let hash = pending.canon_hash.clone(); + let number = pending.canon_height.clone(); + + debug!(target: "afg", "Inserting potential standard set change signaled at block {:?} \ (delayed by {:?} blocks).", (&number, &hash), pending.delay); - self.pending_standard_changes.import( - hash.clone(), - number.clone(), - pending, - is_descendent_of, - )?; - - debug!(target: "afg", "There are now {} alternatives for the next pending standard change (roots), \ - and a total of {} pending standard changes (across all forks).", - self.pending_standard_changes.roots().count(), - self.pending_standard_changes.iter().count(), - ); - - Ok(()) - } - - fn add_forced_change( - &mut self, - pending: PendingChange, - is_descendent_of: &F, - ) -> Result<(), fork_tree::Error> where - F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - for change in self.pending_forced_changes.iter() { - if change.canon_hash == pending.canon_hash || - is_descendent_of(&change.canon_hash, &pending.canon_hash)? - { - return Err(fork_tree::Error::UnfinalizedAncestor); - } - } - - // ordered first by effective number and then by signal-block number. 
- let key = (pending.effective_number(), pending.canon_height.clone()); - let idx = self.pending_forced_changes - .binary_search_by_key(&key, |change| ( - change.effective_number(), - change.canon_height.clone(), - )) - .unwrap_or_else(|i| i); - - debug!(target: "afg", "Inserting potential forced set change at block {:?} \ + self.pending_standard_changes.import( + hash.clone(), + number.clone(), + pending, + is_descendent_of, + )?; + + debug!(target: "afg", "There are now {} alternatives for the next pending standard change (roots), \ + and a total of {} pending standard changes (across all forks).", + self.pending_standard_changes.roots().count(), + self.pending_standard_changes.iter().count(), + ); + + Ok(()) + } + + fn add_forced_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + for change in self.pending_forced_changes.iter() { + if change.canon_hash == pending.canon_hash + || is_descendent_of(&change.canon_hash, &pending.canon_hash)? + { + return Err(fork_tree::Error::UnfinalizedAncestor); + } + } + + // ordered first by effective number and then by signal-block number. + let key = (pending.effective_number(), pending.canon_height.clone()); + let idx = self + .pending_forced_changes + .binary_search_by_key(&key, |change| { + (change.effective_number(), change.canon_height.clone()) + }) + .unwrap_or_else(|i| i); + + debug!(target: "afg", "Inserting potential forced set change at block {:?} \ (delayed by {:?} blocks).", (&pending.canon_height, &pending.canon_hash), pending.delay); - self.pending_forced_changes.insert(idx, pending); - - debug!(target: "afg", "There are now {} pending forced changes.", self.pending_forced_changes.len()); - - Ok(()) - } - - /// Note an upcoming pending transition. Multiple pending standard changes - /// on the same branch can be added as long as they don't overlap. 
Forced - /// changes are restricted to one per fork. This method assumes that changes - /// on the same branch will be added in-order. The given function - /// `is_descendent_of` should return `true` if the second hash (target) is a - /// descendent of the first hash (base). - pub(crate) fn add_pending_change( - &mut self, - pending: PendingChange, - is_descendent_of: &F, - ) -> Result<(), fork_tree::Error> where - F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - match pending.delay_kind { - DelayKind::Best { .. } => { - self.add_forced_change(pending, is_descendent_of) - }, - DelayKind::Finalized => { - self.add_standard_change(pending, is_descendent_of) - }, - } - } - - /// Inspect pending changes. Standard pending changes are iterated first, - /// and the changes in the tree are traversed in pre-order, afterwards all - /// forced changes are iterated. - pub(crate) fn pending_changes(&self) -> impl Iterator> { - self.pending_standard_changes.iter().map(|(_, _, c)| c) - .chain(self.pending_forced_changes.iter()) - } - - /// Get the earliest limit-block number, if any. If there are pending changes across - /// different forks, this method will return the earliest effective number (across the - /// different branches) that is higher or equal to the given min number. - /// - /// Only standard changes are taken into account for the current - /// limit, since any existing forced change should preclude the voter from voting. - pub(crate) fn current_limit(&self, min: N) -> Option { - self.pending_standard_changes.roots() - .filter(|&(_, _, c)| c.effective_number() >= min) - .min_by_key(|&(_, _, c)| c.effective_number()) - .map(|(_, _, c)| c.effective_number()) - } - - /// Apply or prune any pending transitions based on a best-block trigger. - /// - /// Returns `Ok((median, new_set))` when a forced change has occurred. 
The - /// median represents the median last finalized block at the time the change - /// was signaled, and it should be used as the canon block when starting the - /// new grandpa voter. Only alters the internal state in this case. - /// - /// These transitions are always forced and do not lead to justifications - /// which light clients can follow. - pub(crate) fn apply_forced_changes( - &self, - best_hash: H, - best_number: N, - is_descendent_of: &F, - initial_sync: bool, - ) -> Result, E> - where F: Fn(&H, &H) -> Result, - { - let mut new_set = None; - - for change in self.pending_forced_changes.iter() - .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far - .filter(|c| c.effective_number() == best_number) - { - // check if the given best block is in the same branch as the block that signaled the change. - if is_descendent_of(&change.canon_hash, &best_hash)? { - // apply this change: make the set canonical - afg_log!(initial_sync, - "👴 Applying authority set change forced at block #{:?}", - change.canon_height, - ); - telemetry!(CONSENSUS_INFO; "afg.applying_forced_authority_set_change"; - "block" => ?change.canon_height - ); - - let median_last_finalized = match change.delay_kind { + self.pending_forced_changes.insert(idx, pending); + + debug!(target: "afg", "There are now {} pending forced changes.", self.pending_forced_changes.len()); + + Ok(()) + } + + /// Note an upcoming pending transition. Multiple pending standard changes + /// on the same branch can be added as long as they don't overlap. Forced + /// changes are restricted to one per fork. This method assumes that changes + /// on the same branch will be added in-order. The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). 
+ pub(crate) fn add_pending_change( + &mut self, + pending: PendingChange, + is_descendent_of: &F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + match pending.delay_kind { + DelayKind::Best { .. } => self.add_forced_change(pending, is_descendent_of), + DelayKind::Finalized => self.add_standard_change(pending, is_descendent_of), + } + } + + /// Inspect pending changes. Standard pending changes are iterated first, + /// and the changes in the tree are traversed in pre-order, afterwards all + /// forced changes are iterated. + pub(crate) fn pending_changes(&self) -> impl Iterator> { + self.pending_standard_changes + .iter() + .map(|(_, _, c)| c) + .chain(self.pending_forced_changes.iter()) + } + + /// Get the earliest limit-block number, if any. If there are pending changes across + /// different forks, this method will return the earliest effective number (across the + /// different branches) that is higher or equal to the given min number. + /// + /// Only standard changes are taken into account for the current + /// limit, since any existing forced change should preclude the voter from voting. + pub(crate) fn current_limit(&self, min: N) -> Option { + self.pending_standard_changes + .roots() + .filter(|&(_, _, c)| c.effective_number() >= min) + .min_by_key(|&(_, _, c)| c.effective_number()) + .map(|(_, _, c)| c.effective_number()) + } + + /// Apply or prune any pending transitions based on a best-block trigger. + /// + /// Returns `Ok((median, new_set))` when a forced change has occurred. The + /// median represents the median last finalized block at the time the change + /// was signaled, and it should be used as the canon block when starting the + /// new grandpa voter. Only alters the internal state in this case. + /// + /// These transitions are always forced and do not lead to justifications + /// which light clients can follow. 
+ pub(crate) fn apply_forced_changes( + &self, + best_hash: H, + best_number: N, + is_descendent_of: &F, + initial_sync: bool, + ) -> Result, E> + where + F: Fn(&H, &H) -> Result, + { + let mut new_set = None; + + for change in self + .pending_forced_changes + .iter() + .take_while(|c| c.effective_number() <= best_number) // to prevent iterating too far + .filter(|c| c.effective_number() == best_number) + { + // check if the given best block is in the same branch as the block that signaled the change. + if is_descendent_of(&change.canon_hash, &best_hash)? { + // apply this change: make the set canonical + afg_log!( + initial_sync, + "👴 Applying authority set change forced at block #{:?}", + change.canon_height, + ); + telemetry!(CONSENSUS_INFO; "afg.applying_forced_authority_set_change"; + "block" => ?change.canon_height + ); + + let median_last_finalized = match change.delay_kind { DelayKind::Best { ref median_last_finalized } => median_last_finalized.clone(), _ => unreachable!("pending_forced_changes only contains forced changes; forced changes have delay kind Best; qed."), }; - new_set = Some((median_last_finalized, AuthoritySet { - current_authorities: change.next_authorities.clone(), - set_id: self.set_id + 1, - pending_standard_changes: ForkTree::new(), // new set, new changes. - pending_forced_changes: Vec::new(), - })); - - break; - } - - // we don't wipe forced changes until another change is - // applied - } - - Ok(new_set) - } - - /// Apply or prune any pending transitions based on a finality trigger. This - /// method ensures that if there are multiple changes in the same branch, - /// finalizing this block won't finalize past multiple transitions (i.e. - /// transitions must be finalized in-order). The given function - /// `is_descendent_of` should return `true` if the second hash (target) is a - /// descendent of the first hash (base). 
- /// - /// When the set has changed, the return value will be `Ok(Some((H, N)))` - /// which is the canonical block where the set last changed (i.e. the given - /// hash and number). - pub(crate) fn apply_standard_changes( - &mut self, - finalized_hash: H, - finalized_number: N, - is_descendent_of: &F, - initial_sync: bool, - ) -> Result, fork_tree::Error> - where F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - let mut status = Status { - changed: false, - new_set_block: None, - }; - - match self.pending_standard_changes.finalize_with_descendent_if( - &finalized_hash, - finalized_number.clone(), - is_descendent_of, - |change| change.effective_number() <= finalized_number - )? { - fork_tree::FinalizationResult::Changed(change) => { - status.changed = true; - - // if we are able to finalize any standard change then we can - // discard all pending forced changes (on different forks) - self.pending_forced_changes.clear(); - - if let Some(change) = change { - afg_log!(initial_sync, - "👴 Applying authority set change scheduled at block #{:?}", - change.canon_height, - ); - telemetry!(CONSENSUS_INFO; "afg.applying_scheduled_authority_set_change"; - "block" => ?change.canon_height - ); - - self.current_authorities = change.next_authorities; - self.set_id += 1; - - status.new_set_block = Some(( - finalized_hash, - finalized_number, - )); - } - }, - fork_tree::FinalizationResult::Unchanged => {}, - } - - Ok(status) - } - - /// Check whether the given finalized block number enacts any standard - /// authority set change (without triggering it), ensuring that if there are - /// multiple changes in the same branch, finalizing this block won't - /// finalize past multiple transitions (i.e. transitions must be finalized - /// in-order). 
Returns `Some(true)` if the block being finalized enacts a - /// change that can be immediately applied, `Some(false)` if the block being - /// finalized enacts a change but it cannot be applied yet since there are - /// other dependent changes, and `None` if no change is enacted. The given - /// function `is_descendent_of` should return `true` if the second hash - /// (target) is a descendent of the first hash (base). - pub fn enacts_standard_change( - &self, - finalized_hash: H, - finalized_number: N, - is_descendent_of: &F, - ) -> Result, fork_tree::Error> - where F: Fn(&H, &H) -> Result, - E: std::error::Error, - { - self.pending_standard_changes.finalizes_any_with_descendent_if( - &finalized_hash, - finalized_number.clone(), - is_descendent_of, - |change| change.effective_number() == finalized_number - ) - } + new_set = Some(( + median_last_finalized, + AuthoritySet { + current_authorities: change.next_authorities.clone(), + set_id: self.set_id + 1, + pending_standard_changes: ForkTree::new(), // new set, new changes. + pending_forced_changes: Vec::new(), + }, + )); + + break; + } + + // we don't wipe forced changes until another change is + // applied + } + + Ok(new_set) + } + + /// Apply or prune any pending transitions based on a finality trigger. This + /// method ensures that if there are multiple changes in the same branch, + /// finalizing this block won't finalize past multiple transitions (i.e. + /// transitions must be finalized in-order). The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + /// + /// When the set has changed, the return value will be `Ok(Some((H, N)))` + /// which is the canonical block where the set last changed (i.e. the given + /// hash and number). 
+ pub(crate) fn apply_standard_changes( + &mut self, + finalized_hash: H, + finalized_number: N, + is_descendent_of: &F, + initial_sync: bool, + ) -> Result, fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + let mut status = Status { + changed: false, + new_set_block: None, + }; + + match self.pending_standard_changes.finalize_with_descendent_if( + &finalized_hash, + finalized_number.clone(), + is_descendent_of, + |change| change.effective_number() <= finalized_number, + )? { + fork_tree::FinalizationResult::Changed(change) => { + status.changed = true; + + // if we are able to finalize any standard change then we can + // discard all pending forced changes (on different forks) + self.pending_forced_changes.clear(); + + if let Some(change) = change { + afg_log!( + initial_sync, + "👴 Applying authority set change scheduled at block #{:?}", + change.canon_height, + ); + telemetry!(CONSENSUS_INFO; "afg.applying_scheduled_authority_set_change"; + "block" => ?change.canon_height + ); + + self.current_authorities = change.next_authorities; + self.set_id += 1; + + status.new_set_block = Some((finalized_hash, finalized_number)); + } + } + fork_tree::FinalizationResult::Unchanged => {} + } + + Ok(status) + } + + /// Check whether the given finalized block number enacts any standard + /// authority set change (without triggering it), ensuring that if there are + /// multiple changes in the same branch, finalizing this block won't + /// finalize past multiple transitions (i.e. transitions must be finalized + /// in-order). Returns `Some(true)` if the block being finalized enacts a + /// change that can be immediately applied, `Some(false)` if the block being + /// finalized enacts a change but it cannot be applied yet since there are + /// other dependent changes, and `None` if no change is enacted. The given + /// function `is_descendent_of` should return `true` if the second hash + /// (target) is a descendent of the first hash (base). 
+ pub fn enacts_standard_change( + &self, + finalized_hash: H, + finalized_number: N, + is_descendent_of: &F, + ) -> Result, fork_tree::Error> + where + F: Fn(&H, &H) -> Result, + E: std::error::Error, + { + self.pending_standard_changes + .finalizes_any_with_descendent_if( + &finalized_hash, + finalized_number.clone(), + is_descendent_of, + |change| change.effective_number() == finalized_number, + ) + } } /// Kinds of delays for pending changes. #[derive(Debug, Clone, Encode, Decode, PartialEq)] pub(crate) enum DelayKind { - /// Depth in finalized chain. - Finalized, - /// Depth in best chain. The median last finalized block is calculated at the time the - /// change was signaled. - Best { median_last_finalized: N }, + /// Depth in finalized chain. + Finalized, + /// Depth in best chain. The median last finalized block is calculated at the time the + /// change was signaled. + Best { median_last_finalized: N }, } /// A pending change to the authority set. @@ -399,476 +420,516 @@ pub(crate) enum DelayKind { /// the finalized or unfinalized chain. #[derive(Debug, Clone, Encode, PartialEq)] pub(crate) struct PendingChange { - /// The new authorities and weights to apply. - pub(crate) next_authorities: AuthorityList, - /// How deep in the chain the announcing block must be - /// before the change is applied. - pub(crate) delay: N, - /// The announcing block's height. - pub(crate) canon_height: N, - /// The announcing block's hash. - pub(crate) canon_hash: H, - /// The delay kind. - pub(crate) delay_kind: DelayKind, + /// The new authorities and weights to apply. + pub(crate) next_authorities: AuthorityList, + /// How deep in the chain the announcing block must be + /// before the change is applied. + pub(crate) delay: N, + /// The announcing block's height. + pub(crate) canon_height: N, + /// The announcing block's hash. + pub(crate) canon_hash: H, + /// The delay kind. 
+ pub(crate) delay_kind: DelayKind, } impl Decode for PendingChange { - fn decode(value: &mut I) -> Result { - let next_authorities = Decode::decode(value)?; - let delay = Decode::decode(value)?; - let canon_height = Decode::decode(value)?; - let canon_hash = Decode::decode(value)?; - - let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized); - - Ok(PendingChange { - next_authorities, - delay, - canon_height, - canon_hash, - delay_kind, - }) - } + fn decode( + value: &mut I, + ) -> Result { + let next_authorities = Decode::decode(value)?; + let delay = Decode::decode(value)?; + let canon_height = Decode::decode(value)?; + let canon_hash = Decode::decode(value)?; + + let delay_kind = DelayKind::decode(value).unwrap_or(DelayKind::Finalized); + + Ok(PendingChange { + next_authorities, + delay, + canon_height, + canon_hash, + delay_kind, + }) + } } -impl + Clone> PendingChange { - /// Returns the effective number this change will be applied at. - pub fn effective_number(&self) -> N { - self.canon_height.clone() + self.delay.clone() - } +impl + Clone> PendingChange { + /// Returns the effective number this change will be applied at. 
+ pub fn effective_number(&self) -> N { + self.canon_height.clone() + self.delay.clone() + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::crypto::Public; - - fn static_is_descendent_of(value: bool) - -> impl Fn(&A, &A) -> Result - { - move |_, _| Ok(value) - } - - fn is_descendent_of(f: F) -> impl Fn(&A, &A) -> Result - where F: Fn(&A, &A) -> bool - { - move |base, hash| Ok(f(base, hash)) - } - - #[test] - fn current_limit_filters_min() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let change = |height| { - PendingChange { - next_authorities: Vec::new(), - delay: 0, - canon_height: height, - canon_hash: height.to_string(), - delay_kind: DelayKind::Finalized, - } - }; - - let is_descendent_of = static_is_descendent_of(false); - - authorities.add_pending_change(change(1), &is_descendent_of).unwrap(); - authorities.add_pending_change(change(2), &is_descendent_of).unwrap(); - - assert_eq!( - authorities.current_limit(0), - Some(1), - ); - - assert_eq!( - authorities.current_limit(1), - Some(1), - ); - - assert_eq!( - authorities.current_limit(2), - Some(2), - ); - - assert_eq!( - authorities.current_limit(3), - None, - ); - } - - #[test] - fn changes_iterated_in_pre_order() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let change_a = PendingChange { - next_authorities: Vec::new(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_b = PendingChange { - next_authorities: Vec::new(), - delay: 0, - canon_height: 5, - canon_hash: "hash_b", - delay_kind: DelayKind::Finalized, - }; - - let change_c = PendingChange { - next_authorities: Vec::new(), - delay: 5, - canon_height: 10, - canon_hash: "hash_c", - delay_kind: DelayKind::Finalized, 
- }; - - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_c.clone(), &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - })).unwrap(); - - // forced changes are iterated last - let change_d = PendingChange { - next_authorities: Vec::new(), - delay: 2, - canon_height: 1, - canon_hash: "hash_d", - delay_kind: DelayKind::Best { median_last_finalized: 0 }, - }; - - let change_e = PendingChange { - next_authorities: Vec::new(), - delay: 2, - canon_height: 0, - canon_hash: "hash_e", - delay_kind: DelayKind::Best { median_last_finalized: 0 }, - }; - - authorities.add_pending_change(change_d.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_e.clone(), &static_is_descendent_of(false)).unwrap(); - - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_b, &change_a, &change_c, &change_e, &change_d], - ); - } - - #[test] - fn apply_change() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; - let set_b = vec![(AuthorityId::from_slice(&[2; 32]), 5)]; - - // two competing changes at the same height on different forks - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_b = PendingChange { - next_authorities: set_b.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_b", - delay_kind: DelayKind::Finalized, - }; - - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - 
authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); - - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_b, &change_a], - ); - - // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" - let status = authorities.apply_standard_changes( - "hash_c", - 11, - &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_c") => true, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - }), - false, - ).unwrap(); - - assert!(status.changed); - assert_eq!(status.new_set_block, None); - assert_eq!( - authorities.pending_changes().collect::>(), - vec![&change_a], - ); - - // finalizing "hash_d" will enact the change signaled at "hash_a" - let status = authorities.apply_standard_changes( - "hash_d", - 15, - &is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_d") => true, - _ => unreachable!(), - }), - false, - ).unwrap(); - - assert!(status.changed); - assert_eq!(status.new_set_block, Some(("hash_d", 15))); - - assert_eq!(authorities.current_authorities, set_a); - assert_eq!(authorities.set_id, 1); - assert_eq!(authorities.pending_changes().count(), 0); - } - - #[test] - fn disallow_multiple_changes_being_finalized_at_once() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; - let set_c = vec![(AuthorityId::from_slice(&[2; 32]), 5)]; - - // two competing changes at the same height on different forks - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_c = PendingChange { - next_authorities: set_c.clone(), - delay: 10, - canon_height: 30, - canon_hash: "hash_c", - delay_kind: DelayKind::Finalized, - }; - - 
authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(true)).unwrap(); - authorities.add_pending_change(change_c.clone(), &static_is_descendent_of(true)).unwrap(); - - let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_b") => true, - ("hash_a", "hash_c") => true, - ("hash_a", "hash_d") => true, - - ("hash_c", "hash_b") => false, - ("hash_c", "hash_d") => true, - - ("hash_b", "hash_c") => true, - _ => unreachable!(), - }); - - // trying to finalize past `change_c` without finalizing `change_a` first - assert!(matches!( - authorities.apply_standard_changes("hash_d", 40, &is_descendent_of, false), - Err(fork_tree::Error::UnfinalizedAncestor) - )); - - let status = authorities.apply_standard_changes( - "hash_b", - 15, - &is_descendent_of, - false, - ).unwrap(); - - assert!(status.changed); - assert_eq!(status.new_set_block, Some(("hash_b", 15))); - - assert_eq!(authorities.current_authorities, set_a); - assert_eq!(authorities.set_id, 1); - - // after finalizing `change_a` it should be possible to finalize `change_c` - let status = authorities.apply_standard_changes( - "hash_d", - 40, - &is_descendent_of, - false, - ).unwrap(); - - assert!(status.changed); - assert_eq!(status.new_set_block, Some(("hash_d", 40))); - - assert_eq!(authorities.current_authorities, set_c); - assert_eq!(authorities.set_id, 2); - } - - #[test] - fn enacts_standard_change_works() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; - - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Finalized, - }; - - let change_b = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 20, - canon_hash: "hash_b", - delay_kind: 
DelayKind::Finalized, - }; - - authorities.add_pending_change(change_a.clone(), &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b.clone(), &static_is_descendent_of(true)).unwrap(); - - let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { - ("hash_a", "hash_d") => true, - ("hash_a", "hash_e") => true, - ("hash_b", "hash_d") => true, - ("hash_b", "hash_e") => true, - ("hash_a", "hash_c") => false, - ("hash_b", "hash_c") => false, - _ => unreachable!(), - }); - - // "hash_c" won't finalize the existing change since it isn't a descendent - assert_eq!( - authorities.enacts_standard_change("hash_c", 15, &is_descendent_of).unwrap(), - None, - ); - - // "hash_d" at depth 14 won't work either - assert_eq!( - authorities.enacts_standard_change("hash_d", 14, &is_descendent_of).unwrap(), - None, - ); - - // but it should work at depth 15 (change height + depth) - assert_eq!( - authorities.enacts_standard_change("hash_d", 15, &is_descendent_of).unwrap(), - Some(true), - ); - - // finalizing "hash_e" at depth 20 will trigger change at "hash_b", but - // it can't be applied yet since "hash_a" must be applied first - assert_eq!( - authorities.enacts_standard_change("hash_e", 30, &is_descendent_of).unwrap(), - Some(false), - ); - } - - #[test] - fn forced_changes() { - let mut authorities = AuthoritySet { - current_authorities: Vec::new(), - set_id: 0, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }; - - let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; - let set_b = vec![(AuthorityId::from_slice(&[2; 32]), 5)]; - - let change_a = PendingChange { - next_authorities: set_a.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_a", - delay_kind: DelayKind::Best { median_last_finalized: 42 }, - }; - - let change_b = PendingChange { - next_authorities: set_b.clone(), - delay: 10, - canon_height: 5, - canon_hash: "hash_b", - delay_kind: DelayKind::Best { median_last_finalized: 
0 }, - }; - - authorities.add_pending_change(change_a, &static_is_descendent_of(false)).unwrap(); - authorities.add_pending_change(change_b, &static_is_descendent_of(false)).unwrap(); - - // there's an effective change triggered at block 15 but not a standard one. - // so this should do nothing. - assert_eq!( - authorities.enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)).unwrap(), - None, - ); - - // throw a standard change into the mix to prove that it's discarded - // for being on the same fork. - // - // NOTE: after https://github.com/paritytech/substrate/issues/1861 - // this should still be rejected based on the "span" rule -- it overlaps - // with another change on the same fork. - let change_c = PendingChange { - next_authorities: set_b.clone(), - delay: 3, - canon_height: 8, - canon_hash: "hash_a8", - delay_kind: DelayKind::Best { median_last_finalized: 0 }, - }; - - let is_descendent_of_a = is_descendent_of(|base: &&str, _| { - base.starts_with("hash_a") - }); - - assert!(authorities.add_pending_change(change_c, &is_descendent_of_a).is_err()); - - // too early. - assert!( - authorities.apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false) - .unwrap() - .is_none() - ); - - // too late. - assert!( - authorities.apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true), false) - .unwrap() - .is_none() - ); - - // on time -- chooses the right change. 
- assert_eq!( - authorities.apply_forced_changes("hash_a15", 15, &is_descendent_of_a, false) - .unwrap() - .unwrap(), - (42, AuthoritySet { - current_authorities: set_a, - set_id: 1, - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - }), - ); - } + use super::*; + use sp_core::crypto::Public; + + fn static_is_descendent_of(value: bool) -> impl Fn(&A, &A) -> Result { + move |_, _| Ok(value) + } + + fn is_descendent_of(f: F) -> impl Fn(&A, &A) -> Result + where + F: Fn(&A, &A) -> bool, + { + move |base, hash| Ok(f(base, hash)) + } + + #[test] + fn current_limit_filters_min() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let change = |height| PendingChange { + next_authorities: Vec::new(), + delay: 0, + canon_height: height, + canon_hash: height.to_string(), + delay_kind: DelayKind::Finalized, + }; + + let is_descendent_of = static_is_descendent_of(false); + + authorities + .add_pending_change(change(1), &is_descendent_of) + .unwrap(); + authorities + .add_pending_change(change(2), &is_descendent_of) + .unwrap(); + + assert_eq!(authorities.current_limit(0), Some(1),); + + assert_eq!(authorities.current_limit(1), Some(1),); + + assert_eq!(authorities.current_limit(2), Some(2),); + + assert_eq!(authorities.current_limit(3), None,); + } + + #[test] + fn changes_iterated_in_pre_order() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let change_a = PendingChange { + next_authorities: Vec::new(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: Vec::new(), + delay: 0, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + let change_c = 
PendingChange { + next_authorities: Vec::new(), + delay: 5, + canon_height: 10, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change( + change_c.clone(), + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + ) + .unwrap(); + + // forced changes are iterated last + let change_d = PendingChange { + next_authorities: Vec::new(), + delay: 2, + canon_height: 1, + canon_hash: "hash_d", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + let change_e = PendingChange { + next_authorities: Vec::new(), + delay: 2, + canon_height: 0, + canon_hash: "hash_e", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + authorities + .add_pending_change(change_d.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_e.clone(), &static_is_descendent_of(false)) + .unwrap(); + + assert_eq!( + authorities.pending_changes().collect::>(), + vec![&change_b, &change_a, &change_c, &change_e, &change_d], + ); + } + + #[test] + fn apply_change() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; + let set_b = vec![(AuthorityId::from_slice(&[2; 32]), 5)]; + + // two competing changes at the same height on different forks + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let change_b = PendingChange { + next_authorities: set_b.clone(), + delay: 10, + canon_height: 5, + 
canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); + + assert_eq!( + authorities.pending_changes().collect::>(), + vec![&change_b, &change_a], + ); + + // finalizing "hash_c" won't enact the change signaled at "hash_a" but it will prune out "hash_b" + let status = authorities + .apply_standard_changes( + "hash_c", + 11, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_c") => true, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }), + false, + ) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, None); + assert_eq!( + authorities.pending_changes().collect::>(), + vec![&change_a], + ); + + // finalizing "hash_d" will enact the change signaled at "hash_a" + let status = authorities + .apply_standard_changes( + "hash_d", + 15, + &is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + _ => unreachable!(), + }), + false, + ) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_d", 15))); + + assert_eq!(authorities.current_authorities, set_a); + assert_eq!(authorities.set_id, 1); + assert_eq!(authorities.pending_changes().count(), 0); + } + + #[test] + fn disallow_multiple_changes_being_finalized_at_once() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; + let set_c = vec![(AuthorityId::from_slice(&[2; 32]), 5)]; + + // two competing changes at the same height on different forks + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, + }; + + let 
change_c = PendingChange { + next_authorities: set_c.clone(), + delay: 10, + canon_height: 30, + canon_hash: "hash_c", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(true)) + .unwrap(); + authorities + .add_pending_change(change_c.clone(), &static_is_descendent_of(true)) + .unwrap(); + + let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_b") => true, + ("hash_a", "hash_c") => true, + ("hash_a", "hash_d") => true, + + ("hash_c", "hash_b") => false, + ("hash_c", "hash_d") => true, + + ("hash_b", "hash_c") => true, + _ => unreachable!(), + }); + + // trying to finalize past `change_c` without finalizing `change_a` first + assert!(matches!( + authorities.apply_standard_changes("hash_d", 40, &is_descendent_of, false), + Err(fork_tree::Error::UnfinalizedAncestor) + )); + + let status = authorities + .apply_standard_changes("hash_b", 15, &is_descendent_of, false) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_b", 15))); + + assert_eq!(authorities.current_authorities, set_a); + assert_eq!(authorities.set_id, 1); + + // after finalizing `change_a` it should be possible to finalize `change_c` + let status = authorities + .apply_standard_changes("hash_d", 40, &is_descendent_of, false) + .unwrap(); + + assert!(status.changed); + assert_eq!(status.new_set_block, Some(("hash_d", 40))); + + assert_eq!(authorities.current_authorities, set_c); + assert_eq!(authorities.set_id, 2); + } + + #[test] + fn enacts_standard_change_works() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; + + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Finalized, 
+ }; + + let change_b = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 20, + canon_hash: "hash_b", + delay_kind: DelayKind::Finalized, + }; + + authorities + .add_pending_change(change_a.clone(), &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b.clone(), &static_is_descendent_of(true)) + .unwrap(); + + let is_descendent_of = is_descendent_of(|base, hash| match (*base, *hash) { + ("hash_a", "hash_d") => true, + ("hash_a", "hash_e") => true, + ("hash_b", "hash_d") => true, + ("hash_b", "hash_e") => true, + ("hash_a", "hash_c") => false, + ("hash_b", "hash_c") => false, + _ => unreachable!(), + }); + + // "hash_c" won't finalize the existing change since it isn't a descendent + assert_eq!( + authorities + .enacts_standard_change("hash_c", 15, &is_descendent_of) + .unwrap(), + None, + ); + + // "hash_d" at depth 14 won't work either + assert_eq!( + authorities + .enacts_standard_change("hash_d", 14, &is_descendent_of) + .unwrap(), + None, + ); + + // but it should work at depth 15 (change height + depth) + assert_eq!( + authorities + .enacts_standard_change("hash_d", 15, &is_descendent_of) + .unwrap(), + Some(true), + ); + + // finalizing "hash_e" at depth 20 will trigger change at "hash_b", but + // it can't be applied yet since "hash_a" must be applied first + assert_eq!( + authorities + .enacts_standard_change("hash_e", 30, &is_descendent_of) + .unwrap(), + Some(false), + ); + } + + #[test] + fn forced_changes() { + let mut authorities = AuthoritySet { + current_authorities: Vec::new(), + set_id: 0, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + }; + + let set_a = vec![(AuthorityId::from_slice(&[1; 32]), 5)]; + let set_b = vec![(AuthorityId::from_slice(&[2; 32]), 5)]; + + let change_a = PendingChange { + next_authorities: set_a.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_a", + delay_kind: DelayKind::Best { + median_last_finalized: 42, + }, 
+ }; + + let change_b = PendingChange { + next_authorities: set_b.clone(), + delay: 10, + canon_height: 5, + canon_hash: "hash_b", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + authorities + .add_pending_change(change_a, &static_is_descendent_of(false)) + .unwrap(); + authorities + .add_pending_change(change_b, &static_is_descendent_of(false)) + .unwrap(); + + // there's an effective change triggered at block 15 but not a standard one. + // so this should do nothing. + assert_eq!( + authorities + .enacts_standard_change("hash_c", 15, &static_is_descendent_of(true)) + .unwrap(), + None, + ); + + // throw a standard change into the mix to prove that it's discarded + // for being on the same fork. + // + // NOTE: after https://github.com/paritytech/substrate/issues/1861 + // this should still be rejected based on the "span" rule -- it overlaps + // with another change on the same fork. + let change_c = PendingChange { + next_authorities: set_b.clone(), + delay: 3, + canon_height: 8, + canon_hash: "hash_a8", + delay_kind: DelayKind::Best { + median_last_finalized: 0, + }, + }; + + let is_descendent_of_a = is_descendent_of(|base: &&str, _| base.starts_with("hash_a")); + + assert!(authorities + .add_pending_change(change_c, &is_descendent_of_a) + .is_err()); + + // too early. + assert!(authorities + .apply_forced_changes("hash_a10", 10, &static_is_descendent_of(true), false) + .unwrap() + .is_none()); + + // too late. + assert!(authorities + .apply_forced_changes("hash_a16", 16, &static_is_descendent_of(true), false) + .unwrap() + .is_none()); + + // on time -- chooses the right change. 
+ assert_eq!( + authorities + .apply_forced_changes("hash_a15", 15, &is_descendent_of_a, false) + .unwrap() + .unwrap(), + ( + 42, + AuthoritySet { + current_authorities: set_a, + set_id: 1, + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + } + ), + ); + } } diff --git a/client/finality-grandpa/src/aux_schema.rs b/client/finality-grandpa/src/aux_schema.rs index fe652f52fe..d15d51470c 100644 --- a/client/finality-grandpa/src/aux_schema.rs +++ b/client/finality-grandpa/src/aux_schema.rs @@ -16,21 +16,21 @@ //! Schema for stuff in the aux-db. -use std::fmt::Debug; -use std::sync::Arc; -use parity_scale_codec::{Encode, Decode}; -use sc_client_api::backend::AuxStore; -use sp_blockchain::{Result as ClientResult, Error as ClientError}; -use fork_tree::ForkTree; use finality_grandpa::round::State as RoundState; -use sp_runtime::traits::{Block as BlockT, NumberFor}; +use fork_tree::ForkTree; use log::{info, warn}; -use sp_finality_grandpa::{AuthorityList, SetId, RoundNumber}; +use parity_scale_codec::{Decode, Encode}; +use sc_client_api::backend::AuxStore; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_finality_grandpa::{AuthorityList, RoundNumber, SetId}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use std::fmt::Debug; +use std::sync::Arc; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, PendingChange, DelayKind}; -use crate::consensus_changes::{SharedConsensusChanges, ConsensusChanges}; +use crate::authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}; +use crate::consensus_changes::{ConsensusChanges, SharedConsensusChanges}; use crate::environment::{ - CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, + CompletedRound, CompletedRounds, CurrentRounds, HasVoted, SharedVoterSetState, VoterSetState, }; use crate::NewAuthoritySet; @@ -46,318 +46,320 @@ const CURRENT_VERSION: u32 = 2; #[derive(Debug, Clone, Encode, Decode)] 
#[cfg_attr(test, derive(PartialEq))] pub enum V1VoterSetState { - /// The voter set state, currently paused. - Paused(RoundNumber, RoundState), - /// The voter set state, currently live. - Live(RoundNumber, RoundState), + /// The voter set state, currently paused. + Paused(RoundNumber, RoundState), + /// The voter set state, currently live. + Live(RoundNumber, RoundState), } type V0VoterSetState = (RoundNumber, RoundState); #[derive(Debug, Clone, Encode, Decode, PartialEq)] struct V0PendingChange { - next_authorities: AuthorityList, - delay: N, - canon_height: N, - canon_hash: H, + next_authorities: AuthorityList, + delay: N, + canon_height: N, + canon_hash: H, } #[derive(Debug, Clone, Encode, Decode, PartialEq)] struct V0AuthoritySet { - current_authorities: AuthorityList, - set_id: SetId, - pending_changes: Vec>, + current_authorities: AuthorityList, + set_id: SetId, + pending_changes: Vec>, } impl Into> for V0AuthoritySet -where H: Clone + Debug + PartialEq, - N: Clone + Debug + Ord, +where + H: Clone + Debug + PartialEq, + N: Clone + Debug + Ord, { - fn into(self) -> AuthoritySet { - let mut pending_standard_changes = ForkTree::new(); - - for old_change in self.pending_changes { - let new_change = PendingChange { - next_authorities: old_change.next_authorities, - delay: old_change.delay, - canon_height: old_change.canon_height, - canon_hash: old_change.canon_hash, - delay_kind: DelayKind::Finalized, - }; - - if let Err(err) = pending_standard_changes.import::<_, ClientError>( - new_change.canon_hash.clone(), - new_change.canon_height.clone(), - new_change, - // previously we only supported at most one pending change per fork - &|_, _| Ok(false), - ) { - warn!(target: "afg", "Error migrating pending authority set change: {:?}.", err); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - } - } - - AuthoritySet { - current_authorities: self.current_authorities, - set_id: self.set_id, - pending_forced_changes: Vec::new(), - 
pending_standard_changes - } - } + fn into(self) -> AuthoritySet { + let mut pending_standard_changes = ForkTree::new(); + + for old_change in self.pending_changes { + let new_change = PendingChange { + next_authorities: old_change.next_authorities, + delay: old_change.delay, + canon_height: old_change.canon_height, + canon_hash: old_change.canon_hash, + delay_kind: DelayKind::Finalized, + }; + + if let Err(err) = pending_standard_changes.import::<_, ClientError>( + new_change.canon_hash.clone(), + new_change.canon_height.clone(), + new_change, + // previously we only supported at most one pending change per fork + &|_, _| Ok(false), + ) { + warn!(target: "afg", "Error migrating pending authority set change: {:?}.", err); + warn!(target: "afg", "Node is in a potentially inconsistent state."); + } + } + + AuthoritySet { + current_authorities: self.current_authorities, + set_id: self.set_id, + pending_forced_changes: Vec::new(), + pending_standard_changes, + } + } } -pub(crate) fn load_decode(backend: &B, key: &[u8]) -> ClientResult> { - match backend.get_aux(key)? { - None => Ok(None), - Some(t) => T::decode(&mut &t[..]) - .map_err( - |e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e.what())), - ) - .map(Some) - } +pub(crate) fn load_decode( + backend: &B, + key: &[u8], +) -> ClientResult> { + match backend.get_aux(key)? { + None => Ok(None), + Some(t) => T::decode(&mut &t[..]) + .map_err(|e| ClientError::Backend(format!("GRANDPA DB is corrupted: {}", e.what()))) + .map(Some), + } } /// Persistent data kept between runs. 
pub(crate) struct PersistentData { - pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, - pub(crate) set_state: SharedVoterSetState, + pub(crate) authority_set: SharedAuthoritySet>, + pub(crate) consensus_changes: SharedConsensusChanges>, + pub(crate) set_state: SharedVoterSetState, } fn migrate_from_version0( - backend: &B, - genesis_round: &G, -) -> ClientResult>, - VoterSetState, -)>> where B: AuxStore, - G: Fn() -> RoundState>, + backend: &B, + genesis_round: &G, +) -> ClientResult< + Option<( + AuthoritySet>, + VoterSetState, + )>, +> +where + B: AuxStore, + G: Fn() -> RoundState>, { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; - - if let Some(old_set) = load_decode::<_, V0AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { - let new_set: AuthoritySet> = old_set.into(); - backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; - - let (last_round_number, last_round_state) = match load_decode::<_, V0VoterSetState>>( - backend, - SET_STATE_KEY, - )? 
{ - Some((number, state)) => (number, state), - None => (0, genesis_round()), - }; - - let set_id = new_set.current().0; - - let base = last_round_state.prevote_ghost - .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - - let mut current_rounds = CurrentRounds::new(); - current_rounds.insert(last_round_number + 1, HasVoted::No); - - let set_state = VoterSetState::Live { - completed_rounds: CompletedRounds::new( - CompletedRound { - number: last_round_number, - state: last_round_state, - votes: Vec::new(), - base, - }, - set_id, - &new_set, - ), - current_rounds, - }; - - backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; - - return Ok(Some((new_set, set_state))); - } - - Ok(None) + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; + + if let Some(old_set) = + load_decode::<_, V0AuthoritySet>>(backend, AUTHORITY_SET_KEY)? + { + let new_set: AuthoritySet> = old_set.into(); + backend.insert_aux(&[(AUTHORITY_SET_KEY, new_set.encode().as_slice())], &[])?; + + let (last_round_number, last_round_state) = match load_decode::< + _, + V0VoterSetState>, + >(backend, SET_STATE_KEY)? 
+ { + Some((number, state)) => (number, state), + None => (0, genesis_round()), + }; + + let set_id = new_set.current().0; + + let base = last_round_state.prevote_ghost.expect( + "state is for completed round; completed rounds must have a prevote ghost; qed.", + ); + + let mut current_rounds = CurrentRounds::new(); + current_rounds.insert(last_round_number + 1, HasVoted::No); + + let set_state = VoterSetState::Live { + completed_rounds: CompletedRounds::new( + CompletedRound { + number: last_round_number, + state: last_round_state, + votes: Vec::new(), + base, + }, + set_id, + &new_set, + ), + current_rounds, + }; + + backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; + + return Ok(Some((new_set, set_state))); + } + + Ok(None) } fn migrate_from_version1( - backend: &B, - genesis_round: &G, -) -> ClientResult>, - VoterSetState, -)>> where B: AuxStore, - G: Fn() -> RoundState>, + backend: &B, + genesis_round: &G, +) -> ClientResult< + Option<( + AuthoritySet>, + VoterSetState, + )>, +> +where + B: AuxStore, + G: Fn() -> RoundState>, { - CURRENT_VERSION.using_encoded(|s| - backend.insert_aux(&[(VERSION_KEY, s)], &[]) - )?; - - if let Some(set) = load_decode::<_, AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { - let set_id = set.current().0; - - let completed_rounds = |number, state, base| CompletedRounds::new( - CompletedRound { - number, - state, - votes: Vec::new(), - base, - }, - set_id, - &set, - ); - - let set_state = match load_decode::<_, V1VoterSetState>>( - backend, - SET_STATE_KEY, - )? { - Some(V1VoterSetState::Paused(last_round_number, set_state)) => { - let base = set_state.prevote_ghost + CURRENT_VERSION.using_encoded(|s| backend.insert_aux(&[(VERSION_KEY, s)], &[]))?; + + if let Some(set) = + load_decode::<_, AuthoritySet>>(backend, AUTHORITY_SET_KEY)? 
+ { + let set_id = set.current().0; + + let completed_rounds = |number, state, base| { + CompletedRounds::new( + CompletedRound { + number, + state, + votes: Vec::new(), + base, + }, + set_id, + &set, + ) + }; + + let set_state = match load_decode::<_, V1VoterSetState>>( + backend, + SET_STATE_KEY, + )? { + Some(V1VoterSetState::Paused(last_round_number, set_state)) => { + let base = set_state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::Paused { - completed_rounds: completed_rounds(last_round_number, set_state, base), - } - }, - Some(V1VoterSetState::Live(last_round_number, set_state)) => { - let base = set_state.prevote_ghost + VoterSetState::Paused { + completed_rounds: completed_rounds(last_round_number, set_state, base), + } + } + Some(V1VoterSetState::Live(last_round_number, set_state)) => { + let base = set_state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - let mut current_rounds = CurrentRounds::new(); - current_rounds.insert(last_round_number + 1, HasVoted::No); - - VoterSetState::Live { - completed_rounds: completed_rounds(last_round_number, set_state, base), - current_rounds, - } - }, - None => { - let set_state = genesis_round(); - let base = set_state.prevote_ghost + let mut current_rounds = CurrentRounds::new(); + current_rounds.insert(last_round_number + 1, HasVoted::No); + + VoterSetState::Live { + completed_rounds: completed_rounds(last_round_number, set_state, base), + current_rounds, + } + } + None => { + let set_state = genesis_round(); + let base = set_state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::live( - set_id, - &set, - base, - ) - }, - }; + VoterSetState::live(set_id, &set, base) + } + }; - backend.insert_aux(&[(SET_STATE_KEY, set_state.encode().as_slice())], &[])?; + backend.insert_aux(&[(SET_STATE_KEY, 
set_state.encode().as_slice())], &[])?; - return Ok(Some((set, set_state))); - } + return Ok(Some((set, set_state))); + } - Ok(None) + Ok(None) } /// Load or initialize persistent data from backend. pub(crate) fn load_persistent( - backend: &B, - genesis_hash: Block::Hash, - genesis_number: NumberFor, - genesis_authorities: G, -) - -> ClientResult> - where - B: AuxStore, - G: FnOnce() -> ClientResult, + backend: &B, + genesis_hash: Block::Hash, + genesis_number: NumberFor, + genesis_authorities: G, +) -> ClientResult> +where + B: AuxStore, + G: FnOnce() -> ClientResult, { - let version: Option = load_decode(backend, VERSION_KEY)?; - let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? - .unwrap_or_else(ConsensusChanges::>::empty); - - let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); - - match version { - None => { - if let Some((new_set, set_state)) = migrate_from_version0::(backend, &make_genesis_round)? { - return Ok(PersistentData { - authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), - set_state: set_state.into(), - }); - } - }, - Some(1) => { - if let Some((new_set, set_state)) = migrate_from_version1::(backend, &make_genesis_round)? { - return Ok(PersistentData { - authority_set: new_set.into(), - consensus_changes: Arc::new(consensus_changes.into()), - set_state: set_state.into(), - }); - } - }, - Some(2) => { - if let Some(set) = load_decode::<_, AuthoritySet>>( - backend, - AUTHORITY_SET_KEY, - )? { - let set_state = match load_decode::<_, VoterSetState>( - backend, - SET_STATE_KEY, - )? { - Some(state) => state, - None => { - let state = make_genesis_round(); - let base = state.prevote_ghost + let version: Option = load_decode(backend, VERSION_KEY)?; + let consensus_changes = load_decode(backend, CONSENSUS_CHANGES_KEY)? 
+ .unwrap_or_else(ConsensusChanges::>::empty); + + let make_genesis_round = move || RoundState::genesis((genesis_hash, genesis_number)); + + match version { + None => { + if let Some((new_set, set_state)) = + migrate_from_version0::(backend, &make_genesis_round)? + { + return Ok(PersistentData { + authority_set: new_set.into(), + consensus_changes: Arc::new(consensus_changes.into()), + set_state: set_state.into(), + }); + } + } + Some(1) => { + if let Some((new_set, set_state)) = + migrate_from_version1::(backend, &make_genesis_round)? + { + return Ok(PersistentData { + authority_set: new_set.into(), + consensus_changes: Arc::new(consensus_changes.into()), + set_state: set_state.into(), + }); + } + } + Some(2) => { + if let Some(set) = load_decode::<_, AuthoritySet>>( + backend, + AUTHORITY_SET_KEY, + )? { + let set_state = + match load_decode::<_, VoterSetState>(backend, SET_STATE_KEY)? { + Some(state) => state, + None => { + let state = make_genesis_round(); + let base = state.prevote_ghost .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - VoterSetState::live( - set.current().0, - &set, - base, - ) - } - }; - - return Ok(PersistentData { - authority_set: set.into(), - consensus_changes: Arc::new(consensus_changes.into()), - set_state: set_state.into(), - }); - } - } - Some(other) => return Err(ClientError::Backend( - format!("Unsupported GRANDPA DB version: {:?}", other) - ).into()), - } - - // genesis. - info!(target: "afg", "👴 Loading GRANDPA authority set \ + VoterSetState::live(set.current().0, &set, base) + } + }; + + return Ok(PersistentData { + authority_set: set.into(), + consensus_changes: Arc::new(consensus_changes.into()), + set_state: set_state.into(), + }); + } + } + Some(other) => { + return Err(ClientError::Backend(format!( + "Unsupported GRANDPA DB version: {:?}", + other + )) + .into()) + } + } + + // genesis. 
+ info!(target: "afg", "👴 Loading GRANDPA authority set \ from genesis on what appears to be first startup."); - let genesis_authorities = genesis_authorities()?; - let genesis_set = AuthoritySet::genesis(genesis_authorities.clone()); - let state = make_genesis_round(); - let base = state.prevote_ghost - .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); - - let genesis_state = VoterSetState::live( - 0, - &genesis_set, - base, - ); - - backend.insert_aux( - &[ - (AUTHORITY_SET_KEY, genesis_set.encode().as_slice()), - (SET_STATE_KEY, genesis_state.encode().as_slice()), - ], - &[], - )?; - - Ok(PersistentData { - authority_set: genesis_set.into(), - set_state: genesis_state.into(), - consensus_changes: Arc::new(consensus_changes.into()), - }) + let genesis_authorities = genesis_authorities()?; + let genesis_set = AuthoritySet::genesis(genesis_authorities.clone()); + let state = make_genesis_round(); + let base = state + .prevote_ghost + .expect("state is for completed round; completed rounds must have a prevote ghost; qed."); + + let genesis_state = VoterSetState::live(0, &genesis_set, base); + + backend.insert_aux( + &[ + (AUTHORITY_SET_KEY, genesis_set.encode().as_slice()), + (SET_STATE_KEY, genesis_state.encode().as_slice()), + ], + &[], + )?; + + Ok(PersistentData { + authority_set: genesis_set.into(), + set_state: genesis_state.into(), + consensus_changes: Arc::new(consensus_changes.into()), + }) } /// Update the authority set on disk after a change. @@ -366,284 +368,296 @@ pub(crate) fn load_persistent( /// handoff. `set` in all cases should reflect the current authority set, with all /// changes and handoffs applied. 
pub(crate) fn update_authority_set( - set: &AuthoritySet>, - new_set: Option<&NewAuthoritySet>>, - write_aux: F -) -> R where - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, + set: &AuthoritySet>, + new_set: Option<&NewAuthoritySet>>, + write_aux: F, +) -> R +where + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { - // write new authority set state to disk. - let encoded_set = set.encode(); - - if let Some(new_set) = new_set { - // we also overwrite the "last completed round" entry with a blank slate - // because from the perspective of the finality gadget, the chain has - // reset. - let set_state = VoterSetState::::live( - new_set.set_id, - &set, - (new_set.canon_hash, new_set.canon_number), - ); - let encoded = set_state.encode(); - - write_aux(&[ - (AUTHORITY_SET_KEY, &encoded_set[..]), - (SET_STATE_KEY, &encoded[..]), - ]) - } else { - write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) - } + // write new authority set state to disk. + let encoded_set = set.encode(); + + if let Some(new_set) = new_set { + // we also overwrite the "last completed round" entry with a blank slate + // because from the perspective of the finality gadget, the chain has + // reset. + let set_state = VoterSetState::::live( + new_set.set_id, + &set, + (new_set.canon_hash, new_set.canon_number), + ); + let encoded = set_state.encode(); + + write_aux(&[ + (AUTHORITY_SET_KEY, &encoded_set[..]), + (SET_STATE_KEY, &encoded[..]), + ]) + } else { + write_aux(&[(AUTHORITY_SET_KEY, &encoded_set[..])]) + } } /// Write voter set state. pub(crate) fn write_voter_set_state( - backend: &B, - state: &VoterSetState, + backend: &B, + state: &VoterSetState, ) -> ClientResult<()> { - backend.insert_aux( - &[(SET_STATE_KEY, state.encode().as_slice())], - &[] - ) + backend.insert_aux(&[(SET_STATE_KEY, state.encode().as_slice())], &[]) } /// Write concluded round. 
pub(crate) fn write_concluded_round( - backend: &B, - round_data: &CompletedRound, + backend: &B, + round_data: &CompletedRound, ) -> ClientResult<()> { - let mut key = CONCLUDED_ROUNDS.to_vec(); - let round_number = round_data.number; - round_number.using_encoded(|n| key.extend(n)); + let mut key = CONCLUDED_ROUNDS.to_vec(); + let round_number = round_data.number; + round_number.using_encoded(|n| key.extend(n)); - backend.insert_aux(&[(&key[..], round_data.encode().as_slice())], &[]) + backend.insert_aux(&[(&key[..], round_data.encode().as_slice())], &[]) } /// Update the consensus changes. -pub(crate) fn update_consensus_changes( - set: &ConsensusChanges, - write_aux: F -) -> R where - H: Encode + Clone, - N: Encode + Clone, - F: FnOnce(&[(&'static [u8], &[u8])]) -> R, +pub(crate) fn update_consensus_changes(set: &ConsensusChanges, write_aux: F) -> R +where + H: Encode + Clone, + N: Encode + Clone, + F: FnOnce(&[(&'static [u8], &[u8])]) -> R, { - write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) + write_aux(&[(CONSENSUS_CHANGES_KEY, set.encode().as_slice())]) } #[cfg(test)] -pub(crate) fn load_authorities(backend: &B) - -> Option> { - load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY) - .expect("backend error") +pub(crate) fn load_authorities( + backend: &B, +) -> Option> { + load_decode::<_, AuthoritySet>(backend, AUTHORITY_SET_KEY).expect("backend error") } #[cfg(test)] mod test { - use sp_finality_grandpa::AuthorityId; - use sp_core::H256; - use substrate_test_runtime_client; - use super::*; - - #[test] - fn load_decode_from_v0_migrates_data_format() { - let client = substrate_test_runtime_client::new(); - - let authorities = vec![(AuthorityId::default(), 100)]; - let set_id = 3; - let round_number: RoundNumber = 42; - let round_state = RoundState:: { - prevote_ghost: Some((H256::random(), 32)), - finalized: None, - estimate: None, - completable: false, - }; - - { - let authority_set = V0AuthoritySet:: { - current_authorities: 
authorities.clone(), - pending_changes: Vec::new(), - set_id, - }; - - let voter_set_state = (round_number, round_state.clone()); - - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - ], - &[], - ).unwrap(); - } - - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - None, - ); - - // should perform the migration - load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); - - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(2), - ); - - let PersistentData { authority_set, set_state, .. } = load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); - - assert_eq!( - *authority_set.inner().read(), - AuthoritySet { - current_authorities: authorities.clone(), - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - set_id, - }, - ); - - let mut current_rounds = CurrentRounds::new(); - current_rounds.insert(round_number + 1, HasVoted::No); - - assert_eq!( - &*set_state.read(), - &VoterSetState::Live { - completed_rounds: CompletedRounds::new( - CompletedRound { - number: round_number, - state: round_state.clone(), - base: round_state.prevote_ghost.unwrap(), - votes: vec![], - }, - set_id, - &*authority_set.inner().read(), - ), - current_rounds, - }, - ); - } - - #[test] - fn load_decode_from_v1_migrates_data_format() { - let client = substrate_test_runtime_client::new(); - - let authorities = vec![(AuthorityId::default(), 100)]; - let set_id = 3; - let round_number: RoundNumber = 42; - let round_state = RoundState:: { - prevote_ghost: Some((H256::random(), 32)), - finalized: None, - estimate: None, - completable: false, - }; - - { - let authority_set = AuthoritySet:: { - current_authorities: authorities.clone(), - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - set_id, - }; - - let voter_set_state = 
V1VoterSetState::Live(round_number, round_state.clone()); - - client.insert_aux( - &[ - (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), - (SET_STATE_KEY, voter_set_state.encode().as_slice()), - (VERSION_KEY, 1u32.encode().as_slice()), - ], - &[], - ).unwrap(); - } - - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(1), - ); - - // should perform the migration - load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); - - assert_eq!( - load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), - Some(2), - ); - - let PersistentData { authority_set, set_state, .. } = load_persistent::( - &client, - H256::random(), - 0, - || unreachable!(), - ).unwrap(); - - assert_eq!( - *authority_set.inner().read(), - AuthoritySet { - current_authorities: authorities.clone(), - pending_standard_changes: ForkTree::new(), - pending_forced_changes: Vec::new(), - set_id, - }, - ); - - let mut current_rounds = CurrentRounds::new(); - current_rounds.insert(round_number + 1, HasVoted::No); - - assert_eq!( - &*set_state.read(), - &VoterSetState::Live { - completed_rounds: CompletedRounds::new( - CompletedRound { - number: round_number, - state: round_state.clone(), - base: round_state.prevote_ghost.unwrap(), - votes: vec![], - }, - set_id, - &*authority_set.inner().read(), - ), - current_rounds, - }, - ); - } - - #[test] - fn write_read_concluded_rounds() { - let client = substrate_test_runtime_client::new(); - let hash = H256::random(); - let round_state = RoundState::genesis((hash, 0)); - - let completed_round = CompletedRound:: { - number: 42, - state: round_state.clone(), - base: round_state.prevote_ghost.unwrap(), - votes: vec![], - }; - - assert!(write_concluded_round(&client, &completed_round).is_ok()); - - let round_number = completed_round.number; - let mut key = CONCLUDED_ROUNDS.to_vec(); - round_number.using_encoded(|n| key.extend(n)); - - assert_eq!( - load_decode::<_, CompletedRound::>(&client, &key).unwrap(), - 
Some(completed_round), - ); - } + use super::*; + use sp_core::H256; + use sp_finality_grandpa::AuthorityId; + use substrate_test_runtime_client; + + #[test] + fn load_decode_from_v0_migrates_data_format() { + let client = substrate_test_runtime_client::new(); + + let authorities = vec![(AuthorityId::default(), 100)]; + let set_id = 3; + let round_number: RoundNumber = 42; + let round_state = RoundState:: { + prevote_ghost: Some((H256::random(), 32)), + finalized: None, + estimate: None, + completable: false, + }; + + { + let authority_set = V0AuthoritySet:: { + current_authorities: authorities.clone(), + pending_changes: Vec::new(), + set_id, + }; + + let voter_set_state = (round_number, round_state.clone()); + + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + ], + &[], + ) + .unwrap(); + } + + assert_eq!(load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), None,); + + // should perform the migration + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!( + load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), + Some(2), + ); + + let PersistentData { + authority_set, + set_state, + .. 
+ } = load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!( + *authority_set.inner().read(), + AuthoritySet { + current_authorities: authorities.clone(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + set_id, + }, + ); + + let mut current_rounds = CurrentRounds::new(); + current_rounds.insert(round_number + 1, HasVoted::No); + + assert_eq!( + &*set_state.read(), + &VoterSetState::Live { + completed_rounds: CompletedRounds::new( + CompletedRound { + number: round_number, + state: round_state.clone(), + base: round_state.prevote_ghost.unwrap(), + votes: vec![], + }, + set_id, + &*authority_set.inner().read(), + ), + current_rounds, + }, + ); + } + + #[test] + fn load_decode_from_v1_migrates_data_format() { + let client = substrate_test_runtime_client::new(); + + let authorities = vec![(AuthorityId::default(), 100)]; + let set_id = 3; + let round_number: RoundNumber = 42; + let round_state = RoundState:: { + prevote_ghost: Some((H256::random(), 32)), + finalized: None, + estimate: None, + completable: false, + }; + + { + let authority_set = AuthoritySet:: { + current_authorities: authorities.clone(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + set_id, + }; + + let voter_set_state = V1VoterSetState::Live(round_number, round_state.clone()); + + client + .insert_aux( + &[ + (AUTHORITY_SET_KEY, authority_set.encode().as_slice()), + (SET_STATE_KEY, voter_set_state.encode().as_slice()), + (VERSION_KEY, 1u32.encode().as_slice()), + ], + &[], + ) + .unwrap(); + } + + assert_eq!( + load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), + Some(1), + ); + + // should perform the migration + load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!( + load_decode::<_, u32>(&client, VERSION_KEY).unwrap(), + Some(2), + ); + + let PersistentData { + authority_set, + set_state, + .. 
+ } = load_persistent::( + &client, + H256::random(), + 0, + || unreachable!(), + ) + .unwrap(); + + assert_eq!( + *authority_set.inner().read(), + AuthoritySet { + current_authorities: authorities.clone(), + pending_standard_changes: ForkTree::new(), + pending_forced_changes: Vec::new(), + set_id, + }, + ); + + let mut current_rounds = CurrentRounds::new(); + current_rounds.insert(round_number + 1, HasVoted::No); + + assert_eq!( + &*set_state.read(), + &VoterSetState::Live { + completed_rounds: CompletedRounds::new( + CompletedRound { + number: round_number, + state: round_state.clone(), + base: round_state.prevote_ghost.unwrap(), + votes: vec![], + }, + set_id, + &*authority_set.inner().read(), + ), + current_rounds, + }, + ); + } + + #[test] + fn write_read_concluded_rounds() { + let client = substrate_test_runtime_client::new(); + let hash = H256::random(); + let round_state = RoundState::genesis((hash, 0)); + + let completed_round = CompletedRound:: { + number: 42, + state: round_state.clone(), + base: round_state.prevote_ghost.unwrap(), + votes: vec![], + }; + + assert!(write_concluded_round(&client, &completed_round).is_ok()); + + let round_number = completed_round.number; + let mut key = CONCLUDED_ROUNDS.to_vec(); + round_number.using_encoded(|n| key.extend(n)); + + assert_eq!( + load_decode::<_, CompletedRound::>( + &client, &key + ) + .unwrap(), + Some(completed_round), + ); + } } diff --git a/client/finality-grandpa/src/communication/gossip.rs b/client/finality-grandpa/src/communication/gossip.rs index 2d39ed7ec4..9f5dcc3071 100644 --- a/client/finality-grandpa/src/communication/gossip.rs +++ b/client/finality-grandpa/src/communication/gossip.rs @@ -82,22 +82,22 @@ //! //! 
We only send polite messages to peers, -use sp_runtime::traits::{NumberFor, Block as BlockT, Zero}; -use sc_network_gossip::{MessageIntent, ValidatorContext}; +use parity_scale_codec::{Decode, Encode}; use sc_network::{ObservedRole, PeerId, ReputationChange}; -use parity_scale_codec::{Encode, Decode}; +use sc_network_gossip::{MessageIntent, ValidatorContext}; use sp_finality_grandpa::AuthorityId; +use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; +use log::{debug, trace}; +use prometheus_endpoint::{register, CounterVec, Opts, PrometheusError, Registry, U64}; +use rand::seq::SliceRandom; use sc_telemetry::{telemetry, CONSENSUS_DEBUG}; -use log::{trace, debug}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -use prometheus_endpoint::{CounterVec, Opts, PrometheusError, register, Registry, U64}; -use rand::seq::SliceRandom; +use super::{benefit, cost, Round, SetId}; use crate::{environment, CatchUp, CompactCommit, SignedMessage}; -use super::{cost, benefit, Round, SetId}; -use std::collections::{HashMap, VecDeque, HashSet}; +use std::collections::{HashMap, HashSet, VecDeque}; use std::time::{Duration, Instant}; const REBROADCAST_AFTER: Duration = Duration::from_secs(60 * 5); @@ -119,99 +119,113 @@ type Report = (PeerId, ReputationChange); /// An outcome of examining a message. #[derive(Debug, PartialEq, Clone, Copy)] enum Consider { - /// Accept the message. - Accept, - /// Message is too early. Reject. - RejectPast, - /// Message is from the future. Reject. - RejectFuture, - /// Message cannot be evaluated. Reject. - RejectOutOfScope, + /// Accept the message. + Accept, + /// Message is too early. Reject. + RejectPast, + /// Message is from the future. Reject. + RejectFuture, + /// Message cannot be evaluated. Reject. + RejectOutOfScope, } /// A view of protocol state. #[derive(Debug)] struct View { - round: Round, // the current round we are at. - set_id: SetId, // the current voter set id. 
- last_commit: Option, // commit-finalized block height, if any. + round: Round, // the current round we are at. + set_id: SetId, // the current voter set id. + last_commit: Option, // commit-finalized block height, if any. } impl Default for View { - fn default() -> Self { - View { - round: Round(1), - set_id: SetId(0), - last_commit: None, - } - } + fn default() -> Self { + View { + round: Round(1), + set_id: SetId(0), + last_commit: None, + } + } } impl View { - /// Consider a round and set ID combination under a current view. - fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { - // only from current set - if set_id < self.set_id { return Consider::RejectPast } - if set_id > self.set_id { return Consider::RejectFuture } - - // only r-1 ... r+1 - if round.0 > self.round.0.saturating_add(1) { return Consider::RejectFuture } - if round.0 < self.round.0.saturating_sub(1) { return Consider::RejectPast } - - Consider::Accept - } - - /// Consider a set-id global message. Rounds are not taken into account, but are implicitly - /// because we gate on finalization of a further block than a previous commit. - fn consider_global(&self, set_id: SetId, number: N) -> Consider { - // only from current set - if set_id < self.set_id { return Consider::RejectPast } - if set_id > self.set_id { return Consider::RejectFuture } - - // only commits which claim to prove a higher block number than - // the one we're aware of. - match self.last_commit { - None => Consider::Accept, - Some(ref num) => if num < &number { - Consider::Accept - } else { - Consider::RejectPast - } - } - } + /// Consider a round and set ID combination under a current view. + fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { + // only from current set + if set_id < self.set_id { + return Consider::RejectPast; + } + if set_id > self.set_id { + return Consider::RejectFuture; + } + + // only r-1 ... 
r+1 + if round.0 > self.round.0.saturating_add(1) { + return Consider::RejectFuture; + } + if round.0 < self.round.0.saturating_sub(1) { + return Consider::RejectPast; + } + + Consider::Accept + } + + /// Consider a set-id global message. Rounds are not taken into account, but are implicitly + /// because we gate on finalization of a further block than a previous commit. + fn consider_global(&self, set_id: SetId, number: N) -> Consider { + // only from current set + if set_id < self.set_id { + return Consider::RejectPast; + } + if set_id > self.set_id { + return Consider::RejectFuture; + } + + // only commits which claim to prove a higher block number than + // the one we're aware of. + match self.last_commit { + None => Consider::Accept, + Some(ref num) => { + if num < &number { + Consider::Accept + } else { + Consider::RejectPast + } + } + } + } } /// A local view of protocol state. Only differs from `View` in that we also /// track the round and set id at which the last commit was observed. struct LocalView { - round: Round, - set_id: SetId, - last_commit: Option<(N, Round, SetId)>, + round: Round, + set_id: SetId, + last_commit: Option<(N, Round, SetId)>, } impl LocalView { - /// Converts the local view to a `View` discarding round and set id - /// information about the last commit. - fn as_view(&self) -> View<&N> { - View { - round: self.round, - set_id: self.set_id, - last_commit: self.last_commit_height(), - } - } - - /// Update the set ID. implies a reset to round 1. - fn update_set(&mut self, set_id: SetId) { - if set_id != self.set_id { - self.set_id = set_id; - self.round = Round(1); - } - } - - /// Returns the height of the block that the last observed commit finalizes. - fn last_commit_height(&self) -> Option<&N> { - self.last_commit.as_ref().map(|(number, _, _)| number) - } + /// Converts the local view to a `View` discarding round and set id + /// information about the last commit. 
+ fn as_view(&self) -> View<&N> { + View { + round: self.round, + set_id: self.set_id, + last_commit: self.last_commit_height(), + } + } + + /// Update the set ID. implies a reset to round 1. + fn update_set(&mut self, set_id: SetId) { + if set_id != self.set_id { + self.set_id = set_id; + self.round = Round(1); + } + } + + /// Returns the height of the block that the last observed commit finalizes. + fn last_commit_height(&self) -> Option<&N> { + self.last_commit.as_ref().map(|(number, _, _)| number) + } } const KEEP_RECENT_ROUNDS: usize = 3; @@ -224,159 +238,160 @@ const KEEP_RECENT_ROUNDS: usize = 3; /// /// - and a global topic for commit and catch-up messages. struct KeepTopics { - current_set: SetId, - rounds: VecDeque<(Round, SetId)>, - reverse_map: HashMap, SetId)> + current_set: SetId, + rounds: VecDeque<(Round, SetId)>, + reverse_map: HashMap, SetId)>, } impl KeepTopics { - fn new() -> Self { - KeepTopics { - current_set: SetId(0), - rounds: VecDeque::with_capacity(KEEP_RECENT_ROUNDS + 2), - reverse_map: HashMap::new(), - } - } - - fn push(&mut self, round: Round, set_id: SetId) { - self.current_set = std::cmp::max(self.current_set, set_id); - - // under normal operation the given round is already tracked (since we - // track one round ahead). if we skip rounds (with a catch up) the given - // round topic might not be tracked yet. - if !self.rounds.contains(&(round, set_id)) { - self.rounds.push_back((round, set_id)); - } - - // we also accept messages for the next round - self.rounds.push_back((Round(round.0.saturating_add(1)), set_id)); - - // the 2 is for the current and next round. 
- while self.rounds.len() > KEEP_RECENT_ROUNDS + 2 { - let _ = self.rounds.pop_front(); - } - - let mut map = HashMap::with_capacity(KEEP_RECENT_ROUNDS + 3); - map.insert(super::global_topic::(self.current_set.0), (None, self.current_set)); - - for &(round, set) in &self.rounds { - map.insert( - super::round_topic::(round.0, set.0), - (Some(round), set) - ); - } - - self.reverse_map = map; - } - - fn topic_info(&self, topic: &B::Hash) -> Option<(Option, SetId)> { - self.reverse_map.get(topic).cloned() - } + fn new() -> Self { + KeepTopics { + current_set: SetId(0), + rounds: VecDeque::with_capacity(KEEP_RECENT_ROUNDS + 2), + reverse_map: HashMap::new(), + } + } + + fn push(&mut self, round: Round, set_id: SetId) { + self.current_set = std::cmp::max(self.current_set, set_id); + + // under normal operation the given round is already tracked (since we + // track one round ahead). if we skip rounds (with a catch up) the given + // round topic might not be tracked yet. + if !self.rounds.contains(&(round, set_id)) { + self.rounds.push_back((round, set_id)); + } + + // we also accept messages for the next round + self.rounds + .push_back((Round(round.0.saturating_add(1)), set_id)); + + // the 2 is for the current and next round. + while self.rounds.len() > KEEP_RECENT_ROUNDS + 2 { + let _ = self.rounds.pop_front(); + } + + let mut map = HashMap::with_capacity(KEEP_RECENT_ROUNDS + 3); + map.insert( + super::global_topic::(self.current_set.0), + (None, self.current_set), + ); + + for &(round, set) in &self.rounds { + map.insert(super::round_topic::(round.0, set.0), (Some(round), set)); + } + + self.reverse_map = map; + } + + fn topic_info(&self, topic: &B::Hash) -> Option<(Option, SetId)> { + self.reverse_map.get(topic).cloned() + } } // topics to send to a neighbor based on their view. 
fn neighbor_topics(view: &View>) -> Vec { - let s = view.set_id; - let mut topics = vec![ - super::global_topic::(s.0), - super::round_topic::(view.round.0, s.0), - ]; - - if view.round.0 != 0 { - let r = Round(view.round.0 - 1); - topics.push(super::round_topic::(r.0, s.0)) - } - - topics + let s = view.set_id; + let mut topics = vec![ + super::global_topic::(s.0), + super::round_topic::(view.round.0, s.0), + ]; + + if view.round.0 != 0 { + let r = Round(view.round.0 - 1); + topics.push(super::round_topic::(r.0, s.0)) + } + + topics } /// Grandpa gossip message type. /// This is the root type that gets encoded and sent on the network. #[derive(Debug, Encode, Decode)] pub(super) enum GossipMessage { - /// Grandpa message with round and set info. - Vote(VoteMessage), - /// Grandpa commit message with round and set info. - Commit(FullCommitMessage), - /// A neighbor packet. Not repropagated. - Neighbor(VersionedNeighborPacket>), - /// Grandpa catch up request message with round and set info. Not repropagated. - CatchUpRequest(CatchUpRequestMessage), - /// Grandpa catch up message with round and set info. Not repropagated. - CatchUp(FullCatchUpMessage), + /// Grandpa message with round and set info. + Vote(VoteMessage), + /// Grandpa commit message with round and set info. + Commit(FullCommitMessage), + /// A neighbor packet. Not repropagated. + Neighbor(VersionedNeighborPacket>), + /// Grandpa catch up request message with round and set info. Not repropagated. + CatchUpRequest(CatchUpRequestMessage), + /// Grandpa catch up message with round and set info. Not repropagated. + CatchUp(FullCatchUpMessage), } impl From>> for GossipMessage { - fn from(neighbor: NeighborPacket>) -> Self { - GossipMessage::Neighbor(VersionedNeighborPacket::V1(neighbor)) - } + fn from(neighbor: NeighborPacket>) -> Self { + GossipMessage::Neighbor(VersionedNeighborPacket::V1(neighbor)) + } } /// Network level vote message with topic information. 
#[derive(Debug, Encode, Decode)] pub(super) struct VoteMessage { - /// The round this message is from. - pub(super) round: Round, - /// The voter set ID this message is from. - pub(super) set_id: SetId, - /// The message itself. - pub(super) message: SignedMessage, + /// The round this message is from. + pub(super) round: Round, + /// The voter set ID this message is from. + pub(super) set_id: SetId, + /// The message itself. + pub(super) message: SignedMessage, } /// Network level commit message with topic information. #[derive(Debug, Encode, Decode)] pub(super) struct FullCommitMessage { - /// The round this message is from. - pub(super) round: Round, - /// The voter set ID this message is from. - pub(super) set_id: SetId, - /// The compact commit message. - pub(super) message: CompactCommit, + /// The round this message is from. + pub(super) round: Round, + /// The voter set ID this message is from. + pub(super) set_id: SetId, + /// The compact commit message. + pub(super) message: CompactCommit, } /// V1 neighbor packet. Neighbor packets are sent from nodes to their peers /// and are not repropagated. These contain information about the node's state. #[derive(Debug, Encode, Decode, Clone)] pub(super) struct NeighborPacket { - /// The round the node is currently at. - pub(super) round: Round, - /// The set ID the node is currently at. - pub(super) set_id: SetId, - /// The highest finalizing commit observed. - pub(super) commit_finalized_height: N, + /// The round the node is currently at. + pub(super) round: Round, + /// The set ID the node is currently at. + pub(super) set_id: SetId, + /// The highest finalizing commit observed. + pub(super) commit_finalized_height: N, } /// A versioned neighbor packet. 
#[derive(Debug, Encode, Decode)] pub(super) enum VersionedNeighborPacket { - #[codec(index = "1")] - V1(NeighborPacket), + #[codec(index = "1")] + V1(NeighborPacket), } impl VersionedNeighborPacket { - fn into_neighbor_packet(self) -> NeighborPacket { - match self { - VersionedNeighborPacket::V1(p) => p, - } - } + fn into_neighbor_packet(self) -> NeighborPacket { + match self { + VersionedNeighborPacket::V1(p) => p, + } + } } /// A catch up request for a given round (or any further round) localized by set id. #[derive(Clone, Debug, Encode, Decode)] pub(super) struct CatchUpRequestMessage { - /// The round that we want to catch up to. - pub(super) round: Round, - /// The voter set ID this message is from. - pub(super) set_id: SetId, + /// The round that we want to catch up to. + pub(super) round: Round, + /// The voter set ID this message is from. + pub(super) set_id: SetId, } /// Network level catch up message with topic information. #[derive(Debug, Encode, Decode)] pub(super) struct FullCatchUpMessage { - /// The voter set ID this message is from. - pub(super) set_id: SetId, - /// The compact commit message. - pub(super) message: CatchUp, + /// The voter set ID this message is from. + pub(super) set_id: SetId, + /// The compact commit message. + pub(super) message: CatchUp, } /// Misbehavior that peers can perform. @@ -385,2224 +400,2315 @@ pub(super) struct FullCatchUpMessage { /// peer. #[derive(Clone, Copy, Debug, PartialEq)] pub(super) enum Misbehavior { - // invalid neighbor message, considering the last one. - InvalidViewChange, - // could not decode neighbor message. bytes-length of the packet. - UndecodablePacket(i32), - // Bad catch up message (invalid signatures). - BadCatchUpMessage { - signatures_checked: i32, - }, - // Bad commit message - BadCommitMessage { - signatures_checked: i32, - blocks_loaded: i32, - equivocations_caught: i32, - }, - // A message received that's from the future relative to our view. - // always misbehavior. 
- FutureMessage, - // A message received that cannot be evaluated relative to our view. - // This happens before we have a view and have sent out neighbor packets. - // always misbehavior. - OutOfScopeMessage, + // invalid neighbor message, considering the last one. + InvalidViewChange, + // could not decode neighbor message. bytes-length of the packet. + UndecodablePacket(i32), + // Bad catch up message (invalid signatures). + BadCatchUpMessage { + signatures_checked: i32, + }, + // Bad commit message + BadCommitMessage { + signatures_checked: i32, + blocks_loaded: i32, + equivocations_caught: i32, + }, + // A message received that's from the future relative to our view. + // always misbehavior. + FutureMessage, + // A message received that cannot be evaluated relative to our view. + // This happens before we have a view and have sent out neighbor packets. + // always misbehavior. + OutOfScopeMessage, } impl Misbehavior { - pub(super) fn cost(&self) -> ReputationChange { - use Misbehavior::*; - - match *self { - InvalidViewChange => cost::INVALID_VIEW_CHANGE, - UndecodablePacket(bytes) => ReputationChange::new( - bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), - "Grandpa: Bad packet", - ), - BadCatchUpMessage { signatures_checked } => ReputationChange::new( - cost::PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked), - "Grandpa: Bad cath-up message", - ), - BadCommitMessage { signatures_checked, blocks_loaded, equivocations_caught } => { - let cost = cost::PER_SIGNATURE_CHECKED - .saturating_mul(signatures_checked) - .saturating_add(cost::PER_BLOCK_LOADED.saturating_mul(blocks_loaded)); - - let benefit = equivocations_caught.saturating_mul(benefit::PER_EQUIVOCATION); - - ReputationChange::new((benefit as i32).saturating_add(cost as i32), "Grandpa: Bad commit") - }, - FutureMessage => cost::FUTURE_MESSAGE, - OutOfScopeMessage => cost::OUT_OF_SCOPE_MESSAGE, - } - } + pub(super) fn cost(&self) -> ReputationChange { + use Misbehavior::*; + + match *self { + 
InvalidViewChange => cost::INVALID_VIEW_CHANGE, + UndecodablePacket(bytes) => ReputationChange::new( + bytes.saturating_mul(cost::PER_UNDECODABLE_BYTE), + "Grandpa: Bad packet", + ), + BadCatchUpMessage { signatures_checked } => ReputationChange::new( + cost::PER_SIGNATURE_CHECKED.saturating_mul(signatures_checked), + "Grandpa: Bad cath-up message", + ), + BadCommitMessage { + signatures_checked, + blocks_loaded, + equivocations_caught, + } => { + let cost = cost::PER_SIGNATURE_CHECKED + .saturating_mul(signatures_checked) + .saturating_add(cost::PER_BLOCK_LOADED.saturating_mul(blocks_loaded)); + + let benefit = equivocations_caught.saturating_mul(benefit::PER_EQUIVOCATION); + + ReputationChange::new( + (benefit as i32).saturating_add(cost as i32), + "Grandpa: Bad commit", + ) + } + FutureMessage => cost::FUTURE_MESSAGE, + OutOfScopeMessage => cost::OUT_OF_SCOPE_MESSAGE, + } + } } struct PeerInfo { - view: View, - roles: ObservedRole, + view: View, + roles: ObservedRole, } impl PeerInfo { - fn new(roles: ObservedRole) -> Self { - PeerInfo { - view: View::default(), - roles, - } - } + fn new(roles: ObservedRole) -> Self { + PeerInfo { + view: View::default(), + roles, + } + } } /// The peers we're connected do in gossip. 
struct Peers { - inner: HashMap>, - lucky_peers: HashSet, - lucky_authorities: HashSet, + inner: HashMap>, + lucky_peers: HashSet, + lucky_authorities: HashSet, } impl Default for Peers { - fn default() -> Self { - Peers { - inner: HashMap::new(), - lucky_peers: HashSet::new(), - lucky_authorities: HashSet::new(), - } - } + fn default() -> Self { + Peers { + inner: HashMap::new(), + lucky_peers: HashSet::new(), + lucky_authorities: HashSet::new(), + } + } } impl Peers { - fn new_peer(&mut self, who: PeerId, role: ObservedRole) { - match role { - ObservedRole::Authority if self.lucky_authorities.len() < MIN_LUCKY => { - self.lucky_authorities.insert(who.clone()); - }, - ObservedRole::Full | ObservedRole::Light if self.lucky_peers.len() < MIN_LUCKY => { - self.lucky_peers.insert(who.clone()); - }, - _ => {} - } - self.inner.insert(who, PeerInfo::new(role)); - } - - fn peer_disconnected(&mut self, who: &PeerId) { - self.inner.remove(who); - // This does not happen often enough compared to round duration, - // so we don't reshuffle. - self.lucky_peers.remove(who); - self.lucky_authorities.remove(who); - } - - // returns a reference to the new view, if the peer is known. - fn update_peer_state(&mut self, who: &PeerId, update: NeighborPacket) - -> Result>, Misbehavior> - { - let peer = match self.inner.get_mut(who) { - None => return Ok(None), - Some(p) => p, - }; - - let invalid_change = peer.view.set_id > update.set_id - || peer.view.round > update.round && peer.view.set_id == update.set_id - || peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); - - if invalid_change { - return Err(Misbehavior::InvalidViewChange); - } - - peer.view = View { - round: update.round, - set_id: update.set_id, - last_commit: Some(update.commit_finalized_height), - }; - - trace!(target: "afg", "Peer {} updated view. 
Now at {:?}, {:?}", + fn new_peer(&mut self, who: PeerId, role: ObservedRole) { + match role { + ObservedRole::Authority if self.lucky_authorities.len() < MIN_LUCKY => { + self.lucky_authorities.insert(who.clone()); + } + ObservedRole::Full | ObservedRole::Light if self.lucky_peers.len() < MIN_LUCKY => { + self.lucky_peers.insert(who.clone()); + } + _ => {} + } + self.inner.insert(who, PeerInfo::new(role)); + } + + fn peer_disconnected(&mut self, who: &PeerId) { + self.inner.remove(who); + // This does not happen often enough compared to round duration, + // so we don't reshuffle. + self.lucky_peers.remove(who); + self.lucky_authorities.remove(who); + } + + // returns a reference to the new view, if the peer is known. + fn update_peer_state( + &mut self, + who: &PeerId, + update: NeighborPacket, + ) -> Result>, Misbehavior> { + let peer = match self.inner.get_mut(who) { + None => return Ok(None), + Some(p) => p, + }; + + let invalid_change = peer.view.set_id > update.set_id + || peer.view.round > update.round && peer.view.set_id == update.set_id + || peer.view.last_commit.as_ref() > Some(&update.commit_finalized_height); + + if invalid_change { + return Err(Misbehavior::InvalidViewChange); + } + + peer.view = View { + round: update.round, + set_id: update.set_id, + last_commit: Some(update.commit_finalized_height), + }; + + trace!(target: "afg", "Peer {} updated view. Now at {:?}, {:?}", who, peer.view.round, peer.view.set_id); - Ok(Some(&peer.view)) - } - - fn update_commit_height(&mut self, who: &PeerId, new_height: N) -> Result<(), Misbehavior> { - let peer = match self.inner.get_mut(who) { - None => return Ok(()), - Some(p) => p, - }; - - // this doesn't allow a peer to send us unlimited commits with the - // same height, because there is still a misbehavior condition based on - // sending commits that are <= the best we are aware of. 
- if peer.view.last_commit.as_ref() > Some(&new_height) { - return Err(Misbehavior::InvalidViewChange); - } - - peer.view.last_commit = Some(new_height); - - Ok(()) - } - - fn peer<'a>(&'a self, who: &PeerId) -> Option<&'a PeerInfo> { - self.inner.get(who) - } - - fn authorities(&self) -> usize { - // Note that our sentry and our validator are neither authorities nor non-authorities. - self.inner.iter().filter(|(_, info)| matches!(info.roles, ObservedRole::Authority)).count() - } - - fn non_authorities(&self) -> usize { - // Note that our sentry and our validator are neither authorities nor non-authorities. - self.inner - .iter() - .filter(|(_, info)| matches!(info.roles, ObservedRole::Full | ObservedRole::Light)) - .count() - } - - fn reshuffle(&mut self) { - let mut lucky_peers: Vec<_> = self.inner - .iter() - .filter_map(|(id, info)| - if matches!(info.roles, ObservedRole::Full | ObservedRole::Light) { Some(id.clone()) } else { None }) - .collect(); - let mut lucky_authorities: Vec<_> = self.inner - .iter() - .filter_map(|(id, info)| - if matches!(info.roles, ObservedRole::Authority) { Some(id.clone()) } else { None }) - .collect(); - - let num_non_authorities = ((lucky_peers.len() as f32).sqrt() as usize) - .max(MIN_LUCKY) - .min(lucky_peers.len()); - - let num_authorities = ((lucky_authorities.len() as f32).sqrt() as usize) - .max(MIN_LUCKY) - .min(lucky_authorities.len()); - - lucky_peers.partial_shuffle(&mut rand::thread_rng(), num_non_authorities); - lucky_peers.truncate(num_non_authorities); - - lucky_authorities.partial_shuffle(&mut rand::thread_rng(), num_authorities); - lucky_authorities.truncate(num_authorities); - - self.lucky_peers.clear(); - self.lucky_peers.extend(lucky_peers.into_iter()); - - self.lucky_authorities.clear(); - self.lucky_authorities.extend(lucky_authorities.into_iter()); - } + Ok(Some(&peer.view)) + } + + fn update_commit_height(&mut self, who: &PeerId, new_height: N) -> Result<(), Misbehavior> { + let peer = match 
self.inner.get_mut(who) { + None => return Ok(()), + Some(p) => p, + }; + + // this doesn't allow a peer to send us unlimited commits with the + // same height, because there is still a misbehavior condition based on + // sending commits that are <= the best we are aware of. + if peer.view.last_commit.as_ref() > Some(&new_height) { + return Err(Misbehavior::InvalidViewChange); + } + + peer.view.last_commit = Some(new_height); + + Ok(()) + } + + fn peer<'a>(&'a self, who: &PeerId) -> Option<&'a PeerInfo> { + self.inner.get(who) + } + + fn authorities(&self) -> usize { + // Note that our sentry and our validator are neither authorities nor non-authorities. + self.inner + .iter() + .filter(|(_, info)| matches!(info.roles, ObservedRole::Authority)) + .count() + } + + fn non_authorities(&self) -> usize { + // Note that our sentry and our validator are neither authorities nor non-authorities. + self.inner + .iter() + .filter(|(_, info)| matches!(info.roles, ObservedRole::Full | ObservedRole::Light)) + .count() + } + + fn reshuffle(&mut self) { + let mut lucky_peers: Vec<_> = self + .inner + .iter() + .filter_map(|(id, info)| { + if matches!(info.roles, ObservedRole::Full | ObservedRole::Light) { + Some(id.clone()) + } else { + None + } + }) + .collect(); + let mut lucky_authorities: Vec<_> = self + .inner + .iter() + .filter_map(|(id, info)| { + if matches!(info.roles, ObservedRole::Authority) { + Some(id.clone()) + } else { + None + } + }) + .collect(); + + let num_non_authorities = ((lucky_peers.len() as f32).sqrt() as usize) + .max(MIN_LUCKY) + .min(lucky_peers.len()); + + let num_authorities = ((lucky_authorities.len() as f32).sqrt() as usize) + .max(MIN_LUCKY) + .min(lucky_authorities.len()); + + lucky_peers.partial_shuffle(&mut rand::thread_rng(), num_non_authorities); + lucky_peers.truncate(num_non_authorities); + + lucky_authorities.partial_shuffle(&mut rand::thread_rng(), num_authorities); + lucky_authorities.truncate(num_authorities); + + 
self.lucky_peers.clear(); + self.lucky_peers.extend(lucky_peers.into_iter()); + + self.lucky_authorities.clear(); + self.lucky_authorities.extend(lucky_authorities.into_iter()); + } } #[derive(Debug, PartialEq)] -pub(super) enum Action { - // repropagate under given topic, to the given peers, applying cost/benefit to originator. - Keep(H, ReputationChange), - // discard and process. - ProcessAndDiscard(H, ReputationChange), - // discard, applying cost/benefit to originator. - Discard(ReputationChange), +pub(super) enum Action { + // repropagate under given topic, to the given peers, applying cost/benefit to originator. + Keep(H, ReputationChange), + // discard and process. + ProcessAndDiscard(H, ReputationChange), + // discard, applying cost/benefit to originator. + Discard(ReputationChange), } /// State of catch up request handling. #[derive(Debug)] enum PendingCatchUp { - /// No pending catch up requests. - None, - /// Pending catch up request which has not been answered yet. - Requesting { - who: PeerId, - request: CatchUpRequestMessage, - instant: Instant, - }, - /// Pending catch up request that was answered and is being processed. - Processing { - instant: Instant, - }, + /// No pending catch up requests. + None, + /// Pending catch up request which has not been answered yet. + Requesting { + who: PeerId, + request: CatchUpRequestMessage, + instant: Instant, + }, + /// Pending catch up request that was answered and is being processed. + Processing { instant: Instant }, } /// Configuration for the round catch-up mechanism. enum CatchUpConfig { - /// Catch requests are enabled, our node will issue them whenever it sees a - /// neighbor packet for a round further than `CATCH_UP_THRESHOLD`. If - /// `only_from_authorities` is set, the node will only send catch-up - /// requests to other authorities it is connected to. 
This is useful if the - /// GRANDPA observer protocol is live on the network, in which case full - /// nodes (non-authorities) don't have the necessary round data to answer - /// catch-up requests. - Enabled { only_from_authorities: bool }, - /// Catch-up requests are disabled, our node will never issue them. This is - /// useful for the GRANDPA observer mode, where we are only interested in - /// commit messages and don't need to follow the full round protocol. - Disabled, + /// Catch requests are enabled, our node will issue them whenever it sees a + /// neighbor packet for a round further than `CATCH_UP_THRESHOLD`. If + /// `only_from_authorities` is set, the node will only send catch-up + /// requests to other authorities it is connected to. This is useful if the + /// GRANDPA observer protocol is live on the network, in which case full + /// nodes (non-authorities) don't have the necessary round data to answer + /// catch-up requests. + Enabled { only_from_authorities: bool }, + /// Catch-up requests are disabled, our node will never issue them. This is + /// useful for the GRANDPA observer mode, where we are only interested in + /// commit messages and don't need to follow the full round protocol. + Disabled, } impl CatchUpConfig { - fn enabled(only_from_authorities: bool) -> CatchUpConfig { - CatchUpConfig::Enabled { only_from_authorities } - } - - fn disabled() -> CatchUpConfig { - CatchUpConfig::Disabled - } - - fn request_allowed(&self, peer: &PeerInfo) -> bool { - match self { - CatchUpConfig::Disabled => false, - CatchUpConfig::Enabled { only_from_authorities, .. 
} => match peer.roles { - ObservedRole::Authority | ObservedRole::OurSentry | - ObservedRole::OurGuardedAuthority => true, - _ => !only_from_authorities - } - } - } + fn enabled(only_from_authorities: bool) -> CatchUpConfig { + CatchUpConfig::Enabled { + only_from_authorities, + } + } + + fn disabled() -> CatchUpConfig { + CatchUpConfig::Disabled + } + + fn request_allowed(&self, peer: &PeerInfo) -> bool { + match self { + CatchUpConfig::Disabled => false, + CatchUpConfig::Enabled { + only_from_authorities, + .. + } => match peer.roles { + ObservedRole::Authority + | ObservedRole::OurSentry + | ObservedRole::OurGuardedAuthority => true, + _ => !only_from_authorities, + }, + } + } } struct Inner { - local_view: Option>>, - peers: Peers>, - live_topics: KeepTopics, - round_start: Instant, - authorities: Vec, - config: crate::Config, - next_rebroadcast: Instant, - pending_catch_up: PendingCatchUp, - catch_up_config: CatchUpConfig, + local_view: Option>>, + peers: Peers>, + live_topics: KeepTopics, + round_start: Instant, + authorities: Vec, + config: crate::Config, + next_rebroadcast: Instant, + pending_catch_up: PendingCatchUp, + catch_up_config: CatchUpConfig, } type MaybeMessage = Option<(Vec, NeighborPacket>)>; impl Inner { - fn new(config: crate::Config) -> Self { - let catch_up_config = if config.observer_enabled { - if config.is_authority { - // since the observer protocol is enabled, we will only issue - // catch-up requests if we are an authority (and only to other - // authorities). - CatchUpConfig::enabled(true) - } else { - // otherwise, we are running the observer protocol and don't - // care about catch-up requests. - CatchUpConfig::disabled() - } - } else { - // if the observer protocol isn't enabled, then any full node should - // be able to answer catch-up requests. 
- CatchUpConfig::enabled(false) - }; - - Inner { - local_view: None, - peers: Peers::default(), - live_topics: KeepTopics::new(), - round_start: Instant::now(), - next_rebroadcast: Instant::now() + REBROADCAST_AFTER, - authorities: Vec::new(), - pending_catch_up: PendingCatchUp::None, - catch_up_config, - config, - } - } - - /// Note a round in the current set has started. - fn note_round(&mut self, round: Round) -> MaybeMessage { - { - let local_view = match self.local_view { - None => return None, - Some(ref mut v) => if v.round == round { - return None - } else { - v - }, - }; - - let set_id = local_view.set_id; - - debug!(target: "afg", "Voter {} noting beginning of round {:?} to network.", + fn new(config: crate::Config) -> Self { + let catch_up_config = if config.observer_enabled { + if config.is_authority { + // since the observer protocol is enabled, we will only issue + // catch-up requests if we are an authority (and only to other + // authorities). + CatchUpConfig::enabled(true) + } else { + // otherwise, we are running the observer protocol and don't + // care about catch-up requests. + CatchUpConfig::disabled() + } + } else { + // if the observer protocol isn't enabled, then any full node should + // be able to answer catch-up requests. + CatchUpConfig::enabled(false) + }; + + Inner { + local_view: None, + peers: Peers::default(), + live_topics: KeepTopics::new(), + round_start: Instant::now(), + next_rebroadcast: Instant::now() + REBROADCAST_AFTER, + authorities: Vec::new(), + pending_catch_up: PendingCatchUp::None, + catch_up_config, + config, + } + } + + /// Note a round in the current set has started. 
+ fn note_round(&mut self, round: Round) -> MaybeMessage { + { + let local_view = match self.local_view { + None => return None, + Some(ref mut v) => { + if v.round == round { + return None; + } else { + v + } + } + }; + + let set_id = local_view.set_id; + + debug!(target: "afg", "Voter {} noting beginning of round {:?} to network.", self.config.name(), (round, set_id)); - local_view.round = round; - - self.live_topics.push(round, set_id); - self.round_start = Instant::now(); - self.peers.reshuffle(); - } - self.multicast_neighbor_packet() - } - - /// Note that a voter set with given ID has started. Does nothing if the last - /// call to the function was with the same `set_id`. - fn note_set(&mut self, set_id: SetId, authorities: Vec) -> MaybeMessage { - { - let local_view = match self.local_view { - ref mut x @ None => x.get_or_insert(LocalView { - round: Round(1), - set_id, - last_commit: None, - }), - Some(ref mut v) => if v.set_id == set_id { - if self.authorities != authorities { - debug!(target: "afg", - "Gossip validator noted set {:?} twice with different authorities. \ - Was the authority set hard forked?", - set_id, - ); - self.authorities = authorities; - } - return None; - } else { - v - }, - }; - - local_view.update_set(set_id); - self.live_topics.push(Round(1), set_id); - self.authorities = authorities; - } - self.multicast_neighbor_packet() - } - - /// Note that we've imported a commit finalizing a given block. 
- fn note_commit_finalized( - &mut self, - round: Round, - set_id: SetId, - finalized: NumberFor, - ) -> MaybeMessage { - { - match self.local_view { - None => return None, - Some(ref mut v) => if v.last_commit_height() < Some(&finalized) { - v.last_commit = Some((finalized, round, set_id)); - } else { - return None - }, - }; - } - - self.multicast_neighbor_packet() - } - - fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { - self.local_view.as_ref() - .map(LocalView::as_view) - .map(|v| v.consider_vote(round, set_id)) - .unwrap_or(Consider::RejectOutOfScope) - } - - fn consider_global(&self, set_id: SetId, number: NumberFor) -> Consider { - self.local_view.as_ref() - .map(LocalView::as_view) - .map(|v| v.consider_global(set_id, &number)) - .unwrap_or(Consider::RejectOutOfScope) - } - - fn cost_past_rejection(&self, _who: &PeerId, _round: Round, _set_id: SetId) -> ReputationChange { - // hardcoded for now. - cost::PAST_REJECTION - } - - fn validate_round_message(&self, who: &PeerId, full: &VoteMessage) - -> Action - { - match self.consider_vote(full.round, full.set_id) { - Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectOutOfScope => return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), - Consider::RejectPast => - return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), - Consider::Accept => {}, - } - - // ensure authority is part of the set. 
- if !self.authorities.contains(&full.message.id) { - debug!(target: "afg", "Message from unknown voter: {}", full.message.id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); - return Action::Discard(cost::UNKNOWN_VOTER); - } - - if let Err(()) = super::check_message_sig::( - &full.message.message, - &full.message.id, - &full.message.signature, - full.round.0, - full.set_id.0, - ) { - debug!(target: "afg", "Bad message signature {}", full.message.id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); - return Action::Discard(cost::BAD_SIGNATURE); - } - - let topic = super::round_topic::(full.round.0, full.set_id.0); - Action::Keep(topic, benefit::ROUND_MESSAGE) - } - - fn validate_commit_message(&mut self, who: &PeerId, full: &FullCommitMessage) - -> Action - { - - if let Err(misbehavior) = self.peers.update_commit_height(who, full.message.target_number) { - return Action::Discard(misbehavior.cost()); - } - - match self.consider_global(full.set_id, full.message.target_number) { - Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), - Consider::RejectPast => - return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)), - Consider::RejectOutOfScope => return Action::Discard(Misbehavior::OutOfScopeMessage.cost()), - Consider::Accept => {}, - } - - if full.message.precommits.len() != full.message.auth_data.len() || full.message.precommits.is_empty() { - debug!(target: "afg", "Malformed compact commit"); - telemetry!(CONSENSUS_DEBUG; "afg.malformed_compact_commit"; - "precommits_len" => ?full.message.precommits.len(), - "auth_data_len" => ?full.message.auth_data.len(), - "precommits_is_empty" => ?full.message.precommits.is_empty(), - ); - return Action::Discard(cost::MALFORMED_COMMIT); - } - - // always discard commits initially and rebroadcast after doing full - // checking. 
- let topic = super::global_topic::(full.set_id.0); - Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_COMMIT) - } - - fn validate_catch_up_message(&mut self, who: &PeerId, full: &FullCatchUpMessage) - -> Action - { - match &self.pending_catch_up { - PendingCatchUp::Requesting { who: peer, request, instant } => { - if peer != who { - return Action::Discard(Misbehavior::OutOfScopeMessage.cost()); - } - - if request.set_id != full.set_id { - return Action::Discard(cost::MALFORMED_CATCH_UP); - } - - if request.round.0 > full.message.round_number { - return Action::Discard(cost::MALFORMED_CATCH_UP); - } - - if full.message.prevotes.is_empty() || full.message.precommits.is_empty() { - return Action::Discard(cost::MALFORMED_CATCH_UP); - } - - // move request to pending processing state, we won't push out - // any catch up requests until we import this one (either with a - // success or failure). - self.pending_catch_up = PendingCatchUp::Processing { - instant: instant.clone(), - }; - - // always discard catch up messages, they're point-to-point - let topic = super::global_topic::(full.set_id.0); - Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_CATCH_UP) - }, - _ => Action::Discard(Misbehavior::OutOfScopeMessage.cost()), - } - } - - fn note_catch_up_message_processed(&mut self) { - match &self.pending_catch_up { - PendingCatchUp::Processing { .. 
} => { - self.pending_catch_up = PendingCatchUp::None; - }, - state => trace!(target: "afg", - "Noted processed catch up message when state was: {:?}", - state, - ), - } - } - - fn handle_catch_up_request( - &mut self, - who: &PeerId, - request: CatchUpRequestMessage, - set_state: &environment::SharedVoterSetState, - ) -> (Option>, Action) { - let local_view = match self.local_view { - None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), - Some(ref view) => view, - }; - - if request.set_id != local_view.set_id { - // NOTE: When we're close to a set change there is potentially a - // race where the peer sent us the request before it observed that - // we had transitioned to a new set. In this case we charge a lower - // cost. - if request.set_id.0.saturating_add(1) == local_view.set_id.0 && - local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 - { - return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)); - } - - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); - } - - match self.peers.peer(who) { - None => - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), - Some(peer) if peer.view.round >= request.round => - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), - _ => {}, - } - - let last_completed_round = set_state.read().last_completed_round(); - if last_completed_round.number < request.round.0 { - return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); - } - - trace!(target: "afg", "Replying to catch-up request for round {} from {} with round {}", - request.round.0, - who, - last_completed_round.number, - ); - - let mut prevotes = Vec::new(); - let mut precommits = Vec::new(); - - // NOTE: the set of votes stored in `LastCompletedRound` is a minimal - // set of votes, i.e. at most one equivocation is stored per voter. 
The - // code below assumes this invariant is maintained when creating the - // catch up reply since peers won't accept catch-up messages that have - // too many equivocations (we exceed the fault-tolerance bound). - for vote in last_completed_round.votes { - match vote.message { - finality_grandpa::Message::Prevote(prevote) => { - prevotes.push(finality_grandpa::SignedPrevote { - prevote, - signature: vote.signature, - id: vote.id, - }); - }, - finality_grandpa::Message::Precommit(precommit) => { - precommits.push(finality_grandpa::SignedPrecommit { - precommit, - signature: vote.signature, - id: vote.id, - }); - }, - _ => {}, - } - } - - let (base_hash, base_number) = last_completed_round.base; - - let catch_up = CatchUp:: { - round_number: last_completed_round.number, - prevotes, - precommits, - base_hash, - base_number, - }; - - let full_catch_up = GossipMessage::CatchUp::(FullCatchUpMessage { - set_id: request.set_id, - message: catch_up, - }); - - (Some(full_catch_up), Action::Discard(cost::CATCH_UP_REPLY)) - } - - fn try_catch_up(&mut self, who: &PeerId) -> (Option>, Option) { - let mut catch_up = None; - let mut report = None; - - // if the peer is on the same set and ahead of us by a margin bigger - // than `CATCH_UP_THRESHOLD` then we should ask it for a catch up - // message. we only send catch-up requests to authorities, observers - // won't be able to reply since they don't follow the full GRANDPA - // protocol and therefore might not have the vote data available. 
- if let (Some(peer), Some(local_view)) = (self.peers.peer(who), &self.local_view) { - if self.catch_up_config.request_allowed(&peer) && - peer.view.set_id == local_view.set_id && - peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 - { - // send catch up request if allowed - let round = peer.view.round.0 - 1; // peer.view.round is > 0 - let request = CatchUpRequestMessage { - set_id: peer.view.set_id, - round: Round(round), - }; - - let (catch_up_allowed, catch_up_report) = self.note_catch_up_request(who, &request); - - if catch_up_allowed { - trace!(target: "afg", "Sending catch-up request for round {} to {}", - round, - who, - ); - - catch_up = Some(GossipMessage::::CatchUpRequest(request)); - } - - report = catch_up_report; - } - } - - (catch_up, report) - } - - fn import_neighbor_message(&mut self, who: &PeerId, update: NeighborPacket>) - -> (Vec, Action, Option>, Option) - { - let update_res = self.peers.update_peer_state(who, update); - - let (cost_benefit, topics) = match update_res { - Ok(view) => - (benefit::NEIGHBOR_MESSAGE, view.map(|view| neighbor_topics::(view))), - Err(misbehavior) => - (misbehavior.cost(), None), - }; - - let (catch_up, report) = match update_res { - Ok(_) => self.try_catch_up(who), - _ => (None, None), - }; - - let neighbor_topics = topics.unwrap_or_default(); - - // always discard neighbor messages, it's only valid for one hop. 
- let action = Action::Discard(cost_benefit); - - (neighbor_topics, action, catch_up, report) - } - - fn multicast_neighbor_packet(&self) -> MaybeMessage { - self.local_view.as_ref().map(|local_view| { - let packet = NeighborPacket { - round: local_view.round, - set_id: local_view.set_id, - commit_finalized_height: *local_view.last_commit_height().unwrap_or(&Zero::zero()), - }; - - let peers = self.peers.inner.keys().cloned().collect(); - (peers, packet) - }) - } - - fn note_catch_up_request( - &mut self, - who: &PeerId, - catch_up_request: &CatchUpRequestMessage, - ) -> (bool, Option) { - let report = match &self.pending_catch_up { - PendingCatchUp::Requesting { who: peer, instant, .. } => - if instant.elapsed() <= CATCH_UP_REQUEST_TIMEOUT { - return (false, None); - } else { - // report peer for timeout - Some((peer.clone(), cost::CATCH_UP_REQUEST_TIMEOUT)) - }, - PendingCatchUp::Processing { instant, .. } => - if instant.elapsed() < CATCH_UP_PROCESS_TIMEOUT { - return (false, None); - } else { - None - }, - _ => None, - }; - - self.pending_catch_up = PendingCatchUp::Requesting { - who: who.clone(), - request: catch_up_request.clone(), - instant: Instant::now(), - }; - - (true, report) - } - - /// The initial logic for filtering round messages follows the given state - /// transitions: - /// - /// - State 0: not allowed to anyone (only if our local node is not an authority) - /// - State 1: allowed to random `sqrt(authorities)` - /// - State 2: allowed to all authorities - /// - State 3: allowed to random `sqrt(non-authorities)` - /// - State 4: allowed to all non-authorities - /// - /// Transitions will be triggered on repropagation attempts by the - /// underlying gossip layer, which should happen every 30 seconds. 
- fn round_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { - let round_duration = self.config.gossip_duration * ROUND_DURATION; - let round_elapsed = self.round_start.elapsed(); - - - if !self.config.is_authority - && round_elapsed < round_duration * PROPAGATION_ALL - { - // non-authority nodes don't gossip any messages right away. we - // assume that authorities (and sentries) are strongly connected, so - // it should be unnecessary for non-authorities to gossip all - // messages right away. - return false; - } - - match peer.roles { - ObservedRole::OurGuardedAuthority | ObservedRole::OurSentry => true, - ObservedRole::Authority => { - let authorities = self.peers.authorities(); - - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - }, - ObservedRole::Full | ObservedRole::Light => { - // the node is not an authority so we apply stricter filters - if round_elapsed >= round_duration * PROPAGATION_ALL { - // if we waited for 3 (or more) rounds - // then it is allowed to be sent to all peers. - true - } else if round_elapsed >= round_duration * PROPAGATION_SOME_NON_AUTHORITIES { - // otherwise we only send it to `sqrt(non-authorities)`. - self.peers.lucky_peers.contains(who) - } else { - false - } - }, - } - } - - /// The initial logic for filtering global messages follows the given state - /// transitions: - /// - /// - State 0: send to `sqrt(authorities)` ++ `sqrt(non-authorities)`. 
- /// - State 1: send to all authorities - /// - State 2: send to all non-authorities - /// - /// We are more lenient with global messages since there should be a lot - /// less global messages than round messages (just commits), and we want - /// these to propagate to non-authorities fast enough so that they can - /// observe finality. - /// - /// Transitions will be triggered on repropagation attempts by the - /// underlying gossip layer, which should happen every 30 seconds. - fn global_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { - let round_duration = self.config.gossip_duration * ROUND_DURATION; - let round_elapsed = self.round_start.elapsed(); - - match peer.roles { - ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority => true, - ObservedRole::Authority => { - let authorities = self.peers.authorities(); - - // the target node is an authority, on the first round duration we start by - // sending the message to only `sqrt(authorities)` (if we're - // connected to at least `MIN_LUCKY`). - if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES - && authorities > MIN_LUCKY - { - self.peers.lucky_authorities.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // authorities for whom it is polite to do so - true - } - }, - ObservedRole::Full | ObservedRole::Light => { - let non_authorities = self.peers.non_authorities(); - - // the target node is not an authority, on the first and second - // round duration we start by sending the message to only - // `sqrt(non_authorities)` (if we're connected to at least - // `MIN_LUCKY`). 
- if round_elapsed < round_duration * PROPAGATION_SOME_NON_AUTHORITIES - && non_authorities > MIN_LUCKY - { - self.peers.lucky_peers.contains(who) - } else { - // otherwise we already went through the step above, so - // we won't filter the message and send it to all - // non-authorities for whom it is polite to do so - true - } - } - } - } + local_view.round = round; + + self.live_topics.push(round, set_id); + self.round_start = Instant::now(); + self.peers.reshuffle(); + } + self.multicast_neighbor_packet() + } + + /// Note that a voter set with given ID has started. Does nothing if the last + /// call to the function was with the same `set_id`. + fn note_set(&mut self, set_id: SetId, authorities: Vec) -> MaybeMessage { + { + let local_view = match self.local_view { + ref mut x @ None => x.get_or_insert(LocalView { + round: Round(1), + set_id, + last_commit: None, + }), + Some(ref mut v) => { + if v.set_id == set_id { + if self.authorities != authorities { + debug!(target: "afg", + "Gossip validator noted set {:?} twice with different authorities. \ + Was the authority set hard forked?", + set_id, + ); + self.authorities = authorities; + } + return None; + } else { + v + } + } + }; + + local_view.update_set(set_id); + self.live_topics.push(Round(1), set_id); + self.authorities = authorities; + } + self.multicast_neighbor_packet() + } + + /// Note that we've imported a commit finalizing a given block. 
+ fn note_commit_finalized( + &mut self, + round: Round, + set_id: SetId, + finalized: NumberFor, + ) -> MaybeMessage { + { + match self.local_view { + None => return None, + Some(ref mut v) => { + if v.last_commit_height() < Some(&finalized) { + v.last_commit = Some((finalized, round, set_id)); + } else { + return None; + } + } + }; + } + + self.multicast_neighbor_packet() + } + + fn consider_vote(&self, round: Round, set_id: SetId) -> Consider { + self.local_view + .as_ref() + .map(LocalView::as_view) + .map(|v| v.consider_vote(round, set_id)) + .unwrap_or(Consider::RejectOutOfScope) + } + + fn consider_global(&self, set_id: SetId, number: NumberFor) -> Consider { + self.local_view + .as_ref() + .map(LocalView::as_view) + .map(|v| v.consider_global(set_id, &number)) + .unwrap_or(Consider::RejectOutOfScope) + } + + fn cost_past_rejection( + &self, + _who: &PeerId, + _round: Round, + _set_id: SetId, + ) -> ReputationChange { + // hardcoded for now. + cost::PAST_REJECTION + } + + fn validate_round_message( + &self, + who: &PeerId, + full: &VoteMessage, + ) -> Action { + match self.consider_vote(full.round, full.set_id) { + Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), + Consider::RejectOutOfScope => { + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + } + Consider::RejectPast => { + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)) + } + Consider::Accept => {} + } + + // ensure authority is part of the set. 
+ if !self.authorities.contains(&full.message.id) { + debug!(target: "afg", "Message from unknown voter: {}", full.message.id); + telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); + return Action::Discard(cost::UNKNOWN_VOTER); + } + + if let Err(()) = super::check_message_sig::( + &full.message.message, + &full.message.id, + &full.message.signature, + full.round.0, + full.set_id.0, + ) { + debug!(target: "afg", "Bad message signature {}", full.message.id); + telemetry!(CONSENSUS_DEBUG; "afg.bad_msg_signature"; "signature" => ?full.message.id); + return Action::Discard(cost::BAD_SIGNATURE); + } + + let topic = super::round_topic::(full.round.0, full.set_id.0); + Action::Keep(topic, benefit::ROUND_MESSAGE) + } + + fn validate_commit_message( + &mut self, + who: &PeerId, + full: &FullCommitMessage, + ) -> Action { + if let Err(misbehavior) = self + .peers + .update_commit_height(who, full.message.target_number) + { + return Action::Discard(misbehavior.cost()); + } + + match self.consider_global(full.set_id, full.message.target_number) { + Consider::RejectFuture => return Action::Discard(Misbehavior::FutureMessage.cost()), + Consider::RejectPast => { + return Action::Discard(self.cost_past_rejection(who, full.round, full.set_id)) + } + Consider::RejectOutOfScope => { + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + } + Consider::Accept => {} + } + + if full.message.precommits.len() != full.message.auth_data.len() + || full.message.precommits.is_empty() + { + debug!(target: "afg", "Malformed compact commit"); + telemetry!(CONSENSUS_DEBUG; "afg.malformed_compact_commit"; + "precommits_len" => ?full.message.precommits.len(), + "auth_data_len" => ?full.message.auth_data.len(), + "precommits_is_empty" => ?full.message.precommits.is_empty(), + ); + return Action::Discard(cost::MALFORMED_COMMIT); + } + + // always discard commits initially and rebroadcast after doing full + // checking. 
+ let topic = super::global_topic::(full.set_id.0); + Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_COMMIT) + } + + fn validate_catch_up_message( + &mut self, + who: &PeerId, + full: &FullCatchUpMessage, + ) -> Action { + match &self.pending_catch_up { + PendingCatchUp::Requesting { + who: peer, + request, + instant, + } => { + if peer != who { + return Action::Discard(Misbehavior::OutOfScopeMessage.cost()); + } + + if request.set_id != full.set_id { + return Action::Discard(cost::MALFORMED_CATCH_UP); + } + + if request.round.0 > full.message.round_number { + return Action::Discard(cost::MALFORMED_CATCH_UP); + } + + if full.message.prevotes.is_empty() || full.message.precommits.is_empty() { + return Action::Discard(cost::MALFORMED_CATCH_UP); + } + + // move request to pending processing state, we won't push out + // any catch up requests until we import this one (either with a + // success or failure). + self.pending_catch_up = PendingCatchUp::Processing { + instant: instant.clone(), + }; + + // always discard catch up messages, they're point-to-point + let topic = super::global_topic::(full.set_id.0); + Action::ProcessAndDiscard(topic, benefit::BASIC_VALIDATED_CATCH_UP) + } + _ => Action::Discard(Misbehavior::OutOfScopeMessage.cost()), + } + } + + fn note_catch_up_message_processed(&mut self) { + match &self.pending_catch_up { + PendingCatchUp::Processing { .. 
} => { + self.pending_catch_up = PendingCatchUp::None; + } + state => trace!(target: "afg", + "Noted processed catch up message when state was: {:?}", + state, + ), + } + } + + fn handle_catch_up_request( + &mut self, + who: &PeerId, + request: CatchUpRequestMessage, + set_state: &environment::SharedVoterSetState, + ) -> (Option>, Action) { + let local_view = match self.local_view { + None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + Some(ref view) => view, + }; + + if request.set_id != local_view.set_id { + // NOTE: When we're close to a set change there is potentially a + // race where the peer sent us the request before it observed that + // we had transitioned to a new set. In this case we charge a lower + // cost. + if request.set_id.0.saturating_add(1) == local_view.set_id.0 + && local_view.round.0.saturating_sub(CATCH_UP_THRESHOLD) == 0 + { + return (None, Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP)); + } + + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + } + + match self.peers.peer(who) { + None => return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())), + Some(peer) if peer.view.round >= request.round => { + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())) + } + _ => {} + } + + let last_completed_round = set_state.read().last_completed_round(); + if last_completed_round.number < request.round.0 { + return (None, Action::Discard(Misbehavior::OutOfScopeMessage.cost())); + } + + trace!(target: "afg", "Replying to catch-up request for round {} from {} with round {}", + request.round.0, + who, + last_completed_round.number, + ); + + let mut prevotes = Vec::new(); + let mut precommits = Vec::new(); + + // NOTE: the set of votes stored in `LastCompletedRound` is a minimal + // set of votes, i.e. at most one equivocation is stored per voter. 
The + // code below assumes this invariant is maintained when creating the + // catch up reply since peers won't accept catch-up messages that have + // too many equivocations (we exceed the fault-tolerance bound). + for vote in last_completed_round.votes { + match vote.message { + finality_grandpa::Message::Prevote(prevote) => { + prevotes.push(finality_grandpa::SignedPrevote { + prevote, + signature: vote.signature, + id: vote.id, + }); + } + finality_grandpa::Message::Precommit(precommit) => { + precommits.push(finality_grandpa::SignedPrecommit { + precommit, + signature: vote.signature, + id: vote.id, + }); + } + _ => {} + } + } + + let (base_hash, base_number) = last_completed_round.base; + + let catch_up = CatchUp:: { + round_number: last_completed_round.number, + prevotes, + precommits, + base_hash, + base_number, + }; + + let full_catch_up = GossipMessage::CatchUp::(FullCatchUpMessage { + set_id: request.set_id, + message: catch_up, + }); + + (Some(full_catch_up), Action::Discard(cost::CATCH_UP_REPLY)) + } + + fn try_catch_up(&mut self, who: &PeerId) -> (Option>, Option) { + let mut catch_up = None; + let mut report = None; + + // if the peer is on the same set and ahead of us by a margin bigger + // than `CATCH_UP_THRESHOLD` then we should ask it for a catch up + // message. we only send catch-up requests to authorities, observers + // won't be able to reply since they don't follow the full GRANDPA + // protocol and therefore might not have the vote data available. 
+ if let (Some(peer), Some(local_view)) = (self.peers.peer(who), &self.local_view) { + if self.catch_up_config.request_allowed(&peer) + && peer.view.set_id == local_view.set_id + && peer.view.round.0.saturating_sub(CATCH_UP_THRESHOLD) > local_view.round.0 + { + // send catch up request if allowed + let round = peer.view.round.0 - 1; // peer.view.round is > 0 + let request = CatchUpRequestMessage { + set_id: peer.view.set_id, + round: Round(round), + }; + + let (catch_up_allowed, catch_up_report) = self.note_catch_up_request(who, &request); + + if catch_up_allowed { + trace!(target: "afg", "Sending catch-up request for round {} to {}", + round, + who, + ); + + catch_up = Some(GossipMessage::::CatchUpRequest(request)); + } + + report = catch_up_report; + } + } + + (catch_up, report) + } + + fn import_neighbor_message( + &mut self, + who: &PeerId, + update: NeighborPacket>, + ) -> ( + Vec, + Action, + Option>, + Option, + ) { + let update_res = self.peers.update_peer_state(who, update); + + let (cost_benefit, topics) = match update_res { + Ok(view) => ( + benefit::NEIGHBOR_MESSAGE, + view.map(|view| neighbor_topics::(view)), + ), + Err(misbehavior) => (misbehavior.cost(), None), + }; + + let (catch_up, report) = match update_res { + Ok(_) => self.try_catch_up(who), + _ => (None, None), + }; + + let neighbor_topics = topics.unwrap_or_default(); + + // always discard neighbor messages, it's only valid for one hop. 
+ let action = Action::Discard(cost_benefit); + + (neighbor_topics, action, catch_up, report) + } + + fn multicast_neighbor_packet(&self) -> MaybeMessage { + self.local_view.as_ref().map(|local_view| { + let packet = NeighborPacket { + round: local_view.round, + set_id: local_view.set_id, + commit_finalized_height: *local_view.last_commit_height().unwrap_or(&Zero::zero()), + }; + + let peers = self.peers.inner.keys().cloned().collect(); + (peers, packet) + }) + } + + fn note_catch_up_request( + &mut self, + who: &PeerId, + catch_up_request: &CatchUpRequestMessage, + ) -> (bool, Option) { + let report = match &self.pending_catch_up { + PendingCatchUp::Requesting { + who: peer, instant, .. + } => { + if instant.elapsed() <= CATCH_UP_REQUEST_TIMEOUT { + return (false, None); + } else { + // report peer for timeout + Some((peer.clone(), cost::CATCH_UP_REQUEST_TIMEOUT)) + } + } + PendingCatchUp::Processing { instant, .. } => { + if instant.elapsed() < CATCH_UP_PROCESS_TIMEOUT { + return (false, None); + } else { + None + } + } + _ => None, + }; + + self.pending_catch_up = PendingCatchUp::Requesting { + who: who.clone(), + request: catch_up_request.clone(), + instant: Instant::now(), + }; + + (true, report) + } + + /// The initial logic for filtering round messages follows the given state + /// transitions: + /// + /// - State 0: not allowed to anyone (only if our local node is not an authority) + /// - State 1: allowed to random `sqrt(authorities)` + /// - State 2: allowed to all authorities + /// - State 3: allowed to random `sqrt(non-authorities)` + /// - State 4: allowed to all non-authorities + /// + /// Transitions will be triggered on repropagation attempts by the + /// underlying gossip layer, which should happen every 30 seconds. 
+ fn round_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { + let round_duration = self.config.gossip_duration * ROUND_DURATION; + let round_elapsed = self.round_start.elapsed(); + + if !self.config.is_authority && round_elapsed < round_duration * PROPAGATION_ALL { + // non-authority nodes don't gossip any messages right away. we + // assume that authorities (and sentries) are strongly connected, so + // it should be unnecessary for non-authorities to gossip all + // messages right away. + return false; + } + + match peer.roles { + ObservedRole::OurGuardedAuthority | ObservedRole::OurSentry => true, + ObservedRole::Authority => { + let authorities = self.peers.authorities(); + + // the target node is an authority, on the first round duration we start by + // sending the message to only `sqrt(authorities)` (if we're + // connected to at least `MIN_LUCKY`). + if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES + && authorities > MIN_LUCKY + { + self.peers.lucky_authorities.contains(who) + } else { + // otherwise we already went through the step above, so + // we won't filter the message and send it to all + // authorities for whom it is polite to do so + true + } + } + ObservedRole::Full | ObservedRole::Light => { + // the node is not an authority so we apply stricter filters + if round_elapsed >= round_duration * PROPAGATION_ALL { + // if we waited for 3 (or more) rounds + // then it is allowed to be sent to all peers. + true + } else if round_elapsed >= round_duration * PROPAGATION_SOME_NON_AUTHORITIES { + // otherwise we only send it to `sqrt(non-authorities)`. + self.peers.lucky_peers.contains(who) + } else { + false + } + } + } + } + + /// The initial logic for filtering global messages follows the given state + /// transitions: + /// + /// - State 0: send to `sqrt(authorities)` ++ `sqrt(non-authorities)`. 
+ /// - State 1: send to all authorities + /// - State 2: send to all non-authorities + /// + /// We are more lenient with global messages since there should be a lot + /// less global messages than round messages (just commits), and we want + /// these to propagate to non-authorities fast enough so that they can + /// observe finality. + /// + /// Transitions will be triggered on repropagation attempts by the + /// underlying gossip layer, which should happen every 30 seconds. + fn global_message_allowed(&self, who: &PeerId, peer: &PeerInfo) -> bool { + let round_duration = self.config.gossip_duration * ROUND_DURATION; + let round_elapsed = self.round_start.elapsed(); + + match peer.roles { + ObservedRole::OurSentry | ObservedRole::OurGuardedAuthority => true, + ObservedRole::Authority => { + let authorities = self.peers.authorities(); + + // the target node is an authority, on the first round duration we start by + // sending the message to only `sqrt(authorities)` (if we're + // connected to at least `MIN_LUCKY`). + if round_elapsed < round_duration * PROPAGATION_ALL_AUTHORITIES + && authorities > MIN_LUCKY + { + self.peers.lucky_authorities.contains(who) + } else { + // otherwise we already went through the step above, so + // we won't filter the message and send it to all + // authorities for whom it is polite to do so + true + } + } + ObservedRole::Full | ObservedRole::Light => { + let non_authorities = self.peers.non_authorities(); + + // the target node is not an authority, on the first and second + // round duration we start by sending the message to only + // `sqrt(non_authorities)` (if we're connected to at least + // `MIN_LUCKY`). 
+ if round_elapsed < round_duration * PROPAGATION_SOME_NON_AUTHORITIES + && non_authorities > MIN_LUCKY + { + self.peers.lucky_peers.contains(who) + } else { + // otherwise we already went through the step above, so + // we won't filter the message and send it to all + // non-authorities for whom it is polite to do so + true + } + } + } + } } // Prometheus metrics for [`GossipValidator`]. pub(crate) struct Metrics { - messages_validated: CounterVec, + messages_validated: CounterVec, } impl Metrics { - pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { - Ok(Self { - messages_validated: register( - CounterVec::new( - Opts::new( - "finality_grandpa_communication_gossip_validator_messages", - "Number of messages validated by the finality grandpa gossip validator." - ), - &["message", "action"] - )?, - registry, - )?, - }) - } + pub(crate) fn register( + registry: &prometheus_endpoint::Registry, + ) -> Result { + Ok(Self { + messages_validated: register( + CounterVec::new( + Opts::new( + "finality_grandpa_communication_gossip_validator_messages", + "Number of messages validated by the finality grandpa gossip validator.", + ), + &["message", "action"], + )?, + registry, + )?, + }) + } } /// A validator for GRANDPA gossip messages. pub(super) struct GossipValidator { - inner: parking_lot::RwLock>, - set_state: environment::SharedVoterSetState, - report_sender: TracingUnboundedSender, - metrics: Option, + inner: parking_lot::RwLock>, + set_state: environment::SharedVoterSetState, + report_sender: TracingUnboundedSender, + metrics: Option, } impl GossipValidator { - /// Create a new gossip-validator. The current set is initialized to 0. If - /// `catch_up_enabled` is set to false then the validator will not issue any - /// catch up requests (useful e.g. when running just the GRANDPA observer). 
- pub(super) fn new( - config: crate::Config, - set_state: environment::SharedVoterSetState, - prometheus_registry: Option<&Registry>, - ) -> (GossipValidator, TracingUnboundedReceiver) { - let metrics = match prometheus_registry.map(Metrics::register) { - Some(Ok(metrics)) => Some(metrics), - Some(Err(e)) => { - debug!(target: "afg", "Failed to register metrics: {:?}", e); - None - }, - None => None, - }; - - let (tx, rx) = tracing_unbounded("mpsc_grandpa_gossip_validator"); - let val = GossipValidator { - inner: parking_lot::RwLock::new(Inner::new(config)), - set_state, - report_sender: tx, - metrics: metrics, - }; - - (val, rx) - } - - /// Note a round in the current set has started. - pub(super) fn note_round(&self, round: Round, send_neighbor: F) - where F: FnOnce(Vec, NeighborPacket>) - { - let maybe_msg = self.inner.write().note_round(round); - if let Some((to, msg)) = maybe_msg { - send_neighbor(to, msg); - } - } - - /// Note that a voter set with given ID has started. Updates the current set to given - /// value and initializes the round to 0. - pub(super) fn note_set(&self, set_id: SetId, authorities: Vec, send_neighbor: F) - where F: FnOnce(Vec, NeighborPacket>) - { - let maybe_msg = self.inner.write().note_set(set_id, authorities); - if let Some((to, msg)) = maybe_msg { - send_neighbor(to, msg); - } - } - - /// Note that we've imported a commit finalizing a given block. - pub(super) fn note_commit_finalized( - &self, - round: Round, - set_id: SetId, - finalized: NumberFor, - send_neighbor: F, - ) - where F: FnOnce(Vec, NeighborPacket>) - { - let maybe_msg = self.inner.write().note_commit_finalized( - round, - set_id, - finalized, - ); - - if let Some((to, msg)) = maybe_msg { - send_neighbor(to, msg); - } - } - - /// Note that we've processed a catch up message. 
- pub(super) fn note_catch_up_message_processed(&self) { - self.inner.write().note_catch_up_message_processed(); - } - - fn report(&self, who: PeerId, cost_benefit: ReputationChange) { - let _ = self.report_sender.unbounded_send(PeerReport { who, cost_benefit }); - } - - pub(super) fn do_validate(&self, who: &PeerId, mut data: &[u8]) - -> (Action, Vec, Option>) - { - let mut broadcast_topics = Vec::new(); - let mut peer_reply = None; - - // Message name for Prometheus metric recording. - let message_name; - - let action = { - match GossipMessage::::decode(&mut data) { - Ok(GossipMessage::Vote(ref message)) => { - message_name = Some("vote"); - self.inner.write().validate_round_message(who, message) - }, - Ok(GossipMessage::Commit(ref message)) => { - message_name = Some("commit"); - self.inner.write().validate_commit_message(who, message) - }, - Ok(GossipMessage::Neighbor(update)) => { - message_name = Some("neighbor"); - let (topics, action, catch_up, report) = self.inner.write().import_neighbor_message( - who, - update.into_neighbor_packet(), - ); - - if let Some((peer, cost_benefit)) = report { - self.report(peer, cost_benefit); - } - - broadcast_topics = topics; - peer_reply = catch_up; - action - } - Ok(GossipMessage::CatchUp(ref message)) => { - message_name = Some("catch_up"); - self.inner.write().validate_catch_up_message(who, message) - }, - Ok(GossipMessage::CatchUpRequest(request)) => { - message_name = Some("catch_up_request"); - let (reply, action) = self.inner.write().handle_catch_up_request( - who, - request, - &self.set_state, - ); - - peer_reply = reply; - action - } - Err(e) => { - message_name = None; - debug!(target: "afg", "Error decoding message: {}", e.what()); - telemetry!(CONSENSUS_DEBUG; "afg.err_decoding_msg"; "" => ""); - - let len = std::cmp::min(i32::max_value() as usize, data.len()) as i32; - Action::Discard(Misbehavior::UndecodablePacket(len).cost()) - } - } - }; - - // Prometheus metric recording. 
- if let (Some(metrics), Some(message_name)) = (&self.metrics, message_name) { - let action_name = match action { - Action::Keep(_, _) => "keep", - Action::ProcessAndDiscard(_, _) => "process_and_discard", - Action::Discard(_) => "discard", - }; - metrics.messages_validated.with_label_values(&[message_name, action_name]).inc(); - } - - (action, broadcast_topics, peer_reply) - } - - #[cfg(test)] - fn inner(&self) -> &parking_lot::RwLock> { - &self.inner - } + /// Create a new gossip-validator. The current set is initialized to 0. If + /// `catch_up_enabled` is set to false then the validator will not issue any + /// catch up requests (useful e.g. when running just the GRANDPA observer). + pub(super) fn new( + config: crate::Config, + set_state: environment::SharedVoterSetState, + prometheus_registry: Option<&Registry>, + ) -> (GossipValidator, TracingUnboundedReceiver) { + let metrics = match prometheus_registry.map(Metrics::register) { + Some(Ok(metrics)) => Some(metrics), + Some(Err(e)) => { + debug!(target: "afg", "Failed to register metrics: {:?}", e); + None + } + None => None, + }; + + let (tx, rx) = tracing_unbounded("mpsc_grandpa_gossip_validator"); + let val = GossipValidator { + inner: parking_lot::RwLock::new(Inner::new(config)), + set_state, + report_sender: tx, + metrics: metrics, + }; + + (val, rx) + } + + /// Note a round in the current set has started. + pub(super) fn note_round(&self, round: Round, send_neighbor: F) + where + F: FnOnce(Vec, NeighborPacket>), + { + let maybe_msg = self.inner.write().note_round(round); + if let Some((to, msg)) = maybe_msg { + send_neighbor(to, msg); + } + } + + /// Note that a voter set with given ID has started. Updates the current set to given + /// value and initializes the round to 0. 
+ pub(super) fn note_set(&self, set_id: SetId, authorities: Vec, send_neighbor: F) + where + F: FnOnce(Vec, NeighborPacket>), + { + let maybe_msg = self.inner.write().note_set(set_id, authorities); + if let Some((to, msg)) = maybe_msg { + send_neighbor(to, msg); + } + } + + /// Note that we've imported a commit finalizing a given block. + pub(super) fn note_commit_finalized( + &self, + round: Round, + set_id: SetId, + finalized: NumberFor, + send_neighbor: F, + ) where + F: FnOnce(Vec, NeighborPacket>), + { + let maybe_msg = self + .inner + .write() + .note_commit_finalized(round, set_id, finalized); + + if let Some((to, msg)) = maybe_msg { + send_neighbor(to, msg); + } + } + + /// Note that we've processed a catch up message. + pub(super) fn note_catch_up_message_processed(&self) { + self.inner.write().note_catch_up_message_processed(); + } + + fn report(&self, who: PeerId, cost_benefit: ReputationChange) { + let _ = self + .report_sender + .unbounded_send(PeerReport { who, cost_benefit }); + } + + pub(super) fn do_validate( + &self, + who: &PeerId, + mut data: &[u8], + ) -> ( + Action, + Vec, + Option>, + ) { + let mut broadcast_topics = Vec::new(); + let mut peer_reply = None; + + // Message name for Prometheus metric recording. 
+ let message_name; + + let action = { + match GossipMessage::::decode(&mut data) { + Ok(GossipMessage::Vote(ref message)) => { + message_name = Some("vote"); + self.inner.write().validate_round_message(who, message) + } + Ok(GossipMessage::Commit(ref message)) => { + message_name = Some("commit"); + self.inner.write().validate_commit_message(who, message) + } + Ok(GossipMessage::Neighbor(update)) => { + message_name = Some("neighbor"); + let (topics, action, catch_up, report) = self + .inner + .write() + .import_neighbor_message(who, update.into_neighbor_packet()); + + if let Some((peer, cost_benefit)) = report { + self.report(peer, cost_benefit); + } + + broadcast_topics = topics; + peer_reply = catch_up; + action + } + Ok(GossipMessage::CatchUp(ref message)) => { + message_name = Some("catch_up"); + self.inner.write().validate_catch_up_message(who, message) + } + Ok(GossipMessage::CatchUpRequest(request)) => { + message_name = Some("catch_up_request"); + let (reply, action) = + self.inner + .write() + .handle_catch_up_request(who, request, &self.set_state); + + peer_reply = reply; + action + } + Err(e) => { + message_name = None; + debug!(target: "afg", "Error decoding message: {}", e.what()); + telemetry!(CONSENSUS_DEBUG; "afg.err_decoding_msg"; "" => ""); + + let len = std::cmp::min(i32::max_value() as usize, data.len()) as i32; + Action::Discard(Misbehavior::UndecodablePacket(len).cost()) + } + } + }; + + // Prometheus metric recording. 
+ if let (Some(metrics), Some(message_name)) = (&self.metrics, message_name) { + let action_name = match action { + Action::Keep(_, _) => "keep", + Action::ProcessAndDiscard(_, _) => "process_and_discard", + Action::Discard(_) => "discard", + }; + metrics + .messages_validated + .with_label_values(&[message_name, action_name]) + .inc(); + } + + (action, broadcast_topics, peer_reply) + } + + #[cfg(test)] + fn inner(&self) -> &parking_lot::RwLock> { + &self.inner + } } impl sc_network_gossip::Validator for GossipValidator { - fn new_peer(&self, context: &mut dyn ValidatorContext, who: &PeerId, roles: ObservedRole) { - let packet = { - let mut inner = self.inner.write(); - inner.peers.new_peer(who.clone(), roles); - - inner.local_view.as_ref().map(|v| { - NeighborPacket { - round: v.round, - set_id: v.set_id, - commit_finalized_height: *v.last_commit_height().unwrap_or(&Zero::zero()), - } - }) - }; - - if let Some(packet) = packet { - let packet_data = GossipMessage::::from(packet).encode(); - context.send_message(who, packet_data); - } - } - - fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, who: &PeerId) { - self.inner.write().peers.peer_disconnected(who); - } - - fn validate(&self, context: &mut dyn ValidatorContext, who: &PeerId, data: &[u8]) - -> sc_network_gossip::ValidationResult - { - let (action, broadcast_topics, peer_reply) = self.do_validate(who, data); - - // not with lock held! 
- if let Some(msg) = peer_reply { - context.send_message(who, msg.encode()); - } - - for topic in broadcast_topics { - context.send_topic(who, topic, false); - } - - match action { - Action::Keep(topic, cb) => { - self.report(who.clone(), cb); - context.broadcast_message(topic, data.to_vec(), false); - sc_network_gossip::ValidationResult::ProcessAndKeep(topic) - } - Action::ProcessAndDiscard(topic, cb) => { - self.report(who.clone(), cb); - sc_network_gossip::ValidationResult::ProcessAndDiscard(topic) - } - Action::Discard(cb) => { - self.report(who.clone(), cb); - sc_network_gossip::ValidationResult::Discard - } - } - } - - fn message_allowed<'a>(&'a self) - -> Box bool + 'a> - { - let (inner, do_rebroadcast) = { - use parking_lot::RwLockWriteGuard; - - let mut inner = self.inner.write(); - let now = Instant::now(); - let do_rebroadcast = if now >= inner.next_rebroadcast { - inner.next_rebroadcast = now + REBROADCAST_AFTER; - true - } else { - false - }; - - // downgrade to read-lock. - (RwLockWriteGuard::downgrade(inner), do_rebroadcast) - }; - - Box::new(move |who, intent, topic, mut data| { - if let MessageIntent::PeriodicRebroadcast = intent { - return do_rebroadcast; - } - - let peer = match inner.peers.peer(who) { - None => return false, - Some(x) => x, - }; - - // if the topic is not something we're keeping at the moment, - // do not send. - let (maybe_round, set_id) = match inner.live_topics.topic_info(&topic) { - None => return false, - Some(x) => x, - }; - - if let MessageIntent::Broadcast = intent { - if maybe_round.is_some() { - if !inner.round_message_allowed(who, peer) { - // early return if the vote message isn't allowed at this stage. - return false; - } - } else { - if !inner.global_message_allowed(who, peer) { - // early return if the global message isn't allowed at this stage. - return false; - } - } - } - - // if the topic is not something the peer accepts, discard. 
- if let Some(round) = maybe_round { - return peer.view.consider_vote(round, set_id) == Consider::Accept - } - - // global message. - let local_view = match inner.local_view { - Some(ref v) => v, - None => return false, // cannot evaluate until we have a local view. - }; - - match GossipMessage::::decode(&mut data) { - Err(_) => false, - Ok(GossipMessage::Commit(full)) => { - // we only broadcast commit messages if they're for the same - // set the peer is in and if the commit is better than the - // last received by peer, additionally we make sure to only - // broadcast our best commit. - peer.view.consider_global(set_id, full.message.target_number) == Consider::Accept && - Some(&full.message.target_number) == local_view.last_commit_height() - } - Ok(GossipMessage::Neighbor(_)) => false, - Ok(GossipMessage::CatchUpRequest(_)) => false, - Ok(GossipMessage::CatchUp(_)) => false, - Ok(GossipMessage::Vote(_)) => false, // should not be the case. - } - }) - } - - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - let inner = self.inner.read(); - Box::new(move |topic, mut data| { - // if the topic is not one of the ones that we are keeping at the moment, - // it is expired. - match inner.live_topics.topic_info(&topic) { - None => return true, - Some((Some(_), _)) => return false, // round messages don't require further checking. - Some((None, _)) => {}, - }; - - let local_view = match inner.local_view { - Some(ref v) => v, - None => return true, // no local view means we can't evaluate or hold any topic. - }; - - // global messages -- only keep the best commit. 
- match GossipMessage::::decode(&mut data) { - Err(_) => true, - Ok(GossipMessage::Commit(full)) => match local_view.last_commit { - Some((number, round, set_id)) => - // we expire any commit message that doesn't target the same block - // as our best commit or isn't from the same round and set id - !(full.message.target_number == number && - full.round == round && - full.set_id == set_id), - None => true, - }, - Ok(_) => true, - } - }) - } + fn new_peer( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + roles: ObservedRole, + ) { + let packet = { + let mut inner = self.inner.write(); + inner.peers.new_peer(who.clone(), roles); + + inner.local_view.as_ref().map(|v| NeighborPacket { + round: v.round, + set_id: v.set_id, + commit_finalized_height: *v.last_commit_height().unwrap_or(&Zero::zero()), + }) + }; + + if let Some(packet) = packet { + let packet_data = GossipMessage::::from(packet).encode(); + context.send_message(who, packet_data); + } + } + + fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, who: &PeerId) { + self.inner.write().peers.peer_disconnected(who); + } + + fn validate( + &self, + context: &mut dyn ValidatorContext, + who: &PeerId, + data: &[u8], + ) -> sc_network_gossip::ValidationResult { + let (action, broadcast_topics, peer_reply) = self.do_validate(who, data); + + // not with lock held! 
+ if let Some(msg) = peer_reply { + context.send_message(who, msg.encode()); + } + + for topic in broadcast_topics { + context.send_topic(who, topic, false); + } + + match action { + Action::Keep(topic, cb) => { + self.report(who.clone(), cb); + context.broadcast_message(topic, data.to_vec(), false); + sc_network_gossip::ValidationResult::ProcessAndKeep(topic) + } + Action::ProcessAndDiscard(topic, cb) => { + self.report(who.clone(), cb); + sc_network_gossip::ValidationResult::ProcessAndDiscard(topic) + } + Action::Discard(cb) => { + self.report(who.clone(), cb); + sc_network_gossip::ValidationResult::Discard + } + } + } + + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { + let (inner, do_rebroadcast) = { + use parking_lot::RwLockWriteGuard; + + let mut inner = self.inner.write(); + let now = Instant::now(); + let do_rebroadcast = if now >= inner.next_rebroadcast { + inner.next_rebroadcast = now + REBROADCAST_AFTER; + true + } else { + false + }; + + // downgrade to read-lock. + (RwLockWriteGuard::downgrade(inner), do_rebroadcast) + }; + + Box::new(move |who, intent, topic, mut data| { + if let MessageIntent::PeriodicRebroadcast = intent { + return do_rebroadcast; + } + + let peer = match inner.peers.peer(who) { + None => return false, + Some(x) => x, + }; + + // if the topic is not something we're keeping at the moment, + // do not send. + let (maybe_round, set_id) = match inner.live_topics.topic_info(&topic) { + None => return false, + Some(x) => x, + }; + + if let MessageIntent::Broadcast = intent { + if maybe_round.is_some() { + if !inner.round_message_allowed(who, peer) { + // early return if the vote message isn't allowed at this stage. + return false; + } + } else { + if !inner.global_message_allowed(who, peer) { + // early return if the global message isn't allowed at this stage. + return false; + } + } + } + + // if the topic is not something the peer accepts, discard. 
+ if let Some(round) = maybe_round { + return peer.view.consider_vote(round, set_id) == Consider::Accept; + } + + // global message. + let local_view = match inner.local_view { + Some(ref v) => v, + None => return false, // cannot evaluate until we have a local view. + }; + + match GossipMessage::::decode(&mut data) { + Err(_) => false, + Ok(GossipMessage::Commit(full)) => { + // we only broadcast commit messages if they're for the same + // set the peer is in and if the commit is better than the + // last received by peer, additionally we make sure to only + // broadcast our best commit. + peer.view + .consider_global(set_id, full.message.target_number) + == Consider::Accept + && Some(&full.message.target_number) == local_view.last_commit_height() + } + Ok(GossipMessage::Neighbor(_)) => false, + Ok(GossipMessage::CatchUpRequest(_)) => false, + Ok(GossipMessage::CatchUp(_)) => false, + Ok(GossipMessage::Vote(_)) => false, // should not be the case. + } + }) + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + let inner = self.inner.read(); + Box::new(move |topic, mut data| { + // if the topic is not one of the ones that we are keeping at the moment, + // it is expired. + match inner.live_topics.topic_info(&topic) { + None => return true, + Some((Some(_), _)) => return false, // round messages don't require further checking. + Some((None, _)) => {} + }; + + let local_view = match inner.local_view { + Some(ref v) => v, + None => return true, // no local view means we can't evaluate or hold any topic. + }; + + // global messages -- only keep the best commit. 
+ match GossipMessage::::decode(&mut data) { + Err(_) => true, + Ok(GossipMessage::Commit(full)) => match local_view.last_commit { + Some((number, round, set_id)) => + // we expire any commit message that doesn't target the same block + // as our best commit or isn't from the same round and set id + { + !(full.message.target_number == number + && full.round == round + && full.set_id == set_id) + } + None => true, + }, + Ok(_) => true, + } + }) + } } /// Report specifying a reputation change for a given peer. pub(super) struct PeerReport { - pub who: PeerId, - pub cost_benefit: ReputationChange, + pub who: PeerId, + pub cost_benefit: ReputationChange, } #[cfg(test)] mod tests { - use super::*; - use super::environment::SharedVoterSetState; - use sc_network_gossip::Validator as GossipValidatorT; - use sc_network_test::Block; - use sp_core::{crypto::Public, H256}; - - // some random config (not really needed) - fn config() -> crate::Config { - crate::Config { - gossip_duration: Duration::from_millis(10), - justification_period: 256, - keystore: None, - name: None, - is_authority: true, - observer_enabled: true, - } - } - - // dummy voter set state - fn voter_set_state() -> SharedVoterSetState { - use crate::authorities::AuthoritySet; - use crate::environment::VoterSetState; - - let base = (H256::zero(), 0); - let voters = AuthoritySet::genesis(Vec::new()); - let set_state = VoterSetState::live( - 0, - &voters, - base, - ); - - set_state.into() - } - - #[test] - fn view_vote_rules() { - let view = View { round: Round(100), set_id: SetId(1), last_commit: Some(1000u64) }; - - assert_eq!(view.consider_vote(Round(98), SetId(1)), Consider::RejectPast); - assert_eq!(view.consider_vote(Round(1), SetId(0)), Consider::RejectPast); - assert_eq!(view.consider_vote(Round(1000), SetId(0)), Consider::RejectPast); - - assert_eq!(view.consider_vote(Round(99), SetId(1)), Consider::Accept); - assert_eq!(view.consider_vote(Round(100), SetId(1)), Consider::Accept); - 
assert_eq!(view.consider_vote(Round(101), SetId(1)), Consider::Accept); - - assert_eq!(view.consider_vote(Round(102), SetId(1)), Consider::RejectFuture); - assert_eq!(view.consider_vote(Round(1), SetId(2)), Consider::RejectFuture); - assert_eq!(view.consider_vote(Round(1000), SetId(2)), Consider::RejectFuture); - } - - #[test] - fn view_global_message_rules() { - let view = View { round: Round(100), set_id: SetId(2), last_commit: Some(1000u64) }; - - assert_eq!(view.consider_global(SetId(3), 1), Consider::RejectFuture); - assert_eq!(view.consider_global(SetId(3), 1000), Consider::RejectFuture); - assert_eq!(view.consider_global(SetId(3), 10000), Consider::RejectFuture); - - assert_eq!(view.consider_global(SetId(1), 1), Consider::RejectPast); - assert_eq!(view.consider_global(SetId(1), 1000), Consider::RejectPast); - assert_eq!(view.consider_global(SetId(1), 10000), Consider::RejectPast); - - assert_eq!(view.consider_global(SetId(2), 1), Consider::RejectPast); - assert_eq!(view.consider_global(SetId(2), 1000), Consider::RejectPast); - assert_eq!(view.consider_global(SetId(2), 1001), Consider::Accept); - assert_eq!(view.consider_global(SetId(2), 10000), Consider::Accept); - } - - #[test] - fn unknown_peer_cannot_be_updated() { - let mut peers = Peers::default(); - let id = PeerId::random(); - - let update = NeighborPacket { - round: Round(5), - set_id: SetId(10), - commit_finalized_height: 50, - }; - - let res = peers.update_peer_state(&id, update.clone()); - assert!(res.unwrap().is_none()); - - // connect & disconnect. 
- peers.new_peer(id.clone(), ObservedRole::Authority); - peers.peer_disconnected(&id); - - let res = peers.update_peer_state(&id, update.clone()); - assert!(res.unwrap().is_none()); - } - - #[test] - fn update_peer_state() { - let update1 = NeighborPacket { - round: Round(5), - set_id: SetId(10), - commit_finalized_height: 50u32, - }; - - let update2 = NeighborPacket { - round: Round(6), - set_id: SetId(10), - commit_finalized_height: 60, - }; - - let update3 = NeighborPacket { - round: Round(2), - set_id: SetId(11), - commit_finalized_height: 61, - }; - - let update4 = NeighborPacket { - round: Round(3), - set_id: SetId(11), - commit_finalized_height: 80, - }; - - let mut peers = Peers::default(); - let id = PeerId::random(); - - peers.new_peer(id.clone(), ObservedRole::Authority); - - let mut check_update = move |update: NeighborPacket<_>| { - let view = peers.update_peer_state(&id, update.clone()).unwrap().unwrap(); - assert_eq!(view.round, update.round); - assert_eq!(view.set_id, update.set_id); - assert_eq!(view.last_commit, Some(update.commit_finalized_height)); - }; - - check_update(update1); - check_update(update2); - check_update(update3); - check_update(update4); - } - - #[test] - fn invalid_view_change() { - let mut peers = Peers::default(); - - let id = PeerId::random(); - peers.new_peer(id.clone(), ObservedRole::Authority); - - peers.update_peer_state(&id, NeighborPacket { - round: Round(10), - set_id: SetId(10), - commit_finalized_height: 10, - }).unwrap().unwrap(); - - let mut check_update = move |update: NeighborPacket<_>| { - let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); - assert_eq!(err, Misbehavior::InvalidViewChange); - }; - - // round moves backwards. - check_update(NeighborPacket { - round: Round(9), - set_id: SetId(10), - commit_finalized_height: 10, - }); - // commit finalized height moves backwards. 
- check_update(NeighborPacket { - round: Round(10), - set_id: SetId(10), - commit_finalized_height: 9, - }); - // set ID moves backwards. - check_update(NeighborPacket { - round: Round(10), - set_id: SetId(9), - commit_finalized_height: 10, - }); - } - - #[test] - fn messages_not_expired_immediately() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); - - let set_id = 1; - - val.note_set(SetId(set_id), Vec::new(), |_, _| {}); - - for round_num in 1u64..10 { - val.note_round(Round(round_num), |_, _| {}); - } - - { - let mut is_expired = val.message_expired(); - let last_kept_round = 10u64 - KEEP_RECENT_ROUNDS as u64 - 1; - - // messages from old rounds are expired. - for round_num in 1u64..last_kept_round { - let topic = crate::communication::round_topic::(round_num, 1); - assert!(is_expired(topic, &[1, 2, 3])); - } - - // messages from not-too-old rounds are not expired. - for round_num in last_kept_round..10 { - let topic = crate::communication::round_topic::(round_num, 1); - assert!(!is_expired(topic, &[1, 2, 3])); - } - } - } - - #[test] - fn message_from_unknown_authority_discarded() { - assert!(cost::UNKNOWN_VOTER != cost::BAD_SIGNATURE); - - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); - let set_id = 1; - let auth = AuthorityId::from_slice(&[1u8; 32]); - let peer = PeerId::random(); - - val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); - val.note_round(Round(1), |_, _| {}); - - let inner = val.inner.read(); - let unknown_voter = inner.validate_round_message(&peer, &VoteMessage { - round: Round(1), - set_id: SetId(set_id), - message: SignedMessage:: { - message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { - target_hash: Default::default(), - target_number: 10, - }), - signature: Default::default(), - id: AuthorityId::from_slice(&[2u8; 32]), - } - }); - - let bad_sig = inner.validate_round_message(&peer, &VoteMessage { - round: Round(1), - set_id: 
SetId(set_id), - message: SignedMessage:: { - message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { - target_hash: Default::default(), - target_number: 10, - }), - signature: Default::default(), - id: auth.clone(), - } - }); - - assert_eq!(unknown_voter, Action::Discard(cost::UNKNOWN_VOTER)); - assert_eq!(bad_sig, Action::Discard(cost::BAD_SIGNATURE)); - } - - #[test] - fn unsolicited_catch_up_messages_discarded() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); - - let set_id = 1; - let auth = AuthorityId::from_slice(&[1u8; 32]); - let peer = PeerId::random(); - - val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); - val.note_round(Round(1), |_, _| {}); - - let validate_catch_up = || { - let mut inner = val.inner.write(); - inner.validate_catch_up_message(&peer, &FullCatchUpMessage { - set_id: SetId(set_id), - message: finality_grandpa::CatchUp { - round_number: 10, - prevotes: Default::default(), - precommits: Default::default(), - base_hash: Default::default(), - base_number: Default::default(), - } - }) - }; - - // the catch up is discarded because we have no pending request - assert_eq!(validate_catch_up(), Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)); - - let noted = val.inner.write().note_catch_up_request( - &peer, - &CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(10), - } - ); - - assert!(noted.0); - - // catch up is allowed because we have requested it, but it's rejected - // because it's malformed (empty prevotes and precommits) - assert_eq!(validate_catch_up(), Action::Discard(cost::MALFORMED_CATCH_UP)); - } - - #[test] - fn unanswerable_catch_up_requests_discarded() { - // create voter set state with round 2 completed - let set_state: SharedVoterSetState = { - let mut completed_rounds = voter_set_state().read().completed_rounds(); - - completed_rounds.push(environment::CompletedRound { - number: 2, - state: finality_grandpa::round::State::genesis(Default::default()), - 
base: Default::default(), - votes: Default::default(), - }); - - let mut current_rounds = environment::CurrentRounds::new(); - current_rounds.insert(3, environment::HasVoted::No); - - let set_state = environment::VoterSetState::::Live { - completed_rounds, - current_rounds, - }; - - set_state.into() - }; - - let (val, _) = GossipValidator::::new( - config(), - set_state.clone(), - None, - ); - - let set_id = 1; - let auth = AuthorityId::from_slice(&[1u8; 32]); - let peer = PeerId::random(); - - val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); - val.note_round(Round(3), |_, _| {}); - - // add the peer making the request to the validator, - // otherwise it is discarded - let mut inner = val.inner.write(); - inner.peers.new_peer(peer.clone(), ObservedRole::Authority); - - let res = inner.handle_catch_up_request( - &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(10), - }, - &set_state, - ); - - // we're at round 3, a catch up request for round 10 is out of scope - assert!(res.0.is_none()); - assert_eq!(res.1, Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)); - - let res = inner.handle_catch_up_request( - &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(2), - }, - &set_state, - ); - - // a catch up request for round 2 should be answered successfully - match res.0.unwrap() { - GossipMessage::CatchUp(catch_up) => { - assert_eq!(catch_up.set_id, SetId(set_id)); - assert_eq!(catch_up.message.round_number, 2); - - assert_eq!(res.1, Action::Discard(cost::CATCH_UP_REPLY)); - }, - _ => panic!("expected catch up message"), - }; - } - - #[test] - fn detects_honest_out_of_scope_catch_requests() { - let set_state = voter_set_state(); - let (val, _) = GossipValidator::::new( - config(), - set_state.clone(), - None, - ); - - // the validator starts at set id 2 - val.note_set(SetId(2), Vec::new(), |_, _| {}); - - // add the peer making the request to the validator, - // otherwise it is discarded - let peer = PeerId::random(); - 
val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); - - let send_request = |set_id, round| { - let mut inner = val.inner.write(); - inner.handle_catch_up_request( - &peer, - CatchUpRequestMessage { - set_id: SetId(set_id), - round: Round(round), - }, - &set_state, - ) - }; - - let assert_res = |res: (Option<_>, Action<_>), honest| { - assert!(res.0.is_none()); - assert_eq!( - res.1, - if honest { - Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP) - } else { - Action::Discard(Misbehavior::OutOfScopeMessage.cost()) - }, - ); - }; - - // the validator is at set id 2 and round 0. requests for set id 1 - // should not be answered but they should be considered an honest - // mistake - assert_res( - send_request(1, 1), - true, - ); - - assert_res( - send_request(1, 10), - true, - ); - - // requests for set id 0 should be considered out of scope - assert_res( - send_request(0, 1), - false, - ); - - assert_res( - send_request(0, 10), - false, - ); - - // after the validator progresses further than CATCH_UP_THRESHOLD in set - // id 2, any request for set id 1 should no longer be considered an - // honest mistake. - val.note_round(Round(3), |_, _| {}); - - assert_res( - send_request(1, 1), - false, - ); - - assert_res( - send_request(1, 2), - false, - ); - } - - #[test] - fn issues_catch_up_request_on_neighbor_packet_import() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); - - // the validator starts at set id 1. - val.note_set(SetId(1), Vec::new(), |_, _| {}); - - // add the peer making the request to the validator, - // otherwise it is discarded. 
- let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); - - let import_neighbor_message = |set_id, round| { - let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( - &peer, - NeighborPacket { - round: Round(round), - set_id: SetId(set_id), - commit_finalized_height: 42, - }, - ); - - catch_up_request - }; - - // importing a neighbor message from a peer in the same set in a later - // round should lead to a catch up request for the previous round. - match import_neighbor_message(1, 42) { - Some(GossipMessage::CatchUpRequest(request)) => { - assert_eq!(request.set_id, SetId(1)); - assert_eq!(request.round, Round(41)); - }, - _ => panic!("expected catch up message"), - } - - // we note that we're at round 41. - val.note_round(Round(41), |_, _| {}); - - // if we import a neighbor message within CATCH_UP_THRESHOLD then we - // won't request a catch up. - match import_neighbor_message(1, 42) { - None => {}, - _ => panic!("expected no catch up message"), - } - - // or if the peer is on a lower round. - match import_neighbor_message(1, 40) { - None => {}, - _ => panic!("expected no catch up message"), - } - - // we also don't request a catch up if the peer is in a different set. - match import_neighbor_message(2, 42) { - None => {}, - _ => panic!("expected no catch up message"), - } - } - - #[test] - fn doesnt_send_catch_up_requests_when_disabled() { - // we create a gossip validator with catch up requests disabled. - let config = { - let mut c = config(); - - // if the observer protocol is enabled and we are not an authority, - // then we don't issue any catch-up requests. - c.is_authority = false; - c.observer_enabled = true; - - c - }; - - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); - - // the validator starts at set id 1. - val.note_set(SetId(1), Vec::new(), |_, _| {}); - - // add the peer making the request to the validator, - // otherwise it is discarded. 
- let peer = PeerId::random(); - val.inner.write().peers.new_peer(peer.clone(), ObservedRole::Authority); - - // importing a neighbor message from a peer in the same set in a later - // round should lead to a catch up request but since they're disabled - // we should get `None`. - let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( - &peer, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, - ); - - match catch_up_request { - None => {}, - _ => panic!("expected no catch up message"), - } - } - - #[test] - fn doesnt_send_catch_up_requests_to_non_authorities_when_observer_enabled() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); - - // the validator starts at set id 1. - val.note_set(SetId(1), Vec::new(), |_, _| {}); - - // add the peers making the requests to the validator, - // otherwise it is discarded. - let peer_authority = PeerId::random(); - let peer_full = PeerId::random(); - - val.inner.write().peers.new_peer(peer_authority.clone(), ObservedRole::Authority); - val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); - - let import_neighbor_message = |peer| { - let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( - &peer, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, - ); - - catch_up_request - }; - - // importing a neighbor message from a peer in the same set in a later - // round should lead to a catch up request but since the node is not an - // authority we should get `None`. - if import_neighbor_message(peer_full).is_some() { - panic!("expected no catch up message"); - } - - // importing the same neighbor message from a peer who is an authority - // should lead to a catch up request. 
- match import_neighbor_message(peer_authority) { - Some(GossipMessage::CatchUpRequest(request)) => { - assert_eq!(request.set_id, SetId(1)); - assert_eq!(request.round, Round(41)); - }, - _ => panic!("expected catch up message"), - } - } - - #[test] - fn sends_catch_up_requests_to_non_authorities_when_observer_disabled() { - let config = { - let mut c = config(); - - // if the observer protocol is disable any full-node should be able - // to answer catch-up requests. - c.observer_enabled = false; - - c - }; - - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); - - // the validator starts at set id 1. - val.note_set(SetId(1), Vec::new(), |_, _| {}); - - // add the peer making the requests to the validator, otherwise it is - // discarded. - let peer_full = PeerId::random(); - val.inner.write().peers.new_peer(peer_full.clone(), ObservedRole::Full); - - let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( - &peer_full, - NeighborPacket { - round: Round(42), - set_id: SetId(1), - commit_finalized_height: 50, - }, - ); - - // importing a neighbor message from a peer in the same set in a later - // round should lead to a catch up request, the node is not an - // authority, but since the observer protocol is disabled we should - // issue a catch-up request to it anyway. - match catch_up_request { - Some(GossipMessage::CatchUpRequest(request)) => { - assert_eq!(request.set_id, SetId(1)); - assert_eq!(request.round, Round(41)); - }, - _ => panic!("expected catch up message"), - } - } - - #[test] - fn doesnt_expire_next_round_messages() { - // NOTE: this is a regression test - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); - - // the validator starts at set id 1. 
- val.note_set(SetId(1), Vec::new(), |_, _| {}); - - // we are at round 10 - val.note_round(Round(9), |_, _| {}); - val.note_round(Round(10), |_, _| {}); - - let mut is_expired = val.message_expired(); - - // we accept messages from rounds 9, 10 and 11 - // therefore neither of those should be considered expired - for round in &[9, 10, 11] { - assert!( - !is_expired( - crate::communication::round_topic::(*round, 1), - &[], - ) - ) - } - } - - #[test] - fn progressively_gossips_to_more_peers() { - let mut config = config(); - config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race - let round_duration = config.gossip_duration * ROUND_DURATION; - - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); - - // the validator start at set id 0 - val.note_set(SetId(0), Vec::new(), |_, _| {}); - - // add 60 peers, 30 authorities and 30 full nodes - let mut authorities = Vec::new(); - authorities.resize_with(30, || PeerId::random()); - - let mut full_nodes = Vec::new(); - full_nodes.resize_with(30, || PeerId::random()); - - for i in 0..30 { - val.inner.write().peers.new_peer(authorities[i].clone(), ObservedRole::Authority); - val.inner.write().peers.new_peer(full_nodes[i].clone(), ObservedRole::Full); - } - - let test = |num_round, peers| { - // rewind n round durations - val.inner.write().round_start = Instant::now() - round_duration * num_round; - let mut message_allowed = val.message_allowed(); - - move || { - let mut allowed = 0; - for peer in peers { - if message_allowed( - peer, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) { - allowed += 1; - } - } - allowed - } - }; - - fn trial usize>(mut test: F) -> usize { - let mut results = Vec::new(); - let n = 1000; - - for _ in 0..n { - results.push(test()); - } - - let n = results.len(); - let sum: usize = results.iter().sum(); - - sum / n - } - - // on the first attempt we will only gossip to `sqrt(authorities)`, - // 
which should average out to 5 peers after a couple of trials - assert_eq!(trial(test(1, &authorities)), 5); - - // on the second (and subsequent attempts) we should gossip to all - // authorities we're connected to. - assert_eq!(trial(test(2, &authorities)), 30); - assert_eq!(trial(test(3, &authorities)), 30); - - // we should only gossip to non-authorities after the third attempt - assert_eq!(trial(test(1, &full_nodes)), 0); - assert_eq!(trial(test(2, &full_nodes)), 0); - - // and only to `sqrt(non-authorities)` - assert_eq!(trial(test(3, &full_nodes)), 5); - - // only on the fourth attempt should we gossip to all non-authorities - assert_eq!(trial(test(4, &full_nodes)), 30); - } - - #[test] - fn only_restricts_gossip_to_authorities_after_a_minimum_threshold() { - let (val, _) = GossipValidator::::new( - config(), - voter_set_state(), - None, - ); - - // the validator start at set id 0 - val.note_set(SetId(0), Vec::new(), |_, _| {}); - - let mut authorities = Vec::new(); - for _ in 0..5 { - let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); - authorities.push(peer_id); - } - - let mut message_allowed = val.message_allowed(); - - // since we're only connected to 5 authorities, we should never restrict - // sending of gossip messages, and instead just allow them to all - // non-authorities on the first attempt. 
- for authority in &authorities { - assert!( - message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } - - #[test] - fn non_authorities_never_gossip_messages_on_first_round_duration() { - let mut config = config(); - config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race - config.is_authority = false; - let round_duration = config.gossip_duration * ROUND_DURATION; - - let (val, _) = GossipValidator::::new( - config, - voter_set_state(), - None, - ); - - // the validator start at set id 0 - val.note_set(SetId(0), Vec::new(), |_, _| {}); - - let mut authorities = Vec::new(); - for _ in 0..100 { - let peer_id = PeerId::random(); - val.inner.write().peers.new_peer(peer_id.clone(), ObservedRole::Authority); - authorities.push(peer_id); - } - - { - let mut message_allowed = val.message_allowed(); - // since our node is not an authority we should **never** gossip any - // messages on the first attempt. 
- for authority in &authorities { - assert!( - !message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } - - { - val.inner.write().round_start = Instant::now() - round_duration * 4; - let mut message_allowed = val.message_allowed(); - // on the fourth round duration we should allow messages to authorities - // (on the second we would do `sqrt(authorities)`) - for authority in &authorities { - assert!( - message_allowed( - authority, - MessageIntent::Broadcast, - &crate::communication::round_topic::(1, 0), - &[], - ) - ); - } - } - } - - #[test] - fn only_gossip_commits_to_peers_on_same_set() { - let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); - - // the validator start at set id 1 - val.note_set(SetId(1), Vec::new(), |_, _| {}); - - // add a new peer at set id 1 - let peer1 = PeerId::random(); - - val.inner - .write() - .peers - .new_peer(peer1.clone(), ObservedRole::Authority); - - val.inner - .write() - .peers - .update_peer_state( - &peer1, - NeighborPacket { - round: Round(1), - set_id: SetId(1), - commit_finalized_height: 1, - }, - ) - .unwrap(); - - // peer2 will default to set id 0 - let peer2 = PeerId::random(); - val.inner - .write() - .peers - .new_peer(peer2.clone(), ObservedRole::Authority); - - // create a commit for round 1 of set id 1 - // targeting a block at height 2 - let commit = { - let commit = finality_grandpa::CompactCommit { - target_hash: H256::random(), - target_number: 2, - precommits: Vec::new(), - auth_data: Vec::new(), - }; - - crate::communication::gossip::GossipMessage::::Commit( - crate::communication::gossip::FullCommitMessage { - round: Round(1), - set_id: SetId(1), - message: commit, - }, - ) - .encode() - }; - - // note the commit in the validator - val.note_commit_finalized(Round(1), SetId(1), 2, |_, _| {}); - - let mut message_allowed = val.message_allowed(); - - // the commit should be allowed to peer 1 - assert!(message_allowed( 
- &peer1, - MessageIntent::Broadcast, - &crate::communication::global_topic::(1), - &commit, - )); - - // but disallowed to peer 2 since the peer is on set id 0 - // the commit should be allowed to peer 1 - assert!(!message_allowed( - &peer2, - MessageIntent::Broadcast, - &crate::communication::global_topic::(1), - &commit, - )); - } - - #[test] - fn expire_commits_from_older_rounds() { - let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); - - let commit = |round, set_id, target_number| { - let commit = finality_grandpa::CompactCommit { - target_hash: H256::random(), - target_number, - precommits: Vec::new(), - auth_data: Vec::new(), - }; - - crate::communication::gossip::GossipMessage::::Commit( - crate::communication::gossip::FullCommitMessage { - round: Round(round), - set_id: SetId(set_id), - message: commit, - }, - ) - .encode() - }; - - // note the beginning of a new set with id 1 - val.note_set(SetId(1), Vec::new(), |_, _| {}); - - // note a commit for round 1 in the validator - // finalizing a block at height 2 - val.note_commit_finalized(Round(1), SetId(1), 2, |_, _| {}); - - let mut message_expired = val.message_expired(); - - // a commit message for round 1 that finalizes the same height as we - // have observed previously should not be expired - assert!(!message_expired( - crate::communication::global_topic::(1), - &commit(1, 1, 2), - )); - - // it should be expired if it is for a lower block - assert!(message_expired( - crate::communication::global_topic::(1), - &commit(1, 1, 1), - )); - - // or the same block height but from the previous round - assert!(message_expired( - crate::communication::global_topic::(1), - &commit(0, 1, 2), - )); - } - - #[test] - fn allow_noting_different_authorities_for_same_set() { - let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); - - let a1 = vec![AuthorityId::default()]; - val.note_set(SetId(1), a1.clone(), |_, _| {}); - - assert_eq!(val.inner().read().authorities, a1); - - 
let a2 = vec![AuthorityId::default(), AuthorityId::default()]; - val.note_set(SetId(1), a2.clone(), |_, _| {}); - - assert_eq!(val.inner().read().authorities, a2); - } + use super::environment::SharedVoterSetState; + use super::*; + use sc_network_gossip::Validator as GossipValidatorT; + use sc_network_test::Block; + use sp_core::{crypto::Public, H256}; + + // some random config (not really needed) + fn config() -> crate::Config { + crate::Config { + gossip_duration: Duration::from_millis(10), + justification_period: 256, + keystore: None, + name: None, + is_authority: true, + observer_enabled: true, + } + } + + // dummy voter set state + fn voter_set_state() -> SharedVoterSetState { + use crate::authorities::AuthoritySet; + use crate::environment::VoterSetState; + + let base = (H256::zero(), 0); + let voters = AuthoritySet::genesis(Vec::new()); + let set_state = VoterSetState::live(0, &voters, base); + + set_state.into() + } + + #[test] + fn view_vote_rules() { + let view = View { + round: Round(100), + set_id: SetId(1), + last_commit: Some(1000u64), + }; + + assert_eq!( + view.consider_vote(Round(98), SetId(1)), + Consider::RejectPast + ); + assert_eq!(view.consider_vote(Round(1), SetId(0)), Consider::RejectPast); + assert_eq!( + view.consider_vote(Round(1000), SetId(0)), + Consider::RejectPast + ); + + assert_eq!(view.consider_vote(Round(99), SetId(1)), Consider::Accept); + assert_eq!(view.consider_vote(Round(100), SetId(1)), Consider::Accept); + assert_eq!(view.consider_vote(Round(101), SetId(1)), Consider::Accept); + + assert_eq!( + view.consider_vote(Round(102), SetId(1)), + Consider::RejectFuture + ); + assert_eq!( + view.consider_vote(Round(1), SetId(2)), + Consider::RejectFuture + ); + assert_eq!( + view.consider_vote(Round(1000), SetId(2)), + Consider::RejectFuture + ); + } + + #[test] + fn view_global_message_rules() { + let view = View { + round: Round(100), + set_id: SetId(2), + last_commit: Some(1000u64), + }; + + 
assert_eq!(view.consider_global(SetId(3), 1), Consider::RejectFuture); + assert_eq!(view.consider_global(SetId(3), 1000), Consider::RejectFuture); + assert_eq!( + view.consider_global(SetId(3), 10000), + Consider::RejectFuture + ); + + assert_eq!(view.consider_global(SetId(1), 1), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(1), 1000), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(1), 10000), Consider::RejectPast); + + assert_eq!(view.consider_global(SetId(2), 1), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(2), 1000), Consider::RejectPast); + assert_eq!(view.consider_global(SetId(2), 1001), Consider::Accept); + assert_eq!(view.consider_global(SetId(2), 10000), Consider::Accept); + } + + #[test] + fn unknown_peer_cannot_be_updated() { + let mut peers = Peers::default(); + let id = PeerId::random(); + + let update = NeighborPacket { + round: Round(5), + set_id: SetId(10), + commit_finalized_height: 50, + }; + + let res = peers.update_peer_state(&id, update.clone()); + assert!(res.unwrap().is_none()); + + // connect & disconnect. 
+ peers.new_peer(id.clone(), ObservedRole::Authority); + peers.peer_disconnected(&id); + + let res = peers.update_peer_state(&id, update.clone()); + assert!(res.unwrap().is_none()); + } + + #[test] + fn update_peer_state() { + let update1 = NeighborPacket { + round: Round(5), + set_id: SetId(10), + commit_finalized_height: 50u32, + }; + + let update2 = NeighborPacket { + round: Round(6), + set_id: SetId(10), + commit_finalized_height: 60, + }; + + let update3 = NeighborPacket { + round: Round(2), + set_id: SetId(11), + commit_finalized_height: 61, + }; + + let update4 = NeighborPacket { + round: Round(3), + set_id: SetId(11), + commit_finalized_height: 80, + }; + + let mut peers = Peers::default(); + let id = PeerId::random(); + + peers.new_peer(id.clone(), ObservedRole::Authority); + + let mut check_update = move |update: NeighborPacket<_>| { + let view = peers + .update_peer_state(&id, update.clone()) + .unwrap() + .unwrap(); + assert_eq!(view.round, update.round); + assert_eq!(view.set_id, update.set_id); + assert_eq!(view.last_commit, Some(update.commit_finalized_height)); + }; + + check_update(update1); + check_update(update2); + check_update(update3); + check_update(update4); + } + + #[test] + fn invalid_view_change() { + let mut peers = Peers::default(); + + let id = PeerId::random(); + peers.new_peer(id.clone(), ObservedRole::Authority); + + peers + .update_peer_state( + &id, + NeighborPacket { + round: Round(10), + set_id: SetId(10), + commit_finalized_height: 10, + }, + ) + .unwrap() + .unwrap(); + + let mut check_update = move |update: NeighborPacket<_>| { + let err = peers.update_peer_state(&id, update.clone()).unwrap_err(); + assert_eq!(err, Misbehavior::InvalidViewChange); + }; + + // round moves backwards. + check_update(NeighborPacket { + round: Round(9), + set_id: SetId(10), + commit_finalized_height: 10, + }); + // commit finalized height moves backwards. 
+ check_update(NeighborPacket { + round: Round(10), + set_id: SetId(10), + commit_finalized_height: 9, + }); + // set ID moves backwards. + check_update(NeighborPacket { + round: Round(10), + set_id: SetId(9), + commit_finalized_height: 10, + }); + } + + #[test] + fn messages_not_expired_immediately() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + let set_id = 1; + + val.note_set(SetId(set_id), Vec::new(), |_, _| {}); + + for round_num in 1u64..10 { + val.note_round(Round(round_num), |_, _| {}); + } + + { + let mut is_expired = val.message_expired(); + let last_kept_round = 10u64 - KEEP_RECENT_ROUNDS as u64 - 1; + + // messages from old rounds are expired. + for round_num in 1u64..last_kept_round { + let topic = crate::communication::round_topic::(round_num, 1); + assert!(is_expired(topic, &[1, 2, 3])); + } + + // messages from not-too-old rounds are not expired. + for round_num in last_kept_round..10 { + let topic = crate::communication::round_topic::(round_num, 1); + assert!(!is_expired(topic, &[1, 2, 3])); + } + } + } + + #[test] + fn message_from_unknown_authority_discarded() { + assert!(cost::UNKNOWN_VOTER != cost::BAD_SIGNATURE); + + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + let set_id = 1; + let auth = AuthorityId::from_slice(&[1u8; 32]); + let peer = PeerId::random(); + + val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); + val.note_round(Round(1), |_, _| {}); + + let inner = val.inner.read(); + let unknown_voter = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), + message: SignedMessage:: { + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: Default::default(), + id: AuthorityId::from_slice(&[2u8; 32]), + }, + }, + ); + + let bad_sig = inner.validate_round_message( + &peer, + &VoteMessage { + round: Round(1), + set_id: SetId(set_id), 
+ message: SignedMessage:: { + message: finality_grandpa::Message::Prevote(finality_grandpa::Prevote { + target_hash: Default::default(), + target_number: 10, + }), + signature: Default::default(), + id: auth.clone(), + }, + }, + ); + + assert_eq!(unknown_voter, Action::Discard(cost::UNKNOWN_VOTER)); + assert_eq!(bad_sig, Action::Discard(cost::BAD_SIGNATURE)); + } + + #[test] + fn unsolicited_catch_up_messages_discarded() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + let set_id = 1; + let auth = AuthorityId::from_slice(&[1u8; 32]); + let peer = PeerId::random(); + + val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); + val.note_round(Round(1), |_, _| {}); + + let validate_catch_up = || { + let mut inner = val.inner.write(); + inner.validate_catch_up_message( + &peer, + &FullCatchUpMessage { + set_id: SetId(set_id), + message: finality_grandpa::CatchUp { + round_number: 10, + prevotes: Default::default(), + precommits: Default::default(), + base_hash: Default::default(), + base_number: Default::default(), + }, + }, + ) + }; + + // the catch up is discarded because we have no pending request + assert_eq!( + validate_catch_up(), + Action::Discard(cost::OUT_OF_SCOPE_MESSAGE) + ); + + let noted = val.inner.write().note_catch_up_request( + &peer, + &CatchUpRequestMessage { + set_id: SetId(set_id), + round: Round(10), + }, + ); + + assert!(noted.0); + + // catch up is allowed because we have requested it, but it's rejected + // because it's malformed (empty prevotes and precommits) + assert_eq!( + validate_catch_up(), + Action::Discard(cost::MALFORMED_CATCH_UP) + ); + } + + #[test] + fn unanswerable_catch_up_requests_discarded() { + // create voter set state with round 2 completed + let set_state: SharedVoterSetState = { + let mut completed_rounds = voter_set_state().read().completed_rounds(); + + completed_rounds.push(environment::CompletedRound { + number: 2, + state: 
finality_grandpa::round::State::genesis(Default::default()), + base: Default::default(), + votes: Default::default(), + }); + + let mut current_rounds = environment::CurrentRounds::new(); + current_rounds.insert(3, environment::HasVoted::No); + + let set_state = environment::VoterSetState::::Live { + completed_rounds, + current_rounds, + }; + + set_state.into() + }; + + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None); + + let set_id = 1; + let auth = AuthorityId::from_slice(&[1u8; 32]); + let peer = PeerId::random(); + + val.note_set(SetId(set_id), vec![auth.clone()], |_, _| {}); + val.note_round(Round(3), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded + let mut inner = val.inner.write(); + inner.peers.new_peer(peer.clone(), ObservedRole::Authority); + + let res = inner.handle_catch_up_request( + &peer, + CatchUpRequestMessage { + set_id: SetId(set_id), + round: Round(10), + }, + &set_state, + ); + + // we're at round 3, a catch up request for round 10 is out of scope + assert!(res.0.is_none()); + assert_eq!(res.1, Action::Discard(cost::OUT_OF_SCOPE_MESSAGE)); + + let res = inner.handle_catch_up_request( + &peer, + CatchUpRequestMessage { + set_id: SetId(set_id), + round: Round(2), + }, + &set_state, + ); + + // a catch up request for round 2 should be answered successfully + match res.0.unwrap() { + GossipMessage::CatchUp(catch_up) => { + assert_eq!(catch_up.set_id, SetId(set_id)); + assert_eq!(catch_up.message.round_number, 2); + + assert_eq!(res.1, Action::Discard(cost::CATCH_UP_REPLY)); + } + _ => panic!("expected catch up message"), + }; + } + + #[test] + fn detects_honest_out_of_scope_catch_requests() { + let set_state = voter_set_state(); + let (val, _) = GossipValidator::::new(config(), set_state.clone(), None); + + // the validator starts at set id 2 + val.note_set(SetId(2), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is 
discarded + let peer = PeerId::random(); + val.inner + .write() + .peers + .new_peer(peer.clone(), ObservedRole::Authority); + + let send_request = |set_id, round| { + let mut inner = val.inner.write(); + inner.handle_catch_up_request( + &peer, + CatchUpRequestMessage { + set_id: SetId(set_id), + round: Round(round), + }, + &set_state, + ) + }; + + let assert_res = |res: (Option<_>, Action<_>), honest| { + assert!(res.0.is_none()); + assert_eq!( + res.1, + if honest { + Action::Discard(cost::HONEST_OUT_OF_SCOPE_CATCH_UP) + } else { + Action::Discard(Misbehavior::OutOfScopeMessage.cost()) + }, + ); + }; + + // the validator is at set id 2 and round 0. requests for set id 1 + // should not be answered but they should be considered an honest + // mistake + assert_res(send_request(1, 1), true); + + assert_res(send_request(1, 10), true); + + // requests for set id 0 should be considered out of scope + assert_res(send_request(0, 1), false); + + assert_res(send_request(0, 10), false); + + // after the validator progresses further than CATCH_UP_THRESHOLD in set + // id 2, any request for set id 1 should no longer be considered an + // honest mistake. + val.note_round(Round(3), |_, _| {}); + + assert_res(send_request(1, 1), false); + + assert_res(send_request(1, 2), false); + } + + #[test] + fn issues_catch_up_request_on_neighbor_packet_import() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded. 
+ let peer = PeerId::random(); + val.inner + .write() + .peers + .new_peer(peer.clone(), ObservedRole::Authority); + + let import_neighbor_message = |set_id, round| { + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 42, + }, + ); + + catch_up_request + }; + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request for the previous round. + match import_neighbor_message(1, 42) { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + } + _ => panic!("expected catch up message"), + } + + // we note that we're at round 41. + val.note_round(Round(41), |_, _| {}); + + // if we import a neighbor message within CATCH_UP_THRESHOLD then we + // won't request a catch up. + match import_neighbor_message(1, 42) { + None => {} + _ => panic!("expected no catch up message"), + } + + // or if the peer is on a lower round. + match import_neighbor_message(1, 40) { + None => {} + _ => panic!("expected no catch up message"), + } + + // we also don't request a catch up if the peer is in a different set. + match import_neighbor_message(2, 42) { + None => {} + _ => panic!("expected no catch up message"), + } + } + + #[test] + fn doesnt_send_catch_up_requests_when_disabled() { + // we create a gossip validator with catch up requests disabled. + let config = { + let mut c = config(); + + // if the observer protocol is enabled and we are not an authority, + // then we don't issue any catch-up requests. + c.is_authority = false; + c.observer_enabled = true; + + c + }; + + let (val, _) = GossipValidator::::new(config, voter_set_state(), None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the request to the validator, + // otherwise it is discarded. 
+ let peer = PeerId::random(); + val.inner + .write() + .peers + .new_peer(peer.clone(), ObservedRole::Authority); + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request but since they're disabled + // we should get `None`. + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { + round: Round(42), + set_id: SetId(1), + commit_finalized_height: 50, + }, + ); + + match catch_up_request { + None => {} + _ => panic!("expected no catch up message"), + } + } + + #[test] + fn doesnt_send_catch_up_requests_to_non_authorities_when_observer_enabled() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peers making the requests to the validator, + // otherwise it is discarded. + let peer_authority = PeerId::random(); + let peer_full = PeerId::random(); + + val.inner + .write() + .peers + .new_peer(peer_authority.clone(), ObservedRole::Authority); + val.inner + .write() + .peers + .new_peer(peer_full.clone(), ObservedRole::Full); + + let import_neighbor_message = |peer| { + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer, + NeighborPacket { + round: Round(42), + set_id: SetId(1), + commit_finalized_height: 50, + }, + ); + + catch_up_request + }; + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request but since the node is not an + // authority we should get `None`. + if import_neighbor_message(peer_full).is_some() { + panic!("expected no catch up message"); + } + + // importing the same neighbor message from a peer who is an authority + // should lead to a catch up request. 
+ match import_neighbor_message(peer_authority) { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + } + _ => panic!("expected catch up message"), + } + } + + #[test] + fn sends_catch_up_requests_to_non_authorities_when_observer_disabled() { + let config = { + let mut c = config(); + + // if the observer protocol is disable any full-node should be able + // to answer catch-up requests. + c.observer_enabled = false; + + c + }; + + let (val, _) = GossipValidator::::new(config, voter_set_state(), None); + + // the validator starts at set id 1. + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add the peer making the requests to the validator, otherwise it is + // discarded. + let peer_full = PeerId::random(); + val.inner + .write() + .peers + .new_peer(peer_full.clone(), ObservedRole::Full); + + let (_, _, catch_up_request, _) = val.inner.write().import_neighbor_message( + &peer_full, + NeighborPacket { + round: Round(42), + set_id: SetId(1), + commit_finalized_height: 50, + }, + ); + + // importing a neighbor message from a peer in the same set in a later + // round should lead to a catch up request, the node is not an + // authority, but since the observer protocol is disabled we should + // issue a catch-up request to it anyway. + match catch_up_request { + Some(GossipMessage::CatchUpRequest(request)) => { + assert_eq!(request.set_id, SetId(1)); + assert_eq!(request.round, Round(41)); + } + _ => panic!("expected catch up message"), + } + } + + #[test] + fn doesnt_expire_next_round_messages() { + // NOTE: this is a regression test + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + // the validator starts at set id 1. 
+ val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // we are at round 10 + val.note_round(Round(9), |_, _| {}); + val.note_round(Round(10), |_, _| {}); + + let mut is_expired = val.message_expired(); + + // we accept messages from rounds 9, 10 and 11 + // therefore neither of those should be considered expired + for round in &[9, 10, 11] { + assert!(!is_expired( + crate::communication::round_topic::(*round, 1), + &[], + )) + } + } + + #[test] + fn progressively_gossips_to_more_peers() { + let mut config = config(); + config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race + let round_duration = config.gossip_duration * ROUND_DURATION; + + let (val, _) = GossipValidator::::new(config, voter_set_state(), None); + + // the validator start at set id 0 + val.note_set(SetId(0), Vec::new(), |_, _| {}); + + // add 60 peers, 30 authorities and 30 full nodes + let mut authorities = Vec::new(); + authorities.resize_with(30, || PeerId::random()); + + let mut full_nodes = Vec::new(); + full_nodes.resize_with(30, || PeerId::random()); + + for i in 0..30 { + val.inner + .write() + .peers + .new_peer(authorities[i].clone(), ObservedRole::Authority); + val.inner + .write() + .peers + .new_peer(full_nodes[i].clone(), ObservedRole::Full); + } + + let test = |num_round, peers| { + // rewind n round durations + val.inner.write().round_start = Instant::now() - round_duration * num_round; + let mut message_allowed = val.message_allowed(); + + move || { + let mut allowed = 0; + for peer in peers { + if message_allowed( + peer, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + ) { + allowed += 1; + } + } + allowed + } + }; + + fn trial usize>(mut test: F) -> usize { + let mut results = Vec::new(); + let n = 1000; + + for _ in 0..n { + results.push(test()); + } + + let n = results.len(); + let sum: usize = results.iter().sum(); + + sum / n + } + + // on the first attempt we will only gossip to `sqrt(authorities)`, + // 
which should average out to 5 peers after a couple of trials + assert_eq!(trial(test(1, &authorities)), 5); + + // on the second (and subsequent attempts) we should gossip to all + // authorities we're connected to. + assert_eq!(trial(test(2, &authorities)), 30); + assert_eq!(trial(test(3, &authorities)), 30); + + // we should only gossip to non-authorities after the third attempt + assert_eq!(trial(test(1, &full_nodes)), 0); + assert_eq!(trial(test(2, &full_nodes)), 0); + + // and only to `sqrt(non-authorities)` + assert_eq!(trial(test(3, &full_nodes)), 5); + + // only on the fourth attempt should we gossip to all non-authorities + assert_eq!(trial(test(4, &full_nodes)), 30); + } + + #[test] + fn only_restricts_gossip_to_authorities_after_a_minimum_threshold() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + // the validator start at set id 0 + val.note_set(SetId(0), Vec::new(), |_, _| {}); + + let mut authorities = Vec::new(); + for _ in 0..5 { + let peer_id = PeerId::random(); + val.inner + .write() + .peers + .new_peer(peer_id.clone(), ObservedRole::Authority); + authorities.push(peer_id); + } + + let mut message_allowed = val.message_allowed(); + + // since we're only connected to 5 authorities, we should never restrict + // sending of gossip messages, and instead just allow them to all + // non-authorities on the first attempt. 
+ for authority in &authorities { + assert!(message_allowed( + authority, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + )); + } + } + + #[test] + fn non_authorities_never_gossip_messages_on_first_round_duration() { + let mut config = config(); + config.gossip_duration = Duration::from_secs(300); // Set to high value to prevent test race + config.is_authority = false; + let round_duration = config.gossip_duration * ROUND_DURATION; + + let (val, _) = GossipValidator::::new(config, voter_set_state(), None); + + // the validator start at set id 0 + val.note_set(SetId(0), Vec::new(), |_, _| {}); + + let mut authorities = Vec::new(); + for _ in 0..100 { + let peer_id = PeerId::random(); + val.inner + .write() + .peers + .new_peer(peer_id.clone(), ObservedRole::Authority); + authorities.push(peer_id); + } + + { + let mut message_allowed = val.message_allowed(); + // since our node is not an authority we should **never** gossip any + // messages on the first attempt. 
+ for authority in &authorities { + assert!(!message_allowed( + authority, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + )); + } + } + + { + val.inner.write().round_start = Instant::now() - round_duration * 4; + let mut message_allowed = val.message_allowed(); + // on the fourth round duration we should allow messages to authorities + // (on the second we would do `sqrt(authorities)`) + for authority in &authorities { + assert!(message_allowed( + authority, + MessageIntent::Broadcast, + &crate::communication::round_topic::(1, 0), + &[], + )); + } + } + } + + #[test] + fn only_gossip_commits_to_peers_on_same_set() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + // the validator start at set id 1 + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // add a new peer at set id 1 + let peer1 = PeerId::random(); + + val.inner + .write() + .peers + .new_peer(peer1.clone(), ObservedRole::Authority); + + val.inner + .write() + .peers + .update_peer_state( + &peer1, + NeighborPacket { + round: Round(1), + set_id: SetId(1), + commit_finalized_height: 1, + }, + ) + .unwrap(); + + // peer2 will default to set id 0 + let peer2 = PeerId::random(); + val.inner + .write() + .peers + .new_peer(peer2.clone(), ObservedRole::Authority); + + // create a commit for round 1 of set id 1 + // targeting a block at height 2 + let commit = { + let commit = finality_grandpa::CompactCommit { + target_hash: H256::random(), + target_number: 2, + precommits: Vec::new(), + auth_data: Vec::new(), + }; + + crate::communication::gossip::GossipMessage::::Commit( + crate::communication::gossip::FullCommitMessage { + round: Round(1), + set_id: SetId(1), + message: commit, + }, + ) + .encode() + }; + + // note the commit in the validator + val.note_commit_finalized(Round(1), SetId(1), 2, |_, _| {}); + + let mut message_allowed = val.message_allowed(); + + // the commit should be allowed to peer 1 + assert!(message_allowed( + &peer1, + 
MessageIntent::Broadcast, + &crate::communication::global_topic::(1), + &commit, + )); + + // but disallowed to peer 2 since the peer is on set id 0 + // the commit should be allowed to peer 1 + assert!(!message_allowed( + &peer2, + MessageIntent::Broadcast, + &crate::communication::global_topic::(1), + &commit, + )); + } + + #[test] + fn expire_commits_from_older_rounds() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + let commit = |round, set_id, target_number| { + let commit = finality_grandpa::CompactCommit { + target_hash: H256::random(), + target_number, + precommits: Vec::new(), + auth_data: Vec::new(), + }; + + crate::communication::gossip::GossipMessage::::Commit( + crate::communication::gossip::FullCommitMessage { + round: Round(round), + set_id: SetId(set_id), + message: commit, + }, + ) + .encode() + }; + + // note the beginning of a new set with id 1 + val.note_set(SetId(1), Vec::new(), |_, _| {}); + + // note a commit for round 1 in the validator + // finalizing a block at height 2 + val.note_commit_finalized(Round(1), SetId(1), 2, |_, _| {}); + + let mut message_expired = val.message_expired(); + + // a commit message for round 1 that finalizes the same height as we + // have observed previously should not be expired + assert!(!message_expired( + crate::communication::global_topic::(1), + &commit(1, 1, 2), + )); + + // it should be expired if it is for a lower block + assert!(message_expired( + crate::communication::global_topic::(1), + &commit(1, 1, 1), + )); + + // or the same block height but from the previous round + assert!(message_expired( + crate::communication::global_topic::(1), + &commit(0, 1, 2), + )); + } + + #[test] + fn allow_noting_different_authorities_for_same_set() { + let (val, _) = GossipValidator::::new(config(), voter_set_state(), None); + + let a1 = vec![AuthorityId::default()]; + val.note_set(SetId(1), a1.clone(), |_, _| {}); + + assert_eq!(val.inner().read().authorities, a1); + + let a2 = 
vec![AuthorityId::default(), AuthorityId::default()]; + val.note_set(SetId(1), a2.clone(), |_, _| {}); + + assert_eq!(val.inner().read().authorities, a2); + } } diff --git a/client/finality-grandpa/src/communication/mod.rs b/client/finality-grandpa/src/communication/mod.rs index 7daa121513..0ba2bc4cc0 100644 --- a/client/finality-grandpa/src/communication/mod.rs +++ b/client/finality-grandpa/src/communication/mod.rs @@ -27,36 +27,35 @@ //! In the future, there will be a fallback for allowing sending the same message //! under certain conditions that are used to un-stick the protocol. -use futures::{prelude::*, channel::mpsc}; +use futures::{channel::mpsc, prelude::*}; use log::{debug, trace}; use parking_lot::Mutex; use prometheus_endpoint::Registry; -use std::{pin::Pin, sync::Arc, task::{Context, Poll}}; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; -use finality_grandpa::Message::{Prevote, Precommit, PrimaryPropose}; +use finality_grandpa::Message::{Precommit, Prevote, PrimaryPropose}; use finality_grandpa::{voter, voter_set::VoterSet}; +use parity_scale_codec::{Decode, Encode}; use sc_network::{NetworkService, ReputationChange}; use sc_network_gossip::{GossipEngine, Network as GossipNetwork}; -use parity_scale_codec::{Encode, Decode}; +use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; use sp_core::Pair; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, NumberFor}; -use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use crate::environment::HasVoted; use crate::{ - CatchUp, Commit, CommunicationIn, CommunicationOutH, - CompactCommit, Error, Message, SignedMessage, + CatchUp, Commit, CommunicationIn, CommunicationOutH, CompactCommit, Error, Message, + SignedMessage, }; -use crate::environment::HasVoted; use gossip::{ - FullCatchUpMessage, - FullCommitMessage, - GossipMessage, - GossipValidator, - PeerReport, - VoteMessage, + FullCatchUpMessage, FullCommitMessage, GossipMessage, 
GossipValidator, PeerReport, VoteMessage, }; use sp_finality_grandpa::{ - AuthorityPair, AuthorityId, AuthoritySignature, SetId as SetIdNumber, RoundNumber, + AuthorityId, AuthorityPair, AuthoritySignature, RoundNumber, SetId as SetIdNumber, }; use sp_utils::mpsc::TracingUnboundedReceiver; @@ -71,36 +70,38 @@ pub const GRANDPA_PROTOCOL_NAME: &[u8] = b"/paritytech/grandpa/1"; // cost scalars for reporting peers. mod cost { - use sc_network::ReputationChange as Rep; - pub(super) const PAST_REJECTION: Rep = Rep::new(-50, "Grandpa: Past message"); - pub(super) const BAD_SIGNATURE: Rep = Rep::new(-100, "Grandpa: Bad signature"); - pub(super) const MALFORMED_CATCH_UP: Rep = Rep::new(-1000, "Grandpa: Malformed cath-up"); - pub(super) const MALFORMED_COMMIT: Rep = Rep::new(-1000, "Grandpa: Malformed commit"); - pub(super) const FUTURE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Future message"); - pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "Grandpa: Unknown voter"); - - pub(super) const INVALID_VIEW_CHANGE: Rep = Rep::new(-500, "Grandpa: Invalid view change"); - pub(super) const PER_UNDECODABLE_BYTE: i32 = -5; - pub(super) const PER_SIGNATURE_CHECKED: i32 = -25; - pub(super) const PER_BLOCK_LOADED: i32 = -10; - pub(super) const INVALID_CATCH_UP: Rep = Rep::new(-5000, "Grandpa: Invalid catch-up"); - pub(super) const INVALID_COMMIT: Rep = Rep::new(-5000, "Grandpa: Invalid commit"); - pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Out-of-scope message"); - pub(super) const CATCH_UP_REQUEST_TIMEOUT: Rep = Rep::new(-200, "Grandpa: Catch-up request timeout"); - - // cost of answering a catch up request - pub(super) const CATCH_UP_REPLY: Rep = Rep::new(-200, "Grandpa: Catch-up reply"); - pub(super) const HONEST_OUT_OF_SCOPE_CATCH_UP: Rep = Rep::new(-200, "Grandpa: Out-of-scope catch-up"); + use sc_network::ReputationChange as Rep; + pub(super) const PAST_REJECTION: Rep = Rep::new(-50, "Grandpa: Past message"); + pub(super) const BAD_SIGNATURE: Rep = 
Rep::new(-100, "Grandpa: Bad signature"); + pub(super) const MALFORMED_CATCH_UP: Rep = Rep::new(-1000, "Grandpa: Malformed cath-up"); + pub(super) const MALFORMED_COMMIT: Rep = Rep::new(-1000, "Grandpa: Malformed commit"); + pub(super) const FUTURE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Future message"); + pub(super) const UNKNOWN_VOTER: Rep = Rep::new(-150, "Grandpa: Unknown voter"); + + pub(super) const INVALID_VIEW_CHANGE: Rep = Rep::new(-500, "Grandpa: Invalid view change"); + pub(super) const PER_UNDECODABLE_BYTE: i32 = -5; + pub(super) const PER_SIGNATURE_CHECKED: i32 = -25; + pub(super) const PER_BLOCK_LOADED: i32 = -10; + pub(super) const INVALID_CATCH_UP: Rep = Rep::new(-5000, "Grandpa: Invalid catch-up"); + pub(super) const INVALID_COMMIT: Rep = Rep::new(-5000, "Grandpa: Invalid commit"); + pub(super) const OUT_OF_SCOPE_MESSAGE: Rep = Rep::new(-500, "Grandpa: Out-of-scope message"); + pub(super) const CATCH_UP_REQUEST_TIMEOUT: Rep = + Rep::new(-200, "Grandpa: Catch-up request timeout"); + + // cost of answering a catch up request + pub(super) const CATCH_UP_REPLY: Rep = Rep::new(-200, "Grandpa: Catch-up reply"); + pub(super) const HONEST_OUT_OF_SCOPE_CATCH_UP: Rep = + Rep::new(-200, "Grandpa: Out-of-scope catch-up"); } // benefit scalars for reporting peers. 
mod benefit { - use sc_network::ReputationChange as Rep; - pub(super) const NEIGHBOR_MESSAGE: Rep = Rep::new(100, "Grandpa: Neighbor message"); - pub(super) const ROUND_MESSAGE: Rep = Rep::new(100, "Grandpa: Round message"); - pub(super) const BASIC_VALIDATED_CATCH_UP: Rep = Rep::new(200, "Grandpa: Catch-up message"); - pub(super) const BASIC_VALIDATED_COMMIT: Rep = Rep::new(100, "Grandpa: Commit"); - pub(super) const PER_EQUIVOCATION: i32 = 10; + use sc_network::ReputationChange as Rep; + pub(super) const NEIGHBOR_MESSAGE: Rep = Rep::new(100, "Grandpa: Neighbor message"); + pub(super) const ROUND_MESSAGE: Rep = Rep::new(100, "Grandpa: Round message"); + pub(super) const BASIC_VALIDATED_CATCH_UP: Rep = Rep::new(200, "Grandpa: Catch-up message"); + pub(super) const BASIC_VALIDATED_COMMIT: Rep = Rep::new(100, "Grandpa: Commit"); + pub(super) const PER_EQUIVOCATION: i32 = 10; } /// If the voter set is larger than this value some telemetry events are not @@ -113,191 +114,187 @@ const TELEMETRY_VOTERS_LIMIT: usize = 10; /// Something that provides both the capabilities needed for the `gossip_network::Network` trait as /// well as the ability to set a fork sync request for a particular block. pub trait Network: GossipNetwork + Clone + Send + 'static { - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); + /// Notifies the sync service to try and sync the given block from the given + /// peers. 
+ /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ); } -impl Network for Arc> where - B: BlockT, - H: sc_network::ExHashT, +impl Network for Arc> +where + B: BlockT, + H: sc_network::ExHashT, { - fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - NetworkService::set_sync_fork_request(self, peers, hash, number) - } + fn set_sync_fork_request( + &self, + peers: Vec, + hash: B::Hash, + number: NumberFor, + ) { + NetworkService::set_sync_fork_request(self, peers, hash, number) + } } /// Create a unique topic for a round and set-id combo. pub(crate) fn round_topic(round: RoundNumber, set_id: SetIdNumber) -> B::Hash { - <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) + <::Hashing as HashT>::hash(format!("{}-{}", set_id, round).as_bytes()) } /// Create a unique topic for global messages on a set ID. pub(crate) fn global_topic(set_id: SetIdNumber) -> B::Hash { - <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) + <::Hashing as HashT>::hash(format!("{}-GLOBAL", set_id).as_bytes()) } /// Bridge between the underlying network service, gossiping consensus messages and Grandpa pub(crate) struct NetworkBridge> { - service: N, - gossip_engine: Arc>>, - validator: Arc>, - - /// Sender side of the neighbor packet channel. - /// - /// Packets sent into this channel are processed by the `NeighborPacketWorker` and passed on to - /// the underlying `GossipEngine`. - neighbor_sender: periodic::NeighborPacketSender, - - /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. 
- // - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, - // thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. - neighbor_packet_worker: Arc>>, - - /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the - /// gossip engine. - // - // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, - // thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given that it is - // just an `UnboundedReceiver`, one could also switch to a multi-producer-*multi*-consumer - // channel implementation. - gossip_validator_report_stream: Arc>>, + service: N, + gossip_engine: Arc>>, + validator: Arc>, + + /// Sender side of the neighbor packet channel. + /// + /// Packets sent into this channel are processed by the `NeighborPacketWorker` and passed on to + /// the underlying `GossipEngine`. + neighbor_sender: periodic::NeighborPacketSender, + + /// `NeighborPacketWorker` processing packets sent through the `NeighborPacketSender`. + // + // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, + // thus one has to wrap `neighbor_packet_worker` with an `Arc` `Mutex`. + neighbor_packet_worker: Arc>>, + + /// Receiver side of the peer report stream populated by the gossip validator, forwarded to the + /// gossip engine. + // + // `NetworkBridge` is required to be cloneable, thus one needs to be able to clone its children, + // thus one has to wrap gossip_validator_report_stream with an `Arc` `Mutex`. Given that it is + // just an `UnboundedReceiver`, one could also switch to a multi-producer-*multi*-consumer + // channel implementation. + gossip_validator_report_stream: Arc>>, } impl> Unpin for NetworkBridge {} impl> NetworkBridge { - /// Create a new NetworkBridge to the given NetworkService. Returns the service - /// handle. 
- /// On creation it will register previous rounds' votes with the gossip - /// service taken from the VoterSetState. - pub(crate) fn new( - service: N, - config: crate::Config, - set_state: crate::environment::SharedVoterSetState, - prometheus_registry: Option<&Registry>, - ) -> Self { - let (validator, report_stream) = GossipValidator::new( - config, - set_state.clone(), - prometheus_registry, - ); - - let validator = Arc::new(validator); - let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( - service.clone(), - GRANDPA_ENGINE_ID, - GRANDPA_PROTOCOL_NAME, - validator.clone() - ))); - - { - // register all previous votes with the gossip service so that they're - // available to peers potentially stuck on a previous round. - let completed = set_state.read().completed_rounds(); - let (set_id, voters) = completed.set_info(); - validator.note_set(SetId(set_id), voters.to_vec(), |_, _| {}); - for round in completed.iter() { - let topic = round_topic::(round.number, set_id); - - // we need to note the round with the gossip validator otherwise - // messages will be ignored. 
- validator.note_round(Round(round.number), |_, _| {}); - - for signed in round.votes.iter() { - let message = gossip::GossipMessage::Vote( - gossip::VoteMessage:: { - message: signed.clone(), - round: Round(round.number), - set_id: SetId(set_id), - } - ); - - gossip_engine.lock().register_gossip_message( - topic, - message.encode(), - ); - } - - trace!(target: "afg", - "Registered {} messages for topic {:?} (round: {}, set_id: {})", - round.votes.len(), - topic, - round.number, - set_id, - ); - } - } - - let (neighbor_packet_worker, neighbor_packet_sender) = periodic::NeighborPacketWorker::new(); - - let bridge = NetworkBridge { - service, - gossip_engine, - validator, - neighbor_sender: neighbor_packet_sender, - neighbor_packet_worker: Arc::new(Mutex::new(neighbor_packet_worker)), - gossip_validator_report_stream: Arc::new(Mutex::new(report_stream)), - }; - - bridge - } - - /// Note the beginning of a new round to the `GossipValidator`. - pub(crate) fn note_round( - &self, - round: Round, - set_id: SetId, - voters: &VoterSet, - ) { - // is a no-op if currently in that set. - self.validator.note_set( - set_id, - voters.voters().iter().map(|(v, _)| v.clone()).collect(), - |to, neighbor| self.neighbor_sender.send(to, neighbor), - ); - - self.validator.note_round( - round, - |to, neighbor| self.neighbor_sender.send(to, neighbor), - ); - } - - /// Get a stream of signature-checked round messages from the network as well as a sink for round messages to the - /// network all within the current set. 
- pub(crate) fn round_communication( - &self, - round: Round, - set_id: SetId, - voters: Arc>, - local_key: Option, - has_voted: HasVoted, - ) -> ( - impl Stream> + Unpin, - OutgoingMessages, - ) { - self.note_round( - round, - set_id, - &*voters, - ); - - let locals = local_key.and_then(|pair| { - let id = pair.public(); - if voters.contains_key(&id) { - Some((pair, id)) - } else { - None - } - }); - - let topic = round_topic::(round.0, set_id.0); - let incoming = self.gossip_engine.lock().messages_for(topic) + /// Create a new NetworkBridge to the given NetworkService. Returns the service + /// handle. + /// On creation it will register previous rounds' votes with the gossip + /// service taken from the VoterSetState. + pub(crate) fn new( + service: N, + config: crate::Config, + set_state: crate::environment::SharedVoterSetState, + prometheus_registry: Option<&Registry>, + ) -> Self { + let (validator, report_stream) = + GossipValidator::new(config, set_state.clone(), prometheus_registry); + + let validator = Arc::new(validator); + let gossip_engine = Arc::new(Mutex::new(GossipEngine::new( + service.clone(), + GRANDPA_ENGINE_ID, + GRANDPA_PROTOCOL_NAME, + validator.clone(), + ))); + + { + // register all previous votes with the gossip service so that they're + // available to peers potentially stuck on a previous round. + let completed = set_state.read().completed_rounds(); + let (set_id, voters) = completed.set_info(); + validator.note_set(SetId(set_id), voters.to_vec(), |_, _| {}); + for round in completed.iter() { + let topic = round_topic::(round.number, set_id); + + // we need to note the round with the gossip validator otherwise + // messages will be ignored. 
+ validator.note_round(Round(round.number), |_, _| {}); + + for signed in round.votes.iter() { + let message = gossip::GossipMessage::Vote(gossip::VoteMessage:: { + message: signed.clone(), + round: Round(round.number), + set_id: SetId(set_id), + }); + + gossip_engine + .lock() + .register_gossip_message(topic, message.encode()); + } + + trace!(target: "afg", + "Registered {} messages for topic {:?} (round: {}, set_id: {})", + round.votes.len(), + topic, + round.number, + set_id, + ); + } + } + + let (neighbor_packet_worker, neighbor_packet_sender) = + periodic::NeighborPacketWorker::new(); + + let bridge = NetworkBridge { + service, + gossip_engine, + validator, + neighbor_sender: neighbor_packet_sender, + neighbor_packet_worker: Arc::new(Mutex::new(neighbor_packet_worker)), + gossip_validator_report_stream: Arc::new(Mutex::new(report_stream)), + }; + + bridge + } + + /// Note the beginning of a new round to the `GossipValidator`. + pub(crate) fn note_round(&self, round: Round, set_id: SetId, voters: &VoterSet) { + // is a no-op if currently in that set. + self.validator.note_set( + set_id, + voters.voters().iter().map(|(v, _)| v.clone()).collect(), + |to, neighbor| self.neighbor_sender.send(to, neighbor), + ); + + self.validator.note_round(round, |to, neighbor| { + self.neighbor_sender.send(to, neighbor) + }); + } + + /// Get a stream of signature-checked round messages from the network as well as a sink for round messages to the + /// network all within the current set. 
+ pub(crate) fn round_communication( + &self, + round: Round, + set_id: SetId, + voters: Arc>, + local_key: Option, + has_voted: HasVoted, + ) -> ( + impl Stream> + Unpin, + OutgoingMessages, + ) { + self.note_round(round, set_id, &*voters); + + let locals = local_key.and_then(|pair| { + let id = pair.public(); + if voters.contains_key(&id) { + Some((pair, id)) + } else { + None + } + }); + + let topic = round_topic::(round.0, set_id.0); + let incoming = self.gossip_engine.lock().messages_for(topic) .filter_map(move |notification| { let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); @@ -348,233 +345,232 @@ impl> NetworkBridge { } }); - let (tx, out_rx) = mpsc::channel(0); - let outgoing = OutgoingMessages:: { - round: round.0, - set_id: set_id.0, - network: self.gossip_engine.clone(), - locals, - sender: tx, - has_voted, - }; - - // Combine incoming votes from external GRANDPA nodes with outgoing - // votes from our own GRANDPA voter to have a single - // vote-import-pipeline. - let incoming = stream::select(incoming, out_rx); - - (incoming, outgoing) - } - - /// Set up the global communication streams. 
- pub(crate) fn global_communication( - &self, - set_id: SetId, - voters: Arc>, - is_voter: bool, - ) -> ( - impl Stream>, - impl Sink, Error = Error> + Unpin, - ) { - self.validator.note_set( - set_id, - voters.voters().iter().map(|(v, _)| v.clone()).collect(), - |to, neighbor| self.neighbor_sender.send(to, neighbor), - ); - - let topic = global_topic::(set_id.0); - let incoming = incoming_global( - self.gossip_engine.clone(), - topic, - voters, - self.validator.clone(), - self.neighbor_sender.clone(), - ); - - let outgoing = CommitsOut::::new( - self.gossip_engine.clone(), - set_id.0, - is_voter, - self.validator.clone(), - self.neighbor_sender.clone(), - ); - - let outgoing = outgoing.with(|out| { - let voter::CommunicationOut::Commit(round, commit) = out; - future::ok((round, commit)) - }); - - (incoming, outgoing) - } - - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). - pub(crate) fn set_sync_fork_request( - &self, - peers: Vec, - hash: B::Hash, - number: NumberFor - ) { - Network::set_sync_fork_request(&self.service, peers, hash, number) - } + let (tx, out_rx) = mpsc::channel(0); + let outgoing = OutgoingMessages:: { + round: round.0, + set_id: set_id.0, + network: self.gossip_engine.clone(), + locals, + sender: tx, + has_voted, + }; + + // Combine incoming votes from external GRANDPA nodes with outgoing + // votes from our own GRANDPA voter to have a single + // vote-import-pipeline. + let incoming = stream::select(incoming, out_rx); + + (incoming, outgoing) + } + + /// Set up the global communication streams. 
+ pub(crate) fn global_communication( + &self, + set_id: SetId, + voters: Arc>, + is_voter: bool, + ) -> ( + impl Stream>, + impl Sink, Error = Error> + Unpin, + ) { + self.validator.note_set( + set_id, + voters.voters().iter().map(|(v, _)| v.clone()).collect(), + |to, neighbor| self.neighbor_sender.send(to, neighbor), + ); + + let topic = global_topic::(set_id.0); + let incoming = incoming_global( + self.gossip_engine.clone(), + topic, + voters, + self.validator.clone(), + self.neighbor_sender.clone(), + ); + + let outgoing = CommitsOut::::new( + self.gossip_engine.clone(), + set_id.0, + is_voter, + self.validator.clone(), + self.neighbor_sender.clone(), + ); + + let outgoing = outgoing.with(|out| { + let voter::CommunicationOut::Commit(round, commit) = out; + future::ok((round, commit)) + }); + + (incoming, outgoing) + } + + /// Notifies the sync service to try and sync the given block from the given + /// peers. + /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). 
+ pub(crate) fn set_sync_fork_request( + &self, + peers: Vec, + hash: B::Hash, + number: NumberFor, + ) { + Network::set_sync_fork_request(&self.service, peers, hash, number) + } } impl> Future for NetworkBridge { - type Output = Result<(), Error>; - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - loop { - match self.neighbor_packet_worker.lock().poll_next_unpin(cx) { - Poll::Ready(Some((to, packet))) => { - self.gossip_engine.lock().send_message(to, packet.encode()); - }, - Poll::Ready(None) => return Poll::Ready( - Err(Error::Network("Neighbor packet worker stream closed.".into())) - ), - Poll::Pending => break, - } - } - - loop { - match self.gossip_validator_report_stream.lock().poll_next_unpin(cx) { - Poll::Ready(Some(PeerReport { who, cost_benefit })) => { - self.gossip_engine.lock().report(who, cost_benefit); - }, - Poll::Ready(None) => return Poll::Ready( - Err(Error::Network("Gossip validator report stream closed.".into())) - ), - Poll::Pending => break, - } - } - - match self.gossip_engine.lock().poll_unpin(cx) { - Poll::Ready(()) => return Poll::Ready( - Err(Error::Network("Gossip engine future finished.".into())) - ), - Poll::Pending => {}, - } - - Poll::Pending - } + type Output = Result<(), Error>; + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + loop { + match self.neighbor_packet_worker.lock().poll_next_unpin(cx) { + Poll::Ready(Some((to, packet))) => { + self.gossip_engine.lock().send_message(to, packet.encode()); + } + Poll::Ready(None) => { + return Poll::Ready(Err(Error::Network( + "Neighbor packet worker stream closed.".into(), + ))) + } + Poll::Pending => break, + } + } + + loop { + match self + .gossip_validator_report_stream + .lock() + .poll_next_unpin(cx) + { + Poll::Ready(Some(PeerReport { who, cost_benefit })) => { + self.gossip_engine.lock().report(who, cost_benefit); + } + Poll::Ready(None) => { + return Poll::Ready(Err(Error::Network( + "Gossip validator report stream closed.".into(), + ))) + } + 
Poll::Pending => break, + } + } + + match self.gossip_engine.lock().poll_unpin(cx) { + Poll::Ready(()) => { + return Poll::Ready(Err(Error::Network("Gossip engine future finished.".into()))) + } + Poll::Pending => {} + } + + Poll::Pending + } } fn incoming_global( - gossip_engine: Arc>>, - topic: B::Hash, - voters: Arc>, - gossip_validator: Arc>, - neighbor_sender: periodic::NeighborPacketSender, + gossip_engine: Arc>>, + topic: B::Hash, + voters: Arc>, + gossip_validator: Arc>, + neighbor_sender: periodic::NeighborPacketSender, ) -> impl Stream> { - let process_commit = move | - msg: FullCommitMessage, - mut notification: sc_network_gossip::TopicNotification, - gossip_engine: &Arc>>, - gossip_validator: &Arc>, - voters: &VoterSet, - | { - if voters.len() <= TELEMETRY_VOTERS_LIMIT { - let precommits_signed_by: Vec = - msg.message.auth_data.iter().map(move |(_, a)| { - format!("{}", a) - }).collect(); - - telemetry!(CONSENSUS_INFO; "afg.received_commit"; - "contains_precommits_signed_by" => ?precommits_signed_by, - "target_number" => ?msg.message.target_number.clone(), - "target_hash" => ?msg.message.target_hash.clone(), - ); - } - - if let Err(cost) = check_compact_commit::( - &msg.message, - voters, - msg.round, - msg.set_id, - ) { - if let Some(who) = notification.sender { - gossip_engine.lock().report(who, cost); - } - - return None; - } - - let round = msg.round; - let set_id = msg.set_id; - let commit = msg.message; - let finalized_number = commit.target_number; - let gossip_validator = gossip_validator.clone(); - let gossip_engine = gossip_engine.clone(); - let neighbor_sender = neighbor_sender.clone(); - let cb = move |outcome| match outcome { - voter::CommitProcessingOutcome::Good(_) => { - // if it checks out, gossip it. not accounting for - // any discrepancy between the actual ghost and the claimed - // finalized number. 
- gossip_validator.note_commit_finalized( - round, - set_id, - finalized_number, - |to, neighbor| neighbor_sender.send(to, neighbor), - ); - - gossip_engine.lock().gossip_message(topic, notification.message.clone(), false); - } - voter::CommitProcessingOutcome::Bad(_) => { - // report peer and do not gossip. - if let Some(who) = notification.sender.take() { - gossip_engine.lock().report(who, cost::INVALID_COMMIT); - } - } - }; - - let cb = voter::Callback::Work(Box::new(cb)); - - Some(voter::CommunicationIn::Commit(round.0, commit, cb)) - }; - - let process_catch_up = move | - msg: FullCatchUpMessage, - mut notification: sc_network_gossip::TopicNotification, - gossip_engine: &Arc>>, - gossip_validator: &Arc>, - voters: &VoterSet, - | { - let gossip_validator = gossip_validator.clone(); - let gossip_engine = gossip_engine.clone(); - - if let Err(cost) = check_catch_up::( - &msg.message, - voters, - msg.set_id, - ) { - if let Some(who) = notification.sender { - gossip_engine.lock().report(who, cost); - } - - return None; - } - - let cb = move |outcome| { - if let voter::CatchUpProcessingOutcome::Bad(_) = outcome { - // report peer - if let Some(who) = notification.sender.take() { - gossip_engine.lock().report(who, cost::INVALID_CATCH_UP); - } - } - - gossip_validator.note_catch_up_message_processed(); - }; - - let cb = voter::Callback::Work(Box::new(cb)); - - Some(voter::CommunicationIn::CatchUp(msg.message, cb)) - }; - - gossip_engine.clone().lock().messages_for(topic) + let process_commit = move |msg: FullCommitMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: &Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { + if voters.len() <= TELEMETRY_VOTERS_LIMIT { + let precommits_signed_by: Vec = msg + .message + .auth_data + .iter() + .map(move |(_, a)| format!("{}", a)) + .collect(); + + telemetry!(CONSENSUS_INFO; "afg.received_commit"; + "contains_precommits_signed_by" => ?precommits_signed_by, + "target_number" => 
?msg.message.target_number.clone(), + "target_hash" => ?msg.message.target_hash.clone(), + ); + } + + if let Err(cost) = check_compact_commit::(&msg.message, voters, msg.round, msg.set_id) { + if let Some(who) = notification.sender { + gossip_engine.lock().report(who, cost); + } + + return None; + } + + let round = msg.round; + let set_id = msg.set_id; + let commit = msg.message; + let finalized_number = commit.target_number; + let gossip_validator = gossip_validator.clone(); + let gossip_engine = gossip_engine.clone(); + let neighbor_sender = neighbor_sender.clone(); + let cb = move |outcome| match outcome { + voter::CommitProcessingOutcome::Good(_) => { + // if it checks out, gossip it. not accounting for + // any discrepancy between the actual ghost and the claimed + // finalized number. + gossip_validator.note_commit_finalized( + round, + set_id, + finalized_number, + |to, neighbor| neighbor_sender.send(to, neighbor), + ); + + gossip_engine + .lock() + .gossip_message(topic, notification.message.clone(), false); + } + voter::CommitProcessingOutcome::Bad(_) => { + // report peer and do not gossip. 
+ if let Some(who) = notification.sender.take() { + gossip_engine.lock().report(who, cost::INVALID_COMMIT); + } + } + }; + + let cb = voter::Callback::Work(Box::new(cb)); + + Some(voter::CommunicationIn::Commit(round.0, commit, cb)) + }; + + let process_catch_up = move |msg: FullCatchUpMessage, + mut notification: sc_network_gossip::TopicNotification, + gossip_engine: &Arc>>, + gossip_validator: &Arc>, + voters: &VoterSet| { + let gossip_validator = gossip_validator.clone(); + let gossip_engine = gossip_engine.clone(); + + if let Err(cost) = check_catch_up::(&msg.message, voters, msg.set_id) { + if let Some(who) = notification.sender { + gossip_engine.lock().report(who, cost); + } + + return None; + } + + let cb = move |outcome| { + if let voter::CatchUpProcessingOutcome::Bad(_) = outcome { + // report peer + if let Some(who) = notification.sender.take() { + gossip_engine.lock().report(who, cost::INVALID_CATCH_UP); + } + } + + gossip_validator.note_catch_up_message_processed(); + }; + + let cb = voter::Callback::Work(Box::new(cb)); + + Some(voter::CommunicationIn::CatchUp(msg.message, cb)) + }; + + gossip_engine.clone().lock().messages_for(topic) .filter_map(|notification| { // this could be optimized by decoding piecewise. 
let decoded = GossipMessage::::decode(&mut ¬ification.message[..]); @@ -598,40 +594,40 @@ fn incoming_global( } impl> Clone for NetworkBridge { - fn clone(&self) -> Self { - NetworkBridge { - service: self.service.clone(), - gossip_engine: self.gossip_engine.clone(), - validator: Arc::clone(&self.validator), - neighbor_sender: self.neighbor_sender.clone(), - neighbor_packet_worker: self.neighbor_packet_worker.clone(), - gossip_validator_report_stream: self.gossip_validator_report_stream.clone(), - } - } + fn clone(&self) -> Self { + NetworkBridge { + service: self.service.clone(), + gossip_engine: self.gossip_engine.clone(), + validator: Arc::clone(&self.validator), + neighbor_sender: self.neighbor_sender.clone(), + neighbor_packet_worker: self.neighbor_packet_worker.clone(), + gossip_validator_report_stream: self.gossip_validator_report_stream.clone(), + } + } } /// Encode round message localized to a given round and set id. pub(crate) fn localized_payload( - round: RoundNumber, - set_id: SetIdNumber, - message: &E, + round: RoundNumber, + set_id: SetIdNumber, + message: &E, ) -> Vec { - let mut buf = Vec::new(); - localized_payload_with_buffer(round, set_id, message, &mut buf); - buf + let mut buf = Vec::new(); + localized_payload_with_buffer(round, set_id, message, &mut buf); + buf } /// Encode round message localized to a given round and set id using the given /// buffer. The given buffer will be cleared and the resulting encoded payload /// will always be written to the start of the buffer. pub(crate) fn localized_payload_with_buffer( - round: RoundNumber, - set_id: SetIdNumber, - message: &E, - buf: &mut Vec, + round: RoundNumber, + set_id: SetIdNumber, + message: &E, + buf: &mut Vec, ) { - buf.clear(); - (message, round, set_id).encode_to(buf) + buf.clear(); + (message, round, set_id).encode_to(buf) } /// Type-safe wrapper around a round number. 
@@ -645,20 +641,13 @@ pub struct SetId(pub SetIdNumber); /// Check a message signature by encoding the message as a localized payload and /// verifying the provided signature using the expected authority id. pub(crate) fn check_message_sig( - message: &Message, - id: &AuthorityId, - signature: &AuthoritySignature, - round: RoundNumber, - set_id: SetIdNumber, + message: &Message, + id: &AuthorityId, + signature: &AuthoritySignature, + round: RoundNumber, + set_id: SetIdNumber, ) -> Result<(), ()> { - check_message_sig_with_buffer::( - message, - id, - signature, - round, - set_id, - &mut Vec::new(), - ) + check_message_sig_with_buffer::(message, id, signature, round, set_id, &mut Vec::new()) } /// Check a message signature by encoding the message as a localized payload and @@ -666,22 +655,22 @@ pub(crate) fn check_message_sig( /// The encoding necessary to verify the signature will be done using the given /// buffer, the original content of the buffer will be cleared. pub(crate) fn check_message_sig_with_buffer( - message: &Message, - id: &AuthorityId, - signature: &AuthoritySignature, - round: RoundNumber, - set_id: SetIdNumber, - buf: &mut Vec, + message: &Message, + id: &AuthorityId, + signature: &AuthoritySignature, + round: RoundNumber, + set_id: SetIdNumber, + buf: &mut Vec, ) -> Result<(), ()> { - let as_public = id.clone(); - localized_payload_with_buffer(round, set_id, message, buf); - - if AuthorityPair::verify(signature, buf, &as_public) { - Ok(()) - } else { - debug!(target: "afg", "Bad signature on message from {:?}", id); - Err(()) - } + let as_public = id.clone(); + localized_payload_with_buffer(round, set_id, message, buf); + + if AuthorityPair::verify(signature, buf, &as_public) { + Ok(()) + } else { + debug!(target: "afg", "Bad signature on message from {:?}", id); + Err(()) + } } /// A sink for outgoing messages to the network. 
Any messages that are sent will @@ -692,362 +681,378 @@ pub(crate) fn check_message_sig_with_buffer( /// `ed25519` and `BLS` signatures (which we might use in the future), care must /// be taken when switching to different key types. pub(crate) struct OutgoingMessages { - round: RoundNumber, - set_id: SetIdNumber, - locals: Option<(AuthorityPair, AuthorityId)>, - sender: mpsc::Sender>, - network: Arc>>, - has_voted: HasVoted, + round: RoundNumber, + set_id: SetIdNumber, + locals: Option<(AuthorityPair, AuthorityId)>, + sender: mpsc::Sender>, + network: Arc>>, + has_voted: HasVoted, } impl Unpin for OutgoingMessages {} -impl Sink> for OutgoingMessages -{ - type Error = Error; - - fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Sink::poll_ready(Pin::new(&mut self.sender), cx) - .map(|elem| { elem.map_err(|e| { - Error::Network(format!("Failed to poll_ready channel sender: {:?}", e)) - })}) - } - - fn start_send(mut self: Pin<&mut Self>, mut msg: Message) -> Result<(), Self::Error> { - // if we've voted on this round previously under the same key, send that vote instead - match &mut msg { - finality_grandpa::Message::PrimaryPropose(ref mut vote) => - if let Some(propose) = self.has_voted.propose() { - *vote = propose.clone(); - }, - finality_grandpa::Message::Prevote(ref mut vote) => - if let Some(prevote) = self.has_voted.prevote() { - *vote = prevote.clone(); - }, - finality_grandpa::Message::Precommit(ref mut vote) => - if let Some(precommit) = self.has_voted.precommit() { - *vote = precommit.clone(); - }, - } - - // when locals exist, sign messages on import - if let Some((ref pair, ref local_id)) = self.locals { - let encoded = localized_payload(self.round, self.set_id, &msg); - let signature = pair.sign(&encoded[..]); - - let target_hash = msg.target().0.clone(); - let signed = SignedMessage:: { - message: msg, - signature, - id: local_id.clone(), - }; - - let message = GossipMessage::Vote(VoteMessage:: { - message: signed.clone(), - 
round: Round(self.round), - set_id: SetId(self.set_id), - }); - - debug!( - target: "afg", - "Announcing block {} to peers which we voted on in round {} in set {}", - target_hash, - self.round, - self.set_id, - ); - - telemetry!( - CONSENSUS_DEBUG; "afg.announcing_blocks_to_voted_peers"; - "block" => ?target_hash, "round" => ?self.round, "set_id" => ?self.set_id, - ); - - // announce the block we voted on to our peers. - self.network.lock().announce(target_hash, Vec::new()); - - // propagate the message to peers - let topic = round_topic::(self.round, self.set_id); - self.network.lock().gossip_message(topic, message.encode(), false); - - // forward the message to the inner sender. - return self.sender.start_send(signed).map_err(|e| { - Error::Network(format!("Failed to start_send on channel sender: {:?}", e)) - }); - }; - - Ok(()) - } - - fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Sink::poll_close(Pin::new(&mut self.sender), cx) - .map(|elem| { elem.map_err(|e| { - Error::Network(format!("Failed to poll_close channel sender: {:?}", e)) - })}) - } +impl Sink> for OutgoingMessages { + type Error = Error; + + fn poll_ready(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + Sink::poll_ready(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { + Error::Network(format!("Failed to poll_ready channel sender: {:?}", e)) + }) + }) + } + + fn start_send(mut self: Pin<&mut Self>, mut msg: Message) -> Result<(), Self::Error> { + // if we've voted on this round previously under the same key, send that vote instead + match &mut msg { + finality_grandpa::Message::PrimaryPropose(ref mut vote) => { + if let Some(propose) = self.has_voted.propose() { + *vote = propose.clone(); + } + } + finality_grandpa::Message::Prevote(ref mut vote) => { + if let Some(prevote) = self.has_voted.prevote() { + *vote = prevote.clone(); + } + } + 
finality_grandpa::Message::Precommit(ref mut vote) => { + if let Some(precommit) = self.has_voted.precommit() { + *vote = precommit.clone(); + } + } + } + + // when locals exist, sign messages on import + if let Some((ref pair, ref local_id)) = self.locals { + let encoded = localized_payload(self.round, self.set_id, &msg); + let signature = pair.sign(&encoded[..]); + + let target_hash = msg.target().0.clone(); + let signed = SignedMessage:: { + message: msg, + signature, + id: local_id.clone(), + }; + + let message = GossipMessage::Vote(VoteMessage:: { + message: signed.clone(), + round: Round(self.round), + set_id: SetId(self.set_id), + }); + + debug!( + target: "afg", + "Announcing block {} to peers which we voted on in round {} in set {}", + target_hash, + self.round, + self.set_id, + ); + + telemetry!( + CONSENSUS_DEBUG; "afg.announcing_blocks_to_voted_peers"; + "block" => ?target_hash, "round" => ?self.round, "set_id" => ?self.set_id, + ); + + // announce the block we voted on to our peers. + self.network.lock().announce(target_hash, Vec::new()); + + // propagate the message to peers + let topic = round_topic::(self.round, self.set_id); + self.network + .lock() + .gossip_message(topic, message.encode(), false); + + // forward the message to the inner sender. + return self.sender.start_send(signed).map_err(|e| { + Error::Network(format!("Failed to start_send on channel sender: {:?}", e)) + }); + }; + + Ok(()) + } + + fn poll_flush(self: Pin<&mut Self>, _cx: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + Sink::poll_close(Pin::new(&mut self.sender), cx).map(|elem| { + elem.map_err(|e| { + Error::Network(format!("Failed to poll_close channel sender: {:?}", e)) + }) + }) + } } // checks a compact commit. returns the cost associated with processing it if // the commit was bad. 
fn check_compact_commit( - msg: &CompactCommit, - voters: &VoterSet, - round: Round, - set_id: SetId, + msg: &CompactCommit, + voters: &VoterSet, + round: Round, + set_id: SetId, ) -> Result<(), ReputationChange> { - // 4f + 1 = equivocations from f voters. - let f = voters.total_weight() - voters.threshold(); - let full_threshold = voters.total_weight() + f; - - // check total weight is not out of range. - let mut total_weight = 0; - for (_, ref id) in &msg.auth_data { - if let Some(weight) = voters.info(id).map(|info| info.weight()) { - total_weight += weight; - if total_weight > full_threshold { - return Err(cost::MALFORMED_COMMIT); - } - } else { - debug!(target: "afg", "Skipping commit containing unknown voter {}", id); - return Err(cost::MALFORMED_COMMIT); - } - } - - if total_weight < voters.threshold() { - return Err(cost::MALFORMED_COMMIT); - } - - // check signatures on all contained precommits. - let mut buf = Vec::new(); - for (i, (precommit, &(ref sig, ref id))) in msg.precommits.iter() - .zip(&msg.auth_data) - .enumerate() - { - use crate::communication::gossip::Misbehavior; - use finality_grandpa::Message as GrandpaMessage; - - if let Err(()) = check_message_sig_with_buffer::( - &GrandpaMessage::Precommit(precommit.clone()), - id, - sig, - round.0, - set_id.0, - &mut buf, - ) { - debug!(target: "afg", "Bad commit message signature {}", id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_commit_msg_signature"; "id" => ?id); - let cost = Misbehavior::BadCommitMessage { - signatures_checked: i as i32, - blocks_loaded: 0, - equivocations_caught: 0, - }.cost(); - - return Err(cost); - } - } - - Ok(()) + // 4f + 1 = equivocations from f voters. + let f = voters.total_weight() - voters.threshold(); + let full_threshold = voters.total_weight() + f; + + // check total weight is not out of range. 
+ let mut total_weight = 0; + for (_, ref id) in &msg.auth_data { + if let Some(weight) = voters.info(id).map(|info| info.weight()) { + total_weight += weight; + if total_weight > full_threshold { + return Err(cost::MALFORMED_COMMIT); + } + } else { + debug!(target: "afg", "Skipping commit containing unknown voter {}", id); + return Err(cost::MALFORMED_COMMIT); + } + } + + if total_weight < voters.threshold() { + return Err(cost::MALFORMED_COMMIT); + } + + // check signatures on all contained precommits. + let mut buf = Vec::new(); + for (i, (precommit, &(ref sig, ref id))) in + msg.precommits.iter().zip(&msg.auth_data).enumerate() + { + use crate::communication::gossip::Misbehavior; + use finality_grandpa::Message as GrandpaMessage; + + if let Err(()) = check_message_sig_with_buffer::( + &GrandpaMessage::Precommit(precommit.clone()), + id, + sig, + round.0, + set_id.0, + &mut buf, + ) { + debug!(target: "afg", "Bad commit message signature {}", id); + telemetry!(CONSENSUS_DEBUG; "afg.bad_commit_msg_signature"; "id" => ?id); + let cost = Misbehavior::BadCommitMessage { + signatures_checked: i as i32, + blocks_loaded: 0, + equivocations_caught: 0, + } + .cost(); + + return Err(cost); + } + } + + Ok(()) } // checks a catch up. returns the cost associated with processing it if // the catch up was bad. fn check_catch_up( - msg: &CatchUp, - voters: &VoterSet, - set_id: SetId, + msg: &CatchUp, + voters: &VoterSet, + set_id: SetId, ) -> Result<(), ReputationChange> { - // 4f + 1 = equivocations from f voters. - let f = voters.total_weight() - voters.threshold(); - let full_threshold = voters.total_weight() + f; - - // check total weight is not out of range for a set of votes. 
- fn check_weight<'a>( - voters: &'a VoterSet, - votes: impl Iterator, - full_threshold: u64, - ) -> Result<(), ReputationChange> { - let mut total_weight = 0; - - for id in votes { - if let Some(weight) = voters.info(&id).map(|info| info.weight()) { - total_weight += weight; - if total_weight > full_threshold { - return Err(cost::MALFORMED_CATCH_UP); - } - } else { - debug!(target: "afg", "Skipping catch up message containing unknown voter {}", id); - return Err(cost::MALFORMED_CATCH_UP); - } - } - - if total_weight < voters.threshold() { - return Err(cost::MALFORMED_CATCH_UP); - } - - Ok(()) - }; - - check_weight( - voters, - msg.prevotes.iter().map(|vote| &vote.id), - full_threshold, - )?; - - check_weight( - voters, - msg.precommits.iter().map(|vote| &vote.id), - full_threshold, - )?; - - fn check_signatures<'a, B, I>( - messages: I, - round: RoundNumber, - set_id: SetIdNumber, - mut signatures_checked: usize, - buf: &mut Vec, - ) -> Result where - B: BlockT, - I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, - { - use crate::communication::gossip::Misbehavior; - - for (msg, id, sig) in messages { - signatures_checked += 1; - - if let Err(()) = check_message_sig_with_buffer::( - &msg, - id, - sig, - round, - set_id, - buf, - ) { - debug!(target: "afg", "Bad catch up message signature {}", id); - telemetry!(CONSENSUS_DEBUG; "afg.bad_catch_up_msg_signature"; "id" => ?id); - - let cost = Misbehavior::BadCatchUpMessage { - signatures_checked: signatures_checked as i32, - }.cost(); - - return Err(cost); - } - } - - Ok(signatures_checked) - } - - let mut buf = Vec::new(); - - // check signatures on all contained prevotes. - let signatures_checked = check_signatures::( - msg.prevotes.iter().map(|vote| { - (finality_grandpa::Message::Prevote(vote.prevote.clone()), &vote.id, &vote.signature) - }), - msg.round_number, - set_id.0, - 0, - &mut buf, - )?; - - // check signatures on all contained precommits. 
- let _ = check_signatures::( - msg.precommits.iter().map(|vote| { - (finality_grandpa::Message::Precommit(vote.precommit.clone()), &vote.id, &vote.signature) - }), - msg.round_number, - set_id.0, - signatures_checked, - &mut buf, - )?; - - Ok(()) + // 4f + 1 = equivocations from f voters. + let f = voters.total_weight() - voters.threshold(); + let full_threshold = voters.total_weight() + f; + + // check total weight is not out of range for a set of votes. + fn check_weight<'a>( + voters: &'a VoterSet, + votes: impl Iterator, + full_threshold: u64, + ) -> Result<(), ReputationChange> { + let mut total_weight = 0; + + for id in votes { + if let Some(weight) = voters.info(&id).map(|info| info.weight()) { + total_weight += weight; + if total_weight > full_threshold { + return Err(cost::MALFORMED_CATCH_UP); + } + } else { + debug!(target: "afg", "Skipping catch up message containing unknown voter {}", id); + return Err(cost::MALFORMED_CATCH_UP); + } + } + + if total_weight < voters.threshold() { + return Err(cost::MALFORMED_CATCH_UP); + } + + Ok(()) + }; + + check_weight( + voters, + msg.prevotes.iter().map(|vote| &vote.id), + full_threshold, + )?; + + check_weight( + voters, + msg.precommits.iter().map(|vote| &vote.id), + full_threshold, + )?; + + fn check_signatures<'a, B, I>( + messages: I, + round: RoundNumber, + set_id: SetIdNumber, + mut signatures_checked: usize, + buf: &mut Vec, + ) -> Result + where + B: BlockT, + I: Iterator, &'a AuthorityId, &'a AuthoritySignature)>, + { + use crate::communication::gossip::Misbehavior; + + for (msg, id, sig) in messages { + signatures_checked += 1; + + if let Err(()) = check_message_sig_with_buffer::(&msg, id, sig, round, set_id, buf) { + debug!(target: "afg", "Bad catch up message signature {}", id); + telemetry!(CONSENSUS_DEBUG; "afg.bad_catch_up_msg_signature"; "id" => ?id); + + let cost = Misbehavior::BadCatchUpMessage { + signatures_checked: signatures_checked as i32, + } + .cost(); + + return Err(cost); + } + } + + 
Ok(signatures_checked) + } + + let mut buf = Vec::new(); + + // check signatures on all contained prevotes. + let signatures_checked = check_signatures::( + msg.prevotes.iter().map(|vote| { + ( + finality_grandpa::Message::Prevote(vote.prevote.clone()), + &vote.id, + &vote.signature, + ) + }), + msg.round_number, + set_id.0, + 0, + &mut buf, + )?; + + // check signatures on all contained precommits. + let _ = check_signatures::( + msg.precommits.iter().map(|vote| { + ( + finality_grandpa::Message::Precommit(vote.precommit.clone()), + &vote.id, + &vote.signature, + ) + }), + msg.round_number, + set_id.0, + signatures_checked, + &mut buf, + )?; + + Ok(()) } /// An output sink for commit messages. struct CommitsOut { - network: Arc>>, - set_id: SetId, - is_voter: bool, - gossip_validator: Arc>, - neighbor_sender: periodic::NeighborPacketSender, + network: Arc>>, + set_id: SetId, + is_voter: bool, + gossip_validator: Arc>, + neighbor_sender: periodic::NeighborPacketSender, } impl CommitsOut { - /// Create a new commit output stream. - pub(crate) fn new( - network: Arc>>, - set_id: SetIdNumber, - is_voter: bool, - gossip_validator: Arc>, - neighbor_sender: periodic::NeighborPacketSender, - ) -> Self { - CommitsOut { - network, - set_id: SetId(set_id), - is_voter, - gossip_validator, - neighbor_sender, - } - } + /// Create a new commit output stream. 
+ pub(crate) fn new( + network: Arc>>, + set_id: SetIdNumber, + is_voter: bool, + gossip_validator: Arc>, + neighbor_sender: periodic::NeighborPacketSender, + ) -> Self { + CommitsOut { + network, + set_id: SetId(set_id), + is_voter, + gossip_validator, + neighbor_sender, + } + } } impl Sink<(RoundNumber, Commit)> for CommitsOut { - type Error = Error; - - fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, input: (RoundNumber, Commit)) -> Result<(), Self::Error> { - if !self.is_voter { - return Ok(()); - } - - let (round, commit) = input; - let round = Round(round); - - telemetry!(CONSENSUS_DEBUG; "afg.commit_issued"; - "target_number" => ?commit.target_number, "target_hash" => ?commit.target_hash, - ); - let (precommits, auth_data) = commit.precommits.into_iter() - .map(|signed| (signed.precommit, (signed.signature, signed.id))) - .unzip(); - - let compact_commit = CompactCommit:: { - target_hash: commit.target_hash, - target_number: commit.target_number, - precommits, - auth_data - }; - - let message = GossipMessage::Commit(FullCommitMessage:: { - round, - set_id: self.set_id, - message: compact_commit, - }); - - let topic = global_topic::(self.set_id.0); - - // the gossip validator needs to be made aware of the best commit-height we know of - // before gossiping - self.gossip_validator.note_commit_finalized( - round, - self.set_id, - commit.target_number, - |to, neighbor| self.neighbor_sender.send(to, neighbor), - ); - self.network.lock().gossip_message(topic, message.encode(), false); - - Ok(()) - } - - fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Ready(Ok(())) - } + type Error = Error; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send( + self: Pin<&mut Self>, + input: (RoundNumber, Commit), + ) -> 
Result<(), Self::Error> { + if !self.is_voter { + return Ok(()); + } + + let (round, commit) = input; + let round = Round(round); + + telemetry!(CONSENSUS_DEBUG; "afg.commit_issued"; + "target_number" => ?commit.target_number, "target_hash" => ?commit.target_hash, + ); + let (precommits, auth_data) = commit + .precommits + .into_iter() + .map(|signed| (signed.precommit, (signed.signature, signed.id))) + .unzip(); + + let compact_commit = CompactCommit:: { + target_hash: commit.target_hash, + target_number: commit.target_number, + precommits, + auth_data, + }; + + let message = GossipMessage::Commit(FullCommitMessage:: { + round, + set_id: self.set_id, + message: compact_commit, + }); + + let topic = global_topic::(self.set_id.0); + + // the gossip validator needs to be made aware of the best commit-height we know of + // before gossiping + self.gossip_validator.note_commit_finalized( + round, + self.set_id, + commit.target_number, + |to, neighbor| self.neighbor_sender.send(to, neighbor), + ); + self.network + .lock() + .gossip_message(topic, message.encode(), false); + + Ok(()) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } } diff --git a/client/finality-grandpa/src/communication/periodic.rs b/client/finality-grandpa/src/communication/periodic.rs index f894624bdf..1a51164ea8 100644 --- a/client/finality-grandpa/src/communication/periodic.rs +++ b/client/finality-grandpa/src/communication/periodic.rs @@ -16,15 +16,19 @@ //! Periodic rebroadcast of neighbor packets. 
+use futures::{future::FutureExt as _, prelude::*, ready, stream::Stream}; use futures_timer::Delay; -use futures::{future::{FutureExt as _}, prelude::*, ready, stream::Stream}; use log::debug; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; +use super::gossip::{GossipMessage, NeighborPacket}; use sc_network::PeerId; -use sp_runtime::traits::{NumberFor, Block as BlockT}; -use super::gossip::{NeighborPacket, GossipMessage}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; // How often to rebroadcast, in cases where no new packets are created. const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); @@ -32,20 +36,20 @@ const REBROADCAST_AFTER: Duration = Duration::from_secs(2 * 60); /// A sender used to send neighbor packets to a background job. #[derive(Clone)] pub(super) struct NeighborPacketSender( - TracingUnboundedSender<(Vec, NeighborPacket>)> + TracingUnboundedSender<(Vec, NeighborPacket>)>, ); impl NeighborPacketSender { - /// Send a neighbor packet for the background worker to gossip to peers. - pub fn send( - &self, - who: Vec, - neighbor_packet: NeighborPacket>, - ) { - if let Err(err) = self.0.unbounded_send((who, neighbor_packet)) { - debug!(target: "afg", "Failed to send neighbor packet: {:?}", err); - } - } + /// Send a neighbor packet for the background worker to gossip to peers. + pub fn send( + &self, + who: Vec, + neighbor_packet: NeighborPacket>, + ) { + if let Err(err) = self.0.unbounded_send((who, neighbor_packet)) { + debug!(target: "afg", "Failed to send neighbor packet: {:?}", err); + } + } } /// NeighborPacketWorker is listening on a channel for new neighbor packets being produced by @@ -53,61 +57,64 @@ impl NeighborPacketSender { /// `NetworkEngine` through the `NetworkBridge` that it is being polled by (see `Stream` /// implementation). 
Periodically it sends out the last packet in cases where no new ones arrive. pub(super) struct NeighborPacketWorker { - last: Option<(Vec, NeighborPacket>)>, - delay: Delay, - rx: TracingUnboundedReceiver<(Vec, NeighborPacket>)>, + last: Option<(Vec, NeighborPacket>)>, + delay: Delay, + rx: TracingUnboundedReceiver<(Vec, NeighborPacket>)>, } impl Unpin for NeighborPacketWorker {} impl NeighborPacketWorker { - pub(super) fn new() -> (Self, NeighborPacketSender){ - let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)> - ("mpsc_grandpa_neighbor_packet_worker"); - let delay = Delay::new(REBROADCAST_AFTER); - - (NeighborPacketWorker { - last: None, - delay, - rx, - }, NeighborPacketSender(tx)) - } + pub(super) fn new() -> (Self, NeighborPacketSender) { + let (tx, rx) = tracing_unbounded::<(Vec, NeighborPacket>)>( + "mpsc_grandpa_neighbor_packet_worker", + ); + let delay = Delay::new(REBROADCAST_AFTER); + + ( + NeighborPacketWorker { + last: None, + delay, + rx, + }, + NeighborPacketSender(tx), + ) + } } -impl Stream for NeighborPacketWorker { - type Item = (Vec, GossipMessage); +impl Stream for NeighborPacketWorker { + type Item = (Vec, GossipMessage); - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> - { - let this = &mut *self; - match this.rx.poll_next_unpin(cx) { - Poll::Ready(None) => return Poll::Ready(None), - Poll::Ready(Some((to, packet))) => { - this.delay.reset(REBROADCAST_AFTER); - this.last = Some((to.clone(), packet.clone())); + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = &mut *self; + match this.rx.poll_next_unpin(cx) { + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some((to, packet))) => { + this.delay.reset(REBROADCAST_AFTER); + this.last = Some((to.clone(), packet.clone())); - return Poll::Ready(Some((to, GossipMessage::::from(packet.clone())))); - } - // Don't return yet, maybe the timer fired. 
- Poll::Pending => {}, - }; + return Poll::Ready(Some((to, GossipMessage::::from(packet.clone())))); + } + // Don't return yet, maybe the timer fired. + Poll::Pending => {} + }; - ready!(this.delay.poll_unpin(cx)); + ready!(this.delay.poll_unpin(cx)); - // Getting this far here implies that the timer fired. + // Getting this far here implies that the timer fired. - this.delay.reset(REBROADCAST_AFTER); + this.delay.reset(REBROADCAST_AFTER); - // Make sure the underlying task is scheduled for wake-up. - // - // Note: In case poll_unpin is called after the resetted delay fires again, this - // will drop one tick. Deemed as very unlikely and also not critical. - while let Poll::Ready(()) = this.delay.poll_unpin(cx) {}; + // Make sure the underlying task is scheduled for wake-up. + // + // Note: In case poll_unpin is called after the resetted delay fires again, this + // will drop one tick. Deemed as very unlikely and also not critical. + while let Poll::Ready(()) = this.delay.poll_unpin(cx) {} - if let Some((ref to, ref packet)) = this.last { - return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))); - } + if let Some((ref to, ref packet)) = this.last { + return Poll::Ready(Some((to.clone(), GossipMessage::::from(packet.clone())))); + } - return Poll::Pending; - } + return Poll::Pending; + } } diff --git a/client/finality-grandpa/src/communication/tests.rs b/client/finality-grandpa/src/communication/tests.rs index ea995eff63..a4fd590ee0 100644 --- a/client/finality-grandpa/src/communication/tests.rs +++ b/client/finality-grandpa/src/communication/tests.rs @@ -16,517 +16,549 @@ //! Tests for the communication portion of the GRANDPA crate. 
-use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use super::gossip::{self, GossipValidator}; +use super::{AuthorityId, Round, SetId, VoterSet}; +use crate::environment::SharedVoterSetState; use futures::prelude::*; +use parity_scale_codec::Encode; use sc_network::{Event as NetworkEvent, ObservedRole, PeerId}; -use sc_network_test::{Block, Hash}; use sc_network_gossip::Validator; -use std::sync::Arc; -use sp_keyring::Ed25519Keyring; -use parity_scale_codec::Encode; -use sp_runtime::{ConsensusEngineId, traits::NumberFor}; -use std::{borrow::Cow, pin::Pin, task::{Context, Poll}}; -use crate::environment::SharedVoterSetState; +use sc_network_test::{Block, Hash}; use sp_finality_grandpa::{AuthorityList, GRANDPA_ENGINE_ID}; -use super::gossip::{self, GossipValidator}; -use super::{AuthorityId, VoterSet, Round, SetId}; +use sp_keyring::Ed25519Keyring; +use sp_runtime::{traits::NumberFor, ConsensusEngineId}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::sync::Arc; +use std::{ + borrow::Cow, + pin::Pin, + task::{Context, Poll}, +}; #[derive(Debug)] pub(crate) enum Event { - EventStream(TracingUnboundedSender), - WriteNotification(sc_network::PeerId, Vec), - Report(sc_network::PeerId, sc_network::ReputationChange), - Announce(Hash), + EventStream(TracingUnboundedSender), + WriteNotification(sc_network::PeerId, Vec), + Report(sc_network::PeerId, sc_network::ReputationChange), + Announce(Hash), } #[derive(Clone)] pub(crate) struct TestNetwork { - sender: TracingUnboundedSender, + sender: TracingUnboundedSender, } impl sc_network_gossip::Network for TestNetwork { - fn event_stream(&self) -> Pin + Send>> { - let (tx, rx) = tracing_unbounded("test"); - let _ = self.sender.unbounded_send(Event::EventStream(tx)); - Box::pin(rx) - } + fn event_stream(&self) -> Pin + Send>> { + let (tx, rx) = tracing_unbounded("test"); + let _ = self.sender.unbounded_send(Event::EventStream(tx)); + 
Box::pin(rx) + } - fn report_peer(&self, who: sc_network::PeerId, cost_benefit: sc_network::ReputationChange) { - let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); - } + fn report_peer(&self, who: sc_network::PeerId, cost_benefit: sc_network::ReputationChange) { + let _ = self.sender.unbounded_send(Event::Report(who, cost_benefit)); + } - fn disconnect_peer(&self, _: PeerId) {} + fn disconnect_peer(&self, _: PeerId) {} - fn write_notification(&self, who: PeerId, _: ConsensusEngineId, message: Vec) { - let _ = self.sender.unbounded_send(Event::WriteNotification(who, message)); - } + fn write_notification(&self, who: PeerId, _: ConsensusEngineId, message: Vec) { + let _ = self + .sender + .unbounded_send(Event::WriteNotification(who, message)); + } - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} + fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} - fn announce(&self, block: Hash, _associated_data: Vec) { - let _ = self.sender.unbounded_send(Event::Announce(block)); - } + fn announce(&self, block: Hash, _associated_data: Vec) { + let _ = self.sender.unbounded_send(Event::Announce(block)); + } } impl super::Network for TestNetwork { - fn set_sync_fork_request( - &self, - _peers: Vec, - _hash: Hash, - _number: NumberFor, - ) {} + fn set_sync_fork_request( + &self, + _peers: Vec, + _hash: Hash, + _number: NumberFor, + ) { + } } impl sc_network_gossip::ValidatorContext for TestNetwork { - fn broadcast_topic(&mut self, _: Hash, _: bool) { } + fn broadcast_topic(&mut self, _: Hash, _: bool) {} - fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) { } + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} - fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { - >::write_notification( - self, - who.clone(), - GRANDPA_ENGINE_ID, - data, - ); - } + fn send_message(&mut self, who: &sc_network::PeerId, data: Vec) { + >::write_notification( + self, + 
who.clone(), + GRANDPA_ENGINE_ID, + data, + ); + } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) { } + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } pub(crate) struct Tester { - pub(crate) net_handle: super::NetworkBridge, - gossip_validator: Arc>, - pub(crate) events: TracingUnboundedReceiver, + pub(crate) net_handle: super::NetworkBridge, + gossip_validator: Arc>, + pub(crate) events: TracingUnboundedReceiver, } impl Tester { - fn filter_network_events(self, mut pred: F) -> impl Future - where F: FnMut(Event) -> bool - { - let mut s = Some(self); - futures::future::poll_fn(move |cx| loop { - match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) { - Poll::Ready(None) => panic!("concluded early"), - Poll::Ready(Some(item)) => if pred(item) { - return Poll::Ready(s.take().unwrap()) - }, - Poll::Pending => return Poll::Pending, - } - }) - } - - pub(crate) fn trigger_gossip_validator_reputation_change(&self, p: &PeerId) { - self.gossip_validator.validate( - &mut crate::communication::tests::NoopContext, - p, - &vec![1, 2, 3], - ); - } + fn filter_network_events(self, mut pred: F) -> impl Future + where + F: FnMut(Event) -> bool, + { + let mut s = Some(self); + futures::future::poll_fn(move |cx| loop { + match Stream::poll_next(Pin::new(&mut s.as_mut().unwrap().events), cx) { + Poll::Ready(None) => panic!("concluded early"), + Poll::Ready(Some(item)) => { + if pred(item) { + return Poll::Ready(s.take().unwrap()); + } + } + Poll::Pending => return Poll::Pending, + } + }) + } + + pub(crate) fn trigger_gossip_validator_reputation_change(&self, p: &PeerId) { + self.gossip_validator.validate( + &mut crate::communication::tests::NoopContext, + p, + &vec![1, 2, 3], + ); + } } // some random config (not really needed) fn config() -> crate::Config { - crate::Config { - gossip_duration: std::time::Duration::from_millis(10), - justification_period: 256, - keystore: None, - name: None, - is_authority: true, - 
observer_enabled: true, - } + crate::Config { + gossip_duration: std::time::Duration::from_millis(10), + justification_period: 256, + keystore: None, + name: None, + is_authority: true, + observer_enabled: true, + } } // dummy voter set state fn voter_set_state() -> SharedVoterSetState { - use crate::authorities::AuthoritySet; - use crate::environment::VoterSetState; - use finality_grandpa::round::State as RoundState; - use sp_core::H256; - - let state = RoundState::genesis((H256::zero(), 0)); - let base = state.prevote_ghost.unwrap(); - let voters = AuthoritySet::genesis(Vec::new()); - let set_state = VoterSetState::live( - 0, - &voters, - base, - ); - - set_state.into() + use crate::authorities::AuthoritySet; + use crate::environment::VoterSetState; + use finality_grandpa::round::State as RoundState; + use sp_core::H256; + + let state = RoundState::genesis((H256::zero(), 0)); + let base = state.prevote_ghost.unwrap(); + let voters = AuthoritySet::genesis(Vec::new()); + let set_state = VoterSetState::live(0, &voters, base); + + set_state.into() } // needs to run in a tokio runtime. 
-pub(crate) fn make_test_network() -> ( - impl Future, - TestNetwork, -) { - let (tx, rx) = tracing_unbounded("test"); - let net = TestNetwork { sender: tx }; - - #[derive(Clone)] - struct Exit; - - impl futures::Future for Exit { - type Output = (); - - fn poll(self: Pin<&mut Self>, _: &mut Context) -> Poll<()> { - Poll::Pending - } - } - - let bridge = super::NetworkBridge::new( - net.clone(), - config(), - voter_set_state(), - None, - ); - - ( - futures::future::ready(Tester { - gossip_validator: bridge.validator.clone(), - net_handle: bridge, - events: rx, - }), - net, - ) +pub(crate) fn make_test_network() -> (impl Future, TestNetwork) { + let (tx, rx) = tracing_unbounded("test"); + let net = TestNetwork { sender: tx }; + + #[derive(Clone)] + struct Exit; + + impl futures::Future for Exit { + type Output = (); + + fn poll(self: Pin<&mut Self>, _: &mut Context) -> Poll<()> { + Poll::Pending + } + } + + let bridge = super::NetworkBridge::new(net.clone(), config(), voter_set_state(), None); + + ( + futures::future::ready(Tester { + gossip_validator: bridge.validator.clone(), + net_handle: bridge, + events: rx, + }), + net, + ) } fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter() - .map(|key| key.clone().public().into()) - .map(|id| (id, 1)) - .collect() + keys.iter() + .map(|key| key.clone().public().into()) + .map(|id| (id, 1)) + .collect() } struct NoopContext; impl sc_network_gossip::ValidatorContext for NoopContext { - fn broadcast_topic(&mut self, _: Hash, _: bool) { } - fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) { } - fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) { } - fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) { } + fn broadcast_topic(&mut self, _: Hash, _: bool) {} + fn broadcast_message(&mut self, _: Hash, _: Vec, _: bool) {} + fn send_message(&mut self, _: &sc_network::PeerId, _: Vec) {} + fn send_topic(&mut self, _: &sc_network::PeerId, _: Hash, _: bool) {} } #[test] fn 
good_commit_leads_to_relay() { - let private = [Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let public = make_ids(&private[..]); - let voter_set = Arc::new(public.iter().cloned().collect::>()); - - let round = 1; - let set_id = 1; - - let commit = { - let target_hash: Hash = [1; 32].into(); - let target_number = 500; - - let precommit = finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; - let payload = super::localized_payload( - round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()) - ); - - let mut precommits = Vec::new(); - let mut auth_data = Vec::new(); - - for (i, key) in private.iter().enumerate() { - precommits.push(precommit.clone()); - - let signature = sp_finality_grandpa::AuthoritySignature::from(key.sign(&payload[..])); - auth_data.push((signature, public[i].0.clone())) - } - - finality_grandpa::CompactCommit { - target_hash, - target_number, - precommits, - auth_data, - } - }; - - let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { - round: Round(round), - set_id: SetId(set_id), - message: commit, - }).encode(); - - let id = sc_network::PeerId::random(); - let global_topic = super::global_topic::(set_id); - - let test = make_test_network().0 - .then(move |tester| { - // register a peer. - tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); - future::ready((tester, id)) - }) - .then(move |(tester, id)| { - // start round, dispatch commit, and wait for broadcast. - let (commits_in, _) = tester.net_handle.global_communication(SetId(1), voter_set, false); - - { - let (action, ..) 
= tester.gossip_validator.do_validate(&id, &encoded_commit[..]); - match action { - gossip::Action::ProcessAndDiscard(t, _) => assert_eq!(t, global_topic), - _ => panic!("wrong expected outcome from initial commit validation"), - } - } - - let commit_to_send = encoded_commit.clone(); - let network_bridge = tester.net_handle.clone(); - - // asking for global communication will cause the test network - // to send us an event asking us for a stream. use it to - // send a message. - let sender_id = id.clone(); - let send_message = tester.filter_network_events(move |event| match event { - Event::EventStream(sender) => { - // Add the sending peer and send the commit - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, - role: ObservedRole::Full, - }); - - let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], - }); - - // Add a random peer which will be the recipient of this message - let receiver_id = sc_network::PeerId::random(); - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: receiver_id.clone(), - engine_id: GRANDPA_ENGINE_ID, - role: ObservedRole::Full, - }); - - // Announce its local set has being on the current set id through a neighbor - // packet, otherwise it won't be eligible to receive the commit - let _ = { - let update = gossip::VersionedNeighborPacket::V1( - gossip::NeighborPacket { - round: Round(round), - set_id: SetId(set_id), - commit_finalized_height: 1, - } - ); - - let msg = gossip::GossipMessage::::Neighbor(update); - - sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: receiver_id, - messages: vec![(GRANDPA_ENGINE_ID, msg.encode().into())], - }) - }; - - true - } - _ => false, - }); - - // when the commit comes in, we'll tell the callback it was good. 
- let handle_commit = commits_in.into_future() - .map(|(item, _)| { - match item.unwrap() { - finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { - callback.run(finality_grandpa::voter::CommitProcessingOutcome::good()); - }, - _ => panic!("commit expected"), - } - }); - - // once the message is sent and commit is "handled" we should have - // a repropagation event coming from the network. - let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { - tester.filter_network_events(move |event| match event { - Event::WriteNotification(_, data) => { - data == encoded_commit - } - _ => false, - }) - }) - .map(|_| ()); - - // Poll both the future sending and handling the commit, as well as the underlying - // NetworkBridge. Complete once the former completes. - future::select(fut, network_bridge) - }); - - futures::executor::block_on(test); + let private = [ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let public = make_ids(&private[..]); + let voter_set = Arc::new(public.iter().cloned().collect::>()); + + let round = 1; + let set_id = 1; + + let commit = { + let target_hash: Hash = [1; 32].into(); + let target_number = 500; + + let precommit = finality_grandpa::Precommit { + target_hash: target_hash.clone(), + target_number, + }; + let payload = super::localized_payload( + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), + ); + + let mut precommits = Vec::new(); + let mut auth_data = Vec::new(); + + for (i, key) in private.iter().enumerate() { + precommits.push(precommit.clone()); + + let signature = sp_finality_grandpa::AuthoritySignature::from(key.sign(&payload[..])); + auth_data.push((signature, public[i].0.clone())) + } + + finality_grandpa::CompactCommit { + target_hash, + target_number, + precommits, + auth_data, + } + }; + + let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { + round: Round(round), + set_id: SetId(set_id), + 
message: commit, + }) + .encode(); + + let id = sc_network::PeerId::random(); + let global_topic = super::global_topic::(set_id); + + let test = make_test_network() + .0 + .then(move |tester| { + // register a peer. + tester + .gossip_validator + .new_peer(&mut NoopContext, &id, ObservedRole::Full); + future::ready((tester, id)) + }) + .then(move |(tester, id)| { + // start round, dispatch commit, and wait for broadcast. + let (commits_in, _) = + tester + .net_handle + .global_communication(SetId(1), voter_set, false); + + { + let (action, ..) = tester + .gossip_validator + .do_validate(&id, &encoded_commit[..]); + match action { + gossip::Action::ProcessAndDiscard(t, _) => assert_eq!(t, global_topic), + _ => panic!("wrong expected outcome from initial commit validation"), + } + } + + let commit_to_send = encoded_commit.clone(); + let network_bridge = tester.net_handle.clone(); + + // asking for global communication will cause the test network + // to send us an event asking us for a stream. use it to + // send a message. 
+ let sender_id = id.clone(); + let send_message = tester.filter_network_events(move |event| match event { + Event::EventStream(sender) => { + // Add the sending peer and send the commit + let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { + remote: sender_id.clone(), + engine_id: GRANDPA_ENGINE_ID, + role: ObservedRole::Full, + }); + + let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { + remote: sender_id.clone(), + messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + }); + + // Add a random peer which will be the recipient of this message + let receiver_id = sc_network::PeerId::random(); + let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { + remote: receiver_id.clone(), + engine_id: GRANDPA_ENGINE_ID, + role: ObservedRole::Full, + }); + + // Announce its local set has being on the current set id through a neighbor + // packet, otherwise it won't be eligible to receive the commit + let _ = { + let update = gossip::VersionedNeighborPacket::V1(gossip::NeighborPacket { + round: Round(round), + set_id: SetId(set_id), + commit_finalized_height: 1, + }); + + let msg = gossip::GossipMessage::::Neighbor(update); + + sender.unbounded_send(NetworkEvent::NotificationsReceived { + remote: receiver_id, + messages: vec![(GRANDPA_ENGINE_ID, msg.encode().into())], + }) + }; + + true + } + _ => false, + }); + + // when the commit comes in, we'll tell the callback it was good. + let handle_commit = commits_in + .into_future() + .map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::good()); + } + _ => panic!("commit expected"), + }); + + // once the message is sent and commit is "handled" we should have + // a repropagation event coming from the network. 
+ let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::WriteNotification(_, data) => data == encoded_commit, + _ => false, + }) + }) + .map(|_| ()); + + // Poll both the future sending and handling the commit, as well as the underlying + // NetworkBridge. Complete once the former completes. + future::select(fut, network_bridge) + }); + + futures::executor::block_on(test); } #[test] fn bad_commit_leads_to_report() { - let _ = env_logger::try_init(); - let private = [Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let public = make_ids(&private[..]); - let voter_set = Arc::new(public.iter().cloned().collect::>()); - - let round = 1; - let set_id = 1; - - let commit = { - let target_hash: Hash = [1; 32].into(); - let target_number = 500; - - let precommit = finality_grandpa::Precommit { target_hash: target_hash.clone(), target_number }; - let payload = super::localized_payload( - round, set_id, &finality_grandpa::Message::Precommit(precommit.clone()) - ); - - let mut precommits = Vec::new(); - let mut auth_data = Vec::new(); - - for (i, key) in private.iter().enumerate() { - precommits.push(precommit.clone()); - - let signature = sp_finality_grandpa::AuthoritySignature::from(key.sign(&payload[..])); - auth_data.push((signature, public[i].0.clone())) - } - - finality_grandpa::CompactCommit { - target_hash, - target_number, - precommits, - auth_data, - } - }; - - let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { - round: Round(round), - set_id: SetId(set_id), - message: commit, - }).encode(); - - let id = sc_network::PeerId::random(); - let global_topic = super::global_topic::(set_id); - - let test = make_test_network().0 - .map(move |tester| { - // register a peer. 
- tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Full); - (tester, id) - }) - .then(move |(tester, id)| { - // start round, dispatch commit, and wait for broadcast. - let (commits_in, _) = tester.net_handle.global_communication(SetId(1), voter_set, false); - - { - let (action, ..) = tester.gossip_validator.do_validate(&id, &encoded_commit[..]); - match action { - gossip::Action::ProcessAndDiscard(t, _) => assert_eq!(t, global_topic), - _ => panic!("wrong expected outcome from initial commit validation"), - } - } - - let commit_to_send = encoded_commit.clone(); - let network_bridge = tester.net_handle.clone(); - - // asking for global communication will cause the test network - // to send us an event asking us for a stream. use it to - // send a message. - let sender_id = id.clone(); - let send_message = tester.filter_network_events(move |event| match event { - Event::EventStream(sender) => { - let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { - remote: sender_id.clone(), - engine_id: GRANDPA_ENGINE_ID, - role: ObservedRole::Full, - }); - let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { - remote: sender_id.clone(), - messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], - }); - - true - } - _ => false, - }); - - // when the commit comes in, we'll tell the callback it was bad. - let handle_commit = commits_in.into_future() - .map(|(item, _)| { - match item.unwrap() { - finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { - callback.run(finality_grandpa::voter::CommitProcessingOutcome::bad()); - }, - _ => panic!("commit expected"), - } - }); - - // once the message is sent and commit is "handled" we should have - // a report event coming from the network. 
- let fut = future::join(send_message, handle_commit).then(move |(tester, ())| { - tester.filter_network_events(move |event| match event { - Event::Report(who, cost_benefit) => { - who == id && cost_benefit == super::cost::INVALID_COMMIT - } - _ => false, - }) - }) - .map(|_| ()); - - // Poll both the future sending and handling the commit, as well as the underlying - // NetworkBridge. Complete once the former completes. - future::select(fut, network_bridge) - }); - - futures::executor::block_on(test); + let _ = env_logger::try_init(); + let private = [ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let public = make_ids(&private[..]); + let voter_set = Arc::new(public.iter().cloned().collect::>()); + + let round = 1; + let set_id = 1; + + let commit = { + let target_hash: Hash = [1; 32].into(); + let target_number = 500; + + let precommit = finality_grandpa::Precommit { + target_hash: target_hash.clone(), + target_number, + }; + let payload = super::localized_payload( + round, + set_id, + &finality_grandpa::Message::Precommit(precommit.clone()), + ); + + let mut precommits = Vec::new(); + let mut auth_data = Vec::new(); + + for (i, key) in private.iter().enumerate() { + precommits.push(precommit.clone()); + + let signature = sp_finality_grandpa::AuthoritySignature::from(key.sign(&payload[..])); + auth_data.push((signature, public[i].0.clone())) + } + + finality_grandpa::CompactCommit { + target_hash, + target_number, + precommits, + auth_data, + } + }; + + let encoded_commit = gossip::GossipMessage::::Commit(gossip::FullCommitMessage { + round: Round(round), + set_id: SetId(set_id), + message: commit, + }) + .encode(); + + let id = sc_network::PeerId::random(); + let global_topic = super::global_topic::(set_id); + + let test = make_test_network() + .0 + .map(move |tester| { + // register a peer. 
+ tester + .gossip_validator + .new_peer(&mut NoopContext, &id, ObservedRole::Full); + (tester, id) + }) + .then(move |(tester, id)| { + // start round, dispatch commit, and wait for broadcast. + let (commits_in, _) = + tester + .net_handle + .global_communication(SetId(1), voter_set, false); + + { + let (action, ..) = tester + .gossip_validator + .do_validate(&id, &encoded_commit[..]); + match action { + gossip::Action::ProcessAndDiscard(t, _) => assert_eq!(t, global_topic), + _ => panic!("wrong expected outcome from initial commit validation"), + } + } + + let commit_to_send = encoded_commit.clone(); + let network_bridge = tester.net_handle.clone(); + + // asking for global communication will cause the test network + // to send us an event asking us for a stream. use it to + // send a message. + let sender_id = id.clone(); + let send_message = tester.filter_network_events(move |event| match event { + Event::EventStream(sender) => { + let _ = sender.unbounded_send(NetworkEvent::NotificationStreamOpened { + remote: sender_id.clone(), + engine_id: GRANDPA_ENGINE_ID, + role: ObservedRole::Full, + }); + let _ = sender.unbounded_send(NetworkEvent::NotificationsReceived { + remote: sender_id.clone(), + messages: vec![(GRANDPA_ENGINE_ID, commit_to_send.clone().into())], + }); + + true + } + _ => false, + }); + + // when the commit comes in, we'll tell the callback it was bad. + let handle_commit = commits_in + .into_future() + .map(|(item, _)| match item.unwrap() { + finality_grandpa::voter::CommunicationIn::Commit(_, _, mut callback) => { + callback.run(finality_grandpa::voter::CommitProcessingOutcome::bad()); + } + _ => panic!("commit expected"), + }); + + // once the message is sent and commit is "handled" we should have + // a report event coming from the network. 
+ let fut = future::join(send_message, handle_commit) + .then(move |(tester, ())| { + tester.filter_network_events(move |event| match event { + Event::Report(who, cost_benefit) => { + who == id && cost_benefit == super::cost::INVALID_COMMIT + } + _ => false, + }) + }) + .map(|_| ()); + + // Poll both the future sending and handling the commit, as well as the underlying + // NetworkBridge. Complete once the former completes. + future::select(fut, network_bridge) + }); + + futures::executor::block_on(test); } #[test] fn peer_with_higher_view_leads_to_catch_up_request() { - let id = sc_network::PeerId::random(); - - let (tester, mut net) = make_test_network(); - let test = tester - .map(move |tester| { - // register a peer with authority role. - tester.gossip_validator.new_peer(&mut NoopContext, &id, ObservedRole::Authority); - (tester, id) - }) - .then(move |(tester, id)| { - // send neighbor message at round 10 and height 50 - let result = tester.gossip_validator.validate( - &mut net, - &id, - &gossip::GossipMessage::::from(gossip::NeighborPacket { - set_id: SetId(0), - round: Round(10), - commit_finalized_height: 50, - }).encode(), - ); - - // neighbor packets are always discard - match result { - sc_network_gossip::ValidationResult::Discard => {}, - _ => panic!("wrong expected outcome from neighbor validation"), - } - - // a catch up request should be sent to the peer for round - 1 - tester.filter_network_events(move |event| match event { - Event::WriteNotification(peer, message) => { - assert_eq!( - peer, - id, - ); - - assert_eq!( - message, - gossip::GossipMessage::::CatchUpRequest( - gossip::CatchUpRequestMessage { - set_id: SetId(0), - round: Round(9), - } - ).encode(), - ); - - true - }, - _ => false, - }) - .map(|_| ()) - }); - - futures::executor::block_on(test); + let id = sc_network::PeerId::random(); + + let (tester, mut net) = make_test_network(); + let test = tester + .map(move |tester| { + // register a peer with authority role. 
+ tester + .gossip_validator + .new_peer(&mut NoopContext, &id, ObservedRole::Authority); + (tester, id) + }) + .then(move |(tester, id)| { + // send neighbor message at round 10 and height 50 + let result = tester.gossip_validator.validate( + &mut net, + &id, + &gossip::GossipMessage::::from(gossip::NeighborPacket { + set_id: SetId(0), + round: Round(10), + commit_finalized_height: 50, + }) + .encode(), + ); + + // neighbor packets are always discard + match result { + sc_network_gossip::ValidationResult::Discard => {} + _ => panic!("wrong expected outcome from neighbor validation"), + } + + // a catch up request should be sent to the peer for round - 1 + tester + .filter_network_events(move |event| match event { + Event::WriteNotification(peer, message) => { + assert_eq!(peer, id,); + + assert_eq!( + message, + gossip::GossipMessage::::CatchUpRequest( + gossip::CatchUpRequestMessage { + set_id: SetId(0), + round: Round(9), + } + ) + .encode(), + ); + + true + } + _ => false, + }) + .map(|_| ()) + }); + + futures::executor::block_on(test); } diff --git a/client/finality-grandpa/src/consensus_changes.rs b/client/finality-grandpa/src/consensus_changes.rs index 1ce7b551d0..e73b445d58 100644 --- a/client/finality-grandpa/src/consensus_changes.rs +++ b/client/finality-grandpa/src/consensus_changes.rs @@ -14,64 +14,73 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use parity_scale_codec::{Decode, Encode}; use std::sync::Arc; -use parity_scale_codec::{Encode, Decode}; /// Consensus-related data changes tracker. #[derive(Clone, Debug, Encode, Decode)] pub(crate) struct ConsensusChanges { - pending_changes: Vec<(N, H)>, + pending_changes: Vec<(N, H)>, } impl ConsensusChanges { - /// Create empty consensus changes. - pub(crate) fn empty() -> Self { - ConsensusChanges { pending_changes: Vec::new(), } - } + /// Create empty consensus changes. 
+ pub(crate) fn empty() -> Self { + ConsensusChanges { + pending_changes: Vec::new(), + } + } } impl ConsensusChanges { + /// Returns reference to all pending changes. + pub fn pending_changes(&self) -> &[(N, H)] { + &self.pending_changes + } - /// Returns reference to all pending changes. - pub fn pending_changes(&self) -> &[(N, H)] { - &self.pending_changes - } + /// Note unfinalized change of consensus-related data. + pub(crate) fn note_change(&mut self, at: (N, H)) { + let idx = self + .pending_changes + .binary_search_by_key(&at.0, |change| change.0) + .unwrap_or_else(|i| i); + self.pending_changes.insert(idx, at); + } - /// Note unfinalized change of consensus-related data. - pub(crate) fn note_change(&mut self, at: (N, H)) { - let idx = self.pending_changes - .binary_search_by_key(&at.0, |change| change.0) - .unwrap_or_else(|i| i); - self.pending_changes.insert(idx, at); - } + /// Finalize all pending consensus changes that are finalized by given block. + /// Returns true if there any changes were finalized. + pub(crate) fn finalize ::sp_blockchain::Result>>( + &mut self, + block: (N, H), + canonical_at_height: F, + ) -> ::sp_blockchain::Result<(bool, bool)> { + let (split_idx, has_finalized_changes) = self + .pending_changes + .iter() + .enumerate() + .take_while(|(_, &(at_height, _))| at_height <= block.0) + .fold( + (None, Ok(false)), + |(_, has_finalized_changes), (idx, ref at)| { + ( + Some(idx), + has_finalized_changes.and_then(|has_finalized_changes| { + if has_finalized_changes { + Ok(has_finalized_changes) + } else { + canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash) + } + }), + ) + }, + ); - /// Finalize all pending consensus changes that are finalized by given block. - /// Returns true if there any changes were finalized. 
- pub(crate) fn finalize ::sp_blockchain::Result>>( - &mut self, - block: (N, H), - canonical_at_height: F, - ) -> ::sp_blockchain::Result<(bool, bool)> { - let (split_idx, has_finalized_changes) = self.pending_changes.iter() - .enumerate() - .take_while(|(_, &(at_height, _))| at_height <= block.0) - .fold((None, Ok(false)), |(_, has_finalized_changes), (idx, ref at)| - ( - Some(idx), - has_finalized_changes - .and_then(|has_finalized_changes| if has_finalized_changes { - Ok(has_finalized_changes) - } else { - canonical_at_height(at.0).map(|can_hash| Some(at.1) == can_hash) - }), - )); - - let altered_changes = split_idx.is_some(); - if let Some(split_idx) = split_idx { - self.pending_changes = self.pending_changes.split_off(split_idx + 1); - } - has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes)) - } + let altered_changes = split_idx.is_some(); + if let Some(split_idx) = split_idx { + self.pending_changes = self.pending_changes.split_off(split_idx + 1); + } + has_finalized_changes.map(|has_finalized_changes| (altered_changes, has_finalized_changes)) + } } /// Thread-safe consensus changes tracker reference. 
diff --git a/client/finality-grandpa/src/environment.rs b/client/finality-grandpa/src/environment.rs index d3bbc1adb3..9b42984fd5 100644 --- a/client/finality-grandpa/src/environment.rs +++ b/client/finality-grandpa/src/environment.rs @@ -20,30 +20,28 @@ use std::pin::Pin; use std::sync::Arc; use std::time::Duration; -use log::{debug, warn}; -use parity_scale_codec::{Decode, Encode}; use futures::prelude::*; use futures_timer::Delay; +use log::{debug, warn}; +use parity_scale_codec::{Decode, Encode}; use parking_lot::RwLock; -use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; use std::marker::PhantomData; -use sc_client_api::{backend::Backend, utils::is_descendent_of}; -use sc_client::apply_aux; use finality_grandpa::{ - BlockNumberOps, Equivocation, Error as GrandpaError, round::State as RoundState, - voter, voter_set::VoterSet, + round::State as RoundState, voter, voter_set::VoterSet, BlockNumberOps, Equivocation, + Error as GrandpaError, }; +use sc_client::apply_aux; +use sc_client_api::{backend::Backend, utils::is_descendent_of}; +use sc_telemetry::{telemetry, CONSENSUS_INFO}; use sp_core::Pair; use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, Zero, -}; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, One, Zero}; use crate::{ - CommandOrError, Commit, Config, Error, Precommit, Prevote, - PrimaryPropose, SignedMessage, NewAuthoritySet, VoterCommand, + CommandOrError, Commit, Config, Error, NewAuthoritySet, Precommit, Prevote, PrimaryPropose, + SignedMessage, VoterCommand, }; use sp_consensus::SelectChain; @@ -54,28 +52,28 @@ use crate::consensus_changes::SharedConsensusChanges; use crate::justification::GrandpaJustification; use crate::until_imported::UntilVoteTargetImported; use crate::voting_rule::VotingRule; -use 
sp_finality_grandpa::{AuthorityId, AuthoritySignature, SetId, RoundNumber}; -use prometheus_endpoint::{Gauge, U64, register, PrometheusError}; +use prometheus_endpoint::{register, Gauge, PrometheusError, U64}; +use sp_finality_grandpa::{AuthorityId, AuthoritySignature, RoundNumber, SetId}; type HistoricalVotes = finality_grandpa::HistoricalVotes< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// Data about a completed round. The set of votes that is stored must be /// minimal, i.e. at most one equivocation is stored per voter. #[derive(Debug, Clone, Decode, Encode, PartialEq)] pub struct CompletedRound { - /// The round number. - pub number: RoundNumber, - /// The round state (prevote ghost, estimate, finalized, etc.) - pub state: RoundState>, - /// The target block base used for voting in the round. - pub base: (Block::Hash, NumberFor), - /// All the votes observed in the round. - pub votes: Vec>, + /// The round number. + pub number: RoundNumber, + /// The round state (prevote ghost, estimate, finalized, etc.) + pub state: RoundState>, + /// The target block base used for voting in the round. + pub base: (Block::Hash, NumberFor), + /// All the votes observed in the round. + pub votes: Vec>, } // Data about last completed rounds within a single voter set. Stores @@ -83,9 +81,9 @@ pub struct CompletedRound { // (genesis). 
#[derive(Debug, Clone, PartialEq)] pub struct CompletedRounds { - rounds: Vec>, - set_id: SetId, - voters: Vec, + rounds: Vec>, + set_id: SetId, + voters: Vec, } // NOTE: the current strategy for persisting completed rounds is very naive @@ -94,74 +92,81 @@ pub struct CompletedRounds { const NUM_LAST_COMPLETED_ROUNDS: usize = 2; impl Encode for CompletedRounds { - fn encode(&self) -> Vec { - let v = Vec::from_iter(&self.rounds); - (&v, &self.set_id, &self.voters).encode() - } + fn encode(&self) -> Vec { + let v = Vec::from_iter(&self.rounds); + (&v, &self.set_id, &self.voters).encode() + } } impl parity_scale_codec::EncodeLike for CompletedRounds {} impl Decode for CompletedRounds { - fn decode(value: &mut I) -> Result { - <(Vec>, SetId, Vec)>::decode(value) - .map(|(rounds, set_id, voters)| CompletedRounds { - rounds: rounds.into(), - set_id, - voters, - }) - } + fn decode( + value: &mut I, + ) -> Result { + <(Vec>, SetId, Vec)>::decode(value).map( + |(rounds, set_id, voters)| CompletedRounds { + rounds: rounds.into(), + set_id, + voters, + }, + ) + } } impl CompletedRounds { - /// Create a new completed rounds tracker with NUM_LAST_COMPLETED_ROUNDS capacity. - pub(crate) fn new( - genesis: CompletedRound, - set_id: SetId, - voters: &AuthoritySet>, - ) - -> CompletedRounds - { - let mut rounds = Vec::with_capacity(NUM_LAST_COMPLETED_ROUNDS); - rounds.push(genesis); - - let voters = voters.current().1.iter().map(|(a, _)| a.clone()).collect(); - CompletedRounds { rounds, set_id, voters } - } - - /// Get the set-id and voter set of the completed rounds. - pub fn set_info(&self) -> (SetId, &[AuthorityId]) { - (self.set_id, &self.voters[..]) - } - - /// Iterate over all completed rounds. - pub fn iter(&self) -> impl Iterator> { - self.rounds.iter().rev() - } - - /// Returns the last (latest) completed round. 
- pub fn last(&self) -> &CompletedRound { - self.rounds.first() - .expect("inner is never empty; always contains at least genesis; qed") - } - - /// Push a new completed round, oldest round is evicted if number of rounds - /// is higher than `NUM_LAST_COMPLETED_ROUNDS`. - pub fn push(&mut self, completed_round: CompletedRound) { - use std::cmp::Reverse; - - match self.rounds.binary_search_by_key( - &Reverse(completed_round.number), - |completed_round| Reverse(completed_round.number), - ) { - Ok(idx) => self.rounds[idx] = completed_round, - Err(idx) => self.rounds.insert(idx, completed_round), - }; - - if self.rounds.len() > NUM_LAST_COMPLETED_ROUNDS { - self.rounds.pop(); - } - } + /// Create a new completed rounds tracker with NUM_LAST_COMPLETED_ROUNDS capacity. + pub(crate) fn new( + genesis: CompletedRound, + set_id: SetId, + voters: &AuthoritySet>, + ) -> CompletedRounds { + let mut rounds = Vec::with_capacity(NUM_LAST_COMPLETED_ROUNDS); + rounds.push(genesis); + + let voters = voters.current().1.iter().map(|(a, _)| a.clone()).collect(); + CompletedRounds { + rounds, + set_id, + voters, + } + } + + /// Get the set-id and voter set of the completed rounds. + pub fn set_info(&self) -> (SetId, &[AuthorityId]) { + (self.set_id, &self.voters[..]) + } + + /// Iterate over all completed rounds. + pub fn iter(&self) -> impl Iterator> { + self.rounds.iter().rev() + } + + /// Returns the last (latest) completed round. + pub fn last(&self) -> &CompletedRound { + self.rounds + .first() + .expect("inner is never empty; always contains at least genesis; qed") + } + + /// Push a new completed round, oldest round is evicted if number of rounds + /// is higher than `NUM_LAST_COMPLETED_ROUNDS`. 
+ pub fn push(&mut self, completed_round: CompletedRound) { + use std::cmp::Reverse; + + match self + .rounds + .binary_search_by_key(&Reverse(completed_round.number), |completed_round| { + Reverse(completed_round.number) + }) { + Ok(idx) => self.rounds[idx] = completed_round, + Err(idx) => self.rounds.insert(idx, completed_round), + }; + + if self.rounds.len() > NUM_LAST_COMPLETED_ROUNDS { + self.rounds.pop(); + } + } } /// A map with voter status information for currently live rounds, @@ -175,788 +180,870 @@ pub type CurrentRounds = BTreeMap>; /// key). #[derive(Debug, Decode, Encode, PartialEq)] pub enum VoterSetState { - /// The voter is live, i.e. participating in rounds. - Live { - /// The previously completed rounds. - completed_rounds: CompletedRounds, - /// Voter status for the currently live rounds. - current_rounds: CurrentRounds, - }, - /// The voter is paused, i.e. not casting or importing any votes. - Paused { - /// The previously completed rounds. - completed_rounds: CompletedRounds, - }, + /// The voter is live, i.e. participating in rounds. + Live { + /// The previously completed rounds. + completed_rounds: CompletedRounds, + /// Voter status for the currently live rounds. + current_rounds: CurrentRounds, + }, + /// The voter is paused, i.e. not casting or importing any votes. + Paused { + /// The previously completed rounds. + completed_rounds: CompletedRounds, + }, } impl VoterSetState { - /// Create a new live VoterSetState with round 0 as a completed round using - /// the given genesis state and the given authorities. Round 1 is added as a - /// current round (with state `HasVoted::No`). 
- pub(crate) fn live( - set_id: SetId, - authority_set: &AuthoritySet>, - genesis_state: (Block::Hash, NumberFor), - ) -> VoterSetState { - let state = RoundState::genesis((genesis_state.0, genesis_state.1)); - let completed_rounds = CompletedRounds::new( - CompletedRound { - number: 0, - state, - base: (genesis_state.0, genesis_state.1), - votes: Vec::new(), - }, - set_id, - authority_set, - ); - - let mut current_rounds = CurrentRounds::new(); - current_rounds.insert(1, HasVoted::No); - - VoterSetState::Live { - completed_rounds, - current_rounds, - } - } - - /// Returns the last completed rounds. - pub(crate) fn completed_rounds(&self) -> CompletedRounds { - match self { - VoterSetState::Live { completed_rounds, .. } => - completed_rounds.clone(), - VoterSetState::Paused { completed_rounds } => - completed_rounds.clone(), - } - } - - /// Returns the last completed round. - pub(crate) fn last_completed_round(&self) -> CompletedRound { - match self { - VoterSetState::Live { completed_rounds, .. } => - completed_rounds.last().clone(), - VoterSetState::Paused { completed_rounds } => - completed_rounds.last().clone(), - } - } - - /// Returns the voter set state validating that it includes the given round - /// in current rounds and that the voter isn't paused. - pub fn with_current_round(&self, round: RoundNumber) - -> Result<(&CompletedRounds, &CurrentRounds), Error> - { - if let VoterSetState::Live { completed_rounds, current_rounds } = self { - if current_rounds.contains_key(&round) { - return Ok((completed_rounds, current_rounds)); - } else { - let msg = "Voter acting on a live round we are not tracking."; - return Err(Error::Safety(msg.to_string())); - } - } else { - let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); - } - } + /// Create a new live VoterSetState with round 0 as a completed round using + /// the given genesis state and the given authorities. 
Round 1 is added as a + /// current round (with state `HasVoted::No`). + pub(crate) fn live( + set_id: SetId, + authority_set: &AuthoritySet>, + genesis_state: (Block::Hash, NumberFor), + ) -> VoterSetState { + let state = RoundState::genesis((genesis_state.0, genesis_state.1)); + let completed_rounds = CompletedRounds::new( + CompletedRound { + number: 0, + state, + base: (genesis_state.0, genesis_state.1), + votes: Vec::new(), + }, + set_id, + authority_set, + ); + + let mut current_rounds = CurrentRounds::new(); + current_rounds.insert(1, HasVoted::No); + + VoterSetState::Live { + completed_rounds, + current_rounds, + } + } + + /// Returns the last completed rounds. + pub(crate) fn completed_rounds(&self) -> CompletedRounds { + match self { + VoterSetState::Live { + completed_rounds, .. + } => completed_rounds.clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.clone(), + } + } + + /// Returns the last completed round. + pub(crate) fn last_completed_round(&self) -> CompletedRound { + match self { + VoterSetState::Live { + completed_rounds, .. + } => completed_rounds.last().clone(), + VoterSetState::Paused { completed_rounds } => completed_rounds.last().clone(), + } + } + + /// Returns the voter set state validating that it includes the given round + /// in current rounds and that the voter isn't paused. + pub fn with_current_round( + &self, + round: RoundNumber, + ) -> Result<(&CompletedRounds, &CurrentRounds), Error> { + if let VoterSetState::Live { + completed_rounds, + current_rounds, + } = self + { + if current_rounds.contains_key(&round) { + return Ok((completed_rounds, current_rounds)); + } else { + let msg = "Voter acting on a live round we are not tracking."; + return Err(Error::Safety(msg.to_string())); + } + } else { + let msg = "Voter acting while in paused state."; + return Err(Error::Safety(msg.to_string())); + } + } } /// Whether we've voted already during a prior run of the program. 
#[derive(Clone, Debug, Decode, Encode, PartialEq)] pub enum HasVoted { - /// Has not voted already in this round. - No, - /// Has voted in this round. - Yes(AuthorityId, Vote), + /// Has not voted already in this round. + No, + /// Has voted in this round. + Yes(AuthorityId, Vote), } /// The votes cast by this voter already during a prior run of the program. #[derive(Debug, Clone, Decode, Encode, PartialEq)] pub enum Vote { - /// Has cast a proposal. - Propose(PrimaryPropose), - /// Has cast a prevote. - Prevote(Option>, Prevote), - /// Has cast a precommit (implies prevote.) - Precommit(Option>, Prevote, Precommit), + /// Has cast a proposal. + Propose(PrimaryPropose), + /// Has cast a prevote. + Prevote(Option>, Prevote), + /// Has cast a precommit (implies prevote.) + Precommit( + Option>, + Prevote, + Precommit, + ), } impl HasVoted { - /// Returns the proposal we should vote with (if any.) - pub fn propose(&self) -> Option<&PrimaryPropose> { - match self { - HasVoted::Yes(_, Vote::Propose(propose)) => - Some(propose), - HasVoted::Yes(_, Vote::Prevote(propose, _)) | HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => - propose.as_ref(), - _ => None, - } - } - - /// Returns the prevote we should vote with (if any.) - pub fn prevote(&self) -> Option<&Prevote> { - match self { - HasVoted::Yes(_, Vote::Prevote(_, prevote)) | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => - Some(prevote), - _ => None, - } - } - - /// Returns the precommit we should vote with (if any.) - pub fn precommit(&self) -> Option<&Precommit> { - match self { - HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => - Some(precommit), - _ => None, - } - } - - /// Returns true if the voter can still propose, false otherwise. - pub fn can_propose(&self) -> bool { - self.propose().is_none() - } - - /// Returns true if the voter can still prevote, false otherwise. 
- pub fn can_prevote(&self) -> bool { - self.prevote().is_none() - } - - /// Returns true if the voter can still precommit, false otherwise. - pub fn can_precommit(&self) -> bool { - self.precommit().is_none() - } + /// Returns the proposal we should vote with (if any.) + pub fn propose(&self) -> Option<&PrimaryPropose> { + match self { + HasVoted::Yes(_, Vote::Propose(propose)) => Some(propose), + HasVoted::Yes(_, Vote::Prevote(propose, _)) + | HasVoted::Yes(_, Vote::Precommit(propose, _, _)) => propose.as_ref(), + _ => None, + } + } + + /// Returns the prevote we should vote with (if any.) + pub fn prevote(&self) -> Option<&Prevote> { + match self { + HasVoted::Yes(_, Vote::Prevote(_, prevote)) + | HasVoted::Yes(_, Vote::Precommit(_, prevote, _)) => Some(prevote), + _ => None, + } + } + + /// Returns the precommit we should vote with (if any.) + pub fn precommit(&self) -> Option<&Precommit> { + match self { + HasVoted::Yes(_, Vote::Precommit(_, _, precommit)) => Some(precommit), + _ => None, + } + } + + /// Returns true if the voter can still propose, false otherwise. + pub fn can_propose(&self) -> bool { + self.propose().is_none() + } + + /// Returns true if the voter can still prevote, false otherwise. + pub fn can_prevote(&self) -> bool { + self.prevote().is_none() + } + + /// Returns true if the voter can still precommit, false otherwise. + pub fn can_precommit(&self) -> bool { + self.precommit().is_none() + } } /// A voter set state meant to be shared safely across multiple owners. #[derive(Clone)] pub struct SharedVoterSetState { - inner: Arc>>, + inner: Arc>>, } impl From> for SharedVoterSetState { - fn from(set_state: VoterSetState) -> Self { - SharedVoterSetState::new(set_state) - } + fn from(set_state: VoterSetState) -> Self { + SharedVoterSetState::new(set_state) + } } impl SharedVoterSetState { - /// Create a new shared voter set tracker with the given state. 
- pub(crate) fn new(state: VoterSetState) -> Self { - SharedVoterSetState { inner: Arc::new(RwLock::new(state)) } - } - - /// Read the inner voter set state. - pub(crate) fn read(&self) -> parking_lot::RwLockReadGuard> { - self.inner.read() - } - - /// Return vote status information for the current round. - pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { - match &*self.inner.read() { - VoterSetState::Live { current_rounds, .. } => { - current_rounds.get(&round).and_then(|has_voted| match has_voted { - HasVoted::Yes(id, vote) => - Some(HasVoted::Yes(id.clone(), vote.clone())), - _ => None, - }) - .unwrap_or(HasVoted::No) - }, - _ => HasVoted::No, - } - } - - // NOTE: not exposed outside of this module intentionally. - fn with(&self, f: F) -> R - where F: FnOnce(&mut VoterSetState) -> R - { - f(&mut *self.inner.write()) - } + /// Create a new shared voter set tracker with the given state. + pub(crate) fn new(state: VoterSetState) -> Self { + SharedVoterSetState { + inner: Arc::new(RwLock::new(state)), + } + } + + /// Read the inner voter set state. + pub(crate) fn read(&self) -> parking_lot::RwLockReadGuard> { + self.inner.read() + } + + /// Return vote status information for the current round. + pub(crate) fn has_voted(&self, round: RoundNumber) -> HasVoted { + match &*self.inner.read() { + VoterSetState::Live { current_rounds, .. } => current_rounds + .get(&round) + .and_then(|has_voted| match has_voted { + HasVoted::Yes(id, vote) => Some(HasVoted::Yes(id.clone(), vote.clone())), + _ => None, + }) + .unwrap_or(HasVoted::No), + _ => HasVoted::No, + } + } + + // NOTE: not exposed outside of this module intentionally. + fn with(&self, f: F) -> R + where + F: FnOnce(&mut VoterSetState) -> R, + { + f(&mut *self.inner.write()) + } } /// Prometheus metrics for GRANDPA. 
#[derive(Clone)] pub(crate) struct Metrics { - finality_grandpa_round: Gauge, + finality_grandpa_round: Gauge, } impl Metrics { - pub(crate) fn register(registry: &prometheus_endpoint::Registry) -> Result { - Ok(Self { - finality_grandpa_round: register( - Gauge::new("finality_grandpa_round", "Highest completed GRANDPA round.")?, - registry - )?, - }) - } + pub(crate) fn register( + registry: &prometheus_endpoint::Registry, + ) -> Result { + Ok(Self { + finality_grandpa_round: register( + Gauge::new("finality_grandpa_round", "Highest completed GRANDPA round.")?, + registry, + )?, + }) + } } - /// The environment we run GRANDPA in. pub(crate) struct Environment, SC, VR> { - pub(crate) client: Arc, - pub(crate) select_chain: SC, - pub(crate) voters: Arc>, - pub(crate) config: Config, - pub(crate) authority_set: SharedAuthoritySet>, - pub(crate) consensus_changes: SharedConsensusChanges>, - pub(crate) network: crate::communication::NetworkBridge, - pub(crate) set_id: SetId, - pub(crate) voter_set_state: SharedVoterSetState, - pub(crate) voting_rule: VR, - pub(crate) metrics: Option, - pub(crate) _phantom: PhantomData, + pub(crate) client: Arc, + pub(crate) select_chain: SC, + pub(crate) voters: Arc>, + pub(crate) config: Config, + pub(crate) authority_set: SharedAuthoritySet>, + pub(crate) consensus_changes: SharedConsensusChanges>, + pub(crate) network: crate::communication::NetworkBridge, + pub(crate) set_id: SetId, + pub(crate) voter_set_state: SharedVoterSetState, + pub(crate) voting_rule: VR, + pub(crate) metrics: Option, + pub(crate) _phantom: PhantomData, } -impl, SC, VR> Environment { - /// Updates the voter set state using the given closure. The write lock is - /// held during evaluation of the closure and the environment's voter set - /// state is set to its result if successful. 
- pub(crate) fn update_voter_set_state(&self, f: F) -> Result<(), Error> where - F: FnOnce(&VoterSetState) -> Result>, Error> - { - self.voter_set_state.with(|voter_set_state| { - if let Some(set_state) = f(&voter_set_state)? { - *voter_set_state = set_state; - - if let Some(metrics) = self.metrics.as_ref() { - if let VoterSetState::Live { completed_rounds, .. } = voter_set_state { - let highest = completed_rounds.rounds.iter() - .map(|round| round.number) - .max() - .expect("There is always one completed round (genesis); qed"); - - metrics.finality_grandpa_round.set(highest); - } - } - } - Ok(()) - }) - } +impl, SC, VR> + Environment +{ + /// Updates the voter set state using the given closure. The write lock is + /// held during evaluation of the closure and the environment's voter set + /// state is set to its result if successful. + pub(crate) fn update_voter_set_state(&self, f: F) -> Result<(), Error> + where + F: FnOnce(&VoterSetState) -> Result>, Error>, + { + self.voter_set_state.with(|voter_set_state| { + if let Some(set_state) = f(&voter_set_state)? { + *voter_set_state = set_state; + + if let Some(metrics) = self.metrics.as_ref() { + if let VoterSetState::Live { + completed_rounds, .. 
+ } = voter_set_state + { + let highest = completed_rounds + .rounds + .iter() + .map(|round| round.number) + .max() + .expect("There is always one completed round (genesis); qed"); + + metrics.finality_grandpa_round.set(highest); + } + } + } + Ok(()) + }) + } } -impl - finality_grandpa::Chain> -for Environment +impl finality_grandpa::Chain> + for Environment where - Block: 'static, - BE: Backend, - C: crate::ClientForGrandpa, - N: NetworkT + 'static + Send, - SC: SelectChain + 'static, - VR: VotingRule, - NumberFor: BlockNumberOps, + Block: 'static, + BE: Backend, + C: crate::ClientForGrandpa, + N: NetworkT + 'static + Send, + SC: SelectChain + 'static, + VR: VotingRule, + NumberFor: BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { - ancestry(&self.client, base, block) - } - - fn best_chain_containing(&self, block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - // NOTE: when we finalize an authority set change through the sync protocol the voter is - // signaled asynchronously. therefore the voter could still vote in the next round - // before activating the new set. the `authority_set` is updated immediately thus we - // restrict the voter based on that. - if self.set_id != self.authority_set.inner().read().current().0 { - return None; - } - - let base_header = match self.client.header(BlockId::Hash(block)).ok()? { - Some(h) => h, - None => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find base block", block); - return None; - } - }; - - // we refuse to vote beyond the current limit number where transitions are scheduled to - // occur. - // once blocks are finalized that make that transition irrelevant or activate it, - // we will proceed onwards. most of the time there will be no pending transition. - // the limit, if any, is guaranteed to be higher than or equal to the given base number. 
- let limit = self.authority_set.current_limit(*base_header.number()); - debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); - - match self.select_chain.finality_target(block, None) { - Ok(Some(best_hash)) => { - let best_header = self.client.header(BlockId::Hash(best_hash)).ok()? - .expect("Header known to exist after `finality_target` call; qed"); - - // check if our vote is currently being limited due to a pending change - let limit = limit.filter(|limit| limit < best_header.number()); - let target; - - let target_header = if let Some(target_number) = limit { - let mut target_header = best_header.clone(); - - // walk backwards until we find the target block - loop { - if *target_header.number() < target_number { - unreachable!( - "we are traversing backwards from a known block; \ + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + ancestry(&self.client, base, block) + } + + fn best_chain_containing(&self, block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { + // NOTE: when we finalize an authority set change through the sync protocol the voter is + // signaled asynchronously. therefore the voter could still vote in the next round + // before activating the new set. the `authority_set` is updated immediately thus we + // restrict the voter based on that. + if self.set_id != self.authority_set.inner().read().current().0 { + return None; + } + + let base_header = match self.client.header(BlockId::Hash(block)).ok()? { + Some(h) => h, + None => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find base block", block); + return None; + } + }; + + // we refuse to vote beyond the current limit number where transitions are scheduled to + // occur. + // once blocks are finalized that make that transition irrelevant or activate it, + // we will proceed onwards. most of the time there will be no pending transition. 
+ // the limit, if any, is guaranteed to be higher than or equal to the given base number. + let limit = self.authority_set.current_limit(*base_header.number()); + debug!(target: "afg", "Finding best chain containing block {:?} with number limit {:?}", block, limit); + + match self.select_chain.finality_target(block, None) { + Ok(Some(best_hash)) => { + let best_header = self + .client + .header(BlockId::Hash(best_hash)) + .ok()? + .expect("Header known to exist after `finality_target` call; qed"); + + // check if our vote is currently being limited due to a pending change + let limit = limit.filter(|limit| limit < best_header.number()); + let target; + + let target_header = if let Some(target_number) = limit { + let mut target_header = best_header.clone(); + + // walk backwards until we find the target block + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ blocks are stored contiguously; \ qed" - ); - } - - if *target_header.number() == target_number { - break; - } - - target_header = self.client.header(BlockId::Hash(*target_header.parent_hash())).ok()? - .expect("Header known to exist after `finality_target` call; qed"); - } - - target = target_header; - &target - } else { - // otherwise just use the given best as the target - &best_header - }; - - // restrict vote according to the given voting rule, if the - // voting rule doesn't restrict the vote then we keep the - // previous target. - // - // note that we pass the original `best_header`, i.e. before the - // authority set limit filter, which can be considered a - // mandatory/implicit voting rule. - // - // we also make sure that the restricted vote is higher than the - // round base (i.e. last finalized), otherwise the value - // returned by the given voting rule is ignored and the original - // target is used instead. 
- self.voting_rule - .restrict_vote(&*self.client, &base_header, &best_header, target_header) - .filter(|(_, restricted_number)| { - // we can only restrict votes within the interval [base, target] - restricted_number >= base_header.number() && - restricted_number < target_header.number() - }) - .or(Some((target_header.hash(), *target_header.number()))) - }, - Ok(None) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); - None - } - Err(e) => { - debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); - None - } - } - } + ); + } + + if *target_header.number() == target_number { + break; + } + + target_header = self + .client + .header(BlockId::Hash(*target_header.parent_hash())) + .ok()? + .expect("Header known to exist after `finality_target` call; qed"); + } + + target = target_header; + &target + } else { + // otherwise just use the given best as the target + &best_header + }; + + // restrict vote according to the given voting rule, if the + // voting rule doesn't restrict the vote then we keep the + // previous target. + // + // note that we pass the original `best_header`, i.e. before the + // authority set limit filter, which can be considered a + // mandatory/implicit voting rule. + // + // we also make sure that the restricted vote is higher than the + // round base (i.e. last finalized), otherwise the value + // returned by the given voting rule is ignored and the original + // target is used instead. 
+ self.voting_rule + .restrict_vote(&*self.client, &base_header, &best_header, target_header) + .filter(|(_, restricted_number)| { + // we can only restrict votes within the interval [base, target] + restricted_number >= base_header.number() + && restricted_number < target_header.number() + }) + .or(Some((target_header.hash(), *target_header.number()))) + } + Ok(None) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: couldn't find target block", block); + None + } + Err(e) => { + debug!(target: "afg", "Encountered error finding best chain containing {:?}: {:?}", block, e); + None + } + } + } } - pub(crate) fn ancestry( - client: &Arc, - base: Block::Hash, - block: Block::Hash, -) -> Result, GrandpaError> where - Client: HeaderMetadata, + client: &Arc, + base: Block::Hash, + block: Block::Hash, +) -> Result, GrandpaError> +where + Client: HeaderMetadata, { - if base == block { return Err(GrandpaError::NotDescendent) } + if base == block { + return Err(GrandpaError::NotDescendent); + } - let tree_route_res = sp_blockchain::tree_route(&**client, block, base); + let tree_route_res = sp_blockchain::tree_route(&**client, block, base); - let tree_route = match tree_route_res { - Ok(tree_route) => tree_route, - Err(e) => { - debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {:?}", + let tree_route = match tree_route_res { + Ok(tree_route) => tree_route, + Err(e) => { + debug!(target: "afg", "Encountered error computing ancestry between block {:?} and base {:?}: {:?}", block, base, e); - return Err(GrandpaError::NotDescendent); - } - }; - - if tree_route.common_block().hash != base { - return Err(GrandpaError::NotDescendent); - } - - // skip one because our ancestry is meant to start from the parent of `block`, - // and `tree_route` includes it. 
- Ok(tree_route.retracted().iter().skip(1).map(|e| e.hash).collect()) + return Err(GrandpaError::NotDescendent); + } + }; + + if tree_route.common_block().hash != base { + return Err(GrandpaError::NotDescendent); + } + + // skip one because our ancestry is meant to start from the parent of `block`, + // and `tree_route` includes it. + Ok(tree_route + .retracted() + .iter() + .skip(1) + .map(|e| e.hash) + .collect()) } -impl - voter::Environment> -for Environment +impl voter::Environment> + for Environment where - Block: 'static, - B: Backend, - C: crate::ClientForGrandpa + 'static, - N: NetworkT + 'static + Send, - SC: SelectChain + 'static, - VR: VotingRule, - NumberFor: BlockNumberOps, + Block: 'static, + B: Backend, + C: crate::ClientForGrandpa + 'static, + N: NetworkT + 'static + Send, + SC: SelectChain + 'static, + VR: VotingRule, + NumberFor: BlockNumberOps, { - type Timer = Pin> + Send>>; - type Id = AuthorityId; - type Signature = AuthoritySignature; - - // regular round message streams - type In = Pin, Self::Signature, Self::Id>, Self::Error> - > + Send>>; - type Out = Pin>, - Error = Self::Error, - > + Send>>; - - type Error = CommandOrError>; - - fn round_data( - &self, - round: RoundNumber, - ) -> voter::RoundData { - let prevote_timer = Delay::new(self.config.gossip_duration * 2); - let precommit_timer = Delay::new(self.config.gossip_duration * 4); - - let local_key = crate::is_voter(&self.voters, &self.config.keystore); - - let has_voted = match self.voter_set_state.has_voted(round) { - HasVoted::Yes(id, vote) => { - if local_key.as_ref().map(|k| k.public() == id).unwrap_or(false) { - HasVoted::Yes(id, vote) - } else { - HasVoted::No - } - }, - HasVoted::No => HasVoted::No, - }; - - let (incoming, outgoing) = self.network.round_communication( - crate::communication::Round(round), - crate::communication::SetId(self.set_id), - self.voters.clone(), - local_key.clone(), - has_voted, - ); - - // schedule incoming messages from the network to be held until 
- // corresponding blocks are imported. - let incoming = Box::pin(UntilVoteTargetImported::new( - self.client.import_notification_stream(), - self.network.clone(), - self.client.clone(), - incoming, - "round", - None, - ).map_err(Into::into)); - - // schedule network message cleanup when sink drops. - let outgoing = Box::pin(outgoing.sink_err_into()); - - voter::RoundData { - voter_id: local_key.map(|pair| pair.public()), - prevote_timer: Box::pin(prevote_timer.map(Ok)), - precommit_timer: Box::pin(precommit_timer.map(Ok)), - incoming, - outgoing, - } - } - - fn proposed(&self, round: RoundNumber, propose: PrimaryPropose) -> Result<(), Self::Error> { - let local_id = crate::is_voter(&self.voters, &self.config.keystore); - - let local_id = match local_id { - Some(id) => id.public(), - None => return Ok(()), - }; - - self.update_voter_set_state(|voter_set_state| { - let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; - let current_round = current_rounds.get(&round) - .expect("checked in with_current_round that key exists; qed."); - - if !current_round.can_propose() { - // we've already proposed in this round (in a previous run), - // ignore the given vote and don't update the voter set - // state - return Ok(None); - } - - let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) - .expect("checked previously that key exists; qed."); - - *current_round = HasVoted::Yes(local_id, Vote::Propose(propose)); - - let set_state = VoterSetState::::Live { - completed_rounds: completed_rounds.clone(), - current_rounds, - }; - - crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; - - Ok(Some(set_state)) - })?; - - Ok(()) - } - - fn prevoted(&self, round: RoundNumber, prevote: Prevote) -> Result<(), Self::Error> { - let local_id = crate::is_voter(&self.voters, &self.config.keystore); - - let local_id = match local_id { - Some(id) => id.public(), - None => return Ok(()), - }; - - 
self.update_voter_set_state(|voter_set_state| { - let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; - let current_round = current_rounds.get(&round) - .expect("checked in with_current_round that key exists; qed."); - - if !current_round.can_prevote() { - // we've already prevoted in this round (in a previous run), - // ignore the given vote and don't update the voter set - // state - return Ok(None); - } - - let propose = current_round.propose(); - - let mut current_rounds = current_rounds.clone(); - let current_round = current_rounds.get_mut(&round) - .expect("checked previously that key exists; qed."); - - *current_round = HasVoted::Yes(local_id, Vote::Prevote(propose.cloned(), prevote)); - - let set_state = VoterSetState::::Live { - completed_rounds: completed_rounds.clone(), - current_rounds, - }; - - crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; - - Ok(Some(set_state)) - })?; - - Ok(()) - } - - fn precommitted(&self, round: RoundNumber, precommit: Precommit) -> Result<(), Self::Error> { - let local_id = crate::is_voter(&self.voters, &self.config.keystore); - - let local_id = match local_id { - Some(id) => id.public(), - None => return Ok(()), - }; - - self.update_voter_set_state(|voter_set_state| { - let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; - let current_round = current_rounds.get(&round) - .expect("checked in with_current_round that key exists; qed."); - - if !current_round.can_precommit() { - // we've already precommitted in this round (in a previous run), - // ignore the given vote and don't update the voter set - // state - return Ok(None); - } - - let propose = current_round.propose(); - let prevote = match current_round { - HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, - _ => { - let msg = "Voter precommitting before prevoting."; - return Err(Error::Safety(msg.to_string())); - }, - }; - - let mut current_rounds = current_rounds.clone(); - let 
current_round = current_rounds.get_mut(&round) - .expect("checked previously that key exists; qed."); - - *current_round = HasVoted::Yes( - local_id, - Vote::Precommit(propose.cloned(), prevote.clone(), precommit), - ); - - let set_state = VoterSetState::::Live { - completed_rounds: completed_rounds.clone(), - current_rounds, - }; - - crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; - - Ok(Some(set_state)) - })?; - - Ok(()) - } - - fn completed( - &self, - round: RoundNumber, - state: RoundState>, - base: (Block::Hash, NumberFor), - historical_votes: &HistoricalVotes, - ) -> Result<(), Self::Error> { - debug!( - target: "afg", "Voter {} completed round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", - self.config.name(), - round, - self.set_id, - state.estimate.as_ref().map(|e| e.1), - state.finalized.as_ref().map(|e| e.1), - ); - - self.update_voter_set_state(|voter_set_state| { - // NOTE: we don't use `with_current_round` here, it is possible that - // we are not currently tracking this round if it is a round we - // caught up to. - let (completed_rounds, current_rounds) = - if let VoterSetState::Live { completed_rounds, current_rounds } = voter_set_state { - (completed_rounds, current_rounds) - } else { - let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); - }; - - let mut completed_rounds = completed_rounds.clone(); - - // TODO: Future integration will store the prevote and precommit index. See #2611. 
- let votes = historical_votes.seen().to_vec(); - - completed_rounds.push(CompletedRound { - number: round, - state: state.clone(), - base, - votes, - }); - - // remove the round from live rounds and start tracking the next round - let mut current_rounds = current_rounds.clone(); - current_rounds.remove(&round); - current_rounds.insert(round + 1, HasVoted::No); - - let set_state = VoterSetState::::Live { - completed_rounds, - current_rounds, - }; - - crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; - - Ok(Some(set_state)) - })?; - - Ok(()) - } - - fn concluded( - &self, - round: RoundNumber, - state: RoundState>, - _base: (Block::Hash, NumberFor), - historical_votes: &HistoricalVotes, - ) -> Result<(), Self::Error> { - debug!( - target: "afg", "Voter {} concluded round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", - self.config.name(), - round, - self.set_id, - state.estimate.as_ref().map(|e| e.1), - state.finalized.as_ref().map(|e| e.1), - ); - - self.update_voter_set_state(|voter_set_state| { - // NOTE: we don't use `with_current_round` here, because a concluded - // round is completed and cannot be current. - let (completed_rounds, current_rounds) = - if let VoterSetState::Live { completed_rounds, current_rounds } = voter_set_state { - (completed_rounds, current_rounds) - } else { - let msg = "Voter acting while in paused state."; - return Err(Error::Safety(msg.to_string())); - }; - - let mut completed_rounds = completed_rounds.clone(); - - if let Some(already_completed) = completed_rounds.rounds - .iter_mut().find(|r| r.number == round) - { - let n_existing_votes = already_completed.votes.len(); - - // the interface of Environment guarantees that the previous `historical_votes` - // from `completable` is a prefix of what is passed to `concluded`. 
- already_completed.votes.extend( - historical_votes.seen().iter().skip(n_existing_votes).cloned() - ); - already_completed.state = state; - crate::aux_schema::write_concluded_round(&*self.client, &already_completed)?; - } - - let set_state = VoterSetState::::Live { - completed_rounds, - current_rounds: current_rounds.clone(), - }; - - crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; - - Ok(Some(set_state)) - })?; - - Ok(()) - } - - fn finalize_block( - &self, - hash: Block::Hash, - number: NumberFor, - round: RoundNumber, - commit: Commit, - ) -> Result<(), Self::Error> { - finalize_block( - self.client.clone(), - &self.authority_set, - &self.consensus_changes, - Some(self.config.justification_period.into()), - hash, - number, - (round, commit).into(), - false, - ) - } - - fn round_commit_timer(&self) -> Self::Timer { - use rand::{thread_rng, Rng}; - - //random between 0-1 seconds. - let delay: u64 = thread_rng().gen_range(0, 1000); - Box::pin(Delay::new(Duration::from_millis(delay)).map(Ok)) - } - - fn prevote_equivocation( - &self, - _round: RoundNumber, - equivocation: ::finality_grandpa::Equivocation, Self::Signature> - ) { - warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); - // nothing yet; this could craft misbehavior reports of some kind. 
- } - - fn precommit_equivocation( - &self, - _round: RoundNumber, - equivocation: Equivocation, Self::Signature> - ) { - warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); - // nothing yet - } + type Timer = Pin> + Send>>; + type Id = AuthorityId; + type Signature = AuthoritySignature; + + // regular round message streams + type In = Pin< + Box< + dyn Stream< + Item = Result< + ::finality_grandpa::SignedMessage< + Block::Hash, + NumberFor, + Self::Signature, + Self::Id, + >, + Self::Error, + >, + > + Send, + >, + >; + type Out = Pin< + Box< + dyn Sink< + ::finality_grandpa::Message>, + Error = Self::Error, + > + Send, + >, + >; + + type Error = CommandOrError>; + + fn round_data( + &self, + round: RoundNumber, + ) -> voter::RoundData { + let prevote_timer = Delay::new(self.config.gossip_duration * 2); + let precommit_timer = Delay::new(self.config.gossip_duration * 4); + + let local_key = crate::is_voter(&self.voters, &self.config.keystore); + + let has_voted = match self.voter_set_state.has_voted(round) { + HasVoted::Yes(id, vote) => { + if local_key + .as_ref() + .map(|k| k.public() == id) + .unwrap_or(false) + { + HasVoted::Yes(id, vote) + } else { + HasVoted::No + } + } + HasVoted::No => HasVoted::No, + }; + + let (incoming, outgoing) = self.network.round_communication( + crate::communication::Round(round), + crate::communication::SetId(self.set_id), + self.voters.clone(), + local_key.clone(), + has_voted, + ); + + // schedule incoming messages from the network to be held until + // corresponding blocks are imported. + let incoming = Box::pin( + UntilVoteTargetImported::new( + self.client.import_notification_stream(), + self.network.clone(), + self.client.clone(), + incoming, + "round", + None, + ) + .map_err(Into::into), + ); + + // schedule network message cleanup when sink drops. 
+ let outgoing = Box::pin(outgoing.sink_err_into()); + + voter::RoundData { + voter_id: local_key.map(|pair| pair.public()), + prevote_timer: Box::pin(prevote_timer.map(Ok)), + precommit_timer: Box::pin(precommit_timer.map(Ok)), + incoming, + outgoing, + } + } + + fn proposed( + &self, + round: RoundNumber, + propose: PrimaryPropose, + ) -> Result<(), Self::Error> { + let local_id = crate::is_voter(&self.voters, &self.config.keystore); + + let local_id = match local_id { + Some(id) => id.public(), + None => return Ok(()), + }; + + self.update_voter_set_state(|voter_set_state| { + let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; + let current_round = current_rounds + .get(&round) + .expect("checked in with_current_round that key exists; qed."); + + if !current_round.can_propose() { + // we've already proposed in this round (in a previous run), + // ignore the given vote and don't update the voter set + // state + return Ok(None); + } + + let mut current_rounds = current_rounds.clone(); + let current_round = current_rounds + .get_mut(&round) + .expect("checked previously that key exists; qed."); + + *current_round = HasVoted::Yes(local_id, Vote::Propose(propose)); + + let set_state = VoterSetState::::Live { + completed_rounds: completed_rounds.clone(), + current_rounds, + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn prevoted(&self, round: RoundNumber, prevote: Prevote) -> Result<(), Self::Error> { + let local_id = crate::is_voter(&self.voters, &self.config.keystore); + + let local_id = match local_id { + Some(id) => id.public(), + None => return Ok(()), + }; + + self.update_voter_set_state(|voter_set_state| { + let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; + let current_round = current_rounds + .get(&round) + .expect("checked in with_current_round that key exists; qed."); + + if !current_round.can_prevote() { 
+ // we've already prevoted in this round (in a previous run), + // ignore the given vote and don't update the voter set + // state + return Ok(None); + } + + let propose = current_round.propose(); + + let mut current_rounds = current_rounds.clone(); + let current_round = current_rounds + .get_mut(&round) + .expect("checked previously that key exists; qed."); + + *current_round = HasVoted::Yes(local_id, Vote::Prevote(propose.cloned(), prevote)); + + let set_state = VoterSetState::::Live { + completed_rounds: completed_rounds.clone(), + current_rounds, + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn precommitted( + &self, + round: RoundNumber, + precommit: Precommit, + ) -> Result<(), Self::Error> { + let local_id = crate::is_voter(&self.voters, &self.config.keystore); + + let local_id = match local_id { + Some(id) => id.public(), + None => return Ok(()), + }; + + self.update_voter_set_state(|voter_set_state| { + let (completed_rounds, current_rounds) = voter_set_state.with_current_round(round)?; + let current_round = current_rounds + .get(&round) + .expect("checked in with_current_round that key exists; qed."); + + if !current_round.can_precommit() { + // we've already precommitted in this round (in a previous run), + // ignore the given vote and don't update the voter set + // state + return Ok(None); + } + + let propose = current_round.propose(); + let prevote = match current_round { + HasVoted::Yes(_, Vote::Prevote(_, prevote)) => prevote, + _ => { + let msg = "Voter precommitting before prevoting."; + return Err(Error::Safety(msg.to_string())); + } + }; + + let mut current_rounds = current_rounds.clone(); + let current_round = current_rounds + .get_mut(&round) + .expect("checked previously that key exists; qed."); + + *current_round = HasVoted::Yes( + local_id, + Vote::Precommit(propose.cloned(), prevote.clone(), precommit), + ); + + let set_state = VoterSetState::::Live { + 
completed_rounds: completed_rounds.clone(), + current_rounds, + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn completed( + &self, + round: RoundNumber, + state: RoundState>, + base: (Block::Hash, NumberFor), + historical_votes: &HistoricalVotes, + ) -> Result<(), Self::Error> { + debug!( + target: "afg", "Voter {} completed round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", + self.config.name(), + round, + self.set_id, + state.estimate.as_ref().map(|e| e.1), + state.finalized.as_ref().map(|e| e.1), + ); + + self.update_voter_set_state(|voter_set_state| { + // NOTE: we don't use `with_current_round` here, it is possible that + // we are not currently tracking this round if it is a round we + // caught up to. + let (completed_rounds, current_rounds) = if let VoterSetState::Live { + completed_rounds, + current_rounds, + } = voter_set_state + { + (completed_rounds, current_rounds) + } else { + let msg = "Voter acting while in paused state."; + return Err(Error::Safety(msg.to_string())); + }; + + let mut completed_rounds = completed_rounds.clone(); + + // TODO: Future integration will store the prevote and precommit index. See #2611. 
+ let votes = historical_votes.seen().to_vec(); + + completed_rounds.push(CompletedRound { + number: round, + state: state.clone(), + base, + votes, + }); + + // remove the round from live rounds and start tracking the next round + let mut current_rounds = current_rounds.clone(); + current_rounds.remove(&round); + current_rounds.insert(round + 1, HasVoted::No); + + let set_state = VoterSetState::::Live { + completed_rounds, + current_rounds, + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn concluded( + &self, + round: RoundNumber, + state: RoundState>, + _base: (Block::Hash, NumberFor), + historical_votes: &HistoricalVotes, + ) -> Result<(), Self::Error> { + debug!( + target: "afg", "Voter {} concluded round {} in set {}. Estimate = {:?}, Finalized in round = {:?}", + self.config.name(), + round, + self.set_id, + state.estimate.as_ref().map(|e| e.1), + state.finalized.as_ref().map(|e| e.1), + ); + + self.update_voter_set_state(|voter_set_state| { + // NOTE: we don't use `with_current_round` here, because a concluded + // round is completed and cannot be current. + let (completed_rounds, current_rounds) = if let VoterSetState::Live { + completed_rounds, + current_rounds, + } = voter_set_state + { + (completed_rounds, current_rounds) + } else { + let msg = "Voter acting while in paused state."; + return Err(Error::Safety(msg.to_string())); + }; + + let mut completed_rounds = completed_rounds.clone(); + + if let Some(already_completed) = completed_rounds + .rounds + .iter_mut() + .find(|r| r.number == round) + { + let n_existing_votes = already_completed.votes.len(); + + // the interface of Environment guarantees that the previous `historical_votes` + // from `completable` is a prefix of what is passed to `concluded`. 
+ already_completed.votes.extend( + historical_votes + .seen() + .iter() + .skip(n_existing_votes) + .cloned(), + ); + already_completed.state = state; + crate::aux_schema::write_concluded_round(&*self.client, &already_completed)?; + } + + let set_state = VoterSetState::::Live { + completed_rounds, + current_rounds: current_rounds.clone(), + }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + Ok(Some(set_state)) + })?; + + Ok(()) + } + + fn finalize_block( + &self, + hash: Block::Hash, + number: NumberFor, + round: RoundNumber, + commit: Commit, + ) -> Result<(), Self::Error> { + finalize_block( + self.client.clone(), + &self.authority_set, + &self.consensus_changes, + Some(self.config.justification_period.into()), + hash, + number, + (round, commit).into(), + false, + ) + } + + fn round_commit_timer(&self) -> Self::Timer { + use rand::{thread_rng, Rng}; + + //random between 0-1 seconds. + let delay: u64 = thread_rng().gen_range(0, 1000); + Box::pin(Delay::new(Duration::from_millis(delay)).map(Ok)) + } + + fn prevote_equivocation( + &self, + _round: RoundNumber, + equivocation: ::finality_grandpa::Equivocation, Self::Signature>, + ) { + warn!(target: "afg", "Detected prevote equivocation in the finality worker: {:?}", equivocation); + // nothing yet; this could craft misbehavior reports of some kind. 
+ } + + fn precommit_equivocation( + &self, + _round: RoundNumber, + equivocation: Equivocation, Self::Signature>, + ) { + warn!(target: "afg", "Detected precommit equivocation in the finality worker: {:?}", equivocation); + // nothing yet + } } pub(crate) enum JustificationOrCommit { - Justification(GrandpaJustification), - Commit((RoundNumber, Commit)), + Justification(GrandpaJustification), + Commit((RoundNumber, Commit)), } impl From<(RoundNumber, Commit)> for JustificationOrCommit { - fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { - JustificationOrCommit::Commit(commit) - } + fn from(commit: (RoundNumber, Commit)) -> JustificationOrCommit { + JustificationOrCommit::Commit(commit) + } } impl From> for JustificationOrCommit { - fn from(justification: GrandpaJustification) -> JustificationOrCommit { - JustificationOrCommit::Justification(justification) - } + fn from(justification: GrandpaJustification) -> JustificationOrCommit { + JustificationOrCommit::Justification(justification) + } } /// Finalize the given block and apply any authority set changes. If an @@ -964,237 +1051,240 @@ impl From> for JustificationOrCommit< /// given) and stored with the block when finalizing it. /// This method assumes that the block being finalized has already been imported. 
pub(crate) fn finalize_block( - client: Arc, - authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, - justification_period: Option>, - hash: Block::Hash, - number: NumberFor, - justification_or_commit: JustificationOrCommit, - initial_sync: bool, -) -> Result<(), CommandOrError>> where - Block: BlockT, - BE: Backend, - Client: crate::ClientForGrandpa, + client: Arc, + authority_set: &SharedAuthoritySet>, + consensus_changes: &SharedConsensusChanges>, + justification_period: Option>, + hash: Block::Hash, + number: NumberFor, + justification_or_commit: JustificationOrCommit, + initial_sync: bool, +) -> Result<(), CommandOrError>> +where + Block: BlockT, + BE: Backend, + Client: crate::ClientForGrandpa, { - // NOTE: lock must be held through writing to DB to avoid race. this lock - // also implicitly synchronizes the check for last finalized number - // below. - let mut authority_set = authority_set.inner().write(); - - let status = client.info(); - if number <= status.finalized_number && client.hash(number)? == Some(hash) { - // This can happen after a forced change (triggered by the finality tracker when finality is stalled), since - // the voter will be restarted at the median last finalized block, which can be lower than the local best - // finalized block. 
- warn!(target: "afg", "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", - hash, - number, - status.finalized_number, - ); - - return Ok(()); - } - - // FIXME #1483: clone only when changed - let old_authority_set = authority_set.clone(); - // holds the old consensus changes in case it is changed below, needed for - // reverting in case of failure - let mut old_consensus_changes = None; - - let mut consensus_changes = consensus_changes.lock(); - let canon_at_height = |canon_number| { - // "true" because the block is finalized - canonical_at_height(&*client, (hash, number), true, canon_number) - }; - - let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { - let status = authority_set.apply_standard_changes( - hash, - number, - &is_descendent_of::(&*client, None), - initial_sync, - ).map_err(|e| Error::Safety(e.to_string()))?; - - // check if this is this is the first finalization of some consensus changes - let (alters_consensus_changes, finalizes_consensus_changes) = consensus_changes - .finalize((number, hash), &canon_at_height)?; - - if alters_consensus_changes { - old_consensus_changes = Some(consensus_changes.clone()); - - let write_result = crate::aux_schema::update_consensus_changes( - &*consensus_changes, - |insert| apply_aux(import_op, insert, &[]), - ); - - if let Err(e) = write_result { - warn!(target: "afg", "Failed to write updated consensus changes to disk. Bailing."); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - - return Err(e.into()); - } - } - - // NOTE: this code assumes that honest voters will never vote past a - // transition block, thus we don't have to worry about the case where - // we have a transition with `effective_block = N`, but we finalize - // `N+1`. this assumption is required to make sure we store - // justifications for transition blocks which will be requested by - // syncing clients. 
- let justification = match justification_or_commit { - JustificationOrCommit::Justification(justification) => Some(justification.encode()), - JustificationOrCommit::Commit((round_number, commit)) => { - let mut justification_required = + // NOTE: lock must be held through writing to DB to avoid race. this lock + // also implicitly synchronizes the check for last finalized number + // below. + let mut authority_set = authority_set.inner().write(); + + let status = client.info(); + if number <= status.finalized_number && client.hash(number)? == Some(hash) { + // This can happen after a forced change (triggered by the finality tracker when finality is stalled), since + // the voter will be restarted at the median last finalized block, which can be lower than the local best + // finalized block. + warn!(target: "afg", "Re-finalized block #{:?} ({:?}) in the canonical chain, current best finalized is #{:?}", + hash, + number, + status.finalized_number, + ); + + return Ok(()); + } + + // FIXME #1483: clone only when changed + let old_authority_set = authority_set.clone(); + // holds the old consensus changes in case it is changed below, needed for + // reverting in case of failure + let mut old_consensus_changes = None; + + let mut consensus_changes = consensus_changes.lock(); + let canon_at_height = |canon_number| { + // "true" because the block is finalized + canonical_at_height(&*client, (hash, number), true, canon_number) + }; + + let update_res: Result<_, Error> = client.lock_import_and_run(|import_op| { + let status = authority_set + .apply_standard_changes( + hash, + number, + &is_descendent_of::(&*client, None), + initial_sync, + ) + .map_err(|e| Error::Safety(e.to_string()))?; + + // check if this is this is the first finalization of some consensus changes + let (alters_consensus_changes, finalizes_consensus_changes) = + consensus_changes.finalize((number, hash), &canon_at_height)?; + + if alters_consensus_changes { + old_consensus_changes = 
Some(consensus_changes.clone()); + + let write_result = + crate::aux_schema::update_consensus_changes(&*consensus_changes, |insert| { + apply_aux(import_op, insert, &[]) + }); + + if let Err(e) = write_result { + warn!(target: "afg", "Failed to write updated consensus changes to disk. Bailing."); + warn!(target: "afg", "Node is in a potentially inconsistent state."); + + return Err(e.into()); + } + } + + // NOTE: this code assumes that honest voters will never vote past a + // transition block, thus we don't have to worry about the case where + // we have a transition with `effective_block = N`, but we finalize + // `N+1`. this assumption is required to make sure we store + // justifications for transition blocks which will be requested by + // syncing clients. + let justification = match justification_or_commit { + JustificationOrCommit::Justification(justification) => Some(justification.encode()), + JustificationOrCommit::Commit((round_number, commit)) => { + let mut justification_required = // justification is always required when block that enacts new authorities // set is finalized status.new_set_block.is_some() || // justification is required when consensus changes are finalized finalizes_consensus_changes; - // justification is required every N blocks to be able to prove blocks - // finalization to remote nodes - if !justification_required { - if let Some(justification_period) = justification_period { - let last_finalized_number = client.info().finalized_number; - justification_required = - (!last_finalized_number.is_zero() || number - last_finalized_number == justification_period) && - (last_finalized_number / justification_period != number / justification_period); - } - } - - if justification_required { - let justification = GrandpaJustification::from_commit( - &client, - round_number, - commit, - )?; - - Some(justification.encode()) - } else { - None - } - }, - }; - - debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); - - // 
ideally some handle to a synchronization oracle would be used - // to avoid unconditionally notifying. - client.apply_finality(import_op, BlockId::Hash(hash), justification, true).map_err(|e| { + // justification is required every N blocks to be able to prove blocks + // finalization to remote nodes + if !justification_required { + if let Some(justification_period) = justification_period { + let last_finalized_number = client.info().finalized_number; + justification_required = (!last_finalized_number.is_zero() + || number - last_finalized_number == justification_period) + && (last_finalized_number / justification_period + != number / justification_period); + } + } + + if justification_required { + let justification = + GrandpaJustification::from_commit(&client, round_number, commit)?; + + Some(justification.encode()) + } else { + None + } + } + }; + + debug!(target: "afg", "Finalizing blocks up to ({:?}, {})", number, hash); + + // ideally some handle to a synchronization oracle would be used + // to avoid unconditionally notifying. + client.apply_finality(import_op, BlockId::Hash(hash), justification, true).map_err(|e| { warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); e })?; - telemetry!(CONSENSUS_INFO; "afg.finalized_blocks_up_to"; - "number" => ?number, "hash" => ?hash, - ); - - let new_authorities = if let Some((canon_hash, canon_number)) = status.new_set_block { - // the authority set has changed. 
- let (new_id, set_ref) = authority_set.current(); - - if set_ref.len() > 16 { - afg_log!(initial_sync, - "👴 Applying GRANDPA set change to new set with {} authorities", - set_ref.len(), - ); - } else { - afg_log!(initial_sync, - "👴 Applying GRANDPA set change to new set {:?}", - set_ref, - ); - } - - telemetry!(CONSENSUS_INFO; "afg.generating_new_authority_set"; - "number" => ?canon_number, "hash" => ?canon_hash, - "authorities" => ?set_ref.to_vec(), - "set_id" => ?new_id, - ); - Some(NewAuthoritySet { - canon_hash, - canon_number, - set_id: new_id, - authorities: set_ref.to_vec(), - }) - } else { - None - }; - - if status.changed { - let write_result = crate::aux_schema::update_authority_set::( - &authority_set, - new_authorities.as_ref(), - |insert| apply_aux(import_op, insert, &[]), - ); - - if let Err(e) = write_result { - warn!(target: "afg", "Failed to write updated authority set to disk. Bailing."); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - - return Err(e.into()); - } - } - - Ok(new_authorities.map(VoterCommand::ChangeAuthorities)) - }); - - match update_res { - Ok(Some(command)) => Err(CommandOrError::VoterCommand(command)), - Ok(None) => Ok(()), - Err(e) => { - *authority_set = old_authority_set; - - if let Some(old_consensus_changes) = old_consensus_changes { - *consensus_changes = old_consensus_changes; - } - - Err(CommandOrError::Error(e)) - } - } + telemetry!(CONSENSUS_INFO; "afg.finalized_blocks_up_to"; + "number" => ?number, "hash" => ?hash, + ); + + let new_authorities = if let Some((canon_hash, canon_number)) = status.new_set_block { + // the authority set has changed. 
+ let (new_id, set_ref) = authority_set.current(); + + if set_ref.len() > 16 { + afg_log!( + initial_sync, + "👴 Applying GRANDPA set change to new set with {} authorities", + set_ref.len(), + ); + } else { + afg_log!( + initial_sync, + "👴 Applying GRANDPA set change to new set {:?}", + set_ref, + ); + } + + telemetry!(CONSENSUS_INFO; "afg.generating_new_authority_set"; + "number" => ?canon_number, "hash" => ?canon_hash, + "authorities" => ?set_ref.to_vec(), + "set_id" => ?new_id, + ); + Some(NewAuthoritySet { + canon_hash, + canon_number, + set_id: new_id, + authorities: set_ref.to_vec(), + }) + } else { + None + }; + + if status.changed { + let write_result = crate::aux_schema::update_authority_set::( + &authority_set, + new_authorities.as_ref(), + |insert| apply_aux(import_op, insert, &[]), + ); + + if let Err(e) = write_result { + warn!(target: "afg", "Failed to write updated authority set to disk. Bailing."); + warn!(target: "afg", "Node is in a potentially inconsistent state."); + + return Err(e.into()); + } + } + + Ok(new_authorities.map(VoterCommand::ChangeAuthorities)) + }); + + match update_res { + Ok(Some(command)) => Err(CommandOrError::VoterCommand(command)), + Ok(None) => Ok(()), + Err(e) => { + *authority_set = old_authority_set; + + if let Some(old_consensus_changes) = old_consensus_changes { + *consensus_changes = old_consensus_changes; + } + + Err(CommandOrError::Error(e)) + } + } } /// Using the given base get the block at the given height on this chain. The /// target block must be an ancestor of base, therefore `height <= base.height`. 
pub(crate) fn canonical_at_height>( - provider: &C, - base: (Block::Hash, NumberFor), - base_is_canonical: bool, - height: NumberFor, + provider: &C, + base: (Block::Hash, NumberFor), + base_is_canonical: bool, + height: NumberFor, ) -> Result, ClientError> { - if height > base.1 { - return Ok(None); - } - - if height == base.1 { - if base_is_canonical { - return Ok(Some(base.0)); - } else { - return Ok(provider.hash(height).unwrap_or(None)); - } - } else if base_is_canonical { - return Ok(provider.hash(height).unwrap_or(None)); - } - - let one = NumberFor::::one(); - - // start by getting _canonical_ block with number at parent position and then iterating - // backwards by hash. - let mut current = match provider.header(BlockId::Number(base.1 - one))? { - Some(header) => header, - _ => return Ok(None), - }; - - // we've already checked that base > height above. - let mut steps = base.1 - height - one; - - while steps > NumberFor::::zero() { - current = match provider.header(BlockId::Hash(*current.parent_hash()))? { - Some(header) => header, - _ => return Ok(None), - }; - - steps -= one; - } - - Ok(Some(current.hash())) + if height > base.1 { + return Ok(None); + } + + if height == base.1 { + if base_is_canonical { + return Ok(Some(base.0)); + } else { + return Ok(provider.hash(height).unwrap_or(None)); + } + } else if base_is_canonical { + return Ok(provider.hash(height).unwrap_or(None)); + } + + let one = NumberFor::::one(); + + // start by getting _canonical_ block with number at parent position and then iterating + // backwards by hash. + let mut current = match provider.header(BlockId::Number(base.1 - one))? { + Some(header) => header, + _ => return Ok(None), + }; + + // we've already checked that base > height above. + let mut steps = base.1 - height - one; + + while steps > NumberFor::::zero() { + current = match provider.header(BlockId::Hash(*current.parent_hash()))? 
{ + Some(header) => header, + _ => return Ok(None), + }; + + steps -= one; + } + + Ok(Some(current.hash())) } diff --git a/client/finality-grandpa/src/finality_proof.rs b/client/finality-grandpa/src/finality_proof.rs index 2c85839b5e..034f1238ec 100644 --- a/client/finality-grandpa/src/finality_proof.rs +++ b/client/finality-grandpa/src/finality_proof.rs @@ -34,24 +34,27 @@ //! finality proof (that finalizes some block C that is ancestor of the B and descendant //! of the U) could be returned. -use std::sync::Arc; use log::{trace, warn}; +use std::sync::Arc; -use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; +use finality_grandpa::BlockNumberOps; +use parity_scale_codec::{Decode, Encode}; use sc_client_api::{ - backend::Backend, StorageProof, - light::{FetchChecker, RemoteReadRequest}, - StorageProvider, ProofProvider, + backend::Backend, + light::{FetchChecker, RemoteReadRequest}, + ProofProvider, StorageProof, StorageProvider, +}; +use sc_telemetry::{telemetry, CONSENSUS_INFO}; +use sp_blockchain::{Backend as BlockchainBackend, Error as ClientError, Result as ClientResult}; +use sp_core::storage::StorageKey; +use sp_finality_grandpa::{ + AuthorityId, AuthorityList, VersionedAuthorityList, GRANDPA_AUTHORITIES_KEY, }; -use parity_scale_codec::{Encode, Decode}; -use finality_grandpa::BlockNumberOps; use sp_runtime::{ - Justification, generic::BlockId, - traits::{NumberFor, Block as BlockT, Header as HeaderT, One}, + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT, NumberFor, One}, + Justification, }; -use sp_core::storage::StorageKey; -use sc_telemetry::{telemetry, CONSENSUS_INFO}; -use sp_finality_grandpa::{AuthorityId, AuthorityList, VersionedAuthorityList, GRANDPA_AUTHORITIES_KEY}; use crate::justification::GrandpaJustification; @@ -60,155 +63,159 @@ const MAX_FRAGMENTS_IN_PROOF: usize = 8; /// GRANDPA authority set related methods for the finality proof provider. 
pub trait AuthoritySetForFinalityProver: Send + Sync { - /// Read GRANDPA_AUTHORITIES_KEY from storage at given block. - fn authorities(&self, block: &BlockId) -> ClientResult; - /// Prove storage read of GRANDPA_AUTHORITIES_KEY at given block. - fn prove_authorities(&self, block: &BlockId) -> ClientResult; + /// Read GRANDPA_AUTHORITIES_KEY from storage at given block. + fn authorities(&self, block: &BlockId) -> ClientResult; + /// Prove storage read of GRANDPA_AUTHORITIES_KEY at given block. + fn prove_authorities(&self, block: &BlockId) -> ClientResult; } /// Trait that combines `StorageProvider` and `ProofProvider` -pub trait StorageAndProofProvider: StorageProvider + ProofProvider + Send + Sync - where - Block: BlockT, - BE: Backend + Send + Sync, -{} +pub trait StorageAndProofProvider: + StorageProvider + ProofProvider + Send + Sync +where + Block: BlockT, + BE: Backend + Send + Sync, +{ +} /// Blanket implementation. impl StorageAndProofProvider for P - where - Block: BlockT, - BE: Backend + Send + Sync, - P: StorageProvider + ProofProvider + Send + Sync, -{} +where + Block: BlockT, + BE: Backend + Send + Sync, + P: StorageProvider + ProofProvider + Send + Sync, +{ +} /// Implementation of AuthoritySetForFinalityProver. -impl AuthoritySetForFinalityProver for Arc> - where - BE: Backend + Send + Sync + 'static, +impl AuthoritySetForFinalityProver + for Arc> +where + BE: Backend + Send + Sync + 'static, { - fn authorities(&self, block: &BlockId) -> ClientResult { - let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec()); - self.storage(block, &storage_key)? 
- .and_then(|encoded| VersionedAuthorityList::decode(&mut encoded.0.as_slice()).ok()) - .map(|versioned| versioned.into()) - .ok_or(ClientError::InvalidAuthoritiesSet) - } - - fn prove_authorities(&self, block: &BlockId) -> ClientResult { - self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY)) - } + fn authorities(&self, block: &BlockId) -> ClientResult { + let storage_key = StorageKey(GRANDPA_AUTHORITIES_KEY.to_vec()); + self.storage(block, &storage_key)? + .and_then(|encoded| VersionedAuthorityList::decode(&mut encoded.0.as_slice()).ok()) + .map(|versioned| versioned.into()) + .ok_or(ClientError::InvalidAuthoritiesSet) + } + + fn prove_authorities(&self, block: &BlockId) -> ClientResult { + self.read_proof(block, &mut std::iter::once(GRANDPA_AUTHORITIES_KEY)) + } } /// GRANDPA authority set related methods for the finality proof checker. pub trait AuthoritySetForFinalityChecker: Send + Sync { - /// Check storage read proof of GRANDPA_AUTHORITIES_KEY at given block. - fn check_authorities_proof( - &self, - hash: Block::Hash, - header: Block::Header, - proof: StorageProof, - ) -> ClientResult; + /// Check storage read proof of GRANDPA_AUTHORITIES_KEY at given block. + fn check_authorities_proof( + &self, + hash: Block::Hash, + header: Block::Header, + proof: StorageProof, + ) -> ClientResult; } /// FetchChecker-based implementation of AuthoritySetForFinalityChecker. 
impl AuthoritySetForFinalityChecker for Arc> { - fn check_authorities_proof( - &self, - hash: Block::Hash, - header: Block::Header, - proof: StorageProof, - ) -> ClientResult { - let storage_key = GRANDPA_AUTHORITIES_KEY.to_vec(); - let request = RemoteReadRequest { - block: hash, - header, - keys: vec![storage_key.clone()], - retry_count: None, - }; - - self.check_read_proof(&request, proof) - .and_then(|results| { - let maybe_encoded = results.get(&storage_key) - .expect( - "storage_key is listed in the request keys; \ + fn check_authorities_proof( + &self, + hash: Block::Hash, + header: Block::Header, + proof: StorageProof, + ) -> ClientResult { + let storage_key = GRANDPA_AUTHORITIES_KEY.to_vec(); + let request = RemoteReadRequest { + block: hash, + header, + keys: vec![storage_key.clone()], + retry_count: None, + }; + + self.check_read_proof(&request, proof).and_then(|results| { + let maybe_encoded = results.get(&storage_key).expect( + "storage_key is listed in the request keys; \ check_read_proof must return a value for each requested key; - qed" - ); - maybe_encoded - .as_ref() - .and_then(|encoded| { - VersionedAuthorityList::decode(&mut encoded.as_slice()).ok() - }) - .map(|versioned| versioned.into()) - .ok_or(ClientError::InvalidAuthoritiesSet) - }) - } + qed", + ); + maybe_encoded + .as_ref() + .and_then(|encoded| VersionedAuthorityList::decode(&mut encoded.as_slice()).ok()) + .map(|versioned| versioned.into()) + .ok_or(ClientError::InvalidAuthoritiesSet) + }) + } } /// Finality proof provider for serving network requests. 
-pub struct FinalityProofProvider { - backend: Arc, - authority_provider: Arc>, +pub struct FinalityProofProvider { + backend: Arc, + authority_provider: Arc>, } impl FinalityProofProvider - where B: Backend + Send + Sync + 'static +where + B: Backend + Send + Sync + 'static, { - /// Create new finality proof provider using: - /// - /// - backend for accessing blockchain data; - /// - authority_provider for calling and proving runtime methods. - pub fn new

( - backend: Arc, - authority_provider: P, - ) -> Self - where P: AuthoritySetForFinalityProver + 'static, - { - FinalityProofProvider { backend, authority_provider: Arc::new(authority_provider) } - } + /// Create new finality proof provider using: + /// + /// - backend for accessing blockchain data; + /// - authority_provider for calling and proving runtime methods. + pub fn new

(backend: Arc, authority_provider: P) -> Self + where + P: AuthoritySetForFinalityProver + 'static, + { + FinalityProofProvider { + backend, + authority_provider: Arc::new(authority_provider), + } + } } impl sc_network::config::FinalityProofProvider for FinalityProofProvider - where - Block: BlockT, - NumberFor: BlockNumberOps, - B: Backend + Send + Sync + 'static, +where + Block: BlockT, + NumberFor: BlockNumberOps, + B: Backend + Send + Sync + 'static, { - fn prove_finality( - &self, - for_block: Block::Hash, - request: &[u8], - ) -> Result>, ClientError> { - let request: FinalityProofRequest = Decode::decode(&mut &request[..]) - .map_err(|e| { - warn!(target: "afg", "Unable to decode finality proof request: {}", e.what()); - ClientError::Backend(format!("Invalid finality proof request")) - })?; - match request { - FinalityProofRequest::Original(request) => prove_finality::<_, _, GrandpaJustification>( - &*self.backend.blockchain(), - &*self.authority_provider, - request.authorities_set_id, - request.last_finalized, - for_block, - ), - } - } + fn prove_finality( + &self, + for_block: Block::Hash, + request: &[u8], + ) -> Result>, ClientError> { + let request: FinalityProofRequest = Decode::decode(&mut &request[..]) + .map_err(|e| { + warn!(target: "afg", "Unable to decode finality proof request: {}", e.what()); + ClientError::Backend(format!("Invalid finality proof request")) + })?; + match request { + FinalityProofRequest::Original(request) => { + prove_finality::<_, _, GrandpaJustification>( + &*self.backend.blockchain(), + &*self.authority_provider, + request.authorities_set_id, + request.last_finalized, + for_block, + ) + } + } + } } /// The effects of block finality. #[derive(Debug, PartialEq)] pub struct FinalityEffects { - /// The (ordered) set of headers that could be imported. - pub headers_to_import: Vec

, - /// The hash of the block that could be finalized. - pub block: Header::Hash, - /// The justification for the block. - pub justification: Vec, - /// New authorities set id that should be applied starting from block. - pub new_set_id: u64, - /// New authorities set that should be applied starting from block. - pub new_authorities: AuthorityList, + /// The (ordered) set of headers that could be imported. + pub headers_to_import: Vec
, + /// The hash of the block that could be finalized. + pub block: Header::Hash, + /// The justification for the block. + pub justification: Vec, + /// New authorities set id that should be applied starting from block. + pub new_set_id: u64, + /// New authorities set that should be applied starting from block. + pub new_authorities: AuthorityList, } /// Single fragment of proof-of-finality. @@ -219,14 +226,14 @@ pub struct FinalityEffects { /// 3) proof of GRANDPA::authorities() if the set changes at block F. #[derive(Debug, PartialEq, Encode, Decode)] struct FinalityProofFragment { - /// The hash of block F for which justification is provided. - pub block: Header::Hash, - /// Justification of the block F. - pub justification: Vec, - /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. - pub unknown_headers: Vec
, - /// Optional proof of execution of GRANDPA::authorities() at the `block`. - pub authorities_proof: Option, + /// The hash of block F for which justification is provided. + pub block: Header::Hash, + /// Justification of the block F. + pub justification: Vec, + /// The set of headers in the range (U; F] that we believe are unknown to the caller. Ordered. + pub unknown_headers: Vec
, + /// Optional proof of execution of GRANDPA::authorities() at the `block`. + pub authorities_proof: Option, } /// Proof of finality is the ordered set of finality fragments, where: @@ -237,27 +244,31 @@ type FinalityProof
= Vec>; /// Finality proof request data. #[derive(Debug, Encode, Decode)] enum FinalityProofRequest { - /// Original version of the request. - Original(OriginalFinalityProofRequest), + /// Original version of the request. + Original(OriginalFinalityProofRequest), } /// Original version of finality proof request. #[derive(Debug, Encode, Decode)] struct OriginalFinalityProofRequest { - /// The authorities set id we are waiting proof from. - /// - /// The first justification in the proof must be signed by this authority set. - pub authorities_set_id: u64, - /// Hash of the last known finalized block. - pub last_finalized: H, + /// The authorities set id we are waiting proof from. + /// + /// The first justification in the proof must be signed by this authority set. + pub authorities_set_id: u64, + /// Hash of the last known finalized block. + pub last_finalized: H, } /// Prepare data blob associated with finality proof request. -pub(crate) fn make_finality_proof_request(last_finalized: H, authorities_set_id: u64) -> Vec { - FinalityProofRequest::Original(OriginalFinalityProofRequest { - authorities_set_id, - last_finalized, - }).encode() +pub(crate) fn make_finality_proof_request( + last_finalized: H, + authorities_set_id: u64, +) -> Vec { + FinalityProofRequest::Original(OriginalFinalityProofRequest { + authorities_set_id, + last_finalized, + }) + .encode() } /// Prepare proof-of-finality for the best possible block in the range: (begin; end]. @@ -267,157 +278,159 @@ pub(crate) fn make_finality_proof_request(last_finalized: H, /// /// Returns None if there are no finalized blocks unknown to the caller. 
pub(crate) fn prove_finality, J>( - blockchain: &B, - authorities_provider: &dyn AuthoritySetForFinalityProver, - authorities_set_id: u64, - begin: Block::Hash, - end: Block::Hash, + blockchain: &B, + authorities_provider: &dyn AuthoritySetForFinalityProver, + authorities_set_id: u64, + begin: Block::Hash, + end: Block::Hash, ) -> ::sp_blockchain::Result>> - where - J: ProvableJustification, +where + J: ProvableJustification, { - let begin_id = BlockId::Hash(begin); - let begin_number = blockchain.expect_block_number_from_id(&begin_id)?; - - // early-return if we sure that there are no blocks finalized AFTER begin block - let info = blockchain.info(); - if info.finalized_number <= begin_number { - trace!( - target: "afg", - "Requested finality proof for descendant of #{} while we only have finalized #{}. Returning empty proof.", - begin_number, - info.finalized_number, - ); - - return Ok(None); - } - - // check if blocks range is valid. It is the caller responsibility to ensure - // that it only asks peers that know about whole blocks range - let end_number = blockchain.expect_block_number_from_id(&BlockId::Hash(end))?; - if begin_number + One::one() > end_number { - return Err(ClientError::Backend( - format!("Cannot generate finality proof for invalid range: {}..{}", begin_number, end_number), - )); - } - - // early-return if we sure that the block is NOT a part of canonical chain - let canonical_begin = blockchain.expect_block_hash_from_id(&BlockId::Number(begin_number))?; - if begin != canonical_begin { - return Err(ClientError::Backend( - format!("Cannot generate finality proof for non-canonical block: {}", begin), - )); - } - - // iterate justifications && try to prove finality - let mut fragment_index = 0; - let mut current_authorities = authorities_provider.authorities(&begin_id)?; - let mut current_number = begin_number + One::one(); - let mut finality_proof = Vec::new(); - let mut unknown_headers = Vec::new(); - let mut latest_proof_fragment = None; - let 
begin_authorities = current_authorities.clone(); - loop { - let current_id = BlockId::Number(current_number); - - // check if header is unknown to the caller - if current_number > end_number { - let unknown_header = blockchain.expect_header(current_id)?; - unknown_headers.push(unknown_header); - } - - if let Some(justification) = blockchain.justification(current_id)? { - // check if the current block enacts new GRANDPA authorities set - let new_authorities = authorities_provider.authorities(¤t_id)?; - let new_authorities_proof = if current_authorities != new_authorities { - current_authorities = new_authorities; - Some(authorities_provider.prove_authorities(¤t_id)?) - } else { - None - }; - - // prepare finality proof for the current block - let current = blockchain.expect_block_hash_from_id(&BlockId::Number(current_number))?; - let proof_fragment = FinalityProofFragment { - block: current, - justification, - unknown_headers: ::std::mem::replace(&mut unknown_headers, Vec::new()), - authorities_proof: new_authorities_proof, - }; - - // append justification to finality proof if required - let justifies_end_block = current_number >= end_number; - let justifies_authority_set_change = proof_fragment.authorities_proof.is_some(); - if justifies_end_block || justifies_authority_set_change { - // check if the proof is generated by the requested authority set - if finality_proof.is_empty() { - let justification_check_result = J::decode_and_verify( - &proof_fragment.justification, - authorities_set_id, - &begin_authorities, - ); - if justification_check_result.is_err() { - trace!( - target: "afg", - "Can not provide finality proof with requested set id #{}\ - (possible forced change?). 
Returning empty proof.", - authorities_set_id, - ); - - return Ok(None); - } - } - - finality_proof.push(proof_fragment); - latest_proof_fragment = None; - } else { - latest_proof_fragment = Some(proof_fragment); - } - - // we don't need to provide more justifications - if justifies_end_block { - break; - } - } - - // we can't provide more justifications - if current_number == info.finalized_number { - // append last justification - even if we can't generate finality proof for - // the end block, we try to generate it for the latest possible block - if let Some(latest_proof_fragment) = latest_proof_fragment.take() { - finality_proof.push(latest_proof_fragment); - - fragment_index += 1; - if fragment_index == MAX_FRAGMENTS_IN_PROOF { - break; - } - } - break; - } - - // else search for the next justification - current_number = current_number + One::one(); - } - - if finality_proof.is_empty() { - trace!( - target: "afg", - "No justifications found when making finality proof for {}. Returning empty proof.", - end, - ); - - Ok(None) - } else { - trace!( - target: "afg", - "Built finality proof for {} of {} fragments. Last fragment for {}.", - end, - finality_proof.len(), - finality_proof.last().expect("checked that !finality_proof.is_empty(); qed").block, - ); - - Ok(Some(finality_proof.encode())) - } + let begin_id = BlockId::Hash(begin); + let begin_number = blockchain.expect_block_number_from_id(&begin_id)?; + + // early-return if we sure that there are no blocks finalized AFTER begin block + let info = blockchain.info(); + if info.finalized_number <= begin_number { + trace!( + target: "afg", + "Requested finality proof for descendant of #{} while we only have finalized #{}. Returning empty proof.", + begin_number, + info.finalized_number, + ); + + return Ok(None); + } + + // check if blocks range is valid. 
It is the caller responsibility to ensure + // that it only asks peers that know about whole blocks range + let end_number = blockchain.expect_block_number_from_id(&BlockId::Hash(end))?; + if begin_number + One::one() > end_number { + return Err(ClientError::Backend(format!( + "Cannot generate finality proof for invalid range: {}..{}", + begin_number, end_number + ))); + } + + // early-return if we sure that the block is NOT a part of canonical chain + let canonical_begin = blockchain.expect_block_hash_from_id(&BlockId::Number(begin_number))?; + if begin != canonical_begin { + return Err(ClientError::Backend(format!( + "Cannot generate finality proof for non-canonical block: {}", + begin + ))); + } + + // iterate justifications && try to prove finality + let mut fragment_index = 0; + let mut current_authorities = authorities_provider.authorities(&begin_id)?; + let mut current_number = begin_number + One::one(); + let mut finality_proof = Vec::new(); + let mut unknown_headers = Vec::new(); + let mut latest_proof_fragment = None; + let begin_authorities = current_authorities.clone(); + loop { + let current_id = BlockId::Number(current_number); + + // check if header is unknown to the caller + if current_number > end_number { + let unknown_header = blockchain.expect_header(current_id)?; + unknown_headers.push(unknown_header); + } + + if let Some(justification) = blockchain.justification(current_id)? { + // check if the current block enacts new GRANDPA authorities set + let new_authorities = authorities_provider.authorities(¤t_id)?; + let new_authorities_proof = if current_authorities != new_authorities { + current_authorities = new_authorities; + Some(authorities_provider.prove_authorities(¤t_id)?) 
+ } else { + None + }; + + // prepare finality proof for the current block + let current = blockchain.expect_block_hash_from_id(&BlockId::Number(current_number))?; + let proof_fragment = FinalityProofFragment { + block: current, + justification, + unknown_headers: ::std::mem::replace(&mut unknown_headers, Vec::new()), + authorities_proof: new_authorities_proof, + }; + + // append justification to finality proof if required + let justifies_end_block = current_number >= end_number; + let justifies_authority_set_change = proof_fragment.authorities_proof.is_some(); + if justifies_end_block || justifies_authority_set_change { + // check if the proof is generated by the requested authority set + if finality_proof.is_empty() { + let justification_check_result = J::decode_and_verify( + &proof_fragment.justification, + authorities_set_id, + &begin_authorities, + ); + if justification_check_result.is_err() { + trace!( + target: "afg", + "Can not provide finality proof with requested set id #{}\ + (possible forced change?). 
Returning empty proof.", + authorities_set_id, + ); + + return Ok(None); + } + } + + finality_proof.push(proof_fragment); + latest_proof_fragment = None; + } else { + latest_proof_fragment = Some(proof_fragment); + } + + // we don't need to provide more justifications + if justifies_end_block { + break; + } + } + + // we can't provide more justifications + if current_number == info.finalized_number { + // append last justification - even if we can't generate finality proof for + // the end block, we try to generate it for the latest possible block + if let Some(latest_proof_fragment) = latest_proof_fragment.take() { + finality_proof.push(latest_proof_fragment); + + fragment_index += 1; + if fragment_index == MAX_FRAGMENTS_IN_PROOF { + break; + } + } + break; + } + + // else search for the next justification + current_number = current_number + One::one(); + } + + if finality_proof.is_empty() { + trace!( + target: "afg", + "No justifications found when making finality proof for {}. Returning empty proof.", + end, + ); + + Ok(None) + } else { + trace!( + target: "afg", + "Built finality proof for {} of {} fragments. Last fragment for {}.", + end, + finality_proof.len(), + finality_proof.last().expect("checked that !finality_proof.is_empty(); qed").block, + ); + + Ok(Some(finality_proof.encode())) + } } /// Check GRANDPA proof-of-finality for the given block. @@ -425,616 +438,857 @@ pub(crate) fn prove_finality, J>( /// Returns the vector of headers that MUST be validated + imported /// AND if at least one of those headers is invalid, all other MUST be considered invalid. 
pub(crate) fn check_finality_proof( - blockchain: &B, - current_set_id: u64, - current_authorities: AuthorityList, - authorities_provider: &dyn AuthoritySetForFinalityChecker, - remote_proof: Vec, + blockchain: &B, + current_set_id: u64, + current_authorities: AuthorityList, + authorities_provider: &dyn AuthoritySetForFinalityChecker, + remote_proof: Vec, ) -> ClientResult> - where - NumberFor: BlockNumberOps, - B: BlockchainBackend, +where + NumberFor: BlockNumberOps, + B: BlockchainBackend, { - do_check_finality_proof::<_, _, GrandpaJustification>( - blockchain, - current_set_id, - current_authorities, - authorities_provider, - remote_proof) + do_check_finality_proof::<_, _, GrandpaJustification>( + blockchain, + current_set_id, + current_authorities, + authorities_provider, + remote_proof, + ) } fn do_check_finality_proof( - blockchain: &B, - current_set_id: u64, - current_authorities: AuthorityList, - authorities_provider: &dyn AuthoritySetForFinalityChecker, - remote_proof: Vec, + blockchain: &B, + current_set_id: u64, + current_authorities: AuthorityList, + authorities_provider: &dyn AuthoritySetForFinalityChecker, + remote_proof: Vec, ) -> ClientResult> - where - NumberFor: BlockNumberOps, - B: BlockchainBackend, - J: ProvableJustification, +where + NumberFor: BlockNumberOps, + B: BlockchainBackend, + J: ProvableJustification, { - // decode finality proof - let proof = FinalityProof::::decode(&mut &remote_proof[..]) - .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; - - // empty proof can't prove anything - if proof.is_empty() { - return Err(ClientError::BadJustification("empty proof of finality".into())); - } - - // iterate and verify proof fragments - let last_fragment_index = proof.len() - 1; - let mut authorities = AuthoritiesOrEffects::Authorities(current_set_id, current_authorities); - for (proof_fragment_index, proof_fragment) in proof.into_iter().enumerate() { - // check that proof is non-redundant. 
The proof still can be valid, but - // we do not want peer to spam us with redundant data - if proof_fragment_index != last_fragment_index { - let has_unknown_headers = !proof_fragment.unknown_headers.is_empty(); - let has_new_authorities = proof_fragment.authorities_proof.is_some(); - if has_unknown_headers || !has_new_authorities { - return Err(ClientError::BadJustification("redundant proof of finality".into())); - } - } - - authorities = check_finality_proof_fragment::<_, _, J>( - blockchain, - authorities, - authorities_provider, - proof_fragment)?; - } - - let effects = authorities.extract_effects().expect("at least one loop iteration is guaranteed + // decode finality proof + let proof = FinalityProof::::decode(&mut &remote_proof[..]) + .map_err(|_| ClientError::BadJustification("failed to decode finality proof".into()))?; + + // empty proof can't prove anything + if proof.is_empty() { + return Err(ClientError::BadJustification( + "empty proof of finality".into(), + )); + } + + // iterate and verify proof fragments + let last_fragment_index = proof.len() - 1; + let mut authorities = AuthoritiesOrEffects::Authorities(current_set_id, current_authorities); + for (proof_fragment_index, proof_fragment) in proof.into_iter().enumerate() { + // check that proof is non-redundant. 
The proof still can be valid, but + // we do not want peer to spam us with redundant data + if proof_fragment_index != last_fragment_index { + let has_unknown_headers = !proof_fragment.unknown_headers.is_empty(); + let has_new_authorities = proof_fragment.authorities_proof.is_some(); + if has_unknown_headers || !has_new_authorities { + return Err(ClientError::BadJustification( + "redundant proof of finality".into(), + )); + } + } + + authorities = check_finality_proof_fragment::<_, _, J>( + blockchain, + authorities, + authorities_provider, + proof_fragment, + )?; + } + + let effects = authorities.extract_effects().expect( + "at least one loop iteration is guaranteed because proof is not empty;\ check_finality_proof_fragment is called on every iteration;\ check_finality_proof_fragment always returns FinalityEffects;\ - qed"); + qed", + ); - telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; + telemetry!(CONSENSUS_INFO; "afg.finality_proof_ok"; "set_id" => ?effects.new_set_id, "finalized_header_hash" => ?effects.block); - Ok(effects) + Ok(effects) } /// Check finality proof for the single block. 
fn check_finality_proof_fragment( - blockchain: &B, - authority_set: AuthoritiesOrEffects, - authorities_provider: &dyn AuthoritySetForFinalityChecker, - proof_fragment: FinalityProofFragment, + blockchain: &B, + authority_set: AuthoritiesOrEffects, + authorities_provider: &dyn AuthoritySetForFinalityChecker, + proof_fragment: FinalityProofFragment, ) -> ClientResult> - where - NumberFor: BlockNumberOps, - B: BlockchainBackend, - J: Decode + ProvableJustification, +where + NumberFor: BlockNumberOps, + B: BlockchainBackend, + J: Decode + ProvableJustification, { - // verify justification using previous authorities set - let (mut current_set_id, mut current_authorities) = authority_set.extract_authorities(); - let justification: J = Decode::decode(&mut &proof_fragment.justification[..]) - .map_err(|_| ClientError::JustificationDecode)?; - justification.verify(current_set_id, ¤t_authorities)?; - - // and now verify new authorities proof (if provided) - if let Some(new_authorities_proof) = proof_fragment.authorities_proof { - // the proof is either generated using known header and it is safe to query header - // here, because its non-finality proves that it can't be pruned - // or it is generated using last unknown header (because it is the one who has - // justification => we only generate proofs for headers with justifications) - let header = match proof_fragment.unknown_headers.iter().rev().next().cloned() { - Some(header) => header, - None => blockchain.expect_header(BlockId::Hash(proof_fragment.block))?, - }; - current_authorities = authorities_provider.check_authorities_proof( - proof_fragment.block, - header, - new_authorities_proof, - )?; - - current_set_id = current_set_id + 1; - } - - Ok(AuthoritiesOrEffects::Effects(FinalityEffects { - headers_to_import: proof_fragment.unknown_headers, - block: proof_fragment.block, - justification: proof_fragment.justification, - new_set_id: current_set_id, - new_authorities: current_authorities, - })) + // verify 
justification using previous authorities set + let (mut current_set_id, mut current_authorities) = authority_set.extract_authorities(); + let justification: J = Decode::decode(&mut &proof_fragment.justification[..]) + .map_err(|_| ClientError::JustificationDecode)?; + justification.verify(current_set_id, ¤t_authorities)?; + + // and now verify new authorities proof (if provided) + if let Some(new_authorities_proof) = proof_fragment.authorities_proof { + // the proof is either generated using known header and it is safe to query header + // here, because its non-finality proves that it can't be pruned + // or it is generated using last unknown header (because it is the one who has + // justification => we only generate proofs for headers with justifications) + let header = match proof_fragment.unknown_headers.iter().rev().next().cloned() { + Some(header) => header, + None => blockchain.expect_header(BlockId::Hash(proof_fragment.block))?, + }; + current_authorities = authorities_provider.check_authorities_proof( + proof_fragment.block, + header, + new_authorities_proof, + )?; + + current_set_id = current_set_id + 1; + } + + Ok(AuthoritiesOrEffects::Effects(FinalityEffects { + headers_to_import: proof_fragment.unknown_headers, + block: proof_fragment.block, + justification: proof_fragment.justification, + new_set_id: current_set_id, + new_authorities: current_authorities, + })) } /// Authorities set from initial authorities set or finality effects. enum AuthoritiesOrEffects { - Authorities(u64, AuthorityList), - Effects(FinalityEffects
), + Authorities(u64, AuthorityList), + Effects(FinalityEffects
), } impl AuthoritiesOrEffects
{ - pub fn extract_authorities(self) -> (u64, AuthorityList) { - match self { - AuthoritiesOrEffects::Authorities(set_id, authorities) => (set_id, authorities), - AuthoritiesOrEffects::Effects(effects) => (effects.new_set_id, effects.new_authorities), - } - } - - pub fn extract_effects(self) -> Option> { - match self { - AuthoritiesOrEffects::Authorities(_, _) => None, - AuthoritiesOrEffects::Effects(effects) => Some(effects), - } - } + pub fn extract_authorities(self) -> (u64, AuthorityList) { + match self { + AuthoritiesOrEffects::Authorities(set_id, authorities) => (set_id, authorities), + AuthoritiesOrEffects::Effects(effects) => (effects.new_set_id, effects.new_authorities), + } + } + + pub fn extract_effects(self) -> Option> { + match self { + AuthoritiesOrEffects::Authorities(_, _) => None, + AuthoritiesOrEffects::Effects(effects) => Some(effects), + } + } } /// Justification used to prove block finality. pub(crate) trait ProvableJustification: Encode + Decode { - /// Verify justification with respect to authorities set and authorities set id. - fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()>; - - /// Decode and verify justification. - fn decode_and_verify( - justification: &Justification, - set_id: u64, - authorities: &[(AuthorityId, u64)], - ) -> ClientResult { - let justification = Self::decode(&mut &**justification) - .map_err(|_| ClientError::JustificationDecode)?; - justification.verify(set_id, authorities)?; - Ok(justification) - } + /// Verify justification with respect to authorities set and authorities set id. + fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()>; + + /// Decode and verify justification. 
+ fn decode_and_verify( + justification: &Justification, + set_id: u64, + authorities: &[(AuthorityId, u64)], + ) -> ClientResult { + let justification = + Self::decode(&mut &**justification).map_err(|_| ClientError::JustificationDecode)?; + justification.verify(set_id, authorities)?; + Ok(justification) + } } impl ProvableJustification for GrandpaJustification - where - NumberFor: BlockNumberOps, +where + NumberFor: BlockNumberOps, { - fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { - GrandpaJustification::verify(self, set_id, &authorities.iter().cloned().collect()) - } + fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { + GrandpaJustification::verify(self, set_id, &authorities.iter().cloned().collect()) + } } #[cfg(test)] pub(crate) mod tests { - use substrate_test_runtime_client::runtime::{Block, Header, H256}; - use sc_client_api::NewBlockState; - use substrate_test_runtime_client::sc_client::in_mem::Blockchain as InMemoryBlockchain; - use super::*; - use sp_core::crypto::Public; - - type FinalityProof = super::FinalityProof
; - - impl AuthoritySetForFinalityProver for (GetAuthorities, ProveAuthorities) - where - GetAuthorities: Send + Sync + Fn(BlockId) -> ClientResult, - ProveAuthorities: Send + Sync + Fn(BlockId) -> ClientResult, - { - fn authorities(&self, block: &BlockId) -> ClientResult { - self.0(*block) - } - - fn prove_authorities(&self, block: &BlockId) -> ClientResult { - self.1(*block) - } - } - - struct ClosureAuthoritySetForFinalityChecker(pub Closure); - - impl AuthoritySetForFinalityChecker for ClosureAuthoritySetForFinalityChecker - where - Closure: Send + Sync + Fn(H256, Header, StorageProof) -> ClientResult, - { - fn check_authorities_proof( - &self, - hash: H256, - header: Header, - proof: StorageProof, - ) -> ClientResult { - self.0(hash, header, proof) - } - } - - #[derive(Debug, PartialEq, Encode, Decode)] - pub struct TestJustification(pub (u64, AuthorityList), pub Vec); - - impl ProvableJustification
for TestJustification { - fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { - if (self.0).0 != set_id || (self.0).1 != authorities { - return Err(ClientError::BadJustification("test".into())); - } - - Ok(()) - } - } - - fn header(number: u64) -> Header { - let parent_hash = match number { - 0 => Default::default(), - _ => header(number - 1).hash(), - }; - Header::new(number, H256::from_low_u64_be(0), H256::from_low_u64_be(0), parent_hash, Default::default()) - } - - fn side_header(number: u64) -> Header { - Header::new( - number, - H256::from_low_u64_be(0), - H256::from_low_u64_be(1), - header(number - 1).hash(), - Default::default(), - ) - } - - fn second_side_header(number: u64) -> Header { - Header::new( - number, - H256::from_low_u64_be(0), - H256::from_low_u64_be(1), - side_header(number - 1).hash(), - Default::default(), - ) - } - - fn test_blockchain() -> InMemoryBlockchain { - let blockchain = InMemoryBlockchain::::new(); - blockchain.insert(header(0).hash(), header(0), Some(vec![0]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(1).hash(), header(1), Some(vec![1]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(2).hash(), header(2), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(header(3).hash(), header(3), Some(vec![3]), None, NewBlockState::Final).unwrap(); - blockchain - } - - #[test] - fn finality_prove_fails_with_invalid_range() { - let blockchain = test_blockchain(); - - // their last finalized is: 2 - // they request for proof-of-finality of: 2 - // => range is invalid - prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(2).hash(), - header(2).hash(), - ).unwrap_err(); - } - - #[test] - fn finality_proof_is_none_if_no_more_last_finalized_blocks() { - let blockchain = test_blockchain(); - 
blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - - // our last finalized is: 3 - // their last finalized is: 3 - // => we can't provide any additional justifications - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert_eq!(proof_of_4, None); - } - - #[test] - fn finality_proof_fails_for_non_canonical_block() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(side_header(4).hash(), side_header(4), None, None, NewBlockState::Best).unwrap(); - blockchain.insert(second_side_header(5).hash(), second_side_header(5), None, None, NewBlockState::Best) - .unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(vec![5]), None, NewBlockState::Final).unwrap(); - - // chain is 1 -> 2 -> 3 -> 4 -> 5 - // \> 4' -> 5' - // and the best finalized is 5 - // => when requesting for (4'; 5'], error is returned - prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| unreachable!("should return before calling GetAuthorities"), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - side_header(4).hash(), - second_side_header(5).hash(), - ).unwrap_err(); - } - - #[test] - fn finality_proof_is_none_if_no_justification_known() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); - - // block 4 is finalized without justification - // => we can't prove finality - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("authorities didn't change => ProveAuthorities won't be called"), - ), - 
0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert_eq!(proof_of_4, None); - } - - #[test] - fn finality_proof_works_without_authorities_change() { - let blockchain = test_blockchain(); - let authorities = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; - let just4 = TestJustification((0, authorities.clone()), vec![4]).encode(); - let just5 = TestJustification((0, authorities.clone()), vec![5]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); - - // blocks 4 && 5 are finalized with justification - // => since authorities are the same, we only need justification for 5 - let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(authorities.clone()), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(5).hash(), - ).unwrap().unwrap()[..]).unwrap(); - assert_eq!(proof_of_5, vec![FinalityProofFragment { - block: header(5).hash(), - justification: just5, - unknown_headers: Vec::new(), - authorities_proof: None, - }]); - } - - #[test] - fn finality_proof_finalized_earlier_block_if_no_justification_for_target_is_known() { - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), Some(vec![4]), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); - - // block 4 is finalized with justification + we request for finality of 5 - // => we can't prove finality of 5, but providing finality for 4 is still useful for requester - let proof_of_5: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("should return before calling ProveAuthorities"), - ), 
- 0, - header(3).hash(), - header(5).hash(), - ).unwrap().unwrap()[..]).unwrap(); - assert_eq!(proof_of_5, vec![FinalityProofFragment { - block: header(4).hash(), - justification: vec![4], - unknown_headers: Vec::new(), - authorities_proof: None, - }]); - } - - #[test] - fn finality_proof_works_with_authorities_change() { - let blockchain = test_blockchain(); - let auth3 = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - let auth5 = vec![(AuthorityId::from_slice(&[5u8; 32]), 1u64)]; - let auth7 = vec![(AuthorityId::from_slice(&[7u8; 32]), 1u64)]; - let just4 = TestJustification((0, auth3.clone()), vec![4]).encode(); - let just5 = TestJustification((0, auth3.clone()), vec![5]).encode(); - let just7 = TestJustification((1, auth5.clone()), vec![7]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), Some(just5.clone()), None, NewBlockState::Final).unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(7).hash(), header(7), Some(just7.clone()), None, NewBlockState::Final).unwrap(); - - // when querying for finality of 6, we assume that the #3 is the last block known to the requester - // => since we only have justification for #7, we provide #7 - let proof_of_6: FinalityProof = Decode::decode(&mut &prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |block_id| match block_id { - BlockId::Hash(h) if h == header(3).hash() => Ok(auth3.clone()), - BlockId::Number(4) => Ok(auth3.clone()), - BlockId::Number(5) => Ok(auth5.clone()), - BlockId::Number(7) => Ok(auth7.clone()), - _ => unreachable!("no other authorities should be fetched: {:?}", block_id), - }, - |block_id| match block_id { - BlockId::Number(5) => Ok(StorageProof::new(vec![vec![50]])), - BlockId::Number(7) => Ok(StorageProof::new(vec![vec![70]])), - _ => unreachable!("no other authorities should be proved: {:?}", 
block_id), - }, - ), - 0, - header(3).hash(), - header(6).hash(), - ).unwrap().unwrap()[..]).unwrap(); - // initial authorities set (which start acting from #0) is [3; 32] - assert_eq!(proof_of_6, vec![ - // new authorities set starts acting from #5 => we do not provide fragment for #4 - // first fragment provides justification for #5 && authorities set that starts acting from #5 - FinalityProofFragment { - block: header(5).hash(), - justification: just5, - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![50]])), - }, - // last fragment provides justification for #7 && unknown#7 - FinalityProofFragment { - block: header(7).hash(), - justification: just7.clone(), - unknown_headers: vec![header(7)], - authorities_proof: Some(StorageProof::new(vec![vec![70]])), - }, - ]); - - // now let's verify finality proof - let blockchain = test_blockchain(); - blockchain.insert(header(4).hash(), header(4), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(5).hash(), header(5), None, None, NewBlockState::Final).unwrap(); - blockchain.insert(header(6).hash(), header(6), None, None, NewBlockState::Final).unwrap(); - let effects = do_check_finality_proof::<_, _, TestJustification>( - &blockchain, - 0, - auth3, - &ClosureAuthoritySetForFinalityChecker( - |hash, _header, proof: StorageProof| match proof.clone().iter_nodes().next().map(|x| x[0]) { - Some(50) => Ok(auth5.clone()), - Some(70) => Ok(auth7.clone()), - _ => unreachable!("no other proofs should be checked: {}", hash), - } - ), - proof_of_6.encode(), - ).unwrap(); - - assert_eq!(effects, FinalityEffects { - headers_to_import: vec![header(7)], - block: header(7).hash(), - justification: TestJustification((1, auth5.clone()), vec![7]).encode(), - new_set_id: 2, - new_authorities: auth7, - }); - } - - #[test] - fn finality_proof_check_fails_when_proof_decode_fails() { - let blockchain = test_blockchain(); - - // when we can't decode proof from Vec - 
do_check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - vec![42], - ).unwrap_err(); - } - - #[test] - fn finality_proof_check_fails_when_proof_is_empty() { - let blockchain = test_blockchain(); - - // when decoded proof has zero length - do_check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - Vec::::new().encode(), - ).unwrap_err(); - } - - #[test] - fn finality_proof_check_fails_when_intermediate_fragment_has_unknown_headers() { - let blockchain = test_blockchain(); - - // when intermediate (#0) fragment has non-empty unknown headers - let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - do_check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - vec![FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((0, authorities.clone()), vec![7]).encode(), - unknown_headers: vec![header(4)], - authorities_proof: Some(StorageProof::new(vec![vec![42]])), - }, FinalityProofFragment { - block: header(5).hash(), - justification: TestJustification((0, authorities), vec![8]).encode(), - unknown_headers: vec![header(5)], - authorities_proof: None, - }].encode(), - ).unwrap_err(); - } - - #[test] - fn finality_proof_check_fails_when_intermediate_fragment_has_no_authorities_proof() { - let blockchain = test_blockchain(); - - // when intermediate (#0) fragment has empty authorities proof - let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - do_check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - 
authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| unreachable!("returns before CheckAuthoritiesProof")), - vec![FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((0, authorities.clone()), vec![7]).encode(), - unknown_headers: Vec::new(), - authorities_proof: None, - }, FinalityProofFragment { - block: header(5).hash(), - justification: TestJustification((0, authorities), vec![8]).encode(), - unknown_headers: vec![header(5)], - authorities_proof: None, - }].encode(), - ).unwrap_err(); - } - - #[test] - fn finality_proof_check_works() { - let blockchain = test_blockchain(); - - let initial_authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; - let next_authorities = vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)]; - let effects = do_check_finality_proof::<_, _, TestJustification>( - &blockchain, - 1, - initial_authorities.clone(), - &ClosureAuthoritySetForFinalityChecker(|_, _, _| Ok(next_authorities.clone())), - vec![FinalityProofFragment { - block: header(2).hash(), - justification: TestJustification((1, initial_authorities.clone()), vec![7]).encode(), - unknown_headers: Vec::new(), - authorities_proof: Some(StorageProof::new(vec![vec![42]])), - }, FinalityProofFragment { - block: header(4).hash(), - justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), - unknown_headers: vec![header(4)], - authorities_proof: None, - }].encode(), - ).unwrap(); - assert_eq!(effects, FinalityEffects { - headers_to_import: vec![header(4)], - block: header(4).hash(), - justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), - new_set_id: 2, - new_authorities: vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)], - }); - } - - #[test] - fn finality_proof_is_none_if_first_justification_is_generated_by_unknown_set() { - // this is the case for forced change: set_id has been forcibly increased on full node - // and light node missed that - // => justification 
verification will fail on light node anyways, so we do not return - // finality proof at all - let blockchain = test_blockchain(); - let just4 = TestJustification((0, vec![(AuthorityId::from_slice(&[42u8; 32]), 1u64)]), vec![4]).encode(); - blockchain.insert(header(4).hash(), header(4), Some(just4), None, NewBlockState::Final).unwrap(); - - let proof_of_4 = prove_finality::<_, _, TestJustification>( - &blockchain, - &( - |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), - |_| unreachable!("should return before calling ProveAuthorities"), - ), - 0, - header(3).hash(), - header(4).hash(), - ).unwrap(); - assert!(proof_of_4.is_none()); - } + use super::*; + use sc_client_api::NewBlockState; + use sp_core::crypto::Public; + use substrate_test_runtime_client::runtime::{Block, Header, H256}; + use substrate_test_runtime_client::sc_client::in_mem::Blockchain as InMemoryBlockchain; + + type FinalityProof = super::FinalityProof
; + + impl AuthoritySetForFinalityProver + for (GetAuthorities, ProveAuthorities) + where + GetAuthorities: Send + Sync + Fn(BlockId) -> ClientResult, + ProveAuthorities: Send + Sync + Fn(BlockId) -> ClientResult, + { + fn authorities(&self, block: &BlockId) -> ClientResult { + self.0(*block) + } + + fn prove_authorities(&self, block: &BlockId) -> ClientResult { + self.1(*block) + } + } + + struct ClosureAuthoritySetForFinalityChecker(pub Closure); + + impl AuthoritySetForFinalityChecker + for ClosureAuthoritySetForFinalityChecker + where + Closure: Send + Sync + Fn(H256, Header, StorageProof) -> ClientResult, + { + fn check_authorities_proof( + &self, + hash: H256, + header: Header, + proof: StorageProof, + ) -> ClientResult { + self.0(hash, header, proof) + } + } + + #[derive(Debug, PartialEq, Encode, Decode)] + pub struct TestJustification(pub (u64, AuthorityList), pub Vec); + + impl ProvableJustification
for TestJustification { + fn verify(&self, set_id: u64, authorities: &[(AuthorityId, u64)]) -> ClientResult<()> { + if (self.0).0 != set_id || (self.0).1 != authorities { + return Err(ClientError::BadJustification("test".into())); + } + + Ok(()) + } + } + + fn header(number: u64) -> Header { + let parent_hash = match number { + 0 => Default::default(), + _ => header(number - 1).hash(), + }; + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(0), + parent_hash, + Default::default(), + ) + } + + fn side_header(number: u64) -> Header { + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(1), + header(number - 1).hash(), + Default::default(), + ) + } + + fn second_side_header(number: u64) -> Header { + Header::new( + number, + H256::from_low_u64_be(0), + H256::from_low_u64_be(1), + side_header(number - 1).hash(), + Default::default(), + ) + } + + fn test_blockchain() -> InMemoryBlockchain { + let blockchain = InMemoryBlockchain::::new(); + blockchain + .insert( + header(0).hash(), + header(0), + Some(vec![0]), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(1).hash(), + header(1), + Some(vec![1]), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert(header(2).hash(), header(2), None, None, NewBlockState::Best) + .unwrap(); + blockchain + .insert( + header(3).hash(), + header(3), + Some(vec![3]), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + } + + #[test] + fn finality_prove_fails_with_invalid_range() { + let blockchain = test_blockchain(); + + // their last finalized is: 2 + // they request for proof-of-finality of: 2 + // => range is invalid + prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| unreachable!("should return before calling GetAuthorities"), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(2).hash(), + header(2).hash(), + ) + .unwrap_err(); + } + + #[test] + fn 
finality_proof_is_none_if_no_more_last_finalized_blocks() { + let blockchain = test_blockchain(); + blockchain + .insert(header(4).hash(), header(4), None, None, NewBlockState::Best) + .unwrap(); + + // our last finalized is: 3 + // their last finalized is: 3 + // => we can't provide any additional justifications + let proof_of_4 = prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| unreachable!("should return before calling GetAuthorities"), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(4).hash(), + ) + .unwrap(); + assert_eq!(proof_of_4, None); + } + + #[test] + fn finality_proof_fails_for_non_canonical_block() { + let blockchain = test_blockchain(); + blockchain + .insert(header(4).hash(), header(4), None, None, NewBlockState::Best) + .unwrap(); + blockchain + .insert( + side_header(4).hash(), + side_header(4), + None, + None, + NewBlockState::Best, + ) + .unwrap(); + blockchain + .insert( + second_side_header(5).hash(), + second_side_header(5), + None, + None, + NewBlockState::Best, + ) + .unwrap(); + blockchain + .insert( + header(5).hash(), + header(5), + Some(vec![5]), + None, + NewBlockState::Final, + ) + .unwrap(); + + // chain is 1 -> 2 -> 3 -> 4 -> 5 + // \> 4' -> 5' + // and the best finalized is 5 + // => when requesting for (4'; 5'], error is returned + prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| unreachable!("should return before calling GetAuthorities"), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + side_header(4).hash(), + second_side_header(5).hash(), + ) + .unwrap_err(); + } + + #[test] + fn finality_proof_is_none_if_no_justification_known() { + let blockchain = test_blockchain(); + blockchain + .insert( + header(4).hash(), + header(4), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + + // block 4 is finalized without justification + // => we can't prove finality + let proof_of_4 = 
prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), + |_| unreachable!("authorities didn't change => ProveAuthorities won't be called"), + ), + 0, + header(3).hash(), + header(4).hash(), + ) + .unwrap(); + assert_eq!(proof_of_4, None); + } + + #[test] + fn finality_proof_works_without_authorities_change() { + let blockchain = test_blockchain(); + let authorities = vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]; + let just4 = TestJustification((0, authorities.clone()), vec![4]).encode(); + let just5 = TestJustification((0, authorities.clone()), vec![5]).encode(); + blockchain + .insert( + header(4).hash(), + header(4), + Some(just4), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(5).hash(), + header(5), + Some(just5.clone()), + None, + NewBlockState::Final, + ) + .unwrap(); + + // blocks 4 && 5 are finalized with justification + // => since authorities are the same, we only need justification for 5 + let proof_of_5: FinalityProof = Decode::decode( + &mut &prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(authorities.clone()), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(5).hash(), + ) + .unwrap() + .unwrap()[..], + ) + .unwrap(); + assert_eq!( + proof_of_5, + vec![FinalityProofFragment { + block: header(5).hash(), + justification: just5, + unknown_headers: Vec::new(), + authorities_proof: None, + }] + ); + } + + #[test] + fn finality_proof_finalized_earlier_block_if_no_justification_for_target_is_known() { + let blockchain = test_blockchain(); + blockchain + .insert( + header(4).hash(), + header(4), + Some(vec![4]), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(5).hash(), + header(5), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + + // block 4 is finalized with justification + we request for finality of 5 + // => we can't prove 
finality of 5, but providing finality for 4 is still useful for requester + let proof_of_5: FinalityProof = Decode::decode( + &mut &prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(5).hash(), + ) + .unwrap() + .unwrap()[..], + ) + .unwrap(); + assert_eq!( + proof_of_5, + vec![FinalityProofFragment { + block: header(4).hash(), + justification: vec![4], + unknown_headers: Vec::new(), + authorities_proof: None, + }] + ); + } + + #[test] + fn finality_proof_works_with_authorities_change() { + let blockchain = test_blockchain(); + let auth3 = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; + let auth5 = vec![(AuthorityId::from_slice(&[5u8; 32]), 1u64)]; + let auth7 = vec![(AuthorityId::from_slice(&[7u8; 32]), 1u64)]; + let just4 = TestJustification((0, auth3.clone()), vec![4]).encode(); + let just5 = TestJustification((0, auth3.clone()), vec![5]).encode(); + let just7 = TestJustification((1, auth5.clone()), vec![7]).encode(); + blockchain + .insert( + header(4).hash(), + header(4), + Some(just4), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(5).hash(), + header(5), + Some(just5.clone()), + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(6).hash(), + header(6), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(7).hash(), + header(7), + Some(just7.clone()), + None, + NewBlockState::Final, + ) + .unwrap(); + + // when querying for finality of 6, we assume that the #3 is the last block known to the requester + // => since we only have justification for #7, we provide #7 + let proof_of_6: FinalityProof = Decode::decode( + &mut &prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |block_id| match block_id { + BlockId::Hash(h) if h == header(3).hash() => Ok(auth3.clone()), + 
BlockId::Number(4) => Ok(auth3.clone()), + BlockId::Number(5) => Ok(auth5.clone()), + BlockId::Number(7) => Ok(auth7.clone()), + _ => unreachable!("no other authorities should be fetched: {:?}", block_id), + }, + |block_id| match block_id { + BlockId::Number(5) => Ok(StorageProof::new(vec![vec![50]])), + BlockId::Number(7) => Ok(StorageProof::new(vec![vec![70]])), + _ => unreachable!("no other authorities should be proved: {:?}", block_id), + }, + ), + 0, + header(3).hash(), + header(6).hash(), + ) + .unwrap() + .unwrap()[..], + ) + .unwrap(); + // initial authorities set (which start acting from #0) is [3; 32] + assert_eq!( + proof_of_6, + vec![ + // new authorities set starts acting from #5 => we do not provide fragment for #4 + // first fragment provides justification for #5 && authorities set that starts acting from #5 + FinalityProofFragment { + block: header(5).hash(), + justification: just5, + unknown_headers: Vec::new(), + authorities_proof: Some(StorageProof::new(vec![vec![50]])), + }, + // last fragment provides justification for #7 && unknown#7 + FinalityProofFragment { + block: header(7).hash(), + justification: just7.clone(), + unknown_headers: vec![header(7)], + authorities_proof: Some(StorageProof::new(vec![vec![70]])), + }, + ] + ); + + // now let's verify finality proof + let blockchain = test_blockchain(); + blockchain + .insert( + header(4).hash(), + header(4), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(5).hash(), + header(5), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + blockchain + .insert( + header(6).hash(), + header(6), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + let effects = + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, + 0, + auth3, + &ClosureAuthoritySetForFinalityChecker(|hash, _header, proof: StorageProof| { + match proof.clone().iter_nodes().next().map(|x| x[0]) { + Some(50) => Ok(auth5.clone()), + Some(70) => Ok(auth7.clone()), + _ => 
unreachable!("no other proofs should be checked: {}", hash), + } + }), + proof_of_6.encode(), + ) + .unwrap(); + + assert_eq!( + effects, + FinalityEffects { + headers_to_import: vec![header(7)], + block: header(7).hash(), + justification: TestJustification((1, auth5.clone()), vec![7]).encode(), + new_set_id: 2, + new_authorities: auth7, + } + ); + } + + #[test] + fn finality_proof_check_fails_when_proof_decode_fails() { + let blockchain = test_blockchain(); + + // when we can't decode proof from Vec + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, + 1, + vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], + &ClosureAuthoritySetForFinalityChecker(|_, _, _| { + unreachable!("returns before CheckAuthoritiesProof") + }), + vec![42], + ) + .unwrap_err(); + } + + #[test] + fn finality_proof_check_fails_when_proof_is_empty() { + let blockchain = test_blockchain(); + + // when decoded proof has zero length + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, + 1, + vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)], + &ClosureAuthoritySetForFinalityChecker(|_, _, _| { + unreachable!("returns before CheckAuthoritiesProof") + }), + Vec::::new().encode(), + ) + .unwrap_err(); + } + + #[test] + fn finality_proof_check_fails_when_intermediate_fragment_has_unknown_headers() { + let blockchain = test_blockchain(); + + // when intermediate (#0) fragment has non-empty unknown headers + let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, + 1, + authorities.clone(), + &ClosureAuthoritySetForFinalityChecker(|_, _, _| { + unreachable!("returns before CheckAuthoritiesProof") + }), + vec![ + FinalityProofFragment { + block: header(4).hash(), + justification: TestJustification((0, authorities.clone()), vec![7]).encode(), + unknown_headers: vec![header(4)], + authorities_proof: Some(StorageProof::new(vec![vec![42]])), + }, + FinalityProofFragment { + block: 
header(5).hash(), + justification: TestJustification((0, authorities), vec![8]).encode(), + unknown_headers: vec![header(5)], + authorities_proof: None, + }, + ] + .encode(), + ) + .unwrap_err(); + } + + #[test] + fn finality_proof_check_fails_when_intermediate_fragment_has_no_authorities_proof() { + let blockchain = test_blockchain(); + + // when intermediate (#0) fragment has empty authorities proof + let authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; + do_check_finality_proof::<_, _, TestJustification>( + &blockchain, + 1, + authorities.clone(), + &ClosureAuthoritySetForFinalityChecker(|_, _, _| { + unreachable!("returns before CheckAuthoritiesProof") + }), + vec![ + FinalityProofFragment { + block: header(4).hash(), + justification: TestJustification((0, authorities.clone()), vec![7]).encode(), + unknown_headers: Vec::new(), + authorities_proof: None, + }, + FinalityProofFragment { + block: header(5).hash(), + justification: TestJustification((0, authorities), vec![8]).encode(), + unknown_headers: vec![header(5)], + authorities_proof: None, + }, + ] + .encode(), + ) + .unwrap_err(); + } + + #[test] + fn finality_proof_check_works() { + let blockchain = test_blockchain(); + + let initial_authorities = vec![(AuthorityId::from_slice(&[3u8; 32]), 1u64)]; + let next_authorities = vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)]; + let effects = do_check_finality_proof::<_, _, TestJustification>( + &blockchain, + 1, + initial_authorities.clone(), + &ClosureAuthoritySetForFinalityChecker(|_, _, _| Ok(next_authorities.clone())), + vec![ + FinalityProofFragment { + block: header(2).hash(), + justification: TestJustification((1, initial_authorities.clone()), vec![7]) + .encode(), + unknown_headers: Vec::new(), + authorities_proof: Some(StorageProof::new(vec![vec![42]])), + }, + FinalityProofFragment { + block: header(4).hash(), + justification: TestJustification((2, next_authorities.clone()), vec![8]) + .encode(), + unknown_headers: vec![header(4)], + 
authorities_proof: None, + }, + ] + .encode(), + ) + .unwrap(); + assert_eq!( + effects, + FinalityEffects { + headers_to_import: vec![header(4)], + block: header(4).hash(), + justification: TestJustification((2, next_authorities.clone()), vec![8]).encode(), + new_set_id: 2, + new_authorities: vec![(AuthorityId::from_slice(&[4u8; 32]), 1u64)], + } + ); + } + + #[test] + fn finality_proof_is_none_if_first_justification_is_generated_by_unknown_set() { + // this is the case for forced change: set_id has been forcibly increased on full node + // and light node missed that + // => justification verification will fail on light node anyways, so we do not return + // finality proof at all + let blockchain = test_blockchain(); + let just4 = TestJustification( + (0, vec![(AuthorityId::from_slice(&[42u8; 32]), 1u64)]), + vec![4], + ) + .encode(); + blockchain + .insert( + header(4).hash(), + header(4), + Some(just4), + None, + NewBlockState::Final, + ) + .unwrap(); + + let proof_of_4 = prove_finality::<_, _, TestJustification>( + &blockchain, + &( + |_| Ok(vec![(AuthorityId::from_slice(&[1u8; 32]), 1u64)]), + |_| unreachable!("should return before calling ProveAuthorities"), + ), + 0, + header(3).hash(), + header(4).hash(), + ) + .unwrap(); + assert!(proof_of_4.is_none()); + } } diff --git a/client/finality-grandpa/src/import.rs b/client/finality-grandpa/src/import.rs index c1e32dfa6c..9fb250bf8b 100644 --- a/client/finality-grandpa/src/import.rs +++ b/client/finality-grandpa/src/import.rs @@ -14,34 +14,31 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::{sync::Arc, collections::HashMap}; +use std::{collections::HashMap, sync::Arc}; use log::{debug, trace}; use parity_scale_codec::Encode; use parking_lot::RwLockWriteGuard; -use sp_blockchain::{BlockStatus, well_known_cache_keys}; use sc_client_api::{backend::Backend, utils::is_descendent_of}; +use sp_api::TransactionFor; +use sp_blockchain::{well_known_cache_keys, BlockStatus}; use sp_utils::mpsc::TracingUnboundedSender; -use sp_api::{TransactionFor}; use sp_consensus::{ - BlockImport, Error as ConsensusError, - BlockCheckParams, BlockImportParams, BlockOrigin, ImportResult, JustificationImport, - SelectChain, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, Error as ConsensusError, + ImportResult, JustificationImport, SelectChain, }; use sp_finality_grandpa::{ConsensusLog, ScheduledChange, SetId, GRANDPA_ENGINE_ID}; -use sp_runtime::Justification; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; -use sp_runtime::traits::{ - Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero, -}; +use sp_runtime::traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor, Zero}; +use sp_runtime::Justification; -use crate::{Error, CommandOrError, NewAuthoritySet, VoterCommand}; -use crate::authorities::{AuthoritySet, SharedAuthoritySet, DelayKind, PendingChange}; +use crate::authorities::{AuthoritySet, DelayKind, PendingChange, SharedAuthoritySet}; use crate::consensus_changes::SharedConsensusChanges; use crate::environment::finalize_block; use crate::justification::GrandpaJustification; +use crate::{CommandOrError, Error, NewAuthoritySet, VoterCommand}; use std::marker::PhantomData; /// A block-import handler for GRANDPA. @@ -54,464 +51,485 @@ use std::marker::PhantomData; /// When using GRANDPA, the block import worker should be using this block import /// object. 
pub struct GrandpaBlockImport { - inner: Arc, - select_chain: SC, - authority_set: SharedAuthoritySet>, - send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, - authority_set_hard_forks: HashMap>>, - _phantom: PhantomData, + inner: Arc, + select_chain: SC, + authority_set: SharedAuthoritySet>, + send_voter_commands: TracingUnboundedSender>>, + consensus_changes: SharedConsensusChanges>, + authority_set_hard_forks: HashMap>>, + _phantom: PhantomData, } -impl Clone for - GrandpaBlockImport +impl Clone + for GrandpaBlockImport { - fn clone(&self) -> Self { - GrandpaBlockImport { - inner: self.inner.clone(), - select_chain: self.select_chain.clone(), - authority_set: self.authority_set.clone(), - send_voter_commands: self.send_voter_commands.clone(), - consensus_changes: self.consensus_changes.clone(), - authority_set_hard_forks: self.authority_set_hard_forks.clone(), - _phantom: PhantomData, - } - } + fn clone(&self) -> Self { + GrandpaBlockImport { + inner: self.inner.clone(), + select_chain: self.select_chain.clone(), + authority_set: self.authority_set.clone(), + send_voter_commands: self.send_voter_commands.clone(), + consensus_changes: self.consensus_changes.clone(), + authority_set_hard_forks: self.authority_set_hard_forks.clone(), + _phantom: PhantomData, + } + } } impl JustificationImport - for GrandpaBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend, - Client: crate::ClientForGrandpa, - SC: SelectChain, + for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend, + Client: crate::ClientForGrandpa, + SC: SelectChain, { - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - let mut out = Vec::new(); - let chain_info = self.inner.info(); - - // request justifications for all pending changes for which change blocks have already been imported - let authorities = 
self.authority_set.inner().read(); - for pending_change in authorities.pending_changes() { - if pending_change.delay_kind == DelayKind::Finalized && - pending_change.effective_number() > chain_info.finalized_number && - pending_change.effective_number() <= chain_info.best_number - { - let effective_block_hash = if !pending_change.delay.is_zero() { - self.select_chain.finality_target( - pending_change.canon_hash, - Some(pending_change.effective_number()), - ) - } else { - Ok(Some(pending_change.canon_hash)) - }; - - if let Ok(Some(hash)) = effective_block_hash { - if let Ok(Some(header)) = self.inner.header(BlockId::Hash(hash)) { - if *header.number() == pending_change.effective_number() { - out.push((header.hash(), *header.number())); - } - } - } - } - } - - out - } - - fn import_justification( - &mut self, - hash: Block::Hash, - number: NumberFor, - justification: Justification, - ) -> Result<(), Self::Error> { - // this justification was requested by the sync service, therefore we - // are not sure if it should enact a change or not. it could have been a - // request made as part of initial sync but that means the justification - // wasn't part of the block and was requested asynchronously, probably - // makes sense to log in that case. 
- GrandpaBlockImport::import_justification(self, hash, number, justification, false, false) - } + type Error = ConsensusError; + + fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { + let mut out = Vec::new(); + let chain_info = self.inner.info(); + + // request justifications for all pending changes for which change blocks have already been imported + let authorities = self.authority_set.inner().read(); + for pending_change in authorities.pending_changes() { + if pending_change.delay_kind == DelayKind::Finalized + && pending_change.effective_number() > chain_info.finalized_number + && pending_change.effective_number() <= chain_info.best_number + { + let effective_block_hash = if !pending_change.delay.is_zero() { + self.select_chain.finality_target( + pending_change.canon_hash, + Some(pending_change.effective_number()), + ) + } else { + Ok(Some(pending_change.canon_hash)) + }; + + if let Ok(Some(hash)) = effective_block_hash { + if let Ok(Some(header)) = self.inner.header(BlockId::Hash(hash)) { + if *header.number() == pending_change.effective_number() { + out.push((header.hash(), *header.number())); + } + } + } + } + } + + out + } + + fn import_justification( + &mut self, + hash: Block::Hash, + number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error> { + // this justification was requested by the sync service, therefore we + // are not sure if it should enact a change or not. it could have been a + // request made as part of initial sync but that means the justification + // wasn't part of the block and was requested asynchronously, probably + // makes sense to log in that case. + GrandpaBlockImport::import_justification(self, hash, number, justification, false, false) + } } enum AppliedChanges { - Standard(bool), // true if the change is ready to be applied (i.e. it's a root) - Forced(NewAuthoritySet), - None, + Standard(bool), // true if the change is ready to be applied (i.e. 
it's a root) + Forced(NewAuthoritySet), + None, } impl AppliedChanges { - fn needs_justification(&self) -> bool { - match *self { - AppliedChanges::Standard(_) => true, - AppliedChanges::Forced(_) | AppliedChanges::None => false, - } - } + fn needs_justification(&self) -> bool { + match *self { + AppliedChanges::Standard(_) => true, + AppliedChanges::Forced(_) | AppliedChanges::None => false, + } + } } struct PendingSetChanges<'a, Block: 'a + BlockT> { - just_in_case: Option<( - AuthoritySet>, - RwLockWriteGuard<'a, AuthoritySet>>, - )>, - applied_changes: AppliedChanges>, - do_pause: bool, + just_in_case: Option<( + AuthoritySet>, + RwLockWriteGuard<'a, AuthoritySet>>, + )>, + applied_changes: AppliedChanges>, + do_pause: bool, } impl<'a, Block: 'a + BlockT> PendingSetChanges<'a, Block> { - // revert the pending set change explicitly. - fn revert(self) { } - - fn defuse(mut self) -> (AppliedChanges>, bool) { - self.just_in_case = None; - let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); - (applied_changes, self.do_pause) - } + // revert the pending set change explicitly. 
+ fn revert(self) {} + + fn defuse(mut self) -> (AppliedChanges>, bool) { + self.just_in_case = None; + let applied_changes = ::std::mem::replace(&mut self.applied_changes, AppliedChanges::None); + (applied_changes, self.do_pause) + } } impl<'a, Block: 'a + BlockT> Drop for PendingSetChanges<'a, Block> { - fn drop(&mut self) { - if let Some((old_set, mut authorities)) = self.just_in_case.take() { - *authorities = old_set; - } - } + fn drop(&mut self) { + if let Some((old_set, mut authorities)) = self.just_in_case.take() { + *authorities = old_set; + } + } } -fn find_scheduled_change(header: &B::Header) - -> Option>> -{ - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); +fn find_scheduled_change(header: &B::Header) -> Option>> { + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - let filter_log = |log: ConsensusLog>| match log { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - }; + let filter_log = |log: ConsensusLog>| match log { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + }; - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. - header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header + .digest() + .convert_first(|l| l.try_to(id).and_then(filter_log)) } -fn find_forced_change(header: &B::Header) - -> Option<(NumberFor, ScheduledChange>)> -{ - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - - let filter_log = |log: ConsensusLog>| match log { - ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), - _ => None, - }; - - // find the first consensus digest with the right ID which converts to - // the right kind of consensus log. 
- header.digest().convert_first(|l| l.try_to(id).and_then(filter_log)) +fn find_forced_change( + header: &B::Header, +) -> Option<(NumberFor, ScheduledChange>)> { + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + + let filter_log = |log: ConsensusLog>| match log { + ConsensusLog::ForcedChange(delay, change) => Some((delay, change)), + _ => None, + }; + + // find the first consensus digest with the right ID which converts to + // the right kind of consensus log. + header + .digest() + .convert_first(|l| l.try_to(id).and_then(filter_log)) } -impl - GrandpaBlockImport +impl GrandpaBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend, - Client: crate::ClientForGrandpa, + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend, + Client: crate::ClientForGrandpa, { - // check for a new authority set change. - fn check_new_change( - &self, - header: &Block::Header, - hash: Block::Hash, - ) -> Option>> { - // check for forced authority set hard forks - if let Some(change) = self.authority_set_hard_forks.get(&hash) { - return Some(change.clone()); - } - - // check for forced change. - if let Some((median_last_finalized, change)) = find_forced_change::(header) { - return Some(PendingChange { - next_authorities: change.next_authorities, - delay: change.delay, - canon_height: *header.number(), - canon_hash: hash, - delay_kind: DelayKind::Best { median_last_finalized }, - }); - } - - // check normal scheduled change. 
- let change = find_scheduled_change::(header)?; - Some(PendingChange { - next_authorities: change.next_authorities, - delay: change.delay, - canon_height: *header.number(), - canon_hash: hash, - delay_kind: DelayKind::Finalized, - }) - } - - fn make_authorities_changes( - &self, - block: &mut BlockImportParams>, - hash: Block::Hash, - initial_sync: bool, - ) -> Result, ConsensusError> { - // when we update the authorities, we need to hold the lock - // until the block is written to prevent a race if we need to restore - // the old authority set on error or panic. - struct InnerGuard<'a, T: 'a> { - old: Option, - guard: Option>, - } - - impl<'a, T: 'a> InnerGuard<'a, T> { - fn as_mut(&mut self) -> &mut T { - &mut **self.guard.as_mut().expect("only taken on deconstruction; qed") - } - - fn set_old(&mut self, old: T) { - if self.old.is_none() { - // ignore "newer" old changes. - self.old = Some(old); - } - } - - fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { - if let Some(old) = self.old.take() { - Some((old, self.guard.take().expect("only taken on deconstruction; qed"))) - } else { - None - } - } - } - - impl<'a, T: 'a> Drop for InnerGuard<'a, T> { - fn drop(&mut self) { - if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { - *guard = old; - } - } - } - - let number = block.header.number().clone(); - let maybe_change = self.check_new_change( - &block.header, - hash, - ); - - // returns a function for checking whether a block is a descendent of another - // consistent with querying client directly after importing the block. - let parent_hash = *block.header.parent_hash(); - let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); - - let mut guard = InnerGuard { - guard: Some(self.authority_set.inner().write()), - old: None, - }; - - // whether to pause the old authority set -- happens after import - // of a forced change block. - let mut do_pause = false; - - // add any pending changes. 
- if let Some(change) = maybe_change { - let old = guard.as_mut().clone(); - guard.set_old(old); - - if let DelayKind::Best { .. } = change.delay_kind { - do_pause = true; - } - - guard.as_mut().add_pending_change( - change, - &is_descendent_of, - ).map_err(|e| ConsensusError::from(ConsensusError::ClientImport(e.to_string())))?; - } - - let applied_changes = { - let forced_change_set = guard - .as_mut() - .apply_forced_changes(hash, number, &is_descendent_of, initial_sync) - .map_err(|e| ConsensusError::ClientImport(e.to_string())) - .map_err(ConsensusError::from)?; - - if let Some((median_last_finalized_number, new_set)) = forced_change_set { - let new_authorities = { - let (set_id, new_authorities) = new_set.current(); - - // we will use the median last finalized number as a hint - // for the canon block the new authority set should start - // with. we use the minimum between the median and the local - // best finalized block. - let best_finalized_number = self.inner.info().finalized_number; - let canon_number = best_finalized_number.min(median_last_finalized_number); - let canon_hash = + // check for a new authority set change. + fn check_new_change( + &self, + header: &Block::Header, + hash: Block::Hash, + ) -> Option>> { + // check for forced authority set hard forks + if let Some(change) = self.authority_set_hard_forks.get(&hash) { + return Some(change.clone()); + } + + // check for forced change. + if let Some((median_last_finalized, change)) = find_forced_change::(header) { + return Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Best { + median_last_finalized, + }, + }); + } + + // check normal scheduled change. 
+ let change = find_scheduled_change::(header)?; + Some(PendingChange { + next_authorities: change.next_authorities, + delay: change.delay, + canon_height: *header.number(), + canon_hash: hash, + delay_kind: DelayKind::Finalized, + }) + } + + fn make_authorities_changes( + &self, + block: &mut BlockImportParams>, + hash: Block::Hash, + initial_sync: bool, + ) -> Result, ConsensusError> { + // when we update the authorities, we need to hold the lock + // until the block is written to prevent a race if we need to restore + // the old authority set on error or panic. + struct InnerGuard<'a, T: 'a> { + old: Option, + guard: Option>, + } + + impl<'a, T: 'a> InnerGuard<'a, T> { + fn as_mut(&mut self) -> &mut T { + &mut **self + .guard + .as_mut() + .expect("only taken on deconstruction; qed") + } + + fn set_old(&mut self, old: T) { + if self.old.is_none() { + // ignore "newer" old changes. + self.old = Some(old); + } + } + + fn consume(mut self) -> Option<(T, RwLockWriteGuard<'a, T>)> { + if let Some(old) = self.old.take() { + Some(( + old, + self.guard + .take() + .expect("only taken on deconstruction; qed"), + )) + } else { + None + } + } + } + + impl<'a, T: 'a> Drop for InnerGuard<'a, T> { + fn drop(&mut self) { + if let (Some(mut guard), Some(old)) = (self.guard.take(), self.old.take()) { + *guard = old; + } + } + } + + let number = block.header.number().clone(); + let maybe_change = self.check_new_change(&block.header, hash); + + // returns a function for checking whether a block is a descendent of another + // consistent with querying client directly after importing the block. + let parent_hash = *block.header.parent_hash(); + let is_descendent_of = is_descendent_of(&*self.inner, Some((hash, parent_hash))); + + let mut guard = InnerGuard { + guard: Some(self.authority_set.inner().write()), + old: None, + }; + + // whether to pause the old authority set -- happens after import + // of a forced change block. + let mut do_pause = false; + + // add any pending changes. 
+ if let Some(change) = maybe_change { + let old = guard.as_mut().clone(); + guard.set_old(old); + + if let DelayKind::Best { .. } = change.delay_kind { + do_pause = true; + } + + guard + .as_mut() + .add_pending_change(change, &is_descendent_of) + .map_err(|e| ConsensusError::from(ConsensusError::ClientImport(e.to_string())))?; + } + + let applied_changes = { + let forced_change_set = guard + .as_mut() + .apply_forced_changes(hash, number, &is_descendent_of, initial_sync) + .map_err(|e| ConsensusError::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if let Some((median_last_finalized_number, new_set)) = forced_change_set { + let new_authorities = { + let (set_id, new_authorities) = new_set.current(); + + // we will use the median last finalized number as a hint + // for the canon block the new authority set should start + // with. we use the minimum between the median and the local + // best finalized block. + let best_finalized_number = self.inner.info().finalized_number; + let canon_number = best_finalized_number.min(median_last_finalized_number); + let canon_hash = self.inner.header(BlockId::Number(canon_number)) .map_err(|e| ConsensusError::ClientImport(e.to_string()))? .expect("the given block number is less or equal than the current best finalized number; \ current best finalized number must exist in chain; qed.") .hash(); - NewAuthoritySet { - canon_number, - canon_hash, - set_id, - authorities: new_authorities.to_vec(), - } - }; - let old = ::std::mem::replace(guard.as_mut(), new_set); - guard.set_old(old); - - AppliedChanges::Forced(new_authorities) - } else { - let did_standard = guard.as_mut().enacts_standard_change(hash, number, &is_descendent_of) - .map_err(|e| ConsensusError::ClientImport(e.to_string())) - .map_err(ConsensusError::from)?; - - if let Some(root) = did_standard { - AppliedChanges::Standard(root) - } else { - AppliedChanges::None - } - } - }; - - // consume the guard safely and write necessary changes. 
- let just_in_case = guard.consume(); - if let Some((_, ref authorities)) = just_in_case { - let authorities_change = match applied_changes { - AppliedChanges::Forced(ref new) => Some(new), - AppliedChanges::Standard(_) => None, // the change isn't actually applied yet. - AppliedChanges::None => None, - }; - - crate::aux_schema::update_authority_set::( - authorities, - authorities_change, - |insert| block.auxiliary.extend( - insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - ) - ); - } - - Ok(PendingSetChanges { just_in_case, applied_changes, do_pause }) - } + NewAuthoritySet { + canon_number, + canon_hash, + set_id, + authorities: new_authorities.to_vec(), + } + }; + let old = ::std::mem::replace(guard.as_mut(), new_set); + guard.set_old(old); + + AppliedChanges::Forced(new_authorities) + } else { + let did_standard = guard + .as_mut() + .enacts_standard_change(hash, number, &is_descendent_of) + .map_err(|e| ConsensusError::ClientImport(e.to_string())) + .map_err(ConsensusError::from)?; + + if let Some(root) = did_standard { + AppliedChanges::Standard(root) + } else { + AppliedChanges::None + } + } + }; + + // consume the guard safely and write necessary changes. + let just_in_case = guard.consume(); + if let Some((_, ref authorities)) = just_in_case { + let authorities_change = match applied_changes { + AppliedChanges::Forced(ref new) => Some(new), + AppliedChanges::Standard(_) => None, // the change isn't actually applied yet. 
+ AppliedChanges::None => None, + }; + + crate::aux_schema::update_authority_set::( + authorities, + authorities_change, + |insert| { + block + .auxiliary + .extend(insert.iter().map(|(k, v)| (k.to_vec(), Some(v.to_vec())))) + }, + ); + } + + Ok(PendingSetChanges { + just_in_case, + applied_changes, + do_pause, + }) + } } -impl BlockImport - for GrandpaBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend, - Client: crate::ClientForGrandpa, - for<'a> &'a Client: - BlockImport>, +impl BlockImport for GrandpaBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend, + Client: crate::ClientForGrandpa, + for<'a> &'a Client: + BlockImport>, { - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - let hash = block.post_hash(); - let number = block.header.number().clone(); - - // early exit if block already in chain, otherwise the check for - // authority changes will error when trying to re-import a change block - match self.inner.status(BlockId::Hash(hash)) { - Ok(BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), - Ok(BlockStatus::Unknown) => {}, - Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()), - } - - // on initial sync we will restrict logging under info to avoid spam. 
- let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; - - let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; - - // we don't want to finalize on `inner.import_block` - let mut justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); - let import_result = (&*self.inner).import_block(block, new_cache); - - let mut imported_aux = { - match import_result { - Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => { - debug!( - target: "afg", - "Restoring old authority set after block import result: {:?}", - r, - ); - pending_changes.revert(); - return Ok(r); - }, - Err(e) => { - debug!( - target: "afg", - "Restoring old authority set after block import error: {:?}", - e, - ); - pending_changes.revert(); - return Err(ConsensusError::ClientImport(e.to_string()).into()); - }, - } - }; - - let (applied_changes, do_pause) = pending_changes.defuse(); - - // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. - if do_pause { - let _ = self.send_voter_commands.unbounded_send( - VoterCommand::Pause(format!("Forced change scheduled after inactivity")) - ); - } - - let needs_justification = applied_changes.needs_justification(); - - match applied_changes { - AppliedChanges::Forced(new) => { - // NOTE: when we do a force change we are "discrediting" the old set so we - // ignore any justifications from them. this block may contain a justification - // which should be checked and imported below against the new authority - // triggered by this forced change. 
the new grandpa voter will start at the - // last median finalized block (which is before the block that enacts the - // change), full nodes syncing the chain will not be able to successfully - // import justifications for those blocks since their local authority set view - // is still of the set before the forced change was enacted, still after #1867 - // they should import the block and discard the justification, and they will - // then request a justification from sync if it's necessary (which they should - // then be able to successfully validate). - let _ = self.send_voter_commands.unbounded_send(VoterCommand::ChangeAuthorities(new)); - - // we must clear all pending justifications requests, presumably they won't be - // finalized hence why this forced changes was triggered - imported_aux.clear_justification_requests = true; - }, - AppliedChanges::Standard(false) => { - // we can't apply this change yet since there are other dependent changes that we - // need to apply first, drop any justification that might have been provided with - // the block to make sure we request them from `sync` which will ensure they'll be - // applied in-order. 
- justification.take(); - }, - _ => {}, - } - - match justification { - Some(justification) => { - let import_res = self.import_justification( - hash, - number, - justification, - needs_justification, - initial_sync, - ); - - import_res.unwrap_or_else(|err| { + type Error = ConsensusError; + type Transaction = TransactionFor; + + fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + let hash = block.post_hash(); + let number = block.header.number().clone(); + + // early exit if block already in chain, otherwise the check for + // authority changes will error when trying to re-import a change block + match self.inner.status(BlockId::Hash(hash)) { + Ok(BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + Ok(BlockStatus::Unknown) => {} + Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()), + } + + // on initial sync we will restrict logging under info to avoid spam. + let initial_sync = block.origin == BlockOrigin::NetworkInitialSync; + + let pending_changes = self.make_authorities_changes(&mut block, hash, initial_sync)?; + + // we don't want to finalize on `inner.import_block` + let mut justification = block.justification.take(); + let enacts_consensus_change = !new_cache.is_empty(); + let import_result = (&*self.inner).import_block(block, new_cache); + + let mut imported_aux = { + match import_result { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => { + debug!( + target: "afg", + "Restoring old authority set after block import result: {:?}", + r, + ); + pending_changes.revert(); + return Ok(r); + } + Err(e) => { + debug!( + target: "afg", + "Restoring old authority set after block import error: {:?}", + e, + ); + pending_changes.revert(); + return Err(ConsensusError::ClientImport(e.to_string()).into()); + } + } + }; + + let (applied_changes, do_pause) = pending_changes.defuse(); + + // Send the pause signal after import but BEFORE sending a `ChangeAuthorities` message. 
+ if do_pause { + let _ = self + .send_voter_commands + .unbounded_send(VoterCommand::Pause(format!( + "Forced change scheduled after inactivity" + ))); + } + + let needs_justification = applied_changes.needs_justification(); + + match applied_changes { + AppliedChanges::Forced(new) => { + // NOTE: when we do a force change we are "discrediting" the old set so we + // ignore any justifications from them. this block may contain a justification + // which should be checked and imported below against the new authority + // triggered by this forced change. the new grandpa voter will start at the + // last median finalized block (which is before the block that enacts the + // change), full nodes syncing the chain will not be able to successfully + // import justifications for those blocks since their local authority set view + // is still of the set before the forced change was enacted, still after #1867 + // they should import the block and discard the justification, and they will + // then request a justification from sync if it's necessary (which they should + // then be able to successfully validate). + let _ = self + .send_voter_commands + .unbounded_send(VoterCommand::ChangeAuthorities(new)); + + // we must clear all pending justifications requests, presumably they won't be + // finalized hence why this forced changes was triggered + imported_aux.clear_justification_requests = true; + } + AppliedChanges::Standard(false) => { + // we can't apply this change yet since there are other dependent changes that we + // need to apply first, drop any justification that might have been provided with + // the block to make sure we request them from `sync` which will ensure they'll be + // applied in-order. 
+ justification.take(); + } + _ => {} + } + + match justification { + Some(justification) => { + let import_res = self.import_justification( + hash, + number, + justification, + needs_justification, + initial_sync, + ); + + import_res.unwrap_or_else(|err| { if needs_justification || enacts_consensus_change { debug!(target: "afg", "Imported block #{} that enacts authority set change with \ invalid justification: {:?}, requesting justification from peers.", number, err); @@ -519,162 +537,163 @@ impl BlockImport imported_aux.needs_justification = true; } }); - }, - None => { - if needs_justification { - trace!( - target: "afg", - "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", - number, - ); - - imported_aux.needs_justification = true; - } - - // we have imported block with consensus data changes, but without justification - // => remember to create justification when next block will be finalized - if enacts_consensus_change { - self.consensus_changes.lock().note_change((number, hash)); - } - } - } - - Ok(ImportResult::Imported(imported_aux)) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.inner.check_block(block) - } + } + None => { + if needs_justification { + trace!( + target: "afg", + "Imported unjustified block #{} that enacts authority set change, waiting for finality for enactment.", + number, + ); + + imported_aux.needs_justification = true; + } + + // we have imported block with consensus data changes, but without justification + // => remember to create justification when next block will be finalized + if enacts_consensus_change { + self.consensus_changes.lock().note_change((number, hash)); + } + } + } + + Ok(ImportResult::Imported(imported_aux)) + } + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + self.inner.check_block(block) + } } impl GrandpaBlockImport { - pub(crate) fn new( - inner: Arc, - select_chain: SC, - authority_set: 
SharedAuthoritySet>, - send_voter_commands: TracingUnboundedSender>>, - consensus_changes: SharedConsensusChanges>, - authority_set_hard_forks: Vec<(SetId, PendingChange>)>, - ) -> GrandpaBlockImport { - // check for and apply any forced authority set hard fork that applies - // to the *current* authority set. - if let Some((_, change)) = authority_set_hard_forks - .iter() - .find(|(set_id, _)| *set_id == authority_set.set_id()) - { - let mut authority_set = authority_set.inner().write(); - authority_set.current_authorities = change.next_authorities.clone(); - } - - // index authority set hard forks by block hash so that they can be used - // by any node syncing the chain and importing a block hard fork - // authority set changes. - let authority_set_hard_forks = authority_set_hard_forks - .into_iter() - .map(|(_, change)| (change.canon_hash, change)) - .collect::>(); - - // check for and apply any forced authority set hard fork that apply to - // any *pending* standard changes, checking by the block hash at which - // they were announced. - { - let mut authority_set = authority_set.inner().write(); - - authority_set.pending_standard_changes = authority_set - .pending_standard_changes - .clone() - .map(&mut |hash, _, original| { - authority_set_hard_forks - .get(&hash) - .cloned() - .unwrap_or(original) - }); - } - - GrandpaBlockImport { - inner, - select_chain, - authority_set, - send_voter_commands, - consensus_changes, - authority_set_hard_forks, - _phantom: PhantomData, - } - } + pub(crate) fn new( + inner: Arc, + select_chain: SC, + authority_set: SharedAuthoritySet>, + send_voter_commands: TracingUnboundedSender>>, + consensus_changes: SharedConsensusChanges>, + authority_set_hard_forks: Vec<(SetId, PendingChange>)>, + ) -> GrandpaBlockImport { + // check for and apply any forced authority set hard fork that applies + // to the *current* authority set. 
+ if let Some((_, change)) = authority_set_hard_forks + .iter() + .find(|(set_id, _)| *set_id == authority_set.set_id()) + { + let mut authority_set = authority_set.inner().write(); + authority_set.current_authorities = change.next_authorities.clone(); + } + + // index authority set hard forks by block hash so that they can be used + // by any node syncing the chain and importing a block hard fork + // authority set changes. + let authority_set_hard_forks = authority_set_hard_forks + .into_iter() + .map(|(_, change)| (change.canon_hash, change)) + .collect::>(); + + // check for and apply any forced authority set hard fork that apply to + // any *pending* standard changes, checking by the block hash at which + // they were announced. + { + let mut authority_set = authority_set.inner().write(); + + authority_set.pending_standard_changes = authority_set + .pending_standard_changes + .clone() + .map(&mut |hash, _, original| { + authority_set_hard_forks + .get(&hash) + .cloned() + .unwrap_or(original) + }); + } + + GrandpaBlockImport { + inner, + select_chain, + authority_set, + send_voter_commands, + consensus_changes, + authority_set_hard_forks, + _phantom: PhantomData, + } + } } impl GrandpaBlockImport where - BE: Backend, - Client: crate::ClientForGrandpa, - NumberFor: finality_grandpa::BlockNumberOps, + BE: Backend, + Client: crate::ClientForGrandpa, + NumberFor: finality_grandpa::BlockNumberOps, { - - /// Import a block justification and finalize the block. - /// - /// If `enacts_change` is set to true, then finalizing this block *must* - /// enact an authority set change, the function will panic otherwise. 
- fn import_justification( - &mut self, - hash: Block::Hash, - number: NumberFor, - justification: Justification, - enacts_change: bool, - initial_sync: bool, - ) -> Result<(), ConsensusError> { - let justification = GrandpaJustification::decode_and_verify_finalizes( - &justification, - (hash, number), - self.authority_set.set_id(), - &self.authority_set.current_authorities(), - ); - - let justification = match justification { - Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()), - Ok(justification) => justification, - }; - - let result = finalize_block( - self.inner.clone(), - &self.authority_set, - &self.consensus_changes, - None, - hash, - number, - justification.into(), - initial_sync, - ); - - match result { - Err(CommandOrError::VoterCommand(command)) => { - afg_log!(initial_sync, - "👴 Imported justification for block #{} that triggers \ + /// Import a block justification and finalize the block. + /// + /// If `enacts_change` is set to true, then finalizing this block *must* + /// enact an authority set change, the function will panic otherwise. 
+ fn import_justification( + &mut self, + hash: Block::Hash, + number: NumberFor, + justification: Justification, + enacts_change: bool, + initial_sync: bool, + ) -> Result<(), ConsensusError> { + let justification = GrandpaJustification::decode_and_verify_finalizes( + &justification, + (hash, number), + self.authority_set.set_id(), + &self.authority_set.current_authorities(), + ); + + let justification = match justification { + Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()), + Ok(justification) => justification, + }; + + let result = finalize_block( + self.inner.clone(), + &self.authority_set, + &self.consensus_changes, + None, + hash, + number, + justification.into(), + initial_sync, + ); + + match result { + Err(CommandOrError::VoterCommand(command)) => { + afg_log!( + initial_sync, + "👴 Imported justification for block #{} that triggers \ command {}, signaling voter.", - number, - command, - ); - - // send the command to the voter - let _ = self.send_voter_commands.unbounded_send(command); - }, - Err(CommandOrError::Error(e)) => { - return Err(match e { - Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), - Error::Network(error) => ConsensusError::ClientImport(error), - Error::Blockchain(error) => ConsensusError::ClientImport(error), - Error::Client(error) => ConsensusError::ClientImport(error.to_string()), - Error::Safety(error) => ConsensusError::ClientImport(error), - Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), - }.into()); - }, - Ok(_) => { - assert!(!enacts_change, "returns Ok when no authority set change should be enacted; qed;"); - }, - } - - Ok(()) - } + number, + command, + ); + + // send the command to the voter + let _ = self.send_voter_commands.unbounded_send(command); + } + Err(CommandOrError::Error(e)) => { + return Err(match e { + Error::Grandpa(error) => ConsensusError::ClientImport(error.to_string()), + Error::Network(error) => ConsensusError::ClientImport(error), + 
Error::Blockchain(error) => ConsensusError::ClientImport(error), + Error::Client(error) => ConsensusError::ClientImport(error.to_string()), + Error::Safety(error) => ConsensusError::ClientImport(error), + Error::Timer(error) => ConsensusError::ClientImport(error.to_string()), + } + .into()); + } + Ok(_) => { + assert!( + !enacts_change, + "returns Ok when no authority set change should be enacted; qed;" + ); + } + } + + Ok(()) + } } diff --git a/client/finality-grandpa/src/justification.rs b/client/finality-grandpa/src/justification.rs index 084c0042ab..ac530b2392 100644 --- a/client/finality-grandpa/src/justification.rs +++ b/client/finality-grandpa/src/justification.rs @@ -17,16 +17,16 @@ use std::collections::{HashMap, HashSet}; use std::sync::Arc; -use sp_blockchain::{Error as ClientError, HeaderBackend}; -use parity_scale_codec::{Encode, Decode}; use finality_grandpa::voter_set::VoterSet; -use finality_grandpa::{Error as GrandpaError}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT, Header as HeaderT}; +use finality_grandpa::Error as GrandpaError; +use parity_scale_codec::{Decode, Encode}; +use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_finality_grandpa::AuthorityId; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use crate::{Commit, Error}; use crate::communication; +use crate::{Commit, Error}; /// A GRANDPA justification for block finality, it includes a commit message and /// an ancestry proof including all headers routing all precommit target blocks @@ -38,186 +38,215 @@ use crate::communication; /// nodes, and are used by syncing nodes to prove authority set handoffs. 
#[derive(Encode, Decode)] pub struct GrandpaJustification { - round: u64, - pub(crate) commit: Commit, - votes_ancestries: Vec, + round: u64, + pub(crate) commit: Commit, + votes_ancestries: Vec, } impl GrandpaJustification { - /// Create a GRANDPA justification from the given commit. This method - /// assumes the commit is valid and well-formed. - pub(crate) fn from_commit( - client: &Arc, - round: u64, - commit: Commit, - ) -> Result, Error> where - C: HeaderBackend, - { - let mut votes_ancestries_hashes = HashSet::new(); - let mut votes_ancestries = Vec::new(); - - let error = || { - let msg = "invalid precommits for target commit".to_string(); - Err(Error::Client(ClientError::BadJustification(msg))) - }; - - for signed in commit.precommits.iter() { - let mut current_hash = signed.precommit.target_hash.clone(); - loop { - if current_hash == commit.target_hash { break; } - - match client.header(BlockId::Hash(current_hash))? { - Some(current_header) => { - if *current_header.number() <= commit.target_number { - return error(); - } - - let parent_hash = current_header.parent_hash().clone(); - if votes_ancestries_hashes.insert(current_hash) { - votes_ancestries.push(current_header); - } - current_hash = parent_hash; - }, - _ => return error(), - } - } - } - - Ok(GrandpaJustification { round, commit, votes_ancestries }) - } - - /// Decode a GRANDPA justification and validate the commit and the votes' - /// ancestry proofs finalize the given block. 
- pub(crate) fn decode_and_verify_finalizes( - encoded: &[u8], - finalized_target: (Block::Hash, NumberFor), - set_id: u64, - voters: &VoterSet, - ) -> Result, ClientError> where - NumberFor: finality_grandpa::BlockNumberOps, - { - - let justification = GrandpaJustification::::decode(&mut &*encoded) - .map_err(|_| ClientError::JustificationDecode)?; - - if (justification.commit.target_hash, justification.commit.target_number) != finalized_target { - let msg = "invalid commit target in grandpa justification".to_string(); - Err(ClientError::BadJustification(msg)) - } else { - justification.verify(set_id, voters).map(|_| justification) - } - } - - /// Validate the commit and the votes' ancestry proofs. - pub(crate) fn verify(&self, set_id: u64, voters: &VoterSet) -> Result<(), ClientError> - where - NumberFor: finality_grandpa::BlockNumberOps, - { - use finality_grandpa::Chain; - - let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); - - match finality_grandpa::validate_commit( - &self.commit, - voters, - &ancestry_chain, - ) { - Ok(ref result) if result.ghost().is_some() => {}, - _ => { - let msg = "invalid commit in grandpa justification".to_string(); - return Err(ClientError::BadJustification(msg)); - } - } - - let mut buf = Vec::new(); - let mut visited_hashes = HashSet::new(); - for signed in self.commit.precommits.iter() { - if let Err(_) = communication::check_message_sig_with_buffer::( - &finality_grandpa::Message::Precommit(signed.precommit.clone()), - &signed.id, - &signed.signature, - self.round, - set_id, - &mut buf, - ) { - return Err(ClientError::BadJustification( - "invalid signature for precommit in grandpa justification".to_string()).into()); - } - - if self.commit.target_hash == signed.precommit.target_hash { - continue; - } - - match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) { - Ok(route) => { - // ancestry starts from parent hash but the precommit target hash has been visited - 
visited_hashes.insert(signed.precommit.target_hash); - for hash in route { - visited_hashes.insert(hash); - } - }, - _ => { - return Err(ClientError::BadJustification( - "invalid precommit ancestry proof in grandpa justification".to_string()).into()); - }, - } - } - - let ancestry_hashes = self.votes_ancestries - .iter() - .map(|h: &Block::Header| h.hash()) - .collect(); - - if visited_hashes != ancestry_hashes { - return Err(ClientError::BadJustification( - "invalid precommit ancestries in grandpa justification with unused headers".to_string()).into()); - } - - Ok(()) - } + /// Create a GRANDPA justification from the given commit. This method + /// assumes the commit is valid and well-formed. + pub(crate) fn from_commit( + client: &Arc, + round: u64, + commit: Commit, + ) -> Result, Error> + where + C: HeaderBackend, + { + let mut votes_ancestries_hashes = HashSet::new(); + let mut votes_ancestries = Vec::new(); + + let error = || { + let msg = "invalid precommits for target commit".to_string(); + Err(Error::Client(ClientError::BadJustification(msg))) + }; + + for signed in commit.precommits.iter() { + let mut current_hash = signed.precommit.target_hash.clone(); + loop { + if current_hash == commit.target_hash { + break; + } + + match client.header(BlockId::Hash(current_hash))? { + Some(current_header) => { + if *current_header.number() <= commit.target_number { + return error(); + } + + let parent_hash = current_header.parent_hash().clone(); + if votes_ancestries_hashes.insert(current_hash) { + votes_ancestries.push(current_header); + } + current_hash = parent_hash; + } + _ => return error(), + } + } + } + + Ok(GrandpaJustification { + round, + commit, + votes_ancestries, + }) + } + + /// Decode a GRANDPA justification and validate the commit and the votes' + /// ancestry proofs finalize the given block. 
+ pub(crate) fn decode_and_verify_finalizes( + encoded: &[u8], + finalized_target: (Block::Hash, NumberFor), + set_id: u64, + voters: &VoterSet, + ) -> Result, ClientError> + where + NumberFor: finality_grandpa::BlockNumberOps, + { + let justification = GrandpaJustification::::decode(&mut &*encoded) + .map_err(|_| ClientError::JustificationDecode)?; + + if ( + justification.commit.target_hash, + justification.commit.target_number, + ) != finalized_target + { + let msg = "invalid commit target in grandpa justification".to_string(); + Err(ClientError::BadJustification(msg)) + } else { + justification.verify(set_id, voters).map(|_| justification) + } + } + + /// Validate the commit and the votes' ancestry proofs. + pub(crate) fn verify( + &self, + set_id: u64, + voters: &VoterSet, + ) -> Result<(), ClientError> + where + NumberFor: finality_grandpa::BlockNumberOps, + { + use finality_grandpa::Chain; + + let ancestry_chain = AncestryChain::::new(&self.votes_ancestries); + + match finality_grandpa::validate_commit(&self.commit, voters, &ancestry_chain) { + Ok(ref result) if result.ghost().is_some() => {} + _ => { + let msg = "invalid commit in grandpa justification".to_string(); + return Err(ClientError::BadJustification(msg)); + } + } + + let mut buf = Vec::new(); + let mut visited_hashes = HashSet::new(); + for signed in self.commit.precommits.iter() { + if let Err(_) = communication::check_message_sig_with_buffer::( + &finality_grandpa::Message::Precommit(signed.precommit.clone()), + &signed.id, + &signed.signature, + self.round, + set_id, + &mut buf, + ) { + return Err(ClientError::BadJustification( + "invalid signature for precommit in grandpa justification".to_string(), + ) + .into()); + } + + if self.commit.target_hash == signed.precommit.target_hash { + continue; + } + + match ancestry_chain.ancestry(self.commit.target_hash, signed.precommit.target_hash) { + Ok(route) => { + // ancestry starts from parent hash but the precommit target hash has been visited + 
visited_hashes.insert(signed.precommit.target_hash); + for hash in route { + visited_hashes.insert(hash); + } + } + _ => { + return Err(ClientError::BadJustification( + "invalid precommit ancestry proof in grandpa justification".to_string(), + ) + .into()); + } + } + } + + let ancestry_hashes = self + .votes_ancestries + .iter() + .map(|h: &Block::Header| h.hash()) + .collect(); + + if visited_hashes != ancestry_hashes { + return Err(ClientError::BadJustification( + "invalid precommit ancestries in grandpa justification with unused headers" + .to_string(), + ) + .into()); + } + + Ok(()) + } } /// A utility trait implementing `finality_grandpa::Chain` using a given set of headers. /// This is useful when validating commits, using the given set of headers to /// verify a valid ancestry route to the target commit block. struct AncestryChain { - ancestry: HashMap, + ancestry: HashMap, } impl AncestryChain { - fn new(ancestry: &[Block::Header]) -> AncestryChain { - let ancestry: HashMap<_, _> = ancestry - .iter() - .cloned() - .map(|h: Block::Header| (h.hash(), h)) - .collect(); - - AncestryChain { ancestry } - } + fn new(ancestry: &[Block::Header]) -> AncestryChain { + let ancestry: HashMap<_, _> = ancestry + .iter() + .cloned() + .map(|h: Block::Header| (h.hash(), h)) + .collect(); + + AncestryChain { ancestry } + } } -impl finality_grandpa::Chain> for AncestryChain where - NumberFor: finality_grandpa::BlockNumberOps +impl finality_grandpa::Chain> for AncestryChain +where + NumberFor: finality_grandpa::BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { - let mut route = Vec::new(); - let mut current_hash = block; - loop { - if current_hash == base { break; } - match self.ancestry.get(¤t_hash) { - Some(current_header) => { - current_hash = *current_header.parent_hash(); - route.push(current_hash); - }, - _ => return Err(GrandpaError::NotDescendent), - } - } - route.pop(); // remove the base - - Ok(route) - } - - fn 
best_chain_containing(&self, _block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - None - } + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + let mut route = Vec::new(); + let mut current_hash = block; + loop { + if current_hash == base { + break; + } + match self.ancestry.get(¤t_hash) { + Some(current_header) => { + current_hash = *current_header.parent_hash(); + route.push(current_hash); + } + _ => return Err(GrandpaError::NotDescendent), + } + } + route.pop(); // remove the base + + Ok(route) + } + + fn best_chain_containing( + &self, + _block: Block::Hash, + ) -> Option<(Block::Hash, NumberFor)> { + None + } } diff --git a/client/finality-grandpa/src/lib.rs b/client/finality-grandpa/src/lib.rs index 6fab89ac68..5e0fb1f2a6 100644 --- a/client/finality-grandpa/src/lib.rs +++ b/client/finality-grandpa/src/lib.rs @@ -55,34 +55,34 @@ use futures::prelude::*; use futures::StreamExt; use log::{debug, info}; -use sc_client_api::{ - backend::{AuxStore, Backend}, - LockImportRun, BlockchainEvents, CallExecutor, - ExecutionStrategy, Finalizer, TransactionFor, ExecutorProvider, -}; -use sp_blockchain::{HeaderBackend, Error as ClientError, HeaderMetadata}; use parity_scale_codec::{Decode, Encode}; use prometheus_endpoint::{PrometheusError, Registry}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT, DigestFor, Zero}; +use sc_client_api::{ + backend::{AuxStore, Backend}, + BlockchainEvents, CallExecutor, ExecutionStrategy, ExecutorProvider, Finalizer, LockImportRun, + TransactionFor, +}; use sc_keystore::KeyStorePtr; -use sp_inherents::InherentDataProviders; -use sp_consensus::{SelectChain, BlockImport}; +use sc_telemetry::{telemetry, CONSENSUS_DEBUG, CONSENSUS_INFO}; +use serde_json; +use sp_blockchain::{Error as ClientError, HeaderBackend, HeaderMetadata}; +use sp_consensus::{BlockImport, SelectChain}; use sp_core::Pair; +use sp_inherents::InherentDataProviders; +use 
sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, DigestFor, NumberFor, Zero}; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; -use sc_telemetry::{telemetry, CONSENSUS_INFO, CONSENSUS_DEBUG}; -use serde_json; use sp_finality_tracker; use finality_grandpa::Error as GrandpaError; -use finality_grandpa::{voter, BlockNumberOps, voter_set::VoterSet}; +use finality_grandpa::{voter, voter_set::VoterSet, BlockNumberOps}; -use std::{fmt, io}; +use std::pin::Pin; use std::sync::Arc; +use std::task::{Context, Poll}; use std::time::Duration; -use std::pin::Pin; -use std::task::{Poll, Context}; +use std::{fmt, io}; // utility logging macro that takes as first argument a conditional to // decide whether to log under debug or info level (useful to restrict @@ -118,19 +118,19 @@ pub use finality_proof::{FinalityProofProvider, StorageAndProofProvider}; pub use justification::GrandpaJustification; pub use light_import::light_block_import; pub use voting_rule::{ - BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder + BeforeBestBlockBy, ThreeQuartersOfTheUnfinalizedChain, VotingRule, VotingRulesBuilder, }; use aux_schema::PersistentData; +use communication::{Network as NetworkT, NetworkBridge}; use environment::{Environment, VoterSetState}; use import::GrandpaBlockImport; -use until_imported::UntilGlobalMessageBlocksImported; -use communication::{NetworkBridge, Network as NetworkT}; use sp_finality_grandpa::{AuthorityList, AuthorityPair, AuthoritySignature, SetId}; +use until_imported::UntilGlobalMessageBlocksImported; // Re-export these two because it's just so damn convenient. -pub use sp_finality_grandpa::{AuthorityId, ScheduledChange}; use sp_api::ProvideRuntimeApi; +pub use sp_finality_grandpa::{AuthorityId, ScheduledChange}; use std::marker::PhantomData; #[cfg(test)] @@ -140,319 +140,343 @@ mod tests; pub type Message = finality_grandpa::Message<::Hash, NumberFor>; /// A signed message. 
pub type SignedMessage = finality_grandpa::SignedMessage< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// A primary propose message for this chain's block type. -pub type PrimaryPropose = finality_grandpa::PrimaryPropose<::Hash, NumberFor>; +pub type PrimaryPropose = + finality_grandpa::PrimaryPropose<::Hash, NumberFor>; /// A prevote message for this chain's block type. pub type Prevote = finality_grandpa::Prevote<::Hash, NumberFor>; /// A precommit message for this chain's block type. pub type Precommit = finality_grandpa::Precommit<::Hash, NumberFor>; /// A catch up message for this chain's block type. pub type CatchUp = finality_grandpa::CatchUp< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// A commit message for this chain's block type. pub type Commit = finality_grandpa::Commit< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// A compact commit message for this chain's block type. pub type CompactCommit = finality_grandpa::CompactCommit< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// A global communication input stream for commits and catch up messages. Not /// exposed publicly, used internally to simplify types in the communication /// layer. type CommunicationIn = finality_grandpa::voter::CommunicationIn< - ::Hash, - NumberFor, - AuthoritySignature, - AuthorityId, + ::Hash, + NumberFor, + AuthoritySignature, + AuthorityId, >; /// Global communication input stream for commits and catch up messages, with /// the hash type not being derived from the block, useful for forcing the hash /// to some type (e.g. `H256`) when the compiler can't do the inference. 
-type CommunicationInH = finality_grandpa::voter::CommunicationIn< - H, - NumberFor, - AuthoritySignature, - AuthorityId, ->; +type CommunicationInH = + finality_grandpa::voter::CommunicationIn, AuthoritySignature, AuthorityId>; /// Global communication sink for commits with the hash type not being derived /// from the block, useful for forcing the hash to some type (e.g. `H256`) when /// the compiler can't do the inference. -type CommunicationOutH = finality_grandpa::voter::CommunicationOut< - H, - NumberFor, - AuthoritySignature, - AuthorityId, ->; +type CommunicationOutH = + finality_grandpa::voter::CommunicationOut, AuthoritySignature, AuthorityId>; /// Configuration for the GRANDPA service. #[derive(Clone)] pub struct Config { - /// The expected duration for a message to be gossiped across the network. - pub gossip_duration: Duration, - /// Justification generation period (in blocks). GRANDPA will try to generate justifications - /// at least every justification_period blocks. There are some other events which might cause - /// justification generation. - pub justification_period: u32, - /// Whether the GRANDPA observer protocol is live on the network and thereby - /// a full-node not running as a validator is running the GRANDPA observer - /// protocol (we will only issue catch-up requests to authorities when the - /// observer protocol is enabled). - pub observer_enabled: bool, - /// Whether the node is running as an authority (i.e. running the full GRANDPA protocol). - pub is_authority: bool, - /// Some local identifier of the voter. - pub name: Option, - /// The keystore that manages the keys of this node. - pub keystore: Option, + /// The expected duration for a message to be gossiped across the network. + pub gossip_duration: Duration, + /// Justification generation period (in blocks). GRANDPA will try to generate justifications + /// at least every justification_period blocks. There are some other events which might cause + /// justification generation. 
+ pub justification_period: u32, + /// Whether the GRANDPA observer protocol is live on the network and thereby + /// a full-node not running as a validator is running the GRANDPA observer + /// protocol (we will only issue catch-up requests to authorities when the + /// observer protocol is enabled). + pub observer_enabled: bool, + /// Whether the node is running as an authority (i.e. running the full GRANDPA protocol). + pub is_authority: bool, + /// Some local identifier of the voter. + pub name: Option, + /// The keystore that manages the keys of this node. + pub keystore: Option, } impl Config { - fn name(&self) -> &str { - self.name.as_ref().map(|s| s.as_str()).unwrap_or("") - } + fn name(&self) -> &str { + self.name + .as_ref() + .map(|s| s.as_str()) + .unwrap_or("") + } } /// Errors that can occur while voting in GRANDPA. #[derive(Debug)] pub enum Error { - /// An error within grandpa. - Grandpa(GrandpaError), - /// A network error. - Network(String), - /// A blockchain error. - Blockchain(String), - /// Could not complete a round on disk. - Client(ClientError), - /// An invariant has been violated (e.g. not finalizing pending change blocks in-order) - Safety(String), - /// A timer failed to fire. - Timer(io::Error), + /// An error within grandpa. + Grandpa(GrandpaError), + /// A network error. + Network(String), + /// A blockchain error. + Blockchain(String), + /// Could not complete a round on disk. + Client(ClientError), + /// An invariant has been violated (e.g. not finalizing pending change blocks in-order) + Safety(String), + /// A timer failed to fire. + Timer(io::Error), } impl From for Error { - fn from(e: GrandpaError) -> Self { - Error::Grandpa(e) - } + fn from(e: GrandpaError) -> Self { + Error::Grandpa(e) + } } impl From for Error { - fn from(e: ClientError) -> Self { - Error::Client(e) - } + fn from(e: ClientError) -> Self { + Error::Client(e) + } } /// Something which can determine if a block is known. 
pub(crate) trait BlockStatus { - /// Return `Ok(Some(number))` or `Ok(None)` depending on whether the block - /// is definitely known and has been imported. - /// If an unexpected error occurs, return that. - fn block_number(&self, hash: Block::Hash) -> Result>, Error>; + /// Return `Ok(Some(number))` or `Ok(None)` depending on whether the block + /// is definitely known and has been imported. + /// If an unexpected error occurs, return that. + fn block_number(&self, hash: Block::Hash) -> Result>, Error>; } -impl BlockStatus for Arc where - Client: HeaderBackend, - NumberFor: BlockNumberOps, +impl BlockStatus for Arc +where + Client: HeaderBackend, + NumberFor: BlockNumberOps, { - fn block_number(&self, hash: Block::Hash) -> Result>, Error> { - self.block_number_from_id(&BlockId::Hash(hash)) - .map_err(|e| Error::Blockchain(format!("{:?}", e))) - } + fn block_number(&self, hash: Block::Hash) -> Result>, Error> { + self.block_number_from_id(&BlockId::Hash(hash)) + .map_err(|e| Error::Blockchain(format!("{:?}", e))) + } } /// A trait that includes all the client functionalities grandpa requires. /// Ideally this would be a trait alias, we're not there yet. 
/// tracking issue https://github.com/rust-lang/rust/issues/41517 pub trait ClientForGrandpa: - LockImportRun + Finalizer + AuxStore - + HeaderMetadata + HeaderBackend - + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider - + BlockImport, Error = sp_consensus::Error> - where - BE: Backend, - Block: BlockT, -{} + LockImportRun + + Finalizer + + AuxStore + + HeaderMetadata + + HeaderBackend + + BlockchainEvents + + ProvideRuntimeApi + + ExecutorProvider + + BlockImport, Error = sp_consensus::Error> +where + BE: Backend, + Block: BlockT, +{ +} impl ClientForGrandpa for T - where - BE: Backend, - Block: BlockT, - T: LockImportRun + Finalizer + AuxStore - + HeaderMetadata + HeaderBackend - + BlockchainEvents + ProvideRuntimeApi + ExecutorProvider - + BlockImport, Error = sp_consensus::Error>, -{} +where + BE: Backend, + Block: BlockT, + T: LockImportRun + + Finalizer + + AuxStore + + HeaderMetadata + + HeaderBackend + + BlockchainEvents + + ProvideRuntimeApi + + ExecutorProvider + + BlockImport, Error = sp_consensus::Error>, +{ +} /// Something that one can ask to do a block sync request. pub(crate) trait BlockSyncRequester { - /// Notifies the sync service to try and sync the given block from the given - /// peers. - /// - /// If the given vector of peers is empty then the underlying implementation - /// should make a best effort to fetch the block from any peers it is - /// connected to (NOTE: this assumption will change in the future #3629). - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor); + /// Notifies the sync service to try and sync the given block from the given + /// peers. + /// + /// If the given vector of peers is empty then the underlying implementation + /// should make a best effort to fetch the block from any peers it is + /// connected to (NOTE: this assumption will change in the future #3629). 
+ fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ); } -impl BlockSyncRequester for NetworkBridge where - Block: BlockT, - Network: NetworkT, +impl BlockSyncRequester for NetworkBridge +where + Block: BlockT, + Network: NetworkT, { - fn set_sync_fork_request(&self, peers: Vec, hash: Block::Hash, number: NumberFor) { - NetworkBridge::set_sync_fork_request(self, peers, hash, number) - } + fn set_sync_fork_request( + &self, + peers: Vec, + hash: Block::Hash, + number: NumberFor, + ) { + NetworkBridge::set_sync_fork_request(self, peers, hash, number) + } } /// A new authority set along with the canonical block it changed at. #[derive(Debug)] pub(crate) struct NewAuthoritySet { - pub(crate) canon_number: N, - pub(crate) canon_hash: H, - pub(crate) set_id: SetId, - pub(crate) authorities: AuthorityList, + pub(crate) canon_number: N, + pub(crate) canon_hash: H, + pub(crate) set_id: SetId, + pub(crate) authorities: AuthorityList, } /// Commands issued to the voter. #[derive(Debug)] pub(crate) enum VoterCommand { - /// Pause the voter for given reason. - Pause(String), - /// New authorities. - ChangeAuthorities(NewAuthoritySet) + /// Pause the voter for given reason. + Pause(String), + /// New authorities. + ChangeAuthorities(NewAuthoritySet), } impl fmt::Display for VoterCommand { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - VoterCommand::Pause(ref reason) => write!(f, "Pausing voter: {}", reason), - VoterCommand::ChangeAuthorities(_) => write!(f, "Changing authorities"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + VoterCommand::Pause(ref reason) => write!(f, "Pausing voter: {}", reason), + VoterCommand::ChangeAuthorities(_) => write!(f, "Changing authorities"), + } + } } /// Signals either an early exit of a voter or an error. #[derive(Debug)] pub(crate) enum CommandOrError { - /// An error occurred. - Error(Error), - /// A command to the voter. 
- VoterCommand(VoterCommand), + /// An error occurred. + Error(Error), + /// A command to the voter. + VoterCommand(VoterCommand), } impl From for CommandOrError { - fn from(e: Error) -> Self { - CommandOrError::Error(e) - } + fn from(e: Error) -> Self { + CommandOrError::Error(e) + } } impl From for CommandOrError { - fn from(e: ClientError) -> Self { - CommandOrError::Error(Error::Client(e)) - } + fn from(e: ClientError) -> Self { + CommandOrError::Error(Error::Client(e)) + } } impl From for CommandOrError { - fn from(e: finality_grandpa::Error) -> Self { - CommandOrError::Error(Error::from(e)) - } + fn from(e: finality_grandpa::Error) -> Self { + CommandOrError::Error(Error::from(e)) + } } impl From> for CommandOrError { - fn from(e: VoterCommand) -> Self { - CommandOrError::VoterCommand(e) - } + fn from(e: VoterCommand) -> Self { + CommandOrError::VoterCommand(e) + } } -impl ::std::error::Error for CommandOrError { } +impl ::std::error::Error for CommandOrError {} impl fmt::Display for CommandOrError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - CommandOrError::Error(ref e) => write!(f, "{:?}", e), - CommandOrError::VoterCommand(ref cmd) => write!(f, "{}", cmd), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + CommandOrError::Error(ref e) => write!(f, "{:?}", e), + CommandOrError::VoterCommand(ref cmd) => write!(f, "{}", cmd), + } + } } pub struct LinkHalf { - client: Arc, - select_chain: SC, - persistent_data: PersistentData, - voter_commands_rx: TracingUnboundedReceiver>>, + client: Arc, + select_chain: SC, + persistent_data: PersistentData, + voter_commands_rx: TracingUnboundedReceiver>>, } /// Provider for the Grandpa authority set configured on the genesis block. pub trait GenesisAuthoritySetProvider { - /// Get the authority set at the genesis block. - fn get(&self) -> Result; + /// Get the authority set at the genesis block. 
+ fn get(&self) -> Result; } -impl GenesisAuthoritySetProvider for Arc> - where E: CallExecutor, +impl GenesisAuthoritySetProvider + for Arc> +where + E: CallExecutor, { - fn get(&self) -> Result { - // This implementation uses the Grandpa runtime API instead of reading directly from the - // `GRANDPA_AUTHORITIES_KEY` as the data may have been migrated since the genesis block of - // the chain, whereas the runtime API is backwards compatible. - self.executor() - .call( - &BlockId::Number(Zero::zero()), - "GrandpaApi_grandpa_authorities", - &[], - ExecutionStrategy::NativeElseWasm, - None, - ) - .and_then(|call_result| { - Decode::decode(&mut &call_result[..]) - .map_err(|err| ClientError::CallResultDecode( - "failed to decode GRANDPA authorities set proof".into(), err - )) - }) - } + fn get(&self) -> Result { + // This implementation uses the Grandpa runtime API instead of reading directly from the + // `GRANDPA_AUTHORITIES_KEY` as the data may have been migrated since the genesis block of + // the chain, whereas the runtime API is backwards compatible. + self.executor() + .call( + &BlockId::Number(Zero::zero()), + "GrandpaApi_grandpa_authorities", + &[], + ExecutionStrategy::NativeElseWasm, + None, + ) + .and_then(|call_result| { + Decode::decode(&mut &call_result[..]).map_err(|err| { + ClientError::CallResultDecode( + "failed to decode GRANDPA authorities set proof".into(), + err, + ) + }) + }) + } } /// Make block importer and link half necessary to tie the background voter /// to it. 
pub fn block_import( - client: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - select_chain: SC, + client: Arc, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + select_chain: SC, ) -> Result< - ( - GrandpaBlockImport, - LinkHalf, - ), - ClientError, + ( + GrandpaBlockImport, + LinkHalf, + ), + ClientError, > where - SC: SelectChain, - BE: Backend + 'static, - Client: ClientForGrandpa + 'static, + SC: SelectChain, + BE: Backend + 'static, + Client: ClientForGrandpa + 'static, { - block_import_with_authority_set_hard_forks( - client, - genesis_authorities_provider, - select_chain, - Default::default(), - ) + block_import_with_authority_set_hard_forks( + client, + genesis_authorities_provider, + select_chain, + Default::default(), + ) } /// Make block importer and link half necessary to tie the background voter to @@ -461,217 +485,216 @@ where /// block when importing it) will be replaced by a standard change with the /// given static authorities. 
pub fn block_import_with_authority_set_hard_forks( - client: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - select_chain: SC, - authority_set_hard_forks: Vec<(SetId, (Block::Hash, NumberFor), AuthorityList)>, + client: Arc, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + select_chain: SC, + authority_set_hard_forks: Vec<(SetId, (Block::Hash, NumberFor), AuthorityList)>, ) -> Result< - ( - GrandpaBlockImport, - LinkHalf, - ), - ClientError, + ( + GrandpaBlockImport, + LinkHalf, + ), + ClientError, > where - SC: SelectChain, - BE: Backend + 'static, - Client: ClientForGrandpa + 'static, + SC: SelectChain, + BE: Backend + 'static, + Client: ClientForGrandpa + 'static, { - let chain_info = client.info(); - let genesis_hash = chain_info.genesis_hash; - - let persistent_data = aux_schema::load_persistent( - &*client, - genesis_hash, - >::zero(), - || { - let authorities = genesis_authorities_provider.get()?; - telemetry!(CONSENSUS_DEBUG; "afg.loading_authorities"; - "authorities_len" => ?authorities.len() - ); - Ok(authorities) - } - )?; - - let (voter_commands_tx, voter_commands_rx) = tracing_unbounded("mpsc_grandpa_voter_command"); - - // create pending change objects with 0 delay and enacted on finality - // (i.e. standard changes) for each authority set hard fork. 
- let authority_set_hard_forks = authority_set_hard_forks - .into_iter() - .map(|(set_id, (hash, number), authorities)| { - ( - set_id, - authorities::PendingChange { - next_authorities: authorities, - delay: Zero::zero(), - canon_hash: hash, - canon_height: number, - delay_kind: authorities::DelayKind::Finalized, - }, - ) - }) - .collect(); - - Ok(( - GrandpaBlockImport::new( - client.clone(), - select_chain.clone(), - persistent_data.authority_set.clone(), - voter_commands_tx, - persistent_data.consensus_changes.clone(), - authority_set_hard_forks, - ), - LinkHalf { - client, - select_chain, - persistent_data, - voter_commands_rx, - }, - )) + let chain_info = client.info(); + let genesis_hash = chain_info.genesis_hash; + + let persistent_data = + aux_schema::load_persistent(&*client, genesis_hash, >::zero(), || { + let authorities = genesis_authorities_provider.get()?; + telemetry!(CONSENSUS_DEBUG; "afg.loading_authorities"; + "authorities_len" => ?authorities.len() + ); + Ok(authorities) + })?; + + let (voter_commands_tx, voter_commands_rx) = tracing_unbounded("mpsc_grandpa_voter_command"); + + // create pending change objects with 0 delay and enacted on finality + // (i.e. standard changes) for each authority set hard fork. 
+ let authority_set_hard_forks = authority_set_hard_forks + .into_iter() + .map(|(set_id, (hash, number), authorities)| { + ( + set_id, + authorities::PendingChange { + next_authorities: authorities, + delay: Zero::zero(), + canon_hash: hash, + canon_height: number, + delay_kind: authorities::DelayKind::Finalized, + }, + ) + }) + .collect(); + + Ok(( + GrandpaBlockImport::new( + client.clone(), + select_chain.clone(), + persistent_data.authority_set.clone(), + voter_commands_tx, + persistent_data.consensus_changes.clone(), + authority_set_hard_forks, + ), + LinkHalf { + client, + select_chain, + persistent_data, + voter_commands_rx, + }, + )) } fn global_communication( - set_id: SetId, - voters: &Arc>, - client: Arc, - network: &NetworkBridge, - keystore: &Option, - metrics: Option, + set_id: SetId, + voters: &Arc>, + client: Arc, + network: &NetworkBridge, + keystore: &Option, + metrics: Option, ) -> ( - impl Stream< - Item = Result, CommandOrError>>, - >, - impl Sink< - CommunicationOutH, - Error = CommandOrError>, - > + Unpin, -) where - BE: Backend + 'static, - C: ClientForGrandpa + 'static, - N: NetworkT, - NumberFor: BlockNumberOps, + impl Stream< + Item = Result< + CommunicationInH, + CommandOrError>, + >, + >, + impl Sink< + CommunicationOutH, + Error = CommandOrError>, + > + Unpin, +) +where + BE: Backend + 'static, + C: ClientForGrandpa + 'static, + N: NetworkT, + NumberFor: BlockNumberOps, { - let is_voter = is_voter(voters, keystore).is_some(); - - // verification stream - let (global_in, global_out) = network.global_communication( - communication::SetId(set_id), - voters.clone(), - is_voter, - ); - - // block commit and catch up messages until relevant blocks are imported. 
- let global_in = UntilGlobalMessageBlocksImported::new( - client.import_notification_stream(), - network.clone(), - client.clone(), - global_in, - "global", - metrics, - ); - - let global_in = global_in.map_err(CommandOrError::from); - let global_out = global_out.sink_map_err(CommandOrError::from); - - (global_in, global_out) + let is_voter = is_voter(voters, keystore).is_some(); + + // verification stream + let (global_in, global_out) = + network.global_communication(communication::SetId(set_id), voters.clone(), is_voter); + + // block commit and catch up messages until relevant blocks are imported. + let global_in = UntilGlobalMessageBlocksImported::new( + client.import_notification_stream(), + network.clone(), + client.clone(), + global_in, + "global", + metrics, + ); + + let global_in = global_in.map_err(CommandOrError::from); + let global_out = global_out.sink_map_err(CommandOrError::from); + + (global_in, global_out) } /// Register the finality tracker inherent data provider (which is used by /// GRANDPA), if not registered already. 
fn register_finality_tracker_inherent_data_provider( - client: Arc, - inherent_data_providers: &InherentDataProviders, -) -> Result<(), sp_consensus::Error> where - Client: HeaderBackend + 'static, + client: Arc, + inherent_data_providers: &InherentDataProviders, +) -> Result<(), sp_consensus::Error> +where + Client: HeaderBackend + 'static, { - if !inherent_data_providers.has_provider(&sp_finality_tracker::INHERENT_IDENTIFIER) { - inherent_data_providers - .register_provider(sp_finality_tracker::InherentDataProvider::new(move || { - #[allow(deprecated)] - { - let info = client.info(); - telemetry!(CONSENSUS_INFO; "afg.finalized"; - "finalized_number" => ?info.finalized_number, - "finalized_hash" => ?info.finalized_hash, - ); - Ok(info.finalized_number) - } - })) - .map_err(|err| sp_consensus::Error::InherentData(err.into())) - } else { - Ok(()) - } + if !inherent_data_providers.has_provider(&sp_finality_tracker::INHERENT_IDENTIFIER) { + inherent_data_providers + .register_provider(sp_finality_tracker::InherentDataProvider::new(move || { + #[allow(deprecated)] + { + let info = client.info(); + telemetry!(CONSENSUS_INFO; "afg.finalized"; + "finalized_number" => ?info.finalized_number, + "finalized_hash" => ?info.finalized_hash, + ); + Ok(info.finalized_number) + } + })) + .map_err(|err| sp_consensus::Error::InherentData(err.into())) + } else { + Ok(()) + } } /// Parameters used to run Grandpa. pub struct GrandpaParams { - /// Configuration for the GRANDPA service. - pub config: Config, - /// A link to the block import worker. - pub link: LinkHalf, - /// The Network instance. - pub network: N, - /// The inherent data providers. - pub inherent_data_providers: InherentDataProviders, - /// If supplied, can be used to hook on telemetry connection established events. - pub telemetry_on_connect: Option>, - /// A voting rule used to potentially restrict target votes. - pub voting_rule: VR, - /// The prometheus metrics registry. 
- pub prometheus_registry: Option, + /// Configuration for the GRANDPA service. + pub config: Config, + /// A link to the block import worker. + pub link: LinkHalf, + /// The Network instance. + pub network: N, + /// The inherent data providers. + pub inherent_data_providers: InherentDataProviders, + /// If supplied, can be used to hook on telemetry connection established events. + pub telemetry_on_connect: Option>, + /// A voting rule used to potentially restrict target votes. + pub voting_rule: VR, + /// The prometheus metrics registry. + pub prometheus_registry: Option, } /// Run a GRANDPA voter as a task. Provide configuration and a link to a /// block import worker that has already been instantiated with `block_import`. pub fn run_grandpa_voter( - grandpa_params: GrandpaParams, -) -> sp_blockchain::Result + Unpin + Send + 'static> where - Block::Hash: Ord, - BE: Backend + 'static, - N: NetworkT + Send + Sync + Clone + 'static, - SC: SelectChain + 'static, - VR: VotingRule + Clone + 'static, - NumberFor: BlockNumberOps, - DigestFor: Encode, - C: ClientForGrandpa + 'static, + grandpa_params: GrandpaParams, +) -> sp_blockchain::Result + Unpin + Send + 'static> +where + Block::Hash: Ord, + BE: Backend + 'static, + N: NetworkT + Send + Sync + Clone + 'static, + SC: SelectChain + 'static, + VR: VotingRule + Clone + 'static, + NumberFor: BlockNumberOps, + DigestFor: Encode, + C: ClientForGrandpa + 'static, { - let GrandpaParams { - mut config, - link, - network, - inherent_data_providers, - telemetry_on_connect, - voting_rule, - prometheus_registry, - } = grandpa_params; - - // NOTE: we have recently removed `run_grandpa_observer` from the public - // API, I felt it is easier to just ignore this field rather than removing - // it from the config temporarily. This should be removed after #5013 is - // fixed and we re-add the observer to the public API. 
- config.observer_enabled = false; - - let LinkHalf { - client, - select_chain, - persistent_data, - voter_commands_rx, - } = link; - - let network = NetworkBridge::new( - network, - config.clone(), - persistent_data.set_state.clone(), - prometheus_registry.as_ref(), - ); - - register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; - - let conf = config.clone(); - let telemetry_task = if let Some(telemetry_on_connect) = telemetry_on_connect { - let authorities = persistent_data.authority_set.clone(); - let events = telemetry_on_connect + let GrandpaParams { + mut config, + link, + network, + inherent_data_providers, + telemetry_on_connect, + voting_rule, + prometheus_registry, + } = grandpa_params; + + // NOTE: we have recently removed `run_grandpa_observer` from the public + // API, I felt it is easier to just ignore this field rather than removing + // it from the config temporarily. This should be removed after #5013 is + // fixed and we re-add the observer to the public API. 
+ config.observer_enabled = false; + + let LinkHalf { + client, + select_chain, + persistent_data, + voter_commands_rx, + } = link; + + let network = NetworkBridge::new( + network, + config.clone(), + persistent_data.set_state.clone(), + prometheus_registry.as_ref(), + ); + + register_finality_tracker_inherent_data_provider(client.clone(), &inherent_data_providers)?; + + let conf = config.clone(); + let telemetry_task = if let Some(telemetry_on_connect) = telemetry_on_connect { + let authorities = persistent_data.authority_set.clone(); + let events = telemetry_on_connect .for_each(move |_| { let curr = authorities.current_authorities(); let mut auths = curr.voters().into_iter().map(|(p, _)| p); @@ -690,293 +713,293 @@ pub fn run_grandpa_voter( ); future::ready(()) }); - future::Either::Left(events) - } else { - future::Either::Right(future::pending()) - }; - - let voter_work = VoterWork::new( - client, - config, - network, - select_chain, - voting_rule, - persistent_data, - voter_commands_rx, - prometheus_registry, - ); - - let voter_work = voter_work - .map(|_| ()); - - // Make sure that `telemetry_task` doesn't accidentally finish and kill grandpa. - let telemetry_task = telemetry_task - .then(|_| future::pending::<()>()); - - Ok(future::select(voter_work, telemetry_task).map(drop)) + future::Either::Left(events) + } else { + future::Either::Right(future::pending()) + }; + + let voter_work = VoterWork::new( + client, + config, + network, + select_chain, + voting_rule, + persistent_data, + voter_commands_rx, + prometheus_registry, + ); + + let voter_work = voter_work.map(|_| ()); + + // Make sure that `telemetry_task` doesn't accidentally finish and kill grandpa. 
+ let telemetry_task = telemetry_task.then(|_| future::pending::<()>()); + + Ok(future::select(voter_work, telemetry_task).map(drop)) } struct Metrics { - environment: environment::Metrics, - until_imported: until_imported::Metrics, + environment: environment::Metrics, + until_imported: until_imported::Metrics, } impl Metrics { - fn register(registry: &Registry) -> Result { - Ok(Metrics { - environment: environment::Metrics::register(registry)?, - until_imported: until_imported::Metrics::register(registry)?, - }) - } + fn register(registry: &Registry) -> Result { + Ok(Metrics { + environment: environment::Metrics::register(registry)?, + until_imported: until_imported::Metrics::register(registry)?, + }) + } } /// Future that powers the voter. #[must_use] struct VoterWork, SC, VR> { - voter: Pin>>> + Send>>, - env: Arc>, - voter_commands_rx: TracingUnboundedReceiver>>, - network: NetworkBridge, - - /// Prometheus metrics. - metrics: Option, + voter: Pin< + Box>>> + Send>, + >, + env: Arc>, + voter_commands_rx: TracingUnboundedReceiver>>, + network: NetworkBridge, + + /// Prometheus metrics. 
+ metrics: Option, } impl VoterWork where - Block: BlockT, - B: Backend + 'static, - C: ClientForGrandpa + 'static, - N: NetworkT + Sync, - NumberFor: BlockNumberOps, - SC: SelectChain + 'static, - VR: VotingRule + Clone + 'static, + Block: BlockT, + B: Backend + 'static, + C: ClientForGrandpa + 'static, + N: NetworkT + Sync, + NumberFor: BlockNumberOps, + SC: SelectChain + 'static, + VR: VotingRule + Clone + 'static, { - fn new( - client: Arc, - config: Config, - network: NetworkBridge, - select_chain: SC, - voting_rule: VR, - persistent_data: PersistentData, - voter_commands_rx: TracingUnboundedReceiver>>, - prometheus_registry: Option, - ) -> Self { - let metrics = match prometheus_registry.as_ref().map(Metrics::register) { - Some(Ok(metrics)) => Some(metrics), - Some(Err(e)) => { - debug!(target: "afg", "Failed to register metrics: {:?}", e); - None - } - None => None, - }; - - let voters = persistent_data.authority_set.current_authorities(); - let env = Arc::new(Environment { - client, - select_chain, - voting_rule, - voters: Arc::new(voters), - config, - network: network.clone(), - set_id: persistent_data.authority_set.set_id(), - authority_set: persistent_data.authority_set.clone(), - consensus_changes: persistent_data.consensus_changes.clone(), - voter_set_state: persistent_data.set_state.clone(), - metrics: metrics.as_ref().map(|m| m.environment.clone()), - _phantom: PhantomData, - }); - - let mut work = VoterWork { - // `voter` is set to a temporary value and replaced below when - // calling `rebuild_voter`. - voter: Box::pin(future::pending()), - env, - voter_commands_rx, - network, - metrics, - }; - work.rebuild_voter(); - work - } - - /// Rebuilds the `self.voter` field using the current authority set - /// state. This method should be called when we know that the authority set - /// has changed (e.g. as signalled by a voter command). 
- fn rebuild_voter(&mut self) { - debug!(target: "afg", "{}: Starting new voter with set ID {}", self.env.config.name(), self.env.set_id); - - let authority_id = is_voter(&self.env.voters, &self.env.config.keystore) - .map(|ap| ap.public()) - .unwrap_or(Default::default()); - - telemetry!(CONSENSUS_DEBUG; "afg.starting_new_voter"; - "name" => ?self.env.config.name(), - "set_id" => ?self.env.set_id, - "authority_id" => authority_id.to_string(), - ); - - let chain_info = self.env.client.info(); - telemetry!(CONSENSUS_INFO; "afg.authority_set"; - "number" => ?chain_info.finalized_number, - "hash" => ?chain_info.finalized_hash, - "authority_id" => authority_id.to_string(), - "authority_set_id" => ?self.env.set_id, - "authorities" => { - let authorities: Vec = self.env.voters.voters() - .iter().map(|(id, _)| id.to_string()).collect(); - serde_json::to_string(&authorities) - .expect("authorities is always at least an empty vector; elements are always of type string") - }, - ); - - match &*self.env.voter_set_state.read() { - VoterSetState::Live { completed_rounds, .. } => { - let last_finalized = ( - chain_info.finalized_hash, - chain_info.finalized_number, - ); - - let global_comms = global_communication( - self.env.set_id, - &self.env.voters, - self.env.client.clone(), - &self.env.network, - &self.env.config.keystore, - self.metrics.as_ref().map(|m| m.until_imported.clone()), - ); - - let last_completed_round = completed_rounds.last(); - - let voter = voter::Voter::new( - self.env.clone(), - (*self.env.voters).clone(), - global_comms, - last_completed_round.number, - last_completed_round.votes.clone(), - last_completed_round.base.clone(), - last_finalized, - ); - - self.voter = Box::pin(voter); - }, - VoterSetState::Paused { .. 
} => - self.voter = Box::pin(future::pending()), - }; - } - - fn handle_voter_command( - &mut self, - command: VoterCommand> - ) -> Result<(), Error> { - match command { - VoterCommand::ChangeAuthorities(new) => { - let voters: Vec = new.authorities.iter().map(move |(a, _)| { - format!("{}", a) - }).collect(); - telemetry!(CONSENSUS_INFO; "afg.voter_command_change_authorities"; - "number" => ?new.canon_number, - "hash" => ?new.canon_hash, - "voters" => ?voters, - "set_id" => ?new.set_id, - ); - - self.env.update_voter_set_state(|_| { - // start the new authority set using the block where the - // set changed (not where the signal happened!) as the base. - let set_state = VoterSetState::live( - new.set_id, - &*self.env.authority_set.inner().read(), - (new.canon_hash, new.canon_number), - ); - - aux_schema::write_voter_set_state(&*self.env.client, &set_state)?; - Ok(Some(set_state)) - })?; - - self.env = Arc::new(Environment { - voters: Arc::new(new.authorities.into_iter().collect()), - set_id: new.set_id, - voter_set_state: self.env.voter_set_state.clone(), - // Fields below are simply transferred and not updated. - client: self.env.client.clone(), - select_chain: self.env.select_chain.clone(), - config: self.env.config.clone(), - authority_set: self.env.authority_set.clone(), - consensus_changes: self.env.consensus_changes.clone(), - network: self.env.network.clone(), - voting_rule: self.env.voting_rule.clone(), - metrics: self.env.metrics.clone(), - _phantom: PhantomData, - }); - - self.rebuild_voter(); - Ok(()) - } - VoterCommand::Pause(reason) => { - info!(target: "afg", "Pausing old validator set: {}", reason); - - // not racing because old voter is shut down. 
- self.env.update_voter_set_state(|voter_set_state| { - let completed_rounds = voter_set_state.completed_rounds(); - let set_state = VoterSetState::Paused { completed_rounds }; - - aux_schema::write_voter_set_state(&*self.env.client, &set_state)?; - Ok(Some(set_state)) - })?; - - self.rebuild_voter(); - Ok(()) - } - } - } + fn new( + client: Arc, + config: Config, + network: NetworkBridge, + select_chain: SC, + voting_rule: VR, + persistent_data: PersistentData, + voter_commands_rx: TracingUnboundedReceiver>>, + prometheus_registry: Option, + ) -> Self { + let metrics = match prometheus_registry.as_ref().map(Metrics::register) { + Some(Ok(metrics)) => Some(metrics), + Some(Err(e)) => { + debug!(target: "afg", "Failed to register metrics: {:?}", e); + None + } + None => None, + }; + + let voters = persistent_data.authority_set.current_authorities(); + let env = Arc::new(Environment { + client, + select_chain, + voting_rule, + voters: Arc::new(voters), + config, + network: network.clone(), + set_id: persistent_data.authority_set.set_id(), + authority_set: persistent_data.authority_set.clone(), + consensus_changes: persistent_data.consensus_changes.clone(), + voter_set_state: persistent_data.set_state.clone(), + metrics: metrics.as_ref().map(|m| m.environment.clone()), + _phantom: PhantomData, + }); + + let mut work = VoterWork { + // `voter` is set to a temporary value and replaced below when + // calling `rebuild_voter`. + voter: Box::pin(future::pending()), + env, + voter_commands_rx, + network, + metrics, + }; + work.rebuild_voter(); + work + } + + /// Rebuilds the `self.voter` field using the current authority set + /// state. This method should be called when we know that the authority set + /// has changed (e.g. as signalled by a voter command). 
+ fn rebuild_voter(&mut self) { + debug!(target: "afg", "{}: Starting new voter with set ID {}", self.env.config.name(), self.env.set_id); + + let authority_id = is_voter(&self.env.voters, &self.env.config.keystore) + .map(|ap| ap.public()) + .unwrap_or(Default::default()); + + telemetry!(CONSENSUS_DEBUG; "afg.starting_new_voter"; + "name" => ?self.env.config.name(), + "set_id" => ?self.env.set_id, + "authority_id" => authority_id.to_string(), + ); + + let chain_info = self.env.client.info(); + telemetry!(CONSENSUS_INFO; "afg.authority_set"; + "number" => ?chain_info.finalized_number, + "hash" => ?chain_info.finalized_hash, + "authority_id" => authority_id.to_string(), + "authority_set_id" => ?self.env.set_id, + "authorities" => { + let authorities: Vec = self.env.voters.voters() + .iter().map(|(id, _)| id.to_string()).collect(); + serde_json::to_string(&authorities) + .expect("authorities is always at least an empty vector; elements are always of type string") + }, + ); + + match &*self.env.voter_set_state.read() { + VoterSetState::Live { + completed_rounds, .. + } => { + let last_finalized = (chain_info.finalized_hash, chain_info.finalized_number); + + let global_comms = global_communication( + self.env.set_id, + &self.env.voters, + self.env.client.clone(), + &self.env.network, + &self.env.config.keystore, + self.metrics.as_ref().map(|m| m.until_imported.clone()), + ); + + let last_completed_round = completed_rounds.last(); + + let voter = voter::Voter::new( + self.env.clone(), + (*self.env.voters).clone(), + global_comms, + last_completed_round.number, + last_completed_round.votes.clone(), + last_completed_round.base.clone(), + last_finalized, + ); + + self.voter = Box::pin(voter); + } + VoterSetState::Paused { .. 
} => self.voter = Box::pin(future::pending()), + }; + } + + fn handle_voter_command( + &mut self, + command: VoterCommand>, + ) -> Result<(), Error> { + match command { + VoterCommand::ChangeAuthorities(new) => { + let voters: Vec = new + .authorities + .iter() + .map(move |(a, _)| format!("{}", a)) + .collect(); + telemetry!(CONSENSUS_INFO; "afg.voter_command_change_authorities"; + "number" => ?new.canon_number, + "hash" => ?new.canon_hash, + "voters" => ?voters, + "set_id" => ?new.set_id, + ); + + self.env.update_voter_set_state(|_| { + // start the new authority set using the block where the + // set changed (not where the signal happened!) as the base. + let set_state = VoterSetState::live( + new.set_id, + &*self.env.authority_set.inner().read(), + (new.canon_hash, new.canon_number), + ); + + aux_schema::write_voter_set_state(&*self.env.client, &set_state)?; + Ok(Some(set_state)) + })?; + + self.env = Arc::new(Environment { + voters: Arc::new(new.authorities.into_iter().collect()), + set_id: new.set_id, + voter_set_state: self.env.voter_set_state.clone(), + // Fields below are simply transferred and not updated. + client: self.env.client.clone(), + select_chain: self.env.select_chain.clone(), + config: self.env.config.clone(), + authority_set: self.env.authority_set.clone(), + consensus_changes: self.env.consensus_changes.clone(), + network: self.env.network.clone(), + voting_rule: self.env.voting_rule.clone(), + metrics: self.env.metrics.clone(), + _phantom: PhantomData, + }); + + self.rebuild_voter(); + Ok(()) + } + VoterCommand::Pause(reason) => { + info!(target: "afg", "Pausing old validator set: {}", reason); + + // not racing because old voter is shut down. 
+ self.env.update_voter_set_state(|voter_set_state| { + let completed_rounds = voter_set_state.completed_rounds(); + let set_state = VoterSetState::Paused { completed_rounds }; + + aux_schema::write_voter_set_state(&*self.env.client, &set_state)?; + Ok(Some(set_state)) + })?; + + self.rebuild_voter(); + Ok(()) + } + } + } } impl Future for VoterWork where - Block: BlockT, - B: Backend + 'static, - N: NetworkT + Sync, - NumberFor: BlockNumberOps, - SC: SelectChain + 'static, - C: ClientForGrandpa + 'static, - VR: VotingRule + Clone + 'static, + Block: BlockT, + B: Backend + 'static, + N: NetworkT + Sync, + NumberFor: BlockNumberOps, + SC: SelectChain + 'static, + C: ClientForGrandpa + 'static, + VR: VotingRule + Clone + 'static, { - type Output = Result<(), Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - match Future::poll(Pin::new(&mut self.voter), cx) { - Poll::Pending => {} - Poll::Ready(Ok(())) => { - // voters don't conclude naturally - return Poll::Ready(Err(Error::Safety("GRANDPA voter has concluded.".into()))) - } - Poll::Ready(Err(CommandOrError::Error(e))) => { - // return inner observer error - return Poll::Ready(Err(e)) - } - Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { - // some command issued internally - self.handle_voter_command(command)?; - cx.waker().wake_by_ref(); - } - } - - match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { - Poll::Pending => {} - Poll::Ready(None) => { - // the `voter_commands_rx` stream should never conclude since it's never closed. 
- return Poll::Ready(Ok(())) - } - Poll::Ready(Some(command)) => { - // some command issued externally - self.handle_voter_command(command)?; - cx.waker().wake_by_ref(); - } - } - - Future::poll(Pin::new(&mut self.network), cx) - } + type Output = Result<(), Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + match Future::poll(Pin::new(&mut self.voter), cx) { + Poll::Pending => {} + Poll::Ready(Ok(())) => { + // voters don't conclude naturally + return Poll::Ready(Err(Error::Safety("GRANDPA voter has concluded.".into()))); + } + Poll::Ready(Err(CommandOrError::Error(e))) => { + // return inner observer error + return Poll::Ready(Err(e)); + } + Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { + // some command issued internally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + } + } + + match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { + Poll::Pending => {} + Poll::Ready(None) => { + // the `voter_commands_rx` stream should never conclude since it's never closed. + return Poll::Ready(Ok(())); + } + Poll::Ready(Some(command)) => { + // some command issued externally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + } + } + + Future::poll(Pin::new(&mut self.network), cx) + } } /// When GRANDPA is not initialized we still need to register the finality @@ -986,59 +1009,56 @@ where /// us a `Neighbor` message, since there is no registered gossip validator for /// the engine id defined in the message.) 
pub fn setup_disabled_grandpa( - client: Arc, - inherent_data_providers: &InherentDataProviders, - network: N, -) -> Result<(), sp_consensus::Error> where - N: NetworkT + Send + Clone + 'static, - Client: HeaderBackend + 'static, + client: Arc, + inherent_data_providers: &InherentDataProviders, + network: N, +) -> Result<(), sp_consensus::Error> +where + N: NetworkT + Send + Clone + 'static, + Client: HeaderBackend + 'static, { - register_finality_tracker_inherent_data_provider( - client, - inherent_data_providers, - )?; - - // We register the GRANDPA protocol so that we don't consider it an anomaly - // to receive GRANDPA messages on the network. We don't process the - // messages. - network.register_notifications_protocol( - communication::GRANDPA_ENGINE_ID, - From::from(communication::GRANDPA_PROTOCOL_NAME), - ); - - Ok(()) + register_finality_tracker_inherent_data_provider(client, inherent_data_providers)?; + + // We register the GRANDPA protocol so that we don't consider it an anomaly + // to receive GRANDPA messages on the network. We don't process the + // messages. + network.register_notifications_protocol( + communication::GRANDPA_ENGINE_ID, + From::from(communication::GRANDPA_PROTOCOL_NAME), + ); + + Ok(()) } /// Checks if this node is a voter in the given voter set. /// /// Returns the key pair of the node that is being used in the current voter set or `None`. fn is_voter( - voters: &Arc>, - keystore: &Option, + voters: &Arc>, + keystore: &Option, ) -> Option { - match keystore { - Some(keystore) => voters.voters().iter() - .find_map(|(p, _)| keystore.read().key_pair::(&p).ok()), - None => None, - } + match keystore { + Some(keystore) => voters + .voters() + .iter() + .find_map(|(p, _)| keystore.read().key_pair::(&p).ok()), + None => None, + } } /// Returns the authority id of this node, if available. 
-fn authority_id<'a, I>( - authorities: &mut I, - keystore: &Option, -) -> Option where - I: Iterator, +fn authority_id<'a, I>(authorities: &mut I, keystore: &Option) -> Option +where + I: Iterator, { - match keystore { - Some(keystore) => { - authorities - .find_map(|p| { - keystore.read().key_pair::(&p) - .ok() - .map(|ap| ap.public()) - }) - } - None => None, - } + match keystore { + Some(keystore) => authorities.find_map(|p| { + keystore + .read() + .key_pair::(&p) + .ok() + .map(|ap| ap.public()) + }), + None => None, + } } diff --git a/client/finality-grandpa/src/light_import.rs b/client/finality-grandpa/src/light_import.rs index 276f5d0f28..09bff76e21 100644 --- a/client/finality-grandpa/src/light_import.rs +++ b/client/finality-grandpa/src/light_import.rs @@ -14,34 +14,31 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::collections::HashMap; -use std::sync::Arc; use log::{info, trace, warn}; +use parity_scale_codec::{Decode, Encode}; use parking_lot::RwLock; -use sc_client_api::{ - backend::{AuxStore, Backend, Finalizer, TransactionFor}, -}; -use sp_blockchain::{HeaderBackend, Error as ClientError, well_known_cache_keys}; -use parity_scale_codec::{Encode, Decode}; +use sc_client_api::backend::{AuxStore, Backend, Finalizer, TransactionFor}; +use sc_network::config::{BoxFinalityProofRequestBuilder, FinalityProofRequestBuilder}; +use sp_blockchain::{well_known_cache_keys, Error as ClientError, HeaderBackend}; use sp_consensus::{ - import_queue::Verifier, - BlockOrigin, BlockImport, FinalityProofImport, BlockImportParams, ImportResult, ImportedAux, - BlockCheckParams, Error as ConsensusError, + import_queue::Verifier, BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, + Error as ConsensusError, FinalityProofImport, ImportResult, ImportedAux, }; -use sc_network::config::{BoxFinalityProofRequestBuilder, FinalityProofRequestBuilder}; -use sp_runtime::Justification; -use 
sp_runtime::traits::{NumberFor, Block as BlockT, Header as HeaderT, DigestFor}; use sp_finality_grandpa::{self, AuthorityList}; use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, DigestFor, Header as HeaderT, NumberFor}; +use sp_runtime::Justification; +use std::collections::HashMap; +use std::sync::Arc; -use crate::GenesisAuthoritySetProvider; use crate::aux_schema::load_decode; use crate::consensus_changes::ConsensusChanges; use crate::environment::canonical_at_height; use crate::finality_proof::{ - AuthoritySetForFinalityChecker, ProvableJustification, make_finality_proof_request, + make_finality_proof_request, AuthoritySetForFinalityChecker, ProvableJustification, }; use crate::justification::GrandpaJustification; +use crate::GenesisAuthoritySetProvider; /// LightAuthoritySet is saved under this key in aux storage. const LIGHT_AUTHORITY_SET_KEY: &[u8] = b"grandpa_voters"; @@ -50,27 +47,24 @@ const LIGHT_CONSENSUS_CHANGES_KEY: &[u8] = b"grandpa_consensus_changes"; /// Create light block importer. 
pub fn light_block_import( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, + client: Arc, + backend: Arc, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + authority_set_provider: Arc>, ) -> Result, ClientError> - where - BE: Backend, - Client: crate::ClientForGrandpa, +where + BE: Backend, + Client: crate::ClientForGrandpa, { - let info = client.info(); - let import_data = load_aux_import_data( - info.finalized_hash, - &*client, - genesis_authorities_provider, - )?; - Ok(GrandpaLightBlockImport { - client, - backend, - authority_set_provider, - data: Arc::new(RwLock::new(import_data)), - }) + let info = client.info(); + let import_data = + load_aux_import_data(info.finalized_hash, &*client, genesis_authorities_provider)?; + Ok(GrandpaLightBlockImport { + client, + backend, + authority_set_provider, + data: Arc::new(RwLock::new(import_data)), + }) } /// A light block-import handler for GRANDPA. @@ -79,722 +73,754 @@ pub fn light_block_import( /// - checking GRANDPA justifications; /// - fetching finality proofs for blocks that are enacting consensus changes. pub struct GrandpaLightBlockImport { - client: Arc, - backend: Arc, - authority_set_provider: Arc>, - data: Arc>>, + client: Arc, + backend: Arc, + authority_set_provider: Arc>, + data: Arc>>, } impl Clone for GrandpaLightBlockImport { - fn clone(&self) -> Self { - GrandpaLightBlockImport { - client: self.client.clone(), - backend: self.backend.clone(), - authority_set_provider: self.authority_set_provider.clone(), - data: self.data.clone(), - } - } + fn clone(&self) -> Self { + GrandpaLightBlockImport { + client: self.client.clone(), + backend: self.backend.clone(), + authority_set_provider: self.authority_set_provider.clone(), + data: self.data.clone(), + } + } } /// Mutable data of light block importer. 
struct LightImportData { - last_finalized: Block::Hash, - authority_set: LightAuthoritySet, - consensus_changes: ConsensusChanges>, + last_finalized: Block::Hash, + authority_set: LightAuthoritySet, + consensus_changes: ConsensusChanges>, } /// Latest authority set tracker. #[derive(Debug, Encode, Decode)] struct LightAuthoritySet { - set_id: u64, - authorities: AuthorityList, + set_id: u64, + authorities: AuthorityList, } impl GrandpaLightBlockImport { - /// Create finality proof request builder. - pub fn create_finality_proof_request_builder(&self) -> BoxFinalityProofRequestBuilder { - Box::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _ - } + /// Create finality proof request builder. + pub fn create_finality_proof_request_builder(&self) -> BoxFinalityProofRequestBuilder { + Box::new(GrandpaFinalityProofRequestBuilder(self.data.clone())) as _ + } } -impl BlockImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, +impl BlockImport for GrandpaLightBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend + 'static, + for<'a> &'a Client: HeaderBackend + + BlockImport> + + Finalizer + + AuxStore, { - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - do_import_block::<_, _, _, GrandpaJustification>( - &*self.client, &mut *self.data.write(), block, new_cache - ) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.client.check_block(block) - } + type Error = ConsensusError; + type Transaction = TransactionFor; + + fn import_block( + &mut self, + block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + do_import_block::<_, _, _, GrandpaJustification>( + &*self.client, + &mut 
*self.data.write(), + block, + new_cache, + ) + } + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + self.client.check_block(block) + } } impl FinalityProofImport - for GrandpaLightBlockImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for<'a> &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, + for GrandpaLightBlockImport +where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend + 'static, + for<'a> &'a Client: HeaderBackend + + BlockImport> + + Finalizer + + AuxStore, { - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - let mut out = Vec::new(); - let chain_info = (&*self.client).info(); - - let data = self.data.read(); - for (pending_number, pending_hash) in data.consensus_changes.pending_changes() { - if *pending_number > chain_info.finalized_number - && *pending_number <= chain_info.best_number - { - out.push((pending_hash.clone(), *pending_number)); - } - } - - out - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - do_import_finality_proof::<_, _, _, GrandpaJustification>( - &*self.client, - self.backend.clone(), - &*self.authority_set_provider, - &mut *self.data.write(), - hash, - number, - finality_proof, - verifier, - ) - } + type Error = ConsensusError; + + fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { + let mut out = Vec::new(); + let chain_info = (&*self.client).info(); + + let data = self.data.read(); + for (pending_number, pending_hash) in data.consensus_changes.pending_changes() { + if *pending_number > chain_info.finalized_number + && *pending_number <= chain_info.best_number + { + out.push((pending_hash.clone(), *pending_number)); + } + } + + out + } + + fn import_finality_proof( + &mut self, + hash: Block::Hash, + 
number: NumberFor, + finality_proof: Vec, + verifier: &mut dyn Verifier, + ) -> Result<(Block::Hash, NumberFor), Self::Error> { + do_import_finality_proof::<_, _, _, GrandpaJustification>( + &*self.client, + self.backend.clone(), + &*self.authority_set_provider, + &mut *self.data.write(), + hash, + number, + finality_proof, + verifier, + ) + } } impl LightAuthoritySet { - /// Get a genesis set with given authorities. - pub fn genesis(initial: AuthorityList) -> Self { - LightAuthoritySet { - set_id: sp_finality_grandpa::SetId::default(), - authorities: initial, - } - } - - /// Get latest set id. - pub fn set_id(&self) -> u64 { - self.set_id - } - - /// Get latest authorities set. - pub fn authorities(&self) -> AuthorityList { - self.authorities.clone() - } - - /// Set new authorities set. - pub fn update(&mut self, set_id: u64, authorities: AuthorityList) { - self.set_id = set_id; - std::mem::replace(&mut self.authorities, authorities); - } + /// Get a genesis set with given authorities. + pub fn genesis(initial: AuthorityList) -> Self { + LightAuthoritySet { + set_id: sp_finality_grandpa::SetId::default(), + authorities: initial, + } + } + + /// Get latest set id. + pub fn set_id(&self) -> u64 { + self.set_id + } + + /// Get latest authorities set. + pub fn authorities(&self) -> AuthorityList { + self.authorities.clone() + } + + /// Set new authorities set. 
+ pub fn update(&mut self, set_id: u64, authorities: AuthorityList) { + self.set_id = set_id; + std::mem::replace(&mut self.authorities, authorities); + } } struct GrandpaFinalityProofRequestBuilder(Arc>>); impl FinalityProofRequestBuilder for GrandpaFinalityProofRequestBuilder { - fn build_request_data(&mut self, _hash: &B::Hash) -> Vec { - let data = self.0.read(); - make_finality_proof_request( - data.last_finalized, - data.authority_set.set_id(), - ) - } + fn build_request_data(&mut self, _hash: &B::Hash) -> Vec { + let data = self.0.read(); + make_finality_proof_request(data.last_finalized, data.authority_set.set_id()) + } } /// Try to import new block. fn do_import_block( - mut client: C, - data: &mut LightImportData, - mut block: BlockImportParams>, - new_cache: HashMap>, + mut client: C, + data: &mut LightImportData, + mut block: BlockImportParams>, + new_cache: HashMap>, ) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - J: ProvableJustification, +where + C: HeaderBackend + + AuxStore + + Finalizer + + BlockImport> + + Clone, + B: Backend + 'static, + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + J: ProvableJustification, { - let hash = block.post_hash(); - let number = block.header.number().clone(); - - // we don't want to finalize on `inner.import_block` - let justification = block.justification.take(); - let enacts_consensus_change = !new_cache.is_empty(); - let import_result = client.import_block(block, new_cache); - - let mut imported_aux = match import_result { - Ok(ImportResult::Imported(aux)) => aux, - Ok(r) => return Ok(r), - Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()), - }; - - match justification { - Some(justification) => { - trace!( - target: "afg", - "Imported block {}{}. 
Importing justification.", - if enacts_consensus_change { " which enacts consensus changes" } else { "" }, - hash, - ); - - do_import_justification::<_, _, _, J>(client, data, hash, number, justification) - }, - None if enacts_consensus_change => { - trace!( - target: "afg", - "Imported block {} which enacts consensus changes. Requesting finality proof.", - hash, - ); - - // remember that we need finality proof for this block - imported_aux.needs_finality_proof = true; - data.consensus_changes.note_change((number, hash)); - Ok(ImportResult::Imported(imported_aux)) - }, - None => Ok(ImportResult::Imported(imported_aux)), - } + let hash = block.post_hash(); + let number = block.header.number().clone(); + + // we don't want to finalize on `inner.import_block` + let justification = block.justification.take(); + let enacts_consensus_change = !new_cache.is_empty(); + let import_result = client.import_block(block, new_cache); + + let mut imported_aux = match import_result { + Ok(ImportResult::Imported(aux)) => aux, + Ok(r) => return Ok(r), + Err(e) => return Err(ConsensusError::ClientImport(e.to_string()).into()), + }; + + match justification { + Some(justification) => { + trace!( + target: "afg", + "Imported block {}{}. Importing justification.", + if enacts_consensus_change { " which enacts consensus changes" } else { "" }, + hash, + ); + + do_import_justification::<_, _, _, J>(client, data, hash, number, justification) + } + None if enacts_consensus_change => { + trace!( + target: "afg", + "Imported block {} which enacts consensus changes. Requesting finality proof.", + hash, + ); + + // remember that we need finality proof for this block + imported_aux.needs_finality_proof = true; + data.consensus_changes.note_change((number, hash)); + Ok(ImportResult::Imported(imported_aux)) + } + None => Ok(ImportResult::Imported(imported_aux)), + } } /// Try to import finality proof. 
fn do_import_finality_proof( - client: C, - backend: Arc, - authority_set_provider: &dyn AuthoritySetForFinalityChecker, - data: &mut LightImportData, - _hash: Block::Hash, - _number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, + client: C, + backend: Arc, + authority_set_provider: &dyn AuthoritySetForFinalityChecker, + data: &mut LightImportData, + _hash: Block::Hash, + _number: NumberFor, + finality_proof: Vec, + verifier: &mut dyn Verifier, ) -> Result<(Block::Hash, NumberFor), ConsensusError> - where - C: HeaderBackend - + AuxStore - + Finalizer - + BlockImport> - + Clone, - B: Backend + 'static, - DigestFor: Encode, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, +where + C: HeaderBackend + + AuxStore + + Finalizer + + BlockImport> + + Clone, + B: Backend + 'static, + DigestFor: Encode, + NumberFor: finality_grandpa::BlockNumberOps, + J: ProvableJustification, { - let authority_set_id = data.authority_set.set_id(); - let authorities = data.authority_set.authorities(); - let finality_effects = crate::finality_proof::check_finality_proof( - backend.blockchain(), - authority_set_id, - authorities, - authority_set_provider, - finality_proof, - ).map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - - // try to import all new headers - let block_origin = BlockOrigin::NetworkBroadcast; - for header_to_import in finality_effects.headers_to_import { - let (block_to_import, new_authorities) = verifier.verify( - block_origin, - header_to_import, - None, - None, - ).map_err(|e| ConsensusError::ClientImport(e))?; - assert!( - block_to_import.justification.is_none(), - "We have passed None as justification to verifier.verify", - ); - - let mut cache = HashMap::new(); - if let Some(authorities) = new_authorities { - cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); - } - do_import_block::<_, _, _, J>( - client.clone(), - data, - block_to_import.convert_transaction(), - cache, - )?; - } - - // try 
to import latest justification - let finalized_block_hash = finality_effects.block; - let finalized_block_number = backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(finality_effects.block)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; - do_finalize_block( - client, - data, - finalized_block_hash, - finalized_block_number, - finality_effects.justification.encode(), - )?; - - // apply new authorities set - data.authority_set.update( - finality_effects.new_set_id, - finality_effects.new_authorities, - ); - - Ok((finalized_block_hash, finalized_block_number)) + let authority_set_id = data.authority_set.set_id(); + let authorities = data.authority_set.authorities(); + let finality_effects = crate::finality_proof::check_finality_proof( + backend.blockchain(), + authority_set_id, + authorities, + authority_set_provider, + finality_proof, + ) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + + // try to import all new headers + let block_origin = BlockOrigin::NetworkBroadcast; + for header_to_import in finality_effects.headers_to_import { + let (block_to_import, new_authorities) = verifier + .verify(block_origin, header_to_import, None, None) + .map_err(|e| ConsensusError::ClientImport(e))?; + assert!( + block_to_import.justification.is_none(), + "We have passed None as justification to verifier.verify", + ); + + let mut cache = HashMap::new(); + if let Some(authorities) = new_authorities { + cache.insert(well_known_cache_keys::AUTHORITIES, authorities.encode()); + } + do_import_block::<_, _, _, J>( + client.clone(), + data, + block_to_import.convert_transaction(), + cache, + )?; + } + + // try to import latest justification + let finalized_block_hash = finality_effects.block; + let finalized_block_number = backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(finality_effects.block)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))?; + do_finalize_block( + client, + data, + finalized_block_hash, + 
finalized_block_number, + finality_effects.justification.encode(), + )?; + + // apply new authorities set + data.authority_set.update( + finality_effects.new_set_id, + finality_effects.new_authorities, + ); + + Ok((finalized_block_hash, finalized_block_number)) } /// Try to import justification. fn do_import_justification( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, + client: C, + data: &mut LightImportData, + hash: Block::Hash, + number: NumberFor, + justification: Justification, ) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, - J: ProvableJustification, +where + C: HeaderBackend + AuxStore + Finalizer + Clone, + B: Backend + 'static, + NumberFor: finality_grandpa::BlockNumberOps, + J: ProvableJustification, { - // with justification, we have two cases - // - // optimistic: the same GRANDPA authorities set has generated intermediate justification - // => justification is verified using current authorities set + we could proceed further - // - // pessimistic scenario: the GRANDPA authorities set has changed - // => we need to fetch new authorities set (i.e. finality proof) from remote node - - // first, try to behave optimistically - let authority_set_id = data.authority_set.set_id(); - let justification = J::decode_and_verify( - &justification, - authority_set_id, - &data.authority_set.authorities(), - ); - - // BadJustification error means that justification has been successfully decoded, but - // it isn't valid within current authority set - let justification = match justification { - Err(ClientError::BadJustification(_)) => { - trace!( - target: "afg", - "Justification for {} is not valid within current authorities set. 
Requesting finality proof.", - hash, - ); - - let mut imported_aux = ImportedAux::default(); - imported_aux.needs_finality_proof = true; - return Ok(ImportResult::Imported(imported_aux)); - }, - Err(e) => { - trace!( - target: "afg", - "Justification for {} is not valid. Bailing.", - hash, - ); - - return Err(ConsensusError::ClientImport(e.to_string()).into()); - }, - Ok(justification) => { - trace!( - target: "afg", - "Justification for {} is valid. Finalizing the block.", - hash, - ); - - justification - }, - }; - - // finalize the block - do_finalize_block(client, data, hash, number, justification.encode()) + // with justification, we have two cases + // + // optimistic: the same GRANDPA authorities set has generated intermediate justification + // => justification is verified using current authorities set + we could proceed further + // + // pessimistic scenario: the GRANDPA authorities set has changed + // => we need to fetch new authorities set (i.e. finality proof) from remote node + + // first, try to behave optimistically + let authority_set_id = data.authority_set.set_id(); + let justification = J::decode_and_verify( + &justification, + authority_set_id, + &data.authority_set.authorities(), + ); + + // BadJustification error means that justification has been successfully decoded, but + // it isn't valid within current authority set + let justification = match justification { + Err(ClientError::BadJustification(_)) => { + trace!( + target: "afg", + "Justification for {} is not valid within current authorities set. Requesting finality proof.", + hash, + ); + + let mut imported_aux = ImportedAux::default(); + imported_aux.needs_finality_proof = true; + return Ok(ImportResult::Imported(imported_aux)); + } + Err(e) => { + trace!( + target: "afg", + "Justification for {} is not valid. 
Bailing.", + hash, + ); + + return Err(ConsensusError::ClientImport(e.to_string()).into()); + } + Ok(justification) => { + trace!( + target: "afg", + "Justification for {} is valid. Finalizing the block.", + hash, + ); + + justification + } + }; + + // finalize the block + do_finalize_block(client, data, hash, number, justification.encode()) } /// Finalize the block. fn do_finalize_block( - client: C, - data: &mut LightImportData, - hash: Block::Hash, - number: NumberFor, - justification: Justification, + client: C, + data: &mut LightImportData, + hash: Block::Hash, + number: NumberFor, + justification: Justification, ) -> Result - where - C: HeaderBackend - + AuxStore - + Finalizer - + Clone, - B: Backend + 'static, - NumberFor: finality_grandpa::BlockNumberOps, +where + C: HeaderBackend + AuxStore + Finalizer + Clone, + B: Backend + 'static, + NumberFor: finality_grandpa::BlockNumberOps, { - // finalize the block - client.finalize_block(BlockId::Hash(hash), Some(justification), true).map_err(|e| { - warn!(target: "afg", "Error applying finality to block {:?}: {:?}", (hash, number), e); - ConsensusError::ClientImport(e.to_string()) - })?; - - // forget obsoleted consensus changes - let consensus_finalization_res = data.consensus_changes - .finalize( - (number, hash), - |at_height| canonical_at_height(&client, (hash, number), true, at_height) - ); - match consensus_finalization_res { - Ok((true, _)) => require_insert_aux( - &client, - LIGHT_CONSENSUS_CHANGES_KEY, - &data.consensus_changes, - "consensus changes", - )?, - Ok(_) => (), - Err(error) => return Err(on_post_finalization_error(error, "consensus changes")), - } - - // update last finalized block reference - data.last_finalized = hash; - - // we just finalized this block, so if we were importing it, it is now the new best - Ok(ImportResult::imported(true)) + // finalize the block + client + .finalize_block(BlockId::Hash(hash), Some(justification), true) + .map_err(|e| { + warn!(target: "afg", "Error applying 
finality to block {:?}: {:?}", (hash, number), e); + ConsensusError::ClientImport(e.to_string()) + })?; + + // forget obsoleted consensus changes + let consensus_finalization_res = data + .consensus_changes + .finalize((number, hash), |at_height| { + canonical_at_height(&client, (hash, number), true, at_height) + }); + match consensus_finalization_res { + Ok((true, _)) => require_insert_aux( + &client, + LIGHT_CONSENSUS_CHANGES_KEY, + &data.consensus_changes, + "consensus changes", + )?, + Ok(_) => (), + Err(error) => return Err(on_post_finalization_error(error, "consensus changes")), + } + + // update last finalized block reference + data.last_finalized = hash; + + // we just finalized this block, so if we were importing it, it is now the new best + Ok(ImportResult::imported(true)) } /// Load light import aux data from the store. fn load_aux_import_data( - last_finalized: Block::Hash, - aux_store: &B, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + last_finalized: Block::Hash, + aux_store: &B, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, ) -> Result, ClientError> - where - B: AuxStore, - Block: BlockT, +where + B: AuxStore, + Block: BlockT, { - let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? { - Some(authority_set) => authority_set, - None => { - info!(target: "afg", "Loading GRANDPA authorities \ + let authority_set = match load_decode(aux_store, LIGHT_AUTHORITY_SET_KEY)? 
{ + Some(authority_set) => authority_set, + None => { + info!(target: "afg", "Loading GRANDPA authorities \ from genesis on what appears to be first startup."); - // no authority set on disk: fetch authorities from genesis state - let genesis_authorities = genesis_authorities_provider.get()?; + // no authority set on disk: fetch authorities from genesis state + let genesis_authorities = genesis_authorities_provider.get()?; - let authority_set = LightAuthoritySet::genesis(genesis_authorities); - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; + let authority_set = LightAuthoritySet::genesis(genesis_authorities); + let encoded = authority_set.encode(); + aux_store.insert_aux(&[(LIGHT_AUTHORITY_SET_KEY, &encoded[..])], &[])?; - authority_set - }, - }; + authority_set + } + }; - let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? { - Some(consensus_changes) => consensus_changes, - None => { - let consensus_changes = ConsensusChanges::>::empty(); + let consensus_changes = match load_decode(aux_store, LIGHT_CONSENSUS_CHANGES_KEY)? { + Some(consensus_changes) => consensus_changes, + None => { + let consensus_changes = ConsensusChanges::>::empty(); - let encoded = authority_set.encode(); - aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; + let encoded = authority_set.encode(); + aux_store.insert_aux(&[(LIGHT_CONSENSUS_CHANGES_KEY, &encoded[..])], &[])?; - consensus_changes - }, - }; + consensus_changes + } + }; - Ok(LightImportData { - last_finalized, - authority_set, - consensus_changes, - }) + Ok(LightImportData { + last_finalized, + authority_set, + consensus_changes, + }) } /// Insert into aux store. If failed, return error && show inconsistency warning. 
fn require_insert_aux( - store: &A, - key: &[u8], - value: &T, - value_type: &str, + store: &A, + key: &[u8], + value: &T, + value_type: &str, ) -> Result<(), ConsensusError> { - let encoded = value.encode(); - let update_res = store.insert_aux(&[(key, &encoded[..])], &[]); - if let Err(error) = update_res { - return Err(on_post_finalization_error(error, value_type)); - } + let encoded = value.encode(); + let update_res = store.insert_aux(&[(key, &encoded[..])], &[]); + if let Err(error) = update_res { + return Err(on_post_finalization_error(error, value_type)); + } - Ok(()) + Ok(()) } /// Display inconsistency warning. fn on_post_finalization_error(error: ClientError, value_type: &str) -> ConsensusError { - warn!(target: "afg", "Failed to write updated {} to disk. Bailing.", value_type); - warn!(target: "afg", "Node is in a potentially inconsistent state."); - ConsensusError::ClientImport(error.to_string()) + warn!(target: "afg", "Failed to write updated {} to disk. Bailing.", value_type); + warn!(target: "afg", "Node is in a potentially inconsistent state."); + ConsensusError::ClientImport(error.to_string()) } #[cfg(test)] pub mod tests { - use super::*; - use sp_consensus::{ForkChoiceStrategy, BlockImport}; - use sp_finality_grandpa::AuthorityId; - use sp_core::{H256, crypto::Public}; - use substrate_test_runtime_client::sc_client::in_mem::Blockchain as InMemoryAuxStore; - use substrate_test_runtime_client::runtime::{Block, Header}; - use crate::tests::TestApi; - use crate::finality_proof::tests::TestJustification; - - pub struct NoJustificationsImport( - pub GrandpaLightBlockImport - ); - - impl Clone - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - { - fn clone(&self) -> Self { - NoJustificationsImport(self.0.clone()) - } - } - - impl BlockImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - DigestFor: Encode, - BE: Backend + 'static, - for 
<'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - GrandpaLightBlockImport: - BlockImport, Error = ConsensusError> - { - type Error = ConsensusError; - type Transaction = TransactionFor; - - fn import_block( - &mut self, - mut block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - block.justification.take(); - self.0.import_block(block, new_cache) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - self.0.check_block(block) - } - } - - impl FinalityProofImport - for NoJustificationsImport where - NumberFor: finality_grandpa::BlockNumberOps, - BE: Backend + 'static, - DigestFor: Encode, - for <'a > &'a Client: - HeaderBackend - + BlockImport> - + Finalizer - + AuxStore, - { - type Error = ConsensusError; - - fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { - self.0.on_start() - } - - fn import_finality_proof( - &mut self, - hash: Block::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(Block::Hash, NumberFor), Self::Error> { - self.0.import_finality_proof(hash, number, finality_proof, verifier) - } - } - - /// Creates light block import that ignores justifications that came outside of finality proofs. 
- pub fn light_block_import_without_justifications( - client: Arc, - backend: Arc, - genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, - authority_set_provider: Arc>, - ) -> Result, ClientError> - where - BE: Backend + 'static, - Client: crate::ClientForGrandpa, - { - light_block_import(client, backend, genesis_authorities_provider, authority_set_provider) - .map(NoJustificationsImport) - } - - fn import_block( - new_cache: HashMap>, - justification: Option, - ) -> ImportResult { - let (client, _backend) = substrate_test_runtime_client::new_light(); - let client = Arc::new(client); - let mut import_data = LightImportData { - last_finalized: Default::default(), - authority_set: LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[1; 32]), 1)]), - consensus_changes: ConsensusChanges::empty(), - }; - let mut block = BlockImportParams::new( - BlockOrigin::Own, - Header { - number: 1, - parent_hash: client.chain_info().best_hash, - state_root: Default::default(), - digest: Default::default(), - extrinsics_root: Default::default(), - }, - ); - block.justification = justification; - block.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - do_import_block::<_, _, _, TestJustification>( - &*client, - &mut import_data, - block, - new_cache, - ).unwrap() - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_no_justification_provided() { - assert_eq!(import_block(HashMap::new(), None), ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_not_required_when_consensus_data_does_not_changes_and_correct_justification_provided() { - let justification = TestJustification((0, vec![(AuthorityId::from_slice(&[1; 32]), 1)]), Vec::new()).encode(); - assert_eq!(import_block(HashMap::new(), Some(justification)), 
ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_no_justification_provided() { - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!(import_block(cache, None), ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: true, - header_only: false, - })); - } - - #[test] - fn finality_proof_required_when_consensus_data_changes_and_incorrect_justification_provided() { - let justification = TestJustification((0, vec![]), Vec::new()).encode(); - let mut cache = HashMap::new(); - cache.insert(well_known_cache_keys::AUTHORITIES, vec![AuthorityId::from_slice(&[2; 32])].encode()); - assert_eq!( - import_block(cache, Some(justification)), - ImportResult::Imported(ImportedAux { - clear_justification_requests: false, - needs_justification: false, - bad_justification: false, - needs_finality_proof: true, - is_new_best: false, - header_only: false, - }, - )); - } - - - #[test] - fn aux_data_updated_on_start() { - let aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store is empty initially - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_none()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_none()); - - // it is updated on importer start - load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert!(aux_store.get_aux(LIGHT_AUTHORITY_SET_KEY).unwrap().is_some()); - assert!(aux_store.get_aux(LIGHT_CONSENSUS_CHANGES_KEY).unwrap().is_some()); - } - - #[test] - fn aux_data_loaded_on_restart() { - let 
aux_store = InMemoryAuxStore::::new(); - let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); - - // when aux store is non-empty initially - let mut consensus_changes = ConsensusChanges::::empty(); - consensus_changes.note_change((42, Default::default())); - aux_store.insert_aux( - &[ - ( - LIGHT_AUTHORITY_SET_KEY, - LightAuthoritySet::genesis( - vec![(AuthorityId::from_slice(&[42; 32]), 2)] - ).encode().as_slice(), - ), - ( - LIGHT_CONSENSUS_CHANGES_KEY, - consensus_changes.encode().as_slice(), - ), - ], - &[], - ).unwrap(); - - // importer uses it on start - let data = load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); - assert_eq!(data.authority_set.authorities(), vec![(AuthorityId::from_slice(&[42; 32]), 2)]); - assert_eq!(data.consensus_changes.pending_changes(), &[(42, Default::default())]); - } + use super::*; + use crate::finality_proof::tests::TestJustification; + use crate::tests::TestApi; + use sp_consensus::{BlockImport, ForkChoiceStrategy}; + use sp_core::{crypto::Public, H256}; + use sp_finality_grandpa::AuthorityId; + use substrate_test_runtime_client::runtime::{Block, Header}; + use substrate_test_runtime_client::sc_client::in_mem::Blockchain as InMemoryAuxStore; + + pub struct NoJustificationsImport( + pub GrandpaLightBlockImport, + ); + + impl Clone for NoJustificationsImport + where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend + 'static, + { + fn clone(&self) -> Self { + NoJustificationsImport(self.0.clone()) + } + } + + impl BlockImport for NoJustificationsImport + where + NumberFor: finality_grandpa::BlockNumberOps, + DigestFor: Encode, + BE: Backend + 'static, + for<'a> &'a Client: HeaderBackend + + BlockImport> + + Finalizer + + AuxStore, + GrandpaLightBlockImport: + BlockImport, Error = ConsensusError>, + { + type Error = ConsensusError; + type Transaction = TransactionFor; + + fn import_block( + &mut self, + mut block: BlockImportParams, + new_cache: HashMap>, + ) -> 
Result { + block.justification.take(); + self.0.import_block(block, new_cache) + } + + fn check_block( + &mut self, + block: BlockCheckParams, + ) -> Result { + self.0.check_block(block) + } + } + + impl FinalityProofImport + for NoJustificationsImport + where + NumberFor: finality_grandpa::BlockNumberOps, + BE: Backend + 'static, + DigestFor: Encode, + for<'a> &'a Client: HeaderBackend + + BlockImport> + + Finalizer + + AuxStore, + { + type Error = ConsensusError; + + fn on_start(&mut self) -> Vec<(Block::Hash, NumberFor)> { + self.0.on_start() + } + + fn import_finality_proof( + &mut self, + hash: Block::Hash, + number: NumberFor, + finality_proof: Vec, + verifier: &mut dyn Verifier, + ) -> Result<(Block::Hash, NumberFor), Self::Error> { + self.0 + .import_finality_proof(hash, number, finality_proof, verifier) + } + } + + /// Creates light block import that ignores justifications that came outside of finality proofs. + pub fn light_block_import_without_justifications( + client: Arc, + backend: Arc, + genesis_authorities_provider: &dyn GenesisAuthoritySetProvider, + authority_set_provider: Arc>, + ) -> Result, ClientError> + where + BE: Backend + 'static, + Client: crate::ClientForGrandpa, + { + light_block_import( + client, + backend, + genesis_authorities_provider, + authority_set_provider, + ) + .map(NoJustificationsImport) + } + + fn import_block( + new_cache: HashMap>, + justification: Option, + ) -> ImportResult { + let (client, _backend) = substrate_test_runtime_client::new_light(); + let client = Arc::new(client); + let mut import_data = LightImportData { + last_finalized: Default::default(), + authority_set: LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[1; 32]), 1)]), + consensus_changes: ConsensusChanges::empty(), + }; + let mut block = BlockImportParams::new( + BlockOrigin::Own, + Header { + number: 1, + parent_hash: client.chain_info().best_hash, + state_root: Default::default(), + digest: Default::default(), + extrinsics_root: 
Default::default(), + }, + ); + block.justification = justification; + block.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + do_import_block::<_, _, _, TestJustification>(&*client, &mut import_data, block, new_cache) + .unwrap() + } + + #[test] + fn finality_proof_not_required_when_consensus_data_does_not_changes_and_no_justification_provided( + ) { + assert_eq!( + import_block(HashMap::new(), None), + ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + is_new_best: true, + header_only: false, + }) + ); + } + + #[test] + fn finality_proof_not_required_when_consensus_data_does_not_changes_and_correct_justification_provided( + ) { + let justification = TestJustification( + (0, vec![(AuthorityId::from_slice(&[1; 32]), 1)]), + Vec::new(), + ) + .encode(); + assert_eq!( + import_block(HashMap::new(), Some(justification)), + ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: false, + is_new_best: true, + header_only: false, + }) + ); + } + + #[test] + fn finality_proof_required_when_consensus_data_changes_and_no_justification_provided() { + let mut cache = HashMap::new(); + cache.insert( + well_known_cache_keys::AUTHORITIES, + vec![AuthorityId::from_slice(&[2; 32])].encode(), + ); + assert_eq!( + import_block(cache, None), + ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: true, + is_new_best: true, + header_only: false, + }) + ); + } + + #[test] + fn finality_proof_required_when_consensus_data_changes_and_incorrect_justification_provided() { + let justification = TestJustification((0, vec![]), Vec::new()).encode(); + let mut cache = HashMap::new(); + cache.insert( + well_known_cache_keys::AUTHORITIES, + 
vec![AuthorityId::from_slice(&[2; 32])].encode(), + ); + assert_eq!( + import_block(cache, Some(justification)), + ImportResult::Imported(ImportedAux { + clear_justification_requests: false, + needs_justification: false, + bad_justification: false, + needs_finality_proof: true, + is_new_best: false, + header_only: false, + },) + ); + } + + #[test] + fn aux_data_updated_on_start() { + let aux_store = InMemoryAuxStore::::new(); + let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); + + // when aux store is empty initially + assert!(aux_store + .get_aux(LIGHT_AUTHORITY_SET_KEY) + .unwrap() + .is_none()); + assert!(aux_store + .get_aux(LIGHT_CONSENSUS_CHANGES_KEY) + .unwrap() + .is_none()); + + // it is updated on importer start + load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); + assert!(aux_store + .get_aux(LIGHT_AUTHORITY_SET_KEY) + .unwrap() + .is_some()); + assert!(aux_store + .get_aux(LIGHT_CONSENSUS_CHANGES_KEY) + .unwrap() + .is_some()); + } + + #[test] + fn aux_data_loaded_on_restart() { + let aux_store = InMemoryAuxStore::::new(); + let api = TestApi::new(vec![(AuthorityId::from_slice(&[1; 32]), 1)]); + + // when aux store is non-empty initially + let mut consensus_changes = ConsensusChanges::::empty(); + consensus_changes.note_change((42, Default::default())); + aux_store + .insert_aux( + &[ + ( + LIGHT_AUTHORITY_SET_KEY, + LightAuthoritySet::genesis(vec![(AuthorityId::from_slice(&[42; 32]), 2)]) + .encode() + .as_slice(), + ), + ( + LIGHT_CONSENSUS_CHANGES_KEY, + consensus_changes.encode().as_slice(), + ), + ], + &[], + ) + .unwrap(); + + // importer uses it on start + let data = load_aux_import_data(Default::default(), &aux_store, &api).unwrap(); + assert_eq!( + data.authority_set.authorities(), + vec![(AuthorityId::from_slice(&[42; 32]), 2)] + ); + assert_eq!( + data.consensus_changes.pending_changes(), + &[(42, Default::default())] + ); + } } diff --git a/client/finality-grandpa/src/observer.rs 
b/client/finality-grandpa/src/observer.rs index 1e6c8ddf18..8de06afb13 100644 --- a/client/finality-grandpa/src/observer.rs +++ b/client/finality-grandpa/src/observer.rs @@ -20,135 +20,146 @@ use std::task::{Context, Poll}; use futures::prelude::*; -use finality_grandpa::{ - BlockNumberOps, Error as GrandpaError, voter, voter_set::VoterSet -}; +use finality_grandpa::{voter, voter_set::VoterSet, BlockNumberOps, Error as GrandpaError}; use log::{debug, info, warn}; -use sp_consensus::SelectChain; use sc_client_api::backend::Backend; -use sp_utils::mpsc::TracingUnboundedReceiver; -use sp_runtime::traits::{NumberFor, Block as BlockT}; use sp_blockchain::HeaderMetadata; +use sp_consensus::SelectChain; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_utils::mpsc::TracingUnboundedReceiver; -use crate::{ - global_communication, CommandOrError, CommunicationIn, Config, environment, - LinkHalf, Error, aux_schema::PersistentData, VoterCommand, VoterSetState, -}; use crate::authorities::SharedAuthoritySet; use crate::communication::{Network as NetworkT, NetworkBridge}; use crate::consensus_changes::SharedConsensusChanges; +use crate::{ + aux_schema::PersistentData, environment, global_communication, CommandOrError, CommunicationIn, + Config, Error, LinkHalf, VoterCommand, VoterSetState, +}; use sp_finality_grandpa::AuthorityId; use std::marker::{PhantomData, Unpin}; struct ObserverChain<'a, Block: BlockT, Client> { - client: &'a Arc, - _phantom: PhantomData, + client: &'a Arc, + _phantom: PhantomData, } impl<'a, Block, Client> finality_grandpa::Chain> - for ObserverChain<'a, Block, Client> where - Block: BlockT, - Client: HeaderMetadata, - NumberFor: BlockNumberOps, + for ObserverChain<'a, Block, Client> +where + Block: BlockT, + Client: HeaderMetadata, + NumberFor: BlockNumberOps, { - fn ancestry(&self, base: Block::Hash, block: Block::Hash) -> Result, GrandpaError> { - environment::ancestry(&self.client, base, block) - } - - fn best_chain_containing(&self, 
_block: Block::Hash) -> Option<(Block::Hash, NumberFor)> { - // only used by voter - None - } + fn ancestry( + &self, + base: Block::Hash, + block: Block::Hash, + ) -> Result, GrandpaError> { + environment::ancestry(&self.client, base, block) + } + + fn best_chain_containing( + &self, + _block: Block::Hash, + ) -> Option<(Block::Hash, NumberFor)> { + // only used by voter + None + } } fn grandpa_observer( - client: &Arc, - authority_set: &SharedAuthoritySet>, - consensus_changes: &SharedConsensusChanges>, - voters: &Arc>, - last_finalized_number: NumberFor, - commits: S, - note_round: F, -) -> impl Future>>> where - NumberFor: BlockNumberOps, - S: Stream< - Item = Result, CommandOrError>>, - >, - F: Fn(u64), - BE: Backend, - Client: crate::ClientForGrandpa, + client: &Arc, + authority_set: &SharedAuthoritySet>, + consensus_changes: &SharedConsensusChanges>, + voters: &Arc>, + last_finalized_number: NumberFor, + commits: S, + note_round: F, +) -> impl Future>>> +where + NumberFor: BlockNumberOps, + S: Stream, CommandOrError>>>, + F: Fn(u64), + BE: Backend, + Client: crate::ClientForGrandpa, { - let authority_set = authority_set.clone(); - let consensus_changes = consensus_changes.clone(); - let client = client.clone(); - let voters = voters.clone(); - - let observer = commits.try_fold(last_finalized_number, move |last_finalized_number, global| { - let (round, commit, callback) = match global { - voter::CommunicationIn::Commit(round, commit, callback) => { - let commit = finality_grandpa::Commit::from(commit); - (round, commit, callback) - }, - voter::CommunicationIn::CatchUp(..) 
=> { - // ignore catch up messages - return future::ok(last_finalized_number); - }, - }; - - // if the commit we've received targets a block lower or equal to the last - // finalized, ignore it and continue with the current state - if commit.target_number <= last_finalized_number { - return future::ok(last_finalized_number); - } - - let validation_result = match finality_grandpa::validate_commit( - &commit, - &voters, - &ObserverChain { client: &client, _phantom: PhantomData }, - ) { - Ok(r) => r, - Err(e) => return future::err(e.into()), - }; - - if let Some(_) = validation_result.ghost() { - let finalized_hash = commit.target_hash; - let finalized_number = commit.target_number; - - // commit is valid, finalize the block it targets - match environment::finalize_block( - client.clone(), - &authority_set, - &consensus_changes, - None, - finalized_hash, - finalized_number, - (round, commit).into(), - false, - ) { - Ok(_) => {}, - Err(e) => return future::err(e), - }; - - // note that we've observed completion of this round through the commit, - // and that implies that the next round has started. 
- note_round(round + 1); - - finality_grandpa::process_commit_validation_result(validation_result, callback); - - // proceed processing with new finalized block number - future::ok(finalized_number) - } else { - debug!(target: "afg", "Received invalid commit: ({:?}, {:?})", round, commit); - - finality_grandpa::process_commit_validation_result(validation_result, callback); - - // commit is invalid, continue processing commits with the current state - future::ok(last_finalized_number) - } - }); - - observer.map_ok(|_| ()) + let authority_set = authority_set.clone(); + let consensus_changes = consensus_changes.clone(); + let client = client.clone(); + let voters = voters.clone(); + + let observer = commits.try_fold( + last_finalized_number, + move |last_finalized_number, global| { + let (round, commit, callback) = match global { + voter::CommunicationIn::Commit(round, commit, callback) => { + let commit = finality_grandpa::Commit::from(commit); + (round, commit, callback) + } + voter::CommunicationIn::CatchUp(..) 
=> { + // ignore catch up messages + return future::ok(last_finalized_number); + } + }; + + // if the commit we've received targets a block lower or equal to the last + // finalized, ignore it and continue with the current state + if commit.target_number <= last_finalized_number { + return future::ok(last_finalized_number); + } + + let validation_result = match finality_grandpa::validate_commit( + &commit, + &voters, + &ObserverChain { + client: &client, + _phantom: PhantomData, + }, + ) { + Ok(r) => r, + Err(e) => return future::err(e.into()), + }; + + if let Some(_) = validation_result.ghost() { + let finalized_hash = commit.target_hash; + let finalized_number = commit.target_number; + + // commit is valid, finalize the block it targets + match environment::finalize_block( + client.clone(), + &authority_set, + &consensus_changes, + None, + finalized_hash, + finalized_number, + (round, commit).into(), + false, + ) { + Ok(_) => {} + Err(e) => return future::err(e), + }; + + // note that we've observed completion of this round through the commit, + // and that implies that the next round has started. + note_round(round + 1); + + finality_grandpa::process_commit_validation_result(validation_result, callback); + + // proceed processing with new finalized block number + future::ok(finalized_number) + } else { + debug!(target: "afg", "Received invalid commit: ({:?}, {:?})", round, commit); + + finality_grandpa::process_commit_validation_result(validation_result, callback); + + // commit is invalid, continue processing commits with the current state + future::ok(last_finalized_number) + } + }, + ); + + observer.map_ok(|_| ()) } /// Run a GRANDPA observer as a task, the observer will finalize blocks only by @@ -159,288 +170,292 @@ fn grandpa_observer( /// it stable enough to use on a live network. 
#[allow(unused)] pub fn run_grandpa_observer( - config: Config, - link: LinkHalf, - network: N, + config: Config, + link: LinkHalf, + network: N, ) -> sp_blockchain::Result + Unpin + Send + 'static> where - BE: Backend + Unpin + 'static, - N: NetworkT + Send + Clone + 'static, - SC: SelectChain + 'static, - NumberFor: BlockNumberOps, - Client: crate::ClientForGrandpa + 'static, + BE: Backend + Unpin + 'static, + N: NetworkT + Send + Clone + 'static, + SC: SelectChain + 'static, + NumberFor: BlockNumberOps, + Client: crate::ClientForGrandpa + 'static, { - let LinkHalf { - client, - select_chain: _, - persistent_data, - voter_commands_rx, - } = link; - - let network = NetworkBridge::new( - network, - config.clone(), - persistent_data.set_state.clone(), - None, - ); - - let observer_work = ObserverWork::new( - client, - network, - persistent_data, - config.keystore.clone(), - voter_commands_rx - ); - - let observer_work = observer_work - .map_ok(|_| ()) - .map_err(|e| { - warn!("GRANDPA Observer failed: {:?}", e); - }); - - Ok(observer_work.map(drop)) + let LinkHalf { + client, + select_chain: _, + persistent_data, + voter_commands_rx, + } = link; + + let network = NetworkBridge::new( + network, + config.clone(), + persistent_data.set_state.clone(), + None, + ); + + let observer_work = ObserverWork::new( + client, + network, + persistent_data, + config.keystore.clone(), + voter_commands_rx, + ); + + let observer_work = observer_work.map_ok(|_| ()).map_err(|e| { + warn!("GRANDPA Observer failed: {:?}", e); + }); + + Ok(observer_work.map(drop)) } /// Future that powers the observer. 
#[must_use] struct ObserverWork> { - observer: Pin>>> + Send>>, - client: Arc, - network: NetworkBridge, - persistent_data: PersistentData, - keystore: Option, - voter_commands_rx: TracingUnboundedReceiver>>, - _phantom: PhantomData, + observer: + Pin>>> + Send>>, + client: Arc, + network: NetworkBridge, + persistent_data: PersistentData, + keystore: Option, + voter_commands_rx: TracingUnboundedReceiver>>, + _phantom: PhantomData, } impl ObserverWork where - B: BlockT, - BE: Backend + 'static, - Client: crate::ClientForGrandpa + 'static, - Network: NetworkT, - NumberFor: BlockNumberOps, + B: BlockT, + BE: Backend + 'static, + Client: crate::ClientForGrandpa + 'static, + Network: NetworkT, + NumberFor: BlockNumberOps, { - fn new( - client: Arc, - network: NetworkBridge, - persistent_data: PersistentData, - keystore: Option, - voter_commands_rx: TracingUnboundedReceiver>>, - ) -> Self { - - let mut work = ObserverWork { - // `observer` is set to a temporary value and replaced below when - // calling `rebuild_observer`. - observer: Box::pin(future::pending()) as Pin>, - client, - network, - persistent_data, - keystore, - voter_commands_rx, - _phantom: PhantomData, - }; - work.rebuild_observer(); - work - } - - /// Rebuilds the `self.observer` field using the current authority set - /// state. This method should be called when we know that the authority set - /// has changed (e.g. as signalled by a voter command). 
- fn rebuild_observer(&mut self) { - let set_id = self.persistent_data.authority_set.set_id(); - let voters = Arc::new(self.persistent_data.authority_set.current_authorities()); - - // start global communication stream for the current set - let (global_in, _) = global_communication( - set_id, - &voters, - self.client.clone(), - &self.network, - &self.keystore, - None, - ); - - let last_finalized_number = self.client.info().finalized_number; - - // NOTE: since we are not using `round_communication` we have to - // manually note the round with the gossip validator, otherwise we won't - // relay round messages. we want all full nodes to contribute to vote - // availability. - let note_round = { - let network = self.network.clone(); - let voters = voters.clone(); - - move |round| network.note_round( - crate::communication::Round(round), - crate::communication::SetId(set_id), - &*voters, - ) - }; - - // create observer for the current set - let observer = grandpa_observer( - &self.client, - &self.persistent_data.authority_set, - &self.persistent_data.consensus_changes, - &voters, - last_finalized_number, - global_in, - note_round, - ); - - self.observer = Box::pin(observer); - } - - fn handle_voter_command( - &mut self, - command: VoterCommand>, - ) -> Result<(), Error> { - // the observer doesn't use the voter set state, but we need to - // update it on-disk in case we restart as validator in the future. - self.persistent_data.set_state = match command { - VoterCommand::Pause(reason) => { - info!(target: "afg", "Pausing old validator set: {}", reason); - - let completed_rounds = self.persistent_data.set_state.read().completed_rounds(); - let set_state = VoterSetState::Paused { completed_rounds }; - - crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; - - set_state - }, - VoterCommand::ChangeAuthorities(new) => { - // start the new authority set using the block where the - // set changed (not where the signal happened!) as the base. 
- let set_state = VoterSetState::live( - new.set_id, - &*self.persistent_data.authority_set.inner().read(), - (new.canon_hash, new.canon_number), - ); - - crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; - - set_state - }, - }.into(); - - self.rebuild_observer(); - Ok(()) - } + fn new( + client: Arc, + network: NetworkBridge, + persistent_data: PersistentData, + keystore: Option, + voter_commands_rx: TracingUnboundedReceiver>>, + ) -> Self { + let mut work = ObserverWork { + // `observer` is set to a temporary value and replaced below when + // calling `rebuild_observer`. + observer: Box::pin(future::pending()) as Pin>, + client, + network, + persistent_data, + keystore, + voter_commands_rx, + _phantom: PhantomData, + }; + work.rebuild_observer(); + work + } + + /// Rebuilds the `self.observer` field using the current authority set + /// state. This method should be called when we know that the authority set + /// has changed (e.g. as signalled by a voter command). + fn rebuild_observer(&mut self) { + let set_id = self.persistent_data.authority_set.set_id(); + let voters = Arc::new(self.persistent_data.authority_set.current_authorities()); + + // start global communication stream for the current set + let (global_in, _) = global_communication( + set_id, + &voters, + self.client.clone(), + &self.network, + &self.keystore, + None, + ); + + let last_finalized_number = self.client.info().finalized_number; + + // NOTE: since we are not using `round_communication` we have to + // manually note the round with the gossip validator, otherwise we won't + // relay round messages. we want all full nodes to contribute to vote + // availability. 
+ let note_round = { + let network = self.network.clone(); + let voters = voters.clone(); + + move |round| { + network.note_round( + crate::communication::Round(round), + crate::communication::SetId(set_id), + &*voters, + ) + } + }; + + // create observer for the current set + let observer = grandpa_observer( + &self.client, + &self.persistent_data.authority_set, + &self.persistent_data.consensus_changes, + &voters, + last_finalized_number, + global_in, + note_round, + ); + + self.observer = Box::pin(observer); + } + + fn handle_voter_command( + &mut self, + command: VoterCommand>, + ) -> Result<(), Error> { + // the observer doesn't use the voter set state, but we need to + // update it on-disk in case we restart as validator in the future. + self.persistent_data.set_state = match command { + VoterCommand::Pause(reason) => { + info!(target: "afg", "Pausing old validator set: {}", reason); + + let completed_rounds = self.persistent_data.set_state.read().completed_rounds(); + let set_state = VoterSetState::Paused { completed_rounds }; + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + set_state + } + VoterCommand::ChangeAuthorities(new) => { + // start the new authority set using the block where the + // set changed (not where the signal happened!) as the base. 
+ let set_state = VoterSetState::live( + new.set_id, + &*self.persistent_data.authority_set.inner().read(), + (new.canon_hash, new.canon_number), + ); + + crate::aux_schema::write_voter_set_state(&*self.client, &set_state)?; + + set_state + } + } + .into(); + + self.rebuild_observer(); + Ok(()) + } } impl Future for ObserverWork where - B: BlockT, - BE: Backend + Unpin + 'static, - C: crate::ClientForGrandpa + 'static, - N: NetworkT, - NumberFor: BlockNumberOps, + B: BlockT, + BE: Backend + Unpin + 'static, + C: crate::ClientForGrandpa + 'static, + N: NetworkT, + NumberFor: BlockNumberOps, { - type Output = Result<(), Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - match Future::poll(Pin::new(&mut self.observer), cx) { - Poll::Pending => {} - Poll::Ready(Ok(())) => { - // observer commit stream doesn't conclude naturally; this could reasonably be an error. - return Poll::Ready(Ok(())) - } - Poll::Ready(Err(CommandOrError::Error(e))) => { - // return inner observer error - return Poll::Ready(Err(e)) - } - Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { - // some command issued internally - self.handle_voter_command(command)?; - cx.waker().wake_by_ref(); - } - } - - match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { - Poll::Pending => {} - Poll::Ready(None) => { - // the `voter_commands_rx` stream should never conclude since it's never closed. - return Poll::Ready(Ok(())) - } - Poll::Ready(Some(command)) => { - // some command issued externally - self.handle_voter_command(command)?; - cx.waker().wake_by_ref(); - } - } - - Future::poll(Pin::new(&mut self.network), cx) - } + type Output = Result<(), Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + match Future::poll(Pin::new(&mut self.observer), cx) { + Poll::Pending => {} + Poll::Ready(Ok(())) => { + // observer commit stream doesn't conclude naturally; this could reasonably be an error. 
+ return Poll::Ready(Ok(())); + } + Poll::Ready(Err(CommandOrError::Error(e))) => { + // return inner observer error + return Poll::Ready(Err(e)); + } + Poll::Ready(Err(CommandOrError::VoterCommand(command))) => { + // some command issued internally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + } + } + + match Stream::poll_next(Pin::new(&mut self.voter_commands_rx), cx) { + Poll::Pending => {} + Poll::Ready(None) => { + // the `voter_commands_rx` stream should never conclude since it's never closed. + return Poll::Ready(Ok(())); + } + Poll::Ready(Some(command)) => { + // some command issued externally + self.handle_voter_command(command)?; + cx.waker().wake_by_ref(); + } + } + + Future::poll(Pin::new(&mut self.network), cx) + } } #[cfg(test)] mod tests { - use super::*; - - use assert_matches::assert_matches; - use sp_utils::mpsc::tracing_unbounded; - use crate::{aux_schema, communication::tests::{Event, make_test_network}}; - use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; - use sc_network::PeerId; - use sp_blockchain::HeaderBackend as _; - - use futures::executor; - - /// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. Regression - /// test for bug introduced in d4fbb897c and fixed in b7af8b339. - /// - /// When polled, `NetworkBridge` forwards reputation change requests from the `GossipValidator` - /// to the underlying `dyn Network`. This test triggers a reputation change by calling - /// `GossipValidator::validate` with an invalid gossip message. After polling the `ObserverWork` - /// which should poll the `NetworkBridge`, the reputation change should be forwarded to the test - /// network. - #[test] - fn observer_work_polls_underlying_network_bridge() { - // Create a test network. - let (tester_fut, _network) = make_test_network(); - let mut tester = executor::block_on(tester_fut); - - // Create an observer. 
- let (client, backend) = { - let builder = TestClientBuilder::with_default_backend(); - let backend = builder.backend(); - let (client, _) = builder.build_with_longest_chain(); - (Arc::new(client), backend) - }; - - let persistent_data = aux_schema::load_persistent( - &*backend, - client.info().genesis_hash, - 0, - || Ok(vec![]), - ).unwrap(); - - let (_tx, voter_command_rx) = tracing_unbounded(""); - let observer = ObserverWork::new( - client, - tester.net_handle.clone(), - persistent_data, - None, - voter_command_rx, - ); - - // Trigger a reputation change through the gossip validator. - let peer_id = PeerId::random(); - tester.trigger_gossip_validator_reputation_change(&peer_id); - - executor::block_on(async move { - // Poll the observer once and have it forward the reputation change from the gossip - // validator to the test network. - assert!(observer.now_or_never().is_none()); - - // Ignore initial event stream request by gossip engine. - match tester.events.next().now_or_never() { - Some(Some(Event::EventStream(_))) => {}, - _ => panic!("expected event stream request"), - }; - - assert_matches!(tester.events.next().now_or_never(), Some(Some(Event::Report(_, _)))); - }); - } + use super::*; + + use crate::{ + aux_schema, + communication::tests::{make_test_network, Event}, + }; + use assert_matches::assert_matches; + use sc_network::PeerId; + use sp_blockchain::HeaderBackend as _; + use sp_utils::mpsc::tracing_unbounded; + use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; + + use futures::executor; + + /// Ensure `Future` implementation of `ObserverWork` is polling its `NetworkBridge`. Regression + /// test for bug introduced in d4fbb897c and fixed in b7af8b339. + /// + /// When polled, `NetworkBridge` forwards reputation change requests from the `GossipValidator` + /// to the underlying `dyn Network`. This test triggers a reputation change by calling + /// `GossipValidator::validate` with an invalid gossip message. 
After polling the `ObserverWork` + /// which should poll the `NetworkBridge`, the reputation change should be forwarded to the test + /// network. + #[test] + fn observer_work_polls_underlying_network_bridge() { + // Create a test network. + let (tester_fut, _network) = make_test_network(); + let mut tester = executor::block_on(tester_fut); + + // Create an observer. + let (client, backend) = { + let builder = TestClientBuilder::with_default_backend(); + let backend = builder.backend(); + let (client, _) = builder.build_with_longest_chain(); + (Arc::new(client), backend) + }; + + let persistent_data = + aux_schema::load_persistent(&*backend, client.info().genesis_hash, 0, || Ok(vec![])) + .unwrap(); + + let (_tx, voter_command_rx) = tracing_unbounded(""); + let observer = ObserverWork::new( + client, + tester.net_handle.clone(), + persistent_data, + None, + voter_command_rx, + ); + + // Trigger a reputation change through the gossip validator. + let peer_id = PeerId::random(); + tester.trigger_gossip_validator_reputation_change(&peer_id); + + executor::block_on(async move { + // Poll the observer once and have it forward the reputation change from the gossip + // validator to the test network. + assert!(observer.now_or_never().is_none()); + + // Ignore initial event stream request by gossip engine. 
+ match tester.events.next().now_or_never() { + Some(Some(Event::EventStream(_))) => {} + _ => panic!("expected event stream request"), + }; + + assert_matches!( + tester.events.next().now_or_never(), + Some(Some(Event::Report(_, _))) + ); + }); + } } diff --git a/client/finality-grandpa/src/tests.rs b/client/finality-grandpa/src/tests.rs index d7d1d1e48d..b319e3d79b 100644 --- a/client/finality-grandpa/src/tests.rs +++ b/client/finality-grandpa/src/tests.rs @@ -18,1691 +18,1953 @@ use super::*; use environment::HasVoted; -use sc_network_test::{ - Block, Hash, TestNetFactory, BlockImportAdapter, Peer, - PeersClient, PassThroughVerifier, PeersFullClient, -}; -use sc_network::config::{ProtocolConfig, BoxFinalityProofRequestBuilder}; -use parking_lot::Mutex; use futures_timer::Delay; -use tokio::runtime::{Runtime, Handle}; -use sp_keyring::Ed25519Keyring; +use parity_scale_codec::Decode; +use parking_lot::Mutex; use sc_client::LongestChain; use sc_client_api::backend::TransactionFor; +use sc_network::config::{BoxFinalityProofRequestBuilder, ProtocolConfig}; +use sc_network_test::{ + Block, BlockImportAdapter, Hash, PassThroughVerifier, Peer, PeersClient, PeersFullClient, + TestNetFactory, +}; +use sp_api::{ApiRef, ProvideRuntimeApi, StorageProof}; use sp_blockchain::Result; -use sp_api::{ApiRef, StorageProof, ProvideRuntimeApi}; -use substrate_test_runtime_client::runtime::BlockNumber; use sp_consensus::{ - BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, ImportResult, BlockImport, - import_queue::{BoxJustificationImport, BoxFinalityProofImport}, + import_queue::{BoxFinalityProofImport, BoxJustificationImport}, + BlockImport, BlockImportParams, BlockOrigin, ForkChoiceStrategy, ImportResult, ImportedAux, }; -use std::{collections::{HashMap, HashSet}, pin::Pin}; -use parity_scale_codec::Decode; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, HashFor}; +use sp_core::{crypto::Public, H256}; +use sp_finality_grandpa::{AuthorityList, 
GrandpaApi, GRANDPA_ENGINE_ID}; +use sp_keyring::Ed25519Keyring; use sp_runtime::generic::{BlockId, DigestItem}; -use sp_core::{H256, crypto::Public}; -use sp_finality_grandpa::{GRANDPA_ENGINE_ID, AuthorityList, GrandpaApi}; -use sp_state_machine::{InMemoryBackend, prove_read, read_proof_check}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT}; +use sp_state_machine::{prove_read, read_proof_check, InMemoryBackend}; +use std::{ + collections::{HashMap, HashSet}, + pin::Pin, +}; +use substrate_test_runtime_client::runtime::BlockNumber; +use tokio::runtime::{Handle, Runtime}; use authorities::AuthoritySet; +use consensus_changes::ConsensusChanges; use finality_proof::{ - FinalityProofProvider, AuthoritySetForFinalityProver, AuthoritySetForFinalityChecker, + AuthoritySetForFinalityChecker, AuthoritySetForFinalityProver, FinalityProofProvider, }; -use consensus_changes::ConsensusChanges; use sc_block_builder::BlockBuilderProvider; -type PeerData = - Mutex< - Option< - LinkHalf< - Block, - PeersFullClient, - LongestChain - > - > - >; +type PeerData = Mutex< + Option< + LinkHalf< + Block, + PeersFullClient, + LongestChain, + >, + >, +>; type GrandpaPeer = Peer; struct GrandpaTestNet { - peers: Vec, - test_config: TestApi, + peers: Vec, + test_config: TestApi, } impl GrandpaTestNet { - fn new(test_config: TestApi, n_peers: usize) -> Self { - let mut net = GrandpaTestNet { - peers: Vec::with_capacity(n_peers), - test_config, - }; - for _ in 0..n_peers { - net.add_full_peer(); - } - net - } + fn new(test_config: TestApi, n_peers: usize) -> Self { + let mut net = GrandpaTestNet { + peers: Vec::with_capacity(n_peers), + test_config, + }; + for _ in 0..n_peers { + net.add_full_peer(); + } + net + } } impl TestNetFactory for GrandpaTestNet { - type Verifier = PassThroughVerifier; - type PeerData = PeerData; - - /// Create new test network with peers and given config. 
- fn from_config(_config: &ProtocolConfig) -> Self { - GrandpaTestNet { - peers: Vec::new(), - test_config: Default::default(), - } - } - - fn default_config() -> ProtocolConfig { - // This is unused. - ProtocolConfig::default() - } - - fn make_verifier( - &self, - _client: PeersClient, - _cfg: &ProtocolConfig, - _: &PeerData, - ) -> Self::Verifier { - PassThroughVerifier(false) // use non-instant finality. - } - - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - PeerData, - ) - { - match client { - PeersClient::Full(ref client, ref backend) => { - let (import, link) = block_import( - client.clone(), - &self.test_config, - LongestChain::new(backend.clone()), - ).expect("Could not create block import for fresh peer."); - let justification_import = Box::new(import.clone()); - ( - BlockImportAdapter::new_full(import), - Some(justification_import), - None, - None, - Mutex::new(Some(link)), - ) - }, - PeersClient::Light(ref client, ref backend) => { - use crate::light_import::tests::light_block_import_without_justifications; - - let authorities_provider = Arc::new(self.test_config.clone()); - // forbid direct finalization using justification that came with the block - // => light clients will try to fetch finality proofs - let import = light_block_import_without_justifications( - client.clone(), - backend.clone(), - &self.test_config, - authorities_provider, - ).expect("Could not create block import for fresh peer."); - let finality_proof_req_builder = import.0.create_finality_proof_request_builder(); - let proof_import = Box::new(import.clone()); - ( - BlockImportAdapter::new_light(import), - None, - Some(proof_import), - Some(finality_proof_req_builder), - Mutex::new(None), - ) - }, - } - } - - fn make_finality_proof_provider( - &self, - client: PeersClient - ) -> Option>> { - match client { - PeersClient::Full(_, ref backend) => { - Some(Arc::new(FinalityProofProvider::new(backend.clone(), 
self.test_config.clone()))) - }, - PeersClient::Light(_, _) => None, - } - } - - fn peer(&mut self, i: usize) -> &mut GrandpaPeer { - &mut self.peers[i] - } - - fn peers(&self) -> &Vec { - &self.peers - } - - fn mut_peers)>(&mut self, closure: F) { - closure(&mut self.peers); - } + type Verifier = PassThroughVerifier; + type PeerData = PeerData; + + /// Create new test network with peers and given config. + fn from_config(_config: &ProtocolConfig) -> Self { + GrandpaTestNet { + peers: Vec::new(), + test_config: Default::default(), + } + } + + fn default_config() -> ProtocolConfig { + // This is unused. + ProtocolConfig::default() + } + + fn make_verifier( + &self, + _client: PeersClient, + _cfg: &ProtocolConfig, + _: &PeerData, + ) -> Self::Verifier { + PassThroughVerifier(false) // use non-instant finality. + } + + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + PeerData, + ) { + match client { + PeersClient::Full(ref client, ref backend) => { + let (import, link) = block_import( + client.clone(), + &self.test_config, + LongestChain::new(backend.clone()), + ) + .expect("Could not create block import for fresh peer."); + let justification_import = Box::new(import.clone()); + ( + BlockImportAdapter::new_full(import), + Some(justification_import), + None, + None, + Mutex::new(Some(link)), + ) + } + PeersClient::Light(ref client, ref backend) => { + use crate::light_import::tests::light_block_import_without_justifications; + + let authorities_provider = Arc::new(self.test_config.clone()); + // forbid direct finalization using justification that came with the block + // => light clients will try to fetch finality proofs + let import = light_block_import_without_justifications( + client.clone(), + backend.clone(), + &self.test_config, + authorities_provider, + ) + .expect("Could not create block import for fresh peer."); + let finality_proof_req_builder = 
import.0.create_finality_proof_request_builder(); + let proof_import = Box::new(import.clone()); + ( + BlockImportAdapter::new_light(import), + None, + Some(proof_import), + Some(finality_proof_req_builder), + Mutex::new(None), + ) + } + } + } + + fn make_finality_proof_provider( + &self, + client: PeersClient, + ) -> Option>> { + match client { + PeersClient::Full(_, ref backend) => Some(Arc::new(FinalityProofProvider::new( + backend.clone(), + self.test_config.clone(), + ))), + PeersClient::Light(_, _) => None, + } + } + + fn peer(&mut self, i: usize) -> &mut GrandpaPeer { + &mut self.peers[i] + } + + fn peers(&self) -> &Vec { + &self.peers + } + + fn mut_peers)>(&mut self, closure: F) { + closure(&mut self.peers); + } } #[derive(Default, Clone)] pub(crate) struct TestApi { - genesis_authorities: AuthorityList, + genesis_authorities: AuthorityList, } impl TestApi { - pub fn new(genesis_authorities: AuthorityList) -> Self { - TestApi { - genesis_authorities, - } - } + pub fn new(genesis_authorities: AuthorityList) -> Self { + TestApi { + genesis_authorities, + } + } } pub(crate) struct RuntimeApi { - inner: TestApi, + inner: TestApi, } impl ProvideRuntimeApi for TestApi { - type Api = RuntimeApi; - - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RuntimeApi { inner: self.clone() }.into() - } + type Api = RuntimeApi; + + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + RuntimeApi { + inner: self.clone(), + } + .into() + } } sp_api::mock_impl_runtime_apis! 
{ - impl GrandpaApi for RuntimeApi { - type Error = sp_blockchain::Error; + impl GrandpaApi for RuntimeApi { + type Error = sp_blockchain::Error; - fn grandpa_authorities(&self) -> AuthorityList { - self.inner.genesis_authorities.clone() - } - } + fn grandpa_authorities(&self) -> AuthorityList { + self.inner.genesis_authorities.clone() + } + } } impl GenesisAuthoritySetProvider for TestApi { - fn get(&self) -> Result { - Ok(self.genesis_authorities.clone()) - } + fn get(&self) -> Result { + Ok(self.genesis_authorities.clone()) + } } impl AuthoritySetForFinalityProver for TestApi { - fn authorities(&self, _block: &BlockId) -> Result { - Ok(self.genesis_authorities.clone()) - } - - fn prove_authorities(&self, block: &BlockId) -> Result { - let authorities = self.authorities(block)?; - let backend = >>::from(vec![ - (None, vec![(b"authorities".to_vec(), Some(authorities.encode()))]) - ]); - let proof = prove_read(backend, vec![b"authorities"]) - .expect("failure proving read from in-memory storage backend"); - Ok(proof) - } + fn authorities(&self, _block: &BlockId) -> Result { + Ok(self.genesis_authorities.clone()) + } + + fn prove_authorities(&self, block: &BlockId) -> Result { + let authorities = self.authorities(block)?; + let backend = >>::from(vec![( + None, + vec![(b"authorities".to_vec(), Some(authorities.encode()))], + )]); + let proof = prove_read(backend, vec![b"authorities"]) + .expect("failure proving read from in-memory storage backend"); + Ok(proof) + } } impl AuthoritySetForFinalityChecker for TestApi { - fn check_authorities_proof( - &self, - _hash: ::Hash, - header: ::Header, - proof: StorageProof, - ) -> Result { - let results = read_proof_check::, _>( - *header.state_root(), proof, vec![b"authorities"] - ) - .expect("failure checking read proof for authorities"); - let encoded = results.get(&b"authorities"[..]) - .expect("returned map must contain all proof keys") - .as_ref() - .expect("authorities in proof is None"); - let authorities = 
Decode::decode(&mut &encoded[..]) - .expect("failure decoding authorities read from proof"); - Ok(authorities) - } + fn check_authorities_proof( + &self, + _hash: ::Hash, + header: ::Header, + proof: StorageProof, + ) -> Result { + let results = read_proof_check::, _>( + *header.state_root(), + proof, + vec![b"authorities"], + ) + .expect("failure checking read proof for authorities"); + let encoded = results + .get(&b"authorities"[..]) + .expect("returned map must contain all proof keys") + .as_ref() + .expect("authorities in proof is None"); + let authorities = Decode::decode(&mut &encoded[..]) + .expect("failure decoding authorities read from proof"); + Ok(authorities) + } } const TEST_GOSSIP_DURATION: Duration = Duration::from_millis(500); fn make_ids(keys: &[Ed25519Keyring]) -> AuthorityList { - keys.iter().map(|key| key.clone().public().into()).map(|id| (id, 1)).collect() + keys.iter() + .map(|key| key.clone().public().into()) + .map(|id| (id, 1)) + .collect() } fn create_keystore(authority: Ed25519Keyring) -> (KeyStorePtr, tempfile::TempDir) { - let keystore_path = tempfile::tempdir().expect("Creates keystore path"); - let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); - keystore.write().insert_ephemeral_from_seed::(&authority.to_seed()) - .expect("Creates authority key"); - - (keystore, keystore_path) + let keystore_path = tempfile::tempdir().expect("Creates keystore path"); + let keystore = sc_keystore::Store::open(keystore_path.path(), None).expect("Creates keystore"); + keystore + .write() + .insert_ephemeral_from_seed::(&authority.to_seed()) + .expect("Creates authority key"); + + (keystore, keystore_path) } -fn block_until_complete(future: impl Future + Unpin, net: &Arc>, runtime: &mut Runtime) { - let drive_to_completion = futures::future::poll_fn(|cx| { - net.lock().poll(cx); Poll::<()>::Pending - }); - runtime.block_on( - future::select(future, drive_to_completion) - ); +fn block_until_complete( + future: 
impl Future + Unpin, + net: &Arc>, + runtime: &mut Runtime, +) { + let drive_to_completion = futures::future::poll_fn(|cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }); + runtime.block_on(future::select(future, drive_to_completion)); } // run the voters to completion. provide a closure to be invoked after // the voters are spawned but before blocking on them. fn run_to_completion_with( - runtime: &mut Runtime, - blocks: u64, - net: Arc>, - peers: &[Ed25519Keyring], - with: F, -) -> u64 where - F: FnOnce(Handle) -> Option>>> + runtime: &mut Runtime, + blocks: u64, + net: Arc>, + peers: &[Ed25519Keyring], + with: F, +) -> u64 +where + F: FnOnce(Handle) -> Option>>>, { - use parking_lot::RwLock; - - let mut wait_for = Vec::new(); - - let highest_finalized = Arc::new(RwLock::new(0)); - - if let Some(f) = (with)(runtime.handle().clone()) { - wait_for.push(f); - }; - - let mut keystore_paths = Vec::new(); - for (peer_id, key) in peers.iter().enumerate() { - let (keystore, keystore_path) = create_keystore(*key); - keystore_paths.push(keystore_path); - - let highest_finalized = highest_finalized.clone(); - let (client, net_service, link) = { - let net = net.lock(); - // temporary needed for some reason - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; - - wait_for.push( - Box::pin( - client.finality_notification_stream() - .take_while(move |n| { - let mut highest_finalized = highest_finalized.write(); - if *n.header.number() > *highest_finalized { - *highest_finalized = *n.header.number(); - } - future::ready(n.header.number() < &blocks) - }) - .collect::>() - .map(|_| ()) - ) - ); - - fn assert_send(_: &T) { } - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", peer_id)), - 
is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - inherent_data_providers: InherentDataProviders::new(), - telemetry_on_connect: None, - voting_rule: (), - prometheus_registry: None, - }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - - assert_send(&voter); - - runtime.spawn(voter); - } - - // wait for all finalized on each. - let wait_for = ::futures::future::join_all(wait_for); - - block_until_complete(wait_for, &net, runtime); - let highest_finalized = *highest_finalized.read(); - highest_finalized + use parking_lot::RwLock; + + let mut wait_for = Vec::new(); + + let highest_finalized = Arc::new(RwLock::new(0)); + + if let Some(f) = (with)(runtime.handle().clone()) { + wait_for.push(f); + }; + + let mut keystore_paths = Vec::new(); + for (peer_id, key) in peers.iter().enumerate() { + let (keystore, keystore_path) = create_keystore(*key); + keystore_paths.push(keystore_path); + + let highest_finalized = highest_finalized.clone(); + let (client, net_service, link) = { + let net = net.lock(); + // temporary needed for some reason + let link = net.peers[peer_id] + .data + .lock() + .take() + .expect("link initialized at startup; qed"); + ( + net.peers[peer_id].client().clone(), + net.peers[peer_id].network_service().clone(), + link, + ) + }; + + wait_for.push(Box::pin( + client + .finality_notification_stream() + .take_while(move |n| { + let mut highest_finalized = highest_finalized.write(); + if *n.header.number() > *highest_finalized { + *highest_finalized = *n.header.number(); + } + future::ready(n.header.number() < &blocks) + }) + .collect::>() + .map(|_| ()), + )); + + fn assert_send(_: &T) {} + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link: link, + network: 
net_service, + inherent_data_providers: InherentDataProviders::new(), + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + }; + let voter = + run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + + assert_send(&voter); + + runtime.spawn(voter); + } + + // wait for all finalized on each. + let wait_for = ::futures::future::join_all(wait_for); + + block_until_complete(wait_for, &net, runtime); + let highest_finalized = *highest_finalized.read(); + highest_finalized } fn run_to_completion( - runtime: &mut Runtime, - blocks: u64, - net: Arc>, - peers: &[Ed25519Keyring] + runtime: &mut Runtime, + blocks: u64, + net: Arc>, + peers: &[Ed25519Keyring], ) -> u64 { - run_to_completion_with(runtime, blocks, net, peers, |_| None) + run_to_completion_with(runtime, blocks, net, peers, |_| None) } fn add_scheduled_change(block: &mut Block, change: ScheduledChange) { - block.header.digest_mut().push(DigestItem::Consensus( - GRANDPA_ENGINE_ID, - sp_finality_grandpa::ConsensusLog::ScheduledChange(change).encode(), - )); + block.header.digest_mut().push(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + sp_finality_grandpa::ConsensusLog::ScheduledChange(change).encode(), + )); } fn add_forced_change( - block: &mut Block, - median_last_finalized: BlockNumber, - change: ScheduledChange, + block: &mut Block, + median_last_finalized: BlockNumber, + change: ScheduledChange, ) { - block.header.digest_mut().push(DigestItem::Consensus( - GRANDPA_ENGINE_ID, - sp_finality_grandpa::ConsensusLog::ForcedChange(median_last_finalized, change).encode(), - )); + block.header.digest_mut().push(DigestItem::Consensus( + GRANDPA_ENGINE_ID, + sp_finality_grandpa::ConsensusLog::ForcedChange(median_last_finalized, change).encode(), + )); } #[test] fn finalize_3_voters_no_observers() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let 
voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - for i in 0..3 { - assert_eq!(net.peer(i).client().info().best_number, 20, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 20, net.clone(), peers); - - // normally there's no justification for finalized blocks - assert!( - net.lock().peer(0).client().justification(&BlockId::Number(20)).unwrap().is_none(), - "Extra justification for block#1", - ); + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + let peers = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + for i in 0..3 { + assert_eq!( + net.peer(i).client().info().best_number, + 20, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(&mut runtime, 20, net.clone(), peers); + + // normally there's no justification for finalized blocks + assert!( + net.lock() + .peer(0) + .client() + .justification(&BlockId::Number(20)) + .unwrap() + .is_none(), + "Extra justification for block#1", + ); } #[test] fn finalize_3_voters_1_full_observer() { - let mut runtime = Runtime::new().unwrap(); - - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - let net = Arc::new(Mutex::new(net)); - let mut finality_notifications = Vec::new(); - - let all_peers = peers.iter() - .cloned() - .map(Some) - .chain(std::iter::once(None)); - - let mut keystore_paths = Vec::new(); - - let mut voters = Vec::new(); - - for (peer_id, local_key) in all_peers.enumerate() { 
- let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &20)) - .for_each(move |_| future::ready(())) - ); - - let keystore = if let Some(local_key) = local_key { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - Some(keystore) - } else { - None - }; - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore, - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - inherent_data_providers: InherentDataProviders::new(), - telemetry_on_connect: None, - voting_rule: (), - prometheus_registry: None, - }; - - voters.push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); - } - - for voter in voters { - runtime.spawn(voter); - } - - // wait for all finalized on each. 
- let wait_for = futures::future::join_all(finality_notifications) - .map(|_| ()); - - block_until_complete(wait_for, &net, &mut runtime); + let mut runtime = Runtime::new().unwrap(); + + let peers = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + let net = Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + let all_peers = peers.iter().cloned().map(Some).chain(std::iter::once(None)); + + let mut keystore_paths = Vec::new(); + + let mut voters = Vec::new(); + + for (peer_id, local_key) in all_peers.enumerate() { + let (client, net_service, link) = { + let net = net.lock(); + let link = net.peers[peer_id] + .data + .lock() + .take() + .expect("link initialized at startup; qed"); + ( + net.peers[peer_id].client().clone(), + net.peers[peer_id].network_service().clone(), + link, + ) + }; + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &20)) + .for_each(move |_| future::ready(())), + ); + + let keystore = if let Some(local_key) = local_key { + let (keystore, keystore_path) = create_keystore(local_key); + keystore_paths.push(keystore_path); + Some(keystore) + } else { + None + }; + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore, + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link: link, + network: net_service, + inherent_data_providers: InherentDataProviders::new(), + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + }; + + voters + .push(run_grandpa_voter(grandpa_params).expect("all in order with client and network")); + } + + for voter in voters { + runtime.spawn(voter); + } + + // wait for all finalized 
on each. + let wait_for = futures::future::join_all(finality_notifications).map(|_| ()); + + block_until_complete(wait_for, &net, &mut runtime); } #[test] fn transition_3_voters_twice_1_full_observer() { - let _ = env_logger::try_init(); - let peers_a = &[ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ]; - - let peers_b = &[ - Ed25519Keyring::Dave, - Ed25519Keyring::Eve, - Ed25519Keyring::Ferdie, - ]; - - let peers_c = &[ - Ed25519Keyring::Alice, - Ed25519Keyring::Eve, - Ed25519Keyring::Two, - ]; - - let observer = &[Ed25519Keyring::One]; - - let genesis_voters = make_ids(peers_a); - - let api = TestApi::new(genesis_voters); - let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); - - let mut runtime = Runtime::new().unwrap(); - - net.lock().peer(0).push_blocks(1, false); - net.lock().block_until_sync(); - - for (i, peer) in net.lock().peers().iter().enumerate() { - let full_client = peer.client().as_full().expect("only full clients are used in test"); - assert_eq!(full_client.chain_info().best_number, 1, - "Peer #{} failed to sync", i); - - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); - - assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); - assert_eq!(set.pending_changes().count(), 0); - } - - { - let net = net.clone(); - let client = net.lock().peers[0].client().clone(); - let peers_c = peers_c.clone(); - - // wait for blocks to be finalized before generating new ones - let block_production = client.finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &30)) - .for_each(move |n| { - match n.header.number() { - 1 => { - // first 14 blocks. - net.lock().peer(0).push_blocks(13, false); - }, - 14 => { - // generate transition at block 15, applied at 20. 
- net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 4, - }); - - block - }); - net.lock().peer(0).push_blocks(5, false); - }, - 20 => { - // at block 21 we do another transition, but this time instant. - // add more until we have 30. - net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(&peers_c), - delay: 0, - }); - - block - }); - net.lock().peer(0).push_blocks(9, false); - }, - _ => {}, - } - - future::ready(()) - }); - - runtime.spawn(block_production); - } - - let mut finality_notifications = Vec::new(); - let all_peers = peers_a.iter() - .chain(peers_b) - .chain(peers_c) - .chain(observer) - .cloned() - .collect::>() // deduplicate - .into_iter() - .enumerate(); - - let mut keystore_paths = Vec::new(); - for (peer_id, local_key) in all_peers { - let (keystore, keystore_path) = create_keystore(local_key); - keystore_paths.push(keystore_path); - - let (client, net_service, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - net.peers[peer_id].network_service().clone(), - link, - ) - }; - - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &30)) - .for_each(move |_| future::ready(())) - .map(move |()| { - let full_client = client.as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); - - assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); - assert_eq!(set.pending_changes().count(), 0); - }) - ); - - let grandpa_params = GrandpaParams { - config: Config { - 
gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link: link, - network: net_service, - inherent_data_providers: InherentDataProviders::new(), - telemetry_on_connect: None, - voting_rule: (), - prometheus_registry: None, - }; - let voter = run_grandpa_voter(grandpa_params).expect("all in order with client and network"); - - runtime.spawn(voter); - } - - // wait for all finalized on each. - let wait_for = ::futures::future::join_all(finality_notifications); - - block_until_complete(wait_for, &net, &mut runtime); + let _ = env_logger::try_init(); + let peers_a = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + + let peers_b = &[ + Ed25519Keyring::Dave, + Ed25519Keyring::Eve, + Ed25519Keyring::Ferdie, + ]; + + let peers_c = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Eve, + Ed25519Keyring::Two, + ]; + + let observer = &[Ed25519Keyring::One]; + + let genesis_voters = make_ids(peers_a); + + let api = TestApi::new(genesis_voters); + let net = Arc::new(Mutex::new(GrandpaTestNet::new(api, 8))); + + let mut runtime = Runtime::new().unwrap(); + + net.lock().peer(0).push_blocks(1, false); + net.lock().block_until_sync(); + + for (i, peer) in net.lock().peers().iter().enumerate() { + let full_client = peer + .client() + .as_full() + .expect("only full clients are used in test"); + assert_eq!( + full_client.chain_info().best_number, + 1, + "Peer #{} failed to sync", + i + ); + + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); + + assert_eq!(set.current(), (0, make_ids(peers_a).as_slice())); + assert_eq!(set.pending_changes().count(), 0); + } + + { + let net = net.clone(); + let client = net.lock().peers[0].client().clone(); + let peers_c = peers_c.clone(); + + // wait for blocks to be finalized before generating new ones + let block_production = client + 
.finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &30)) + .for_each(move |n| { + match n.header.number() { + 1 => { + // first 14 blocks. + net.lock().peer(0).push_blocks(13, false); + } + 14 => { + // generate transition at block 15, applied at 20. + net.lock() + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 4, + }, + ); + + block + }); + net.lock().peer(0).push_blocks(5, false); + } + 20 => { + // at block 21 we do another transition, but this time instant. + // add more until we have 30. + net.lock() + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(&peers_c), + delay: 0, + }, + ); + + block + }); + net.lock().peer(0).push_blocks(9, false); + } + _ => {} + } + + future::ready(()) + }); + + runtime.spawn(block_production); + } + + let mut finality_notifications = Vec::new(); + let all_peers = peers_a + .iter() + .chain(peers_b) + .chain(peers_c) + .chain(observer) + .cloned() + .collect::>() // deduplicate + .into_iter() + .enumerate(); + + let mut keystore_paths = Vec::new(); + for (peer_id, local_key) in all_peers { + let (keystore, keystore_path) = create_keystore(local_key); + keystore_paths.push(keystore_path); + + let (client, net_service, link) = { + let net = net.lock(); + let link = net.peers[peer_id] + .data + .lock() + .take() + .expect("link initialized at startup; qed"); + ( + net.peers[peer_id].client().clone(), + net.peers[peer_id].network_service().clone(), + link, + ) + }; + + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &30)) + .for_each(move |_| future::ready(())) + .map(move |()| { + let full_client = client + 
.as_full() + .expect("only full clients are used in test"); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); + + assert_eq!(set.current(), (2, make_ids(peers_c).as_slice())); + assert_eq!(set.pending_changes().count(), 0); + }), + ); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link: link, + network: net_service, + inherent_data_providers: InherentDataProviders::new(), + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + }; + let voter = + run_grandpa_voter(grandpa_params).expect("all in order with client and network"); + + runtime.spawn(voter); + } + + // wait for all finalized on each. + let wait_for = ::futures::future::join_all(finality_notifications); + + block_until_complete(wait_for, &net, &mut runtime); } #[test] fn justification_is_emitted_when_consensus_data_changes() { - let mut runtime = Runtime::new().unwrap(); - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); - - // import block#1 WITH consensus data change - let new_authorities = vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]; - net.peer(0).push_authorities_change_block(new_authorities); - net.block_until_sync(); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - - // ... 
and check that there's justification for block#1 - assert!(net.lock().peer(0).client().justification(&BlockId::Number(1)).unwrap().is_some(), - "Missing justification for block#1"); + let mut runtime = Runtime::new().unwrap(); + let peers = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 3); + + // import block#1 WITH consensus data change + let new_authorities = vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]; + net.peer(0).push_authorities_change_block(new_authorities); + net.block_until_sync(); + let net = Arc::new(Mutex::new(net)); + run_to_completion(&mut runtime, 1, net.clone(), peers); + + // ... and check that there's justification for block#1 + assert!( + net.lock() + .peer(0) + .client() + .justification(&BlockId::Number(1)) + .unwrap() + .is_some(), + "Missing justification for block#1" + ); } #[test] fn justification_is_generated_periodically() { - let mut runtime = Runtime::new().unwrap(); - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(32, false); - net.block_until_sync(); - - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 32, net.clone(), peers); - - // when block#32 (justification_period) is finalized, justification - // is required => generated - for i in 0..3 { - assert!(net.lock().peer(i).client().justification(&BlockId::Number(32)).unwrap().is_some()); - } + let mut runtime = Runtime::new().unwrap(); + let peers = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + net.peer(0).push_blocks(32, false); + net.block_until_sync(); + + let net = Arc::new(Mutex::new(net)); + run_to_completion(&mut runtime, 32, net.clone(), peers); + 
+ // when block#32 (justification_period) is finalized, justification + // is required => generated + for i in 0..3 { + assert!(net + .lock() + .peer(i) + .client() + .justification(&BlockId::Number(32)) + .unwrap() + .is_some()); + } } #[test] fn consensus_changes_works() { - let mut changes = ConsensusChanges::::empty(); - - // pending changes are not finalized - changes.note_change((10, H256::from_low_u64_be(1))); - assert_eq!(changes.finalize((5, H256::from_low_u64_be(5)), |_| Ok(None)).unwrap(), (false, false)); - - // no change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1001)))).unwrap(), (true, false)); - - // change is selected from competing pending changes - changes.note_change((1, H256::from_low_u64_be(1))); - changes.note_change((1, H256::from_low_u64_be(101))); - assert_eq!(changes.finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some(H256::from_low_u64_be(1)))).unwrap(), (true, true)); + let mut changes = ConsensusChanges::::empty(); + + // pending changes are not finalized + changes.note_change((10, H256::from_low_u64_be(1))); + assert_eq!( + changes + .finalize((5, H256::from_low_u64_be(5)), |_| Ok(None)) + .unwrap(), + (false, false) + ); + + // no change is selected from competing pending changes + changes.note_change((1, H256::from_low_u64_be(1))); + changes.note_change((1, H256::from_low_u64_be(101))); + assert_eq!( + changes + .finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some( + H256::from_low_u64_be(1001) + ))) + .unwrap(), + (true, false) + ); + + // change is selected from competing pending changes + changes.note_change((1, H256::from_low_u64_be(1))); + changes.note_change((1, H256::from_low_u64_be(101))); + assert_eq!( + changes + .finalize((10, H256::from_low_u64_be(10)), |_| Ok(Some( + H256::from_low_u64_be(1) + ))) + .unwrap(), + 
(true, true) + ); } #[test] fn sync_justifications_on_change_blocks() { - let mut runtime = Runtime::new().unwrap(); - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; - let voters = make_ids(peers_b); - - // 4 peers, 3 of them are authorities and participate in grandpa - let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api, 4); - - // add 20 blocks - net.peer(0).push_blocks(20, false); - - // at block 21 we do add a transition which is instant - net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - block - }); - - // add more blocks on top of it (until we have 25) - net.peer(0).push_blocks(4, false); - net.block_until_sync(); - - for i in 0..4 { - assert_eq!(net.peer(i).client().info().best_number, 25, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 25, net.clone(), peers_a); - - // the first 3 peers are grandpa voters and therefore have already finalized - // block 21 and stored a justification - for i in 0..3 { - assert!(net.lock().peer(i).client().justification(&BlockId::Number(21)).unwrap().is_some()); - } - - // the last peer should get the justification by syncing from other peers - futures::executor::block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(3).client().justification(&BlockId::Number(21)).unwrap().is_none() { - net.lock().poll(cx); - Poll::Pending - } else { - Poll::Ready(()) - } - })) + let mut runtime = Runtime::new().unwrap(); + let peers_a = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers_b); + + // 4 peers, 3 of them are authorities and participate in 
grandpa + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api, 4); + + // add 20 blocks + net.peer(0).push_blocks(20, false); + + // at block 21 we do add a transition which is instant + net.peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + block + }); + + // add more blocks on top of it (until we have 25) + net.peer(0).push_blocks(4, false); + net.block_until_sync(); + + for i in 0..4 { + assert_eq!( + net.peer(i).client().info().best_number, + 25, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(&mut runtime, 25, net.clone(), peers_a); + + // the first 3 peers are grandpa voters and therefore have already finalized + // block 21 and stored a justification + for i in 0..3 { + assert!(net + .lock() + .peer(i) + .client() + .justification(&BlockId::Number(21)) + .unwrap() + .is_some()); + } + + // the last peer should get the justification by syncing from other peers + futures::executor::block_on(futures::future::poll_fn(move |cx| { + if net + .lock() + .peer(3) + .client() + .justification(&BlockId::Number(21)) + .unwrap() + .is_none() + { + net.lock().poll(cx); + Poll::Pending + } else { + Poll::Ready(()) + } + })) } #[test] fn finalizes_multiple_pending_changes_in_order() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let peers_b = &[Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie]; - let peers_c = &[Ed25519Keyring::Dave, Ed25519Keyring::Alice, Ed25519Keyring::Bob]; - - let all_peers = &[ - Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie, - Ed25519Keyring::Dave, Ed25519Keyring::Eve, Ed25519Keyring::Ferdie, - ]; - let genesis_voters = 
make_ids(peers_a); - - // 6 peers, 3 of them are authorities and participate in grandpa from genesis - let api = TestApi::new(genesis_voters); - let mut net = GrandpaTestNet::new(api, 6); - - // add 20 blocks - net.peer(0).push_blocks(20, false); - - // at block 21 we do add a transition which is instant - net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - block - }); - - // add more blocks on top of it (until we have 25) - net.peer(0).push_blocks(4, false); - - // at block 26 we add another which is enacted at block 30 - net.peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_c), - delay: 4, - }); - block - }); - - // add more blocks on top of it (until we have 30) - net.peer(0).push_blocks(4, false); - - net.block_until_sync(); - - // all peers imported both change blocks - for i in 0..6 { - assert_eq!(net.peer(i).client().info().best_number, 30, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 30, net.clone(), all_peers); + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let peers_a = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let peers_b = &[ + Ed25519Keyring::Dave, + Ed25519Keyring::Eve, + Ed25519Keyring::Ferdie, + ]; + let peers_c = &[ + Ed25519Keyring::Dave, + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + ]; + + let all_peers = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + Ed25519Keyring::Dave, + Ed25519Keyring::Eve, + Ed25519Keyring::Ferdie, + ]; + let genesis_voters = make_ids(peers_a); + + // 6 peers, 3 of them are authorities and participate in grandpa from genesis + let 
api = TestApi::new(genesis_voters); + let mut net = GrandpaTestNet::new(api, 6); + + // add 20 blocks + net.peer(0).push_blocks(20, false); + + // at block 21 we do add a transition which is instant + net.peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + block + }); + + // add more blocks on top of it (until we have 25) + net.peer(0).push_blocks(4, false); + + // at block 26 we add another which is enacted at block 30 + net.peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(peers_c), + delay: 4, + }, + ); + block + }); + + // add more blocks on top of it (until we have 30) + net.peer(0).push_blocks(4, false); + + net.block_until_sync(); + + // all peers imported both change blocks + for i in 0..6 { + assert_eq!( + net.peer(i).client().info().best_number, + 30, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + run_to_completion(&mut runtime, 30, net.clone(), all_peers); } #[test] fn force_change_to_new_set() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - // two of these guys are offline. - let genesis_authorities = &[ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - Ed25519Keyring::One, - Ed25519Keyring::Two, - ]; - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let api = TestApi::new(make_ids(genesis_authorities)); - - let voters = make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); - let net = Arc::new(Mutex::new(net)); - - net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - let mut block = builder.build().unwrap().block; - - // add a forced transition at block 12. 
- add_forced_change(&mut block, 0, ScheduledChange { - next_authorities: voters.clone(), - delay: 10, - }); - - // add a normal transition too to ensure that forced changes take priority. - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(genesis_authorities), - delay: 5, - }); - - block - }); - - net.lock().peer(0).push_blocks(25, false); - net.lock().block_until_sync(); - - for (i, peer) in net.lock().peers().iter().enumerate() { - assert_eq!(peer.client().info().best_number, 26, - "Peer #{} failed to sync", i); - - let full_client = peer.client().as_full().expect("only full clients are used in test"); - let set: AuthoritySet = crate::aux_schema::load_authorities(&*full_client).unwrap(); - - assert_eq!(set.current(), (1, voters.as_slice())); - assert_eq!(set.pending_changes().count(), 0); - } - - // it will only finalize if the forced transition happens. - // we add_blocks after the voters are spawned because otherwise - // the link-halves have the wrong AuthoritySet - run_to_completion(&mut runtime, 25, net, peers_a); + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + // two of these guys are offline. + let genesis_authorities = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + Ed25519Keyring::One, + Ed25519Keyring::Two, + ]; + let peers_a = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let api = TestApi::new(make_ids(genesis_authorities)); + + let voters = make_ids(peers_a); + let net = GrandpaTestNet::new(api, 3); + let net = Arc::new(Mutex::new(net)); + + net.lock() + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + let mut block = builder.build().unwrap().block; + + // add a forced transition at block 12. + add_forced_change( + &mut block, + 0, + ScheduledChange { + next_authorities: voters.clone(), + delay: 10, + }, + ); + + // add a normal transition too to ensure that forced changes take priority. 
+ add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(genesis_authorities), + delay: 5, + }, + ); + + block + }); + + net.lock().peer(0).push_blocks(25, false); + net.lock().block_until_sync(); + + for (i, peer) in net.lock().peers().iter().enumerate() { + assert_eq!( + peer.client().info().best_number, + 26, + "Peer #{} failed to sync", + i + ); + + let full_client = peer + .client() + .as_full() + .expect("only full clients are used in test"); + let set: AuthoritySet = + crate::aux_schema::load_authorities(&*full_client).unwrap(); + + assert_eq!(set.current(), (1, voters.as_slice())); + assert_eq!(set.pending_changes().count(), 0); + } + + // it will only finalize if the forced transition happens. + // we add_blocks after the voters are spawned because otherwise + // the link-halves have the wrong AuthoritySet + run_to_completion(&mut runtime, 25, net, peers_a); } #[test] fn allows_reimporting_change_blocks() { - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; - let voters = make_ids(peers_a); - let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 3); - - let client = net.peer(0).client().clone(); - let (mut block_import, ..) 
= net.make_block_import::< - TransactionFor - >( - client.clone(), - ); - - let full_client = client.as_full().unwrap(); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - let mut block = builder.build().unwrap().block; - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - - let block = || { - let block = block.clone(); - let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.body = Some(block.extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - import - }; - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::Imported(ImportedAux { - needs_justification: true, - clear_justification_requests: false, - bad_justification: false, - needs_finality_proof: false, - is_new_best: true, - header_only: false, - }), - ); - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::AlreadyInChain - ); + let peers_a = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers_a); + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api.clone(), 3); + + let client = net.peer(0).client().clone(); + let (mut block_import, ..) 
= net + .make_block_import::>( + client.clone(), + ); + + let full_client = client.as_full().unwrap(); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + let mut block = builder.build().unwrap().block; + add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + + let block = || { + let block = block.clone(); + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.body = Some(block.extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + import + }; + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: false, + needs_finality_proof: false, + is_new_best: true, + header_only: false, + }), + ); + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::AlreadyInChain + ); } #[test] fn test_bad_justification() { - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; - let voters = make_ids(peers_a); - let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 3); - - let client = net.peer(0).client().clone(); - let (mut block_import, ..) 
= net.make_block_import::< - TransactionFor - >( - client.clone(), - ); - - let full_client = client.as_full().expect("only full clients are used in test"); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - let mut block = builder.build().unwrap().block; - - add_scheduled_change(&mut block, ScheduledChange { - next_authorities: make_ids(peers_b), - delay: 0, - }); - - let block = || { - let block = block.clone(); - let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.justification = Some(Vec::new()); - import.body = Some(block.extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - import - }; - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::Imported(ImportedAux { - needs_justification: true, - clear_justification_requests: false, - bad_justification: true, - is_new_best: true, - ..Default::default() - }), - ); - - assert_eq!( - block_import.import_block(block(), HashMap::new()).unwrap(), - ImportResult::AlreadyInChain - ); + let peers_a = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let peers_b = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers_a); + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api.clone(), 3); + + let client = net.peer(0).client().clone(); + let (mut block_import, ..) 
= net + .make_block_import::>( + client.clone(), + ); + + let full_client = client + .as_full() + .expect("only full clients are used in test"); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + let mut block = builder.build().unwrap().block; + + add_scheduled_change( + &mut block, + ScheduledChange { + next_authorities: make_ids(peers_b), + delay: 0, + }, + ); + + let block = || { + let block = block.clone(); + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.justification = Some(Vec::new()); + import.body = Some(block.extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + import + }; + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: true, + clear_justification_requests: false, + bad_justification: true, + is_new_best: true, + ..Default::default() + }), + ); + + assert_eq!( + block_import.import_block(block(), HashMap::new()).unwrap(), + ImportResult::AlreadyInChain + ); } #[test] fn voter_persists_its_votes() { - use std::iter::FromIterator; - use std::sync::atomic::{AtomicUsize, Ordering}; - use futures::future; - use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; - - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - // we have two authorities but we'll only be running the voter for alice - // we are going to be listening for the prevotes it casts - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; - let voters = make_ids(peers); - - // alice has a chain with 20 blocks - let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - assert_eq!(net.peer(0).client().info().best_number, 20, - "Peer #{} failed to sync", 0); - - - let peer = net.peer(0); - let client = peer.client().clone(); - let net = Arc::new(Mutex::new(net)); - - // 
channel between the voter and the main controller. - // sending a message on the `voter_tx` restarts the voter. - let (voter_tx, voter_rx) = tracing_unbounded::<()>(""); - - let mut keystore_paths = Vec::new(); - - // startup a grandpa voter for alice but also listen for messages on a - // channel. whenever a message is received the voter is restarted. when the - // sender is dropped the voter is stopped. - { - let (keystore, keystore_path) = create_keystore(peers[0]); - keystore_paths.push(keystore_path); - - struct ResettableVoter { - voter: Pin + Send + Unpin>>, - voter_rx: TracingUnboundedReceiver<()>, - net: Arc>, - client: PeersClient, - keystore: KeyStorePtr, - } - - impl Future for ResettableVoter { - type Output = (); - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = Pin::into_inner(self); - - if let Poll::Ready(()) = Pin::new(&mut this.voter).poll(cx) { - panic!("error in the voter"); - } - - match Pin::new(&mut this.voter_rx).poll_next(cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Ready(Some(())) => { - let (_block_import, _, _, _, link) = + use futures::future; + use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver}; + use std::iter::FromIterator; + use std::sync::atomic::{AtomicUsize, Ordering}; + + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + // we have two authorities but we'll only be running the voter for alice + // we are going to be listening for the prevotes it casts + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers); + + // alice has a chain with 20 blocks + let mut net = GrandpaTestNet::new(TestApi::new(voters.clone()), 2); + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + assert_eq!( + net.peer(0).client().info().best_number, + 20, + "Peer #{} failed to sync", + 0 + ); + + let peer = net.peer(0); + let client = peer.client().clone(); + let net = 
Arc::new(Mutex::new(net)); + + // channel between the voter and the main controller. + // sending a message on the `voter_tx` restarts the voter. + let (voter_tx, voter_rx) = tracing_unbounded::<()>(""); + + let mut keystore_paths = Vec::new(); + + // startup a grandpa voter for alice but also listen for messages on a + // channel. whenever a message is received the voter is restarted. when the + // sender is dropped the voter is stopped. + { + let (keystore, keystore_path) = create_keystore(peers[0]); + keystore_paths.push(keystore_path); + + struct ResettableVoter { + voter: Pin + Send + Unpin>>, + voter_rx: TracingUnboundedReceiver<()>, + net: Arc>, + client: PeersClient, + keystore: KeyStorePtr, + } + + impl Future for ResettableVoter { + type Output = (); + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = Pin::into_inner(self); + + if let Poll::Ready(()) = Pin::new(&mut this.voter).poll(cx) { + panic!("error in the voter"); + } + + match Pin::new(&mut this.voter_rx).poll_next(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(None) => return Poll::Ready(()), + Poll::Ready(Some(())) => { + let (_block_import, _, _, _, link) = this.net.lock() .make_block_import::< TransactionFor >(this.client.clone()); - let link = link.lock().take().unwrap(); - - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(this.keystore.clone()), - name: Some(format!("peer#{}", 0)), - is_authority: true, - observer_enabled: true, - }, - link, - network: this.net.lock().peers[0].network_service().clone(), - inherent_data_providers: InherentDataProviders::new(), - telemetry_on_connect: None, - voting_rule: VotingRulesBuilder::default().build(), - prometheus_registry: None, - }; - - let voter = run_grandpa_voter(grandpa_params) - .expect("all in order with client and network") - .map(move |r| { - // we need to keep the block_import alive since it owns the - // sender 
for the voter commands channel, if that gets dropped - // then the voter will stop - drop(_block_import); - r - }); - - this.voter = Box::pin(voter); - // notify current task in order to poll the voter - cx.waker().wake_by_ref(); - } - }; - - Poll::Pending - } - } - - // we create a "dummy" voter by setting it to `pending` and triggering the `tx`. - // this way, the `ResettableVoter` will reset its `voter` field to a value ASAP. - voter_tx.unbounded_send(()).unwrap(); - runtime.spawn(ResettableVoter { - voter: Box::pin(futures::future::pending()), - voter_rx, - net: net.clone(), - client: client.clone(), - keystore, - }); - } - - let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); - - // create the communication layer for bob, but don't start any - // voter. instead we'll listen for the prevote that alice casts - // and cast our own manually - { - let (keystore, keystore_path) = create_keystore(peers[1]); - keystore_paths.push(keystore_path); - - let config = Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: Some(keystore), - name: Some(format!("peer#{}", 1)), - is_authority: true, - observer_enabled: true, - }; - - let set_state = { - let (_, _, _, _, link) = net.lock() - .make_block_import::< - TransactionFor - >(client); - let LinkHalf { persistent_data, .. } = link.lock().take().unwrap(); - let PersistentData { set_state, .. 
} = persistent_data; - set_state - }; - - let network = communication::NetworkBridge::new( - net.lock().peers[1].network_service().clone(), - config.clone(), - set_state, - None, - ); - - let (round_rx, round_tx) = network.round_communication( - communication::Round(1), - communication::SetId(0), - Arc::new(VoterSet::from_iter(voters)), - Some(peers[1].pair().into()), - HasVoted::No, - ); - - runtime.spawn(network); - - let round_tx = Arc::new(Mutex::new(round_tx)); - let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); - - let net = net.clone(); - let state = Arc::new(AtomicUsize::new(0)); - - runtime.spawn(round_rx.for_each(move |signed| { - let net2 = net.clone(); - let net = net.clone(); - let voter_tx = voter_tx.clone(); - let round_tx = round_tx.clone(); - let state = state.clone(); - let exit_tx = exit_tx.clone(); - - async move { - if state.compare_and_swap(0, 1, Ordering::SeqCst) == 0 { - // the first message we receive should be a prevote from alice. - let prevote = match signed.message { - finality_grandpa::Message::Prevote(prevote) => prevote, - _ => panic!("voter should prevote."), - }; - - // its chain has 20 blocks and the voter targets 3/4 of the - // unfinalized chain, so the vote should be for block 15 - assert!(prevote.target_number == 15); - - // we push 20 more blocks to alice's chain - net.lock().peer(0).push_blocks(20, false); - - let interval = futures::stream::unfold(Delay::new(Duration::from_millis(200)), |delay| - Box::pin(async move { - delay.await; - Some(((), Delay::new(Duration::from_millis(200)))) - }) - ); - - interval - .take_while(move |_| { - future::ready(net2.lock().peer(1).client().info().best_number != 40) - }) - .for_each(|_| future::ready(())) - .await; - - let block_30_hash = - net.lock().peer(0).client().as_full().unwrap().hash(30).unwrap().unwrap(); - - // we restart alice's voter - voter_tx.unbounded_send(()).unwrap(); - - // and we push our own prevote for block 30 - let prevote = finality_grandpa::Prevote { - 
target_number: 30, - target_hash: block_30_hash, - }; - - // One should either be calling `Sink::send` or `Sink::start_send` followed - // by `Sink::poll_complete` to make sure items are being flushed. Given that - // we send in a loop including a delay until items are received, this can be - // ignored for the sake of reduced complexity. - Pin::new(&mut *round_tx.lock()).start_send(finality_grandpa::Message::Prevote(prevote)).unwrap(); - } else if state.compare_and_swap(1, 2, Ordering::SeqCst) == 1 { - // the next message we receive should be our own prevote - let prevote = match signed.message { - finality_grandpa::Message::Prevote(prevote) => prevote, - _ => panic!("We should receive our own prevote."), - }; - - // targeting block 30 - assert!(prevote.target_number == 30); - - // after alice restarts it should send its previous prevote - // therefore we won't ever receive it again since it will be a - // known message on the gossip layer - - } else if state.compare_and_swap(2, 3, Ordering::SeqCst) == 2 { - // we then receive a precommit from alice for block 15 - // even though we casted a prevote for block 30 - let precommit = match signed.message { - finality_grandpa::Message::Precommit(precommit) => precommit, - _ => panic!("voter should precommit."), - }; - - assert!(precommit.target_number == 15); - - // signal exit - exit_tx.clone().lock().take().unwrap().send(()).unwrap(); - } else { - panic!() - } - } - })); - } - - block_until_complete(exit_rx.into_future(), &net, &mut runtime); + let link = link.lock().take().unwrap(); + + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(this.keystore.clone()), + name: Some(format!("peer#{}", 0)), + is_authority: true, + observer_enabled: true, + }, + link, + network: this.net.lock().peers[0].network_service().clone(), + inherent_data_providers: InherentDataProviders::new(), + telemetry_on_connect: None, + voting_rule: 
VotingRulesBuilder::default().build(), + prometheus_registry: None, + }; + + let voter = run_grandpa_voter(grandpa_params) + .expect("all in order with client and network") + .map(move |r| { + // we need to keep the block_import alive since it owns the + // sender for the voter commands channel, if that gets dropped + // then the voter will stop + drop(_block_import); + r + }); + + this.voter = Box::pin(voter); + // notify current task in order to poll the voter + cx.waker().wake_by_ref(); + } + }; + + Poll::Pending + } + } + + // we create a "dummy" voter by setting it to `pending` and triggering the `tx`. + // this way, the `ResettableVoter` will reset its `voter` field to a value ASAP. + voter_tx.unbounded_send(()).unwrap(); + runtime.spawn(ResettableVoter { + voter: Box::pin(futures::future::pending()), + voter_rx, + net: net.clone(), + client: client.clone(), + keystore, + }); + } + + let (exit_tx, exit_rx) = futures::channel::oneshot::channel::<()>(); + + // create the communication layer for bob, but don't start any + // voter. instead we'll listen for the prevote that alice casts + // and cast our own manually + { + let (keystore, keystore_path) = create_keystore(peers[1]); + keystore_paths.push(keystore_path); + + let config = Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: Some(keystore), + name: Some(format!("peer#{}", 1)), + is_authority: true, + observer_enabled: true, + }; + + let set_state = { + let (_, _, _, _, link) = net + .lock() + .make_block_import::>( + client, + ); + let LinkHalf { + persistent_data, .. + } = link.lock().take().unwrap(); + let PersistentData { set_state, .. 
} = persistent_data; + set_state + }; + + let network = communication::NetworkBridge::new( + net.lock().peers[1].network_service().clone(), + config.clone(), + set_state, + None, + ); + + let (round_rx, round_tx) = network.round_communication( + communication::Round(1), + communication::SetId(0), + Arc::new(VoterSet::from_iter(voters)), + Some(peers[1].pair().into()), + HasVoted::No, + ); + + runtime.spawn(network); + + let round_tx = Arc::new(Mutex::new(round_tx)); + let exit_tx = Arc::new(Mutex::new(Some(exit_tx))); + + let net = net.clone(); + let state = Arc::new(AtomicUsize::new(0)); + + runtime.spawn(round_rx.for_each(move |signed| { + let net2 = net.clone(); + let net = net.clone(); + let voter_tx = voter_tx.clone(); + let round_tx = round_tx.clone(); + let state = state.clone(); + let exit_tx = exit_tx.clone(); + + async move { + if state.compare_and_swap(0, 1, Ordering::SeqCst) == 0 { + // the first message we receive should be a prevote from alice. + let prevote = match signed.message { + finality_grandpa::Message::Prevote(prevote) => prevote, + _ => panic!("voter should prevote."), + }; + + // its chain has 20 blocks and the voter targets 3/4 of the + // unfinalized chain, so the vote should be for block 15 + assert!(prevote.target_number == 15); + + // we push 20 more blocks to alice's chain + net.lock().peer(0).push_blocks(20, false); + + let interval = + futures::stream::unfold(Delay::new(Duration::from_millis(200)), |delay| { + Box::pin(async move { + delay.await; + Some(((), Delay::new(Duration::from_millis(200)))) + }) + }); + + interval + .take_while(move |_| { + future::ready(net2.lock().peer(1).client().info().best_number != 40) + }) + .for_each(|_| future::ready(())) + .await; + + let block_30_hash = net + .lock() + .peer(0) + .client() + .as_full() + .unwrap() + .hash(30) + .unwrap() + .unwrap(); + + // we restart alice's voter + voter_tx.unbounded_send(()).unwrap(); + + // and we push our own prevote for block 30 + let prevote = 
finality_grandpa::Prevote { + target_number: 30, + target_hash: block_30_hash, + }; + + // One should either be calling `Sink::send` or `Sink::start_send` followed + // by `Sink::poll_complete` to make sure items are being flushed. Given that + // we send in a loop including a delay until items are received, this can be + // ignored for the sake of reduced complexity. + Pin::new(&mut *round_tx.lock()) + .start_send(finality_grandpa::Message::Prevote(prevote)) + .unwrap(); + } else if state.compare_and_swap(1, 2, Ordering::SeqCst) == 1 { + // the next message we receive should be our own prevote + let prevote = match signed.message { + finality_grandpa::Message::Prevote(prevote) => prevote, + _ => panic!("We should receive our own prevote."), + }; + + // targeting block 30 + assert!(prevote.target_number == 30); + + // after alice restarts it should send its previous prevote + // therefore we won't ever receive it again since it will be a + // known message on the gossip layer + } else if state.compare_and_swap(2, 3, Ordering::SeqCst) == 2 { + // we then receive a precommit from alice for block 15 + // even though we casted a prevote for block 30 + let precommit = match signed.message { + finality_grandpa::Message::Precommit(precommit) => precommit, + _ => panic!("voter should precommit."), + }; + + assert!(precommit.target_number == 15); + + // signal exit + exit_tx.clone().lock().take().unwrap().send(()).unwrap(); + } else { + panic!() + } + } + })); + } + + block_until_complete(exit_rx.into_future(), &net, &mut runtime); } #[test] fn finalize_3_voters_1_light_observer() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - let authorities = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let voters = make_ids(authorities); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - for i in 0..4 { - 
assert_eq!(net.peer(i).client().info().best_number, 20, - "Peer #{} failed to sync", i); - } - - let net = Arc::new(Mutex::new(net)); - let link = net.lock().peer(3).data.lock().take().expect("link initialized on startup; qed"); - - let finality_notifications = net.lock().peer(3).client().finality_notification_stream() - .take_while(|n| { - future::ready(n.header.number() < &20) - }) - .collect::>(); - - run_to_completion_with(&mut runtime, 20, net.clone(), authorities, |executor| { - executor.spawn( - observer::run_grandpa_observer( - Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: None, - name: Some("observer".to_string()), - is_authority: false, - observer_enabled: true, - }, - link, - net.lock().peers[3].network_service().clone(), - ).unwrap() - ); - - Some(Box::pin(finality_notifications.map(|_| ()))) - }); + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + let authorities = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let voters = make_ids(authorities); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 4); + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + for i in 0..4 { + assert_eq!( + net.peer(i).client().info().best_number, + 20, + "Peer #{} failed to sync", + i + ); + } + + let net = Arc::new(Mutex::new(net)); + let link = net + .lock() + .peer(3) + .data + .lock() + .take() + .expect("link initialized on startup; qed"); + + let finality_notifications = net + .lock() + .peer(3) + .client() + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &20)) + .collect::>(); + + run_to_completion_with(&mut runtime, 20, net.clone(), authorities, |executor| { + executor.spawn( + observer::run_grandpa_observer( + Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: None, + name: Some("observer".to_string()), + is_authority: false, + observer_enabled: true, + }, + 
link, + net.lock().peers[3].network_service().clone(), + ) + .unwrap(), + ); + + Some(Box::pin(finality_notifications.map(|_| ()))) + }); } #[test] fn finality_proof_is_fetched_by_light_client_when_consensus_data_changes() { - let _ = ::env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let peers = &[Ed25519Keyring::Alice]; - let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); - net.add_light_peer(); - - // import block#1 WITH consensus data change. Light client ignores justification - // && instead fetches finality proof for block #1 - net.peer(0).push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); - let net = Arc::new(Mutex::new(net)); - run_to_completion(&mut runtime, 1, net.clone(), peers); - net.lock().block_until_sync(); - - // check that the block#1 is finalized on light client - runtime.block_on(futures::future::poll_fn(move |cx| { - if net.lock().peer(1).client().info().finalized_number == 1 { - Poll::Ready(()) - } else { - net.lock().poll(cx); - Poll::Pending - } - })); + let _ = ::env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let peers = &[Ed25519Keyring::Alice]; + let mut net = GrandpaTestNet::new(TestApi::new(make_ids(peers)), 1); + net.add_light_peer(); + + // import block#1 WITH consensus data change. 
Light client ignores justification + // && instead fetches finality proof for block #1 + net.peer(0) + .push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); + let net = Arc::new(Mutex::new(net)); + run_to_completion(&mut runtime, 1, net.clone(), peers); + net.lock().block_until_sync(); + + // check that the block#1 is finalized on light client + runtime.block_on(futures::future::poll_fn(move |cx| { + if net.lock().peer(1).client().info().finalized_number == 1 { + Poll::Ready(()) + } else { + net.lock().poll(cx); + Poll::Pending + } + })); } #[test] fn empty_finality_proof_is_returned_to_light_client_when_authority_set_is_different() { - // for debug: to ensure that without forced change light client will sync finality proof - const FORCE_CHANGE: bool = true; - - let _ = ::env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - // two of these guys are offline. - let genesis_authorities = if FORCE_CHANGE { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - Ed25519Keyring::One, - Ed25519Keyring::Two, - ] - } else { - vec![ - Ed25519Keyring::Alice, - Ed25519Keyring::Bob, - Ed25519Keyring::Charlie, - ] - }; - let peers_a = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob, Ed25519Keyring::Charlie]; - let api = TestApi::new(make_ids(&genesis_authorities)); - - let voters = make_ids(peers_a); - let net = GrandpaTestNet::new(api, 3); - let net = Arc::new(Mutex::new(net)); - - // best is #1 - net.lock().peer(0).generate_blocks(1, BlockOrigin::File, |builder| { - // add a forced transition at block 5. 
- let mut block = builder.build().unwrap().block; - if FORCE_CHANGE { - add_forced_change(&mut block, 0, ScheduledChange { - next_authorities: voters.clone(), - delay: 3, - }); - } - block - }); - - // ensure block#10 enacts authorities set change => justification is generated - // normally it will reach light client, but because of the forced change, it will not - net.lock().peer(0).push_blocks(8, false); // best is #9 - net.lock().peer(0).push_authorities_change_block( - vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])] - ); // #10 - net.lock().peer(0).push_blocks(1, false); // best is #11 - net.lock().block_until_sync(); - - // finalize block #11 on full clients - run_to_completion(&mut runtime, 11, net.clone(), peers_a); - - // request finalization by light client - net.lock().add_light_peer(); - net.lock().block_until_sync(); - - // check block, finalized on light client - assert_eq!( - net.lock().peer(3).client().info().finalized_number, - if FORCE_CHANGE { 0 } else { 10 }, - ); + // for debug: to ensure that without forced change light client will sync finality proof + const FORCE_CHANGE: bool = true; + + let _ = ::env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + // two of these guys are offline. + let genesis_authorities = if FORCE_CHANGE { + vec![ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + Ed25519Keyring::One, + Ed25519Keyring::Two, + ] + } else { + vec![ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ] + }; + let peers_a = &[ + Ed25519Keyring::Alice, + Ed25519Keyring::Bob, + Ed25519Keyring::Charlie, + ]; + let api = TestApi::new(make_ids(&genesis_authorities)); + + let voters = make_ids(peers_a); + let net = GrandpaTestNet::new(api, 3); + let net = Arc::new(Mutex::new(net)); + + // best is #1 + net.lock() + .peer(0) + .generate_blocks(1, BlockOrigin::File, |builder| { + // add a forced transition at block 5. 
+ let mut block = builder.build().unwrap().block; + if FORCE_CHANGE { + add_forced_change( + &mut block, + 0, + ScheduledChange { + next_authorities: voters.clone(), + delay: 3, + }, + ); + } + block + }); + + // ensure block#10 enacts authorities set change => justification is generated + // normally it will reach light client, but because of the forced change, it will not + net.lock().peer(0).push_blocks(8, false); // best is #9 + net.lock() + .peer(0) + .push_authorities_change_block(vec![sp_consensus_babe::AuthorityId::from_slice(&[42; 32])]); // #10 + net.lock().peer(0).push_blocks(1, false); // best is #11 + net.lock().block_until_sync(); + + // finalize block #11 on full clients + run_to_completion(&mut runtime, 11, net.clone(), peers_a); + + // request finalization by light client + net.lock().add_light_peer(); + net.lock().block_until_sync(); + + // check block, finalized on light client + assert_eq!( + net.lock().peer(3).client().info().finalized_number, + if FORCE_CHANGE { 0 } else { 10 }, + ); } #[test] fn voter_catches_up_to_latest_round_when_behind() { - let _ = env_logger::try_init(); - let mut runtime = Runtime::new().unwrap(); - - let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); - net.peer(0).push_blocks(50, false); - net.block_until_sync(); - - let net = Arc::new(Mutex::new(net)); - let mut finality_notifications = Vec::new(); - - let voter = |keystore, peer_id, link, net: Arc>| -> Pin + Send>> { - let grandpa_params = GrandpaParams { - config: Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore, - name: Some(format!("peer#{}", peer_id)), - is_authority: true, - observer_enabled: true, - }, - link, - network: net.lock().peer(peer_id).network_service().clone(), - inherent_data_providers: InherentDataProviders::new(), - telemetry_on_connect: None, - voting_rule: (), - prometheus_registry: None, - }; - - 
Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) - }; - - let mut keystore_paths = Vec::new(); - - // spawn authorities - for (peer_id, key) in peers.iter().enumerate() { - let (client, link) = { - let net = net.lock(); - let link = net.peers[peer_id].data.lock().take().expect("link initialized at startup; qed"); - ( - net.peers[peer_id].client().clone(), - link, - ) - }; - - finality_notifications.push( - client.finality_notification_stream() - .take_while(|n| future::ready(n.header.number() < &50)) - .for_each(move |_| future::ready(())) - ); - - let (keystore, keystore_path) = create_keystore(*key); - keystore_paths.push(keystore_path); - - let voter = voter(Some(keystore), peer_id, link, net.clone()); - - runtime.spawn(voter); - } - - // wait for them to finalize block 50. since they'll vote on 3/4 of the - // unfinalized chain it will take at least 4 rounds to do it. - let wait_for_finality = ::futures::future::join_all(finality_notifications); - - // spawn a new voter, it should be behind by at least 4 rounds and should be - // able to catch up to the latest round - let test = { - let net = net.clone(); - let runtime = runtime.handle().clone(); - - wait_for_finality.then(move |_| { - let peer_id = 2; - let link = { - let net = net.lock(); - let mut link = net.peers[peer_id].data.lock(); - link.take().expect("link initialized at startup; qed") - }; - - let set_state = link.persistent_data.set_state.clone(); - - let voter = voter(None, peer_id, link, net); - - runtime.spawn(voter); - - let start_time = std::time::Instant::now(); - let timeout = Duration::from_secs(5 * 60); - let wait_for_catch_up = futures::future::poll_fn(move |_| { - // The voter will start at round 1 and since everyone else is - // already at a later round the only way to get to round 4 (or - // later) is by issuing a catch up request. 
- if set_state.read().last_completed_round().number >= 4 { - Poll::Ready(()) - } else if start_time.elapsed() > timeout { - panic!("Timed out while waiting for catch up to happen") - } else { - Poll::Pending - } - }); - - wait_for_catch_up - }) - }; - - let drive_to_completion = futures::future::poll_fn(|cx| { - net.lock().poll(cx); Poll::<()>::Pending - }); - runtime.block_on( - future::select(test, drive_to_completion) - ); + let _ = env_logger::try_init(); + let mut runtime = Runtime::new().unwrap(); + + let peers = &[Ed25519Keyring::Alice, Ed25519Keyring::Bob]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 3); + net.peer(0).push_blocks(50, false); + net.block_until_sync(); + + let net = Arc::new(Mutex::new(net)); + let mut finality_notifications = Vec::new(); + + let voter = |keystore, + peer_id, + link, + net: Arc>| + -> Pin + Send>> { + let grandpa_params = GrandpaParams { + config: Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore, + name: Some(format!("peer#{}", peer_id)), + is_authority: true, + observer_enabled: true, + }, + link, + network: net.lock().peer(peer_id).network_service().clone(), + inherent_data_providers: InherentDataProviders::new(), + telemetry_on_connect: None, + voting_rule: (), + prometheus_registry: None, + }; + + Box::pin(run_grandpa_voter(grandpa_params).expect("all in order with client and network")) + }; + + let mut keystore_paths = Vec::new(); + + // spawn authorities + for (peer_id, key) in peers.iter().enumerate() { + let (client, link) = { + let net = net.lock(); + let link = net.peers[peer_id] + .data + .lock() + .take() + .expect("link initialized at startup; qed"); + (net.peers[peer_id].client().clone(), link) + }; + + finality_notifications.push( + client + .finality_notification_stream() + .take_while(|n| future::ready(n.header.number() < &50)) + .for_each(move |_| future::ready(())), + ); + + let (keystore, keystore_path) = 
create_keystore(*key); + keystore_paths.push(keystore_path); + + let voter = voter(Some(keystore), peer_id, link, net.clone()); + + runtime.spawn(voter); + } + + // wait for them to finalize block 50. since they'll vote on 3/4 of the + // unfinalized chain it will take at least 4 rounds to do it. + let wait_for_finality = ::futures::future::join_all(finality_notifications); + + // spawn a new voter, it should be behind by at least 4 rounds and should be + // able to catch up to the latest round + let test = { + let net = net.clone(); + let runtime = runtime.handle().clone(); + + wait_for_finality.then(move |_| { + let peer_id = 2; + let link = { + let net = net.lock(); + let mut link = net.peers[peer_id].data.lock(); + link.take().expect("link initialized at startup; qed") + }; + + let set_state = link.persistent_data.set_state.clone(); + + let voter = voter(None, peer_id, link, net); + + runtime.spawn(voter); + + let start_time = std::time::Instant::now(); + let timeout = Duration::from_secs(5 * 60); + let wait_for_catch_up = futures::future::poll_fn(move |_| { + // The voter will start at round 1 and since everyone else is + // already at a later round the only way to get to round 4 (or + // later) is by issuing a catch up request. 
+ if set_state.read().last_completed_round().number >= 4 { + Poll::Ready(()) + } else if start_time.elapsed() > timeout { + panic!("Timed out while waiting for catch up to happen") + } else { + Poll::Pending + } + }); + + wait_for_catch_up + }) + }; + + let drive_to_completion = futures::future::poll_fn(|cx| { + net.lock().poll(cx); + Poll::<()>::Pending + }); + runtime.block_on(future::select(test, drive_to_completion)); } #[test] fn grandpa_environment_respects_voting_rules() { - use finality_grandpa::Chain; - use sc_network_test::TestClient; - - let peers = &[Ed25519Keyring::Alice]; - let voters = make_ids(peers); - - let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); - let peer = net.peer(0); - let network_service = peer.network_service().clone(); - let link = peer.data.lock().take().unwrap(); - - // create a voter environment with a given voting rule - let environment = |voting_rule: Box>| { - let PersistentData { - ref authority_set, - ref consensus_changes, - ref set_state, - .. 
- } = link.persistent_data; - - let config = Config { - gossip_duration: TEST_GOSSIP_DURATION, - justification_period: 32, - keystore: None, - name: None, - is_authority: true, - observer_enabled: true, - }; - - let network = NetworkBridge::new( - network_service.clone(), - config.clone(), - set_state.clone(), - None, - ); - - Environment { - authority_set: authority_set.clone(), - config: config.clone(), - consensus_changes: consensus_changes.clone(), - client: link.client.clone(), - select_chain: link.select_chain.clone(), - set_id: authority_set.set_id(), - voter_set_state: set_state.clone(), - voters: Arc::new(authority_set.current_authorities()), - network, - voting_rule, - metrics: None, - _phantom: PhantomData, - } - }; - - // add 21 blocks - peer.push_blocks(21, false); - - // create an environment with no voting rule restrictions - let unrestricted_env = environment(Box::new(())); - - // another with 3/4 unfinalized chain voting rule restriction - let three_quarters_env = environment(Box::new( - voting_rule::ThreeQuartersOfTheUnfinalizedChain - )); - - // and another restricted with the default voting rules: i.e. 
3/4 rule and - // always below best block - let default_env = environment(Box::new( - VotingRulesBuilder::default().build() - )); - - // the unrestricted environment should just return the best block - assert_eq!( - unrestricted_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, - 21, - ); - - // both the other environments should return block 16, which is 3/4 of the - // way in the unfinalized chain - assert_eq!( - three_quarters_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, - 16, - ); - - assert_eq!( - default_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, - 16, - ); - - // we finalize block 19 with block 21 being the best block - peer.client().finalize_block(BlockId::Number(19), None, false).unwrap(); - - // the 3/4 environment should propose block 21 for voting - assert_eq!( - three_quarters_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, - 21, - ); - - // while the default environment will always still make sure we don't vote - // on the best block (2 behind) - assert_eq!( - default_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, - 19, - ); - - // we finalize block 21 with block 21 being the best block - peer.client().finalize_block(BlockId::Number(21), None, false).unwrap(); - - // even though the default environment will always try to not vote on the - // best block, there's a hard rule that we can't cast any votes lower than - // the given base (#21). 
- assert_eq!( - default_env.best_chain_containing( - peer.client().info().finalized_hash - ).unwrap().1, - 21, - ); + use finality_grandpa::Chain; + use sc_network_test::TestClient; + + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + + let mut net = GrandpaTestNet::new(TestApi::new(voters), 1); + let peer = net.peer(0); + let network_service = peer.network_service().clone(); + let link = peer.data.lock().take().unwrap(); + + // create a voter environment with a given voting rule + let environment = |voting_rule: Box>| { + let PersistentData { + ref authority_set, + ref consensus_changes, + ref set_state, + .. + } = link.persistent_data; + + let config = Config { + gossip_duration: TEST_GOSSIP_DURATION, + justification_period: 32, + keystore: None, + name: None, + is_authority: true, + observer_enabled: true, + }; + + let network = NetworkBridge::new( + network_service.clone(), + config.clone(), + set_state.clone(), + None, + ); + + Environment { + authority_set: authority_set.clone(), + config: config.clone(), + consensus_changes: consensus_changes.clone(), + client: link.client.clone(), + select_chain: link.select_chain.clone(), + set_id: authority_set.set_id(), + voter_set_state: set_state.clone(), + voters: Arc::new(authority_set.current_authorities()), + network, + voting_rule, + metrics: None, + _phantom: PhantomData, + } + }; + + // add 21 blocks + peer.push_blocks(21, false); + + // create an environment with no voting rule restrictions + let unrestricted_env = environment(Box::new(())); + + // another with 3/4 unfinalized chain voting rule restriction + let three_quarters_env = environment(Box::new(voting_rule::ThreeQuartersOfTheUnfinalizedChain)); + + // and another restricted with the default voting rules: i.e. 
3/4 rule and + // always below best block + let default_env = environment(Box::new(VotingRulesBuilder::default().build())); + + // the unrestricted environment should just return the best block + assert_eq!( + unrestricted_env + .best_chain_containing(peer.client().info().finalized_hash) + .unwrap() + .1, + 21, + ); + + // both the other environments should return block 16, which is 3/4 of the + // way in the unfinalized chain + assert_eq!( + three_quarters_env + .best_chain_containing(peer.client().info().finalized_hash) + .unwrap() + .1, + 16, + ); + + assert_eq!( + default_env + .best_chain_containing(peer.client().info().finalized_hash) + .unwrap() + .1, + 16, + ); + + // we finalize block 19 with block 21 being the best block + peer.client() + .finalize_block(BlockId::Number(19), None, false) + .unwrap(); + + // the 3/4 environment should propose block 21 for voting + assert_eq!( + three_quarters_env + .best_chain_containing(peer.client().info().finalized_hash) + .unwrap() + .1, + 21, + ); + + // while the default environment will always still make sure we don't vote + // on the best block (2 behind) + assert_eq!( + default_env + .best_chain_containing(peer.client().info().finalized_hash) + .unwrap() + .1, + 19, + ); + + // we finalize block 21 with block 21 being the best block + peer.client() + .finalize_block(BlockId::Number(21), None, false) + .unwrap(); + + // even though the default environment will always try to not vote on the + // best block, there's a hard rule that we can't cast any votes lower than + // the given base (#21). + assert_eq!( + default_env + .best_chain_containing(peer.client().info().finalized_hash) + .unwrap() + .1, + 21, + ); } #[test] fn imports_justification_for_regular_blocks_on_import() { - // NOTE: this is a regression test since initially we would only import - // justifications for authority change blocks, and would discard any - // existing justification otherwise. 
- let peers = &[Ed25519Keyring::Alice]; - let voters = make_ids(peers); - let api = TestApi::new(voters); - let mut net = GrandpaTestNet::new(api.clone(), 1); - - let client = net.peer(0).client().clone(); - let (mut block_import, ..) = net.make_block_import::< - TransactionFor - >(client.clone()); - - let full_client = client.as_full().expect("only full clients are used in test"); - let builder = full_client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - let block = builder.build().unwrap().block; - - let block_hash = block.hash(); - - // create a valid justification, with one precommit targeting the block - let justification = { - let round = 1; - let set_id = 0; - - let precommit = finality_grandpa::Precommit { - target_hash: block_hash, - target_number: *block.header.number(), - }; - - let msg = finality_grandpa::Message::Precommit(precommit.clone()); - let encoded = communication::localized_payload(round, set_id, &msg); - let signature = peers[0].sign(&encoded[..]).into(); - - let precommit = finality_grandpa::SignedPrecommit { - precommit, - signature, - id: peers[0].public().into(), - }; - - let commit = finality_grandpa::Commit { - target_hash: block_hash, - target_number: *block.header.number(), - precommits: vec![precommit], - }; - - GrandpaJustification::from_commit( - &full_client, - round, - commit, - ).unwrap() - }; - - // we import the block with justification attached - let mut import = BlockImportParams::new(BlockOrigin::File, block.header); - import.justification = Some(justification.encode()); - import.body = Some(block.extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - assert_eq!( - block_import.import_block(import, HashMap::new()).unwrap(), - ImportResult::Imported(ImportedAux { - needs_justification: false, - clear_justification_requests: false, - bad_justification: false, - is_new_best: true, - ..Default::default() - }), - ); - - // the justification should be imported and available from 
the client - assert!( - client.justification(&BlockId::Hash(block_hash)).unwrap().is_some(), - ); + // NOTE: this is a regression test since initially we would only import + // justifications for authority change blocks, and would discard any + // existing justification otherwise. + let peers = &[Ed25519Keyring::Alice]; + let voters = make_ids(peers); + let api = TestApi::new(voters); + let mut net = GrandpaTestNet::new(api.clone(), 1); + + let client = net.peer(0).client().clone(); + let (mut block_import, ..) = net + .make_block_import::>( + client.clone(), + ); + + let full_client = client + .as_full() + .expect("only full clients are used in test"); + let builder = full_client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + let block = builder.build().unwrap().block; + + let block_hash = block.hash(); + + // create a valid justification, with one precommit targeting the block + let justification = { + let round = 1; + let set_id = 0; + + let precommit = finality_grandpa::Precommit { + target_hash: block_hash, + target_number: *block.header.number(), + }; + + let msg = finality_grandpa::Message::Precommit(precommit.clone()); + let encoded = communication::localized_payload(round, set_id, &msg); + let signature = peers[0].sign(&encoded[..]).into(); + + let precommit = finality_grandpa::SignedPrecommit { + precommit, + signature, + id: peers[0].public().into(), + }; + + let commit = finality_grandpa::Commit { + target_hash: block_hash, + target_number: *block.header.number(), + precommits: vec![precommit], + }; + + GrandpaJustification::from_commit(&full_client, round, commit).unwrap() + }; + + // we import the block with justification attached + let mut import = BlockImportParams::new(BlockOrigin::File, block.header); + import.justification = Some(justification.encode()); + import.body = Some(block.extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + assert_eq!( + block_import.import_block(import, 
HashMap::new()).unwrap(), + ImportResult::Imported(ImportedAux { + needs_justification: false, + clear_justification_requests: false, + bad_justification: false, + is_new_best: true, + ..Default::default() + }), + ); + + // the justification should be imported and available from the client + assert!(client + .justification(&BlockId::Hash(block_hash)) + .unwrap() + .is_some(),); } diff --git a/client/finality-grandpa/src/until_imported.rs b/client/finality-grandpa/src/until_imported.rs index 40da7707b6..34d611a692 100644 --- a/client/finality-grandpa/src/until_imported.rs +++ b/client/finality-grandpa/src/until_imported.rs @@ -21,26 +21,21 @@ //! This is used for votes and commit messages currently. use super::{ - BlockStatus as BlockStatusT, - BlockSyncRequester as BlockSyncRequesterT, - CommunicationIn, - Error, - SignedMessage, + BlockStatus as BlockStatusT, BlockSyncRequester as BlockSyncRequesterT, CommunicationIn, Error, + SignedMessage, }; -use log::{debug, warn}; -use sp_utils::mpsc::TracingUnboundedReceiver; +use finality_grandpa::voter; use futures::prelude::*; use futures::stream::Fuse; use futures_timer::Delay; -use finality_grandpa::voter; +use log::{debug, warn}; use parking_lot::Mutex; -use prometheus_endpoint::{ - Gauge, U64, PrometheusError, register, Registry, -}; +use prometheus_endpoint::{register, Gauge, PrometheusError, Registry, U64}; use sc_client_api::{BlockImportNotification, ImportNotifications}; use sp_finality_grandpa::AuthorityId; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_utils::mpsc::TracingUnboundedReceiver; use std::collections::{HashMap, VecDeque}; use std::pin::Pin; @@ -55,18 +50,18 @@ const LOG_PENDING_INTERVAL: Duration = Duration::from_secs(15); /// For example a GRANDPA commit message which is not of any use without the corresponding block /// that it commits on. pub(crate) trait BlockUntilImported: Sized { - /// The type that is blocked on. 
- type Blocked; - - /// Check if a new incoming item needs awaiting until a block(s) is imported. - fn needs_waiting>( - input: Self::Blocked, - status_check: &S, - ) -> Result, Error>; - - /// called when the wait has completed. The canonical number is passed through - /// for further checks. - fn wait_completed(self, canon_number: NumberFor) -> Option; + /// The type that is blocked on. + type Blocked; + + /// Check if a new incoming item needs awaiting until a block(s) is imported. + fn needs_waiting>( + input: Self::Blocked, + status_check: &S, + ) -> Result, Error>; + + /// called when the wait has completed. The canonical number is passed through + /// for further checks. + fn wait_completed(self, canon_number: NumberFor) -> Option; } /// Describes whether a given [`BlockUntilImported`] (a) should be discarded, (b) is waiting for @@ -75,9 +70,9 @@ pub(crate) trait BlockUntilImported: Sized { /// A reason for discarding a [`BlockUntilImported`] would be if a referenced block is perceived /// under a different number than specified in the message. pub(crate) enum DiscardWaitOrReady { - Discard, - Wait(Vec<(Block::Hash, NumberFor, W)>), - Ready(R), + Discard, + Wait(Vec<(Block::Hash, NumberFor, W)>), + Ready(R), } /// Prometheus metrics for the `UntilImported` queue. @@ -91,290 +86,307 @@ pub(crate) enum DiscardWaitOrReady { // by subtracting the local_waiting_messages (the amount of messages left in the queue about to // be dropped) from the global_waiting_messages gauge. 
pub(crate) struct Metrics { - global_waiting_messages: Gauge, - local_waiting_messages: u64, + global_waiting_messages: Gauge, + local_waiting_messages: u64, } impl Metrics { - pub(crate) fn register(registry: &Registry) -> Result { - Ok(Self { - global_waiting_messages: register(Gauge::new( - "finality_grandpa_until_imported_waiting_messages_number", - "Number of finality grandpa messages waiting within the until imported queue.", - )?, registry)?, - local_waiting_messages: 0, - }) - } - - fn waiting_messages_inc(&mut self) { - self.local_waiting_messages += 1; - self.global_waiting_messages.inc(); - } - - fn waiting_messages_dec(&mut self) { - self.local_waiting_messages -= 1; - self.global_waiting_messages.dec(); - } + pub(crate) fn register(registry: &Registry) -> Result { + Ok(Self { + global_waiting_messages: register( + Gauge::new( + "finality_grandpa_until_imported_waiting_messages_number", + "Number of finality grandpa messages waiting within the until imported queue.", + )?, + registry, + )?, + local_waiting_messages: 0, + }) + } + + fn waiting_messages_inc(&mut self) { + self.local_waiting_messages += 1; + self.global_waiting_messages.inc(); + } + + fn waiting_messages_dec(&mut self) { + self.local_waiting_messages -= 1; + self.global_waiting_messages.dec(); + } } - impl Clone for Metrics { - fn clone(&self) -> Self { - Metrics { - global_waiting_messages: self.global_waiting_messages.clone(), - // When cloned, reset local_waiting_messages, so the global counter is not reduced a - // second time for the same messages on `drop` of the clone. - local_waiting_messages: 0, - } - } + fn clone(&self) -> Self { + Metrics { + global_waiting_messages: self.global_waiting_messages.clone(), + // When cloned, reset local_waiting_messages, so the global counter is not reduced a + // second time for the same messages on `drop` of the clone. 
+ local_waiting_messages: 0, + } + } } impl Drop for Metrics { - fn drop(&mut self) { - // Reduce the global counter by the amount of messages that were still left in the dropped - // queue. - self.global_waiting_messages.sub(self.local_waiting_messages) - } + fn drop(&mut self) { + // Reduce the global counter by the amount of messages that were still left in the dropped + // queue. + self.global_waiting_messages + .sub(self.local_waiting_messages) + } } /// Buffering imported messages until blocks with given hashes are imported. #[pin_project::pin_project] -pub(crate) struct UntilImported> { - import_notifications: Fuse>>, - block_sync_requester: BlockSyncRequester, - status_check: BlockStatus, - #[pin] - inner: Fuse, - ready: VecDeque, - /// Interval at which to check status of each awaited block. - check_pending: Pin> + Send>>, - /// Mapping block hashes to their block number, the point in time it was - /// first encountered (Instant) and a list of GRANDPA messages referencing - /// the block hash. - pending: HashMap, Instant, Vec)>, - - /// Queue identifier for differentiation in logs. - identifier: &'static str, - /// Prometheus metrics. - metrics: Option, +pub(crate) struct UntilImported< + Block: BlockT, + BlockStatus, + BlockSyncRequester, + I, + M: BlockUntilImported, +> { + import_notifications: Fuse>>, + block_sync_requester: BlockSyncRequester, + status_check: BlockStatus, + #[pin] + inner: Fuse, + ready: VecDeque, + /// Interval at which to check status of each awaited block. + check_pending: Pin> + Send>>, + /// Mapping block hashes to their block number, the point in time it was + /// first encountered (Instant) and a list of GRANDPA messages referencing + /// the block hash. + pending: HashMap, Instant, Vec)>, + + /// Queue identifier for differentiation in logs. + identifier: &'static str, + /// Prometheus metrics. 
+ metrics: Option, } -impl UntilImported where - Block: BlockT, - BlockStatus: BlockStatusT, - BlockSyncRequester: BlockSyncRequesterT, - I: Stream, - M: BlockUntilImported, +impl + UntilImported +where + Block: BlockT, + BlockStatus: BlockStatusT, + BlockSyncRequester: BlockSyncRequesterT, + I: Stream, + M: BlockUntilImported, { - /// Create a new `UntilImported` wrapper. - pub(crate) fn new( - import_notifications: ImportNotifications, - block_sync_requester: BlockSyncRequester, - status_check: BlockStatus, - stream: I, - identifier: &'static str, - metrics: Option, - ) -> Self { - // how often to check if pending messages that are waiting for blocks to be - // imported can be checked. - // - // the import notifications interval takes care of most of this; this is - // used in the event of missed import notifications - const CHECK_PENDING_INTERVAL: Duration = Duration::from_secs(5); - - let check_pending = futures::stream::unfold(Delay::new(CHECK_PENDING_INTERVAL), |delay| - Box::pin(async move { - delay.await; - Some((Ok(()), Delay::new(CHECK_PENDING_INTERVAL))) - })); - - UntilImported { - import_notifications: import_notifications.fuse(), - block_sync_requester, - status_check, - inner: stream.fuse(), - ready: VecDeque::new(), - check_pending: Box::pin(check_pending), - pending: HashMap::new(), - identifier, - metrics, - } - } + /// Create a new `UntilImported` wrapper. + pub(crate) fn new( + import_notifications: ImportNotifications, + block_sync_requester: BlockSyncRequester, + status_check: BlockStatus, + stream: I, + identifier: &'static str, + metrics: Option, + ) -> Self { + // how often to check if pending messages that are waiting for blocks to be + // imported can be checked. 
+ // + // the import notifications interval takes care of most of this; this is + // used in the event of missed import notifications + const CHECK_PENDING_INTERVAL: Duration = Duration::from_secs(5); + + let check_pending = futures::stream::unfold(Delay::new(CHECK_PENDING_INTERVAL), |delay| { + Box::pin(async move { + delay.await; + Some((Ok(()), Delay::new(CHECK_PENDING_INTERVAL))) + }) + }); + + UntilImported { + import_notifications: import_notifications.fuse(), + block_sync_requester, + status_check, + inner: stream.fuse(), + ready: VecDeque::new(), + check_pending: Box::pin(check_pending), + pending: HashMap::new(), + identifier, + metrics, + } + } } -impl Stream for UntilImported where - Block: BlockT, - BStatus: BlockStatusT, - BSyncRequester: BlockSyncRequesterT, - I: Stream, - M: BlockUntilImported, +impl Stream + for UntilImported +where + Block: BlockT, + BStatus: BlockStatusT, + BSyncRequester: BlockSyncRequesterT, + I: Stream, + M: BlockUntilImported, { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - // We are using a `this` variable in order to allow multiple simultaneous mutable borrow - // to `self`. - let mut this = self.project(); - - loop { - match Stream::poll_next(Pin::new(&mut this.inner), cx) { - Poll::Ready(None) => return Poll::Ready(None), - Poll::Ready(Some(input)) => { - // new input: schedule wait of any parts which require - // blocks to be known. - match M::needs_waiting(input, this.status_check)? 
{ - DiscardWaitOrReady::Discard => {}, - DiscardWaitOrReady::Wait(items) => { - for (target_hash, target_number, wait) in items { - this.pending - .entry(target_hash) - .or_insert_with(|| (target_number, Instant::now(), Vec::new())) - .2 - .push(wait) - } - }, - DiscardWaitOrReady::Ready(item) => this.ready.push_back(item), - } - - if let Some(metrics) = &mut this.metrics { - metrics.waiting_messages_inc(); - } - } - Poll::Pending => break, - } - } - - loop { - match Stream::poll_next(Pin::new(&mut this.import_notifications), cx) { - Poll::Ready(None) => return Poll::Ready(None), - Poll::Ready(Some(notification)) => { - // new block imported. queue up all messages tied to that hash. - if let Some((_, _, messages)) = this.pending.remove(¬ification.hash) { - let canon_number = notification.header.number().clone(); - let ready_messages = messages.into_iter() - .filter_map(|m| m.wait_completed(canon_number)); - - this.ready.extend(ready_messages); - } - } - Poll::Pending => break, - } - } - - let mut update_interval = false; - while let Poll::Ready(Some(Ok(()))) = this.check_pending.poll_next_unpin(cx) { - update_interval = true; - } - - if update_interval { - let mut known_keys = Vec::new(); - for (&block_hash, &mut (block_number, ref mut last_log, ref v)) in this.pending.iter_mut() { - if let Some(number) = this.status_check.block_number(block_hash)? { - known_keys.push((block_hash, number)); - } else { - let next_log = *last_log + LOG_PENDING_INTERVAL; - if Instant::now() >= next_log { - debug!( - target: "afg", - "Waiting to import block {} before {} {} messages can be imported. \ - Requesting network sync service to retrieve block from. \ - Possible fork?", - block_hash, - v.len(), - this.identifier, - ); - - // NOTE: when sending an empty vec of peers the - // underlying should make a best effort to sync the - // block from any peers it knows about. 
- this.block_sync_requester.set_sync_fork_request( - vec![], - block_hash, - block_number, - ); - - *last_log = next_log; - } - } - } - - for (known_hash, canon_number) in known_keys { - if let Some((_, _, pending_messages)) = this.pending.remove(&known_hash) { - let ready_messages = pending_messages.into_iter() - .filter_map(|m| m.wait_completed(canon_number)); - - this.ready.extend(ready_messages); - } - } - } - - if let Some(ready) = this.ready.pop_front() { - if let Some(metrics) = &mut this.metrics { - metrics.waiting_messages_dec(); - } - return Poll::Ready(Some(Ok(ready))) - } - - if this.import_notifications.is_done() && this.inner.is_done() { - Poll::Ready(None) - } else { - Poll::Pending - } - } + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // We are using a `this` variable in order to allow multiple simultaneous mutable borrow + // to `self`. + let mut this = self.project(); + + loop { + match Stream::poll_next(Pin::new(&mut this.inner), cx) { + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some(input)) => { + // new input: schedule wait of any parts which require + // blocks to be known. + match M::needs_waiting(input, this.status_check)? { + DiscardWaitOrReady::Discard => {} + DiscardWaitOrReady::Wait(items) => { + for (target_hash, target_number, wait) in items { + this.pending + .entry(target_hash) + .or_insert_with(|| (target_number, Instant::now(), Vec::new())) + .2 + .push(wait) + } + } + DiscardWaitOrReady::Ready(item) => this.ready.push_back(item), + } + + if let Some(metrics) = &mut this.metrics { + metrics.waiting_messages_inc(); + } + } + Poll::Pending => break, + } + } + + loop { + match Stream::poll_next(Pin::new(&mut this.import_notifications), cx) { + Poll::Ready(None) => return Poll::Ready(None), + Poll::Ready(Some(notification)) => { + // new block imported. queue up all messages tied to that hash. 
+ if let Some((_, _, messages)) = this.pending.remove(¬ification.hash) { + let canon_number = notification.header.number().clone(); + let ready_messages = messages + .into_iter() + .filter_map(|m| m.wait_completed(canon_number)); + + this.ready.extend(ready_messages); + } + } + Poll::Pending => break, + } + } + + let mut update_interval = false; + while let Poll::Ready(Some(Ok(()))) = this.check_pending.poll_next_unpin(cx) { + update_interval = true; + } + + if update_interval { + let mut known_keys = Vec::new(); + for (&block_hash, &mut (block_number, ref mut last_log, ref v)) in + this.pending.iter_mut() + { + if let Some(number) = this.status_check.block_number(block_hash)? { + known_keys.push((block_hash, number)); + } else { + let next_log = *last_log + LOG_PENDING_INTERVAL; + if Instant::now() >= next_log { + debug!( + target: "afg", + "Waiting to import block {} before {} {} messages can be imported. \ + Requesting network sync service to retrieve block from. \ + Possible fork?", + block_hash, + v.len(), + this.identifier, + ); + + // NOTE: when sending an empty vec of peers the + // underlying should make a best effort to sync the + // block from any peers it knows about. 
+ this.block_sync_requester.set_sync_fork_request( + vec![], + block_hash, + block_number, + ); + + *last_log = next_log; + } + } + } + + for (known_hash, canon_number) in known_keys { + if let Some((_, _, pending_messages)) = this.pending.remove(&known_hash) { + let ready_messages = pending_messages + .into_iter() + .filter_map(|m| m.wait_completed(canon_number)); + + this.ready.extend(ready_messages); + } + } + } + + if let Some(ready) = this.ready.pop_front() { + if let Some(metrics) = &mut this.metrics { + metrics.waiting_messages_dec(); + } + return Poll::Ready(Some(Ok(ready))); + } + + if this.import_notifications.is_done() && this.inner.is_done() { + Poll::Ready(None) + } else { + Poll::Pending + } + } } fn warn_authority_wrong_target(hash: H, id: AuthorityId) { - warn!( - target: "afg", - "Authority {:?} signed GRANDPA message with \ - wrong block number for hash {}", - id, - hash, - ); + warn!( + target: "afg", + "Authority {:?} signed GRANDPA message with \ + wrong block number for hash {}", + id, + hash, + ); } impl BlockUntilImported for SignedMessage { - type Blocked = Self; - - fn needs_waiting>( - msg: Self::Blocked, - status_check: &BlockStatus, - ) -> Result, Error> { - let (&target_hash, target_number) = msg.target(); - - if let Some(number) = status_check.block_number(target_hash)? 
{ - if number != target_number { - warn_authority_wrong_target(target_hash, msg.id); - return Ok(DiscardWaitOrReady::Discard); - } else { - return Ok(DiscardWaitOrReady::Ready(msg)); - } - } - - return Ok(DiscardWaitOrReady::Wait(vec![(target_hash, target_number, msg)])) - } - - fn wait_completed(self, canon_number: NumberFor) -> Option { - let (&target_hash, target_number) = self.target(); - if canon_number != target_number { - warn_authority_wrong_target(target_hash, self.id); - - None - } else { - Some(self) - } - } + type Blocked = Self; + + fn needs_waiting>( + msg: Self::Blocked, + status_check: &BlockStatus, + ) -> Result, Error> { + let (&target_hash, target_number) = msg.target(); + + if let Some(number) = status_check.block_number(target_hash)? { + if number != target_number { + warn_authority_wrong_target(target_hash, msg.id); + return Ok(DiscardWaitOrReady::Discard); + } else { + return Ok(DiscardWaitOrReady::Ready(msg)); + } + } + + return Ok(DiscardWaitOrReady::Wait(vec![( + target_hash, + target_number, + msg, + )])); + } + + fn wait_completed(self, canon_number: NumberFor) -> Option { + let (&target_hash, target_number) = self.target(); + if canon_number != target_number { + warn_authority_wrong_target(target_hash, self.id); + + None + } else { + Some(self) + } + } } /// Helper type definition for the stream which waits until vote targets for /// signed messages are imported. -pub(crate) type UntilVoteTargetImported = UntilImported< - Block, - BlockStatus, - BlockSyncRequester, - I, - SignedMessage, ->; +pub(crate) type UntilVoteTargetImported = + UntilImported>; /// This blocks a global message import, i.e. a commit or catch up messages, /// until all blocks referenced in its votes are known. 
@@ -386,702 +398,669 @@ pub(crate) type UntilVoteTargetImported { - inner: Arc>>>, - target_number: NumberFor, + inner: Arc>>>, + target_number: NumberFor, } impl Unpin for BlockGlobalMessage {} impl BlockUntilImported for BlockGlobalMessage { - type Blocked = CommunicationIn; - - fn needs_waiting>( - input: Self::Blocked, - status_check: &BlockStatus, - ) -> Result, Error> { - use std::collections::hash_map::Entry; - - enum KnownOrUnknown { - Known(N), - Unknown(N), - } - - impl KnownOrUnknown { - fn number(&self) -> &N { - match *self { - KnownOrUnknown::Known(ref n) => n, - KnownOrUnknown::Unknown(ref n) => n, - } - } - } - - let mut checked_hashes: HashMap<_, KnownOrUnknown>> = HashMap::new(); - - { - // returns false when should early exit. - let mut query_known = |target_hash, perceived_number| -> Result { - // check integrity: all votes for same hash have same number. - let canon_number = match checked_hashes.entry(target_hash) { - Entry::Occupied(entry) => entry.get().number().clone(), - Entry::Vacant(entry) => { - if let Some(number) = status_check.block_number(target_hash)? { - entry.insert(KnownOrUnknown::Known(number)); - number - - } else { - entry.insert(KnownOrUnknown::Unknown(perceived_number)); - perceived_number - } - } - }; - - if canon_number != perceived_number { - // invalid global message: messages targeting wrong number - // or at least different from other vote in same global - // message. - return Ok(false); - } - - Ok(true) - }; - - match input { - voter::CommunicationIn::Commit(_, ref commit, ..) => { - // add known hashes from all precommits. - let precommit_targets = commit.precommits - .iter() - .map(|c| (c.target_number, c.target_hash)); - - for (target_number, target_hash) in precommit_targets { - if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard); - } - } - }, - voter::CommunicationIn::CatchUp(ref catch_up, ..) => { - // add known hashes from all prevotes and precommits. 
- let prevote_targets = catch_up.prevotes - .iter() - .map(|s| (s.prevote.target_number, s.prevote.target_hash)); - - let precommit_targets = catch_up.precommits - .iter() - .map(|s| (s.precommit.target_number, s.precommit.target_hash)); - - let targets = prevote_targets.chain(precommit_targets); - - for (target_number, target_hash) in targets { - if !query_known(target_hash, target_number)? { - return Ok(DiscardWaitOrReady::Discard); - } - } - }, - }; - } - - let unknown_hashes = checked_hashes.into_iter().filter_map(|(hash, num)| match num { - KnownOrUnknown::Unknown(number) => Some((hash, number)), - KnownOrUnknown::Known(_) => None, - }).collect::>(); - - if unknown_hashes.is_empty() { - // none of the hashes in the global message were unknown. - // we can just return the message directly. - return Ok(DiscardWaitOrReady::Ready(input)); - } - - let locked_global = Arc::new(Mutex::new(Some(input))); - - let items_to_await = unknown_hashes.into_iter().map(|(hash, target_number)| { - (hash, target_number, BlockGlobalMessage { inner: locked_global.clone(), target_number }) - }).collect(); - - // schedule waits for all unknown messages. - // when the last one of these has `wait_completed` called on it, - // the global message will be returned. - Ok(DiscardWaitOrReady::Wait(items_to_await)) - } - - fn wait_completed(self, canon_number: NumberFor) -> Option { - if self.target_number != canon_number { - // Delete the inner message so it won't ever be forwarded. Future calls to - // `wait_completed` on the same `inner` will ignore it. - *self.inner.lock() = None; - return None; - } - - match Arc::try_unwrap(self.inner) { - // This is the last reference and thus the last outstanding block to be awaited. `inner` - // is either `Some(_)` or `None`. The latter implies that a previous `wait_completed` - // call witnessed a block number mismatch (see above). 
- Ok(inner) => Mutex::into_inner(inner), - // There are still other strong references to this `Arc`, thus the message is blocked on - // other blocks to be imported. - Err(_) => None, - } - } + type Blocked = CommunicationIn; + + fn needs_waiting>( + input: Self::Blocked, + status_check: &BlockStatus, + ) -> Result, Error> { + use std::collections::hash_map::Entry; + + enum KnownOrUnknown { + Known(N), + Unknown(N), + } + + impl KnownOrUnknown { + fn number(&self) -> &N { + match *self { + KnownOrUnknown::Known(ref n) => n, + KnownOrUnknown::Unknown(ref n) => n, + } + } + } + + let mut checked_hashes: HashMap<_, KnownOrUnknown>> = HashMap::new(); + + { + // returns false when should early exit. + let mut query_known = |target_hash, perceived_number| -> Result { + // check integrity: all votes for same hash have same number. + let canon_number = match checked_hashes.entry(target_hash) { + Entry::Occupied(entry) => entry.get().number().clone(), + Entry::Vacant(entry) => { + if let Some(number) = status_check.block_number(target_hash)? { + entry.insert(KnownOrUnknown::Known(number)); + number + } else { + entry.insert(KnownOrUnknown::Unknown(perceived_number)); + perceived_number + } + } + }; + + if canon_number != perceived_number { + // invalid global message: messages targeting wrong number + // or at least different from other vote in same global + // message. + return Ok(false); + } + + Ok(true) + }; + + match input { + voter::CommunicationIn::Commit(_, ref commit, ..) => { + // add known hashes from all precommits. + let precommit_targets = commit + .precommits + .iter() + .map(|c| (c.target_number, c.target_hash)); + + for (target_number, target_hash) in precommit_targets { + if !query_known(target_hash, target_number)? { + return Ok(DiscardWaitOrReady::Discard); + } + } + } + voter::CommunicationIn::CatchUp(ref catch_up, ..) => { + // add known hashes from all prevotes and precommits. 
+ let prevote_targets = catch_up + .prevotes + .iter() + .map(|s| (s.prevote.target_number, s.prevote.target_hash)); + + let precommit_targets = catch_up + .precommits + .iter() + .map(|s| (s.precommit.target_number, s.precommit.target_hash)); + + let targets = prevote_targets.chain(precommit_targets); + + for (target_number, target_hash) in targets { + if !query_known(target_hash, target_number)? { + return Ok(DiscardWaitOrReady::Discard); + } + } + } + }; + } + + let unknown_hashes = checked_hashes + .into_iter() + .filter_map(|(hash, num)| match num { + KnownOrUnknown::Unknown(number) => Some((hash, number)), + KnownOrUnknown::Known(_) => None, + }) + .collect::>(); + + if unknown_hashes.is_empty() { + // none of the hashes in the global message were unknown. + // we can just return the message directly. + return Ok(DiscardWaitOrReady::Ready(input)); + } + + let locked_global = Arc::new(Mutex::new(Some(input))); + + let items_to_await = unknown_hashes + .into_iter() + .map(|(hash, target_number)| { + ( + hash, + target_number, + BlockGlobalMessage { + inner: locked_global.clone(), + target_number, + }, + ) + }) + .collect(); + + // schedule waits for all unknown messages. + // when the last one of these has `wait_completed` called on it, + // the global message will be returned. + Ok(DiscardWaitOrReady::Wait(items_to_await)) + } + + fn wait_completed(self, canon_number: NumberFor) -> Option { + if self.target_number != canon_number { + // Delete the inner message so it won't ever be forwarded. Future calls to + // `wait_completed` on the same `inner` will ignore it. + *self.inner.lock() = None; + return None; + } + + match Arc::try_unwrap(self.inner) { + // This is the last reference and thus the last outstanding block to be awaited. `inner` + // is either `Some(_)` or `None`. The latter implies that a previous `wait_completed` + // call witnessed a block number mismatch (see above). 
+ Ok(inner) => Mutex::into_inner(inner), + // There are still other strong references to this `Arc`, thus the message is blocked on + // other blocks to be imported. + Err(_) => None, + } + } } /// A stream which gates off incoming global messages, i.e. commit and catch up /// messages, until all referenced block hashes have been imported. -pub(crate) type UntilGlobalMessageBlocksImported = UntilImported< - Block, - BlockStatus, - BlockSyncRequester, - I, - BlockGlobalMessage, ->; +pub(crate) type UntilGlobalMessageBlocksImported = + UntilImported>; #[cfg(test)] mod tests { - use super::*; - use crate::{CatchUp, CompactCommit}; - use substrate_test_runtime_client::runtime::{Block, Hash, Header}; - use sp_consensus::BlockOrigin; - use sc_client_api::BlockImportNotification; - use futures::future::Either; - use futures_timer::Delay; - use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; - use finality_grandpa::Precommit; - - #[derive(Clone)] - struct TestChainState { - sender: TracingUnboundedSender>, - known_blocks: Arc>>, - } - - impl TestChainState { - fn new() -> (Self, ImportNotifications) { - let (tx, rx) = tracing_unbounded("test"); - let state = TestChainState { - sender: tx, - known_blocks: Arc::new(Mutex::new(HashMap::new())), - }; - - (state, rx) - } - - fn block_status(&self) -> TestBlockStatus { - TestBlockStatus { inner: self.known_blocks.clone() } - } - - fn import_header(&self, header: Header) { - let hash = header.hash(); - let number = header.number().clone(); - - self.known_blocks.lock().insert(hash, number); - self.sender.unbounded_send(BlockImportNotification { - hash, - origin: BlockOrigin::File, - header, - is_new_best: false, - retracted: vec![], - }).unwrap(); - } - } - - struct TestBlockStatus { - inner: Arc>>, - } - - impl BlockStatusT for TestBlockStatus { - fn block_number(&self, hash: Hash) -> Result, Error> { - Ok(self.inner.lock().get(&hash).map(|x| x.clone())) - } - } - - #[derive(Clone)] - struct TestBlockSyncRequester { 
- requests: Arc)>>>, - } - - impl Default for TestBlockSyncRequester { - fn default() -> Self { - TestBlockSyncRequester { - requests: Arc::new(Mutex::new(Vec::new())), - } - } - } - - impl BlockSyncRequesterT for TestBlockSyncRequester { - fn set_sync_fork_request(&self, _peers: Vec, hash: Hash, number: NumberFor) { - self.requests.lock().push((hash, number)); - } - } - - fn make_header(number: u64) -> Header { - Header::new( - number, - Default::default(), - Default::default(), - Default::default(), - Default::default(), - ) - } - - // unwrap the commit from `CommunicationIn` returning its fields in a tuple, - // panics if the given message isn't a commit - fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit::) { - match msg { - voter::CommunicationIn::Commit(round, commit, ..) => (round, commit), - _ => panic!("expected commit"), - } - } - - // unwrap the catch up from `CommunicationIn` returning its inner representation, - // panics if the given message isn't a catch up - fn unapply_catch_up(msg: CommunicationIn) -> CatchUp { - match msg { - voter::CommunicationIn::CatchUp(catch_up, ..) 
=> catch_up, - _ => panic!("expected catch up"), - } - } - - fn message_all_dependencies_satisfied( - msg: CommunicationIn, - enact_dependencies: F, - ) -> CommunicationIn where - F: FnOnce(&TestChainState), - { - let (chain_state, import_notifications) = TestChainState::new(); - let block_status = chain_state.block_status(); - - // enact all dependencies before importing the message - enact_dependencies(&chain_state); - - let (global_tx, global_rx) = tracing_unbounded("test"); - - let until_imported = UntilGlobalMessageBlocksImported::new( - import_notifications, - TestBlockSyncRequester::default(), - block_status, - global_rx, - "global", - None, - ); - - global_tx.unbounded_send(msg).unwrap(); - - let work = until_imported.into_future(); - - futures::executor::block_on(work).0.unwrap().unwrap() - } - - fn blocking_message_on_dependencies( - msg: CommunicationIn, - enact_dependencies: F, - ) -> CommunicationIn where - F: FnOnce(&TestChainState), - { - let (chain_state, import_notifications) = TestChainState::new(); - let block_status = chain_state.block_status(); - - let (global_tx, global_rx) = tracing_unbounded("test"); - - let until_imported = UntilGlobalMessageBlocksImported::new( - import_notifications, - TestBlockSyncRequester::default(), - block_status, - global_rx, - "global", - None, - ); - - global_tx.unbounded_send(msg).unwrap(); - - // NOTE: needs to be cloned otherwise it is moved to the stream and - // dropped too early. - let inner_chain_state = chain_state.clone(); - let work = future::select(until_imported.into_future(), Delay::new(Duration::from_millis(100))) - .then(move |res| match res { - Either::Left(_) => panic!("timeout should have fired first"), - Either::Right((_, until_imported)) => { - // timeout fired. push in the headers. 
- enact_dependencies(&inner_chain_state); - - until_imported - } - }); - - futures::executor::block_on(work).0.unwrap().unwrap() - } - - #[test] - fn blocking_commit_message() { - let h1 = make_header(5); - let h2 = make_header(6); - let h3 = make_header(7); - - let unknown_commit = CompactCommit:: { - target_hash: h1.hash(), - target_number: 5, - precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, - ], - auth_data: Vec::new(), // not used - }; - - let unknown_commit = || voter::CommunicationIn::Commit( - 0, - unknown_commit.clone(), - voter::Callback::Blank, - ); - - let res = blocking_message_on_dependencies( - unknown_commit(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); - - assert_eq!( - unapply_commit(res), - unapply_commit(unknown_commit()), - ); - } - - #[test] - fn commit_message_all_known() { - let h1 = make_header(5); - let h2 = make_header(6); - let h3 = make_header(7); - - let known_commit = CompactCommit:: { - target_hash: h1.hash(), - target_number: 5, - precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, - ], - auth_data: Vec::new(), // not used - }; - - let known_commit = || voter::CommunicationIn::Commit( - 0, - known_commit.clone(), - voter::Callback::Blank, - ); - - let res = message_all_dependencies_satisfied( - known_commit(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); - - assert_eq!( - unapply_commit(res), - unapply_commit(known_commit()), - ); - } - - #[test] - fn blocking_catch_up_message() { - let h1 = make_header(5); - let h2 = make_header(6); - let h3 = make_header(7); - - let signed_prevote = |header: &Header| { - finality_grandpa::SignedPrevote { - id: Default::default(), - signature: 
Default::default(), - prevote: finality_grandpa::Prevote { - target_hash: header.hash(), - target_number: *header.number(), - }, - } - }; - - let signed_precommit = |header: &Header| { - finality_grandpa::SignedPrecommit { - id: Default::default(), - signature: Default::default(), - precommit: finality_grandpa::Precommit { - target_hash: header.hash(), - target_number: *header.number(), - }, - } - }; - - let prevotes = vec![ - signed_prevote(&h1), - signed_prevote(&h3), - ]; - - let precommits = vec![ - signed_precommit(&h1), - signed_precommit(&h2), - ]; - - let unknown_catch_up = finality_grandpa::CatchUp { - round_number: 1, - prevotes, - precommits, - base_hash: h1.hash(), - base_number: *h1.number(), - }; - - let unknown_catch_up = || voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); - - let res = blocking_message_on_dependencies( - unknown_catch_up(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); - - assert_eq!( - unapply_catch_up(res), - unapply_catch_up(unknown_catch_up()), - ); - } - - #[test] - fn catch_up_message_all_known() { - let h1 = make_header(5); - let h2 = make_header(6); - let h3 = make_header(7); - - let signed_prevote = |header: &Header| { - finality_grandpa::SignedPrevote { - id: Default::default(), - signature: Default::default(), - prevote: finality_grandpa::Prevote { - target_hash: header.hash(), - target_number: *header.number(), - }, - } - }; - - let signed_precommit = |header: &Header| { - finality_grandpa::SignedPrecommit { - id: Default::default(), - signature: Default::default(), - precommit: finality_grandpa::Precommit { - target_hash: header.hash(), - target_number: *header.number(), - }, - } - }; - - let prevotes = vec![ - signed_prevote(&h1), - signed_prevote(&h3), - ]; - - let precommits = vec![ - signed_precommit(&h1), - signed_precommit(&h2), - ]; - - let unknown_catch_up = finality_grandpa::CatchUp { - 
round_number: 1, - prevotes, - precommits, - base_hash: h1.hash(), - base_number: *h1.number(), - }; - - let unknown_catch_up = || voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); - - let res = message_all_dependencies_satisfied( - unknown_catch_up(), - |chain_state| { - chain_state.import_header(h1); - chain_state.import_header(h2); - chain_state.import_header(h3); - }, - ); - - assert_eq!( - unapply_catch_up(res), - unapply_catch_up(unknown_catch_up()), - ); - } - - #[test] - fn request_block_sync_for_needed_blocks() { - let (chain_state, import_notifications) = TestChainState::new(); - let block_status = chain_state.block_status(); - - let (global_tx, global_rx) = tracing_unbounded("test"); - - let block_sync_requester = TestBlockSyncRequester::default(); - - let until_imported = UntilGlobalMessageBlocksImported::new( - import_notifications, - block_sync_requester.clone(), - block_status, - global_rx, - "global", - None, - ); - - let h1 = make_header(5); - let h2 = make_header(6); - let h3 = make_header(7); - - // we create a commit message, with precommits for blocks 6 and 7 which - // we haven't imported. 
- let unknown_commit = CompactCommit:: { - target_hash: h1.hash(), - target_number: 5, - precommits: vec![ - Precommit { - target_hash: h2.hash(), - target_number: 6, - }, - Precommit { - target_hash: h3.hash(), - target_number: 7, - }, - ], - auth_data: Vec::new(), // not used - }; - - let unknown_commit = || voter::CommunicationIn::Commit( - 0, - unknown_commit.clone(), - voter::Callback::Blank, - ); - - // we send the commit message and spawn the until_imported stream - global_tx.unbounded_send(unknown_commit()).unwrap(); - - let threads_pool = futures::executor::ThreadPool::new().unwrap(); - threads_pool.spawn_ok(until_imported.into_future().map(|_| ())); - - // assert that we will make sync requests - let assert = futures::future::poll_fn(|_| { - let block_sync_requests = block_sync_requester.requests.lock(); - - // we request blocks targeted by the precommits that aren't imported - if block_sync_requests.contains(&(h2.hash(), *h2.number())) && - block_sync_requests.contains(&(h3.hash(), *h3.number())) - { - return Poll::Ready(()); - } - - Poll::Pending - }); - - // the `until_imported` stream doesn't request the blocks immediately, - // but it should request them after a small timeout - let timeout = Delay::new(Duration::from_secs(60)); - let test = future::select(assert, timeout).map(|res| match res { - Either::Left(_) => {}, - Either::Right(_) => panic!("timed out waiting for block sync request"), - }).map(drop); - - futures::executor::block_on(test); - } - - fn test_catch_up() -> Arc>>> { - let header = make_header(5); - - let unknown_catch_up = finality_grandpa::CatchUp { - round_number: 1, - precommits: vec![], - prevotes: vec![], - base_hash: header.hash(), - base_number: *header.number(), - }; - - let catch_up = voter::CommunicationIn::CatchUp( - unknown_catch_up.clone(), - voter::Callback::Blank, - ); - - Arc::new(Mutex::new(Some(catch_up))) - } - - #[test] - fn block_global_message_wait_completed_return_when_all_awaited() { - let msg_inner = 
test_catch_up(); - - let waiting_block_1 = BlockGlobalMessage:: { - inner: msg_inner.clone(), - target_number: 1, - }; - - let waiting_block_2 = BlockGlobalMessage:: { - inner: msg_inner, - target_number: 2, - }; - - // waiting_block_2 is still waiting for block 2, thus this should return `None`. - assert!(waiting_block_1.wait_completed(1).is_none()); - - // Message only depended on block 1 and 2. Both have been imported, thus this should yield - // the message. - assert!(waiting_block_2.wait_completed(2).is_some()); - } - - #[test] - fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { - let msg_inner = test_catch_up(); - - let waiting_block_1 = BlockGlobalMessage:: { - inner: msg_inner.clone(), - target_number: 1, - }; - - let waiting_block_2 = BlockGlobalMessage:: { - inner: msg_inner, - target_number: 2, - }; - - // Calling wait_completed with wrong block number should yield None. - assert!(waiting_block_1.wait_completed(1234).is_none()); - - // All blocks, that the message depended on, have been imported. Still, given the above - // block number mismatch this should return None. - assert!(waiting_block_2.wait_completed(2).is_none()); - } - - #[test] - fn metrics_cleans_up_after_itself() { - let r = Registry::new(); - - let mut m1 = Metrics::register(&r).unwrap(); - let m2 = m1.clone(); - - // Add a new message to the 'queue' of m1. - m1.waiting_messages_inc(); - - // m1 and m2 are synced through the shared atomic. - assert_eq!(1, m2.global_waiting_messages.get()); - - // Drop 'queue' m1. - drop(m1); - - // Make sure m1 cleaned up after itself, removing all messages that were left in its queue - // when dropped from the global metric. 
- assert_eq!(0, m2.global_waiting_messages.get()); - } + use super::*; + use crate::{CatchUp, CompactCommit}; + use finality_grandpa::Precommit; + use futures::future::Either; + use futures_timer::Delay; + use sc_client_api::BlockImportNotification; + use sp_consensus::BlockOrigin; + use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; + use substrate_test_runtime_client::runtime::{Block, Hash, Header}; + + #[derive(Clone)] + struct TestChainState { + sender: TracingUnboundedSender>, + known_blocks: Arc>>, + } + + impl TestChainState { + fn new() -> (Self, ImportNotifications) { + let (tx, rx) = tracing_unbounded("test"); + let state = TestChainState { + sender: tx, + known_blocks: Arc::new(Mutex::new(HashMap::new())), + }; + + (state, rx) + } + + fn block_status(&self) -> TestBlockStatus { + TestBlockStatus { + inner: self.known_blocks.clone(), + } + } + + fn import_header(&self, header: Header) { + let hash = header.hash(); + let number = header.number().clone(); + + self.known_blocks.lock().insert(hash, number); + self.sender + .unbounded_send(BlockImportNotification { + hash, + origin: BlockOrigin::File, + header, + is_new_best: false, + retracted: vec![], + }) + .unwrap(); + } + } + + struct TestBlockStatus { + inner: Arc>>, + } + + impl BlockStatusT for TestBlockStatus { + fn block_number(&self, hash: Hash) -> Result, Error> { + Ok(self.inner.lock().get(&hash).map(|x| x.clone())) + } + } + + #[derive(Clone)] + struct TestBlockSyncRequester { + requests: Arc)>>>, + } + + impl Default for TestBlockSyncRequester { + fn default() -> Self { + TestBlockSyncRequester { + requests: Arc::new(Mutex::new(Vec::new())), + } + } + } + + impl BlockSyncRequesterT for TestBlockSyncRequester { + fn set_sync_fork_request( + &self, + _peers: Vec, + hash: Hash, + number: NumberFor, + ) { + self.requests.lock().push((hash, number)); + } + } + + fn make_header(number: u64) -> Header { + Header::new( + number, + Default::default(), + Default::default(), + 
Default::default(), + Default::default(), + ) + } + + // unwrap the commit from `CommunicationIn` returning its fields in a tuple, + // panics if the given message isn't a commit + fn unapply_commit(msg: CommunicationIn) -> (u64, CompactCommit) { + match msg { + voter::CommunicationIn::Commit(round, commit, ..) => (round, commit), + _ => panic!("expected commit"), + } + } + + // unwrap the catch up from `CommunicationIn` returning its inner representation, + // panics if the given message isn't a catch up + fn unapply_catch_up(msg: CommunicationIn) -> CatchUp { + match msg { + voter::CommunicationIn::CatchUp(catch_up, ..) => catch_up, + _ => panic!("expected catch up"), + } + } + + fn message_all_dependencies_satisfied( + msg: CommunicationIn, + enact_dependencies: F, + ) -> CommunicationIn + where + F: FnOnce(&TestChainState), + { + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = chain_state.block_status(); + + // enact all dependencies before importing the message + enact_dependencies(&chain_state); + + let (global_tx, global_rx) = tracing_unbounded("test"); + + let until_imported = UntilGlobalMessageBlocksImported::new( + import_notifications, + TestBlockSyncRequester::default(), + block_status, + global_rx, + "global", + None, + ); + + global_tx.unbounded_send(msg).unwrap(); + + let work = until_imported.into_future(); + + futures::executor::block_on(work).0.unwrap().unwrap() + } + + fn blocking_message_on_dependencies( + msg: CommunicationIn, + enact_dependencies: F, + ) -> CommunicationIn + where + F: FnOnce(&TestChainState), + { + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = chain_state.block_status(); + + let (global_tx, global_rx) = tracing_unbounded("test"); + + let until_imported = UntilGlobalMessageBlocksImported::new( + import_notifications, + TestBlockSyncRequester::default(), + block_status, + global_rx, + "global", + None, + ); + + global_tx.unbounded_send(msg).unwrap(); 
+ + // NOTE: needs to be cloned otherwise it is moved to the stream and + // dropped too early. + let inner_chain_state = chain_state.clone(); + let work = future::select( + until_imported.into_future(), + Delay::new(Duration::from_millis(100)), + ) + .then(move |res| match res { + Either::Left(_) => panic!("timeout should have fired first"), + Either::Right((_, until_imported)) => { + // timeout fired. push in the headers. + enact_dependencies(&inner_chain_state); + + until_imported + } + }); + + futures::executor::block_on(work).0.unwrap().unwrap() + } + + #[test] + fn blocking_commit_message() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let unknown_commit = CompactCommit:: { + target_hash: h1.hash(), + target_number: 5, + precommits: vec![ + Precommit { + target_hash: h2.hash(), + target_number: 6, + }, + Precommit { + target_hash: h3.hash(), + target_number: 7, + }, + ], + auth_data: Vec::new(), // not used + }; + + let unknown_commit = + || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank); + + let res = blocking_message_on_dependencies(unknown_commit(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); + + assert_eq!(unapply_commit(res), unapply_commit(unknown_commit()),); + } + + #[test] + fn commit_message_all_known() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let known_commit = CompactCommit:: { + target_hash: h1.hash(), + target_number: 5, + precommits: vec![ + Precommit { + target_hash: h2.hash(), + target_number: 6, + }, + Precommit { + target_hash: h3.hash(), + target_number: 7, + }, + ], + auth_data: Vec::new(), // not used + }; + + let known_commit = + || voter::CommunicationIn::Commit(0, known_commit.clone(), voter::Callback::Blank); + + let res = message_all_dependencies_satisfied(known_commit(), |chain_state| { + chain_state.import_header(h1); + 
chain_state.import_header(h2); + chain_state.import_header(h3); + }); + + assert_eq!(unapply_commit(res), unapply_commit(known_commit()),); + } + + #[test] + fn blocking_catch_up_message() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: Default::default(), + signature: Default::default(), + prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: Default::default(), + signature: Default::default(), + precommit: finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; + + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; + + let unknown_catch_up = finality_grandpa::CatchUp { + round_number: 1, + prevotes, + precommits, + base_hash: h1.hash(), + base_number: *h1.number(), + }; + + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); + + let res = blocking_message_on_dependencies(unknown_catch_up(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); + + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up()),); + } + + #[test] + fn catch_up_message_all_known() { + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + let signed_prevote = |header: &Header| finality_grandpa::SignedPrevote { + id: Default::default(), + signature: Default::default(), + prevote: finality_grandpa::Prevote { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let signed_precommit = |header: &Header| finality_grandpa::SignedPrecommit { + id: Default::default(), + signature: Default::default(), + precommit: 
finality_grandpa::Precommit { + target_hash: header.hash(), + target_number: *header.number(), + }, + }; + + let prevotes = vec![signed_prevote(&h1), signed_prevote(&h3)]; + + let precommits = vec![signed_precommit(&h1), signed_precommit(&h2)]; + + let unknown_catch_up = finality_grandpa::CatchUp { + round_number: 1, + prevotes, + precommits, + base_hash: h1.hash(), + base_number: *h1.number(), + }; + + let unknown_catch_up = + || voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); + + let res = message_all_dependencies_satisfied(unknown_catch_up(), |chain_state| { + chain_state.import_header(h1); + chain_state.import_header(h2); + chain_state.import_header(h3); + }); + + assert_eq!(unapply_catch_up(res), unapply_catch_up(unknown_catch_up()),); + } + + #[test] + fn request_block_sync_for_needed_blocks() { + let (chain_state, import_notifications) = TestChainState::new(); + let block_status = chain_state.block_status(); + + let (global_tx, global_rx) = tracing_unbounded("test"); + + let block_sync_requester = TestBlockSyncRequester::default(); + + let until_imported = UntilGlobalMessageBlocksImported::new( + import_notifications, + block_sync_requester.clone(), + block_status, + global_rx, + "global", + None, + ); + + let h1 = make_header(5); + let h2 = make_header(6); + let h3 = make_header(7); + + // we create a commit message, with precommits for blocks 6 and 7 which + // we haven't imported. 
+ let unknown_commit = CompactCommit:: { + target_hash: h1.hash(), + target_number: 5, + precommits: vec![ + Precommit { + target_hash: h2.hash(), + target_number: 6, + }, + Precommit { + target_hash: h3.hash(), + target_number: 7, + }, + ], + auth_data: Vec::new(), // not used + }; + + let unknown_commit = + || voter::CommunicationIn::Commit(0, unknown_commit.clone(), voter::Callback::Blank); + + // we send the commit message and spawn the until_imported stream + global_tx.unbounded_send(unknown_commit()).unwrap(); + + let threads_pool = futures::executor::ThreadPool::new().unwrap(); + threads_pool.spawn_ok(until_imported.into_future().map(|_| ())); + + // assert that we will make sync requests + let assert = futures::future::poll_fn(|_| { + let block_sync_requests = block_sync_requester.requests.lock(); + + // we request blocks targeted by the precommits that aren't imported + if block_sync_requests.contains(&(h2.hash(), *h2.number())) + && block_sync_requests.contains(&(h3.hash(), *h3.number())) + { + return Poll::Ready(()); + } + + Poll::Pending + }); + + // the `until_imported` stream doesn't request the blocks immediately, + // but it should request them after a small timeout + let timeout = Delay::new(Duration::from_secs(60)); + let test = future::select(assert, timeout) + .map(|res| match res { + Either::Left(_) => {} + Either::Right(_) => panic!("timed out waiting for block sync request"), + }) + .map(drop); + + futures::executor::block_on(test); + } + + fn test_catch_up() -> Arc>>> { + let header = make_header(5); + + let unknown_catch_up = finality_grandpa::CatchUp { + round_number: 1, + precommits: vec![], + prevotes: vec![], + base_hash: header.hash(), + base_number: *header.number(), + }; + + let catch_up = + voter::CommunicationIn::CatchUp(unknown_catch_up.clone(), voter::Callback::Blank); + + Arc::new(Mutex::new(Some(catch_up))) + } + + #[test] + fn block_global_message_wait_completed_return_when_all_awaited() { + let msg_inner = test_catch_up(); + 
+ let waiting_block_1 = BlockGlobalMessage:: { + inner: msg_inner.clone(), + target_number: 1, + }; + + let waiting_block_2 = BlockGlobalMessage:: { + inner: msg_inner, + target_number: 2, + }; + + // waiting_block_2 is still waiting for block 2, thus this should return `None`. + assert!(waiting_block_1.wait_completed(1).is_none()); + + // Message only depended on block 1 and 2. Both have been imported, thus this should yield + // the message. + assert!(waiting_block_2.wait_completed(2).is_some()); + } + + #[test] + fn block_global_message_wait_completed_return_none_on_block_number_missmatch() { + let msg_inner = test_catch_up(); + + let waiting_block_1 = BlockGlobalMessage:: { + inner: msg_inner.clone(), + target_number: 1, + }; + + let waiting_block_2 = BlockGlobalMessage:: { + inner: msg_inner, + target_number: 2, + }; + + // Calling wait_completed with wrong block number should yield None. + assert!(waiting_block_1.wait_completed(1234).is_none()); + + // All blocks, that the message depended on, have been imported. Still, given the above + // block number mismatch this should return None. + assert!(waiting_block_2.wait_completed(2).is_none()); + } + + #[test] + fn metrics_cleans_up_after_itself() { + let r = Registry::new(); + + let mut m1 = Metrics::register(&r).unwrap(); + let m2 = m1.clone(); + + // Add a new message to the 'queue' of m1. + m1.waiting_messages_inc(); + + // m1 and m2 are synced through the shared atomic. + assert_eq!(1, m2.global_waiting_messages.get()); + + // Drop 'queue' m1. + drop(m1); + + // Make sure m1 cleaned up after itself, removing all messages that were left in its queue + // when dropped from the global metric. 
+ assert_eq!(0, m2.global_waiting_messages.get()); + } } diff --git a/client/finality-grandpa/src/voting_rule.rs b/client/finality-grandpa/src/voting_rule.rs index 523a1b05cd..28276df9e0 100644 --- a/client/finality-grandpa/src/voting_rule.rs +++ b/client/finality-grandpa/src/voting_rule.rs @@ -27,82 +27,81 @@ use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, Zero}; /// A trait for custom voting rules in GRANDPA. -pub trait VotingRule: Send + Sync where - Block: BlockT, - B: HeaderBackend, +pub trait VotingRule: Send + Sync +where + Block: BlockT, + B: HeaderBackend, { - /// Restrict the given `current_target` vote, returning the block hash and - /// number of the block to vote on, and `None` in case the vote should not - /// be restricted. `base` is the block that we're basing our votes on in - /// order to pick our target (e.g. last round estimate), and `best_target` - /// is the initial best vote target before any vote rules were applied. When - /// applying multiple `VotingRule`s both `base` and `best_target` should - /// remain unchanged. - /// - /// The contract of this interface requires that when restricting a vote, the - /// returned value **must** be an ancestor of the given `current_target`, - /// this also means that a variant must be maintained throughout the - /// execution of voting rules wherein `current_target <= best_target`. - fn restrict_vote( - &self, - backend: &B, - base: &Block::Header, - best_target: &Block::Header, - current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)>; + /// Restrict the given `current_target` vote, returning the block hash and + /// number of the block to vote on, and `None` in case the vote should not + /// be restricted. `base` is the block that we're basing our votes on in + /// order to pick our target (e.g. last round estimate), and `best_target` + /// is the initial best vote target before any vote rules were applied. 
When + /// applying multiple `VotingRule`s both `base` and `best_target` should + /// remain unchanged. + /// + /// The contract of this interface requires that when restricting a vote, the + /// returned value **must** be an ancestor of the given `current_target`, + /// this also means that a variant must be maintained throughout the + /// execution of voting rules wherein `current_target <= best_target`. + fn restrict_vote( + &self, + backend: &B, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> Option<(Block::Hash, NumberFor)>; } -impl VotingRule for () where - Block: BlockT, - B: HeaderBackend, +impl VotingRule for () +where + Block: BlockT, + B: HeaderBackend, { - fn restrict_vote( - &self, - _backend: &B, - _base: &Block::Header, - _best_target: &Block::Header, - _current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { - None - } + fn restrict_vote( + &self, + _backend: &B, + _base: &Block::Header, + _best_target: &Block::Header, + _current_target: &Block::Header, + ) -> Option<(Block::Hash, NumberFor)> { + None + } } /// A custom voting rule that guarantees that our vote is always behind the best /// block, in the best case exactly one block behind it. 
#[derive(Clone)] pub struct BeforeBestBlockBy(N); -impl VotingRule for BeforeBestBlockBy> where - Block: BlockT, - B: HeaderBackend, +impl VotingRule for BeforeBestBlockBy> +where + Block: BlockT, + B: HeaderBackend, { - fn restrict_vote( - &self, - backend: &B, - _base: &Block::Header, - best_target: &Block::Header, - current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { - use sp_arithmetic::traits::Saturating; - - if current_target.number().is_zero() { - return None; - } - - // find the target number restricted by this rule - let target_number = best_target.number().saturating_sub(self.0); - - // our current target is already lower than this rule would restrict - if target_number >= *current_target.number() { - return None; - } - - // find the block at the given target height - find_target( - backend, - target_number, - current_target, - ) - } + fn restrict_vote( + &self, + backend: &B, + _base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> Option<(Block::Hash, NumberFor)> { + use sp_arithmetic::traits::Saturating; + + if current_target.number().is_zero() { + return None; + } + + // find the target number restricted by this rule + let target_number = best_target.number().saturating_sub(self.0); + + // our current target is already lower than this rule would restrict + if target_number >= *current_target.number() { + return None; + } + + // find the block at the given target height + find_target(backend, target_number, current_target) + } } /// A custom voting rule that limits votes towards 3/4 of the unfinalized chain, @@ -110,186 +109,185 @@ impl VotingRule for BeforeBestBlockBy> wher /// should fall. 
pub struct ThreeQuartersOfTheUnfinalizedChain; -impl VotingRule for ThreeQuartersOfTheUnfinalizedChain where - Block: BlockT, - B: HeaderBackend, +impl VotingRule for ThreeQuartersOfTheUnfinalizedChain +where + Block: BlockT, + B: HeaderBackend, { - fn restrict_vote( - &self, - backend: &B, - base: &Block::Header, - best_target: &Block::Header, - current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { - // target a vote towards 3/4 of the unfinalized chain (rounding up) - let target_number = { - let two = NumberFor::::one() + One::one(); - let three = two + One::one(); - let four = three + One::one(); - - let diff = *best_target.number() - *base.number(); - let diff = ((diff * three) + two) / four; - - *base.number() + diff - }; - - // our current target is already lower than this rule would restrict - if target_number >= *current_target.number() { - return None; - } - - // find the block at the given target height - find_target( - backend, - target_number, - current_target, - ) - } + fn restrict_vote( + &self, + backend: &B, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> Option<(Block::Hash, NumberFor)> { + // target a vote towards 3/4 of the unfinalized chain (rounding up) + let target_number = { + let two = NumberFor::::one() + One::one(); + let three = two + One::one(); + let four = three + One::one(); + + let diff = *best_target.number() - *base.number(); + let diff = ((diff * three) + two) / four; + + *base.number() + diff + }; + + // our current target is already lower than this rule would restrict + if target_number >= *current_target.number() { + return None; + } + + // find the block at the given target height + find_target(backend, target_number, current_target) + } } // walk backwards until we find the target block fn find_target( - backend: &B, - target_number: NumberFor, - current_header: &Block::Header, -) -> Option<(Block::Hash, NumberFor)> where - Block: BlockT, - B: HeaderBackend, 
+ backend: &B, + target_number: NumberFor, + current_header: &Block::Header, +) -> Option<(Block::Hash, NumberFor)> +where + Block: BlockT, + B: HeaderBackend, { - let mut target_hash = current_header.hash(); - let mut target_header = current_header.clone(); + let mut target_hash = current_header.hash(); + let mut target_header = current_header.clone(); - loop { - if *target_header.number() < target_number { - unreachable!( - "we are traversing backwards from a known block; \ + loop { + if *target_header.number() < target_number { + unreachable!( + "we are traversing backwards from a known block; \ blocks are stored contiguously; \ qed" - ); - } - - if *target_header.number() == target_number { - return Some((target_hash, target_number)); - } - - target_hash = *target_header.parent_hash(); - target_header = backend.header(BlockId::Hash(target_hash)).ok()? - .expect("Header known to exist due to the existence of one of its descendents; qed"); - } + ); + } + + if *target_header.number() == target_number { + return Some((target_hash, target_number)); + } + + target_hash = *target_header.parent_hash(); + target_header = backend + .header(BlockId::Hash(target_hash)) + .ok()? 
+ .expect("Header known to exist due to the existence of one of its descendents; qed"); + } } struct VotingRules { - rules: Arc>>>, + rules: Arc>>>, } impl Clone for VotingRules { - fn clone(&self) -> Self { - VotingRules { - rules: self.rules.clone(), - } - } + fn clone(&self) -> Self { + VotingRules { + rules: self.rules.clone(), + } + } } -impl VotingRule for VotingRules where - Block: BlockT, - B: HeaderBackend, +impl VotingRule for VotingRules +where + Block: BlockT, + B: HeaderBackend, { - fn restrict_vote( - &self, - backend: &B, - base: &Block::Header, - best_target: &Block::Header, - current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { - let restricted_target = self.rules.iter().fold( - current_target.clone(), - |current_target, rule| { - rule.restrict_vote( - backend, - base, - best_target, - ¤t_target, - ) - .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) - .and_then(std::convert::identity) - .unwrap_or(current_target) - }, - ); - - let restricted_hash = restricted_target.hash(); - - if restricted_hash != current_target.hash() { - Some((restricted_hash, *restricted_target.number())) - } else { - None - } - } + fn restrict_vote( + &self, + backend: &B, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> Option<(Block::Hash, NumberFor)> { + let restricted_target = + self.rules + .iter() + .fold(current_target.clone(), |current_target, rule| { + rule.restrict_vote(backend, base, best_target, ¤t_target) + .and_then(|(hash, _)| backend.header(BlockId::Hash(hash)).ok()) + .and_then(std::convert::identity) + .unwrap_or(current_target) + }); + + let restricted_hash = restricted_target.hash(); + + if restricted_hash != current_target.hash() { + Some((restricted_hash, *restricted_target.number())) + } else { + None + } + } } /// A builder of a composite voting rule that applies a set of rules to /// progressively restrict the vote. 
pub struct VotingRulesBuilder { - rules: Vec>>, + rules: Vec>>, } -impl Default for VotingRulesBuilder where - Block: BlockT, - B: HeaderBackend, +impl Default for VotingRulesBuilder +where + Block: BlockT, + B: HeaderBackend, { - fn default() -> Self { - VotingRulesBuilder::new() - .add(BeforeBestBlockBy(2.into())) - .add(ThreeQuartersOfTheUnfinalizedChain) - } + fn default() -> Self { + VotingRulesBuilder::new() + .add(BeforeBestBlockBy(2.into())) + .add(ThreeQuartersOfTheUnfinalizedChain) + } } -impl VotingRulesBuilder where - Block: BlockT, - B: HeaderBackend, +impl VotingRulesBuilder +where + Block: BlockT, + B: HeaderBackend, { - /// Return a new voting rule builder using the given backend. - pub fn new() -> Self { - VotingRulesBuilder { - rules: Vec::new(), - } - } - - /// Add a new voting rule to the builder. - pub fn add(mut self, rule: R) -> Self where - R: VotingRule + 'static, - { - self.rules.push(Box::new(rule)); - self - } - - /// Add all given voting rules to the builder. - pub fn add_all(mut self, rules: I) -> Self where - I: IntoIterator>>, - { - self.rules.extend(rules); - self - } - - /// Return a new `VotingRule` that applies all of the previously added - /// voting rules in-order. - pub fn build(self) -> impl VotingRule + Clone { - VotingRules { - rules: Arc::new(self.rules), - } - } + /// Return a new voting rule builder using the given backend. + pub fn new() -> Self { + VotingRulesBuilder { rules: Vec::new() } + } + + /// Add a new voting rule to the builder. + pub fn add(mut self, rule: R) -> Self + where + R: VotingRule + 'static, + { + self.rules.push(Box::new(rule)); + self + } + + /// Add all given voting rules to the builder. + pub fn add_all(mut self, rules: I) -> Self + where + I: IntoIterator>>, + { + self.rules.extend(rules); + self + } + + /// Return a new `VotingRule` that applies all of the previously added + /// voting rules in-order. 
+ pub fn build(self) -> impl VotingRule + Clone { + VotingRules { + rules: Arc::new(self.rules), + } + } } -impl VotingRule for Box> where - Block: BlockT, - B: HeaderBackend, +impl VotingRule for Box> +where + Block: BlockT, + B: HeaderBackend, { - fn restrict_vote( - &self, - backend: &B, - base: &Block::Header, - best_target: &Block::Header, - current_target: &Block::Header, - ) -> Option<(Block::Hash, NumberFor)> { - (**self).restrict_vote(backend, base, best_target, current_target) - } + fn restrict_vote( + &self, + backend: &B, + base: &Block::Header, + best_target: &Block::Header, + current_target: &Block::Header, + ) -> Option<(Block::Hash, NumberFor)> { + (**self).restrict_vote(backend, base, best_target, current_target) + } } diff --git a/client/informant/src/display.rs b/client/informant/src/display.rs index 42f4989983..84778a7c67 100644 --- a/client/informant/src/display.rs +++ b/client/informant/src/display.rs @@ -14,15 +14,18 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use crate::OutputFormat; use ansi_term::Colour; -use sc_client_api::ClientInfo; use log::info; +use sc_client_api::ClientInfo; use sc_network::SyncState; -use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Zero, Saturating}; use sc_service::NetworkStatus; -use std::{convert::{TryFrom, TryInto}, fmt}; +use sp_runtime::traits::{Block as BlockT, CheckedDiv, NumberFor, Saturating, Zero}; +use std::{ + convert::{TryFrom, TryInto}, + fmt, +}; use wasm_timer::Instant; -use crate::OutputFormat; /// State of the informant display system. /// @@ -38,135 +41,141 @@ use crate::OutputFormat; /// information to display. /// pub struct InformantDisplay { - /// Head of chain block number from the last time `display` has been called. - /// `None` if `display` has never been called. - last_number: Option>, - /// The last time `display` or `new` has been called. - last_update: Instant, - /// The format to print output in. 
- format: OutputFormat, + /// Head of chain block number from the last time `display` has been called. + /// `None` if `display` has never been called. + last_number: Option>, + /// The last time `display` or `new` has been called. + last_update: Instant, + /// The format to print output in. + format: OutputFormat, } impl InformantDisplay { - /// Builds a new informant display system. - pub fn new(format: OutputFormat) -> InformantDisplay { - InformantDisplay { - last_number: None, - last_update: Instant::now(), - format, - } - } - - /// Displays the informant by calling `info!`. - pub fn display(&mut self, info: &ClientInfo, net_status: NetworkStatus) { - let best_number = info.chain.best_number; - let best_hash = info.chain.best_hash; - let finalized_number = info.chain.finalized_number; - let num_connected_peers = net_status.num_connected_peers; - let speed = speed::(best_number, self.last_number, self.last_update); - self.last_update = Instant::now(); - self.last_number = Some(best_number); - - let (status, target) = match (net_status.sync_state, net_status.best_seen_block) { - (SyncState::Idle, _) => ("💤 Idle".into(), "".into()), - (SyncState::Downloading, None) => (format!("⚙️ Preparing{}", speed), "".into()), - (SyncState::Downloading, Some(n)) => (format!("⚙️ Syncing{}", speed), format!(", target=#{}", n)), - }; - - if self.format == OutputFormat::Coloured { - info!( - target: "substrate", - "{}{} ({} peers), best: #{} ({}), finalized #{} ({}), {} {}", - Colour::White.bold().paint(&status), - target, - Colour::White.bold().paint(format!("{}", num_connected_peers)), - Colour::White.bold().paint(format!("{}", best_number)), - best_hash, - Colour::White.bold().paint(format!("{}", finalized_number)), - info.chain.finalized_hash, - Colour::Green.paint(format!("⬇ {}", TransferRateFormat(net_status.average_download_per_sec))), - Colour::Red.paint(format!("⬆ {}", TransferRateFormat(net_status.average_upload_per_sec))), - ); - } else { - info!( - target: 
"substrate", - "{}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}", - status, - target, - num_connected_peers, - best_number, - best_hash, - finalized_number, - info.chain.finalized_hash, - TransferRateFormat(net_status.average_download_per_sec), - TransferRateFormat(net_status.average_upload_per_sec), - ); - } - } + /// Builds a new informant display system. + pub fn new(format: OutputFormat) -> InformantDisplay { + InformantDisplay { + last_number: None, + last_update: Instant::now(), + format, + } + } + + /// Displays the informant by calling `info!`. + pub fn display(&mut self, info: &ClientInfo, net_status: NetworkStatus) { + let best_number = info.chain.best_number; + let best_hash = info.chain.best_hash; + let finalized_number = info.chain.finalized_number; + let num_connected_peers = net_status.num_connected_peers; + let speed = speed::(best_number, self.last_number, self.last_update); + self.last_update = Instant::now(); + self.last_number = Some(best_number); + + let (status, target) = match (net_status.sync_state, net_status.best_seen_block) { + (SyncState::Idle, _) => ("💤 Idle".into(), "".into()), + (SyncState::Downloading, None) => (format!("⚙️ Preparing{}", speed), "".into()), + (SyncState::Downloading, Some(n)) => { + (format!("⚙️ Syncing{}", speed), format!(", target=#{}", n)) + } + }; + + if self.format == OutputFormat::Coloured { + info!( + target: "substrate", + "{}{} ({} peers), best: #{} ({}), finalized #{} ({}), {} {}", + Colour::White.bold().paint(&status), + target, + Colour::White.bold().paint(format!("{}", num_connected_peers)), + Colour::White.bold().paint(format!("{}", best_number)), + best_hash, + Colour::White.bold().paint(format!("{}", finalized_number)), + info.chain.finalized_hash, + Colour::Green.paint(format!("⬇ {}", TransferRateFormat(net_status.average_download_per_sec))), + Colour::Red.paint(format!("⬆ {}", TransferRateFormat(net_status.average_upload_per_sec))), + ); + } else { + info!( + target: "substrate", + 
"{}{} ({} peers), best: #{} ({}), finalized #{} ({}), ⬇ {} ⬆ {}", + status, + target, + num_connected_peers, + best_number, + best_hash, + finalized_number, + info.chain.finalized_hash, + TransferRateFormat(net_status.average_download_per_sec), + TransferRateFormat(net_status.average_upload_per_sec), + ); + } + } } /// Calculates `(best_number - last_number) / (now - last_update)` and returns a `String` /// representing the speed of import. fn speed( - best_number: NumberFor, - last_number: Option>, - last_update: Instant + best_number: NumberFor, + last_number: Option>, + last_update: Instant, ) -> String { - // Number of milliseconds elapsed since last time. - let elapsed_ms = { - let elapsed = last_update.elapsed(); - let since_last_millis = elapsed.as_secs() * 1000; - let since_last_subsec_millis = elapsed.subsec_millis() as u64; - since_last_millis + since_last_subsec_millis - }; - - // Number of blocks that have been imported since last time. - let diff = match last_number { - None => return String::new(), - Some(n) => best_number.saturating_sub(n) - }; - - if let Ok(diff) = TryInto::::try_into(diff) { - // If the number of blocks can be converted to a regular integer, then it's easy: just - // do the math and turn it into a `f64`. - let speed = diff.saturating_mul(10_000).checked_div(u128::from(elapsed_ms)) - .map_or(0.0, |s| s as f64) / 10.0; - format!(" {:4.1} bps", speed) - - } else { - // If the number of blocks can't be converted to a regular integer, then we need a more - // algebraic approach and we stay within the realm of integers. - let one_thousand = NumberFor::::from(1_000); - let elapsed = NumberFor::::from( - >::try_from(elapsed_ms).unwrap_or(u32::max_value()) - ); - - let speed = diff.saturating_mul(one_thousand).checked_div(&elapsed) - .unwrap_or_else(Zero::zero); - format!(" {} bps", speed) - } + // Number of milliseconds elapsed since last time. 
+ let elapsed_ms = { + let elapsed = last_update.elapsed(); + let since_last_millis = elapsed.as_secs() * 1000; + let since_last_subsec_millis = elapsed.subsec_millis() as u64; + since_last_millis + since_last_subsec_millis + }; + + // Number of blocks that have been imported since last time. + let diff = match last_number { + None => return String::new(), + Some(n) => best_number.saturating_sub(n), + }; + + if let Ok(diff) = TryInto::::try_into(diff) { + // If the number of blocks can be converted to a regular integer, then it's easy: just + // do the math and turn it into a `f64`. + let speed = diff + .saturating_mul(10_000) + .checked_div(u128::from(elapsed_ms)) + .map_or(0.0, |s| s as f64) + / 10.0; + format!(" {:4.1} bps", speed) + } else { + // If the number of blocks can't be converted to a regular integer, then we need a more + // algebraic approach and we stay within the realm of integers. + let one_thousand = NumberFor::::from(1_000); + let elapsed = NumberFor::::from( + >::try_from(elapsed_ms).unwrap_or(u32::max_value()), + ); + + let speed = diff + .saturating_mul(one_thousand) + .checked_div(&elapsed) + .unwrap_or_else(Zero::zero); + format!(" {} bps", speed) + } } /// Contains a number of bytes per second. Implements `fmt::Display` and shows this number of bytes /// per second in a nice way. struct TransferRateFormat(u64); impl fmt::Display for TransferRateFormat { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - // Special case 0. - if self.0 == 0 { - return write!(f, "0") - } - - // Under 0.1 kiB, display plain bytes. - if self.0 < 100 { - return write!(f, "{} B/s", self.0) - } - - // Under 1.0 MiB/sec, display the value in kiB/sec. - if self.0 < 1024 * 1024 { - return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0) - } - - write!(f, "{:.1}MiB/s", self.0 as f64 / (1024.0 * 1024.0)) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + // Special case 0. 
+ if self.0 == 0 { + return write!(f, "0"); + } + + // Under 0.1 kiB, display plain bytes. + if self.0 < 100 { + return write!(f, "{} B/s", self.0); + } + + // Under 1.0 MiB/sec, display the value in kiB/sec. + if self.0 < 1024 * 1024 { + return write!(f, "{:.1}kiB/s", self.0 as f64 / 1024.0); + } + + write!(f, "{:.1}MiB/s", self.0 as f64 / (1024.0 * 1024.0)) + } } diff --git a/client/informant/src/lib.rs b/client/informant/src/lib.rs index 66d5ed41fb..a631c0251f 100644 --- a/client/informant/src/lib.rs +++ b/client/informant/src/lib.rs @@ -17,11 +17,11 @@ //! Console informant. Prints sync progress and block events. Runs on the calling thread. use ansi_term::Colour; -use sc_client_api::BlockchainEvents; use futures::prelude::*; -use log::{info, warn, trace}; -use sp_runtime::traits::Header; +use log::{info, trace, warn}; +use sc_client_api::BlockchainEvents; use sc_service::AbstractService; +use sp_runtime::traits::Header; use std::time::Duration; mod display; @@ -29,46 +29,49 @@ mod display; /// The format to print telemetry output in. #[derive(PartialEq)] pub enum OutputFormat { - Coloured, - Plain, + Coloured, + Plain, } /// Creates an informant in the form of a `Future` that must be polled regularly. 
-pub fn build(service: &impl AbstractService, format: OutputFormat) -> impl futures::Future { - let client = service.client(); - let pool = service.transaction_pool(); - - let mut display = display::InformantDisplay::new(format); - - let display_notifications = service - .network_status(Duration::from_millis(5000)) - .for_each(move |(net_status, _)| { - let info = client.usage_info(); - if let Some(ref usage) = info.usage { - trace!(target: "usage", "Usage statistics: {}", usage); - } else { - trace!( - target: "usage", - "Usage statistics not displayed as backend does not provide it", - ) - } - #[cfg(not(target_os = "unknown"))] - trace!( - target: "usage", - "Subsystems memory [txpool: {} kB]", - parity_util_mem::malloc_size(&*pool) / 1024, - ); - display.display(&info, net_status); - future::ready(()) - }); - - let client = service.client(); - let mut last_best = { - let info = client.usage_info(); - Some((info.chain.best_number, info.chain.best_hash)) - }; - - let display_block_import = client.import_notification_stream().for_each(move |n| { +pub fn build( + service: &impl AbstractService, + format: OutputFormat, +) -> impl futures::Future { + let client = service.client(); + let pool = service.transaction_pool(); + + let mut display = display::InformantDisplay::new(format); + + let display_notifications = service + .network_status(Duration::from_millis(5000)) + .for_each(move |(net_status, _)| { + let info = client.usage_info(); + if let Some(ref usage) = info.usage { + trace!(target: "usage", "Usage statistics: {}", usage); + } else { + trace!( + target: "usage", + "Usage statistics not displayed as backend does not provide it", + ) + } + #[cfg(not(target_os = "unknown"))] + trace!( + target: "usage", + "Subsystems memory [txpool: {} kB]", + parity_util_mem::malloc_size(&*pool) / 1024, + ); + display.display(&info, net_status); + future::ready(()) + }); + + let client = service.client(); + let mut last_best = { + let info = client.usage_info(); + 
Some((info.chain.best_number, info.chain.best_hash)) + }; + + let display_block_import = client.import_notification_stream().for_each(move |n| { // detect and log reorganizations. if let Some((ref last_num, ref last_hash)) = last_best { if n.header.parent_hash() != last_hash && n.is_new_best { @@ -99,8 +102,5 @@ pub fn build(service: &impl AbstractService, format: OutputFormat) -> impl futur future::ready(()) }); - future::join( - display_notifications, - display_block_import - ).map(|_| ()) + future::join(display_notifications, display_block_import).map(|_| ()) } diff --git a/client/keystore/src/lib.rs b/client/keystore/src/lib.rs index f8bc930971..e4cb2e505a 100644 --- a/client/keystore/src/lib.rs +++ b/client/keystore/src/lib.rs @@ -17,14 +17,20 @@ //! Keystore (and session key management) for ed25519 based chains like Polkadot. #![warn(missing_docs)] -use std::{collections::{HashMap, HashSet}, path::PathBuf, fs::{self, File}, io::{self, Write}, sync::Arc}; +use parking_lot::RwLock; +use sp_application_crypto::{ed25519, sr25519, AppKey, AppPair, AppPublic}; use sp_core::{ - crypto::{IsWrappedBy, CryptoTypePublicPair, KeyTypeId, Pair as PairT, Protected, Public}, - traits::{BareCryptoStore, BareCryptoStoreError as TraitError}, - Encode, + crypto::{CryptoTypePublicPair, IsWrappedBy, KeyTypeId, Pair as PairT, Protected, Public}, + traits::{BareCryptoStore, BareCryptoStoreError as TraitError}, + Encode, +}; +use std::{ + collections::{HashMap, HashSet}, + fs::{self, File}, + io::{self, Write}, + path::PathBuf, + sync::Arc, }; -use sp_application_crypto::{AppKey, AppPublic, AppPair, ed25519, sr25519}; -use parking_lot::RwLock; /// Keystore pointer pub type KeyStorePtr = Arc>; @@ -32,56 +38,56 @@ pub type KeyStorePtr = Arc>; /// Keystore error. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// IO error. - Io(io::Error), - /// JSON error. - Json(serde_json::Error), - /// Invalid password. 
- #[display(fmt="Invalid password")] - InvalidPassword, - /// Invalid BIP39 phrase - #[display(fmt="Invalid recovery phrase (BIP39) data")] - InvalidPhrase, - /// Invalid seed - #[display(fmt="Invalid seed")] - InvalidSeed, - /// Public key type is not supported - #[display(fmt="Key crypto type is not supported")] - KeyNotSupported(KeyTypeId), - /// Pair not found for public key and KeyTypeId - #[display(fmt="Pair not found for {} public key", "_0")] - PairNotFound(String), - /// Keystore unavailable - #[display(fmt="Keystore unavailable")] - Unavailable, + /// IO error. + Io(io::Error), + /// JSON error. + Json(serde_json::Error), + /// Invalid password. + #[display(fmt = "Invalid password")] + InvalidPassword, + /// Invalid BIP39 phrase + #[display(fmt = "Invalid recovery phrase (BIP39) data")] + InvalidPhrase, + /// Invalid seed + #[display(fmt = "Invalid seed")] + InvalidSeed, + /// Public key type is not supported + #[display(fmt = "Key crypto type is not supported")] + KeyNotSupported(KeyTypeId), + /// Pair not found for public key and KeyTypeId + #[display(fmt = "Pair not found for {} public key", "_0")] + PairNotFound(String), + /// Keystore unavailable + #[display(fmt = "Keystore unavailable")] + Unavailable, } /// Keystore Result pub type Result = std::result::Result; impl From for TraitError { - fn from(error: Error) -> Self { - match error { - Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), - Error::PairNotFound(e) => TraitError::PairNotFound(e), - Error::InvalidSeed | Error::InvalidPhrase | Error::InvalidPassword => { - TraitError::ValidationError(error.to_string()) - }, - Error::Unavailable => TraitError::Unavailable, - Error::Io(e) => TraitError::Other(e.to_string()), - Error::Json(e) => TraitError::Other(e.to_string()), - } - } + fn from(error: Error) -> Self { + match error { + Error::KeyNotSupported(id) => TraitError::KeyNotSupported(id), + Error::PairNotFound(e) => TraitError::PairNotFound(e), + Error::InvalidSeed | 
Error::InvalidPhrase | Error::InvalidPassword => { + TraitError::ValidationError(error.to_string()) + } + Error::Unavailable => TraitError::Unavailable, + Error::Io(e) => TraitError::Other(e.to_string()), + Error::Json(e) => TraitError::Other(e.to_string()), + } + } } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Io(ref err) => Some(err), - Error::Json(ref err) => Some(err), - _ => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Io(ref err) => Some(err), + Error::Json(ref err) => Some(err), + _ => None, + } + } } /// Key store. @@ -90,444 +96,488 @@ impl std::error::Error for Error { /// /// Every pair that is being generated by a `seed`, will be placed in memory. pub struct Store { - path: Option, - /// Map over `(KeyTypeId, Raw public key)` -> `Key phrase/seed` - additional: HashMap<(KeyTypeId, Vec), String>, - password: Option>, + path: Option, + /// Map over `(KeyTypeId, Raw public key)` -> `Key phrase/seed` + additional: HashMap<(KeyTypeId, Vec), String>, + password: Option>, } impl Store { - /// Open the store at the given path. - /// - /// Optionally takes a password that will be used to encrypt/decrypt the keys. - pub fn open>(path: T, password: Option>) -> Result { - let path = path.into(); - fs::create_dir_all(&path)?; - - let instance = Self { path: Some(path), additional: HashMap::new(), password }; - Ok(Arc::new(RwLock::new(instance))) - } - - /// Create a new in-memory store. - pub fn new_in_memory() -> KeyStorePtr { - Arc::new(RwLock::new(Self { - path: None, - additional: HashMap::new(), - password: None - })) - } - - /// Get the key phrase for the given public key and key type from the in-memory store. 
- fn get_additional_pair( - &self, - public: &[u8], - key_type: KeyTypeId, - ) -> Option<&String> { - let key = (key_type, public.to_vec()); - self.additional.get(&key) - } - - /// Insert the given public/private key pair with the given key type. - /// - /// Does not place it into the file system store. - fn insert_ephemeral_pair(&mut self, pair: &Pair, seed: &str, key_type: KeyTypeId) { - let key = (key_type, pair.public().to_raw_vec()); - self.additional.insert(key, seed.into()); - } - - /// Insert a new key with anonymous crypto. - /// - /// Places it into the file system store. - fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { - if let Some(path) = self.key_file_path(public, key_type) { - let mut file = File::create(path).map_err(Error::Io)?; - serde_json::to_writer(&file, &suri).map_err(Error::Json)?; - file.flush().map_err(Error::Io)?; - } - Ok(()) - } - - /// Insert a new key. - /// - /// Places it into the file system store. - pub fn insert_by_type(&self, key_type: KeyTypeId, suri: &str) -> Result { - let pair = Pair::from_string( - suri, - self.password.as_ref().map(|p| &***p) - ).map_err(|_| Error::InvalidSeed)?; - self.insert_unknown(key_type, suri, pair.public().as_slice()) - .map_err(|_| Error::Unavailable)?; - Ok(pair) - } - - /// Insert a new key. - /// - /// Places it into the file system store. - pub fn insert(&self, suri: &str) -> Result { - self.insert_by_type::(Pair::ID, suri).map(Into::into) - } - - /// Generate a new key. - /// - /// Places it into the file system store. - pub fn generate_by_type(&self, key_type: KeyTypeId) -> Result { - let (pair, phrase, _) = Pair::generate_with_phrase(self.password.as_ref().map(|p| &***p)); - if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { - let mut file = File::create(path)?; - serde_json::to_writer(&file, &phrase)?; - file.flush()?; - } - Ok(pair) - } - - /// Generate a new key. - /// - /// Places it into the file system store. 
- pub fn generate(&self) -> Result { - self.generate_by_type::(Pair::ID).map(Into::into) - } - - /// Create a new key from seed. - /// - /// Does not place it into the file system store. - pub fn insert_ephemeral_from_seed_by_type( - &mut self, - seed: &str, - key_type: KeyTypeId, - ) -> Result { - let pair = Pair::from_string(seed, None).map_err(|_| Error::InvalidSeed)?; - self.insert_ephemeral_pair(&pair, seed, key_type); - Ok(pair) - } - - /// Create a new key from seed. - /// - /// Does not place it into the file system store. - pub fn insert_ephemeral_from_seed(&mut self, seed: &str) -> Result { - self.insert_ephemeral_from_seed_by_type::(seed, Pair::ID).map(Into::into) - } - - /// Get the key phrase for a given public key and key type. - fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result { - if let Some(phrase) = self.get_additional_pair(public, key_type) { - return Ok(phrase.clone()) - } - - let path = self.key_file_path(public, key_type).ok_or_else(|| Error::Unavailable)?; - let file = File::open(path)?; - - serde_json::from_reader(&file).map_err(Into::into) - } - - /// Get a key pair for the given public key and key type. - pub fn key_pair_by_type(&self, - public: &Pair::Public, - key_type: KeyTypeId, - ) -> Result { - let phrase = self.key_phrase_by_type(public.as_slice(), key_type)?; - let pair = Pair::from_string( - &phrase, - self.password.as_ref().map(|p| &***p), - ).map_err(|_| Error::InvalidPhrase)?; - - if &pair.public() == public { - Ok(pair) - } else { - Err(Error::InvalidPassword) - } - } - - /// Get a key pair for the given public key. - pub fn key_pair(&self, public: &::Public) -> Result { - self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID).map(Into::into) - } - - /// Get public keys of all stored keys that match the key type. - /// - /// This will just use the type of the public key (a list of which to be returned) in order - /// to determine the key type. 
Unless you use a specialized application-type public key, then - /// this only give you keys registered under generic cryptography, and will not return keys - /// registered under the application type. - pub fn public_keys(&self) -> Result> { - self.raw_public_keys(Public::ID) - .map(|v| { - v.into_iter() - .map(|k| Public::from_slice(k.as_slice())) - .collect() - }) - } - - /// Returns the file path for the given public key and key type. - fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { - let mut buf = self.path.as_ref()?.clone(); - let key_type = hex::encode(key_type.0); - let key = hex::encode(public); - buf.push(key_type + key.as_str()); - Some(buf) - } - - /// Returns a list of raw public keys filtered by `KeyTypeId` - fn raw_public_keys(&self, id: KeyTypeId) -> Result>> { - let mut public_keys: Vec> = self.additional.keys() - .into_iter() - .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) - .collect(); - - if let Some(path) = &self.path { - for entry in fs::read_dir(&path)? { - let entry = entry?; - let path = entry.path(); - - // skip directories and non-unicode file names (hex is unicode) - if let Some(name) = path.file_name().and_then(|n| n.to_str()) { - match hex::decode(name) { - Ok(ref hex) if hex.len() > 4 => { - if &hex[0..4] != &id.0 { - continue; - } - let public = hex[4..].to_vec(); - public_keys.push(public); - } - _ => continue, - } - } - } - } - - Ok(public_keys) - } + /// Open the store at the given path. + /// + /// Optionally takes a password that will be used to encrypt/decrypt the keys. + pub fn open>( + path: T, + password: Option>, + ) -> Result { + let path = path.into(); + fs::create_dir_all(&path)?; + + let instance = Self { + path: Some(path), + additional: HashMap::new(), + password, + }; + Ok(Arc::new(RwLock::new(instance))) + } + + /// Create a new in-memory store. 
+ pub fn new_in_memory() -> KeyStorePtr { + Arc::new(RwLock::new(Self { + path: None, + additional: HashMap::new(), + password: None, + })) + } + + /// Get the key phrase for the given public key and key type from the in-memory store. + fn get_additional_pair(&self, public: &[u8], key_type: KeyTypeId) -> Option<&String> { + let key = (key_type, public.to_vec()); + self.additional.get(&key) + } + + /// Insert the given public/private key pair with the given key type. + /// + /// Does not place it into the file system store. + fn insert_ephemeral_pair(&mut self, pair: &Pair, seed: &str, key_type: KeyTypeId) { + let key = (key_type, pair.public().to_raw_vec()); + self.additional.insert(key, seed.into()); + } + + /// Insert a new key with anonymous crypto. + /// + /// Places it into the file system store. + fn insert_unknown(&self, key_type: KeyTypeId, suri: &str, public: &[u8]) -> Result<()> { + if let Some(path) = self.key_file_path(public, key_type) { + let mut file = File::create(path).map_err(Error::Io)?; + serde_json::to_writer(&file, &suri).map_err(Error::Json)?; + file.flush().map_err(Error::Io)?; + } + Ok(()) + } + + /// Insert a new key. + /// + /// Places it into the file system store. + pub fn insert_by_type(&self, key_type: KeyTypeId, suri: &str) -> Result { + let pair = Pair::from_string(suri, self.password.as_ref().map(|p| &***p)) + .map_err(|_| Error::InvalidSeed)?; + self.insert_unknown(key_type, suri, pair.public().as_slice()) + .map_err(|_| Error::Unavailable)?; + Ok(pair) + } + + /// Insert a new key. + /// + /// Places it into the file system store. + pub fn insert(&self, suri: &str) -> Result { + self.insert_by_type::(Pair::ID, suri) + .map(Into::into) + } + + /// Generate a new key. + /// + /// Places it into the file system store. 
+ pub fn generate_by_type(&self, key_type: KeyTypeId) -> Result { + let (pair, phrase, _) = Pair::generate_with_phrase(self.password.as_ref().map(|p| &***p)); + if let Some(path) = self.key_file_path(pair.public().as_slice(), key_type) { + let mut file = File::create(path)?; + serde_json::to_writer(&file, &phrase)?; + file.flush()?; + } + Ok(pair) + } + + /// Generate a new key. + /// + /// Places it into the file system store. + pub fn generate(&self) -> Result { + self.generate_by_type::(Pair::ID) + .map(Into::into) + } + + /// Create a new key from seed. + /// + /// Does not place it into the file system store. + pub fn insert_ephemeral_from_seed_by_type( + &mut self, + seed: &str, + key_type: KeyTypeId, + ) -> Result { + let pair = Pair::from_string(seed, None).map_err(|_| Error::InvalidSeed)?; + self.insert_ephemeral_pair(&pair, seed, key_type); + Ok(pair) + } + + /// Create a new key from seed. + /// + /// Does not place it into the file system store. + pub fn insert_ephemeral_from_seed(&mut self, seed: &str) -> Result { + self.insert_ephemeral_from_seed_by_type::(seed, Pair::ID) + .map(Into::into) + } + + /// Get the key phrase for a given public key and key type. + fn key_phrase_by_type(&self, public: &[u8], key_type: KeyTypeId) -> Result { + if let Some(phrase) = self.get_additional_pair(public, key_type) { + return Ok(phrase.clone()); + } + + let path = self + .key_file_path(public, key_type) + .ok_or_else(|| Error::Unavailable)?; + let file = File::open(path)?; + + serde_json::from_reader(&file).map_err(Into::into) + } + + /// Get a key pair for the given public key and key type. 
+ pub fn key_pair_by_type( + &self, + public: &Pair::Public, + key_type: KeyTypeId, + ) -> Result { + let phrase = self.key_phrase_by_type(public.as_slice(), key_type)?; + let pair = Pair::from_string(&phrase, self.password.as_ref().map(|p| &***p)) + .map_err(|_| Error::InvalidPhrase)?; + + if &pair.public() == public { + Ok(pair) + } else { + Err(Error::InvalidPassword) + } + } + + /// Get a key pair for the given public key. + pub fn key_pair(&self, public: &::Public) -> Result { + self.key_pair_by_type::(IsWrappedBy::from_ref(public), Pair::ID) + .map(Into::into) + } + + /// Get public keys of all stored keys that match the key type. + /// + /// This will just use the type of the public key (a list of which to be returned) in order + /// to determine the key type. Unless you use a specialized application-type public key, then + /// this only give you keys registered under generic cryptography, and will not return keys + /// registered under the application type. + pub fn public_keys(&self) -> Result> { + self.raw_public_keys(Public::ID).map(|v| { + v.into_iter() + .map(|k| Public::from_slice(k.as_slice())) + .collect() + }) + } + + /// Returns the file path for the given public key and key type. + fn key_file_path(&self, public: &[u8], key_type: KeyTypeId) -> Option { + let mut buf = self.path.as_ref()?.clone(); + let key_type = hex::encode(key_type.0); + let key = hex::encode(public); + buf.push(key_type + key.as_str()); + Some(buf) + } + + /// Returns a list of raw public keys filtered by `KeyTypeId` + fn raw_public_keys(&self, id: KeyTypeId) -> Result>> { + let mut public_keys: Vec> = self + .additional + .keys() + .into_iter() + .filter_map(|k| if k.0 == id { Some(k.1.clone()) } else { None }) + .collect(); + + if let Some(path) = &self.path { + for entry in fs::read_dir(&path)? 
{ + let entry = entry?; + let path = entry.path(); + + // skip directories and non-unicode file names (hex is unicode) + if let Some(name) = path.file_name().and_then(|n| n.to_str()) { + match hex::decode(name) { + Ok(ref hex) if hex.len() > 4 => { + if &hex[0..4] != &id.0 { + continue; + } + let public = hex[4..].to_vec(); + public_keys.push(public); + } + _ => continue, + } + } + } + } + + Ok(public_keys) + } } impl BareCryptoStore for Store { - fn keys( - &self, - id: KeyTypeId - ) -> std::result::Result, TraitError> { - let raw_keys = self.raw_public_keys(id)?; - Ok(raw_keys.into_iter() - .fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v - })) - } - - fn supported_keys( - &self, - id: KeyTypeId, - keys: Vec - ) -> std::result::Result, TraitError> { - let all_keys = self.keys(id)?.into_iter().collect::>(); - Ok(keys.into_iter() - .filter(|key| all_keys.contains(key)) - .collect::>()) - } - - fn sign_with( - &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, - msg: &[u8], - ) -> std::result::Result, TraitError> { - match key.0 { - ed25519::CRYPTO_ID => { - let pub_key = ed25519::Public::from_slice(key.1.as_slice()); - let key_pair: ed25519::Pair = self - .key_pair_by_type::(&pub_key, id) - .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) - } - sr25519::CRYPTO_ID => { - let pub_key = sr25519::Public::from_slice(key.1.as_slice()); - let key_pair: sr25519::Pair = self - .key_pair_by_type::(&pub_key, id) - .map_err(|e| TraitError::from(e))?; - Ok(key_pair.sign(msg).encode()) - } - _ => Err(TraitError::KeyNotSupported(id)) - } - } - - fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| sr25519::Public::from_slice(k.as_slice())) - .collect() - }) - .unwrap_or_default() - } - - fn sr25519_generate_new( - &mut self, - id: KeyTypeId, - seed: Option<&str>, - ) 
-> std::result::Result { - let pair = match seed { - Some(seed) => self.insert_ephemeral_from_seed_by_type::(seed, id), - None => self.generate_by_type::(id), - }.map_err(|e| -> TraitError { e.into() })?; - - Ok(pair.public()) - } - - fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { - self.raw_public_keys(key_type) - .map(|v| { - v.into_iter() - .map(|k| ed25519::Public::from_slice(k.as_slice())) - .collect() - }) - .unwrap_or_default() - } - - fn ed25519_generate_new( - &mut self, - id: KeyTypeId, - seed: Option<&str>, - ) -> std::result::Result { - let pair = match seed { - Some(seed) => self.insert_ephemeral_from_seed_by_type::(seed, id), - None => self.generate_by_type::(id), - }.map_err(|e| -> TraitError { e.into() })?; - - Ok(pair.public()) - } - - fn insert_unknown(&mut self, key_type: KeyTypeId, suri: &str, public: &[u8]) - -> std::result::Result<(), ()> - { - Store::insert_unknown(self, key_type, suri, public).map_err(|_| ()) - } - - fn password(&self) -> Option<&str> { - self.password.as_ref().map(|x| x.as_str()) - } - - fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys.iter().all(|(p, t)| self.key_phrase_by_type(&p, *t).is_ok()) - } + fn keys(&self, id: KeyTypeId) -> std::result::Result, TraitError> { + let raw_keys = self.raw_public_keys(id)?; + Ok(raw_keys.into_iter().fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v + })) + } + + fn supported_keys( + &self, + id: KeyTypeId, + keys: Vec, + ) -> std::result::Result, TraitError> { + let all_keys = self.keys(id)?.into_iter().collect::>(); + Ok(keys + .into_iter() + .filter(|key| all_keys.contains(key)) + .collect::>()) + } + + fn sign_with( + &self, + id: KeyTypeId, + key: &CryptoTypePublicPair, + msg: &[u8], + ) -> std::result::Result, TraitError> { + match key.0 { + ed25519::CRYPTO_ID => { + let pub_key = ed25519::Public::from_slice(key.1.as_slice()); + let 
key_pair: ed25519::Pair = self + .key_pair_by_type::(&pub_key, id) + .map_err(|e| TraitError::from(e))?; + Ok(key_pair.sign(msg).encode()) + } + sr25519::CRYPTO_ID => { + let pub_key = sr25519::Public::from_slice(key.1.as_slice()); + let key_pair: sr25519::Pair = self + .key_pair_by_type::(&pub_key, id) + .map_err(|e| TraitError::from(e))?; + Ok(key_pair.sign(msg).encode()) + } + _ => Err(TraitError::KeyNotSupported(id)), + } + } + + fn sr25519_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.raw_public_keys(key_type) + .map(|v| { + v.into_iter() + .map(|k| sr25519::Public::from_slice(k.as_slice())) + .collect() + }) + .unwrap_or_default() + } + + fn sr25519_generate_new( + &mut self, + id: KeyTypeId, + seed: Option<&str>, + ) -> std::result::Result { + let pair = match seed { + Some(seed) => self.insert_ephemeral_from_seed_by_type::(seed, id), + None => self.generate_by_type::(id), + } + .map_err(|e| -> TraitError { e.into() })?; + + Ok(pair.public()) + } + + fn ed25519_public_keys(&self, key_type: KeyTypeId) -> Vec { + self.raw_public_keys(key_type) + .map(|v| { + v.into_iter() + .map(|k| ed25519::Public::from_slice(k.as_slice())) + .collect() + }) + .unwrap_or_default() + } + + fn ed25519_generate_new( + &mut self, + id: KeyTypeId, + seed: Option<&str>, + ) -> std::result::Result { + let pair = match seed { + Some(seed) => self.insert_ephemeral_from_seed_by_type::(seed, id), + None => self.generate_by_type::(id), + } + .map_err(|e| -> TraitError { e.into() })?; + + Ok(pair.public()) + } + + fn insert_unknown( + &mut self, + key_type: KeyTypeId, + suri: &str, + public: &[u8], + ) -> std::result::Result<(), ()> { + Store::insert_unknown(self, key_type, suri, public).map_err(|_| ()) + } + + fn password(&self) -> Option<&str> { + self.password.as_ref().map(|x| x.as_str()) + } + + fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { + public_keys + .iter() + .all(|(p, t)| self.key_phrase_by_type(&p, *t).is_ok()) + } } #[cfg(test)] mod tests { - use 
super::*; - use tempfile::TempDir; - use sp_core::{testing::SR25519, crypto::Ss58Codec}; - - #[test] - fn basic_store() { - let temp_dir = TempDir::new().unwrap(); - let store = Store::open(temp_dir.path(), None).unwrap(); - - assert!(store.read().public_keys::().unwrap().is_empty()); - - let key: ed25519::AppPair = store.write().generate().unwrap(); - let key2: ed25519::AppPair = store.read().key_pair(&key.public()).unwrap(); - - assert_eq!(key.public(), key2.public()); - - assert_eq!(store.read().public_keys::().unwrap()[0], key.public()); - } - - #[test] - fn test_insert_ephemeral_from_seed() { - let temp_dir = TempDir::new().unwrap(); - let store = Store::open(temp_dir.path(), None).unwrap(); - - let pair: ed25519::AppPair = store - .write() - .insert_ephemeral_from_seed("0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc") - .unwrap(); - assert_eq!( - "5DKUrgFqCPV8iAXx9sjy1nyBygQCeiUYRFWurZGhnrn3HJCA", - pair.public().to_ss58check() - ); - - drop(store); - let store = Store::open(temp_dir.path(), None).unwrap(); - // Keys generated from seed should not be persisted! 
- assert!(store.read().key_pair::(&pair.public()).is_err()); - } - - #[test] - fn password_being_used() { - let password = String::from("password"); - let temp_dir = TempDir::new().unwrap(); - let store = Store::open(temp_dir.path(), Some(password.clone().into())).unwrap(); - - let pair: ed25519::AppPair = store.write().generate().unwrap(); - assert_eq!( - pair.public(), - store.read().key_pair::(&pair.public()).unwrap().public(), - ); - - // Without the password the key should not be retrievable - let store = Store::open(temp_dir.path(), None).unwrap(); - assert!(store.read().key_pair::(&pair.public()).is_err()); - - let store = Store::open(temp_dir.path(), Some(password.into())).unwrap(); - assert_eq!( - pair.public(), - store.read().key_pair::(&pair.public()).unwrap().public(), - ); - } - - #[test] - fn public_keys_are_returned() { - let temp_dir = TempDir::new().unwrap(); - let store = Store::open(temp_dir.path(), None).unwrap(); - - let mut public_keys = Vec::new(); - for i in 0..10 { - public_keys.push(store.write().generate::().unwrap().public()); - public_keys.push(store.write().insert_ephemeral_from_seed::( - &format!("0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", i), - ).unwrap().public()); - } - - // Generate a key of a different type - store.write().generate::().unwrap(); - - public_keys.sort(); - let mut store_pubs = store.read().public_keys::().unwrap(); - store_pubs.sort(); - - assert_eq!(public_keys, store_pubs); - } - - #[test] - fn store_unknown_and_extract_it() { - let temp_dir = TempDir::new().unwrap(); - let store = Store::open(temp_dir.path(), None).unwrap(); - - let secret_uri = "//Alice"; - let key_pair = sr25519::AppPair::from_string(secret_uri, None).expect("Generates key pair"); - - store.write().insert_unknown( - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); - - let store_key_pair = store.read().key_pair_by_type::( - &key_pair.public(), - SR25519, - ).expect("Gets key pair 
from keystore"); - - assert_eq!(key_pair.public(), store_key_pair.public()); - } - - #[test] - fn store_ignores_files_with_invalid_name() { - let temp_dir = TempDir::new().unwrap(); - let store = Store::open(temp_dir.path(), None).unwrap(); - - let file_name = temp_dir.path().join(hex::encode(&SR25519.0[..2])); - fs::write(file_name, "test").expect("Invalid file is written"); - - assert!( - store.read().sr25519_public_keys(SR25519).is_empty(), - ); - } + use super::*; + use sp_core::{crypto::Ss58Codec, testing::SR25519}; + use tempfile::TempDir; + + #[test] + fn basic_store() { + let temp_dir = TempDir::new().unwrap(); + let store = Store::open(temp_dir.path(), None).unwrap(); + + assert!(store + .read() + .public_keys::() + .unwrap() + .is_empty()); + + let key: ed25519::AppPair = store.write().generate().unwrap(); + let key2: ed25519::AppPair = store.read().key_pair(&key.public()).unwrap(); + + assert_eq!(key.public(), key2.public()); + + assert_eq!( + store.read().public_keys::().unwrap()[0], + key.public() + ); + } + + #[test] + fn test_insert_ephemeral_from_seed() { + let temp_dir = TempDir::new().unwrap(); + let store = Store::open(temp_dir.path(), None).unwrap(); + + let pair: ed25519::AppPair = store + .write() + .insert_ephemeral_from_seed( + "0x3d97c819d68f9bafa7d6e79cb991eebcd77d966c5334c0b94d9e1fa7ad0869dc", + ) + .unwrap(); + assert_eq!( + "5DKUrgFqCPV8iAXx9sjy1nyBygQCeiUYRFWurZGhnrn3HJCA", + pair.public().to_ss58check() + ); + + drop(store); + let store = Store::open(temp_dir.path(), None).unwrap(); + // Keys generated from seed should not be persisted! 
+ assert!(store + .read() + .key_pair::(&pair.public()) + .is_err()); + } + + #[test] + fn password_being_used() { + let password = String::from("password"); + let temp_dir = TempDir::new().unwrap(); + let store = Store::open(temp_dir.path(), Some(password.clone().into())).unwrap(); + + let pair: ed25519::AppPair = store.write().generate().unwrap(); + assert_eq!( + pair.public(), + store + .read() + .key_pair::(&pair.public()) + .unwrap() + .public(), + ); + + // Without the password the key should not be retrievable + let store = Store::open(temp_dir.path(), None).unwrap(); + assert!(store + .read() + .key_pair::(&pair.public()) + .is_err()); + + let store = Store::open(temp_dir.path(), Some(password.into())).unwrap(); + assert_eq!( + pair.public(), + store + .read() + .key_pair::(&pair.public()) + .unwrap() + .public(), + ); + } + + #[test] + fn public_keys_are_returned() { + let temp_dir = TempDir::new().unwrap(); + let store = Store::open(temp_dir.path(), None).unwrap(); + + let mut public_keys = Vec::new(); + for i in 0..10 { + public_keys.push( + store + .write() + .generate::() + .unwrap() + .public(), + ); + public_keys.push( + store + .write() + .insert_ephemeral_from_seed::(&format!( + "0x3d97c819d68f9bafa7d6e79cb991eebcd7{}d966c5334c0b94d9e1fa7ad0869dc", + i + )) + .unwrap() + .public(), + ); + } + + // Generate a key of a different type + store.write().generate::().unwrap(); + + public_keys.sort(); + let mut store_pubs = store.read().public_keys::().unwrap(); + store_pubs.sort(); + + assert_eq!(public_keys, store_pubs); + } + + #[test] + fn store_unknown_and_extract_it() { + let temp_dir = TempDir::new().unwrap(); + let store = Store::open(temp_dir.path(), None).unwrap(); + + let secret_uri = "//Alice"; + let key_pair = sr25519::AppPair::from_string(secret_uri, None).expect("Generates key pair"); + + store + .write() + .insert_unknown(SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); + + let store_key_pair = store + 
.read() + .key_pair_by_type::(&key_pair.public(), SR25519) + .expect("Gets key pair from keystore"); + + assert_eq!(key_pair.public(), store_key_pair.public()); + } + + #[test] + fn store_ignores_files_with_invalid_name() { + let temp_dir = TempDir::new().unwrap(); + let store = Store::open(temp_dir.path(), None).unwrap(); + + let file_name = temp_dir.path().join(hex::encode(&SR25519.0[..2])); + fs::write(file_name, "test").expect("Invalid file is written"); + + assert!(store.read().sr25519_public_keys(SR25519).is_empty(),); + } } diff --git a/client/network-gossip/src/bridge.rs b/client/network-gossip/src/bridge.rs index b3bfe606ba..b89babe9aa 100644 --- a/client/network-gossip/src/bridge.rs +++ b/client/network-gossip/src/bridge.rs @@ -14,237 +14,248 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::{Network, Validator}; use crate::state_machine::{ConsensusGossip, TopicNotification, PERIODIC_MAINTENANCE_INTERVAL}; +use crate::{Network, Validator}; use sc_network::{Event, ReputationChange}; use futures::prelude::*; use libp2p::PeerId; use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; -use std::{borrow::Cow, pin::Pin, sync::Arc, task::{Context, Poll}}; use sp_utils::mpsc::TracingUnboundedReceiver; +use std::{ + borrow::Cow, + pin::Pin, + sync::Arc, + task::{Context, Poll}, +}; /// Wraps around an implementation of the `Network` crate and provides gossiping capabilities on /// top of it. pub struct GossipEngine { - state_machine: ConsensusGossip, - network: Box + Send>, - periodic_maintenance_interval: futures_timer::Delay, - network_event_stream: Pin + Send>>, - engine_id: ConsensusEngineId, + state_machine: ConsensusGossip, + network: Box + Send>, + periodic_maintenance_interval: futures_timer::Delay, + network_event_stream: Pin + Send>>, + engine_id: ConsensusEngineId, } impl Unpin for GossipEngine {} impl GossipEngine { - /// Create a new instance. 
- pub fn new + Send + Clone + 'static>( - network: N, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, - validator: Arc>, - ) -> Self where B: 'static { - // We grab the event stream before registering the notifications protocol, otherwise we - // might miss events. - let network_event_stream = network.event_stream(); - network.register_notifications_protocol(engine_id, protocol_name.into()); - - GossipEngine { - state_machine: ConsensusGossip::new(validator, engine_id), - network: Box::new(network), - periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), - network_event_stream, - engine_id, - } - } - - pub fn report(&self, who: PeerId, reputation: ReputationChange) { - self.network.report_peer(who, reputation); - } - - /// Registers a message without propagating it to any peers. The message - /// becomes available to new peers or when the service is asked to gossip - /// the message's topic. No validation is performed on the message, if the - /// message is already expired it should be dropped on the next garbage - /// collection. - pub fn register_gossip_message( - &mut self, - topic: B::Hash, - message: Vec, - ) { - self.state_machine.register_message(topic, message); - } - - /// Broadcast all messages with given topic. - pub fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { - self.state_machine.broadcast_topic(&mut *self.network, topic, force); - } - - /// Get data of valid, incoming messages for a topic (but might have expired meanwhile). - pub fn messages_for(&mut self, topic: B::Hash) - -> TracingUnboundedReceiver - { - self.state_machine.messages_for(topic) - } - - /// Send all messages with given topic to a peer. - pub fn send_topic( - &mut self, - who: &PeerId, - topic: B::Hash, - force: bool - ) { - self.state_machine.send_topic(&mut *self.network, who, topic, force) - } - - /// Multicast a message to all peers. 
- pub fn gossip_message( - &mut self, - topic: B::Hash, - message: Vec, - force: bool, - ) { - self.state_machine.multicast(&mut *self.network, topic, message, force) - } - - /// Send addressed message to the given peers. The message is not kept or multicast - /// later on. - pub fn send_message(&mut self, who: Vec, data: Vec) { - for who in &who { - self.state_machine.send_message(&mut *self.network, who, data.clone()); - } - } - - /// Notify everyone we're connected to that we have the given block. - /// - /// Note: this method isn't strictly related to gossiping and should eventually be moved - /// somewhere else. - pub fn announce(&self, block: B::Hash, associated_data: Vec) { - self.network.announce(block, associated_data); - } + /// Create a new instance. + pub fn new + Send + Clone + 'static>( + network: N, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + validator: Arc>, + ) -> Self + where + B: 'static, + { + // We grab the event stream before registering the notifications protocol, otherwise we + // might miss events. + let network_event_stream = network.event_stream(); + network.register_notifications_protocol(engine_id, protocol_name.into()); + + GossipEngine { + state_machine: ConsensusGossip::new(validator, engine_id), + network: Box::new(network), + periodic_maintenance_interval: futures_timer::Delay::new(PERIODIC_MAINTENANCE_INTERVAL), + network_event_stream, + engine_id, + } + } + + pub fn report(&self, who: PeerId, reputation: ReputationChange) { + self.network.report_peer(who, reputation); + } + + /// Registers a message without propagating it to any peers. The message + /// becomes available to new peers or when the service is asked to gossip + /// the message's topic. No validation is performed on the message, if the + /// message is already expired it should be dropped on the next garbage + /// collection. 
+ pub fn register_gossip_message(&mut self, topic: B::Hash, message: Vec) { + self.state_machine.register_message(topic, message); + } + + /// Broadcast all messages with given topic. + pub fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { + self.state_machine + .broadcast_topic(&mut *self.network, topic, force); + } + + /// Get data of valid, incoming messages for a topic (but might have expired meanwhile). + pub fn messages_for(&mut self, topic: B::Hash) -> TracingUnboundedReceiver { + self.state_machine.messages_for(topic) + } + + /// Send all messages with given topic to a peer. + pub fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { + self.state_machine + .send_topic(&mut *self.network, who, topic, force) + } + + /// Multicast a message to all peers. + pub fn gossip_message(&mut self, topic: B::Hash, message: Vec, force: bool) { + self.state_machine + .multicast(&mut *self.network, topic, message, force) + } + + /// Send addressed message to the given peers. The message is not kept or multicast + /// later on. + pub fn send_message(&mut self, who: Vec, data: Vec) { + for who in &who { + self.state_machine + .send_message(&mut *self.network, who, data.clone()); + } + } + + /// Notify everyone we're connected to that we have the given block. + /// + /// Note: this method isn't strictly related to gossiping and should eventually be moved + /// somewhere else. 
+ pub fn announce(&self, block: B::Hash, associated_data: Vec) { + self.network.announce(block, associated_data); + } } impl Future for GossipEngine { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = &mut *self; - - loop { - match this.network_event_stream.poll_next_unpin(cx) { - Poll::Ready(Some(event)) => match event { - Event::NotificationStreamOpened { remote, engine_id: msg_engine_id, role } => { - if msg_engine_id != this.engine_id { - continue; - } - this.state_machine.new_peer(&mut *this.network, remote, role); - } - Event::NotificationStreamClosed { remote, engine_id: msg_engine_id } => { - if msg_engine_id != this.engine_id { - continue; - } - this.state_machine.peer_disconnected(&mut *this.network, remote); - }, - Event::NotificationsReceived { remote, messages } => { - let engine_id = this.engine_id.clone(); - this.state_machine.on_incoming( - &mut *this.network, - remote, - messages.into_iter() - .filter_map(|(engine, data)| if engine == engine_id { - Some(data.to_vec()) - } else { None }) - .collect() - ); - }, - Event::Dht(_) => {} - } - // The network event stream closed. Do the same for [`GossipValidator`]. 
- Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => break, - } - } - - while let Poll::Ready(()) = this.periodic_maintenance_interval.poll_unpin(cx) { - this.periodic_maintenance_interval.reset(PERIODIC_MAINTENANCE_INTERVAL); - this.state_machine.tick(&mut *this.network); - } - - Poll::Pending - } + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = &mut *self; + + loop { + match this.network_event_stream.poll_next_unpin(cx) { + Poll::Ready(Some(event)) => match event { + Event::NotificationStreamOpened { + remote, + engine_id: msg_engine_id, + role, + } => { + if msg_engine_id != this.engine_id { + continue; + } + this.state_machine + .new_peer(&mut *this.network, remote, role); + } + Event::NotificationStreamClosed { + remote, + engine_id: msg_engine_id, + } => { + if msg_engine_id != this.engine_id { + continue; + } + this.state_machine + .peer_disconnected(&mut *this.network, remote); + } + Event::NotificationsReceived { remote, messages } => { + let engine_id = this.engine_id.clone(); + this.state_machine.on_incoming( + &mut *this.network, + remote, + messages + .into_iter() + .filter_map(|(engine, data)| { + if engine == engine_id { + Some(data.to_vec()) + } else { + None + } + }) + .collect(), + ); + } + Event::Dht(_) => {} + }, + // The network event stream closed. Do the same for [`GossipValidator`]. 
+ Poll::Ready(None) => return Poll::Ready(()), + Poll::Pending => break, + } + } + + while let Poll::Ready(()) = this.periodic_maintenance_interval.poll_unpin(cx) { + this.periodic_maintenance_interval + .reset(PERIODIC_MAINTENANCE_INTERVAL); + this.state_machine.tick(&mut *this.network); + } + + Poll::Pending + } } #[cfg(test)] mod tests { - use super::*; - use crate::{ValidationResult, ValidatorContext}; - use substrate_test_runtime_client::runtime::Block; - - struct TestNetwork {} - - impl Network for Arc { - fn event_stream(&self) -> Pin + Send>> { - let (_tx, rx) = futures::channel::mpsc::channel(0); - - // Return rx and drop tx. Thus the given channel will yield `Poll::Ready(None)` on first - // poll. - Box::pin(rx) - } - - fn report_peer(&self, _: PeerId, _: ReputationChange) { - unimplemented!(); - } - - fn disconnect_peer(&self, _: PeerId) { - unimplemented!(); - } - - fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { - unimplemented!(); - } - - fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} - - fn announce(&self, _: B::Hash, _: Vec) { - unimplemented!(); - } - } - - struct TestValidator {} - - impl Validator for TestValidator { - fn validate( - &self, - _: &mut dyn ValidatorContext, - _: &PeerId, - _: &[u8] - ) -> ValidationResult { - unimplemented!(); - } - } - - /// Regression test for the case where the `GossipEngine.network_event_stream` closes. One - /// should not ignore a `Poll::Ready(None)` as `poll_next_unpin` will panic on subsequent calls. - /// - /// See https://github.com/paritytech/substrate/issues/5000 for details. 
- #[test] - fn returns_when_network_event_stream_closes() { - let mut gossip_engine = GossipEngine::::new( - Arc::new(TestNetwork{}), - [1, 2, 3, 4], - "my_protocol".as_bytes(), - Arc::new(TestValidator{}), - ); - - futures::executor::block_on(futures::future::poll_fn(move |ctx| { - if let Poll::Pending = gossip_engine.poll_unpin(ctx) { - panic!( - "Expected gossip engine to finish on first poll, given that \ + use super::*; + use crate::{ValidationResult, ValidatorContext}; + use substrate_test_runtime_client::runtime::Block; + + struct TestNetwork {} + + impl Network for Arc { + fn event_stream(&self) -> Pin + Send>> { + let (_tx, rx) = futures::channel::mpsc::channel(0); + + // Return rx and drop tx. Thus the given channel will yield `Poll::Ready(None)` on first + // poll. + Box::pin(rx) + } + + fn report_peer(&self, _: PeerId, _: ReputationChange) { + unimplemented!(); + } + + fn disconnect_peer(&self, _: PeerId) { + unimplemented!(); + } + + fn write_notification(&self, _: PeerId, _: ConsensusEngineId, _: Vec) { + unimplemented!(); + } + + fn register_notifications_protocol(&self, _: ConsensusEngineId, _: Cow<'static, [u8]>) {} + + fn announce(&self, _: B::Hash, _: Vec) { + unimplemented!(); + } + } + + struct TestValidator {} + + impl Validator for TestValidator { + fn validate( + &self, + _: &mut dyn ValidatorContext, + _: &PeerId, + _: &[u8], + ) -> ValidationResult { + unimplemented!(); + } + } + + /// Regression test for the case where the `GossipEngine.network_event_stream` closes. One + /// should not ignore a `Poll::Ready(None)` as `poll_next_unpin` will panic on subsequent calls. + /// + /// See https://github.com/paritytech/substrate/issues/5000 for details. 
+ #[test] + fn returns_when_network_event_stream_closes() { + let mut gossip_engine = GossipEngine::::new( + Arc::new(TestNetwork {}), + [1, 2, 3, 4], + "my_protocol".as_bytes(), + Arc::new(TestValidator {}), + ); + + futures::executor::block_on(futures::future::poll_fn(move |ctx| { + if let Poll::Pending = gossip_engine.poll_unpin(ctx) { + panic!( + "Expected gossip engine to finish on first poll, given that \ `GossipEngine.network_event_stream` closes right away." - ) - } - Poll::Ready(()) - })) - } + ) + } + Poll::Ready(()) + })) + } } diff --git a/client/network-gossip/src/lib.rs b/client/network-gossip/src/lib.rs index 42aeca86cb..8188de6807 100644 --- a/client/network-gossip/src/lib.rs +++ b/client/network-gossip/src/lib.rs @@ -56,7 +56,9 @@ pub use self::bridge::GossipEngine; pub use self::state_machine::TopicNotification; -pub use self::validator::{DiscardAll, MessageIntent, Validator, ValidatorContext, ValidationResult}; +pub use self::validator::{ + DiscardAll, MessageIntent, ValidationResult, Validator, ValidatorContext, +}; use futures::prelude::*; use sc_network::{Event, ExHashT, NetworkService, PeerId, ReputationChange}; @@ -69,60 +71,60 @@ mod validator; /// Abstraction over a network. pub trait Network { - /// Returns a stream of events representing what happens on the network. - fn event_stream(&self) -> Pin + Send>>; - - /// Adjust the reputation of a node. - fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); - - /// Force-disconnect a peer. - fn disconnect_peer(&self, who: PeerId); - - /// Send a notification to a peer. - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec); - - /// Registers a notifications protocol. - /// - /// See the documentation of [`NetworkService:register_notifications_protocol`] for more information. 
- fn register_notifications_protocol( - &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, [u8]>, - ); - - /// Notify everyone we're connected to that we have the given block. - /// - /// Note: this method isn't strictly related to gossiping and should eventually be moved - /// somewhere else. - fn announce(&self, block: B::Hash, associated_data: Vec); + /// Returns a stream of events representing what happens on the network. + fn event_stream(&self) -> Pin + Send>>; + + /// Adjust the reputation of a node. + fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange); + + /// Force-disconnect a peer. + fn disconnect_peer(&self, who: PeerId); + + /// Send a notification to a peer. + fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec); + + /// Registers a notifications protocol. + /// + /// See the documentation of [`NetworkService:register_notifications_protocol`] for more information. + fn register_notifications_protocol( + &self, + engine_id: ConsensusEngineId, + protocol_name: Cow<'static, [u8]>, + ); + + /// Notify everyone we're connected to that we have the given block. + /// + /// Note: this method isn't strictly related to gossiping and should eventually be moved + /// somewhere else. 
+ fn announce(&self, block: B::Hash, associated_data: Vec); } impl Network for Arc> { - fn event_stream(&self) -> Pin + Send>> { - Box::pin(NetworkService::event_stream(self, "network-gossip")) - } - - fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange) { - NetworkService::report_peer(self, peer_id, reputation); - } - - fn disconnect_peer(&self, who: PeerId) { - NetworkService::disconnect_peer(self, who) - } - - fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec) { - NetworkService::write_notification(self, who, engine_id, message) - } - - fn register_notifications_protocol( - &self, - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, [u8]>, - ) { - NetworkService::register_notifications_protocol(self, engine_id, protocol_name) - } - - fn announce(&self, block: B::Hash, associated_data: Vec) { - NetworkService::announce_block(self, block, associated_data) - } + fn event_stream(&self) -> Pin + Send>> { + Box::pin(NetworkService::event_stream(self, "network-gossip")) + } + + fn report_peer(&self, peer_id: PeerId, reputation: ReputationChange) { + NetworkService::report_peer(self, peer_id, reputation); + } + + fn disconnect_peer(&self, who: PeerId) { + NetworkService::disconnect_peer(self, who) + } + + fn write_notification(&self, who: PeerId, engine_id: ConsensusEngineId, message: Vec) { + NetworkService::write_notification(self, who, engine_id, message) + } + + fn register_notifications_protocol( + &self, + engine_id: ConsensusEngineId, + protocol_name: Cow<'static, [u8]>, + ) { + NetworkService::register_notifications_protocol(self, engine_id, protocol_name) + } + + fn announce(&self, block: B::Hash, associated_data: Vec) { + NetworkService::announce_block(self, block, associated_data) + } } diff --git a/client/network-gossip/src/state_machine.rs b/client/network-gossip/src/state_machine.rs index d93003fcfb..af164bedfd 100644 --- a/client/network-gossip/src/state_machine.rs +++ 
b/client/network-gossip/src/state_machine.rs @@ -14,19 +14,19 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::{Network, MessageIntent, Validator, ValidatorContext, ValidationResult}; +use crate::{MessageIntent, Network, ValidationResult, Validator, ValidatorContext}; -use std::collections::{HashMap, HashSet, hash_map::Entry}; -use std::sync::Arc; -use std::iter; -use std::time; +use libp2p::PeerId; use log::trace; use lru::LruCache; -use libp2p::PeerId; +use sc_network::ObservedRole; use sp_runtime::traits::{Block as BlockT, Hash, HashFor}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; use sp_runtime::ConsensusEngineId; -use sc_network::ObservedRole; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::collections::{hash_map::Entry, HashMap, HashSet}; +use std::iter; +use std::sync::Arc; +use std::time; use wasm_timer::Instant; // FIXME: Add additional spam/DoS attack protection: https://github.com/paritytech/substrate/issues/1115 @@ -37,307 +37,334 @@ const REBROADCAST_INTERVAL: time::Duration = time::Duration::from_secs(30); pub(crate) const PERIODIC_MAINTENANCE_INTERVAL: time::Duration = time::Duration::from_millis(1100); mod rep { - use sc_network::ReputationChange as Rep; - /// Reputation change when a peer sends us a gossip message that we didn't know about. - pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successfull gossip"); - /// Reputation change when a peer sends us a gossip message that we already knew about. - pub const DUPLICATE_GOSSIP: Rep = Rep::new(-(1 << 2), "Duplicate gossip"); - /// Reputation change when a peer sends a message from a topic it isn't registered on. 
- pub const UNREGISTERED_TOPIC: Rep = Rep::new(-(1 << 10), "Unregistered gossip message topic"); + use sc_network::ReputationChange as Rep; + /// Reputation change when a peer sends us a gossip message that we didn't know about. + pub const GOSSIP_SUCCESS: Rep = Rep::new(1 << 4, "Successfull gossip"); + /// Reputation change when a peer sends us a gossip message that we already knew about. + pub const DUPLICATE_GOSSIP: Rep = Rep::new(-(1 << 2), "Duplicate gossip"); + /// Reputation change when a peer sends a message from a topic it isn't registered on. + pub const UNREGISTERED_TOPIC: Rep = Rep::new(-(1 << 10), "Unregistered gossip message topic"); } struct PeerConsensus { - known_messages: HashSet, + known_messages: HashSet, } /// Topic stream message with sender. #[derive(Debug, Eq, PartialEq)] pub struct TopicNotification { - /// Message data. - pub message: Vec, - /// Sender if available. - pub sender: Option, + /// Message data. + pub message: Vec, + /// Sender if available. + pub sender: Option, } struct MessageEntry { - message_hash: B::Hash, - topic: B::Hash, - message: Vec, - sender: Option, + message_hash: B::Hash, + topic: B::Hash, + message: Vec, + sender: Option, } /// Local implementation of `ValidatorContext`. struct NetworkContext<'g, 'p, B: BlockT> { - gossip: &'g mut ConsensusGossip, - network: &'p mut dyn Network, + gossip: &'g mut ConsensusGossip, + network: &'p mut dyn Network, } impl<'g, 'p, B: BlockT> ValidatorContext for NetworkContext<'g, 'p, B> { - /// Broadcast all messages with given topic to peers that do not have it yet. - fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { - self.gossip.broadcast_topic(self.network, topic, force); - } - - /// Broadcast a message to all peers that have not received it previously. - fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { - self.gossip.multicast( - self.network, - topic, - message, - force, - ); - } - - /// Send addressed message to a peer. 
- fn send_message(&mut self, who: &PeerId, message: Vec) { - self.network.write_notification(who.clone(), self.gossip.engine_id, message); - } - - /// Send all messages with given topic to a peer. - fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { - self.gossip.send_topic(self.network, who, topic, force); - } + /// Broadcast all messages with given topic to peers that do not have it yet. + fn broadcast_topic(&mut self, topic: B::Hash, force: bool) { + self.gossip.broadcast_topic(self.network, topic, force); + } + + /// Broadcast a message to all peers that have not received it previously. + fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool) { + self.gossip.multicast(self.network, topic, message, force); + } + + /// Send addressed message to a peer. + fn send_message(&mut self, who: &PeerId, message: Vec) { + self.network + .write_notification(who.clone(), self.gossip.engine_id, message); + } + + /// Send all messages with given topic to a peer. + fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool) { + self.gossip.send_topic(self.network, who, topic, force); + } } fn propagate<'a, B: BlockT, I>( - network: &mut dyn Network, - engine_id: ConsensusEngineId, - messages: I, - intent: MessageIntent, - peers: &mut HashMap>, - validator: &Arc>, + network: &mut dyn Network, + engine_id: ConsensusEngineId, + messages: I, + intent: MessageIntent, + peers: &mut HashMap>, + validator: &Arc>, ) - // (msg_hash, topic, message) - where I: Clone + IntoIterator)>, +// (msg_hash, topic, message) +where + I: Clone + IntoIterator)>, { - let mut message_allowed = validator.message_allowed(); - - for (id, ref mut peer) in peers.iter_mut() { - for (message_hash, topic, message) in messages.clone() { - let intent = match intent { - MessageIntent::Broadcast { .. 
} => - if peer.known_messages.contains(&message_hash) { - continue; - } else { - MessageIntent::Broadcast - }, - MessageIntent::PeriodicRebroadcast => - if peer.known_messages.contains(&message_hash) { - MessageIntent::PeriodicRebroadcast - } else { - // peer doesn't know message, so the logic should treat it as an - // initial broadcast. - MessageIntent::Broadcast - }, - other => other, - }; - - if !message_allowed(id, intent, &topic, &message) { - continue; - } - - peer.known_messages.insert(message_hash.clone()); - - trace!(target: "gossip", "Propagating to {}: {:?}", id, message); - network.write_notification(id.clone(), engine_id, message.clone()); - } - } + let mut message_allowed = validator.message_allowed(); + + for (id, ref mut peer) in peers.iter_mut() { + for (message_hash, topic, message) in messages.clone() { + let intent = match intent { + MessageIntent::Broadcast { .. } => { + if peer.known_messages.contains(&message_hash) { + continue; + } else { + MessageIntent::Broadcast + } + } + MessageIntent::PeriodicRebroadcast => { + if peer.known_messages.contains(&message_hash) { + MessageIntent::PeriodicRebroadcast + } else { + // peer doesn't know message, so the logic should treat it as an + // initial broadcast. + MessageIntent::Broadcast + } + } + other => other, + }; + + if !message_allowed(id, intent, &topic, &message) { + continue; + } + + peer.known_messages.insert(message_hash.clone()); + + trace!(target: "gossip", "Propagating to {}: {:?}", id, message); + network.write_notification(id.clone(), engine_id, message.clone()); + } + } } /// Consensus network protocol handler. Manages statements and candidate requests. 
pub struct ConsensusGossip { - peers: HashMap>, - live_message_sinks: HashMap>>, - messages: Vec>, - known_messages: LruCache, - engine_id: ConsensusEngineId, - validator: Arc>, - next_broadcast: Instant, + peers: HashMap>, + live_message_sinks: HashMap>>, + messages: Vec>, + known_messages: LruCache, + engine_id: ConsensusEngineId, + validator: Arc>, + next_broadcast: Instant, } impl ConsensusGossip { - /// Create a new instance using the given validator. - pub fn new(validator: Arc>, engine_id: ConsensusEngineId) -> Self { - ConsensusGossip { - peers: HashMap::new(), - live_message_sinks: HashMap::new(), - messages: Default::default(), - known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), - engine_id, - validator, - next_broadcast: Instant::now() + REBROADCAST_INTERVAL, - } - } - - /// Handle new connected peer. - pub fn new_peer(&mut self, network: &mut dyn Network, who: PeerId, role: ObservedRole) { - // light nodes are not valid targets for consensus gossip messages - if role.is_light() { - return; - } - - trace!(target:"gossip", "Registering {:?} {}", role, who); - self.peers.insert(who.clone(), PeerConsensus { - known_messages: HashSet::new(), - }); - - let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; - validator.new_peer(&mut context, &who, role.clone()); - } - - fn register_message_hashed( - &mut self, - message_hash: B::Hash, - topic: B::Hash, - message: Vec, - sender: Option, - ) { - if self.known_messages.put(message_hash.clone(), ()).is_none() { - self.messages.push(MessageEntry { - message_hash, - topic, - message, - sender, - }); - } - } - - /// Registers a message without propagating it to any peers. The message - /// becomes available to new peers or when the service is asked to gossip - /// the message's topic. No validation is performed on the message, if the - /// message is already expired it should be dropped on the next garbage - /// collection. 
- pub fn register_message( - &mut self, - topic: B::Hash, - message: Vec, - ) { - let message_hash = HashFor::::hash(&message[..]); - self.register_message_hashed(message_hash, topic, message, None); - } - - /// Call when a peer has been disconnected to stop tracking gossip status. - pub fn peer_disconnected(&mut self, network: &mut dyn Network, who: PeerId) { - let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; - validator.peer_disconnected(&mut context, &who); - self.peers.remove(&who); - } - - /// Perform periodic maintenance - pub fn tick(&mut self, network: &mut dyn Network) { - self.collect_garbage(); - if Instant::now() >= self.next_broadcast { - self.rebroadcast(network); - self.next_broadcast = Instant::now() + REBROADCAST_INTERVAL; - } - } - - /// Rebroadcast all messages to all peers. - fn rebroadcast(&mut self, network: &mut dyn Network) { - let messages = self.messages.iter() - .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); - propagate(network, self.engine_id, messages, MessageIntent::PeriodicRebroadcast, &mut self.peers, &self.validator); - } - - /// Broadcast all messages with given topic. - pub fn broadcast_topic(&mut self, network: &mut dyn Network, topic: B::Hash, force: bool) { - let messages = self.messages.iter() - .filter_map(|entry| - if entry.topic == topic { - Some((&entry.message_hash, &entry.topic, &entry.message)) - } else { None } - ); - let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, messages, intent, &mut self.peers, &self.validator); - } - - /// Prune old or no longer relevant consensus messages. Provide a predicate - /// for pruning, which returns `false` when the items with a given topic should be pruned. 
- pub fn collect_garbage(&mut self) { - self.live_message_sinks.retain(|_, sinks| { - sinks.retain(|sink| !sink.is_closed()); - !sinks.is_empty() - }); - - let known_messages = &mut self.known_messages; - let before = self.messages.len(); - - let mut message_expired = self.validator.message_expired(); - self.messages.retain(|entry| !message_expired(entry.topic, &entry.message)); - - trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", - before - self.messages.len(), - self.messages.len(), - known_messages.len(), - ); - - for (_, ref mut peer) in self.peers.iter_mut() { - peer.known_messages.retain(|h| known_messages.contains(h)); - } - } - - /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) - pub fn messages_for(&mut self, topic: B::Hash) - -> TracingUnboundedReceiver - { - let (tx, rx) = tracing_unbounded("mpsc_gossip_messages_for"); - for entry in self.messages.iter_mut().filter(|e| e.topic == topic) { - tx.unbounded_send(TopicNotification { - message: entry.message.clone(), - sender: entry.sender.clone(), - }) - .expect("receiver known to be live; qed"); - } - - self.live_message_sinks.entry(topic).or_default().push(tx); - - rx - } - - /// Handle an incoming message for topic by who via protocol. Discard message if topic already - /// known, the message is old, its source peers isn't a registered peer or the connection to - /// them is broken. 
- pub fn on_incoming( - &mut self, - network: &mut dyn Network, - who: PeerId, - messages: Vec>, - ) { - if !messages.is_empty() { - trace!(target: "gossip", "Received {} messages from peer {}", messages.len(), who); - } - - for message in messages { - let message_hash = HashFor::::hash(&message[..]); - - if self.known_messages.contains(&message_hash) { - trace!(target:"gossip", "Ignored already known message from {}", who); - network.report_peer(who.clone(), rep::DUPLICATE_GOSSIP); - continue; - } - - // validate the message - let validation = { - let validator = self.validator.clone(); - let mut context = NetworkContext { gossip: self, network }; - validator.validate(&mut context, &who, &message) - }; - - let validation_result = match validation { - ValidationResult::ProcessAndKeep(topic) => Some((topic, true)), - ValidationResult::ProcessAndDiscard(topic) => Some((topic, false)), - ValidationResult::Discard => None, - }; - - if let Some((topic, keep)) = validation_result { - network.report_peer(who.clone(), rep::GOSSIP_SUCCESS); - if let Some(ref mut peer) = self.peers.get_mut(&who) { - peer.known_messages.insert(message_hash); - if let Entry::Occupied(mut entry) = self.live_message_sinks.entry(topic) { - trace!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); - entry.get_mut().retain(|sink| { + /// Create a new instance using the given validator. + pub fn new(validator: Arc>, engine_id: ConsensusEngineId) -> Self { + ConsensusGossip { + peers: HashMap::new(), + live_message_sinks: HashMap::new(), + messages: Default::default(), + known_messages: LruCache::new(KNOWN_MESSAGES_CACHE_SIZE), + engine_id, + validator, + next_broadcast: Instant::now() + REBROADCAST_INTERVAL, + } + } + + /// Handle new connected peer. 
+ pub fn new_peer(&mut self, network: &mut dyn Network, who: PeerId, role: ObservedRole) { + // light nodes are not valid targets for consensus gossip messages + if role.is_light() { + return; + } + + trace!(target:"gossip", "Registering {:?} {}", role, who); + self.peers.insert( + who.clone(), + PeerConsensus { + known_messages: HashSet::new(), + }, + ); + + let validator = self.validator.clone(); + let mut context = NetworkContext { + gossip: self, + network, + }; + validator.new_peer(&mut context, &who, role.clone()); + } + + fn register_message_hashed( + &mut self, + message_hash: B::Hash, + topic: B::Hash, + message: Vec, + sender: Option, + ) { + if self.known_messages.put(message_hash.clone(), ()).is_none() { + self.messages.push(MessageEntry { + message_hash, + topic, + message, + sender, + }); + } + } + + /// Registers a message without propagating it to any peers. The message + /// becomes available to new peers or when the service is asked to gossip + /// the message's topic. No validation is performed on the message, if the + /// message is already expired it should be dropped on the next garbage + /// collection. + pub fn register_message(&mut self, topic: B::Hash, message: Vec) { + let message_hash = HashFor::::hash(&message[..]); + self.register_message_hashed(message_hash, topic, message, None); + } + + /// Call when a peer has been disconnected to stop tracking gossip status. + pub fn peer_disconnected(&mut self, network: &mut dyn Network, who: PeerId) { + let validator = self.validator.clone(); + let mut context = NetworkContext { + gossip: self, + network, + }; + validator.peer_disconnected(&mut context, &who); + self.peers.remove(&who); + } + + /// Perform periodic maintenance + pub fn tick(&mut self, network: &mut dyn Network) { + self.collect_garbage(); + if Instant::now() >= self.next_broadcast { + self.rebroadcast(network); + self.next_broadcast = Instant::now() + REBROADCAST_INTERVAL; + } + } + + /// Rebroadcast all messages to all peers. 
+ fn rebroadcast(&mut self, network: &mut dyn Network) { + let messages = self + .messages + .iter() + .map(|entry| (&entry.message_hash, &entry.topic, &entry.message)); + propagate( + network, + self.engine_id, + messages, + MessageIntent::PeriodicRebroadcast, + &mut self.peers, + &self.validator, + ); + } + + /// Broadcast all messages with given topic. + pub fn broadcast_topic(&mut self, network: &mut dyn Network, topic: B::Hash, force: bool) { + let messages = self.messages.iter().filter_map(|entry| { + if entry.topic == topic { + Some((&entry.message_hash, &entry.topic, &entry.message)) + } else { + None + } + }); + let intent = if force { + MessageIntent::ForcedBroadcast + } else { + MessageIntent::Broadcast + }; + propagate( + network, + self.engine_id, + messages, + intent, + &mut self.peers, + &self.validator, + ); + } + + /// Prune old or no longer relevant consensus messages. Provide a predicate + /// for pruning, which returns `false` when the items with a given topic should be pruned. 
+ pub fn collect_garbage(&mut self) { + self.live_message_sinks.retain(|_, sinks| { + sinks.retain(|sink| !sink.is_closed()); + !sinks.is_empty() + }); + + let known_messages = &mut self.known_messages; + let before = self.messages.len(); + + let mut message_expired = self.validator.message_expired(); + self.messages + .retain(|entry| !message_expired(entry.topic, &entry.message)); + + trace!(target: "gossip", "Cleaned up {} stale messages, {} left ({} known)", + before - self.messages.len(), + self.messages.len(), + known_messages.len(), + ); + + for (_, ref mut peer) in self.peers.iter_mut() { + peer.known_messages.retain(|h| known_messages.contains(h)); + } + } + + /// Get data of valid, incoming messages for a topic (but might have expired meanwhile) + pub fn messages_for(&mut self, topic: B::Hash) -> TracingUnboundedReceiver { + let (tx, rx) = tracing_unbounded("mpsc_gossip_messages_for"); + for entry in self.messages.iter_mut().filter(|e| e.topic == topic) { + tx.unbounded_send(TopicNotification { + message: entry.message.clone(), + sender: entry.sender.clone(), + }) + .expect("receiver known to be live; qed"); + } + + self.live_message_sinks.entry(topic).or_default().push(tx); + + rx + } + + /// Handle an incoming message for topic by who via protocol. Discard message if topic already + /// known, the message is old, its source peers isn't a registered peer or the connection to + /// them is broken. 
+ pub fn on_incoming( + &mut self, + network: &mut dyn Network, + who: PeerId, + messages: Vec>, + ) { + if !messages.is_empty() { + trace!(target: "gossip", "Received {} messages from peer {}", messages.len(), who); + } + + for message in messages { + let message_hash = HashFor::::hash(&message[..]); + + if self.known_messages.contains(&message_hash) { + trace!(target:"gossip", "Ignored already known message from {}", who); + network.report_peer(who.clone(), rep::DUPLICATE_GOSSIP); + continue; + } + + // validate the message + let validation = { + let validator = self.validator.clone(); + let mut context = NetworkContext { + gossip: self, + network, + }; + validator.validate(&mut context, &who, &message) + }; + + let validation_result = match validation { + ValidationResult::ProcessAndKeep(topic) => Some((topic, true)), + ValidationResult::ProcessAndDiscard(topic) => Some((topic, false)), + ValidationResult::Discard => None, + }; + + if let Some((topic, keep)) = validation_result { + network.report_peer(who.clone(), rep::GOSSIP_SUCCESS); + if let Some(ref mut peer) = self.peers.get_mut(&who) { + peer.known_messages.insert(message_hash); + if let Entry::Occupied(mut entry) = self.live_message_sinks.entry(topic) { + trace!(target: "gossip", "Pushing consensus message to sinks for {}.", topic); + entry.get_mut().retain(|sink| { if let Err(e) = sink.unbounded_send(TopicNotification { message: message.clone(), sender: Some(who.clone()) @@ -346,264 +373,293 @@ impl ConsensusGossip { } !sink.is_closed() }); - if entry.get().is_empty() { - entry.remove_entry(); - } - } - if keep { - self.register_message_hashed(message_hash, topic, message, Some(who.clone())); - } - } else { - trace!(target:"gossip", "Ignored statement from unregistered peer {}", who); - network.report_peer(who.clone(), rep::UNREGISTERED_TOPIC); - } - } else { - trace!(target:"gossip", "Discard message from peer {}", who); - } - } - } - - /// Send all messages with given topic to a peer. 
- pub fn send_topic( - &mut self, - network: &mut dyn Network, - who: &PeerId, - topic: B::Hash, - force: bool - ) { - let mut message_allowed = self.validator.message_allowed(); - - if let Some(ref mut peer) = self.peers.get_mut(who) { - for entry in self.messages.iter().filter(|m| m.topic == topic) { - let intent = if force { - MessageIntent::ForcedBroadcast - } else { - MessageIntent::Broadcast - }; - - if !force && peer.known_messages.contains(&entry.message_hash) { - continue; - } - - if !message_allowed(who, intent, &entry.topic, &entry.message) { - continue; - } - - peer.known_messages.insert(entry.message_hash.clone()); - - trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); - network.write_notification(who.clone(), self.engine_id, entry.message.clone()); - } - } - } - - /// Multicast a message to all peers. - pub fn multicast( - &mut self, - network: &mut dyn Network, - topic: B::Hash, - message: Vec, - force: bool, - ) { - let message_hash = HashFor::::hash(&message); - self.register_message_hashed(message_hash, topic, message.clone(), None); - let intent = if force { MessageIntent::ForcedBroadcast } else { MessageIntent::Broadcast }; - propagate(network, self.engine_id, iter::once((&message_hash, &topic, &message)), intent, &mut self.peers, &self.validator); - } - - /// Send addressed message to a peer. The message is not kept or multicast - /// later on. 
- pub fn send_message( - &mut self, - network: &mut dyn Network, - who: &PeerId, - message: Vec, - ) { - let peer = match self.peers.get_mut(who) { - None => return, - Some(peer) => peer, - }; - - let message_hash = HashFor::::hash(&message); - - trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); - - peer.known_messages.insert(message_hash); - network.write_notification(who.clone(), self.engine_id, message); - } + if entry.get().is_empty() { + entry.remove_entry(); + } + } + if keep { + self.register_message_hashed( + message_hash, + topic, + message, + Some(who.clone()), + ); + } + } else { + trace!(target:"gossip", "Ignored statement from unregistered peer {}", who); + network.report_peer(who.clone(), rep::UNREGISTERED_TOPIC); + } + } else { + trace!(target:"gossip", "Discard message from peer {}", who); + } + } + } + + /// Send all messages with given topic to a peer. + pub fn send_topic( + &mut self, + network: &mut dyn Network, + who: &PeerId, + topic: B::Hash, + force: bool, + ) { + let mut message_allowed = self.validator.message_allowed(); + + if let Some(ref mut peer) = self.peers.get_mut(who) { + for entry in self.messages.iter().filter(|m| m.topic == topic) { + let intent = if force { + MessageIntent::ForcedBroadcast + } else { + MessageIntent::Broadcast + }; + + if !force && peer.known_messages.contains(&entry.message_hash) { + continue; + } + + if !message_allowed(who, intent, &entry.topic, &entry.message) { + continue; + } + + peer.known_messages.insert(entry.message_hash.clone()); + + trace!(target: "gossip", "Sending topic message to {}: {:?}", who, entry.message); + network.write_notification(who.clone(), self.engine_id, entry.message.clone()); + } + } + } + + /// Multicast a message to all peers. 
+ pub fn multicast( + &mut self, + network: &mut dyn Network, + topic: B::Hash, + message: Vec, + force: bool, + ) { + let message_hash = HashFor::::hash(&message); + self.register_message_hashed(message_hash, topic, message.clone(), None); + let intent = if force { + MessageIntent::ForcedBroadcast + } else { + MessageIntent::Broadcast + }; + propagate( + network, + self.engine_id, + iter::once((&message_hash, &topic, &message)), + intent, + &mut self.peers, + &self.validator, + ); + } + + /// Send addressed message to a peer. The message is not kept or multicast + /// later on. + pub fn send_message(&mut self, network: &mut dyn Network, who: &PeerId, message: Vec) { + let peer = match self.peers.get_mut(who) { + None => return, + Some(peer) => peer, + }; + + let message_hash = HashFor::::hash(&message); + + trace!(target: "gossip", "Sending direct to {}: {:?}", who, message); + + peer.known_messages.insert(message_hash); + network.write_notification(who.clone(), self.engine_id, message); + } } #[cfg(test)] mod tests { - use std::sync::Arc; - use sp_runtime::testing::{H256, Block as RawBlock, ExtrinsicWrapper}; - use futures::executor::block_on_stream; - - use super::*; - - type Block = RawBlock>; - - macro_rules! 
push_msg { - ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { - if $consensus.known_messages.put($hash, ()).is_none() { - $consensus.messages.push(MessageEntry { - message_hash: $hash, - topic: $topic, - message: $m, - sender: None, - }); - } - } - } - - struct AllowAll; - impl Validator for AllowAll { - fn validate( - &self, - _context: &mut dyn ValidatorContext, - _sender: &PeerId, - _data: &[u8], - ) -> ValidationResult { - ValidationResult::ProcessAndKeep(H256::default()) - } - } - - #[test] - fn collects_garbage() { - struct AllowOne; - impl Validator for AllowOne { - fn validate( - &self, - _context: &mut dyn ValidatorContext, - _sender: &PeerId, - data: &[u8], - ) -> ValidationResult { - if data[0] == 1 { - ValidationResult::ProcessAndKeep(H256::default()) - } else { - ValidationResult::Discard - } - } - - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_topic, data| data[0] != 1) - } - } - - let prev_hash = H256::random(); - let best_hash = H256::random(); - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); - let m1_hash = H256::random(); - let m2_hash = H256::random(); - let m1 = vec![1, 2, 3]; - let m2 = vec![4, 5, 6]; - - push_msg!(consensus, prev_hash, m1_hash, m1); - push_msg!(consensus, best_hash, m2_hash, m2); - consensus.known_messages.put(m1_hash, ()); - consensus.known_messages.put(m2_hash, ()); - - consensus.collect_garbage(); - assert_eq!(consensus.messages.len(), 2); - assert_eq!(consensus.known_messages.len(), 2); - - consensus.validator = Arc::new(AllowOne); - - // m2 is expired - consensus.collect_garbage(); - assert_eq!(consensus.messages.len(), 1); - // known messages are only pruned based on size. 
- assert_eq!(consensus.known_messages.len(), 2); - assert!(consensus.known_messages.contains(&m2_hash)); - } - - #[test] - fn message_stream_include_those_sent_before_asking_for_stream() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); - - let message = vec![4, 5, 6]; - let topic = HashFor::::hash(&[1,2,3]); - - consensus.register_message(topic, message.clone()); - let mut stream = block_on_stream(consensus.messages_for(topic)); - - assert_eq!(stream.next(), Some(TopicNotification { message: message, sender: None })); - } - - #[test] - fn can_keep_multiple_messages_per_topic() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); - - let topic = [1; 32].into(); - let msg_a = vec![1, 2, 3]; - let msg_b = vec![4, 5, 6]; - - consensus.register_message(topic, msg_a); - consensus.register_message(topic, msg_b); - - assert_eq!(consensus.messages.len(), 2); - } - - #[test] - fn can_keep_multiple_subscribers_per_topic() { - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); - - let message = vec![4, 5, 6]; - let topic = HashFor::::hash(&[1, 2, 3]); - - consensus.register_message(topic, message.clone()); - - let mut stream1 = block_on_stream(consensus.messages_for(topic)); - let mut stream2 = block_on_stream(consensus.messages_for(topic)); - - assert_eq!(stream1.next(), Some(TopicNotification { message: message.clone(), sender: None })); - assert_eq!(stream2.next(), Some(TopicNotification { message, sender: None })); - } - - #[test] - fn peer_is_removed_on_disconnect() { - struct TestNetwork; - impl Network for TestNetwork { - fn event_stream( - &self, - ) -> std::pin::Pin + Send>> { - unimplemented!("Not required in tests") - } - - fn report_peer(&self, _: PeerId, _: crate::ReputationChange) { - unimplemented!("Not required in tests") - } - - fn disconnect_peer(&self, _: PeerId) { - unimplemented!("Not required in tests") - } - - fn write_notification(&self, _: PeerId, _: 
crate::ConsensusEngineId, _: Vec) { - unimplemented!("Not required in tests") - } - - fn register_notifications_protocol( - &self, - _: ConsensusEngineId, - _: std::borrow::Cow<'static, [u8]>, - ) { - unimplemented!("Not required in tests") - } - - fn announce(&self, _: H256, _: Vec) { - unimplemented!("Not required in tests") - } - } - - let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); - - let mut network = TestNetwork; - - let peer_id = PeerId::random(); - consensus.new_peer(&mut network, peer_id.clone(), ObservedRole::Full); - assert!(consensus.peers.contains_key(&peer_id)); - - consensus.peer_disconnected(&mut network, peer_id.clone()); - assert!(!consensus.peers.contains_key(&peer_id)); - } + use futures::executor::block_on_stream; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper, H256}; + use std::sync::Arc; + + use super::*; + + type Block = RawBlock>; + + macro_rules! push_msg { + ($consensus:expr, $topic:expr, $hash: expr, $m:expr) => { + if $consensus.known_messages.put($hash, ()).is_none() { + $consensus.messages.push(MessageEntry { + message_hash: $hash, + topic: $topic, + message: $m, + sender: None, + }); + } + }; + } + + struct AllowAll; + impl Validator for AllowAll { + fn validate( + &self, + _context: &mut dyn ValidatorContext, + _sender: &PeerId, + _data: &[u8], + ) -> ValidationResult { + ValidationResult::ProcessAndKeep(H256::default()) + } + } + + #[test] + fn collects_garbage() { + struct AllowOne; + impl Validator for AllowOne { + fn validate( + &self, + _context: &mut dyn ValidatorContext, + _sender: &PeerId, + data: &[u8], + ) -> ValidationResult { + if data[0] == 1 { + ValidationResult::ProcessAndKeep(H256::default()) + } else { + ValidationResult::Discard + } + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_topic, data| data[0] != 1) + } + } + + let prev_hash = H256::random(); + let best_hash = H256::random(); + let mut consensus = 
ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + let m1_hash = H256::random(); + let m2_hash = H256::random(); + let m1 = vec![1, 2, 3]; + let m2 = vec![4, 5, 6]; + + push_msg!(consensus, prev_hash, m1_hash, m1); + push_msg!(consensus, best_hash, m2_hash, m2); + consensus.known_messages.put(m1_hash, ()); + consensus.known_messages.put(m2_hash, ()); + + consensus.collect_garbage(); + assert_eq!(consensus.messages.len(), 2); + assert_eq!(consensus.known_messages.len(), 2); + + consensus.validator = Arc::new(AllowOne); + + // m2 is expired + consensus.collect_garbage(); + assert_eq!(consensus.messages.len(), 1); + // known messages are only pruned based on size. + assert_eq!(consensus.known_messages.len(), 2); + assert!(consensus.known_messages.contains(&m2_hash)); + } + + #[test] + fn message_stream_include_those_sent_before_asking_for_stream() { + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + + let message = vec![4, 5, 6]; + let topic = HashFor::::hash(&[1, 2, 3]); + + consensus.register_message(topic, message.clone()); + let mut stream = block_on_stream(consensus.messages_for(topic)); + + assert_eq!( + stream.next(), + Some(TopicNotification { + message: message, + sender: None + }) + ); + } + + #[test] + fn can_keep_multiple_messages_per_topic() { + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + + let topic = [1; 32].into(); + let msg_a = vec![1, 2, 3]; + let msg_b = vec![4, 5, 6]; + + consensus.register_message(topic, msg_a); + consensus.register_message(topic, msg_b); + + assert_eq!(consensus.messages.len(), 2); + } + + #[test] + fn can_keep_multiple_subscribers_per_topic() { + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + + let message = vec![4, 5, 6]; + let topic = HashFor::::hash(&[1, 2, 3]); + + consensus.register_message(topic, message.clone()); + + let mut stream1 = block_on_stream(consensus.messages_for(topic)); + let mut stream2 = 
block_on_stream(consensus.messages_for(topic)); + + assert_eq!( + stream1.next(), + Some(TopicNotification { + message: message.clone(), + sender: None + }) + ); + assert_eq!( + stream2.next(), + Some(TopicNotification { + message, + sender: None + }) + ); + } + + #[test] + fn peer_is_removed_on_disconnect() { + struct TestNetwork; + impl Network for TestNetwork { + fn event_stream( + &self, + ) -> std::pin::Pin + Send>> { + unimplemented!("Not required in tests") + } + + fn report_peer(&self, _: PeerId, _: crate::ReputationChange) { + unimplemented!("Not required in tests") + } + + fn disconnect_peer(&self, _: PeerId) { + unimplemented!("Not required in tests") + } + + fn write_notification(&self, _: PeerId, _: crate::ConsensusEngineId, _: Vec) { + unimplemented!("Not required in tests") + } + + fn register_notifications_protocol( + &self, + _: ConsensusEngineId, + _: std::borrow::Cow<'static, [u8]>, + ) { + unimplemented!("Not required in tests") + } + + fn announce(&self, _: H256, _: Vec) { + unimplemented!("Not required in tests") + } + } + + let mut consensus = ConsensusGossip::::new(Arc::new(AllowAll), [0, 0, 0, 0]); + + let mut network = TestNetwork; + + let peer_id = PeerId::random(); + consensus.new_peer(&mut network, peer_id.clone(), ObservedRole::Full); + assert!(consensus.peers.contains_key(&peer_id)); + + consensus.peer_disconnected(&mut network, peer_id.clone()); + assert!(!consensus.peers.contains_key(&peer_id)); + } } diff --git a/client/network-gossip/src/validator.rs b/client/network-gossip/src/validator.rs index 6b330d7b61..8f4262e3d5 100644 --- a/client/network-gossip/src/validator.rs +++ b/client/network-gossip/src/validator.rs @@ -19,85 +19,88 @@ use sp_runtime::traits::Block as BlockT; /// Validates consensus messages. pub trait Validator: Send + Sync { - /// New peer is connected. - fn new_peer(&self, _context: &mut dyn ValidatorContext, _who: &PeerId, _role: ObservedRole) { - } - - /// New connection is dropped. 
- fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) { - } - - /// Validate consensus message. - fn validate( - &self, - context: &mut dyn ValidatorContext, - sender: &PeerId, - data: &[u8] - ) -> ValidationResult; - - /// Produce a closure for validating messages on a given topic. - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_topic, _data| false) - } - - /// Produce a closure for filtering egress messages. - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_who, _intent, _topic, _data| true) - } + /// New peer is connected. + fn new_peer(&self, _context: &mut dyn ValidatorContext, _who: &PeerId, _role: ObservedRole) { + } + + /// New connection is dropped. + fn peer_disconnected(&self, _context: &mut dyn ValidatorContext, _who: &PeerId) {} + + /// Validate consensus message. + fn validate( + &self, + context: &mut dyn ValidatorContext, + sender: &PeerId, + data: &[u8], + ) -> ValidationResult; + + /// Produce a closure for validating messages on a given topic. + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_topic, _data| false) + } + + /// Produce a closure for filtering egress messages. + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { + Box::new(move |_who, _intent, _topic, _data| true) + } } /// Validation context. Allows reacting to incoming messages by sending out further messages. pub trait ValidatorContext { - /// Broadcast all messages with given topic to peers that do not have it yet. - fn broadcast_topic(&mut self, topic: B::Hash, force: bool); - /// Broadcast a message to all peers that have not received it previously. - fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool); - /// Send addressed message to a peer. - fn send_message(&mut self, who: &PeerId, message: Vec); - /// Send all messages with given topic to a peer. 
- fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool); + /// Broadcast all messages with given topic to peers that do not have it yet. + fn broadcast_topic(&mut self, topic: B::Hash, force: bool); + /// Broadcast a message to all peers that have not received it previously. + fn broadcast_message(&mut self, topic: B::Hash, message: Vec, force: bool); + /// Send addressed message to a peer. + fn send_message(&mut self, who: &PeerId, message: Vec); + /// Send all messages with given topic to a peer. + fn send_topic(&mut self, who: &PeerId, topic: B::Hash, force: bool); } /// The reason for sending out the message. #[derive(Eq, PartialEq, Copy, Clone)] #[cfg_attr(test, derive(Debug))] pub enum MessageIntent { - /// Requested broadcast. - Broadcast, - /// Requested broadcast to all peers. - ForcedBroadcast, - /// Periodic rebroadcast of all messages to all peers. - PeriodicRebroadcast, + /// Requested broadcast. + Broadcast, + /// Requested broadcast to all peers. + ForcedBroadcast, + /// Periodic rebroadcast of all messages to all peers. + PeriodicRebroadcast, } /// Message validation result. pub enum ValidationResult { - /// Message should be stored and propagated under given topic. - ProcessAndKeep(H), - /// Message should be processed, but not propagated. - ProcessAndDiscard(H), - /// Message should be ignored. - Discard, + /// Message should be stored and propagated under given topic. + ProcessAndKeep(H), + /// Message should be processed, but not propagated. + ProcessAndDiscard(H), + /// Message should be ignored. + Discard, } /// A gossip message validator that discards all messages. 
pub struct DiscardAll; impl Validator for DiscardAll { - fn validate( - &self, - _context: &mut dyn ValidatorContext, - _sender: &PeerId, - _data: &[u8], - ) -> ValidationResult { - ValidationResult::Discard - } - - fn message_expired<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_topic, _data| true) - } - - fn message_allowed<'a>(&'a self) -> Box bool + 'a> { - Box::new(move |_who, _intent, _topic, _data| false) - } + fn validate( + &self, + _context: &mut dyn ValidatorContext, + _sender: &PeerId, + _data: &[u8], + ) -> ValidationResult { + ValidationResult::Discard + } + + fn message_expired<'a>(&'a self) -> Box bool + 'a> { + Box::new(move |_topic, _data| true) + } + + fn message_allowed<'a>( + &'a self, + ) -> Box bool + 'a> { + Box::new(move |_who, _intent, _topic, _data| false) + } } diff --git a/client/network/build.rs b/client/network/build.rs index 0fd1f12866..b28e8b600d 100644 --- a/client/network/build.rs +++ b/client/network/build.rs @@ -1,8 +1,8 @@ const PROTOS: &[&str] = &[ - "src/protocol/schema/api.v1.proto", - "src/protocol/schema/light.v1.proto" + "src/protocol/schema/api.v1.proto", + "src/protocol/schema/light.v1.proto", ]; fn main() { - prost_build::compile_protos(PROTOS, &["src/protocol"]).unwrap(); + prost_build::compile_protos(PROTOS, &["src/protocol"]).unwrap(); } diff --git a/client/network/src/behaviour.rs b/client/network/src/behaviour.rs index cb9c552115..f9663b5898 100644 --- a/client/network/src/behaviour.rs +++ b/client/network/src/behaviour.rs @@ -14,19 +14,26 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use crate::protocol::{self, light_client_handler, message::Roles, CustomMessageOutcome, Protocol}; use crate::{ - config::{ProtocolId, Role}, - debug_info, discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, - Event, ObservedRole, DhtEvent, ExHashT, + config::{ProtocolId, Role}, + debug_info, + discovery::{DiscoveryBehaviour, DiscoveryConfig, DiscoveryOut}, + DhtEvent, Event, ExHashT, ObservedRole, }; -use crate::protocol::{self, light_client_handler, message::Roles, CustomMessageOutcome, Protocol}; -use libp2p::NetworkBehaviour; use libp2p::core::{Multiaddr, PeerId, PublicKey}; use libp2p::kad::record; use libp2p::swarm::{NetworkBehaviourAction, NetworkBehaviourEventProcess, PollParameters}; +use libp2p::NetworkBehaviour; use log::debug; -use sp_consensus::{BlockOrigin, import_queue::{IncomingBlock, Origin}}; -use sp_runtime::{traits::{Block as BlockT, NumberFor}, ConsensusEngineId, Justification}; +use sp_consensus::{ + import_queue::{IncomingBlock, Origin}, + BlockOrigin, +}; +use sp_runtime::{ + traits::{Block as BlockT, NumberFor}, + ConsensusEngineId, Justification, +}; use std::{borrow::Cow, iter, task::Context, task::Poll}; use void; @@ -34,265 +41,313 @@ use void; #[derive(NetworkBehaviour)] #[behaviour(out_event = "BehaviourOut", poll_method = "poll")] pub struct Behaviour { - /// All the substrate-specific protocols. - substrate: Protocol, - /// Periodically pings and identifies the nodes we are connected to, and store information in a - /// cache. - debug_info: debug_info::DebugInfoBehaviour, - /// Discovers nodes of the network. - discovery: DiscoveryBehaviour, - /// Block request handling. - block_requests: protocol::BlockRequests, - /// Light client request handling. - light_client_handler: protocol::LightClientHandler, + /// All the substrate-specific protocols. + substrate: Protocol, + /// Periodically pings and identifies the nodes we are connected to, and store information in a + /// cache. 
+ debug_info: debug_info::DebugInfoBehaviour, + /// Discovers nodes of the network. + discovery: DiscoveryBehaviour, + /// Block request handling. + block_requests: protocol::BlockRequests, + /// Light client request handling. + light_client_handler: protocol::LightClientHandler, - /// Queue of events to produce for the outside. - #[behaviour(ignore)] - events: Vec>, + /// Queue of events to produce for the outside. + #[behaviour(ignore)] + events: Vec>, - /// Role of our local node, as originally passed from the configuration. - #[behaviour(ignore)] - role: Role, + /// Role of our local node, as originally passed from the configuration. + #[behaviour(ignore)] + role: Role, } /// Event generated by `Behaviour`. pub enum BehaviourOut { - BlockImport(BlockOrigin, Vec>), - JustificationImport(Origin, B::Hash, NumberFor, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor, Vec), - /// Started a random Kademlia discovery query. - RandomKademliaStarted(ProtocolId), - Event(Event), + BlockImport(BlockOrigin, Vec>), + JustificationImport(Origin, B::Hash, NumberFor, Justification), + FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + /// Started a random Kademlia discovery query. + RandomKademliaStarted(ProtocolId), + Event(Event), } impl Behaviour { - /// Builds a new `Behaviour`. - pub fn new( - substrate: Protocol, - role: Role, - user_agent: String, - local_public_key: PublicKey, - block_requests: protocol::BlockRequests, - light_client_handler: protocol::LightClientHandler, - disco_config: DiscoveryConfig, - ) -> Self { - Behaviour { - substrate, - debug_info: debug_info::DebugInfoBehaviour::new(user_agent, local_public_key.clone()), - discovery: disco_config.finish(), - block_requests, - light_client_handler, - events: Vec::new(), - role, - } - } + /// Builds a new `Behaviour`. 
+ pub fn new( + substrate: Protocol, + role: Role, + user_agent: String, + local_public_key: PublicKey, + block_requests: protocol::BlockRequests, + light_client_handler: protocol::LightClientHandler, + disco_config: DiscoveryConfig, + ) -> Self { + Behaviour { + substrate, + debug_info: debug_info::DebugInfoBehaviour::new(user_agent, local_public_key.clone()), + discovery: disco_config.finish(), + block_requests, + light_client_handler, + events: Vec::new(), + role, + } + } - /// Returns the list of nodes that we know exist in the network. - pub fn known_peers(&mut self) -> impl Iterator { - self.discovery.known_peers() - } + /// Returns the list of nodes that we know exist in the network. + pub fn known_peers(&mut self) -> impl Iterator { + self.discovery.known_peers() + } - /// Adds a hard-coded address for the given peer, that never expires. - pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - self.discovery.add_known_address(peer_id, addr) - } + /// Adds a hard-coded address for the given peer, that never expires. + pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + self.discovery.add_known_address(peer_id, addr) + } - /// Returns the number of nodes that are in the Kademlia k-buckets. - pub fn num_kbuckets_entries(&mut self) -> impl ExactSizeIterator { - self.discovery.num_kbuckets_entries() - } + /// Returns the number of nodes that are in the Kademlia k-buckets. + pub fn num_kbuckets_entries(&mut self) -> impl ExactSizeIterator { + self.discovery.num_kbuckets_entries() + } - /// Returns the number of records in the Kademlia record stores. - pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { - self.discovery.num_kademlia_records() - } + /// Returns the number of records in the Kademlia record stores. + pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { + self.discovery.num_kademlia_records() + } - /// Returns the total size in bytes of all the records in the Kademlia record stores. 
- pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { - self.discovery.kademlia_records_total_size() - } + /// Returns the total size in bytes of all the records in the Kademlia record stores. + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { + self.discovery.kademlia_records_total_size() + } - /// Borrows `self` and returns a struct giving access to the information about a node. - /// - /// Returns `None` if we don't know anything about this node. Always returns `Some` for nodes - /// we're connected to, meaning that if `None` is returned then we're not connected to that - /// node. - pub fn node(&self, peer_id: &PeerId) -> Option { - self.debug_info.node(peer_id) - } + /// Borrows `self` and returns a struct giving access to the information about a node. + /// + /// Returns `None` if we don't know anything about this node. Always returns `Some` for nodes + /// we're connected to, meaning that if `None` is returned then we're not connected to that + /// node. + pub fn node(&self, peer_id: &PeerId) -> Option { + self.debug_info.node(peer_id) + } - /// Registers a new notifications protocol. - /// - /// After that, you can call `write_notifications`. - /// - /// Please call `event_stream` before registering a protocol, otherwise you may miss events - /// about the protocol that you have registered. - /// - /// You are very strongly encouraged to call this method very early on. Any connection open - /// will retain the protocols that were registered then, and not any new one. 
- pub fn register_notifications_protocol( - &mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, - ) { - let list = self.substrate.register_notifications_protocol(engine_id, protocol_name); - for (remote, roles) in list { - let role = reported_roles_to_observed_role(&self.role, remote, roles); - let ev = Event::NotificationStreamOpened { - remote: remote.clone(), - engine_id, - role, - }; - self.events.push(BehaviourOut::Event(ev)); - } - } + /// Registers a new notifications protocol. + /// + /// After that, you can call `write_notifications`. + /// + /// Please call `event_stream` before registering a protocol, otherwise you may miss events + /// about the protocol that you have registered. + /// + /// You are very strongly encouraged to call this method very early on. Any connection open + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notifications_protocol( + &mut self, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + ) { + let list = self + .substrate + .register_notifications_protocol(engine_id, protocol_name); + for (remote, roles) in list { + let role = reported_roles_to_observed_role(&self.role, remote, roles); + let ev = Event::NotificationStreamOpened { + remote: remote.clone(), + engine_id, + role, + }; + self.events.push(BehaviourOut::Event(ev)); + } + } - /// Returns a shared reference to the user protocol. - pub fn user_protocol(&self) -> &Protocol { - &self.substrate - } + /// Returns a shared reference to the user protocol. + pub fn user_protocol(&self) -> &Protocol { + &self.substrate + } - /// Returns a mutable reference to the user protocol. - pub fn user_protocol_mut(&mut self) -> &mut Protocol { - &mut self.substrate - } + /// Returns a mutable reference to the user protocol. + pub fn user_protocol_mut(&mut self) -> &mut Protocol { + &mut self.substrate + } - /// Start querying a record from the DHT. 
Will later produce either a `ValueFound` or a `ValueNotFound` event. - pub fn get_value(&mut self, key: &record::Key) { - self.discovery.get_value(key); - } + /// Start querying a record from the DHT. Will later produce either a `ValueFound` or a `ValueNotFound` event. + pub fn get_value(&mut self, key: &record::Key) { + self.discovery.get_value(key); + } - /// Starts putting a record into DHT. Will later produce either a `ValuePut` or a `ValuePutFailed` event. - pub fn put_value(&mut self, key: record::Key, value: Vec) { - self.discovery.put_value(key, value); - } + /// Starts putting a record into DHT. Will later produce either a `ValuePut` or a `ValuePutFailed` event. + pub fn put_value(&mut self, key: record::Key, value: Vec) { + self.discovery.put_value(key, value); + } - /// Issue a light client request. - pub fn light_client_request(&mut self, r: light_client_handler::Request) -> Result<(), light_client_handler::Error> { - self.light_client_handler.request(r) - } + /// Issue a light client request. 
+ pub fn light_client_request( + &mut self, + r: light_client_handler::Request, + ) -> Result<(), light_client_handler::Error> { + self.light_client_handler.request(r) + } } -fn reported_roles_to_observed_role(local_role: &Role, remote: &PeerId, roles: Roles) -> ObservedRole { - if roles.is_authority() { - match local_role { - Role::Authority { sentry_nodes } - if sentry_nodes.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurSentry, - Role::Sentry { validators } - if validators.iter().any(|s| s.peer_id == *remote) => ObservedRole::OurGuardedAuthority, - _ => ObservedRole::Authority - } - } else if roles.is_full() { - ObservedRole::Full - } else { - ObservedRole::Light - } +fn reported_roles_to_observed_role( + local_role: &Role, + remote: &PeerId, + roles: Roles, +) -> ObservedRole { + if roles.is_authority() { + match local_role { + Role::Authority { sentry_nodes } + if sentry_nodes.iter().any(|s| s.peer_id == *remote) => + { + ObservedRole::OurSentry + } + Role::Sentry { validators } if validators.iter().any(|s| s.peer_id == *remote) => { + ObservedRole::OurGuardedAuthority + } + _ => ObservedRole::Authority, + } + } else if roles.is_full() { + ObservedRole::Full + } else { + ObservedRole::Light + } } -impl NetworkBehaviourEventProcess for -Behaviour { - fn inject_event(&mut self, event: void::Void) { - void::unreachable(event) - } +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, event: void::Void) { + void::unreachable(event) + } } -impl NetworkBehaviourEventProcess> for -Behaviour { - fn inject_event(&mut self, event: CustomMessageOutcome) { - match event { - CustomMessageOutcome::BlockImport(origin, blocks) => - self.events.push(BehaviourOut::BlockImport(origin, blocks)), - CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => - self.events.push(BehaviourOut::JustificationImport(origin, hash, nb, justification)), - CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => - 
self.events.push(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), - CustomMessageOutcome::NotificationStreamOpened { remote, protocols, roles } => { - let role = reported_roles_to_observed_role(&self.role, &remote, roles); - for engine_id in protocols { - self.events.push(BehaviourOut::Event(Event::NotificationStreamOpened { - remote: remote.clone(), - engine_id, - role: role.clone(), - })); - } - }, - CustomMessageOutcome::NotificationStreamClosed { remote, protocols } => - for engine_id in protocols { - self.events.push(BehaviourOut::Event(Event::NotificationStreamClosed { - remote: remote.clone(), - engine_id, - })); - }, - CustomMessageOutcome::NotificationsReceived { remote, messages } => { - let ev = Event::NotificationsReceived { remote, messages }; - self.events.push(BehaviourOut::Event(ev)); - }, - CustomMessageOutcome::PeerNewBest(peer_id, number) => { - self.light_client_handler.update_best_block(&peer_id, number); - } - CustomMessageOutcome::None => {} - } - } +impl NetworkBehaviourEventProcess> + for Behaviour +{ + fn inject_event(&mut self, event: CustomMessageOutcome) { + match event { + CustomMessageOutcome::BlockImport(origin, blocks) => { + self.events.push(BehaviourOut::BlockImport(origin, blocks)) + } + CustomMessageOutcome::JustificationImport(origin, hash, nb, justification) => { + self.events.push(BehaviourOut::JustificationImport( + origin, + hash, + nb, + justification, + )) + } + CustomMessageOutcome::FinalityProofImport(origin, hash, nb, proof) => self + .events + .push(BehaviourOut::FinalityProofImport(origin, hash, nb, proof)), + CustomMessageOutcome::NotificationStreamOpened { + remote, + protocols, + roles, + } => { + let role = reported_roles_to_observed_role(&self.role, &remote, roles); + for engine_id in protocols { + self.events + .push(BehaviourOut::Event(Event::NotificationStreamOpened { + remote: remote.clone(), + engine_id, + role: role.clone(), + })); + } + } + CustomMessageOutcome::NotificationStreamClosed { 
remote, protocols } => { + for engine_id in protocols { + self.events + .push(BehaviourOut::Event(Event::NotificationStreamClosed { + remote: remote.clone(), + engine_id, + })); + } + } + CustomMessageOutcome::NotificationsReceived { remote, messages } => { + let ev = Event::NotificationsReceived { remote, messages }; + self.events.push(BehaviourOut::Event(ev)); + } + CustomMessageOutcome::PeerNewBest(peer_id, number) => { + self.light_client_handler + .update_best_block(&peer_id, number); + } + CustomMessageOutcome::None => {} + } + } } impl NetworkBehaviourEventProcess - for Behaviour { - fn inject_event(&mut self, event: debug_info::DebugInfoEvent) { - let debug_info::DebugInfoEvent::Identified { peer_id, mut info } = event; - if info.listen_addrs.len() > 30 { - debug!(target: "sub-libp2p", "Node {:?} has reported more than 30 addresses; \ - it is identified by {:?} and {:?}", peer_id, info.protocol_version, - info.agent_version - ); - info.listen_addrs.truncate(30); - } - for addr in &info.listen_addrs { - self.discovery.add_self_reported_address(&peer_id, addr.clone()); - } - self.substrate.add_discovered_nodes(iter::once(peer_id.clone())); - } + for Behaviour +{ + fn inject_event(&mut self, event: debug_info::DebugInfoEvent) { + let debug_info::DebugInfoEvent::Identified { peer_id, mut info } = event; + if info.listen_addrs.len() > 30 { + debug!(target: "sub-libp2p", "Node {:?} has reported more than 30 addresses; \ + it is identified by {:?} and {:?}", peer_id, info.protocol_version, + info.agent_version + ); + info.listen_addrs.truncate(30); + } + for addr in &info.listen_addrs { + self.discovery + .add_self_reported_address(&peer_id, addr.clone()); + } + self.substrate + .add_discovered_nodes(iter::once(peer_id.clone())); + } } -impl NetworkBehaviourEventProcess - for Behaviour { - fn inject_event(&mut self, out: DiscoveryOut) { - match out { - DiscoveryOut::UnroutablePeer(_peer_id) => { - // Obtaining and reporting listen addresses for unroutable peers 
back - // to Kademlia is handled by the `Identify` protocol, part of the - // `DebugInfoBehaviour`. See the `NetworkBehaviourEventProcess` - // implementation for `DebugInfoEvent`. - } - DiscoveryOut::Discovered(peer_id) => { - self.substrate.add_discovered_nodes(iter::once(peer_id)); - } - DiscoveryOut::ValueFound(results) => { - self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValueFound(results)))); - } - DiscoveryOut::ValueNotFound(key) => { - self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValueNotFound(key)))); - } - DiscoveryOut::ValuePut(key) => { - self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePut(key)))); - } - DiscoveryOut::ValuePutFailed(key) => { - self.events.push(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePutFailed(key)))); - } - DiscoveryOut::RandomKademliaStarted(protocols) => { - for protocol in protocols { - self.events.push(BehaviourOut::RandomKademliaStarted(protocol)); - } - } - } - } +impl NetworkBehaviourEventProcess for Behaviour { + fn inject_event(&mut self, out: DiscoveryOut) { + match out { + DiscoveryOut::UnroutablePeer(_peer_id) => { + // Obtaining and reporting listen addresses for unroutable peers back + // to Kademlia is handled by the `Identify` protocol, part of the + // `DebugInfoBehaviour`. See the `NetworkBehaviourEventProcess` + // implementation for `DebugInfoEvent`. 
+ } + DiscoveryOut::Discovered(peer_id) => { + self.substrate.add_discovered_nodes(iter::once(peer_id)); + } + DiscoveryOut::ValueFound(results) => { + self.events + .push(BehaviourOut::Event(Event::Dht(DhtEvent::ValueFound( + results, + )))); + } + DiscoveryOut::ValueNotFound(key) => { + self.events + .push(BehaviourOut::Event(Event::Dht(DhtEvent::ValueNotFound( + key, + )))); + } + DiscoveryOut::ValuePut(key) => { + self.events + .push(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePut(key)))); + } + DiscoveryOut::ValuePutFailed(key) => { + self.events + .push(BehaviourOut::Event(Event::Dht(DhtEvent::ValuePutFailed( + key, + )))); + } + DiscoveryOut::RandomKademliaStarted(protocols) => { + for protocol in protocols { + self.events + .push(BehaviourOut::RandomKademliaStarted(protocol)); + } + } + } + } } impl Behaviour { - fn poll(&mut self, _: &mut Context, _: &mut impl PollParameters) -> Poll>> { - if !self.events.is_empty() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))) - } + fn poll( + &mut self, + _: &mut Context, + _: &mut impl PollParameters, + ) -> Poll>> { + if !self.events.is_empty() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(self.events.remove(0))); + } - Poll::Pending - } + Poll::Pending + } } diff --git a/client/network/src/chain.rs b/client/network/src/chain.rs index 4e7e28be93..df8193e134 100644 --- a/client/network/src/chain.rs +++ b/client/network/src/chain.rs @@ -16,29 +16,49 @@ //! Blockchain access trait -use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sc_client_api::{BlockBackend, ProofProvider}; +use sp_blockchain::{Error, HeaderBackend, HeaderMetadata}; use sp_runtime::traits::{Block as BlockT, BlockIdTo}; /// Local client abstraction for the network. 
-pub trait Client: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync -{} +pub trait Client: + HeaderBackend + + ProofProvider + + BlockIdTo + + BlockBackend + + HeaderMetadata + + Send + + Sync +{ +} -impl Client for T - where - T: HeaderBackend + ProofProvider + BlockIdTo - + BlockBackend + HeaderMetadata + Send + Sync -{} +impl Client for T where + T: HeaderBackend + + ProofProvider + + BlockIdTo + + BlockBackend + + HeaderMetadata + + Send + + Sync +{ +} /// Finality proof provider. pub trait FinalityProofProvider: Send + Sync { - /// Prove finality of the block. - fn prove_finality(&self, for_block: Block::Hash, request: &[u8]) -> Result>, Error>; + /// Prove finality of the block. + fn prove_finality( + &self, + for_block: Block::Hash, + request: &[u8], + ) -> Result>, Error>; } impl FinalityProofProvider for () { - fn prove_finality(&self, _for_block: Block::Hash, _request: &[u8]) -> Result>, Error> { - Ok(None) - } + fn prove_finality( + &self, + _for_block: Block::Hash, + _request: &[u8], + ) -> Result>, Error> { + Ok(None) + } } diff --git a/client/network/src/config.rs b/client/network/src/config.rs index 4914ad680a..a6d65ee777 100644 --- a/client/network/src/config.rs +++ b/client/network/src/config.rs @@ -21,8 +21,8 @@ pub use crate::chain::{Client, FinalityProofProvider}; pub use crate::on_demand_layer::{AlwaysBadChecker, OnDemand}; -pub use crate::service::{TransactionPool, EmptyTransactionPool}; -pub use libp2p::{identity, core::PublicKey, wasm_ext::ExtTransport, build_multiaddr}; +pub use crate::service::{EmptyTransactionPool, TransactionPool}; +pub use libp2p::{build_multiaddr, core::PublicKey, identity, wasm_ext::ExtTransport}; // Note: this re-export shouldn't be part of the public API of the crate and will be removed in // the future. 
@@ -40,117 +40,117 @@ use sp_consensus::{block_validation::BlockAnnounceValidator, import_queue::Impor use sp_runtime::{traits::Block as BlockT, ConsensusEngineId}; use std::{borrow::Cow, convert::TryFrom, future::Future, pin::Pin, str::FromStr}; use std::{ - error::Error, - fs, - io::{self, Write}, - net::Ipv4Addr, - path::{Path, PathBuf}, - sync::Arc, + error::Error, + fs, + io::{self, Write}, + net::Ipv4Addr, + path::{Path, PathBuf}, + sync::Arc, }; use zeroize::Zeroize; /// Network initialization parameters. pub struct Params { - /// Assigned role for our node (full, light, ...). - pub role: Role, + /// Assigned role for our node (full, light, ...). + pub role: Role, - /// How to spawn background tasks. If you pass `None`, then a threads pool will be used by - /// default. - pub executor: Option + Send>>) + Send>>, + /// How to spawn background tasks. If you pass `None`, then a threads pool will be used by + /// default. + pub executor: Option + Send>>) + Send>>, - /// Network layer configuration. - pub network_config: NetworkConfiguration, + /// Network layer configuration. + pub network_config: NetworkConfiguration, - /// Client that contains the blockchain. - pub chain: Arc>, + /// Client that contains the blockchain. + pub chain: Arc>, - /// Finality proof provider. - /// - /// This object, if `Some`, is used when a node on the network requests a proof of finality - /// from us. - pub finality_proof_provider: Option>>, + /// Finality proof provider. + /// + /// This object, if `Some`, is used when a node on the network requests a proof of finality + /// from us. + pub finality_proof_provider: Option>>, - /// How to build requests for proofs of finality. - /// - /// This object, if `Some`, is used when we need a proof of finality from another node. - pub finality_proof_request_builder: Option>, + /// How to build requests for proofs of finality. + /// + /// This object, if `Some`, is used when we need a proof of finality from another node. 
+ pub finality_proof_request_builder: Option>, - /// The `OnDemand` object acts as a "receiver" for block data requests from the client. - /// If `Some`, the network worker will process these requests and answer them. - /// Normally used only for light clients. - pub on_demand: Option>>, + /// The `OnDemand` object acts as a "receiver" for block data requests from the client. + /// If `Some`, the network worker will process these requests and answer them. + /// Normally used only for light clients. + pub on_demand: Option>>, - /// Pool of transactions. - /// - /// The network worker will fetch transactions from this object in order to propagate them on - /// the network. - pub transaction_pool: Arc>, + /// Pool of transactions. + /// + /// The network worker will fetch transactions from this object in order to propagate them on + /// the network. + pub transaction_pool: Arc>, - /// Name of the protocol to use on the wire. Should be different for each chain. - pub protocol_id: ProtocolId, + /// Name of the protocol to use on the wire. Should be different for each chain. + pub protocol_id: ProtocolId, - /// Import queue to use. - /// - /// The import queue is the component that verifies that blocks received from other nodes are - /// valid. - pub import_queue: Box>, + /// Import queue to use. + /// + /// The import queue is the component that verifies that blocks received from other nodes are + /// valid. + pub import_queue: Box>, - /// Type to check incoming block announcements. - pub block_announce_validator: Box + Send>, + /// Type to check incoming block announcements. + pub block_announce_validator: Box + Send>, - /// Registry for recording prometheus metrics to. - pub metrics_registry: Option, + /// Registry for recording prometheus metrics to. + pub metrics_registry: Option, } /// Role of the local node. #[derive(Debug, Clone)] pub enum Role { - /// Regular full node. - Full, - /// Regular light node. - Light, - /// Sentry node that guards an authority. 
Will be reported as "authority" on the wire protocol. - Sentry { - /// Address and identity of the validator nodes that we're guarding. - /// - /// The nodes will be granted some priviledged status. - validators: Vec, - }, - /// Actual authority. - Authority { - /// List of public addresses and identities of our sentry nodes. - sentry_nodes: Vec, - } + /// Regular full node. + Full, + /// Regular light node. + Light, + /// Sentry node that guards an authority. Will be reported as "authority" on the wire protocol. + Sentry { + /// Address and identity of the validator nodes that we're guarding. + /// + /// The nodes will be granted some privileged status. + validators: Vec, + }, + /// Actual authority. + Authority { + /// List of public addresses and identities of our sentry nodes. + sentry_nodes: Vec, + }, } impl Role { - /// True for `Role::Authority` - pub fn is_authority(&self) -> bool { - matches!(self, Role::Authority { .. }) - } + /// True for `Role::Authority` + pub fn is_authority(&self) -> bool { + matches!(self, Role::Authority { .. }) + } - /// True for `Role::Authority` and `Role::Sentry` since they're both - /// announced as having the authority role to the network. - pub fn is_network_authority(&self) -> bool { - matches!(self, Role::Authority { .. } | Role::Sentry { .. }) - } + /// True for `Role::Authority` and `Role::Sentry` since they're both + /// announced as having the authority role to the network. + pub fn is_network_authority(&self) -> bool { + matches!(self, Role::Authority { .. } | Role::Sentry { .. }) + } } impl fmt::Display for Role { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - Role::Full => write!(f, "FULL"), - Role::Light => write!(f, "LIGHT"), - Role::Sentry { .. } => write!(f, "SENTRY"), - Role::Authority { ..
} => write!(f, "AUTHORITY"), - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + Role::Full => write!(f, "FULL"), + Role::Light => write!(f, "LIGHT"), + Role::Sentry { .. } => write!(f, "SENTRY"), + Role::Authority { .. } => write!(f, "AUTHORITY"), + } + } } /// Finality proof request builder. pub trait FinalityProofRequestBuilder: Send { - /// Build data blob, associated with the request. - fn build_request_data(&mut self, hash: &B::Hash) -> Vec; + /// Build data blob, associated with the request. + fn build_request_data(&mut self, hash: &B::Hash) -> Vec; } /// Implementation of `FinalityProofRequestBuilder` that builds a dummy empty request. @@ -158,9 +158,9 @@ pub trait FinalityProofRequestBuilder: Send { pub struct DummyFinalityProofRequestBuilder; impl FinalityProofRequestBuilder for DummyFinalityProofRequestBuilder { - fn build_request_data(&mut self, _: &B::Hash) -> Vec { - Vec::new() - } + fn build_request_data(&mut self, _: &B::Hash) -> Vec { + Vec::new() + } } /// Shared finality proof request builder struct used by the queue. @@ -171,16 +171,16 @@ pub type BoxFinalityProofRequestBuilder = Box); impl<'a> From<&'a [u8]> for ProtocolId { - fn from(bytes: &'a [u8]) -> ProtocolId { - ProtocolId(bytes.into()) - } + fn from(bytes: &'a [u8]) -> ProtocolId { + ProtocolId(bytes.into()) + } } impl ProtocolId { - /// Exposes the `ProtocolId` as bytes. - pub fn as_bytes(&self) -> &[u8] { - self.0.as_ref() - } + /// Exposes the `ProtocolId` as bytes. + pub fn as_bytes(&self) -> &[u8] { + self.0.as_ref() + } } /// Parses a string address and splits it into Multiaddress and PeerId, if @@ -198,19 +198,20 @@ impl ProtocolId { /// ``` /// pub fn parse_str_addr(addr_str: &str) -> Result<(PeerId, Multiaddr), ParseErr> { - let addr: Multiaddr = addr_str.parse()?; - parse_addr(addr) + let addr: Multiaddr = addr_str.parse()?; + parse_addr(addr) } /// Splits a Multiaddress into a Multiaddress and PeerId. 
-pub fn parse_addr(mut addr: Multiaddr)-> Result<(PeerId, Multiaddr), ParseErr> { - let who = match addr.pop() { - Some(multiaddr::Protocol::P2p(key)) => PeerId::from_multihash(key) - .map_err(|_| ParseErr::InvalidPeerId)?, - _ => return Err(ParseErr::PeerIdMissing), - }; +pub fn parse_addr(mut addr: Multiaddr) -> Result<(PeerId, Multiaddr), ParseErr> { + let who = match addr.pop() { + Some(multiaddr::Protocol::P2p(key)) => { + PeerId::from_multihash(key).map_err(|_| ParseErr::InvalidPeerId)? + } + _ => return Err(ParseErr::PeerIdMissing), + }; - Ok((who, addr)) + Ok((who, addr)) } /// Address of a node, including its identity. @@ -229,241 +230,232 @@ pub fn parse_addr(mut addr: Multiaddr)-> Result<(PeerId, Multiaddr), ParseErr> { #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[serde(try_from = "String", into = "String")] pub struct MultiaddrWithPeerId { - /// Address of the node. - pub multiaddr: Multiaddr, - /// Its identity. - pub peer_id: PeerId, + /// Address of the node. + pub multiaddr: Multiaddr, + /// Its identity. + pub peer_id: PeerId, } impl MultiaddrWithPeerId { - /// Concatenates the multiaddress and peer ID into one multiaddress containing both. - pub fn concat(&self) -> Multiaddr { - let proto = multiaddr::Protocol::P2p(From::from(self.peer_id.clone())); - self.multiaddr.clone().with(proto) - } + /// Concatenates the multiaddress and peer ID into one multiaddress containing both. 
+ pub fn concat(&self) -> Multiaddr { + let proto = multiaddr::Protocol::P2p(From::from(self.peer_id.clone())); + self.multiaddr.clone().with(proto) + } } impl fmt::Display for MultiaddrWithPeerId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Display::fmt(&self.concat(), f) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Display::fmt(&self.concat(), f) + } } impl FromStr for MultiaddrWithPeerId { - type Err = ParseErr; + type Err = ParseErr; - fn from_str(s: &str) -> Result { - let (peer_id, multiaddr) = parse_str_addr(s)?; - Ok(MultiaddrWithPeerId { - peer_id, - multiaddr, - }) - } + fn from_str(s: &str) -> Result { + let (peer_id, multiaddr) = parse_str_addr(s)?; + Ok(MultiaddrWithPeerId { peer_id, multiaddr }) + } } impl From for String { - fn from(ma: MultiaddrWithPeerId) -> String { - format!("{}", ma) - } + fn from(ma: MultiaddrWithPeerId) -> String { + format!("{}", ma) + } } impl TryFrom for MultiaddrWithPeerId { - type Error = ParseErr; - fn try_from(string: String) -> Result { - string.parse() - } + type Error = ParseErr; + fn try_from(string: String) -> Result { + string.parse() + } } /// Error that can be generated by `parse_str_addr`. #[derive(Debug)] pub enum ParseErr { - /// Error while parsing the multiaddress. - MultiaddrParse(multiaddr::Error), - /// Multihash of the peer ID is invalid. - InvalidPeerId, - /// The peer ID is missing from the address. - PeerIdMissing, + /// Error while parsing the multiaddress. + MultiaddrParse(multiaddr::Error), + /// Multihash of the peer ID is invalid. + InvalidPeerId, + /// The peer ID is missing from the address. 
+ PeerIdMissing, } impl fmt::Display for ParseErr { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - match self { - ParseErr::MultiaddrParse(err) => write!(f, "{}", err), - ParseErr::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), - ParseErr::PeerIdMissing => write!(f, "Peer id is missing from the address"), - } - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + match self { + ParseErr::MultiaddrParse(err) => write!(f, "{}", err), + ParseErr::InvalidPeerId => write!(f, "Peer id at the end of the address is invalid"), + ParseErr::PeerIdMissing => write!(f, "Peer id is missing from the address"), + } + } } impl std::error::Error for ParseErr { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - ParseErr::MultiaddrParse(err) => Some(err), - ParseErr::InvalidPeerId => None, - ParseErr::PeerIdMissing => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + ParseErr::MultiaddrParse(err) => Some(err), + ParseErr::InvalidPeerId => None, + ParseErr::PeerIdMissing => None, + } + } } impl From for ParseErr { - fn from(err: multiaddr::Error) -> ParseErr { - ParseErr::MultiaddrParse(err) - } + fn from(err: multiaddr::Error) -> ParseErr { + ParseErr::MultiaddrParse(err) + } } /// Network service configuration. #[derive(Clone, Debug)] pub struct NetworkConfiguration { - /// Directory path to store network-specific configuration. None means nothing will be saved. - pub net_config_path: Option, - /// Multiaddresses to listen for incoming connections. - pub listen_addresses: Vec, - /// Multiaddresses to advertise. Detected automatically if empty. - pub public_addresses: Vec, - /// List of initial node addresses - pub boot_nodes: Vec, - /// The node key configuration, which determines the node's network identity keypair. - pub node_key: NodeKeyConfig, - /// List of notifications protocols that the node supports. 
Must also include a - /// `ConsensusEngineId` for backwards-compatibility. - pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, [u8]>)>, - /// Maximum allowed number of incoming connections. - pub in_peers: u32, - /// Number of outgoing connections we're trying to maintain. - pub out_peers: u32, - /// List of reserved node addresses. - pub reserved_nodes: Vec, - /// The non-reserved peer mode. - pub non_reserved_mode: NonReservedPeerMode, - /// Client identifier. Sent over the wire for debugging purposes. - pub client_version: String, - /// Name of the node. Sent over the wire for debugging purposes. - pub node_name: String, - /// Configuration for the transport layer. - pub transport: TransportConfig, - /// Maximum number of peers to ask the same blocks in parallel. - pub max_parallel_downloads: u32, + /// Directory path to store network-specific configuration. None means nothing will be saved. + pub net_config_path: Option, + /// Multiaddresses to listen for incoming connections. + pub listen_addresses: Vec, + /// Multiaddresses to advertise. Detected automatically if empty. + pub public_addresses: Vec, + /// List of initial node addresses + pub boot_nodes: Vec, + /// The node key configuration, which determines the node's network identity keypair. + pub node_key: NodeKeyConfig, + /// List of notifications protocols that the node supports. Must also include a + /// `ConsensusEngineId` for backwards-compatibility. + pub notifications_protocols: Vec<(ConsensusEngineId, Cow<'static, [u8]>)>, + /// Maximum allowed number of incoming connections. + pub in_peers: u32, + /// Number of outgoing connections we're trying to maintain. + pub out_peers: u32, + /// List of reserved node addresses. + pub reserved_nodes: Vec, + /// The non-reserved peer mode. + pub non_reserved_mode: NonReservedPeerMode, + /// Client identifier. Sent over the wire for debugging purposes. + pub client_version: String, + /// Name of the node. 
Sent over the wire for debugging purposes. + pub node_name: String, + /// Configuration for the transport layer. + pub transport: TransportConfig, + /// Maximum number of peers to ask the same blocks in parallel. + pub max_parallel_downloads: u32, } impl NetworkConfiguration { - /// Create new default configuration - pub fn new, SV: Into>( - node_name: SN, - client_version: SV, - node_key: NodeKeyConfig, - net_config_path: Option, - ) -> Self { - NetworkConfiguration { - net_config_path, - listen_addresses: Vec::new(), - public_addresses: Vec::new(), - boot_nodes: Vec::new(), - node_key, - notifications_protocols: Vec::new(), - in_peers: 25, - out_peers: 75, - reserved_nodes: Vec::new(), - non_reserved_mode: NonReservedPeerMode::Accept, - client_version: client_version.into(), - node_name: node_name.into(), - transport: TransportConfig::Normal { - enable_mdns: false, - allow_private_ipv4: true, - wasm_external_transport: None, - use_yamux_flow_control: false, - }, - max_parallel_downloads: 5, - } - } + /// Create new default configuration + pub fn new, SV: Into>( + node_name: SN, + client_version: SV, + node_key: NodeKeyConfig, + net_config_path: Option, + ) -> Self { + NetworkConfiguration { + net_config_path, + listen_addresses: Vec::new(), + public_addresses: Vec::new(), + boot_nodes: Vec::new(), + node_key, + notifications_protocols: Vec::new(), + in_peers: 25, + out_peers: 75, + reserved_nodes: Vec::new(), + non_reserved_mode: NonReservedPeerMode::Accept, + client_version: client_version.into(), + node_name: node_name.into(), + transport: TransportConfig::Normal { + enable_mdns: false, + allow_private_ipv4: true, + wasm_external_transport: None, + use_yamux_flow_control: false, + }, + max_parallel_downloads: 5, + } + } } impl NetworkConfiguration { - /// Create new default configuration for localhost-only connection with random port (useful for testing) - pub fn new_local() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new( - "test-node", 
- "test-client", - Default::default(), - None, - ); - - config.listen_addresses = vec![ - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) - .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect() - ]; - - config - } - - /// Create new default configuration for localhost-only connection with random port (useful for testing) - pub fn new_memory() -> NetworkConfiguration { - let mut config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - - config.listen_addresses = vec![ - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) - .chain(iter::once(multiaddr::Protocol::Tcp(0))) - .collect() - ]; - - config - } + /// Create new default configuration for localhost-only connection with random port (useful for testing) + pub fn new_local() -> NetworkConfiguration { + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![ + iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp(0))) + .collect(), + ]; + + config + } + + /// Create new default configuration for localhost-only connection with random port (useful for testing) + pub fn new_memory() -> NetworkConfiguration { + let mut config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + + config.listen_addresses = + vec![ + iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp(0))) + .collect(), + ]; + + config + } } /// Configuration for the transport layer. #[derive(Clone, Debug)] pub enum TransportConfig { - /// Normal transport mode. - Normal { - /// If true, the network will use mDNS to discover other libp2p nodes on the local network - /// and connect to them if they support the same chain. 
- enable_mdns: bool, - - /// If true, allow connecting to private IPv4 addresses (as defined in - /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have - /// been passed in [`NetworkConfiguration::reserved_nodes`] or - /// [`NetworkConfiguration::boot_nodes`]. - allow_private_ipv4: bool, - - /// Optional external implementation of a libp2p transport. Used in WASM contexts where we - /// need some binding between the networking provided by the operating system or environment - /// and libp2p. - /// - /// This parameter exists whatever the target platform is, but it is expected to be set to - /// `Some` only when compiling for WASM. - wasm_external_transport: Option, - /// Use flow control for yamux streams if set to true. - use_yamux_flow_control: bool, - }, - - /// Only allow connections within the same process. - /// Only addresses of the form `/memory/...` will be supported. - MemoryOnly, + /// Normal transport mode. + Normal { + /// If true, the network will use mDNS to discover other libp2p nodes on the local network + /// and connect to them if they support the same chain. + enable_mdns: bool, + + /// If true, allow connecting to private IPv4 addresses (as defined in + /// [RFC1918](https://tools.ietf.org/html/rfc1918)). Irrelevant for addresses that have + /// been passed in [`NetworkConfiguration::reserved_nodes`] or + /// [`NetworkConfiguration::boot_nodes`]. + allow_private_ipv4: bool, + + /// Optional external implementation of a libp2p transport. Used in WASM contexts where we + /// need some binding between the networking provided by the operating system or environment + /// and libp2p. + /// + /// This parameter exists whatever the target platform is, but it is expected to be set to + /// `Some` only when compiling for WASM. + wasm_external_transport: Option, + /// Use flow control for yamux streams if set to true. + use_yamux_flow_control: bool, + }, + + /// Only allow connections within the same process. 
+ /// Only addresses of the form `/memory/...` will be supported. + MemoryOnly, } /// The policy for connections to non-reserved peers. #[derive(Clone, Debug, PartialEq, Eq)] pub enum NonReservedPeerMode { - /// Accept them. This is the default. - Accept, - /// Deny them. - Deny, + /// Accept them. This is the default. + Accept, + /// Deny them. + Deny, } impl NonReservedPeerMode { - /// Attempt to parse the peer mode from a string. - pub fn parse(s: &str) -> Option { - match s { - "accept" => Some(NonReservedPeerMode::Accept), - "deny" => Some(NonReservedPeerMode::Deny), - _ => None, - } - } + /// Attempt to parse the peer mode from a string. + pub fn parse(s: &str) -> Option { + match s { + "accept" => Some(NonReservedPeerMode::Accept), + "deny" => Some(NonReservedPeerMode::Deny), + _ => None, + } + } } /// The configuration of a node's secret key, describing the type of key @@ -471,14 +463,14 @@ impl NonReservedPeerMode { /// the evaluation of the node key configuration. #[derive(Clone, Debug)] pub enum NodeKeyConfig { - /// A Ed25519 secret key configuration. - Ed25519(Secret) + /// A Ed25519 secret key configuration. + Ed25519(Secret), } impl Default for NodeKeyConfig { - fn default() -> NodeKeyConfig { - NodeKeyConfig::Ed25519(Secret::New) - } + fn default() -> NodeKeyConfig { + NodeKeyConfig::Ed25519(Secret::New) + } } /// The options for obtaining a Ed25519 secret key. @@ -487,57 +479,56 @@ pub type Ed25519Secret = Secret; /// The configuration options for obtaining a secret key `K`. #[derive(Clone)] pub enum Secret { - /// Use the given secret key `K`. - Input(K), - /// Read the secret key from a file. If the file does not exist, - /// it is created with a newly generated secret key `K`. The format - /// of the file is determined by `K`: - /// - /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. - File(PathBuf), - /// Always generate a new secret key `K`. - New + /// Use the given secret key `K`. 
+ Input(K), + /// Read the secret key from a file. If the file does not exist, + /// it is created with a newly generated secret key `K`. The format + /// of the file is determined by `K`: + /// + /// * `ed25519::SecretKey`: An unencoded 32 bytes Ed25519 secret key. + File(PathBuf), + /// Always generate a new secret key `K`. + New, } impl fmt::Debug for Secret { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Secret::Input(_) => f.debug_tuple("Secret::Input").finish(), - Secret::File(path) => f.debug_tuple("Secret::File").field(path).finish(), - Secret::New => f.debug_tuple("Secret::New").finish(), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Secret::Input(_) => f.debug_tuple("Secret::Input").finish(), + Secret::File(path) => f.debug_tuple("Secret::File").field(path).finish(), + Secret::New => f.debug_tuple("Secret::New").finish(), + } + } } impl NodeKeyConfig { - /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`: - /// - /// * If the secret is configured as input, the corresponding keypair is returned. - /// - /// * If the secret is configured as a file, it is read from that file, if it exists. - /// Otherwise a new secret is generated and stored. In either case, the - /// keypair obtained from the secret is returned. - /// - /// * If the secret is configured to be new, it is generated and the corresponding - /// keypair is returned. 
- pub fn into_keypair(self) -> io::Result { - use NodeKeyConfig::*; - match self { - Ed25519(Secret::New) => - Ok(Keypair::generate_ed25519()), - - Ed25519(Secret::Input(k)) => - Ok(Keypair::Ed25519(k.into())), - - Ed25519(Secret::File(f)) => - get_secret(f, - |mut b| ed25519::SecretKey::from_bytes(&mut b), - ed25519::SecretKey::generate, - |b| b.as_ref().to_vec()) - .map(ed25519::Keypair::from) - .map(Keypair::Ed25519), - } - } + /// Evaluate a `NodeKeyConfig` to obtain an identity `Keypair`: + /// + /// * If the secret is configured as input, the corresponding keypair is returned. + /// + /// * If the secret is configured as a file, it is read from that file, if it exists. + /// Otherwise a new secret is generated and stored. In either case, the + /// keypair obtained from the secret is returned. + /// + /// * If the secret is configured to be new, it is generated and the corresponding + /// keypair is returned. + pub fn into_keypair(self) -> io::Result { + use NodeKeyConfig::*; + match self { + Ed25519(Secret::New) => Ok(Keypair::generate_ed25519()), + + Ed25519(Secret::Input(k)) => Ok(Keypair::Ed25519(k.into())), + + Ed25519(Secret::File(f)) => get_secret( + f, + |mut b| ed25519::SecretKey::from_bytes(&mut b), + ed25519::SecretKey::generate, + |b| b.as_ref().to_vec(), + ) + .map(ed25519::Keypair::from) + .map(Keypair::Ed25519), + } + } } /// Load a secret key from a file, if it exists, or generate a @@ -545,104 +536,112 @@ impl NodeKeyConfig { /// the secret key is returned. 
fn get_secret(file: P, parse: F, generate: G, serialize: W) -> io::Result where - P: AsRef, - F: for<'r> FnOnce(&'r mut [u8]) -> Result, - G: FnOnce() -> K, - E: Error + Send + Sync + 'static, - W: Fn(&K) -> Vec, + P: AsRef, + F: for<'r> FnOnce(&'r mut [u8]) -> Result, + G: FnOnce() -> K, + E: Error + Send + Sync + 'static, + W: Fn(&K) -> Vec, { - std::fs::read(&file) - .and_then(|mut sk_bytes| - parse(&mut sk_bytes) - .map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e))) - .or_else(|e| { - if e.kind() == io::ErrorKind::NotFound { - file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; - let sk = generate(); - let mut sk_vec = serialize(&sk); - write_secret_file(file, &sk_vec)?; - sk_vec.zeroize(); - Ok(sk) - } else { - Err(e) - } - }) + std::fs::read(&file) + .and_then(|mut sk_bytes| { + parse(&mut sk_bytes).map_err(|e| io::Error::new(io::ErrorKind::InvalidData, e)) + }) + .or_else(|e| { + if e.kind() == io::ErrorKind::NotFound { + file.as_ref().parent().map_or(Ok(()), fs::create_dir_all)?; + let sk = generate(); + let mut sk_vec = serialize(&sk); + write_secret_file(file, &sk_vec)?; + sk_vec.zeroize(); + Ok(sk) + } else { + Err(e) + } + }) } /// Write secret bytes to a file. fn write_secret_file

(path: P, sk_bytes: &[u8]) -> io::Result<()> where - P: AsRef + P: AsRef, { - let mut file = open_secret_file(&path)?; - file.write_all(sk_bytes) + let mut file = open_secret_file(&path)?; + file.write_all(sk_bytes) } /// Opens a file containing a secret key in write mode. #[cfg(unix)] fn open_secret_file

(path: P) -> io::Result where - P: AsRef + P: AsRef, { - use std::os::unix::fs::OpenOptionsExt; - fs::OpenOptions::new() - .write(true) - .create_new(true) - .mode(0o600) - .open(path) + use std::os::unix::fs::OpenOptionsExt; + fs::OpenOptions::new() + .write(true) + .create_new(true) + .mode(0o600) + .open(path) } /// Opens a file containing a secret key in write mode. #[cfg(not(unix))] fn open_secret_file

(path: P) -> Result where - P: AsRef + P: AsRef, { - fs::OpenOptions::new() - .write(true) - .create_new(true) - .open(path) + fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(path) } #[cfg(test)] mod tests { - use super::*; - use tempfile::TempDir; - - fn tempdir_with_prefix(prefix: &str) -> TempDir { - tempfile::Builder::new().prefix(prefix).tempdir().unwrap() - } - - fn secret_bytes(kp: &Keypair) -> Vec { - match kp { - Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), - Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), - _ => panic!("Unexpected keypair.") - } - } - - #[test] - fn test_secret_file() { - let tmp = tempdir_with_prefix("x"); - std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated - let file = tmp.path().join("x").to_path_buf(); - let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())).into_keypair().unwrap(); - assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2)) - } - - #[test] - fn test_secret_input() { - let sk = ed25519::SecretKey::generate(); - let kp1 = NodeKeyConfig::Ed25519(Secret::Input(sk.clone())).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::Input(sk)).into_keypair().unwrap(); - assert!(secret_bytes(&kp1) == secret_bytes(&kp2)); - } - - #[test] - fn test_secret_new() { - let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); - let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); - assert!(secret_bytes(&kp1) != secret_bytes(&kp2)); - } + use super::*; + use tempfile::TempDir; + + fn tempdir_with_prefix(prefix: &str) -> TempDir { + tempfile::Builder::new().prefix(prefix).tempdir().unwrap() + } + + fn secret_bytes(kp: &Keypair) -> Vec { + match kp { + Keypair::Ed25519(p) => p.secret().as_ref().iter().cloned().collect(), + Keypair::Secp256k1(p) => p.secret().to_bytes().to_vec(), + _ => panic!("Unexpected keypair."), + } + } + 
+ #[test] + fn test_secret_file() { + let tmp = tempdir_with_prefix("x"); + std::fs::remove_dir(tmp.path()).unwrap(); // should be recreated + let file = tmp.path().join("x").to_path_buf(); + let kp1 = NodeKeyConfig::Ed25519(Secret::File(file.clone())) + .into_keypair() + .unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::File(file.clone())) + .into_keypair() + .unwrap(); + assert!(file.is_file() && secret_bytes(&kp1) == secret_bytes(&kp2)) + } + + #[test] + fn test_secret_input() { + let sk = ed25519::SecretKey::generate(); + let kp1 = NodeKeyConfig::Ed25519(Secret::Input(sk.clone())) + .into_keypair() + .unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::Input(sk)) + .into_keypair() + .unwrap(); + assert!(secret_bytes(&kp1) == secret_bytes(&kp2)); + } + + #[test] + fn test_secret_new() { + let kp1 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + let kp2 = NodeKeyConfig::Ed25519(Secret::New).into_keypair().unwrap(); + assert!(secret_bytes(&kp1) != secret_bytes(&kp2)); + } } diff --git a/client/network/src/debug_info.rs b/client/network/src/debug_info.rs index e2803cde35..4b575854fb 100644 --- a/client/network/src/debug_info.rs +++ b/client/network/src/debug_info.rs @@ -14,24 +14,24 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use crate::utils::interval; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::Multiaddr; use libp2p::core::connection::{ConnectionId, ListenerId}; -use libp2p::core::{ConnectedPoint, either::EitherOutput, PeerId, PublicKey}; -use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::core::{either::EitherOutput, ConnectedPoint, PeerId, PublicKey}; use libp2p::identify::{Identify, IdentifyEvent, IdentifyInfo}; use libp2p::ping::{Ping, PingConfig, PingEvent, PingSuccess}; -use log::{debug, trace, error}; +use libp2p::swarm::{IntoProtocolsHandler, IntoProtocolsHandlerSelect, ProtocolsHandler}; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::Multiaddr; +use log::{debug, error, trace}; use smallvec::SmallVec; -use std::{error, io}; use std::collections::hash_map::Entry; use std::pin::Pin; use std::task::{Context, Poll}; use std::time::Duration; +use std::{error, io}; use wasm_timer::Instant; -use crate::utils::interval; /// Time after we disconnect from a node before we purge its information from the cache. const CACHE_EXPIRE: Duration = Duration::from_secs(10 * 60); @@ -41,241 +41,261 @@ const GARBAGE_COLLECT_INTERVAL: Duration = Duration::from_secs(2 * 60); /// Implementation of `NetworkBehaviour` that holds information about nodes in cache for diagnostic /// purposes. pub struct DebugInfoBehaviour { - /// Periodically ping nodes, and close the connection if it's unresponsive. - ping: Ping, - /// Periodically identifies the remote and responds to incoming requests. - identify: Identify, - /// Information that we know about all nodes. - nodes_info: FnvHashMap, - /// Interval at which we perform garbage collection in `nodes_info`. - garbage_collect: Pin + Send>>, + /// Periodically ping nodes, and close the connection if it's unresponsive. 
+ ping: Ping, + /// Periodically identifies the remote and responds to incoming requests. + identify: Identify, + /// Information that we know about all nodes. + nodes_info: FnvHashMap, + /// Interval at which we perform garbage collection in `nodes_info`. + garbage_collect: Pin + Send>>, } /// Information about a node we're connected to. #[derive(Debug)] struct NodeInfo { - /// When we will remove the entry about this node from the list, or `None` if we're connected - /// to the node. - info_expire: Option, - /// Non-empty list of connected endpoints, one per connection. - endpoints: SmallVec<[ConnectedPoint; crate::MAX_CONNECTIONS_PER_PEER]>, - /// Version reported by the remote, or `None` if unknown. - client_version: Option, - /// Latest ping time with this node. - latest_ping: Option, + /// When we will remove the entry about this node from the list, or `None` if we're connected + /// to the node. + info_expire: Option, + /// Non-empty list of connected endpoints, one per connection. + endpoints: SmallVec<[ConnectedPoint; crate::MAX_CONNECTIONS_PER_PEER]>, + /// Version reported by the remote, or `None` if unknown. + client_version: Option, + /// Latest ping time with this node. + latest_ping: Option, } impl NodeInfo { - fn new(endpoint: ConnectedPoint) -> Self { - let mut endpoints = SmallVec::new(); - endpoints.push(endpoint); - NodeInfo { - info_expire: None, - endpoints, - client_version: None, - latest_ping: None, - } - } + fn new(endpoint: ConnectedPoint) -> Self { + let mut endpoints = SmallVec::new(); + endpoints.push(endpoint); + NodeInfo { + info_expire: None, + endpoints, + client_version: None, + latest_ping: None, + } + } } impl DebugInfoBehaviour { - /// Builds a new `DebugInfoBehaviour`. 
- pub fn new( - user_agent: String, - local_public_key: PublicKey, - ) -> Self { - let identify = { - let proto_version = "/substrate/1.0".to_string(); - Identify::new(proto_version, user_agent, local_public_key.clone()) - }; - - DebugInfoBehaviour { - ping: Ping::new(PingConfig::new()), - identify, - nodes_info: FnvHashMap::default(), - garbage_collect: Box::pin(interval(GARBAGE_COLLECT_INTERVAL)), - } - } - - /// Borrows `self` and returns a struct giving access to the information about a node. - /// - /// Returns `None` if we don't know anything about this node. Always returns `Some` for nodes - /// we're connected to, meaning that if `None` is returned then we're not connected to that - /// node. - pub fn node(&self, peer_id: &PeerId) -> Option { - self.nodes_info.get(peer_id).map(Node) - } - - /// Inserts a ping time in the cache. Has no effect if we don't have any entry for that node, - /// which shouldn't happen. - fn handle_ping_report(&mut self, peer_id: &PeerId, ping_time: Duration) { - trace!(target: "sub-libp2p", "Ping time with {:?}: {:?}", peer_id, ping_time); - if let Some(entry) = self.nodes_info.get_mut(peer_id) { - entry.latest_ping = Some(ping_time); - } else { - error!(target: "sub-libp2p", + /// Builds a new `DebugInfoBehaviour`. + pub fn new(user_agent: String, local_public_key: PublicKey) -> Self { + let identify = { + let proto_version = "/substrate/1.0".to_string(); + Identify::new(proto_version, user_agent, local_public_key.clone()) + }; + + DebugInfoBehaviour { + ping: Ping::new(PingConfig::new()), + identify, + nodes_info: FnvHashMap::default(), + garbage_collect: Box::pin(interval(GARBAGE_COLLECT_INTERVAL)), + } + } + + /// Borrows `self` and returns a struct giving access to the information about a node. + /// + /// Returns `None` if we don't know anything about this node. Always returns `Some` for nodes + /// we're connected to, meaning that if `None` is returned then we're not connected to that + /// node. 
+ pub fn node(&self, peer_id: &PeerId) -> Option { + self.nodes_info.get(peer_id).map(Node) + } + + /// Inserts a ping time in the cache. Has no effect if we don't have any entry for that node, + /// which shouldn't happen. + fn handle_ping_report(&mut self, peer_id: &PeerId, ping_time: Duration) { + trace!(target: "sub-libp2p", "Ping time with {:?}: {:?}", peer_id, ping_time); + if let Some(entry) = self.nodes_info.get_mut(peer_id) { + entry.latest_ping = Some(ping_time); + } else { + error!(target: "sub-libp2p", "Received ping from node we're not connected to {:?}", peer_id); - } - } - - /// Inserts an identify record in the cache. Has no effect if we don't have any entry for that - /// node, which shouldn't happen. - fn handle_identify_report(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { - trace!(target: "sub-libp2p", "Identified {:?} => {:?}", peer_id, info); - if let Some(entry) = self.nodes_info.get_mut(peer_id) { - entry.client_version = Some(info.agent_version.clone()); - } else { - error!(target: "sub-libp2p", + } + } + + /// Inserts an identify record in the cache. Has no effect if we don't have any entry for that + /// node, which shouldn't happen. + fn handle_identify_report(&mut self, peer_id: &PeerId, info: &IdentifyInfo) { + trace!(target: "sub-libp2p", "Identified {:?} => {:?}", peer_id, info); + if let Some(entry) = self.nodes_info.get_mut(peer_id) { + entry.client_version = Some(info.agent_version.clone()); + } else { + error!(target: "sub-libp2p", "Received pong from node we're not connected to {:?}", peer_id); - } - } + } + } } /// Gives access to the information about a node. pub struct Node<'a>(&'a NodeInfo); impl<'a> Node<'a> { - /// Returns the endpoint of an established connection to the peer. - pub fn endpoint(&self) -> &'a ConnectedPoint { - &self.0.endpoints[0] // `endpoints` are non-empty by definition - } - - /// Returns the latest version information we know of. 
- pub fn client_version(&self) -> Option<&'a str> { - self.0.client_version.as_ref().map(|s| &s[..]) - } - - /// Returns the latest ping time we know of for this node. `None` if we never successfully - /// pinged this node. - pub fn latest_ping(&self) -> Option { - self.0.latest_ping - } + /// Returns the endpoint of an established connection to the peer. + pub fn endpoint(&self) -> &'a ConnectedPoint { + &self.0.endpoints[0] // `endpoints` are non-empty by definition + } + + /// Returns the latest version information we know of. + pub fn client_version(&self) -> Option<&'a str> { + self.0.client_version.as_ref().map(|s| &s[..]) + } + + /// Returns the latest ping time we know of for this node. `None` if we never successfully + /// pinged this node. + pub fn latest_ping(&self) -> Option { + self.0.latest_ping + } } /// Event that can be emitted by the behaviour. #[derive(Debug)] pub enum DebugInfoEvent { - /// We have obtained debug information from a peer, including the addresses it is listening - /// on. - Identified { - /// Id of the peer that has been identified. - peer_id: PeerId, - /// Information about the peer. - info: IdentifyInfo, - }, + /// We have obtained debug information from a peer, including the addresses it is listening + /// on. + Identified { + /// Id of the peer that has been identified. + peer_id: PeerId, + /// Information about the peer. 
+ info: IdentifyInfo, + }, } impl NetworkBehaviour for DebugInfoBehaviour { - type ProtocolsHandler = IntoProtocolsHandlerSelect< - ::ProtocolsHandler, - ::ProtocolsHandler - >; - type OutEvent = DebugInfoEvent; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - IntoProtocolsHandler::select(self.ping.new_handler(), self.identify.new_handler()) - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - let mut list = self.ping.addresses_of_peer(peer_id); - list.extend_from_slice(&self.identify.addresses_of_peer(peer_id)); - list - } - - fn inject_connected(&mut self, peer_id: &PeerId) { - self.ping.inject_connected(peer_id); - self.identify.inject_connected(peer_id); - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.ping.inject_connection_established(peer_id, conn, endpoint); - self.identify.inject_connection_established(peer_id, conn, endpoint); - match self.nodes_info.entry(peer_id.clone()) { - Entry::Vacant(e) => { - e.insert(NodeInfo::new(endpoint.clone())); - } - Entry::Occupied(e) => { - let e = e.into_mut(); - if e.info_expire.as_ref().map(|exp| *exp < Instant::now()).unwrap_or(false) { - e.client_version = None; - e.latest_ping = None; - } - e.info_expire = None; - e.endpoints.push(endpoint.clone()); - } - } - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.ping.inject_connection_closed(peer_id, conn, endpoint); - self.identify.inject_connection_closed(peer_id, conn, endpoint); - - if let Some(entry) = self.nodes_info.get_mut(peer_id) { - entry.endpoints.retain(|ep| ep != endpoint) - } else { - error!(target: "sub-libp2p", + type ProtocolsHandler = IntoProtocolsHandlerSelect< + ::ProtocolsHandler, + ::ProtocolsHandler, + >; + type OutEvent = DebugInfoEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + IntoProtocolsHandler::select(self.ping.new_handler(), 
self.identify.new_handler()) + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + let mut list = self.ping.addresses_of_peer(peer_id); + list.extend_from_slice(&self.identify.addresses_of_peer(peer_id)); + list + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + self.ping.inject_connected(peer_id); + self.identify.inject_connected(peer_id); + } + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.ping + .inject_connection_established(peer_id, conn, endpoint); + self.identify + .inject_connection_established(peer_id, conn, endpoint); + match self.nodes_info.entry(peer_id.clone()) { + Entry::Vacant(e) => { + e.insert(NodeInfo::new(endpoint.clone())); + } + Entry::Occupied(e) => { + let e = e.into_mut(); + if e.info_expire + .as_ref() + .map(|exp| *exp < Instant::now()) + .unwrap_or(false) + { + e.client_version = None; + e.latest_ping = None; + } + e.info_expire = None; + e.endpoints.push(endpoint.clone()); + } + } + } + + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.ping.inject_connection_closed(peer_id, conn, endpoint); + self.identify + .inject_connection_closed(peer_id, conn, endpoint); + + if let Some(entry) = self.nodes_info.get_mut(peer_id) { + entry.endpoints.retain(|ep| ep != endpoint) + } else { + error!(target: "sub-libp2p", "Unknown connection to {:?} closed: {:?}", peer_id, endpoint); - } - } + } + } - fn inject_disconnected(&mut self, peer_id: &PeerId) { - self.ping.inject_disconnected(peer_id); - self.identify.inject_disconnected(peer_id); + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.ping.inject_disconnected(peer_id); + self.identify.inject_disconnected(peer_id); - if let Some(entry) = self.nodes_info.get_mut(peer_id) { - entry.info_expire = Some(Instant::now() + CACHE_EXPIRE); - } else { - error!(target: "sub-libp2p", + if let Some(entry) = 
self.nodes_info.get_mut(peer_id) { + entry.info_expire = Some(Instant::now() + CACHE_EXPIRE); + } else { + error!(target: "sub-libp2p", "Disconnected from node we were not connected to {:?}", peer_id); - } - } - - fn inject_event( - &mut self, - peer_id: PeerId, - connection: ConnectionId, - event: <::Handler as ProtocolsHandler>::OutEvent - ) { - match event { - EitherOutput::First(event) => self.ping.inject_event(peer_id, connection, event), - EitherOutput::Second(event) => self.identify.inject_event(peer_id, connection, event), - } - } - - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { - self.ping.inject_addr_reach_failure(peer_id, addr, error); - self.identify.inject_addr_reach_failure(peer_id, addr, error); - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - self.ping.inject_dial_failure(peer_id); - self.identify.inject_dial_failure(peer_id); - } - - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.ping.inject_new_listen_addr(addr); - self.identify.inject_new_listen_addr(addr); - } - - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.ping.inject_expired_listen_addr(addr); - self.identify.inject_expired_listen_addr(addr); - } - - fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - self.ping.inject_new_external_addr(addr); - self.identify.inject_new_external_addr(addr); - } - - fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { - self.ping.inject_listener_error(id, err); - self.identify.inject_listener_error(id, err); - } - - fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { - self.ping.inject_listener_closed(id, reason); - self.identify.inject_listener_closed(id, reason); - } + } + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: <::Handler as ProtocolsHandler>::OutEvent, + ) { + match event { + EitherOutput::First(event) 
=> self.ping.inject_event(peer_id, connection, event), + EitherOutput::Second(event) => self.identify.inject_event(peer_id, connection, event), + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error, + ) { + self.ping.inject_addr_reach_failure(peer_id, addr, error); + self.identify + .inject_addr_reach_failure(peer_id, addr, error); + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + self.ping.inject_dial_failure(peer_id); + self.identify.inject_dial_failure(peer_id); + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_new_listen_addr(addr); + self.identify.inject_new_listen_addr(addr); + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_expired_listen_addr(addr); + self.identify.inject_expired_listen_addr(addr); + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + self.ping.inject_new_external_addr(addr); + self.identify.inject_new_external_addr(addr); + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { + self.ping.inject_listener_error(id, err); + self.identify.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.ping.inject_listener_closed(id, reason); + self.identify.inject_listener_closed(id, reason); + } fn poll( &mut self, @@ -286,66 +306,88 @@ impl NetworkBehaviour for DebugInfoBehaviour { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent > - > { - loop { - match self.ping.poll(cx, params) { - Poll::Pending => break, - Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => { - if let PingEvent { peer, result: Ok(PingSuccess::Ping { rtt }) } = ev { - self.handle_ping_report(&peer, rtt) - } - }, - Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - 
Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event: EitherOutput::First(event) - }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), - } - } - - loop { - match self.identify.poll(cx, params) { - Poll::Pending => break, - Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) => { - match event { - IdentifyEvent::Received { peer_id, info, .. } => { - self.handle_identify_report(&peer_id, &info); - let event = DebugInfoEvent::Identified { peer_id, info }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); - } - IdentifyEvent::Error { peer_id, error } => - debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error), - IdentifyEvent::Sent { .. 
} => {} - } - }, - Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event: EitherOutput::Second(event) - }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), - } - } - - while let Poll::Ready(Some(())) = self.garbage_collect.poll_next_unpin(cx) { - self.nodes_info.retain(|_, node| { - node.info_expire.as_ref().map(|exp| *exp >= Instant::now()).unwrap_or(true) - }); - } - - Poll::Pending - } +>{ + loop { + match self.ping.poll(cx, params) { + Poll::Pending => break, + Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => { + if let PingEvent { + peer, + result: Ok(PingSuccess::Ping { rtt }), + } = ev + { + self.handle_ping_report(&peer, rtt) + } + } + Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => { + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + } + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => { + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) + } + Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }) => { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: EitherOutput::First(event), + }) + } + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) + } + } + } + + loop { + match self.identify.poll(cx, params) { + Poll::Pending => break, + Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)) => 
match event { + IdentifyEvent::Received { peer_id, info, .. } => { + self.handle_identify_report(&peer_id, &info); + let event = DebugInfoEvent::Identified { peer_id, info }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(event)); + } + IdentifyEvent::Error { peer_id, error } => { + debug!(target: "sub-libp2p", "Identification with peer {:?} failed => {}", peer_id, error) + } + IdentifyEvent::Sent { .. } => {} + }, + Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => { + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + } + Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => { + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) + } + Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }) => { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: EitherOutput::Second(event), + }) + } + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) + } + } + } + + while let Poll::Ready(Some(())) = self.garbage_collect.poll_next_unpin(cx) { + self.nodes_info.retain(|_, node| { + node.info_expire + .as_ref() + .map(|exp| *exp >= Instant::now()) + .unwrap_or(true) + }); + } + + Poll::Pending + } } diff --git a/client/network/src/discovery.rs b/client/network/src/discovery.rs index fc78e9b3e3..e297160c09 100644 --- a/client/network/src/discovery.rs +++ b/client/network/src/discovery.rs @@ -48,733 +48,786 @@ use crate::config::ProtocolId; use futures::prelude::*; use futures_timer::Delay; -use libp2p::core::{connection::{ConnectionId, ListenerId}, ConnectedPoint, Multiaddr, PeerId, PublicKey}; -use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; -use libp2p::swarm::protocols_handler::multi::MultiHandler; -use libp2p::kad::{Kademlia, KademliaConfig, KademliaEvent, Quorum, Record}; 
-use libp2p::kad::GetClosestPeersError; +use libp2p::core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, Multiaddr, PeerId, PublicKey, +}; use libp2p::kad::handler::KademliaHandler; +use libp2p::kad::record::{ + self, + store::{MemoryStore, RecordStore}, +}; +use libp2p::kad::GetClosestPeersError; use libp2p::kad::QueryId; -use libp2p::kad::record::{self, store::{MemoryStore, RecordStore}}; -#[cfg(not(target_os = "unknown"))] -use libp2p::swarm::toggle::Toggle; +use libp2p::kad::{Kademlia, KademliaConfig, KademliaEvent, Quorum, Record}; #[cfg(not(target_os = "unknown"))] use libp2p::mdns::{Mdns, MdnsEvent}; use libp2p::multiaddr::Protocol; -use log::{debug, info, trace, warn, error}; -use std::{cmp, collections::{HashMap, HashSet, VecDeque}, io, time::Duration}; -use std::task::{Context, Poll}; +use libp2p::swarm::protocols_handler::multi::MultiHandler; +#[cfg(not(target_os = "unknown"))] +use libp2p::swarm::toggle::Toggle; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters, ProtocolsHandler}; +use log::{debug, error, info, trace, warn}; use sp_core::hexdisplay::HexDisplay; +use std::task::{Context, Poll}; +use std::{ + cmp, + collections::{HashMap, HashSet, VecDeque}, + io, + time::Duration, +}; /// `DiscoveryBehaviour` configuration. pub struct DiscoveryConfig { - local_peer_id: PeerId, - user_defined: Vec<(PeerId, Multiaddr)>, - allow_private_ipv4: bool, - discovery_only_if_under_num: u64, - enable_mdns: bool, - kademlias: HashMap> + local_peer_id: PeerId, + user_defined: Vec<(PeerId, Multiaddr)>, + allow_private_ipv4: bool, + discovery_only_if_under_num: u64, + enable_mdns: bool, + kademlias: HashMap>, } impl DiscoveryConfig { - /// Create a default configuration with the given public key. 
- pub fn new(local_public_key: PublicKey) -> Self { - let mut this = DiscoveryConfig { - local_peer_id: local_public_key.into_peer_id(), - user_defined: Vec::new(), - allow_private_ipv4: true, - discovery_only_if_under_num: std::u64::MAX, - enable_mdns: false, - kademlias: HashMap::new() - }; - - // Temporary hack to retain backwards compatibility. - // We should eventually remove the special handling of DEFAULT_PROTO_NAME. - let proto_id = ProtocolId::from(libp2p::kad::protocol::DEFAULT_PROTO_NAME); - let proto_name = Vec::from(proto_id.as_bytes()); - this.add_kademlia(proto_id, proto_name); - - this - } - - /// Set the number of active connections at which we pause discovery. - pub fn discovery_limit(&mut self, limit: u64) -> &mut Self { - self.discovery_only_if_under_num = limit; - self - } - - /// Set custom nodes which never expire, e.g. bootstrap or reserved nodes. - pub fn with_user_defined(&mut self, user_defined: I) -> &mut Self - where - I: IntoIterator - { - for (peer_id, addr) in user_defined { - for kad in self.kademlias.values_mut() { - kad.add_address(&peer_id, addr.clone()) - } - self.user_defined.push((peer_id, addr)) - } - self - } - - /// Should private IPv4 addresses be reported? - pub fn allow_private_ipv4(&mut self, value: bool) -> &mut Self { - self.allow_private_ipv4 = value; - self - } - - /// Should MDNS discovery be supported? - pub fn with_mdns(&mut self, value: bool) -> &mut Self { - if value && cfg!(target_os = "unknown") { - log::warn!(target: "sub-libp2p", "mDNS is not available on this platform") - } - self.enable_mdns = value; - self - } - - /// Add discovery via Kademlia for the given protocol. - pub fn add_protocol(&mut self, p: ProtocolId) -> &mut Self { - // NB: If this protocol name derivation is changed, check if - // `DiscoveryBehaviour::new_handler` is still correct. 
- let proto_name = { - let mut v = vec![b'/']; - v.extend_from_slice(p.as_bytes()); - v.extend_from_slice(b"/kad"); - v - }; - - self.add_kademlia(p, proto_name); - self - } - - fn add_kademlia(&mut self, id: ProtocolId, proto_name: Vec) { - if self.kademlias.contains_key(&id) { - warn!(target: "sub-libp2p", "Discovery already registered for protocol {:?}", id); - return - } - - let mut config = KademliaConfig::default(); - config.set_protocol_name(proto_name); - - let store = MemoryStore::new(self.local_peer_id.clone()); - let mut kad = Kademlia::with_config(self.local_peer_id.clone(), store, config); - - for (peer_id, addr) in &self.user_defined { - kad.add_address(peer_id, addr.clone()); - } - - self.kademlias.insert(id, kad); - } - - /// Create a `DiscoveryBehaviour` from this config. - pub fn finish(self) -> DiscoveryBehaviour { - DiscoveryBehaviour { - user_defined: self.user_defined, - kademlias: self.kademlias, - next_kad_random_query: Delay::new(Duration::new(0, 0)), - duration_to_next_kad: Duration::from_secs(1), - discoveries: VecDeque::new(), - local_peer_id: self.local_peer_id, - num_connections: 0, - allow_private_ipv4: self.allow_private_ipv4, - discovery_only_if_under_num: self.discovery_only_if_under_num, - #[cfg(not(target_os = "unknown"))] - mdns: if self.enable_mdns { - match Mdns::new() { - Ok(mdns) => Some(mdns).into(), - Err(err) => { - warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); - None.into() - } - } - } else { - None.into() - }, - } - } + /// Create a default configuration with the given public key. + pub fn new(local_public_key: PublicKey) -> Self { + let mut this = DiscoveryConfig { + local_peer_id: local_public_key.into_peer_id(), + user_defined: Vec::new(), + allow_private_ipv4: true, + discovery_only_if_under_num: std::u64::MAX, + enable_mdns: false, + kademlias: HashMap::new(), + }; + + // Temporary hack to retain backwards compatibility. 
+ // We should eventually remove the special handling of DEFAULT_PROTO_NAME. + let proto_id = ProtocolId::from(libp2p::kad::protocol::DEFAULT_PROTO_NAME); + let proto_name = Vec::from(proto_id.as_bytes()); + this.add_kademlia(proto_id, proto_name); + + this + } + + /// Set the number of active connections at which we pause discovery. + pub fn discovery_limit(&mut self, limit: u64) -> &mut Self { + self.discovery_only_if_under_num = limit; + self + } + + /// Set custom nodes which never expire, e.g. bootstrap or reserved nodes. + pub fn with_user_defined(&mut self, user_defined: I) -> &mut Self + where + I: IntoIterator, + { + for (peer_id, addr) in user_defined { + for kad in self.kademlias.values_mut() { + kad.add_address(&peer_id, addr.clone()) + } + self.user_defined.push((peer_id, addr)) + } + self + } + + /// Should private IPv4 addresses be reported? + pub fn allow_private_ipv4(&mut self, value: bool) -> &mut Self { + self.allow_private_ipv4 = value; + self + } + + /// Should MDNS discovery be supported? + pub fn with_mdns(&mut self, value: bool) -> &mut Self { + if value && cfg!(target_os = "unknown") { + log::warn!(target: "sub-libp2p", "mDNS is not available on this platform") + } + self.enable_mdns = value; + self + } + + /// Add discovery via Kademlia for the given protocol. + pub fn add_protocol(&mut self, p: ProtocolId) -> &mut Self { + // NB: If this protocol name derivation is changed, check if + // `DiscoveryBehaviour::new_handler` is still correct. 
+ let proto_name = { + let mut v = vec![b'/']; + v.extend_from_slice(p.as_bytes()); + v.extend_from_slice(b"/kad"); + v + }; + + self.add_kademlia(p, proto_name); + self + } + + fn add_kademlia(&mut self, id: ProtocolId, proto_name: Vec) { + if self.kademlias.contains_key(&id) { + warn!(target: "sub-libp2p", "Discovery already registered for protocol {:?}", id); + return; + } + + let mut config = KademliaConfig::default(); + config.set_protocol_name(proto_name); + + let store = MemoryStore::new(self.local_peer_id.clone()); + let mut kad = Kademlia::with_config(self.local_peer_id.clone(), store, config); + + for (peer_id, addr) in &self.user_defined { + kad.add_address(peer_id, addr.clone()); + } + + self.kademlias.insert(id, kad); + } + + /// Create a `DiscoveryBehaviour` from this config. + pub fn finish(self) -> DiscoveryBehaviour { + DiscoveryBehaviour { + user_defined: self.user_defined, + kademlias: self.kademlias, + next_kad_random_query: Delay::new(Duration::new(0, 0)), + duration_to_next_kad: Duration::from_secs(1), + discoveries: VecDeque::new(), + local_peer_id: self.local_peer_id, + num_connections: 0, + allow_private_ipv4: self.allow_private_ipv4, + discovery_only_if_under_num: self.discovery_only_if_under_num, + #[cfg(not(target_os = "unknown"))] + mdns: if self.enable_mdns { + match Mdns::new() { + Ok(mdns) => Some(mdns).into(), + Err(err) => { + warn!(target: "sub-libp2p", "Failed to initialize mDNS: {:?}", err); + None.into() + } + } + } else { + None.into() + }, + } + } } /// Implementation of `NetworkBehaviour` that discovers the nodes on the network. pub struct DiscoveryBehaviour { - /// User-defined list of nodes and their addresses. Typically includes bootstrap nodes and - /// reserved nodes. - user_defined: Vec<(PeerId, Multiaddr)>, - /// Kademlia requests and answers. - kademlias: HashMap>, - /// Discovers nodes on the local network. 
- #[cfg(not(target_os = "unknown"))] - mdns: Toggle, - /// Stream that fires when we need to perform the next random Kademlia query. - next_kad_random_query: Delay, - /// After `next_kad_random_query` triggers, the next one triggers after this duration. - duration_to_next_kad: Duration, - /// Discovered nodes to return. - discoveries: VecDeque, - /// Identity of our local node. - local_peer_id: PeerId, - /// Number of nodes we're currently connected to. - num_connections: u64, - /// If false, `addresses_of_peer` won't return any private IPv4 address, except for the ones - /// stored in `user_defined`. - allow_private_ipv4: bool, - /// Number of active connections over which we interrupt the discovery process. - discovery_only_if_under_num: u64, + /// User-defined list of nodes and their addresses. Typically includes bootstrap nodes and + /// reserved nodes. + user_defined: Vec<(PeerId, Multiaddr)>, + /// Kademlia requests and answers. + kademlias: HashMap>, + /// Discovers nodes on the local network. + #[cfg(not(target_os = "unknown"))] + mdns: Toggle, + /// Stream that fires when we need to perform the next random Kademlia query. + next_kad_random_query: Delay, + /// After `next_kad_random_query` triggers, the next one triggers after this duration. + duration_to_next_kad: Duration, + /// Discovered nodes to return. + discoveries: VecDeque, + /// Identity of our local node. + local_peer_id: PeerId, + /// Number of nodes we're currently connected to. + num_connections: u64, + /// If false, `addresses_of_peer` won't return any private IPv4 address, except for the ones + /// stored in `user_defined`. + allow_private_ipv4: bool, + /// Number of active connections over which we interrupt the discovery process. + discovery_only_if_under_num: u64, } impl DiscoveryBehaviour { - /// Returns the list of nodes that we know exist in the network. 
- pub fn known_peers(&mut self) -> impl Iterator { - let mut set = HashSet::new(); - for p in self.kademlias.values_mut().map(|k| k.kbuckets_entries()).flatten() { - set.insert(p); - } - set.into_iter() - } - - /// Adds a hard-coded address for the given peer, that never expires. - /// - /// This adds an entry to the parameter that was passed to `new`. - /// - /// If we didn't know this address before, also generates a `Discovered` event. - pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - if self.user_defined.iter().all(|(p, a)| *p != peer_id && *a != addr) { - for k in self.kademlias.values_mut() { - k.add_address(&peer_id, addr.clone()) - } - self.discoveries.push_back(peer_id.clone()); - self.user_defined.push((peer_id, addr)); - } - } - - /// Call this method when a node reports an address for itself. - /// - /// **Note**: It is important that you call this method, otherwise the discovery mechanism will - /// not properly work. - pub fn add_self_reported_address(&mut self, peer_id: &PeerId, addr: Multiaddr) { - for k in self.kademlias.values_mut() { - k.add_address(peer_id, addr.clone()) - } - } - - /// Start fetching a record from the DHT. - /// - /// A corresponding `ValueFound` or `ValueNotFound` event will later be generated. - pub fn get_value(&mut self, key: &record::Key) { - for k in self.kademlias.values_mut() { - k.get_record(key, Quorum::One) - } - } - - /// Start putting a record into the DHT. Other nodes can later fetch that value with - /// `get_value`. - /// - /// A corresponding `ValuePut` or `ValuePutFailed` event will later be generated. - pub fn put_value(&mut self, key: record::Key, value: Vec) { - for k in self.kademlias.values_mut() { - k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) - } - } - - /// Returns the number of nodes that are in the Kademlia k-buckets. 
- pub fn num_kbuckets_entries(&mut self) -> impl ExactSizeIterator { - self.kademlias.iter_mut().map(|(id, kad)| (id, kad.kbuckets_entries().count())) - } - - /// Returns the number of records in the Kademlia record stores. - pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { - // Note that this code is ok only because we use a `MemoryStore`. - self.kademlias.iter_mut().map(|(id, kad)| { - let num = kad.store_mut().records().count(); - (id, num) - }) - } - - /// Returns the total size in bytes of all the records in the Kademlia record stores. - pub fn kademlia_records_total_size(&mut self) -> impl ExactSizeIterator { - // Note that this code is ok only because we use a `MemoryStore`. If the records were - // for example stored on disk, this would load every single one of them every single time. - self.kademlias.iter_mut().map(|(id, kad)| { - let size = kad.store_mut().records().fold(0, |tot, rec| tot + rec.value.len()); - (id, size) - }) - } + /// Returns the list of nodes that we know exist in the network. + pub fn known_peers(&mut self) -> impl Iterator { + let mut set = HashSet::new(); + for p in self + .kademlias + .values_mut() + .map(|k| k.kbuckets_entries()) + .flatten() + { + set.insert(p); + } + set.into_iter() + } + + /// Adds a hard-coded address for the given peer, that never expires. + /// + /// This adds an entry to the parameter that was passed to `new`. + /// + /// If we didn't know this address before, also generates a `Discovered` event. + pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + if self + .user_defined + .iter() + .all(|(p, a)| *p != peer_id && *a != addr) + { + for k in self.kademlias.values_mut() { + k.add_address(&peer_id, addr.clone()) + } + self.discoveries.push_back(peer_id.clone()); + self.user_defined.push((peer_id, addr)); + } + } + + /// Call this method when a node reports an address for itself. 
+ /// + /// **Note**: It is important that you call this method, otherwise the discovery mechanism will + /// not properly work. + pub fn add_self_reported_address(&mut self, peer_id: &PeerId, addr: Multiaddr) { + for k in self.kademlias.values_mut() { + k.add_address(peer_id, addr.clone()) + } + } + + /// Start fetching a record from the DHT. + /// + /// A corresponding `ValueFound` or `ValueNotFound` event will later be generated. + pub fn get_value(&mut self, key: &record::Key) { + for k in self.kademlias.values_mut() { + k.get_record(key, Quorum::One) + } + } + + /// Start putting a record into the DHT. Other nodes can later fetch that value with + /// `get_value`. + /// + /// A corresponding `ValuePut` or `ValuePutFailed` event will later be generated. + pub fn put_value(&mut self, key: record::Key, value: Vec) { + for k in self.kademlias.values_mut() { + k.put_record(Record::new(key.clone(), value.clone()), Quorum::All) + } + } + + /// Returns the number of nodes that are in the Kademlia k-buckets. + pub fn num_kbuckets_entries(&mut self) -> impl ExactSizeIterator { + self.kademlias + .iter_mut() + .map(|(id, kad)| (id, kad.kbuckets_entries().count())) + } + + /// Returns the number of records in the Kademlia record stores. + pub fn num_kademlia_records(&mut self) -> impl ExactSizeIterator { + // Note that this code is ok only because we use a `MemoryStore`. + self.kademlias.iter_mut().map(|(id, kad)| { + let num = kad.store_mut().records().count(); + (id, num) + }) + } + + /// Returns the total size in bytes of all the records in the Kademlia record stores. + pub fn kademlia_records_total_size( + &mut self, + ) -> impl ExactSizeIterator { + // Note that this code is ok only because we use a `MemoryStore`. If the records were + // for example stored on disk, this would load every single one of them every single time. 
+ self.kademlias.iter_mut().map(|(id, kad)| { + let size = kad + .store_mut() + .records() + .fold(0, |tot, rec| tot + rec.value.len()); + (id, size) + }) + } } /// Event generated by the `DiscoveryBehaviour`. pub enum DiscoveryOut { - /// The address of a peer has been added to the Kademlia routing table. - /// - /// Can be called multiple times with the same identity. - Discovered(PeerId), + /// The address of a peer has been added to the Kademlia routing table. + /// + /// Can be called multiple times with the same identity. + Discovered(PeerId), - /// A peer connected to this node for whom no listen address is known. - /// - /// In order for the peer to be added to the Kademlia routing table, a known - /// listen address must be added via [`DiscoveryBehaviour::add_self_reported_address`], - /// e.g. obtained through the `identify` protocol. - UnroutablePeer(PeerId), + /// A peer connected to this node for whom no listen address is known. + /// + /// In order for the peer to be added to the Kademlia routing table, a known + /// listen address must be added via [`DiscoveryBehaviour::add_self_reported_address`], + /// e.g. obtained through the `identify` protocol. + UnroutablePeer(PeerId), - /// The DHT yielded results for the record request, grouped in (key, value) pairs. - ValueFound(Vec<(record::Key, Vec)>), + /// The DHT yielded results for the record request, grouped in (key, value) pairs. + ValueFound(Vec<(record::Key, Vec)>), - /// The record requested was not found in the DHT. - ValueNotFound(record::Key), + /// The record requested was not found in the DHT. + ValueNotFound(record::Key), - /// The record with a given key was successfully inserted into the DHT. - ValuePut(record::Key), + /// The record with a given key was successfully inserted into the DHT. + ValuePut(record::Key), - /// Inserting a value into the DHT failed. - ValuePutFailed(record::Key), + /// Inserting a value into the DHT failed. 
+ ValuePutFailed(record::Key), - /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. - RandomKademliaStarted(Vec), + /// Started a random Kademlia query for each DHT identified by the given `ProtocolId`s. + RandomKademliaStarted(Vec), } impl NetworkBehaviour for DiscoveryBehaviour { - type ProtocolsHandler = MultiHandler>; - type OutEvent = DiscoveryOut; + type ProtocolsHandler = MultiHandler>; + type OutEvent = DiscoveryOut; - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let iter = self.kademlias.iter_mut() - .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let iter = self + .kademlias + .iter_mut() + .map(|(p, k)| (p.clone(), NetworkBehaviour::new_handler(k))); - MultiHandler::try_from_iter(iter) - .expect("There can be at most one handler per `ProtocolId` and \ + MultiHandler::try_from_iter(iter).expect( + "There can be at most one handler per `ProtocolId` and \ protocol names contain the `ProtocolId` so no two protocol \ names in `self.kademlias` can be equal which is the only error \ `try_from_iter` can return, therefore this call is guaranteed \ - to succeed; qed") - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - let mut list = self.user_defined.iter() - .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) - .collect::>(); - - { - let mut list_to_filter = Vec::new(); - for k in self.kademlias.values_mut() { - list_to_filter.extend(k.addresses_of_peer(peer_id)) - } - - #[cfg(not(target_os = "unknown"))] - list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); - - if !self.allow_private_ipv4 { - list_to_filter.retain(|addr| { - if let Some(Protocol::Ip4(addr)) = addr.iter().next() { - if addr.is_private() { - return false; - } - } - - true - }); - } - - list.extend(list_to_filter); - } - - trace!(target: "sub-libp2p", "Addresses of {:?} are {:?}", peer_id, list); - - if list.is_empty() { - let mut 
has_entry = false; - for k in self.kademlias.values_mut() { - if k.kbuckets_entries().any(|p| p == peer_id) { - has_entry = true; - break - } - } - if has_entry { - debug!(target: "sub-libp2p", + to succeed; qed", + ) + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + let mut list = self + .user_defined + .iter() + .filter_map(|(p, a)| if p == peer_id { Some(a.clone()) } else { None }) + .collect::>(); + + { + let mut list_to_filter = Vec::new(); + for k in self.kademlias.values_mut() { + list_to_filter.extend(k.addresses_of_peer(peer_id)) + } + + #[cfg(not(target_os = "unknown"))] + list_to_filter.extend(self.mdns.addresses_of_peer(peer_id)); + + if !self.allow_private_ipv4 { + list_to_filter.retain(|addr| { + if let Some(Protocol::Ip4(addr)) = addr.iter().next() { + if addr.is_private() { + return false; + } + } + + true + }); + } + + list.extend(list_to_filter); + } + + trace!(target: "sub-libp2p", "Addresses of {:?} are {:?}", peer_id, list); + + if list.is_empty() { + let mut has_entry = false; + for k in self.kademlias.values_mut() { + if k.kbuckets_entries().any(|p| p == peer_id) { + has_entry = true; + break; + } + } + if has_entry { + debug!(target: "sub-libp2p", "Requested dialing to {:?} (peer in k-buckets), and no address was found", peer_id); - } else { - debug!(target: "sub-libp2p", + } else { + debug!(target: "sub-libp2p", "Requested dialing to {:?} (peer not in k-buckets), and no address was found", peer_id); - } - } - list - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.num_connections += 1; - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_connection_established(k, peer_id, conn, endpoint) - } - } - - fn inject_connected(&mut self, peer_id: &PeerId) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_connected(k, peer_id) - } - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, 
endpoint: &ConnectedPoint) { - self.num_connections -= 1; - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_connection_closed(k, peer_id, conn, endpoint) - } - } - - fn inject_disconnected(&mut self, peer_id: &PeerId) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_disconnected(k, peer_id) - } - } - - fn inject_addr_reach_failure( - &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error - ) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_addr_reach_failure(k, peer_id, addr, error) - } - } - - fn inject_event( - &mut self, - peer_id: PeerId, - connection: ConnectionId, - (pid, event): ::OutEvent, - ) { - if let Some(kad) = self.kademlias.get_mut(&pid) { - return kad.inject_event(peer_id, connection, event) - } - log::error!(target: "sub-libp2p", + } + } + list + } + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.num_connections += 1; + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_connection_established(k, peer_id, conn, endpoint) + } + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_connected(k, peer_id) + } + } + + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.num_connections -= 1; + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_connection_closed(k, peer_id, conn, endpoint) + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_disconnected(k, peer_id) + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error, + ) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_addr_reach_failure(k, peer_id, addr, error) + } + } + 
+ fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + (pid, event): ::OutEvent, + ) { + if let Some(kad) = self.kademlias.get_mut(&pid) { + return kad.inject_event(peer_id, connection, event); + } + log::error!(target: "sub-libp2p", "inject_node_event: no kademlia instance registered for protocol {:?}", pid) - } - - fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - let new_addr = addr.clone() - .with(Protocol::P2p(self.local_peer_id.clone().into())); - info!(target: "sub-libp2p", "🔍 Discovered new external address for our node: {}", new_addr); - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_new_external_addr(k, addr) - } - } - - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - info!(target: "sub-libp2p", "No longer listening on {}", addr); - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_expired_listen_addr(k, addr) - } - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_dial_failure(k, peer_id) - } - } - - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_new_listen_addr(k, addr) - } - } - - fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { - error!(target: "sub-libp2p", "Error on libp2p listener {:?}: {}", id, err); - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_listener_error(k, id, err) - } - } - - fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { - error!(target: "sub-libp2p", "Libp2p listener {:?} closed", id); - for k in self.kademlias.values_mut() { - NetworkBehaviour::inject_listener_closed(k, id, reason) - } - } - - fn poll( - &mut self, - cx: &mut Context, - params: &mut impl PollParameters, - ) -> Poll< - NetworkBehaviourAction< - ::InEvent, - Self::OutEvent, - >, - > { - // Immediately process the content of 
`discovered`. - if let Some(peer_id) = self.discoveries.pop_front() { - let ev = DiscoveryOut::Discovered(peer_id); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - - // Poll the stream that fires when we need to start a random Kademlia query. - while let Poll::Ready(_) = self.next_kad_random_query.poll_unpin(cx) { - let actually_started = if self.num_connections < self.discovery_only_if_under_num { - let random_peer_id = PeerId::random(); - debug!(target: "sub-libp2p", + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + let new_addr = addr + .clone() + .with(Protocol::P2p(self.local_peer_id.clone().into())); + info!(target: "sub-libp2p", "🔍 Discovered new external address for our node: {}", new_addr); + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_external_addr(k, addr) + } + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + info!(target: "sub-libp2p", "No longer listening on {}", addr); + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_expired_listen_addr(k, addr) + } + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_dial_failure(k, peer_id) + } + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_new_listen_addr(k, addr) + } + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + error!(target: "sub-libp2p", "Error on libp2p listener {:?}: {}", id, err); + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_listener_error(k, id, err) + } + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + error!(target: "sub-libp2p", "Libp2p listener {:?} closed", id); + for k in self.kademlias.values_mut() { + NetworkBehaviour::inject_listener_closed(k, id, reason) + } + } + + fn poll( + &mut self, + cx: &mut 
Context, + params: &mut impl PollParameters, + ) -> Poll< + NetworkBehaviourAction< + ::InEvent, + Self::OutEvent, + >, + > { + // Immediately process the content of `discovered`. + if let Some(peer_id) = self.discoveries.pop_front() { + let ev = DiscoveryOut::Discovered(peer_id); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + + // Poll the stream that fires when we need to start a random Kademlia query. + while let Poll::Ready(_) = self.next_kad_random_query.poll_unpin(cx) { + let actually_started = if self.num_connections < self.discovery_only_if_under_num { + let random_peer_id = PeerId::random(); + debug!(target: "sub-libp2p", "Libp2p <= Starting random Kademlia request for {:?}", random_peer_id); - for k in self.kademlias.values_mut() { - k.get_closest_peers(random_peer_id.clone()) - } - true - } else { - debug!( - target: "sub-libp2p", - "Kademlia paused due to high number of connections ({})", - self.num_connections - ); - false - }; - - // Schedule the next random query with exponentially increasing delay, - // capped at 60 seconds. - self.next_kad_random_query = Delay::new(self.duration_to_next_kad); - self.duration_to_next_kad = cmp::min(self.duration_to_next_kad * 2, - Duration::from_secs(60)); - - if actually_started { - let ev = DiscoveryOut::RandomKademliaStarted(self.kademlias.keys().cloned().collect()); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - } - - // Poll Kademlias. - for (pid, kademlia) in &mut self.kademlias { - while let Poll::Ready(ev) = kademlia.poll(cx, params) { - match ev { - NetworkBehaviourAction::GenerateEvent(ev) => match ev { - KademliaEvent::UnroutablePeer { peer, .. } => { - let ev = DiscoveryOut::UnroutablePeer(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::RoutingUpdated { peer, .. 
} => { - let ev = DiscoveryOut::Discovered(peer); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::GetClosestPeersResult(res) => { - match res { - Err(GetClosestPeersError::Timeout { key, peers }) => { - debug!(target: "sub-libp2p", + for k in self.kademlias.values_mut() { + k.get_closest_peers(random_peer_id.clone()) + } + true + } else { + debug!( + target: "sub-libp2p", + "Kademlia paused due to high number of connections ({})", + self.num_connections + ); + false + }; + + // Schedule the next random query with exponentially increasing delay, + // capped at 60 seconds. + self.next_kad_random_query = Delay::new(self.duration_to_next_kad); + self.duration_to_next_kad = + cmp::min(self.duration_to_next_kad * 2, Duration::from_secs(60)); + + if actually_started { + let ev = + DiscoveryOut::RandomKademliaStarted(self.kademlias.keys().cloned().collect()); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + } + + // Poll Kademlias. + for (pid, kademlia) in &mut self.kademlias { + while let Poll::Ready(ev) = kademlia.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(ev) => match ev { + KademliaEvent::UnroutablePeer { peer, .. } => { + let ev = DiscoveryOut::UnroutablePeer(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::RoutingUpdated { peer, .. 
} => { + let ev = DiscoveryOut::Discovered(peer); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::GetClosestPeersResult(res) => match res { + Err(GetClosestPeersError::Timeout { key, peers }) => { + debug!(target: "sub-libp2p", "Libp2p => Query for {:?} timed out with {} results", HexDisplay::from(&key), peers.len()); - }, - Ok(ok) => { - trace!(target: "sub-libp2p", + } + Ok(ok) => { + trace!(target: "sub-libp2p", "Libp2p => Query for {:?} yielded {:?} results", HexDisplay::from(&ok.key), ok.peers.len()); - if ok.peers.is_empty() && self.num_connections != 0 { - debug!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ + if ok.peers.is_empty() && self.num_connections != 0 { + debug!(target: "sub-libp2p", "Libp2p => Random Kademlia query has yielded empty \ results"); - } - } - } - } - KademliaEvent::GetRecordResult(res) => { - let ev = match res { - Ok(ok) => { - let results = ok.records - .into_iter() - .map(|r| (r.key, r.value)) - .collect(); - - DiscoveryOut::ValueFound(results) - } - Err(e @ libp2p::kad::GetRecordError::NotFound { .. }) => { - trace!(target: "sub-libp2p", + } + } + }, + KademliaEvent::GetRecordResult(res) => { + let ev = match res { + Ok(ok) => { + let results = + ok.records.into_iter().map(|r| (r.key, r.value)).collect(); + + DiscoveryOut::ValueFound(results) + } + Err(e @ libp2p::kad::GetRecordError::NotFound { .. 
}) => { + trace!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key()) - } - Err(e) => { - warn!(target: "sub-libp2p", + DiscoveryOut::ValueNotFound(e.into_key()) + } + Err(e) => { + warn!(target: "sub-libp2p", "Libp2p => Failed to get record: {:?}", e); - DiscoveryOut::ValueNotFound(e.into_key()) - } - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::PutRecordResult(res) => { - let ev = match res { - Ok(ok) => DiscoveryOut::ValuePut(ok.key), - Err(e) => { - warn!(target: "sub-libp2p", + DiscoveryOut::ValueNotFound(e.into_key()) + } + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::PutRecordResult(res) => { + let ev = match res { + Ok(ok) => DiscoveryOut::ValuePut(ok.key), + Err(e) => { + warn!(target: "sub-libp2p", "Libp2p => Failed to put record: {:?}", e); - DiscoveryOut::ValuePutFailed(e.into_key()) - } - }; - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - KademliaEvent::RepublishRecordResult(res) => { - match res { - Ok(ok) => debug!(target: "sub-libp2p", + DiscoveryOut::ValuePutFailed(e.into_key()) + } + }; + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + KademliaEvent::RepublishRecordResult(res) => match res { + Ok(ok) => debug!(target: "sub-libp2p", "Libp2p => Record republished: {:?}", ok.key), - Err(e) => warn!(target: "sub-libp2p", + Err(e) => warn!(target: "sub-libp2p", "Libp2p => Republishing of record {:?} failed with: {:?}", - e.key(), e) - } - } - KademliaEvent::Discovered { .. } => { - // We are not interested in these events at the moment. - } - // We never start any other type of query. 
- e => { - warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) - } - } - NetworkBehaviourAction::DialAddress { address } => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - NetworkBehaviourAction::DialPeer { peer_id, condition } => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), - NetworkBehaviourAction::NotifyHandler { peer_id, handler, event } => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event: (pid.clone(), event) - }), - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), - } - } - } - - // Poll mDNS. - #[cfg(not(target_os = "unknown"))] - while let Poll::Ready(ev) = self.mdns.poll(cx, params) { - match ev { - NetworkBehaviourAction::GenerateEvent(event) => { - match event { - MdnsEvent::Discovered(list) => { - if self.num_connections >= self.discovery_only_if_under_num { - continue; - } - - self.discoveries.extend(list.into_iter().map(|(peer_id, _)| peer_id)); - if let Some(peer_id) = self.discoveries.pop_front() { - let ev = DiscoveryOut::Discovered(peer_id); - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); - } - }, - MdnsEvent::Expired(_) => {} - } - }, - NetworkBehaviourAction::DialAddress { address } => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - NetworkBehaviourAction::DialPeer { peer_id, condition } => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), - NetworkBehaviourAction::NotifyHandler { event, .. } => - match event {}, // `event` is an enum with no variant - NetworkBehaviourAction::ReportObservedAddr { address } => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), - } - } - - Poll::Pending - } + e.key(), e), + }, + KademliaEvent::Discovered { .. } => { + // We are not interested in these events at the moment. 
+ } + // We never start any other type of query. + e => { + warn!(target: "sub-libp2p", "Libp2p => Unhandled Kademlia event: {:?}", e) + } + }, + NetworkBehaviourAction::DialAddress { address } => { + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + } + NetworkBehaviourAction::DialPeer { peer_id, condition } => { + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) + } + NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + } => { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event: (pid.clone(), event), + }) + } + NetworkBehaviourAction::ReportObservedAddr { address } => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) + } + } + } + } + + // Poll mDNS. + #[cfg(not(target_os = "unknown"))] + while let Poll::Ready(ev) = self.mdns.poll(cx, params) { + match ev { + NetworkBehaviourAction::GenerateEvent(event) => match event { + MdnsEvent::Discovered(list) => { + if self.num_connections >= self.discovery_only_if_under_num { + continue; + } + + self.discoveries + .extend(list.into_iter().map(|(peer_id, _)| peer_id)); + if let Some(peer_id) = self.discoveries.pop_front() { + let ev = DiscoveryOut::Discovered(peer_id); + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)); + } + } + MdnsEvent::Expired(_) => {} + }, + NetworkBehaviourAction::DialAddress { address } => { + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + } + NetworkBehaviourAction::DialPeer { peer_id, condition } => { + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) + } + NetworkBehaviourAction::NotifyHandler { event, .. 
} => match event {}, // `event` is an enum with no variant + NetworkBehaviourAction::ReportObservedAddr { address } => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) + } + } + } + + Poll::Pending + } } #[cfg(test)] mod tests { - use futures::prelude::*; - use libp2p::identity::Keypair; - use libp2p::Multiaddr; - use libp2p::core::upgrade; - use libp2p::core::transport::{Transport, MemoryTransport}; - use libp2p::core::upgrade::{InboundUpgradeExt, OutboundUpgradeExt}; - use libp2p::swarm::Swarm; - use std::{collections::HashSet, task::Poll}; - use super::{DiscoveryConfig, DiscoveryOut}; - - #[test] - fn discovery_working() { - let mut user_defined = Vec::new(); - - // Build swarms whose behaviour is `DiscoveryBehaviour`. - let mut swarms = (0..25).map(|_| { - let keypair = Keypair::generate_ed25519(); - let keypair2 = keypair.clone(); - - let transport = MemoryTransport - .and_then(move |out, endpoint| { - let secio = libp2p::secio::SecioConfig::new(keypair2); - libp2p::core::upgrade::apply( - out, - secio, - endpoint, - upgrade::Version::V1 - ) - }) - .and_then(move |(peer_id, stream), endpoint| { - let peer_id2 = peer_id.clone(); - let upgrade = libp2p::yamux::Config::default() - .map_inbound(move |muxer| (peer_id, muxer)) - .map_outbound(move |muxer| (peer_id2, muxer)); - upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1) - }); - - let behaviour = { - let mut config = DiscoveryConfig::new(keypair.public()); - config.with_user_defined(user_defined.clone()) - .allow_private_ipv4(true) - .discovery_limit(50); - config.finish() - }; - - let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); - let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()).parse().unwrap(); - - if user_defined.is_empty() { - user_defined.push((keypair.public().into_peer_id(), listen_addr.clone())); - } - - Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); - (swarm, listen_addr) - }).collect::>(); - 
- // Build a `Vec>` with the list of nodes remaining to be discovered. - let mut to_discover = (0..swarms.len()).map(|n| { - (0..swarms.len()).filter(|p| *p != n) - .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) - .collect::>() - }).collect::>(); - - let fut = futures::future::poll_fn(move |cx| { - 'polling: loop { - for swarm_n in 0..swarms.len() { - match swarms[swarm_n].0.poll_next_unpin(cx) { - Poll::Ready(Some(e)) => { - match e { - DiscoveryOut::UnroutablePeer(other) => { - // Call `add_self_reported_address` to simulate identify happening. - let addr = swarms.iter().find_map(|(s, a)| - if s.local_peer_id == other { - Some(a.clone()) - } else { - None - }) - .unwrap(); - swarms[swarm_n].0.add_self_reported_address(&other, addr); - }, - DiscoveryOut::Discovered(other) => { - to_discover[swarm_n].remove(&other); - } - _ => {} - } - continue 'polling - } - _ => {} - } - } - break - } - - if to_discover.iter().all(|l| l.is_empty()) { - Poll::Ready(()) - } else { - Poll::Pending - } - }); - - futures::executor::block_on(fut); - } + use super::{DiscoveryConfig, DiscoveryOut}; + use futures::prelude::*; + use libp2p::core::transport::{MemoryTransport, Transport}; + use libp2p::core::upgrade; + use libp2p::core::upgrade::{InboundUpgradeExt, OutboundUpgradeExt}; + use libp2p::identity::Keypair; + use libp2p::swarm::Swarm; + use libp2p::Multiaddr; + use std::{collections::HashSet, task::Poll}; + + #[test] + fn discovery_working() { + let mut user_defined = Vec::new(); + + // Build swarms whose behaviour is `DiscoveryBehaviour`. 
+ let mut swarms = (0..25) + .map(|_| { + let keypair = Keypair::generate_ed25519(); + let keypair2 = keypair.clone(); + + let transport = MemoryTransport + .and_then(move |out, endpoint| { + let secio = libp2p::secio::SecioConfig::new(keypair2); + libp2p::core::upgrade::apply(out, secio, endpoint, upgrade::Version::V1) + }) + .and_then(move |(peer_id, stream), endpoint| { + let peer_id2 = peer_id.clone(); + let upgrade = libp2p::yamux::Config::default() + .map_inbound(move |muxer| (peer_id, muxer)) + .map_outbound(move |muxer| (peer_id2, muxer)); + upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1) + }); + + let behaviour = { + let mut config = DiscoveryConfig::new(keypair.public()); + config + .with_user_defined(user_defined.clone()) + .allow_private_ipv4(true) + .discovery_limit(50); + config.finish() + }; + + let mut swarm = Swarm::new(transport, behaviour, keypair.public().into_peer_id()); + let listen_addr: Multiaddr = format!("/memory/{}", rand::random::()) + .parse() + .unwrap(); + + if user_defined.is_empty() { + user_defined.push((keypair.public().into_peer_id(), listen_addr.clone())); + } + + Swarm::listen_on(&mut swarm, listen_addr.clone()).unwrap(); + (swarm, listen_addr) + }) + .collect::>(); + + // Build a `Vec>` with the list of nodes remaining to be discovered. + let mut to_discover = (0..swarms.len()) + .map(|n| { + (0..swarms.len()) + .filter(|p| *p != n) + .map(|p| Swarm::local_peer_id(&swarms[p].0).clone()) + .collect::>() + }) + .collect::>(); + + let fut = futures::future::poll_fn(move |cx| { + 'polling: loop { + for swarm_n in 0..swarms.len() { + match swarms[swarm_n].0.poll_next_unpin(cx) { + Poll::Ready(Some(e)) => { + match e { + DiscoveryOut::UnroutablePeer(other) => { + // Call `add_self_reported_address` to simulate identify happening. 
+ let addr = swarms + .iter() + .find_map(|(s, a)| { + if s.local_peer_id == other { + Some(a.clone()) + } else { + None + } + }) + .unwrap(); + swarms[swarm_n].0.add_self_reported_address(&other, addr); + } + DiscoveryOut::Discovered(other) => { + to_discover[swarm_n].remove(&other); + } + _ => {} + } + continue 'polling; + } + _ => {} + } + } + break; + } + + if to_discover.iter().all(|l| l.is_empty()) { + Poll::Ready(()) + } else { + Poll::Pending + } + }); + + futures::executor::block_on(fut); + } } diff --git a/client/network/src/error.rs b/client/network/src/error.rs index 158e75fcf1..d853670394 100644 --- a/client/network/src/error.rs +++ b/client/network/src/error.rs @@ -16,7 +16,7 @@ //! Substrate network possible errors. -use libp2p::{PeerId, Multiaddr}; +use libp2p::{Multiaddr, PeerId}; use std::fmt; @@ -26,43 +26,43 @@ pub type Result = std::result::Result; /// Error type for the network. #[derive(derive_more::Display, derive_more::From)] pub enum Error { - /// Io error - Io(std::io::Error), - /// Client error - Client(sp_blockchain::Error), - /// The same bootnode (based on address) is registered with two different peer ids. - #[display( - fmt = "The same bootnode (`{}`) is registered with two different peer ids: `{}` and `{}`", - address, - first_id, - second_id, - )] - DuplicateBootnode { - /// The address of the bootnode. - address: Multiaddr, - /// The first peer id that was found for the bootnode. - first_id: PeerId, - /// The second peer id that was found for the bootnode. - second_id: PeerId, - }, - /// Prometheus metrics error. - Prometheus(prometheus_endpoint::PrometheusError) + /// Io error + Io(std::io::Error), + /// Client error + Client(sp_blockchain::Error), + /// The same bootnode (based on address) is registered with two different peer ids. 
+ #[display( + fmt = "The same bootnode (`{}`) is registered with two different peer ids: `{}` and `{}`", + address, + first_id, + second_id + )] + DuplicateBootnode { + /// The address of the bootnode. + address: Multiaddr, + /// The first peer id that was found for the bootnode. + first_id: PeerId, + /// The second peer id that was found for the bootnode. + second_id: PeerId, + }, + /// Prometheus metrics error. + Prometheus(prometheus_endpoint::PrometheusError), } // Make `Debug` use the `Display` implementation. impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - fmt::Display::fmt(self, f) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + fmt::Display::fmt(self, f) + } } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Io(ref err) => Some(err), - Error::Client(ref err) => Some(err), - Error::DuplicateBootnode { .. } => None, - Error::Prometheus(ref err) => Some(err), - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Io(ref err) => Some(err), + Error::Client(ref err) => Some(err), + Error::DuplicateBootnode { .. 
} => None, + Error::Prometheus(ref err) => Some(err), + } + } } diff --git a/client/network/src/lib.rs b/client/network/src/lib.rs index d8afa1f153..6d7327ea0f 100644 --- a/client/network/src/lib.rs +++ b/client/network/src/lib.rs @@ -246,13 +246,13 @@ pub mod config; pub mod error; pub mod network_state; -pub use service::{NetworkService, NetworkStateInfo, NetworkWorker, ExHashT, ReportHandle}; -pub use protocol::PeerInfo; -pub use protocol::event::{Event, DhtEvent, ObservedRole}; -pub use protocol::sync::SyncState; -pub use libp2p::{Multiaddr, PeerId}; #[doc(inline)] pub use libp2p::multiaddr; +pub use libp2p::{Multiaddr, PeerId}; +pub use protocol::event::{DhtEvent, Event, ObservedRole}; +pub use protocol::sync::SyncState; +pub use protocol::PeerInfo; +pub use service::{ExHashT, NetworkService, NetworkStateInfo, NetworkWorker, ReportHandle}; pub use sc_peerset::ReputationChange; diff --git a/client/network/src/network_state.rs b/client/network/src/network_state.rs index 00d53976ae..210a47fe04 100644 --- a/client/network/src/network_state.rs +++ b/client/network/src/network_state.rs @@ -21,7 +21,10 @@ use libp2p::{core::ConnectedPoint, Multiaddr}; use serde::{Deserialize, Serialize}; use slog_derive::SerdeValue; -use std::{collections::{HashMap, HashSet}, time::Duration}; +use std::{ + collections::{HashMap, HashSet}, + time::Duration, +}; /// Returns general information about the networking. /// @@ -31,81 +34,82 @@ use std::{collections::{HashMap, HashSet}, time::Duration}; #[derive(Clone, Debug, PartialEq, Serialize, Deserialize, SerdeValue)] #[serde(rename_all = "camelCase")] pub struct NetworkState { - /// PeerId of the local node. - pub peer_id: String, - /// List of addresses the node is currently listening on. - pub listened_addresses: HashSet, - /// List of addresses the node knows it can be reached as. - pub external_addresses: HashSet, - /// List of node we're connected to. 
- pub connected_peers: HashMap, - /// List of node that we know of but that we're not connected to. - pub not_connected_peers: HashMap, - /// Downloaded bytes per second averaged over the past few seconds. - pub average_download_per_sec: u64, - /// Uploaded bytes per second averaged over the past few seconds. - pub average_upload_per_sec: u64, - /// State of the peerset manager. - pub peerset: serde_json::Value, + /// PeerId of the local node. + pub peer_id: String, + /// List of addresses the node is currently listening on. + pub listened_addresses: HashSet, + /// List of addresses the node knows it can be reached as. + pub external_addresses: HashSet, + /// List of node we're connected to. + pub connected_peers: HashMap, + /// List of node that we know of but that we're not connected to. + pub not_connected_peers: HashMap, + /// Downloaded bytes per second averaged over the past few seconds. + pub average_download_per_sec: u64, + /// Uploaded bytes per second averaged over the past few seconds. + pub average_upload_per_sec: u64, + /// State of the peerset manager. + pub peerset: serde_json::Value, } /// Part of the `NetworkState` struct. Unstable. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Peer { - /// How we are connected to the node. - pub endpoint: PeerEndpoint, - /// Node information, as provided by the node itself. Can be empty if not known yet. - pub version_string: Option, - /// Latest ping duration with this node. - pub latest_ping_time: Option, - /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols - /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. - pub enabled: bool, - /// If true, the peer is "open", which means that we have a Substrate-related protocol - /// with this peer. - pub open: bool, - /// List of addresses known for this node. - pub known_addresses: HashSet, + /// How we are connected to the node. 
+ pub endpoint: PeerEndpoint, + /// Node information, as provided by the node itself. Can be empty if not known yet. + pub version_string: Option, + /// Latest ping duration with this node. + pub latest_ping_time: Option, + /// If true, the peer is "enabled", which means that we try to open Substrate-related protocols + /// with this peer. If false, we stick to Kademlia and/or other network-only protocols. + pub enabled: bool, + /// If true, the peer is "open", which means that we have a Substrate-related protocol + /// with this peer. + pub open: bool, + /// List of addresses known for this node. + pub known_addresses: HashSet, } /// Part of the `NetworkState` struct. Unstable. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct NotConnectedPeer { - /// List of addresses known for this node. - pub known_addresses: HashSet, - /// Node information, as provided by the node itself, if we were ever connected to this node. - pub version_string: Option, - /// Latest ping duration with this node, if we were ever connected to this node. - pub latest_ping_time: Option, + /// List of addresses known for this node. + pub known_addresses: HashSet, + /// Node information, as provided by the node itself, if we were ever connected to this node. + pub version_string: Option, + /// Latest ping duration with this node, if we were ever connected to this node. + pub latest_ping_time: Option, } /// Part of the `NetworkState` struct. Unstable. #[derive(Clone, Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum PeerEndpoint { - /// We are dialing the given address. - Dialing(Multiaddr), - /// We are listening. - Listening { - /// Local address of the connection. - local_addr: Multiaddr, - /// Address data is sent back to. - send_back_addr: Multiaddr, - }, + /// We are dialing the given address. + Dialing(Multiaddr), + /// We are listening. + Listening { + /// Local address of the connection. 
+ local_addr: Multiaddr, + /// Address data is sent back to. + send_back_addr: Multiaddr, + }, } impl From for PeerEndpoint { - fn from(endpoint: ConnectedPoint) -> Self { - match endpoint { - ConnectedPoint::Dialer { address } => - PeerEndpoint::Dialing(address), - ConnectedPoint::Listener { local_addr, send_back_addr } => - PeerEndpoint::Listening { - local_addr, - send_back_addr - } - } - } + fn from(endpoint: ConnectedPoint) -> Self { + match endpoint { + ConnectedPoint::Dialer { address } => PeerEndpoint::Dialing(address), + ConnectedPoint::Listener { + local_addr, + send_back_addr, + } => PeerEndpoint::Listening { + local_addr, + send_back_addr, + }, + } + } } diff --git a/client/network/src/on_demand_layer.rs b/client/network/src/on_demand_layer.rs index d881bf6fe2..00061d1a85 100644 --- a/client/network/src/on_demand_layer.rs +++ b/client/network/src/on_demand_layer.rs @@ -21,12 +21,13 @@ use crate::protocol::light_client_handler; use futures::{channel::oneshot, prelude::*}; use parking_lot::Mutex; use sc_client_api::{ - FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, - RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, StorageProof, ChangesProof, + ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, + RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, + StorageProof, }; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; /// Implements the `Fetcher` trait of the client. 
Makes it possible for the light client to perform @@ -35,18 +36,18 @@ use std::{collections::HashMap, pin::Pin, sync::Arc, task::Context, task::Poll}; /// This implementation stores all the requests in a queue. The network, in parallel, is then /// responsible for pulling elements out of that queue and fulfilling them. pub struct OnDemand { - /// Objects that checks whether what has been retrieved is correct. - checker: Arc>, - - /// Queue of requests. Set to `Some` at initialization, then extracted by the network. - /// - /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method - /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in - /// asynchronous Rust at the moment - requests_queue: Mutex>>>, - - /// Sending side of `requests_queue`. - requests_send: TracingUnboundedSender>, + /// Objects that checks whether what has been retrieved is correct. + checker: Arc>, + + /// Queue of requests. Set to `Some` at initialization, then extracted by the network. + /// + /// Note that a better alternative would be to use a MPMC queue here, and add a `poll` method + /// from the `OnDemand`. However there exists no popular implementation of MPMC channels in + /// asynchronous Rust at the moment + requests_queue: Mutex>>>, + + /// Sending side of `requests_queue`. + requests_send: TracingUnboundedSender>, } /// Dummy implementation of `FetchChecker` that always assumes that responses are bad. 
@@ -57,170 +58,170 @@ pub struct OnDemand { pub struct AlwaysBadChecker; impl FetchChecker for AlwaysBadChecker { - fn check_header_proof( - &self, - _request: &RemoteHeaderRequest, - _remote_header: Option, - _remote_proof: StorageProof, - ) -> Result { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_read_proof( - &self, - _request: &RemoteReadRequest, - _remote_proof: StorageProof, - ) -> Result,Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_read_child_proof( - &self, - _request: &RemoteReadChildRequest, - _remote_proof: StorageProof, - ) -> Result, Option>>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_execution_proof( - &self, - _request: &RemoteCallRequest, - _remote_proof: StorageProof, - ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_changes_proof( - &self, - _request: &RemoteChangesRequest, - _remote_proof: ChangesProof - ) -> Result, u32)>, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } - - fn check_body_proof( - &self, - _request: &RemoteBodyRequest, - _body: Vec - ) -> Result, ClientError> { - Err(ClientError::Msg("AlwaysBadChecker".into())) - } + fn check_header_proof( + &self, + _request: &RemoteHeaderRequest, + _remote_header: Option, + _remote_proof: StorageProof, + ) -> Result { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_read_proof( + &self, + _request: &RemoteReadRequest, + _remote_proof: StorageProof, + ) -> Result, Option>>, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_read_child_proof( + &self, + _request: &RemoteReadChildRequest, + _remote_proof: StorageProof, + ) -> Result, Option>>, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_execution_proof( + &self, + _request: &RemoteCallRequest, + _remote_proof: StorageProof, + ) -> Result, ClientError> { + 
Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_changes_proof( + &self, + _request: &RemoteChangesRequest, + _remote_proof: ChangesProof, + ) -> Result, u32)>, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } + + fn check_body_proof( + &self, + _request: &RemoteBodyRequest, + _body: Vec, + ) -> Result, ClientError> { + Err(ClientError::Msg("AlwaysBadChecker".into())) + } } impl OnDemand where - B::Header: HeaderT, + B::Header: HeaderT, { - /// Creates new on-demand service. - pub fn new(checker: Arc>) -> Self { - let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); - let requests_queue = Mutex::new(Some(requests_queue)); - - OnDemand { - checker, - requests_queue, - requests_send, - } - } - - /// Get checker reference. - pub fn checker(&self) -> &Arc> { - &self.checker - } - - /// Extracts the queue of requests. - /// - /// Whenever one of the methods of the `Fetcher` trait is called, an element is pushed on this - /// channel. - /// - /// If this function returns `None`, that means that the receiver has already been extracted in - /// the past, and therefore that something already handles the requests. - pub(crate) fn extract_receiver(&self) - -> Option>> - { - self.requests_queue.lock().take() - } + /// Creates new on-demand service. + pub fn new(checker: Arc>) -> Self { + let (requests_send, requests_queue) = tracing_unbounded("mpsc_ondemand"); + let requests_queue = Mutex::new(Some(requests_queue)); + + OnDemand { + checker, + requests_queue, + requests_send, + } + } + + /// Get checker reference. + pub fn checker(&self) -> &Arc> { + &self.checker + } + + /// Extracts the queue of requests. + /// + /// Whenever one of the methods of the `Fetcher` trait is called, an element is pushed on this + /// channel. + /// + /// If this function returns `None`, that means that the receiver has already been extracted in + /// the past, and therefore that something already handles the requests. 
+ pub(crate) fn extract_receiver( + &self, + ) -> Option>> { + self.requests_queue.lock().take() + } } impl Fetcher for OnDemand where - B: BlockT, - B::Header: HeaderT, + B: BlockT, + B::Header: HeaderT, { - type RemoteHeaderResult = RemoteResponse; - type RemoteReadResult = RemoteResponse, Option>>>; - type RemoteCallResult = RemoteResponse>; - type RemoteChangesResult = RemoteResponse, u32)>>; - type RemoteBodyResult = RemoteResponse>; - - fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_handler::Request::Header { request, sender }); - RemoteResponse { receiver } - } - - fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_handler::Request::Read { request, sender }); - RemoteResponse { receiver } - } - - fn remote_read_child( - &self, - request: RemoteReadChildRequest, - ) -> Self::RemoteReadResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_handler::Request::ReadChild { request, sender }); - RemoteResponse { receiver } - } - - fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_handler::Request::Call { request, sender }); - RemoteResponse { receiver } - } - - fn remote_changes( - &self, - request: RemoteChangesRequest, - ) -> Self::RemoteChangesResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - .requests_send - .unbounded_send(light_client_handler::Request::Changes { request, sender }); - RemoteResponse { receiver } - } - - fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult { - let (sender, receiver) = oneshot::channel(); - let _ = self - 
.requests_send - .unbounded_send(light_client_handler::Request::Body { request, sender }); - RemoteResponse { receiver } - } + type RemoteHeaderResult = RemoteResponse; + type RemoteReadResult = RemoteResponse, Option>>>; + type RemoteCallResult = RemoteResponse>; + type RemoteChangesResult = RemoteResponse, u32)>>; + type RemoteBodyResult = RemoteResponse>; + + fn remote_header(&self, request: RemoteHeaderRequest) -> Self::RemoteHeaderResult { + let (sender, receiver) = oneshot::channel(); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Header { request, sender }); + RemoteResponse { receiver } + } + + fn remote_read(&self, request: RemoteReadRequest) -> Self::RemoteReadResult { + let (sender, receiver) = oneshot::channel(); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Read { request, sender }); + RemoteResponse { receiver } + } + + fn remote_read_child( + &self, + request: RemoteReadChildRequest, + ) -> Self::RemoteReadResult { + let (sender, receiver) = oneshot::channel(); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::ReadChild { request, sender }); + RemoteResponse { receiver } + } + + fn remote_call(&self, request: RemoteCallRequest) -> Self::RemoteCallResult { + let (sender, receiver) = oneshot::channel(); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Call { request, sender }); + RemoteResponse { receiver } + } + + fn remote_changes( + &self, + request: RemoteChangesRequest, + ) -> Self::RemoteChangesResult { + let (sender, receiver) = oneshot::channel(); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Changes { request, sender }); + RemoteResponse { receiver } + } + + fn remote_body(&self, request: RemoteBodyRequest) -> Self::RemoteBodyResult { + let (sender, receiver) = oneshot::channel(); + let _ = self + .requests_send + .unbounded_send(light_client_handler::Request::Body { request, 
sender }); + RemoteResponse { receiver } + } } /// Future for an on-demand remote call response. pub struct RemoteResponse { - receiver: oneshot::Receiver>, + receiver: oneshot::Receiver>, } impl Future for RemoteResponse { - type Output = Result; - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - match self.receiver.poll_unpin(cx) { - Poll::Ready(Ok(res)) => Poll::Ready(res), - Poll::Ready(Err(_)) => Poll::Ready(Err(From::from(ClientError::RemoteFetchCancelled))), - Poll::Pending => Poll::Pending, - } - } + type Output = Result; + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + match self.receiver.poll_unpin(cx) { + Poll::Ready(Ok(res)) => Poll::Ready(res), + Poll::Ready(Err(_)) => Poll::Ready(Err(From::from(ClientError::RemoteFetchCancelled))), + Poll::Pending => Poll::Pending, + } + } } diff --git a/client/network/src/protocol.rs b/client/network/src/protocol.rs index 49479aa2d4..bd2e3739d2 100644 --- a/client/network/src/protocol.rs +++ b/client/network/src/protocol.rs @@ -14,70 +14,73 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use crate::chain::{Client, FinalityProofProvider}; +use crate::config::BoxFinalityProofRequestBuilder; use crate::config::ProtocolId; +use crate::error; +use crate::service::{ExHashT, TransactionPool}; use crate::utils::interval; use bytes::{Bytes, BytesMut}; +use codec::{Decode, Encode}; use futures::prelude::*; use generic_proto::{GenericProto, GenericProtoOut}; -use libp2p::{Multiaddr, PeerId}; -use libp2p::core::{ConnectedPoint, connection::{ConnectionId, ListenerId}}; -use libp2p::swarm::{ProtocolsHandler, IntoProtocolsHandler}; +use libp2p::core::{ + connection::{ConnectionId, ListenerId}, + ConnectedPoint, +}; +use libp2p::swarm::{IntoProtocolsHandler, ProtocolsHandler}; use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; -use sp_core::{ - storage::{StorageKey, ChildInfo}, - hexdisplay::HexDisplay +use libp2p::{Multiaddr, PeerId}; +use log::{debug, error, log, trace, warn, Level}; +use message::generic::{ConsensusMessage, Message as GenericMessage, Roles}; +use message::{BlockAnnounce, Message}; +use prometheus_endpoint::{ + register, Gauge, GaugeVec, HistogramVec, Opts, PrometheusError, Registry, U64, }; +use sc_client_api::{ChangesProof, StorageProof}; +use sp_arithmetic::traits::SaturatedConversion; use sp_consensus::{ - BlockOrigin, - block_validation::BlockAnnounceValidator, - import_queue::{BlockImportResult, BlockImportError, IncomingBlock, Origin} + block_validation::BlockAnnounceValidator, + import_queue::{BlockImportError, BlockImportResult, IncomingBlock, Origin}, + BlockOrigin, }; -use codec::{Decode, Encode}; -use sp_runtime::{generic::BlockId, ConsensusEngineId, Justification}; -use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, NumberFor, One, Zero, CheckedSub +use sp_core::{ + hexdisplay::HexDisplay, + storage::{ChildInfo, StorageKey}, }; -use sp_arithmetic::traits::SaturatedConversion; -use message::{BlockAnnounce, Message}; -use message::generic::{Message as GenericMessage, ConsensusMessage, Roles}; 
-use prometheus_endpoint::{Registry, Gauge, GaugeVec, HistogramVec, PrometheusError, Opts, register, U64}; -use sync::{ChainSync, SyncState}; -use crate::service::{TransactionPool, ExHashT}; -use crate::config::BoxFinalityProofRequestBuilder; +use sp_runtime::traits::{Block as BlockT, CheckedSub, Header as HeaderT, NumberFor, One, Zero}; +use sp_runtime::{generic::BlockId, ConsensusEngineId, Justification}; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap, HashSet, VecDeque}; -use std::sync::Arc; use std::fmt::Write; +use std::sync::Arc; use std::{cmp, io, num::NonZeroUsize, pin::Pin, task::Poll, time}; -use log::{log, Level, trace, debug, warn, error}; -use crate::chain::{Client, FinalityProofProvider}; -use sc_client_api::{ChangesProof, StorageProof}; -use crate::error; +use sync::{ChainSync, SyncState}; use util::LruHashSet; use wasm_timer::Instant; // Include sources generated from protobuf definitions. pub mod api { - pub mod v1 { - include!(concat!(env!("OUT_DIR"), "/api.v1.rs")); - pub mod light { - include!(concat!(env!("OUT_DIR"), "/api.v1.light.rs")); - } - } + pub mod v1 { + include!(concat!(env!("OUT_DIR"), "/api.v1.rs")); + pub mod light { + include!(concat!(env!("OUT_DIR"), "/api.v1.light.rs")); + } + } } mod generic_proto; mod util; pub mod block_requests; -pub mod message; pub mod event; pub mod light_client_handler; +pub mod message; pub mod sync; pub use block_requests::BlockRequests; -pub use light_client_handler::LightClientHandler; pub use generic_proto::LegacyConnectionKillError; +pub use light_client_handler::LightClientHandler; const REQUEST_TIMEOUT_SEC: u64 = 40; /// Interval at which we perform time based maintenance @@ -103,490 +106,503 @@ const MAX_BLOCK_DATA_RESPONSE: u32 = 128; const LIGHT_MAXIMAL_BLOCKS_DIFFERENCE: u64 = 8192; mod rep { - use sc_peerset::ReputationChange as Rep; - /// Reputation change when a peer is "clogged", meaning that it's not fast enough to process our - /// messages. 
- pub const CLOGGED_PEER: Rep = Rep::new(-(1 << 12), "Clogged message queue"); - /// Reputation change when a peer doesn't respond in time to our messages. - pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); - /// Reputation change when a peer sends us a status message while we already received one. - pub const UNEXPECTED_STATUS: Rep = Rep::new(-(1 << 20), "Unexpected status message"); - /// Reputation change when we are a light client and a peer is behind us. - pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); - /// Reputation change when a peer sends us an extrinsic that we didn't know about. - pub const GOOD_EXTRINSIC: Rep = Rep::new(1 << 7, "Good extrinsic"); - /// Reputation change when a peer sends us a bad extrinsic. - pub const BAD_EXTRINSIC: Rep = Rep::new(-(1 << 12), "Bad extrinsic"); - /// We sent an RPC query to the given node, but it failed. - pub const RPC_FAILED: Rep = Rep::new(-(1 << 12), "Remote call failed"); - /// We received a message that failed to decode. - pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); - /// We received an unexpected response. - pub const UNEXPECTED_RESPONSE: Rep = Rep::new_fatal("Unexpected response packet"); - /// We received an unexpected extrinsic packet. - pub const UNEXPECTED_EXTRINSICS: Rep = Rep::new_fatal("Unexpected extrinsics packet"); - /// We received an unexpected light node request. - pub const UNEXPECTED_REQUEST: Rep = Rep::new_fatal("Unexpected block request packet"); - /// Peer has different genesis. - pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); - /// Peer is on unsupported protocol version. - pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); - /// Peer role does not match (e.g. light peer connecting to another light peer). - pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); - /// Peer response data does not have requested bits. 
- pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); + use sc_peerset::ReputationChange as Rep; + /// Reputation change when a peer is "clogged", meaning that it's not fast enough to process our + /// messages. + pub const CLOGGED_PEER: Rep = Rep::new(-(1 << 12), "Clogged message queue"); + /// Reputation change when a peer doesn't respond in time to our messages. + pub const TIMEOUT: Rep = Rep::new(-(1 << 10), "Request timeout"); + /// Reputation change when a peer sends us a status message while we already received one. + pub const UNEXPECTED_STATUS: Rep = Rep::new(-(1 << 20), "Unexpected status message"); + /// Reputation change when we are a light client and a peer is behind us. + pub const PEER_BEHIND_US_LIGHT: Rep = Rep::new(-(1 << 8), "Useless for a light peer"); + /// Reputation change when a peer sends us an extrinsic that we didn't know about. + pub const GOOD_EXTRINSIC: Rep = Rep::new(1 << 7, "Good extrinsic"); + /// Reputation change when a peer sends us a bad extrinsic. + pub const BAD_EXTRINSIC: Rep = Rep::new(-(1 << 12), "Bad extrinsic"); + /// We sent an RPC query to the given node, but it failed. + pub const RPC_FAILED: Rep = Rep::new(-(1 << 12), "Remote call failed"); + /// We received a message that failed to decode. + pub const BAD_MESSAGE: Rep = Rep::new(-(1 << 12), "Bad message"); + /// We received an unexpected response. + pub const UNEXPECTED_RESPONSE: Rep = Rep::new_fatal("Unexpected response packet"); + /// We received an unexpected extrinsic packet. + pub const UNEXPECTED_EXTRINSICS: Rep = Rep::new_fatal("Unexpected extrinsics packet"); + /// We received an unexpected light node request. + pub const UNEXPECTED_REQUEST: Rep = Rep::new_fatal("Unexpected block request packet"); + /// Peer has different genesis. + pub const GENESIS_MISMATCH: Rep = Rep::new_fatal("Genesis mismatch"); + /// Peer is on unsupported protocol version. 
+ pub const BAD_PROTOCOL: Rep = Rep::new_fatal("Unsupported protocol"); + /// Peer role does not match (e.g. light peer connecting to another light peer). + pub const BAD_ROLE: Rep = Rep::new_fatal("Unsupported role"); + /// Peer response data does not have requested bits. + pub const BAD_RESPONSE: Rep = Rep::new(-(1 << 12), "Incomplete response"); } struct Metrics { - handshaking_peers: Gauge, - obsolete_requests: Gauge, - peers: Gauge, - queued_blocks: Gauge, - fork_targets: Gauge, - finality_proofs: GaugeVec, - justifications: GaugeVec, + handshaking_peers: Gauge, + obsolete_requests: Gauge, + peers: Gauge, + queued_blocks: Gauge, + fork_targets: Gauge, + finality_proofs: GaugeVec, + justifications: GaugeVec, } impl Metrics { - fn register(r: &Registry) -> Result { - Ok(Metrics { - handshaking_peers: { - let g = Gauge::new("sync_handshaking_peers", "Number of newly connected peers")?; - register(g, r)? - }, - obsolete_requests: { - let g = Gauge::new("sync_obsolete_requests", "Number of obsolete requests")?; - register(g, r)? - }, - peers: { - let g = Gauge::new("sync_peers", "Number of peers we sync with")?; - register(g, r)? - }, - queued_blocks: { - let g = Gauge::new("sync_queued_blocks", "Number of blocks in import queue")?; - register(g, r)? - }, - fork_targets: { - let g = Gauge::new("sync_fork_targets", "Number of fork sync targets")?; - register(g, r)? - }, - justifications: { - let g = GaugeVec::new( - Opts::new( - "sync_extra_justifications", - "Number of extra justifications requests" - ), - &["status"], - )?; - register(g, r)? - }, - finality_proofs: { - let g = GaugeVec::new( - Opts::new( - "sync_extra_finality_proofs", - "Number of extra finality proof requests", - ), - &["status"], - )?; - register(g, r)? - }, - }) - } + fn register(r: &Registry) -> Result { + Ok(Metrics { + handshaking_peers: { + let g = Gauge::new("sync_handshaking_peers", "Number of newly connected peers")?; + register(g, r)? 
+ }, + obsolete_requests: { + let g = Gauge::new("sync_obsolete_requests", "Number of obsolete requests")?; + register(g, r)? + }, + peers: { + let g = Gauge::new("sync_peers", "Number of peers we sync with")?; + register(g, r)? + }, + queued_blocks: { + let g = Gauge::new("sync_queued_blocks", "Number of blocks in import queue")?; + register(g, r)? + }, + fork_targets: { + let g = Gauge::new("sync_fork_targets", "Number of fork sync targets")?; + register(g, r)? + }, + justifications: { + let g = GaugeVec::new( + Opts::new( + "sync_extra_justifications", + "Number of extra justifications requests", + ), + &["status"], + )?; + register(g, r)? + }, + finality_proofs: { + let g = GaugeVec::new( + Opts::new( + "sync_extra_finality_proofs", + "Number of extra finality proof requests", + ), + &["status"], + )?; + register(g, r)? + }, + }) + } } // Lock must always be taken in order declared here. pub struct Protocol { - /// Interval at which we call `tick`. - tick_timeout: Pin + Send>>, - /// Interval at which we call `propagate_extrinsics`. - propagate_timeout: Pin + Send>>, - /// Pending list of messages to return from `poll` as a priority. - pending_messages: VecDeque>, - config: ProtocolConfig, - genesis_hash: B::Hash, - sync: ChainSync, - context_data: ContextData, - /// List of nodes for which we perform additional logging because they are important for the - /// user. - important_peers: HashSet, - // Connected peers pending Status message. - handshaking_peers: HashMap, - /// Used to report reputation changes. - peerset_handle: sc_peerset::PeersetHandle, - transaction_pool: Arc>, - /// When asked for a proof of finality, we use this struct to build one. - finality_proof_provider: Option>>, - /// Handles opening the unique substream and sending and receiving raw messages. - behaviour: GenericProto, - /// For each legacy gossiping engine ID, the corresponding new protocol name. - protocol_name_by_engine: HashMap>, - /// For each protocol name, the legacy equivalent. 
- legacy_equiv_by_name: HashMap, Fallback>, - /// Name of the protocol used for transactions. - transactions_protocol: Cow<'static, [u8]>, - /// Name of the protocol used for block announces. - block_announces_protocol: Cow<'static, [u8]>, - /// Prometheus metrics. - metrics: Option, - /// The `PeerId`'s of all boot nodes. - boot_node_ids: Arc>, + /// Interval at which we call `tick`. + tick_timeout: Pin + Send>>, + /// Interval at which we call `propagate_extrinsics`. + propagate_timeout: Pin + Send>>, + /// Pending list of messages to return from `poll` as a priority. + pending_messages: VecDeque>, + config: ProtocolConfig, + genesis_hash: B::Hash, + sync: ChainSync, + context_data: ContextData, + /// List of nodes for which we perform additional logging because they are important for the + /// user. + important_peers: HashSet, + // Connected peers pending Status message. + handshaking_peers: HashMap, + /// Used to report reputation changes. + peerset_handle: sc_peerset::PeersetHandle, + transaction_pool: Arc>, + /// When asked for a proof of finality, we use this struct to build one. + finality_proof_provider: Option>>, + /// Handles opening the unique substream and sending and receiving raw messages. + behaviour: GenericProto, + /// For each legacy gossiping engine ID, the corresponding new protocol name. + protocol_name_by_engine: HashMap>, + /// For each protocol name, the legacy equivalent. + legacy_equiv_by_name: HashMap, Fallback>, + /// Name of the protocol used for transactions. + transactions_protocol: Cow<'static, [u8]>, + /// Name of the protocol used for block announces. + block_announces_protocol: Cow<'static, [u8]>, + /// Prometheus metrics. + metrics: Option, + /// The `PeerId`'s of all boot nodes. 
+ boot_node_ids: Arc>, } #[derive(Default)] struct PacketStats { - bytes_in: u64, - bytes_out: u64, - count_in: u64, - count_out: u64, + bytes_in: u64, + bytes_out: u64, + count_in: u64, + count_out: u64, } /// A peer that we are connected to /// and from whom we have not yet received a Status message. struct HandshakingPeer { - timestamp: Instant, + timestamp: Instant, } /// Peer information #[derive(Debug, Clone)] struct Peer { - info: PeerInfo, - /// Current block request, if any. - block_request: Option<(Instant, message::BlockRequest)>, - /// Requests we are no longer interested in. - obsolete_requests: HashMap, - /// Holds a set of transactions known to this peer. - known_extrinsics: LruHashSet, - /// Holds a set of blocks known to this peer. - known_blocks: LruHashSet, - /// Request counter, - next_request_id: message::RequestId, + info: PeerInfo, + /// Current block request, if any. + block_request: Option<(Instant, message::BlockRequest)>, + /// Requests we are no longer interested in. + obsolete_requests: HashMap, + /// Holds a set of transactions known to this peer. + known_extrinsics: LruHashSet, + /// Holds a set of blocks known to this peer. + known_blocks: LruHashSet, + /// Request counter, + next_request_id: message::RequestId, } /// Info about a peer's known state. #[derive(Clone, Debug)] pub struct PeerInfo { - /// Roles - pub roles: Roles, - /// Protocol version - pub protocol_version: u32, - /// Peer best block hash - pub best_hash: B::Hash, - /// Peer best block number - pub best_number: ::Number, + /// Roles + pub roles: Roles, + /// Protocol version + pub protocol_version: u32, + /// Peer best block hash + pub best_hash: B::Hash, + /// Peer best block number + pub best_number: ::Number, } /// Data necessary to create a context. 
struct ContextData { - // All connected peers - peers: HashMap>, - stats: HashMap<&'static str, PacketStats>, - pub chain: Arc>, + // All connected peers + peers: HashMap>, + stats: HashMap<&'static str, PacketStats>, + pub chain: Arc>, } /// Configuration for the Substrate-specific part of the networking layer. #[derive(Clone)] pub struct ProtocolConfig { - /// Assigned roles. - pub roles: Roles, - /// Maximum number of peers to ask the same blocks in parallel. - pub max_parallel_downloads: u32, + /// Assigned roles. + pub roles: Roles, + /// Maximum number of peers to ask the same blocks in parallel. + pub max_parallel_downloads: u32, } impl Default for ProtocolConfig { - fn default() -> ProtocolConfig { - ProtocolConfig { - roles: Roles::FULL, - max_parallel_downloads: 5, - } - } + fn default() -> ProtocolConfig { + ProtocolConfig { + roles: Roles::FULL, + max_parallel_downloads: 5, + } + } } /// Fallback mechanism to use to send a notification if no substream is open. #[derive(Debug, Clone, PartialEq, Eq)] enum Fallback { - /// Use a `Message::Consensus` with the given engine ID. - Consensus(ConsensusEngineId), - /// The message is the bytes encoding of a `Transactions` (which is itself defined as a `Vec`). - Transactions, - /// The message is the bytes encoding of a `BlockAnnounce`. - BlockAnnounce, + /// Use a `Message::Consensus` with the given engine ID. + Consensus(ConsensusEngineId), + /// The message is the bytes encoding of a `Transactions` (which is itself defined as a `Vec`). + Transactions, + /// The message is the bytes encoding of a `BlockAnnounce`. + BlockAnnounce, } impl Protocol { - /// Create a new instance. 
- pub fn new( - config: ProtocolConfig, - chain: Arc>, - transaction_pool: Arc>, - finality_proof_provider: Option>>, - finality_proof_request_builder: Option>, - protocol_id: ProtocolId, - peerset_config: sc_peerset::PeersetConfig, - block_announce_validator: Box + Send>, - metrics_registry: Option<&Registry>, - boot_node_ids: Arc>, - queue_size_report: Option, - ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { - let info = chain.info(); - let sync = ChainSync::new( - config.roles, - chain.clone(), - &info, - finality_proof_request_builder, - block_announce_validator, - config.max_parallel_downloads, - ); - - let important_peers = { - let mut imp_p = HashSet::new(); - for reserved in peerset_config.priority_groups.iter().flat_map(|(_, l)| l.iter()) { - imp_p.insert(reserved.clone()); - } - imp_p.shrink_to_fit(); - imp_p - }; - - let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); - let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); - let mut behaviour = GenericProto::new(protocol_id.clone(), versions, peerset, queue_size_report); - - let mut legacy_equiv_by_name = HashMap::new(); - - let transactions_protocol: Cow<'static, [u8]> = Cow::from({ - let mut proto = b"/".to_vec(); - proto.extend(protocol_id.as_bytes()); - proto.extend(b"/transactions/1"); - proto - }); - behaviour.register_notif_protocol(transactions_protocol.clone(), Vec::new()); - legacy_equiv_by_name.insert(transactions_protocol.clone(), Fallback::Transactions); - - let block_announces_protocol: Cow<'static, [u8]> = Cow::from({ - let mut proto = b"/".to_vec(); - proto.extend(protocol_id.as_bytes()); - proto.extend(b"/block-announces/1"); - proto - }); - behaviour.register_notif_protocol(block_announces_protocol.clone(), Vec::new()); - legacy_equiv_by_name.insert(block_announces_protocol.clone(), Fallback::BlockAnnounce); - - let protocol = Protocol { - tick_timeout: Box::pin(interval(TICK_TIMEOUT)), - propagate_timeout: 
Box::pin(interval(PROPAGATE_TIMEOUT)), - pending_messages: VecDeque::new(), - config, - context_data: ContextData { - peers: HashMap::new(), - stats: HashMap::new(), - chain, - }, - genesis_hash: info.genesis_hash, - sync, - handshaking_peers: HashMap::new(), - important_peers, - transaction_pool, - finality_proof_provider, - peerset_handle: peerset_handle.clone(), - behaviour, - protocol_name_by_engine: HashMap::new(), - legacy_equiv_by_name, - transactions_protocol, - block_announces_protocol, - metrics: if let Some(r) = metrics_registry { - Some(Metrics::register(r)?) - } else { - None - }, - boot_node_ids, - }; - - Ok((protocol, peerset_handle)) - } - - /// Returns the list of all the peers we have an open channel to. - pub fn open_peers(&self) -> impl Iterator { - self.behaviour.open_peers() - } - - /// Returns true if we have a channel open with this node. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - self.behaviour.is_open(peer_id) - } - - /// Returns the list of all the peers that the peerset currently requests us to be connected to. - pub fn requested_peers(&self) -> impl Iterator { - self.behaviour.requested_peers() - } - - /// Returns the number of discovered nodes that we keep in memory. - pub fn num_discovered_peers(&self) -> usize { - self.behaviour.num_discovered_peers() - } - - /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId) { - self.behaviour.disconnect_peer(peer_id) - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - self.behaviour.is_enabled(peer_id) - } - - /// Returns the state of the peerset manager, for debugging purposes. - pub fn peerset_debug_info(&mut self) -> serde_json::Value { - self.behaviour.peerset_debug_info() - } - - /// Returns the number of peers we're connected to. 
- pub fn num_connected_peers(&self) -> usize { - self.context_data.peers.values().count() - } - - /// Returns the number of peers we're connected to and that are being queried. - pub fn num_active_peers(&self) -> usize { - self.context_data - .peers - .values() - .filter(|p| p.block_request.is_some()) - .count() - } - - /// Current global sync state. - pub fn sync_state(&self) -> SyncState { - self.sync.status().state - } - - /// Target sync block number. - pub fn best_seen_block(&self) -> Option> { - self.sync.status().best_seen_block - } - - /// Number of peers participating in syncing. - pub fn num_sync_peers(&self) -> u32 { - self.sync.status().num_peers - } - - /// Number of blocks in the import queue. - pub fn num_queued_blocks(&self) -> u32 { - self.sync.status().queued_blocks - } - - /// Number of processed blocks. - pub fn num_processed_blocks(&self) -> usize { - self.sync.num_processed_blocks() - } - - /// Number of active sync requests. - pub fn num_sync_requests(&self) -> usize { - self.sync.num_sync_requests() - } - - fn handle_response( - &mut self, - who: PeerId, - response: &message::BlockResponse - ) -> Option> { - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - if let Some(_) = peer.obsolete_requests.remove(&response.id) { - trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", who, response.id); - return None; - } - // Clear the request. If the response is invalid peer will be disconnected anyway. 
- let request = peer.block_request.take(); - if request.as_ref().map_or(false, |(_, r)| r.id == response.id) { - return request.map(|(_, r)| r) - } - trace!(target: "sync", "Unexpected response packet from {} ({})", who, response.id); - self.peerset_handle.report_peer(who.clone(), rep::UNEXPECTED_RESPONSE); - self.behaviour.disconnect_peer(&who); - } - None - } - - fn update_peer_info(&mut self, who: &PeerId) { - if let Some(info) = self.sync.peer_info(who) { - if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { - peer.info.best_hash = info.best_hash; - peer.info.best_number = info.best_number; - } - } - } - - /// Returns information about all the peers we are connected to after the handshake message. - pub fn peers_info(&self) -> impl Iterator)> { - self.context_data.peers.iter().map(|(id, peer)| (id, &peer.info)) - } - - pub fn on_custom_message( - &mut self, - who: PeerId, - data: BytesMut, - ) -> CustomMessageOutcome { - - let message = match as Decode>::decode(&mut &data[..]) { - Ok(message) => message, - Err(err) => { - debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); - self.peerset_handle.report_peer(who.clone(), rep::BAD_MESSAGE); - return CustomMessageOutcome::None; - } - }; - - let mut stats = self.context_data.stats.entry(message.id()).or_default(); - stats.bytes_in += data.len() as u64; - stats.count_in += 1; - - match message { - GenericMessage::Status(s) => return self.on_status_message(who, s), - GenericMessage::BlockRequest(r) => self.on_block_request(who, r), - GenericMessage::BlockResponse(r) => { - if let Some(request) = self.handle_response(who.clone(), &r) { - let outcome = self.on_block_response(who.clone(), request, r); - self.update_peer_info(&who); - return outcome - } - }, - GenericMessage::BlockAnnounce(announce) => { - let outcome = self.on_block_announce(who.clone(), announce); - self.update_peer_info(&who); - return outcome; - }, - GenericMessage::Transactions(m) => - 
self.on_extrinsics(who, m), - GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), - GenericMessage::RemoteCallResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse"), - GenericMessage::RemoteReadRequest(request) => - self.on_remote_read_request(who, request), - GenericMessage::RemoteReadResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteReadResponse"), - GenericMessage::RemoteHeaderRequest(request) => - self.on_remote_header_request(who, request), - GenericMessage::RemoteHeaderResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse"), - GenericMessage::RemoteChangesRequest(request) => - self.on_remote_changes_request(who, request), - GenericMessage::RemoteChangesResponse(_) => - warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse"), - GenericMessage::FinalityProofRequest(request) => - self.on_finality_proof_request(who, request), - GenericMessage::FinalityProofResponse(response) => - return self.on_finality_proof_response(who, response), - GenericMessage::RemoteReadChildRequest(request) => - self.on_remote_read_child_request(who, request), - GenericMessage::Consensus(msg) => - return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { - CustomMessageOutcome::NotificationsReceived { - remote: who.clone(), - messages: vec![(msg.engine_id, From::from(msg.data))], - } - } else { - warn!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); - CustomMessageOutcome::None - }, - GenericMessage::ConsensusBatch(messages) => { - let messages = messages + /// Create a new instance. 
+ pub fn new( + config: ProtocolConfig, + chain: Arc>, + transaction_pool: Arc>, + finality_proof_provider: Option>>, + finality_proof_request_builder: Option>, + protocol_id: ProtocolId, + peerset_config: sc_peerset::PeersetConfig, + block_announce_validator: Box + Send>, + metrics_registry: Option<&Registry>, + boot_node_ids: Arc>, + queue_size_report: Option, + ) -> error::Result<(Protocol, sc_peerset::PeersetHandle)> { + let info = chain.info(); + let sync = ChainSync::new( + config.roles, + chain.clone(), + &info, + finality_proof_request_builder, + block_announce_validator, + config.max_parallel_downloads, + ); + + let important_peers = { + let mut imp_p = HashSet::new(); + for reserved in peerset_config + .priority_groups + .iter() + .flat_map(|(_, l)| l.iter()) + { + imp_p.insert(reserved.clone()); + } + imp_p.shrink_to_fit(); + imp_p + }; + + let (peerset, peerset_handle) = sc_peerset::Peerset::from_config(peerset_config); + let versions = &((MIN_VERSION as u8)..=(CURRENT_VERSION as u8)).collect::>(); + let mut behaviour = + GenericProto::new(protocol_id.clone(), versions, peerset, queue_size_report); + + let mut legacy_equiv_by_name = HashMap::new(); + + let transactions_protocol: Cow<'static, [u8]> = Cow::from({ + let mut proto = b"/".to_vec(); + proto.extend(protocol_id.as_bytes()); + proto.extend(b"/transactions/1"); + proto + }); + behaviour.register_notif_protocol(transactions_protocol.clone(), Vec::new()); + legacy_equiv_by_name.insert(transactions_protocol.clone(), Fallback::Transactions); + + let block_announces_protocol: Cow<'static, [u8]> = Cow::from({ + let mut proto = b"/".to_vec(); + proto.extend(protocol_id.as_bytes()); + proto.extend(b"/block-announces/1"); + proto + }); + behaviour.register_notif_protocol(block_announces_protocol.clone(), Vec::new()); + legacy_equiv_by_name.insert(block_announces_protocol.clone(), Fallback::BlockAnnounce); + + let protocol = Protocol { + tick_timeout: Box::pin(interval(TICK_TIMEOUT)), + propagate_timeout: 
Box::pin(interval(PROPAGATE_TIMEOUT)), + pending_messages: VecDeque::new(), + config, + context_data: ContextData { + peers: HashMap::new(), + stats: HashMap::new(), + chain, + }, + genesis_hash: info.genesis_hash, + sync, + handshaking_peers: HashMap::new(), + important_peers, + transaction_pool, + finality_proof_provider, + peerset_handle: peerset_handle.clone(), + behaviour, + protocol_name_by_engine: HashMap::new(), + legacy_equiv_by_name, + transactions_protocol, + block_announces_protocol, + metrics: if let Some(r) = metrics_registry { + Some(Metrics::register(r)?) + } else { + None + }, + boot_node_ids, + }; + + Ok((protocol, peerset_handle)) + } + + /// Returns the list of all the peers we have an open channel to. + pub fn open_peers(&self) -> impl Iterator { + self.behaviour.open_peers() + } + + /// Returns true if we have a channel open with this node. + pub fn is_open(&self, peer_id: &PeerId) -> bool { + self.behaviour.is_open(peer_id) + } + + /// Returns the list of all the peers that the peerset currently requests us to be connected to. + pub fn requested_peers(&self) -> impl Iterator { + self.behaviour.requested_peers() + } + + /// Returns the number of discovered nodes that we keep in memory. + pub fn num_discovered_peers(&self) -> usize { + self.behaviour.num_discovered_peers() + } + + /// Disconnects the given peer if we are connected to it. + pub fn disconnect_peer(&mut self, peer_id: &PeerId) { + self.behaviour.disconnect_peer(peer_id) + } + + /// Returns true if we try to open protocols with the given peer. + pub fn is_enabled(&self, peer_id: &PeerId) -> bool { + self.behaviour.is_enabled(peer_id) + } + + /// Returns the state of the peerset manager, for debugging purposes. + pub fn peerset_debug_info(&mut self) -> serde_json::Value { + self.behaviour.peerset_debug_info() + } + + /// Returns the number of peers we're connected to. 
+ pub fn num_connected_peers(&self) -> usize { + self.context_data.peers.values().count() + } + + /// Returns the number of peers we're connected to and that are being queried. + pub fn num_active_peers(&self) -> usize { + self.context_data + .peers + .values() + .filter(|p| p.block_request.is_some()) + .count() + } + + /// Current global sync state. + pub fn sync_state(&self) -> SyncState { + self.sync.status().state + } + + /// Target sync block number. + pub fn best_seen_block(&self) -> Option> { + self.sync.status().best_seen_block + } + + /// Number of peers participating in syncing. + pub fn num_sync_peers(&self) -> u32 { + self.sync.status().num_peers + } + + /// Number of blocks in the import queue. + pub fn num_queued_blocks(&self) -> u32 { + self.sync.status().queued_blocks + } + + /// Number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.sync.num_processed_blocks() + } + + /// Number of active sync requests. + pub fn num_sync_requests(&self) -> usize { + self.sync.num_sync_requests() + } + + fn handle_response( + &mut self, + who: PeerId, + response: &message::BlockResponse, + ) -> Option> { + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + if let Some(_) = peer.obsolete_requests.remove(&response.id) { + trace!(target: "sync", "Ignoring obsolete block response packet from {} ({})", who, response.id); + return None; + } + // Clear the request. If the response is invalid peer will be disconnected anyway. 
+ let request = peer.block_request.take(); + if request.as_ref().map_or(false, |(_, r)| r.id == response.id) { + return request.map(|(_, r)| r); + } + trace!(target: "sync", "Unexpected response packet from {} ({})", who, response.id); + self.peerset_handle + .report_peer(who.clone(), rep::UNEXPECTED_RESPONSE); + self.behaviour.disconnect_peer(&who); + } + None + } + + fn update_peer_info(&mut self, who: &PeerId) { + if let Some(info) = self.sync.peer_info(who) { + if let Some(ref mut peer) = self.context_data.peers.get_mut(who) { + peer.info.best_hash = info.best_hash; + peer.info.best_number = info.best_number; + } + } + } + + /// Returns information about all the peers we are connected to after the handshake message. + pub fn peers_info(&self) -> impl Iterator)> { + self.context_data + .peers + .iter() + .map(|(id, peer)| (id, &peer.info)) + } + + pub fn on_custom_message(&mut self, who: PeerId, data: BytesMut) -> CustomMessageOutcome { + let message = match as Decode>::decode(&mut &data[..]) { + Ok(message) => message, + Err(err) => { + debug!(target: "sync", "Couldn't decode packet sent by {}: {:?}: {}", who, data, err.what()); + self.peerset_handle + .report_peer(who.clone(), rep::BAD_MESSAGE); + return CustomMessageOutcome::None; + } + }; + + let mut stats = self.context_data.stats.entry(message.id()).or_default(); + stats.bytes_in += data.len() as u64; + stats.count_in += 1; + + match message { + GenericMessage::Status(s) => return self.on_status_message(who, s), + GenericMessage::BlockRequest(r) => self.on_block_request(who, r), + GenericMessage::BlockResponse(r) => { + if let Some(request) = self.handle_response(who.clone(), &r) { + let outcome = self.on_block_response(who.clone(), request, r); + self.update_peer_info(&who); + return outcome; + } + } + GenericMessage::BlockAnnounce(announce) => { + let outcome = self.on_block_announce(who.clone(), announce); + self.update_peer_info(&who); + return outcome; + } + GenericMessage::Transactions(m) => 
self.on_extrinsics(who, m), + GenericMessage::RemoteCallRequest(request) => self.on_remote_call_request(who, request), + GenericMessage::RemoteCallResponse(_) => { + warn!(target: "sub-libp2p", "Received unexpected RemoteCallResponse") + } + GenericMessage::RemoteReadRequest(request) => self.on_remote_read_request(who, request), + GenericMessage::RemoteReadResponse(_) => { + warn!(target: "sub-libp2p", "Received unexpected RemoteReadResponse") + } + GenericMessage::RemoteHeaderRequest(request) => { + self.on_remote_header_request(who, request) + } + GenericMessage::RemoteHeaderResponse(_) => { + warn!(target: "sub-libp2p", "Received unexpected RemoteHeaderResponse") + } + GenericMessage::RemoteChangesRequest(request) => { + self.on_remote_changes_request(who, request) + } + GenericMessage::RemoteChangesResponse(_) => { + warn!(target: "sub-libp2p", "Received unexpected RemoteChangesResponse") + } + GenericMessage::FinalityProofRequest(request) => { + self.on_finality_proof_request(who, request) + } + GenericMessage::FinalityProofResponse(response) => { + return self.on_finality_proof_response(who, response) + } + GenericMessage::RemoteReadChildRequest(request) => { + self.on_remote_read_child_request(who, request) + } + GenericMessage::Consensus(msg) => { + return if self.protocol_name_by_engine.contains_key(&msg.engine_id) { + CustomMessageOutcome::NotificationsReceived { + remote: who.clone(), + messages: vec![(msg.engine_id, From::from(msg.data))], + } + } else { + warn!(target: "sync", "Received message on non-registered protocol: {:?}", msg.engine_id); + CustomMessageOutcome::None + } + } + GenericMessage::ConsensusBatch(messages) => { + let messages = messages .into_iter() .filter_map(|msg| { if self.protocol_name_by_engine.contains_key(&msg.engine_id) { @@ -598,1263 +614,1393 @@ impl Protocol { }) .collect::>(); - return if !messages.is_empty() { - CustomMessageOutcome::NotificationsReceived { - remote: who.clone(), - messages, - } - } else { - 
CustomMessageOutcome::None - }; - }, - } - - CustomMessageOutcome::None - } - - fn send_request(&mut self, who: &PeerId, message: Message) { - send_request::( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - who, - message, - ); - } - - fn send_message( - &mut self, - who: &PeerId, - message: Option<(Cow<'static, [u8]>, Vec)>, - legacy: Message, - ) { - send_message::( - &mut self.behaviour, - &mut self.context_data.stats, - who, - message, - legacy, - ); - } - - /// Called when a new peer is connected - pub fn on_peer_connected(&mut self, who: PeerId) { - trace!(target: "sync", "Connecting {}", who); - self.handshaking_peers.insert(who.clone(), HandshakingPeer { timestamp: Instant::now() }); - self.send_status(who); - } - - /// Called by peer when it is disconnecting - pub fn on_peer_disconnected(&mut self, peer: PeerId) -> CustomMessageOutcome { - if self.important_peers.contains(&peer) { - warn!(target: "sync", "Reserved peer {} disconnected", peer); - } else { - trace!(target: "sync", "{} disconnected", peer); - } - - // lock all the the peer lists so that add/remove peer events are in order - let removed = { - self.handshaking_peers.remove(&peer); - self.context_data.peers.remove(&peer) - }; - if let Some(_peer_data) = removed { - self.sync.peer_disconnected(peer.clone()); - - // Notify all the notification protocols as closed. - CustomMessageOutcome::NotificationStreamClosed { - remote: peer, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), - } - } else { - CustomMessageOutcome::None - } - } - - /// Called as a back-pressure mechanism if the networking detects that the peer cannot process - /// our messaging rate fast enough. - pub fn on_clogged_peer(&self, who: PeerId, _msg: Option>) { - self.peerset_handle.report_peer(who.clone(), rep::CLOGGED_PEER); - - // Print some diagnostics. 
- if let Some(peer) = self.context_data.peers.get(&who) { - debug!(target: "sync", "Clogged peer {} (protocol_version: {:?}; roles: {:?}; \ + return if !messages.is_empty() { + CustomMessageOutcome::NotificationsReceived { + remote: who.clone(), + messages, + } + } else { + CustomMessageOutcome::None + }; + } + } + + CustomMessageOutcome::None + } + + fn send_request(&mut self, who: &PeerId, message: Message) { + send_request::( + &mut self.behaviour, + &mut self.context_data.stats, + &mut self.context_data.peers, + who, + message, + ); + } + + fn send_message( + &mut self, + who: &PeerId, + message: Option<(Cow<'static, [u8]>, Vec)>, + legacy: Message, + ) { + send_message::( + &mut self.behaviour, + &mut self.context_data.stats, + who, + message, + legacy, + ); + } + + /// Called when a new peer is connected + pub fn on_peer_connected(&mut self, who: PeerId) { + trace!(target: "sync", "Connecting {}", who); + self.handshaking_peers.insert( + who.clone(), + HandshakingPeer { + timestamp: Instant::now(), + }, + ); + self.send_status(who); + } + + /// Called by peer when it is disconnecting + pub fn on_peer_disconnected(&mut self, peer: PeerId) -> CustomMessageOutcome { + if self.important_peers.contains(&peer) { + warn!(target: "sync", "Reserved peer {} disconnected", peer); + } else { + trace!(target: "sync", "{} disconnected", peer); + } + + // lock all the the peer lists so that add/remove peer events are in order + let removed = { + self.handshaking_peers.remove(&peer); + self.context_data.peers.remove(&peer) + }; + if let Some(_peer_data) = removed { + self.sync.peer_disconnected(peer.clone()); + + // Notify all the notification protocols as closed. 
+ CustomMessageOutcome::NotificationStreamClosed { + remote: peer, + protocols: self.protocol_name_by_engine.keys().cloned().collect(), + } + } else { + CustomMessageOutcome::None + } + } + + /// Called as a back-pressure mechanism if the networking detects that the peer cannot process + /// our messaging rate fast enough. + pub fn on_clogged_peer(&self, who: PeerId, _msg: Option>) { + self.peerset_handle + .report_peer(who.clone(), rep::CLOGGED_PEER); + + // Print some diagnostics. + if let Some(peer) = self.context_data.peers.get(&who) { + debug!(target: "sync", "Clogged peer {} (protocol_version: {:?}; roles: {:?}; \ known_extrinsics: {:?}; known_blocks: {:?}; best_hash: {:?}; best_number: {:?})", who, peer.info.protocol_version, peer.info.roles, peer.known_extrinsics, peer.known_blocks, peer.info.best_hash, peer.info.best_number); - } else { - debug!(target: "sync", "Peer clogged before being properly connected"); - } - } - - fn on_block_request(&mut self, peer: PeerId, request: message::BlockRequest) { - trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?} for {:?}", - request.id, - peer, - request.from, - request.to, - request.max, - request.fields, - ); - - // sending block requests to the node that is unable to serve it is considered a bad behavior - if !self.config.roles.is_full() { - trace!(target: "sync", "Peer {} is trying to sync from the light node", peer); - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::UNEXPECTED_REQUEST); - return; - } - - let mut blocks = Vec::new(); - let mut id = match request.from { - message::FromBlock::Hash(h) => BlockId::Hash(h), - message::FromBlock::Number(n) => BlockId::Number(n), - }; - let max = cmp::min(request.max.unwrap_or(u32::max_value()), MAX_BLOCK_DATA_RESPONSE) as usize; - let get_header = request.fields.contains(message::BlockAttributes::HEADER); - let get_body = request.fields.contains(message::BlockAttributes::BODY); - let get_justification = 
request - .fields - .contains(message::BlockAttributes::JUSTIFICATION); - while let Some(header) = self.context_data.chain.header(id).unwrap_or(None) { - if blocks.len() >= max { - break; - } - let number = header.number().clone(); - let hash = header.hash(); - let parent_hash = header.parent_hash().clone(); - let justification = if get_justification { - self.context_data.chain.justification(&BlockId::Hash(hash)).unwrap_or(None) - } else { - None - }; - let block_data = message::generic::BlockData { - hash: hash, - header: if get_header { Some(header) } else { None }, - body: if get_body { - self.context_data - .chain - .block_body(&BlockId::Hash(hash)) - .unwrap_or(None) - } else { - None - }, - receipt: None, - message_queue: None, - justification, - }; - // Stop if we don't have requested block body - if get_body && block_data.body.is_none() { - trace!(target: "sync", "Missing data for block request."); - break; - } - blocks.push(block_data); - match request.direction { - message::Direction::Ascending => id = BlockId::Number(number + One::one()), - message::Direction::Descending => { - if number.is_zero() { - break; - } - id = BlockId::Hash(parent_hash) - } - } - } - let response = message::generic::BlockResponse { - id: request.id, - blocks: blocks, - }; - trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); - self.send_message(&peer, None, GenericMessage::BlockResponse(response)) - } - - /// Adjusts the reputation of a node. 
- pub fn report_peer(&self, who: PeerId, reputation: sc_peerset::ReputationChange) { - self.peerset_handle.report_peer(who, reputation) - } - - fn on_block_response( - &mut self, - peer: PeerId, - request: message::BlockRequest, - response: message::BlockResponse, - ) -> CustomMessageOutcome { - let blocks_range = || match ( - response.blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), - response.blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", - response.id, - peer, - response.blocks.len(), - blocks_range(), - ); - - if request.fields == message::BlockAttributes::JUSTIFICATION { - match self.sync.on_block_justification(peer, response) { - Ok(sync::OnBlockJustification::Nothing) => CustomMessageOutcome::None, - Ok(sync::OnBlockJustification::Import { peer, hash, number, justification }) => - CustomMessageOutcome::JustificationImport(peer, hash, number, justification), - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - } - } - } else { - // Validate fields against the request. 
- if request.fields.contains(message::BlockAttributes::HEADER) && response.blocks.iter().any(|b| b.header.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing header for a block"); - return CustomMessageOutcome::None - } - if request.fields.contains(message::BlockAttributes::BODY) && response.blocks.iter().any(|b| b.body.is_none()) { - self.behaviour.disconnect_peer(&peer); - self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); - trace!(target: "sync", "Missing body for a block"); - return CustomMessageOutcome::None - } - - match self.sync.on_block_data(peer, Some(request), response) { - Ok(sync::OnBlockData::Import(origin, blocks)) => - CustomMessageOutcome::BlockImport(origin, blocks), - Ok(sync::OnBlockData::Request(peer, req)) => { - self.send_request(&peer, GenericMessage::BlockRequest(req)); - CustomMessageOutcome::None - } - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - } - } - } - } - - /// Perform time based maintenance. - /// - /// > **Note**: This method normally doesn't have to be called except for testing purposes. 
- pub fn tick(&mut self) { - self.maintain_peers(); - self.report_metrics() - } - - fn maintain_peers(&mut self) { - let tick = Instant::now(); - let mut aborting = Vec::new(); - { - for (who, peer) in self.context_data.peers.iter() { - if peer.block_request.as_ref().map_or(false, |(t, _)| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - log!( - target: "sync", - if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, - "Request timeout {}", who - ); - aborting.push(who.clone()); - } else if peer.obsolete_requests.values().any(|t| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) { - log!( - target: "sync", - if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, - "Obsolete timeout {}", who - ); - aborting.push(who.clone()); - } - } - for (who, _) in self.handshaking_peers.iter() - .filter(|(_, handshaking)| (tick - handshaking.timestamp).as_secs() > REQUEST_TIMEOUT_SEC) - { - log!( - target: "sync", - if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, - "Handshake timeout {}", who - ); - aborting.push(who.clone()); - } - } - - for p in aborting { - self.behaviour.disconnect_peer(&p); - self.peerset_handle.report_peer(p, rep::TIMEOUT); - } - } - - /// Called by peer to report status - fn on_status_message(&mut self, who: PeerId, status: message::Status) -> CustomMessageOutcome { - trace!(target: "sync", "New peer {} {:?}", who, status); - let _protocol_version = { - if self.context_data.peers.contains_key(&who) { - log!( - target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Debug }, - "Unexpected status packet from {}", who - ); - self.peerset_handle.report_peer(who, rep::UNEXPECTED_STATUS); - return CustomMessageOutcome::None; - } - if status.genesis_hash != self.genesis_hash { - log!( - target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, - "Peer is on different chain (our genesis: {} theirs: {})", - self.genesis_hash, 
status.genesis_hash - ); - self.peerset_handle.report_peer(who.clone(), rep::GENESIS_MISMATCH); - self.behaviour.disconnect_peer(&who); - - if self.boot_node_ids.contains(&who) { - error!( - target: "sync", - "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", - who, - self.genesis_hash, - status.genesis_hash, - ); - } - - return CustomMessageOutcome::None; - } - if status.version < MIN_VERSION && CURRENT_VERSION < status.min_supported_version { - log!( - target: "sync", - if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, - "Peer {:?} using unsupported protocol version {}", who, status.version - ); - self.peerset_handle.report_peer(who.clone(), rep::BAD_PROTOCOL); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; - } - - if self.config.roles.is_light() { - // we're not interested in light peers - if status.roles.is_light() { - debug!(target: "sync", "Peer {} is unable to serve light requests", who); - self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; - } - - // we don't interested in peers that are far behind us - let self_best_block = self - .context_data - .chain - .info() - .best_number; - let blocks_difference = self_best_block - .checked_sub(&status.best_number) - .unwrap_or_else(Zero::zero) - .saturated_into::(); - if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { - debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); - self.peerset_handle.report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); - self.behaviour.disconnect_peer(&who); - return CustomMessageOutcome::None; - } - } - - let info = match self.handshaking_peers.remove(&who) { - Some(_handshaking) => { - PeerInfo { - protocol_version: status.version, - roles: status.roles, - best_hash: status.best_hash, - best_number: status.best_number - } - }, - None => { - error!(target: "sync", 
"Received status from previously unconnected node {}", who); - return CustomMessageOutcome::None; - }, - }; - - let peer = Peer { - info, - block_request: None, - known_extrinsics: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_EXTRINSICS) - .expect("Constant is nonzero")), - known_blocks: LruHashSet::new(NonZeroUsize::new(MAX_KNOWN_BLOCKS) - .expect("Constant is nonzero")), - next_request_id: 0, - obsolete_requests: HashMap::new(), - }; - self.context_data.peers.insert(who.clone(), peer); - - debug!(target: "sync", "Connected {}", who); - status.version - }; - - let info = self.context_data.peers.get(&who).expect("We just inserted above; QED").info.clone(); - self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who.clone(), status.best_number)); - if info.roles.is_full() { - match self.sync.new_peer(who.clone(), info.best_hash, info.best_number) { - Ok(None) => (), - Ok(Some(req)) => self.send_request(&who, GenericMessage::BlockRequest(req)), - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu) - } - } - } - - // Notify all the notification protocols as open. - CustomMessageOutcome::NotificationStreamOpened { - remote: who, - protocols: self.protocol_name_by_engine.keys().cloned().collect(), - roles: info.roles, - } - } - - /// Send a notification to the given peer we're connected to. - /// - /// Doesn't do anything if we don't have a notifications substream for that protocol with that - /// peer. 
- pub fn write_notification( - &mut self, - target: PeerId, - engine_id: ConsensusEngineId, - message: impl Into>, - ) { - if let Some(protocol_name) = self.protocol_name_by_engine.get(&engine_id) { - let message = message.into(); - let fallback = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { - engine_id, - data: message.clone(), - }).encode(); - self.behaviour.write_notification(&target, protocol_name.clone(), message, fallback); - } else { - error!( - target: "sub-libp2p", - "Sending a notification with a protocol that wasn't registered: {:?}", - engine_id - ); - } - } - - /// Registers a new notifications protocol. - /// - /// While registering a protocol while we already have open connections is discouraged, we - /// nonetheless handle it by notifying that we opened channels with everyone. This function - /// returns a list of substreams to open as a result. - pub fn register_notifications_protocol<'a>( - &'a mut self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, - ) -> impl ExactSizeIterator + 'a { - let protocol_name = protocol_name.into(); - if self.protocol_name_by_engine.insert(engine_id, protocol_name.clone()).is_some() { - error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); - } else { - self.behaviour.register_notif_protocol(protocol_name.clone(), Vec::new()); - self.legacy_equiv_by_name.insert(protocol_name, Fallback::Consensus(engine_id)); - } - - self.context_data.peers.iter() - .map(|(peer_id, peer)| (peer_id, peer.info.roles)) - } - - /// Called when peer sends us new extrinsics - fn on_extrinsics( - &mut self, - who: PeerId, - extrinsics: message::Transactions - ) { - // sending extrinsic to light node is considered a bad behavior - if !self.config.roles.is_full() { - trace!(target: "sync", "Peer {} is trying to send extrinsic to the light node", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::UNEXPECTED_EXTRINSICS); - return; - } 
- - // Accept extrinsics only when fully synced - if self.sync.status().state != SyncState::Idle { - trace!(target: "sync", "{} Ignoring extrinsics while syncing", who); - return; - } - trace!(target: "sync", "Received {} extrinsics from {}", extrinsics.len(), who); - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - for t in extrinsics { - let hash = self.transaction_pool.hash_of(&t); - peer.known_extrinsics.insert(hash); - - self.transaction_pool.import( - self.peerset_handle.clone().into(), - who.clone(), - rep::GOOD_EXTRINSIC, - rep::BAD_EXTRINSIC, - t, - ); - } - } - } - - /// Propagate one extrinsic. - pub fn propagate_extrinsic( - &mut self, - hash: &H, - ) { - debug!(target: "sync", "Propagating extrinsic [{:?}]", hash); - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - return; - } - if let Some(extrinsic) = self.transaction_pool.transaction(hash) { - let propagated_to = self.do_propagate_extrinsics(&[(hash.clone(), extrinsic)]); - self.transaction_pool.on_broadcasted(propagated_to); - } - } - - fn do_propagate_extrinsics( - &mut self, - extrinsics: &[(H, B::Extrinsic)], - ) -> HashMap> { - let mut propagated_to = HashMap::new(); - for (who, peer) in self.context_data.peers.iter_mut() { - // never send extrinsics to the light node - if !peer.info.roles.is_full() { - continue; - } - - let (hashes, to_send): (Vec<_>, Vec<_>) = extrinsics - .iter() - .filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone())) - .cloned() - .unzip(); - - if !to_send.is_empty() { - for hash in hashes { - propagated_to - .entry(hash) - .or_insert_with(Vec::new) - .push(who.to_base58()); - } - trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); - let encoded = to_send.encode(); - send_message:: ( - &mut self.behaviour, - &mut self.context_data.stats, - &who, - Some((self.transactions_protocol.clone(), encoded)), - GenericMessage::Transactions(to_send) - ) - } - } - - 
propagated_to - } - - /// Call when we must propagate ready extrinsics to peers. - pub fn propagate_extrinsics(&mut self) { - debug!(target: "sync", "Propagating extrinsics"); - // Accept transactions only when fully synced - if self.sync.status().state != SyncState::Idle { - return; - } - let extrinsics = self.transaction_pool.transactions(); - let propagated_to = self.do_propagate_extrinsics(&extrinsics); - self.transaction_pool.on_broadcasted(propagated_to); - } - - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. - pub fn announce_block(&mut self, hash: B::Hash, data: Vec) { - let header = match self.context_data.chain.header(BlockId::Hash(hash)) { - Ok(Some(header)) => header, - Ok(None) => { - warn!("Trying to announce unknown block: {}", hash); - return; - } - Err(e) => { - warn!("Error reading block header {}: {:?}", hash, e); - return; - } - }; - - // don't announce genesis block since it will be ignored - if header.number().is_zero() { - return; - } - - let is_best = self.context_data.chain.info().best_hash == hash; - debug!(target: "sync", "Reannouncing block {:?}", hash); - self.send_announcement(&header, data, is_best, true) - } - - fn send_announcement(&mut self, header: &B::Header, data: Vec, is_best: bool, force: bool) { - let hash = header.hash(); - - for (who, ref mut peer) in self.context_data.peers.iter_mut() { - trace!(target: "sync", "Announcing block {:?} to {}", hash, who); - let inserted = peer.known_blocks.insert(hash); - if inserted || force { - let message = message::BlockAnnounce { - header: header.clone(), - state: if peer.info.protocol_version >= 4 { - if is_best { - Some(message::BlockState::Best) - } else { - Some(message::BlockState::Normal) - } - } else { - None - }, - data: if peer.info.protocol_version >= 4 { - Some(data.clone()) - } else { - None - }, - }; - - let encoded = message.encode(); - - 
send_message:: ( - &mut self.behaviour, - &mut self.context_data.stats, - &who, - Some((self.block_announces_protocol.clone(), encoded)), - Message::::BlockAnnounce(message), - ) - } - } - } - - /// Send Status message - fn send_status(&mut self, who: PeerId) { - let info = self.context_data.chain.info(); - let status = message::generic::Status { - version: CURRENT_VERSION, - min_supported_version: MIN_VERSION, - genesis_hash: info.genesis_hash, - roles: self.config.roles.into(), - best_number: info.best_number, - best_hash: info.best_hash, - chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible - }; - - self.send_message(&who, None, GenericMessage::Status(status)) - } - - fn on_block_announce( - &mut self, - who: PeerId, - announce: BlockAnnounce, - ) -> CustomMessageOutcome { - let hash = announce.header.hash(); - let number = *announce.header.number(); - - if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { - peer.known_blocks.insert(hash.clone()); - } - - let is_their_best = match announce.state.unwrap_or(message::BlockState::Best) { - message::BlockState::Best => true, - message::BlockState::Normal => false, - }; - - match self.sync.on_block_announce(who.clone(), hash, &announce, is_their_best) { - sync::OnBlockAnnounce::Nothing => { - // `on_block_announce` returns `OnBlockAnnounce::ImportHeader` - // when we have all data required to import the block - // in the BlockAnnounce message. This is only when: - // 1) we're on light client; - // AND - // 2) parent block is already imported and not pruned. - if is_their_best { - return CustomMessageOutcome::PeerNewBest(who, number); - } else { - return CustomMessageOutcome::None; - } - } - sync::OnBlockAnnounce::ImportHeader => () // We proceed with the import. 
- } - - // to import header from announced block let's construct response to request that normally would have - // been sent over network (but it is not in our case) - let blocks_to_import = self.sync.on_block_data( - who.clone(), - None, - message::generic::BlockResponse { - id: 0, - blocks: vec![ - message::generic::BlockData { - hash: hash, - header: Some(announce.header), - body: None, - receipt: None, - message_queue: None, - justification: None, - }, - ], - }, - ); - match blocks_to_import { - Ok(sync::OnBlockData::Import(origin, blocks)) => { - if is_their_best { - self.pending_messages.push_back(CustomMessageOutcome::PeerNewBest(who, number)); - } - CustomMessageOutcome::BlockImport(origin, blocks) - }, - Ok(sync::OnBlockData::Request(peer, req)) => { - self.send_request(&peer, GenericMessage::BlockRequest(req)); - if is_their_best { - CustomMessageOutcome::PeerNewBest(who, number) - } else { - CustomMessageOutcome::None - } - } - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu); - if is_their_best { - CustomMessageOutcome::PeerNewBest(who, number) - } else { - CustomMessageOutcome::None - } - } - } - } - - /// Call this when a block has been imported in the import queue - pub fn on_block_imported(&mut self, header: &B::Header, is_best: bool) { - if is_best { - self.sync.update_chain_info(header); - } - } - - /// Call this when a block has been finalized. The sync layer may have some additional - /// requesting to perform. 
- pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { - self.sync.on_block_finalized(&hash, *header.number()) - } - - fn on_remote_call_request( - &mut self, - who: PeerId, - request: message::RemoteCallRequest, - ) { - trace!(target: "sync", "Remote call request {} from {} ({} at {})", - request.id, - who, - request.method, - request.block - ); - let proof = match self.context_data.chain.execution_proof( - &BlockId::Hash(request.block), - &request.method, - &request.data, - ) { - Ok((_, proof)) => proof, - Err(error) => { - trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", - request.id, - who, - request.method, - request.block, - error - ); - self.peerset_handle.report_peer(who.clone(), rep::RPC_FAILED); - StorageProof::empty() - } - }; - - self.send_message( - &who, - None, - GenericMessage::RemoteCallResponse(message::RemoteCallResponse { - id: request.id, - proof, - }), - ); - } - - /// Request a justification for the given block. - /// - /// Uses `protocol` to queue a new justification request and tries to dispatch all pending - /// requests. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.sync.request_justification(&hash, number) - } - - /// Request syncing for the given block from given set of peers. - /// Uses `protocol` to queue a new block download request and tries to dispatch all pending - /// requests. - pub fn set_sync_fork_request(&mut self, peers: Vec, hash: &B::Hash, number: NumberFor) { - self.sync.set_sync_fork_request(peers, hash, number) - } - - /// A batch of blocks have been processed, with or without errors. - /// Call this when a batch of blocks have been processed by the importqueue, with or without - /// errors. 
- pub fn blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - let results = self.sync.on_blocks_processed( - imported, - count, - results, - ); - for result in results { - match result { - Ok((id, req)) => { - let msg = GenericMessage::BlockRequest(req); - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - &id, - msg - ) - } - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu) - } - } - } - } - - /// Call this when a justification has been processed by the import queue, with or without - /// errors. - pub fn justification_import_result(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - self.sync.on_justification_import(hash, number, success) - } - - /// Request a finality proof for the given block. - /// - /// Queues a new finality proof request and tries to dispatch all pending requests. - pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - self.sync.request_finality_proof(&hash, number) - } - - /// Notify the protocol that we have learned about the existence of nodes. - /// - /// Can be called multiple times with the same `PeerId`s. 
- pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - self.behaviour.add_discovered_nodes(peer_ids) - } - - pub fn finality_proof_import_result( - &mut self, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - self.sync.on_finality_proof_import(request_block, finalization_result) - } - - fn on_remote_read_request( - &mut self, - who: PeerId, - request: message::RemoteReadRequest, - ) { - if request.keys.is_empty() { - debug!(target: "sync", "Invalid remote read request sent by {}", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); - return; - } - - let keys_str = || match request.keys.len() { - 1 => HexDisplay::from(&request.keys[0]).to_string(), - _ => format!( - "{}..{}", - HexDisplay::from(&request.keys[0]), - HexDisplay::from(&request.keys[request.keys.len() - 1]), - ), - }; - - trace!(target: "sync", "Remote read request {} from {} ({} at {})", + } else { + debug!(target: "sync", "Peer clogged before being properly connected"); + } + } + + fn on_block_request(&mut self, peer: PeerId, request: message::BlockRequest) { + trace!(target: "sync", "BlockRequest {} from {}: from {:?} to {:?} max {:?} for {:?}", + request.id, + peer, + request.from, + request.to, + request.max, + request.fields, + ); + + // sending block requests to the node that is unable to serve it is considered a bad behavior + if !self.config.roles.is_full() { + trace!(target: "sync", "Peer {} is trying to sync from the light node", peer); + self.behaviour.disconnect_peer(&peer); + self.peerset_handle + .report_peer(peer, rep::UNEXPECTED_REQUEST); + return; + } + + let mut blocks = Vec::new(); + let mut id = match request.from { + message::FromBlock::Hash(h) => BlockId::Hash(h), + message::FromBlock::Number(n) => BlockId::Number(n), + }; + let max = cmp::min( + request.max.unwrap_or(u32::max_value()), + MAX_BLOCK_DATA_RESPONSE, + ) as usize; + let get_header = 
request.fields.contains(message::BlockAttributes::HEADER); + let get_body = request.fields.contains(message::BlockAttributes::BODY); + let get_justification = request + .fields + .contains(message::BlockAttributes::JUSTIFICATION); + while let Some(header) = self.context_data.chain.header(id).unwrap_or(None) { + if blocks.len() >= max { + break; + } + let number = header.number().clone(); + let hash = header.hash(); + let parent_hash = header.parent_hash().clone(); + let justification = if get_justification { + self.context_data + .chain + .justification(&BlockId::Hash(hash)) + .unwrap_or(None) + } else { + None + }; + let block_data = message::generic::BlockData { + hash: hash, + header: if get_header { Some(header) } else { None }, + body: if get_body { + self.context_data + .chain + .block_body(&BlockId::Hash(hash)) + .unwrap_or(None) + } else { + None + }, + receipt: None, + message_queue: None, + justification, + }; + // Stop if we don't have requested block body + if get_body && block_data.body.is_none() { + trace!(target: "sync", "Missing data for block request."); + break; + } + blocks.push(block_data); + match request.direction { + message::Direction::Ascending => id = BlockId::Number(number + One::one()), + message::Direction::Descending => { + if number.is_zero() { + break; + } + id = BlockId::Hash(parent_hash) + } + } + } + let response = message::generic::BlockResponse { + id: request.id, + blocks: blocks, + }; + trace!(target: "sync", "Sending BlockResponse with {} blocks", response.blocks.len()); + self.send_message(&peer, None, GenericMessage::BlockResponse(response)) + } + + /// Adjusts the reputation of a node. 
+ pub fn report_peer(&self, who: PeerId, reputation: sc_peerset::ReputationChange) { + self.peerset_handle.report_peer(who, reputation) + } + + fn on_block_response( + &mut self, + peer: PeerId, + request: message::BlockRequest, + response: message::BlockResponse, + ) -> CustomMessageOutcome { + let blocks_range = || match ( + response + .blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + response + .blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + trace!(target: "sync", "BlockResponse {} from {} with {} blocks {}", + response.id, + peer, + response.blocks.len(), + blocks_range(), + ); + + if request.fields == message::BlockAttributes::JUSTIFICATION { + match self.sync.on_block_justification(peer, response) { + Ok(sync::OnBlockJustification::Nothing) => CustomMessageOutcome::None, + Ok(sync::OnBlockJustification::Import { + peer, + hash, + number, + justification, + }) => CustomMessageOutcome::JustificationImport(peer, hash, number, justification), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + } + } + } else { + // Validate fields against the request. 
+ if request.fields.contains(message::BlockAttributes::HEADER) + && response.blocks.iter().any(|b| b.header.is_none()) + { + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); + trace!(target: "sync", "Missing header for a block"); + return CustomMessageOutcome::None; + } + if request.fields.contains(message::BlockAttributes::BODY) + && response.blocks.iter().any(|b| b.body.is_none()) + { + self.behaviour.disconnect_peer(&peer); + self.peerset_handle.report_peer(peer, rep::BAD_RESPONSE); + trace!(target: "sync", "Missing body for a block"); + return CustomMessageOutcome::None; + } + + match self.sync.on_block_data(peer, Some(request), response) { + Ok(sync::OnBlockData::Import(origin, blocks)) => { + CustomMessageOutcome::BlockImport(origin, blocks) + } + Ok(sync::OnBlockData::Request(peer, req)) => { + self.send_request(&peer, GenericMessage::BlockRequest(req)); + CustomMessageOutcome::None + } + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + } + } + } + } + + /// Perform time based maintenance. + /// + /// > **Note**: This method normally doesn't have to be called except for testing purposes. 
+ pub fn tick(&mut self) { + self.maintain_peers(); + self.report_metrics() + } + + fn maintain_peers(&mut self) { + let tick = Instant::now(); + let mut aborting = Vec::new(); + { + for (who, peer) in self.context_data.peers.iter() { + if peer + .block_request + .as_ref() + .map_or(false, |(t, _)| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) + { + log!( + target: "sync", + if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, + "Request timeout {}", who + ); + aborting.push(who.clone()); + } else if peer + .obsolete_requests + .values() + .any(|t| (tick - *t).as_secs() > REQUEST_TIMEOUT_SEC) + { + log!( + target: "sync", + if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, + "Obsolete timeout {}", who + ); + aborting.push(who.clone()); + } + } + for (who, _) in self.handshaking_peers.iter().filter(|(_, handshaking)| { + (tick - handshaking.timestamp).as_secs() > REQUEST_TIMEOUT_SEC + }) { + log!( + target: "sync", + if self.important_peers.contains(who) { Level::Warn } else { Level::Trace }, + "Handshake timeout {}", who + ); + aborting.push(who.clone()); + } + } + + for p in aborting { + self.behaviour.disconnect_peer(&p); + self.peerset_handle.report_peer(p, rep::TIMEOUT); + } + } + + /// Called by peer to report status + fn on_status_message( + &mut self, + who: PeerId, + status: message::Status, + ) -> CustomMessageOutcome { + trace!(target: "sync", "New peer {} {:?}", who, status); + let _protocol_version = { + if self.context_data.peers.contains_key(&who) { + log!( + target: "sync", + if self.important_peers.contains(&who) { Level::Warn } else { Level::Debug }, + "Unexpected status packet from {}", who + ); + self.peerset_handle.report_peer(who, rep::UNEXPECTED_STATUS); + return CustomMessageOutcome::None; + } + if status.genesis_hash != self.genesis_hash { + log!( + target: "sync", + if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, + "Peer is on different chain (our genesis: {} 
theirs: {})", + self.genesis_hash, status.genesis_hash + ); + self.peerset_handle + .report_peer(who.clone(), rep::GENESIS_MISMATCH); + self.behaviour.disconnect_peer(&who); + + if self.boot_node_ids.contains(&who) { + error!( + target: "sync", + "Bootnode with peer id `{}` is on a different chain (our genesis: {} theirs: {})", + who, + self.genesis_hash, + status.genesis_hash, + ); + } + + return CustomMessageOutcome::None; + } + if status.version < MIN_VERSION && CURRENT_VERSION < status.min_supported_version { + log!( + target: "sync", + if self.important_peers.contains(&who) { Level::Warn } else { Level::Trace }, + "Peer {:?} using unsupported protocol version {}", who, status.version + ); + self.peerset_handle + .report_peer(who.clone(), rep::BAD_PROTOCOL); + self.behaviour.disconnect_peer(&who); + return CustomMessageOutcome::None; + } + + if self.config.roles.is_light() { + // we're not interested in light peers + if status.roles.is_light() { + debug!(target: "sync", "Peer {} is unable to serve light requests", who); + self.peerset_handle.report_peer(who.clone(), rep::BAD_ROLE); + self.behaviour.disconnect_peer(&who); + return CustomMessageOutcome::None; + } + + // we don't interested in peers that are far behind us + let self_best_block = self.context_data.chain.info().best_number; + let blocks_difference = self_best_block + .checked_sub(&status.best_number) + .unwrap_or_else(Zero::zero) + .saturated_into::(); + if blocks_difference > LIGHT_MAXIMAL_BLOCKS_DIFFERENCE { + debug!(target: "sync", "Peer {} is far behind us and will unable to serve light requests", who); + self.peerset_handle + .report_peer(who.clone(), rep::PEER_BEHIND_US_LIGHT); + self.behaviour.disconnect_peer(&who); + return CustomMessageOutcome::None; + } + } + + let info = match self.handshaking_peers.remove(&who) { + Some(_handshaking) => PeerInfo { + protocol_version: status.version, + roles: status.roles, + best_hash: status.best_hash, + best_number: status.best_number, + }, + None => { 
+ error!(target: "sync", "Received status from previously unconnected node {}", who); + return CustomMessageOutcome::None; + } + }; + + let peer = Peer { + info, + block_request: None, + known_extrinsics: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_EXTRINSICS).expect("Constant is nonzero"), + ), + known_blocks: LruHashSet::new( + NonZeroUsize::new(MAX_KNOWN_BLOCKS).expect("Constant is nonzero"), + ), + next_request_id: 0, + obsolete_requests: HashMap::new(), + }; + self.context_data.peers.insert(who.clone(), peer); + + debug!(target: "sync", "Connected {}", who); + status.version + }; + + let info = self + .context_data + .peers + .get(&who) + .expect("We just inserted above; QED") + .info + .clone(); + self.pending_messages + .push_back(CustomMessageOutcome::PeerNewBest( + who.clone(), + status.best_number, + )); + if info.roles.is_full() { + match self + .sync + .new_peer(who.clone(), info.best_hash, info.best_number) + { + Ok(None) => (), + Ok(Some(req)) => self.send_request(&who, GenericMessage::BlockRequest(req)), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu) + } + } + } + + // Notify all the notification protocols as open. + CustomMessageOutcome::NotificationStreamOpened { + remote: who, + protocols: self.protocol_name_by_engine.keys().cloned().collect(), + roles: info.roles, + } + } + + /// Send a notification to the given peer we're connected to. + /// + /// Doesn't do anything if we don't have a notifications substream for that protocol with that + /// peer. 
+ pub fn write_notification( + &mut self, + target: PeerId, + engine_id: ConsensusEngineId, + message: impl Into>, + ) { + if let Some(protocol_name) = self.protocol_name_by_engine.get(&engine_id) { + let message = message.into(); + let fallback = GenericMessage::<(), (), (), ()>::Consensus(ConsensusMessage { + engine_id, + data: message.clone(), + }) + .encode(); + self.behaviour + .write_notification(&target, protocol_name.clone(), message, fallback); + } else { + error!( + target: "sub-libp2p", + "Sending a notification with a protocol that wasn't registered: {:?}", + engine_id + ); + } + } + + /// Registers a new notifications protocol. + /// + /// While registering a protocol while we already have open connections is discouraged, we + /// nonetheless handle it by notifying that we opened channels with everyone. This function + /// returns a list of substreams to open as a result. + pub fn register_notifications_protocol<'a>( + &'a mut self, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + ) -> impl ExactSizeIterator + 'a { + let protocol_name = protocol_name.into(); + if self + .protocol_name_by_engine + .insert(engine_id, protocol_name.clone()) + .is_some() + { + error!(target: "sub-libp2p", "Notifications protocol already registered: {:?}", protocol_name); + } else { + self.behaviour + .register_notif_protocol(protocol_name.clone(), Vec::new()); + self.legacy_equiv_by_name + .insert(protocol_name, Fallback::Consensus(engine_id)); + } + + self.context_data + .peers + .iter() + .map(|(peer_id, peer)| (peer_id, peer.info.roles)) + } + + /// Called when peer sends us new extrinsics + fn on_extrinsics(&mut self, who: PeerId, extrinsics: message::Transactions) { + // sending extrinsic to light node is considered a bad behavior + if !self.config.roles.is_full() { + trace!(target: "sync", "Peer {} is trying to send extrinsic to the light node", who); + self.behaviour.disconnect_peer(&who); + self.peerset_handle + .report_peer(who, 
rep::UNEXPECTED_EXTRINSICS); + return; + } + + // Accept extrinsics only when fully synced + if self.sync.status().state != SyncState::Idle { + trace!(target: "sync", "{} Ignoring extrinsics while syncing", who); + return; + } + trace!(target: "sync", "Received {} extrinsics from {}", extrinsics.len(), who); + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + for t in extrinsics { + let hash = self.transaction_pool.hash_of(&t); + peer.known_extrinsics.insert(hash); + + self.transaction_pool.import( + self.peerset_handle.clone().into(), + who.clone(), + rep::GOOD_EXTRINSIC, + rep::BAD_EXTRINSIC, + t, + ); + } + } + } + + /// Propagate one extrinsic. + pub fn propagate_extrinsic(&mut self, hash: &H) { + debug!(target: "sync", "Propagating extrinsic [{:?}]", hash); + // Accept transactions only when fully synced + if self.sync.status().state != SyncState::Idle { + return; + } + if let Some(extrinsic) = self.transaction_pool.transaction(hash) { + let propagated_to = self.do_propagate_extrinsics(&[(hash.clone(), extrinsic)]); + self.transaction_pool.on_broadcasted(propagated_to); + } + } + + fn do_propagate_extrinsics( + &mut self, + extrinsics: &[(H, B::Extrinsic)], + ) -> HashMap> { + let mut propagated_to = HashMap::new(); + for (who, peer) in self.context_data.peers.iter_mut() { + // never send extrinsics to the light node + if !peer.info.roles.is_full() { + continue; + } + + let (hashes, to_send): (Vec<_>, Vec<_>) = extrinsics + .iter() + .filter(|&(ref hash, _)| peer.known_extrinsics.insert(hash.clone())) + .cloned() + .unzip(); + + if !to_send.is_empty() { + for hash in hashes { + propagated_to + .entry(hash) + .or_insert_with(Vec::new) + .push(who.to_base58()); + } + trace!(target: "sync", "Sending {} transactions to {}", to_send.len(), who); + let encoded = to_send.encode(); + send_message::( + &mut self.behaviour, + &mut self.context_data.stats, + &who, + Some((self.transactions_protocol.clone(), encoded)), + 
GenericMessage::Transactions(to_send), + ) + } + } + + propagated_to + } + + /// Call when we must propagate ready extrinsics to peers. + pub fn propagate_extrinsics(&mut self) { + debug!(target: "sync", "Propagating extrinsics"); + // Accept transactions only when fully synced + if self.sync.status().state != SyncState::Idle { + return; + } + let extrinsics = self.transaction_pool.transactions(); + let propagated_to = self.do_propagate_extrinsics(&extrinsics); + self.transaction_pool.on_broadcasted(propagated_to); + } + + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. + pub fn announce_block(&mut self, hash: B::Hash, data: Vec) { + let header = match self.context_data.chain.header(BlockId::Hash(hash)) { + Ok(Some(header)) => header, + Ok(None) => { + warn!("Trying to announce unknown block: {}", hash); + return; + } + Err(e) => { + warn!("Error reading block header {}: {:?}", hash, e); + return; + } + }; + + // don't announce genesis block since it will be ignored + if header.number().is_zero() { + return; + } + + let is_best = self.context_data.chain.info().best_hash == hash; + debug!(target: "sync", "Reannouncing block {:?}", hash); + self.send_announcement(&header, data, is_best, true) + } + + fn send_announcement(&mut self, header: &B::Header, data: Vec, is_best: bool, force: bool) { + let hash = header.hash(); + + for (who, ref mut peer) in self.context_data.peers.iter_mut() { + trace!(target: "sync", "Announcing block {:?} to {}", hash, who); + let inserted = peer.known_blocks.insert(hash); + if inserted || force { + let message = message::BlockAnnounce { + header: header.clone(), + state: if peer.info.protocol_version >= 4 { + if is_best { + Some(message::BlockState::Best) + } else { + Some(message::BlockState::Normal) + } + } else { + None + }, + data: if peer.info.protocol_version >= 4 { + Some(data.clone()) + } else { + None + 
}, + }; + + let encoded = message.encode(); + + send_message::( + &mut self.behaviour, + &mut self.context_data.stats, + &who, + Some((self.block_announces_protocol.clone(), encoded)), + Message::::BlockAnnounce(message), + ) + } + } + } + + /// Send Status message + fn send_status(&mut self, who: PeerId) { + let info = self.context_data.chain.info(); + let status = message::generic::Status { + version: CURRENT_VERSION, + min_supported_version: MIN_VERSION, + genesis_hash: info.genesis_hash, + roles: self.config.roles.into(), + best_number: info.best_number, + best_hash: info.best_hash, + chain_status: Vec::new(), // TODO: find a way to make this backwards-compatible + }; + + self.send_message(&who, None, GenericMessage::Status(status)) + } + + fn on_block_announce( + &mut self, + who: PeerId, + announce: BlockAnnounce, + ) -> CustomMessageOutcome { + let hash = announce.header.hash(); + let number = *announce.header.number(); + + if let Some(ref mut peer) = self.context_data.peers.get_mut(&who) { + peer.known_blocks.insert(hash.clone()); + } + + let is_their_best = match announce.state.unwrap_or(message::BlockState::Best) { + message::BlockState::Best => true, + message::BlockState::Normal => false, + }; + + match self + .sync + .on_block_announce(who.clone(), hash, &announce, is_their_best) + { + sync::OnBlockAnnounce::Nothing => { + // `on_block_announce` returns `OnBlockAnnounce::ImportHeader` + // when we have all data required to import the block + // in the BlockAnnounce message. This is only when: + // 1) we're on light client; + // AND + // 2) parent block is already imported and not pruned. + if is_their_best { + return CustomMessageOutcome::PeerNewBest(who, number); + } else { + return CustomMessageOutcome::None; + } + } + sync::OnBlockAnnounce::ImportHeader => (), // We proceed with the import. 
+ } + + // to import header from announced block let's construct response to request that normally would have + // been sent over network (but it is not in our case) + let blocks_to_import = self.sync.on_block_data( + who.clone(), + None, + message::generic::BlockResponse { + id: 0, + blocks: vec![message::generic::BlockData { + hash: hash, + header: Some(announce.header), + body: None, + receipt: None, + message_queue: None, + justification: None, + }], + }, + ); + match blocks_to_import { + Ok(sync::OnBlockData::Import(origin, blocks)) => { + if is_their_best { + self.pending_messages + .push_back(CustomMessageOutcome::PeerNewBest(who, number)); + } + CustomMessageOutcome::BlockImport(origin, blocks) + } + Ok(sync::OnBlockData::Request(peer, req)) => { + self.send_request(&peer, GenericMessage::BlockRequest(req)); + if is_their_best { + CustomMessageOutcome::PeerNewBest(who, number) + } else { + CustomMessageOutcome::None + } + } + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + if is_their_best { + CustomMessageOutcome::PeerNewBest(who, number) + } else { + CustomMessageOutcome::None + } + } + } + } + + /// Call this when a block has been imported in the import queue + pub fn on_block_imported(&mut self, header: &B::Header, is_best: bool) { + if is_best { + self.sync.update_chain_info(header); + } + } + + /// Call this when a block has been finalized. The sync layer may have some additional + /// requesting to perform. 
+ pub fn on_block_finalized(&mut self, hash: B::Hash, header: &B::Header) { + self.sync.on_block_finalized(&hash, *header.number()) + } + + fn on_remote_call_request( + &mut self, + who: PeerId, + request: message::RemoteCallRequest, + ) { + trace!(target: "sync", "Remote call request {} from {} ({} at {})", + request.id, + who, + request.method, + request.block + ); + let proof = match self.context_data.chain.execution_proof( + &BlockId::Hash(request.block), + &request.method, + &request.data, + ) { + Ok((_, proof)) => proof, + Err(error) => { + trace!(target: "sync", "Remote call request {} from {} ({} at {}) failed with: {}", + request.id, + who, + request.method, + request.block, + error + ); + self.peerset_handle + .report_peer(who.clone(), rep::RPC_FAILED); + StorageProof::empty() + } + }; + + self.send_message( + &who, + None, + GenericMessage::RemoteCallResponse(message::RemoteCallResponse { + id: request.id, + proof, + }), + ); + } + + /// Request a justification for the given block. + /// + /// Uses `protocol` to queue a new justification request and tries to dispatch all pending + /// requests. + pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + self.sync.request_justification(&hash, number) + } + + /// Request syncing for the given block from given set of peers. + /// Uses `protocol` to queue a new block download request and tries to dispatch all pending + /// requests. + pub fn set_sync_fork_request( + &mut self, + peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { + self.sync.set_sync_fork_request(peers, hash, number) + } + + /// A batch of blocks have been processed, with or without errors. + /// Call this when a batch of blocks have been processed by the importqueue, with or without + /// errors. 
+ pub fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + ) { + let results = self.sync.on_blocks_processed(imported, count, results); + for result in results { + match result { + Ok((id, req)) => { + let msg = GenericMessage::BlockRequest(req); + send_request( + &mut self.behaviour, + &mut self.context_data.stats, + &mut self.context_data.peers, + &id, + msg, + ) + } + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu) + } + } + } + } + + /// Call this when a justification has been processed by the import queue, with or without + /// errors. + pub fn justification_import_result( + &mut self, + hash: B::Hash, + number: NumberFor, + success: bool, + ) { + self.sync.on_justification_import(hash, number, success) + } + + /// Request a finality proof for the given block. + /// + /// Queues a new finality proof request and tries to dispatch all pending requests. + pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { + self.sync.request_finality_proof(&hash, number) + } + + /// Notify the protocol that we have learned about the existence of nodes. + /// + /// Can be called multiple times with the same `PeerId`s. 
+ pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { + self.behaviour.add_discovered_nodes(peer_ids) + } + + pub fn finality_proof_import_result( + &mut self, + request_block: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + self.sync + .on_finality_proof_import(request_block, finalization_result) + } + + fn on_remote_read_request( + &mut self, + who: PeerId, + request: message::RemoteReadRequest, + ) { + if request.keys.is_empty() { + debug!(target: "sync", "Invalid remote read request sent by {}", who); + self.behaviour.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); + return; + } + + let keys_str = || match request.keys.len() { + 1 => HexDisplay::from(&request.keys[0]).to_string(), + _ => format!( + "{}..{}", + HexDisplay::from(&request.keys[0]), + HexDisplay::from(&request.keys[request.keys.len() - 1]), + ), + }; + + trace!(target: "sync", "Remote read request {} from {} ({} at {})", request.id, who, keys_str(), request.block); - let proof = match self.context_data.chain.read_proof( - &BlockId::Hash(request.block), - &mut request.keys.iter().map(AsRef::as_ref) - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}", - request.id, - who, - keys_str(), - request.block, - error - ); - StorageProof::empty() - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteReadResponse(message::RemoteReadResponse { - id: request.id, - proof, - }), - ); - } - - fn on_remote_read_child_request( - &mut self, - who: PeerId, - request: message::RemoteReadChildRequest, - ) { - if request.keys.is_empty() { - debug!(target: "sync", "Invalid remote child read request sent by {}", who); - self.behaviour.disconnect_peer(&who); - self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); - return; - } - - let keys_str = || match request.keys.len() { - 1 => HexDisplay::from(&request.keys[0]).to_string(), - _ => format!( 
- "{}..{}", - HexDisplay::from(&request.keys[0]), - HexDisplay::from(&request.keys[request.keys.len() - 1]), - ), - }; - - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", + let proof = match self.context_data.chain.read_proof( + &BlockId::Hash(request.block), + &mut request.keys.iter().map(AsRef::as_ref), + ) { + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote read request {} from {} ({} at {}) failed with: {}", + request.id, + who, + keys_str(), + request.block, + error + ); + StorageProof::empty() + } + }; + self.send_message( + &who, + None, + GenericMessage::RemoteReadResponse(message::RemoteReadResponse { + id: request.id, + proof, + }), + ); + } + + fn on_remote_read_child_request( + &mut self, + who: PeerId, + request: message::RemoteReadChildRequest, + ) { + if request.keys.is_empty() { + debug!(target: "sync", "Invalid remote child read request sent by {}", who); + self.behaviour.disconnect_peer(&who); + self.peerset_handle.report_peer(who, rep::BAD_MESSAGE); + return; + } + + let keys_str = || match request.keys.len() { + 1 => HexDisplay::from(&request.keys[0]).to_string(), + _ => format!( + "{}..{}", + HexDisplay::from(&request.keys[0]), + HexDisplay::from(&request.keys[request.keys.len() - 1]), + ), + }; + + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {})", request.id, who, HexDisplay::from(&request.storage_key), keys_str(), request.block); - let proof = if let Some(child_info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.context_data.chain.read_child_proof( - &BlockId::Hash(request.block), - &request.storage_key, - child_info, - &mut request.keys.iter().map(AsRef::as_ref), - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - HexDisplay::from(&request.storage_key), - keys_str(), - request.block, - error - ); - 
StorageProof::empty() - } - } - } else { - trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", - request.id, - who, - HexDisplay::from(&request.storage_key), - keys_str(), - request.block, - "invalid child info and type", - ); - - StorageProof::empty() - }; - self.send_message( - &who, - None, - GenericMessage::RemoteReadResponse(message::RemoteReadResponse { - id: request.id, - proof, - }), - ); - } - - fn on_remote_header_request( - &mut self, - who: PeerId, - request: message::RemoteHeaderRequest>, - ) { - trace!(target: "sync", "Remote header proof request {} from {} ({})", + let proof = if let Some(child_info) = + ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) + { + match self.context_data.chain.read_child_proof( + &BlockId::Hash(request.block), + &request.storage_key, + child_info, + &mut request.keys.iter().map(AsRef::as_ref), + ) { + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", + request.id, + who, + HexDisplay::from(&request.storage_key), + keys_str(), + request.block, + error + ); + StorageProof::empty() + } + } + } else { + trace!(target: "sync", "Remote read child request {} from {} ({} {} at {}) failed with: {}", + request.id, + who, + HexDisplay::from(&request.storage_key), + keys_str(), + request.block, + "invalid child info and type", + ); + + StorageProof::empty() + }; + self.send_message( + &who, + None, + GenericMessage::RemoteReadResponse(message::RemoteReadResponse { + id: request.id, + proof, + }), + ); + } + + fn on_remote_header_request( + &mut self, + who: PeerId, + request: message::RemoteHeaderRequest>, + ) { + trace!(target: "sync", "Remote header proof request {} from {} ({})", request.id, who, request.block); - let (header, proof) = match self.context_data.chain.header_proof(&BlockId::Number(request.block)) { - Ok((header, proof)) => (Some(header), proof), - Err(error) => { - 
trace!(target: "sync", "Remote header proof request {} from {} ({}) failed with: {}", - request.id, - who, - request.block, - error - ); - (Default::default(), StorageProof::empty()) - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { - id: request.id, - header, - proof, - }), - ); - } - - fn on_remote_changes_request( - &mut self, - who: PeerId, - request: message::RemoteChangesRequest, - ) { - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{})", - request.id, - who, - if let Some(sk) = request.storage_key.as_ref() { - format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&request.key)) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last - ); - let storage_key = request.storage_key.map(|sk| StorageKey(sk)); - let key = StorageKey(request.key); - let proof = match self.context_data.chain.key_changes_proof( - request.first, - request.last, - request.min, - request.max, - storage_key.as_ref(), - &key, - ) { - Ok(proof) => proof, - Err(error) => { - trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", - request.id, - who, - if let Some(sk) = storage_key { - format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) - } else { - HexDisplay::from(&key.0).to_string() - }, - request.first, - request.last, - error - ); - ChangesProof:: { - max_block: Zero::zero(), - proof: vec![], - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - } - }; - self.send_message( - &who, - None, - GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { - id: request.id, - max: proof.max_block, - proof: proof.proof, - roots: proof.roots.into_iter().collect(), - roots_proof: proof.roots_proof, - }), - ); - } - - fn on_finality_proof_request( - &mut self, - who: PeerId, - request: message::FinalityProofRequest, - ) { - trace!(target: "sync", "Finality proof request 
from {} for {}", who, request.block); - let finality_proof = self.finality_proof_provider.as_ref() - .ok_or_else(|| String::from("Finality provider is not configured")) - .and_then(|provider| - provider.prove_finality(request.block, &request.request).map_err(|e| e.to_string()) - ); - let finality_proof = match finality_proof { - Ok(finality_proof) => finality_proof, - Err(error) => { - trace!(target: "sync", "Finality proof request from {} for {} failed with: {}", - who, - request.block, - error - ); - None - }, - }; - self.send_message( - &who, - None, - GenericMessage::FinalityProofResponse(message::FinalityProofResponse { - id: 0, - block: request.block, - proof: finality_proof, - }), - ); - } - - fn on_finality_proof_response( - &mut self, - who: PeerId, - response: message::FinalityProofResponse, - ) -> CustomMessageOutcome { - trace!(target: "sync", "Finality proof response from {} for {}", who, response.block); - match self.sync.on_block_finality_proof(who, response) { - Ok(sync::OnBlockFinalityProof::Nothing) => CustomMessageOutcome::None, - Ok(sync::OnBlockFinalityProof::Import { peer, hash, number, proof }) => - CustomMessageOutcome::FinalityProofImport(peer, hash, number, proof), - Err(sync::BadPeer(id, repu)) => { - self.behaviour.disconnect_peer(&id); - self.peerset_handle.report_peer(id, repu); - CustomMessageOutcome::None - } - } - } - - fn format_stats(&self) -> String { - let mut out = String::new(); - for (id, stats) in &self.context_data.stats { - let _ = writeln!( - &mut out, - "{}: In: {} bytes ({}), Out: {} bytes ({})", - id, - stats.bytes_in, - stats.count_in, - stats.bytes_out, - stats.count_out, - ); - } - out - } - - fn report_metrics(&self) { - use std::convert::TryInto; - - if let Some(metrics) = &self.metrics { - let mut obsolete_requests: u64 = 0; - for peer in self.context_data.peers.values() { - let n = peer.obsolete_requests.len().try_into().unwrap_or(std::u64::MAX); - obsolete_requests = obsolete_requests.saturating_add(n); - } - 
metrics.obsolete_requests.set(obsolete_requests); - - let n = self.handshaking_peers.len().try_into().unwrap_or(std::u64::MAX); - metrics.handshaking_peers.set(n); - - let n = self.context_data.peers.len().try_into().unwrap_or(std::u64::MAX); - metrics.peers.set(n); - - let m = self.sync.metrics(); - - metrics.fork_targets.set(m.fork_targets.into()); - metrics.queued_blocks.set(m.queued_blocks.into()); - - metrics.justifications.with_label_values(&["pending"]) - .set(m.justifications.pending_requests.into()); - metrics.justifications.with_label_values(&["active"]) - .set(m.justifications.active_requests.into()); - metrics.justifications.with_label_values(&["failed"]) - .set(m.justifications.failed_requests.into()); - metrics.justifications.with_label_values(&["importing"]) - .set(m.justifications.importing_requests.into()); - - metrics.finality_proofs.with_label_values(&["pending"]) - .set(m.finality_proofs.pending_requests.into()); - metrics.finality_proofs.with_label_values(&["active"]) - .set(m.finality_proofs.active_requests.into()); - metrics.finality_proofs.with_label_values(&["failed"]) - .set(m.finality_proofs.failed_requests.into()); - metrics.finality_proofs.with_label_values(&["importing"]) - .set(m.finality_proofs.importing_requests.into()); - } - } + let (header, proof) = match self + .context_data + .chain + .header_proof(&BlockId::Number(request.block)) + { + Ok((header, proof)) => (Some(header), proof), + Err(error) => { + trace!(target: "sync", "Remote header proof request {} from {} ({}) failed with: {}", + request.id, + who, + request.block, + error + ); + (Default::default(), StorageProof::empty()) + } + }; + self.send_message( + &who, + None, + GenericMessage::RemoteHeaderResponse(message::RemoteHeaderResponse { + id: request.id, + header, + proof, + }), + ); + } + + fn on_remote_changes_request( + &mut self, + who: PeerId, + request: message::RemoteChangesRequest, + ) { + trace!(target: "sync", "Remote changes proof request {} from {} for key 
{} ({}..{})", + request.id, + who, + if let Some(sk) = request.storage_key.as_ref() { + format!("{} : {}", HexDisplay::from(sk), HexDisplay::from(&request.key)) + } else { + HexDisplay::from(&request.key).to_string() + }, + request.first, + request.last + ); + let storage_key = request.storage_key.map(|sk| StorageKey(sk)); + let key = StorageKey(request.key); + let proof = match self.context_data.chain.key_changes_proof( + request.first, + request.last, + request.min, + request.max, + storage_key.as_ref(), + &key, + ) { + Ok(proof) => proof, + Err(error) => { + trace!(target: "sync", "Remote changes proof request {} from {} for key {} ({}..{}) failed with: {}", + request.id, + who, + if let Some(sk) = storage_key { + format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) + } else { + HexDisplay::from(&key.0).to_string() + }, + request.first, + request.last, + error + ); + ChangesProof:: { + max_block: Zero::zero(), + proof: vec![], + roots: BTreeMap::new(), + roots_proof: StorageProof::empty(), + } + } + }; + self.send_message( + &who, + None, + GenericMessage::RemoteChangesResponse(message::RemoteChangesResponse { + id: request.id, + max: proof.max_block, + proof: proof.proof, + roots: proof.roots.into_iter().collect(), + roots_proof: proof.roots_proof, + }), + ); + } + + fn on_finality_proof_request( + &mut self, + who: PeerId, + request: message::FinalityProofRequest, + ) { + trace!(target: "sync", "Finality proof request from {} for {}", who, request.block); + let finality_proof = self + .finality_proof_provider + .as_ref() + .ok_or_else(|| String::from("Finality provider is not configured")) + .and_then(|provider| { + provider + .prove_finality(request.block, &request.request) + .map_err(|e| e.to_string()) + }); + let finality_proof = match finality_proof { + Ok(finality_proof) => finality_proof, + Err(error) => { + trace!(target: "sync", "Finality proof request from {} for {} failed with: {}", + who, + request.block, + error + ); + None + } + 
}; + self.send_message( + &who, + None, + GenericMessage::FinalityProofResponse(message::FinalityProofResponse { + id: 0, + block: request.block, + proof: finality_proof, + }), + ); + } + + fn on_finality_proof_response( + &mut self, + who: PeerId, + response: message::FinalityProofResponse, + ) -> CustomMessageOutcome { + trace!(target: "sync", "Finality proof response from {} for {}", who, response.block); + match self.sync.on_block_finality_proof(who, response) { + Ok(sync::OnBlockFinalityProof::Nothing) => CustomMessageOutcome::None, + Ok(sync::OnBlockFinalityProof::Import { + peer, + hash, + number, + proof, + }) => CustomMessageOutcome::FinalityProofImport(peer, hash, number, proof), + Err(sync::BadPeer(id, repu)) => { + self.behaviour.disconnect_peer(&id); + self.peerset_handle.report_peer(id, repu); + CustomMessageOutcome::None + } + } + } + + fn format_stats(&self) -> String { + let mut out = String::new(); + for (id, stats) in &self.context_data.stats { + let _ = writeln!( + &mut out, + "{}: In: {} bytes ({}), Out: {} bytes ({})", + id, stats.bytes_in, stats.count_in, stats.bytes_out, stats.count_out, + ); + } + out + } + + fn report_metrics(&self) { + use std::convert::TryInto; + + if let Some(metrics) = &self.metrics { + let mut obsolete_requests: u64 = 0; + for peer in self.context_data.peers.values() { + let n = peer + .obsolete_requests + .len() + .try_into() + .unwrap_or(std::u64::MAX); + obsolete_requests = obsolete_requests.saturating_add(n); + } + metrics.obsolete_requests.set(obsolete_requests); + + let n = self + .handshaking_peers + .len() + .try_into() + .unwrap_or(std::u64::MAX); + metrics.handshaking_peers.set(n); + + let n = self + .context_data + .peers + .len() + .try_into() + .unwrap_or(std::u64::MAX); + metrics.peers.set(n); + + let m = self.sync.metrics(); + + metrics.fork_targets.set(m.fork_targets.into()); + metrics.queued_blocks.set(m.queued_blocks.into()); + + metrics + .justifications + .with_label_values(&["pending"]) + 
.set(m.justifications.pending_requests.into()); + metrics + .justifications + .with_label_values(&["active"]) + .set(m.justifications.active_requests.into()); + metrics + .justifications + .with_label_values(&["failed"]) + .set(m.justifications.failed_requests.into()); + metrics + .justifications + .with_label_values(&["importing"]) + .set(m.justifications.importing_requests.into()); + + metrics + .finality_proofs + .with_label_values(&["pending"]) + .set(m.finality_proofs.pending_requests.into()); + metrics + .finality_proofs + .with_label_values(&["active"]) + .set(m.finality_proofs.active_requests.into()); + metrics + .finality_proofs + .with_label_values(&["failed"]) + .set(m.finality_proofs.failed_requests.into()); + metrics + .finality_proofs + .with_label_values(&["importing"]) + .set(m.finality_proofs.importing_requests.into()); + } + } } /// Outcome of an incoming custom message. #[derive(Debug)] pub enum CustomMessageOutcome { - BlockImport(BlockOrigin, Vec>), - JustificationImport(Origin, B::Hash, NumberFor, Justification), - FinalityProofImport(Origin, B::Hash, NumberFor, Vec), - /// Notification protocols have been opened with a remote. - NotificationStreamOpened { remote: PeerId, protocols: Vec, roles: Roles }, - /// Notification protocols have been closed with a remote. - NotificationStreamClosed { remote: PeerId, protocols: Vec }, - /// Messages have been received on one or more notifications protocols. - NotificationsReceived { remote: PeerId, messages: Vec<(ConsensusEngineId, Bytes)> }, - /// Peer has a reported a new head of chain. - PeerNewBest(PeerId, NumberFor), - None, + BlockImport(BlockOrigin, Vec>), + JustificationImport(Origin, B::Hash, NumberFor, Justification), + FinalityProofImport(Origin, B::Hash, NumberFor, Vec), + /// Notification protocols have been opened with a remote. + NotificationStreamOpened { + remote: PeerId, + protocols: Vec, + roles: Roles, + }, + /// Notification protocols have been closed with a remote. 
+ NotificationStreamClosed { + remote: PeerId, + protocols: Vec, + }, + /// Messages have been received on one or more notifications protocols. + NotificationsReceived { + remote: PeerId, + messages: Vec<(ConsensusEngineId, Bytes)>, + }, + /// Peer has a reported a new head of chain. + PeerNewBest(PeerId, NumberFor), + None, } fn send_request( - behaviour: &mut GenericProto, - stats: &mut HashMap<&'static str, PacketStats>, - peers: &mut HashMap>, - who: &PeerId, - mut message: Message, + behaviour: &mut GenericProto, + stats: &mut HashMap<&'static str, PacketStats>, + peers: &mut HashMap>, + who: &PeerId, + mut message: Message, ) { - if let GenericMessage::BlockRequest(ref mut r) = message { - if let Some(ref mut peer) = peers.get_mut(who) { - r.id = peer.next_request_id; - peer.next_request_id = peer.next_request_id + 1; - if let Some((timestamp, request)) = peer.block_request.take() { - trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); - peer.obsolete_requests.insert(request.id, timestamp); - } - peer.block_request = Some((Instant::now(), r.clone())); - } - } - send_message::(behaviour, stats, who, None, message) + if let GenericMessage::BlockRequest(ref mut r) = message { + if let Some(ref mut peer) = peers.get_mut(who) { + r.id = peer.next_request_id; + peer.next_request_id = peer.next_request_id + 1; + if let Some((timestamp, request)) = peer.block_request.take() { + trace!(target: "sync", "Request {} for {} is now obsolete.", request.id, who); + peer.obsolete_requests.insert(request.id, timestamp); + } + peer.block_request = Some((Instant::now(), r.clone())); + } + } + send_message::(behaviour, stats, who, None, message) } fn send_message( - behaviour: &mut GenericProto, - stats: &mut HashMap<&'static str, PacketStats>, - who: &PeerId, - message: Option<(Cow<'static, [u8]>, Vec)>, - legacy_message: Message, + behaviour: &mut GenericProto, + stats: &mut HashMap<&'static str, PacketStats>, + who: &PeerId, + message: 
Option<(Cow<'static, [u8]>, Vec)>, + legacy_message: Message, ) { - let encoded = legacy_message.encode(); - let mut stats = stats.entry(legacy_message.id()).or_default(); - stats.bytes_out += encoded.len() as u64; - stats.count_out += 1; - if let Some((proto, msg)) = message { - behaviour.write_notification(who, proto, msg, encoded); - } else { - behaviour.send_packet(who, encoded); - } + let encoded = legacy_message.encode(); + let mut stats = stats.entry(legacy_message.id()).or_default(); + stats.bytes_out += encoded.len() as u64; + stats.count_out += 1; + if let Some((proto, msg)) = message { + behaviour.write_notification(who, proto, msg, encoded); + } else { + behaviour.send_packet(who, encoded); + } } impl NetworkBehaviour for Protocol { - type ProtocolsHandler = ::ProtocolsHandler; - type OutEvent = CustomMessageOutcome; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - self.behaviour.new_handler() - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - self.behaviour.addresses_of_peer(peer_id) - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.behaviour.inject_connection_established(peer_id, conn, endpoint) - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.behaviour.inject_connection_closed(peer_id, conn, endpoint) - } - - fn inject_connected(&mut self, peer_id: &PeerId) { - self.behaviour.inject_connected(peer_id) - } - - fn inject_disconnected(&mut self, peer_id: &PeerId) { - self.behaviour.inject_disconnected(peer_id) - } - - fn inject_event( - &mut self, - peer_id: PeerId, - connection: ConnectionId, - event: <::Handler as ProtocolsHandler>::OutEvent, - ) { - self.behaviour.inject_event(peer_id, connection, event) - } + type ProtocolsHandler = ::ProtocolsHandler; + type OutEvent = CustomMessageOutcome; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + 
self.behaviour.new_handler() + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + self.behaviour.addresses_of_peer(peer_id) + } + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.behaviour + .inject_connection_established(peer_id, conn, endpoint) + } + + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.behaviour + .inject_connection_closed(peer_id, conn, endpoint) + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + self.behaviour.inject_connected(peer_id) + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.behaviour.inject_disconnected(peer_id) + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: <::Handler as ProtocolsHandler>::OutEvent, + ) { + self.behaviour.inject_event(peer_id, connection, event) + } fn poll( &mut self, @@ -1865,198 +2011,221 @@ impl NetworkBehaviour for Protocol { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent > - > { - if let Some(message) = self.pending_messages.pop_front() { - return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); - } - - while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { - self.tick(); - } - - while let Poll::Ready(Some(())) = self.propagate_timeout.poll_next_unpin(cx) { - self.propagate_extrinsics(); - } - - for (id, r) in self.sync.block_requests() { - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - &id, - GenericMessage::BlockRequest(r) - ) - } - for (id, r) in self.sync.justification_requests() { - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut self.context_data.peers, - &id, - GenericMessage::BlockRequest(r) - ) - } - for (id, r) in self.sync.finality_proof_requests() { - send_request( - &mut self.behaviour, - &mut self.context_data.stats, - &mut 
self.context_data.peers, - &id, - GenericMessage::FinalityProofRequest(r)) - } - - let event = match self.behaviour.poll(cx, params) { - Poll::Pending => return Poll::Pending, - Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev, - Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => - return Poll::Ready(NetworkBehaviourAction::DialAddress { address }), - Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => - return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }), - Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }) => - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, handler, event }), - Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => - return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }), - }; - - let outcome = match event { - GenericProtoOut::CustomProtocolOpen { peer_id, .. } => { - self.on_peer_connected(peer_id.clone()); - CustomMessageOutcome::None - } - GenericProtoOut::CustomProtocolClosed { peer_id, .. 
} => { - self.on_peer_disconnected(peer_id.clone()) - }, - GenericProtoOut::LegacyMessage { peer_id, message } => - self.on_custom_message(peer_id, message), - GenericProtoOut::Notification { peer_id, protocol_name, message } => - match self.legacy_equiv_by_name.get(&protocol_name) { - Some(Fallback::Consensus(engine_id)) => { - CustomMessageOutcome::NotificationsReceived { - remote: peer_id, - messages: vec![(*engine_id, message.freeze())], - } - } - Some(Fallback::Transactions) => { - if let Ok(m) = message::Transactions::decode(&mut message.as_ref()) { - self.on_extrinsics(peer_id, m); - } else { - warn!(target: "sub-libp2p", "Failed to decode transactions list"); - } - CustomMessageOutcome::None - } - Some(Fallback::BlockAnnounce) => { - if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { - let outcome = self.on_block_announce(peer_id.clone(), announce); - self.update_peer_info(&peer_id); - outcome - } else { - warn!(target: "sub-libp2p", "Failed to decode block announce"); - CustomMessageOutcome::None - } - } - None => { - error!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); - CustomMessageOutcome::None - } - } - GenericProtoOut::Clogged { peer_id, messages } => { - debug!(target: "sync", "{} clogging messages:", messages.len()); - for msg in messages.into_iter().take(5) { - let message: Option> = Decode::decode(&mut &msg[..]).ok(); - debug!(target: "sync", "{:?}", message); - self.on_clogged_peer(peer_id.clone(), message); - } - CustomMessageOutcome::None - } - }; - - if let CustomMessageOutcome::None = outcome { - Poll::Pending - } else { - Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) - } - } - - fn inject_addr_reach_failure( - &mut self, - peer_id: Option<&PeerId>, - addr: &Multiaddr, - error: &dyn std::error::Error - ) { - self.behaviour.inject_addr_reach_failure(peer_id, addr, error) - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - 
self.behaviour.inject_dial_failure(peer_id) - } - - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.behaviour.inject_new_listen_addr(addr) - } - - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.behaviour.inject_expired_listen_addr(addr) - } - - fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - self.behaviour.inject_new_external_addr(addr) - } - - fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { - self.behaviour.inject_listener_error(id, err); - } - - fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { - self.behaviour.inject_listener_closed(id, reason); - } +>{ + if let Some(message) = self.pending_messages.pop_front() { + return Poll::Ready(NetworkBehaviourAction::GenerateEvent(message)); + } + + while let Poll::Ready(Some(())) = self.tick_timeout.poll_next_unpin(cx) { + self.tick(); + } + + while let Poll::Ready(Some(())) = self.propagate_timeout.poll_next_unpin(cx) { + self.propagate_extrinsics(); + } + + for (id, r) in self.sync.block_requests() { + send_request( + &mut self.behaviour, + &mut self.context_data.stats, + &mut self.context_data.peers, + &id, + GenericMessage::BlockRequest(r), + ) + } + for (id, r) in self.sync.justification_requests() { + send_request( + &mut self.behaviour, + &mut self.context_data.stats, + &mut self.context_data.peers, + &id, + GenericMessage::BlockRequest(r), + ) + } + for (id, r) in self.sync.finality_proof_requests() { + send_request( + &mut self.behaviour, + &mut self.context_data.stats, + &mut self.context_data.peers, + &id, + GenericMessage::FinalityProofRequest(r), + ) + } + + let event = match self.behaviour.poll(cx, params) { + Poll::Pending => return Poll::Pending, + Poll::Ready(NetworkBehaviourAction::GenerateEvent(ev)) => ev, + Poll::Ready(NetworkBehaviourAction::DialAddress { address }) => { + return Poll::Ready(NetworkBehaviourAction::DialAddress { address }) + } + 
Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) => { + return Poll::Ready(NetworkBehaviourAction::DialPeer { peer_id, condition }) + } + Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }) => { + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }) + } + Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) => { + return Poll::Ready(NetworkBehaviourAction::ReportObservedAddr { address }) + } + }; + + let outcome = match event { + GenericProtoOut::CustomProtocolOpen { peer_id, .. } => { + self.on_peer_connected(peer_id.clone()); + CustomMessageOutcome::None + } + GenericProtoOut::CustomProtocolClosed { peer_id, .. } => { + self.on_peer_disconnected(peer_id.clone()) + } + GenericProtoOut::LegacyMessage { peer_id, message } => { + self.on_custom_message(peer_id, message) + } + GenericProtoOut::Notification { + peer_id, + protocol_name, + message, + } => match self.legacy_equiv_by_name.get(&protocol_name) { + Some(Fallback::Consensus(engine_id)) => { + CustomMessageOutcome::NotificationsReceived { + remote: peer_id, + messages: vec![(*engine_id, message.freeze())], + } + } + Some(Fallback::Transactions) => { + if let Ok(m) = message::Transactions::decode(&mut message.as_ref()) { + self.on_extrinsics(peer_id, m); + } else { + warn!(target: "sub-libp2p", "Failed to decode transactions list"); + } + CustomMessageOutcome::None + } + Some(Fallback::BlockAnnounce) => { + if let Ok(announce) = message::BlockAnnounce::decode(&mut message.as_ref()) { + let outcome = self.on_block_announce(peer_id.clone(), announce); + self.update_peer_info(&peer_id); + outcome + } else { + warn!(target: "sub-libp2p", "Failed to decode block announce"); + CustomMessageOutcome::None + } + } + None => { + error!(target: "sub-libp2p", "Received notification from unknown protocol {:?}", protocol_name); + CustomMessageOutcome::None + } + }, + GenericProtoOut::Clogged { peer_id, messages } 
=> { + debug!(target: "sync", "{} clogging messages:", messages.len()); + for msg in messages.into_iter().take(5) { + let message: Option> = Decode::decode(&mut &msg[..]).ok(); + debug!(target: "sync", "{:?}", message); + self.on_clogged_peer(peer_id.clone(), message); + } + CustomMessageOutcome::None + } + }; + + if let CustomMessageOutcome::None = outcome { + Poll::Pending + } else { + Poll::Ready(NetworkBehaviourAction::GenerateEvent(outcome)) + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error, + ) { + self.behaviour + .inject_addr_reach_failure(peer_id, addr, error) + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + self.behaviour.inject_dial_failure(peer_id) + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + self.behaviour.inject_new_listen_addr(addr) + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + self.behaviour.inject_expired_listen_addr(addr) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + self.behaviour.inject_new_external_addr(addr) + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn std::error::Error + 'static)) { + self.behaviour.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.behaviour.inject_listener_closed(id, reason); + } } impl Drop for Protocol { - fn drop(&mut self) { - debug!(target: "sync", "Network stats:\n{}", self.format_stats()); - } + fn drop(&mut self) { + debug!(target: "sync", "Network stats:\n{}", self.format_stats()); + } } #[cfg(test)] mod tests { - use crate::PeerId; - use crate::config::EmptyTransactionPool; - use super::{CustomMessageOutcome, Protocol, ProtocolConfig}; - - use sp_consensus::block_validation::DefaultBlockAnnounceValidator; - use std::sync::Arc; - use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; - use 
substrate_test_runtime_client::runtime::{Block, Hash}; - - #[test] - fn no_handshake_no_notif_closed() { - let client = Arc::new(TestClientBuilder::with_default_backend().build_with_longest_chain().0); - - let (mut protocol, _) = Protocol::::new( - ProtocolConfig::default(), - client.clone(), - Arc::new(EmptyTransactionPool), - None, - None, - From::from(&b"test"[..]), - sc_peerset::PeersetConfig { - in_peers: 10, - out_peers: 10, - bootnodes: Vec::new(), - reserved_only: false, - priority_groups: Vec::new(), - }, - Box::new(DefaultBlockAnnounceValidator::new(client.clone())), - None, - Default::default(), - None, - ).unwrap(); - - let dummy_peer_id = PeerId::random(); - let _ = protocol.on_peer_connected(dummy_peer_id.clone()); - match protocol.on_peer_disconnected(dummy_peer_id) { - CustomMessageOutcome::None => {}, - _ => panic!() - }; - } + use super::{CustomMessageOutcome, Protocol, ProtocolConfig}; + use crate::config::EmptyTransactionPool; + use crate::PeerId; + + use sp_consensus::block_validation::DefaultBlockAnnounceValidator; + use std::sync::Arc; + use substrate_test_runtime_client::runtime::{Block, Hash}; + use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt}; + + #[test] + fn no_handshake_no_notif_closed() { + let client = Arc::new( + TestClientBuilder::with_default_backend() + .build_with_longest_chain() + .0, + ); + + let (mut protocol, _) = Protocol::::new( + ProtocolConfig::default(), + client.clone(), + Arc::new(EmptyTransactionPool), + None, + None, + From::from(&b"test"[..]), + sc_peerset::PeersetConfig { + in_peers: 10, + out_peers: 10, + bootnodes: Vec::new(), + reserved_only: false, + priority_groups: Vec::new(), + }, + Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + None, + Default::default(), + None, + ) + .unwrap(); + + let dummy_peer_id = PeerId::random(); + let _ = protocol.on_peer_connected(dummy_peer_id.clone()); + match protocol.on_peer_disconnected(dummy_peer_id) { + CustomMessageOutcome::None 
=> {} + _ => panic!(), + }; + } } diff --git a/client/network/src/protocol/block_requests.rs b/client/network/src/protocol/block_requests.rs index 6af5023d39..ede0765cd0 100644 --- a/client/network/src/protocol/block_requests.rs +++ b/client/network/src/protocol/block_requests.rs @@ -22,44 +22,39 @@ #![allow(unused)] -use bytes::Bytes; -use codec::{Encode, Decode}; use crate::{ - chain::Client, - config::ProtocolId, - protocol::{api, message::BlockAttributes} + chain::Client, + config::ProtocolId, + protocol::{api, message::BlockAttributes}, }; +use bytes::Bytes; +use codec::{Decode, Encode}; use futures::{future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{DeniedUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol - } + core::{ + connection::ConnectionId, + upgrade::{read_one, write_one, DeniedUpgrade}, + upgrade::{InboundUpgrade, Negotiated, ReadOneError, UpgradeInfo}, + ConnectedPoint, Multiaddr, PeerId, + }, + swarm::{ + NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, OneShotHandler, + OneShotHandlerConfig, PollParameters, SubstreamProtocol, + }, }; use prost::Message; -use sp_runtime::{generic::BlockId, traits::{Block, Header, One, Zero}}; +use sp_runtime::{ + generic::BlockId, + traits::{Block, Header, One, Zero}, +}; use std::{ - cmp::min, - io, - iter, - sync::Arc, - time::Duration, - task::{Context, Poll} + cmp::min, + io, iter, + sync::Arc, + task::{Context, Poll}, + time::Duration, }; -use void::{Void, unreachable}; +use void::{unreachable, Void}; // Type alias for convenience. pub type Error = Box; @@ -67,242 +62,250 @@ pub type Error = Box; /// Configuration options for `BlockRequests`. 
#[derive(Debug, Clone)] pub struct Config { - max_block_data_response: u32, - max_request_len: usize, - inactivity_timeout: Duration, - protocol: Bytes, + max_block_data_response: u32, + max_request_len: usize, + inactivity_timeout: Duration, + protocol: Bytes, } impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. block data in response = 128 - /// - max. request size = 1 MiB - /// - inactivity timeout = 15s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_block_data_response: 128, - max_request_len: 1024 * 1024, - inactivity_timeout: Duration::from_secs(15), - protocol: Bytes::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. number of block data in a response. - pub fn set_max_block_data_response(&mut self, v: u32) -> &mut Self { - self.max_block_data_response = v; - self - } - - /// Limit the max. length of incoming block request bytes. - pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { - self.max_request_len = v; - self - } - - /// Limit the max. duration the substream may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Set protocol to use for upgrade negotiation. - pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut v = Vec::new(); - v.extend_from_slice(b"/"); - v.extend_from_slice(id.as_bytes()); - v.extend_from_slice(b"/sync/2"); - self.protocol = v.into(); - self - } + /// Create a fresh configuration with the following options: + /// + /// - max. block data in response = 128 + /// - max. request size = 1 MiB + /// - inactivity timeout = 15s + pub fn new(id: &ProtocolId) -> Self { + let mut c = Config { + max_block_data_response: 128, + max_request_len: 1024 * 1024, + inactivity_timeout: Duration::from_secs(15), + protocol: Bytes::new(), + }; + c.set_protocol(id); + c + } + + /// Limit the max. number of block data in a response. 
+ pub fn set_max_block_data_response(&mut self, v: u32) -> &mut Self { + self.max_block_data_response = v; + self + } + + /// Limit the max. length of incoming block request bytes. + pub fn set_max_request_len(&mut self, v: usize) -> &mut Self { + self.max_request_len = v; + self + } + + /// Limit the max. duration the substream may remain inactive before closing it. + pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { + self.inactivity_timeout = v; + self + } + + /// Set protocol to use for upgrade negotiation. + pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { + let mut v = Vec::new(); + v.extend_from_slice(b"/"); + v.extend_from_slice(id.as_bytes()); + v.extend_from_slice(b"/sync/2"); + self.protocol = v.into(); + self + } } /// The block request handling behaviour. pub struct BlockRequests { - /// This behaviour's configuration. - config: Config, - /// Blockchain client. - chain: Arc>, - /// Futures sending back the block request response. - outgoing: FuturesUnordered>, + /// This behaviour's configuration. + config: Config, + /// Blockchain client. + chain: Arc>, + /// Futures sending back the block request response. + outgoing: FuturesUnordered>, } impl BlockRequests where - B: Block, + B: Block, { - pub fn new(cfg: Config, chain: Arc>) -> Self { - BlockRequests { - config: cfg, - chain, - outgoing: FuturesUnordered::new(), - } - } - - /// Callback, invoked when a new block request has been received from remote. 
- fn on_block_request - ( &mut self - , peer: &PeerId - , request: &api::v1::BlockRequest - ) -> Result - { - log::trace!("block request from peer {}: from block {:?} to block {:?}, max blocks {:?}", - peer, - request.from_block, - request.to_block, - request.max_blocks); - - let from_block_id = - match request.from_block { - Some(api::v1::block_request::FromBlock::Hash(ref h)) => { - let h = Decode::decode(&mut h.as_ref())?; - BlockId::::Hash(h) - } - Some(api::v1::block_request::FromBlock::Number(ref n)) => { - let n = Decode::decode(&mut n.as_ref())?; - BlockId::::Number(n) - } - None => { - let msg = "missing `BlockRequest::from_block` field"; - return Err(io::Error::new(io::ErrorKind::Other, msg).into()) - } - }; - - let max_blocks = - if request.max_blocks == 0 { - self.config.max_block_data_response - } else { - min(request.max_blocks, self.config.max_block_data_response) - }; - - let direction = - if request.direction == api::v1::Direction::Ascending as i32 { - api::v1::Direction::Ascending - } else if request.direction == api::v1::Direction::Descending as i32 { - api::v1::Direction::Descending - } else { - let msg = format!("invalid `BlockRequest::direction` value: {}", request.direction); - return Err(io::Error::new(io::ErrorKind::Other, msg).into()) - }; - - let attributes = BlockAttributes::decode(&mut request.fields.to_be_bytes().as_ref())?; - let get_header = attributes.contains(BlockAttributes::HEADER); - let get_body = attributes.contains(BlockAttributes::BODY); - let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); - - let mut blocks = Vec::new(); - let mut block_id = from_block_id; - while let Some(header) = self.chain.header(block_id).unwrap_or(None) { - if blocks.len() >= max_blocks as usize { - break - } - - let number = header.number().clone(); - let hash = header.hash(); - let parent_hash = header.parent_hash().clone(); - - let block_data = api::v1::BlockData { - hash: hash.encode(), - header: if get_header { - 
header.encode() - } else { - Vec::new() - }, - body: if get_body { - self.chain.block_body(&BlockId::Hash(hash))? - .unwrap_or(Vec::new()) - .iter_mut() - .map(|extrinsic| extrinsic.encode()) - .collect() - } else { - Vec::new() - }, - receipt: Vec::new(), - message_queue: Vec::new(), - justification: if get_justification { - self.chain.justification(&BlockId::Hash(hash))?.unwrap_or(Vec::new()) - } else { - Vec::new() - } - }; - - blocks.push(block_data); - - match direction { - api::v1::Direction::Ascending => { - block_id = BlockId::Number(number + One::one()) - } - api::v1::Direction::Descending => { - if number.is_zero() { - break - } - block_id = BlockId::Hash(parent_hash) - } - } - } - - Ok(api::v1::BlockResponse { blocks }) - } + pub fn new(cfg: Config, chain: Arc>) -> Self { + BlockRequests { + config: cfg, + chain, + outgoing: FuturesUnordered::new(), + } + } + + /// Callback, invoked when a new block request has been received from remote. + fn on_block_request( + &mut self, + peer: &PeerId, + request: &api::v1::BlockRequest, + ) -> Result { + log::trace!( + "block request from peer {}: from block {:?} to block {:?}, max blocks {:?}", + peer, + request.from_block, + request.to_block, + request.max_blocks + ); + + let from_block_id = match request.from_block { + Some(api::v1::block_request::FromBlock::Hash(ref h)) => { + let h = Decode::decode(&mut h.as_ref())?; + BlockId::::Hash(h) + } + Some(api::v1::block_request::FromBlock::Number(ref n)) => { + let n = Decode::decode(&mut n.as_ref())?; + BlockId::::Number(n) + } + None => { + let msg = "missing `BlockRequest::from_block` field"; + return Err(io::Error::new(io::ErrorKind::Other, msg).into()); + } + }; + + let max_blocks = if request.max_blocks == 0 { + self.config.max_block_data_response + } else { + min(request.max_blocks, self.config.max_block_data_response) + }; + + let direction = if request.direction == api::v1::Direction::Ascending as i32 { + api::v1::Direction::Ascending + } else if 
request.direction == api::v1::Direction::Descending as i32 { + api::v1::Direction::Descending + } else { + let msg = format!( + "invalid `BlockRequest::direction` value: {}", + request.direction + ); + return Err(io::Error::new(io::ErrorKind::Other, msg).into()); + }; + + let attributes = BlockAttributes::decode(&mut request.fields.to_be_bytes().as_ref())?; + let get_header = attributes.contains(BlockAttributes::HEADER); + let get_body = attributes.contains(BlockAttributes::BODY); + let get_justification = attributes.contains(BlockAttributes::JUSTIFICATION); + + let mut blocks = Vec::new(); + let mut block_id = from_block_id; + while let Some(header) = self.chain.header(block_id).unwrap_or(None) { + if blocks.len() >= max_blocks as usize { + break; + } + + let number = header.number().clone(); + let hash = header.hash(); + let parent_hash = header.parent_hash().clone(); + + let block_data = api::v1::BlockData { + hash: hash.encode(), + header: if get_header { + header.encode() + } else { + Vec::new() + }, + body: if get_body { + self.chain + .block_body(&BlockId::Hash(hash))? + .unwrap_or(Vec::new()) + .iter_mut() + .map(|extrinsic| extrinsic.encode()) + .collect() + } else { + Vec::new() + }, + receipt: Vec::new(), + message_queue: Vec::new(), + justification: if get_justification { + self.chain + .justification(&BlockId::Hash(hash))? 
+ .unwrap_or(Vec::new()) + } else { + Vec::new() + }, + }; + + blocks.push(block_data); + + match direction { + api::v1::Direction::Ascending => block_id = BlockId::Number(number + One::one()), + api::v1::Direction::Descending => { + if number.is_zero() { + break; + } + block_id = BlockId::Hash(parent_hash) + } + } + } + + Ok(api::v1::BlockResponse { blocks }) + } } impl NetworkBehaviour for BlockRequests where - B: Block + B: Block, { - type ProtocolsHandler = OneShotHandler>; - type OutEvent = Void; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = Protocol { - max_request_len: self.config.max_request_len, - protocol: self.config.protocol.clone(), - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.inactive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p), cfg) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, _peer: &PeerId) { - } - - fn inject_disconnected(&mut self, _peer: &PeerId) { - } - - fn inject_event( - &mut self, - peer: PeerId, - connection: ConnectionId, - Request(request, mut stream): Request - ) { - match self.on_block_request(&peer, &request) { - Ok(res) => { - log::trace!("enqueueing block response for peer {} with {} blocks", peer, res.blocks.len()); - let mut data = Vec::with_capacity(res.encoded_len()); - if let Err(e) = res.encode(&mut data) { - log::debug!("error encoding block response for peer {}: {}", peer, e) - } else { - let future = async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!("error writing block response: {}", e) - } - }; - self.outgoing.push(future.boxed()) - } - } - Err(e) => log::debug!("error handling block request from peer {}: {}", peer, e) - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) -> Poll> { - while let Poll::Ready(Some(_)) = self.outgoing.poll_next_unpin(cx) {} - Poll::Pending - } + type ProtocolsHandler = OneShotHandler>; + 
type OutEvent = Void; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let p = Protocol { + max_request_len: self.config.max_request_len, + protocol: self.config.protocol.clone(), + }; + let mut cfg = OneShotHandlerConfig::default(); + cfg.inactive_timeout = self.config.inactivity_timeout; + OneShotHandler::new(SubstreamProtocol::new(p), cfg) + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _peer: &PeerId) {} + + fn inject_disconnected(&mut self, _peer: &PeerId) {} + + fn inject_event( + &mut self, + peer: PeerId, + connection: ConnectionId, + Request(request, mut stream): Request, + ) { + match self.on_block_request(&peer, &request) { + Ok(res) => { + log::trace!( + "enqueueing block response for peer {} with {} blocks", + peer, + res.blocks.len() + ); + let mut data = Vec::with_capacity(res.encoded_len()); + if let Err(e) = res.encode(&mut data) { + log::debug!("error encoding block response for peer {}: {}", peer, e) + } else { + let future = async move { + if let Err(e) = write_one(&mut stream, data).await { + log::debug!("error writing block response: {}", e) + } + }; + self.outgoing.push(future.boxed()) + } + } + Err(e) => log::debug!("error handling block request from peer {}: {}", peer, e), + } + } + + fn poll( + &mut self, + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll> { + while let Poll::Ready(Some(_)) = self.outgoing.poll_next_unpin(cx) {} + Poll::Pending + } } /// The incoming block request. @@ -313,9 +316,9 @@ where pub struct Request(api::v1::BlockRequest, T); impl From for Request { - fn from(v: Void) -> Self { - unreachable(v) - } + fn from(v: Void) -> Self { + unreachable(v) + } } /// Substream upgrade protocol. @@ -326,10 +329,10 @@ impl From for Request { /// relevant callback to process the message and prepare a response. #[derive(Debug, Clone)] pub struct Protocol { - /// The max. request length in bytes. 
- max_request_len: usize, - /// The protocol to use during upgrade negotiation. - protocol: Bytes, + /// The max. request length in bytes. + max_request_len: usize, + /// The protocol to use during upgrade negotiation. + protocol: Bytes, } impl UpgradeInfo for Protocol { @@ -343,22 +346,21 @@ impl UpgradeInfo for Protocol { impl InboundUpgrade for Protocol where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { type Output = Request; type Error = ReadOneError; type Future = BoxFuture<'static, Result>; fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - let future = async move { - let len = self.max_request_len; - let vec = read_one(&mut s, len).await?; - match api::v1::BlockRequest::decode(&vec[..]) { - Ok(r) => Ok(Request(r, s)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }; - future.boxed() - } + let future = async move { + let len = self.max_request_len; + let vec = read_one(&mut s, len).await?; + match api::v1::BlockRequest::decode(&vec[..]) { + Ok(r) => Ok(Request(r, s)), + Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))), + } + }; + future.boxed() + } } - diff --git a/client/network/src/protocol/event.rs b/client/network/src/protocol/event.rs index 637bf805b5..5902c6cf71 100644 --- a/client/network/src/protocol/event.rs +++ b/client/network/src/protocol/event.rs @@ -26,75 +26,75 @@ use sp_runtime::ConsensusEngineId; #[derive(Debug, Clone)] #[must_use] pub enum DhtEvent { - /// The value was found. - ValueFound(Vec<(Key, Vec)>), + /// The value was found. + ValueFound(Vec<(Key, Vec)>), - /// The requested record has not been found in the DHT. - ValueNotFound(Key), + /// The requested record has not been found in the DHT. + ValueNotFound(Key), - /// The record has been successfully inserted into the DHT. - ValuePut(Key), + /// The record has been successfully inserted into the DHT. 
+ ValuePut(Key), - /// An error has occurred while putting a record into the DHT. - ValuePutFailed(Key), + /// An error has occurred while putting a record into the DHT. + ValuePutFailed(Key), } /// Type for events generated by networking layer. #[derive(Debug, Clone)] #[must_use] pub enum Event { - /// Event generated by a DHT. - Dht(DhtEvent), + /// Event generated by a DHT. + Dht(DhtEvent), - /// Opened a substream with the given node with the given notifications protocol. - /// - /// The protocol is always one of the notification protocols that have been registered. - NotificationStreamOpened { - /// Node we opened the substream with. - remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, - /// Role of the remote. - role: ObservedRole, - }, + /// Opened a substream with the given node with the given notifications protocol. + /// + /// The protocol is always one of the notification protocols that have been registered. + NotificationStreamOpened { + /// Node we opened the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + /// Role of the remote. + role: ObservedRole, + }, - /// Closed a substream with the given node. Always matches a corresponding previous - /// `NotificationStreamOpened` message. - NotificationStreamClosed { - /// Node we closed the substream with. - remote: PeerId, - /// The concerned protocol. Each protocol uses a different substream. - engine_id: ConsensusEngineId, - }, + /// Closed a substream with the given node. Always matches a corresponding previous + /// `NotificationStreamOpened` message. + NotificationStreamClosed { + /// Node we closed the substream with. + remote: PeerId, + /// The concerned protocol. Each protocol uses a different substream. + engine_id: ConsensusEngineId, + }, - /// Received one or more messages from the given node using the given protocol. 
- NotificationsReceived { - /// Node we received the message from. - remote: PeerId, - /// Concerned protocol and associated message. - messages: Vec<(ConsensusEngineId, Bytes)>, - }, + /// Received one or more messages from the given node using the given protocol. + NotificationsReceived { + /// Node we received the message from. + remote: PeerId, + /// Concerned protocol and associated message. + messages: Vec<(ConsensusEngineId, Bytes)>, + }, } /// Role that the peer sent to us during the handshake, with the addition of what our local node /// knows about that peer. #[derive(Debug, Clone)] pub enum ObservedRole { - /// Full node. - Full, - /// Light node. - Light, - /// When we are a validator node, this is a sentry that protects us. - OurSentry, - /// When we are a sentry node, this is the authority we are protecting. - OurGuardedAuthority, - /// Third-party authority. - Authority, + /// Full node. + Full, + /// Light node. + Light, + /// When we are a validator node, this is a sentry that protects us. + OurSentry, + /// When we are a sentry node, this is the authority we are protecting. + OurGuardedAuthority, + /// Third-party authority. + Authority, } impl ObservedRole { - /// Returns `true` for `ObservedRole::Light`. - pub fn is_light(&self) -> bool { - matches!(self, ObservedRole::Light) - } + /// Returns `true` for `ObservedRole::Light`. 
+ pub fn is_light(&self) -> bool { + matches!(self, ObservedRole::Light) + } } diff --git a/client/network/src/protocol/generic_proto.rs b/client/network/src/protocol/generic_proto.rs index cf8434d8bc..e19c04fd00 100644 --- a/client/network/src/protocol/generic_proto.rs +++ b/client/network/src/protocol/generic_proto.rs @@ -25,5 +25,5 @@ pub use self::handler::LegacyConnectionKillError; mod behaviour; mod handler; -mod upgrade; mod tests; +mod upgrade; diff --git a/client/network/src/protocol/generic_proto/behaviour.rs b/client/network/src/protocol/generic_proto/behaviour.rs index e62edb3733..47c4d7a4de 100644 --- a/client/network/src/protocol/generic_proto/behaviour.rs +++ b/client/network/src/protocol/generic_proto/behaviour.rs @@ -15,19 +15,17 @@ // along with Substrate. If not, see . use crate::config::ProtocolId; -use crate::protocol::generic_proto::handler::{NotifsHandlerProto, NotifsHandlerOut, NotifsHandlerIn}; +use crate::protocol::generic_proto::handler::{ + NotifsHandlerIn, NotifsHandlerOut, NotifsHandlerProto, +}; use crate::protocol::generic_proto::upgrade::RegisteredProtocol; use bytes::BytesMut; use fnv::FnvHashMap; use futures::prelude::*; -use libp2p::core::{ConnectedPoint, Multiaddr, PeerId, connection::ConnectionId}; +use libp2p::core::{connection::ConnectionId, ConnectedPoint, Multiaddr, PeerId}; use libp2p::swarm::{ - DialPeerCondition, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - PollParameters + DialPeerCondition, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, PollParameters, }; use log::{debug, error, trace, warn}; use prometheus_endpoint::HistogramVec; @@ -109,1187 +107,1259 @@ use wasm_timer::Instant; /// tries to connect, the connection is accepted. A ban only delays dialing attempts. /// pub struct GenericProto { - /// Legacy protocol to open with peers. Never modified. - legacy_protocol: RegisteredProtocol, + /// Legacy protocol to open with peers. Never modified. 
+ legacy_protocol: RegisteredProtocol, - /// Notification protocols. Entries are only ever added and not removed. - notif_protocols: Vec<(Cow<'static, [u8]>, Vec)>, + /// Notification protocols. Entries are only ever added and not removed. + notif_protocols: Vec<(Cow<'static, [u8]>, Vec)>, - /// Receiver for instructions about who to connect to or disconnect from. - peerset: sc_peerset::Peerset, + /// Receiver for instructions about who to connect to or disconnect from. + peerset: sc_peerset::Peerset, - /// List of peers in our state. - peers: FnvHashMap, + /// List of peers in our state. + peers: FnvHashMap, - /// List of incoming messages we have sent to the peer set manager and that are waiting for an - /// answer. - incoming: SmallVec<[IncomingPeer; 6]>, + /// List of incoming messages we have sent to the peer set manager and that are waiting for an + /// answer. + incoming: SmallVec<[IncomingPeer; 6]>, - /// We generate indices to identify incoming connections. This is the next value for the index - /// to use when a connection is incoming. - next_incoming_index: sc_peerset::IncomingIndex, + /// We generate indices to identify incoming connections. This is the next value for the index + /// to use when a connection is incoming. + next_incoming_index: sc_peerset::IncomingIndex, - /// Events to produce from `poll()`. - events: SmallVec<[NetworkBehaviourAction; 4]>, + /// Events to produce from `poll()`. + events: SmallVec<[NetworkBehaviourAction; 4]>, - /// If `Some`, report the message queue sizes on this `Histogram`. - queue_size_report: Option, + /// If `Some`, report the message queue sizes on this `Histogram`. + queue_size_report: Option, } /// State of a peer we're connected to. #[derive(Debug)] enum PeerState { - /// State is poisoned. This is a temporary state for a peer and we should always switch back - /// to it later. If it is found in the wild, that means there was either a panic or a bug in - /// the state machine code. 
- Poisoned, - - /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial - /// delay to the connection. - Banned { - /// Until when the peer is banned. - until: Instant, - }, - - /// The peerset requested that we connect to this peer. We are currently not connected. - PendingRequest { - /// When to actually start dialing. - timer: futures_timer::Delay, - /// When the `timer` will trigger. - timer_deadline: Instant, - }, - - /// The peerset requested that we connect to this peer. We are currently dialing this peer. - Requested, - - /// We are connected to this peer but the peerset refused it. - /// - /// We may still have ongoing traffic with that peer, but it should cease shortly. - Disabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. - banned_until: Option, - }, - - /// We are connected to this peer but we are not opening any Substrate substream. The handler - /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, - /// but should get disconnected in a few seconds. - DisabledPendingEnable { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - /// When to enable this remote. - timer: futures_timer::Delay, - /// When the `timer` will trigger. - timer_deadline: Instant, - }, - - /// We are connected to this peer and the peerset has accepted it. The handler is in the - /// enabled state. - Enabled { - /// The connections that are currently open for custom protocol traffic. - open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, - }, - - /// We received an incoming connection from this peer and forwarded that - /// connection request to the peerset. 
The connection handlers are waiting - /// for initialisation, i.e. to be enabled or disabled based on whether - /// the peerset accepts or rejects the peer. - Incoming, + /// State is poisoned. This is a temporary state for a peer and we should always switch back + /// to it later. If it is found in the wild, that means there was either a panic or a bug in + /// the state machine code. + Poisoned, + + /// The peer misbehaved. If the PSM wants us to connect to this peer, we will add an artificial + /// delay to the connection. + Banned { + /// Until when the peer is banned. + until: Instant, + }, + + /// The peerset requested that we connect to this peer. We are currently not connected. + PendingRequest { + /// When to actually start dialing. + timer: futures_timer::Delay, + /// When the `timer` will trigger. + timer_deadline: Instant, + }, + + /// The peerset requested that we connect to this peer. We are currently dialing this peer. + Requested, + + /// We are connected to this peer but the peerset refused it. + /// + /// We may still have ongoing traffic with that peer, but it should cease shortly. + Disabled { + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + /// If `Some`, any dial attempts to this peer are delayed until the given `Instant`. + banned_until: Option, + }, + + /// We are connected to this peer but we are not opening any Substrate substream. The handler + /// will be enabled when `timer` fires. This peer can still perform Kademlia queries and such, + /// but should get disconnected in a few seconds. + DisabledPendingEnable { + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + /// When to enable this remote. + timer: futures_timer::Delay, + /// When the `timer` will trigger. 
+ timer_deadline: Instant, + }, + + /// We are connected to this peer and the peerset has accepted it. The handler is in the + /// enabled state. + Enabled { + /// The connections that are currently open for custom protocol traffic. + open: SmallVec<[ConnectionId; crate::MAX_CONNECTIONS_PER_PEER]>, + }, + + /// We received an incoming connection from this peer and forwarded that + /// connection request to the peerset. The connection handlers are waiting + /// for initialisation, i.e. to be enabled or disabled based on whether + /// the peerset accepts or rejects the peer. + Incoming, } impl PeerState { - /// True if there exists an established connection to tbe peer - /// that is open for custom protocol traffic. - fn is_open(&self) -> bool { - self.get_open().is_some() - } - - /// Returns the connection ID of the first established connection - /// that is open for custom protocol traffic. - fn get_open(&self) -> Option { - match self { - PeerState::Disabled { open, .. } | - PeerState::DisabledPendingEnable { open, .. } | - PeerState::Enabled { open, .. } => - if !open.is_empty() { - Some(open[0]) - } else { - None - } - PeerState::Poisoned => None, - PeerState::Banned { .. } => None, - PeerState::PendingRequest { .. } => None, - PeerState::Requested => None, - PeerState::Incoming { .. } => None, - } - } - - /// True if that node has been requested by the PSM. - fn is_requested(&self) -> bool { - match self { - PeerState::Poisoned => false, - PeerState::Banned { .. } => false, - PeerState::PendingRequest { .. } => true, - PeerState::Requested => true, - PeerState::Disabled { .. } => false, - PeerState::DisabledPendingEnable { .. } => true, - PeerState::Enabled { .. } => true, - PeerState::Incoming { .. } => false, - } - } + /// True if there exists an established connection to tbe peer + /// that is open for custom protocol traffic. 
+ fn is_open(&self) -> bool { + self.get_open().is_some() + } + + /// Returns the connection ID of the first established connection + /// that is open for custom protocol traffic. + fn get_open(&self) -> Option { + match self { + PeerState::Disabled { open, .. } + | PeerState::DisabledPendingEnable { open, .. } + | PeerState::Enabled { open, .. } => { + if !open.is_empty() { + Some(open[0]) + } else { + None + } + } + PeerState::Poisoned => None, + PeerState::Banned { .. } => None, + PeerState::PendingRequest { .. } => None, + PeerState::Requested => None, + PeerState::Incoming { .. } => None, + } + } + + /// True if that node has been requested by the PSM. + fn is_requested(&self) -> bool { + match self { + PeerState::Poisoned => false, + PeerState::Banned { .. } => false, + PeerState::PendingRequest { .. } => true, + PeerState::Requested => true, + PeerState::Disabled { .. } => false, + PeerState::DisabledPendingEnable { .. } => true, + PeerState::Enabled { .. } => true, + PeerState::Incoming { .. } => false, + } + } } /// State of an "incoming" message sent to the peer set manager. #[derive(Debug)] struct IncomingPeer { - /// Id of the remote peer of the incoming connection. - peer_id: PeerId, - /// If true, this "incoming" still corresponds to an actual connection. If false, then the - /// connection corresponding to it has been closed or replaced already. - alive: bool, - /// Id that the we sent to the peerset. - incoming_id: sc_peerset::IncomingIndex, + /// Id of the remote peer of the incoming connection. + peer_id: PeerId, + /// If true, this "incoming" still corresponds to an actual connection. If false, then the + /// connection corresponding to it has been closed or replaced already. + alive: bool, + /// Id that the we sent to the peerset. + incoming_id: sc_peerset::IncomingIndex, } /// Event that can be emitted by the `GenericProto`. #[derive(Debug)] pub enum GenericProtoOut { - /// Opened a custom protocol with the remote. 
- CustomProtocolOpen { - /// Id of the peer we are connected to. - peer_id: PeerId, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Id of the peer we were connected to. - peer_id: PeerId, - /// Reason why the substream closed, for debugging purposes. - reason: Cow<'static, str>, - }, - - /// Receives a message on the legacy substream. - LegacyMessage { - /// Id of the peer the message came from. - peer_id: PeerId, - /// Message that has been received. - message: BytesMut, - }, - - /// Receives a message on a custom protocol substream. - /// - /// Also concerns received notifications for the notifications API. - Notification { - /// Id of the peer the message came from. - peer_id: PeerId, - /// Engine corresponding to the message. - protocol_name: Cow<'static, [u8]>, - /// Message that has been received. - message: BytesMut, - }, - - /// The substream used by the protocol is pretty large. We should print avoid sending more - /// messages on it if possible. - Clogged { - /// Id of the peer which is clogged. - peer_id: PeerId, - /// Copy of the messages that are within the buffer, for further diagnostic. - messages: Vec>, - }, + /// Opened a custom protocol with the remote. + CustomProtocolOpen { + /// Id of the peer we are connected to. + peer_id: PeerId, + }, + + /// Closed a custom protocol with the remote. + CustomProtocolClosed { + /// Id of the peer we were connected to. + peer_id: PeerId, + /// Reason why the substream closed, for debugging purposes. + reason: Cow<'static, str>, + }, + + /// Receives a message on the legacy substream. + LegacyMessage { + /// Id of the peer the message came from. + peer_id: PeerId, + /// Message that has been received. + message: BytesMut, + }, + + /// Receives a message on a custom protocol substream. + /// + /// Also concerns received notifications for the notifications API. + Notification { + /// Id of the peer the message came from. 
+ peer_id: PeerId, + /// Engine corresponding to the message. + protocol_name: Cow<'static, [u8]>, + /// Message that has been received. + message: BytesMut, + }, + + /// The substream used by the protocol is pretty large. We should print avoid sending more + /// messages on it if possible. + Clogged { + /// Id of the peer which is clogged. + peer_id: PeerId, + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec>, + }, } impl GenericProto { - /// Creates a `CustomProtos`. - /// - /// The `queue_size_report` is an optional Prometheus metric that can report the size of the - /// messages queue. If passed, it must have one label for the protocol name. - pub fn new( - protocol: impl Into, - versions: &[u8], - peerset: sc_peerset::Peerset, - queue_size_report: Option, - ) -> Self { - let legacy_protocol = RegisteredProtocol::new(protocol, versions); - - GenericProto { - legacy_protocol, - notif_protocols: Vec::new(), - peerset, - peers: FnvHashMap::default(), - incoming: SmallVec::new(), - next_incoming_index: sc_peerset::IncomingIndex(0), - events: SmallVec::new(), - queue_size_report, - } - } - - /// Registers a new notifications protocol. - /// - /// You are very strongly encouraged to call this method very early on. Any open connection - /// will retain the protocols that were registered then, and not any new one. - pub fn register_notif_protocol( - &mut self, - protocol_name: impl Into>, - handshake_msg: impl Into> - ) { - self.notif_protocols.push((protocol_name.into(), handshake_msg.into())); - } - - /// Returns the number of discovered nodes that we keep in memory. - pub fn num_discovered_peers(&self) -> usize { - self.peerset.num_discovered_peers() - } - - /// Returns the list of all the peers we have an open channel to. 
- pub fn open_peers<'a>(&'a self) -> impl Iterator + 'a { - self.peers.iter().filter(|(_, state)| state.is_open()).map(|(id, _)| id) - } - - /// Returns true if we have an open connection to the given peer. - pub fn is_open(&self, peer_id: &PeerId) -> bool { - self.peers.get(peer_id).map(|p| p.is_open()).unwrap_or(false) - } - - /// Disconnects the given peer if we are connected to it. - pub fn disconnect_peer(&mut self, peer_id: &PeerId) { - debug!(target: "sub-libp2p", "External API => Disconnect {:?}", peer_id); - self.disconnect_peer_inner(peer_id, None); - } - - /// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the peer - /// for the specific duration. - fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option) { - let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { - entry - } else { - return - }; - - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // We're not connected anyway. - st @ PeerState::Disabled { .. } => *entry.into_mut() = st, - st @ PeerState::Requested => *entry.into_mut() = st, - st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, - st @ PeerState::Banned { .. } => *entry.into_mut() = st, - - // DisabledPendingEnable => Disabled. - PeerState::DisabledPendingEnable { - open, - timer_deadline, - timer: _ - } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()); - let banned_until = Some(if let Some(ban) = ban { - cmp::max(timer_deadline, Instant::now() + ban) - } else { - timer_deadline - }); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - } - }, - - // Enabled => Disabled. 
- PeerState::Enabled { open } => { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - } - }, - - // Incoming => Disabled. - PeerState::Incoming => { - let inc = if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == *entry.key() && i.alive) { - inc - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + /// Creates a `CustomProtos`. + /// + /// The `queue_size_report` is an optional Prometheus metric that can report the size of the + /// messages queue. If passed, it must have one label for the protocol name. + pub fn new( + protocol: impl Into, + versions: &[u8], + peerset: sc_peerset::Peerset, + queue_size_report: Option, + ) -> Self { + let legacy_protocol = RegisteredProtocol::new(protocol, versions); + + GenericProto { + legacy_protocol, + notif_protocols: Vec::new(), + peerset, + peers: FnvHashMap::default(), + incoming: SmallVec::new(), + next_incoming_index: sc_peerset::IncomingIndex(0), + events: SmallVec::new(), + queue_size_report, + } + } + + /// Registers a new notifications protocol. + /// + /// You are very strongly encouraged to call this method very early on. Any open connection + /// will retain the protocols that were registered then, and not any new one. + pub fn register_notif_protocol( + &mut self, + protocol_name: impl Into>, + handshake_msg: impl Into>, + ) { + self.notif_protocols + .push((protocol_name.into(), handshake_msg.into())); + } + + /// Returns the number of discovered nodes that we keep in memory. 
+ pub fn num_discovered_peers(&self) -> usize { + self.peerset.num_discovered_peers() + } + + /// Returns the list of all the peers we have an open channel to. + pub fn open_peers<'a>(&'a self) -> impl Iterator + 'a { + self.peers + .iter() + .filter(|(_, state)| state.is_open()) + .map(|(id, _)| id) + } + + /// Returns true if we have an open connection to the given peer. + pub fn is_open(&self, peer_id: &PeerId) -> bool { + self.peers + .get(peer_id) + .map(|p| p.is_open()) + .unwrap_or(false) + } + + /// Disconnects the given peer if we are connected to it. + pub fn disconnect_peer(&mut self, peer_id: &PeerId) { + debug!(target: "sub-libp2p", "External API => Disconnect {:?}", peer_id); + self.disconnect_peer_inner(peer_id, None); + } + + /// Inner implementation of `disconnect_peer`. If `ban` is `Some`, we ban the peer + /// for the specific duration. + fn disconnect_peer_inner(&mut self, peer_id: &PeerId, ban: Option) { + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(peer_id.clone()) { + entry + } else { + return; + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // We're not connected anyway. + st @ PeerState::Disabled { .. } => *entry.into_mut() = st, + st @ PeerState::Requested => *entry.into_mut() = st, + st @ PeerState::PendingRequest { .. } => *entry.into_mut() = st, + st @ PeerState::Banned { .. } => *entry.into_mut() = st, + + // DisabledPendingEnable => Disabled. + PeerState::DisabledPendingEnable { + open, + timer_deadline, + timer: _, + } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()); + let banned_until = Some(if let Some(ban) = ban { + cmp::max(timer_deadline, Instant::now() + ban) + } else { + timer_deadline + }); + *entry.into_mut() = PeerState::Disabled { open, banned_until } + } + + // Enabled => Disabled. 
+ PeerState::Enabled { open } => { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + let banned_until = ban.map(|dur| Instant::now() + dur); + *entry.into_mut() = PeerState::Disabled { open, banned_until } + } + + // Incoming => Disabled. + PeerState::Incoming => { + let inc = if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == *entry.key() && i.alive) + { + inc + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer"); - return - }; - - inc.alive = false; - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - let banned_until = ban.map(|dur| Instant::now() + dur); - *entry.into_mut() = PeerState::Disabled { - open: SmallVec::new(), - banned_until - } - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } - } - - /// Returns the list of all the peers that the peerset currently requests us to be connected to. - pub fn requested_peers<'a>(&'a self) -> impl Iterator + 'a { - self.peers.iter().filter(|(_, state)| state.is_requested()).map(|(id, _)| id) - } - - /// Returns true if we try to open protocols with the given peer. - pub fn is_enabled(&self, peer_id: &PeerId) -> bool { - match self.peers.get(peer_id) { - None => false, - Some(PeerState::Disabled { .. }) => false, - Some(PeerState::DisabledPendingEnable { .. }) => false, - Some(PeerState::Enabled { .. }) => true, - Some(PeerState::Incoming { .. }) => false, - Some(PeerState::Requested) => false, - Some(PeerState::PendingRequest { .. 
}) => false, - Some(PeerState::Banned { .. }) => false, - Some(PeerState::Poisoned) => false, - } - } - - /// Notify the behaviour that we have learned about the existence of nodes. - /// - /// Can be called multiple times with the same `PeerId`s. - pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { - self.peerset.discovered(peer_ids.into_iter().map(|peer_id| { - debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); - peer_id - })); - } - - /// Sends a notification to a peer. - /// - /// Has no effect if the custom protocol is not open with the given peer. - /// - /// Also note that even if we have a valid open substream, it may in fact be already closed - /// without us knowing, in which case the packet will not be received. - /// - /// The `fallback` parameter is used for backwards-compatibility reason if the remote doesn't - /// support our protocol. One needs to pass the equivalent of what would have been passed - /// with `send_packet`. - pub fn write_notification( - &mut self, - target: &PeerId, - protocol_name: Cow<'static, [u8]>, - message: impl Into>, - encoded_fallback_message: Vec, - ) { - let conn = match self.peers.get(target).and_then(|p| p.get_open()) { - None => { - debug!(target: "sub-libp2p", + return; + }; + + inc.alive = false; + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + let banned_until = ban.map(|dur| Instant::now() + dur); + *entry.into_mut() = PeerState::Disabled { + open: SmallVec::new(), + banned_until, + } + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id) + } + } + } + + /// Returns the list of all the peers that the peerset currently requests us to be connected to. 
+ pub fn requested_peers<'a>(&'a self) -> impl Iterator + 'a { + self.peers + .iter() + .filter(|(_, state)| state.is_requested()) + .map(|(id, _)| id) + } + + /// Returns true if we try to open protocols with the given peer. + pub fn is_enabled(&self, peer_id: &PeerId) -> bool { + match self.peers.get(peer_id) { + None => false, + Some(PeerState::Disabled { .. }) => false, + Some(PeerState::DisabledPendingEnable { .. }) => false, + Some(PeerState::Enabled { .. }) => true, + Some(PeerState::Incoming { .. }) => false, + Some(PeerState::Requested) => false, + Some(PeerState::PendingRequest { .. }) => false, + Some(PeerState::Banned { .. }) => false, + Some(PeerState::Poisoned) => false, + } + } + + /// Notify the behaviour that we have learned about the existence of nodes. + /// + /// Can be called multiple times with the same `PeerId`s. + pub fn add_discovered_nodes(&mut self, peer_ids: impl Iterator) { + self.peerset.discovered(peer_ids.into_iter().map(|peer_id| { + debug!(target: "sub-libp2p", "PSM <= Discovered({:?})", peer_id); + peer_id + })); + } + + /// Sends a notification to a peer. + /// + /// Has no effect if the custom protocol is not open with the given peer. + /// + /// Also note that even if we have a valid open substream, it may in fact be already closed + /// without us knowing, in which case the packet will not be received. + /// + /// The `fallback` parameter is used for backwards-compatibility reason if the remote doesn't + /// support our protocol. One needs to pass the equivalent of what would have been passed + /// with `send_packet`. 
+ pub fn write_notification( + &mut self, + target: &PeerId, + protocol_name: Cow<'static, [u8]>, + message: impl Into>, + encoded_fallback_message: Vec, + ) { + let conn = match self.peers.get(target).and_then(|p| p.get_open()) { + None => { + debug!(target: "sub-libp2p", "Tried to sent notification to {:?} without an open channel.", target); - return - }, - Some(conn) => conn - }; - - trace!( - target: "sub-libp2p", - "External API => Notification({:?}, {:?})", - target, - str::from_utf8(&protocol_name) - ); - trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::One(conn), - event: NotifsHandlerIn::SendNotification { - message: message.into(), - encoded_fallback_message, - protocol_name, - }, - }); - } - - /// Sends a message to a peer. - /// - /// Has no effect if the custom protocol is not open with the given peer. - /// - /// Also note that even we have a valid open substream, it may in fact be already closed - /// without us knowing, in which case the packet will not be received. - pub fn send_packet(&mut self, target: &PeerId, message: Vec) { - let conn = match self.peers.get(target).and_then(|p| p.get_open()) { - None => { - debug!(target: "sub-libp2p", + return; + } + Some(conn) => conn, + }; + + trace!( + target: "sub-libp2p", + "External API => Notification({:?}, {:?})", + target, + str::from_utf8(&protocol_name) + ); + trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); + + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: target.clone(), + handler: NotifyHandler::One(conn), + event: NotifsHandlerIn::SendNotification { + message: message.into(), + encoded_fallback_message, + protocol_name, + }, + }); + } + + /// Sends a message to a peer. + /// + /// Has no effect if the custom protocol is not open with the given peer. 
+ /// + /// Also note that even we have a valid open substream, it may in fact be already closed + /// without us knowing, in which case the packet will not be received. + pub fn send_packet(&mut self, target: &PeerId, message: Vec) { + let conn = match self.peers.get(target).and_then(|p| p.get_open()) { + None => { + debug!(target: "sub-libp2p", "Tried to sent packet to {:?} without an open channel.", target); - return - } - Some(conn) => conn - }; - - trace!(target: "sub-libp2p", "External API => Packet for {:?}", target); - trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: target.clone(), - handler: NotifyHandler::One(conn), - event: NotifsHandlerIn::SendLegacy { - message, - } - }); - } - - /// Returns the state of the peerset manager, for debugging purposes. - pub fn peerset_debug_info(&mut self) -> serde_json::Value { - self.peerset.debug_info() - } - - /// Function that is called when the peerset wants us to connect to a peer. - fn peerset_report_connect(&mut self, peer_id: PeerId) { - let mut occ_entry = match self.peers.entry(peer_id) { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => { - // If there's no entry in `self.peers`, start dialing. 
- debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key()); - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key()); - self.events.push(NetworkBehaviourAction::DialPeer { - peer_id: entry.key().clone(), - condition: DialPeerCondition::Disconnected - }); - entry.insert(PeerState::Requested); - return; - } - }; - - let now = Instant::now(); - - match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { - PeerState::Banned { ref until } if *until > now => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ + return; + } + Some(conn) => conn, + }; + + trace!(target: "sub-libp2p", "External API => Packet for {:?}", target); + trace!(target: "sub-libp2p", "Handler({:?}) <= Packet", target); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: target.clone(), + handler: NotifyHandler::One(conn), + event: NotifsHandlerIn::SendLegacy { message }, + }); + } + + /// Returns the state of the peerset manager, for debugging purposes. + pub fn peerset_debug_info(&mut self) -> serde_json::Value { + self.peerset.debug_info() + } + + /// Function that is called when the peerset wants us to connect to a peer. + fn peerset_report_connect(&mut self, peer_id: PeerId) { + let mut occ_entry = match self.peers.entry(peer_id) { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => { + // If there's no entry in `self.peers`, start dialing. 
+ debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", entry.key()); + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", entry.key()); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: entry.key().clone(), + condition: DialPeerCondition::Disconnected, + }); + entry.insert(PeerState::Requested); + return; + } + }; + + let now = Instant::now(); + + match mem::replace(occ_entry.get_mut(), PeerState::Poisoned) { + PeerState::Banned { ref until } if *until > now => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Will start to connect at \ until {:?}", occ_entry.key(), until); - *occ_entry.into_mut() = PeerState::PendingRequest { - timer: futures_timer::Delay::new(until.clone() - now), - timer_deadline: until.clone(), - }; - }, - - PeerState::Banned { .. } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); - self.events.push(NetworkBehaviourAction::DialPeer { - peer_id: occ_entry.key().clone(), - condition: DialPeerCondition::Disconnected - }); - *occ_entry.into_mut() = PeerState::Requested; - }, - - PeerState::Disabled { - open, - banned_until: Some(ref banned) - } if *banned > now => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", + *occ_entry.into_mut() = PeerState::PendingRequest { + timer: futures_timer::Delay::new(until.clone() - now), + timer_deadline: until.clone(), + }; + } + + PeerState::Banned { .. 
} => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Starting to connect", occ_entry.key()); + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?}", occ_entry.key()); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: occ_entry.key().clone(), + condition: DialPeerCondition::Disconnected, + }); + *occ_entry.into_mut() = PeerState::Requested; + } + + PeerState::Disabled { + open, + banned_until: Some(ref banned), + } if *banned > now => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): But peer is banned until {:?}", occ_entry.key(), banned); - *occ_entry.into_mut() = PeerState::DisabledPendingEnable { - open, - timer: futures_timer::Delay::new(banned.clone() - now), - timer_deadline: banned.clone(), - }; - }, - - PeerState::Disabled { open, banned_until: _ } => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + *occ_entry.into_mut() = PeerState::DisabledPendingEnable { + open, + timer: futures_timer::Delay::new(banned.clone() - now), + timer_deadline: banned.clone(), + }; + } + + PeerState::Disabled { + open, + banned_until: _, + } => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", occ_entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open }; - }, - - PeerState::Incoming => { - debug!(target: "sub-libp2p", "PSM => Connect({:?}): Enabling connections.", + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *occ_entry.into_mut() = PeerState::Enabled { open }; + } + + PeerState::Incoming => { + debug!(target: "sub-libp2p", "PSM => Connect({:?}): 
Enabling connections.", occ_entry.key()); - if let Some(inc) = self.incoming.iter_mut() - .find(|i| i.peer_id == *occ_entry.key() && i.alive) { - inc.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ + if let Some(inc) = self + .incoming + .iter_mut() + .find(|i| i.peer_id == *occ_entry.key() && i.alive) + { + inc.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in \ incoming for incoming peer") - } - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: occ_entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *occ_entry.into_mut() = PeerState::Enabled { open: SmallVec::new() }; - }, - - st @ PeerState::Enabled { .. } => { - warn!(target: "sub-libp2p", + } + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", occ_entry.key()); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: occ_entry.key().clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *occ_entry.into_mut() = PeerState::Enabled { + open: SmallVec::new(), + }; + } + + st @ PeerState::Enabled { .. } => { + warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already connected.", occ_entry.key()); - *occ_entry.into_mut() = st; - }, - st @ PeerState::DisabledPendingEnable { .. } => { - warn!(target: "sub-libp2p", + *occ_entry.into_mut() = st; + } + st @ PeerState::DisabledPendingEnable { .. } => { + warn!(target: "sub-libp2p", "PSM => Connect({:?}): Already pending enabling.", occ_entry.key()); - *occ_entry.into_mut() = st; - }, - st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. } => { - warn!(target: "sub-libp2p", + *occ_entry.into_mut() = st; + } + st @ PeerState::Requested { .. } | st @ PeerState::PendingRequest { .. 
} => { + warn!(target: "sub-libp2p", "PSM => Connect({:?}): Duplicate request.", occ_entry.key()); - *occ_entry.into_mut() = st; - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()), - } - } - - /// Function that is called when the peerset wants us to disconnect from a peer. - fn peerset_report_disconnect(&mut self, peer_id: PeerId) { - let mut entry = match self.peers.entry(peer_id) { - Entry::Occupied(entry) => entry, - Entry::Vacant(entry) => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); - return - } - }; - - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); - *entry.into_mut() = st; - }, - - PeerState::DisabledPendingEnable { - open, - timer_deadline, - timer: _ - } => { - debug!(target: "sub-libp2p", + *occ_entry.into_mut() = st; + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", occ_entry.key()) + } + } + } + + /// Function that is called when the peerset wants us to disconnect from a peer. + fn peerset_report_disconnect(&mut self, peer_id: PeerId) { + let mut entry = match self.peers.entry(peer_id) { + Entry::Occupied(entry) => entry, + Entry::Vacant(entry) => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); + return; + } + }; + + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + st @ PeerState::Disabled { .. } | st @ PeerState::Banned { .. 
} => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Already disabled.", entry.key()); + *entry.into_mut() = st; + } + + PeerState::DisabledPendingEnable { + open, + timer_deadline, + timer: _, + } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Interrupting pending enabling.", entry.key()); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: Some(timer_deadline), - }; - }, - - PeerState::Enabled { open } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: entry.key().clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None - } - }, - st @ PeerState::Incoming => { - error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: Some(timer_deadline), + }; + } + + PeerState::Enabled { open } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Disabling connections.", entry.key()); + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", entry.key()); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: entry.key().clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: None, + } + } + st @ PeerState::Incoming => { + error!(target: "sub-libp2p", "PSM => Drop({:?}): Not enabled (Incoming).", entry.key()); - *entry.into_mut() = st; - }, - PeerState::Requested => { - // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other - // sub-systems (such as the discovery mechanism) may require dialing this peer as - // well at the same time. 
- debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); - entry.remove(); - }, - PeerState::PendingRequest { timer_deadline, .. } => { - debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); - *entry.into_mut() = PeerState::Banned { until: timer_deadline } - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()), - } - } - - /// Function that is called when the peerset wants us to accept a connection - /// request from a peer. - fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); - return - }; - - if !incoming.alive { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, + *entry.into_mut() = st; + } + PeerState::Requested => { + // We don't cancel dialing. Libp2p doesn't expose that on purpose, as other + // sub-systems (such as the discovery mechanism) may require dialing this peer as + // well at the same time. + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected.", entry.key()); + entry.remove(); + } + PeerState::PendingRequest { timer_deadline, .. } => { + debug!(target: "sub-libp2p", "PSM => Drop({:?}): Not yet connected", entry.key()); + *entry.into_mut() = PeerState::Banned { + until: timer_deadline, + } + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", entry.key()) + } + } + } + + /// Function that is called when the peerset wants us to accept a connection + /// request from a peer. 
+ fn peerset_report_accept(&mut self, index: sc_peerset::IncomingIndex) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) + { + self.incoming.remove(pos) + } else { + error!(target: "sub-libp2p", "PSM => Accept({:?}): Invalid index", index); + return; + }; + + if !incoming.alive { + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Obsolete incoming, sending back dropped", index, incoming.peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); - self.peerset.dropped(incoming.peer_id.clone()); - return - } - - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { - debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", incoming.peer_id); + self.peerset.dropped(incoming.peer_id.clone()); + return; + } + + match self.peers.get_mut(&incoming.peer_id) { + Some(state @ PeerState::Incoming) => { + debug!(target: "sub-libp2p", "PSM => Accept({:?}, {:?}): Enabling connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *state = PeerState::Enabled { open: SmallVec::new() }; - } - peer => error!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable", incoming.peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id, + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *state = PeerState::Enabled { + open: SmallVec::new(), + }; + } + peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer) - } - } - - /// Function that is called when the peerset wants us to reject an incoming peer. 
- fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) { - let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) { - self.incoming.remove(pos) - } else { - error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); - return - }; - - if !incoming.alive { - error!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Obsolete incoming, \ + peer), + } + } + + /// Function that is called when the peerset wants us to reject an incoming peer. + fn peerset_report_reject(&mut self, index: sc_peerset::IncomingIndex) { + let incoming = if let Some(pos) = self.incoming.iter().position(|i| i.incoming_id == index) + { + self.incoming.remove(pos) + } else { + error!(target: "sub-libp2p", "PSM => Reject({:?}): Invalid index", index); + return; + }; + + if !incoming.alive { + error!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Obsolete incoming, \ ignoring", index, incoming.peer_id); - return - } + return; + } - match self.peers.get_mut(&incoming.peer_id) { - Some(state @ PeerState::Incoming) => { - debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", + match self.peers.get_mut(&incoming.peer_id) { + Some(state @ PeerState::Incoming) => { + debug!(target: "sub-libp2p", "PSM => Reject({:?}, {:?}): Rejecting connections.", index, incoming.peer_id); - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: incoming.peer_id, - handler: NotifyHandler::All, - event: NotifsHandlerIn::Disable, - }); - *state = PeerState::Disabled { - open: SmallVec::new(), - banned_until: None - }; - } - peer => error!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", incoming.peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: incoming.peer_id, + handler: NotifyHandler::All, + event: NotifsHandlerIn::Disable, + }); + *state = PeerState::Disabled { + open: 
SmallVec::new(), + banned_until: None, + }; + } + peer => error!(target: "sub-libp2p", "State mismatch in libp2p: Expected alive incoming. Got {:?}.", - peer) - } - } + peer), + } + } } impl NetworkBehaviour for GenericProto { - type ProtocolsHandler = NotifsHandlerProto; - type OutEvent = GenericProtoOut; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - NotifsHandlerProto::new( - self.legacy_protocol.clone(), - self.notif_protocols.clone(), - self.queue_size_report.clone() - ) - } - - fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { - Vec::new() - } - - fn inject_connected(&mut self, _: &PeerId) { - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", + type ProtocolsHandler = NotifsHandlerProto; + type OutEvent = GenericProtoOut; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + NotifsHandlerProto::new( + self.legacy_protocol.clone(), + self.notif_protocols.clone(), + self.queue_size_report.clone(), + ) + } + + fn addresses_of_peer(&mut self, _: &PeerId) -> Vec { + Vec::new() + } + + fn inject_connected(&mut self, _: &PeerId) {} + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} established.", conn, endpoint, peer_id); - match (self.peers.entry(peer_id.clone()).or_insert(PeerState::Poisoned), endpoint) { - (st @ &mut PeerState::Requested, endpoint) | - (st @ &mut PeerState::PendingRequest { .. 
}, endpoint) => { - debug!(target: "sub-libp2p", - "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", - peer_id, endpoint - ); - *st = PeerState::Enabled { open: SmallVec::new() }; - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable - }); - } - - // Note: it may seem weird that "Banned" peers get treated as if they were absent. - // This is because the word "Banned" means "temporarily prevent outgoing connections to - // this peer", and not "banned" in the sense that we would refuse the peer altogether. - (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) | - (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { - let incoming_id = self.next_incoming_index.clone(); - self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { - Some(v) => v, - None => { - error!(target: "sub-libp2p", "Overflow in next_incoming_index"); - return - } - }; - debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", + match ( + self.peers + .entry(peer_id.clone()) + .or_insert(PeerState::Poisoned), + endpoint, + ) { + (st @ &mut PeerState::Requested, endpoint) + | (st @ &mut PeerState::PendingRequest { .. }, endpoint) => { + debug!(target: "sub-libp2p", + "Libp2p => Connected({}, {:?}): Connection was requested by PSM.", + peer_id, endpoint + ); + *st = PeerState::Enabled { + open: SmallVec::new(), + }; + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Enable, + }); + } + + // Note: it may seem weird that "Banned" peers get treated as if they were absent. + // This is because the word "Banned" means "temporarily prevent outgoing connections to + // this peer", and not "banned" in the sense that we would refuse the peer altogether. 
+ (st @ &mut PeerState::Poisoned, endpoint @ ConnectedPoint::Listener { .. }) + | (st @ &mut PeerState::Banned { .. }, endpoint @ ConnectedPoint::Listener { .. }) => { + let incoming_id = self.next_incoming_index.clone(); + self.next_incoming_index.0 = match self.next_incoming_index.0.checked_add(1) { + Some(v) => v, + None => { + error!(target: "sub-libp2p", "Overflow in next_incoming_index"); + return; + } + }; + debug!(target: "sub-libp2p", "Libp2p => Connected({}, {:?}): Incoming connection", peer_id, endpoint); - debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", + debug!(target: "sub-libp2p", "PSM <= Incoming({}, {:?}).", peer_id, incoming_id); - self.peerset.incoming(peer_id.clone(), incoming_id); - self.incoming.push(IncomingPeer { - peer_id: peer_id.clone(), - alive: true, - incoming_id, - }); - *st = PeerState::Incoming { }; - } - - (st @ &mut PeerState::Poisoned, endpoint) | - (st @ &mut PeerState::Banned { .. }, endpoint) => { - let banned_until = if let PeerState::Banned { until } = st { - Some(*until) - } else { - None - }; - debug!(target: "sub-libp2p", + self.peerset.incoming(peer_id.clone(), incoming_id); + self.incoming.push(IncomingPeer { + peer_id: peer_id.clone(), + alive: true, + incoming_id, + }); + *st = PeerState::Incoming {}; + } + + (st @ &mut PeerState::Poisoned, endpoint) + | (st @ &mut PeerState::Banned { .. }, endpoint) => { + let banned_until = if let PeerState::Banned { until } = st { + Some(*until) + } else { + None + }; + debug!(target: "sub-libp2p", "Libp2p => Connected({},{:?}): Not requested by PSM, disabling.", peer_id, endpoint); - *st = PeerState::Disabled { open: SmallVec::new(), banned_until }; - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); - } - - (PeerState::Incoming { .. 
}, _) => { - debug!(target: "sub-libp2p", + *st = PeerState::Disabled { + open: SmallVec::new(), + banned_until, + }; + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Disable, + }); + } + + (PeerState::Incoming { .. }, _) => { + debug!(target: "sub-libp2p", "Secondary connection {:?} to {} waiting for PSM decision.", conn, peer_id); - }, + } - (PeerState::Enabled { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", + (PeerState::Enabled { .. }, _) => { + debug!(target: "sub-libp2p", "Handler({},{:?}) <= Enable secondary connection", peer_id, conn); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Enable - }); - } - - (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. }, _) => { - debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Enable, + }); + } + + (PeerState::Disabled { .. }, _) | (PeerState::DisabledPendingEnable { .. 
}, _) => { + debug!(target: "sub-libp2p", "Handler({},{:?}) <= Disable secondary connection", peer_id, conn); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::One(*conn), - event: NotifsHandlerIn::Disable - }); - } - } - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::One(*conn), + event: NotifsHandlerIn::Disable, + }); + } + } + } + + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + debug!(target: "sub-libp2p", "Libp2p => Connection ({:?},{:?}) to {} closed.", conn, endpoint, peer_id); - match self.peers.get_mut(peer_id) { - Some(PeerState::Disabled { open, .. }) | - Some(PeerState::DisabledPendingEnable { open, .. }) | - Some(PeerState::Enabled { open, .. }) => { - // Check if the "link" to the peer is already considered closed, - // i.e. there is no connection that is open for custom protocols, - // in which case `CustomProtocolClosed` was already emitted. - let closed = open.is_empty(); - open.retain(|c| c != conn); - if open.is_empty() && !closed { - debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); - let event = GenericProtoOut::CustomProtocolClosed { - peer_id: peer_id.clone(), - reason: "Disconnected by libp2p".into(), - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - } - _ => {} - } - } - - fn inject_disconnected(&mut self, peer_id: &PeerId) { - match self.peers.remove(peer_id) { - None | Some(PeerState::Requested) | Some(PeerState::PendingRequest { .. }) | - Some(PeerState::Banned { .. }) => - // This is a serious bug either in this state machine or in libp2p. 
- error!(target: "sub-libp2p", + match self.peers.get_mut(peer_id) { + Some(PeerState::Disabled { open, .. }) + | Some(PeerState::DisabledPendingEnable { open, .. }) + | Some(PeerState::Enabled { open, .. }) => { + // Check if the "link" to the peer is already considered closed, + // i.e. there is no connection that is open for custom protocols, + // in which case `CustomProtocolClosed` was already emitted. + let closed = open.is_empty(); + open.retain(|c| c != conn); + if open.is_empty() && !closed { + debug!(target: "sub-libp2p", "External API <= Closed({})", peer_id); + let event = GenericProtoOut::CustomProtocolClosed { + peer_id: peer_id.clone(), + reason: "Disconnected by libp2p".into(), + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + } + _ => {} + } + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + match self.peers.remove(peer_id) { + None + | Some(PeerState::Requested) + | Some(PeerState::PendingRequest { .. }) + | Some(PeerState::Banned { .. }) => + // This is a serious bug either in this state machine or in libp2p. + { + error!(target: "sub-libp2p", "`inject_disconnected` called for unknown peer {}", - peer_id), - - Some(PeerState::Disabled { banned_until, .. }) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); - if let Some(until) = banned_until { - self.peers.insert(peer_id.clone(), PeerState::Banned { until }); - } - } - - Some(PeerState::DisabledPendingEnable { timer_deadline, .. }) => { - debug!(target: "sub-libp2p", + peer_id) + } + + Some(PeerState::Disabled { banned_until, .. }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled.", peer_id); + if let Some(until) = banned_until { + self.peers + .insert(peer_id.clone(), PeerState::Banned { until }); + } + } + + Some(PeerState::DisabledPendingEnable { timer_deadline, .. 
}) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was disabled but pending enable.", peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - self.peers.insert(peer_id.clone(), PeerState::Banned { until: timer_deadline }); - } - - Some(PeerState::Enabled { .. }) => { - debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); - debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); - self.peerset.dropped(peer_id.clone()); - let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); - self.peers.insert(peer_id.clone(), PeerState::Banned { - until: Instant::now() + Duration::from_secs(ban_dur) - }); - } - - // In the incoming state, we don't report "Dropped". Instead we will just ignore the - // corresponding Accept/Reject. - Some(PeerState::Incoming { }) => { - if let Some(state) = self.incoming.iter_mut().find(|i| i.peer_id == *peer_id) { - debug!(target: "sub-libp2p", + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + self.peers.insert( + peer_id.clone(), + PeerState::Banned { + until: timer_deadline, + }, + ); + } + + Some(PeerState::Enabled { .. }) => { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was enabled.", peer_id); + debug!(target: "sub-libp2p", "PSM <= Dropped({})", peer_id); + self.peerset.dropped(peer_id.clone()); + let ban_dur = Uniform::new(5, 10).sample(&mut rand::thread_rng()); + self.peers.insert( + peer_id.clone(), + PeerState::Banned { + until: Instant::now() + Duration::from_secs(ban_dur), + }, + ); + } + + // In the incoming state, we don't report "Dropped". Instead we will just ignore the + // corresponding Accept/Reject. 
+ Some(PeerState::Incoming {}) => { + if let Some(state) = self.incoming.iter_mut().find(|i| i.peer_id == *peer_id) { + debug!(target: "sub-libp2p", "Libp2p => Disconnected({}): Was in incoming mode with id {:?}.", peer_id, state.incoming_id); - state.alive = false; - } else { - error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ + state.alive = false; + } else { + error!(target: "sub-libp2p", "State mismatch in libp2p: no entry in incoming \ corresponding to an incoming state in peers") - } - } - - Some(PeerState::Poisoned) => - error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id), - } - } - - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn error::Error) { - trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { - match mem::replace(entry.get_mut(), PeerState::Poisoned) { - // The peer is not in our list. - st @ PeerState::Banned { .. } => { - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, - - // "Basic" situation: we failed to reach a peer that the peerset requested. - PeerState::Requested | PeerState::PendingRequest { .. } => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = PeerState::Banned { - until: Instant::now() + Duration::from_secs(5) - }; - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); - self.peerset.dropped(peer_id.clone()) - }, - - // We can still get dial failures even if we are already connected to the peer, - // as an extra diagnostic for an earlier attempt. - st @ PeerState::Disabled { .. } | st @ PeerState::Enabled { .. } | - st @ PeerState::DisabledPendingEnable { .. } | st @ PeerState::Incoming { .. 
} => { - debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - *entry.into_mut() = st; - }, - - PeerState::Poisoned => - error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id), - } - - } else { - // The peer is not in our list. - trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); - } - } - - fn inject_event( - &mut self, - source: PeerId, - connection: ConnectionId, - event: NotifsHandlerOut, - ) { - match event { - NotifsHandlerOut::Closed { endpoint, reason } => { - debug!(target: "sub-libp2p", + } + } + + Some(PeerState::Poisoned) => { + error!(target: "sub-libp2p", "State of peer {} is poisoned", peer_id) + } + } + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn error::Error, + ) { + trace!(target: "sub-libp2p", "Libp2p => Reach failure for {:?} through {:?}: {:?}", peer_id, addr, error); + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + if let Entry::Occupied(mut entry) = self.peers.entry(peer_id.clone()) { + match mem::replace(entry.get_mut(), PeerState::Poisoned) { + // The peer is not in our list. + st @ PeerState::Banned { .. } => { + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = st; + } + + // "Basic" situation: we failed to reach a peer that the peerset requested. + PeerState::Requested | PeerState::PendingRequest { .. } => { + debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = PeerState::Banned { + until: Instant::now() + Duration::from_secs(5), + }; + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", peer_id); + self.peerset.dropped(peer_id.clone()) + } + + // We can still get dial failures even if we are already connected to the peer, + // as an extra diagnostic for an earlier attempt. + st @ PeerState::Disabled { .. } + | st @ PeerState::Enabled { .. } + | st @ PeerState::DisabledPendingEnable { .. 
} + | st @ PeerState::Incoming { .. } => { + debug!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + *entry.into_mut() = st; + } + + PeerState::Poisoned => { + error!(target: "sub-libp2p", "State of {:?} is poisoned", peer_id) + } + } + } else { + // The peer is not in our list. + trace!(target: "sub-libp2p", "Libp2p => Dial failure for {:?}", peer_id); + } + } + + fn inject_event(&mut self, source: PeerId, connection: ConnectionId, event: NotifsHandlerOut) { + match event { + NotifsHandlerOut::Closed { endpoint, reason } => { + debug!(target: "sub-libp2p", "Handler({:?}) => Endpoint {:?} closed for custom protocols: {}", source, endpoint, reason); - let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { - entry - } else { - error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); - return - }; - - let last = match mem::replace(entry.get_mut(), PeerState::Poisoned) { - PeerState::Enabled { mut open } => { - debug_assert!(open.iter().any(|c| c == &connection)); - open.retain(|c| c != &connection); - - debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: source.clone(), - handler: NotifyHandler::One(connection), - event: NotifsHandlerIn::Disable, - }); - - let last = open.is_empty(); - - if last { - debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); - self.peerset.dropped(source.clone()); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until: None - }; - } else { - *entry.into_mut() = PeerState::Enabled { open }; - } - - last - }, - PeerState::Disabled { mut open, banned_until } => { - debug_assert!(open.iter().any(|c| c == &connection)); - open.retain(|c| c != &connection); - let last = open.is_empty(); - *entry.into_mut() = PeerState::Disabled { - open, - banned_until - }; - last - }, - PeerState::DisabledPendingEnable { - mut open, - timer, - timer_deadline - } => { - 
debug_assert!(open.iter().any(|c| c == &connection)); - open.retain(|c| c != &connection); - let last = open.is_empty(); - *entry.into_mut() = PeerState::DisabledPendingEnable { - open, - timer, - timer_deadline - }; - last - }, - state => { - error!(target: "sub-libp2p", + let mut entry = if let Entry::Occupied(entry) = self.peers.entry(source.clone()) { + entry + } else { + error!(target: "sub-libp2p", "Closed: State mismatch in the custom protos handler"); + return; + }; + + let last = match mem::replace(entry.get_mut(), PeerState::Poisoned) { + PeerState::Enabled { mut open } => { + debug_assert!(open.iter().any(|c| c == &connection)); + open.retain(|c| c != &connection); + + debug!(target: "sub-libp2p", "Handler({:?}) <= Disable", source); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: source.clone(), + handler: NotifyHandler::One(connection), + event: NotifsHandlerIn::Disable, + }); + + let last = open.is_empty(); + + if last { + debug!(target: "sub-libp2p", "PSM <= Dropped({:?})", source); + self.peerset.dropped(source.clone()); + *entry.into_mut() = PeerState::Disabled { + open, + banned_until: None, + }; + } else { + *entry.into_mut() = PeerState::Enabled { open }; + } + + last + } + PeerState::Disabled { + mut open, + banned_until, + } => { + debug_assert!(open.iter().any(|c| c == &connection)); + open.retain(|c| c != &connection); + let last = open.is_empty(); + *entry.into_mut() = PeerState::Disabled { open, banned_until }; + last + } + PeerState::DisabledPendingEnable { + mut open, + timer, + timer_deadline, + } => { + debug_assert!(open.iter().any(|c| c == &connection)); + open.retain(|c| c != &connection); + let last = open.is_empty(); + *entry.into_mut() = PeerState::DisabledPendingEnable { + open, + timer, + timer_deadline, + }; + last + } + state => { + error!(target: "sub-libp2p", "Unexpected state in the custom protos handler: {:?}", state); - return - } - }; - - if last { - debug!(target: "sub-libp2p", "External API <= 
Closed({:?})", source); - let event = GenericProtoOut::CustomProtocolClosed { - reason, - peer_id: source.clone(), - }; - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } else { - debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); - } - } - - NotifsHandlerOut::Open { endpoint } => { - debug!(target: "sub-libp2p", + return; + } + }; + + if last { + debug!(target: "sub-libp2p", "External API <= Closed({:?})", source); + let event = GenericProtoOut::CustomProtocolClosed { + reason, + peer_id: source.clone(), + }; + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } else { + debug!(target: "sub-libp2p", "Secondary connection closed custom protocol."); + } + } + + NotifsHandlerOut::Open { endpoint } => { + debug!(target: "sub-libp2p", "Handler({:?}) => Endpoint {:?} open for custom protocols.", source, endpoint); - let first = match self.peers.get_mut(&source) { - Some(PeerState::Enabled { ref mut open, .. }) | - Some(PeerState::DisabledPendingEnable { ref mut open, .. }) | - Some(PeerState::Disabled { ref mut open, .. }) => { - let first = open.is_empty(); - open.push(connection); - first - } - state => { - error!(target: "sub-libp2p", + let first = match self.peers.get_mut(&source) { + Some(PeerState::Enabled { ref mut open, .. }) + | Some(PeerState::DisabledPendingEnable { ref mut open, .. }) + | Some(PeerState::Disabled { ref mut open, .. 
}) => { + let first = open.is_empty(); + open.push(connection); + first + } + state => { + error!(target: "sub-libp2p", "Open: Unexpected state in the custom protos handler: {:?}", state); - return - } - }; - - if first { - debug!(target: "sub-libp2p", "External API <= Open({:?})", source); - let event = GenericProtoOut::CustomProtocolOpen { peer_id: source }; - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } else { - debug!(target: "sub-libp2p", "Secondary connection opened custom protocol."); - } - } - - NotifsHandlerOut::CustomMessage { message } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); - trace!(target: "sub-libp2p", "External API <= Message({:?})", source); - let event = GenericProtoOut::LegacyMessage { - peer_id: source, - message, - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - - NotifsHandlerOut::Notification { protocol_name, message } => { - debug_assert!(self.is_open(&source)); - trace!( - target: "sub-libp2p", - "Handler({:?}) => Notification({:?})", - source, - str::from_utf8(&protocol_name) - ); - trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); - let event = GenericProtoOut::Notification { - peer_id: source, - protocol_name, - message, - }; - - self.events.push(NetworkBehaviourAction::GenerateEvent(event)); - } - - NotifsHandlerOut::Clogged { messages } => { - debug_assert!(self.is_open(&source)); - trace!(target: "sub-libp2p", "Handler({:?}) => Clogged", source); - trace!(target: "sub-libp2p", "External API <= Clogged({:?})", source); - warn!(target: "sub-libp2p", "Queue of packets to send to {:?} is \ + return; + } + }; + + if first { + debug!(target: "sub-libp2p", "External API <= Open({:?})", source); + let event = GenericProtoOut::CustomProtocolOpen { peer_id: source }; + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } else { + debug!(target: "sub-libp2p", 
"Secondary connection opened custom protocol."); + } + } + + NotifsHandlerOut::CustomMessage { message } => { + debug_assert!(self.is_open(&source)); + trace!(target: "sub-libp2p", "Handler({:?}) => Message", source); + trace!(target: "sub-libp2p", "External API <= Message({:?})", source); + let event = GenericProtoOut::LegacyMessage { + peer_id: source, + message, + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + + NotifsHandlerOut::Notification { + protocol_name, + message, + } => { + debug_assert!(self.is_open(&source)); + trace!( + target: "sub-libp2p", + "Handler({:?}) => Notification({:?})", + source, + str::from_utf8(&protocol_name) + ); + trace!(target: "sub-libp2p", "External API <= Message({:?}, {:?})", protocol_name, source); + let event = GenericProtoOut::Notification { + peer_id: source, + protocol_name, + message, + }; + + self.events + .push(NetworkBehaviourAction::GenerateEvent(event)); + } + + NotifsHandlerOut::Clogged { messages } => { + debug_assert!(self.is_open(&source)); + trace!(target: "sub-libp2p", "Handler({:?}) => Clogged", source); + trace!(target: "sub-libp2p", "External API <= Clogged({:?})", source); + warn!(target: "sub-libp2p", "Queue of packets to send to {:?} is \ pretty large", source); - self.events.push(NetworkBehaviourAction::GenerateEvent(GenericProtoOut::Clogged { - peer_id: source, - messages, - })); - } - - // Don't do anything for non-severe errors except report them. - NotifsHandlerOut::ProtocolError { is_severe, ref error } if !is_severe => { - debug!(target: "sub-libp2p", "Handler({:?}) => Benign protocol error: {:?}", + self.events.push(NetworkBehaviourAction::GenerateEvent( + GenericProtoOut::Clogged { + peer_id: source, + messages, + }, + )); + } + + // Don't do anything for non-severe errors except report them. 
+ NotifsHandlerOut::ProtocolError { + is_severe, + ref error, + } if !is_severe => { + debug!(target: "sub-libp2p", "Handler({:?}) => Benign protocol error: {:?}", source, error) - } + } - NotifsHandlerOut::ProtocolError { error, .. } => { - debug!(target: "sub-libp2p", + NotifsHandlerOut::ProtocolError { error, .. } => { + debug!(target: "sub-libp2p", "Handler({:?}) => Severe protocol error: {:?}", source, error); - // A severe protocol error happens when we detect a "bad" peer, such as a peer on - // a different chain, or a peer that doesn't speak the same protocol(s). We - // decrease the peer's reputation, hence lowering the chances we try this peer - // again in the short term. - self.peerset.report_peer( - source.clone(), - sc_peerset::ReputationChange::new(i32::min_value(), "Protocol error") - ); - self.disconnect_peer_inner(&source, Some(Duration::from_secs(5))); - } - } - } - - fn poll( - &mut self, - cx: &mut Context, - _params: &mut impl PollParameters, - ) -> Poll< - NetworkBehaviourAction< - NotifsHandlerIn, - Self::OutEvent, - >, - > { - // Poll for instructions from the peerset. - // Note that the peerset is a *best effort* crate, and we have to use defensive programming. 
- loop { - match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) { - Poll::Ready(Some(sc_peerset::Message::Accept(index))) => { - self.peerset_report_accept(index); - } - Poll::Ready(Some(sc_peerset::Message::Reject(index))) => { - self.peerset_report_reject(index); - } - Poll::Ready(Some(sc_peerset::Message::Connect(id))) => { - self.peerset_report_connect(id); - } - Poll::Ready(Some(sc_peerset::Message::Drop(id))) => { - self.peerset_report_disconnect(id); - } - Poll::Ready(None) => { - error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); - break; - } - Poll::Pending => break, - } - } - - for (peer_id, peer_state) in self.peers.iter_mut() { - match mem::replace(peer_state, PeerState::Poisoned) { - PeerState::PendingRequest { mut timer, timer_deadline } => { - if let Poll::Pending = Pin::new(&mut timer).poll(cx) { - *peer_state = PeerState::PendingRequest { timer, timer_deadline }; - continue; - } - - debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); - self.events.push(NetworkBehaviourAction::DialPeer { - peer_id: peer_id.clone(), - condition: DialPeerCondition::Disconnected - }); - *peer_state = PeerState::Requested; - } - - PeerState::DisabledPendingEnable { - mut timer, - open, - timer_deadline - } => { - if let Poll::Pending = Pin::new(&mut timer).poll(cx) { - *peer_state = PeerState::DisabledPendingEnable { - timer, - open, - timer_deadline - }; - continue; - } - - debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); - self.events.push(NetworkBehaviourAction::NotifyHandler { - peer_id: peer_id.clone(), - handler: NotifyHandler::All, - event: NotifsHandlerIn::Enable, - }); - *peer_state = PeerState::Enabled { open }; - } - - st @ _ => *peer_state = st, - } - } - - if !self.events.is_empty() { - return Poll::Ready(self.events.remove(0)) - } - - Poll::Pending - } + // A severe protocol error happens when we detect a "bad" peer, such as a peer on + // a different 
chain, or a peer that doesn't speak the same protocol(s). We + // decrease the peer's reputation, hence lowering the chances we try this peer + // again in the short term. + self.peerset.report_peer( + source.clone(), + sc_peerset::ReputationChange::new(i32::min_value(), "Protocol error"), + ); + self.disconnect_peer_inner(&source, Some(Duration::from_secs(5))); + } + } + } + + fn poll( + &mut self, + cx: &mut Context, + _params: &mut impl PollParameters, + ) -> Poll> { + // Poll for instructions from the peerset. + // Note that the peerset is a *best effort* crate, and we have to use defensive programming. + loop { + match futures::Stream::poll_next(Pin::new(&mut self.peerset), cx) { + Poll::Ready(Some(sc_peerset::Message::Accept(index))) => { + self.peerset_report_accept(index); + } + Poll::Ready(Some(sc_peerset::Message::Reject(index))) => { + self.peerset_report_reject(index); + } + Poll::Ready(Some(sc_peerset::Message::Connect(id))) => { + self.peerset_report_connect(id); + } + Poll::Ready(Some(sc_peerset::Message::Drop(id))) => { + self.peerset_report_disconnect(id); + } + Poll::Ready(None) => { + error!(target: "sub-libp2p", "Peerset receiver stream has returned None"); + break; + } + Poll::Pending => break, + } + } + + for (peer_id, peer_state) in self.peers.iter_mut() { + match mem::replace(peer_state, PeerState::Poisoned) { + PeerState::PendingRequest { + mut timer, + timer_deadline, + } => { + if let Poll::Pending = Pin::new(&mut timer).poll(cx) { + *peer_state = PeerState::PendingRequest { + timer, + timer_deadline, + }; + continue; + } + + debug!(target: "sub-libp2p", "Libp2p <= Dial {:?} now that ban has expired", peer_id); + self.events.push(NetworkBehaviourAction::DialPeer { + peer_id: peer_id.clone(), + condition: DialPeerCondition::Disconnected, + }); + *peer_state = PeerState::Requested; + } + + PeerState::DisabledPendingEnable { + mut timer, + open, + timer_deadline, + } => { + if let Poll::Pending = Pin::new(&mut timer).poll(cx) { + *peer_state 
= PeerState::DisabledPendingEnable { + timer, + open, + timer_deadline, + }; + continue; + } + + debug!(target: "sub-libp2p", "Handler({:?}) <= Enable (ban expired)", peer_id); + self.events.push(NetworkBehaviourAction::NotifyHandler { + peer_id: peer_id.clone(), + handler: NotifyHandler::All, + event: NotifsHandlerIn::Enable, + }); + *peer_state = PeerState::Enabled { open }; + } + + st @ _ => *peer_state = st, + } + } + + if !self.events.is_empty() { + return Poll::Ready(self.events.remove(0)); + } + + Poll::Pending + } } diff --git a/client/network/src/protocol/generic_proto/handler.rs b/client/network/src/protocol/generic_proto/handler.rs index f0e2fc4bb8..db8707f233 100644 --- a/client/network/src/protocol/generic_proto/handler.rs +++ b/client/network/src/protocol/generic_proto/handler.rs @@ -14,7 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -pub use self::group::{NotifsHandlerProto, NotifsHandler, NotifsHandlerIn, NotifsHandlerOut}; +pub use self::group::{NotifsHandler, NotifsHandlerIn, NotifsHandlerOut, NotifsHandlerProto}; pub use self::legacy::ConnectionKillError as LegacyConnectionKillError; mod group; diff --git a/client/network/src/protocol/generic_proto/handler/group.rs b/client/network/src/protocol/generic_proto/handler/group.rs index 46b759d458..356b5bb9e0 100644 --- a/client/network/src/protocol/generic_proto/handler/group.rs +++ b/client/network/src/protocol/generic_proto/handler/group.rs @@ -46,26 +46,40 @@ //! 
use crate::protocol::generic_proto::{ - handler::legacy::{LegacyProtoHandler, LegacyProtoHandlerProto, LegacyProtoHandlerIn, LegacyProtoHandlerOut}, - handler::notif_in::{NotifsInHandlerProto, NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut}, - handler::notif_out::{NotifsOutHandlerProto, NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut}, - upgrade::{NotificationsIn, NotificationsOut, NotificationsHandshakeError, RegisteredProtocol, UpgradeCollec}, + handler::legacy::{ + LegacyProtoHandler, LegacyProtoHandlerIn, LegacyProtoHandlerOut, LegacyProtoHandlerProto, + }, + handler::notif_in::{ + NotifsInHandler, NotifsInHandlerIn, NotifsInHandlerOut, NotifsInHandlerProto, + }, + handler::notif_out::{ + NotifsOutHandler, NotifsOutHandlerIn, NotifsOutHandlerOut, NotifsOutHandlerProto, + }, + upgrade::{ + NotificationsHandshakeError, NotificationsIn, NotificationsOut, RegisteredProtocol, + UpgradeCollec, + }, }; use bytes::BytesMut; -use libp2p::core::{either::{EitherError, EitherOutput}, ConnectedPoint, PeerId}; -use libp2p::core::upgrade::{EitherUpgrade, UpgradeError, SelectUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::core::upgrade::{ + EitherUpgrade, InboundUpgrade, OutboundUpgrade, SelectUpgrade, UpgradeError, +}; +use libp2p::core::{ + either::{EitherError, EitherOutput}, + ConnectedPoint, PeerId, +}; use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, + IntoProtocolsHandler, KeepAlive, NegotiatedSubstream, ProtocolsHandler, ProtocolsHandlerEvent, + ProtocolsHandlerUpgrErr, SubstreamProtocol, }; use log::{debug, error}; use prometheus_endpoint::HistogramVec; -use std::{borrow::Cow, error, io, str, task::{Context, Poll}}; +use std::{ + borrow::Cow, + error, io, str, + task::{Context, Poll}, +}; /// Implements the `IntoProtocolsHandler` trait of libp2p. 
/// @@ -75,458 +89,518 @@ use std::{borrow::Cow, error, io, str, task::{Context, Poll}}; /// /// See the documentation at the module level for more information. pub struct NotifsHandlerProto { - /// Prototypes for handlers for inbound substreams. - in_handlers: Vec, + /// Prototypes for handlers for inbound substreams. + in_handlers: Vec, - /// Prototypes for handlers for outbound substreams. - out_handlers: Vec, + /// Prototypes for handlers for outbound substreams. + out_handlers: Vec, - /// Prototype for handler for backwards-compatibility. - legacy: LegacyProtoHandlerProto, + /// Prototype for handler for backwards-compatibility. + legacy: LegacyProtoHandlerProto, } /// The actual handler once the connection has been established. /// /// See the documentation at the module level for more information. pub struct NotifsHandler { - /// Handlers for inbound substreams. - in_handlers: Vec, + /// Handlers for inbound substreams. + in_handlers: Vec, - /// Handlers for outbound substreams. - out_handlers: Vec, + /// Handlers for outbound substreams. + out_handlers: Vec, - /// Handler for backwards-compatibility. - legacy: LegacyProtoHandler, + /// Handler for backwards-compatibility. + legacy: LegacyProtoHandler, - /// State of this handler. - enabled: EnabledState, + /// State of this handler. + enabled: EnabledState, - /// If we receive inbound substream requests while in initialization mode, - /// we push the corresponding index here and process them when the handler - /// gets enabled/disabled. - pending_in: Vec, + /// If we receive inbound substream requests while in initialization mode, + /// we push the corresponding index here and process them when the handler + /// gets enabled/disabled. 
+ pending_in: Vec, } #[derive(Debug, Clone, PartialEq, Eq)] enum EnabledState { - Initial, - Enabled, - Disabled, + Initial, + Enabled, + Disabled, } impl IntoProtocolsHandler for NotifsHandlerProto { - type Handler = NotifsHandler; - - fn inbound_protocol(&self) -> SelectUpgrade, RegisteredProtocol> { - let in_handlers = self.in_handlers.iter() - .map(|h| h.inbound_protocol()) - .collect::>(); - - SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) - } - - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - NotifsHandler { - in_handlers: self.in_handlers - .into_iter() - .map(|p| p.into_handler(remote_peer_id, connected_point)) - .collect(), - out_handlers: self.out_handlers - .into_iter() - .map(|p| p.into_handler(remote_peer_id, connected_point)) - .collect(), - legacy: self.legacy.into_handler(remote_peer_id, connected_point), - enabled: EnabledState::Initial, - pending_in: Vec::new(), - } - } + type Handler = NotifsHandler; + + fn inbound_protocol( + &self, + ) -> SelectUpgrade, RegisteredProtocol> { + let in_handlers = self + .in_handlers + .iter() + .map(|h| h.inbound_protocol()) + .collect::>(); + + SelectUpgrade::new(in_handlers, self.legacy.inbound_protocol()) + } + + fn into_handler( + self, + remote_peer_id: &PeerId, + connected_point: &ConnectedPoint, + ) -> Self::Handler { + NotifsHandler { + in_handlers: self + .in_handlers + .into_iter() + .map(|p| p.into_handler(remote_peer_id, connected_point)) + .collect(), + out_handlers: self + .out_handlers + .into_iter() + .map(|p| p.into_handler(remote_peer_id, connected_point)) + .collect(), + legacy: self.legacy.into_handler(remote_peer_id, connected_point), + enabled: EnabledState::Initial, + pending_in: Vec::new(), + } + } } /// Event that can be received by a `NotifsHandler`. #[derive(Debug, Clone)] pub enum NotifsHandlerIn { - /// The node should start using custom protocols. - Enable, - - /// The node should stop using custom protocols. 
- Disable, - - /// Sends a message through the custom protocol substream. - /// - /// > **Note**: This must **not** be a `ConsensusMessage`, `Transactions`, or - /// > `BlockAnnounce` message. - SendLegacy { - /// The message to send. - message: Vec, - }, - - /// Sends a notifications message. - SendNotification { - /// Name of the protocol for the message. - /// - /// Must match one of the registered protocols. For backwards-compatibility reasons, if - /// the remote doesn't support this protocol, we use the legacy substream. - protocol_name: Cow<'static, [u8]>, - - /// Message to send on the legacy substream if the protocol isn't available. - /// - /// This corresponds to what you would have sent with `SendLegacy`. - encoded_fallback_message: Vec, - - /// The message to send. - message: Vec, - }, + /// The node should start using custom protocols. + Enable, + + /// The node should stop using custom protocols. + Disable, + + /// Sends a message through the custom protocol substream. + /// + /// > **Note**: This must **not** be a `ConsensusMessage`, `Transactions`, or + /// > `BlockAnnounce` message. + SendLegacy { + /// The message to send. + message: Vec, + }, + + /// Sends a notifications message. + SendNotification { + /// Name of the protocol for the message. + /// + /// Must match one of the registered protocols. For backwards-compatibility reasons, if + /// the remote doesn't support this protocol, we use the legacy substream. + protocol_name: Cow<'static, [u8]>, + + /// Message to send on the legacy substream if the protocol isn't available. + /// + /// This corresponds to what you would have sent with `SendLegacy`. + encoded_fallback_message: Vec, + + /// The message to send. + message: Vec, + }, } /// Event that can be emitted by a `NotifsHandler`. #[derive(Debug)] pub enum NotifsHandlerOut { - /// The connection is open for custom protocols. - Open { - /// The endpoint of the connection that is open for custom protocols. 
- endpoint: ConnectedPoint, - }, - - /// The connection is closed for custom protocols. - Closed { - /// The reason for closing, for diagnostic purposes. - reason: Cow<'static, str>, - /// The endpoint of the connection that closed for custom protocols. - endpoint: ConnectedPoint, - }, - - /// Received a non-gossiping message on the legacy substream. - CustomMessage { - /// Message that has been received. - /// - /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a - /// notification. - message: BytesMut, - }, - - /// Received a message on a custom protocol substream. - Notification { - /// Name of the protocol of the message. - protocol_name: Cow<'static, [u8]>, - - /// Message that has been received. - message: BytesMut, - }, - - /// A substream to the remote is clogged. The send buffer is very large, and we should print - /// a diagnostic message and/or avoid sending more data. - Clogged { - /// Copy of the messages that are within the buffer, for further diagnostic. - messages: Vec>, - }, - - /// An error has happened on the protocol level with this node. - ProtocolError { - /// If true the error is severe, such as a protocol violation. - is_severe: bool, - /// The error that happened. - error: Box, - }, + /// The connection is open for custom protocols. + Open { + /// The endpoint of the connection that is open for custom protocols. + endpoint: ConnectedPoint, + }, + + /// The connection is closed for custom protocols. + Closed { + /// The reason for closing, for diagnostic purposes. + reason: Cow<'static, str>, + /// The endpoint of the connection that closed for custom protocols. + endpoint: ConnectedPoint, + }, + + /// Received a non-gossiping message on the legacy substream. + CustomMessage { + /// Message that has been received. + /// + /// Keep in mind that this can be a `ConsensusMessage` message, which then contains a + /// notification. 
+ message: BytesMut, + }, + + /// Received a message on a custom protocol substream. + Notification { + /// Name of the protocol of the message. + protocol_name: Cow<'static, [u8]>, + + /// Message that has been received. + message: BytesMut, + }, + + /// A substream to the remote is clogged. The send buffer is very large, and we should print + /// a diagnostic message and/or avoid sending more data. + Clogged { + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec>, + }, + + /// An error has happened on the protocol level with this node. + ProtocolError { + /// If true the error is severe, such as a protocol violation. + is_severe: bool, + /// The error that happened. + error: Box, + }, } impl NotifsHandlerProto { - /// Builds a new handler. - /// - /// The `queue_size_report` is an optional Prometheus metric that can report the size of the - /// messages queue. If passed, it must have one label for the protocol name. - pub fn new(legacy: RegisteredProtocol, list: impl Into, Vec)>>, queue_size_report: Option) -> Self { - let list = list.into(); - - let out_handlers = list - .clone() - .into_iter() - .map(|(p, _)| { - let queue_size_report = queue_size_report.as_ref().and_then(|qs| { - if let Ok(utf8) = str::from_utf8(&p) { - Some(qs.with_label_values(&[utf8])) - } else { - log::warn!("Ignoring Prometheus metric because {:?} isn't UTF-8", p); - None - } - }); - NotifsOutHandlerProto::new(p, queue_size_report) - }).collect(); - - NotifsHandlerProto { - in_handlers: list.clone().into_iter().map(|(p, _)| NotifsInHandlerProto::new(p)).collect(), - out_handlers, - legacy: LegacyProtoHandlerProto::new(legacy), - } - } + /// Builds a new handler. + /// + /// The `queue_size_report` is an optional Prometheus metric that can report the size of the + /// messages queue. If passed, it must have one label for the protocol name. 
+ pub fn new( + legacy: RegisteredProtocol, + list: impl Into, Vec)>>, + queue_size_report: Option, + ) -> Self { + let list = list.into(); + + let out_handlers = list + .clone() + .into_iter() + .map(|(p, _)| { + let queue_size_report = queue_size_report.as_ref().and_then(|qs| { + if let Ok(utf8) = str::from_utf8(&p) { + Some(qs.with_label_values(&[utf8])) + } else { + log::warn!("Ignoring Prometheus metric because {:?} isn't UTF-8", p); + None + } + }); + NotifsOutHandlerProto::new(p, queue_size_report) + }) + .collect(); + + NotifsHandlerProto { + in_handlers: list + .clone() + .into_iter() + .map(|(p, _)| NotifsInHandlerProto::new(p)) + .collect(), + out_handlers, + legacy: LegacyProtoHandlerProto::new(legacy), + } + } } impl ProtocolsHandler for NotifsHandler { - type InEvent = NotifsHandlerIn; - type OutEvent = NotifsHandlerOut; - type Error = EitherError< - EitherError< - ::Error, - ::Error, - >, - ::Error, - >; - type InboundProtocol = SelectUpgrade, RegisteredProtocol>; - type OutboundProtocol = EitherUpgrade; - // Index within the `out_handlers`; None for legacy - type OutboundOpenInfo = Option; - - fn listen_protocol(&self) -> SubstreamProtocol { - let in_handlers = self.in_handlers.iter() - .map(|h| h.listen_protocol().into_upgrade().1) - .collect::>(); - - let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); - SubstreamProtocol::new(proto) - } - - fn inject_fully_negotiated_inbound( - &mut self, - out: >::Output - ) { - match out { - EitherOutput::First((out, num)) => - self.in_handlers[num].inject_fully_negotiated_inbound(out), - EitherOutput::Second(out) => - self.legacy.inject_fully_negotiated_inbound(out), - } - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - num: Self::OutboundOpenInfo - ) { - match (out, num) { - (EitherOutput::First(out), Some(num)) => - self.out_handlers[num].inject_fully_negotiated_outbound(out, ()), - (EitherOutput::Second(out), None) => - 
self.legacy.inject_fully_negotiated_outbound(out, ()), - _ => error!("inject_fully_negotiated_outbound called with wrong parameters"), - } - } - - fn inject_event(&mut self, message: NotifsHandlerIn) { - match message { - NotifsHandlerIn::Enable => { - if let EnabledState::Enabled = self.enabled { - debug!("enabling already-enabled handler"); - } - self.enabled = EnabledState::Enabled; - self.legacy.inject_event(LegacyProtoHandlerIn::Enable); - for handler in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Enable { - initial_message: vec![] - }); - } - for num in self.pending_in.drain(..) { - self.in_handlers[num].inject_event(NotifsInHandlerIn::Accept(vec![])); - } - }, - NotifsHandlerIn::Disable => { - if let EnabledState::Disabled = self.enabled { - debug!("disabling already-disabled handler"); - } - self.legacy.inject_event(LegacyProtoHandlerIn::Disable); - // The notifications protocols start in the disabled state. If we were in the - // "Initial" state, then we shouldn't disable the notifications protocols again. - if self.enabled != EnabledState::Initial { - for handler in &mut self.out_handlers { - handler.inject_event(NotifsOutHandlerIn::Disable); - } - } - self.enabled = EnabledState::Disabled; - for num in self.pending_in.drain(..) { - self.in_handlers[num].inject_event(NotifsInHandlerIn::Refuse); - } - }, - NotifsHandlerIn::SendLegacy { message } => - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), - NotifsHandlerIn::SendNotification { message, encoded_fallback_message, protocol_name } => { - for handler in &mut self.out_handlers { - if handler.protocol_name() != &protocol_name[..] 
{ - continue; - } - - if handler.is_open() { - handler.inject_event(NotifsOutHandlerIn::Send(message)); - return; - } - } - - self.legacy.inject_event(LegacyProtoHandlerIn::SendCustomMessage { - message: encoded_fallback_message, - }); - }, - } - } - - fn inject_dial_upgrade_error( - &mut self, - num: Option, - err: ProtocolsHandlerUpgrErr> - ) { - match (err, num) { - (ProtocolsHandlerUpgrErr::Timeout, Some(num)) => - self.out_handlers[num].inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timeout - ), - (ProtocolsHandlerUpgrErr::Timeout, None) => - self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), - (ProtocolsHandlerUpgrErr::Timer, Some(num)) => - self.out_handlers[num].inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Timer - ), - (ProtocolsHandlerUpgrErr::Timer, None) => - self.legacy.inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer), - (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), Some(num)) => - self.out_handlers[num].inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) - ), - (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), None) => - self.legacy.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)) - ), - (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))), Some(num)) => - self.out_handlers[num].inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) - ), - (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(err))), None) => - self.legacy.inject_dial_upgrade_error( - (), - ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)) - ), - _ => error!("inject_dial_upgrade_error called with bad parameters"), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - // Iterate over each handler and return the maximum value. 
- - let mut ret = self.legacy.connection_keep_alive(); - if ret.is_yes() { - return KeepAlive::Yes; - } - - for handler in &self.in_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - for handler in &self.out_handlers { - let val = handler.connection_keep_alive(); - if val.is_yes() { - return KeepAlive::Yes; - } - if ret < val { ret = val; } - } - - ret - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - while let Poll::Ready(ev) = self.legacy.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(EitherUpgrade::B), - info: None, - }), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { endpoint, .. }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Open { endpoint } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { endpoint, reason }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Closed { endpoint, reason } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::CustomMessage { message } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::Clogged { messages }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::Clogged { messages } - )), - ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { is_severe, error }) => - return Poll::Ready(ProtocolsHandlerEvent::Custom( - NotifsHandlerOut::ProtocolError { is_severe, error } - )), - ProtocolsHandlerEvent::Close(err) => - return Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))), - } - } - - for (handler_num, handler) in self.in_handlers.iter_mut().enumerate() { - 
while let Poll::Ready(ev) = handler.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => - error!("Incoming substream handler tried to open a substream"), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => - match self.enabled { - EnabledState::Initial => self.pending_in.push(handler_num), - EnabledState::Enabled => - handler.inject_event(NotifsInHandlerIn::Accept(vec![])), - EnabledState::Disabled => - handler.inject_event(NotifsInHandlerIn::Refuse), - }, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { - // Note that right now the legacy substream has precedence over - // everything. If it is not open, then we consider that nothing is open. - if self.legacy.is_open() { - let msg = NotifsHandlerOut::Notification { - message, - protocol_name: handler.protocol_name().to_owned().into(), - }; - return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); - } - }, - } - } - } - - for (handler_num, handler) in self.out_handlers.iter_mut().enumerate() { - while let Poll::Ready(ev) = handler.poll(cx) { - match ev { - ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => - return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: protocol.map_upgrade(EitherUpgrade::A), - info: Some(handler_num), - }), - ProtocolsHandlerEvent::Close(err) => void::unreachable(err), - - // At the moment we don't actually care whether any notifications protocol - // opens or closes. - // Whether our communications with the remote are open or closed entirely - // depends on the legacy substream, because as long as we are open the user of - // this struct might try to send legacy protocol messages which we need to - // deliver for things to work properly. - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. 
}) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {}, - ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {}, - } - } - } - - Poll::Pending - } + type InEvent = NotifsHandlerIn; + type OutEvent = NotifsHandlerOut; + type Error = EitherError< + EitherError< + ::Error, + ::Error, + >, + ::Error, + >; + type InboundProtocol = SelectUpgrade, RegisteredProtocol>; + type OutboundProtocol = EitherUpgrade; + // Index within the `out_handlers`; None for legacy + type OutboundOpenInfo = Option; + + fn listen_protocol(&self) -> SubstreamProtocol { + let in_handlers = self + .in_handlers + .iter() + .map(|h| h.listen_protocol().into_upgrade().1) + .collect::>(); + + let proto = SelectUpgrade::new(in_handlers, self.legacy.listen_protocol().into_upgrade().1); + SubstreamProtocol::new(proto) + } + + fn inject_fully_negotiated_inbound( + &mut self, + out: >::Output, + ) { + match out { + EitherOutput::First((out, num)) => { + self.in_handlers[num].inject_fully_negotiated_inbound(out) + } + EitherOutput::Second(out) => self.legacy.inject_fully_negotiated_inbound(out), + } + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + num: Self::OutboundOpenInfo, + ) { + match (out, num) { + (EitherOutput::First(out), Some(num)) => { + self.out_handlers[num].inject_fully_negotiated_outbound(out, ()) + } + (EitherOutput::Second(out), None) => { + self.legacy.inject_fully_negotiated_outbound(out, ()) + } + _ => error!("inject_fully_negotiated_outbound called with wrong parameters"), + } + } + + fn inject_event(&mut self, message: NotifsHandlerIn) { + match message { + NotifsHandlerIn::Enable => { + if let EnabledState::Enabled = self.enabled { + debug!("enabling already-enabled handler"); + } + self.enabled = EnabledState::Enabled; + self.legacy.inject_event(LegacyProtoHandlerIn::Enable); + for handler in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Enable { + initial_message: vec![], + }); + } + for num in 
self.pending_in.drain(..) { + self.in_handlers[num].inject_event(NotifsInHandlerIn::Accept(vec![])); + } + } + NotifsHandlerIn::Disable => { + if let EnabledState::Disabled = self.enabled { + debug!("disabling already-disabled handler"); + } + self.legacy.inject_event(LegacyProtoHandlerIn::Disable); + // The notifications protocols start in the disabled state. If we were in the + // "Initial" state, then we shouldn't disable the notifications protocols again. + if self.enabled != EnabledState::Initial { + for handler in &mut self.out_handlers { + handler.inject_event(NotifsOutHandlerIn::Disable); + } + } + self.enabled = EnabledState::Disabled; + for num in self.pending_in.drain(..) { + self.in_handlers[num].inject_event(NotifsInHandlerIn::Refuse); + } + } + NotifsHandlerIn::SendLegacy { message } => self + .legacy + .inject_event(LegacyProtoHandlerIn::SendCustomMessage { message }), + NotifsHandlerIn::SendNotification { + message, + encoded_fallback_message, + protocol_name, + } => { + for handler in &mut self.out_handlers { + if handler.protocol_name() != &protocol_name[..] 
{ + continue; + } + + if handler.is_open() { + handler.inject_event(NotifsOutHandlerIn::Send(message)); + return; + } + } + + self.legacy + .inject_event(LegacyProtoHandlerIn::SendCustomMessage { + message: encoded_fallback_message, + }); + } + } + } + + fn inject_dial_upgrade_error( + &mut self, + num: Option, + err: ProtocolsHandlerUpgrErr>, + ) { + match (err, num) { + (ProtocolsHandlerUpgrErr::Timeout, Some(num)) => self.out_handlers[num] + .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), + (ProtocolsHandlerUpgrErr::Timeout, None) => self + .legacy + .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timeout), + (ProtocolsHandlerUpgrErr::Timer, Some(num)) => { + self.out_handlers[num].inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer) + } + (ProtocolsHandlerUpgrErr::Timer, None) => self + .legacy + .inject_dial_upgrade_error((), ProtocolsHandlerUpgrErr::Timer), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), Some(num)) => { + self.out_handlers[num].inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), + ) + } + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), None) => { + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Select(err)), + ) + } + ( + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::A(err))), + Some(num), + ) => self.out_handlers[num].inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), + ), + (ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(EitherError::B(err))), None) => { + self.legacy.inject_dial_upgrade_error( + (), + ProtocolsHandlerUpgrErr::Upgrade(UpgradeError::Apply(err)), + ) + } + _ => error!("inject_dial_upgrade_error called with bad parameters"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + // Iterate over each handler and return the maximum value. 
+ + let mut ret = self.legacy.connection_keep_alive(); + if ret.is_yes() { + return KeepAlive::Yes; + } + + for handler in &self.in_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { + ret = val; + } + } + + for handler in &self.out_handlers { + let val = handler.connection_keep_alive(); + if val.is_yes() { + return KeepAlive::Yes; + } + if ret < val { + ret = val; + } + } + + ret + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + while let Poll::Ready(ev) = self.legacy.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => { + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::B), + info: None, + }) + } + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolOpen { + endpoint, + .. 
+ }) => { + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::Open { + endpoint, + })) + } + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomProtocolClosed { + endpoint, + reason, + }) => { + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::Closed { + endpoint, + reason, + })) + } + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::CustomMessage { message }) => { + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::CustomMessage { message }, + )) + } + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::Clogged { messages }) => { + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsHandlerOut::Clogged { + messages, + })) + } + ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { + is_severe, + error, + }) => { + return Poll::Ready(ProtocolsHandlerEvent::Custom( + NotifsHandlerOut::ProtocolError { is_severe, error }, + )) + } + ProtocolsHandlerEvent::Close(err) => { + return Poll::Ready(ProtocolsHandlerEvent::Close(EitherError::B(err))) + } + } + } + + for (handler_num, handler) in self.in_handlers.iter_mut().enumerate() { + while let Poll::Ready(ev) = handler.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { .. } => { + error!("Incoming substream handler tried to open a substream") + } + ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(_)) => match self + .enabled + { + EnabledState::Initial => self.pending_in.push(handler_num), + EnabledState::Enabled => { + handler.inject_event(NotifsInHandlerIn::Accept(vec![])) + } + EnabledState::Disabled => handler.inject_event(NotifsInHandlerIn::Refuse), + }, + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed) => {} + ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(message)) => { + // Note that right now the legacy substream has precedence over + // everything. If it is not open, then we consider that nothing is open. 
+ if self.legacy.is_open() { + let msg = NotifsHandlerOut::Notification { + message, + protocol_name: handler.protocol_name().to_owned().into(), + }; + return Poll::Ready(ProtocolsHandlerEvent::Custom(msg)); + } + } + } + } + } + + for (handler_num, handler) in self.out_handlers.iter_mut().enumerate() { + while let Poll::Ready(ev) = handler.poll(cx) { + match ev { + ProtocolsHandlerEvent::OutboundSubstreamRequest { protocol, info: () } => { + return Poll::Ready(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: protocol.map_upgrade(EitherUpgrade::A), + info: Some(handler_num), + }) + } + ProtocolsHandlerEvent::Close(err) => void::unreachable(err), + + // At the moment we don't actually care whether any notifications protocol + // opens or closes. + // Whether our communications with the remote are open or closed entirely + // depends on the legacy substream, because as long as we are open the user of + // this struct might try to send legacy protocol messages which we need to + // deliver for things to work properly. + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Open { .. }) => {} + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed) => {} + ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Refused) => {} + } + } + } + + Poll::Pending + } } diff --git a/client/network/src/protocol/generic_proto/handler/legacy.rs b/client/network/src/protocol/generic_proto/handler/legacy.rs index bc84fd847c..684ff10eeb 100644 --- a/client/network/src/protocol/generic_proto/handler/legacy.rs +++ b/client/network/src/protocol/generic_proto/handler/legacy.rs @@ -14,24 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use crate::protocol::generic_proto::upgrade::{RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream}; +use crate::protocol::generic_proto::upgrade::{ + RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolSubstream, +}; use bytes::BytesMut; use futures::prelude::*; use futures_timer::Delay; -use libp2p::core::{ConnectedPoint, PeerId, Endpoint}; use libp2p::core::upgrade::{InboundUpgrade, OutboundUpgrade}; +use libp2p::core::{ConnectedPoint, Endpoint, PeerId}; use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, + IntoProtocolsHandler, KeepAlive, NegotiatedSubstream, ProtocolsHandler, ProtocolsHandlerEvent, + ProtocolsHandlerUpgrErr, SubstreamProtocol, }; use log::{debug, error}; use smallvec::{smallvec, SmallVec}; use std::{borrow::Cow, error, fmt, io, mem, time::Duration}; -use std::{pin::Pin, task::{Context, Poll}}; +use std::{ + pin::Pin, + task::{Context, Poll}, +}; /// Implements the `IntoProtocolsHandler` trait of libp2p. /// @@ -88,559 +89,623 @@ use std::{pin::Pin, task::{Context, Poll}}; /// Re-opening it can then be performed by closing all active substream and re-opening one. /// pub struct LegacyProtoHandlerProto { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, + /// Configuration for the protocol upgrade to negotiate. + protocol: RegisteredProtocol, } impl LegacyProtoHandlerProto { - /// Builds a new `LegacyProtoHandlerProto`. - pub fn new(protocol: RegisteredProtocol) -> Self { - LegacyProtoHandlerProto { - protocol, - } - } + /// Builds a new `LegacyProtoHandlerProto`. 
+ pub fn new(protocol: RegisteredProtocol) -> Self { + LegacyProtoHandlerProto { protocol } + } } impl IntoProtocolsHandler for LegacyProtoHandlerProto { - type Handler = LegacyProtoHandler; - - fn inbound_protocol(&self) -> RegisteredProtocol { - self.protocol.clone() - } - - fn into_handler(self, remote_peer_id: &PeerId, connected_point: &ConnectedPoint) -> Self::Handler { - LegacyProtoHandler { - protocol: self.protocol, - endpoint: connected_point.clone(), - remote_peer_id: remote_peer_id.clone(), - state: ProtocolState::Init { - substreams: SmallVec::new(), - init_deadline: Delay::new(Duration::from_secs(5)) - }, - events_queue: SmallVec::new(), - } - } + type Handler = LegacyProtoHandler; + + fn inbound_protocol(&self) -> RegisteredProtocol { + self.protocol.clone() + } + + fn into_handler( + self, + remote_peer_id: &PeerId, + connected_point: &ConnectedPoint, + ) -> Self::Handler { + LegacyProtoHandler { + protocol: self.protocol, + endpoint: connected_point.clone(), + remote_peer_id: remote_peer_id.clone(), + state: ProtocolState::Init { + substreams: SmallVec::new(), + init_deadline: Delay::new(Duration::from_secs(5)), + }, + events_queue: SmallVec::new(), + } + } } /// The actual handler once the connection has been established. pub struct LegacyProtoHandler { - /// Configuration for the protocol upgrade to negotiate. - protocol: RegisteredProtocol, - - /// State of the communications with the remote. - state: ProtocolState, - - /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have - /// any influence on the behaviour. - remote_peer_id: PeerId, - - /// Whether we are the connection dialer or listener. Used to determine who, between the local - /// node and the remote node, has priority. - endpoint: ConnectedPoint, - - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. 
- events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, + /// Configuration for the protocol upgrade to negotiate. + protocol: RegisteredProtocol, + + /// State of the communications with the remote. + state: ProtocolState, + + /// Identifier of the node we're talking to. Used only for logging purposes and shouldn't have + /// any influence on the behaviour. + remote_peer_id: PeerId, + + /// Whether we are the connection dialer or listener. Used to determine who, between the local + /// node and the remote node, has priority. + endpoint: ConnectedPoint, + + /// Queue of events to send to the outside. + /// + /// This queue must only ever be modified to insert elements at the back, or remove the first + /// element. + events_queue: SmallVec< + [ProtocolsHandlerEvent; + 16], + >, } /// State of the handler. enum ProtocolState { - /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. - Init { - /// List of substreams opened by the remote but that haven't been processed yet. - substreams: SmallVec<[RegisteredProtocolSubstream; 6]>, - /// Deadline after which the initialization is abnormally long. - init_deadline: Delay, - }, - - /// Handler is opening a substream in order to activate itself. - /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. - Opening { - /// Deadline after which the opening is abnormally long. - deadline: Delay, - }, - - /// Normal operating mode. Contains the substreams that are open. - /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. - Normal { - /// The substreams where bidirectional communications happen. - substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, - /// Contains substreams which are being shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, - }, - - /// We are disabled. Contains substreams that are being closed. 
- /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the - /// outside or we have never sent any `CustomProtocolOpen` in the first place. - Disabled { - /// List of substreams to shut down. - shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, - - /// If true, we should reactivate the handler after all the substreams in `shutdown` have - /// been closed. - /// - /// Since we don't want to mix old and new substreams, we wait for all old substreams to - /// be closed before opening any new one. - reenable: bool, - }, - - /// In this state, we don't care about anything anymore and need to kill the connection as soon - /// as possible. - KillAsap, - - /// We sometimes temporarily switch to this state during processing. If we are in this state - /// at the beginning of a method, that means something bad happened in the source code. - Poisoned, + /// Waiting for the behaviour to tell the handler whether it is enabled or disabled. + Init { + /// List of substreams opened by the remote but that haven't been processed yet. + substreams: SmallVec<[RegisteredProtocolSubstream; 6]>, + /// Deadline after which the initialization is abnormally long. + init_deadline: Delay, + }, + + /// Handler is opening a substream in order to activate itself. + /// If we are in this state, we haven't sent any `CustomProtocolOpen` yet. + Opening { + /// Deadline after which the opening is abnormally long. + deadline: Delay, + }, + + /// Normal operating mode. Contains the substreams that are open. + /// If we are in this state, we have sent a `CustomProtocolOpen` message to the outside. + Normal { + /// The substreams where bidirectional communications happen. + substreams: SmallVec<[RegisteredProtocolSubstream; 4]>, + /// Contains substreams which are being shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 4]>, + }, + + /// We are disabled. Contains substreams that are being closed. 
+ /// If we are in this state, either we have sent a `CustomProtocolClosed` message to the + /// outside or we have never sent any `CustomProtocolOpen` in the first place. + Disabled { + /// List of substreams to shut down. + shutdown: SmallVec<[RegisteredProtocolSubstream; 6]>, + + /// If true, we should reactivate the handler after all the substreams in `shutdown` have + /// been closed. + /// + /// Since we don't want to mix old and new substreams, we wait for all old substreams to + /// be closed before opening any new one. + reenable: bool, + }, + + /// In this state, we don't care about anything anymore and need to kill the connection as soon + /// as possible. + KillAsap, + + /// We sometimes temporarily switch to this state during processing. If we are in this state + /// at the beginning of a method, that means something bad happened in the source code. + Poisoned, } /// Event that can be received by a `LegacyProtoHandler`. #[derive(Debug)] pub enum LegacyProtoHandlerIn { - /// The node should start using custom protocols. - Enable, + /// The node should start using custom protocols. + Enable, - /// The node should stop using custom protocols. - Disable, + /// The node should stop using custom protocols. + Disable, - /// Sends a message through a custom protocol substream. - SendCustomMessage { - /// The message to send. - message: Vec, - }, + /// Sends a message through a custom protocol substream. + SendCustomMessage { + /// The message to send. + message: Vec, + }, } /// Event that can be emitted by a `LegacyProtoHandler`. #[derive(Debug)] pub enum LegacyProtoHandlerOut { - /// Opened a custom protocol with the remote. - CustomProtocolOpen { - /// Version of the protocol that has been opened. - version: u8, - /// The connected endpoint. - endpoint: ConnectedPoint, - }, - - /// Closed a custom protocol with the remote. - CustomProtocolClosed { - /// Reason why the substream closed, for diagnostic purposes. 
- reason: Cow<'static, str>, - /// The connected endpoint. - endpoint: ConnectedPoint, - }, - - /// Receives a message on a custom protocol substream. - CustomMessage { - /// Message that has been received. - message: BytesMut, - }, - - /// A substream to the remote is clogged. The send buffer is very large, and we should print - /// a diagnostic message and/or avoid sending more data. - Clogged { - /// Copy of the messages that are within the buffer, for further diagnostic. - messages: Vec>, - }, - - /// An error has happened on the protocol level with this node. - ProtocolError { - /// If true the error is severe, such as a protocol violation. - is_severe: bool, - /// The error that happened. - error: Box, - }, + /// Opened a custom protocol with the remote. + CustomProtocolOpen { + /// Version of the protocol that has been opened. + version: u8, + /// The connected endpoint. + endpoint: ConnectedPoint, + }, + + /// Closed a custom protocol with the remote. + CustomProtocolClosed { + /// Reason why the substream closed, for diagnostic purposes. + reason: Cow<'static, str>, + /// The connected endpoint. + endpoint: ConnectedPoint, + }, + + /// Receives a message on a custom protocol substream. + CustomMessage { + /// Message that has been received. + message: BytesMut, + }, + + /// A substream to the remote is clogged. The send buffer is very large, and we should print + /// a diagnostic message and/or avoid sending more data. + Clogged { + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec>, + }, + + /// An error has happened on the protocol level with this node. + ProtocolError { + /// If true the error is severe, such as a protocol violation. + is_severe: bool, + /// The error that happened. + error: Box, + }, } impl LegacyProtoHandler { - /// Returns true if the legacy substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - ProtocolState::Init { substreams, .. 
} => !substreams.is_empty(), - ProtocolState::Opening { .. } => false, - ProtocolState::Normal { substreams, .. } => !substreams.is_empty(), - ProtocolState::Disabled { .. } => false, - ProtocolState::KillAsap => false, - ProtocolState::Poisoned => false, - } - } - - /// Enables the handler. - fn enable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + /// Returns true if the legacy substream is currently open. + pub fn is_open(&self) -> bool { + match &self.state { + ProtocolState::Init { substreams, .. } => !substreams.is_empty(), + ProtocolState::Opening { .. } => false, + ProtocolState::Normal { substreams, .. } => !substreams.is_empty(), + ProtocolState::Disabled { .. } => false, + ProtocolState::KillAsap => false, + ProtocolState::Poisoned => false, + } + } + + /// Enables the handler. + fn enable(&mut self) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: incoming, .. } => { - if incoming.is_empty() { - if let ConnectedPoint::Dialer { .. } = self.endpoint { - self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.protocol.clone()), - info: (), - }); - } - ProtocolState::Opening { - deadline: Delay::new(Duration::from_secs(60)) - } - } else { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: incoming[0].protocol_version(), - endpoint: self.endpoint.clone() - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: incoming.into_iter().collect(), - shutdown: SmallVec::new() - } - } - } - - st @ ProtocolState::KillAsap => st, - st @ ProtocolState::Opening { .. 
} => st, - st @ ProtocolState::Normal { .. } => st, - ProtocolState::Disabled { shutdown, .. } => { - ProtocolState::Disabled { shutdown, reenable: true } - } - } - } - - /// Disables the handler. - fn disable(&mut self) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + ProtocolState::Poisoned + } + + ProtocolState::Init { + substreams: incoming, + .. + } => { + if incoming.is_empty() { + if let ConnectedPoint::Dialer { .. } = self.endpoint { + self.events_queue + .push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.protocol.clone()), + info: (), + }); + } + ProtocolState::Opening { + deadline: Delay::new(Duration::from_secs(60)), + } + } else { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { + version: incoming[0].protocol_version(), + endpoint: self.endpoint.clone(), + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Normal { + substreams: incoming.into_iter().collect(), + shutdown: SmallVec::new(), + } + } + } + + st @ ProtocolState::KillAsap => st, + st @ ProtocolState::Opening { .. } => st, + st @ ProtocolState::Normal { .. } => st, + ProtocolState::Disabled { shutdown, .. } => ProtocolState::Disabled { + shutdown, + reenable: true, + }, + } + } + + /// Disables the handler. + fn disable(&mut self) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { substreams: mut shutdown, .. } => { - for s in &mut shutdown { - s.shutdown(); - } - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::Opening { .. } | ProtocolState::Normal { .. 
} => - // At the moment, if we get disabled while things were working, we kill the entire - // connection in order to force a reset of the state. - // This is obviously an extremely shameful way to do things, but at the time of - // the writing of this comment, the networking works very poorly and a solution - // needs to be found. - ProtocolState::KillAsap, - - ProtocolState::Disabled { shutdown, .. } => - ProtocolState::Disabled { shutdown, reenable: false }, - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - - /// Polls the state for events. Optionally returns an event to produce. - #[must_use] - fn poll_state(&mut self, cx: &mut Context) - -> Option> { - match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + ProtocolState::Poisoned + } + + ProtocolState::Init { + substreams: mut shutdown, + .. + } => { + for s in &mut shutdown { + s.shutdown(); + } + ProtocolState::Disabled { + shutdown, + reenable: false, + } + } + + ProtocolState::Opening { .. } | ProtocolState::Normal { .. } => + // At the moment, if we get disabled while things were working, we kill the entire + // connection in order to force a reset of the state. + // This is obviously an extremely shameful way to do things, but at the time of + // the writing of this comment, the networking works very poorly and a solution + // needs to be found. + { + ProtocolState::KillAsap + } + + ProtocolState::Disabled { shutdown, .. } => ProtocolState::Disabled { + shutdown, + reenable: false, + }, + + ProtocolState::KillAsap => ProtocolState::KillAsap, + }; + } + + /// Polls the state for events. Optionally returns an event to produce. 
+ #[must_use] + fn poll_state( + &mut self, + cx: &mut Context, + ) -> Option< + ProtocolsHandlerEvent, + > { + match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - self.state = ProtocolState::Poisoned; - None - } - - ProtocolState::Init { substreams, mut init_deadline } => { - match Pin::new(&mut init_deadline).poll(cx) { - Poll::Ready(()) => { - init_deadline = Delay::new(Duration::from_secs(60)); - error!(target: "sub-libp2p", "Handler initialization process is too long \ + self.state = ProtocolState::Poisoned; + None + } + + ProtocolState::Init { + substreams, + mut init_deadline, + } => { + match Pin::new(&mut init_deadline).poll(cx) { + Poll::Ready(()) => { + init_deadline = Delay::new(Duration::from_secs(60)); + error!(target: "sub-libp2p", "Handler initialization process is too long \ with {:?}", self.remote_peer_id) - }, - Poll::Pending => {} - } - - self.state = ProtocolState::Init { substreams, init_deadline }; - None - } - - ProtocolState::Opening { mut deadline } => { - match Pin::new(&mut deadline).poll(cx) { - Poll::Ready(()) => { - deadline = Delay::new(Duration::from_secs(60)); - let event = LegacyProtoHandlerOut::ProtocolError { - is_severe: true, - error: "Timeout when opening protocol".to_string().into(), - }; - self.state = ProtocolState::Opening { deadline }; - Some(ProtocolsHandlerEvent::Custom(event)) - }, - Poll::Pending => { - self.state = ProtocolState::Opening { deadline }; - None - }, - } - } - - ProtocolState::Normal { mut substreams, mut shutdown } => { - for n in (0..substreams.len()).rev() { - let mut substream = substreams.swap_remove(n); - match Pin::new(&mut substream).poll_next(cx) { - Poll::Pending => substreams.push(substream), - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { - let event = LegacyProtoHandlerOut::CustomMessage { - message - }; - 
substreams.push(substream); - self.state = ProtocolState::Normal { substreams, shutdown }; - return Some(ProtocolsHandlerEvent::Custom(event)); - }, - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged { messages }))) => { - let event = LegacyProtoHandlerOut::Clogged { - messages, - }; - substreams.push(substream); - self.state = ProtocolState::Normal { substreams, shutdown }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - Poll::Ready(None) => { - shutdown.push(substream); - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: "All substreams have been closed by the remote".into(), - endpoint: self.endpoint.clone() - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } - } - Poll::Ready(Some(Err(err))) => { - if substreams.is_empty() { - let event = LegacyProtoHandlerOut::CustomProtocolClosed { - reason: format!("Error on the last substream: {:?}", err).into(), - endpoint: self.endpoint.clone() - }; - self.state = ProtocolState::Disabled { - shutdown: shutdown.into_iter().collect(), - reenable: true - }; - return Some(ProtocolsHandlerEvent::Custom(event)); - } else { - debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); - } - } - } - } - - // This code is reached is none if and only if none of the substreams are in a ready state. - self.state = ProtocolState::Normal { substreams, shutdown }; - None - } - - ProtocolState::Disabled { mut shutdown, reenable } => { - shutdown_list(&mut shutdown, cx); - // If `reenable` is `true`, that means we should open the substreams system again - // after all the substreams are closed. 
- if reenable && shutdown.is_empty() { - self.state = ProtocolState::Opening { - deadline: Delay::new(Duration::from_secs(60)) - }; - Some(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(self.protocol.clone()), - info: (), - }) - } else { - self.state = ProtocolState::Disabled { shutdown, reenable }; - None - } - } - - ProtocolState::KillAsap => None, - } - } - - /// Called by `inject_fully_negotiated_inbound` and `inject_fully_negotiated_outbound`. - fn inject_fully_negotiated( - &mut self, - mut substream: RegisteredProtocolSubstream - ) { - self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { - ProtocolState::Poisoned => { - error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", + } + Poll::Pending => {} + } + + self.state = ProtocolState::Init { + substreams, + init_deadline, + }; + None + } + + ProtocolState::Opening { mut deadline } => match Pin::new(&mut deadline).poll(cx) { + Poll::Ready(()) => { + deadline = Delay::new(Duration::from_secs(60)); + let event = LegacyProtoHandlerOut::ProtocolError { + is_severe: true, + error: "Timeout when opening protocol".to_string().into(), + }; + self.state = ProtocolState::Opening { deadline }; + Some(ProtocolsHandlerEvent::Custom(event)) + } + Poll::Pending => { + self.state = ProtocolState::Opening { deadline }; + None + } + }, + + ProtocolState::Normal { + mut substreams, + mut shutdown, + } => { + for n in (0..substreams.len()).rev() { + let mut substream = substreams.swap_remove(n); + match Pin::new(&mut substream).poll_next(cx) { + Poll::Pending => substreams.push(substream), + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(message)))) => { + let event = LegacyProtoHandlerOut::CustomMessage { message }; + substreams.push(substream); + self.state = ProtocolState::Normal { + substreams, + shutdown, + }; + return Some(ProtocolsHandlerEvent::Custom(event)); + } + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged { messages }))) => { + 
let event = LegacyProtoHandlerOut::Clogged { messages }; + substreams.push(substream); + self.state = ProtocolState::Normal { + substreams, + shutdown, + }; + return Some(ProtocolsHandlerEvent::Custom(event)); + } + Poll::Ready(None) => { + shutdown.push(substream); + if substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { + reason: "All substreams have been closed by the remote".into(), + endpoint: self.endpoint.clone(), + }; + self.state = ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: true, + }; + return Some(ProtocolsHandlerEvent::Custom(event)); + } + } + Poll::Ready(Some(Err(err))) => { + if substreams.is_empty() { + let event = LegacyProtoHandlerOut::CustomProtocolClosed { + reason: format!("Error on the last substream: {:?}", err) + .into(), + endpoint: self.endpoint.clone(), + }; + self.state = ProtocolState::Disabled { + shutdown: shutdown.into_iter().collect(), + reenable: true, + }; + return Some(ProtocolsHandlerEvent::Custom(event)); + } else { + debug!(target: "sub-libp2p", "Error on extra substream: {:?}", err); + } + } + } + } + + // This code is reached is none if and only if none of the substreams are in a ready state. + self.state = ProtocolState::Normal { + substreams, + shutdown, + }; + None + } + + ProtocolState::Disabled { + mut shutdown, + reenable, + } => { + shutdown_list(&mut shutdown, cx); + // If `reenable` is `true`, that means we should open the substreams system again + // after all the substreams are closed. 
+ if reenable && shutdown.is_empty() { + self.state = ProtocolState::Opening { + deadline: Delay::new(Duration::from_secs(60)), + }; + Some(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(self.protocol.clone()), + info: (), + }) + } else { + self.state = ProtocolState::Disabled { shutdown, reenable }; + None + } + } + + ProtocolState::KillAsap => None, + } + } + + /// Called by `inject_fully_negotiated_inbound` and `inject_fully_negotiated_outbound`. + fn inject_fully_negotiated( + &mut self, + mut substream: RegisteredProtocolSubstream, + ) { + self.state = match mem::replace(&mut self.state, ProtocolState::Poisoned) { + ProtocolState::Poisoned => { + error!(target: "sub-libp2p", "Handler with {:?} is in poisoned state", self.remote_peer_id); - ProtocolState::Poisoned - } - - ProtocolState::Init { mut substreams, init_deadline } => { - if substream.endpoint() == Endpoint::Dialer { - error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ + ProtocolState::Poisoned + } + + ProtocolState::Init { + mut substreams, + init_deadline, + } => { + if substream.endpoint() == Endpoint::Dialer { + error!(target: "sub-libp2p", "Opened dialing substream with {:?} before \ initialization", self.remote_peer_id); - } - substreams.push(substream); - ProtocolState::Init { substreams, init_deadline } - } - - ProtocolState::Opening { .. } => { - let event = LegacyProtoHandlerOut::CustomProtocolOpen { - version: substream.protocol_version(), - endpoint: self.endpoint.clone() - }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); - ProtocolState::Normal { - substreams: smallvec![substream], - shutdown: SmallVec::new() - } - } - - ProtocolState::Normal { substreams: mut existing, shutdown } => { - existing.push(substream); - ProtocolState::Normal { substreams: existing, shutdown } - } - - ProtocolState::Disabled { mut shutdown, .. 
} => { - substream.shutdown(); - shutdown.push(substream); - ProtocolState::Disabled { shutdown, reenable: false } - } - - ProtocolState::KillAsap => ProtocolState::KillAsap, - }; - } - - /// Sends a message to the remote. - fn send_message(&mut self, message: Vec) { - match self.state { - ProtocolState::Normal { ref mut substreams, .. } => - substreams[0].send_message(message), - - _ => debug!(target: "sub-libp2p", "Tried to send message over closed protocol \ - with {:?}", self.remote_peer_id) - } - } + } + substreams.push(substream); + ProtocolState::Init { + substreams, + init_deadline, + } + } + + ProtocolState::Opening { .. } => { + let event = LegacyProtoHandlerOut::CustomProtocolOpen { + version: substream.protocol_version(), + endpoint: self.endpoint.clone(), + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(event)); + ProtocolState::Normal { + substreams: smallvec![substream], + shutdown: SmallVec::new(), + } + } + + ProtocolState::Normal { + substreams: mut existing, + shutdown, + } => { + existing.push(substream); + ProtocolState::Normal { + substreams: existing, + shutdown, + } + } + + ProtocolState::Disabled { mut shutdown, .. } => { + substream.shutdown(); + shutdown.push(substream); + ProtocolState::Disabled { + shutdown, + reenable: false, + } + } + + ProtocolState::KillAsap => ProtocolState::KillAsap, + }; + } + + /// Sends a message to the remote. + fn send_message(&mut self, message: Vec) { + match self.state { + ProtocolState::Normal { + ref mut substreams, .. 
+ } => substreams[0].send_message(message), + + _ => debug!(target: "sub-libp2p", "Tried to send message over closed protocol \ + with {:?}", self.remote_peer_id), + } + } } impl ProtocolsHandler for LegacyProtoHandler { - type InEvent = LegacyProtoHandlerIn; - type OutEvent = LegacyProtoHandlerOut; - type Error = ConnectionKillError; - type InboundProtocol = RegisteredProtocol; - type OutboundProtocol = RegisteredProtocol; - type OutboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.protocol.clone()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - proto: >::Output - ) { - self.inject_fully_negotiated(proto); - } - - fn inject_fully_negotiated_outbound( - &mut self, - proto: >::Output, - _: Self::OutboundOpenInfo - ) { - self.inject_fully_negotiated(proto); - } - - fn inject_event(&mut self, message: LegacyProtoHandlerIn) { - match message { - LegacyProtoHandlerIn::Disable => self.disable(), - LegacyProtoHandlerIn::Enable => self.enable(), - LegacyProtoHandlerIn::SendCustomMessage { message } => - self.send_message(message), - } - } - - #[inline] - fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { - let is_severe = match err { - ProtocolsHandlerUpgrErr::Upgrade(_) => true, - _ => false, - }; - - self.events_queue.push(ProtocolsHandlerEvent::Custom(LegacyProtoHandlerOut::ProtocolError { - is_severe, - error: Box::new(err), - })); - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - ProtocolState::Init { .. } | ProtocolState::Opening { .. } | - ProtocolState::Normal { .. } => KeepAlive::Yes, - ProtocolState::Disabled { .. } | ProtocolState::Poisoned | - ProtocolState::KillAsap => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. 
- if !self.events_queue.is_empty() { - let event = self.events_queue.remove(0); - return Poll::Ready(event) - } - - // Kill the connection if needed. - if let ProtocolState::KillAsap = self.state { - return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError)); - } - - // Process all the substreams. - if let Some(event) = self.poll_state(cx) { - return Poll::Ready(event) - } - - Poll::Pending - } + type InEvent = LegacyProtoHandlerIn; + type OutEvent = LegacyProtoHandlerOut; + type Error = ConnectionKillError; + type InboundProtocol = RegisteredProtocol; + type OutboundProtocol = RegisteredProtocol; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.protocol.clone()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + proto: >::Output, + ) { + self.inject_fully_negotiated(proto); + } + + fn inject_fully_negotiated_outbound( + &mut self, + proto: >::Output, + _: Self::OutboundOpenInfo, + ) { + self.inject_fully_negotiated(proto); + } + + fn inject_event(&mut self, message: LegacyProtoHandlerIn) { + match message { + LegacyProtoHandlerIn::Disable => self.disable(), + LegacyProtoHandlerIn::Enable => self.enable(), + LegacyProtoHandlerIn::SendCustomMessage { message } => self.send_message(message), + } + } + + #[inline] + fn inject_dial_upgrade_error(&mut self, _: (), err: ProtocolsHandlerUpgrErr) { + let is_severe = match err { + ProtocolsHandlerUpgrErr::Upgrade(_) => true, + _ => false, + }; + + self.events_queue.push(ProtocolsHandlerEvent::Custom( + LegacyProtoHandlerOut::ProtocolError { + is_severe, + error: Box::new(err), + }, + )); + } + + fn connection_keep_alive(&self) -> KeepAlive { + match self.state { + ProtocolState::Init { .. } + | ProtocolState::Opening { .. } + | ProtocolState::Normal { .. } => KeepAlive::Yes, + ProtocolState::Disabled { .. 
} | ProtocolState::Poisoned | ProtocolState::KillAsap => { + KeepAlive::No + } + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + // Flush the events queue if necessary. + if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Poll::Ready(event); + } + + // Kill the connection if needed. + if let ProtocolState::KillAsap = self.state { + return Poll::Ready(ProtocolsHandlerEvent::Close(ConnectionKillError)); + } + + // Process all the substreams. + if let Some(event) = self.poll_state(cx) { + return Poll::Ready(event); + } + + Poll::Pending + } } impl fmt::Debug for LegacyProtoHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("LegacyProtoHandler") - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("LegacyProtoHandler").finish() + } } /// Given a list of substreams, tries to shut them down. The substreams that have been successfully /// shut down are removed from the list. -fn shutdown_list - (list: &mut SmallVec>>, - cx: &mut Context) -{ - 'outer: for n in (0..list.len()).rev() { - let mut substream = list.swap_remove(n); - loop { - match substream.poll_next_unpin(cx) { - Poll::Ready(Some(Ok(_))) => {} - Poll::Pending => break, - Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, - } - } - list.push(substream); - } +fn shutdown_list( + list: &mut SmallVec< + impl smallvec::Array>, + >, + cx: &mut Context, +) { + 'outer: for n in (0..list.len()).rev() { + let mut substream = list.swap_remove(n); + loop { + match substream.poll_next_unpin(cx) { + Poll::Ready(Some(Ok(_))) => {} + Poll::Pending => break, + Poll::Ready(Some(Err(_))) | Poll::Ready(None) => continue 'outer, + } + } + list.push(substream); + } } /// Error returned when switching from normal to disabled. 
#[derive(Debug)] pub struct ConnectionKillError; -impl error::Error for ConnectionKillError { -} +impl error::Error for ConnectionKillError {} impl fmt::Display for ConnectionKillError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Connection kill when switching from normal to disabled") - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Connection kill when switching from normal to disabled") + } } diff --git a/client/network/src/protocol/generic_proto/handler/notif_in.rs b/client/network/src/protocol/generic_proto/handler/notif_in.rs index 83923154bd..53a9e33d5e 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_in.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_in.rs @@ -24,19 +24,20 @@ use crate::protocol::generic_proto::upgrade::{NotificationsIn, NotificationsInSubstream}; use bytes::BytesMut; use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::core::{ConnectedPoint, PeerId}; use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, + IntoProtocolsHandler, KeepAlive, NegotiatedSubstream, ProtocolsHandler, ProtocolsHandlerEvent, + ProtocolsHandlerUpgrErr, SubstreamProtocol, }; use log::{error, warn}; use smallvec::SmallVec; -use std::{borrow::Cow, fmt, pin::Pin, task::{Context, Poll}}; +use std::{ + borrow::Cow, + fmt, + pin::Pin, + task::{Context, Poll}, +}; /// Implements the `IntoProtocolsHandler` trait of libp2p. /// @@ -44,223 +45,237 @@ use std::{borrow::Cow, fmt, pin::Pin, task::{Context, Poll}}; /// sent to a background task dedicated to this connection. Once the connection is established, /// it is turned into a [`NotifsInHandler`]. pub struct NotifsInHandlerProto { - /// Configuration for the protocol upgrade to negotiate. 
- in_protocol: NotificationsIn, + /// Configuration for the protocol upgrade to negotiate. + in_protocol: NotificationsIn, } /// The actual handler once the connection has been established. pub struct NotifsInHandler { - /// Configuration for the protocol upgrade to negotiate for inbound substreams. - in_protocol: NotificationsIn, - - /// Substream that is open with the remote. - substream: Option>, - - /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and - /// `Closed` messages in a row without the handler having time to respond with `Accept` or - /// `Refuse`. - /// - /// In order to keep the state consistent, we increment this variable every time an - /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. - pending_accept_refuses: usize, - - /// Queue of events to send to the outside. - /// - /// This queue is only ever modified to insert elements at the back, or remove the first - /// element. - events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, + /// Configuration for the protocol upgrade to negotiate for inbound substreams. + in_protocol: NotificationsIn, + + /// Substream that is open with the remote. + substream: Option>, + + /// If the substream is opened and closed rapidly, we can emit several `OpenRequest` and + /// `Closed` messages in a row without the handler having time to respond with `Accept` or + /// `Refuse`. + /// + /// In order to keep the state consistent, we increment this variable every time an + /// `OpenRequest` is emitted and decrement it every time an `Accept` or `Refuse` is received. + pending_accept_refuses: usize, + + /// Queue of events to send to the outside. + /// + /// This queue is only ever modified to insert elements at the back, or remove the first + /// element. + events_queue: + SmallVec<[ProtocolsHandlerEvent; 16]>, } /// Event that can be received by a `NotifsInHandler`. 
#[derive(Debug, Clone)] pub enum NotifsInHandlerIn { - /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send - /// to the remote. - /// - /// After sending this to the handler, the substream is now considered open and `Notif` events - /// can be received. - Accept(Vec), - - /// Can be sent back as a response to an `OpenRequest`. - Refuse, + /// Can be sent back as a response to an `OpenRequest`. Contains the status message to send + /// to the remote. + /// + /// After sending this to the handler, the substream is now considered open and `Notif` events + /// can be received. + Accept(Vec), + + /// Can be sent back as a response to an `OpenRequest`. + Refuse, } /// Event that can be emitted by a `NotifsInHandler`. #[derive(Debug)] pub enum NotifsInHandlerOut { - /// The remote wants to open a substream. Contains the initial message sent by the remote - /// when the substream has been opened. - /// - /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent - /// back even if a `Closed` is received. - OpenRequest(Vec), - - /// The notifications substream has been closed by the remote. In order to avoid race - /// conditions, this does **not** cancel any previously-sent `OpenRequest`. - Closed, - - /// Received a message on the notifications substream. - /// - /// Can only happen after an `Accept` and before a `Closed`. - Notif(BytesMut), + /// The remote wants to open a substream. Contains the initial message sent by the remote + /// when the substream has been opened. + /// + /// Every time this event is emitted, a corresponding `Accepted` or `Refused` **must** be sent + /// back even if a `Closed` is received. + OpenRequest(Vec), + + /// The notifications substream has been closed by the remote. In order to avoid race + /// conditions, this does **not** cancel any previously-sent `OpenRequest`. + Closed, + + /// Received a message on the notifications substream. 
+ /// + /// Can only happen after an `Accept` and before a `Closed`. + Notif(BytesMut), } impl NotifsInHandlerProto { - /// Builds a new `NotifsInHandlerProto`. - pub fn new( - protocol_name: impl Into> - ) -> Self { - NotifsInHandlerProto { - in_protocol: NotificationsIn::new(protocol_name), - } - } + /// Builds a new `NotifsInHandlerProto`. + pub fn new(protocol_name: impl Into>) -> Self { + NotifsInHandlerProto { + in_protocol: NotificationsIn::new(protocol_name), + } + } } impl IntoProtocolsHandler for NotifsInHandlerProto { - type Handler = NotifsInHandler; - - fn inbound_protocol(&self) -> NotificationsIn { - self.in_protocol.clone() - } - - fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsInHandler { - in_protocol: self.in_protocol, - substream: None, - pending_accept_refuses: 0, - events_queue: SmallVec::new(), - } - } + type Handler = NotifsInHandler; + + fn inbound_protocol(&self) -> NotificationsIn { + self.in_protocol.clone() + } + + fn into_handler(self, _: &PeerId, _: &ConnectedPoint) -> Self::Handler { + NotifsInHandler { + in_protocol: self.in_protocol, + substream: None, + pending_accept_refuses: 0, + events_queue: SmallVec::new(), + } + } } impl NotifsInHandler { - /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &[u8] { - self.in_protocol.protocol_name() - } + /// Returns the name of the protocol that we accept. 
+ pub fn protocol_name(&self) -> &[u8] { + self.in_protocol.protocol_name() + } } impl ProtocolsHandler for NotifsInHandler { - type InEvent = NotifsInHandlerIn; - type OutEvent = NotifsInHandlerOut; - type Error = void::Void; - type InboundProtocol = NotificationsIn; - type OutboundProtocol = DeniedUpgrade; - type OutboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(self.in_protocol.clone()) - } - - fn inject_fully_negotiated_inbound( - &mut self, - (msg, proto): >::Output - ) { - // If a substream already exists, we drop it and replace it with the new incoming one. - if self.substream.is_some() { - self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - } - - // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" - // to the remote and force-close the substream. It might seem like an unclean way to get - // rid of a substream. However, keep in mind that it is invalid for the remote to open - // multiple such substreams, and therefore sending a "RST" is the correct thing to do. - // Also note that we have already closed our writing side during the initial handshake, - // and we can't close "more" than that anyway. - self.substream = Some(proto); - - self.events_queue.push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::OpenRequest(msg))); - self.pending_accept_refuses = self.pending_accept_refuses - .checked_add(1) - .unwrap_or_else(|| { - error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); - usize::max_value() - }); - } - - fn inject_fully_negotiated_outbound( - &mut self, - out: >::Output, - _: Self::OutboundOpenInfo - ) { - // We never emit any outgoing substream. 
- void::unreachable(out) - } - - fn inject_event(&mut self, message: NotifsInHandlerIn) { - self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { - Some(v) => v, - None => { - error!( - target: "sub-libp2p", - "Inconsistent state: received Accept/Refuse when no pending request exists" - ); - return; - } - }; - - // If we send multiple `OpenRequest`s in a row, we will receive back multiple - // `Accept`/`Refuse` messages. All of them are obsolete except the last one. - if self.pending_accept_refuses != 0 { - return; - } - - match (message, self.substream.as_mut()) { - (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), - (NotifsInHandlerIn::Accept(_), None) => {}, - (NotifsInHandlerIn::Refuse, _) => self.substream = None, - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); - } - - fn connection_keep_alive(&self) -> KeepAlive { - if self.substream.is_some() { - KeepAlive::Yes - } else { - KeepAlive::No - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll< - ProtocolsHandlerEvent - > { - // Flush the events queue if necessary. 
- if !self.events_queue.is_empty() { - let event = self.events_queue.remove(0); - return Poll::Ready(event) - } - - match self.substream.as_mut().map(|s| Stream::poll_next(Pin::new(s), cx)) { - None | Some(Poll::Pending) => {}, - Some(Poll::Ready(Some(Ok(msg)))) => { - if self.pending_accept_refuses != 0 { - warn!( - target: "sub-libp2p", - "Bad state in inbound-only handler: notif before accepting substream" - ); - } - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif(msg))) - }, - Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { - self.substream = None; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); - }, - } - - Poll::Pending - } + type InEvent = NotifsInHandlerIn; + type OutEvent = NotifsInHandlerOut; + type Error = void::Void; + type InboundProtocol = NotificationsIn; + type OutboundProtocol = DeniedUpgrade; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(self.in_protocol.clone()) + } + + fn inject_fully_negotiated_inbound( + &mut self, + (msg, proto): >::Output, + ) { + // If a substream already exists, we drop it and replace it with the new incoming one. + if self.substream.is_some() { + self.events_queue + .push(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); + } + + // Note that we drop the existing substream, which will send an equivalent to a TCP "RST" + // to the remote and force-close the substream. It might seem like an unclean way to get + // rid of a substream. However, keep in mind that it is invalid for the remote to open + // multiple such substreams, and therefore sending a "RST" is the correct thing to do. + // Also note that we have already closed our writing side during the initial handshake, + // and we can't close "more" than that anyway. 
+ self.substream = Some(proto); + + self.events_queue.push(ProtocolsHandlerEvent::Custom( + NotifsInHandlerOut::OpenRequest(msg), + )); + self.pending_accept_refuses = + self.pending_accept_refuses + .checked_add(1) + .unwrap_or_else(|| { + error!(target: "sub-libp2p", "Overflow in pending_accept_refuses"); + usize::max_value() + }); + } + + fn inject_fully_negotiated_outbound( + &mut self, + out: >::Output, + _: Self::OutboundOpenInfo, + ) { + // We never emit any outgoing substream. + void::unreachable(out) + } + + fn inject_event(&mut self, message: NotifsInHandlerIn) { + self.pending_accept_refuses = match self.pending_accept_refuses.checked_sub(1) { + Some(v) => v, + None => { + error!( + target: "sub-libp2p", + "Inconsistent state: received Accept/Refuse when no pending request exists" + ); + return; + } + }; + + // If we send multiple `OpenRequest`s in a row, we will receive back multiple + // `Accept`/`Refuse` messages. All of them are obsolete except the last one. + if self.pending_accept_refuses != 0 { + return; + } + + match (message, self.substream.as_mut()) { + (NotifsInHandlerIn::Accept(message), Some(sub)) => sub.send_handshake(message), + (NotifsInHandlerIn::Accept(_), None) => {} + (NotifsInHandlerIn::Refuse, _) => self.substream = None, + } + } + + fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { + error!(target: "sub-libp2p", "Received dial upgrade error in inbound-only handler"); + } + + fn connection_keep_alive(&self) -> KeepAlive { + if self.substream.is_some() { + KeepAlive::Yes + } else { + KeepAlive::No + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + // Flush the events queue if necessary. 
+ if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Poll::Ready(event); + } + + match self + .substream + .as_mut() + .map(|s| Stream::poll_next(Pin::new(s), cx)) + { + None | Some(Poll::Pending) => {} + Some(Poll::Ready(Some(Ok(msg)))) => { + if self.pending_accept_refuses != 0 { + warn!( + target: "sub-libp2p", + "Bad state in inbound-only handler: notif before accepting substream" + ); + } + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Notif( + msg, + ))); + } + Some(Poll::Ready(None)) | Some(Poll::Ready(Some(Err(_)))) => { + self.substream = None; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsInHandlerOut::Closed)); + } + } + + Poll::Pending + } } impl fmt::Debug for NotifsInHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsInHandler") - .field("substream_open", &self.substream.is_some()) - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsInHandler") + .field("substream_open", &self.substream.is_some()) + .finish() + } } diff --git a/client/network/src/protocol/generic_proto/handler/notif_out.rs b/client/network/src/protocol/generic_proto/handler/notif_out.rs index b5d6cd61ad..15375b0ab4 100644 --- a/client/network/src/protocol/generic_proto/handler/notif_out.rs +++ b/client/network/src/protocol/generic_proto/handler/notif_out.rs @@ -21,22 +21,26 @@ //! > protocols, you need to create multiple instances and group them. //! 
-use crate::protocol::generic_proto::upgrade::{NotificationsOut, NotificationsOutSubstream, NotificationsHandshakeError}; +use crate::protocol::generic_proto::upgrade::{ + NotificationsHandshakeError, NotificationsOut, NotificationsOutSubstream, +}; use futures::prelude::*; -use libp2p::core::{ConnectedPoint, PeerId}; use libp2p::core::upgrade::{DeniedUpgrade, InboundUpgrade, OutboundUpgrade}; +use libp2p::core::{ConnectedPoint, PeerId}; use libp2p::swarm::{ - ProtocolsHandler, ProtocolsHandlerEvent, - IntoProtocolsHandler, - KeepAlive, - ProtocolsHandlerUpgrErr, - SubstreamProtocol, - NegotiatedSubstream, + IntoProtocolsHandler, KeepAlive, NegotiatedSubstream, ProtocolsHandler, ProtocolsHandlerEvent, + ProtocolsHandlerUpgrErr, SubstreamProtocol, }; -use log::{debug, warn, error}; +use log::{debug, error, warn}; use prometheus_endpoint::Histogram; use smallvec::SmallVec; -use std::{borrow::Cow, fmt, mem, pin::Pin, task::{Context, Poll}, time::Duration}; +use std::{ + borrow::Cow, + fmt, mem, + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use wasm_timer::Instant; /// Maximum duration to open a substream and receive the handshake message. After that, we @@ -55,40 +59,43 @@ const INITIAL_KEEPALIVE_TIME: Duration = Duration::from_secs(5); /// /// See the documentation of [`NotifsOutHandler`] for more information. pub struct NotifsOutHandlerProto { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, [u8]>, - /// Optional Prometheus histogram to report message queue size variations. - queue_size_report: Option, + /// Name of the protocol to negotiate. + protocol_name: Cow<'static, [u8]>, + /// Optional Prometheus histogram to report message queue size variations. + queue_size_report: Option, } impl NotifsOutHandlerProto { - /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the - /// notifications substream. 
- pub fn new(protocol_name: impl Into>, queue_size_report: Option) -> Self { - NotifsOutHandlerProto { - protocol_name: protocol_name.into(), - queue_size_report, - } - } + /// Builds a new [`NotifsOutHandlerProto`]. Will use the given protocol name for the + /// notifications substream. + pub fn new( + protocol_name: impl Into>, + queue_size_report: Option, + ) -> Self { + NotifsOutHandlerProto { + protocol_name: protocol_name.into(), + queue_size_report, + } + } } impl IntoProtocolsHandler for NotifsOutHandlerProto { - type Handler = NotifsOutHandler; - - fn inbound_protocol(&self) -> DeniedUpgrade { - DeniedUpgrade - } - - fn into_handler(self, peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { - NotifsOutHandler { - protocol_name: self.protocol_name, - when_connection_open: Instant::now(), - queue_size_report: self.queue_size_report, - state: State::Disabled, - events_queue: SmallVec::new(), - peer_id: peer_id.clone(), - } - } + type Handler = NotifsOutHandler; + + fn inbound_protocol(&self) -> DeniedUpgrade { + DeniedUpgrade + } + + fn into_handler(self, peer_id: &PeerId, _: &ConnectedPoint) -> Self::Handler { + NotifsOutHandler { + protocol_name: self.protocol_name, + when_connection_open: Instant::now(), + queue_size_report: self.queue_size_report, + state: State::Disabled, + events_queue: SmallVec::new(), + peer_id: peer_id.clone(), + } + } } /// Handler for an outbound notification substream. @@ -100,318 +107,355 @@ impl IntoProtocolsHandler for NotifsOutHandlerProto { /// handler. Once done, the handler will try to establish then maintain an outbound substream with /// the remote for the purpose of sending notifications to it. pub struct NotifsOutHandler { - /// Name of the protocol to negotiate. - protocol_name: Cow<'static, [u8]>, + /// Name of the protocol to negotiate. + protocol_name: Cow<'static, [u8]>, - /// Relationship with the node we're connected to. - state: State, + /// Relationship with the node we're connected to. 
+ state: State, - /// When the connection with the remote has been successfully established. - when_connection_open: Instant, + /// When the connection with the remote has been successfully established. + when_connection_open: Instant, - /// Optional prometheus histogram to report message queue sizes variations. - queue_size_report: Option, + /// Optional prometheus histogram to report message queue sizes variations. + queue_size_report: Option, - /// Queue of events to send to the outside. - /// - /// This queue must only ever be modified to insert elements at the back, or remove the first - /// element. - events_queue: SmallVec<[ProtocolsHandlerEvent; 16]>, + /// Queue of events to send to the outside. + /// + /// This queue must only ever be modified to insert elements at the back, or remove the first + /// element. + events_queue: SmallVec< + [ProtocolsHandlerEvent; 16], + >, - /// Who we are connected to. - peer_id: PeerId, + /// Who we are connected to. + peer_id: PeerId, } /// Our relationship with the node we're connected to. enum State { - /// The handler is disabled and idle. No substream is open. - Disabled, - - /// The handler is disabled. A substream is still open and needs to be closed. - /// - /// > **Important**: Having this state means that `poll_close` has been called at least once, - /// > but the `Sink` API is unclear about whether or not the stream can then - /// > be recovered. Because of that, we must never switch from the - /// > `DisabledOpen` state to the `Open` state while keeping the same substream. - DisabledOpen(NotificationsOutSubstream), - - /// The handler is disabled but we are still trying to open a substream with the remote. - /// - /// If the handler gets enabled again, we can immediately switch to `Opening`. - DisabledOpening, - - /// The handler is enabled and we are trying to open a substream with the remote. - Opening { - /// The initial message that we sent. Necessary if we need to re-open a substream. 
- initial_message: Vec, - }, - - /// The handler is enabled. We have tried opening a substream in the past but the remote - /// refused it. - Refused, - - /// The handler is enabled and substream is open. - Open { - /// Substream that is currently open. - substream: NotificationsOutSubstream, - /// The initial message that we sent. Necessary if we need to re-open a substream. - initial_message: Vec, - }, - - /// Poisoned state. Shouldn't be found in the wild. - Poisoned, + /// The handler is disabled and idle. No substream is open. + Disabled, + + /// The handler is disabled. A substream is still open and needs to be closed. + /// + /// > **Important**: Having this state means that `poll_close` has been called at least once, + /// > but the `Sink` API is unclear about whether or not the stream can then + /// > be recovered. Because of that, we must never switch from the + /// > `DisabledOpen` state to the `Open` state while keeping the same substream. + DisabledOpen(NotificationsOutSubstream), + + /// The handler is disabled but we are still trying to open a substream with the remote. + /// + /// If the handler gets enabled again, we can immediately switch to `Opening`. + DisabledOpening, + + /// The handler is enabled and we are trying to open a substream with the remote. + Opening { + /// The initial message that we sent. Necessary if we need to re-open a substream. + initial_message: Vec, + }, + + /// The handler is enabled. We have tried opening a substream in the past but the remote + /// refused it. + Refused, + + /// The handler is enabled and substream is open. + Open { + /// Substream that is currently open. + substream: NotificationsOutSubstream, + /// The initial message that we sent. Necessary if we need to re-open a substream. + initial_message: Vec, + }, + + /// Poisoned state. Shouldn't be found in the wild. + Poisoned, } /// Event that can be received by a `NotifsOutHandler`. 
#[derive(Debug)] pub enum NotifsOutHandlerIn { - /// Enables the notifications substream for this node. The handler will try to maintain a - /// substream with the remote. - Enable { - /// Initial message to send to remote nodes when we open substreams. - initial_message: Vec, - }, - - /// Disables the notifications substream for this node. This is the default state. - Disable, - - /// Sends a message on the notifications substream. Ignored if the substream isn't open. - /// - /// It is only valid to send this if the notifications substream has been enabled. - Send(Vec), + /// Enables the notifications substream for this node. The handler will try to maintain a + /// substream with the remote. + Enable { + /// Initial message to send to remote nodes when we open substreams. + initial_message: Vec, + }, + + /// Disables the notifications substream for this node. This is the default state. + Disable, + + /// Sends a message on the notifications substream. Ignored if the substream isn't open. + /// + /// It is only valid to send this if the notifications substream has been enabled. + Send(Vec), } /// Event that can be emitted by a `NotifsOutHandler`. #[derive(Debug)] pub enum NotifsOutHandlerOut { - /// The notifications substream has been accepted by the remote. - Open { - /// Handshake message sent by the remote after we opened the substream. - handshake: Vec, - }, - - /// The notifications substream has been closed by the remote. - Closed, - - /// We tried to open a notifications substream, but the remote refused it. - /// - /// Can only happen if we're in a closed state. - Refused, + /// The notifications substream has been accepted by the remote. + Open { + /// Handshake message sent by the remote after we opened the substream. + handshake: Vec, + }, + + /// The notifications substream has been closed by the remote. + Closed, + + /// We tried to open a notifications substream, but the remote refused it. + /// + /// Can only happen if we're in a closed state. 
+ Refused, } impl NotifsOutHandler { - /// Returns true if the substream is currently open. - pub fn is_open(&self) -> bool { - match &self.state { - State::Disabled => false, - State::DisabledOpening => false, - State::DisabledOpen(_) => true, - State::Opening { .. } => false, - State::Refused => false, - State::Open { .. } => true, - State::Poisoned => false, - } - } - - /// Returns the name of the protocol that we negotiate. - pub fn protocol_name(&self) -> &[u8] { - &self.protocol_name - } + /// Returns true if the substream is currently open. + pub fn is_open(&self) -> bool { + match &self.state { + State::Disabled => false, + State::DisabledOpening => false, + State::DisabledOpen(_) => true, + State::Opening { .. } => false, + State::Refused => false, + State::Open { .. } => true, + State::Poisoned => false, + } + } + + /// Returns the name of the protocol that we negotiate. + pub fn protocol_name(&self) -> &[u8] { + &self.protocol_name + } } impl ProtocolsHandler for NotifsOutHandler { - type InEvent = NotifsOutHandlerIn; - type OutEvent = NotifsOutHandlerOut; - type Error = void::Void; - type InboundProtocol = DeniedUpgrade; - type OutboundProtocol = NotificationsOut; - type OutboundOpenInfo = (); - - fn listen_protocol(&self) -> SubstreamProtocol { - SubstreamProtocol::new(DeniedUpgrade) - } - - fn inject_fully_negotiated_inbound( - &mut self, - proto: >::Output - ) { - // We should never reach here. `proto` is a `Void`. - void::unreachable(proto) - } - - fn inject_fully_negotiated_outbound( - &mut self, - (handshake_msg, substream): >::Output, - _: () - ) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Opening { initial_message } => { - let ev = NotifsOutHandlerOut::Open { handshake: handshake_msg }; - self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); - self.state = State::Open { substream, initial_message }; - }, - // If the handler was disabled while we were negotiating the protocol, immediately - // close it. 
- State::DisabledOpening => self.state = State::DisabledOpen(substream), - - // Any other situation should never happen. - State::Disabled | State::Refused | State::Open { .. } | State::DisabledOpen(_) => - error!("☎️ State mismatch in notifications handler: substream already open"), - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn inject_event(&mut self, message: NotifsOutHandlerIn) { - match message { - NotifsOutHandlerIn::Enable { initial_message } => { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => { - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), - info: (), - }); - self.state = State::Opening { initial_message }; - }, - State::DisabledOpening => self.state = State::Opening { initial_message }, - State::DisabledOpen(mut sub) => { - // As documented above, in this state we have already called `poll_close` - // once on the substream, and it is unclear whether the substream can then - // be recovered. When in doubt, let's drop the existing substream and - // open a new one. - if sub.close().now_or_never().is_none() { - warn!( - target: "sub-libp2p", - "📞 Improperly closed outbound notifications substream" - ); - } - - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message.clone()); - self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), - info: (), - }); - self.state = State::Opening { initial_message }; - }, - st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. 
} => { - debug!(target: "sub-libp2p", + type InEvent = NotifsOutHandlerIn; + type OutEvent = NotifsOutHandlerOut; + type Error = void::Void; + type InboundProtocol = DeniedUpgrade; + type OutboundProtocol = NotificationsOut; + type OutboundOpenInfo = (); + + fn listen_protocol(&self) -> SubstreamProtocol { + SubstreamProtocol::new(DeniedUpgrade) + } + + fn inject_fully_negotiated_inbound( + &mut self, + proto: >::Output, + ) { + // We should never reach here. `proto` is a `Void`. + void::unreachable(proto) + } + + fn inject_fully_negotiated_outbound( + &mut self, + (handshake_msg, substream): >::Output, + _: (), + ) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Opening { initial_message } => { + let ev = NotifsOutHandlerOut::Open { + handshake: handshake_msg, + }; + self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); + self.state = State::Open { + substream, + initial_message, + }; + } + // If the handler was disabled while we were negotiating the protocol, immediately + // close it. + State::DisabledOpening => self.state = State::DisabledOpen(substream), + + // Any other situation should never happen. + State::Disabled | State::Refused | State::Open { .. 
} | State::DisabledOpen(_) => { + error!("☎️ State mismatch in notifications handler: substream already open") + } + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), + } + } + + fn inject_event(&mut self, message: NotifsOutHandlerIn) { + match message { + NotifsOutHandlerIn::Enable { initial_message } => { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => { + let proto = NotificationsOut::new( + self.protocol_name.clone(), + initial_message.clone(), + ); + self.events_queue + .push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), + info: (), + }); + self.state = State::Opening { initial_message }; + } + State::DisabledOpening => self.state = State::Opening { initial_message }, + State::DisabledOpen(mut sub) => { + // As documented above, in this state we have already called `poll_close` + // once on the substream, and it is unclear whether the substream can then + // be recovered. When in doubt, let's drop the existing substream and + // open a new one. + if sub.close().now_or_never().is_none() { + warn!( + target: "sub-libp2p", + "📞 Improperly closed outbound notifications substream" + ); + } + + let proto = NotificationsOut::new( + self.protocol_name.clone(), + initial_message.clone(), + ); + self.events_queue + .push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), + info: (), + }); + self.state = State::Opening { initial_message }; + } + st @ State::Opening { .. } | st @ State::Refused | st @ State::Open { .. 
} => { + debug!(target: "sub-libp2p", "Tried to enable notifications handler that was already enabled"); - self.state = st; - } - State::Poisoned => error!("Notifications handler in a poisoned state"), - } - } - - NotifsOutHandlerIn::Disable => { - match mem::replace(&mut self.state, State::Poisoned) { - st @ State::Disabled | st @ State::DisabledOpen(_) | st @ State::DisabledOpening => { - debug!(target: "sub-libp2p", + self.state = st; + } + State::Poisoned => error!("Notifications handler in a poisoned state"), + } + } + + NotifsOutHandlerIn::Disable => match mem::replace(&mut self.state, State::Poisoned) { + st @ State::Disabled + | st @ State::DisabledOpen(_) + | st @ State::DisabledOpening => { + debug!(target: "sub-libp2p", "Tried to disable notifications handler that was already disabled"); - self.state = st; - } - State::Opening { .. } => self.state = State::DisabledOpening, - State::Refused => self.state = State::Disabled, - State::Open { substream, .. } => self.state = State::DisabledOpen(substream), - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - NotifsOutHandlerIn::Send(msg) => - if let State::Open { substream, .. } = &mut self.state { - if substream.push_message(msg).is_err() { - warn!( - target: "sub-libp2p", - "📞 Notifications queue with peer {} is full, dropped message (protocol: {:?})", - self.peer_id, - self.protocol_name, - ); - } - if let Some(metric) = &self.queue_size_report { - metric.observe(substream.queue_len() as f64); - } - } else { - // This is an API misuse. - warn!( - target: "sub-libp2p", - "📞 Tried to send a notification on a disabled handler" - ); - }, - } - } - - fn inject_dial_upgrade_error(&mut self, _: (), _: ProtocolsHandlerUpgrErr) { - match mem::replace(&mut self.state, State::Poisoned) { - State::Disabled => {}, - State::DisabledOpen(_) | State::Refused | State::Open { .. } => - error!("☎️ State mismatch in NotificationsOut"), - State::Opening { .. 
} => { - self.state = State::Refused; - let ev = NotifsOutHandlerOut::Refused; - self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); - }, - State::DisabledOpening => self.state = State::Disabled, - State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), - } - } - - fn connection_keep_alive(&self) -> KeepAlive { - match self.state { - // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the - // connection open no matter what, in order to avoid closing and reopening - // connections all the time. - State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => - KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME), - State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, - State::Refused | State::Poisoned => KeepAlive::No, - } - } - - fn poll( - &mut self, - cx: &mut Context, - ) -> Poll> { - // Flush the events queue if necessary. - if !self.events_queue.is_empty() { - let event = self.events_queue.remove(0); - return Poll::Ready(event); - } - - match &mut self.state { - State::Open { substream, initial_message } => - match Sink::poll_flush(Pin::new(substream), cx) { - Poll::Pending | Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(_)) => { - // We try to re-open a substream. 
- let initial_message = mem::replace(initial_message, Vec::new()); - self.state = State::Opening { initial_message: initial_message.clone() }; - let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); - self.events_queue.push(ProtocolsHandlerEvent::OutboundSubstreamRequest { - protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), - info: (), - }); - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - } - }, - - State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { - Poll::Pending => {}, - Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { - self.state = State::Disabled; - return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); - }, - }, - - _ => {} - } - - Poll::Pending - } + self.state = st; + } + State::Opening { .. } => self.state = State::DisabledOpening, + State::Refused => self.state = State::Disabled, + State::Open { substream, .. } => self.state = State::DisabledOpen(substream), + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), + }, + + NotifsOutHandlerIn::Send(msg) => { + if let State::Open { substream, .. } = &mut self.state { + if substream.push_message(msg).is_err() { + warn!( + target: "sub-libp2p", + "📞 Notifications queue with peer {} is full, dropped message (protocol: {:?})", + self.peer_id, + self.protocol_name, + ); + } + if let Some(metric) = &self.queue_size_report { + metric.observe(substream.queue_len() as f64); + } + } else { + // This is an API misuse. + warn!( + target: "sub-libp2p", + "📞 Tried to send a notification on a disabled handler" + ); + } + } + } + } + + fn inject_dial_upgrade_error( + &mut self, + _: (), + _: ProtocolsHandlerUpgrErr, + ) { + match mem::replace(&mut self.state, State::Poisoned) { + State::Disabled => {} + State::DisabledOpen(_) | State::Refused | State::Open { .. } => { + error!("☎️ State mismatch in NotificationsOut") + } + State::Opening { .. 
} => { + self.state = State::Refused; + let ev = NotifsOutHandlerOut::Refused; + self.events_queue.push(ProtocolsHandlerEvent::Custom(ev)); + } + State::DisabledOpening => self.state = State::Disabled, + State::Poisoned => error!("☎️ Notifications handler in a poisoned state"), + } + } + + fn connection_keep_alive(&self) -> KeepAlive { + match self.state { + // We have a small grace period of `INITIAL_KEEPALIVE_TIME` during which we keep the + // connection open no matter what, in order to avoid closing and reopening + // connections all the time. + State::Disabled | State::DisabledOpen(_) | State::DisabledOpening => { + KeepAlive::Until(self.when_connection_open + INITIAL_KEEPALIVE_TIME) + } + State::Opening { .. } | State::Open { .. } => KeepAlive::Yes, + State::Refused | State::Poisoned => KeepAlive::No, + } + } + + fn poll( + &mut self, + cx: &mut Context, + ) -> Poll< + ProtocolsHandlerEvent< + Self::OutboundProtocol, + Self::OutboundOpenInfo, + Self::OutEvent, + Self::Error, + >, + > { + // Flush the events queue if necessary. + if !self.events_queue.is_empty() { + let event = self.events_queue.remove(0); + return Poll::Ready(event); + } + + match &mut self.state { + State::Open { + substream, + initial_message, + } => match Sink::poll_flush(Pin::new(substream), cx) { + Poll::Pending | Poll::Ready(Ok(())) => {} + Poll::Ready(Err(_)) => { + // We try to re-open a substream. 
+ let initial_message = mem::replace(initial_message, Vec::new()); + self.state = State::Opening { + initial_message: initial_message.clone(), + }; + let proto = NotificationsOut::new(self.protocol_name.clone(), initial_message); + self.events_queue + .push(ProtocolsHandlerEvent::OutboundSubstreamRequest { + protocol: SubstreamProtocol::new(proto).with_timeout(OPEN_TIMEOUT), + info: (), + }); + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); + } + }, + + State::DisabledOpen(sub) => match Sink::poll_close(Pin::new(sub), cx) { + Poll::Pending => {} + Poll::Ready(Ok(())) | Poll::Ready(Err(_)) => { + self.state = State::Disabled; + return Poll::Ready(ProtocolsHandlerEvent::Custom(NotifsOutHandlerOut::Closed)); + } + }, + + _ => {} + } + + Poll::Pending + } } impl fmt::Debug for NotifsOutHandler { - fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { - f.debug_struct("NotifsOutHandler") - .field("open", &self.is_open()) - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { + f.debug_struct("NotifsOutHandler") + .field("open", &self.is_open()) + .finish() + } } diff --git a/client/network/src/protocol/generic_proto/tests.rs b/client/network/src/protocol/generic_proto/tests.rs index 1bc6e745f8..12acc15f52 100644 --- a/client/network/src/protocol/generic_proto/tests.rs +++ b/client/network/src/protocol/generic_proto/tests.rs @@ -16,162 +16,181 @@ #![cfg(test)] +use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; +use crate::protocol::message::{generic::BlockResponse, Message}; +use codec::{Decode, Encode}; use futures::{prelude::*, ready}; -use codec::{Encode, Decode}; use libp2p::core::connection::{ConnectionId, ListenerId}; use libp2p::core::ConnectedPoint; -use libp2p::swarm::{Swarm, ProtocolsHandler, IntoProtocolsHandler}; -use libp2p::swarm::{PollParameters, NetworkBehaviour, NetworkBehaviourAction}; -use libp2p::{PeerId, Multiaddr, Transport}; +use 
libp2p::swarm::{IntoProtocolsHandler, ProtocolsHandler, Swarm}; +use libp2p::swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}; +use libp2p::{Multiaddr, PeerId, Transport}; use rand::seq::SliceRandom; -use std::{error, io, task::Context, task::Poll, time::Duration}; -use std::collections::HashSet; -use crate::protocol::message::{generic::BlockResponse, Message}; -use crate::protocol::generic_proto::{GenericProto, GenericProtoOut}; use sp_test_primitives::Block; +use std::collections::HashSet; +use std::{error, io, task::Context, task::Poll, time::Duration}; /// Builds two nodes that have each other as bootstrap nodes. /// This is to be used only for testing, and a panic will happen if something goes wrong. fn build_nodes() -> (Swarm, Swarm) { - let mut out = Vec::with_capacity(2); - - let keypairs: Vec<_> = (0..2).map(|_| libp2p::identity::Keypair::generate_ed25519()).collect(); - let addrs: Vec = (0..2) - .map(|_| format!("/memory/{}", rand::random::()).parse().unwrap()) - .collect(); - - for index in 0 .. 
2 { - let keypair = keypairs[index].clone(); - let transport = libp2p::core::transport::MemoryTransport - .and_then(move |out, endpoint| { - let secio = libp2p::secio::SecioConfig::new(keypair); - libp2p::core::upgrade::apply( - out, - secio, - endpoint, - libp2p::core::upgrade::Version::V1 - ) - }) - .and_then(move |(peer_id, stream), endpoint| { - libp2p::core::upgrade::apply( - stream, - libp2p::yamux::Config::default(), - endpoint, - libp2p::core::upgrade::Version::V1 - ) - .map_ok(|muxer| (peer_id, libp2p::core::muxing::StreamMuxerBox::new(muxer))) - }) - .timeout(Duration::from_secs(20)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .boxed(); - - let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: if index == 0 { - keypairs - .iter() - .skip(1) - .map(|keypair| keypair.public().into_peer_id()) - .collect() - } else { - vec![] - }, - reserved_only: false, - priority_groups: Vec::new(), - }); - - let behaviour = CustomProtoWithAddr { - inner: GenericProto::new(&b"test"[..], &[1], peerset, None), - addrs: addrs - .iter() - .enumerate() - .filter_map(|(n, a)| if n != index { - Some((keypairs[n].public().into_peer_id(), a.clone())) - } else { - None - }) - .collect(), - }; - - let mut swarm = Swarm::new( - transport, - behaviour, - keypairs[index].public().into_peer_id() - ); - Swarm::listen_on(&mut swarm, addrs[index].clone()).unwrap(); - out.push(swarm); - } - - // Final output - let mut out_iter = out.into_iter(); - let first = out_iter.next().unwrap(); - let second = out_iter.next().unwrap(); - (first, second) + let mut out = Vec::with_capacity(2); + + let keypairs: Vec<_> = (0..2) + .map(|_| libp2p::identity::Keypair::generate_ed25519()) + .collect(); + let addrs: Vec = (0..2) + .map(|_| { + format!("/memory/{}", rand::random::()) + .parse() + .unwrap() + }) + .collect(); + + for index in 0..2 { + let keypair = keypairs[index].clone(); + let transport = 
libp2p::core::transport::MemoryTransport + .and_then(move |out, endpoint| { + let secio = libp2p::secio::SecioConfig::new(keypair); + libp2p::core::upgrade::apply( + out, + secio, + endpoint, + libp2p::core::upgrade::Version::V1, + ) + }) + .and_then(move |(peer_id, stream), endpoint| { + libp2p::core::upgrade::apply( + stream, + libp2p::yamux::Config::default(), + endpoint, + libp2p::core::upgrade::Version::V1, + ) + .map_ok(|muxer| (peer_id, libp2p::core::muxing::StreamMuxerBox::new(muxer))) + }) + .timeout(Duration::from_secs(20)) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .boxed(); + + let (peerset, _) = sc_peerset::Peerset::from_config(sc_peerset::PeersetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: if index == 0 { + keypairs + .iter() + .skip(1) + .map(|keypair| keypair.public().into_peer_id()) + .collect() + } else { + vec![] + }, + reserved_only: false, + priority_groups: Vec::new(), + }); + + let behaviour = CustomProtoWithAddr { + inner: GenericProto::new(&b"test"[..], &[1], peerset, None), + addrs: addrs + .iter() + .enumerate() + .filter_map(|(n, a)| { + if n != index { + Some((keypairs[n].public().into_peer_id(), a.clone())) + } else { + None + } + }) + .collect(), + }; + + let mut swarm = Swarm::new( + transport, + behaviour, + keypairs[index].public().into_peer_id(), + ); + Swarm::listen_on(&mut swarm, addrs[index].clone()).unwrap(); + out.push(swarm); + } + + // Final output + let mut out_iter = out.into_iter(); + let first = out_iter.next().unwrap(); + let second = out_iter.next().unwrap(); + (first, second) } /// Wraps around the `CustomBehaviour` network behaviour, and adds hardcoded node addresses to it. 
struct CustomProtoWithAddr { - inner: GenericProto, - addrs: Vec<(PeerId, Multiaddr)>, + inner: GenericProto, + addrs: Vec<(PeerId, Multiaddr)>, } impl std::ops::Deref for CustomProtoWithAddr { - type Target = GenericProto; + type Target = GenericProto; - fn deref(&self) -> &Self::Target { - &self.inner - } + fn deref(&self) -> &Self::Target { + &self.inner + } } impl std::ops::DerefMut for CustomProtoWithAddr { - fn deref_mut(&mut self) -> &mut Self::Target { - &mut self.inner - } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.inner + } } impl NetworkBehaviour for CustomProtoWithAddr { - type ProtocolsHandler = ::ProtocolsHandler; - type OutEvent = ::OutEvent; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - self.inner.new_handler() - } - - fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { - let mut list = self.inner.addresses_of_peer(peer_id); - for (p, a) in self.addrs.iter() { - if p == peer_id { - list.push(a.clone()); - } - } - list - } - - fn inject_connected(&mut self, peer_id: &PeerId) { - self.inner.inject_connected(peer_id) - } - - fn inject_disconnected(&mut self, peer_id: &PeerId) { - self.inner.inject_disconnected(peer_id) - } - - fn inject_connection_established(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.inner.inject_connection_established(peer_id, conn, endpoint) - } - - fn inject_connection_closed(&mut self, peer_id: &PeerId, conn: &ConnectionId, endpoint: &ConnectedPoint) { - self.inner.inject_connection_closed(peer_id, conn, endpoint) - } - - fn inject_event( - &mut self, - peer_id: PeerId, - connection: ConnectionId, - event: <::Handler as ProtocolsHandler>::OutEvent - ) { - self.inner.inject_event(peer_id, connection, event) - } + type ProtocolsHandler = ::ProtocolsHandler; + type OutEvent = ::OutEvent; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + self.inner.new_handler() + } + + fn addresses_of_peer(&mut self, peer_id: &PeerId) -> Vec { + let mut list = 
self.inner.addresses_of_peer(peer_id); + for (p, a) in self.addrs.iter() { + if p == peer_id { + list.push(a.clone()); + } + } + list + } + + fn inject_connected(&mut self, peer_id: &PeerId) { + self.inner.inject_connected(peer_id) + } + + fn inject_disconnected(&mut self, peer_id: &PeerId) { + self.inner.inject_disconnected(peer_id) + } + + fn inject_connection_established( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.inner + .inject_connection_established(peer_id, conn, endpoint) + } + + fn inject_connection_closed( + &mut self, + peer_id: &PeerId, + conn: &ConnectionId, + endpoint: &ConnectedPoint, + ) { + self.inner.inject_connection_closed(peer_id, conn, endpoint) + } + + fn inject_event( + &mut self, + peer_id: PeerId, + connection: ConnectionId, + event: <::Handler as ProtocolsHandler>::OutEvent, + ) { + self.inner.inject_event(peer_id, connection, event) + } fn poll( &mut self, @@ -182,256 +201,272 @@ impl NetworkBehaviour for CustomProtoWithAddr { <::Handler as ProtocolsHandler>::InEvent, Self::OutEvent > - > { - self.inner.poll(cx, params) - } - - fn inject_addr_reach_failure(&mut self, peer_id: Option<&PeerId>, addr: &Multiaddr, error: &dyn std::error::Error) { - self.inner.inject_addr_reach_failure(peer_id, addr, error) - } - - fn inject_dial_failure(&mut self, peer_id: &PeerId) { - self.inner.inject_dial_failure(peer_id) - } - - fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_new_listen_addr(addr) - } - - fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_expired_listen_addr(addr) - } - - fn inject_new_external_addr(&mut self, addr: &Multiaddr) { - self.inner.inject_new_external_addr(addr) - } - - fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { - self.inner.inject_listener_error(id, err); - } - - fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { - 
self.inner.inject_listener_closed(id, reason); - } +>{ + self.inner.poll(cx, params) + } + + fn inject_addr_reach_failure( + &mut self, + peer_id: Option<&PeerId>, + addr: &Multiaddr, + error: &dyn std::error::Error, + ) { + self.inner.inject_addr_reach_failure(peer_id, addr, error) + } + + fn inject_dial_failure(&mut self, peer_id: &PeerId) { + self.inner.inject_dial_failure(peer_id) + } + + fn inject_new_listen_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_new_listen_addr(addr) + } + + fn inject_expired_listen_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_expired_listen_addr(addr) + } + + fn inject_new_external_addr(&mut self, addr: &Multiaddr) { + self.inner.inject_new_external_addr(addr) + } + + fn inject_listener_error(&mut self, id: ListenerId, err: &(dyn error::Error + 'static)) { + self.inner.inject_listener_error(id, err); + } + + fn inject_listener_closed(&mut self, id: ListenerId, reason: Result<(), &io::Error>) { + self.inner.inject_listener_closed(id, reason); + } } #[test] fn two_nodes_transfer_lots_of_packets() { - // We spawn two nodes, then make the first one send lots of packets to the second one. The test - // ends when the second one has received all of them. - - // Note that if we go too high, we will reach the limit to the number of simultaneous - // substreams allowed by the multiplexer. - const NUM_PACKETS: u32 = 5000; - - let (mut service1, mut service2) = build_nodes(); - - let fut1 = future::poll_fn(move |cx| -> Poll<()> { - loop { - match ready!(service1.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { - for n in 0 .. NUM_PACKETS { - service1.send_packet( - &peer_id, - Message::::BlockResponse(BlockResponse { - id: n as _, - blocks: Vec::new(), - }).encode() - ); - } - }, - _ => panic!(), - } - } - }); - - let mut packet_counter = 0u32; - let fut2 = future::poll_fn(move |cx| { - loop { - match ready!(service2.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { .. 
}) => {}, - Some(GenericProtoOut::LegacyMessage { message, .. }) => { - match Message::::decode(&mut &message[..]).unwrap() { - Message::::BlockResponse(BlockResponse { id: _, blocks }) => { - assert!(blocks.is_empty()); - packet_counter += 1; - if packet_counter == NUM_PACKETS { - return Poll::Ready(()) - } - }, - _ => panic!(), - } - } - _ => panic!(), - } - } - }); - - futures::executor::block_on(async move { - future::select(fut1, fut2).await; - }); + // We spawn two nodes, then make the first one send lots of packets to the second one. The test + // ends when the second one has received all of them. + + // Note that if we go too high, we will reach the limit to the number of simultaneous + // substreams allowed by the multiplexer. + const NUM_PACKETS: u32 = 5000; + + let (mut service1, mut service2) = build_nodes(); + + let fut1 = future::poll_fn(move |cx| -> Poll<()> { + loop { + match ready!(service1.poll_next_unpin(cx)) { + Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { + for n in 0..NUM_PACKETS { + service1.send_packet( + &peer_id, + Message::::BlockResponse(BlockResponse { + id: n as _, + blocks: Vec::new(), + }) + .encode(), + ); + } + } + _ => panic!(), + } + } + }); + + let mut packet_counter = 0u32; + let fut2 = future::poll_fn(move |cx| loop { + match ready!(service2.poll_next_unpin(cx)) { + Some(GenericProtoOut::CustomProtocolOpen { .. }) => {} + Some(GenericProtoOut::LegacyMessage { message, .. 
}) => { + match Message::::decode(&mut &message[..]).unwrap() { + Message::::BlockResponse(BlockResponse { id: _, blocks }) => { + assert!(blocks.is_empty()); + packet_counter += 1; + if packet_counter == NUM_PACKETS { + return Poll::Ready(()); + } + } + _ => panic!(), + } + } + _ => panic!(), + } + }); + + futures::executor::block_on(async move { + future::select(fut1, fut2).await; + }); } #[test] fn basic_two_nodes_requests_in_parallel() { - let (mut service1, mut service2) = build_nodes(); - - // Generate random messages with or without a request id. - let mut to_send = { - let mut to_send = Vec::new(); - let mut existing_ids = HashSet::new(); - for _ in 0..200 { // Note: don't make that number too high or the CPU usage will explode. - let req_id = loop { - let req_id = rand::random::(); - - // ensure uniqueness - odds of randomly sampling collisions - // is unlikely, but possible to cause spurious test failures. - if existing_ids.insert(req_id) { - break req_id; - } - }; - - to_send.push(Message::::BlockResponse( - BlockResponse { id: req_id, blocks: Vec::new() } - )); - } - to_send - }; - - // Clone `to_send` in `to_receive`. Below we will remove from `to_receive` the messages we - // receive, until the list is empty. - let mut to_receive = to_send.clone(); - to_send.shuffle(&mut rand::thread_rng()); - - let fut1 = future::poll_fn(move |cx| -> Poll<()> { - loop { - match ready!(service1.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { - for msg in to_send.drain(..) { - service1.send_packet(&peer_id, msg.encode()); - } - }, - _ => panic!(), - } - } - }); - - let fut2 = future::poll_fn(move |cx| { - loop { - match ready!(service2.poll_next_unpin(cx)) { - Some(GenericProtoOut::CustomProtocolOpen { .. }) => {}, - Some(GenericProtoOut::LegacyMessage { message, .. 
}) => { - let pos = to_receive.iter().position(|m| m.encode() == message).unwrap(); - to_receive.remove(pos); - if to_receive.is_empty() { - return Poll::Ready(()) - } - } - _ => panic!(), - } - } - }); - - futures::executor::block_on(async move { - future::select(fut1, fut2).await; - }); + let (mut service1, mut service2) = build_nodes(); + + // Generate random messages with or without a request id. + let mut to_send = { + let mut to_send = Vec::new(); + let mut existing_ids = HashSet::new(); + for _ in 0..200 { + // Note: don't make that number too high or the CPU usage will explode. + let req_id = loop { + let req_id = rand::random::(); + + // ensure uniqueness - odds of randomly sampling collisions + // is unlikely, but possible to cause spurious test failures. + if existing_ids.insert(req_id) { + break req_id; + } + }; + + to_send.push(Message::::BlockResponse(BlockResponse { + id: req_id, + blocks: Vec::new(), + })); + } + to_send + }; + + // Clone `to_send` in `to_receive`. Below we will remove from `to_receive` the messages we + // receive, until the list is empty. + let mut to_receive = to_send.clone(); + to_send.shuffle(&mut rand::thread_rng()); + + let fut1 = future::poll_fn(move |cx| -> Poll<()> { + loop { + match ready!(service1.poll_next_unpin(cx)) { + Some(GenericProtoOut::CustomProtocolOpen { peer_id, .. }) => { + for msg in to_send.drain(..) { + service1.send_packet(&peer_id, msg.encode()); + } + } + _ => panic!(), + } + } + }); + + let fut2 = future::poll_fn(move |cx| loop { + match ready!(service2.poll_next_unpin(cx)) { + Some(GenericProtoOut::CustomProtocolOpen { .. }) => {} + Some(GenericProtoOut::LegacyMessage { message, .. 
}) => { + let pos = to_receive + .iter() + .position(|m| m.encode() == message) + .unwrap(); + to_receive.remove(pos); + if to_receive.is_empty() { + return Poll::Ready(()); + } + } + _ => panic!(), + } + }); + + futures::executor::block_on(async move { + future::select(fut1, fut2).await; + }); } #[test] fn reconnect_after_disconnect() { - // We connect two nodes together, then force a disconnect (through the API of the `Service`), - // check that the disconnect worked, and finally check whether they successfully reconnect. - - let (mut service1, mut service2) = build_nodes(); - - // For this test, the services can be in the following states. - #[derive(Debug, Copy, Clone, PartialEq, Eq)] - enum ServiceState { NotConnected, FirstConnec, Disconnected, ConnectedAgain } - let mut service1_state = ServiceState::NotConnected; - let mut service2_state = ServiceState::NotConnected; - - futures::executor::block_on(async move { - loop { - // Grab next event from services. - let event = { - let s1 = service1.next(); - let s2 = service2.next(); - futures::pin_mut!(s1, s2); - match future::select(s1, s2).await { - future::Either::Left((ev, _)) => future::Either::Left(ev), - future::Either::Right((ev, _)) => future::Either::Right(ev), - } - }; - - match event { - future::Either::Left(GenericProtoOut::CustomProtocolOpen { .. }) => { - match service1_state { - ServiceState::NotConnected => { - service1_state = ServiceState::FirstConnec; - if service2_state == ServiceState::FirstConnec { - service1.disconnect_peer(Swarm::local_peer_id(&service2)); - } - }, - ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - } - }, - future::Either::Left(GenericProtoOut::CustomProtocolClosed { .. 
}) => { - match service1_state { - ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain| ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - } - }, - future::Either::Right(GenericProtoOut::CustomProtocolOpen { .. }) => { - match service2_state { - ServiceState::NotConnected => { - service2_state = ServiceState::FirstConnec; - if service1_state == ServiceState::FirstConnec { - service1.disconnect_peer(Swarm::local_peer_id(&service2)); - } - }, - ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, - ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), - } - }, - future::Either::Right(GenericProtoOut::CustomProtocolClosed { .. }) => { - match service2_state { - ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, - ServiceState::ConnectedAgain| ServiceState::NotConnected | - ServiceState::Disconnected => panic!(), - } - }, - _ => {} - } - - if service1_state == ServiceState::ConnectedAgain && service2_state == ServiceState::ConnectedAgain { - break; - } - } - - // Now that the two services have disconnected and reconnected, wait for 3 seconds and - // check whether they're still connected. - let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); - - loop { - // Grab next event from services. - let event = { - let s1 = service1.next(); - let s2 = service2.next(); - futures::pin_mut!(s1, s2); - match future::select(future::select(s1, s2), &mut delay).await { - future::Either::Right(_) => break, // success - future::Either::Left((future::Either::Left((ev, _)), _)) => ev, - future::Either::Left((future::Either::Right((ev, _)), _)) => ev, - } - }; - - match event { - GenericProtoOut::CustomProtocolOpen { .. } | - GenericProtoOut::CustomProtocolClosed { .. 
} => panic!(), - _ => {} - } - } - }); + // We connect two nodes together, then force a disconnect (through the API of the `Service`), + // check that the disconnect worked, and finally check whether they successfully reconnect. + + let (mut service1, mut service2) = build_nodes(); + + // For this test, the services can be in the following states. + #[derive(Debug, Copy, Clone, PartialEq, Eq)] + enum ServiceState { + NotConnected, + FirstConnec, + Disconnected, + ConnectedAgain, + } + let mut service1_state = ServiceState::NotConnected; + let mut service2_state = ServiceState::NotConnected; + + futures::executor::block_on(async move { + loop { + // Grab next event from services. + let event = { + let s1 = service1.next(); + let s2 = service2.next(); + futures::pin_mut!(s1, s2); + match future::select(s1, s2).await { + future::Either::Left((ev, _)) => future::Either::Left(ev), + future::Either::Right((ev, _)) => future::Either::Right(ev), + } + }; + + match event { + future::Either::Left(GenericProtoOut::CustomProtocolOpen { .. }) => { + match service1_state { + ServiceState::NotConnected => { + service1_state = ServiceState::FirstConnec; + if service2_state == ServiceState::FirstConnec { + service1.disconnect_peer(Swarm::local_peer_id(&service2)); + } + } + ServiceState::Disconnected => service1_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + } + } + future::Either::Left(GenericProtoOut::CustomProtocolClosed { .. }) => { + match service1_state { + ServiceState::FirstConnec => service1_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain + | ServiceState::NotConnected + | ServiceState::Disconnected => panic!(), + } + } + future::Either::Right(GenericProtoOut::CustomProtocolOpen { .. 
}) => { + match service2_state { + ServiceState::NotConnected => { + service2_state = ServiceState::FirstConnec; + if service1_state == ServiceState::FirstConnec { + service1.disconnect_peer(Swarm::local_peer_id(&service2)); + } + } + ServiceState::Disconnected => service2_state = ServiceState::ConnectedAgain, + ServiceState::FirstConnec | ServiceState::ConnectedAgain => panic!(), + } + } + future::Either::Right(GenericProtoOut::CustomProtocolClosed { .. }) => { + match service2_state { + ServiceState::FirstConnec => service2_state = ServiceState::Disconnected, + ServiceState::ConnectedAgain + | ServiceState::NotConnected + | ServiceState::Disconnected => panic!(), + } + } + _ => {} + } + + if service1_state == ServiceState::ConnectedAgain + && service2_state == ServiceState::ConnectedAgain + { + break; + } + } + + // Now that the two services have disconnected and reconnected, wait for 3 seconds and + // check whether they're still connected. + let mut delay = futures_timer::Delay::new(Duration::from_secs(3)); + + loop { + // Grab next event from services. + let event = { + let s1 = service1.next(); + let s2 = service2.next(); + futures::pin_mut!(s1, s2); + match future::select(future::select(s1, s2), &mut delay).await { + future::Either::Right(_) => break, // success + future::Either::Left((future::Either::Left((ev, _)), _)) => ev, + future::Either::Left((future::Either::Right((ev, _)), _)) => ev, + } + }; + + match event { + GenericProtoOut::CustomProtocolOpen { .. } + | GenericProtoOut::CustomProtocolClosed { .. 
} => panic!(), + _ => {} + } + } + }); } diff --git a/client/network/src/protocol/generic_proto/upgrade.rs b/client/network/src/protocol/generic_proto/upgrade.rs index 36f8263365..66a0f02314 100644 --- a/client/network/src/protocol/generic_proto/upgrade.rs +++ b/client/network/src/protocol/generic_proto/upgrade.rs @@ -16,18 +16,12 @@ pub use self::collec::UpgradeCollec; pub use self::legacy::{ - RegisteredProtocol, - RegisteredProtocolEvent, - RegisteredProtocolName, - RegisteredProtocolSubstream + RegisteredProtocol, RegisteredProtocolEvent, RegisteredProtocolName, + RegisteredProtocolSubstream, }; pub use self::notifications::{ - NotificationsIn, - NotificationsInSubstream, - NotificationsOut, - NotificationsOutSubstream, - NotificationsHandshakeError, - NotificationsOutError, + NotificationsHandshakeError, NotificationsIn, NotificationsInSubstream, NotificationsOut, + NotificationsOutError, NotificationsOutSubstream, }; mod collec; diff --git a/client/network/src/protocol/generic_proto/upgrade/collec.rs b/client/network/src/protocol/generic_proto/upgrade/collec.rs index f8d1999749..c2b5f2ae24 100644 --- a/client/network/src/protocol/generic_proto/upgrade/collec.rs +++ b/client/network/src/protocol/generic_proto/upgrade/collec.rs @@ -20,7 +20,12 @@ use futures::prelude::*; use libp2p::core::upgrade::{InboundUpgrade, ProtocolName, UpgradeInfo}; -use std::{iter::FromIterator, pin::Pin, task::{Context, Poll}, vec}; +use std::{ + iter::FromIterator, + pin::Pin, + task::{Context, Poll}, + vec, +}; // TODO: move this to libp2p => https://github.com/libp2p/rust-libp2p/issues/1445 @@ -30,42 +35,47 @@ use std::{iter::FromIterator, pin::Pin, task::{Context, Poll}, vec}; pub struct UpgradeCollec(pub Vec); impl From> for UpgradeCollec { - fn from(list: Vec) -> Self { - UpgradeCollec(list) - } + fn from(list: Vec) -> Self { + UpgradeCollec(list) + } } impl FromIterator for UpgradeCollec { - fn from_iter>(iter: I) -> Self { - UpgradeCollec(iter.into_iter().collect()) - } + fn 
from_iter>(iter: I) -> Self { + UpgradeCollec(iter.into_iter().collect()) + } } impl UpgradeInfo for UpgradeCollec { - type Info = ProtoNameWithUsize; - type InfoIter = vec::IntoIter; + type Info = ProtoNameWithUsize; + type InfoIter = vec::IntoIter; - fn protocol_info(&self) -> Self::InfoIter { - self.0.iter().enumerate() - .flat_map(|(n, p)| - p.protocol_info().into_iter().map(move |i| ProtoNameWithUsize(i, n))) - .collect::>() - .into_iter() - } + fn protocol_info(&self) -> Self::InfoIter { + self.0 + .iter() + .enumerate() + .flat_map(|(n, p)| { + p.protocol_info() + .into_iter() + .map(move |i| ProtoNameWithUsize(i, n)) + }) + .collect::>() + .into_iter() + } } impl InboundUpgrade for UpgradeCollec where - T: InboundUpgrade, + T: InboundUpgrade, { - type Output = (T::Output, usize); - type Error = (T::Error, usize); - type Future = FutWithUsize; + type Output = (T::Output, usize); + type Error = (T::Error, usize); + type Future = FutWithUsize; - fn upgrade_inbound(mut self, sock: C, info: Self::Info) -> Self::Future { - let fut = self.0.remove(info.1).upgrade_inbound(sock, info.0); - FutWithUsize(fut, info.1) - } + fn upgrade_inbound(mut self, sock: C, info: Self::Info) -> Self::Future { + let fut = self.0.remove(info.1).upgrade_inbound(sock, info.0); + FutWithUsize(fut, info.1) + } } /// Groups a `ProtocolName` with a `usize`. 
@@ -73,9 +83,9 @@ where pub struct ProtoNameWithUsize(T, usize); impl ProtocolName for ProtoNameWithUsize { - fn protocol_name(&self) -> &[u8] { - self.0.protocol_name() - } + fn protocol_name(&self) -> &[u8] { + self.0.protocol_name() + } } /// Equivalent to `fut.map_ok(|v| (v, num)).map_err(|e| (e, num))`, where `fut` and `num` are @@ -84,14 +94,14 @@ impl ProtocolName for ProtoNameWithUsize { pub struct FutWithUsize(#[pin] T, usize); impl>, O, E> Future for FutWithUsize { - type Output = Result<(O, usize), (E, usize)>; + type Output = Result<(O, usize), (E, usize)>; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = self.project(); - match Future::poll(this.0, cx) { - Poll::Ready(Ok(v)) => Poll::Ready(Ok((v, *this.1))), - Poll::Ready(Err(e)) => Poll::Ready(Err((e, *this.1))), - Poll::Pending => Poll::Pending, - } - } + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = self.project(); + match Future::poll(this.0, cx) { + Poll::Ready(Ok(v)) => Poll::Ready(Ok((v, *this.1))), + Poll::Ready(Err(e)) => Poll::Ready(Err((e, *this.1))), + Poll::Pending => Poll::Pending, + } + } } diff --git a/client/network/src/protocol/generic_proto/upgrade/legacy.rs b/client/network/src/protocol/generic_proto/upgrade/legacy.rs index 311e0b04f9..efb4e8c965 100644 --- a/client/network/src/protocol/generic_proto/upgrade/legacy.rs +++ b/client/network/src/protocol/generic_proto/upgrade/legacy.rs @@ -18,9 +18,9 @@ use crate::config::ProtocolId; use bytes::BytesMut; use futures::prelude::*; use futures_codec::Framed; -use libp2p::core::{Endpoint, UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade::ProtocolName}; -use std::{collections::VecDeque, io, pin::Pin, vec::IntoIter as VecIntoIter}; +use libp2p::core::{upgrade::ProtocolName, Endpoint, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use std::task::{Context, Poll}; +use std::{collections::VecDeque, io, pin::Pin, vec::IntoIter as VecIntoIter}; use unsigned_varint::codec::UviBytes; /// 
Connection upgrade for a single protocol. @@ -28,271 +28,266 @@ use unsigned_varint::codec::UviBytes; /// Note that "a single protocol" here refers to `par` for example. However /// each protocol can have multiple different versions for networking purposes. pub struct RegisteredProtocol { - /// Id of the protocol for API purposes. - id: ProtocolId, - /// Base name of the protocol as advertised on the network. - /// Ends with `/` so that we can append a version number behind. - base_name: Vec, - /// List of protocol versions that we support. - /// Ordered in descending order so that the best comes first. - supported_versions: Vec, + /// Id of the protocol for API purposes. + id: ProtocolId, + /// Base name of the protocol as advertised on the network. + /// Ends with `/` so that we can append a version number behind. + base_name: Vec, + /// List of protocol versions that we support. + /// Ordered in descending order so that the best comes first. + supported_versions: Vec, } impl RegisteredProtocol { - /// Creates a new `RegisteredProtocol`. The `custom_data` parameter will be - /// passed inside the `RegisteredProtocolOutput`. - pub fn new(protocol: impl Into, versions: &[u8]) - -> Self { - let protocol = protocol.into(); - let mut base_name = b"/substrate/".to_vec(); - base_name.extend_from_slice(protocol.as_bytes()); - base_name.extend_from_slice(b"/"); - - RegisteredProtocol { - base_name, - id: protocol, - supported_versions: { - let mut tmp = versions.to_vec(); - tmp.sort_unstable_by(|a, b| b.cmp(&a)); - tmp - }, - } - } + /// Creates a new `RegisteredProtocol`. The `custom_data` parameter will be + /// passed inside the `RegisteredProtocolOutput`. 
+ pub fn new(protocol: impl Into, versions: &[u8]) -> Self { + let protocol = protocol.into(); + let mut base_name = b"/substrate/".to_vec(); + base_name.extend_from_slice(protocol.as_bytes()); + base_name.extend_from_slice(b"/"); + + RegisteredProtocol { + base_name, + id: protocol, + supported_versions: { + let mut tmp = versions.to_vec(); + tmp.sort_unstable_by(|a, b| b.cmp(&a)); + tmp + }, + } + } } impl Clone for RegisteredProtocol { - fn clone(&self) -> Self { - RegisteredProtocol { - id: self.id.clone(), - base_name: self.base_name.clone(), - supported_versions: self.supported_versions.clone(), - } - } + fn clone(&self) -> Self { + RegisteredProtocol { + id: self.id.clone(), + base_name: self.base_name.clone(), + supported_versions: self.supported_versions.clone(), + } + } } /// Output of a `RegisteredProtocol` upgrade. pub struct RegisteredProtocolSubstream { - /// If true, we are in the process of closing the sink. - is_closing: bool, - /// Whether the local node opened this substream (dialer), or we received this substream from - /// the remote (listener). - endpoint: Endpoint, - /// Buffer of packets to send. - send_queue: VecDeque, - /// If true, we should call `poll_complete` on the inner sink. - requires_poll_flush: bool, - /// The underlying substream. - inner: stream::Fuse>>, - /// Version of the protocol that was negotiated. - protocol_version: u8, - /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one - /// unless the buffer empties then fills itself again. - clogged_fuse: bool, + /// If true, we are in the process of closing the sink. + is_closing: bool, + /// Whether the local node opened this substream (dialer), or we received this substream from + /// the remote (listener). + endpoint: Endpoint, + /// Buffer of packets to send. + send_queue: VecDeque, + /// If true, we should call `poll_complete` on the inner sink. + requires_poll_flush: bool, + /// The underlying substream. 
+ inner: stream::Fuse>>, + /// Version of the protocol that was negotiated. + protocol_version: u8, + /// If true, we have sent a "remote is clogged" event recently and shouldn't send another one + /// unless the buffer empties then fills itself again. + clogged_fuse: bool, } impl RegisteredProtocolSubstream { - /// Returns the version of the protocol that was negotiated. - pub fn protocol_version(&self) -> u8 { - self.protocol_version - } - - /// Returns whether the local node opened this substream (dialer), or we received this - /// substream from the remote (listener). - pub fn endpoint(&self) -> Endpoint { - self.endpoint - } - - /// Starts a graceful shutdown process on this substream. - /// - /// Note that "graceful" means that we sent a closing message. We don't wait for any - /// confirmation from the remote. - /// - /// After calling this, the stream is guaranteed to finish soon-ish. - pub fn shutdown(&mut self) { - self.is_closing = true; - self.send_queue.clear(); - } - - /// Sends a message to the substream. - pub fn send_message(&mut self, data: Vec) { - if self.is_closing { - return - } - - self.send_queue.push_back(From::from(&data[..])); - } + /// Returns the version of the protocol that was negotiated. + pub fn protocol_version(&self) -> u8 { + self.protocol_version + } + + /// Returns whether the local node opened this substream (dialer), or we received this + /// substream from the remote (listener). + pub fn endpoint(&self) -> Endpoint { + self.endpoint + } + + /// Starts a graceful shutdown process on this substream. + /// + /// Note that "graceful" means that we sent a closing message. We don't wait for any + /// confirmation from the remote. + /// + /// After calling this, the stream is guaranteed to finish soon-ish. + pub fn shutdown(&mut self) { + self.is_closing = true; + self.send_queue.clear(); + } + + /// Sends a message to the substream. 
+ pub fn send_message(&mut self, data: Vec) { + if self.is_closing { + return; + } + + self.send_queue.push_back(From::from(&data[..])); + } } /// Event produced by the `RegisteredProtocolSubstream`. #[derive(Debug, Clone)] pub enum RegisteredProtocolEvent { - /// Received a message from the remote. - Message(BytesMut), - - /// Diagnostic event indicating that the connection is clogged and we should avoid sending too - /// many messages to it. - Clogged { - /// Copy of the messages that are within the buffer, for further diagnostic. - messages: Vec>, - }, + /// Received a message from the remote. + Message(BytesMut), + + /// Diagnostic event indicating that the connection is clogged and we should avoid sending too + /// many messages to it. + Clogged { + /// Copy of the messages that are within the buffer, for further diagnostic. + messages: Vec>, + }, } impl Stream for RegisteredProtocolSubstream -where TSubstream: AsyncRead + AsyncWrite + Unpin { - type Item = Result; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - // Flushing the local queue. - while !self.send_queue.is_empty() { - match Pin::new(&mut self.inner).poll_ready(cx) { - Poll::Ready(Ok(())) => {}, - Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err))), - Poll::Pending => break, - } - - if let Some(packet) = self.send_queue.pop_front() { - Pin::new(&mut self.inner).start_send(packet)?; - self.requires_poll_flush = true; - } - } - - // If we are closing, close as soon as the Sink is closed. - if self.is_closing { - return match Pin::new(&mut self.inner).poll_close(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(Ok(_)) => Poll::Ready(None), - Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), - } - } - - // Indicating that the remote is clogged if that's the case. 
- if self.send_queue.len() >= 2048 { - if !self.clogged_fuse { - // Note: this fuse is important not just for preventing us from flooding the logs; - // if you remove the fuse, then we will always return early from this function and - // thus never read any message from the network. - self.clogged_fuse = true; - return Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged { - messages: self.send_queue.iter() - .map(|m| m.clone().to_vec()) - .collect(), - }))) - } - } else { - self.clogged_fuse = false; - } - - // Flushing if necessary. - if self.requires_poll_flush { - if let Poll::Ready(()) = Pin::new(&mut self.inner).poll_flush(cx)? { - self.requires_poll_flush = false; - } - } - - // Receiving incoming packets. - // Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever. - match Pin::new(&mut self.inner).poll_next(cx)? { - Poll::Ready(Some(data)) => { - Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(data)))) - } - Poll::Ready(None) => - if !self.requires_poll_flush && self.send_queue.is_empty() { - Poll::Ready(None) - } else { - Poll::Pending - } - Poll::Pending => Poll::Pending, - } - } +where + TSubstream: AsyncRead + AsyncWrite + Unpin, +{ + type Item = Result; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + // Flushing the local queue. + while !self.send_queue.is_empty() { + match Pin::new(&mut self.inner).poll_ready(cx) { + Poll::Ready(Ok(())) => {} + Poll::Ready(Err(err)) => return Poll::Ready(Some(Err(err))), + Poll::Pending => break, + } + + if let Some(packet) = self.send_queue.pop_front() { + Pin::new(&mut self.inner).start_send(packet)?; + self.requires_poll_flush = true; + } + } + + // If we are closing, close as soon as the Sink is closed. 
+ if self.is_closing { + return match Pin::new(&mut self.inner).poll_close(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(Ok(_)) => Poll::Ready(None), + Poll::Ready(Err(err)) => Poll::Ready(Some(Err(err))), + }; + } + + // Indicating that the remote is clogged if that's the case. + if self.send_queue.len() >= 2048 { + if !self.clogged_fuse { + // Note: this fuse is important not just for preventing us from flooding the logs; + // if you remove the fuse, then we will always return early from this function and + // thus never read any message from the network. + self.clogged_fuse = true; + return Poll::Ready(Some(Ok(RegisteredProtocolEvent::Clogged { + messages: self.send_queue.iter().map(|m| m.clone().to_vec()).collect(), + }))); + } + } else { + self.clogged_fuse = false; + } + + // Flushing if necessary. + if self.requires_poll_flush { + if let Poll::Ready(()) = Pin::new(&mut self.inner).poll_flush(cx)? { + self.requires_poll_flush = false; + } + } + + // Receiving incoming packets. + // Note that `inner` is wrapped in a `Fuse`, therefore we can poll it forever. + match Pin::new(&mut self.inner).poll_next(cx)? { + Poll::Ready(Some(data)) => { + Poll::Ready(Some(Ok(RegisteredProtocolEvent::Message(data)))) + } + Poll::Ready(None) => { + if !self.requires_poll_flush && self.send_queue.is_empty() { + Poll::Ready(None) + } else { + Poll::Pending + } + } + Poll::Pending => Poll::Pending, + } + } } impl UpgradeInfo for RegisteredProtocol { - type Info = RegisteredProtocolName; - type InfoIter = VecIntoIter; - - #[inline] - fn protocol_info(&self) -> Self::InfoIter { - // Report each version as an individual protocol. 
- self.supported_versions.iter().map(|&version| { - let num = version.to_string(); - - let mut name = self.base_name.clone(); - name.extend_from_slice(num.as_bytes()); - RegisteredProtocolName { - name, - version, - } - }).collect::>().into_iter() - } + type Info = RegisteredProtocolName; + type InfoIter = VecIntoIter; + + #[inline] + fn protocol_info(&self) -> Self::InfoIter { + // Report each version as an individual protocol. + self.supported_versions + .iter() + .map(|&version| { + let num = version.to_string(); + + let mut name = self.base_name.clone(); + name.extend_from_slice(num.as_bytes()); + RegisteredProtocolName { name, version } + }) + .collect::>() + .into_iter() + } } /// Implementation of `ProtocolName` for a custom protocol. #[derive(Debug, Clone)] pub struct RegisteredProtocolName { - /// Protocol name, as advertised on the wire. - name: Vec, - /// Version number. Stored in string form in `name`, but duplicated here for easier retrieval. - version: u8, + /// Protocol name, as advertised on the wire. + name: Vec, + /// Version number. Stored in string form in `name`, but duplicated here for easier retrieval. + version: u8, } impl ProtocolName for RegisteredProtocolName { - fn protocol_name(&self) -> &[u8] { - &self.name - } + fn protocol_name(&self) -> &[u8] { + &self.name + } } impl InboundUpgrade for RegisteredProtocol -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { - type Output = RegisteredProtocolSubstream; - type Future = future::Ready>; - type Error = io::Error; - - fn upgrade_inbound( - self, - socket: TSubstream, - info: Self::Info, - ) -> Self::Future { - let framed = { - let mut codec = UviBytes::default(); - codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. 
- Framed::new(socket, codec) - }; - - future::ok(RegisteredProtocolSubstream { - is_closing: false, - endpoint: Endpoint::Listener, - send_queue: VecDeque::new(), - requires_poll_flush: false, - inner: framed.fuse(), - protocol_version: info.version, - clogged_fuse: false, - }) - } + type Output = RegisteredProtocolSubstream; + type Future = future::Ready>; + type Error = io::Error; + + fn upgrade_inbound(self, socket: TSubstream, info: Self::Info) -> Self::Future { + let framed = { + let mut codec = UviBytes::default(); + codec.set_max_len(16 * 1024 * 1024); // 16 MiB hard limit for packets. + Framed::new(socket, codec) + }; + + future::ok(RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Listener, + send_queue: VecDeque::new(), + requires_poll_flush: false, + inner: framed.fuse(), + protocol_version: info.version, + clogged_fuse: false, + }) + } } impl OutboundUpgrade for RegisteredProtocol -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { - type Output = >::Output; - type Future = >::Future; - type Error = >::Error; - - fn upgrade_outbound( - self, - socket: TSubstream, - info: Self::Info, - ) -> Self::Future { - let framed = Framed::new(socket, UviBytes::default()); - - future::ok(RegisteredProtocolSubstream { - is_closing: false, - endpoint: Endpoint::Dialer, - send_queue: VecDeque::new(), - requires_poll_flush: false, - inner: framed.fuse(), - protocol_version: info.version, - clogged_fuse: false, - }) - } + type Output = >::Output; + type Future = >::Future; + type Error = >::Error; + + fn upgrade_outbound(self, socket: TSubstream, info: Self::Info) -> Self::Future { + let framed = Framed::new(socket, UviBytes::default()); + + future::ok(RegisteredProtocolSubstream { + is_closing: false, + endpoint: Endpoint::Dialer, + send_queue: VecDeque::new(), + requires_poll_flush: false, + inner: framed.fuse(), + protocol_version: info.version, + clogged_fuse: false, + }) + } } diff --git 
a/client/network/src/protocol/generic_proto/upgrade/notifications.rs b/client/network/src/protocol/generic_proto/upgrade/notifications.rs index cf271016e7..39dab6abc4 100644 --- a/client/network/src/protocol/generic_proto/upgrade/notifications.rs +++ b/client/network/src/protocol/generic_proto/upgrade/notifications.rs @@ -32,13 +32,19 @@ /// Notification substreams are unidirectional. If A opens a substream with B, then B is /// encouraged but not required to open a substream to A as well. /// - use bytes::BytesMut; use futures::{prelude::*, ready}; use futures_codec::Framed; -use libp2p::core::{UpgradeInfo, InboundUpgrade, OutboundUpgrade, upgrade}; +use libp2p::core::{upgrade, InboundUpgrade, OutboundUpgrade, UpgradeInfo}; use log::error; -use std::{borrow::Cow, collections::VecDeque, convert::TryFrom as _, io, iter, mem, pin::Pin, task::{Context, Poll}}; +use std::{ + borrow::Cow, + collections::VecDeque, + convert::TryFrom as _, + io, iter, mem, + pin::Pin, + task::{Context, Poll}, +}; use unsigned_varint::codec::UviBytes; /// Maximum allowed size of the two handshake messages, in bytes. @@ -50,18 +56,18 @@ const MAX_PENDING_MESSAGES: usize = 512; /// stream of messages. #[derive(Debug, Clone)] pub struct NotificationsIn { - /// Protocol name to use when negotiating the substream. - protocol_name: Cow<'static, [u8]>, + /// Protocol name to use when negotiating the substream. + protocol_name: Cow<'static, [u8]>, } /// Upgrade that opens a substream, waits for the remote to accept by sending back a status /// message, then becomes a unidirectional sink of data. #[derive(Debug, Clone)] pub struct NotificationsOut { - /// Protocol name to use when negotiating the substream. - protocol_name: Cow<'static, [u8]>, - /// Message to send when we start the handshake. - initial_message: Vec, + /// Protocol name to use when negotiating the substream. + protocol_name: Cow<'static, [u8]>, + /// Message to send when we start the handshake. 
+ initial_message: Vec, } /// A substream for incoming notification messages. @@ -70,569 +76,613 @@ pub struct NotificationsOut { /// message to the remote. No message will come before this has been done. #[pin_project::pin_project] pub struct NotificationsInSubstream { - #[pin] - socket: Framed>>>, - handshake: NotificationsInSubstreamHandshake, + #[pin] + socket: Framed>>>, + handshake: NotificationsInSubstreamHandshake, } /// State of the handshake sending back process. enum NotificationsInSubstreamHandshake { - /// Waiting for the user to give us the handshake message. - NotSent, - /// User gave us the handshake message. Trying to push it in the socket. - PendingSend(Vec), - /// Handshake message was pushed in the socket. Still need to flush. - Close, - /// Handshake message successfully sent. - Sent, + /// Waiting for the user to give us the handshake message. + NotSent, + /// User gave us the handshake message. Trying to push it in the socket. + PendingSend(Vec), + /// Handshake message was pushed in the socket. Still need to flush. + Close, + /// Handshake message successfully sent. + Sent, } /// A substream for outgoing notification messages. #[pin_project::pin_project] pub struct NotificationsOutSubstream { - /// Substream where to send messages. - #[pin] - socket: Framed>>>, - /// Queue of messages waiting to be sent. - messages_queue: VecDeque>, - /// If true, we need to flush `socket`. - need_flush: bool, + /// Substream where to send messages. + #[pin] + socket: Framed>>>, + /// Queue of messages waiting to be sent. + messages_queue: VecDeque>, + /// If true, we need to flush `socket`. + need_flush: bool, } impl NotificationsIn { - /// Builds a new potential upgrade. - pub fn new(protocol_name: impl Into>) -> Self { - NotificationsIn { - protocol_name: protocol_name.into(), - } - } - - /// Returns the name of the protocol that we accept. - pub fn protocol_name(&self) -> &[u8] { - &self.protocol_name - } + /// Builds a new potential upgrade. 
+ pub fn new(protocol_name: impl Into>) -> Self { + NotificationsIn { + protocol_name: protocol_name.into(), + } + } + + /// Returns the name of the protocol that we accept. + pub fn protocol_name(&self) -> &[u8] { + &self.protocol_name + } } impl UpgradeInfo for NotificationsIn { - type Info = Cow<'static, [u8]>; - type InfoIter = iter::Once; + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol_name.clone()) - } + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol_name.clone()) + } } impl InboundUpgrade for NotificationsIn -where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +where + TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = (Vec, NotificationsInSubstream); - type Future = Pin> + Send>>; - type Error = NotificationsHandshakeError; - - fn upgrade_inbound( - self, - mut socket: TSubstream, - _: Self::Info, - ) -> Self::Future { - Box::pin(async move { - let initial_message_len = unsigned_varint::aio::read_usize(&mut socket).await?; - if initial_message_len > MAX_HANDSHAKE_SIZE { - return Err(NotificationsHandshakeError::TooLarge { - requested: initial_message_len, - max: MAX_HANDSHAKE_SIZE, - }); - } - - let mut initial_message = vec![0u8; initial_message_len]; - if !initial_message.is_empty() { - socket.read(&mut initial_message).await?; - } - - let substream = NotificationsInSubstream { - socket: Framed::new(socket, UviBytes::default()), - handshake: NotificationsInSubstreamHandshake::NotSent, - }; - - Ok((initial_message, substream)) - }) - } + type Output = (Vec, NotificationsInSubstream); + type Future = Pin> + Send>>; + type Error = NotificationsHandshakeError; + + fn upgrade_inbound(self, mut socket: TSubstream, _: Self::Info) -> Self::Future { + Box::pin(async move { + let initial_message_len = unsigned_varint::aio::read_usize(&mut socket).await?; + if initial_message_len > MAX_HANDSHAKE_SIZE { + return 
Err(NotificationsHandshakeError::TooLarge { + requested: initial_message_len, + max: MAX_HANDSHAKE_SIZE, + }); + } + + let mut initial_message = vec![0u8; initial_message_len]; + if !initial_message.is_empty() { + socket.read(&mut initial_message).await?; + } + + let substream = NotificationsInSubstream { + socket: Framed::new(socket, UviBytes::default()), + handshake: NotificationsInSubstreamHandshake::NotSent, + }; + + Ok((initial_message, substream)) + }) + } } impl NotificationsInSubstream -where TSubstream: AsyncRead + AsyncWrite, +where + TSubstream: AsyncRead + AsyncWrite, { - /// Sends the handshake in order to inform the remote that we accept the substream. - pub fn send_handshake(&mut self, message: impl Into>) { - if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { - error!(target: "sub-libp2p", "Tried to send handshake twice"); - return; - } - - self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); - } + /// Sends the handshake in order to inform the remote that we accept the substream. + pub fn send_handshake(&mut self, message: impl Into>) { + if !matches!(self.handshake, NotificationsInSubstreamHandshake::NotSent) { + error!(target: "sub-libp2p", "Tried to send handshake twice"); + return; + } + + self.handshake = NotificationsInSubstreamHandshake::PendingSend(message.into()); + } } impl Stream for NotificationsInSubstream -where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let mut this = self.project(); - - // This `Stream` implementation first tries to send back the handshake if necessary. 
- loop { - match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { - NotificationsInSubstreamHandshake::Sent => - return Stream::poll_next(this.socket.as_mut(), cx), - NotificationsInSubstreamHandshake::NotSent => { - *this.handshake = NotificationsInSubstreamHandshake::NotSent; - return Poll::Pending - }, - NotificationsInSubstreamHandshake::PendingSend(msg) => - match Sink::poll_ready(this.socket.as_mut(), cx) { - Poll::Ready(_) => { - *this.handshake = NotificationsInSubstreamHandshake::Close; - match Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg)) { - Ok(()) => {}, - Err(err) => return Poll::Ready(Some(Err(err))), - } - }, - Poll::Pending => { - *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); - return Poll::Pending - } - }, - NotificationsInSubstreamHandshake::Close => - match Sink::poll_close(this.socket.as_mut(), cx)? { - Poll::Ready(()) => - *this.handshake = NotificationsInSubstreamHandshake::Sent, - Poll::Pending => { - *this.handshake = NotificationsInSubstreamHandshake::Close; - return Poll::Pending - } - }, - } - } - } + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + // This `Stream` implementation first tries to send back the handshake if necessary. 
+ loop { + match mem::replace(this.handshake, NotificationsInSubstreamHandshake::Sent) { + NotificationsInSubstreamHandshake::Sent => { + return Stream::poll_next(this.socket.as_mut(), cx) + } + NotificationsInSubstreamHandshake::NotSent => { + *this.handshake = NotificationsInSubstreamHandshake::NotSent; + return Poll::Pending; + } + NotificationsInSubstreamHandshake::PendingSend(msg) => { + match Sink::poll_ready(this.socket.as_mut(), cx) { + Poll::Ready(_) => { + *this.handshake = NotificationsInSubstreamHandshake::Close; + match Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg)) { + Ok(()) => {} + Err(err) => return Poll::Ready(Some(Err(err))), + } + } + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::PendingSend(msg); + return Poll::Pending; + } + } + } + NotificationsInSubstreamHandshake::Close => { + match Sink::poll_close(this.socket.as_mut(), cx)? { + Poll::Ready(()) => { + *this.handshake = NotificationsInSubstreamHandshake::Sent + } + Poll::Pending => { + *this.handshake = NotificationsInSubstreamHandshake::Close; + return Poll::Pending; + } + } + } + } + } + } } impl NotificationsOut { - /// Builds a new potential upgrade. - pub fn new(protocol_name: impl Into>, initial_message: impl Into>) -> Self { - let initial_message = initial_message.into(); - if initial_message.len() > MAX_HANDSHAKE_SIZE { - error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit"); - } - - NotificationsOut { - protocol_name: protocol_name.into(), - initial_message, - } - } + /// Builds a new potential upgrade. 
+ pub fn new( + protocol_name: impl Into>, + initial_message: impl Into>, + ) -> Self { + let initial_message = initial_message.into(); + if initial_message.len() > MAX_HANDSHAKE_SIZE { + error!(target: "sub-libp2p", "Outbound networking handshake is above allowed protocol limit"); + } + + NotificationsOut { + protocol_name: protocol_name.into(), + initial_message, + } + } } impl UpgradeInfo for NotificationsOut { - type Info = Cow<'static, [u8]>; - type InfoIter = iter::Once; + type Info = Cow<'static, [u8]>; + type InfoIter = iter::Once; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol_name.clone()) - } + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol_name.clone()) + } } impl OutboundUpgrade for NotificationsOut -where TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, +where + TSubstream: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = (Vec, NotificationsOutSubstream); - type Future = Pin> + Send>>; - type Error = NotificationsHandshakeError; - - fn upgrade_outbound( - self, - mut socket: TSubstream, - _: Self::Info, - ) -> Self::Future { - Box::pin(async move { - upgrade::write_with_len_prefix(&mut socket, &self.initial_message).await?; - - // Reading handshake. 
- let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; - if handshake_len > MAX_HANDSHAKE_SIZE { - return Err(NotificationsHandshakeError::TooLarge { - requested: handshake_len, - max: MAX_HANDSHAKE_SIZE, - }); - } - - let mut handshake = vec![0u8; handshake_len]; - if !handshake.is_empty() { - socket.read(&mut handshake).await?; - } - - Ok((handshake, NotificationsOutSubstream { - socket: Framed::new(socket, UviBytes::default()), - messages_queue: VecDeque::with_capacity(MAX_PENDING_MESSAGES), - need_flush: false, - })) - }) - } + type Output = (Vec, NotificationsOutSubstream); + type Future = Pin> + Send>>; + type Error = NotificationsHandshakeError; + + fn upgrade_outbound(self, mut socket: TSubstream, _: Self::Info) -> Self::Future { + Box::pin(async move { + upgrade::write_with_len_prefix(&mut socket, &self.initial_message).await?; + + // Reading handshake. + let handshake_len = unsigned_varint::aio::read_usize(&mut socket).await?; + if handshake_len > MAX_HANDSHAKE_SIZE { + return Err(NotificationsHandshakeError::TooLarge { + requested: handshake_len, + max: MAX_HANDSHAKE_SIZE, + }); + } + + let mut handshake = vec![0u8; handshake_len]; + if !handshake.is_empty() { + socket.read(&mut handshake).await?; + } + + Ok(( + handshake, + NotificationsOutSubstream { + socket: Framed::new(socket, UviBytes::default()), + messages_queue: VecDeque::with_capacity(MAX_PENDING_MESSAGES), + need_flush: false, + }, + )) + }) + } } impl NotificationsOutSubstream { - /// Returns the number of items in the queue, capped to `u32::max_value()`. - pub fn queue_len(&self) -> u32 { - u32::try_from(self.messages_queue.len()).unwrap_or(u32::max_value()) - } - - /// Push a message to the queue of messages. - /// - /// This has the same effect as the `Sink::start_send` implementation. 
- pub fn push_message(&mut self, item: Vec) -> Result<(), NotificationsOutError> { - if self.messages_queue.len() >= MAX_PENDING_MESSAGES { - return Err(NotificationsOutError::Clogged); - } - - self.messages_queue.push_back(item); - Ok(()) - } + /// Returns the number of items in the queue, capped to `u32::max_value()`. + pub fn queue_len(&self) -> u32 { + u32::try_from(self.messages_queue.len()).unwrap_or(u32::max_value()) + } + + /// Push a message to the queue of messages. + /// + /// This has the same effect as the `Sink::start_send` implementation. + pub fn push_message(&mut self, item: Vec) -> Result<(), NotificationsOutError> { + if self.messages_queue.len() >= MAX_PENDING_MESSAGES { + return Err(NotificationsOutError::Clogged); + } + + self.messages_queue.push_back(item); + Ok(()) + } } impl Sink> for NotificationsOutSubstream - where TSubstream: AsyncRead + AsyncWrite + Unpin, +where + TSubstream: AsyncRead + AsyncWrite + Unpin, { - type Error = NotificationsOutError; - - fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { - Poll::Ready(Ok(())) - } - - fn start_send(mut self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { - self.push_message(item) - } - - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let mut this = self.project(); - - while !this.messages_queue.is_empty() { - match Sink::poll_ready(this.socket.as_mut(), cx) { - Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), - Poll::Ready(Ok(())) => { - let msg = this.messages_queue.pop_front() - .expect("checked for !is_empty above; qed"); - Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg))?; - *this.need_flush = true; - }, - Poll::Pending => return Poll::Pending, - } - } - - if *this.need_flush { - match Sink::poll_flush(this.socket.as_mut(), cx) { - Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), - Poll::Ready(Ok(())) => *this.need_flush = false, - Poll::Pending => return Poll::Pending, - } - } - - 
Poll::Ready(Ok(())) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(Sink::poll_flush(self.as_mut(), cx))?; - let this = self.project(); - match Sink::poll_close(this.socket, cx) { - Poll::Ready(Ok(())) => Poll::Ready(Ok(())), - Poll::Ready(Err(err)) => Poll::Ready(Err(From::from(err))), - Poll::Pending => Poll::Pending, - } - } + type Error = NotificationsOutError; + + fn poll_ready(self: Pin<&mut Self>, _: &mut Context) -> Poll> { + Poll::Ready(Ok(())) + } + + fn start_send(mut self: Pin<&mut Self>, item: Vec) -> Result<(), Self::Error> { + self.push_message(item) + } + + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut this = self.project(); + + while !this.messages_queue.is_empty() { + match Sink::poll_ready(this.socket.as_mut(), cx) { + Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), + Poll::Ready(Ok(())) => { + let msg = this + .messages_queue + .pop_front() + .expect("checked for !is_empty above; qed"); + Sink::start_send(this.socket.as_mut(), io::Cursor::new(msg))?; + *this.need_flush = true; + } + Poll::Pending => return Poll::Pending, + } + } + + if *this.need_flush { + match Sink::poll_flush(this.socket.as_mut(), cx) { + Poll::Ready(Err(err)) => return Poll::Ready(Err(From::from(err))), + Poll::Ready(Ok(())) => *this.need_flush = false, + Poll::Pending => return Poll::Pending, + } + } + + Poll::Ready(Ok(())) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(Sink::poll_flush(self.as_mut(), cx))?; + let this = self.project(); + match Sink::poll_close(this.socket, cx) { + Poll::Ready(Ok(())) => Poll::Ready(Ok(())), + Poll::Ready(Err(err)) => Poll::Ready(Err(From::from(err))), + Poll::Pending => Poll::Pending, + } + } } /// Error generated by sending on a notifications out substream. #[derive(Debug, derive_more::From, derive_more::Display)] pub enum NotificationsHandshakeError { - /// I/O error on the substream. 
- Io(io::Error), - - /// Initial message or handshake was too large. - #[display(fmt = "Initial message or handshake was too large: {}", requested)] - TooLarge { - /// Size requested by the remote. - requested: usize, - /// Maximum allowed, - max: usize, - }, - - /// Error while decoding the variable-length integer. - VarintDecode(unsigned_varint::decode::Error), + /// I/O error on the substream. + Io(io::Error), + + /// Initial message or handshake was too large. + #[display(fmt = "Initial message or handshake was too large: {}", requested)] + TooLarge { + /// Size requested by the remote. + requested: usize, + /// Maximum allowed, + max: usize, + }, + + /// Error while decoding the variable-length integer. + VarintDecode(unsigned_varint::decode::Error), } impl From for NotificationsHandshakeError { - fn from(err: unsigned_varint::io::ReadError) -> Self { - match err { - unsigned_varint::io::ReadError::Io(err) => NotificationsHandshakeError::Io(err), - unsigned_varint::io::ReadError::Decode(err) => NotificationsHandshakeError::VarintDecode(err), - _ => { - log::warn!("Unrecognized varint decoding error"); - NotificationsHandshakeError::Io(From::from(io::ErrorKind::InvalidData)) - } - } - } + fn from(err: unsigned_varint::io::ReadError) -> Self { + match err { + unsigned_varint::io::ReadError::Io(err) => NotificationsHandshakeError::Io(err), + unsigned_varint::io::ReadError::Decode(err) => { + NotificationsHandshakeError::VarintDecode(err) + } + _ => { + log::warn!("Unrecognized varint decoding error"); + NotificationsHandshakeError::Io(From::from(io::ErrorKind::InvalidData)) + } + } + } } /// Error generated by sending on a notifications out substream. #[derive(Debug, derive_more::From, derive_more::Display)] pub enum NotificationsOutError { - /// I/O error on the substream. - Io(io::Error), - - /// Remote doesn't process our messages quickly enough. 
- /// - /// > **Note**: This is not necessarily the remote's fault, and could also be caused by the - /// > local node sending data too quickly. Properly doing back-pressure, however, - /// > would require a deep refactoring effort in Substrate as a whole. - Clogged, + /// I/O error on the substream. + Io(io::Error), + + /// Remote doesn't process our messages quickly enough. + /// + /// > **Note**: This is not necessarily the remote's fault, and could also be caused by the + /// > local node sending data too quickly. Properly doing back-pressure, however, + /// > would require a deep refactoring effort in Substrate as a whole. + Clogged, } #[cfg(test)] mod tests { - use super::{NotificationsIn, NotificationsOut}; - - use async_std::net::{TcpListener, TcpStream}; - use futures::{prelude::*, channel::oneshot}; - use libp2p::core::upgrade; - use std::pin::Pin; - - #[test] - fn basic_works() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; - let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); - - let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let (handshake, mut substream) = upgrade::apply_outbound( - socket, - NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), - upgrade::Version::V1 - ).await.unwrap(); - - assert_eq!(handshake, b"hello world"); - substream.send(b"test message".to_vec()).await.unwrap(); - }); - - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( - socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); - - assert_eq!(initial_message, b"initial message"); - substream.send_handshake(&b"hello world"[..]); - - let msg = substream.next().await.unwrap().unwrap(); - assert_eq!(msg.as_ref(), b"test 
message"); - }); - - async_std::task::block_on(client); - } - - #[test] - fn empty_handshake() { - // Check that everything still works when the handshake messages are empty. - - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; - let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); - - let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let (handshake, mut substream) = upgrade::apply_outbound( - socket, - NotificationsOut::new(PROTO_NAME, vec![]), - upgrade::Version::V1 - ).await.unwrap(); - - assert!(handshake.is_empty()); - substream.send(Default::default()).await.unwrap(); - }); - - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( - socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); - - assert!(initial_message.is_empty()); - substream.send_handshake(vec![]); - - let msg = substream.next().await.unwrap().unwrap(); - assert!(msg.as_ref().is_empty()); - }); - - async_std::task::block_on(client); - } - - #[test] - fn refused() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; - let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); - - let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let outcome = upgrade::apply_outbound( - socket, - NotificationsOut::new(PROTO_NAME, &b"hello"[..]), - upgrade::Version::V1 - ).await; - - // Despite the protocol negotiation being successfully conducted on the listener - // side, we have to receive an error here because the listener didn't send the - // handshake. 
- assert!(outcome.is_err()); - }); - - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let (initial_msg, substream) = upgrade::apply_inbound( - socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); - - assert_eq!(initial_msg, b"hello"); - - // We successfully upgrade to the protocol, but then close the substream. - drop(substream); - }); - - async_std::task::block_on(client); - } - - #[test] - fn large_initial_message_refused() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; - let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); - - let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let ret = upgrade::apply_outbound( - socket, - // We check that an initial message that is too large gets refused. - NotificationsOut::new(PROTO_NAME, (0..32768).map(|_| 0).collect::>()), - upgrade::Version::V1 - ).await; - assert!(ret.is_err()); - }); - - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let ret = upgrade::apply_inbound( - socket, - NotificationsIn::new(PROTO_NAME) - ).await; - assert!(ret.is_err()); - }); - - async_std::task::block_on(client); - } - - #[test] - fn large_handshake_refused() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; - let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); - - let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let ret = upgrade::apply_outbound( - socket, - NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), - upgrade::Version::V1 - ).await; - assert!(ret.is_err()); 
- }); - - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( - socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); - assert_eq!(initial_message, b"initial message"); - - // We check that a handshake that is too large gets refused. - substream.send_handshake((0..32768).map(|_| 0).collect::>()); - let _ = substream.next().await; - }); - - async_std::task::block_on(client); - } - - #[test] - fn buffer_is_full_closes_connection() { - const PROTO_NAME: &'static [u8] = b"/test/proto/1"; - let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); - - let client = async_std::task::spawn(async move { - let socket = TcpStream::connect(listener_addr_rx.await.unwrap()).await.unwrap(); - let (handshake, mut substream) = upgrade::apply_outbound( - socket, - NotificationsOut::new(PROTO_NAME, vec![]), - upgrade::Version::V1 - ).await.unwrap(); - - assert!(handshake.is_empty()); - - // Push an item and flush so that the test works. - substream.send(b"hello world".to_vec()).await.unwrap(); - - for _ in 0..32768 { - // Push an item on the sink without flushing until an error happens because the - // buffer is full. 
- let message = b"hello world!".to_vec(); - if future::poll_fn(|cx| Sink::poll_ready(Pin::new(&mut substream), cx)).await.is_err() { - return Ok(()); - } - if Sink::start_send(Pin::new(&mut substream), message).is_err() { - return Ok(()); - } - } - - Err(()) - }); - - async_std::task::block_on(async move { - let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); - listener_addr_tx.send(listener.local_addr().unwrap()).unwrap(); - - let (socket, _) = listener.accept().await.unwrap(); - let (initial_message, mut substream) = upgrade::apply_inbound( - socket, - NotificationsIn::new(PROTO_NAME) - ).await.unwrap(); - - assert!(initial_message.is_empty()); - substream.send_handshake(vec![]); - - // Process one message so that the handshake and all works. - let _ = substream.next().await.unwrap().unwrap(); - - client.await.unwrap(); - }); - } + use super::{NotificationsIn, NotificationsOut}; + + use async_std::net::{TcpListener, TcpStream}; + use futures::{channel::oneshot, prelude::*}; + use libp2p::core::upgrade; + use std::pin::Pin; + + #[test] + fn basic_works() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()) + .await + .unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + upgrade::Version::V1, + ) + .await + .unwrap(); + + assert_eq!(handshake, b"hello world"); + substream.send(b"test message".to_vec()).await.unwrap(); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx + .send(listener.local_addr().unwrap()) + .unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = + upgrade::apply_inbound(socket, NotificationsIn::new(PROTO_NAME)) + .await + 
.unwrap(); + + assert_eq!(initial_message, b"initial message"); + substream.send_handshake(&b"hello world"[..]); + + let msg = substream.next().await.unwrap().unwrap(); + assert_eq!(msg.as_ref(), b"test message"); + }); + + async_std::task::block_on(client); + } + + #[test] + fn empty_handshake() { + // Check that everything still works when the handshake messages are empty. + + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()) + .await + .unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, vec![]), + upgrade::Version::V1, + ) + .await + .unwrap(); + + assert!(handshake.is_empty()); + substream.send(Default::default()).await.unwrap(); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx + .send(listener.local_addr().unwrap()) + .unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = + upgrade::apply_inbound(socket, NotificationsIn::new(PROTO_NAME)) + .await + .unwrap(); + + assert!(initial_message.is_empty()); + substream.send_handshake(vec![]); + + let msg = substream.next().await.unwrap().unwrap(); + assert!(msg.as_ref().is_empty()); + }); + + async_std::task::block_on(client); + } + + #[test] + fn refused() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()) + .await + .unwrap(); + let outcome = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"hello"[..]), + upgrade::Version::V1, + ) + .await; + + // Despite the protocol negotiation being successfully conducted on the 
listener + // side, we have to receive an error here because the listener didn't send the + // handshake. + assert!(outcome.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx + .send(listener.local_addr().unwrap()) + .unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_msg, substream) = + upgrade::apply_inbound(socket, NotificationsIn::new(PROTO_NAME)) + .await + .unwrap(); + + assert_eq!(initial_msg, b"hello"); + + // We successfully upgrade to the protocol, but then close the substream. + drop(substream); + }); + + async_std::task::block_on(client); + } + + #[test] + fn large_initial_message_refused() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()) + .await + .unwrap(); + let ret = upgrade::apply_outbound( + socket, + // We check that an initial message that is too large gets refused. 
+ NotificationsOut::new(PROTO_NAME, (0..32768).map(|_| 0).collect::>()), + upgrade::Version::V1, + ) + .await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx + .send(listener.local_addr().unwrap()) + .unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let ret = upgrade::apply_inbound(socket, NotificationsIn::new(PROTO_NAME)).await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(client); + } + + #[test] + fn large_handshake_refused() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()) + .await + .unwrap(); + let ret = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, &b"initial message"[..]), + upgrade::Version::V1, + ) + .await; + assert!(ret.is_err()); + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx + .send(listener.local_addr().unwrap()) + .unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = + upgrade::apply_inbound(socket, NotificationsIn::new(PROTO_NAME)) + .await + .unwrap(); + assert_eq!(initial_message, b"initial message"); + + // We check that a handshake that is too large gets refused. 
+ substream.send_handshake((0..32768).map(|_| 0).collect::>()); + let _ = substream.next().await; + }); + + async_std::task::block_on(client); + } + + #[test] + fn buffer_is_full_closes_connection() { + const PROTO_NAME: &'static [u8] = b"/test/proto/1"; + let (listener_addr_tx, listener_addr_rx) = oneshot::channel(); + + let client = async_std::task::spawn(async move { + let socket = TcpStream::connect(listener_addr_rx.await.unwrap()) + .await + .unwrap(); + let (handshake, mut substream) = upgrade::apply_outbound( + socket, + NotificationsOut::new(PROTO_NAME, vec![]), + upgrade::Version::V1, + ) + .await + .unwrap(); + + assert!(handshake.is_empty()); + + // Push an item and flush so that the test works. + substream.send(b"hello world".to_vec()).await.unwrap(); + + for _ in 0..32768 { + // Push an item on the sink without flushing until an error happens because the + // buffer is full. + let message = b"hello world!".to_vec(); + if future::poll_fn(|cx| Sink::poll_ready(Pin::new(&mut substream), cx)) + .await + .is_err() + { + return Ok(()); + } + if Sink::start_send(Pin::new(&mut substream), message).is_err() { + return Ok(()); + } + } + + Err(()) + }); + + async_std::task::block_on(async move { + let listener = TcpListener::bind("127.0.0.1:0").await.unwrap(); + listener_addr_tx + .send(listener.local_addr().unwrap()) + .unwrap(); + + let (socket, _) = listener.accept().await.unwrap(); + let (initial_message, mut substream) = + upgrade::apply_inbound(socket, NotificationsIn::new(PROTO_NAME)) + .await + .unwrap(); + + assert!(initial_message.is_empty()); + substream.send_handshake(vec![]); + + // Process one message so that the handshake and all works. 
+ let _ = substream.next().await.unwrap().unwrap(); + + client.await.unwrap(); + }); + } } diff --git a/client/network/src/protocol/light_client_handler.rs b/client/network/src/protocol/light_client_handler.rs index 85312b0803..2290969055 100644 --- a/client/network/src/protocol/light_client_handler.rs +++ b/client/network/src/protocol/light_client_handler.rs @@ -24,56 +24,47 @@ #![allow(unused)] -use bytes::Bytes; -use codec::{self, Encode, Decode}; use crate::{ - chain::Client, - config::ProtocolId, - protocol::{api, message::BlockAttributes} + chain::Client, + config::ProtocolId, + protocol::{api, message::BlockAttributes}, }; +use bytes::Bytes; +use codec::{self, Decode, Encode}; use futures::{channel::oneshot, future::BoxFuture, prelude::*, stream::FuturesUnordered}; use libp2p::{ - core::{ - ConnectedPoint, - Multiaddr, - PeerId, - connection::ConnectionId, - upgrade::{InboundUpgrade, ReadOneError, UpgradeInfo, Negotiated}, - upgrade::{OutboundUpgrade, read_one, write_one} - }, - swarm::{ - NegotiatedSubstream, - NetworkBehaviour, - NetworkBehaviourAction, - NotifyHandler, - OneShotHandler, - OneShotHandlerConfig, - PollParameters, - SubstreamProtocol, - } + core::{ + connection::ConnectionId, + upgrade::{read_one, write_one, OutboundUpgrade}, + upgrade::{InboundUpgrade, Negotiated, ReadOneError, UpgradeInfo}, + ConnectedPoint, Multiaddr, PeerId, + }, + swarm::{ + NegotiatedSubstream, NetworkBehaviour, NetworkBehaviourAction, NotifyHandler, + OneShotHandler, OneShotHandlerConfig, PollParameters, SubstreamProtocol, + }, }; use nohash_hasher::IntMap; use prost::Message; use sc_client::light::fetcher; use sc_client_api::StorageProof; use sc_peerset::ReputationChange; +use smallvec::SmallVec; +use sp_blockchain::Error as ClientError; use sp_core::{ - storage::{ChildInfo, StorageKey}, - hexdisplay::HexDisplay, + hexdisplay::HexDisplay, + storage::{ChildInfo, StorageKey}, }; -use smallvec::SmallVec; -use sp_blockchain::{Error as ClientError}; use sp_runtime::{ - 
traits::{Block, Header, NumberFor, Zero}, - generic::BlockId, + generic::BlockId, + traits::{Block, Header, NumberFor, Zero}, }; use std::{ - collections::{BTreeMap, VecDeque, HashMap}, - iter, - io, - sync::Arc, - time::Duration, - task::{Context, Poll} + collections::{BTreeMap, HashMap, VecDeque}, + io, iter, + sync::Arc, + task::{Context, Poll}, + time::Duration, }; use void::Void; use wasm_timer::Instant; @@ -84,103 +75,103 @@ pub(crate) const TIMEOUT_REPUTATION_CHANGE: i32 = -(1 << 8); /// Configuration options for `LightClientHandler` behaviour. #[derive(Debug, Clone)] pub struct Config { - max_request_size: usize, - max_response_size: usize, - max_pending_requests: usize, - inactivity_timeout: Duration, - request_timeout: Duration, - light_protocol: Bytes, - block_protocol: Bytes, + max_request_size: usize, + max_response_size: usize, + max_pending_requests: usize, + inactivity_timeout: Duration, + request_timeout: Duration, + light_protocol: Bytes, + block_protocol: Bytes, } impl Config { - /// Create a fresh configuration with the following options: - /// - /// - max. request size = 1 MiB - /// - max. response size = 16 MiB - /// - max. pending requests = 128 - /// - inactivity timeout = 15s - /// - request timeout = 15s - pub fn new(id: &ProtocolId) -> Self { - let mut c = Config { - max_request_size: 1 * 1024 * 1024, - max_response_size: 16 * 1024 * 1024, - max_pending_requests: 128, - inactivity_timeout: Duration::from_secs(15), - request_timeout: Duration::from_secs(15), - light_protocol: Bytes::new(), - block_protocol: Bytes::new(), - }; - c.set_protocol(id); - c - } - - /// Limit the max. length in bytes of a request. - pub fn set_max_request_size(&mut self, v: usize) -> &mut Self { - self.max_request_size = v; - self - } - - /// Limit the max. length in bytes of a response. - pub fn set_max_response_size(&mut self, v: usize) -> &mut Self { - self.max_response_size = v; - self - } - - /// Limit the max. number of pending requests. 
- pub fn set_max_pending_requests(&mut self, v: usize) -> &mut Self { - self.max_pending_requests = v; - self - } - - /// Limit the max. duration the connection may remain inactive before closing it. - pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { - self.inactivity_timeout = v; - self - } - - /// Limit the max. request duration. - pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { - self.request_timeout = v; - self - } - - /// Set protocol to use for upgrade negotiation. - pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { - let mut vl = Vec::new(); - vl.extend_from_slice(b"/"); - vl.extend_from_slice(id.as_bytes()); - vl.extend_from_slice(b"/light/2"); - self.light_protocol = vl.into(); - - let mut vb = Vec::new(); - vb.extend_from_slice(b"/"); - vb.extend_from_slice(id.as_bytes()); - vb.extend_from_slice(b"/sync/2"); - self.block_protocol = vb.into(); - - self - } + /// Create a fresh configuration with the following options: + /// + /// - max. request size = 1 MiB + /// - max. response size = 16 MiB + /// - max. pending requests = 128 + /// - inactivity timeout = 15s + /// - request timeout = 15s + pub fn new(id: &ProtocolId) -> Self { + let mut c = Config { + max_request_size: 1 * 1024 * 1024, + max_response_size: 16 * 1024 * 1024, + max_pending_requests: 128, + inactivity_timeout: Duration::from_secs(15), + request_timeout: Duration::from_secs(15), + light_protocol: Bytes::new(), + block_protocol: Bytes::new(), + }; + c.set_protocol(id); + c + } + + /// Limit the max. length in bytes of a request. + pub fn set_max_request_size(&mut self, v: usize) -> &mut Self { + self.max_request_size = v; + self + } + + /// Limit the max. length in bytes of a response. + pub fn set_max_response_size(&mut self, v: usize) -> &mut Self { + self.max_response_size = v; + self + } + + /// Limit the max. number of pending requests. 
+ pub fn set_max_pending_requests(&mut self, v: usize) -> &mut Self { + self.max_pending_requests = v; + self + } + + /// Limit the max. duration the connection may remain inactive before closing it. + pub fn set_inactivity_timeout(&mut self, v: Duration) -> &mut Self { + self.inactivity_timeout = v; + self + } + + /// Limit the max. request duration. + pub fn set_request_timeout(&mut self, v: Duration) -> &mut Self { + self.request_timeout = v; + self + } + + /// Set protocol to use for upgrade negotiation. + pub fn set_protocol(&mut self, id: &ProtocolId) -> &mut Self { + let mut vl = Vec::new(); + vl.extend_from_slice(b"/"); + vl.extend_from_slice(id.as_bytes()); + vl.extend_from_slice(b"/light/2"); + self.light_protocol = vl.into(); + + let mut vb = Vec::new(); + vb.extend_from_slice(b"/"); + vb.extend_from_slice(id.as_bytes()); + vb.extend_from_slice(b"/sync/2"); + self.block_protocol = vb.into(); + + self + } } /// Possible errors while handling light clients. #[derive(Debug, thiserror::Error)] pub enum Error { - /// There are currently too many pending request. - #[error("too many pending requests")] - TooManyRequests, - /// The response type does not correspond to the issued request. - #[error("unexpected response")] - UnexpectedResponse, - /// A bad request has been received. - #[error("bad request: {0}")] - BadRequest(&'static str), - /// The chain client errored. - #[error("client error: {0}")] - Client(#[from] ClientError), - /// Encoding or decoding of some data failed. - #[error("codec error: {0}")] - Codec(#[from] codec::Error), + /// There are currently too many pending request. + #[error("too many pending requests")] + TooManyRequests, + /// The response type does not correspond to the issued request. + #[error("unexpected response")] + UnexpectedResponse, + /// A bad request has been received. + #[error("bad request: {0}")] + BadRequest(&'static str), + /// The chain client errored. 
+ #[error("client error: {0}")] + Client(#[from] ClientError), + /// Encoding or decoding of some data failed. + #[error("codec error: {0}")] + Codec(#[from] codec::Error), } /// The possible light client requests we support. @@ -192,30 +183,30 @@ pub enum Error { // used because we currently only support a subset of those. #[derive(Debug)] pub enum Request { - Body { - request: fetcher::RemoteBodyRequest, - sender: oneshot::Sender, ClientError>> - }, - Header { - request: fetcher::RemoteHeaderRequest, - sender: oneshot::Sender> - }, - Read { - request: fetcher::RemoteReadRequest, - sender: oneshot::Sender, Option>>, ClientError>> - }, - ReadChild { - request: fetcher::RemoteReadChildRequest, - sender: oneshot::Sender, Option>>, ClientError>> - }, - Call { - request: fetcher::RemoteCallRequest, - sender: oneshot::Sender, ClientError>> - }, - Changes { - request: fetcher::RemoteChangesRequest, - sender: oneshot::Sender, u32)>, ClientError>> - } + Body { + request: fetcher::RemoteBodyRequest, + sender: oneshot::Sender, ClientError>>, + }, + Header { + request: fetcher::RemoteHeaderRequest, + sender: oneshot::Sender>, + }, + Read { + request: fetcher::RemoteReadRequest, + sender: oneshot::Sender, Option>>, ClientError>>, + }, + ReadChild { + request: fetcher::RemoteReadChildRequest, + sender: oneshot::Sender, Option>>, ClientError>>, + }, + Call { + request: fetcher::RemoteCallRequest, + sender: oneshot::Sender, ClientError>>, + }, + Changes { + request: fetcher::RemoteChangesRequest, + sender: oneshot::Sender, u32)>, ClientError>>, + }, } /// The data to send back to the light client over the oneshot channel. @@ -225,44 +216,44 @@ pub enum Request { // response processing. 
#[derive(Debug)] enum Reply { - VecU8(Vec), - VecNumberU32(Vec<(::Number, u32)>), - MapVecU8OptVecU8(HashMap, Option>>), - Header(B::Header), - Extrinsics(Vec), + VecU8(Vec), + VecNumberU32(Vec<(::Number, u32)>), + MapVecU8OptVecU8(HashMap, Option>>), + Header(B::Header), + Extrinsics(Vec), } /// Augments a light client request with metadata. #[derive(Debug)] struct RequestWrapper { - /// Time when this value was created. - timestamp: Instant, - /// Remaining retries. - retries: usize, - /// The actual request. - request: Request, - /// The peer to send the request to, e.g. `PeerId`. - peer: P, - /// The connection to use for sending the request. - connection: Option, + /// Time when this value was created. + timestamp: Instant, + /// Remaining retries. + retries: usize, + /// The actual request. + request: Request, + /// The peer to send the request to, e.g. `PeerId`. + peer: P, + /// The connection to use for sending the request. + connection: Option, } /// Information we have about some peer. #[derive(Debug)] struct PeerInfo { - connections: SmallVec<[(ConnectionId, Multiaddr); crate::MAX_CONNECTIONS_PER_PEER]>, - best_block: Option>, - status: PeerStatus, + connections: SmallVec<[(ConnectionId, Multiaddr); crate::MAX_CONNECTIONS_PER_PEER]>, + best_block: Option>, + status: PeerStatus, } impl Default for PeerInfo { - fn default() -> Self { - PeerInfo { - connections: SmallVec::new(), - best_block: None, - status: PeerStatus::Idle, - } - } + fn default() -> Self { + PeerInfo { + connections: SmallVec::new(), + best_block: None, + status: PeerStatus::Idle, + } + } } type RequestId = u64; @@ -270,916 +261,1063 @@ type RequestId = u64; /// A peer is either idle or busy processing a request from us. #[derive(Debug, Clone, PartialEq, Eq)] enum PeerStatus { - /// The peer is available. - Idle, - /// We wait for the peer to return us a response for the given request ID. - BusyWith(RequestId), + /// The peer is available. 
+ Idle, + /// We wait for the peer to return us a response for the given request ID. + BusyWith(RequestId), } /// The light client handler behaviour. pub struct LightClientHandler { - /// This behaviour's configuration. - config: Config, - /// Blockchain client. - chain: Arc>, - /// Verifies that received responses are correct. - checker: Arc>, - /// Peer information (addresses, their best block, etc.) - peers: HashMap>, - /// Futures sending back response to remote clients. - responses: FuturesUnordered>, - /// Pending (local) requests. - pending_requests: VecDeque>, - /// Requests on their way to remote peers. - outstanding: IntMap>, - /// (Local) Request ID counter - next_request_id: RequestId, - /// Handle to use for reporting misbehaviour of peers. - peerset: sc_peerset::PeersetHandle, + /// This behaviour's configuration. + config: Config, + /// Blockchain client. + chain: Arc>, + /// Verifies that received responses are correct. + checker: Arc>, + /// Peer information (addresses, their best block, etc.) + peers: HashMap>, + /// Futures sending back response to remote clients. + responses: FuturesUnordered>, + /// Pending (local) requests. + pending_requests: VecDeque>, + /// Requests on their way to remote peers. + outstanding: IntMap>, + /// (Local) Request ID counter + next_request_id: RequestId, + /// Handle to use for reporting misbehaviour of peers. + peerset: sc_peerset::PeersetHandle, } impl LightClientHandler where - B: Block, + B: Block, { - /// Construct a new light client handler. - pub fn new( - cfg: Config, - chain: Arc>, - checker: Arc>, - peerset: sc_peerset::PeersetHandle, - ) -> Self { - LightClientHandler { - config: cfg, - chain, - checker, - peers: HashMap::new(), - responses: FuturesUnordered::new(), - pending_requests: VecDeque::new(), - outstanding: IntMap::default(), - next_request_id: 1, - peerset, - } - } - - /// We rely on external information about peers best blocks as we lack the - /// means to determine it ourselves. 
- pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { - if let Some(info) = self.peers.get_mut(peer) { - log::trace!("new best block for {:?}: {:?}", peer, num); - info.best_block = Some(num) - } - } - - /// Issue a new light client request. - pub fn request(&mut self, req: Request) -> Result<(), Error> { - if self.pending_requests.len() >= self.config.max_pending_requests { - return Err(Error::TooManyRequests) - } - let rw = RequestWrapper { - timestamp: Instant::now(), - retries: retries(&req), - request: req, - peer: (), // we do not know the peer yet - connection: None, - }; - self.pending_requests.push_back(rw); - Ok(()) - } - - fn next_request_id(&mut self) -> RequestId { - let id = self.next_request_id; - self.next_request_id += 1; - id - } - - /// Remove the given peer. - /// - /// If we have a request to this peer in flight, we move it back to - /// the pending requests queue. - fn remove_peer(&mut self, peer: &PeerId) { - if let Some(id) = self.outstanding.iter().find(|(_, rw)| &rw.peer == peer).map(|(k, _)| *k) { - let rw = self.outstanding.remove(&id).expect("key belongs to entry in this map"); - let rw = RequestWrapper { - timestamp: rw.timestamp, - retries: rw.retries, - request: rw.request, - peer: (), // need to find another peer - connection: None, - }; - self.pending_requests.push_back(rw); - } - self.peers.remove(peer); - } - - /// Prepares a request by selecting a suitable peer and connection to send it to. - /// - /// If there is currently no suitable peer for the request, the given request - /// is returned as `Err`. 
- fn prepare_request(&self, req: RequestWrapper) - -> Result<(PeerId, RequestWrapper), RequestWrapper> - { - let number = required_block(&req.request); - - let mut peer = None; - for (peer_id, peer_info) in self.peers.iter() { - if peer_info.status == PeerStatus::Idle { - match peer_info.best_block { - Some(n) => if n >= number { - peer = Some((peer_id, peer_info)); - break - }, - None => peer = Some((peer_id, peer_info)) - } - } - } - - if let Some((peer_id, peer_info)) = peer { - let connection = peer_info.connections.iter().next().map(|(id, _)| *id); - let rw = RequestWrapper { - timestamp: req.timestamp, - retries: req.retries, - request: req.request, - peer: peer_id.clone(), - connection, - }; - Ok((peer_id.clone(), rw)) - } else { - Err(req) - } - } - - /// Process a local request's response from remote. - /// - /// If successful, this will give us the actual, checked data we should be - /// sending back to the client, otherwise an error. - fn on_response - ( &mut self - , peer: &PeerId - , request: &Request - , response: Response - ) -> Result, Error> - { - log::trace!("response from {}", peer); - match response { - Response::Light(r) => self.on_response_light(peer, request, r), - Response::Block(r) => self.on_response_block(peer, request, r), - } - } - - fn on_response_light - ( &mut self - , peer: &PeerId - , request: &Request - , response: api::v1::light::Response - ) -> Result, Error> - { - use api::v1::light::response::Response; - match response.response { - Some(Response::RemoteCallResponse(response)) => - if let Request::Call { request , .. } = request { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_execution_proof(request, proof)?; - Ok(Reply::VecU8(reply)) - } else { - Err(Error::UnexpectedResponse) - } - Some(Response::RemoteReadResponse(response)) => - match request { - Request::Read { request, .. 
} => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - Request::ReadChild { request, .. } => { - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_read_child_proof(&request, proof)?; - Ok(Reply::MapVecU8OptVecU8(reply)) - } - _ => Err(Error::UnexpectedResponse) - } - Some(Response::RemoteChangesResponse(response)) => - if let Request::Changes { request, .. } = request { - let max_block = Decode::decode(&mut response.max.as_ref())?; - let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; - let roots = { - let mut r = BTreeMap::new(); - for pair in response.roots { - let k = Decode::decode(&mut pair.fst.as_ref())?; - let v = Decode::decode(&mut pair.snd.as_ref())?; - r.insert(k, v); - } - r - }; - let reply = self.checker.check_changes_proof(&request, fetcher::ChangesProof { - max_block, - proof: response.proof, - roots, - roots_proof, - })?; - Ok(Reply::VecNumberU32(reply)) - } else { - Err(Error::UnexpectedResponse) - } - Some(Response::RemoteHeaderResponse(response)) => - if let Request::Header { request, .. } = request { - let header = - if response.header.is_empty() { - None - } else { - Some(Decode::decode(&mut response.header.as_ref())?) - }; - let proof = Decode::decode(&mut response.proof.as_ref())?; - let reply = self.checker.check_header_proof(&request, header, proof)?; - Ok(Reply::Header(reply)) - } else { - Err(Error::UnexpectedResponse) - } - None => Err(Error::UnexpectedResponse) - } - } - - fn on_response_block - ( &mut self - , peer: &PeerId - , request: &Request - , response: api::v1::BlockResponse - ) -> Result, Error> - { - let request = if let Request::Body { request , .. 
} = &request { - request - } else { - return Err(Error::UnexpectedResponse); - }; - - let body: Vec<_> = match response.blocks.into_iter().next() { - Some(b) => b.body, - None => return Err(Error::UnexpectedResponse), - }; - - let body = body.into_iter() - .map(|mut extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) - .collect::>()?; - - let body = self.checker.check_body_proof(&request, body)?; - Ok(Reply::Extrinsics(body)) - } - - fn on_remote_call_request - ( &mut self - , peer: &PeerId - , request: &api::v1::light::RemoteCallRequest - ) -> Result - { - log::trace!("remote call request from {} ({} at {:?})", - peer, - request.method, - request.block, - ); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let proof = match self.chain.execution_proof(&BlockId::Hash(block), &request.method, &request.data) { - Ok((_, proof)) => proof, - Err(e) => { - log::trace!("remote call request from {} ({} at {:?}) failed with: {}", - peer, - request.method, - request.block, - e, - ); - StorageProof::empty() - } - }; - - let response = { - let r = api::v1::light::RemoteCallResponse { proof: proof.encode() }; - api::v1::light::response::Response::RemoteCallResponse(r) - }; - - Ok(api::v1::light::Response { response: Some(response) }) - } - - fn on_remote_read_request - ( &mut self - , peer: &PeerId - , request: &api::v1::light::RemoteReadRequest - ) -> Result - { - if request.keys.is_empty() { - log::debug!("invalid remote read request sent by {}", peer); - return Err(Error::BadRequest("remote read request without keys")) - } - - log::trace!("remote read request from {} ({} at {:?})", - peer, - fmt_keys(request.keys.first(), request.keys.last()), - request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let proof = match self.chain.read_proof(&BlockId::Hash(block), &mut request.keys.iter().map(AsRef::as_ref)) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read request from {} ({} at {:?}) failed with: {}", - peer, - 
fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - }; - - let response = { - let r = api::v1::light::RemoteReadResponse { proof: proof.encode() }; - api::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(api::v1::light::Response { response: Some(response) }) - } - - fn on_remote_read_child_request - ( &mut self - , peer: &PeerId - , request: &api::v1::light::RemoteReadChildRequest - ) -> Result - { - if request.keys.is_empty() { - log::debug!("invalid remote child read request sent by {}", peer); - return Err(Error::BadRequest("remove read child request without keys")) - } - - log::trace!("remote read child request from {} ({} {} at {:?})", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - - let proof = - if let Some(info) = ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) { - match self.chain.read_child_proof( - &BlockId::Hash(block), - &request.storage_key, - info, - &mut request.keys.iter().map(AsRef::as_ref) - ) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - error); - StorageProof::empty() - } - } - } else { - log::trace!("remote read child request from {} ({} {} at {:?}) failed with: {}", - peer, - HexDisplay::from(&request.storage_key), - fmt_keys(request.keys.first(), request.keys.last()), - request.block, - "invalid child info and type" - ); - StorageProof::empty() - }; - - let response = { - let r = api::v1::light::RemoteReadResponse { proof: proof.encode() }; - api::v1::light::response::Response::RemoteReadResponse(r) - }; - - Ok(api::v1::light::Response { response: Some(response) }) - } - - fn on_remote_header_request - ( 
&mut self - , peer: &PeerId - , request: &api::v1::light::RemoteHeaderRequest - ) -> Result - { - log::trace!("remote header proof request from {} ({:?})", peer, request.block); - - let block = Decode::decode(&mut request.block.as_ref())?; - let (header, proof) = match self.chain.header_proof(&BlockId::Number(block)) { - Ok((header, proof)) => (header.encode(), proof), - Err(error) => { - log::trace!("remote header proof request from {} ({:?}) failed with: {}", - peer, - request.block, - error); - (Default::default(), StorageProof::empty()) - } - }; - - let response = { - let r = api::v1::light::RemoteHeaderResponse { header, proof: proof.encode() }; - api::v1::light::response::Response::RemoteHeaderResponse(r) - }; - - Ok(api::v1::light::Response { response: Some(response) }) - } - - fn on_remote_changes_request - ( &mut self - , peer: &PeerId - , request: &api::v1::light::RemoteChangesRequest - ) -> Result - { - log::trace!("remote changes proof request from {} for key {} ({:?}..{:?})", - peer, - if !request.storage_key.is_empty() { - format!("{} : {}", HexDisplay::from(&request.storage_key), HexDisplay::from(&request.key)) - } else { - HexDisplay::from(&request.key).to_string() - }, - request.first, - request.last); - - let first = Decode::decode(&mut request.first.as_ref())?; - let last = Decode::decode(&mut request.last.as_ref())?; - let min = Decode::decode(&mut request.min.as_ref())?; - let max = Decode::decode(&mut request.max.as_ref())?; - let key = StorageKey(request.key.clone()); - let storage_key = - if request.storage_key.is_empty() { - None - } else { - Some(StorageKey(request.storage_key.clone())) - }; - - let proof = match self.chain.key_changes_proof(first, last, min, max, storage_key.as_ref(), &key) { - Ok(proof) => proof, - Err(error) => { - log::trace!("remote changes proof request from {} for key {} ({:?}..{:?}) failed with: {}", - peer, - if let Some(sk) = storage_key { - format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) - 
} else { - HexDisplay::from(&key.0).to_string() - }, - request.first, - request.last, - error); - - fetcher::ChangesProof:: { - max_block: Zero::zero(), - proof: Vec::new(), - roots: BTreeMap::new(), - roots_proof: StorageProof::empty(), - } - } - }; - - let response = { - let r = api::v1::light::RemoteChangesResponse { - max: proof.max_block.encode(), - proof: proof.proof, - roots: proof.roots.into_iter() - .map(|(k, v)| api::v1::light::Pair { fst: k.encode(), snd: v.encode() }) - .collect(), - roots_proof: proof.roots_proof.encode(), - }; - api::v1::light::response::Response::RemoteChangesResponse(r) - }; - - Ok(api::v1::light::Response { response: Some(response) }) - } + /// Construct a new light client handler. + pub fn new( + cfg: Config, + chain: Arc>, + checker: Arc>, + peerset: sc_peerset::PeersetHandle, + ) -> Self { + LightClientHandler { + config: cfg, + chain, + checker, + peers: HashMap::new(), + responses: FuturesUnordered::new(), + pending_requests: VecDeque::new(), + outstanding: IntMap::default(), + next_request_id: 1, + peerset, + } + } + + /// We rely on external information about peers best blocks as we lack the + /// means to determine it ourselves. + pub fn update_best_block(&mut self, peer: &PeerId, num: NumberFor) { + if let Some(info) = self.peers.get_mut(peer) { + log::trace!("new best block for {:?}: {:?}", peer, num); + info.best_block = Some(num) + } + } + + /// Issue a new light client request. + pub fn request(&mut self, req: Request) -> Result<(), Error> { + if self.pending_requests.len() >= self.config.max_pending_requests { + return Err(Error::TooManyRequests); + } + let rw = RequestWrapper { + timestamp: Instant::now(), + retries: retries(&req), + request: req, + peer: (), // we do not know the peer yet + connection: None, + }; + self.pending_requests.push_back(rw); + Ok(()) + } + + fn next_request_id(&mut self) -> RequestId { + let id = self.next_request_id; + self.next_request_id += 1; + id + } + + /// Remove the given peer. 
+ /// + /// If we have a request to this peer in flight, we move it back to + /// the pending requests queue. + fn remove_peer(&mut self, peer: &PeerId) { + if let Some(id) = self + .outstanding + .iter() + .find(|(_, rw)| &rw.peer == peer) + .map(|(k, _)| *k) + { + let rw = self + .outstanding + .remove(&id) + .expect("key belongs to entry in this map"); + let rw = RequestWrapper { + timestamp: rw.timestamp, + retries: rw.retries, + request: rw.request, + peer: (), // need to find another peer + connection: None, + }; + self.pending_requests.push_back(rw); + } + self.peers.remove(peer); + } + + /// Prepares a request by selecting a suitable peer and connection to send it to. + /// + /// If there is currently no suitable peer for the request, the given request + /// is returned as `Err`. + fn prepare_request( + &self, + req: RequestWrapper, + ) -> Result<(PeerId, RequestWrapper), RequestWrapper> { + let number = required_block(&req.request); + + let mut peer = None; + for (peer_id, peer_info) in self.peers.iter() { + if peer_info.status == PeerStatus::Idle { + match peer_info.best_block { + Some(n) => { + if n >= number { + peer = Some((peer_id, peer_info)); + break; + } + } + None => peer = Some((peer_id, peer_info)), + } + } + } + + if let Some((peer_id, peer_info)) = peer { + let connection = peer_info.connections.iter().next().map(|(id, _)| *id); + let rw = RequestWrapper { + timestamp: req.timestamp, + retries: req.retries, + request: req.request, + peer: peer_id.clone(), + connection, + }; + Ok((peer_id.clone(), rw)) + } else { + Err(req) + } + } + + /// Process a local request's response from remote. + /// + /// If successful, this will give us the actual, checked data we should be + /// sending back to the client, otherwise an error. 
+ fn on_response( + &mut self, + peer: &PeerId, + request: &Request, + response: Response, + ) -> Result, Error> { + log::trace!("response from {}", peer); + match response { + Response::Light(r) => self.on_response_light(peer, request, r), + Response::Block(r) => self.on_response_block(peer, request, r), + } + } + + fn on_response_light( + &mut self, + peer: &PeerId, + request: &Request, + response: api::v1::light::Response, + ) -> Result, Error> { + use api::v1::light::response::Response; + match response.response { + Some(Response::RemoteCallResponse(response)) => { + if let Request::Call { request, .. } = request { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_execution_proof(request, proof)?; + Ok(Reply::VecU8(reply)) + } else { + Err(Error::UnexpectedResponse) + } + } + Some(Response::RemoteReadResponse(response)) => match request { + Request::Read { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + } + Request::ReadChild { request, .. } => { + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_read_child_proof(&request, proof)?; + Ok(Reply::MapVecU8OptVecU8(reply)) + } + _ => Err(Error::UnexpectedResponse), + }, + Some(Response::RemoteChangesResponse(response)) => { + if let Request::Changes { request, .. 
} = request { + let max_block = Decode::decode(&mut response.max.as_ref())?; + let roots_proof = Decode::decode(&mut response.roots_proof.as_ref())?; + let roots = { + let mut r = BTreeMap::new(); + for pair in response.roots { + let k = Decode::decode(&mut pair.fst.as_ref())?; + let v = Decode::decode(&mut pair.snd.as_ref())?; + r.insert(k, v); + } + r + }; + let reply = self.checker.check_changes_proof( + &request, + fetcher::ChangesProof { + max_block, + proof: response.proof, + roots, + roots_proof, + }, + )?; + Ok(Reply::VecNumberU32(reply)) + } else { + Err(Error::UnexpectedResponse) + } + } + Some(Response::RemoteHeaderResponse(response)) => { + if let Request::Header { request, .. } = request { + let header = if response.header.is_empty() { + None + } else { + Some(Decode::decode(&mut response.header.as_ref())?) + }; + let proof = Decode::decode(&mut response.proof.as_ref())?; + let reply = self.checker.check_header_proof(&request, header, proof)?; + Ok(Reply::Header(reply)) + } else { + Err(Error::UnexpectedResponse) + } + } + None => Err(Error::UnexpectedResponse), + } + } + + fn on_response_block( + &mut self, + peer: &PeerId, + request: &Request, + response: api::v1::BlockResponse, + ) -> Result, Error> { + let request = if let Request::Body { request, .. 
} = &request { + request + } else { + return Err(Error::UnexpectedResponse); + }; + + let body: Vec<_> = match response.blocks.into_iter().next() { + Some(b) => b.body, + None => return Err(Error::UnexpectedResponse), + }; + + let body = body + .into_iter() + .map(|mut extrinsic| B::Extrinsic::decode(&mut &extrinsic[..])) + .collect::>()?; + + let body = self.checker.check_body_proof(&request, body)?; + Ok(Reply::Extrinsics(body)) + } + + fn on_remote_call_request( + &mut self, + peer: &PeerId, + request: &api::v1::light::RemoteCallRequest, + ) -> Result { + log::trace!( + "remote call request from {} ({} at {:?})", + peer, + request.method, + request.block, + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = + match self + .chain + .execution_proof(&BlockId::Hash(block), &request.method, &request.data) + { + Ok((_, proof)) => proof, + Err(e) => { + log::trace!( + "remote call request from {} ({} at {:?}) failed with: {}", + peer, + request.method, + request.block, + e, + ); + StorageProof::empty() + } + }; + + let response = { + let r = api::v1::light::RemoteCallResponse { + proof: proof.encode(), + }; + api::v1::light::response::Response::RemoteCallResponse(r) + }; + + Ok(api::v1::light::Response { + response: Some(response), + }) + } + + fn on_remote_read_request( + &mut self, + peer: &PeerId, + request: &api::v1::light::RemoteReadRequest, + ) -> Result { + if request.keys.is_empty() { + log::debug!("invalid remote read request sent by {}", peer); + return Err(Error::BadRequest("remote read request without keys")); + } + + log::trace!( + "remote read request from {} ({} at {:?})", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = match self.chain.read_proof( + &BlockId::Hash(block), + &mut request.keys.iter().map(AsRef::as_ref), + ) { + Ok(proof) => proof, + Err(error) => { + log::trace!( + "remote read request from {} 
({} at {:?}) failed with: {}", + peer, + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error + ); + StorageProof::empty() + } + }; + + let response = { + let r = api::v1::light::RemoteReadResponse { + proof: proof.encode(), + }; + api::v1::light::response::Response::RemoteReadResponse(r) + }; + + Ok(api::v1::light::Response { + response: Some(response), + }) + } + + fn on_remote_read_child_request( + &mut self, + peer: &PeerId, + request: &api::v1::light::RemoteReadChildRequest, + ) -> Result { + if request.keys.is_empty() { + log::debug!("invalid remote child read request sent by {}", peer); + return Err(Error::BadRequest("remove read child request without keys")); + } + + log::trace!( + "remote read child request from {} ({} {} at {:?})", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + + let proof = if let Some(info) = + ChildInfo::resolve_child_info(request.child_type, &request.child_info[..]) + { + match self.chain.read_child_proof( + &BlockId::Hash(block), + &request.storage_key, + info, + &mut request.keys.iter().map(AsRef::as_ref), + ) { + Ok(proof) => proof, + Err(error) => { + log::trace!( + "remote read child request from {} ({} {} at {:?}) failed with: {}", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + error + ); + StorageProof::empty() + } + } + } else { + log::trace!( + "remote read child request from {} ({} {} at {:?}) failed with: {}", + peer, + HexDisplay::from(&request.storage_key), + fmt_keys(request.keys.first(), request.keys.last()), + request.block, + "invalid child info and type" + ); + StorageProof::empty() + }; + + let response = { + let r = api::v1::light::RemoteReadResponse { + proof: proof.encode(), + }; + api::v1::light::response::Response::RemoteReadResponse(r) + }; + + 
Ok(api::v1::light::Response { + response: Some(response), + }) + } + + fn on_remote_header_request( + &mut self, + peer: &PeerId, + request: &api::v1::light::RemoteHeaderRequest, + ) -> Result { + log::trace!( + "remote header proof request from {} ({:?})", + peer, + request.block + ); + + let block = Decode::decode(&mut request.block.as_ref())?; + let (header, proof) = match self.chain.header_proof(&BlockId::Number(block)) { + Ok((header, proof)) => (header.encode(), proof), + Err(error) => { + log::trace!( + "remote header proof request from {} ({:?}) failed with: {}", + peer, + request.block, + error + ); + (Default::default(), StorageProof::empty()) + } + }; + + let response = { + let r = api::v1::light::RemoteHeaderResponse { + header, + proof: proof.encode(), + }; + api::v1::light::response::Response::RemoteHeaderResponse(r) + }; + + Ok(api::v1::light::Response { + response: Some(response), + }) + } + + fn on_remote_changes_request( + &mut self, + peer: &PeerId, + request: &api::v1::light::RemoteChangesRequest, + ) -> Result { + log::trace!( + "remote changes proof request from {} for key {} ({:?}..{:?})", + peer, + if !request.storage_key.is_empty() { + format!( + "{} : {}", + HexDisplay::from(&request.storage_key), + HexDisplay::from(&request.key) + ) + } else { + HexDisplay::from(&request.key).to_string() + }, + request.first, + request.last + ); + + let first = Decode::decode(&mut request.first.as_ref())?; + let last = Decode::decode(&mut request.last.as_ref())?; + let min = Decode::decode(&mut request.min.as_ref())?; + let max = Decode::decode(&mut request.max.as_ref())?; + let key = StorageKey(request.key.clone()); + let storage_key = if request.storage_key.is_empty() { + None + } else { + Some(StorageKey(request.storage_key.clone())) + }; + + let proof = match self.chain.key_changes_proof( + first, + last, + min, + max, + storage_key.as_ref(), + &key, + ) { + Ok(proof) => proof, + Err(error) => { + log::trace!( + "remote changes proof request from {} 
for key {} ({:?}..{:?}) failed with: {}", + peer, + if let Some(sk) = storage_key { + format!("{} : {}", HexDisplay::from(&sk.0), HexDisplay::from(&key.0)) + } else { + HexDisplay::from(&key.0).to_string() + }, + request.first, + request.last, + error + ); + + fetcher::ChangesProof:: { + max_block: Zero::zero(), + proof: Vec::new(), + roots: BTreeMap::new(), + roots_proof: StorageProof::empty(), + } + } + }; + + let response = { + let r = api::v1::light::RemoteChangesResponse { + max: proof.max_block.encode(), + proof: proof.proof, + roots: proof + .roots + .into_iter() + .map(|(k, v)| api::v1::light::Pair { + fst: k.encode(), + snd: v.encode(), + }) + .collect(), + roots_proof: proof.roots_proof.encode(), + }; + api::v1::light::response::Response::RemoteChangesResponse(r) + }; + + Ok(api::v1::light::Response { + response: Some(response), + }) + } } impl NetworkBehaviour for LightClientHandler where - B: Block + B: Block, { - type ProtocolsHandler = OneShotHandler>; - type OutEvent = Void; - - fn new_handler(&mut self) -> Self::ProtocolsHandler { - let p = InboundProtocol { - max_request_size: self.config.max_request_size, - protocol: self.config.light_protocol.clone(), - }; - let mut cfg = OneShotHandlerConfig::default(); - cfg.inactive_timeout = self.config.inactivity_timeout; - OneShotHandler::new(SubstreamProtocol::new(p), cfg) - } - - fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { - self.peers.get(peer) - .map(|info| info.connections.iter().map(|(_, a)| a.clone()).collect()) - .unwrap_or_default() - } - - fn inject_connected(&mut self, peer: &PeerId) { - } - - fn inject_connection_established(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { - let peer_address = match info { - ConnectedPoint::Listener { send_back_addr, .. 
} => send_back_addr.clone(), - ConnectedPoint::Dialer { address } => address.clone() - }; - - log::trace!("peer {} connected with address {}", peer, peer_address); - - let entry = self.peers.entry(peer.clone()).or_default(); - entry.connections.push((*conn, peer_address)); - } - - fn inject_disconnected(&mut self, peer: &PeerId) { - log::trace!("peer {} disconnected", peer); - self.remove_peer(peer) - } - - fn inject_connection_closed(&mut self, peer: &PeerId, conn: &ConnectionId, info: &ConnectedPoint) { - let peer_address = match info { - ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, - ConnectedPoint::Dialer { address } => address - }; - - log::trace!("connection to peer {} closed: {}", peer, peer_address); - - if let Some(info) = self.peers.get_mut(peer) { - info.connections.retain(|(c, _)| c != conn) - } - - // Add any outstanding requests on the closed connection back to the - // pending requests. - if let Some(id) = self.outstanding.iter() - .find(|(_, rw)| &rw.peer == peer && rw.connection == Some(*conn)) // (*) - .map(|(id, _)| *id) - { - let rw = self.outstanding.remove(&id).expect("by (*)"); - let rw = RequestWrapper { - timestamp: rw.timestamp, - retries: rw.retries, - request: rw.request, - peer: (), // need to find another peer - connection: None, - }; - self.pending_requests.push_back(rw); - } - } - - fn inject_event(&mut self, peer: PeerId, conn: ConnectionId, event: Event) { - match event { - // An incoming request from remote has been received. 
- Event::Request(request, mut stream) => { - log::trace!("incoming request from {}", peer); - let result = match &request.request { - Some(api::v1::light::request::Request::RemoteCallRequest(r)) => - self.on_remote_call_request(&peer, r), - Some(api::v1::light::request::Request::RemoteReadRequest(r)) => - self.on_remote_read_request(&peer, r), - Some(api::v1::light::request::Request::RemoteHeaderRequest(r)) => - self.on_remote_header_request(&peer, r), - Some(api::v1::light::request::Request::RemoteReadChildRequest(r)) => - self.on_remote_read_child_request(&peer, r), - Some(api::v1::light::request::Request::RemoteChangesRequest(r)) => - self.on_remote_changes_request(&peer, r), - None => { - log::debug!("ignoring request without request data from peer {}", peer); - return - } - }; - match result { - Ok(response) => { - log::trace!("enqueueing response for peer {}", peer); - let mut data = Vec::new(); - if let Err(e) = response.encode(&mut data) { - log::debug!("error encoding response for peer {}: {}", peer, e) - } else { - let future = async move { - if let Err(e) = write_one(&mut stream, data).await { - log::debug!("error writing response: {}", e) - } - }; - self.responses.push(future.boxed()) - } - } - Err(Error::BadRequest(_)) => { - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new(-(1 << 12), "bad request")) - } - Err(e) => log::debug!("error handling request from peer {}: {}", peer, e) - } - } - // A response to one of our own requests has been received. - Event::Response(id, response) => { - if let Some(request) = self.outstanding.remove(&id) { - // We first just check if the response originates from the expected peer - // and connection. 
- if request.peer != peer { - log::debug!("Expected response from {} instead of {}.", request.peer, peer); - self.outstanding.insert(id, request); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); - return - } - - if let Some(info) = self.peers.get_mut(&peer) { - if info.status != PeerStatus::BusyWith(id) { - // If we get here, something is wrong with our internal handling of peer - // status information. At any time, a single peer processes at most one - // request from us and its status should contain the request ID we are - // expecting a response for. If a peer would send us a response with a - // random ID, we should not have an entry for it with this peer ID in - // our `outstanding` map, so a malicious peer should not be able to get - // us here. It is our own fault and must be fixed! - panic!("unexpected peer status {:?} for {}", info.status, peer); - } - - info.status = PeerStatus::Idle; // Make peer available again. 
- - match self.on_response(&peer, &request.request, response) { - Ok(reply) => send_reply(Ok(reply), request.request), - Err(Error::UnexpectedResponse) => { - log::debug!("unexpected response {} from peer {}", id, peer); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("unexpected response from peer")); - let rw = RequestWrapper { - timestamp: request.timestamp, - retries: request.retries, - request: request.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw); - } - Err(other) => { - log::debug!("error handling response {} from peer {}: {}", id, peer, other); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("invalid response from peer")); - if request.retries > 0 { - let rw = RequestWrapper { - timestamp: request.timestamp, - retries: request.retries - 1, - request: request.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw) - } else { - send_reply(Err(ClientError::RemoteFetchFailed), request.request) - } - } - } - } else { - // If we get here, something is wrong with our internal handling of peers. - // We apparently have an entry in our `outstanding` map and the peer is the one we - // expected. So, if we can not find an entry for it in our peer information table, - // then these two collections are out of sync which must not happen and is a clear - // programmer error that must be fixed! - panic!("missing peer information for {}; response {}", peer, id); - } - } else { - log::debug!("unexpected response {} from peer {}", id, peer); - self.remove_peer(&peer); - self.peerset.report_peer(peer, ReputationChange::new_fatal("response from unexpected peer")); - } - } - } - } - - fn poll(&mut self, cx: &mut Context, _: &mut impl PollParameters) -> Poll> { - // Process response sending futures. 
- while let Poll::Ready(Some(_)) = self.responses.poll_next_unpin(cx) {} - - // If we have a pending request to send, try to find an available peer and send it. - let now = Instant::now(); - while let Some(mut request) = self.pending_requests.pop_front() { - if now > request.timestamp + self.config.request_timeout { - if request.retries == 0 { - send_reply(Err(ClientError::RemoteFetchFailed), request.request); - continue - } - request.timestamp = Instant::now(); - request.retries -= 1 - } - - - match self.prepare_request(request) { - Err(request) => { - self.pending_requests.push_front(request); - log::debug!("no peer available to send request to"); - break - } - Ok((peer, request)) => { - let request_bytes = match serialize_request(&request.request) { - Ok(bytes) => bytes, - Err(error) => { - log::debug!("failed to serialize request: {}", error); - send_reply(Err(ClientError::RemoteFetchFailed), request.request); - continue - } - }; - - let (expected, protocol) = match request.request { - Request::Body { .. } => - (ExpectedResponseTy::Block, self.config.block_protocol.clone()), - _ => - (ExpectedResponseTy::Light, self.config.light_protocol.clone()), - }; - - let peer_id = peer.clone(); - let handler = request.connection.map_or(NotifyHandler::Any, NotifyHandler::One); - - let request_id = self.next_request_id(); - self.peers.get_mut(&peer).map(|p| p.status = PeerStatus::BusyWith(request_id)); - self.outstanding.insert(request_id, request); - - let event = OutboundProtocol { - request_id, - request: request_bytes, - expected, - max_response_size: self.config.max_response_size, - protocol, - }; - - log::trace!("sending request {} to peer {}", request_id, peer_id); - - return Poll::Ready(NetworkBehaviourAction::NotifyHandler { - peer_id, - handler, - event, - }) - } - } - } - - // Look for ongoing requests that have timed out. 
- let mut expired = Vec::new(); - for (id, rw) in &self.outstanding { - if now > rw.timestamp + self.config.request_timeout { - log::debug!("request {} timed out", id); - expired.push(*id) - } - } - for id in expired { - if let Some(rw) = self.outstanding.remove(&id) { - self.remove_peer(&rw.peer); - self.peerset.report_peer(rw.peer.clone(), - ReputationChange::new(TIMEOUT_REPUTATION_CHANGE, "light request timeout")); - if rw.retries == 0 { - send_reply(Err(ClientError::RemoteFetchFailed), rw.request); - continue - } - let rw = RequestWrapper { - timestamp: Instant::now(), - retries: rw.retries - 1, - request: rw.request, - peer: (), - connection: None, - }; - self.pending_requests.push_back(rw) - } - } - - Poll::Pending - } + type ProtocolsHandler = + OneShotHandler>; + type OutEvent = Void; + + fn new_handler(&mut self) -> Self::ProtocolsHandler { + let p = InboundProtocol { + max_request_size: self.config.max_request_size, + protocol: self.config.light_protocol.clone(), + }; + let mut cfg = OneShotHandlerConfig::default(); + cfg.inactive_timeout = self.config.inactivity_timeout; + OneShotHandler::new(SubstreamProtocol::new(p), cfg) + } + + fn addresses_of_peer(&mut self, peer: &PeerId) -> Vec { + self.peers + .get(peer) + .map(|info| info.connections.iter().map(|(_, a)| a.clone()).collect()) + .unwrap_or_default() + } + + fn inject_connected(&mut self, peer: &PeerId) {} + + fn inject_connection_established( + &mut self, + peer: &PeerId, + conn: &ConnectionId, + info: &ConnectedPoint, + ) { + let peer_address = match info { + ConnectedPoint::Listener { send_back_addr, .. 
} => send_back_addr.clone(), + ConnectedPoint::Dialer { address } => address.clone(), + }; + + log::trace!("peer {} connected with address {}", peer, peer_address); + + let entry = self.peers.entry(peer.clone()).or_default(); + entry.connections.push((*conn, peer_address)); + } + + fn inject_disconnected(&mut self, peer: &PeerId) { + log::trace!("peer {} disconnected", peer); + self.remove_peer(peer) + } + + fn inject_connection_closed( + &mut self, + peer: &PeerId, + conn: &ConnectionId, + info: &ConnectedPoint, + ) { + let peer_address = match info { + ConnectedPoint::Listener { send_back_addr, .. } => send_back_addr, + ConnectedPoint::Dialer { address } => address, + }; + + log::trace!("connection to peer {} closed: {}", peer, peer_address); + + if let Some(info) = self.peers.get_mut(peer) { + info.connections.retain(|(c, _)| c != conn) + } + + // Add any outstanding requests on the closed connection back to the + // pending requests. + if let Some(id) = self + .outstanding + .iter() + .find(|(_, rw)| &rw.peer == peer && rw.connection == Some(*conn)) // (*) + .map(|(id, _)| *id) + { + let rw = self.outstanding.remove(&id).expect("by (*)"); + let rw = RequestWrapper { + timestamp: rw.timestamp, + retries: rw.retries, + request: rw.request, + peer: (), // need to find another peer + connection: None, + }; + self.pending_requests.push_back(rw); + } + } + + fn inject_event( + &mut self, + peer: PeerId, + conn: ConnectionId, + event: Event, + ) { + match event { + // An incoming request from remote has been received. 
+ Event::Request(request, mut stream) => { + log::trace!("incoming request from {}", peer); + let result = match &request.request { + Some(api::v1::light::request::Request::RemoteCallRequest(r)) => { + self.on_remote_call_request(&peer, r) + } + Some(api::v1::light::request::Request::RemoteReadRequest(r)) => { + self.on_remote_read_request(&peer, r) + } + Some(api::v1::light::request::Request::RemoteHeaderRequest(r)) => { + self.on_remote_header_request(&peer, r) + } + Some(api::v1::light::request::Request::RemoteReadChildRequest(r)) => { + self.on_remote_read_child_request(&peer, r) + } + Some(api::v1::light::request::Request::RemoteChangesRequest(r)) => { + self.on_remote_changes_request(&peer, r) + } + None => { + log::debug!("ignoring request without request data from peer {}", peer); + return; + } + }; + match result { + Ok(response) => { + log::trace!("enqueueing response for peer {}", peer); + let mut data = Vec::new(); + if let Err(e) = response.encode(&mut data) { + log::debug!("error encoding response for peer {}: {}", peer, e) + } else { + let future = async move { + if let Err(e) = write_one(&mut stream, data).await { + log::debug!("error writing response: {}", e) + } + }; + self.responses.push(future.boxed()) + } + } + Err(Error::BadRequest(_)) => { + self.remove_peer(&peer); + self.peerset + .report_peer(peer, ReputationChange::new(-(1 << 12), "bad request")) + } + Err(e) => log::debug!("error handling request from peer {}: {}", peer, e), + } + } + // A response to one of our own requests has been received. + Event::Response(id, response) => { + if let Some(request) = self.outstanding.remove(&id) { + // We first just check if the response originates from the expected peer + // and connection. 
+ if request.peer != peer { + log::debug!( + "Expected response from {} instead of {}.", + request.peer, + peer + ); + self.outstanding.insert(id, request); + self.remove_peer(&peer); + self.peerset.report_peer( + peer, + ReputationChange::new_fatal("response from unexpected peer"), + ); + return; + } + + if let Some(info) = self.peers.get_mut(&peer) { + if info.status != PeerStatus::BusyWith(id) { + // If we get here, something is wrong with our internal handling of peer + // status information. At any time, a single peer processes at most one + // request from us and its status should contain the request ID we are + // expecting a response for. If a peer would send us a response with a + // random ID, we should not have an entry for it with this peer ID in + // our `outstanding` map, so a malicious peer should not be able to get + // us here. It is our own fault and must be fixed! + panic!("unexpected peer status {:?} for {}", info.status, peer); + } + + info.status = PeerStatus::Idle; // Make peer available again. 
+ + match self.on_response(&peer, &request.request, response) { + Ok(reply) => send_reply(Ok(reply), request.request), + Err(Error::UnexpectedResponse) => { + log::debug!("unexpected response {} from peer {}", id, peer); + self.remove_peer(&peer); + self.peerset.report_peer( + peer, + ReputationChange::new_fatal("unexpected response from peer"), + ); + let rw = RequestWrapper { + timestamp: request.timestamp, + retries: request.retries, + request: request.request, + peer: (), + connection: None, + }; + self.pending_requests.push_back(rw); + } + Err(other) => { + log::debug!( + "error handling response {} from peer {}: {}", + id, + peer, + other + ); + self.remove_peer(&peer); + self.peerset.report_peer( + peer, + ReputationChange::new_fatal("invalid response from peer"), + ); + if request.retries > 0 { + let rw = RequestWrapper { + timestamp: request.timestamp, + retries: request.retries - 1, + request: request.request, + peer: (), + connection: None, + }; + self.pending_requests.push_back(rw) + } else { + send_reply(Err(ClientError::RemoteFetchFailed), request.request) + } + } + } + } else { + // If we get here, something is wrong with our internal handling of peers. + // We apparently have an entry in our `outstanding` map and the peer is the one we + // expected. So, if we can not find an entry for it in our peer information table, + // then these two collections are out of sync which must not happen and is a clear + // programmer error that must be fixed! + panic!("missing peer information for {}; response {}", peer, id); + } + } else { + log::debug!("unexpected response {} from peer {}", id, peer); + self.remove_peer(&peer); + self.peerset.report_peer( + peer, + ReputationChange::new_fatal("response from unexpected peer"), + ); + } + } + } + } + + fn poll( + &mut self, + cx: &mut Context, + _: &mut impl PollParameters, + ) -> Poll> { + // Process response sending futures. 
+ while let Poll::Ready(Some(_)) = self.responses.poll_next_unpin(cx) {} + + // If we have a pending request to send, try to find an available peer and send it. + let now = Instant::now(); + while let Some(mut request) = self.pending_requests.pop_front() { + if now > request.timestamp + self.config.request_timeout { + if request.retries == 0 { + send_reply(Err(ClientError::RemoteFetchFailed), request.request); + continue; + } + request.timestamp = Instant::now(); + request.retries -= 1 + } + + match self.prepare_request(request) { + Err(request) => { + self.pending_requests.push_front(request); + log::debug!("no peer available to send request to"); + break; + } + Ok((peer, request)) => { + let request_bytes = match serialize_request(&request.request) { + Ok(bytes) => bytes, + Err(error) => { + log::debug!("failed to serialize request: {}", error); + send_reply(Err(ClientError::RemoteFetchFailed), request.request); + continue; + } + }; + + let (expected, protocol) = match request.request { + Request::Body { .. } => ( + ExpectedResponseTy::Block, + self.config.block_protocol.clone(), + ), + _ => ( + ExpectedResponseTy::Light, + self.config.light_protocol.clone(), + ), + }; + + let peer_id = peer.clone(); + let handler = request + .connection + .map_or(NotifyHandler::Any, NotifyHandler::One); + + let request_id = self.next_request_id(); + self.peers + .get_mut(&peer) + .map(|p| p.status = PeerStatus::BusyWith(request_id)); + self.outstanding.insert(request_id, request); + + let event = OutboundProtocol { + request_id, + request: request_bytes, + expected, + max_response_size: self.config.max_response_size, + protocol, + }; + + log::trace!("sending request {} to peer {}", request_id, peer_id); + + return Poll::Ready(NetworkBehaviourAction::NotifyHandler { + peer_id, + handler, + event, + }); + } + } + } + + // Look for ongoing requests that have timed out. 
+ let mut expired = Vec::new(); + for (id, rw) in &self.outstanding { + if now > rw.timestamp + self.config.request_timeout { + log::debug!("request {} timed out", id); + expired.push(*id) + } + } + for id in expired { + if let Some(rw) = self.outstanding.remove(&id) { + self.remove_peer(&rw.peer); + self.peerset.report_peer( + rw.peer.clone(), + ReputationChange::new(TIMEOUT_REPUTATION_CHANGE, "light request timeout"), + ); + if rw.retries == 0 { + send_reply(Err(ClientError::RemoteFetchFailed), rw.request); + continue; + } + let rw = RequestWrapper { + timestamp: Instant::now(), + retries: rw.retries - 1, + request: rw.request, + peer: (), + connection: None, + }; + self.pending_requests.push_back(rw) + } + } + + Poll::Pending + } } fn required_block(request: &Request) -> NumberFor { - match request { - Request::Body { request, .. } => *request.header.number(), - Request::Header { request, .. } => request.block, - Request::Read { request, .. } => *request.header.number(), - Request::ReadChild { request, .. } => *request.header.number(), - Request::Call { request, .. } => *request.header.number(), - Request::Changes { request, .. } => request.max_block.0, - } + match request { + Request::Body { request, .. } => *request.header.number(), + Request::Header { request, .. } => request.block, + Request::Read { request, .. } => *request.header.number(), + Request::ReadChild { request, .. } => *request.header.number(), + Request::Call { request, .. } => *request.header.number(), + Request::Changes { request, .. } => request.max_block.0, + } } fn retries(request: &Request) -> usize { - let rc = match request { - Request::Body { request, .. } => request.retry_count, - Request::Header { request, .. } => request.retry_count, - Request::Read { request, .. } => request.retry_count, - Request::ReadChild { request, .. } => request.retry_count, - Request::Call { request, .. } => request.retry_count, - Request::Changes { request, .. 
} => request.retry_count, - }; - rc.unwrap_or(0) + let rc = match request { + Request::Body { request, .. } => request.retry_count, + Request::Header { request, .. } => request.retry_count, + Request::Read { request, .. } => request.retry_count, + Request::ReadChild { request, .. } => request.retry_count, + Request::Call { request, .. } => request.retry_count, + Request::Changes { request, .. } => request.retry_count, + }; + rc.unwrap_or(0) } fn serialize_request(request: &Request) -> Result, prost::EncodeError> { - let request = match request { - Request::Body { request, .. } => { - let rq = api::v1::BlockRequest { - fields: u32::from(BlockAttributes::BODY.bits()), - from_block: Some(api::v1::block_request::FromBlock::Hash(request.header.hash().encode())), - to_block: Vec::new(), - direction: api::v1::Direction::Ascending as i32, - max_blocks: 1, - }; - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - return Ok(buf); - } - Request::Header { request, .. } => { - let r = api::v1::light::RemoteHeaderRequest { block: request.block.encode() }; - api::v1::light::request::Request::RemoteHeaderRequest(r) - } - Request::Read { request, .. } => { - let r = api::v1::light::RemoteReadRequest { - block: request.block.encode(), - keys: request.keys.clone(), - }; - api::v1::light::request::Request::RemoteReadRequest(r) - } - Request::ReadChild { request, .. } => { - let r = api::v1::light::RemoteReadChildRequest { - block: request.block.encode(), - storage_key: request.storage_key.clone(), - child_type: request.child_type.clone(), - child_info: request.child_info.clone(), - keys: request.keys.clone(), - }; - api::v1::light::request::Request::RemoteReadChildRequest(r) - } - Request::Call { request, .. } => { - let r = api::v1::light::RemoteCallRequest { - block: request.block.encode(), - method: request.method.clone(), - data: request.call_data.clone(), - }; - api::v1::light::request::Request::RemoteCallRequest(r) - } - Request::Changes { request, .. 
} => { - let r = api::v1::light::RemoteChangesRequest { - first: request.first_block.1.encode(), - last: request.last_block.1.encode(), - min: request.tries_roots.1.encode(), - max: request.max_block.1.encode(), - storage_key: request.storage_key.clone().unwrap_or_default(), - key: request.key.clone(), - }; - api::v1::light::request::Request::RemoteChangesRequest(r) - } - }; - - let rq = api::v1::light::Request { request: Some(request) }; - let mut buf = Vec::with_capacity(rq.encoded_len()); - rq.encode(&mut buf)?; - Ok(buf) + let request = match request { + Request::Body { request, .. } => { + let rq = api::v1::BlockRequest { + fields: u32::from(BlockAttributes::BODY.bits()), + from_block: Some(api::v1::block_request::FromBlock::Hash( + request.header.hash().encode(), + )), + to_block: Vec::new(), + direction: api::v1::Direction::Ascending as i32, + max_blocks: 1, + }; + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + return Ok(buf); + } + Request::Header { request, .. } => { + let r = api::v1::light::RemoteHeaderRequest { + block: request.block.encode(), + }; + api::v1::light::request::Request::RemoteHeaderRequest(r) + } + Request::Read { request, .. } => { + let r = api::v1::light::RemoteReadRequest { + block: request.block.encode(), + keys: request.keys.clone(), + }; + api::v1::light::request::Request::RemoteReadRequest(r) + } + Request::ReadChild { request, .. } => { + let r = api::v1::light::RemoteReadChildRequest { + block: request.block.encode(), + storage_key: request.storage_key.clone(), + child_type: request.child_type.clone(), + child_info: request.child_info.clone(), + keys: request.keys.clone(), + }; + api::v1::light::request::Request::RemoteReadChildRequest(r) + } + Request::Call { request, .. 
} => { + let r = api::v1::light::RemoteCallRequest { + block: request.block.encode(), + method: request.method.clone(), + data: request.call_data.clone(), + }; + api::v1::light::request::Request::RemoteCallRequest(r) + } + Request::Changes { request, .. } => { + let r = api::v1::light::RemoteChangesRequest { + first: request.first_block.1.encode(), + last: request.last_block.1.encode(), + min: request.tries_roots.1.encode(), + max: request.max_block.1.encode(), + storage_key: request.storage_key.clone().unwrap_or_default(), + key: request.key.clone(), + }; + api::v1::light::request::Request::RemoteChangesRequest(r) + } + }; + + let rq = api::v1::light::Request { + request: Some(request), + }; + let mut buf = Vec::with_capacity(rq.encoded_len()); + rq.encode(&mut buf)?; + Ok(buf) } fn send_reply(result: Result, ClientError>, request: Request) { - fn send(item: T, sender: oneshot::Sender) { - let _ = sender.send(item); // It is okay if the other end already hung up. - } - match request { - Request::Body { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), - } - Request::Header { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::Header(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for header request: {:?}, {:?}", reply, request), - } - Request::Read { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), - } - Request::ReadChild { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for read child request: {:?}, {:?}", reply, request), - } - Request::Call { request, sender } => match result { - 
Err(e) => send(Err(e), sender), - Ok(Reply::VecU8(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), - } - Request::Changes { request, sender } => match result { - Err(e) => send(Err(e), sender), - Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), - reply => log::error!("invalid reply for changes request: {:?}, {:?}", reply, request), - } - } + fn send(item: T, sender: oneshot::Sender) { + let _ = sender.send(item); // It is okay if the other end already hung up. + } + match request { + Request::Body { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Extrinsics(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for body request: {:?}, {:?}", reply, request), + }, + Request::Header { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::Header(x)) => send(Ok(x), sender), + reply => log::error!( + "invalid reply for header request: {:?}, {:?}", + reply, + request + ), + }, + Request::Read { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for read request: {:?}, {:?}", reply, request), + }, + Request::ReadChild { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::MapVecU8OptVecU8(x)) => send(Ok(x), sender), + reply => log::error!( + "invalid reply for read child request: {:?}, {:?}", + reply, + request + ), + }, + Request::Call { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::VecU8(x)) => send(Ok(x), sender), + reply => log::error!("invalid reply for call request: {:?}, {:?}", reply, request), + }, + Request::Changes { request, sender } => match result { + Err(e) => send(Err(e), sender), + Ok(Reply::VecNumberU32(x)) => send(Ok(x), sender), + reply => log::error!( + "invalid reply for changes request: {:?}, {:?}", + reply, + request + ), + }, + } } /// 
Output type of inbound and outbound substream upgrades. #[derive(Debug)] pub enum Event { - /// Incoming request from remote and substream to use for the response. - Request(api::v1::light::Request, T), - /// Incoming response from remote. - Response(RequestId, Response), + /// Incoming request from remote and substream to use for the response. + Request(api::v1::light::Request, T), + /// Incoming response from remote. + Response(RequestId, Response), } /// Incoming response from remote. #[derive(Debug, Clone)] pub enum Response { - /// Incoming light response from remote. - Light(api::v1::light::Response), - /// Incoming block response from remote. - Block(api::v1::BlockResponse), + /// Incoming light response from remote. + Light(api::v1::light::Response), + /// Incoming block response from remote. + Block(api::v1::BlockResponse), } /// Substream upgrade protocol. @@ -1187,39 +1325,39 @@ pub enum Response { /// Reads incoming requests from remote. #[derive(Debug, Clone)] pub struct InboundProtocol { - /// The max. request length in bytes. - max_request_size: usize, - /// The protocol to use for upgrade negotiation. - protocol: Bytes, + /// The max. request length in bytes. + max_request_size: usize, + /// The protocol to use for upgrade negotiation. 
+ protocol: Bytes, } impl UpgradeInfo for InboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; + type Info = Bytes; + type InfoIter = iter::Once; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } } impl InboundUpgrade for InboundProtocol where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { - let future = async move { - let vec = read_one(&mut s, self.max_request_size).await?; - match api::v1::light::Request::decode(&vec[..]) { - Ok(r) => Ok(Event::Request(r, s)), - Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))) - } - }; - future.boxed() - } + type Output = Event; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_inbound(self, mut s: T, _: Self::Info) -> Self::Future { + let future = async move { + let vec = read_one(&mut s, self.max_request_size).await?; + match api::v1::light::Request::decode(&vec[..]) { + Ok(r) => Ok(Event::Request(r, s)), + Err(e) => Err(ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))), + } + }; + future.boxed() + } } /// Substream upgrade protocol. @@ -1227,825 +1365,953 @@ where /// Sends a request to remote and awaits the response. #[derive(Debug, Clone)] pub struct OutboundProtocol { - /// The serialized protobuf request. - request: Vec, - /// Local identifier for the request. Used to associate it with a response. - request_id: RequestId, - /// Kind of response expected for this request. - expected: ExpectedResponseTy, - /// The max. response length in bytes. - max_response_size: usize, - /// The protocol to use for upgrade negotiation. - protocol: Bytes, + /// The serialized protobuf request. 
+ request: Vec, + /// Local identifier for the request. Used to associate it with a response. + request_id: RequestId, + /// Kind of response expected for this request. + expected: ExpectedResponseTy, + /// The max. response length in bytes. + max_response_size: usize, + /// The protocol to use for upgrade negotiation. + protocol: Bytes, } /// Type of response expected from the remote for this request. #[derive(Debug, Clone)] enum ExpectedResponseTy { - Light, - Block, + Light, + Block, } impl UpgradeInfo for OutboundProtocol { - type Info = Bytes; - type InfoIter = iter::Once; + type Info = Bytes; + type InfoIter = iter::Once; - fn protocol_info(&self) -> Self::InfoIter { - iter::once(self.protocol.clone()) - } + fn protocol_info(&self) -> Self::InfoIter { + iter::once(self.protocol.clone()) + } } impl OutboundUpgrade for OutboundProtocol where - T: AsyncRead + AsyncWrite + Unpin + Send + 'static + T: AsyncRead + AsyncWrite + Unpin + Send + 'static, { - type Output = Event; - type Error = ReadOneError; - type Future = BoxFuture<'static, Result>; - - fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { - let future = async move { - write_one(&mut s, &self.request).await?; - let vec = read_one(&mut s, self.max_response_size).await?; - - match self.expected { - ExpectedResponseTy::Light => { - api::v1::light::Response::decode(&vec[..]) - .map(|r| Event::Response(self.request_id, Response::Light(r))) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - }, - ExpectedResponseTy::Block => { - api::v1::BlockResponse::decode(&vec[..]) - .map(|r| Event::Response(self.request_id, Response::Block(r))) - .map_err(|e| { - ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e)) - }) - } - } - }; - future.boxed() - } + type Output = Event; + type Error = ReadOneError; + type Future = BoxFuture<'static, Result>; + + fn upgrade_outbound(self, mut s: T, _: Self::Info) -> Self::Future { + let future = async move { + write_one(&mut s, 
&self.request).await?; + let vec = read_one(&mut s, self.max_response_size).await?; + + match self.expected { + ExpectedResponseTy::Light => api::v1::light::Response::decode(&vec[..]) + .map(|r| Event::Response(self.request_id, Response::Light(r))) + .map_err(|e| ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))), + ExpectedResponseTy::Block => api::v1::BlockResponse::decode(&vec[..]) + .map(|r| Event::Response(self.request_id, Response::Block(r))) + .map_err(|e| ReadOneError::Io(io::Error::new(io::ErrorKind::Other, e))), + } + }; + future.boxed() + } } fn fmt_keys(first: Option<&Vec>, last: Option<&Vec>) -> String { - if let (Some(first), Some(last)) = (first, last) { - if first == last { - HexDisplay::from(first).to_string() - } else { - format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) - } - } else { - String::from("n/a") - } + if let (Some(first), Some(last)) = (first, last) { + if first == last { + HexDisplay::from(first).to_string() + } else { + format!("{}..{}", HexDisplay::from(first), HexDisplay::from(last)) + } + } else { + String::from("n/a") + } } #[cfg(test)] mod tests { - use async_std::task; - use assert_matches::assert_matches; - use codec::Encode; - use crate::{ - chain::Client, - config::ProtocolId, - protocol::api, - }; - use futures::{channel::oneshot, prelude::*}; - use libp2p::{ - PeerId, - Multiaddr, - core::{ - ConnectedPoint, - connection::ConnectionId, - identity, - muxing::{StreamMuxerBox, SubstreamRef}, - transport::{Transport, boxed::Boxed, memory::MemoryTransport}, - upgrade - }, - noise::{self, Keypair, X25519, NoiseConfig}, - swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, - yamux - }; - use sc_client_api::StorageProof; - use sc_client::light::fetcher; - use sp_blockchain::{Error as ClientError}; - use sp_core::storage::ChildInfo; - use std::{ - collections::{HashMap, HashSet}, - io, - iter::{self, FromIterator}, - pin::Pin, - sync::Arc, - task::{Context, Poll} - }; - use 
sp_runtime::{generic::Header, traits::{BlakeTwo256, Block as BlockT, NumberFor}}; - use super::{Event, LightClientHandler, Request, Response, OutboundProtocol, PeerStatus}; - use void::Void; - - const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); - - type Block = sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; - type Handler = LightClientHandler; - type Swarm = libp2p::swarm::Swarm; - - fn empty_proof() -> Vec { - StorageProof::empty().encode() - } - - fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { - let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); - let id_key = identity::Keypair::generate_ed25519(); - let dh_key = Keypair::::new().into_authentic(&id_key).unwrap(); - let local_peer = id_key.public().into_peer_id(); - let transport = MemoryTransport::default() - .upgrade(upgrade::Version::V1) - .authenticate(NoiseConfig::xx(dh_key).into_authenticated()) - .multiplex(yamux::Config::default()) - .map(|(peer, muxer), _| (peer, StreamMuxerBox::new(muxer))) - .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) - .boxed(); - Swarm::new(transport, LightClientHandler::new(cf, client, checker, ps), local_peer) - } - - struct DummyFetchChecker { - ok: bool, - _mark: std::marker::PhantomData - } - - impl fetcher::FetchChecker for DummyFetchChecker { - fn check_header_proof( - &self, - _request: &fetcher::RemoteHeaderRequest, - header: Option, - _remote_proof: fetcher::StorageProof, - ) -> Result { - match self.ok { - true if header.is_some() => Ok(header.unwrap()), - _ => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_proof( - &self, - request: &fetcher::RemoteReadRequest, - _: fetcher::StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => 
Err(ClientError::Backend("Test error".into())), - } - } - - fn check_read_child_proof( - &self, - request: &fetcher::RemoteReadChildRequest, - _: fetcher::StorageProof, - ) -> Result, Option>>, ClientError> { - match self.ok { - true => Ok(request.keys - .iter() - .cloned() - .map(|k| (k, Some(vec![42]))) - .collect() - ), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_execution_proof( - &self, - _: &fetcher::RemoteCallRequest, - _: fetcher::StorageProof, - ) -> Result, ClientError> { - match self.ok { - true => Ok(vec![42]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_changes_proof( - &self, - _: &fetcher::RemoteChangesRequest, - _: fetcher::ChangesProof - ) -> Result, u32)>, ClientError> { - match self.ok { - true => Ok(vec![(100.into(), 2)]), - false => Err(ClientError::Backend("Test error".into())), - } - } - - fn check_body_proof( - &self, - _: &fetcher::RemoteBodyRequest, - body: Vec - ) -> Result, ClientError> { - match self.ok { - true => Ok(body), - false => Err(ClientError::Backend("Test error".into())), - } - } - } - - fn make_config() -> super::Config { - super::Config::new(&ProtocolId::from(&b"foo"[..])) - } - - fn dummy_header() -> sp_test_primitives::Header { - sp_test_primitives::Header { - parent_hash: Default::default(), - number: 0, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - } - } - - struct EmptyPollParams(PeerId); - - impl PollParameters for EmptyPollParams { - type SupportedProtocolsIter = iter::Empty>; - type ListenedAddressesIter = iter::Empty; - type ExternalAddressesIter = iter::Empty; - - fn supported_protocols(&self) -> Self::SupportedProtocolsIter { - iter::empty() - } - - fn listened_addresses(&self) -> Self::ListenedAddressesIter { - iter::empty() - } - - fn external_addresses(&self) -> Self::ExternalAddressesIter { - iter::empty() - } - - fn local_peer_id(&self) -> &PeerId { - &self.0 - } - } - - fn 
peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { - let cfg = sc_peerset::PeersetConfig { - in_peers: 128, - out_peers: 128, - bootnodes: Vec::new(), - reserved_only: false, - priority_groups: Vec::new(), - }; - sc_peerset::Peerset::from_config(cfg) - } - - fn make_behaviour - ( ok: bool - , ps: sc_peerset::PeersetHandle - , cf: super::Config - ) -> LightClientHandler - { - let client = Arc::new(substrate_test_runtime_client::new()); - let checker = Arc::new(DummyFetchChecker { ok, _mark: std::marker::PhantomData }); - LightClientHandler::new(cf, client, checker, ps) - } - - fn empty_dialer() -> ConnectedPoint { - ConnectedPoint::Dialer { address: Multiaddr::empty() } - } - - fn poll(mut b: &mut LightClientHandler) -> Poll> { - let mut p = EmptyPollParams(PeerId::random()); - match future::poll_fn(|cx| Pin::new(&mut b).poll(cx, &mut p)).now_or_never() { - Some(a) => Poll::Ready(a), - None => Poll::Pending - } - } - - #[test] - fn disconnects_from_peer_if_told() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - behaviour.inject_connection_established(&peer, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - behaviour.inject_connection_closed(&peer, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_disconnected(&peer); - assert_eq!(0, behaviour.peers.len()) - } - - #[test] - fn disconnects_from_peer_if_request_times_out() { - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - behaviour.inject_connection_established(&peer0, &ConnectionId::new(1), &empty_dialer()); - behaviour.inject_connected(&peer0); - behaviour.inject_connection_established(&peer1, &ConnectionId::new(2), &empty_dialer()); - behaviour.inject_connected(&peer1); - - // We now know about two peers. 
- assert_eq!(HashSet::from_iter(&[peer0.clone(), peer1.clone()]), behaviour.peers.keys().collect::>()); - - // No requests have been made yet. - assert!(behaviour.pending_requests.is_empty()); - assert!(behaviour.outstanding.is_empty()); - - // Issue our first request! - let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - assert_eq!(1, behaviour.pending_requests.len()); - - // The behaviour should now attempt to send the request. - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, .. }) => { - assert!(peer_id == peer0 || peer_id == peer1) - }); - - // And we should have one busy peer. - assert!({ - let (idle, busy): (Vec<_>, Vec<_>) = - behaviour.peers.iter().partition(|(_, info)| info.status == PeerStatus::Idle); - - idle.len() == 1 && busy.len() == 1 - && (idle[0].0 == &peer0 || busy[0].0 == &peer0) - && (idle[0].0 == &peer1 || busy[0].0 == &peer1) - }); - - // No more pending requests, but one should be outstanding. - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - // We now set back the timestamp of the outstanding request to make it expire. - let request = behaviour.outstanding.values_mut().next().unwrap(); - request.timestamp -= make_config().request_timeout; - - // Make progress, but do not expect some action. - assert_matches!(poll(&mut behaviour), Poll::Pending); - - // The request should have timed out by now and the corresponding peer be removed. - assert_eq!(1, behaviour.peers.len()); - // Since we asked for one retry, the request should be back in the pending queue. - assert_eq!(1, behaviour.pending_requests.len()); - // No other request should be ongoing. 
- assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_incorrect_response() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(false, pset.1, make_config()); - // ^--- Making sure the response data check fails. - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - poll(&mut behaviour); // Make progress - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - let request_id = *behaviour.outstanding.keys().next().unwrap(); - - let response = { - let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); - assert!(behaviour.peers.is_empty()); - - poll(&mut behaviour); // More progress - - // The request should be back in the pending queue - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_unexpected_response() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - 
assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - - // Some unsolicited response - let response = { - let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(2347895932, Response::Light(response))); - - assert!(behaviour.peers.is_empty()); - poll(&mut behaviour); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn disconnects_from_peer_on_wrong_response_type() { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(1), - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - poll(&mut behaviour); // Make progress - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - let request_id = *behaviour.outstanding.keys().next().unwrap(); - - let response = { - let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; // Not a RemoteCallResponse! 
- api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), - } - }; - - behaviour.inject_event(peer.clone(), conn, Event::Response(request_id, Response::Light(response))); - assert!(behaviour.peers.is_empty()); - - poll(&mut behaviour); // More progress - - // The request should be back in the pending queue - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - } - - #[test] - fn receives_remote_failure_after_retry_count_failures() { - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - let peer3 = PeerId::random(); - let peer4 = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(false, pset.1, make_config()); - // ^--- Making sure the response data check fails. - - let conn1 = ConnectionId::new(1); - behaviour.inject_connection_established(&peer1, &conn1, &empty_dialer()); - behaviour.inject_connected(&peer1); - let conn2 = ConnectionId::new(2); - behaviour.inject_connection_established(&peer2, &conn2, &empty_dialer()); - behaviour.inject_connected(&peer2); - let conn3 = ConnectionId::new(3); - behaviour.inject_connection_established(&peer3, &conn3, &empty_dialer()); - behaviour.inject_connected(&peer3); - let conn4 = ConnectionId::new(3); - behaviour.inject_connection_established(&peer4, &conn4, &empty_dialer()); - behaviour.inject_connected(&peer4); - assert_eq!(4, behaviour.peers.len()); - - let mut chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: Some(3), // Attempt up to three retries. - }; - behaviour.request(Request::Call { request, sender: chan.0 }).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - - for i in 1 ..= 3 { - // Construct an invalid response - let request_id = *behaviour.outstanding.keys().next().unwrap(); - let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); - let response = { - let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteCallResponse(r)) - } - }; - let conn = ConnectionId::new(i); - behaviour.inject_event(responding_peer, conn, Event::Response(request_id, Response::Light(response.clone()))); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. })); - assert_matches!(chan.1.try_recv(), Ok(None)) - } - // Final invalid response - let request_id = *behaviour.outstanding.keys().next().unwrap(); - let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); - let response = { - let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), - } - }; - behaviour.inject_event(responding_peer, conn4, Event::Response(request_id, Response::Light(response))); - assert_matches!(poll(&mut behaviour), Poll::Pending); - assert_matches!(chan.1.try_recv(), Ok(Some(Err(ClientError::RemoteFetchFailed)))) - } - - fn issue_request(request: Request) { - let peer = PeerId::random(); - let pset = peerset(); - let mut behaviour = make_behaviour(true, pset.1, make_config()); - - let conn = ConnectionId::new(1); - behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); - behaviour.inject_connected(&peer); - assert_eq!(1, behaviour.peers.len()); - - let response = match request { - Request::Body { .. 
} => unimplemented!(), - Request::Header{..} => { - let r = api::v1::light::RemoteHeaderResponse { - header: dummy_header().encode(), - proof: empty_proof() - }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteHeaderResponse(r)), - } - } - Request::Read{..} => { - let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), - } - } - Request::ReadChild{..} => { - let r = api::v1::light::RemoteReadResponse { proof: empty_proof() }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), - } - } - Request::Call{..} => { - let r = api::v1::light::RemoteCallResponse { proof: empty_proof() }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), - } - } - Request::Changes{..} => { - let r = api::v1::light::RemoteChangesResponse { - max: iter::repeat(1).take(32).collect(), - proof: Vec::new(), - roots: Vec::new(), - roots_proof: empty_proof() - }; - api::v1::light::Response { - response: Some(api::v1::light::response::Response::RemoteChangesResponse(r)), - } - } - }; - - behaviour.request(request).unwrap(); - - assert_eq!(1, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()); - assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
})); - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(1, behaviour.outstanding.len()); - assert_eq!(1, *behaviour.outstanding.keys().next().unwrap()); - - behaviour.inject_event(peer.clone(), conn, Event::Response(1, Response::Light(response))); - - poll(&mut behaviour); - - assert_eq!(0, behaviour.pending_requests.len()); - assert_eq!(0, behaviour.outstanding.len()) - } - - #[test] - fn receives_remote_call_response() { - let mut chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - issue_request(Request::Call { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_response() { - let mut chan = oneshot::channel(); - let request = fetcher::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None, - }; - issue_request(Request::Read { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_read_child_response() { - let info = CHILD_INFO.info(); - let mut chan = oneshot::channel(); - let request = fetcher::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), - keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, - retry_count: None, - }; - issue_request(Request::ReadChild { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn receives_remote_header_response() { - let mut chan = oneshot::channel(); - let request = fetcher::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - issue_request(Request::Header { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - #[test] - fn 
receives_remote_changes_response() { - let mut chan = oneshot::channel(); - let request = fetcher::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), - tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - issue_request(Request::Changes { request, sender: chan.0 }); - assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) - } - - fn send_receive(request: Request) { - // We start a swarm on the listening side which awaits incoming requests and answers them: - let local_pset = peerset(); - let local_listen_addr: libp2p::Multiaddr = libp2p::multiaddr::Protocol::Memory(rand::random()).into(); - let mut local_swarm = make_swarm(true, local_pset.1, make_config()); - Swarm::listen_on(&mut local_swarm, local_listen_addr.clone()).unwrap(); - - // We also start a swarm that makes requests and awaits responses: - let remote_pset = peerset(); - let mut remote_swarm = make_swarm(true, remote_pset.1, make_config()); - - // We now schedule a request, dial the remote and let the two swarm work it out: - remote_swarm.request(request).unwrap(); - Swarm::dial_addr(&mut remote_swarm, local_listen_addr).unwrap(); - - let future = { - let a = local_swarm.for_each(|_| future::ready(())); - let b = remote_swarm.for_each(|_| future::ready(())); - future::join(a, b).map(|_| ()) - }; - - task::spawn(future); - } - - #[test] - fn send_receive_call() { - let chan = oneshot::channel(); - let request = fetcher::RemoteCallRequest { - block: Default::default(), - header: dummy_header(), - method: "test".into(), - call_data: vec![], - retry_count: None, - }; - send_receive(Request::Call { request, sender: chan.0 }); - assert_eq!(vec![42], 
task::block_on(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_execution_proof` - } - - #[test] - fn send_receive_read() { - let chan = oneshot::channel(); - let request = fetcher::RemoteReadRequest { - header: dummy_header(), - block: Default::default(), - keys: vec![b":key".to_vec()], - retry_count: None - }; - send_receive(Request::Read { request, sender: chan.0 }); - assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_proof` - } - - #[test] - fn send_receive_read_child() { - let info = CHILD_INFO.info(); - let chan = oneshot::channel(); - let request = fetcher::RemoteReadChildRequest { - header: dummy_header(), - block: Default::default(), - storage_key: b":child_storage:sub".to_vec(), - keys: vec![b":key".to_vec()], - child_info: info.0.to_vec(), - child_type: info.1, - retry_count: None, - }; - send_receive(Request::ReadChild { request, sender: chan.0 }); - assert_eq!(Some(vec![42]), task::block_on(chan.1).unwrap().unwrap().remove(&b":key"[..]).unwrap()); - // ^--- from `DummyFetchChecker::check_read_child_proof` - } - - #[test] - fn send_receive_header() { - let _ = env_logger::try_init(); - let chan = oneshot::channel(); - let request = fetcher::RemoteHeaderRequest { - cht_root: Default::default(), - block: 1, - retry_count: None, - }; - send_receive(Request::Header { request, sender: chan.0 }); - // The remote does not know block 1: - assert_matches!(task::block_on(chan.1).unwrap(), Err(ClientError::RemoteFetchFailed)); - } - - #[test] - fn send_receive_changes() { - let chan = oneshot::channel(); - let request = fetcher::RemoteChangesRequest { - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), - }], - first_block: (1, Default::default()), - last_block: (100, Default::default()), - max_block: (100, Default::default()), 
- tries_roots: (1, Default::default(), Vec::new()), - key: Vec::new(), - storage_key: None, - retry_count: None, - }; - send_receive(Request::Changes { request, sender: chan.0 }); - assert_eq!(vec![(100, 2)], task::block_on(chan.1).unwrap().unwrap()); - // ^--- from `DummyFetchChecker::check_changes_proof` - } + use super::{Event, LightClientHandler, OutboundProtocol, PeerStatus, Request, Response}; + use crate::{chain::Client, config::ProtocolId, protocol::api}; + use assert_matches::assert_matches; + use async_std::task; + use codec::Encode; + use futures::{channel::oneshot, prelude::*}; + use libp2p::{ + core::{ + connection::ConnectionId, + identity, + muxing::{StreamMuxerBox, SubstreamRef}, + transport::{boxed::Boxed, memory::MemoryTransport, Transport}, + upgrade, ConnectedPoint, + }, + noise::{self, Keypair, NoiseConfig, X25519}, + swarm::{NetworkBehaviour, NetworkBehaviourAction, PollParameters}, + yamux, Multiaddr, PeerId, + }; + use sc_client::light::fetcher; + use sc_client_api::StorageProof; + use sp_blockchain::Error as ClientError; + use sp_core::storage::ChildInfo; + use sp_runtime::{ + generic::Header, + traits::{BlakeTwo256, Block as BlockT, NumberFor}, + }; + use std::{ + collections::{HashMap, HashSet}, + io, + iter::{self, FromIterator}, + pin::Pin, + sync::Arc, + task::{Context, Poll}, + }; + use void::Void; + + const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"foobarbaz"); + + type Block = + sp_runtime::generic::Block, substrate_test_runtime::Extrinsic>; + type Handler = LightClientHandler; + type Swarm = libp2p::swarm::Swarm; + + fn empty_proof() -> Vec { + StorageProof::empty().encode() + } + + fn make_swarm(ok: bool, ps: sc_peerset::PeersetHandle, cf: super::Config) -> Swarm { + let client = Arc::new(substrate_test_runtime_client::new()); + let checker = Arc::new(DummyFetchChecker { + ok, + _mark: std::marker::PhantomData, + }); + let id_key = identity::Keypair::generate_ed25519(); + let dh_key = 
Keypair::::new().into_authentic(&id_key).unwrap(); + let local_peer = id_key.public().into_peer_id(); + let transport = MemoryTransport::default() + .upgrade(upgrade::Version::V1) + .authenticate(NoiseConfig::xx(dh_key).into_authenticated()) + .multiplex(yamux::Config::default()) + .map(|(peer, muxer), _| (peer, StreamMuxerBox::new(muxer))) + .map_err(|e| io::Error::new(io::ErrorKind::Other, e)) + .boxed(); + Swarm::new( + transport, + LightClientHandler::new(cf, client, checker, ps), + local_peer, + ) + } + + struct DummyFetchChecker { + ok: bool, + _mark: std::marker::PhantomData, + } + + impl fetcher::FetchChecker for DummyFetchChecker { + fn check_header_proof( + &self, + _request: &fetcher::RemoteHeaderRequest, + header: Option, + _remote_proof: fetcher::StorageProof, + ) -> Result { + match self.ok { + true if header.is_some() => Ok(header.unwrap()), + _ => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_proof( + &self, + request: &fetcher::RemoteReadRequest, + _: fetcher::StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request + .keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect()), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_read_child_proof( + &self, + request: &fetcher::RemoteReadChildRequest, + _: fetcher::StorageProof, + ) -> Result, Option>>, ClientError> { + match self.ok { + true => Ok(request + .keys + .iter() + .cloned() + .map(|k| (k, Some(vec![42]))) + .collect()), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_execution_proof( + &self, + _: &fetcher::RemoteCallRequest, + _: fetcher::StorageProof, + ) -> Result, ClientError> { + match self.ok { + true => Ok(vec![42]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_changes_proof( + &self, + _: &fetcher::RemoteChangesRequest, + _: fetcher::ChangesProof, + ) -> Result, u32)>, ClientError> { + match self.ok { + true => 
Ok(vec![(100.into(), 2)]), + false => Err(ClientError::Backend("Test error".into())), + } + } + + fn check_body_proof( + &self, + _: &fetcher::RemoteBodyRequest, + body: Vec, + ) -> Result, ClientError> { + match self.ok { + true => Ok(body), + false => Err(ClientError::Backend("Test error".into())), + } + } + } + + fn make_config() -> super::Config { + super::Config::new(&ProtocolId::from(&b"foo"[..])) + } + + fn dummy_header() -> sp_test_primitives::Header { + sp_test_primitives::Header { + parent_hash: Default::default(), + number: 0, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + } + } + + struct EmptyPollParams(PeerId); + + impl PollParameters for EmptyPollParams { + type SupportedProtocolsIter = iter::Empty>; + type ListenedAddressesIter = iter::Empty; + type ExternalAddressesIter = iter::Empty; + + fn supported_protocols(&self) -> Self::SupportedProtocolsIter { + iter::empty() + } + + fn listened_addresses(&self) -> Self::ListenedAddressesIter { + iter::empty() + } + + fn external_addresses(&self) -> Self::ExternalAddressesIter { + iter::empty() + } + + fn local_peer_id(&self) -> &PeerId { + &self.0 + } + } + + fn peerset() -> (sc_peerset::Peerset, sc_peerset::PeersetHandle) { + let cfg = sc_peerset::PeersetConfig { + in_peers: 128, + out_peers: 128, + bootnodes: Vec::new(), + reserved_only: false, + priority_groups: Vec::new(), + }; + sc_peerset::Peerset::from_config(cfg) + } + + fn make_behaviour( + ok: bool, + ps: sc_peerset::PeersetHandle, + cf: super::Config, + ) -> LightClientHandler { + let client = Arc::new(substrate_test_runtime_client::new()); + let checker = Arc::new(DummyFetchChecker { + ok, + _mark: std::marker::PhantomData, + }); + LightClientHandler::new(cf, client, checker, ps) + } + + fn empty_dialer() -> ConnectedPoint { + ConnectedPoint::Dialer { + address: Multiaddr::empty(), + } + } + + fn poll( + mut b: &mut LightClientHandler, + ) -> Poll> { + let mut p = 
EmptyPollParams(PeerId::random()); + match future::poll_fn(|cx| Pin::new(&mut b).poll(cx, &mut p)).now_or_never() { + Some(a) => Poll::Ready(a), + None => Poll::Pending, + } + } + + #[test] + fn disconnects_from_peer_if_told() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + behaviour.inject_connection_established(&peer, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + behaviour.inject_connection_closed(&peer, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_disconnected(&peer); + assert_eq!(0, behaviour.peers.len()) + } + + #[test] + fn disconnects_from_peer_if_request_times_out() { + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + behaviour.inject_connection_established(&peer0, &ConnectionId::new(1), &empty_dialer()); + behaviour.inject_connected(&peer0); + behaviour.inject_connection_established(&peer1, &ConnectionId::new(2), &empty_dialer()); + behaviour.inject_connected(&peer1); + + // We now know about two peers. + assert_eq!( + HashSet::from_iter(&[peer0.clone(), peer1.clone()]), + behaviour.peers.keys().collect::>() + ); + + // No requests have been made yet. + assert!(behaviour.pending_requests.is_empty()); + assert!(behaviour.outstanding.is_empty()); + + // Issue our first request! + let chan = oneshot::channel(); + let request = fetcher::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + behaviour + .request(Request::Call { + request, + sender: chan.0, + }) + .unwrap(); + assert_eq!(1, behaviour.pending_requests.len()); + + // The behaviour should now attempt to send the request. + assert_matches!(poll(&mut behaviour), Poll::Ready(NetworkBehaviourAction::NotifyHandler { peer_id, .. 
}) => { + assert!(peer_id == peer0 || peer_id == peer1) + }); + + // And we should have one busy peer. + assert!({ + let (idle, busy): (Vec<_>, Vec<_>) = behaviour + .peers + .iter() + .partition(|(_, info)| info.status == PeerStatus::Idle); + + idle.len() == 1 + && busy.len() == 1 + && (idle[0].0 == &peer0 || busy[0].0 == &peer0) + && (idle[0].0 == &peer1 || busy[0].0 == &peer1) + }); + + // No more pending requests, but one should be outstanding. + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + // We now set back the timestamp of the outstanding request to make it expire. + let request = behaviour.outstanding.values_mut().next().unwrap(); + request.timestamp -= make_config().request_timeout; + + // Make progress, but do not expect some action. + assert_matches!(poll(&mut behaviour), Poll::Pending); + + // The request should have timed out by now and the corresponding peer be removed. + assert_eq!(1, behaviour.peers.len()); + // Since we asked for one retry, the request should be back in the pending queue. + assert_eq!(1, behaviour.pending_requests.len()); + // No other request should be ongoing. + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn disconnects_from_peer_on_incorrect_response() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(false, pset.1, make_config()); + // ^--- Making sure the response data check fails. 
+ + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + let chan = oneshot::channel(); + let request = fetcher::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + behaviour + .request(Request::Call { + request, + sender: chan.0, + }) + .unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + poll(&mut behaviour); // Make progress + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + let request_id = *behaviour.outstanding.keys().next().unwrap(); + + let response = { + let r = api::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), + } + }; + + behaviour.inject_event( + peer.clone(), + conn, + Event::Response(request_id, Response::Light(response)), + ); + assert!(behaviour.peers.is_empty()); + + poll(&mut behaviour); // More progress + + // The request should be back in the pending queue + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn disconnects_from_peer_on_unexpected_response() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + + // Some unsolicited response + let response = { + let r = api::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + api::v1::light::Response { + response: 
Some(api::v1::light::response::Response::RemoteCallResponse(r)), + } + }; + + behaviour.inject_event( + peer.clone(), + conn, + Event::Response(2347895932, Response::Light(response)), + ); + + assert!(behaviour.peers.is_empty()); + poll(&mut behaviour); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn disconnects_from_peer_on_wrong_response_type() { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + let chan = oneshot::channel(); + let request = fetcher::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(1), + }; + behaviour + .request(Request::Call { + request, + sender: chan.0, + }) + .unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + poll(&mut behaviour); // Make progress + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + let request_id = *behaviour.outstanding.keys().next().unwrap(); + + let response = { + let r = api::v1::light::RemoteReadResponse { + proof: empty_proof(), + }; // Not a RemoteCallResponse! 
+ api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), + } + }; + + behaviour.inject_event( + peer.clone(), + conn, + Event::Response(request_id, Response::Light(response)), + ); + assert!(behaviour.peers.is_empty()); + + poll(&mut behaviour); // More progress + + // The request should be back in the pending queue + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + } + + #[test] + fn receives_remote_failure_after_retry_count_failures() { + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + let peer3 = PeerId::random(); + let peer4 = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(false, pset.1, make_config()); + // ^--- Making sure the response data check fails. + + let conn1 = ConnectionId::new(1); + behaviour.inject_connection_established(&peer1, &conn1, &empty_dialer()); + behaviour.inject_connected(&peer1); + let conn2 = ConnectionId::new(2); + behaviour.inject_connection_established(&peer2, &conn2, &empty_dialer()); + behaviour.inject_connected(&peer2); + let conn3 = ConnectionId::new(3); + behaviour.inject_connection_established(&peer3, &conn3, &empty_dialer()); + behaviour.inject_connected(&peer3); + let conn4 = ConnectionId::new(3); + behaviour.inject_connection_established(&peer4, &conn4, &empty_dialer()); + behaviour.inject_connected(&peer4); + assert_eq!(4, behaviour.peers.len()); + + let mut chan = oneshot::channel(); + let request = fetcher::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: Some(3), // Attempt up to three retries. + }; + behaviour + .request(Request::Call { + request, + sender: chan.0, + }) + .unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + assert_matches!( + poll(&mut behaviour), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
}) + ); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + + for i in 1..=3 { + // Construct an invalid response + let request_id = *behaviour.outstanding.keys().next().unwrap(); + let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); + let response = { + let r = api::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), + } + }; + let conn = ConnectionId::new(i); + behaviour.inject_event( + responding_peer, + conn, + Event::Response(request_id, Response::Light(response.clone())), + ); + assert_matches!( + poll(&mut behaviour), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. }) + ); + assert_matches!(chan.1.try_recv(), Ok(None)) + } + // Final invalid response + let request_id = *behaviour.outstanding.keys().next().unwrap(); + let responding_peer = behaviour.outstanding.values().next().unwrap().peer.clone(); + let response = { + let r = api::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), + } + }; + behaviour.inject_event( + responding_peer, + conn4, + Event::Response(request_id, Response::Light(response)), + ); + assert_matches!(poll(&mut behaviour), Poll::Pending); + assert_matches!( + chan.1.try_recv(), + Ok(Some(Err(ClientError::RemoteFetchFailed))) + ) + } + + fn issue_request(request: Request) { + let peer = PeerId::random(); + let pset = peerset(); + let mut behaviour = make_behaviour(true, pset.1, make_config()); + + let conn = ConnectionId::new(1); + behaviour.inject_connection_established(&peer, &conn, &empty_dialer()); + behaviour.inject_connected(&peer); + assert_eq!(1, behaviour.peers.len()); + + let response = match request { + Request::Body { .. } => unimplemented!(), + Request::Header { .. 
} => { + let r = api::v1::light::RemoteHeaderResponse { + header: dummy_header().encode(), + proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteHeaderResponse(r)), + } + } + Request::Read { .. } => { + let r = api::v1::light::RemoteReadResponse { + proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), + } + } + Request::ReadChild { .. } => { + let r = api::v1::light::RemoteReadResponse { + proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteReadResponse(r)), + } + } + Request::Call { .. } => { + let r = api::v1::light::RemoteCallResponse { + proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteCallResponse(r)), + } + } + Request::Changes { .. } => { + let r = api::v1::light::RemoteChangesResponse { + max: iter::repeat(1).take(32).collect(), + proof: Vec::new(), + roots: Vec::new(), + roots_proof: empty_proof(), + }; + api::v1::light::Response { + response: Some(api::v1::light::response::Response::RemoteChangesResponse(r)), + } + } + }; + + behaviour.request(request).unwrap(); + + assert_eq!(1, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()); + assert_matches!( + poll(&mut behaviour), + Poll::Ready(NetworkBehaviourAction::NotifyHandler { .. 
}) + ); + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(1, behaviour.outstanding.len()); + assert_eq!(1, *behaviour.outstanding.keys().next().unwrap()); + + behaviour.inject_event( + peer.clone(), + conn, + Event::Response(1, Response::Light(response)), + ); + + poll(&mut behaviour); + + assert_eq!(0, behaviour.pending_requests.len()); + assert_eq!(0, behaviour.outstanding.len()) + } + + #[test] + fn receives_remote_call_response() { + let mut chan = oneshot::channel(); + let request = fetcher::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + issue_request(Request::Call { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_response() { + let mut chan = oneshot::channel(); + let request = fetcher::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + issue_request(Request::Read { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_read_child_response() { + let info = CHILD_INFO.info(); + let mut chan = oneshot::channel(); + let request = fetcher::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: b":child_storage:sub".to_vec(), + keys: vec![b":key".to_vec()], + child_info: info.0.to_vec(), + child_type: info.1, + retry_count: None, + }; + issue_request(Request::ReadChild { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_header_response() { + let mut chan = oneshot::channel(); + let request = fetcher::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + issue_request(Request::Header { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), 
Ok(Some(Ok(_)))) + } + + #[test] + fn receives_remote_changes_response() { + let mut chan = oneshot::channel(); + let request = fetcher::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + issue_request(Request::Changes { + request, + sender: chan.0, + }); + assert_matches!(chan.1.try_recv(), Ok(Some(Ok(_)))) + } + + fn send_receive(request: Request) { + // We start a swarm on the listening side which awaits incoming requests and answers them: + let local_pset = peerset(); + let local_listen_addr: libp2p::Multiaddr = + libp2p::multiaddr::Protocol::Memory(rand::random()).into(); + let mut local_swarm = make_swarm(true, local_pset.1, make_config()); + Swarm::listen_on(&mut local_swarm, local_listen_addr.clone()).unwrap(); + + // We also start a swarm that makes requests and awaits responses: + let remote_pset = peerset(); + let mut remote_swarm = make_swarm(true, remote_pset.1, make_config()); + + // We now schedule a request, dial the remote and let the two swarm work it out: + remote_swarm.request(request).unwrap(); + Swarm::dial_addr(&mut remote_swarm, local_listen_addr).unwrap(); + + let future = { + let a = local_swarm.for_each(|_| future::ready(())); + let b = remote_swarm.for_each(|_| future::ready(())); + future::join(a, b).map(|_| ()) + }; + + task::spawn(future); + } + + #[test] + fn send_receive_call() { + let chan = oneshot::channel(); + let request = fetcher::RemoteCallRequest { + block: Default::default(), + header: dummy_header(), + method: "test".into(), + call_data: vec![], + retry_count: None, + }; + send_receive(Request::Call { + request, + sender: chan.0, + 
}); + assert_eq!(vec![42], task::block_on(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_execution_proof` + } + + #[test] + fn send_receive_read() { + let chan = oneshot::channel(); + let request = fetcher::RemoteReadRequest { + header: dummy_header(), + block: Default::default(), + keys: vec![b":key".to_vec()], + retry_count: None, + }; + send_receive(Request::Read { + request, + sender: chan.0, + }); + assert_eq!( + Some(vec![42]), + task::block_on(chan.1) + .unwrap() + .unwrap() + .remove(&b":key"[..]) + .unwrap() + ); + // ^--- from `DummyFetchChecker::check_read_proof` + } + + #[test] + fn send_receive_read_child() { + let info = CHILD_INFO.info(); + let chan = oneshot::channel(); + let request = fetcher::RemoteReadChildRequest { + header: dummy_header(), + block: Default::default(), + storage_key: b":child_storage:sub".to_vec(), + keys: vec![b":key".to_vec()], + child_info: info.0.to_vec(), + child_type: info.1, + retry_count: None, + }; + send_receive(Request::ReadChild { + request, + sender: chan.0, + }); + assert_eq!( + Some(vec![42]), + task::block_on(chan.1) + .unwrap() + .unwrap() + .remove(&b":key"[..]) + .unwrap() + ); + // ^--- from `DummyFetchChecker::check_read_child_proof` + } + + #[test] + fn send_receive_header() { + let _ = env_logger::try_init(); + let chan = oneshot::channel(); + let request = fetcher::RemoteHeaderRequest { + cht_root: Default::default(), + block: 1, + retry_count: None, + }; + send_receive(Request::Header { + request, + sender: chan.0, + }); + // The remote does not know block 1: + assert_matches!( + task::block_on(chan.1).unwrap(), + Err(ClientError::RemoteFetchFailed) + ); + } + + #[test] + fn send_receive_changes() { + let chan = oneshot::channel(); + let request = fetcher::RemoteChangesRequest { + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(sp_core::ChangesTrieConfiguration::new(4, 2)), + }], + first_block: (1, 
Default::default()), + last_block: (100, Default::default()), + max_block: (100, Default::default()), + tries_roots: (1, Default::default(), Vec::new()), + key: Vec::new(), + storage_key: None, + retry_count: None, + }; + send_receive(Request::Changes { + request, + sender: chan.0, + }); + assert_eq!(vec![(100, 2)], task::block_on(chan.1).unwrap().unwrap()); + // ^--- from `DummyFetchChecker::check_changes_proof` + } } diff --git a/client/network/src/protocol/message.rs b/client/network/src/protocol/message.rs index ae83b49e60..059ea182bd 100644 --- a/client/network/src/protocol/message.rs +++ b/client/network/src/protocol/message.rs @@ -16,551 +16,543 @@ //! Network packet message types. These get serialized and put into the lower level protocol payload. -use bitflags::bitflags; -use sp_runtime::{ConsensusEngineId, traits::{Block as BlockT, Header as HeaderT}}; -use codec::{Encode, Decode, Input, Output, Error}; pub use self::generic::{ - BlockAnnounce, RemoteCallRequest, RemoteReadRequest, - RemoteHeaderRequest, RemoteHeaderResponse, - RemoteChangesRequest, RemoteChangesResponse, - FinalityProofRequest, FinalityProofResponse, - FromBlock, RemoteReadChildRequest, Roles, + BlockAnnounce, FinalityProofRequest, FinalityProofResponse, FromBlock, RemoteCallRequest, + RemoteChangesRequest, RemoteChangesResponse, RemoteHeaderRequest, RemoteHeaderResponse, + RemoteReadChildRequest, RemoteReadRequest, Roles, }; +use bitflags::bitflags; +use codec::{Decode, Encode, Error, Input, Output}; use sc_client_api::StorageProof; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT}, + ConsensusEngineId, +}; /// A unique ID of a request. pub type RequestId = u64; /// Type alias for using the message type using block type parameters. 
pub type Message = generic::Message< - ::Header, - ::Hash, - <::Header as HeaderT>::Number, - ::Extrinsic, + ::Header, + ::Hash, + <::Header as HeaderT>::Number, + ::Extrinsic, >; /// Type alias for using the status type using block type parameters. -pub type Status = generic::Status< - ::Hash, - <::Header as HeaderT>::Number, ->; +pub type Status = + generic::Status<::Hash, <::Header as HeaderT>::Number>; /// Type alias for using the block request type using block type parameters. -pub type BlockRequest = generic::BlockRequest< - ::Hash, - <::Header as HeaderT>::Number, ->; +pub type BlockRequest = + generic::BlockRequest<::Hash, <::Header as HeaderT>::Number>; /// Type alias for using the BlockData type using block type parameters. -pub type BlockData = generic::BlockData< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockData = + generic::BlockData<::Header, ::Hash, ::Extrinsic>; /// Type alias for using the BlockResponse type using block type parameters. -pub type BlockResponse = generic::BlockResponse< - ::Header, - ::Hash, - ::Extrinsic, ->; +pub type BlockResponse = + generic::BlockResponse<::Header, ::Hash, ::Extrinsic>; /// A set of transactions. pub type Transactions = Vec; // Bits of block data and associated artifacts to request. bitflags! { - /// Node roles bitmask. - pub struct BlockAttributes: u8 { - /// Include block header. - const HEADER = 0b00000001; - /// Include block body. - const BODY = 0b00000010; - /// Include block receipt. - const RECEIPT = 0b00000100; - /// Include block message queue. - const MESSAGE_QUEUE = 0b00001000; - /// Include a justification for the block. - const JUSTIFICATION = 0b00010000; - } + /// Node roles bitmask. + pub struct BlockAttributes: u8 { + /// Include block header. + const HEADER = 0b00000001; + /// Include block body. + const BODY = 0b00000010; + /// Include block receipt. + const RECEIPT = 0b00000100; + /// Include block message queue. 
+ const MESSAGE_QUEUE = 0b00001000; + /// Include a justification for the block. + const JUSTIFICATION = 0b00010000; + } } impl Encode for BlockAttributes { - fn encode_to(&self, dest: &mut T) { - dest.push_byte(self.bits()) - } + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } } impl codec::EncodeLike for BlockAttributes {} impl Decode for BlockAttributes { - fn decode(input: &mut I) -> Result { - Self::from_bits(input.read_byte()?).ok_or_else(|| Error::from("Invalid bytes")) - } + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| Error::from("Invalid bytes")) + } } #[derive(Debug, PartialEq, Eq, Clone, Copy, Encode, Decode)] /// Block enumeration direction. pub enum Direction { - /// Enumerate in ascending order (from child to parent). - Ascending = 0, - /// Enumerate in descending order (from parent to canonical child). - Descending = 1, + /// Enumerate in ascending order (from child to parent). + Ascending = 0, + /// Enumerate in descending order (from parent to canonical child). + Descending = 1, } /// Block state in the chain. #[derive(Debug, PartialEq, Eq, Clone, Copy, Encode, Decode)] pub enum BlockState { - /// Block is not part of the best chain. - Normal, - /// Latest best block. - Best, + /// Block is not part of the best chain. + Normal, + /// Latest best block. + Best, } /// Remote call response. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct RemoteCallResponse { - /// Id of a request this response was made for. - pub id: RequestId, - /// Execution proof. - pub proof: StorageProof, + /// Id of a request this response was made for. + pub id: RequestId, + /// Execution proof. + pub proof: StorageProof, } #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] /// Remote read response. pub struct RemoteReadResponse { - /// Id of a request this response was made for. - pub id: RequestId, - /// Read proof. 
- pub proof: StorageProof, + /// Id of a request this response was made for. + pub id: RequestId, + /// Read proof. + pub proof: StorageProof, } /// Generic types. pub mod generic { - use bitflags::bitflags; - use codec::{Encode, Decode, Input, Output}; - use sp_runtime::Justification; - use super::{ - RemoteReadResponse, Transactions, Direction, - RequestId, BlockAttributes, RemoteCallResponse, ConsensusEngineId, - BlockState, StorageProof, - }; - - bitflags! { - /// Bitmask of the roles that a node fulfills. - pub struct Roles: u8 { - /// No network. - const NONE = 0b00000000; - /// Full node, does not participate in consensus. - const FULL = 0b00000001; - /// Light client node. - const LIGHT = 0b00000010; - /// Act as an authority - const AUTHORITY = 0b00000100; - } - } - - impl Roles { - /// Does this role represents a client that holds full chain data locally? - pub fn is_full(&self) -> bool { - self.intersects(Roles::FULL | Roles::AUTHORITY) - } - - /// Does this role represents a client that does not participates in the consensus? - pub fn is_authority(&self) -> bool { - *self == Roles::AUTHORITY - } - - /// Does this role represents a client that does not hold full chain data locally? - pub fn is_light(&self) -> bool { - !self.is_full() - } - } - - impl<'a> From<&'a crate::config::Role> for Roles { - fn from(roles: &'a crate::config::Role) -> Self { - match roles { - crate::config::Role::Full => Roles::FULL, - crate::config::Role::Light => Roles::LIGHT, - crate::config::Role::Sentry { .. } => Roles::AUTHORITY, - crate::config::Role::Authority { .. 
} => Roles::AUTHORITY, - } - } - } - - impl codec::Encode for Roles { - fn encode_to(&self, dest: &mut T) { - dest.push_byte(self.bits()) - } - } - - impl codec::EncodeLike for Roles {} - - impl codec::Decode for Roles { - fn decode(input: &mut I) -> Result { - Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) - } - } - - /// Consensus is mostly opaque to us - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct ConsensusMessage { - /// Identifies consensus engine. - pub engine_id: ConsensusEngineId, - /// Message payload. - pub data: Vec, - } - - /// Block data sent in the response. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct BlockData { - /// Block header hash. - pub hash: Hash, - /// Block header if requested. - pub header: Option

, - /// Block body if requested. - pub body: Option>, - /// Block receipt if requested. - pub receipt: Option>, - /// Block message queue if requested. - pub message_queue: Option>, - /// Justification if requested. - pub justification: Option, - } - - /// Identifies starting point of a block sequence. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub enum FromBlock { - /// Start with given hash. - Hash(Hash), - /// Start with given block number. - Number(Number), - } - - /// A network message. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub enum Message { - /// Status packet. - Status(Status), - /// Block request. - BlockRequest(BlockRequest), - /// Block response. - BlockResponse(BlockResponse), - /// Block announce. - BlockAnnounce(BlockAnnounce
), - /// Transactions. - Transactions(Transactions), - /// Consensus protocol message. - Consensus(ConsensusMessage), - /// Remote method call request. - RemoteCallRequest(RemoteCallRequest), - /// Remote method call response. - RemoteCallResponse(RemoteCallResponse), - /// Remote storage read request. - RemoteReadRequest(RemoteReadRequest), - /// Remote storage read response. - RemoteReadResponse(RemoteReadResponse), - /// Remote header request. - RemoteHeaderRequest(RemoteHeaderRequest), - /// Remote header response. - RemoteHeaderResponse(RemoteHeaderResponse
), - /// Remote changes request. - RemoteChangesRequest(RemoteChangesRequest), - /// Remote changes response. - RemoteChangesResponse(RemoteChangesResponse), - /// Remote child storage read request. - RemoteReadChildRequest(RemoteReadChildRequest), - /// Finality proof request. - FinalityProofRequest(FinalityProofRequest), - /// Finality proof response. - FinalityProofResponse(FinalityProofResponse), - /// Batch of consensus protocol messages. - ConsensusBatch(Vec), - } - - impl Message { - /// Message id useful for logging. - pub fn id(&self) -> &'static str { - match self { - Message::Status(_) => "Status", - Message::BlockRequest(_) => "BlockRequest", - Message::BlockResponse(_) => "BlockResponse", - Message::BlockAnnounce(_) => "BlockAnnounce", - Message::Transactions(_) => "Transactions", - Message::Consensus(_) => "Consensus", - Message::RemoteCallRequest(_) => "RemoteCallRequest", - Message::RemoteCallResponse(_) => "RemoteCallResponse", - Message::RemoteReadRequest(_) => "RemoteReadRequest", - Message::RemoteReadResponse(_) => "RemoteReadResponse", - Message::RemoteHeaderRequest(_) => "RemoteHeaderRequest", - Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", - Message::RemoteChangesRequest(_) => "RemoteChangesRequest", - Message::RemoteChangesResponse(_) => "RemoteChangesResponse", - Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", - Message::FinalityProofRequest(_) => "FinalityProofRequest", - Message::FinalityProofResponse(_) => "FinalityProofResponse", - Message::ConsensusBatch(_) => "ConsensusBatch", - } - } - } - - /// Status sent on connection. - // TODO https://github.com/paritytech/substrate/issues/4674: replace the `Status` - // struct with this one, after waiting a few releases beyond `NetworkSpecialization`'s - // removal (https://github.com/paritytech/substrate/pull/4665) - // - // and set MIN_VERSION to 6. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct CompactStatus { - /// Protocol version. 
- pub version: u32, - /// Minimum supported version. - pub min_supported_version: u32, - /// Supported roles. - pub roles: Roles, - /// Best block number. - pub best_number: Number, - /// Best block hash. - pub best_hash: Hash, - /// Genesis block hash. - pub genesis_hash: Hash, - } - - /// Status sent on connection. - #[derive(Debug, PartialEq, Eq, Clone, Encode)] - pub struct Status { - /// Protocol version. - pub version: u32, - /// Minimum supported version. - pub min_supported_version: u32, - /// Supported roles. - pub roles: Roles, - /// Best block number. - pub best_number: Number, - /// Best block hash. - pub best_hash: Hash, - /// Genesis block hash. - pub genesis_hash: Hash, - /// DEPRECATED. Chain-specific status. - pub chain_status: Vec, - } - - impl Decode for Status { - fn decode(value: &mut I) -> Result { - const LAST_CHAIN_STATUS_VERSION: u32 = 5; - let compact = CompactStatus::decode(value)?; - let chain_status = match >::decode(value) { - Ok(v) => v, - Err(e) => if compact.version <= LAST_CHAIN_STATUS_VERSION { - return Err(e) - } else { - Vec::new() - } - }; - - let CompactStatus { - version, - min_supported_version, - roles, - best_number, - best_hash, - genesis_hash, - } = compact; - - Ok(Status { - version, - min_supported_version, - roles, - best_number, - best_hash, - genesis_hash, - chain_status, - }) - } - } - - /// Request block data from a peer. - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct BlockRequest { - /// Unique request id. - pub id: RequestId, - /// Bits of block data to request. - pub fields: BlockAttributes, - /// Start from this block. - pub from: FromBlock, - /// End at this block. An implementation defined maximum is used when unspecified. - pub to: Option, - /// Sequence direction. - pub direction: Direction, - /// Maximum number of blocks to return. An implementation defined maximum is used when unspecified. 
- pub max: Option, - } - - /// Response to `BlockRequest` - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - pub struct BlockResponse { - /// Id of a request this response was made for. - pub id: RequestId, - /// Block data for the requested sequence. - pub blocks: Vec>, - } - - /// Announce a new complete relay chain block on the network. - #[derive(Debug, PartialEq, Eq, Clone)] - pub struct BlockAnnounce { - /// New block header. - pub header: H, - /// Block state. TODO: Remove `Option` and custom encoding when v4 becomes common. - pub state: Option, - /// Data associated with this block announcement, e.g. a candidate message. - pub data: Option>, - } - - // Custom Encode/Decode impl to maintain backwards compatibility with v3. - // This assumes that the packet contains nothing but the announcement message. - // TODO: Get rid of it once protocol v4 is common. - impl Encode for BlockAnnounce { - fn encode_to(&self, dest: &mut T) { - self.header.encode_to(dest); - if let Some(state) = &self.state { - state.encode_to(dest); - } - if let Some(data) = &self.data { - data.encode_to(dest) - } - } - } - - impl Decode for BlockAnnounce { - fn decode(input: &mut I) -> Result { - let header = H::decode(input)?; - let state = BlockState::decode(input).ok(); - let data = Vec::decode(input).ok(); - Ok(BlockAnnounce { - header, - state, - data, - }) - } - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote call request. - pub struct RemoteCallRequest { - /// Unique request id. - pub id: RequestId, - /// Block at which to perform call. - pub block: H, - /// Method name. - pub method: String, - /// Call data. - pub data: Vec, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote storage read request. - pub struct RemoteReadRequest { - /// Unique request id. - pub id: RequestId, - /// Block at which to perform call. - pub block: H, - /// Storage key. 
- pub keys: Vec>, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote storage read child request. - pub struct RemoteReadChildRequest { - /// Unique request id. - pub id: RequestId, - /// Block at which to perform call. - pub block: H, - /// Child Storage key. - pub storage_key: Vec, - /// Child trie source information. - pub child_info: Vec, - /// Child type, its required to resolve `child_info` - /// content and choose child implementation. - pub child_type: u32, - /// Storage key. - pub keys: Vec>, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote header request. - pub struct RemoteHeaderRequest { - /// Unique request id. - pub id: RequestId, - /// Block number to request header for. - pub block: N, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote header response. - pub struct RemoteHeaderResponse
{ - /// Id of a request this response was made for. - pub id: RequestId, - /// Header. None if proof generation has failed (e.g. header is unknown). - pub header: Option
, - /// Header proof. - pub proof: StorageProof, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote changes request. - pub struct RemoteChangesRequest { - /// Unique request id. - pub id: RequestId, - /// Hash of the first block of the range (including first) where changes are requested. - pub first: H, - /// Hash of the last block of the range (including last) where changes are requested. - pub last: H, - /// Hash of the first block for which the requester has the changes trie root. All other - /// affected roots must be proved. - pub min: H, - /// Hash of the last block that we can use when querying changes. - pub max: H, - /// Storage child node key which changes are requested. - pub storage_key: Option>, - /// Storage key which changes are requested. - pub key: Vec, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Remote changes response. - pub struct RemoteChangesResponse { - /// Id of a request this response was made for. - pub id: RequestId, - /// Proof has been generated using block with this number as a max block. Should be - /// less than or equal to the RemoteChangesRequest::max block number. - pub max: N, - /// Changes proof. - pub proof: Vec>, - /// Changes tries roots missing on the requester' node. - pub roots: Vec<(N, H)>, - /// Missing changes tries roots proof. - pub roots_proof: StorageProof, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Finality proof request. - pub struct FinalityProofRequest { - /// Unique request id. - pub id: RequestId, - /// Hash of the block to request proof for. - pub block: H, - /// Additional data blob (that both requester and provider understood) required for proving finality. - pub request: Vec, - } - - #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] - /// Finality proof response. - pub struct FinalityProofResponse { - /// Id of a request this response was made for. 
- pub id: RequestId, - /// Hash of the block (the same as in the FinalityProofRequest). - pub block: H, - /// Finality proof (if available). - pub proof: Option>, - } + use super::{ + BlockAttributes, BlockState, ConsensusEngineId, Direction, RemoteCallResponse, + RemoteReadResponse, RequestId, StorageProof, Transactions, + }; + use bitflags::bitflags; + use codec::{Decode, Encode, Input, Output}; + use sp_runtime::Justification; + + bitflags! { + /// Bitmask of the roles that a node fulfills. + pub struct Roles: u8 { + /// No network. + const NONE = 0b00000000; + /// Full node, does not participate in consensus. + const FULL = 0b00000001; + /// Light client node. + const LIGHT = 0b00000010; + /// Act as an authority + const AUTHORITY = 0b00000100; + } + } + + impl Roles { + /// Does this role represents a client that holds full chain data locally? + pub fn is_full(&self) -> bool { + self.intersects(Roles::FULL | Roles::AUTHORITY) + } + + /// Does this role represents a client that does not participates in the consensus? + pub fn is_authority(&self) -> bool { + *self == Roles::AUTHORITY + } + + /// Does this role represents a client that does not hold full chain data locally? + pub fn is_light(&self) -> bool { + !self.is_full() + } + } + + impl<'a> From<&'a crate::config::Role> for Roles { + fn from(roles: &'a crate::config::Role) -> Self { + match roles { + crate::config::Role::Full => Roles::FULL, + crate::config::Role::Light => Roles::LIGHT, + crate::config::Role::Sentry { .. } => Roles::AUTHORITY, + crate::config::Role::Authority { .. 
} => Roles::AUTHORITY, + } + } + } + + impl codec::Encode for Roles { + fn encode_to(&self, dest: &mut T) { + dest.push_byte(self.bits()) + } + } + + impl codec::EncodeLike for Roles {} + + impl codec::Decode for Roles { + fn decode(input: &mut I) -> Result { + Self::from_bits(input.read_byte()?).ok_or_else(|| codec::Error::from("Invalid bytes")) + } + } + + /// Consensus is mostly opaque to us + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct ConsensusMessage { + /// Identifies consensus engine. + pub engine_id: ConsensusEngineId, + /// Message payload. + pub data: Vec, + } + + /// Block data sent in the response. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockData { + /// Block header hash. + pub hash: Hash, + /// Block header if requested. + pub header: Option
, + /// Block body if requested. + pub body: Option>, + /// Block receipt if requested. + pub receipt: Option>, + /// Block message queue if requested. + pub message_queue: Option>, + /// Justification if requested. + pub justification: Option, + } + + /// Identifies starting point of a block sequence. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub enum FromBlock { + /// Start with given hash. + Hash(Hash), + /// Start with given block number. + Number(Number), + } + + /// A network message. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub enum Message { + /// Status packet. + Status(Status), + /// Block request. + BlockRequest(BlockRequest), + /// Block response. + BlockResponse(BlockResponse), + /// Block announce. + BlockAnnounce(BlockAnnounce
), + /// Transactions. + Transactions(Transactions), + /// Consensus protocol message. + Consensus(ConsensusMessage), + /// Remote method call request. + RemoteCallRequest(RemoteCallRequest), + /// Remote method call response. + RemoteCallResponse(RemoteCallResponse), + /// Remote storage read request. + RemoteReadRequest(RemoteReadRequest), + /// Remote storage read response. + RemoteReadResponse(RemoteReadResponse), + /// Remote header request. + RemoteHeaderRequest(RemoteHeaderRequest), + /// Remote header response. + RemoteHeaderResponse(RemoteHeaderResponse
), + /// Remote changes request. + RemoteChangesRequest(RemoteChangesRequest), + /// Remote changes response. + RemoteChangesResponse(RemoteChangesResponse), + /// Remote child storage read request. + RemoteReadChildRequest(RemoteReadChildRequest), + /// Finality proof request. + FinalityProofRequest(FinalityProofRequest), + /// Finality proof response. + FinalityProofResponse(FinalityProofResponse), + /// Batch of consensus protocol messages. + ConsensusBatch(Vec), + } + + impl Message { + /// Message id useful for logging. + pub fn id(&self) -> &'static str { + match self { + Message::Status(_) => "Status", + Message::BlockRequest(_) => "BlockRequest", + Message::BlockResponse(_) => "BlockResponse", + Message::BlockAnnounce(_) => "BlockAnnounce", + Message::Transactions(_) => "Transactions", + Message::Consensus(_) => "Consensus", + Message::RemoteCallRequest(_) => "RemoteCallRequest", + Message::RemoteCallResponse(_) => "RemoteCallResponse", + Message::RemoteReadRequest(_) => "RemoteReadRequest", + Message::RemoteReadResponse(_) => "RemoteReadResponse", + Message::RemoteHeaderRequest(_) => "RemoteHeaderRequest", + Message::RemoteHeaderResponse(_) => "RemoteHeaderResponse", + Message::RemoteChangesRequest(_) => "RemoteChangesRequest", + Message::RemoteChangesResponse(_) => "RemoteChangesResponse", + Message::RemoteReadChildRequest(_) => "RemoteReadChildRequest", + Message::FinalityProofRequest(_) => "FinalityProofRequest", + Message::FinalityProofResponse(_) => "FinalityProofResponse", + Message::ConsensusBatch(_) => "ConsensusBatch", + } + } + } + + /// Status sent on connection. + // TODO https://github.com/paritytech/substrate/issues/4674: replace the `Status` + // struct with this one, after waiting a few releases beyond `NetworkSpecialization`'s + // removal (https://github.com/paritytech/substrate/pull/4665) + // + // and set MIN_VERSION to 6. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct CompactStatus { + /// Protocol version. 
+ pub version: u32, + /// Minimum supported version. + pub min_supported_version: u32, + /// Supported roles. + pub roles: Roles, + /// Best block number. + pub best_number: Number, + /// Best block hash. + pub best_hash: Hash, + /// Genesis block hash. + pub genesis_hash: Hash, + } + + /// Status sent on connection. + #[derive(Debug, PartialEq, Eq, Clone, Encode)] + pub struct Status { + /// Protocol version. + pub version: u32, + /// Minimum supported version. + pub min_supported_version: u32, + /// Supported roles. + pub roles: Roles, + /// Best block number. + pub best_number: Number, + /// Best block hash. + pub best_hash: Hash, + /// Genesis block hash. + pub genesis_hash: Hash, + /// DEPRECATED. Chain-specific status. + pub chain_status: Vec, + } + + impl Decode for Status { + fn decode(value: &mut I) -> Result { + const LAST_CHAIN_STATUS_VERSION: u32 = 5; + let compact = CompactStatus::decode(value)?; + let chain_status = match >::decode(value) { + Ok(v) => v, + Err(e) => { + if compact.version <= LAST_CHAIN_STATUS_VERSION { + return Err(e); + } else { + Vec::new() + } + } + }; + + let CompactStatus { + version, + min_supported_version, + roles, + best_number, + best_hash, + genesis_hash, + } = compact; + + Ok(Status { + version, + min_supported_version, + roles, + best_number, + best_hash, + genesis_hash, + chain_status, + }) + } + } + + /// Request block data from a peer. + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockRequest { + /// Unique request id. + pub id: RequestId, + /// Bits of block data to request. + pub fields: BlockAttributes, + /// Start from this block. + pub from: FromBlock, + /// End at this block. An implementation defined maximum is used when unspecified. + pub to: Option, + /// Sequence direction. + pub direction: Direction, + /// Maximum number of blocks to return. An implementation defined maximum is used when unspecified. 
+ pub max: Option, + } + + /// Response to `BlockRequest` + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + pub struct BlockResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Block data for the requested sequence. + pub blocks: Vec>, + } + + /// Announce a new complete relay chain block on the network. + #[derive(Debug, PartialEq, Eq, Clone)] + pub struct BlockAnnounce { + /// New block header. + pub header: H, + /// Block state. TODO: Remove `Option` and custom encoding when v4 becomes common. + pub state: Option, + /// Data associated with this block announcement, e.g. a candidate message. + pub data: Option>, + } + + // Custom Encode/Decode impl to maintain backwards compatibility with v3. + // This assumes that the packet contains nothing but the announcement message. + // TODO: Get rid of it once protocol v4 is common. + impl Encode for BlockAnnounce { + fn encode_to(&self, dest: &mut T) { + self.header.encode_to(dest); + if let Some(state) = &self.state { + state.encode_to(dest); + } + if let Some(data) = &self.data { + data.encode_to(dest) + } + } + } + + impl Decode for BlockAnnounce { + fn decode(input: &mut I) -> Result { + let header = H::decode(input)?; + let state = BlockState::decode(input).ok(); + let data = Vec::decode(input).ok(); + Ok(BlockAnnounce { + header, + state, + data, + }) + } + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote call request. + pub struct RemoteCallRequest { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Method name. + pub method: String, + /// Call data. + pub data: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote storage read request. + pub struct RemoteReadRequest { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Storage key. 
+ pub keys: Vec>, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote storage read child request. + pub struct RemoteReadChildRequest { + /// Unique request id. + pub id: RequestId, + /// Block at which to perform call. + pub block: H, + /// Child Storage key. + pub storage_key: Vec, + /// Child trie source information. + pub child_info: Vec, + /// Child type, its required to resolve `child_info` + /// content and choose child implementation. + pub child_type: u32, + /// Storage key. + pub keys: Vec>, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote header request. + pub struct RemoteHeaderRequest { + /// Unique request id. + pub id: RequestId, + /// Block number to request header for. + pub block: N, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote header response. + pub struct RemoteHeaderResponse
{ + /// Id of a request this response was made for. + pub id: RequestId, + /// Header. None if proof generation has failed (e.g. header is unknown). + pub header: Option
, + /// Header proof. + pub proof: StorageProof, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote changes request. + pub struct RemoteChangesRequest { + /// Unique request id. + pub id: RequestId, + /// Hash of the first block of the range (including first) where changes are requested. + pub first: H, + /// Hash of the last block of the range (including last) where changes are requested. + pub last: H, + /// Hash of the first block for which the requester has the changes trie root. All other + /// affected roots must be proved. + pub min: H, + /// Hash of the last block that we can use when querying changes. + pub max: H, + /// Storage child node key which changes are requested. + pub storage_key: Option>, + /// Storage key which changes are requested. + pub key: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Remote changes response. + pub struct RemoteChangesResponse { + /// Id of a request this response was made for. + pub id: RequestId, + /// Proof has been generated using block with this number as a max block. Should be + /// less than or equal to the RemoteChangesRequest::max block number. + pub max: N, + /// Changes proof. + pub proof: Vec>, + /// Changes tries roots missing on the requester' node. + pub roots: Vec<(N, H)>, + /// Missing changes tries roots proof. + pub roots_proof: StorageProof, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Finality proof request. + pub struct FinalityProofRequest { + /// Unique request id. + pub id: RequestId, + /// Hash of the block to request proof for. + pub block: H, + /// Additional data blob (that both requester and provider understood) required for proving finality. + pub request: Vec, + } + + #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] + /// Finality proof response. + pub struct FinalityProofResponse { + /// Id of a request this response was made for. 
+ pub id: RequestId, + /// Hash of the block (the same as in the FinalityProofRequest). + pub block: H, + /// Finality proof (if available). + pub proof: Option>, + } } diff --git a/client/network/src/protocol/sync.rs b/client/network/src/protocol/sync.rs index b480f3abb9..ac66406670 100644 --- a/client/network/src/protocol/sync.rs +++ b/client/network/src/protocol/sync.rs @@ -27,27 +27,35 @@ //! order to update it. //! -use blocks::BlockCollection; -use sp_blockchain::{Error as ClientError, Info as BlockchainInfo, HeaderMetadata}; -use sp_consensus::{BlockOrigin, BlockStatus, - block_validation::{BlockAnnounceValidator, Validation}, - import_queue::{IncomingBlock, BlockImportResult, BlockImportError} -}; use crate::{ - config::BoxFinalityProofRequestBuilder, - protocol::message::{self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, BlockResponse, - FinalityProofResponse, Roles}, + config::BoxFinalityProofRequestBuilder, + protocol::message::{ + self, generic::FinalityProofRequest, BlockAnnounce, BlockAttributes, BlockRequest, + BlockResponse, FinalityProofResponse, Roles, + }, }; +use blocks::BlockCollection; use either::Either; use extra_requests::ExtraRequests; use libp2p::PeerId; -use log::{debug, trace, warn, info, error}; +use log::{debug, error, info, trace, warn}; +use sp_blockchain::{Error as ClientError, HeaderMetadata, Info as BlockchainInfo}; +use sp_consensus::{ + block_validation::{BlockAnnounceValidator, Validation}, + import_queue::{BlockImportError, BlockImportResult, IncomingBlock}, + BlockOrigin, BlockStatus, +}; use sp_runtime::{ - Justification, - generic::BlockId, - traits::{Block as BlockT, Header, NumberFor, Zero, One, CheckedSub, SaturatedConversion} + generic::BlockId, + traits::{Block as BlockT, CheckedSub, Header, NumberFor, One, SaturatedConversion, Zero}, + Justification, +}; +use std::{ + collections::{HashMap, HashSet, VecDeque}, + fmt, + ops::Range, + sync::Arc, }; -use std::{fmt, ops::Range, 
collections::{HashMap, HashSet, VecDeque}, sync::Arc}; mod blocks; mod extra_requests; @@ -72,110 +80,110 @@ const MAJOR_SYNC_BLOCKS: u8 = 5; const ANNOUNCE_HISTORY_SIZE: usize = 64; mod rep { - use sc_peerset::ReputationChange as Rep; - /// Reputation change when a peer sent us a message that led to a - /// database read error. - pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); + use sc_peerset::ReputationChange as Rep; + /// Reputation change when a peer sent us a message that led to a + /// database read error. + pub const BLOCKCHAIN_READ_ERROR: Rep = Rep::new(-(1 << 16), "DB Error"); - /// Reputation change when a peer sent us a status message with a different - /// genesis than us. - pub const GENESIS_MISMATCH: Rep = Rep::new(i32::min_value(), "Genesis mismatch"); + /// Reputation change when a peer sent us a status message with a different + /// genesis than us. + pub const GENESIS_MISMATCH: Rep = Rep::new(i32::min_value(), "Genesis mismatch"); - /// Reputation change for peers which send us a block with an incomplete header. - pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); + /// Reputation change for peers which send us a block with an incomplete header. + pub const INCOMPLETE_HEADER: Rep = Rep::new(-(1 << 20), "Incomplete header"); - /// Reputation change for peers which send us a block which we fail to verify. - pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); + /// Reputation change for peers which send us a block which we fail to verify. + pub const VERIFICATION_FAIL: Rep = Rep::new(-(1 << 29), "Block verification failed"); - /// Reputation change for peers which send us a known bad block. - pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block"); + /// Reputation change for peers which send us a known bad block. + pub const BAD_BLOCK: Rep = Rep::new(-(1 << 29), "Bad block"); - /// Reputation change for peers which send us a known block. 
- pub const KNOWN_BLOCK: Rep = Rep::new(-(1 << 29), "Duplicate block"); + /// Reputation change for peers which send us a known block. + pub const KNOWN_BLOCK: Rep = Rep::new(-(1 << 29), "Duplicate block"); - /// Reputation change for peers which send us a block with bad justifications. - pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); + /// Reputation change for peers which send us a block with bad justifications. + pub const BAD_JUSTIFICATION: Rep = Rep::new(-(1 << 16), "Bad justification"); - /// Reputation change for peers which send us a block with bad finality proof. - pub const BAD_FINALITY_PROOF: Rep = Rep::new(-(1 << 16), "Bad finality proof"); + /// Reputation change for peers which send us a block with bad finality proof. + pub const BAD_FINALITY_PROOF: Rep = Rep::new(-(1 << 16), "Bad finality proof"); - /// Reputation change when a peer sent us invlid ancestry result. - pub const UNKNOWN_ANCESTOR:Rep = Rep::new(-(1 << 16), "DB Error"); + /// Reputation change when a peer sent us invlid ancestry result. + pub const UNKNOWN_ANCESTOR: Rep = Rep::new(-(1 << 16), "DB Error"); } /// The main data structure which contains all the state for a chains /// active syncing strategy. pub struct ChainSync { - /// Chain client. - client: Arc>, - /// The active peers that we are using to sync and their PeerSync status - peers: HashMap>, - /// A `BlockCollection` of blocks that are being downloaded from peers - blocks: BlockCollection, - /// The best block number in our queue of blocks to import - best_queued_number: NumberFor, - /// The best block hash in our queue of blocks to import - best_queued_hash: B::Hash, - /// The role of this node, e.g. light or full - role: Roles, - /// What block attributes we require for this node, usually derived from - /// what role we are, but could be customized - required_block_attributes: message::BlockAttributes, - /// Any extra finality proof requests. 
- extra_finality_proofs: ExtraRequests, - /// Any extra justification requests. - extra_justifications: ExtraRequests, - /// A set of hashes of blocks that are being downloaded or have been - /// downloaded and are queued for import. - queue_blocks: HashSet, - /// The best block number that was successfully imported into the chain. - /// This can not decrease. - best_imported_number: NumberFor, - /// Finality proof handler. - request_builder: Option>, - /// Fork sync targets. - fork_targets: HashMap>, - /// A flag that caches idle state with no pending requests. - is_idle: bool, - /// A type to check incoming block announcements. - block_announce_validator: Box + Send>, - /// Maximum number of peers to ask the same blocks in parallel. - max_parallel_downloads: u32, - /// Total number of processed blocks (imported or failed). - processed_blocks: usize, + /// Chain client. + client: Arc>, + /// The active peers that we are using to sync and their PeerSync status + peers: HashMap>, + /// A `BlockCollection` of blocks that are being downloaded from peers + blocks: BlockCollection, + /// The best block number in our queue of blocks to import + best_queued_number: NumberFor, + /// The best block hash in our queue of blocks to import + best_queued_hash: B::Hash, + /// The role of this node, e.g. light or full + role: Roles, + /// What block attributes we require for this node, usually derived from + /// what role we are, but could be customized + required_block_attributes: message::BlockAttributes, + /// Any extra finality proof requests. + extra_finality_proofs: ExtraRequests, + /// Any extra justification requests. + extra_justifications: ExtraRequests, + /// A set of hashes of blocks that are being downloaded or have been + /// downloaded and are queued for import. + queue_blocks: HashSet, + /// The best block number that was successfully imported into the chain. + /// This can not decrease. + best_imported_number: NumberFor, + /// Finality proof handler. 
+ request_builder: Option>, + /// Fork sync targets. + fork_targets: HashMap>, + /// A flag that caches idle state with no pending requests. + is_idle: bool, + /// A type to check incoming block announcements. + block_announce_validator: Box + Send>, + /// Maximum number of peers to ask the same blocks in parallel. + max_parallel_downloads: u32, + /// Total number of processed blocks (imported or failed). + processed_blocks: usize, } /// All the data we have about a Peer that we are trying to sync with #[derive(Debug, Clone)] pub struct PeerSync { - /// The common number is the block number that is a common point of - /// ancestry for both our chains (as far as we know). - pub common_number: NumberFor, - /// The hash of the best block that we've seen for this peer. - pub best_hash: B::Hash, - /// The number of the best block that we've seen for this peer. - pub best_number: NumberFor, - /// The state of syncing this peer is in for us, generally categories - /// into `Available` or "busy" with something as defined by `PeerSyncState`. - pub state: PeerSyncState, - /// A queue of blocks that this peer has announced to us, should only - /// contain `ANNOUNCE_HISTORY_SIZE` entries. - pub recently_announced: VecDeque + /// The common number is the block number that is a common point of + /// ancestry for both our chains (as far as we know). + pub common_number: NumberFor, + /// The hash of the best block that we've seen for this peer. + pub best_hash: B::Hash, + /// The number of the best block that we've seen for this peer. + pub best_number: NumberFor, + /// The state of syncing this peer is in for us, generally categories + /// into `Available` or "busy" with something as defined by `PeerSyncState`. + pub state: PeerSyncState, + /// A queue of blocks that this peer has announced to us, should only + /// contain `ANNOUNCE_HISTORY_SIZE` entries. 
+ pub recently_announced: VecDeque, } /// The sync status of a peer we are trying to sync with #[derive(Debug)] pub struct PeerInfo { - /// Their best block hash. - pub best_hash: B::Hash, - /// Their best block number. - pub best_number: NumberFor + /// Their best block hash. + pub best_hash: B::Hash, + /// Their best block number. + pub best_number: NumberFor, } struct ForkTarget { - number: NumberFor, - parent_hash: Option, - peers: HashSet, + number: NumberFor, + parent_hash: Option, + peers: HashSet, } /// The state of syncing between a Peer and ourselves. @@ -184,56 +192,56 @@ struct ForkTarget { /// defines what we are busy with. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum PeerSyncState { - /// Available for sync requests. - Available, - /// Searching for ancestors the Peer has in common with us. - AncestorSearch { - start: NumberFor, - current: NumberFor, - state: AncestorSearchState, - }, - /// Actively downloading new blocks, starting from the given Number. - DownloadingNew(NumberFor), - /// Downloading a stale block with given Hash. Stale means that it is a - /// block with a number that is lower than our best number. It might be - /// from a fork and not necessarily already imported. - DownloadingStale(B::Hash), - /// Downloading justification for given block hash. - DownloadingJustification(B::Hash), - /// Downloading finality proof for given block hash. - DownloadingFinalityProof(B::Hash) + /// Available for sync requests. + Available, + /// Searching for ancestors the Peer has in common with us. + AncestorSearch { + start: NumberFor, + current: NumberFor, + state: AncestorSearchState, + }, + /// Actively downloading new blocks, starting from the given Number. + DownloadingNew(NumberFor), + /// Downloading a stale block with given Hash. Stale means that it is a + /// block with a number that is lower than our best number. It might be + /// from a fork and not necessarily already imported. 
+ DownloadingStale(B::Hash), + /// Downloading justification for given block hash. + DownloadingJustification(B::Hash), + /// Downloading finality proof for given block hash. + DownloadingFinalityProof(B::Hash), } impl PeerSyncState { - pub fn is_available(&self) -> bool { - if let PeerSyncState::Available = self { - true - } else { - false - } - } + pub fn is_available(&self) -> bool { + if let PeerSyncState::Available = self { + true + } else { + false + } + } } /// Reported sync state. #[derive(Clone, Eq, PartialEq, Debug)] pub enum SyncState { - /// Initial sync is complete, keep-up sync is active. - Idle, - /// Actively catching up with the chain. - Downloading + /// Initial sync is complete, keep-up sync is active. + Idle, + /// Actively catching up with the chain. + Downloading, } /// Syncing status and statistics. #[derive(Clone)] pub struct Status { - /// Current global sync state. - pub state: SyncState, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_peers: u32, - /// Number of blocks queued for import - pub queued_blocks: u32, + /// Current global sync state. + pub state: SyncState, + /// Target sync block number. + pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_peers: u32, + /// Number of blocks queued for import + pub queued_blocks: u32, } /// A peer did not behave as expected and should be reported. @@ -241,9 +249,9 @@ pub struct Status { pub struct BadPeer(pub PeerId, pub sc_peerset::ReputationChange); impl fmt::Display for BadPeer { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Bad peer {}; Reputation change: {:?}", self.0, self.1) + } } impl std::error::Error for BadPeer {} @@ -251,1047 +259,1173 @@ impl std::error::Error for BadPeer {} /// Result of [`ChainSync::on_block_data`]. 
#[derive(Debug, Clone, PartialEq, Eq)] pub enum OnBlockData { - /// The block should be imported. - Import(BlockOrigin, Vec>), - /// A new block request needs to be made to the given peer. - Request(PeerId, BlockRequest) + /// The block should be imported. + Import(BlockOrigin, Vec>), + /// A new block request needs to be made to the given peer. + Request(PeerId, BlockRequest), } /// Result of [`ChainSync::on_block_announce`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum OnBlockAnnounce { - /// The announcement does not require further handling. - Nothing, - /// The announcement header should be imported. - ImportHeader, + /// The announcement does not require further handling. + Nothing, + /// The announcement header should be imported. + ImportHeader, } /// Result of [`ChainSync::on_block_justification`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum OnBlockJustification { - /// The justification needs no further handling. - Nothing, - /// The justification should be imported. - Import { - peer: PeerId, - hash: B::Hash, - number: NumberFor, - justification: Justification - } + /// The justification needs no further handling. + Nothing, + /// The justification should be imported. + Import { + peer: PeerId, + hash: B::Hash, + number: NumberFor, + justification: Justification, + }, } /// Result of [`ChainSync::on_block_finality_proof`]. #[derive(Debug, Clone, PartialEq, Eq)] pub enum OnBlockFinalityProof { - /// The proof needs no further handling. - Nothing, - /// The proof should be imported. - Import { - peer: PeerId, - hash: B::Hash, - number: NumberFor, - proof: Vec - } + /// The proof needs no further handling. + Nothing, + /// The proof should be imported. + Import { + peer: PeerId, + hash: B::Hash, + number: NumberFor, + proof: Vec, + }, } impl ChainSync { - /// Create a new instance. 
- pub fn new( - role: Roles, - client: Arc>, - info: &BlockchainInfo, - request_builder: Option>, - block_announce_validator: Box + Send>, - max_parallel_downloads: u32, - ) -> Self { - let mut required_block_attributes = BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION; - - if role.is_full() { - required_block_attributes |= BlockAttributes::BODY - } - - ChainSync { - client, - peers: HashMap::new(), - blocks: BlockCollection::new(), - best_queued_hash: info.best_hash, - best_queued_number: info.best_number, - best_imported_number: info.best_number, - extra_finality_proofs: ExtraRequests::new("finality proof"), - extra_justifications: ExtraRequests::new("justification"), - role, - required_block_attributes, - queue_blocks: Default::default(), - request_builder, - fork_targets: Default::default(), - is_idle: false, - block_announce_validator, - max_parallel_downloads, - processed_blocks: 0, - } - } - - /// Returns the state of the sync of the given peer. - /// - /// Returns `None` if the peer is unknown. - pub fn peer_info(&self, who: &PeerId) -> Option> { - self.peers.get(who).map(|p| PeerInfo { best_hash: p.best_hash, best_number: p.best_number }) - } - - /// Returns the current sync status. - pub fn status(&self) -> Status { - let best_seen = self.peers.values().max_by_key(|p| p.best_number).map(|p| p.best_number); - let sync_state = - if let Some(n) = best_seen { - // A chain is classified as downloading if the provided best block is - // more than `MAJOR_SYNC_BLOCKS` behind the best queued block. - if n > self.best_queued_number && n - self.best_queued_number > MAJOR_SYNC_BLOCKS.into() { - SyncState::Downloading - } else { - SyncState::Idle - } - } else { - SyncState::Idle - }; - - Status { - state: sync_state, - best_seen_block: best_seen, - num_peers: self.peers.len() as u32, - queued_blocks: self.queue_blocks.len() as u32, - } - } - - /// Number of active sync requests. 
- pub fn num_sync_requests(&self) -> usize { - self.fork_targets.len() - } - - /// Number of processed blocks. - pub fn num_processed_blocks(&self) -> usize { - self.processed_blocks - } - - /// Handle a new connected peer. - /// - /// Call this method whenever we connect to a new peer. - pub fn new_peer(&mut self, who: PeerId, best_hash: B::Hash, best_number: NumberFor) - -> Result>, BadPeer> - { - // There is nothing sync can get from the node that has no blockchain data. - match self.block_status(&best_hash) { - Err(e) => { - debug!(target:"sync", "Error reading blockchain: {:?}", e); - Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) - } - Ok(BlockStatus::KnownBad) => { - info!("💔 New peer with known bad best block {} ({}).", best_hash, best_number); - Err(BadPeer(who, rep::BAD_BLOCK)) - } - Ok(BlockStatus::Unknown) => { - if best_number.is_zero() { - info!("💔 New peer with unknown genesis hash {} ({}).", best_hash, best_number); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)); - } - // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have - // enough to do in the import queue that it's not worth kicking off - // an ancestor search, which is what we do in the next match case below. - if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { - debug!( - target:"sync", - "New peer with unknown best hash {} ({}), assuming common block.", - self.best_queued_hash, - self.best_queued_number - ); - self.peers.insert(who, PeerSync { - common_number: self.best_queued_number, - best_hash, - best_number, - state: PeerSyncState::Available, - recently_announced: Default::default() - }); - return Ok(None) - } - - // If we are at genesis, just start downloading. 
- if self.best_queued_number.is_zero() { - debug!(target:"sync", "New peer with best hash {} ({}).", best_hash, best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: Zero::zero(), - best_hash, - best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); - self.is_idle = false; - return Ok(None) - } - - let common_best = std::cmp::min(self.best_queued_number, best_number); - - debug!(target:"sync", - "New peer with unknown best hash {} ({}), searching for common ancestor.", - best_hash, - best_number - ); - - self.peers.insert(who, PeerSync { - common_number: Zero::zero(), - best_hash, - best_number, - state: PeerSyncState::AncestorSearch { - current: common_best, - start: self.best_queued_number, - state: AncestorSearchState::ExponentialBackoff(One::one()), - }, - recently_announced: Default::default() - }); - self.is_idle = false; - - Ok(Some(ancestry_request::(common_best))) - } - Ok(BlockStatus::Queued) | Ok(BlockStatus::InChainWithState) | Ok(BlockStatus::InChainPruned) => { - debug!(target:"sync", "New peer with known best hash {} ({}).", best_hash, best_number); - self.peers.insert(who.clone(), PeerSync { - common_number: best_number, - best_hash, - best_number, - state: PeerSyncState::Available, - recently_announced: Default::default(), - }); - self.is_idle = false; - Ok(None) - } - } - } - - /// Signal that `best_header` has been queued for import and update the - /// `ChainSync` state with that information. - pub fn update_chain_info(&mut self, best_header: &B::Header) { - self.on_block_queued(&best_header.hash(), *best_header.number()) - } - - /// Schedule a justification request for the given block. - pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_justifications.schedule((*hash, number), |base, block| { - is_descendent_of(&**client, base, block) - }) - } - - /// Schedule a finality proof request for the given block. 
- pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - self.extra_finality_proofs.schedule((*hash, number), |base, block| { - is_descendent_of(&**client, base, block) - }) - } - - /// Request syncing for the given block from given set of peers. - // The implementation is similar to on_block_announce with unknown parent hash. - pub fn set_sync_fork_request(&mut self, mut peers: Vec, hash: &B::Hash, number: NumberFor) { - if peers.is_empty() { - debug!( - target: "sync", - "Explicit sync request for block {:?} with no peers specified. \ - Syncing from all connected peers {:?} instead.", - hash, peers, - ); - - peers = self.peers.iter() - // Only request blocks from peers who are ahead or on a par. - .filter(|(_, peer)| peer.best_number >= number) - .map(|(id, _)| id.clone()) - .collect(); - } else { - debug!(target: "sync", "Explicit sync request for block {:?} with {:?}", hash, peers); - } - - if self.is_known(&hash) { - debug!(target: "sync", "Refusing to sync known hash {:?}", hash); - return; - } - - trace!(target: "sync", "Downloading requested old fork {:?}", hash); - self.is_idle = false; - for peer_id in &peers { - if let Some(peer) = self.peers.get_mut(peer_id) { - if let PeerSyncState::AncestorSearch {..} = peer.state { - continue; - } - - if number > peer.best_number { - peer.best_number = number; - peer.best_hash = hash.clone(); - } - } - } - - self.fork_targets - .entry(hash.clone()) - .or_insert_with(|| ForkTarget { - number, - peers: Default::default(), - parent_hash: None, - }) - .peers.extend(peers); - } - - /// Get an iterator over all scheduled justification requests. 
- pub fn justification_requests(&mut self) -> impl Iterator)> + '_ { - let peers = &mut self.peers; - let mut matcher = self.extra_justifications.matcher(); - std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(&peers) { - peers.get_mut(&peer) - .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") - .state = PeerSyncState::DownloadingJustification(request.0); - let req = message::generic::BlockRequest { - id: 0, - fields: BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Hash(request.0), - to: None, - direction: message::Direction::Ascending, - max: Some(1) - }; - Some((peer, req)) - } else { - None - } - }) - } - - /// Get an iterator over all scheduled finality proof requests. - pub fn finality_proof_requests(&mut self) -> impl Iterator)> + '_ { - let peers = &mut self.peers; - let request_builder = &mut self.request_builder; - let mut matcher = self.extra_finality_proofs.matcher(); - std::iter::from_fn(move || { - if let Some((peer, request)) = matcher.next(&peers) { - peers.get_mut(&peer) - .expect("`Matcher::next` guarantees the `PeerId` comes from the given peers; qed") - .state = PeerSyncState::DownloadingFinalityProof(request.0); - let req = message::generic::FinalityProofRequest { - id: 0, - block: request.0, - request: request_builder.as_mut() - .map(|builder| builder.build_request_data(&request.0)) - .unwrap_or_default() - }; - Some((peer, req)) - } else { - None - } - }) - } - - /// Get an iterator over all block requests of all peers. 
- pub fn block_requests(&mut self) -> impl Iterator)> + '_ { - if self.is_idle { - return Either::Left(std::iter::empty()) - } - if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { - trace!(target: "sync", "Too many blocks in the queue."); - return Either::Left(std::iter::empty()) - } - let major_sync = self.status().state == SyncState::Downloading; - let blocks = &mut self.blocks; - let attrs = &self.required_block_attributes; - let fork_targets = &mut self.fork_targets; - let mut have_requests = false; - let last_finalized = self.client.info().finalized_number; - let best_queued = self.best_queued_number; - let client = &self.client; - let queue = &self.queue_blocks; - let max_parallel = if major_sync { 1 } else { self.max_parallel_downloads }; - let iter = self.peers.iter_mut().filter_map(move |(id, peer)| { - if !peer.state.is_available() { - trace!(target: "sync", "Peer {} is busy", id); - return None - } - if let Some((range, req)) = peer_block_request( - id, - peer, - blocks, - attrs, - max_parallel, - last_finalized, - best_queued, - ) { - peer.state = PeerSyncState::DownloadingNew(range.start); - trace!( - target: "sync", - "New block request for {}, (best:{}, common:{}) {:?}", - id, - peer.best_number, - peer.common_number, - req, - ); - have_requests = true; - Some((id.clone(), req)) - } else if let Some((hash, req)) = fork_sync_request( - id, - fork_targets, - best_queued, - last_finalized, - attrs, - |hash| if queue.contains(hash) { - BlockStatus::Queued - } else { - client.block_status(&BlockId::Hash(*hash)).unwrap_or(BlockStatus::Unknown) - }, - ) { - trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); - peer.state = PeerSyncState::DownloadingStale(hash); - have_requests = true; - Some((id.clone(), req)) - } else { - None - } - }); - if !have_requests { - self.is_idle = true; - } - Either::Right(iter) - } - - /// Handle a response from the remote to a block request that we made. 
- /// - /// `request` must be the original request that triggered `response`. - /// or `None` if data comes from the block announcement. - /// - /// If this corresponds to a valid block, this outputs the block that - /// must be imported in the import queue. - pub fn on_block_data - (&mut self, who: PeerId, request: Option>, response: BlockResponse) -> Result, BadPeer> - { - let mut new_blocks: Vec> = - if let Some(peer) = self.peers.get_mut(&who) { - let mut blocks = response.blocks; - if request.as_ref().map_or(false, |r| r.direction == message::Direction::Descending) { - trace!(target: "sync", "Reversing incoming block list"); - blocks.reverse() - } - self.is_idle = false; - if request.is_some() { - match &mut peer.state { - PeerSyncState::DownloadingNew(start_block) => { - self.blocks.clear_peer_download(&who); - self.blocks.insert(*start_block, blocks, who.clone()); - peer.state = PeerSyncState::Available; - self.blocks - .drain(self.best_queued_number + One::one()) - .into_iter() - .map(|block_data| { - IncomingBlock { - hash: block_data.block.hash, - header: block_data.block.header, - body: block_data.block.body, - justification: block_data.block.justification, - origin: block_data.origin, - allow_missing_state: true, - import_existing: false, - } - }).collect() - } - PeerSyncState::DownloadingStale(_) => { - peer.state = PeerSyncState::Available; - blocks.into_iter().map(|b| { - IncomingBlock { - hash: b.hash, - header: b.header, - body: b.body, - justification: b.justification, - origin: Some(who.clone()), - allow_missing_state: true, - import_existing: false, - } - }).collect() - } - PeerSyncState::AncestorSearch { current, start, state } => { - let matching_hash = match (blocks.get(0), self.client.hash(*current)) { - (Some(block), Ok(maybe_our_block_hash)) => { - trace!(target: "sync", "Got ancestry block #{} ({}) from peer {}", current, block.hash, who); - maybe_our_block_hash.filter(|x| x == &block.hash) - }, - (None, _) => { - debug!(target: "sync", 
"Invalid response when searching for ancestor from {}", who); - return Err(BadPeer(who, rep::UNKNOWN_ANCESTOR)) - }, - (_, Err(e)) => { - info!("❌ Error answering legitimate blockchain query: {:?}", e); - return Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) - } - }; - if matching_hash.is_some() { - if *start < self.best_queued_number && self.best_queued_number <= peer.best_number { - // We've made progress on this chain since the search was started. - // Opportunistically set common number to updated number - // instead of the one that started the search. - peer.common_number = self.best_queued_number; - } - else if peer.common_number < *current { - peer.common_number = *current; - } - } - if matching_hash.is_none() && current.is_zero() { - trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); - return Err(BadPeer(who, rep::GENESIS_MISMATCH)) - } - if let Some((next_state, next_num)) = handle_ancestor_search_state(state, *current, matching_hash.is_some()) { - peer.state = PeerSyncState::AncestorSearch { - current: next_num, - start: *start, - state: next_state, - }; - return Ok(OnBlockData::Request(who, ancestry_request::(next_num))) - } else { - // Ancestry search is complete. Check if peer is on a stale fork unknown to us and - // add it to sync targets if necessary. - trace!(target: "sync", "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", - self.best_queued_hash, - self.best_queued_number, - peer.best_hash, - peer.best_number, - matching_hash, - peer.common_number, - ); - if peer.common_number < peer.best_number - && peer.best_number < self.best_queued_number - { - trace!(target: "sync", "Added fork target {} for {}" , peer.best_hash, who); - self.fork_targets - .entry(peer.best_hash.clone()) - .or_insert_with(|| ForkTarget { - number: peer.best_number, - parent_hash: None, - peers: Default::default(), - }) - .peers.insert(who.clone()); - } - peer.state = PeerSyncState::Available; - Vec::new() - } - } - - | PeerSyncState::Available - | PeerSyncState::DownloadingJustification(..) - | PeerSyncState::DownloadingFinalityProof(..) => Vec::new() - } - } else { - // When request.is_none() this is a block announcement. Just accept blocks. - blocks.into_iter().map(|b| { - IncomingBlock { - hash: b.hash, - header: b.header, - body: b.body, - justification: b.justification, - origin: Some(who.clone()), - allow_missing_state: true, - import_existing: false, - } - }).collect() - } - } else { - Vec::new() - }; - - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. - let is_recent = new_blocks.first() - .map(|block| { - self.peers.iter().any(|(_, peer)| peer.recently_announced.contains(&block.hash)) - }) - .unwrap_or(false); - - if !is_recent && new_blocks.last().map_or(false, |b| self.is_known(&b.hash)) { - // When doing initial sync we don't request blocks in parallel. - // So the only way this can happen is when peers lie about the - // common block. 
- debug!(target: "sync", "Ignoring known blocks from {}", who); - return Err(BadPeer(who, rep::KNOWN_BLOCK)); - } - let orig_len = new_blocks.len(); - new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); - if new_blocks.len() != orig_len { - debug!(target: "sync", "Ignoring {} blocks that are already queued", orig_len - new_blocks.len()); - } - - let origin = - if is_recent { - BlockOrigin::NetworkBroadcast - } else { - BlockOrigin::NetworkInitialSync - }; - - if let Some((h, n)) = new_blocks.last().and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) { - trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), h, origin); - self.on_block_queued(h, n) - } - - self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); - - Ok(OnBlockData::Import(origin, new_blocks)) - } - - /// Handle a response from the remote to a justification request that we made. - /// - /// `request` must be the original request that triggered `response`. - /// - /// Returns `Some` if this produces a justification that must be imported - /// into the import queue. 
- pub fn on_block_justification - (&mut self, who: PeerId, response: BlockResponse) -> Result, BadPeer> - { - let peer = - if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); - return Ok(OnBlockJustification::Nothing) - }; - - self.is_idle = false; - if let PeerSyncState::DownloadingJustification(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one justification at a time - let justification = if let Some(block) = response.blocks.into_iter().next() { - if hash != block.hash { - info!( - target: "sync", - "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash - ); - return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); - } - - block.justification - } else { - // we might have asked the peer for a justification on a block that we assumed it - // had but didn't (regardless of whether it had a justification for it or not). - trace!(target: "sync", - "Peer {:?} provided empty response for justification request {:?}", - who, - hash, - ); - - None - }; - - if let Some((peer, hash, number, j)) = self.extra_justifications.on_response(who, justification) { - return Ok(OnBlockJustification::Import { peer, hash, number, justification: j }) - } - } - - Ok(OnBlockJustification::Nothing) - } - - /// Handle new finality proof data. - pub fn on_block_finality_proof - (&mut self, who: PeerId, resp: FinalityProofResponse) -> Result, BadPeer> - { - let peer = - if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_finality_proof_data with a bad peer ID"); - return Ok(OnBlockFinalityProof::Nothing) - }; - - self.is_idle = false; - if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state { - peer.state = PeerSyncState::Available; - - // We only request one finality proof at a time. 
- if hash != resp.block { - info!( - target: "sync", - "💔 Invalid block finality proof provided: requested: {:?} got: {:?}", - hash, - resp.block - ); - return Err(BadPeer(who, rep::BAD_FINALITY_PROOF)); - } - - if let Some((peer, hash, number, p)) = self.extra_finality_proofs.on_response(who, resp.proof) { - return Ok(OnBlockFinalityProof::Import { peer, hash, number, proof: p }) - } - } - - Ok(OnBlockFinalityProof::Nothing) - } - - /// A batch of blocks have been processed, with or without errors. - /// - /// Call this when a batch of blocks have been processed by the import - /// queue, with or without errors. - /// - /// `peer_info` is passed in case of a restart. - pub fn on_blocks_processed<'a>( - &'a mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)>, - ) -> impl Iterator), BadPeer>> + 'a { - trace!(target: "sync", "Imported {} of {}", imported, count); - - let mut output = Vec::new(); - - let mut has_error = false; - for (_, hash) in &results { - self.queue_blocks.remove(&hash); - } - self.processed_blocks += results.len(); - - for (result, hash) in results { - if has_error { - continue; - } - - if result.is_err() { - has_error = true; - } - - match result { - Ok(BlockImportResult::ImportedKnown(_number)) => {} - Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { - if aux.clear_justification_requests { - trace!( - target: "sync", - "Block imported clears all pending justification requests {}: {:?}", - number, - hash - ); - self.extra_justifications.reset() - } - - if aux.needs_justification { - trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); - self.request_justification(&hash, number); - } - - if aux.bad_justification { - if let Some(peer) = who { - info!("💔 Sent block with bad justification to import"); - output.push(Err(BadPeer(peer, rep::BAD_JUSTIFICATION))); - } - } - - if aux.needs_finality_proof { - trace!(target: "sync", "Block imported but requires 
finality proof {}: {:?}", number, hash); - self.request_finality_proof(&hash, number); - } - - if number > self.best_imported_number { - self.best_imported_number = number; - } - }, - Err(BlockImportError::IncompleteHeader(who)) => { - if let Some(peer) = who { - warn!("💔 Peer sent block with incomplete header to import"); - output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); - output.extend(self.restart()); - } - }, - Err(BlockImportError::VerificationFailed(who, e)) => { - if let Some(peer) = who { - warn!("💔 Verification failed for block {:?} received from peer: {}, {:?}", hash, peer, e); - output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); - output.extend(self.restart()); - } - }, - Err(BlockImportError::BadBlock(who)) => { - if let Some(peer) = who { - info!("💔 Block {:?} received from peer {} has been blacklisted", hash, peer); - output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); - } - }, - Err(BlockImportError::MissingState) => { - // This may happen if the chain we were requesting upon has been discarded - // in the meantime because other chain has been finalized. - // Don't mark it as bad as it still may be synced if explicitly requested. - trace!(target: "sync", "Obsolete block {:?}", hash); - }, - e @ Err(BlockImportError::UnknownParent) | - e @ Err(BlockImportError::Other(_)) => { - warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); - output.extend(self.restart()); - }, - Err(BlockImportError::Cancelled) => {} - }; - } - - self.is_idle = false; - output.into_iter() - } - - /// Call this when a justification has been processed by the import queue, - /// with or without errors. 
- pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { - let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; - self.extra_justifications.try_finalize_root((hash, number), finalization_result, true); - self.is_idle = false; - } - - pub fn on_finality_proof_import(&mut self, req: (B::Hash, NumberFor), res: Result<(B::Hash, NumberFor), ()>) { - self.extra_finality_proofs.try_finalize_root(req, res, true); - self.is_idle = false; - } - - /// Notify about finalization of the given block. - pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { - let client = &self.client; - let r = self.extra_finality_proofs.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - - if let Err(err) = r { - warn!(target: "sync", "💔 Error cleaning up pending extra finality proof data requests: {:?}", err) - } - - let client = &self.client; - let r = self.extra_justifications.on_block_finalized(hash, number, |base, block| { - is_descendent_of(&**client, base, block) - }); - - if let Err(err) = r { - warn!(target: "sync", "💔 Error cleaning up pending extra justification data requests: {:?}", err); - } - } - - /// Called when a block has been queued for import. - /// - /// Updates our internal state for best queued block and then goes - /// through all peers to update our view of their state as well. - fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { - if let Some(_) = self.fork_targets.remove(&hash) { - trace!(target: "sync", "Completed fork sync {:?}", hash); - } - if number > self.best_queued_number { - self.best_queued_number = number; - self.best_queued_hash = *hash; - // Update common blocks - for (n, peer) in self.peers.iter_mut() { - if let PeerSyncState::AncestorSearch {..} = peer.state { - // Wait for ancestry search to complete first. 
- continue; - } - let new_common_number = if peer.best_number >= number { - number - } else { - peer.best_number - }; - trace!( - target: "sync", - "Updating peer {} info, ours={}, common={}->{}, their best={}", - n, - number, - peer.common_number, - new_common_number, - peer.best_number, - ); - peer.common_number = new_common_number; - } - } - self.is_idle = false; - } - - /// Call when a node announces a new block. - /// - /// If `OnBlockAnnounce::ImportHeader` is returned, then the caller MUST try to import passed - /// header (call `on_block_data`). The network request isn't sent - /// in this case. Both hash and header is passed as an optimization - /// to avoid rehashing the header. - pub fn on_block_announce(&mut self, who: PeerId, hash: B::Hash, announce: &BlockAnnounce, is_best: bool) - -> OnBlockAnnounce - { - let header = &announce.header; - let number = *header.number(); - debug!(target: "sync", "Received block announcement {:?} with number {:?} from {}", hash, number, who); - if number.is_zero() { - warn!(target: "sync", "💔 Ignored genesis block (#0) announcement from {}: {}", who, hash); - return OnBlockAnnounce::Nothing - } - let parent_status = self.block_status(header.parent_hash()).ok().unwrap_or(BlockStatus::Unknown); - let known_parent = parent_status != BlockStatus::Unknown; - let ancient_parent = parent_status == BlockStatus::InChainPruned; - - let known = self.is_known(&hash); - let peer = if let Some(peer) = self.peers.get_mut(&who) { - peer - } else { - error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); - return OnBlockAnnounce::Nothing - }; - while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { - peer.recently_announced.pop_front(); - } - peer.recently_announced.push_back(hash.clone()); - if is_best { - // update their best block - peer.best_number = number; - peer.best_hash = hash; - } - if let PeerSyncState::AncestorSearch {..} = peer.state { - return OnBlockAnnounce::Nothing - } - // If the announced 
block is the best they have and is not ahead of us, our common number - // is either one further ahead or it's the one they just announced, if we know about it. - if is_best { - if known && self.best_queued_number >= number { - peer.common_number = number - } else if header.parent_hash() == &self.best_queued_hash - || known_parent && self.best_queued_number >= number - { - peer.common_number = number - One::one(); - } - } - self.is_idle = false; - - // known block case - if known || self.is_already_downloading(&hash) { - trace!(target: "sync", "Known block announce from {}: {}", who, hash); - if let Some(target) = self.fork_targets.get_mut(&hash) { - target.peers.insert(who); - } - return OnBlockAnnounce::Nothing - } - - // Let external validator check the block announcement. - let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); - match self.block_announce_validator.validate(&header, assoc_data) { - Ok(Validation::Success) => (), - Ok(Validation::Failure) => { - debug!(target: "sync", "Block announcement validation of block {} from {} failed", hash, who); - return OnBlockAnnounce::Nothing - } - Err(e) => { - error!(target: "sync", "💔 Block announcement validation errored: {}", e); - return OnBlockAnnounce::Nothing - } - } - - if ancient_parent { - trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); - return OnBlockAnnounce::Nothing - } - - let requires_additional_data = !self.role.is_light() || !known_parent; - if !requires_additional_data { - trace!(target: "sync", "Importing new header announced from {}: {} {:?}", who, hash, header); - return OnBlockAnnounce::ImportHeader - } - - if number <= self.best_queued_number { - trace!( - target: "sync", - "Added sync target for block announced from {}: {} {:?}", who, hash, header - ); - self.fork_targets - .entry(hash.clone()) - .or_insert_with(|| ForkTarget { - number, - parent_hash: Some(header.parent_hash().clone()), - peers: Default::default(), - }) - 
.peers.insert(who); - } - - OnBlockAnnounce::Nothing - } - - /// Call when a peer has disconnected. - pub fn peer_disconnected(&mut self, who: PeerId) { - self.blocks.clear_peer_download(&who); - self.peers.remove(&who); - self.extra_justifications.peer_disconnected(&who); - self.extra_finality_proofs.peer_disconnected(&who); - self.is_idle = false; - } - - /// Restart the sync process. - fn restart<'a>(&'a mut self) -> impl Iterator), BadPeer>> + 'a { - self.processed_blocks = 0; - self.blocks.clear(); - let info = self.client.info(); - self.best_queued_hash = info.best_hash; - self.best_queued_number = std::cmp::max(info.best_number, self.best_imported_number); - self.is_idle = false; - debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); - let old_peers = std::mem::replace(&mut self.peers, HashMap::new()); - old_peers.into_iter().filter_map(move |(id, p)| { - match self.new_peer(id.clone(), p.best_hash, p.best_number) { - Ok(None) => None, - Ok(Some(x)) => Some(Ok((id, x))), - Err(e) => Some(Err(e)) - } - }) - } - - /// What is the status of the block corresponding to the given hash? - fn block_status(&self, hash: &B::Hash) -> Result { - if self.queue_blocks.contains(hash) { - return Ok(BlockStatus::Queued) - } - self.client.block_status(&BlockId::Hash(*hash)) - } - - /// Is the block corresponding to the given hash known? - fn is_known(&self, hash: &B::Hash) -> bool { - self.block_status(hash).ok().map_or(false, |s| s != BlockStatus::Unknown) - } - - /// Is any peer downloading the given hash? - fn is_already_downloading(&self, hash: &B::Hash) -> bool { - self.peers.iter().any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) - } - - /// Return some key metrics. 
- pub(crate) fn metrics(&self) -> Metrics { - use std::convert::TryInto; - Metrics { - queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), - fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), - finality_proofs: self.extra_finality_proofs.metrics(), - justifications: self.extra_justifications.metrics(), - _priv: () - } - } + /// Create a new instance. + pub fn new( + role: Roles, + client: Arc>, + info: &BlockchainInfo, + request_builder: Option>, + block_announce_validator: Box + Send>, + max_parallel_downloads: u32, + ) -> Self { + let mut required_block_attributes = + BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION; + + if role.is_full() { + required_block_attributes |= BlockAttributes::BODY + } + + ChainSync { + client, + peers: HashMap::new(), + blocks: BlockCollection::new(), + best_queued_hash: info.best_hash, + best_queued_number: info.best_number, + best_imported_number: info.best_number, + extra_finality_proofs: ExtraRequests::new("finality proof"), + extra_justifications: ExtraRequests::new("justification"), + role, + required_block_attributes, + queue_blocks: Default::default(), + request_builder, + fork_targets: Default::default(), + is_idle: false, + block_announce_validator, + max_parallel_downloads, + processed_blocks: 0, + } + } + + /// Returns the state of the sync of the given peer. + /// + /// Returns `None` if the peer is unknown. + pub fn peer_info(&self, who: &PeerId) -> Option> { + self.peers.get(who).map(|p| PeerInfo { + best_hash: p.best_hash, + best_number: p.best_number, + }) + } + + /// Returns the current sync status. + pub fn status(&self) -> Status { + let best_seen = self + .peers + .values() + .max_by_key(|p| p.best_number) + .map(|p| p.best_number); + let sync_state = if let Some(n) = best_seen { + // A chain is classified as downloading if the provided best block is + // more than `MAJOR_SYNC_BLOCKS` behind the best queued block. 
+ if n > self.best_queued_number && n - self.best_queued_number > MAJOR_SYNC_BLOCKS.into() + { + SyncState::Downloading + } else { + SyncState::Idle + } + } else { + SyncState::Idle + }; + + Status { + state: sync_state, + best_seen_block: best_seen, + num_peers: self.peers.len() as u32, + queued_blocks: self.queue_blocks.len() as u32, + } + } + + /// Number of active sync requests. + pub fn num_sync_requests(&self) -> usize { + self.fork_targets.len() + } + + /// Number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.processed_blocks + } + + /// Handle a new connected peer. + /// + /// Call this method whenever we connect to a new peer. + pub fn new_peer( + &mut self, + who: PeerId, + best_hash: B::Hash, + best_number: NumberFor, + ) -> Result>, BadPeer> { + // There is nothing sync can get from the node that has no blockchain data. + match self.block_status(&best_hash) { + Err(e) => { + debug!(target:"sync", "Error reading blockchain: {:?}", e); + Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)) + } + Ok(BlockStatus::KnownBad) => { + info!( + "💔 New peer with known bad best block {} ({}).", + best_hash, best_number + ); + Err(BadPeer(who, rep::BAD_BLOCK)) + } + Ok(BlockStatus::Unknown) => { + if best_number.is_zero() { + info!( + "💔 New peer with unknown genesis hash {} ({}).", + best_hash, best_number + ); + return Err(BadPeer(who, rep::GENESIS_MISMATCH)); + } + // If there are more than `MAJOR_SYNC_BLOCKS` in the import queue then we have + // enough to do in the import queue that it's not worth kicking off + // an ancestor search, which is what we do in the next match case below. 
+ if self.queue_blocks.len() > MAJOR_SYNC_BLOCKS.into() { + debug!( + target:"sync", + "New peer with unknown best hash {} ({}), assuming common block.", + self.best_queued_hash, + self.best_queued_number + ); + self.peers.insert( + who, + PeerSync { + common_number: self.best_queued_number, + best_hash, + best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }, + ); + return Ok(None); + } + + // If we are at genesis, just start downloading. + if self.best_queued_number.is_zero() { + debug!(target:"sync", "New peer with best hash {} ({}).", best_hash, best_number); + self.peers.insert( + who.clone(), + PeerSync { + common_number: Zero::zero(), + best_hash, + best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }, + ); + self.is_idle = false; + return Ok(None); + } + + let common_best = std::cmp::min(self.best_queued_number, best_number); + + debug!(target:"sync", + "New peer with unknown best hash {} ({}), searching for common ancestor.", + best_hash, + best_number + ); + + self.peers.insert( + who, + PeerSync { + common_number: Zero::zero(), + best_hash, + best_number, + state: PeerSyncState::AncestorSearch { + current: common_best, + start: self.best_queued_number, + state: AncestorSearchState::ExponentialBackoff(One::one()), + }, + recently_announced: Default::default(), + }, + ); + self.is_idle = false; + + Ok(Some(ancestry_request::(common_best))) + } + Ok(BlockStatus::Queued) + | Ok(BlockStatus::InChainWithState) + | Ok(BlockStatus::InChainPruned) => { + debug!(target:"sync", "New peer with known best hash {} ({}).", best_hash, best_number); + self.peers.insert( + who.clone(), + PeerSync { + common_number: best_number, + best_hash, + best_number, + state: PeerSyncState::Available, + recently_announced: Default::default(), + }, + ); + self.is_idle = false; + Ok(None) + } + } + } + + /// Signal that `best_header` has been queued for import and update the + /// `ChainSync` state with 
that information. + pub fn update_chain_info(&mut self, best_header: &B::Header) { + self.on_block_queued(&best_header.hash(), *best_header.number()) + } + + /// Schedule a justification request for the given block. + pub fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + self.extra_justifications + .schedule((*hash, number), |base, block| { + is_descendent_of(&**client, base, block) + }) + } + + /// Schedule a finality proof request for the given block. + pub fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + self.extra_finality_proofs + .schedule((*hash, number), |base, block| { + is_descendent_of(&**client, base, block) + }) + } + + /// Request syncing for the given block from given set of peers. + // The implementation is similar to on_block_announce with unknown parent hash. + pub fn set_sync_fork_request( + &mut self, + mut peers: Vec, + hash: &B::Hash, + number: NumberFor, + ) { + if peers.is_empty() { + debug!( + target: "sync", + "Explicit sync request for block {:?} with no peers specified. \ + Syncing from all connected peers {:?} instead.", + hash, peers, + ); + + peers = self + .peers + .iter() + // Only request blocks from peers who are ahead or on a par. + .filter(|(_, peer)| peer.best_number >= number) + .map(|(id, _)| id.clone()) + .collect(); + } else { + debug!(target: "sync", "Explicit sync request for block {:?} with {:?}", hash, peers); + } + + if self.is_known(&hash) { + debug!(target: "sync", "Refusing to sync known hash {:?}", hash); + return; + } + + trace!(target: "sync", "Downloading requested old fork {:?}", hash); + self.is_idle = false; + for peer_id in &peers { + if let Some(peer) = self.peers.get_mut(peer_id) { + if let PeerSyncState::AncestorSearch { .. 
} = peer.state { + continue; + } + + if number > peer.best_number { + peer.best_number = number; + peer.best_hash = hash.clone(); + } + } + } + + self.fork_targets + .entry(hash.clone()) + .or_insert_with(|| ForkTarget { + number, + peers: Default::default(), + parent_hash: None, + }) + .peers + .extend(peers); + } + + /// Get an iterator over all scheduled justification requests. + pub fn justification_requests( + &mut self, + ) -> impl Iterator)> + '_ { + let peers = &mut self.peers; + let mut matcher = self.extra_justifications.matcher(); + std::iter::from_fn(move || { + if let Some((peer, request)) = matcher.next(&peers) { + peers + .get_mut(&peer) + .expect( + "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", + ) + .state = PeerSyncState::DownloadingJustification(request.0); + let req = message::generic::BlockRequest { + id: 0, + fields: BlockAttributes::JUSTIFICATION, + from: message::FromBlock::Hash(request.0), + to: None, + direction: message::Direction::Ascending, + max: Some(1), + }; + Some((peer, req)) + } else { + None + } + }) + } + + /// Get an iterator over all scheduled finality proof requests. + pub fn finality_proof_requests( + &mut self, + ) -> impl Iterator)> + '_ { + let peers = &mut self.peers; + let request_builder = &mut self.request_builder; + let mut matcher = self.extra_finality_proofs.matcher(); + std::iter::from_fn(move || { + if let Some((peer, request)) = matcher.next(&peers) { + peers + .get_mut(&peer) + .expect( + "`Matcher::next` guarantees the `PeerId` comes from the given peers; qed", + ) + .state = PeerSyncState::DownloadingFinalityProof(request.0); + let req = message::generic::FinalityProofRequest { + id: 0, + block: request.0, + request: request_builder + .as_mut() + .map(|builder| builder.build_request_data(&request.0)) + .unwrap_or_default(), + }; + Some((peer, req)) + } else { + None + } + }) + } + + /// Get an iterator over all block requests of all peers. 
+ pub fn block_requests(&mut self) -> impl Iterator)> + '_ { + if self.is_idle { + return Either::Left(std::iter::empty()); + } + if self.queue_blocks.len() > MAX_IMPORTING_BLOCKS { + trace!(target: "sync", "Too many blocks in the queue."); + return Either::Left(std::iter::empty()); + } + let major_sync = self.status().state == SyncState::Downloading; + let blocks = &mut self.blocks; + let attrs = &self.required_block_attributes; + let fork_targets = &mut self.fork_targets; + let mut have_requests = false; + let last_finalized = self.client.info().finalized_number; + let best_queued = self.best_queued_number; + let client = &self.client; + let queue = &self.queue_blocks; + let max_parallel = if major_sync { + 1 + } else { + self.max_parallel_downloads + }; + let iter = self.peers.iter_mut().filter_map(move |(id, peer)| { + if !peer.state.is_available() { + trace!(target: "sync", "Peer {} is busy", id); + return None; + } + if let Some((range, req)) = peer_block_request( + id, + peer, + blocks, + attrs, + max_parallel, + last_finalized, + best_queued, + ) { + peer.state = PeerSyncState::DownloadingNew(range.start); + trace!( + target: "sync", + "New block request for {}, (best:{}, common:{}) {:?}", + id, + peer.best_number, + peer.common_number, + req, + ); + have_requests = true; + Some((id.clone(), req)) + } else if let Some((hash, req)) = fork_sync_request( + id, + fork_targets, + best_queued, + last_finalized, + attrs, + |hash| { + if queue.contains(hash) { + BlockStatus::Queued + } else { + client + .block_status(&BlockId::Hash(*hash)) + .unwrap_or(BlockStatus::Unknown) + } + }, + ) { + trace!(target: "sync", "Downloading fork {:?} from {}", hash, id); + peer.state = PeerSyncState::DownloadingStale(hash); + have_requests = true; + Some((id.clone(), req)) + } else { + None + } + }); + if !have_requests { + self.is_idle = true; + } + Either::Right(iter) + } + + /// Handle a response from the remote to a block request that we made. 
+ /// + /// `request` must be the original request that triggered `response`. + /// or `None` if data comes from the block announcement. + /// + /// If this corresponds to a valid block, this outputs the block that + /// must be imported in the import queue. + pub fn on_block_data( + &mut self, + who: PeerId, + request: Option>, + response: BlockResponse, + ) -> Result, BadPeer> { + let mut new_blocks: Vec> = if let Some(peer) = self.peers.get_mut(&who) { + let mut blocks = response.blocks; + if request + .as_ref() + .map_or(false, |r| r.direction == message::Direction::Descending) + { + trace!(target: "sync", "Reversing incoming block list"); + blocks.reverse() + } + self.is_idle = false; + if request.is_some() { + match &mut peer.state { + PeerSyncState::DownloadingNew(start_block) => { + self.blocks.clear_peer_download(&who); + self.blocks.insert(*start_block, blocks, who.clone()); + peer.state = PeerSyncState::Available; + self.blocks + .drain(self.best_queued_number + One::one()) + .into_iter() + .map(|block_data| IncomingBlock { + hash: block_data.block.hash, + header: block_data.block.header, + body: block_data.block.body, + justification: block_data.block.justification, + origin: block_data.origin, + allow_missing_state: true, + import_existing: false, + }) + .collect() + } + PeerSyncState::DownloadingStale(_) => { + peer.state = PeerSyncState::Available; + blocks + .into_iter() + .map(|b| IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + justification: b.justification, + origin: Some(who.clone()), + allow_missing_state: true, + import_existing: false, + }) + .collect() + } + PeerSyncState::AncestorSearch { + current, + start, + state, + } => { + let matching_hash = match (blocks.get(0), self.client.hash(*current)) { + (Some(block), Ok(maybe_our_block_hash)) => { + trace!(target: "sync", "Got ancestry block #{} ({}) from peer {}", current, block.hash, who); + maybe_our_block_hash.filter(|x| x == &block.hash) + } + (None, _) => { + 
debug!(target: "sync", "Invalid response when searching for ancestor from {}", who); + return Err(BadPeer(who, rep::UNKNOWN_ANCESTOR)); + } + (_, Err(e)) => { + info!("❌ Error answering legitimate blockchain query: {:?}", e); + return Err(BadPeer(who, rep::BLOCKCHAIN_READ_ERROR)); + } + }; + if matching_hash.is_some() { + if *start < self.best_queued_number + && self.best_queued_number <= peer.best_number + { + // We've made progress on this chain since the search was started. + // Opportunistically set common number to updated number + // instead of the one that started the search. + peer.common_number = self.best_queued_number; + } else if peer.common_number < *current { + peer.common_number = *current; + } + } + if matching_hash.is_none() && current.is_zero() { + trace!(target:"sync", "Ancestry search: genesis mismatch for peer {}", who); + return Err(BadPeer(who, rep::GENESIS_MISMATCH)); + } + if let Some((next_state, next_num)) = + handle_ancestor_search_state(state, *current, matching_hash.is_some()) + { + peer.state = PeerSyncState::AncestorSearch { + current: next_num, + start: *start, + state: next_state, + }; + return Ok(OnBlockData::Request(who, ancestry_request::(next_num))); + } else { + // Ancestry search is complete. Check if peer is on a stale fork unknown to us and + // add it to sync targets if necessary. + trace!(target: "sync", "Ancestry search complete. 
Ours={} ({}), Theirs={} ({}), Common={:?} ({})", + self.best_queued_hash, + self.best_queued_number, + peer.best_hash, + peer.best_number, + matching_hash, + peer.common_number, + ); + if peer.common_number < peer.best_number + && peer.best_number < self.best_queued_number + { + trace!(target: "sync", "Added fork target {} for {}" , peer.best_hash, who); + self.fork_targets + .entry(peer.best_hash.clone()) + .or_insert_with(|| ForkTarget { + number: peer.best_number, + parent_hash: None, + peers: Default::default(), + }) + .peers + .insert(who.clone()); + } + peer.state = PeerSyncState::Available; + Vec::new() + } + } + + PeerSyncState::Available + | PeerSyncState::DownloadingJustification(..) + | PeerSyncState::DownloadingFinalityProof(..) => Vec::new(), + } + } else { + // When request.is_none() this is a block announcement. Just accept blocks. + blocks + .into_iter() + .map(|b| IncomingBlock { + hash: b.hash, + header: b.header, + body: b.body, + justification: b.justification, + origin: Some(who.clone()), + allow_missing_state: true, + import_existing: false, + }) + .collect() + } + } else { + Vec::new() + }; + + // When doing initial sync we don't request blocks in parallel. + // So the only way this can happen is when peers lie about the + // common block. + let is_recent = new_blocks + .first() + .map(|block| { + self.peers + .iter() + .any(|(_, peer)| peer.recently_announced.contains(&block.hash)) + }) + .unwrap_or(false); + + if !is_recent && new_blocks.last().map_or(false, |b| self.is_known(&b.hash)) { + // When doing initial sync we don't request blocks in parallel. + // So the only way this can happen is when peers lie about the + // common block. 
+ debug!(target: "sync", "Ignoring known blocks from {}", who); + return Err(BadPeer(who, rep::KNOWN_BLOCK)); + } + let orig_len = new_blocks.len(); + new_blocks.retain(|b| !self.queue_blocks.contains(&b.hash)); + if new_blocks.len() != orig_len { + debug!(target: "sync", "Ignoring {} blocks that are already queued", orig_len - new_blocks.len()); + } + + let origin = if is_recent { + BlockOrigin::NetworkBroadcast + } else { + BlockOrigin::NetworkInitialSync + }; + + if let Some((h, n)) = new_blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| (&b.hash, *h.number()))) + { + trace!(target:"sync", "Accepted {} blocks ({:?}) with origin {:?}", new_blocks.len(), h, origin); + self.on_block_queued(h, n) + } + + self.queue_blocks.extend(new_blocks.iter().map(|b| b.hash)); + + Ok(OnBlockData::Import(origin, new_blocks)) + } + + /// Handle a response from the remote to a justification request that we made. + /// + /// `request` must be the original request that triggered `response`. + /// + /// Returns `Some` if this produces a justification that must be imported + /// into the import queue. 
+ pub fn on_block_justification( + &mut self, + who: PeerId, + response: BlockResponse, + ) -> Result, BadPeer> { + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_justification with a bad peer ID"); + return Ok(OnBlockJustification::Nothing); + }; + + self.is_idle = false; + if let PeerSyncState::DownloadingJustification(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // We only request one justification at a time + let justification = if let Some(block) = response.blocks.into_iter().next() { + if hash != block.hash { + info!( + target: "sync", + "💔 Invalid block justification provided by {}: requested: {:?} got: {:?}", who, hash, block.hash + ); + return Err(BadPeer(who, rep::BAD_JUSTIFICATION)); + } + + block.justification + } else { + // we might have asked the peer for a justification on a block that we assumed it + // had but didn't (regardless of whether it had a justification for it or not). + trace!(target: "sync", + "Peer {:?} provided empty response for justification request {:?}", + who, + hash, + ); + + None + }; + + if let Some((peer, hash, number, j)) = + self.extra_justifications.on_response(who, justification) + { + return Ok(OnBlockJustification::Import { + peer, + hash, + number, + justification: j, + }); + } + } + + Ok(OnBlockJustification::Nothing) + } + + /// Handle new finality proof data. + pub fn on_block_finality_proof( + &mut self, + who: PeerId, + resp: FinalityProofResponse, + ) -> Result, BadPeer> { + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_finality_proof_data with a bad peer ID"); + return Ok(OnBlockFinalityProof::Nothing); + }; + + self.is_idle = false; + if let PeerSyncState::DownloadingFinalityProof(hash) = peer.state { + peer.state = PeerSyncState::Available; + + // We only request one finality proof at a time. 
+ if hash != resp.block { + info!( + target: "sync", + "💔 Invalid block finality proof provided: requested: {:?} got: {:?}", + hash, + resp.block + ); + return Err(BadPeer(who, rep::BAD_FINALITY_PROOF)); + } + + if let Some((peer, hash, number, p)) = + self.extra_finality_proofs.on_response(who, resp.proof) + { + return Ok(OnBlockFinalityProof::Import { + peer, + hash, + number, + proof: p, + }); + } + } + + Ok(OnBlockFinalityProof::Nothing) + } + + /// A batch of blocks have been processed, with or without errors. + /// + /// Call this when a batch of blocks have been processed by the import + /// queue, with or without errors. + /// + /// `peer_info` is passed in case of a restart. + pub fn on_blocks_processed<'a>( + &'a mut self, + imported: usize, + count: usize, + results: Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + ) -> impl Iterator), BadPeer>> + 'a { + trace!(target: "sync", "Imported {} of {}", imported, count); + + let mut output = Vec::new(); + + let mut has_error = false; + for (_, hash) in &results { + self.queue_blocks.remove(&hash); + } + self.processed_blocks += results.len(); + + for (result, hash) in results { + if has_error { + continue; + } + + if result.is_err() { + has_error = true; + } + + match result { + Ok(BlockImportResult::ImportedKnown(_number)) => {} + Ok(BlockImportResult::ImportedUnknown(number, aux, who)) => { + if aux.clear_justification_requests { + trace!( + target: "sync", + "Block imported clears all pending justification requests {}: {:?}", + number, + hash + ); + self.extra_justifications.reset() + } + + if aux.needs_justification { + trace!(target: "sync", "Block imported but requires justification {}: {:?}", number, hash); + self.request_justification(&hash, number); + } + + if aux.bad_justification { + if let Some(peer) = who { + info!("💔 Sent block with bad justification to import"); + output.push(Err(BadPeer(peer, rep::BAD_JUSTIFICATION))); + } + } + + if aux.needs_finality_proof { + trace!(target: "sync", 
"Block imported but requires finality proof {}: {:?}", number, hash); + self.request_finality_proof(&hash, number); + } + + if number > self.best_imported_number { + self.best_imported_number = number; + } + } + Err(BlockImportError::IncompleteHeader(who)) => { + if let Some(peer) = who { + warn!("💔 Peer sent block with incomplete header to import"); + output.push(Err(BadPeer(peer, rep::INCOMPLETE_HEADER))); + output.extend(self.restart()); + } + } + Err(BlockImportError::VerificationFailed(who, e)) => { + if let Some(peer) = who { + warn!( + "💔 Verification failed for block {:?} received from peer: {}, {:?}", + hash, peer, e + ); + output.push(Err(BadPeer(peer, rep::VERIFICATION_FAIL))); + output.extend(self.restart()); + } + } + Err(BlockImportError::BadBlock(who)) => { + if let Some(peer) = who { + info!( + "💔 Block {:?} received from peer {} has been blacklisted", + hash, peer + ); + output.push(Err(BadPeer(peer, rep::BAD_BLOCK))); + } + } + Err(BlockImportError::MissingState) => { + // This may happen if the chain we were requesting upon has been discarded + // in the meantime because other chain has been finalized. + // Don't mark it as bad as it still may be synced if explicitly requested. + trace!(target: "sync", "Obsolete block {:?}", hash); + } + e @ Err(BlockImportError::UnknownParent) | e @ Err(BlockImportError::Other(_)) => { + warn!(target: "sync", "💔 Error importing block {:?}: {:?}", hash, e); + output.extend(self.restart()); + } + Err(BlockImportError::Cancelled) => {} + }; + } + + self.is_idle = false; + output.into_iter() + } + + /// Call this when a justification has been processed by the import queue, + /// with or without errors. 
+ pub fn on_justification_import(&mut self, hash: B::Hash, number: NumberFor, success: bool) { + let finalization_result = if success { Ok((hash, number)) } else { Err(()) }; + self.extra_justifications + .try_finalize_root((hash, number), finalization_result, true); + self.is_idle = false; + } + + pub fn on_finality_proof_import( + &mut self, + req: (B::Hash, NumberFor), + res: Result<(B::Hash, NumberFor), ()>, + ) { + self.extra_finality_proofs.try_finalize_root(req, res, true); + self.is_idle = false; + } + + /// Notify about finalization of the given block. + pub fn on_block_finalized(&mut self, hash: &B::Hash, number: NumberFor) { + let client = &self.client; + let r = self + .extra_finality_proofs + .on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let Err(err) = r { + warn!(target: "sync", "💔 Error cleaning up pending extra finality proof data requests: {:?}", err) + } + + let client = &self.client; + let r = self + .extra_justifications + .on_block_finalized(hash, number, |base, block| { + is_descendent_of(&**client, base, block) + }); + + if let Err(err) = r { + warn!(target: "sync", "💔 Error cleaning up pending extra justification data requests: {:?}", err); + } + } + + /// Called when a block has been queued for import. + /// + /// Updates our internal state for best queued block and then goes + /// through all peers to update our view of their state as well. + fn on_block_queued(&mut self, hash: &B::Hash, number: NumberFor) { + if let Some(_) = self.fork_targets.remove(&hash) { + trace!(target: "sync", "Completed fork sync {:?}", hash); + } + if number > self.best_queued_number { + self.best_queued_number = number; + self.best_queued_hash = *hash; + // Update common blocks + for (n, peer) in self.peers.iter_mut() { + if let PeerSyncState::AncestorSearch { .. } = peer.state { + // Wait for ancestry search to complete first. 
+ continue; + } + let new_common_number = if peer.best_number >= number { + number + } else { + peer.best_number + }; + trace!( + target: "sync", + "Updating peer {} info, ours={}, common={}->{}, their best={}", + n, + number, + peer.common_number, + new_common_number, + peer.best_number, + ); + peer.common_number = new_common_number; + } + } + self.is_idle = false; + } + + /// Call when a node announces a new block. + /// + /// If `OnBlockAnnounce::ImportHeader` is returned, then the caller MUST try to import passed + /// header (call `on_block_data`). The network request isn't sent + /// in this case. Both hash and header is passed as an optimization + /// to avoid rehashing the header. + pub fn on_block_announce( + &mut self, + who: PeerId, + hash: B::Hash, + announce: &BlockAnnounce, + is_best: bool, + ) -> OnBlockAnnounce { + let header = &announce.header; + let number = *header.number(); + debug!(target: "sync", "Received block announcement {:?} with number {:?} from {}", hash, number, who); + if number.is_zero() { + warn!(target: "sync", "💔 Ignored genesis block (#0) announcement from {}: {}", who, hash); + return OnBlockAnnounce::Nothing; + } + let parent_status = self + .block_status(header.parent_hash()) + .ok() + .unwrap_or(BlockStatus::Unknown); + let known_parent = parent_status != BlockStatus::Unknown; + let ancient_parent = parent_status == BlockStatus::InChainPruned; + + let known = self.is_known(&hash); + let peer = if let Some(peer) = self.peers.get_mut(&who) { + peer + } else { + error!(target: "sync", "💔 Called on_block_announce with a bad peer ID"); + return OnBlockAnnounce::Nothing; + }; + while peer.recently_announced.len() >= ANNOUNCE_HISTORY_SIZE { + peer.recently_announced.pop_front(); + } + peer.recently_announced.push_back(hash.clone()); + if is_best { + // update their best block + peer.best_number = number; + peer.best_hash = hash; + } + if let PeerSyncState::AncestorSearch { .. 
} = peer.state { + return OnBlockAnnounce::Nothing; + } + // If the announced block is the best they have and is not ahead of us, our common number + // is either one further ahead or it's the one they just announced, if we know about it. + if is_best { + if known && self.best_queued_number >= number { + peer.common_number = number + } else if header.parent_hash() == &self.best_queued_hash + || known_parent && self.best_queued_number >= number + { + peer.common_number = number - One::one(); + } + } + self.is_idle = false; + + // known block case + if known || self.is_already_downloading(&hash) { + trace!(target: "sync", "Known block announce from {}: {}", who, hash); + if let Some(target) = self.fork_targets.get_mut(&hash) { + target.peers.insert(who); + } + return OnBlockAnnounce::Nothing; + } + + // Let external validator check the block announcement. + let assoc_data = announce.data.as_ref().map_or(&[][..], |v| v.as_slice()); + match self.block_announce_validator.validate(&header, assoc_data) { + Ok(Validation::Success) => (), + Ok(Validation::Failure) => { + debug!(target: "sync", "Block announcement validation of block {} from {} failed", hash, who); + return OnBlockAnnounce::Nothing; + } + Err(e) => { + error!(target: "sync", "💔 Block announcement validation errored: {}", e); + return OnBlockAnnounce::Nothing; + } + } + + if ancient_parent { + trace!(target: "sync", "Ignored ancient block announced from {}: {} {:?}", who, hash, header); + return OnBlockAnnounce::Nothing; + } + + let requires_additional_data = !self.role.is_light() || !known_parent; + if !requires_additional_data { + trace!(target: "sync", "Importing new header announced from {}: {} {:?}", who, hash, header); + return OnBlockAnnounce::ImportHeader; + } + + if number <= self.best_queued_number { + trace!( + target: "sync", + "Added sync target for block announced from {}: {} {:?}", who, hash, header + ); + self.fork_targets + .entry(hash.clone()) + .or_insert_with(|| ForkTarget { + number, + 
parent_hash: Some(header.parent_hash().clone()), + peers: Default::default(), + }) + .peers + .insert(who); + } + + OnBlockAnnounce::Nothing + } + + /// Call when a peer has disconnected. + pub fn peer_disconnected(&mut self, who: PeerId) { + self.blocks.clear_peer_download(&who); + self.peers.remove(&who); + self.extra_justifications.peer_disconnected(&who); + self.extra_finality_proofs.peer_disconnected(&who); + self.is_idle = false; + } + + /// Restart the sync process. + fn restart<'a>( + &'a mut self, + ) -> impl Iterator), BadPeer>> + 'a { + self.processed_blocks = 0; + self.blocks.clear(); + let info = self.client.info(); + self.best_queued_hash = info.best_hash; + self.best_queued_number = std::cmp::max(info.best_number, self.best_imported_number); + self.is_idle = false; + debug!(target:"sync", "Restarted with {} ({})", self.best_queued_number, self.best_queued_hash); + let old_peers = std::mem::replace(&mut self.peers, HashMap::new()); + old_peers.into_iter().filter_map(move |(id, p)| { + match self.new_peer(id.clone(), p.best_hash, p.best_number) { + Ok(None) => None, + Ok(Some(x)) => Some(Ok((id, x))), + Err(e) => Some(Err(e)), + } + }) + } + + /// What is the status of the block corresponding to the given hash? + fn block_status(&self, hash: &B::Hash) -> Result { + if self.queue_blocks.contains(hash) { + return Ok(BlockStatus::Queued); + } + self.client.block_status(&BlockId::Hash(*hash)) + } + + /// Is the block corresponding to the given hash known? + fn is_known(&self, hash: &B::Hash) -> bool { + self.block_status(hash) + .ok() + .map_or(false, |s| s != BlockStatus::Unknown) + } + + /// Is any peer downloading the given hash? + fn is_already_downloading(&self, hash: &B::Hash) -> bool { + self.peers + .iter() + .any(|(_, p)| p.state == PeerSyncState::DownloadingStale(*hash)) + } + + /// Return some key metrics. 
+ pub(crate) fn metrics(&self) -> Metrics { + use std::convert::TryInto; + Metrics { + queued_blocks: self.queue_blocks.len().try_into().unwrap_or(std::u32::MAX), + fork_targets: self.fork_targets.len().try_into().unwrap_or(std::u32::MAX), + finality_proofs: self.extra_finality_proofs.metrics(), + justifications: self.extra_justifications.metrics(), + _priv: (), + } + } } #[derive(Debug)] pub(crate) struct Metrics { - pub(crate) queued_blocks: u32, - pub(crate) fork_targets: u32, - pub(crate) finality_proofs: extra_requests::Metrics, - pub(crate) justifications: extra_requests::Metrics, - _priv: () + pub(crate) queued_blocks: u32, + pub(crate) fork_targets: u32, + pub(crate) finality_proofs: extra_requests::Metrics, + pub(crate) justifications: extra_requests::Metrics, + _priv: (), } /// Request the ancestry for a block. Sends a request for header and justification for the given /// block number. Used during ancestry search. fn ancestry_request(block: NumberFor) -> BlockRequest { - message::generic::BlockRequest { - id: 0, - fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, - from: message::FromBlock::Number(block), - to: None, - direction: message::Direction::Ascending, - max: Some(1) - } + message::generic::BlockRequest { + id: 0, + fields: BlockAttributes::HEADER | BlockAttributes::JUSTIFICATION, + from: message::FromBlock::Number(block), + to: None, + direction: message::Direction::Ascending, + max: Some(1), + } } /// The ancestor search state expresses which algorithm, and its stateful parameters, we are using to /// try to find an ancestor block #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum AncestorSearchState { - /// Use exponential backoff to find an ancestor, then switch to binary search. - /// We keep track of the exponent. - ExponentialBackoff(NumberFor), - /// Using binary search to find the best ancestor. - /// We keep track of left and right bounds. 
- BinarySearch(NumberFor, NumberFor), + /// Use exponential backoff to find an ancestor, then switch to binary search. + /// We keep track of the exponent. + ExponentialBackoff(NumberFor), + /// Using binary search to find the best ancestor. + /// We keep track of left and right bounds. + BinarySearch(NumberFor, NumberFor), } /// This function handles the ancestor search strategy used. The goal is to find a common point @@ -1302,237 +1436,246 @@ pub enum AncestorSearchState { /// When we've found a block hash mismatch we then fall back to a binary search between the two /// last known points to find the common block closest to the tip. fn handle_ancestor_search_state( - state: &AncestorSearchState, - curr_block_num: NumberFor, - block_hash_match: bool + state: &AncestorSearchState, + curr_block_num: NumberFor, + block_hash_match: bool, ) -> Option<(AncestorSearchState, NumberFor)> { - let two = >::one() + >::one(); - match state { - AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { - let next_distance_to_tip = *next_distance_to_tip; - if block_hash_match && next_distance_to_tip == One::one() { - // We found the ancestor in the first step so there is no need to execute binary search. 
- return None; - } - if block_hash_match { - let left = curr_block_num; - let right = left + next_distance_to_tip / two; - let middle = left + (right - left) / two; - Some((AncestorSearchState::BinarySearch(left, right), middle)) - } else { - let next_block_num = curr_block_num.checked_sub(&next_distance_to_tip) - .unwrap_or_else(Zero::zero); - let next_distance_to_tip = next_distance_to_tip * two; - Some((AncestorSearchState::ExponentialBackoff(next_distance_to_tip), next_block_num)) - } - } - AncestorSearchState::BinarySearch(mut left, mut right) => { - if left >= curr_block_num { - return None; - } - if block_hash_match { - left = curr_block_num; - } else { - right = curr_block_num; - } - assert!(right >= left); - let middle = left + (right - left) / two; - Some((AncestorSearchState::BinarySearch(left, right), middle)) - } - } + let two = >::one() + >::one(); + match state { + AncestorSearchState::ExponentialBackoff(next_distance_to_tip) => { + let next_distance_to_tip = *next_distance_to_tip; + if block_hash_match && next_distance_to_tip == One::one() { + // We found the ancestor in the first step so there is no need to execute binary search. 
+ return None; + } + if block_hash_match { + let left = curr_block_num; + let right = left + next_distance_to_tip / two; + let middle = left + (right - left) / two; + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } else { + let next_block_num = curr_block_num + .checked_sub(&next_distance_to_tip) + .unwrap_or_else(Zero::zero); + let next_distance_to_tip = next_distance_to_tip * two; + Some(( + AncestorSearchState::ExponentialBackoff(next_distance_to_tip), + next_block_num, + )) + } + } + AncestorSearchState::BinarySearch(mut left, mut right) => { + if left >= curr_block_num { + return None; + } + if block_hash_match { + left = curr_block_num; + } else { + right = curr_block_num; + } + assert!(right >= left); + let middle = left + (right - left) / two; + Some((AncestorSearchState::BinarySearch(left, right), middle)) + } + } } /// Get a new block request for the peer if any. fn peer_block_request( - id: &PeerId, - peer: &PeerSync, - blocks: &mut BlockCollection, - attrs: &message::BlockAttributes, - max_parallel_downloads: u32, - finalized: NumberFor, - best_num: NumberFor, + id: &PeerId, + peer: &PeerSync, + blocks: &mut BlockCollection, + attrs: &message::BlockAttributes, + max_parallel_downloads: u32, + finalized: NumberFor, + best_num: NumberFor, ) -> Option<(Range>, BlockRequest)> { - if best_num >= peer.best_number { - // Will be downloaded as alternative fork instead. 
- return None; - } - if peer.common_number < finalized { - trace!( - target: "sync", - "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", - id, finalized, peer.common_number, peer.best_number, best_num, - ); - } - if let Some(range) = blocks.needed_blocks( - id.clone(), - MAX_BLOCKS_TO_REQUEST, - peer.best_number, - peer.common_number, - max_parallel_downloads, - MAX_DOWNLOAD_AHEAD, - ) { - let request = message::generic::BlockRequest { - id: 0, - fields: attrs.clone(), - from: message::FromBlock::Number(range.start), - to: None, - direction: message::Direction::Ascending, - max: Some((range.end - range.start).saturated_into::()) - }; - Some((range, request)) - } else { - None - } + if best_num >= peer.best_number { + // Will be downloaded as alternative fork instead. + return None; + } + if peer.common_number < finalized { + trace!( + target: "sync", + "Requesting pre-finalized chain from {:?}, common={}, finalized={}, peer best={}, our best={}", + id, finalized, peer.common_number, peer.best_number, best_num, + ); + } + if let Some(range) = blocks.needed_blocks( + id.clone(), + MAX_BLOCKS_TO_REQUEST, + peer.best_number, + peer.common_number, + max_parallel_downloads, + MAX_DOWNLOAD_AHEAD, + ) { + let request = message::generic::BlockRequest { + id: 0, + fields: attrs.clone(), + from: message::FromBlock::Number(range.start), + to: None, + direction: message::Direction::Ascending, + max: Some((range.end - range.start).saturated_into::()), + }; + Some((range, request)) + } else { + None + } } /// Get pending fork sync targets for a peer. 
fn fork_sync_request( - id: &PeerId, - targets: &mut HashMap>, - best_num: NumberFor, - finalized: NumberFor, - attributes: &message::BlockAttributes, - check_block: impl Fn(&B::Hash) -> BlockStatus, -) -> Option<(B::Hash, BlockRequest)> -{ - targets.retain(|hash, r| { - if r.number <= finalized { - trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); - return false; - } - if check_block(hash) != BlockStatus::Unknown { - trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); - return false; - } - true - }); - for (hash, r) in targets { - if !r.peers.contains(id) { - continue - } - if r.number <= best_num { - let parent_status = r.parent_hash.as_ref().map_or(BlockStatus::Unknown, check_block); - let mut count = (r.number - finalized).saturated_into::(); // up to the last finalized block - if parent_status != BlockStatus::Unknown { - // request only single block - count = 1; - } - trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); - return Some((hash.clone(), message::generic::BlockRequest { - id: 0, - fields: attributes.clone(), - from: message::FromBlock::Hash(hash.clone()), - to: None, - direction: message::Direction::Descending, - max: Some(count), - })) - } - } - None + id: &PeerId, + targets: &mut HashMap>, + best_num: NumberFor, + finalized: NumberFor, + attributes: &message::BlockAttributes, + check_block: impl Fn(&B::Hash) -> BlockStatus, +) -> Option<(B::Hash, BlockRequest)> { + targets.retain(|hash, r| { + if r.number <= finalized { + trace!(target: "sync", "Removed expired fork sync request {:?} (#{})", hash, r.number); + return false; + } + if check_block(hash) != BlockStatus::Unknown { + trace!(target: "sync", "Removed obsolete fork sync request {:?} (#{})", hash, r.number); + return false; + } + true + }); + for (hash, r) in targets { + if !r.peers.contains(id) { + continue; + } + if r.number <= best_num { + let parent_status = r + 
.parent_hash + .as_ref() + .map_or(BlockStatus::Unknown, check_block); + let mut count = (r.number - finalized).saturated_into::(); // up to the last finalized block + if parent_status != BlockStatus::Unknown { + // request only single block + count = 1; + } + trace!(target: "sync", "Downloading requested fork {:?} from {}, {} blocks", hash, id, count); + return Some(( + hash.clone(), + message::generic::BlockRequest { + id: 0, + fields: attributes.clone(), + from: message::FromBlock::Hash(hash.clone()), + to: None, + direction: message::Direction::Descending, + max: Some(count), + }, + )); + } + } + None } /// Returns `true` if the given `block` is a descendent of `base`. -fn is_descendent_of(client: &T, base: &Block::Hash, block: &Block::Hash) -> sp_blockchain::Result - where - Block: BlockT, - T: HeaderMetadata + ?Sized, +fn is_descendent_of( + client: &T, + base: &Block::Hash, + block: &Block::Hash, +) -> sp_blockchain::Result +where + Block: BlockT, + T: HeaderMetadata + ?Sized, { - if base == block { - return Ok(false); - } + if base == block { + return Ok(false); + } - let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; + let ancestor = sp_blockchain::lowest_common_ancestor(client, *block, *base)?; - Ok(ancestor.hash == *base) + Ok(ancestor.hash == *base) } #[cfg(test)] mod test { - use super::*; - use super::message::FromBlock; - use substrate_test_runtime_client::{ - runtime::Block, - DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, - }; - use sp_blockchain::HeaderBackend; - use sc_block_builder::BlockBuilderProvider; - use sp_consensus::block_validation::DefaultBlockAnnounceValidator; - - #[test] - fn processes_empty_response_on_justification_request_for_unknown_block() { - // if we ask for a justification for a given block to a peer that doesn't know that block - // (different from not having a justification), the peer will reply with an empty response. 
- // internally we should process the response as the justification not being available. - - let client = Arc::new(TestClientBuilder::new().build()); - let info = client.info(); - let block_announce_validator = Box::new(DefaultBlockAnnounceValidator::new(client.clone())); - let peer_id = PeerId::random(); - - let mut sync = ChainSync::new( - Roles::AUTHORITY, - client.clone(), - &info, - None, - block_announce_validator, - 1, - ); - - let (a1_hash, a1_number) = { - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - (a1.hash(), *a1.header.number()) - }; - - // add a new peer with the same best block - sync.new_peer(peer_id.clone(), a1_hash, a1_number).unwrap(); - - // and request a justification for the block - sync.request_justification(&a1_hash, a1_number); - - // the justification request should be scheduled to that peer - assert!( - sync.justification_requests().any(|(who, request)| { - who == peer_id && request.from == FromBlock::Hash(a1_hash) - }) - ); - - // there are no extra pending requests - assert_eq!( - sync.extra_justifications.pending_requests().count(), - 0, - ); - - // there's one in-flight extra request to the expected peer - assert!( - sync.extra_justifications.active_requests().any(|(who, (hash, number))| { - *who == peer_id && *hash == a1_hash && *number == a1_number - }) - ); - - // if the peer replies with an empty response (i.e. it doesn't know the block), - // the active request should be cleared. 
- assert_eq!( - sync.on_block_justification( - peer_id.clone(), - BlockResponse:: { - id: 0, - blocks: vec![], - } - ), - Ok(OnBlockJustification::Nothing), - ); - - // there should be no in-flight requests - assert_eq!( - sync.extra_justifications.active_requests().count(), - 0, - ); - - // and the request should now be pending again, waiting for reschedule - assert!( - sync.extra_justifications.pending_requests().any(|(hash, number)| { - *hash == a1_hash && *number == a1_number - }) - ); - } + use super::message::FromBlock; + use super::*; + use sc_block_builder::BlockBuilderProvider; + use sp_blockchain::HeaderBackend; + use sp_consensus::block_validation::DefaultBlockAnnounceValidator; + use substrate_test_runtime_client::{ + runtime::Block, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, + }; + + #[test] + fn processes_empty_response_on_justification_request_for_unknown_block() { + // if we ask for a justification for a given block to a peer that doesn't know that block + // (different from not having a justification), the peer will reply with an empty response. + // internally we should process the response as the justification not being available. 
+ + let client = Arc::new(TestClientBuilder::new().build()); + let info = client.info(); + let block_announce_validator = Box::new(DefaultBlockAnnounceValidator::new(client.clone())); + let peer_id = PeerId::random(); + + let mut sync = ChainSync::new( + Roles::AUTHORITY, + client.clone(), + &info, + None, + block_announce_validator, + 1, + ); + + let (a1_hash, a1_number) = { + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + (a1.hash(), *a1.header.number()) + }; + + // add a new peer with the same best block + sync.new_peer(peer_id.clone(), a1_hash, a1_number).unwrap(); + + // and request a justification for the block + sync.request_justification(&a1_hash, a1_number); + + // the justification request should be scheduled to that peer + assert!(sync + .justification_requests() + .any(|(who, request)| { who == peer_id && request.from == FromBlock::Hash(a1_hash) })); + + // there are no extra pending requests + assert_eq!(sync.extra_justifications.pending_requests().count(), 0,); + + // there's one in-flight extra request to the expected peer + assert!(sync + .extra_justifications + .active_requests() + .any(|(who, (hash, number))| { + *who == peer_id && *hash == a1_hash && *number == a1_number + })); + + // if the peer replies with an empty response (i.e. it doesn't know the block), + // the active request should be cleared. 
+ assert_eq!( + sync.on_block_justification( + peer_id.clone(), + BlockResponse:: { + id: 0, + blocks: vec![], + } + ), + Ok(OnBlockJustification::Nothing), + ); + + // there should be no in-flight requests + assert_eq!(sync.extra_justifications.active_requests().count(), 0,); + + // and the request should now be pending again, waiting for reschedule + assert!(sync + .extra_justifications + .pending_requests() + .any(|(hash, number)| { *hash == a1_hash && *number == a1_number })); + } } diff --git a/client/network/src/protocol/sync/blocks.rs b/client/network/src/protocol/sync/blocks.rs index 359287701e..5fbdd7caf0 100644 --- a/client/network/src/protocol/sync/blocks.rs +++ b/client/network/src/protocol/sync/blocks.rs @@ -14,297 +14,403 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::cmp; -use std::ops::Range; -use std::collections::{HashMap, BTreeMap}; -use std::collections::hash_map::Entry; -use log::trace; +use crate::protocol::message; use libp2p::PeerId; +use log::trace; use sp_runtime::traits::{Block as BlockT, NumberFor, One}; -use crate::protocol::message; +use std::cmp; +use std::collections::hash_map::Entry; +use std::collections::{BTreeMap, HashMap}; +use std::ops::Range; /// Block data with origin. #[derive(Debug, Clone, PartialEq, Eq)] pub struct BlockData { - /// The Block Message from the wire - pub block: message::BlockData, - /// The peer, we received this from - pub origin: Option, + /// The Block Message from the wire + pub block: message::BlockData, + /// The peer, we received this from + pub origin: Option, } #[derive(Debug)] enum BlockRangeState { - Downloading { - len: NumberFor, - downloading: u32, - }, - Complete(Vec>), + Downloading { len: NumberFor, downloading: u32 }, + Complete(Vec>), } impl BlockRangeState { - pub fn len(&self) -> NumberFor { - match *self { - BlockRangeState::Downloading { len, .. 
} => len, - BlockRangeState::Complete(ref blocks) => (blocks.len() as u32).into(), - } - } + pub fn len(&self) -> NumberFor { + match *self { + BlockRangeState::Downloading { len, .. } => len, + BlockRangeState::Complete(ref blocks) => (blocks.len() as u32).into(), + } + } } /// A collection of blocks being downloaded. #[derive(Default)] pub struct BlockCollection { - /// Downloaded blocks. - blocks: BTreeMap, BlockRangeState>, - peer_requests: HashMap>, + /// Downloaded blocks. + blocks: BTreeMap, BlockRangeState>, + peer_requests: HashMap>, } impl BlockCollection { - /// Create a new instance. - pub fn new() -> Self { - BlockCollection { - blocks: BTreeMap::new(), - peer_requests: HashMap::new(), - } - } - - /// Clear everything. - pub fn clear(&mut self) { - self.blocks.clear(); - self.peer_requests.clear(); - } - - /// Insert a set of blocks into collection. - pub fn insert(&mut self, start: NumberFor, blocks: Vec>, who: PeerId) { - if blocks.is_empty() { - return; - } - - match self.blocks.get(&start) { - Some(&BlockRangeState::Downloading { .. }) => { - trace!(target: "sync", "Inserting block data still marked as being downloaded: {}", start); - }, - Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { - trace!(target: "sync", "Ignored block data already downloaded: {}", start); - return; - }, - _ => (), - } - - self.blocks.insert(start, BlockRangeState::Complete(blocks.into_iter() - .map(|b| BlockData { origin: Some(who.clone()), block: b }).collect())); - } - - /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. 
- pub fn needed_blocks( - &mut self, - who: PeerId, - count: usize, - peer_best: NumberFor, - common: NumberFor, - max_parallel: u32, - max_ahead: u32, - ) -> Option>> { - if peer_best <= common { - // Bail out early - return None; - } - // First block number that we need to download - let first_different = common + >::one(); - let count = (count as u32).into(); - let (mut range, downloading) = { - let mut downloading_iter = self.blocks.iter().peekable(); - let mut prev: Option<(&NumberFor, &BlockRangeState)> = None; - loop { - let next = downloading_iter.next(); - break match &(prev, next) { - &(Some((start, &BlockRangeState::Downloading { ref len, downloading })), _) - if downloading < max_parallel => - (*start .. *start + *len, downloading), - &(Some((start, r)), Some((next_start, _))) if *start + r.len() < *next_start => - (*start + r.len() .. cmp::min(*next_start, *start + r.len() + count), 0), // gap - &(Some((start, r)), None) => - (*start + r.len() .. *start + r.len() + count, 0), // last range - &(None, None) => - (first_different .. first_different + count, 0), // empty - &(None, Some((start, _))) if *start > first_different => - (first_different .. 
cmp::min(first_different + count, *start), 0), // gap at the start - _ => { - prev = next; - continue - }, - } - } - }; - // crop to peers best - if range.start > peer_best { - trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best); - return None; - } - range.end = cmp::min(peer_best + One::one(), range.end); - - if self.blocks.iter().next().map_or(false, |(n, _)| range.start > *n + max_ahead.into()) { - trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start); - return None; - } - - self.peer_requests.insert(who, range.start); - self.blocks.insert(range.start, BlockRangeState::Downloading { - len: range.end - range.start, - downloading: downloading + 1 - }); - if range.end <= range.start { - panic!("Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", - range, count, peer_best, common, self.blocks); - } - Some(range) - } - - /// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain. - pub fn drain(&mut self, from: NumberFor) -> Vec> { - let mut drained = Vec::new(); - let mut ranges = Vec::new(); - - let mut prev = from; - for (start, range_data) in &mut self.blocks { - match range_data { - &mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => { - prev = *start + (blocks.len() as u32).into(); - // Remove all elements from `blocks` and add them to `drained` - drained.append(blocks); - ranges.push(*start); - }, - _ => break, - } - } - - for r in ranges { - self.blocks.remove(&r); - } - trace!(target: "sync", "Drained {} blocks", drained.len()); - drained - } - - pub fn clear_peer_download(&mut self, who: &PeerId) { - match self.peer_requests.entry(who.clone()) { - Entry::Occupied(entry) => { - let start = entry.remove(); - let remove = match self.blocks.get_mut(&start) { - Some(&mut BlockRangeState::Downloading { ref mut downloading, .. 
}) if *downloading > 1 => { - *downloading = *downloading - 1; - false - }, - Some(&mut BlockRangeState::Downloading { .. }) => { - true - }, - _ => { - false - } - }; - if remove { - self.blocks.remove(&start); - } - }, - _ => (), - } - } + /// Create a new instance. + pub fn new() -> Self { + BlockCollection { + blocks: BTreeMap::new(), + peer_requests: HashMap::new(), + } + } + + /// Clear everything. + pub fn clear(&mut self) { + self.blocks.clear(); + self.peer_requests.clear(); + } + + /// Insert a set of blocks into collection. + pub fn insert(&mut self, start: NumberFor, blocks: Vec>, who: PeerId) { + if blocks.is_empty() { + return; + } + + match self.blocks.get(&start) { + Some(&BlockRangeState::Downloading { .. }) => { + trace!(target: "sync", "Inserting block data still marked as being downloaded: {}", start); + } + Some(&BlockRangeState::Complete(ref existing)) if existing.len() >= blocks.len() => { + trace!(target: "sync", "Ignored block data already downloaded: {}", start); + return; + } + _ => (), + } + + self.blocks.insert( + start, + BlockRangeState::Complete( + blocks + .into_iter() + .map(|b| BlockData { + origin: Some(who.clone()), + block: b, + }) + .collect(), + ), + ); + } + + /// Returns a set of block hashes that require a header download. The returned set is marked as being downloaded. 
+ pub fn needed_blocks( + &mut self, + who: PeerId, + count: usize, + peer_best: NumberFor, + common: NumberFor, + max_parallel: u32, + max_ahead: u32, + ) -> Option>> { + if peer_best <= common { + // Bail out early + return None; + } + // First block number that we need to download + let first_different = common + >::one(); + let count = (count as u32).into(); + let (mut range, downloading) = { + let mut downloading_iter = self.blocks.iter().peekable(); + let mut prev: Option<(&NumberFor, &BlockRangeState)> = None; + loop { + let next = downloading_iter.next(); + break match &(prev, next) { + &( + Some(( + start, + &BlockRangeState::Downloading { + ref len, + downloading, + }, + )), + _, + ) if downloading < max_parallel => (*start..*start + *len, downloading), + &(Some((start, r)), Some((next_start, _))) + if *start + r.len() < *next_start => + { + ( + *start + r.len()..cmp::min(*next_start, *start + r.len() + count), + 0, + ) + } // gap + &(Some((start, r)), None) => (*start + r.len()..*start + r.len() + count, 0), // last range + &(None, None) => (first_different..first_different + count, 0), // empty + &(None, Some((start, _))) if *start > first_different => ( + first_different..cmp::min(first_different + count, *start), + 0, + ), // gap at the start + _ => { + prev = next; + continue; + } + }; + } + }; + // crop to peers best + if range.start > peer_best { + trace!(target: "sync", "Out of range for peer {} ({} vs {})", who, range.start, peer_best); + return None; + } + range.end = cmp::min(peer_best + One::one(), range.end); + + if self + .blocks + .iter() + .next() + .map_or(false, |(n, _)| range.start > *n + max_ahead.into()) + { + trace!(target: "sync", "Too far ahead for peer {} ({})", who, range.start); + return None; + } + + self.peer_requests.insert(who, range.start); + self.blocks.insert( + range.start, + BlockRangeState::Downloading { + len: range.end - range.start, + downloading: downloading + 1, + }, + ); + if range.end <= range.start { + panic!( 
+ "Empty range {:?}, count={}, peer_best={}, common={}, blocks={:?}", + range, count, peer_best, common, self.blocks + ); + } + Some(range) + } + + /// Get a valid chain of blocks ordered in descending order and ready for importing into blockchain. + pub fn drain(&mut self, from: NumberFor) -> Vec> { + let mut drained = Vec::new(); + let mut ranges = Vec::new(); + + let mut prev = from; + for (start, range_data) in &mut self.blocks { + match range_data { + &mut BlockRangeState::Complete(ref mut blocks) if *start <= prev => { + prev = *start + (blocks.len() as u32).into(); + // Remove all elements from `blocks` and add them to `drained` + drained.append(blocks); + ranges.push(*start); + } + _ => break, + } + } + + for r in ranges { + self.blocks.remove(&r); + } + trace!(target: "sync", "Drained {} blocks", drained.len()); + drained + } + + pub fn clear_peer_download(&mut self, who: &PeerId) { + match self.peer_requests.entry(who.clone()) { + Entry::Occupied(entry) => { + let start = entry.remove(); + let remove = match self.blocks.get_mut(&start) { + Some(&mut BlockRangeState::Downloading { + ref mut downloading, + .. + }) if *downloading > 1 => { + *downloading = *downloading - 1; + false + } + Some(&mut BlockRangeState::Downloading { .. }) => true, + _ => false, + }; + if remove { + self.blocks.remove(&start); + } + } + _ => (), + } + } } #[cfg(test)] mod test { - use super::{BlockCollection, BlockData, BlockRangeState}; - use crate::{protocol::message, PeerId}; - use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; - use sp_core::H256; - - type Block = RawBlock>; - - fn is_empty(bc: &BlockCollection) -> bool { - bc.blocks.is_empty() && - bc.peer_requests.is_empty() - } - - fn generate_blocks(n: usize) -> Vec> { - (0 .. 
n).map(|_| message::generic::BlockData { - hash: H256::random(), - header: None, - body: None, - message_queue: None, - receipt: None, - justification: None, - }).collect() - } - - #[test] - fn create_clear() { - let mut bc = BlockCollection::new(); - assert!(is_empty(&bc)); - bc.insert(1, generate_blocks(100), PeerId::random()); - assert!(!is_empty(&bc)); - bc.clear(); - assert!(is_empty(&bc)); - } - - #[test] - fn insert_blocks() { - let mut bc = BlockCollection::new(); - assert!(is_empty(&bc)); - let peer0 = PeerId::random(); - let peer1 = PeerId::random(); - let peer2 = PeerId::random(); - - let blocks = generate_blocks(150); - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(1 .. 41)); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(41 .. 81)); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), Some(81 .. 121)); - - bc.clear_peer_download(&peer1); - bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); - assert_eq!(bc.drain(1), vec![]); - assert_eq!(bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), Some(121 .. 151)); - bc.clear_peer_download(&peer0); - bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); - - assert_eq!(bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), Some(11 .. 41)); - assert_eq!(bc.drain(1), blocks[1..11].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()); - - bc.clear_peer_download(&peer0); - bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); - - let drained = bc.drain(12); - assert_eq!(drained[..30], blocks[11..41].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer0.clone()) }).collect::>()[..]); - assert_eq!(drained[30..], blocks[41..81].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); - - bc.clear_peer_download(&peer2); - assert_eq!(bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), Some(81 .. 
121)); - bc.clear_peer_download(&peer2); - bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); - bc.clear_peer_download(&peer1); - bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); - - assert_eq!(bc.drain(80), vec![]); - let drained = bc.drain(81); - assert_eq!(drained[..40], blocks[81..121].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer2.clone()) }).collect::>()[..]); - assert_eq!(drained[40..], blocks[121..150].iter() - .map(|b| BlockData { block: b.clone(), origin: Some(peer1.clone()) }).collect::>()[..]); - } - - #[test] - fn large_gap() { - let mut bc: BlockCollection = BlockCollection::new(); - bc.blocks.insert(100, BlockRangeState::Downloading { - len: 128, - downloading: 1, - }); - let blocks = generate_blocks(10).into_iter().map(|b| BlockData { block: b, origin: None }).collect(); - bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); - - let peer0 = PeerId::random(); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), Some(1 .. 100)); - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), None); // too far ahead - assert_eq!(bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), Some(100 + 128 .. 
100 + 128 + 128)); - } + use super::{BlockCollection, BlockData, BlockRangeState}; + use crate::{protocol::message, PeerId}; + use sp_core::H256; + use sp_runtime::testing::{Block as RawBlock, ExtrinsicWrapper}; + + type Block = RawBlock>; + + fn is_empty(bc: &BlockCollection) -> bool { + bc.blocks.is_empty() && bc.peer_requests.is_empty() + } + + fn generate_blocks(n: usize) -> Vec> { + (0..n) + .map(|_| message::generic::BlockData { + hash: H256::random(), + header: None, + body: None, + message_queue: None, + receipt: None, + justification: None, + }) + .collect() + } + + #[test] + fn create_clear() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + bc.insert(1, generate_blocks(100), PeerId::random()); + assert!(!is_empty(&bc)); + bc.clear(); + assert!(is_empty(&bc)); + } + + #[test] + fn insert_blocks() { + let mut bc = BlockCollection::new(); + assert!(is_empty(&bc)); + let peer0 = PeerId::random(); + let peer1 = PeerId::random(); + let peer2 = PeerId::random(); + + let blocks = generate_blocks(150); + assert_eq!( + bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), + Some(1..41) + ); + assert_eq!( + bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), + Some(41..81) + ); + assert_eq!( + bc.needed_blocks(peer2.clone(), 40, 150, 0, 1, 200), + Some(81..121) + ); + + bc.clear_peer_download(&peer1); + bc.insert(41, blocks[41..81].to_vec(), peer1.clone()); + assert_eq!(bc.drain(1), vec![]); + assert_eq!( + bc.needed_blocks(peer1.clone(), 40, 150, 0, 1, 200), + Some(121..151) + ); + bc.clear_peer_download(&peer0); + bc.insert(1, blocks[1..11].to_vec(), peer0.clone()); + + assert_eq!( + bc.needed_blocks(peer0.clone(), 40, 150, 0, 1, 200), + Some(11..41) + ); + assert_eq!( + bc.drain(1), + blocks[1..11] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer0.clone()) + }) + .collect::>() + ); + + bc.clear_peer_download(&peer0); + bc.insert(11, blocks[11..41].to_vec(), peer0.clone()); + + let drained = bc.drain(12); + assert_eq!( + 
drained[..30], + blocks[11..41] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer0.clone()) + }) + .collect::>()[..] + ); + assert_eq!( + drained[30..], + blocks[41..81] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer1.clone()) + }) + .collect::>()[..] + ); + + bc.clear_peer_download(&peer2); + assert_eq!( + bc.needed_blocks(peer2.clone(), 40, 150, 80, 1, 200), + Some(81..121) + ); + bc.clear_peer_download(&peer2); + bc.insert(81, blocks[81..121].to_vec(), peer2.clone()); + bc.clear_peer_download(&peer1); + bc.insert(121, blocks[121..150].to_vec(), peer1.clone()); + + assert_eq!(bc.drain(80), vec![]); + let drained = bc.drain(81); + assert_eq!( + drained[..40], + blocks[81..121] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer2.clone()) + }) + .collect::>()[..] + ); + assert_eq!( + drained[40..], + blocks[121..150] + .iter() + .map(|b| BlockData { + block: b.clone(), + origin: Some(peer1.clone()) + }) + .collect::>()[..] 
+ ); + } + + #[test] + fn large_gap() { + let mut bc: BlockCollection = BlockCollection::new(); + bc.blocks.insert( + 100, + BlockRangeState::Downloading { + len: 128, + downloading: 1, + }, + ); + let blocks = generate_blocks(10) + .into_iter() + .map(|b| BlockData { + block: b, + origin: None, + }) + .collect(); + bc.blocks.insert(114305, BlockRangeState::Complete(blocks)); + + let peer0 = PeerId::random(); + assert_eq!( + bc.needed_blocks(peer0.clone(), 128, 10000, 000, 1, 200), + Some(1..100) + ); + assert_eq!( + bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200), + None + ); // too far ahead + assert_eq!( + bc.needed_blocks(peer0.clone(), 128, 10000, 600, 1, 200000), + Some(100 + 128..100 + 128 + 128) + ); + } } diff --git a/client/network/src/protocol/sync/extra_requests.rs b/client/network/src/protocol/sync/extra_requests.rs index 3d854b574b..8ffd425bb2 100644 --- a/client/network/src/protocol/sync/extra_requests.rs +++ b/client/network/src/protocol/sync/extra_requests.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sp_blockchain::Error as ClientError; use crate::protocol::sync::{PeerSync, PeerSyncState}; use fork_tree::ForkTree; use libp2p::PeerId; use log::{debug, trace, warn}; +use sp_blockchain::Error as ClientError; use sp_runtime::traits::{Block as BlockT, NumberFor, Zero}; use std::collections::{HashMap, HashSet, VecDeque}; use std::time::Duration; @@ -38,535 +38,629 @@ pub(crate) type ExtraRequest = (::Hash, NumberFor); /// competing fork). 
#[derive(Debug)] pub(crate) struct ExtraRequests { - tree: ForkTree, ()>, - /// best finalized block number that we have seen since restart - best_seen_finalized_number: NumberFor, - /// requests which have been queued for later processing - pending_requests: VecDeque>, - /// requests which are currently underway to some peer - active_requests: HashMap>, - /// previous requests without response - failed_requests: HashMap, Vec<(PeerId, Instant)>>, - /// successful requests - importing_requests: HashSet>, - /// the name of this type of extra request (useful for logging.) - request_type_name: &'static str, + tree: ForkTree, ()>, + /// best finalized block number that we have seen since restart + best_seen_finalized_number: NumberFor, + /// requests which have been queued for later processing + pending_requests: VecDeque>, + /// requests which are currently underway to some peer + active_requests: HashMap>, + /// previous requests without response + failed_requests: HashMap, Vec<(PeerId, Instant)>>, + /// successful requests + importing_requests: HashSet>, + /// the name of this type of extra request (useful for logging.) + request_type_name: &'static str, } #[derive(Debug)] pub(crate) struct Metrics { - pub(crate) pending_requests: u32, - pub(crate) active_requests: u32, - pub(crate) importing_requests: u32, - pub(crate) failed_requests: u32, - _priv: () + pub(crate) pending_requests: u32, + pub(crate) active_requests: u32, + pub(crate) importing_requests: u32, + pub(crate) failed_requests: u32, + _priv: (), } impl ExtraRequests { - pub(crate) fn new(request_type_name: &'static str) -> Self { - ExtraRequests { - tree: ForkTree::new(), - best_seen_finalized_number: Zero::zero(), - pending_requests: VecDeque::new(), - active_requests: HashMap::new(), - failed_requests: HashMap::new(), - importing_requests: HashSet::new(), - request_type_name, - } - } - - /// Reset all state as if returned from `new`. 
- pub(crate) fn reset(&mut self) { - self.tree = ForkTree::new(); - self.pending_requests.clear(); - self.active_requests.clear(); - self.failed_requests.clear(); - } - - /// Returns an iterator-like struct that yields peers which extra - /// requests can be sent to. - pub(crate) fn matcher(&mut self) -> Matcher { - Matcher::new(self) - } - - /// Queue an extra data request to be considered by the `Matcher`. - pub(crate) fn schedule(&mut self, request: ExtraRequest, is_descendent_of: F) - where F: Fn(&B::Hash, &B::Hash) -> Result - { - match self.tree.import(request.0, request.1, (), &is_descendent_of) { - Ok(true) => { - // this is a new root so we add it to the current `pending_requests` - self.pending_requests.push_back((request.0, request.1)); - } - Err(fork_tree::Error::Revert) => { - // we have finalized further than the given request, presumably - // by some other part of the system (not sync). we can safely - // ignore the `Revert` error. - return; - }, - Err(err) => { - debug!(target: "sync", "Failed to insert request {:?} into tree: {:?}", request, err); - return; - } - _ => () - } - } - - /// Retry any pending request if a peer disconnected. - pub(crate) fn peer_disconnected(&mut self, who: &PeerId) { - if let Some(request) = self.active_requests.remove(who) { - self.pending_requests.push_front(request); - } - } - - /// Processes the response for the request previously sent to the given peer. - pub(crate) fn on_response(&mut self, who: PeerId, resp: Option) -> Option<(PeerId, B::Hash, NumberFor, R)> { - // we assume that the request maps to the given response, this is - // currently enforced by the outer network protocol before passing on - // messages to chain sync. 
- if let Some(request) = self.active_requests.remove(&who) { - if let Some(r) = resp { - trace!(target: "sync", "Queuing import of {} from {:?} for {:?}", - self.request_type_name, - who, - request, - ); - - self.importing_requests.insert(request); - return Some((who, request.0, request.1, r)) - } else { - trace!(target: "sync", "Empty {} response from {:?} for {:?}", - self.request_type_name, - who, - request, - ); - } - self.failed_requests.entry(request).or_insert(Vec::new()).push((who, Instant::now())); - self.pending_requests.push_front(request); - } else { - trace!(target: "sync", "No active {} request to {:?}", - self.request_type_name, - who, - ); - } - None - } - - /// Removes any pending extra requests for blocks lower than the given best finalized. - pub(crate) fn on_block_finalized( - &mut self, - best_finalized_hash: &B::Hash, - best_finalized_number: NumberFor, - is_descendent_of: F - ) -> Result<(), fork_tree::Error> - where F: Fn(&B::Hash, &B::Hash) -> Result - { - let request = (*best_finalized_hash, best_finalized_number); - - if self.try_finalize_root::<()>(request, Ok(request), false) { - return Ok(()) - } - - if best_finalized_number > self.best_seen_finalized_number { - // normally we'll receive finality notifications for every block => finalize would be enough - // but if many blocks are finalized at once, some notifications may be omitted - // => let's use finalize_with_ancestors here - match self.tree.finalize_with_ancestors( - best_finalized_hash, - best_finalized_number, - &is_descendent_of, - ) { - Err(fork_tree::Error::Revert) => { - // we might have finalized further already in which case we - // will get a `Revert` error which we can safely ignore. 
- }, - Err(err) => return Err(err), - Ok(_) => {}, - } - - self.best_seen_finalized_number = best_finalized_number; - } - - let roots = self.tree.roots().collect::>(); - - self.pending_requests.retain(|(h, n)| roots.contains(&(h, n, &()))); - self.active_requests.retain(|_, (h, n)| roots.contains(&(h, n, &()))); - self.failed_requests.retain(|(h, n), _| roots.contains(&(h, n, &()))); - - Ok(()) - } - - /// Try to finalize pending root. - /// - /// Returns true if import of this request has been scheduled. - pub(crate) fn try_finalize_root( - &mut self, - request: ExtraRequest, - result: Result, E>, - reschedule_on_failure: bool - ) -> bool - { - if !self.importing_requests.remove(&request) { - return false - } - - let (finalized_hash, finalized_number) = match result { - Ok(req) => (req.0, req.1), - Err(_) => { - if reschedule_on_failure { - self.pending_requests.push_front(request); - } - return true - } - }; - - if self.tree.finalize_root(&finalized_hash).is_none() { - warn!(target: "sync", "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", - finalized_hash, - finalized_number, - self.tree.roots().collect::>() - ); - return true - } - - self.failed_requests.clear(); - self.active_requests.clear(); - self.pending_requests.clear(); - self.pending_requests.extend(self.tree.roots().map(|(&h, &n, _)| (h, n))); - self.best_seen_finalized_number = finalized_number; - - true - } - - /// Returns an iterator over all active (in-flight) requests and associated peer id. - #[cfg(test)] - pub(crate) fn active_requests(&self) -> impl Iterator)> { - self.active_requests.iter() - } - - /// Returns an iterator over all scheduled pending requests. - #[cfg(test)] - pub(crate) fn pending_requests(&self) -> impl Iterator> { - self.pending_requests.iter() - } - - /// Get some key metrics. 
- pub(crate) fn metrics(&self) -> Metrics { - use std::convert::TryInto; - Metrics { - pending_requests: self.pending_requests.len().try_into().unwrap_or(std::u32::MAX), - active_requests: self.active_requests.len().try_into().unwrap_or(std::u32::MAX), - failed_requests: self.failed_requests.len().try_into().unwrap_or(std::u32::MAX), - importing_requests: self.importing_requests.len().try_into().unwrap_or(std::u32::MAX), - _priv: () - } - } + pub(crate) fn new(request_type_name: &'static str) -> Self { + ExtraRequests { + tree: ForkTree::new(), + best_seen_finalized_number: Zero::zero(), + pending_requests: VecDeque::new(), + active_requests: HashMap::new(), + failed_requests: HashMap::new(), + importing_requests: HashSet::new(), + request_type_name, + } + } + + /// Reset all state as if returned from `new`. + pub(crate) fn reset(&mut self) { + self.tree = ForkTree::new(); + self.pending_requests.clear(); + self.active_requests.clear(); + self.failed_requests.clear(); + } + + /// Returns an iterator-like struct that yields peers which extra + /// requests can be sent to. + pub(crate) fn matcher(&mut self) -> Matcher { + Matcher::new(self) + } + + /// Queue an extra data request to be considered by the `Matcher`. + pub(crate) fn schedule(&mut self, request: ExtraRequest, is_descendent_of: F) + where + F: Fn(&B::Hash, &B::Hash) -> Result, + { + match self + .tree + .import(request.0, request.1, (), &is_descendent_of) + { + Ok(true) => { + // this is a new root so we add it to the current `pending_requests` + self.pending_requests.push_back((request.0, request.1)); + } + Err(fork_tree::Error::Revert) => { + // we have finalized further than the given request, presumably + // by some other part of the system (not sync). we can safely + // ignore the `Revert` error. 
+ return; + } + Err(err) => { + debug!(target: "sync", "Failed to insert request {:?} into tree: {:?}", request, err); + return; + } + _ => (), + } + } + + /// Retry any pending request if a peer disconnected. + pub(crate) fn peer_disconnected(&mut self, who: &PeerId) { + if let Some(request) = self.active_requests.remove(who) { + self.pending_requests.push_front(request); + } + } + + /// Processes the response for the request previously sent to the given peer. + pub(crate) fn on_response( + &mut self, + who: PeerId, + resp: Option, + ) -> Option<(PeerId, B::Hash, NumberFor, R)> { + // we assume that the request maps to the given response, this is + // currently enforced by the outer network protocol before passing on + // messages to chain sync. + if let Some(request) = self.active_requests.remove(&who) { + if let Some(r) = resp { + trace!(target: "sync", "Queuing import of {} from {:?} for {:?}", + self.request_type_name, + who, + request, + ); + + self.importing_requests.insert(request); + return Some((who, request.0, request.1, r)); + } else { + trace!(target: "sync", "Empty {} response from {:?} for {:?}", + self.request_type_name, + who, + request, + ); + } + self.failed_requests + .entry(request) + .or_insert(Vec::new()) + .push((who, Instant::now())); + self.pending_requests.push_front(request); + } else { + trace!(target: "sync", "No active {} request to {:?}", + self.request_type_name, + who, + ); + } + None + } + + /// Removes any pending extra requests for blocks lower than the given best finalized. 
+ pub(crate) fn on_block_finalized( + &mut self, + best_finalized_hash: &B::Hash, + best_finalized_number: NumberFor, + is_descendent_of: F, + ) -> Result<(), fork_tree::Error> + where + F: Fn(&B::Hash, &B::Hash) -> Result, + { + let request = (*best_finalized_hash, best_finalized_number); + + if self.try_finalize_root::<()>(request, Ok(request), false) { + return Ok(()); + } + + if best_finalized_number > self.best_seen_finalized_number { + // normally we'll receive finality notifications for every block => finalize would be enough + // but if many blocks are finalized at once, some notifications may be omitted + // => let's use finalize_with_ancestors here + match self.tree.finalize_with_ancestors( + best_finalized_hash, + best_finalized_number, + &is_descendent_of, + ) { + Err(fork_tree::Error::Revert) => { + // we might have finalized further already in which case we + // will get a `Revert` error which we can safely ignore. + } + Err(err) => return Err(err), + Ok(_) => {} + } + + self.best_seen_finalized_number = best_finalized_number; + } + + let roots = self.tree.roots().collect::>(); + + self.pending_requests + .retain(|(h, n)| roots.contains(&(h, n, &()))); + self.active_requests + .retain(|_, (h, n)| roots.contains(&(h, n, &()))); + self.failed_requests + .retain(|(h, n), _| roots.contains(&(h, n, &()))); + + Ok(()) + } + + /// Try to finalize pending root. + /// + /// Returns true if import of this request has been scheduled. 
+ pub(crate) fn try_finalize_root( + &mut self, + request: ExtraRequest, + result: Result, E>, + reschedule_on_failure: bool, + ) -> bool { + if !self.importing_requests.remove(&request) { + return false; + } + + let (finalized_hash, finalized_number) = match result { + Ok(req) => (req.0, req.1), + Err(_) => { + if reschedule_on_failure { + self.pending_requests.push_front(request); + } + return true; + } + }; + + if self.tree.finalize_root(&finalized_hash).is_none() { + warn!(target: "sync", "‼️ Imported {:?} {:?} which isn't a root in the tree: {:?}", + finalized_hash, + finalized_number, + self.tree.roots().collect::>() + ); + return true; + } + + self.failed_requests.clear(); + self.active_requests.clear(); + self.pending_requests.clear(); + self.pending_requests + .extend(self.tree.roots().map(|(&h, &n, _)| (h, n))); + self.best_seen_finalized_number = finalized_number; + + true + } + + /// Returns an iterator over all active (in-flight) requests and associated peer id. + #[cfg(test)] + pub(crate) fn active_requests(&self) -> impl Iterator)> { + self.active_requests.iter() + } + + /// Returns an iterator over all scheduled pending requests. + #[cfg(test)] + pub(crate) fn pending_requests(&self) -> impl Iterator> { + self.pending_requests.iter() + } + + /// Get some key metrics. + pub(crate) fn metrics(&self) -> Metrics { + use std::convert::TryInto; + Metrics { + pending_requests: self + .pending_requests + .len() + .try_into() + .unwrap_or(std::u32::MAX), + active_requests: self + .active_requests + .len() + .try_into() + .unwrap_or(std::u32::MAX), + failed_requests: self + .failed_requests + .len() + .try_into() + .unwrap_or(std::u32::MAX), + importing_requests: self + .importing_requests + .len() + .try_into() + .unwrap_or(std::u32::MAX), + _priv: (), + } + } } /// Matches peers with pending extra requests. #[derive(Debug)] pub(crate) struct Matcher<'a, B: BlockT> { - /// Length of pending requests collection. 
- /// Used to ensure we do not loop more than once over all pending requests. - remaining: usize, - extras: &'a mut ExtraRequests + /// Length of pending requests collection. + /// Used to ensure we do not loop more than once over all pending requests. + remaining: usize, + extras: &'a mut ExtraRequests, } impl<'a, B: BlockT> Matcher<'a, B> { - fn new(extras: &'a mut ExtraRequests) -> Self { - Matcher { - remaining: extras.pending_requests.len(), - extras - } - } - - /// Finds a peer to which a pending request can be sent. - /// - /// Peers are filtered according to the current known best block (i.e. we won't - /// send an extra request for block #10 to a peer at block #2), and we also - /// throttle requests to the same peer if a previous request yielded no results. - /// - /// This method returns as soon as it finds a peer that should be able to answer - /// our request. If no request is pending or no peer can handle it, `None` is - /// returned instead. - /// - /// # Note - /// - /// The returned `PeerId` (if any) is guaranteed to come from the given `peers` - /// argument. 
- pub(crate) fn next(&mut self, peers: &HashMap>) -> Option<(PeerId, ExtraRequest)> { - if self.remaining == 0 { - return None - } - - // clean up previously failed requests so we can retry again - for requests in self.extras.failed_requests.values_mut() { - requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT); - } - - while let Some(request) = self.extras.pending_requests.pop_front() { - for (peer, sync) in peers.iter().filter(|(_, sync)| sync.state == PeerSyncState::Available) { - // only ask peers that have synced at least up to the block number that we're asking the extra for - if sync.best_number < request.1 { - continue - } - // don't request to any peers that already have pending requests - if self.extras.active_requests.contains_key(peer) { - continue - } - // only ask if the same request has not failed for this peer before - if self.extras.failed_requests.get(&request).map(|rr| rr.iter().any(|i| &i.0 == peer)).unwrap_or(false) { - continue - } - self.extras.active_requests.insert(peer.clone(), request); - - trace!(target: "sync", "Sending {} request to {:?} for {:?}", - self.extras.request_type_name, - peer, - request, - ); - - return Some((peer.clone(), request)) - } - - self.extras.pending_requests.push_back(request); - self.remaining -= 1; - - if self.remaining == 0 { - break - } - } - - None - } + fn new(extras: &'a mut ExtraRequests) -> Self { + Matcher { + remaining: extras.pending_requests.len(), + extras, + } + } + + /// Finds a peer to which a pending request can be sent. + /// + /// Peers are filtered according to the current known best block (i.e. we won't + /// send an extra request for block #10 to a peer at block #2), and we also + /// throttle requests to the same peer if a previous request yielded no results. + /// + /// This method returns as soon as it finds a peer that should be able to answer + /// our request. If no request is pending or no peer can handle it, `None` is + /// returned instead. 
+ /// + /// # Note + /// + /// The returned `PeerId` (if any) is guaranteed to come from the given `peers` + /// argument. + pub(crate) fn next( + &mut self, + peers: &HashMap>, + ) -> Option<(PeerId, ExtraRequest)> { + if self.remaining == 0 { + return None; + } + + // clean up previously failed requests so we can retry again + for requests in self.extras.failed_requests.values_mut() { + requests.retain(|(_, instant)| instant.elapsed() < EXTRA_RETRY_WAIT); + } + + while let Some(request) = self.extras.pending_requests.pop_front() { + for (peer, sync) in peers + .iter() + .filter(|(_, sync)| sync.state == PeerSyncState::Available) + { + // only ask peers that have synced at least up to the block number that we're asking the extra for + if sync.best_number < request.1 { + continue; + } + // don't request to any peers that already have pending requests + if self.extras.active_requests.contains_key(peer) { + continue; + } + // only ask if the same request has not failed for this peer before + if self + .extras + .failed_requests + .get(&request) + .map(|rr| rr.iter().any(|i| &i.0 == peer)) + .unwrap_or(false) + { + continue; + } + self.extras.active_requests.insert(peer.clone(), request); + + trace!(target: "sync", "Sending {} request to {:?} for {:?}", + self.extras.request_type_name, + peer, + request, + ); + + return Some((peer.clone(), request)); + } + + self.extras.pending_requests.push_back(request); + self.remaining -= 1; + + if self.remaining == 0 { + break; + } + } + + None + } } #[cfg(test)] mod tests { - use crate::protocol::sync::PeerSync; - use sp_blockchain::Error as ClientError; - use quickcheck::{Arbitrary, Gen, QuickCheck, StdThreadGen}; - use rand::Rng; - use std::collections::{HashMap, HashSet}; - use super::*; - use sp_test_primitives::{Block, BlockNumber, Hash}; - - #[test] - fn requests_are_processed_in_order() { - fn property(mut peers: ArbitraryPeers) { - let mut requests = ExtraRequests::::new("test"); - - let num_peers_available = 
peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); - - for i in 0 .. num_peers_available { - requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) - } - - let pending = requests.pending_requests.clone(); - let mut m = requests.matcher(); - - for p in &pending { - let (peer, r) = m.next(&peers.0).unwrap(); - assert_eq!(p, &r); - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); - } - } - - QuickCheck::with_gen(StdThreadGen::new(19)) - .quickcheck(property as fn(ArbitraryPeers)) - } - - #[test] - fn new_roots_schedule_new_request() { - fn property(data: Vec) { - let mut requests = ExtraRequests::::new("test"); - for (i, number) in data.into_iter().enumerate() { - let hash = [i as u8; 32].into(); - let pending = requests.pending_requests.len(); - let is_root = requests.tree.roots().any(|(&h, &n, _)| hash == h && number == n); - requests.schedule((hash, number), |a, b| Ok(a[0] >= b[0])); - if !is_root { - assert_eq!(1 + pending, requests.pending_requests.len()) - } - } - } - QuickCheck::new().quickcheck(property as fn(Vec)) - } - - #[test] - fn disconnecting_implies_rescheduling() { - fn property(mut peers: ArbitraryPeers) -> bool { - let mut requests = ExtraRequests::::new("test"); - - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); - - for i in 0 .. 
num_peers_available { - requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) - } - - let mut m = requests.matcher(); - while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); - } - - assert!(requests.pending_requests.is_empty()); - - let active_peers = requests.active_requests.keys().cloned().collect::>(); - let previously_active = requests.active_requests.values().cloned().collect::>(); - - for peer in &active_peers { - requests.peer_disconnected(peer) - } - - assert!(requests.active_requests.is_empty()); - - previously_active == requests.pending_requests.iter().cloned().collect::>() - } - - QuickCheck::with_gen(StdThreadGen::new(19)) - .quickcheck(property as fn(ArbitraryPeers) -> bool) - } - - #[test] - fn no_response_reschedules() { - fn property(mut peers: ArbitraryPeers) { - let mut requests = ExtraRequests::::new("test"); - - let num_peers_available = peers.0.values() - .filter(|s| s.state == PeerSyncState::Available).count(); - - for i in 0 .. 
num_peers_available { - requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) - } - - let mut m = requests.matcher(); - while let Some((peer, r)) = m.next(&peers.0) { - peers.0.get_mut(&peer).unwrap().state = PeerSyncState::DownloadingJustification(r.0); - } - - let active = requests.active_requests.iter().map(|(p, &r)| (p.clone(), r)).collect::>(); - - for (peer, req) in &active { - assert!(requests.failed_requests.get(req).is_none()); - assert!(!requests.pending_requests.contains(req)); - assert!(requests.on_response::<()>(peer.clone(), None).is_none()); - assert!(requests.pending_requests.contains(req)); - assert_eq!(1, requests.failed_requests.get(req).unwrap().iter().filter(|(p, _)| p == peer).count()) - } - } - - QuickCheck::with_gen(StdThreadGen::new(19)) - .quickcheck(property as fn(ArbitraryPeers)) - } - - #[test] - fn request_is_rescheduled_when_earlier_block_is_finalized() { - let _ = ::env_logger::try_init(); - - let mut finality_proofs = ExtraRequests::::new("test"); - - let hash4 = [4; 32].into(); - let hash5 = [5; 32].into(); - let hash6 = [6; 32].into(); - let hash7 = [7; 32].into(); - - fn is_descendent_of(base: &Hash, target: &Hash) -> Result { - Ok(target[0] >= base[0]) - } - - // make #4 last finalized block - finality_proofs.tree.import(hash4, 4, (), &is_descendent_of).unwrap(); - finality_proofs.tree.finalize_root(&hash4); - - // schedule request for #6 - finality_proofs.schedule((hash6, 6), is_descendent_of); - - // receive finality proof for #5 - finality_proofs.importing_requests.insert((hash6, 6)); - finality_proofs.on_block_finalized(&hash5, 5, is_descendent_of).unwrap(); - finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash5, 5)), true); - - // ensure that request for #6 is still pending - assert_eq!(finality_proofs.pending_requests.iter().collect::>(), vec![&(hash6, 6)]); - - // receive finality proof for #7 - finality_proofs.importing_requests.insert((hash6, 6)); - finality_proofs.on_block_finalized(&hash6, 6, 
is_descendent_of).unwrap(); - finality_proofs.on_block_finalized(&hash7, 7, is_descendent_of).unwrap(); - finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash7, 7)), true); - - // ensure that there's no request for #6 - assert_eq!(finality_proofs.pending_requests.iter().collect::>(), Vec::<&(Hash, u64)>::new()); - } - - #[test] - fn ancestor_roots_are_finalized_when_finality_notification_is_missed() { - let mut finality_proofs = ExtraRequests::::new("test"); - - let hash4 = [4; 32].into(); - let hash5 = [5; 32].into(); - - fn is_descendent_of(base: &Hash, target: &Hash) -> Result { - Ok(target[0] >= base[0]) - } - - // schedule request for #4 - finality_proofs.schedule((hash4, 4), is_descendent_of); - - // receive finality notification for #5 (missing notification for #4!!!) - finality_proofs.importing_requests.insert((hash4, 5)); - finality_proofs.on_block_finalized(&hash5, 5, is_descendent_of).unwrap(); - assert_eq!(finality_proofs.tree.roots().count(), 0); - } - - // Some Arbitrary instances to allow easy construction of random peer sets: - - #[derive(Debug, Clone)] - struct ArbitraryPeerSyncState(PeerSyncState); - - impl Arbitrary for ArbitraryPeerSyncState { - fn arbitrary(g: &mut G) -> Self { - let s = match g.gen::() % 5 { - 0 => PeerSyncState::Available, - // TODO: 1 => PeerSyncState::AncestorSearch(g.gen(), AncestorSearchState), - 1 => PeerSyncState::DownloadingNew(g.gen::()), - 2 => PeerSyncState::DownloadingStale(Hash::random()), - 3 => PeerSyncState::DownloadingJustification(Hash::random()), - _ => PeerSyncState::DownloadingFinalityProof(Hash::random()) - }; - ArbitraryPeerSyncState(s) - } - } - - #[derive(Debug, Clone)] - struct ArbitraryPeerSync(PeerSync); - - impl Arbitrary for ArbitraryPeerSync { - fn arbitrary(g: &mut G) -> Self { - let ps = PeerSync { - common_number: g.gen(), - best_hash: Hash::random(), - best_number: g.gen(), - state: ArbitraryPeerSyncState::arbitrary(g).0, - recently_announced: Default::default() - }; - 
ArbitraryPeerSync(ps) - } - } - - #[derive(Debug, Clone)] - struct ArbitraryPeers(HashMap>); - - impl Arbitrary for ArbitraryPeers { - fn arbitrary(g: &mut G) -> Self { - let mut peers = HashMap::with_capacity(g.size()); - for _ in 0 .. g.size() { - peers.insert(PeerId::random(), ArbitraryPeerSync::arbitrary(g).0); - } - ArbitraryPeers(peers) - } - } - + use super::*; + use crate::protocol::sync::PeerSync; + use quickcheck::{Arbitrary, Gen, QuickCheck, StdThreadGen}; + use rand::Rng; + use sp_blockchain::Error as ClientError; + use sp_test_primitives::{Block, BlockNumber, Hash}; + use std::collections::{HashMap, HashSet}; + + #[test] + fn requests_are_processed_in_order() { + fn property(mut peers: ArbitraryPeers) { + let mut requests = ExtraRequests::::new("test"); + + let num_peers_available = peers + .0 + .values() + .filter(|s| s.state == PeerSyncState::Available) + .count(); + + for i in 0..num_peers_available { + requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) + } + + let pending = requests.pending_requests.clone(); + let mut m = requests.matcher(); + + for p in &pending { + let (peer, r) = m.next(&peers.0).unwrap(); + assert_eq!(p, &r); + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); + } + } + + QuickCheck::with_gen(StdThreadGen::new(19)).quickcheck(property as fn(ArbitraryPeers)) + } + + #[test] + fn new_roots_schedule_new_request() { + fn property(data: Vec) { + let mut requests = ExtraRequests::::new("test"); + for (i, number) in data.into_iter().enumerate() { + let hash = [i as u8; 32].into(); + let pending = requests.pending_requests.len(); + let is_root = requests + .tree + .roots() + .any(|(&h, &n, _)| hash == h && number == n); + requests.schedule((hash, number), |a, b| Ok(a[0] >= b[0])); + if !is_root { + assert_eq!(1 + pending, requests.pending_requests.len()) + } + } + } + QuickCheck::new().quickcheck(property as fn(Vec)) + } + + #[test] + fn disconnecting_implies_rescheduling() { + 
fn property(mut peers: ArbitraryPeers) -> bool { + let mut requests = ExtraRequests::::new("test"); + + let num_peers_available = peers + .0 + .values() + .filter(|s| s.state == PeerSyncState::Available) + .count(); + + for i in 0..num_peers_available { + requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) + } + + let mut m = requests.matcher(); + while let Some((peer, r)) = m.next(&peers.0) { + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); + } + + assert!(requests.pending_requests.is_empty()); + + let active_peers = requests.active_requests.keys().cloned().collect::>(); + let previously_active = requests + .active_requests + .values() + .cloned() + .collect::>(); + + for peer in &active_peers { + requests.peer_disconnected(peer) + } + + assert!(requests.active_requests.is_empty()); + + previously_active + == requests + .pending_requests + .iter() + .cloned() + .collect::>() + } + + QuickCheck::with_gen(StdThreadGen::new(19)) + .quickcheck(property as fn(ArbitraryPeers) -> bool) + } + + #[test] + fn no_response_reschedules() { + fn property(mut peers: ArbitraryPeers) { + let mut requests = ExtraRequests::::new("test"); + + let num_peers_available = peers + .0 + .values() + .filter(|s| s.state == PeerSyncState::Available) + .count(); + + for i in 0..num_peers_available { + requests.schedule((Hash::random(), i as u64), |a, b| Ok(a[0] >= b[0])) + } + + let mut m = requests.matcher(); + while let Some((peer, r)) = m.next(&peers.0) { + peers.0.get_mut(&peer).unwrap().state = + PeerSyncState::DownloadingJustification(r.0); + } + + let active = requests + .active_requests + .iter() + .map(|(p, &r)| (p.clone(), r)) + .collect::>(); + + for (peer, req) in &active { + assert!(requests.failed_requests.get(req).is_none()); + assert!(!requests.pending_requests.contains(req)); + assert!(requests.on_response::<()>(peer.clone(), None).is_none()); + assert!(requests.pending_requests.contains(req)); + assert_eq!( + 1, + 
requests + .failed_requests + .get(req) + .unwrap() + .iter() + .filter(|(p, _)| p == peer) + .count() + ) + } + } + + QuickCheck::with_gen(StdThreadGen::new(19)).quickcheck(property as fn(ArbitraryPeers)) + } + + #[test] + fn request_is_rescheduled_when_earlier_block_is_finalized() { + let _ = ::env_logger::try_init(); + + let mut finality_proofs = ExtraRequests::::new("test"); + + let hash4 = [4; 32].into(); + let hash5 = [5; 32].into(); + let hash6 = [6; 32].into(); + let hash7 = [7; 32].into(); + + fn is_descendent_of(base: &Hash, target: &Hash) -> Result { + Ok(target[0] >= base[0]) + } + + // make #4 last finalized block + finality_proofs + .tree + .import(hash4, 4, (), &is_descendent_of) + .unwrap(); + finality_proofs.tree.finalize_root(&hash4); + + // schedule request for #6 + finality_proofs.schedule((hash6, 6), is_descendent_of); + + // receive finality proof for #5 + finality_proofs.importing_requests.insert((hash6, 6)); + finality_proofs + .on_block_finalized(&hash5, 5, is_descendent_of) + .unwrap(); + finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash5, 5)), true); + + // ensure that request for #6 is still pending + assert_eq!( + finality_proofs.pending_requests.iter().collect::>(), + vec![&(hash6, 6)] + ); + + // receive finality proof for #7 + finality_proofs.importing_requests.insert((hash6, 6)); + finality_proofs + .on_block_finalized(&hash6, 6, is_descendent_of) + .unwrap(); + finality_proofs + .on_block_finalized(&hash7, 7, is_descendent_of) + .unwrap(); + finality_proofs.try_finalize_root::<()>((hash6, 6), Ok((hash7, 7)), true); + + // ensure that there's no request for #6 + assert_eq!( + finality_proofs.pending_requests.iter().collect::>(), + Vec::<&(Hash, u64)>::new() + ); + } + + #[test] + fn ancestor_roots_are_finalized_when_finality_notification_is_missed() { + let mut finality_proofs = ExtraRequests::::new("test"); + + let hash4 = [4; 32].into(); + let hash5 = [5; 32].into(); + + fn is_descendent_of(base: &Hash, target: &Hash) 
-> Result { + Ok(target[0] >= base[0]) + } + + // schedule request for #4 + finality_proofs.schedule((hash4, 4), is_descendent_of); + + // receive finality notification for #5 (missing notification for #4!!!) + finality_proofs.importing_requests.insert((hash4, 5)); + finality_proofs + .on_block_finalized(&hash5, 5, is_descendent_of) + .unwrap(); + assert_eq!(finality_proofs.tree.roots().count(), 0); + } + + // Some Arbitrary instances to allow easy construction of random peer sets: + + #[derive(Debug, Clone)] + struct ArbitraryPeerSyncState(PeerSyncState); + + impl Arbitrary for ArbitraryPeerSyncState { + fn arbitrary(g: &mut G) -> Self { + let s = match g.gen::() % 5 { + 0 => PeerSyncState::Available, + // TODO: 1 => PeerSyncState::AncestorSearch(g.gen(), AncestorSearchState), + 1 => PeerSyncState::DownloadingNew(g.gen::()), + 2 => PeerSyncState::DownloadingStale(Hash::random()), + 3 => PeerSyncState::DownloadingJustification(Hash::random()), + _ => PeerSyncState::DownloadingFinalityProof(Hash::random()), + }; + ArbitraryPeerSyncState(s) + } + } + + #[derive(Debug, Clone)] + struct ArbitraryPeerSync(PeerSync); + + impl Arbitrary for ArbitraryPeerSync { + fn arbitrary(g: &mut G) -> Self { + let ps = PeerSync { + common_number: g.gen(), + best_hash: Hash::random(), + best_number: g.gen(), + state: ArbitraryPeerSyncState::arbitrary(g).0, + recently_announced: Default::default(), + }; + ArbitraryPeerSync(ps) + } + } + + #[derive(Debug, Clone)] + struct ArbitraryPeers(HashMap>); + + impl Arbitrary for ArbitraryPeers { + fn arbitrary(g: &mut G) -> Self { + let mut peers = HashMap::with_capacity(g.size()); + for _ in 0..g.size() { + peers.insert(PeerId::random(), ArbitraryPeerSync::arbitrary(g).0); + } + ArbitraryPeers(peers) + } + } } diff --git a/client/network/src/protocol/util.rs b/client/network/src/protocol/util.rs index 9ba9bf6ae8..4f0ee5e382 100644 --- a/client/network/src/protocol/util.rs +++ b/client/network/src/protocol/util.rs @@ -22,55 +22,58 @@ use 
std::{hash::Hash, num::NonZeroUsize}; /// In the limit, for each element inserted the oldest existing element will be removed. #[derive(Debug, Clone)] pub(crate) struct LruHashSet { - set: LinkedHashSet, - limit: NonZeroUsize + set: LinkedHashSet, + limit: NonZeroUsize, } impl LruHashSet { - /// Create a new `LruHashSet` with the given (exclusive) limit. - pub(crate) fn new(limit: NonZeroUsize) -> Self { - Self { set: LinkedHashSet::new(), limit } - } + /// Create a new `LruHashSet` with the given (exclusive) limit. + pub(crate) fn new(limit: NonZeroUsize) -> Self { + Self { + set: LinkedHashSet::new(), + limit, + } + } - /// Insert element into the set. - /// - /// Returns `true` if this is a new element to the set, `false` otherwise. - /// Maintains the limit of the set by removing the oldest entry if necessary. - /// Inserting the same element will update its LRU position. - pub(crate) fn insert(&mut self, e: T) -> bool { - if self.set.insert(e) { - if self.set.len() == usize::from(self.limit) { - self.set.pop_front(); // remove oldest entry - } - return true - } - false - } + /// Insert element into the set. + /// + /// Returns `true` if this is a new element to the set, `false` otherwise. + /// Maintains the limit of the set by removing the oldest entry if necessary. + /// Inserting the same element will update its LRU position. + pub(crate) fn insert(&mut self, e: T) -> bool { + if self.set.insert(e) { + if self.set.len() == usize::from(self.limit) { + self.set.pop_front(); // remove oldest entry + } + return true; + } + false + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn maintains_limit() { - let three = NonZeroUsize::new(3).unwrap(); - let mut set = LruHashSet::::new(three); + #[test] + fn maintains_limit() { + let three = NonZeroUsize::new(3).unwrap(); + let mut set = LruHashSet::::new(three); - // First element. - assert!(set.insert(1)); - assert_eq!(vec![&1], set.set.iter().collect::>()); + // First element. 
+ assert!(set.insert(1)); + assert_eq!(vec![&1], set.set.iter().collect::>()); - // Second element. - assert!(set.insert(2)); - assert_eq!(vec![&1, &2], set.set.iter().collect::>()); + // Second element. + assert!(set.insert(2)); + assert_eq!(vec![&1, &2], set.set.iter().collect::>()); - // Inserting the same element updates its LRU position. - assert!(!set.insert(1)); - assert_eq!(vec![&2, &1], set.set.iter().collect::>()); + // Inserting the same element updates its LRU position. + assert!(!set.insert(1)); + assert_eq!(vec![&2, &1], set.set.iter().collect::>()); - // We reached the limit. The next element forces the oldest one out. - assert!(set.insert(3)); - assert_eq!(vec![&1, &3], set.set.iter().collect::>()); - } + // We reached the limit. The next element forces the oldest one out. + assert!(set.insert(3)); + assert_eq!(vec![&1, &3], set.set.iter().collect::>()); + } } diff --git a/client/network/src/service.rs b/client/network/src/service.rs index 091c75d635..dfeea11552 100644 --- a/client/network/src/service.rs +++ b/client/network/src/service.rs @@ -26,47 +26,57 @@ //! which is then processed by [`NetworkWorker::poll`]. 
use crate::{ - behaviour::{Behaviour, BehaviourOut}, - config::{parse_addr, parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, - discovery::DiscoveryConfig, - error::Error, - network_state::{ - NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, - }, - on_demand_layer::AlwaysBadChecker, - protocol::{self, event::Event, light_client_handler, LegacyConnectionKillError, sync::SyncState, PeerInfo, Protocol}, - transport, ReputationChange, + behaviour::{Behaviour, BehaviourOut}, + config::{parse_addr, parse_str_addr, NonReservedPeerMode, Params, Role, TransportConfig}, + discovery::DiscoveryConfig, + error::Error, + network_state::{ + NetworkState, NotConnectedPeer as NetworkStateNotConnectedPeer, Peer as NetworkStatePeer, + }, + on_demand_layer::AlwaysBadChecker, + protocol::{ + self, event::Event, light_client_handler, sync::SyncState, LegacyConnectionKillError, + PeerInfo, Protocol, + }, + transport, ReputationChange, }; use futures::prelude::*; -use libp2p::{PeerId, Multiaddr}; -use libp2p::core::{ConnectedPoint, Executor, connection::{ConnectionError, PendingConnectionError}, either::EitherError}; +use libp2p::core::{ + connection::{ConnectionError, PendingConnectionError}, + either::EitherError, + ConnectedPoint, Executor, +}; use libp2p::kad::record; use libp2p::ping::handler::PingFailure; -use libp2p::swarm::{NetworkBehaviour, SwarmBuilder, SwarmEvent, protocols_handler::NodeHandlerWrapperError}; +use libp2p::swarm::{ + protocols_handler::NodeHandlerWrapperError, NetworkBehaviour, SwarmBuilder, SwarmEvent, +}; +use libp2p::{Multiaddr, PeerId}; use log::{error, info, trace, warn}; use parking_lot::Mutex; use prometheus_endpoint::{ - register, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, HistogramVec, Opts, PrometheusError, Registry, U64, + register, Counter, CounterVec, Gauge, GaugeVec, HistogramOpts, HistogramVec, Opts, + PrometheusError, Registry, U64, }; use sc_peerset::PeersetHandle; use 
sp_consensus::import_queue::{BlockImportError, BlockImportResult, ImportQueue, Link}; use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, - ConsensusEngineId, + traits::{Block as BlockT, NumberFor}, + ConsensusEngineId, }; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{ - borrow::Cow, - collections::{HashMap, HashSet}, - fs, io, - marker::PhantomData, - pin::Pin, - str, - sync::{ - atomic::{AtomicBool, AtomicUsize, Ordering}, - Arc, - }, - task::Poll, + borrow::Cow, + collections::{HashMap, HashSet}, + fs, io, + marker::PhantomData, + pin::Pin, + str, + sync::{ + atomic::{AtomicBool, AtomicUsize, Ordering}, + Arc, + }, + task::Poll, }; mod out_events; @@ -81,25 +91,25 @@ impl ExHashT for T where T: std::hash::Hash + Eq + std::fmt::Debug + Clone + /// Transaction pool interface pub trait TransactionPool: Send + Sync { - /// Get transactions from the pool that are ready to be propagated. - fn transactions(&self) -> Vec<(H, B::Extrinsic)>; - /// Get hash of transaction. - fn hash_of(&self, transaction: &B::Extrinsic) -> H; - /// Import a transaction into the pool. - /// - /// Peer reputation is changed by reputation_change if transaction is accepted by the pool. - fn import( - &self, - report_handle: ReportHandle, - who: PeerId, - reputation_change_good: ReputationChange, - reputation_change_bad: ReputationChange, - transaction: B::Extrinsic, - ); - /// Notify the pool about transactions broadcast. - fn on_broadcasted(&self, propagations: HashMap>); - /// Get transaction by hash. - fn transaction(&self, hash: &H) -> Option; + /// Get transactions from the pool that are ready to be propagated. + fn transactions(&self) -> Vec<(H, B::Extrinsic)>; + /// Get hash of transaction. + fn hash_of(&self, transaction: &B::Extrinsic) -> H; + /// Import a transaction into the pool. + /// + /// Peer reputation is changed by reputation_change if transaction is accepted by the pool. 
+ fn import( + &self, + report_handle: ReportHandle, + who: PeerId, + reputation_change_good: ReputationChange, + reputation_change_bad: ReputationChange, + transaction: B::Extrinsic, + ); + /// Notify the pool about transactions broadcast. + fn on_broadcasted(&self, propagations: HashMap>); + /// Get transaction by hash. + fn transaction(&self, hash: &H) -> Option; } /// Dummy implementation of the [`TransactionPool`] trait for a transaction pool that is always @@ -111,398 +121,425 @@ pub trait TransactionPool: Send + Sync { pub struct EmptyTransactionPool; impl TransactionPool for EmptyTransactionPool { - fn transactions(&self) -> Vec<(H, B::Extrinsic)> { - Vec::new() - } - - fn hash_of(&self, _transaction: &B::Extrinsic) -> H { - Default::default() - } - - fn import( - &self, - _report_handle: ReportHandle, - _who: PeerId, - _rep_change_good: ReputationChange, - _rep_change_bad: ReputationChange, - _transaction: B::Extrinsic - ) {} - - fn on_broadcasted(&self, _: HashMap>) {} - - fn transaction(&self, _h: &H) -> Option { None } + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + Vec::new() + } + + fn hash_of(&self, _transaction: &B::Extrinsic) -> H { + Default::default() + } + + fn import( + &self, + _report_handle: ReportHandle, + _who: PeerId, + _rep_change_good: ReputationChange, + _rep_change_bad: ReputationChange, + _transaction: B::Extrinsic, + ) { + } + + fn on_broadcasted(&self, _: HashMap>) {} + + fn transaction(&self, _h: &H) -> Option { + None + } } /// A cloneable handle for reporting cost/benefits of peers. #[derive(Clone)] pub struct ReportHandle { - inner: PeersetHandle, // wraps it so we don't have to worry about breaking API. + inner: PeersetHandle, // wraps it so we don't have to worry about breaking API. 
} impl From for ReportHandle { - fn from(peerset_handle: PeersetHandle) -> Self { - ReportHandle { inner: peerset_handle } - } + fn from(peerset_handle: PeersetHandle) -> Self { + ReportHandle { + inner: peerset_handle, + } + } } impl ReportHandle { - /// Report a given peer as either beneficial (+) or costly (-) according to the - /// given scalar. - pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.inner.report_peer(who, cost_benefit); - } + /// Report a given peer as either beneficial (+) or costly (-) according to the + /// given scalar. + pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.inner.report_peer(who, cost_benefit); + } } /// Substrate network service. Handles network IO and manages connectivity. pub struct NetworkService { - /// Number of peers we're connected to. - num_connected: Arc, - /// The local external addresses. - external_addresses: Arc>>, - /// Are we actively catching up with the chain? - is_major_syncing: Arc, - /// Local copy of the `PeerId` of the local node. - local_peer_id: PeerId, - /// Bandwidth logging system. Can be queried to know the average bandwidth consumed. - bandwidth: Arc, - /// Peerset manager (PSM); manages the reputation of nodes and indicates the network which - /// nodes it should be connected to or not. - peerset: PeersetHandle, - /// Channel that sends messages to the actual worker. - to_worker: TracingUnboundedSender>, - /// Marker to pin the `H` generic. Serves no purpose except to not break backwards - /// compatibility. - _marker: PhantomData, + /// Number of peers we're connected to. + num_connected: Arc, + /// The local external addresses. + external_addresses: Arc>>, + /// Are we actively catching up with the chain? + is_major_syncing: Arc, + /// Local copy of the `PeerId` of the local node. + local_peer_id: PeerId, + /// Bandwidth logging system. Can be queried to know the average bandwidth consumed. 
+ bandwidth: Arc, + /// Peerset manager (PSM); manages the reputation of nodes and indicates the network which + /// nodes it should be connected to or not. + peerset: PeersetHandle, + /// Channel that sends messages to the actual worker. + to_worker: TracingUnboundedSender>, + /// Marker to pin the `H` generic. Serves no purpose except to not break backwards + /// compatibility. + _marker: PhantomData, } impl NetworkWorker { - /// Creates the network service. - /// - /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order - /// for the network processing to advance. From it, you can extract a `NetworkService` using - /// `worker.service()`. The `NetworkService` can be shared through the codebase. - pub fn new(params: Params) -> Result, Error> { - let (to_worker, from_worker) = tracing_unbounded("mpsc_network_worker"); - - if let Some(path) = params.network_config.net_config_path { - fs::create_dir_all(&path)?; - } - - // List of multiaddresses that we know in the network. - let mut known_addresses = Vec::new(); - let mut bootnodes = Vec::new(); - let mut boot_node_ids = HashSet::new(); - - // Process the bootnodes. - for bootnode in params.network_config.boot_nodes.iter() { - bootnodes.push(bootnode.peer_id.clone()); - boot_node_ids.insert(bootnode.peer_id.clone()); - known_addresses.push((bootnode.peer_id.clone(), bootnode.multiaddr.clone())); - } - - let boot_node_ids = Arc::new(boot_node_ids); - - // Check for duplicate bootnodes. - known_addresses.iter() - .try_for_each(|(peer_id, addr)| - if let Some(other) = known_addresses - .iter() - .find(|o| o.1 == *addr && o.0 != *peer_id) - { - Err(Error::DuplicateBootnode { - address: addr.clone(), - first_id: peer_id.clone(), - second_id: other.0.clone(), - }) - } else { - Ok(()) - } - )?; - - // Initialize the peers we should always be connected to. 
- let priority_groups = { - let mut reserved_nodes = HashSet::new(); - for reserved in params.network_config.reserved_nodes.iter() { - reserved_nodes.insert(reserved.peer_id.clone()); - known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); - } - - let mut sentries_and_validators = HashSet::new(); - match ¶ms.role { - Role::Sentry { validators } => { - for validator in validators { - sentries_and_validators.insert(validator.peer_id.clone()); - known_addresses.push((validator.peer_id.clone(), validator.multiaddr.clone())); - } - } - Role::Authority { sentry_nodes } => { - for sentry_node in sentry_nodes { - sentries_and_validators.insert(sentry_node.peer_id.clone()); - known_addresses.push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); - } - } - _ => {} - } - - vec![ - ("reserved".to_owned(), reserved_nodes), - ("sentries_and_validators".to_owned(), sentries_and_validators), - ] - }; - - let peerset_config = sc_peerset::PeersetConfig { - in_peers: params.network_config.in_peers, - out_peers: params.network_config.out_peers, - bootnodes, - reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny, - priority_groups, - }; - - // Private and public keys configuration. - let local_identity = params.network_config.node_key.clone().into_keypair()?; - let local_public = local_identity.public(); - let local_peer_id = local_public.clone().into_peer_id(); - info!(target: "sub-libp2p", "🏷 Local node identity is: {}", local_peer_id.to_base58()); - - // Initialize the metrics. 
- let metrics = match ¶ms.metrics_registry { - Some(registry) => Some(Metrics::register(®istry)?), - None => None - }; - - let checker = params.on_demand.as_ref() - .map(|od| od.checker().clone()) - .unwrap_or(Arc::new(AlwaysBadChecker)); - - let num_connected = Arc::new(AtomicUsize::new(0)); - let is_major_syncing = Arc::new(AtomicBool::new(false)); - let (protocol, peerset_handle) = Protocol::new( - protocol::ProtocolConfig { - roles: From::from(¶ms.role), - max_parallel_downloads: params.network_config.max_parallel_downloads, - }, - params.chain.clone(), - params.transaction_pool, - params.finality_proof_provider.clone(), - params.finality_proof_request_builder, - params.protocol_id.clone(), - peerset_config, - params.block_announce_validator, - params.metrics_registry.as_ref(), - boot_node_ids.clone(), - metrics.as_ref().map(|m| m.notifications_queues_size.clone()), - )?; - - // Build the swarm. - let (mut swarm, bandwidth): (Swarm, _) = { - let user_agent = format!( - "{} ({})", - params.network_config.client_version, - params.network_config.node_name - ); - let block_requests = { - let config = protocol::block_requests::Config::new(¶ms.protocol_id); - protocol::BlockRequests::new(config, params.chain.clone()) - }; - let light_client_handler = { - let config = protocol::light_client_handler::Config::new(¶ms.protocol_id); - protocol::LightClientHandler::new( - config, - params.chain, - checker, - peerset_handle.clone(), - ) - }; - - let discovery_config = { - let mut config = DiscoveryConfig::new(local_public.clone()); - config.with_user_defined(known_addresses); - config.discovery_limit(u64::from(params.network_config.out_peers) + 15); - config.add_protocol(params.protocol_id.clone()); - - match params.network_config.transport { - TransportConfig::MemoryOnly => { - config.with_mdns(false); - config.allow_private_ipv4(false); - } - TransportConfig::Normal { enable_mdns, allow_private_ipv4, .. 
} => { - config.with_mdns(enable_mdns); - config.allow_private_ipv4(allow_private_ipv4); - } - } - - config - }; - - let mut behaviour = Behaviour::new( - protocol, - params.role, - user_agent, - local_public, - block_requests, - light_client_handler, - discovery_config - ); - - for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { - behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); - } - let (transport, bandwidth) = { - let (config_mem, config_wasm, flowctrl) = match params.network_config.transport { - TransportConfig::MemoryOnly => (true, None, false), - TransportConfig::Normal { wasm_external_transport, use_yamux_flow_control, .. } => - (false, wasm_external_transport, use_yamux_flow_control) - }; - transport::build_transport(local_identity, config_mem, config_wasm, flowctrl) - }; - let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) - .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER); - if let Some(spawner) = params.executor { - struct SpawnImpl(F); - impl + Send>>)> Executor for SpawnImpl { - fn exec(&self, f: Pin + Send>>) { - (self.0)(f) - } - } - builder = builder.executor(Box::new(SpawnImpl(spawner))); - } - (builder.build(), bandwidth) - }; - - // Listen on multiaddresses. - for addr in ¶ms.network_config.listen_addresses { - if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { - warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) - } - } - - // Add external addresses. 
- for addr in ¶ms.network_config.public_addresses { - Swarm::::add_external_address(&mut swarm, addr.clone()); - } - - let external_addresses = Arc::new(Mutex::new(Vec::new())); - - let service = Arc::new(NetworkService { - bandwidth, - external_addresses: external_addresses.clone(), - num_connected: num_connected.clone(), - is_major_syncing: is_major_syncing.clone(), - peerset: peerset_handle, - local_peer_id, - to_worker: to_worker.clone(), - _marker: PhantomData, - }); - - Ok(NetworkWorker { - external_addresses, - num_connected, - is_major_syncing, - network_service: swarm, - service, - import_queue: params.import_queue, - from_worker, - light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), - event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, - metrics, - boot_node_ids, - }) - } - - /// Returns the downloaded bytes per second averaged over the past few seconds. - pub fn average_download_per_sec(&self) -> u64 { - self.service.bandwidth.average_download_per_sec() - } - - /// Returns the uploaded bytes per second averaged over the past few seconds. - pub fn average_upload_per_sec(&self) -> u64 { - self.service.bandwidth.average_upload_per_sec() - } - - /// Returns the number of peers we're connected to. - pub fn num_connected_peers(&self) -> usize { - self.network_service.user_protocol().num_connected_peers() - } - - /// Returns the number of peers we're connected to and that are being queried. - pub fn num_active_peers(&self) -> usize { - self.network_service.user_protocol().num_active_peers() - } - - /// Current global sync state. - pub fn sync_state(&self) -> SyncState { - self.network_service.user_protocol().sync_state() - } - - /// Target sync block number. - pub fn best_seen_block(&self) -> Option> { - self.network_service.user_protocol().best_seen_block() - } - - /// Number of peers participating in syncing. 
- pub fn num_sync_peers(&self) -> u32 { - self.network_service.user_protocol().num_sync_peers() - } - - /// Number of blocks in the import queue. - pub fn num_queued_blocks(&self) -> u32 { - self.network_service.user_protocol().num_queued_blocks() - } - - /// Returns the number of processed blocks. - pub fn num_processed_blocks(&self) -> usize { - self.network_service.user_protocol().num_processed_blocks() - } - - /// Number of active sync requests. - pub fn num_sync_requests(&self) -> usize { - self.network_service.user_protocol().num_sync_requests() - } - - /// Adds an address for a node. - pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { - self.network_service.add_known_address(peer_id, addr); - } - - /// Return a `NetworkService` that can be shared through the code base and can be used to - /// manipulate the worker. - pub fn service(&self) -> &Arc> { - &self.service - } - - /// You must call this when a new block is imported by the client. - pub fn on_block_imported(&mut self, header: B::Header, is_best: bool) { - self.network_service.user_protocol_mut().on_block_imported(&header, is_best); - } - - /// You must call this when a new block is finalized by the client. - pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) { - self.network_service.user_protocol_mut().on_block_finalized(hash, &header); - } - - /// Returns the local `PeerId`. - pub fn local_peer_id(&self) -> &PeerId { - Swarm::::local_peer_id(&self.network_service) - } - - /// Returns the list of addresses we are listening on. - /// - /// Does **NOT** include a trailing `/p2p/` with our `PeerId`. - pub fn listen_addresses(&self) -> impl Iterator { - Swarm::::listeners(&self.network_service) - } - - /// Get network state. - /// - /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally - /// everywhere about this. Please don't use this function to retrieve actual information. 
- pub fn network_state(&mut self) -> NetworkState { - let swarm = &mut self.network_service; - let open = swarm.user_protocol().open_peers().cloned().collect::>(); - - let connected_peers = { - let swarm = &mut *swarm; - open.iter().filter_map(move |peer_id| { + /// Creates the network service. + /// + /// Returns a `NetworkWorker` that implements `Future` and must be regularly polled in order + /// for the network processing to advance. From it, you can extract a `NetworkService` using + /// `worker.service()`. The `NetworkService` can be shared through the codebase. + pub fn new(params: Params) -> Result, Error> { + let (to_worker, from_worker) = tracing_unbounded("mpsc_network_worker"); + + if let Some(path) = params.network_config.net_config_path { + fs::create_dir_all(&path)?; + } + + // List of multiaddresses that we know in the network. + let mut known_addresses = Vec::new(); + let mut bootnodes = Vec::new(); + let mut boot_node_ids = HashSet::new(); + + // Process the bootnodes. + for bootnode in params.network_config.boot_nodes.iter() { + bootnodes.push(bootnode.peer_id.clone()); + boot_node_ids.insert(bootnode.peer_id.clone()); + known_addresses.push((bootnode.peer_id.clone(), bootnode.multiaddr.clone())); + } + + let boot_node_ids = Arc::new(boot_node_ids); + + // Check for duplicate bootnodes. + known_addresses.iter().try_for_each(|(peer_id, addr)| { + if let Some(other) = known_addresses + .iter() + .find(|o| o.1 == *addr && o.0 != *peer_id) + { + Err(Error::DuplicateBootnode { + address: addr.clone(), + first_id: peer_id.clone(), + second_id: other.0.clone(), + }) + } else { + Ok(()) + } + })?; + + // Initialize the peers we should always be connected to. 
+ let priority_groups = { + let mut reserved_nodes = HashSet::new(); + for reserved in params.network_config.reserved_nodes.iter() { + reserved_nodes.insert(reserved.peer_id.clone()); + known_addresses.push((reserved.peer_id.clone(), reserved.multiaddr.clone())); + } + + let mut sentries_and_validators = HashSet::new(); + match ¶ms.role { + Role::Sentry { validators } => { + for validator in validators { + sentries_and_validators.insert(validator.peer_id.clone()); + known_addresses + .push((validator.peer_id.clone(), validator.multiaddr.clone())); + } + } + Role::Authority { sentry_nodes } => { + for sentry_node in sentry_nodes { + sentries_and_validators.insert(sentry_node.peer_id.clone()); + known_addresses + .push((sentry_node.peer_id.clone(), sentry_node.multiaddr.clone())); + } + } + _ => {} + } + + vec![ + ("reserved".to_owned(), reserved_nodes), + ( + "sentries_and_validators".to_owned(), + sentries_and_validators, + ), + ] + }; + + let peerset_config = sc_peerset::PeersetConfig { + in_peers: params.network_config.in_peers, + out_peers: params.network_config.out_peers, + bootnodes, + reserved_only: params.network_config.non_reserved_mode == NonReservedPeerMode::Deny, + priority_groups, + }; + + // Private and public keys configuration. + let local_identity = params.network_config.node_key.clone().into_keypair()?; + let local_public = local_identity.public(); + let local_peer_id = local_public.clone().into_peer_id(); + info!(target: "sub-libp2p", "🏷 Local node identity is: {}", local_peer_id.to_base58()); + + // Initialize the metrics. 
+ let metrics = match ¶ms.metrics_registry { + Some(registry) => Some(Metrics::register(®istry)?), + None => None, + }; + + let checker = params + .on_demand + .as_ref() + .map(|od| od.checker().clone()) + .unwrap_or(Arc::new(AlwaysBadChecker)); + + let num_connected = Arc::new(AtomicUsize::new(0)); + let is_major_syncing = Arc::new(AtomicBool::new(false)); + let (protocol, peerset_handle) = Protocol::new( + protocol::ProtocolConfig { + roles: From::from(¶ms.role), + max_parallel_downloads: params.network_config.max_parallel_downloads, + }, + params.chain.clone(), + params.transaction_pool, + params.finality_proof_provider.clone(), + params.finality_proof_request_builder, + params.protocol_id.clone(), + peerset_config, + params.block_announce_validator, + params.metrics_registry.as_ref(), + boot_node_ids.clone(), + metrics + .as_ref() + .map(|m| m.notifications_queues_size.clone()), + )?; + + // Build the swarm. + let (mut swarm, bandwidth): (Swarm, _) = { + let user_agent = format!( + "{} ({})", + params.network_config.client_version, params.network_config.node_name + ); + let block_requests = { + let config = protocol::block_requests::Config::new(¶ms.protocol_id); + protocol::BlockRequests::new(config, params.chain.clone()) + }; + let light_client_handler = { + let config = protocol::light_client_handler::Config::new(¶ms.protocol_id); + protocol::LightClientHandler::new( + config, + params.chain, + checker, + peerset_handle.clone(), + ) + }; + + let discovery_config = { + let mut config = DiscoveryConfig::new(local_public.clone()); + config.with_user_defined(known_addresses); + config.discovery_limit(u64::from(params.network_config.out_peers) + 15); + config.add_protocol(params.protocol_id.clone()); + + match params.network_config.transport { + TransportConfig::MemoryOnly => { + config.with_mdns(false); + config.allow_private_ipv4(false); + } + TransportConfig::Normal { + enable_mdns, + allow_private_ipv4, + .. 
+ } => { + config.with_mdns(enable_mdns); + config.allow_private_ipv4(allow_private_ipv4); + } + } + + config + }; + + let mut behaviour = Behaviour::new( + protocol, + params.role, + user_agent, + local_public, + block_requests, + light_client_handler, + discovery_config, + ); + + for (engine_id, protocol_name) in ¶ms.network_config.notifications_protocols { + behaviour.register_notifications_protocol(*engine_id, protocol_name.clone()); + } + let (transport, bandwidth) = { + let (config_mem, config_wasm, flowctrl) = match params.network_config.transport { + TransportConfig::MemoryOnly => (true, None, false), + TransportConfig::Normal { + wasm_external_transport, + use_yamux_flow_control, + .. + } => (false, wasm_external_transport, use_yamux_flow_control), + }; + transport::build_transport(local_identity, config_mem, config_wasm, flowctrl) + }; + let mut builder = SwarmBuilder::new(transport, behaviour, local_peer_id.clone()) + .peer_connection_limit(crate::MAX_CONNECTIONS_PER_PEER); + if let Some(spawner) = params.executor { + struct SpawnImpl(F); + impl + Send>>)> Executor for SpawnImpl { + fn exec(&self, f: Pin + Send>>) { + (self.0)(f) + } + } + builder = builder.executor(Box::new(SpawnImpl(spawner))); + } + (builder.build(), bandwidth) + }; + + // Listen on multiaddresses. + for addr in ¶ms.network_config.listen_addresses { + if let Err(err) = Swarm::::listen_on(&mut swarm, addr.clone()) { + warn!(target: "sub-libp2p", "Can't listen on {} because: {:?}", addr, err) + } + } + + // Add external addresses. 
+ for addr in ¶ms.network_config.public_addresses { + Swarm::::add_external_address(&mut swarm, addr.clone()); + } + + let external_addresses = Arc::new(Mutex::new(Vec::new())); + + let service = Arc::new(NetworkService { + bandwidth, + external_addresses: external_addresses.clone(), + num_connected: num_connected.clone(), + is_major_syncing: is_major_syncing.clone(), + peerset: peerset_handle, + local_peer_id, + to_worker: to_worker.clone(), + _marker: PhantomData, + }); + + Ok(NetworkWorker { + external_addresses, + num_connected, + is_major_syncing, + network_service: swarm, + service, + import_queue: params.import_queue, + from_worker, + light_client_rqs: params.on_demand.and_then(|od| od.extract_receiver()), + event_streams: out_events::OutChannels::new(params.metrics_registry.as_ref())?, + metrics, + boot_node_ids, + }) + } + + /// Returns the downloaded bytes per second averaged over the past few seconds. + pub fn average_download_per_sec(&self) -> u64 { + self.service.bandwidth.average_download_per_sec() + } + + /// Returns the uploaded bytes per second averaged over the past few seconds. + pub fn average_upload_per_sec(&self) -> u64 { + self.service.bandwidth.average_upload_per_sec() + } + + /// Returns the number of peers we're connected to. + pub fn num_connected_peers(&self) -> usize { + self.network_service.user_protocol().num_connected_peers() + } + + /// Returns the number of peers we're connected to and that are being queried. + pub fn num_active_peers(&self) -> usize { + self.network_service.user_protocol().num_active_peers() + } + + /// Current global sync state. + pub fn sync_state(&self) -> SyncState { + self.network_service.user_protocol().sync_state() + } + + /// Target sync block number. + pub fn best_seen_block(&self) -> Option> { + self.network_service.user_protocol().best_seen_block() + } + + /// Number of peers participating in syncing. 
+ pub fn num_sync_peers(&self) -> u32 { + self.network_service.user_protocol().num_sync_peers() + } + + /// Number of blocks in the import queue. + pub fn num_queued_blocks(&self) -> u32 { + self.network_service.user_protocol().num_queued_blocks() + } + + /// Returns the number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.network_service.user_protocol().num_processed_blocks() + } + + /// Number of active sync requests. + pub fn num_sync_requests(&self) -> usize { + self.network_service.user_protocol().num_sync_requests() + } + + /// Adds an address for a node. + pub fn add_known_address(&mut self, peer_id: PeerId, addr: Multiaddr) { + self.network_service.add_known_address(peer_id, addr); + } + + /// Return a `NetworkService` that can be shared through the code base and can be used to + /// manipulate the worker. + pub fn service(&self) -> &Arc> { + &self.service + } + + /// You must call this when a new block is imported by the client. + pub fn on_block_imported(&mut self, header: B::Header, is_best: bool) { + self.network_service + .user_protocol_mut() + .on_block_imported(&header, is_best); + } + + /// You must call this when a new block is finalized by the client. + pub fn on_block_finalized(&mut self, hash: B::Hash, header: B::Header) { + self.network_service + .user_protocol_mut() + .on_block_finalized(hash, &header); + } + + /// Returns the local `PeerId`. + pub fn local_peer_id(&self) -> &PeerId { + Swarm::::local_peer_id(&self.network_service) + } + + /// Returns the list of addresses we are listening on. + /// + /// Does **NOT** include a trailing `/p2p/` with our `PeerId`. + pub fn listen_addresses(&self) -> impl Iterator { + Swarm::::listeners(&self.network_service) + } + + /// Get network state. + /// + /// **Note**: Use this only for debugging. This API is unstable. There are warnings literally + /// everywhere about this. Please don't use this function to retrieve actual information. 
+ pub fn network_state(&mut self) -> NetworkState { + let swarm = &mut self.network_service; + let open = swarm + .user_protocol() + .open_peers() + .cloned() + .collect::>(); + + let connected_peers = { + let swarm = &mut *swarm; + open.iter().filter_map(move |peer_id| { let known_addresses = NetworkBehaviour::addresses_of_peer(&mut **swarm, peer_id) .into_iter().collect(); @@ -524,322 +561,363 @@ impl NetworkWorker { known_addresses, })) }).collect() - }; - - let not_connected_peers = { - let swarm = &mut *swarm; - let list = swarm.known_peers().filter(|p| open.iter().all(|n| n != *p)) - .cloned().collect::>(); - list.into_iter().map(move |peer_id| { - (peer_id.to_base58(), NetworkStateNotConnectedPeer { - version_string: swarm.node(&peer_id) - .and_then(|i| i.client_version().map(|s| s.to_owned())).clone(), - latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), - known_addresses: NetworkBehaviour::addresses_of_peer(&mut **swarm, &peer_id) - .into_iter().collect(), - }) - }).collect() - }; - - NetworkState { - peer_id: Swarm::::local_peer_id(&swarm).to_base58(), - listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), - external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), - average_download_per_sec: self.service.bandwidth.average_download_per_sec(), - average_upload_per_sec: self.service.bandwidth.average_upload_per_sec(), - connected_peers, - not_connected_peers, - peerset: swarm.user_protocol_mut().peerset_debug_info(), - } - } - - /// Get currently connected peers. - pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo)> { - self.network_service.user_protocol_mut() - .peers_info() - .map(|(id, info)| (id.clone(), info.clone())) - .collect() - } - - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_reserved_peer(&self, peer: PeerId) { - self.service.remove_reserved_peer(peer); - } - - /// Adds a `PeerId` and its address as reserved. 
The string should encode the address - /// and peer ID of the remote node. - pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { - self.service.add_reserved_peer(peer) - } + }; + + let not_connected_peers = { + let swarm = &mut *swarm; + let list = swarm + .known_peers() + .filter(|p| open.iter().all(|n| n != *p)) + .cloned() + .collect::>(); + list.into_iter() + .map(move |peer_id| { + ( + peer_id.to_base58(), + NetworkStateNotConnectedPeer { + version_string: swarm + .node(&peer_id) + .and_then(|i| i.client_version().map(|s| s.to_owned())) + .clone(), + latest_ping_time: swarm.node(&peer_id).and_then(|i| i.latest_ping()), + known_addresses: NetworkBehaviour::addresses_of_peer( + &mut **swarm, + &peer_id, + ) + .into_iter() + .collect(), + }, + ) + }) + .collect() + }; + + NetworkState { + peer_id: Swarm::::local_peer_id(&swarm).to_base58(), + listened_addresses: Swarm::::listeners(&swarm).cloned().collect(), + external_addresses: Swarm::::external_addresses(&swarm).cloned().collect(), + average_download_per_sec: self.service.bandwidth.average_download_per_sec(), + average_upload_per_sec: self.service.bandwidth.average_upload_per_sec(), + connected_peers, + not_connected_peers, + peerset: swarm.user_protocol_mut().peerset_debug_info(), + } + } + + /// Get currently connected peers. + pub fn peers_debug_info(&mut self) -> Vec<(PeerId, PeerInfo)> { + self.network_service + .user_protocol_mut() + .peers_info() + .map(|(id, info)| (id.clone(), info.clone())) + .collect() + } + + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_reserved_peer(&self, peer: PeerId) { + self.service.remove_reserved_peer(peer); + } + + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. + pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + self.service.add_reserved_peer(peer) + } } impl NetworkService { - /// Returns the local `PeerId`. 
- pub fn local_peer_id(&self) -> &PeerId { - &self.local_peer_id - } - - /// Writes a message on an open notifications channel. Has no effect if the notifications - /// channel with this protocol name is closed. - /// - /// > **Note**: The reason why this is a no-op in the situation where we have no channel is - /// > that we don't guarantee message delivery anyway. Networking issues can cause - /// > connections to drop at any time, and higher-level logic shouldn't differentiate - /// > between the remote voluntarily closing a substream or a network error - /// > preventing the message from being delivered. - /// - /// The protocol must have been registered with `register_notifications_protocol`. - /// - pub fn write_notification(&self, target: PeerId, engine_id: ConsensusEngineId, message: Vec) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::WriteNotification { - target, - engine_id, - message, - }); - } - - /// Returns a stream containing the events that happen on the network. - /// - /// If this method is called multiple times, the events are duplicated. - /// - /// The stream never ends (unless the `NetworkWorker` gets shut down). - /// - /// The name passed is used to identify the channel in the Prometheus metrics. Note that the - /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having - /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory - pub fn event_stream(&self, name: &'static str) -> impl Stream { - // Note: when transitioning to stable futures, remove the `Error` entirely - let (tx, rx) = out_events::channel(name); - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::EventStream(tx)); - rx - } - - /// Registers a new notifications protocol. - /// - /// After that, you can call `write_notifications`. - /// - /// Please call `event_stream` before registering a protocol, otherwise you may miss events - /// about the protocol that you have registered. 
- /// - /// You are very strongly encouraged to call this method very early on. Any connection open - /// will retain the protocols that were registered then, and not any new one. - pub fn register_notifications_protocol( - &self, - engine_id: ConsensusEngineId, - protocol_name: impl Into>, - ) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { - engine_id, - protocol_name: protocol_name.into(), - }); - } - - /// You may call this when new transactons are imported by the transaction pool. - /// - /// All transactions will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn trigger_repropagate(&self) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateExtrinsics); - } - - /// You must call when new transaction is imported by the transaction pool. - /// - /// This transaction will be fetched from the `TransactionPool` that was passed at - /// initialization as part of the configuration and propagated to peers. - pub fn propagate_extrinsic(&self, hash: H) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::PropagateExtrinsic(hash)); - } - - /// Make sure an important block is propagated to peers. - /// - /// In chain-based consensus, we often need to make sure non-best forks are - /// at least temporarily synced. This function forces such an announcement. - pub fn announce_block(&self, hash: B::Hash, data: Vec) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); - } - - /// Report a given peer as either beneficial (+) or costly (-) according to the - /// given scalar. - pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { - self.peerset.report_peer(who, cost_benefit); - } - - /// Disconnect from a node as soon as possible. - /// - /// This triggers the same effects as if the connection had closed itself spontaneously. 
- pub fn disconnect_peer(&self, who: PeerId) { - let _ = self.to_worker.unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who)); - } - - /// Request a justification for the given block from the network. - /// - /// On success, the justification will be passed to the import queue that was part at - /// initialization as part of the configuration. - pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::RequestJustification(hash.clone(), number)); - } - - /// Are we in the process of downloading the chain? - pub fn is_major_syncing(&self) -> bool { - self.is_major_syncing.load(Ordering::Relaxed) - } - - /// Start getting a value from the DHT. - /// - /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an - /// item on the [`NetworkWorker`] stream. - pub fn get_value(&self, key: &record::Key) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); - } - - /// Start putting a value in the DHT. - /// - /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an - /// item on the [`NetworkWorker`] stream. - pub fn put_value(&self, key: record::Key, value: Vec) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); - } - - /// Connect to unreserved peers and allow unreserved peers to connect. - pub fn accept_unreserved_peers(&self) { - self.peerset.set_reserved_only(false); - } - - /// Disconnect from unreserved peers and deny new unreserved peers to connect. - pub fn deny_unreserved_peers(&self) { - self.peerset.set_reserved_only(true); - } - - /// Removes a `PeerId` from the list of reserved peers. - pub fn remove_reserved_peer(&self, peer: PeerId) { - self.peerset.remove_reserved_peer(peer); - } - - /// Adds a `PeerId` and its address as reserved. The string should encode the address - /// and peer ID of the remote node. 
- pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { - let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; - self.peerset.add_reserved_peer(peer_id.clone()); - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - Ok(()) - } - - /// Configure an explicit fork sync request. - /// Note that this function should not be used for recent blocks. - /// Sync should be able to download all the recent forks normally. - /// `set_sync_fork_request` should only be used if external code detects that there's - /// a stale fork missing. - /// Passing empty `peers` set effectively removes the sync request. - pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); - } - - /// Modify a peerset priority group. - pub fn set_priority_group(&self, group_id: String, peers: HashSet) -> Result<(), String> { - let peers = peers.into_iter().map(|p| { - parse_addr(p).map_err(|e| format!("{:?}", e)) - }).collect::, String>>()?; - - let peer_ids = peers.iter().map(|(peer_id, _addr)| peer_id.clone()).collect(); - self.peerset.set_priority_group(group_id, peer_ids); - - for (peer_id, addr) in peers.into_iter() { - let _ = self - .to_worker - .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); - } - - Ok(()) - } - - /// Returns the number of peers we're connected to. - pub fn num_connected(&self) -> usize { - self.num_connected.load(Ordering::Relaxed) - } + /// Returns the local `PeerId`. + pub fn local_peer_id(&self) -> &PeerId { + &self.local_peer_id + } + + /// Writes a message on an open notifications channel. Has no effect if the notifications + /// channel with this protocol name is closed. + /// + /// > **Note**: The reason why this is a no-op in the situation where we have no channel is + /// > that we don't guarantee message delivery anyway. 
Networking issues can cause + /// > connections to drop at any time, and higher-level logic shouldn't differentiate + /// > between the remote voluntarily closing a substream or a network error + /// > preventing the message from being delivered. + /// + /// The protocol must have been registered with `register_notifications_protocol`. + /// + pub fn write_notification( + &self, + target: PeerId, + engine_id: ConsensusEngineId, + message: Vec, + ) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::WriteNotification { + target, + engine_id, + message, + }); + } + + /// Returns a stream containing the events that happen on the network. + /// + /// If this method is called multiple times, the events are duplicated. + /// + /// The stream never ends (unless the `NetworkWorker` gets shut down). + /// + /// The name passed is used to identify the channel in the Prometheus metrics. Note that the + /// parameter is a `&'static str`, and not a `String`, in order to avoid accidentally having + /// an unbounded set of Prometheus metrics, which would be quite bad in terms of memory + pub fn event_stream(&self, name: &'static str) -> impl Stream { + // Note: when transitioning to stable futures, remove the `Error` entirely + let (tx, rx) = out_events::channel(name); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::EventStream(tx)); + rx + } + + /// Registers a new notifications protocol. + /// + /// After that, you can call `write_notifications`. + /// + /// Please call `event_stream` before registering a protocol, otherwise you may miss events + /// about the protocol that you have registered. + /// + /// You are very strongly encouraged to call this method very early on. Any connection open + /// will retain the protocols that were registered then, and not any new one. 
+ pub fn register_notifications_protocol( + &self, + engine_id: ConsensusEngineId, + protocol_name: impl Into>, + ) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RegisterNotifProtocol { + engine_id, + protocol_name: protocol_name.into(), + }); + } + + /// You may call this when new transactons are imported by the transaction pool. + /// + /// All transactions will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn trigger_repropagate(&self) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::PropagateExtrinsics); + } + + /// You must call when new transaction is imported by the transaction pool. + /// + /// This transaction will be fetched from the `TransactionPool` that was passed at + /// initialization as part of the configuration and propagated to peers. + pub fn propagate_extrinsic(&self, hash: H) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::PropagateExtrinsic(hash)); + } + + /// Make sure an important block is propagated to peers. + /// + /// In chain-based consensus, we often need to make sure non-best forks are + /// at least temporarily synced. This function forces such an announcement. + pub fn announce_block(&self, hash: B::Hash, data: Vec) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AnnounceBlock(hash, data)); + } + + /// Report a given peer as either beneficial (+) or costly (-) according to the + /// given scalar. + pub fn report_peer(&self, who: PeerId, cost_benefit: ReputationChange) { + self.peerset.report_peer(who, cost_benefit); + } + + /// Disconnect from a node as soon as possible. + /// + /// This triggers the same effects as if the connection had closed itself spontaneously. 
+ pub fn disconnect_peer(&self, who: PeerId) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::DisconnectPeer(who)); + } + + /// Request a justification for the given block from the network. + /// + /// On success, the justification will be passed to the import queue that was part at + /// initialization as part of the configuration. + pub fn request_justification(&self, hash: &B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::RequestJustification( + hash.clone(), + number, + )); + } + + /// Are we in the process of downloading the chain? + pub fn is_major_syncing(&self) -> bool { + self.is_major_syncing.load(Ordering::Relaxed) + } + + /// Start getting a value from the DHT. + /// + /// This will generate either a `ValueFound` or a `ValueNotFound` event and pass it as an + /// item on the [`NetworkWorker`] stream. + pub fn get_value(&self, key: &record::Key) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::GetValue(key.clone())); + } + + /// Start putting a value in the DHT. + /// + /// This will generate either a `ValuePut` or a `ValuePutFailed` event and pass it as an + /// item on the [`NetworkWorker`] stream. + pub fn put_value(&self, key: record::Key, value: Vec) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::PutValue(key, value)); + } + + /// Connect to unreserved peers and allow unreserved peers to connect. + pub fn accept_unreserved_peers(&self) { + self.peerset.set_reserved_only(false); + } + + /// Disconnect from unreserved peers and deny new unreserved peers to connect. + pub fn deny_unreserved_peers(&self) { + self.peerset.set_reserved_only(true); + } + + /// Removes a `PeerId` from the list of reserved peers. + pub fn remove_reserved_peer(&self, peer: PeerId) { + self.peerset.remove_reserved_peer(peer); + } + + /// Adds a `PeerId` and its address as reserved. The string should encode the address + /// and peer ID of the remote node. 
+ pub fn add_reserved_peer(&self, peer: String) -> Result<(), String> { + let (peer_id, addr) = parse_str_addr(&peer).map_err(|e| format!("{:?}", e))?; + self.peerset.add_reserved_peer(peer_id.clone()); + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + Ok(()) + } + + /// Configure an explicit fork sync request. + /// Note that this function should not be used for recent blocks. + /// Sync should be able to download all the recent forks normally. + /// `set_sync_fork_request` should only be used if external code detects that there's + /// a stale fork missing. + /// Passing empty `peers` set effectively removes the sync request. + pub fn set_sync_fork_request(&self, peers: Vec, hash: B::Hash, number: NumberFor) { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::SyncFork(peers, hash, number)); + } + + /// Modify a peerset priority group. + pub fn set_priority_group( + &self, + group_id: String, + peers: HashSet, + ) -> Result<(), String> { + let peers = peers + .into_iter() + .map(|p| parse_addr(p).map_err(|e| format!("{:?}", e))) + .collect::, String>>()?; + + let peer_ids = peers + .iter() + .map(|(peer_id, _addr)| peer_id.clone()) + .collect(); + self.peerset.set_priority_group(group_id, peer_ids); + + for (peer_id, addr) in peers.into_iter() { + let _ = self + .to_worker + .unbounded_send(ServiceToWorkerMsg::AddKnownAddress(peer_id, addr)); + } + + Ok(()) + } + + /// Returns the number of peers we're connected to. 
+ pub fn num_connected(&self) -> usize { + self.num_connected.load(Ordering::Relaxed) + } } -impl sp_consensus::SyncOracle - for NetworkService -{ - fn is_major_syncing(&mut self) -> bool { - NetworkService::is_major_syncing(self) - } +impl sp_consensus::SyncOracle for NetworkService { + fn is_major_syncing(&mut self) -> bool { + NetworkService::is_major_syncing(self) + } - fn is_offline(&mut self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 - } + fn is_offline(&mut self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 + } } -impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle - for &'a NetworkService -{ - fn is_major_syncing(&mut self) -> bool { - NetworkService::is_major_syncing(self) - } +impl<'a, B: BlockT + 'static, H: ExHashT> sp_consensus::SyncOracle for &'a NetworkService { + fn is_major_syncing(&mut self) -> bool { + NetworkService::is_major_syncing(self) + } - fn is_offline(&mut self) -> bool { - self.num_connected.load(Ordering::Relaxed) == 0 - } + fn is_offline(&mut self) -> bool { + self.num_connected.load(Ordering::Relaxed) == 0 + } } /// Trait for providing information about the local network state pub trait NetworkStateInfo { - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec; + /// Returns the local external addresses. + fn external_addresses(&self) -> Vec; - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId; + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId; } impl NetworkStateInfo for NetworkService - where - B: sp_runtime::traits::Block, - H: ExHashT, +where + B: sp_runtime::traits::Block, + H: ExHashT, { - /// Returns the local external addresses. - fn external_addresses(&self) -> Vec { - self.external_addresses.lock().clone() - } - - /// Returns the local Peer ID. - fn local_peer_id(&self) -> PeerId { - self.local_peer_id.clone() - } + /// Returns the local external addresses. 
+ fn external_addresses(&self) -> Vec { + self.external_addresses.lock().clone() + } + + /// Returns the local Peer ID. + fn local_peer_id(&self) -> PeerId { + self.local_peer_id.clone() + } } /// Messages sent from the `NetworkService` to the `NetworkWorker`. /// /// Each entry corresponds to a method of `NetworkService`. enum ServiceToWorkerMsg { - PropagateExtrinsic(H), - PropagateExtrinsics, - RequestJustification(B::Hash, NumberFor), - AnnounceBlock(B::Hash, Vec), - GetValue(record::Key), - PutValue(record::Key, Vec), - AddKnownAddress(PeerId, Multiaddr), - SyncFork(Vec, B::Hash, NumberFor), - EventStream(out_events::Sender), - WriteNotification { - message: Vec, - engine_id: ConsensusEngineId, - target: PeerId, - }, - RegisterNotifProtocol { - engine_id: ConsensusEngineId, - protocol_name: Cow<'static, [u8]>, - }, - DisconnectPeer(PeerId), + PropagateExtrinsic(H), + PropagateExtrinsics, + RequestJustification(B::Hash, NumberFor), + AnnounceBlock(B::Hash, Vec), + GetValue(record::Key), + PutValue(record::Key, Vec), + AddKnownAddress(PeerId, Multiaddr), + SyncFork(Vec, B::Hash, NumberFor), + EventStream(out_events::Sender), + WriteNotification { + message: Vec, + engine_id: ConsensusEngineId, + target: PeerId, + }, + RegisterNotifProtocol { + engine_id: ConsensusEngineId, + protocol_name: Cow<'static, [u8]>, + }, + DisconnectPeer(PeerId), } /// Main network worker. Must be polled in order for the network to advance. @@ -847,515 +925,725 @@ enum ServiceToWorkerMsg { /// You are encouraged to poll this in a separate background thread or task. #[must_use = "The NetworkWorker must be polled in order for the network to work"] pub struct NetworkWorker { - /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. - external_addresses: Arc>>, - /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. - num_connected: Arc, - /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. 
- is_major_syncing: Arc, - /// The network service that can be extracted and shared through the codebase. - service: Arc>, - /// The *actual* network. - network_service: Swarm, - /// The import queue that was passed as initialization. - import_queue: Box>, - /// Messages from the `NetworkService` and that must be processed. - from_worker: TracingUnboundedReceiver>, - /// Receiver for queries from the light client that must be processed. - light_client_rqs: Option>>, - /// Senders for events that happen on the network. - event_streams: out_events::OutChannels, - /// Prometheus network metrics. - metrics: Option, - /// The `PeerId`'s of all boot nodes. - boot_node_ids: Arc>, + /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. + external_addresses: Arc>>, + /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. + num_connected: Arc, + /// Updated by the `NetworkWorker` and loaded by the `NetworkService`. + is_major_syncing: Arc, + /// The network service that can be extracted and shared through the codebase. + service: Arc>, + /// The *actual* network. + network_service: Swarm, + /// The import queue that was passed as initialization. + import_queue: Box>, + /// Messages from the `NetworkService` and that must be processed. + from_worker: TracingUnboundedReceiver>, + /// Receiver for queries from the light client that must be processed. + light_client_rqs: Option>>, + /// Senders for events that happen on the network. + event_streams: out_events::OutChannels, + /// Prometheus network metrics. + metrics: Option, + /// The `PeerId`'s of all boot nodes. 
+ boot_node_ids: Arc>, } struct Metrics { - // This list is ordered alphabetically - connections_closed_total: CounterVec, - connections_opened_total: CounterVec, - import_queue_blocks_submitted: Counter, - import_queue_finality_proofs_submitted: Counter, - import_queue_justifications_submitted: Counter, - incoming_connections_errors_total: CounterVec, - incoming_connections_total: Counter, - is_major_syncing: Gauge, - issued_light_requests: Counter, - kademlia_random_queries_total: CounterVec, - kademlia_records_count: GaugeVec, - kademlia_records_sizes_total: GaugeVec, - kbuckets_num_nodes: GaugeVec, - listeners_local_addresses: Gauge, - listeners_errors_total: Counter, - network_per_sec_bytes: GaugeVec, - notifications_queues_size: HistogramVec, - notifications_sizes: HistogramVec, - notifications_streams_closed_total: CounterVec, - notifications_streams_opened_total: CounterVec, - peers_count: Gauge, - peerset_num_discovered: Gauge, - peerset_num_requested: Gauge, - pending_connections: Gauge, - pending_connections_errors_total: CounterVec, + // This list is ordered alphabetically + connections_closed_total: CounterVec, + connections_opened_total: CounterVec, + import_queue_blocks_submitted: Counter, + import_queue_finality_proofs_submitted: Counter, + import_queue_justifications_submitted: Counter, + incoming_connections_errors_total: CounterVec, + incoming_connections_total: Counter, + is_major_syncing: Gauge, + issued_light_requests: Counter, + kademlia_random_queries_total: CounterVec, + kademlia_records_count: GaugeVec, + kademlia_records_sizes_total: GaugeVec, + kbuckets_num_nodes: GaugeVec, + listeners_local_addresses: Gauge, + listeners_errors_total: Counter, + network_per_sec_bytes: GaugeVec, + notifications_queues_size: HistogramVec, + notifications_sizes: HistogramVec, + notifications_streams_closed_total: CounterVec, + notifications_streams_opened_total: CounterVec, + peers_count: Gauge, + peerset_num_discovered: Gauge, + peerset_num_requested: 
Gauge, + pending_connections: Gauge, + pending_connections_errors_total: CounterVec, } impl Metrics { - fn register(registry: &Registry) -> Result { - Ok(Self { - // This list is ordered alphabetically - connections_closed_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_connections_closed_total", - "Total number of connections closed, by reason and direction" - ), - &["direction", "reason"] - )?, registry)?, - connections_opened_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_connections_opened_total", - "Total number of connections opened" - ), - &["direction"] - )?, registry)?, - import_queue_blocks_submitted: register(Counter::new( - "import_queue_blocks_submitted", - "Number of blocks submitted to the import queue.", - )?, registry)?, - import_queue_finality_proofs_submitted: register(Counter::new( - "import_queue_finality_proofs_submitted", - "Number of finality proofs submitted to the import queue.", - )?, registry)?, - import_queue_justifications_submitted: register(Counter::new( - "import_queue_justifications_submitted", - "Number of justifications submitted to the import queue.", - )?, registry)?, - incoming_connections_errors_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_incoming_connections_handshake_errors_total", - "Total number of incoming connections that have failed during the \ - initial handshake" - ), - &["reason"] - )?, registry)?, - incoming_connections_total: register(Counter::new( - "sub_libp2p_incoming_connections_total", - "Total number of incoming connections on the listening sockets" - )?, registry)?, - is_major_syncing: register(Gauge::new( - "sub_libp2p_is_major_syncing", "Whether the node is performing a major sync or not.", - )?, registry)?, - issued_light_requests: register(Counter::new( - "issued_light_requests", - "Number of light client requests that our node has issued.", - )?, registry)?, - kademlia_random_queries_total: register(CounterVec::new( - Opts::new( - 
"sub_libp2p_kademlia_random_queries_total", - "Number of random Kademlia queries started" - ), - &["protocol"] - )?, registry)?, - kademlia_records_count: register(GaugeVec::new( - Opts::new( - "sub_libp2p_kademlia_records_count", - "Number of records in the Kademlia records store" - ), - &["protocol"] - )?, registry)?, - kademlia_records_sizes_total: register(GaugeVec::new( - Opts::new( - "sub_libp2p_kademlia_records_sizes_total", - "Total size of all the records in the Kademlia records store" - ), - &["protocol"] - )?, registry)?, - kbuckets_num_nodes: register(GaugeVec::new( - Opts::new( - "sub_libp2p_kbuckets_num_nodes", - "Number of nodes in the Kademlia k-buckets" - ), - &["protocol"] - )?, registry)?, - listeners_local_addresses: register(Gauge::new( - "sub_libp2p_listeners_local_addresses", "Number of local addresses we're listening on" - )?, registry)?, - listeners_errors_total: register(Counter::new( - "sub_libp2p_listeners_errors_total", - "Total number of non-fatal errors reported by a listener" - )?, registry)?, - network_per_sec_bytes: register(GaugeVec::new( - Opts::new( - "sub_libp2p_network_per_sec_bytes", - "Average bandwidth usage per second" - ), - &["direction"] - )?, registry)?, - notifications_queues_size: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "sub_libp2p_notifications_queues_size", - "Total size of all the notification queues" - ), - buckets: vec![0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 511.0, 512.0], - }, - &["protocol"] - )?, registry)?, - notifications_sizes: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "sub_libp2p_notifications_sizes", - "Sizes of the notifications send to and received from all nodes" - ), - buckets: prometheus_endpoint::exponential_buckets(64.0, 4.0, 8) - .expect("parameters are always valid values; qed"), - }, - &["direction", "protocol"] - )?, registry)?, - notifications_streams_closed_total: register(CounterVec::new( - Opts::new( - 
"sub_libp2p_notifications_streams_closed_total", - "Total number of notification substreams that have been closed" - ), - &["protocol"] - )?, registry)?, - notifications_streams_opened_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_notifications_streams_opened_total", - "Total number of notification substreams that have been opened" - ), - &["protocol"] - )?, registry)?, - peers_count: register(Gauge::new( - "sub_libp2p_peers_count", "Number of network gossip peers", - )?, registry)?, - peerset_num_discovered: register(Gauge::new( - "sub_libp2p_peerset_num_discovered", "Number of nodes stored in the peerset manager", - )?, registry)?, - peerset_num_requested: register(Gauge::new( - "sub_libp2p_peerset_num_requested", "Number of nodes that the peerset manager wants us to be connected to", - )?, registry)?, - pending_connections: register(Gauge::new( - "sub_libp2p_pending_connections", - "Number of connections in the process of being established", - )?, registry)?, - pending_connections_errors_total: register(CounterVec::new( - Opts::new( - "sub_libp2p_pending_connections_errors_total", - "Total number of pending connection errors" - ), - &["reason"] - )?, registry)?, - }) - } - - fn update_with_network_event(&self, event: &Event) { - match event { - Event::NotificationStreamOpened { engine_id, .. } => { - self.notifications_streams_opened_total - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id)]).inc(); - }, - Event::NotificationStreamClosed { engine_id, .. } => { - self.notifications_streams_closed_total - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id)]).inc(); - }, - Event::NotificationsReceived { messages, .. 
} => { - for (engine_id, message) in messages { - self.notifications_sizes - .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) - .observe(message.len() as f64); - } - }, - _ => {} - } - } + fn register(registry: &Registry) -> Result { + Ok(Self { + // This list is ordered alphabetically + connections_closed_total: register( + CounterVec::new( + Opts::new( + "sub_libp2p_connections_closed_total", + "Total number of connections closed, by reason and direction", + ), + &["direction", "reason"], + )?, + registry, + )?, + connections_opened_total: register( + CounterVec::new( + Opts::new( + "sub_libp2p_connections_opened_total", + "Total number of connections opened", + ), + &["direction"], + )?, + registry, + )?, + import_queue_blocks_submitted: register( + Counter::new( + "import_queue_blocks_submitted", + "Number of blocks submitted to the import queue.", + )?, + registry, + )?, + import_queue_finality_proofs_submitted: register( + Counter::new( + "import_queue_finality_proofs_submitted", + "Number of finality proofs submitted to the import queue.", + )?, + registry, + )?, + import_queue_justifications_submitted: register( + Counter::new( + "import_queue_justifications_submitted", + "Number of justifications submitted to the import queue.", + )?, + registry, + )?, + incoming_connections_errors_total: register( + CounterVec::new( + Opts::new( + "sub_libp2p_incoming_connections_handshake_errors_total", + "Total number of incoming connections that have failed during the \ + initial handshake", + ), + &["reason"], + )?, + registry, + )?, + incoming_connections_total: register( + Counter::new( + "sub_libp2p_incoming_connections_total", + "Total number of incoming connections on the listening sockets", + )?, + registry, + )?, + is_major_syncing: register( + Gauge::new( + "sub_libp2p_is_major_syncing", + "Whether the node is performing a major sync or not.", + )?, + registry, + )?, + issued_light_requests: register( + Counter::new( + 
"issued_light_requests", + "Number of light client requests that our node has issued.", + )?, + registry, + )?, + kademlia_random_queries_total: register( + CounterVec::new( + Opts::new( + "sub_libp2p_kademlia_random_queries_total", + "Number of random Kademlia queries started", + ), + &["protocol"], + )?, + registry, + )?, + kademlia_records_count: register( + GaugeVec::new( + Opts::new( + "sub_libp2p_kademlia_records_count", + "Number of records in the Kademlia records store", + ), + &["protocol"], + )?, + registry, + )?, + kademlia_records_sizes_total: register( + GaugeVec::new( + Opts::new( + "sub_libp2p_kademlia_records_sizes_total", + "Total size of all the records in the Kademlia records store", + ), + &["protocol"], + )?, + registry, + )?, + kbuckets_num_nodes: register( + GaugeVec::new( + Opts::new( + "sub_libp2p_kbuckets_num_nodes", + "Number of nodes in the Kademlia k-buckets", + ), + &["protocol"], + )?, + registry, + )?, + listeners_local_addresses: register( + Gauge::new( + "sub_libp2p_listeners_local_addresses", + "Number of local addresses we're listening on", + )?, + registry, + )?, + listeners_errors_total: register( + Counter::new( + "sub_libp2p_listeners_errors_total", + "Total number of non-fatal errors reported by a listener", + )?, + registry, + )?, + network_per_sec_bytes: register( + GaugeVec::new( + Opts::new( + "sub_libp2p_network_per_sec_bytes", + "Average bandwidth usage per second", + ), + &["direction"], + )?, + registry, + )?, + notifications_queues_size: register( + HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_notifications_queues_size", + "Total size of all the notification queues", + ), + buckets: vec![ + 0.0, 1.0, 2.0, 4.0, 8.0, 16.0, 32.0, 64.0, 128.0, 256.0, 511.0, 512.0, + ], + }, + &["protocol"], + )?, + registry, + )?, + notifications_sizes: register( + HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "sub_libp2p_notifications_sizes", + "Sizes of the notifications send to and 
received from all nodes", + ), + buckets: prometheus_endpoint::exponential_buckets(64.0, 4.0, 8) + .expect("parameters are always valid values; qed"), + }, + &["direction", "protocol"], + )?, + registry, + )?, + notifications_streams_closed_total: register( + CounterVec::new( + Opts::new( + "sub_libp2p_notifications_streams_closed_total", + "Total number of notification substreams that have been closed", + ), + &["protocol"], + )?, + registry, + )?, + notifications_streams_opened_total: register( + CounterVec::new( + Opts::new( + "sub_libp2p_notifications_streams_opened_total", + "Total number of notification substreams that have been opened", + ), + &["protocol"], + )?, + registry, + )?, + peers_count: register( + Gauge::new("sub_libp2p_peers_count", "Number of network gossip peers")?, + registry, + )?, + peerset_num_discovered: register( + Gauge::new( + "sub_libp2p_peerset_num_discovered", + "Number of nodes stored in the peerset manager", + )?, + registry, + )?, + peerset_num_requested: register( + Gauge::new( + "sub_libp2p_peerset_num_requested", + "Number of nodes that the peerset manager wants us to be connected to", + )?, + registry, + )?, + pending_connections: register( + Gauge::new( + "sub_libp2p_pending_connections", + "Number of connections in the process of being established", + )?, + registry, + )?, + pending_connections_errors_total: register( + CounterVec::new( + Opts::new( + "sub_libp2p_pending_connections_errors_total", + "Total number of pending connection errors", + ), + &["reason"], + )?, + registry, + )?, + }) + } + + fn update_with_network_event(&self, event: &Event) { + match event { + Event::NotificationStreamOpened { engine_id, .. } => { + self.notifications_streams_opened_total + .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id)]) + .inc(); + } + Event::NotificationStreamClosed { engine_id, .. 
} => { + self.notifications_streams_closed_total + .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id)]) + .inc(); + } + Event::NotificationsReceived { messages, .. } => { + for (engine_id, message) in messages { + self.notifications_sizes + .with_label_values(&["in", &maybe_utf8_bytes_to_string(engine_id)]) + .observe(message.len() as f64); + } + } + _ => {} + } + } } impl Future for NetworkWorker { - type Output = Result<(), io::Error>; - - fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { - let this = &mut *self; - - // Poll the import queue for actions to perform. - this.import_queue.poll_actions(cx, &mut NetworkLink { - protocol: &mut this.network_service, - }); - - // Check for new incoming light client requests. - if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { - while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { - // This can error if there are too many queued requests already. - if this.network_service.light_client_request(rq).is_err() { - log::warn!("Couldn't start light client request: too many pending requests"); - } - if let Some(metrics) = this.metrics.as_ref() { - metrics.issued_light_requests.inc(); - } - } - } - - loop { - // Process the next message coming from the `NetworkService`. 
- let msg = match this.from_worker.poll_next_unpin(cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) => return Poll::Ready(Ok(())), - Poll::Pending => break, - }; - - match msg { - ServiceToWorkerMsg::AnnounceBlock(hash, data) => - this.network_service.user_protocol_mut().announce_block(hash, data), - ServiceToWorkerMsg::RequestJustification(hash, number) => - this.network_service.user_protocol_mut().request_justification(&hash, number), - ServiceToWorkerMsg::PropagateExtrinsic(hash) => - this.network_service.user_protocol_mut().propagate_extrinsic(&hash), - ServiceToWorkerMsg::PropagateExtrinsics => - this.network_service.user_protocol_mut().propagate_extrinsics(), - ServiceToWorkerMsg::GetValue(key) => - this.network_service.get_value(&key), - ServiceToWorkerMsg::PutValue(key, value) => - this.network_service.put_value(key, value), - ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => - this.network_service.add_known_address(peer_id, addr), - ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => - this.network_service.user_protocol_mut().set_sync_fork_request(peer_ids, &hash, number), - ServiceToWorkerMsg::EventStream(sender) => - this.event_streams.push(sender), - ServiceToWorkerMsg::WriteNotification { message, engine_id, target } => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.notifications_sizes - .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) - .observe(message.len() as f64); - } - this.network_service.user_protocol_mut().write_notification(target, engine_id, message) - }, - ServiceToWorkerMsg::RegisterNotifProtocol { engine_id, protocol_name } => { - this.network_service - .register_notifications_protocol(engine_id, protocol_name); - }, - ServiceToWorkerMsg::DisconnectPeer(who) => - this.network_service.user_protocol_mut().disconnect_peer(&who), - } - } - - loop { - // Process the next action coming from the network. 
- let next_event = this.network_service.next_event(); - futures::pin_mut!(next_event); - let poll_value = next_event.poll_unpin(cx); - - match poll_value { - Poll::Pending => break, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_blocks_submitted.inc(); - } - this.import_queue.import_blocks(origin, blocks); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport(origin, hash, nb, justification))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_justifications_submitted.inc(); - } - this.import_queue.import_justification(origin, hash, nb, justification); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport(origin, hash, nb, proof))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.import_queue_finality_proofs_submitted.inc(); - } - this.import_queue.import_finality_proof(origin, hash, nb, proof); - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted(protocol))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.kademlia_random_queries_total - .with_label_values(&[&maybe_utf8_bytes_to_string(protocol.as_bytes())]) - .inc(); - } - }, - Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Event(ev))) => { - if let Some(metrics) = this.metrics.as_ref() { - metrics.update_with_network_event(&ev); - } - this.event_streams.send(ev); - }, - Poll::Ready(SwarmEvent::ConnectionEstablished { peer_id, endpoint, .. }) => { - trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); - if let Some(metrics) = this.metrics.as_ref() { - match endpoint { - ConnectedPoint::Dialer { .. } => - metrics.connections_opened_total.with_label_values(&["out"]).inc(), - ConnectedPoint::Listener { .. } => - metrics.connections_opened_total.with_label_values(&["in"]).inc(), - } - } - }, - Poll::Ready(SwarmEvent::ConnectionClosed { peer_id, cause, endpoint, .. 
}) => { - trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); - if let Some(metrics) = this.metrics.as_ref() { - let dir = match endpoint { - ConnectedPoint::Dialer { .. } => "out", - ConnectedPoint::Listener { .. } => "in", - }; - - match cause { - ConnectionError::IO(_) => - metrics.connections_closed_total.with_label_values(&[dir, "transport-error"]).inc(), - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::B(EitherError::A(PingFailure::Timeout))))))) => - metrics.connections_closed_total.with_label_values(&[dir, "ping-timeout"]).inc(), - ConnectionError::Handler(NodeHandlerWrapperError::Handler(EitherError::A(EitherError::A( - EitherError::A(EitherError::A(EitherError::B(LegacyConnectionKillError))))))) => - metrics.connections_closed_total.with_label_values(&[dir, "force-closed"]).inc(), - ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => - metrics.connections_closed_total.with_label_values(&[dir, "protocol-error"]).inc(), - ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => - metrics.connections_closed_total.with_label_values(&[dir, "keep-alive-timeout"]).inc(), - } - } - }, - Poll::Ready(SwarmEvent::NewListenAddr(addr)) => { - trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr); - if let Some(metrics) = this.metrics.as_ref() { - metrics.listeners_local_addresses.inc(); - } - }, - Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => { - trace!(target: "sub-libp2p", "Libp2p => ExpiredListenAddr({})", addr); - if let Some(metrics) = this.metrics.as_ref() { - metrics.listeners_local_addresses.dec(); - } - }, - Poll::Ready(SwarmEvent::UnreachableAddr { peer_id, address, error, .. 
}) => { - trace!( - target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}", - peer_id, - address, - error, - ); - - if this.boot_node_ids.contains(&peer_id) { - if let PendingConnectionError::InvalidPeerId = error { - error!( - "💔 Invalid peer ID from bootnode, expected `{}` at address `{}`.", - peer_id, - address, - ); - } - } - - if let Some(metrics) = this.metrics.as_ref() { - match error { - PendingConnectionError::ConnectionLimit(_) => - metrics.pending_connections_errors_total.with_label_values(&["limit-reached"]).inc(), - PendingConnectionError::InvalidPeerId => - metrics.pending_connections_errors_total.with_label_values(&["invalid-peer-id"]).inc(), - PendingConnectionError::Transport(_) | PendingConnectionError::IO(_) => - metrics.pending_connections_errors_total.with_label_values(&["transport-error"]).inc(), - } - } - } - Poll::Ready(SwarmEvent::Dialing(peer_id)) => - trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id), - Poll::Ready(SwarmEvent::IncomingConnection { local_addr, send_back_addr }) => { - trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))", + type Output = Result<(), io::Error>; + + fn poll(mut self: Pin<&mut Self>, cx: &mut std::task::Context) -> Poll { + let this = &mut *self; + + // Poll the import queue for actions to perform. + this.import_queue.poll_actions( + cx, + &mut NetworkLink { + protocol: &mut this.network_service, + }, + ); + + // Check for new incoming light client requests. + if let Some(light_client_rqs) = this.light_client_rqs.as_mut() { + while let Poll::Ready(Some(rq)) = light_client_rqs.poll_next_unpin(cx) { + // This can error if there are too many queued requests already. 
+ if this.network_service.light_client_request(rq).is_err() { + log::warn!("Couldn't start light client request: too many pending requests"); + } + if let Some(metrics) = this.metrics.as_ref() { + metrics.issued_light_requests.inc(); + } + } + } + + loop { + // Process the next message coming from the `NetworkService`. + let msg = match this.from_worker.poll_next_unpin(cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) => return Poll::Ready(Ok(())), + Poll::Pending => break, + }; + + match msg { + ServiceToWorkerMsg::AnnounceBlock(hash, data) => this + .network_service + .user_protocol_mut() + .announce_block(hash, data), + ServiceToWorkerMsg::RequestJustification(hash, number) => this + .network_service + .user_protocol_mut() + .request_justification(&hash, number), + ServiceToWorkerMsg::PropagateExtrinsic(hash) => this + .network_service + .user_protocol_mut() + .propagate_extrinsic(&hash), + ServiceToWorkerMsg::PropagateExtrinsics => this + .network_service + .user_protocol_mut() + .propagate_extrinsics(), + ServiceToWorkerMsg::GetValue(key) => this.network_service.get_value(&key), + ServiceToWorkerMsg::PutValue(key, value) => { + this.network_service.put_value(key, value) + } + ServiceToWorkerMsg::AddKnownAddress(peer_id, addr) => { + this.network_service.add_known_address(peer_id, addr) + } + ServiceToWorkerMsg::SyncFork(peer_ids, hash, number) => this + .network_service + .user_protocol_mut() + .set_sync_fork_request(peer_ids, &hash, number), + ServiceToWorkerMsg::EventStream(sender) => this.event_streams.push(sender), + ServiceToWorkerMsg::WriteNotification { + message, + engine_id, + target, + } => { + if let Some(metrics) = this.metrics.as_ref() { + metrics + .notifications_sizes + .with_label_values(&["out", &maybe_utf8_bytes_to_string(&engine_id)]) + .observe(message.len() as f64); + } + this.network_service + .user_protocol_mut() + .write_notification(target, engine_id, message) + } + ServiceToWorkerMsg::RegisterNotifProtocol { + engine_id, + 
protocol_name, + } => { + this.network_service + .register_notifications_protocol(engine_id, protocol_name); + } + ServiceToWorkerMsg::DisconnectPeer(who) => this + .network_service + .user_protocol_mut() + .disconnect_peer(&who), + } + } + + loop { + // Process the next action coming from the network. + let next_event = this.network_service.next_event(); + futures::pin_mut!(next_event); + let poll_value = next_event.poll_unpin(cx); + + match poll_value { + Poll::Pending => break, + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::BlockImport(origin, blocks))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_blocks_submitted.inc(); + } + this.import_queue.import_blocks(origin, blocks); + } + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::JustificationImport( + origin, + hash, + nb, + justification, + ))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_justifications_submitted.inc(); + } + this.import_queue + .import_justification(origin, hash, nb, justification); + } + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::FinalityProofImport( + origin, + hash, + nb, + proof, + ))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.import_queue_finality_proofs_submitted.inc(); + } + this.import_queue + .import_finality_proof(origin, hash, nb, proof); + } + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::RandomKademliaStarted( + protocol, + ))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics + .kademlia_random_queries_total + .with_label_values(&[&maybe_utf8_bytes_to_string(protocol.as_bytes())]) + .inc(); + } + } + Poll::Ready(SwarmEvent::Behaviour(BehaviourOut::Event(ev))) => { + if let Some(metrics) = this.metrics.as_ref() { + metrics.update_with_network_event(&ev); + } + this.event_streams.send(ev); + } + Poll::Ready(SwarmEvent::ConnectionEstablished { + peer_id, endpoint, .. 
+ }) => { + trace!(target: "sub-libp2p", "Libp2p => Connected({:?})", peer_id); + if let Some(metrics) = this.metrics.as_ref() { + match endpoint { + ConnectedPoint::Dialer { .. } => metrics + .connections_opened_total + .with_label_values(&["out"]) + .inc(), + ConnectedPoint::Listener { .. } => metrics + .connections_opened_total + .with_label_values(&["in"]) + .inc(), + } + } + } + Poll::Ready(SwarmEvent::ConnectionClosed { + peer_id, + cause, + endpoint, + .. + }) => { + trace!(target: "sub-libp2p", "Libp2p => Disconnected({:?}, {:?})", peer_id, cause); + if let Some(metrics) = this.metrics.as_ref() { + let dir = match endpoint { + ConnectedPoint::Dialer { .. } => "out", + ConnectedPoint::Listener { .. } => "in", + }; + + match cause { + ConnectionError::IO(_) => metrics + .connections_closed_total + .with_label_values(&[dir, "transport-error"]) + .inc(), + ConnectionError::Handler(NodeHandlerWrapperError::Handler( + EitherError::A(EitherError::A(EitherError::A(EitherError::B( + EitherError::A(PingFailure::Timeout), + )))), + )) => metrics + .connections_closed_total + .with_label_values(&[dir, "ping-timeout"]) + .inc(), + ConnectionError::Handler(NodeHandlerWrapperError::Handler( + EitherError::A(EitherError::A(EitherError::A(EitherError::A( + EitherError::B(LegacyConnectionKillError), + )))), + )) => metrics + .connections_closed_total + .with_label_values(&[dir, "force-closed"]) + .inc(), + ConnectionError::Handler(NodeHandlerWrapperError::Handler(_)) => { + metrics + .connections_closed_total + .with_label_values(&[dir, "protocol-error"]) + .inc() + } + ConnectionError::Handler(NodeHandlerWrapperError::KeepAliveTimeout) => { + metrics + .connections_closed_total + .with_label_values(&[dir, "keep-alive-timeout"]) + .inc() + } + } + } + } + Poll::Ready(SwarmEvent::NewListenAddr(addr)) => { + trace!(target: "sub-libp2p", "Libp2p => NewListenAddr({})", addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.inc(); + } + } + 
Poll::Ready(SwarmEvent::ExpiredListenAddr(addr)) => { + trace!(target: "sub-libp2p", "Libp2p => ExpiredListenAddr({})", addr); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_local_addresses.dec(); + } + } + Poll::Ready(SwarmEvent::UnreachableAddr { + peer_id, + address, + error, + .. + }) => { + trace!( + target: "sub-libp2p", "Libp2p => Failed to reach {:?} through {:?}: {}", + peer_id, + address, + error, + ); + + if this.boot_node_ids.contains(&peer_id) { + if let PendingConnectionError::InvalidPeerId = error { + error!( + "💔 Invalid peer ID from bootnode, expected `{}` at address `{}`.", + peer_id, address, + ); + } + } + + if let Some(metrics) = this.metrics.as_ref() { + match error { + PendingConnectionError::ConnectionLimit(_) => metrics + .pending_connections_errors_total + .with_label_values(&["limit-reached"]) + .inc(), + PendingConnectionError::InvalidPeerId => metrics + .pending_connections_errors_total + .with_label_values(&["invalid-peer-id"]) + .inc(), + PendingConnectionError::Transport(_) + | PendingConnectionError::IO(_) => metrics + .pending_connections_errors_total + .with_label_values(&["transport-error"]) + .inc(), + } + } + } + Poll::Ready(SwarmEvent::Dialing(peer_id)) => { + trace!(target: "sub-libp2p", "Libp2p => Dialing({:?})", peer_id) + } + Poll::Ready(SwarmEvent::IncomingConnection { + local_addr, + send_back_addr, + }) => { + trace!(target: "sub-libp2p", "Libp2p => IncomingConnection({},{}))", local_addr, send_back_addr); - if let Some(metrics) = this.metrics.as_ref() { - metrics.incoming_connections_total.inc(); - } - }, - Poll::Ready(SwarmEvent::IncomingConnectionError { local_addr, send_back_addr, error }) => { - trace!(target: "sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", + if let Some(metrics) = this.metrics.as_ref() { + metrics.incoming_connections_total.inc(); + } + } + Poll::Ready(SwarmEvent::IncomingConnectionError { + local_addr, + send_back_addr, + error, + }) => { + trace!(target: 
"sub-libp2p", "Libp2p => IncomingConnectionError({},{}): {}", local_addr, send_back_addr, error); - if let Some(metrics) = this.metrics.as_ref() { - let reason = match error { - PendingConnectionError::ConnectionLimit(_) => "limit-reached", - PendingConnectionError::InvalidPeerId => "invalid-peer-id", - PendingConnectionError::Transport(_) | - PendingConnectionError::IO(_) => "transport-error", - }; - - metrics.incoming_connections_errors_total.with_label_values(&[reason]).inc(); - } - }, - Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { - trace!(target: "sub-libp2p", "Libp2p => BannedPeer({}). Connected via {:?}.", + if let Some(metrics) = this.metrics.as_ref() { + let reason = match error { + PendingConnectionError::ConnectionLimit(_) => "limit-reached", + PendingConnectionError::InvalidPeerId => "invalid-peer-id", + PendingConnectionError::Transport(_) + | PendingConnectionError::IO(_) => "transport-error", + }; + + metrics + .incoming_connections_errors_total + .with_label_values(&[reason]) + .inc(); + } + } + Poll::Ready(SwarmEvent::BannedPeer { peer_id, endpoint }) => { + trace!(target: "sub-libp2p", "Libp2p => BannedPeer({}). 
Connected via {:?}.", peer_id, endpoint); - if let Some(metrics) = this.metrics.as_ref() { - metrics.incoming_connections_errors_total.with_label_values(&["banned"]).inc(); - } - }, - Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => - trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", - address, error), - Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses }) => { - warn!(target: "sub-libp2p", "Libp2p => ListenerClosed: {:?}", reason); - if let Some(metrics) = this.metrics.as_ref() { - metrics.listeners_local_addresses.sub(addresses.len() as u64); - } - }, - Poll::Ready(SwarmEvent::ListenerError { error }) => { - trace!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error); - if let Some(metrics) = this.metrics.as_ref() { - metrics.listeners_errors_total.inc(); - } - }, - }; - } - - let num_connected_peers = this.network_service.user_protocol_mut().num_connected_peers(); - - // Update the variables shared with the `NetworkService`. 
- this.num_connected.store(num_connected_peers, Ordering::Relaxed); - { - let external_addresses = Swarm::::external_addresses(&this.network_service).cloned().collect(); - *this.external_addresses.lock() = external_addresses; - } - - let is_major_syncing = match this.network_service.user_protocol_mut().sync_state() { - SyncState::Idle => false, - SyncState::Downloading => true, - }; - - this.is_major_syncing.store(is_major_syncing, Ordering::Relaxed); - - if let Some(metrics) = this.metrics.as_ref() { - metrics.network_per_sec_bytes.with_label_values(&["in"]).set(this.service.bandwidth.average_download_per_sec()); - metrics.network_per_sec_bytes.with_label_values(&["out"]).set(this.service.bandwidth.average_upload_per_sec()); - metrics.is_major_syncing.set(is_major_syncing as u64); - for (proto, num_entries) in this.network_service.num_kbuckets_entries() { - let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); - metrics.kbuckets_num_nodes.with_label_values(&[&proto]).set(num_entries as u64); - } - for (proto, num_entries) in this.network_service.num_kademlia_records() { - let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); - metrics.kademlia_records_count.with_label_values(&[&proto]).set(num_entries as u64); - } - for (proto, num_entries) in this.network_service.kademlia_records_total_size() { - let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); - metrics.kademlia_records_sizes_total.with_label_values(&[&proto]).set(num_entries as u64); - } - metrics.peers_count.set(num_connected_peers as u64); - metrics.peerset_num_discovered.set(this.network_service.user_protocol().num_discovered_peers() as u64); - metrics.peerset_num_requested.set(this.network_service.user_protocol().requested_peers().count() as u64); - metrics.pending_connections.set(Swarm::network_info(&this.network_service).num_connections_pending as u64); - } - - Poll::Pending - } + if let Some(metrics) = this.metrics.as_ref() { + metrics + .incoming_connections_errors_total + 
.with_label_values(&["banned"]) + .inc(); + } + } + Poll::Ready(SwarmEvent::UnknownPeerUnreachableAddr { address, error }) => { + trace!(target: "sub-libp2p", "Libp2p => UnknownPeerUnreachableAddr({}): {}", + address, error) + } + Poll::Ready(SwarmEvent::ListenerClosed { reason, addresses }) => { + warn!(target: "sub-libp2p", "Libp2p => ListenerClosed: {:?}", reason); + if let Some(metrics) = this.metrics.as_ref() { + metrics + .listeners_local_addresses + .sub(addresses.len() as u64); + } + } + Poll::Ready(SwarmEvent::ListenerError { error }) => { + trace!(target: "sub-libp2p", "Libp2p => ListenerError: {}", error); + if let Some(metrics) = this.metrics.as_ref() { + metrics.listeners_errors_total.inc(); + } + } + }; + } + + let num_connected_peers = this + .network_service + .user_protocol_mut() + .num_connected_peers(); + + // Update the variables shared with the `NetworkService`. + this.num_connected + .store(num_connected_peers, Ordering::Relaxed); + { + let external_addresses = Swarm::::external_addresses(&this.network_service) + .cloned() + .collect(); + *this.external_addresses.lock() = external_addresses; + } + + let is_major_syncing = match this.network_service.user_protocol_mut().sync_state() { + SyncState::Idle => false, + SyncState::Downloading => true, + }; + + this.is_major_syncing + .store(is_major_syncing, Ordering::Relaxed); + + if let Some(metrics) = this.metrics.as_ref() { + metrics + .network_per_sec_bytes + .with_label_values(&["in"]) + .set(this.service.bandwidth.average_download_per_sec()); + metrics + .network_per_sec_bytes + .with_label_values(&["out"]) + .set(this.service.bandwidth.average_upload_per_sec()); + metrics.is_major_syncing.set(is_major_syncing as u64); + for (proto, num_entries) in this.network_service.num_kbuckets_entries() { + let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); + metrics + .kbuckets_num_nodes + .with_label_values(&[&proto]) + .set(num_entries as u64); + } + for (proto, num_entries) in 
this.network_service.num_kademlia_records() { + let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); + metrics + .kademlia_records_count + .with_label_values(&[&proto]) + .set(num_entries as u64); + } + for (proto, num_entries) in this.network_service.kademlia_records_total_size() { + let proto = maybe_utf8_bytes_to_string(proto.as_bytes()); + metrics + .kademlia_records_sizes_total + .with_label_values(&[&proto]) + .set(num_entries as u64); + } + metrics.peers_count.set(num_connected_peers as u64); + metrics + .peerset_num_discovered + .set(this.network_service.user_protocol().num_discovered_peers() as u64); + metrics.peerset_num_requested.set( + this.network_service + .user_protocol() + .requested_peers() + .count() as u64, + ); + metrics + .pending_connections + .set(Swarm::network_info(&this.network_service).num_connections_pending as u64); + } + + Poll::Pending + } } -impl Unpin for NetworkWorker { -} +impl Unpin for NetworkWorker {} /// Turns bytes that are potentially UTF-8 into a reasonable representable string. /// /// Meant to be used only for debugging or metrics-reporting purposes. fn maybe_utf8_bytes_to_string(id: &[u8]) -> Cow { - if let Ok(s) = std::str::from_utf8(&id[..]) { - Cow::Borrowed(s) - } else { - Cow::Owned(format!("{:?}", id)) - } + if let Ok(s) = std::str::from_utf8(&id[..]) { + Cow::Borrowed(s) + } else { + Cow::Owned(format!("{:?}", id)) + } } /// The libp2p swarm, customized for our needs. @@ -1363,44 +1651,70 @@ type Swarm = libp2p::swarm::Swarm>; // Implementation of `import_queue::Link` trait using the available local variables. 
struct NetworkLink<'a, B: BlockT, H: ExHashT> { - protocol: &'a mut Swarm, + protocol: &'a mut Swarm, } impl<'a, B: BlockT, H: ExHashT> Link for NetworkLink<'a, B, H> { - fn blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - self.protocol.user_protocol_mut().blocks_processed(imported, count, results) - } - fn justification_imported(&mut self, who: PeerId, hash: &B::Hash, number: NumberFor, success: bool) { - self.protocol.user_protocol_mut().justification_import_result(hash.clone(), number, success); - if !success { - info!("💔 Invalid justification provided by {} for #{}", who, hash); - self.protocol.user_protocol_mut().disconnect_peer(&who); - self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid justification")); - } - } - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol.user_protocol_mut().request_justification(hash, number) - } - fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - self.protocol.user_protocol_mut().request_finality_proof(hash, number) - } - fn finality_proof_imported( - &mut self, - who: PeerId, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - let success = finalization_result.is_ok(); - self.protocol.user_protocol_mut().finality_proof_import_result(request_block, finalization_result); - if !success { - info!("💔 Invalid finality proof provided by {} for #{}", who, request_block.0); - self.protocol.user_protocol_mut().disconnect_peer(&who); - self.protocol.user_protocol_mut().report_peer(who, ReputationChange::new_fatal("Invalid finality proof")); - } - } + fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + ) { + self.protocol + .user_protocol_mut() + .blocks_processed(imported, count, results) + } + fn justification_imported( + &mut 
self, + who: PeerId, + hash: &B::Hash, + number: NumberFor, + success: bool, + ) { + self.protocol + .user_protocol_mut() + .justification_import_result(hash.clone(), number, success); + if !success { + info!("💔 Invalid justification provided by {} for #{}", who, hash); + self.protocol.user_protocol_mut().disconnect_peer(&who); + self.protocol + .user_protocol_mut() + .report_peer(who, ReputationChange::new_fatal("Invalid justification")); + } + } + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + self.protocol + .user_protocol_mut() + .request_justification(hash, number) + } + fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { + self.protocol + .user_protocol_mut() + .request_finality_proof(hash, number) + } + fn finality_proof_imported( + &mut self, + who: PeerId, + request_block: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + let success = finalization_result.is_ok(); + self.protocol + .user_protocol_mut() + .finality_proof_import_result(request_block, finalization_result); + if !success { + info!( + "💔 Invalid finality proof provided by {} for #{}", + who, request_block.0 + ); + self.protocol.user_protocol_mut().disconnect_peer(&who); + self.protocol + .user_protocol_mut() + .report_peer(who, ReputationChange::new_fatal("Invalid finality proof")); + } + } } diff --git a/client/network/src/service/out_events.rs b/client/network/src/service/out_events.rs index 8f9c138095..326824c112 100644 --- a/client/network/src/service/out_events.rs +++ b/client/network/src/service/out_events.rs @@ -30,27 +30,37 @@ //! collection. //! 
-use crate::Event; use super::maybe_utf8_bytes_to_string; +use crate::Event; -use futures::{prelude::*, channel::mpsc, ready}; +use futures::{channel::mpsc, prelude::*, ready}; use parking_lot::Mutex; use prometheus_endpoint::{register, CounterVec, GaugeVec, Opts, PrometheusError, Registry, U64}; use std::{ - convert::TryFrom as _, - fmt, pin::Pin, sync::Arc, - task::{Context, Poll} + convert::TryFrom as _, + fmt, + pin::Pin, + sync::Arc, + task::{Context, Poll}, }; /// Creates a new channel that can be associated to a [`OutChannels`]. /// /// The name is used in Prometheus reports. pub fn channel(name: &'static str) -> (Sender, Receiver) { - let (tx, rx) = mpsc::unbounded(); - let metrics = Arc::new(Mutex::new(None)); - let tx = Sender { inner: tx, name, metrics: metrics.clone() }; - let rx = Receiver { inner: rx, name, metrics }; - (tx, rx) + let (tx, rx) = mpsc::unbounded(); + let metrics = Arc::new(Mutex::new(None)); + let tx = Sender { + inner: tx, + name, + metrics: metrics.clone(), + }; + let rx = Receiver { + inner: rx, + name, + metrics, + }; + (tx, rx) } /// Sending side of a channel. @@ -61,136 +71,135 @@ pub fn channel(name: &'static str) -> (Sender, Receiver) { /// implement the `Clone` trait e.g. in Order to not complicate the logic keeping the metrics in /// sync on drop. If someone adds a `#[derive(Clone)]` below, it is **wrong**. pub struct Sender { - inner: mpsc::UnboundedSender, - name: &'static str, - /// Clone of [`Receiver::metrics`]. - metrics: Arc>>>>, + inner: mpsc::UnboundedSender, + name: &'static str, + /// Clone of [`Receiver::metrics`]. 
+ metrics: Arc>>>>, } impl fmt::Debug for Sender { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("Sender").finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("Sender").finish() + } } impl Drop for Sender { - fn drop(&mut self) { - let metrics = self.metrics.lock(); - if let Some(Some(metrics)) = metrics.as_ref().map(|m| &**m) { - metrics.num_channels.with_label_values(&[self.name]).dec(); - } - } + fn drop(&mut self) { + let metrics = self.metrics.lock(); + if let Some(Some(metrics)) = metrics.as_ref().map(|m| &**m) { + metrics.num_channels.with_label_values(&[self.name]).dec(); + } + } } /// Receiving side of a channel. pub struct Receiver { - inner: mpsc::UnboundedReceiver, - name: &'static str, - /// Initially contains `None`, and will be set to a value once the corresponding [`Sender`] - /// is assigned to an instance of [`OutChannels`]. - metrics: Arc>>>>, + inner: mpsc::UnboundedReceiver, + name: &'static str, + /// Initially contains `None`, and will be set to a value once the corresponding [`Sender`] + /// is assigned to an instance of [`OutChannels`]. 
+ metrics: Arc>>>>, } impl Stream for Receiver { - type Item = Event; + type Item = Event; - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - if let Some(ev) = ready!(Pin::new(&mut self.inner).poll_next(cx)) { - let metrics = self.metrics.lock().clone(); - if let Some(Some(metrics)) = metrics.as_ref().map(|m| &**m) { - metrics.event_out(&ev, self.name); - } else { - log::warn!("Inconsistency in out_events: event happened before sender associated"); - } - Poll::Ready(Some(ev)) - } else { - Poll::Ready(None) - } - } + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + if let Some(ev) = ready!(Pin::new(&mut self.inner).poll_next(cx)) { + let metrics = self.metrics.lock().clone(); + if let Some(Some(metrics)) = metrics.as_ref().map(|m| &**m) { + metrics.event_out(&ev, self.name); + } else { + log::warn!("Inconsistency in out_events: event happened before sender associated"); + } + Poll::Ready(Some(ev)) + } else { + Poll::Ready(None) + } + } } impl fmt::Debug for Receiver { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("Receiver").finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("Receiver").finish() + } } impl Drop for Receiver { - fn drop(&mut self) { - // Empty the list to properly decrease the metrics. - while let Some(Some(_)) = self.next().now_or_never() {} - } + fn drop(&mut self) { + // Empty the list to properly decrease the metrics. + while let Some(Some(_)) = self.next().now_or_never() {} + } } /// Collection of senders. pub struct OutChannels { - event_streams: Vec, - /// The metrics we collect. A clone of this is sent to each [`Receiver`] associated with this - /// object. - metrics: Arc>, + event_streams: Vec, + /// The metrics we collect. A clone of this is sent to each [`Receiver`] associated with this + /// object. + metrics: Arc>, } impl OutChannels { - /// Creates a new empty collection of senders. 
- pub fn new(registry: Option<&Registry>) -> Result { - let metrics = if let Some(registry) = registry { - Some(Metrics::register(registry)?) - } else { - None - }; + /// Creates a new empty collection of senders. + pub fn new(registry: Option<&Registry>) -> Result { + let metrics = if let Some(registry) = registry { + Some(Metrics::register(registry)?) + } else { + None + }; - Ok(OutChannels { - event_streams: Vec::new(), - metrics: Arc::new(metrics), - }) - } + Ok(OutChannels { + event_streams: Vec::new(), + metrics: Arc::new(metrics), + }) + } - /// Adds a new [`Sender`] to the collection. - pub fn push(&mut self, sender: Sender) { - let mut metrics = sender.metrics.lock(); - debug_assert!(metrics.is_none()); - *metrics = Some(self.metrics.clone()); - drop(metrics); + /// Adds a new [`Sender`] to the collection. + pub fn push(&mut self, sender: Sender) { + let mut metrics = sender.metrics.lock(); + debug_assert!(metrics.is_none()); + *metrics = Some(self.metrics.clone()); + drop(metrics); - if let Some(metrics) = &*self.metrics { - metrics.num_channels.with_label_values(&[sender.name]).inc(); - } + if let Some(metrics) = &*self.metrics { + metrics.num_channels.with_label_values(&[sender.name]).inc(); + } - self.event_streams.push(sender); - } + self.event_streams.push(sender); + } - /// Sends an event. - pub fn send(&mut self, event: Event) { - self.event_streams.retain(|sender| { - sender.inner.unbounded_send(event.clone()).is_ok() - }); + /// Sends an event. 
+ pub fn send(&mut self, event: Event) { + self.event_streams + .retain(|sender| sender.inner.unbounded_send(event.clone()).is_ok()); - if let Some(metrics) = &*self.metrics { - for ev in &self.event_streams { - metrics.event_in(&event, 1, ev.name); - } - } - } + if let Some(metrics) = &*self.metrics { + for ev in &self.event_streams { + metrics.event_in(&event, 1, ev.name); + } + } + } } impl fmt::Debug for OutChannels { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_struct("OutChannels") - .field("num_channels", &self.event_streams.len()) - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_struct("OutChannels") + .field("num_channels", &self.event_streams.len()) + .finish() + } } struct Metrics { - // This list is ordered alphabetically - events_total: CounterVec, - notifications_sizes: CounterVec, - num_channels: GaugeVec, + // This list is ordered alphabetically + events_total: CounterVec, + notifications_sizes: CounterVec, + num_channels: GaugeVec, } impl Metrics { - fn register(registry: &Registry) -> Result { - Ok(Self { + fn register(registry: &Registry) -> Result { + Ok(Self { events_total: register(CounterVec::new( Opts::new( "sub_libp2p_out_events_events_total", @@ -215,65 +224,75 @@ impl Metrics { &["name"] )?, registry)?, }) - } + } - fn event_in(&self, event: &Event, num: u64, name: &str) { - match event { - Event::Dht(_) => { - self.events_total - .with_label_values(&["dht", "sent", name]) - .inc_by(num); - } - Event::NotificationStreamOpened { engine_id, .. } => { - self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "sent", name]) - .inc_by(num); - }, - Event::NotificationStreamClosed { engine_id, .. } => { - self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "sent", name]) - .inc_by(num); - }, - Event::NotificationsReceived { messages, .. 
} => { - for (engine_id, message) in messages { - self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "sent", name]) - .inc_by(num); - self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "sent", name]) - .inc_by(num.saturating_mul(u64::try_from(message.len()).unwrap_or(u64::max_value()))); - } - }, - } - } + fn event_in(&self, event: &Event, num: u64, name: &str) { + match event { + Event::Dht(_) => { + self.events_total + .with_label_values(&["dht", "sent", name]) + .inc_by(num); + } + Event::NotificationStreamOpened { engine_id, .. } => { + self.events_total + .with_label_values(&[&format!("notif-open-{:?}", engine_id), "sent", name]) + .inc_by(num); + } + Event::NotificationStreamClosed { engine_id, .. } => { + self.events_total + .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "sent", name]) + .inc_by(num); + } + Event::NotificationsReceived { messages, .. } => { + for (engine_id, message) in messages { + self.events_total + .with_label_values(&[&format!("notif-{:?}", engine_id), "sent", name]) + .inc_by(num); + self.notifications_sizes + .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "sent", name]) + .inc_by(num.saturating_mul( + u64::try_from(message.len()).unwrap_or(u64::max_value()), + )); + } + } + } + } - fn event_out(&self, event: &Event, name: &str) { - match event { - Event::Dht(_) => { - self.events_total - .with_label_values(&["dht", "received", name]) - .inc(); - } - Event::NotificationStreamOpened { engine_id, .. } => { - self.events_total - .with_label_values(&[&format!("notif-open-{:?}", engine_id), "received", name]) - .inc(); - }, - Event::NotificationStreamClosed { engine_id, .. } => { - self.events_total - .with_label_values(&[&format!("notif-closed-{:?}", engine_id), "received", name]) - .inc(); - }, - Event::NotificationsReceived { messages, .. 
} => { - for (engine_id, message) in messages { - self.events_total - .with_label_values(&[&format!("notif-{:?}", engine_id), "received", name]) - .inc(); - self.notifications_sizes - .with_label_values(&[&maybe_utf8_bytes_to_string(engine_id), "received", name]) - .inc_by(u64::try_from(message.len()).unwrap_or(u64::max_value())); - } - }, - } - } + fn event_out(&self, event: &Event, name: &str) { + match event { + Event::Dht(_) => { + self.events_total + .with_label_values(&["dht", "received", name]) + .inc(); + } + Event::NotificationStreamOpened { engine_id, .. } => { + self.events_total + .with_label_values(&[&format!("notif-open-{:?}", engine_id), "received", name]) + .inc(); + } + Event::NotificationStreamClosed { engine_id, .. } => { + self.events_total + .with_label_values(&[ + &format!("notif-closed-{:?}", engine_id), + "received", + name, + ]) + .inc(); + } + Event::NotificationsReceived { messages, .. } => { + for (engine_id, message) in messages { + self.events_total + .with_label_values(&[&format!("notif-{:?}", engine_id), "received", name]) + .inc(); + self.notifications_sizes + .with_label_values(&[ + &maybe_utf8_bytes_to_string(engine_id), + "received", + name, + ]) + .inc_by(u64::try_from(message.len()).unwrap_or(u64::max_value())); + } + } + } + } } diff --git a/client/network/src/service/tests.rs b/client/network/src/service/tests.rs index a60b32efb4..394846f6bd 100644 --- a/client/network/src/service/tests.rs +++ b/client/network/src/service/tests.rs @@ -22,8 +22,8 @@ use std::{sync::Arc, time::Duration}; use substrate_test_runtime_client::{TestClientBuilder, TestClientBuilderExt as _}; type TestNetworkService = NetworkService< - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::Hash, + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::Hash, >; /// Builds a full node to be used for testing. 
Returns the node service and its associated events @@ -31,240 +31,275 @@ type TestNetworkService = NetworkService< /// /// > **Note**: We return the events stream in order to not possibly lose events between the /// > construction of the service and the moment the events stream is grabbed. -fn build_test_full_node(config: config::NetworkConfiguration) - -> (Arc, impl Stream) -{ - let client = Arc::new( - TestClientBuilder::with_default_backend() - .build_with_longest_chain() - .0, - ); +fn build_test_full_node( + config: config::NetworkConfiguration, +) -> (Arc, impl Stream) { + let client = Arc::new( + TestClientBuilder::with_default_backend() + .build_with_longest_chain() + .0, + ); - #[derive(Clone)] - struct PassThroughVerifier(bool); - impl sp_consensus::import_queue::Verifier for PassThroughVerifier { - fn verify( - &mut self, - origin: sp_consensus::BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, - ) -> Result< - ( - sp_consensus::BlockImportParams, - Option)>>, - ), - String, - > { - let maybe_keys = header - .digest() - .log(|l| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| { - l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"babe")) - }) - }) - .map(|blob| { - vec![( - sp_blockchain::well_known_cache_keys::AUTHORITIES, - blob.to_vec(), - )] - }); + #[derive(Clone)] + struct PassThroughVerifier(bool); + impl sp_consensus::import_queue::Verifier for PassThroughVerifier { + fn verify( + &mut self, + origin: sp_consensus::BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result< + ( + sp_consensus::BlockImportParams, + Option)>>, + ), + String, + > { + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| { + l.try_as_raw(sp_runtime::generic::OpaqueDigestItemId::Consensus( + b"babe", + )) + }) + }) + .map(|blob| { + vec![( + 
sp_blockchain::well_known_cache_keys::AUTHORITIES, + blob.to_vec(), + )] + }); - let mut import = sp_consensus::BlockImportParams::new(origin, header); - import.body = body; - import.finalized = self.0; - import.justification = justification; - import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); - Ok((import, maybe_keys)) - } - } + let mut import = sp_consensus::BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.0; + import.justification = justification; + import.fork_choice = Some(sp_consensus::ForkChoiceStrategy::LongestChain); + Ok((import, maybe_keys)) + } + } - let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( - PassThroughVerifier(false), - Box::new(client.clone()), - None, - None, - )); + let import_queue = Box::new(sp_consensus::import_queue::BasicQueue::new( + PassThroughVerifier(false), + Box::new(client.clone()), + None, + None, + )); - let worker = NetworkWorker::new(config::Params { - role: config::Role::Full, - executor: None, - network_config: config, - chain: client.clone(), - finality_proof_provider: None, - finality_proof_request_builder: None, - on_demand: None, - transaction_pool: Arc::new(crate::service::EmptyTransactionPool), - protocol_id: config::ProtocolId::from(&b"/test-protocol-name"[..]), - import_queue, - block_announce_validator: Box::new( - sp_consensus::block_validation::DefaultBlockAnnounceValidator::new(client.clone()), - ), - metrics_registry: None, - }) - .unwrap(); + let worker = NetworkWorker::new(config::Params { + role: config::Role::Full, + executor: None, + network_config: config, + chain: client.clone(), + finality_proof_provider: None, + finality_proof_request_builder: None, + on_demand: None, + transaction_pool: Arc::new(crate::service::EmptyTransactionPool), + protocol_id: config::ProtocolId::from(&b"/test-protocol-name"[..]), + import_queue, + block_announce_validator: Box::new( + 
sp_consensus::block_validation::DefaultBlockAnnounceValidator::new(client.clone()), + ), + metrics_registry: None, + }) + .unwrap(); - let service = worker.service().clone(); - let event_stream = service.event_stream("test"); + let service = worker.service().clone(); + let event_stream = service.event_stream("test"); - async_std::task::spawn(async move { - futures::pin_mut!(worker); - let _ = worker.await; - }); + async_std::task::spawn(async move { + futures::pin_mut!(worker); + let _ = worker.await; + }); - (service, event_stream) + (service, event_stream) } const ENGINE_ID: sp_runtime::ConsensusEngineId = *b"foo\0"; /// Builds two nodes and their associated events stream. /// The nodes are connected together and have the `ENGINE_ID` protocol registered. -fn build_nodes_one_proto() - -> (Arc, impl Stream, Arc, impl Stream) -{ - let listen_addr = config::build_multiaddr![Memory(rand::random::())]; +fn build_nodes_one_proto() -> ( + Arc, + impl Stream, + Arc, + impl Stream, +) { + let listen_addr = config::build_multiaddr![Memory(rand::random::())]; - let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], - listen_addresses: vec![listen_addr.clone()], - transport: config::TransportConfig::MemoryOnly, - .. 
config::NetworkConfiguration::new_local() - }); + let (node1, events_stream1) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + listen_addresses: vec![listen_addr.clone()], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); - let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { - notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], - reserved_nodes: vec![config::MultiaddrWithPeerId { - multiaddr: listen_addr, - peer_id: node1.local_peer_id().clone(), - }], - transport: config::TransportConfig::MemoryOnly, - .. config::NetworkConfiguration::new_local() - }); + let (node2, events_stream2) = build_test_full_node(config::NetworkConfiguration { + notifications_protocols: vec![(ENGINE_ID, From::from(&b"/foo"[..]))], + reserved_nodes: vec![config::MultiaddrWithPeerId { + multiaddr: listen_addr, + peer_id: node1.local_peer_id().clone(), + }], + transport: config::TransportConfig::MemoryOnly, + ..config::NetworkConfiguration::new_local() + }); - (node1, events_stream1, node2, events_stream2) + (node1, events_stream1, node2, events_stream2) } #[test] fn notifications_state_consistent() { - // Runs two nodes and ensures that events are propagated out of the API in a consistent - // correct order, which means no notification received on a closed substream. + // Runs two nodes and ensures that events are propagated out of the API in a consistent + // correct order, which means no notification received on a closed substream. - let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); + let (node1, mut events_stream1, node2, mut events_stream2) = build_nodes_one_proto(); - // Write some initial notifications that shouldn't get through. 
- for _ in 0..(rand::random::() % 5) { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); - } - for _ in 0..(rand::random::() % 5) { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); - } + // Write some initial notifications that shouldn't get through. + for _ in 0..(rand::random::() % 5) { + node1.write_notification( + node2.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec(), + ); + } + for _ in 0..(rand::random::() % 5) { + node2.write_notification( + node1.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec(), + ); + } - async_std::task::block_on(async move { - // True if we have an active substream from node1 to node2. - let mut node1_to_node2_open = false; - // True if we have an active substream from node2 to node1. - let mut node2_to_node1_open = false; - // We stop the test after a certain number of iterations. - let mut iterations = 0; - // Safe guard because we don't want the test to pass if no substream has been open. - let mut something_happened = false; + async_std::task::block_on(async move { + // True if we have an active substream from node1 to node2. + let mut node1_to_node2_open = false; + // True if we have an active substream from node2 to node1. + let mut node2_to_node1_open = false; + // We stop the test after a certain number of iterations. + let mut iterations = 0; + // Safe guard because we don't want the test to pass if no substream has been open. + let mut something_happened = false; - loop { - iterations += 1; - if iterations >= 1_000 { - assert!(something_happened); - break; - } + loop { + iterations += 1; + if iterations >= 1_000 { + assert!(something_happened); + break; + } - // Start by sending a notification from node1 to node2 and vice-versa. Part of the - // test consists in ensuring that notifications get ignored if the stream isn't open. 
- if rand::random::() % 5 >= 3 { - node1.write_notification(node2.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); - } - if rand::random::() % 5 >= 3 { - node2.write_notification(node1.local_peer_id().clone(), ENGINE_ID, b"hello world".to_vec()); - } + // Start by sending a notification from node1 to node2 and vice-versa. Part of the + // test consists in ensuring that notifications get ignored if the stream isn't open. + if rand::random::() % 5 >= 3 { + node1.write_notification( + node2.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec(), + ); + } + if rand::random::() % 5 >= 3 { + node2.write_notification( + node1.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec(), + ); + } - // Also randomly disconnect the two nodes from time to time. - if rand::random::() % 20 == 0 { - node1.disconnect_peer(node2.local_peer_id().clone()); - } - if rand::random::() % 20 == 0 { - node2.disconnect_peer(node1.local_peer_id().clone()); - } + // Also randomly disconnect the two nodes from time to time. + if rand::random::() % 20 == 0 { + node1.disconnect_peer(node2.local_peer_id().clone()); + } + if rand::random::() % 20 == 0 { + node2.disconnect_peer(node1.local_peer_id().clone()); + } - // Grab next event from either `events_stream1` or `events_stream2`. - let next_event = { - let next1 = events_stream1.next(); - let next2 = events_stream2.next(); - // We also await on a small timer, otherwise it is possible for the test to wait - // forever while nothing at all happens on the network. 
- let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); - match future::select(future::select(next1, next2), continue_test).await { - future::Either::Left((future::Either::Left((Some(ev), _)), _)) => - future::Either::Left(ev), - future::Either::Left((future::Either::Right((Some(ev), _)), _)) => - future::Either::Right(ev), - future::Either::Right(_) => continue, - _ => break, - } - }; + // Grab next event from either `events_stream1` or `events_stream2`. + let next_event = { + let next1 = events_stream1.next(); + let next2 = events_stream2.next(); + // We also await on a small timer, otherwise it is possible for the test to wait + // forever while nothing at all happens on the network. + let continue_test = futures_timer::Delay::new(Duration::from_millis(20)); + match future::select(future::select(next1, next2), continue_test).await { + future::Either::Left((future::Either::Left((Some(ev), _)), _)) => { + future::Either::Left(ev) + } + future::Either::Left((future::Either::Right((Some(ev), _)), _)) => { + future::Either::Right(ev) + } + future::Either::Right(_) => continue, + _ => break, + } + }; - match next_event { - future::Either::Left(Event::NotificationStreamOpened { remote, engine_id, .. }) => { - something_happened = true; - assert!(!node1_to_node2_open); - node1_to_node2_open = true; - assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } - future::Either::Right(Event::NotificationStreamOpened { remote, engine_id, .. }) => { - something_happened = true; - assert!(!node2_to_node1_open); - node2_to_node1_open = true; - assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } - future::Either::Left(Event::NotificationStreamClosed { remote, engine_id, .. 
}) => { - assert!(node1_to_node2_open); - node1_to_node2_open = false; - assert_eq!(remote, *node2.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } - future::Either::Right(Event::NotificationStreamClosed { remote, engine_id, .. }) => { - assert!(node2_to_node1_open); - node2_to_node1_open = false; - assert_eq!(remote, *node1.local_peer_id()); - assert_eq!(engine_id, ENGINE_ID); - } - future::Either::Left(Event::NotificationsReceived { remote, .. }) => { - assert!(node1_to_node2_open); - assert_eq!(remote, *node2.local_peer_id()); - if rand::random::() % 5 >= 4 { - node1.write_notification( - node2.local_peer_id().clone(), - ENGINE_ID, - b"hello world".to_vec() - ); - } - } - future::Either::Right(Event::NotificationsReceived { remote, .. }) => { - assert!(node2_to_node1_open); - assert_eq!(remote, *node1.local_peer_id()); - if rand::random::() % 5 >= 4 { - node2.write_notification( - node1.local_peer_id().clone(), - ENGINE_ID, - b"hello world".to_vec() - ); - } - } + match next_event { + future::Either::Left(Event::NotificationStreamOpened { + remote, engine_id, .. + }) => { + something_happened = true; + assert!(!node1_to_node2_open); + node1_to_node2_open = true; + assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Right(Event::NotificationStreamOpened { + remote, + engine_id, + .. + }) => { + something_happened = true; + assert!(!node2_to_node1_open); + node2_to_node1_open = true; + assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Left(Event::NotificationStreamClosed { + remote, engine_id, .. + }) => { + assert!(node1_to_node2_open); + node1_to_node2_open = false; + assert_eq!(remote, *node2.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Right(Event::NotificationStreamClosed { + remote, + engine_id, + .. 
+ }) => { + assert!(node2_to_node1_open); + node2_to_node1_open = false; + assert_eq!(remote, *node1.local_peer_id()); + assert_eq!(engine_id, ENGINE_ID); + } + future::Either::Left(Event::NotificationsReceived { remote, .. }) => { + assert!(node1_to_node2_open); + assert_eq!(remote, *node2.local_peer_id()); + if rand::random::() % 5 >= 4 { + node1.write_notification( + node2.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec(), + ); + } + } + future::Either::Right(Event::NotificationsReceived { remote, .. }) => { + assert!(node2_to_node1_open); + assert_eq!(remote, *node1.local_peer_id()); + if rand::random::() % 5 >= 4 { + node2.write_notification( + node1.local_peer_id().clone(), + ENGINE_ID, + b"hello world".to_vec(), + ); + } + } - // Add new events here. - future::Either::Left(Event::Dht(_)) => {} - future::Either::Right(Event::Dht(_)) => {} - }; - } - }); + // Add new events here. + future::Either::Left(Event::Dht(_)) => {} + future::Either::Right(Event::Dht(_)) => {} + }; + } + }); } diff --git a/client/network/src/transport.rs b/client/network/src/transport.rs index 75ee2d5db8..2ae1294c30 100644 --- a/client/network/src/transport.rs +++ b/client/network/src/transport.rs @@ -15,13 +15,15 @@ // along with Substrate. If not, see . 
use futures::prelude::*; +use libp2p::core::{ + self, muxing::StreamMuxerBox, transport::boxed::Boxed, transport::OptionalTransport, upgrade, +}; use libp2p::{ - InboundUpgradeExt, OutboundUpgradeExt, PeerId, Transport, - mplex, identity, bandwidth, wasm_ext, noise + bandwidth, identity, mplex, noise, wasm_ext, InboundUpgradeExt, OutboundUpgradeExt, PeerId, + Transport, }; #[cfg(not(target_os = "unknown"))] -use libp2p::{tcp, dns, websocket}; -use libp2p::core::{self, upgrade, transport::boxed::Boxed, transport::OptionalTransport, muxing::StreamMuxerBox}; +use libp2p::{dns, tcp, websocket}; use std::{io, sync::Arc, time::Duration, usize}; pub use self::bandwidth::BandwidthSinks; @@ -34,92 +36,99 @@ pub use self::bandwidth::BandwidthSinks; /// Returns a `BandwidthSinks` object that allows querying the average bandwidth produced by all /// the connections spawned with this transport. pub fn build_transport( - keypair: identity::Keypair, - memory_only: bool, - wasm_external_transport: Option, - use_yamux_flow_control: bool -) -> (Boxed<(PeerId, StreamMuxerBox), io::Error>, Arc) { - // Build configuration objects for encryption mechanisms. - let noise_config = { - let noise_keypair = noise::Keypair::new().into_authentic(&keypair) + keypair: identity::Keypair, + memory_only: bool, + wasm_external_transport: Option, + use_yamux_flow_control: bool, +) -> ( + Boxed<(PeerId, StreamMuxerBox), io::Error>, + Arc, +) { + // Build configuration objects for encryption mechanisms. + let noise_config = + { + let noise_keypair = noise::Keypair::new().into_authentic(&keypair) // For more information about this panic, see in "On the Importance of Checking // Cryptographic Protocols for Faults" by Dan Boneh, Richard A. DeMillo, // and Richard J. Lipton. 
.expect("can only fail in case of a hardware bug; since this signing is performed only \ once and at initialization, we're taking the bet that the inconvenience of a very \ rare panic here is basically zero"); - noise::NoiseConfig::ix(noise_keypair) - }; - - // Build configuration objects for multiplexing mechanisms. - let mut mplex_config = mplex::MplexConfig::new(); - mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); - mplex_config.max_buffer_len(usize::MAX); - - let mut yamux_config = libp2p::yamux::Config::default(); - yamux_config.set_lazy_open(true); // Only set SYN flag on first data frame sent to the remote. - - if use_yamux_flow_control { - // Enable proper flow-control: window updates are only sent when - // buffered data has been consumed. - yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); - } - - // Build the base layer of the transport. - let transport = if let Some(t) = wasm_external_transport { - OptionalTransport::some(t) - } else { - OptionalTransport::none() - }; - #[cfg(not(target_os = "unknown"))] - let transport = transport.or_transport(if !memory_only { - let desktop_trans = tcp::TcpConfig::new(); - let desktop_trans = websocket::WsConfig::new(desktop_trans.clone()) - .or_transport(desktop_trans); - OptionalTransport::some(if let Ok(dns) = dns::DnsConfig::new(desktop_trans.clone()) { - dns.boxed() - } else { - desktop_trans.map_err(dns::DnsErr::Underlying).boxed() - }) - } else { - OptionalTransport::none() - }); - - let transport = transport.or_transport(if memory_only { - OptionalTransport::some(libp2p::core::transport::MemoryTransport::default()) - } else { - OptionalTransport::none() - }); - - let (transport, sinks) = bandwidth::BandwidthLogging::new(transport, Duration::from_secs(5)); - - // Encryption - let transport = transport.and_then(move |stream, endpoint| { - core::upgrade::apply(stream, noise_config, endpoint, upgrade::Version::V1) - .and_then(|(remote_id, out)| async move { - 
let remote_key = match remote_id { - noise::RemoteIdentity::IdentityKey(key) => key, - _ => return Err(upgrade::UpgradeError::Apply(noise::NoiseError::InvalidKey)) - }; - Ok((out, remote_key.into_peer_id())) - }) - }); - - // Multiplexing - let transport = transport.and_then(move |(stream, peer_id), endpoint| { - let peer_id2 = peer_id.clone(); - let upgrade = core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) - .map_inbound(move |muxer| (peer_id, muxer)) - .map_outbound(move |muxer| (peer_id2, muxer)); - - core::upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1) - .map_ok(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) - }); - - let transport = transport - .timeout(Duration::from_secs(20)) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .boxed(); - - (transport, sinks) + noise::NoiseConfig::ix(noise_keypair) + }; + + // Build configuration objects for multiplexing mechanisms. + let mut mplex_config = mplex::MplexConfig::new(); + mplex_config.max_buffer_len_behaviour(mplex::MaxBufferBehaviour::Block); + mplex_config.max_buffer_len(usize::MAX); + + let mut yamux_config = libp2p::yamux::Config::default(); + yamux_config.set_lazy_open(true); // Only set SYN flag on first data frame sent to the remote. + + if use_yamux_flow_control { + // Enable proper flow-control: window updates are only sent when + // buffered data has been consumed. + yamux_config.set_window_update_mode(libp2p::yamux::WindowUpdateMode::OnRead); + } + + // Build the base layer of the transport. 
+ let transport = if let Some(t) = wasm_external_transport { + OptionalTransport::some(t) + } else { + OptionalTransport::none() + }; + #[cfg(not(target_os = "unknown"))] + let transport = transport.or_transport(if !memory_only { + let desktop_trans = tcp::TcpConfig::new(); + let desktop_trans = + websocket::WsConfig::new(desktop_trans.clone()).or_transport(desktop_trans); + OptionalTransport::some( + if let Ok(dns) = dns::DnsConfig::new(desktop_trans.clone()) { + dns.boxed() + } else { + desktop_trans.map_err(dns::DnsErr::Underlying).boxed() + }, + ) + } else { + OptionalTransport::none() + }); + + let transport = transport.or_transport(if memory_only { + OptionalTransport::some(libp2p::core::transport::MemoryTransport::default()) + } else { + OptionalTransport::none() + }); + + let (transport, sinks) = bandwidth::BandwidthLogging::new(transport, Duration::from_secs(5)); + + // Encryption + let transport = transport.and_then(move |stream, endpoint| { + core::upgrade::apply(stream, noise_config, endpoint, upgrade::Version::V1).and_then( + |(remote_id, out)| async move { + let remote_key = match remote_id { + noise::RemoteIdentity::IdentityKey(key) => key, + _ => return Err(upgrade::UpgradeError::Apply(noise::NoiseError::InvalidKey)), + }; + Ok((out, remote_key.into_peer_id())) + }, + ) + }); + + // Multiplexing + let transport = transport.and_then(move |(stream, peer_id), endpoint| { + let peer_id2 = peer_id.clone(); + let upgrade = core::upgrade::SelectUpgrade::new(yamux_config, mplex_config) + .map_inbound(move |muxer| (peer_id, muxer)) + .map_outbound(move |muxer| (peer_id2, muxer)); + + core::upgrade::apply(stream, upgrade, endpoint, upgrade::Version::V1) + .map_ok(|(id, muxer)| (id, core::muxing::StreamMuxerBox::new(muxer))) + }); + + let transport = transport + .timeout(Duration::from_secs(20)) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .boxed(); + + (transport, sinks) } diff --git a/client/network/src/utils.rs b/client/network/src/utils.rs 
index f13505d012..7cd21957e9 100644 --- a/client/network/src/utils.rs +++ b/client/network/src/utils.rs @@ -14,12 +14,10 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::time::Duration; -use futures::{FutureExt, Stream, StreamExt, stream::unfold}; +use futures::{stream::unfold, FutureExt, Stream, StreamExt}; use futures_timer::Delay; +use std::time::Duration; -pub fn interval(duration: Duration) -> impl Stream + Unpin { - unfold((), move |_| { - Delay::new(duration).map(|_| Some(((), ()))) - }).map(drop) +pub fn interval(duration: Duration) -> impl Stream + Unpin { + unfold((), move |_| Delay::new(duration).map(|_| Some(((), ())))).map(drop) } diff --git a/client/network/test/src/block_import.rs b/client/network/test/src/block_import.rs index aa6d275141..4a123a97f1 100644 --- a/client/network/test/src/block_import.rs +++ b/client/network/test/src/block_import.rs @@ -16,75 +16,106 @@ //! Testing block import logic. -use sp_consensus::ImportedAux; +use super::*; +use sc_block_builder::BlockBuilderProvider; use sp_consensus::import_queue::{ - import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, + import_single_block, BasicQueue, BlockImportError, BlockImportResult, IncomingBlock, }; -use substrate_test_runtime_client::{self, prelude::*}; -use substrate_test_runtime_client::runtime::{Block, Hash}; +use sp_consensus::ImportedAux; use sp_runtime::generic::BlockId; -use sc_block_builder::BlockBuilderProvider; -use super::*; +use substrate_test_runtime_client::runtime::{Block, Hash}; +use substrate_test_runtime_client::{self, prelude::*}; fn prepare_good_block() -> (TestClient, Hash, u64, PeerId, IncomingBlock) { - let mut client = substrate_test_runtime_client::new(); - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::File, block).unwrap(); + let mut client = substrate_test_runtime_client::new(); + let block 
= client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::File, block).unwrap(); - let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); - let header = client.header(&BlockId::Number(1)).unwrap(); - let justification = client.justification(&BlockId::Number(1)).unwrap(); - let peer_id = PeerId::random(); - (client, hash, number, peer_id.clone(), IncomingBlock { - hash, - header, - body: Some(Vec::new()), - justification, - origin: Some(peer_id.clone()), - allow_missing_state: false, - import_existing: false, - }) + let (hash, number) = (client.block_hash(1).unwrap().unwrap(), 1); + let header = client.header(&BlockId::Number(1)).unwrap(); + let justification = client.justification(&BlockId::Number(1)).unwrap(); + let peer_id = PeerId::random(); + ( + client, + hash, + number, + peer_id.clone(), + IncomingBlock { + hash, + header, + body: Some(Vec::new()), + justification, + origin: Some(peer_id.clone()), + allow_missing_state: false, + import_existing: false, + }, + ) } #[test] fn import_single_good_block_works() { - let (_, _hash, number, peer_id, block) = prepare_good_block(); + let (_, _hash, number, peer_id, block) = prepare_good_block(); - let mut expected_aux = ImportedAux::default(); - expected_aux.is_new_best = true; + let mut expected_aux = ImportedAux::default(); + expected_aux.is_new_best = true; - match import_single_block(&mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier(true)) { - Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) - if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} - r @ _ => panic!("{:?}", r) - } + match import_single_block( + &mut substrate_test_runtime_client::new(), + BlockOrigin::File, + block, + &mut PassThroughVerifier(true), + ) { + Ok(BlockImportResult::ImportedUnknown(ref num, ref aux, ref org)) + if *num == number && *aux == expected_aux && *org == Some(peer_id) => {} + r @ _ 
=> panic!("{:?}", r), + } } #[test] fn import_single_good_known_block_is_ignored() { - let (mut client, _hash, number, _, block) = prepare_good_block(); - match import_single_block(&mut client, BlockOrigin::File, block, &mut PassThroughVerifier(true)) { - Ok(BlockImportResult::ImportedKnown(ref n)) if *n == number => {} - _ => panic!() - } + let (mut client, _hash, number, _, block) = prepare_good_block(); + match import_single_block( + &mut client, + BlockOrigin::File, + block, + &mut PassThroughVerifier(true), + ) { + Ok(BlockImportResult::ImportedKnown(ref n)) if *n == number => {} + _ => panic!(), + } } #[test] fn import_single_good_block_without_header_fails() { - let (_, _, _, peer_id, mut block) = prepare_good_block(); - block.header = None; - match import_single_block(&mut substrate_test_runtime_client::new(), BlockOrigin::File, block, &mut PassThroughVerifier(true)) { - Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} - _ => panic!() - } + let (_, _, _, peer_id, mut block) = prepare_good_block(); + block.header = None; + match import_single_block( + &mut substrate_test_runtime_client::new(), + BlockOrigin::File, + block, + &mut PassThroughVerifier(true), + ) { + Err(BlockImportError::IncompleteHeader(ref org)) if *org == Some(peer_id) => {} + _ => panic!(), + } } #[test] fn async_import_queue_drops() { - // Perform this test multiple times since it exhibits non-deterministic behavior. - for _ in 0..100 { - let verifier = PassThroughVerifier(true); - let queue = BasicQueue::new(verifier, Box::new(substrate_test_runtime_client::new()), None, None); - drop(queue); - } + // Perform this test multiple times since it exhibits non-deterministic behavior. 
+ for _ in 0..100 { + let verifier = PassThroughVerifier(true); + let queue = BasicQueue::new( + verifier, + Box::new(substrate_test_runtime_client::new()), + None, + None, + ); + drop(queue); + } } diff --git a/client/network/test/src/lib.rs b/client/network/test/src/lib.rs index 7b070f8041..f52da6c607 100644 --- a/client/network/test/src/lib.rs +++ b/client/network/test/src/lib.rs @@ -21,33 +21,46 @@ mod block_import; #[cfg(test)] mod sync; -use std::{collections::HashMap, pin::Pin, sync::Arc, marker::PhantomData, task::{Poll, Context as FutureContext}}; +use std::{ + collections::HashMap, + marker::PhantomData, + pin::Pin, + sync::Arc, + task::{Context as FutureContext, Poll}, +}; +use futures::prelude::*; use libp2p::build_multiaddr; +use libp2p::PeerId; use log::trace; -use sc_network::config::FinalityProofProvider; -use sp_blockchain::{ - Result as ClientResult, well_known_cache_keys::{self, Id as CacheKeyId}, Info as BlockchainInfo, -}; -use sc_client_api::{BlockchainEvents, BlockImportNotification, FinalityNotifications, ImportNotifications, FinalityNotification, backend::{TransactionFor, AuxStore, Backend, Finalizer}, BlockBackend}; +use parking_lot::Mutex; use sc_block_builder::{BlockBuilder, BlockBuilderProvider}; -use sc_client::LongestChain; use sc_client::blockchain::HeaderBackend; +use sc_client::LongestChain; +use sc_client_api::{ + backend::{AuxStore, Backend, Finalizer, TransactionFor}, + BlockBackend, BlockImportNotification, BlockchainEvents, FinalityNotification, + FinalityNotifications, ImportNotifications, +}; +use sc_network::config::FinalityProofProvider; +use sc_network::config::ProtocolConfig; use sc_network::config::Role; +use sc_network::config::{BoxFinalityProofRequestBuilder, NetworkConfiguration, TransportConfig}; +use sc_network::{config::ProtocolId, NetworkService, NetworkWorker}; +use sp_blockchain::{ + well_known_cache_keys::{self, Id as CacheKeyId}, + Info as BlockchainInfo, Result as ClientResult, +}; +use 
sp_consensus::block_import::{BlockImport, ImportResult}; use sp_consensus::block_validation::DefaultBlockAnnounceValidator; use sp_consensus::import_queue::{ - BasicQueue, BoxJustificationImport, Verifier, BoxFinalityProofImport, + BasicQueue, BoxFinalityProofImport, BoxJustificationImport, Verifier, }; -use sp_consensus::block_import::{BlockImport, ImportResult}; use sp_consensus::Error as ConsensusError; -use sp_consensus::{BlockOrigin, ForkChoiceStrategy, BlockImportParams, BlockCheckParams, JustificationImport}; -use futures::prelude::*; -use sc_network::{NetworkWorker, NetworkService, config::ProtocolId}; -use sc_network::config::{NetworkConfiguration, TransportConfig, BoxFinalityProofRequestBuilder}; -use libp2p::PeerId; -use parking_lot::Mutex; +use sp_consensus::{ + BlockCheckParams, BlockImportParams, BlockOrigin, ForkChoiceStrategy, JustificationImport, +}; use sp_core::H256; -use sc_network::config::ProtocolConfig; use sp_runtime::generic::{BlockId, OpaqueDigestItemId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use sp_runtime::Justification; @@ -66,325 +79,395 @@ pub struct PassThroughVerifier(pub bool); /// This `Verifier` accepts all data as valid. 
impl Verifier for PassThroughVerifier { - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option> - ) -> Result<(BlockImportParams, Option)>>), String> { - let maybe_keys = header.digest() - .log(|l| l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) - .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) - ) - .map(|blob| vec![(well_known_cache_keys::AUTHORITIES, blob.to_vec())]); - let mut import = BlockImportParams::new(origin, header); - import.body = body; - import.finalized = self.0; - import.justification = justification; - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - Ok((import, maybe_keys)) - } + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + let maybe_keys = header + .digest() + .log(|l| { + l.try_as_raw(OpaqueDigestItemId::Consensus(b"aura")) + .or_else(|| l.try_as_raw(OpaqueDigestItemId::Consensus(b"babe"))) + }) + .map(|blob| vec![(well_known_cache_keys::AUTHORITIES, blob.to_vec())]); + let mut import = BlockImportParams::new(origin, header); + import.body = body; + import.finalized = self.0; + import.justification = justification; + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + Ok((import, maybe_keys)) + } } -pub type PeersFullClient = - sc_client::Client; -pub type PeersLightClient = - sc_client::Client; +pub type PeersFullClient = sc_client::Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + Block, + substrate_test_runtime_client::runtime::RuntimeApi, +>; +pub type PeersLightClient = sc_client::Client< + substrate_test_runtime_client::LightBackend, + substrate_test_runtime_client::LightExecutor, + Block, + substrate_test_runtime_client::runtime::RuntimeApi, +>; #[derive(Clone)] pub enum PeersClient { - Full(Arc, Arc), - Light(Arc, Arc), + Full( + Arc, + Arc, + ), + Light( + Arc, + 
Arc, + ), } impl PeersClient { - pub fn as_full(&self) -> Option> { - match *self { - PeersClient::Full(ref client, ref _backend) => Some(client.clone()), - _ => None, - } - } - - pub fn as_block_import(&self) -> BlockImportAdapter { - match *self { - PeersClient::Full(ref client, ref _backend) => - BlockImportAdapter::new_full(client.clone()), - PeersClient::Light(ref client, ref _backend) => - BlockImportAdapter::Light(Arc::new(Mutex::new(client.clone())), PhantomData), - } - } - - pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.get_aux(key), - PeersClient::Light(ref client, ref _backend) => client.get_aux(key), - } - } - - pub fn info(&self) -> BlockchainInfo { - match *self { - PeersClient::Full(ref client, ref _backend) => client.chain_info(), - PeersClient::Light(ref client, ref _backend) => client.chain_info(), - } - } - - pub fn header(&self, block: &BlockId) -> ClientResult::Header>> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.header(block), - PeersClient::Light(ref client, ref _backend) => client.header(block), - } - } - - pub fn justification(&self, block: &BlockId) -> ClientResult> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.justification(block), - PeersClient::Light(ref client, ref _backend) => client.justification(block), - } - } - - pub fn finality_notification_stream(&self) -> FinalityNotifications { - match *self { - PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(), - } - } - - pub fn import_notification_stream(&self) -> ImportNotifications{ - match *self { - PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), - PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), - } - } - - pub fn finalize_block( - &self, - id: BlockId, - 
justification: Option, - notify: bool - ) -> ClientResult<()> { - match *self { - PeersClient::Full(ref client, ref _backend) => client.finalize_block(id, justification, notify), - PeersClient::Light(ref client, ref _backend) => client.finalize_block(id, justification, notify), - } - } + pub fn as_full(&self) -> Option> { + match *self { + PeersClient::Full(ref client, ref _backend) => Some(client.clone()), + _ => None, + } + } + + pub fn as_block_import(&self) -> BlockImportAdapter { + match *self { + PeersClient::Full(ref client, ref _backend) => { + BlockImportAdapter::new_full(client.clone()) + } + PeersClient::Light(ref client, ref _backend) => { + BlockImportAdapter::Light(Arc::new(Mutex::new(client.clone())), PhantomData) + } + } + } + + pub fn get_aux(&self, key: &[u8]) -> ClientResult>> { + match *self { + PeersClient::Full(ref client, ref _backend) => client.get_aux(key), + PeersClient::Light(ref client, ref _backend) => client.get_aux(key), + } + } + + pub fn info(&self) -> BlockchainInfo { + match *self { + PeersClient::Full(ref client, ref _backend) => client.chain_info(), + PeersClient::Light(ref client, ref _backend) => client.chain_info(), + } + } + + pub fn header( + &self, + block: &BlockId, + ) -> ClientResult::Header>> { + match *self { + PeersClient::Full(ref client, ref _backend) => client.header(block), + PeersClient::Light(ref client, ref _backend) => client.header(block), + } + } + + pub fn justification(&self, block: &BlockId) -> ClientResult> { + match *self { + PeersClient::Full(ref client, ref _backend) => client.justification(block), + PeersClient::Light(ref client, ref _backend) => client.justification(block), + } + } + + pub fn finality_notification_stream(&self) -> FinalityNotifications { + match *self { + PeersClient::Full(ref client, ref _backend) => client.finality_notification_stream(), + PeersClient::Light(ref client, ref _backend) => client.finality_notification_stream(), + } + } + + pub fn import_notification_stream(&self) -> 
ImportNotifications { + match *self { + PeersClient::Full(ref client, ref _backend) => client.import_notification_stream(), + PeersClient::Light(ref client, ref _backend) => client.import_notification_stream(), + } + } + + pub fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool, + ) -> ClientResult<()> { + match *self { + PeersClient::Full(ref client, ref _backend) => { + client.finalize_block(id, justification, notify) + } + PeersClient::Light(ref client, ref _backend) => { + client.finalize_block(id, justification, notify) + } + } + } } pub struct Peer { - pub data: D, - client: PeersClient, - /// We keep a copy of the verifier so that we can invoke it for locally-generated blocks, - /// instead of going through the import queue. - verifier: VerifierAdapter, - /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, - /// instead of going through the import queue. - block_import: BlockImportAdapter<()>, - select_chain: Option>, - backend: Option>, - network: NetworkWorker::Hash>, - imported_blocks_stream: Pin> + Send>>, - finality_notification_stream: Pin> + Send>>, + pub data: D, + client: PeersClient, + /// We keep a copy of the verifier so that we can invoke it for locally-generated blocks, + /// instead of going through the import queue. + verifier: VerifierAdapter, + /// We keep a copy of the block_import so that we can invoke it for locally-generated blocks, + /// instead of going through the import queue. + block_import: BlockImportAdapter<()>, + select_chain: Option>, + backend: Option>, + network: NetworkWorker::Hash>, + imported_blocks_stream: Pin> + Send>>, + finality_notification_stream: Pin> + Send>>, } impl Peer { - /// Get this peer ID. - pub fn id(&self) -> PeerId { - self.network.service().local_peer_id().clone() - } - - /// Returns true if we're major syncing. 
- pub fn is_major_syncing(&self) -> bool { - self.network.service().is_major_syncing() - } - - // Returns a clone of the local SelectChain, only available on full nodes - pub fn select_chain(&self) -> Option> { - self.select_chain.clone() - } - - /// Returns the number of peers we're connected to. - pub fn num_peers(&self) -> usize { - self.network.num_connected_peers() - } - - /// Returns the number of processed blocks. - pub fn num_processed_blocks(&self) -> usize { - self.network.num_processed_blocks() - } - - /// Returns true if we have no peer. - pub fn is_offline(&self) -> bool { - self.num_peers() == 0 - } - - /// Request a justification for the given block. - pub fn request_justification(&self, hash: &::Hash, number: NumberFor) { - self.network.service().request_justification(hash, number); - } - - /// Announces an important block on the network. - pub fn announce_block(&self, hash: ::Hash, data: Vec) { - self.network.service().announce_block(hash, data); - } - - /// Request explicit fork sync. - pub fn set_sync_fork_request(&self, peers: Vec, hash: ::Hash, number: NumberFor) { - self.network.service().set_sync_fork_request(peers, hash, number); - } - - /// Add blocks to the peer -- edit the block before adding - pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 - where F: FnMut(BlockBuilder) -> Block - { - let best_hash = self.client.info().best_hash; - self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false) - } - - /// Add blocks to the peer -- edit the block before adding. The chain will - /// start at the given block iD. 
- fn generate_blocks_at( - &mut self, - at: BlockId, - count: usize, - origin: BlockOrigin, - mut edit_block: F, - headers_only: bool, - ) -> H256 where F: FnMut(BlockBuilder) -> Block { - let full_client = self.client.as_full() - .expect("blocks could only be generated by full clients"); - let mut at = full_client.header(&at).unwrap().unwrap().hash(); - for _ in 0..count { - let builder = full_client.new_block_at( - &BlockId::Hash(at), - Default::default(), - false, - ).unwrap(); - let block = edit_block(builder); - let hash = block.header.hash(); - trace!( - target: "test_network", - "Generating {}, (#{}, parent={})", - hash, - block.header.number, - block.header.parent_hash, - ); - let header = block.header.clone(); - let (import_block, cache) = self.verifier.verify( - origin, - header.clone(), - None, - if headers_only { None } else { Some(block.extrinsics) }, - ).unwrap(); - let cache = if let Some(cache) = cache { - cache.into_iter().collect() - } else { - Default::default() - }; - self.block_import.import_block(import_block, cache).expect("block_import failed"); - self.network.on_block_imported(header, true); - self.network.service().announce_block(hash, Vec::new()); - at = hash; - } - - self.network.service().announce_block(at.clone(), Vec::new()); - at - } - - /// Push blocks to the peer (simplified: with or without a TX) - pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> H256 { - let best_hash = self.client.info().best_hash; - self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) - } - - /// Push blocks to the peer (simplified: with or without a TX) - pub fn push_headers(&mut self, count: usize) -> H256 { - let best_hash = self.client.info().best_hash; - self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true) - } - - /// Push blocks to the peer (simplified: with or without a TX) starting from - /// given hash. 
- pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { - self.generate_tx_blocks_at(at, count, with_tx, false) - } - - /// Push blocks/headers to the peer (simplified: with or without a TX) starting from - /// given hash. - fn generate_tx_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool, headers_only:bool) -> H256 { - let mut nonce = 0; - if with_tx { - self.generate_blocks_at( - at, - count, - BlockOrigin::File, |mut builder| { - let transfer = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Alice.into(), - amount: 1, - nonce, - }; - builder.push(transfer.into_signed_tx()).unwrap(); - nonce = nonce + 1; - builder.build().unwrap().block - }, - headers_only - ) - } else { - self.generate_blocks_at( - at, - count, - BlockOrigin::File, - |builder| builder.build().unwrap().block, - headers_only, - ) - } - } - - pub fn push_authorities_change_block(&mut self, new_authorities: Vec) -> H256 { - self.generate_blocks(1, BlockOrigin::File, |mut builder| { - builder.push(Extrinsic::AuthoritiesChange(new_authorities.clone())).unwrap(); - builder.build().unwrap().block - }) - } - - /// Get a reference to the client. - pub fn client(&self) -> &PeersClient { - &self.client - } - - /// Get a reference to the network service. - pub fn network_service(&self) -> &Arc::Hash>> { - &self.network.service() - } - - /// Test helper to compare the blockchain state of multiple (networked) - /// clients. - pub fn blockchain_canon_equals(&self, other: &Self) -> bool { - if let (Some(mine), Some(others)) = (self.backend.clone(), other.backend.clone()) { - mine.blockchain().info().best_hash == others.blockchain().info().best_hash - } else { - false - } - } - - /// Count the total number of imported blocks. 
- pub fn blocks_count(&self) -> u64 { - self.backend.as_ref().map( - |backend| backend.blockchain().info().best_number - ).unwrap_or(0) - } - - /// Return a collection of block hashes that failed verification - pub fn failed_verifications(&self) -> HashMap<::Hash, String> { - self.verifier.failed_verifications.lock().clone() - } - - pub fn has_block(&self, hash: &H256) -> bool { - self.backend.as_ref().map( - |backend| backend.blockchain().header(BlockId::hash(*hash)).unwrap().is_some() - ).unwrap_or(false) - } + /// Get this peer ID. + pub fn id(&self) -> PeerId { + self.network.service().local_peer_id().clone() + } + + /// Returns true if we're major syncing. + pub fn is_major_syncing(&self) -> bool { + self.network.service().is_major_syncing() + } + + // Returns a clone of the local SelectChain, only available on full nodes + pub fn select_chain( + &self, + ) -> Option> { + self.select_chain.clone() + } + + /// Returns the number of peers we're connected to. + pub fn num_peers(&self) -> usize { + self.network.num_connected_peers() + } + + /// Returns the number of processed blocks. + pub fn num_processed_blocks(&self) -> usize { + self.network.num_processed_blocks() + } + + /// Returns true if we have no peer. + pub fn is_offline(&self) -> bool { + self.num_peers() == 0 + } + + /// Request a justification for the given block. + pub fn request_justification(&self, hash: &::Hash, number: NumberFor) { + self.network.service().request_justification(hash, number); + } + + /// Announces an important block on the network. + pub fn announce_block(&self, hash: ::Hash, data: Vec) { + self.network.service().announce_block(hash, data); + } + + /// Request explicit fork sync. 
+ pub fn set_sync_fork_request( + &self, + peers: Vec, + hash: ::Hash, + number: NumberFor, + ) { + self.network + .service() + .set_sync_fork_request(peers, hash, number); + } + + /// Add blocks to the peer -- edit the block before adding + pub fn generate_blocks(&mut self, count: usize, origin: BlockOrigin, edit_block: F) -> H256 + where + F: FnMut( + BlockBuilder, + ) -> Block, + { + let best_hash = self.client.info().best_hash; + self.generate_blocks_at(BlockId::Hash(best_hash), count, origin, edit_block, false) + } + + /// Add blocks to the peer -- edit the block before adding. The chain will + /// start at the given block iD. + fn generate_blocks_at( + &mut self, + at: BlockId, + count: usize, + origin: BlockOrigin, + mut edit_block: F, + headers_only: bool, + ) -> H256 + where + F: FnMut( + BlockBuilder, + ) -> Block, + { + let full_client = self + .client + .as_full() + .expect("blocks could only be generated by full clients"); + let mut at = full_client.header(&at).unwrap().unwrap().hash(); + for _ in 0..count { + let builder = full_client + .new_block_at(&BlockId::Hash(at), Default::default(), false) + .unwrap(); + let block = edit_block(builder); + let hash = block.header.hash(); + trace!( + target: "test_network", + "Generating {}, (#{}, parent={})", + hash, + block.header.number, + block.header.parent_hash, + ); + let header = block.header.clone(); + let (import_block, cache) = self + .verifier + .verify( + origin, + header.clone(), + None, + if headers_only { + None + } else { + Some(block.extrinsics) + }, + ) + .unwrap(); + let cache = if let Some(cache) = cache { + cache.into_iter().collect() + } else { + Default::default() + }; + self.block_import + .import_block(import_block, cache) + .expect("block_import failed"); + self.network.on_block_imported(header, true); + self.network.service().announce_block(hash, Vec::new()); + at = hash; + } + + self.network + .service() + .announce_block(at.clone(), Vec::new()); + at + } + + /// Push blocks to the 
peer (simplified: with or without a TX) + pub fn push_blocks(&mut self, count: usize, with_tx: bool) -> H256 { + let best_hash = self.client.info().best_hash; + self.push_blocks_at(BlockId::Hash(best_hash), count, with_tx) + } + + /// Push blocks to the peer (simplified: with or without a TX) + pub fn push_headers(&mut self, count: usize) -> H256 { + let best_hash = self.client.info().best_hash; + self.generate_tx_blocks_at(BlockId::Hash(best_hash), count, false, true) + } + + /// Push blocks to the peer (simplified: with or without a TX) starting from + /// given hash. + pub fn push_blocks_at(&mut self, at: BlockId, count: usize, with_tx: bool) -> H256 { + self.generate_tx_blocks_at(at, count, with_tx, false) + } + + /// Push blocks/headers to the peer (simplified: with or without a TX) starting from + /// given hash. + fn generate_tx_blocks_at( + &mut self, + at: BlockId, + count: usize, + with_tx: bool, + headers_only: bool, + ) -> H256 { + let mut nonce = 0; + if with_tx { + self.generate_blocks_at( + at, + count, + BlockOrigin::File, + |mut builder| { + let transfer = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Alice.into(), + amount: 1, + nonce, + }; + builder.push(transfer.into_signed_tx()).unwrap(); + nonce = nonce + 1; + builder.build().unwrap().block + }, + headers_only, + ) + } else { + self.generate_blocks_at( + at, + count, + BlockOrigin::File, + |builder| builder.build().unwrap().block, + headers_only, + ) + } + } + + pub fn push_authorities_change_block(&mut self, new_authorities: Vec) -> H256 { + self.generate_blocks(1, BlockOrigin::File, |mut builder| { + builder + .push(Extrinsic::AuthoritiesChange(new_authorities.clone())) + .unwrap(); + builder.build().unwrap().block + }) + } + + /// Get a reference to the client. + pub fn client(&self) -> &PeersClient { + &self.client + } + + /// Get a reference to the network service. 
+ pub fn network_service(&self) -> &Arc::Hash>> { + &self.network.service() + } + + /// Test helper to compare the blockchain state of multiple (networked) + /// clients. + pub fn blockchain_canon_equals(&self, other: &Self) -> bool { + if let (Some(mine), Some(others)) = (self.backend.clone(), other.backend.clone()) { + mine.blockchain().info().best_hash == others.blockchain().info().best_hash + } else { + false + } + } + + /// Count the total number of imported blocks. + pub fn blocks_count(&self) -> u64 { + self.backend + .as_ref() + .map(|backend| backend.blockchain().info().best_number) + .unwrap_or(0) + } + + /// Return a collection of block hashes that failed verification + pub fn failed_verifications(&self) -> HashMap<::Hash, String> { + self.verifier.failed_verifications.lock().clone() + } + + pub fn has_block(&self, hash: &H256) -> bool { + self.backend + .as_ref() + .map(|backend| { + backend + .blockchain() + .header(BlockId::hash(*hash)) + .unwrap() + .is_some() + }) + .unwrap_or(false) + } } /// Implements `BlockImport` for any `Transaction`. Internally the transaction is @@ -393,520 +476,551 @@ impl Peer { /// This is required as the `TestNetFactory` trait does not distinguish between /// full and light nodes. pub enum BlockImportAdapter { - Full( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), - Light( - Arc, - Error = ConsensusError - > + Send>>, - PhantomData, - ), + Full( + Arc< + Mutex< + dyn BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + Send, + >, + >, + PhantomData, + ), + Light( + Arc< + Mutex< + dyn BlockImport< + Block, + Transaction = TransactionFor< + substrate_test_runtime_client::LightBackend, + Block, + >, + Error = ConsensusError, + > + Send, + >, + >, + PhantomData, + ), } impl BlockImportAdapter { - /// Create a new instance of `Self::Full`. 
- pub fn new_full( - full: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Full(Arc::new(Mutex::new(full)), PhantomData) - } - - /// Create a new instance of `Self::Light`. - pub fn new_light( - light: impl BlockImport< - Block, - Transaction = TransactionFor, - Error = ConsensusError - > - + 'static - + Send - ) -> Self { - Self::Light(Arc::new(Mutex::new(light)), PhantomData) - } + /// Create a new instance of `Self::Full`. + pub fn new_full( + full: impl BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + + 'static + + Send, + ) -> Self { + Self::Full(Arc::new(Mutex::new(full)), PhantomData) + } + + /// Create a new instance of `Self::Light`. + pub fn new_light( + light: impl BlockImport< + Block, + Transaction = TransactionFor, + Error = ConsensusError, + > + + 'static + + Send, + ) -> Self { + Self::Light(Arc::new(Mutex::new(light)), PhantomData) + } } impl Clone for BlockImportAdapter { - fn clone(&self) -> Self { - match self { - Self::Full(full, _) => Self::Full(full.clone(), PhantomData), - Self::Light(light, _) => Self::Light(light.clone(), PhantomData), - } - } + fn clone(&self) -> Self { + match self { + Self::Full(full, _) => Self::Full(full.clone(), PhantomData), + Self::Light(light, _) => Self::Light(light.clone(), PhantomData), + } + } } impl BlockImport for BlockImportAdapter { - type Error = ConsensusError; - type Transaction = Transaction; - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - match self { - Self::Full(full, _) => full.lock().check_block(block), - Self::Light(light, _) => light.lock().check_block(block), - } - } - - fn import_block( - &mut self, - block: BlockImportParams, - cache: HashMap>, - ) -> Result { - match self { - Self::Full(full, _) => full.lock().import_block(block.convert_transaction(), cache), - Self::Light(light, _) => light.lock().import_block(block.convert_transaction(), 
cache), - } - } + type Error = ConsensusError; + type Transaction = Transaction; + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + match self { + Self::Full(full, _) => full.lock().check_block(block), + Self::Light(light, _) => light.lock().check_block(block), + } + } + + fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + match self { + Self::Full(full, _) => full.lock().import_block(block.convert_transaction(), cache), + Self::Light(light, _) => light + .lock() + .import_block(block.convert_transaction(), cache), + } + } } /// Implements `Verifier` on an `Arc>`. Used internally. #[derive(Clone)] struct VerifierAdapter { - verifier: Arc>>>, - failed_verifications: Arc>>, + verifier: Arc>>>, + failed_verifications: Arc>>, } impl Verifier for VerifierAdapter { - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option> - ) -> Result<(BlockImportParams, Option)>>), String> { - let hash = header.hash(); - self.verifier.lock().verify(origin, header, justification, body).map_err(|e| { - self.failed_verifications.lock().insert(hash, e.clone()); - e - }) - } + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String> { + let hash = header.hash(); + self.verifier + .lock() + .verify(origin, header, justification, body) + .map_err(|e| { + self.failed_verifications.lock().insert(hash, e.clone()); + e + }) + } } impl VerifierAdapter { - fn new(verifier: Arc>>>) -> VerifierAdapter { - VerifierAdapter { - verifier, - failed_verifications: Default::default(), - } - } + fn new(verifier: Arc>>>) -> VerifierAdapter { + VerifierAdapter { + verifier, + failed_verifications: Default::default(), + } + } } pub trait TestNetFactory: Sized { - type Verifier: 'static + Verifier; - type PeerData: Default; - - /// These two need to be implemented! 
- fn from_config(config: &ProtocolConfig) -> Self; - fn make_verifier( - &self, - client: PeersClient, - config: &ProtocolConfig, - peer_data: &Self::PeerData, - ) -> Self::Verifier; - - /// Get reference to peer. - fn peer(&mut self, i: usize) -> &mut Peer; - fn peers(&self) -> &Vec>; - fn mut_peers>)>( - &mut self, - closure: F, - ); - - /// Get custom block import handle for fresh client, along with peer data. - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Self::PeerData, - ) - { - (client.as_block_import(), None, None, None, Default::default()) - } - - /// Get finality proof provider (if supported). - fn make_finality_proof_provider( - &self, - _client: PeersClient, - ) -> Option>> { - None - } - - fn default_config() -> ProtocolConfig { - ProtocolConfig::default() - } - - /// Create new test network with this many peers. - fn new(n: usize) -> Self { - trace!(target: "test_network", "Creating test network"); - let config = Self::default_config(); - let mut net = Self::from_config(&config); - - for i in 0..n { - trace!(target: "test_network", "Adding peer {}", i); - net.add_full_peer(); - } - net - } - - fn add_full_peer(&mut self) { - self.add_full_peer_with_states(None) - } - - /// Add a full peer. 
- fn add_full_peer_with_states(&mut self, keep_blocks: Option) { - let test_client_builder = match keep_blocks { - Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), - None => TestClientBuilder::with_default_backend(), - }; - let backend = test_client_builder.backend(); - let (c, longest_chain) = test_client_builder.build_with_longest_chain(); - let client = Arc::new(c); - - let ( - block_import, - justification_import, - finality_proof_import, - finality_proof_request_builder, - data, - ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); - - let verifier = self.make_verifier( - PeersClient::Full(client.clone(), backend.clone()), - &Default::default(), - &data, - ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); - - let import_queue = Box::new(BasicQueue::new( - verifier.clone(), - Box::new(block_import.clone()), - justification_import, - finality_proof_import, - )); - - let listen_addr = build_multiaddr![Memory(rand::random::())]; - - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - network_config.transport = TransportConfig::MemoryOnly; - network_config.listen_addresses = vec![listen_addr.clone()]; - - let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Full, - executor: None, - network_config, - chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Full(client.clone(), backend.clone()), - ), - finality_proof_request_builder, - on_demand: None, - transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), - import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), - metrics_registry: None, - }).unwrap(); - - self.mut_peers(|peers| { - for peer in peers.iter_mut() { - 
peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); - } - - let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); - - peers.push(Peer { - data, - client: PeersClient::Full(client, backend.clone()), - select_chain: Some(longest_chain), - backend: Some(backend), - imported_blocks_stream, - finality_notification_stream, - block_import, - verifier, - network, - }); - }); - } - - /// Add a light peer. - fn add_light_peer(&mut self) { - let (c, backend) = substrate_test_runtime_client::new_light(); - let client = Arc::new(c); - let ( - block_import, - justification_import, - finality_proof_import, - finality_proof_request_builder, - data, - ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); - - let verifier = self.make_verifier( - PeersClient::Light(client.clone(), backend.clone()), - &Default::default(), - &data, - ); - let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); - - let import_queue = Box::new(BasicQueue::new( - verifier.clone(), - Box::new(block_import.clone()), - justification_import, - finality_proof_import, - )); - - let listen_addr = build_multiaddr![Memory(rand::random::())]; - - let mut network_config = NetworkConfiguration::new( - "test-node", - "test-client", - Default::default(), - None, - ); - network_config.transport = TransportConfig::MemoryOnly; - network_config.listen_addresses = vec![listen_addr.clone()]; - - let network = NetworkWorker::new(sc_network::config::Params { - role: Role::Light, - executor: None, - network_config, - chain: client.clone(), - finality_proof_provider: self.make_finality_proof_provider( - PeersClient::Light(client.clone(), backend.clone()) - ), - finality_proof_request_builder, - on_demand: None, - transaction_pool: Arc::new(EmptyTransactionPool), - protocol_id: 
ProtocolId::from(&b"test-protocol-name"[..]), - import_queue, - block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), - metrics_registry: None, - }).unwrap(); - - self.mut_peers(|peers| { - for peer in peers.iter_mut() { - peer.network.add_known_address(network.service().local_peer_id().clone(), listen_addr.clone()); - } - - let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); - let finality_notification_stream = Box::pin(client.finality_notification_stream().fuse()); - - peers.push(Peer { - data, - verifier, - select_chain: None, - backend: None, - block_import, - client: PeersClient::Light(client, backend), - imported_blocks_stream, - finality_notification_stream, - network, - }); - }); - } - - /// Polls the testnet until all nodes are in sync. - /// - /// Must be executed in a task context. - fn poll_until_sync(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); - - // Return `NotReady` if there's a mismatch in the highest block number. - let mut highest = None; - for peer in self.peers().iter() { - if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending - } - if peer.network.num_sync_requests() != 0 { - return Poll::Pending - } - match (highest, peer.client.info().best_hash) { - (None, b) => highest = Some(b), - (Some(ref a), ref b) if a == b => {}, - (Some(_), _) => return Poll::Pending - } - } - Poll::Ready(()) - } - - /// Polls the testnet until theres' no activiy of any kind. - /// - /// Must be executed in a task context. - fn poll_until_idle(&mut self, cx: &mut FutureContext) -> Poll<()> { - self.poll(cx); - - for peer in self.peers().iter() { - if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { - return Poll::Pending - } - if peer.network.num_sync_requests() != 0 { - return Poll::Pending - } - } - Poll::Ready(()) - } - - /// Blocks the current thread until we are sync'ed. - /// - /// Calls `poll_until_sync` repeatedly. 
- fn block_until_sync(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_sync(cx))); - } - - /// Blocks the current thread until there are no pending packets. - /// - /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. - fn block_until_idle(&mut self) { - futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| self.poll_until_idle(cx))); - } - - /// Polls the testnet. Processes all the pending actions and returns `NotReady`. - fn poll(&mut self, cx: &mut FutureContext) { - self.mut_peers(|peers| { - for peer in peers { - trace!(target: "sync", "-- Polling {}", peer.id()); - if let Poll::Ready(res) = Pin::new(&mut peer.network).poll(cx) { - res.unwrap(); - } - trace!(target: "sync", "-- Polling complete {}", peer.id()); - - // We poll `imported_blocks_stream`. - while let Poll::Ready(Some(notification)) = peer.imported_blocks_stream.as_mut().poll_next(cx) { - peer.network.on_block_imported( - notification.header, - true, - ); - peer.network.service().announce_block(notification.hash, Vec::new()); - } - - // We poll `finality_notification_stream`, but we only take the last event. - let mut last = None; - while let Poll::Ready(Some(item)) = peer.finality_notification_stream.as_mut().poll_next(cx) { - last = Some(item); - } - if let Some(notification) = last { - peer.network.on_block_finalized(notification.hash, notification.header); - } - } - }); - } + type Verifier: 'static + Verifier; + type PeerData: Default; + + /// These two need to be implemented! + fn from_config(config: &ProtocolConfig) -> Self; + fn make_verifier( + &self, + client: PeersClient, + config: &ProtocolConfig, + peer_data: &Self::PeerData, + ) -> Self::Verifier; + + /// Get reference to peer. + fn peer(&mut self, i: usize) -> &mut Peer; + fn peers(&self) -> &Vec>; + fn mut_peers>)>(&mut self, closure: F); + + /// Get custom block import handle for fresh client, along with peer data. 
+ fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + Self::PeerData, + ) { + ( + client.as_block_import(), + None, + None, + None, + Default::default(), + ) + } + + /// Get finality proof provider (if supported). + fn make_finality_proof_provider( + &self, + _client: PeersClient, + ) -> Option>> { + None + } + + fn default_config() -> ProtocolConfig { + ProtocolConfig::default() + } + + /// Create new test network with this many peers. + fn new(n: usize) -> Self { + trace!(target: "test_network", "Creating test network"); + let config = Self::default_config(); + let mut net = Self::from_config(&config); + + for i in 0..n { + trace!(target: "test_network", "Adding peer {}", i); + net.add_full_peer(); + } + net + } + + fn add_full_peer(&mut self) { + self.add_full_peer_with_states(None) + } + + /// Add a full peer. + fn add_full_peer_with_states(&mut self, keep_blocks: Option) { + let test_client_builder = match keep_blocks { + Some(keep_blocks) => TestClientBuilder::with_pruning_window(keep_blocks), + None => TestClientBuilder::with_default_backend(), + }; + let backend = test_client_builder.backend(); + let (c, longest_chain) = test_client_builder.build_with_longest_chain(); + let client = Arc::new(c); + + let ( + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + data, + ) = self.make_block_import(PeersClient::Full(client.clone(), backend.clone())); + + let verifier = self.make_verifier( + PeersClient::Full(client.clone(), backend.clone()), + &Default::default(), + &data, + ); + let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + + let import_queue = Box::new(BasicQueue::new( + verifier.clone(), + Box::new(block_import.clone()), + justification_import, + finality_proof_import, + )); + + let listen_addr = build_multiaddr![Memory(rand::random::())]; + + let mut network_config = + 
NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + network_config.transport = TransportConfig::MemoryOnly; + network_config.listen_addresses = vec![listen_addr.clone()]; + + let network = NetworkWorker::new(sc_network::config::Params { + role: Role::Full, + executor: None, + network_config, + chain: client.clone(), + finality_proof_provider: self + .make_finality_proof_provider(PeersClient::Full(client.clone(), backend.clone())), + finality_proof_request_builder, + on_demand: None, + transaction_pool: Arc::new(EmptyTransactionPool), + protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), + import_queue, + block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + metrics_registry: None, + }) + .unwrap(); + + self.mut_peers(|peers| { + for peer in peers.iter_mut() { + peer.network.add_known_address( + network.service().local_peer_id().clone(), + listen_addr.clone(), + ); + } + + let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); + let finality_notification_stream = + Box::pin(client.finality_notification_stream().fuse()); + + peers.push(Peer { + data, + client: PeersClient::Full(client, backend.clone()), + select_chain: Some(longest_chain), + backend: Some(backend), + imported_blocks_stream, + finality_notification_stream, + block_import, + verifier, + network, + }); + }); + } + + /// Add a light peer. 
+ fn add_light_peer(&mut self) { + let (c, backend) = substrate_test_runtime_client::new_light(); + let client = Arc::new(c); + let ( + block_import, + justification_import, + finality_proof_import, + finality_proof_request_builder, + data, + ) = self.make_block_import(PeersClient::Light(client.clone(), backend.clone())); + + let verifier = self.make_verifier( + PeersClient::Light(client.clone(), backend.clone()), + &Default::default(), + &data, + ); + let verifier = VerifierAdapter::new(Arc::new(Mutex::new(Box::new(verifier) as Box<_>))); + + let import_queue = Box::new(BasicQueue::new( + verifier.clone(), + Box::new(block_import.clone()), + justification_import, + finality_proof_import, + )); + + let listen_addr = build_multiaddr![Memory(rand::random::())]; + + let mut network_config = + NetworkConfiguration::new("test-node", "test-client", Default::default(), None); + network_config.transport = TransportConfig::MemoryOnly; + network_config.listen_addresses = vec![listen_addr.clone()]; + + let network = NetworkWorker::new(sc_network::config::Params { + role: Role::Light, + executor: None, + network_config, + chain: client.clone(), + finality_proof_provider: self + .make_finality_proof_provider(PeersClient::Light(client.clone(), backend.clone())), + finality_proof_request_builder, + on_demand: None, + transaction_pool: Arc::new(EmptyTransactionPool), + protocol_id: ProtocolId::from(&b"test-protocol-name"[..]), + import_queue, + block_announce_validator: Box::new(DefaultBlockAnnounceValidator::new(client.clone())), + metrics_registry: None, + }) + .unwrap(); + + self.mut_peers(|peers| { + for peer in peers.iter_mut() { + peer.network.add_known_address( + network.service().local_peer_id().clone(), + listen_addr.clone(), + ); + } + + let imported_blocks_stream = Box::pin(client.import_notification_stream().fuse()); + let finality_notification_stream = + Box::pin(client.finality_notification_stream().fuse()); + + peers.push(Peer { + data, + verifier, + select_chain: 
None, + backend: None, + block_import, + client: PeersClient::Light(client, backend), + imported_blocks_stream, + finality_notification_stream, + network, + }); + }); + } + + /// Polls the testnet until all nodes are in sync. + /// + /// Must be executed in a task context. + fn poll_until_sync(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); + + // Return `NotReady` if there's a mismatch in the highest block number. + let mut highest = None; + for peer in self.peers().iter() { + if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { + return Poll::Pending; + } + if peer.network.num_sync_requests() != 0 { + return Poll::Pending; + } + match (highest, peer.client.info().best_hash) { + (None, b) => highest = Some(b), + (Some(ref a), ref b) if a == b => {} + (Some(_), _) => return Poll::Pending, + } + } + Poll::Ready(()) + } + + /// Polls the testnet until theres' no activiy of any kind. + /// + /// Must be executed in a task context. + fn poll_until_idle(&mut self, cx: &mut FutureContext) -> Poll<()> { + self.poll(cx); + + for peer in self.peers().iter() { + if peer.is_major_syncing() || peer.network.num_queued_blocks() != 0 { + return Poll::Pending; + } + if peer.network.num_sync_requests() != 0 { + return Poll::Pending; + } + } + Poll::Ready(()) + } + + /// Blocks the current thread until we are sync'ed. + /// + /// Calls `poll_until_sync` repeatedly. + fn block_until_sync(&mut self) { + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_sync(cx) + })); + } + + /// Blocks the current thread until there are no pending packets. + /// + /// Calls `poll_until_idle` repeatedly with the runtime passed as parameter. + fn block_until_idle(&mut self) { + futures::executor::block_on(futures::future::poll_fn::<(), _>(|cx| { + self.poll_until_idle(cx) + })); + } + + /// Polls the testnet. Processes all the pending actions and returns `NotReady`. 
+ fn poll(&mut self, cx: &mut FutureContext) { + self.mut_peers(|peers| { + for peer in peers { + trace!(target: "sync", "-- Polling {}", peer.id()); + if let Poll::Ready(res) = Pin::new(&mut peer.network).poll(cx) { + res.unwrap(); + } + trace!(target: "sync", "-- Polling complete {}", peer.id()); + + // We poll `imported_blocks_stream`. + while let Poll::Ready(Some(notification)) = + peer.imported_blocks_stream.as_mut().poll_next(cx) + { + peer.network.on_block_imported(notification.header, true); + peer.network + .service() + .announce_block(notification.hash, Vec::new()); + } + + // We poll `finality_notification_stream`, but we only take the last event. + let mut last = None; + while let Poll::Ready(Some(item)) = + peer.finality_notification_stream.as_mut().poll_next(cx) + { + last = Some(item); + } + if let Some(notification) = last { + peer.network + .on_block_finalized(notification.hash, notification.header); + } + } + }); + } } pub struct TestNet { - peers: Vec>, + peers: Vec>, } impl TestNetFactory for TestNet { - type Verifier = PassThroughVerifier; - type PeerData = (); - - /// Create new test network with peers and given config. - fn from_config(_config: &ProtocolConfig) -> Self { - TestNet { - peers: Vec::new(), - } - } - - fn make_verifier(&self, _client: PeersClient, _config: &ProtocolConfig, _peer_data: &()) - -> Self::Verifier - { - PassThroughVerifier(false) - } - - fn peer(&mut self, i: usize) -> &mut Peer<()> { - &mut self.peers[i] - } - - fn peers(&self) -> &Vec> { - &self.peers - } - - fn mut_peers>)>(&mut self, closure: F) { - closure(&mut self.peers); - } + type Verifier = PassThroughVerifier; + type PeerData = (); + + /// Create new test network with peers and given config. 
+ fn from_config(_config: &ProtocolConfig) -> Self { + TestNet { peers: Vec::new() } + } + + fn make_verifier( + &self, + _client: PeersClient, + _config: &ProtocolConfig, + _peer_data: &(), + ) -> Self::Verifier { + PassThroughVerifier(false) + } + + fn peer(&mut self, i: usize) -> &mut Peer<()> { + &mut self.peers[i] + } + + fn peers(&self) -> &Vec> { + &self.peers + } + + fn mut_peers>)>(&mut self, closure: F) { + closure(&mut self.peers); + } } pub struct ForceFinalized(PeersClient); impl JustificationImport for ForceFinalized { - type Error = ConsensusError; - - fn import_justification( - &mut self, - hash: H256, - _number: NumberFor, - justification: Justification, - ) -> Result<(), Self::Error> { - self.0.finalize_block(BlockId::Hash(hash), Some(justification), true) - .map_err(|_| ConsensusError::InvalidJustification.into()) - } + type Error = ConsensusError; + + fn import_justification( + &mut self, + hash: H256, + _number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error> { + self.0 + .finalize_block(BlockId::Hash(hash), Some(justification), true) + .map_err(|_| ConsensusError::InvalidJustification.into()) + } } pub struct JustificationTestNet(TestNet); impl TestNetFactory for JustificationTestNet { - type Verifier = PassThroughVerifier; - type PeerData = (); - - fn from_config(config: &ProtocolConfig) -> Self { - JustificationTestNet(TestNet::from_config(config)) - } - - fn make_verifier(&self, client: PeersClient, config: &ProtocolConfig, peer_data: &()) -> Self::Verifier { - self.0.make_verifier(client, config, peer_data) - } - - fn peer(&mut self, i: usize) -> &mut Peer { - self.0.peer(i) - } - - fn peers(&self) -> &Vec> { - self.0.peers() - } - - fn mut_peers>, - )>(&mut self, closure: F) { - self.0.mut_peers(closure) - } - - fn make_block_import(&self, client: PeersClient) - -> ( - BlockImportAdapter, - Option>, - Option>, - Option>, - Self::PeerData, - ) - { - ( - client.as_block_import(), - 
Some(Box::new(ForceFinalized(client))), - None, - None, - Default::default(), - ) - } + type Verifier = PassThroughVerifier; + type PeerData = (); + + fn from_config(config: &ProtocolConfig) -> Self { + JustificationTestNet(TestNet::from_config(config)) + } + + fn make_verifier( + &self, + client: PeersClient, + config: &ProtocolConfig, + peer_data: &(), + ) -> Self::Verifier { + self.0.make_verifier(client, config, peer_data) + } + + fn peer(&mut self, i: usize) -> &mut Peer { + self.0.peer(i) + } + + fn peers(&self) -> &Vec> { + self.0.peers() + } + + fn mut_peers>)>(&mut self, closure: F) { + self.0.mut_peers(closure) + } + + fn make_block_import( + &self, + client: PeersClient, + ) -> ( + BlockImportAdapter, + Option>, + Option>, + Option>, + Self::PeerData, + ) { + ( + client.as_block_import(), + Some(Box::new(ForceFinalized(client))), + None, + None, + Default::default(), + ) + } } diff --git a/client/network/test/src/sync.rs b/client/network/test/src/sync.rs index 60e9e558c5..5edf5c456b 100644 --- a/client/network/test/src/sync.rs +++ b/client/network/test/src/sync.rs @@ -14,697 +14,851 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use super::*; +use futures::executor::block_on; use sp_consensus::BlockOrigin; use std::time::Duration; -use futures::executor::block_on; -use super::*; fn test_ancestor_search_when_common_is(n: usize) { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); - net.peer(0).push_blocks(n, false); - net.peer(1).push_blocks(n, false); - net.peer(2).push_blocks(n, false); + net.peer(0).push_blocks(n, false); + net.peer(1).push_blocks(n, false); + net.peer(2).push_blocks(n, false); - net.peer(0).push_blocks(10, true); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); - net.block_until_sync(); - let peer1 = &net.peers()[1]; - assert!(net.peers()[0].blockchain_canon_equals(peer1)); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); } #[test] fn sync_peers_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - for peer in 0..3 { - if net.peer(peer).num_peers() != 2 { - return Poll::Pending - } - } - Poll::Ready(()) - })); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + for peer in 0..3 { + if net.peer(peer).num_peers() != 2 { + return Poll::Pending; + } + } + Poll::Ready(()) + })); } #[test] fn sync_cycle_from_offline_to_syncing_to_offline() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - for peer in 0..3 { - // Offline, and not major syncing. - assert!(net.peer(peer).is_offline()); - assert!(!net.peer(peer).is_major_syncing()); - } - - // Generate blocks. - net.peer(2).push_blocks(100, false); - - // Block until all nodes are online and nodes 0 and 1 and major syncing. 
- block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - for peer in 0..3 { - // Online - if net.peer(peer).is_offline() { - return Poll::Pending - } - if peer < 2 { - // Major syncing. - if net.peer(peer).blocks_count() < 100 && !net.peer(peer).is_major_syncing() { - return Poll::Pending - } - } - } - Poll::Ready(()) - })); - - // Block until all nodes are done syncing. - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - for peer in 0..3 { - if net.peer(peer).is_major_syncing() { - return Poll::Pending - } - } - Poll::Ready(()) - })); - - // Now drop nodes 1 and 2, and check that node 0 is offline. - net.peers.remove(2); - net.peers.remove(1); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if !net.peer(0).is_offline() { - Poll::Pending - } else { - Poll::Ready(()) - } - })); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + for peer in 0..3 { + // Offline, and not major syncing. + assert!(net.peer(peer).is_offline()); + assert!(!net.peer(peer).is_major_syncing()); + } + + // Generate blocks. + net.peer(2).push_blocks(100, false); + + // Block until all nodes are online and nodes 0 and 1 and major syncing. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + for peer in 0..3 { + // Online + if net.peer(peer).is_offline() { + return Poll::Pending; + } + if peer < 2 { + // Major syncing. + if net.peer(peer).blocks_count() < 100 && !net.peer(peer).is_major_syncing() { + return Poll::Pending; + } + } + } + Poll::Ready(()) + })); + + // Block until all nodes are done syncing. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + for peer in 0..3 { + if net.peer(peer).is_major_syncing() { + return Poll::Pending; + } + } + Poll::Ready(()) + })); + + // Now drop nodes 1 and 2, and check that node 0 is offline. 
+ net.peers.remove(2); + net.peers.remove(1); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if !net.peer(0).is_offline() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); } #[test] fn syncing_node_not_major_syncing_when_disconnected() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - - // Generate blocks. - net.peer(2).push_blocks(100, false); - - // Check that we're not major syncing when disconnected. - assert!(!net.peer(1).is_major_syncing()); - - // Check that we switch to major syncing. - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if !net.peer(1).is_major_syncing() { - Poll::Pending - } else { - Poll::Ready(()) - } - })); - - // Destroy two nodes, and check that we switch to non-major syncing. - net.peers.remove(2); - net.peers.remove(0); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).is_major_syncing() { - Poll::Pending - } else { - Poll::Ready(()) - } - })); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + + // Generate blocks. + net.peer(2).push_blocks(100, false); + + // Check that we're not major syncing when disconnected. + assert!(!net.peer(1).is_major_syncing()); + + // Check that we switch to major syncing. + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if !net.peer(1).is_major_syncing() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // Destroy two nodes, and check that we switch to non-major syncing. 
+ net.peers.remove(2); + net.peers.remove(0); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).is_major_syncing() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); } #[test] fn sync_from_two_peers_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); - net.block_until_sync(); - let peer1 = &net.peers()[1]; - assert!(net.peers()[0].blockchain_canon_equals(peer1)); - assert!(!net.peer(0).is_major_syncing()); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); + assert!(!net.peer(0).is_major_syncing()); } #[test] fn sync_from_two_peers_with_ancestry_search_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.peer(0).push_blocks(10, true); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); - net.block_until_sync(); - let peer1 = &net.peers()[1]; - assert!(net.peers()[0].blockchain_canon_equals(peer1)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); } #[test] fn ancestry_search_works_when_backoff_is_one() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); - net.peer(0).push_blocks(1, false); - net.peer(1).push_blocks(2, false); - net.peer(2).push_blocks(2, false); + net.peer(0).push_blocks(1, false); + net.peer(1).push_blocks(2, false); + net.peer(2).push_blocks(2, false); - net.block_until_sync(); - let peer1 = 
&net.peers()[1]; - assert!(net.peers()[0].blockchain_canon_equals(peer1)); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); } #[test] fn ancestry_search_works_when_ancestor_is_genesis() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); - net.peer(0).push_blocks(13, true); - net.peer(1).push_blocks(100, false); - net.peer(2).push_blocks(100, false); + net.peer(0).push_blocks(13, true); + net.peer(1).push_blocks(100, false); + net.peer(2).push_blocks(100, false); - net.block_until_sync(); - let peer1 = &net.peers()[1]; - assert!(net.peers()[0].blockchain_canon_equals(peer1)); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); } #[test] fn ancestry_search_works_when_common_is_one() { - test_ancestor_search_when_common_is(1); + test_ancestor_search_when_common_is(1); } #[test] fn ancestry_search_works_when_common_is_two() { - test_ancestor_search_when_common_is(2); + test_ancestor_search_when_common_is(2); } #[test] fn ancestry_search_works_when_common_is_hundred() { - test_ancestor_search_when_common_is(100); + test_ancestor_search_when_common_is(100); } #[test] fn sync_long_chain_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(2); - net.peer(1).push_blocks(500, false); - net.block_until_sync(); - let peer1 = &net.peers()[1]; - assert!(net.peers()[0].blockchain_canon_equals(peer1)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + net.peer(1).push_blocks(500, false); + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); } #[test] fn sync_no_common_longer_chain_fails() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.peer(0).push_blocks(20, true); - net.peer(1).push_blocks(20, false); - 
block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).is_major_syncing() { - Poll::Pending - } else { - Poll::Ready(()) - } - })); - let peer1 = &net.peers()[1]; - assert!(!net.peers()[0].blockchain_canon_equals(peer1)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(20, true); + net.peer(1).push_blocks(20, false); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).is_major_syncing() { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + let peer1 = &net.peers()[1]; + assert!(!net.peers()[0].blockchain_canon_equals(peer1)); } #[test] fn sync_justifications() { - let _ = ::env_logger::try_init(); - let mut net = JustificationTestNet::new(3); - net.peer(0).push_blocks(20, false); - net.block_until_sync(); - - // there's currently no justification for block #10 - assert_eq!(net.peer(0).client().justification(&BlockId::Number(10)).unwrap(), None); - assert_eq!(net.peer(1).client().justification(&BlockId::Number(10)).unwrap(), None); - - // we finalize block #10, #15 and #20 for peer 0 with a justification - net.peer(0).client().finalize_block(BlockId::Number(10), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(15), Some(Vec::new()), true).unwrap(); - net.peer(0).client().finalize_block(BlockId::Number(20), Some(Vec::new()), true).unwrap(); - - let h1 = net.peer(1).client().header(&BlockId::Number(10)).unwrap().unwrap(); - let h2 = net.peer(1).client().header(&BlockId::Number(15)).unwrap().unwrap(); - let h3 = net.peer(1).client().header(&BlockId::Number(20)).unwrap().unwrap(); - - // peer 1 should get the justifications from the network - net.peer(1).request_justification(&h1.hash().into(), 10); - net.peer(1).request_justification(&h2.hash().into(), 15); - net.peer(1).request_justification(&h3.hash().into(), 20); - - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - - for height in 
(10..21).step_by(5) { - if net.peer(0).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { - return Poll::Pending; - } - if net.peer(1).client().justification(&BlockId::Number(height)).unwrap() != Some(Vec::new()) { - return Poll::Pending; - } - } - - Poll::Ready(()) - })); + let _ = ::env_logger::try_init(); + let mut net = JustificationTestNet::new(3); + net.peer(0).push_blocks(20, false); + net.block_until_sync(); + + // there's currently no justification for block #10 + assert_eq!( + net.peer(0) + .client() + .justification(&BlockId::Number(10)) + .unwrap(), + None + ); + assert_eq!( + net.peer(1) + .client() + .justification(&BlockId::Number(10)) + .unwrap(), + None + ); + + // we finalize block #10, #15 and #20 for peer 0 with a justification + net.peer(0) + .client() + .finalize_block(BlockId::Number(10), Some(Vec::new()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(15), Some(Vec::new()), true) + .unwrap(); + net.peer(0) + .client() + .finalize_block(BlockId::Number(20), Some(Vec::new()), true) + .unwrap(); + + let h1 = net + .peer(1) + .client() + .header(&BlockId::Number(10)) + .unwrap() + .unwrap(); + let h2 = net + .peer(1) + .client() + .header(&BlockId::Number(15)) + .unwrap() + .unwrap(); + let h3 = net + .peer(1) + .client() + .header(&BlockId::Number(20)) + .unwrap() + .unwrap(); + + // peer 1 should get the justifications from the network + net.peer(1).request_justification(&h1.hash().into(), 10); + net.peer(1).request_justification(&h2.hash().into(), 15); + net.peer(1).request_justification(&h3.hash().into(), 20); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + for height in (10..21).step_by(5) { + if net + .peer(0) + .client() + .justification(&BlockId::Number(height)) + .unwrap() + != Some(Vec::new()) + { + return Poll::Pending; + } + if net + .peer(1) + .client() + .justification(&BlockId::Number(height)) + .unwrap() + != Some(Vec::new()) + { + return 
Poll::Pending; + } + } + + Poll::Ready(()) + })); } #[test] fn sync_justifications_across_forks() { - let _ = ::env_logger::try_init(); - let mut net = JustificationTestNet::new(3); - // we push 5 blocks - net.peer(0).push_blocks(5, false); - // and then two forks 5 and 6 blocks long - let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, false); - let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false); - - // peer 1 will only see the longer fork. but we'll request justifications - // for both and finalize the small fork instead. - net.block_until_sync(); - - net.peer(0).client().finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true).unwrap(); - - net.peer(1).request_justification(&f1_best, 10); - net.peer(1).request_justification(&f2_best, 11); - - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - - if net.peer(0).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) && - net.peer(1).client().justification(&BlockId::Number(10)).unwrap() == Some(Vec::new()) - { - Poll::Ready(()) - } else { - Poll::Pending - } - })); + let _ = ::env_logger::try_init(); + let mut net = JustificationTestNet::new(3); + // we push 5 blocks + net.peer(0).push_blocks(5, false); + // and then two forks 5 and 6 blocks long + let f1_best = net.peer(0).push_blocks_at(BlockId::Number(5), 5, false); + let f2_best = net.peer(0).push_blocks_at(BlockId::Number(5), 6, false); + + // peer 1 will only see the longer fork. but we'll request justifications + // for both and finalize the small fork instead. 
+ net.block_until_sync(); + + net.peer(0) + .client() + .finalize_block(BlockId::Hash(f1_best), Some(Vec::new()), true) + .unwrap(); + + net.peer(1).request_justification(&f1_best, 10); + net.peer(1).request_justification(&f2_best, 11); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + if net + .peer(0) + .client() + .justification(&BlockId::Number(10)) + .unwrap() + == Some(Vec::new()) + && net + .peer(1) + .client() + .justification(&BlockId::Number(10)) + .unwrap() + == Some(Vec::new()) + { + Poll::Ready(()) + } else { + Poll::Pending + } + })); } #[test] fn sync_after_fork_works() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.peer(0).push_blocks(30, false); - net.peer(1).push_blocks(30, false); - net.peer(2).push_blocks(30, false); - - net.peer(0).push_blocks(10, true); - net.peer(1).push_blocks(20, false); - net.peer(2).push_blocks(20, false); - - net.peer(1).push_blocks(10, true); - net.peer(2).push_blocks(1, false); - - // peer 1 has the best chain - net.block_until_sync(); - let peer1 = &net.peers()[1]; - assert!(net.peers()[0].blockchain_canon_equals(peer1)); - (net.peers()[1].blockchain_canon_equals(peer1)); - (net.peers()[2].blockchain_canon_equals(peer1)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + net.peer(2).push_blocks(30, false); + + net.peer(0).push_blocks(10, true); + net.peer(1).push_blocks(20, false); + net.peer(2).push_blocks(20, false); + + net.peer(1).push_blocks(10, true); + net.peer(2).push_blocks(1, false); + + // peer 1 has the best chain + net.block_until_sync(); + let peer1 = &net.peers()[1]; + assert!(net.peers()[0].blockchain_canon_equals(peer1)); + (net.peers()[1].blockchain_canon_equals(peer1)); + (net.peers()[2].blockchain_canon_equals(peer1)); } #[test] fn syncs_all_forks() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(4); - net.peer(0).push_blocks(2, 
false); - net.peer(1).push_blocks(2, false); - - let b1 = net.peer(0).push_blocks(2, true); - let b2 = net.peer(1).push_blocks(4, false); - - net.block_until_sync(); - // Check that all peers have all of the branches. - assert!(net.peer(0).has_block(&b1)); - assert!(net.peer(0).has_block(&b2)); - assert!(net.peer(1).has_block(&b1)); - assert!(net.peer(1).has_block(&b2)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(4); + net.peer(0).push_blocks(2, false); + net.peer(1).push_blocks(2, false); + + let b1 = net.peer(0).push_blocks(2, true); + let b2 = net.peer(1).push_blocks(4, false); + + net.block_until_sync(); + // Check that all peers have all of the branches. + assert!(net.peer(0).has_block(&b1)); + assert!(net.peer(0).has_block(&b2)); + assert!(net.peer(1).has_block(&b1)); + assert!(net.peer(1).has_block(&b2)); } #[test] fn own_blocks_are_announced() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - net.block_until_sync(); // connect'em - net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| builder.build().unwrap().block); - - net.block_until_sync(); - - assert_eq!(net.peer(0).client.info().best_number, 1); - assert_eq!(net.peer(1).client.info().best_number, 1); - let peer0 = &net.peers()[0]; - assert!(net.peers()[1].blockchain_canon_equals(peer0)); - (net.peers()[2].blockchain_canon_equals(peer0)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + net.block_until_sync(); // connect'em + net.peer(0).generate_blocks(1, BlockOrigin::Own, |builder| { + builder.build().unwrap().block + }); + + net.block_until_sync(); + + assert_eq!(net.peer(0).client.info().best_number, 1); + assert_eq!(net.peer(1).client.info().best_number, 1); + let peer0 = &net.peers()[0]; + assert!(net.peers()[1].blockchain_canon_equals(peer0)); + (net.peers()[2].blockchain_canon_equals(peer0)); } #[test] fn blocks_are_not_announced_by_light_nodes() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(0); - - // 
full peer0 is connected to light peer - // light peer1 is connected to full peer2 - net.add_full_peer(); - net.add_light_peer(); - - // Sync between 0 and 1. - net.peer(0).push_blocks(1, false); - assert_eq!(net.peer(0).client.info().best_number, 1); - net.block_until_sync(); - assert_eq!(net.peer(1).client.info().best_number, 1); - - // Add another node and remove node 0. - net.add_full_peer(); - net.peers.remove(0); - - // Poll for a few seconds and make sure 1 and 2 (now 0 and 1) don't sync together. - let mut delay = futures_timer::Delay::new(Duration::from_secs(5)); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - Pin::new(&mut delay).poll(cx) - })); - assert_eq!(net.peer(1).client.info().best_number, 0); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(0); + + // full peer0 is connected to light peer + // light peer1 is connected to full peer2 + net.add_full_peer(); + net.add_light_peer(); + + // Sync between 0 and 1. + net.peer(0).push_blocks(1, false); + assert_eq!(net.peer(0).client.info().best_number, 1); + net.block_until_sync(); + assert_eq!(net.peer(1).client.info().best_number, 1); + + // Add another node and remove node 0. + net.add_full_peer(); + net.peers.remove(0); + + // Poll for a few seconds and make sure 1 and 2 (now 0 and 1) don't sync together. + let mut delay = futures_timer::Delay::new(Duration::from_secs(5)); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Pin::new(&mut delay).poll(cx) + })); + assert_eq!(net.peer(1).client.info().best_number, 0); } #[test] fn can_sync_small_non_best_forks() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(2); - net.peer(0).push_blocks(30, false); - net.peer(1).push_blocks(30, false); - - // small fork + reorg on peer 1. 
- net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); - let small_hash = net.peer(0).client().info().best_hash; - net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); - assert_eq!(net.peer(0).client().info().best_number, 40); - - // peer 1 only ever had the long fork. - net.peer(1).push_blocks(10, false); - assert_eq!(net.peer(1).client().info().best_number, 40); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); - - // poll until the two nodes connect, otherwise announcing the block will not work - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 { - Poll::Pending - } else { - Poll::Ready(()) - } - })); - - // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. - - assert_eq!(net.peer(0).client().info().best_number, 40); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - - net.peer(0).announce_block(small_hash, Vec::new()); - - // after announcing, peer 1 downloads the block. 
- - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Poll::Pending - } - Poll::Ready(()) - })); - net.block_until_sync(); - - let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); - net.peer(0).announce_block(another_fork, Vec::new()); - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(another_fork)).unwrap().is_none() { - return Poll::Pending - } - Poll::Ready(()) - })); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + + // small fork + reorg on peer 1. + net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); + let small_hash = net.peer(0).client().info().best_hash; + net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); + assert_eq!(net.peer(0).client().info().best_number, 40); + + // peer 1 only ever had the long fork. + net.peer(1).push_blocks(10, false); + assert_eq!(net.peer(1).client().info().best_number, 40); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + assert!(net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_none()); + + // poll until the two nodes connect, otherwise announcing the block will not work + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. 
+ + assert_eq!(net.peer(0).client().info().best_number, 40); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + assert!(!net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + + net.peer(0).announce_block(small_hash, Vec::new()); + + // after announcing, peer 1 downloads the block. + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + if net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_none() + { + return Poll::Pending; + } + Poll::Ready(()) + })); + net.block_until_sync(); + + let another_fork = net.peer(0).push_blocks_at(BlockId::Number(35), 2, true); + net.peer(0).announce_block(another_fork, Vec::new()); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net + .peer(1) + .client() + .header(&BlockId::Hash(another_fork)) + .unwrap() + .is_none() + { + return Poll::Pending; + } + Poll::Ready(()) + })); } #[test] fn can_not_sync_from_light_peer() { - let _ = ::env_logger::try_init(); - - // given the network with 1 full nodes (#0) and 1 light node (#1) - let mut net = TestNet::new(1); - net.add_light_peer(); - - // generate some blocks on #0 - net.peer(0).push_blocks(1, false); - - // and let the light client sync from this node - net.block_until_sync(); - - // ensure #0 && #1 have the same best block - let full0_info = net.peer(0).client.info(); - let light_info = net.peer(1).client.info(); - assert_eq!(full0_info.best_number, 1); - assert_eq!(light_info.best_number, 1); - assert_eq!(light_info.best_hash, full0_info.best_hash); - - // add new full client (#2) && remove #0 - net.add_full_peer(); - net.peers.remove(0); - - // ensure that the #2 (now #1) fails to sync block #1 even after 5 seconds - let mut test_finished = futures_timer::Delay::new(Duration::from_secs(5)); - 
block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - Pin::new(&mut test_finished).poll(cx) - })); + let _ = ::env_logger::try_init(); + + // given the network with 1 full nodes (#0) and 1 light node (#1) + let mut net = TestNet::new(1); + net.add_light_peer(); + + // generate some blocks on #0 + net.peer(0).push_blocks(1, false); + + // and let the light client sync from this node + net.block_until_sync(); + + // ensure #0 && #1 have the same best block + let full0_info = net.peer(0).client.info(); + let light_info = net.peer(1).client.info(); + assert_eq!(full0_info.best_number, 1); + assert_eq!(light_info.best_number, 1); + assert_eq!(light_info.best_hash, full0_info.best_hash); + + // add new full client (#2) && remove #0 + net.add_full_peer(); + net.peers.remove(0); + + // ensure that the #2 (now #1) fails to sync block #1 even after 5 seconds + let mut test_finished = futures_timer::Delay::new(Duration::from_secs(5)); + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + Pin::new(&mut test_finished).poll(cx) + })); } #[test] fn light_peer_imports_header_from_announce() { - let _ = ::env_logger::try_init(); - - fn import_with_announce(net: &mut TestNet, hash: H256) { - net.peer(0).announce_block(hash, Vec::new()); - - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { - Poll::Ready(()) - } else { - Poll::Pending - } - })); - } - - // given the network with 1 full nodes (#0) and 1 light node (#1) - let mut net = TestNet::new(1); - net.add_light_peer(); - - // let them connect to each other - net.block_until_sync(); - - // check that NEW block is imported from announce message - let new_hash = net.peer(0).push_blocks(1, false); - import_with_announce(&mut net, new_hash); - - // check that KNOWN STALE block is imported from announce message - let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); - 
import_with_announce(&mut net, known_stale_hash); + let _ = ::env_logger::try_init(); + + fn import_with_announce(net: &mut TestNet, hash: H256) { + net.peer(0).announce_block(hash, Vec::new()); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net + .peer(1) + .client() + .header(&BlockId::Hash(hash)) + .unwrap() + .is_some() + { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + } + + // given the network with 1 full nodes (#0) and 1 light node (#1) + let mut net = TestNet::new(1); + net.add_light_peer(); + + // let them connect to each other + net.block_until_sync(); + + // check that NEW block is imported from announce message + let new_hash = net.peer(0).push_blocks(1, false); + import_with_announce(&mut net, new_hash); + + // check that KNOWN STALE block is imported from announce message + let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); + import_with_announce(&mut net, known_stale_hash); } #[test] fn can_sync_explicit_forks() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(2); - net.peer(0).push_blocks(30, false); - net.peer(1).push_blocks(30, false); - - // small fork + reorg on peer 1. - net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); - let small_hash = net.peer(0).client().info().best_hash; - let small_number = net.peer(0).client().info().best_number; - net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); - assert_eq!(net.peer(0).client().info().best_number, 40); - - // peer 1 only ever had the long fork. 
- net.peer(1).push_blocks(10, false); - assert_eq!(net.peer(1).client().info().best_number, 40); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none()); - - // poll until the two nodes connect, otherwise announcing the block will not work - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { - Poll::Ready(()) - } - })); - - // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. - - assert_eq!(net.peer(0).client().info().best_number, 40); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - assert!(!net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - - // request explicit sync - let first_peer_id = net.peer(0).id(); - net.peer(1).set_sync_fork_request(vec![first_peer_id], small_hash, small_number); - - // peer 1 downloads the block. - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - - assert!(net.peer(0).client().header(&BlockId::Hash(small_hash)).unwrap().is_some()); - if net.peer(1).client().header(&BlockId::Hash(small_hash)).unwrap().is_none() { - return Poll::Pending - } - Poll::Ready(()) - })); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + net.peer(0).push_blocks(30, false); + net.peer(1).push_blocks(30, false); + + // small fork + reorg on peer 1. + net.peer(0).push_blocks_at(BlockId::Number(30), 2, true); + let small_hash = net.peer(0).client().info().best_hash; + let small_number = net.peer(0).client().info().best_number; + net.peer(0).push_blocks_at(BlockId::Number(30), 10, false); + assert_eq!(net.peer(0).client().info().best_number, 40); + + // peer 1 only ever had the long fork. 
+ net.peer(1).push_blocks(10, false); + assert_eq!(net.peer(1).client().info().best_number, 40); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + assert!(net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_none()); + + // poll until the two nodes connect, otherwise announcing the block will not work + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + + // synchronization: 0 synced to longer chain and 1 didn't sync to small chain. + + assert_eq!(net.peer(0).client().info().best_number, 40); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + assert!(!net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + + // request explicit sync + let first_peer_id = net.peer(0).id(); + net.peer(1) + .set_sync_fork_request(vec![first_peer_id], small_hash, small_number); + + // peer 1 downloads the block. 
+ block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + + assert!(net + .peer(0) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_some()); + if net + .peer(1) + .client() + .header(&BlockId::Hash(small_hash)) + .unwrap() + .is_none() + { + return Poll::Pending; + } + Poll::Ready(()) + })); } #[test] fn syncs_header_only_forks() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(0); - net.add_full_peer_with_states(None); - net.add_full_peer_with_states(Some(3)); - net.peer(0).push_blocks(2, false); - net.peer(1).push_blocks(2, false); - - net.peer(0).push_blocks(2, true); - let small_hash = net.peer(0).client().info().best_hash; - net.peer(1).push_blocks(4, false); - - net.block_until_sync(); - // Peer 1 will sync the small fork even though common block state is missing - assert!(net.peer(1).has_block(&small_hash)); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(0); + net.add_full_peer_with_states(None); + net.add_full_peer_with_states(Some(3)); + net.peer(0).push_blocks(2, false); + net.peer(1).push_blocks(2, false); + + net.peer(0).push_blocks(2, true); + let small_hash = net.peer(0).client().info().best_hash; + net.peer(1).push_blocks(4, false); + + net.block_until_sync(); + // Peer 1 will sync the small fork even though common block state is missing + assert!(net.peer(1).has_block(&small_hash)); } #[test] fn does_not_sync_announced_old_best_block() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(3); - - let old_hash = net.peer(0).push_blocks(1, false); - let old_hash_with_parent = net.peer(0).push_blocks(1, false); - net.peer(0).push_blocks(18, true); - net.peer(1).push_blocks(20, true); - - net.peer(0).announce_block(old_hash, Vec::new()); - block_on(futures::future::poll_fn::<(), _>(|cx| { - // poll once to import announcement - net.poll(cx); - Poll::Ready(()) - })); - assert!(!net.peer(1).is_major_syncing()); - - net.peer(0).announce_block(old_hash_with_parent, Vec::new()); 
- block_on(futures::future::poll_fn::<(), _>(|cx| { - // poll once to import announcement - net.poll(cx); - Poll::Ready(()) - })); - assert!(!net.peer(1).is_major_syncing()); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(3); + + let old_hash = net.peer(0).push_blocks(1, false); + let old_hash_with_parent = net.peer(0).push_blocks(1, false); + net.peer(0).push_blocks(18, true); + net.peer(1).push_blocks(20, true); + + net.peer(0).announce_block(old_hash, Vec::new()); + block_on(futures::future::poll_fn::<(), _>(|cx| { + // poll once to import announcement + net.poll(cx); + Poll::Ready(()) + })); + assert!(!net.peer(1).is_major_syncing()); + + net.peer(0).announce_block(old_hash_with_parent, Vec::new()); + block_on(futures::future::poll_fn::<(), _>(|cx| { + // poll once to import announcement + net.poll(cx); + Poll::Ready(()) + })); + assert!(!net.peer(1).is_major_syncing()); } #[test] fn full_sync_requires_block_body() { - // Check that we don't sync headers-only in full mode. - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(2); - - net.peer(0).push_headers(1); - // Wait for nodes to connect - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { - Poll::Ready(()) - } - })); - net.block_until_idle(); - assert_eq!(net.peer(1).client.info().best_number, 0); + // Check that we don't sync headers-only in full mode. 
+ let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + + net.peer(0).push_headers(1); + // Wait for nodes to connect + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + })); + net.block_until_idle(); + assert_eq!(net.peer(1).client.info().best_number, 0); } #[test] fn imports_stale_once() { - let _ = ::env_logger::try_init(); - - fn import_with_announce(net: &mut TestNet, hash: H256) { - // Announce twice - net.peer(0).announce_block(hash, Vec::new()); - net.peer(0).announce_block(hash, Vec::new()); - - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(1).client().header(&BlockId::Hash(hash)).unwrap().is_some() { - Poll::Ready(()) - } else { - Poll::Pending - } - })); - } - - // given the network with 2 full nodes - let mut net = TestNet::new(2); - - // let them connect to each other - net.block_until_sync(); - - // check that NEW block is imported from announce message - let new_hash = net.peer(0).push_blocks(1, false); - import_with_announce(&mut net, new_hash); - assert_eq!(net.peer(1).num_processed_blocks(), 1); - - // check that KNOWN STALE block is imported from announce message - let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); - import_with_announce(&mut net, known_stale_hash); - assert_eq!(net.peer(1).num_processed_blocks(), 2); + let _ = ::env_logger::try_init(); + + fn import_with_announce(net: &mut TestNet, hash: H256) { + // Announce twice + net.peer(0).announce_block(hash, Vec::new()); + net.peer(0).announce_block(hash, Vec::new()); + + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net + .peer(1) + .client() + .header(&BlockId::Hash(hash)) + .unwrap() + .is_some() + { + Poll::Ready(()) + } else { + Poll::Pending + } + })); + } + + // given the network with 2 full nodes + let mut net = TestNet::new(2); + + // let them 
connect to each other + net.block_until_sync(); + + // check that NEW block is imported from announce message + let new_hash = net.peer(0).push_blocks(1, false); + import_with_announce(&mut net, new_hash); + assert_eq!(net.peer(1).num_processed_blocks(), 1); + + // check that KNOWN STALE block is imported from announce message + let known_stale_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 1, true); + import_with_announce(&mut net, known_stale_hash); + assert_eq!(net.peer(1).num_processed_blocks(), 2); } #[test] fn can_sync_to_peers_with_wrong_common_block() { - let _ = ::env_logger::try_init(); - let mut net = TestNet::new(2); - - net.peer(0).push_blocks(2, true); - net.peer(1).push_blocks(2, true); - let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false); - net.peer(1).push_blocks_at(BlockId::Number(0), 2, false); - // wait for connection - block_on(futures::future::poll_fn::<(), _>(|cx| { - net.poll(cx); - if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { - Poll::Pending - } else { - Poll::Ready(()) - } - })); - - // both peers re-org to the same fork without notifying each other - net.peer(0).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); - net.peer(1).client().finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true).unwrap(); - let final_hash = net.peer(0).push_blocks(1, false); - - net.block_until_sync(); - - assert!(net.peer(1).client().header(&BlockId::Hash(final_hash)).unwrap().is_some()); + let _ = ::env_logger::try_init(); + let mut net = TestNet::new(2); + + net.peer(0).push_blocks(2, true); + net.peer(1).push_blocks(2, true); + let fork_hash = net.peer(0).push_blocks_at(BlockId::Number(0), 2, false); + net.peer(1).push_blocks_at(BlockId::Number(0), 2, false); + // wait for connection + block_on(futures::future::poll_fn::<(), _>(|cx| { + net.poll(cx); + if net.peer(0).num_peers() == 0 || net.peer(1).num_peers() == 0 { + Poll::Pending + } else { + Poll::Ready(()) + } + 
})); + + // both peers re-org to the same fork without notifying each other + net.peer(0) + .client() + .finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true) + .unwrap(); + net.peer(1) + .client() + .finalize_block(BlockId::Hash(fork_hash), Some(Vec::new()), true) + .unwrap(); + let final_hash = net.peer(0).push_blocks(1, false); + + net.block_until_sync(); + + assert!(net + .peer(1) + .client() + .header(&BlockId::Hash(final_hash)) + .unwrap() + .is_some()); } - diff --git a/client/offchain/src/api.rs b/client/offchain/src/api.rs index 45a82d230c..c28054e711 100644 --- a/client/offchain/src/api.rs +++ b/client/offchain/src/api.rs @@ -14,21 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use std::{ - str::FromStr, - sync::Arc, - convert::TryFrom, - thread::sleep, -}; +use std::{convert::TryFrom, str::FromStr, sync::Arc, thread::sleep}; -use sp_core::offchain::OffchainStorage; +use codec::{Decode, Encode}; use futures::Future; use log::error; -use sc_network::{PeerId, Multiaddr, NetworkStateInfo}; -use codec::{Encode, Decode}; +use sc_network::{Multiaddr, NetworkStateInfo, PeerId}; +use sp_core::offchain::OffchainStorage; use sp_core::offchain::{ - Externalities as OffchainExt, HttpRequestId, Timestamp, HttpRequestStatus, HttpError, - OpaqueNetworkState, OpaquePeerId, OpaqueMultiaddr, StorageKind, + Externalities as OffchainExt, HttpError, HttpRequestId, HttpRequestStatus, OpaqueMultiaddr, + OpaqueNetworkState, OpaquePeerId, StorageKind, Timestamp, }; pub use sp_offchain::STORAGE_PREFIX; @@ -46,375 +41,382 @@ mod timestamp; /// /// NOTE this is done to prevent recursive calls into the runtime (which are not supported currently). pub(crate) struct Api { - /// Offchain Workers database. - db: Storage, - /// A NetworkState provider. - network_state: Arc, - /// Is this node a potential validator? - is_validator: bool, - /// Everything HTTP-related is handled by a different struct. 
- http: http::HttpApi, + /// Offchain Workers database. + db: Storage, + /// A NetworkState provider. + network_state: Arc, + /// Is this node a potential validator? + is_validator: bool, + /// Everything HTTP-related is handled by a different struct. + http: http::HttpApi, } fn unavailable_yet(name: &str) -> R { - error!( - "The {:?} API is not available for offchain workers yet. Follow \ - https://github.com/paritytech/substrate/issues/1458 for details", name - ); - Default::default() + error!( + "The {:?} API is not available for offchain workers yet. Follow \ + https://github.com/paritytech/substrate/issues/1458 for details", + name + ); + Default::default() } const LOCAL_DB: &str = "LOCAL (fork-aware) DB"; impl OffchainExt for Api { - fn is_validator(&self) -> bool { - self.is_validator - } - - fn network_state(&self) -> Result { - let external_addresses = self.network_state.external_addresses(); - - let state = NetworkState::new( - self.network_state.local_peer_id(), - external_addresses, - ); - Ok(OpaqueNetworkState::from(state)) - } - - fn timestamp(&mut self) -> Timestamp { - timestamp::now() - } - - fn sleep_until(&mut self, deadline: Timestamp) { - sleep(timestamp::timestamp_from_now(deadline)); - } - - fn random_seed(&mut self) -> [u8; 32] { - rand::random() - } - - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - match kind { - StorageKind::PERSISTENT => self.db.set(STORAGE_PREFIX, key, value), - StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - } - } - - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - match kind { - StorageKind::PERSISTENT => { - self.db.compare_and_set(STORAGE_PREFIX, key, old_value, new_value) - }, - StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - } - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - match kind { - StorageKind::PERSISTENT => 
self.db.get(STORAGE_PREFIX, key), - StorageKind::LOCAL => unavailable_yet(LOCAL_DB), - } - } - - fn http_request_start( - &mut self, - method: &str, - uri: &str, - _meta: &[u8] - ) -> Result { - self.http.request_start(method, uri) - } - - fn http_request_add_header( - &mut self, - request_id: HttpRequestId, - name: &str, - value: &str - ) -> Result<(), ()> { - self.http.request_add_header(request_id, name, value) - } - - fn http_request_write_body( - &mut self, - request_id: HttpRequestId, - chunk: &[u8], - deadline: Option - ) -> Result<(), HttpError> { - self.http.request_write_body(request_id, chunk, deadline) - } - - fn http_response_wait( - &mut self, - ids: &[HttpRequestId], - deadline: Option - ) -> Vec { - self.http.response_wait(ids, deadline) - } - - fn http_response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)> { - self.http.response_headers(request_id) - } - - fn http_response_read_body( - &mut self, - request_id: HttpRequestId, - buffer: &mut [u8], - deadline: Option - ) -> Result { - self.http.response_read_body(request_id, buffer, deadline) - } + fn is_validator(&self) -> bool { + self.is_validator + } + + fn network_state(&self) -> Result { + let external_addresses = self.network_state.external_addresses(); + + let state = NetworkState::new(self.network_state.local_peer_id(), external_addresses); + Ok(OpaqueNetworkState::from(state)) + } + + fn timestamp(&mut self) -> Timestamp { + timestamp::now() + } + + fn sleep_until(&mut self, deadline: Timestamp) { + sleep(timestamp::timestamp_from_now(deadline)); + } + + fn random_seed(&mut self) -> [u8; 32] { + rand::random() + } + + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + match kind { + StorageKind::PERSISTENT => self.db.set(STORAGE_PREFIX, key, value), + StorageKind::LOCAL => unavailable_yet(LOCAL_DB), + } + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: 
&[u8], + ) -> bool { + match kind { + StorageKind::PERSISTENT => { + self.db + .compare_and_set(STORAGE_PREFIX, key, old_value, new_value) + } + StorageKind::LOCAL => unavailable_yet(LOCAL_DB), + } + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + match kind { + StorageKind::PERSISTENT => self.db.get(STORAGE_PREFIX, key), + StorageKind::LOCAL => unavailable_yet(LOCAL_DB), + } + } + + fn http_request_start( + &mut self, + method: &str, + uri: &str, + _meta: &[u8], + ) -> Result { + self.http.request_start(method, uri) + } + + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { + self.http.request_add_header(request_id, name, value) + } + + fn http_request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option, + ) -> Result<(), HttpError> { + self.http.request_write_body(request_id, chunk, deadline) + } + + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { + self.http.response_wait(ids, deadline) + } + + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { + self.http.response_headers(request_id) + } + + fn http_response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option, + ) -> Result { + self.http.response_read_body(request_id, buffer, deadline) + } } /// Information about the local node's network state. 
#[derive(Clone, Eq, PartialEq, Debug)] pub struct NetworkState { - peer_id: PeerId, - external_addresses: Vec, + peer_id: PeerId, + external_addresses: Vec, } impl NetworkState { - fn new(peer_id: PeerId, external_addresses: Vec) -> Self { - NetworkState { - peer_id, - external_addresses, - } - } + fn new(peer_id: PeerId, external_addresses: Vec) -> Self { + NetworkState { + peer_id, + external_addresses, + } + } } impl From for OpaqueNetworkState { - fn from(state: NetworkState) -> OpaqueNetworkState { - let enc = Encode::encode(&state.peer_id.into_bytes()); - let peer_id = OpaquePeerId::new(enc); - - let external_addresses: Vec = state - .external_addresses - .iter() - .map(|multiaddr| { - let e = Encode::encode(&multiaddr.to_string()); - OpaqueMultiaddr::new(e) - }) - .collect(); - - OpaqueNetworkState { - peer_id, - external_addresses, - } - } + fn from(state: NetworkState) -> OpaqueNetworkState { + let enc = Encode::encode(&state.peer_id.into_bytes()); + let peer_id = OpaquePeerId::new(enc); + + let external_addresses: Vec = state + .external_addresses + .iter() + .map(|multiaddr| { + let e = Encode::encode(&multiaddr.to_string()); + OpaqueMultiaddr::new(e) + }) + .collect(); + + OpaqueNetworkState { + peer_id, + external_addresses, + } + } } impl TryFrom for NetworkState { - type Error = (); - - fn try_from(state: OpaqueNetworkState) -> Result { - let inner_vec = state.peer_id.0; - - let bytes: Vec = Decode::decode(&mut &inner_vec[..]).map_err(|_| ())?; - let peer_id = PeerId::from_bytes(bytes).map_err(|_| ())?; - - let external_addresses: Result, Self::Error> = state.external_addresses - .iter() - .map(|enc_multiaddr| -> Result { - let inner_vec = &enc_multiaddr.0; - let bytes = >::decode(&mut &inner_vec[..]).map_err(|_| ())?; - let multiaddr_str = String::from_utf8(bytes).map_err(|_| ())?; - let multiaddr = Multiaddr::from_str(&multiaddr_str).map_err(|_| ())?; - Ok(multiaddr) - }) - .collect(); - let external_addresses = external_addresses?; - - 
Ok(NetworkState { - peer_id, - external_addresses, - }) - } + type Error = (); + + fn try_from(state: OpaqueNetworkState) -> Result { + let inner_vec = state.peer_id.0; + + let bytes: Vec = Decode::decode(&mut &inner_vec[..]).map_err(|_| ())?; + let peer_id = PeerId::from_bytes(bytes).map_err(|_| ())?; + + let external_addresses: Result, Self::Error> = state + .external_addresses + .iter() + .map(|enc_multiaddr| -> Result { + let inner_vec = &enc_multiaddr.0; + let bytes = >::decode(&mut &inner_vec[..]).map_err(|_| ())?; + let multiaddr_str = String::from_utf8(bytes).map_err(|_| ())?; + let multiaddr = Multiaddr::from_str(&multiaddr_str).map_err(|_| ())?; + Ok(multiaddr) + }) + .collect(); + let external_addresses = external_addresses?; + + Ok(NetworkState { + peer_id, + external_addresses, + }) + } } /// Offchain extensions implementation API /// /// This is the asynchronous processing part of the API. pub(crate) struct AsyncApi { - /// Everything HTTP-related is handled by a different struct. - http: Option, + /// Everything HTTP-related is handled by a different struct. + http: Option, } impl AsyncApi { - /// Creates new Offchain extensions API implementation an the asynchronous processing part. - pub fn new( - db: S, - network_state: Arc, - is_validator: bool, - ) -> (Api, AsyncApi) { - let (http_api, http_worker) = http::http(); - - let api = Api { - db, - network_state, - is_validator, - http: http_api, - }; - - let async_api = AsyncApi { - http: Some(http_worker), - }; - - (api, async_api) - } - - /// Run a processing task for the API - pub fn process(mut self) -> impl Future { - let http = self.http.take().expect("Take invoked only once."); - - http - } + /// Creates new Offchain extensions API implementation an the asynchronous processing part. 
+ pub fn new( + db: S, + network_state: Arc, + is_validator: bool, + ) -> (Api, AsyncApi) { + let (http_api, http_worker) = http::http(); + + let api = Api { + db, + network_state, + is_validator, + http: http_api, + }; + + let async_api = AsyncApi { + http: Some(http_worker), + }; + + (api, async_api) + } + + /// Run a processing task for the API + pub fn process(mut self) -> impl Future { + let http = self.http.take().expect("Take invoked only once."); + + http + } } #[cfg(test)] mod tests { - use super::*; - use std::{convert::{TryFrom, TryInto}, time::SystemTime}; - use sc_client_db::offchain::LocalStorage; - use sc_network::PeerId; - - struct MockNetworkStateInfo(); - - impl NetworkStateInfo for MockNetworkStateInfo { - fn external_addresses(&self) -> Vec { - Vec::new() - } - - fn local_peer_id(&self) -> PeerId { - PeerId::random() - } - } - - fn offchain_api() -> (Api, AsyncApi) { - let _ = env_logger::try_init(); - let db = LocalStorage::new_test(); - let mock = Arc::new(MockNetworkStateInfo()); - - AsyncApi::new( - db, - mock, - false, - ) - } - - #[test] - fn should_get_timestamp() { - let mut api = offchain_api().0; - - // Get timestamp from std. - let now = SystemTime::now(); - let d: u64 = now.duration_since(SystemTime::UNIX_EPOCH).unwrap().as_millis().try_into().unwrap(); - - // Get timestamp from offchain api. - let timestamp = api.timestamp(); - - // Compare. - assert!(timestamp.unix_millis() > 0); - assert_eq!(timestamp.unix_millis(), d); - } - - #[test] - fn should_sleep() { - let mut api = offchain_api().0; - - // Arrange. - let now = api.timestamp(); - let delta = sp_core::offchain::Duration::from_millis(100); - let deadline = now.add(delta); - - // Act. - api.sleep_until(deadline); - let new_now = api.timestamp(); - - // Assert. - // The diff could be more than the sleep duration. 
- assert!(new_now.unix_millis() - 100 >= now.unix_millis()); - } - - #[test] - fn should_set_and_get_local_storage() { - // given - let kind = StorageKind::PERSISTENT; - let mut api = offchain_api().0; - let key = b"test"; - - // when - assert_eq!(api.local_storage_get(kind, key), None); - api.local_storage_set(kind, key, b"value"); - - // then - assert_eq!(api.local_storage_get(kind, key), Some(b"value".to_vec())); - } - - #[test] - fn should_compare_and_set_local_storage() { - // given - let kind = StorageKind::PERSISTENT; - let mut api = offchain_api().0; - let key = b"test"; - api.local_storage_set(kind, key, b"value"); - - // when - assert_eq!(api.local_storage_compare_and_set(kind, key, Some(b"val"), b"xxx"), false); - assert_eq!(api.local_storage_get(kind, key), Some(b"value".to_vec())); - - // when - assert_eq!(api.local_storage_compare_and_set(kind, key, Some(b"value"), b"xxx"), true); - assert_eq!(api.local_storage_get(kind, key), Some(b"xxx".to_vec())); - } - - #[test] - fn should_compare_and_set_local_storage_with_none() { - // given - let kind = StorageKind::PERSISTENT; - let mut api = offchain_api().0; - let key = b"test"; - - // when - let res = api.local_storage_compare_and_set(kind, key, None, b"value"); - - // then - assert_eq!(res, true); - assert_eq!(api.local_storage_get(kind, key), Some(b"value".to_vec())); - } - - #[test] - fn should_convert_network_states() { - // given - let state = NetworkState::new( - PeerId::random(), - vec![ - Multiaddr::try_from("/ip4/127.0.0.1/tcp/1234".to_string()).unwrap(), - Multiaddr::try_from("/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21").unwrap(), - ], - ); - - // when - let opaque_state = OpaqueNetworkState::from(state.clone()); - let converted_back_state = NetworkState::try_from(opaque_state).unwrap(); - - // then - assert_eq!(state, converted_back_state); - } - - #[test] - fn should_get_random_seed() { - // given - let mut api = offchain_api().0; - let seed = api.random_seed(); - // then - assert_ne!(seed, [0; 
32]); - } + use super::*; + use sc_client_db::offchain::LocalStorage; + use sc_network::PeerId; + use std::{ + convert::{TryFrom, TryInto}, + time::SystemTime, + }; + + struct MockNetworkStateInfo(); + + impl NetworkStateInfo for MockNetworkStateInfo { + fn external_addresses(&self) -> Vec { + Vec::new() + } + + fn local_peer_id(&self) -> PeerId { + PeerId::random() + } + } + + fn offchain_api() -> (Api, AsyncApi) { + let _ = env_logger::try_init(); + let db = LocalStorage::new_test(); + let mock = Arc::new(MockNetworkStateInfo()); + + AsyncApi::new(db, mock, false) + } + + #[test] + fn should_get_timestamp() { + let mut api = offchain_api().0; + + // Get timestamp from std. + let now = SystemTime::now(); + let d: u64 = now + .duration_since(SystemTime::UNIX_EPOCH) + .unwrap() + .as_millis() + .try_into() + .unwrap(); + + // Get timestamp from offchain api. + let timestamp = api.timestamp(); + + // Compare. + assert!(timestamp.unix_millis() > 0); + assert_eq!(timestamp.unix_millis(), d); + } + + #[test] + fn should_sleep() { + let mut api = offchain_api().0; + + // Arrange. + let now = api.timestamp(); + let delta = sp_core::offchain::Duration::from_millis(100); + let deadline = now.add(delta); + + // Act. + api.sleep_until(deadline); + let new_now = api.timestamp(); + + // Assert. + // The diff could be more than the sleep duration. 
+ assert!(new_now.unix_millis() - 100 >= now.unix_millis()); + } + + #[test] + fn should_set_and_get_local_storage() { + // given + let kind = StorageKind::PERSISTENT; + let mut api = offchain_api().0; + let key = b"test"; + + // when + assert_eq!(api.local_storage_get(kind, key), None); + api.local_storage_set(kind, key, b"value"); + + // then + assert_eq!(api.local_storage_get(kind, key), Some(b"value".to_vec())); + } + + #[test] + fn should_compare_and_set_local_storage() { + // given + let kind = StorageKind::PERSISTENT; + let mut api = offchain_api().0; + let key = b"test"; + api.local_storage_set(kind, key, b"value"); + + // when + assert_eq!( + api.local_storage_compare_and_set(kind, key, Some(b"val"), b"xxx"), + false + ); + assert_eq!(api.local_storage_get(kind, key), Some(b"value".to_vec())); + + // when + assert_eq!( + api.local_storage_compare_and_set(kind, key, Some(b"value"), b"xxx"), + true + ); + assert_eq!(api.local_storage_get(kind, key), Some(b"xxx".to_vec())); + } + + #[test] + fn should_compare_and_set_local_storage_with_none() { + // given + let kind = StorageKind::PERSISTENT; + let mut api = offchain_api().0; + let key = b"test"; + + // when + let res = api.local_storage_compare_and_set(kind, key, None, b"value"); + + // then + assert_eq!(res, true); + assert_eq!(api.local_storage_get(kind, key), Some(b"value".to_vec())); + } + + #[test] + fn should_convert_network_states() { + // given + let state = NetworkState::new( + PeerId::random(), + vec![ + Multiaddr::try_from("/ip4/127.0.0.1/tcp/1234".to_string()).unwrap(), + Multiaddr::try_from("/ip6/2601:9:4f81:9700:803e:ca65:66e8:c21").unwrap(), + ], + ); + + // when + let opaque_state = OpaqueNetworkState::from(state.clone()); + let converted_back_state = NetworkState::try_from(opaque_state).unwrap(); + + // then + assert_eq!(state, converted_back_state); + } + + #[test] + fn should_get_random_seed() { + // given + let mut api = offchain_api().0; + let seed = api.random_seed(); + // then + 
assert_ne!(seed, [0; 32]); + } } diff --git a/client/offchain/src/api/http.rs b/client/offchain/src/api/http.rs index a64fe03897..1c03f7a1ae 100644 --- a/client/offchain/src/api/http.rs +++ b/client/offchain/src/api/http.rs @@ -26,36 +26,36 @@ //! actively calling any function. use crate::api::timestamp; -use bytes::buf::ext::{Reader, BufExt}; +use bytes::buf::ext::{BufExt, Reader}; use fnv::FnvHashMap; -use futures::{prelude::*, future, channel::mpsc}; +use futures::{channel::mpsc, future, prelude::*}; use log::error; -use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; +use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{fmt, io::Read as _, mem, pin::Pin, task::Context, task::Poll}; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; /// Creates a pair of [`HttpApi`] and [`HttpWorker`]. pub fn http() -> (HttpApi, HttpWorker) { - let (to_worker, from_api) = tracing_unbounded("mpsc_ocw_to_worker"); - let (to_api, from_worker) = tracing_unbounded("mpsc_ocw_to_api"); - - let api = HttpApi { - to_worker, - from_worker: from_worker.fuse(), - // We start with a random ID for the first HTTP request, to prevent mischievous people from - // writing runtime code with hardcoded IDs. 
- next_id: HttpRequestId(rand::random::() % 2000), - requests: FnvHashMap::default(), - }; - - let engine = HttpWorker { - to_api, - from_api, - http_client: hyper::Client::builder().build(hyper_rustls::HttpsConnector::new()), - requests: Vec::new(), - }; - - (api, engine) + let (to_worker, from_api) = tracing_unbounded("mpsc_ocw_to_worker"); + let (to_api, from_worker) = tracing_unbounded("mpsc_ocw_to_api"); + + let api = HttpApi { + to_worker, + from_worker: from_worker.fuse(), + // We start with a random ID for the first HTTP request, to prevent mischievous people from + // writing runtime code with hardcoded IDs. + next_id: HttpRequestId(rand::random::() % 2000), + requests: FnvHashMap::default(), + }; + + let engine = HttpWorker { + to_api, + from_api, + http_client: hyper::Client::builder().build(hyper_rustls::HttpsConnector::new()), + requests: Vec::new(), + }; + + (api, engine) } /// Provides HTTP capabilities. @@ -63,919 +63,1029 @@ pub fn http() -> (HttpApi, HttpWorker) { /// Since this struct is a helper for offchain workers, its API is mimicking the API provided /// to offchain workers. pub struct HttpApi { - /// Used to sends messages to the worker. - to_worker: TracingUnboundedSender, - /// Used to receive messages from the worker. - /// We use a `Fuse` in order to have an extra protection against panicking. - from_worker: stream::Fuse>, - /// Id to assign to the next HTTP request that is started. - next_id: HttpRequestId, - /// List of HTTP requests in preparation or in progress. - requests: FnvHashMap, + /// Used to sends messages to the worker. + to_worker: TracingUnboundedSender, + /// Used to receive messages from the worker. + /// We use a `Fuse` in order to have an extra protection against panicking. + from_worker: stream::Fuse>, + /// Id to assign to the next HTTP request that is started. + next_id: HttpRequestId, + /// List of HTTP requests in preparation or in progress. + requests: FnvHashMap, } /// One active request within `HttpApi`. 
enum HttpApiRequest { - /// The request object is being constructed locally and not started yet. - NotDispatched(hyper::Request, hyper::body::Sender), - /// The request has been dispatched and we're in the process of sending out the body (if the - /// field is `Some`) or waiting for a response (if the field is `None`). - Dispatched(Option), - /// Received a response. - Response(HttpApiRequestRp), - /// A request has been dispatched but the worker notified us of an error. We report this - /// failure to the user as an `IoError` and remove the request from the list as soon as - /// possible. - Fail(hyper::Error), + /// The request object is being constructed locally and not started yet. + NotDispatched(hyper::Request, hyper::body::Sender), + /// The request has been dispatched and we're in the process of sending out the body (if the + /// field is `Some`) or waiting for a response (if the field is `None`). + Dispatched(Option), + /// Received a response. + Response(HttpApiRequestRp), + /// A request has been dispatched but the worker notified us of an error. We report this + /// failure to the user as an `IoError` and remove the request from the list as soon as + /// possible. + Fail(hyper::Error), } /// A request within `HttpApi` that has received a response. struct HttpApiRequestRp { - /// We might still be writing the request's body when the response comes. - /// This field allows to continue writing that body. - sending_body: Option, - /// Status code of the response. - status_code: hyper::StatusCode, - /// Headers of the response. - headers: hyper::HeaderMap, - /// Body of the response, as a channel of `Chunk` objects. - /// While the code is designed to drop the `Receiver` once it ends, we wrap it within a - /// `Fuse` in order to be extra precautious about panics. - /// Elements extracted from the channel are first put into `current_read_chunk`. 
- /// If the channel produces an error, then that is translated into an `IoError` and the request - /// is removed from the list. - body: stream::Fuse>>, - /// Chunk that has been extracted from the channel and that is currently being read. - /// Reading data from the response should read from this field in priority. - current_read_chunk: Option>, + /// We might still be writing the request's body when the response comes. + /// This field allows to continue writing that body. + sending_body: Option, + /// Status code of the response. + status_code: hyper::StatusCode, + /// Headers of the response. + headers: hyper::HeaderMap, + /// Body of the response, as a channel of `Chunk` objects. + /// While the code is designed to drop the `Receiver` once it ends, we wrap it within a + /// `Fuse` in order to be extra precautious about panics. + /// Elements extracted from the channel are first put into `current_read_chunk`. + /// If the channel produces an error, then that is translated into an `IoError` and the request + /// is removed from the list. + body: stream::Fuse>>, + /// Chunk that has been extracted from the channel and that is currently being read. + /// Reading data from the response should read from this field in priority. + current_read_chunk: Option>, } impl HttpApi { - /// Mimics the corresponding method in the offchain API. - pub fn request_start( - &mut self, - method: &str, - uri: &str - ) -> Result { - // Start by building the prototype of the request. - // We do this first so that we don't touch anything in `self` if building the prototype - // fails. 
- let (body_sender, body) = hyper::Body::channel(); - let mut request = hyper::Request::new(body); - *request.method_mut() = hyper::Method::from_bytes(method.as_bytes()).map_err(|_| ())?; - *request.uri_mut() = hyper::Uri::from_maybe_shared(uri.to_owned()).map_err(|_| ())?; - - let new_id = self.next_id; - debug_assert!(!self.requests.contains_key(&new_id)); - match self.next_id.0.checked_add(1) { - Some(new_id) => self.next_id.0 = new_id, - None => { - error!("Overflow in offchain worker HTTP request ID assignment"); - return Err(()); - } - }; - self.requests.insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); - - Ok(new_id) - } - - /// Mimics the corresponding method in the offchain API. - pub fn request_add_header( - &mut self, - request_id: HttpRequestId, - name: &str, - value: &str - ) -> Result<(), ()> { - let request = match self.requests.get_mut(&request_id) { - Some(&mut HttpApiRequest::NotDispatched(ref mut rq, _)) => rq, - _ => return Err(()) - }; - - let name = hyper::header::HeaderName::from_bytes(name.as_bytes()).map_err(|_| ())?; - let value = hyper::header::HeaderValue::from_str(value).map_err(|_| ())?; - // Note that we're always appending headers and never replacing old values. - // We assume here that the user knows what they're doing. - request.headers_mut().append(name, value); - Ok(()) - } - - /// Mimics the corresponding method in the offchain API. - pub fn request_write_body( - &mut self, - request_id: HttpRequestId, - chunk: &[u8], - deadline: Option - ) -> Result<(), HttpError> { - // Extract the request from the list. - // Don't forget to add it back if necessary when returning. - let mut request = match self.requests.remove(&request_id) { - None => return Err(HttpError::Invalid), - Some(r) => r, - }; - - let mut deadline = timestamp::deadline_to_future(deadline); - // Closure that writes data to a sender, taking the deadline into account. 
Can return `Ok` - // (if the body has been written), or `DeadlineReached`, or `IoError`. - // If `IoError` is returned, don't forget to remove the request from the list. - let mut poll_sender = move |sender: &mut hyper::body::Sender| -> Result<(), HttpError> { - let mut when_ready = future::maybe_done(future::poll_fn(|cx| sender.poll_ready(cx))); - futures::executor::block_on(future::select(&mut when_ready, &mut deadline)); - match when_ready { - future::MaybeDone::Done(Ok(())) => {} - future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), - future::MaybeDone::Future(_) | - future::MaybeDone::Gone => { - debug_assert!(if let future::MaybeDone::Done(_) = deadline { true } else { false }); - return Err(HttpError::DeadlineReached) - } - }; - - futures::executor::block_on(sender.send_data(hyper::body::Bytes::from(chunk.to_owned()))) - .map_err(|_| { - error!("HTTP sender refused data despite being ready"); - HttpError::IoError - }) - }; - - loop { - request = match request { - HttpApiRequest::NotDispatched(request, sender) => { - // If the request is not dispatched yet, dispatch it and loop again. - let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { - id: request_id, - request - }); - HttpApiRequest::Dispatched(Some(sender)) - } - - HttpApiRequest::Dispatched(Some(mut sender)) => - if !chunk.is_empty() { - match poll_sender(&mut sender) { - Err(HttpError::IoError) => return Err(HttpError::IoError), - other => { - self.requests.insert( - request_id, - HttpApiRequest::Dispatched(Some(sender)) - ); - return other - } - } - } else { - // Writing an empty body is a hint that we should stop writing. Dropping - // the sender. - self.requests.insert(request_id, HttpApiRequest::Dispatched(None)); - return Ok(()) - } - - HttpApiRequest::Response(mut response @ HttpApiRequestRp { sending_body: Some(_), .. 
}) => - if !chunk.is_empty() { - match poll_sender(response.sending_body.as_mut() - .expect("Can only enter this match branch if Some; qed")) { - Err(HttpError::IoError) => return Err(HttpError::IoError), - other => { - self.requests.insert(request_id, HttpApiRequest::Response(response)); - return other - } - } - - } else { - // Writing an empty body is a hint that we should stop writing. Dropping - // the sender. - self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { - sending_body: None, - ..response - })); - return Ok(()) - } - - HttpApiRequest::Fail(_) => - // If the request has already failed, return without putting back the request - // in the list. - return Err(HttpError::IoError), - - v @ HttpApiRequest::Dispatched(None) | - v @ HttpApiRequest::Response(HttpApiRequestRp { sending_body: None, .. }) => { - // We have already finished sending this body. - self.requests.insert(request_id, v); - return Err(HttpError::Invalid) - } - } - } - } - - /// Mimics the corresponding method in the offchain API. - pub fn response_wait( - &mut self, - ids: &[HttpRequestId], - deadline: Option - ) -> Vec { - // First of all, dispatch all the non-dispatched requests and drop all senders so that the - // user can't write anymore data. - for id in ids { - match self.requests.get_mut(id) { - Some(HttpApiRequest::NotDispatched(_, _)) => {} - Some(HttpApiRequest::Dispatched(sending_body)) | - Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { - let _ = sending_body.take(); - continue - } - _ => continue - }; - - let (request, _sender) = match self.requests.remove(id) { - Some(HttpApiRequest::NotDispatched(rq, s)) => (rq, s), - _ => unreachable!("we checked for NotDispatched above; qed") - }; - - let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { - id: *id, - request - }); - - // We also destroy the sender in order to forbid writing more data. 
- self.requests.insert(*id, HttpApiRequest::Dispatched(None)); - } - - let mut deadline = timestamp::deadline_to_future(deadline); - - loop { - // Within that loop, first try to see if we have all the elements for a response. - // This includes the situation where the deadline is reached. - { - let mut output = Vec::with_capacity(ids.len()); - let mut must_wait_more = false; - for id in ids { - output.push(match self.requests.get(id) { - None => HttpRequestStatus::Invalid, - Some(HttpApiRequest::NotDispatched(_, _)) => - unreachable!("we replaced all the NotDispatched with Dispatched earlier; qed"), - Some(HttpApiRequest::Dispatched(_)) => { - must_wait_more = true; - HttpRequestStatus::DeadlineReached - }, - Some(HttpApiRequest::Fail(_)) => HttpRequestStatus::IoError, - Some(HttpApiRequest::Response(HttpApiRequestRp { status_code, .. })) => - HttpRequestStatus::Finished(status_code.as_u16()), - }); - } - debug_assert_eq!(output.len(), ids.len()); - - // Are we ready to call `return`? - let is_done = if let future::MaybeDone::Done(_) = deadline { - true - } else { - !must_wait_more - }; - - if is_done { - // Requests in "fail" mode are purged before returning. - debug_assert_eq!(output.len(), ids.len()); - for n in (0..ids.len()).rev() { - if let HttpRequestStatus::IoError = output[n] { - self.requests.remove(&ids[n]); - } - } - return output - } - } - - // Grab next message from the worker. We call `continue` if deadline is reached so that - // we loop back and `return`. - let next_message = { - let mut next_msg = future::maybe_done(self.from_worker.next()); - futures::executor::block_on(future::select(&mut next_msg, &mut deadline)); - if let future::MaybeDone::Done(msg) = next_msg { - msg - } else { - debug_assert!(if let future::MaybeDone::Done(_) = deadline { true } else { false }); - continue - } - }; - - // Update internal state based on received message. 
- match next_message { - Some(WorkerToApi::Response { id, status_code, headers, body }) => - match self.requests.remove(&id) { - Some(HttpApiRequest::Dispatched(sending_body)) => { - self.requests.insert(id, HttpApiRequest::Response(HttpApiRequestRp { - sending_body, - status_code, - headers, - body: body.fuse(), - current_read_chunk: None, - })); - } - None => {} // can happen if we detected an IO error when sending the body - _ => error!("State mismatch between the API and worker"), - } - - Some(WorkerToApi::Fail { id, error }) => - match self.requests.remove(&id) { - Some(HttpApiRequest::Dispatched(_)) => { - self.requests.insert(id, HttpApiRequest::Fail(error)); - } - None => {} // can happen if we detected an IO error when sending the body - _ => error!("State mismatch between the API and worker"), - } - - None => { - error!("Worker has crashed"); - return ids.iter().map(|_| HttpRequestStatus::IoError).collect() - } - } - - } - } - - /// Mimics the corresponding method in the offchain API. - pub fn response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)> { - // Do an implicit non-blocking wait on the request. - let _ = self.response_wait(&[request_id], Some(timestamp::now())); - - let headers = match self.requests.get(&request_id) { - Some(HttpApiRequest::Response(HttpApiRequestRp { headers, .. })) => headers, - _ => return Vec::new() - }; - - headers - .iter() - .map(|(name, value)| (name.as_str().as_bytes().to_owned(), value.as_bytes().to_owned())) - .collect() - } - - /// Mimics the corresponding method in the offchain API. - pub fn response_read_body( - &mut self, - request_id: HttpRequestId, - buffer: &mut [u8], - deadline: Option - ) -> Result { - // Do an implicit wait on the request. - let _ = self.response_wait(&[request_id], deadline); - - // Remove the request from the list and handle situations where the request is invalid or - // in the wrong state. 
- let mut response = match self.requests.remove(&request_id) { - Some(HttpApiRequest::Response(r)) => r, - // Because we called `response_wait` above, we know that the deadline has been reached - // and we still haven't received a response. - Some(rq @ HttpApiRequest::Dispatched(_)) => { - self.requests.insert(request_id, rq); - return Err(HttpError::DeadlineReached) - }, - // The request has failed. - Some(HttpApiRequest::Fail { .. }) => - return Err(HttpError::IoError), - // Request hasn't been dispatched yet; reading the body is invalid. - Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { - self.requests.insert(request_id, rq); - return Err(HttpError::Invalid) - } - None => return Err(HttpError::Invalid) - }; - - // Convert the deadline into a `Future` that resolves when the deadline is reached. - let mut deadline = timestamp::deadline_to_future(deadline); - - loop { - // First read from `current_read_chunk`. - if let Some(mut current_read_chunk) = response.current_read_chunk.take() { - match current_read_chunk.read(buffer) { - Ok(0) => {} - Ok(n) => { - self.requests.insert(request_id, HttpApiRequest::Response(HttpApiRequestRp { - current_read_chunk: Some(current_read_chunk), - .. response - })); - return Ok(n) - }, - Err(err) => { - // This code should never be reached unless there's a logic error somewhere. - error!("Failed to read from current read chunk: {:?}", err); - return Err(HttpError::IoError) - } - } - } - - // If we reach here, that means the `current_read_chunk` is empty and needs to be - // filled with a new chunk from `body`. We block on either the next body or the - // deadline. 
- let mut next_body = future::maybe_done(response.body.next()); - futures::executor::block_on(future::select(&mut next_body, &mut deadline)); - - if let future::MaybeDone::Done(next_body) = next_body { - match next_body { - Some(Ok(chunk)) => response.current_read_chunk = Some(chunk.reader()), - Some(Err(_)) => return Err(HttpError::IoError), - None => return Ok(0), // eof - } - } - - if let future::MaybeDone::Done(_) = deadline { - self.requests.insert(request_id, HttpApiRequest::Response(response)); - return Err(HttpError::DeadlineReached) - } - } - } + /// Mimics the corresponding method in the offchain API. + pub fn request_start(&mut self, method: &str, uri: &str) -> Result { + // Start by building the prototype of the request. + // We do this first so that we don't touch anything in `self` if building the prototype + // fails. + let (body_sender, body) = hyper::Body::channel(); + let mut request = hyper::Request::new(body); + *request.method_mut() = hyper::Method::from_bytes(method.as_bytes()).map_err(|_| ())?; + *request.uri_mut() = hyper::Uri::from_maybe_shared(uri.to_owned()).map_err(|_| ())?; + + let new_id = self.next_id; + debug_assert!(!self.requests.contains_key(&new_id)); + match self.next_id.0.checked_add(1) { + Some(new_id) => self.next_id.0 = new_id, + None => { + error!("Overflow in offchain worker HTTP request ID assignment"); + return Err(()); + } + }; + self.requests + .insert(new_id, HttpApiRequest::NotDispatched(request, body_sender)); + + Ok(new_id) + } + + /// Mimics the corresponding method in the offchain API. 
+ pub fn request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { + let request = match self.requests.get_mut(&request_id) { + Some(&mut HttpApiRequest::NotDispatched(ref mut rq, _)) => rq, + _ => return Err(()), + }; + + let name = hyper::header::HeaderName::from_bytes(name.as_bytes()).map_err(|_| ())?; + let value = hyper::header::HeaderValue::from_str(value).map_err(|_| ())?; + // Note that we're always appending headers and never replacing old values. + // We assume here that the user knows what they're doing. + request.headers_mut().append(name, value); + Ok(()) + } + + /// Mimics the corresponding method in the offchain API. + pub fn request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option, + ) -> Result<(), HttpError> { + // Extract the request from the list. + // Don't forget to add it back if necessary when returning. + let mut request = match self.requests.remove(&request_id) { + None => return Err(HttpError::Invalid), + Some(r) => r, + }; + + let mut deadline = timestamp::deadline_to_future(deadline); + // Closure that writes data to a sender, taking the deadline into account. Can return `Ok` + // (if the body has been written), or `DeadlineReached`, or `IoError`. + // If `IoError` is returned, don't forget to remove the request from the list. 
+ let mut poll_sender = move |sender: &mut hyper::body::Sender| -> Result<(), HttpError> { + let mut when_ready = future::maybe_done(future::poll_fn(|cx| sender.poll_ready(cx))); + futures::executor::block_on(future::select(&mut when_ready, &mut deadline)); + match when_ready { + future::MaybeDone::Done(Ok(())) => {} + future::MaybeDone::Done(Err(_)) => return Err(HttpError::IoError), + future::MaybeDone::Future(_) | future::MaybeDone::Gone => { + debug_assert!(if let future::MaybeDone::Done(_) = deadline { + true + } else { + false + }); + return Err(HttpError::DeadlineReached); + } + }; + + futures::executor::block_on( + sender.send_data(hyper::body::Bytes::from(chunk.to_owned())), + ) + .map_err(|_| { + error!("HTTP sender refused data despite being ready"); + HttpError::IoError + }) + }; + + loop { + request = match request { + HttpApiRequest::NotDispatched(request, sender) => { + // If the request is not dispatched yet, dispatch it and loop again. + let _ = self.to_worker.unbounded_send(ApiToWorker::Dispatch { + id: request_id, + request, + }); + HttpApiRequest::Dispatched(Some(sender)) + } + + HttpApiRequest::Dispatched(Some(mut sender)) => { + if !chunk.is_empty() { + match poll_sender(&mut sender) { + Err(HttpError::IoError) => return Err(HttpError::IoError), + other => { + self.requests + .insert(request_id, HttpApiRequest::Dispatched(Some(sender))); + return other; + } + } + } else { + // Writing an empty body is a hint that we should stop writing. Dropping + // the sender. + self.requests + .insert(request_id, HttpApiRequest::Dispatched(None)); + return Ok(()); + } + } + + HttpApiRequest::Response( + mut + response + @ + HttpApiRequestRp { + sending_body: Some(_), + .. 
+ }, + ) => { + if !chunk.is_empty() { + match poll_sender( + response + .sending_body + .as_mut() + .expect("Can only enter this match branch if Some; qed"), + ) { + Err(HttpError::IoError) => return Err(HttpError::IoError), + other => { + self.requests + .insert(request_id, HttpApiRequest::Response(response)); + return other; + } + } + } else { + // Writing an empty body is a hint that we should stop writing. Dropping + // the sender. + self.requests.insert( + request_id, + HttpApiRequest::Response(HttpApiRequestRp { + sending_body: None, + ..response + }), + ); + return Ok(()); + } + } + + HttpApiRequest::Fail(_) => + // If the request has already failed, return without putting back the request + // in the list. + { + return Err(HttpError::IoError) + } + + v @ HttpApiRequest::Dispatched(None) + | v + @ + HttpApiRequest::Response(HttpApiRequestRp { + sending_body: None, .. + }) => { + // We have already finished sending this body. + self.requests.insert(request_id, v); + return Err(HttpError::Invalid); + } + } + } + } + + /// Mimics the corresponding method in the offchain API. + pub fn response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { + // First of all, dispatch all the non-dispatched requests and drop all senders so that the + // user can't write anymore data. + for id in ids { + match self.requests.get_mut(id) { + Some(HttpApiRequest::NotDispatched(_, _)) => {} + Some(HttpApiRequest::Dispatched(sending_body)) + | Some(HttpApiRequest::Response(HttpApiRequestRp { sending_body, .. })) => { + let _ = sending_body.take(); + continue; + } + _ => continue, + }; + + let (request, _sender) = match self.requests.remove(id) { + Some(HttpApiRequest::NotDispatched(rq, s)) => (rq, s), + _ => unreachable!("we checked for NotDispatched above; qed"), + }; + + let _ = self + .to_worker + .unbounded_send(ApiToWorker::Dispatch { id: *id, request }); + + // We also destroy the sender in order to forbid writing more data. 
+ self.requests.insert(*id, HttpApiRequest::Dispatched(None)); + } + + let mut deadline = timestamp::deadline_to_future(deadline); + + loop { + // Within that loop, first try to see if we have all the elements for a response. + // This includes the situation where the deadline is reached. + { + let mut output = Vec::with_capacity(ids.len()); + let mut must_wait_more = false; + for id in ids { + output.push(match self.requests.get(id) { + None => HttpRequestStatus::Invalid, + Some(HttpApiRequest::NotDispatched(_, _)) => unreachable!( + "we replaced all the NotDispatched with Dispatched earlier; qed" + ), + Some(HttpApiRequest::Dispatched(_)) => { + must_wait_more = true; + HttpRequestStatus::DeadlineReached + } + Some(HttpApiRequest::Fail(_)) => HttpRequestStatus::IoError, + Some(HttpApiRequest::Response(HttpApiRequestRp { + status_code, .. + })) => HttpRequestStatus::Finished(status_code.as_u16()), + }); + } + debug_assert_eq!(output.len(), ids.len()); + + // Are we ready to call `return`? + let is_done = if let future::MaybeDone::Done(_) = deadline { + true + } else { + !must_wait_more + }; + + if is_done { + // Requests in "fail" mode are purged before returning. + debug_assert_eq!(output.len(), ids.len()); + for n in (0..ids.len()).rev() { + if let HttpRequestStatus::IoError = output[n] { + self.requests.remove(&ids[n]); + } + } + return output; + } + } + + // Grab next message from the worker. We call `continue` if deadline is reached so that + // we loop back and `return`. + let next_message = { + let mut next_msg = future::maybe_done(self.from_worker.next()); + futures::executor::block_on(future::select(&mut next_msg, &mut deadline)); + if let future::MaybeDone::Done(msg) = next_msg { + msg + } else { + debug_assert!(if let future::MaybeDone::Done(_) = deadline { + true + } else { + false + }); + continue; + } + }; + + // Update internal state based on received message. 
+ match next_message { + Some(WorkerToApi::Response { + id, + status_code, + headers, + body, + }) => match self.requests.remove(&id) { + Some(HttpApiRequest::Dispatched(sending_body)) => { + self.requests.insert( + id, + HttpApiRequest::Response(HttpApiRequestRp { + sending_body, + status_code, + headers, + body: body.fuse(), + current_read_chunk: None, + }), + ); + } + None => {} // can happen if we detected an IO error when sending the body + _ => error!("State mismatch between the API and worker"), + }, + + Some(WorkerToApi::Fail { id, error }) => match self.requests.remove(&id) { + Some(HttpApiRequest::Dispatched(_)) => { + self.requests.insert(id, HttpApiRequest::Fail(error)); + } + None => {} // can happen if we detected an IO error when sending the body + _ => error!("State mismatch between the API and worker"), + }, + + None => { + error!("Worker has crashed"); + return ids.iter().map(|_| HttpRequestStatus::IoError).collect(); + } + } + } + } + + /// Mimics the corresponding method in the offchain API. + pub fn response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { + // Do an implicit non-blocking wait on the request. + let _ = self.response_wait(&[request_id], Some(timestamp::now())); + + let headers = match self.requests.get(&request_id) { + Some(HttpApiRequest::Response(HttpApiRequestRp { headers, .. })) => headers, + _ => return Vec::new(), + }; + + headers + .iter() + .map(|(name, value)| { + ( + name.as_str().as_bytes().to_owned(), + value.as_bytes().to_owned(), + ) + }) + .collect() + } + + /// Mimics the corresponding method in the offchain API. + pub fn response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option, + ) -> Result { + // Do an implicit wait on the request. + let _ = self.response_wait(&[request_id], deadline); + + // Remove the request from the list and handle situations where the request is invalid or + // in the wrong state. 
+ let mut response = match self.requests.remove(&request_id) { + Some(HttpApiRequest::Response(r)) => r, + // Because we called `response_wait` above, we know that the deadline has been reached + // and we still haven't received a response. + Some(rq @ HttpApiRequest::Dispatched(_)) => { + self.requests.insert(request_id, rq); + return Err(HttpError::DeadlineReached); + } + // The request has failed. + Some(HttpApiRequest::Fail { .. }) => return Err(HttpError::IoError), + // Request hasn't been dispatched yet; reading the body is invalid. + Some(rq @ HttpApiRequest::NotDispatched(_, _)) => { + self.requests.insert(request_id, rq); + return Err(HttpError::Invalid); + } + None => return Err(HttpError::Invalid), + }; + + // Convert the deadline into a `Future` that resolves when the deadline is reached. + let mut deadline = timestamp::deadline_to_future(deadline); + + loop { + // First read from `current_read_chunk`. + if let Some(mut current_read_chunk) = response.current_read_chunk.take() { + match current_read_chunk.read(buffer) { + Ok(0) => {} + Ok(n) => { + self.requests.insert( + request_id, + HttpApiRequest::Response(HttpApiRequestRp { + current_read_chunk: Some(current_read_chunk), + ..response + }), + ); + return Ok(n); + } + Err(err) => { + // This code should never be reached unless there's a logic error somewhere. + error!("Failed to read from current read chunk: {:?}", err); + return Err(HttpError::IoError); + } + } + } + + // If we reach here, that means the `current_read_chunk` is empty and needs to be + // filled with a new chunk from `body`. We block on either the next body or the + // deadline. 
+ let mut next_body = future::maybe_done(response.body.next()); + futures::executor::block_on(future::select(&mut next_body, &mut deadline)); + + if let future::MaybeDone::Done(next_body) = next_body { + match next_body { + Some(Ok(chunk)) => response.current_read_chunk = Some(chunk.reader()), + Some(Err(_)) => return Err(HttpError::IoError), + None => return Ok(0), // eof + } + } + + if let future::MaybeDone::Done(_) = deadline { + self.requests + .insert(request_id, HttpApiRequest::Response(response)); + return Err(HttpError::DeadlineReached); + } + } + } } impl fmt::Debug for HttpApi { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list() - .entries(self.requests.iter()) - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_list().entries(self.requests.iter()).finish() + } } impl fmt::Debug for HttpApiRequest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - HttpApiRequest::NotDispatched(_, _) => - f.debug_tuple("HttpApiRequest::NotDispatched").finish(), - HttpApiRequest::Dispatched(_) => - f.debug_tuple("HttpApiRequest::Dispatched").finish(), - HttpApiRequest::Response(HttpApiRequestRp { status_code, headers, .. }) => - f.debug_tuple("HttpApiRequest::Response").field(status_code).field(headers).finish(), - HttpApiRequest::Fail(err) => - f.debug_tuple("HttpApiRequest::Fail").field(err).finish(), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + HttpApiRequest::NotDispatched(_, _) => { + f.debug_tuple("HttpApiRequest::NotDispatched").finish() + } + HttpApiRequest::Dispatched(_) => f.debug_tuple("HttpApiRequest::Dispatched").finish(), + HttpApiRequest::Response(HttpApiRequestRp { + status_code, + headers, + .. + }) => f + .debug_tuple("HttpApiRequest::Response") + .field(status_code) + .field(headers) + .finish(), + HttpApiRequest::Fail(err) => f.debug_tuple("HttpApiRequest::Fail").field(err).finish(), + } + } } /// Message send from the API to the worker. 
enum ApiToWorker { - /// Dispatches a new HTTP request. - Dispatch { - /// ID to send back when the response comes back. - id: HttpRequestId, - /// Request to start executing. - request: hyper::Request, - } + /// Dispatches a new HTTP request. + Dispatch { + /// ID to send back when the response comes back. + id: HttpRequestId, + /// Request to start executing. + request: hyper::Request, + }, } /// Message send from the API to the worker. enum WorkerToApi { - /// A request has succeeded. - Response { - /// The ID that was passed to the worker. - id: HttpRequestId, - /// Status code of the response. - status_code: hyper::StatusCode, - /// Headers of the response. - headers: hyper::HeaderMap, - /// Body of the response, as a channel of `Chunk` objects. - /// We send the body back through a channel instead of returning the hyper `Body` object - /// because we don't want the `HttpApi` to have to drive the reading. - /// Instead, reading an item from the channel will notify the worker task, which will push - /// the next item. - /// Can also be used to send an error, in case an error happend on the HTTP socket. After - /// an error is sent, the channel will close. - body: mpsc::Receiver>, - }, - /// A request has failed because of an error. The request is then no longer valid. - Fail { - /// The ID that was passed to the worker. - id: HttpRequestId, - /// Error that happened. - error: hyper::Error, - }, + /// A request has succeeded. + Response { + /// The ID that was passed to the worker. + id: HttpRequestId, + /// Status code of the response. + status_code: hyper::StatusCode, + /// Headers of the response. + headers: hyper::HeaderMap, + /// Body of the response, as a channel of `Chunk` objects. + /// We send the body back through a channel instead of returning the hyper `Body` object + /// because we don't want the `HttpApi` to have to drive the reading. + /// Instead, reading an item from the channel will notify the worker task, which will push + /// the next item. 
+ /// Can also be used to send an error, in case an error happened on the HTTP socket. After
+ /// an error is sent, the channel will close.
+ body: mpsc::Receiver>,
+ },
+ /// A request has failed because of an error. The request is then no longer valid.
+ Fail {
+ /// The ID that was passed to the worker.
+ id: HttpRequestId,
+ /// Error that happened.
+ error: hyper::Error,
+ },
}

/// Must be continuously polled for the [`HttpApi`] to properly work.
pub struct HttpWorker {
- /// Used to sends messages to the `HttpApi`.
- to_api: TracingUnboundedSender,
- /// Used to receive messages from the `HttpApi`.
- from_api: TracingUnboundedReceiver,
- /// The engine that runs HTTP requests.
- http_client: hyper::Client, hyper::Body>,
- /// HTTP requests that are being worked on by the engine.
- requests: Vec<(HttpRequestId, HttpWorkerRequest)>,
+ /// Used to send messages to the `HttpApi`.
+ to_api: TracingUnboundedSender,
+ /// Used to receive messages from the `HttpApi`.
+ from_api: TracingUnboundedReceiver,
+ /// The engine that runs HTTP requests.
+ http_client:
+ hyper::Client, hyper::Body>,
+ /// HTTP requests that are being worked on by the engine.
+ requests: Vec<(HttpRequestId, HttpWorkerRequest)>,
}

/// HTTP request being processed by the worker.
enum HttpWorkerRequest {
- /// Request has been dispatched and is waiting for a response from the Internet.
- Dispatched(hyper::client::ResponseFuture),
- /// Progressively reading the body of the response and sending it to the channel.
- ReadBody {
- /// Body to read `Chunk`s from. Only used if the channel is ready to accept data.
- body: hyper::Body,
- /// Channel to the [`HttpApi`] where we send the chunks to.
- tx: mpsc::Sender>,
- },
+ /// Request has been dispatched and is waiting for a response from the Internet.
+ Dispatched(hyper::client::ResponseFuture),
+ /// Progressively reading the body of the response and sending it to the channel.
+ ReadBody {
+ /// Body to read `Chunk`s from. 
Only used if the channel is ready to accept data. + body: hyper::Body, + /// Channel to the [`HttpApi`] where we send the chunks to. + tx: mpsc::Sender>, + }, } impl Future for HttpWorker { - type Output = (); - - fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { - // Reminder: this is continuously run in the background. - - // We use a `me` variable because the compiler isn't smart enough to allow borrowing - // multiple fields at once through a `Deref`. - let me = &mut *self; - - // We remove each element from `requests` one by one and add them back only if necessary. - for n in (0..me.requests.len()).rev() { - let (id, request) = me.requests.swap_remove(n); - match request { - HttpWorkerRequest::Dispatched(mut future) => { - // Check for an HTTP response from the Internet. - let mut response = match Future::poll(Pin::new(&mut future), cx) { - Poll::Pending => { - me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - continue - }, - Poll::Ready(Ok(response)) => response, - Poll::Ready(Err(err)) => { - let _ = me.to_api.unbounded_send(WorkerToApi::Fail { - id, - error: err, - }); - continue; // don't insert the request back - } - }; - - // We received a response! Decompose it into its parts. - let status_code = response.status(); - let headers = mem::replace(response.headers_mut(), hyper::HeaderMap::new()); - let body = response.into_body(); - - let (body_tx, body_rx) = mpsc::channel(3); - let _ = me.to_api.unbounded_send(WorkerToApi::Response { - id, - status_code, - headers, - body: body_rx, - }); - - me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); - cx.waker().wake_by_ref(); // reschedule in order to poll the new future - continue - } - - HttpWorkerRequest::ReadBody { mut body, mut tx } => { - // Before reading from the HTTP response, check that `tx` is ready to accept - // a new chunk. 
- match tx.poll_ready(cx) { - Poll::Ready(Ok(())) => {} - Poll::Ready(Err(_)) => continue, // don't insert the request back - Poll::Pending => { - me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - continue - } - } - - // `tx` is ready. Read a chunk from the socket and send it to the channel. - match Stream::poll_next(Pin::new(&mut body), cx) { - Poll::Ready(Some(Ok(chunk))) => { - let _ = tx.start_send(Ok(chunk)); - me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - cx.waker().wake_by_ref(); // reschedule in order to continue reading - } - Poll::Ready(Some(Err(err))) => { - let _ = tx.start_send(Err(err)); - // don't insert the request back - }, - Poll::Ready(None) => {} // EOF; don't insert the request back - Poll::Pending => { - me.requests.push((id, HttpWorkerRequest::ReadBody { body, tx })); - }, - } - } - } - } - - // Check for messages coming from the [`HttpApi`]. - match Stream::poll_next(Pin::new(&mut me.from_api), cx) { - Poll::Pending => {}, - Poll::Ready(None) => return Poll::Ready(()), // stops the worker - Poll::Ready(Some(ApiToWorker::Dispatch { id, request })) => { - let future = me.http_client.request(request); - debug_assert!(me.requests.iter().all(|(i, _)| *i != id)); - me.requests.push((id, HttpWorkerRequest::Dispatched(future))); - cx.waker().wake_by_ref(); // reschedule the task to poll the request - } - } - - Poll::Pending - } + type Output = (); + + fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll { + // Reminder: this is continuously run in the background. + + // We use a `me` variable because the compiler isn't smart enough to allow borrowing + // multiple fields at once through a `Deref`. + let me = &mut *self; + + // We remove each element from `requests` one by one and add them back only if necessary. 
+ for n in (0..me.requests.len()).rev() { + let (id, request) = me.requests.swap_remove(n); + match request { + HttpWorkerRequest::Dispatched(mut future) => { + // Check for an HTTP response from the Internet. + let mut response = match Future::poll(Pin::new(&mut future), cx) { + Poll::Pending => { + me.requests + .push((id, HttpWorkerRequest::Dispatched(future))); + continue; + } + Poll::Ready(Ok(response)) => response, + Poll::Ready(Err(err)) => { + let _ = me + .to_api + .unbounded_send(WorkerToApi::Fail { id, error: err }); + continue; // don't insert the request back + } + }; + + // We received a response! Decompose it into its parts. + let status_code = response.status(); + let headers = mem::replace(response.headers_mut(), hyper::HeaderMap::new()); + let body = response.into_body(); + + let (body_tx, body_rx) = mpsc::channel(3); + let _ = me.to_api.unbounded_send(WorkerToApi::Response { + id, + status_code, + headers, + body: body_rx, + }); + + me.requests + .push((id, HttpWorkerRequest::ReadBody { body, tx: body_tx })); + cx.waker().wake_by_ref(); // reschedule in order to poll the new future + continue; + } + + HttpWorkerRequest::ReadBody { mut body, mut tx } => { + // Before reading from the HTTP response, check that `tx` is ready to accept + // a new chunk. + match tx.poll_ready(cx) { + Poll::Ready(Ok(())) => {} + Poll::Ready(Err(_)) => continue, // don't insert the request back + Poll::Pending => { + me.requests + .push((id, HttpWorkerRequest::ReadBody { body, tx })); + continue; + } + } + + // `tx` is ready. Read a chunk from the socket and send it to the channel. 
+ match Stream::poll_next(Pin::new(&mut body), cx) { + Poll::Ready(Some(Ok(chunk))) => { + let _ = tx.start_send(Ok(chunk)); + me.requests + .push((id, HttpWorkerRequest::ReadBody { body, tx })); + cx.waker().wake_by_ref(); // reschedule in order to continue reading + } + Poll::Ready(Some(Err(err))) => { + let _ = tx.start_send(Err(err)); + // don't insert the request back + } + Poll::Ready(None) => {} // EOF; don't insert the request back + Poll::Pending => { + me.requests + .push((id, HttpWorkerRequest::ReadBody { body, tx })); + } + } + } + } + } + + // Check for messages coming from the [`HttpApi`]. + match Stream::poll_next(Pin::new(&mut me.from_api), cx) { + Poll::Pending => {} + Poll::Ready(None) => return Poll::Ready(()), // stops the worker + Poll::Ready(Some(ApiToWorker::Dispatch { id, request })) => { + let future = me.http_client.request(request); + debug_assert!(me.requests.iter().all(|(i, _)| *i != id)); + me.requests + .push((id, HttpWorkerRequest::Dispatched(future))); + cx.waker().wake_by_ref(); // reschedule the task to poll the request + } + } + + Poll::Pending + } } impl fmt::Debug for HttpWorker { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_list() - .entries(self.requests.iter()) - .finish() - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_list().entries(self.requests.iter()).finish() + } } impl fmt::Debug for HttpWorkerRequest { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - HttpWorkerRequest::Dispatched(_) => - f.debug_tuple("HttpWorkerRequest::Dispatched").finish(), - HttpWorkerRequest::ReadBody { .. } => - f.debug_tuple("HttpWorkerRequest::Response").finish(), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + HttpWorkerRequest::Dispatched(_) => { + f.debug_tuple("HttpWorkerRequest::Dispatched").finish() + } + HttpWorkerRequest::ReadBody { .. 
} => { + f.debug_tuple("HttpWorkerRequest::Response").finish() + } + } + } } #[cfg(test)] mod tests { - use core::convert::Infallible; - use crate::api::timestamp; - use super::http; - use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Duration}; - - // Returns an `HttpApi` whose worker is ran in the background, and a `SocketAddr` to an HTTP - // server that runs in the background as well. - macro_rules! build_api_server { - () => {{ - fn tokio_run(future: impl std::future::Future) { - let _ = tokio::runtime::Runtime::new().unwrap().block_on(future); - } - - // We spawn quite a bit of HTTP servers here due to how async API - // works for offchain workers, so be sure to raise the FD limit - // (particularly useful for macOS where the default soft limit may - // not be enough). - fdlimit::raise_fd_limit(); - - let (api, worker) = http(); - std::thread::spawn(move || tokio_run(worker)); - - let (addr_tx, addr_rx) = std::sync::mpsc::channel(); - std::thread::spawn(move || { - tokio_run(async move { - let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()) - .serve(hyper::service::make_service_fn(|_| { async move { - Ok::<_, Infallible>(hyper::service::service_fn(move |_req| async move { - Ok::<_, Infallible>( - hyper::Response::new(hyper::Body::from("Hello World!")) - ) - })) - }})); - let _ = addr_tx.send(server.local_addr()); - server.await - }); - }); - (api, addr_rx.recv().unwrap()) - }}; - } - - #[test] - fn basic_localhost() { - let deadline = timestamp::now().add(Duration::from_millis(10_000)); - - // Performs an HTTP query to a background HTTP server. 
- - let (mut api, addr) = build_api_server!(); - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_write_body(id, &[], Some(deadline)).unwrap(); - - match api.response_wait(&[id], Some(deadline))[0] { - HttpRequestStatus::Finished(200) => {}, - v => panic!("Connecting to localhost failed: {:?}", v) - } - - let headers = api.response_headers(id); - assert!(headers.iter().any(|(h, _)| h.eq_ignore_ascii_case(b"Date"))); - - let mut buf = vec![0; 2048]; - let n = api.response_read_body(id, &mut buf, Some(deadline)).unwrap(); - assert_eq!(&buf[..n], b"Hello World!"); - } - - #[test] - fn request_start_invalid_call() { - let (mut api, addr) = build_api_server!(); - - match api.request_start("\0", &format!("http://{}", addr)) { - Err(()) => {} - Ok(_) => panic!() - }; - - match api.request_start("GET", "http://\0localhost") { - Err(()) => {} - Ok(_) => panic!() - }; - } - - #[test] - fn request_add_header_invalid_call() { - let (mut api, addr) = build_api_server!(); - - match api.request_add_header(HttpRequestId(0xdead), "Foo", "bar") { - Err(()) => {} - Ok(_) => panic!() - }; - - let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); - match api.request_add_header(id, "\0", "bar") { - Err(()) => {} - Ok(_) => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - match api.request_add_header(id, "Foo", "\0") { - Err(()) => {} - Ok(_) => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_add_header(id, "Foo", "Bar").unwrap(); - api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); - match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() - }; - - let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); - api.response_headers(id); - match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() - }; - - let id = api.request_start("GET", 
&format!("http://{}", addr)).unwrap(); - api.response_read_body(id, &mut [], None).unwrap(); - match api.request_add_header(id, "Foo2", "Bar") { - Err(()) => {} - Ok(_) => panic!() - }; - } - - #[test] - fn request_write_body_invalid_call() { - let (mut api, addr) = build_api_server!(); - - match api.request_write_body(HttpRequestId(0xdead), &[1, 2, 3], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - match api.request_write_body(HttpRequestId(0xdead), &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); - api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); - api.request_write_body(id, &[], None).unwrap(); - match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); - api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); - api.request_write_body(id, &[], None).unwrap(); - match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); - api.response_wait(&[id], None); - match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); - api.response_wait(&[id], None); - match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.response_headers(id); - match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} 
- _ => panic!() - }; - - let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); - api.response_headers(id); - match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.response_read_body(id, &mut [], None).unwrap(); - match api.request_write_body(id, &[1, 2, 3, 4], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.response_read_body(id, &mut [], None).unwrap(); - match api.request_write_body(id, &[], None) { - Err(HttpError::Invalid) => {} - _ => panic!() - }; - } - - #[test] - fn response_headers_invalid_call() { - let (mut api, addr) = build_api_server!(); - assert!(api.response_headers(HttpRequestId(0xdead)).is_empty()); - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - assert!(api.response_headers(id).is_empty()); - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_write_body(id, &[], None).unwrap(); - while api.response_headers(id).is_empty() { - std::thread::sleep(std::time::Duration::from_millis(100)); - } - - let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); - api.response_wait(&[id], None); - assert!(!api.response_headers(id).is_empty()); - - let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); - let mut buf = [0; 128]; - while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} - assert!(api.response_headers(id).is_empty()); - } - - #[test] - fn response_header_invalid_call() { - let (mut api, addr) = build_api_server!(); - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - assert!(api.response_headers(id).is_empty()); - - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - api.request_add_header(id, "Foo", "Bar").unwrap(); - 
assert!(api.response_headers(id).is_empty()); - - let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); - api.request_add_header(id, "Foo", "Bar").unwrap(); - api.request_write_body(id, &[], None).unwrap(); - // Note: this test actually sends out the request, and is supposed to test a situation - // where we haven't received any response yet. This test can theoretically fail if the - // HTTP response comes back faster than the kernel schedules our thread, but that is highly - // unlikely. - assert!(api.response_headers(id).is_empty()); - } - - #[test] - fn response_read_body_invalid_call() { - let (mut api, addr) = build_api_server!(); - let mut buf = [0; 512]; - - match api.response_read_body(HttpRequestId(0xdead), &mut buf, None) { - Err(HttpError::Invalid) => {} - _ => panic!() - } - - let id = api.request_start("GET", &format!("http://{}", addr)).unwrap(); - while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} - match api.response_read_body(id, &mut buf, None) { - Err(HttpError::Invalid) => {} - _ => panic!() - } - } - - #[test] - fn fuzzing() { - // Uses the API in random ways to try to trigger panics. - // Doesn't test some paths, such as waiting for multiple requests. Also doesn't test what - // happens if the server force-closes our socket. 
- - let (mut api, addr) = build_api_server!(); - - for _ in 0..50 { - let id = api.request_start("POST", &format!("http://{}", addr)).unwrap(); - - for _ in 0..250 { - match rand::random::() % 6 { - 0 => { let _ = api.request_add_header(id, "Foo", "Bar"); } - 1 => { let _ = api.request_write_body(id, &[1, 2, 3, 4], None); } - 2 => { let _ = api.request_write_body(id, &[], None); } - 3 => { let _ = api.response_wait(&[id], None); } - 4 => { let _ = api.response_headers(id); } - 5 => { - let mut buf = [0; 512]; - let _ = api.response_read_body(id, &mut buf, None); - } - 6 ..= 255 => unreachable!() - } - } - } - } + use super::http; + use crate::api::timestamp; + use core::convert::Infallible; + use sp_core::offchain::{Duration, HttpError, HttpRequestId, HttpRequestStatus}; + + // Returns an `HttpApi` whose worker is ran in the background, and a `SocketAddr` to an HTTP + // server that runs in the background as well. + macro_rules! build_api_server { + () => {{ + fn tokio_run(future: impl std::future::Future) { + let _ = tokio::runtime::Runtime::new().unwrap().block_on(future); + } + + // We spawn quite a bit of HTTP servers here due to how async API + // works for offchain workers, so be sure to raise the FD limit + // (particularly useful for macOS where the default soft limit may + // not be enough). 
+ fdlimit::raise_fd_limit(); + + let (api, worker) = http(); + std::thread::spawn(move || tokio_run(worker)); + + let (addr_tx, addr_rx) = std::sync::mpsc::channel(); + std::thread::spawn(move || { + tokio_run(async move { + let server = hyper::Server::bind(&"127.0.0.1:0".parse().unwrap()).serve( + hyper::service::make_service_fn(|_| async move { + Ok::<_, Infallible>(hyper::service::service_fn( + move |_req| async move { + Ok::<_, Infallible>(hyper::Response::new(hyper::Body::from( + "Hello World!", + ))) + }, + )) + }), + ); + let _ = addr_tx.send(server.local_addr()); + server.await + }); + }); + (api, addr_rx.recv().unwrap()) + }}; + } + + #[test] + fn basic_localhost() { + let deadline = timestamp::now().add(Duration::from_millis(10_000)); + + // Performs an HTTP query to a background HTTP server. + + let (mut api, addr) = build_api_server!(); + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.request_write_body(id, &[], Some(deadline)).unwrap(); + + match api.response_wait(&[id], Some(deadline))[0] { + HttpRequestStatus::Finished(200) => {} + v => panic!("Connecting to localhost failed: {:?}", v), + } + + let headers = api.response_headers(id); + assert!(headers.iter().any(|(h, _)| h.eq_ignore_ascii_case(b"Date"))); + + let mut buf = vec![0; 2048]; + let n = api + .response_read_body(id, &mut buf, Some(deadline)) + .unwrap(); + assert_eq!(&buf[..n], b"Hello World!"); + } + + #[test] + fn request_start_invalid_call() { + let (mut api, addr) = build_api_server!(); + + match api.request_start("\0", &format!("http://{}", addr)) { + Err(()) => {} + Ok(_) => panic!(), + }; + + match api.request_start("GET", "http://\0localhost") { + Err(()) => {} + Ok(_) => panic!(), + }; + } + + #[test] + fn request_add_header_invalid_call() { + let (mut api, addr) = build_api_server!(); + + match api.request_add_header(HttpRequestId(0xdead), "Foo", "bar") { + Err(()) => {} + Ok(_) => panic!(), + }; + + let id = api + .request_start("GET", 
&format!("http://{}", addr)) + .unwrap(); + match api.request_add_header(id, "\0", "bar") { + Err(()) => {} + Ok(_) => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + match api.request_add_header(id, "Foo", "\0") { + Err(()) => {} + Ok(_) => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.request_add_header(id, "Foo", "Bar").unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + match api.request_add_header(id, "Foo2", "Bar") { + Err(()) => {} + Ok(_) => panic!(), + }; + + let id = api + .request_start("GET", &format!("http://{}", addr)) + .unwrap(); + api.response_headers(id); + match api.request_add_header(id, "Foo2", "Bar") { + Err(()) => {} + Ok(_) => panic!(), + }; + + let id = api + .request_start("GET", &format!("http://{}", addr)) + .unwrap(); + api.response_read_body(id, &mut [], None).unwrap(); + match api.request_add_header(id, "Foo2", "Bar") { + Err(()) => {} + Ok(_) => panic!(), + }; + } + + #[test] + fn request_write_body_invalid_call() { + let (mut api, addr) = build_api_server!(); + + match api.request_write_body(HttpRequestId(0xdead), &[1, 2, 3], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + match api.request_write_body(HttpRequestId(0xdead), &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[], None).unwrap(); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.request_write_body(id, &[], 
None).unwrap(); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.response_wait(&[id], None); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.request_write_body(id, &[1, 2, 3, 4], None).unwrap(); + api.response_wait(&[id], None); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.response_headers(id); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("GET", &format!("http://{}", addr)) + .unwrap(); + api.response_headers(id); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.response_read_body(id, &mut [], None).unwrap(); + match api.request_write_body(id, &[1, 2, 3, 4], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.response_read_body(id, &mut [], None).unwrap(); + match api.request_write_body(id, &[], None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + }; + } + + #[test] + fn response_headers_invalid_call() { + let (mut api, addr) = build_api_server!(); + assert!(api.response_headers(HttpRequestId(0xdead)).is_empty()); + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + assert!(api.response_headers(id).is_empty()); + + let id = api + .request_start("POST", &format!("http://{}", addr)) + 
.unwrap(); + api.request_write_body(id, &[], None).unwrap(); + while api.response_headers(id).is_empty() { + std::thread::sleep(std::time::Duration::from_millis(100)); + } + + let id = api + .request_start("GET", &format!("http://{}", addr)) + .unwrap(); + api.response_wait(&[id], None); + assert!(!api.response_headers(id).is_empty()); + + let id = api + .request_start("GET", &format!("http://{}", addr)) + .unwrap(); + let mut buf = [0; 128]; + while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} + assert!(api.response_headers(id).is_empty()); + } + + #[test] + fn response_header_invalid_call() { + let (mut api, addr) = build_api_server!(); + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + assert!(api.response_headers(id).is_empty()); + + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + api.request_add_header(id, "Foo", "Bar").unwrap(); + assert!(api.response_headers(id).is_empty()); + + let id = api + .request_start("GET", &format!("http://{}", addr)) + .unwrap(); + api.request_add_header(id, "Foo", "Bar").unwrap(); + api.request_write_body(id, &[], None).unwrap(); + // Note: this test actually sends out the request, and is supposed to test a situation + // where we haven't received any response yet. This test can theoretically fail if the + // HTTP response comes back faster than the kernel schedules our thread, but that is highly + // unlikely. 
+ assert!(api.response_headers(id).is_empty()); + } + + #[test] + fn response_read_body_invalid_call() { + let (mut api, addr) = build_api_server!(); + let mut buf = [0; 512]; + + match api.response_read_body(HttpRequestId(0xdead), &mut buf, None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + } + + let id = api + .request_start("GET", &format!("http://{}", addr)) + .unwrap(); + while api.response_read_body(id, &mut buf, None).unwrap() != 0 {} + match api.response_read_body(id, &mut buf, None) { + Err(HttpError::Invalid) => {} + _ => panic!(), + } + } + + #[test] + fn fuzzing() { + // Uses the API in random ways to try to trigger panics. + // Doesn't test some paths, such as waiting for multiple requests. Also doesn't test what + // happens if the server force-closes our socket. + + let (mut api, addr) = build_api_server!(); + + for _ in 0..50 { + let id = api + .request_start("POST", &format!("http://{}", addr)) + .unwrap(); + + for _ in 0..250 { + match rand::random::() % 6 { + 0 => { + let _ = api.request_add_header(id, "Foo", "Bar"); + } + 1 => { + let _ = api.request_write_body(id, &[1, 2, 3, 4], None); + } + 2 => { + let _ = api.request_write_body(id, &[], None); + } + 3 => { + let _ = api.response_wait(&[id], None); + } + 4 => { + let _ = api.response_headers(id); + } + 5 => { + let mut buf = [0; 512]; + let _ = api.response_read_body(id, &mut buf, None); + } + 6..=255 => unreachable!(), + } + } + } + } } diff --git a/client/offchain/src/api/http_dummy.rs b/client/offchain/src/api/http_dummy.rs index 5ff77a1068..fc69b04b7a 100644 --- a/client/offchain/src/api/http_dummy.rs +++ b/client/offchain/src/api/http_dummy.rs @@ -16,12 +16,12 @@ //! Contains the same API as the `http` module, except that everything returns an error. 
-use sp_core::offchain::{HttpRequestId, Timestamp, HttpRequestStatus, HttpError}; +use sp_core::offchain::{HttpError, HttpRequestId, HttpRequestStatus, Timestamp}; use std::{future::Future, pin::Pin, task::Context, task::Poll}; /// Creates a pair of [`HttpApi`] and [`HttpWorker`]. pub fn http() -> (HttpApi, HttpWorker) { - (HttpApi, HttpWorker) + (HttpApi, HttpWorker) } /// Dummy implementation of HTTP capabilities. @@ -33,77 +33,75 @@ pub struct HttpApi; pub struct HttpWorker; impl HttpApi { - /// Mimics the corresponding method in the offchain API. - pub fn request_start( - &mut self, - _: &str, - _: &str - ) -> Result { - /// Because this always returns an error, none of the other methods should ever be called. - Err(()) - } - - /// Mimics the corresponding method in the offchain API. - pub fn request_add_header( - &mut self, - _: HttpRequestId, - _: &str, - _: &str - ) -> Result<(), ()> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } - - /// Mimics the corresponding method in the offchain API. - pub fn request_write_body( - &mut self, - _: HttpRequestId, - _: &[u8], - _: Option - ) -> Result<(), HttpError> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } - - /// Mimics the corresponding method in the offchain API. - pub fn response_wait( - &mut self, - requests: &[HttpRequestId], - _: Option - ) -> Vec { - if requests.is_empty() { - Vec::new() - } else { - unreachable!("Creating a request always fails, thus the list of requests should \ - always be empty; qed") - } - } - - /// Mimics the corresponding method in the offchain API. - pub fn response_headers( - &mut self, - _: HttpRequestId - ) -> Vec<(Vec, Vec)> { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } - - /// Mimics the corresponding method in the offchain API. 
- pub fn response_read_body( - &mut self, - _: HttpRequestId, - _: &mut [u8], - _: Option - ) -> Result { - unreachable!("Creating a request always fails, thus this function will \ - never be called; qed") - } + /// Mimics the corresponding method in the offchain API. + pub fn request_start(&mut self, _: &str, _: &str) -> Result { + /// Because this always returns an error, none of the other methods should ever be called. + Err(()) + } + + /// Mimics the corresponding method in the offchain API. + pub fn request_add_header(&mut self, _: HttpRequestId, _: &str, _: &str) -> Result<(), ()> { + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) + } + + /// Mimics the corresponding method in the offchain API. + pub fn request_write_body( + &mut self, + _: HttpRequestId, + _: &[u8], + _: Option, + ) -> Result<(), HttpError> { + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) + } + + /// Mimics the corresponding method in the offchain API. + pub fn response_wait( + &mut self, + requests: &[HttpRequestId], + _: Option, + ) -> Vec { + if requests.is_empty() { + Vec::new() + } else { + unreachable!( + "Creating a request always fails, thus the list of requests should \ + always be empty; qed" + ) + } + } + + /// Mimics the corresponding method in the offchain API. + pub fn response_headers(&mut self, _: HttpRequestId) -> Vec<(Vec, Vec)> { + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) + } + + /// Mimics the corresponding method in the offchain API. 
+ pub fn response_read_body( + &mut self, + _: HttpRequestId, + _: &mut [u8], + _: Option, + ) -> Result { + unreachable!( + "Creating a request always fails, thus this function will \ + never be called; qed" + ) + } } impl Future for HttpWorker { - type Output = (); + type Output = (); - fn poll(self: Pin<&mut Self>, _: &mut Context) -> Poll { - Poll::Ready(()) - } + fn poll(self: Pin<&mut Self>, _: &mut Context) -> Poll { + Poll::Ready(()) + } } diff --git a/client/offchain/src/api/timestamp.rs b/client/offchain/src/api/timestamp.rs index e5494fe70d..56c9f0b3cf 100644 --- a/client/offchain/src/api/timestamp.rs +++ b/client/offchain/src/api/timestamp.rs @@ -18,45 +18,48 @@ use sp_core::offchain::Timestamp; use std::convert::TryInto; -use std::time::{SystemTime, Duration}; +use std::time::{Duration, SystemTime}; /// Returns the current time as a `Timestamp`. pub fn now() -> Timestamp { - let now = SystemTime::now(); - let epoch_duration = now.duration_since(SystemTime::UNIX_EPOCH); - match epoch_duration { - Err(_) => { - // Current time is earlier than UNIX_EPOCH. - Timestamp::from_unix_millis(0) - }, - Ok(d) => { - let duration = d.as_millis(); - // Assuming overflow won't happen for a few hundred years. - Timestamp::from_unix_millis(duration.try_into() - .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed")) - } - } + let now = SystemTime::now(); + let epoch_duration = now.duration_since(SystemTime::UNIX_EPOCH); + match epoch_duration { + Err(_) => { + // Current time is earlier than UNIX_EPOCH. + Timestamp::from_unix_millis(0) + } + Ok(d) => { + let duration = d.as_millis(); + // Assuming overflow won't happen for a few hundred years. + Timestamp::from_unix_millis( + duration + .try_into() + .expect("epoch milliseconds won't overflow u64 for hundreds of years; qed"), + ) + } + } } /// Returns how a `Timestamp` compares to "now". /// /// In other words, returns `timestamp - now()`. 
pub fn timestamp_from_now(timestamp: Timestamp) -> Duration { - Duration::from_millis(timestamp.diff(&now()).millis()) + Duration::from_millis(timestamp.diff(&now()).millis()) } /// Converts the deadline into a `Future` that resolves when the deadline is reached. /// /// If `None`, returns a never-ending `Future`. pub fn deadline_to_future( - deadline: Option, + deadline: Option, ) -> futures::future::MaybeDone { - use futures::future; - - future::maybe_done(match deadline { - Some(deadline) => future::Either::Left( - futures_timer::Delay::new(timestamp_from_now(deadline)) - ), - None => future::Either::Right(future::pending()) - }) + use futures::future; + + future::maybe_done(match deadline { + Some(deadline) => { + future::Either::Left(futures_timer::Delay::new(timestamp_from_now(deadline))) + } + None => future::Either::Right(future::pending()), + }) } diff --git a/client/offchain/src/lib.rs b/client/offchain/src/lib.rs index 332e9f779a..cc87ce320e 100644 --- a/client/offchain/src/lib.rs +++ b/client/offchain/src/lib.rs @@ -35,14 +35,20 @@ use std::{fmt, marker::PhantomData, sync::Arc}; -use parking_lot::Mutex; -use threadpool::ThreadPool; -use sp_api::{ApiExt, ProvideRuntimeApi}; use futures::future::Future; use log::{debug, warn}; +use parking_lot::Mutex; use sc_network::NetworkStateInfo; -use sp_core::{offchain::{self, OffchainStorage}, ExecutionContext}; -use sp_runtime::{generic::BlockId, traits::{self, Header}}; +use sp_api::{ApiExt, ProvideRuntimeApi}; +use sp_core::{ + offchain::{self, OffchainStorage}, + ExecutionContext, +}; +use sp_runtime::{ + generic::BlockId, + traits::{self, Header}, +}; +use threadpool::ThreadPool; mod api; @@ -50,176 +56,176 @@ pub use sp_offchain::{OffchainWorkerApi, STORAGE_PREFIX}; /// An offchain workers manager. 
pub struct OffchainWorkers { - client: Arc, - db: Storage, - _block: PhantomData, - thread_pool: Mutex, + client: Arc, + db: Storage, + _block: PhantomData, + thread_pool: Mutex, } impl OffchainWorkers { - /// Creates new `OffchainWorkers`. - pub fn new(client: Arc, db: Storage) -> Self { - Self { - client, - db, - _block: PhantomData, - thread_pool: Mutex::new(ThreadPool::new(num_cpus::get())), - } - } + /// Creates new `OffchainWorkers`. + pub fn new(client: Arc, db: Storage) -> Self { + Self { + client, + db, + _block: PhantomData, + thread_pool: Mutex::new(ThreadPool::new(num_cpus::get())), + } + } } -impl fmt::Debug for OffchainWorkers< - Client, - Storage, - Block, -> { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - f.debug_tuple("OffchainWorkers").finish() - } +impl fmt::Debug for OffchainWorkers { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + f.debug_tuple("OffchainWorkers").finish() + } } -impl OffchainWorkers< - Client, - Storage, - Block, -> where - Block: traits::Block, - Client: ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: OffchainWorkerApi, - Storage: OffchainStorage + 'static, +impl OffchainWorkers +where + Block: traits::Block, + Client: ProvideRuntimeApi + Send + Sync + 'static, + Client::Api: OffchainWorkerApi, + Storage: OffchainStorage + 'static, { - /// Start the offchain workers after given block. 
- #[must_use] - pub fn on_block_imported( - &self, - header: &Block::Header, - network_state: Arc, - is_validator: bool, - ) -> impl Future { - let runtime = self.client.runtime_api(); - let at = BlockId::hash(header.hash()); - let has_api_v1 = runtime.has_api_with::, _>( - &at, |v| v == 1 - ); - let has_api_v2 = runtime.has_api_with::, _>( - &at, |v| v == 2 - ); - let version = match (has_api_v1, has_api_v2) { - (_, Ok(true)) => 2, - (Ok(true), _) => 1, - err => { - let help = "Consider turning off offchain workers if they are not part of your runtime."; - log::error!("Unsupported Offchain Worker API version: {:?}. {}.", err, help); - 0 - } - }; - debug!("Checking offchain workers at {:?}: version:{}", at, version); - if version > 0 { - let (api, runner) = api::AsyncApi::new( - self.db.clone(), - network_state.clone(), - is_validator, - ); - debug!("Spawning offchain workers at {:?}", at); - let header = header.clone(); - let client = self.client.clone(); - self.spawn_worker(move || { - let runtime = client.runtime_api(); - let api = Box::new(api); - debug!("Running offchain workers at {:?}", at); - let context = ExecutionContext::OffchainCall(Some( - (api, offchain::Capabilities::all()) - )); - let run = if version == 2 { - runtime.offchain_worker_with_context(&at, context, &header) - } else { - #[allow(deprecated)] - runtime.offchain_worker_before_version_2_with_context( - &at, context, *header.number() - ) - }; - if let Err(e) = run { - log::error!("Error running offchain workers at {:?}: {:?}", at, e); - } - }); - futures::future::Either::Left(runner.process()) - } else { - futures::future::Either::Right(futures::future::ready(())) - } - } - - /// Spawns a new offchain worker. - /// - /// We spawn offchain workers for each block in a separate thread, - /// since they can run for a significant amount of time - /// in a blocking fashion and we don't want to block the runtime. 
- /// - /// Note that we should avoid that if we switch to future-based runtime in the future, - /// alternatively: - fn spawn_worker(&self, f: impl FnOnce() -> () + Send + 'static) { - self.thread_pool.lock().execute(f); - } + /// Start the offchain workers after given block. + #[must_use] + pub fn on_block_imported( + &self, + header: &Block::Header, + network_state: Arc, + is_validator: bool, + ) -> impl Future { + let runtime = self.client.runtime_api(); + let at = BlockId::hash(header.hash()); + let has_api_v1 = + runtime.has_api_with::, _>(&at, |v| v == 1); + let has_api_v2 = + runtime.has_api_with::, _>(&at, |v| v == 2); + let version = match (has_api_v1, has_api_v2) { + (_, Ok(true)) => 2, + (Ok(true), _) => 1, + err => { + let help = + "Consider turning off offchain workers if they are not part of your runtime."; + log::error!( + "Unsupported Offchain Worker API version: {:?}. {}.", + err, + help + ); + 0 + } + }; + debug!("Checking offchain workers at {:?}: version:{}", at, version); + if version > 0 { + let (api, runner) = + api::AsyncApi::new(self.db.clone(), network_state.clone(), is_validator); + debug!("Spawning offchain workers at {:?}", at); + let header = header.clone(); + let client = self.client.clone(); + self.spawn_worker(move || { + let runtime = client.runtime_api(); + let api = Box::new(api); + debug!("Running offchain workers at {:?}", at); + let context = + ExecutionContext::OffchainCall(Some((api, offchain::Capabilities::all()))); + let run = if version == 2 { + runtime.offchain_worker_with_context(&at, context, &header) + } else { + #[allow(deprecated)] + runtime.offchain_worker_before_version_2_with_context( + &at, + context, + *header.number(), + ) + }; + if let Err(e) = run { + log::error!("Error running offchain workers at {:?}: {:?}", at, e); + } + }); + futures::future::Either::Left(runner.process()) + } else { + futures::future::Either::Right(futures::future::ready(())) + } + } + + /// Spawns a new offchain worker. 
+ /// + /// We spawn offchain workers for each block in a separate thread, + /// since they can run for a significant amount of time + /// in a blocking fashion and we don't want to block the runtime. + /// + /// Note that we should avoid that if we switch to future-based runtime in the future, + /// alternatively: + fn spawn_worker(&self, f: impl FnOnce() -> () + Send + 'static) { + self.thread_pool.lock().execute(f); + } } #[cfg(test)] mod tests { - use super::*; - use std::sync::Arc; - use sc_network::{Multiaddr, PeerId}; - use substrate_test_runtime_client::runtime::Block; - use sc_transaction_pool::{BasicPool, FullChainApi}; - use sp_transaction_pool::{TransactionPool, InPoolTransaction}; - use sc_client_api::ExecutorProvider; - - struct MockNetworkStateInfo(); - - impl NetworkStateInfo for MockNetworkStateInfo { - fn external_addresses(&self) -> Vec { - Vec::new() - } - - fn local_peer_id(&self) -> PeerId { - PeerId::random() - } - } - - struct TestPool(BasicPool, Block>); - - impl sp_transaction_pool::OffchainSubmitTransaction for TestPool { - fn submit_at( - &self, - at: &BlockId, - extrinsic: ::Extrinsic, - ) -> Result<(), ()> { - let source = sp_transaction_pool::TransactionSource::Local; - futures::executor::block_on(self.0.submit_one(&at, source, extrinsic)) - .map(|_| ()) - .map_err(|_| ()) - } - } - - #[test] - fn should_call_into_runtime_and_produce_extrinsic() { - // given - let _ = env_logger::try_init(); - let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new(TestPool(BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0)); - client.execution_extensions() - .register_transaction_pool(Arc::downgrade(&pool.clone()) as _); - let db = sc_client_db::offchain::LocalStorage::new_test(); - let network_state = Arc::new(MockNetworkStateInfo()); - let header = client.header(&BlockId::number(0)).unwrap().unwrap(); - - // when - let offchain = OffchainWorkers::new(client, db); - 
futures::executor::block_on(offchain.on_block_imported(&header, network_state, false)); - - // then - assert_eq!(pool.0.status().ready, 1); - assert_eq!(pool.0.ready().next().unwrap().is_propagable(), false); - } + use super::*; + use sc_client_api::ExecutorProvider; + use sc_network::{Multiaddr, PeerId}; + use sc_transaction_pool::{BasicPool, FullChainApi}; + use sp_transaction_pool::{InPoolTransaction, TransactionPool}; + use std::sync::Arc; + use substrate_test_runtime_client::runtime::Block; + + struct MockNetworkStateInfo(); + + impl NetworkStateInfo for MockNetworkStateInfo { + fn external_addresses(&self) -> Vec { + Vec::new() + } + + fn local_peer_id(&self) -> PeerId { + PeerId::random() + } + } + + struct TestPool( + BasicPool, Block>, + ); + + impl sp_transaction_pool::OffchainSubmitTransaction for TestPool { + fn submit_at( + &self, + at: &BlockId, + extrinsic: ::Extrinsic, + ) -> Result<(), ()> { + let source = sp_transaction_pool::TransactionSource::Local; + futures::executor::block_on(self.0.submit_one(&at, source, extrinsic)) + .map(|_| ()) + .map_err(|_| ()) + } + } + + #[test] + fn should_call_into_runtime_and_produce_extrinsic() { + // given + let _ = env_logger::try_init(); + let client = Arc::new(substrate_test_runtime_client::new()); + let pool = Arc::new(TestPool( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + )); + client + .execution_extensions() + .register_transaction_pool(Arc::downgrade(&pool.clone()) as _); + let db = sc_client_db::offchain::LocalStorage::new_test(); + let network_state = Arc::new(MockNetworkStateInfo()); + let header = client.header(&BlockId::number(0)).unwrap().unwrap(); + + // when + let offchain = OffchainWorkers::new(client, db); + futures::executor::block_on(offchain.on_block_imported(&header, network_state, false)); + + // then + assert_eq!(pool.0.status().ready, 1); + assert_eq!(pool.0.ready().next().unwrap().is_propagable(), false); + } } diff --git 
a/client/peerset/src/lib.rs b/client/peerset/src/lib.rs index 9376e9594b..e0e95520f4 100644 --- a/client/peerset/src/lib.rs +++ b/client/peerset/src/lib.rs @@ -19,13 +19,20 @@ mod peersstate; -use std::{collections::{HashSet, HashMap}, collections::VecDeque}; use futures::prelude::*; use log::{debug, error, trace}; use serde_json::json; -use std::{pin::Pin, task::{Context, Poll}, time::Duration}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + collections::VecDeque, + collections::{HashMap, HashSet}, +}; +use std::{ + pin::Pin, + task::{Context, Poll}, + time::Duration, +}; use wasm_timer::Instant; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; pub use libp2p::PeerId; @@ -41,102 +48,113 @@ const FORGET_AFTER: Duration = Duration::from_secs(3600); #[derive(Debug)] enum Action { - AddReservedPeer(PeerId), - RemoveReservedPeer(PeerId), - SetReservedOnly(bool), - ReportPeer(PeerId, ReputationChange), - SetPriorityGroup(String, HashSet), - AddToPriorityGroup(String, PeerId), - RemoveFromPriorityGroup(String, PeerId), + AddReservedPeer(PeerId), + RemoveReservedPeer(PeerId), + SetReservedOnly(bool), + ReportPeer(PeerId, ReputationChange), + SetPriorityGroup(String, HashSet), + AddToPriorityGroup(String, PeerId), + RemoveFromPriorityGroup(String, PeerId), } /// Description of a reputation adjustment for a node. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub struct ReputationChange { - /// Reputation delta. - pub value: i32, - /// Reason for reputation change. - pub reason: &'static str, + /// Reputation delta. + pub value: i32, + /// Reason for reputation change. + pub reason: &'static str, } impl ReputationChange { - /// New reputation change with given delta and reason. - pub const fn new(value: i32, reason: &'static str) -> ReputationChange { - ReputationChange { value, reason } - } - - /// New reputation change that forces minimum possible reputation. 
- pub const fn new_fatal(reason: &'static str) -> ReputationChange { - ReputationChange { value: i32::min_value(), reason } - } + /// New reputation change with given delta and reason. + pub const fn new(value: i32, reason: &'static str) -> ReputationChange { + ReputationChange { value, reason } + } + + /// New reputation change that forces minimum possible reputation. + pub const fn new_fatal(reason: &'static str) -> ReputationChange { + ReputationChange { + value: i32::min_value(), + reason, + } + } } /// Shared handle to the peer set manager (PSM). Distributed around the code. #[derive(Debug, Clone)] pub struct PeersetHandle { - tx: TracingUnboundedSender, + tx: TracingUnboundedSender, } impl PeersetHandle { - /// Adds a new reserved peer. The peerset will make an effort to always remain connected to - /// this peer. - /// - /// Has no effect if the node was already a reserved peer. - /// - /// > **Note**: Keep in mind that the networking has to know an address for this node, - /// > otherwise it will not be able to connect to it. - pub fn add_reserved_peer(&self, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddReservedPeer(peer_id)); - } - - /// Remove a previously-added reserved peer. - /// - /// Has no effect if the node was not a reserved peer. - pub fn remove_reserved_peer(&self, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveReservedPeer(peer_id)); - } - - /// Sets whether or not the peerset only has connections . - pub fn set_reserved_only(&self, reserved: bool) { - let _ = self.tx.unbounded_send(Action::SetReservedOnly(reserved)); - } - - /// Reports an adjustment to the reputation of the given peer. - pub fn report_peer(&self, peer_id: PeerId, score_diff: ReputationChange) { - let _ = self.tx.unbounded_send(Action::ReportPeer(peer_id, score_diff)); - } - - /// Modify a priority group. 
- pub fn set_priority_group(&self, group_id: String, peers: HashSet) { - let _ = self.tx.unbounded_send(Action::SetPriorityGroup(group_id, peers)); - } - - /// Add a peer to a priority group. - pub fn add_to_priority_group(&self, group_id: String, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::AddToPriorityGroup(group_id, peer_id)); - } - - /// Remove a peer from a priority group. - pub fn remove_from_priority_group(&self, group_id: String, peer_id: PeerId) { - let _ = self.tx.unbounded_send(Action::RemoveFromPriorityGroup(group_id, peer_id)); - } + /// Adds a new reserved peer. The peerset will make an effort to always remain connected to + /// this peer. + /// + /// Has no effect if the node was already a reserved peer. + /// + /// > **Note**: Keep in mind that the networking has to know an address for this node, + /// > otherwise it will not be able to connect to it. + pub fn add_reserved_peer(&self, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::AddReservedPeer(peer_id)); + } + + /// Remove a previously-added reserved peer. + /// + /// Has no effect if the node was not a reserved peer. + pub fn remove_reserved_peer(&self, peer_id: PeerId) { + let _ = self.tx.unbounded_send(Action::RemoveReservedPeer(peer_id)); + } + + /// Sets whether or not the peerset only has connections . + pub fn set_reserved_only(&self, reserved: bool) { + let _ = self.tx.unbounded_send(Action::SetReservedOnly(reserved)); + } + + /// Reports an adjustment to the reputation of the given peer. + pub fn report_peer(&self, peer_id: PeerId, score_diff: ReputationChange) { + let _ = self + .tx + .unbounded_send(Action::ReportPeer(peer_id, score_diff)); + } + + /// Modify a priority group. + pub fn set_priority_group(&self, group_id: String, peers: HashSet) { + let _ = self + .tx + .unbounded_send(Action::SetPriorityGroup(group_id, peers)); + } + + /// Add a peer to a priority group. 
+ pub fn add_to_priority_group(&self, group_id: String, peer_id: PeerId) { + let _ = self + .tx + .unbounded_send(Action::AddToPriorityGroup(group_id, peer_id)); + } + + /// Remove a peer from a priority group. + pub fn remove_from_priority_group(&self, group_id: String, peer_id: PeerId) { + let _ = self + .tx + .unbounded_send(Action::RemoveFromPriorityGroup(group_id, peer_id)); + } } /// Message that can be sent by the peer set manager (PSM). #[derive(Debug, PartialEq)] pub enum Message { - /// Request to open a connection to the given peer. From the point of view of the PSM, we are - /// immediately connected. - Connect(PeerId), + /// Request to open a connection to the given peer. From the point of view of the PSM, we are + /// immediately connected. + Connect(PeerId), - /// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`. - Drop(PeerId), + /// Drop the connection to the given peer, or cancel the connection attempt after a `Connect`. + Drop(PeerId), - /// Equivalent to `Connect` for the peer corresponding to this incoming index. - Accept(IncomingIndex), + /// Equivalent to `Connect` for the peer corresponding to this incoming index. + Accept(IncomingIndex), - /// Equivalent to `Drop` for the peer corresponding to this incoming index. - Reject(IncomingIndex), + /// Equivalent to `Drop` for the peer corresponding to this incoming index. + Reject(IncomingIndex), } /// Opaque identifier for an incoming connection. Allocated by the network. @@ -144,34 +162,34 @@ pub enum Message { pub struct IncomingIndex(pub u64); impl From for IncomingIndex { - fn from(val: u64) -> IncomingIndex { - IncomingIndex(val) - } + fn from(val: u64) -> IncomingIndex { + IncomingIndex(val) + } } /// Configuration to pass when creating the peer set manager. #[derive(Debug)] pub struct PeersetConfig { - /// Maximum number of ingoing links to peers. - pub in_peers: u32, - - /// Maximum number of outgoing links to peers. 
- pub out_peers: u32, - - /// List of bootstrap nodes to initialize the peer with. - /// - /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. - pub bootnodes: Vec, - - /// If true, we only accept nodes in [`PeersetConfig::priority_groups`]. - pub reserved_only: bool, - - /// Lists of nodes we should always be connected to. - /// - /// > **Note**: Keep in mind that the networking has to know an address for these nodes, - /// > otherwise it will not be able to connect to them. - pub priority_groups: Vec<(String, HashSet)>, + /// Maximum number of ingoing links to peers. + pub in_peers: u32, + + /// Maximum number of outgoing links to peers. + pub out_peers: u32, + + /// List of bootstrap nodes to initialize the peer with. + /// + /// > **Note**: Keep in mind that the networking has to know an address for these nodes, + /// > otherwise it will not be able to connect to them. + pub bootnodes: Vec, + + /// If true, we only accept nodes in [`PeersetConfig::priority_groups`]. + pub reserved_only: bool, + + /// Lists of nodes we should always be connected to. + /// + /// > **Note**: Keep in mind that the networking has to know an address for these nodes, + /// > otherwise it will not be able to connect to them. + pub priority_groups: Vec<(String, HashSet)>, } /// Side of the peer set manager owned by the network. In other words, the "receiving" side. @@ -180,535 +198,572 @@ pub struct PeersetConfig { /// errors. #[derive(Debug)] pub struct Peerset { - data: peersstate::PeersState, - /// If true, we only accept reserved nodes. - reserved_only: bool, - /// Receiver for messages from the `PeersetHandle` and from `tx`. - rx: TracingUnboundedReceiver, - /// Sending side of `rx`. - tx: TracingUnboundedSender, - /// Queue of messages to be emitted when the `Peerset` is polled. - message_queue: VecDeque, - /// When the `Peerset` was created. 
- created: Instant, - /// Last time when we updated the reputations of connected nodes. - latest_time_update: Instant, + data: peersstate::PeersState, + /// If true, we only accept reserved nodes. + reserved_only: bool, + /// Receiver for messages from the `PeersetHandle` and from `tx`. + rx: TracingUnboundedReceiver, + /// Sending side of `rx`. + tx: TracingUnboundedSender, + /// Queue of messages to be emitted when the `Peerset` is polled. + message_queue: VecDeque, + /// When the `Peerset` was created. + created: Instant, + /// Last time when we updated the reputations of connected nodes. + latest_time_update: Instant, } impl Peerset { - /// Builds a new peerset from the given configuration. - pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) { - let (tx, rx) = tracing_unbounded("mpsc_peerset_messages"); - - let handle = PeersetHandle { - tx: tx.clone(), - }; - - let now = Instant::now(); - - let mut peerset = Peerset { - data: peersstate::PeersState::new(config.in_peers, config.out_peers, config.reserved_only), - tx, - rx, - reserved_only: config.reserved_only, - message_queue: VecDeque::new(), - created: now, - latest_time_update: now, - }; - - for (group, nodes) in config.priority_groups { - peerset.data.set_priority_group(&group, nodes); - } - - for peer_id in config.bootnodes { - if let peersstate::Peer::Unknown(entry) = peerset.data.peer(&peer_id) { - entry.discover(); - } else { - debug!(target: "peerset", "Duplicate bootnode in config: {:?}", peer_id); - } - } - - peerset.alloc_slots(); - (peerset, handle) - } - - fn on_add_reserved_peer(&mut self, peer_id: PeerId) { - let mut reserved = self.data.get_priority_group(RESERVED_NODES).unwrap_or_default(); - reserved.insert(peer_id); - self.data.set_priority_group(RESERVED_NODES, reserved); - self.alloc_slots(); - } - - fn on_remove_reserved_peer(&mut self, peer_id: PeerId) { - let mut reserved = self.data.get_priority_group(RESERVED_NODES).unwrap_or_default(); - 
reserved.remove(&peer_id); - self.data.set_priority_group(RESERVED_NODES, reserved); - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(peer) => { - if self.reserved_only { - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } - } - peersstate::Peer::NotConnected(_) => {}, - peersstate::Peer::Unknown(_) => {}, - } - } - - fn on_set_reserved_only(&mut self, reserved_only: bool) { - self.reserved_only = reserved_only; - self.data.set_priority_only(reserved_only); - - if self.reserved_only { - // Disconnect non-reserved nodes. - let reserved = self.data.get_priority_group(RESERVED_NODES).unwrap_or_default(); - for peer_id in self.data.connected_peers().cloned().collect::>().into_iter() { - let peer = self.data.peer(&peer_id).into_connected() - .expect("We are enumerating connected peers, therefore the peer is connected; qed"); - if !reserved.contains(&peer_id) { - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } - } - } else { - self.alloc_slots(); - } - } - - fn on_set_priority_group(&mut self, group_id: &str, peers: HashSet) { - self.data.set_priority_group(group_id, peers); - self.alloc_slots(); - } - - fn on_add_to_priority_group(&mut self, group_id: &str, peer_id: PeerId) { - self.data.add_to_priority_group(group_id, peer_id); - self.alloc_slots(); - } - - fn on_remove_from_priority_group(&mut self, group_id: &str, peer_id: PeerId) { - self.data.remove_from_priority_group(group_id, &peer_id); - self.alloc_slots(); - } - - fn on_report_peer(&mut self, peer_id: PeerId, change: ReputationChange) { - // We want reputations to be up-to-date before adjusting them. - self.update_time(); - - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(mut peer) => { - peer.add_reputation(change.value); - if peer.reputation() < BANNED_THRESHOLD { - debug!(target: "peerset", "Report {}: {:+} to {}. 
Reason: {}, Disconnecting", - peer_id, change.value, peer.reputation(), change.reason - ); - peer.disconnect(); - self.message_queue.push_back(Message::Drop(peer_id)); - } else { - trace!(target: "peerset", "Report {}: {:+} to {}. Reason: {}", - peer_id, change.value, peer.reputation(), change.reason - ); - } - }, - peersstate::Peer::NotConnected(mut peer) => peer.add_reputation(change.value), - peersstate::Peer::Unknown(peer) => peer.discover().add_reputation(change.value), - } - } - - /// Updates the value of `self.latest_time_update` and performs all the updates that happen - /// over time, such as reputation increases for staying connected. - fn update_time(&mut self) { - let now = Instant::now(); - - // We basically do `(now - self.latest_update).as_secs()`, except that by the way we do it - // we know that we're not going to miss seconds because of rounding to integers. - let secs_diff = { - let elapsed_latest = self.latest_time_update - self.created; - let elapsed_now = now - self.created; - self.latest_time_update = now; - elapsed_now.as_secs() - elapsed_latest.as_secs() - }; - - // For each elapsed second, move the node reputation towards zero. - // If we multiply each second the reputation by `k` (where `k` is between 0 and 1), it - // takes `ln(0.5) / ln(k)` seconds to reduce the reputation by half. Use this formula to - // empirically determine a value of `k` that looks correct. - for _ in 0..secs_diff { - for peer_id in self.data.peers().cloned().collect::>() { - // We use `k = 0.98`, so we divide by `50`. With that value, it takes 34.3 seconds - // to reduce the reputation by half. 
- fn reput_tick(reput: i32) -> i32 { - let mut diff = reput / 50; - if diff == 0 && reput < 0 { - diff = -1; - } else if diff == 0 && reput > 0 { - diff = 1; - } - reput.saturating_sub(diff) - } - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(mut peer) => { - let before = peer.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer.set_reputation(after) - } - peersstate::Peer::NotConnected(mut peer) => { - if peer.reputation() == 0 && - peer.last_connected_or_discovered() + FORGET_AFTER < now - { - peer.forget_peer(); - } else { - let before = peer.reputation(); - let after = reput_tick(before); - trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); - peer.set_reputation(after) - } - } - peersstate::Peer::Unknown(_) => unreachable!("We iterate over known peers; qed") - }; - } - } - } - - /// Try to fill available out slots with nodes. - fn alloc_slots(&mut self) { - self.update_time(); - - // Try to grab the next node to attempt to connect to. - while let Some(next) = { - if self.reserved_only { - self.data.priority_not_connected_peer_from_group(RESERVED_NODES) - } else { - self.data.priority_not_connected_peer() - } - } { - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. - } - } - - loop { - if self.reserved_only { - break - } - - // Try to grab the next node to attempt to connect to. - let next = match self.data.highest_not_connected_peer() { - Some(p) => p, - None => break, // No known node to add. - }; - - // Don't connect to nodes with an abysmal reputation. - if next.reputation() < BANNED_THRESHOLD { - break; - } - - match next.try_outgoing() { - Ok(conn) => self.message_queue.push_back(Message::Connect(conn.into_peer_id())), - Err(_) => break, // No more slots available. 
- } - } - } - - /// Indicate that we received an incoming connection. Must be answered either with - /// a corresponding `Accept` or `Reject`, except if we were already connected to this peer. - /// - /// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming - /// connection implicitly means `Connect`, but incoming connections aren't cancelled by - /// `dropped`. - /// - // Implementation note: because of concurrency issues, it is possible that we push a `Connect` - // message to the output channel with a `PeerId`, and that `incoming` gets called with the same - // `PeerId` before that message has been read by the user. In this situation we must not answer. - pub fn incoming(&mut self, peer_id: PeerId, index: IncomingIndex) { - trace!(target: "peerset", "Incoming {:?}", peer_id); - self.update_time(); - - let not_connected = match self.data.peer(&peer_id) { - // If we're already connected, don't answer, as the docs mention. - peersstate::Peer::Connected(_) => return, - peersstate::Peer::NotConnected(mut entry) => { - entry.bump_last_connected_or_discovered(); - entry - }, - peersstate::Peer::Unknown(entry) => entry.discover(), - }; - - if not_connected.reputation() < BANNED_THRESHOLD { - self.message_queue.push_back(Message::Reject(index)); - return - } - - match not_connected.try_accept_incoming() { - Ok(_) => self.message_queue.push_back(Message::Accept(index)), - Err(_) => self.message_queue.push_back(Message::Reject(index)), - } - } - - /// Indicate that we dropped an active connection with a peer, or that we failed to connect. - /// - /// Must only be called after the PSM has either generated a `Connect` message with this - /// `PeerId`, or accepted an incoming connection with this `PeerId`. - pub fn dropped(&mut self, peer_id: PeerId) { - trace!(target: "peerset", "Dropping {:?}", peer_id); - - // We want reputations to be up-to-date before adjusting them. 
- self.update_time(); - - match self.data.peer(&peer_id) { - peersstate::Peer::Connected(mut entry) => { - // Decrease the node's reputation so that we don't try it again and again and again. - entry.add_reputation(DISCONNECT_REPUTATION_CHANGE); - entry.disconnect(); - } - peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => - error!(target: "peerset", "Received dropped() for non-connected node"), - } - - self.alloc_slots(); - } - - /// Adds discovered peer ids to the PSM. - /// - /// > **Note**: There is no equivalent "expired" message, meaning that it is the responsibility - /// > of the PSM to remove `PeerId`s that fail to dial too often. - pub fn discovered>(&mut self, peer_ids: I) { - let mut discovered_any = false; - - for peer_id in peer_ids { - if let peersstate::Peer::Unknown(entry) = self.data.peer(&peer_id) { - entry.discover(); - discovered_any = true; - } - } - - if discovered_any { - self.alloc_slots(); - } - } - - /// Reports an adjustment to the reputation of the given peer. - pub fn report_peer(&mut self, peer_id: PeerId, score_diff: ReputationChange) { - // We don't immediately perform the adjustments in order to have state consistency. We - // don't want the reporting here to take priority over messages sent using the - // `PeersetHandle`. - let _ = self.tx.unbounded_send(Action::ReportPeer(peer_id, score_diff)); - } - - /// Produces a JSON object containing the state of the peerset manager, for debugging purposes. 
- pub fn debug_info(&mut self) -> serde_json::Value { - self.update_time(); - - json!({ - "nodes": self.data.peers().cloned().collect::>().into_iter().map(|peer_id| { - let state = match self.data.peer(&peer_id) { - peersstate::Peer::Connected(entry) => json!({ - "connected": true, - "reputation": entry.reputation() - }), - peersstate::Peer::NotConnected(entry) => json!({ - "connected": false, - "reputation": entry.reputation() - }), - peersstate::Peer::Unknown(_) => - unreachable!("We iterate over the known peers; QED") - }; - - (peer_id.to_base58(), state) - }).collect::>(), - "reserved_only": self.reserved_only, - "message_queue": self.message_queue.len(), - }) - } - - /// Returns the number of peers that we have discovered. - pub fn num_discovered_peers(&self) -> usize { - self.data.peers().len() - } - - /// Returns priority group by id. - pub fn get_priority_group(&self, group_id: &str) -> Option> { - self.data.get_priority_group(group_id) - } + /// Builds a new peerset from the given configuration. 
+ pub fn from_config(config: PeersetConfig) -> (Peerset, PeersetHandle) { + let (tx, rx) = tracing_unbounded("mpsc_peerset_messages"); + + let handle = PeersetHandle { tx: tx.clone() }; + + let now = Instant::now(); + + let mut peerset = Peerset { + data: peersstate::PeersState::new( + config.in_peers, + config.out_peers, + config.reserved_only, + ), + tx, + rx, + reserved_only: config.reserved_only, + message_queue: VecDeque::new(), + created: now, + latest_time_update: now, + }; + + for (group, nodes) in config.priority_groups { + peerset.data.set_priority_group(&group, nodes); + } + + for peer_id in config.bootnodes { + if let peersstate::Peer::Unknown(entry) = peerset.data.peer(&peer_id) { + entry.discover(); + } else { + debug!(target: "peerset", "Duplicate bootnode in config: {:?}", peer_id); + } + } + + peerset.alloc_slots(); + (peerset, handle) + } + + fn on_add_reserved_peer(&mut self, peer_id: PeerId) { + let mut reserved = self + .data + .get_priority_group(RESERVED_NODES) + .unwrap_or_default(); + reserved.insert(peer_id); + self.data.set_priority_group(RESERVED_NODES, reserved); + self.alloc_slots(); + } + + fn on_remove_reserved_peer(&mut self, peer_id: PeerId) { + let mut reserved = self + .data + .get_priority_group(RESERVED_NODES) + .unwrap_or_default(); + reserved.remove(&peer_id); + self.data.set_priority_group(RESERVED_NODES, reserved); + match self.data.peer(&peer_id) { + peersstate::Peer::Connected(peer) => { + if self.reserved_only { + peer.disconnect(); + self.message_queue.push_back(Message::Drop(peer_id)); + } + } + peersstate::Peer::NotConnected(_) => {} + peersstate::Peer::Unknown(_) => {} + } + } + + fn on_set_reserved_only(&mut self, reserved_only: bool) { + self.reserved_only = reserved_only; + self.data.set_priority_only(reserved_only); + + if self.reserved_only { + // Disconnect non-reserved nodes. 
+ let reserved = self + .data + .get_priority_group(RESERVED_NODES) + .unwrap_or_default(); + for peer_id in self + .data + .connected_peers() + .cloned() + .collect::>() + .into_iter() + { + let peer = self.data.peer(&peer_id).into_connected().expect( + "We are enumerating connected peers, therefore the peer is connected; qed", + ); + if !reserved.contains(&peer_id) { + peer.disconnect(); + self.message_queue.push_back(Message::Drop(peer_id)); + } + } + } else { + self.alloc_slots(); + } + } + + fn on_set_priority_group(&mut self, group_id: &str, peers: HashSet) { + self.data.set_priority_group(group_id, peers); + self.alloc_slots(); + } + + fn on_add_to_priority_group(&mut self, group_id: &str, peer_id: PeerId) { + self.data.add_to_priority_group(group_id, peer_id); + self.alloc_slots(); + } + + fn on_remove_from_priority_group(&mut self, group_id: &str, peer_id: PeerId) { + self.data.remove_from_priority_group(group_id, &peer_id); + self.alloc_slots(); + } + + fn on_report_peer(&mut self, peer_id: PeerId, change: ReputationChange) { + // We want reputations to be up-to-date before adjusting them. + self.update_time(); + + match self.data.peer(&peer_id) { + peersstate::Peer::Connected(mut peer) => { + peer.add_reputation(change.value); + if peer.reputation() < BANNED_THRESHOLD { + debug!(target: "peerset", "Report {}: {:+} to {}. Reason: {}, Disconnecting", + peer_id, change.value, peer.reputation(), change.reason + ); + peer.disconnect(); + self.message_queue.push_back(Message::Drop(peer_id)); + } else { + trace!(target: "peerset", "Report {}: {:+} to {}. 
Reason: {}", + peer_id, change.value, peer.reputation(), change.reason + ); + } + } + peersstate::Peer::NotConnected(mut peer) => peer.add_reputation(change.value), + peersstate::Peer::Unknown(peer) => peer.discover().add_reputation(change.value), + } + } + + /// Updates the value of `self.latest_time_update` and performs all the updates that happen + /// over time, such as reputation increases for staying connected. + fn update_time(&mut self) { + let now = Instant::now(); + + // We basically do `(now - self.latest_update).as_secs()`, except that by the way we do it + // we know that we're not going to miss seconds because of rounding to integers. + let secs_diff = { + let elapsed_latest = self.latest_time_update - self.created; + let elapsed_now = now - self.created; + self.latest_time_update = now; + elapsed_now.as_secs() - elapsed_latest.as_secs() + }; + + // For each elapsed second, move the node reputation towards zero. + // If we multiply each second the reputation by `k` (where `k` is between 0 and 1), it + // takes `ln(0.5) / ln(k)` seconds to reduce the reputation by half. Use this formula to + // empirically determine a value of `k` that looks correct. + for _ in 0..secs_diff { + for peer_id in self.data.peers().cloned().collect::>() { + // We use `k = 0.98`, so we divide by `50`. With that value, it takes 34.3 seconds + // to reduce the reputation by half. 
+ fn reput_tick(reput: i32) -> i32 { + let mut diff = reput / 50; + if diff == 0 && reput < 0 { + diff = -1; + } else if diff == 0 && reput > 0 { + diff = 1; + } + reput.saturating_sub(diff) + } + match self.data.peer(&peer_id) { + peersstate::Peer::Connected(mut peer) => { + let before = peer.reputation(); + let after = reput_tick(before); + trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); + peer.set_reputation(after) + } + peersstate::Peer::NotConnected(mut peer) => { + if peer.reputation() == 0 + && peer.last_connected_or_discovered() + FORGET_AFTER < now + { + peer.forget_peer(); + } else { + let before = peer.reputation(); + let after = reput_tick(before); + trace!(target: "peerset", "Fleeting {}: {} -> {}", peer_id, before, after); + peer.set_reputation(after) + } + } + peersstate::Peer::Unknown(_) => { + unreachable!("We iterate over known peers; qed") + } + }; + } + } + } + + /// Try to fill available out slots with nodes. + fn alloc_slots(&mut self) { + self.update_time(); + + // Try to grab the next node to attempt to connect to. + while let Some(next) = { + if self.reserved_only { + self.data + .priority_not_connected_peer_from_group(RESERVED_NODES) + } else { + self.data.priority_not_connected_peer() + } + } { + match next.try_outgoing() { + Ok(conn) => self + .message_queue + .push_back(Message::Connect(conn.into_peer_id())), + Err(_) => break, // No more slots available. + } + } + + loop { + if self.reserved_only { + break; + } + + // Try to grab the next node to attempt to connect to. + let next = match self.data.highest_not_connected_peer() { + Some(p) => p, + None => break, // No known node to add. + }; + + // Don't connect to nodes with an abysmal reputation. + if next.reputation() < BANNED_THRESHOLD { + break; + } + + match next.try_outgoing() { + Ok(conn) => self + .message_queue + .push_back(Message::Connect(conn.into_peer_id())), + Err(_) => break, // No more slots available. 
+ } + } + } + + /// Indicate that we received an incoming connection. Must be answered either with + /// a corresponding `Accept` or `Reject`, except if we were already connected to this peer. + /// + /// Note that this mechanism is orthogonal to `Connect`/`Drop`. Accepting an incoming + /// connection implicitly means `Connect`, but incoming connections aren't cancelled by + /// `dropped`. + /// + // Implementation note: because of concurrency issues, it is possible that we push a `Connect` + // message to the output channel with a `PeerId`, and that `incoming` gets called with the same + // `PeerId` before that message has been read by the user. In this situation we must not answer. + pub fn incoming(&mut self, peer_id: PeerId, index: IncomingIndex) { + trace!(target: "peerset", "Incoming {:?}", peer_id); + self.update_time(); + + let not_connected = match self.data.peer(&peer_id) { + // If we're already connected, don't answer, as the docs mention. + peersstate::Peer::Connected(_) => return, + peersstate::Peer::NotConnected(mut entry) => { + entry.bump_last_connected_or_discovered(); + entry + } + peersstate::Peer::Unknown(entry) => entry.discover(), + }; + + if not_connected.reputation() < BANNED_THRESHOLD { + self.message_queue.push_back(Message::Reject(index)); + return; + } + + match not_connected.try_accept_incoming() { + Ok(_) => self.message_queue.push_back(Message::Accept(index)), + Err(_) => self.message_queue.push_back(Message::Reject(index)), + } + } + + /// Indicate that we dropped an active connection with a peer, or that we failed to connect. + /// + /// Must only be called after the PSM has either generated a `Connect` message with this + /// `PeerId`, or accepted an incoming connection with this `PeerId`. + pub fn dropped(&mut self, peer_id: PeerId) { + trace!(target: "peerset", "Dropping {:?}", peer_id); + + // We want reputations to be up-to-date before adjusting them. 
+ self.update_time(); + + match self.data.peer(&peer_id) { + peersstate::Peer::Connected(mut entry) => { + // Decrease the node's reputation so that we don't try it again and again and again. + entry.add_reputation(DISCONNECT_REPUTATION_CHANGE); + entry.disconnect(); + } + peersstate::Peer::NotConnected(_) | peersstate::Peer::Unknown(_) => { + error!(target: "peerset", "Received dropped() for non-connected node") + } + } + + self.alloc_slots(); + } + + /// Adds discovered peer ids to the PSM. + /// + /// > **Note**: There is no equivalent "expired" message, meaning that it is the responsibility + /// > of the PSM to remove `PeerId`s that fail to dial too often. + pub fn discovered>(&mut self, peer_ids: I) { + let mut discovered_any = false; + + for peer_id in peer_ids { + if let peersstate::Peer::Unknown(entry) = self.data.peer(&peer_id) { + entry.discover(); + discovered_any = true; + } + } + + if discovered_any { + self.alloc_slots(); + } + } + + /// Reports an adjustment to the reputation of the given peer. + pub fn report_peer(&mut self, peer_id: PeerId, score_diff: ReputationChange) { + // We don't immediately perform the adjustments in order to have state consistency. We + // don't want the reporting here to take priority over messages sent using the + // `PeersetHandle`. + let _ = self + .tx + .unbounded_send(Action::ReportPeer(peer_id, score_diff)); + } + + /// Produces a JSON object containing the state of the peerset manager, for debugging purposes. 
+ pub fn debug_info(&mut self) -> serde_json::Value { + self.update_time(); + + json!({ + "nodes": self.data.peers().cloned().collect::>().into_iter().map(|peer_id| { + let state = match self.data.peer(&peer_id) { + peersstate::Peer::Connected(entry) => json!({ + "connected": true, + "reputation": entry.reputation() + }), + peersstate::Peer::NotConnected(entry) => json!({ + "connected": false, + "reputation": entry.reputation() + }), + peersstate::Peer::Unknown(_) => + unreachable!("We iterate over the known peers; QED") + }; + + (peer_id.to_base58(), state) + }).collect::>(), + "reserved_only": self.reserved_only, + "message_queue": self.message_queue.len(), + }) + } + + /// Returns the number of peers that we have discovered. + pub fn num_discovered_peers(&self) -> usize { + self.data.peers().len() + } + + /// Returns priority group by id. + pub fn get_priority_group(&self, group_id: &str) -> Option> { + self.data.get_priority_group(group_id) + } } impl Stream for Peerset { - type Item = Message; - - fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - loop { - if let Some(message) = self.message_queue.pop_front() { - return Poll::Ready(Some(message)); - } - - let action = match Stream::poll_next(Pin::new(&mut self.rx), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Some(event)) => event, - Poll::Ready(None) => return Poll::Pending, - }; - - match action { - Action::AddReservedPeer(peer_id) => - self.on_add_reserved_peer(peer_id), - Action::RemoveReservedPeer(peer_id) => - self.on_remove_reserved_peer(peer_id), - Action::SetReservedOnly(reserved) => - self.on_set_reserved_only(reserved), - Action::ReportPeer(peer_id, score_diff) => - self.on_report_peer(peer_id, score_diff), - Action::SetPriorityGroup(group_id, peers) => - self.on_set_priority_group(&group_id, peers), - Action::AddToPriorityGroup(group_id, peer_id) => - self.on_add_to_priority_group(&group_id, peer_id), - Action::RemoveFromPriorityGroup(group_id, peer_id) => - 
self.on_remove_from_priority_group(&group_id, peer_id), - } - } - } + type Item = Message; + + fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + loop { + if let Some(message) = self.message_queue.pop_front() { + return Poll::Ready(Some(message)); + } + + let action = match Stream::poll_next(Pin::new(&mut self.rx), cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Some(event)) => event, + Poll::Ready(None) => return Poll::Pending, + }; + + match action { + Action::AddReservedPeer(peer_id) => self.on_add_reserved_peer(peer_id), + Action::RemoveReservedPeer(peer_id) => self.on_remove_reserved_peer(peer_id), + Action::SetReservedOnly(reserved) => self.on_set_reserved_only(reserved), + Action::ReportPeer(peer_id, score_diff) => self.on_report_peer(peer_id, score_diff), + Action::SetPriorityGroup(group_id, peers) => { + self.on_set_priority_group(&group_id, peers) + } + Action::AddToPriorityGroup(group_id, peer_id) => { + self.on_add_to_priority_group(&group_id, peer_id) + } + Action::RemoveFromPriorityGroup(group_id, peer_id) => { + self.on_remove_from_priority_group(&group_id, peer_id) + } + } + } + } } #[cfg(test)] mod tests { - use libp2p::PeerId; - use futures::prelude::*; - use super::{PeersetConfig, Peerset, Message, IncomingIndex, ReputationChange, BANNED_THRESHOLD}; - use std::{pin::Pin, task::Poll, thread, time::Duration}; - - fn assert_messages(mut peerset: Peerset, messages: Vec) -> Peerset { - for expected_message in messages { - let (message, p) = next_message(peerset).expect("expected message"); - assert_eq!(message, expected_message); - peerset = p; - } - assert!(peerset.message_queue.is_empty(), peerset.message_queue); - peerset - } - - fn next_message(mut peerset: Peerset) -> Result<(Message, Peerset), ()> { - let next = futures::executor::block_on_stream(&mut peerset).next(); - let message = next.ok_or_else(|| ())?; - Ok((message, peerset)) - } - - #[test] - fn test_peerset_add_reserved_peer() { - let bootnode = 
PeerId::random(); - let reserved_peer = PeerId::random(); - let reserved_peer2 = PeerId::random(); - let config = PeersetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode], - reserved_only: true, - priority_groups: Vec::new(), - }; - - let (peerset, handle) = Peerset::from_config(config); - handle.add_reserved_peer(reserved_peer.clone()); - handle.add_reserved_peer(reserved_peer2.clone()); - - assert_messages(peerset, vec![ - Message::Connect(reserved_peer), - Message::Connect(reserved_peer2) - ]); - } - - #[test] - fn test_peerset_incoming() { - let bootnode = PeerId::random(); - let incoming = PeerId::random(); - let incoming2 = PeerId::random(); - let incoming3 = PeerId::random(); - let ii = IncomingIndex(1); - let ii2 = IncomingIndex(2); - let ii3 = IncomingIndex(3); - let ii4 = IncomingIndex(3); - let config = PeersetConfig { - in_peers: 2, - out_peers: 1, - bootnodes: vec![bootnode.clone()], - reserved_only: false, - priority_groups: Vec::new(), - }; - - let (mut peerset, _handle) = Peerset::from_config(config); - peerset.incoming(incoming.clone(), ii); - peerset.incoming(incoming.clone(), ii4); - peerset.incoming(incoming2.clone(), ii2); - peerset.incoming(incoming3.clone(), ii3); - - assert_messages(peerset, vec![ - Message::Connect(bootnode.clone()), - Message::Accept(ii), - Message::Accept(ii2), - Message::Reject(ii3), - ]); - } - - #[test] - fn test_peerset_discovered() { - let bootnode = PeerId::random(); - let discovered = PeerId::random(); - let discovered2 = PeerId::random(); - let config = PeersetConfig { - in_peers: 0, - out_peers: 2, - bootnodes: vec![bootnode.clone()], - reserved_only: false, - priority_groups: vec![], - }; - - let (mut peerset, _handle) = Peerset::from_config(config); - peerset.discovered(Some(discovered.clone())); - peerset.discovered(Some(discovered.clone())); - peerset.discovered(Some(discovered2)); - - assert_messages(peerset, vec![ - Message::Connect(bootnode), - Message::Connect(discovered), - ]); - } - - 
#[test] - fn test_peerset_banned() { - let (mut peerset, handle) = Peerset::from_config(PeersetConfig { - in_peers: 25, - out_peers: 25, - bootnodes: vec![], - reserved_only: false, - priority_groups: vec![], - }); - - // We ban a node by setting its reputation under the threshold. - let peer_id = PeerId::random(); - handle.report_peer(peer_id.clone(), ReputationChange::new(BANNED_THRESHOLD - 1, "")); - - let fut = futures::future::poll_fn(move |cx| { - // We need one polling for the message to be processed. - assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); - - // Check that an incoming connection from that node gets refused. - peerset.incoming(peer_id.clone(), IncomingIndex(1)); - if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { - assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); - } else { - panic!() - } - - // Wait a bit for the node's reputation to go above the threshold. - thread::sleep(Duration::from_millis(1500)); - - // Try again. This time the node should be accepted. 
- peerset.incoming(peer_id.clone(), IncomingIndex(2)); - while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { - assert_eq!(msg.unwrap(), Message::Accept(IncomingIndex(2))); - } - - Poll::Ready(()) - }); - - futures::executor::block_on(fut); - } + use super::{ + IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange, BANNED_THRESHOLD, + }; + use futures::prelude::*; + use libp2p::PeerId; + use std::{pin::Pin, task::Poll, thread, time::Duration}; + + fn assert_messages(mut peerset: Peerset, messages: Vec) -> Peerset { + for expected_message in messages { + let (message, p) = next_message(peerset).expect("expected message"); + assert_eq!(message, expected_message); + peerset = p; + } + assert!(peerset.message_queue.is_empty(), peerset.message_queue); + peerset + } + + fn next_message(mut peerset: Peerset) -> Result<(Message, Peerset), ()> { + let next = futures::executor::block_on_stream(&mut peerset).next(); + let message = next.ok_or_else(|| ())?; + Ok((message, peerset)) + } + + #[test] + fn test_peerset_add_reserved_peer() { + let bootnode = PeerId::random(); + let reserved_peer = PeerId::random(); + let reserved_peer2 = PeerId::random(); + let config = PeersetConfig { + in_peers: 0, + out_peers: 2, + bootnodes: vec![bootnode], + reserved_only: true, + priority_groups: Vec::new(), + }; + + let (peerset, handle) = Peerset::from_config(config); + handle.add_reserved_peer(reserved_peer.clone()); + handle.add_reserved_peer(reserved_peer2.clone()); + + assert_messages( + peerset, + vec![ + Message::Connect(reserved_peer), + Message::Connect(reserved_peer2), + ], + ); + } + + #[test] + fn test_peerset_incoming() { + let bootnode = PeerId::random(); + let incoming = PeerId::random(); + let incoming2 = PeerId::random(); + let incoming3 = PeerId::random(); + let ii = IncomingIndex(1); + let ii2 = IncomingIndex(2); + let ii3 = IncomingIndex(3); + let ii4 = IncomingIndex(3); + let config = PeersetConfig { + in_peers: 2, + out_peers: 1, + 
bootnodes: vec![bootnode.clone()], + reserved_only: false, + priority_groups: Vec::new(), + }; + + let (mut peerset, _handle) = Peerset::from_config(config); + peerset.incoming(incoming.clone(), ii); + peerset.incoming(incoming.clone(), ii4); + peerset.incoming(incoming2.clone(), ii2); + peerset.incoming(incoming3.clone(), ii3); + + assert_messages( + peerset, + vec![ + Message::Connect(bootnode.clone()), + Message::Accept(ii), + Message::Accept(ii2), + Message::Reject(ii3), + ], + ); + } + + #[test] + fn test_peerset_discovered() { + let bootnode = PeerId::random(); + let discovered = PeerId::random(); + let discovered2 = PeerId::random(); + let config = PeersetConfig { + in_peers: 0, + out_peers: 2, + bootnodes: vec![bootnode.clone()], + reserved_only: false, + priority_groups: vec![], + }; + + let (mut peerset, _handle) = Peerset::from_config(config); + peerset.discovered(Some(discovered.clone())); + peerset.discovered(Some(discovered.clone())); + peerset.discovered(Some(discovered2)); + + assert_messages( + peerset, + vec![Message::Connect(bootnode), Message::Connect(discovered)], + ); + } + + #[test] + fn test_peerset_banned() { + let (mut peerset, handle) = Peerset::from_config(PeersetConfig { + in_peers: 25, + out_peers: 25, + bootnodes: vec![], + reserved_only: false, + priority_groups: vec![], + }); + + // We ban a node by setting its reputation under the threshold. + let peer_id = PeerId::random(); + handle.report_peer( + peer_id.clone(), + ReputationChange::new(BANNED_THRESHOLD - 1, ""), + ); + + let fut = futures::future::poll_fn(move |cx| { + // We need one polling for the message to be processed. + assert_eq!(Stream::poll_next(Pin::new(&mut peerset), cx), Poll::Pending); + + // Check that an incoming connection from that node gets refused. 
+ peerset.incoming(peer_id.clone(), IncomingIndex(1)); + if let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { + assert_eq!(msg.unwrap(), Message::Reject(IncomingIndex(1))); + } else { + panic!() + } + + // Wait a bit for the node's reputation to go above the threshold. + thread::sleep(Duration::from_millis(1500)); + + // Try again. This time the node should be accepted. + peerset.incoming(peer_id.clone(), IncomingIndex(2)); + while let Poll::Ready(msg) = Stream::poll_next(Pin::new(&mut peerset), cx) { + assert_eq!(msg.unwrap(), Message::Accept(IncomingIndex(2))); + } + + Poll::Ready(()) + }); + + futures::executor::block_on(fut); + } } - diff --git a/client/peerset/src/peersstate.rs b/client/peerset/src/peersstate.rs index 843ec0a360..f3b8ce20b1 100644 --- a/client/peerset/src/peersstate.rs +++ b/client/peerset/src/peersstate.rs @@ -18,7 +18,10 @@ use libp2p::PeerId; use log::{error, warn}; -use std::{borrow::Cow, collections::{HashSet, HashMap}}; +use std::{ + borrow::Cow, + collections::{HashMap, HashSet}, +}; use wasm_timer::Instant; /// State storage behind the peerset. @@ -30,757 +33,890 @@ use wasm_timer::Instant; /// #[derive(Debug, Clone)] pub struct PeersState { - /// List of nodes that we know about. - /// - /// > **Note**: This list should really be ordered by decreasing reputation, so that we can - /// easily select the best node to connect to. As a first draft, however, we don't - /// sort, to make the logic easier. - nodes: HashMap, + /// List of nodes that we know about. + /// + /// > **Note**: This list should really be ordered by decreasing reputation, so that we can + /// easily select the best node to connect to. As a first draft, however, we don't + /// sort, to make the logic easier. + nodes: HashMap, - /// Number of non-priority nodes for which the `ConnectionState` is `In`. - num_in: u32, + /// Number of non-priority nodes for which the `ConnectionState` is `In`. 
+ num_in: u32, - /// Number of non-priority nodes for which the `ConnectionState` is `In`. - num_out: u32, + /// Number of non-priority nodes for which the `ConnectionState` is `In`. + num_out: u32, - /// Maximum allowed number of non-priority nodes for which the `ConnectionState` is `In`. - max_in: u32, + /// Maximum allowed number of non-priority nodes for which the `ConnectionState` is `In`. + max_in: u32, - /// Maximum allowed number of non-priority nodes for which the `ConnectionState` is `Out`. - max_out: u32, + /// Maximum allowed number of non-priority nodes for which the `ConnectionState` is `Out`. + max_out: u32, - /// Priority groups. Each group is identified by a string ID and contains a set of peer IDs. - priority_nodes: HashMap>, + /// Priority groups. Each group is identified by a string ID and contains a set of peer IDs. + priority_nodes: HashMap>, - /// Only allow connections to/from peers in a priority group. - priority_only: bool, + /// Only allow connections to/from peers in a priority group. + priority_only: bool, } /// State of a single node that we know about. #[derive(Debug, Copy, Clone, PartialEq, Eq)] struct Node { - /// Whether we are connected to this node. - connection_state: ConnectionState, + /// Whether we are connected to this node. + connection_state: ConnectionState, - /// Reputation value of the node, between `i32::min_value` (we hate that node) and - /// `i32::max_value` (we love that node). - reputation: i32, + /// Reputation value of the node, between `i32::min_value` (we hate that node) and + /// `i32::max_value` (we love that node). + reputation: i32, } impl Default for Node { - fn default() -> Node { - Node { - connection_state: ConnectionState::NotConnected { - last_connected: Instant::now(), - }, - reputation: 0, - } - } + fn default() -> Node { + Node { + connection_state: ConnectionState::NotConnected { + last_connected: Instant::now(), + }, + reputation: 0, + } + } } /// Whether we are connected to a node. 
#[derive(Debug, Copy, Clone, PartialEq, Eq)] enum ConnectionState { - /// We are connected through an ingoing connection. - In, - /// We are connected through an outgoing connection. - Out, - /// We are not connected to this node. - NotConnected { - /// When we were last connected to the node, or if we were never connected when we - /// discovered it. - last_connected: Instant, - }, + /// We are connected through an ingoing connection. + In, + /// We are connected through an outgoing connection. + Out, + /// We are not connected to this node. + NotConnected { + /// When we were last connected to the node, or if we were never connected when we + /// discovered it. + last_connected: Instant, + }, } impl ConnectionState { - /// Returns `true` for `In` and `Out`. - fn is_connected(self) -> bool { - match self { - ConnectionState::In => true, - ConnectionState::Out => true, - ConnectionState::NotConnected { .. } => false, - } - } + /// Returns `true` for `In` and `Out`. + fn is_connected(self) -> bool { + match self { + ConnectionState::In => true, + ConnectionState::Out => true, + ConnectionState::NotConnected { .. } => false, + } + } } impl PeersState { - /// Builds a new empty `PeersState`. - pub fn new(in_peers: u32, out_peers: u32, priority_only: bool) -> Self { - PeersState { - nodes: HashMap::new(), - num_in: 0, - num_out: 0, - max_in: in_peers, - max_out: out_peers, - priority_nodes: HashMap::new(), - priority_only, - } - } - - /// Returns an object that grants access to the state of a peer. 
- pub fn peer<'a>(&'a mut self, peer_id: &'a PeerId) -> Peer<'a> { - match self.nodes.get_mut(peer_id) { - None => return Peer::Unknown(UnknownPeer { - parent: self, - peer_id: Cow::Borrowed(peer_id), - }), - Some(peer) => { - if peer.connection_state.is_connected() { - Peer::Connected(ConnectedPeer { - state: self, - peer_id: Cow::Borrowed(peer_id), - }) - } else { - Peer::NotConnected(NotConnectedPeer { - state: self, - peer_id: Cow::Borrowed(peer_id), - }) - } - } - } - } - - /// Returns the list of all the peers we know of. - // Note: this method could theoretically return a `Peer`, but implementing that - // isn't simple. - pub fn peers(&self) -> impl ExactSizeIterator { - self.nodes.keys() - } - - /// Returns the list of peers we are connected to. - // Note: this method could theoretically return a `ConnectedPeer`, but implementing that - // isn't simple. - pub fn connected_peers(&self) -> impl Iterator { - self.nodes.iter() - .filter(|(_, p)| p.connection_state.is_connected()) - .map(|(p, _)| p) - } - - /// Returns the first priority peer that we are not connected to. - /// - /// If multiple nodes are prioritized, which one is returned is unspecified. - pub fn priority_not_connected_peer(&mut self) -> Option { - let id = self.priority_nodes.values() - .flatten() - .find(|&id| self.nodes.get(id).map_or(false, |node| !node.connection_state.is_connected())) - .cloned(); - id.map(move |id| NotConnectedPeer { - state: self, - peer_id: Cow::Owned(id), - }) - } - - /// Returns the first priority peer that we are not connected to. - /// - /// If multiple nodes are prioritized, which one is returned is unspecified. 
- pub fn priority_not_connected_peer_from_group(&mut self, group_id: &str) -> Option { - let id = self.priority_nodes.get(group_id) - .and_then(|group| group.iter() - .find(|&id| self.nodes.get(id).map_or(false, |node| !node.connection_state.is_connected())) - .cloned()); - id.map(move |id| NotConnectedPeer { - state: self, - peer_id: Cow::Owned(id), - }) - } - - /// Returns the peer with the highest reputation and that we are not connected to. - /// - /// If multiple nodes have the same reputation, which one is returned is unspecified. - pub fn highest_not_connected_peer(&mut self) -> Option { - let outcome = self.nodes - .iter_mut() - .filter(|(_, Node { connection_state, .. })| !connection_state.is_connected()) - .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { - if let Some(cur_node) = cur_node.take() { - if cur_node.1.reputation >= to_try.1.reputation { - return Some(cur_node); - } - } - Some(to_try) - }) - .map(|(peer_id, _)| peer_id.clone()); - - if let Some(peer_id) = outcome { - Some(NotConnectedPeer { - state: self, - peer_id: Cow::Owned(peer_id), - }) - } else { - None - } - } - - fn disconnect(&mut self, peer_id: &PeerId) { - let is_priority = self.is_priority(peer_id); - if let Some(mut node) = self.nodes.get_mut(peer_id) { - if !is_priority { - match node.connection_state { - ConnectionState::In => self.num_in -= 1, - ConnectionState::Out => self.num_out -= 1, - ConnectionState::NotConnected { .. } => - debug_assert!(false, "State inconsistency: disconnecting a disconnected node") - } - } - node.connection_state = ConnectionState::NotConnected { - last_connected: Instant::now(), - }; - } else { - warn!(target: "peerset", "Attempting to disconnect unknown peer {}", peer_id); - } - } - - /// Sets the peer as connected with an outgoing connection. - fn try_outgoing(&mut self, peer_id: &PeerId) -> bool { - let is_priority = self.is_priority(peer_id); - - // We are only accepting connections from priority nodes. 
- if !is_priority && self.priority_only { - return false; - } - - // Note that it is possible for num_out to be strictly superior to the max, in case we were - // connected to reserved node then marked them as not reserved. - if self.num_out >= self.max_out && !is_priority { - return false; - } - - if let Some(mut peer) = self.nodes.get_mut(peer_id) { - peer.connection_state = ConnectionState::Out; - if !is_priority { - self.num_out += 1; - } - return true; - } - false - } - - /// Tries to accept the peer as an incoming connection. - /// - /// If there are enough slots available, switches the node to "connected" and returns `true`. If - /// the slots are full, the node stays "not connected" and we return `false`. - /// - /// Note that reserved nodes don't count towards the number of slots. - fn try_accept_incoming(&mut self, peer_id: &PeerId) -> bool { - let is_priority = self.is_priority(peer_id); - - // We are only accepting connections from priority nodes. - if !is_priority && self.priority_only { - return false; - } - - // Note that it is possible for num_in to be strictly superior to the max, in case we were - // connected to reserved node then marked them as not reserved. 
- if self.num_in >= self.max_in && !is_priority { - return false; - } - if let Some(mut peer) = self.nodes.get_mut(peer_id) { - peer.connection_state = ConnectionState::In; - if !is_priority { - self.num_in += 1; - } - return true; - } - false - } - - /// Sets priority group - pub fn set_priority_group(&mut self, group_id: &str, peers: HashSet) { - // update slot counters - let all_other_groups: HashSet<_> = self.priority_nodes - .iter() - .filter(|(g, _)| *g != group_id) - .flat_map(|(_, id)| id.clone()) - .collect(); - let existing_group = self.priority_nodes.remove(group_id).unwrap_or_default(); - for id in existing_group { - // update slots for nodes that are no longer priority - if !all_other_groups.contains(&id) { - if let Some(peer) = self.nodes.get_mut(&id) { - match peer.connection_state { - ConnectionState::In => self.num_in += 1, - ConnectionState::Out => self.num_out += 1, - ConnectionState::NotConnected { .. } => {}, - } - } - } - } - - for id in &peers { - // update slots for nodes that become priority - if !all_other_groups.contains(id) { - let peer = self.nodes.entry(id.clone()).or_default(); - match peer.connection_state { - ConnectionState::In => self.num_in -= 1, - ConnectionState::Out => self.num_out -= 1, - ConnectionState::NotConnected { .. } => {}, - } - } - } - self.priority_nodes.insert(group_id.into(), peers); - } - - /// Add a peer to a priority group. - pub fn add_to_priority_group(&mut self, group_id: &str, peer_id: PeerId) { - let mut peers = self.priority_nodes.get(group_id).cloned().unwrap_or_default(); - peers.insert(peer_id); - self.set_priority_group(group_id, peers); - } - - /// Remove a peer from a priority group. - pub fn remove_from_priority_group(&mut self, group_id: &str, peer_id: &PeerId) { - let mut peers = self.priority_nodes.get(group_id).cloned().unwrap_or_default(); - peers.remove(peer_id); - self.set_priority_group(group_id, peers); - } - - /// Get priority group content. 
- pub fn get_priority_group(&self, group_id: &str) -> Option> { - self.priority_nodes.get(group_id).cloned() - } - - /// Set whether to only allow connections to/from peers in a priority group. - /// Calling this method does not affect any existing connection, e.g. - /// enabling priority only will not disconnect from any non-priority peers - /// we are already connected to, only future incoming/outgoing connection - /// attempts will be affected. - pub fn set_priority_only(&mut self, priority: bool) { - self.priority_only = priority; - } - - /// Check that node is any priority group. - fn is_priority(&self, peer_id: &PeerId) -> bool { - self.priority_nodes.iter().any(|(_, group)| group.contains(peer_id)) - } - - /// Returns the reputation value of the node. - fn reputation(&self, peer_id: &PeerId) -> i32 { - self.nodes.get(peer_id).map_or(0, |p| p.reputation) - } - - /// Sets the reputation of the peer. - fn set_reputation(&mut self, peer_id: &PeerId, value: i32) { - let node = self.nodes - .entry(peer_id.clone()) - .or_default(); - node.reputation = value; - } - - /// Performs an arithmetic addition on the reputation score of that peer. - /// - /// In case of overflow, the value will be capped. - /// If the peer is unknown to us, we insert it and consider that it has a reputation of 0. - fn add_reputation(&mut self, peer_id: &PeerId, modifier: i32) { - let node = self.nodes - .entry(peer_id.clone()) - .or_default(); - node.reputation = node.reputation.saturating_add(modifier); - } + /// Builds a new empty `PeersState`. + pub fn new(in_peers: u32, out_peers: u32, priority_only: bool) -> Self { + PeersState { + nodes: HashMap::new(), + num_in: 0, + num_out: 0, + max_in: in_peers, + max_out: out_peers, + priority_nodes: HashMap::new(), + priority_only, + } + } + + /// Returns an object that grants access to the state of a peer. 
+ pub fn peer<'a>(&'a mut self, peer_id: &'a PeerId) -> Peer<'a> { + match self.nodes.get_mut(peer_id) { + None => { + return Peer::Unknown(UnknownPeer { + parent: self, + peer_id: Cow::Borrowed(peer_id), + }) + } + Some(peer) => { + if peer.connection_state.is_connected() { + Peer::Connected(ConnectedPeer { + state: self, + peer_id: Cow::Borrowed(peer_id), + }) + } else { + Peer::NotConnected(NotConnectedPeer { + state: self, + peer_id: Cow::Borrowed(peer_id), + }) + } + } + } + } + + /// Returns the list of all the peers we know of. + // Note: this method could theoretically return a `Peer`, but implementing that + // isn't simple. + pub fn peers(&self) -> impl ExactSizeIterator { + self.nodes.keys() + } + + /// Returns the list of peers we are connected to. + // Note: this method could theoretically return a `ConnectedPeer`, but implementing that + // isn't simple. + pub fn connected_peers(&self) -> impl Iterator { + self.nodes + .iter() + .filter(|(_, p)| p.connection_state.is_connected()) + .map(|(p, _)| p) + } + + /// Returns the first priority peer that we are not connected to. + /// + /// If multiple nodes are prioritized, which one is returned is unspecified. + pub fn priority_not_connected_peer(&mut self) -> Option { + let id = self + .priority_nodes + .values() + .flatten() + .find(|&id| { + self.nodes + .get(id) + .map_or(false, |node| !node.connection_state.is_connected()) + }) + .cloned(); + id.map(move |id| NotConnectedPeer { + state: self, + peer_id: Cow::Owned(id), + }) + } + + /// Returns the first priority peer that we are not connected to. + /// + /// If multiple nodes are prioritized, which one is returned is unspecified. 
+ pub fn priority_not_connected_peer_from_group( + &mut self, + group_id: &str, + ) -> Option { + let id = self.priority_nodes.get(group_id).and_then(|group| { + group + .iter() + .find(|&id| { + self.nodes + .get(id) + .map_or(false, |node| !node.connection_state.is_connected()) + }) + .cloned() + }); + id.map(move |id| NotConnectedPeer { + state: self, + peer_id: Cow::Owned(id), + }) + } + + /// Returns the peer with the highest reputation and that we are not connected to. + /// + /// If multiple nodes have the same reputation, which one is returned is unspecified. + pub fn highest_not_connected_peer(&mut self) -> Option { + let outcome = self + .nodes + .iter_mut() + .filter( + |( + _, + Node { + connection_state, .. + }, + )| !connection_state.is_connected(), + ) + .fold(None::<(&PeerId, &mut Node)>, |mut cur_node, to_try| { + if let Some(cur_node) = cur_node.take() { + if cur_node.1.reputation >= to_try.1.reputation { + return Some(cur_node); + } + } + Some(to_try) + }) + .map(|(peer_id, _)| peer_id.clone()); + + if let Some(peer_id) = outcome { + Some(NotConnectedPeer { + state: self, + peer_id: Cow::Owned(peer_id), + }) + } else { + None + } + } + + fn disconnect(&mut self, peer_id: &PeerId) { + let is_priority = self.is_priority(peer_id); + if let Some(mut node) = self.nodes.get_mut(peer_id) { + if !is_priority { + match node.connection_state { + ConnectionState::In => self.num_in -= 1, + ConnectionState::Out => self.num_out -= 1, + ConnectionState::NotConnected { .. } => debug_assert!( + false, + "State inconsistency: disconnecting a disconnected node" + ), + } + } + node.connection_state = ConnectionState::NotConnected { + last_connected: Instant::now(), + }; + } else { + warn!(target: "peerset", "Attempting to disconnect unknown peer {}", peer_id); + } + } + + /// Sets the peer as connected with an outgoing connection. 
+ fn try_outgoing(&mut self, peer_id: &PeerId) -> bool { + let is_priority = self.is_priority(peer_id); + + // We are only accepting connections from priority nodes. + if !is_priority && self.priority_only { + return false; + } + + // Note that it is possible for num_out to be strictly superior to the max, in case we were + // connected to reserved node then marked them as not reserved. + if self.num_out >= self.max_out && !is_priority { + return false; + } + + if let Some(mut peer) = self.nodes.get_mut(peer_id) { + peer.connection_state = ConnectionState::Out; + if !is_priority { + self.num_out += 1; + } + return true; + } + false + } + + /// Tries to accept the peer as an incoming connection. + /// + /// If there are enough slots available, switches the node to "connected" and returns `true`. If + /// the slots are full, the node stays "not connected" and we return `false`. + /// + /// Note that reserved nodes don't count towards the number of slots. + fn try_accept_incoming(&mut self, peer_id: &PeerId) -> bool { + let is_priority = self.is_priority(peer_id); + + // We are only accepting connections from priority nodes. + if !is_priority && self.priority_only { + return false; + } + + // Note that it is possible for num_in to be strictly superior to the max, in case we were + // connected to reserved node then marked them as not reserved. 
+ if self.num_in >= self.max_in && !is_priority { + return false; + } + if let Some(mut peer) = self.nodes.get_mut(peer_id) { + peer.connection_state = ConnectionState::In; + if !is_priority { + self.num_in += 1; + } + return true; + } + false + } + + /// Sets priority group + pub fn set_priority_group(&mut self, group_id: &str, peers: HashSet) { + // update slot counters + let all_other_groups: HashSet<_> = self + .priority_nodes + .iter() + .filter(|(g, _)| *g != group_id) + .flat_map(|(_, id)| id.clone()) + .collect(); + let existing_group = self.priority_nodes.remove(group_id).unwrap_or_default(); + for id in existing_group { + // update slots for nodes that are no longer priority + if !all_other_groups.contains(&id) { + if let Some(peer) = self.nodes.get_mut(&id) { + match peer.connection_state { + ConnectionState::In => self.num_in += 1, + ConnectionState::Out => self.num_out += 1, + ConnectionState::NotConnected { .. } => {} + } + } + } + } + + for id in &peers { + // update slots for nodes that become priority + if !all_other_groups.contains(id) { + let peer = self.nodes.entry(id.clone()).or_default(); + match peer.connection_state { + ConnectionState::In => self.num_in -= 1, + ConnectionState::Out => self.num_out -= 1, + ConnectionState::NotConnected { .. } => {} + } + } + } + self.priority_nodes.insert(group_id.into(), peers); + } + + /// Add a peer to a priority group. + pub fn add_to_priority_group(&mut self, group_id: &str, peer_id: PeerId) { + let mut peers = self + .priority_nodes + .get(group_id) + .cloned() + .unwrap_or_default(); + peers.insert(peer_id); + self.set_priority_group(group_id, peers); + } + + /// Remove a peer from a priority group. + pub fn remove_from_priority_group(&mut self, group_id: &str, peer_id: &PeerId) { + let mut peers = self + .priority_nodes + .get(group_id) + .cloned() + .unwrap_or_default(); + peers.remove(peer_id); + self.set_priority_group(group_id, peers); + } + + /// Get priority group content. 
+ pub fn get_priority_group(&self, group_id: &str) -> Option> { + self.priority_nodes.get(group_id).cloned() + } + + /// Set whether to only allow connections to/from peers in a priority group. + /// Calling this method does not affect any existing connection, e.g. + /// enabling priority only will not disconnect from any non-priority peers + /// we are already connected to, only future incoming/outgoing connection + /// attempts will be affected. + pub fn set_priority_only(&mut self, priority: bool) { + self.priority_only = priority; + } + + /// Check that node is any priority group. + fn is_priority(&self, peer_id: &PeerId) -> bool { + self.priority_nodes + .iter() + .any(|(_, group)| group.contains(peer_id)) + } + + /// Returns the reputation value of the node. + fn reputation(&self, peer_id: &PeerId) -> i32 { + self.nodes.get(peer_id).map_or(0, |p| p.reputation) + } + + /// Sets the reputation of the peer. + fn set_reputation(&mut self, peer_id: &PeerId, value: i32) { + let node = self.nodes.entry(peer_id.clone()).or_default(); + node.reputation = value; + } + + /// Performs an arithmetic addition on the reputation score of that peer. + /// + /// In case of overflow, the value will be capped. + /// If the peer is unknown to us, we insert it and consider that it has a reputation of 0. + fn add_reputation(&mut self, peer_id: &PeerId, modifier: i32) { + let node = self.nodes.entry(peer_id.clone()).or_default(); + node.reputation = node.reputation.saturating_add(modifier); + } } /// Grants access to the state of a peer in the `PeersState`. pub enum Peer<'a> { - /// We are connected to this node. - Connected(ConnectedPeer<'a>), - /// We are not connected to this node. - NotConnected(NotConnectedPeer<'a>), - /// We have never heard of this node. - Unknown(UnknownPeer<'a>), + /// We are connected to this node. + Connected(ConnectedPeer<'a>), + /// We are not connected to this node. + NotConnected(NotConnectedPeer<'a>), + /// We have never heard of this node. 
+ Unknown(UnknownPeer<'a>), } impl<'a> Peer<'a> { - /// If we are the `Connected` variant, returns the inner `ConnectedPeer`. Returns `None` - /// otherwise. - pub fn into_connected(self) -> Option> { - match self { - Peer::Connected(peer) => Some(peer), - Peer::NotConnected(_) => None, - Peer::Unknown(_) => None, - } - } - - /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` - /// otherwise. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests - pub fn into_not_connected(self) -> Option> { - match self { - Peer::Connected(_) => None, - Peer::NotConnected(peer) => Some(peer), - Peer::Unknown(_) => None, - } - } - - /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` - /// otherwise. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests - pub fn into_unknown(self) -> Option> { - match self { - Peer::Connected(_) => None, - Peer::NotConnected(_) => None, - Peer::Unknown(peer) => Some(peer), - } - } + /// If we are the `Connected` variant, returns the inner `ConnectedPeer`. Returns `None` + /// otherwise. + pub fn into_connected(self) -> Option> { + match self { + Peer::Connected(peer) => Some(peer), + Peer::NotConnected(_) => None, + Peer::Unknown(_) => None, + } + } + + /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` + /// otherwise. + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests + pub fn into_not_connected(self) -> Option> { + match self { + Peer::Connected(_) => None, + Peer::NotConnected(peer) => Some(peer), + Peer::Unknown(_) => None, + } + } + + /// If we are the `Unknown` variant, returns the inner `ConnectedPeer`. Returns `None` + /// otherwise. 
+ #[cfg(test)] // Feel free to remove this if this function is needed outside of tests + pub fn into_unknown(self) -> Option> { + match self { + Peer::Connected(_) => None, + Peer::NotConnected(_) => None, + Peer::Unknown(peer) => Some(peer), + } + } } /// A peer that is connected to us. pub struct ConnectedPeer<'a> { - state: &'a mut PeersState, - peer_id: Cow<'a, PeerId>, + state: &'a mut PeersState, + peer_id: Cow<'a, PeerId>, } impl<'a> ConnectedPeer<'a> { - /// Destroys this `ConnectedPeer` and returns the `PeerId` inside of it. - pub fn into_peer_id(self) -> PeerId { - self.peer_id.into_owned() - } - - /// Switches the peer to "not connected". - pub fn disconnect(self) -> NotConnectedPeer<'a> { - self.state.disconnect(&self.peer_id); - NotConnectedPeer { - state: self.state, - peer_id: self.peer_id, - } - } - - /// Returns the reputation value of the node. - pub fn reputation(&self) -> i32 { - self.state.reputation(&self.peer_id) - } - - /// Sets the reputation of the peer. - pub fn set_reputation(&mut self, value: i32) { - self.state.set_reputation(&self.peer_id, value) - } - - /// Performs an arithmetic addition on the reputation score of that peer. - /// - /// In case of overflow, the value will be capped. - pub fn add_reputation(&mut self, modifier: i32) { - self.state.add_reputation(&self.peer_id, modifier) - } + /// Destroys this `ConnectedPeer` and returns the `PeerId` inside of it. + pub fn into_peer_id(self) -> PeerId { + self.peer_id.into_owned() + } + + /// Switches the peer to "not connected". + pub fn disconnect(self) -> NotConnectedPeer<'a> { + self.state.disconnect(&self.peer_id); + NotConnectedPeer { + state: self.state, + peer_id: self.peer_id, + } + } + + /// Returns the reputation value of the node. + pub fn reputation(&self) -> i32 { + self.state.reputation(&self.peer_id) + } + + /// Sets the reputation of the peer. 
+ pub fn set_reputation(&mut self, value: i32) { + self.state.set_reputation(&self.peer_id, value) + } + + /// Performs an arithmetic addition on the reputation score of that peer. + /// + /// In case of overflow, the value will be capped. + pub fn add_reputation(&mut self, modifier: i32) { + self.state.add_reputation(&self.peer_id, modifier) + } } /// A peer that is not connected to us. #[derive(Debug)] pub struct NotConnectedPeer<'a> { - state: &'a mut PeersState, - peer_id: Cow<'a, PeerId>, + state: &'a mut PeersState, + peer_id: Cow<'a, PeerId>, } impl<'a> NotConnectedPeer<'a> { - /// Destroys this `NotConnectedPeer` and returns the `PeerId` inside of it. - #[cfg(test)] // Feel free to remove this if this function is needed outside of tests - pub fn into_peer_id(self) -> PeerId { - self.peer_id.into_owned() - } - - /// Bumps the value that `last_connected_or_discovered` would return to now, even if we - /// didn't connect or disconnect. - pub fn bump_last_connected_or_discovered(&mut self) { - let state = match self.state.nodes.get_mut(&*self.peer_id) { - Some(s) => s, - None => return, - }; - - if let ConnectionState::NotConnected { last_connected } = &mut state.connection_state { - *last_connected = Instant::now(); - } - } - - /// Returns when we were last connected to this peer, or when we discovered it if we were - /// never connected. - /// - /// Guaranteed to be earlier than calling `Instant::now()` after the function returns. 
- pub fn last_connected_or_discovered(&self) -> Instant { - let state = match self.state.nodes.get(&*self.peer_id) { - Some(s) => s, - None => { - error!( - target: "peerset", - "State inconsistency with {}; not connected after borrow", - self.peer_id - ); - return Instant::now(); - } - }; - - match state.connection_state { - ConnectionState::NotConnected { last_connected } => last_connected, - _ => { - error!(target: "peerset", "State inconsistency with {}", self.peer_id); - Instant::now() - } - } - } - - /// Tries to set the peer as connected as an outgoing connection. - /// - /// If there are enough slots available, switches the node to "connected" and returns `Ok`. If - /// the slots are full, the node stays "not connected" and we return `Err`. - /// - /// Note that priority nodes don't count towards the number of slots. - pub fn try_outgoing(self) -> Result, NotConnectedPeer<'a>> { - if self.state.try_outgoing(&self.peer_id) { - Ok(ConnectedPeer { - state: self.state, - peer_id: self.peer_id, - }) - } else { - Err(self) - } - } - - /// Tries to accept the peer as an incoming connection. - /// - /// If there are enough slots available, switches the node to "connected" and returns `Ok`. If - /// the slots are full, the node stays "not connected" and we return `Err`. - /// - /// Note that priority nodes don't count towards the number of slots. - pub fn try_accept_incoming(self) -> Result, NotConnectedPeer<'a>> { - if self.state.try_accept_incoming(&self.peer_id) { - Ok(ConnectedPeer { - state: self.state, - peer_id: self.peer_id, - }) - } else { - Err(self) - } - } - - /// Returns the reputation value of the node. - pub fn reputation(&self) -> i32 { - self.state.reputation(&self.peer_id) - } - - /// Sets the reputation of the peer. - pub fn set_reputation(&mut self, value: i32) { - self.state.set_reputation(&self.peer_id, value) - } - - /// Performs an arithmetic addition on the reputation score of that peer. 
- /// - /// In case of overflow, the value will be capped. - /// If the peer is unknown to us, we insert it and consider that it has a reputation of 0. - pub fn add_reputation(&mut self, modifier: i32) { - self.state.add_reputation(&self.peer_id, modifier) - } - - /// Un-discovers the peer. Removes it from the list. - pub fn forget_peer(self) -> UnknownPeer<'a> { - if self.state.nodes.remove(&*self.peer_id).is_none() { - error!( - target: "peerset", - "State inconsistency with {} when forgetting peer", - self.peer_id - ); - } - - UnknownPeer { - parent: self.state, - peer_id: self.peer_id, - } - } + /// Destroys this `NotConnectedPeer` and returns the `PeerId` inside of it. + #[cfg(test)] // Feel free to remove this if this function is needed outside of tests + pub fn into_peer_id(self) -> PeerId { + self.peer_id.into_owned() + } + + /// Bumps the value that `last_connected_or_discovered` would return to now, even if we + /// didn't connect or disconnect. + pub fn bump_last_connected_or_discovered(&mut self) { + let state = match self.state.nodes.get_mut(&*self.peer_id) { + Some(s) => s, + None => return, + }; + + if let ConnectionState::NotConnected { last_connected } = &mut state.connection_state { + *last_connected = Instant::now(); + } + } + + /// Returns when we were last connected to this peer, or when we discovered it if we were + /// never connected. + /// + /// Guaranteed to be earlier than calling `Instant::now()` after the function returns. 
+ pub fn last_connected_or_discovered(&self) -> Instant { + let state = match self.state.nodes.get(&*self.peer_id) { + Some(s) => s, + None => { + error!( + target: "peerset", + "State inconsistency with {}; not connected after borrow", + self.peer_id + ); + return Instant::now(); + } + }; + + match state.connection_state { + ConnectionState::NotConnected { last_connected } => last_connected, + _ => { + error!(target: "peerset", "State inconsistency with {}", self.peer_id); + Instant::now() + } + } + } + + /// Tries to set the peer as connected as an outgoing connection. + /// + /// If there are enough slots available, switches the node to "connected" and returns `Ok`. If + /// the slots are full, the node stays "not connected" and we return `Err`. + /// + /// Note that priority nodes don't count towards the number of slots. + pub fn try_outgoing(self) -> Result, NotConnectedPeer<'a>> { + if self.state.try_outgoing(&self.peer_id) { + Ok(ConnectedPeer { + state: self.state, + peer_id: self.peer_id, + }) + } else { + Err(self) + } + } + + /// Tries to accept the peer as an incoming connection. + /// + /// If there are enough slots available, switches the node to "connected" and returns `Ok`. If + /// the slots are full, the node stays "not connected" and we return `Err`. + /// + /// Note that priority nodes don't count towards the number of slots. + pub fn try_accept_incoming(self) -> Result, NotConnectedPeer<'a>> { + if self.state.try_accept_incoming(&self.peer_id) { + Ok(ConnectedPeer { + state: self.state, + peer_id: self.peer_id, + }) + } else { + Err(self) + } + } + + /// Returns the reputation value of the node. + pub fn reputation(&self) -> i32 { + self.state.reputation(&self.peer_id) + } + + /// Sets the reputation of the peer. + pub fn set_reputation(&mut self, value: i32) { + self.state.set_reputation(&self.peer_id, value) + } + + /// Performs an arithmetic addition on the reputation score of that peer. 
+ /// + /// In case of overflow, the value will be capped. + /// If the peer is unknown to us, we insert it and consider that it has a reputation of 0. + pub fn add_reputation(&mut self, modifier: i32) { + self.state.add_reputation(&self.peer_id, modifier) + } + + /// Un-discovers the peer. Removes it from the list. + pub fn forget_peer(self) -> UnknownPeer<'a> { + if self.state.nodes.remove(&*self.peer_id).is_none() { + error!( + target: "peerset", + "State inconsistency with {} when forgetting peer", + self.peer_id + ); + } + + UnknownPeer { + parent: self.state, + peer_id: self.peer_id, + } + } } /// A peer that we have never heard of. pub struct UnknownPeer<'a> { - parent: &'a mut PeersState, - peer_id: Cow<'a, PeerId>, + parent: &'a mut PeersState, + peer_id: Cow<'a, PeerId>, } impl<'a> UnknownPeer<'a> { - /// Inserts the peer identity in our list. - /// - /// The node starts with a reputation of 0. You can adjust these default - /// values using the `NotConnectedPeer` that this method returns. - pub fn discover(self) -> NotConnectedPeer<'a> { - self.parent.nodes.insert(self.peer_id.clone().into_owned(), Node { - connection_state: ConnectionState::NotConnected { - last_connected: Instant::now(), - }, - reputation: 0, - }); - - let state = self.parent; - NotConnectedPeer { - state, - peer_id: self.peer_id, - } - } + /// Inserts the peer identity in our list. + /// + /// The node starts with a reputation of 0. You can adjust these default + /// values using the `NotConnectedPeer` that this method returns. 
+ pub fn discover(self) -> NotConnectedPeer<'a> { + self.parent.nodes.insert( + self.peer_id.clone().into_owned(), + Node { + connection_state: ConnectionState::NotConnected { + last_connected: Instant::now(), + }, + reputation: 0, + }, + ); + + let state = self.parent; + NotConnectedPeer { + state, + peer_id: self.peer_id, + } + } } #[cfg(test)] mod tests { - use super::{PeersState, Peer}; - use libp2p::PeerId; - - #[test] - fn full_slots_in() { - let mut peers_state = PeersState::new(1, 1, false); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - if let Peer::Unknown(e) = peers_state.peer(&id1) { - assert!(e.discover().try_accept_incoming().is_ok()); - } - - if let Peer::Unknown(e) = peers_state.peer(&id2) { - assert!(e.discover().try_accept_incoming().is_err()); - } - } - - #[test] - fn priority_node_doesnt_use_slot() { - let mut peers_state = PeersState::new(1, 1, false); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - peers_state.set_priority_group("test", vec![id1.clone()].into_iter().collect()); - if let Peer::NotConnected(p) = peers_state.peer(&id1) { - assert!(p.try_accept_incoming().is_ok()); - } else { panic!() } - - if let Peer::Unknown(e) = peers_state.peer(&id2) { - assert!(e.discover().try_accept_incoming().is_ok()); - } else { panic!() } - } - - #[test] - fn disconnecting_frees_slot() { - let mut peers_state = PeersState::new(1, 1, false); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - assert!(peers_state.peer(&id1).into_unknown().unwrap().discover().try_accept_incoming().is_ok()); - assert!(peers_state.peer(&id2).into_unknown().unwrap().discover().try_accept_incoming().is_err()); - peers_state.peer(&id1).into_connected().unwrap().disconnect(); - assert!(peers_state.peer(&id2).into_not_connected().unwrap().try_accept_incoming().is_ok()); - } - - #[test] - fn priority_not_connected_peer() { - let mut peers_state = PeersState::new(25, 25, false); - let id1 = PeerId::random(); - let id2 = PeerId::random(); 
- - assert!(peers_state.priority_not_connected_peer().is_none()); - peers_state.peer(&id1).into_unknown().unwrap().discover(); - peers_state.peer(&id2).into_unknown().unwrap().discover(); - - assert!(peers_state.priority_not_connected_peer().is_none()); - peers_state.set_priority_group("test", vec![id1.clone()].into_iter().collect()); - assert!(peers_state.priority_not_connected_peer().is_some()); - peers_state.set_priority_group("test", vec![id2.clone(), id2.clone()].into_iter().collect()); - assert!(peers_state.priority_not_connected_peer().is_some()); - peers_state.set_priority_group("test", vec![].into_iter().collect()); - assert!(peers_state.priority_not_connected_peer().is_none()); - } - - #[test] - fn highest_not_connected_peer() { - let mut peers_state = PeersState::new(25, 25, false); - let id1 = PeerId::random(); - let id2 = PeerId::random(); - - assert!(peers_state.highest_not_connected_peer().is_none()); - peers_state.peer(&id1).into_unknown().unwrap().discover().set_reputation(50); - peers_state.peer(&id2).into_unknown().unwrap().discover().set_reputation(25); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - peers_state.peer(&id2).into_not_connected().unwrap().set_reputation(75); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2.clone())); - peers_state.peer(&id2).into_not_connected().unwrap().try_accept_incoming().unwrap(); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - peers_state.peer(&id1).into_not_connected().unwrap().set_reputation(100); - peers_state.peer(&id2).into_connected().unwrap().disconnect(); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id1.clone())); - peers_state.peer(&id1).into_not_connected().unwrap().set_reputation(-100); - assert_eq!(peers_state.highest_not_connected_peer().map(|p| p.into_peer_id()), Some(id2.clone())); - } - - #[test] - fn 
disconnect_priority_doesnt_panic() { - let mut peers_state = PeersState::new(1, 1, false); - let id = PeerId::random(); - peers_state.set_priority_group("test", vec![id.clone()].into_iter().collect()); - let peer = peers_state.peer(&id).into_not_connected().unwrap().try_outgoing().unwrap(); - peer.disconnect(); - } - - #[test] - fn multiple_priority_groups_slot_count() { - let mut peers_state = PeersState::new(1, 1, false); - let id = PeerId::random(); - - if let Peer::Unknown(p) = peers_state.peer(&id) { - assert!(p.discover().try_accept_incoming().is_ok()); - } else { panic!() } - - assert_eq!(peers_state.num_in, 1); - peers_state.set_priority_group("test1", vec![id.clone()].into_iter().collect()); - assert_eq!(peers_state.num_in, 0); - peers_state.set_priority_group("test2", vec![id.clone()].into_iter().collect()); - assert_eq!(peers_state.num_in, 0); - peers_state.set_priority_group("test1", vec![].into_iter().collect()); - assert_eq!(peers_state.num_in, 0); - peers_state.set_priority_group("test2", vec![].into_iter().collect()); - assert_eq!(peers_state.num_in, 1); - } - - #[test] - fn priority_only_mode_ignores_drops_unknown_nodes() { - // test whether connection to/from given peer is allowed - let test_connection = |peers_state: &mut PeersState, id| { - if let Peer::Unknown(p) = peers_state.peer(id) { - p.discover(); - } - - let incoming = if let Peer::NotConnected(p) = peers_state.peer(id) { - p.try_accept_incoming().is_ok() - } else { - panic!() - }; - - if incoming { - peers_state.peer(id).into_connected().map(|p| p.disconnect()); - } - - let outgoing = if let Peer::NotConnected(p) = peers_state.peer(id) { - p.try_outgoing().is_ok() - } else { - panic!() - }; - - if outgoing { - peers_state.peer(id).into_connected().map(|p| p.disconnect()); - } - - incoming || outgoing - }; - - let mut peers_state = PeersState::new(1, 1, true); - let id = PeerId::random(); - - // this is an unknown peer and our peer state is set to only allow - // priority peers so any 
connection attempt should be denied. - assert!(!test_connection(&mut peers_state, &id)); - - // disabling priority only mode should allow the connection to go - // through. - peers_state.set_priority_only(false); - assert!(test_connection(&mut peers_state, &id)); - - // re-enabling it we should again deny connections from the peer. - peers_state.set_priority_only(true); - assert!(!test_connection(&mut peers_state, &id)); - - // but if we add the peer to a priority group it should be accepted. - peers_state.set_priority_group("TEST_GROUP", vec![id.clone()].into_iter().collect()); - assert!(test_connection(&mut peers_state, &id)); - - // and removing it will cause the connection to once again be denied. - peers_state.remove_from_priority_group("TEST_GROUP", &id); - assert!(!test_connection(&mut peers_state, &id)); - } + use super::{Peer, PeersState}; + use libp2p::PeerId; + + #[test] + fn full_slots_in() { + let mut peers_state = PeersState::new(1, 1, false); + let id1 = PeerId::random(); + let id2 = PeerId::random(); + + if let Peer::Unknown(e) = peers_state.peer(&id1) { + assert!(e.discover().try_accept_incoming().is_ok()); + } + + if let Peer::Unknown(e) = peers_state.peer(&id2) { + assert!(e.discover().try_accept_incoming().is_err()); + } + } + + #[test] + fn priority_node_doesnt_use_slot() { + let mut peers_state = PeersState::new(1, 1, false); + let id1 = PeerId::random(); + let id2 = PeerId::random(); + + peers_state.set_priority_group("test", vec![id1.clone()].into_iter().collect()); + if let Peer::NotConnected(p) = peers_state.peer(&id1) { + assert!(p.try_accept_incoming().is_ok()); + } else { + panic!() + } + + if let Peer::Unknown(e) = peers_state.peer(&id2) { + assert!(e.discover().try_accept_incoming().is_ok()); + } else { + panic!() + } + } + + #[test] + fn disconnecting_frees_slot() { + let mut peers_state = PeersState::new(1, 1, false); + let id1 = PeerId::random(); + let id2 = PeerId::random(); + + assert!(peers_state + .peer(&id1) + .into_unknown() 
+ .unwrap() + .discover() + .try_accept_incoming() + .is_ok()); + assert!(peers_state + .peer(&id2) + .into_unknown() + .unwrap() + .discover() + .try_accept_incoming() + .is_err()); + peers_state + .peer(&id1) + .into_connected() + .unwrap() + .disconnect(); + assert!(peers_state + .peer(&id2) + .into_not_connected() + .unwrap() + .try_accept_incoming() + .is_ok()); + } + + #[test] + fn priority_not_connected_peer() { + let mut peers_state = PeersState::new(25, 25, false); + let id1 = PeerId::random(); + let id2 = PeerId::random(); + + assert!(peers_state.priority_not_connected_peer().is_none()); + peers_state.peer(&id1).into_unknown().unwrap().discover(); + peers_state.peer(&id2).into_unknown().unwrap().discover(); + + assert!(peers_state.priority_not_connected_peer().is_none()); + peers_state.set_priority_group("test", vec![id1.clone()].into_iter().collect()); + assert!(peers_state.priority_not_connected_peer().is_some()); + peers_state + .set_priority_group("test", vec![id2.clone(), id2.clone()].into_iter().collect()); + assert!(peers_state.priority_not_connected_peer().is_some()); + peers_state.set_priority_group("test", vec![].into_iter().collect()); + assert!(peers_state.priority_not_connected_peer().is_none()); + } + + #[test] + fn highest_not_connected_peer() { + let mut peers_state = PeersState::new(25, 25, false); + let id1 = PeerId::random(); + let id2 = PeerId::random(); + + assert!(peers_state.highest_not_connected_peer().is_none()); + peers_state + .peer(&id1) + .into_unknown() + .unwrap() + .discover() + .set_reputation(50); + peers_state + .peer(&id2) + .into_unknown() + .unwrap() + .discover() + .set_reputation(25); + assert_eq!( + peers_state + .highest_not_connected_peer() + .map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state + .peer(&id2) + .into_not_connected() + .unwrap() + .set_reputation(75); + assert_eq!( + peers_state + .highest_not_connected_peer() + .map(|p| p.into_peer_id()), + Some(id2.clone()) + ); + peers_state + 
.peer(&id2) + .into_not_connected() + .unwrap() + .try_accept_incoming() + .unwrap(); + assert_eq!( + peers_state + .highest_not_connected_peer() + .map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state + .peer(&id1) + .into_not_connected() + .unwrap() + .set_reputation(100); + peers_state + .peer(&id2) + .into_connected() + .unwrap() + .disconnect(); + assert_eq!( + peers_state + .highest_not_connected_peer() + .map(|p| p.into_peer_id()), + Some(id1.clone()) + ); + peers_state + .peer(&id1) + .into_not_connected() + .unwrap() + .set_reputation(-100); + assert_eq!( + peers_state + .highest_not_connected_peer() + .map(|p| p.into_peer_id()), + Some(id2.clone()) + ); + } + + #[test] + fn disconnect_priority_doesnt_panic() { + let mut peers_state = PeersState::new(1, 1, false); + let id = PeerId::random(); + peers_state.set_priority_group("test", vec![id.clone()].into_iter().collect()); + let peer = peers_state + .peer(&id) + .into_not_connected() + .unwrap() + .try_outgoing() + .unwrap(); + peer.disconnect(); + } + + #[test] + fn multiple_priority_groups_slot_count() { + let mut peers_state = PeersState::new(1, 1, false); + let id = PeerId::random(); + + if let Peer::Unknown(p) = peers_state.peer(&id) { + assert!(p.discover().try_accept_incoming().is_ok()); + } else { + panic!() + } + + assert_eq!(peers_state.num_in, 1); + peers_state.set_priority_group("test1", vec![id.clone()].into_iter().collect()); + assert_eq!(peers_state.num_in, 0); + peers_state.set_priority_group("test2", vec![id.clone()].into_iter().collect()); + assert_eq!(peers_state.num_in, 0); + peers_state.set_priority_group("test1", vec![].into_iter().collect()); + assert_eq!(peers_state.num_in, 0); + peers_state.set_priority_group("test2", vec![].into_iter().collect()); + assert_eq!(peers_state.num_in, 1); + } + + #[test] + fn priority_only_mode_ignores_drops_unknown_nodes() { + // test whether connection to/from given peer is allowed + let test_connection = |peers_state: &mut PeersState, 
id| { + if let Peer::Unknown(p) = peers_state.peer(id) { + p.discover(); + } + + let incoming = if let Peer::NotConnected(p) = peers_state.peer(id) { + p.try_accept_incoming().is_ok() + } else { + panic!() + }; + + if incoming { + peers_state + .peer(id) + .into_connected() + .map(|p| p.disconnect()); + } + + let outgoing = if let Peer::NotConnected(p) = peers_state.peer(id) { + p.try_outgoing().is_ok() + } else { + panic!() + }; + + if outgoing { + peers_state + .peer(id) + .into_connected() + .map(|p| p.disconnect()); + } + + incoming || outgoing + }; + + let mut peers_state = PeersState::new(1, 1, true); + let id = PeerId::random(); + + // this is an unknown peer and our peer state is set to only allow + // priority peers so any connection attempt should be denied. + assert!(!test_connection(&mut peers_state, &id)); + + // disabling priority only mode should allow the connection to go + // through. + peers_state.set_priority_only(false); + assert!(test_connection(&mut peers_state, &id)); + + // re-enabling it we should again deny connections from the peer. + peers_state.set_priority_only(true); + assert!(!test_connection(&mut peers_state, &id)); + + // but if we add the peer to a priority group it should be accepted. + peers_state.set_priority_group("TEST_GROUP", vec![id.clone()].into_iter().collect()); + assert!(test_connection(&mut peers_state, &id)); + + // and removing it will cause the connection to once again be denied. 
+ peers_state.remove_from_priority_group("TEST_GROUP", &id); + assert!(!test_connection(&mut peers_state, &id)); + } } diff --git a/client/peerset/tests/fuzz.rs b/client/peerset/tests/fuzz.rs index 44477cec65..463c6f0703 100644 --- a/client/peerset/tests/fuzz.rs +++ b/client/peerset/tests/fuzz.rs @@ -18,124 +18,159 @@ use futures::prelude::*; use libp2p::PeerId; use rand::distributions::{Distribution, Uniform, WeightedIndex}; use rand::seq::IteratorRandom; +use sc_peerset::{IncomingIndex, Message, Peerset, PeersetConfig, ReputationChange}; use std::{collections::HashMap, collections::HashSet, iter, pin::Pin, task::Poll}; -use sc_peerset::{IncomingIndex, Message, PeersetConfig, Peerset, ReputationChange}; #[test] fn run() { - for _ in 0..50 { - test_once(); - } + for _ in 0..50 { + test_once(); + } } fn test_once() { - // PRNG to use. - let mut rng = rand::thread_rng(); - - // Nodes that the peerset knows about. - let mut known_nodes = HashSet::::new(); - // Nodes that we have reserved. Always a subset of `known_nodes`. - let mut reserved_nodes = HashSet::::new(); - - let (mut peerset, peerset_handle) = Peerset::from_config(PeersetConfig { - bootnodes: (0 .. Uniform::new_inclusive(0, 4).sample(&mut rng)).map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - id - }).collect(), - priority_groups: { - let list = (0 .. Uniform::new_inclusive(0, 2).sample(&mut rng)).map(|_| { - let id = PeerId::random(); - known_nodes.insert(id.clone()); - reserved_nodes.insert(id.clone()); - id - }).collect(); - vec![("reserved".to_owned(), list)] - }, - reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, - in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), - }); - - futures::executor::block_on(futures::future::poll_fn(move |cx| { - // List of nodes the user of `peerset` assumes it's connected to. Always a subset of - // `known_nodes`. 
- let mut connected_nodes = HashSet::::new(); - // List of nodes the user of `peerset` called `incoming` with and that haven't been - // accepted or rejected yet. - let mut incoming_nodes = HashMap::::new(); - // Next id for incoming connections. - let mut next_incoming_id = IncomingIndex(0); - - // Perform a certain number of actions while checking that the state is consistent. If we - // reach the end of the loop, the run has succeeded. - for _ in 0 .. 2500 { - // Each of these weights corresponds to an action that we may perform. - let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4]; - match WeightedIndex::new(&action_weights).unwrap().sample(&mut rng) { - // If we generate 0, poll the peerset. - 0 => match Stream::poll_next(Pin::new(&mut peerset), cx) { - Poll::Ready(Some(Message::Connect(id))) => { - if let Some(id) = incoming_nodes.iter().find(|(_, v)| **v == id).map(|(&id, _)| id) { - incoming_nodes.remove(&id); - } - assert!(connected_nodes.insert(id)); - } - Poll::Ready(Some(Message::Drop(id))) => { connected_nodes.remove(&id); } - Poll::Ready(Some(Message::Accept(n))) => - assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())), - Poll::Ready(Some(Message::Reject(n))) => - assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())), - Poll::Ready(None) => panic!(), - Poll::Pending => {} - } - - // If we generate 1, discover a new node. - 1 => { - let new_id = PeerId::random(); - known_nodes.insert(new_id.clone()); - peerset.discovered(iter::once(new_id)); - } - - // If we generate 2, adjust a random reputation. - 2 => if let Some(id) = known_nodes.iter().choose(&mut rng) { - let val = Uniform::new_inclusive(i32::min_value(), i32::max_value()).sample(&mut rng); - peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); - } - - // If we generate 3, disconnect from a random node. 
- 3 => if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { - connected_nodes.remove(&id); - peerset.dropped(id); - } - - // If we generate 4, connect to a random node. - 4 => if let Some(id) = known_nodes.iter() - .filter(|n| incoming_nodes.values().all(|m| m != *n) && !connected_nodes.contains(*n)) - .choose(&mut rng) { - peerset.incoming(id.clone(), next_incoming_id.clone()); - incoming_nodes.insert(next_incoming_id.clone(), id.clone()); - next_incoming_id.0 += 1; - } - - // 5 and 6 are the reserved-only mode. - 5 => peerset_handle.set_reserved_only(true), - 6 => peerset_handle.set_reserved_only(false), - - // 7 and 8 are about switching a random node in or out of reserved mode. - 7 => if let Some(id) = known_nodes.iter().filter(|n| !reserved_nodes.contains(*n)).choose(&mut rng) { - peerset_handle.add_reserved_peer(id.clone()); - reserved_nodes.insert(id.clone()); - } - 8 => if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { - reserved_nodes.remove(&id); - peerset_handle.remove_reserved_peer(id); - } - - _ => unreachable!() - } - } - - Poll::Ready(()) - })); + // PRNG to use. + let mut rng = rand::thread_rng(); + + // Nodes that the peerset knows about. + let mut known_nodes = HashSet::::new(); + // Nodes that we have reserved. Always a subset of `known_nodes`. 
+ let mut reserved_nodes = HashSet::::new(); + + let (mut peerset, peerset_handle) = Peerset::from_config(PeersetConfig { + bootnodes: (0..Uniform::new_inclusive(0, 4).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + id + }) + .collect(), + priority_groups: { + let list = (0..Uniform::new_inclusive(0, 2).sample(&mut rng)) + .map(|_| { + let id = PeerId::random(); + known_nodes.insert(id.clone()); + reserved_nodes.insert(id.clone()); + id + }) + .collect(); + vec![("reserved".to_owned(), list)] + }, + reserved_only: Uniform::new_inclusive(0, 10).sample(&mut rng) == 0, + in_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + out_peers: Uniform::new_inclusive(0, 25).sample(&mut rng), + }); + + futures::executor::block_on(futures::future::poll_fn(move |cx| { + // List of nodes the user of `peerset` assumes it's connected to. Always a subset of + // `known_nodes`. + let mut connected_nodes = HashSet::::new(); + // List of nodes the user of `peerset` called `incoming` with and that haven't been + // accepted or rejected yet. + let mut incoming_nodes = HashMap::::new(); + // Next id for incoming connections. + let mut next_incoming_id = IncomingIndex(0); + + // Perform a certain number of actions while checking that the state is consistent. If we + // reach the end of the loop, the run has succeeded. + for _ in 0..2500 { + // Each of these weights corresponds to an action that we may perform. + let action_weights = [150, 90, 90, 30, 30, 1, 1, 4, 4]; + match WeightedIndex::new(&action_weights) + .unwrap() + .sample(&mut rng) + { + // If we generate 0, poll the peerset. 
+ 0 => match Stream::poll_next(Pin::new(&mut peerset), cx) { + Poll::Ready(Some(Message::Connect(id))) => { + if let Some(id) = incoming_nodes + .iter() + .find(|(_, v)| **v == id) + .map(|(&id, _)| id) + { + incoming_nodes.remove(&id); + } + assert!(connected_nodes.insert(id)); + } + Poll::Ready(Some(Message::Drop(id))) => { + connected_nodes.remove(&id); + } + Poll::Ready(Some(Message::Accept(n))) => { + assert!(connected_nodes.insert(incoming_nodes.remove(&n).unwrap())) + } + Poll::Ready(Some(Message::Reject(n))) => { + assert!(!connected_nodes.contains(&incoming_nodes.remove(&n).unwrap())) + } + Poll::Ready(None) => panic!(), + Poll::Pending => {} + }, + + // If we generate 1, discover a new node. + 1 => { + let new_id = PeerId::random(); + known_nodes.insert(new_id.clone()); + peerset.discovered(iter::once(new_id)); + } + + // If we generate 2, adjust a random reputation. + 2 => { + if let Some(id) = known_nodes.iter().choose(&mut rng) { + let val = Uniform::new_inclusive(i32::min_value(), i32::max_value()) + .sample(&mut rng); + peerset_handle.report_peer(id.clone(), ReputationChange::new(val, "")); + } + } + + // If we generate 3, disconnect from a random node. + 3 => { + if let Some(id) = connected_nodes.iter().choose(&mut rng).cloned() { + connected_nodes.remove(&id); + peerset.dropped(id); + } + } + + // If we generate 4, connect to a random node. + 4 => { + if let Some(id) = known_nodes + .iter() + .filter(|n| { + incoming_nodes.values().all(|m| m != *n) + && !connected_nodes.contains(*n) + }) + .choose(&mut rng) + { + peerset.incoming(id.clone(), next_incoming_id.clone()); + incoming_nodes.insert(next_incoming_id.clone(), id.clone()); + next_incoming_id.0 += 1; + } + } + + // 5 and 6 are the reserved-only mode. + 5 => peerset_handle.set_reserved_only(true), + 6 => peerset_handle.set_reserved_only(false), + + // 7 and 8 are about switching a random node in or out of reserved mode. 
+ 7 => { + if let Some(id) = known_nodes + .iter() + .filter(|n| !reserved_nodes.contains(*n)) + .choose(&mut rng) + { + peerset_handle.add_reserved_peer(id.clone()); + reserved_nodes.insert(id.clone()); + } + } + 8 => { + if let Some(id) = reserved_nodes.iter().choose(&mut rng).cloned() { + reserved_nodes.remove(&id); + peerset_handle.remove_reserved_peer(id); + } + } + + _ => unreachable!(), + } + } + + Poll::Ready(()) + })); } diff --git a/client/rpc-api/src/author/error.rs b/client/rpc-api/src/author/error.rs index f1b5691008..0cae3ee2fa 100644 --- a/client/rpc-api/src/author/error.rs +++ b/client/rpc-api/src/author/error.rs @@ -28,46 +28,46 @@ pub type FutureResult = Box /// Author RPC errors. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Client error. - #[display(fmt="Client error: {}", _0)] - #[from(ignore)] - Client(Box), - /// Transaction pool error, - #[display(fmt="Transaction pool error: {}", _0)] - Pool(sp_transaction_pool::error::Error), - /// Verification error - #[display(fmt="Extrinsic verification error: {}", _0)] - #[from(ignore)] - Verification(Box), - /// Incorrect extrinsic format. - #[display(fmt="Invalid extrinsic format: {}", _0)] - BadFormat(codec::Error), - /// Incorrect seed phrase. - #[display(fmt="Invalid seed phrase/SURI")] - BadSeedPhrase, - /// Key type ID has an unknown format. - #[display(fmt="Invalid key type ID format (should be of length four)")] - BadKeyType, - /// Key type ID has some unsupported crypto. - #[display(fmt="The crypto of key type ID is unknown")] - UnsupportedKeyType, - /// Some random issue with the key store. Shouldn't happen. - #[display(fmt="The key store is unavailable")] - KeyStoreUnavailable, - /// Invalid session keys encoding. - #[display(fmt="Session keys are not encoded correctly")] - InvalidSessionKeys, + /// Client error. 
+ #[display(fmt = "Client error: {}", _0)] + #[from(ignore)] + Client(Box), + /// Transaction pool error, + #[display(fmt = "Transaction pool error: {}", _0)] + Pool(sp_transaction_pool::error::Error), + /// Verification error + #[display(fmt = "Extrinsic verification error: {}", _0)] + #[from(ignore)] + Verification(Box), + /// Incorrect extrinsic format. + #[display(fmt = "Invalid extrinsic format: {}", _0)] + BadFormat(codec::Error), + /// Incorrect seed phrase. + #[display(fmt = "Invalid seed phrase/SURI")] + BadSeedPhrase, + /// Key type ID has an unknown format. + #[display(fmt = "Invalid key type ID format (should be of length four)")] + BadKeyType, + /// Key type ID has some unsupported crypto. + #[display(fmt = "The crypto of key type ID is unknown")] + UnsupportedKeyType, + /// Some random issue with the key store. Shouldn't happen. + #[display(fmt = "The key store is unavailable")] + KeyStoreUnavailable, + /// Invalid session keys encoding. + #[display(fmt = "Session keys are not encoded correctly")] + InvalidSessionKeys, } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(&**err), - Error::Pool(ref err) => Some(err), - Error::Verification(ref err) => Some(&**err), - _ => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Client(ref err) => Some(&**err), + Error::Pool(ref err) => Some(err), + Error::Verification(ref err) => Some(&**err), + _ => None, + } + } } /// Base code for all authorship errors. 
@@ -95,10 +95,10 @@ const POOL_IMMEDIATELY_DROPPED: i64 = POOL_INVALID_TX + 6; const UNSUPPORTED_KEY_TYPE: i64 = POOL_INVALID_TX + 7; impl From for rpc::Error { - fn from(e: Error) -> Self { - use sp_transaction_pool::error::{Error as PoolError}; + fn from(e: Error) -> Self { + use sp_transaction_pool::error::Error as PoolError; - match e { + match e { Error::BadFormat(e) => rpc::Error { code: rpc::ErrorCode::ServerError(BAD_FORMAT), message: format!("Extrinsic has invalid format: {}", e).into(), @@ -154,5 +154,5 @@ impl From for rpc::Error { }, e => errors::internal(e), } - } + } } diff --git a/client/rpc-api/src/author/hash.rs b/client/rpc-api/src/author/hash.rs index 4287af8ede..df886fa70e 100644 --- a/client/rpc-api/src/author/hash.rs +++ b/client/rpc-api/src/author/hash.rs @@ -16,8 +16,8 @@ //! Extrinsic helpers for author RPC module. +use serde::{Deserialize, Serialize}; use sp_core::Bytes; -use serde::{Serialize, Deserialize}; /// RPC Extrinsic or hash /// @@ -25,8 +25,8 @@ use serde::{Serialize, Deserialize}; #[derive(Debug, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum ExtrinsicOrHash { - /// The hash of the extrinsic. - Hash(Hash), - /// Raw extrinsic bytes. - Extrinsic(Bytes), + /// The hash of the extrinsic. + Hash(Hash), + /// Raw extrinsic bytes. 
+ Extrinsic(Bytes), } diff --git a/client/rpc-api/src/author/mod.rs b/client/rpc-api/src/author/mod.rs index 49c4c996fa..85b3ccb67b 100644 --- a/client/rpc-api/src/author/mod.rs +++ b/client/rpc-api/src/author/mod.rs @@ -19,84 +19,82 @@ pub mod error; pub mod hash; +use self::error::{FutureResult, Result}; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use sp_core::Bytes; use sp_transaction_pool::TransactionStatus; -use self::error::{FutureResult, Result}; pub use self::gen_client::Client as AuthorClient; /// Substrate authoring RPC API #[rpc] pub trait AuthorApi { - /// RPC metadata - type Metadata; - - /// Submit hex-encoded extrinsic for inclusion in block. - #[rpc(name = "author_submitExtrinsic")] - fn submit_extrinsic(&self, extrinsic: Bytes) -> FutureResult; - - /// Insert a key into the keystore. - #[rpc(name = "author_insertKey")] - fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) -> Result<()>; - - /// Generate new session keys and returns the corresponding public keys. - #[rpc(name = "author_rotateKeys")] - fn rotate_keys(&self) -> Result; - - /// Checks if the keystore has private keys for the given session public keys. - /// - /// `session_keys` is the SCALE encoded session keys object from the runtime. - /// - /// Returns `true` iff all private keys could be found. - #[rpc(name = "author_hasSessionKeys")] - fn has_session_keys(&self, session_keys: Bytes) -> Result; - - /// Checks if the keystore has private keys for the given public key and key type. - /// - /// Returns `true` if a private key could be found. - #[rpc(name = "author_hasKey")] - fn has_key(&self, public_key: Bytes, key_type: String) -> Result; - - /// Returns all pending extrinsics, potentially grouped by sender. - #[rpc(name = "author_pendingExtrinsics")] - fn pending_extrinsics(&self) -> Result>; - - /// Remove given extrinsic from the pool and temporarily ban it to prevent reimporting. 
- #[rpc(name = "author_removeExtrinsic")] - fn remove_extrinsic(&self, - bytes_or_hash: Vec> - ) -> Result>; - - /// Submit an extrinsic to watch. - /// - /// See [`TransactionStatus`](sp_transaction_pool::TransactionStatus) for details on transaction - /// life cycle. - #[pubsub( - subscription = "author_extrinsicUpdate", - subscribe, - name = "author_submitAndWatchExtrinsic" - )] - fn watch_extrinsic(&self, - metadata: Self::Metadata, - subscriber: Subscriber>, - bytes: Bytes - ); - - /// Unsubscribe from extrinsic watching. - #[pubsub( - subscription = "author_extrinsicUpdate", - unsubscribe, - name = "author_unwatchExtrinsic" - )] - fn unwatch_extrinsic(&self, - metadata: Option, - id: SubscriptionId - ) -> Result; + /// RPC metadata + type Metadata; + + /// Submit hex-encoded extrinsic for inclusion in block. + #[rpc(name = "author_submitExtrinsic")] + fn submit_extrinsic(&self, extrinsic: Bytes) -> FutureResult; + + /// Insert a key into the keystore. + #[rpc(name = "author_insertKey")] + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()>; + + /// Generate new session keys and returns the corresponding public keys. + #[rpc(name = "author_rotateKeys")] + fn rotate_keys(&self) -> Result; + + /// Checks if the keystore has private keys for the given session public keys. + /// + /// `session_keys` is the SCALE encoded session keys object from the runtime. + /// + /// Returns `true` iff all private keys could be found. + #[rpc(name = "author_hasSessionKeys")] + fn has_session_keys(&self, session_keys: Bytes) -> Result; + + /// Checks if the keystore has private keys for the given public key and key type. + /// + /// Returns `true` if a private key could be found. + #[rpc(name = "author_hasKey")] + fn has_key(&self, public_key: Bytes, key_type: String) -> Result; + + /// Returns all pending extrinsics, potentially grouped by sender. 
+ #[rpc(name = "author_pendingExtrinsics")] + fn pending_extrinsics(&self) -> Result>; + + /// Remove given extrinsic from the pool and temporarily ban it to prevent reimporting. + #[rpc(name = "author_removeExtrinsic")] + fn remove_extrinsic( + &self, + bytes_or_hash: Vec>, + ) -> Result>; + + /// Submit an extrinsic to watch. + /// + /// See [`TransactionStatus`](sp_transaction_pool::TransactionStatus) for details on transaction + /// life cycle. + #[pubsub( + subscription = "author_extrinsicUpdate", + subscribe, + name = "author_submitAndWatchExtrinsic" + )] + fn watch_extrinsic( + &self, + metadata: Self::Metadata, + subscriber: Subscriber>, + bytes: Bytes, + ); + + /// Unsubscribe from extrinsic watching. + #[pubsub( + subscription = "author_extrinsicUpdate", + unsubscribe, + name = "author_unwatchExtrinsic" + )] + fn unwatch_extrinsic( + &self, + metadata: Option, + id: SubscriptionId, + ) -> Result; } diff --git a/client/rpc-api/src/chain/error.rs b/client/rpc-api/src/chain/error.rs index ffa4d82bdf..0ab929f35e 100644 --- a/client/rpc-api/src/chain/error.rs +++ b/client/rpc-api/src/chain/error.rs @@ -14,7 +14,6 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . - //! Error helpers for Chain RPC module. use crate::errors; @@ -29,34 +28,34 @@ pub type FutureResult = Box /// Chain RPC errors. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Client error. - #[display(fmt="Client error: {}", _0)] - Client(Box), - /// Other error type. - Other(String), + /// Client error. + #[display(fmt = "Client error: {}", _0)] + Client(Box), + /// Other error type. 
+ Other(String), } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(&**err), - _ => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Client(ref err) => Some(&**err), + _ => None, + } + } } /// Base error code for all chain errors. const BASE_ERROR: i64 = 3000; impl From for rpc::Error { - fn from(e: Error) -> Self { - match e { - Error::Other(message) => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message, - data: None, - }, - e => errors::internal(e), - } - } + fn from(e: Error) -> Self { + match e { + Error::Other(message) => rpc::Error { + code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), + message, + data: None, + }, + e => errors::internal(e), + } + } } diff --git a/client/rpc-api/src/chain/mod.rs b/client/rpc-api/src/chain/mod.rs index 2ab3851d37..de2c0292b5 100644 --- a/client/rpc-api/src/chain/mod.rs +++ b/client/rpc-api/src/chain/mod.rs @@ -18,95 +18,103 @@ pub mod error; -use jsonrpc_core::Result as RpcResult; +use self::error::{FutureResult, Result}; use jsonrpc_core::futures::Future; +use jsonrpc_core::Result as RpcResult; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use sp_rpc::{number::NumberOrHex, list::ListOrValue}; -use self::error::{FutureResult, Result}; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; pub use self::gen_client::Client as ChainClient; /// Substrate blockchain API #[rpc] pub trait ChainApi { - /// RPC metadata - type Metadata; - - /// Get header of a relay chain block. - #[rpc(name = "chain_getHeader")] - fn header(&self, hash: Option) -> FutureResult>; - - /// Get header and body of a relay chain block. - #[rpc(name = "chain_getBlock")] - fn block(&self, hash: Option) -> FutureResult>; - - /// Get hash of the n-th block in the canon chain. - /// - /// By default returns latest block hash. 
- #[rpc(name = "chain_getBlockHash", alias("chain_getHead"))] - fn block_hash( - &self, - hash: Option>>, - ) -> Result>>; - - /// Get hash of the last finalized block in the canon chain. - #[rpc(name = "chain_getFinalizedHead", alias("chain_getFinalisedHead"))] - fn finalized_head(&self) -> Result; - - /// All head subscription - #[pubsub(subscription = "chain_allHead", subscribe, name = "chain_subscribeAllHeads")] - fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); - - /// Unsubscribe from all head subscription. - #[pubsub(subscription = "chain_allHead", unsubscribe, name = "chain_unsubscribeAllHeads")] - fn unsubscribe_all_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; - - /// New head subscription - #[pubsub( - subscription = "chain_newHead", - subscribe, - name = "chain_subscribeNewHeads", - alias("subscribe_newHead", "chain_subscribeNewHead") - )] - fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); - - /// Unsubscribe from new head subscription. - #[pubsub( - subscription = "chain_newHead", - unsubscribe, - name = "chain_unsubscribeNewHeads", - alias("unsubscribe_newHead", "chain_unsubscribeNewHead") - )] - fn unsubscribe_new_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; - - /// Finalized head subscription - #[pubsub( - subscription = "chain_finalizedHead", - subscribe, - name = "chain_subscribeFinalizedHeads", - alias("chain_subscribeFinalisedHeads") - )] - fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); - - /// Unsubscribe from finalized head subscription. - #[pubsub( - subscription = "chain_finalizedHead", - unsubscribe, - name = "chain_unsubscribeFinalizedHeads", - alias("chain_unsubscribeFinalisedHeads") - )] - fn unsubscribe_finalized_heads( - &self, - metadata: Option, - id: SubscriptionId, - ) -> RpcResult; + /// RPC metadata + type Metadata; + + /// Get header of a relay chain block. + #[rpc(name = "chain_getHeader")] + fn header(&self, hash: Option) -> FutureResult>; + + /// Get header and body of a relay chain block. + #[rpc(name = "chain_getBlock")] + fn block(&self, hash: Option) -> FutureResult>; + + /// Get hash of the n-th block in the canon chain. + /// + /// By default returns latest block hash. + #[rpc(name = "chain_getBlockHash", alias("chain_getHead"))] + fn block_hash( + &self, + hash: Option>>, + ) -> Result>>; + + /// Get hash of the last finalized block in the canon chain. + #[rpc(name = "chain_getFinalizedHead", alias("chain_getFinalisedHead"))] + fn finalized_head(&self) -> Result; + + /// All head subscription + #[pubsub( + subscription = "chain_allHead", + subscribe, + name = "chain_subscribeAllHeads" + )] + fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); + + /// Unsubscribe from all head subscription. + #[pubsub( + subscription = "chain_allHead", + unsubscribe, + name = "chain_unsubscribeAllHeads" + )] + fn unsubscribe_all_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; + + /// New head subscription + #[pubsub( + subscription = "chain_newHead", + subscribe, + name = "chain_subscribeNewHeads", + alias("subscribe_newHead", "chain_subscribeNewHead") + )] + fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); + + /// Unsubscribe from new head subscription. + #[pubsub( + subscription = "chain_newHead", + unsubscribe, + name = "chain_unsubscribeNewHeads", + alias("unsubscribe_newHead", "chain_unsubscribeNewHead") + )] + fn unsubscribe_new_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; + + /// Finalized head subscription + #[pubsub( + subscription = "chain_finalizedHead", + subscribe, + name = "chain_subscribeFinalizedHeads", + alias("chain_subscribeFinalisedHeads") + )] + fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber
); + + /// Unsubscribe from finalized head subscription. + #[pubsub( + subscription = "chain_finalizedHead", + unsubscribe, + name = "chain_unsubscribeFinalizedHeads", + alias("chain_unsubscribeFinalisedHeads") + )] + fn unsubscribe_finalized_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; } diff --git a/client/rpc-api/src/errors.rs b/client/rpc-api/src/errors.rs index b75c34ead3..f1e1f5344c 100644 --- a/client/rpc-api/src/errors.rs +++ b/client/rpc-api/src/errors.rs @@ -17,10 +17,10 @@ use log::warn; pub fn internal(e: E) -> jsonrpc_core::Error { - warn!("Unknown error: {:?}", e); - jsonrpc_core::Error { - code: jsonrpc_core::ErrorCode::InternalError, - message: "Unknown error occurred".into(), - data: Some(format!("{:?}", e).into()), - } + warn!("Unknown error: {:?}", e); + jsonrpc_core::Error { + code: jsonrpc_core::ErrorCode::InternalError, + message: "Unknown error occurred".into(), + data: Some(format!("{:?}", e).into()), + } } diff --git a/client/rpc-api/src/helpers.rs b/client/rpc-api/src/helpers.rs index 912a5664b3..abca1a4c76 100644 --- a/client/rpc-api/src/helpers.rs +++ b/client/rpc-api/src/helpers.rs @@ -14,18 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use jsonrpc_core::futures::prelude::*; use futures::{channel::oneshot, compat::Compat}; +use jsonrpc_core::futures::prelude::*; /// Wraps around `oneshot::Receiver` and adjusts the error type to produce an internal error if the /// sender gets dropped. 
pub struct Receiver(pub Compat>); impl Future for Receiver { - type Item = T; - type Error = jsonrpc_core::Error; + type Item = T; + type Error = jsonrpc_core::Error; - fn poll(&mut self) -> Poll { - self.0.poll().map_err(|_| jsonrpc_core::Error::internal_error()) - } + fn poll(&mut self) -> Poll { + self.0 + .poll() + .map_err(|_| jsonrpc_core::Error::internal_error()) + } } diff --git a/client/rpc-api/src/lib.rs b/client/rpc-api/src/lib.rs index 8ad2d94bfd..89bcf6bb24 100644 --- a/client/rpc-api/src/lib.rs +++ b/client/rpc-api/src/lib.rs @@ -24,9 +24,9 @@ mod errors; mod helpers; mod subscriptions; +pub use helpers::Receiver; pub use jsonrpc_core::IoHandlerExtension as RpcExtension; pub use subscriptions::{Subscriptions, TaskExecutor}; -pub use helpers::Receiver; pub mod author; pub mod chain; diff --git a/client/rpc-api/src/offchain/error.rs b/client/rpc-api/src/offchain/error.rs index c28a2a2f39..e913d0dfff 100644 --- a/client/rpc-api/src/offchain/error.rs +++ b/client/rpc-api/src/offchain/error.rs @@ -24,28 +24,28 @@ pub type Result = std::result::Result; /// Offchain RPC errors. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Unavailable storage kind error. - #[display(fmt="This storage kind is not available yet.")] - UnavailableStorageKind, + /// Unavailable storage kind error. + #[display(fmt = "This storage kind is not available yet.")] + UnavailableStorageKind, } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - None - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + None + } } /// Base error code for all offchain errors. 
const BASE_ERROR: i64 = 5000; impl From for rpc::Error { - fn from(e: Error) -> Self { - match e { - Error::UnavailableStorageKind => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: "This storage kind is not available yet" .into(), - data: None, - }, - } - } + fn from(e: Error) -> Self { + match e { + Error::UnavailableStorageKind => rpc::Error { + code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), + message: "This storage kind is not available yet".into(), + data: None, + }, + } + } } diff --git a/client/rpc-api/src/offchain/mod.rs b/client/rpc-api/src/offchain/mod.rs index bbe466ff59..4bdabc6709 100644 --- a/client/rpc-api/src/offchain/mod.rs +++ b/client/rpc-api/src/offchain/mod.rs @@ -18,20 +18,20 @@ pub mod error; -use jsonrpc_derive::rpc; use self::error::Result; -use sp_core::{Bytes, offchain::StorageKind}; +use jsonrpc_derive::rpc; +use sp_core::{offchain::StorageKind, Bytes}; pub use self::gen_client::Client as OffchainClient; /// Substrate offchain RPC API #[rpc] pub trait OffchainApi { - /// Set offchain local storage under given key and prefix. - #[rpc(name = "offchain_localStorageSet")] - fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> Result<()>; + /// Set offchain local storage under given key and prefix. + #[rpc(name = "offchain_localStorageSet")] + fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> Result<()>; - /// Get offchain local storage under given key and prefix. - #[rpc(name = "offchain_localStorageGet")] - fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> Result>; + /// Get offchain local storage under given key and prefix. 
+ #[rpc(name = "offchain_localStorageGet")] + fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> Result>; } diff --git a/client/rpc-api/src/state/error.rs b/client/rpc-api/src/state/error.rs index c9c2cf4e45..d62eb76ed8 100644 --- a/client/rpc-api/src/state/error.rs +++ b/client/rpc-api/src/state/error.rs @@ -28,55 +28,60 @@ pub type FutureResult = Box /// State RPC errors. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Client error. - #[display(fmt="Client error: {}", _0)] - Client(Box), - /// Provided block range couldn't be resolved to a list of blocks. - #[display(fmt = "Cannot resolve a block range ['{:?}' ... '{:?}]. {}", from, to, details)] - InvalidBlockRange { - /// Beginning of the block range. - from: String, - /// End of the block range. - to: String, - /// Details of the error message. - details: String, - }, - /// Provided count exceeds maximum value. - #[display(fmt = "count exceeds maximum value. value: {}, max: {}", value, max)] - InvalidCount { - /// Provided value - value: u32, - /// Maximum allowed value - max: u32, - }, + /// Client error. + #[display(fmt = "Client error: {}", _0)] + Client(Box), + /// Provided block range couldn't be resolved to a list of blocks. + #[display( + fmt = "Cannot resolve a block range ['{:?}' ... '{:?}]. {}", + from, + to, + details + )] + InvalidBlockRange { + /// Beginning of the block range. + from: String, + /// End of the block range. + to: String, + /// Details of the error message. + details: String, + }, + /// Provided count exceeds maximum value. + #[display(fmt = "count exceeds maximum value. 
value: {}, max: {}", value, max)] + InvalidCount { + /// Provided value + value: u32, + /// Maximum allowed value + max: u32, + }, } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(&**err), - _ => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Client(ref err) => Some(&**err), + _ => None, + } + } } /// Base code for all state errors. const BASE_ERROR: i64 = 4000; impl From for rpc::Error { - fn from(e: Error) -> Self { - match e { - Error::InvalidBlockRange { .. } => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: format!("{}", e), - data: None, - }, - Error::InvalidCount { .. } => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 2), - message: format!("{}", e), - data: None, - }, - e => errors::internal(e), - } - } + fn from(e: Error) -> Self { + match e { + Error::InvalidBlockRange { .. } => rpc::Error { + code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), + message: format!("{}", e), + data: None, + }, + Error::InvalidCount { .. 
} => rpc::Error { + code: rpc::ErrorCode::ServerError(BASE_ERROR + 2), + message: format!("{}", e), + data: None, + }, + e => errors::internal(e), + } + } } diff --git a/client/rpc-api/src/state/mod.rs b/client/rpc-api/src/state/mod.rs index d29e46a4b5..a2f12fa591 100644 --- a/client/rpc-api/src/state/mod.rs +++ b/client/rpc-api/src/state/mod.rs @@ -18,159 +18,185 @@ pub mod error; -use jsonrpc_core::Result as RpcResult; +use self::error::FutureResult; use jsonrpc_core::futures::Future; +use jsonrpc_core::Result as RpcResult; use jsonrpc_derive::rpc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; +use sp_core::storage::{StorageChangeSet, StorageData, StorageKey}; use sp_core::Bytes; -use sp_core::storage::{StorageKey, StorageData, StorageChangeSet}; use sp_version::RuntimeVersion; -use self::error::FutureResult; pub use self::gen_client::Client as StateClient; /// Substrate state API #[rpc] pub trait StateApi { - /// RPC Metadata - type Metadata; - - /// Call a contract at a block's state. - #[rpc(name = "state_call", alias("state_callAt"))] - fn call(&self, name: String, bytes: Bytes, hash: Option) -> FutureResult; - - /// DEPRECATED: Please use `state_getKeysPaged` with proper paging support. - /// Returns the keys with prefix, leave empty to get all the keys. - #[rpc(name = "state_getKeys")] - fn storage_keys(&self, prefix: StorageKey, hash: Option) -> FutureResult>; - - /// Returns the keys with prefix, leave empty to get all the keys - #[rpc(name = "state_getPairs")] - fn storage_pairs(&self, prefix: StorageKey, hash: Option) -> FutureResult>; - - /// Returns the keys with prefix with pagination support. - /// Up to `count` keys will be returned. - /// If `start_key` is passed, return next keys in storage in lexicographic order. 
- #[rpc(name = "state_getKeysPaged", alias("state_getKeysPagedAt"))] - fn storage_keys_paged( - &self, - prefix: Option, - count: u32, - start_key: Option, - hash: Option, - ) -> FutureResult>; - - /// Returns a storage entry at a specific block's state. - #[rpc(name = "state_getStorage", alias("state_getStorageAt"))] - fn storage(&self, key: StorageKey, hash: Option) -> FutureResult>; - - /// Returns the hash of a storage entry at a block's state. - #[rpc(name = "state_getStorageHash", alias("state_getStorageHashAt"))] - fn storage_hash(&self, key: StorageKey, hash: Option) -> FutureResult>; - - /// Returns the size of a storage entry at a block's state. - #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] - fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; - - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - #[rpc(name = "state_getChildKeys")] - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - #[rpc(name = "state_getChildStorage")] - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. - #[rpc(name = "state_getChildStorageHash")] - fn child_storage_hash( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. 
- #[rpc(name = "state_getChildStorageSize")] - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - hash: Option - ) -> FutureResult>; - - /// Returns the runtime metadata as an opaque blob. - #[rpc(name = "state_getMetadata")] - fn metadata(&self, hash: Option) -> FutureResult; - - /// Get the runtime version. - #[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))] - fn runtime_version(&self, hash: Option) -> FutureResult; - - /// Query historical storage entries (by key) starting from a block given as the second parameter. - /// - /// NOTE This first returned result contains the initial state of storage for all keys. - /// Subsequent values in the vector represent changes to the previous state (diffs). - #[rpc(name = "state_queryStorage")] - fn query_storage( - &self, - keys: Vec, - block: Hash, - hash: Option - ) -> FutureResult>>; - - /// Query storage entries (by key) starting at block hash given as the second parameter. 
- #[rpc(name = "state_queryStorageAt")] - fn query_storage_at( - &self, - keys: Vec, - at: Option, - ) -> FutureResult>>; - - /// New runtime version subscription - #[pubsub( - subscription = "state_runtimeVersion", - subscribe, - name = "state_subscribeRuntimeVersion", - alias("chain_subscribeRuntimeVersion") - )] - fn subscribe_runtime_version(&self, metadata: Self::Metadata, subscriber: Subscriber); - - /// Unsubscribe from runtime version subscription - #[pubsub( - subscription = "state_runtimeVersion", - unsubscribe, - name = "state_unsubscribeRuntimeVersion", - alias("chain_unsubscribeRuntimeVersion") - )] - fn unsubscribe_runtime_version(&self, metadata: Option, id: SubscriptionId) -> RpcResult; - - /// New storage subscription - #[pubsub(subscription = "state_storage", subscribe, name = "state_subscribeStorage")] - fn subscribe_storage( - &self, metadata: Self::Metadata, subscriber: Subscriber>, keys: Option> - ); - - /// Unsubscribe from storage subscription - #[pubsub(subscription = "state_storage", unsubscribe, name = "state_unsubscribeStorage")] - fn unsubscribe_storage( - &self, metadata: Option, id: SubscriptionId - ) -> RpcResult; + /// RPC Metadata + type Metadata; + + /// Call a contract at a block's state. + #[rpc(name = "state_call", alias("state_callAt"))] + fn call(&self, name: String, bytes: Bytes, hash: Option) -> FutureResult; + + /// DEPRECATED: Please use `state_getKeysPaged` with proper paging support. + /// Returns the keys with prefix, leave empty to get all the keys. + #[rpc(name = "state_getKeys")] + fn storage_keys(&self, prefix: StorageKey, hash: Option) + -> FutureResult>; + + /// Returns the keys with prefix, leave empty to get all the keys + #[rpc(name = "state_getPairs")] + fn storage_pairs( + &self, + prefix: StorageKey, + hash: Option, + ) -> FutureResult>; + + /// Returns the keys with prefix with pagination support. + /// Up to `count` keys will be returned. 
+ /// If `start_key` is passed, return next keys in storage in lexicographic order. + #[rpc(name = "state_getKeysPaged", alias("state_getKeysPagedAt"))] + fn storage_keys_paged( + &self, + prefix: Option, + count: u32, + start_key: Option, + hash: Option, + ) -> FutureResult>; + + /// Returns a storage entry at a specific block's state. + #[rpc(name = "state_getStorage", alias("state_getStorageAt"))] + fn storage(&self, key: StorageKey, hash: Option) -> FutureResult>; + + /// Returns the hash of a storage entry at a block's state. + #[rpc(name = "state_getStorageHash", alias("state_getStorageHashAt"))] + fn storage_hash(&self, key: StorageKey, hash: Option) -> FutureResult>; + + /// Returns the size of a storage entry at a block's state. + #[rpc(name = "state_getStorageSize", alias("state_getStorageSizeAt"))] + fn storage_size(&self, key: StorageKey, hash: Option) -> FutureResult>; + + /// Returns the keys with prefix from a child storage, leave empty to get all the keys + #[rpc(name = "state_getChildKeys")] + fn child_storage_keys( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + prefix: StorageKey, + hash: Option, + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + #[rpc(name = "state_getChildStorage")] + fn child_storage( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + hash: Option, + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. + #[rpc(name = "state_getChildStorageHash")] + fn child_storage_hash( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + hash: Option, + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. 
+ #[rpc(name = "state_getChildStorageSize")] + fn child_storage_size( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + hash: Option, + ) -> FutureResult>; + + /// Returns the runtime metadata as an opaque blob. + #[rpc(name = "state_getMetadata")] + fn metadata(&self, hash: Option) -> FutureResult; + + /// Get the runtime version. + #[rpc(name = "state_getRuntimeVersion", alias("chain_getRuntimeVersion"))] + fn runtime_version(&self, hash: Option) -> FutureResult; + + /// Query historical storage entries (by key) starting from a block given as the second parameter. + /// + /// NOTE This first returned result contains the initial state of storage for all keys. + /// Subsequent values in the vector represent changes to the previous state (diffs). + #[rpc(name = "state_queryStorage")] + fn query_storage( + &self, + keys: Vec, + block: Hash, + hash: Option, + ) -> FutureResult>>; + + /// Query storage entries (by key) starting at block hash given as the second parameter. 
+ #[rpc(name = "state_queryStorageAt")] + fn query_storage_at( + &self, + keys: Vec, + at: Option, + ) -> FutureResult>>; + + /// New runtime version subscription + #[pubsub( + subscription = "state_runtimeVersion", + subscribe, + name = "state_subscribeRuntimeVersion", + alias("chain_subscribeRuntimeVersion") + )] + fn subscribe_runtime_version( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ); + + /// Unsubscribe from runtime version subscription + #[pubsub( + subscription = "state_runtimeVersion", + unsubscribe, + name = "state_unsubscribeRuntimeVersion", + alias("chain_unsubscribeRuntimeVersion") + )] + fn unsubscribe_runtime_version( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; + + /// New storage subscription + #[pubsub( + subscription = "state_storage", + subscribe, + name = "state_subscribeStorage" + )] + fn subscribe_storage( + &self, + metadata: Self::Metadata, + subscriber: Subscriber>, + keys: Option>, + ); + + /// Unsubscribe from storage subscription + #[pubsub( + subscription = "state_storage", + unsubscribe, + name = "state_unsubscribeStorage" + )] + fn unsubscribe_storage( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult; } diff --git a/client/rpc-api/src/subscriptions.rs b/client/rpc-api/src/subscriptions.rs index 54881bad51..4003201d13 100644 --- a/client/rpc-api/src/subscriptions.rs +++ b/client/rpc-api/src/subscriptions.rs @@ -15,37 +15,44 @@ // along with Substrate. If not, see . 
use std::collections::HashMap; -use std::sync::{Arc, atomic::{self, AtomicUsize}}; +use std::sync::{ + atomic::{self, AtomicUsize}, + Arc, +}; +use jsonrpc_core::futures::sync::oneshot; +use jsonrpc_core::futures::{future, Future}; +use jsonrpc_pubsub::{ + typed::{Sink, Subscriber}, + SubscriptionId, +}; use log::{error, warn}; -use jsonrpc_pubsub::{SubscriptionId, typed::{Sink, Subscriber}}; use parking_lot::Mutex; -use jsonrpc_core::futures::sync::oneshot; -use jsonrpc_core::futures::{Future, future}; type Id = u64; /// Alias for a an implementation of `futures::future::Executor`. -pub type TaskExecutor = Arc + Send>> + Send + Sync>; +pub type TaskExecutor = + Arc + Send>> + Send + Sync>; /// Generate unique ids for subscriptions. #[derive(Clone, Debug)] pub struct IdProvider { - next_id: Arc, + next_id: Arc, } impl Default for IdProvider { - fn default() -> Self { - IdProvider { - next_id: Arc::new(AtomicUsize::new(1)), - } - } + fn default() -> Self { + IdProvider { + next_id: Arc::new(AtomicUsize::new(1)), + } + } } impl IdProvider { - /// Returns next id for the subscription. - pub fn next_id(&self) -> Id { - self.next_id.fetch_add(1, atomic::Ordering::AcqRel) as u64 - } + /// Returns next id for the subscription. + pub fn next_id(&self) -> Id { + self.next_id.fetch_add(1, atomic::Ordering::AcqRel) as u64 + } } /// Subscriptions manager. @@ -54,66 +61,67 @@ impl IdProvider { /// driving the sinks into completion. #[derive(Clone)] pub struct Subscriptions { - next_id: IdProvider, - active_subscriptions: Arc>>>, - executor: TaskExecutor, + next_id: IdProvider, + active_subscriptions: Arc>>>, + executor: TaskExecutor, } impl Subscriptions { - /// Creates new `Subscriptions` object. - pub fn new(executor: TaskExecutor) -> Self { - Subscriptions { - next_id: Default::default(), - active_subscriptions: Default::default(), - executor, - } - } - - /// Borrows the internal task executor. 
- /// - /// This can be used to spawn additional tasks on the underlying event loop. - pub fn executor(&self) -> &TaskExecutor { - &self.executor - } - - /// Creates new subscription for given subscriber. - /// - /// Second parameter is a function that converts Subscriber sink into a future. - /// This future will be driven to completion by the underlying event loop - /// or will be cancelled in case #cancel is invoked. - pub fn add(&self, subscriber: Subscriber, into_future: G) -> SubscriptionId where - G: FnOnce(Sink) -> R, - R: future::IntoFuture, - F: future::Future + Send + 'static, - { - let id = self.next_id.next_id(); - let subscription_id: SubscriptionId = id.into(); - if let Ok(sink) = subscriber.assign_id(subscription_id.clone()) { - let (tx, rx) = oneshot::channel(); - let future = into_future(sink) - .into_future() - .select(rx.map_err(|e| warn!("Error timeing out: {:?}", e))) - .then(|_| Ok(())); - - self.active_subscriptions.lock().insert(id, tx); - if self.executor.execute(Box::new(future)).is_err() { - error!("Failed to spawn RPC subscription task"); - } - } - - subscription_id - } - - /// Cancel subscription. - /// - /// Returns true if subscription existed or false otherwise. - pub fn cancel(&self, id: SubscriptionId) -> bool { - if let SubscriptionId::Number(id) = id { - if let Some(tx) = self.active_subscriptions.lock().remove(&id) { - let _ = tx.send(()); - return true; - } - } - false - } + /// Creates new `Subscriptions` object. + pub fn new(executor: TaskExecutor) -> Self { + Subscriptions { + next_id: Default::default(), + active_subscriptions: Default::default(), + executor, + } + } + + /// Borrows the internal task executor. + /// + /// This can be used to spawn additional tasks on the underlying event loop. + pub fn executor(&self) -> &TaskExecutor { + &self.executor + } + + /// Creates new subscription for given subscriber. + /// + /// Second parameter is a function that converts Subscriber sink into a future. 
+ /// This future will be driven to completion by the underlying event loop + /// or will be cancelled in case #cancel is invoked. + pub fn add(&self, subscriber: Subscriber, into_future: G) -> SubscriptionId + where + G: FnOnce(Sink) -> R, + R: future::IntoFuture, + F: future::Future + Send + 'static, + { + let id = self.next_id.next_id(); + let subscription_id: SubscriptionId = id.into(); + if let Ok(sink) = subscriber.assign_id(subscription_id.clone()) { + let (tx, rx) = oneshot::channel(); + let future = into_future(sink) + .into_future() + .select(rx.map_err(|e| warn!("Error timeing out: {:?}", e))) + .then(|_| Ok(())); + + self.active_subscriptions.lock().insert(id, tx); + if self.executor.execute(Box::new(future)).is_err() { + error!("Failed to spawn RPC subscription task"); + } + } + + subscription_id + } + + /// Cancel subscription. + /// + /// Returns true if subscription existed or false otherwise. + pub fn cancel(&self, id: SubscriptionId) -> bool { + if let SubscriptionId::Number(id) = id { + if let Some(tx) = self.active_subscriptions.lock().remove(&id) { + let _ = tx.send(()); + return true; + } + } + false + } } diff --git a/client/rpc-api/src/system/error.rs b/client/rpc-api/src/system/error.rs index fbb4e44bcb..66a28935e5 100644 --- a/client/rpc-api/src/system/error.rs +++ b/client/rpc-api/src/system/error.rs @@ -25,11 +25,11 @@ pub type Result = std::result::Result; /// System RPC errors. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Provided block range couldn't be resolved to a list of blocks. - #[display(fmt = "Node is not fully functional: {}", _0)] - NotHealthy(Health), - /// Peer argument is malformatted. - MalformattedPeerArg(String), + /// Provided block range couldn't be resolved to a list of blocks. + #[display(fmt = "Node is not fully functional: {}", _0)] + NotHealthy(Health), + /// Peer argument is malformatted. 
+ MalformattedPeerArg(String), } impl std::error::Error for Error {} @@ -38,18 +38,18 @@ impl std::error::Error for Error {} const BASE_ERROR: i64 = 2000; impl From for rpc::Error { - fn from(e: Error) -> Self { - match e { - Error::NotHealthy(ref h) => rpc::Error { - code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), - message: format!("{}", e), - data: serde_json::to_value(h).ok(), - }, - Error::MalformattedPeerArg(ref e) => rpc::Error { - code :rpc::ErrorCode::ServerError(BASE_ERROR + 2), - message: e.clone(), - data: None, - } - } - } + fn from(e: Error) -> Self { + match e { + Error::NotHealthy(ref h) => rpc::Error { + code: rpc::ErrorCode::ServerError(BASE_ERROR + 1), + message: format!("{}", e), + data: serde_json::to_value(h).ok(), + }, + Error::MalformattedPeerArg(ref e) => rpc::Error { + code: rpc::ErrorCode::ServerError(BASE_ERROR + 2), + message: e.clone(), + data: None, + }, + } + } } diff --git a/client/rpc-api/src/system/helpers.rs b/client/rpc-api/src/system/helpers.rs index 46461d6988..ce3cd1c06f 100644 --- a/client/rpc-api/src/system/helpers.rs +++ b/client/rpc-api/src/system/helpers.rs @@ -16,103 +16,108 @@ //! Substrate system API helpers. +use serde::{Deserialize, Serialize}; +use sp_chain_spec::{ChainType, Properties}; use std::fmt; -use serde::{Serialize, Deserialize}; -use sp_chain_spec::{Properties, ChainType}; /// Running node's static details. #[derive(Clone, Debug)] pub struct SystemInfo { - /// Implementation name. - pub impl_name: String, - /// Implementation version. - pub impl_version: String, - /// Chain name. - pub chain_name: String, - /// A custom set of properties defined in the chain spec. - pub properties: Properties, - /// The type of this chain. - pub chain_type: ChainType, + /// Implementation name. + pub impl_name: String, + /// Implementation version. + pub impl_version: String, + /// Chain name. + pub chain_name: String, + /// A custom set of properties defined in the chain spec. 
+ pub properties: Properties, + /// The type of this chain. + pub chain_type: ChainType, } /// Health struct returned by the RPC #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct Health { - /// Number of connected peers - pub peers: usize, - /// Is the node syncing - pub is_syncing: bool, - /// Should this node have any peers - /// - /// Might be false for local chains or when running without discovery. - pub should_have_peers: bool, + /// Number of connected peers + pub peers: usize, + /// Is the node syncing + pub is_syncing: bool, + /// Should this node have any peers + /// + /// Might be false for local chains or when running without discovery. + pub should_have_peers: bool, } impl fmt::Display for Health { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "{} peers ({})", self.peers, if self.is_syncing { - "syncing" - } else { "idle" }) - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!( + fmt, + "{} peers ({})", + self.peers, + if self.is_syncing { "syncing" } else { "idle" } + ) + } } /// Network Peer information #[derive(Debug, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct PeerInfo { - /// Peer ID - pub peer_id: String, - /// Roles - pub roles: String, - /// Protocol version - pub protocol_version: u32, - /// Peer best block hash - pub best_hash: Hash, - /// Peer best block number - pub best_number: Number, + /// Peer ID + pub peer_id: String, + /// Roles + pub roles: String, + /// Protocol version + pub protocol_version: u32, + /// Peer best block hash + pub best_hash: Hash, + /// Peer best block number + pub best_number: Number, } /// The role the node is running as #[derive(Debug, PartialEq, Serialize, Deserialize)] pub enum NodeRole { - /// The node is a full node - Full, - /// The node is a light client - LightClient, - /// The node is an authority - Authority, - /// The node is a sentry - Sentry, + /// The node is a full node 
+ Full, + /// The node is a light client + LightClient, + /// The node is an authority + Authority, + /// The node is a sentry + Sentry, } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn should_serialize_health() { - assert_eq!( - ::serde_json::to_string(&Health { - peers: 1, - is_syncing: false, - should_have_peers: true, - }).unwrap(), - r#"{"peers":1,"isSyncing":false,"shouldHavePeers":true}"#, - ); - } + #[test] + fn should_serialize_health() { + assert_eq!( + ::serde_json::to_string(&Health { + peers: 1, + is_syncing: false, + should_have_peers: true, + }) + .unwrap(), + r#"{"peers":1,"isSyncing":false,"shouldHavePeers":true}"#, + ); + } - #[test] - fn should_serialize_peer_info() { - assert_eq!( - ::serde_json::to_string(&PeerInfo { - peer_id: "2".into(), - roles: "a".into(), - protocol_version: 2, - best_hash: 5u32, - best_number: 6u32, - }).unwrap(), - r#"{"peerId":"2","roles":"a","protocolVersion":2,"bestHash":5,"bestNumber":6}"#, - ); - } + #[test] + fn should_serialize_peer_info() { + assert_eq!( + ::serde_json::to_string(&PeerInfo { + peer_id: "2".into(), + roles: "a".into(), + protocol_version: 2, + best_hash: 5u32, + best_number: 6u32, + }) + .unwrap(), + r#"{"peerId":"2","roles":"a","protocolVersion":2,"bestHash":5,"bestNumber":6}"#, + ); + } } diff --git a/client/rpc-api/src/system/mod.rs b/client/rpc-api/src/system/mod.rs index e66ac97a68..32f86f472e 100644 --- a/client/rpc-api/src/system/mod.rs +++ b/client/rpc-api/src/system/mod.rs @@ -20,83 +20,87 @@ pub mod error; pub mod helpers; use crate::helpers::Receiver; +use futures::{compat::Compat, future::BoxFuture}; use jsonrpc_derive::rpc; -use futures::{future::BoxFuture, compat::Compat}; use self::error::Result as SystemResult; -pub use self::helpers::{SystemInfo, Health, PeerInfo, NodeRole}; pub use self::gen_client::Client as SystemClient; +pub use self::helpers::{Health, NodeRole, PeerInfo, SystemInfo}; /// Substrate system RPC API #[rpc] pub trait SystemApi { - /// Get 
the node's implementation name. Plain old string. - #[rpc(name = "system_name")] - fn system_name(&self) -> SystemResult; - - /// Get the node implementation's version. Should be a semver string. - #[rpc(name = "system_version")] - fn system_version(&self) -> SystemResult; - - /// Get the chain's name. Given as a string identifier. - #[rpc(name = "system_chain")] - fn system_chain(&self) -> SystemResult; - - /// Get the chain's type. - #[rpc(name = "system_chainType")] - fn system_type(&self) -> SystemResult; - - /// Get a custom set of properties as a JSON object, defined in the chain spec. - #[rpc(name = "system_properties")] - fn system_properties(&self) -> SystemResult; - - /// Return health status of the node. - /// - /// Node is considered healthy if it is: - /// - connected to some peers (unless running in dev mode) - /// - not performing a major sync - #[rpc(name = "system_health", returns = "Health")] - fn system_health(&self) -> Receiver; - - /// Returns the base58-encoded PeerId of the node. - #[rpc(name = "system_localPeerId", returns = "String")] - fn system_local_peer_id(&self) -> Receiver; - - /// Returns the multiaddresses that the local node is listening on - /// - /// The addresses include a trailing `/p2p/` with the local PeerId, and are thus suitable to - /// be passed to `system_addReservedPeer` or as a bootnode address for example. - #[rpc(name = "system_localListenAddresses", returns = "Vec")] - fn system_local_listen_addresses(&self) -> Receiver>; - - /// Returns currently connected peers - #[rpc(name = "system_peers", returns = "Vec>")] - fn system_peers(&self) -> Receiver>>; - - /// Returns current state of the network. - /// - /// **Warning**: This API is not stable. - // TODO: make this stable and move structs https://github.com/paritytech/substrate/issues/1890 - #[rpc(name = "system_networkState", returns = "jsonrpc_core::Value")] - fn system_network_state(&self) -> Receiver; - - /// Adds a reserved peer. 
Returns the empty string or an error. The string - /// parameter should encode a `p2p` multiaddr. - /// - /// `/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV` - /// is an example of a valid, passing multiaddr with PeerId attached. - #[rpc(name = "system_addReservedPeer", returns = "()")] - fn system_add_reserved_peer(&self, peer: String) - -> Compat>>; - - /// Remove a reserved peer. Returns the empty string or an error. The string - /// should encode only the PeerId e.g. `QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV`. - #[rpc(name = "system_removeReservedPeer", returns = "()")] - fn system_remove_reserved_peer(&self, peer_id: String) - -> Compat>>; - - /// Returns the roles the node is running as. - #[rpc(name = "system_nodeRoles", returns = "Vec")] - fn system_node_roles(&self) -> Receiver>; + /// Get the node's implementation name. Plain old string. + #[rpc(name = "system_name")] + fn system_name(&self) -> SystemResult; + + /// Get the node implementation's version. Should be a semver string. + #[rpc(name = "system_version")] + fn system_version(&self) -> SystemResult; + + /// Get the chain's name. Given as a string identifier. + #[rpc(name = "system_chain")] + fn system_chain(&self) -> SystemResult; + + /// Get the chain's type. + #[rpc(name = "system_chainType")] + fn system_type(&self) -> SystemResult; + + /// Get a custom set of properties as a JSON object, defined in the chain spec. + #[rpc(name = "system_properties")] + fn system_properties(&self) -> SystemResult; + + /// Return health status of the node. + /// + /// Node is considered healthy if it is: + /// - connected to some peers (unless running in dev mode) + /// - not performing a major sync + #[rpc(name = "system_health", returns = "Health")] + fn system_health(&self) -> Receiver; + + /// Returns the base58-encoded PeerId of the node. 
+ #[rpc(name = "system_localPeerId", returns = "String")] + fn system_local_peer_id(&self) -> Receiver; + + /// Returns the multiaddresses that the local node is listening on + /// + /// The addresses include a trailing `/p2p/` with the local PeerId, and are thus suitable to + /// be passed to `system_addReservedPeer` or as a bootnode address for example. + #[rpc(name = "system_localListenAddresses", returns = "Vec")] + fn system_local_listen_addresses(&self) -> Receiver>; + + /// Returns currently connected peers + #[rpc(name = "system_peers", returns = "Vec>")] + fn system_peers(&self) -> Receiver>>; + + /// Returns current state of the network. + /// + /// **Warning**: This API is not stable. + // TODO: make this stable and move structs https://github.com/paritytech/substrate/issues/1890 + #[rpc(name = "system_networkState", returns = "jsonrpc_core::Value")] + fn system_network_state(&self) -> Receiver; + + /// Adds a reserved peer. Returns the empty string or an error. The string + /// parameter should encode a `p2p` multiaddr. + /// + /// `/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV` + /// is an example of a valid, passing multiaddr with PeerId attached. + #[rpc(name = "system_addReservedPeer", returns = "()")] + fn system_add_reserved_peer( + &self, + peer: String, + ) -> Compat>>; + + /// Remove a reserved peer. Returns the empty string or an error. The string + /// should encode only the PeerId e.g. `QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV`. + #[rpc(name = "system_removeReservedPeer", returns = "()")] + fn system_remove_reserved_peer( + &self, + peer_id: String, + ) -> Compat>>; + + /// Returns the roles the node is running as. 
+ #[rpc(name = "system_nodeRoles", returns = "Vec")] + fn system_node_roles(&self) -> Receiver>; } diff --git a/client/rpc-servers/src/lib.rs b/client/rpc-servers/src/lib.rs index 97fb10c15e..f448c065df 100644 --- a/client/rpc-servers/src/lib.rs +++ b/client/rpc-servers/src/lib.rs @@ -18,10 +18,10 @@ #![warn(missing_docs)] -use std::io; use jsonrpc_core::IoHandlerExtension; use log::error; use pubsub::PubSubMetadata; +use std::io; /// Maximal payload accepted by RPC servers. const MAX_PAYLOAD: usize = 15 * 1024 * 1024; @@ -35,101 +35,110 @@ pub type RpcHandler = pubsub::PubSubHandler; pub use self::inner::*; /// Construct rpc `IoHandler` -pub fn rpc_handler( - extension: impl IoHandlerExtension -) -> RpcHandler { - let mut io = pubsub::PubSubHandler::default(); - extension.augment(&mut io); - - // add an endpoint to list all available methods. - let mut methods = io.iter().map(|x| x.0.clone()).collect::>(); - io.add_method("rpc_methods", { - methods.sort(); - let methods = serde_json::to_value(&methods) - .expect("Serialization of Vec is infallible; qed"); - - move |_| Ok(serde_json::json!({ - "version": 1, - "methods": methods.clone(), - })) - }); - io +pub fn rpc_handler(extension: impl IoHandlerExtension) -> RpcHandler { + let mut io = pubsub::PubSubHandler::default(); + extension.augment(&mut io); + + // add an endpoint to list all available methods. + let mut methods = io.iter().map(|x| x.0.clone()).collect::>(); + io.add_method("rpc_methods", { + methods.sort(); + let methods = serde_json::to_value(&methods) + .expect("Serialization of Vec is infallible; qed"); + + move |_| { + Ok(serde_json::json!({ + "version": 1, + "methods": methods.clone(), + })) + } + }); + io } #[cfg(not(target_os = "unknown"))] mod inner { - use super::*; - - /// Type alias for http server - pub type HttpServer = http::Server; - /// Type alias for ws server - pub type WsServer = ws::Server; - - /// Start HTTP server listening on given address. 
- /// - /// **Note**: Only available if `not(target_os = "unknown")`. - pub fn start_http( - addr: &std::net::SocketAddr, - cors: Option<&Vec>, - io: RpcHandler, - ) -> io::Result { - http::ServerBuilder::new(io) - .threads(4) - .health_api(("/health", "system_health")) - .allowed_hosts(hosts_filtering(cors.is_some())) - .rest_api(if cors.is_some() { - http::RestApi::Secure - } else { - http::RestApi::Unsecure - }) - .cors(map_cors::(cors)) - .max_request_body_size(MAX_PAYLOAD) - .start_http(addr) - } - - /// Start WS server listening on given address. - /// - /// **Note**: Only available if `not(target_os = "unknown")`. - pub fn start_ws>> ( - addr: &std::net::SocketAddr, - max_connections: Option, - cors: Option<&Vec>, - io: RpcHandler, - ) -> io::Result { - ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| context.sender().into()) - .max_payload(MAX_PAYLOAD) - .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) - .allowed_origins(map_cors(cors)) - .allowed_hosts(hosts_filtering(cors.is_some())) - .start(addr) - .map_err(|err| match err { - ws::Error::Io(io) => io, - ws::Error::ConnectionClosed => io::ErrorKind::BrokenPipe.into(), - e => { - error!("{}", e); - io::ErrorKind::Other.into() - } - }) - } - - fn map_cors From<&'a str>>( - cors: Option<&Vec> - ) -> http::DomainsValidation { - cors.map(|x| x.iter().map(AsRef::as_ref).map(Into::into).collect::>()).into() - } - - fn hosts_filtering(enable: bool) -> http::DomainsValidation { - if enable { - // NOTE The listening address is whitelisted by default. - // Setting an empty vector here enables the validation - // and allows only the listening address. - http::DomainsValidation::AllowOnly(vec![]) - } else { - http::DomainsValidation::Disabled - } - } + use super::*; + + /// Type alias for http server + pub type HttpServer = http::Server; + /// Type alias for ws server + pub type WsServer = ws::Server; + + /// Start HTTP server listening on given address. 
+ /// + /// **Note**: Only available if `not(target_os = "unknown")`. + pub fn start_http( + addr: &std::net::SocketAddr, + cors: Option<&Vec>, + io: RpcHandler, + ) -> io::Result { + http::ServerBuilder::new(io) + .threads(4) + .health_api(("/health", "system_health")) + .allowed_hosts(hosts_filtering(cors.is_some())) + .rest_api(if cors.is_some() { + http::RestApi::Secure + } else { + http::RestApi::Unsecure + }) + .cors(map_cors::(cors)) + .max_request_body_size(MAX_PAYLOAD) + .start_http(addr) + } + + /// Start WS server listening on given address. + /// + /// **Note**: Only available if `not(target_os = "unknown")`. + pub fn start_ws< + M: pubsub::PubSubMetadata + From>, + >( + addr: &std::net::SocketAddr, + max_connections: Option, + cors: Option<&Vec>, + io: RpcHandler, + ) -> io::Result { + ws::ServerBuilder::with_meta_extractor(io, |context: &ws::RequestContext| { + context.sender().into() + }) + .max_payload(MAX_PAYLOAD) + .max_connections(max_connections.unwrap_or(WS_MAX_CONNECTIONS)) + .allowed_origins(map_cors(cors)) + .allowed_hosts(hosts_filtering(cors.is_some())) + .start(addr) + .map_err(|err| match err { + ws::Error::Io(io) => io, + ws::Error::ConnectionClosed => io::ErrorKind::BrokenPipe.into(), + e => { + error!("{}", e); + io::ErrorKind::Other.into() + } + }) + } + + fn map_cors From<&'a str>>( + cors: Option<&Vec>, + ) -> http::DomainsValidation { + cors.map(|x| { + x.iter() + .map(AsRef::as_ref) + .map(Into::into) + .collect::>() + }) + .into() + } + + fn hosts_filtering(enable: bool) -> http::DomainsValidation { + if enable { + // NOTE The listening address is whitelisted by default. + // Setting an empty vector here enables the validation + // and allows only the listening address. 
+ http::DomainsValidation::AllowOnly(vec![]) + } else { + http::DomainsValidation::Disabled + } + } } #[cfg(target_os = "unknown")] -mod inner { -} +mod inner {} diff --git a/client/rpc/src/author/mod.rs b/client/rpc/src/author/mod.rs index a3f23e8e14..ff74522cc3 100644 --- a/client/rpc/src/author/mod.rs +++ b/client/rpc/src/author/mod.rs @@ -19,63 +19,59 @@ #[cfg(test)] mod tests; -use std::{sync::Arc, convert::TryInto}; use log::warn; +use std::{convert::TryInto, sync::Arc}; use sp_blockchain::{Error as ClientError, HeaderBackend}; -use rpc::futures::{ - Sink, Future, - future::result, -}; -use futures::{StreamExt as _, compat::Compat}; +use codec::{Decode, Encode}; use futures::future::{ready, FutureExt, TryFutureExt}; -use sc_rpc_api::Subscriptions; +use futures::{compat::Compat, StreamExt as _}; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use codec::{Encode, Decode}; -use sp_core::{Bytes, traits::BareCryptoStorePtr}; +use rpc::futures::{future::result, Future, Sink}; +use sc_rpc_api::Subscriptions; use sp_api::ProvideRuntimeApi; +use sp_core::{traits::BareCryptoStorePtr, Bytes}; use sp_runtime::generic; +use sp_session::SessionKeys; use sp_transaction_pool::{ - TransactionPool, InPoolTransaction, TransactionStatus, TransactionSource, - BlockHash, TxHash, TransactionFor, error::IntoPoolError, + error::IntoPoolError, BlockHash, InPoolTransaction, TransactionFor, TransactionPool, + TransactionSource, TransactionStatus, TxHash, }; -use sp_session::SessionKeys; +use self::error::{Error, FutureResult, Result}; /// Re-export the API for backward compatibility. pub use sc_rpc_api::author::*; -use self::error::{Error, FutureResult, Result}; /// Authoring API pub struct Author { - /// Substrate client - client: Arc, - /// Transactions pool - pool: Arc

, - /// Subscriptions manager - subscriptions: Subscriptions, - /// The key store. - keystore: BareCryptoStorePtr, + /// Substrate client + client: Arc, + /// Transactions pool + pool: Arc

, + /// Subscriptions manager + subscriptions: Subscriptions, + /// The key store. + keystore: BareCryptoStorePtr, } impl Author { - /// Create new instance of Authoring API. - pub fn new( - client: Arc, - pool: Arc

, - subscriptions: Subscriptions, - keystore: BareCryptoStorePtr, - ) -> Self { - Author { - client, - pool, - subscriptions, - keystore, - } - } + /// Create new instance of Authoring API. + pub fn new( + client: Arc, + pool: Arc

, + subscriptions: Subscriptions, + keystore: BareCryptoStorePtr, + ) -> Self { + Author { + client, + pool, + subscriptions, + keystore, + } + } } - /// Currently we treat all RPC transactions as externals. /// /// Possibly in the future we could allow opt-in for special treatment @@ -84,142 +80,164 @@ impl Author { const TX_SOURCE: TransactionSource = TransactionSource::External; impl AuthorApi, BlockHash

> for Author - where - P: TransactionPool + Sync + Send + 'static, - Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: SessionKeys, +where + P: TransactionPool + Sync + Send + 'static, + Client: HeaderBackend + ProvideRuntimeApi + Send + Sync + 'static, + Client::Api: SessionKeys, { - type Metadata = crate::metadata::Metadata; - - fn insert_key( - &self, - key_type: String, - suri: String, - public: Bytes, - ) -> Result<()> { - let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; - let mut keystore = self.keystore.write(); - keystore.insert_unknown(key_type, &suri, &public[..]) - .map_err(|_| Error::KeyStoreUnavailable)?; - Ok(()) - } - - fn rotate_keys(&self) -> Result { - let best_block_hash = self.client.info().best_hash; - self.client.runtime_api().generate_session_keys( - &generic::BlockId::Hash(best_block_hash), - None, - ).map(Into::into).map_err(|e| Error::Client(Box::new(e))) - } - - fn has_session_keys(&self, session_keys: Bytes) -> Result { - let best_block_hash = self.client.info().best_hash; - let keys = self.client.runtime_api().decode_session_keys( - &generic::BlockId::Hash(best_block_hash), - session_keys.to_vec(), - ).map_err(|e| Error::Client(Box::new(e)))? 
- .ok_or_else(|| Error::InvalidSessionKeys)?; - - Ok(self.keystore.read().has_keys(&keys)) - } - - fn has_key(&self, public_key: Bytes, key_type: String) -> Result { - let key_type = key_type.as_str().try_into().map_err(|_| Error::BadKeyType)?; - Ok(self.keystore.read().has_keys(&[(public_key.to_vec(), key_type)])) - } - - fn submit_extrinsic(&self, ext: Bytes) -> FutureResult> { - let xt = match Decode::decode(&mut &ext[..]) { - Ok(xt) => xt, - Err(err) => return Box::new(result(Err(err.into()))), - }; - let best_block_hash = self.client.info().best_hash; - Box::new(self.pool - .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) - .compat() - .map_err(|e| e.into_pool_error() - .map(Into::into) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into())) - ) - } - - fn pending_extrinsics(&self) -> Result> { - Ok(self.pool.ready().map(|tx| tx.data().encode().into()).collect()) - } - - fn remove_extrinsic( - &self, - bytes_or_hash: Vec>>, - ) -> Result>> { - let hashes = bytes_or_hash.into_iter() - .map(|x| match x { - hash::ExtrinsicOrHash::Hash(h) => Ok(h), - hash::ExtrinsicOrHash::Extrinsic(bytes) => { - let xt = Decode::decode(&mut &bytes[..])?; - Ok(self.pool.hash_of(&xt)) - }, - }) - .collect::>>()?; - - Ok( - self.pool - .remove_invalid(&hashes) - .into_iter() - .map(|tx| tx.hash().clone()) - .collect() - ) - } - - fn watch_extrinsic(&self, - _metadata: Self::Metadata, - subscriber: Subscriber, BlockHash

>>, - xt: Bytes, - ) { - let submit = || -> Result<_> { - let best_block_hash = self.client.info().best_hash; - let dxt = TransactionFor::

::decode(&mut &xt[..]) - .map_err(error::Error::from)?; - Ok( - self.pool - .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) - .map_err(|e| e.into_pool_error() - .map(error::Error::from) - .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()) - ) - ) - }; - - let subscriptions = self.subscriptions.clone(); - let future = ready(submit()) - .and_then(|res| res) - // convert the watcher into a `Stream` - .map(|res| res.map(|stream| stream.map(|v| Ok::<_, ()>(Ok(v))))) - // now handle the import result, - // start a new subscrition - .map(move |result| match result { - Ok(watcher) => { - subscriptions.add(subscriber, move |sink| { - sink - .sink_map_err(|_| unimplemented!()) - .send_all(Compat::new(watcher)) - .map(|_| ()) - }); - }, - Err(err) => { - warn!("Failed to submit extrinsic: {}", err); - // reject the subscriber (ignore errors - we don't care if subscriber is no longer there). - let _ = subscriber.reject(err.into()); - }, - }); - - let res = self.subscriptions.executor() - .execute(Box::new(Compat::new(future.map(|_| Ok(()))))); - if res.is_err() { - warn!("Error spawning subscription RPC task."); - } - } - - fn unwatch_extrinsic(&self, _metadata: Option, id: SubscriptionId) -> Result { - Ok(self.subscriptions.cancel(id)) - } + type Metadata = crate::metadata::Metadata; + + fn insert_key(&self, key_type: String, suri: String, public: Bytes) -> Result<()> { + let key_type = key_type + .as_str() + .try_into() + .map_err(|_| Error::BadKeyType)?; + let mut keystore = self.keystore.write(); + keystore + .insert_unknown(key_type, &suri, &public[..]) + .map_err(|_| Error::KeyStoreUnavailable)?; + Ok(()) + } + + fn rotate_keys(&self) -> Result { + let best_block_hash = self.client.info().best_hash; + self.client + .runtime_api() + .generate_session_keys(&generic::BlockId::Hash(best_block_hash), None) + .map(Into::into) + .map_err(|e| Error::Client(Box::new(e))) + } + + fn has_session_keys(&self, session_keys: Bytes) -> Result 
{ + let best_block_hash = self.client.info().best_hash; + let keys = self + .client + .runtime_api() + .decode_session_keys( + &generic::BlockId::Hash(best_block_hash), + session_keys.to_vec(), + ) + .map_err(|e| Error::Client(Box::new(e)))? + .ok_or_else(|| Error::InvalidSessionKeys)?; + + Ok(self.keystore.read().has_keys(&keys)) + } + + fn has_key(&self, public_key: Bytes, key_type: String) -> Result { + let key_type = key_type + .as_str() + .try_into() + .map_err(|_| Error::BadKeyType)?; + Ok(self + .keystore + .read() + .has_keys(&[(public_key.to_vec(), key_type)])) + } + + fn submit_extrinsic(&self, ext: Bytes) -> FutureResult> { + let xt = match Decode::decode(&mut &ext[..]) { + Ok(xt) => xt, + Err(err) => return Box::new(result(Err(err.into()))), + }; + let best_block_hash = self.client.info().best_hash; + Box::new( + self.pool + .submit_one(&generic::BlockId::hash(best_block_hash), TX_SOURCE, xt) + .compat() + .map_err(|e| { + e.into_pool_error() + .map(Into::into) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()) + }), + ) + } + + fn pending_extrinsics(&self) -> Result> { + Ok(self + .pool + .ready() + .map(|tx| tx.data().encode().into()) + .collect()) + } + + fn remove_extrinsic( + &self, + bytes_or_hash: Vec>>, + ) -> Result>> { + let hashes = bytes_or_hash + .into_iter() + .map(|x| match x { + hash::ExtrinsicOrHash::Hash(h) => Ok(h), + hash::ExtrinsicOrHash::Extrinsic(bytes) => { + let xt = Decode::decode(&mut &bytes[..])?; + Ok(self.pool.hash_of(&xt)) + } + }) + .collect::>>()?; + + Ok(self + .pool + .remove_invalid(&hashes) + .into_iter() + .map(|tx| tx.hash().clone()) + .collect()) + } + + fn watch_extrinsic( + &self, + _metadata: Self::Metadata, + subscriber: Subscriber, BlockHash

>>, + xt: Bytes, + ) { + let submit = || -> Result<_> { + let best_block_hash = self.client.info().best_hash; + let dxt = TransactionFor::

::decode(&mut &xt[..]).map_err(error::Error::from)?; + Ok(self + .pool + .submit_and_watch(&generic::BlockId::hash(best_block_hash), TX_SOURCE, dxt) + .map_err(|e| { + e.into_pool_error() + .map(error::Error::from) + .unwrap_or_else(|e| error::Error::Verification(Box::new(e)).into()) + })) + }; + + let subscriptions = self.subscriptions.clone(); + let future = ready(submit()) + .and_then(|res| res) + // convert the watcher into a `Stream` + .map(|res| res.map(|stream| stream.map(|v| Ok::<_, ()>(Ok(v))))) + // now handle the import result, + // start a new subscrition + .map(move |result| match result { + Ok(watcher) => { + subscriptions.add(subscriber, move |sink| { + sink.sink_map_err(|_| unimplemented!()) + .send_all(Compat::new(watcher)) + .map(|_| ()) + }); + } + Err(err) => { + warn!("Failed to submit extrinsic: {}", err); + // reject the subscriber (ignore errors - we don't care if subscriber is no longer there). + let _ = subscriber.reject(err.into()); + } + }); + + let res = self + .subscriptions + .executor() + .execute(Box::new(Compat::new(future.map(|_| Ok(()))))); + if res.is_err() { + warn!("Error spawning subscription RPC task."); + } + } + + fn unwatch_extrinsic( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> Result { + Ok(self.subscriptions.cancel(id)) + } } diff --git a/client/rpc/src/author/tests.rs b/client/rpc/src/author/tests.rs index 445888c523..d4806a992e 100644 --- a/client/rpc/src/author/tests.rs +++ b/client/rpc/src/author/tests.rs @@ -16,281 +16,330 @@ use super::*; -use std::{mem, sync::Arc}; use assert_matches::assert_matches; use codec::Encode; +use rpc::futures::Stream as _; +use sc_transaction_pool::{BasicPool, FullChainApi}; use sp_core::{ - H256, blake2_256, hexdisplay::HexDisplay, testing::{ED25519, SR25519, KeyStore}, - traits::BareCryptoStorePtr, ed25519, sr25519, - crypto::{CryptoTypePublicPair, Pair, Public}, + blake2_256, + crypto::{CryptoTypePublicPair, Pair, Public}, + ed25519, + hexdisplay::HexDisplay, + 
sr25519, + testing::{KeyStore, ED25519, SR25519}, + traits::BareCryptoStorePtr, + H256, }; -use rpc::futures::Stream as _; +use std::{mem, sync::Arc}; use substrate_test_runtime_client::{ - self, AccountKeyring, runtime::{Extrinsic, Transfer, SessionKeys, Block}, - DefaultTestClientBuilderExt, TestClientBuilderExt, Backend, Client, + self, + runtime::{Block, Extrinsic, SessionKeys, Transfer}, + AccountKeyring, Backend, Client, DefaultTestClientBuilderExt, TestClientBuilderExt, }; -use sc_transaction_pool::{BasicPool, FullChainApi}; use tokio::runtime; fn uxt(sender: AccountKeyring, nonce: u64) -> Extrinsic { - let tx = Transfer { - amount: Default::default(), - nonce, - from: sender.into(), - to: Default::default(), - }; - tx.into_signed_tx() + let tx = Transfer { + amount: Default::default(), + nonce, + from: sender.into(), + to: Default::default(), + }; + tx.into_signed_tx() } -type FullTransactionPool = BasicPool< - FullChainApi, Block>, - Block, ->; +type FullTransactionPool = BasicPool, Block>, Block>; struct TestSetup { - pub runtime: runtime::Runtime, - pub client: Arc>, - pub keystore: BareCryptoStorePtr, - pub pool: Arc, + pub runtime: runtime::Runtime, + pub client: Arc>, + pub keystore: BareCryptoStorePtr, + pub pool: Arc, } impl Default for TestSetup { - fn default() -> Self { - let keystore = KeyStore::new(); - let client = Arc::new( - substrate_test_runtime_client::TestClientBuilder::new() - .set_keystore(keystore.clone()) - .build() - ); - let pool = Arc::new(BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0); - TestSetup { - runtime: runtime::Runtime::new().expect("Failed to create runtime in test setup"), - client, - keystore, - pool, - } - } + fn default() -> Self { + let keystore = KeyStore::new(); + let client = Arc::new( + substrate_test_runtime_client::TestClientBuilder::new() + .set_keystore(keystore.clone()) + .build(), + ); + let pool = Arc::new( + BasicPool::new( + Default::default(), + 
Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + ); + TestSetup { + runtime: runtime::Runtime::new().expect("Failed to create runtime in test setup"), + client, + keystore, + pool, + } + } } impl TestSetup { - fn author(&self) -> Author> { - Author { - client: self.client.clone(), - pool: self.pool.clone(), - subscriptions: Subscriptions::new(Arc::new(self.runtime.executor())), - keystore: self.keystore.clone(), - } - } + fn author(&self) -> Author> { + Author { + client: self.client.clone(), + pool: self.pool.clone(), + subscriptions: Subscriptions::new(Arc::new(self.runtime.executor())), + keystore: self.keystore.clone(), + } + } } #[test] fn submit_transaction_should_not_cause_error() { - let p = TestSetup::default().author(); - let xt = uxt(AccountKeyring::Alice, 1).encode(); - let h: H256 = blake2_256(&xt).into(); - - assert_matches!( - AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), - Ok(h2) if h == h2 - ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err() - ); + let p = TestSetup::default().author(); + let xt = uxt(AccountKeyring::Alice, 1).encode(); + let h: H256 = blake2_256(&xt).into(); + + assert_matches!( + AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), + Ok(h2) if h == h2 + ); + assert!(AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err()); } #[test] fn submit_rich_transaction_should_not_cause_error() { - let p = TestSetup::default().author(); - let xt = uxt(AccountKeyring::Alice, 0).encode(); - let h: H256 = blake2_256(&xt).into(); - - assert_matches!( - AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), - Ok(h2) if h == h2 - ); - assert!( - AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err() - ); + let p = TestSetup::default().author(); + let xt = uxt(AccountKeyring::Alice, 0).encode(); + let h: H256 = blake2_256(&xt).into(); + + assert_matches!( + AuthorApi::submit_extrinsic(&p, xt.clone().into()).wait(), + Ok(h2) if h == h2 + ); + 
assert!(AuthorApi::submit_extrinsic(&p, xt.into()).wait().is_err()); } #[test] fn should_watch_extrinsic() { - //given - let mut setup = TestSetup::default(); - let p = setup.author(); - - let (subscriber, id_rx, data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); - - // when - p.watch_extrinsic(Default::default(), subscriber, uxt(AccountKeyring::Alice, 0).encode().into()); - - // then - assert_eq!(setup.runtime.block_on(id_rx), Ok(Ok(1.into()))); - // check notifications - let replacement = { - let tx = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }; - tx.into_signed_tx() - }; - AuthorApi::submit_extrinsic(&p, replacement.encode().into()).wait().unwrap(); - let (res, data) = setup.runtime.block_on(data.into_future()).unwrap(); - assert_eq!( - res, - Some(r#"{"jsonrpc":"2.0","method":"test","params":{"result":"ready","subscription":1}}"#.into()) - ); - let h = blake2_256(&replacement.encode()); - assert_eq!( - setup.runtime.block_on(data.into_future()).unwrap().0, - Some(format!(r#"{{"jsonrpc":"2.0","method":"test","params":{{"result":{{"usurped":"0x{}"}},"subscription":1}}}}"#, HexDisplay::from(&h))) - ); + //given + let mut setup = TestSetup::default(); + let p = setup.author(); + + let (subscriber, id_rx, data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); + + // when + p.watch_extrinsic( + Default::default(), + subscriber, + uxt(AccountKeyring::Alice, 0).encode().into(), + ); + + // then + assert_eq!(setup.runtime.block_on(id_rx), Ok(Ok(1.into()))); + // check notifications + let replacement = { + let tx = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + }; + tx.into_signed_tx() + }; + AuthorApi::submit_extrinsic(&p, replacement.encode().into()) + .wait() + .unwrap(); + let (res, data) = setup.runtime.block_on(data.into_future()).unwrap(); + assert_eq!( + res, + Some( + 
r#"{"jsonrpc":"2.0","method":"test","params":{"result":"ready","subscription":1}}"# + .into() + ) + ); + let h = blake2_256(&replacement.encode()); + assert_eq!( + setup.runtime.block_on(data.into_future()).unwrap().0, + Some(format!( + r#"{{"jsonrpc":"2.0","method":"test","params":{{"result":{{"usurped":"0x{}"}},"subscription":1}}}}"#, + HexDisplay::from(&h) + )) + ); } #[test] fn should_return_watch_validation_error() { - //given - let mut setup = TestSetup::default(); - let p = setup.author(); - - let (subscriber, id_rx, _data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); - - // when - p.watch_extrinsic(Default::default(), subscriber, uxt(AccountKeyring::Alice, 179).encode().into()); - - // then - let res = setup.runtime.block_on(id_rx).unwrap(); - assert!(res.is_err(), "Expected the transaction to be rejected as invalid."); + //given + let mut setup = TestSetup::default(); + let p = setup.author(); + + let (subscriber, id_rx, _data) = jsonrpc_pubsub::typed::Subscriber::new_test("test"); + + // when + p.watch_extrinsic( + Default::default(), + subscriber, + uxt(AccountKeyring::Alice, 179).encode().into(), + ); + + // then + let res = setup.runtime.block_on(id_rx).unwrap(); + assert!( + res.is_err(), + "Expected the transaction to be rejected as invalid." 
+ ); } #[test] fn should_return_pending_extrinsics() { - let p = TestSetup::default().author(); - - let ex = uxt(AccountKeyring::Alice, 0); - AuthorApi::submit_extrinsic(&p, ex.encode().into()).wait().unwrap(); - assert_matches!( - p.pending_extrinsics(), - Ok(ref expected) if *expected == vec![Bytes(ex.encode())] - ); + let p = TestSetup::default().author(); + + let ex = uxt(AccountKeyring::Alice, 0); + AuthorApi::submit_extrinsic(&p, ex.encode().into()) + .wait() + .unwrap(); + assert_matches!( + p.pending_extrinsics(), + Ok(ref expected) if *expected == vec![Bytes(ex.encode())] + ); } #[test] fn should_remove_extrinsics() { - let setup = TestSetup::default(); - let p = setup.author(); - - let ex1 = uxt(AccountKeyring::Alice, 0); - p.submit_extrinsic(ex1.encode().into()).wait().unwrap(); - let ex2 = uxt(AccountKeyring::Alice, 1); - p.submit_extrinsic(ex2.encode().into()).wait().unwrap(); - let ex3 = uxt(AccountKeyring::Bob, 0); - let hash3 = p.submit_extrinsic(ex3.encode().into()).wait().unwrap(); - assert_eq!(setup.pool.status().ready, 3); - - // now remove all 3 - let removed = p.remove_extrinsic(vec![ - hash::ExtrinsicOrHash::Hash(hash3), - // Removing this one will also remove ex2 - hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), - ]).unwrap(); - - assert_eq!(removed.len(), 3); + let setup = TestSetup::default(); + let p = setup.author(); + + let ex1 = uxt(AccountKeyring::Alice, 0); + p.submit_extrinsic(ex1.encode().into()).wait().unwrap(); + let ex2 = uxt(AccountKeyring::Alice, 1); + p.submit_extrinsic(ex2.encode().into()).wait().unwrap(); + let ex3 = uxt(AccountKeyring::Bob, 0); + let hash3 = p.submit_extrinsic(ex3.encode().into()).wait().unwrap(); + assert_eq!(setup.pool.status().ready, 3); + + // now remove all 3 + let removed = p + .remove_extrinsic(vec![ + hash::ExtrinsicOrHash::Hash(hash3), + // Removing this one will also remove ex2 + hash::ExtrinsicOrHash::Extrinsic(ex1.encode().into()), + ]) + .unwrap(); + + assert_eq!(removed.len(), 3); } 
#[test] fn should_insert_key() { - let setup = TestSetup::default(); - let p = setup.author(); - - let suri = "//Alice"; - let key_pair = ed25519::Pair::from_string(suri, None).expect("Generates keypair"); - p.insert_key( - String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), - suri.to_string(), - key_pair.public().0.to_vec().into(), - ).expect("Insert key"); - - let public_keys = setup.keystore.read().keys(ED25519).unwrap(); - - assert!(public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, key_pair.public().to_raw_vec()))); + let setup = TestSetup::default(); + let p = setup.author(); + + let suri = "//Alice"; + let key_pair = ed25519::Pair::from_string(suri, None).expect("Generates keypair"); + p.insert_key( + String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), + suri.to_string(), + key_pair.public().0.to_vec().into(), + ) + .expect("Insert key"); + + let public_keys = setup.keystore.read().keys(ED25519).unwrap(); + + assert!(public_keys.contains(&CryptoTypePublicPair( + ed25519::CRYPTO_ID, + key_pair.public().to_raw_vec() + ))); } #[test] fn should_rotate_keys() { - let setup = TestSetup::default(); - let p = setup.author(); + let setup = TestSetup::default(); + let p = setup.author(); - let new_public_keys = p.rotate_keys().expect("Rotates the keys"); + let new_public_keys = p.rotate_keys().expect("Rotates the keys"); - let session_keys = SessionKeys::decode(&mut &new_public_keys[..]) - .expect("SessionKeys decode successfully"); + let session_keys = + SessionKeys::decode(&mut &new_public_keys[..]).expect("SessionKeys decode successfully"); - let ed25519_public_keys = setup.keystore.read().keys(ED25519).unwrap(); - let sr25519_public_keys = setup.keystore.read().keys(SR25519).unwrap(); + let ed25519_public_keys = setup.keystore.read().keys(ED25519).unwrap(); + let sr25519_public_keys = setup.keystore.read().keys(SR25519).unwrap(); - 
assert!(ed25519_public_keys.contains(&CryptoTypePublicPair(ed25519::CRYPTO_ID, session_keys.ed25519.to_raw_vec()))); - assert!(sr25519_public_keys.contains(&CryptoTypePublicPair(sr25519::CRYPTO_ID, session_keys.sr25519.to_raw_vec()))); + assert!(ed25519_public_keys.contains(&CryptoTypePublicPair( + ed25519::CRYPTO_ID, + session_keys.ed25519.to_raw_vec() + ))); + assert!(sr25519_public_keys.contains(&CryptoTypePublicPair( + sr25519::CRYPTO_ID, + session_keys.sr25519.to_raw_vec() + ))); } #[test] fn test_has_session_keys() { - let setup = TestSetup::default(); - let p = setup.author(); - - let non_existent_public_keys = TestSetup::default() - .author() - .rotate_keys() - .expect("Rotates the keys"); - - let public_keys = p.rotate_keys().expect("Rotates the keys"); - let test_vectors = vec![ - (public_keys, Ok(true)), - (vec![1, 2, 3].into(), Err(Error::InvalidSessionKeys)), - (non_existent_public_keys, Ok(false)), - ]; - - for (keys, result) in test_vectors { - assert_eq!( - result.map_err(|e| mem::discriminant(&e)), - p.has_session_keys(keys).map_err(|e| mem::discriminant(&e)), - ); - } + let setup = TestSetup::default(); + let p = setup.author(); + + let non_existent_public_keys = TestSetup::default() + .author() + .rotate_keys() + .expect("Rotates the keys"); + + let public_keys = p.rotate_keys().expect("Rotates the keys"); + let test_vectors = vec![ + (public_keys, Ok(true)), + (vec![1, 2, 3].into(), Err(Error::InvalidSessionKeys)), + (non_existent_public_keys, Ok(false)), + ]; + + for (keys, result) in test_vectors { + assert_eq!( + result.map_err(|e| mem::discriminant(&e)), + p.has_session_keys(keys).map_err(|e| mem::discriminant(&e)), + ); + } } #[test] fn test_has_key() { - let setup = TestSetup::default(); - let p = setup.author(); - - let suri = "//Alice"; - let alice_key_pair = ed25519::Pair::from_string(suri, None).expect("Generates keypair"); - p.insert_key( - String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), - suri.to_string(), 
- alice_key_pair.public().0.to_vec().into(), - ).expect("Insert key"); - let bob_key_pair = ed25519::Pair::from_string("//Bob", None).expect("Generates keypair"); - - let test_vectors = vec![ - (alice_key_pair.public().to_raw_vec().into(), ED25519, Ok(true)), - (alice_key_pair.public().to_raw_vec().into(), SR25519, Ok(false)), - (bob_key_pair.public().to_raw_vec().into(), ED25519, Ok(false)), - ]; - - for (key, key_type, result) in test_vectors { - assert_eq!( - result.map_err(|e| mem::discriminant(&e)), - p.has_key( - key, - String::from_utf8(key_type.0.to_vec()).expect("Keytype is a valid string"), - ).map_err(|e| mem::discriminant(&e)), - ); - } + let setup = TestSetup::default(); + let p = setup.author(); + + let suri = "//Alice"; + let alice_key_pair = ed25519::Pair::from_string(suri, None).expect("Generates keypair"); + p.insert_key( + String::from_utf8(ED25519.0.to_vec()).expect("Keytype is a valid string"), + suri.to_string(), + alice_key_pair.public().0.to_vec().into(), + ) + .expect("Insert key"); + let bob_key_pair = ed25519::Pair::from_string("//Bob", None).expect("Generates keypair"); + + let test_vectors = vec![ + ( + alice_key_pair.public().to_raw_vec().into(), + ED25519, + Ok(true), + ), + ( + alice_key_pair.public().to_raw_vec().into(), + SR25519, + Ok(false), + ), + ( + bob_key_pair.public().to_raw_vec().into(), + ED25519, + Ok(false), + ), + ]; + + for (key, key_type, result) in test_vectors { + assert_eq!( + result.map_err(|e| mem::discriminant(&e)), + p.has_key( + key, + String::from_utf8(key_type.0.to_vec()).expect("Keytype is a valid string"), + ) + .map_err(|e| mem::discriminant(&e)), + ); + } } diff --git a/client/rpc/src/chain/chain_full.rs b/client/rpc/src/chain/chain_full.rs index c1b062754b..7b1707a968 100644 --- a/client/rpc/src/chain/chain_full.rs +++ b/client/rpc/src/chain/chain_full.rs @@ -16,63 +16,67 @@ //! Blockchain API backend for full nodes. 
-use std::sync::Arc; use rpc::futures::future::result; +use std::sync::Arc; +use sc_client_api::{BlockBackend, BlockchainEvents}; use sc_rpc_api::Subscriptions; -use sc_client_api::{BlockchainEvents, BlockBackend}; -use sp_runtime::{generic::{BlockId, SignedBlock}, traits::{Block as BlockT}}; +use sp_runtime::{ + generic::{BlockId, SignedBlock}, + traits::Block as BlockT, +}; -use super::{ChainBackend, client_err, error::FutureResult}; -use std::marker::PhantomData; +use super::{client_err, error::FutureResult, ChainBackend}; use sp_blockchain::HeaderBackend; +use std::marker::PhantomData; /// Blockchain API backend for full nodes. Reads all the data from local database. pub struct FullChain { - /// Substrate client. - client: Arc, - /// Current subscriptions. - subscriptions: Subscriptions, - /// phantom member to pin the block type - _phantom: PhantomData, + /// Substrate client. + client: Arc, + /// Current subscriptions. + subscriptions: Subscriptions, + /// phantom member to pin the block type + _phantom: PhantomData, } impl FullChain { - /// Create new Chain API RPC handler. - pub fn new(client: Arc, subscriptions: Subscriptions) -> Self { - Self { - client, - subscriptions, - _phantom: PhantomData, - } - } + /// Create new Chain API RPC handler. 
+ pub fn new(client: Arc, subscriptions: Subscriptions) -> Self { + Self { + client, + subscriptions, + _phantom: PhantomData, + } + } } -impl ChainBackend for FullChain where - Block: BlockT + 'static, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, +impl ChainBackend for FullChain +where + Block: BlockT + 'static, + Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, { - fn client(&self) -> &Arc { - &self.client - } + fn client(&self) -> &Arc { + &self.client + } - fn subscriptions(&self) -> &Subscriptions { - &self.subscriptions - } + fn subscriptions(&self) -> &Subscriptions { + &self.subscriptions + } - fn header(&self, hash: Option) -> FutureResult> { - Box::new(result(self.client - .header(BlockId::Hash(self.unwrap_or_best(hash))) - .map_err(client_err) - )) - } + fn header(&self, hash: Option) -> FutureResult> { + Box::new(result( + self.client + .header(BlockId::Hash(self.unwrap_or_best(hash))) + .map_err(client_err), + )) + } - fn block(&self, hash: Option) - -> FutureResult>> - { - Box::new(result(self.client - .block(&BlockId::Hash(self.unwrap_or_best(hash))) - .map_err(client_err) - )) - } + fn block(&self, hash: Option) -> FutureResult>> { + Box::new(result( + self.client + .block(&BlockId::Hash(self.unwrap_or_best(hash))) + .map_err(client_err), + )) + } } diff --git a/client/rpc/src/chain/chain_light.rs b/client/rpc/src/chain/chain_light.rs index b258c8dd3b..7dbac39739 100644 --- a/client/rpc/src/chain/chain_light.rs +++ b/client/rpc/src/chain/chain_light.rs @@ -16,103 +16,108 @@ //! Blockchain API backend for light nodes. 
-use std::sync::Arc; use futures::{future::ready, FutureExt, TryFutureExt}; -use rpc::futures::future::{result, Future, Either}; +use rpc::futures::future::{result, Either, Future}; +use std::sync::Arc; -use sc_rpc_api::Subscriptions; -use sc_client::{ - light::{fetcher::{Fetcher, RemoteBodyRequest}, blockchain::RemoteBlockchain}, +use sc_client::light::{ + blockchain::RemoteBlockchain, + fetcher::{Fetcher, RemoteBodyRequest}, }; +use sc_rpc_api::Subscriptions; use sp_runtime::{ - generic::{BlockId, SignedBlock}, - traits::{Block as BlockT}, + generic::{BlockId, SignedBlock}, + traits::Block as BlockT, }; -use super::{ChainBackend, client_err, error::FutureResult}; -use sp_blockchain::HeaderBackend; +use super::{client_err, error::FutureResult, ChainBackend}; use sc_client_api::BlockchainEvents; +use sp_blockchain::HeaderBackend; /// Blockchain API backend for light nodes. Reads all the data from local /// database, if available, or fetches it from remote node otherwise. pub struct LightChain { - /// Substrate client. - client: Arc, - /// Current subscriptions. - subscriptions: Subscriptions, - /// Remote blockchain reference - remote_blockchain: Arc>, - /// Remote fetcher reference. - fetcher: Arc, + /// Substrate client. + client: Arc, + /// Current subscriptions. + subscriptions: Subscriptions, + /// Remote blockchain reference + remote_blockchain: Arc>, + /// Remote fetcher reference. + fetcher: Arc, } impl> LightChain { - /// Create new Chain API RPC handler. - pub fn new( - client: Arc, - subscriptions: Subscriptions, - remote_blockchain: Arc>, - fetcher: Arc, - ) -> Self { - Self { - client, - subscriptions, - remote_blockchain, - fetcher, - } - } + /// Create new Chain API RPC handler. 
+ pub fn new( + client: Arc, + subscriptions: Subscriptions, + remote_blockchain: Arc>, + fetcher: Arc, + ) -> Self { + Self { + client, + subscriptions, + remote_blockchain, + fetcher, + } + } } -impl ChainBackend for LightChain where - Block: BlockT + 'static, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + Send + Sync + 'static, +impl ChainBackend for LightChain +where + Block: BlockT + 'static, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + Send + Sync + 'static, { - fn client(&self) -> &Arc { - &self.client - } - - fn subscriptions(&self) -> &Subscriptions { - &self.subscriptions - } - - fn header(&self, hash: Option) -> FutureResult> { - let hash = self.unwrap_or_best(hash); - - let fetcher = self.fetcher.clone(); - let maybe_header = sc_client::light::blockchain::future_header( - &*self.remote_blockchain, - &*fetcher, - BlockId::Hash(hash), - ); - - Box::new(maybe_header.then(move |result| - ready(result.map_err(client_err)), - ).boxed().compat()) - } - - fn block(&self, hash: Option) - -> FutureResult>> - { - let fetcher = self.fetcher.clone(); - let block = self.header(hash) - .and_then(move |header| match header { - Some(header) => Either::A(fetcher - .remote_body(RemoteBodyRequest { - header: header.clone(), - retry_count: Default::default(), - }) - .boxed() - .compat() - .map(move |body| Some(SignedBlock { - block: Block::new(header, body), - justification: None, - })) - .map_err(client_err) - ), - None => Either::B(result(Ok(None))), - }); - - Box::new(block) - } + fn client(&self) -> &Arc { + &self.client + } + + fn subscriptions(&self) -> &Subscriptions { + &self.subscriptions + } + + fn header(&self, hash: Option) -> FutureResult> { + let hash = self.unwrap_or_best(hash); + + let fetcher = self.fetcher.clone(); + let maybe_header = sc_client::light::blockchain::future_header( + &*self.remote_blockchain, + &*fetcher, + BlockId::Hash(hash), + ); + + Box::new( + maybe_header + 
.then(move |result| ready(result.map_err(client_err))) + .boxed() + .compat(), + ) + } + + fn block(&self, hash: Option) -> FutureResult>> { + let fetcher = self.fetcher.clone(); + let block = self.header(hash).and_then(move |header| match header { + Some(header) => Either::A( + fetcher + .remote_body(RemoteBodyRequest { + header: header.clone(), + retry_count: Default::default(), + }) + .boxed() + .compat() + .map(move |body| { + Some(SignedBlock { + block: Block::new(header, body), + justification: None, + }) + }) + .map_err(client_err), + ), + None => Either::B(result(Ok(None))), + }); + + Box::new(block) + } } diff --git a/client/rpc/src/chain/mod.rs b/client/rpc/src/chain/mod.rs index fa97e21886..e2a849df6a 100644 --- a/client/rpc/src/chain/mod.rs +++ b/client/rpc/src/chain/mod.rs @@ -22,301 +22,325 @@ mod chain_light; #[cfg(test)] mod tests; -use std::sync::Arc; use futures::{future, StreamExt, TryStreamExt}; use log::warn; use rpc::{ - Result as RpcResult, - futures::{stream, Future, Sink, Stream}, + futures::{stream, Future, Sink, Stream}, + Result as RpcResult, }; +use std::sync::Arc; -use sc_rpc_api::Subscriptions; +use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use sc_client::{ - self, BlockchainEvents, - light::{fetcher::Fetcher, blockchain::RemoteBlockchain}, + self, + light::{blockchain::RemoteBlockchain, fetcher::Fetcher}, + BlockchainEvents, }; -use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use sp_rpc::{number::NumberOrHex, list::ListOrValue}; +use sc_rpc_api::Subscriptions; +use sp_rpc::{list::ListOrValue, number::NumberOrHex}; use sp_runtime::{ - generic::{BlockId, SignedBlock}, - traits::{Block as BlockT, Header, NumberFor}, + generic::{BlockId, SignedBlock}, + traits::{Block as BlockT, Header, NumberFor}, }; -use self::error::{Result, Error, FutureResult}; +use self::error::{Error, FutureResult, Result}; +use sc_client_api::BlockBackend; pub use sc_rpc_api::chain::*; use sp_blockchain::HeaderBackend; -use 
sc_client_api::BlockBackend; /// Blockchain backend API trait ChainBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: HeaderBackend + BlockchainEvents + 'static, +where + Block: BlockT + 'static, + Client: HeaderBackend + BlockchainEvents + 'static, { - /// Get client reference. - fn client(&self) -> &Arc; - - /// Get subscriptions reference. - fn subscriptions(&self) -> &Subscriptions; - - /// Tries to unwrap passed block hash, or uses best block hash otherwise. - fn unwrap_or_best(&self, hash: Option) -> Block::Hash { - match hash.into() { - None => self.client().info().best_hash, - Some(hash) => hash, - } - } - - /// Get header of a relay chain block. - fn header(&self, hash: Option) -> FutureResult>; - - /// Get header and body of a relay chain block. - fn block(&self, hash: Option) -> FutureResult>>; - - /// Get hash of the n-th block in the canon chain. - /// - /// By default returns latest block hash. - fn block_hash( - &self, - number: Option>>, - ) -> Result> { - Ok(match number { - None => Some(self.client().info().best_hash), - Some(num_or_hex) => self.client() - .header(BlockId::number(num_or_hex.to_number()?)) - .map_err(client_err)? - .map(|h| h.hash()), - }) - } - - /// Get hash of the last finalized block in the canon chain. - fn finalized_head(&self) -> Result { - Ok(self.client().info().finalized_hash) - } - - /// All new head subscription - fn subscribe_all_heads( - &self, - _metadata: crate::metadata::Metadata, - subscriber: Subscriber, - ) { - subscribe_headers( - self.client(), - self.subscriptions(), - subscriber, - || self.client().info().best_hash, - || self.client().import_notification_stream() - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), - ) - } - - /// Unsubscribe from all head subscription. 
- fn unsubscribe_all_heads( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions().cancel(id)) - } - - /// New best head subscription - fn subscribe_new_heads( - &self, - _metadata: crate::metadata::Metadata, - subscriber: Subscriber, - ) { - subscribe_headers( - self.client(), - self.subscriptions(), - subscriber, - || self.client().info().best_hash, - || self.client().import_notification_stream() - .filter(|notification| future::ready(notification.is_new_best)) - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), - ) - } - - /// Unsubscribe from new best head subscription. - fn unsubscribe_new_heads( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions().cancel(id)) - } - - /// Finalized head subscription - fn subscribe_finalized_heads( - &self, - _metadata: crate::metadata::Metadata, - subscriber: Subscriber, - ) { - subscribe_headers( - self.client(), - self.subscriptions(), - subscriber, - || self.client().info().finalized_hash, - || self.client().finality_notification_stream() - .map(|notification| Ok::<_, ()>(notification.header)) - .compat(), - ) - } - - /// Unsubscribe from finalized head subscription. - fn unsubscribe_finalized_heads( - &self, - _metadata: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions().cancel(id)) - } + /// Get client reference. + fn client(&self) -> &Arc; + + /// Get subscriptions reference. + fn subscriptions(&self) -> &Subscriptions; + + /// Tries to unwrap passed block hash, or uses best block hash otherwise. + fn unwrap_or_best(&self, hash: Option) -> Block::Hash { + match hash.into() { + None => self.client().info().best_hash, + Some(hash) => hash, + } + } + + /// Get header of a relay chain block. + fn header(&self, hash: Option) -> FutureResult>; + + /// Get header and body of a relay chain block. + fn block(&self, hash: Option) -> FutureResult>>; + + /// Get hash of the n-th block in the canon chain. 
+ /// + /// By default returns latest block hash. + fn block_hash( + &self, + number: Option>>, + ) -> Result> { + Ok(match number { + None => Some(self.client().info().best_hash), + Some(num_or_hex) => self + .client() + .header(BlockId::number(num_or_hex.to_number()?)) + .map_err(client_err)? + .map(|h| h.hash()), + }) + } + + /// Get hash of the last finalized block in the canon chain. + fn finalized_head(&self) -> Result { + Ok(self.client().info().finalized_hash) + } + + /// All new head subscription + fn subscribe_all_heads( + &self, + _metadata: crate::metadata::Metadata, + subscriber: Subscriber, + ) { + subscribe_headers( + self.client(), + self.subscriptions(), + subscriber, + || self.client().info().best_hash, + || { + self.client() + .import_notification_stream() + .map(|notification| Ok::<_, ()>(notification.header)) + .compat() + }, + ) + } + + /// Unsubscribe from all head subscription. + fn unsubscribe_all_heads( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions().cancel(id)) + } + + /// New best head subscription + fn subscribe_new_heads( + &self, + _metadata: crate::metadata::Metadata, + subscriber: Subscriber, + ) { + subscribe_headers( + self.client(), + self.subscriptions(), + subscriber, + || self.client().info().best_hash, + || { + self.client() + .import_notification_stream() + .filter(|notification| future::ready(notification.is_new_best)) + .map(|notification| Ok::<_, ()>(notification.header)) + .compat() + }, + ) + } + + /// Unsubscribe from new best head subscription. 
+ fn unsubscribe_new_heads( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions().cancel(id)) + } + + /// Finalized head subscription + fn subscribe_finalized_heads( + &self, + _metadata: crate::metadata::Metadata, + subscriber: Subscriber, + ) { + subscribe_headers( + self.client(), + self.subscriptions(), + subscriber, + || self.client().info().finalized_hash, + || { + self.client() + .finality_notification_stream() + .map(|notification| Ok::<_, ()>(notification.header)) + .compat() + }, + ) + } + + /// Unsubscribe from finalized head subscription. + fn unsubscribe_finalized_heads( + &self, + _metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions().cancel(id)) + } } /// Create new state API that works on full node. pub fn new_full( - client: Arc, - subscriptions: Subscriptions, + client: Arc, + subscriptions: Subscriptions, ) -> Chain - where - Block: BlockT + 'static, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, +where + Block: BlockT + 'static, + Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, { - Chain { - backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)), - } + Chain { + backend: Box::new(self::chain_full::FullChain::new(client, subscriptions)), + } } /// Create new state API that works on light node. 
pub fn new_light>( - client: Arc, - subscriptions: Subscriptions, - remote_blockchain: Arc>, - fetcher: Arc, + client: Arc, + subscriptions: Subscriptions, + remote_blockchain: Arc>, + fetcher: Arc, ) -> Chain - where - Block: BlockT + 'static, - Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, - F: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: BlockBackend + HeaderBackend + BlockchainEvents + 'static, + F: Send + Sync + 'static, { - Chain { - backend: Box::new(self::chain_light::LightChain::new( - client, - subscriptions, - remote_blockchain, - fetcher, - )), - } + Chain { + backend: Box::new(self::chain_light::LightChain::new( + client, + subscriptions, + remote_blockchain, + fetcher, + )), + } } /// Chain API with subscriptions support. pub struct Chain { - backend: Box>, + backend: Box>, } -impl ChainApi, Block::Hash, Block::Header, SignedBlock> for - Chain - where - Block: BlockT + 'static, - Client: HeaderBackend + BlockchainEvents + 'static, +impl ChainApi, Block::Hash, Block::Header, SignedBlock> + for Chain +where + Block: BlockT + 'static, + Client: HeaderBackend + BlockchainEvents + 'static, { - type Metadata = crate::metadata::Metadata; - - fn header(&self, hash: Option) -> FutureResult> { - self.backend.header(hash) - } - - fn block(&self, hash: Option) -> FutureResult>> - { - self.backend.block(hash) - } - - fn block_hash( - &self, - number: Option>>> - ) -> Result>> { - match number { - None => self.backend.block_hash(None).map(ListOrValue::Value), - Some(ListOrValue::Value(number)) => self.backend.block_hash(Some(number)).map(ListOrValue::Value), - Some(ListOrValue::List(list)) => Ok(ListOrValue::List(list - .into_iter() - .map(|number| self.backend.block_hash(Some(number))) - .collect::>()? 
- )) - } - } - - fn finalized_head(&self) -> Result { - self.backend.finalized_head() - } - - fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { - self.backend.subscribe_all_heads(metadata, subscriber) - } - - fn unsubscribe_all_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { - self.backend.unsubscribe_all_heads(metadata, id) - } - - fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { - self.backend.subscribe_new_heads(metadata, subscriber) - } - - fn unsubscribe_new_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { - self.backend.unsubscribe_new_heads(metadata, id) - } - - fn subscribe_finalized_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { - self.backend.subscribe_finalized_heads(metadata, subscriber) - } - - fn unsubscribe_finalized_heads(&self, metadata: Option, id: SubscriptionId) -> RpcResult { - self.backend.unsubscribe_finalized_heads(metadata, id) - } + type Metadata = crate::metadata::Metadata; + + fn header(&self, hash: Option) -> FutureResult> { + self.backend.header(hash) + } + + fn block(&self, hash: Option) -> FutureResult>> { + self.backend.block(hash) + } + + fn block_hash( + &self, + number: Option>>>, + ) -> Result>> { + match number { + None => self.backend.block_hash(None).map(ListOrValue::Value), + Some(ListOrValue::Value(number)) => self + .backend + .block_hash(Some(number)) + .map(ListOrValue::Value), + Some(ListOrValue::List(list)) => Ok(ListOrValue::List( + list.into_iter() + .map(|number| self.backend.block_hash(Some(number))) + .collect::>()?, + )), + } + } + + fn finalized_head(&self) -> Result { + self.backend.finalized_head() + } + + fn subscribe_all_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { + self.backend.subscribe_all_heads(metadata, subscriber) + } + + fn unsubscribe_all_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + self.backend.unsubscribe_all_heads(metadata, id) 
+ } + + fn subscribe_new_heads(&self, metadata: Self::Metadata, subscriber: Subscriber) { + self.backend.subscribe_new_heads(metadata, subscriber) + } + + fn unsubscribe_new_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + self.backend.unsubscribe_new_heads(metadata, id) + } + + fn subscribe_finalized_heads( + &self, + metadata: Self::Metadata, + subscriber: Subscriber, + ) { + self.backend.subscribe_finalized_heads(metadata, subscriber) + } + + fn unsubscribe_finalized_heads( + &self, + metadata: Option, + id: SubscriptionId, + ) -> RpcResult { + self.backend.unsubscribe_finalized_heads(metadata, id) + } } /// Subscribe to new headers. fn subscribe_headers( - client: &Arc, - subscriptions: &Subscriptions, - subscriber: Subscriber, - best_block_hash: G, - stream: F, + client: &Arc, + subscriptions: &Subscriptions, + subscriber: Subscriber, + best_block_hash: G, + stream: F, ) where - Block: BlockT + 'static, - Client: HeaderBackend + 'static, - F: FnOnce() -> S, - G: FnOnce() -> Block::Hash, - ERR: ::std::fmt::Debug, - S: Stream + Send + 'static, + Block: BlockT + 'static, + Client: HeaderBackend + 'static, + F: FnOnce() -> S, + G: FnOnce() -> Block::Hash, + ERR: ::std::fmt::Debug, + S: Stream + Send + 'static, { - subscriptions.add(subscriber, |sink| { - // send current head right at the start. 
- let header = client.header(BlockId::Hash(best_block_hash())) - .map_err(client_err) - .and_then(|header| { - header.ok_or_else(|| "Best header missing.".to_owned().into()) - }) - .map_err(Into::into); - - // send further subscriptions - let stream = stream() - .map(|res| Ok(res)) - .map_err(|e| warn!("Block notification stream error: {:?}", e)); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all( - stream::iter_result(vec![Ok(header)]) - .chain(stream) - ) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); + subscriptions.add(subscriber, |sink| { + // send current head right at the start. + let header = client + .header(BlockId::Hash(best_block_hash())) + .map_err(client_err) + .and_then(|header| header.ok_or_else(|| "Best header missing.".to_owned().into())) + .map_err(Into::into); + + // send further subscriptions + let stream = stream() + .map(|res| Ok(res)) + .map_err(|e| warn!("Block notification stream error: {:?}", e)); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(stream::iter_result(vec![Ok(header)]).chain(stream)) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }); } fn client_err(err: sp_blockchain::Error) -> Error { - Error::Client(Box::new(err)) + Error::Client(Box::new(err)) } diff --git a/client/rpc/src/chain/tests.rs b/client/rpc/src/chain/tests.rs index 68d904919b..f21252ec8a 100644 --- a/client/rpc/src/chain/tests.rs +++ b/client/rpc/src/chain/tests.rs @@ -16,264 +16,295 @@ use super::*; use assert_matches::assert_matches; +use sc_block_builder::BlockBuilderProvider; +use sp_rpc::list::ListOrValue; use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, - runtime::{H256, Block, Header}, + prelude::*, + runtime::{Block, Header, H256}, + sp_consensus::BlockOrigin, }; -use sp_rpc::list::ListOrValue; -use 
sc_block_builder::BlockBuilderProvider; #[test] fn should_return_header() { - let core = tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - assert_matches!( - api.header(Some(client.genesis_hash()).into()).wait(), - Ok(Some(ref x)) if x == &Header { - parent_hash: H256::from_low_u64_be(0), - number: 0, - state_root: x.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - } - ); - - assert_matches!( - api.header(None.into()).wait(), - Ok(Some(ref x)) if x == &Header { - parent_hash: H256::from_low_u64_be(0), - number: 0, - state_root: x.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - } - ); - - assert_matches!(api.header(Some(H256::from_low_u64_be(5)).into()).wait(), Ok(None)); + let core = tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + assert_matches!( + api.header(Some(client.genesis_hash()).into()).wait(), + Ok(Some(ref x)) if x == &Header { + parent_hash: H256::from_low_u64_be(0), + number: 0, + state_root: x.state_root.clone(), + extrinsics_root: + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + } + ); + + assert_matches!( + api.header(None.into()).wait(), + Ok(Some(ref x)) if x == &Header { + parent_hash: H256::from_low_u64_be(0), + number: 0, + state_root: x.state_root.clone(), + extrinsics_root: + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + } + ); + + assert_matches!( + 
api.header(Some(H256::from_low_u64_be(5)).into()).wait(), + Ok(None) + ); } #[test] fn should_return_a_block() { - let core = tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - let block_hash = block.hash(); - client.import(BlockOrigin::Own, block).unwrap(); - - // Genesis block is not justified - assert_matches!( + let core = tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + let block_hash = block.hash(); + client.import(BlockOrigin::Own, block).unwrap(); + + // Genesis block is not justified + assert_matches!( api.block(Some(client.genesis_hash()).into()).wait(), Ok(Some(SignedBlock { justification: None, .. 
})) ); - assert_matches!( - api.block(Some(block_hash).into()).wait(), - Ok(Some(ref x)) if x.block == Block { - header: Header { - parent_hash: client.genesis_hash(), - number: 1, - state_root: x.block.header.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - }, - extrinsics: vec![], - } - ); - - assert_matches!( - api.block(None.into()).wait(), - Ok(Some(ref x)) if x.block == Block { - header: Header { - parent_hash: client.genesis_hash(), - number: 1, - state_root: x.block.header.state_root.clone(), - extrinsics_root: - "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), - digest: Default::default(), - }, - extrinsics: vec![], - } - ); - - assert_matches!( - api.block(Some(H256::from_low_u64_be(5)).into()).wait(), - Ok(None) - ); + assert_matches!( + api.block(Some(block_hash).into()).wait(), + Ok(Some(ref x)) if x.block == Block { + header: Header { + parent_hash: client.genesis_hash(), + number: 1, + state_root: x.block.header.state_root.clone(), + extrinsics_root: + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + }, + extrinsics: vec![], + } + ); + + assert_matches!( + api.block(None.into()).wait(), + Ok(Some(ref x)) if x.block == Block { + header: Header { + parent_hash: client.genesis_hash(), + number: 1, + state_root: x.block.header.state_root.clone(), + extrinsics_root: + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314".parse().unwrap(), + digest: Default::default(), + }, + extrinsics: vec![], + } + ); + + assert_matches!( + api.block(Some(H256::from_low_u64_be(5)).into()).wait(), + Ok(None) + ); } #[test] fn should_return_block_hash() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), 
Subscriptions::new(Arc::new(remote))); - - assert_matches!( - api.block_hash(None.into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() - ); - - - assert_matches!( - api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() - ); - - assert_matches!( - api.block_hash(Some(ListOrValue::Value(1u64.into())).into()), - Ok(ListOrValue::Value(None)) - ); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block.clone()).unwrap(); - - assert_matches!( - api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() - ); - assert_matches!( - api.block_hash(Some(ListOrValue::Value(1u64.into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &block.hash() - ); - assert_matches!( - api.block_hash(Some(ListOrValue::Value(sp_core::U256::from(1u64).into())).into()), - Ok(ListOrValue::Value(Some(ref x))) if x == &block.hash() - ); - - assert_matches!( - api.block_hash(Some(vec![0u64.into(), 1.into(), 2.into()].into())), - Ok(ListOrValue::List(list)) if list == &[client.genesis_hash().into(), block.hash().into(), None] - ); + let core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + assert_matches!( + api.block_hash(None.into()), + Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() + ); + + assert_matches!( + api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), + Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() + ); + + assert_matches!( + api.block_hash(Some(ListOrValue::Value(1u64.into())).into()), + Ok(ListOrValue::Value(None)) + ); + + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + 
.block; + client.import(BlockOrigin::Own, block.clone()).unwrap(); + + assert_matches!( + api.block_hash(Some(ListOrValue::Value(0u64.into())).into()), + Ok(ListOrValue::Value(Some(ref x))) if x == &client.genesis_hash() + ); + assert_matches!( + api.block_hash(Some(ListOrValue::Value(1u64.into())).into()), + Ok(ListOrValue::Value(Some(ref x))) if x == &block.hash() + ); + assert_matches!( + api.block_hash(Some(ListOrValue::Value(sp_core::U256::from(1u64).into())).into()), + Ok(ListOrValue::Value(Some(ref x))) if x == &block.hash() + ); + + assert_matches!( + api.block_hash(Some(vec![0u64.into(), 1.into(), 2.into()].into())), + Ok(ListOrValue::List(list)) if list == &[client.genesis_hash().into(), block.hash().into(), None] + ); } - #[test] fn should_return_finalized_hash() { - let core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - assert_matches!( - api.finalized_head(), - Ok(ref x) if x == &client.genesis_hash() - ); - - // import new block - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - // no finalization yet - assert_matches!( - api.finalized_head(), - Ok(ref x) if x == &client.genesis_hash() - ); - - // finalize - client.finalize_block(BlockId::number(1), None).unwrap(); - assert_matches!( - api.finalized_head(), - Ok(ref x) if x == &client.block_hash(1).unwrap().unwrap() - ); + let core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + assert_matches!( + api.finalized_head(), + Ok(ref x) if x == &client.genesis_hash() + ); + + // import new block + let block = client + .new_block(Default::default()) + .unwrap() + .build() 
+ .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + // no finalization yet + assert_matches!( + api.finalized_head(), + Ok(ref x) if x == &client.genesis_hash() + ); + + // finalize + client.finalize_block(BlockId::number(1), None).unwrap(); + assert_matches!( + api.finalized_head(), + Ok(ref x) if x == &client.block_hash(1).unwrap().unwrap() + ); } #[test] fn should_notify_about_latest_block() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - api.subscribe_all_heads(Default::default(), subscriber); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - } - - // assert initial head sent. 
- let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = core.block_on(next.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + api.subscribe_all_heads(Default::default(), subscriber); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + } + + // assert initial head sent. 
+ let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_notify_about_best_block() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - api.subscribe_new_heads(Default::default(), subscriber); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - } - - // assert initial head sent. 
- let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = core.block_on(next.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + api.subscribe_new_heads(Default::default(), subscriber); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + } + + // assert initial head sent. 
+ let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_notify_about_finalized_block() { - let mut core = ::tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - api.subscribe_finalized_heads(Default::default(), subscriber); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - client.finalize_block(BlockId::number(1), None).unwrap(); - } - - // assert initial head sent. 
- let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = core.block_on(next.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = ::tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + api.subscribe_finalized_heads(Default::default(), subscriber); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + client.finalize_block(BlockId::number(1), None).unwrap(); + } + + // assert initial head sent. 
+ let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } diff --git a/client/rpc/src/lib.rs b/client/rpc/src/lib.rs index ea65785c20..7f94fb6cae 100644 --- a/client/rpc/src/lib.rs +++ b/client/rpc/src/lib.rs @@ -22,9 +22,9 @@ mod metadata; -pub use sc_rpc_api::Subscriptions; pub use self::metadata::Metadata; pub use rpc::IoHandlerExtension as RpcExtension; +pub use sc_rpc_api::Subscriptions; pub mod author; pub mod chain; diff --git a/client/rpc/src/metadata.rs b/client/rpc/src/metadata.rs index d35653f8e6..5ac48d7024 100644 --- a/client/rpc/src/metadata.rs +++ b/client/rpc/src/metadata.rs @@ -17,7 +17,7 @@ //! RPC Metadata use std::sync::Arc; -use jsonrpc_pubsub::{Session, PubSubMetadata}; +use jsonrpc_pubsub::{PubSubMetadata, Session}; use rpc::futures::sync::mpsc; /// RPC Metadata. @@ -27,34 +27,34 @@ use rpc::futures::sync::mpsc; /// (like remote client IP address, request headers, etc) #[derive(Default, Clone)] pub struct Metadata { - session: Option>, + session: Option>, } impl rpc::Metadata for Metadata {} impl PubSubMetadata for Metadata { - fn session(&self) -> Option> { - self.session.clone() - } + fn session(&self) -> Option> { + self.session.clone() + } } impl Metadata { - /// Create new `Metadata` with session (Pub/Sub) support. - pub fn new(transport: mpsc::Sender) -> Self { - Metadata { - session: Some(Arc::new(Session::new(transport))), - } - } - - /// Create new `Metadata` for tests. - #[cfg(test)] - pub fn new_test() -> (mpsc::Receiver, Self) { - let (tx, rx) = mpsc::channel(1); - (rx, Self::new(tx)) - } + /// Create new `Metadata` with session (Pub/Sub) support. 
+ pub fn new(transport: mpsc::Sender) -> Self { + Metadata { + session: Some(Arc::new(Session::new(transport))), + } + } + + /// Create new `Metadata` for tests. + #[cfg(test)] + pub fn new_test() -> (mpsc::Receiver, Self) { + let (tx, rx) = mpsc::channel(1); + (rx, Self::new(tx)) + } } impl From> for Metadata { - fn from(sender: mpsc::Sender) -> Self { - Self::new(sender) - } + fn from(sender: mpsc::Sender) -> Self { + Self::new(sender) + } } diff --git a/client/rpc/src/offchain/mod.rs b/client/rpc/src/offchain/mod.rs index 61984d4845..1de63eefe0 100644 --- a/client/rpc/src/offchain/mod.rs +++ b/client/rpc/src/offchain/mod.rs @@ -19,49 +19,49 @@ #[cfg(test)] mod tests; +use self::error::{Error, Result}; +use parking_lot::RwLock; /// Re-export the API for backward compatibility. pub use sc_rpc_api::offchain::*; -use self::error::{Error, Result}; use sp_core::{ - Bytes, - offchain::{OffchainStorage, StorageKind}, + offchain::{OffchainStorage, StorageKind}, + Bytes, }; -use parking_lot::RwLock; use std::sync::Arc; /// Offchain API #[derive(Debug)] pub struct Offchain { - /// Offchain storage - storage: Arc>, + /// Offchain storage + storage: Arc>, } impl Offchain { - /// Create new instance of Offchain API. - pub fn new(storage: T) -> Self { - Offchain { - storage: Arc::new(RwLock::new(storage)), - } - } + /// Create new instance of Offchain API. + pub fn new(storage: T) -> Self { + Offchain { + storage: Arc::new(RwLock::new(storage)), + } + } } impl OffchainApi for Offchain { - /// Set offchain local storage under given key and prefix. - fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> Result<()> { - let prefix = match kind { - StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, - StorageKind::LOCAL => return Err(Error::UnavailableStorageKind), - }; - self.storage.write().set(prefix, &*key, &*value); - Ok(()) - } + /// Set offchain local storage under given key and prefix. 
+ fn set_local_storage(&self, kind: StorageKind, key: Bytes, value: Bytes) -> Result<()> { + let prefix = match kind { + StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, + StorageKind::LOCAL => return Err(Error::UnavailableStorageKind), + }; + self.storage.write().set(prefix, &*key, &*value); + Ok(()) + } - /// Get offchain local storage under given key and prefix. - fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> Result> { - let prefix = match kind { - StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, - StorageKind::LOCAL => return Err(Error::UnavailableStorageKind), - }; - Ok(self.storage.read().get(prefix, &*key).map(Into::into)) - } + /// Get offchain local storage under given key and prefix. + fn get_local_storage(&self, kind: StorageKind, key: Bytes) -> Result> { + let prefix = match kind { + StorageKind::PERSISTENT => sp_offchain::STORAGE_PREFIX, + StorageKind::LOCAL => return Err(Error::UnavailableStorageKind), + }; + Ok(self.storage.read().get(prefix, &*key).map(Into::into)) + } } diff --git a/client/rpc/src/offchain/tests.rs b/client/rpc/src/offchain/tests.rs index ac1a6a4de3..5e594465f8 100644 --- a/client/rpc/src/offchain/tests.rs +++ b/client/rpc/src/offchain/tests.rs @@ -16,21 +16,21 @@ use super::*; use assert_matches::assert_matches; -use sp_core::{Bytes, offchain::storage::InMemOffchainStorage}; +use sp_core::{offchain::storage::InMemOffchainStorage, Bytes}; #[test] fn local_storage_should_work() { - let storage = InMemOffchainStorage::default(); - let offchain = Offchain::new(storage); - let key = Bytes(b"offchain_storage".to_vec()); - let value = Bytes(b"offchain_value".to_vec()); + let storage = InMemOffchainStorage::default(); + let offchain = Offchain::new(storage); + let key = Bytes(b"offchain_storage".to_vec()); + let value = Bytes(b"offchain_value".to_vec()); - assert_matches!( - offchain.set_local_storage(StorageKind::PERSISTENT, key.clone(), value.clone()), - Ok(()) - ); - assert_matches!( - 
offchain.get_local_storage(StorageKind::PERSISTENT, key), - Ok(Some(ref v)) if *v == value - ); + assert_matches!( + offchain.set_local_storage(StorageKind::PERSISTENT, key.clone(), value.clone()), + Ok(()) + ); + assert_matches!( + offchain.get_local_storage(StorageKind::PERSISTENT, key), + Ok(Some(ref v)) if *v == value + ); } diff --git a/client/rpc/src/state/mod.rs b/client/rpc/src/state/mod.rs index 2747405c04..8d99f91451 100644 --- a/client/rpc/src/state/mod.rs +++ b/client/rpc/src/state/mod.rs @@ -22,392 +22,441 @@ mod state_light; #[cfg(test)] mod tests; -use std::sync::Arc; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use rpc::{Result as RpcResult, futures::{Future, future::result}}; +use rpc::{ + futures::{future::result, Future}, + Result as RpcResult, +}; +use std::sync::Arc; +use sc_client::light::{blockchain::RemoteBlockchain, fetcher::Fetcher}; use sc_rpc_api::Subscriptions; -use sc_client::{light::{blockchain::RemoteBlockchain, fetcher::Fetcher}}; -use sp_core::{Bytes, storage::{StorageKey, StorageData, StorageChangeSet}}; -use sp_version::RuntimeVersion; +use sp_core::{ + storage::{StorageChangeSet, StorageData, StorageKey}, + Bytes, +}; use sp_runtime::traits::Block as BlockT; +use sp_version::RuntimeVersion; -use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; +use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; use self::error::{Error, FutureResult}; +use sc_client_api::{Backend, BlockchainEvents, ExecutorProvider, StorageProvider}; pub use sc_rpc_api::state::*; -use sc_client_api::{ExecutorProvider, StorageProvider, BlockchainEvents, Backend}; -use sp_blockchain::{HeaderMetadata, HeaderBackend}; +use sp_blockchain::{HeaderBackend, HeaderMetadata}; const STORAGE_KEYS_PAGED_MAX_COUNT: u32 = 1000; /// State backend API. 
pub trait StateBackend: Send + Sync + 'static - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { - /// Call runtime method at given block. - fn call( - &self, - block: Option, - method: String, - call_data: Bytes, - ) -> FutureResult; - - /// Returns the keys with prefix, leave empty to get all the keys. - fn storage_keys( - &self, - block: Option, - prefix: StorageKey, - ) -> FutureResult>; - - /// Returns the keys with prefix along with their values, leave empty to get all the pairs. - fn storage_pairs( - &self, - block: Option, - prefix: StorageKey, - ) -> FutureResult>; - - /// Returns the keys with prefix with pagination support. - fn storage_keys_paged( - &self, - block: Option, - prefix: Option, - count: u32, - start_key: Option, - ) -> FutureResult>; - - /// Returns a storage entry at a specific block's state. - fn storage( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the hash of a storage entry at a block's state. - fn storage_hash( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the size of a storage entry at a block's state. - fn storage_size( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult> { - Box::new(self.storage(block, key) - .map(|x| x.map(|x| x.0.len() as u64))) - } - - /// Returns the keys with prefix from a child storage, leave empty to get all the keys - fn child_storage_keys( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult>; - - /// Returns a child storage entry at a specific block's state. - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the hash of a child storage entry at a block's state. 
- fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult>; - - /// Returns the size of a child storage entry at a block's state. - fn child_storage_size( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(self.child_storage(block, child_storage_key, child_info, child_type, key) - .map(|x| x.map(|x| x.0.len() as u64))) - } - - /// Returns the runtime metadata as an opaque blob. - fn metadata(&self, block: Option) -> FutureResult; - - /// Get the runtime version. - fn runtime_version(&self, block: Option) -> FutureResult; - - /// Query historical storage entries (by key) starting from a block given as the second parameter. - /// - /// NOTE This first returned result contains the initial state of storage for all keys. - /// Subsequent values in the vector represent changes to the previous state (diffs). - fn query_storage( - &self, - from: Block::Hash, - to: Option, - keys: Vec, - ) -> FutureResult>>; - - /// Query storage entries (by key) starting at block hash given as the second parameter. - fn query_storage_at( - &self, - keys: Vec, - at: Option - ) -> FutureResult>>; - - /// New runtime version subscription - fn subscribe_runtime_version( - &self, - _meta: crate::metadata::Metadata, - subscriber: Subscriber, - ); - - /// Unsubscribe from runtime version subscription - fn unsubscribe_runtime_version( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult; - - /// New storage subscription - fn subscribe_storage( - &self, - _meta: crate::metadata::Metadata, - subscriber: Subscriber>, - keys: Option>, - ); - - /// Unsubscribe from storage subscription - fn unsubscribe_storage( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult; + /// Call runtime method at given block. 
+ fn call( + &self, + block: Option, + method: String, + call_data: Bytes, + ) -> FutureResult; + + /// Returns the keys with prefix, leave empty to get all the keys. + fn storage_keys( + &self, + block: Option, + prefix: StorageKey, + ) -> FutureResult>; + + /// Returns the keys with prefix along with their values, leave empty to get all the pairs. + fn storage_pairs( + &self, + block: Option, + prefix: StorageKey, + ) -> FutureResult>; + + /// Returns the keys with prefix with pagination support. + fn storage_keys_paged( + &self, + block: Option, + prefix: Option, + count: u32, + start_key: Option, + ) -> FutureResult>; + + /// Returns a storage entry at a specific block's state. + fn storage( + &self, + block: Option, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the hash of a storage entry at a block's state. + fn storage_hash( + &self, + block: Option, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the size of a storage entry at a block's state. + fn storage_size( + &self, + block: Option, + key: StorageKey, + ) -> FutureResult> { + Box::new( + self.storage(block, key) + .map(|x| x.map(|x| x.0.len() as u64)), + ) + } + + /// Returns the keys with prefix from a child storage, leave empty to get all the keys + fn child_storage_keys( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + prefix: StorageKey, + ) -> FutureResult>; + + /// Returns a child storage entry at a specific block's state. + fn child_storage( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the hash of a child storage entry at a block's state. + fn child_storage_hash( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + ) -> FutureResult>; + + /// Returns the size of a child storage entry at a block's state. 
+ fn child_storage_size( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + ) -> FutureResult> { + Box::new( + self.child_storage(block, child_storage_key, child_info, child_type, key) + .map(|x| x.map(|x| x.0.len() as u64)), + ) + } + + /// Returns the runtime metadata as an opaque blob. + fn metadata(&self, block: Option) -> FutureResult; + + /// Get the runtime version. + fn runtime_version(&self, block: Option) -> FutureResult; + + /// Query historical storage entries (by key) starting from a block given as the second parameter. + /// + /// NOTE This first returned result contains the initial state of storage for all keys. + /// Subsequent values in the vector represent changes to the previous state (diffs). + fn query_storage( + &self, + from: Block::Hash, + to: Option, + keys: Vec, + ) -> FutureResult>>; + + /// Query storage entries (by key) starting at block hash given as the second parameter. + fn query_storage_at( + &self, + keys: Vec, + at: Option, + ) -> FutureResult>>; + + /// New runtime version subscription + fn subscribe_runtime_version( + &self, + _meta: crate::metadata::Metadata, + subscriber: Subscriber, + ); + + /// Unsubscribe from runtime version subscription + fn unsubscribe_runtime_version( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult; + + /// New storage subscription + fn subscribe_storage( + &self, + _meta: crate::metadata::Metadata, + subscriber: Subscriber>, + keys: Option>, + ); + + /// Unsubscribe from storage subscription + fn unsubscribe_storage( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult; } /// Create new state API that works on full node. 
pub fn new_full( - client: Arc, - subscriptions: Subscriptions, + client: Arc, + subscriptions: Subscriptions, ) -> State - where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + HeaderBackend - + HeaderMetadata + BlockchainEvents - + CallApiAt - + ProvideRuntimeApi + Send + Sync + 'static, - Client::Api: Metadata, +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + + StorageProvider + + HeaderBackend + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: Metadata, { - State { - backend: Box::new(self::state_full::FullState::new(client, subscriptions)), - } + State { + backend: Box::new(self::state_full::FullState::new(client, subscriptions)), + } } /// Create new state API that works on light node. pub fn new_light>( - client: Arc, - subscriptions: Subscriptions, - remote_blockchain: Arc>, - fetcher: Arc, + client: Arc, + subscriptions: Subscriptions, + remote_blockchain: Arc>, + fetcher: Arc, ) -> State - where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider - + HeaderMetadata - + ProvideRuntimeApi + HeaderBackend + BlockchainEvents - + Send + Sync + 'static, - F: Send + Sync + 'static, +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + + StorageProvider + + HeaderMetadata + + ProvideRuntimeApi + + HeaderBackend + + BlockchainEvents + + Send + + Sync + + 'static, + F: Send + Sync + 'static, { - State { - backend: Box::new(self::state_light::LightState::new( - client, - subscriptions, - remote_blockchain, - fetcher, - )), - } + State { + backend: Box::new(self::state_light::LightState::new( + client, + subscriptions, + remote_blockchain, + fetcher, + )), + } } /// State API with subscriptions support. 
pub struct State { - backend: Box>, + backend: Box>, } impl StateApi for State - where - Block: BlockT + 'static, - Client: Send + Sync + 'static, +where + Block: BlockT + 'static, + Client: Send + Sync + 'static, { - type Metadata = crate::metadata::Metadata; - - fn call(&self, method: String, data: Bytes, block: Option) -> FutureResult { - self.backend.call(block, method, data) - } - - fn storage_keys( - &self, - key_prefix: StorageKey, - block: Option, - ) -> FutureResult> { - self.backend.storage_keys(block, key_prefix) - } - - fn storage_pairs( - &self, - key_prefix: StorageKey, - block: Option, - ) -> FutureResult> { - self.backend.storage_pairs(block, key_prefix) - } - - fn storage_keys_paged( - &self, - prefix: Option, - count: u32, - start_key: Option, - block: Option, - ) -> FutureResult> { - if count > STORAGE_KEYS_PAGED_MAX_COUNT { - return Box::new(result(Err( - Error::InvalidCount { - value: count, - max: STORAGE_KEYS_PAGED_MAX_COUNT, - } - ))); - } - self.backend.storage_keys_paged(block, prefix, count, start_key) - } - - fn storage(&self, key: StorageKey, block: Option) -> FutureResult> { - self.backend.storage(block, key) - } - - fn storage_hash(&self, key: StorageKey, block: Option) -> FutureResult> { - self.backend.storage_hash(block, key) - } - - fn storage_size(&self, key: StorageKey, block: Option) -> FutureResult> { - self.backend.storage_size(block, key) - } - - fn child_storage( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_keys( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key_prefix: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_keys(block, child_storage_key, child_info, child_type, key_prefix) - } - - fn child_storage_hash( - &self, - child_storage_key: 
StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_hash(block, child_storage_key, child_info, child_type, key) - } - - fn child_storage_size( - &self, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - block: Option - ) -> FutureResult> { - self.backend.child_storage_size(block, child_storage_key, child_info, child_type, key) - } - - fn metadata(&self, block: Option) -> FutureResult { - self.backend.metadata(block) - } - - fn query_storage( - &self, - keys: Vec, - from: Block::Hash, - to: Option - ) -> FutureResult>> { - self.backend.query_storage(from, to, keys) - } - - fn query_storage_at( - &self, - keys: Vec, - at: Option - ) -> FutureResult>> { - self.backend.query_storage_at(keys, at) - } - - fn subscribe_storage( - &self, - meta: Self::Metadata, - subscriber: Subscriber>, - keys: Option> - ) { - self.backend.subscribe_storage(meta, subscriber, keys); - } - - fn unsubscribe_storage(&self, meta: Option, id: SubscriptionId) -> RpcResult { - self.backend.unsubscribe_storage(meta, id) - } - - fn runtime_version(&self, at: Option) -> FutureResult { - self.backend.runtime_version(at) - } - - fn subscribe_runtime_version(&self, meta: Self::Metadata, subscriber: Subscriber) { - self.backend.subscribe_runtime_version(meta, subscriber); - } - - fn unsubscribe_runtime_version( - &self, - meta: Option, - id: SubscriptionId, - ) -> RpcResult { - self.backend.unsubscribe_runtime_version(meta, id) - } + type Metadata = crate::metadata::Metadata; + + fn call(&self, method: String, data: Bytes, block: Option) -> FutureResult { + self.backend.call(block, method, data) + } + + fn storage_keys( + &self, + key_prefix: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend.storage_keys(block, key_prefix) + } + + fn storage_pairs( + &self, + key_prefix: StorageKey, + block: Option, + ) -> FutureResult> { + 
self.backend.storage_pairs(block, key_prefix) + } + + fn storage_keys_paged( + &self, + prefix: Option, + count: u32, + start_key: Option, + block: Option, + ) -> FutureResult> { + if count > STORAGE_KEYS_PAGED_MAX_COUNT { + return Box::new(result(Err(Error::InvalidCount { + value: count, + max: STORAGE_KEYS_PAGED_MAX_COUNT, + }))); + } + self.backend + .storage_keys_paged(block, prefix, count, start_key) + } + + fn storage( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend.storage(block, key) + } + + fn storage_hash( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend.storage_hash(block, key) + } + + fn storage_size( + &self, + key: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend.storage_size(block, key) + } + + fn child_storage( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend + .child_storage(block, child_storage_key, child_info, child_type, key) + } + + fn child_storage_keys( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key_prefix: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend.child_storage_keys( + block, + child_storage_key, + child_info, + child_type, + key_prefix, + ) + } + + fn child_storage_hash( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend + .child_storage_hash(block, child_storage_key, child_info, child_type, key) + } + + fn child_storage_size( + &self, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + block: Option, + ) -> FutureResult> { + self.backend + .child_storage_size(block, child_storage_key, child_info, child_type, key) + } + + fn metadata(&self, block: Option) -> FutureResult { + self.backend.metadata(block) + } + + fn 
query_storage( + &self, + keys: Vec, + from: Block::Hash, + to: Option, + ) -> FutureResult>> { + self.backend.query_storage(from, to, keys) + } + + fn query_storage_at( + &self, + keys: Vec, + at: Option, + ) -> FutureResult>> { + self.backend.query_storage_at(keys, at) + } + + fn subscribe_storage( + &self, + meta: Self::Metadata, + subscriber: Subscriber>, + keys: Option>, + ) { + self.backend.subscribe_storage(meta, subscriber, keys); + } + + fn unsubscribe_storage( + &self, + meta: Option, + id: SubscriptionId, + ) -> RpcResult { + self.backend.unsubscribe_storage(meta, id) + } + + fn runtime_version(&self, at: Option) -> FutureResult { + self.backend.runtime_version(at) + } + + fn subscribe_runtime_version( + &self, + meta: Self::Metadata, + subscriber: Subscriber, + ) { + self.backend.subscribe_runtime_version(meta, subscriber); + } + + fn unsubscribe_runtime_version( + &self, + meta: Option, + id: SubscriptionId, + ) -> RpcResult { + self.backend.unsubscribe_runtime_version(meta, id) + } } fn client_err(err: sp_blockchain::Error) -> Error { - Error::Client(Box::new(err)) + Error::Client(Box::new(err)) } const CHILD_RESOLUTION_ERROR: &str = "Unexpected child info and type"; fn child_resolution_error() -> sp_blockchain::Error { - sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string()) + sp_blockchain::Error::Msg(CHILD_RESOLUTION_ERROR.to_string()) } diff --git a/client/rpc/src/state/state_full.rs b/client/rpc/src/state/state_full.rs index bf80820543..8a2ab7128b 100644 --- a/client/rpc/src/state/state_full.rs +++ b/client/rpc/src/state/state_full.rs @@ -16,563 +16,643 @@ //! State API backend for full nodes. 
-use std::collections::{BTreeMap, HashMap}; -use std::sync::Arc; -use std::ops::Range; use futures::{future, StreamExt as _, TryStreamExt as _}; -use log::warn; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; -use rpc::{Result as RpcResult, futures::{stream, Future, Sink, Stream, future::result}}; +use log::warn; +use rpc::{ + futures::{future::result, stream, Future, Sink, Stream}, + Result as RpcResult, +}; +use std::collections::{BTreeMap, HashMap}; +use std::ops::Range; +use std::sync::Arc; -use sc_rpc_api::Subscriptions; -use sc_client_api::backend::Backend; -use sp_blockchain::{Result as ClientResult, Error as ClientError, HeaderMetadata, CachedHeaderMetadata, HeaderBackend}; use sc_client::BlockchainEvents; +use sc_client_api::backend::Backend; +use sc_rpc_api::Subscriptions; +use sp_blockchain::{ + CachedHeaderMetadata, Error as ClientError, HeaderBackend, HeaderMetadata, + Result as ClientResult, +}; use sp_core::{ - Bytes, storage::{well_known_keys, StorageKey, StorageData, StorageChangeSet, ChildInfo}, + storage::{well_known_keys, ChildInfo, StorageChangeSet, StorageData, StorageKey}, + Bytes, }; -use sp_version::RuntimeVersion; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, NumberFor, SaturatedConversion, CheckedSub}, + generic::BlockId, + traits::{Block as BlockT, CheckedSub, NumberFor, SaturatedConversion}, }; +use sp_version::RuntimeVersion; -use sp_api::{Metadata, ProvideRuntimeApi, CallApiAt}; +use sp_api::{CallApiAt, Metadata, ProvideRuntimeApi}; -use super::{StateBackend, error::{FutureResult, Error, Result}, client_err, child_resolution_error}; +use super::{ + child_resolution_error, client_err, + error::{Error, FutureResult, Result}, + StateBackend, +}; +use sc_client_api::{CallExecutor, ExecutorProvider, StorageProvider}; use std::marker::PhantomData; -use sc_client_api::{CallExecutor, StorageProvider, ExecutorProvider}; /// Ranges to query in state_queryStorage. 
struct QueryStorageRange { - /// Hashes of all the blocks in the range. - pub hashes: Vec, - /// Number of the first block in the range. - pub first_number: NumberFor, - /// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at - /// each state to get changes. - pub unfiltered_range: Range, - /// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter - /// blocks-with-changes by using changes tries. - pub filtered_range: Option>, + /// Hashes of all the blocks in the range. + pub hashes: Vec, + /// Number of the first block in the range. + pub first_number: NumberFor, + /// Blocks subrange ([begin; end) indices within `hashes`) where we should read keys at + /// each state to get changes. + pub unfiltered_range: Range, + /// Blocks subrange ([begin; end) indices within `hashes`) where we could pre-filter + /// blocks-with-changes by using changes tries. + pub filtered_range: Option>, } /// State API backend for full nodes. pub struct FullState { - client: Arc, - subscriptions: Subscriptions, - _phantom: PhantomData<(BE, Block)> + client: Arc, + subscriptions: Subscriptions, + _phantom: PhantomData<(BE, Block)>, } impl FullState - where - BE: Backend, - Client: StorageProvider + HeaderBackend - + HeaderMetadata, - Block: BlockT + 'static, +where + BE: Backend, + Client: StorageProvider + + HeaderBackend + + HeaderMetadata, + Block: BlockT + 'static, { - /// Create new state API backend for full nodes. - pub fn new(client: Arc, subscriptions: Subscriptions) -> Self { - Self { client, subscriptions, _phantom: PhantomData } - } - - /// Returns given block hash or best block hash if None is passed. - fn block_or_best(&self, hash: Option) -> ClientResult { - Ok(hash.unwrap_or_else(|| self.client.info().best_hash)) - } - - /// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges. - /// Blocks that contain changes within filtered subrange could be filtered using changes tries. 
- /// Blocks that contain changes within unfiltered subrange must be filtered manually. - fn split_query_storage_range( - &self, - from: Block::Hash, - to: Option - ) -> Result> { - let to = self.block_or_best(to).map_err(|e| invalid_block::(from, to, e.to_string()))?; - - let invalid_block_err = |e: ClientError| invalid_block::(from, Some(to), e.to_string()); - let from_meta = self.client.header_metadata(from).map_err(invalid_block_err)?; - let to_meta = self.client.header_metadata(to).map_err(invalid_block_err)?; - - if from_meta.number > to_meta.number { - return Err(invalid_block_range(&from_meta, &to_meta, "from number > to number".to_owned())) - } - - // check if we can get from `to` to `from` by going through parent_hashes. - let from_number = from_meta.number; - let hashes = { - let mut hashes = vec![to_meta.hash]; - let mut last = to_meta.clone(); - while last.number > from_number { - let header_metadata = self.client - .header_metadata(last.parent) - .map_err(|e| invalid_block_range::(&last, &to_meta, e.to_string()))?; - hashes.push(header_metadata.hash); - last = header_metadata; - } - if last.hash != from_meta.hash { - return Err(invalid_block_range(&from_meta, &to_meta, "from and to are on different forks".to_owned())) - } - hashes.reverse(); - hashes - }; - - // check if we can filter blocks-with-changes from some (sub)range using changes tries - let changes_trie_range = self.client - .max_key_changes_range(from_number, BlockId::Hash(to_meta.hash)) - .map_err(client_err)?; - let filtered_range_begin = changes_trie_range - .and_then(|(begin, _)| { - // avoids a corner case where begin < from_number (happens when querying genesis) - begin.checked_sub(&from_number).map(|x| x.saturated_into::()) - }); - let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin); - - Ok(QueryStorageRange { - hashes, - first_number: from_number, - unfiltered_range, - filtered_range, - }) - } - - /// Iterates through range.unfiltered_range and 
check each block for changes of keys' values. - fn query_storage_unfiltered( - &self, - range: &QueryStorageRange, - keys: &[StorageKey], - last_values: &mut HashMap>, - changes: &mut Vec>, - ) -> Result<()> { - for block in range.unfiltered_range.start..range.unfiltered_range.end { - let block_hash = range.hashes[block].clone(); - let mut block_changes = StorageChangeSet { block: block_hash.clone(), changes: Vec::new() }; - let id = BlockId::hash(block_hash); - for key in keys { - let (has_changed, data) = { - let curr_data = self.client.storage(&id, key).map_err(client_err)?; - match last_values.get(key) { - Some(prev_data) => (curr_data != *prev_data, curr_data), - None => (true, curr_data), - } - }; - if has_changed { - block_changes.changes.push((key.clone(), data.clone())); - } - last_values.insert(key.clone(), data); - } - if !block_changes.changes.is_empty() { - changes.push(block_changes); - } - } - Ok(()) - } - - /// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes. 
- fn query_storage_filtered( - &self, - range: &QueryStorageRange, - keys: &[StorageKey], - last_values: &HashMap>, - changes: &mut Vec>, - ) -> Result<()> { - let (begin, end) = match range.filtered_range { - Some(ref filtered_range) => ( - range.first_number + filtered_range.start.saturated_into(), - BlockId::Hash(range.hashes[filtered_range.end - 1].clone()) - ), - None => return Ok(()), - }; - let mut changes_map: BTreeMap, StorageChangeSet> = BTreeMap::new(); - for key in keys { - let mut last_block = None; - let mut last_value = last_values.get(key).cloned().unwrap_or_default(); - let key_changes = self.client.key_changes(begin, end, None, key).map_err(client_err)?; - for (block, _) in key_changes.into_iter().rev() { - if last_block == Some(block) { - continue; - } - - let block_hash = range.hashes[(block - range.first_number).saturated_into::()].clone(); - let id = BlockId::Hash(block_hash); - let value_at_block = self.client.storage(&id, key).map_err(client_err)?; - if last_value == value_at_block { - continue; - } - - changes_map.entry(block) - .or_insert_with(|| StorageChangeSet { block: block_hash, changes: Vec::new() }) - .changes.push((key.clone(), value_at_block.clone())); - last_block = Some(block); - last_value = value_at_block; - } - } - if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) { - changes.reserve(additional_capacity); - } - changes.extend(changes_map.into_iter().map(|(_, cs)| cs)); - Ok(()) - } + /// Create new state API backend for full nodes. + pub fn new(client: Arc, subscriptions: Subscriptions) -> Self { + Self { + client, + subscriptions, + _phantom: PhantomData, + } + } + + /// Returns given block hash or best block hash if None is passed. + fn block_or_best(&self, hash: Option) -> ClientResult { + Ok(hash.unwrap_or_else(|| self.client.info().best_hash)) + } + + /// Splits the `query_storage` block range into 'filtered' and 'unfiltered' subranges. 
+ /// Blocks that contain changes within filtered subrange could be filtered using changes tries. + /// Blocks that contain changes within unfiltered subrange must be filtered manually. + fn split_query_storage_range( + &self, + from: Block::Hash, + to: Option, + ) -> Result> { + let to = self + .block_or_best(to) + .map_err(|e| invalid_block::(from, to, e.to_string()))?; + + let invalid_block_err = + |e: ClientError| invalid_block::(from, Some(to), e.to_string()); + let from_meta = self + .client + .header_metadata(from) + .map_err(invalid_block_err)?; + let to_meta = self.client.header_metadata(to).map_err(invalid_block_err)?; + + if from_meta.number > to_meta.number { + return Err(invalid_block_range( + &from_meta, + &to_meta, + "from number > to number".to_owned(), + )); + } + + // check if we can get from `to` to `from` by going through parent_hashes. + let from_number = from_meta.number; + let hashes = { + let mut hashes = vec![to_meta.hash]; + let mut last = to_meta.clone(); + while last.number > from_number { + let header_metadata = self + .client + .header_metadata(last.parent) + .map_err(|e| invalid_block_range::(&last, &to_meta, e.to_string()))?; + hashes.push(header_metadata.hash); + last = header_metadata; + } + if last.hash != from_meta.hash { + return Err(invalid_block_range( + &from_meta, + &to_meta, + "from and to are on different forks".to_owned(), + )); + } + hashes.reverse(); + hashes + }; + + // check if we can filter blocks-with-changes from some (sub)range using changes tries + let changes_trie_range = self + .client + .max_key_changes_range(from_number, BlockId::Hash(to_meta.hash)) + .map_err(client_err)?; + let filtered_range_begin = changes_trie_range.and_then(|(begin, _)| { + // avoids a corner case where begin < from_number (happens when querying genesis) + begin + .checked_sub(&from_number) + .map(|x| x.saturated_into::()) + }); + let (unfiltered_range, filtered_range) = split_range(hashes.len(), filtered_range_begin); + + 
Ok(QueryStorageRange { + hashes, + first_number: from_number, + unfiltered_range, + filtered_range, + }) + } + + /// Iterates through range.unfiltered_range and check each block for changes of keys' values. + fn query_storage_unfiltered( + &self, + range: &QueryStorageRange, + keys: &[StorageKey], + last_values: &mut HashMap>, + changes: &mut Vec>, + ) -> Result<()> { + for block in range.unfiltered_range.start..range.unfiltered_range.end { + let block_hash = range.hashes[block].clone(); + let mut block_changes = StorageChangeSet { + block: block_hash.clone(), + changes: Vec::new(), + }; + let id = BlockId::hash(block_hash); + for key in keys { + let (has_changed, data) = { + let curr_data = self.client.storage(&id, key).map_err(client_err)?; + match last_values.get(key) { + Some(prev_data) => (curr_data != *prev_data, curr_data), + None => (true, curr_data), + } + }; + if has_changed { + block_changes.changes.push((key.clone(), data.clone())); + } + last_values.insert(key.clone(), data); + } + if !block_changes.changes.is_empty() { + changes.push(block_changes); + } + } + Ok(()) + } + + /// Iterates through all blocks that are changing keys within range.filtered_range and collects these changes. 
+ fn query_storage_filtered( + &self, + range: &QueryStorageRange, + keys: &[StorageKey], + last_values: &HashMap>, + changes: &mut Vec>, + ) -> Result<()> { + let (begin, end) = match range.filtered_range { + Some(ref filtered_range) => ( + range.first_number + filtered_range.start.saturated_into(), + BlockId::Hash(range.hashes[filtered_range.end - 1].clone()), + ), + None => return Ok(()), + }; + let mut changes_map: BTreeMap, StorageChangeSet> = + BTreeMap::new(); + for key in keys { + let mut last_block = None; + let mut last_value = last_values.get(key).cloned().unwrap_or_default(); + let key_changes = self + .client + .key_changes(begin, end, None, key) + .map_err(client_err)?; + for (block, _) in key_changes.into_iter().rev() { + if last_block == Some(block) { + continue; + } + + let block_hash = + range.hashes[(block - range.first_number).saturated_into::()].clone(); + let id = BlockId::Hash(block_hash); + let value_at_block = self.client.storage(&id, key).map_err(client_err)?; + if last_value == value_at_block { + continue; + } + + changes_map + .entry(block) + .or_insert_with(|| StorageChangeSet { + block: block_hash, + changes: Vec::new(), + }) + .changes + .push((key.clone(), value_at_block.clone())); + last_block = Some(block); + last_value = value_at_block; + } + } + if let Some(additional_capacity) = changes_map.len().checked_sub(changes.len()) { + changes.reserve(additional_capacity); + } + changes.extend(changes_map.into_iter().map(|(_, cs)| cs)); + Ok(()) + } } -impl StateBackend for FullState where - Block: BlockT + 'static, - BE: Backend + 'static, - Client: ExecutorProvider + StorageProvider + HeaderBackend - + HeaderMetadata + BlockchainEvents - + CallApiAt + ProvideRuntimeApi - + Send + Sync + 'static, - Client::Api: Metadata, +impl StateBackend for FullState +where + Block: BlockT + 'static, + BE: Backend + 'static, + Client: ExecutorProvider + + StorageProvider + + HeaderBackend + + HeaderMetadata + + BlockchainEvents + + CallApiAt + + 
ProvideRuntimeApi + + Send + + Sync + + 'static, + Client::Api: Metadata, { - fn call( - &self, - block: Option, - method: String, - call_data: Bytes, - ) -> FutureResult { - let r = self.block_or_best(block) - .and_then(|block| self - .client - .executor() - .call( - &BlockId::Hash(block), - &method, - &*call_data, - self.client.execution_extensions().strategies().other, - None, - ) - .map(Into::into) - ).map_err(client_err); - Box::new(result(r)) - } - - fn storage_keys( - &self, - block: Option, - prefix: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix)) - .map_err(client_err))) - } - - fn storage_pairs( - &self, - block: Option, - prefix: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.storage_pairs(&BlockId::Hash(block), &prefix)) - .map_err(client_err))) - } - - fn storage_keys_paged( - &self, - block: Option, - prefix: Option, - count: u32, - start_key: Option, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| - self.client.storage_keys_iter( - &BlockId::Hash(block), prefix.as_ref(), start_key.as_ref() - ) - ) - .map(|v| v.take(count as usize).collect()) - .map_err(client_err))) - } - - fn storage( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.storage(&BlockId::Hash(block), &key)) - .map_err(client_err))) - } - - fn storage_hash( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key)) - .map_err(client_err))) - } - - fn child_storage_keys( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - prefix: StorageKey, - ) -> FutureResult> { - Box::new(result( - 
self.block_or_best(block) - .and_then(|block| self.client.child_storage_keys( - &BlockId::Hash(block), - &child_storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &prefix, - )) - .map_err(client_err))) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.child_storage( - &BlockId::Hash(block), - &child_storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) - .map_err(client_err))) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.child_storage_hash( - &BlockId::Hash(block), - &child_storage_key, - ChildInfo::resolve_child_info(child_type, &child_info.0[..]) - .ok_or_else(child_resolution_error)?, - &key, - )) - .map_err(client_err))) - } - - fn metadata(&self, block: Option) -> FutureResult { - Box::new(result( - self.block_or_best(block) - .and_then(|block| - self.client.runtime_api().metadata(&BlockId::Hash(block)).map(Into::into) - ) - .map_err(client_err))) - } - - fn runtime_version(&self, block: Option) -> FutureResult { - Box::new(result( - self.block_or_best(block) - .and_then(|block| self.client.runtime_version_at(&BlockId::Hash(block))) - .map_err(client_err))) - } - - fn query_storage( - &self, - from: Block::Hash, - to: Option, - keys: Vec, - ) -> FutureResult>> { - let call_fn = move || { - let range = self.split_query_storage_range(from, to)?; - let mut changes = Vec::new(); - let mut last_values = HashMap::new(); - self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?; - 
self.query_storage_filtered(&range, &keys, &last_values, &mut changes)?; - Ok(changes) - }; - Box::new(result(call_fn())) - } - - fn query_storage_at( - &self, - keys: Vec, - at: Option - ) -> FutureResult>> { - let at = at.unwrap_or_else(|| self.client.info().best_hash); - self.query_storage(at, Some(at), keys) - } - - fn subscribe_runtime_version( - &self, - _meta: crate::metadata::Metadata, - subscriber: Subscriber, - ) { - let stream = match self.client.storage_changes_notification_stream( - Some(&[StorageKey(well_known_keys::CODE.to_vec())]), - None, - ) { - Ok(stream) => stream, - Err(err) => { - let _ = subscriber.reject(Error::from(client_err(err)).into()); - return; - } - }; - - self.subscriptions.add(subscriber, |sink| { - let version = self.runtime_version(None.into()) - .map_err(Into::into) - .wait(); - - let client = self.client.clone(); - let mut previous_version = version.clone(); - - let stream = stream - .filter_map(move |_| { - let info = client.info(); - let version = client - .runtime_version_at(&BlockId::hash(info.best_hash)) - .map_err(client_err) - .map_err(Into::into); - if previous_version != version { - previous_version = version.clone(); - future::ready(Some(Ok::<_, ()>(version))) - } else { - future::ready(None) - } - }) - .compat(); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all( - stream::iter_result(vec![Ok(version)]) - .chain(stream) - ) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - } - - fn unsubscribe_runtime_version( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } - - fn subscribe_storage( - &self, - _meta: crate::metadata::Metadata, - subscriber: Subscriber>, - keys: Option>, - ) { - let keys = Into::>>::into(keys); - let stream = match self.client.storage_changes_notification_stream( - keys.as_ref().map(|x| &**x), - None - ) { - Ok(stream) => stream, - Err(err) => { - 
let _ = subscriber.reject(client_err(err).into()); - return; - }, - }; - - // initial values - let initial = stream::iter_result(keys - .map(|keys| { - let block = self.client.info().best_hash; - let changes = keys - .into_iter() - .map(|key| self.storage(Some(block.clone()).into(), key.clone()) - .map(|val| (key.clone(), val)) - .wait() - .unwrap_or_else(|_| (key, None)) - ) - .collect(); - vec![Ok(Ok(StorageChangeSet { block, changes }))] - }).unwrap_or_default()); - - self.subscriptions.add(subscriber, |sink| { - let stream = stream - .map(|(block, changes)| Ok::<_, ()>(Ok(StorageChangeSet { - block, - changes: changes.iter() - .filter_map(|(o_sk, k, v)| if o_sk.is_none() { - Some((k.clone(),v.cloned())) - } else { None }).collect(), - }))) - .compat(); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all(initial.chain(stream)) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - } - - fn unsubscribe_storage( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } + fn call( + &self, + block: Option, + method: String, + call_data: Bytes, + ) -> FutureResult { + let r = self + .block_or_best(block) + .and_then(|block| { + self.client + .executor() + .call( + &BlockId::Hash(block), + &method, + &*call_data, + self.client.execution_extensions().strategies().other, + None, + ) + .map(Into::into) + }) + .map_err(client_err); + Box::new(result(r)) + } + + fn storage_keys( + &self, + block: Option, + prefix: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| self.client.storage_keys(&BlockId::Hash(block), &prefix)) + .map_err(client_err), + )) + } + + fn storage_pairs( + &self, + block: Option, + prefix: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| self.client.storage_pairs(&BlockId::Hash(block), &prefix)) + 
.map_err(client_err), + )) + } + + fn storage_keys_paged( + &self, + block: Option, + prefix: Option, + count: u32, + start_key: Option, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + self.client.storage_keys_iter( + &BlockId::Hash(block), + prefix.as_ref(), + start_key.as_ref(), + ) + }) + .map(|v| v.take(count as usize).collect()) + .map_err(client_err), + )) + } + + fn storage( + &self, + block: Option, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| self.client.storage(&BlockId::Hash(block), &key)) + .map_err(client_err), + )) + } + + fn storage_hash( + &self, + block: Option, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| self.client.storage_hash(&BlockId::Hash(block), &key)) + .map_err(client_err), + )) + } + + fn child_storage_keys( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + prefix: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + self.client.child_storage_keys( + &BlockId::Hash(block), + &child_storage_key, + ChildInfo::resolve_child_info(child_type, &child_info.0[..]) + .ok_or_else(child_resolution_error)?, + &prefix, + ) + }) + .map_err(client_err), + )) + } + + fn child_storage( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + ) -> FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + self.client.child_storage( + &BlockId::Hash(block), + &child_storage_key, + ChildInfo::resolve_child_info(child_type, &child_info.0[..]) + .ok_or_else(child_resolution_error)?, + &key, + ) + }) + .map_err(client_err), + )) + } + + fn child_storage_hash( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + ) -> 
FutureResult> { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + self.client.child_storage_hash( + &BlockId::Hash(block), + &child_storage_key, + ChildInfo::resolve_child_info(child_type, &child_info.0[..]) + .ok_or_else(child_resolution_error)?, + &key, + ) + }) + .map_err(client_err), + )) + } + + fn metadata(&self, block: Option) -> FutureResult { + Box::new(result( + self.block_or_best(block) + .and_then(|block| { + self.client + .runtime_api() + .metadata(&BlockId::Hash(block)) + .map(Into::into) + }) + .map_err(client_err), + )) + } + + fn runtime_version(&self, block: Option) -> FutureResult { + Box::new(result( + self.block_or_best(block) + .and_then(|block| self.client.runtime_version_at(&BlockId::Hash(block))) + .map_err(client_err), + )) + } + + fn query_storage( + &self, + from: Block::Hash, + to: Option, + keys: Vec, + ) -> FutureResult>> { + let call_fn = move || { + let range = self.split_query_storage_range(from, to)?; + let mut changes = Vec::new(); + let mut last_values = HashMap::new(); + self.query_storage_unfiltered(&range, &keys, &mut last_values, &mut changes)?; + self.query_storage_filtered(&range, &keys, &last_values, &mut changes)?; + Ok(changes) + }; + Box::new(result(call_fn())) + } + + fn query_storage_at( + &self, + keys: Vec, + at: Option, + ) -> FutureResult>> { + let at = at.unwrap_or_else(|| self.client.info().best_hash); + self.query_storage(at, Some(at), keys) + } + + fn subscribe_runtime_version( + &self, + _meta: crate::metadata::Metadata, + subscriber: Subscriber, + ) { + let stream = match self.client.storage_changes_notification_stream( + Some(&[StorageKey(well_known_keys::CODE.to_vec())]), + None, + ) { + Ok(stream) => stream, + Err(err) => { + let _ = subscriber.reject(Error::from(client_err(err)).into()); + return; + } + }; + + self.subscriptions.add(subscriber, |sink| { + let version = self.runtime_version(None.into()).map_err(Into::into).wait(); + + let client = self.client.clone(); + let mut 
previous_version = version.clone(); + + let stream = stream + .filter_map(move |_| { + let info = client.info(); + let version = client + .runtime_version_at(&BlockId::hash(info.best_hash)) + .map_err(client_err) + .map_err(Into::into); + if previous_version != version { + previous_version = version.clone(); + future::ready(Some(Ok::<_, ()>(version))) + } else { + future::ready(None) + } + }) + .compat(); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(stream::iter_result(vec![Ok(version)]).chain(stream)) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }); + } + + fn unsubscribe_runtime_version( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions.cancel(id)) + } + + fn subscribe_storage( + &self, + _meta: crate::metadata::Metadata, + subscriber: Subscriber>, + keys: Option>, + ) { + let keys = Into::>>::into(keys); + let stream = match self + .client + .storage_changes_notification_stream(keys.as_ref().map(|x| &**x), None) + { + Ok(stream) => stream, + Err(err) => { + let _ = subscriber.reject(client_err(err).into()); + return; + } + }; + + // initial values + let initial = stream::iter_result( + keys.map(|keys| { + let block = self.client.info().best_hash; + let changes = keys + .into_iter() + .map(|key| { + self.storage(Some(block.clone()).into(), key.clone()) + .map(|val| (key.clone(), val)) + .wait() + .unwrap_or_else(|_| (key, None)) + }) + .collect(); + vec![Ok(Ok(StorageChangeSet { block, changes }))] + }) + .unwrap_or_default(), + ); + + self.subscriptions.add(subscriber, |sink| { + let stream = stream + .map(|(block, changes)| { + Ok::<_, ()>(Ok(StorageChangeSet { + block, + changes: changes + .iter() + .filter_map(|(o_sk, k, v)| { + if o_sk.is_none() { + Some((k.clone(), v.cloned())) + } else { + None + } + }) + .collect(), + })) + }) + .compat(); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + 
.send_all(initial.chain(stream)) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }); + } + + fn unsubscribe_storage( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions.cancel(id)) + } } /// Splits passed range into two subranges where: /// - first range has at least one element in it; /// - second range (optionally) starts at given `middle` element. -pub(crate) fn split_range(size: usize, middle: Option) -> (Range, Option>) { - // check if we can filter blocks-with-changes from some (sub)range using changes tries - let range2_begin = match middle { - // some of required changes tries are pruned => use available tries - Some(middle) if middle != 0 => Some(middle), - // all required changes tries are available, but we still want values at first block - // => do 'unfiltered' read for the first block and 'filtered' for the rest - Some(_) if size > 1 => Some(1), - // range contains single element => do not use changes tries - Some(_) => None, - // changes tries are not available => do 'unfiltered' read for the whole range - None => None, - }; - let range1 = 0..range2_begin.unwrap_or(size); - let range2 = range2_begin.map(|begin| begin..size); - (range1, range2) +pub(crate) fn split_range( + size: usize, + middle: Option, +) -> (Range, Option>) { + // check if we can filter blocks-with-changes from some (sub)range using changes tries + let range2_begin = match middle { + // some of required changes tries are pruned => use available tries + Some(middle) if middle != 0 => Some(middle), + // all required changes tries are available, but we still want values at first block + // => do 'unfiltered' read for the first block and 'filtered' for the rest + Some(_) if size > 1 => Some(1), + // range contains single element => do not use changes tries + Some(_) => None, + // changes tries are not available => do 'unfiltered' read for the whole range + None => None, + }; + let range1 = 
0..range2_begin.unwrap_or(size); + let range2 = range2_begin.map(|begin| begin..size); + (range1, range2) } fn invalid_block_range( - from: &CachedHeaderMetadata, - to: &CachedHeaderMetadata, - details: String, + from: &CachedHeaderMetadata, + to: &CachedHeaderMetadata, + details: String, ) -> Error { - let to_string = |h: &CachedHeaderMetadata| format!("{} ({:?})", h.number, h.hash); + let to_string = |h: &CachedHeaderMetadata| format!("{} ({:?})", h.number, h.hash); - Error::InvalidBlockRange { - from: to_string(from), - to: to_string(to), - details, - } + Error::InvalidBlockRange { + from: to_string(from), + to: to_string(to), + details, + } } -fn invalid_block( - from: B::Hash, - to: Option, - details: String, -) -> Error { - Error::InvalidBlockRange { - from: format!("{:?}", from), - to: format!("{:?}", to), - details, - } +fn invalid_block(from: B::Hash, to: Option, details: String) -> Error { + Error::InvalidBlockRange { + from: format!("{:?}", from), + to: format!("{:?}", to), + details, + } } diff --git a/client/rpc/src/state/state_light.rs b/client/rpc/src/state/state_light.rs index 092419ad01..245d4b2c32 100644 --- a/client/rpc/src/state/state_light.rs +++ b/client/rpc/src/state/state_light.rs @@ -16,797 +16,867 @@ //! State API backend for light nodes. 
-use std::{ - sync::Arc, - collections::{HashSet, HashMap, hash_map::Entry}, -}; use codec::Decode; use futures::{ - future::{ready, Either}, - channel::oneshot::{channel, Sender}, - FutureExt, TryFutureExt, - StreamExt as _, TryStreamExt as _, + channel::oneshot::{channel, Sender}, + future::{ready, Either}, + FutureExt, StreamExt as _, TryFutureExt, TryStreamExt as _, }; use hash_db::Hasher; use jsonrpc_pubsub::{typed::Subscriber, SubscriptionId}; use log::warn; use parking_lot::Mutex; use rpc::{ - Result as RpcResult, - futures::Sink, - futures::future::{result, Future}, - futures::stream::Stream, + futures::future::{result, Future}, + futures::stream::Stream, + futures::Sink, + Result as RpcResult, +}; +use std::{ + collections::{hash_map::Entry, HashMap, HashSet}, + sync::Arc, }; -use sc_rpc_api::Subscriptions; -use sp_blockchain::{Error as ClientError, HeaderBackend}; use sc_client::{ - BlockchainEvents, - light::{ - blockchain::{future_header, RemoteBlockchain}, - fetcher::{Fetcher, RemoteCallRequest, RemoteReadRequest, RemoteReadChildRequest}, - }, + light::{ + blockchain::{future_header, RemoteBlockchain}, + fetcher::{Fetcher, RemoteCallRequest, RemoteReadChildRequest, RemoteReadRequest}, + }, + BlockchainEvents, }; +use sc_rpc_api::Subscriptions; +use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_core::{ - Bytes, OpaqueMetadata, storage::{StorageKey, StorageData, StorageChangeSet}, + storage::{StorageChangeSet, StorageData, StorageKey}, + Bytes, OpaqueMetadata, +}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, HashFor}, }; use sp_version::RuntimeVersion; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, HashFor}}; -use super::{StateBackend, error::{FutureResult, Error}, client_err}; +use super::{ + client_err, + error::{Error, FutureResult}, + StateBackend, +}; /// Storage data map of storage keys => (optional) storage value. type StorageMap = HashMap>; /// State API backend for light nodes. 
pub struct LightState, Client> { - client: Arc, - subscriptions: Subscriptions, - version_subscriptions: SimpleSubscriptions, - storage_subscriptions: Arc>>, - remote_blockchain: Arc>, - fetcher: Arc, + client: Arc, + subscriptions: Subscriptions, + version_subscriptions: SimpleSubscriptions, + storage_subscriptions: Arc>>, + remote_blockchain: Arc>, + fetcher: Arc, } /// Shared requests container. trait SharedRequests: Clone + Send + Sync { - /// Tries to listen for already issued request, or issues request. - /// - /// Returns true if requests has been issued. - fn listen_request( - &self, - block: Hash, - sender: Sender>, - ) -> bool; - - /// Returns (and forgets) all listeners for given request. - fn on_response_received(&self, block: Hash) -> Vec>>; + /// Tries to listen for already issued request, or issues request. + /// + /// Returns true if requests has been issued. + fn listen_request(&self, block: Hash, sender: Sender>) -> bool; + + /// Returns (and forgets) all listeners for given request. + fn on_response_received(&self, block: Hash) -> Vec>>; } /// Storage subscriptions data. struct StorageSubscriptions { - /// Active storage requests. - active_requests: HashMap>>>, - /// Map of subscription => keys that this subscription watch for. - keys_by_subscription: HashMap>, - /// Map of key => set of subscriptions that watch this key. - subscriptions_by_key: HashMap>, + /// Active storage requests. + active_requests: HashMap>>>, + /// Map of subscription => keys that this subscription watch for. + keys_by_subscription: HashMap>, + /// Map of key => set of subscriptions that watch this key. 
+ subscriptions_by_key: HashMap>, } -impl SharedRequests for Arc>> { - fn listen_request( - &self, - block: Block::Hash, - sender: Sender>, - ) -> bool { - let mut subscriptions = self.lock(); - let active_requests_at = subscriptions.active_requests.entry(block).or_default(); - active_requests_at.push(sender); - active_requests_at.len() == 1 - } - - fn on_response_received(&self, block: Block::Hash) -> Vec>> { - self.lock().active_requests.remove(&block).unwrap_or_default() - } +impl SharedRequests + for Arc>> +{ + fn listen_request(&self, block: Block::Hash, sender: Sender>) -> bool { + let mut subscriptions = self.lock(); + let active_requests_at = subscriptions.active_requests.entry(block).or_default(); + active_requests_at.push(sender); + active_requests_at.len() == 1 + } + + fn on_response_received(&self, block: Block::Hash) -> Vec>> { + self.lock() + .active_requests + .remove(&block) + .unwrap_or_default() + } } /// Simple, maybe shared, subscription data that shares per block requests. 
type SimpleSubscriptions = Arc>>>>>; -impl SharedRequests for SimpleSubscriptions where - Hash: Send + Eq + std::hash::Hash, - V: Send, +impl SharedRequests for SimpleSubscriptions +where + Hash: Send + Eq + std::hash::Hash, + V: Send, { - fn listen_request( - &self, - block: Hash, - sender: Sender>, - ) -> bool { - let mut subscriptions = self.lock(); - let active_requests_at = subscriptions.entry(block).or_default(); - active_requests_at.push(sender); - active_requests_at.len() == 1 - } - - fn on_response_received(&self, block: Hash) -> Vec>> { - self.lock().remove(&block).unwrap_or_default() - } + fn listen_request(&self, block: Hash, sender: Sender>) -> bool { + let mut subscriptions = self.lock(); + let active_requests_at = subscriptions.entry(block).or_default(); + active_requests_at.push(sender); + active_requests_at.len() == 1 + } + + fn on_response_received(&self, block: Hash) -> Vec>> { + self.lock().remove(&block).unwrap_or_default() + } } impl + 'static, Client> LightState - where - Block: BlockT, - Client: HeaderBackend + Send + Sync + 'static, +where + Block: BlockT, + Client: HeaderBackend + Send + Sync + 'static, { - /// Create new state API backend for light nodes. - pub fn new( - client: Arc, - subscriptions: Subscriptions, - remote_blockchain: Arc>, - fetcher: Arc, - ) -> Self { - Self { - client, - subscriptions, - version_subscriptions: Arc::new(Mutex::new(HashMap::new())), - storage_subscriptions: Arc::new(Mutex::new(StorageSubscriptions { - active_requests: HashMap::new(), - keys_by_subscription: HashMap::new(), - subscriptions_by_key: HashMap::new(), - })), - remote_blockchain, - fetcher, - } - } - - /// Returns given block hash or best block hash if None is passed. - fn block_or_best(&self, hash: Option) -> Block::Hash { - hash.unwrap_or_else(|| self.client.info().best_hash) - } + /// Create new state API backend for light nodes. 
+ pub fn new( + client: Arc, + subscriptions: Subscriptions, + remote_blockchain: Arc>, + fetcher: Arc, + ) -> Self { + Self { + client, + subscriptions, + version_subscriptions: Arc::new(Mutex::new(HashMap::new())), + storage_subscriptions: Arc::new(Mutex::new(StorageSubscriptions { + active_requests: HashMap::new(), + keys_by_subscription: HashMap::new(), + subscriptions_by_key: HashMap::new(), + })), + remote_blockchain, + fetcher, + } + } + + /// Returns given block hash or best block hash if None is passed. + fn block_or_best(&self, hash: Option) -> Block::Hash { + hash.unwrap_or_else(|| self.client.info().best_hash) + } } impl StateBackend for LightState - where - Block: BlockT, - Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, - F: Fetcher + 'static +where + Block: BlockT, + Client: BlockchainEvents + HeaderBackend + Send + Sync + 'static, + F: Fetcher + 'static, { - fn call( - &self, - block: Option, - method: String, - call_data: Bytes, - ) -> FutureResult { - Box::new(call( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - method, - call_data, - ).boxed().compat()) - } - - fn storage_keys( - &self, - _block: Option, - _prefix: StorageKey, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn storage_pairs( - &self, - _block: Option, - _prefix: StorageKey, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn storage_keys_paged( - &self, - _block: Option, - _prefix: Option, - _count: u32, - _start_key: Option, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn storage( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult> { - Box::new(storage( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - vec![key.0.clone()], - ).boxed().compat().map(move |mut values| values - .remove(&key) - 
.expect("successful request has entries for all requested keys; qed") - )) - } - - fn storage_hash( - &self, - block: Option, - key: StorageKey, - ) -> FutureResult> { - Box::new(self - .storage(block, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) - ) - } - - fn child_storage_keys( - &self, - _block: Option, - _child_storage_key: StorageKey, - _child_info: StorageKey, - _child_type: u32, - _prefix: StorageKey, - ) -> FutureResult> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn child_storage( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - let block = self.block_or_best(block); - let fetcher = self.fetcher.clone(); - let child_storage = resolve_header(&*self.remote_blockchain, &*self.fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read_child(RemoteReadChildRequest { - block, - header, - storage_key: child_storage_key.0, - child_info: child_info.0, - child_type, - keys: vec![key.0.clone()], - retry_count: Default::default(), - }).then(move |result| ready(result - .map(|mut data| data - .remove(&key.0) - .expect("successful result has entry for all keys; qed") - .map(StorageData) - ) - .map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), - }); - - Box::new(child_storage.boxed().compat()) - } - - fn child_storage_hash( - &self, - block: Option, - child_storage_key: StorageKey, - child_info: StorageKey, - child_type: u32, - key: StorageKey, - ) -> FutureResult> { - Box::new(self - .child_storage(block, child_storage_key, child_info, child_type, key) - .and_then(|maybe_storage| - result(Ok(maybe_storage.map(|storage| HashFor::::hash(&storage.0)))) - ) - ) - } - - fn metadata(&self, block: Option) -> FutureResult { - let metadata = self.call(block, "Metadata_metadata".into(), Bytes(Vec::new())) - 
.and_then(|metadata| OpaqueMetadata::decode(&mut &metadata.0[..]) - .map(Into::into) - .map_err(|decode_err| client_err(ClientError::CallResultDecode( - "Unable to decode metadata", - decode_err, - )))); - - Box::new(metadata) - } - - fn runtime_version(&self, block: Option) -> FutureResult { - Box::new(runtime_version( - &*self.remote_blockchain, - self.fetcher.clone(), - self.block_or_best(block), - ).boxed().compat()) - } - - fn query_storage( - &self, - _from: Block::Hash, - _to: Option, - _keys: Vec, - ) -> FutureResult>> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn query_storage_at( - &self, - _keys: Vec, - _at: Option - ) -> FutureResult>> { - Box::new(result(Err(client_err(ClientError::NotAvailableOnLightClient)))) - } - - fn subscribe_storage( - &self, - _meta: crate::metadata::Metadata, - subscriber: Subscriber>, - keys: Option> - ) { - let keys = match keys { - Some(keys) if !keys.is_empty() => keys, - _ => { - warn!("Cannot subscribe to all keys on light client. 
Subscription rejected."); - return; - } - }; - - let keys = keys.iter().cloned().collect::>(); - let keys_to_check = keys.iter().map(|k| k.0.clone()).collect::>(); - let subscription_id = self.subscriptions.add(subscriber, move |sink| { - let fetcher = self.fetcher.clone(); - let remote_blockchain = self.remote_blockchain.clone(); - let storage_subscriptions = self.storage_subscriptions.clone(); - let initial_block = self.block_or_best(None); - let initial_keys = keys_to_check.iter().cloned().collect::>(); - - let changes_stream = subscription_stream::( - storage_subscriptions.clone(), - self.client - .import_notification_stream() - .map(|notification| Ok::<_, ()>(notification.hash)) - .compat(), - display_error(storage( - &*remote_blockchain, - fetcher.clone(), - initial_block, - initial_keys, - ).map(move |r| r.map(|r| (initial_block, r)))), - move |block| { - // there'll be single request per block for all active subscriptions - // with all subscribed keys - let keys = storage_subscriptions - .lock() - .subscriptions_by_key - .keys() - .map(|k| k.0.clone()) - .collect(); - - storage( - &*remote_blockchain, - fetcher.clone(), - block, - keys, - ) - }, - move |block, old_value, new_value| { - // let's only select keys which are valid for this subscription - let new_value = new_value - .iter() - .filter(|(k, _)| keys_to_check.contains(&k.0)) - .map(|(k, v)| (k.clone(), v.clone())) - .collect::>(); - let value_differs = old_value - .as_ref() - .map(|old_value| **old_value != new_value) - .unwrap_or(true); - match value_differs { - true => Some(StorageChangeSet { - block, - changes: new_value - .iter() - .map(|(k, v)| (k.clone(), v.clone())) - .collect(), - }), - false => None, - } - } - ); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all(changes_stream.map(|changes| Ok(changes))) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - - // remember keys associated with this 
subscription - let mut storage_subscriptions = self.storage_subscriptions.lock(); - storage_subscriptions.keys_by_subscription.insert(subscription_id.clone(), keys.clone()); - for key in keys { - storage_subscriptions - .subscriptions_by_key - .entry(key) - .or_default() - .insert(subscription_id.clone()); - } - } - - fn unsubscribe_storage( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - if !self.subscriptions.cancel(id.clone()) { - return Ok(false); - } - - // forget subscription keys - let mut storage_subscriptions = self.storage_subscriptions.lock(); - let keys = storage_subscriptions.keys_by_subscription.remove(&id); - for key in keys.into_iter().flat_map(|keys| keys.into_iter()) { - match storage_subscriptions.subscriptions_by_key.entry(key) { - Entry::Vacant(_) => unreachable!("every key from keys_by_subscription has\ - corresponding entry in subscriptions_by_key; qed"), - Entry::Occupied(mut entry) => { - entry.get_mut().remove(&id); - if entry.get().is_empty() { - entry.remove(); - } - } - } - } - - Ok(true) - } - - fn subscribe_runtime_version( - &self, - _meta: crate::metadata::Metadata, - subscriber: Subscriber, - ) { - self.subscriptions.add(subscriber, move |sink| { - let fetcher = self.fetcher.clone(); - let remote_blockchain = self.remote_blockchain.clone(); - let version_subscriptions = self.version_subscriptions.clone(); - let initial_block = self.block_or_best(None); - - let versions_stream = subscription_stream::( - version_subscriptions, - self.client - .import_notification_stream() - .map(|notification| Ok::<_, ()>(notification.hash)) - .compat(), - display_error(runtime_version( - &*remote_blockchain, - fetcher.clone(), - initial_block, - ).map(move |r| r.map(|r| (initial_block, r)))), - move |block| runtime_version( - &*remote_blockchain, - fetcher.clone(), - block, - ), - |_, old_version, new_version| { - let version_differs = old_version - .as_ref() - .map(|old_version| *old_version != new_version) - .unwrap_or(true); 
- match version_differs { - true => Some(new_version.clone()), - false => None, - } - } - ); - - sink - .sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) - .send_all(versions_stream.map(|version| Ok(version))) - // we ignore the resulting Stream (if the first stream is over we are unsubscribed) - .map(|_| ()) - }); - } - - fn unsubscribe_runtime_version( - &self, - _meta: Option, - id: SubscriptionId, - ) -> RpcResult { - Ok(self.subscriptions.cancel(id)) - } + fn call( + &self, + block: Option, + method: String, + call_data: Bytes, + ) -> FutureResult { + Box::new( + call( + &*self.remote_blockchain, + self.fetcher.clone(), + self.block_or_best(block), + method, + call_data, + ) + .boxed() + .compat(), + ) + } + + fn storage_keys( + &self, + _block: Option, + _prefix: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err( + ClientError::NotAvailableOnLightClient, + )))) + } + + fn storage_pairs( + &self, + _block: Option, + _prefix: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err( + ClientError::NotAvailableOnLightClient, + )))) + } + + fn storage_keys_paged( + &self, + _block: Option, + _prefix: Option, + _count: u32, + _start_key: Option, + ) -> FutureResult> { + Box::new(result(Err(client_err( + ClientError::NotAvailableOnLightClient, + )))) + } + + fn storage( + &self, + block: Option, + key: StorageKey, + ) -> FutureResult> { + Box::new( + storage( + &*self.remote_blockchain, + self.fetcher.clone(), + self.block_or_best(block), + vec![key.0.clone()], + ) + .boxed() + .compat() + .map(move |mut values| { + values + .remove(&key) + .expect("successful request has entries for all requested keys; qed") + }), + ) + } + + fn storage_hash( + &self, + block: Option, + key: StorageKey, + ) -> FutureResult> { + Box::new(self.storage(block, key).and_then(|maybe_storage| { + result(Ok( + maybe_storage.map(|storage| HashFor::::hash(&storage.0)) + )) + })) + } + + fn child_storage_keys( + &self, + _block: Option, + 
_child_storage_key: StorageKey, + _child_info: StorageKey, + _child_type: u32, + _prefix: StorageKey, + ) -> FutureResult> { + Box::new(result(Err(client_err( + ClientError::NotAvailableOnLightClient, + )))) + } + + fn child_storage( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + ) -> FutureResult> { + let block = self.block_or_best(block); + let fetcher = self.fetcher.clone(); + let child_storage = + resolve_header(&*self.remote_blockchain, &*self.fetcher, block).then(move |result| { + match result { + Ok(header) => Either::Left( + fetcher + .remote_read_child(RemoteReadChildRequest { + block, + header, + storage_key: child_storage_key.0, + child_info: child_info.0, + child_type, + keys: vec![key.0.clone()], + retry_count: Default::default(), + }) + .then(move |result| { + ready( + result + .map(|mut data| { + data.remove(&key.0) + .expect( + "successful result has entry for all keys; qed", + ) + .map(StorageData) + }) + .map_err(client_err), + ) + }), + ), + Err(error) => Either::Right(ready(Err(error))), + } + }); + + Box::new(child_storage.boxed().compat()) + } + + fn child_storage_hash( + &self, + block: Option, + child_storage_key: StorageKey, + child_info: StorageKey, + child_type: u32, + key: StorageKey, + ) -> FutureResult> { + Box::new( + self.child_storage(block, child_storage_key, child_info, child_type, key) + .and_then(|maybe_storage| { + result(Ok( + maybe_storage.map(|storage| HashFor::::hash(&storage.0)) + )) + }), + ) + } + + fn metadata(&self, block: Option) -> FutureResult { + let metadata = self + .call(block, "Metadata_metadata".into(), Bytes(Vec::new())) + .and_then(|metadata| { + OpaqueMetadata::decode(&mut &metadata.0[..]) + .map(Into::into) + .map_err(|decode_err| { + client_err(ClientError::CallResultDecode( + "Unable to decode metadata", + decode_err, + )) + }) + }); + + Box::new(metadata) + } + + fn runtime_version(&self, block: Option) -> FutureResult { + 
Box::new( + runtime_version( + &*self.remote_blockchain, + self.fetcher.clone(), + self.block_or_best(block), + ) + .boxed() + .compat(), + ) + } + + fn query_storage( + &self, + _from: Block::Hash, + _to: Option, + _keys: Vec, + ) -> FutureResult>> { + Box::new(result(Err(client_err( + ClientError::NotAvailableOnLightClient, + )))) + } + + fn query_storage_at( + &self, + _keys: Vec, + _at: Option, + ) -> FutureResult>> { + Box::new(result(Err(client_err( + ClientError::NotAvailableOnLightClient, + )))) + } + + fn subscribe_storage( + &self, + _meta: crate::metadata::Metadata, + subscriber: Subscriber>, + keys: Option>, + ) { + let keys = match keys { + Some(keys) if !keys.is_empty() => keys, + _ => { + warn!("Cannot subscribe to all keys on light client. Subscription rejected."); + return; + } + }; + + let keys = keys.iter().cloned().collect::>(); + let keys_to_check = keys.iter().map(|k| k.0.clone()).collect::>(); + let subscription_id = self.subscriptions.add(subscriber, move |sink| { + let fetcher = self.fetcher.clone(); + let remote_blockchain = self.remote_blockchain.clone(); + let storage_subscriptions = self.storage_subscriptions.clone(); + let initial_block = self.block_or_best(None); + let initial_keys = keys_to_check.iter().cloned().collect::>(); + + let changes_stream = subscription_stream::( + storage_subscriptions.clone(), + self.client + .import_notification_stream() + .map(|notification| Ok::<_, ()>(notification.hash)) + .compat(), + display_error( + storage( + &*remote_blockchain, + fetcher.clone(), + initial_block, + initial_keys, + ) + .map(move |r| r.map(|r| (initial_block, r))), + ), + move |block| { + // there'll be single request per block for all active subscriptions + // with all subscribed keys + let keys = storage_subscriptions + .lock() + .subscriptions_by_key + .keys() + .map(|k| k.0.clone()) + .collect(); + + storage(&*remote_blockchain, fetcher.clone(), block, keys) + }, + move |block, old_value, new_value| { + // let's only select 
keys which are valid for this subscription + let new_value = new_value + .iter() + .filter(|(k, _)| keys_to_check.contains(&k.0)) + .map(|(k, v)| (k.clone(), v.clone())) + .collect::>(); + let value_differs = old_value + .as_ref() + .map(|old_value| **old_value != new_value) + .unwrap_or(true); + match value_differs { + true => Some(StorageChangeSet { + block, + changes: new_value + .iter() + .map(|(k, v)| (k.clone(), v.clone())) + .collect(), + }), + false => None, + } + }, + ); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(changes_stream.map(|changes| Ok(changes))) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }); + + // remember keys associated with this subscription + let mut storage_subscriptions = self.storage_subscriptions.lock(); + storage_subscriptions + .keys_by_subscription + .insert(subscription_id.clone(), keys.clone()); + for key in keys { + storage_subscriptions + .subscriptions_by_key + .entry(key) + .or_default() + .insert(subscription_id.clone()); + } + } + + fn unsubscribe_storage( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult { + if !self.subscriptions.cancel(id.clone()) { + return Ok(false); + } + + // forget subscription keys + let mut storage_subscriptions = self.storage_subscriptions.lock(); + let keys = storage_subscriptions.keys_by_subscription.remove(&id); + for key in keys.into_iter().flat_map(|keys| keys.into_iter()) { + match storage_subscriptions.subscriptions_by_key.entry(key) { + Entry::Vacant(_) => unreachable!( + "every key from keys_by_subscription has\ + corresponding entry in subscriptions_by_key; qed" + ), + Entry::Occupied(mut entry) => { + entry.get_mut().remove(&id); + if entry.get().is_empty() { + entry.remove(); + } + } + } + } + + Ok(true) + } + + fn subscribe_runtime_version( + &self, + _meta: crate::metadata::Metadata, + subscriber: Subscriber, + ) { + self.subscriptions.add(subscriber, move |sink| { + let 
fetcher = self.fetcher.clone(); + let remote_blockchain = self.remote_blockchain.clone(); + let version_subscriptions = self.version_subscriptions.clone(); + let initial_block = self.block_or_best(None); + + let versions_stream = subscription_stream::( + version_subscriptions, + self.client + .import_notification_stream() + .map(|notification| Ok::<_, ()>(notification.hash)) + .compat(), + display_error( + runtime_version(&*remote_blockchain, fetcher.clone(), initial_block) + .map(move |r| r.map(|r| (initial_block, r))), + ), + move |block| runtime_version(&*remote_blockchain, fetcher.clone(), block), + |_, old_version, new_version| { + let version_differs = old_version + .as_ref() + .map(|old_version| *old_version != new_version) + .unwrap_or(true); + match version_differs { + true => Some(new_version.clone()), + false => None, + } + }, + ); + + sink.sink_map_err(|e| warn!("Error sending notifications: {:?}", e)) + .send_all(versions_stream.map(|version| Ok(version))) + // we ignore the resulting Stream (if the first stream is over we are unsubscribed) + .map(|_| ()) + }); + } + + fn unsubscribe_runtime_version( + &self, + _meta: Option, + id: SubscriptionId, + ) -> RpcResult { + Ok(self.subscriptions.cancel(id)) + } } /// Resolve header by hash. 
fn resolve_header>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: &F, - block: Block::Hash, + remote_blockchain: &dyn RemoteBlockchain, + fetcher: &F, + block: Block::Hash, ) -> impl std::future::Future> { - let maybe_header = future_header( - remote_blockchain, - fetcher, - BlockId::Hash(block), - ); - - maybe_header.then(move |result| - ready(result.and_then(|maybe_header| - maybe_header.ok_or(ClientError::UnknownBlock(format!("{}", block))) - ).map_err(client_err)), - ) + let maybe_header = future_header(remote_blockchain, fetcher, BlockId::Hash(block)); + + maybe_header.then(move |result| { + ready( + result + .and_then(|maybe_header| { + maybe_header.ok_or(ClientError::UnknownBlock(format!("{}", block))) + }) + .map_err(client_err), + ) + }) } /// Call runtime method at given block fn call>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: Arc, - block: Block::Hash, - method: String, - call_data: Bytes, + remote_blockchain: &dyn RemoteBlockchain, + fetcher: Arc, + block: Block::Hash, + method: String, + call_data: Bytes, ) -> impl std::future::Future> { - resolve_header(remote_blockchain, &*fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_call(RemoteCallRequest { - block, - header, - method, - call_data: call_data.0, - retry_count: Default::default(), - }).then(|result| ready(result.map(Bytes).map_err(client_err)))), - Err(error) => Either::Right(ready(Err(error))), - }) + resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { + Ok(header) => Either::Left( + fetcher + .remote_call(RemoteCallRequest { + block, + header, + method, + call_data: call_data.0, + retry_count: Default::default(), + }) + .then(|result| ready(result.map(Bytes).map_err(client_err))), + ), + Err(error) => Either::Right(ready(Err(error))), + }) } /// Get runtime version at given block. 
fn runtime_version>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: Arc, - block: Block::Hash, + remote_blockchain: &dyn RemoteBlockchain, + fetcher: Arc, + block: Block::Hash, ) -> impl std::future::Future> { - call( - remote_blockchain, - fetcher, - block, - "Core_version".into(), - Bytes(Vec::new()), - ) - .then(|version| ready(version.and_then(|version| - Decode::decode(&mut &version.0[..]) - .map_err(|e| client_err(ClientError::VersionInvalid(e.what().into()))) - ))) + call( + remote_blockchain, + fetcher, + block, + "Core_version".into(), + Bytes(Vec::new()), + ) + .then(|version| { + ready(version.and_then(|version| { + Decode::decode(&mut &version.0[..]) + .map_err(|e| client_err(ClientError::VersionInvalid(e.what().into()))) + })) + }) } /// Get storage value at given key at given block. fn storage>( - remote_blockchain: &dyn RemoteBlockchain, - fetcher: Arc, - block: Block::Hash, - keys: Vec>, + remote_blockchain: &dyn RemoteBlockchain, + fetcher: Arc, + block: Block::Hash, + keys: Vec>, ) -> impl std::future::Future>, Error>> { - resolve_header(remote_blockchain, &*fetcher, block) - .then(move |result| match result { - Ok(header) => Either::Left(fetcher.remote_read(RemoteReadRequest { - block, - header, - keys, - retry_count: Default::default(), - }).then(|result| ready(result - .map(|result| result - .into_iter() - .map(|(key, value)| (StorageKey(key), value.map(StorageData))) - .collect() - ).map_err(client_err) - ))), - Err(error) => Either::Right(ready(Err(error))), - }) + resolve_header(remote_blockchain, &*fetcher, block).then(move |result| match result { + Ok(header) => Either::Left( + fetcher + .remote_read(RemoteReadRequest { + block, + header, + keys, + retry_count: Default::default(), + }) + .then(|result| { + ready( + result + .map(|result| { + result + .into_iter() + .map(|(key, value)| (StorageKey(key), value.map(StorageData))) + .collect() + }) + .map_err(client_err), + ) + }), + ), + Err(error) => Either::Right(ready(Err(error))), 
+ }) } /// Returns subscription stream that issues request on every imported block and /// if value has changed from previous block, emits (stream) item. fn subscription_stream< - Block, - Requests, - FutureBlocksStream, - V, N, - InitialRequestFuture, - IssueRequest, IssueRequestFuture, - CompareValues, + Block, + Requests, + FutureBlocksStream, + V, + N, + InitialRequestFuture, + IssueRequest, + IssueRequestFuture, + CompareValues, >( - shared_requests: Requests, - future_blocks_stream: FutureBlocksStream, - initial_request: InitialRequestFuture, - issue_request: IssueRequest, - compare_values: CompareValues, -) -> impl Stream where - Block: BlockT, - Requests: 'static + SharedRequests, - FutureBlocksStream: Stream, - V: Send + 'static + Clone, - InitialRequestFuture: std::future::Future> + Send + 'static, - IssueRequest: 'static + Fn(Block::Hash) -> IssueRequestFuture, - IssueRequestFuture: std::future::Future> + Send + 'static, - CompareValues: Fn(Block::Hash, Option<&V>, &V) -> Option, + shared_requests: Requests, + future_blocks_stream: FutureBlocksStream, + initial_request: InitialRequestFuture, + issue_request: IssueRequest, + compare_values: CompareValues, +) -> impl Stream +where + Block: BlockT, + Requests: 'static + SharedRequests, + FutureBlocksStream: Stream, + V: Send + 'static + Clone, + InitialRequestFuture: + std::future::Future> + Send + 'static, + IssueRequest: 'static + Fn(Block::Hash) -> IssueRequestFuture, + IssueRequestFuture: std::future::Future> + Send + 'static, + CompareValues: Fn(Block::Hash, Option<&V>, &V) -> Option, { - // we need to send initial value first, then we'll only be sending if value has changed - let previous_value = Arc::new(Mutex::new(None)); - - // prepare 'stream' of initial values - let initial_value_stream = ignore_error(initial_request) - .boxed() - .compat() - .into_stream(); - - // prepare stream of future values - // - // we do not want to stop stream if single request fails - // (the warning should have been 
already issued by the request issuer) - let future_values_stream = future_blocks_stream - .and_then(move |block| ignore_error(maybe_share_remote_request::( - shared_requests.clone(), - block, - &issue_request, - ).map(move |r| r.map(|v| (block, v)))).boxed().compat()); - - // now let's return changed values for selected blocks - initial_value_stream - .chain(future_values_stream) - .filter_map(move |block_and_new_value| block_and_new_value.and_then(|(block, new_value)| { - let mut previous_value = previous_value.lock(); - compare_values(block, previous_value.as_ref(), &new_value) - .map(|notification_value| { - *previous_value = Some(new_value); - notification_value - }) - })) - .map_err(|_| ()) + // we need to send initial value first, then we'll only be sending if value has changed + let previous_value = Arc::new(Mutex::new(None)); + + // prepare 'stream' of initial values + let initial_value_stream = ignore_error(initial_request).boxed().compat().into_stream(); + + // prepare stream of future values + // + // we do not want to stop stream if single request fails + // (the warning should have been already issued by the request issuer) + let future_values_stream = future_blocks_stream.and_then(move |block| { + ignore_error( + maybe_share_remote_request::( + shared_requests.clone(), + block, + &issue_request, + ) + .map(move |r| r.map(|v| (block, v))), + ) + .boxed() + .compat() + }); + + // now let's return changed values for selected blocks + initial_value_stream + .chain(future_values_stream) + .filter_map(move |block_and_new_value| { + block_and_new_value.and_then(|(block, new_value)| { + let mut previous_value = previous_value.lock(); + compare_values(block, previous_value.as_ref(), &new_value).map( + |notification_value| { + *previous_value = Some(new_value); + notification_value + }, + ) + }) + }) + .map_err(|_| ()) } /// Request some data from remote node, probably reusing response from already /// (in-progress) existing request. 
fn maybe_share_remote_request( - shared_requests: Requests, - block: Block::Hash, - issue_request: &IssueRequest, -) -> impl std::future::Future> where - V: Clone, - Requests: SharedRequests, - IssueRequest: Fn(Block::Hash) -> IssueRequestFuture, - IssueRequestFuture: std::future::Future>, + shared_requests: Requests, + block: Block::Hash, + issue_request: &IssueRequest, +) -> impl std::future::Future> +where + V: Clone, + Requests: SharedRequests, + IssueRequest: Fn(Block::Hash) -> IssueRequestFuture, + IssueRequestFuture: std::future::Future>, { - let (sender, receiver) = channel(); - let need_issue_request = shared_requests.listen_request(block, sender); - - // if that isn't the first request - just listen for existing request' response - if !need_issue_request { - return Either::Right(receiver.then(|r| ready(r.unwrap_or(Err(()))))); - } - - // that is the first request - issue remote request + notify all listeners on - // completion - Either::Left( - display_error(issue_request(block)) - .then(move |remote_result| { - let listeners = shared_requests.on_response_received(block); - // skip first element, because this future is the first element - for receiver in listeners.into_iter().skip(1) { - if let Err(_) = receiver.send(remote_result.clone()) { - // we don't care if receiver has been dropped already - } - } - - ready(remote_result) - }) - ) + let (sender, receiver) = channel(); + let need_issue_request = shared_requests.listen_request(block, sender); + + // if that isn't the first request - just listen for existing request' response + if !need_issue_request { + return Either::Right(receiver.then(|r| ready(r.unwrap_or(Err(()))))); + } + + // that is the first request - issue remote request + notify all listeners on + // completion + Either::Left( + display_error(issue_request(block)).then(move |remote_result| { + let listeners = shared_requests.on_response_received(block); + // skip first element, because this future is the first element + for receiver in 
listeners.into_iter().skip(1) { + if let Err(_) = receiver.send(remote_result.clone()) { + // we don't care if receiver has been dropped already + } + } + + ready(remote_result) + }), + ) } /// Convert successful future result into Ok(result) and error into Err(()), /// displaying warning. -fn display_error(future: F) -> impl std::future::Future> where - F: std::future::Future> +fn display_error(future: F) -> impl std::future::Future> +where + F: std::future::Future>, { - future.then(|result| ready(match result { - Ok(result) => Ok(result), - Err(err) => { - warn!("Remote request for subscription data has failed with: {:?}", err); - Err(()) - }, - })) + future.then(|result| { + ready(match result { + Ok(result) => Ok(result), + Err(err) => { + warn!( + "Remote request for subscription data has failed with: {:?}", + err + ); + Err(()) + } + }) + }) } /// Convert successful future result into Ok(Some(result)) and error into Ok(None), /// displaying warning. -fn ignore_error(future: F) -> impl std::future::Future, ()>> where - F: std::future::Future> +fn ignore_error(future: F) -> impl std::future::Future, ()>> +where + F: std::future::Future>, { - future.then(|result| ready(match result { - Ok(result) => Ok(Some(result)), - Err(()) => Ok(None), - })) + future.then(|result| { + ready(match result { + Ok(result) => Ok(Some(result)), + Err(()) => Ok(None), + }) + }) } #[cfg(test)] mod tests { - use rpc::futures::stream::futures_ordered; - use substrate_test_runtime_client::runtime::Block; - use sp_core::H256; - use super::*; - - #[test] - fn subscription_stream_works() { - let stream = subscription_stream::( - SimpleSubscriptions::default(), - futures_ordered(vec![result(Ok(H256::from([2; 32]))), result(Ok(H256::from([3; 32])))]), - ready(Ok((H256::from([1; 32]), 100))), - |block| match block[0] { - 2 => ready(Ok(100)), - 3 => ready(Ok(200)), - _ => unreachable!("should not issue additional requests"), - }, - |_, old_value, new_value| match old_value == Some(new_value) 
{ - true => None, - false => Some(new_value.clone()), - } - ); - - assert_eq!( - stream.collect().wait(), - Ok(vec![100, 200]) - ); - } - - #[test] - fn subscription_stream_ignores_failed_requests() { - let stream = subscription_stream::( - SimpleSubscriptions::default(), - futures_ordered(vec![result(Ok(H256::from([2; 32]))), result(Ok(H256::from([3; 32])))]), - ready(Ok((H256::from([1; 32]), 100))), - |block| match block[0] { - 2 => ready(Err(client_err(ClientError::NotAvailableOnLightClient))), - 3 => ready(Ok(200)), - _ => unreachable!("should not issue additional requests"), - }, - |_, old_value, new_value| match old_value == Some(new_value) { - true => None, - false => Some(new_value.clone()), - } - ); - - assert_eq!( - stream.collect().wait(), - Ok(vec![100, 200]) - ); - } - - #[test] - fn maybe_share_remote_request_shares_request() { - type UnreachableFuture = futures::future::Ready>; - - let shared_requests = SimpleSubscriptions::default(); - - // let's 'issue' requests for B1 - shared_requests.lock().insert( - H256::from([1; 32]), - vec![channel().0], - ); - - // make sure that no additional requests are issued when we're asking for B1 - let _ = maybe_share_remote_request::( - shared_requests.clone(), - H256::from([1; 32]), - &|_| unreachable!("no duplicate requests issued"), - ); - - // make sure that additional requests is issued when we're asking for B2 - let request_issued = Arc::new(Mutex::new(false)); - let _ = maybe_share_remote_request::( - shared_requests.clone(), - H256::from([2; 32]), - &|_| { - *request_issued.lock() = true; - ready(Ok(Default::default())) - }, - ); - assert!(*request_issued.lock()); - } + use super::*; + use rpc::futures::stream::futures_ordered; + use sp_core::H256; + use substrate_test_runtime_client::runtime::Block; + + #[test] + fn subscription_stream_works() { + let stream = subscription_stream::( + SimpleSubscriptions::default(), + futures_ordered(vec![ + result(Ok(H256::from([2; 32]))), + result(Ok(H256::from([3; 
32]))), + ]), + ready(Ok((H256::from([1; 32]), 100))), + |block| match block[0] { + 2 => ready(Ok(100)), + 3 => ready(Ok(200)), + _ => unreachable!("should not issue additional requests"), + }, + |_, old_value, new_value| match old_value == Some(new_value) { + true => None, + false => Some(new_value.clone()), + }, + ); + + assert_eq!(stream.collect().wait(), Ok(vec![100, 200])); + } + + #[test] + fn subscription_stream_ignores_failed_requests() { + let stream = subscription_stream::( + SimpleSubscriptions::default(), + futures_ordered(vec![ + result(Ok(H256::from([2; 32]))), + result(Ok(H256::from([3; 32]))), + ]), + ready(Ok((H256::from([1; 32]), 100))), + |block| match block[0] { + 2 => ready(Err(client_err(ClientError::NotAvailableOnLightClient))), + 3 => ready(Ok(200)), + _ => unreachable!("should not issue additional requests"), + }, + |_, old_value, new_value| match old_value == Some(new_value) { + true => None, + false => Some(new_value.clone()), + }, + ); + + assert_eq!(stream.collect().wait(), Ok(vec![100, 200])); + } + + #[test] + fn maybe_share_remote_request_shares_request() { + type UnreachableFuture = futures::future::Ready>; + + let shared_requests = SimpleSubscriptions::default(); + + // let's 'issue' requests for B1 + shared_requests + .lock() + .insert(H256::from([1; 32]), vec![channel().0]); + + // make sure that no additional requests are issued when we're asking for B1 + let _ = maybe_share_remote_request::( + shared_requests.clone(), + H256::from([1; 32]), + &|_| unreachable!("no duplicate requests issued"), + ); + + // make sure that additional requests is issued when we're asking for B2 + let request_issued = Arc::new(Mutex::new(false)); + let _ = maybe_share_remote_request::( + shared_requests.clone(), + H256::from([2; 32]), + &|_| { + *request_issued.lock() = true; + ready(Ok(Default::default())) + }, + ); + assert!(*request_issued.lock()); + } } diff --git a/client/rpc/src/state/tests.rs b/client/rpc/src/state/tests.rs index 
0f2358a3ed..e300fc1bfd 100644 --- a/client/rpc/src/state/tests.rs +++ b/client/rpc/src/state/tests.rs @@ -14,469 +14,502 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use super::*; -use super::state_full::split_range; use self::error::Error; +use super::state_full::split_range; +use super::*; -use std::sync::Arc; use assert_matches::assert_matches; use futures01::stream::Stream; -use sp_core::{storage::{well_known_keys, ChildInfo}, ChangesTrieConfiguration}; -use sp_core::hash::H256; use sc_block_builder::BlockBuilderProvider; -use sp_io::hashing::blake2_256; -use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, - runtime, +use sp_core::hash::H256; +use sp_core::{ + storage::{well_known_keys, ChildInfo}, + ChangesTrieConfiguration, }; +use sp_io::hashing::blake2_256; use sp_runtime::generic::BlockId; +use std::sync::Arc; +use substrate_test_runtime_client::{prelude::*, runtime, sp_consensus::BlockOrigin}; const CHILD_INFO: ChildInfo<'static> = ChildInfo::new_default(b"unique_id"); #[test] fn should_return_storage() { - const KEY: &[u8] = b":mock"; - const VALUE: &[u8] = b"hello world"; - const STORAGE_KEY: &[u8] = b":child_storage:default:child"; - const CHILD_VALUE: &[u8] = b"hello world !"; - - let mut core = tokio::runtime::Runtime::new().unwrap(); - let client = TestClientBuilder::new() - .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) - .add_extra_child_storage(STORAGE_KEY.to_vec(), CHILD_INFO, KEY.to_vec(), CHILD_VALUE.to_vec()) - .build(); - let genesis_hash = client.genesis_hash(); - let client = new_full(Arc::new(client), Subscriptions::new(Arc::new(core.executor()))); - let key = StorageKey(KEY.to_vec()); - let storage_key = StorageKey(STORAGE_KEY.to_vec()); - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); - - assert_eq!( - client.storage(key.clone(), Some(genesis_hash).into()).wait() - .map(|x| x.map(|x| 
x.0.len())).unwrap().unwrap() as usize, - VALUE.len(), - ); - assert_matches!( - client.storage_hash(key.clone(), Some(genesis_hash).into()).wait() - .map(|x| x.is_some()), - Ok(true) - ); - assert_eq!( - client.storage_size(key.clone(), None).wait().unwrap().unwrap() as usize, - VALUE.len(), - ); - assert_eq!( - core.block_on( - client.child_storage(storage_key, child_info, child_type, key, Some(genesis_hash).into()) - .map(|x| x.map(|x| x.0.len())) - ).unwrap().unwrap() as usize, - CHILD_VALUE.len(), - ); - + const KEY: &[u8] = b":mock"; + const VALUE: &[u8] = b"hello world"; + const STORAGE_KEY: &[u8] = b":child_storage:default:child"; + const CHILD_VALUE: &[u8] = b"hello world !"; + + let mut core = tokio::runtime::Runtime::new().unwrap(); + let client = TestClientBuilder::new() + .add_extra_storage(KEY.to_vec(), VALUE.to_vec()) + .add_extra_child_storage( + STORAGE_KEY.to_vec(), + CHILD_INFO, + KEY.to_vec(), + CHILD_VALUE.to_vec(), + ) + .build(); + let genesis_hash = client.genesis_hash(); + let client = new_full( + Arc::new(client), + Subscriptions::new(Arc::new(core.executor())), + ); + let key = StorageKey(KEY.to_vec()); + let storage_key = StorageKey(STORAGE_KEY.to_vec()); + let (child_info, child_type) = CHILD_INFO.info(); + let child_info = StorageKey(child_info.to_vec()); + + assert_eq!( + client + .storage(key.clone(), Some(genesis_hash).into()) + .wait() + .map(|x| x.map(|x| x.0.len())) + .unwrap() + .unwrap() as usize, + VALUE.len(), + ); + assert_matches!( + client + .storage_hash(key.clone(), Some(genesis_hash).into()) + .wait() + .map(|x| x.is_some()), + Ok(true) + ); + assert_eq!( + client + .storage_size(key.clone(), None) + .wait() + .unwrap() + .unwrap() as usize, + VALUE.len(), + ); + assert_eq!( + core.block_on( + client + .child_storage( + storage_key, + child_info, + child_type, + key, + Some(genesis_hash).into() + ) + .map(|x| x.map(|x| x.0.len())) + ) + .unwrap() + .unwrap() as usize, + CHILD_VALUE.len(), + ); } #[test] fn 
should_return_child_storage() { - let (child_info, child_type) = CHILD_INFO.info(); - let child_info = StorageKey(child_info.to_vec()); - let core = tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(substrate_test_runtime_client::TestClientBuilder::new() - .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) - .build()); - let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - let child_key = StorageKey( - well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().chain(b"test").cloned().collect() - ); - let key = StorageKey(b"key".to_vec()); - - - assert_matches!( - client.child_storage( - child_key.clone(), - child_info.clone(), - child_type, - key.clone(), - Some(genesis_hash).into(), - ).wait(), - Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 - ); - assert_matches!( - client.child_storage_hash( - child_key.clone(), - child_info.clone(), - child_type, - key.clone(), - Some(genesis_hash).into(), - ).wait().map(|x| x.is_some()), - Ok(true) - ); - assert_matches!( - client.child_storage_size( - child_key.clone(), - child_info.clone(), - child_type, - key.clone(), - None, - ).wait(), - Ok(Some(1)) - ); + let (child_info, child_type) = CHILD_INFO.info(); + let child_info = StorageKey(child_info.to_vec()); + let core = tokio::runtime::Runtime::new().unwrap(); + let client = Arc::new( + substrate_test_runtime_client::TestClientBuilder::new() + .add_child_storage("test", "key", CHILD_INFO, vec![42_u8]) + .build(), + ); + let genesis_hash = client.genesis_hash(); + let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + let child_key = StorageKey( + well_known_keys::CHILD_STORAGE_KEY_PREFIX + .iter() + .chain(b"test") + .cloned() + .collect(), + ); + let key = StorageKey(b"key".to_vec()); + + assert_matches!( + client.child_storage( + child_key.clone(), + child_info.clone(), + child_type, + key.clone(), + Some(genesis_hash).into(), + ).wait(), + 
Ok(Some(StorageData(ref d))) if d[0] == 42 && d.len() == 1 + ); + assert_matches!( + client + .child_storage_hash( + child_key.clone(), + child_info.clone(), + child_type, + key.clone(), + Some(genesis_hash).into(), + ) + .wait() + .map(|x| x.is_some()), + Ok(true) + ); + assert_matches!( + client + .child_storage_size( + child_key.clone(), + child_info.clone(), + child_type, + key.clone(), + None, + ) + .wait(), + Ok(Some(1)) + ); } #[test] fn should_call_contract() { - let core = tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(substrate_test_runtime_client::new()); - let genesis_hash = client.genesis_hash(); - let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); - - assert_matches!( - client.call("balanceOf".into(), Bytes(vec![1,2,3]), Some(genesis_hash).into()).wait(), - Err(Error::Client(_)) - ) + let core = tokio::runtime::Runtime::new().unwrap(); + let client = Arc::new(substrate_test_runtime_client::new()); + let genesis_hash = client.genesis_hash(); + let client = new_full(client, Subscriptions::new(Arc::new(core.executor()))); + + assert_matches!( + client + .call( + "balanceOf".into(), + Bytes(vec![1, 2, 3]), + Some(genesis_hash).into() + ) + .wait(), + Err(Error::Client(_)) + ) } #[test] fn should_notify_about_storage_changes() { - let mut core = tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - api.subscribe_storage(Default::default(), subscriber, None.into()); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - 
}).unwrap(); - let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - } - - // assert notification sent to transport - let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + api.subscribe_storage(Default::default(), subscriber, None.into()); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let mut builder = client.new_block(Default::default()).unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + } + + // assert notification sent to transport + let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_send_initial_storage_changes_and_notifications() { - let mut core = tokio::runtime::Runtime::new().unwrap(); - let remote = core.executor(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let mut client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); - - let alice_balance_key = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())); - - api.subscribe_storage(Default::default(), subscriber, 
Some(vec![ - StorageKey(alice_balance_key.to_vec()), - ]).into()); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - - let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_transfer(runtime::Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - } - - // assert initial values sent to transport - let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // assert notification sent to transport - let (notification, next) = core.block_on(next.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = tokio::runtime::Runtime::new().unwrap(); + let remote = core.executor(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let mut client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full(client.clone(), Subscriptions::new(Arc::new(remote))); + + let alice_balance_key = blake2_256(&runtime::system::balance_of_key( + AccountKeyring::Alice.into(), + )); + + api.subscribe_storage( + Default::default(), + subscriber, + Some(vec![StorageKey(alice_balance_key.to_vec())]).into(), + ); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + + let mut builder = client.new_block(Default::default()).unwrap(); + builder + .push_transfer(runtime::Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + } + + // assert initial values sent to transport + let (notification, next) = 
core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // assert notification sent to transport + let (notification, next) = core.block_on(next.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_query_storage() { - fn run_tests(mut client: Arc, has_changes_trie_config: bool) { - let core = tokio::runtime::Runtime::new().unwrap(); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); - - let mut add_block = |nonce| { - let mut builder = client.new_block(Default::default()).unwrap(); - // fake change: None -> None -> None - builder.push_storage_change(vec![1], None).unwrap(); - // fake change: None -> Some(value) -> Some(value) - builder.push_storage_change(vec![2], Some(vec![2])).unwrap(); - // actual change: None -> Some(value) -> None - builder.push_storage_change(vec![3], if nonce == 0 { Some(vec![3]) } else { None }).unwrap(); - // actual change: None -> Some(value) - builder.push_storage_change(vec![4], if nonce == 0 { None } else { Some(vec![4]) }).unwrap(); - // actual change: Some(value1) -> Some(value2) - builder.push_storage_change(vec![5], Some(vec![nonce as u8])).unwrap(); - let block = builder.build().unwrap().block; - let hash = block.header.hash(); - client.import(BlockOrigin::Own, block).unwrap(); - hash - }; - let block1_hash = add_block(0); - let block2_hash = add_block(1); - let genesis_hash = client.genesis_hash(); - - if has_changes_trie_config { - assert_eq!( - client.max_key_changes_range(1, BlockId::Hash(block1_hash)).unwrap(), - Some((0, BlockId::Hash(block1_hash))), - ); - } - - let mut expected = vec![ - StorageChangeSet { - block: genesis_hash, - changes: vec![ - (StorageKey(vec![1]), None), - (StorageKey(vec![2]), None), - (StorageKey(vec![3]), None), - (StorageKey(vec![4]), None), - (StorageKey(vec![5]), None), - ], - }, - StorageChangeSet 
{ - block: block1_hash, - changes: vec![ - (StorageKey(vec![2]), Some(StorageData(vec![2]))), - (StorageKey(vec![3]), Some(StorageData(vec![3]))), - (StorageKey(vec![5]), Some(StorageData(vec![0]))), - ], - }, - ]; - - // Query changes only up to block1 - let keys = (1..6).map(|k| StorageKey(vec![k])).collect::>(); - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(block1_hash).into(), - ); - - assert_eq!(result.wait().unwrap(), expected); - - // Query all changes - let result = api.query_storage( - keys.clone(), - genesis_hash, - None.into(), - ); - - expected.push(StorageChangeSet { - block: block2_hash, - changes: vec![ - (StorageKey(vec![3]), None), - (StorageKey(vec![4]), Some(StorageData(vec![4]))), - (StorageKey(vec![5]), Some(StorageData(vec![1]))), - ], - }); - assert_eq!(result.wait().unwrap(), expected); - - // Query changes up to block2. - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(block2_hash), - ); - - assert_eq!(result.wait().unwrap(), expected); - - // Inverted range. - let result = api.query_storage( - keys.clone(), - block1_hash, - Some(genesis_hash), - ); - - assert_eq!( - result.wait().map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("1 ({:?})", block1_hash), - to: format!("0 ({:?})", genesis_hash), - details: "from number > to number".to_owned(), - }).map_err(|e| e.to_string()) - ); - - let random_hash1 = H256::random(); - let random_hash2 = H256::random(); - - // Invalid second hash. - let result = api.query_storage( - keys.clone(), - genesis_hash, - Some(random_hash1), - ); - - assert_eq!( - result.wait().map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", genesis_hash), - to: format!("{:?}", Some(random_hash1)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()) - ); - - // Invalid first hash with Some other hash. 
- let result = api.query_storage( - keys.clone(), - random_hash1, - Some(genesis_hash), - ); - - assert_eq!( - result.wait().map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", random_hash1), - to: format!("{:?}", Some(genesis_hash)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), - ); - - // Invalid first hash with None. - let result = api.query_storage( - keys.clone(), - random_hash1, - None, - ); - - assert_eq!( - result.wait().map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", random_hash1), - to: format!("{:?}", Some(block2_hash)), // Best block hash. - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), - ); - - // Both hashes invalid. - let result = api.query_storage( - keys.clone(), - random_hash1, - Some(random_hash2), - ); - - assert_eq!( - result.wait().map_err(|e| e.to_string()), - Err(Error::InvalidBlockRange { - from: format!("{:?}", random_hash1), // First hash not found. 
- to: format!("{:?}", Some(random_hash2)), - details: format!("UnknownBlock: header not found in db: {}", random_hash1), - }).map_err(|e| e.to_string()), - ); - - // single block range - let result = api.query_storage_at( - keys.clone(), - Some(block1_hash), - ); - - assert_eq!( - result.wait().unwrap(), - vec![ - StorageChangeSet { - block: block1_hash, - changes: vec![ - (StorageKey(vec![1_u8]), None), - (StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))), - (StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))), - (StorageKey(vec![4_u8]), None), - (StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))), - ] - } - ] - ); - } - - run_tests(Arc::new(substrate_test_runtime_client::new()), false); - run_tests( - Arc::new( - TestClientBuilder::new() - .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) - .build(), - ), - true, - ); + fn run_tests(mut client: Arc, has_changes_trie_config: bool) { + let core = tokio::runtime::Runtime::new().unwrap(); + let api = new_full( + client.clone(), + Subscriptions::new(Arc::new(core.executor())), + ); + + let mut add_block = |nonce| { + let mut builder = client.new_block(Default::default()).unwrap(); + // fake change: None -> None -> None + builder.push_storage_change(vec![1], None).unwrap(); + // fake change: None -> Some(value) -> Some(value) + builder.push_storage_change(vec![2], Some(vec![2])).unwrap(); + // actual change: None -> Some(value) -> None + builder + .push_storage_change(vec![3], if nonce == 0 { Some(vec![3]) } else { None }) + .unwrap(); + // actual change: None -> Some(value) + builder + .push_storage_change(vec![4], if nonce == 0 { None } else { Some(vec![4]) }) + .unwrap(); + // actual change: Some(value1) -> Some(value2) + builder + .push_storage_change(vec![5], Some(vec![nonce as u8])) + .unwrap(); + let block = builder.build().unwrap().block; + let hash = block.header.hash(); + client.import(BlockOrigin::Own, block).unwrap(); + hash + }; + let block1_hash = add_block(0); + let block2_hash 
= add_block(1); + let genesis_hash = client.genesis_hash(); + + if has_changes_trie_config { + assert_eq!( + client + .max_key_changes_range(1, BlockId::Hash(block1_hash)) + .unwrap(), + Some((0, BlockId::Hash(block1_hash))), + ); + } + + let mut expected = vec![ + StorageChangeSet { + block: genesis_hash, + changes: vec![ + (StorageKey(vec![1]), None), + (StorageKey(vec![2]), None), + (StorageKey(vec![3]), None), + (StorageKey(vec![4]), None), + (StorageKey(vec![5]), None), + ], + }, + StorageChangeSet { + block: block1_hash, + changes: vec![ + (StorageKey(vec![2]), Some(StorageData(vec![2]))), + (StorageKey(vec![3]), Some(StorageData(vec![3]))), + (StorageKey(vec![5]), Some(StorageData(vec![0]))), + ], + }, + ]; + + // Query changes only up to block1 + let keys = (1..6).map(|k| StorageKey(vec![k])).collect::>(); + let result = api.query_storage(keys.clone(), genesis_hash, Some(block1_hash).into()); + + assert_eq!(result.wait().unwrap(), expected); + + // Query all changes + let result = api.query_storage(keys.clone(), genesis_hash, None.into()); + + expected.push(StorageChangeSet { + block: block2_hash, + changes: vec![ + (StorageKey(vec![3]), None), + (StorageKey(vec![4]), Some(StorageData(vec![4]))), + (StorageKey(vec![5]), Some(StorageData(vec![1]))), + ], + }); + assert_eq!(result.wait().unwrap(), expected); + + // Query changes up to block2. + let result = api.query_storage(keys.clone(), genesis_hash, Some(block2_hash)); + + assert_eq!(result.wait().unwrap(), expected); + + // Inverted range. + let result = api.query_storage(keys.clone(), block1_hash, Some(genesis_hash)); + + assert_eq!( + result.wait().map_err(|e| e.to_string()), + Err(Error::InvalidBlockRange { + from: format!("1 ({:?})", block1_hash), + to: format!("0 ({:?})", genesis_hash), + details: "from number > to number".to_owned(), + }) + .map_err(|e| e.to_string()) + ); + + let random_hash1 = H256::random(); + let random_hash2 = H256::random(); + + // Invalid second hash. 
+ let result = api.query_storage(keys.clone(), genesis_hash, Some(random_hash1)); + + assert_eq!( + result.wait().map_err(|e| e.to_string()), + Err(Error::InvalidBlockRange { + from: format!("{:?}", genesis_hash), + to: format!("{:?}", Some(random_hash1)), + details: format!("UnknownBlock: header not found in db: {}", random_hash1), + }) + .map_err(|e| e.to_string()) + ); + + // Invalid first hash with Some other hash. + let result = api.query_storage(keys.clone(), random_hash1, Some(genesis_hash)); + + assert_eq!( + result.wait().map_err(|e| e.to_string()), + Err(Error::InvalidBlockRange { + from: format!("{:?}", random_hash1), + to: format!("{:?}", Some(genesis_hash)), + details: format!("UnknownBlock: header not found in db: {}", random_hash1), + }) + .map_err(|e| e.to_string()), + ); + + // Invalid first hash with None. + let result = api.query_storage(keys.clone(), random_hash1, None); + + assert_eq!( + result.wait().map_err(|e| e.to_string()), + Err(Error::InvalidBlockRange { + from: format!("{:?}", random_hash1), + to: format!("{:?}", Some(block2_hash)), // Best block hash. + details: format!("UnknownBlock: header not found in db: {}", random_hash1), + }) + .map_err(|e| e.to_string()), + ); + + // Both hashes invalid. + let result = api.query_storage(keys.clone(), random_hash1, Some(random_hash2)); + + assert_eq!( + result.wait().map_err(|e| e.to_string()), + Err(Error::InvalidBlockRange { + from: format!("{:?}", random_hash1), // First hash not found. 
+ to: format!("{:?}", Some(random_hash2)), + details: format!("UnknownBlock: header not found in db: {}", random_hash1), + }) + .map_err(|e| e.to_string()), + ); + + // single block range + let result = api.query_storage_at(keys.clone(), Some(block1_hash)); + + assert_eq!( + result.wait().unwrap(), + vec![StorageChangeSet { + block: block1_hash, + changes: vec![ + (StorageKey(vec![1_u8]), None), + (StorageKey(vec![2_u8]), Some(StorageData(vec![2_u8]))), + (StorageKey(vec![3_u8]), Some(StorageData(vec![3_u8]))), + (StorageKey(vec![4_u8]), None), + (StorageKey(vec![5_u8]), Some(StorageData(vec![0_u8]))), + ] + }] + ); + } + + run_tests(Arc::new(substrate_test_runtime_client::new()), false); + run_tests( + Arc::new( + TestClientBuilder::new() + .changes_trie_config(Some(ChangesTrieConfiguration::new(4, 2))) + .build(), + ), + true, + ); } #[test] fn should_split_ranges() { - assert_eq!(split_range(1, None), (0..1, None)); - assert_eq!(split_range(100, None), (0..100, None)); - assert_eq!(split_range(1, Some(0)), (0..1, None)); - assert_eq!(split_range(100, Some(50)), (0..50, Some(50..100))); - assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100))); + assert_eq!(split_range(1, None), (0..1, None)); + assert_eq!(split_range(100, None), (0..100, None)); + assert_eq!(split_range(1, Some(0)), (0..1, None)); + assert_eq!(split_range(100, Some(50)), (0..50, Some(50..100))); + assert_eq!(split_range(100, Some(99)), (0..99, Some(99..100))); } - #[test] fn should_return_runtime_version() { - let core = tokio::runtime::Runtime::new().unwrap(); + let core = tokio::runtime::Runtime::new().unwrap(); - let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); + let client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full( + client.clone(), + Subscriptions::new(Arc::new(core.executor())), + ); - let result = 
"{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ + let result = "{\"specName\":\"test\",\"implName\":\"parity-test\",\"authoringVersion\":1,\ \"specVersion\":2,\"implVersion\":2,\"apis\":[[\"0xdf6acb689907609b\",3],\ [\"0x37e397fc7c91f5e4\",1],[\"0xd2bc9897eed08f15\",2],[\"0x40fe3ad401f8959a\",4],\ [\"0xc6e9a76309f39b09\",1],[\"0xdd718d5cc53262d4\",1],[\"0xcbca25e39f142387\",1],\ [\"0xf78b278be53f454c\",2],[\"0xab3c0572291feb8b\",1],[\"0xbc9d89904f5b923f\",1]],\ \"transactionVersion\":1}"; - let runtime_version = api.runtime_version(None.into()).wait().unwrap(); - let serialized = serde_json::to_string(&runtime_version).unwrap(); - assert_eq!(serialized, result); + let runtime_version = api.runtime_version(None.into()).wait().unwrap(); + let serialized = serde_json::to_string(&runtime_version).unwrap(); + assert_eq!(serialized, result); - let deserialized: RuntimeVersion = serde_json::from_str(result).unwrap(); - assert_eq!(deserialized, runtime_version); + let deserialized: RuntimeVersion = serde_json::from_str(result).unwrap(); + assert_eq!(deserialized, runtime_version); } #[test] fn should_notify_on_runtime_version_initially() { - let mut core = tokio::runtime::Runtime::new().unwrap(); - let (subscriber, id, transport) = Subscriber::new_test("test"); - - { - let client = Arc::new(substrate_test_runtime_client::new()); - let api = new_full(client.clone(), Subscriptions::new(Arc::new(core.executor()))); - - api.subscribe_runtime_version(Default::default(), subscriber); - - // assert id assigned - assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); - } - - // assert initial version sent. 
- let (notification, next) = core.block_on(transport.into_future()).unwrap(); - assert!(notification.is_some()); - // no more notifications on this channel - assert_eq!(core.block_on(next.into_future()).unwrap().0, None); + let mut core = tokio::runtime::Runtime::new().unwrap(); + let (subscriber, id, transport) = Subscriber::new_test("test"); + + { + let client = Arc::new(substrate_test_runtime_client::new()); + let api = new_full( + client.clone(), + Subscriptions::new(Arc::new(core.executor())), + ); + + api.subscribe_runtime_version(Default::default(), subscriber); + + // assert id assigned + assert_eq!(core.block_on(id), Ok(Ok(SubscriptionId::Number(1)))); + } + + // assert initial version sent. + let (notification, next) = core.block_on(transport.into_future()).unwrap(); + assert!(notification.is_some()); + // no more notifications on this channel + assert_eq!(core.block_on(next.into_future()).unwrap().0, None); } #[test] fn should_deserialize_storage_key() { - let k = "\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\""; - let k: StorageKey = serde_json::from_str(k).unwrap(); + let k = "\"0x7f864e18e3dd8b58386310d2fe0919eef27c6e558564b7f67f22d99d20f587b\""; + let k: StorageKey = serde_json::from_str(k).unwrap(); - assert_eq!(k.0.len(), 32); + assert_eq!(k.0.len(), 32); } diff --git a/client/rpc/src/system/mod.rs b/client/rpc/src/system/mod.rs index 84e06c20a6..069db87d47 100644 --- a/client/rpc/src/system/mod.rs +++ b/client/rpc/src/system/mod.rs @@ -19,143 +19,149 @@ #[cfg(test)] mod tests; -use futures::{future::BoxFuture, FutureExt, TryFutureExt}; use futures::{channel::oneshot, compat::Compat}; +use futures::{future::BoxFuture, FutureExt, TryFutureExt}; use sc_rpc_api::Receiver; -use sp_utils::mpsc::TracingUnboundedSender; use sp_runtime::traits::{self, Header as HeaderT}; +use sp_utils::mpsc::TracingUnboundedSender; use self::error::Result; -pub use sc_rpc_api::system::*; -pub use self::helpers::{SystemInfo, Health, PeerInfo, 
NodeRole}; pub use self::gen_client::Client as SystemClient; +pub use self::helpers::{Health, NodeRole, PeerInfo, SystemInfo}; +pub use sc_rpc_api::system::*; /// System API implementation pub struct System { - info: SystemInfo, - send_back: TracingUnboundedSender>, + info: SystemInfo, + send_back: TracingUnboundedSender>, } /// Request to be processed. pub enum Request { - /// Must return the health of the network. - Health(oneshot::Sender), - /// Must return the base58-encoded local `PeerId`. - LocalPeerId(oneshot::Sender), - /// Must return the string representation of the addresses we listen on, including the - /// trailing `/p2p/`. - LocalListenAddresses(oneshot::Sender>), - /// Must return information about the peers we are connected to. - Peers(oneshot::Sender::Number>>>), - /// Must return the state of the network. - NetworkState(oneshot::Sender), - /// Must return any potential parse error. - NetworkAddReservedPeer(String, oneshot::Sender>), - /// Must return any potential parse error. - NetworkRemoveReservedPeer(String, oneshot::Sender>), - /// Must return the node role. - NodeRoles(oneshot::Sender>) + /// Must return the health of the network. + Health(oneshot::Sender), + /// Must return the base58-encoded local `PeerId`. + LocalPeerId(oneshot::Sender), + /// Must return the string representation of the addresses we listen on, including the + /// trailing `/p2p/`. + LocalListenAddresses(oneshot::Sender>), + /// Must return information about the peers we are connected to. + Peers(oneshot::Sender::Number>>>), + /// Must return the state of the network. + NetworkState(oneshot::Sender), + /// Must return any potential parse error. + NetworkAddReservedPeer(String, oneshot::Sender>), + /// Must return any potential parse error. + NetworkRemoveReservedPeer(String, oneshot::Sender>), + /// Must return the node role. + NodeRoles(oneshot::Sender>), } impl System { - /// Creates new `System`. 
- /// - /// The `send_back` will be used to transmit some of the requests. The user is responsible for - /// reading from that channel and answering the requests. - pub fn new( - info: SystemInfo, - send_back: TracingUnboundedSender>, - ) -> Self { - System { - info, - send_back, - } - } + /// Creates new `System`. + /// + /// The `send_back` will be used to transmit some of the requests. The user is responsible for + /// reading from that channel and answering the requests. + pub fn new(info: SystemInfo, send_back: TracingUnboundedSender>) -> Self { + System { info, send_back } + } } impl SystemApi::Number> for System { - fn system_name(&self) -> Result { - Ok(self.info.impl_name.clone()) - } - - fn system_version(&self) -> Result { - Ok(self.info.impl_version.clone()) - } - - fn system_chain(&self) -> Result { - Ok(self.info.chain_name.clone()) - } - - fn system_type(&self) -> Result { - Ok(self.info.chain_type.clone()) - } - - fn system_properties(&self) -> Result { - Ok(self.info.properties.clone()) - } - - fn system_health(&self) -> Receiver { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::Health(tx)); - Receiver(Compat::new(rx)) - } - - fn system_local_peer_id(&self) -> Receiver { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::LocalPeerId(tx)); - Receiver(Compat::new(rx)) - } - - fn system_local_listen_addresses(&self) -> Receiver> { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::LocalListenAddresses(tx)); - Receiver(Compat::new(rx)) - } - - fn system_peers(&self) -> Receiver::Number>>> { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::Peers(tx)); - Receiver(Compat::new(rx)) - } - - fn system_network_state(&self) -> Receiver { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::NetworkState(tx)); - Receiver(Compat::new(rx)) - } - - fn system_add_reserved_peer(&self, peer: 
String) - -> Compat>> - { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::NetworkAddReservedPeer(peer, tx)); - async move { - match rx.await { - Ok(Ok(())) => Ok(()), - Ok(Err(e)) => Err(rpc::Error::from(e)), - Err(_) => Err(rpc::Error::internal_error()), - } - }.boxed().compat() - } - - fn system_remove_reserved_peer(&self, peer: String) - -> Compat>> - { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::NetworkRemoveReservedPeer(peer, tx)); - async move { - match rx.await { - Ok(Ok(())) => Ok(()), - Ok(Err(e)) => Err(rpc::Error::from(e)), - Err(_) => Err(rpc::Error::internal_error()), - } - }.boxed().compat() - } - - fn system_node_roles(&self) -> Receiver> { - let (tx, rx) = oneshot::channel(); - let _ = self.send_back.unbounded_send(Request::NodeRoles(tx)); - Receiver(Compat::new(rx)) - } + fn system_name(&self) -> Result { + Ok(self.info.impl_name.clone()) + } + + fn system_version(&self) -> Result { + Ok(self.info.impl_version.clone()) + } + + fn system_chain(&self) -> Result { + Ok(self.info.chain_name.clone()) + } + + fn system_type(&self) -> Result { + Ok(self.info.chain_type.clone()) + } + + fn system_properties(&self) -> Result { + Ok(self.info.properties.clone()) + } + + fn system_health(&self) -> Receiver { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::Health(tx)); + Receiver(Compat::new(rx)) + } + + fn system_local_peer_id(&self) -> Receiver { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::LocalPeerId(tx)); + Receiver(Compat::new(rx)) + } + + fn system_local_listen_addresses(&self) -> Receiver> { + let (tx, rx) = oneshot::channel(); + let _ = self + .send_back + .unbounded_send(Request::LocalListenAddresses(tx)); + Receiver(Compat::new(rx)) + } + + fn system_peers(&self) -> Receiver::Number>>> { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::Peers(tx)); + 
Receiver(Compat::new(rx)) + } + + fn system_network_state(&self) -> Receiver { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NetworkState(tx)); + Receiver(Compat::new(rx)) + } + + fn system_add_reserved_peer( + &self, + peer: String, + ) -> Compat>> { + let (tx, rx) = oneshot::channel(); + let _ = self + .send_back + .unbounded_send(Request::NetworkAddReservedPeer(peer, tx)); + async move { + match rx.await { + Ok(Ok(())) => Ok(()), + Ok(Err(e)) => Err(rpc::Error::from(e)), + Err(_) => Err(rpc::Error::internal_error()), + } + } + .boxed() + .compat() + } + + fn system_remove_reserved_peer( + &self, + peer: String, + ) -> Compat>> { + let (tx, rx) = oneshot::channel(); + let _ = self + .send_back + .unbounded_send(Request::NetworkRemoveReservedPeer(peer, tx)); + async move { + match rx.await { + Ok(Ok(())) => Ok(()), + Ok(Err(e)) => Err(rpc::Error::from(e)), + Err(_) => Err(rpc::Error::internal_error()), + } + } + .boxed() + .compat() + } + + fn system_node_roles(&self) -> Receiver> { + let (tx, rx) = oneshot::channel(); + let _ = self.send_back.unbounded_send(Request::NodeRoles(tx)); + Receiver(Compat::new(rx)) + } } diff --git a/client/rpc/src/system/tests.rs b/client/rpc/src/system/tests.rs index 921d941a1c..f9ae9d07c5 100644 --- a/client/rpc/src/system/tests.rs +++ b/client/rpc/src/system/tests.rs @@ -16,292 +16,307 @@ use super::*; -use sc_network::{self, PeerId}; -use sc_network::config::Role; -use substrate_test_runtime_client::runtime::Block; use assert_matches::assert_matches; -use futures::{prelude::*, channel::mpsc}; +use futures::{channel::mpsc, prelude::*}; +use sc_network::config::Role; +use sc_network::{self, PeerId}; use std::thread; +use substrate_test_runtime_client::runtime::Block; struct Status { - pub peers: usize, - pub is_syncing: bool, - pub is_dev: bool, - pub peer_id: PeerId, + pub peers: usize, + pub is_syncing: bool, + pub is_dev: bool, + pub peer_id: PeerId, } impl Default for Status { - fn 
default() -> Status { - Status { - peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: false, - } - } + fn default() -> Status { + Status { + peer_id: PeerId::random(), + peers: 0, + is_syncing: false, + is_dev: false, + } + } } fn api>>(sync: T) -> System { - let status = sync.into().unwrap_or_default(); - let should_have_peers = !status.is_dev; - let (tx, rx) = mpsc::unbounded(); - thread::spawn(move || { - futures::executor::block_on(rx.for_each(move |request| { - match request { - Request::Health(sender) => { - let _ = sender.send(Health { - peers: status.peers, - is_syncing: status.is_syncing, - should_have_peers, - }); - }, - Request::LocalPeerId(sender) => { - let _ = sender.send("QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()); - }, - Request::LocalListenAddresses(sender) => { - let _ = sender.send(vec![ + let status = sync.into().unwrap_or_default(); + let should_have_peers = !status.is_dev; + let (tx, rx) = mpsc::unbounded(); + thread::spawn(move || { + futures::executor::block_on(rx.for_each(move |request| { + match request { + Request::Health(sender) => { + let _ = sender.send(Health { + peers: status.peers, + is_syncing: status.is_syncing, + should_have_peers, + }); + } + Request::LocalPeerId(sender) => { + let _ = + sender.send("QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string()); + } + Request::LocalListenAddresses(sender) => { + let _ = sender.send(vec![ "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), ]); - }, - Request::Peers(sender) => { - let mut peers = vec![]; - for _peer in 0..status.peers { - peers.push(PeerInfo { - peer_id: status.peer_id.to_base58(), - roles: format!("{}", Role::Full), - protocol_version: 1, - best_hash: Default::default(), - best_number: 1, - }); - } - let _ = sender.send(peers); - } - Request::NetworkState(sender) => { - let _ = 
sender.send(serde_json::to_value(&sc_network::network_state::NetworkState { - peer_id: String::new(), - listened_addresses: Default::default(), - external_addresses: Default::default(), - connected_peers: Default::default(), - not_connected_peers: Default::default(), - average_download_per_sec: 0, - average_upload_per_sec: 0, - peerset: serde_json::Value::Null, - }).unwrap()); - }, - Request::NetworkAddReservedPeer(peer, sender) => { - let _ = match sc_network::config::parse_str_addr(&peer) { - Ok(_) => sender.send(Ok(())), - Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), - }; - }, - Request::NetworkRemoveReservedPeer(peer, sender) => { - let _ = match peer.parse::() { - Ok(_) => sender.send(Ok(())), - Err(s) => sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))), - }; - } - Request::NodeRoles(sender) => { - let _ = sender.send(vec![NodeRole::Authority]); - } - }; + } + Request::Peers(sender) => { + let mut peers = vec![]; + for _peer in 0..status.peers { + peers.push(PeerInfo { + peer_id: status.peer_id.to_base58(), + roles: format!("{}", Role::Full), + protocol_version: 1, + best_hash: Default::default(), + best_number: 1, + }); + } + let _ = sender.send(peers); + } + Request::NetworkState(sender) => { + let _ = sender.send( + serde_json::to_value(&sc_network::network_state::NetworkState { + peer_id: String::new(), + listened_addresses: Default::default(), + external_addresses: Default::default(), + connected_peers: Default::default(), + not_connected_peers: Default::default(), + average_download_per_sec: 0, + average_upload_per_sec: 0, + peerset: serde_json::Value::Null, + }) + .unwrap(), + ); + } + Request::NetworkAddReservedPeer(peer, sender) => { + let _ = match sc_network::config::parse_str_addr(&peer) { + Ok(_) => sender.send(Ok(())), + Err(s) => { + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))) + } + }; + } + Request::NetworkRemoveReservedPeer(peer, sender) => { + let _ = match 
peer.parse::() { + Ok(_) => sender.send(Ok(())), + Err(s) => { + sender.send(Err(error::Error::MalformattedPeerArg(s.to_string()))) + } + }; + } + Request::NodeRoles(sender) => { + let _ = sender.send(vec![NodeRole::Authority]); + } + }; - future::ready(()) - })) - }); - System::new(SystemInfo { - impl_name: "testclient".into(), - impl_version: "0.2.0".into(), - chain_name: "testchain".into(), - properties: Default::default(), - chain_type: Default::default(), - }, tx) + future::ready(()) + })) + }); + System::new( + SystemInfo { + impl_name: "testclient".into(), + impl_version: "0.2.0".into(), + chain_name: "testchain".into(), + properties: Default::default(), + chain_type: Default::default(), + }, + tx, + ) } fn wait_receiver(rx: Receiver) -> T { - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); - runtime.block_on(rx).unwrap() + let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); + runtime.block_on(rx).unwrap() } #[test] fn system_name_works() { - assert_eq!( - api(None).system_name().unwrap(), - "testclient".to_owned(), - ); + assert_eq!(api(None).system_name().unwrap(), "testclient".to_owned(),); } #[test] fn system_version_works() { - assert_eq!( - api(None).system_version().unwrap(), - "0.2.0".to_owned(), - ); + assert_eq!(api(None).system_version().unwrap(), "0.2.0".to_owned(),); } #[test] fn system_chain_works() { - assert_eq!( - api(None).system_chain().unwrap(), - "testchain".to_owned(), - ); + assert_eq!(api(None).system_chain().unwrap(), "testchain".to_owned(),); } #[test] fn system_properties_works() { - assert_eq!( - api(None).system_properties().unwrap(), - serde_json::map::Map::new(), - ); + assert_eq!( + api(None).system_properties().unwrap(), + serde_json::map::Map::new(), + ); } #[test] fn system_type_works() { - assert_eq!( - api(None).system_type().unwrap(), - Default::default(), - ); + assert_eq!(api(None).system_type().unwrap(), Default::default(),); } #[test] fn system_health() { - 
assert_matches!( - wait_receiver(api(None).system_health()), - Health { - peers: 0, - is_syncing: false, - should_have_peers: true, - } - ); + assert_matches!( + wait_receiver(api(None).system_health()), + Health { + peers: 0, + is_syncing: false, + should_have_peers: true, + } + ); - assert_matches!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: true, - is_dev: true, - }).system_health()), - Health { - peers: 5, - is_syncing: true, - should_have_peers: false, - } - ); + assert_matches!( + wait_receiver( + api(Status { + peer_id: PeerId::random(), + peers: 5, + is_syncing: true, + is_dev: true, + }) + .system_health() + ), + Health { + peers: 5, + is_syncing: true, + should_have_peers: false, + } + ); - assert_eq!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 5, - is_syncing: false, - is_dev: false, - }).system_health()), - Health { - peers: 5, - is_syncing: false, - should_have_peers: true, - } - ); + assert_eq!( + wait_receiver( + api(Status { + peer_id: PeerId::random(), + peers: 5, + is_syncing: false, + is_dev: false, + }) + .system_health() + ), + Health { + peers: 5, + is_syncing: false, + should_have_peers: true, + } + ); - assert_eq!( - wait_receiver(api(Status { - peer_id: PeerId::random(), - peers: 0, - is_syncing: false, - is_dev: true, - }).system_health()), - Health { - peers: 0, - is_syncing: false, - should_have_peers: false, - } - ); + assert_eq!( + wait_receiver( + api(Status { + peer_id: PeerId::random(), + peers: 0, + is_syncing: false, + is_dev: true, + }) + .system_health() + ), + Health { + peers: 0, + is_syncing: false, + should_have_peers: false, + } + ); } #[test] fn system_local_peer_id_works() { - assert_eq!( - wait_receiver(api(None).system_local_peer_id()), - "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_owned(), - ); + assert_eq!( + wait_receiver(api(None).system_local_peer_id()), + "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_owned(), + ); } #[test] fn 
system_local_listen_addresses_works() { - assert_eq!( - wait_receiver(api(None).system_local_listen_addresses()), - vec![ - "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), - "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV".to_string(), - ] - ); + assert_eq!( + wait_receiver(api(None).system_local_listen_addresses()), + vec![ + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" + .to_string(), + "/ip4/127.0.0.1/tcp/30334/ws/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV" + .to_string(), + ] + ); } #[test] fn system_peers() { - let peer_id = PeerId::random(); - assert_eq!( - wait_receiver(api(Status { - peer_id: peer_id.clone(), - peers: 1, - is_syncing: false, - is_dev: true, - }).system_peers()), - vec![PeerInfo { - peer_id: peer_id.to_base58(), - roles: "FULL".into(), - protocol_version: 1, - best_hash: Default::default(), - best_number: 1u64, - }] - ); + let peer_id = PeerId::random(); + assert_eq!( + wait_receiver( + api(Status { + peer_id: peer_id.clone(), + peers: 1, + is_syncing: false, + is_dev: true, + }) + .system_peers() + ), + vec![PeerInfo { + peer_id: peer_id.to_base58(), + roles: "FULL".into(), + protocol_version: 1, + best_hash: Default::default(), + best_number: 1u64, + }] + ); } #[test] fn system_network_state() { - let res = wait_receiver(api(None).system_network_state()); - assert_eq!( - serde_json::from_value::(res).unwrap(), - sc_network::network_state::NetworkState { - peer_id: String::new(), - listened_addresses: Default::default(), - external_addresses: Default::default(), - connected_peers: Default::default(), - not_connected_peers: Default::default(), - average_download_per_sec: 0, - average_upload_per_sec: 0, - peerset: serde_json::Value::Null, - } - ); + let res = wait_receiver(api(None).system_network_state()); + assert_eq!( + serde_json::from_value::(res).unwrap(), + sc_network::network_state::NetworkState { + peer_id: 
String::new(), + listened_addresses: Default::default(), + external_addresses: Default::default(), + connected_peers: Default::default(), + not_connected_peers: Default::default(), + average_download_per_sec: 0, + average_upload_per_sec: 0, + peerset: serde_json::Value::Null, + } + ); } #[test] fn system_node_roles() { - assert_eq!( - wait_receiver(api(None).system_node_roles()), - vec![NodeRole::Authority] - ); + assert_eq!( + wait_receiver(api(None).system_node_roles()), + vec![NodeRole::Authority] + ); } #[test] fn system_network_add_reserved() { - let good_peer_id = "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let bad_peer_id = "/ip4/198.51.100.19/tcp/30333"; - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); + let good_peer_id = + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; + let bad_peer_id = "/ip4/198.51.100.19/tcp/30333"; + let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); - let good_fut = api(None).system_add_reserved_peer(good_peer_id.into()); - let bad_fut = api(None).system_add_reserved_peer(bad_peer_id.into()); - assert_eq!(runtime.block_on(good_fut), Ok(())); - assert!(runtime.block_on(bad_fut).is_err()); + let good_fut = api(None).system_add_reserved_peer(good_peer_id.into()); + let bad_fut = api(None).system_add_reserved_peer(bad_peer_id.into()); + assert_eq!(runtime.block_on(good_fut), Ok(())); + assert!(runtime.block_on(bad_fut).is_err()); } #[test] fn system_network_remove_reserved() { - let good_peer_id = "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let bad_peer_id = "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; - let mut runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); + let good_peer_id = "QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; + let bad_peer_id = + "/ip4/198.51.100.19/tcp/30333/p2p/QmSk5HQbn6LhUwDiNMseVUjuRYhEtYj4aUZ6WfWoGURpdV"; + let mut 
runtime = tokio::runtime::current_thread::Runtime::new().unwrap(); - let good_fut = api(None).system_remove_reserved_peer(good_peer_id.into()); - let bad_fut = api(None).system_remove_reserved_peer(bad_peer_id.into()); - assert_eq!(runtime.block_on(good_fut), Ok(())); - assert!(runtime.block_on(bad_fut).is_err()); + let good_fut = api(None).system_remove_reserved_peer(good_peer_id.into()); + let bad_fut = api(None).system_remove_reserved_peer(bad_peer_id.into()); + assert_eq!(runtime.block_on(good_fut), Ok(())); + assert!(runtime.block_on(bad_fut).is_err()); } diff --git a/client/service/src/builder.rs b/client/service/src/builder.rs index 90e644481f..1de1ac26fa 100644 --- a/client/service/src/builder.rs +++ b/client/service/src/builder.rs @@ -14,48 +14,44 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::{Service, NetworkStatus, NetworkState, error::Error, DEFAULT_PROTOCOL_ID, MallocSizeOfWasm}; -use crate::{TaskManagerBuilder, start_rpc_servers, build_network_future, TransactionPoolAdapter}; -use crate::status_sinks; use crate::config::{Configuration, KeystoreConfig, PrometheusConfig}; use crate::metrics::MetricsService; -use sc_client_api::{ - self, - BlockchainEvents, - backend::RemoteBackend, light::RemoteBlockchain, - execution_extensions::ExtensionsFactory, - ExecutorProvider, CallExecutor +use crate::status_sinks; +use crate::{build_network_future, start_rpc_servers, TaskManagerBuilder, TransactionPoolAdapter}; +use crate::{ + error::Error, MallocSizeOfWasm, NetworkState, NetworkStatus, Service, DEFAULT_PROTOCOL_ID, }; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; -use sc_client::Client; +use futures::{future::ready, Future, FutureExt, StreamExt}; +use log::{error, info, warn}; +use parking_lot::{Mutex, RwLock}; +use prometheus_endpoint::Registry as PrometheusRegistry; use sc_chain_spec::get_extension; -use sp_consensus::import_queue::ImportQueue; -use futures::{ - 
Future, FutureExt, StreamExt, - future::ready, +use sc_client::Client; +use sc_client_api::{ + self, backend::RemoteBackend, execution_extensions::ExtensionsFactory, light::RemoteBlockchain, + BlockchainEvents, CallExecutor, ExecutorProvider, }; -use sc_keystore::{Store as Keystore}; -use log::{info, warn, error}; -use sc_network::config::{Role, FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +use sc_executor::{NativeExecutionDispatch, NativeExecutor}; +use sc_keystore::Store as Keystore; +use sc_network::config::{BoxFinalityProofRequestBuilder, FinalityProofProvider, OnDemand, Role}; use sc_network::{NetworkService, NetworkStateInfo}; -use parking_lot::{Mutex, RwLock}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, SaturatedConversion, HashFor, -}; +use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_api::ProvideRuntimeApi; -use sc_executor::{NativeExecutor, NativeExecutionDispatch}; +use sp_blockchain; +use sp_consensus::import_queue::ImportQueue; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, HashFor, NumberFor, SaturatedConversion}; +use sp_transaction_pool::{ChainEvent, MaintainedTransactionPool}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use std::{ - io::{Read, Write, Seek}, - marker::PhantomData, sync::Arc, pin::Pin + io::{Read, Seek, Write}, + marker::PhantomData, + pin::Pin, + sync::Arc, }; use wasm_timer::SystemTime; -use sc_telemetry::{telemetry, SUBSTRATE_INFO}; -use sp_transaction_pool::{MaintainedTransactionPool, ChainEvent}; -use sp_blockchain; -use prometheus_endpoint::Registry as PrometheusRegistry; -pub type BackgroundTask = Pin + Send>>; +pub type BackgroundTask = Pin + Send>>; /// Aggregator for the components required to build a service. 
/// @@ -74,1091 +70,1140 @@ pub type BackgroundTask = Pin + Send>>; /// The order in which the `with_*` methods are called doesn't matter, as the correct binding of /// generics is done when you call `build`. /// -pub struct ServiceBuilder +pub struct ServiceBuilder { - config: Configuration, - pub (crate) client: Arc, - backend: Arc, - tasks_builder: TaskManagerBuilder, - keystore: Arc>, - fetcher: Option, - select_chain: Option, - pub (crate) import_queue: TImpQu, - finality_proof_request_builder: Option, - finality_proof_provider: Option, - transaction_pool: Arc, - rpc_extensions: TRpc, - remote_backend: Option>>, - marker: PhantomData<(TBl, TRtApi)>, - background_tasks: Vec<(&'static str, BackgroundTask)>, + config: Configuration, + pub(crate) client: Arc, + backend: Arc, + tasks_builder: TaskManagerBuilder, + keystore: Arc>, + fetcher: Option, + select_chain: Option, + pub(crate) import_queue: TImpQu, + finality_proof_request_builder: Option, + finality_proof_provider: Option, + transaction_pool: Arc, + rpc_extensions: TRpc, + remote_backend: Option>>, + marker: PhantomData<(TBl, TRtApi)>, + background_tasks: Vec<(&'static str, BackgroundTask)>, } /// Full client type. -pub type TFullClient = Client< - TFullBackend, - TFullCallExecutor, - TBl, - TRtApi, ->; +pub type TFullClient = + Client, TFullCallExecutor, TBl, TRtApi>; /// Full client backend type. pub type TFullBackend = sc_client_db::Backend; /// Full client call executor type. -pub type TFullCallExecutor = sc_client::LocalCallExecutor< - sc_client_db::Backend, - NativeExecutor, ->; +pub type TFullCallExecutor = + sc_client::LocalCallExecutor, NativeExecutor>; /// Light client type. -pub type TLightClient = Client< - TLightBackend, - TLightCallExecutor, - TBl, - TRtApi, ->; +pub type TLightClient = + Client, TLightCallExecutor, TBl, TRtApi>; /// Light client backend type. 
-pub type TLightBackend = sc_client::light::backend::Backend< - sc_client_db::light::LightStorage, - HashFor, ->; +pub type TLightBackend = + sc_client::light::backend::Backend, HashFor>; /// Light call executor type. pub type TLightCallExecutor = sc_client::light::call_executor::GenesisCallExecutor< - sc_client::light::backend::Backend< - sc_client_db::light::LightStorage, - HashFor - >, - sc_client::LocalCallExecutor< - sc_client::light::backend::Backend< - sc_client_db::light::LightStorage, - HashFor - >, - NativeExecutor - >, + sc_client::light::backend::Backend, HashFor>, + sc_client::LocalCallExecutor< + sc_client::light::backend::Backend, HashFor>, + NativeExecutor, + >, >; type TFullParts = ( - TFullClient, - Arc>, - Arc>, - TaskManagerBuilder, + TFullClient, + Arc>, + Arc>, + TaskManagerBuilder, ); /// Creates a new full client for the given config. pub fn new_full_client( - config: &Configuration, -) -> Result, Error> where - TBl: BlockT, - TExecDisp: NativeExecutionDispatch + 'static, + config: &Configuration, +) -> Result, Error> +where + TBl: BlockT, + TExecDisp: NativeExecutionDispatch + 'static, { - new_full_parts(config).map(|parts| parts.0) + new_full_parts(config).map(|parts| parts.0) } fn new_full_parts( - config: &Configuration, -) -> Result, Error> where - TBl: BlockT, - TExecDisp: NativeExecutionDispatch + 'static, + config: &Configuration, +) -> Result, Error> +where + TBl: BlockT, + TExecDisp: NativeExecutionDispatch + 'static, { - let keystore = match &config.keystore { - KeystoreConfig::Path { path, password } => Keystore::open( - path.clone(), - password.clone() - )?, - KeystoreConfig::InMemory => Keystore::new_in_memory(), - }; - - let tasks_builder = { - let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManagerBuilder::new(registry)? 
- }; - - let executor = NativeExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - - let chain_spec = &config.chain_spec; - let fork_blocks = get_extension::>(chain_spec.extensions()) - .cloned() - .unwrap_or_default(); - - let bad_blocks = get_extension::>(chain_spec.extensions()) - .cloned() - .unwrap_or_default(); - - let (client, backend) = { - let db_config = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - pruning: config.pruning.clone(), - source: config.database.clone(), - }; - - let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( - config.execution_strategies.clone(), - Some(keystore.clone()), - ); - - sc_client_db::new_client( - db_config, - executor, - chain_spec.as_storage_builder(), - fork_blocks, - bad_blocks, - extensions, - Box::new(tasks_builder.spawn_handle()), - config.prometheus_config.as_ref().map(|config| config.registry.clone()), - )? - }; - - Ok((client, backend, keystore, tasks_builder)) + let keystore = match &config.keystore { + KeystoreConfig::Path { path, password } => Keystore::open(path.clone(), password.clone())?, + KeystoreConfig::InMemory => Keystore::new_in_memory(), + }; + + let tasks_builder = { + let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + TaskManagerBuilder::new(registry)? 
+ }; + + let executor = NativeExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let chain_spec = &config.chain_spec; + let fork_blocks = get_extension::>(chain_spec.extensions()) + .cloned() + .unwrap_or_default(); + + let bad_blocks = get_extension::>(chain_spec.extensions()) + .cloned() + .unwrap_or_default(); + + let (client, backend) = { + let db_config = sc_client_db::DatabaseSettings { + state_cache_size: config.state_cache_size, + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + pruning: config.pruning.clone(), + source: config.database.clone(), + }; + + let extensions = sc_client_api::execution_extensions::ExecutionExtensions::new( + config.execution_strategies.clone(), + Some(keystore.clone()), + ); + + sc_client_db::new_client( + db_config, + executor, + chain_spec.as_storage_builder(), + fork_blocks, + bad_blocks, + extensions, + Box::new(tasks_builder.spawn_handle()), + config + .prometheus_config + .as_ref() + .map(|config| config.registry.clone()), + )? + }; + + Ok((client, backend, keystore, tasks_builder)) } impl ServiceBuilder<(), (), (), (), (), (), (), (), (), (), ()> { - /// Start the service builder with a configuration. - pub fn new_full( - config: Configuration, - ) -> Result, - Arc>, - (), - (), - BoxFinalityProofRequestBuilder, - Arc>, - (), - (), - TFullBackend, - >, Error> { - let (client, backend, keystore, tasks_builder) = new_full_parts(&config)?; - - let client = Arc::new(client); - - Ok(ServiceBuilder { - config, - client, - backend, - keystore, - tasks_builder, - fetcher: None, - select_chain: None, - import_queue: (), - finality_proof_request_builder: None, - finality_proof_provider: None, - transaction_pool: Arc::new(()), - rpc_extensions: Default::default(), - remote_backend: None, - background_tasks: Default::default(), - marker: PhantomData, - }) - } - - /// Start the service builder with a configuration. 
- pub fn new_light( - config: Configuration, - ) -> Result, - Arc>, - (), - (), - BoxFinalityProofRequestBuilder, - Arc>, - (), - (), - TLightBackend, - >, Error> { - let tasks_builder = { - let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); - TaskManagerBuilder::new(registry)? - }; - - let keystore = match &config.keystore { - KeystoreConfig::Path { path, password } => Keystore::open( - path.clone(), - password.clone() - )?, - KeystoreConfig::InMemory => Keystore::new_in_memory(), - }; - - let executor = NativeExecutor::::new( - config.wasm_method, - config.default_heap_pages, - config.max_runtime_instances, - ); - - let db_storage = { - let db_settings = sc_client_db::DatabaseSettings { - state_cache_size: config.state_cache_size, - state_cache_child_ratio: - config.state_cache_child_ratio.map(|v| (v, 100)), - pruning: config.pruning.clone(), - source: config.database.clone(), - }; - sc_client_db::light::LightStorage::new(db_settings)? - }; - let light_blockchain = sc_client::light::new_light_blockchain(db_storage); - let fetch_checker = Arc::new( - sc_client::light::new_fetch_checker::<_, TBl, _>( - light_blockchain.clone(), - executor.clone(), - Box::new(tasks_builder.spawn_handle()), - ), - ); - let fetcher = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); - let backend = sc_client::light::new_light_backend(light_blockchain); - let remote_blockchain = backend.remote_blockchain(); - let client = Arc::new(sc_client::light::new_light( - backend.clone(), - config.chain_spec.as_storage_builder(), - executor, - Box::new(tasks_builder.spawn_handle()), - config.prometheus_config.as_ref().map(|config| config.registry.clone()), - )?); - - Ok(ServiceBuilder { - config, - client, - backend, - tasks_builder, - keystore, - fetcher: Some(fetcher.clone()), - select_chain: None, - import_queue: (), - finality_proof_request_builder: None, - finality_proof_provider: None, - transaction_pool: Arc::new(()), - rpc_extensions: Default::default(), 
- remote_backend: Some(remote_blockchain), - background_tasks: Default::default(), - marker: PhantomData, - }) - } + /// Start the service builder with a configuration. + pub fn new_full( + config: Configuration, + ) -> Result< + ServiceBuilder< + TBl, + TRtApi, + TFullClient, + Arc>, + (), + (), + BoxFinalityProofRequestBuilder, + Arc>, + (), + (), + TFullBackend, + >, + Error, + > { + let (client, backend, keystore, tasks_builder) = new_full_parts(&config)?; + + let client = Arc::new(client); + + Ok(ServiceBuilder { + config, + client, + backend, + keystore, + tasks_builder, + fetcher: None, + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), + remote_backend: None, + background_tasks: Default::default(), + marker: PhantomData, + }) + } + + /// Start the service builder with a configuration. + pub fn new_light( + config: Configuration, + ) -> Result< + ServiceBuilder< + TBl, + TRtApi, + TLightClient, + Arc>, + (), + (), + BoxFinalityProofRequestBuilder, + Arc>, + (), + (), + TLightBackend, + >, + Error, + > { + let tasks_builder = { + let registry = config.prometheus_config.as_ref().map(|cfg| &cfg.registry); + TaskManagerBuilder::new(registry)? + }; + + let keystore = match &config.keystore { + KeystoreConfig::Path { path, password } => { + Keystore::open(path.clone(), password.clone())? + } + KeystoreConfig::InMemory => Keystore::new_in_memory(), + }; + + let executor = NativeExecutor::::new( + config.wasm_method, + config.default_heap_pages, + config.max_runtime_instances, + ); + + let db_storage = { + let db_settings = sc_client_db::DatabaseSettings { + state_cache_size: config.state_cache_size, + state_cache_child_ratio: config.state_cache_child_ratio.map(|v| (v, 100)), + pruning: config.pruning.clone(), + source: config.database.clone(), + }; + sc_client_db::light::LightStorage::new(db_settings)? 
+ }; + let light_blockchain = sc_client::light::new_light_blockchain(db_storage); + let fetch_checker = Arc::new(sc_client::light::new_fetch_checker::<_, TBl, _>( + light_blockchain.clone(), + executor.clone(), + Box::new(tasks_builder.spawn_handle()), + )); + let fetcher = Arc::new(sc_network::config::OnDemand::new(fetch_checker)); + let backend = sc_client::light::new_light_backend(light_blockchain); + let remote_blockchain = backend.remote_blockchain(); + let client = Arc::new(sc_client::light::new_light( + backend.clone(), + config.chain_spec.as_storage_builder(), + executor, + Box::new(tasks_builder.spawn_handle()), + config + .prometheus_config + .as_ref() + .map(|config| config.registry.clone()), + )?); + + Ok(ServiceBuilder { + config, + client, + backend, + tasks_builder, + keystore, + fetcher: Some(fetcher.clone()), + select_chain: None, + import_queue: (), + finality_proof_request_builder: None, + finality_proof_provider: None, + transaction_pool: Arc::new(()), + rpc_extensions: Default::default(), + remote_backend: Some(remote_blockchain), + background_tasks: Default::default(), + marker: PhantomData, + }) + } } impl - ServiceBuilder { - - /// Returns a reference to the client that was stored in this builder. - pub fn client(&self) -> &Arc { - &self.client - } - - /// Returns a reference to the backend that was used in this builder. - pub fn backend(&self) -> &Arc { - &self.backend - } - - /// Returns a reference to the select-chain that was stored in this builder. - pub fn select_chain(&self) -> Option<&TSc> { - self.select_chain.as_ref() - } - - /// Returns a reference to the keystore - pub fn keystore(&self) -> Arc> { - self.keystore.clone() - } - - /// Returns a reference to the transaction pool stored in this builder - pub fn pool(&self) -> Arc { - self.transaction_pool.clone() - } - - /// Returns a reference to the fetcher, only available if builder - /// was created with `new_light`. 
- pub fn fetcher(&self) -> Option - where TFchr: Clone - { - self.fetcher.clone() - } - - /// Returns a reference to the remote_backend, only available if builder - /// was created with `new_light`. - pub fn remote_backend(&self) -> Option>> { - self.remote_backend.clone() - } - - /// Defines which head-of-chain strategy to use. - pub fn with_opt_select_chain( - self, - select_chain_builder: impl FnOnce( - &Configuration, &Arc, - ) -> Result, Error> - ) -> Result, Error> { - let select_chain = select_chain_builder(&self.config, &self.backend)?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - tasks_builder: self.tasks_builder, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions: self.rpc_extensions, - remote_backend: self.remote_backend, - background_tasks: self.background_tasks, - marker: self.marker, - }) - } - - /// Defines which head-of-chain strategy to use. - pub fn with_select_chain( - self, - builder: impl FnOnce(&Configuration, &Arc) -> Result, - ) -> Result, Error> { - self.with_opt_select_chain(|cfg, b| builder(cfg, b).map(Option::Some)) - } - - /// Defines which import queue to use. 
- pub fn with_import_queue( - self, - builder: impl FnOnce(&Configuration, Arc, Option, Arc) - -> Result - ) -> Result, Error> - where TSc: Clone { - let import_queue = builder( - &self.config, - self.client.clone(), - self.select_chain.clone(), - self.transaction_pool.clone() - )?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - tasks_builder: self.tasks_builder, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions: self.rpc_extensions, - remote_backend: self.remote_backend, - background_tasks: self.background_tasks, - marker: self.marker, - }) - } - - /// Defines which strategy to use for providing finality proofs. - pub fn with_opt_finality_proof_provider( - self, - builder: impl FnOnce(Arc, Arc) -> Result>>, Error> - ) -> Result>, - TExPool, - TRpc, - Backend, - >, Error> { - let finality_proof_provider = builder(self.client.clone(), self.backend.clone())?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - tasks_builder: self.tasks_builder, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions: self.rpc_extensions, - remote_backend: self.remote_backend, - background_tasks: self.background_tasks, - marker: self.marker, - }) - } - - /// Defines which strategy to use for providing finality proofs. 
- pub fn with_finality_proof_provider( - self, - build: impl FnOnce(Arc, Arc) -> Result>, Error> - ) -> Result>, - TExPool, - TRpc, - Backend, - >, Error> { - self.with_opt_finality_proof_provider(|client, backend| build(client, backend).map(Option::Some)) - } - - /// Defines which import queue to use. - pub fn with_import_queue_and_opt_fprb( - self, - builder: impl FnOnce( - &Configuration, - Arc, - Arc, - Option, - Option, - Arc, - ) -> Result<(UImpQu, Option), Error> - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - let (import_queue, fprb) = builder( - &self.config, - self.client.clone(), - self.backend.clone(), - self.fetcher.clone(), - self.select_chain.clone(), - self.transaction_pool.clone() - )?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - tasks_builder: self.tasks_builder, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue, - finality_proof_request_builder: fprb, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions: self.rpc_extensions, - remote_backend: self.remote_backend, - background_tasks: self.background_tasks, - marker: self.marker, - }) - } - - /// Defines which import queue to use. - pub fn with_import_queue_and_fprb( - self, - builder: impl FnOnce( - &Configuration, - Arc, - Arc, - Option, - Option, - Arc, - ) -> Result<(UImpQu, UFprb), Error> - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - self.with_import_queue_and_opt_fprb(|cfg, cl, b, f, sc, tx| - builder(cfg, cl, b, f, sc, tx) - .map(|(q, f)| (q, Some(f))) - ) - } - - /// Defines which transaction pool to use. 
- pub fn with_transaction_pool( - mut self, - transaction_pool_builder: impl FnOnce( - sc_transaction_pool::txpool::Options, - Arc, - Option, - Option<&PrometheusRegistry>, - ) -> Result<(UExPool, Option), Error> - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - let (transaction_pool, background_task) = transaction_pool_builder( - self.config.transaction_pool.clone(), - self.client.clone(), - self.fetcher.clone(), - self.config.prometheus_config.as_ref().map(|config| &config.registry), - )?; - - if let Some(background_task) = background_task{ - self.background_tasks.push(("txpool-background", background_task)); - } - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - tasks_builder: self.tasks_builder, - backend: self.backend, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: Arc::new(transaction_pool), - rpc_extensions: self.rpc_extensions, - remote_backend: self.remote_backend, - background_tasks: self.background_tasks, - marker: self.marker, - }) - } - - /// Defines the RPC extensions to use. 
- pub fn with_rpc_extensions( - self, - rpc_ext_builder: impl FnOnce(&Self) -> Result, - ) -> Result, Error> - where TSc: Clone, TFchr: Clone { - let rpc_extensions = rpc_ext_builder(&self)?; - - Ok(ServiceBuilder { - config: self.config, - client: self.client, - backend: self.backend, - tasks_builder: self.tasks_builder, - keystore: self.keystore, - fetcher: self.fetcher, - select_chain: self.select_chain, - import_queue: self.import_queue, - finality_proof_request_builder: self.finality_proof_request_builder, - finality_proof_provider: self.finality_proof_provider, - transaction_pool: self.transaction_pool, - rpc_extensions, - remote_backend: self.remote_backend, - background_tasks: self.background_tasks, - marker: self.marker, - }) - } + ServiceBuilder +{ + /// Returns a reference to the client that was stored in this builder. + pub fn client(&self) -> &Arc { + &self.client + } + + /// Returns a reference to the backend that was used in this builder. + pub fn backend(&self) -> &Arc { + &self.backend + } + + /// Returns a reference to the select-chain that was stored in this builder. + pub fn select_chain(&self) -> Option<&TSc> { + self.select_chain.as_ref() + } + + /// Returns a reference to the keystore + pub fn keystore(&self) -> Arc> { + self.keystore.clone() + } + + /// Returns a reference to the transaction pool stored in this builder + pub fn pool(&self) -> Arc { + self.transaction_pool.clone() + } + + /// Returns a reference to the fetcher, only available if builder + /// was created with `new_light`. + pub fn fetcher(&self) -> Option + where + TFchr: Clone, + { + self.fetcher.clone() + } + + /// Returns a reference to the remote_backend, only available if builder + /// was created with `new_light`. + pub fn remote_backend(&self) -> Option>> { + self.remote_backend.clone() + } + + /// Defines which head-of-chain strategy to use. 
+ pub fn with_opt_select_chain( + self, + select_chain_builder: impl FnOnce(&Configuration, &Arc) -> Result, Error>, + ) -> Result< + ServiceBuilder, + Error, + > { + let select_chain = select_chain_builder(&self.config, &self.backend)?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + backend: self.backend, + tasks_builder: self.tasks_builder, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + remote_backend: self.remote_backend, + background_tasks: self.background_tasks, + marker: self.marker, + }) + } + + /// Defines which head-of-chain strategy to use. + pub fn with_select_chain( + self, + builder: impl FnOnce(&Configuration, &Arc) -> Result, + ) -> Result< + ServiceBuilder, + Error, + > { + self.with_opt_select_chain(|cfg, b| builder(cfg, b).map(Option::Some)) + } + + /// Defines which import queue to use. 
+ pub fn with_import_queue( + self, + builder: impl FnOnce( + &Configuration, + Arc, + Option, + Arc, + ) -> Result, + ) -> Result< + ServiceBuilder, + Error, + > + where + TSc: Clone, + { + let import_queue = builder( + &self.config, + self.client.clone(), + self.select_chain.clone(), + self.transaction_pool.clone(), + )?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + backend: self.backend, + tasks_builder: self.tasks_builder, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + remote_backend: self.remote_backend, + background_tasks: self.background_tasks, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. + pub fn with_opt_finality_proof_provider( + self, + builder: impl FnOnce( + Arc, + Arc, + ) -> Result>>, Error>, + ) -> Result< + ServiceBuilder< + TBl, + TRtApi, + TCl, + TFchr, + TSc, + TImpQu, + TFprb, + Arc>, + TExPool, + TRpc, + Backend, + >, + Error, + > { + let finality_proof_provider = builder(self.client.clone(), self.backend.clone())?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + backend: self.backend, + tasks_builder: self.tasks_builder, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + remote_backend: self.remote_backend, + background_tasks: self.background_tasks, + marker: self.marker, + }) + } + + /// Defines which strategy to use for providing finality proofs. 
+ pub fn with_finality_proof_provider( + self, + build: impl FnOnce(Arc, Arc) -> Result>, Error>, + ) -> Result< + ServiceBuilder< + TBl, + TRtApi, + TCl, + TFchr, + TSc, + TImpQu, + TFprb, + Arc>, + TExPool, + TRpc, + Backend, + >, + Error, + > { + self.with_opt_finality_proof_provider(|client, backend| { + build(client, backend).map(Option::Some) + }) + } + + /// Defines which import queue to use. + pub fn with_import_queue_and_opt_fprb( + self, + builder: impl FnOnce( + &Configuration, + Arc, + Arc, + Option, + Option, + Arc, + ) -> Result<(UImpQu, Option), Error>, + ) -> Result< + ServiceBuilder, + Error, + > + where + TSc: Clone, + TFchr: Clone, + { + let (import_queue, fprb) = builder( + &self.config, + self.client.clone(), + self.backend.clone(), + self.fetcher.clone(), + self.select_chain.clone(), + self.transaction_pool.clone(), + )?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + backend: self.backend, + tasks_builder: self.tasks_builder, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue, + finality_proof_request_builder: fprb, + finality_proof_provider: self.finality_proof_provider, + transaction_pool: self.transaction_pool, + rpc_extensions: self.rpc_extensions, + remote_backend: self.remote_backend, + background_tasks: self.background_tasks, + marker: self.marker, + }) + } + + /// Defines which import queue to use. + pub fn with_import_queue_and_fprb( + self, + builder: impl FnOnce( + &Configuration, + Arc, + Arc, + Option, + Option, + Arc, + ) -> Result<(UImpQu, UFprb), Error>, + ) -> Result< + ServiceBuilder, + Error, + > + where + TSc: Clone, + TFchr: Clone, + { + self.with_import_queue_and_opt_fprb(|cfg, cl, b, f, sc, tx| { + builder(cfg, cl, b, f, sc, tx).map(|(q, f)| (q, Some(f))) + }) + } + + /// Defines which transaction pool to use. 
+ pub fn with_transaction_pool( + mut self, + transaction_pool_builder: impl FnOnce( + sc_transaction_pool::txpool::Options, + Arc, + Option, + Option<&PrometheusRegistry>, + ) + -> Result<(UExPool, Option), Error>, + ) -> Result< + ServiceBuilder, + Error, + > + where + TSc: Clone, + TFchr: Clone, + { + let (transaction_pool, background_task) = transaction_pool_builder( + self.config.transaction_pool.clone(), + self.client.clone(), + self.fetcher.clone(), + self.config + .prometheus_config + .as_ref() + .map(|config| &config.registry), + )?; + + if let Some(background_task) = background_task { + self.background_tasks + .push(("txpool-background", background_task)); + } + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + tasks_builder: self.tasks_builder, + backend: self.backend, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + transaction_pool: Arc::new(transaction_pool), + rpc_extensions: self.rpc_extensions, + remote_backend: self.remote_backend, + background_tasks: self.background_tasks, + marker: self.marker, + }) + } + + /// Defines the RPC extensions to use. 
+ pub fn with_rpc_extensions( + self, + rpc_ext_builder: impl FnOnce(&Self) -> Result, + ) -> Result< + ServiceBuilder, + Error, + > + where + TSc: Clone, + TFchr: Clone, + { + let rpc_extensions = rpc_ext_builder(&self)?; + + Ok(ServiceBuilder { + config: self.config, + client: self.client, + backend: self.backend, + tasks_builder: self.tasks_builder, + keystore: self.keystore, + fetcher: self.fetcher, + select_chain: self.select_chain, + import_queue: self.import_queue, + finality_proof_request_builder: self.finality_proof_request_builder, + finality_proof_provider: self.finality_proof_provider, + transaction_pool: self.transaction_pool, + rpc_extensions, + remote_backend: self.remote_backend, + background_tasks: self.background_tasks, + marker: self.marker, + }) + } } /// Implemented on `ServiceBuilder`. Allows running block commands, such as import/export/validate /// components to the builder. pub trait ServiceBuilderCommand { - /// Block type this API operates on. - type Block: BlockT; - /// Native execution dispatch required by some commands. - type NativeDispatch: NativeExecutionDispatch + 'static; - /// Starts the process of importing blocks. - fn import_blocks( - self, - input: impl Read + Seek + Send + 'static, - force: bool, - ) -> Pin> + Send>>; - - /// Performs the blocks export. - fn export_blocks( - self, - output: impl Write + 'static, - from: NumberFor, - to: Option>, - binary: bool - ) -> Pin>>>; - - /// Performs a revert of `blocks` blocks. - fn revert_chain( - &self, - blocks: NumberFor - ) -> Result<(), Error>; - - /// Re-validate known block. - fn check_block( - self, - block: BlockId - ) -> Pin> + Send>>; + /// Block type this API operates on. + type Block: BlockT; + /// Native execution dispatch required by some commands. + type NativeDispatch: NativeExecutionDispatch + 'static; + /// Starts the process of importing blocks. 
+ fn import_blocks( + self, + input: impl Read + Seek + Send + 'static, + force: bool, + ) -> Pin> + Send>>; + + /// Performs the blocks export. + fn export_blocks( + self, + output: impl Write + 'static, + from: NumberFor, + to: Option>, + binary: bool, + ) -> Pin>>>; + + /// Performs a revert of `blocks` blocks. + fn revert_chain(&self, blocks: NumberFor) -> Result<(), Error>; + + /// Re-validate known block. + fn check_block( + self, + block: BlockId, + ) -> Pin> + Send>>; } impl -ServiceBuilder< - TBl, - TRtApi, - Client, - Arc>, - TSc, - TImpQu, - BoxFinalityProofRequestBuilder, - Arc>, - TExPool, - TRpc, - TBackend, -> where - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: - sp_api::Metadata + - sc_offchain::OffchainWorkerApi + - sp_transaction_pool::runtime_api::TaggedTransactionQueue + - sp_session::SessionKeys + - sp_api::ApiErrorExt + - sp_api::ApiExt, - TBl: BlockT, - TRtApi: 'static + Send + Sync, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, - TSc: Clone, - TImpQu: 'static + ImportQueue, - TExPool: MaintainedTransactionPool::Hash> + MallocSizeOfWasm + 'static, - TRpc: sc_rpc::RpcExtension + Clone, + ServiceBuilder< + TBl, + TRtApi, + Client, + Arc>, + TSc, + TImpQu, + BoxFinalityProofRequestBuilder, + Arc>, + TExPool, + TRpc, + TBackend, + > +where + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + sp_api::Metadata + + sc_offchain::OffchainWorkerApi + + sp_transaction_pool::runtime_api::TaggedTransactionQueue + + sp_session::SessionKeys + + sp_api::ApiErrorExt + + sp_api::ApiExt, + TBl: BlockT, + TRtApi: 'static + Send + Sync, + TBackend: 'static + sc_client_api::backend::Backend + Send, + TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, + TSc: Clone, + TImpQu: 'static + ImportQueue, + TExPool: MaintainedTransactionPool::Hash> + + MallocSizeOfWasm + + 'static, + TRpc: sc_rpc::RpcExtension + Clone, { - - /// Set an 
ExecutionExtensionsFactory - pub fn with_execution_extensions_factory(self, execution_extensions_factory: Box) -> Result { - self.client.execution_extensions().set_extensions_factory(execution_extensions_factory); - Ok(self) - } - - /// Builds the service. - pub fn build(self) -> Result, - TSc, - NetworkStatus, - NetworkService::Hash>, - TExPool, - sc_offchain::OffchainWorkers< - Client, - TBackend::OffchainStorage, - TBl - >, - >, Error> - where TExec: CallExecutor, - { - let ServiceBuilder { - marker: _, - mut config, - client, - tasks_builder, - fetcher: on_demand, - backend, - keystore, - select_chain, - import_queue, - finality_proof_request_builder, - finality_proof_provider, - transaction_pool, - rpc_extensions, - remote_backend, - background_tasks, - } = self; - - sp_session::generate_initial_session_keys( - client.clone(), - &BlockId::Hash(client.chain_info().best_hash), - config.dev_key_seed.clone().map(|s| vec![s]).unwrap_or_default(), - )?; - - // A side-channel for essential tasks to communicate shutdown. - let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks"); - - let import_queue = Box::new(import_queue); - let chain_info = client.chain_info(); - let chain_spec = &config.chain_spec; - - let version = config.impl_version; - info!("📦 Highest known block at #{}", chain_info.best_number); - telemetry!( - SUBSTRATE_INFO; - "node.start"; - "height" => chain_info.best_number.saturated_into::(), - "best" => ?chain_info.best_hash - ); - - // make transaction pool available for off-chain runtime calls. 
- client.execution_extensions() - .register_transaction_pool(Arc::downgrade(&transaction_pool) as _); - - let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { - imports_external_transactions: !matches!(config.role, Role::Light), - pool: transaction_pool.clone(), - client: client.clone(), - executor: tasks_builder.spawn_handle(), - }); - - let protocol_id = { - let protocol_id_full = match chain_spec.protocol_id() { - Some(pid) => pid, - None => { - warn!("Using default protocol ID {:?} because none is configured in the \ - chain specs", DEFAULT_PROTOCOL_ID - ); - DEFAULT_PROTOCOL_ID - } - }.as_bytes(); - sc_network::config::ProtocolId::from(protocol_id_full) - }; - - let block_announce_validator = - Box::new(sp_consensus::block_validation::DefaultBlockAnnounceValidator::new(client.clone())); - - let network_params = sc_network::config::Params { - role: config.role.clone(), - executor: { - let spawn_handle = tasks_builder.spawn_handle(); - Some(Box::new(move |fut| { - spawn_handle.spawn("libp2p-node", fut); - })) - }, - network_config: config.network.clone(), - chain: client.clone(), - finality_proof_provider, - finality_proof_request_builder, - on_demand: on_demand.clone(), - transaction_pool: transaction_pool_adapter.clone() as _, - import_queue, - protocol_id, - block_announce_validator, - metrics_registry: config.prometheus_config.as_ref().map(|config| config.registry.clone()) - }; - - let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); - let network_mut = sc_network::NetworkWorker::new(network_params)?; - let network = network_mut.service().clone(); - let network_status_sinks = Arc::new(Mutex::new(status_sinks::StatusSinks::new())); - - let offchain_storage = backend.offchain_storage(); - let offchain_workers = match (config.offchain_worker, offchain_storage.clone()) { - (true, Some(db)) => { - Some(Arc::new(sc_offchain::OffchainWorkers::new(client.clone(), db))) - }, - (true, None) => { - warn!("Offchain workers disabled, due 
to lack of offchain storage support in backend."); - None - }, - _ => None, - }; - - let spawn_handle = tasks_builder.spawn_handle(); - - // Spawn background tasks which were stacked during the - // service building. - for (title, background_task) in background_tasks { - spawn_handle.spawn(title, background_task); - } - - { - // block notifications - let txpool = Arc::downgrade(&transaction_pool); - let offchain = offchain_workers.as_ref().map(Arc::downgrade); - let notifications_spawn_handle = tasks_builder.spawn_handle(); - let network_state_info: Arc = network.clone(); - let is_validator = config.role.is_authority(); - - let (import_stream, finality_stream) = ( - client.import_notification_stream().map(|n| ChainEvent::NewBlock { - id: BlockId::Hash(n.hash), - header: n.header, - retracted: n.retracted, - is_new_best: n.is_new_best, - }), - client.finality_notification_stream().map(|n| ChainEvent::Finalized { - hash: n.hash - }) - ); - let events = futures::stream::select(import_stream, finality_stream) - .for_each(move |event| { - // offchain worker is only interested in block import events - if let ChainEvent::NewBlock { ref header, is_new_best, .. 
} = event { - let offchain = offchain.as_ref().and_then(|o| o.upgrade()); - match offchain { - Some(offchain) if is_new_best => { - notifications_spawn_handle.spawn( - "offchain-on-block", - offchain.on_block_imported( - &header, - network_state_info.clone(), - is_validator, - ), - ); - }, - Some(_) => log::debug!( - target: "sc_offchain", - "Skipping offchain workers for non-canon block: {:?}", - header, - ), - _ => {}, - } - }; - - let txpool = txpool.upgrade(); - if let Some(txpool) = txpool.as_ref() { - notifications_spawn_handle.spawn( - "txpool-maintain", - txpool.maintain(event), - ); - } - - ready(()) - }); - - spawn_handle.spawn( - "txpool-and-offchain-notif", - events, - ); - } - - { - // extrinsic notifications - let network = Arc::downgrade(&network); - let transaction_pool_ = transaction_pool.clone(); - let events = transaction_pool.import_notification_stream() - .for_each(move |hash| { - if let Some(network) = network.upgrade() { - network.propagate_extrinsic(hash); - } - let status = transaction_pool_.status(); - telemetry!(SUBSTRATE_INFO; "txpool.import"; - "ready" => status.ready, - "future" => status.future - ); - ready(()) - }); - - spawn_handle.spawn( - "telemetry-on-block", - events, - ); - } - - // Prometheus metrics. - let mut metrics_service = if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { - // Set static metrics. - - - let role_bits = match config.role { - Role::Full => 1u64, - Role::Light => 2u64, - Role::Sentry { .. } => 3u64, - Role::Authority { .. } => 4u64, - }; - let metrics = MetricsService::with_prometheus( - ®istry, - &config.network.node_name, - &config.impl_version, - role_bits, - )?; - spawn_handle.spawn( - "prometheus-endpoint", - prometheus_endpoint::init_prometheus(port, registry).map(drop) - ); - - metrics - } else { - MetricsService::new() - }; - - // Periodically notify the telemetry. 
- let transaction_pool_ = transaction_pool.clone(); - let client_ = client.clone(); - let (state_tx, state_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat1"); - network_status_sinks.lock().push(std::time::Duration::from_millis(5000), state_tx); - let tel_task = state_rx.for_each(move |(net_status, _)| { - let info = client_.usage_info(); - metrics_service.tick( - &info, - &transaction_pool_.status(), - &net_status, - ); - ready(()) - }); - - spawn_handle.spawn( - "telemetry-periodic-send", - tel_task, - ); - - // Periodically send the network state to the telemetry. - let (netstat_tx, netstat_rx) = tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat2"); - network_status_sinks.lock().push(std::time::Duration::from_secs(30), netstat_tx); - let tel_task_2 = netstat_rx.for_each(move |(_, network_state)| { - telemetry!( - SUBSTRATE_INFO; - "system.network_state"; - "state" => network_state, - ); - ready(()) - }); - spawn_handle.spawn( - "telemetry-periodic-network-state", - tel_task_2, - ); - - // RPC - let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); - let gen_handler = || { - use sc_rpc::{chain, state, author, system, offchain}; - - let system_info = sc_rpc::system::SystemInfo { - chain_name: chain_spec.name().into(), - impl_name: config.impl_name.into(), - impl_version: config.impl_version.into(), - properties: chain_spec.properties().clone(), - chain_type: chain_spec.chain_type().clone(), - }; - - let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle())); - - let (chain, state) = if let (Some(remote_backend), Some(on_demand)) = - (remote_backend.as_ref(), on_demand.as_ref()) { - // Light clients - let chain = sc_rpc::chain::new_light( - client.clone(), - subscriptions.clone(), - remote_backend.clone(), - on_demand.clone() - ); - let state = sc_rpc::state::new_light( - client.clone(), - subscriptions.clone(), - remote_backend.clone(), - on_demand.clone() - ); - (chain, 
state) - - } else { - // Full nodes - let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); - let state = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); - (chain, state) - }; - - let author = sc_rpc::author::Author::new( - client.clone(), - transaction_pool.clone(), - subscriptions, - keystore.clone(), - ); - let system = system::System::new(system_info, system_rpc_tx.clone()); - - match offchain_storage.clone() { - Some(storage) => { - let offchain = sc_rpc::offchain::Offchain::new(storage); - sc_rpc_server::rpc_handler(( - state::StateApi::to_delegate(state), - chain::ChainApi::to_delegate(chain), - offchain::OffchainApi::to_delegate(offchain), - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions.clone(), - )) - }, - None => sc_rpc_server::rpc_handler(( - state::StateApi::to_delegate(state), - chain::ChainApi::to_delegate(chain), - author::AuthorApi::to_delegate(author), - system::SystemApi::to_delegate(system), - rpc_extensions.clone(), - )) - } - }; - let rpc_handlers = gen_handler(); - let rpc = start_rpc_servers(&config, gen_handler)?; - - spawn_handle.spawn( - "network-worker", - build_network_future( - config.role.clone(), - network_mut, - client.clone(), - network_status_sinks.clone(), - system_rpc_rx, - has_bootnodes, - config.announce_block, - ), - ); - - let telemetry_connection_sinks: Arc>>> = Default::default(); - - // Telemetry - let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { - let is_authority = config.role.is_authority(); - let network_id = network.local_peer_id().to_base58(); - let name = config.network.node_name.clone(); - let impl_name = config.impl_name.to_owned(); - let version = version.clone(); - let chain_name = config.chain_spec.name().to_owned(); - let telemetry_connection_sinks_ = telemetry_connection_sinks.clone(); - let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { - endpoints, - 
wasm_external_transport: config.telemetry_external_transport.take(), - }); - let startup_time = SystemTime::UNIX_EPOCH.elapsed() - .map(|dur| dur.as_millis()) - .unwrap_or(0); - let future = telemetry.clone() - .for_each(move |event| { - // Safe-guard in case we add more events in the future. - let sc_telemetry::TelemetryEvent::Connected = event; - - telemetry!(SUBSTRATE_INFO; "system.connected"; - "name" => name.clone(), - "implementation" => impl_name.clone(), - "version" => version.clone(), - "config" => "", - "chain" => chain_name.clone(), - "authority" => is_authority, - "startup_time" => startup_time, - "network_id" => network_id.clone() - ); - - telemetry_connection_sinks_.lock().retain(|sink| { - sink.unbounded_send(()).is_ok() - }); - ready(()) - }); - - spawn_handle.spawn( - "telemetry-worker", - future, - ); - - telemetry - }); - - // Instrumentation - if let Some(tracing_targets) = config.tracing_targets.as_ref() { - let subscriber = sc_tracing::ProfilingSubscriber::new( - config.tracing_receiver, tracing_targets - ); - match tracing::subscriber::set_global_default(subscriber) { - Ok(_) => (), - Err(e) => error!(target: "tracing", "Unable to set global default subscriber {}", e), - } - } - - Ok(Service { - client, - task_manager: tasks_builder.into_task_manager(config.task_executor), - network, - network_status_sinks, - select_chain, - transaction_pool, - essential_failed_tx, - essential_failed_rx, - rpc_handlers, - _rpc: rpc, - _telemetry: telemetry, - _offchain_workers: offchain_workers, - _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), - keystore, - marker: PhantomData::, - prometheus_registry: config.prometheus_config.map(|config| config.registry) - }) - } + /// Set an ExecutionExtensionsFactory + pub fn with_execution_extensions_factory( + self, + execution_extensions_factory: Box, + ) -> Result { + self.client + .execution_extensions() + .set_extensions_factory(execution_extensions_factory); + Ok(self) + } + + /// Builds the 
service. + pub fn build( + self, + ) -> Result< + Service< + TBl, + Client, + TSc, + NetworkStatus, + NetworkService::Hash>, + TExPool, + sc_offchain::OffchainWorkers< + Client, + TBackend::OffchainStorage, + TBl, + >, + >, + Error, + > + where + TExec: CallExecutor, + { + let ServiceBuilder { + marker: _, + mut config, + client, + tasks_builder, + fetcher: on_demand, + backend, + keystore, + select_chain, + import_queue, + finality_proof_request_builder, + finality_proof_provider, + transaction_pool, + rpc_extensions, + remote_backend, + background_tasks, + } = self; + + sp_session::generate_initial_session_keys( + client.clone(), + &BlockId::Hash(client.chain_info().best_hash), + config + .dev_key_seed + .clone() + .map(|s| vec![s]) + .unwrap_or_default(), + )?; + + // A side-channel for essential tasks to communicate shutdown. + let (essential_failed_tx, essential_failed_rx) = tracing_unbounded("mpsc_essential_tasks"); + + let import_queue = Box::new(import_queue); + let chain_info = client.chain_info(); + let chain_spec = &config.chain_spec; + + let version = config.impl_version; + info!("📦 Highest known block at #{}", chain_info.best_number); + telemetry!( + SUBSTRATE_INFO; + "node.start"; + "height" => chain_info.best_number.saturated_into::(), + "best" => ?chain_info.best_hash + ); + + // make transaction pool available for off-chain runtime calls. 
+ client + .execution_extensions() + .register_transaction_pool(Arc::downgrade(&transaction_pool) as _); + + let transaction_pool_adapter = Arc::new(TransactionPoolAdapter { + imports_external_transactions: !matches!(config.role, Role::Light), + pool: transaction_pool.clone(), + client: client.clone(), + executor: tasks_builder.spawn_handle(), + }); + + let protocol_id = { + let protocol_id_full = match chain_spec.protocol_id() { + Some(pid) => pid, + None => { + warn!( + "Using default protocol ID {:?} because none is configured in the \ + chain specs", + DEFAULT_PROTOCOL_ID + ); + DEFAULT_PROTOCOL_ID + } + } + .as_bytes(); + sc_network::config::ProtocolId::from(protocol_id_full) + }; + + let block_announce_validator = Box::new( + sp_consensus::block_validation::DefaultBlockAnnounceValidator::new(client.clone()), + ); + + let network_params = sc_network::config::Params { + role: config.role.clone(), + executor: { + let spawn_handle = tasks_builder.spawn_handle(); + Some(Box::new(move |fut| { + spawn_handle.spawn("libp2p-node", fut); + })) + }, + network_config: config.network.clone(), + chain: client.clone(), + finality_proof_provider, + finality_proof_request_builder, + on_demand: on_demand.clone(), + transaction_pool: transaction_pool_adapter.clone() as _, + import_queue, + protocol_id, + block_announce_validator, + metrics_registry: config + .prometheus_config + .as_ref() + .map(|config| config.registry.clone()), + }; + + let has_bootnodes = !network_params.network_config.boot_nodes.is_empty(); + let network_mut = sc_network::NetworkWorker::new(network_params)?; + let network = network_mut.service().clone(); + let network_status_sinks = Arc::new(Mutex::new(status_sinks::StatusSinks::new())); + + let offchain_storage = backend.offchain_storage(); + let offchain_workers = match (config.offchain_worker, offchain_storage.clone()) { + (true, Some(db)) => Some(Arc::new(sc_offchain::OffchainWorkers::new( + client.clone(), + db, + ))), + (true, None) => { + 
warn!("Offchain workers disabled, due to lack of offchain storage support in backend."); + None + } + _ => None, + }; + + let spawn_handle = tasks_builder.spawn_handle(); + + // Spawn background tasks which were stacked during the + // service building. + for (title, background_task) in background_tasks { + spawn_handle.spawn(title, background_task); + } + + { + // block notifications + let txpool = Arc::downgrade(&transaction_pool); + let offchain = offchain_workers.as_ref().map(Arc::downgrade); + let notifications_spawn_handle = tasks_builder.spawn_handle(); + let network_state_info: Arc = network.clone(); + let is_validator = config.role.is_authority(); + + let (import_stream, finality_stream) = ( + client + .import_notification_stream() + .map(|n| ChainEvent::NewBlock { + id: BlockId::Hash(n.hash), + header: n.header, + retracted: n.retracted, + is_new_best: n.is_new_best, + }), + client + .finality_notification_stream() + .map(|n| ChainEvent::Finalized { hash: n.hash }), + ); + let events = + futures::stream::select(import_stream, finality_stream).for_each(move |event| { + // offchain worker is only interested in block import events + if let ChainEvent::NewBlock { + ref header, + is_new_best, + .. 
+ } = event + { + let offchain = offchain.as_ref().and_then(|o| o.upgrade()); + match offchain { + Some(offchain) if is_new_best => { + notifications_spawn_handle.spawn( + "offchain-on-block", + offchain.on_block_imported( + &header, + network_state_info.clone(), + is_validator, + ), + ); + } + Some(_) => log::debug!( + target: "sc_offchain", + "Skipping offchain workers for non-canon block: {:?}", + header, + ), + _ => {} + } + }; + + let txpool = txpool.upgrade(); + if let Some(txpool) = txpool.as_ref() { + notifications_spawn_handle.spawn("txpool-maintain", txpool.maintain(event)); + } + + ready(()) + }); + + spawn_handle.spawn("txpool-and-offchain-notif", events); + } + + { + // extrinsic notifications + let network = Arc::downgrade(&network); + let transaction_pool_ = transaction_pool.clone(); + let events = transaction_pool + .import_notification_stream() + .for_each(move |hash| { + if let Some(network) = network.upgrade() { + network.propagate_extrinsic(hash); + } + let status = transaction_pool_.status(); + telemetry!(SUBSTRATE_INFO; "txpool.import"; + "ready" => status.ready, + "future" => status.future + ); + ready(()) + }); + + spawn_handle.spawn("telemetry-on-block", events); + } + + // Prometheus metrics. + let mut metrics_service = + if let Some(PrometheusConfig { port, registry }) = config.prometheus_config.clone() { + // Set static metrics. + + let role_bits = match config.role { + Role::Full => 1u64, + Role::Light => 2u64, + Role::Sentry { .. } => 3u64, + Role::Authority { .. } => 4u64, + }; + let metrics = MetricsService::with_prometheus( + ®istry, + &config.network.node_name, + &config.impl_version, + role_bits, + )?; + spawn_handle.spawn( + "prometheus-endpoint", + prometheus_endpoint::init_prometheus(port, registry).map(drop), + ); + + metrics + } else { + MetricsService::new() + }; + + // Periodically notify the telemetry. 
+ let transaction_pool_ = transaction_pool.clone(); + let client_ = client.clone(); + let (state_tx, state_rx) = + tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat1"); + network_status_sinks + .lock() + .push(std::time::Duration::from_millis(5000), state_tx); + let tel_task = state_rx.for_each(move |(net_status, _)| { + let info = client_.usage_info(); + metrics_service.tick(&info, &transaction_pool_.status(), &net_status); + ready(()) + }); + + spawn_handle.spawn("telemetry-periodic-send", tel_task); + + // Periodically send the network state to the telemetry. + let (netstat_tx, netstat_rx) = + tracing_unbounded::<(NetworkStatus<_>, NetworkState)>("mpsc_netstat2"); + network_status_sinks + .lock() + .push(std::time::Duration::from_secs(30), netstat_tx); + let tel_task_2 = netstat_rx.for_each(move |(_, network_state)| { + telemetry!( + SUBSTRATE_INFO; + "system.network_state"; + "state" => network_state, + ); + ready(()) + }); + spawn_handle.spawn("telemetry-periodic-network-state", tel_task_2); + + // RPC + let (system_rpc_tx, system_rpc_rx) = tracing_unbounded("mpsc_system_rpc"); + let gen_handler = || { + use sc_rpc::{author, chain, offchain, state, system}; + + let system_info = sc_rpc::system::SystemInfo { + chain_name: chain_spec.name().into(), + impl_name: config.impl_name.into(), + impl_version: config.impl_version.into(), + properties: chain_spec.properties().clone(), + chain_type: chain_spec.chain_type().clone(), + }; + + let subscriptions = sc_rpc::Subscriptions::new(Arc::new(tasks_builder.spawn_handle())); + + let (chain, state) = if let (Some(remote_backend), Some(on_demand)) = + (remote_backend.as_ref(), on_demand.as_ref()) + { + // Light clients + let chain = sc_rpc::chain::new_light( + client.clone(), + subscriptions.clone(), + remote_backend.clone(), + on_demand.clone(), + ); + let state = sc_rpc::state::new_light( + client.clone(), + subscriptions.clone(), + remote_backend.clone(), + on_demand.clone(), + ); + (chain, state) + } 
else { + // Full nodes + let chain = sc_rpc::chain::new_full(client.clone(), subscriptions.clone()); + let state = sc_rpc::state::new_full(client.clone(), subscriptions.clone()); + (chain, state) + }; + + let author = sc_rpc::author::Author::new( + client.clone(), + transaction_pool.clone(), + subscriptions, + keystore.clone(), + ); + let system = system::System::new(system_info, system_rpc_tx.clone()); + + match offchain_storage.clone() { + Some(storage) => { + let offchain = sc_rpc::offchain::Offchain::new(storage); + sc_rpc_server::rpc_handler(( + state::StateApi::to_delegate(state), + chain::ChainApi::to_delegate(chain), + offchain::OffchainApi::to_delegate(offchain), + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions.clone(), + )) + } + None => sc_rpc_server::rpc_handler(( + state::StateApi::to_delegate(state), + chain::ChainApi::to_delegate(chain), + author::AuthorApi::to_delegate(author), + system::SystemApi::to_delegate(system), + rpc_extensions.clone(), + )), + } + }; + let rpc_handlers = gen_handler(); + let rpc = start_rpc_servers(&config, gen_handler)?; + + spawn_handle.spawn( + "network-worker", + build_network_future( + config.role.clone(), + network_mut, + client.clone(), + network_status_sinks.clone(), + system_rpc_rx, + has_bootnodes, + config.announce_block, + ), + ); + + let telemetry_connection_sinks: Arc>>> = + Default::default(); + + // Telemetry + let telemetry = config.telemetry_endpoints.clone().map(|endpoints| { + let is_authority = config.role.is_authority(); + let network_id = network.local_peer_id().to_base58(); + let name = config.network.node_name.clone(); + let impl_name = config.impl_name.to_owned(); + let version = version.clone(); + let chain_name = config.chain_spec.name().to_owned(); + let telemetry_connection_sinks_ = telemetry_connection_sinks.clone(); + let telemetry = sc_telemetry::init_telemetry(sc_telemetry::TelemetryConfig { + endpoints, + wasm_external_transport: 
config.telemetry_external_transport.take(), + }); + let startup_time = SystemTime::UNIX_EPOCH + .elapsed() + .map(|dur| dur.as_millis()) + .unwrap_or(0); + let future = telemetry.clone().for_each(move |event| { + // Safe-guard in case we add more events in the future. + let sc_telemetry::TelemetryEvent::Connected = event; + + telemetry!(SUBSTRATE_INFO; "system.connected"; + "name" => name.clone(), + "implementation" => impl_name.clone(), + "version" => version.clone(), + "config" => "", + "chain" => chain_name.clone(), + "authority" => is_authority, + "startup_time" => startup_time, + "network_id" => network_id.clone() + ); + + telemetry_connection_sinks_ + .lock() + .retain(|sink| sink.unbounded_send(()).is_ok()); + ready(()) + }); + + spawn_handle.spawn("telemetry-worker", future); + + telemetry + }); + + // Instrumentation + if let Some(tracing_targets) = config.tracing_targets.as_ref() { + let subscriber = + sc_tracing::ProfilingSubscriber::new(config.tracing_receiver, tracing_targets); + match tracing::subscriber::set_global_default(subscriber) { + Ok(_) => (), + Err(e) => { + error!(target: "tracing", "Unable to set global default subscriber {}", e) + } + } + } + + Ok(Service { + client, + task_manager: tasks_builder.into_task_manager(config.task_executor), + network, + network_status_sinks, + select_chain, + transaction_pool, + essential_failed_tx, + essential_failed_rx, + rpc_handlers, + _rpc: rpc, + _telemetry: telemetry, + _offchain_workers: offchain_workers, + _telemetry_on_connect_sinks: telemetry_connection_sinks.clone(), + keystore, + marker: PhantomData::, + prometheus_registry: config.prometheus_config.map(|config| config.registry), + }) + } } diff --git a/client/service/src/chain_ops.rs b/client/service/src/chain_ops.rs index 12fae32241..a10612272b 100644 --- a/client/service/src/chain_ops.rs +++ b/client/service/src/chain_ops.rs @@ -16,284 +16,302 @@ //! Chain utilities. 
+use crate::builder::{ServiceBuilder, ServiceBuilderCommand}; use crate::error; -use crate::builder::{ServiceBuilderCommand, ServiceBuilder}; use crate::error::Error; -use sc_chain_spec::ChainSpec; -use log::{warn, info}; -use futures::{future, prelude::*}; -use sp_runtime::traits::{ - Block as BlockT, NumberFor, One, Zero, Header, SaturatedConversion -}; -use sp_runtime::generic::{BlockId, SignedBlock}; use codec::{Decode, Encode, IoReader}; +use futures::{future, prelude::*}; +use log::{info, warn}; +use sc_chain_spec::ChainSpec; use sc_client::{Client, LocalCallExecutor}; +use sc_executor::{NativeExecutionDispatch, NativeExecutor}; use sp_consensus::{ - BlockOrigin, - import_queue::{IncomingBlock, Link, BlockImportError, BlockImportResult, ImportQueue}, + import_queue::{BlockImportError, BlockImportResult, ImportQueue, IncomingBlock, Link}, + BlockOrigin, }; -use sc_executor::{NativeExecutor, NativeExecutionDispatch}; +use sp_runtime::generic::{BlockId, SignedBlock}; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor, One, SaturatedConversion, Zero}; -use std::{io::{Read, Write, Seek}, pin::Pin}; use sc_client_api::BlockBackend; +use std::{ + io::{Read, Seek, Write}, + pin::Pin, +}; /// Build a chain spec json pub fn build_spec(spec: &dyn ChainSpec, raw: bool) -> error::Result { - Ok(spec.as_json(raw)?) + Ok(spec.as_json(raw)?) 
} -impl< - TBl, TRtApi, TBackend, - TExecDisp, TFchr, TSc, TImpQu, TFprb, TFpp, - TExPool, TRpc, Backend -> ServiceBuilderCommand for ServiceBuilder< - TBl, TRtApi, - Client>, TBl, TRtApi>, - TFchr, TSc, TImpQu, TFprb, TFpp, TExPool, TRpc, Backend -> where - TBl: BlockT, - TBackend: 'static + sc_client_api::backend::Backend + Send, - TExecDisp: 'static + NativeExecutionDispatch, - TImpQu: 'static + ImportQueue, - TRtApi: 'static + Send + Sync, +impl + ServiceBuilderCommand + for ServiceBuilder< + TBl, + TRtApi, + Client>, TBl, TRtApi>, + TFchr, + TSc, + TImpQu, + TFprb, + TFpp, + TExPool, + TRpc, + Backend, + > +where + TBl: BlockT, + TBackend: 'static + sc_client_api::backend::Backend + Send, + TExecDisp: 'static + NativeExecutionDispatch, + TImpQu: 'static + ImportQueue, + TRtApi: 'static + Send + Sync, { - type Block = TBl; - type NativeDispatch = TExecDisp; - - fn import_blocks( - self, - input: impl Read + Seek + Send + 'static, - force: bool, - ) -> Pin> + Send>> { - struct WaitLink { - imported_blocks: u64, - has_error: bool, - } - - impl WaitLink { - fn new() -> WaitLink { - WaitLink { - imported_blocks: 0, - has_error: false, - } - } - } - - impl Link for WaitLink { - fn blocks_processed( - &mut self, - imported: usize, - _count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - self.imported_blocks += imported as u64; - - for result in results { - if let (Err(err), hash) = result { - warn!("There was an error importing block with hash {:?}: {:?}", hash, err); - self.has_error = true; - break; - } - } - } - } - - let client = self.client; - let mut queue = self.import_queue; - - let mut io_reader_input = IoReader(input); - let mut count = None::; - let mut read_block_count = 0; - let mut link = WaitLink::new(); - - // Importing blocks is implemented as a future, because we want the operation to be - // interruptible. 
- // - // Every time we read a block from the input or import a bunch of blocks from the import - // queue, the `Future` re-schedules itself and returns `Poll::Pending`. - // This makes it possible either to interleave other operations in-between the block imports, - // or to stop the operation completely. - let import = future::poll_fn(move |cx| { - // Start by reading the number of blocks if not done so already. - let count = match count { - Some(c) => c, - None => { - let c: u64 = match Decode::decode(&mut io_reader_input) { - Ok(c) => c, - Err(err) => { - let err = format!("Error reading file: {}", err); - return std::task::Poll::Ready(Err(From::from(err))); - }, - }; - info!("📦 Importing {} blocks", c); - count = Some(c); - c - } - }; - - // Read blocks from the input. - if read_block_count < count { - match SignedBlock::::decode(&mut io_reader_input) { - Ok(signed) => { - let (header, extrinsics) = signed.block.deconstruct(); - let hash = header.hash(); - // import queue handles verification and importing it into the client - queue.import_blocks(BlockOrigin::File, vec![ - IncomingBlock:: { - hash, - header: Some(header), - body: Some(extrinsics), - justification: signed.justification, - origin: None, - allow_missing_state: false, - import_existing: force, - } - ]); - } - Err(e) => { - warn!("Error reading block data at {}: {}", read_block_count, e); - return std::task::Poll::Ready(Ok(())); - } - } - - read_block_count += 1; - if read_block_count % 1000 == 0 { - info!("#{} blocks were added to the queue", read_block_count); - } - - cx.waker().wake_by_ref(); - return std::task::Poll::Pending; - } - - let blocks_before = link.imported_blocks; - queue.poll_actions(cx, &mut link); - - if link.has_error { - info!( - "Stopping after #{} blocks because of an error", - link.imported_blocks, - ); - return std::task::Poll::Ready(Ok(())); - } - - if link.imported_blocks / 1000 != blocks_before / 1000 { - info!( - "#{} blocks were imported (#{} left)", - 
link.imported_blocks, - count - link.imported_blocks - ); - } - - if link.imported_blocks >= count { - info!("🎉 Imported {} blocks. Best: #{}", read_block_count, client.chain_info().best_number); - return std::task::Poll::Ready(Ok(())); - - } else { - // Polling the import queue will re-schedule the task when ready. - return std::task::Poll::Pending; - } - }); - Box::pin(import) - } - - fn export_blocks( - self, - mut output: impl Write + 'static, - from: NumberFor, - to: Option>, - binary: bool - ) -> Pin>>> { - let client = self.client; - let mut block = from; - - let last = match to { - Some(v) if v.is_zero() => One::one(), - Some(v) => v, - None => client.chain_info().best_number, - }; - - let mut wrote_header = false; - - // Exporting blocks is implemented as a future, because we want the operation to be - // interruptible. - // - // Every time we write a block to the output, the `Future` re-schedules itself and returns - // `Poll::Pending`. - // This makes it possible either to interleave other operations in-between the block exports, - // or to stop the operation completely. - let export = future::poll_fn(move |cx| { - if last < block { - return std::task::Poll::Ready(Err("Invalid block range specified".into())); - } - - if !wrote_header { - info!("Exporting blocks from #{} to #{}", block, last); - if binary { - let last_: u64 = last.saturated_into::(); - let block_: u64 = block.saturated_into::(); - let len: u64 = last_ - block_ + 1; - output.write_all(&len.encode())?; - } - wrote_header = true; - } - - match client.block(&BlockId::number(block))? { - Some(block) => { - if binary { - output.write_all(&block.encode())?; - } else { - serde_json::to_writer(&mut output, &block) - .map_err(|e| format!("Error writing JSON: {}", e))?; - } - }, - // Reached end of the chain. 
- None => return std::task::Poll::Ready(Ok(())), - } - if (block % 10000.into()).is_zero() { - info!("#{}", block); - } - if block == last { - return std::task::Poll::Ready(Ok(())); - } - block += One::one(); - - // Re-schedule the task in order to continue the operation. - cx.waker().wake_by_ref(); - std::task::Poll::Pending - }); - - Box::pin(export) - } - - fn revert_chain( - &self, - blocks: NumberFor - ) -> Result<(), Error> { - let reverted = self.client.revert(blocks)?; - let info = self.client.chain_info(); - - if reverted.is_zero() { - info!("There aren't any non-finalized blocks to revert."); - } else { - info!("Reverted {} blocks. Best: #{} ({})", reverted, info.best_number, info.best_hash); - } - Ok(()) - } - - fn check_block( - self, - block_id: BlockId - ) -> Pin> + Send>> { - match self.client.block(&block_id) { - Ok(Some(block)) => { - let mut buf = Vec::new(); - 1u64.encode_to(&mut buf); - block.encode_to(&mut buf); - let reader = std::io::Cursor::new(buf); - self.import_blocks(reader, true) - } - Ok(None) => Box::pin(future::err("Unknown block".into())), - Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), - } - } + type Block = TBl; + type NativeDispatch = TExecDisp; + + fn import_blocks( + self, + input: impl Read + Seek + Send + 'static, + force: bool, + ) -> Pin> + Send>> { + struct WaitLink { + imported_blocks: u64, + has_error: bool, + } + + impl WaitLink { + fn new() -> WaitLink { + WaitLink { + imported_blocks: 0, + has_error: false, + } + } + } + + impl Link for WaitLink { + fn blocks_processed( + &mut self, + imported: usize, + _count: usize, + results: Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + ) { + self.imported_blocks += imported as u64; + + for result in results { + if let (Err(err), hash) = result { + warn!( + "There was an error importing block with hash {:?}: {:?}", + hash, err + ); + self.has_error = true; + break; + } + } + } + } + + let client = self.client; + let mut queue = 
self.import_queue; + + let mut io_reader_input = IoReader(input); + let mut count = None::; + let mut read_block_count = 0; + let mut link = WaitLink::new(); + + // Importing blocks is implemented as a future, because we want the operation to be + // interruptible. + // + // Every time we read a block from the input or import a bunch of blocks from the import + // queue, the `Future` re-schedules itself and returns `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block imports, + // or to stop the operation completely. + let import = future::poll_fn(move |cx| { + // Start by reading the number of blocks if not done so already. + let count = match count { + Some(c) => c, + None => { + let c: u64 = match Decode::decode(&mut io_reader_input) { + Ok(c) => c, + Err(err) => { + let err = format!("Error reading file: {}", err); + return std::task::Poll::Ready(Err(From::from(err))); + } + }; + info!("📦 Importing {} blocks", c); + count = Some(c); + c + } + }; + + // Read blocks from the input. 
+ if read_block_count < count { + match SignedBlock::::decode(&mut io_reader_input) { + Ok(signed) => { + let (header, extrinsics) = signed.block.deconstruct(); + let hash = header.hash(); + // import queue handles verification and importing it into the client + queue.import_blocks( + BlockOrigin::File, + vec![IncomingBlock:: { + hash, + header: Some(header), + body: Some(extrinsics), + justification: signed.justification, + origin: None, + allow_missing_state: false, + import_existing: force, + }], + ); + } + Err(e) => { + warn!("Error reading block data at {}: {}", read_block_count, e); + return std::task::Poll::Ready(Ok(())); + } + } + + read_block_count += 1; + if read_block_count % 1000 == 0 { + info!("#{} blocks were added to the queue", read_block_count); + } + + cx.waker().wake_by_ref(); + return std::task::Poll::Pending; + } + + let blocks_before = link.imported_blocks; + queue.poll_actions(cx, &mut link); + + if link.has_error { + info!( + "Stopping after #{} blocks because of an error", + link.imported_blocks, + ); + return std::task::Poll::Ready(Ok(())); + } + + if link.imported_blocks / 1000 != blocks_before / 1000 { + info!( + "#{} blocks were imported (#{} left)", + link.imported_blocks, + count - link.imported_blocks + ); + } + + if link.imported_blocks >= count { + info!( + "🎉 Imported {} blocks. Best: #{}", + read_block_count, + client.chain_info().best_number + ); + return std::task::Poll::Ready(Ok(())); + } else { + // Polling the import queue will re-schedule the task when ready. 
+ return std::task::Poll::Pending; + } + }); + Box::pin(import) + } + + fn export_blocks( + self, + mut output: impl Write + 'static, + from: NumberFor, + to: Option>, + binary: bool, + ) -> Pin>>> { + let client = self.client; + let mut block = from; + + let last = match to { + Some(v) if v.is_zero() => One::one(), + Some(v) => v, + None => client.chain_info().best_number, + }; + + let mut wrote_header = false; + + // Exporting blocks is implemented as a future, because we want the operation to be + // interruptible. + // + // Every time we write a block to the output, the `Future` re-schedules itself and returns + // `Poll::Pending`. + // This makes it possible either to interleave other operations in-between the block exports, + // or to stop the operation completely. + let export = future::poll_fn(move |cx| { + if last < block { + return std::task::Poll::Ready(Err("Invalid block range specified".into())); + } + + if !wrote_header { + info!("Exporting blocks from #{} to #{}", block, last); + if binary { + let last_: u64 = last.saturated_into::(); + let block_: u64 = block.saturated_into::(); + let len: u64 = last_ - block_ + 1; + output.write_all(&len.encode())?; + } + wrote_header = true; + } + + match client.block(&BlockId::number(block))? { + Some(block) => { + if binary { + output.write_all(&block.encode())?; + } else { + serde_json::to_writer(&mut output, &block) + .map_err(|e| format!("Error writing JSON: {}", e))?; + } + } + // Reached end of the chain. + None => return std::task::Poll::Ready(Ok(())), + } + if (block % 10000.into()).is_zero() { + info!("#{}", block); + } + if block == last { + return std::task::Poll::Ready(Ok(())); + } + block += One::one(); + + // Re-schedule the task in order to continue the operation. 
+ cx.waker().wake_by_ref(); + std::task::Poll::Pending + }); + + Box::pin(export) + } + + fn revert_chain(&self, blocks: NumberFor) -> Result<(), Error> { + let reverted = self.client.revert(blocks)?; + let info = self.client.chain_info(); + + if reverted.is_zero() { + info!("There aren't any non-finalized blocks to revert."); + } else { + info!( + "Reverted {} blocks. Best: #{} ({})", + reverted, info.best_number, info.best_hash + ); + } + Ok(()) + } + + fn check_block( + self, + block_id: BlockId, + ) -> Pin> + Send>> { + match self.client.block(&block_id) { + Ok(Some(block)) => { + let mut buf = Vec::new(); + 1u64.encode_to(&mut buf); + block.encode_to(&mut buf); + let reader = std::io::Cursor::new(buf); + self.import_blocks(reader, true) + } + Ok(None) => Box::pin(future::err("Unknown block".into())), + Err(e) => Box::pin(future::err(format!("Error reading block: {:?}", e).into())), + } + } } diff --git a/client/service/src/config.rs b/client/service/src/config.rs index b90bed723f..146e8b8183 100644 --- a/client/service/src/config.rs +++ b/client/service/src/config.rs @@ -17,138 +17,146 @@ //! Service configuration. 
pub use sc_client::ExecutionStrategies; -pub use sc_client_db::{Database, PruningMode, DatabaseSettingsSrc as DatabaseConfig}; -pub use sc_network::Multiaddr; -pub use sc_network::config::{ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, Role, NodeKeyConfig}; +pub use sc_client_db::{Database, DatabaseSettingsSrc as DatabaseConfig, PruningMode}; pub use sc_executor::WasmExecutionMethod; +pub use sc_network::config::{ + ExtTransport, MultiaddrWithPeerId, NetworkConfiguration, NodeKeyConfig, Role, +}; +pub use sc_network::Multiaddr; -use std::{future::Future, path::{PathBuf, Path}, pin::Pin, net::SocketAddr, sync::Arc}; -pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +use prometheus_endpoint::Registry; use sc_chain_spec::ChainSpec; -use sp_core::crypto::Protected; pub use sc_telemetry::TelemetryEndpoints; -use prometheus_endpoint::Registry; +pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +use sp_core::crypto::Protected; +use std::{ + future::Future, + net::SocketAddr, + path::{Path, PathBuf}, + pin::Pin, + sync::Arc, +}; /// Service configuration. pub struct Configuration { - /// Implementation name - pub impl_name: &'static str, - /// Implementation version (see sc-cli to see an example of format) - pub impl_version: &'static str, - /// Node role. - pub role: Role, - /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. - pub task_executor: Arc + Send>>) + Send + Sync>, - /// Extrinsic pool configuration. - pub transaction_pool: TransactionPoolOptions, - /// Network configuration. - pub network: NetworkConfiguration, - /// Configuration for the keystore. - pub keystore: KeystoreConfig, - /// Configuration for the database. - pub database: DatabaseConfig, - /// Size of internal state cache in Bytes - pub state_cache_size: usize, - /// Size in percent of cache size dedicated to child tries - pub state_cache_child_ratio: Option, - /// Pruning settings. 
- pub pruning: PruningMode, - /// Chain configuration. - pub chain_spec: Box, - /// Wasm execution method. - pub wasm_method: WasmExecutionMethod, - /// Execution strategies. - pub execution_strategies: ExecutionStrategies, - /// RPC over HTTP binding address. `None` if disabled. - pub rpc_http: Option, - /// RPC over Websockets binding address. `None` if disabled. - pub rpc_ws: Option, - /// Maximum number of connections for WebSockets RPC server. `None` if default. - pub rpc_ws_max_connections: Option, - /// CORS settings for HTTP & WS servers. `None` if all origins are allowed. - pub rpc_cors: Option>, - /// Prometheus endpoint configuration. `None` if disabled. - pub prometheus_config: Option, - /// Telemetry service URL. `None` if disabled. - pub telemetry_endpoints: Option, - /// External WASM transport for the telemetry. If `Some`, when connection to a telemetry - /// endpoint, this transport will be tried in priority before all others. - pub telemetry_external_transport: Option, - /// The default number of 64KB pages to allocate for Wasm execution - pub default_heap_pages: Option, - /// Should offchain workers be executed. - pub offchain_worker: bool, - /// Enable authoring even when offline. - pub force_authoring: bool, - /// Disable GRANDPA when running in validator mode - pub disable_grandpa: bool, - /// Development key seed. - /// - /// When running in development mode, the seed will be used to generate authority keys by the keystore. - /// - /// Should only be set when `node` is running development mode. - pub dev_key_seed: Option, - /// Tracing targets - pub tracing_targets: Option, - /// Tracing receiver - pub tracing_receiver: sc_tracing::TracingReceiver, - /// The size of the instances cache. - /// - /// The default value is 8. 
- pub max_runtime_instances: usize, - /// Announce block automatically after they have been imported - pub announce_block: bool, + /// Implementation name + pub impl_name: &'static str, + /// Implementation version (see sc-cli to see an example of format) + pub impl_version: &'static str, + /// Node role. + pub role: Role, + /// How to spawn background tasks. Mandatory, otherwise creating a `Service` will error. + pub task_executor: Arc + Send>>) + Send + Sync>, + /// Extrinsic pool configuration. + pub transaction_pool: TransactionPoolOptions, + /// Network configuration. + pub network: NetworkConfiguration, + /// Configuration for the keystore. + pub keystore: KeystoreConfig, + /// Configuration for the database. + pub database: DatabaseConfig, + /// Size of internal state cache in Bytes + pub state_cache_size: usize, + /// Size in percent of cache size dedicated to child tries + pub state_cache_child_ratio: Option, + /// Pruning settings. + pub pruning: PruningMode, + /// Chain configuration. + pub chain_spec: Box, + /// Wasm execution method. + pub wasm_method: WasmExecutionMethod, + /// Execution strategies. + pub execution_strategies: ExecutionStrategies, + /// RPC over HTTP binding address. `None` if disabled. + pub rpc_http: Option, + /// RPC over Websockets binding address. `None` if disabled. + pub rpc_ws: Option, + /// Maximum number of connections for WebSockets RPC server. `None` if default. + pub rpc_ws_max_connections: Option, + /// CORS settings for HTTP & WS servers. `None` if all origins are allowed. + pub rpc_cors: Option>, + /// Prometheus endpoint configuration. `None` if disabled. + pub prometheus_config: Option, + /// Telemetry service URL. `None` if disabled. + pub telemetry_endpoints: Option, + /// External WASM transport for the telemetry. If `Some`, when connection to a telemetry + /// endpoint, this transport will be tried in priority before all others. 
+ pub telemetry_external_transport: Option, + /// The default number of 64KB pages to allocate for Wasm execution + pub default_heap_pages: Option, + /// Should offchain workers be executed. + pub offchain_worker: bool, + /// Enable authoring even when offline. + pub force_authoring: bool, + /// Disable GRANDPA when running in validator mode + pub disable_grandpa: bool, + /// Development key seed. + /// + /// When running in development mode, the seed will be used to generate authority keys by the keystore. + /// + /// Should only be set when `node` is running development mode. + pub dev_key_seed: Option, + /// Tracing targets + pub tracing_targets: Option, + /// Tracing receiver + pub tracing_receiver: sc_tracing::TracingReceiver, + /// The size of the instances cache. + /// + /// The default value is 8. + pub max_runtime_instances: usize, + /// Announce block automatically after they have been imported + pub announce_block: bool, } /// Configuration of the client keystore. #[derive(Clone)] pub enum KeystoreConfig { - /// Keystore at a path on-disk. Recommended for native nodes. - Path { - /// The path of the keystore. - path: PathBuf, - /// Node keystore's password. - password: Option> - }, - /// In-memory keystore. Recommended for in-browser nodes. - InMemory, + /// Keystore at a path on-disk. Recommended for native nodes. + Path { + /// The path of the keystore. + path: PathBuf, + /// Node keystore's password. + password: Option>, + }, + /// In-memory keystore. Recommended for in-browser nodes. + InMemory, } impl KeystoreConfig { - /// Returns the path for the keystore. - pub fn path(&self) -> Option<&Path> { - match self { - Self::Path { path, .. } => Some(path), - Self::InMemory => None, - } - } + /// Returns the path for the keystore. + pub fn path(&self) -> Option<&Path> { + match self { + Self::Path { path, .. } => Some(path), + Self::InMemory => None, + } + } } /// Configuration of the Prometheus endpoint. 
#[derive(Clone)] pub struct PrometheusConfig { - /// Port to use. - pub port: SocketAddr, - /// A metrics registry to use. Useful for setting the metric prefix. - pub registry: Registry, + /// Port to use. + pub port: SocketAddr, + /// A metrics registry to use. Useful for setting the metric prefix. + pub registry: Registry, } impl PrometheusConfig { - /// Create a new config using the default registry. - /// - /// The default registry prefixes metrics with `substrate`. - pub fn new_with_default_registry(port: SocketAddr) -> Self { - Self { - port, - registry: Registry::new_custom(Some("substrate".into()), None) - .expect("this can only fail if the prefix is empty") - } - } + /// Create a new config using the default registry. + /// + /// The default registry prefixes metrics with `substrate`. + pub fn new_with_default_registry(port: SocketAddr) -> Self { + Self { + port, + registry: Registry::new_custom(Some("substrate".into()), None) + .expect("this can only fail if the prefix is empty"), + } + } } impl Configuration { - /// Returns a string displaying the node role. - pub fn display_role(&self) -> String { - self.role.to_string() - } + /// Returns a string displaying the node role. + pub fn display_role(&self) -> String { + self.role.to_string() + } } diff --git a/client/service/src/error.rs b/client/service/src/error.rs index 5a78a18789..fbbd5a3e69 100644 --- a/client/service/src/error.rs +++ b/client/service/src/error.rs @@ -16,10 +16,10 @@ //! Errors that can occur during the service operation. -use sc_network; use sc_keystore; -use sp_consensus; +use sc_network; use sp_blockchain; +use sp_consensus; /// Service Result typedef. pub type Result = std::result::Result; @@ -27,47 +27,47 @@ pub type Result = std::result::Result; /// Service errors. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Client error. - Client(sp_blockchain::Error), - /// IO error. - Io(std::io::Error), - /// Consensus error. 
- Consensus(sp_consensus::Error), - /// Network error. - Network(sc_network::error::Error), - /// Keystore error. - Keystore(sc_keystore::Error), - /// Best chain selection strategy is missing. - #[display(fmt="Best chain selection strategy (SelectChain) is not provided.")] - SelectChainRequired, - /// Tasks executor is missing. - #[display(fmt="Tasks executor hasn't been provided.")] - TaskExecutorRequired, - /// Other error. - Other(String), + /// Client error. + Client(sp_blockchain::Error), + /// IO error. + Io(std::io::Error), + /// Consensus error. + Consensus(sp_consensus::Error), + /// Network error. + Network(sc_network::error::Error), + /// Keystore error. + Keystore(sc_keystore::Error), + /// Best chain selection strategy is missing. + #[display(fmt = "Best chain selection strategy (SelectChain) is not provided.")] + SelectChainRequired, + /// Tasks executor is missing. + #[display(fmt = "Tasks executor hasn't been provided.")] + TaskExecutorRequired, + /// Other error. + Other(String), } impl<'a> From<&'a str> for Error { - fn from(s: &'a str) -> Self { - Error::Other(s.into()) - } + fn from(s: &'a str) -> Self { + Error::Other(s.into()) + } } impl From for Error { - fn from(e: prometheus_endpoint::PrometheusError) -> Self { - Error::Other(format!("Prometheus error: {}", e)) - } + fn from(e: prometheus_endpoint::PrometheusError) -> Self { + Error::Other(format!("Prometheus error: {}", e)) + } } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Client(ref err) => Some(err), - Error::Io(ref err) => Some(err), - Error::Consensus(ref err) => Some(err), - Error::Network(ref err) => Some(err), - Error::Keystore(ref err) => Some(err), - _ => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Client(ref err) => Some(err), + Error::Io(ref err) => Some(err), + Error::Consensus(ref err) => Some(err), + Error::Network(ref err) => 
Some(err), + Error::Keystore(ref err) => Some(err), + _ => None, + } + } } diff --git a/client/service/src/lib.rs b/client/service/src/lib.rs index 97481fcc25..93a60821c6 100644 --- a/client/service/src/lib.rs +++ b/client/service/src/lib.rs @@ -24,58 +24,57 @@ pub mod config; pub mod chain_ops; pub mod error; -mod metrics; mod builder; +mod metrics; mod status_sinks; mod task_manager; -use std::{io, pin::Pin}; +use parking_lot::Mutex; +use std::collections::HashMap; use std::marker::PhantomData; use std::net::SocketAddr; -use std::collections::HashMap; +use std::task::{Context, Poll}; use std::time::Duration; +use std::{io, pin::Pin}; use wasm_timer::Instant; -use std::task::{Poll, Context}; -use parking_lot::Mutex; -use sc_client::Client; +use codec::{Decode, Encode}; use futures::{ - Future, FutureExt, Stream, StreamExt, - compat::*, - sink::SinkExt, - task::{Spawn, FutureObj, SpawnError}, + compat::*, + sink::SinkExt, + task::{FutureObj, Spawn, SpawnError}, + Future, FutureExt, Stream, StreamExt, }; -use sc_network::{NetworkService, network_state::NetworkState, PeerId, ReportHandle}; -use log::{log, warn, debug, error, Level}; -use codec::{Encode, Decode}; -use sp_runtime::generic::BlockId; -use sp_runtime::traits::{NumberFor, Block as BlockT}; +use log::{debug, error, log, warn, Level}; use parity_util_mem::MallocSizeOf; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use sc_client::Client; +use sc_network::{network_state::NetworkState, NetworkService, PeerId, ReportHandle}; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, NumberFor}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; -pub use self::error::Error; pub use self::builder::{ - new_full_client, - ServiceBuilder, ServiceBuilderCommand, TFullClient, TLightClient, TFullBackend, TLightBackend, - TFullCallExecutor, TLightCallExecutor, + new_full_client, ServiceBuilder, 
ServiceBuilderCommand, TFullBackend, TFullCallExecutor, + TFullClient, TLightBackend, TLightCallExecutor, TLightClient, }; -pub use config::{Configuration, Role, PruningMode, DatabaseConfig}; +pub use self::error::Error; +pub use config::{Configuration, DatabaseConfig, PruningMode, Role}; pub use sc_chain_spec::{ - ChainSpec, GenericChainSpec, Properties, RuntimeGenesis, Extension as ChainSpecExtension, - NoExtension, ChainType, + ChainSpec, ChainType, Extension as ChainSpecExtension, GenericChainSpec, NoExtension, + Properties, RuntimeGenesis, }; -pub use sp_transaction_pool::{TransactionPool, InPoolTransaction, error::IntoPoolError}; -pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; pub use sc_client::FinalityNotifications; -pub use sc_rpc::Metadata as RpcMetadata; pub use sc_executor::NativeExecutionDispatch; #[doc(hidden)] -pub use std::{ops::Deref, result::Result, sync::Arc}; -#[doc(hidden)] -pub use sc_network::config::{FinalityProofProvider, OnDemand, BoxFinalityProofRequestBuilder}; +pub use sc_network::config::{BoxFinalityProofRequestBuilder, FinalityProofProvider, OnDemand}; +pub use sc_rpc::Metadata as RpcMetadata; pub use sc_tracing::TracingReceiver; -pub use task_manager::{TaskManagerBuilder, SpawnTaskHandle}; +pub use sc_transaction_pool::txpool::Options as TransactionPoolOptions; +pub use sp_transaction_pool::{error::IntoPoolError, InPoolTransaction, TransactionPool}; +#[doc(hidden)] +pub use std::{ops::Deref, result::Result, sync::Arc}; use task_manager::TaskManager; +pub use task_manager::{SpawnTaskHandle, TaskManagerBuilder}; const DEFAULT_PROTOCOL_ID: &str = "sup"; @@ -91,636 +90,695 @@ impl MallocSizeOfWasm for T {} /// Substrate service. pub struct Service { - client: Arc, - task_manager: TaskManager, - select_chain: Option, - network: Arc, - /// Sinks to propagate network status updates. - /// For each element, every time the `Interval` fires we push an element on the sender. 
- network_status_sinks: Arc>>, - transaction_pool: Arc, - /// Send a signal when a spawned essential task has concluded. The next time - /// the service future is polled it should complete with an error. - essential_failed_tx: TracingUnboundedSender<()>, - /// A receiver for spawned essential-tasks concluding. - essential_failed_rx: TracingUnboundedReceiver<()>, - rpc_handlers: sc_rpc_server::RpcHandler, - _rpc: Box, - _telemetry: Option, - _telemetry_on_connect_sinks: Arc>>>, - _offchain_workers: Option>, - keystore: sc_keystore::KeyStorePtr, - marker: PhantomData, - prometheus_registry: Option, + client: Arc, + task_manager: TaskManager, + select_chain: Option, + network: Arc, + /// Sinks to propagate network status updates. + /// For each element, every time the `Interval` fires we push an element on the sender. + network_status_sinks: Arc>>, + transaction_pool: Arc, + /// Send a signal when a spawned essential task has concluded. The next time + /// the service future is polled it should complete with an error. + essential_failed_tx: TracingUnboundedSender<()>, + /// A receiver for spawned essential-tasks concluding. + essential_failed_rx: TracingUnboundedReceiver<()>, + rpc_handlers: sc_rpc_server::RpcHandler, + _rpc: Box, + _telemetry: Option, + _telemetry_on_connect_sinks: Arc>>>, + _offchain_workers: Option>, + keystore: sc_keystore::KeyStorePtr, + marker: PhantomData, + prometheus_registry: Option, } -impl Unpin for Service {} +impl Unpin + for Service +{ +} /// Abstraction over a Substrate service. -pub trait AbstractService: 'static + Future> + - Spawn + Send + Unpin { - /// Type of block of this chain. - type Block: BlockT; - /// Backend storage for the client. - type Backend: 'static + sc_client_api::backend::Backend; - /// How to execute calls towards the runtime. - type CallExecutor: 'static + sc_client::CallExecutor + Send + Sync + Clone; - /// API that the runtime provides. - type RuntimeApi: Send + Sync; - /// Chain selection algorithm. 
- type SelectChain: sp_consensus::SelectChain; - /// Transaction pool. - type TransactionPool: TransactionPool + MallocSizeOfWasm; - - /// Get event stream for telemetry connection established events. - fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()>; - - /// return a shared instance of Telemetry (if enabled) - fn telemetry(&self) -> Option; - - /// Spawns a task in the background that runs the future passed as parameter. - /// - /// Information about this task will be reported to Prometheus. - /// - /// The task name is a `&'static str` as opposed to a `String`. The reason for that is that - /// in order to avoid memory consumption issues with the Prometheus metrics, the set of - /// possible task names has to be bounded. - fn spawn_task(&self, name: &'static str, task: impl Future + Send + 'static); - - /// Spawns a task in the background that runs the future passed as - /// parameter. The given task is considered essential, i.e. if it errors we - /// trigger a service exit. - fn spawn_essential_task(&self, name: &'static str, task: impl Future + Send + 'static); - - /// Returns a handle for spawning tasks. - fn spawn_task_handle(&self) -> SpawnTaskHandle; - - /// Returns the keystore that stores keys. - fn keystore(&self) -> sc_keystore::KeyStorePtr; - - /// Starts an RPC query. - /// - /// The query is passed as a string and must be a JSON text similar to what an HTTP client - /// would for example send. - /// - /// Returns a `Future` that contains the optional response. - /// - /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to - /// send back spontaneous events. - fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>>; - - /// Get shared client instance. - fn client(&self) -> Arc>; - - /// Get clone of select chain. - fn select_chain(&self) -> Option; - - /// Get shared network instance. 
- fn network(&self) - -> Arc::Hash>>; - - /// Returns a receiver that periodically receives a status of the network. - fn network_status(&self, interval: Duration) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)>; - - /// Get shared transaction pool instance. - fn transaction_pool(&self) -> Arc; - - /// Get a handle to a future that will resolve on exit. - #[deprecated(note = "Use `spawn_task`/`spawn_essential_task` instead, those functions will attach on_exit signal.")] - fn on_exit(&self) -> ::exit_future::Exit; - - /// Get the prometheus metrics registry, if available. - fn prometheus_registry(&self) -> Option; +pub trait AbstractService: + 'static + Future> + Spawn + Send + Unpin +{ + /// Type of block of this chain. + type Block: BlockT; + /// Backend storage for the client. + type Backend: 'static + sc_client_api::backend::Backend; + /// How to execute calls towards the runtime. + type CallExecutor: 'static + sc_client::CallExecutor + Send + Sync + Clone; + /// API that the runtime provides. + type RuntimeApi: Send + Sync; + /// Chain selection algorithm. + type SelectChain: sp_consensus::SelectChain; + /// Transaction pool. + type TransactionPool: TransactionPool + MallocSizeOfWasm; + + /// Get event stream for telemetry connection established events. + fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()>; + + /// return a shared instance of Telemetry (if enabled) + fn telemetry(&self) -> Option; + + /// Spawns a task in the background that runs the future passed as parameter. + /// + /// Information about this task will be reported to Prometheus. + /// + /// The task name is a `&'static str` as opposed to a `String`. The reason for that is that + /// in order to avoid memory consumption issues with the Prometheus metrics, the set of + /// possible task names has to be bounded. 
+ fn spawn_task(&self, name: &'static str, task: impl Future + Send + 'static); + + /// Spawns a task in the background that runs the future passed as + /// parameter. The given task is considered essential, i.e. if it errors we + /// trigger a service exit. + fn spawn_essential_task( + &self, + name: &'static str, + task: impl Future + Send + 'static, + ); + + /// Returns a handle for spawning tasks. + fn spawn_task_handle(&self) -> SpawnTaskHandle; + + /// Returns the keystore that stores keys. + fn keystore(&self) -> sc_keystore::KeyStorePtr; + + /// Starts an RPC query. + /// + /// The query is passed as a string and must be a JSON text similar to what an HTTP client + /// would for example send. + /// + /// Returns a `Future` that contains the optional response. + /// + /// If the request subscribes you to events, the `Sender` in the `RpcSession` object is used to + /// send back spontaneous events. + fn rpc_query( + &self, + mem: &RpcSession, + request: &str, + ) -> Pin> + Send>>; + + /// Get shared client instance. + fn client( + &self, + ) -> Arc>; + + /// Get clone of select chain. + fn select_chain(&self) -> Option; + + /// Get shared network instance. + fn network(&self) -> Arc::Hash>>; + + /// Returns a receiver that periodically receives a status of the network. + fn network_status( + &self, + interval: Duration, + ) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)>; + + /// Get shared transaction pool instance. + fn transaction_pool(&self) -> Arc; + + /// Get a handle to a future that will resolve on exit. + #[deprecated( + note = "Use `spawn_task`/`spawn_essential_task` instead, those functions will attach on_exit signal." + )] + fn on_exit(&self) -> ::exit_future::Exit; + + /// Get the prometheus metrics registry, if available. 
+ fn prometheus_registry(&self) -> Option; } -impl AbstractService for - Service, TSc, NetworkStatus, - NetworkService, TExPool, TOc> +impl AbstractService + for Service< + TBl, + Client, + TSc, + NetworkStatus, + NetworkService, + TExPool, + TOc, + > where - TBl: BlockT, - TBackend: 'static + sc_client_api::backend::Backend, - TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, - TRtApi: 'static + Send + Sync, - TSc: sp_consensus::SelectChain + 'static + Clone + Send + Unpin, - TExPool: 'static + TransactionPool + MallocSizeOfWasm, - TOc: 'static + Send + Sync, + TBl: BlockT, + TBackend: 'static + sc_client_api::backend::Backend, + TExec: 'static + sc_client::CallExecutor + Send + Sync + Clone, + TRtApi: 'static + Send + Sync, + TSc: sp_consensus::SelectChain + 'static + Clone + Send + Unpin, + TExPool: 'static + TransactionPool + MallocSizeOfWasm, + TOc: 'static + Send + Sync, { - type Block = TBl; - type Backend = TBackend; - type CallExecutor = TExec; - type RuntimeApi = TRtApi; - type SelectChain = TSc; - type TransactionPool = TExPool; - - fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()> { - let (sink, stream) = tracing_unbounded("mpsc_telemetry_on_connect"); - self._telemetry_on_connect_sinks.lock().push(sink); - stream - } - - fn telemetry(&self) -> Option { - self._telemetry.as_ref().map(|t| t.clone()) - } - - fn keystore(&self) -> sc_keystore::KeyStorePtr { - self.keystore.clone() - } - - fn spawn_task(&self, name: &'static str, task: impl Future + Send + 'static) { - self.task_manager.spawn(name, task) - } - - fn spawn_essential_task(&self, name: &'static str, task: impl Future + Send + 'static) { - let mut essential_failed = self.essential_failed_tx.clone(); - let essential_task = std::panic::AssertUnwindSafe(task) - .catch_unwind() - .map(move |_| { - error!("Essential task failed. 
Shutting down service."); - let _ = essential_failed.send(()); - }); - - let _ = self.spawn_task(name, essential_task); - } - - fn spawn_task_handle(&self) -> SpawnTaskHandle { - self.task_manager.spawn_handle() - } - - fn rpc_query(&self, mem: &RpcSession, request: &str) -> Pin> + Send>> { - Box::pin( - self.rpc_handlers.handle_request(request, mem.metadata.clone()) - .compat() - .map(|res| res.expect("this should never fail")) - ) - } - - fn client(&self) -> Arc> { - self.client.clone() - } - - fn select_chain(&self) -> Option { - self.select_chain.clone() - } - - fn network(&self) - -> Arc::Hash>> - { - self.network.clone() - } - - fn network_status(&self, interval: Duration) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)> { - let (sink, stream) = tracing_unbounded("mpsc_network_status"); - self.network_status_sinks.lock().push(interval, sink); - stream - } - - fn transaction_pool(&self) -> Arc { - self.transaction_pool.clone() - } - - fn on_exit(&self) -> exit_future::Exit { - self.task_manager.on_exit() - } - - fn prometheus_registry(&self) -> Option { - self.prometheus_registry.clone() - } + type Block = TBl; + type Backend = TBackend; + type CallExecutor = TExec; + type RuntimeApi = TRtApi; + type SelectChain = TSc; + type TransactionPool = TExPool; + + fn telemetry_on_connect_stream(&self) -> TracingUnboundedReceiver<()> { + let (sink, stream) = tracing_unbounded("mpsc_telemetry_on_connect"); + self._telemetry_on_connect_sinks.lock().push(sink); + stream + } + + fn telemetry(&self) -> Option { + self._telemetry.as_ref().map(|t| t.clone()) + } + + fn keystore(&self) -> sc_keystore::KeyStorePtr { + self.keystore.clone() + } + + fn spawn_task(&self, name: &'static str, task: impl Future + Send + 'static) { + self.task_manager.spawn(name, task) + } + + fn spawn_essential_task( + &self, + name: &'static str, + task: impl Future + Send + 'static, + ) { + let mut essential_failed = self.essential_failed_tx.clone(); + let essential_task = 
std::panic::AssertUnwindSafe(task) + .catch_unwind() + .map(move |_| { + error!("Essential task failed. Shutting down service."); + let _ = essential_failed.send(()); + }); + + let _ = self.spawn_task(name, essential_task); + } + + fn spawn_task_handle(&self) -> SpawnTaskHandle { + self.task_manager.spawn_handle() + } + + fn rpc_query( + &self, + mem: &RpcSession, + request: &str, + ) -> Pin> + Send>> { + Box::pin( + self.rpc_handlers + .handle_request(request, mem.metadata.clone()) + .compat() + .map(|res| res.expect("this should never fail")), + ) + } + + fn client( + &self, + ) -> Arc> + { + self.client.clone() + } + + fn select_chain(&self) -> Option { + self.select_chain.clone() + } + + fn network(&self) -> Arc::Hash>> { + self.network.clone() + } + + fn network_status( + &self, + interval: Duration, + ) -> TracingUnboundedReceiver<(NetworkStatus, NetworkState)> { + let (sink, stream) = tracing_unbounded("mpsc_network_status"); + self.network_status_sinks.lock().push(interval, sink); + stream + } + + fn transaction_pool(&self) -> Arc { + self.transaction_pool.clone() + } + + fn on_exit(&self) -> exit_future::Exit { + self.task_manager.on_exit() + } + + fn prometheus_registry(&self) -> Option { + self.prometheus_registry.clone() + } } -impl Future for - Service +impl Future + for Service { - type Output = Result<(), Error>; + type Output = Result<(), Error>; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = Pin::into_inner(self); + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = Pin::into_inner(self); - match Pin::new(&mut this.essential_failed_rx).poll_next(cx) { - Poll::Pending => {}, - Poll::Ready(_) => { - // Ready(None) should not be possible since we hold a live - // sender. 
- return Poll::Ready(Err(Error::Other("Essential task failed.".into()))); - } - } + match Pin::new(&mut this.essential_failed_rx).poll_next(cx) { + Poll::Pending => {} + Poll::Ready(_) => { + // Ready(None) should not be possible since we hold a live + // sender. + return Poll::Ready(Err(Error::Other("Essential task failed.".into()))); + } + } - this.task_manager.process_receiver(cx); + this.task_manager.process_receiver(cx); - // The service future never ends. - Poll::Pending - } + // The service future never ends. + Poll::Pending + } } -impl Spawn for - Service +impl Spawn + for Service { - fn spawn_obj( - &self, - future: FutureObj<'static, ()> - ) -> Result<(), SpawnError> { - self.task_manager.spawn_handle().spawn("unnamed", future); - Ok(()) - } + fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { + self.task_manager.spawn_handle().spawn("unnamed", future); + Ok(()) + } } /// Builds a never-ending future that continuously polls the network. /// /// The `status_sink` contain a list of senders to send a periodic network status to. -fn build_network_future< - B: BlockT, - C: sc_client::BlockchainEvents, - H: sc_network::ExHashT -> ( - role: Role, - mut network: sc_network::NetworkWorker, - client: Arc, - status_sinks: Arc, NetworkState)>>>, - mut rpc_rx: TracingUnboundedReceiver>, - should_have_peers: bool, - announce_imported_blocks: bool, +fn build_network_future, H: sc_network::ExHashT>( + role: Role, + mut network: sc_network::NetworkWorker, + client: Arc, + status_sinks: Arc, NetworkState)>>>, + mut rpc_rx: TracingUnboundedReceiver>, + should_have_peers: bool, + announce_imported_blocks: bool, ) -> impl Future { - let mut imported_blocks_stream = client.import_notification_stream().fuse(); - let mut finality_notification_stream = client.finality_notification_stream().fuse(); - - futures::future::poll_fn(move |cx| { - let before_polling = Instant::now(); - - // We poll `imported_blocks_stream`. 
- while let Poll::Ready(Some(notification)) = Pin::new(&mut imported_blocks_stream).poll_next(cx) { - network.on_block_imported(notification.header, notification.is_new_best); - - if announce_imported_blocks { - network.service().announce_block(notification.hash, Vec::new()); - } - } - - // We poll `finality_notification_stream`, but we only take the last event. - let mut last = None; - while let Poll::Ready(Some(item)) = Pin::new(&mut finality_notification_stream).poll_next(cx) { - last = Some(item); - } - if let Some(notification) = last { - network.on_block_finalized(notification.hash, notification.header); - } - - // Poll the RPC requests and answer them. - while let Poll::Ready(Some(request)) = Pin::new(&mut rpc_rx).poll_next(cx) { - match request { - sc_rpc::system::Request::Health(sender) => { - let _ = sender.send(sc_rpc::system::Health { - peers: network.peers_debug_info().len(), - is_syncing: network.service().is_major_syncing(), - should_have_peers, - }); - }, - sc_rpc::system::Request::LocalPeerId(sender) => { - let _ = sender.send(network.local_peer_id().to_base58()); - }, - sc_rpc::system::Request::LocalListenAddresses(sender) => { - let peer_id = network.local_peer_id().clone().into(); - let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id); - let addresses = network.listen_addresses() - .map(|addr| addr.clone().with(p2p_proto_suffix.clone()).to_string()) - .collect(); - let _ = sender.send(addresses); - }, - sc_rpc::system::Request::Peers(sender) => { - let _ = sender.send(network.peers_debug_info().into_iter().map(|(peer_id, p)| - sc_rpc::system::PeerInfo { - peer_id: peer_id.to_base58(), - roles: format!("{:?}", p.roles), - protocol_version: p.protocol_version, - best_hash: p.best_hash, - best_number: p.best_number, - } - ).collect()); - } - sc_rpc::system::Request::NetworkState(sender) => { - if let Some(network_state) = serde_json::to_value(&network.network_state()).ok() { - let _ = sender.send(network_state); - } - } - 
sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { - let x = network.add_reserved_peer(peer_addr) - .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); - let _ = sender.send(x); - } - sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { - let _ = match peer_id.parse::() { - Ok(peer_id) => { - network.remove_reserved_peer(peer_id); - sender.send(Ok(())) - } - Err(e) => sender.send(Err(sc_rpc::system::error::Error::MalformattedPeerArg( - e.to_string(), - ))), - }; - } - sc_rpc::system::Request::NodeRoles(sender) => { - use sc_rpc::system::NodeRole; - - let node_role = match role { - Role::Authority { .. } => NodeRole::Authority, - Role::Light => NodeRole::LightClient, - Role::Full => NodeRole::Full, - Role::Sentry { .. } => NodeRole::Sentry, - }; - - let _ = sender.send(vec![node_role]); - } - }; - } - - // Interval report for the external API. - status_sinks.lock().poll(cx, || { - let status = NetworkStatus { - sync_state: network.sync_state(), - best_seen_block: network.best_seen_block(), - num_sync_peers: network.num_sync_peers(), - num_connected_peers: network.num_connected_peers(), - num_active_peers: network.num_active_peers(), - average_download_per_sec: network.average_download_per_sec(), - average_upload_per_sec: network.average_upload_per_sec(), - }; - let state = network.network_state(); - (status, state) - }); - - // Main network polling. - if let Poll::Ready(Ok(())) = Pin::new(&mut network).poll(cx).map_err(|err| { - warn!(target: "service", "Error in network: {:?}", err); - }) { - return Poll::Ready(()); - } - - // Now some diagnostic for performances. 
- let polling_dur = before_polling.elapsed(); - log!( - target: "service", - if polling_dur >= Duration::from_secs(1) { Level::Warn } else { Level::Trace }, - "⚠️ Polling the network future took {:?}", - polling_dur - ); - - Poll::Pending - }) + let mut imported_blocks_stream = client.import_notification_stream().fuse(); + let mut finality_notification_stream = client.finality_notification_stream().fuse(); + + futures::future::poll_fn(move |cx| { + let before_polling = Instant::now(); + + // We poll `imported_blocks_stream`. + while let Poll::Ready(Some(notification)) = + Pin::new(&mut imported_blocks_stream).poll_next(cx) + { + network.on_block_imported(notification.header, notification.is_new_best); + + if announce_imported_blocks { + network + .service() + .announce_block(notification.hash, Vec::new()); + } + } + + // We poll `finality_notification_stream`, but we only take the last event. + let mut last = None; + while let Poll::Ready(Some(item)) = + Pin::new(&mut finality_notification_stream).poll_next(cx) + { + last = Some(item); + } + if let Some(notification) = last { + network.on_block_finalized(notification.hash, notification.header); + } + + // Poll the RPC requests and answer them. 
+ while let Poll::Ready(Some(request)) = Pin::new(&mut rpc_rx).poll_next(cx) { + match request { + sc_rpc::system::Request::Health(sender) => { + let _ = sender.send(sc_rpc::system::Health { + peers: network.peers_debug_info().len(), + is_syncing: network.service().is_major_syncing(), + should_have_peers, + }); + } + sc_rpc::system::Request::LocalPeerId(sender) => { + let _ = sender.send(network.local_peer_id().to_base58()); + } + sc_rpc::system::Request::LocalListenAddresses(sender) => { + let peer_id = network.local_peer_id().clone().into(); + let p2p_proto_suffix = sc_network::multiaddr::Protocol::P2p(peer_id); + let addresses = network + .listen_addresses() + .map(|addr| addr.clone().with(p2p_proto_suffix.clone()).to_string()) + .collect(); + let _ = sender.send(addresses); + } + sc_rpc::system::Request::Peers(sender) => { + let _ = sender.send( + network + .peers_debug_info() + .into_iter() + .map(|(peer_id, p)| sc_rpc::system::PeerInfo { + peer_id: peer_id.to_base58(), + roles: format!("{:?}", p.roles), + protocol_version: p.protocol_version, + best_hash: p.best_hash, + best_number: p.best_number, + }) + .collect(), + ); + } + sc_rpc::system::Request::NetworkState(sender) => { + if let Some(network_state) = serde_json::to_value(&network.network_state()).ok() + { + let _ = sender.send(network_state); + } + } + sc_rpc::system::Request::NetworkAddReservedPeer(peer_addr, sender) => { + let x = network + .add_reserved_peer(peer_addr) + .map_err(sc_rpc::system::error::Error::MalformattedPeerArg); + let _ = sender.send(x); + } + sc_rpc::system::Request::NetworkRemoveReservedPeer(peer_id, sender) => { + let _ = match peer_id.parse::() { + Ok(peer_id) => { + network.remove_reserved_peer(peer_id); + sender.send(Ok(())) + } + Err(e) => sender.send(Err( + sc_rpc::system::error::Error::MalformattedPeerArg(e.to_string()), + )), + }; + } + sc_rpc::system::Request::NodeRoles(sender) => { + use sc_rpc::system::NodeRole; + + let node_role = match role { + Role::Authority { .. 
} => NodeRole::Authority, + Role::Light => NodeRole::LightClient, + Role::Full => NodeRole::Full, + Role::Sentry { .. } => NodeRole::Sentry, + }; + + let _ = sender.send(vec![node_role]); + } + }; + } + + // Interval report for the external API. + status_sinks.lock().poll(cx, || { + let status = NetworkStatus { + sync_state: network.sync_state(), + best_seen_block: network.best_seen_block(), + num_sync_peers: network.num_sync_peers(), + num_connected_peers: network.num_connected_peers(), + num_active_peers: network.num_active_peers(), + average_download_per_sec: network.average_download_per_sec(), + average_upload_per_sec: network.average_upload_per_sec(), + }; + let state = network.network_state(); + (status, state) + }); + + // Main network polling. + if let Poll::Ready(Ok(())) = Pin::new(&mut network).poll(cx).map_err(|err| { + warn!(target: "service", "Error in network: {:?}", err); + }) { + return Poll::Ready(()); + } + + // Now some diagnostic for performances. + let polling_dur = before_polling.elapsed(); + log!( + target: "service", + if polling_dur >= Duration::from_secs(1) { Level::Warn } else { Level::Trace }, + "⚠️ Polling the network future took {:?}", + polling_dur + ); + + Poll::Pending + }) } /// Overview status of the network. #[derive(Clone)] pub struct NetworkStatus { - /// Current global sync state. - pub sync_state: sc_network::SyncState, - /// Target sync block number. - pub best_seen_block: Option>, - /// Number of peers participating in syncing. - pub num_sync_peers: u32, - /// Total number of connected peers - pub num_connected_peers: usize, - /// Total number of active peers. - pub num_active_peers: usize, - /// Downloaded bytes per second averaged over the past few seconds. - pub average_download_per_sec: u64, - /// Uploaded bytes per second averaged over the past few seconds. - pub average_upload_per_sec: u64, + /// Current global sync state. + pub sync_state: sc_network::SyncState, + /// Target sync block number. 
+ pub best_seen_block: Option>, + /// Number of peers participating in syncing. + pub num_sync_peers: u32, + /// Total number of connected peers + pub num_connected_peers: usize, + /// Total number of active peers. + pub num_active_peers: usize, + /// Downloaded bytes per second averaged over the past few seconds. + pub average_download_per_sec: u64, + /// Uploaded bytes per second averaged over the past few seconds. + pub average_upload_per_sec: u64, } #[cfg(not(target_os = "unknown"))] // Wrapper for HTTP and WS servers that makes sure they are properly shut down. mod waiting { - pub struct HttpServer(pub Option); - impl Drop for HttpServer { - fn drop(&mut self) { - if let Some(server) = self.0.take() { - server.close_handle().close(); - server.wait(); - } - } - } - - pub struct WsServer(pub Option); - impl Drop for WsServer { - fn drop(&mut self) { - if let Some(server) = self.0.take() { - server.close_handle().close(); - let _ = server.wait(); - } - } - } + pub struct HttpServer(pub Option); + impl Drop for HttpServer { + fn drop(&mut self) { + if let Some(server) = self.0.take() { + server.close_handle().close(); + server.wait(); + } + } + } + + pub struct WsServer(pub Option); + impl Drop for WsServer { + fn drop(&mut self) { + if let Some(server) = self.0.take() { + server.close_handle().close(); + let _ = server.wait(); + } + } + } } /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. 
#[cfg(not(target_os = "unknown"))] fn start_rpc_servers sc_rpc_server::RpcHandler>( - config: &Configuration, - mut gen_handler: H + config: &Configuration, + mut gen_handler: H, ) -> Result, error::Error> { - fn maybe_start_server(address: Option, mut start: F) -> Result, io::Error> - where F: FnMut(&SocketAddr) -> Result, - { - Ok(match address { - Some(mut address) => Some(start(&address) - .or_else(|e| match e.kind() { - io::ErrorKind::AddrInUse | - io::ErrorKind::PermissionDenied => { - warn!("Unable to bind RPC server to {}. Trying random port.", address); - address.set_port(0); - start(&address) - }, - _ => Err(e), - })?), - None => None, - }) - } - - Ok(Box::new(( - maybe_start_server( - config.rpc_http, - |address| sc_rpc_server::start_http(address, config.rpc_cors.as_ref(), gen_handler()), - )?.map(|s| waiting::HttpServer(Some(s))), - maybe_start_server( - config.rpc_ws, - |address| sc_rpc_server::start_ws( - address, - config.rpc_ws_max_connections, - config.rpc_cors.as_ref(), - gen_handler(), - ), - )?.map(|s| waiting::WsServer(Some(s))).map(Mutex::new), - ))) + fn maybe_start_server( + address: Option, + mut start: F, + ) -> Result, io::Error> + where + F: FnMut(&SocketAddr) -> Result, + { + Ok(match address { + Some(mut address) => Some(start(&address).or_else(|e| match e.kind() { + io::ErrorKind::AddrInUse | io::ErrorKind::PermissionDenied => { + warn!( + "Unable to bind RPC server to {}. Trying random port.", + address + ); + address.set_port(0); + start(&address) + } + _ => Err(e), + })?), + None => None, + }) + } + + Ok(Box::new(( + maybe_start_server(config.rpc_http, |address| { + sc_rpc_server::start_http(address, config.rpc_cors.as_ref(), gen_handler()) + })? + .map(|s| waiting::HttpServer(Some(s))), + maybe_start_server(config.rpc_ws, |address| { + sc_rpc_server::start_ws( + address, + config.rpc_ws_max_connections, + config.rpc_cors.as_ref(), + gen_handler(), + ) + })? 
+ .map(|s| waiting::WsServer(Some(s))) + .map(Mutex::new), + ))) } /// Starts RPC servers that run in their own thread, and returns an opaque object that keeps them alive. #[cfg(target_os = "unknown")] fn start_rpc_servers sc_rpc_server::RpcHandler>( - _: &Configuration, - _: H + _: &Configuration, + _: H, ) -> Result, error::Error> { - Ok(Box::new(())) + Ok(Box::new(())) } /// An RPC session. Used to perform in-memory RPC queries (ie. RPC queries that don't go through /// the HTTP or WebSockets server). #[derive(Clone)] pub struct RpcSession { - metadata: sc_rpc::Metadata, + metadata: sc_rpc::Metadata, } impl RpcSession { - /// Creates an RPC session. - /// - /// The `sender` is stored inside the `RpcSession` and is used to communicate spontaneous JSON - /// messages. - /// - /// The `RpcSession` must be kept alive in order to receive messages on the sender. - pub fn new(sender: futures01::sync::mpsc::Sender) -> RpcSession { - RpcSession { - metadata: sender.into(), - } - } + /// Creates an RPC session. + /// + /// The `sender` is stored inside the `RpcSession` and is used to communicate spontaneous JSON + /// messages. + /// + /// The `RpcSession` must be kept alive in order to receive messages on the sender. + pub fn new(sender: futures01::sync::mpsc::Sender) -> RpcSession { + RpcSession { + metadata: sender.into(), + } + } } /// Transaction pool adapter. pub struct TransactionPoolAdapter { - imports_external_transactions: bool, - pool: Arc

, - client: Arc, - executor: SpawnTaskHandle, + imports_external_transactions: bool, + pool: Arc

, + client: Arc, + executor: SpawnTaskHandle, } /// Get transactions for propagation. /// /// Function extracted to simplify the test and prevent creating `ServiceFactory`. -fn transactions_to_propagate(pool: &Pool) - -> Vec<(H, B::Extrinsic)> +fn transactions_to_propagate(pool: &Pool) -> Vec<(H, B::Extrinsic)> where - Pool: TransactionPool, - B: BlockT, - H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, - E: IntoPoolError + From, + Pool: TransactionPool, + B: BlockT, + H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, + E: IntoPoolError + From, { - pool.ready() - .filter(|t| t.is_propagable()) - .map(|t| { - let hash = t.hash().clone(); - let ex: B::Extrinsic = t.data().clone(); - (hash, ex) - }) - .collect() + pool.ready() + .filter(|t| t.is_propagable()) + .map(|t| { + let hash = t.hash().clone(); + let ex: B::Extrinsic = t.data().clone(); + (hash, ex) + }) + .collect() } -impl sc_network::config::TransactionPool for - TransactionPoolAdapter +impl sc_network::config::TransactionPool for TransactionPoolAdapter where - C: sc_network::config::Client + Send + Sync, - Pool: 'static + TransactionPool, - B: BlockT, - H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, - E: 'static + IntoPoolError + From, + C: sc_network::config::Client + Send + Sync, + Pool: 'static + TransactionPool, + B: BlockT, + H: std::hash::Hash + Eq + sp_runtime::traits::Member + sp_runtime::traits::MaybeSerialize, + E: 'static + IntoPoolError + From, { - fn transactions(&self) -> Vec<(H, B::Extrinsic)> { - transactions_to_propagate(&*self.pool) - } - - fn hash_of(&self, transaction: &B::Extrinsic) -> H { - self.pool.hash_of(transaction) - } - - fn import( - &self, - report_handle: ReportHandle, - who: PeerId, - reputation_change_good: sc_network::ReputationChange, - reputation_change_bad: sc_network::ReputationChange, - transaction: B::Extrinsic - ) { - if 
!self.imports_external_transactions { - debug!("Transaction rejected"); - return; - } - - let encoded = transaction.encode(); - match Decode::decode(&mut &encoded[..]) { - Ok(uxt) => { - let best_block_id = BlockId::hash(self.client.info().best_hash); - let source = sp_transaction_pool::TransactionSource::External; - let import_future = self.pool.submit_one(&best_block_id, source, uxt); - let import_future = import_future - .map(move |import_result| { - match import_result { - Ok(_) => report_handle.report_peer(who, reputation_change_good), - Err(e) => match e.into_pool_error() { - Ok(sp_transaction_pool::error::Error::AlreadyImported(_)) => (), - Ok(e) => { - report_handle.report_peer(who, reputation_change_bad); - debug!("Error adding transaction to the pool: {:?}", e) - } - Err(e) => debug!("Error converting pool error: {:?}", e), - } - } - }); - - self.executor.spawn("extrinsic-import", import_future); - } - Err(e) => debug!("Error decoding transaction {}", e), - } - } - - fn on_broadcasted(&self, propagations: HashMap>) { - self.pool.on_broadcasted(propagations) - } - - fn transaction(&self, hash: &H) -> Option { - self.pool.ready_transaction(hash) - .and_then( - // Only propagable transactions should be resolved for network service. 
- |tx| if tx.is_propagable() { Some(tx.data().clone()) } else { None } - ) - } + fn transactions(&self) -> Vec<(H, B::Extrinsic)> { + transactions_to_propagate(&*self.pool) + } + + fn hash_of(&self, transaction: &B::Extrinsic) -> H { + self.pool.hash_of(transaction) + } + + fn import( + &self, + report_handle: ReportHandle, + who: PeerId, + reputation_change_good: sc_network::ReputationChange, + reputation_change_bad: sc_network::ReputationChange, + transaction: B::Extrinsic, + ) { + if !self.imports_external_transactions { + debug!("Transaction rejected"); + return; + } + + let encoded = transaction.encode(); + match Decode::decode(&mut &encoded[..]) { + Ok(uxt) => { + let best_block_id = BlockId::hash(self.client.info().best_hash); + let source = sp_transaction_pool::TransactionSource::External; + let import_future = self.pool.submit_one(&best_block_id, source, uxt); + let import_future = import_future.map(move |import_result| match import_result { + Ok(_) => report_handle.report_peer(who, reputation_change_good), + Err(e) => match e.into_pool_error() { + Ok(sp_transaction_pool::error::Error::AlreadyImported(_)) => (), + Ok(e) => { + report_handle.report_peer(who, reputation_change_bad); + debug!("Error adding transaction to the pool: {:?}", e) + } + Err(e) => debug!("Error converting pool error: {:?}", e), + }, + }); + + self.executor.spawn("extrinsic-import", import_future); + } + Err(e) => debug!("Error decoding transaction {}", e), + } + } + + fn on_broadcasted(&self, propagations: HashMap>) { + self.pool.on_broadcasted(propagations) + } + + fn transaction(&self, hash: &H) -> Option { + self.pool.ready_transaction(hash).and_then( + // Only propagable transactions should be resolved for network service. 
+ |tx| { + if tx.is_propagable() { + Some(tx.data().clone()) + } else { + None + } + }, + ) + } } #[cfg(test)] mod tests { - use super::*; - use futures::executor::block_on; - use sp_consensus::SelectChain; - use sp_runtime::traits::BlindCheckable; - use substrate_test_runtime_client::{prelude::*, runtime::{Extrinsic, Transfer}}; - use sc_transaction_pool::{BasicPool, FullChainApi}; - - #[test] - fn should_not_propagate_transactions_that_are_marked_as_such() { - // given - let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); - let client = Arc::new(client); - let pool = Arc::new(BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0); - let source = sp_runtime::transaction_validity::TransactionSource::External; - let best = longest_chain.best_chain().unwrap(); - let transaction = Transfer { - amount: 5, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx(); - block_on(pool.submit_one( - &BlockId::hash(best.hash()), source, transaction.clone()), - ).unwrap(); - block_on(pool.submit_one( - &BlockId::hash(best.hash()), source, Extrinsic::IncludeData(vec![1])), - ).unwrap(); - assert_eq!(pool.status().ready, 2); - - // when - let transactions = transactions_to_propagate(&*pool); - - // then - assert_eq!(transactions.len(), 1); - assert!(transactions[0].1.clone().check().is_ok()); - // this should not panic - let _ = transactions[0].1.transfer(); - } + use super::*; + use futures::executor::block_on; + use sc_transaction_pool::{BasicPool, FullChainApi}; + use sp_consensus::SelectChain; + use sp_runtime::traits::BlindCheckable; + use substrate_test_runtime_client::{ + prelude::*, + runtime::{Extrinsic, Transfer}, + }; + + #[test] + fn should_not_propagate_transactions_that_are_marked_as_such() { + // given + let (client, longest_chain) = TestClientBuilder::new().build_with_longest_chain(); + let client = Arc::new(client); + let pool = Arc::new( + 
BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + ); + let source = sp_runtime::transaction_validity::TransactionSource::External; + let best = longest_chain.best_chain().unwrap(); + let transaction = Transfer { + amount: 5, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + } + .into_signed_tx(); + block_on(pool.submit_one(&BlockId::hash(best.hash()), source, transaction.clone())) + .unwrap(); + block_on(pool.submit_one( + &BlockId::hash(best.hash()), + source, + Extrinsic::IncludeData(vec![1]), + )) + .unwrap(); + assert_eq!(pool.status().ready, 2); + + // when + let transactions = transactions_to_propagate(&*pool); + + // then + assert_eq!(transactions.len(), 1); + assert!(transactions[0].1.clone().check().is_ok()); + // this should not panic + let _ = transactions[0].1.transfer(); + } } diff --git a/client/service/src/metrics.rs b/client/service/src/metrics.rs index 6b7c32c2d0..beef0cb2a3 100644 --- a/client/service/src/metrics.rs +++ b/client/service/src/metrics.rs @@ -17,10 +17,10 @@ use std::convert::TryFrom; use crate::NetworkStatus; -use prometheus_endpoint::{register, Gauge, U64, F64, Registry, PrometheusError, Opts, GaugeVec}; +use prometheus_endpoint::{register, Gauge, GaugeVec, Opts, PrometheusError, Registry, F64, U64}; use sc_client::ClientInfo; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; -use sp_runtime::traits::{NumberFor, Block, SaturatedConversion, UniqueSaturatedInto}; +use sp_runtime::traits::{Block, NumberFor, SaturatedConversion, UniqueSaturatedInto}; use sp_transaction_pool::PoolStatus; use sp_utils::metrics::register_globals; @@ -28,400 +28,514 @@ use sysinfo::{self, ProcessExt, SystemExt}; #[cfg(not(target_os = "unknown"))] use netstat2::{ - TcpState, ProtocolSocketInfo, iterate_sockets_info, AddressFamilyFlags, ProtocolFlags, + iterate_sockets_info, AddressFamilyFlags, ProtocolFlags, ProtocolSocketInfo, TcpState, }; struct PrometheusMetrics { - // system 
- #[cfg(any(unix, windows))] - load_avg: GaugeVec, - - // process - cpu_usage_percentage: Gauge, - memory_usage_bytes: Gauge, - threads: Gauge, - open_files: GaugeVec, - - #[cfg(any(unix, windows))] - netstat: GaugeVec, - - // -- inner counters - // generic info - block_height: GaugeVec, - number_leaves: Gauge, - ready_transactions_number: Gauge, - - // I/O - network_per_sec_bytes: GaugeVec, - database_cache: Gauge, - state_cache: Gauge, - state_db: GaugeVec, + // system + #[cfg(any(unix, windows))] + load_avg: GaugeVec, + + // process + cpu_usage_percentage: Gauge, + memory_usage_bytes: Gauge, + threads: Gauge, + open_files: GaugeVec, + + #[cfg(any(unix, windows))] + netstat: GaugeVec, + + // -- inner counters + // generic info + block_height: GaugeVec, + number_leaves: Gauge, + ready_transactions_number: Gauge, + + // I/O + network_per_sec_bytes: GaugeVec, + database_cache: Gauge, + state_cache: Gauge, + state_db: GaugeVec, } impl PrometheusMetrics { - fn setup(registry: &Registry, name: &str, version: &str, roles: u64) - -> Result - { - register(Gauge::::with_opts( - Opts::new( - "build_info", - "A metric with a constant '1' value labeled by name, version" - ) - .const_label("name", name) - .const_label("version", version) - )?, ®istry)?.set(1); - - register(Gauge::::new( - "node_roles", "The roles the node is running as", - )?, ®istry)?.set(roles); - - register_globals(registry)?; - - Ok(Self { - // system - #[cfg(any(unix, windows))] - load_avg: register(GaugeVec::new( - Opts::new("load_avg", "System load average"), - &["over"] - )?, registry)?, - - // process - memory_usage_bytes: register(Gauge::new( - "memory_usage_bytes", "Node memory (resident set size) usage", - )?, registry)?, - - cpu_usage_percentage: register(Gauge::new( - "cpu_usage_percentage", "Node CPU usage", - )?, registry)?, - - #[cfg(any(unix, windows))] - netstat: register(GaugeVec::new( - Opts::new("netstat_tcp", "Current TCP connections "), - &["status"] - )?, registry)?, - - threads: 
register(Gauge::new( - "threads", "Number of threads used by the process", - )?, registry)?, - - open_files: register(GaugeVec::new( - Opts::new("open_file_handles", "Open file handlers held by the process"), - &["fd_type"] - )?, registry)?, - - // --- internal - - // generic internals - block_height: register(GaugeVec::new( - Opts::new("block_height", "Block height info of the chain"), - &["status"] - )?, registry)?, - - number_leaves: register(Gauge::new( - "number_leaves", "Number of known chain leaves (aka forks)", - )?, registry)?, - - ready_transactions_number: register(Gauge::new( - "ready_transactions_number", "Number of transactions in the ready queue", - )?, registry)?, - - // I/ O - network_per_sec_bytes: register(GaugeVec::new( - Opts::new("network_per_sec_bytes", "Networking bytes per second"), - &["direction"] - )?, registry)?, - database_cache: register(Gauge::new( - "database_cache_bytes", "RocksDB cache size in bytes", - )?, registry)?, - state_cache: register(Gauge::new( - "state_cache_bytes", "State cache size in bytes", - )?, registry)?, - state_db: register(GaugeVec::new( - Opts::new("state_db_cache_bytes", "State DB cache in bytes"), - &["subtype"] - )?, registry)?, - }) - } + fn setup( + registry: &Registry, + name: &str, + version: &str, + roles: u64, + ) -> Result { + register( + Gauge::::with_opts( + Opts::new( + "build_info", + "A metric with a constant '1' value labeled by name, version", + ) + .const_label("name", name) + .const_label("version", version), + )?, + ®istry, + )? + .set(1); + + register( + Gauge::::new("node_roles", "The roles the node is running as")?, + ®istry, + )? 
+ .set(roles); + + register_globals(registry)?; + + Ok(Self { + // system + #[cfg(any(unix, windows))] + load_avg: register( + GaugeVec::new(Opts::new("load_avg", "System load average"), &["over"])?, + registry, + )?, + + // process + memory_usage_bytes: register( + Gauge::new( + "memory_usage_bytes", + "Node memory (resident set size) usage", + )?, + registry, + )?, + + cpu_usage_percentage: register( + Gauge::new("cpu_usage_percentage", "Node CPU usage")?, + registry, + )?, + + #[cfg(any(unix, windows))] + netstat: register( + GaugeVec::new( + Opts::new("netstat_tcp", "Current TCP connections "), + &["status"], + )?, + registry, + )?, + + threads: register( + Gauge::new("threads", "Number of threads used by the process")?, + registry, + )?, + + open_files: register( + GaugeVec::new( + Opts::new( + "open_file_handles", + "Open file handlers held by the process", + ), + &["fd_type"], + )?, + registry, + )?, + + // --- internal + + // generic internals + block_height: register( + GaugeVec::new( + Opts::new("block_height", "Block height info of the chain"), + &["status"], + )?, + registry, + )?, + + number_leaves: register( + Gauge::new("number_leaves", "Number of known chain leaves (aka forks)")?, + registry, + )?, + + ready_transactions_number: register( + Gauge::new( + "ready_transactions_number", + "Number of transactions in the ready queue", + )?, + registry, + )?, + + // I/ O + network_per_sec_bytes: register( + GaugeVec::new( + Opts::new("network_per_sec_bytes", "Networking bytes per second"), + &["direction"], + )?, + registry, + )?, + database_cache: register( + Gauge::new("database_cache_bytes", "RocksDB cache size in bytes")?, + registry, + )?, + state_cache: register( + Gauge::new("state_cache_bytes", "State cache size in bytes")?, + registry, + )?, + state_db: register( + GaugeVec::new( + Opts::new("state_db_cache_bytes", "State DB cache in bytes"), + &["subtype"], + )?, + registry, + )?, + }) + } } #[cfg(any(unix, windows))] #[derive(Default)] struct 
ConnectionsCount { - listen: u64, - established: u64, - starting: u64, - closing: u64, - closed: u64, - other: u64 + listen: u64, + established: u64, + starting: u64, + closing: u64, + closed: u64, + other: u64, } #[derive(Default)] struct FdCounter { - paths: u64, - sockets: u64, - net: u64, - pipes: u64, - anon_inode: u64, - mem: u64, - other: u64, + paths: u64, + sockets: u64, + net: u64, + pipes: u64, + anon_inode: u64, + mem: u64, + other: u64, } #[derive(Default)] struct ProcessInfo { - cpu_usage: f64, - memory: u64, - threads: Option, - open_fd: Option, + cpu_usage: f64, + memory: u64, + threads: Option, + open_fd: Option, } pub struct MetricsService { - metrics: Option, - #[cfg(not(target_os = "unknown"))] - system: sysinfo::System, - pid: Option, + metrics: Option, + #[cfg(not(target_os = "unknown"))] + system: sysinfo::System, + pid: Option, } #[cfg(target_os = "linux")] impl MetricsService { - fn inner_new(metrics: Option) -> Self { - let process = procfs::process::Process::myself() - .expect("Procfs doesn't fail on unix. qed"); - - Self { - metrics, - system: sysinfo::System::new(), - pid: Some(process.pid), - } - } - - fn process_info(&mut self) -> ProcessInfo { - let pid = self.pid.clone().expect("unix always has a pid. qed"); - let mut info = self.process_info_for(&pid); - let process = procfs::process::Process::new(pid).expect("Our process exists. qed."); - info.threads = process.stat().ok().map(|s| - u64::try_from(s.num_threads).expect("There are no negative thread counts. 
qed"), - ); - info.open_fd = process.fd().ok().map(|i| - i.into_iter().fold(FdCounter::default(), |mut f, info| { - match info.target { - procfs::process::FDTarget::Path(_) => f.paths += 1, - procfs::process::FDTarget::Socket(_) => f.sockets += 1, - procfs::process::FDTarget::Net(_) => f.net += 1, - procfs::process::FDTarget::Pipe(_) => f.pipes += 1, - procfs::process::FDTarget::AnonInode(_) => f.anon_inode += 1, - procfs::process::FDTarget::MemFD(_) => f.mem += 1, - procfs::process::FDTarget::Other(_,_) => f.other += 1, - }; - f - }) - ); - info - } + fn inner_new(metrics: Option) -> Self { + let process = procfs::process::Process::myself().expect("Procfs doesn't fail on unix. qed"); + + Self { + metrics, + system: sysinfo::System::new(), + pid: Some(process.pid), + } + } + + fn process_info(&mut self) -> ProcessInfo { + let pid = self.pid.clone().expect("unix always has a pid. qed"); + let mut info = self.process_info_for(&pid); + let process = procfs::process::Process::new(pid).expect("Our process exists. qed."); + info.threads = process.stat().ok().map(|s| { + u64::try_from(s.num_threads).expect("There are no negative thread counts. 
qed") + }); + info.open_fd = process.fd().ok().map(|i| { + i.into_iter().fold(FdCounter::default(), |mut f, info| { + match info.target { + procfs::process::FDTarget::Path(_) => f.paths += 1, + procfs::process::FDTarget::Socket(_) => f.sockets += 1, + procfs::process::FDTarget::Net(_) => f.net += 1, + procfs::process::FDTarget::Pipe(_) => f.pipes += 1, + procfs::process::FDTarget::AnonInode(_) => f.anon_inode += 1, + procfs::process::FDTarget::MemFD(_) => f.mem += 1, + procfs::process::FDTarget::Other(_, _) => f.other += 1, + }; + f + }) + }); + info + } } #[cfg(all(any(unix, windows), not(target_os = "linux")))] impl MetricsService { - fn inner_new(metrics: Option) -> Self { - Self { - metrics, - system: sysinfo::System::new(), - pid: sysinfo::get_current_pid().ok(), - } - } - - fn process_info(&mut self) -> ProcessInfo { - self.pid.map(|pid| self.process_info_for(&pid)).unwrap_or_default() - } + fn inner_new(metrics: Option) -> Self { + Self { + metrics, + system: sysinfo::System::new(), + pid: sysinfo::get_current_pid().ok(), + } + } + + fn process_info(&mut self) -> ProcessInfo { + self.pid + .map(|pid| self.process_info_for(&pid)) + .unwrap_or_default() + } } - #[cfg(target_os = "unknown")] impl MetricsService { - fn inner_new(metrics: Option) -> Self { - Self { - metrics, - pid: None, - } - } - - fn process_info(&mut self) -> ProcessInfo { - ProcessInfo::default() - } -} + fn inner_new(metrics: Option) -> Self { + Self { metrics, pid: None } + } + fn process_info(&mut self) -> ProcessInfo { + ProcessInfo::default() + } +} impl MetricsService { - pub fn with_prometheus(registry: &Registry, name: &str, version: &str, roles: u64) - -> Result - { - PrometheusMetrics::setup(registry, name, version, roles).map(|p| { - Self::inner_new(Some(p)) - }) - } - - pub fn new() -> Self { - Self::inner_new(None) - } - - #[cfg(not(target_os = "unknown"))] - fn process_info_for(&mut self, pid: &sysinfo::Pid) -> ProcessInfo { - let mut info = ProcessInfo::default(); - if 
self.system.refresh_process(*pid) { - let prc = self.system.get_process(*pid) - .expect("Above refresh_process succeeds, this must be Some(), qed"); - info.cpu_usage = prc.cpu_usage().into(); - info.memory = prc.memory(); - } - info - } - - #[cfg(not(target_os = "unknown"))] - fn connections_info(&self) -> Option { - self.pid.as_ref().and_then(|pid| { - let af_flags = AddressFamilyFlags::IPV4 | AddressFamilyFlags::IPV6; - let proto_flags = ProtocolFlags::TCP; - let netstat_pid = *pid as u32; - - iterate_sockets_info(af_flags, proto_flags).ok().map(|iter| - iter.filter_map(|r| - r.ok().and_then(|s| { - match s.protocol_socket_info { - ProtocolSocketInfo::Tcp(info) - if s.associated_pids.contains(&netstat_pid) => Some(info.state), - _ => None - } - }) - ).fold(ConnectionsCount::default(), |mut counter, socket_state| { - match socket_state { - TcpState::Listen => counter.listen += 1, - TcpState::Established => counter.established += 1, - TcpState::Closed => counter.closed += 1, - TcpState::SynSent | TcpState::SynReceived => counter.starting += 1, - TcpState::FinWait1 | TcpState::FinWait2 | TcpState::CloseWait - | TcpState::Closing | TcpState::LastAck => counter.closing += 1, - _ => counter.other += 1 - } - - counter - }) - ) - }) - } - - pub fn tick( - &mut self, - info: &ClientInfo, - txpool_status: &PoolStatus, - net_status: &NetworkStatus, - ) { - - let best_number = info.chain.best_number.saturated_into::(); - let best_hash = info.chain.best_hash; - let num_peers = net_status.num_connected_peers; - let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); - let bandwidth_download = net_status.average_download_per_sec; - let bandwidth_upload = net_status.average_upload_per_sec; - let best_seen_block = net_status.best_seen_block - .map(|num: NumberFor| num.unique_saturated_into() as u64); - let process_info = self.process_info(); - - telemetry!( - SUBSTRATE_INFO; - "system.interval"; - "peers" => num_peers, - "height" => best_number, - "best" => 
?best_hash, - "txcount" => txpool_status.ready, - "cpu" => process_info.cpu_usage, - "memory" => process_info.memory, - "finalized_height" => finalized_number, - "finalized_hash" => ?info.chain.finalized_hash, - "bandwidth_download" => bandwidth_download, - "bandwidth_upload" => bandwidth_upload, - "used_state_cache_size" => info.usage.as_ref() - .map(|usage| usage.memory.state_cache.as_bytes()) - .unwrap_or(0), - "used_db_cache_size" => info.usage.as_ref() - .map(|usage| usage.memory.database_cache.as_bytes()) - .unwrap_or(0), - "disk_read_per_sec" => info.usage.as_ref() - .map(|usage| usage.io.bytes_read) - .unwrap_or(0), - "disk_write_per_sec" => info.usage.as_ref() - .map(|usage| usage.io.bytes_written) - .unwrap_or(0), - ); - - if let Some(metrics) = self.metrics.as_ref() { - metrics.cpu_usage_percentage.set(process_info.cpu_usage as f64); - // `sysinfo::Process::memory` returns memory usage in KiB and not bytes. - metrics.memory_usage_bytes.set(process_info.memory * 1024); - - if let Some(threads) = process_info.threads { - metrics.threads.set(threads); - } - - if let Some(fd_info) = process_info.open_fd { - metrics.open_files.with_label_values(&["paths"]).set(fd_info.paths); - metrics.open_files.with_label_values(&["mem"]).set(fd_info.mem); - metrics.open_files.with_label_values(&["sockets"]).set(fd_info.sockets); - metrics.open_files.with_label_values(&["net"]).set(fd_info.net); - metrics.open_files.with_label_values(&["pipe"]).set(fd_info.pipes); - metrics.open_files.with_label_values(&["anon_inode"]).set(fd_info.anon_inode); - metrics.open_files.with_label_values(&["other"]).set(fd_info.other); - } - - - metrics.network_per_sec_bytes.with_label_values(&["download"]).set( - net_status.average_download_per_sec, - ); - metrics.network_per_sec_bytes.with_label_values(&["upload"]).set( - net_status.average_upload_per_sec, - ); - - metrics.block_height.with_label_values(&["finalized"]).set(finalized_number); - 
metrics.block_height.with_label_values(&["best"]).set(best_number); - if let Ok(leaves) = u64::try_from(info.chain.number_leaves) { - metrics.number_leaves.set(leaves); - } - - metrics.ready_transactions_number.set(txpool_status.ready as u64); - - if let Some(best_seen_block) = best_seen_block { - metrics.block_height.with_label_values(&["sync_target"]).set(best_seen_block); - } - - if let Some(info) = info.usage.as_ref() { - metrics.database_cache.set(info.memory.database_cache.as_bytes() as u64); - metrics.state_cache.set(info.memory.state_cache.as_bytes() as u64); - - metrics.state_db.with_label_values(&["non_canonical"]).set( - info.memory.state_db.non_canonical.as_bytes() as u64, - ); - if let Some(pruning) = info.memory.state_db.pruning { - metrics.state_db.with_label_values(&["pruning"]).set(pruning.as_bytes() as u64); - } - metrics.state_db.with_label_values(&["pinned"]).set( - info.memory.state_db.pinned.as_bytes() as u64, - ); - } - - #[cfg(not(target_os = "unknown"))] - { - let load = self.system.get_load_average(); - metrics.load_avg.with_label_values(&["1min"]).set(load.one); - metrics.load_avg.with_label_values(&["5min"]).set(load.five); - metrics.load_avg.with_label_values(&["15min"]).set(load.fifteen); - - if let Some(conns) = self.connections_info() { - metrics.netstat.with_label_values(&["listen"]).set(conns.listen); - metrics.netstat.with_label_values(&["established"]).set(conns.established); - metrics.netstat.with_label_values(&["starting"]).set(conns.starting); - metrics.netstat.with_label_values(&["closing"]).set(conns.closing); - metrics.netstat.with_label_values(&["closed"]).set(conns.closed); - metrics.netstat.with_label_values(&["other"]).set(conns.other); - } - } - } - } + pub fn with_prometheus( + registry: &Registry, + name: &str, + version: &str, + roles: u64, + ) -> Result { + PrometheusMetrics::setup(registry, name, version, roles).map(|p| Self::inner_new(Some(p))) + } + + pub fn new() -> Self { + Self::inner_new(None) + } + + 
#[cfg(not(target_os = "unknown"))] + fn process_info_for(&mut self, pid: &sysinfo::Pid) -> ProcessInfo { + let mut info = ProcessInfo::default(); + if self.system.refresh_process(*pid) { + let prc = self + .system + .get_process(*pid) + .expect("Above refresh_process succeeds, this must be Some(), qed"); + info.cpu_usage = prc.cpu_usage().into(); + info.memory = prc.memory(); + } + info + } + + #[cfg(not(target_os = "unknown"))] + fn connections_info(&self) -> Option { + self.pid.as_ref().and_then(|pid| { + let af_flags = AddressFamilyFlags::IPV4 | AddressFamilyFlags::IPV6; + let proto_flags = ProtocolFlags::TCP; + let netstat_pid = *pid as u32; + + iterate_sockets_info(af_flags, proto_flags) + .ok() + .map(|iter| { + iter.filter_map(|r| { + r.ok().and_then(|s| match s.protocol_socket_info { + ProtocolSocketInfo::Tcp(info) + if s.associated_pids.contains(&netstat_pid) => + { + Some(info.state) + } + _ => None, + }) + }) + .fold( + ConnectionsCount::default(), + |mut counter, socket_state| { + match socket_state { + TcpState::Listen => counter.listen += 1, + TcpState::Established => counter.established += 1, + TcpState::Closed => counter.closed += 1, + TcpState::SynSent | TcpState::SynReceived => counter.starting += 1, + TcpState::FinWait1 + | TcpState::FinWait2 + | TcpState::CloseWait + | TcpState::Closing + | TcpState::LastAck => counter.closing += 1, + _ => counter.other += 1, + } + + counter + }, + ) + }) + }) + } + + pub fn tick( + &mut self, + info: &ClientInfo, + txpool_status: &PoolStatus, + net_status: &NetworkStatus, + ) { + let best_number = info.chain.best_number.saturated_into::(); + let best_hash = info.chain.best_hash; + let num_peers = net_status.num_connected_peers; + let finalized_number: u64 = info.chain.finalized_number.saturated_into::(); + let bandwidth_download = net_status.average_download_per_sec; + let bandwidth_upload = net_status.average_upload_per_sec; + let best_seen_block = net_status + .best_seen_block + .map(|num: NumberFor| 
num.unique_saturated_into() as u64); + let process_info = self.process_info(); + + telemetry!( + SUBSTRATE_INFO; + "system.interval"; + "peers" => num_peers, + "height" => best_number, + "best" => ?best_hash, + "txcount" => txpool_status.ready, + "cpu" => process_info.cpu_usage, + "memory" => process_info.memory, + "finalized_height" => finalized_number, + "finalized_hash" => ?info.chain.finalized_hash, + "bandwidth_download" => bandwidth_download, + "bandwidth_upload" => bandwidth_upload, + "used_state_cache_size" => info.usage.as_ref() + .map(|usage| usage.memory.state_cache.as_bytes()) + .unwrap_or(0), + "used_db_cache_size" => info.usage.as_ref() + .map(|usage| usage.memory.database_cache.as_bytes()) + .unwrap_or(0), + "disk_read_per_sec" => info.usage.as_ref() + .map(|usage| usage.io.bytes_read) + .unwrap_or(0), + "disk_write_per_sec" => info.usage.as_ref() + .map(|usage| usage.io.bytes_written) + .unwrap_or(0), + ); + + if let Some(metrics) = self.metrics.as_ref() { + metrics + .cpu_usage_percentage + .set(process_info.cpu_usage as f64); + // `sysinfo::Process::memory` returns memory usage in KiB and not bytes. 
+ metrics.memory_usage_bytes.set(process_info.memory * 1024); + + if let Some(threads) = process_info.threads { + metrics.threads.set(threads); + } + + if let Some(fd_info) = process_info.open_fd { + metrics + .open_files + .with_label_values(&["paths"]) + .set(fd_info.paths); + metrics + .open_files + .with_label_values(&["mem"]) + .set(fd_info.mem); + metrics + .open_files + .with_label_values(&["sockets"]) + .set(fd_info.sockets); + metrics + .open_files + .with_label_values(&["net"]) + .set(fd_info.net); + metrics + .open_files + .with_label_values(&["pipe"]) + .set(fd_info.pipes); + metrics + .open_files + .with_label_values(&["anon_inode"]) + .set(fd_info.anon_inode); + metrics + .open_files + .with_label_values(&["other"]) + .set(fd_info.other); + } + + metrics + .network_per_sec_bytes + .with_label_values(&["download"]) + .set(net_status.average_download_per_sec); + metrics + .network_per_sec_bytes + .with_label_values(&["upload"]) + .set(net_status.average_upload_per_sec); + + metrics + .block_height + .with_label_values(&["finalized"]) + .set(finalized_number); + metrics + .block_height + .with_label_values(&["best"]) + .set(best_number); + if let Ok(leaves) = u64::try_from(info.chain.number_leaves) { + metrics.number_leaves.set(leaves); + } + + metrics + .ready_transactions_number + .set(txpool_status.ready as u64); + + if let Some(best_seen_block) = best_seen_block { + metrics + .block_height + .with_label_values(&["sync_target"]) + .set(best_seen_block); + } + + if let Some(info) = info.usage.as_ref() { + metrics + .database_cache + .set(info.memory.database_cache.as_bytes() as u64); + metrics + .state_cache + .set(info.memory.state_cache.as_bytes() as u64); + + metrics + .state_db + .with_label_values(&["non_canonical"]) + .set(info.memory.state_db.non_canonical.as_bytes() as u64); + if let Some(pruning) = info.memory.state_db.pruning { + metrics + .state_db + .with_label_values(&["pruning"]) + .set(pruning.as_bytes() as u64); + } + metrics + 
.state_db + .with_label_values(&["pinned"]) + .set(info.memory.state_db.pinned.as_bytes() as u64); + } + + #[cfg(not(target_os = "unknown"))] + { + let load = self.system.get_load_average(); + metrics.load_avg.with_label_values(&["1min"]).set(load.one); + metrics.load_avg.with_label_values(&["5min"]).set(load.five); + metrics + .load_avg + .with_label_values(&["15min"]) + .set(load.fifteen); + + if let Some(conns) = self.connections_info() { + metrics + .netstat + .with_label_values(&["listen"]) + .set(conns.listen); + metrics + .netstat + .with_label_values(&["established"]) + .set(conns.established); + metrics + .netstat + .with_label_values(&["starting"]) + .set(conns.starting); + metrics + .netstat + .with_label_values(&["closing"]) + .set(conns.closing); + metrics + .netstat + .with_label_values(&["closed"]) + .set(conns.closed); + metrics + .netstat + .with_label_values(&["other"]) + .set(conns.other); + } + } + } + } } diff --git a/client/service/src/status_sinks.rs b/client/service/src/status_sinks.rs index 4b1dce52f9..979c28e589 100644 --- a/client/service/src/status_sinks.rs +++ b/client/service/src/status_sinks.rs @@ -14,126 +14,130 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use futures::{Stream, stream::futures_unordered::FuturesUnordered}; -use std::time::Duration; -use std::pin::Pin; -use std::task::{Poll, Context}; +use futures::{stream::futures_unordered::FuturesUnordered, Stream}; use futures_timer::Delay; use sp_utils::mpsc::TracingUnboundedSender; +use std::pin::Pin; +use std::task::{Context, Poll}; +use std::time::Duration; /// Holds a list of `UnboundedSender`s, each associated with a certain time period. Every time the /// period elapses, we push an element on the sender. /// /// Senders are removed only when they are closed. 
pub struct StatusSinks { - entries: FuturesUnordered>, + entries: FuturesUnordered>, } struct YieldAfter { - delay: Delay, - interval: Duration, - sender: Option>, + delay: Delay, + interval: Duration, + sender: Option>, } impl StatusSinks { - /// Builds a new empty collection. - pub fn new() -> StatusSinks { - StatusSinks { - entries: FuturesUnordered::new(), - } - } - - /// Adds a sender to the collection. - /// - /// The `interval` is the time period between two pushes on the sender. - pub fn push(&mut self, interval: Duration, sender: TracingUnboundedSender) { - self.entries.push(YieldAfter { - delay: Delay::new(interval), - interval, - sender: Some(sender), - }) - } - - /// Processes all the senders. If any sender is ready, calls the `status_grab` function and - /// pushes what it returns to the sender. - /// - /// This function doesn't return anything, but it should be treated as if it implicitly - /// returns `Poll::Pending`. In particular, it should be called again when the task - /// is waken up. - /// - /// # Panic - /// - /// Panics if not called within the context of a task. - pub fn poll(&mut self, cx: &mut Context, mut status_grab: impl FnMut() -> T) { - loop { - match Pin::new(&mut self.entries).poll_next(cx) { - Poll::Ready(Some((sender, interval))) => { - let status = status_grab(); - if sender.unbounded_send(status).is_ok() { - self.entries.push(YieldAfter { - // Note that since there's a small delay between the moment a task is - // waken up and the moment it is polled, the period is actually not - // `interval` but `interval + `. We ignore this problem in - // practice. - delay: Delay::new(interval), - interval, - sender: Some(sender), - }); - } - } - Poll::Ready(None) | - Poll::Pending => break, - } - } - } + /// Builds a new empty collection. + pub fn new() -> StatusSinks { + StatusSinks { + entries: FuturesUnordered::new(), + } + } + + /// Adds a sender to the collection. 
+ /// + /// The `interval` is the time period between two pushes on the sender. + pub fn push(&mut self, interval: Duration, sender: TracingUnboundedSender) { + self.entries.push(YieldAfter { + delay: Delay::new(interval), + interval, + sender: Some(sender), + }) + } + + /// Processes all the senders. If any sender is ready, calls the `status_grab` function and + /// pushes what it returns to the sender. + /// + /// This function doesn't return anything, but it should be treated as if it implicitly + /// returns `Poll::Pending`. In particular, it should be called again when the task + /// is waken up. + /// + /// # Panic + /// + /// Panics if not called within the context of a task. + pub fn poll(&mut self, cx: &mut Context, mut status_grab: impl FnMut() -> T) { + loop { + match Pin::new(&mut self.entries).poll_next(cx) { + Poll::Ready(Some((sender, interval))) => { + let status = status_grab(); + if sender.unbounded_send(status).is_ok() { + self.entries.push(YieldAfter { + // Note that since there's a small delay between the moment a task is + // waken up and the moment it is polled, the period is actually not + // `interval` but `interval + `. We ignore this problem in + // practice. 
+ delay: Delay::new(interval), + interval, + sender: Some(sender), + }); + } + } + Poll::Ready(None) | Poll::Pending => break, + } + } + } } impl futures::Future for YieldAfter { - type Output = (TracingUnboundedSender, Duration); - - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = Pin::into_inner(self); - - match Pin::new(&mut this.delay).poll(cx) { - Poll::Pending => Poll::Pending, - Poll::Ready(()) => { - let sender = this.sender.take() - .expect("sender is always Some unless the future is finished; qed"); - Poll::Ready((sender, this.interval)) - } - } - } + type Output = (TracingUnboundedSender, Duration); + + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = Pin::into_inner(self); + + match Pin::new(&mut this.delay).poll(cx) { + Poll::Pending => Poll::Pending, + Poll::Ready(()) => { + let sender = this + .sender + .take() + .expect("sender is always Some unless the future is finished; qed"); + Poll::Ready((sender, this.interval)) + } + } + } } #[cfg(test)] mod tests { - use super::StatusSinks; - use futures::prelude::*; - use futures::channel::mpsc; - use std::time::Duration; - use std::task::Poll; - - #[test] - fn works() { - // We're not testing that the `StatusSink` properly enforces an order in the intervals, as - // this easily causes test failures on busy CPUs. 
- - let mut status_sinks = StatusSinks::new(); - - let (tx, rx) = mpsc::unbounded(); - status_sinks.push(Duration::from_millis(100), tx); - - let mut val_order = 5; - - futures::executor::block_on(futures::future::select( - futures::future::poll_fn(move |cx| { - status_sinks.poll(cx, || { val_order += 1; val_order }); - Poll::<()>::Pending - }), - Box::pin(async { - let items: Vec = rx.take(3).collect().await; - assert_eq!(items, [6, 7, 8]); - }) - )); - } + use super::StatusSinks; + use futures::channel::mpsc; + use futures::prelude::*; + use std::task::Poll; + use std::time::Duration; + + #[test] + fn works() { + // We're not testing that the `StatusSink` properly enforces an order in the intervals, as + // this easily causes test failures on busy CPUs. + + let mut status_sinks = StatusSinks::new(); + + let (tx, rx) = mpsc::unbounded(); + status_sinks.push(Duration::from_millis(100), tx); + + let mut val_order = 5; + + futures::executor::block_on(futures::future::select( + futures::future::poll_fn(move |cx| { + status_sinks.poll(cx, || { + val_order += 1; + val_order + }); + Poll::<()>::Pending + }), + Box::pin(async { + let items: Vec = rx.take(3).collect().await; + assert_eq!(items, [6, 7, 8]); + }), + )); + } } diff --git a/client/service/src/task_manager.rs b/client/service/src/task_manager.rs index fd7fc62ab5..48f959d651 100644 --- a/client/service/src/task_manager.rs +++ b/client/service/src/task_manager.rs @@ -13,26 +13,26 @@ //! Substrate service tasks management module. 
-use std::{ - pin::Pin, - result::Result, sync::Arc, - task::{Poll, Context}, -}; use exit_future::Signal; -use log::{debug, error}; use futures::{ - Future, FutureExt, Stream, - future::select, - compat::*, - task::{Spawn, FutureObj, SpawnError}, + compat::*, + future::select, + task::{FutureObj, Spawn, SpawnError}, + Future, FutureExt, Stream, }; +use log::{debug, error}; use prometheus_endpoint::{ - exponential_buckets, register, - PrometheusError, - CounterVec, HistogramOpts, HistogramVec, Opts, Registry, U64 + exponential_buckets, register, CounterVec, HistogramOpts, HistogramVec, Opts, PrometheusError, + Registry, U64, }; use sc_client_api::CloneableSpawn; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; +use std::{ + pin::Pin, + result::Result, + sync::Arc, + task::{Context, Poll}, +}; mod prometheus_future; @@ -44,243 +44,261 @@ pub type TaskScheduler = TracingUnboundedSender /// Helper struct to setup background tasks execution for service. pub struct TaskManagerBuilder { - /// A future that resolves when the service has exited, this is useful to - /// make sure any internally spawned futures stop when the service does. - on_exit: exit_future::Exit, - /// A signal that makes the exit future above resolve, fired on service drop. - signal: Option, - /// Sender for futures that must be spawned as background tasks. - to_spawn_tx: TaskScheduler, - /// Receiver for futures that must be spawned as background tasks. - to_spawn_rx: TracingUnboundedReceiver + Send>>>, - /// Prometheus metrics where to report the stats about tasks. - metrics: Option, + /// A future that resolves when the service has exited, this is useful to + /// make sure any internally spawned futures stop when the service does. + on_exit: exit_future::Exit, + /// A signal that makes the exit future above resolve, fired on service drop. 
+ signal: Option, + /// Sender for futures that must be spawned as background tasks. + to_spawn_tx: TaskScheduler, + /// Receiver for futures that must be spawned as background tasks. + to_spawn_rx: TracingUnboundedReceiver + Send>>>, + /// Prometheus metrics where to report the stats about tasks. + metrics: Option, } impl TaskManagerBuilder { - /// New asynchronous task manager setup. - /// - /// If a Prometheus registry is passed, it will be used to report statistics about the - /// service tasks. - pub fn new(prometheus_registry: Option<&Registry>) -> Result { - let (signal, on_exit) = exit_future::signal(); - let (to_spawn_tx, to_spawn_rx) = tracing_unbounded("mpsc_task_manager"); + /// New asynchronous task manager setup. + /// + /// If a Prometheus registry is passed, it will be used to report statistics about the + /// service tasks. + pub fn new(prometheus_registry: Option<&Registry>) -> Result { + let (signal, on_exit) = exit_future::signal(); + let (to_spawn_tx, to_spawn_rx) = tracing_unbounded("mpsc_task_manager"); - let metrics = prometheus_registry.map(Metrics::register).transpose()?; + let metrics = prometheus_registry.map(Metrics::register).transpose()?; - Ok(Self { - on_exit, - signal: Some(signal), - to_spawn_tx, - to_spawn_rx, - metrics, - }) - } + Ok(Self { + on_exit, + signal: Some(signal), + to_spawn_tx, + to_spawn_rx, + metrics, + }) + } - /// Get spawn handle. - /// - /// Tasks spawned through this handle will get scheduled once - /// service is up and running. - pub fn spawn_handle(&self) -> SpawnTaskHandle { - SpawnTaskHandle { - on_exit: self.on_exit.clone(), - sender: self.to_spawn_tx.clone(), - metrics: self.metrics.clone(), - } - } + /// Get spawn handle. + /// + /// Tasks spawned through this handle will get scheduled once + /// service is up and running. 
+ pub fn spawn_handle(&self) -> SpawnTaskHandle { + SpawnTaskHandle { + on_exit: self.on_exit.clone(), + sender: self.to_spawn_tx.clone(), + metrics: self.metrics.clone(), + } + } - /// Convert into actual task manager from initial setup. - pub(crate) fn into_task_manager(self, executor: ServiceTaskExecutor) -> TaskManager { - let TaskManagerBuilder { - on_exit, - signal, - to_spawn_rx, - to_spawn_tx, - metrics, - } = self; - TaskManager { - on_exit, - signal, - to_spawn_tx, - to_spawn_rx, - executor, - metrics, - } - } + /// Convert into actual task manager from initial setup. + pub(crate) fn into_task_manager(self, executor: ServiceTaskExecutor) -> TaskManager { + let TaskManagerBuilder { + on_exit, + signal, + to_spawn_rx, + to_spawn_tx, + metrics, + } = self; + TaskManager { + on_exit, + signal, + to_spawn_tx, + to_spawn_rx, + executor, + metrics, + } + } } /// An handle for spawning tasks in the service. #[derive(Clone)] pub struct SpawnTaskHandle { - sender: TaskScheduler, - on_exit: exit_future::Exit, - metrics: Option, + sender: TaskScheduler, + on_exit: exit_future::Exit, + metrics: Option, } impl SpawnTaskHandle { - /// Spawns the given task with the given name. - /// - /// Note that the `name` is a `&'static str`. The reason for this choice is that statistics - /// about this task are getting reported to the Prometheus endpoint (if enabled), and that - /// therefore the set of possible task names must be bounded. - /// - /// In other words, it would be a bad idea for someone to do for example - /// `spawn(format!("{:?}", some_public_key))`. - pub fn spawn(&self, name: &'static str, task: impl Future + Send + 'static) { - let on_exit = self.on_exit.clone(); - let metrics = self.metrics.clone(); + /// Spawns the given task with the given name. + /// + /// Note that the `name` is a `&'static str`. 
The reason for this choice is that statistics + /// about this task are getting reported to the Prometheus endpoint (if enabled), and that + /// therefore the set of possible task names must be bounded. + /// + /// In other words, it would be a bad idea for someone to do for example + /// `spawn(format!("{:?}", some_public_key))`. + pub fn spawn(&self, name: &'static str, task: impl Future + Send + 'static) { + let on_exit = self.on_exit.clone(); + let metrics = self.metrics.clone(); - // Note that we increase the started counter here and not within the future. This way, - // we could properly visualize on Prometheus situations where the spawning doesn't work. - if let Some(metrics) = &self.metrics { - metrics.tasks_spawned.with_label_values(&[name]).inc(); - // We do a dummy increase in order for the task to show up in metrics. - metrics.tasks_ended.with_label_values(&[name]).inc_by(0); - } + // Note that we increase the started counter here and not within the future. This way, + // we could properly visualize on Prometheus situations where the spawning doesn't work. + if let Some(metrics) = &self.metrics { + metrics.tasks_spawned.with_label_values(&[name]).inc(); + // We do a dummy increase in order for the task to show up in metrics. 
+ metrics.tasks_ended.with_label_values(&[name]).inc_by(0); + } - let future = async move { - if let Some(metrics) = metrics { - let poll_duration = metrics.poll_duration.with_label_values(&[name]); - let poll_start = metrics.poll_start.with_label_values(&[name]); - let task = prometheus_future::with_poll_durations(poll_duration, poll_start, task); - futures::pin_mut!(task); - let _ = select(on_exit, task).await; - metrics.tasks_ended.with_label_values(&[name]).inc(); - } else { - futures::pin_mut!(task); - let _ = select(on_exit, task).await; - } - }; + let future = async move { + if let Some(metrics) = metrics { + let poll_duration = metrics.poll_duration.with_label_values(&[name]); + let poll_start = metrics.poll_start.with_label_values(&[name]); + let task = prometheus_future::with_poll_durations(poll_duration, poll_start, task); + futures::pin_mut!(task); + let _ = select(on_exit, task).await; + metrics.tasks_ended.with_label_values(&[name]).inc(); + } else { + futures::pin_mut!(task); + let _ = select(on_exit, task).await; + } + }; - if self.sender.unbounded_send(Box::pin(future)).is_err() { - error!("Failed to send task to spawn over channel"); - } - } + if self.sender.unbounded_send(Box::pin(future)).is_err() { + error!("Failed to send task to spawn over channel"); + } + } } impl Spawn for SpawnTaskHandle { - fn spawn_obj(&self, future: FutureObj<'static, ()>) - -> Result<(), SpawnError> { - self.spawn("unamed", future); - Ok(()) - } + fn spawn_obj(&self, future: FutureObj<'static, ()>) -> Result<(), SpawnError> { + self.spawn("unamed", future); + Ok(()) + } } impl sc_client_api::CloneableSpawn for SpawnTaskHandle { - fn clone(&self) -> Box { - Box::new(Clone::clone(self)) - } + fn clone(&self) -> Box { + Box::new(Clone::clone(self)) + } } type Boxed01Future01 = Box + Send + 'static>; impl futures01::future::Executor for SpawnTaskHandle { - fn execute(&self, future: Boxed01Future01) -> Result<(), futures01::future::ExecuteError>{ - self.spawn("unnamed", 
future.compat().map(drop)); - Ok(()) - } + fn execute( + &self, + future: Boxed01Future01, + ) -> Result<(), futures01::future::ExecuteError> { + self.spawn("unnamed", future.compat().map(drop)); + Ok(()) + } } /// Helper struct to manage background/async tasks in Service. pub struct TaskManager { - /// A future that resolves when the service has exited, this is useful to - /// make sure any internally spawned futures stop when the service does. - on_exit: exit_future::Exit, - /// A signal that makes the exit future above resolve, fired on service drop. - signal: Option, - /// Sender for futures that must be spawned as background tasks. - to_spawn_tx: TaskScheduler, - /// Receiver for futures that must be spawned as background tasks. - /// Note: please read comment on [`SpawnTaskHandle::spawn`] for why this is a `&'static str`. - to_spawn_rx: TracingUnboundedReceiver + Send>>>, - /// How to spawn background tasks. - executor: ServiceTaskExecutor, - /// Prometheus metric where to report the polling times. - metrics: Option, + /// A future that resolves when the service has exited, this is useful to + /// make sure any internally spawned futures stop when the service does. + on_exit: exit_future::Exit, + /// A signal that makes the exit future above resolve, fired on service drop. + signal: Option, + /// Sender for futures that must be spawned as background tasks. + to_spawn_tx: TaskScheduler, + /// Receiver for futures that must be spawned as background tasks. + /// Note: please read comment on [`SpawnTaskHandle::spawn`] for why this is a `&'static str`. + to_spawn_rx: TracingUnboundedReceiver + Send>>>, + /// How to spawn background tasks. + executor: ServiceTaskExecutor, + /// Prometheus metric where to report the polling times. + metrics: Option, } impl TaskManager { - /// Spawn background/async task, which will be aware on exit signal. - /// - /// See also the documentation of [`SpawnTaskHandler::spawn`]. 
- pub(super) fn spawn(&self, name: &'static str, task: impl Future + Send + 'static) { - self.spawn_handle().spawn(name, task) - } + /// Spawn background/async task, which will be aware on exit signal. + /// + /// See also the documentation of [`SpawnTaskHandler::spawn`]. + pub(super) fn spawn( + &self, + name: &'static str, + task: impl Future + Send + 'static, + ) { + self.spawn_handle().spawn(name, task) + } - pub(super) fn spawn_handle(&self) -> SpawnTaskHandle { - SpawnTaskHandle { - on_exit: self.on_exit.clone(), - sender: self.to_spawn_tx.clone(), - metrics: self.metrics.clone(), - } - } + pub(super) fn spawn_handle(&self) -> SpawnTaskHandle { + SpawnTaskHandle { + on_exit: self.on_exit.clone(), + sender: self.to_spawn_tx.clone(), + metrics: self.metrics.clone(), + } + } - /// Process background task receiver. - pub(super) fn process_receiver(&mut self, cx: &mut Context) { - while let Poll::Ready(Some(task_to_spawn)) = Pin::new(&mut self.to_spawn_rx).poll_next(cx) { - (self.executor)(task_to_spawn); - } - } + /// Process background task receiver. + pub(super) fn process_receiver(&mut self, cx: &mut Context) { + while let Poll::Ready(Some(task_to_spawn)) = Pin::new(&mut self.to_spawn_rx).poll_next(cx) { + (self.executor)(task_to_spawn); + } + } - /// Clone on exit signal. - pub(super) fn on_exit(&self) -> exit_future::Exit { - self.on_exit.clone() - } + /// Clone on exit signal. 
+ pub(super) fn on_exit(&self) -> exit_future::Exit { + self.on_exit.clone() + } } impl Drop for TaskManager { - fn drop(&mut self) { - debug!(target: "service", "Tasks manager shutdown"); - if let Some(signal) = self.signal.take() { - let _ = signal.fire(); - } - } + fn drop(&mut self) { + debug!(target: "service", "Tasks manager shutdown"); + if let Some(signal) = self.signal.take() { + let _ = signal.fire(); + } + } } #[derive(Clone)] struct Metrics { - // This list is ordered alphabetically - poll_duration: HistogramVec, - poll_start: CounterVec, - tasks_spawned: CounterVec, - tasks_ended: CounterVec, + // This list is ordered alphabetically + poll_duration: HistogramVec, + poll_start: CounterVec, + tasks_spawned: CounterVec, + tasks_ended: CounterVec, } impl Metrics { - fn register(registry: &Registry) -> Result { - Ok(Self { - poll_duration: register(HistogramVec::new( - HistogramOpts { - common_opts: Opts::new( - "tasks_polling_duration", - "Duration in seconds of each invocation of Future::poll" - ), - buckets: exponential_buckets(0.001, 4.0, 9) - .expect("function parameters are constant and always valid; qed"), - }, - &["task_name"] - )?, registry)?, - poll_start: register(CounterVec::new( - Opts::new( - "tasks_polling_started_total", - "Total number of times we started invoking Future::poll" - ), - &["task_name"] - )?, registry)?, - tasks_spawned: register(CounterVec::new( - Opts::new( - "tasks_spawned_total", - "Total number of tasks that have been spawned on the Service" - ), - &["task_name"] - )?, registry)?, - tasks_ended: register(CounterVec::new( - Opts::new( - "tasks_ended_total", - "Total number of tasks for which Future::poll has returned Ready(())" - ), - &["task_name"] - )?, registry)?, - }) - } + fn register(registry: &Registry) -> Result { + Ok(Self { + poll_duration: register( + HistogramVec::new( + HistogramOpts { + common_opts: Opts::new( + "tasks_polling_duration", + "Duration in seconds of each invocation of Future::poll", + ), + 
buckets: exponential_buckets(0.001, 4.0, 9) + .expect("function parameters are constant and always valid; qed"), + }, + &["task_name"], + )?, + registry, + )?, + poll_start: register( + CounterVec::new( + Opts::new( + "tasks_polling_started_total", + "Total number of times we started invoking Future::poll", + ), + &["task_name"], + )?, + registry, + )?, + tasks_spawned: register( + CounterVec::new( + Opts::new( + "tasks_spawned_total", + "Total number of tasks that have been spawned on the Service", + ), + &["task_name"], + )?, + registry, + )?, + tasks_ended: register( + CounterVec::new( + Opts::new( + "tasks_ended_total", + "Total number of tasks for which Future::poll has returned Ready(())", + ), + &["task_name"], + )?, + registry, + )?, + }) + } } diff --git a/client/service/src/task_manager/prometheus_future.rs b/client/service/src/task_manager/prometheus_future.rs index 53bd59aa7a..b8a733fa4d 100644 --- a/client/service/src/task_manager/prometheus_future.rs +++ b/client/service/src/task_manager/prometheus_future.rs @@ -15,55 +15,59 @@ use futures::prelude::*; use prometheus_endpoint::{Counter, Histogram, U64}; -use std::{fmt, pin::Pin, task::{Context, Poll}}; +use std::{ + fmt, + pin::Pin, + task::{Context, Poll}, +}; /// Wraps around a `Future`. Report the polling duration to the `Histogram` and when the polling /// starts to the `Counter`. pub fn with_poll_durations( - poll_duration: Histogram, - poll_start: Counter, - inner: T + poll_duration: Histogram, + poll_start: Counter, + inner: T, ) -> PrometheusFuture { - PrometheusFuture { - inner, - poll_duration, - poll_start, - } + PrometheusFuture { + inner, + poll_duration, + poll_start, + } } /// Wraps around `Future` and adds diagnostics to it. #[pin_project::pin_project] #[derive(Clone)] pub struct PrometheusFuture { - /// The inner future doing the actual work. - #[pin] - inner: T, - poll_duration: Histogram, - poll_start: Counter, + /// The inner future doing the actual work. 
+ #[pin] + inner: T, + poll_duration: Histogram, + poll_start: Counter, } impl Future for PrometheusFuture where - T: Future, + T: Future, { - type Output = T::Output; + type Output = T::Output; - fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { - let this = self.project(); + fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll { + let this = self.project(); - this.poll_start.inc(); - let _timer = this.poll_duration.start_timer(); - Future::poll(this.inner, cx) + this.poll_start.inc(); + let _timer = this.poll_duration.start_timer(); + Future::poll(this.inner, cx) - // `_timer` is dropped here and will observe the duration - } + // `_timer` is dropped here and will observe the duration + } } impl fmt::Debug for PrometheusFuture where - T: fmt::Debug, + T: fmt::Debug, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - fmt::Debug::fmt(&self.inner, f) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + fmt::Debug::fmt(&self.inner, f) + } } diff --git a/client/service/test/src/lib.rs b/client/service/test/src/lib.rs index 1e824cb273..c5c43390d0 100644 --- a/client/service/test/src/lib.rs +++ b/client/service/test/src/lib.rs @@ -16,534 +16,645 @@ //! Service integration test utils. 
+use futures::{FutureExt as _, TryFutureExt as _}; +use futures01::{Future, Poll, Stream}; +use log::info; +use sc_network::config::{NetworkConfiguration, TransportConfig}; +use sc_network::{multiaddr, Multiaddr}; +use sc_service::{ + config::{DatabaseConfig, KeystoreConfig}, + AbstractService, ChainSpecExtension, Configuration, Error, GenericChainSpec, Role, + RuntimeGenesis, +}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; +use sp_transaction_pool::TransactionPool; use std::iter; -use std::sync::{Arc, Mutex, MutexGuard}; use std::net::Ipv4Addr; use std::pin::Pin; +use std::sync::{Arc, Mutex, MutexGuard}; use std::time::Duration; -use log::info; -use futures01::{Future, Stream, Poll}; -use futures::{FutureExt as _, TryFutureExt as _}; use tempfile::TempDir; -use tokio::{runtime::Runtime, prelude::FutureExt}; use tokio::timer::Interval; -use sc_service::{ - AbstractService, - GenericChainSpec, - ChainSpecExtension, - Configuration, - config::{DatabaseConfig, KeystoreConfig}, - RuntimeGenesis, - Role, - Error, -}; -use sc_network::{multiaddr, Multiaddr}; -use sc_network::config::{NetworkConfiguration, TransportConfig}; -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; -use sp_transaction_pool::TransactionPool; +use tokio::{prelude::FutureExt, runtime::Runtime}; /// Maximum duration of single wait call. const MAX_WAIT_TIME: Duration = Duration::from_secs(60 * 3); struct TestNet { - runtime: Runtime, - authority_nodes: Vec<(usize, SyncService, U, Multiaddr)>, - full_nodes: Vec<(usize, SyncService, U, Multiaddr)>, - light_nodes: Vec<(usize, SyncService, Multiaddr)>, - chain_spec: GenericChainSpec, - base_port: u16, - nodes: usize, + runtime: Runtime, + authority_nodes: Vec<(usize, SyncService, U, Multiaddr)>, + full_nodes: Vec<(usize, SyncService, U, Multiaddr)>, + light_nodes: Vec<(usize, SyncService, Multiaddr)>, + chain_spec: GenericChainSpec, + base_port: u16, + nodes: usize, } /// Wraps around an `Arc` and implements `Future`. 
pub struct SyncService(Arc>); impl SyncService { - pub fn get(&self) -> MutexGuard { - self.0.lock().unwrap() - } + pub fn get(&self) -> MutexGuard { + self.0.lock().unwrap() + } } impl Clone for SyncService { - fn clone(&self) -> Self { - Self(self.0.clone()) - } + fn clone(&self) -> Self { + Self(self.0.clone()) + } } impl From for SyncService { - fn from(service: T) -> Self { - SyncService(Arc::new(Mutex::new(service))) - } + fn from(service: T) -> Self { + SyncService(Arc::new(Mutex::new(service))) + } } -impl> + Unpin> Future for SyncService { - type Item = (); - type Error = sc_service::Error; +impl> + Unpin> Future for SyncService { + type Item = (); + type Error = sc_service::Error; - fn poll(&mut self) -> Poll { - let mut f = self.0.lock().unwrap(); - futures::compat::Compat::new(&mut *f).poll() - } + fn poll(&mut self) -> Poll { + let mut f = self.0.lock().unwrap(); + futures::compat::Compat::new(&mut *f).poll() + } } impl TestNet -where F: Send + 'static, L: Send +'static, U: Clone + Send + 'static +where + F: Send + 'static, + L: Send + 'static, + U: Clone + Send + 'static, { - pub fn run_until_all_full( - &mut self, - full_predicate: FP, - light_predicate: LP, - ) - where - FP: Send + Fn(usize, &SyncService) -> bool + 'static, - LP: Send + Fn(usize, &SyncService) -> bool + 'static, - { - let full_nodes = self.full_nodes.clone(); - let light_nodes = self.light_nodes.clone(); - let interval = Interval::new_interval(Duration::from_millis(100)) - .map_err(|_| ()) - .for_each(move |_| { - let full_ready = full_nodes.iter().all(|&(ref id, ref service, _, _)| - full_predicate(*id, service) - ); - - if !full_ready { - return Ok(()); - } - - let light_ready = light_nodes.iter().all(|&(ref id, ref service, _)| - light_predicate(*id, service) - ); - - if !light_ready { - Ok(()) - } else { - Err(()) - } - }) - .timeout(MAX_WAIT_TIME); - - match self.runtime.block_on(interval) { - Ok(()) => unreachable!("interval always fails; qed"), - Err(ref err) if 
err.is_inner() => (), - Err(_) => panic!("Waited for too long"), - } - } + pub fn run_until_all_full(&mut self, full_predicate: FP, light_predicate: LP) + where + FP: Send + Fn(usize, &SyncService) -> bool + 'static, + LP: Send + Fn(usize, &SyncService) -> bool + 'static, + { + let full_nodes = self.full_nodes.clone(); + let light_nodes = self.light_nodes.clone(); + let interval = Interval::new_interval(Duration::from_millis(100)) + .map_err(|_| ()) + .for_each(move |_| { + let full_ready = full_nodes + .iter() + .all(|&(ref id, ref service, _, _)| full_predicate(*id, service)); + + if !full_ready { + return Ok(()); + } + + let light_ready = light_nodes + .iter() + .all(|&(ref id, ref service, _)| light_predicate(*id, service)); + + if !light_ready { + Ok(()) + } else { + Err(()) + } + }) + .timeout(MAX_WAIT_TIME); + + match self.runtime.block_on(interval) { + Ok(()) => unreachable!("interval always fails; qed"), + Err(ref err) if err.is_inner() => (), + Err(_) => panic!("Waited for too long"), + } + } } -fn node_config ( - index: usize, - spec: &GenericChainSpec, - role: Role, - task_executor: Arc + Send>>) + Send + Sync>, - key_seed: Option, - base_port: u16, - root: &TempDir, -) -> Configuration -{ - let root = root.path().join(format!("node-{}", index)); - - let mut network_config = NetworkConfiguration::new( - format!("Node {}", index), - "network/test/0.1", - Default::default(), - None, - ); - - network_config.listen_addresses.push( - iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) - .chain(iter::once(multiaddr::Protocol::Tcp(base_port + index as u16))) - .collect() - ); - - network_config.transport = TransportConfig::Normal { - enable_mdns: false, - allow_private_ipv4: true, - wasm_external_transport: None, - use_yamux_flow_control: true, - }; - - Configuration { - impl_name: "network-test-impl", - impl_version: "0.1", - role, - task_executor, - transaction_pool: Default::default(), - network: network_config, - keystore: 
KeystoreConfig::Path { - path: root.join("key"), - password: None - }, - database: DatabaseConfig::RocksDb { - path: root.join("db"), - cache_size: 128, - }, - state_cache_size: 16777216, - state_cache_child_ratio: None, - pruning: Default::default(), - chain_spec: Box::new((*spec).clone()), - wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, - execution_strategies: Default::default(), - rpc_http: None, - rpc_ws: None, - rpc_ws_max_connections: None, - rpc_cors: None, - prometheus_config: None, - telemetry_endpoints: None, - telemetry_external_transport: None, - default_heap_pages: None, - offchain_worker: false, - force_authoring: false, - disable_grandpa: false, - dev_key_seed: key_seed, - tracing_targets: None, - tracing_receiver: Default::default(), - max_runtime_instances: 8, - announce_block: true, - } +fn node_config( + index: usize, + spec: &GenericChainSpec, + role: Role, + task_executor: Arc + Send>>) + Send + Sync>, + key_seed: Option, + base_port: u16, + root: &TempDir, +) -> Configuration { + let root = root.path().join(format!("node-{}", index)); + + let mut network_config = NetworkConfiguration::new( + format!("Node {}", index), + "network/test/0.1", + Default::default(), + None, + ); + + network_config.listen_addresses.push( + iter::once(multiaddr::Protocol::Ip4(Ipv4Addr::new(127, 0, 0, 1))) + .chain(iter::once(multiaddr::Protocol::Tcp( + base_port + index as u16, + ))) + .collect(), + ); + + network_config.transport = TransportConfig::Normal { + enable_mdns: false, + allow_private_ipv4: true, + wasm_external_transport: None, + use_yamux_flow_control: true, + }; + + Configuration { + impl_name: "network-test-impl", + impl_version: "0.1", + role, + task_executor, + transaction_pool: Default::default(), + network: network_config, + keystore: KeystoreConfig::Path { + path: root.join("key"), + password: None, + }, + database: DatabaseConfig::RocksDb { + path: root.join("db"), + cache_size: 128, + }, + state_cache_size: 16777216, + 
state_cache_child_ratio: None, + pruning: Default::default(), + chain_spec: Box::new((*spec).clone()), + wasm_method: sc_service::config::WasmExecutionMethod::Interpreted, + execution_strategies: Default::default(), + rpc_http: None, + rpc_ws: None, + rpc_ws_max_connections: None, + rpc_cors: None, + prometheus_config: None, + telemetry_endpoints: None, + telemetry_external_transport: None, + default_heap_pages: None, + offchain_worker: false, + force_authoring: false, + disable_grandpa: false, + dev_key_seed: key_seed, + tracing_targets: None, + tracing_receiver: Default::default(), + max_runtime_instances: 8, + announce_block: true, + } } -impl TestNet where - F: AbstractService, - L: AbstractService, - E: ChainSpecExtension + Clone + 'static + Send, - G: RuntimeGenesis + 'static, +impl TestNet +where + F: AbstractService, + L: AbstractService, + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, { - fn new( - temp: &TempDir, - spec: GenericChainSpec, - full: impl Iterator Result<(F, U), Error>>, - light: impl Iterator Result>, - authorities: impl Iterator Result<(F, U), Error> - )>, - base_port: u16 - ) -> TestNet { - let _ = env_logger::try_init(); - fdlimit::raise_fd_limit(); - let runtime = Runtime::new().expect("Error creating tokio runtime"); - let mut net = TestNet { - runtime, - authority_nodes: Default::default(), - full_nodes: Default::default(), - light_nodes: Default::default(), - chain_spec: spec, - base_port, - nodes: 0, - }; - net.insert_nodes(temp, full, light, authorities); - net - } - - fn insert_nodes( - &mut self, - temp: &TempDir, - full: impl Iterator Result<(F, U), Error>>, - light: impl Iterator Result>, - authorities: impl Iterator Result<(F, U), Error>)> - ) { - let executor = self.runtime.executor(); - - for (key, authority) in authorities { - let task_executor = { - let executor = executor.clone(); - Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) - }; - let node_config = 
node_config( - self.nodes, - &self.chain_spec, - Role::Authority { sentry_nodes: Vec::new() }, - task_executor, - Some(key), - self.base_port, - &temp, - ); - let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let (service, user_data) = authority(node_config).expect("Error creating test node service"); - let service = SyncService::from(service); - - executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); - self.authority_nodes.push((self.nodes, service, user_data, addr)); - self.nodes += 1; - } - - for full in full { - let task_executor = { - let executor = executor.clone(); - Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) - }; - let node_config = node_config(self.nodes, &self.chain_spec, Role::Full, task_executor, None, self.base_port, &temp); - let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let (service, user_data) = full(node_config).expect("Error creating test node service"); - let service = SyncService::from(service); - - executor.spawn(service.clone().map_err(|_| ())); - let addr = addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); - self.full_nodes.push((self.nodes, service, user_data, addr)); - self.nodes += 1; - } - - for light in light { - let task_executor = { - let executor = executor.clone(); - Arc::new(move |fut: Pin + Send>>| executor.spawn(fut.unit_error().compat())) - }; - let node_config = node_config(self.nodes, &self.chain_spec, Role::Light, task_executor, None, self.base_port, &temp); - let addr = node_config.network.listen_addresses.iter().next().unwrap().clone(); - let service = SyncService::from(light(node_config).expect("Error creating test node service")); - - executor.spawn(service.clone().map_err(|_| ())); - let addr = 
addr.with(multiaddr::Protocol::P2p(service.get().network().local_peer_id().clone().into())); - self.light_nodes.push((self.nodes, service, addr)); - self.nodes += 1; - } - } + fn new( + temp: &TempDir, + spec: GenericChainSpec, + full: impl Iterator Result<(F, U), Error>>, + light: impl Iterator Result>, + authorities: impl Iterator Result<(F, U), Error>)>, + base_port: u16, + ) -> TestNet { + let _ = env_logger::try_init(); + fdlimit::raise_fd_limit(); + let runtime = Runtime::new().expect("Error creating tokio runtime"); + let mut net = TestNet { + runtime, + authority_nodes: Default::default(), + full_nodes: Default::default(), + light_nodes: Default::default(), + chain_spec: spec, + base_port, + nodes: 0, + }; + net.insert_nodes(temp, full, light, authorities); + net + } + + fn insert_nodes( + &mut self, + temp: &TempDir, + full: impl Iterator Result<(F, U), Error>>, + light: impl Iterator Result>, + authorities: impl Iterator Result<(F, U), Error>)>, + ) { + let executor = self.runtime.executor(); + + for (key, authority) in authorities { + let task_executor = { + let executor = executor.clone(); + Arc::new( + move |fut: Pin + Send>>| { + executor.spawn(fut.unit_error().compat()) + }, + ) + }; + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Authority { + sentry_nodes: Vec::new(), + }, + task_executor, + Some(key), + self.base_port, + &temp, + ); + let addr = node_config + .network + .listen_addresses + .iter() + .next() + .unwrap() + .clone(); + let (service, user_data) = + authority(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); + + executor.spawn(service.clone().map_err(|_| ())); + let addr = addr.with(multiaddr::Protocol::P2p( + service.get().network().local_peer_id().clone().into(), + )); + self.authority_nodes + .push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } + + for full in full { + let task_executor = { + let executor = executor.clone(); + Arc::new( + 
move |fut: Pin + Send>>| { + executor.spawn(fut.unit_error().compat()) + }, + ) + }; + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Full, + task_executor, + None, + self.base_port, + &temp, + ); + let addr = node_config + .network + .listen_addresses + .iter() + .next() + .unwrap() + .clone(); + let (service, user_data) = full(node_config).expect("Error creating test node service"); + let service = SyncService::from(service); + + executor.spawn(service.clone().map_err(|_| ())); + let addr = addr.with(multiaddr::Protocol::P2p( + service.get().network().local_peer_id().clone().into(), + )); + self.full_nodes.push((self.nodes, service, user_data, addr)); + self.nodes += 1; + } + + for light in light { + let task_executor = { + let executor = executor.clone(); + Arc::new( + move |fut: Pin + Send>>| { + executor.spawn(fut.unit_error().compat()) + }, + ) + }; + let node_config = node_config( + self.nodes, + &self.chain_spec, + Role::Light, + task_executor, + None, + self.base_port, + &temp, + ); + let addr = node_config + .network + .listen_addresses + .iter() + .next() + .unwrap() + .clone(); + let service = + SyncService::from(light(node_config).expect("Error creating test node service")); + + executor.spawn(service.clone().map_err(|_| ())); + let addr = addr.with(multiaddr::Protocol::P2p( + service.get().network().local_peer_id().clone().into(), + )); + self.light_nodes.push((self.nodes, service, addr)); + self.nodes += 1; + } + } } fn tempdir_with_prefix(prefix: &str) -> TempDir { - tempfile::Builder::new().prefix(prefix).tempdir().expect("Error creating test dir") + tempfile::Builder::new() + .prefix(prefix) + .tempdir() + .expect("Error creating test dir") } pub fn connectivity( - spec: GenericChainSpec, - full_builder: Fb, - light_builder: Lb, + spec: GenericChainSpec, + full_builder: Fb, + light_builder: Lb, ) where - E: ChainSpecExtension + Clone + 'static + Send, - G: RuntimeGenesis + 'static, - Fb: Fn(Configuration) -> Result, - F: 
AbstractService, - Lb: Fn(Configuration) -> Result, - L: AbstractService, + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, + Fb: Fn(Configuration) -> Result, + F: AbstractService, + Lb: Fn(Configuration) -> Result, + L: AbstractService, { - const NUM_FULL_NODES: usize = 5; - const NUM_LIGHT_NODES: usize = 5; - - let expected_full_connections = NUM_FULL_NODES - 1 + NUM_LIGHT_NODES; - let expected_light_connections = NUM_FULL_NODES; - - { - let temp = tempdir_with_prefix("substrate-connectivity-test"); - let runtime = { - let mut network = TestNet::new( - &temp, - spec.clone(), - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), - // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise - // the type of the closure cannot be inferred. - (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), - 30400, - ); - info!("Checking star topology"); - let first_address = network.full_nodes[0].3.clone(); - for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()) - .expect("Error adding reserved peer"); - } - for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()) - .expect("Error adding reserved peer"); - } - - network.run_until_all_full( - move |_index, service| service.get().network().num_connected() - == expected_full_connections, - move |_index, service| service.get().network().num_connected() - == expected_light_connections, - ); - - network.runtime - }; - - runtime.shutdown_now().wait().expect("Error shutting down runtime"); - - temp.close().expect("Error removing temp dir"); - } - { - let temp = tempdir_with_prefix("substrate-connectivity-test"); - { - let mut network = TestNet::new( - &temp, - spec, - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg).map(|s| (s, 
())) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), - // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise - // the type of the closure cannot be inferred. - (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), - 30400, - ); - info!("Checking linked topology"); - let mut address = network.full_nodes[0].3.clone(); - let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES); - for i in 0..max_nodes { - if i != 0 { - if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()) - .expect("Error adding reserved peer"); - address = node_id.clone(); - } - } - - if let Some((_, service, node_id)) = network.light_nodes.get(i) { - service.get().network().add_reserved_peer(address.to_string()) - .expect("Error adding reserved peer"); - address = node_id.clone(); - } - } - - network.run_until_all_full( - move |_index, service| service.get().network().num_connected() - == expected_full_connections, - move |_index, service| service.get().network().num_connected() - == expected_light_connections, - ); - } - temp.close().expect("Error removing temp dir"); - } + const NUM_FULL_NODES: usize = 5; + const NUM_LIGHT_NODES: usize = 5; + + let expected_full_connections = NUM_FULL_NODES - 1 + NUM_LIGHT_NODES; + let expected_light_connections = NUM_FULL_NODES; + + { + let temp = tempdir_with_prefix("substrate-connectivity-test"); + let runtime = { + let mut network = TestNet::new( + &temp, + spec.clone(), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + 30400, + ); + info!("Checking star topology"); + let first_address = network.full_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter().skip(1) { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + + network.run_until_all_full( + move |_index, service| { + service.get().network().num_connected() == expected_full_connections + }, + move |_index, service| { + service.get().network().num_connected() == expected_light_connections + }, + ); + + network.runtime + }; + + runtime + .shutdown_now() + .wait() + .expect("Error shutting down runtime"); + + temp.close().expect("Error removing temp dir"); + } + { + let temp = tempdir_with_prefix("substrate-connectivity-test"); + { + let mut network = TestNet::new( + &temp, + spec, + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + 30400, + ); + info!("Checking linked topology"); + let mut address = network.full_nodes[0].3.clone(); + let max_nodes = std::cmp::max(NUM_FULL_NODES, NUM_LIGHT_NODES); + for i in 0..max_nodes { + if i != 0 { + if let Some((_, service, _, node_id)) = network.full_nodes.get(i) { + service + .get() + .network() + .add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); + address = node_id.clone(); + } + } + + if let Some((_, service, node_id)) = network.light_nodes.get(i) { + service + .get() + .network() + .add_reserved_peer(address.to_string()) + .expect("Error adding reserved peer"); + address = node_id.clone(); + } + } + + network.run_until_all_full( + move |_index, service| { + service.get().network().num_connected() == expected_full_connections + }, + move |_index, service| { + service.get().network().num_connected() == expected_light_connections + }, + ); + } + temp.close().expect("Error removing temp dir"); + } } pub fn sync( - spec: GenericChainSpec, - full_builder: Fb, - light_builder: Lb, - mut make_block_and_import: B, - mut extrinsic_factory: ExF + spec: GenericChainSpec, + full_builder: Fb, + light_builder: Lb, + mut make_block_and_import: B, + mut extrinsic_factory: ExF, ) where - Fb: Fn(Configuration) -> Result<(F, U), Error>, - F: AbstractService, - Lb: Fn(Configuration) -> Result, - L: AbstractService, - B: FnMut(&F, &mut U), - ExF: FnMut(&F, &U) -> ::Extrinsic, - U: Clone + Send + 'static, - E: ChainSpecExtension + Clone + 'static + Send, - G: RuntimeGenesis + 'static, + Fb: Fn(Configuration) -> Result<(F, U), Error>, + F: AbstractService, + Lb: Fn(Configuration) -> Result, + L: AbstractService, + B: FnMut(&F, &mut U), + ExF: FnMut(&F, &U) -> ::Extrinsic, + U: Clone + Send + 'static, + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, { - const NUM_FULL_NODES: usize = 10; - // FIXME: BABE light client support is 
currently not working. - const NUM_LIGHT_NODES: usize = 10; - const NUM_BLOCKS: usize = 512; - let temp = tempdir_with_prefix("substrate-sync-test"); - let mut network = TestNet::new( - &temp, - spec.clone(), - (0..NUM_FULL_NODES).map(|_| { |cfg| full_builder(cfg) }), - (0..NUM_LIGHT_NODES).map(|_| { |cfg| light_builder(cfg) }), - // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise - // the type of the closure cannot be inferred. - (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), - 30500, - ); - info!("Checking block sync"); - let first_address = { - let &mut (_, ref first_service, ref mut first_user_data, _) = &mut network.full_nodes[0]; - for i in 0 .. NUM_BLOCKS { - if i % 128 == 0 { - info!("Generating #{}", i + 1); - } - - make_block_and_import(&first_service.get(), first_user_data); - } - network.full_nodes[0].3.clone() - }; - - info!("Running sync"); - for (_, service, _, _) in network.full_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); - } - for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); - } - network.run_until_all_full( - |_index, service| - service.get().client().chain_info().best_number == (NUM_BLOCKS as u32).into(), - |_index, service| - service.get().client().chain_info().best_number == (NUM_BLOCKS as u32).into(), - ); - - info!("Checking extrinsic propagation"); - let first_service = network.full_nodes[0].1.clone(); - let first_user_data = &network.full_nodes[0].2; - let best_block = BlockId::number(first_service.get().client().chain_info().best_number); - let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); - let source = sp_transaction_pool::TransactionSource::External; - - futures::executor::block_on( - first_service.get().transaction_pool().submit_one(&best_block, source, extrinsic) - 
).expect("failed to submit extrinsic"); - - network.run_until_all_full( - |_index, service| service.get().transaction_pool().ready().count() == 1, - |_index, _service| true, - ); + const NUM_FULL_NODES: usize = 10; + // FIXME: BABE light client support is currently not working. + const NUM_LIGHT_NODES: usize = 10; + const NUM_BLOCKS: usize = 512; + let temp = tempdir_with_prefix("substrate-sync-test"); + let mut network = TestNet::new( + &temp, + spec.clone(), + (0..NUM_FULL_NODES).map(|_| |cfg| full_builder(cfg)), + (0..NUM_LIGHT_NODES).map(|_| |cfg| light_builder(cfg)), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. + (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg) })), + 30500, + ); + info!("Checking block sync"); + let first_address = { + let &mut (_, ref first_service, ref mut first_user_data, _) = &mut network.full_nodes[0]; + for i in 0..NUM_BLOCKS { + if i % 128 == 0 { + info!("Generating #{}", i + 1); + } + + make_block_and_import(&first_service.get(), first_user_data); + } + network.full_nodes[0].3.clone() + }; + + info!("Running sync"); + for (_, service, _, _) in network.full_nodes.iter().skip(1) { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full( + |_index, service| { + service.get().client().chain_info().best_number == (NUM_BLOCKS as u32).into() + }, + |_index, service| { + service.get().client().chain_info().best_number == (NUM_BLOCKS as u32).into() + }, + ); + + info!("Checking extrinsic propagation"); + let first_service = network.full_nodes[0].1.clone(); + let first_user_data = &network.full_nodes[0].2; + let best_block = 
BlockId::number(first_service.get().client().chain_info().best_number); + let extrinsic = extrinsic_factory(&first_service.get(), first_user_data); + let source = sp_transaction_pool::TransactionSource::External; + + futures::executor::block_on(first_service.get().transaction_pool().submit_one( + &best_block, + source, + extrinsic, + )) + .expect("failed to submit extrinsic"); + + network.run_until_all_full( + |_index, service| service.get().transaction_pool().ready().count() == 1, + |_index, _service| true, + ); } pub fn consensus( - spec: GenericChainSpec, - full_builder: Fb, - light_builder: Lb, - authorities: impl IntoIterator + spec: GenericChainSpec, + full_builder: Fb, + light_builder: Lb, + authorities: impl IntoIterator, ) where - Fb: Fn(Configuration) -> Result, - F: AbstractService, - Lb: Fn(Configuration) -> Result, - L: AbstractService, - E: ChainSpecExtension + Clone + 'static + Send, - G: RuntimeGenesis + 'static, + Fb: Fn(Configuration) -> Result, + F: AbstractService, + Lb: Fn(Configuration) -> Result, + L: AbstractService, + E: ChainSpecExtension + Clone + 'static + Send, + G: RuntimeGenesis + 'static, { - const NUM_FULL_NODES: usize = 10; - const NUM_LIGHT_NODES: usize = 10; - const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds - let temp = tempdir_with_prefix("substrate-consensus-test"); - let mut network = TestNet::new( - &temp, - spec.clone(), - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), - authorities.into_iter().map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), - 30600, - ); - - info!("Checking consensus"); - let first_address = network.authority_nodes[0].3.clone(); - for (_, service, _, _) in network.full_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); - } - for (_, service, _) in network.light_nodes.iter() { - 
service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); - } - for (_, service, _, _) in network.authority_nodes.iter().skip(1) { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); - } - network.run_until_all_full( - |_index, service| - service.get().client().chain_info().finalized_number >= (NUM_BLOCKS as u32 / 2).into(), - |_index, service| - service.get().client().chain_info().best_number >= (NUM_BLOCKS as u32 / 2).into(), - ); - - info!("Adding more peers"); - network.insert_nodes( - &temp, - (0..NUM_FULL_NODES / 2).map(|_| { |cfg| full_builder(cfg).map(|s| (s, ())) }), - (0..NUM_LIGHT_NODES / 2).map(|_| { |cfg| light_builder(cfg) }), - // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise - // the type of the closure cannot be inferred. - (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), - ); - for (_, service, _, _) in network.full_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); - } - for (_, service, _) in network.light_nodes.iter() { - service.get().network().add_reserved_peer(first_address.to_string()).expect("Error adding reserved peer"); - } - network.run_until_all_full( - |_index, service| - service.get().client().chain_info().finalized_number >= (NUM_BLOCKS as u32).into(), - |_index, service| - service.get().client().chain_info().best_number >= (NUM_BLOCKS as u32).into(), - ); + const NUM_FULL_NODES: usize = 10; + const NUM_LIGHT_NODES: usize = 10; + const NUM_BLOCKS: usize = 10; // 10 * 2 sec block production time = ~20 seconds + let temp = tempdir_with_prefix("substrate-consensus-test"); + let mut network = TestNet::new( + &temp, + spec.clone(), + (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), + authorities + .into_iter() + 
.map(|key| (key, { |cfg| full_builder(cfg).map(|s| (s, ())) })), + 30600, + ); + + info!("Checking consensus"); + let first_address = network.authority_nodes[0].3.clone(); + for (_, service, _, _) in network.full_nodes.iter() { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _, _) in network.authority_nodes.iter().skip(1) { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full( + |_index, service| { + service.get().client().chain_info().finalized_number >= (NUM_BLOCKS as u32 / 2).into() + }, + |_index, service| { + service.get().client().chain_info().best_number >= (NUM_BLOCKS as u32 / 2).into() + }, + ); + + info!("Adding more peers"); + network.insert_nodes( + &temp, + (0..NUM_FULL_NODES / 2).map(|_| |cfg| full_builder(cfg).map(|s| (s, ()))), + (0..NUM_LIGHT_NODES / 2).map(|_| |cfg| light_builder(cfg)), + // Note: this iterator is empty but we can't just use `iter::empty()`, otherwise + // the type of the closure cannot be inferred. 
+ (0..0).map(|_| (String::new(), { |cfg| full_builder(cfg).map(|s| (s, ())) })), + ); + for (_, service, _, _) in network.full_nodes.iter() { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + for (_, service, _) in network.light_nodes.iter() { + service + .get() + .network() + .add_reserved_peer(first_address.to_string()) + .expect("Error adding reserved peer"); + } + network.run_until_all_full( + |_index, service| { + service.get().client().chain_info().finalized_number >= (NUM_BLOCKS as u32).into() + }, + |_index, service| { + service.get().client().chain_info().best_number >= (NUM_BLOCKS as u32).into() + }, + ); } diff --git a/client/src/block_rules.rs b/client/src/block_rules.rs index e561451181..b0b4e74899 100644 --- a/client/src/block_rules.rs +++ b/client/src/block_rules.rs @@ -18,20 +18,18 @@ use std::collections::{HashMap, HashSet}; -use sp_runtime::{ - traits::{Block as BlockT, NumberFor}, -}; +use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sc_client_api::{ForkBlocks, BadBlocks}; +use sc_client_api::{BadBlocks, ForkBlocks}; /// Chain specification rules lookup result. pub enum LookupResult { - /// Specification rules do not contain any special rules about this block - NotSpecial, - /// The bock is known to be bad and should not be imported - KnownBad, - /// There is a specified canonical block hash for the given height - Expected(B::Hash) + /// Specification rules do not contain any special rules about this block + NotSpecial, + /// The bock is known to be bad and should not be imported + KnownBad, + /// There is a specified canonical block hash for the given height + Expected(B::Hash), } /// Chain-specific block filtering rules. @@ -39,34 +37,31 @@ pub enum LookupResult { /// This holds known bad blocks and known good forks, and /// is usually part of the chain spec. 
pub struct BlockRules { - bad: HashSet, - forks: HashMap, B::Hash>, + bad: HashSet, + forks: HashMap, B::Hash>, } impl BlockRules { - /// New block rules with provided black and white lists. - pub fn new( - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, - ) -> Self { - Self { - bad: bad_blocks.unwrap_or(HashSet::new()), - forks: fork_blocks.unwrap_or(vec![]).into_iter().collect(), - } - } + /// New block rules with provided black and white lists. + pub fn new(fork_blocks: ForkBlocks, bad_blocks: BadBlocks) -> Self { + Self { + bad: bad_blocks.unwrap_or(HashSet::new()), + forks: fork_blocks.unwrap_or(vec![]).into_iter().collect(), + } + } - /// Check if there's any rule affecting the given block. - pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { - if let Some(hash_for_height) = self.forks.get(&number) { - if hash_for_height != hash { - return LookupResult::Expected(hash_for_height.clone()); - } - } + /// Check if there's any rule affecting the given block. + pub fn lookup(&self, number: NumberFor, hash: &B::Hash) -> LookupResult { + if let Some(hash_for_height) = self.forks.get(&number) { + if hash_for_height != hash { + return LookupResult::Expected(hash_for_height.clone()); + } + } - if self.bad.contains(hash) { - return LookupResult::KnownBad; - } + if self.bad.contains(hash) { + return LookupResult::KnownBad; + } - LookupResult::NotSpecial - } + LookupResult::NotSpecial + } } diff --git a/client/src/call_executor.rs b/client/src/call_executor.rs index 1160449eee..9b2fefbd5f 100644 --- a/client/src/call_executor.rs +++ b/client/src/call_executor.rs @@ -14,246 +14,250 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::{sync::Arc, panic::UnwindSafe, result, cell::RefCell}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use sc_client_api::{backend, call_executor::CallExecutor, CloneableSpawn}; +use sc_executor::{NativeVersion, RuntimeInfo, RuntimeVersion}; +use sp_api::{InitializeBlock, ProofRecorder, StorageTransactionCache}; +use sp_core::{traits::CodeExecutor, NativeOrEncoded, NeverNativeValue}; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, HashFor, NumberFor}, + generic::BlockId, + traits::{Block as BlockT, HashFor, NumberFor}, }; use sp_state_machine::{ - self, OverlayedChanges, Ext, ExecutionManager, StateMachine, ExecutionStrategy, - backend::Backend as _, StorageProof, + self, backend::Backend as _, ExecutionManager, ExecutionStrategy, Ext, OverlayedChanges, + StateMachine, StorageProof, }; -use sc_executor::{RuntimeVersion, RuntimeInfo, NativeVersion}; -use sp_externalities::Extensions; -use sp_core::{NativeOrEncoded, NeverNativeValue, traits::CodeExecutor}; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; -use sc_client_api::{backend, call_executor::CallExecutor, CloneableSpawn}; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; /// Call executor that executes methods locally, querying all required /// data from local backend. pub struct LocalCallExecutor { - backend: Arc, - executor: E, - spawn_handle: Box, + backend: Arc, + executor: E, + spawn_handle: Box, } impl LocalCallExecutor { - /// Creates new instance of local call executor. - pub fn new( - backend: Arc, - executor: E, - spawn_handle: Box, - ) -> Self { - LocalCallExecutor { - backend, - executor, - spawn_handle, - } - } + /// Creates new instance of local call executor. 
+ pub fn new(backend: Arc, executor: E, spawn_handle: Box) -> Self { + LocalCallExecutor { + backend, + executor, + spawn_handle, + } + } } -impl Clone for LocalCallExecutor where E: Clone { - fn clone(&self) -> Self { - LocalCallExecutor { - backend: self.backend.clone(), - executor: self.executor.clone(), - spawn_handle: self.spawn_handle.clone(), - } - } +impl Clone for LocalCallExecutor +where + E: Clone, +{ + fn clone(&self) -> Self { + LocalCallExecutor { + backend: self.backend.clone(), + executor: self.executor.clone(), + spawn_handle: self.spawn_handle.clone(), + } + } } impl CallExecutor for LocalCallExecutor where - B: backend::Backend, - E: CodeExecutor + RuntimeInfo + Clone + 'static, - Block: BlockT, + B: backend::Backend, + E: CodeExecutor + RuntimeInfo + Clone + 'static, + Block: BlockT, { - type Error = E::Error; + type Error = E::Error; - type Backend = B; + type Backend = B; - fn call( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - extensions: Option, - ) -> sp_blockchain::Result> { - let mut changes = OverlayedChanges::default(); - let changes_trie = backend::changes_tries_state_at_block( - id, self.backend.changes_trie_storage() - )?; - let state = self.backend.state_at(*id)?; - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let return_data = StateMachine::new( - &state, - changes_trie, - &mut changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &state_runtime_code.runtime_code()?, - self.spawn_handle.clone(), - ).execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - None, - )?; + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + extensions: Option, + ) -> sp_blockchain::Result> { + let mut changes = OverlayedChanges::default(); + let changes_trie = + backend::changes_tries_state_at_block(id, 
self.backend.changes_trie_storage())?; + let state = self.backend.state_at(*id)?; + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let return_data = StateMachine::new( + &state, + changes_trie, + &mut changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &state_runtime_code.runtime_code()?, + self.spawn_handle.clone(), + ) + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + None, + )?; - Ok(return_data.into_encoded()) - } + Ok(return_data.into_encoded()) + } - fn contextual_call< - 'a, - IB: Fn() -> sp_blockchain::Result<()>, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - initialize_block_fn: IB, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &RefCell, - storage_transaction_cache: Option<&RefCell< - StorageTransactionCache - >>, - initialize_block: InitializeBlock<'a, Block>, - execution_manager: ExecutionManager, - native_call: Option, - recorder: &Option>, - extensions: Option, - ) -> Result, sp_blockchain::Error> where ExecutionManager: Clone { - match initialize_block { - InitializeBlock::Do(ref init_block) - if init_block.borrow().as_ref().map(|id| id != at).unwrap_or(true) => { - initialize_block_fn()?; - }, - // We don't need to initialize the runtime at a block. 
- _ => {}, - } + fn contextual_call< + 'a, + IB: Fn() -> sp_blockchain::Result<()>, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + initialize_block_fn: IB, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &RefCell, + storage_transaction_cache: Option<&RefCell>>, + initialize_block: InitializeBlock<'a, Block>, + execution_manager: ExecutionManager, + native_call: Option, + recorder: &Option>, + extensions: Option, + ) -> Result, sp_blockchain::Error> + where + ExecutionManager: Clone, + { + match initialize_block { + InitializeBlock::Do(ref init_block) + if init_block + .borrow() + .as_ref() + .map(|id| id != at) + .unwrap_or(true) => + { + initialize_block_fn()?; + } + // We don't need to initialize the runtime at a block. + _ => {} + } - let changes_trie_state = backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; - let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); + let changes_trie_state = + backend::changes_tries_state_at_block(at, self.backend.changes_trie_storage())?; + let mut storage_transaction_cache = storage_transaction_cache.map(|c| c.borrow_mut()); - let mut state = self.backend.state_at(*at)?; + let mut state = self.backend.state_at(*at)?; - match recorder { - Some(recorder) => { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as Box - )?; + match recorder { + Some(recorder) => { + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); - // It is important to extract the runtime code here before we create the proof - // recorder. 
- let runtime_code = state_runtime_code.runtime_code()?; + let state_runtime_code = + sp_state_machine::backend::BackendRuntimeCode::new(&trie_state); + // It is important to extract the runtime code here before we create the proof + // recorder. + let runtime_code = state_runtime_code.runtime_code()?; - let backend = sp_state_machine::ProvingBackend::new_with_recorder( - trie_state, - recorder.clone(), - ); + let backend = sp_state_machine::ProvingBackend::new_with_recorder( + trie_state, + recorder.clone(), + ); - let changes = &mut *changes.borrow_mut(); - let mut state_machine = StateMachine::new( - &backend, - changes_trie_state, - changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ); - // TODO: https://github.com/paritytech/substrate/issues/4455 - // .with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) - }, - None => { - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - let runtime_code = state_runtime_code.runtime_code()?; - let changes = &mut *changes.borrow_mut(); - let mut state_machine = StateMachine::new( - &state, - changes_trie_state, - changes, - &self.executor, - method, - call_data, - extensions.unwrap_or_default(), - &runtime_code, - self.spawn_handle.clone(), - ).with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)); - state_machine.execute_using_consensus_failure_handler(execution_manager, native_call) - } - }.map_err(Into::into) - } + let changes = &mut *changes.borrow_mut(); + let mut state_machine = StateMachine::new( + &backend, + changes_trie_state, + changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ); + // TODO: https://github.com/paritytech/substrate/issues/4455 + // 
.with_storage_transaction_cache(storage_transaction_cache.as_mut().map(|c| &mut **c)) + state_machine + .execute_using_consensus_failure_handler(execution_manager, native_call) + } + None => { + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + let runtime_code = state_runtime_code.runtime_code()?; + let changes = &mut *changes.borrow_mut(); + let mut state_machine = StateMachine::new( + &state, + changes_trie_state, + changes, + &self.executor, + method, + call_data, + extensions.unwrap_or_default(), + &runtime_code, + self.spawn_handle.clone(), + ) + .with_storage_transaction_cache( + storage_transaction_cache.as_mut().map(|c| &mut **c), + ); + state_machine + .execute_using_consensus_failure_handler(execution_manager, native_call) + } + } + .map_err(Into::into) + } - fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { - let mut overlay = OverlayedChanges::default(); - let changes_trie_state = backend::changes_tries_state_at_block( - id, - self.backend.changes_trie_storage(), - )?; - let state = self.backend.state_at(*id)?; - let mut cache = StorageTransactionCache::::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &state, - changes_trie_state, - None, - ); - let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); - self.executor.runtime_version(&mut ext, &state_runtime_code.runtime_code()?) 
- .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) - } + fn runtime_version(&self, id: &BlockId) -> sp_blockchain::Result { + let mut overlay = OverlayedChanges::default(); + let changes_trie_state = + backend::changes_tries_state_at_block(id, self.backend.changes_trie_storage())?; + let state = self.backend.state_at(*id)?; + let mut cache = StorageTransactionCache::::default(); + let mut ext = Ext::new(&mut overlay, &mut cache, &state, changes_trie_state, None); + let state_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&state); + self.executor + .runtime_version(&mut ext, &state_runtime_code.runtime_code()?) + .map_err(|e| sp_blockchain::Error::VersionInvalid(format!("{:?}", e)).into()) + } - fn prove_at_trie_state>>( - &self, - trie_state: &sp_state_machine::TrieBackend>, - overlay: &mut OverlayedChanges, - method: &str, - call_data: &[u8] - ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { - sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( - trie_state, - overlay, - &self.executor, - self.spawn_handle.clone(), - method, - call_data, - &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, - ) - .map_err(Into::into) - } + fn prove_at_trie_state>>( + &self, + trie_state: &sp_state_machine::TrieBackend>, + overlay: &mut OverlayedChanges, + method: &str, + call_data: &[u8], + ) -> Result<(Vec, StorageProof), sp_blockchain::Error> { + sp_state_machine::prove_execution_on_trie_backend::<_, _, NumberFor, _>( + trie_state, + overlay, + &self.executor, + self.spawn_handle.clone(), + method, + call_data, + &sp_state_machine::backend::BackendRuntimeCode::new(trie_state).runtime_code()?, + ) + .map_err(Into::into) + } - fn native_runtime_version(&self) -> Option<&NativeVersion> { - Some(self.executor.native_version()) - } + fn native_runtime_version(&self) -> Option<&NativeVersion> { + Some(self.executor.native_version()) + } } impl sp_version::GetRuntimeVersion for 
LocalCallExecutor - where - B: backend::Backend, - E: CodeExecutor + RuntimeInfo + Clone + 'static, - Block: BlockT, +where + B: backend::Backend, + E: CodeExecutor + RuntimeInfo + Clone + 'static, + Block: BlockT, { - fn native_version(&self) -> &sp_version::NativeVersion { - self.executor.native_version() - } + fn native_version(&self) -> &sp_version::NativeVersion { + self.executor.native_version() + } - fn runtime_version( - &self, - at: &BlockId, - ) -> Result { - CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) - } + fn runtime_version(&self, at: &BlockId) -> Result { + CallExecutor::runtime_version(self, at).map_err(|e| format!("{:?}", e)) + } } diff --git a/client/src/cht.rs b/client/src/cht.rs index de67280632..2ff7a5f70e 100644 --- a/client/src/cht.rs +++ b/client/src/cht.rs @@ -23,15 +23,15 @@ //! root has. A correct proof implies that the claimed block is identical to the one //! we discarded. -use hash_db; use codec::Encode; +use hash_db; use sp_trie; -use sp_core::{H256, convert_hash}; -use sp_runtime::traits::{Header as HeaderT, AtLeast32Bit, Zero, One}; +use sp_core::{convert_hash, H256}; +use sp_runtime::traits::{AtLeast32Bit, Header as HeaderT, One, Zero}; use sp_state_machine::{ - MemoryDB, TrieBackend, Backend as StateBackend, StorageProof, InMemoryBackend, - prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend + prove_read_on_trie_backend, read_proof_check, read_proof_check_on_proving_backend, + Backend as StateBackend, InMemoryBackend, MemoryDB, StorageProof, TrieBackend, }; use sp_blockchain::{Error as ClientError, Result as ClientResult}; @@ -42,247 +42,258 @@ const SIZE: u32 = 2048; /// Gets default CHT size. pub fn size>() -> N { - SIZE.into() + SIZE.into() } /// Returns Some(cht_number) if CHT is need to be built when the block with given number is canonized. 
pub fn is_build_required(cht_size: N, block_num: N) -> Option - where - N: Clone + AtLeast32Bit, +where + N: Clone + AtLeast32Bit, { - let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?; - let two = N::one() + N::one(); - if block_cht_num < two { - return None; - } - let cht_start = start_number(cht_size, block_cht_num.clone()); - if cht_start != block_num { - return None; - } - - Some(block_cht_num - two) + let block_cht_num = block_to_cht_number(cht_size.clone(), block_num.clone())?; + let two = N::one() + N::one(); + if block_cht_num < two { + return None; + } + let cht_start = start_number(cht_size, block_cht_num.clone()); + if cht_start != block_num { + return None; + } + + Some(block_cht_num - two) } /// Returns Some(max_cht_number) if CHT has ever been built given maximal canonical block number. pub fn max_cht_number(cht_size: N, max_canonical_block: N) -> Option - where - N: Clone + AtLeast32Bit, +where + N: Clone + AtLeast32Bit, { - let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?; - let two = N::one() + N::one(); - if max_cht_number < two { - return None; - } - Some(max_cht_number - two) + let max_cht_number = block_to_cht_number(cht_size, max_canonical_block)?; + let two = N::one() + N::one(); + if max_cht_number < two { + return None; + } + Some(max_cht_number - two) } /// Compute a CHT root from an iterator of block hashes. Fails if shorter than /// SIZE items. The items are assumed to proceed sequentially from `start_number(cht_num)`. /// Discards the trie's nodes. 
pub fn compute_root( - cht_size: Header::Number, - cht_num: Header::Number, - hashes: I, + cht_size: Header::Number, + cht_num: Header::Number, + hashes: I, ) -> ClientResult - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - I: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord, + I: IntoIterator>>, { - use sp_trie::TrieConfiguration; - Ok(sp_trie::trie_types::Layout::::trie_root( - build_pairs::(cht_size, cht_num, hashes)? - )) + use sp_trie::TrieConfiguration; + Ok(sp_trie::trie_types::Layout::::trie_root( + build_pairs::(cht_size, cht_num, hashes)?, + )) } /// Build CHT-based header proof. pub fn build_proof( - cht_size: Header::Number, - cht_num: Header::Number, - blocks: BlocksI, - hashes: HashesI + cht_size: Header::Number, + cht_num: Header::Number, + blocks: BlocksI, + hashes: HashesI, ) -> ClientResult - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, - BlocksI: IntoIterator, - HashesI: IntoIterator>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, + BlocksI: IntoIterator, + HashesI: IntoIterator>>, { - let transaction = build_pairs::(cht_size, cht_num, hashes)? - .into_iter() - .map(|(k, v)| (k, Some(v))) - .collect::>(); - let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); - let trie_storage = storage.as_trie_backend() - .expect("InMemoryState::as_trie_backend always returns Some; qed"); - prove_read_on_trie_backend( - trie_storage, - blocks.into_iter().map(|number| encode_cht_key(number)), - ).map_err(ClientError::Execution) + let transaction = build_pairs::(cht_size, cht_num, hashes)? 
+ .into_iter() + .map(|(k, v)| (k, Some(v))) + .collect::>(); + let mut storage = InMemoryBackend::::default().update(vec![(None, transaction)]); + let trie_storage = storage + .as_trie_backend() + .expect("InMemoryState::as_trie_backend always returns Some; qed"); + prove_read_on_trie_backend( + trie_storage, + blocks.into_iter().map(|number| encode_cht_key(number)), + ) + .map_err(ClientError::Execution) } /// Check CHT-based header proof. pub fn check_proof( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - remote_proof: StorageProof, + local_root: Header::Hash, + local_number: Header::Number, + remote_hash: Header::Hash, + remote_proof: StorageProof, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, { - do_check_proof::( - local_root, - local_number, - remote_hash, - move |local_root, local_cht_key| - read_proof_check::( - local_root, - remote_proof, - ::std::iter::once(local_cht_key), - ) - .map(|mut map| map - .remove(local_cht_key) - .expect("checked proof of local_cht_key; qed")) - .map_err(|e| ClientError::from(e)), - ) + do_check_proof::( + local_root, + local_number, + remote_hash, + move |local_root, local_cht_key| { + read_proof_check::( + local_root, + remote_proof, + ::std::iter::once(local_cht_key), + ) + .map(|mut map| { + map.remove(local_cht_key) + .expect("checked proof of local_cht_key; qed") + }) + .map_err(|e| ClientError::from(e)) + }, + ) } /// Check CHT-based header proof on pre-created proving backend. 
pub fn check_proof_on_proving_backend( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - proving_backend: &TrieBackend, Hasher>, + local_root: Header::Hash, + local_number: Header::Number, + remote_hash: Header::Hash, + proving_backend: &TrieBackend, Hasher>, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord + codec::Codec, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord + codec::Codec, { - do_check_proof::( - local_root, - local_number, - remote_hash, - |_, local_cht_key| - read_proof_check_on_proving_backend::( - proving_backend, - local_cht_key, - ).map_err(|e| ClientError::from(e)), - ) + do_check_proof::( + local_root, + local_number, + remote_hash, + |_, local_cht_key| { + read_proof_check_on_proving_backend::(proving_backend, local_cht_key) + .map_err(|e| ClientError::from(e)) + }, + ) } /// Check CHT-based header proof using passed checker function. fn do_check_proof( - local_root: Header::Hash, - local_number: Header::Number, - remote_hash: Header::Hash, - checker: F, + local_root: Header::Hash, + local_number: Header::Number, + remote_hash: Header::Hash, + checker: F, ) -> ClientResult<()> - where - Header: HeaderT, - Hasher: hash_db::Hasher, - Hasher::Out: Ord, - F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, +where + Header: HeaderT, + Hasher: hash_db::Hasher, + Hasher::Out: Ord, + F: FnOnce(Hasher::Out, &[u8]) -> ClientResult>>, { - let root: Hasher::Out = convert_hash(&local_root); - let local_cht_key = encode_cht_key(local_number); - let local_cht_value = checker(root, &local_cht_key)?; - let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?; - let local_hash = decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; - match &local_hash[..] 
== remote_hash.as_ref() { - true => Ok(()), - false => Err(ClientError::InvalidCHTProof.into()), - } - + let root: Hasher::Out = convert_hash(&local_root); + let local_cht_key = encode_cht_key(local_number); + let local_cht_value = checker(root, &local_cht_key)?; + let local_cht_value = local_cht_value.ok_or_else(|| ClientError::InvalidCHTProof)?; + let local_hash = + decode_cht_value(&local_cht_value).ok_or_else(|| ClientError::InvalidCHTProof)?; + match &local_hash[..] == remote_hash.as_ref() { + true => Ok(()), + false => Err(ClientError::InvalidCHTProof.into()), + } } /// Group ordered blocks by CHT number and call functor with blocks of each group. pub fn for_each_cht_group( - cht_size: Header::Number, - blocks: I, - mut functor: F, - mut functor_param: P, + cht_size: Header::Number, + blocks: I, + mut functor: F, + mut functor_param: P, ) -> ClientResult<()> - where - Header: HeaderT, - I: IntoIterator, - F: FnMut(P, Header::Number, Vec) -> ClientResult

, +where + Header: HeaderT, + I: IntoIterator, + F: FnMut(P, Header::Number, Vec) -> ClientResult

, { - let mut current_cht_num = None; - let mut current_cht_blocks = Vec::new(); - for block in blocks { - let new_cht_num = match block_to_cht_number(cht_size, block) { - Some(new_cht_num) => new_cht_num, - None => return Err(ClientError::Backend(format!( - "Cannot compute CHT root for the block #{}", block)).into() - ), - }; - - let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num); - if advance_to_next_cht { - let current_cht_num = current_cht_num.expect("advance_to_next_cht is true; - it is true only when current_cht_num is Some; qed"); - assert!(new_cht_num > current_cht_num, "for_each_cht_group only supports ordered iterators"); - - functor_param = functor( - functor_param, - current_cht_num, - ::std::mem::replace(&mut current_cht_blocks, Vec::new()), - )?; - } - - current_cht_blocks.push(block); - current_cht_num = Some(new_cht_num); - } - - if let Some(current_cht_num) = current_cht_num { - functor( - functor_param, - current_cht_num, - ::std::mem::replace(&mut current_cht_blocks, Vec::new()), - )?; - } - - Ok(()) + let mut current_cht_num = None; + let mut current_cht_blocks = Vec::new(); + for block in blocks { + let new_cht_num = match block_to_cht_number(cht_size, block) { + Some(new_cht_num) => new_cht_num, + None => { + return Err(ClientError::Backend(format!( + "Cannot compute CHT root for the block #{}", + block + )) + .into()) + } + }; + + let advance_to_next_cht = current_cht_num.is_some() && current_cht_num != Some(new_cht_num); + if advance_to_next_cht { + let current_cht_num = current_cht_num.expect( + "advance_to_next_cht is true; + it is true only when current_cht_num is Some; qed", + ); + assert!( + new_cht_num > current_cht_num, + "for_each_cht_group only supports ordered iterators" + ); + + functor_param = functor( + functor_param, + current_cht_num, + ::std::mem::replace(&mut current_cht_blocks, Vec::new()), + )?; + } + + current_cht_blocks.push(block); + current_cht_num = Some(new_cht_num); + } + + 
if let Some(current_cht_num) = current_cht_num { + functor( + functor_param, + current_cht_num, + ::std::mem::replace(&mut current_cht_blocks, Vec::new()), + )?; + } + + Ok(()) } /// Build pairs for computing CHT. fn build_pairs( - cht_size: Header::Number, - cht_num: Header::Number, - hashes: I + cht_size: Header::Number, + cht_num: Header::Number, + hashes: I, ) -> ClientResult, Vec)>> - where - Header: HeaderT, - I: IntoIterator>>, +where + Header: HeaderT, + I: IntoIterator>>, { - let start_num = start_number(cht_size, cht_num); - let mut pairs = Vec::new(); - let mut hash_index = Header::Number::zero(); - for hash in hashes.into_iter() { - let hash = hash?.ok_or_else(|| ClientError::from( - ClientError::MissingHashRequiredForCHT - ))?; - pairs.push(( - encode_cht_key(start_num + hash_index).to_vec(), - encode_cht_value(hash) - )); - hash_index += Header::Number::one(); - if hash_index == cht_size { - break; - } - } - - if hash_index == cht_size { - Ok(pairs) - } else { - Err(ClientError::MissingHashRequiredForCHT) - } + let start_num = start_number(cht_size, cht_num); + let mut pairs = Vec::new(); + let mut hash_index = Header::Number::zero(); + for hash in hashes.into_iter() { + let hash = + hash?.ok_or_else(|| ClientError::from(ClientError::MissingHashRequiredForCHT))?; + pairs.push(( + encode_cht_key(start_num + hash_index).to_vec(), + encode_cht_value(hash), + )); + hash_index += Header::Number::one(); + if hash_index == cht_size { + break; + } + } + + if hash_index == cht_size { + Ok(pairs) + } else { + Err(ClientError::MissingHashRequiredForCHT) + } } /// Get the starting block of a given CHT. @@ -292,174 +303,188 @@ fn build_pairs( /// This is because the genesis hash is assumed to be known /// and including it would be redundant. pub fn start_number(cht_size: N, cht_num: N) -> N { - (cht_num * cht_size) + N::one() + (cht_num * cht_size) + N::one() } /// Get the ending block of a given CHT. 
pub fn end_number(cht_size: N, cht_num: N) -> N { - (cht_num + N::one()) * cht_size + (cht_num + N::one()) * cht_size } /// Convert a block number to a CHT number. /// Returns `None` for `block_num` == 0, `Some` otherwise. pub fn block_to_cht_number(cht_size: N, block_num: N) -> Option { - if block_num == N::zero() { - None - } else { - Some((block_num - N::one()) / cht_size) - } + if block_num == N::zero() { + None + } else { + Some((block_num - N::one()) / cht_size) + } } /// Convert header number into CHT key. pub fn encode_cht_key(number: N) -> Vec { - number.encode() + number.encode() } /// Convert header hash into CHT value. fn encode_cht_value>(hash: Hash) -> Vec { - hash.as_ref().to_vec() + hash.as_ref().to_vec() } /// Convert CHT value into block header hash. pub fn decode_cht_value(value: &[u8]) -> Option { - match value.len() { - 32 => Some(H256::from_slice(&value[0..32])), - _ => None, - } - + match value.len() { + 32 => Some(H256::from_slice(&value[0..32])), + _ => None, + } } #[cfg(test)] mod tests { - use substrate_test_runtime_client::runtime::Header; - use sp_runtime::traits::BlakeTwo256; - use super::*; - - #[test] - fn is_build_required_works() { - assert_eq!(is_build_required(SIZE, 0u32.into()), None); - assert_eq!(is_build_required(SIZE, 1u32.into()), None); - assert_eq!(is_build_required(SIZE, SIZE), None); - assert_eq!(is_build_required(SIZE, SIZE + 1), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE), None); - assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0)); - assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None); - assert_eq!(is_build_required(SIZE, 3 * SIZE), None); - assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1)); - assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None); - } - - #[test] - fn max_cht_number_works() { - assert_eq!(max_cht_number(SIZE, 0u32.into()), None); - assert_eq!(max_cht_number(SIZE, 1u32.into()), None); - assert_eq!(max_cht_number(SIZE, SIZE), None); - assert_eq!(max_cht_number(SIZE, 
SIZE + 1), None); - assert_eq!(max_cht_number(SIZE, 2 * SIZE), None); - assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0)); - assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE + 1), Some(1)); - assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1)); - } - - #[test] - fn start_number_works() { - assert_eq!(start_number(SIZE, 0u32), 1u32); - assert_eq!(start_number(SIZE, 1u32), SIZE + 1); - assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1); - } - - #[test] - fn end_number_works() { - assert_eq!(end_number(SIZE, 0u32), SIZE); - assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE); - assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE); - } - - #[test] - fn build_pairs_fails_when_no_enough_blocks() { - assert!(build_pairs::(SIZE as _, 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2)).is_err()); - } - - #[test] - fn build_pairs_fails_when_missing_block() { - assert!(build_pairs::( - SIZE as _, - 0, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize / 2) - .chain(::std::iter::once(Ok(None))) - .chain(::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) - .take(SIZE as usize / 2 - 1)) - ).is_err()); - } - - #[test] - fn compute_root_works() { - assert!(compute_root::( - SIZE as _, - 42, - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_ok()); - } - - #[test] - #[should_panic] - fn build_proof_panics_when_querying_wrong_block() { - assert!(build_proof::( - SIZE as _, - 0, - vec![(SIZE * 1000) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_err()); - } - - #[test] - fn build_proof_works() { - assert!(build_proof::( - SIZE as _, - 0, - vec![(SIZE / 2) as u64], - ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) - .take(SIZE as usize) - ).is_ok()); - } - - 
#[test] - #[should_panic] - fn for_each_cht_group_panics() { - let cht_size = SIZE as u64; - let _ = for_each_cht_group::( - cht_size, - vec![cht_size * 5, cht_size * 2], - |_, _, _| Ok(()), - (), - ); - } - - #[test] - fn for_each_cht_group_works() { - let cht_size = SIZE as u64; - let _ = for_each_cht_group::( - cht_size, - vec![ - cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5, - cht_size * 4 + 1, cht_size * 4 + 7, - cht_size * 6 + 1 - ], |_, cht_num, blocks| { - match cht_num { - 2 => assert_eq!(blocks, vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5]), - 4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]), - 6 => assert_eq!(blocks, vec![cht_size * 6 + 1]), - _ => unreachable!(), - } - - Ok(()) - }, () - ); - } + use super::*; + use sp_runtime::traits::BlakeTwo256; + use substrate_test_runtime_client::runtime::Header; + + #[test] + fn is_build_required_works() { + assert_eq!(is_build_required(SIZE, 0u32.into()), None); + assert_eq!(is_build_required(SIZE, 1u32.into()), None); + assert_eq!(is_build_required(SIZE, SIZE), None); + assert_eq!(is_build_required(SIZE, SIZE + 1), None); + assert_eq!(is_build_required(SIZE, 2 * SIZE), None); + assert_eq!(is_build_required(SIZE, 2 * SIZE + 1), Some(0)); + assert_eq!(is_build_required(SIZE, 2 * SIZE + 2), None); + assert_eq!(is_build_required(SIZE, 3 * SIZE), None); + assert_eq!(is_build_required(SIZE, 3 * SIZE + 1), Some(1)); + assert_eq!(is_build_required(SIZE, 3 * SIZE + 2), None); + } + + #[test] + fn max_cht_number_works() { + assert_eq!(max_cht_number(SIZE, 0u32.into()), None); + assert_eq!(max_cht_number(SIZE, 1u32.into()), None); + assert_eq!(max_cht_number(SIZE, SIZE), None); + assert_eq!(max_cht_number(SIZE, SIZE + 1), None); + assert_eq!(max_cht_number(SIZE, 2 * SIZE), None); + assert_eq!(max_cht_number(SIZE, 2 * SIZE + 1), Some(0)); + assert_eq!(max_cht_number(SIZE, 2 * SIZE + 2), Some(0)); + assert_eq!(max_cht_number(SIZE, 3 * SIZE), Some(0)); + assert_eq!(max_cht_number(SIZE, 3 
* SIZE + 1), Some(1)); + assert_eq!(max_cht_number(SIZE, 3 * SIZE + 2), Some(1)); + } + + #[test] + fn start_number_works() { + assert_eq!(start_number(SIZE, 0u32), 1u32); + assert_eq!(start_number(SIZE, 1u32), SIZE + 1); + assert_eq!(start_number(SIZE, 2u32), SIZE + SIZE + 1); + } + + #[test] + fn end_number_works() { + assert_eq!(end_number(SIZE, 0u32), SIZE); + assert_eq!(end_number(SIZE, 1u32), SIZE + SIZE); + assert_eq!(end_number(SIZE, 2u32), SIZE + SIZE + SIZE); + } + + #[test] + fn build_pairs_fails_when_no_enough_blocks() { + assert!(build_pairs::( + SIZE as _, + 0, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize / 2) + ) + .is_err()); + } + + #[test] + fn build_pairs_fails_when_missing_block() { + assert!(build_pairs::( + SIZE as _, + 0, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))) + .take(SIZE as usize / 2) + .chain(::std::iter::once(Ok(None))) + .chain( + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(2)))) + .take(SIZE as usize / 2 - 1) + ) + ) + .is_err()); + } + + #[test] + fn compute_root_works() { + assert!(compute_root::( + SIZE as _, + 42, + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); + } + + #[test] + #[should_panic] + fn build_proof_panics_when_querying_wrong_block() { + assert!(build_proof::( + SIZE as _, + 0, + vec![(SIZE * 1000) as u64], + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_err()); + } + + #[test] + fn build_proof_works() { + assert!(build_proof::( + SIZE as _, + 0, + vec![(SIZE / 2) as u64], + ::std::iter::repeat_with(|| Ok(Some(H256::from_low_u64_be(1)))).take(SIZE as usize) + ) + .is_ok()); + } + + #[test] + #[should_panic] + fn for_each_cht_group_panics() { + let cht_size = SIZE as u64; + let _ = for_each_cht_group::( + cht_size, + vec![cht_size * 5, cht_size * 2], + |_, _, _| Ok(()), + (), + ); + } + + #[test] + fn for_each_cht_group_works() { + let 
cht_size = SIZE as u64; + let _ = for_each_cht_group::( + cht_size, + vec![ + cht_size * 2 + 1, + cht_size * 2 + 2, + cht_size * 2 + 5, + cht_size * 4 + 1, + cht_size * 4 + 7, + cht_size * 6 + 1, + ], + |_, cht_num, blocks| { + match cht_num { + 2 => assert_eq!( + blocks, + vec![cht_size * 2 + 1, cht_size * 2 + 2, cht_size * 2 + 5] + ), + 4 => assert_eq!(blocks, vec![cht_size * 4 + 1, cht_size * 4 + 7]), + 6 => assert_eq!(blocks, vec![cht_size * 6 + 1]), + _ => unreachable!(), + } + + Ok(()) + }, + (), + ); + } } diff --git a/client/src/client.rs b/client/src/client.rs index a71d6bf964..3b390d39b2 100644 --- a/client/src/client.rs +++ b/client/src/client.rs @@ -16,3544 +16,4404 @@ //! Substrate Client -use std::{ - marker::PhantomData, collections::{HashSet, BTreeMap, HashMap}, sync::Arc, panic::UnwindSafe, - result, -}; +use codec::{Decode, Encode}; +use hash_db::Prefix; use log::{info, trace, warn}; use parking_lot::{Mutex, RwLock}; -use codec::{Encode, Decode}; -use hash_db::Prefix; +use sc_executor::{RuntimeInfo, RuntimeVersion}; +use sc_telemetry::{telemetry, SUBSTRATE_INFO}; +use sp_blockchain::{ + self as blockchain, well_known_cache_keys::Id as CacheKeyId, Backend as ChainBackend, Cache, + CachedHeaderMetadata, HeaderBackend as ChainHeaderBackend, HeaderMetadata, ProvideCache, +}; +use sp_consensus::{ + BlockCheckParams, BlockImportParams, BlockOrigin, BlockStatus, Error as ConsensusError, + ForkChoiceStrategy, ImportResult, RecordProof, SelectChain, +}; use sp_core::{ - ChangesTrieConfiguration, convert_hash, traits::CodeExecutor, - NativeOrEncoded, storage::{StorageKey, StorageData, well_known_keys, ChildInfo}, + convert_hash, + storage::{well_known_keys, ChildInfo, StorageData, StorageKey}, + traits::CodeExecutor, + ChangesTrieConfiguration, NativeOrEncoded, }; -use sc_telemetry::{telemetry, SUBSTRATE_INFO}; use sp_runtime::{ - Justification, BuildStorage, - generic::{BlockId, SignedBlock, DigestItem}, - traits::{ - Block as BlockT, Header as HeaderT, 
Zero, NumberFor, HashFor, SaturatedConversion, One, - DigestFor, - }, + generic::{BlockId, DigestItem, SignedBlock}, + traits::{ + Block as BlockT, DigestFor, HashFor, Header as HeaderT, NumberFor, One, + SaturatedConversion, Zero, + }, + BuildStorage, Justification, }; use sp_state_machine::{ - DBValue, Backend as StateBackend, ChangesTrieAnchorBlockId, - prove_read, prove_child_read, ChangesTrieRootsStorage, ChangesTrieStorage, - ChangesTrieConfigurationRange, key_changes, key_changes_proof, -}; -use sc_executor::{RuntimeVersion, RuntimeInfo}; -use sp_consensus::{ - Error as ConsensusError, BlockStatus, BlockImportParams, BlockCheckParams, ImportResult, - BlockOrigin, ForkChoiceStrategy, SelectChain, RecordProof, -}; -use sp_blockchain::{self as blockchain, - Backend as ChainBackend, - HeaderBackend as ChainHeaderBackend, ProvideCache, Cache, - well_known_cache_keys::Id as CacheKeyId, - HeaderMetadata, CachedHeaderMetadata, + key_changes, key_changes_proof, prove_child_read, prove_read, Backend as StateBackend, + ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage, + ChangesTrieStorage, DBValue, }; use sp_trie::StorageProof; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + marker::PhantomData, + panic::UnwindSafe, + result, + sync::Arc, +}; +use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; use sp_api::{ - CallApiAt, ConstructRuntimeApi, Core as CoreApi, ApiExt, ApiRef, ProvideRuntimeApi, - CallApiAtParams, + ApiExt, ApiRef, CallApiAt, CallApiAtParams, ConstructRuntimeApi, Core as CoreApi, + ProvideRuntimeApi, }; -use sc_block_builder::{BlockBuilderApi, BlockBuilderProvider}; +use prometheus_endpoint::Registry; pub use sc_client_api::{ - backend::{ - self, BlockImportOperation, PrunableStateChangesTrieStorage, - ClientImportOperation, Finalizer, ImportSummary, NewBlockState, - changes_tries_state_at_block, StorageProvider, - LockImportRun, - }, - client::{ - ImportNotifications, FinalityNotification, 
FinalityNotifications, BlockImportNotification, - ClientInfo, BlockchainEvents, BlockBackend, ProvideUncles, BadBlocks, ForkBlocks, - BlockOf, - }, - execution_extensions::{ExecutionExtensions, ExecutionStrategies}, - notifications::{StorageNotifications, StorageEventStream}, - CallExecutor, ExecutorProvider, ProofProvider, CloneableSpawn, + backend::{ + self, changes_tries_state_at_block, BlockImportOperation, ClientImportOperation, Finalizer, + ImportSummary, LockImportRun, NewBlockState, PrunableStateChangesTrieStorage, + StorageProvider, + }, + client::{ + BadBlocks, BlockBackend, BlockImportNotification, BlockOf, BlockchainEvents, ClientInfo, + FinalityNotification, FinalityNotifications, ForkBlocks, ImportNotifications, + ProvideUncles, + }, + execution_extensions::{ExecutionExtensions, ExecutionStrategies}, + notifications::{StorageEventStream, StorageNotifications}, + CallExecutor, CloneableSpawn, ExecutorProvider, ProofProvider, }; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; use sp_blockchain::Error; -use prometheus_endpoint::Registry; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; +use crate::client::backend::KeyIterator; use crate::{ - call_executor::LocalCallExecutor, - light::{call_executor::prove_execution, fetcher::ChangesProof}, - in_mem, genesis, cht, block_rules::{BlockRules, LookupResult as BlockLookupResult}, + block_rules::{BlockRules, LookupResult as BlockLookupResult}, + call_executor::LocalCallExecutor, + cht, genesis, in_mem, + light::{call_executor::prove_execution, fetcher::ChangesProof}, }; -use crate::client::backend::KeyIterator; /// Substrate Client -pub struct Client where Block: BlockT { - backend: Arc, - executor: E, - storage_notifications: Mutex>, - import_notification_sinks: Mutex>>>, - finality_notification_sinks: Mutex>>>, - // holds the block hash currently being imported. 
TODO: replace this with block queue - importing_block: RwLock>, - block_rules: BlockRules, - execution_extensions: ExecutionExtensions, - _phantom: PhantomData, +pub struct Client +where + Block: BlockT, +{ + backend: Arc, + executor: E, + storage_notifications: Mutex>, + import_notification_sinks: Mutex>>>, + finality_notification_sinks: Mutex>>>, + // holds the block hash currently being imported. TODO: replace this with block queue + importing_block: RwLock>, + block_rules: BlockRules, + execution_extensions: ExecutionExtensions, + _phantom: PhantomData, } // used in importing a block, where additional changes are made after the runtime // executed. enum PrePostHeader { - // they are the same: no post-runtime digest items. - Same(H), - // different headers (pre, post). - Different(H, H), + // they are the same: no post-runtime digest items. + Same(H), + // different headers (pre, post). + Different(H, H), } impl PrePostHeader { - // get a reference to the "post-header" -- the header as it should be after all changes are applied. - fn post(&self) -> &H { - match *self { - PrePostHeader::Same(ref h) => h, - PrePostHeader::Different(_, ref h) => h, - } - } - - // convert to the "post-header" -- the header as it should be after all changes are applied. - fn into_post(self) -> H { - match self { - PrePostHeader::Same(h) => h, - PrePostHeader::Different(_, h) => h, - } - } + // get a reference to the "post-header" -- the header as it should be after all changes are applied. + fn post(&self) -> &H { + match *self { + PrePostHeader::Same(ref h) => h, + PrePostHeader::Different(_, ref h) => h, + } + } + + // convert to the "post-header" -- the header as it should be after all changes are applied. + fn into_post(self) -> H { + match self { + PrePostHeader::Same(h) => h, + PrePostHeader::Different(_, h) => h, + } + } } /// Create an instance of in-memory client. 
pub fn new_in_mem( - executor: E, - genesis_storage: &S, - keystore: Option, - prometheus_registry: Option, - spawn_handle: Box, -) -> sp_blockchain::Result, - LocalCallExecutor, E>, - Block, - RA ->> where - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, - Block: BlockT, + executor: E, + genesis_storage: &S, + keystore: Option, + prometheus_registry: Option, + spawn_handle: Box, +) -> sp_blockchain::Result< + Client, LocalCallExecutor, E>, Block, RA>, +> +where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, { - new_with_backend(Arc::new(in_mem::Backend::new()), executor, genesis_storage, keystore, spawn_handle, prometheus_registry) + new_with_backend( + Arc::new(in_mem::Backend::new()), + executor, + genesis_storage, + keystore, + spawn_handle, + prometheus_registry, + ) } /// Create a client with the explicitly provided backend. /// This is useful for testing backend implementations. pub fn new_with_backend( - backend: Arc, - executor: E, - build_genesis_storage: &S, - keystore: Option, - spawn_handle: Box, - prometheus_registry: Option, + backend: Arc, + executor: E, + build_genesis_storage: &S, + keystore: Option, + spawn_handle: Box, + prometheus_registry: Option, ) -> sp_blockchain::Result, Block, RA>> - where - E: CodeExecutor + RuntimeInfo, - S: BuildStorage, - Block: BlockT, - B: backend::LocalBackend + 'static, +where + E: CodeExecutor + RuntimeInfo, + S: BuildStorage, + Block: BlockT, + B: backend::LocalBackend + 'static, { - let call_executor = LocalCallExecutor::new(backend.clone(), executor, spawn_handle); - let extensions = ExecutionExtensions::new(Default::default(), keystore); - Client::new( - backend, - call_executor, - build_genesis_storage, - Default::default(), - Default::default(), - extensions, - prometheus_registry, - ) + let call_executor = LocalCallExecutor::new(backend.clone(), executor, spawn_handle); + let extensions = ExecutionExtensions::new(Default::default(), keystore); + Client::new( + backend, + 
call_executor, + build_genesis_storage, + Default::default(), + Default::default(), + extensions, + prometheus_registry, + ) } -impl BlockOf for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl BlockOf for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - type Type = Block; + type Type = Block; } impl LockImportRun for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, - { - let inner = || { - let _import_lock = self.backend.get_import_lock().write(); - - let mut op = ClientImportOperation { - op: self.backend.begin_operation()?, - notify_imported: None, - notify_finalized: Vec::new(), - }; - - let r = f(&mut op)?; - - let ClientImportOperation { op, notify_imported, notify_finalized } = op; - self.backend.commit_operation(op)?; - - self.notify_finalized(notify_finalized)?; - self.notify_imported(notify_imported)?; - - Ok(r) - }; - - let result = inner(); - *self.importing_block.write() = None; - - result - } + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { + let inner = || { + let _import_lock = self.backend.get_import_lock().write(); + + let mut op = ClientImportOperation { + op: self.backend.begin_operation()?, + notify_imported: None, + notify_finalized: Vec::new(), + }; + + let r = f(&mut op)?; + + let ClientImportOperation { + op, + notify_imported, + notify_finalized, + } = op; + self.backend.commit_operation(op)?; + + self.notify_finalized(notify_finalized)?; + self.notify_imported(notify_imported)?; + + Ok(r) + }; + + let result = inner(); + *self.importing_block.write() = None; + + result + } } impl LockImportRun for &Client - where - Block: BlockT, - B: backend::Backend, - E: CallExecutor, +where + Block: BlockT, + B: 
backend::Backend, + E: CallExecutor, { - fn lock_import_and_run(&self, f: F) -> Result - where - F: FnOnce(&mut ClientImportOperation) -> Result, - Err: From, - { - (**self).lock_import_and_run(f) - } + fn lock_import_and_run(&self, f: F) -> Result + where + F: FnOnce(&mut ClientImportOperation) -> Result, + Err: From, + { + (**self).lock_import_and_run(f) + } } -impl Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - /// Creates new Substrate Client with given blockchain and code executor. - pub fn new( - backend: Arc, - executor: E, - build_genesis_storage: &dyn BuildStorage, - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, - execution_extensions: ExecutionExtensions, - _prometheus_registry: Option, - ) -> sp_blockchain::Result { - if backend.blockchain().header(BlockId::Number(Zero::zero()))?.is_none() { - let genesis_storage = build_genesis_storage.build_storage()?; - let mut op = backend.begin_operation()?; - backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; - let state_root = op.reset_storage(genesis_storage)?; - let genesis_block = genesis::construct_genesis_block::(state_root.into()); - info!("🔨 Initializing Genesis block/state (state: {}, header-hash: {})", - genesis_block.header().state_root(), - genesis_block.header().hash() - ); - op.set_block_data( - genesis_block.deconstruct().0, - Some(vec![]), - None, - NewBlockState::Final - )?; - backend.commit_operation(op)?; - } - - Ok(Client { - backend, - executor, - storage_notifications: Default::default(), - import_notification_sinks: Default::default(), - finality_notification_sinks: Default::default(), - importing_block: Default::default(), - block_rules: BlockRules::new(fork_blocks, bad_blocks), - execution_extensions, - _phantom: Default::default(), - }) - } - - /// Get a reference to the state at a given block. 
- pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { - self.backend.state_at(*block) - } - - /// Get the code at a given block. - pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { - Ok(StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? - .expect("None is returned if there's no value stored for the given key;\ - ':code' key is always defined; qed").0) - } - - /// Get the RuntimeVersion at a given block. - pub fn runtime_version_at(&self, id: &BlockId) -> sp_blockchain::Result { - self.executor.runtime_version(id) - } - - /// Get block hash by number. - pub fn block_hash(&self, - block_number: <::Header as HeaderT>::Number - ) -> sp_blockchain::Result> { - self.backend.blockchain().hash(block_number) - } - - /// Reads given header and generates CHT-based header proof for CHT of given size. - pub fn header_proof_with_cht_size( - &self, - id: &BlockId, - cht_size: NumberFor, - ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - let proof_error = || sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)); - let header = self.backend.blockchain().expect_header(*id)?; - let block_num = *header.number(); - let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; - let cht_start = cht::start_number(cht_size, cht_num); - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - let headers = cht_range.map(|num| self.block_hash(num)); - let proof = cht::build_proof::, _, _>( - cht_size, - cht_num, - std::iter::once(block_num), - headers, - )?; - Ok((header, proof)) - } - - /// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size. 
- pub fn key_changes_proof_with_cht_size( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - storage_key: Option<&StorageKey>, - key: &StorageKey, - cht_size: NumberFor, - ) -> sp_blockchain::Result> { - struct AccessedRootsRecorder<'a, Block: BlockT> { - storage: &'a dyn ChangesTrieStorage, NumberFor>, - min: NumberFor, - required_roots_proofs: Mutex, Block::Hash>>, - }; - - impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> for - AccessedRootsRecorder<'a, Block> - { - fn build_anchor(&self, hash: Block::Hash) - -> Result>, String> - { - self.storage.build_anchor(hash) - } - - fn root( - &self, - anchor: &ChangesTrieAnchorBlockId>, - block: NumberFor, - ) -> Result, String> { - let root = self.storage.root(anchor, block)?; - if block < self.min { - if let Some(ref root) = root { - self.required_roots_proofs.lock().insert( - block, - root.clone() - ); - } - } - Ok(root) - } - } - - impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> for - AccessedRootsRecorder<'a, Block> - { - fn as_roots_storage(&self) - -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> - { - self - } - - fn with_cached_changed_keys( - &self, - root: &Block::Hash, - functor: &mut dyn FnMut(&HashMap>, HashSet>>), - ) -> bool { - self.storage.with_cached_changed_keys(root, functor) - } - - fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { - self.storage.get(key, prefix) - } - } - - let first_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(first))?; - let (storage, configs) = self.require_changes_trie(first_number, last, true)?; - let min_number = self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(min))?; - - let recording_storage = AccessedRootsRecorder:: { - storage: storage.storage(), - min: min_number, - required_roots_proofs: Mutex::new(BTreeMap::new()), - }; - - let max_number = std::cmp::min( - self.backend.blockchain().info().best_number, - 
self.backend.blockchain().expect_block_number_from_id(&BlockId::Hash(max))?, - ); - - // fetch key changes proof - let mut proof = Vec::new(); - for (config_zero, config_end, config) in configs { - let last_number = self.backend.blockchain() - .expect_block_number_from_id(&BlockId::Hash(last))?; - let config_range = ChangesTrieConfigurationRange { - config: &config, - zero: config_zero, - end: config_end.map(|(config_end_number, _)| config_end_number), - }; - let proof_range = key_changes_proof::, _>( - config_range, - &recording_storage, - first_number, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&last), - number: last_number, - }, - max_number, - storage_key.as_ref().map(|x| &x.0[..]), - &key.0, - ) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; - proof.extend(proof_range); - } - - // now gather proofs for all changes tries roots that were touched during key_changes_proof - // execution AND are unknown (i.e. replaced with CHT) to the requester - let roots = recording_storage.required_roots_proofs.into_inner(); - let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?; - - Ok(ChangesProof { - max_block: max_number, - proof, - roots: roots.into_iter().map(|(n, h)| (n, convert_hash(&h))).collect(), - roots_proof, - }) - } - - /// Generate CHT-based proof for roots of changes tries at given blocks. 
- fn changes_trie_roots_proof>>( - &self, - cht_size: NumberFor, - blocks: I - ) -> sp_blockchain::Result { - // most probably we have touched several changes tries that are parts of the single CHT - // => GroupBy changes tries by CHT number and then gather proof for the whole group at once - let mut proofs = Vec::new(); - - cht::for_each_cht_group::(cht_size, blocks, |_, cht_num, cht_blocks| { - let cht_proof = self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; - proofs.push(cht_proof); - Ok(()) - }, ())?; - - Ok(StorageProof::merge(proofs)) - } - - /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). - fn changes_trie_roots_proof_at_cht( - &self, - cht_size: NumberFor, - cht_num: NumberFor, - blocks: Vec> - ) -> sp_blockchain::Result { - let cht_start = cht::start_number(cht_size, cht_num); - let mut current_num = cht_start; - let cht_range = ::std::iter::from_fn(|| { - let old_current_num = current_num; - current_num = current_num + One::one(); - Some(old_current_num) - }); - let roots = cht_range - .map(|num| self.header(&BlockId::Number(num)) - .map(|block| - block.and_then(|block| block.digest().log(DigestItem::as_changes_trie_root).cloned())) - ); - let proof = cht::build_proof::, _, _>( - cht_size, - cht_num, - blocks, - roots, - )?; - Ok(proof) - } - - /// Returns changes trie storage and all configurations that have been active in the range [first; last]. - /// - /// Configurations are returned in descending order (and obviously never overlap). - /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and - /// stopping on either first, or when CT have been disabled. - /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled - /// inside first..last blocks range. 
- fn require_changes_trie( - &self, - first: NumberFor, - last: Block::Hash, - fail_if_disabled: bool, - ) -> sp_blockchain::Result<( - &dyn PrunableStateChangesTrieStorage, - Vec<(NumberFor, Option<(NumberFor, Block::Hash)>, ChangesTrieConfiguration)>, - )> { - let storage = match self.backend.changes_trie_storage() { - Some(storage) => storage, - None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), - }; - - let mut configs = Vec::with_capacity(1); - let mut current = last; - loop { - let config_range = storage.configuration_at(&BlockId::Hash(current))?; - match config_range.config { - Some(config) => configs.push((config_range.zero.0, config_range.end, config)), - None if !fail_if_disabled => return Ok((storage, configs)), - None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), - } - - if config_range.zero.0 < first { - break; - } - - current = *self.backend.blockchain().expect_header(BlockId::Hash(config_range.zero.1))?.parent_hash(); - } - - Ok((storage, configs)) - } - - /// Apply a checked and validated block to an operation. If a justification is provided - /// then `finalized` *must* be true. - fn apply_block( - &self, - operation: &mut ClientImportOperation, - import_block: BlockImportParams>, - new_cache: HashMap>, - ) -> sp_blockchain::Result where - Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, - { - let BlockImportParams { - origin, - header, - justification, - post_digests, - body, - storage_changes, - finalized, - auxiliary, - fork_choice, - intermediates, - import_existing, - .. 
- } = import_block; - - assert!(justification.is_some() && finalized || justification.is_none()); - - if !intermediates.is_empty() { - return Err(Error::IncompletePipeline) - } - - let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; - - let import_headers = if post_digests.is_empty() { - PrePostHeader::Same(header) - } else { - let mut post_header = header.clone(); - for item in post_digests { - post_header.digest_mut().push(item); - } - PrePostHeader::Different(header, post_header) - }; - - let hash = import_headers.post().hash(); - let height = (*import_headers.post().number()).saturated_into::(); - - *self.importing_block.write() = Some(hash); - - let result = self.execute_and_import_block( - operation, - origin, - hash, - import_headers, - justification, - body, - storage_changes, - new_cache, - finalized, - auxiliary, - fork_choice, - import_existing, - ); - - if let Ok(ImportResult::Imported(ref aux)) = result { - if aux.is_new_best { - use rand::Rng; - - // don't send telemetry block import events during initial sync for every - // block to avoid spamming the telemetry server, these events will be randomly - // sent at a rate of 1/10. 
- if origin != BlockOrigin::NetworkInitialSync || - rand::thread_rng().gen_bool(0.1) - { - telemetry!(SUBSTRATE_INFO; "block.import"; - "height" => height, - "best" => ?hash, - "origin" => ?origin - ); - } - } - } - - result - } - - fn execute_and_import_block( - &self, - operation: &mut ClientImportOperation, - origin: BlockOrigin, - hash: Block::Hash, - import_headers: PrePostHeader, - justification: Option, - body: Option>, - storage_changes: Option, Block>>, - new_cache: HashMap>, - finalized: bool, - aux: Vec<(Vec, Option>)>, - fork_choice: ForkChoiceStrategy, - import_existing: bool, - ) -> sp_blockchain::Result where - Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, - { - let parent_hash = import_headers.post().parent_hash().clone(); - let status = self.backend.blockchain().status(BlockId::Hash(hash))?; - match (import_existing, status) { - (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), - (false, blockchain::BlockStatus::Unknown) => {}, - (true, blockchain::BlockStatus::InChain) => {}, - (true, blockchain::BlockStatus::Unknown) => - return Err(Error::UnknownBlock(format!("{:?}", hash))), - } - - let info = self.backend.blockchain().info(); - - // the block is lower than our last finalized block so it must revert - // finality, refusing import. - if *import_headers.post().number() <= info.finalized_number { - return Err(sp_blockchain::Error::NotInFinalizedChain); - } - - // this is a fairly arbitrary choice of where to draw the line on making notifications, - // but the general goal is to only make notifications when we are already fully synced - // and get a new chain head. 
- let make_notifications = match origin { - BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => true, - BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, - }; - - let storage_changes = match storage_changes { - Some(storage_changes) => { - self.backend.begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; - - // ensure parent block is finalized to maintain invariant that - // finality is called sequentially. - if finalized { - self.apply_finality_with_block_hash( - operation, - parent_hash, - None, - info.best_hash, - make_notifications, - )?; - } - - operation.op.update_cache(new_cache); - - let (main_sc, child_sc, tx, _, changes_trie_tx) = storage_changes.into_inner(); - - operation.op.update_db_storage(tx)?; - operation.op.update_storage(main_sc.clone(), child_sc.clone())?; - - if let Some(changes_trie_transaction) = changes_trie_tx { - operation.op.update_changes_trie(changes_trie_transaction)?; - } - - Some((main_sc, child_sc)) - }, - None => None, - }; - - let is_new_best = finalized || match fork_choice { - ForkChoiceStrategy::LongestChain => import_headers.post().number() > &info.best_number, - ForkChoiceStrategy::Custom(v) => v, - }; - - let leaf_state = if finalized { - NewBlockState::Final - } else if is_new_best { - NewBlockState::Best - } else { - NewBlockState::Normal - }; - - let retracted = if is_new_best { - let route_from_best = sp_blockchain::tree_route( - self.backend.blockchain(), - info.best_hash, - parent_hash, - )?; - route_from_best.retracted().iter().rev().map(|e| e.hash.clone()).collect() - } else { - Vec::default() - }; - - trace!( - "Imported {}, (#{}), best={}, origin={:?}", - hash, - import_headers.post().number(), - is_new_best, - origin, - ); - - operation.op.set_block_data( - import_headers.post().clone(), - body, - justification, - leaf_state, - )?; - - operation.op.insert_aux(aux)?; - - if make_notifications { - if finalized { - 
operation.notify_finalized.push(hash); - } - - operation.notify_imported = Some(ImportSummary { - hash, - origin, - header: import_headers.into_post(), - is_new_best, - storage_changes, - retracted, - }) - } - - Ok(ImportResult::imported(is_new_best)) - } - - /// Prepares the storage changes for a block. - /// - /// It checks if the state should be enacted and if the `import_block` maybe already provides - /// the required storage changes. If the state should be enacted and the storage changes are not - /// provided, the block is re-executed to get the storage changes. - fn prepare_block_storage_changes( - &self, - import_block: &mut BlockImportParams>, - ) -> sp_blockchain::Result> - where - Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, - { - let parent_hash = import_block.header.parent_hash(); - let at = BlockId::Hash(*parent_hash); - let enact_state = match self.block_status(&at)? { - BlockStatus::Unknown => return Ok(Some(ImportResult::UnknownParent)), - BlockStatus::InChainWithState | BlockStatus::Queued => true, - BlockStatus::InChainPruned if import_block.allow_missing_state => false, - BlockStatus::InChainPruned => return Ok(Some(ImportResult::MissingState)), - BlockStatus::KnownBad => return Ok(Some(ImportResult::KnownBad)), - }; - - match (enact_state, &mut import_block.storage_changes, &mut import_block.body) { - // We have storage changes and should enact the state, so we don't need to do anything - // here - (true, Some(_), _) => {}, - // We should enact state, but don't have any storage changes, so we need to execute the - // block. 
- (true, ref mut storage_changes @ None, Some(ref body)) => { - let runtime_api = self.runtime_api(); - - runtime_api.execute_block( - &at, - Block::new(import_block.header.clone(), body.clone()), - )?; - - let state = self.backend.state_at(at)?; - let changes_trie_state = changes_tries_state_at_block( - &at, - self.backend.changes_trie_storage(), - )?; - - let gen_storage_changes = runtime_api.into_storage_changes( - &state, - changes_trie_state.as_ref(), - *parent_hash, - )?; - - if import_block.header.state_root() - != &gen_storage_changes.transaction_storage_root - { - return Err(Error::InvalidStateRoot) - } else { - **storage_changes = Some(gen_storage_changes); - } - }, - // No block body, no storage changes - (true, None, None) => {}, - // We should not enact the state, so we set the storage changes to `None`. - (false, changes, _) => { - changes.take(); - } - }; - - Ok(None) - } - - fn apply_finality_with_block_hash( - &self, - operation: &mut ClientImportOperation, - block: Block::Hash, - justification: Option, - best_block: Block::Hash, - notify: bool, - ) -> sp_blockchain::Result<()> { - // find tree route from last finalized to given block. 
- let last_finalized = self.backend.blockchain().last_finalized()?; - - if block == last_finalized { - warn!("Possible safety violation: attempted to re-finalize last finalized block {:?} ", last_finalized); - return Ok(()); - } - - let route_from_finalized = sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; - - if let Some(retracted) = route_from_finalized.retracted().get(0) { - warn!("Safety violation: attempted to revert finalized block {:?} which is not in the \ - same chain as last finalized {:?}", retracted, last_finalized); - - return Err(sp_blockchain::Error::NotInFinalizedChain); - } - - let route_from_best = sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; - - // if the block is not a direct ancestor of the current best chain, - // then some other block is the common ancestor. - if route_from_best.common_block().hash != block { - // NOTE: we're setting the finalized block as best block, this might - // be slightly inaccurate since we might have a "better" block - // further along this chain, but since best chain selection logic is - // plugable we cannot make a better choice here. usages that need - // an accurate "best" block need to go through `SelectChain` - // instead. - operation.op.mark_head(BlockId::Hash(block))?; - } - - let enacted = route_from_finalized.enacted(); - assert!(enacted.len() > 0); - for finalize_new in &enacted[..enacted.len() - 1] { - operation.op.mark_finalized(BlockId::Hash(finalize_new.hash), None)?; - } - - assert_eq!(enacted.last().map(|e| e.hash), Some(block)); - operation.op.mark_finalized(BlockId::Hash(block), justification)?; - - if notify { - // sometimes when syncing, tons of blocks can be finalized at once. - // we'll send notifications spuriously in that case. - const MAX_TO_NOTIFY: usize = 256; - let enacted = route_from_finalized.enacted(); - let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY); - for finalized in &enacted[start..] 
{ - operation.notify_finalized.push(finalized.hash); - } - } - - Ok(()) - } - - fn notify_finalized( - &self, - notify_finalized: Vec, - ) -> sp_blockchain::Result<()> { - let mut sinks = self.finality_notification_sinks.lock(); - - if notify_finalized.is_empty() { - // cleanup any closed finality notification sinks - // since we won't be running the loop below which - // would also remove any closed sinks. - sinks.retain(|sink| !sink.is_closed()); - - return Ok(()); - } - - for finalized_hash in notify_finalized { - let header = self.header(&BlockId::Hash(finalized_hash))? + /// Creates new Substrate Client with given blockchain and code executor. + pub fn new( + backend: Arc, + executor: E, + build_genesis_storage: &dyn BuildStorage, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + execution_extensions: ExecutionExtensions, + _prometheus_registry: Option, + ) -> sp_blockchain::Result { + if backend + .blockchain() + .header(BlockId::Number(Zero::zero()))? + .is_none() + { + let genesis_storage = build_genesis_storage.build_storage()?; + let mut op = backend.begin_operation()?; + backend.begin_state_operation(&mut op, BlockId::Hash(Default::default()))?; + let state_root = op.reset_storage(genesis_storage)?; + let genesis_block = genesis::construct_genesis_block::(state_root.into()); + info!( + "🔨 Initializing Genesis block/state (state: {}, header-hash: {})", + genesis_block.header().state_root(), + genesis_block.header().hash() + ); + op.set_block_data( + genesis_block.deconstruct().0, + Some(vec![]), + None, + NewBlockState::Final, + )?; + backend.commit_operation(op)?; + } + + Ok(Client { + backend, + executor, + storage_notifications: Default::default(), + import_notification_sinks: Default::default(), + finality_notification_sinks: Default::default(), + importing_block: Default::default(), + block_rules: BlockRules::new(fork_blocks, bad_blocks), + execution_extensions, + _phantom: Default::default(), + }) + } + + /// Get a reference to the state at a 
given block. + pub fn state_at(&self, block: &BlockId) -> sp_blockchain::Result { + self.backend.state_at(*block) + } + + /// Get the code at a given block. + pub fn code_at(&self, id: &BlockId) -> sp_blockchain::Result> { + Ok( + StorageProvider::storage(self, id, &StorageKey(well_known_keys::CODE.to_vec()))? + .expect( + "None is returned if there's no value stored for the given key;\ + ':code' key is always defined; qed", + ) + .0, + ) + } + + /// Get the RuntimeVersion at a given block. + pub fn runtime_version_at(&self, id: &BlockId) -> sp_blockchain::Result { + self.executor.runtime_version(id) + } + + /// Get block hash by number. + pub fn block_hash( + &self, + block_number: <::Header as HeaderT>::Number, + ) -> sp_blockchain::Result> { + self.backend.blockchain().hash(block_number) + } + + /// Reads given header and generates CHT-based header proof for CHT of given size. + pub fn header_proof_with_cht_size( + &self, + id: &BlockId, + cht_size: NumberFor, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + let proof_error = || { + sp_blockchain::Error::Backend(format!("Failed to generate header proof for {:?}", id)) + }; + let header = self.backend.blockchain().expect_header(*id)?; + let block_num = *header.number(); + let cht_num = cht::block_to_cht_number(cht_size, block_num).ok_or_else(proof_error)?; + let cht_start = cht::start_number(cht_size, cht_num); + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + let headers = cht_range.map(|num| self.block_hash(num)); + let proof = cht::build_proof::, _, _>( + cht_size, + cht_num, + std::iter::once(block_num), + headers, + )?; + Ok((header, proof)) + } + + /// Does the same work as `key_changes_proof`, but assumes that CHTs are of passed size. 
+ pub fn key_changes_proof_with_cht_size( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + storage_key: Option<&StorageKey>, + key: &StorageKey, + cht_size: NumberFor, + ) -> sp_blockchain::Result> { + struct AccessedRootsRecorder<'a, Block: BlockT> { + storage: &'a dyn ChangesTrieStorage, NumberFor>, + min: NumberFor, + required_roots_proofs: Mutex, Block::Hash>>, + }; + + impl<'a, Block: BlockT> ChangesTrieRootsStorage, NumberFor> + for AccessedRootsRecorder<'a, Block> + { + fn build_anchor( + &self, + hash: Block::Hash, + ) -> Result>, String> + { + self.storage.build_anchor(hash) + } + + fn root( + &self, + anchor: &ChangesTrieAnchorBlockId>, + block: NumberFor, + ) -> Result, String> { + let root = self.storage.root(anchor, block)?; + if block < self.min { + if let Some(ref root) = root { + self.required_roots_proofs + .lock() + .insert(block, root.clone()); + } + } + Ok(root) + } + } + + impl<'a, Block: BlockT> ChangesTrieStorage, NumberFor> + for AccessedRootsRecorder<'a, Block> + { + fn as_roots_storage( + &self, + ) -> &dyn sp_state_machine::ChangesTrieRootsStorage, NumberFor> + { + self + } + + fn with_cached_changed_keys( + &self, + root: &Block::Hash, + functor: &mut dyn FnMut(&HashMap>, HashSet>>), + ) -> bool { + self.storage.with_cached_changed_keys(root, functor) + } + + fn get(&self, key: &Block::Hash, prefix: Prefix) -> Result, String> { + self.storage.get(key, prefix) + } + } + + let first_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(first))?; + let (storage, configs) = self.require_changes_trie(first_number, last, true)?; + let min_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(min))?; + + let recording_storage = AccessedRootsRecorder:: { + storage: storage.storage(), + min: min_number, + required_roots_proofs: Mutex::new(BTreeMap::new()), + }; + + let max_number = std::cmp::min( + 
self.backend.blockchain().info().best_number, + self.backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(max))?, + ); + + // fetch key changes proof + let mut proof = Vec::new(); + for (config_zero, config_end, config) in configs { + let last_number = self + .backend + .blockchain() + .expect_block_number_from_id(&BlockId::Hash(last))?; + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero, + end: config_end.map(|(config_end_number, _)| config_end_number), + }; + let proof_range = key_changes_proof::, _>( + config_range, + &recording_storage, + first_number, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&last), + number: last_number, + }, + max_number, + storage_key.as_ref().map(|x| &x.0[..]), + &key.0, + ) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + proof.extend(proof_range); + } + + // now gather proofs for all changes tries roots that were touched during key_changes_proof + // execution AND are unknown (i.e. replaced with CHT) to the requester + let roots = recording_storage.required_roots_proofs.into_inner(); + let roots_proof = self.changes_trie_roots_proof(cht_size, roots.keys().cloned())?; + + Ok(ChangesProof { + max_block: max_number, + proof, + roots: roots + .into_iter() + .map(|(n, h)| (n, convert_hash(&h))) + .collect(), + roots_proof, + }) + } + + /// Generate CHT-based proof for roots of changes tries at given blocks. 
+ fn changes_trie_roots_proof>>( + &self, + cht_size: NumberFor, + blocks: I, + ) -> sp_blockchain::Result { + // most probably we have touched several changes tries that are parts of the single CHT + // => GroupBy changes tries by CHT number and then gather proof for the whole group at once + let mut proofs = Vec::new(); + + cht::for_each_cht_group::( + cht_size, + blocks, + |_, cht_num, cht_blocks| { + let cht_proof = + self.changes_trie_roots_proof_at_cht(cht_size, cht_num, cht_blocks)?; + proofs.push(cht_proof); + Ok(()) + }, + (), + )?; + + Ok(StorageProof::merge(proofs)) + } + + /// Generates CHT-based proof for roots of changes tries at given blocks (that are part of single CHT). + fn changes_trie_roots_proof_at_cht( + &self, + cht_size: NumberFor, + cht_num: NumberFor, + blocks: Vec>, + ) -> sp_blockchain::Result { + let cht_start = cht::start_number(cht_size, cht_num); + let mut current_num = cht_start; + let cht_range = ::std::iter::from_fn(|| { + let old_current_num = current_num; + current_num = current_num + One::one(); + Some(old_current_num) + }); + let roots = cht_range.map(|num| { + self.header(&BlockId::Number(num)).map(|block| { + block.and_then(|block| { + block + .digest() + .log(DigestItem::as_changes_trie_root) + .cloned() + }) + }) + }); + let proof = cht::build_proof::, _, _>( + cht_size, cht_num, blocks, roots, + )?; + Ok(proof) + } + + /// Returns changes trie storage and all configurations that have been active in the range [first; last]. + /// + /// Configurations are returned in descending order (and obviously never overlap). + /// If fail_if_disabled is false, returns maximal consequent configurations ranges, starting from last and + /// stopping on either first, or when CT have been disabled. + /// If fail_if_disabled is true, fails when there's a subrange where CT have been disabled + /// inside first..last blocks range. 
+ fn require_changes_trie( + &self, + first: NumberFor, + last: Block::Hash, + fail_if_disabled: bool, + ) -> sp_blockchain::Result<( + &dyn PrunableStateChangesTrieStorage, + Vec<( + NumberFor, + Option<(NumberFor, Block::Hash)>, + ChangesTrieConfiguration, + )>, + )> { + let storage = match self.backend.changes_trie_storage() { + Some(storage) => storage, + None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), + }; + + let mut configs = Vec::with_capacity(1); + let mut current = last; + loop { + let config_range = storage.configuration_at(&BlockId::Hash(current))?; + match config_range.config { + Some(config) => configs.push((config_range.zero.0, config_range.end, config)), + None if !fail_if_disabled => return Ok((storage, configs)), + None => return Err(sp_blockchain::Error::ChangesTriesNotSupported), + } + + if config_range.zero.0 < first { + break; + } + + current = *self + .backend + .blockchain() + .expect_header(BlockId::Hash(config_range.zero.1))? + .parent_hash(); + } + + Ok((storage, configs)) + } + + /// Apply a checked and validated block to an operation. If a justification is provided + /// then `finalized` *must* be true. + fn apply_block( + &self, + operation: &mut ClientImportOperation, + import_block: BlockImportParams>, + new_cache: HashMap>, + ) -> sp_blockchain::Result + where + Self: ProvideRuntimeApi, + >::Api: + CoreApi + ApiExt, + { + let BlockImportParams { + origin, + header, + justification, + post_digests, + body, + storage_changes, + finalized, + auxiliary, + fork_choice, + intermediates, + import_existing, + .. 
+ } = import_block; + + assert!(justification.is_some() && finalized || justification.is_none()); + + if !intermediates.is_empty() { + return Err(Error::IncompletePipeline); + } + + let fork_choice = fork_choice.ok_or(Error::IncompletePipeline)?; + + let import_headers = if post_digests.is_empty() { + PrePostHeader::Same(header) + } else { + let mut post_header = header.clone(); + for item in post_digests { + post_header.digest_mut().push(item); + } + PrePostHeader::Different(header, post_header) + }; + + let hash = import_headers.post().hash(); + let height = (*import_headers.post().number()).saturated_into::(); + + *self.importing_block.write() = Some(hash); + + let result = self.execute_and_import_block( + operation, + origin, + hash, + import_headers, + justification, + body, + storage_changes, + new_cache, + finalized, + auxiliary, + fork_choice, + import_existing, + ); + + if let Ok(ImportResult::Imported(ref aux)) = result { + if aux.is_new_best { + use rand::Rng; + + // don't send telemetry block import events during initial sync for every + // block to avoid spamming the telemetry server, these events will be randomly + // sent at a rate of 1/10. 
+ if origin != BlockOrigin::NetworkInitialSync || rand::thread_rng().gen_bool(0.1) { + telemetry!(SUBSTRATE_INFO; "block.import"; + "height" => height, + "best" => ?hash, + "origin" => ?origin + ); + } + } + } + + result + } + + fn execute_and_import_block( + &self, + operation: &mut ClientImportOperation, + origin: BlockOrigin, + hash: Block::Hash, + import_headers: PrePostHeader, + justification: Option, + body: Option>, + storage_changes: Option, Block>>, + new_cache: HashMap>, + finalized: bool, + aux: Vec<(Vec, Option>)>, + fork_choice: ForkChoiceStrategy, + import_existing: bool, + ) -> sp_blockchain::Result + where + Self: ProvideRuntimeApi, + >::Api: + CoreApi + ApiExt, + { + let parent_hash = import_headers.post().parent_hash().clone(); + let status = self.backend.blockchain().status(BlockId::Hash(hash))?; + match (import_existing, status) { + (false, blockchain::BlockStatus::InChain) => return Ok(ImportResult::AlreadyInChain), + (false, blockchain::BlockStatus::Unknown) => {} + (true, blockchain::BlockStatus::InChain) => {} + (true, blockchain::BlockStatus::Unknown) => { + return Err(Error::UnknownBlock(format!("{:?}", hash))) + } + } + + let info = self.backend.blockchain().info(); + + // the block is lower than our last finalized block so it must revert + // finality, refusing import. + if *import_headers.post().number() <= info.finalized_number { + return Err(sp_blockchain::Error::NotInFinalizedChain); + } + + // this is a fairly arbitrary choice of where to draw the line on making notifications, + // but the general goal is to only make notifications when we are already fully synced + // and get a new chain head. 
+ let make_notifications = match origin { + BlockOrigin::NetworkBroadcast | BlockOrigin::Own | BlockOrigin::ConsensusBroadcast => { + true + } + BlockOrigin::Genesis | BlockOrigin::NetworkInitialSync | BlockOrigin::File => false, + }; + + let storage_changes = match storage_changes { + Some(storage_changes) => { + self.backend + .begin_state_operation(&mut operation.op, BlockId::Hash(parent_hash))?; + + // ensure parent block is finalized to maintain invariant that + // finality is called sequentially. + if finalized { + self.apply_finality_with_block_hash( + operation, + parent_hash, + None, + info.best_hash, + make_notifications, + )?; + } + + operation.op.update_cache(new_cache); + + let (main_sc, child_sc, tx, _, changes_trie_tx) = storage_changes.into_inner(); + + operation.op.update_db_storage(tx)?; + operation + .op + .update_storage(main_sc.clone(), child_sc.clone())?; + + if let Some(changes_trie_transaction) = changes_trie_tx { + operation.op.update_changes_trie(changes_trie_transaction)?; + } + + Some((main_sc, child_sc)) + } + None => None, + }; + + let is_new_best = finalized + || match fork_choice { + ForkChoiceStrategy::LongestChain => { + import_headers.post().number() > &info.best_number + } + ForkChoiceStrategy::Custom(v) => v, + }; + + let leaf_state = if finalized { + NewBlockState::Final + } else if is_new_best { + NewBlockState::Best + } else { + NewBlockState::Normal + }; + + let retracted = if is_new_best { + let route_from_best = + sp_blockchain::tree_route(self.backend.blockchain(), info.best_hash, parent_hash)?; + route_from_best + .retracted() + .iter() + .rev() + .map(|e| e.hash.clone()) + .collect() + } else { + Vec::default() + }; + + trace!( + "Imported {}, (#{}), best={}, origin={:?}", + hash, + import_headers.post().number(), + is_new_best, + origin, + ); + + operation.op.set_block_data( + import_headers.post().clone(), + body, + justification, + leaf_state, + )?; + + operation.op.insert_aux(aux)?; + + if make_notifications { + if 
finalized { + operation.notify_finalized.push(hash); + } + + operation.notify_imported = Some(ImportSummary { + hash, + origin, + header: import_headers.into_post(), + is_new_best, + storage_changes, + retracted, + }) + } + + Ok(ImportResult::imported(is_new_best)) + } + + /// Prepares the storage changes for a block. + /// + /// It checks if the state should be enacted and if the `import_block` maybe already provides + /// the required storage changes. If the state should be enacted and the storage changes are not + /// provided, the block is re-executed to get the storage changes. + fn prepare_block_storage_changes( + &self, + import_block: &mut BlockImportParams>, + ) -> sp_blockchain::Result> + where + Self: ProvideRuntimeApi, + >::Api: + CoreApi + ApiExt, + { + let parent_hash = import_block.header.parent_hash(); + let at = BlockId::Hash(*parent_hash); + let enact_state = match self.block_status(&at)? { + BlockStatus::Unknown => return Ok(Some(ImportResult::UnknownParent)), + BlockStatus::InChainWithState | BlockStatus::Queued => true, + BlockStatus::InChainPruned if import_block.allow_missing_state => false, + BlockStatus::InChainPruned => return Ok(Some(ImportResult::MissingState)), + BlockStatus::KnownBad => return Ok(Some(ImportResult::KnownBad)), + }; + + match ( + enact_state, + &mut import_block.storage_changes, + &mut import_block.body, + ) { + // We have storage changes and should enact the state, so we don't need to do anything + // here + (true, Some(_), _) => {} + // We should enact state, but don't have any storage changes, so we need to execute the + // block. 
+ (true, ref mut storage_changes @ None, Some(ref body)) => { + let runtime_api = self.runtime_api(); + + runtime_api + .execute_block(&at, Block::new(import_block.header.clone(), body.clone()))?; + + let state = self.backend.state_at(at)?; + let changes_trie_state = + changes_tries_state_at_block(&at, self.backend.changes_trie_storage())?; + + let gen_storage_changes = runtime_api.into_storage_changes( + &state, + changes_trie_state.as_ref(), + *parent_hash, + )?; + + if import_block.header.state_root() != &gen_storage_changes.transaction_storage_root + { + return Err(Error::InvalidStateRoot); + } else { + **storage_changes = Some(gen_storage_changes); + } + } + // No block body, no storage changes + (true, None, None) => {} + // We should not enact the state, so we set the storage changes to `None`. + (false, changes, _) => { + changes.take(); + } + }; + + Ok(None) + } + + fn apply_finality_with_block_hash( + &self, + operation: &mut ClientImportOperation, + block: Block::Hash, + justification: Option, + best_block: Block::Hash, + notify: bool, + ) -> sp_blockchain::Result<()> { + // find tree route from last finalized to given block. 
+ let last_finalized = self.backend.blockchain().last_finalized()?; + + if block == last_finalized { + warn!( + "Possible safety violation: attempted to re-finalize last finalized block {:?} ", + last_finalized + ); + return Ok(()); + } + + let route_from_finalized = + sp_blockchain::tree_route(self.backend.blockchain(), last_finalized, block)?; + + if let Some(retracted) = route_from_finalized.retracted().get(0) { + warn!( + "Safety violation: attempted to revert finalized block {:?} which is not in the \ + same chain as last finalized {:?}", + retracted, last_finalized + ); + + return Err(sp_blockchain::Error::NotInFinalizedChain); + } + + let route_from_best = + sp_blockchain::tree_route(self.backend.blockchain(), best_block, block)?; + + // if the block is not a direct ancestor of the current best chain, + // then some other block is the common ancestor. + if route_from_best.common_block().hash != block { + // NOTE: we're setting the finalized block as best block, this might + // be slightly inaccurate since we might have a "better" block + // further along this chain, but since best chain selection logic is + // plugable we cannot make a better choice here. usages that need + // an accurate "best" block need to go through `SelectChain` + // instead. + operation.op.mark_head(BlockId::Hash(block))?; + } + + let enacted = route_from_finalized.enacted(); + assert!(enacted.len() > 0); + for finalize_new in &enacted[..enacted.len() - 1] { + operation + .op + .mark_finalized(BlockId::Hash(finalize_new.hash), None)?; + } + + assert_eq!(enacted.last().map(|e| e.hash), Some(block)); + operation + .op + .mark_finalized(BlockId::Hash(block), justification)?; + + if notify { + // sometimes when syncing, tons of blocks can be finalized at once. + // we'll send notifications spuriously in that case. 
+ const MAX_TO_NOTIFY: usize = 256; + let enacted = route_from_finalized.enacted(); + let start = enacted.len() - ::std::cmp::min(enacted.len(), MAX_TO_NOTIFY); + for finalized in &enacted[start..] { + operation.notify_finalized.push(finalized.hash); + } + } + + Ok(()) + } + + fn notify_finalized(&self, notify_finalized: Vec) -> sp_blockchain::Result<()> { + let mut sinks = self.finality_notification_sinks.lock(); + + if notify_finalized.is_empty() { + // cleanup any closed finality notification sinks + // since we won't be running the loop below which + // would also remove any closed sinks. + sinks.retain(|sink| !sink.is_closed()); + + return Ok(()); + } + + for finalized_hash in notify_finalized { + let header = self.header(&BlockId::Hash(finalized_hash))? .expect("header already known to exist in DB because it is indicated in the tree route; qed"); - telemetry!(SUBSTRATE_INFO; "notify.finalized"; - "height" => format!("{}", header.number()), - "best" => ?finalized_hash, - ); - - let notification = FinalityNotification { - header, - hash: finalized_hash, - }; - - sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); - } - - Ok(()) - } - - fn notify_imported( - &self, - notify_import: Option>, - ) -> sp_blockchain::Result<()> { - let notify_import = match notify_import { - Some(notify_import) => notify_import, - None => { - // cleanup any closed import notification sinks since we won't - // be sending any notifications below which would remove any - // closed sinks. this is necessary since during initial sync we - // won't send any import notifications which could lead to a - // temporary leak of closed/discarded notification sinks (e.g. - // from consensus code). - self.import_notification_sinks - .lock() - .retain(|sink| !sink.is_closed()); - - return Ok(()); - } - }; - - if let Some(storage_changes) = notify_import.storage_changes { - // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? 
- self.storage_notifications.lock() - .trigger( - ¬ify_import.hash, - storage_changes.0.into_iter(), - storage_changes.1.into_iter().map(|(sk, v)| (sk, v.into_iter())), - ); - } - - let notification = BlockImportNotification:: { - hash: notify_import.hash, - origin: notify_import.origin, - header: notify_import.header, - is_new_best: notify_import.is_new_best, - retracted: notify_import.retracted, - }; - - self.import_notification_sinks.lock() - .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); - - Ok(()) - } - - /// Attempts to revert the chain by `n` blocks guaranteeing that no block is - /// reverted past the last finalized block. Returns the number of blocks - /// that were successfully reverted. - pub fn revert(&self, n: NumberFor) -> sp_blockchain::Result> { - Ok(self.backend.revert(n, false)?) - } - - /// Attempts to revert the chain by `n` blocks disregarding finality. This - /// method will revert any finalized blocks as requested and can potentially - /// leave the node in an inconsistent state. Other modules in the system that - /// persist data and that rely on finality (e.g. consensus parts) will be - /// unaffected by the revert. Use this method with caution and making sure - /// that no other data needs to be reverted for consistency aside from the - /// block data. - /// - /// Returns the number of blocks that were successfully reverted. - pub fn unsafe_revert(&self, n: NumberFor) -> sp_blockchain::Result> { - Ok(self.backend.revert(n, true)?) - } - - /// Get usage info about current client. - pub fn usage_info(&self) -> ClientInfo { - ClientInfo { - chain: self.chain_info(), - usage: self.backend.usage_info(), - } - } - - /// Get blockchain info. - pub fn chain_info(&self) -> blockchain::Info { - self.backend.blockchain().info() - } - - /// Get block status. 
- pub fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { - // this can probably be implemented more efficiently - if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); - } - } - let hash_and_number = match id.clone() { - BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), - BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), - }; - match hash_and_number { - Some((hash, number)) => { - if self.backend.have_state_at(&hash, number) { - Ok(BlockStatus::InChainWithState) - } else { - Ok(BlockStatus::InChainPruned) - } - } - None => Ok(BlockStatus::Unknown), - } - } - - /// Get block header by id. - pub fn header(&self, id: &BlockId) -> sp_blockchain::Result::Header>> { - self.backend.blockchain().header(*id) - } - - /// Get block body by id. - pub fn body(&self, id: &BlockId) -> sp_blockchain::Result::Extrinsic>>> { - self.backend.blockchain().body(*id) - } - - /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. - pub fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { - let load_header = |id: Block::Hash| -> sp_blockchain::Result { - match self.backend.blockchain().header(BlockId::Hash(id))? 
{ - Some(hdr) => Ok(hdr), - None => Err(Error::UnknownBlock(format!("{:?}", id))), - } - }; - - let genesis_hash = self.backend.blockchain().info().genesis_hash; - if genesis_hash == target_hash { return Ok(Vec::new()); } - - let mut current_hash = target_hash; - let mut current = load_header(current_hash)?; - let mut ancestor_hash = *current.parent_hash(); - let mut ancestor = load_header(ancestor_hash)?; - let mut uncles = Vec::new(); - - for _generation in 0..max_generation.saturated_into() { - let children = self.backend.blockchain().children(ancestor_hash)?; - uncles.extend(children.into_iter().filter(|h| h != ¤t_hash)); - current_hash = ancestor_hash; - if genesis_hash == current_hash { break; } - current = ancestor; - ancestor_hash = *current.parent_hash(); - ancestor = load_header(ancestor_hash)?; - } - trace!("Collected {} uncles", uncles.len()); - Ok(uncles) - } - - /// Prepare in-memory header that is used in execution environment. - fn prepare_environment_block(&self, parent: &BlockId) -> sp_blockchain::Result { - let parent_header = self.backend.blockchain().expect_header(*parent)?; - Ok(<::Header as HeaderT>::new( - self.backend.blockchain().expect_block_number_from_id(parent)? + One::one(), - Default::default(), - Default::default(), - parent_header.hash(), - Default::default(), - )) - } + telemetry!(SUBSTRATE_INFO; "notify.finalized"; + "height" => format!("{}", header.number()), + "best" => ?finalized_hash, + ); + + let notification = FinalityNotification { + header, + hash: finalized_hash, + }; + + sinks.retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + } + + Ok(()) + } + + fn notify_imported( + &self, + notify_import: Option>, + ) -> sp_blockchain::Result<()> { + let notify_import = match notify_import { + Some(notify_import) => notify_import, + None => { + // cleanup any closed import notification sinks since we won't + // be sending any notifications below which would remove any + // closed sinks. 
this is necessary since during initial sync we + // won't send any import notifications which could lead to a + // temporary leak of closed/discarded notification sinks (e.g. + // from consensus code). + self.import_notification_sinks + .lock() + .retain(|sink| !sink.is_closed()); + + return Ok(()); + } + }; + + if let Some(storage_changes) = notify_import.storage_changes { + // TODO [ToDr] How to handle re-orgs? Should we re-emit all storage changes? + self.storage_notifications.lock().trigger( + ¬ify_import.hash, + storage_changes.0.into_iter(), + storage_changes + .1 + .into_iter() + .map(|(sk, v)| (sk, v.into_iter())), + ); + } + + let notification = BlockImportNotification:: { + hash: notify_import.hash, + origin: notify_import.origin, + header: notify_import.header, + is_new_best: notify_import.is_new_best, + retracted: notify_import.retracted, + }; + + self.import_notification_sinks + .lock() + .retain(|sink| sink.unbounded_send(notification.clone()).is_ok()); + + Ok(()) + } + + /// Attempts to revert the chain by `n` blocks guaranteeing that no block is + /// reverted past the last finalized block. Returns the number of blocks + /// that were successfully reverted. + pub fn revert(&self, n: NumberFor) -> sp_blockchain::Result> { + Ok(self.backend.revert(n, false)?) + } + + /// Attempts to revert the chain by `n` blocks disregarding finality. This + /// method will revert any finalized blocks as requested and can potentially + /// leave the node in an inconsistent state. Other modules in the system that + /// persist data and that rely on finality (e.g. consensus parts) will be + /// unaffected by the revert. Use this method with caution and making sure + /// that no other data needs to be reverted for consistency aside from the + /// block data. + /// + /// Returns the number of blocks that were successfully reverted. + pub fn unsafe_revert(&self, n: NumberFor) -> sp_blockchain::Result> { + Ok(self.backend.revert(n, true)?) 
+ } + + /// Get usage info about current client. + pub fn usage_info(&self) -> ClientInfo { + ClientInfo { + chain: self.chain_info(), + usage: self.backend.usage_info(), + } + } + + /// Get blockchain info. + pub fn chain_info(&self) -> blockchain::Info { + self.backend.blockchain().info() + } + + /// Get block status. + pub fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { + // this can probably be implemented more efficiently + if let BlockId::Hash(ref h) = id { + if self + .importing_block + .read() + .as_ref() + .map_or(false, |importing| h == importing) + { + return Ok(BlockStatus::Queued); + } + } + let hash_and_number = match id.clone() { + BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), + BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), + }; + match hash_and_number { + Some((hash, number)) => { + if self.backend.have_state_at(&hash, number) { + Ok(BlockStatus::InChainWithState) + } else { + Ok(BlockStatus::InChainPruned) + } + } + None => Ok(BlockStatus::Unknown), + } + } + + /// Get block header by id. + pub fn header( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Header>> { + self.backend.blockchain().header(*id) + } + + /// Get block body by id. + pub fn body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { + self.backend.blockchain().body(*id) + } + + /// Gets the uncles of the block with `target_hash` going back `max_generation` ancestors. + pub fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result> { + let load_header = |id: Block::Hash| -> sp_blockchain::Result { + match self.backend.blockchain().header(BlockId::Hash(id))? 
{ + Some(hdr) => Ok(hdr), + None => Err(Error::UnknownBlock(format!("{:?}", id))), + } + }; + + let genesis_hash = self.backend.blockchain().info().genesis_hash; + if genesis_hash == target_hash { + return Ok(Vec::new()); + } + + let mut current_hash = target_hash; + let mut current = load_header(current_hash)?; + let mut ancestor_hash = *current.parent_hash(); + let mut ancestor = load_header(ancestor_hash)?; + let mut uncles = Vec::new(); + + for _generation in 0..max_generation.saturated_into() { + let children = self.backend.blockchain().children(ancestor_hash)?; + uncles.extend(children.into_iter().filter(|h| h != ¤t_hash)); + current_hash = ancestor_hash; + if genesis_hash == current_hash { + break; + } + current = ancestor; + ancestor_hash = *current.parent_hash(); + ancestor = load_header(ancestor_hash)?; + } + trace!("Collected {} uncles", uncles.len()); + Ok(uncles) + } + + /// Prepare in-memory header that is used in execution environment. + fn prepare_environment_block( + &self, + parent: &BlockId, + ) -> sp_blockchain::Result { + let parent_header = self.backend.blockchain().expect_header(*parent)?; + Ok(<::Header as HeaderT>::new( + self.backend + .blockchain() + .expect_block_number_from_id(parent)? 
+ + One::one(), + Default::default(), + Default::default(), + parent_header.hash(), + Default::default(), + )) + } } -impl ProofProvider for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl ProofProvider for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - fn read_proof( - &self, - id: &BlockId, - keys: &mut dyn Iterator, - ) -> sp_blockchain::Result { - self.state_at(id) - .and_then(|state| prove_read(state, keys) - .map_err(Into::into)) - } - - fn read_child_proof( - &self, - id: &BlockId, - storage_key: &[u8], - child_info: ChildInfo, - keys: &mut dyn Iterator, - ) -> sp_blockchain::Result { - self.state_at(id) - .and_then(|state| prove_child_read(state, storage_key, child_info, keys) - .map_err(Into::into)) - } - - fn execution_proof( - &self, - id: &BlockId, - method: &str, - call_data: &[u8] - ) -> sp_blockchain::Result<(Vec, StorageProof)> { - // Make sure we include the `:code` and `:heap_pages` in the execution proof to be - // backwards compatible. 
- // - // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 - let code_proof = self.read_proof( - id, - &mut [well_known_keys::CODE, well_known_keys::HEAP_PAGES].iter().map(|v| *v), - )?; - - let state = self.state_at(id)?; - let header = self.prepare_environment_block(id)?; - prove_execution( - state, - header, - &self.executor, - method, - call_data, - ).map(|(r, p)| { - (r, StorageProof::merge(vec![p, code_proof])) - }) - } - - fn header_proof(&self, id: &BlockId) -> sp_blockchain::Result<(Block::Header, StorageProof)> { - self.header_proof_with_cht_size(id, cht::size()) - } - - fn key_changes_proof( - &self, - first: Block::Hash, - last: Block::Hash, - min: Block::Hash, - max: Block::Hash, - storage_key: Option<&StorageKey>, - key: &StorageKey, - ) -> sp_blockchain::Result> { - self.key_changes_proof_with_cht_size( - first, - last, - min, - max, - storage_key, - key, - cht::size(), - ) - } + fn read_proof( + &self, + id: &BlockId, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result { + self.state_at(id) + .and_then(|state| prove_read(state, keys).map_err(Into::into)) + } + + fn read_child_proof( + &self, + id: &BlockId, + storage_key: &[u8], + child_info: ChildInfo, + keys: &mut dyn Iterator, + ) -> sp_blockchain::Result { + self.state_at(id).and_then(|state| { + prove_child_read(state, storage_key, child_info, keys).map_err(Into::into) + }) + } + + fn execution_proof( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + ) -> sp_blockchain::Result<(Vec, StorageProof)> { + // Make sure we include the `:code` and `:heap_pages` in the execution proof to be + // backwards compatible. 
+ // + // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 + let code_proof = self.read_proof( + id, + &mut [well_known_keys::CODE, well_known_keys::HEAP_PAGES] + .iter() + .map(|v| *v), + )?; + + let state = self.state_at(id)?; + let header = self.prepare_environment_block(id)?; + prove_execution(state, header, &self.executor, method, call_data) + .map(|(r, p)| (r, StorageProof::merge(vec![p, code_proof]))) + } + + fn header_proof( + &self, + id: &BlockId, + ) -> sp_blockchain::Result<(Block::Header, StorageProof)> { + self.header_proof_with_cht_size(id, cht::size()) + } + + fn key_changes_proof( + &self, + first: Block::Hash, + last: Block::Hash, + min: Block::Hash, + max: Block::Hash, + storage_key: Option<&StorageKey>, + key: &StorageKey, + ) -> sp_blockchain::Result> { + self.key_changes_proof_with_cht_size(first, last, min, max, storage_key, key, cht::size()) + } } - impl BlockBuilderProvider for Client - where - B: backend::Backend + Send + Sync + 'static, - E: CallExecutor + Send + Sync + 'static, - Block: BlockT, - Self: ChainHeaderBackend + ProvideRuntimeApi, - >::Api: ApiExt> - + BlockBuilderApi, +where + B: backend::Backend + Send + Sync + 'static, + E: CallExecutor + Send + Sync + 'static, + Block: BlockT, + Self: ChainHeaderBackend + ProvideRuntimeApi, + >::Api: ApiExt> + + BlockBuilderApi, { - fn new_block_at>( - &self, - parent: &BlockId, - inherent_digests: DigestFor, - record_proof: R, - ) -> sp_blockchain::Result> { - sc_block_builder::BlockBuilder::new( - self, - self.expect_block_hash_from_id(parent)?, - self.expect_block_number_from_id(parent)?, - record_proof.into(), - inherent_digests, - &self.backend - ) - } - - fn new_block( - &self, - inherent_digests: DigestFor, - ) -> sp_blockchain::Result> { - let info = self.chain_info(); - sc_block_builder::BlockBuilder::new( - self, - info.best_hash, - info.best_number, - RecordProof::No, - inherent_digests, - &self.backend, - ) - } + fn new_block_at>( + &self, + parent: 
&BlockId, + inherent_digests: DigestFor, + record_proof: R, + ) -> sp_blockchain::Result> { + sc_block_builder::BlockBuilder::new( + self, + self.expect_block_hash_from_id(parent)?, + self.expect_block_number_from_id(parent)?, + record_proof.into(), + inherent_digests, + &self.backend, + ) + } + + fn new_block( + &self, + inherent_digests: DigestFor, + ) -> sp_blockchain::Result> { + let info = self.chain_info(); + sc_block_builder::BlockBuilder::new( + self, + info.best_hash, + info.best_number, + RecordProof::No, + inherent_digests, + &self.backend, + ) + } } -impl ExecutorProvider for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl ExecutorProvider for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - type Executor = E; + type Executor = E; - fn executor(&self) -> &Self::Executor { - &self.executor - } + fn executor(&self) -> &Self::Executor { + &self.executor + } - fn execution_extensions(&self) -> &ExecutionExtensions { - &self.execution_extensions - } + fn execution_extensions(&self) -> &ExecutionExtensions { + &self.execution_extensions + } } -impl StorageProvider for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl StorageProvider for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - fn storage_keys(&self, id: &BlockId, key_prefix: &StorageKey) -> sp_blockchain::Result> { - let keys = self.state_at(id)?.keys(&key_prefix.0).into_iter().map(StorageKey).collect(); - Ok(keys) - } - - fn storage_pairs(&self, id: &BlockId, key_prefix: &StorageKey) - -> sp_blockchain::Result> - { - let state = self.state_at(id)?; - let keys = state - .keys(&key_prefix.0) - .into_iter() - .map(|k| { - let d = state.storage(&k).ok().flatten().unwrap_or_default(); - (StorageKey(k), StorageData(d)) - }) - .collect(); - Ok(keys) - } - - - fn storage_keys_iter<'a>( - &self, - id: &BlockId, - prefix: Option<&'a StorageKey>, - start_key: Option<&StorageKey> - ) -> 
sp_blockchain::Result> { - let state = self.state_at(id)?; - let start_key = start_key - .or(prefix) - .map(|key| key.0.clone()) - .unwrap_or_else(Vec::new); - Ok(KeyIterator::new(state, prefix, start_key)) - } - - - fn storage(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result> - { - Ok(self.state_at(id)? - .storage(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .map(StorageData) - ) - } - - - fn storage_hash(&self, id: &BlockId, key: &StorageKey) -> sp_blockchain::Result> - { - Ok(self.state_at(id)? - .storage_hash(&key.0).map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - ) - } - - - fn child_storage_keys( - &self, - id: &BlockId, - child_storage_key: &StorageKey, - child_info: ChildInfo, - key_prefix: &StorageKey - ) -> sp_blockchain::Result> { - let keys = self.state_at(id)? - .child_keys(&child_storage_key.0, child_info, &key_prefix.0) - .into_iter() - .map(StorageKey) - .collect(); - Ok(keys) - } - - - fn child_storage( - &self, - id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, - key: &StorageKey - ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .child_storage(&storage_key.0, child_info, &key.0) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? - .map(StorageData)) - } - - - fn child_storage_hash( - &self, - id: &BlockId, - storage_key: &StorageKey, - child_info: ChildInfo, - key: &StorageKey - ) -> sp_blockchain::Result> { - Ok(self.state_at(id)? - .child_storage_hash(&storage_key.0, child_info, &key.0) - .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? 
- ) - } - - fn max_key_changes_range( - &self, - first: NumberFor, - last: BlockId, - ) -> sp_blockchain::Result, BlockId)>> { - let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; - let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - if first > last_number { - return Err(sp_blockchain::Error::ChangesTrieAccessFailed("Invalid changes trie range".into())); - } - - let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { - Some((storage, configs)) => (storage, configs), - None => return Ok(None), - }; - - let first_available_changes_trie = configs.last().map(|config| config.0); - match first_available_changes_trie { - Some(first_available_changes_trie) => { - let oldest_unpruned = storage.oldest_pruned_digest_range_end(); - let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); - Ok(Some((first, last))) - }, - None => Ok(None) - } - } - - fn key_changes( - &self, - first: NumberFor, - last: BlockId, - storage_key: Option<&StorageKey>, - key: &StorageKey - ) -> sp_blockchain::Result, u32)>> { - let last_number = self.backend.blockchain().expect_block_number_from_id(&last)?; - let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; - let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; - - let mut result = Vec::new(); - let best_number = self.backend.blockchain().info().best_number; - for (config_zero, config_end, config) in configs { - let range_first = ::std::cmp::max(first, config_zero + One::one()); - let range_anchor = match config_end { - Some((config_end_number, config_end_hash)) => if last_number > config_end_number { - ChangesTrieAnchorBlockId { hash: config_end_hash, number: config_end_number } - } else { - ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number } - }, - None => ChangesTrieAnchorBlockId { hash: convert_hash(&last_hash), number: last_number }, - }; - - let config_range = 
ChangesTrieConfigurationRange { - config: &config, - zero: config_zero.clone(), - end: config_end.map(|(config_end_number, _)| config_end_number), - }; - let result_range: Vec<(NumberFor, u32)> = key_changes::, _>( - config_range, - storage.storage(), - range_first, - &range_anchor, - best_number, - storage_key.as_ref().map(|x| &x.0[..]), - &key.0) - .and_then(|r| r.map(|r| r.map(|(block, tx)| (block, tx))).collect::>()) - .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; - result.extend(result_range); - } - - Ok(result) - } + fn storage_keys( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result> { + let keys = self + .state_at(id)? + .keys(&key_prefix.0) + .into_iter() + .map(StorageKey) + .collect(); + Ok(keys) + } + + fn storage_pairs( + &self, + id: &BlockId, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result> { + let state = self.state_at(id)?; + let keys = state + .keys(&key_prefix.0) + .into_iter() + .map(|k| { + let d = state.storage(&k).ok().flatten().unwrap_or_default(); + (StorageKey(k), StorageData(d)) + }) + .collect(); + Ok(keys) + } + + fn storage_keys_iter<'a>( + &self, + id: &BlockId, + prefix: Option<&'a StorageKey>, + start_key: Option<&StorageKey>, + ) -> sp_blockchain::Result> { + let state = self.state_at(id)?; + let start_key = start_key + .or(prefix) + .map(|key| key.0.clone()) + .unwrap_or_else(Vec::new); + Ok(KeyIterator::new(state, prefix, start_key)) + } + + fn storage( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result> { + Ok(self + .state_at(id)? + .storage(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData)) + } + + fn storage_hash( + &self, + id: &BlockId, + key: &StorageKey, + ) -> sp_blockchain::Result> { + Ok(self + .state_at(id)? + .storage_hash(&key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?) 
+ } + + fn child_storage_keys( + &self, + id: &BlockId, + child_storage_key: &StorageKey, + child_info: ChildInfo, + key_prefix: &StorageKey, + ) -> sp_blockchain::Result> { + let keys = self + .state_at(id)? + .child_keys(&child_storage_key.0, child_info, &key_prefix.0) + .into_iter() + .map(StorageKey) + .collect(); + Ok(keys) + } + + fn child_storage( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result> { + Ok(self + .state_at(id)? + .child_storage(&storage_key.0, child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))? + .map(StorageData)) + } + + fn child_storage_hash( + &self, + id: &BlockId, + storage_key: &StorageKey, + child_info: ChildInfo, + key: &StorageKey, + ) -> sp_blockchain::Result> { + Ok(self + .state_at(id)? + .child_storage_hash(&storage_key.0, child_info, &key.0) + .map_err(|e| sp_blockchain::Error::from_state(Box::new(e)))?) + } + + fn max_key_changes_range( + &self, + first: NumberFor, + last: BlockId, + ) -> sp_blockchain::Result, BlockId)>> { + let last_number = self + .backend + .blockchain() + .expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + if first > last_number { + return Err(sp_blockchain::Error::ChangesTrieAccessFailed( + "Invalid changes trie range".into(), + )); + } + + let (storage, configs) = match self.require_changes_trie(first, last_hash, false).ok() { + Some((storage, configs)) => (storage, configs), + None => return Ok(None), + }; + + let first_available_changes_trie = configs.last().map(|config| config.0); + match first_available_changes_trie { + Some(first_available_changes_trie) => { + let oldest_unpruned = storage.oldest_pruned_digest_range_end(); + let first = std::cmp::max(first_available_changes_trie, oldest_unpruned); + Ok(Some((first, last))) + } + None => Ok(None), + } + } + + fn key_changes( + &self, + first: NumberFor, + last: BlockId, + 
storage_key: Option<&StorageKey>, + key: &StorageKey, + ) -> sp_blockchain::Result, u32)>> { + let last_number = self + .backend + .blockchain() + .expect_block_number_from_id(&last)?; + let last_hash = self.backend.blockchain().expect_block_hash_from_id(&last)?; + let (storage, configs) = self.require_changes_trie(first, last_hash, true)?; + + let mut result = Vec::new(); + let best_number = self.backend.blockchain().info().best_number; + for (config_zero, config_end, config) in configs { + let range_first = ::std::cmp::max(first, config_zero + One::one()); + let range_anchor = match config_end { + Some((config_end_number, config_end_hash)) => { + if last_number > config_end_number { + ChangesTrieAnchorBlockId { + hash: config_end_hash, + number: config_end_number, + } + } else { + ChangesTrieAnchorBlockId { + hash: convert_hash(&last_hash), + number: last_number, + } + } + } + None => ChangesTrieAnchorBlockId { + hash: convert_hash(&last_hash), + number: last_number, + }, + }; + + let config_range = ChangesTrieConfigurationRange { + config: &config, + zero: config_zero.clone(), + end: config_end.map(|(config_end_number, _)| config_end_number), + }; + let result_range: Vec<(NumberFor, u32)> = key_changes::, _>( + config_range, + storage.storage(), + range_first, + &range_anchor, + best_number, + storage_key.as_ref().map(|x| &x.0[..]), + &key.0, + ) + .and_then(|r| { + r.map(|r| r.map(|(block, tx)| (block, tx))) + .collect::>() + }) + .map_err(|err| sp_blockchain::Error::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } + + Ok(result) + } } -impl HeaderMetadata for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl HeaderMetadata for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - type Error = sp_blockchain::Error; - - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.backend.blockchain().header_metadata(hash) - } - - fn insert_header_metadata(&self, hash: 
Block::Hash, metadata: CachedHeaderMetadata) { - self.backend.blockchain().insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.backend.blockchain().remove_header_metadata(hash) - } + type Error = sp_blockchain::Error; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.backend.blockchain().header_metadata(hash) + } + + fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { + self.backend + .blockchain() + .insert_header_metadata(hash, metadata) + } + + fn remove_header_metadata(&self, hash: Block::Hash) { + self.backend.blockchain().remove_header_metadata(hash) + } } -impl ProvideUncles for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl ProvideUncles for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - fn uncles(&self, target_hash: Block::Hash, max_generation: NumberFor) -> sp_blockchain::Result> { - Ok(Client::uncles(self, target_hash, max_generation)? - .into_iter() - .filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None)) - .collect() - ) - } + fn uncles( + &self, + target_hash: Block::Hash, + max_generation: NumberFor, + ) -> sp_blockchain::Result> { + Ok(Client::uncles(self, target_hash, max_generation)? 
+ .into_iter() + .filter_map(|hash| Client::header(self, &BlockId::Hash(hash)).unwrap_or(None)) + .collect()) + } } -impl ChainHeaderBackend for Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, - RA: Send + Sync, +impl ChainHeaderBackend for Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: Send + Sync, { - fn header(&self, id: BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().header(id) - } - - fn info(&self) -> blockchain::Info { - self.backend.blockchain().info() - } - - fn status(&self, id: BlockId) -> sp_blockchain::Result { - self.backend.blockchain().status(id) - } - - fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { - self.backend.blockchain().number(hash) - } - - fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { - self.backend.blockchain().hash(number) - } + fn header(&self, id: BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().header(id) + } + + fn info(&self) -> blockchain::Info { + self.backend.blockchain().info() + } + + fn status(&self, id: BlockId) -> sp_blockchain::Result { + self.backend.blockchain().status(id) + } + + fn number( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + self.backend.blockchain().number(hash) + } + + fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { + self.backend.blockchain().hash(number) + } } -impl sp_runtime::traits::BlockIdTo for Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, - RA: Send + Sync, +impl sp_runtime::traits::BlockIdTo for Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: Send + Sync, { - type Error = Error; - - fn to_hash(&self, block_id: &BlockId) -> sp_blockchain::Result> { - self.block_hash_from_id(block_id) - } - - fn to_number(&self, block_id: &BlockId) -> sp_blockchain::Result>> { - 
self.block_number_from_id(block_id) - } + type Error = Error; + + fn to_hash(&self, block_id: &BlockId) -> sp_blockchain::Result> { + self.block_hash_from_id(block_id) + } + + fn to_number( + &self, + block_id: &BlockId, + ) -> sp_blockchain::Result>> { + self.block_number_from_id(block_id) + } } -impl ChainHeaderBackend for &Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, - RA: Send + Sync, +impl ChainHeaderBackend for &Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: Send + Sync, { - fn header(&self, id: BlockId) -> sp_blockchain::Result> { - (**self).backend.blockchain().header(id) - } - - fn info(&self) -> blockchain::Info { - (**self).backend.blockchain().info() - } - - fn status(&self, id: BlockId) -> sp_blockchain::Result { - (**self).status(id) - } - - fn number(&self, hash: Block::Hash) -> sp_blockchain::Result::Header as HeaderT>::Number>> { - (**self).number(hash) - } - - fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { - (**self).hash(number) - } + fn header(&self, id: BlockId) -> sp_blockchain::Result> { + (**self).backend.blockchain().header(id) + } + + fn info(&self) -> blockchain::Info { + (**self).backend.blockchain().info() + } + + fn status(&self, id: BlockId) -> sp_blockchain::Result { + (**self).status(id) + } + + fn number( + &self, + hash: Block::Hash, + ) -> sp_blockchain::Result::Header as HeaderT>::Number>> { + (**self).number(hash) + } + + fn hash(&self, number: NumberFor) -> sp_blockchain::Result> { + (**self).hash(number) + } } -impl ProvideCache for Client where - B: backend::Backend, - Block: BlockT, +impl ProvideCache for Client +where + B: backend::Backend, + Block: BlockT, { - fn cache(&self) -> Option>> { - self.backend.blockchain().cache() - } + fn cache(&self) -> Option>> { + self.backend.blockchain().cache() + } } -impl ProvideRuntimeApi for Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, - RA: 
ConstructRuntimeApi, +impl ProvideRuntimeApi for Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + RA: ConstructRuntimeApi, { - type Api = >::RuntimeApi; + type Api = >::RuntimeApi; - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { - RA::construct_runtime_api(self) - } + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api> { + RA::construct_runtime_api(self) + } } -impl CallApiAt for Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, +impl CallApiAt for Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, { - type Error = Error; - type StateBackend = B::State; - - fn call_api_at< - 'a, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - C: CoreApi, - >( - &self, - params: CallApiAtParams<'a, Block, C, NC, B::State>, - ) -> sp_blockchain::Result> { - let core_api = params.core_api; - let at = params.at; - - let (manager, extensions) = self.execution_extensions.manager_and_extensions( - at, - params.context, - ); - - self.executor.contextual_call::<_, fn(_,_) -> _,_,_>( - || core_api.initialize_block(at, &self.prepare_environment_block(at)?), - at, - params.function, - ¶ms.arguments, - params.overlayed_changes, - Some(params.storage_transaction_cache), - params.initialize_block, - manager, - params.native_call, - params.recorder, - Some(extensions), - ) - } - - fn runtime_version_at(&self, at: &BlockId) -> sp_blockchain::Result { - self.runtime_version_at(at) - } + type Error = Error; + type StateBackend = B::State; + + fn call_api_at< + 'a, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + C: CoreApi, + >( + &self, + params: CallApiAtParams<'a, Block, C, NC, B::State>, + ) -> sp_blockchain::Result> { + let core_api = params.core_api; + let at = params.at; + + let (manager, extensions) = self + .execution_extensions + .manager_and_extensions(at, params.context); + + 
self.executor.contextual_call::<_, fn(_, _) -> _, _, _>( + || core_api.initialize_block(at, &self.prepare_environment_block(at)?), + at, + params.function, + ¶ms.arguments, + params.overlayed_changes, + Some(params.storage_transaction_cache), + params.initialize_block, + manager, + params.native_call, + params.recorder, + Some(extensions), + ) + } + + fn runtime_version_at(&self, at: &BlockId) -> sp_blockchain::Result { + self.runtime_version_at(at) + } } /// NOTE: only use this implementation when you are sure there are NO consensus-level BlockImport /// objects. Otherwise, importing blocks directly into the client would be bypassing /// important verification work. -impl sp_consensus::BlockImport for &Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi + - ApiExt, +impl sp_consensus::BlockImport for &Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: + CoreApi + ApiExt, { - type Error = ConsensusError; - type Transaction = backend::TransactionFor; - - /// Import a checked and validated block. If a justification is provided in - /// `BlockImportParams` then `finalized` *must* be true. - /// - /// NOTE: only use this implementation when there are NO consensus-level BlockImport - /// objects. Otherwise, importing blocks directly into the client would be bypassing - /// important verification work. - /// - /// If you are not sure that there are no BlockImport objects provided by the consensus - /// algorithm, don't use this function. 
- fn import_block( - &mut self, - mut import_block: BlockImportParams>, - new_cache: HashMap>, - ) -> Result { - let span = tracing::span!(tracing::Level::DEBUG, "import_block"); - let _enter = span.enter(); - - if let Some(res) = self.prepare_block_storage_changes(&mut import_block).map_err(|e| { - warn!("Block prepare storage changes error:\n{:?}", e); - ConsensusError::ClientImport(e.to_string()) - })? { - return Ok(res) - } - - self.lock_import_and_run(|operation| { - self.apply_block(operation, import_block, new_cache) - }).map_err(|e| { - warn!("Block import error:\n{:?}", e); - ConsensusError::ClientImport(e.to_string()).into() - }) - } - - /// Check block preconditions. - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - let BlockCheckParams { hash, number, parent_hash, allow_missing_state, import_existing } = block; - - // Check the block against white and black lists if any are defined - // (i.e. fork blocks and bad blocks respectively) - match self.block_rules.lookup(number, &hash) { - BlockLookupResult::KnownBad => { - trace!( - "Rejecting known bad block: #{} {:?}", - number, - hash, - ); - return Ok(ImportResult::KnownBad); - }, - BlockLookupResult::Expected(expected_hash) => { - trace!( + type Error = ConsensusError; + type Transaction = backend::TransactionFor; + + /// Import a checked and validated block. If a justification is provided in + /// `BlockImportParams` then `finalized` *must* be true. + /// + /// NOTE: only use this implementation when there are NO consensus-level BlockImport + /// objects. Otherwise, importing blocks directly into the client would be bypassing + /// important verification work. + /// + /// If you are not sure that there are no BlockImport objects provided by the consensus + /// algorithm, don't use this function. 
+ fn import_block( + &mut self, + mut import_block: BlockImportParams>, + new_cache: HashMap>, + ) -> Result { + let span = tracing::span!(tracing::Level::DEBUG, "import_block"); + let _enter = span.enter(); + + if let Some(res) = self + .prepare_block_storage_changes(&mut import_block) + .map_err(|e| { + warn!("Block prepare storage changes error:\n{:?}", e); + ConsensusError::ClientImport(e.to_string()) + })? + { + return Ok(res); + } + + self.lock_import_and_run(|operation| self.apply_block(operation, import_block, new_cache)) + .map_err(|e| { + warn!("Block import error:\n{:?}", e); + ConsensusError::ClientImport(e.to_string()).into() + }) + } + + /// Check block preconditions. + fn check_block(&mut self, block: BlockCheckParams) -> Result { + let BlockCheckParams { + hash, + number, + parent_hash, + allow_missing_state, + import_existing, + } = block; + + // Check the block against white and black lists if any are defined + // (i.e. fork blocks and bad blocks respectively) + match self.block_rules.lookup(number, &hash) { + BlockLookupResult::KnownBad => { + trace!("Rejecting known bad block: #{} {:?}", number, hash,); + return Ok(ImportResult::KnownBad); + } + BlockLookupResult::Expected(expected_hash) => { + trace!( "Rejecting block from known invalid fork. Got {:?}, expected: {:?} at height {}", hash, expected_hash, number ); - return Ok(ImportResult::KnownBad); - }, - BlockLookupResult::NotSpecial => {} - } - - // Own status must be checked first. If the block and ancestry is pruned - // this function must return `AlreadyInChain` rather than `MissingState` - match self.block_status(&BlockId::Hash(hash)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
- { - BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => return Ok(ImportResult::AlreadyInChain), - BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::InChainPruned => return Ok(ImportResult::AlreadyInChain), - BlockStatus::Unknown => {}, - BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), - } - - match self.block_status(&BlockId::Hash(parent_hash)) - .map_err(|e| ConsensusError::ClientImport(e.to_string()))? - { - BlockStatus::InChainWithState | BlockStatus::Queued => {}, - BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), - BlockStatus::InChainPruned if allow_missing_state => {}, - BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), - BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), - } - - - Ok(ImportResult::imported(false)) - } + return Ok(ImportResult::KnownBad); + } + BlockLookupResult::NotSpecial => {} + } + + // Own status must be checked first. If the block and ancestry is pruned + // this function must return `AlreadyInChain` rather than `MissingState` + match self + .block_status(&BlockId::Hash(hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? + { + BlockStatus::InChainWithState | BlockStatus::Queued if !import_existing => { + return Ok(ImportResult::AlreadyInChain) + } + BlockStatus::InChainWithState | BlockStatus::Queued => {} + BlockStatus::InChainPruned => return Ok(ImportResult::AlreadyInChain), + BlockStatus::Unknown => {} + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } + + match self + .block_status(&BlockId::Hash(parent_hash)) + .map_err(|e| ConsensusError::ClientImport(e.to_string()))? 
+ { + BlockStatus::InChainWithState | BlockStatus::Queued => {} + BlockStatus::Unknown => return Ok(ImportResult::UnknownParent), + BlockStatus::InChainPruned if allow_missing_state => {} + BlockStatus::InChainPruned => return Ok(ImportResult::MissingState), + BlockStatus::KnownBad => return Ok(ImportResult::KnownBad), + } + + Ok(ImportResult::imported(false)) + } } -impl sp_consensus::BlockImport for Client where - B: backend::Backend, - E: CallExecutor + Send + Sync, - Block: BlockT, - Self: ProvideRuntimeApi, - >::Api: CoreApi + - ApiExt, +impl sp_consensus::BlockImport for Client +where + B: backend::Backend, + E: CallExecutor + Send + Sync, + Block: BlockT, + Self: ProvideRuntimeApi, + >::Api: + CoreApi + ApiExt, { - type Error = ConsensusError; - type Transaction = backend::TransactionFor; - - fn import_block( - &mut self, - import_block: BlockImportParams, - new_cache: HashMap>, - ) -> Result { - (&*self).import_block(import_block, new_cache) - } - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - (&*self).check_block(block) - } + type Error = ConsensusError; + type Transaction = backend::TransactionFor; + + fn import_block( + &mut self, + import_block: BlockImportParams, + new_cache: HashMap>, + ) -> Result { + (&*self).import_block(import_block, new_cache) + } + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + (&*self).check_block(block) + } } -impl Finalizer for Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl Finalizer for Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - fn apply_finality( - &self, - operation: &mut ClientImportOperation, - id: BlockId, - justification: Option, - notify: bool, - ) -> sp_blockchain::Result<()> { - let last_best = self.backend.blockchain().info().best_hash; - let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; - self.apply_finality_with_block_hash( - operation, - to_finalize_hash, - 
justification, - last_best, - notify, - ) - } - - fn finalize_block( - &self, - id: BlockId, - justification: Option, - notify: bool, - ) -> sp_blockchain::Result<()> { - self.lock_import_and_run(|operation| { - self.apply_finality(operation, id, justification, notify) - }) - } + fn apply_finality( + &self, + operation: &mut ClientImportOperation, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + let last_best = self.backend.blockchain().info().best_hash; + let to_finalize_hash = self.backend.blockchain().expect_block_hash_from_id(&id)?; + self.apply_finality_with_block_hash( + operation, + to_finalize_hash, + justification, + last_best, + notify, + ) + } + + fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + self.lock_import_and_run(|operation| { + self.apply_finality(operation, id, justification, notify) + }) + } } - -impl Finalizer for &Client where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +impl Finalizer for &Client +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - fn apply_finality( - &self, - operation: &mut ClientImportOperation, - id: BlockId, - justification: Option, - notify: bool, - ) -> sp_blockchain::Result<()> { - (**self).apply_finality(operation, id, justification, notify) - } - - fn finalize_block( - &self, - id: BlockId, - justification: Option, - notify: bool, - ) -> sp_blockchain::Result<()> { - (**self).finalize_block(id, justification, notify) - } + fn apply_finality( + &self, + operation: &mut ClientImportOperation, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + (**self).apply_finality(operation, id, justification, notify) + } + + fn finalize_block( + &self, + id: BlockId, + justification: Option, + notify: bool, + ) -> sp_blockchain::Result<()> { + (**self).finalize_block(id, justification, notify) + } } impl BlockchainEvents for Client where - E: 
CallExecutor, - Block: BlockT, + E: CallExecutor, + Block: BlockT, { - /// Get block import event stream. - fn import_notification_stream(&self) -> ImportNotifications { - let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream"); - self.import_notification_sinks.lock().push(sink); - stream - } - - fn finality_notification_stream(&self) -> FinalityNotifications { - let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream"); - self.finality_notification_sinks.lock().push(sink); - stream - } - - /// Get storage changes event stream. - fn storage_changes_notification_stream( - &self, - filter_keys: Option<&[StorageKey]>, - child_filter_keys: Option<&[(StorageKey, Option>)]>, - ) -> sp_blockchain::Result> { - Ok(self.storage_notifications.lock().listen(filter_keys, child_filter_keys)) - } + /// Get block import event stream. + fn import_notification_stream(&self) -> ImportNotifications { + let (sink, stream) = tracing_unbounded("mpsc_import_notification_stream"); + self.import_notification_sinks.lock().push(sink); + stream + } + + fn finality_notification_stream(&self) -> FinalityNotifications { + let (sink, stream) = tracing_unbounded("mpsc_finality_notification_stream"); + self.finality_notification_sinks.lock().push(sink); + stream + } + + /// Get storage changes event stream. 
+ fn storage_changes_notification_stream( + &self, + filter_keys: Option<&[StorageKey]>, + child_filter_keys: Option<&[(StorageKey, Option>)]>, + ) -> sp_blockchain::Result> { + Ok(self + .storage_notifications + .lock() + .listen(filter_keys, child_filter_keys)) + } } /// Implement Longest Chain Select implementation /// where 'longest' is defined as the highest number of blocks pub struct LongestChain { - backend: Arc, - _phantom: PhantomData + backend: Arc, + _phantom: PhantomData, } impl Clone for LongestChain { - fn clone(&self) -> Self { - let backend = self.backend.clone(); - LongestChain { - backend, - _phantom: Default::default() - } - } + fn clone(&self) -> Self { + let backend = self.backend.clone(); + LongestChain { + backend, + _phantom: Default::default(), + } + } } impl LongestChain where - B: backend::Backend, - Block: BlockT, + B: backend::Backend, + Block: BlockT, { - /// Instantiate a new LongestChain for Backend B - pub fn new(backend: Arc) -> Self { - LongestChain { - backend, - _phantom: Default::default() - } - } - - fn best_block_header(&self) -> sp_blockchain::Result<::Header> { - let info = self.backend.blockchain().info(); - let import_lock = self.backend.get_import_lock(); - let best_hash = self.backend - .blockchain() - .best_containing(info.best_hash, None, import_lock)? - .unwrap_or(info.best_hash); - - Ok(self.backend.blockchain().header(BlockId::Hash(best_hash))? 
- .expect("given block hash was fetched from block in db; qed")) - } - - fn leaves(&self) -> Result::Hash>, sp_blockchain::Error> { - self.backend.blockchain().leaves() - } + /// Instantiate a new LongestChain for Backend B + pub fn new(backend: Arc) -> Self { + LongestChain { + backend, + _phantom: Default::default(), + } + } + + fn best_block_header(&self) -> sp_blockchain::Result<::Header> { + let info = self.backend.blockchain().info(); + let import_lock = self.backend.get_import_lock(); + let best_hash = self + .backend + .blockchain() + .best_containing(info.best_hash, None, import_lock)? + .unwrap_or(info.best_hash); + + Ok(self + .backend + .blockchain() + .header(BlockId::Hash(best_hash))? + .expect("given block hash was fetched from block in db; qed")) + } + + fn leaves(&self) -> Result::Hash>, sp_blockchain::Error> { + self.backend.blockchain().leaves() + } } impl SelectChain for LongestChain where - B: backend::Backend, - Block: BlockT, + B: backend::Backend, + Block: BlockT, { - - fn leaves(&self) -> Result::Hash>, ConsensusError> { - LongestChain::leaves(self) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) - } - - fn best_chain(&self) - -> Result<::Header, ConsensusError> - { - LongestChain::best_block_header(&self) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) - } - - fn finality_target( - &self, - target_hash: Block::Hash, - maybe_max_number: Option> - ) -> Result, ConsensusError> { - let import_lock = self.backend.get_import_lock(); - self.backend.blockchain().best_containing(target_hash, maybe_max_number, import_lock) - .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) - } + fn leaves(&self) -> Result::Hash>, ConsensusError> { + LongestChain::leaves(self).map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) + } + + fn best_chain(&self) -> Result<::Header, ConsensusError> { + LongestChain::best_block_header(&self) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) + } + + 
fn finality_target( + &self, + target_hash: Block::Hash, + maybe_max_number: Option>, + ) -> Result, ConsensusError> { + let import_lock = self.backend.get_import_lock(); + self.backend + .blockchain() + .best_containing(target_hash, maybe_max_number, import_lock) + .map_err(|e| ConsensusError::ChainLookup(e.to_string()).into()) + } } impl BlockBackend for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, { - fn block_body( - &self, - id: &BlockId, - ) -> sp_blockchain::Result::Extrinsic>>> { - self.body(id) - } - - fn block(&self, id: &BlockId) -> sp_blockchain::Result>> - { - Ok(match (self.header(id)?, self.body(id)?, self.justification(id)?) { - (Some(header), Some(extrinsics), justification) => - Some(SignedBlock { block: Block::new(header, extrinsics), justification }), - _ => None, - }) - } - - fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { - // this can probably be implemented more efficiently - if let BlockId::Hash(ref h) = id { - if self.importing_block.read().as_ref().map_or(false, |importing| h == importing) { - return Ok(BlockStatus::Queued); - } - } - let hash_and_number = match id.clone() { - BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), - BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), - }; - match hash_and_number { - Some((hash, number)) => { - if self.backend.have_state_at(&hash, number) { - Ok(BlockStatus::InChainWithState) - } else { - Ok(BlockStatus::InChainPruned) - } - } - None => Ok(BlockStatus::Unknown), - } - } - - fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { - self.backend.blockchain().justification(*id) - } + fn block_body( + &self, + id: &BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { + self.body(id) + } + + fn block(&self, id: &BlockId) -> sp_blockchain::Result>> { + Ok( + match (self.header(id)?, self.body(id)?, 
self.justification(id)?) { + (Some(header), Some(extrinsics), justification) => Some(SignedBlock { + block: Block::new(header, extrinsics), + justification, + }), + _ => None, + }, + ) + } + + fn block_status(&self, id: &BlockId) -> sp_blockchain::Result { + // this can probably be implemented more efficiently + if let BlockId::Hash(ref h) = id { + if self + .importing_block + .read() + .as_ref() + .map_or(false, |importing| h == importing) + { + return Ok(BlockStatus::Queued); + } + } + let hash_and_number = match id.clone() { + BlockId::Hash(hash) => self.backend.blockchain().number(hash)?.map(|n| (hash, n)), + BlockId::Number(n) => self.backend.blockchain().hash(n)?.map(|hash| (hash, n)), + }; + match hash_and_number { + Some((hash, number)) => { + if self.backend.have_state_at(&hash, number) { + Ok(BlockStatus::InChainWithState) + } else { + Ok(BlockStatus::InChainPruned) + } + } + None => Ok(BlockStatus::Unknown), + } + } + + fn justification(&self, id: &BlockId) -> sp_blockchain::Result> { + self.backend.blockchain().justification(*id) + } } impl backend::AuxStore for Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - Self: ProvideRuntimeApi, - >::Api: CoreApi, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Self: ProvideRuntimeApi, + >::Api: CoreApi, { - /// Insert auxiliary data into key-value store. - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { - // Import is locked here because we may have other block import - // operations that tries to set aux data. Note that for consensus - // layer, one can always use atomic operations to make sure - // import is only locked once. - self.lock_import_and_run(|operation| { - apply_aux(operation, insert, delete) - }) - } - /// Query auxiliary data from key-value store. 
- fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { - backend::AuxStore::get_aux(&*self.backend, key) - } + /// Insert auxiliary data into key-value store. + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + // Import is locked here because we may have other block import + // operations that tries to set aux data. Note that for consensus + // layer, one can always use atomic operations to make sure + // import is only locked once. + self.lock_import_and_run(|operation| apply_aux(operation, insert, delete)) + } + /// Query auxiliary data from key-value store. + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + backend::AuxStore::get_aux(&*self.backend, key) + } } impl backend::AuxStore for &Client - where - B: backend::Backend, - E: CallExecutor, - Block: BlockT, - Client: ProvideRuntimeApi, - as ProvideRuntimeApi>::Api: CoreApi, +where + B: backend::Backend, + E: CallExecutor, + Block: BlockT, + Client: ProvideRuntimeApi, + as ProvideRuntimeApi>::Api: CoreApi, { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { - (**self).insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { - (**self).get_aux(key) - } + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + (**self).insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + (**self).get_aux(key) + } } - /// Helper function to apply auxiliary data insertion into an operation. 
pub fn apply_aux<'a, 'b: 'a, 'c: 'a, B, Block, D, I>( - operation: &mut ClientImportOperation, - insert: I, - delete: D, + operation: &mut ClientImportOperation, + insert: I, + delete: D, ) -> sp_blockchain::Result<()> where - Block: BlockT, - B: backend::Backend, - I: IntoIterator, - D: IntoIterator, + Block: BlockT, + B: backend::Backend, + I: IntoIterator, + D: IntoIterator, { - operation.op.insert_aux( - insert.into_iter() - .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) - .chain(delete.into_iter().map(|k| (k.to_vec(), None))) - ) + operation.op.insert_aux( + insert + .into_iter() + .map(|(k, v)| (k.to_vec(), Some(v.to_vec()))) + .chain(delete.into_iter().map(|k| (k.to_vec(), None))), + ) } impl sp_consensus::block_validation::Chain for Client - where BE: backend::Backend, - E: CallExecutor, - B: BlockT +where + BE: backend::Backend, + E: CallExecutor, + B: BlockT, { - fn block_status( - &self, - id: &BlockId, - ) -> Result> { - Client::block_status(self, id).map_err(|e| Box::new(e) as Box<_>) - } + fn block_status( + &self, + id: &BlockId, + ) -> Result> { + Client::block_status(self, id).map_err(|e| Box::new(e) as Box<_>) + } } #[cfg(test)] pub(crate) mod tests { - use std::collections::HashMap; - use super::*; - use sp_core::{blake2_256, H256}; - use sp_runtime::DigestItem; - use sp_consensus::{BlockOrigin, SelectChain, BlockImport}; - use substrate_test_runtime_client::{ - prelude::*, - client_ext::ClientExt, - sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}, - runtime::{self, Block, Transfer, RuntimeApi, TestAPI}, - }; - use hex_literal::hex; - - /// Returns tuple, consisting of: - /// 1) test client pre-filled with blocks changing balances; - /// 2) roots of changes tries for these blocks - /// 3) test cases in form (begin, end, key, vec![(block, extrinsic)]) that are required to pass - pub fn prepare_client_with_key_changes() -> ( - substrate_test_runtime_client::sc_client::Client, - Vec, - Vec<(u64, u64, Vec, Vec<(u64, 
u32)>)>, - ) { - // prepare block structure - let blocks_transfers = vec![ - vec![(AccountKeyring::Alice, AccountKeyring::Dave), (AccountKeyring::Bob, AccountKeyring::Dave)], - vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], - vec![], - vec![(AccountKeyring::Alice, AccountKeyring::Dave)], - ]; - - // prepare client ang import blocks - let mut local_roots = Vec::new(); - let config = Some(ChangesTrieConfiguration::new(4, 2)); - let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build(); - let mut nonces: HashMap<_, u64> = Default::default(); - for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { - let mut builder = remote_client.new_block(Default::default()).unwrap(); - for (from, to) in block_transfers { - builder.push_transfer(Transfer { - from: from.into(), - to: to.into(), - amount: 1, - nonce: *nonces.entry(from).and_modify(|n| { *n = *n + 1 }).or_default(), - }).unwrap(); - } - let block = builder.build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); - - let header = remote_client.header(&BlockId::Number(i as u64 + 1)).unwrap().unwrap(); - let trie_root = header.digest().log(DigestItem::as_changes_trie_root) - .map(|root| H256::from_slice(root.as_ref())) - .unwrap(); - local_roots.push(trie_root); - } - - // prepare test cases - let alice = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Alice.into())).to_vec(); - let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); - let charlie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Charlie.into())).to_vec(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); - let ferdie = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Ferdie.into())).to_vec(); - let test_cases = vec![ - (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), - (1, 
3, alice.clone(), vec![(1, 0)]), - (2, 4, alice.clone(), vec![(4, 0)]), - (2, 3, alice.clone(), vec![]), - - (1, 4, bob.clone(), vec![(1, 1)]), - (1, 1, bob.clone(), vec![(1, 1)]), - (2, 4, bob.clone(), vec![]), - - (1, 4, charlie.clone(), vec![(2, 0)]), - - (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), - (3, 4, dave.clone(), vec![(4, 0)]), - - (1, 4, eve.clone(), vec![(2, 0)]), - (1, 1, eve.clone(), vec![]), - (3, 4, eve.clone(), vec![]), - - (1, 4, ferdie.clone(), vec![]), - ]; - - (remote_client, local_roots, test_cases) - } - - #[test] - fn client_initializes_from_genesis_ok() { - let client = substrate_test_runtime_client::new(); - - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into() - ).unwrap(), - 1000 - ); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into() - ).unwrap(), - 0 - ); - } - - #[test] - fn block_builder_works_with_no_transactions() { - let mut client = substrate_test_runtime_client::new(); - - let block = client.new_block(Default::default()).unwrap().build().unwrap().block; - - client.import(BlockOrigin::Own, block).unwrap(); - - assert_eq!(client.chain_info().best_number, 1); - } - - #[test] - fn block_builder_works_with_transactions() { - let mut client = substrate_test_runtime_client::new(); - - let mut builder = client.new_block(Default::default()).unwrap(); - - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - - let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - - assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); - assert_eq!( - 
client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Alice.into() - ).unwrap(), - 958 - ); - assert_eq!( - client.runtime_api().balance_of( - &BlockId::Number(client.chain_info().best_number), - AccountKeyring::Ferdie.into() - ).unwrap(), - 42 - ); - } - - #[test] - fn block_builder_does_not_include_invalid() { - let mut client = substrate_test_runtime_client::new(); - - let mut builder = client.new_block(Default::default()).unwrap(); - - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 42, - nonce: 0, - }).unwrap(); - - assert!( - builder.push_transfer(Transfer { - from: AccountKeyring::Eve.into(), - to: AccountKeyring::Alice.into(), - amount: 42, - nonce: 0, - }).is_err() - ); - - let block = builder.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - - assert_eq!(client.chain_info().best_number, 1); - assert_ne!( - client.state_at(&BlockId::Number(1)).unwrap().pairs(), - client.state_at(&BlockId::Number(0)).unwrap().pairs() - ); - assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) - } - - #[test] - fn best_containing_with_genesis_block() { - // block tree: - // G - - let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!( - genesis_hash.clone(), - longest_chain_select.finality_target(genesis_hash.clone(), None).unwrap().unwrap() - ); - } - - #[test] - fn best_containing_with_hash_not_found() { - // block tree: - // G - - let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - let uninserted_block = client.new_block(Default::default()).unwrap().build().unwrap().block; - - assert_eq!( - None, - longest_chain_select.finality_target(uninserted_block.hash().clone(), None).unwrap() - ); - } - - #[test] - fn uncles_with_only_ancestors() { - // block tree: - // G 
-> A1 -> A2 - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - let v: Vec = Vec::new(); - assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); - } - - #[test] - fn uncles_with_multiple_forks() { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - 
}).unwrap(); - let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - let uncles1 = client.uncles(a4.hash(), 10).unwrap(); - assert_eq!(vec![b2.hash(), d2.hash()], uncles1); - - let uncles2 = client.uncles(a4.hash(), 0).unwrap(); - assert_eq!(0, uncles2.len()); - - let uncles3 = client.uncles(a1.hash(), 10).unwrap(); - assert_eq!(0, uncles3.len()); - - let uncles4 = client.uncles(genesis_hash, 10).unwrap(); - assert_eq!(0, uncles4.len()); - - let uncles5 = client.uncles(d2.hash(), 10).unwrap(); - assert_eq!(vec![a2.hash(), b2.hash()], uncles5); 
- - let uncles6 = client.uncles(b3.hash(), 1).unwrap(); - assert_eq!(vec![c3.hash()], uncles6); - } - - #[test] - fn best_containing_on_longest_chain_with_single_chain_3_blocks() { - // block tree: - // G -> A1 -> A2 - - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a1.hash(), None).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target(a2.hash(), None).unwrap().unwrap()); - } - - #[test] - fn best_containing_on_longest_chain_with_multiple_forks() { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, 
a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.build().unwrap().block; - 
client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - assert_eq!(client.chain_info().best_hash, a5.hash()); - - let genesis_hash = client.chain_info().genesis_hash; - let leaves = longest_chain_select.leaves().unwrap(); - - assert!(leaves.contains(&a5.hash())); - assert!(leaves.contains(&b4.hash())); - assert!(leaves.contains(&c3.hash())); - assert!(leaves.contains(&d2.hash())); - assert_eq!(leaves.len(), 4); - - // search without restriction - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - genesis_hash, None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), None).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a5.hash(), None).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), None).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), None).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), None).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), None).unwrap().unwrap()); - - - // search only blocks with number <= 5. 
equivalent to without restriction for this scenario - - assert_eq!(a5.hash(), longest_chain_select.finality_target( - genesis_hash, Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a1.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a4.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(a5.hash(), longest_chain_select.finality_target( - a5.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b3.hash(), Some(5)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(5)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(5)).unwrap().unwrap()); - - - // search only blocks with number <= 4 - - assert_eq!(a4.hash(), longest_chain_select.finality_target( - genesis_hash, Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a1.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(a4.hash(), longest_chain_select.finality_target( - a4.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(4)).unwrap()); - - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b2.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), 
longest_chain_select.finality_target( - b3.hash(), Some(4)).unwrap().unwrap()); - assert_eq!(b4.hash(), longest_chain_select.finality_target( - b4.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(4)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(4)).unwrap().unwrap()); - - - // search only blocks with number <= 3 - - assert_eq!(a3.hash(), longest_chain_select.finality_target( - genesis_hash, Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a1.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(a3.hash(), longest_chain_select.finality_target( - a3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(3)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(3)).unwrap()); - - assert_eq!(b3.hash(), longest_chain_select.finality_target( - b2.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(b3.hash(), longest_chain_select.finality_target( - b3.hash(), Some(3)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(3)).unwrap()); - - assert_eq!(c3.hash(), longest_chain_select.finality_target( - c3.hash(), Some(3)).unwrap().unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(3)).unwrap().unwrap()); - - - // search only blocks with number <= 2 - - assert_eq!(a2.hash(), longest_chain_select.finality_target( - genesis_hash, Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a1.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(a2.hash(), longest_chain_select.finality_target( - a2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), 
Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(2)).unwrap()); - - assert_eq!(b2.hash(), longest_chain_select.finality_target( - b2.hash(), Some(2)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(2)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(2)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(2)).unwrap()); - - assert_eq!(d2.hash(), longest_chain_select.finality_target( - d2.hash(), Some(2)).unwrap().unwrap()); - - - // search only blocks with number <= 1 - - assert_eq!(a1.hash(), longest_chain_select.finality_target( - genesis_hash, Some(1)).unwrap().unwrap()); - assert_eq!(a1.hash(), longest_chain_select.finality_target( - a1.hash(), Some(1)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(1)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash(), Some(1)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - d2.hash(), Some(1)).unwrap()); - - // search only blocks with number <= 0 - - assert_eq!(genesis_hash, longest_chain_select.finality_target( - genesis_hash, Some(0)).unwrap().unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a1.hash(), 
Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a4.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - a5.hash(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - b2.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b3.hash(), Some(0)).unwrap()); - assert_eq!(None, longest_chain_select.finality_target( - b4.hash(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - c3.hash().clone(), Some(0)).unwrap()); - - assert_eq!(None, longest_chain_select.finality_target( - d2.hash().clone(), Some(0)).unwrap()); - } - - #[test] - fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { - // block tree: - // G -> A1 -> A2 - - let (mut client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!(a2.hash(), longest_chain_select.finality_target(genesis_hash, Some(10)).unwrap().unwrap()); - } - - #[test] - fn key_changes_works() { - let (client, _, test_cases) = prepare_client_with_key_changes(); - - for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { - let end = client.block_hash(end).unwrap().unwrap(); - let actual_result = client.key_changes( - begin, - BlockId::Hash(end), - None, - &StorageKey(key), - ).unwrap(); - match actual_result == expected_result { - true => (), - false => 
panic!(format!("Failed test {}: actual = {:?}, expected = {:?}", - index, actual_result, expected_result)), - } - } - } - - #[test] - fn import_with_justification() { - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let justification = vec![1, 2, 3]; - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import_justified(BlockOrigin::Own, a3.clone(), justification.clone()).unwrap(); - - assert_eq!( - client.chain_info().finalized_hash, - a3.hash(), - ); - - assert_eq!( - client.justification(&BlockId::Hash(a3.hash())).unwrap(), - Some(justification), - ); - - assert_eq!( - client.justification(&BlockId::Hash(a1.hash())).unwrap(), - None, - ); - - assert_eq!( - client.justification(&BlockId::Hash(a2.hash())).unwrap(), - None, - ); - } - - #[test] - fn importing_diverged_finalized_block_should_trigger_reorg() { - let mut client = substrate_test_runtime_client::new(); - - // G -> A1 -> A2 - // \ - // -> B1 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); - // needed to make sure B1 gets a different hash from A1 - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: 
AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - // create but don't import B1 just yet - let b1 = b1.build().unwrap().block; - - // A2 is the current best since it's the longest chain - assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); - - // importing B1 as finalized should trigger a re-org and set it as new best - let justification = vec![1, 2, 3]; - client.import_justified(BlockOrigin::Own, b1.clone(), justification).unwrap(); - - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); - - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); - } - - #[test] - fn finalizing_diverged_block_should_trigger_reorg() { - - let (mut client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); - - // G -> A1 -> A2 - // \ - // -> B1 -> B2 - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); - // needed to make sure B1 gets a different hash from A1 - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); - - let b2 = client.new_block_at( - &BlockId::Hash(b1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // A2 is the current best since it's the longest chain - assert_eq!( - client.chain_info().best_hash, - a2.hash(), - ); - - // we finalize block B1 which is on a different branch from current best - // which should 
trigger a re-org. - ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); - - // B1 should now be the latest finalized - assert_eq!( - client.chain_info().finalized_hash, - b1.hash(), - ); - - // and B1 should be the new best block (`finalize_block` as no way of - // knowing about B2) - assert_eq!( - client.chain_info().best_hash, - b1.hash(), - ); - - // `SelectChain` should report B2 as best block though - assert_eq!( - select_chain.best_chain().unwrap().hash(), - b2.hash(), - ); - - // after we build B3 on top of B2 and import it - // it should be the new best block, - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - assert_eq!( - client.chain_info().best_hash, - b3.hash(), - ); - } - - #[test] - fn get_header_by_block_number_doesnt_panic() { - let client = substrate_test_runtime_client::new(); - - // backend uses u32 for block numbers, make sure we don't panic when - // trying to convert - let id = BlockId::::Number(72340207214430721); - client.header(&id).expect_err("invalid block number overflows u32"); - } - - #[test] - fn state_reverted_on_reorg() { - let _ = env_logger::try_init(); - let mut client = substrate_test_runtime_client::new(); - - let current_balance = |client: &substrate_test_runtime_client::TestClient| - client.runtime_api().balance_of( - &BlockId::number(client.chain_info().best_number), AccountKeyring::Alice.into() - ).unwrap(); - - // G -> A1 -> A2 - // \ - // -> B1 - let mut a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap(); - a1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 10, - nonce: 0, - }).unwrap(); - let a1 = a1.build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - let mut b1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), 
- false, - ).unwrap(); - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 50, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - // Reorg to B1 - client.import_as_best(BlockOrigin::Own, b1.clone()).unwrap(); - - assert_eq!(950, current_balance(&client)); - let mut a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - a2.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Charlie.into(), - amount: 10, - nonce: 1, - }).unwrap(); - let a2 = a2.build().unwrap().block; - // Re-org to A2 - client.import_as_best(BlockOrigin::Own, a2).unwrap(); - assert_eq!(980, current_balance(&client)); - } - - #[test] - fn doesnt_import_blocks_that_revert_finality() { - let _ = env_logger::try_init(); - let tmp = tempfile::tempdir().unwrap(); - - // we need to run with archive pruning to avoid pruning non-canonical - // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - pruning: PruningMode::ArchiveAll, - source: DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 128, - } - }, - u64::max_value(), - ).unwrap()); - - let mut client = TestClientBuilder::with_backend(backend).build(); - - // -> C1 - // / - // G -> A1 -> A2 - // \ - // -> B1 -> B2 -> B3 - - let a1 = client.new_block_at( - &BlockId::Number(0), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - - // needed to make sure B1 gets a different hash from A1 - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - 
to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - client.import(BlockOrigin::Own, b1.clone()).unwrap(); - - let b2 = client.new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // prepare B3 before we finalize A2, because otherwise we won't be able to - // read changes trie configuration after A2 is finalized - let b3 = client.new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - - // we will finalize A2 which should make it impossible to import a new - // B3 at the same height but that doesn't include it - ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); - - let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); - let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() - ); - - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); - - // adding a C1 block which is lower than the last finalized should also - // fail (with a cheaper check that doesn't require checking ancestry). 
- let mut c1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - - // needed to make sure C1 gets a different hash from A1 and B1 - c1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 2, - nonce: 0, - }).unwrap(); - let c1 = c1.build().unwrap().block; - - let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); - let expected_err = ConsensusError::ClientImport( - sp_blockchain::Error::NotInFinalizedChain.to_string() - ); - - assert_eq!( - import_err.to_string(), - expected_err.to_string(), - ); - } - - - #[test] - fn respects_block_rules() { - - fn run_test( - record_only: bool, - known_bad: &mut HashSet, - fork_rules: &mut Vec<(u64, H256)>, - ) { - let mut client = if record_only { - TestClientBuilder::new().build() - } else { - TestClientBuilder::new() - .set_block_rules( - Some(fork_rules.clone()), - Some(known_bad.clone()), - ) - .build() - }; - - let block_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; - - let params = BlockCheckParams { - hash: block_ok.hash().clone(), - number: 0, - parent_hash: block_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); - - // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 - let mut block_not_ok = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap(); - block_not_ok.push_storage_change(vec![0], Some(vec![1])).unwrap(); - let block_not_ok = block_not_ok.build().unwrap().block; - - let params = BlockCheckParams { - hash: block_not_ok.hash().clone(), - number: 0, - parent_hash: block_not_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - if record_only { - known_bad.insert(block_not_ok.hash()); - } else { - 
assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); - } - - // Now going to the fork - client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); - - // And check good fork - let mut block_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); - block_ok.push_storage_change(vec![0], Some(vec![2])).unwrap(); - let block_ok = block_ok.build().unwrap().block; - - let params = BlockCheckParams { - hash: block_ok.hash().clone(), - number: 1, - parent_hash: block_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - if record_only { - fork_rules.push((1, block_ok.hash().clone())); - } - assert_eq!(client.check_block(params).unwrap(), ImportResult::imported(false)); - - // And now try bad fork - let mut block_not_ok = client.new_block_at(&BlockId::Number(1), Default::default(), false) - .unwrap(); - block_not_ok.push_storage_change(vec![0], Some(vec![3])).unwrap(); - let block_not_ok = block_not_ok.build().unwrap().block; - - let params = BlockCheckParams { - hash: block_not_ok.hash().clone(), - number: 1, - parent_hash: block_not_ok.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - if !record_only { - assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); - } - } - - let mut known_bad = HashSet::new(); - let mut fork_rules = Vec::new(); - - // records what bad_blocks and fork_blocks hashes should be - run_test(true, &mut known_bad, &mut fork_rules); - - // enforces rules and actually makes assertions - run_test(false, &mut known_bad, &mut fork_rules); - } - - #[test] - fn returns_status_for_pruned_blocks() { - let _ = env_logger::try_init(); - let tmp = tempfile::tempdir().unwrap(); - - // set to prune after 1 block - // states - let backend = Arc::new(Backend::new( - DatabaseSettings { - state_cache_size: 1 << 20, - state_cache_child_ratio: None, - pruning: PruningMode::keep_blocks(1), - source: 
DatabaseSettingsSrc::RocksDb { - path: tmp.path().into(), - cache_size: 128, - } - }, - u64::max_value(), - ).unwrap()); - - let mut client = TestClientBuilder::with_backend(backend).build(); - - let a1 = client.new_block_at(&BlockId::Number(0), Default::default(), false) - .unwrap().build().unwrap().block; - - let mut b1 = client.new_block_at(&BlockId::Number(0), Default::default(), false).unwrap(); - - // b1 is created, but not imported - b1.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let b1 = b1.build().unwrap().block; - - let check_block_a1 = BlockCheckParams { - hash: a1.hash().clone(), - number: 0, - parent_hash: a1.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::imported(false)); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::Unknown); - - client.import_as_final(BlockOrigin::Own, a1.clone()).unwrap(); - - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainWithState); - - let a2 = client.new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - client.import_as_final(BlockOrigin::Own, a2.clone()).unwrap(); - - let check_block_a2 = BlockCheckParams { - hash: a2.hash().clone(), - number: 1, - parent_hash: a1.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - 
assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainWithState); - - let a3 = client.new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) - .unwrap().build().unwrap().block; - - client.import_as_final(BlockOrigin::Own, a3.clone()).unwrap(); - let check_block_a3 = BlockCheckParams { - hash: a3.hash().clone(), - number: 2, - parent_hash: a2.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - - // a1 and a2 are both pruned at this point - assert_eq!(client.check_block(check_block_a1.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a1.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a2.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a2.hash)).unwrap(), BlockStatus::InChainPruned); - assert_eq!(client.check_block(check_block_a3.clone()).unwrap(), ImportResult::AlreadyInChain); - assert_eq!(client.block_status(&BlockId::hash(check_block_a3.hash)).unwrap(), BlockStatus::InChainWithState); - - let mut check_block_b1 = BlockCheckParams { - hash: b1.hash().clone(), - number: 0, - parent_hash: b1.header().parent_hash().clone(), - allow_missing_state: false, - import_existing: false, - }; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::MissingState); - check_block_b1.allow_missing_state = true; - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::imported(false)); - check_block_b1.parent_hash = H256::random(); - assert_eq!(client.check_block(check_block_b1.clone()).unwrap(), ImportResult::UnknownParent); - } - - #[test] - fn imports_blocks_with_changes_tries_config_change() { - // create client with initial 4^2 configuration - let mut client = TestClientBuilder::with_default_backend() - .changes_trie_config(Some(ChangesTrieConfiguration { - digest_interval: 
4, - digest_levels: 2, - })).build(); - - // =================================================================== - // blocks 1,2,3,4,5,6,7,8,9,10 are empty - // block 11 changes the key - // block 12 is the L1 digest that covers this change - // blocks 13,14,15,16,17,18,19,20,21,22 are empty - // block 23 changes the configuration to 5^1 AND is skewed digest - // =================================================================== - // blocks 24,25 are changing the key - // block 26 is empty - // block 27 changes the key - // block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to 3^1 - // =================================================================== - // block 29 is empty - // block 30 changes the key - // block 31 is L1 digest that covers this change - // =================================================================== - (1..11).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (11..12).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (12..23).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (23..24).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 5, - digest_levels: 1, - })).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - 
}); - (24..26).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (26..27).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (27..28).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (28..29).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { - digest_interval: 3, - digest_levels: 1, - })).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (29..30).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (30..31).for_each(|number| { - let mut block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false).unwrap(); - block.push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())).unwrap(); - let block = block.build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - (31..32).for_each(|number| { - let block = client.new_block_at(&BlockId::Number(number - 1), Default::default(), false) - .unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, block).unwrap(); - }); - - // now 
check that configuration cache works - assert_eq!( - client.key_changes(1, BlockId::Number(31), None, &StorageKey(vec![42])).unwrap(), - vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] - ); - } - - #[test] - fn storage_keys_iter_prefix_and_start_key_works() { - let client = substrate_test_runtime_client::new(); - - let prefix = StorageKey(hex!("3a").to_vec()); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) - .unwrap() - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("3a636f6465").to_vec(), hex!("3a686561707061676573").to_vec()]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) - .unwrap() - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a686561707061676573").to_vec()))) - .unwrap() - .map(|x| x.0) - .collect(); - assert_eq!(res, Vec::>::new()); - } - - #[test] - fn storage_keys_iter_works() { - let client = substrate_test_runtime_client::new(); - - let prefix = StorageKey(hex!("").to_vec()); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) - .unwrap() - .take(2) - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), hex!("3a636f6465").to_vec()]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), Some(&StorageKey(hex!("3a636f6465").to_vec()))) - .unwrap() - .take(3) - .map(|x| x.0) - .collect(); - assert_eq!(res, [ - hex!("3a686561707061676573").to_vec(), - hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), - hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), - ]); - - let res: Vec<_> = client.storage_keys_iter(&BlockId::Number(0), Some(&prefix), 
Some(&StorageKey(hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec()))) - .unwrap() - .take(1) - .map(|x| x.0) - .collect(); - assert_eq!(res, [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()]); - } - - #[test] - fn cleans_up_closed_notification_sinks_on_block_import() { - use substrate_test_runtime_client::GenesisInit; - - // NOTE: we need to build the client here instead of using the client - // provided by test_runtime_client otherwise we can't access the private - // `import_notification_sinks` and `finality_notification_sinks` fields. - let mut client = - new_in_mem::< - _, - substrate_test_runtime_client::runtime::Block, - _, - substrate_test_runtime_client::runtime::RuntimeApi - >( - substrate_test_runtime_client::new_native_executor(), - &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), - None, - None, - sp_core::tasks::executor(), - ) - .unwrap(); - - type TestClient = Client< - in_mem::Backend, - LocalCallExecutor, sc_executor::NativeExecutor>, - substrate_test_runtime_client::runtime::Block, - substrate_test_runtime_client::runtime::RuntimeApi, - >; - - let import_notif1 = client.import_notification_stream(); - let import_notif2 = client.import_notification_stream(); - let finality_notif1 = client.finality_notification_stream(); - let finality_notif2 = client.finality_notification_stream(); - - // for some reason I can't seem to use `ClientBlockImportExt` - let bake_and_import_block = |client: &mut TestClient, origin| { - let block = client - .new_block(Default::default()) - .unwrap() - .build() - .unwrap() - .block; - - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - client.import_block(import, Default::default()).unwrap(); - }; - - // after importing a block we should still have 4 notification sinks - // 
(2 import + 2 finality) - bake_and_import_block(&mut client, BlockOrigin::Own); - assert_eq!(client.import_notification_sinks.lock().len(), 2); - assert_eq!(client.finality_notification_sinks.lock().len(), 2); - - // if we drop one import notification receiver and one finality - // notification receiver - drop(import_notif2); - drop(finality_notif2); - - // the sinks should be cleaned up after block import - bake_and_import_block(&mut client, BlockOrigin::Own); - assert_eq!(client.import_notification_sinks.lock().len(), 1); - assert_eq!(client.finality_notification_sinks.lock().len(), 1); - - // the same thing should happen if block import happens during initial - // sync - drop(import_notif1); - drop(finality_notif1); - - bake_and_import_block(&mut client, BlockOrigin::NetworkInitialSync); - assert_eq!(client.import_notification_sinks.lock().len(), 0); - assert_eq!(client.finality_notification_sinks.lock().len(), 0); - } + use super::*; + use hex_literal::hex; + use sp_consensus::{BlockImport, BlockOrigin, SelectChain}; + use sp_core::{blake2_256, H256}; + use sp_runtime::DigestItem; + use std::collections::HashMap; + use substrate_test_runtime_client::{ + client_ext::ClientExt, + prelude::*, + runtime::{self, Block, RuntimeApi, TestAPI, Transfer}, + sc_client_db::{Backend, DatabaseSettings, DatabaseSettingsSrc, PruningMode}, + }; + + /// Returns tuple, consisting of: + /// 1) test client pre-filled with blocks changing balances; + /// 2) roots of changes tries for these blocks + /// 3) test cases in form (begin, end, key, vec![(block, extrinsic)]) that are required to pass + pub fn prepare_client_with_key_changes() -> ( + substrate_test_runtime_client::sc_client::Client< + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + Block, + RuntimeApi, + >, + Vec, + Vec<(u64, u64, Vec, Vec<(u64, u32)>)>, + ) { + // prepare block structure + let blocks_transfers = vec![ + vec![ + (AccountKeyring::Alice, AccountKeyring::Dave), + 
(AccountKeyring::Bob, AccountKeyring::Dave), + ], + vec![(AccountKeyring::Charlie, AccountKeyring::Eve)], + vec![], + vec![(AccountKeyring::Alice, AccountKeyring::Dave)], + ]; + + // prepare client ang import blocks + let mut local_roots = Vec::new(); + let config = Some(ChangesTrieConfiguration::new(4, 2)); + let mut remote_client = TestClientBuilder::new().changes_trie_config(config).build(); + let mut nonces: HashMap<_, u64> = Default::default(); + for (i, block_transfers) in blocks_transfers.into_iter().enumerate() { + let mut builder = remote_client.new_block(Default::default()).unwrap(); + for (from, to) in block_transfers { + builder + .push_transfer(Transfer { + from: from.into(), + to: to.into(), + amount: 1, + nonce: *nonces.entry(from).and_modify(|n| *n = *n + 1).or_default(), + }) + .unwrap(); + } + let block = builder.build().unwrap().block; + remote_client.import(BlockOrigin::Own, block).unwrap(); + + let header = remote_client + .header(&BlockId::Number(i as u64 + 1)) + .unwrap() + .unwrap(); + let trie_root = header + .digest() + .log(DigestItem::as_changes_trie_root) + .map(|root| H256::from_slice(root.as_ref())) + .unwrap(); + local_roots.push(trie_root); + } + + // prepare test cases + let alice = blake2_256(&runtime::system::balance_of_key( + AccountKeyring::Alice.into(), + )) + .to_vec(); + let bob = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Bob.into())).to_vec(); + let charlie = blake2_256(&runtime::system::balance_of_key( + AccountKeyring::Charlie.into(), + )) + .to_vec(); + let dave = blake2_256(&runtime::system::balance_of_key( + AccountKeyring::Dave.into(), + )) + .to_vec(); + let eve = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Eve.into())).to_vec(); + let ferdie = blake2_256(&runtime::system::balance_of_key( + AccountKeyring::Ferdie.into(), + )) + .to_vec(); + let test_cases = vec![ + (1, 4, alice.clone(), vec![(4, 0), (1, 0)]), + (1, 3, alice.clone(), vec![(1, 0)]), + (2, 4, alice.clone(), vec![(4, 
0)]), + (2, 3, alice.clone(), vec![]), + (1, 4, bob.clone(), vec![(1, 1)]), + (1, 1, bob.clone(), vec![(1, 1)]), + (2, 4, bob.clone(), vec![]), + (1, 4, charlie.clone(), vec![(2, 0)]), + (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + (1, 1, dave.clone(), vec![(1, 1), (1, 0)]), + (3, 4, dave.clone(), vec![(4, 0)]), + (1, 4, eve.clone(), vec![(2, 0)]), + (1, 1, eve.clone(), vec![]), + (3, 4, eve.clone(), vec![]), + (1, 4, ferdie.clone(), vec![]), + ]; + + (remote_client, local_roots, test_cases) + } + + #[test] + fn client_initializes_from_genesis_ok() { + let client = substrate_test_runtime_client::new(); + + assert_eq!( + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into() + ) + .unwrap(), + 1000 + ); + assert_eq!( + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into() + ) + .unwrap(), + 0 + ); + } + + #[test] + fn block_builder_works_with_no_transactions() { + let mut client = substrate_test_runtime_client::new(); + + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); + } + + #[test] + fn block_builder_works_with_transactions() { + let mut client = substrate_test_runtime_client::new(); + + let mut builder = client.new_block(Default::default()).unwrap(); + + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); + assert_ne!( + client.state_at(&BlockId::Number(1)).unwrap().pairs(), + client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!( + client + .runtime_api() + .balance_of( + 
&BlockId::Number(client.chain_info().best_number), + AccountKeyring::Alice.into() + ) + .unwrap(), + 958 + ); + assert_eq!( + client + .runtime_api() + .balance_of( + &BlockId::Number(client.chain_info().best_number), + AccountKeyring::Ferdie.into() + ) + .unwrap(), + 42 + ); + } + + #[test] + fn block_builder_does_not_include_invalid() { + let mut client = substrate_test_runtime_client::new(); + + let mut builder = client.new_block(Default::default()).unwrap(); + + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 42, + nonce: 0, + }) + .unwrap(); + + assert!(builder + .push_transfer(Transfer { + from: AccountKeyring::Eve.into(), + to: AccountKeyring::Alice.into(), + amount: 42, + nonce: 0, + }) + .is_err()); + + let block = builder.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + + assert_eq!(client.chain_info().best_number, 1); + assert_ne!( + client.state_at(&BlockId::Number(1)).unwrap().pairs(), + client.state_at(&BlockId::Number(0)).unwrap().pairs() + ); + assert_eq!(client.body(&BlockId::Number(1)).unwrap().unwrap().len(), 1) + } + + #[test] + fn best_containing_with_genesis_block() { + // block tree: + // G + + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!( + genesis_hash.clone(), + longest_chain_select + .finality_target(genesis_hash.clone(), None) + .unwrap() + .unwrap() + ); + } + + #[test] + fn best_containing_with_hash_not_found() { + // block tree: + // G + + let (client, longest_chain_select) = TestClientBuilder::new().build_with_longest_chain(); + + let uninserted_block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + + assert_eq!( + None, + longest_chain_select + .finality_target(uninserted_block.hash().clone(), None) + .unwrap() + ); + } + + #[test] + fn uncles_with_only_ancestors() { + // block 
tree: + // G -> A1 -> A2 + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + let v: Vec = Vec::new(); + assert_eq!(v, client.uncles(a2.hash(), 3).unwrap()); + } + + #[test] + fn uncles_with_multiple_forks() { + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + 
to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + let uncles1 = client.uncles(a4.hash(), 10).unwrap(); + assert_eq!(vec![b2.hash(), d2.hash()], uncles1); + + let uncles2 = client.uncles(a4.hash(), 0).unwrap(); + assert_eq!(0, uncles2.len()); + + let uncles3 = client.uncles(a1.hash(), 10).unwrap(); + assert_eq!(0, uncles3.len()); + + let uncles4 = client.uncles(genesis_hash, 10).unwrap(); + assert_eq!(0, uncles4.len()); + + let uncles5 = 
client.uncles(d2.hash(), 10).unwrap(); + assert_eq!(vec![a2.hash(), b2.hash()], uncles5); + + let uncles6 = client.uncles(b3.hash(), 1).unwrap(); + assert_eq!(vec![c3.hash()], uncles6); + } + + #[test] + fn best_containing_on_longest_chain_with_single_chain_3_blocks() { + // block tree: + // G -> A1 -> A2 + + let (mut client, longest_chain_select) = + TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!( + a2.hash(), + longest_chain_select + .finality_target(genesis_hash, None) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + longest_chain_select + .finality_target(a1.hash(), None) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + longest_chain_select + .finality_target(a2.hash(), None) + .unwrap() + .unwrap() + ); + } + + #[test] + fn best_containing_on_longest_chain_with_multiple_forks() { + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let (mut client, longest_chain_select) = + TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, 
a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get 
imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + assert_eq!(client.chain_info().best_hash, a5.hash()); + + let genesis_hash = client.chain_info().genesis_hash; + let leaves = longest_chain_select.leaves().unwrap(); + + assert!(leaves.contains(&a5.hash())); + assert!(leaves.contains(&b4.hash())); + assert!(leaves.contains(&c3.hash())); + assert!(leaves.contains(&d2.hash())); + assert_eq!(leaves.len(), 4); + + // search without restriction + + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(genesis_hash, None) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a1.hash(), None) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a2.hash(), None) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a3.hash(), None) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a4.hash(), None) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a5.hash(), None) + .unwrap() + .unwrap() + ); + + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b2.hash(), None) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b3.hash(), None) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b4.hash(), None) + .unwrap() + .unwrap() + ); + + assert_eq!( + c3.hash(), + longest_chain_select + .finality_target(c3.hash(), None) + .unwrap() + .unwrap() + ); + + assert_eq!( + d2.hash(), + longest_chain_select + .finality_target(d2.hash(), None) + .unwrap() + .unwrap() + ); + + // search only blocks with number <= 5. 
equivalent to without restriction for this scenario + + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(genesis_hash, Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a1.hash(), Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a2.hash(), Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a3.hash(), Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a4.hash(), Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + a5.hash(), + longest_chain_select + .finality_target(a5.hash(), Some(5)) + .unwrap() + .unwrap() + ); + + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b2.hash(), Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b3.hash(), Some(5)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b4.hash(), Some(5)) + .unwrap() + .unwrap() + ); + + assert_eq!( + c3.hash(), + longest_chain_select + .finality_target(c3.hash(), Some(5)) + .unwrap() + .unwrap() + ); + + assert_eq!( + d2.hash(), + longest_chain_select + .finality_target(d2.hash(), Some(5)) + .unwrap() + .unwrap() + ); + + // search only blocks with number <= 4 + + assert_eq!( + a4.hash(), + longest_chain_select + .finality_target(genesis_hash, Some(4)) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + longest_chain_select + .finality_target(a1.hash(), Some(4)) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + longest_chain_select + .finality_target(a2.hash(), Some(4)) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + longest_chain_select + .finality_target(a3.hash(), Some(4)) + .unwrap() + .unwrap() + ); + assert_eq!( + a4.hash(), + longest_chain_select + .finality_target(a4.hash(), Some(4)) + .unwrap() + .unwrap() + ); 
+ assert_eq!( + None, + longest_chain_select + .finality_target(a5.hash(), Some(4)) + .unwrap() + ); + + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b2.hash(), Some(4)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b3.hash(), Some(4)) + .unwrap() + .unwrap() + ); + assert_eq!( + b4.hash(), + longest_chain_select + .finality_target(b4.hash(), Some(4)) + .unwrap() + .unwrap() + ); + + assert_eq!( + c3.hash(), + longest_chain_select + .finality_target(c3.hash(), Some(4)) + .unwrap() + .unwrap() + ); + + assert_eq!( + d2.hash(), + longest_chain_select + .finality_target(d2.hash(), Some(4)) + .unwrap() + .unwrap() + ); + + // search only blocks with number <= 3 + + assert_eq!( + a3.hash(), + longest_chain_select + .finality_target(genesis_hash, Some(3)) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + longest_chain_select + .finality_target(a1.hash(), Some(3)) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + longest_chain_select + .finality_target(a2.hash(), Some(3)) + .unwrap() + .unwrap() + ); + assert_eq!( + a3.hash(), + longest_chain_select + .finality_target(a3.hash(), Some(3)) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a4.hash(), Some(3)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a5.hash(), Some(3)) + .unwrap() + ); + + assert_eq!( + b3.hash(), + longest_chain_select + .finality_target(b2.hash(), Some(3)) + .unwrap() + .unwrap() + ); + assert_eq!( + b3.hash(), + longest_chain_select + .finality_target(b3.hash(), Some(3)) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(b4.hash(), Some(3)) + .unwrap() + ); + + assert_eq!( + c3.hash(), + longest_chain_select + .finality_target(c3.hash(), Some(3)) + .unwrap() + .unwrap() + ); + + assert_eq!( + d2.hash(), + longest_chain_select + .finality_target(d2.hash(), Some(3)) + .unwrap() + .unwrap() + ); + 
+ // search only blocks with number <= 2 + + assert_eq!( + a2.hash(), + longest_chain_select + .finality_target(genesis_hash, Some(2)) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + longest_chain_select + .finality_target(a1.hash(), Some(2)) + .unwrap() + .unwrap() + ); + assert_eq!( + a2.hash(), + longest_chain_select + .finality_target(a2.hash(), Some(2)) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a3.hash(), Some(2)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a4.hash(), Some(2)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a5.hash(), Some(2)) + .unwrap() + ); + + assert_eq!( + b2.hash(), + longest_chain_select + .finality_target(b2.hash(), Some(2)) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(b3.hash(), Some(2)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(b4.hash(), Some(2)) + .unwrap() + ); + + assert_eq!( + None, + longest_chain_select + .finality_target(c3.hash(), Some(2)) + .unwrap() + ); + + assert_eq!( + d2.hash(), + longest_chain_select + .finality_target(d2.hash(), Some(2)) + .unwrap() + .unwrap() + ); + + // search only blocks with number <= 1 + + assert_eq!( + a1.hash(), + longest_chain_select + .finality_target(genesis_hash, Some(1)) + .unwrap() + .unwrap() + ); + assert_eq!( + a1.hash(), + longest_chain_select + .finality_target(a1.hash(), Some(1)) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a2.hash(), Some(1)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a3.hash(), Some(1)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a4.hash(), Some(1)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a5.hash(), Some(1)) + .unwrap() + ); + + assert_eq!( + None, + longest_chain_select + 
.finality_target(b2.hash(), Some(1)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(b3.hash(), Some(1)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(b4.hash(), Some(1)) + .unwrap() + ); + + assert_eq!( + None, + longest_chain_select + .finality_target(c3.hash(), Some(1)) + .unwrap() + ); + + assert_eq!( + None, + longest_chain_select + .finality_target(d2.hash(), Some(1)) + .unwrap() + ); + + // search only blocks with number <= 0 + + assert_eq!( + genesis_hash, + longest_chain_select + .finality_target(genesis_hash, Some(0)) + .unwrap() + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a1.hash(), Some(0)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a2.hash(), Some(0)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a3.hash(), Some(0)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a4.hash(), Some(0)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(a5.hash(), Some(0)) + .unwrap() + ); + + assert_eq!( + None, + longest_chain_select + .finality_target(b2.hash(), Some(0)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(b3.hash(), Some(0)) + .unwrap() + ); + assert_eq!( + None, + longest_chain_select + .finality_target(b4.hash(), Some(0)) + .unwrap() + ); + + assert_eq!( + None, + longest_chain_select + .finality_target(c3.hash().clone(), Some(0)) + .unwrap() + ); + + assert_eq!( + None, + longest_chain_select + .finality_target(d2.hash().clone(), Some(0)) + .unwrap() + ); + } + + #[test] + fn best_containing_on_longest_chain_with_max_depth_higher_than_best() { + // block tree: + // G -> A1 -> A2 + + let (mut client, longest_chain_select) = + TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; 
+ client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!( + a2.hash(), + longest_chain_select + .finality_target(genesis_hash, Some(10)) + .unwrap() + .unwrap() + ); + } + + #[test] + fn key_changes_works() { + let (client, _, test_cases) = prepare_client_with_key_changes(); + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let end = client.block_hash(end).unwrap().unwrap(); + let actual_result = client + .key_changes(begin, BlockId::Hash(end), None, &StorageKey(key)) + .unwrap(); + match actual_result == expected_result { + true => (), + false => panic!(format!( + "Failed test {}: actual = {:?}, expected = {:?}", + index, actual_result, expected_result + )), + } + } + } + + #[test] + fn import_with_justification() { + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let justification = vec![1, 2, 3]; + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client + .import_justified(BlockOrigin::Own, a3.clone(), justification.clone()) + .unwrap(); + + assert_eq!(client.chain_info().finalized_hash, a3.hash(),); + + assert_eq!( + client.justification(&BlockId::Hash(a3.hash())).unwrap(), + Some(justification), + ); + + assert_eq!( + client.justification(&BlockId::Hash(a1.hash())).unwrap(), + None, + ); + + assert_eq!( + 
client.justification(&BlockId::Hash(a2.hash())).unwrap(), + None, + ); + } + + #[test] + fn importing_diverged_finalized_block_should_trigger_reorg() { + let mut client = substrate_test_runtime_client::new(); + + // G -> A1 -> A2 + // \ + // -> B1 + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + // create but don't import B1 just yet + let b1 = b1.build().unwrap().block; + + // A2 is the current best since it's the longest chain + assert_eq!(client.chain_info().best_hash, a2.hash(),); + + // importing B1 as finalized should trigger a re-org and set it as new best + let justification = vec![1, 2, 3]; + client + .import_justified(BlockOrigin::Own, b1.clone(), justification) + .unwrap(); + + assert_eq!(client.chain_info().best_hash, b1.hash(),); + + assert_eq!(client.chain_info().finalized_hash, b1.hash(),); + } + + #[test] + fn finalizing_diverged_block_should_trigger_reorg() { + let (mut client, select_chain) = TestClientBuilder::new().build_with_longest_chain(); + + // G -> A1 -> A2 + // \ + // -> B1 -> B2 + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + 
client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::Own, b1.clone()).unwrap(); + + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // A2 is the current best since it's the longest chain + assert_eq!(client.chain_info().best_hash, a2.hash(),); + + // we finalize block B1 which is on a different branch from current best + // which should trigger a re-org. + ClientExt::finalize_block(&client, BlockId::Hash(b1.hash()), None).unwrap(); + + // B1 should now be the latest finalized + assert_eq!(client.chain_info().finalized_hash, b1.hash(),); + + // and B1 should be the new best block (`finalize_block` as no way of + // knowing about B2) + assert_eq!(client.chain_info().best_hash, b1.hash(),); + + // `SelectChain` should report B2 as best block though + assert_eq!(select_chain.best_chain().unwrap().hash(), b2.hash(),); + + // after we build B3 on top of B2 and import it + // it should be the new best block, + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + assert_eq!(client.chain_info().best_hash, b3.hash(),); + } + + #[test] + fn get_header_by_block_number_doesnt_panic() { + let client = substrate_test_runtime_client::new(); + + // backend uses u32 for block numbers, make sure we don't panic when + // trying to convert + let id = BlockId::::Number(72340207214430721); + client + .header(&id) + .expect_err("invalid block 
number overflows u32"); + } + + #[test] + fn state_reverted_on_reorg() { + let _ = env_logger::try_init(); + let mut client = substrate_test_runtime_client::new(); + + let current_balance = |client: &substrate_test_runtime_client::TestClient| { + client + .runtime_api() + .balance_of( + &BlockId::number(client.chain_info().best_number), + AccountKeyring::Alice.into(), + ) + .unwrap() + }; + + // G -> A1 -> A2 + // \ + // -> B1 + let mut a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + a1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 10, + nonce: 0, + }) + .unwrap(); + let a1 = a1.build().unwrap().block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let mut b1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 50, + nonce: 0, + }) + .unwrap(); + let b1 = b1.build().unwrap().block; + // Reorg to B1 + client.import_as_best(BlockOrigin::Own, b1.clone()).unwrap(); + + assert_eq!(950, current_balance(&client)); + let mut a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + a2.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Charlie.into(), + amount: 10, + nonce: 1, + }) + .unwrap(); + let a2 = a2.build().unwrap().block; + // Re-org to A2 + client.import_as_best(BlockOrigin::Own, a2).unwrap(); + assert_eq!(980, current_balance(&client)); + } + + #[test] + fn doesnt_import_blocks_that_revert_finality() { + let _ = env_logger::try_init(); + let tmp = tempfile::tempdir().unwrap(); + + // we need to run with archive pruning to avoid pruning non-canonical + // states + let backend = Arc::new( + Backend::new( + DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + pruning: PruningMode::ArchiveAll, + source: 
DatabaseSettingsSrc::RocksDb { + path: tmp.path().into(), + cache_size: 128, + }, + }, + u64::max_value(), + ) + .unwrap(), + ); + + let mut client = TestClientBuilder::with_backend(backend).build(); + + // -> C1 + // / + // G -> A1 -> A2 + // \ + // -> B1 -> B2 -> B3 + + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + let mut b1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + + // needed to make sure B1 gets a different hash from A1 + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let b1 = b1.build().unwrap().block; + client.import(BlockOrigin::Own, b1.clone()).unwrap(); + + let b2 = client + .new_block_at(&BlockId::Hash(b1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // prepare B3 before we finalize A2, because otherwise we won't be able to + // read changes trie configuration after A2 is finalized + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + // we will finalize A2 which should make it impossible to import a new + // B3 at the same height but that doesn't include it + ClientExt::finalize_block(&client, BlockId::Hash(a2.hash()), None).unwrap(); + + let import_err = client.import(BlockOrigin::Own, b3).err().unwrap(); + let expected_err = + ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string()); + + assert_eq!(import_err.to_string(), expected_err.to_string(),); + + // adding a C1 block which 
is lower than the last finalized should also + // fail (with a cheaper check that doesn't require checking ancestry). + let mut c1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + + // needed to make sure C1 gets a different hash from A1 and B1 + c1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 2, + nonce: 0, + }) + .unwrap(); + let c1 = c1.build().unwrap().block; + + let import_err = client.import(BlockOrigin::Own, c1).err().unwrap(); + let expected_err = + ConsensusError::ClientImport(sp_blockchain::Error::NotInFinalizedChain.to_string()); + + assert_eq!(import_err.to_string(), expected_err.to_string(),); + } + + #[test] + fn respects_block_rules() { + fn run_test( + record_only: bool, + known_bad: &mut HashSet, + fork_rules: &mut Vec<(u64, H256)>, + ) { + let mut client = if record_only { + TestClientBuilder::new().build() + } else { + TestClientBuilder::new() + .set_block_rules(Some(fork_rules.clone()), Some(known_bad.clone())) + .build() + }; + + let block_ok = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + let params = BlockCheckParams { + hash: block_ok.hash().clone(), + number: 0, + parent_hash: block_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + assert_eq!( + client.check_block(params).unwrap(), + ImportResult::imported(false) + ); + + // this is 0x0d6d6612a10485370d9e085aeea7ec427fb3f34d961c6a816cdbe5cde2278864 + let mut block_not_ok = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + block_not_ok + .push_storage_change(vec![0], Some(vec![1])) + .unwrap(); + let block_not_ok = block_not_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_not_ok.hash().clone(), + number: 0, + parent_hash: block_not_ok.header().parent_hash().clone(), + allow_missing_state: false, + 
import_existing: false, + }; + if record_only { + known_bad.insert(block_not_ok.hash()); + } else { + assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + } + + // Now going to the fork + client.import_as_final(BlockOrigin::Own, block_ok).unwrap(); + + // And check good fork + let mut block_ok = client + .new_block_at(&BlockId::Number(1), Default::default(), false) + .unwrap(); + block_ok + .push_storage_change(vec![0], Some(vec![2])) + .unwrap(); + let block_ok = block_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_ok.hash().clone(), + number: 1, + parent_hash: block_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + if record_only { + fork_rules.push((1, block_ok.hash().clone())); + } + assert_eq!( + client.check_block(params).unwrap(), + ImportResult::imported(false) + ); + + // And now try bad fork + let mut block_not_ok = client + .new_block_at(&BlockId::Number(1), Default::default(), false) + .unwrap(); + block_not_ok + .push_storage_change(vec![0], Some(vec![3])) + .unwrap(); + let block_not_ok = block_not_ok.build().unwrap().block; + + let params = BlockCheckParams { + hash: block_not_ok.hash().clone(), + number: 1, + parent_hash: block_not_ok.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + if !record_only { + assert_eq!(client.check_block(params).unwrap(), ImportResult::KnownBad); + } + } + + let mut known_bad = HashSet::new(); + let mut fork_rules = Vec::new(); + + // records what bad_blocks and fork_blocks hashes should be + run_test(true, &mut known_bad, &mut fork_rules); + + // enforces rules and actually makes assertions + run_test(false, &mut known_bad, &mut fork_rules); + } + + #[test] + fn returns_status_for_pruned_blocks() { + let _ = env_logger::try_init(); + let tmp = tempfile::tempdir().unwrap(); + + // set to prune after 1 block + // states + let backend = Arc::new( + Backend::new( + 
DatabaseSettings { + state_cache_size: 1 << 20, + state_cache_child_ratio: None, + pruning: PruningMode::keep_blocks(1), + source: DatabaseSettingsSrc::RocksDb { + path: tmp.path().into(), + cache_size: 128, + }, + }, + u64::max_value(), + ) + .unwrap(), + ); + + let mut client = TestClientBuilder::with_backend(backend).build(); + + let a1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + let mut b1 = client + .new_block_at(&BlockId::Number(0), Default::default(), false) + .unwrap(); + + // b1 is created, but not imported + b1.push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let b1 = b1.build().unwrap().block; + + let check_block_a1 = BlockCheckParams { + hash: a1.hash().clone(), + number: 0, + parent_hash: a1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + assert_eq!( + client.check_block(check_block_a1.clone()).unwrap(), + ImportResult::imported(false) + ); + assert_eq!( + client + .block_status(&BlockId::hash(check_block_a1.hash)) + .unwrap(), + BlockStatus::Unknown + ); + + client + .import_as_final(BlockOrigin::Own, a1.clone()) + .unwrap(); + + assert_eq!( + client.check_block(check_block_a1.clone()).unwrap(), + ImportResult::AlreadyInChain + ); + assert_eq!( + client + .block_status(&BlockId::hash(check_block_a1.hash)) + .unwrap(), + BlockStatus::InChainWithState + ); + + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client + .import_as_final(BlockOrigin::Own, a2.clone()) + .unwrap(); + + let check_block_a2 = BlockCheckParams { + hash: a2.hash().clone(), + number: 1, + parent_hash: a1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + assert_eq!( + client.check_block(check_block_a1.clone()).unwrap(), + 
ImportResult::AlreadyInChain + ); + assert_eq!( + client + .block_status(&BlockId::hash(check_block_a1.hash)) + .unwrap(), + BlockStatus::InChainPruned + ); + assert_eq!( + client.check_block(check_block_a2.clone()).unwrap(), + ImportResult::AlreadyInChain + ); + assert_eq!( + client + .block_status(&BlockId::hash(check_block_a2.hash)) + .unwrap(), + BlockStatus::InChainWithState + ); + + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + client + .import_as_final(BlockOrigin::Own, a3.clone()) + .unwrap(); + let check_block_a3 = BlockCheckParams { + hash: a3.hash().clone(), + number: 2, + parent_hash: a2.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + + // a1 and a2 are both pruned at this point + assert_eq!( + client.check_block(check_block_a1.clone()).unwrap(), + ImportResult::AlreadyInChain + ); + assert_eq!( + client + .block_status(&BlockId::hash(check_block_a1.hash)) + .unwrap(), + BlockStatus::InChainPruned + ); + assert_eq!( + client.check_block(check_block_a2.clone()).unwrap(), + ImportResult::AlreadyInChain + ); + assert_eq!( + client + .block_status(&BlockId::hash(check_block_a2.hash)) + .unwrap(), + BlockStatus::InChainPruned + ); + assert_eq!( + client.check_block(check_block_a3.clone()).unwrap(), + ImportResult::AlreadyInChain + ); + assert_eq!( + client + .block_status(&BlockId::hash(check_block_a3.hash)) + .unwrap(), + BlockStatus::InChainWithState + ); + + let mut check_block_b1 = BlockCheckParams { + hash: b1.hash().clone(), + number: 0, + parent_hash: b1.header().parent_hash().clone(), + allow_missing_state: false, + import_existing: false, + }; + assert_eq!( + client.check_block(check_block_b1.clone()).unwrap(), + ImportResult::MissingState + ); + check_block_b1.allow_missing_state = true; + assert_eq!( + client.check_block(check_block_b1.clone()).unwrap(), + ImportResult::imported(false) + ); + 
check_block_b1.parent_hash = H256::random(); + assert_eq!( + client.check_block(check_block_b1.clone()).unwrap(), + ImportResult::UnknownParent + ); + } + + #[test] + fn imports_blocks_with_changes_tries_config_change() { + // create client with initial 4^2 configuration + let mut client = TestClientBuilder::with_default_backend() + .changes_trie_config(Some(ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + })) + .build(); + + // =================================================================== + // blocks 1,2,3,4,5,6,7,8,9,10 are empty + // block 11 changes the key + // block 12 is the L1 digest that covers this change + // blocks 13,14,15,16,17,18,19,20,21,22 are empty + // block 23 changes the configuration to 5^1 AND is skewed digest + // =================================================================== + // blocks 24,25 are changing the key + // block 26 is empty + // block 27 changes the key + // block 28 is the L1 digest (NOT SKEWED!!!) that covers changes AND changes configuration to 3^1 + // =================================================================== + // block 29 is empty + // block 30 changes the key + // block 31 is L1 digest that covers this change + // =================================================================== + (1..11).for_each(|number| { + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (11..12).for_each(|number| { + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (12..23).for_each(|number| { + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + 
.unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (23..24).for_each(|number| { + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 5, + digest_levels: 1, + })) + .unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (24..26).for_each(|number| { + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (26..27).for_each(|number| { + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (27..28).for_each(|number| { + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (28..29).for_each(|number| { + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_changes_trie_configuration_update(Some(ChangesTrieConfiguration { + digest_interval: 3, + digest_levels: 1, + })) + .unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (29..30).for_each(|number| { + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (30..31).for_each(|number| 
{ + let mut block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap(); + block + .push_storage_change(vec![42], Some(number.to_le_bytes().to_vec())) + .unwrap(); + let block = block.build().unwrap().block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + (31..32).for_each(|number| { + let block = client + .new_block_at(&BlockId::Number(number - 1), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, block).unwrap(); + }); + + // now check that configuration cache works + assert_eq!( + client + .key_changes(1, BlockId::Number(31), None, &StorageKey(vec![42])) + .unwrap(), + vec![(30, 0), (27, 0), (25, 0), (24, 0), (11, 0)] + ); + } + + #[test] + fn storage_keys_iter_prefix_and_start_key_works() { + let client = substrate_test_runtime_client::new(); + + let prefix = StorageKey(hex!("3a").to_vec()); + + let res: Vec<_> = client + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!( + res, + [ + hex!("3a636f6465").to_vec(), + hex!("3a686561707061676573").to_vec() + ] + ); + + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a636f6465").to_vec())), + ) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, [hex!("3a686561707061676573").to_vec()]); + + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a686561707061676573").to_vec())), + ) + .unwrap() + .map(|x| x.0) + .collect(); + assert_eq!(res, Vec::>::new()); + } + + #[test] + fn storage_keys_iter_works() { + let client = substrate_test_runtime_client::new(); + + let prefix = StorageKey(hex!("").to_vec()); + + let res: Vec<_> = client + .storage_keys_iter(&BlockId::Number(0), Some(&prefix), None) + .unwrap() + .take(2) + .map(|x| x.0) + .collect(); + assert_eq!( + res, + [ + 
hex!("0befda6e1ca4ef40219d588a727f1271").to_vec(), + hex!("3a636f6465").to_vec() + ] + ); + + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey(hex!("3a636f6465").to_vec())), + ) + .unwrap() + .take(3) + .map(|x| x.0) + .collect(); + assert_eq!( + res, + [ + hex!("3a686561707061676573").to_vec(), + hex!("6644b9b8bc315888ac8e41a7968dc2b4141a5403c58acdf70b7e8f7e07bf5081").to_vec(), + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d").to_vec(), + ] + ); + + let res: Vec<_> = client + .storage_keys_iter( + &BlockId::Number(0), + Some(&prefix), + Some(&StorageKey( + hex!("79c07e2b1d2e2abfd4855b936617eeff5e0621c4869aa60c02be9adcc98a0d1d") + .to_vec(), + )), + ) + .unwrap() + .take(1) + .map(|x| x.0) + .collect(); + assert_eq!( + res, + [hex!("cf722c0832b5231d35e29f319ff27389f5032bfc7bfc3ba5ed7839f2042fb99f").to_vec()] + ); + } + + #[test] + fn cleans_up_closed_notification_sinks_on_block_import() { + use substrate_test_runtime_client::GenesisInit; + + // NOTE: we need to build the client here instead of using the client + // provided by test_runtime_client otherwise we can't access the private + // `import_notification_sinks` and `finality_notification_sinks` fields. 
+ let mut client = new_in_mem::< + _, + substrate_test_runtime_client::runtime::Block, + _, + substrate_test_runtime_client::runtime::RuntimeApi, + >( + substrate_test_runtime_client::new_native_executor(), + &substrate_test_runtime_client::GenesisParameters::default().genesis_storage(), + None, + None, + sp_core::tasks::executor(), + ) + .unwrap(); + + type TestClient = Client< + in_mem::Backend, + LocalCallExecutor, sc_executor::NativeExecutor>, + substrate_test_runtime_client::runtime::Block, + substrate_test_runtime_client::runtime::RuntimeApi, + >; + + let import_notif1 = client.import_notification_stream(); + let import_notif2 = client.import_notification_stream(); + let finality_notif1 = client.finality_notification_stream(); + let finality_notif2 = client.finality_notification_stream(); + + // for some reason I can't seem to use `ClientBlockImportExt` + let bake_and_import_block = |client: &mut TestClient, origin| { + let block = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + client.import_block(import, Default::default()).unwrap(); + }; + + // after importing a block we should still have 4 notification sinks + // (2 import + 2 finality) + bake_and_import_block(&mut client, BlockOrigin::Own); + assert_eq!(client.import_notification_sinks.lock().len(), 2); + assert_eq!(client.finality_notification_sinks.lock().len(), 2); + + // if we drop one import notification receiver and one finality + // notification receiver + drop(import_notif2); + drop(finality_notif2); + + // the sinks should be cleaned up after block import + bake_and_import_block(&mut client, BlockOrigin::Own); + assert_eq!(client.import_notification_sinks.lock().len(), 1); + assert_eq!(client.finality_notification_sinks.lock().len(), 1); + + // the same 
thing should happen if block import happens during initial + // sync + drop(import_notif1); + drop(finality_notif1); + + bake_and_import_block(&mut client, BlockOrigin::NetworkInitialSync); + assert_eq!(client.import_notification_sinks.lock().len(), 0); + assert_eq!(client.finality_notification_sinks.lock().len(), 0); + } } diff --git a/client/src/genesis.rs b/client/src/genesis.rs index 2c84ff1e43..7f639399a7 100644 --- a/client/src/genesis.rs +++ b/client/src/genesis.rs @@ -16,241 +16,263 @@ //! Tool for creating the genesis block. -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, Zero}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT, Zero}; /// Create a genesis block, given the initial storage. -pub fn construct_genesis_block< - Block: BlockT -> ( - state_root: Block::Hash -) -> Block { - let extrinsics_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - Vec::new(), - ); +pub fn construct_genesis_block(state_root: Block::Hash) -> Block { + let extrinsics_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root(Vec::new()); - Block::new( - <::Header as HeaderT>::new( - Zero::zero(), - extrinsics_root, - state_root, - Default::default(), - Default::default() - ), - Default::default() - ) + Block::new( + <::Header as HeaderT>::new( + Zero::zero(), + extrinsics_root, + state_root, + Default::default(), + Default::default(), + ), + Default::default(), + ) } #[cfg(test)] mod tests { - use codec::{Encode, Decode, Joiner}; - use sc_executor::native_executor_instance; - use sp_state_machine::{ - StateMachine, OverlayedChanges, ExecutionStrategy, - InMemoryBackend, - }; - use substrate_test_runtime_client::{ - runtime::genesismap::{GenesisConfig, insert_genesis_block}, - runtime::{Hash, Transfer, Block, BlockNumber, Header, Digest}, - AccountKeyring, Sr25519Keyring, - }; - use sp_runtime::traits::BlakeTwo256; - use sp_core::tasks::executor as tasks_executor; - use hex_literal::*; + use codec::{Decode, 
Encode, Joiner}; + use hex_literal::*; + use sc_executor::native_executor_instance; + use sp_core::tasks::executor as tasks_executor; + use sp_runtime::traits::BlakeTwo256; + use sp_state_machine::{ExecutionStrategy, InMemoryBackend, OverlayedChanges, StateMachine}; + use substrate_test_runtime_client::{ + runtime::genesismap::{insert_genesis_block, GenesisConfig}, + runtime::{Block, BlockNumber, Digest, Hash, Header, Transfer}, + AccountKeyring, Sr25519Keyring, + }; - native_executor_instance!( - Executor, - substrate_test_runtime_client::runtime::api::dispatch, - substrate_test_runtime_client::runtime::native_version, - ); + native_executor_instance!( + Executor, + substrate_test_runtime_client::runtime::api::dispatch, + substrate_test_runtime_client::runtime::native_version, + ); - fn executor() -> sc_executor::NativeExecutor { - sc_executor::NativeExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) - } + fn executor() -> sc_executor::NativeExecutor { + sc_executor::NativeExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) + } - fn construct_block( - backend: &InMemoryBackend, - number: BlockNumber, - parent_hash: Hash, - state_root: Hash, - txs: Vec - ) -> (Vec, Hash) { - use sp_trie::{TrieConfiguration, trie_types::Layout}; + fn construct_block( + backend: &InMemoryBackend, + number: BlockNumber, + parent_hash: Hash, + state_root: Hash, + txs: Vec, + ) -> (Vec, Hash) { + use sp_trie::{trie_types::Layout, TrieConfiguration}; - let transactions = txs.into_iter().map(|tx| tx.into_signed_tx()).collect::>(); + let transactions = txs + .into_iter() + .map(|tx| tx.into_signed_tx()) + .collect::>(); - let iter = transactions.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter).into(); + let iter = transactions.iter().map(Encode::encode); + let extrinsics_root = Layout::::ordered_trie_root(iter).into(); - let mut header = Header { - parent_hash, - number, - state_root, - extrinsics_root, - digest: 
Digest { logs: vec![], }, - }; - let hash = header.hash(); - let mut overlay = OverlayedChanges::default(); - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); - let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + let mut header = Header { + parent_hash, + number, + state_root, + extrinsics_root, + digest: Digest { logs: vec![] }, + }; + let hash = header.hash(); + let mut overlay = OverlayedChanges::default(); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code + .runtime_code() + .expect("Code is part of the backend"); - StateMachine::new( - backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), - &mut overlay, - &executor(), - "Core_initialize_block", - &header.encode(), - Default::default(), - &runtime_code, - tasks_executor(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); + StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &executor(), + "Core_initialize_block", + &header.encode(), + Default::default(), + &runtime_code, + tasks_executor(), + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); - for tx in transactions.iter() { - StateMachine::new( - backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), - &mut overlay, - &executor(), - "BlockBuilder_apply_extrinsic", - &tx.encode(), - Default::default(), - &runtime_code, - tasks_executor(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); - } + for tx in transactions.iter() { + StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &executor(), + "BlockBuilder_apply_extrinsic", + &tx.encode(), + Default::default(), + &runtime_code, + tasks_executor(), + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); + } - let ret_data = StateMachine::new( - backend, - 
sp_state_machine::disabled_changes_trie_state::<_, u64>(), - &mut overlay, - &executor(), - "BlockBuilder_finalize_block", - &[], - Default::default(), - &runtime_code, - tasks_executor(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); - header = Header::decode(&mut &ret_data[..]).unwrap(); + let ret_data = StateMachine::new( + backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &executor(), + "BlockBuilder_finalize_block", + &[], + Default::default(), + &runtime_code, + tasks_executor(), + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); + header = Header::decode(&mut &ret_data[..]).unwrap(); - (vec![].and(&Block { header, extrinsics: transactions }), hash) - } + ( + vec![].and(&Block { + header, + extrinsics: transactions, + }), + hash, + ) + } - fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { - construct_block( - backend, - 1, - genesis_hash, - hex!("25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c").into(), - vec![Transfer { - from: AccountKeyring::One.into(), - to: AccountKeyring::Two.into(), - amount: 69, - nonce: 0, - }] - ) - } + fn block1(genesis_hash: Hash, backend: &InMemoryBackend) -> (Vec, Hash) { + construct_block( + backend, + 1, + genesis_hash, + hex!("25e5b37074063ab75c889326246640729b40d0c86932edc527bc80db0e04fe5c").into(), + vec![Transfer { + from: AccountKeyring::One.into(), + to: AccountKeyring::Two.into(), + amount: 69, + nonce: 0, + }], + ) + } - #[test] - fn construct_genesis_should_work_with_native() { - let mut storage = GenesisConfig::new( - None, - vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], - 1000, - None, - Default::default(), - ).genesis_map(); - let genesis_hash = insert_genesis_block(&mut storage); + #[test] + fn construct_genesis_should_work_with_native() { + let mut storage = GenesisConfig::new( + None, + vec![ + 
Sr25519Keyring::One.public().into(), + Sr25519Keyring::Two.public().into(), + ], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + None, + Default::default(), + ) + .genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); - let backend = InMemoryBackend::from(storage); - let (b1data, _b1hash) = block1(genesis_hash, &backend); - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); - let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code + .runtime_code() + .expect("Code is part of the backend"); - let mut overlay = OverlayedChanges::default(); - let _ = StateMachine::new( - &backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), - &mut overlay, - &executor(), - "Core_execute_block", - &b1data, - Default::default(), - &runtime_code, - tasks_executor(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ).unwrap(); - } + let mut overlay = OverlayedChanges::default(); + let _ = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + tasks_executor(), + ) + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(); + } - #[test] - fn construct_genesis_should_work_with_wasm() { - let mut storage = GenesisConfig::new(None, - vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], - 1000, - None, - Default::default(), - ).genesis_map(); - let genesis_hash = insert_genesis_block(&mut storage); + #[test] + fn construct_genesis_should_work_with_wasm() { + let mut storage = 
GenesisConfig::new( + None, + vec![ + Sr25519Keyring::One.public().into(), + Sr25519Keyring::Two.public().into(), + ], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 1000, + None, + Default::default(), + ) + .genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); - let backend = InMemoryBackend::from(storage); - let (b1data, _b1hash) = block1(genesis_hash, &backend); - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); - let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code + .runtime_code() + .expect("Code is part of the backend"); - let mut overlay = OverlayedChanges::default(); - let _ = StateMachine::new( - &backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), - &mut overlay, - &executor(), - "Core_execute_block", - &b1data, - Default::default(), - &runtime_code, - tasks_executor(), - ).execute( - ExecutionStrategy::AlwaysWasm, - ).unwrap(); - } + let mut overlay = OverlayedChanges::default(); + let _ = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + tasks_executor(), + ) + .execute(ExecutionStrategy::AlwaysWasm) + .unwrap(); + } - #[test] - fn construct_genesis_with_bad_transaction_should_panic() { - let mut storage = GenesisConfig::new(None, - vec![Sr25519Keyring::One.public().into(), Sr25519Keyring::Two.public().into()], - vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], - 68, - None, - Default::default(), - ).genesis_map(); - let genesis_hash = insert_genesis_block(&mut storage); + #[test] + fn 
construct_genesis_with_bad_transaction_should_panic() { + let mut storage = GenesisConfig::new( + None, + vec![ + Sr25519Keyring::One.public().into(), + Sr25519Keyring::Two.public().into(), + ], + vec![AccountKeyring::One.into(), AccountKeyring::Two.into()], + 68, + None, + Default::default(), + ) + .genesis_map(); + let genesis_hash = insert_genesis_block(&mut storage); - let backend = InMemoryBackend::from(storage); - let (b1data, _b1hash) = block1(genesis_hash, &backend); - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); - let runtime_code = backend_runtime_code.runtime_code().expect("Code is part of the backend"); + let backend = InMemoryBackend::from(storage); + let (b1data, _b1hash) = block1(genesis_hash, &backend); + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&backend); + let runtime_code = backend_runtime_code + .runtime_code() + .expect("Code is part of the backend"); - let mut overlay = OverlayedChanges::default(); - let r = StateMachine::new( - &backend, - sp_state_machine::disabled_changes_trie_state::<_, u64>(), - &mut overlay, - &executor(), - "Core_execute_block", - &b1data, - Default::default(), - &runtime_code, - tasks_executor(), - ).execute( - ExecutionStrategy::NativeElseWasm, - ); - assert!(r.is_err()); - } + let mut overlay = OverlayedChanges::default(); + let r = StateMachine::new( + &backend, + sp_state_machine::disabled_changes_trie_state::<_, u64>(), + &mut overlay, + &executor(), + "Core_execute_block", + &b1data, + Default::default(), + &runtime_code, + tasks_executor(), + ) + .execute(ExecutionStrategy::NativeElseWasm); + assert!(r.is_err()); + } } diff --git a/client/src/in_mem.rs b/client/src/in_mem.rs index 3672da1822..c6f9ef7f4e 100644 --- a/client/src/in_mem.rs +++ b/client/src/in_mem.rs @@ -16,761 +16,881 @@ //! 
In memory client backend -use std::collections::HashMap; -use std::sync::Arc; use parking_lot::RwLock; +use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; +use sp_core::offchain::storage::InMemOffchainStorage as OffchainStorage; use sp_core::storage::well_known_keys; -use sp_core::offchain::storage::{ - InMemOffchainStorage as OffchainStorage -}; use sp_runtime::generic::BlockId; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Zero, NumberFor, HashFor}; +use sp_runtime::traits::{Block as BlockT, HashFor, Header as HeaderT, NumberFor, Zero}; use sp_runtime::{Justification, Storage}; use sp_state_machine::{ - ChangesTrieTransaction, InMemoryBackend, Backend as StateBackend, StorageCollection, - ChildStorageCollection, + Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, + StorageCollection, }; -use sp_blockchain::{CachedHeaderMetadata, HeaderMetadata}; +use std::collections::HashMap; +use std::sync::Arc; +use crate::leaves::LeafSet; use sc_client_api::{ - backend::{self, NewBlockState}, - blockchain::{ - self, BlockStatus, HeaderBackend, well_known_cache_keys::Id as CacheKeyId - }, - UsageInfo, + backend::{self, NewBlockState}, + blockchain::{self, well_known_cache_keys::Id as CacheKeyId, BlockStatus, HeaderBackend}, + UsageInfo, }; -use crate::leaves::LeafSet; struct PendingBlock { - block: StoredBlock, - state: NewBlockState, + block: StoredBlock, + state: NewBlockState, } #[derive(PartialEq, Eq, Clone)] enum StoredBlock { - Header(B::Header, Option), - Full(B, Option), + Header(B::Header, Option), + Full(B, Option), } impl StoredBlock { - fn new(header: B::Header, body: Option>, just: Option) -> Self { - match body { - Some(body) => StoredBlock::Full(B::new(header, body), just), - None => StoredBlock::Header(header, just), - } - } - - fn header(&self) -> &B::Header { - match *self { - StoredBlock::Header(ref h, _) => h, - StoredBlock::Full(ref b, _) => b.header(), - } - } - - fn justification(&self) -> 
Option<&Justification> { - match *self { - StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref() - } - } - - fn extrinsics(&self) -> Option<&[B::Extrinsic]> { - match *self { - StoredBlock::Header(_, _) => None, - StoredBlock::Full(ref b, _) => Some(b.extrinsics()), - } - } - - fn into_inner(self) -> (B::Header, Option>, Option) { - match self { - StoredBlock::Header(header, just) => (header, None, just), - StoredBlock::Full(block, just) => { - let (header, body) = block.deconstruct(); - (header, Some(body), just) - } - } - } + fn new( + header: B::Header, + body: Option>, + just: Option, + ) -> Self { + match body { + Some(body) => StoredBlock::Full(B::new(header, body), just), + None => StoredBlock::Header(header, just), + } + } + + fn header(&self) -> &B::Header { + match *self { + StoredBlock::Header(ref h, _) => h, + StoredBlock::Full(ref b, _) => b.header(), + } + } + + fn justification(&self) -> Option<&Justification> { + match *self { + StoredBlock::Header(_, ref j) | StoredBlock::Full(_, ref j) => j.as_ref(), + } + } + + fn extrinsics(&self) -> Option<&[B::Extrinsic]> { + match *self { + StoredBlock::Header(_, _) => None, + StoredBlock::Full(ref b, _) => Some(b.extrinsics()), + } + } + + fn into_inner(self) -> (B::Header, Option>, Option) { + match self { + StoredBlock::Header(header, just) => (header, None, just), + StoredBlock::Full(block, just) => { + let (header, body) = block.deconstruct(); + (header, Some(body), just) + } + } + } } #[derive(Clone)] struct BlockchainStorage { - blocks: HashMap>, - hashes: HashMap, Block::Hash>, - best_hash: Block::Hash, - best_number: NumberFor, - finalized_hash: Block::Hash, - finalized_number: NumberFor, - genesis_hash: Block::Hash, - header_cht_roots: HashMap, Block::Hash>, - changes_trie_cht_roots: HashMap, Block::Hash>, - leaves: LeafSet>, - aux: HashMap, Vec>, + blocks: HashMap>, + hashes: HashMap, Block::Hash>, + best_hash: Block::Hash, + best_number: NumberFor, + finalized_hash: 
Block::Hash, + finalized_number: NumberFor, + genesis_hash: Block::Hash, + header_cht_roots: HashMap, Block::Hash>, + changes_trie_cht_roots: HashMap, Block::Hash>, + leaves: LeafSet>, + aux: HashMap, Vec>, } /// In-memory blockchain. Supports concurrent reads. pub struct Blockchain { - storage: Arc>>, + storage: Arc>>, } impl Clone for Blockchain { - fn clone(&self) -> Self { - let storage = Arc::new(RwLock::new(self.storage.read().clone())); - Blockchain { - storage: storage.clone(), - } - } + fn clone(&self) -> Self { + let storage = Arc::new(RwLock::new(self.storage.read().clone())); + Blockchain { + storage: storage.clone(), + } + } } impl Blockchain { - /// Get header hash of given block. - pub fn id(&self, id: BlockId) -> Option { - match id { - BlockId::Hash(h) => Some(h), - BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(), - } - } - - /// Create new in-memory blockchain storage. - pub fn new() -> Blockchain { - let storage = Arc::new(RwLock::new( - BlockchainStorage { - blocks: HashMap::new(), - hashes: HashMap::new(), - best_hash: Default::default(), - best_number: Zero::zero(), - finalized_hash: Default::default(), - finalized_number: Zero::zero(), - genesis_hash: Default::default(), - header_cht_roots: HashMap::new(), - changes_trie_cht_roots: HashMap::new(), - leaves: LeafSet::new(), - aux: HashMap::new(), - })); - Blockchain { - storage: storage.clone(), - } - } - - /// Insert a block header and associated data. 
- pub fn insert( - &self, - hash: Block::Hash, - header: ::Header, - justification: Option, - body: Option::Extrinsic>>, - new_state: NewBlockState, - ) -> sp_blockchain::Result<()> { - let number = header.number().clone(); - if new_state.is_best() { - self.apply_head(&header)?; - } - - { - let mut storage = self.storage.write(); - storage.leaves.import(hash.clone(), number.clone(), header.parent_hash().clone()); - storage.blocks.insert(hash.clone(), StoredBlock::new(header, body, justification)); - - if let NewBlockState::Final = new_state { - storage.finalized_hash = hash; - storage.finalized_number = number.clone(); - } - - if number == Zero::zero() { - storage.genesis_hash = hash; - } - } - - Ok(()) - } - - /// Get total number of blocks. - pub fn blocks_count(&self) -> usize { - self.storage.read().blocks.len() - } - - /// Compare this blockchain with another in-mem blockchain - pub fn equals_to(&self, other: &Self) -> bool { - self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks - } - - /// Compare canonical chain to other canonical chain. - pub fn canon_equals_to(&self, other: &Self) -> bool { - let this = self.storage.read(); - let other = other.storage.read(); - this.hashes == other.hashes - && this.best_hash == other.best_hash - && this.best_number == other.best_number - && this.genesis_hash == other.genesis_hash - } - - /// Insert header CHT root. - pub fn insert_cht_root(&self, block: NumberFor, cht_root: Block::Hash) { - self.storage.write().header_cht_roots.insert(block, cht_root); - } - - /// Set an existing block as head. - pub fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { - let header = match self.header(id)? 
{ - Some(h) => h, - None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), - }; - - self.apply_head(&header) - } - - fn apply_head(&self, header: &::Header) -> sp_blockchain::Result<()> { - let hash = header.hash(); - let number = header.number(); - - // Note: this may lock storage, so it must happen before obtaining storage - // write lock. - let best_tree_route = { - let best_hash = self.storage.read().best_hash; - if &best_hash == header.parent_hash() { - None - } else { - let route = sp_blockchain::tree_route(self, best_hash, *header.parent_hash())?; - Some(route) - } - }; - - let mut storage = self.storage.write(); - - if let Some(tree_route) = best_tree_route { - // apply retraction and enaction when reorganizing up to parent hash - let enacted = tree_route.enacted(); - - for entry in enacted { - storage.hashes.insert(entry.number, entry.hash); - } - - for entry in tree_route.retracted().iter().skip(enacted.len()) { - storage.hashes.remove(&entry.number); - } - } - - storage.best_hash = hash.clone(); - storage.best_number = number.clone(); - storage.hashes.insert(number.clone(), hash.clone()); - - Ok(()) - } - - fn finalize_header(&self, id: BlockId, justification: Option) -> sp_blockchain::Result<()> { - let hash = match self.header(id)? 
{ - Some(h) => h.hash(), - None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), - }; - - let mut storage = self.storage.write(); - storage.finalized_hash = hash; - - if justification.is_some() { - let block = storage.blocks.get_mut(&hash) - .expect("hash was fetched from a block in the db; qed"); - - let block_justification = match block { - StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j - }; - - *block_justification = justification; - } - - Ok(()) - } - - fn write_aux(&self, ops: Vec<(Vec, Option>)>) { - let mut storage = self.storage.write(); - for (k, v) in ops { - match v { - Some(v) => storage.aux.insert(k, v), - None => storage.aux.remove(&k), - }; - } - } + /// Get header hash of given block. + pub fn id(&self, id: BlockId) -> Option { + match id { + BlockId::Hash(h) => Some(h), + BlockId::Number(n) => self.storage.read().hashes.get(&n).cloned(), + } + } + + /// Create new in-memory blockchain storage. + pub fn new() -> Blockchain { + let storage = Arc::new(RwLock::new(BlockchainStorage { + blocks: HashMap::new(), + hashes: HashMap::new(), + best_hash: Default::default(), + best_number: Zero::zero(), + finalized_hash: Default::default(), + finalized_number: Zero::zero(), + genesis_hash: Default::default(), + header_cht_roots: HashMap::new(), + changes_trie_cht_roots: HashMap::new(), + leaves: LeafSet::new(), + aux: HashMap::new(), + })); + Blockchain { + storage: storage.clone(), + } + } + + /// Insert a block header and associated data. 
+ pub fn insert( + &self, + hash: Block::Hash, + header: ::Header, + justification: Option, + body: Option::Extrinsic>>, + new_state: NewBlockState, + ) -> sp_blockchain::Result<()> { + let number = header.number().clone(); + if new_state.is_best() { + self.apply_head(&header)?; + } + + { + let mut storage = self.storage.write(); + storage + .leaves + .import(hash.clone(), number.clone(), header.parent_hash().clone()); + storage + .blocks + .insert(hash.clone(), StoredBlock::new(header, body, justification)); + + if let NewBlockState::Final = new_state { + storage.finalized_hash = hash; + storage.finalized_number = number.clone(); + } + + if number == Zero::zero() { + storage.genesis_hash = hash; + } + } + + Ok(()) + } + + /// Get total number of blocks. + pub fn blocks_count(&self) -> usize { + self.storage.read().blocks.len() + } + + /// Compare this blockchain with another in-mem blockchain + pub fn equals_to(&self, other: &Self) -> bool { + self.canon_equals_to(other) && self.storage.read().blocks == other.storage.read().blocks + } + + /// Compare canonical chain to other canonical chain. + pub fn canon_equals_to(&self, other: &Self) -> bool { + let this = self.storage.read(); + let other = other.storage.read(); + this.hashes == other.hashes + && this.best_hash == other.best_hash + && this.best_number == other.best_number + && this.genesis_hash == other.genesis_hash + } + + /// Insert header CHT root. + pub fn insert_cht_root(&self, block: NumberFor, cht_root: Block::Hash) { + self.storage + .write() + .header_cht_roots + .insert(block, cht_root); + } + + /// Set an existing block as head. + pub fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { + let header = match self.header(id)? 
{ + Some(h) => h, + None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), + }; + + self.apply_head(&header) + } + + fn apply_head(&self, header: &::Header) -> sp_blockchain::Result<()> { + let hash = header.hash(); + let number = header.number(); + + // Note: this may lock storage, so it must happen before obtaining storage + // write lock. + let best_tree_route = { + let best_hash = self.storage.read().best_hash; + if &best_hash == header.parent_hash() { + None + } else { + let route = sp_blockchain::tree_route(self, best_hash, *header.parent_hash())?; + Some(route) + } + }; + + let mut storage = self.storage.write(); + + if let Some(tree_route) = best_tree_route { + // apply retraction and enaction when reorganizing up to parent hash + let enacted = tree_route.enacted(); + + for entry in enacted { + storage.hashes.insert(entry.number, entry.hash); + } + + for entry in tree_route.retracted().iter().skip(enacted.len()) { + storage.hashes.remove(&entry.number); + } + } + + storage.best_hash = hash.clone(); + storage.best_number = number.clone(); + storage.hashes.insert(number.clone(), hash.clone()); + + Ok(()) + } + + fn finalize_header( + &self, + id: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()> { + let hash = match self.header(id)? 
{ + Some(h) => h.hash(), + None => return Err(sp_blockchain::Error::UnknownBlock(format!("{}", id))), + }; + + let mut storage = self.storage.write(); + storage.finalized_hash = hash; + + if justification.is_some() { + let block = storage + .blocks + .get_mut(&hash) + .expect("hash was fetched from a block in the db; qed"); + + let block_justification = match block { + StoredBlock::Header(_, ref mut j) | StoredBlock::Full(_, ref mut j) => j, + }; + + *block_justification = justification; + } + + Ok(()) + } + + fn write_aux(&self, ops: Vec<(Vec, Option>)>) { + let mut storage = self.storage.write(); + for (k, v) in ops { + match v { + Some(v) => storage.aux.insert(k, v), + None => storage.aux.remove(&k), + }; + } + } } impl HeaderBackend for Blockchain { - fn header(&self, id: BlockId) -> sp_blockchain::Result::Header>> { - Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash).map(|b| b.header().clone()) - })) - } - - fn info(&self) -> blockchain::Info { - let storage = self.storage.read(); - blockchain::Info { - best_hash: storage.best_hash, - best_number: storage.best_number, - genesis_hash: storage.genesis_hash, - finalized_hash: storage.finalized_hash, - finalized_number: storage.finalized_number, - number_leaves: storage.leaves.count() - } - } - - fn status(&self, id: BlockId) -> sp_blockchain::Result { - match self.id(id).map_or(false, |hash| self.storage.read().blocks.contains_key(&hash)) { - true => Ok(BlockStatus::InChain), - false => Ok(BlockStatus::Unknown), - } - } - - fn number(&self, hash: Block::Hash) -> sp_blockchain::Result>> { - Ok(self.storage.read().blocks.get(&hash).map(|b| *b.header().number())) - } - - fn hash(&self, number: <::Header as HeaderT>::Number) -> sp_blockchain::Result> { - Ok(self.id(BlockId::Number(number))) - } + fn header( + &self, + id: BlockId, + ) -> sp_blockchain::Result::Header>> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .map(|b| b.header().clone()) + })) + } 
+ + fn info(&self) -> blockchain::Info { + let storage = self.storage.read(); + blockchain::Info { + best_hash: storage.best_hash, + best_number: storage.best_number, + genesis_hash: storage.genesis_hash, + finalized_hash: storage.finalized_hash, + finalized_number: storage.finalized_number, + number_leaves: storage.leaves.count(), + } + } + + fn status(&self, id: BlockId) -> sp_blockchain::Result { + match self + .id(id) + .map_or(false, |hash| self.storage.read().blocks.contains_key(&hash)) + { + true => Ok(BlockStatus::InChain), + false => Ok(BlockStatus::Unknown), + } + } + + fn number(&self, hash: Block::Hash) -> sp_blockchain::Result>> { + Ok(self + .storage + .read() + .blocks + .get(&hash) + .map(|b| *b.header().number())) + } + + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> sp_blockchain::Result> { + Ok(self.id(BlockId::Number(number))) + } } impl HeaderMetadata for Blockchain { - type Error = sp_blockchain::Error; - - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) - .ok_or(sp_blockchain::Error::UnknownBlock(format!("header not found: {}", hash))) - } - - fn insert_header_metadata(&self, _hash: Block::Hash, _metadata: CachedHeaderMetadata) { - // No need to implement. - } - fn remove_header_metadata(&self, _hash: Block::Hash) { - // No need to implement. - } + type Error = sp_blockchain::Error; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.header(BlockId::hash(hash))? + .map(|header| CachedHeaderMetadata::from(&header)) + .ok_or(sp_blockchain::Error::UnknownBlock(format!( + "header not found: {}", + hash + ))) + } + + fn insert_header_metadata(&self, _hash: Block::Hash, _metadata: CachedHeaderMetadata) { + // No need to implement. + } + fn remove_header_metadata(&self, _hash: Block::Hash) { + // No need to implement. 
+ } } impl blockchain::Backend for Blockchain { - fn body(&self, id: BlockId) -> sp_blockchain::Result::Extrinsic>>> { - Ok(self.id(id).and_then(|hash| { - self.storage.read().blocks.get(&hash) - .and_then(|b| b.extrinsics().map(|x| x.to_vec())) - })) - } - - fn justification(&self, id: BlockId) -> sp_blockchain::Result> { - Ok(self.id(id).and_then(|hash| self.storage.read().blocks.get(&hash).and_then(|b| - b.justification().map(|x| x.clone())) - )) - } - - fn last_finalized(&self) -> sp_blockchain::Result { - Ok(self.storage.read().finalized_hash.clone()) - } - - fn cache(&self) -> Option>> { - None - } - - fn leaves(&self) -> sp_blockchain::Result> { - Ok(self.storage.read().leaves.hashes()) - } - - fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result> { - unimplemented!() - } + fn body( + &self, + id: BlockId, + ) -> sp_blockchain::Result::Extrinsic>>> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.extrinsics().map(|x| x.to_vec())) + })) + } + + fn justification(&self, id: BlockId) -> sp_blockchain::Result> { + Ok(self.id(id).and_then(|hash| { + self.storage + .read() + .blocks + .get(&hash) + .and_then(|b| b.justification().map(|x| x.clone())) + })) + } + + fn last_finalized(&self) -> sp_blockchain::Result { + Ok(self.storage.read().finalized_hash.clone()) + } + + fn cache(&self) -> Option>> { + None + } + + fn leaves(&self) -> sp_blockchain::Result> { + Ok(self.storage.read().leaves.hashes()) + } + + fn children(&self, _parent_hash: Block::Hash) -> sp_blockchain::Result> { + unimplemented!() + } } impl blockchain::ProvideCache for Blockchain { - fn cache(&self) -> Option>> { - None - } + fn cache(&self) -> Option>> { + None + } } impl backend::AuxStore for Blockchain { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { - let mut storage = self.storage.write(); - for (k, v) in insert { - 
storage.aux.insert(k.to_vec(), v.to_vec()); - } - for k in delete { - storage.aux.remove(*k); - } - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { - Ok(self.storage.read().aux.get(key).cloned()) - } + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + let mut storage = self.storage.write(); + for (k, v) in insert { + storage.aux.insert(k.to_vec(), v.to_vec()); + } + for k in delete { + storage.aux.remove(*k); + } + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + Ok(self.storage.read().aux.get(key).cloned()) + } } impl sc_client_api::light::Storage for Blockchain - where - Block::Hash: From<[u8; 32]>, +where + Block::Hash: From<[u8; 32]>, { - fn import_header( - &self, - header: Block::Header, - _cache: HashMap>, - state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - ) -> sp_blockchain::Result<()> { - let hash = header.hash(); - self.insert(hash, header, None, None, state)?; - - self.write_aux(aux_ops); - Ok(()) - } - - fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { - Blockchain::set_head(self, id) - } - - fn last_finalized(&self) -> sp_blockchain::Result { - Ok(self.storage.read().finalized_hash.clone()) - } - - fn finalize_header(&self, id: BlockId) -> sp_blockchain::Result<()> { - Blockchain::finalize_header(self, id, None) - } - - fn header_cht_root( - &self, - _cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage.read().header_cht_roots.get(&block).cloned() - .ok_or_else(|| sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block))) - .map(Some) - } - - fn changes_trie_cht_root( - &self, - _cht_size: NumberFor, - block: NumberFor, - ) -> sp_blockchain::Result> { - self.storage.read().changes_trie_cht_roots.get(&block).cloned() - .ok_or_else(|| sp_blockchain::Error::Backend(format!("Changes trie CHT for block {} not exists", 
block))) - .map(Some) - } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } + fn import_header( + &self, + header: Block::Header, + _cache: HashMap>, + state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + ) -> sp_blockchain::Result<()> { + let hash = header.hash(); + self.insert(hash, header, None, None, state)?; + + self.write_aux(aux_ops); + Ok(()) + } + + fn set_head(&self, id: BlockId) -> sp_blockchain::Result<()> { + Blockchain::set_head(self, id) + } + + fn last_finalized(&self) -> sp_blockchain::Result { + Ok(self.storage.read().finalized_hash.clone()) + } + + fn finalize_header(&self, id: BlockId) -> sp_blockchain::Result<()> { + Blockchain::finalize_header(self, id, None) + } + + fn header_cht_root( + &self, + _cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + self.storage + .read() + .header_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!("Header CHT for block {} not exists", block)) + }) + .map(Some) + } + + fn changes_trie_cht_root( + &self, + _cht_size: NumberFor, + block: NumberFor, + ) -> sp_blockchain::Result> { + self.storage + .read() + .changes_trie_cht_roots + .get(&block) + .cloned() + .ok_or_else(|| { + sp_blockchain::Error::Backend(format!( + "Changes trie CHT for block {} not exists", + block + )) + }) + .map(Some) + } + + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } } /// In-memory operation. 
pub struct BlockImportOperation { - pending_block: Option>, - pending_cache: HashMap>, - old_state: InMemoryBackend>, - new_state: Option>>, - aux: Vec<(Vec, Option>)>, - finalized_blocks: Vec<(BlockId, Option)>, - set_head: Option>, + pending_block: Option>, + pending_cache: HashMap>, + old_state: InMemoryBackend>, + new_state: Option>>, + aux: Vec<(Vec, Option>)>, + finalized_blocks: Vec<(BlockId, Option)>, + set_head: Option>, } -impl backend::BlockImportOperation for BlockImportOperation where - Block::Hash: Ord, +impl backend::BlockImportOperation for BlockImportOperation +where + Block::Hash: Ord, { - type State = InMemoryBackend>; - - fn state(&self) -> sp_blockchain::Result> { - Ok(Some(&self.old_state)) - } - - fn set_block_data( - &mut self, - header: ::Header, - body: Option::Extrinsic>>, - justification: Option, - state: NewBlockState, - ) -> sp_blockchain::Result<()> { - assert!(self.pending_block.is_none(), "Only one block per operation is allowed"); - self.pending_block = Some(PendingBlock { - block: StoredBlock::new(header, body, justification), - state, - }); - Ok(()) - } - - fn update_cache(&mut self, cache: HashMap>) { - self.pending_cache = cache; - } - - fn update_db_storage( - &mut self, - update: > as StateBackend>>::Transaction, - ) -> sp_blockchain::Result<()> { - self.new_state = Some(self.old_state.update(update)); - Ok(()) - } - - fn update_changes_trie( - &mut self, - _update: ChangesTrieTransaction, NumberFor>, - ) -> sp_blockchain::Result<()> { - Ok(()) - } - - fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { - check_genesis_storage(&storage)?; - - let child_delta = storage.children.into_iter() - .map(|(storage_key, child_content)| - (storage_key, child_content.data.into_iter().map(|(k, v)| (k, Some(v))), child_content.child_info)); - - let (root, transaction) = self.old_state.full_storage_root( - storage.top.into_iter().map(|(k, v)| (k, Some(v))), - child_delta - ); - - self.new_state = 
Some(InMemoryBackend::from(transaction)); - Ok(root) - } - - fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> - where I: IntoIterator, Option>)> - { - self.aux.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage( - &mut self, - _update: StorageCollection, - _child_update: ChildStorageCollection, - ) -> sp_blockchain::Result<()> { - Ok(()) - } - - fn mark_finalized( - &mut self, - block: BlockId, - justification: Option, - ) -> sp_blockchain::Result<()> { - self.finalized_blocks.push((block, justification)); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> sp_blockchain::Result<()> { - assert!(self.pending_block.is_none(), "Only one set block per operation is allowed"); - self.set_head = Some(block); - Ok(()) - } + type State = InMemoryBackend>; + + fn state(&self) -> sp_blockchain::Result> { + Ok(Some(&self.old_state)) + } + + fn set_block_data( + &mut self, + header: ::Header, + body: Option::Extrinsic>>, + justification: Option, + state: NewBlockState, + ) -> sp_blockchain::Result<()> { + assert!( + self.pending_block.is_none(), + "Only one block per operation is allowed" + ); + self.pending_block = Some(PendingBlock { + block: StoredBlock::new(header, body, justification), + state, + }); + Ok(()) + } + + fn update_cache(&mut self, cache: HashMap>) { + self.pending_cache = cache; + } + + fn update_db_storage( + &mut self, + update: > as StateBackend>>::Transaction, + ) -> sp_blockchain::Result<()> { + self.new_state = Some(self.old_state.update(update)); + Ok(()) + } + + fn update_changes_trie( + &mut self, + _update: ChangesTrieTransaction, NumberFor>, + ) -> sp_blockchain::Result<()> { + Ok(()) + } + + fn reset_storage(&mut self, storage: Storage) -> sp_blockchain::Result { + check_genesis_storage(&storage)?; + + let child_delta = storage + .children + .into_iter() + .map(|(storage_key, child_content)| { + ( + storage_key, + child_content.data.into_iter().map(|(k, v)| (k, Some(v))), + child_content.child_info, + ) 
+ }); + + let (root, transaction) = self.old_state.full_storage_root( + storage.top.into_iter().map(|(k, v)| (k, Some(v))), + child_delta, + ); + + self.new_state = Some(InMemoryBackend::from(transaction)); + Ok(root) + } + + fn insert_aux(&mut self, ops: I) -> sp_blockchain::Result<()> + where + I: IntoIterator, Option>)>, + { + self.aux.append(&mut ops.into_iter().collect()); + Ok(()) + } + + fn update_storage( + &mut self, + _update: StorageCollection, + _child_update: ChildStorageCollection, + ) -> sp_blockchain::Result<()> { + Ok(()) + } + + fn mark_finalized( + &mut self, + block: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()> { + self.finalized_blocks.push((block, justification)); + Ok(()) + } + + fn mark_head(&mut self, block: BlockId) -> sp_blockchain::Result<()> { + assert!( + self.pending_block.is_none(), + "Only one set block per operation is allowed" + ); + self.set_head = Some(block); + Ok(()) + } } /// In-memory backend. Keeps all states and blocks in memory. /// /// > **Warning**: Doesn't support all the features necessary for a proper database. Only use this /// > struct for testing purposes. Do **NOT** use in production. -pub struct Backend where Block::Hash: Ord { - states: RwLock>>>, - blockchain: Blockchain, - import_lock: RwLock<()>, +pub struct Backend +where + Block::Hash: Ord, +{ + states: RwLock>>>, + blockchain: Blockchain, + import_lock: RwLock<()>, } -impl Backend where Block::Hash: Ord { - /// Create a new instance of in-mem backend. - pub fn new() -> Self { - Backend { - states: RwLock::new(HashMap::new()), - blockchain: Blockchain::new(), - import_lock: Default::default(), - } - } +impl Backend +where + Block::Hash: Ord, +{ + /// Create a new instance of in-mem backend. 
+ pub fn new() -> Self { + Backend { + states: RwLock::new(HashMap::new()), + blockchain: Blockchain::new(), + import_lock: Default::default(), + } + } } -impl backend::AuxStore for Backend where Block::Hash: Ord { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> sp_blockchain::Result<()> { - self.blockchain.insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { - self.blockchain.get_aux(key) - } +impl backend::AuxStore for Backend +where + Block::Hash: Ord, +{ + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> sp_blockchain::Result<()> { + self.blockchain.insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> sp_blockchain::Result>> { + self.blockchain.get_aux(key) + } } -impl backend::Backend for Backend where Block::Hash: Ord { - type BlockImportOperation = BlockImportOperation; - type Blockchain = Blockchain; - type State = InMemoryBackend>; - type OffchainStorage = OffchainStorage; - - fn begin_operation(&self) -> sp_blockchain::Result { - let old_state = self.state_at(BlockId::Hash(Default::default()))?; - Ok(BlockImportOperation { - pending_block: None, - pending_cache: Default::default(), - old_state, - new_state: None, - aux: Default::default(), - finalized_blocks: Default::default(), - set_head: None, - }) - } - - fn begin_state_operation( - &self, - operation: &mut Self::BlockImportOperation, - block: BlockId, - ) -> sp_blockchain::Result<()> { - operation.old_state = self.state_at(block)?; - Ok(()) - } - - fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { - if !operation.finalized_blocks.is_empty() { - for (block, justification) in operation.finalized_blocks { - self.blockchain.finalize_header(block, justification)?; - } - } - - if let Some(pending_block) = operation.pending_block { - let old_state = 
&operation.old_state; - let (header, body, justification) = pending_block.block.into_inner(); - - let hash = header.hash(); - - self.states.write().insert(hash, operation.new_state.unwrap_or_else(|| old_state.clone())); - - self.blockchain.insert(hash, header, justification, body, pending_block.state)?; - } - - if !operation.aux.is_empty() { - self.blockchain.write_aux(operation.aux); - } - - if let Some(set_head) = operation.set_head { - self.blockchain.set_head(set_head)?; - } - - Ok(()) - } - - fn finalize_block( - &self, - block: BlockId, - justification: Option, - ) -> sp_blockchain::Result<()> { - self.blockchain.finalize_header(block, justification) - } - - fn blockchain(&self) -> &Self::Blockchain { - &self.blockchain - } - - fn usage_info(&self) -> Option { - None - } - - fn changes_trie_storage(&self) -> Option<&dyn backend::PrunableStateChangesTrieStorage> { - None - } - - fn offchain_storage(&self) -> Option { - None - } - - fn state_at(&self, block: BlockId) -> sp_blockchain::Result { - match block { - BlockId::Hash(h) if h == Default::default() => { - return Ok(Self::State::default()); - }, - _ => {}, - } - - match self.blockchain.id(block).and_then(|id| self.states.read().get(&id).cloned()) { - Some(state) => Ok(state), - None => Err(sp_blockchain::Error::UnknownBlock(format!("{}", block))), - } - } - - fn revert( - &self, - _n: NumberFor, - _revert_finalized: bool, - ) -> sp_blockchain::Result> { - Ok(Zero::zero()) - } - - fn get_import_lock(&self) -> &RwLock<()> { - &self.import_lock - } +impl backend::Backend for Backend +where + Block::Hash: Ord, +{ + type BlockImportOperation = BlockImportOperation; + type Blockchain = Blockchain; + type State = InMemoryBackend>; + type OffchainStorage = OffchainStorage; + + fn begin_operation(&self) -> sp_blockchain::Result { + let old_state = self.state_at(BlockId::Hash(Default::default()))?; + Ok(BlockImportOperation { + pending_block: None, + pending_cache: Default::default(), + old_state, + new_state: None, 
+ aux: Default::default(), + finalized_blocks: Default::default(), + set_head: None, + }) + } + + fn begin_state_operation( + &self, + operation: &mut Self::BlockImportOperation, + block: BlockId, + ) -> sp_blockchain::Result<()> { + operation.old_state = self.state_at(block)?; + Ok(()) + } + + fn commit_operation(&self, operation: Self::BlockImportOperation) -> sp_blockchain::Result<()> { + if !operation.finalized_blocks.is_empty() { + for (block, justification) in operation.finalized_blocks { + self.blockchain.finalize_header(block, justification)?; + } + } + + if let Some(pending_block) = operation.pending_block { + let old_state = &operation.old_state; + let (header, body, justification) = pending_block.block.into_inner(); + + let hash = header.hash(); + + self.states.write().insert( + hash, + operation.new_state.unwrap_or_else(|| old_state.clone()), + ); + + self.blockchain + .insert(hash, header, justification, body, pending_block.state)?; + } + + if !operation.aux.is_empty() { + self.blockchain.write_aux(operation.aux); + } + + if let Some(set_head) = operation.set_head { + self.blockchain.set_head(set_head)?; + } + + Ok(()) + } + + fn finalize_block( + &self, + block: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()> { + self.blockchain.finalize_header(block, justification) + } + + fn blockchain(&self) -> &Self::Blockchain { + &self.blockchain + } + + fn usage_info(&self) -> Option { + None + } + + fn changes_trie_storage(&self) -> Option<&dyn backend::PrunableStateChangesTrieStorage> { + None + } + + fn offchain_storage(&self) -> Option { + None + } + + fn state_at(&self, block: BlockId) -> sp_blockchain::Result { + match block { + BlockId::Hash(h) if h == Default::default() => { + return Ok(Self::State::default()); + } + _ => {} + } + + match self + .blockchain + .id(block) + .and_then(|id| self.states.read().get(&id).cloned()) + { + Some(state) => Ok(state), + None => Err(sp_blockchain::Error::UnknownBlock(format!("{}", block))), + } + } 
+ + fn revert( + &self, + _n: NumberFor, + _revert_finalized: bool, + ) -> sp_blockchain::Result> { + Ok(Zero::zero()) + } + + fn get_import_lock(&self) -> &RwLock<()> { + &self.import_lock + } } impl backend::LocalBackend for Backend where Block::Hash: Ord {} -impl backend::RemoteBackend for Backend where Block::Hash: Ord { - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.blockchain.expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - } - - fn remote_blockchain(&self) -> Arc> { - unimplemented!() - } +impl backend::RemoteBackend for Backend +where + Block::Hash: Ord, +{ + fn is_local_state_available(&self, block: &BlockId) -> bool { + self.blockchain + .expect_block_number_from_id(block) + .map(|num| num.is_zero()) + .unwrap_or(false) + } + + fn remote_blockchain(&self) -> Arc> { + unimplemented!() + } } /// Check that genesis storage is valid. pub fn check_genesis_storage(storage: &Storage) -> sp_blockchain::Result<()> { - if storage.top.iter().any(|(k, _)| well_known_keys::is_child_storage_key(k)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - - if storage.children.keys().any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) { - return Err(sp_blockchain::Error::GenesisInvalid.into()); - } - - Ok(()) + if storage + .top + .iter() + .any(|(k, _)| well_known_keys::is_child_storage_key(k)) + { + return Err(sp_blockchain::Error::GenesisInvalid.into()); + } + + if storage + .children + .keys() + .any(|child_key| !well_known_keys::is_child_storage_key(&child_key)) + { + return Err(sp_blockchain::Error::GenesisInvalid.into()); + } + + Ok(()) } #[cfg(test)] mod tests { - use sp_core::offchain::{OffchainStorage, storage::InMemOffchainStorage}; - use std::sync::Arc; - - type TestBackend = substrate_test_runtime_client::sc_client::in_mem::Backend; - - #[test] - fn test_leaves_with_complex_block_tree() { - let backend = Arc::new(TestBackend::new()); - - 
substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); - } - - #[test] - fn test_blockchain_query_by_number_gets_canonical() { - let backend = Arc::new(TestBackend::new()); - - substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical(backend); - } - - #[test] - fn in_memory_offchain_storage() { - - let mut storage = InMemOffchainStorage::default(); - assert_eq!(storage.get(b"A", b"B"), None); - assert_eq!(storage.get(b"B", b"A"), None); - - storage.set(b"A", b"B", b"C"); - assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); - assert_eq!(storage.get(b"B", b"A"), None); - - storage.compare_and_set(b"A", b"B", Some(b"X"), b"D"); - assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); - storage.compare_and_set(b"A", b"B", Some(b"C"), b"D"); - assert_eq!(storage.get(b"A", b"B"), Some(b"D".to_vec())); - - assert!(!storage.compare_and_set(b"B", b"A", Some(b""), b"Y")); - assert!(storage.compare_and_set(b"B", b"A", None, b"X")); - assert_eq!(storage.get(b"B", b"A"), Some(b"X".to_vec())); - } + use sp_core::offchain::{storage::InMemOffchainStorage, OffchainStorage}; + use std::sync::Arc; + + type TestBackend = substrate_test_runtime_client::sc_client::in_mem::Backend< + substrate_test_runtime_client::runtime::Block, + >; + + #[test] + fn test_leaves_with_complex_block_tree() { + let backend = Arc::new(TestBackend::new()); + + substrate_test_runtime_client::trait_tests::test_leaves_for_backend(backend); + } + + #[test] + fn test_blockchain_query_by_number_gets_canonical() { + let backend = Arc::new(TestBackend::new()); + + substrate_test_runtime_client::trait_tests::test_blockchain_query_by_number_gets_canonical( + backend, + ); + } + + #[test] + fn in_memory_offchain_storage() { + let mut storage = InMemOffchainStorage::default(); + assert_eq!(storage.get(b"A", b"B"), None); + assert_eq!(storage.get(b"B", b"A"), None); + + storage.set(b"A", b"B", b"C"); + assert_eq!(storage.get(b"A", b"B"), 
Some(b"C".to_vec())); + assert_eq!(storage.get(b"B", b"A"), None); + + storage.compare_and_set(b"A", b"B", Some(b"X"), b"D"); + assert_eq!(storage.get(b"A", b"B"), Some(b"C".to_vec())); + storage.compare_and_set(b"A", b"B", Some(b"C"), b"D"); + assert_eq!(storage.get(b"A", b"B"), Some(b"D".to_vec())); + + assert!(!storage.compare_and_set(b"B", b"A", Some(b""), b"Y")); + assert!(storage.compare_and_set(b"B", b"A", None, b"X")); + assert_eq!(storage.get(b"B", b"A"), Some(b"X".to_vec())); + } } diff --git a/client/src/leaves.rs b/client/src/leaves.rs index a555943444..238caf494c 100644 --- a/client/src/leaves.rs +++ b/client/src/leaves.rs @@ -16,43 +16,43 @@ //! Helper for managing the set of available leaves in the chain for DB implementations. -use std::collections::BTreeMap; -use std::cmp::Reverse; +use codec::{Decode, Encode}; +use sp_blockchain::{Error, Result}; use sp_database::{Database, Transaction}; use sp_runtime::traits::AtLeast32Bit; -use codec::{Encode, Decode}; -use sp_blockchain::{Error, Result}; +use std::cmp::Reverse; +use std::collections::BTreeMap; type DbHash = [u8; 32]; #[derive(Debug, Clone, PartialEq, Eq)] struct LeafSetItem { - hash: H, - number: Reverse, + hash: H, + number: Reverse, } /// A displaced leaf after import. #[must_use = "Displaced items from the leaf set must be handled."] pub struct ImportDisplaced { - new_hash: H, - displaced: LeafSetItem, + new_hash: H, + displaced: LeafSetItem, } /// Displaced leaves after finalization. #[must_use = "Displaced items from the leaf set must be handled."] pub struct FinalizationDisplaced { - leaves: BTreeMap, Vec>, + leaves: BTreeMap, Vec>, } impl FinalizationDisplaced { - /// Merge with another. This should only be used for displaced items that - /// are produced within one transaction of each other. 
- pub fn merge(&mut self, mut other: Self) { - // this will ignore keys that are in duplicate, however - // if these are actually produced correctly via the leaf-set within - // one transaction, then there will be no overlap in the keys. - self.leaves.append(&mut other.leaves); - } + /// Merge with another. This should only be used for displaced items that + /// are produced within one transaction of each other. + pub fn merge(&mut self, mut other: Self) { + // this will ignore keys that are in duplicate, however + // if these are actually produced correctly via the leaf-set within + // one transaction, then there will be no overlap in the keys. + self.leaves.append(&mut other.leaves); + } } /// list of leaf hashes ordered by number (descending). @@ -60,320 +60,350 @@ impl FinalizationDisplaced { /// this allows very fast checking and modification of active leaves. #[derive(Debug, Clone, PartialEq, Eq)] pub struct LeafSet { - storage: BTreeMap, Vec>, - pending_added: Vec<(H, N)>, - pending_removed: Vec, + storage: BTreeMap, Vec>, + pending_added: Vec<(H, N)>, + pending_removed: Vec, } -impl LeafSet where - H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, +impl LeafSet +where + H: Clone + PartialEq + Decode + Encode, + N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { - /// Construct a new, blank leaf set. - pub fn new() -> Self { - Self { - storage: BTreeMap::new(), - pending_added: Vec::new(), - pending_removed: Vec::new(), - } - } - - /// Read the leaf list from the DB, using given prefix for keys. 
- pub fn read_from_db(db: &dyn Database, column: u32, prefix: &[u8]) -> Result { - let mut storage = BTreeMap::new(); - - match db.get(column, prefix) { - Some(leaves) => { - let vals: Vec<_> = match Decode::decode(&mut leaves.as_ref()) { - Ok(vals) => vals, - Err(_) => return Err(Error::Backend("Error decoding leaves".into())), - }; - for (number, hashes) in vals.into_iter() { - storage.insert(Reverse(number), hashes); - } - } - None => {}, - } - Ok(Self { - storage, - pending_added: Vec::new(), - pending_removed: Vec::new(), - }) - } - - /// update the leaf list on import. returns a displaced leaf if there was one. - pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { - // avoid underflow for genesis. - let displaced = if number != N::zero() { - let new_number = Reverse(number.clone() - N::one()); - let was_displaced = self.remove_leaf(&new_number, &parent_hash); - - if was_displaced { - self.pending_removed.push(parent_hash.clone()); - Some(ImportDisplaced { - new_hash: hash.clone(), - displaced: LeafSetItem { - hash: parent_hash, - number: new_number, - }, - }) - } else { - None - } - } else { - None - }; - - self.insert_leaf(Reverse(number.clone()), hash.clone()); - self.pending_added.push((hash, number)); - displaced - } - - /// Note a block height finalized, displacing all leaves with number less than the finalized block's. - /// - /// Although it would be more technically correct to also prune out leaves at the - /// same number as the finalized block, but with different hashes, the current behavior - /// is simpler and our assumptions about how finalization works means that those leaves - /// will be pruned soon afterwards anyway. 
- pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { - let boundary = if number == N::zero() { - return FinalizationDisplaced { leaves: BTreeMap::new() }; - } else { - number - N::one() - }; - - let below_boundary = self.storage.split_off(&Reverse(boundary)); - self.pending_removed.extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); - FinalizationDisplaced { - leaves: below_boundary, - } - } - - /// Undo all pending operations. - /// - /// This returns an `Undo` struct, where any - /// `Displaced` objects that have returned by previous method calls - /// should be passed to via the appropriate methods. Otherwise, - /// the on-disk state may get out of sync with in-memory state. - pub fn undo(&mut self) -> Undo { - Undo { inner: self } - } - - /// Revert to the given block height by dropping all leaves in the leaf set - /// with a block number higher than the target. - pub fn revert(&mut self, best_hash: H, best_number: N) { - let items = self.storage.iter() - .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.clone()))) - .collect::>(); - - for (hash, number) in &items { - if number.0 > best_number { - assert!( - self.remove_leaf(number, hash), - "item comes from an iterator over storage; qed", - ); - - self.pending_removed.push(hash.clone()); - } - } - - let best_number = Reverse(best_number); - let leaves_contains_best = self.storage - .get(&best_number) - .map_or(false, |hashes| hashes.contains(&best_hash)); - - // we need to make sure that the best block exists in the leaf set as - // this is an invariant of regular block import. - if !leaves_contains_best { - self.insert_leaf(best_number.clone(), best_hash.clone()); - self.pending_added.push((best_hash, best_number.0)); - } - } - - /// returns an iterator over all hashes in the leaf set - /// ordered by their block number descending. 
- pub fn hashes(&self) -> Vec { - self.storage.iter().flat_map(|(_, hashes)| hashes.iter()).cloned().collect() - } - - /// Number of known leaves - pub fn count(&self) -> usize { - self.storage.len() - } - - /// Write the leaf list to the database transaction. - pub fn prepare_transaction(&mut self, tx: &mut Transaction, column: u32, prefix: &[u8]) { - let leaves: Vec<_> = self.storage.iter().map(|(n, h)| (n.0.clone(), h.clone())).collect(); - tx.set_from_vec(column, prefix, leaves.encode()); - self.pending_added.clear(); - self.pending_removed.clear(); - } - - #[cfg(test)] - fn contains(&self, number: N, hash: H) -> bool { - self.storage.get(&Reverse(number)).map_or(false, |hashes| hashes.contains(&hash)) - } - - fn insert_leaf(&mut self, number: Reverse, hash: H) { - self.storage.entry(number).or_insert_with(Vec::new).push(hash); - } - - // returns true if this leaf was contained, false otherwise. - fn remove_leaf(&mut self, number: &Reverse, hash: &H) -> bool { - let mut empty = false; - let removed = self.storage.get_mut(number).map_or(false, |leaves| { - let mut found = false; - leaves.retain(|h| if h == hash { - found = true; - false - } else { - true - }); - - if leaves.is_empty() { empty = true } - - found - }); - - if removed && empty { - self.storage.remove(number); - } - - removed - } + /// Construct a new, blank leaf set. + pub fn new() -> Self { + Self { + storage: BTreeMap::new(), + pending_added: Vec::new(), + pending_removed: Vec::new(), + } + } + + /// Read the leaf list from the DB, using given prefix for keys. 
+ pub fn read_from_db(db: &dyn Database, column: u32, prefix: &[u8]) -> Result { + let mut storage = BTreeMap::new(); + + match db.get(column, prefix) { + Some(leaves) => { + let vals: Vec<_> = match Decode::decode(&mut leaves.as_ref()) { + Ok(vals) => vals, + Err(_) => return Err(Error::Backend("Error decoding leaves".into())), + }; + for (number, hashes) in vals.into_iter() { + storage.insert(Reverse(number), hashes); + } + } + None => {} + } + Ok(Self { + storage, + pending_added: Vec::new(), + pending_removed: Vec::new(), + }) + } + + /// update the leaf list on import. returns a displaced leaf if there was one. + pub fn import(&mut self, hash: H, number: N, parent_hash: H) -> Option> { + // avoid underflow for genesis. + let displaced = if number != N::zero() { + let new_number = Reverse(number.clone() - N::one()); + let was_displaced = self.remove_leaf(&new_number, &parent_hash); + + if was_displaced { + self.pending_removed.push(parent_hash.clone()); + Some(ImportDisplaced { + new_hash: hash.clone(), + displaced: LeafSetItem { + hash: parent_hash, + number: new_number, + }, + }) + } else { + None + } + } else { + None + }; + + self.insert_leaf(Reverse(number.clone()), hash.clone()); + self.pending_added.push((hash, number)); + displaced + } + + /// Note a block height finalized, displacing all leaves with number less than the finalized block's. + /// + /// Although it would be more technically correct to also prune out leaves at the + /// same number as the finalized block, but with different hashes, the current behavior + /// is simpler and our assumptions about how finalization works means that those leaves + /// will be pruned soon afterwards anyway. 
+ pub fn finalize_height(&mut self, number: N) -> FinalizationDisplaced { + let boundary = if number == N::zero() { + return FinalizationDisplaced { + leaves: BTreeMap::new(), + }; + } else { + number - N::one() + }; + + let below_boundary = self.storage.split_off(&Reverse(boundary)); + self.pending_removed + .extend(below_boundary.values().flat_map(|h| h.iter()).cloned()); + FinalizationDisplaced { + leaves: below_boundary, + } + } + + /// Undo all pending operations. + /// + /// This returns an `Undo` struct, where any + /// `Displaced` objects that have returned by previous method calls + /// should be passed to via the appropriate methods. Otherwise, + /// the on-disk state may get out of sync with in-memory state. + pub fn undo(&mut self) -> Undo { + Undo { inner: self } + } + + /// Revert to the given block height by dropping all leaves in the leaf set + /// with a block number higher than the target. + pub fn revert(&mut self, best_hash: H, best_number: N) { + let items = self + .storage + .iter() + .flat_map(|(number, hashes)| hashes.iter().map(move |h| (h.clone(), number.clone()))) + .collect::>(); + + for (hash, number) in &items { + if number.0 > best_number { + assert!( + self.remove_leaf(number, hash), + "item comes from an iterator over storage; qed", + ); + + self.pending_removed.push(hash.clone()); + } + } + + let best_number = Reverse(best_number); + let leaves_contains_best = self + .storage + .get(&best_number) + .map_or(false, |hashes| hashes.contains(&best_hash)); + + // we need to make sure that the best block exists in the leaf set as + // this is an invariant of regular block import. + if !leaves_contains_best { + self.insert_leaf(best_number.clone(), best_hash.clone()); + self.pending_added.push((best_hash, best_number.0)); + } + } + + /// returns an iterator over all hashes in the leaf set + /// ordered by their block number descending. 
+ pub fn hashes(&self) -> Vec { + self.storage + .iter() + .flat_map(|(_, hashes)| hashes.iter()) + .cloned() + .collect() + } + + /// Number of known leaves + pub fn count(&self) -> usize { + self.storage.len() + } + + /// Write the leaf list to the database transaction. + pub fn prepare_transaction( + &mut self, + tx: &mut Transaction, + column: u32, + prefix: &[u8], + ) { + let leaves: Vec<_> = self + .storage + .iter() + .map(|(n, h)| (n.0.clone(), h.clone())) + .collect(); + tx.set_from_vec(column, prefix, leaves.encode()); + self.pending_added.clear(); + self.pending_removed.clear(); + } + + #[cfg(test)] + fn contains(&self, number: N, hash: H) -> bool { + self.storage + .get(&Reverse(number)) + .map_or(false, |hashes| hashes.contains(&hash)) + } + + fn insert_leaf(&mut self, number: Reverse, hash: H) { + self.storage + .entry(number) + .or_insert_with(Vec::new) + .push(hash); + } + + // returns true if this leaf was contained, false otherwise. + fn remove_leaf(&mut self, number: &Reverse, hash: &H) -> bool { + let mut empty = false; + let removed = self.storage.get_mut(number).map_or(false, |leaves| { + let mut found = false; + leaves.retain(|h| { + if h == hash { + found = true; + false + } else { + true + } + }); + + if leaves.is_empty() { + empty = true + } + + found + }); + + if removed && empty { + self.storage.remove(number); + } + + removed + } } /// Helper for undoing operations. pub struct Undo<'a, H: 'a, N: 'a> { - inner: &'a mut LeafSet, + inner: &'a mut LeafSet, } -impl<'a, H: 'a, N: 'a> Undo<'a, H, N> where - H: Clone + PartialEq + Decode + Encode, - N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, +impl<'a, H: 'a, N: 'a> Undo<'a, H, N> +where + H: Clone + PartialEq + Decode + Encode, + N: std::fmt::Debug + Clone + AtLeast32Bit + Decode + Encode, { - /// Undo an imported block by providing the displaced leaf. 
- pub fn undo_import(&mut self, displaced: ImportDisplaced) { - let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); - self.inner.remove_leaf(&new_number, &displaced.new_hash); - self.inner.insert_leaf(new_number, displaced.displaced.hash); - } - - /// Undo a finalization operation by providing the displaced leaves. - pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced) { - self.inner.storage.append(&mut displaced.leaves); - } + /// Undo an imported block by providing the displaced leaf. + pub fn undo_import(&mut self, displaced: ImportDisplaced) { + let new_number = Reverse(displaced.displaced.number.0.clone() + N::one()); + self.inner.remove_leaf(&new_number, &displaced.new_hash); + self.inner.insert_leaf(new_number, displaced.displaced.hash); + } + + /// Undo a finalization operation by providing the displaced leaves. + pub fn undo_finalization(&mut self, mut displaced: FinalizationDisplaced) { + self.inner.storage.append(&mut displaced.leaves); + } } impl<'a, H: 'a, N: 'a> Drop for Undo<'a, H, N> { - fn drop(&mut self) { - self.inner.pending_added.clear(); - self.inner.pending_removed.clear(); - } + fn drop(&mut self) { + self.inner.pending_added.clear(); + self.inner.pending_removed.clear(); + } } #[cfg(test)] mod tests { - use super::*; - use std::sync::Arc; - - #[test] - fn it_works() { - let mut set = LeafSet::new(); - set.import(0u32, 0u32, 0u32); - - set.import(1_1, 1, 0); - set.import(2_1, 2, 1_1); - set.import(3_1, 3, 2_1); - - assert!(set.contains(3, 3_1)); - assert!(!set.contains(2, 2_1)); - assert!(!set.contains(1, 1_1)); - assert!(!set.contains(0, 0)); - - set.import(2_2, 2, 1_1); - - assert!(set.contains(3, 3_1)); - assert!(set.contains(2, 2_2)); - } - - #[test] - fn flush_to_disk() { - const PREFIX: &[u8] = b"abcdefg"; - let db = Arc::new(sp_database::MemDb::default()); - - let mut set = LeafSet::new(); - set.import(0u32, 0u32, 0u32); - - set.import(1_1, 1, 0); - set.import(2_1, 2, 1_1); - set.import(3_1, 
3, 2_1); - - let mut tx = Transaction::new(); - - set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx); - - let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); - assert_eq!(set, set2); - } - - #[test] - fn two_leaves_same_height_can_be_included() { - let mut set = LeafSet::new(); - - set.import(1_1u32, 10u32,0u32); - set.import(1_2, 10, 0); - - assert!(set.storage.contains_key(&Reverse(10))); - assert!(set.contains(10, 1_1)); - assert!(set.contains(10, 1_2)); - assert!(!set.contains(10, 1_3)); - } - - #[test] - fn finalization_consistent_with_disk() { - const PREFIX: &[u8] = b"prefix"; - let db = Arc::new(sp_database::MemDb::default()); - - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); - - assert!(set.contains(10, 10_1)); - - let mut tx = Transaction::new(); - set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx); - - let _ = set.finalize_height(11); - let mut tx = Transaction::new(); - set.prepare_transaction(&mut tx, 0, PREFIX); - db.commit(tx); - - assert!(set.contains(11, 11_1)); - assert!(set.contains(11, 11_2)); - assert!(set.contains(12, 12_1)); - assert!(!set.contains(10, 10_1)); - - let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); - assert_eq!(set, set2); - } - - #[test] - fn undo_finalization() { - let mut set = LeafSet::new(); - set.import(10_1u32, 10u32, 0u32); - set.import(11_1, 11, 10_2); - set.import(11_2, 11, 10_2); - set.import(12_1, 12, 11_123); - - let displaced = set.finalize_height(11); - assert!(!set.contains(10, 10_1)); - - set.undo().undo_finalization(displaced); - assert!(set.contains(10, 10_1)); - } + use super::*; + use std::sync::Arc; + + #[test] + fn it_works() { + let mut set = LeafSet::new(); + set.import(0u32, 0u32, 0u32); + + set.import(1_1, 1, 0); + set.import(2_1, 2, 1_1); + set.import(3_1, 3, 2_1); + + assert!(set.contains(3, 3_1)); + assert!(!set.contains(2, 2_1)); + 
assert!(!set.contains(1, 1_1)); + assert!(!set.contains(0, 0)); + + set.import(2_2, 2, 1_1); + + assert!(set.contains(3, 3_1)); + assert!(set.contains(2, 2_2)); + } + + #[test] + fn flush_to_disk() { + const PREFIX: &[u8] = b"abcdefg"; + let db = Arc::new(sp_database::MemDb::default()); + + let mut set = LeafSet::new(); + set.import(0u32, 0u32, 0u32); + + set.import(1_1, 1, 0); + set.import(2_1, 2, 1_1); + set.import(3_1, 3, 2_1); + + let mut tx = Transaction::new(); + + set.prepare_transaction(&mut tx, 0, PREFIX); + db.commit(tx); + + let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); + assert_eq!(set, set2); + } + + #[test] + fn two_leaves_same_height_can_be_included() { + let mut set = LeafSet::new(); + + set.import(1_1u32, 10u32, 0u32); + set.import(1_2, 10, 0); + + assert!(set.storage.contains_key(&Reverse(10))); + assert!(set.contains(10, 1_1)); + assert!(set.contains(10, 1_2)); + assert!(!set.contains(10, 1_3)); + } + + #[test] + fn finalization_consistent_with_disk() { + const PREFIX: &[u8] = b"prefix"; + let db = Arc::new(sp_database::MemDb::default()); + + let mut set = LeafSet::new(); + set.import(10_1u32, 10u32, 0u32); + set.import(11_1, 11, 10_2); + set.import(11_2, 11, 10_2); + set.import(12_1, 12, 11_123); + + assert!(set.contains(10, 10_1)); + + let mut tx = Transaction::new(); + set.prepare_transaction(&mut tx, 0, PREFIX); + db.commit(tx); + + let _ = set.finalize_height(11); + let mut tx = Transaction::new(); + set.prepare_transaction(&mut tx, 0, PREFIX); + db.commit(tx); + + assert!(set.contains(11, 11_1)); + assert!(set.contains(11, 11_2)); + assert!(set.contains(12, 12_1)); + assert!(!set.contains(10, 10_1)); + + let set2 = LeafSet::read_from_db(&*db, 0, PREFIX).unwrap(); + assert_eq!(set, set2); + } + + #[test] + fn undo_finalization() { + let mut set = LeafSet::new(); + set.import(10_1u32, 10u32, 0u32); + set.import(11_1, 11, 10_2); + set.import(11_2, 11, 10_2); + set.import(12_1, 12, 11_123); + + let displaced = 
set.finalize_height(11); + assert!(!set.contains(10, 10_1)); + + set.undo().undo_finalization(displaced); + assert!(set.contains(10, 10_1)); + } } diff --git a/client/src/lib.rs b/client/src/lib.rs index 20a3ed058a..e54a1e561b 100644 --- a/client/src/lib.rs +++ b/client/src/lib.rs @@ -75,34 +75,33 @@ //! #![warn(missing_docs)] -#![recursion_limit="128"] +#![recursion_limit = "128"] +mod block_rules; +mod call_executor; pub mod cht; -pub mod in_mem; +mod client; pub mod genesis; -pub mod light; +pub mod in_mem; pub mod leaves; -mod call_executor; -mod client; -mod block_rules; +pub mod light; -pub use sc_client_api::{ - blockchain, - blockchain::well_known_cache_keys, - blockchain::Info as ChainInfo, - notifications::{StorageEventStream, StorageChangeSet}, - call_executor::CallExecutor, - utils, -}; pub use crate::{ - call_executor::LocalCallExecutor, - client::{ - new_with_backend, - new_in_mem, - BlockBackend, ImportNotifications, FinalityNotifications, BlockchainEvents, LockImportRun, - BlockImportNotification, Client, ClientInfo, ExecutionStrategies, FinalityNotification, - LongestChain, BlockOf, ProvideUncles, BadBlocks, ForkBlocks, apply_aux, - }, - leaves::LeafSet, + call_executor::LocalCallExecutor, + client::{ + apply_aux, new_in_mem, new_with_backend, BadBlocks, BlockBackend, BlockImportNotification, + BlockOf, BlockchainEvents, Client, ClientInfo, ExecutionStrategies, FinalityNotification, + FinalityNotifications, ForkBlocks, ImportNotifications, LockImportRun, LongestChain, + ProvideUncles, + }, + leaves::LeafSet, +}; +pub use sc_client_api::{ + blockchain, + blockchain::well_known_cache_keys, + blockchain::Info as ChainInfo, + call_executor::CallExecutor, + notifications::{StorageChangeSet, StorageEventStream}, + utils, }; -pub use sp_state_machine::{ExecutionStrategy, StorageProof, StateMachine}; +pub use sp_state_machine::{ExecutionStrategy, StateMachine, StorageProof}; diff --git a/client/src/light/backend.rs b/client/src/light/backend.rs index 
0b334d48b7..3e53b5118c 100644 --- a/client/src/light/backend.rs +++ b/client/src/light/backend.rs @@ -17,554 +17,592 @@ //! Light client backend. Only stores headers and justifications of blocks. //! Everything else is requested from full nodes on demand. +use parking_lot::RwLock; use std::collections::HashMap; use std::sync::Arc; -use parking_lot::RwLock; use codec::{Decode, Encode}; -use sp_core::ChangesTrieConfiguration; -use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; -use sp_core::offchain::storage::InMemOffchainStorage; -use sp_state_machine::{ - Backend as StateBackend, TrieBackend, InMemoryBackend, ChangesTrieTransaction, - StorageCollection, ChildStorageCollection, -}; -use sp_runtime::{generic::BlockId, Justification, Storage}; -use sp_runtime::traits::{Block as BlockT, NumberFor, Zero, Header, HashFor}; use crate::in_mem::check_genesis_storage; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; -use sc_client_api::{ - backend::{ - AuxStore, Backend as ClientBackend, BlockImportOperation, RemoteBackend, NewBlockState, - PrunableStateChangesTrieStorage, - }, - blockchain::{ - HeaderBackend as BlockchainHeaderBackend, well_known_cache_keys, - }, - light::Storage as BlockchainStorage, - UsageInfo, -}; use crate::light::blockchain::Blockchain; use hash_db::Hasher; +use sc_client_api::{ + backend::{ + AuxStore, Backend as ClientBackend, BlockImportOperation, NewBlockState, + PrunableStateChangesTrieStorage, RemoteBackend, + }, + blockchain::{well_known_cache_keys, HeaderBackend as BlockchainHeaderBackend}, + light::Storage as BlockchainStorage, + UsageInfo, +}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; +use sp_core::offchain::storage::InMemOffchainStorage; +use sp_core::storage::{well_known_keys, ChildInfo, OwnedChildInfo}; +use sp_core::ChangesTrieConfiguration; +use sp_runtime::traits::{Block as BlockT, HashFor, Header, NumberFor, Zero}; +use sp_runtime::{generic::BlockId, Justification, 
Storage}; +use sp_state_machine::{ + Backend as StateBackend, ChangesTrieTransaction, ChildStorageCollection, InMemoryBackend, + StorageCollection, TrieBackend, +}; -const IN_MEMORY_EXPECT_PROOF: &str = "InMemory state backend has Void error type and always succeeds; qed"; +const IN_MEMORY_EXPECT_PROOF: &str = + "InMemory state backend has Void error type and always succeeds; qed"; /// Light client backend. pub struct Backend { - blockchain: Arc>, - genesis_state: RwLock>>, - import_lock: RwLock<()>, + blockchain: Arc>, + genesis_state: RwLock>>, + import_lock: RwLock<()>, } /// Light block (header and justification) import operation. pub struct ImportOperation { - header: Option, - cache: HashMap>, - leaf_state: NewBlockState, - aux_ops: Vec<(Vec, Option>)>, - finalized_blocks: Vec>, - set_head: Option>, - storage_update: Option>>, - changes_trie_config_update: Option>, - _phantom: std::marker::PhantomData, + header: Option, + cache: HashMap>, + leaf_state: NewBlockState, + aux_ops: Vec<(Vec, Option>)>, + finalized_blocks: Vec>, + set_head: Option>, + storage_update: Option>>, + changes_trie_config_update: Option>, + _phantom: std::marker::PhantomData, } /// Either in-memory genesis state, or locally-unavailable state. pub enum GenesisOrUnavailableState { - /// Genesis state - storage values are stored in-memory. - Genesis(InMemoryBackend), - /// We know that state exists, but all calls will fail with error, because it - /// isn't locally available. - Unavailable, + /// Genesis state - storage values are stored in-memory. + Genesis(InMemoryBackend), + /// We know that state exists, but all calls will fail with error, because it + /// isn't locally available. + Unavailable, } impl Backend { - /// Create new light backend. - pub fn new(blockchain: Arc>) -> Self { - Self { - blockchain, - genesis_state: RwLock::new(None), - import_lock: Default::default(), - } - } - - /// Get shared blockchain reference. 
- pub fn blockchain(&self) -> &Arc> { - &self.blockchain - } + /// Create new light backend. + pub fn new(blockchain: Arc>) -> Self { + Self { + blockchain, + genesis_state: RwLock::new(None), + import_lock: Default::default(), + } + } + + /// Get shared blockchain reference. + pub fn blockchain(&self) -> &Arc> { + &self.blockchain + } } impl AuxStore for Backend { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, delete: D) -> ClientResult<()> { - self.blockchain.storage().insert_aux(insert, delete) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - self.blockchain.storage().get_aux(key) - } + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + delete: D, + ) -> ClientResult<()> { + self.blockchain.storage().insert_aux(insert, delete) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + self.blockchain.storage().get_aux(key) + } } impl ClientBackend for Backend> - where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, +where + Block: BlockT, + S: BlockchainStorage, + Block::Hash: Ord, { - type BlockImportOperation = ImportOperation; - type Blockchain = Blockchain; - type State = GenesisOrUnavailableState>; - type OffchainStorage = InMemOffchainStorage; - - fn begin_operation(&self) -> ClientResult { - Ok(ImportOperation { - header: None, - cache: Default::default(), - leaf_state: NewBlockState::Normal, - aux_ops: Vec::new(), - finalized_blocks: Vec::new(), - set_head: None, - storage_update: None, - changes_trie_config_update: None, - _phantom: Default::default(), - }) - } - - fn begin_state_operation( - &self, - _operation: &mut Self::BlockImportOperation, - _block: BlockId - ) -> ClientResult<()> { - Ok(()) - } - - fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { - if !operation.finalized_blocks.is_empty() { - for block in operation.finalized_blocks { - 
self.blockchain.storage().finalize_header(block)?; - } - } - - if let Some(header) = operation.header { - let is_genesis_import = header.number().is_zero(); - if let Some(new_config) = operation.changes_trie_config_update { - operation.cache.insert(well_known_cache_keys::CHANGES_TRIE_CONFIG, new_config.encode()); - } - self.blockchain.storage().import_header( - header, - operation.cache, - operation.leaf_state, - operation.aux_ops, - )?; - - // when importing genesis block => remember its state - if is_genesis_import { - *self.genesis_state.write() = operation.storage_update.take(); - } - } else { - for (key, maybe_val) in operation.aux_ops { - match maybe_val { - Some(val) => self.blockchain.storage().insert_aux( - &[(&key[..], &val[..])], - std::iter::empty(), - )?, - None => self.blockchain.storage().insert_aux(std::iter::empty(), &[&key[..]])?, - } - } - } - - if let Some(set_head) = operation.set_head { - self.blockchain.storage().set_head(set_head)?; - } - - Ok(()) - } - - fn finalize_block( - &self, - block: BlockId, - _justification: Option, - ) -> ClientResult<()> { - self.blockchain.storage().finalize_header(block) - } - - fn blockchain(&self) -> &Blockchain { - &self.blockchain - } - - fn usage_info(&self) -> Option { - self.blockchain.storage().usage_info() - } - - fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { - None - } - - fn offchain_storage(&self) -> Option { - None - } - - fn state_at(&self, block: BlockId) -> ClientResult { - let block_number = self.blockchain.expect_block_number_from_id(&block)?; - - // special case for genesis block - if block_number.is_zero() { - if let Some(genesis_state) = self.genesis_state.read().clone() { - return Ok(GenesisOrUnavailableState::Genesis(genesis_state)); - } - } - - // else return unavailable state. We do not return error here, because error - // would mean that we do not know this state at all. 
But we know that it exists - Ok(GenesisOrUnavailableState::Unavailable) - } - - fn revert( - &self, - _n: NumberFor, - _revert_finalized: bool, - ) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn get_import_lock(&self) -> &RwLock<()> { - &self.import_lock - } + type BlockImportOperation = ImportOperation; + type Blockchain = Blockchain; + type State = GenesisOrUnavailableState>; + type OffchainStorage = InMemOffchainStorage; + + fn begin_operation(&self) -> ClientResult { + Ok(ImportOperation { + header: None, + cache: Default::default(), + leaf_state: NewBlockState::Normal, + aux_ops: Vec::new(), + finalized_blocks: Vec::new(), + set_head: None, + storage_update: None, + changes_trie_config_update: None, + _phantom: Default::default(), + }) + } + + fn begin_state_operation( + &self, + _operation: &mut Self::BlockImportOperation, + _block: BlockId, + ) -> ClientResult<()> { + Ok(()) + } + + fn commit_operation(&self, mut operation: Self::BlockImportOperation) -> ClientResult<()> { + if !operation.finalized_blocks.is_empty() { + for block in operation.finalized_blocks { + self.blockchain.storage().finalize_header(block)?; + } + } + + if let Some(header) = operation.header { + let is_genesis_import = header.number().is_zero(); + if let Some(new_config) = operation.changes_trie_config_update { + operation.cache.insert( + well_known_cache_keys::CHANGES_TRIE_CONFIG, + new_config.encode(), + ); + } + self.blockchain.storage().import_header( + header, + operation.cache, + operation.leaf_state, + operation.aux_ops, + )?; + + // when importing genesis block => remember its state + if is_genesis_import { + *self.genesis_state.write() = operation.storage_update.take(); + } + } else { + for (key, maybe_val) in operation.aux_ops { + match maybe_val { + Some(val) => self + .blockchain + .storage() + .insert_aux(&[(&key[..], &val[..])], std::iter::empty())?, + None => self + .blockchain + .storage() + .insert_aux(std::iter::empty(), &[&key[..]])?, + } 
+ } + } + + if let Some(set_head) = operation.set_head { + self.blockchain.storage().set_head(set_head)?; + } + + Ok(()) + } + + fn finalize_block( + &self, + block: BlockId, + _justification: Option, + ) -> ClientResult<()> { + self.blockchain.storage().finalize_header(block) + } + + fn blockchain(&self) -> &Blockchain { + &self.blockchain + } + + fn usage_info(&self) -> Option { + self.blockchain.storage().usage_info() + } + + fn changes_trie_storage(&self) -> Option<&dyn PrunableStateChangesTrieStorage> { + None + } + + fn offchain_storage(&self) -> Option { + None + } + + fn state_at(&self, block: BlockId) -> ClientResult { + let block_number = self.blockchain.expect_block_number_from_id(&block)?; + + // special case for genesis block + if block_number.is_zero() { + if let Some(genesis_state) = self.genesis_state.read().clone() { + return Ok(GenesisOrUnavailableState::Genesis(genesis_state)); + } + } + + // else return unavailable state. We do not return error here, because error + // would mean that we do not know this state at all. 
But we know that it exists + Ok(GenesisOrUnavailableState::Unavailable) + } + + fn revert( + &self, + _n: NumberFor, + _revert_finalized: bool, + ) -> ClientResult> { + Err(ClientError::NotAvailableOnLightClient) + } + + fn get_import_lock(&self) -> &RwLock<()> { + &self.import_lock + } } impl RemoteBackend for Backend> where - Block: BlockT, - S: BlockchainStorage + 'static, - Block::Hash: Ord, + Block: BlockT, + S: BlockchainStorage + 'static, + Block::Hash: Ord, { - fn is_local_state_available(&self, block: &BlockId) -> bool { - self.genesis_state.read().is_some() - && self.blockchain.expect_block_number_from_id(block) - .map(|num| num.is_zero()) - .unwrap_or(false) - } - - fn remote_blockchain(&self) -> Arc> { - self.blockchain.clone() - } + fn is_local_state_available(&self, block: &BlockId) -> bool { + self.genesis_state.read().is_some() + && self + .blockchain + .expect_block_number_from_id(block) + .map(|num| num.is_zero()) + .unwrap_or(false) + } + + fn remote_blockchain(&self) -> Arc> { + self.blockchain.clone() + } } impl BlockImportOperation for ImportOperation - where - Block: BlockT, - S: BlockchainStorage, - Block::Hash: Ord, +where + Block: BlockT, + S: BlockchainStorage, + Block::Hash: Ord, { - type State = GenesisOrUnavailableState>; - - fn state(&self) -> ClientResult> { - // None means 'locally-stateless' backend - Ok(None) - } - - fn set_block_data( - &mut self, - header: Block::Header, - _body: Option>, - _justification: Option, - state: NewBlockState, - ) -> ClientResult<()> { - self.leaf_state = state; - self.header = Some(header); - Ok(()) - } - - fn update_cache(&mut self, cache: HashMap>) { - self.cache = cache; - } - - fn update_db_storage( - &mut self, - _update: >>::Transaction, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn update_changes_trie( - &mut self, - _update: ChangesTrieTransaction, NumberFor>, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore 
changes - Ok(()) - } - - fn reset_storage(&mut self, input: Storage) -> ClientResult { - check_genesis_storage(&input)?; - - // changes trie configuration - let changes_trie_config = input.top.iter() - .find(|(k, _)| &k[..] == well_known_keys::CHANGES_TRIE_CONFIG) - .map(|(_, v)| Decode::decode(&mut &v[..]) - .expect("changes trie configuration is encoded properly at genesis")); - self.changes_trie_config_update = Some(changes_trie_config); - - // this is only called when genesis block is imported => shouldn't be performance bottleneck - let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); - storage.insert(None, input.top); - - // create a list of children keys to re-compute roots for - let child_delta = input.children.iter() - .map(|(storage_key, storage_child)| (storage_key.clone(), None, storage_child.child_info.clone())) - .collect::>(); - - // make sure to persist the child storage - for (child_key, storage_child) in input.children { - storage.insert(Some((child_key, storage_child.child_info)), storage_child.data); - } - - let storage_update = InMemoryBackend::from(storage); - let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); - self.storage_update = Some(storage_update); - - Ok(storage_root) - } - - fn insert_aux(&mut self, ops: I) -> ClientResult<()> - where I: IntoIterator, Option>)> - { - self.aux_ops.append(&mut ops.into_iter().collect()); - Ok(()) - } - - fn update_storage( - &mut self, - _update: StorageCollection, - _child_update: ChildStorageCollection, - ) -> ClientResult<()> { - // we're not storing anything locally => ignore changes - Ok(()) - } - - fn mark_finalized(&mut self, block: BlockId, _justification: Option) -> ClientResult<()> { - self.finalized_blocks.push(block); - Ok(()) - } - - fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { - self.set_head = Some(block); - Ok(()) - } + type State = GenesisOrUnavailableState>; + + fn state(&self) -> ClientResult> { + // None means 
'locally-stateless' backend + Ok(None) + } + + fn set_block_data( + &mut self, + header: Block::Header, + _body: Option>, + _justification: Option, + state: NewBlockState, + ) -> ClientResult<()> { + self.leaf_state = state; + self.header = Some(header); + Ok(()) + } + + fn update_cache(&mut self, cache: HashMap>) { + self.cache = cache; + } + + fn update_db_storage( + &mut self, + _update: >>::Transaction, + ) -> ClientResult<()> { + // we're not storing anything locally => ignore changes + Ok(()) + } + + fn update_changes_trie( + &mut self, + _update: ChangesTrieTransaction, NumberFor>, + ) -> ClientResult<()> { + // we're not storing anything locally => ignore changes + Ok(()) + } + + fn reset_storage(&mut self, input: Storage) -> ClientResult { + check_genesis_storage(&input)?; + + // changes trie configuration + let changes_trie_config = input + .top + .iter() + .find(|(k, _)| &k[..] == well_known_keys::CHANGES_TRIE_CONFIG) + .map(|(_, v)| { + Decode::decode(&mut &v[..]) + .expect("changes trie configuration is encoded properly at genesis") + }); + self.changes_trie_config_update = Some(changes_trie_config); + + // this is only called when genesis block is imported => shouldn't be performance bottleneck + let mut storage: HashMap, OwnedChildInfo)>, _> = HashMap::new(); + storage.insert(None, input.top); + + // create a list of children keys to re-compute roots for + let child_delta = input + .children + .iter() + .map(|(storage_key, storage_child)| { + (storage_key.clone(), None, storage_child.child_info.clone()) + }) + .collect::>(); + + // make sure to persist the child storage + for (child_key, storage_child) in input.children { + storage.insert( + Some((child_key, storage_child.child_info)), + storage_child.data, + ); + } + + let storage_update = InMemoryBackend::from(storage); + let (storage_root, _) = storage_update.full_storage_root(std::iter::empty(), child_delta); + self.storage_update = Some(storage_update); + + Ok(storage_root) + } + + fn 
insert_aux(&mut self, ops: I) -> ClientResult<()> + where + I: IntoIterator, Option>)>, + { + self.aux_ops.append(&mut ops.into_iter().collect()); + Ok(()) + } + + fn update_storage( + &mut self, + _update: StorageCollection, + _child_update: ChildStorageCollection, + ) -> ClientResult<()> { + // we're not storing anything locally => ignore changes + Ok(()) + } + + fn mark_finalized( + &mut self, + block: BlockId, + _justification: Option, + ) -> ClientResult<()> { + self.finalized_blocks.push(block); + Ok(()) + } + + fn mark_head(&mut self, block: BlockId) -> ClientResult<()> { + self.set_head = Some(block); + Ok(()) + } } impl std::fmt::Debug for GenesisOrUnavailableState { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.fmt(f), - GenesisOrUnavailableState::Unavailable => write!(f, "Unavailable"), - } - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => state.fmt(f), + GenesisOrUnavailableState::Unavailable => write!(f, "Unavailable"), + } + } } impl StateBackend for GenesisOrUnavailableState - where - H::Out: Ord + codec::Codec, +where + H::Out: Ord + codec::Codec, { - type Error = ClientError; - type Transaction = as StateBackend>::Transaction; - type TrieBackendStorage = as StateBackend>::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.storage(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> ClientResult>> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.child_storage(storage_key, child_info, key).expect(IN_MEMORY_EXPECT_PROOF)), - 
GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - Ok(state.next_storage_key(key).expect(IN_MEMORY_EXPECT_PROOF)), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => Ok( - state.next_child_storage_key(storage_key, child_info, key) - .expect(IN_MEMORY_EXPECT_PROOF) - ), - GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.for_keys_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], action: A) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.for_key_values_with_prefix(prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_keys_in_child_storage(storage_key, child_info, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - action: A, - ) { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.for_child_keys_with_prefix(storage_key, child_info, prefix, action), - GenesisOrUnavailableState::Unavailable => (), - } - } - - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator, Option>)> 
- { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => - state.storage_root(delta), - GenesisOrUnavailableState::Unavailable => Default::default(), - } - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (H::Out, bool, Self::Transaction) - where - I: IntoIterator, Option>)> - { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => { - let (root, is_equal, _) = state.child_storage_root(storage_key, child_info, delta); - (root, is_equal, Default::default()) - }, - GenesisOrUnavailableState::Unavailable => - (H::Out::default(), true, Default::default()), - } - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.pairs(), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - match *self { - GenesisOrUnavailableState::Genesis(ref state) => state.keys(prefix), - GenesisOrUnavailableState::Unavailable => Vec::new(), - } - } - - fn register_overlay_stats(&mut self, _stats: &sp_state_machine::StateMachineStats) { } - - fn usage_info(&self) -> sp_state_machine::UsageInfo { - sp_state_machine::UsageInfo::empty() - } - - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { - match self { - GenesisOrUnavailableState::Genesis(ref mut state) => state.as_trie_backend(), - GenesisOrUnavailableState::Unavailable => None, - } - } + type Error = ClientError; + type Transaction = as StateBackend>::Transaction; + type TrieBackendStorage = as StateBackend>::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> ClientResult>> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => { + Ok(state.storage(key).expect(IN_MEMORY_EXPECT_PROOF)) + } + GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> ClientResult>> { + match *self { + 
GenesisOrUnavailableState::Genesis(ref state) => Ok(state + .child_storage(storage_key, child_info, key) + .expect(IN_MEMORY_EXPECT_PROOF)), + GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => { + Ok(state.next_storage_key(key).expect(IN_MEMORY_EXPECT_PROOF)) + } + GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => Ok(state + .next_child_storage_key(storage_key, child_info, key) + .expect(IN_MEMORY_EXPECT_PROOF)), + GenesisOrUnavailableState::Unavailable => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn for_keys_with_prefix(&self, prefix: &[u8], action: A) { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => { + state.for_keys_with_prefix(prefix, action) + } + GenesisOrUnavailableState::Unavailable => (), + } + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], action: A) { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => { + state.for_key_values_with_prefix(prefix, action) + } + GenesisOrUnavailableState::Unavailable => (), + } + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + action: A, + ) { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => { + state.for_keys_in_child_storage(storage_key, child_info, action) + } + GenesisOrUnavailableState::Unavailable => (), + } + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + action: A, + ) { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => { + state.for_child_keys_with_prefix(storage_key, child_info, prefix, 
action) + } + GenesisOrUnavailableState::Unavailable => (), + } + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => state.storage_root(delta), + GenesisOrUnavailableState::Unavailable => Default::default(), + } + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => { + let (root, is_equal, _) = state.child_storage_root(storage_key, child_info, delta); + (root, is_equal, Default::default()) + } + GenesisOrUnavailableState::Unavailable => (H::Out::default(), true, Default::default()), + } + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => state.pairs(), + GenesisOrUnavailableState::Unavailable => Vec::new(), + } + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + match *self { + GenesisOrUnavailableState::Genesis(ref state) => state.keys(prefix), + GenesisOrUnavailableState::Unavailable => Vec::new(), + } + } + + fn register_overlay_stats(&mut self, _stats: &sp_state_machine::StateMachineStats) {} + + fn usage_info(&self) -> sp_state_machine::UsageInfo { + sp_state_machine::UsageInfo::empty() + } + + fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + match self { + GenesisOrUnavailableState::Genesis(ref mut state) => state.as_trie_backend(), + GenesisOrUnavailableState::Unavailable => None, + } + } } #[cfg(test)] mod tests { - use substrate_test_runtime_client::{self, runtime::Block}; - use sc_client_api::backend::NewBlockState; - use crate::light::blockchain::tests::{DummyBlockchain, DummyStorage}; - use sp_runtime::traits::BlakeTwo256; - use super::*; - - #[test] - fn local_state_is_created_when_genesis_state_is_available() { - let def = Default::default(); - let header0 = 
substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - - let backend: Backend<_, BlakeTwo256> = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); - let mut op = backend.begin_operation().unwrap(); - op.set_block_data(header0, None, None, NewBlockState::Final).unwrap(); - op.reset_storage(Default::default()).unwrap(); - backend.commit_operation(op).unwrap(); - - match backend.state_at(BlockId::Number(0)).unwrap() { - GenesisOrUnavailableState::Genesis(_) => (), - _ => panic!("unexpected state"), - } - } - - #[test] - fn unavailable_state_is_created_when_genesis_state_is_unavailable() { - let backend: Backend<_, BlakeTwo256> = Backend::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - ); - - match backend.state_at(BlockId::Number(0)).unwrap() { - GenesisOrUnavailableState::Unavailable => (), - _ => panic!("unexpected state"), - } - } - - #[test] - fn light_aux_store_is_updated_via_non_importing_op() { - let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); - let mut op = ClientBackend::::begin_operation(&backend).unwrap(); - BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); - ClientBackend::::commit_operation(&backend, op).unwrap(); - - assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); - } + use super::*; + use crate::light::blockchain::tests::{DummyBlockchain, DummyStorage}; + use sc_client_api::backend::NewBlockState; + use sp_runtime::traits::BlakeTwo256; + use substrate_test_runtime_client::{self, runtime::Block}; + + #[test] + fn local_state_is_created_when_genesis_state_is_available() { + let def = Default::default(); + let header0 = substrate_test_runtime_client::runtime::Header::new( + 0, + def, + def, + def, + Default::default(), + ); + + let backend: Backend<_, BlakeTwo256> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let mut op = backend.begin_operation().unwrap(); + 
op.set_block_data(header0, None, None, NewBlockState::Final) + .unwrap(); + op.reset_storage(Default::default()).unwrap(); + backend.commit_operation(op).unwrap(); + + match backend.state_at(BlockId::Number(0)).unwrap() { + GenesisOrUnavailableState::Genesis(_) => (), + _ => panic!("unexpected state"), + } + } + + #[test] + fn unavailable_state_is_created_when_genesis_state_is_unavailable() { + let backend: Backend<_, BlakeTwo256> = + Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + + match backend.state_at(BlockId::Number(0)).unwrap() { + GenesisOrUnavailableState::Unavailable => (), + _ => panic!("unexpected state"), + } + } + + #[test] + fn light_aux_store_is_updated_via_non_importing_op() { + let backend = Backend::new(Arc::new(DummyBlockchain::new(DummyStorage::new()))); + let mut op = ClientBackend::::begin_operation(&backend).unwrap(); + BlockImportOperation::::insert_aux(&mut op, vec![(vec![1], Some(vec![2]))]).unwrap(); + ClientBackend::::commit_operation(&backend, op).unwrap(); + + assert_eq!(AuxStore::get_aux(&backend, &[1]).unwrap(), Some(vec![2])); + } } diff --git a/client/src/light/blockchain.rs b/client/src/light/blockchain.rs index 756147c941..82682b730b 100644 --- a/client/src/light/blockchain.rs +++ b/client/src/light/blockchain.rs @@ -20,307 +20,324 @@ use std::future::Future; use std::sync::Arc; -use sp_runtime::{Justification, generic::BlockId}; use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor, Zero}; +use sp_runtime::{generic::BlockId, Justification}; -use sp_blockchain::{ - HeaderMetadata, CachedHeaderMetadata, - Error as ClientError, Result as ClientResult, -}; -pub use sc_client_api::{ - backend::{ - AuxStore, NewBlockState - }, - blockchain::{ - Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, - HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, - well_known_cache_keys, - }, - light::{ - RemoteBlockchain, LocalOrRemote, Storage - } -}; use 
crate::cht; use crate::light::fetcher::{Fetcher, RemoteHeaderRequest}; +pub use sc_client_api::{ + backend::{AuxStore, NewBlockState}, + blockchain::{ + well_known_cache_keys, Backend as BlockchainBackend, BlockStatus, Cache as BlockchainCache, + HeaderBackend as BlockchainHeaderBackend, Info as BlockchainInfo, ProvideCache, + }, + light::{LocalOrRemote, RemoteBlockchain, Storage}, +}; +use sp_blockchain::{ + CachedHeaderMetadata, Error as ClientError, HeaderMetadata, Result as ClientResult, +}; /// Light client blockchain. pub struct Blockchain { - storage: S, + storage: S, } impl Blockchain { - /// Create new light blockchain backed with given storage. - pub fn new(storage: S) -> Self { - Self { - storage, - } - } - - /// Get storage reference. - pub fn storage(&self) -> &S { - &self.storage - } + /// Create new light blockchain backed with given storage. + pub fn new(storage: S) -> Self { + Self { storage } + } + + /// Get storage reference. + pub fn storage(&self) -> &S { + &self.storage + } } -impl BlockchainHeaderBackend for Blockchain where Block: BlockT, S: Storage { - fn header(&self, id: BlockId) -> ClientResult> { - match RemoteBlockchain::header(self, id)? { - LocalOrRemote::Local(header) => Ok(Some(header)), - LocalOrRemote::Remote(_) => Err(ClientError::NotAvailableOnLightClient), - LocalOrRemote::Unknown => Ok(None), - } - } - - fn info(&self) -> BlockchainInfo { - self.storage.info() - } - - fn status(&self, id: BlockId) -> ClientResult { - self.storage.status(id) - } - - fn number(&self, hash: Block::Hash) -> ClientResult>> { - self.storage.number(hash) - } - - fn hash(&self, number: <::Header as HeaderT>::Number) -> ClientResult> { - self.storage.hash(number) - } +impl BlockchainHeaderBackend for Blockchain +where + Block: BlockT, + S: Storage, +{ + fn header(&self, id: BlockId) -> ClientResult> { + match RemoteBlockchain::header(self, id)? 
{ + LocalOrRemote::Local(header) => Ok(Some(header)), + LocalOrRemote::Remote(_) => Err(ClientError::NotAvailableOnLightClient), + LocalOrRemote::Unknown => Ok(None), + } + } + + fn info(&self) -> BlockchainInfo { + self.storage.info() + } + + fn status(&self, id: BlockId) -> ClientResult { + self.storage.status(id) + } + + fn number(&self, hash: Block::Hash) -> ClientResult>> { + self.storage.number(hash) + } + + fn hash( + &self, + number: <::Header as HeaderT>::Number, + ) -> ClientResult> { + self.storage.hash(number) + } } -impl HeaderMetadata for Blockchain where Block: BlockT, S: Storage { - type Error = ClientError; - - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.storage.header_metadata(hash) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.storage.insert_header_metadata(hash, metadata) - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.storage.remove_header_metadata(hash) - } +impl HeaderMetadata for Blockchain +where + Block: BlockT, + S: Storage, +{ + type Error = ClientError; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.storage.header_metadata(hash) + } + + fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { + self.storage.insert_header_metadata(hash, metadata) + } + + fn remove_header_metadata(&self, hash: Block::Hash) { + self.storage.remove_header_metadata(hash) + } } -impl BlockchainBackend for Blockchain where Block: BlockT, S: Storage { - fn body(&self, _id: BlockId) -> ClientResult>> { - Err(ClientError::NotAvailableOnLightClient) - } +impl BlockchainBackend for Blockchain +where + Block: BlockT, + S: Storage, +{ + fn body(&self, _id: BlockId) -> ClientResult>> { + Err(ClientError::NotAvailableOnLightClient) + } - fn justification(&self, _id: BlockId) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } + fn justification(&self, _id: BlockId) -> 
ClientResult> { + Err(ClientError::NotAvailableOnLightClient) + } - fn last_finalized(&self) -> ClientResult { - self.storage.last_finalized() - } + fn last_finalized(&self) -> ClientResult { + self.storage.last_finalized() + } - fn cache(&self) -> Option>> { - self.storage.cache() - } + fn cache(&self) -> Option>> { + self.storage.cache() + } - fn leaves(&self) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } + fn leaves(&self) -> ClientResult> { + Err(ClientError::NotAvailableOnLightClient) + } - fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { - Err(ClientError::NotAvailableOnLightClient) - } + fn children(&self, _parent_hash: Block::Hash) -> ClientResult> { + Err(ClientError::NotAvailableOnLightClient) + } } impl, Block: BlockT> ProvideCache for Blockchain { - fn cache(&self) -> Option>> { - self.storage.cache() - } + fn cache(&self) -> Option>> { + self.storage.cache() + } } impl RemoteBlockchain for Blockchain - where - S: Storage, +where + S: Storage, { - fn header(&self, id: BlockId) -> ClientResult, - >> { - // first, try to read header from local storage - if let Some(local_header) = self.storage.header(id)? { - return Ok(LocalOrRemote::Local(local_header)); - } - - // we need to know block number to check if it's a part of CHT - let number = match id { - BlockId::Hash(hash) => match self.storage.number(hash)? { - Some(number) => number, - None => return Ok(LocalOrRemote::Unknown), - }, - BlockId::Number(number) => number, - }; - - // if the header is genesis (never pruned), non-canonical, or from future => return - if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown { - return Ok(LocalOrRemote::Unknown); - } - - Ok(LocalOrRemote::Remote(RemoteHeaderRequest { - cht_root: match self.storage.header_cht_root(cht::size(), number)? 
{ - Some(cht_root) => cht_root, - None => return Ok(LocalOrRemote::Unknown), - }, - block: number, - retry_count: None, - })) - } + fn header( + &self, + id: BlockId, + ) -> ClientResult>> { + // first, try to read header from local storage + if let Some(local_header) = self.storage.header(id)? { + return Ok(LocalOrRemote::Local(local_header)); + } + + // we need to know block number to check if it's a part of CHT + let number = match id { + BlockId::Hash(hash) => match self.storage.number(hash)? { + Some(number) => number, + None => return Ok(LocalOrRemote::Unknown), + }, + BlockId::Number(number) => number, + }; + + // if the header is genesis (never pruned), non-canonical, or from future => return + if number.is_zero() || self.storage.status(BlockId::Number(number))? == BlockStatus::Unknown + { + return Ok(LocalOrRemote::Unknown); + } + + Ok(LocalOrRemote::Remote(RemoteHeaderRequest { + cht_root: match self.storage.header_cht_root(cht::size(), number)? { + Some(cht_root) => cht_root, + None => return Ok(LocalOrRemote::Unknown), + }, + block: number, + retry_count: None, + })) + } } /// Returns future that resolves header either locally, or remotely. 
pub fn future_header>( - blockchain: &dyn RemoteBlockchain, - fetcher: &F, - id: BlockId, + blockchain: &dyn RemoteBlockchain, + fetcher: &F, + id: BlockId, ) -> impl Future, ClientError>> { - use futures::future::{ready, Either, FutureExt}; - - match blockchain.header(id) { - Ok(LocalOrRemote::Remote(request)) => Either::Left( - fetcher - .remote_header(request) - .then(|header| ready(header.map(Some))) - ), - Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))), - Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))), - Err(err) => Either::Right(ready(Err(err))), - } + use futures::future::{ready, Either, FutureExt}; + + match blockchain.header(id) { + Ok(LocalOrRemote::Remote(request)) => Either::Left( + fetcher + .remote_header(request) + .then(|header| ready(header.map(Some))), + ), + Ok(LocalOrRemote::Unknown) => Either::Right(ready(Ok(None))), + Ok(LocalOrRemote::Local(local_header)) => Either::Right(ready(Ok(Some(local_header)))), + Err(err) => Either::Right(ready(Err(err))), + } } #[cfg(test)] pub mod tests { - use std::collections::HashMap; - use parking_lot::Mutex; - use substrate_test_runtime_client::runtime::{Hash, Block, Header}; - use sc_client_api::blockchain::Info; - use super::*; - - pub type DummyBlockchain = Blockchain; - - pub struct DummyStorage { - pub changes_tries_cht_roots: HashMap, - pub aux_store: Mutex, Vec>>, - } - - impl DummyStorage { - pub fn new() -> Self { - DummyStorage { - changes_tries_cht_roots: HashMap::new(), - aux_store: Mutex::new(HashMap::new()), - } - } - } - - impl BlockchainHeaderBackend for DummyStorage { - fn header(&self, _id: BlockId) -> ClientResult> { - Err(ClientError::Backend("Test error".into())) - } - - fn info(&self) -> Info { - panic!("Test error") - } - - fn status(&self, _id: BlockId) -> ClientResult { - Err(ClientError::Backend("Test error".into())) - } - - fn number(&self, hash: Hash) -> ClientResult>> { - if hash == Default::default() { - 
Ok(Some(Default::default())) - } else { - Err(ClientError::Backend("Test error".into())) - } - } - - fn hash(&self, number: u64) -> ClientResult> { - if number == 0 { - Ok(Some(Default::default())) - } else { - Err(ClientError::Backend("Test error".into())) - } - } - } - - impl HeaderMetadata for DummyStorage { - type Error = ClientError; - - fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { - self.header(BlockId::hash(hash))?.map(|header| CachedHeaderMetadata::from(&header)) - .ok_or(ClientError::UnknownBlock("header not found".to_owned())) - } - fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} - fn remove_header_metadata(&self, _hash: Hash) {} - } - - impl AuxStore for DummyStorage { - fn insert_aux< - 'a, - 'b: 'a, - 'c: 'a, - I: IntoIterator, - D: IntoIterator, - >(&self, insert: I, _delete: D) -> ClientResult<()> { - for (k, v) in insert.into_iter() { - self.aux_store.lock().insert(k.to_vec(), v.to_vec()); - } - Ok(()) - } - - fn get_aux(&self, key: &[u8]) -> ClientResult>> { - Ok(self.aux_store.lock().get(key).cloned()) - } - } - - impl Storage for DummyStorage { - fn import_header( - &self, - _header: Header, - _cache: HashMap>, - _state: NewBlockState, - _aux_ops: Vec<(Vec, Option>)>, - ) -> ClientResult<()> { - Ok(()) - } - - fn set_head(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientError::Backend("Test error".into())) - } - - fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { - Err(ClientError::Backend("Test error".into())) - } - - fn last_finalized(&self) -> ClientResult { - Err(ClientError::Backend("Test error".into())) - } - - fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { - Err(ClientError::Backend("Test error".into())) - } - - fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult> { - cht::block_to_cht_number(cht_size, block) - .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) - .cloned() - .ok_or_else(|| 
ClientError::Backend( - format!("Test error: CHT for block #{} not found", block) - ).into()) - .map(Some) - } - - fn cache(&self) -> Option>> { - None - } - - fn usage_info(&self) -> Option { - None - } - } + use super::*; + use parking_lot::Mutex; + use sc_client_api::blockchain::Info; + use std::collections::HashMap; + use substrate_test_runtime_client::runtime::{Block, Hash, Header}; + + pub type DummyBlockchain = Blockchain; + + pub struct DummyStorage { + pub changes_tries_cht_roots: HashMap, + pub aux_store: Mutex, Vec>>, + } + + impl DummyStorage { + pub fn new() -> Self { + DummyStorage { + changes_tries_cht_roots: HashMap::new(), + aux_store: Mutex::new(HashMap::new()), + } + } + } + + impl BlockchainHeaderBackend for DummyStorage { + fn header(&self, _id: BlockId) -> ClientResult> { + Err(ClientError::Backend("Test error".into())) + } + + fn info(&self) -> Info { + panic!("Test error") + } + + fn status(&self, _id: BlockId) -> ClientResult { + Err(ClientError::Backend("Test error".into())) + } + + fn number(&self, hash: Hash) -> ClientResult>> { + if hash == Default::default() { + Ok(Some(Default::default())) + } else { + Err(ClientError::Backend("Test error".into())) + } + } + + fn hash(&self, number: u64) -> ClientResult> { + if number == 0 { + Ok(Some(Default::default())) + } else { + Err(ClientError::Backend("Test error".into())) + } + } + } + + impl HeaderMetadata for DummyStorage { + type Error = ClientError; + + fn header_metadata(&self, hash: Hash) -> Result, Self::Error> { + self.header(BlockId::hash(hash))? 
+ .map(|header| CachedHeaderMetadata::from(&header)) + .ok_or(ClientError::UnknownBlock("header not found".to_owned())) + } + fn insert_header_metadata(&self, _hash: Hash, _metadata: CachedHeaderMetadata) {} + fn remove_header_metadata(&self, _hash: Hash) {} + } + + impl AuxStore for DummyStorage { + fn insert_aux< + 'a, + 'b: 'a, + 'c: 'a, + I: IntoIterator, + D: IntoIterator, + >( + &self, + insert: I, + _delete: D, + ) -> ClientResult<()> { + for (k, v) in insert.into_iter() { + self.aux_store.lock().insert(k.to_vec(), v.to_vec()); + } + Ok(()) + } + + fn get_aux(&self, key: &[u8]) -> ClientResult>> { + Ok(self.aux_store.lock().get(key).cloned()) + } + } + + impl Storage for DummyStorage { + fn import_header( + &self, + _header: Header, + _cache: HashMap>, + _state: NewBlockState, + _aux_ops: Vec<(Vec, Option>)>, + ) -> ClientResult<()> { + Ok(()) + } + + fn set_head(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientError::Backend("Test error".into())) + } + + fn finalize_header(&self, _block: BlockId) -> ClientResult<()> { + Err(ClientError::Backend("Test error".into())) + } + + fn last_finalized(&self) -> ClientResult { + Err(ClientError::Backend("Test error".into())) + } + + fn header_cht_root(&self, _cht_size: u64, _block: u64) -> ClientResult> { + Err(ClientError::Backend("Test error".into())) + } + + fn changes_trie_cht_root(&self, cht_size: u64, block: u64) -> ClientResult> { + cht::block_to_cht_number(cht_size, block) + .and_then(|cht_num| self.changes_tries_cht_roots.get(&cht_num)) + .cloned() + .ok_or_else(|| { + ClientError::Backend(format!("Test error: CHT for block #{} not found", block)) + .into() + }) + .map(Some) + } + + fn cache(&self) -> Option>> { + None + } + + fn usage_info(&self) -> Option { + None + } + } } diff --git a/client/src/light/call_executor.rs b/client/src/light/call_executor.rs index b439a268d2..fe116f150c 100644 --- a/client/src/light/call_executor.rs +++ b/client/src/light/call_executor.rs @@ -16,155 +16,156 @@ //! 
Methods that light client could use to execute runtime calls. -use std::{ - sync::Arc, panic::UnwindSafe, result, cell::RefCell, -}; +use std::{cell::RefCell, panic::UnwindSafe, result, sync::Arc}; -use codec::{Encode, Decode}; -use sp_core::{convert_hash, NativeOrEncoded, traits::CodeExecutor}; +use codec::{Decode, Encode}; +use hash_db::Hasher; +use sp_core::{convert_hash, traits::CodeExecutor, NativeOrEncoded}; +use sp_externalities::Extensions; use sp_runtime::{ - generic::BlockId, traits::{One, Block as BlockT, Header as HeaderT, HashFor}, + generic::BlockId, + traits::{Block as BlockT, HashFor, Header as HeaderT, One}, }; -use sp_externalities::Extensions; use sp_state_machine::{ - self, Backend as StateBackend, OverlayedChanges, ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, ExecutionManager, StorageProof, CloneableSpawn, + self, create_proof_check_backend, execution_proof_check_on_trie_backend, + Backend as StateBackend, CloneableSpawn, ExecutionManager, ExecutionStrategy, OverlayedChanges, + StorageProof, }; -use hash_db::Hasher; -use sp_api::{ProofRecorder, InitializeBlock, StorageTransactionCache}; +use sp_api::{InitializeBlock, ProofRecorder, StorageTransactionCache}; use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sc_client_api::{ - backend::RemoteBackend, - light::RemoteCallRequest, - call_executor::CallExecutor, + backend::RemoteBackend, call_executor::CallExecutor, light::RemoteCallRequest, }; -use sc_executor::{RuntimeVersion, NativeVersion}; +use sc_executor::{NativeVersion, RuntimeVersion}; /// Call executor that is able to execute calls only on genesis state. /// /// Trying to execute call on non-genesis state leads to error. pub struct GenesisCallExecutor { - backend: Arc, - local: L, + backend: Arc, + local: L, } impl GenesisCallExecutor { - /// Create new genesis call executor. 
- pub fn new(backend: Arc, local: L) -> Self { - Self { backend, local } - } + /// Create new genesis call executor. + pub fn new(backend: Arc, local: L) -> Self { + Self { backend, local } + } } impl Clone for GenesisCallExecutor { - fn clone(&self) -> Self { - GenesisCallExecutor { - backend: self.backend.clone(), - local: self.local.clone(), - } - } + fn clone(&self) -> Self { + GenesisCallExecutor { + backend: self.backend.clone(), + local: self.local.clone(), + } + } } -impl CallExecutor for - GenesisCallExecutor - where - Block: BlockT, - B: RemoteBackend, - Local: CallExecutor, +impl CallExecutor for GenesisCallExecutor +where + Block: BlockT, + B: RemoteBackend, + Local: CallExecutor, { - type Error = ClientError; - - type Backend = B; - - fn call( - &self, - id: &BlockId, - method: &str, - call_data: &[u8], - strategy: ExecutionStrategy, - extensions: Option, - ) -> ClientResult> { - match self.backend.is_local_state_available(id) { - true => self.local.call(id, method, call_data, strategy, extensions), - false => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn contextual_call< - 'a, - IB: Fn() -> ClientResult<()>, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - initialize_block_fn: IB, - at: &BlockId, - method: &str, - call_data: &[u8], - changes: &RefCell, - _: Option<&RefCell>>, - initialize_block: InitializeBlock<'a, Block>, - _manager: ExecutionManager, - native_call: Option, - recorder: &Option>, - extensions: Option, - ) -> ClientResult> where ExecutionManager: Clone { - // there's no actual way/need to specify native/wasm execution strategy on light node - // => we can safely ignore passed values - - match self.backend.is_local_state_available(at) { - true => CallExecutor::contextual_call::< - _, - fn( - Result, Local::Error>, - Result, Local::Error>, - ) -> Result, Local::Error>, - _, - NC - >( - 
&self.local, - initialize_block_fn, - at, - method, - call_data, - changes, - None, - initialize_block, - ExecutionManager::NativeWhenPossible, - native_call, - recorder, - extensions, - ).map_err(|e| ClientError::Execution(Box::new(e.to_string()))), - false => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn runtime_version(&self, id: &BlockId) -> ClientResult { - match self.backend.is_local_state_available(id) { - true => self.local.runtime_version(id), - false => Err(ClientError::NotAvailableOnLightClient), - } - } - - fn prove_at_trie_state>>( - &self, - _state: &sp_state_machine::TrieBackend>, - _changes: &mut OverlayedChanges, - _method: &str, - _call_data: &[u8], - ) -> ClientResult<(Vec, StorageProof)> { - Err(ClientError::NotAvailableOnLightClient) - } - - fn native_runtime_version(&self) -> Option<&NativeVersion> { - None - } + type Error = ClientError; + + type Backend = B; + + fn call( + &self, + id: &BlockId, + method: &str, + call_data: &[u8], + strategy: ExecutionStrategy, + extensions: Option, + ) -> ClientResult> { + match self.backend.is_local_state_available(id) { + true => self.local.call(id, method, call_data, strategy, extensions), + false => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn contextual_call< + 'a, + IB: Fn() -> ClientResult<()>, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + initialize_block_fn: IB, + at: &BlockId, + method: &str, + call_data: &[u8], + changes: &RefCell, + _: Option<&RefCell>>, + initialize_block: InitializeBlock<'a, Block>, + _manager: ExecutionManager, + native_call: Option, + recorder: &Option>, + extensions: Option, + ) -> ClientResult> + where + ExecutionManager: Clone, + { + // there's no actual way/need to specify native/wasm execution strategy on light node + // => we can safely ignore passed values + + match 
self.backend.is_local_state_available(at) { + true => CallExecutor::contextual_call::< + _, + fn( + Result, Local::Error>, + Result, Local::Error>, + ) -> Result, Local::Error>, + _, + NC, + >( + &self.local, + initialize_block_fn, + at, + method, + call_data, + changes, + None, + initialize_block, + ExecutionManager::NativeWhenPossible, + native_call, + recorder, + extensions, + ) + .map_err(|e| ClientError::Execution(Box::new(e.to_string()))), + false => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn runtime_version(&self, id: &BlockId) -> ClientResult { + match self.backend.is_local_state_available(id) { + true => self.local.runtime_version(id), + false => Err(ClientError::NotAvailableOnLightClient), + } + } + + fn prove_at_trie_state>>( + &self, + _state: &sp_state_machine::TrieBackend>, + _changes: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8], + ) -> ClientResult<(Vec, StorageProof)> { + Err(ClientError::NotAvailableOnLightClient) + } + + fn native_runtime_version(&self) -> Option<&NativeVersion> { + None + } } /// Prove contextual execution using given block header in environment. @@ -172,42 +173,37 @@ impl CallExecutor for /// Method is executed using passed header as environment' current block. /// Proof includes both environment preparation proof and method execution proof. 
pub fn prove_execution( - mut state: S, - header: Block::Header, - executor: &E, - method: &str, - call_data: &[u8], + mut state: S, + header: Block::Header, + executor: &E, + method: &str, + call_data: &[u8], ) -> ClientResult<(Vec, StorageProof)> - where - Block: BlockT, - S: StateBackend>, - E: CallExecutor, +where + Block: BlockT, + S: StateBackend>, + E: CallExecutor, { - let trie_state = state.as_trie_backend() - .ok_or_else(|| - Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) as - Box - )?; - - // prepare execution environment + record preparation proof - let mut changes = Default::default(); - let (_, init_proof) = executor.prove_at_trie_state( - trie_state, - &mut changes, - "Core_initialize_block", - &header.encode(), - )?; - - // execute method + record execution proof - let (result, exec_proof) = executor.prove_at_trie_state( - &trie_state, - &mut changes, - method, - call_data, - )?; - let total_proof = StorageProof::merge(vec![init_proof, exec_proof]); - - Ok((result, total_proof)) + let trie_state = state.as_trie_backend().ok_or_else(|| { + Box::new(sp_state_machine::ExecutionError::UnableToGenerateProof) + as Box + })?; + + // prepare execution environment + record preparation proof + let mut changes = Default::default(); + let (_, init_proof) = executor.prove_at_trie_state( + trie_state, + &mut changes, + "Core_initialize_block", + &header.encode(), + )?; + + // execute method + record execution proof + let (result, exec_proof) = + executor.prove_at_trie_state(&trie_state, &mut changes, method, call_data)?; + let total_proof = StorageProof::merge(vec![init_proof, exec_proof]); + + Ok((result, total_proof)) } /// Check remote contextual execution proof using given backend. @@ -215,304 +211,344 @@ pub fn prove_execution( /// Method is executed using passed header as environment' current block. /// Proof should include both environment preparation proof and method execution proof. 
pub fn check_execution_proof( - executor: &E, - spawn_handle: Box, - request: &RemoteCallRequest

, - remote_proof: StorageProof, + executor: &E, + spawn_handle: Box, + request: &RemoteCallRequest
, + remote_proof: StorageProof, ) -> ClientResult> - where - Header: HeaderT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, +where + Header: HeaderT, + E: CodeExecutor + Clone + 'static, + H: Hasher, + H::Out: Ord + codec::Codec + 'static, { - check_execution_proof_with_make_header::( - executor, - spawn_handle, - request, - remote_proof, - |header|
::new( - *header.number() + One::one(), - Default::default(), - Default::default(), - header.hash(), - Default::default(), - ), - ) + check_execution_proof_with_make_header::( + executor, + spawn_handle, + request, + remote_proof, + |header| { +
::new( + *header.number() + One::one(), + Default::default(), + Default::default(), + header.hash(), + Default::default(), + ) + }, + ) } fn check_execution_proof_with_make_header Header>( - executor: &E, - spawn_handle: Box, - request: &RemoteCallRequest
, - remote_proof: StorageProof, - make_next_header: MakeNextHeader, + executor: &E, + spawn_handle: Box, + request: &RemoteCallRequest
, + remote_proof: StorageProof, + make_next_header: MakeNextHeader, ) -> ClientResult> - where - Header: HeaderT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, +where + Header: HeaderT, + E: CodeExecutor + Clone + 'static, + H: Hasher, + H::Out: Ord + codec::Codec + 'static, { - let local_state_root = request.header.state_root(); - let root: H::Out = convert_hash(&local_state_root); - - // prepare execution environment + check preparation proof - let mut changes = OverlayedChanges::default(); - let trie_backend = create_proof_check_backend(root, remote_proof)?; - let next_header = make_next_header(&request.header); - - // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 - let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); - let runtime_code = backend_runtime_code.runtime_code()?; - - execution_proof_check_on_trie_backend::( - &trie_backend, - &mut changes, - executor, - spawn_handle.clone(), - "Core_initialize_block", - &next_header.encode(), - &runtime_code, - )?; - - // execute method - execution_proof_check_on_trie_backend::( - &trie_backend, - &mut changes, - executor, - spawn_handle, - &request.method, - &request.call_data, - &runtime_code, - ) - .map_err(Into::into) + let local_state_root = request.header.state_root(); + let root: H::Out = convert_hash(&local_state_root); + + // prepare execution environment + check preparation proof + let mut changes = OverlayedChanges::default(); + let trie_backend = create_proof_check_backend(root, remote_proof)?; + let next_header = make_next_header(&request.header); + + // TODO: Remove when solved: https://github.com/paritytech/substrate/issues/5047 + let backend_runtime_code = sp_state_machine::backend::BackendRuntimeCode::new(&trie_backend); + let runtime_code = backend_runtime_code.runtime_code()?; + + execution_proof_check_on_trie_backend::( + &trie_backend, + &mut changes, + executor, + 
spawn_handle.clone(), + "Core_initialize_block", + &next_header.encode(), + &runtime_code, + )?; + + // execute method + execution_proof_check_on_trie_backend::( + &trie_backend, + &mut changes, + executor, + spawn_handle, + &request.method, + &request.call_data, + &runtime_code, + ) + .map_err(Into::into) } #[cfg(test)] mod tests { - use super::*; - use sp_consensus::BlockOrigin; - use substrate_test_runtime_client::{ - runtime::{Header, Digest, Block}, TestClient, ClientBlockImportExt, - }; - use sc_executor::{NativeExecutor, WasmExecutionMethod}; - use sp_core::{H256, tasks::executor as tasks_executor}; - use sc_client_api::backend::{Backend, NewBlockState}; - use crate::in_mem::Backend as InMemBackend; - use sc_client_api::ProofProvider; - use sp_runtime::traits::BlakeTwo256; - use sc_block_builder::BlockBuilderProvider; - - struct DummyCallExecutor; - - impl CallExecutor for DummyCallExecutor { - type Error = ClientError; - - type Backend = substrate_test_runtime_client::Backend; - - fn call( - &self, - _id: &BlockId, - _method: &str, - _call_data: &[u8], - _strategy: ExecutionStrategy, - _extensions: Option, - ) -> Result, ClientError> { - Ok(vec![42]) - } - - fn contextual_call< - 'a, - IB: Fn() -> ClientResult<()>, - EM: Fn( - Result, Self::Error>, - Result, Self::Error> - ) -> Result, Self::Error>, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - >( - &self, - _initialize_block_fn: IB, - _at: &BlockId, - _method: &str, - _call_data: &[u8], - _changes: &RefCell, - _storage_transaction_cache: Option<&RefCell< - StorageTransactionCache< - Block, - >::State, - > - >>, - _initialize_block: InitializeBlock<'a, Block>, - _execution_manager: ExecutionManager, - _native_call: Option, - _proof_recorder: &Option>, - _extensions: Option, - ) -> ClientResult> where ExecutionManager: Clone { - unreachable!() - } - - fn runtime_version(&self, _id: &BlockId) -> Result { - unreachable!() - } - - fn prove_at_trie_state>>( - &self, - 
_trie_state: &sp_state_machine::TrieBackend>, - _overlay: &mut OverlayedChanges, - _method: &str, - _call_data: &[u8] - ) -> Result<(Vec, StorageProof), ClientError> { - unreachable!() - } - - fn native_runtime_version(&self) -> Option<&NativeVersion> { - unreachable!() - } - } - - fn local_executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - } - - #[test] - fn execution_proof_is_generated_and_checked() { - fn execute(remote_client: &TestClient, at: u64, method: &'static str) -> (Vec, Vec) { - let remote_block_id = BlockId::Number(at); - let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - - // 'fetch' execution proof from remote node - let (remote_result, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - &[] - ).unwrap(); - - // check remote execution proof locally - let local_result = check_execution_proof::<_, _, BlakeTwo256>( - &local_executor(), - tasks_executor(), - &RemoteCallRequest { - block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header, - method: method.into(), - call_data: vec![], - retry_count: None, - }, - remote_execution_proof, - ).unwrap(); - - (remote_result, local_result) - } - - fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { - let remote_block_id = BlockId::Number(at); - let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - - // 'fetch' execution proof from remote node - let (_, remote_execution_proof) = remote_client.execution_proof( - &remote_block_id, - method, - &[] - ).unwrap(); - - // check remote execution proof locally - let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( - &local_executor(), - tasks_executor(), - &RemoteCallRequest { - block: substrate_test_runtime_client::runtime::Hash::default(), - header: remote_header, - method: method.into(), - call_data: vec![], - retry_count: None, - 
}, - remote_execution_proof, - |header|
::new( - at + 1, - Default::default(), - Default::default(), - header.hash(), - header.digest().clone(), // this makes next header wrong - ), - ); - match execution_result { - Err(sp_blockchain::Error::Execution(_)) => (), - _ => panic!("Unexpected execution result: {:?}", execution_result), - } - } - - // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); - for i in 1u32..3u32 { - let mut digest = Digest::default(); - digest.push(sp_runtime::generic::DigestItem::Other::(i.to_le_bytes().to_vec())); - remote_client.import_justified( - BlockOrigin::Own, - remote_client.new_block(digest).unwrap().build().unwrap().block, - Default::default(), - ).unwrap(); - } - - // check method that doesn't requires environment - let (remote, local) = execute(&remote_client, 0, "Core_version"); - assert_eq!(remote, local); - - let (remote, local) = execute(&remote_client, 2, "Core_version"); - assert_eq!(remote, local); - - // check method that requires environment - let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 1); - - let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); - let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); - assert_eq!(local_block.number, 3); - - // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set - execute_with_proof_failure(&remote_client, 2, "Core_version"); - - // check that proof check doesn't panic even if proof is incorrect AND panic handler is set - sp_panic_handler::set("TEST", "1.2.3"); - execute_with_proof_failure(&remote_client, 2, "Core_version"); - } - - #[test] - fn code_is_executed_at_genesis_only() { - let backend = Arc::new(InMemBackend::::new()); - let def = H256::default(); - let header0 = substrate_test_runtime_client::runtime::Header::new(0, def, def, def, Default::default()); - let hash0 = 
header0.hash(); - let header1 = substrate_test_runtime_client::runtime::Header::new(1, def, def, hash0, Default::default()); - let hash1 = header1.hash(); - backend.blockchain().insert(hash0, header0, None, None, NewBlockState::Final).unwrap(); - backend.blockchain().insert(hash1, header1, None, None, NewBlockState::Final).unwrap(); - - let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor); - assert_eq!( - genesis_executor.call( - &BlockId::Number(0), - "test_method", - &[], - ExecutionStrategy::NativeElseWasm, - None, - ).unwrap(), - vec![42], - ); - - let call_on_unavailable = genesis_executor.call( - &BlockId::Number(1), - "test_method", - &[], - ExecutionStrategy::NativeElseWasm, - None, - ); - - match call_on_unavailable { - Err(ClientError::NotAvailableOnLightClient) => (), - _ => unreachable!("unexpected result: {:?}", call_on_unavailable), - } - } + use super::*; + use crate::in_mem::Backend as InMemBackend; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::backend::{Backend, NewBlockState}; + use sc_client_api::ProofProvider; + use sc_executor::{NativeExecutor, WasmExecutionMethod}; + use sp_consensus::BlockOrigin; + use sp_core::{tasks::executor as tasks_executor, H256}; + use sp_runtime::traits::BlakeTwo256; + use substrate_test_runtime_client::{ + runtime::{Block, Digest, Header}, + ClientBlockImportExt, TestClient, + }; + + struct DummyCallExecutor; + + impl CallExecutor for DummyCallExecutor { + type Error = ClientError; + + type Backend = substrate_test_runtime_client::Backend; + + fn call( + &self, + _id: &BlockId, + _method: &str, + _call_data: &[u8], + _strategy: ExecutionStrategy, + _extensions: Option, + ) -> Result, ClientError> { + Ok(vec![42]) + } + + fn contextual_call< + 'a, + IB: Fn() -> ClientResult<()>, + EM: Fn( + Result, Self::Error>, + Result, Self::Error>, + ) -> Result, Self::Error>, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + >( + &self, + 
_initialize_block_fn: IB, + _at: &BlockId, + _method: &str, + _call_data: &[u8], + _changes: &RefCell, + _storage_transaction_cache: Option< + &RefCell< + StorageTransactionCache< + Block, + >::State, + >, + >, + >, + _initialize_block: InitializeBlock<'a, Block>, + _execution_manager: ExecutionManager, + _native_call: Option, + _proof_recorder: &Option>, + _extensions: Option, + ) -> ClientResult> + where + ExecutionManager: Clone, + { + unreachable!() + } + + fn runtime_version(&self, _id: &BlockId) -> Result { + unreachable!() + } + + fn prove_at_trie_state>>( + &self, + _trie_state: &sp_state_machine::TrieBackend>, + _overlay: &mut OverlayedChanges, + _method: &str, + _call_data: &[u8], + ) -> Result<(Vec, StorageProof), ClientError> { + unreachable!() + } + + fn native_runtime_version(&self) -> Option<&NativeVersion> { + unreachable!() + } + } + + fn local_executor() -> NativeExecutor { + NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) + } + + #[test] + fn execution_proof_is_generated_and_checked() { + fn execute( + remote_client: &TestClient, + at: u64, + method: &'static str, + ) -> (Vec, Vec) { + let remote_block_id = BlockId::Number(at); + let remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + + // 'fetch' execution proof from remote node + let (remote_result, remote_execution_proof) = remote_client + .execution_proof(&remote_block_id, method, &[]) + .unwrap(); + + // check remote execution proof locally + let local_result = check_execution_proof::<_, _, BlakeTwo256>( + &local_executor(), + tasks_executor(), + &RemoteCallRequest { + block: substrate_test_runtime_client::runtime::Hash::default(), + header: remote_header, + method: method.into(), + call_data: vec![], + retry_count: None, + }, + remote_execution_proof, + ) + .unwrap(); + + (remote_result, local_result) + } + + fn execute_with_proof_failure(remote_client: &TestClient, at: u64, method: &'static str) { + let remote_block_id = BlockId::Number(at); + let 
remote_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + + // 'fetch' execution proof from remote node + let (_, remote_execution_proof) = remote_client + .execution_proof(&remote_block_id, method, &[]) + .unwrap(); + + // check remote execution proof locally + let execution_result = check_execution_proof_with_make_header::<_, _, BlakeTwo256, _>( + &local_executor(), + tasks_executor(), + &RemoteCallRequest { + block: substrate_test_runtime_client::runtime::Hash::default(), + header: remote_header, + method: method.into(), + call_data: vec![], + retry_count: None, + }, + remote_execution_proof, + |header| { +
::new( + at + 1, + Default::default(), + Default::default(), + header.hash(), + header.digest().clone(), // this makes next header wrong + ) + }, + ); + match execution_result { + Err(sp_blockchain::Error::Execution(_)) => (), + _ => panic!("Unexpected execution result: {:?}", execution_result), + } + } + + // prepare remote client + let mut remote_client = substrate_test_runtime_client::new(); + for i in 1u32..3u32 { + let mut digest = Digest::default(); + digest.push(sp_runtime::generic::DigestItem::Other::( + i.to_le_bytes().to_vec(), + )); + remote_client + .import_justified( + BlockOrigin::Own, + remote_client + .new_block(digest) + .unwrap() + .build() + .unwrap() + .block, + Default::default(), + ) + .unwrap(); + } + + // check method that doesn't requires environment + let (remote, local) = execute(&remote_client, 0, "Core_version"); + assert_eq!(remote, local); + + let (remote, local) = execute(&remote_client, 2, "Core_version"); + assert_eq!(remote, local); + + // check method that requires environment + let (_, block) = execute(&remote_client, 0, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 1); + + let (_, block) = execute(&remote_client, 2, "BlockBuilder_finalize_block"); + let local_block: Header = Decode::decode(&mut &block[..]).unwrap(); + assert_eq!(local_block.number, 3); + + // check that proof check doesn't panic even if proof is incorrect AND no panic handler is set + execute_with_proof_failure(&remote_client, 2, "Core_version"); + + // check that proof check doesn't panic even if proof is incorrect AND panic handler is set + sp_panic_handler::set("TEST", "1.2.3"); + execute_with_proof_failure(&remote_client, 2, "Core_version"); + } + + #[test] + fn code_is_executed_at_genesis_only() { + let backend = Arc::new(InMemBackend::::new()); + let def = H256::default(); + let header0 = substrate_test_runtime_client::runtime::Header::new( + 0, + def, + def, + def, + 
Default::default(), + ); + let hash0 = header0.hash(); + let header1 = substrate_test_runtime_client::runtime::Header::new( + 1, + def, + def, + hash0, + Default::default(), + ); + let hash1 = header1.hash(); + backend + .blockchain() + .insert(hash0, header0, None, None, NewBlockState::Final) + .unwrap(); + backend + .blockchain() + .insert(hash1, header1, None, None, NewBlockState::Final) + .unwrap(); + + let genesis_executor = GenesisCallExecutor::new(backend, DummyCallExecutor); + assert_eq!( + genesis_executor + .call( + &BlockId::Number(0), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + None, + ) + .unwrap(), + vec![42], + ); + + let call_on_unavailable = genesis_executor.call( + &BlockId::Number(1), + "test_method", + &[], + ExecutionStrategy::NativeElseWasm, + None, + ); + + match call_on_unavailable { + Err(ClientError::NotAvailableOnLightClient) => (), + _ => unreachable!("unexpected result: {:?}", call_on_unavailable), + } + } } diff --git a/client/src/light/fetcher.rs b/client/src/light/fetcher.rs index 0ae0e68e0c..102bdb264a 100644 --- a/client/src/light/fetcher.rs +++ b/client/src/light/fetcher.rs @@ -16,818 +16,1040 @@ //! Light client data fetcher. Fetches requested data from remote full nodes. 
-use std::sync::Arc; use std::collections::{BTreeMap, HashMap}; use std::marker::PhantomData; +use std::sync::Arc; -use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; use codec::{Decode, Encode}; +use hash_db::{HashDB, Hasher, EMPTY_PREFIX}; +use sp_blockchain::{Error as ClientError, Result as ClientResult}; use sp_core::{convert_hash, traits::CodeExecutor}; use sp_runtime::traits::{ - Block as BlockT, Header as HeaderT, Hash, HashFor, NumberFor, - AtLeast32Bit, CheckedConversion, + AtLeast32Bit, Block as BlockT, CheckedConversion, Hash, HashFor, Header as HeaderT, NumberFor, }; +pub use sp_state_machine::StorageProof; use sp_state_machine::{ - ChangesTrieRootsStorage, ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, - InMemoryChangesTrieStorage, TrieBackend, read_proof_check, key_changes_proof_check_with_db, - read_child_proof_check, CloneableSpawn, + key_changes_proof_check_with_db, read_child_proof_check, read_proof_check, + ChangesTrieAnchorBlockId, ChangesTrieConfigurationRange, ChangesTrieRootsStorage, + CloneableSpawn, InMemoryChangesTrieStorage, TrieBackend, }; -pub use sp_state_machine::StorageProof; -use sp_blockchain::{Error as ClientError, Result as ClientResult}; use crate::cht; -pub use sc_client_api::{ - light::{ - RemoteCallRequest, RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, - RemoteChangesRequest, ChangesProof, RemoteBodyRequest, Fetcher, FetchChecker, - Storage as BlockchainStorage, - }, -}; -use crate::light::blockchain::{Blockchain}; +use crate::light::blockchain::Blockchain; use crate::light::call_executor::check_execution_proof; +pub use sc_client_api::light::{ + ChangesProof, FetchChecker, Fetcher, RemoteBodyRequest, RemoteCallRequest, + RemoteChangesRequest, RemoteHeaderRequest, RemoteReadChildRequest, RemoteReadRequest, + Storage as BlockchainStorage, +}; /// Remote data checker. 
pub struct LightDataChecker> { - blockchain: Arc>, - executor: E, - spawn_handle: Box, - _hasher: PhantomData<(B, H)>, + blockchain: Arc>, + executor: E, + spawn_handle: Box, + _hasher: PhantomData<(B, H)>, } impl> LightDataChecker { - /// Create new light data checker. - pub fn new(blockchain: Arc>, executor: E, spawn_handle: Box) -> Self { - Self { - blockchain, executor, spawn_handle, _hasher: PhantomData - } - } - - /// Check remote changes query proof assuming that CHT-s are of given size. - fn check_changes_proof_with_cht_size( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof, - cht_size: NumberFor, - ) -> ClientResult, u32)>> - where - H: Hasher, - H::Out: Ord + codec::Codec, - { - // since we need roots of all changes tries for the range begin..max - // => remote node can't use max block greater that one that we have passed - if remote_proof.max_block > request.max_block.0 || remote_proof.max_block < request.last_block.0 { - return Err(ClientError::ChangesTrieAccessFailed(format!( - "Invalid max_block used by the remote node: {}. Local: {}..{}..{}", - remote_proof.max_block, request.first_block.0, request.last_block.0, request.max_block.0, - )).into()); - } - - // check if remote node has responded with extra changes trie roots proofs - // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) - let is_extra_first_root = remote_proof.roots.keys().next() - .map(|first_root| *first_root < request.first_block.0 - || *first_root >= request.tries_roots.0) - .unwrap_or(false); - let is_extra_last_root = remote_proof.roots.keys().next_back() - .map(|last_root| *last_root >= request.tries_roots.0) - .unwrap_or(false); - if is_extra_first_root || is_extra_last_root { - return Err(ClientError::ChangesTrieAccessFailed(format!( + /// Create new light data checker. 
+ pub fn new( + blockchain: Arc>, + executor: E, + spawn_handle: Box, + ) -> Self { + Self { + blockchain, + executor, + spawn_handle, + _hasher: PhantomData, + } + } + + /// Check remote changes query proof assuming that CHT-s are of given size. + fn check_changes_proof_with_cht_size( + &self, + request: &RemoteChangesRequest, + remote_proof: ChangesProof, + cht_size: NumberFor, + ) -> ClientResult, u32)>> + where + H: Hasher, + H::Out: Ord + codec::Codec, + { + // since we need roots of all changes tries for the range begin..max + // => remote node can't use max block greater that one that we have passed + if remote_proof.max_block > request.max_block.0 + || remote_proof.max_block < request.last_block.0 + { + return Err(ClientError::ChangesTrieAccessFailed(format!( + "Invalid max_block used by the remote node: {}. Local: {}..{}..{}", + remote_proof.max_block, + request.first_block.0, + request.last_block.0, + request.max_block.0, + )) + .into()); + } + + // check if remote node has responded with extra changes trie roots proofs + // all changes tries roots must be in range [request.first_block.0; request.tries_roots.0) + let is_extra_first_root = remote_proof + .roots + .keys() + .next() + .map(|first_root| { + *first_root < request.first_block.0 || *first_root >= request.tries_roots.0 + }) + .unwrap_or(false); + let is_extra_last_root = remote_proof + .roots + .keys() + .next_back() + .map(|last_root| *last_root >= request.tries_roots.0) + .unwrap_or(false); + if is_extra_first_root || is_extra_last_root { + return Err(ClientError::ChangesTrieAccessFailed(format!( "Extra changes tries roots proofs provided by the remote node: [{:?}..{:?}]. 
Expected in range: [{}; {})", remote_proof.roots.keys().next(), remote_proof.roots.keys().next_back(), request.first_block.0, request.tries_roots.0, )).into()); - } - - // if request has been composed when some required headers were already pruned - // => remote node has sent us CHT-based proof of required changes tries roots - // => check that this proof is correct before proceeding with changes proof - let remote_max_block = remote_proof.max_block; - let remote_roots = remote_proof.roots; - let remote_roots_proof = remote_proof.roots_proof; - let remote_proof = remote_proof.proof; - if !remote_roots.is_empty() { - self.check_changes_tries_proof( - cht_size, - &remote_roots, - remote_roots_proof, - )?; - } - - // and now check the key changes proof + get the changes - let mut result = Vec::new(); - let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); - for config_range in &request.changes_trie_configs { - let result_range = key_changes_proof_check_with_db::( - ChangesTrieConfigurationRange { - config: config_range.config.as_ref().ok_or(ClientError::ChangesTriesNotSupported)?, - zero: config_range.zero.0, - end: config_range.end.map(|(n, _)| n), - }, - &RootsStorage { - roots: (request.tries_roots.0, &request.tries_roots.2), - prev_roots: &remote_roots, - }, - &proof_storage, - request.first_block.0, - &ChangesTrieAnchorBlockId { - hash: convert_hash(&request.last_block.1), - number: request.last_block.0, - }, - remote_max_block, - request.storage_key.as_ref().map(Vec::as_slice), - &request.key) - .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; - result.extend(result_range); - } - - Ok(result) - } - - /// Check CHT-based proof for changes tries roots. 
- fn check_changes_tries_proof( - &self, - cht_size: NumberFor, - remote_roots: &BTreeMap, B::Hash>, - remote_roots_proof: StorageProof, - ) -> ClientResult<()> - where - H: Hasher, - H::Out: Ord + codec::Codec, - { - // all the checks are sharing the same storage - let storage = remote_roots_proof.into_memory_db(); - - // remote_roots.keys() are sorted => we can use this to group changes tries roots - // that are belongs to the same CHT - let blocks = remote_roots.keys().cloned(); - cht::for_each_cht_group::(cht_size, blocks, |mut storage, _, cht_blocks| { - // get local changes trie CHT root for given CHT - // it should be there, because it is never pruned AND request has been composed - // when required header has been pruned (=> replaced with CHT) - let first_block = cht_blocks.first().cloned() - .expect("for_each_cht_group never calls callback with empty groups"); - let local_cht_root = self.blockchain.storage().changes_trie_cht_root(cht_size, first_block)? - .ok_or(ClientError::InvalidCHTProof)?; - - // check changes trie root for every block within CHT range - for block in cht_blocks { - // check if the proofs storage contains the root - // normally this happens in when the proving backend is created, but since - // we share the storage for multiple checks, do it here - let mut cht_root = H::Out::default(); - cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); - if !storage.contains(&cht_root, EMPTY_PREFIX) { - return Err(ClientError::InvalidCHTProof.into()); - } - - // check proof for single changes trie root - let proving_backend = TrieBackend::new(storage, cht_root); - let remote_changes_trie_root = remote_roots[&block]; - cht::check_proof_on_proving_backend::( - local_cht_root, - block, - remote_changes_trie_root, - &proving_backend, - )?; - - // and return the storage to use in following checks - storage = proving_backend.into_storage(); - } - - Ok(storage) - }, storage) - } + } + + // if request has been composed when some required headers were 
already pruned + // => remote node has sent us CHT-based proof of required changes tries roots + // => check that this proof is correct before proceeding with changes proof + let remote_max_block = remote_proof.max_block; + let remote_roots = remote_proof.roots; + let remote_roots_proof = remote_proof.roots_proof; + let remote_proof = remote_proof.proof; + if !remote_roots.is_empty() { + self.check_changes_tries_proof(cht_size, &remote_roots, remote_roots_proof)?; + } + + // and now check the key changes proof + get the changes + let mut result = Vec::new(); + let proof_storage = InMemoryChangesTrieStorage::with_proof(remote_proof); + for config_range in &request.changes_trie_configs { + let result_range = key_changes_proof_check_with_db::( + ChangesTrieConfigurationRange { + config: config_range + .config + .as_ref() + .ok_or(ClientError::ChangesTriesNotSupported)?, + zero: config_range.zero.0, + end: config_range.end.map(|(n, _)| n), + }, + &RootsStorage { + roots: (request.tries_roots.0, &request.tries_roots.2), + prev_roots: &remote_roots, + }, + &proof_storage, + request.first_block.0, + &ChangesTrieAnchorBlockId { + hash: convert_hash(&request.last_block.1), + number: request.last_block.0, + }, + remote_max_block, + request.storage_key.as_ref().map(Vec::as_slice), + &request.key, + ) + .map_err(|err| ClientError::ChangesTrieAccessFailed(err))?; + result.extend(result_range); + } + + Ok(result) + } + + /// Check CHT-based proof for changes tries roots. 
+ fn check_changes_tries_proof( + &self, + cht_size: NumberFor, + remote_roots: &BTreeMap, B::Hash>, + remote_roots_proof: StorageProof, + ) -> ClientResult<()> + where + H: Hasher, + H::Out: Ord + codec::Codec, + { + // all the checks are sharing the same storage + let storage = remote_roots_proof.into_memory_db(); + + // remote_roots.keys() are sorted => we can use this to group changes tries roots + // that are belongs to the same CHT + let blocks = remote_roots.keys().cloned(); + cht::for_each_cht_group::( + cht_size, + blocks, + |mut storage, _, cht_blocks| { + // get local changes trie CHT root for given CHT + // it should be there, because it is never pruned AND request has been composed + // when required header has been pruned (=> replaced with CHT) + let first_block = cht_blocks + .first() + .cloned() + .expect("for_each_cht_group never calls callback with empty groups"); + let local_cht_root = self + .blockchain + .storage() + .changes_trie_cht_root(cht_size, first_block)? + .ok_or(ClientError::InvalidCHTProof)?; + + // check changes trie root for every block within CHT range + for block in cht_blocks { + // check if the proofs storage contains the root + // normally this happens in when the proving backend is created, but since + // we share the storage for multiple checks, do it here + let mut cht_root = H::Out::default(); + cht_root.as_mut().copy_from_slice(local_cht_root.as_ref()); + if !storage.contains(&cht_root, EMPTY_PREFIX) { + return Err(ClientError::InvalidCHTProof.into()); + } + + // check proof for single changes trie root + let proving_backend = TrieBackend::new(storage, cht_root); + let remote_changes_trie_root = remote_roots[&block]; + cht::check_proof_on_proving_backend::( + local_cht_root, + block, + remote_changes_trie_root, + &proving_backend, + )?; + + // and return the storage to use in following checks + storage = proving_backend.into_storage(); + } + + Ok(storage) + }, + storage, + ) + } } impl FetchChecker for LightDataChecker - 
where - Block: BlockT, - E: CodeExecutor + Clone + 'static, - H: Hasher, - H::Out: Ord + codec::Codec + 'static, - S: BlockchainStorage, +where + Block: BlockT, + E: CodeExecutor + Clone + 'static, + H: Hasher, + H::Out: Ord + codec::Codec + 'static, + S: BlockchainStorage, { - fn check_header_proof( - &self, - request: &RemoteHeaderRequest, - remote_header: Option, - remote_proof: StorageProof, - ) -> ClientResult { - let remote_header = remote_header.ok_or_else(|| - ClientError::from(ClientError::InvalidCHTProof))?; - let remote_header_hash = remote_header.hash(); - cht::check_proof::( - request.cht_root, - request.block, - remote_header_hash, - remote_proof, - ).map(|_| remote_header) - } - - fn check_read_proof( - &self, - request: &RemoteReadRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>> { - read_proof_check::( - convert_hash(request.header.state_root()), - remote_proof, - request.keys.iter(), - ).map_err(Into::into) - } - - fn check_read_child_proof( - &self, - request: &RemoteReadChildRequest, - remote_proof: StorageProof, - ) -> ClientResult, Option>>> { - read_child_proof_check::( - convert_hash(request.header.state_root()), - remote_proof, - &request.storage_key, - request.keys.iter(), - ).map_err(Into::into) - } - - fn check_execution_proof( - &self, - request: &RemoteCallRequest, - remote_proof: StorageProof, - ) -> ClientResult> { - check_execution_proof::<_, _, H>( - &self.executor, - self.spawn_handle.clone(), - request, - remote_proof, - ) - } - - fn check_changes_proof( - &self, - request: &RemoteChangesRequest, - remote_proof: ChangesProof - ) -> ClientResult, u32)>> { - self.check_changes_proof_with_cht_size(request, remote_proof, cht::size()) - } - - fn check_body_proof( - &self, - request: &RemoteBodyRequest, - body: Vec - ) -> ClientResult> { - // TODO: #2621 - let extrinsics_root = HashFor::::ordered_trie_root( - body.iter().map(Encode::encode).collect(), - ); - if *request.header.extrinsics_root() == extrinsics_root { 
- Ok(body) - } else { - Err(format!("RemoteBodyRequest: invalid extrinsics root expected: {} but got {}", - *request.header.extrinsics_root(), - extrinsics_root, - ).into()) - } - - } + fn check_header_proof( + &self, + request: &RemoteHeaderRequest, + remote_header: Option, + remote_proof: StorageProof, + ) -> ClientResult { + let remote_header = + remote_header.ok_or_else(|| ClientError::from(ClientError::InvalidCHTProof))?; + let remote_header_hash = remote_header.hash(); + cht::check_proof::( + request.cht_root, + request.block, + remote_header_hash, + remote_proof, + ) + .map(|_| remote_header) + } + + fn check_read_proof( + &self, + request: &RemoteReadRequest, + remote_proof: StorageProof, + ) -> ClientResult, Option>>> { + read_proof_check::( + convert_hash(request.header.state_root()), + remote_proof, + request.keys.iter(), + ) + .map_err(Into::into) + } + + fn check_read_child_proof( + &self, + request: &RemoteReadChildRequest, + remote_proof: StorageProof, + ) -> ClientResult, Option>>> { + read_child_proof_check::( + convert_hash(request.header.state_root()), + remote_proof, + &request.storage_key, + request.keys.iter(), + ) + .map_err(Into::into) + } + + fn check_execution_proof( + &self, + request: &RemoteCallRequest, + remote_proof: StorageProof, + ) -> ClientResult> { + check_execution_proof::<_, _, H>( + &self.executor, + self.spawn_handle.clone(), + request, + remote_proof, + ) + } + + fn check_changes_proof( + &self, + request: &RemoteChangesRequest, + remote_proof: ChangesProof, + ) -> ClientResult, u32)>> { + self.check_changes_proof_with_cht_size(request, remote_proof, cht::size()) + } + + fn check_body_proof( + &self, + request: &RemoteBodyRequest, + body: Vec, + ) -> ClientResult> { + // TODO: #2621 + let extrinsics_root = + HashFor::::ordered_trie_root(body.iter().map(Encode::encode).collect()); + if *request.header.extrinsics_root() == extrinsics_root { + Ok(body) + } else { + Err(format!( + "RemoteBodyRequest: invalid extrinsics root 
expected: {} but got {}", + *request.header.extrinsics_root(), + extrinsics_root, + ) + .into()) + } + } } /// A view of BTreeMap as a changes trie roots storage. struct RootsStorage<'a, Number: AtLeast32Bit, Hash: 'a> { - roots: (Number, &'a [Hash]), - prev_roots: &'a BTreeMap, + roots: (Number, &'a [Hash]), + prev_roots: &'a BTreeMap, } impl<'a, H, Number, Hash> ChangesTrieRootsStorage for RootsStorage<'a, Number, Hash> - where - H: Hasher, - Number: std::fmt::Display + std::hash::Hash + Clone + AtLeast32Bit + Encode + Decode + Send + Sync + 'static, - Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, +where + H: Hasher, + Number: std::fmt::Display + + std::hash::Hash + + Clone + + AtLeast32Bit + + Encode + + Decode + + Send + + Sync + + 'static, + Hash: 'a + Send + Sync + Clone + AsRef<[u8]>, { - fn build_anchor( - &self, - _hash: H::Out, - ) -> Result, String> { - Err("build_anchor is only called when building block".into()) - } - - fn root( - &self, - _anchor: &ChangesTrieAnchorBlockId, - block: Number, - ) -> Result, String> { - // we can't ask for roots from parallel forks here => ignore anchor - let root = if block < self.roots.0 { - self.prev_roots.get(&Number::unique_saturated_from(block)).cloned() - } else { - let index: Option = block.checked_sub(&self.roots.0).and_then(|index| index.checked_into()); - match index { - Some(index) => self.roots.1.get(index as usize).cloned(), - None => None, - } - }; - - Ok(root.map(|root| { - let mut hasher_root: H::Out = Default::default(); - hasher_root.as_mut().copy_from_slice(root.as_ref()); - hasher_root - })) - } + fn build_anchor( + &self, + _hash: H::Out, + ) -> Result, String> { + Err("build_anchor is only called when building block".into()) + } + + fn root( + &self, + _anchor: &ChangesTrieAnchorBlockId, + block: Number, + ) -> Result, String> { + // we can't ask for roots from parallel forks here => ignore anchor + let root = if block < self.roots.0 { + self.prev_roots + .get(&Number::unique_saturated_from(block)) 
+ .cloned() + } else { + let index: Option = block + .checked_sub(&self.roots.0) + .and_then(|index| index.checked_into()); + match index { + Some(index) => self.roots.1.get(index as usize).cloned(), + None => None, + } + }; + + Ok(root.map(|root| { + let mut hasher_root: H::Out = Default::default(); + hasher_root.as_mut().copy_from_slice(root.as_ref()); + hasher_root + })) + } } #[cfg(test)] pub mod tests { - use codec::Decode; - use crate::client::tests::prepare_client_with_key_changes; - use sc_executor::{NativeExecutor, WasmExecutionMethod}; - use sp_blockchain::Error as ClientError; - use sc_client_api::backend::NewBlockState; - use substrate_test_runtime_client::{ - blockchain::HeaderBackend, AccountKeyring, ClientBlockImportExt, - runtime::{self, Hash, Block, Header, Extrinsic}, - tasks_executor, - }; - use sp_consensus::BlockOrigin; - - use crate::in_mem::Blockchain as InMemoryBlockchain; - use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; - use crate::light::blockchain::tests::{DummyStorage, DummyBlockchain}; - use sp_core::{blake2_256, ChangesTrieConfiguration, H256}; - use sp_core::storage::{well_known_keys, StorageKey, ChildInfo}; - use sp_runtime::{generic::BlockId, traits::BlakeTwo256}; - use sp_state_machine::Backend; - use super::*; - use sc_client_api::{StorageProvider, ProofProvider}; - use sc_block_builder::BlockBuilderProvider; - - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - - type TestChecker = LightDataChecker< - NativeExecutor, - BlakeTwo256, - Block, - DummyStorage, - >; - - fn local_executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - } - - fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) { - // prepare remote client - let remote_client = substrate_test_runtime_client::new(); - let remote_block_id = BlockId::Number(0); - let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); - let 
mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); - - // 'fetch' read proof from remote node - let heap_pages = remote_client.storage(&remote_block_id, &StorageKey(well_known_keys::HEAP_PAGES.to_vec())) - .unwrap() - .and_then(|v| Decode::decode(&mut &v.0[..]).ok()).unwrap(); - let remote_read_proof = remote_client.read_proof( - &remote_block_id, - &mut std::iter::once(well_known_keys::HEAP_PAGES), - ).unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - local_storage.insert( - remote_block_hash, - remote_block_header.clone(), - None, - None, - NewBlockState::Final, - ).unwrap(); - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - (local_checker, remote_block_header, remote_read_proof, heap_pages) - } - - fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { - use substrate_test_runtime_client::DefaultTestClientBuilderExt; - use substrate_test_runtime_client::TestClientBuilderExt; - // prepare remote client - let remote_client = substrate_test_runtime_client::TestClientBuilder::new() - .add_extra_child_storage( - b":child_storage:default:child1".to_vec(), - CHILD_INFO_1, - b"key1".to_vec(), - b"value1".to_vec(), - ).build(); - let remote_block_id = BlockId::Number(0); - let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); - let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); - remote_block_header.state_root = remote_client.state_at(&remote_block_id).unwrap() - .storage_root(::std::iter::empty()).0.into(); - - // 'fetch' child read proof from remote node - let child_value = remote_client.child_storage( - &remote_block_id, - 
&StorageKey(b":child_storage:default:child1".to_vec()), - CHILD_INFO_1, - &StorageKey(b"key1".to_vec()), - ).unwrap().unwrap().0; - assert_eq!(b"value1"[..], child_value[..]); - let remote_read_proof = remote_client.read_child_proof( - &remote_block_id, - b":child_storage:default:child1", - CHILD_INFO_1, - &mut std::iter::once("key1".as_bytes()), - ).unwrap(); - - // check locally - let local_storage = InMemoryBlockchain::::new(); - local_storage.insert( - remote_block_hash, - remote_block_header.clone(), - None, - None, - NewBlockState::Final, - ).unwrap(); - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - (local_checker, remote_block_header, remote_read_proof, child_value) - } - - fn prepare_for_header_proof_check(insert_cht: bool) -> (TestChecker, Hash, Header, StorageProof) { - // prepare remote client - let mut remote_client = substrate_test_runtime_client::new(); - let mut local_headers_hashes = Vec::new(); - for i in 0..4 { - let block = remote_client.new_block(Default::default()).unwrap().build().unwrap().block; - remote_client.import(BlockOrigin::Own, block).unwrap(); - local_headers_hashes.push( - remote_client.block_hash(i + 1) - .map_err(|_| ClientError::Backend("TestError".into())) - ); - } - - // 'fetch' header proof from remote node - let remote_block_id = BlockId::Number(1); - let (remote_block_header, remote_header_proof) = remote_client.header_proof_with_cht_size(&remote_block_id, 4).unwrap(); - - // check remote read proof locally - let local_storage = InMemoryBlockchain::::new(); - let local_cht_root = cht::compute_root::(4, 0, local_headers_hashes).unwrap(); - if insert_cht { - local_storage.insert_cht_root(1, local_cht_root); - } - let local_checker = LightDataChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - (local_checker, local_cht_root, remote_block_header, remote_header_proof) - 
} - - fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { - use sp_trie::{TrieConfiguration, trie_types::Layout}; - let iter = extrinsics.iter().map(Encode::encode); - let extrinsics_root = Layout::::ordered_trie_root(iter); - - // only care about `extrinsics_root` - Header::new(0, extrinsics_root, H256::zero(), H256::zero(), Default::default()) - } - - #[test] - fn storage_read_proof_is_generated_and_checked() { - let (local_checker, remote_block_header, remote_read_proof, heap_pages) = prepare_for_read_proof_check(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_proof(&RemoteReadRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - keys: vec![well_known_keys::HEAP_PAGES.to_vec()], - retry_count: None, - }, remote_read_proof).unwrap().remove(well_known_keys::HEAP_PAGES).unwrap().unwrap()[0], heap_pages as u8); - } - - #[test] - fn storage_child_read_proof_is_generated_and_checked() { - let ( - local_checker, - remote_block_header, - remote_read_proof, - result, - ) = prepare_for_read_child_proof_check(); - let child_infos = CHILD_INFO_1.info(); - assert_eq!((&local_checker as &dyn FetchChecker).check_read_child_proof( - &RemoteReadChildRequest::
{ - block: remote_block_header.hash(), - header: remote_block_header, - storage_key: b":child_storage:default:child1".to_vec(), - child_info: child_infos.0.to_vec(), - child_type: child_infos.1, - keys: vec![b"key1".to_vec()], - retry_count: None, - }, - remote_read_proof - ).unwrap().remove(b"key1".as_ref()).unwrap().unwrap(), result); - } - - #[test] - fn header_proof_is_generated_and_checked() { - let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - assert_eq!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).unwrap(), remote_block_header); - } - - #[test] - fn check_header_proof_fails_if_cht_root_is_invalid() { - let (local_checker, _, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ - cht_root: Default::default(), - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); - } - - #[test] - fn check_header_proof_fails_if_invalid_header_provided() { - let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = prepare_for_header_proof_check(true); - remote_block_header.number = 100; - assert!((&local_checker as &dyn FetchChecker).check_header_proof(&RemoteHeaderRequest::
{ - cht_root: local_cht_root, - block: 1, - retry_count: None, - }, Some(remote_block_header.clone()), remote_header_proof).is_err()); - } - - #[test] - fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { - let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - let local_checker = &local_checker as &dyn FetchChecker; - let max = remote_client.chain_info().best_number; - let max_hash = remote_client.chain_info().best_hash; - - for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { - let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); - let end_hash = remote_client.block_hash(end).unwrap().unwrap(); - - // 'fetch' changes proof from remote node - let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, None, &key - ).unwrap(); - - // check proof on local client - let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range), - key: key.0, - storage_key: None, - retry_count: None, - }; - let local_result = local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).unwrap(); - - // ..and ensure that result is the same as on remote node - match local_result == expected_result { - true => (), - false => panic!(format!("Failed test {}: local = {:?}, expected = {:?}", - index, local_result, expected_result)), - } - } - } - - #[test] - fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let dave = blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, None, &dave, 4 - ).unwrap(); - - // prepare local checker, having a root of changes trie CHT#0 - let local_cht_root = cht::compute_root::(4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - local_executor(), - tasks_executor(), - ); - - // check proof on local client - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (1, b1), - last_block: (4, b4), - max_block: (4, b4), - tries_roots: (3, b3, vec![remote_roots[2].clone(), remote_roots[3].clone()]), - storage_key: None, - key: dave.0, - retry_count: None, - }; - let local_result = local_checker.check_changes_proof_with_cht_size(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof, - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }, 4).unwrap(); - - assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); - } - - #[test] - fn check_changes_proof_fails_if_proof_is_wrong() { - let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - let local_checker = &local_checker as &dyn FetchChecker; - let max = remote_client.chain_info().best_number; - let max_hash = remote_client.chain_info().best_hash; - - let (begin, end, key, _) = test_cases[0].clone(); - let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); - let end_hash = remote_client.block_hash(end).unwrap().unwrap(); - - // 'fetch' changes proof from remote node - let key = StorageKey(key); - let remote_proof = remote_client.key_changes_proof( - begin_hash, end_hash, begin_hash, max_hash, None, &key).unwrap(); - - let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); - let config = ChangesTrieConfiguration::new(4, 2); - let request = RemoteChangesRequest::
{ - changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { - zero: (0, Default::default()), - end: None, - config: Some(config), - }], - first_block: (begin, begin_hash), - last_block: (end, end_hash), - max_block: (max, max_hash), - tries_roots: (begin, begin_hash, local_roots_range.clone()), - storage_key: None, - key: key.0, - retry_count: None, - }; - - // check proof on local client using max from the future - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block + 1, - proof: remote_proof.proof.clone(), - roots: remote_proof.roots.clone(), - roots_proof: remote_proof.roots_proof.clone(), - }).is_err()); - - // check proof on local client using broken proof - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: local_roots_range.clone().into_iter().map(|v| v.as_ref().to_vec()).collect(), - roots: remote_proof.roots, - roots_proof: remote_proof.roots_proof, - }).is_err()); - - // extra roots proofs are provided - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(begin - 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - }).is_err()); - assert!(local_checker.check_changes_proof(&request, ChangesProof { - max_block: remote_proof.max_block, - proof: remote_proof.proof.clone(), - roots: vec![(end + 1, Default::default())].into_iter().collect(), - roots_proof: StorageProof::empty(), - }).is_err()); - } - - #[test] - fn check_changes_tries_proof_fails_if_proof_is_wrong() { - // we're testing this test case here: - // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), - let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); - let local_cht_root = cht::compute_root::( - 4, 0, remote_roots.iter().cloned().map(|ct| Ok(Some(ct)))).unwrap(); - let dave = 
blake2_256(&runtime::system::balance_of_key(AccountKeyring::Dave.into())).to_vec(); - let dave = StorageKey(dave); - - // 'fetch' changes proof from remote node: - // we're fetching changes for range b1..b4 - // we do not know changes trie roots before b3 (i.e. we only know b3+b4) - // but we have changes trie CHT root for b1...b4 - let b1 = remote_client.block_hash_from_id(&BlockId::Number(1)).unwrap().unwrap(); - let b3 = remote_client.block_hash_from_id(&BlockId::Number(3)).unwrap().unwrap(); - let b4 = remote_client.block_hash_from_id(&BlockId::Number(4)).unwrap().unwrap(); - let remote_proof = remote_client.key_changes_proof_with_cht_size( - b1, b4, b3, b4, None, &dave, 4 - ).unwrap(); - - // fails when changes trie CHT is missing from the local db - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - assert!(local_checker.check_changes_tries_proof(4, &remote_proof.roots, - remote_proof.roots_proof.clone()).is_err()); - - // fails when proof is broken - let mut local_storage = DummyStorage::new(); - local_storage.changes_tries_cht_roots.insert(0, local_cht_root); - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(local_storage)), - local_executor(), - tasks_executor(), - ); - let result = local_checker.check_changes_tries_proof( - 4, &remote_proof.roots, StorageProof::empty() - ); - assert!(result.is_err()); - } - - #[test] - fn check_body_proof_faulty() { - let header = header_with_computed_extrinsics_root( - vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])] - ); - let block = Block::new(header.clone(), Vec::new()); - - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; - - assert!( - local_checker.check_body_proof(&body_request, block.extrinsics).is_err(), - "vec![1, 2, 3, 4] 
!= vec![]" - ); - } - - #[test] - fn check_body_proof_of_same_data_should_succeed() { - let extrinsics = vec![Extrinsic::IncludeData(vec![1, 2, 3, 4, 5, 6, 7, 8, 255])]; - - let header = header_with_computed_extrinsics_root(extrinsics.clone()); - let block = Block::new(header.clone(), extrinsics); - - let local_checker = TestChecker::new( - Arc::new(DummyBlockchain::new(DummyStorage::new())), - local_executor(), - tasks_executor(), - ); - - let body_request = RemoteBodyRequest { - header: header.clone(), - retry_count: None, - }; - - assert!(local_checker.check_body_proof(&body_request, block.extrinsics).is_ok()); - } + use crate::client::tests::prepare_client_with_key_changes; + use codec::Decode; + use sc_client_api::backend::NewBlockState; + use sc_executor::{NativeExecutor, WasmExecutionMethod}; + use sp_blockchain::Error as ClientError; + use sp_consensus::BlockOrigin; + use substrate_test_runtime_client::{ + blockchain::HeaderBackend, + runtime::{self, Block, Extrinsic, Hash, Header}, + tasks_executor, AccountKeyring, ClientBlockImportExt, + }; + + use super::*; + use crate::in_mem::Blockchain as InMemoryBlockchain; + use crate::light::blockchain::tests::{DummyBlockchain, DummyStorage}; + use crate::light::fetcher::{FetchChecker, LightDataChecker, RemoteHeaderRequest}; + use sc_block_builder::BlockBuilderProvider; + use sc_client_api::{ProofProvider, StorageProvider}; + use sp_core::storage::{well_known_keys, ChildInfo, StorageKey}; + use sp_core::{blake2_256, ChangesTrieConfiguration, H256}; + use sp_runtime::{generic::BlockId, traits::BlakeTwo256}; + use sp_state_machine::Backend; + + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + + type TestChecker = LightDataChecker< + NativeExecutor, + BlakeTwo256, + Block, + DummyStorage, + >; + + fn local_executor() -> NativeExecutor { + NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) + } + + fn prepare_for_read_proof_check() -> (TestChecker, Header, StorageProof, u32) 
{ + // prepare remote client + let remote_client = substrate_test_runtime_client::new(); + let remote_block_id = BlockId::Number(0); + let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); + let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + remote_block_header.state_root = remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(::std::iter::empty()) + .0 + .into(); + + // 'fetch' read proof from remote node + let heap_pages = remote_client + .storage( + &remote_block_id, + &StorageKey(well_known_keys::HEAP_PAGES.to_vec()), + ) + .unwrap() + .and_then(|v| Decode::decode(&mut &v.0[..]).ok()) + .unwrap(); + let remote_read_proof = remote_client + .read_proof( + &remote_block_id, + &mut std::iter::once(well_known_keys::HEAP_PAGES), + ) + .unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + local_storage + .insert( + remote_block_hash, + remote_block_header.clone(), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + ( + local_checker, + remote_block_header, + remote_read_proof, + heap_pages, + ) + } + + fn prepare_for_read_child_proof_check() -> (TestChecker, Header, StorageProof, Vec) { + use substrate_test_runtime_client::DefaultTestClientBuilderExt; + use substrate_test_runtime_client::TestClientBuilderExt; + // prepare remote client + let remote_client = substrate_test_runtime_client::TestClientBuilder::new() + .add_extra_child_storage( + b":child_storage:default:child1".to_vec(), + CHILD_INFO_1, + b"key1".to_vec(), + b"value1".to_vec(), + ) + .build(); + let remote_block_id = BlockId::Number(0); + let remote_block_hash = remote_client.block_hash(0).unwrap().unwrap(); + let mut remote_block_header = remote_client.header(&remote_block_id).unwrap().unwrap(); + remote_block_header.state_root = 
remote_client + .state_at(&remote_block_id) + .unwrap() + .storage_root(::std::iter::empty()) + .0 + .into(); + + // 'fetch' child read proof from remote node + let child_value = remote_client + .child_storage( + &remote_block_id, + &StorageKey(b":child_storage:default:child1".to_vec()), + CHILD_INFO_1, + &StorageKey(b"key1".to_vec()), + ) + .unwrap() + .unwrap() + .0; + assert_eq!(b"value1"[..], child_value[..]); + let remote_read_proof = remote_client + .read_child_proof( + &remote_block_id, + b":child_storage:default:child1", + CHILD_INFO_1, + &mut std::iter::once("key1".as_bytes()), + ) + .unwrap(); + + // check locally + let local_storage = InMemoryBlockchain::::new(); + local_storage + .insert( + remote_block_hash, + remote_block_header.clone(), + None, + None, + NewBlockState::Final, + ) + .unwrap(); + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + ( + local_checker, + remote_block_header, + remote_read_proof, + child_value, + ) + } + + fn prepare_for_header_proof_check( + insert_cht: bool, + ) -> (TestChecker, Hash, Header, StorageProof) { + // prepare remote client + let mut remote_client = substrate_test_runtime_client::new(); + let mut local_headers_hashes = Vec::new(); + for i in 0..4 { + let block = remote_client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + remote_client.import(BlockOrigin::Own, block).unwrap(); + local_headers_hashes.push( + remote_client + .block_hash(i + 1) + .map_err(|_| ClientError::Backend("TestError".into())), + ); + } + + // 'fetch' header proof from remote node + let remote_block_id = BlockId::Number(1); + let (remote_block_header, remote_header_proof) = remote_client + .header_proof_with_cht_size(&remote_block_id, 4) + .unwrap(); + + // check remote read proof locally + let local_storage = InMemoryBlockchain::::new(); + let local_cht_root = + cht::compute_root::(4, 0, 
local_headers_hashes).unwrap(); + if insert_cht { + local_storage.insert_cht_root(1, local_cht_root); + } + let local_checker = LightDataChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + ( + local_checker, + local_cht_root, + remote_block_header, + remote_header_proof, + ) + } + + fn header_with_computed_extrinsics_root(extrinsics: Vec) -> Header { + use sp_trie::{trie_types::Layout, TrieConfiguration}; + let iter = extrinsics.iter().map(Encode::encode); + let extrinsics_root = Layout::::ordered_trie_root(iter); + + // only care about `extrinsics_root` + Header::new( + 0, + extrinsics_root, + H256::zero(), + H256::zero(), + Default::default(), + ) + } + + #[test] + fn storage_read_proof_is_generated_and_checked() { + let (local_checker, remote_block_header, remote_read_proof, heap_pages) = + prepare_for_read_proof_check(); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_read_proof( + &RemoteReadRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + keys: vec![well_known_keys::HEAP_PAGES.to_vec()], + retry_count: None, + }, + remote_read_proof + ) + .unwrap() + .remove(well_known_keys::HEAP_PAGES) + .unwrap() + .unwrap()[0], + heap_pages as u8 + ); + } + + #[test] + fn storage_child_read_proof_is_generated_and_checked() { + let (local_checker, remote_block_header, remote_read_proof, result) = + prepare_for_read_child_proof_check(); + let child_infos = CHILD_INFO_1.info(); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_read_child_proof( + &RemoteReadChildRequest::
{ + block: remote_block_header.hash(), + header: remote_block_header, + storage_key: b":child_storage:default:child1".to_vec(), + child_info: child_infos.0.to_vec(), + child_type: child_infos.1, + keys: vec![b"key1".to_vec()], + retry_count: None, + }, + remote_read_proof + ) + .unwrap() + .remove(b"key1".as_ref()) + .unwrap() + .unwrap(), + result + ); + } + + #[test] + fn header_proof_is_generated_and_checked() { + let (local_checker, local_cht_root, remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); + assert_eq!( + (&local_checker as &dyn FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .unwrap(), + remote_block_header + ); + } + + #[test] + fn check_header_proof_fails_if_cht_root_is_invalid() { + let (local_checker, _, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &dyn FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::
{ + cht_root: Default::default(), + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .is_err()); + } + + #[test] + fn check_header_proof_fails_if_invalid_header_provided() { + let (local_checker, local_cht_root, mut remote_block_header, remote_header_proof) = + prepare_for_header_proof_check(true); + remote_block_header.number = 100; + assert!((&local_checker as &dyn FetchChecker) + .check_header_proof( + &RemoteHeaderRequest::
{ + cht_root: local_cht_root, + block: 1, + retry_count: None, + }, + Some(remote_block_header.clone()), + remote_header_proof + ) + .is_err()); + } + + #[test] + fn changes_proof_is_generated_and_checked_when_headers_are_not_pruned() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + let local_checker = &local_checker as &dyn FetchChecker; + let max = remote_client.chain_info().best_number; + let max_hash = remote_client.chain_info().best_hash; + + for (index, (begin, end, key, expected_result)) in test_cases.into_iter().enumerate() { + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client + .key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key) + .unwrap(); + + // check proof on local client + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range), + key: key.0, + storage_key: None, + retry_count: None, + }; + let local_result = local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, + ) + .unwrap(); + + // ..and ensure that result is the same as on remote node + match local_result == expected_result { + true => (), + false => panic!(format!( + "Failed test {}: local = {:?}, expected = {:?}", + index, local_result, expected_result + )), + } + } + } + + #[test] + fn changes_proof_is_generated_and_checked_when_headers_are_pruned() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let dave = blake2_256(&runtime::system::balance_of_key( + AccountKeyring::Dave.into(), + )) + .to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. 
we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client + .block_hash_from_id(&BlockId::Number(1)) + .unwrap() + .unwrap(); + let b3 = remote_client + .block_hash_from_id(&BlockId::Number(3)) + .unwrap() + .unwrap(); + let b4 = remote_client + .block_hash_from_id(&BlockId::Number(4)) + .unwrap() + .unwrap(); + let remote_proof = remote_client + .key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4) + .unwrap(); + + // prepare local checker, having a root of changes trie CHT#0 + let local_cht_root = cht::compute_root::( + 4, + 0, + remote_roots.iter().cloned().map(|ct| Ok(Some(ct))), + ) + .unwrap(); + let mut local_storage = DummyStorage::new(); + local_storage + .changes_tries_cht_roots + .insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + local_executor(), + tasks_executor(), + ); + + // check proof on local client + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (1, b1), + last_block: (4, b4), + max_block: (4, b4), + tries_roots: ( + 3, + b3, + vec![remote_roots[2].clone(), remote_roots[3].clone()], + ), + storage_key: None, + key: dave.0, + retry_count: None, + }; + let local_result = local_checker + .check_changes_proof_with_cht_size( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof, + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + }, + 4, + ) + .unwrap(); + + assert_eq!(local_result, vec![(4, 0), (1, 1), (1, 0)]); + } + + #[test] + fn check_changes_proof_fails_if_proof_is_wrong() { + let (remote_client, local_roots, test_cases) = prepare_client_with_key_changes(); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + let local_checker = &local_checker as &dyn FetchChecker; + let max = remote_client.chain_info().best_number; + let max_hash = remote_client.chain_info().best_hash; + + let (begin, end, key, _) = test_cases[0].clone(); + let begin_hash = remote_client.block_hash(begin).unwrap().unwrap(); + let end_hash = remote_client.block_hash(end).unwrap().unwrap(); + + // 'fetch' changes proof from remote node + let key = StorageKey(key); + let remote_proof = remote_client + .key_changes_proof(begin_hash, end_hash, begin_hash, max_hash, None, &key) + .unwrap(); + + let local_roots_range = local_roots.clone()[(begin - 1) as usize..].to_vec(); + let config = ChangesTrieConfiguration::new(4, 2); + let request = RemoteChangesRequest::
{ + changes_trie_configs: vec![sp_core::ChangesTrieConfigurationRange { + zero: (0, Default::default()), + end: None, + config: Some(config), + }], + first_block: (begin, begin_hash), + last_block: (end, end_hash), + max_block: (max, max_hash), + tries_roots: (begin, begin_hash, local_roots_range.clone()), + storage_key: None, + key: key.0, + retry_count: None, + }; + + // check proof on local client using max from the future + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block + 1, + proof: remote_proof.proof.clone(), + roots: remote_proof.roots.clone(), + roots_proof: remote_proof.roots_proof.clone(), + } + ) + .is_err()); + + // check proof on local client using broken proof + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: local_roots_range + .clone() + .into_iter() + .map(|v| v.as_ref().to_vec()) + .collect(), + roots: remote_proof.roots, + roots_proof: remote_proof.roots_proof, + } + ) + .is_err()); + + // extra roots proofs are provided + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(begin - 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + } + ) + .is_err()); + assert!(local_checker + .check_changes_proof( + &request, + ChangesProof { + max_block: remote_proof.max_block, + proof: remote_proof.proof.clone(), + roots: vec![(end + 1, Default::default())].into_iter().collect(), + roots_proof: StorageProof::empty(), + } + ) + .is_err()); + } + + #[test] + fn check_changes_tries_proof_fails_if_proof_is_wrong() { + // we're testing this test case here: + // (1, 4, dave.clone(), vec![(4, 0), (1, 1), (1, 0)]), + let (remote_client, remote_roots, _) = prepare_client_with_key_changes(); + let local_cht_root = cht::compute_root::( + 4, + 0, + remote_roots.iter().cloned().map(|ct| 
Ok(Some(ct))), + ) + .unwrap(); + let dave = blake2_256(&runtime::system::balance_of_key( + AccountKeyring::Dave.into(), + )) + .to_vec(); + let dave = StorageKey(dave); + + // 'fetch' changes proof from remote node: + // we're fetching changes for range b1..b4 + // we do not know changes trie roots before b3 (i.e. we only know b3+b4) + // but we have changes trie CHT root for b1...b4 + let b1 = remote_client + .block_hash_from_id(&BlockId::Number(1)) + .unwrap() + .unwrap(); + let b3 = remote_client + .block_hash_from_id(&BlockId::Number(3)) + .unwrap() + .unwrap(); + let b4 = remote_client + .block_hash_from_id(&BlockId::Number(4)) + .unwrap() + .unwrap(); + let remote_proof = remote_client + .key_changes_proof_with_cht_size(b1, b4, b3, b4, None, &dave, 4) + .unwrap(); + + // fails when changes trie CHT is missing from the local db + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + assert!(local_checker + .check_changes_tries_proof(4, &remote_proof.roots, remote_proof.roots_proof.clone()) + .is_err()); + + // fails when proof is broken + let mut local_storage = DummyStorage::new(); + local_storage + .changes_tries_cht_roots + .insert(0, local_cht_root); + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(local_storage)), + local_executor(), + tasks_executor(), + ); + let result = + local_checker.check_changes_tries_proof(4, &remote_proof.roots, StorageProof::empty()); + assert!(result.is_err()); + } + + #[test] + fn check_body_proof_faulty() { + let header = + header_with_computed_extrinsics_root(vec![Extrinsic::IncludeData(vec![1, 2, 3, 4])]); + let block = Block::new(header.clone(), Vec::new()); + + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + + let body_request = RemoteBodyRequest { + header: header.clone(), + retry_count: None, + }; + + assert!( + 
local_checker + .check_body_proof(&body_request, block.extrinsics) + .is_err(), + "vec![1, 2, 3, 4] != vec![]" + ); + } + + #[test] + fn check_body_proof_of_same_data_should_succeed() { + let extrinsics = vec![Extrinsic::IncludeData(vec![1, 2, 3, 4, 5, 6, 7, 8, 255])]; + + let header = header_with_computed_extrinsics_root(extrinsics.clone()); + let block = Block::new(header.clone(), extrinsics); + + let local_checker = TestChecker::new( + Arc::new(DummyBlockchain::new(DummyStorage::new())), + local_executor(), + tasks_executor(), + ); + + let body_request = RemoteBodyRequest { + header: header.clone(), + retry_count: None, + }; + + assert!(local_checker + .check_body_proof(&body_request, block.extrinsics) + .is_ok()); + } } diff --git a/client/src/light/mod.rs b/client/src/light/mod.rs index 2bb6c85376..3cdce06e52 100644 --- a/client/src/light/mod.rs +++ b/client/src/light/mod.rs @@ -23,81 +23,77 @@ pub mod fetcher; use std::sync::Arc; +use prometheus_endpoint::Registry; use sc_executor::RuntimeInfo; +use sp_blockchain::Result as ClientResult; use sp_core::traits::CodeExecutor; -use sp_runtime::BuildStorage; use sp_runtime::traits::{Block as BlockT, HashFor}; -use sp_blockchain::Result as ClientResult; -use prometheus_endpoint::Registry; +use sp_runtime::BuildStorage; use crate::call_executor::LocalCallExecutor; use crate::client::Client; -use sc_client_api::{ - light::Storage as BlockchainStorage, CloneableSpawn, -}; use crate::light::backend::Backend; use crate::light::blockchain::Blockchain; use crate::light::call_executor::GenesisCallExecutor; use crate::light::fetcher::LightDataChecker; +use sc_client_api::{light::Storage as BlockchainStorage, CloneableSpawn}; /// Create an instance of light client blockchain backend. pub fn new_light_blockchain>(storage: S) -> Arc> { - Arc::new(Blockchain::new(storage)) + Arc::new(Blockchain::new(storage)) } /// Create an instance of light client backend. 
pub fn new_light_backend(blockchain: Arc>) -> Arc>> - where - B: BlockT, - S: BlockchainStorage, +where + B: BlockT, + S: BlockchainStorage, { - Arc::new(Backend::new(blockchain)) + Arc::new(Backend::new(blockchain)) } /// Create an instance of light client. pub fn new_light( - backend: Arc>>, - genesis_storage: &dyn BuildStorage, - code_executor: E, - spawn_handle: Box, - prometheus_registry: Option, + backend: Arc>>, + genesis_storage: &dyn BuildStorage, + code_executor: E, + spawn_handle: Box, + prometheus_registry: Option, ) -> ClientResult< - Client< - Backend>, - GenesisCallExecutor< - Backend>, - LocalCallExecutor>, E> - >, - B, - RA - > - > - where - B: BlockT, - S: BlockchainStorage + 'static, - E: CodeExecutor + RuntimeInfo + Clone + 'static, + Client< + Backend>, + GenesisCallExecutor>, LocalCallExecutor>, E>>, + B, + RA, + >, +> +where + B: BlockT, + S: BlockchainStorage + 'static, + E: CodeExecutor + RuntimeInfo + Clone + 'static, { - let local_executor = LocalCallExecutor::new(backend.clone(), code_executor, spawn_handle.clone()); - let executor = GenesisCallExecutor::new(backend.clone(), local_executor); - Client::new( - backend, - executor, - genesis_storage, - Default::default(), - Default::default(), - Default::default(), - prometheus_registry, - ) + let local_executor = + LocalCallExecutor::new(backend.clone(), code_executor, spawn_handle.clone()); + let executor = GenesisCallExecutor::new(backend.clone(), local_executor); + Client::new( + backend, + executor, + genesis_storage, + Default::default(), + Default::default(), + Default::default(), + prometheus_registry, + ) } /// Create an instance of fetch data checker. 
pub fn new_fetch_checker>( - blockchain: Arc>, - executor: E, - spawn_handle: Box, + blockchain: Arc>, + executor: E, + spawn_handle: Box, ) -> LightDataChecker, B, S> - where - E: CodeExecutor, +where + E: CodeExecutor, { - LightDataChecker::new(blockchain, executor, spawn_handle) + LightDataChecker::new(blockchain, executor, spawn_handle) } diff --git a/client/state-db/src/lib.rs b/client/state-db/src/lib.rs index 49b1a59285..d5cc2e384a 100644 --- a/client/state-db/src/lib.rs +++ b/client/state-db/src/lib.rs @@ -34,15 +34,15 @@ mod pruning; #[cfg(test)] mod test; -use std::fmt; -use parking_lot::RwLock; use codec::Codec; -use std::collections::{HashMap, hash_map::Entry}; +use log::trace; use noncanonical::NonCanonicalOverlay; +use parity_util_mem::{malloc_size, MallocSizeOf}; +use parking_lot::RwLock; use pruning::RefWindow; -use log::trace; -use parity_util_mem::{MallocSizeOf, malloc_size}; -use sc_client_api::{StateDbMemoryInfo, MemorySize}; +use sc_client_api::{MemorySize, StateDbMemoryInfo}; +use std::collections::{hash_map::Entry, HashMap}; +use std::fmt; const PRUNING_MODE: &[u8] = b"mode"; const PRUNING_MODE_ARCHIVE: &[u8] = b"archive"; @@ -53,604 +53,663 @@ const PRUNING_MODE_CONSTRAINED: &[u8] = b"constrained"; pub type DBValue = Vec; /// Basic set of requirements for the Block hash and node key types. -pub trait Hash: Send + Sync + Sized + Eq + PartialEq + Clone + Default + fmt::Debug + Codec + std::hash::Hash + 'static {} -impl Hash for T {} +pub trait Hash: + Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Codec + + std::hash::Hash + + 'static +{ +} +impl< + T: Send + + Sync + + Sized + + Eq + + PartialEq + + Clone + + Default + + fmt::Debug + + Codec + + std::hash::Hash + + 'static, + > Hash for T +{ +} /// Backend database trait. Read-only. pub trait MetaDb { - type Error: fmt::Debug; + type Error: fmt::Debug; - /// Get meta value, such as the journal. 
- fn get_meta(&self, key: &[u8]) -> Result, Self::Error>; + /// Get meta value, such as the journal. + fn get_meta(&self, key: &[u8]) -> Result, Self::Error>; } /// Backend database trait. Read-only. pub trait NodeDb { - type Key: ?Sized; - type Error: fmt::Debug; + type Key: ?Sized; + type Error: fmt::Debug; - /// Get state trie node. - fn get(&self, key: &Self::Key) -> Result, Self::Error>; + /// Get state trie node. + fn get(&self, key: &Self::Key) -> Result, Self::Error>; } /// Error type. pub enum Error { - /// Database backend error. - Db(E), - /// `Codec` decoding error. - Decoding(codec::Error), - /// Trying to canonicalize invalid block. - InvalidBlock, - /// Trying to insert block with invalid number. - InvalidBlockNumber, - /// Trying to insert block with unknown parent. - InvalidParent, - /// Invalid pruning mode specified. Contains expected mode. - InvalidPruningMode(String), + /// Database backend error. + Db(E), + /// `Codec` decoding error. + Decoding(codec::Error), + /// Trying to canonicalize invalid block. + InvalidBlock, + /// Trying to insert block with invalid number. + InvalidBlockNumber, + /// Trying to insert block with unknown parent. + InvalidParent, + /// Invalid pruning mode specified. Contains expected mode. + InvalidPruningMode(String), } /// Pinning error type. pub enum PinError { - /// Trying to pin invalid block. - InvalidBlock, + /// Trying to pin invalid block. 
+ InvalidBlock, } impl From for Error { - fn from(x: codec::Error) -> Self { - Error::Decoding(x) - } + fn from(x: codec::Error) -> Self { + Error::Decoding(x) + } } impl fmt::Debug for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::Db(e) => e.fmt(f), - Error::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e.what()), - Error::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), - Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), - Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), - Error::InvalidPruningMode(e) => write!(f, "Expected pruning mode: {}", e), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::Db(e) => e.fmt(f), + Error::Decoding(e) => write!(f, "Error decoding sliceable value: {}", e.what()), + Error::InvalidBlock => write!(f, "Trying to canonicalize invalid block"), + Error::InvalidBlockNumber => write!(f, "Trying to insert block with invalid number"), + Error::InvalidParent => write!(f, "Trying to insert block with unknown parent"), + Error::InvalidPruningMode(e) => write!(f, "Expected pruning mode: {}", e), + } + } } /// A set of state node changes. #[derive(Default, Debug, Clone)] pub struct ChangeSet { - /// Inserted nodes. - pub inserted: Vec<(H, DBValue)>, - /// Deleted nodes. - pub deleted: Vec, + /// Inserted nodes. + pub inserted: Vec<(H, DBValue)>, + /// Deleted nodes. + pub deleted: Vec, } /// A set of changes to the backing database. #[derive(Default, Debug, Clone)] pub struct CommitSet { - /// State node changes. - pub data: ChangeSet, - /// Metadata changes. - pub meta: ChangeSet>, + /// State node changes. + pub data: ChangeSet, + /// Metadata changes. + pub meta: ChangeSet>, } /// Pruning constraints. If none are specified pruning is #[derive(Default, Debug, Clone, Eq, PartialEq)] pub struct Constraints { - /// Maximum blocks. 
Defaults to 0 when unspecified, effectively keeping only non-canonical states. - pub max_blocks: Option, - /// Maximum memory in the pruning overlay. - pub max_mem: Option, + /// Maximum blocks. Defaults to 0 when unspecified, effectively keeping only non-canonical states. + pub max_blocks: Option, + /// Maximum memory in the pruning overlay. + pub max_mem: Option, } /// Pruning mode. #[derive(Debug, Clone, Eq, PartialEq)] pub enum PruningMode { - /// Maintain a pruning window. - Constrained(Constraints), - /// No pruning. Canonicalization is a no-op. - ArchiveAll, - /// Canonicalization discards non-canonical nodes. All the canonical nodes are kept in the DB. - ArchiveCanonical, + /// Maintain a pruning window. + Constrained(Constraints), + /// No pruning. Canonicalization is a no-op. + ArchiveAll, + /// Canonicalization discards non-canonical nodes. All the canonical nodes are kept in the DB. + ArchiveCanonical, } impl PruningMode { - /// Create a mode that keeps given number of blocks. - pub fn keep_blocks(n: u32) -> PruningMode { - PruningMode::Constrained(Constraints { - max_blocks: Some(n), - max_mem: None, - }) - } - - /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode? - pub fn is_archive(&self) -> bool { - match *self { - PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => true, - PruningMode::Constrained(_) => false - } - } - - /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode? - pub fn id(&self) -> &[u8] { - match self { - PruningMode::ArchiveAll => PRUNING_MODE_ARCHIVE, - PruningMode::ArchiveCanonical => PRUNING_MODE_ARCHIVE_CANON, - PruningMode::Constrained(_) => PRUNING_MODE_CONSTRAINED, - } - } + /// Create a mode that keeps given number of blocks. + pub fn keep_blocks(n: u32) -> PruningMode { + PruningMode::Constrained(Constraints { + max_blocks: Some(n), + max_mem: None, + }) + } + + /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode? 
+ pub fn is_archive(&self) -> bool { + match *self { + PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => true, + PruningMode::Constrained(_) => false, + } + } + + /// Is this an archive (either ArchiveAll or ArchiveCanonical) pruning mode? + pub fn id(&self) -> &[u8] { + match self { + PruningMode::ArchiveAll => PRUNING_MODE_ARCHIVE, + PruningMode::ArchiveCanonical => PRUNING_MODE_ARCHIVE_CANON, + PruningMode::Constrained(_) => PRUNING_MODE_CONSTRAINED, + } + } } impl Default for PruningMode { - fn default() -> Self { - PruningMode::keep_blocks(256) - } + fn default() -> Self { + PruningMode::keep_blocks(256) + } } fn to_meta_key(suffix: &[u8], data: &S) -> Vec { - let mut buffer = data.encode(); - buffer.extend(suffix); - buffer + let mut buffer = data.encode(); + buffer.extend(suffix); + buffer } struct StateDbSync { - mode: PruningMode, - non_canonical: NonCanonicalOverlay, - pruning: Option>, - pinned: HashMap, + mode: PruningMode, + non_canonical: NonCanonicalOverlay, + pruning: Option>, + pinned: HashMap, } impl StateDbSync { - fn new( - mode: PruningMode, - db: &D, - ) -> Result, Error> { - trace!(target: "state-db", "StateDb settings: {:?}", mode); - - // Check that settings match - Self::check_meta(&mode, db)?; - - let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(db)?; - let pruning: Option> = match mode { - PruningMode::Constrained(Constraints { - max_mem: Some(_), - .. 
- }) => unimplemented!(), - PruningMode::Constrained(_) => Some(RefWindow::new(db)?), - PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, - }; - - Ok(StateDbSync { - mode, - non_canonical, - pruning, - pinned: Default::default(), - }) - } - - fn check_meta(mode: &PruningMode, db: &D) -> Result<(), Error> { - let db_mode = db.get_meta(&to_meta_key(PRUNING_MODE, &())).map_err(Error::Db)?; - trace!(target: "state-db", - "DB pruning mode: {:?}", - db_mode.as_ref().map(|v| std::str::from_utf8(&v)) - ); - match &db_mode { - Some(v) if v.as_slice() == mode.id() => Ok(()), - Some(v) => Err(Error::InvalidPruningMode(String::from_utf8_lossy(v).into())), - None => Ok(()), - } - } - - fn insert_block( - &mut self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - mut changeset: ChangeSet, - ) -> Result, Error> { - let mut meta = ChangeSet::default(); - if number == 0 { - // Save pruning mode when writing first block. - meta.inserted.push((to_meta_key(PRUNING_MODE, &()), self.mode.id().into())); - } - - match self.mode { - PruningMode::ArchiveAll => { - changeset.deleted.clear(); - // write changes immediately - Ok(CommitSet { - data: changeset, - meta, - }) - }, - PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { - let commit = self.non_canonical.insert(hash, number, parent_hash, changeset); - commit.map(|mut c| { - c.meta.inserted.extend(meta.inserted); - c - }) - } - } - } - - fn canonicalize_block( - &mut self, - hash: &BlockHash, - ) -> Result, Error> { - let mut commit = CommitSet::default(); - if self.mode == PruningMode::ArchiveAll { - return Ok(commit) - } - match self.non_canonical.canonicalize(&hash, &mut commit) { - Ok(()) => { - if self.mode == PruningMode::ArchiveCanonical { - commit.data.deleted.clear(); - } - } - Err(e) => return Err(e), - }; - if let Some(ref mut pruning) = self.pruning { - pruning.note_canonical(&hash, &mut commit); - } - self.prune(&mut commit); - Ok(commit) - } - - fn best_canonical(&self) -> 
Option { - return self.non_canonical.last_canonicalized_block_number() - } - - fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { - match self.mode { - PruningMode::ArchiveAll => false, - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - if self.best_canonical().map(|c| number > c).unwrap_or(true) { - !self.non_canonical.have_block(hash) - } else { - self.pruning - .as_ref() - .map_or( - false, - |pruning| number < pruning.pending() || !pruning.have_block(hash), - ) - } - } - } - } - - fn prune(&mut self, commit: &mut CommitSet) { - if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = (&mut self.pruning, &self.mode) { - loop { - if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { - break; - } - - if constraints.max_mem.map_or(false, |m| pruning.mem_used() > m) { - break; - } - - let pinned = &self.pinned; - if pruning.next_hash().map_or(false, |h| pinned.contains_key(&h)) { - break; - } - pruning.prune_one(commit); - } - } - } - - /// Revert all non-canonical blocks with the best block number. - /// Returns a database commit or `None` if not possible. - /// For archive an empty commit set is returned. 
- fn revert_one(&mut self) -> Option> { - match self.mode { - PruningMode::ArchiveAll => { - Some(CommitSet::default()) - }, - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - self.non_canonical.revert_one() - }, - } - } - - fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> { - match self.mode { - PruningMode::ArchiveAll => Ok(()), - PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { - if self.non_canonical.have_block(hash) || - self.pruning.as_ref().map_or(false, |pruning| pruning.have_block(hash)) - { - let refs = self.pinned.entry(hash.clone()).or_default(); - if *refs == 0 { - trace!(target: "state-db-pin", "Pinned block: {:?}", hash); - self.non_canonical.pin(hash); - } - *refs += 1; - Ok(()) - } else { - Err(PinError::InvalidBlock) - } - } - } - } - - fn unpin(&mut self, hash: &BlockHash) { - match self.pinned.entry(hash.clone()) { - Entry::Occupied(mut entry) => { - *entry.get_mut() -= 1; - if *entry.get() == 0 { - trace!(target: "state-db-pin", "Unpinned block: {:?}", hash); - entry.remove(); - self.non_canonical.unpin(hash); - } else { - trace!(target: "state-db-pin", "Releasing reference for {:?}", hash); - } - }, - Entry::Vacant(_) => {}, - } - } - - pub fn get(&self, key: &Key, db: &D) -> Result, Error> - where Key: AsRef - { - if let Some(value) = self.non_canonical.get(key) { - return Ok(Some(value)); - } - db.get(key.as_ref()).map_err(|e| Error::Db(e)) - } - - fn apply_pending(&mut self) { - self.non_canonical.apply_pending(); - if let Some(pruning) = &mut self.pruning { - pruning.apply_pending(); - } - trace!( - target: "forks", - "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}", - self.pruning.as_ref().and_then(|p| p.next_hash()), - self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0), - self.non_canonical.last_canonicalized_hash(), - self.non_canonical.last_canonicalized_block_number().unwrap_or(0), - self.non_canonical.top_level(), - ); - } - - fn revert_pending(&mut self) { - if 
let Some(pruning) = &mut self.pruning { - pruning.revert_pending(); - } - self.non_canonical.revert_pending(); - } - - fn memory_info(&self) -> StateDbMemoryInfo { - StateDbMemoryInfo { - non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)), - pruning: self.pruning.as_ref().map(|p| MemorySize::from_bytes(malloc_size(p))), - pinned: MemorySize::from_bytes(malloc_size(&self.pinned)), - } - } + fn new( + mode: PruningMode, + db: &D, + ) -> Result, Error> { + trace!(target: "state-db", "StateDb settings: {:?}", mode); + + // Check that settings match + Self::check_meta(&mode, db)?; + + let non_canonical: NonCanonicalOverlay = NonCanonicalOverlay::new(db)?; + let pruning: Option> = match mode { + PruningMode::Constrained(Constraints { + max_mem: Some(_), .. + }) => unimplemented!(), + PruningMode::Constrained(_) => Some(RefWindow::new(db)?), + PruningMode::ArchiveAll | PruningMode::ArchiveCanonical => None, + }; + + Ok(StateDbSync { + mode, + non_canonical, + pruning, + pinned: Default::default(), + }) + } + + fn check_meta(mode: &PruningMode, db: &D) -> Result<(), Error> { + let db_mode = db + .get_meta(&to_meta_key(PRUNING_MODE, &())) + .map_err(Error::Db)?; + trace!(target: "state-db", + "DB pruning mode: {:?}", + db_mode.as_ref().map(|v| std::str::from_utf8(&v)) + ); + match &db_mode { + Some(v) if v.as_slice() == mode.id() => Ok(()), + Some(v) => Err(Error::InvalidPruningMode(String::from_utf8_lossy(v).into())), + None => Ok(()), + } + } + + fn insert_block( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + mut changeset: ChangeSet, + ) -> Result, Error> { + let mut meta = ChangeSet::default(); + if number == 0 { + // Save pruning mode when writing first block. 
+ meta.inserted + .push((to_meta_key(PRUNING_MODE, &()), self.mode.id().into())); + } + + match self.mode { + PruningMode::ArchiveAll => { + changeset.deleted.clear(); + // write changes immediately + Ok(CommitSet { + data: changeset, + meta, + }) + } + PruningMode::Constrained(_) | PruningMode::ArchiveCanonical => { + let commit = self + .non_canonical + .insert(hash, number, parent_hash, changeset); + commit.map(|mut c| { + c.meta.inserted.extend(meta.inserted); + c + }) + } + } + } + + fn canonicalize_block( + &mut self, + hash: &BlockHash, + ) -> Result, Error> { + let mut commit = CommitSet::default(); + if self.mode == PruningMode::ArchiveAll { + return Ok(commit); + } + match self.non_canonical.canonicalize(&hash, &mut commit) { + Ok(()) => { + if self.mode == PruningMode::ArchiveCanonical { + commit.data.deleted.clear(); + } + } + Err(e) => return Err(e), + }; + if let Some(ref mut pruning) = self.pruning { + pruning.note_canonical(&hash, &mut commit); + } + self.prune(&mut commit); + Ok(commit) + } + + fn best_canonical(&self) -> Option { + return self.non_canonical.last_canonicalized_block_number(); + } + + fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { + match self.mode { + PruningMode::ArchiveAll => false, + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { + if self.best_canonical().map(|c| number > c).unwrap_or(true) { + !self.non_canonical.have_block(hash) + } else { + self.pruning.as_ref().map_or(false, |pruning| { + number < pruning.pending() || !pruning.have_block(hash) + }) + } + } + } + } + + fn prune(&mut self, commit: &mut CommitSet) { + if let (&mut Some(ref mut pruning), &PruningMode::Constrained(ref constraints)) = + (&mut self.pruning, &self.mode) + { + loop { + if pruning.window_size() <= constraints.max_blocks.unwrap_or(0) as u64 { + break; + } + + if constraints + .max_mem + .map_or(false, |m| pruning.mem_used() > m) + { + break; + } + + let pinned = &self.pinned; + if pruning + .next_hash() + .map_or(false, 
|h| pinned.contains_key(&h)) + { + break; + } + pruning.prune_one(commit); + } + } + } + + /// Revert all non-canonical blocks with the best block number. + /// Returns a database commit or `None` if not possible. + /// For archive an empty commit set is returned. + fn revert_one(&mut self) -> Option> { + match self.mode { + PruningMode::ArchiveAll => Some(CommitSet::default()), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { + self.non_canonical.revert_one() + } + } + } + + fn pin(&mut self, hash: &BlockHash) -> Result<(), PinError> { + match self.mode { + PruningMode::ArchiveAll => Ok(()), + PruningMode::ArchiveCanonical | PruningMode::Constrained(_) => { + if self.non_canonical.have_block(hash) + || self + .pruning + .as_ref() + .map_or(false, |pruning| pruning.have_block(hash)) + { + let refs = self.pinned.entry(hash.clone()).or_default(); + if *refs == 0 { + trace!(target: "state-db-pin", "Pinned block: {:?}", hash); + self.non_canonical.pin(hash); + } + *refs += 1; + Ok(()) + } else { + Err(PinError::InvalidBlock) + } + } + } + } + + fn unpin(&mut self, hash: &BlockHash) { + match self.pinned.entry(hash.clone()) { + Entry::Occupied(mut entry) => { + *entry.get_mut() -= 1; + if *entry.get() == 0 { + trace!(target: "state-db-pin", "Unpinned block: {:?}", hash); + entry.remove(); + self.non_canonical.unpin(hash); + } else { + trace!(target: "state-db-pin", "Releasing reference for {:?}", hash); + } + } + Entry::Vacant(_) => {} + } + } + + pub fn get(&self, key: &Key, db: &D) -> Result, Error> + where + Key: AsRef, + { + if let Some(value) = self.non_canonical.get(key) { + return Ok(Some(value)); + } + db.get(key.as_ref()).map_err(|e| Error::Db(e)) + } + + fn apply_pending(&mut self) { + self.non_canonical.apply_pending(); + if let Some(pruning) = &mut self.pruning { + pruning.apply_pending(); + } + trace!( + target: "forks", + "First available: {:?} ({}), Last canon: {:?} ({}), Best forks: {:?}", + self.pruning.as_ref().and_then(|p| 
p.next_hash()), + self.pruning.as_ref().map(|p| p.pending()).unwrap_or(0), + self.non_canonical.last_canonicalized_hash(), + self.non_canonical.last_canonicalized_block_number().unwrap_or(0), + self.non_canonical.top_level(), + ); + } + + fn revert_pending(&mut self) { + if let Some(pruning) = &mut self.pruning { + pruning.revert_pending(); + } + self.non_canonical.revert_pending(); + } + + fn memory_info(&self) -> StateDbMemoryInfo { + StateDbMemoryInfo { + non_canonical: MemorySize::from_bytes(malloc_size(&self.non_canonical)), + pruning: self + .pruning + .as_ref() + .map(|p| MemorySize::from_bytes(malloc_size(p))), + pinned: MemorySize::from_bytes(malloc_size(&self.pinned)), + } + } } /// State DB maintenance. See module description. /// Can be shared across threads. pub struct StateDb { - db: RwLock>, + db: RwLock>, } impl StateDb { - /// Creates a new instance. Does not expect any metadata in the database. - pub fn new( - mode: PruningMode, - db: &D, - ) -> Result, Error> { - Ok(StateDb { - db: RwLock::new(StateDbSync::new(mode, db)?) - }) - } - - /// Add a new non-canonical block. - pub fn insert_block( - &self, - hash: &BlockHash, - number: u64, - parent_hash: &BlockHash, - changeset: ChangeSet, - ) -> Result, Error> { - self.db.write().insert_block(hash, number, parent_hash, changeset) - } - - /// Finalize a previously inserted block. - pub fn canonicalize_block( - &self, - hash: &BlockHash, - ) -> Result, Error> { - self.db.write().canonicalize_block(hash) - } - - /// Prevents pruning of specified block and its descendants. - pub fn pin(&self, hash: &BlockHash) -> Result<(), PinError> { - self.db.write().pin(hash) - } - - /// Allows pruning of specified block. - pub fn unpin(&self, hash: &BlockHash) { - self.db.write().unpin(hash) - } - - /// Get a value from non-canonical/pruning overlay or the backing DB. 
- pub fn get(&self, key: &Key, db: &D) -> Result, Error> - where Key: AsRef - { - self.db.read().get(key, db) - } - - /// Revert all non-canonical blocks with the best block number. - /// Returns a database commit or `None` if not possible. - /// For archive an empty commit set is returned. - pub fn revert_one(&self) -> Option> { - self.db.write().revert_one() - } - - /// Returns last finalized block number. - pub fn best_canonical(&self) -> Option { - return self.db.read().best_canonical() - } - - /// Check if block is pruned away. - pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { - return self.db.read().is_pruned(hash, number) - } - - /// Apply all pending changes - pub fn apply_pending(&self) { - self.db.write().apply_pending(); - } - - /// Revert all pending changes - pub fn revert_pending(&self) { - self.db.write().revert_pending(); - } - - /// Returns the current memory statistics of this instance. - pub fn memory_info(&self) -> StateDbMemoryInfo { - self.db.read().memory_info() - } + /// Creates a new instance. Does not expect any metadata in the database. + pub fn new( + mode: PruningMode, + db: &D, + ) -> Result, Error> { + Ok(StateDb { + db: RwLock::new(StateDbSync::new(mode, db)?), + }) + } + + /// Add a new non-canonical block. + pub fn insert_block( + &self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> Result, Error> { + self.db + .write() + .insert_block(hash, number, parent_hash, changeset) + } + + /// Finalize a previously inserted block. + pub fn canonicalize_block( + &self, + hash: &BlockHash, + ) -> Result, Error> { + self.db.write().canonicalize_block(hash) + } + + /// Prevents pruning of specified block and its descendants. + pub fn pin(&self, hash: &BlockHash) -> Result<(), PinError> { + self.db.write().pin(hash) + } + + /// Allows pruning of specified block. 
+ pub fn unpin(&self, hash: &BlockHash) { + self.db.write().unpin(hash) + } + + /// Get a value from non-canonical/pruning overlay or the backing DB. + pub fn get(&self, key: &Key, db: &D) -> Result, Error> + where + Key: AsRef, + { + self.db.read().get(key, db) + } + + /// Revert all non-canonical blocks with the best block number. + /// Returns a database commit or `None` if not possible. + /// For archive an empty commit set is returned. + pub fn revert_one(&self) -> Option> { + self.db.write().revert_one() + } + + /// Returns last finalized block number. + pub fn best_canonical(&self) -> Option { + return self.db.read().best_canonical(); + } + + /// Check if block is pruned away. + pub fn is_pruned(&self, hash: &BlockHash, number: u64) -> bool { + return self.db.read().is_pruned(hash, number); + } + + /// Apply all pending changes + pub fn apply_pending(&self) { + self.db.write().apply_pending(); + } + + /// Revert all pending changes + pub fn revert_pending(&self) { + self.db.write().revert_pending(); + } + + /// Returns the current memory statistics of this instance. 
+ pub fn memory_info(&self) -> StateDbMemoryInfo { + self.db.read().memory_info() + } } #[cfg(test)] mod tests { - use std::io; - use sp_core::H256; - use crate::{StateDb, PruningMode, Constraints}; - use crate::test::{make_db, make_changeset, TestDb}; - - fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { - let mut db = make_db(&[91, 921, 922, 93, 94]); - let state_db = StateDb::new(settings, &db).unwrap(); - - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(1), - 1, - &H256::from_low_u64_be(0), - make_changeset(&[1], &[91]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(21), - 2, - &H256::from_low_u64_be(1), - make_changeset(&[21], &[921, 1]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(22), - 2, - &H256::from_low_u64_be(1), - make_changeset(&[22], &[922]), - ) - .unwrap(), - ); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(3), - 3, - &H256::from_low_u64_be(21), - make_changeset(&[3], &[93]), - ) - .unwrap(), - ); - state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(1)).unwrap()); - state_db.apply_pending(); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(4), - 4, - &H256::from_low_u64_be(3), - make_changeset(&[4], &[94]), - ) - .unwrap(), - ); - state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(21)).unwrap()); - state_db.apply_pending(); - db.commit(&state_db.canonicalize_block::(&H256::from_low_u64_be(3)).unwrap()); - state_db.apply_pending(); - - (db, state_db) - } - - #[test] - fn full_archive_keeps_everything() { - let (db, sdb) = make_test_db(PruningMode::ArchiveAll); - assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - } - - #[test] - fn canonical_archive_keeps_canonical() { - let (db, _) = 
make_test_db(PruningMode::ArchiveCanonical); - assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); - } - - #[test] - fn prune_window_0() { - let (db, _) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(0), - max_mem: None, - })); - assert!(db.data_eq(&make_db(&[21, 3, 922, 94]))); - } - - #[test] - fn prune_window_1() { - let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(1), - max_mem: None, - })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); - assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); - } - - #[test] - fn prune_window_2() { - let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { - max_blocks: Some(2), - max_mem: None, - })); - assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); - assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2)); - assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); - assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); - } - - #[test] - fn detects_incompatible_mode() { - let mut db = make_db(&[]); - let state_db = StateDb::new(PruningMode::ArchiveAll, &db).unwrap(); - db.commit( - &state_db - .insert_block::( - &H256::from_low_u64_be(0), - 0, - &H256::from_low_u64_be(0), - make_changeset(&[], &[]), - ) - .unwrap(), - ); - let new_mode = PruningMode::Constrained(Constraints { max_blocks: Some(2), max_mem: None }); - let state_db: Result, _> = StateDb::new(new_mode, &db); - assert!(state_db.is_err()); - } + use crate::test::{make_changeset, make_db, TestDb}; + use crate::{Constraints, PruningMode, StateDb}; + use sp_core::H256; + use std::io; + + fn make_test_db(settings: PruningMode) -> (TestDb, StateDb) { + let mut db = make_db(&[91, 921, 922, 93, 94]); + let state_db = StateDb::new(settings, 
&db).unwrap(); + + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(1), + 1, + &H256::from_low_u64_be(0), + make_changeset(&[1], &[91]), + ) + .unwrap(), + ); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(21), + 2, + &H256::from_low_u64_be(1), + make_changeset(&[21], &[921, 1]), + ) + .unwrap(), + ); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(22), + 2, + &H256::from_low_u64_be(1), + make_changeset(&[22], &[922]), + ) + .unwrap(), + ); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(3), + 3, + &H256::from_low_u64_be(21), + make_changeset(&[3], &[93]), + ) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .canonicalize_block::(&H256::from_low_u64_be(1)) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(4), + 4, + &H256::from_low_u64_be(3), + make_changeset(&[4], &[94]), + ) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .canonicalize_block::(&H256::from_low_u64_be(21)) + .unwrap(), + ); + state_db.apply_pending(); + db.commit( + &state_db + .canonicalize_block::(&H256::from_low_u64_be(3)) + .unwrap(), + ); + state_db.apply_pending(); + + (db, state_db) + } + + #[test] + fn full_archive_keeps_everything() { + let (db, sdb) = make_test_db(PruningMode::ArchiveAll); + assert!(db.data_eq(&make_db(&[1, 21, 22, 3, 4, 91, 921, 922, 93, 94]))); + assert!(!sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + } + + #[test] + fn canonical_archive_keeps_canonical() { + let (db, _) = make_test_db(PruningMode::ArchiveCanonical); + assert!(db.data_eq(&make_db(&[1, 21, 3, 91, 921, 922, 93, 94]))); + } + + #[test] + fn prune_window_0() { + let (db, _) = make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(0), + max_mem: None, + })); + assert!(db.data_eq(&make_db(&[21, 3, 922, 94]))); + } + + #[test] + fn prune_window_1() { + let (db, sdb) = 
make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(1), + max_mem: None, + })); + assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(21), 2)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert!(db.data_eq(&make_db(&[21, 3, 922, 93, 94]))); + } + + #[test] + fn prune_window_2() { + let (db, sdb) = make_test_db(PruningMode::Constrained(Constraints { + max_blocks: Some(2), + max_mem: None, + })); + assert!(sdb.is_pruned(&H256::from_low_u64_be(0), 0)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(1), 1)); + assert!(!sdb.is_pruned(&H256::from_low_u64_be(21), 2)); + assert!(sdb.is_pruned(&H256::from_low_u64_be(22), 2)); + assert!(db.data_eq(&make_db(&[1, 21, 3, 921, 922, 93, 94]))); + } + + #[test] + fn detects_incompatible_mode() { + let mut db = make_db(&[]); + let state_db = StateDb::new(PruningMode::ArchiveAll, &db).unwrap(); + db.commit( + &state_db + .insert_block::( + &H256::from_low_u64_be(0), + 0, + &H256::from_low_u64_be(0), + make_changeset(&[], &[]), + ) + .unwrap(), + ); + let new_mode = PruningMode::Constrained(Constraints { + max_blocks: Some(2), + max_mem: None, + }); + let state_db: Result, _> = StateDb::new(new_mode, &db); + assert!(state_db.is_err()); + } } diff --git a/client/state-db/src/noncanonical.rs b/client/state-db/src/noncanonical.rs index 6a34523b66..1440fad327 100644 --- a/client/state-db/src/noncanonical.rs +++ b/client/state-db/src/noncanonical.rs @@ -20,11 +20,11 @@ //! All pending changes are kept in memory until next call to `apply_pending` or //! 
`revert_pending` -use std::fmt; -use std::collections::{HashMap, VecDeque, hash_map::Entry}; -use super::{Error, DBValue, ChangeSet, CommitSet, MetaDb, Hash, to_meta_key}; -use codec::{Encode, Decode}; +use super::{to_meta_key, ChangeSet, CommitSet, DBValue, Error, Hash, MetaDb}; +use codec::{Decode, Encode}; use log::trace; +use std::collections::{hash_map::Entry, HashMap, VecDeque}; +use std::fmt; const NON_CANONICAL_JOURNAL: &[u8] = b"noncanonical_journal"; const LAST_CANONICAL: &[u8] = b"last_canonical"; @@ -32,893 +32,1136 @@ const LAST_CANONICAL: &[u8] = b"last_canonical"; /// See module documentation. #[derive(parity_util_mem_derive::MallocSizeOf)] pub struct NonCanonicalOverlay { - last_canonicalized: Option<(BlockHash, u64)>, - levels: VecDeque>>, - parents: HashMap, - pending_canonicalizations: Vec, - pending_insertions: Vec, - values: HashMap, //ref counted - //would be deleted but kept around because block is pinned, ref counted. - pinned: HashMap, - pinned_insertions: HashMap>, + last_canonicalized: Option<(BlockHash, u64)>, + levels: VecDeque>>, + parents: HashMap, + pending_canonicalizations: Vec, + pending_insertions: Vec, + values: HashMap, //ref counted + //would be deleted but kept around because block is pinned, ref counted. 
+ pinned: HashMap, + pinned_insertions: HashMap>, } #[derive(Encode, Decode)] struct JournalRecord { - hash: BlockHash, - parent_hash: BlockHash, - inserted: Vec<(Key, DBValue)>, - deleted: Vec, + hash: BlockHash, + parent_hash: BlockHash, + inserted: Vec<(Key, DBValue)>, + deleted: Vec, } fn to_journal_key(block: u64, index: u64) -> Vec { - to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) + to_meta_key(NON_CANONICAL_JOURNAL, &(block, index)) } #[cfg_attr(test, derive(PartialEq, Debug))] #[derive(parity_util_mem_derive::MallocSizeOf)] struct BlockOverlay { - hash: BlockHash, - journal_key: Vec, - inserted: Vec, - deleted: Vec, + hash: BlockHash, + journal_key: Vec, + inserted: Vec, + deleted: Vec, } -fn insert_values(values: &mut HashMap, inserted: Vec<(Key, DBValue)>) { - for (k, v) in inserted { - debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); - let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); - *counter += 1; - } +fn insert_values( + values: &mut HashMap, + inserted: Vec<(Key, DBValue)>, +) { + for (k, v) in inserted { + debug_assert!(values.get(&k).map_or(true, |(_, value)| *value == v)); + let (ref mut counter, _) = values.entry(k).or_insert_with(|| (0, v)); + *counter += 1; + } } fn discard_values(values: &mut HashMap, inserted: Vec) { - for k in inserted { - match values.entry(k) { - Entry::Occupied(mut e) => { - let (ref mut counter, _) = e.get_mut(); - *counter -= 1; - if *counter == 0 { - e.remove_entry(); - } - }, - Entry::Vacant(_) => { - debug_assert!(false, "Trying to discard missing value"); - } - } - } + for k in inserted { + match values.entry(k) { + Entry::Occupied(mut e) => { + let (ref mut counter, _) = e.get_mut(); + *counter -= 1; + if *counter == 0 { + e.remove_entry(); + } + } + Entry::Vacant(_) => { + debug_assert!(false, "Trying to discard missing value"); + } + } + } } fn discard_descendants( - levels: &mut VecDeque>>, - mut values: &mut HashMap, - index: usize, - parents: &mut HashMap, - 
pinned: &HashMap, - pinned_insertions: &mut HashMap>, - hash: &BlockHash, + levels: &mut VecDeque>>, + mut values: &mut HashMap, + index: usize, + parents: &mut HashMap, + pinned: &HashMap, + pinned_insertions: &mut HashMap>, + hash: &BlockHash, ) { - let mut discarded = Vec::new(); - if let Some(level) = levels.get_mut(index) { - *level = level.drain(..).filter_map(|overlay| { - let parent = parents.get(&overlay.hash) - .expect("there is a parent entry for each entry in levels; qed"); - - if parent == hash { - discarded.push(overlay.hash.clone()); - if pinned.contains_key(&overlay.hash) { - // save to be discarded later. - pinned_insertions.insert(overlay.hash.clone(), overlay.inserted); - } else { - // discard immediately. - parents.remove(&overlay.hash); - discard_values(&mut values, overlay.inserted); - } - None - } else { - Some(overlay) - } - }).collect(); - } - for hash in discarded { - discard_descendants(levels, values, index + 1, parents, pinned, pinned_insertions, &hash); - } + let mut discarded = Vec::new(); + if let Some(level) = levels.get_mut(index) { + *level = level + .drain(..) + .filter_map(|overlay| { + let parent = parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed"); + + if parent == hash { + discarded.push(overlay.hash.clone()); + if pinned.contains_key(&overlay.hash) { + // save to be discarded later. + pinned_insertions.insert(overlay.hash.clone(), overlay.inserted); + } else { + // discard immediately. + parents.remove(&overlay.hash); + discard_values(&mut values, overlay.inserted); + } + None + } else { + Some(overlay) + } + }) + .collect(); + } + for hash in discarded { + discard_descendants( + levels, + values, + index + 1, + parents, + pinned, + pinned_insertions, + &hash, + ); + } } impl NonCanonicalOverlay { - /// Creates a new instance. Does not expect any metadata to be present in the DB. 
- pub fn new(db: &D) -> Result, Error> { - let last_canonicalized = db.get_meta(&to_meta_key(LAST_CANONICAL, &())) - .map_err(|e| Error::Db(e))?; - let last_canonicalized = match last_canonicalized { - Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?), - None => None, - }; - let mut levels = VecDeque::new(); - let mut parents = HashMap::new(); - let mut values = HashMap::new(); - if let Some((ref hash, mut block)) = last_canonicalized { - // read the journal - trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); - let mut total: u64 = 0; - block += 1; - loop { - let mut index: u64 = 0; - let mut level = Vec::new(); - loop { - let journal_key = to_journal_key(block, index); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: record.hash.clone(), - journal_key, - inserted: inserted, - deleted: record.deleted, - }; - insert_values(&mut values, record.inserted); - trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); - level.push(overlay); - parents.insert(record.hash, record.parent_hash); - index += 1; - total += 1; - }, - None => break, - } - } - if level.is_empty() { - break; - } - levels.push_back(level); - block += 1; - } - trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total); - } - Ok(NonCanonicalOverlay { - last_canonicalized, - levels, - parents, - pending_canonicalizations: Default::default(), - pending_insertions: Default::default(), - pinned: Default::default(), - pinned_insertions: Default::default(), - values: values, - }) - } - - /// Insert a new block into the overlay. 
If inserted on the second level or lover expects parent to be present in the window. - pub fn insert(&mut self, hash: &BlockHash, number: u64, parent_hash: &BlockHash, changeset: ChangeSet) -> Result, Error> { - let mut commit = CommitSet::default(); - let front_block_number = self.front_block_number(); - if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { - // assume that parent was canonicalized - let last_canonicalized = (parent_hash.clone(), number - 1); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), last_canonicalized.encode())); - self.last_canonicalized = Some(last_canonicalized); - } else if self.last_canonicalized.is_some() { - if number < front_block_number || number >= front_block_number + self.levels.len() as u64 + 1 { - trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})", - number, - front_block_number, - front_block_number + self.levels.len() as u64, - ); - return Err(Error::InvalidBlockNumber); - } - // check for valid parent if inserting on second level or higher - if number == front_block_number { - if !self.last_canonicalized.as_ref().map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) { - return Err(Error::InvalidParent); - } - } else if !self.parents.contains_key(&parent_hash) { - return Err(Error::InvalidParent); - } - } - let level = if self.levels.is_empty() || number == front_block_number + self.levels.len() as u64 { - self.levels.push_back(Vec::new()); - self.levels.back_mut().expect("can't be empty after insertion; qed") - } else { - self.levels.get_mut((number - front_block_number) as usize) + /// Creates a new instance. Does not expect any metadata to be present in the DB. 
+ pub fn new(db: &D) -> Result, Error> { + let last_canonicalized = db + .get_meta(&to_meta_key(LAST_CANONICAL, &())) + .map_err(|e| Error::Db(e))?; + let last_canonicalized = match last_canonicalized { + Some(buffer) => Some(<(BlockHash, u64)>::decode(&mut buffer.as_slice())?), + None => None, + }; + let mut levels = VecDeque::new(); + let mut parents = HashMap::new(); + let mut values = HashMap::new(); + if let Some((ref hash, mut block)) = last_canonicalized { + // read the journal + trace!(target: "state-db", "Reading uncanonicalized journal. Last canonicalized #{} ({:?})", block, hash); + let mut total: u64 = 0; + block += 1; + loop { + let mut index: u64 = 0; + let mut level = Vec::new(); + loop { + let journal_key = to_journal_key(block, index); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? { + Some(record) => { + let record: JournalRecord = + Decode::decode(&mut record.as_slice())?; + let inserted = record.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: record.hash.clone(), + journal_key, + inserted: inserted, + deleted: record.deleted, + }; + insert_values(&mut values, record.inserted); + trace!(target: "state-db", "Uncanonicalized journal entry {}.{} ({} inserted, {} deleted)", block, index, overlay.inserted.len(), overlay.deleted.len()); + level.push(overlay); + parents.insert(record.hash, record.parent_hash); + index += 1; + total += 1; + } + None => break, + } + } + if level.is_empty() { + break; + } + levels.push_back(level); + block += 1; + } + trace!(target: "state-db", "Finished reading uncanonicalized journal, {} entries", total); + } + Ok(NonCanonicalOverlay { + last_canonicalized, + levels, + parents, + pending_canonicalizations: Default::default(), + pending_insertions: Default::default(), + pinned: Default::default(), + pinned_insertions: Default::default(), + values: values, + }) + } + + /// Insert a new block into the overlay. 
If inserted on the second level or lover expects parent to be present in the window. + pub fn insert( + &mut self, + hash: &BlockHash, + number: u64, + parent_hash: &BlockHash, + changeset: ChangeSet, + ) -> Result, Error> { + let mut commit = CommitSet::default(); + let front_block_number = self.front_block_number(); + if self.levels.is_empty() && self.last_canonicalized.is_none() && number > 0 { + // assume that parent was canonicalized + let last_canonicalized = (parent_hash.clone(), number - 1); + commit.meta.inserted.push(( + to_meta_key(LAST_CANONICAL, &()), + last_canonicalized.encode(), + )); + self.last_canonicalized = Some(last_canonicalized); + } else if self.last_canonicalized.is_some() { + if number < front_block_number + || number >= front_block_number + self.levels.len() as u64 + 1 + { + trace!(target: "state-db", "Failed to insert block {}, current is {} .. {})", + number, + front_block_number, + front_block_number + self.levels.len() as u64, + ); + return Err(Error::InvalidBlockNumber); + } + // check for valid parent if inserting on second level or higher + if number == front_block_number { + if !self + .last_canonicalized + .as_ref() + .map_or(false, |&(ref h, n)| h == parent_hash && n == number - 1) + { + return Err(Error::InvalidParent); + } + } else if !self.parents.contains_key(&parent_hash) { + return Err(Error::InvalidParent); + } + } + let level = if self.levels.is_empty() + || number == front_block_number + self.levels.len() as u64 + { + self.levels.push_back(Vec::new()); + self.levels + .back_mut() + .expect("can't be empty after insertion; qed") + } else { + self.levels.get_mut((number - front_block_number) as usize) .expect("number is [front_block_number .. 
front_block_number + levels.len()) is asserted in precondition; qed") - }; - - let index = level.len() as u64; - let journal_key = to_journal_key(number, index); - - let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); - let overlay = BlockOverlay { - hash: hash.clone(), - journal_key: journal_key.clone(), - inserted: inserted, - deleted: changeset.deleted.clone(), - }; - level.push(overlay); - self.parents.insert(hash.clone(), parent_hash.clone()); - let journal_record = JournalRecord { - hash: hash.clone(), - parent_hash: parent_hash.clone(), - inserted: changeset.inserted, - deleted: changeset.deleted, - }; - commit.meta.inserted.push((journal_key, journal_record.encode())); - trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); - insert_values(&mut self.values, journal_record.inserted); - self.pending_insertions.push(hash.clone()); - Ok(commit) - } - - fn discard_journals( - &self, - level_index: usize, - discarded_journals: &mut Vec>, - discarded_blocks: &mut Vec, - hash: &BlockHash - ) { - if let Some(level) = self.levels.get(level_index) { - level.iter().for_each(|overlay| { - let parent = self.parents.get(&overlay.hash).expect("there is a parent entry for each entry in levels; qed").clone(); - if parent == *hash { - discarded_journals.push(overlay.journal_key.clone()); - discarded_blocks.push(overlay.hash.clone()); - self.discard_journals(level_index + 1, discarded_journals, discarded_blocks, &overlay.hash); - } - }); - } - } - - fn front_block_number(&self) -> u64 { - self.last_canonicalized.as_ref().map(|&(_, n)| n + 1).unwrap_or(0) - } - - pub fn last_canonicalized_block_number(&self) -> Option { - match self.last_canonicalized.as_ref().map(|&(_, n)| n) { - Some(n) => Some(n + self.pending_canonicalizations.len() as u64), - None if !self.pending_canonicalizations.is_empty() => 
Some(self.pending_canonicalizations.len() as u64), - _ => None, - } - } - - pub fn last_canonicalized_hash(&self) -> Option { - self.last_canonicalized.as_ref().map(|&(ref h, _)| h.clone()) - } - - pub fn top_level(&self) -> Vec<(BlockHash, u64)> { - let start = self.last_canonicalized_block_number().unwrap_or(0); - self.levels - .get(self.pending_canonicalizations.len()) - .map(|level| level.iter().map(|r| (r.hash.clone(), start)).collect()) - .unwrap_or_default() - } - - /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. - /// Returns a set of changes that need to be added to the DB. - pub fn canonicalize( - &mut self, - hash: &BlockHash, - commit: &mut CommitSet, - ) -> Result<(), Error> { - trace!(target: "state-db", "Canonicalizing {:?}", hash); - let level = self.levels.get(self.pending_canonicalizations.len()).ok_or_else(|| Error::InvalidBlock)?; - let index = level - .iter() - .position(|overlay| overlay.hash == *hash) - .ok_or_else(|| Error::InvalidBlock)?; - - let mut discarded_journals = Vec::new(); - let mut discarded_blocks = Vec::new(); - for (i, overlay) in level.iter().enumerate() { - if i != index { - self.discard_journals( - self.pending_canonicalizations.len() + 1, - &mut discarded_journals, - &mut discarded_blocks, - &overlay.hash - ); - } - discarded_journals.push(overlay.journal_key.clone()); - discarded_blocks.push(overlay.hash.clone()); - } - - // get the one we need to canonicalize - let overlay = &level[index]; - commit.data.inserted.extend(overlay.inserted.iter() - .map(|k| (k.clone(), self.values.get(k).expect("For each key in overlays there's a value in values").1.clone()))); - commit.data.deleted.extend(overlay.deleted.clone()); - - commit.meta.deleted.append(&mut discarded_journals); - let canonicalized = (hash.clone(), self.front_block_number() + self.pending_canonicalizations.len() as u64); - commit.meta.inserted.push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); - 
trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); - self.pending_canonicalizations.push(hash.clone()); - Ok(()) - } - - fn apply_canonicalizations(&mut self) { - let last = self.pending_canonicalizations.last().cloned(); - let count = self.pending_canonicalizations.len() as u64; - for hash in self.pending_canonicalizations.drain(..) { - trace!(target: "state-db", "Post canonicalizing {:?}", hash); - let level = self.levels.pop_front().expect("Hash validity is checked in `canonicalize`"); - let index = level - .iter() - .position(|overlay| overlay.hash == hash) - .expect("Hash validity is checked in `canonicalize`"); - - // discard unfinalized overlays and values - for (i, overlay) in level.into_iter().enumerate() { - if i != index { - discard_descendants( - &mut self.levels, - &mut self.values, - 0, - &mut self.parents, - &self.pinned, - &mut self.pinned_insertions, - &overlay.hash, - ); - } - if self.pinned.contains_key(&overlay.hash) { - self.pinned_insertions.insert(overlay.hash.clone(), overlay.inserted); - } else { - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); - } - } - } - if let Some(hash) = last { - let last_canonicalized = (hash, self.last_canonicalized.as_ref().map(|(_, n)| n + count).unwrap_or(count - 1)); - self.last_canonicalized = Some(last_canonicalized); - } - } - - /// Get a value from the node overlay. This searches in every existing changeset. - pub fn get(&self, key: &Key) -> Option { - if let Some((_, value)) = self.values.get(&key) { - return Some(value.clone()); - } - None - } - - /// Check if the block is in the canonicalization queue. - pub fn have_block(&self, hash: &BlockHash) -> bool { - (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) - && !self.pending_canonicalizations.contains(hash) - } - - /// Revert a single level. Returns commit set that deletes the journal or `None` if not possible. 
- pub fn revert_one(&mut self) -> Option> { - self.levels.pop_back().map(|level| { - let mut commit = CommitSet::default(); - for overlay in level.into_iter() { - commit.meta.deleted.push(overlay.journal_key); - self.parents.remove(&overlay.hash); - discard_values(&mut self.values, overlay.inserted); - } - commit - }) - } - - fn revert_insertions(&mut self) { - self.pending_insertions.reverse(); - for hash in self.pending_insertions.drain(..) { - self.parents.remove(&hash); - // find a level. When iterating insertions backwards the hash is always last in the level. - let level_index = - self.levels.iter().position(|level| - level.last().expect("Hash is added in `insert` in reverse order").hash == hash) - .expect("Hash is added in insert"); - - let overlay = self.levels[level_index].pop().expect("Empty levels are not allowed in self.levels"); - discard_values(&mut self.values, overlay.inserted); - if self.levels[level_index].is_empty() { - debug_assert_eq!(level_index, self.levels.len() - 1); - self.levels.pop_back(); - } - } - } - - /// Apply all pending changes - pub fn apply_pending(&mut self) { - self.apply_canonicalizations(); - self.pending_insertions.clear(); - } - - /// Revert all pending changes - pub fn revert_pending(&mut self) { - self.pending_canonicalizations.clear(); - self.revert_insertions(); - } - - /// Pin state values in memory - pub fn pin(&mut self, hash: &BlockHash) { - if self.pending_insertions.contains(hash) { - debug_assert!(false, "Trying to pin pending state"); - return; - } - // Also pin all parents - let mut parent = Some(hash); - while let Some(hash) = parent { - let refs = self.pinned.entry(hash.clone()).or_default(); - if *refs == 0 { - trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash); - } - *refs += 1; - parent = self.parents.get(hash); - } - } - - /// Discard pinned state - pub fn unpin(&mut self, hash: &BlockHash) { - // Also unpin all parents - let mut parent = Some(hash.clone()); - while let Some(hash) = 
parent { - parent = self.parents.get(&hash).cloned(); - match self.pinned.entry(hash.clone()) { - Entry::Occupied(mut entry) => { - *entry.get_mut() -= 1; - if *entry.get() == 0 { - entry.remove(); - if let Some(inserted) = self.pinned_insertions.remove(&hash) { - trace!(target: "state-db-pin", "Discarding unpinned non-canon block: {:?}", hash); - discard_values(&mut self.values, inserted); - self.parents.remove(&hash); - } - } - }, - Entry::Vacant(_) => {}, - } - } - } + }; + + let index = level.len() as u64; + let journal_key = to_journal_key(number, index); + + let inserted = changeset.inserted.iter().map(|(k, _)| k.clone()).collect(); + let overlay = BlockOverlay { + hash: hash.clone(), + journal_key: journal_key.clone(), + inserted: inserted, + deleted: changeset.deleted.clone(), + }; + level.push(overlay); + self.parents.insert(hash.clone(), parent_hash.clone()); + let journal_record = JournalRecord { + hash: hash.clone(), + parent_hash: parent_hash.clone(), + inserted: changeset.inserted, + deleted: changeset.deleted, + }; + commit + .meta + .inserted + .push((journal_key, journal_record.encode())); + trace!(target: "state-db", "Inserted uncanonicalized changeset {}.{} ({} inserted, {} deleted)", number, index, journal_record.inserted.len(), journal_record.deleted.len()); + insert_values(&mut self.values, journal_record.inserted); + self.pending_insertions.push(hash.clone()); + Ok(commit) + } + + fn discard_journals( + &self, + level_index: usize, + discarded_journals: &mut Vec>, + discarded_blocks: &mut Vec, + hash: &BlockHash, + ) { + if let Some(level) = self.levels.get(level_index) { + level.iter().for_each(|overlay| { + let parent = self + .parents + .get(&overlay.hash) + .expect("there is a parent entry for each entry in levels; qed") + .clone(); + if parent == *hash { + discarded_journals.push(overlay.journal_key.clone()); + discarded_blocks.push(overlay.hash.clone()); + self.discard_journals( + level_index + 1, + discarded_journals, + 
discarded_blocks, + &overlay.hash, + ); + } + }); + } + } + + fn front_block_number(&self) -> u64 { + self.last_canonicalized + .as_ref() + .map(|&(_, n)| n + 1) + .unwrap_or(0) + } + + pub fn last_canonicalized_block_number(&self) -> Option { + match self.last_canonicalized.as_ref().map(|&(_, n)| n) { + Some(n) => Some(n + self.pending_canonicalizations.len() as u64), + None if !self.pending_canonicalizations.is_empty() => { + Some(self.pending_canonicalizations.len() as u64) + } + _ => None, + } + } + + pub fn last_canonicalized_hash(&self) -> Option { + self.last_canonicalized + .as_ref() + .map(|&(ref h, _)| h.clone()) + } + + pub fn top_level(&self) -> Vec<(BlockHash, u64)> { + let start = self.last_canonicalized_block_number().unwrap_or(0); + self.levels + .get(self.pending_canonicalizations.len()) + .map(|level| level.iter().map(|r| (r.hash.clone(), start)).collect()) + .unwrap_or_default() + } + + /// Select a top-level root and canonicalized it. Discards all sibling subtrees and the root. + /// Returns a set of changes that need to be added to the DB. 
+ pub fn canonicalize( + &mut self, + hash: &BlockHash, + commit: &mut CommitSet, + ) -> Result<(), Error> { + trace!(target: "state-db", "Canonicalizing {:?}", hash); + let level = self + .levels + .get(self.pending_canonicalizations.len()) + .ok_or_else(|| Error::InvalidBlock)?; + let index = level + .iter() + .position(|overlay| overlay.hash == *hash) + .ok_or_else(|| Error::InvalidBlock)?; + + let mut discarded_journals = Vec::new(); + let mut discarded_blocks = Vec::new(); + for (i, overlay) in level.iter().enumerate() { + if i != index { + self.discard_journals( + self.pending_canonicalizations.len() + 1, + &mut discarded_journals, + &mut discarded_blocks, + &overlay.hash, + ); + } + discarded_journals.push(overlay.journal_key.clone()); + discarded_blocks.push(overlay.hash.clone()); + } + + // get the one we need to canonicalize + let overlay = &level[index]; + commit + .data + .inserted + .extend(overlay.inserted.iter().map(|k| { + ( + k.clone(), + self.values + .get(k) + .expect("For each key in overlays there's a value in values") + .1 + .clone(), + ) + })); + commit.data.deleted.extend(overlay.deleted.clone()); + + commit.meta.deleted.append(&mut discarded_journals); + let canonicalized = ( + hash.clone(), + self.front_block_number() + self.pending_canonicalizations.len() as u64, + ); + commit + .meta + .inserted + .push((to_meta_key(LAST_CANONICAL, &()), canonicalized.encode())); + trace!(target: "state-db", "Discarding {} records", commit.meta.deleted.len()); + self.pending_canonicalizations.push(hash.clone()); + Ok(()) + } + + fn apply_canonicalizations(&mut self) { + let last = self.pending_canonicalizations.last().cloned(); + let count = self.pending_canonicalizations.len() as u64; + for hash in self.pending_canonicalizations.drain(..) 
{ + trace!(target: "state-db", "Post canonicalizing {:?}", hash); + let level = self + .levels + .pop_front() + .expect("Hash validity is checked in `canonicalize`"); + let index = level + .iter() + .position(|overlay| overlay.hash == hash) + .expect("Hash validity is checked in `canonicalize`"); + + // discard unfinalized overlays and values + for (i, overlay) in level.into_iter().enumerate() { + if i != index { + discard_descendants( + &mut self.levels, + &mut self.values, + 0, + &mut self.parents, + &self.pinned, + &mut self.pinned_insertions, + &overlay.hash, + ); + } + if self.pinned.contains_key(&overlay.hash) { + self.pinned_insertions + .insert(overlay.hash.clone(), overlay.inserted); + } else { + self.parents.remove(&overlay.hash); + discard_values(&mut self.values, overlay.inserted); + } + } + } + if let Some(hash) = last { + let last_canonicalized = ( + hash, + self.last_canonicalized + .as_ref() + .map(|(_, n)| n + count) + .unwrap_or(count - 1), + ); + self.last_canonicalized = Some(last_canonicalized); + } + } + + /// Get a value from the node overlay. This searches in every existing changeset. + pub fn get(&self, key: &Key) -> Option { + if let Some((_, value)) = self.values.get(&key) { + return Some(value.clone()); + } + None + } + + /// Check if the block is in the canonicalization queue. + pub fn have_block(&self, hash: &BlockHash) -> bool { + (self.parents.contains_key(hash) || self.pending_insertions.contains(hash)) + && !self.pending_canonicalizations.contains(hash) + } + + /// Revert a single level. Returns commit set that deletes the journal or `None` if not possible. 
+ pub fn revert_one(&mut self) -> Option> { + self.levels.pop_back().map(|level| { + let mut commit = CommitSet::default(); + for overlay in level.into_iter() { + commit.meta.deleted.push(overlay.journal_key); + self.parents.remove(&overlay.hash); + discard_values(&mut self.values, overlay.inserted); + } + commit + }) + } + + fn revert_insertions(&mut self) { + self.pending_insertions.reverse(); + for hash in self.pending_insertions.drain(..) { + self.parents.remove(&hash); + // find a level. When iterating insertions backwards the hash is always last in the level. + let level_index = self + .levels + .iter() + .position(|level| { + level + .last() + .expect("Hash is added in `insert` in reverse order") + .hash + == hash + }) + .expect("Hash is added in insert"); + + let overlay = self.levels[level_index] + .pop() + .expect("Empty levels are not allowed in self.levels"); + discard_values(&mut self.values, overlay.inserted); + if self.levels[level_index].is_empty() { + debug_assert_eq!(level_index, self.levels.len() - 1); + self.levels.pop_back(); + } + } + } + + /// Apply all pending changes + pub fn apply_pending(&mut self) { + self.apply_canonicalizations(); + self.pending_insertions.clear(); + } + + /// Revert all pending changes + pub fn revert_pending(&mut self) { + self.pending_canonicalizations.clear(); + self.revert_insertions(); + } + + /// Pin state values in memory + pub fn pin(&mut self, hash: &BlockHash) { + if self.pending_insertions.contains(hash) { + debug_assert!(false, "Trying to pin pending state"); + return; + } + // Also pin all parents + let mut parent = Some(hash); + while let Some(hash) = parent { + let refs = self.pinned.entry(hash.clone()).or_default(); + if *refs == 0 { + trace!(target: "state-db-pin", "Pinned non-canon block: {:?}", hash); + } + *refs += 1; + parent = self.parents.get(hash); + } + } + + /// Discard pinned state + pub fn unpin(&mut self, hash: &BlockHash) { + // Also unpin all parents + let mut parent = 
Some(hash.clone()); + while let Some(hash) = parent { + parent = self.parents.get(&hash).cloned(); + match self.pinned.entry(hash.clone()) { + Entry::Occupied(mut entry) => { + *entry.get_mut() -= 1; + if *entry.get() == 0 { + entry.remove(); + if let Some(inserted) = self.pinned_insertions.remove(&hash) { + trace!(target: "state-db-pin", "Discarding unpinned non-canon block: {:?}", hash); + discard_values(&mut self.values, inserted); + self.parents.remove(&hash); + } + } + } + Entry::Vacant(_) => {} + } + } + } } #[cfg(test)] mod tests { - use std::io; - use sp_core::H256; - use super::{NonCanonicalOverlay, to_journal_key}; - use crate::{ChangeSet, CommitSet}; - use crate::test::{make_db, make_changeset}; - - fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { - overlay.get(&H256::from_low_u64_be(key)) == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) - } - - #[test] - fn created_from_empty_db() { - let db = make_db(&[]); - let overlay: NonCanonicalOverlay = NonCanonicalOverlay::new(&db).unwrap(); - assert_eq!(overlay.last_canonicalized, None); - assert!(overlay.levels.is_empty()); - assert!(overlay.parents.is_empty()); - } - - #[test] - #[should_panic] - fn canonicalize_empty_panics() { - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let mut commit = CommitSet::default(); - overlay.canonicalize::(&H256::default(), &mut commit).unwrap(); - } - - #[test] - #[should_panic] - fn insert_ahead_panics() { - let db = make_db(&[]); - let h1 = H256::random(); - let h2 = H256::random(); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 2, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 1, &h1, ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn insert_behind_panics() { - let h1 = H256::random(); - let h2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, 
&H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 3, &h1, ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn insert_unknown_parent_panics() { - let db = make_db(&[]); - let h1 = H256::random(); - let h2 = H256::random(); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - overlay.insert::(&h2, 2, &H256::default(), ChangeSet::default()).unwrap(); - } - - #[test] - #[should_panic] - fn canonicalize_unknown_panics() { - let h1 = H256::random(); - let h2 = H256::random(); - let db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - overlay.insert::(&h1, 1, &H256::default(), ChangeSet::default()).unwrap(); - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); - } - - #[test] - fn insert_canonicalize_one() { - let h1 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[3, 4], &[2]); - let insertion = overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap(); - assert_eq!(insertion.data.inserted.len(), 0); - assert_eq!(insertion.data.deleted.len(), 0); - assert_eq!(insertion.meta.inserted.len(), 2); - assert_eq!(insertion.meta.deleted.len(), 0); - db.commit(&insertion); - let mut finalization = CommitSet::default(); - overlay.canonicalize::(&h1, &mut finalization).unwrap(); - assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); - assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); - assert_eq!(finalization.meta.inserted.len(), 1); - assert_eq!(finalization.meta.deleted.len(), 1); - db.commit(&finalization); - assert!(db.data_eq(&make_db(&[1, 3, 4]))); - } - - #[test] - fn restore_from_journal() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = 
NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - assert_eq!(db.meta.len(), 3); - - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - } - - #[test] - fn restore_from_journal_after_canonicalize() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])).unwrap()); - db.commit(&overlay.insert::(&h2, 11, &h1, make_changeset(&[5], &[3])).unwrap()); - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h1, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - } - - #[test] - fn insert_canonicalize_two() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2, 3, 4]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); - assert!(contains(&overlay, 5)); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); - assert!(contains(&overlay, 7)); - assert!(contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 2); - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h1, &mut 
commit).unwrap(); - db.commit(&commit); - assert!(contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 2); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 1); - assert!(!contains(&overlay, 5)); - assert!(contains(&overlay, 7)); - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); - } - - #[test] - fn insert_same_key() { - let mut db = make_db(&[]); - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert::(&h_2, 1, &H256::default(), c_2).unwrap()); - assert!(contains(&overlay, 1)); - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_1, &mut commit).unwrap(); - db.commit(&commit); - assert!(contains(&overlay, 1)); - overlay.apply_pending(); - assert!(!contains(&overlay, 1)); - } - - #[test] - fn insert_with_pending_canonicalization() { - let h1 = H256::random(); - let h2 = H256::random(); - let h3 = H256::random(); - let mut db = make_db(&[]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - let changeset = make_changeset(&[], &[]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset.clone()).unwrap()); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset.clone()).unwrap()); - overlay.apply_pending(); - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h1, &mut commit).unwrap(); - overlay.canonicalize::(&h2, &mut commit).unwrap(); - db.commit(&commit); - db.commit(&overlay.insert::(&h3, 3, &h2, changeset.clone()).unwrap()); - overlay.apply_pending(); - 
assert_eq!(overlay.levels.len(), 1); - } - - #[test] - fn complex_tree() { - use crate::MetaDb; - let mut db = make_db(&[]); - - // - 1 - 1_1 - 1_1_1 - // \ 1_2 - 1_2_1 - // \ 1_2_2 - // \ 1_2_3 - // - // - 2 - 2_1 - 2_1_1 - // \ 2_2 - // - // 1_2_2 is the winner - - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); - - let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); - let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); - let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); - let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); - - let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); - let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); - let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); - let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); - let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); - - db.commit(&overlay.insert::(&h_1_1, 2, &h_1, c_1_1).unwrap()); - db.commit(&overlay.insert::(&h_1_2, 2, &h_1, c_1_2).unwrap()); - - db.commit(&overlay.insert::(&h_2, 1, &H256::default(), c_2).unwrap()); - - db.commit(&overlay.insert::(&h_2_1, 2, &h_2, c_2_1).unwrap()); - db.commit(&overlay.insert::(&h_2_2, 2, &h_2, c_2_2).unwrap()); - - db.commit(&overlay.insert::(&h_1_1_1, 3, &h_1_1, c_1_1_1).unwrap()); - db.commit(&overlay.insert::(&h_1_2_1, 3, &h_1_2, c_1_2_1).unwrap()); - db.commit(&overlay.insert::(&h_1_2_2, 3, &h_1_2, c_1_2_2).unwrap()); - db.commit(&overlay.insert::(&h_1_2_3, 3, &h_1_2, c_1_2_3).unwrap()); - db.commit(&overlay.insert::(&h_2_1_1, 3, &h_2_1, c_2_1_1).unwrap()); - - assert!(contains(&overlay, 2)); - assert!(contains(&overlay, 11)); - assert!(contains(&overlay, 21)); - 
assert!(contains(&overlay, 111)); - assert!(contains(&overlay, 122)); - assert!(contains(&overlay, 211)); - assert_eq!(overlay.levels.len(), 3); - assert_eq!(overlay.parents.len(), 11); - assert_eq!(overlay.last_canonicalized, Some((H256::default(), 0))); - - // check if restoration from journal results in the same tree - let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); - assert_eq!(overlay.levels, overlay2.levels); - assert_eq!(overlay.parents, overlay2.parents); - assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); - - // canonicalize 1. 2 and all its children should be discarded - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_1, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 6); - assert!(!contains(&overlay, 1)); - assert!(!contains(&overlay, 2)); - assert!(!contains(&overlay, 21)); - assert!(!contains(&overlay, 22)); - assert!(!contains(&overlay, 211)); - assert!(contains(&overlay, 111)); - assert!(!contains(&overlay, 211)); - // check that journals are deleted - assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); - assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); - assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); - - // canonicalize 1_2. 
1_1 and all its children should be discarded - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_1_2, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 1); - assert_eq!(overlay.parents.len(), 3); - assert!(!contains(&overlay, 11)); - assert!(!contains(&overlay, 111)); - assert!(contains(&overlay, 121)); - assert!(contains(&overlay, 122)); - assert!(contains(&overlay, 123)); - assert!(overlay.have_block(&h_1_2_1)); - assert!(!overlay.have_block(&h_1_2)); - assert!(!overlay.have_block(&h_1_1)); - assert!(!overlay.have_block(&h_1_1_1)); - - // canonicalize 1_2_2 - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_1_2_2, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(db.data_eq(&make_db(&[1, 12, 122]))); - assert_eq!(overlay.last_canonicalized, Some((h_1_2_2, 3))); - } - - #[test] - fn insert_revert() { - let h1 = H256::random(); - let h2 = H256::random(); - let mut db = make_db(&[1, 2, 3, 4]); - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - assert!(overlay.revert_one().is_none()); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - db.commit(&overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap()); - db.commit(&overlay.insert::(&h2, 2, &h1, changeset2).unwrap()); - assert!(contains(&overlay, 7)); - db.commit(&overlay.revert_one().unwrap()); - assert_eq!(overlay.parents.len(), 1); - assert!(contains(&overlay, 5)); - assert!(!contains(&overlay, 7)); - db.commit(&overlay.revert_one().unwrap()); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - assert!(overlay.revert_one().is_none()); - } - - #[test] - fn revert_pending_insertion() { - let h1 = H256::random(); - let h2_1 = H256::random(); - let h2_2 = H256::random(); - let db = make_db(&[]); - let mut overlay = 
NonCanonicalOverlay::::new(&db).unwrap(); - let changeset1 = make_changeset(&[5, 6], &[2]); - let changeset2 = make_changeset(&[7, 8], &[5, 3]); - let changeset3 = make_changeset(&[9], &[]); - overlay.insert::(&h1, 1, &H256::default(), changeset1).unwrap(); - assert!(contains(&overlay, 5)); - overlay.insert::(&h2_1, 2, &h1, changeset2).unwrap(); - overlay.insert::(&h2_2, 2, &h1, changeset3).unwrap(); - assert!(contains(&overlay, 7)); - assert!(contains(&overlay, 5)); - assert!(contains(&overlay, 9)); - assert_eq!(overlay.levels.len(), 2); - assert_eq!(overlay.parents.len(), 3); - overlay.revert_pending(); - assert!(!contains(&overlay, 5)); - assert_eq!(overlay.levels.len(), 0); - assert_eq!(overlay.parents.len(), 0); - } - - #[test] - fn keeps_pinned() { - let mut db = make_db(&[]); - - // - 0 - 1_1 - // \ 1_2 - - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert::(&h_2, 1, &H256::default(), c_2).unwrap()); - overlay.apply_pending(); - - overlay.pin(&h_1); - - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_2, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); - assert!(contains(&overlay, 1)); - overlay.unpin(&h_1); - assert!(!contains(&overlay, 1)); - } - - #[test] - fn keeps_pinned_ref_count() { - let mut db = make_db(&[]); - - // - 0 - 1_1 - // \ 1_2 - // \ 1_3 - - // 1_1 and 1_2 both make the same change - let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); - let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); - let (h_3, c_3) = (H256::random(), make_changeset(&[], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_1, 1, &H256::default(), c_1).unwrap()); - db.commit(&overlay.insert::(&h_2, 1, &H256::default(), 
c_2).unwrap()); - db.commit(&overlay.insert::(&h_3, 1, &H256::default(), c_3).unwrap()); - overlay.apply_pending(); - - overlay.pin(&h_1); - - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_3, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); // 1_2 should be discarded, 1_1 is pinned - - assert!(contains(&overlay, 1)); - overlay.unpin(&h_1); - assert!(!contains(&overlay, 1)); - } - - #[test] - fn pin_keeps_parent() { - let mut db = make_db(&[]); - - // - 0 - 1_1 - 2_1 - // \ 1_2 - - let (h_11, c_11) = (H256::random(), make_changeset(&[1], &[])); - let (h_12, c_12) = (H256::random(), make_changeset(&[], &[])); - let (h_21, c_21) = (H256::random(), make_changeset(&[], &[])); - - let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); - db.commit(&overlay.insert::(&h_11, 1, &H256::default(), c_11).unwrap()); - db.commit(&overlay.insert::(&h_12, 1, &H256::default(), c_12).unwrap()); - db.commit(&overlay.insert::(&h_21, 2, &h_11, c_21).unwrap()); - overlay.apply_pending(); - - overlay.pin(&h_21); - - let mut commit = CommitSet::default(); - overlay.canonicalize::(&h_12, &mut commit).unwrap(); - db.commit(&commit); - overlay.apply_pending(); // 1_1 and 2_1 should be both pinned - - assert!(contains(&overlay, 1)); - overlay.unpin(&h_21); - assert!(!contains(&overlay, 1)); - assert!(overlay.pinned.is_empty()); - } + use super::{to_journal_key, NonCanonicalOverlay}; + use crate::test::{make_changeset, make_db}; + use crate::{ChangeSet, CommitSet}; + use sp_core::H256; + use std::io; + + fn contains(overlay: &NonCanonicalOverlay, key: u64) -> bool { + overlay.get(&H256::from_low_u64_be(key)) + == Some(H256::from_low_u64_be(key).as_bytes().to_vec()) + } + + #[test] + fn created_from_empty_db() { + let db = make_db(&[]); + let overlay: NonCanonicalOverlay = NonCanonicalOverlay::new(&db).unwrap(); + assert_eq!(overlay.last_canonicalized, None); + assert!(overlay.levels.is_empty()); + assert!(overlay.parents.is_empty()); + } + + 
#[test] + #[should_panic] + fn canonicalize_empty_panics() { + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&H256::default(), &mut commit) + .unwrap(); + } + + #[test] + #[should_panic] + fn insert_ahead_panics() { + let db = make_db(&[]); + let h1 = H256::random(); + let h2 = H256::random(); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 2, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 1, &h1, ChangeSet::default()) + .unwrap(); + } + + #[test] + #[should_panic] + fn insert_behind_panics() { + let h1 = H256::random(); + let h2 = H256::random(); + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 3, &h1, ChangeSet::default()) + .unwrap(); + } + + #[test] + #[should_panic] + fn insert_unknown_parent_panics() { + let db = make_db(&[]); + let h1 = H256::random(); + let h2 = H256::random(); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + overlay + .insert::(&h2, 2, &H256::default(), ChangeSet::default()) + .unwrap(); + } + + #[test] + #[should_panic] + fn canonicalize_unknown_panics() { + let h1 = H256::random(); + let h2 = H256::random(); + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + overlay + .insert::(&h1, 1, &H256::default(), ChangeSet::default()) + .unwrap(); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&h2, &mut commit).unwrap(); + } + + #[test] + fn insert_canonicalize_one() { + let h1 = H256::random(); + let mut db = make_db(&[1, 2]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset = make_changeset(&[3, 4], &[2]); + let insertion = overlay + 
.insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(); + assert_eq!(insertion.data.inserted.len(), 0); + assert_eq!(insertion.data.deleted.len(), 0); + assert_eq!(insertion.meta.inserted.len(), 2); + assert_eq!(insertion.meta.deleted.len(), 0); + db.commit(&insertion); + let mut finalization = CommitSet::default(); + overlay + .canonicalize::(&h1, &mut finalization) + .unwrap(); + assert_eq!(finalization.data.inserted.len(), changeset.inserted.len()); + assert_eq!(finalization.data.deleted.len(), changeset.deleted.len()); + assert_eq!(finalization.meta.inserted.len(), 1); + assert_eq!(finalization.meta.deleted.len(), 1); + db.commit(&finalization); + assert!(db.data_eq(&make_db(&[1, 3, 4]))); + } + + #[test] + fn restore_from_journal() { + let h1 = H256::random(); + let h2 = H256::random(); + let mut db = make_db(&[1, 2]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 11, &h1, make_changeset(&[5], &[3])) + .unwrap(), + ); + assert_eq!(db.meta.len(), 3); + + let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); + assert_eq!(overlay.levels, overlay2.levels); + assert_eq!(overlay.parents, overlay2.parents); + assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); + } + + #[test] + fn restore_from_journal_after_canonicalize() { + let h1 = H256::random(); + let h2 = H256::random(); + let mut db = make_db(&[1, 2]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h1, 10, &H256::default(), make_changeset(&[3, 4], &[2])) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 11, &h1, make_changeset(&[5], &[3])) + .unwrap(), + ); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&h1, &mut commit).unwrap(); + db.commit(&commit); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + + let overlay2 
= NonCanonicalOverlay::::new(&db).unwrap(); + assert_eq!(overlay.levels, overlay2.levels); + assert_eq!(overlay.parents, overlay2.parents); + assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); + } + + #[test] + fn insert_canonicalize_two() { + let h1 = H256::random(); + let h2 = H256::random(); + let mut db = make_db(&[1, 2, 3, 4]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset1) + .unwrap(), + ); + assert!(contains(&overlay, 5)); + db.commit( + &overlay + .insert::(&h2, 2, &h1, changeset2) + .unwrap(), + ); + assert!(contains(&overlay, 7)); + assert!(contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 2); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&h1, &mut commit).unwrap(); + db.commit(&commit); + assert!(contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 2); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + assert_eq!(overlay.parents.len(), 1); + assert!(!contains(&overlay, 5)); + assert!(contains(&overlay, 7)); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&h2, &mut commit).unwrap(); + db.commit(&commit); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + assert!(db.data_eq(&make_db(&[1, 4, 6, 7, 8]))); + } + + #[test] + fn insert_same_key() { + let mut db = make_db(&[]); + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h_1, 1, &H256::default(), c_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_2, 1, &H256::default(), c_2) + .unwrap(), + ); + 
assert!(contains(&overlay, 1)); + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&h_1, &mut commit) + .unwrap(); + db.commit(&commit); + assert!(contains(&overlay, 1)); + overlay.apply_pending(); + assert!(!contains(&overlay, 1)); + } + + #[test] + fn insert_with_pending_canonicalization() { + let h1 = H256::random(); + let h2 = H256::random(); + let h3 = H256::random(); + let mut db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset = make_changeset(&[], &[]); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset.clone()) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 2, &h1, changeset.clone()) + .unwrap(), + ); + overlay.apply_pending(); + let mut commit = CommitSet::default(); + overlay.canonicalize::(&h1, &mut commit).unwrap(); + overlay.canonicalize::(&h2, &mut commit).unwrap(); + db.commit(&commit); + db.commit( + &overlay + .insert::(&h3, 3, &h2, changeset.clone()) + .unwrap(), + ); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + } + + #[test] + fn complex_tree() { + use crate::MetaDb; + let mut db = make_db(&[]); + + // - 1 - 1_1 - 1_1_1 + // \ 1_2 - 1_2_1 + // \ 1_2_2 + // \ 1_2_3 + // + // - 2 - 2_1 - 2_1_1 + // \ 2_2 + // + // 1_2_2 is the winner + + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + + let (h_1_1, c_1_1) = (H256::random(), make_changeset(&[11], &[])); + let (h_1_2, c_1_2) = (H256::random(), make_changeset(&[12], &[])); + let (h_2_1, c_2_1) = (H256::random(), make_changeset(&[21], &[])); + let (h_2_2, c_2_2) = (H256::random(), make_changeset(&[22], &[])); + + let (h_1_1_1, c_1_1_1) = (H256::random(), make_changeset(&[111], &[])); + let (h_1_2_1, c_1_2_1) = (H256::random(), make_changeset(&[121], &[])); + let (h_1_2_2, c_1_2_2) = (H256::random(), make_changeset(&[122], &[])); + let (h_1_2_3, c_1_2_3) = (H256::random(), make_changeset(&[123], &[])); + 
let (h_2_1_1, c_2_1_1) = (H256::random(), make_changeset(&[211], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h_1, 1, &H256::default(), c_1) + .unwrap(), + ); + + db.commit(&overlay.insert::(&h_1_1, 2, &h_1, c_1_1).unwrap()); + db.commit(&overlay.insert::(&h_1_2, 2, &h_1, c_1_2).unwrap()); + + db.commit( + &overlay + .insert::(&h_2, 1, &H256::default(), c_2) + .unwrap(), + ); + + db.commit(&overlay.insert::(&h_2_1, 2, &h_2, c_2_1).unwrap()); + db.commit(&overlay.insert::(&h_2_2, 2, &h_2, c_2_2).unwrap()); + + db.commit( + &overlay + .insert::(&h_1_1_1, 3, &h_1_1, c_1_1_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_1_2_1, 3, &h_1_2, c_1_2_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_1_2_2, 3, &h_1_2, c_1_2_2) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_1_2_3, 3, &h_1_2, c_1_2_3) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_2_1_1, 3, &h_2_1, c_2_1_1) + .unwrap(), + ); + + assert!(contains(&overlay, 2)); + assert!(contains(&overlay, 11)); + assert!(contains(&overlay, 21)); + assert!(contains(&overlay, 111)); + assert!(contains(&overlay, 122)); + assert!(contains(&overlay, 211)); + assert_eq!(overlay.levels.len(), 3); + assert_eq!(overlay.parents.len(), 11); + assert_eq!(overlay.last_canonicalized, Some((H256::default(), 0))); + + // check if restoration from journal results in the same tree + let overlay2 = NonCanonicalOverlay::::new(&db).unwrap(); + assert_eq!(overlay.levels, overlay2.levels); + assert_eq!(overlay.parents, overlay2.parents); + assert_eq!(overlay.last_canonicalized, overlay2.last_canonicalized); + + // canonicalize 1. 
2 and all its children should be discarded + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&h_1, &mut commit) + .unwrap(); + db.commit(&commit); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 6); + assert!(!contains(&overlay, 1)); + assert!(!contains(&overlay, 2)); + assert!(!contains(&overlay, 21)); + assert!(!contains(&overlay, 22)); + assert!(!contains(&overlay, 211)); + assert!(contains(&overlay, 111)); + assert!(!contains(&overlay, 211)); + // check that journals are deleted + assert!(db.get_meta(&to_journal_key(1, 0)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(1, 1)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 1)).unwrap().is_some()); + assert!(db.get_meta(&to_journal_key(2, 2)).unwrap().is_none()); + assert!(db.get_meta(&to_journal_key(2, 3)).unwrap().is_none()); + + // canonicalize 1_2. 1_1 and all its children should be discarded + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&h_1_2, &mut commit) + .unwrap(); + db.commit(&commit); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 1); + assert_eq!(overlay.parents.len(), 3); + assert!(!contains(&overlay, 11)); + assert!(!contains(&overlay, 111)); + assert!(contains(&overlay, 121)); + assert!(contains(&overlay, 122)); + assert!(contains(&overlay, 123)); + assert!(overlay.have_block(&h_1_2_1)); + assert!(!overlay.have_block(&h_1_2)); + assert!(!overlay.have_block(&h_1_1)); + assert!(!overlay.have_block(&h_1_1_1)); + + // canonicalize 1_2_2 + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&h_1_2_2, &mut commit) + .unwrap(); + db.commit(&commit); + overlay.apply_pending(); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + assert!(db.data_eq(&make_db(&[1, 12, 122]))); + assert_eq!(overlay.last_canonicalized, Some((h_1_2_2, 3))); + } + + #[test] + fn insert_revert() { + let h1 = H256::random(); + let h2 = H256::random(); + 
let mut db = make_db(&[1, 2, 3, 4]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + assert!(overlay.revert_one().is_none()); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + db.commit( + &overlay + .insert::(&h1, 1, &H256::default(), changeset1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h2, 2, &h1, changeset2) + .unwrap(), + ); + assert!(contains(&overlay, 7)); + db.commit(&overlay.revert_one().unwrap()); + assert_eq!(overlay.parents.len(), 1); + assert!(contains(&overlay, 5)); + assert!(!contains(&overlay, 7)); + db.commit(&overlay.revert_one().unwrap()); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + assert!(overlay.revert_one().is_none()); + } + + #[test] + fn revert_pending_insertion() { + let h1 = H256::random(); + let h2_1 = H256::random(); + let h2_2 = H256::random(); + let db = make_db(&[]); + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + let changeset1 = make_changeset(&[5, 6], &[2]); + let changeset2 = make_changeset(&[7, 8], &[5, 3]); + let changeset3 = make_changeset(&[9], &[]); + overlay + .insert::(&h1, 1, &H256::default(), changeset1) + .unwrap(); + assert!(contains(&overlay, 5)); + overlay + .insert::(&h2_1, 2, &h1, changeset2) + .unwrap(); + overlay + .insert::(&h2_2, 2, &h1, changeset3) + .unwrap(); + assert!(contains(&overlay, 7)); + assert!(contains(&overlay, 5)); + assert!(contains(&overlay, 9)); + assert_eq!(overlay.levels.len(), 2); + assert_eq!(overlay.parents.len(), 3); + overlay.revert_pending(); + assert!(!contains(&overlay, 5)); + assert_eq!(overlay.levels.len(), 0); + assert_eq!(overlay.parents.len(), 0); + } + + #[test] + fn keeps_pinned() { + let mut db = make_db(&[]); + + // - 0 - 1_1 + // \ 1_2 + + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[2], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + 
&overlay + .insert::(&h_1, 1, &H256::default(), c_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_2, 1, &H256::default(), c_2) + .unwrap(), + ); + overlay.apply_pending(); + + overlay.pin(&h_1); + + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&h_2, &mut commit) + .unwrap(); + db.commit(&commit); + overlay.apply_pending(); + assert!(contains(&overlay, 1)); + overlay.unpin(&h_1); + assert!(!contains(&overlay, 1)); + } + + #[test] + fn keeps_pinned_ref_count() { + let mut db = make_db(&[]); + + // - 0 - 1_1 + // \ 1_2 + // \ 1_3 + + // 1_1 and 1_2 both make the same change + let (h_1, c_1) = (H256::random(), make_changeset(&[1], &[])); + let (h_2, c_2) = (H256::random(), make_changeset(&[1], &[])); + let (h_3, c_3) = (H256::random(), make_changeset(&[], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h_1, 1, &H256::default(), c_1) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_2, 1, &H256::default(), c_2) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_3, 1, &H256::default(), c_3) + .unwrap(), + ); + overlay.apply_pending(); + + overlay.pin(&h_1); + + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&h_3, &mut commit) + .unwrap(); + db.commit(&commit); + overlay.apply_pending(); // 1_2 should be discarded, 1_1 is pinned + + assert!(contains(&overlay, 1)); + overlay.unpin(&h_1); + assert!(!contains(&overlay, 1)); + } + + #[test] + fn pin_keeps_parent() { + let mut db = make_db(&[]); + + // - 0 - 1_1 - 2_1 + // \ 1_2 + + let (h_11, c_11) = (H256::random(), make_changeset(&[1], &[])); + let (h_12, c_12) = (H256::random(), make_changeset(&[], &[])); + let (h_21, c_21) = (H256::random(), make_changeset(&[], &[])); + + let mut overlay = NonCanonicalOverlay::::new(&db).unwrap(); + db.commit( + &overlay + .insert::(&h_11, 1, &H256::default(), c_11) + .unwrap(), + ); + db.commit( + &overlay + .insert::(&h_12, 1, &H256::default(), c_12) + 
.unwrap(), + ); + db.commit(&overlay.insert::(&h_21, 2, &h_11, c_21).unwrap()); + overlay.apply_pending(); + + overlay.pin(&h_21); + + let mut commit = CommitSet::default(); + overlay + .canonicalize::(&h_12, &mut commit) + .unwrap(); + db.commit(&commit); + overlay.apply_pending(); // 1_1 and 2_1 should be both pinned + + assert!(contains(&overlay, 1)); + overlay.unpin(&h_21); + assert!(!contains(&overlay, 1)); + assert!(overlay.pinned.is_empty()); + } } diff --git a/client/state-db/src/pruning.rs b/client/state-db/src/pruning.rs index 6cf5f26006..1a894952ce 100644 --- a/client/state-db/src/pruning.rs +++ b/client/state-db/src/pruning.rs @@ -22,10 +22,10 @@ //! the death list. //! The changes are journaled in the DB. -use std::collections::{HashMap, HashSet, VecDeque}; -use codec::{Encode, Decode}; -use crate::{CommitSet, Error, MetaDb, to_meta_key, Hash}; +use crate::{to_meta_key, CommitSet, Error, Hash, MetaDb}; +use codec::{Decode, Encode}; use log::{trace, warn}; +use std::collections::{HashMap, HashSet, VecDeque}; const LAST_PRUNED: &[u8] = b"last_pruned"; const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; @@ -33,348 +33,386 @@ const PRUNING_JOURNAL: &[u8] = b"pruning_journal"; /// See module documentation. #[derive(parity_util_mem_derive::MallocSizeOf)] pub struct RefWindow { - /// A queue of keys that should be deleted for each block in the pruning window. - death_rows: VecDeque>, - /// An index that maps each key from `death_rows` to block number. - death_index: HashMap, - /// Block number that corresponds to the front of `death_rows`. - pending_number: u64, - /// Number of call of `note_canonical` after - /// last call `apply_pending` or `revert_pending` - pending_canonicalizations: usize, - /// Number of calls of `prune_one` after - /// last call `apply_pending` or `revert_pending` - pending_prunings: usize, + /// A queue of keys that should be deleted for each block in the pruning window. 
+ death_rows: VecDeque>, + /// An index that maps each key from `death_rows` to block number. + death_index: HashMap, + /// Block number that corresponds to the front of `death_rows`. + pending_number: u64, + /// Number of call of `note_canonical` after + /// last call `apply_pending` or `revert_pending` + pending_canonicalizations: usize, + /// Number of calls of `prune_one` after + /// last call `apply_pending` or `revert_pending` + pending_prunings: usize, } #[derive(Debug, PartialEq, Eq, parity_util_mem_derive::MallocSizeOf)] struct DeathRow { - hash: BlockHash, - journal_key: Vec, - deleted: HashSet, + hash: BlockHash, + journal_key: Vec, + deleted: HashSet, } #[derive(Encode, Decode)] struct JournalRecord { - hash: BlockHash, - inserted: Vec, - deleted: Vec, + hash: BlockHash, + inserted: Vec, + deleted: Vec, } fn to_journal_key(block: u64) -> Vec { - to_meta_key(PRUNING_JOURNAL, &block) + to_meta_key(PRUNING_JOURNAL, &block) } impl RefWindow { - pub fn new(db: &D) -> Result, Error> { - let last_pruned = db.get_meta(&to_meta_key(LAST_PRUNED, &())) - .map_err(|e| Error::Db(e))?; - let pending_number: u64 = match last_pruned { - Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, - None => 0, - }; - let mut block = pending_number; - let mut pruning = RefWindow { - death_rows: Default::default(), - death_index: Default::default(), - pending_number: pending_number, - pending_canonicalizations: 0, - pending_prunings: 0, - }; - // read the journal - trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); - loop { - let journal_key = to_journal_key(block); - match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ - Some(record) => { - let record: JournalRecord = Decode::decode(&mut record.as_slice())?; - trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); - pruning.import(&record.hash, journal_key, record.inserted.into_iter(), record.deleted); - }, - None => break, - } - block += 1; - } - Ok(pruning) - } - - fn import>(&mut self, hash: &BlockHash, journal_key: Vec, inserted: I, deleted: Vec) { - // remove all re-inserted keys from death rows - for k in inserted { - if let Some(block) = self.death_index.remove(&k) { - self.death_rows[(block - self.pending_number) as usize].deleted.remove(&k); - } - } - - // add new keys - let imported_block = self.pending_number + self.death_rows.len() as u64; - for k in deleted.iter() { - self.death_index.insert(k.clone(), imported_block); - } - self.death_rows.push_back( - DeathRow { - hash: hash.clone(), - deleted: deleted.into_iter().collect(), - journal_key: journal_key, - } - ); - } - - pub fn window_size(&self) -> u64 { - (self.death_rows.len() - self.pending_prunings) as u64 - } - - pub fn next_hash(&self) -> Option { - self.death_rows.get(self.pending_prunings).map(|r| r.hash.clone()) - } - - pub fn mem_used(&self) -> usize { - 0 - } - - pub fn pending(&self) -> u64 { - self.pending_number + self.pending_prunings as u64 - } - - pub fn have_block(&self, hash: &BlockHash) -> bool { - self.death_rows.iter().skip(self.pending_prunings).any(|r| r.hash == *hash) - } - - /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. 
- pub fn prune_one(&mut self, commit: &mut CommitSet) { - if let Some(pruned) = self.death_rows.get(self.pending_prunings) { - trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - let index = self.pending_number + self.pending_prunings as u64; - commit.data.deleted.extend(pruned.deleted.iter().cloned()); - commit.meta.inserted.push((to_meta_key(LAST_PRUNED, &()), index.encode())); - commit.meta.deleted.push(pruned.journal_key.clone()); - self.pending_prunings += 1; - } else { - warn!(target: "state-db", "Trying to prune when there's nothing to prune"); - } - } - - /// Add a change set to the window. Creates a journal record and pushes it to `commit` - pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { - trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); - let inserted = commit.data.inserted.iter().map(|(k, _)| k.clone()).collect(); - let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); - let journal_record = JournalRecord { - hash: hash.clone(), - inserted, - deleted, - }; - let block = self.pending_number + self.death_rows.len() as u64; - let journal_key = to_journal_key(block); - commit.meta.inserted.push((journal_key.clone(), journal_record.encode())); - self.import(&journal_record.hash, journal_key, journal_record.inserted.into_iter(), journal_record.deleted); - self.pending_canonicalizations += 1; - } - - /// Apply all pending changes - pub fn apply_pending(&mut self) { - self.pending_canonicalizations = 0; - for _ in 0 .. 
self.pending_prunings { - let pruned = self.death_rows.pop_front().expect("pending_prunings is always < death_rows.len()"); - trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); - for k in pruned.deleted.iter() { - self.death_index.remove(&k); - } - self.pending_number += 1; - } - self.pending_prunings = 0; - } - - /// Revert all pending changes - pub fn revert_pending(&mut self) { - // Revert pending deletions. - // Note that pending insertions might cause some existing deletions to be removed from `death_index` - // We don't bother to track and revert that for now. This means that a few nodes might end up no being - // deleted in case transaction fails and `revert_pending` is called. - self.death_rows.truncate(self.death_rows.len() - self.pending_canonicalizations); - let new_max_block = self.death_rows.len() as u64 + self.pending_number; - self.death_index.retain(|_, block| *block < new_max_block); - self.pending_canonicalizations = 0; - self.pending_prunings = 0; - } + pub fn new(db: &D) -> Result, Error> { + let last_pruned = db + .get_meta(&to_meta_key(LAST_PRUNED, &())) + .map_err(|e| Error::Db(e))?; + let pending_number: u64 = match last_pruned { + Some(buffer) => u64::decode(&mut buffer.as_slice())? + 1, + None => 0, + }; + let mut block = pending_number; + let mut pruning = RefWindow { + death_rows: Default::default(), + death_index: Default::default(), + pending_number: pending_number, + pending_canonicalizations: 0, + pending_prunings: 0, + }; + // read the journal + trace!(target: "state-db", "Reading pruning journal. Pending #{}", pending_number); + loop { + let journal_key = to_journal_key(block); + match db.get_meta(&journal_key).map_err(|e| Error::Db(e))? 
{ + Some(record) => { + let record: JournalRecord = + Decode::decode(&mut record.as_slice())?; + trace!(target: "state-db", "Pruning journal entry {} ({} inserted, {} deleted)", block, record.inserted.len(), record.deleted.len()); + pruning.import( + &record.hash, + journal_key, + record.inserted.into_iter(), + record.deleted, + ); + } + None => break, + } + block += 1; + } + Ok(pruning) + } + + fn import>( + &mut self, + hash: &BlockHash, + journal_key: Vec, + inserted: I, + deleted: Vec, + ) { + // remove all re-inserted keys from death rows + for k in inserted { + if let Some(block) = self.death_index.remove(&k) { + self.death_rows[(block - self.pending_number) as usize] + .deleted + .remove(&k); + } + } + + // add new keys + let imported_block = self.pending_number + self.death_rows.len() as u64; + for k in deleted.iter() { + self.death_index.insert(k.clone(), imported_block); + } + self.death_rows.push_back(DeathRow { + hash: hash.clone(), + deleted: deleted.into_iter().collect(), + journal_key: journal_key, + }); + } + + pub fn window_size(&self) -> u64 { + (self.death_rows.len() - self.pending_prunings) as u64 + } + + pub fn next_hash(&self) -> Option { + self.death_rows + .get(self.pending_prunings) + .map(|r| r.hash.clone()) + } + + pub fn mem_used(&self) -> usize { + 0 + } + + pub fn pending(&self) -> u64 { + self.pending_number + self.pending_prunings as u64 + } + + pub fn have_block(&self, hash: &BlockHash) -> bool { + self.death_rows + .iter() + .skip(self.pending_prunings) + .any(|r| r.hash == *hash) + } + + /// Prune next block. Expects at least one block in the window. Adds changes to `commit`. 
+ pub fn prune_one(&mut self, commit: &mut CommitSet) { + if let Some(pruned) = self.death_rows.get(self.pending_prunings) { + trace!(target: "state-db", "Pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); + let index = self.pending_number + self.pending_prunings as u64; + commit.data.deleted.extend(pruned.deleted.iter().cloned()); + commit + .meta + .inserted + .push((to_meta_key(LAST_PRUNED, &()), index.encode())); + commit.meta.deleted.push(pruned.journal_key.clone()); + self.pending_prunings += 1; + } else { + warn!(target: "state-db", "Trying to prune when there's nothing to prune"); + } + } + + /// Add a change set to the window. Creates a journal record and pushes it to `commit` + pub fn note_canonical(&mut self, hash: &BlockHash, commit: &mut CommitSet) { + trace!(target: "state-db", "Adding to pruning window: {:?} ({} inserted, {} deleted)", hash, commit.data.inserted.len(), commit.data.deleted.len()); + let inserted = commit + .data + .inserted + .iter() + .map(|(k, _)| k.clone()) + .collect(); + let deleted = ::std::mem::replace(&mut commit.data.deleted, Vec::new()); + let journal_record = JournalRecord { + hash: hash.clone(), + inserted, + deleted, + }; + let block = self.pending_number + self.death_rows.len() as u64; + let journal_key = to_journal_key(block); + commit + .meta + .inserted + .push((journal_key.clone(), journal_record.encode())); + self.import( + &journal_record.hash, + journal_key, + journal_record.inserted.into_iter(), + journal_record.deleted, + ); + self.pending_canonicalizations += 1; + } + + /// Apply all pending changes + pub fn apply_pending(&mut self) { + self.pending_canonicalizations = 0; + for _ in 0..self.pending_prunings { + let pruned = self + .death_rows + .pop_front() + .expect("pending_prunings is always < death_rows.len()"); + trace!(target: "state-db", "Applying pruning {:?} ({} deleted)", pruned.hash, pruned.deleted.len()); + for k in pruned.deleted.iter() { + self.death_index.remove(&k); + } + 
self.pending_number += 1; + } + self.pending_prunings = 0; + } + + /// Revert all pending changes + pub fn revert_pending(&mut self) { + // Revert pending deletions. + // Note that pending insertions might cause some existing deletions to be removed from `death_index` + // We don't bother to track and revert that for now. This means that a few nodes might end up no being + // deleted in case transaction fails and `revert_pending` is called. + self.death_rows + .truncate(self.death_rows.len() - self.pending_canonicalizations); + let new_max_block = self.death_rows.len() as u64 + self.pending_number; + self.death_index.retain(|_, block| *block < new_max_block); + self.pending_canonicalizations = 0; + self.pending_prunings = 0; + } } #[cfg(test)] mod tests { - use super::RefWindow; - use sp_core::H256; - use crate::CommitSet; - use crate::test::{make_db, make_commit, TestDb}; - - fn check_journal(pruning: &RefWindow, db: &TestDb) { - let restored: RefWindow = RefWindow::new(db).unwrap(); - assert_eq!(pruning.pending_number, restored.pending_number); - assert_eq!(pruning.death_rows, restored.death_rows); - assert_eq!(pruning.death_index, restored.death_index); - } - - #[test] - fn created_from_empty_db() { - let db = make_db(&[]); - let pruning: RefWindow = RefWindow::new(&db).unwrap(); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - } - - #[test] - fn prune_empty() { - let db = make_db(&[]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert_eq!(pruning.pending_number, 0); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - assert!(pruning.pending_prunings == 0); - assert!(pruning.pending_canonicalizations == 0); - } - - #[test] - fn prune_one() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = 
make_commit(&[4, 5], &[1, 3]); - let h = H256::random(); - pruning.note_canonical(&h, &mut commit); - db.commit(&commit); - assert!(pruning.have_block(&h)); - pruning.apply_pending(); - assert!(pruning.have_block(&h)); - assert!(commit.data.deleted.is_empty()); - assert_eq!(pruning.death_rows.len(), 1); - assert_eq!(pruning.death_index.len(), 2); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - assert!(!pruning.have_block(&h)); - db.commit(&commit); - pruning.apply_pending(); - assert!(!pruning.have_block(&h)); - assert!(db.data_eq(&make_db(&[2, 4, 5]))); - assert!(pruning.death_rows.is_empty()); - assert!(pruning.death_index.is_empty()); - assert_eq!(pruning.pending_number, 1); - } - - #[test] - fn prune_two() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[5], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.pending_number, 2); - } - - #[test] - fn prune_two_pending() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[4], &[1]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[5], &[2]); - 
pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - pruning.apply_pending(); - assert!(db.data_eq(&make_db(&[3, 4, 5]))); - assert_eq!(pruning.pending_number, 2); - } - - #[test] - fn reinserted_survives() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.apply_pending(); - - check_journal(&pruning, &db); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 3]))); - pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); - } - - #[test] - fn reinserted_survive_pending() { - let mut db = make_db(&[1, 2, 3]); - let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); - let mut commit = make_commit(&[], &[2]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[2], &[]); - pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - let mut commit = make_commit(&[], &[2]); - 
pruning.note_canonical(&H256::random(), &mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - let mut commit = CommitSet::default(); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 2, 3]))); - pruning.prune_one(&mut commit); - db.commit(&commit); - assert!(db.data_eq(&make_db(&[1, 3]))); - pruning.apply_pending(); - assert_eq!(pruning.pending_number, 3); - } + use super::RefWindow; + use crate::test::{make_commit, make_db, TestDb}; + use crate::CommitSet; + use sp_core::H256; + + fn check_journal(pruning: &RefWindow, db: &TestDb) { + let restored: RefWindow = RefWindow::new(db).unwrap(); + assert_eq!(pruning.pending_number, restored.pending_number); + assert_eq!(pruning.death_rows, restored.death_rows); + assert_eq!(pruning.death_index, restored.death_index); + } + + #[test] + fn created_from_empty_db() { + let db = make_db(&[]); + let pruning: RefWindow = RefWindow::new(&db).unwrap(); + assert_eq!(pruning.pending_number, 0); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + } + + #[test] + fn prune_empty() { + let db = make_db(&[]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + assert_eq!(pruning.pending_number, 0); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + assert!(pruning.pending_prunings == 0); + assert!(pruning.pending_canonicalizations == 0); + } + + #[test] + fn prune_one() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[4, 5], &[1, 3]); + let h = H256::random(); + pruning.note_canonical(&h, &mut commit); + db.commit(&commit); + assert!(pruning.have_block(&h)); + pruning.apply_pending(); + 
assert!(pruning.have_block(&h)); + assert!(commit.data.deleted.is_empty()); + assert_eq!(pruning.death_rows.len(), 1); + assert_eq!(pruning.death_index.len(), 2); + assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); + check_journal(&pruning, &db); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + assert!(!pruning.have_block(&h)); + db.commit(&commit); + pruning.apply_pending(); + assert!(!pruning.have_block(&h)); + assert!(db.data_eq(&make_db(&[2, 4, 5]))); + assert!(pruning.death_rows.is_empty()); + assert!(pruning.death_index.is_empty()); + assert_eq!(pruning.pending_number, 1); + } + + #[test] + fn prune_two() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[4], &[1]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[5], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); + + check_journal(&pruning, &db); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[3, 4, 5]))); + assert_eq!(pruning.pending_number, 2); + } + + #[test] + fn prune_two_pending() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[4], &[1]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[5], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3, 4, 5]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + 
db.commit(&commit); + assert!(db.data_eq(&make_db(&[2, 3, 4, 5]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + pruning.apply_pending(); + assert!(db.data_eq(&make_db(&[3, 4, 5]))); + assert_eq!(pruning.pending_number, 2); + } + + #[test] + fn reinserted_survives() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[2], &[]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.apply_pending(); + + check_journal(&pruning, &db); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 3]))); + pruning.apply_pending(); + assert_eq!(pruning.pending_number, 3); + } + + #[test] + fn reinserted_survive_pending() { + let mut db = make_db(&[1, 2, 3]); + let mut pruning: RefWindow = RefWindow::new(&db).unwrap(); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[2], &[]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + let mut commit = make_commit(&[], &[2]); + pruning.note_canonical(&H256::random(), &mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + 
assert!(db.data_eq(&make_db(&[1, 2, 3]))); + let mut commit = CommitSet::default(); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 2, 3]))); + pruning.prune_one(&mut commit); + db.commit(&commit); + assert!(db.data_eq(&make_db(&[1, 3]))); + pruning.apply_pending(); + assert_eq!(pruning.pending_number, 3); + } } diff --git a/client/state-db/src/test.rs b/client/state-db/src/test.rs index accafa9bf8..1c83b97e5a 100644 --- a/client/state-db/src/test.rs +++ b/client/state-db/src/test.rs @@ -16,79 +16,84 @@ //! Test utils -use std::collections::HashMap; +use crate::{ChangeSet, CommitSet, DBValue, MetaDb, NodeDb}; use sp_core::H256; -use crate::{DBValue, ChangeSet, CommitSet, MetaDb, NodeDb}; +use std::collections::HashMap; #[derive(Default, Debug, Clone, PartialEq, Eq)] pub struct TestDb { - pub data: HashMap, - pub meta: HashMap, DBValue>, + pub data: HashMap, + pub meta: HashMap, DBValue>, } impl MetaDb for TestDb { - type Error = (); + type Error = (); - fn get_meta(&self, key: &[u8]) -> Result, ()> { - Ok(self.meta.get(key).cloned()) - } + fn get_meta(&self, key: &[u8]) -> Result, ()> { + Ok(self.meta.get(key).cloned()) + } } impl NodeDb for TestDb { - type Error = (); - type Key = H256; + type Error = (); + type Key = H256; - fn get(&self, key: &H256) -> Result, ()> { - Ok(self.data.get(key).cloned()) - } + fn get(&self, key: &H256) -> Result, ()> { + Ok(self.data.get(key).cloned()) + } } impl TestDb { - pub fn commit(&mut self, commit: &CommitSet) { - self.data.extend(commit.data.inserted.iter().cloned()); - self.meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.data.deleted.iter() { - self.data.remove(k); - } - self.meta.extend(commit.meta.inserted.iter().cloned()); - for k in commit.meta.deleted.iter() { - self.meta.remove(k); - } - } - - pub fn data_eq(&self, other: &TestDb) -> bool { - self.data == other.data - } + pub fn commit(&mut self, commit: &CommitSet) { + 
self.data.extend(commit.data.inserted.iter().cloned()); + self.meta.extend(commit.meta.inserted.iter().cloned()); + for k in commit.data.deleted.iter() { + self.data.remove(k); + } + self.meta.extend(commit.meta.inserted.iter().cloned()); + for k in commit.meta.deleted.iter() { + self.meta.remove(k); + } + } + + pub fn data_eq(&self, other: &TestDb) -> bool { + self.data == other.data + } } pub fn make_changeset(inserted: &[u64], deleted: &[u64]) -> ChangeSet { - ChangeSet { - inserted: inserted - .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect(), - deleted: deleted.iter().map(|v| H256::from_low_u64_be(*v)).collect(), - } + ChangeSet { + inserted: inserted + .iter() + .map(|v| { + ( + H256::from_low_u64_be(*v), + H256::from_low_u64_be(*v).as_bytes().to_vec(), + ) + }) + .collect(), + deleted: deleted.iter().map(|v| H256::from_low_u64_be(*v)).collect(), + } } pub fn make_commit(inserted: &[u64], deleted: &[u64]) -> CommitSet { - CommitSet { - data: make_changeset(inserted, deleted), - meta: ChangeSet::default(), - } + CommitSet { + data: make_changeset(inserted, deleted), + meta: ChangeSet::default(), + } } pub fn make_db(inserted: &[u64]) -> TestDb { - TestDb { - data: inserted - .iter() - .map(|v| { - (H256::from_low_u64_be(*v), H256::from_low_u64_be(*v).as_bytes().to_vec()) - }) - .collect(), - meta: Default::default(), - } + TestDb { + data: inserted + .iter() + .map(|v| { + ( + H256::from_low_u64_be(*v), + H256::from_low_u64_be(*v).as_bytes().to_vec(), + ) + }) + .collect(), + meta: Default::default(), + } } - diff --git a/client/telemetry/src/async_record.rs b/client/telemetry/src/async_record.rs index 34b7c1435a..d988439d04 100644 --- a/client/telemetry/src/async_record.rs +++ b/client/telemetry/src/async_record.rs @@ -2,154 +2,149 @@ //! FIXME: REMOVE THIS ONCE THE PR WAS MERGE //! 
https://github.com/slog-rs/async/pull/14 -use slog::{Record, RecordStatic, Level, SingleKV, KV, BorrowedKV}; -use slog::{Serializer, OwnedKVList, Key}; +use slog::{BorrowedKV, Level, Record, RecordStatic, SingleKV, KV}; +use slog::{Key, OwnedKVList, Serializer}; use std::fmt; use take_mut::take; struct ToSendSerializer { - kv: Box, + kv: Box, } impl ToSendSerializer { - fn new() -> Self { - ToSendSerializer { kv: Box::new(()) } - } + fn new() -> Self { + ToSendSerializer { kv: Box::new(()) } + } - fn finish(self) -> Box { - self.kv - } + fn finish(self) -> Box { + self.kv + } } impl Serializer for ToSendSerializer { - fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_unit(&mut self, key: Key) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ())))); - Ok(()) - } - fn emit_none(&mut self, key: Key) -> slog::Result { - let val: Option<()> = None; - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_char(&mut self, key: Key, val: char) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i8(&mut self, key: Key, val: i8) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, 
SingleKV(key, val)))); - Ok(()) - } - fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { - let val = val.to_owned(); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } - fn emit_arguments( - &mut self, - key: Key, - val: &fmt::Arguments, - ) -> slog::Result { - let val = fmt::format(*val); - take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); - Ok(()) - } + fn emit_bool(&mut self, key: Key, val: bool) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_unit(&mut self, key: Key) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, ())))); + Ok(()) + } + fn emit_none(&mut self, key: Key) -> slog::Result { + let val: Option<()> = None; + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_char(&mut self, key: Key, val: char) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u8(&mut self, key: Key, val: u8) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i8(&mut self, key: Key, val: i8) -> 
slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u16(&mut self, key: Key, val: u16) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i16(&mut self, key: Key, val: i16) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u32(&mut self, key: Key, val: u32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i32(&mut self, key: Key, val: i32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_f32(&mut self, key: Key, val: f32) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_u64(&mut self, key: Key, val: u64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_i64(&mut self, key: Key, val: i64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_f64(&mut self, key: Key, val: f64) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_usize(&mut self, key: Key, val: usize) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_isize(&mut self, key: Key, val: isize) -> slog::Result { + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_str(&mut self, key: Key, val: &str) -> slog::Result { + let val = val.to_owned(); + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } + fn emit_arguments(&mut self, key: Key, val: &fmt::Arguments) -> slog::Result { + let val = fmt::format(*val); + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } - fn emit_serde(&mut self, key: Key, value: &dyn slog::SerdeValue) -> slog::Result { - let val = value.to_sendable(); - take(&mut self.kv, |kv| 
Box::new((kv, SingleKV(key, val)))); - Ok(()) - } + fn emit_serde(&mut self, key: Key, value: &dyn slog::SerdeValue) -> slog::Result { + let val = value.to_sendable(); + take(&mut self.kv, |kv| Box::new((kv, SingleKV(key, val)))); + Ok(()) + } } pub(crate) struct AsyncRecord { - msg: String, - level: Level, - location: Box, - tag: String, - logger_values: OwnedKVList, - kv: Box, + msg: String, + level: Level, + location: Box, + tag: String, + logger_values: OwnedKVList, + kv: Box, } impl AsyncRecord { - /// Serializes a `Record` and an `OwnedKVList`. - pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self { - let mut ser = ToSendSerializer::new(); - record - .kv() - .serialize(record, &mut ser) - .expect("`ToSendSerializer` can't fail"); + /// Serializes a `Record` and an `OwnedKVList`. + pub fn from(record: &Record, logger_values: &OwnedKVList) -> Self { + let mut ser = ToSendSerializer::new(); + record + .kv() + .serialize(record, &mut ser) + .expect("`ToSendSerializer` can't fail"); - AsyncRecord { - msg: fmt::format(*record.msg()), - level: record.level(), - location: Box::new(*record.location()), - tag: String::from(record.tag()), - logger_values: logger_values.clone(), - kv: ser.finish(), - } - } + AsyncRecord { + msg: fmt::format(*record.msg()), + level: record.level(), + location: Box::new(*record.location()), + tag: String::from(record.tag()), + logger_values: logger_values.clone(), + kv: ser.finish(), + } + } - /// Deconstruct this `AsyncRecord` into a record and `OwnedKVList`. - pub fn as_record_values(&self, mut f: impl FnMut(&Record, &OwnedKVList)) { - let rs = RecordStatic { - location: &*self.location, - level: self.level, - tag: &self.tag, - }; + /// Deconstruct this `AsyncRecord` into a record and `OwnedKVList`. 
+ pub fn as_record_values(&self, mut f: impl FnMut(&Record, &OwnedKVList)) { + let rs = RecordStatic { + location: &*self.location, + level: self.level, + tag: &self.tag, + }; - f(&Record::new( - &rs, - &format_args!("{}", self.msg), - BorrowedKV(&self.kv), - ), &self.logger_values) - } + f( + &Record::new(&rs, &format_args!("{}", self.msg), BorrowedKV(&self.kv)), + &self.logger_values, + ) + } } diff --git a/client/telemetry/src/lib.rs b/client/telemetry/src/lib.rs index 6c90d6bbcc..38e2584e44 100644 --- a/client/telemetry/src/lib.rs +++ b/client/telemetry/src/lib.rs @@ -58,37 +58,42 @@ //! ``` //! -use futures::{prelude::*, channel::mpsc}; -use libp2p::{Multiaddr, wasm_ext}; +use futures::{channel::mpsc, prelude::*}; +use libp2p::{wasm_ext, Multiaddr}; use log::{error, warn}; use parking_lot::Mutex; -use serde::{Serialize, Deserialize, Deserializer}; -use std::{pin::Pin, sync::Arc, task::{Context, Poll}, time::Duration}; +use serde::{Deserialize, Deserializer, Serialize}; +use std::{ + pin::Pin, + sync::Arc, + task::{Context, Poll}, + time::Duration, +}; use wasm_timer::Instant; pub use libp2p::wasm_ext::ExtTransport; -pub use slog_scope::with_logger; pub use slog; +pub use slog_scope::with_logger; mod async_record; mod worker; /// Configuration for telemetry. pub struct TelemetryConfig { - /// Collection of telemetry WebSocket servers with a corresponding verbosity level. - pub endpoints: TelemetryEndpoints, - - /// Optional external implementation of a libp2p transport. Used in WASM contexts where we need - /// some binding between the networking provided by the operating system or environment and - /// libp2p. - /// - /// This parameter exists whatever the target platform is, but it is expected to be set to - /// `Some` only when compiling for WASM. - /// - /// > **Important**: Each individual call to `write` corresponds to one message. There is no - /// > internal buffering going on. 
In the context of WebSockets, each `write` - /// > must be one individual WebSockets frame. - pub wasm_external_transport: Option, + /// Collection of telemetry WebSocket servers with a corresponding verbosity level. + pub endpoints: TelemetryEndpoints, + + /// Optional external implementation of a libp2p transport. Used in WASM contexts where we need + /// some binding between the networking provided by the operating system or environment and + /// libp2p. + /// + /// This parameter exists whatever the target platform is, but it is expected to be set to + /// `Some` only when compiling for WASM. + /// + /// > **Important**: Each individual call to `write` corresponds to one message. There is no + /// > internal buffering going on. In the context of WebSockets, each `write` + /// > must be one individual WebSockets frame. + pub wasm_external_transport: Option, } /// List of telemetry servers we want to talk to. Contains the URL of the server, and the @@ -97,46 +102,51 @@ pub struct TelemetryConfig { /// The URL string can be either a URL or a multiaddress. #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub struct TelemetryEndpoints( - #[serde(deserialize_with = "url_or_multiaddr_deser")] - Vec<(Multiaddr, u8)> + #[serde(deserialize_with = "url_or_multiaddr_deser")] Vec<(Multiaddr, u8)>, ); /// Custom deserializer for TelemetryEndpoints, used to convert urls or multiaddr to multiaddr. fn url_or_multiaddr_deser<'de, D>(deserializer: D) -> Result, D::Error> - where D: Deserializer<'de> +where + D: Deserializer<'de>, { - Vec::<(String, u8)>::deserialize(deserializer)? - .iter() - .map(|e| Ok((url_to_multiaddr(&e.0) - .map_err(serde::de::Error::custom)?, e.1))) - .collect() + Vec::<(String, u8)>::deserialize(deserializer)? 
+ .iter() + .map(|e| { + Ok(( + url_to_multiaddr(&e.0).map_err(serde::de::Error::custom)?, + e.1, + )) + }) + .collect() } impl TelemetryEndpoints { - pub fn new(endpoints: Vec<(String, u8)>) -> Result { - let endpoints: Result, libp2p::multiaddr::Error> = endpoints.iter() - .map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))) - .collect(); - endpoints.map(Self) - } + pub fn new(endpoints: Vec<(String, u8)>) -> Result { + let endpoints: Result, libp2p::multiaddr::Error> = endpoints + .iter() + .map(|e| Ok((url_to_multiaddr(&e.0)?, e.1))) + .collect(); + endpoints.map(Self) + } } /// Parses a WebSocket URL into a libp2p `Multiaddr`. fn url_to_multiaddr(url: &str) -> Result { - // First, assume that we have a `Multiaddr`. - let parse_error = match url.parse() { - Ok(ma) => return Ok(ma), - Err(err) => err, - }; - - // If not, try the `ws://path/url` format. - if let Ok(ma) = libp2p::multiaddr::from_url(url) { - return Ok(ma) - } - - // If we have no clue about the format of that string, assume that we were expecting a - // `Multiaddr`. - Err(parse_error) + // First, assume that we have a `Multiaddr`. + let parse_error = match url.parse() { + Ok(ma) => return Ok(ma), + Err(err) => err, + }; + + // If not, try the `ws://path/url` format. + if let Ok(ma) = libp2p::multiaddr::from_url(url) { + return Ok(ma); + } + + // If we have no clue about the format of that string, assume that we were expecting a + // `Multiaddr`. + Err(parse_error) } /// Log levels. @@ -154,9 +164,9 @@ pub const CONSENSUS_INFO: &str = "1"; /// Dropping all the clones unregisters the telemetry. #[derive(Clone)] pub struct Telemetry { - inner: Arc>, - /// Slog guard so that we don't get deregistered. - _guard: Arc, + inner: Arc>, + /// Slog guard so that we don't get deregistered. + _guard: Arc, } /// Behind the `Mutex` in `Telemetry`. @@ -166,16 +176,16 @@ pub struct Telemetry { /// where we extract the telemetry registration so that it continues running during the shutdown /// process. 
struct TelemetryInner { - /// Worker for the telemetry. `None` if it failed to initialize. - worker: Option, - /// Receives log entries for them to be dispatched to the worker. - receiver: mpsc::Receiver, + /// Worker for the telemetry. `None` if it failed to initialize. + worker: Option, + /// Receives log entries for them to be dispatched to the worker. + receiver: mpsc::Receiver, } /// Implements `slog::Drain`. struct TelemetryDrain { - /// Sends log entries. - sender: std::panic::AssertUnwindSafe>, + /// Sends log entries. + sender: std::panic::AssertUnwindSafe>, } /// Initializes the telemetry. See the crate root documentation for more information. @@ -183,121 +193,128 @@ struct TelemetryDrain { /// Please be careful to not call this function twice in the same program. The `slog` crate /// doesn't provide any way of knowing whether a global logger has already been registered. pub fn init_telemetry(config: TelemetryConfig) -> Telemetry { - // Build the list of telemetry endpoints. - let (endpoints, wasm_external_transport) = (config.endpoints.0, config.wasm_external_transport); - - let (sender, receiver) = mpsc::channel(16); - let guard = { - let logger = TelemetryDrain { sender: std::panic::AssertUnwindSafe(sender) }; - let root = slog::Logger::root(slog::Drain::fuse(logger), slog::o!()); - slog_scope::set_global_logger(root) - }; - - let worker = match worker::TelemetryWorker::new(endpoints, wasm_external_transport) { - Ok(w) => Some(w), - Err(err) => { - error!(target: "telemetry", "Failed to initialize telemetry worker: {:?}", err); - None - } - }; - - Telemetry { - inner: Arc::new(Mutex::new(TelemetryInner { - worker, - receiver, - })), - _guard: Arc::new(guard), - } + // Build the list of telemetry endpoints. 
+ let (endpoints, wasm_external_transport) = (config.endpoints.0, config.wasm_external_transport); + + let (sender, receiver) = mpsc::channel(16); + let guard = { + let logger = TelemetryDrain { + sender: std::panic::AssertUnwindSafe(sender), + }; + let root = slog::Logger::root(slog::Drain::fuse(logger), slog::o!()); + slog_scope::set_global_logger(root) + }; + + let worker = match worker::TelemetryWorker::new(endpoints, wasm_external_transport) { + Ok(w) => Some(w), + Err(err) => { + error!(target: "telemetry", "Failed to initialize telemetry worker: {:?}", err); + None + } + }; + + Telemetry { + inner: Arc::new(Mutex::new(TelemetryInner { worker, receiver })), + _guard: Arc::new(guard), + } } /// Event generated when polling the worker. #[derive(Debug)] pub enum TelemetryEvent { - /// We have established a connection to one of the telemetry endpoint, either for the first - /// time or after having been disconnected earlier. - Connected, + /// We have established a connection to one of the telemetry endpoint, either for the first + /// time or after having been disconnected earlier. + Connected, } impl Stream for Telemetry { - type Item = TelemetryEvent; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let before = Instant::now(); - - // Because the `Telemetry` is cloneable, we need to put the actual fields behind a `Mutex`. - // However, the user is only ever supposed to poll from one instance of `Telemetry`, while - // the other instances are used only for RAII purposes. - // We assume that the user is following this advice and therefore that the `Mutex` is only - // ever locked once at a time. 
- let mut inner = match self.inner.try_lock() { - Some(l) => l, - None => { - warn!( - target: "telemetry", - "The telemetry seems to be polled multiple times simultaneously" - ); - // Returning `Pending` here means that we may never get polled again, but this is - // ok because we're in a situation where something else is actually currently doing - // the polling. - return Poll::Pending; - } - }; - - let mut has_connected = false; - - // The polling pattern is: poll the worker so that it processes its queue, then add one - // message from the receiver (if possible), then poll the worker again, and so on. - loop { - if let Some(worker) = inner.worker.as_mut() { - while let Poll::Ready(event) = worker.poll(cx) { - // Right now we only have one possible event. This line is here in order to not - // forget to handle any possible new event type. - let worker::TelemetryWorkerEvent::Connected = event; - has_connected = true; - } - } - - if let Poll::Ready(Some(log_entry)) = Stream::poll_next(Pin::new(&mut inner.receiver), cx) { - if let Some(worker) = inner.worker.as_mut() { - log_entry.as_record_values(|rec, val| { let _ = worker.log(rec, val); }); - } - } else { - break; - } - } - - if before.elapsed() > Duration::from_millis(200) { - warn!(target: "telemetry", "Polling the telemetry took more than 200ms"); - } - - if has_connected { - Poll::Ready(Some(TelemetryEvent::Connected)) - } else { - Poll::Pending - } - } + type Item = TelemetryEvent; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let before = Instant::now(); + + // Because the `Telemetry` is cloneable, we need to put the actual fields behind a `Mutex`. + // However, the user is only ever supposed to poll from one instance of `Telemetry`, while + // the other instances are used only for RAII purposes. + // We assume that the user is following this advice and therefore that the `Mutex` is only + // ever locked once at a time. 
+ let mut inner = match self.inner.try_lock() { + Some(l) => l, + None => { + warn!( + target: "telemetry", + "The telemetry seems to be polled multiple times simultaneously" + ); + // Returning `Pending` here means that we may never get polled again, but this is + // ok because we're in a situation where something else is actually currently doing + // the polling. + return Poll::Pending; + } + }; + + let mut has_connected = false; + + // The polling pattern is: poll the worker so that it processes its queue, then add one + // message from the receiver (if possible), then poll the worker again, and so on. + loop { + if let Some(worker) = inner.worker.as_mut() { + while let Poll::Ready(event) = worker.poll(cx) { + // Right now we only have one possible event. This line is here in order to not + // forget to handle any possible new event type. + let worker::TelemetryWorkerEvent::Connected = event; + has_connected = true; + } + } + + if let Poll::Ready(Some(log_entry)) = + Stream::poll_next(Pin::new(&mut inner.receiver), cx) + { + if let Some(worker) = inner.worker.as_mut() { + log_entry.as_record_values(|rec, val| { + let _ = worker.log(rec, val); + }); + } + } else { + break; + } + } + + if before.elapsed() > Duration::from_millis(200) { + warn!(target: "telemetry", "Polling the telemetry took more than 200ms"); + } + + if has_connected { + Poll::Ready(Some(TelemetryEvent::Connected)) + } else { + Poll::Pending + } + } } impl slog::Drain for TelemetryDrain { - type Ok = (); - type Err = (); - - fn log(&self, record: &slog::Record, values: &slog::OwnedKVList) -> Result { - let before = Instant::now(); - - let serialized = async_record::AsyncRecord::from(record, values); - // Note: interestingly, `try_send` requires a `&mut` because it modifies some internal value, while `clone()` - // is lock-free. 
- if let Err(err) = self.sender.clone().try_send(serialized) { - warn!(target: "telemetry", "Ignored telemetry message because of error on channel: {:?}", err); - } - - if before.elapsed() > Duration::from_millis(50) { - warn!(target: "telemetry", "Writing a telemetry log took more than 50ms"); - } - - Ok(()) - } + type Ok = (); + type Err = (); + + fn log( + &self, + record: &slog::Record, + values: &slog::OwnedKVList, + ) -> Result { + let before = Instant::now(); + + let serialized = async_record::AsyncRecord::from(record, values); + // Note: interestingly, `try_send` requires a `&mut` because it modifies some internal value, while `clone()` + // is lock-free. + if let Err(err) = self.sender.clone().try_send(serialized) { + warn!(target: "telemetry", "Ignored telemetry message because of error on channel: {:?}", err); + } + + if before.elapsed() > Duration::from_millis(50) { + warn!(target: "telemetry", "Writing a telemetry log took more than 50ms"); + } + + Ok(()) + } } /// Translates to `slog_scope::info`, but contains an additional verbosity @@ -314,32 +331,45 @@ macro_rules! 
telemetry { #[cfg(test)] mod telemetry_endpoints_tests { - use libp2p::Multiaddr; - use super::TelemetryEndpoints; - use super::url_to_multiaddr; - - #[test] - fn valid_endpoints() { - let endp = vec![("wss://telemetry.polkadot.io/submit/".into(), 3), ("/ip4/80.123.90.4/tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid"); - let mut res: Vec<(Multiaddr, u8)> = vec![]; - for (a, b) in endp.iter() { - res.push((url_to_multiaddr(a).expect("provided url should be valid"), *b)) - } - assert_eq!(telem.0, res); - } - - #[test] - fn invalid_endpoints() { - let endp = vec![("/ip4/...80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp); - assert!(telem.is_err()); - } - - #[test] - fn valid_and_invalid_endpoints() { - let endp = vec![("/ip4/80.123.90.4/tcp/5432".into(), 3), ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4)]; - let telem = TelemetryEndpoints::new(endp); - assert!(telem.is_err()); - } + use super::url_to_multiaddr; + use super::TelemetryEndpoints; + use libp2p::Multiaddr; + + #[test] + fn valid_endpoints() { + let endp = vec![ + ("wss://telemetry.polkadot.io/submit/".into(), 3), + ("/ip4/80.123.90.4/tcp/5432".into(), 4), + ]; + let telem = + TelemetryEndpoints::new(endp.clone()).expect("Telemetry endpoint should be valid"); + let mut res: Vec<(Multiaddr, u8)> = vec![]; + for (a, b) in endp.iter() { + res.push(( + url_to_multiaddr(a).expect("provided url should be valid"), + *b, + )) + } + assert_eq!(telem.0, res); + } + + #[test] + fn invalid_endpoints() { + let endp = vec![ + ("/ip4/...80.123.90.4/tcp/5432".into(), 3), + ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4), + ]; + let telem = TelemetryEndpoints::new(endp); + assert!(telem.is_err()); + } + + #[test] + fn valid_and_invalid_endpoints() { + let endp = vec![ + ("/ip4/80.123.90.4/tcp/5432".into(), 3), + ("/ip4/no:!?;rlkqre;;::::///tcp/5432".into(), 4), + ]; + let 
telem = TelemetryEndpoints::new(endp); + assert!(telem.is_err()); + } } diff --git a/client/telemetry/src/worker.rs b/client/telemetry/src/worker.rs index 8f43bb612a..b8f0494403 100644 --- a/client/telemetry/src/worker.rs +++ b/client/telemetry/src/worker.rs @@ -28,8 +28,8 @@ use bytes::BytesMut; use futures::{prelude::*, ready}; -use libp2p::{core::transport::OptionalTransport, Multiaddr, Transport, wasm_ext}; -use log::{trace, warn, error}; +use libp2p::{core::transport::OptionalTransport, wasm_ext, Multiaddr, Transport}; +use log::{error, trace, warn}; use slog::Drain; use std::{io, pin::Pin, task::Context, task::Poll, time}; @@ -42,143 +42,154 @@ const CONNECT_TIMEOUT: time::Duration = time::Duration::from_secs(20); /// Event generated when polling the worker. #[derive(Debug)] pub enum TelemetryWorkerEvent { - /// We have established a connection to one of the telemetry endpoint, either for the first - /// time or after having been disconnected earlier. - Connected, + /// We have established a connection to one of the telemetry endpoint, either for the first + /// time or after having been disconnected earlier. + Connected, } /// Telemetry processing machine. #[derive(Debug)] pub struct TelemetryWorker { - /// List of nodes with their maximum verbosity level. - nodes: Vec<(node::Node, u8)>, + /// List of nodes with their maximum verbosity level. + nodes: Vec<(node::Node, u8)>, } trait StreamAndSink: Stream + Sink {} impl, I> StreamAndSink for T {} type WsTrans = libp2p::core::transport::boxed::Boxed< - Pin, - Error = io::Error - > + Send>>, - io::Error + Pin< + Box< + dyn StreamAndSink, Error = io::Error> + + Send, + >, + >, + io::Error, >; impl TelemetryWorker { - /// Builds a new `TelemetryWorker`. - /// - /// The endpoints must be a list of targets, plus a verbosity level. When you send a message - /// to the telemetry, only the targets whose verbosity is higher than the verbosity of the - /// message will receive it. 
- pub fn new( - endpoints: impl IntoIterator, - wasm_external_transport: impl Into> - ) -> Result { - let transport = match wasm_external_transport.into() { - Some(t) => OptionalTransport::some(t), - None => OptionalTransport::none() - }.map((|inner, _| StreamSink::from(inner)) as fn(_, _) -> _); - - // The main transport is the `wasm_external_transport`, but if we're on desktop we add - // support for TCP+WebSocket+DNS as a fallback. In practice, you're not expected to pass - // an external transport on desktop and the fallback is used all the time. - #[cfg(not(target_os = "unknown"))] - let transport = transport.or_transport({ - let inner = libp2p::dns::DnsConfig::new(libp2p::tcp::TcpConfig::new())?; - libp2p::websocket::framed::WsConfig::new(inner) - .and_then(|connec, _| { - let connec = connec - .with(|item: BytesMut| { - let item = libp2p::websocket::framed::OutgoingData::Binary(item); - future::ready(Ok::<_, io::Error>(item)) - }) - .try_filter(|item| future::ready(item.is_data())) - .map_ok(|data| BytesMut::from(data.as_ref())); - future::ready(Ok::<_, io::Error>(connec)) - }) - }); - - let transport = transport - .timeout(CONNECT_TIMEOUT) - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .map(|out, _| { - let out = out - .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) - .sink_map_err(|err| io::Error::new(io::ErrorKind::Other, err)); - Box::pin(out) as Pin> - }) - .boxed(); - - Ok(TelemetryWorker { - nodes: endpoints.into_iter().map(|(addr, verbosity)| { - let node = node::Node::new(transport.clone(), addr); - (node, verbosity) - }).collect() - }) - } - - /// Polls the worker for events that happened. 
- pub fn poll(&mut self, cx: &mut Context) -> Poll { - for (node, _) in &mut self.nodes { - loop { - match node::Node::poll(Pin::new(node), cx) { - Poll::Ready(node::NodeEvent::Connected) => - return Poll::Ready(TelemetryWorkerEvent::Connected), - Poll::Ready(node::NodeEvent::Disconnected(_)) => continue, - Poll::Pending => break, - } - } - } - - Poll::Pending - } - - /// Equivalent to `slog::Drain::log`, but takes `self` by `&mut` instead, which is more convenient. - /// - /// Keep in mind that you should call `TelemetryWorker::poll` in order to process the messages. - /// You should call this function right after calling `slog::Drain::log`. - pub fn log(&mut self, record: &slog::Record, values: &slog::OwnedKVList) -> Result<(), ()> { - let msg_verbosity = match record.tag().parse::() { - Ok(v) => v, - Err(err) => { - warn!(target: "telemetry", "Failed to parse telemetry tag {:?}: {:?}", + /// Builds a new `TelemetryWorker`. + /// + /// The endpoints must be a list of targets, plus a verbosity level. When you send a message + /// to the telemetry, only the targets whose verbosity is higher than the verbosity of the + /// message will receive it. + pub fn new( + endpoints: impl IntoIterator, + wasm_external_transport: impl Into>, + ) -> Result { + let transport = match wasm_external_transport.into() { + Some(t) => OptionalTransport::some(t), + None => OptionalTransport::none(), + } + .map((|inner, _| StreamSink::from(inner)) as fn(_, _) -> _); + + // The main transport is the `wasm_external_transport`, but if we're on desktop we add + // support for TCP+WebSocket+DNS as a fallback. In practice, you're not expected to pass + // an external transport on desktop and the fallback is used all the time. 
+ #[cfg(not(target_os = "unknown"))] + let transport = transport.or_transport({ + let inner = libp2p::dns::DnsConfig::new(libp2p::tcp::TcpConfig::new())?; + libp2p::websocket::framed::WsConfig::new(inner).and_then(|connec, _| { + let connec = connec + .with(|item: BytesMut| { + let item = libp2p::websocket::framed::OutgoingData::Binary(item); + future::ready(Ok::<_, io::Error>(item)) + }) + .try_filter(|item| future::ready(item.is_data())) + .map_ok(|data| BytesMut::from(data.as_ref())); + future::ready(Ok::<_, io::Error>(connec)) + }) + }); + + let transport = transport + .timeout(CONNECT_TIMEOUT) + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .map(|out, _| { + let out = out + .map_err(|err| io::Error::new(io::ErrorKind::Other, err)) + .sink_map_err(|err| io::Error::new(io::ErrorKind::Other, err)); + Box::pin(out) as Pin> + }) + .boxed(); + + Ok(TelemetryWorker { + nodes: endpoints + .into_iter() + .map(|(addr, verbosity)| { + let node = node::Node::new(transport.clone(), addr); + (node, verbosity) + }) + .collect(), + }) + } + + /// Polls the worker for events that happened. + pub fn poll(&mut self, cx: &mut Context) -> Poll { + for (node, _) in &mut self.nodes { + loop { + match node::Node::poll(Pin::new(node), cx) { + Poll::Ready(node::NodeEvent::Connected) => { + return Poll::Ready(TelemetryWorkerEvent::Connected) + } + Poll::Ready(node::NodeEvent::Disconnected(_)) => continue, + Poll::Pending => break, + } + } + } + + Poll::Pending + } + + /// Equivalent to `slog::Drain::log`, but takes `self` by `&mut` instead, which is more convenient. + /// + /// Keep in mind that you should call `TelemetryWorker::poll` in order to process the messages. + /// You should call this function right after calling `slog::Drain::log`. 
+ pub fn log(&mut self, record: &slog::Record, values: &slog::OwnedKVList) -> Result<(), ()> { + let msg_verbosity = match record.tag().parse::() { + Ok(v) => v, + Err(err) => { + warn!(target: "telemetry", "Failed to parse telemetry tag {:?}: {:?}", record.tag(), err); - return Err(()) - } - }; - - // None of the nodes want that verbosity, so just return without doing any serialization. - if self.nodes.iter().all(|(_, node_max_verbosity)| msg_verbosity > *node_max_verbosity) { - trace!( - target: "telemetry", - "Skipping log entry because verbosity {:?} is too high for all endpoints", - msg_verbosity - ); - return Ok(()) - } - - // Turn the message into JSON. - let serialized = { - let mut out = Vec::new(); - slog_json::Json::default(&mut out).log(record, values).map_err(|_| ())?; - out - }; - - for (node, node_max_verbosity) in &mut self.nodes { - if msg_verbosity > *node_max_verbosity { - trace!(target: "telemetry", "Skipping {:?} for log entry with verbosity {:?}", + return Err(()); + } + }; + + // None of the nodes want that verbosity, so just return without doing any serialization. + if self + .nodes + .iter() + .all(|(_, node_max_verbosity)| msg_verbosity > *node_max_verbosity) + { + trace!( + target: "telemetry", + "Skipping log entry because verbosity {:?} is too high for all endpoints", + msg_verbosity + ); + return Ok(()); + } + + // Turn the message into JSON. + let serialized = { + let mut out = Vec::new(); + slog_json::Json::default(&mut out) + .log(record, values) + .map_err(|_| ())?; + out + }; + + for (node, node_max_verbosity) in &mut self.nodes { + if msg_verbosity > *node_max_verbosity { + trace!(target: "telemetry", "Skipping {:?} for log entry with verbosity {:?}", node.addr(), msg_verbosity); - continue; - } + continue; + } - // `send_message` returns an error if we're not connected, which we silently ignore. 
- let _ = node.send_message(&serialized.clone()[..]); - } + // `send_message` returns an error if we're not connected, which we silently ignore. + let _ = node.send_message(&serialized.clone()[..]); + } - Ok(()) - } + Ok(()) + } } /// Wraps around an `AsyncWrite` and implements `Sink`. Guarantees that each item being sent maps @@ -190,70 +201,70 @@ impl TelemetryWorker { struct StreamSink(#[pin] T, Option); impl From for StreamSink { - fn from(inner: T) -> StreamSink { - StreamSink(inner, None) - } + fn from(inner: T) -> StreamSink { + StreamSink(inner, None) + } } impl Stream for StreamSink { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let this = self.project(); - let mut buf = [0; 128]; - match ready!(AsyncRead::poll_read(this.0, cx, &mut buf)) { - Ok(0) => Poll::Ready(None), - Ok(n) => { - let buf: BytesMut = buf[..n].into(); - Poll::Ready(Some(Ok(buf))) - }, - Err(err) => Poll::Ready(Some(Err(err))), - } - } + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); + let mut buf = [0; 128]; + match ready!(AsyncRead::poll_read(this.0, cx, &mut buf)) { + Ok(0) => Poll::Ready(None), + Ok(n) => { + let buf: BytesMut = buf[..n].into(); + Poll::Ready(Some(Ok(buf))) + } + Err(err) => Poll::Ready(Some(Err(err))), + } + } } impl StreamSink { - fn poll_flush_buffer(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let this = self.project(); + fn poll_flush_buffer(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let this = self.project(); - if let Some(buffer) = this.1 { - if ready!(this.0.poll_write(cx, &buffer[..]))? != buffer.len() { - error!(target: "telemetry", + if let Some(buffer) = this.1 { + if ready!(this.0.poll_write(cx, &buffer[..]))? 
!= buffer.len() { + error!(target: "telemetry", "Detected some internal buffering happening in the telemetry"); - let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); - return Poll::Ready(Err(err)); - } - } - - *this.1 = None; - Poll::Ready(Ok(())) - } + let err = io::Error::new(io::ErrorKind::Other, "Internal buffering detected"); + return Poll::Ready(Err(err)); + } + } + + *this.1 = None; + Poll::Ready(Ok(())) + } } impl Sink for StreamSink { - type Error = io::Error; - - fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(StreamSink::poll_flush_buffer(self, cx))?; - Poll::Ready(Ok(())) - } - - fn start_send(self: Pin<&mut Self>, item: BytesMut) -> Result<(), Self::Error> { - let this = self.project(); - debug_assert!(this.1.is_none()); - *this.1 = Some(item); - Ok(()) - } - - fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - let this = self.project(); - AsyncWrite::poll_flush(this.0, cx) - } - - fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - ready!(self.as_mut().poll_flush_buffer(cx))?; - let this = self.project(); - AsyncWrite::poll_close(this.0, cx) - } + type Error = io::Error; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(StreamSink::poll_flush_buffer(self, cx))?; + Poll::Ready(Ok(())) + } + + fn start_send(self: Pin<&mut Self>, item: BytesMut) -> Result<(), Self::Error> { + let this = self.project(); + debug_assert!(this.1.is_none()); + *this.1 = Some(item); + Ok(()) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.as_mut().poll_flush_buffer(cx))?; + let this = self.project(); + AsyncWrite::poll_flush(this.0, cx) + } + + fn poll_close(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + ready!(self.as_mut().poll_flush_buffer(cx))?; + let this = self.project(); + AsyncWrite::poll_close(this.0, cx) + } } diff --git 
a/client/telemetry/src/worker/node.rs b/client/telemetry/src/worker/node.rs index 454f504d66..464a8462ea 100644 --- a/client/telemetry/src/worker/node.rs +++ b/client/telemetry/src/worker/node.rs @@ -19,9 +19,9 @@ use bytes::BytesMut; use futures::prelude::*; use futures_timer::Delay; -use libp2p::Multiaddr; use libp2p::core::transport::Transport; -use log::{trace, debug, warn, error}; +use libp2p::Multiaddr; +use log::{debug, error, trace, warn}; use rand::Rng as _; use std::{collections::VecDeque, fmt, mem, pin::Pin, task::Context, task::Poll, time::Duration}; @@ -30,165 +30,165 @@ const MAX_PENDING: usize = 10; /// Handler for a single telemetry node. pub struct Node { - /// Address of the node. - addr: Multiaddr, - /// State of the connection. - socket: NodeSocket, - /// Transport used to establish new connections. - transport: TTrans, + /// Address of the node. + addr: Multiaddr, + /// State of the connection. + socket: NodeSocket, + /// Transport used to establish new connections. + transport: TTrans, } enum NodeSocket { - /// We're connected to the node. This is the normal state. - Connected(NodeSocketConnected), - /// We are currently dialing the node. - Dialing(TTrans::Dial), - /// A new connection should be started as soon as possible. - ReconnectNow, - /// Waiting before attempting to dial again. - WaitingReconnect(Delay), - /// Temporary transition state. - Poisoned, + /// We're connected to the node. This is the normal state. + Connected(NodeSocketConnected), + /// We are currently dialing the node. + Dialing(TTrans::Dial), + /// A new connection should be started as soon as possible. + ReconnectNow, + /// Waiting before attempting to dial again. + WaitingReconnect(Delay), + /// Temporary transition state. + Poisoned, } struct NodeSocketConnected { - /// Where to send data. - sink: TTrans::Output, - /// Queue of packets to send. - pending: VecDeque, - /// If true, we need to flush the sink. 
- need_flush: bool, - /// A timeout for the socket to write data. - timeout: Option, + /// Where to send data. + sink: TTrans::Output, + /// Queue of packets to send. + pending: VecDeque, + /// If true, we need to flush the sink. + need_flush: bool, + /// A timeout for the socket to write data. + timeout: Option, } /// Event that can happen with this node. #[derive(Debug)] pub enum NodeEvent { - /// We are now connected to this node. - Connected, - /// We are now disconnected from this node. - Disconnected(ConnectionError), + /// We are now connected to this node. + Connected, + /// We are now disconnected from this node. + Disconnected(ConnectionError), } /// Reason for disconnecting from a node. #[derive(Debug)] pub enum ConnectionError { - /// The connection timed-out. - Timeout, - /// Reading from the socket returned and end-of-file, indicating that the socket has been - /// closed. - Closed, - /// The sink errored. - Sink(TSinkErr), + /// The connection timed-out. + Timeout, + /// Reading from the socket returned and end-of-file, indicating that the socket has been + /// closed. + Closed, + /// The sink errored. + Sink(TSinkErr), } impl Node { - /// Builds a new node handler. - pub fn new(transport: TTrans, addr: Multiaddr) -> Self { - Node { - addr, - socket: NodeSocket::ReconnectNow, - transport, - } - } + /// Builds a new node handler. + pub fn new(transport: TTrans, addr: Multiaddr) -> Self { + Node { + addr, + socket: NodeSocket::ReconnectNow, + transport, + } + } - /// Returns the address that was passed to `new`. - pub fn addr(&self) -> &Multiaddr { - &self.addr - } + /// Returns the address that was passed to `new`. 
+ pub fn addr(&self) -> &Multiaddr { + &self.addr + } } impl Node -where TTrans: Clone + Unpin, TTrans::Dial: Unpin, - TTrans::Output: Sink - + Stream> - + Unpin, - TSinkErr: fmt::Debug +where + TTrans: Clone + Unpin, + TTrans::Dial: Unpin, + TTrans::Output: + Sink + Stream> + Unpin, + TSinkErr: fmt::Debug, { - /// Sends a WebSocket frame to the node. Returns an error if we are not connected to the node. - /// - /// After calling this method, you should call `poll` in order for it to be properly processed. - pub fn send_message(&mut self, payload: impl Into) -> Result<(), ()> { - if let NodeSocket::Connected(NodeSocketConnected { pending, .. }) = &mut self.socket { - if pending.len() <= MAX_PENDING { - trace!(target: "telemetry", "Adding log entry to queue for {:?}", self.addr); - pending.push_back(payload.into()); - Ok(()) - } else { - warn!(target: "telemetry", "⚠️ Rejected log entry because queue is full for {:?}", + /// Sends a WebSocket frame to the node. Returns an error if we are not connected to the node. + /// + /// After calling this method, you should call `poll` in order for it to be properly processed. + pub fn send_message(&mut self, payload: impl Into) -> Result<(), ()> { + if let NodeSocket::Connected(NodeSocketConnected { pending, .. }) = &mut self.socket { + if pending.len() <= MAX_PENDING { + trace!(target: "telemetry", "Adding log entry to queue for {:?}", self.addr); + pending.push_back(payload.into()); + Ok(()) + } else { + warn!(target: "telemetry", "⚠️ Rejected log entry because queue is full for {:?}", self.addr); - Err(()) - } - } else { - Err(()) - } - } + Err(()) + } + } else { + Err(()) + } + } - /// Polls the node for updates. Must be performed regularly. 
- pub fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - let mut socket = mem::replace(&mut self.socket, NodeSocket::Poisoned); - self.socket = loop { - match socket { - NodeSocket::Connected(mut conn) => { - match NodeSocketConnected::poll(Pin::new(&mut conn), cx, &self.addr) { - Poll::Ready(Ok(v)) => match v {}, - Poll::Pending => { - break NodeSocket::Connected(conn) - }, - Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - self.socket = NodeSocket::WaitingReconnect(timeout); - return Poll::Ready(NodeEvent::Disconnected(err)) - } - } - } - NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { - Poll::Ready(Ok(sink)) => { - debug!(target: "telemetry", "✅ Connected to {}", self.addr); - let conn = NodeSocketConnected { - sink, - pending: VecDeque::new(), - need_flush: false, - timeout: None, - }; - self.socket = NodeSocket::Connected(conn); - return Poll::Ready(NodeEvent::Connected) - }, - Poll::Pending => break NodeSocket::Dialing(s), - Poll::Ready(Err(err)) => { - warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - socket = NodeSocket::WaitingReconnect(timeout); - } - } - NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { - Ok(d) => { - debug!(target: "telemetry", "Started dialing {}", self.addr); - socket = NodeSocket::Dialing(d); - } - Err(err) => { - warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); - let timeout = gen_rand_reconnect_delay(); - socket = NodeSocket::WaitingReconnect(timeout); - } - } - NodeSocket::WaitingReconnect(mut s) => - if let Poll::Ready(_) = Future::poll(Pin::new(&mut s), cx) { - socket = NodeSocket::ReconnectNow; - } else { - break NodeSocket::WaitingReconnect(s) - } - NodeSocket::Poisoned => { - error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); - 
break NodeSocket::Poisoned - } - } - }; + /// Polls the node for updates. Must be performed regularly. + pub fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + let mut socket = mem::replace(&mut self.socket, NodeSocket::Poisoned); + self.socket = loop { + match socket { + NodeSocket::Connected(mut conn) => { + match NodeSocketConnected::poll(Pin::new(&mut conn), cx, &self.addr) { + Poll::Ready(Ok(v)) => match v {}, + Poll::Pending => break NodeSocket::Connected(conn), + Poll::Ready(Err(err)) => { + warn!(target: "telemetry", "⚠️ Disconnected from {}: {:?}", self.addr, err); + let timeout = gen_rand_reconnect_delay(); + self.socket = NodeSocket::WaitingReconnect(timeout); + return Poll::Ready(NodeEvent::Disconnected(err)); + } + } + } + NodeSocket::Dialing(mut s) => match Future::poll(Pin::new(&mut s), cx) { + Poll::Ready(Ok(sink)) => { + debug!(target: "telemetry", "✅ Connected to {}", self.addr); + let conn = NodeSocketConnected { + sink, + pending: VecDeque::new(), + need_flush: false, + timeout: None, + }; + self.socket = NodeSocket::Connected(conn); + return Poll::Ready(NodeEvent::Connected); + } + Poll::Pending => break NodeSocket::Dialing(s), + Poll::Ready(Err(err)) => { + warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); + let timeout = gen_rand_reconnect_delay(); + socket = NodeSocket::WaitingReconnect(timeout); + } + }, + NodeSocket::ReconnectNow => match self.transport.clone().dial(self.addr.clone()) { + Ok(d) => { + debug!(target: "telemetry", "Started dialing {}", self.addr); + socket = NodeSocket::Dialing(d); + } + Err(err) => { + warn!(target: "telemetry", "❌ Error while dialing {}: {:?}", self.addr, err); + let timeout = gen_rand_reconnect_delay(); + socket = NodeSocket::WaitingReconnect(timeout); + } + }, + NodeSocket::WaitingReconnect(mut s) => { + if let Poll::Ready(_) = Future::poll(Pin::new(&mut s), cx) { + socket = NodeSocket::ReconnectNow; + } else { + break NodeSocket::WaitingReconnect(s); + } + } + 
NodeSocket::Poisoned => { + error!(target: "telemetry", "‼️ Poisoned connection with {}", self.addr); + break NodeSocket::Poisoned; + } + } + }; - Poll::Pending - } + Poll::Pending + } } /// Generates a `Delay` object with a random timeout. @@ -196,109 +196,103 @@ where TTrans: Clone + Unpin, TTrans::Dial: Unpin, /// If there are general connection issues, not all endpoints should be synchronized in their /// re-connection time. fn gen_rand_reconnect_delay() -> Delay { - let random_delay = rand::thread_rng().gen_range(5, 10); - Delay::new(Duration::from_secs(random_delay)) + let random_delay = rand::thread_rng().gen_range(5, 10); + Delay::new(Duration::from_secs(random_delay)) } impl NodeSocketConnected -where TTrans::Output: Sink - + Stream> - + Unpin +where + TTrans::Output: + Sink + Stream> + Unpin, { - /// Processes the queue of messages for the connected socket. - /// - /// The address is passed for logging purposes only. - fn poll( - mut self: Pin<&mut Self>, - cx: &mut Context, - my_addr: &Multiaddr, - ) -> Poll>> { - - while let Some(item) = self.pending.pop_front() { - if let Poll::Ready(result) = Sink::poll_ready(Pin::new(&mut self.sink), cx) { - if let Err(err) = result { - return Poll::Ready(Err(ConnectionError::Sink(err))) - } - - let item_len = item.len(); - if let Err(err) = Sink::start_send(Pin::new(&mut self.sink), item) { - return Poll::Ready(Err(ConnectionError::Sink(err))) - } - trace!( - target: "telemetry", "Successfully sent {:?} bytes message to {}", - item_len, my_addr - ); - self.need_flush = true; + /// Processes the queue of messages for the connected socket. + /// + /// The address is passed for logging purposes only. 
+ fn poll( + mut self: Pin<&mut Self>, + cx: &mut Context, + my_addr: &Multiaddr, + ) -> Poll>> { + while let Some(item) = self.pending.pop_front() { + if let Poll::Ready(result) = Sink::poll_ready(Pin::new(&mut self.sink), cx) { + if let Err(err) = result { + return Poll::Ready(Err(ConnectionError::Sink(err))); + } - } else { - self.pending.push_front(item); - if self.timeout.is_none() { - self.timeout = Some(Delay::new(Duration::from_secs(10))); - } - break; - } - } + let item_len = item.len(); + if let Err(err) = Sink::start_send(Pin::new(&mut self.sink), item) { + return Poll::Ready(Err(ConnectionError::Sink(err))); + } + trace!( + target: "telemetry", "Successfully sent {:?} bytes message to {}", + item_len, my_addr + ); + self.need_flush = true; + } else { + self.pending.push_front(item); + if self.timeout.is_none() { + self.timeout = Some(Delay::new(Duration::from_secs(10))); + } + break; + } + } - if self.need_flush { - match Sink::poll_flush(Pin::new(&mut self.sink), cx) { - Poll::Pending => { - if self.timeout.is_none() { - self.timeout = Some(Delay::new(Duration::from_secs(10))); - } - }, - Poll::Ready(Err(err)) => { - self.timeout = None; - return Poll::Ready(Err(ConnectionError::Sink(err))) - }, - Poll::Ready(Ok(())) => { - self.timeout = None; - self.need_flush = false; - }, - } - } + if self.need_flush { + match Sink::poll_flush(Pin::new(&mut self.sink), cx) { + Poll::Pending => { + if self.timeout.is_none() { + self.timeout = Some(Delay::new(Duration::from_secs(10))); + } + } + Poll::Ready(Err(err)) => { + self.timeout = None; + return Poll::Ready(Err(ConnectionError::Sink(err))); + } + Poll::Ready(Ok(())) => { + self.timeout = None; + self.need_flush = false; + } + } + } - if let Some(timeout) = self.timeout.as_mut() { - match Future::poll(Pin::new(timeout), cx) { - Poll::Pending => {}, - Poll::Ready(()) => { - self.timeout = None; - return Poll::Ready(Err(ConnectionError::Timeout)) - } - } - } + if let Some(timeout) = self.timeout.as_mut() { + 
match Future::poll(Pin::new(timeout), cx) { + Poll::Pending => {} + Poll::Ready(()) => { + self.timeout = None; + return Poll::Ready(Err(ConnectionError::Timeout)); + } + } + } - match Stream::poll_next(Pin::new(&mut self.sink), cx) { - Poll::Ready(Some(Ok(_))) => { - // We poll the telemetry `Stream` because the underlying implementation relies on - // this in order to answer PINGs. - // We don't do anything with incoming messages, however. - }, - Poll::Ready(Some(Err(err))) => { - return Poll::Ready(Err(ConnectionError::Sink(err))) - }, - Poll::Ready(None) => { - return Poll::Ready(Err(ConnectionError::Closed)) - }, - Poll::Pending => {}, - } + match Stream::poll_next(Pin::new(&mut self.sink), cx) { + Poll::Ready(Some(Ok(_))) => { + // We poll the telemetry `Stream` because the underlying implementation relies on + // this in order to answer PINGs. + // We don't do anything with incoming messages, however. + } + Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(ConnectionError::Sink(err))), + Poll::Ready(None) => return Poll::Ready(Err(ConnectionError::Closed)), + Poll::Pending => {} + } - Poll::Pending - } + Poll::Pending + } } impl fmt::Debug for Node { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let state = match self.socket { - NodeSocket::Connected(_) => "Connected", - NodeSocket::Dialing(_) => "Dialing", - NodeSocket::ReconnectNow => "Pending reconnect", - NodeSocket::WaitingReconnect(_) => "Pending reconnect", - NodeSocket::Poisoned => "Poisoned", - }; + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let state = match self.socket { + NodeSocket::Connected(_) => "Connected", + NodeSocket::Dialing(_) => "Dialing", + NodeSocket::ReconnectNow => "Pending reconnect", + NodeSocket::WaitingReconnect(_) => "Pending reconnect", + NodeSocket::Poisoned => "Poisoned", + }; - f.debug_struct("Node") - .field("addr", &self.addr) - .field("state", &state) - .finish() - } + f.debug_struct("Node") + .field("addr", &self.addr) + .field("state", 
&state) + .finish() + } } diff --git a/client/tracing/src/lib.rs b/client/tracing/src/lib.rs index c00bca9275..05c4142598 100644 --- a/client/tracing/src/lib.rs +++ b/client/tracing/src/lib.rs @@ -42,15 +42,15 @@ use std::sync::atomic::{AtomicU64, Ordering}; use std::time::{Duration, Instant}; use parking_lot::Mutex; -use serde::ser::{Serialize, Serializer, SerializeMap}; +use serde::ser::{Serialize, SerializeMap, Serializer}; use slog::{SerdeValue, Value}; use tracing_core::{ - event::Event, - field::{Visit, Field}, - Level, - metadata::Metadata, - span::{Attributes, Id, Record}, - subscriber::Subscriber + event::Event, + field::{Field, Visit}, + metadata::Metadata, + span::{Attributes, Id, Record}, + subscriber::Subscriber, + Level, }; use sc_telemetry::{telemetry, SUBSTRATE_INFO}; @@ -58,232 +58,251 @@ use sc_telemetry::{telemetry, SUBSTRATE_INFO}; /// Used to configure how to receive the metrics #[derive(Debug, Clone)] pub enum TracingReceiver { - /// Output to logger - Log, - /// Output to telemetry - Telemetry, + /// Output to logger + Log, + /// Output to telemetry + Telemetry, } impl Default for TracingReceiver { - fn default() -> Self { - Self::Log - } + fn default() -> Self { + Self::Log + } } #[derive(Debug)] struct SpanDatum { - id: u64, - name: &'static str, - target: &'static str, - level: Level, - line: u32, - start_time: Instant, - overall_time: Duration, - values: Visitor, + id: u64, + name: &'static str, + target: &'static str, + level: Level, + line: u32, + start_time: Instant, + overall_time: Duration, + values: Visitor, } #[derive(Clone, Debug)] struct Visitor(Vec<(String, String)>); impl Visit for Visitor { - fn record_i64(&mut self, field: &Field, value: i64) { - self.record_debug(field, &value) - } - - fn record_u64(&mut self, field: &Field, value: u64) { - self.record_debug(field, &value) - } - - fn record_bool(&mut self, field: &Field, value: bool) { - self.record_debug(field, &value) - } - - fn record_debug(&mut self, field: &Field, 
value: &dyn std::fmt::Debug) { - self.0.push((field.name().to_string(), format!("{:?}",value))); - } + fn record_i64(&mut self, field: &Field, value: i64) { + self.record_debug(field, &value) + } + + fn record_u64(&mut self, field: &Field, value: u64) { + self.record_debug(field, &value) + } + + fn record_bool(&mut self, field: &Field, value: bool) { + self.record_debug(field, &value) + } + + fn record_debug(&mut self, field: &Field, value: &dyn std::fmt::Debug) { + self.0 + .push((field.name().to_string(), format!("{:?}", value))); + } } impl Serialize for Visitor { - fn serialize(&self, serializer: S) -> Result - where S: Serializer, - { - let mut map = serializer.serialize_map(Some(self.0.len()))?; - for (k, v) in &self.0 { - map.serialize_entry(k, v)?; - } - map.end() - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + let mut map = serializer.serialize_map(Some(self.0.len()))?; + for (k, v) in &self.0 { + map.serialize_entry(k, v)?; + } + map.end() + } } impl fmt::Display for Visitor { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let values = self.0.iter().map(|(k,v)| format!("{}={}",k,v)).collect::>().join(", "); - write!(f, "{}", values) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + let values = self + .0 + .iter() + .map(|(k, v)| format!("{}={}", k, v)) + .collect::>() + .join(", "); + write!(f, "{}", values) + } } impl SerdeValue for Visitor { - fn as_serde(&self) -> &dyn erased_serde::Serialize { - self - } + fn as_serde(&self) -> &dyn erased_serde::Serialize { + self + } - fn to_sendable(&self) -> Box { - Box::new(self.clone()) - } + fn to_sendable(&self) -> Box { + Box::new(self.clone()) + } } impl Value for Visitor { - fn serialize( - &self, - _record: &slog::Record, - key: slog::Key, - ser: &mut dyn slog::Serializer, - ) -> slog::Result { - ser.emit_serde(key, self) - } + fn serialize( + &self, + _record: &slog::Record, + key: slog::Key, + ser: &mut dyn slog::Serializer, + ) -> 
slog::Result { + ser.emit_serde(key, self) + } } /// Responsible for assigning ids to new spans, which are not re-used. pub struct ProfilingSubscriber { - next_id: AtomicU64, - targets: Vec<(String, Level)>, - receiver: TracingReceiver, - span_data: Mutex>, + next_id: AtomicU64, + targets: Vec<(String, Level)>, + receiver: TracingReceiver, + span_data: Mutex>, } impl ProfilingSubscriber { - /// Takes a `Receiver` and a comma separated list of targets, - /// either with a level: "pallet=trace" - /// or without: "pallet". - pub fn new(receiver: TracingReceiver, targets: &str) -> Self { - let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); - ProfilingSubscriber { - next_id: AtomicU64::new(1), - targets, - receiver, - span_data: Mutex::new(HashMap::new()), - } - } + /// Takes a `Receiver` and a comma separated list of targets, + /// either with a level: "pallet=trace" + /// or without: "pallet". + pub fn new(receiver: TracingReceiver, targets: &str) -> Self { + let targets: Vec<_> = targets.split(',').map(|s| parse_target(s)).collect(); + ProfilingSubscriber { + next_id: AtomicU64::new(1), + targets, + receiver, + span_data: Mutex::new(HashMap::new()), + } + } } // Default to TRACE if no level given or unable to parse Level // We do not support a global `Level` currently fn parse_target(s: &str) -> (String, Level) { - match s.find('=') { - Some(i) => { - let target = s[0..i].to_string(); - if s.len() > i { - let level = s[i + 1..s.len()].parse::().unwrap_or(Level::TRACE); - (target, level) - } else { - (target, Level::TRACE) - } - } - None => (s.to_string(), Level::TRACE) - } + match s.find('=') { + Some(i) => { + let target = s[0..i].to_string(); + if s.len() > i { + let level = s[i + 1..s.len()].parse::().unwrap_or(Level::TRACE); + (target, level) + } else { + (target, Level::TRACE) + } + } + None => (s.to_string(), Level::TRACE), + } } impl Subscriber for ProfilingSubscriber { - fn enabled(&self, metadata: &Metadata<'_>) -> bool { - for t in 
&self.targets { - if metadata.target().starts_with(t.0.as_str()) && metadata.level() <= &t.1 { - log::debug!("Enabled target: {}, level: {}", metadata.target(), metadata.level()); - return true; - } else { - log::debug!("Disabled target: {}, level: {}", metadata.target(), metadata.level()); - } - } - false - } - - fn new_span(&self, attrs: &Attributes<'_>) -> Id { - let id = self.next_id.fetch_add(1, Ordering::Relaxed); - let mut values = Visitor(Vec::new()); - attrs.record(&mut values); - let span_datum = SpanDatum { - id, - name: attrs.metadata().name(), - target: attrs.metadata().target(), - level: attrs.metadata().level().clone(), - line: attrs.metadata().line().unwrap_or(0), - start_time: Instant::now(), - overall_time: Duration::from_nanos(0), - values, - }; - self.span_data.lock().insert(id, span_datum); - Id::from_u64(id) - } - - fn record(&self, _span: &Id, _values: &Record<'_>) {} - - fn record_follows_from(&self, _span: &Id, _follows: &Id) {} - - fn event(&self, _event: &Event<'_>) {} - - fn enter(&self, span: &Id) { - let mut span_data = self.span_data.lock(); - let start_time = Instant::now(); - if let Some(mut s) = span_data.get_mut(&span.into_u64()) { - s.start_time = start_time; - } else { - log::warn!("Tried to enter span {:?} that has already been closed!", span); - } - } - - fn exit(&self, span: &Id) { - let mut span_data = self.span_data.lock(); - let end_time = Instant::now(); - if let Some(mut s) = span_data.get_mut(&span.into_u64()) { - s.overall_time = end_time - s.start_time + s.overall_time; - } - } - - fn try_close(&self, span: Id) -> bool { - let mut span_data = self.span_data.lock(); - if let Some(data) = span_data.remove(&span.into_u64()) { - self.send_span(data); - }; - true - } + fn enabled(&self, metadata: &Metadata<'_>) -> bool { + for t in &self.targets { + if metadata.target().starts_with(t.0.as_str()) && metadata.level() <= &t.1 { + log::debug!( + "Enabled target: {}, level: {}", + metadata.target(), + metadata.level() + ); + 
return true; + } else { + log::debug!( + "Disabled target: {}, level: {}", + metadata.target(), + metadata.level() + ); + } + } + false + } + + fn new_span(&self, attrs: &Attributes<'_>) -> Id { + let id = self.next_id.fetch_add(1, Ordering::Relaxed); + let mut values = Visitor(Vec::new()); + attrs.record(&mut values); + let span_datum = SpanDatum { + id, + name: attrs.metadata().name(), + target: attrs.metadata().target(), + level: attrs.metadata().level().clone(), + line: attrs.metadata().line().unwrap_or(0), + start_time: Instant::now(), + overall_time: Duration::from_nanos(0), + values, + }; + self.span_data.lock().insert(id, span_datum); + Id::from_u64(id) + } + + fn record(&self, _span: &Id, _values: &Record<'_>) {} + + fn record_follows_from(&self, _span: &Id, _follows: &Id) {} + + fn event(&self, _event: &Event<'_>) {} + + fn enter(&self, span: &Id) { + let mut span_data = self.span_data.lock(); + let start_time = Instant::now(); + if let Some(mut s) = span_data.get_mut(&span.into_u64()) { + s.start_time = start_time; + } else { + log::warn!( + "Tried to enter span {:?} that has already been closed!", + span + ); + } + } + + fn exit(&self, span: &Id) { + let mut span_data = self.span_data.lock(); + let end_time = Instant::now(); + if let Some(mut s) = span_data.get_mut(&span.into_u64()) { + s.overall_time = end_time - s.start_time + s.overall_time; + } + } + + fn try_close(&self, span: Id) -> bool { + let mut span_data = self.span_data.lock(); + if let Some(data) = span_data.remove(&span.into_u64()) { + self.send_span(data); + }; + true + } } impl ProfilingSubscriber { - fn send_span(&self, span_datum: SpanDatum) { - match self.receiver { - TracingReceiver::Log => print_log(span_datum), - TracingReceiver::Telemetry => send_telemetry(span_datum), - } - } + fn send_span(&self, span_datum: SpanDatum) { + match self.receiver { + TracingReceiver::Log => print_log(span_datum), + TracingReceiver::Telemetry => send_telemetry(span_datum), + } + } } fn 
print_log(span_datum: SpanDatum) { - if span_datum.values.0.is_empty() { - log::info!("TRACING: {} {}: {}, line: {}, time: {}", - span_datum.level, - span_datum.target, - span_datum.name, - span_datum.line, - span_datum.overall_time.as_nanos(), - ); - } else { - log::info!("TRACING: {} {}: {}, line: {}, time: {}, {}", - span_datum.level, - span_datum.target, - span_datum.name, - span_datum.line, - span_datum.overall_time.as_nanos(), - span_datum.values - ); - } + if span_datum.values.0.is_empty() { + log::info!( + "TRACING: {} {}: {}, line: {}, time: {}", + span_datum.level, + span_datum.target, + span_datum.name, + span_datum.line, + span_datum.overall_time.as_nanos(), + ); + } else { + log::info!( + "TRACING: {} {}: {}, line: {}, time: {}, {}", + span_datum.level, + span_datum.target, + span_datum.name, + span_datum.line, + span_datum.overall_time.as_nanos(), + span_datum.values + ); + } } fn send_telemetry(span_datum: SpanDatum) { - telemetry!(SUBSTRATE_INFO; "tracing.profiling"; - "name" => span_datum.name, - "target" => span_datum.target, - "line" => span_datum.line, - "time" => span_datum.overall_time.as_nanos(), - "values" => span_datum.values - ); + telemetry!(SUBSTRATE_INFO; "tracing.profiling"; + "name" => span_datum.name, + "target" => span_datum.target, + "line" => span_datum.line, + "time" => span_datum.overall_time.as_nanos(), + "values" => span_datum.values + ); } - diff --git a/client/transaction-pool/graph/benches/basics.rs b/client/transaction-pool/graph/benches/basics.rs index 23b4dba348..d0bb91e307 100644 --- a/client/transaction-pool/graph/benches/basics.rs +++ b/client/transaction-pool/graph/benches/basics.rs @@ -16,162 +16,167 @@ use criterion::{criterion_group, criterion_main, Criterion}; -use futures::{future::{ready, Ready}, executor::block_on}; -use sc_transaction_graph::*; use codec::Encode; -use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId}; -use sp_runtime::{ - generic::BlockId, - transaction_validity::{ - 
ValidTransaction, InvalidTransaction, TransactionValidity, TransactionTag as Tag, - TransactionSource, - }, +use futures::{ + executor::block_on, + future::{ready, Ready}, }; +use sc_transaction_graph::*; use sp_core::blake2_256; +use sp_runtime::{ + generic::BlockId, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionTag as Tag, TransactionValidity, + ValidTransaction, + }, +}; +use substrate_test_runtime::{AccountId, Block, Extrinsic, Transfer, H256}; #[derive(Clone, Debug, Default)] struct TestApi { - nonce_dependant: bool, + nonce_dependant: bool, } impl TestApi { - fn new_dependant() -> Self { - TestApi { nonce_dependant: true } - } + fn new_dependant() -> Self { + TestApi { + nonce_dependant: true, + } + } } fn to_tag(nonce: u64, from: AccountId) -> Tag { - let mut data = [0u8; 40]; - data[..8].copy_from_slice(&nonce.to_le_bytes()[..]); - data[8..].copy_from_slice(&from.0[..]); - data.to_vec() + let mut data = [0u8; 40]; + data[..8].copy_from_slice(&nonce.to_le_bytes()[..]); + data[8..].copy_from_slice(&from.0[..]); + data.to_vec() } impl ChainApi for TestApi { - type Block = Block; - type Hash = H256; - type Error = sp_transaction_pool::error::Error; - type ValidationFuture = Ready>; - type BodyFuture = Ready>>>; - - fn validate_transaction( - &self, - at: &BlockId, - _source: TransactionSource, - uxt: ExtrinsicFor, - ) -> Self::ValidationFuture { - let nonce = uxt.transfer().nonce; - let from = uxt.transfer().from.clone(); - - match self.block_id_to_number(at) { - Ok(Some(num)) if num > 5 => { - return ready( - Ok(Err(InvalidTransaction::Stale.into())) - ) - }, - _ => {}, - } - - ready( - Ok(Ok(ValidTransaction { - priority: 4, - requires: if nonce > 1 && self.nonce_dependant { - vec![to_tag(nonce-1, from.clone())] - } else { vec![] }, - provides: vec![to_tag(nonce, from)], - longevity: 10, - propagate: true, - })) - ) - } - - fn block_id_to_number( - &self, - at: &BlockId, - ) -> Result>, Self::Error> { - Ok(match at { - 
BlockId::Number(num) => Some(*num), - BlockId::Hash(_) => None, - }) - } - - fn block_id_to_hash( - &self, - at: &BlockId, - ) -> Result>, Self::Error> { - Ok(match at { - BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), - BlockId::Hash(_) => None, - }) - } - - fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize) { - let encoded = uxt.encode(); - (blake2_256(&encoded).into(), encoded.len()) - } - - fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { - ready(Ok(None)) - } + type Block = Block; + type Hash = H256; + type Error = sp_transaction_pool::error::Error; + type ValidationFuture = Ready>; + type BodyFuture = Ready>>>; + + fn validate_transaction( + &self, + at: &BlockId, + _source: TransactionSource, + uxt: ExtrinsicFor, + ) -> Self::ValidationFuture { + let nonce = uxt.transfer().nonce; + let from = uxt.transfer().from.clone(); + + match self.block_id_to_number(at) { + Ok(Some(num)) if num > 5 => return ready(Ok(Err(InvalidTransaction::Stale.into()))), + _ => {} + } + + ready(Ok(Ok(ValidTransaction { + priority: 4, + requires: if nonce > 1 && self.nonce_dependant { + vec![to_tag(nonce - 1, from.clone())] + } else { + vec![] + }, + provides: vec![to_tag(nonce, from)], + longevity: 10, + propagate: true, + }))) + } + + fn block_id_to_number( + &self, + at: &BlockId, + ) -> Result>, Self::Error> { + Ok(match at { + BlockId::Number(num) => Some(*num), + BlockId::Hash(_) => None, + }) + } + + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> Result>, Self::Error> { + Ok(match at { + BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), + BlockId::Hash(_) => None, + }) + } + + fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize) { + let encoded = uxt.encode(); + (blake2_256(&encoded).into(), encoded.len()) + } + + fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { + ready(Ok(None)) + } } fn uxt(transfer: Transfer) -> Extrinsic { - Extrinsic::Transfer { - transfer, - signature: 
Default::default(), - exhaust_resources_when_not_first: false, - } + Extrinsic::Transfer { + transfer, + signature: Default::default(), + exhaust_resources_when_not_first: false, + } } fn bench_configured(pool: Pool, number: u64) { - let source = TransactionSource::External; - let mut futures = Vec::new(); - let mut tags = Vec::new(); - - for nonce in 1..=number { - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce, - }); - - tags.push(to_tag(nonce, AccountId::from_h256(H256::from_low_u64_be(1)))); - futures.push(pool.submit_one(&BlockId::Number(1), source, xt)); - } - - let res = block_on(futures::future::join_all(futures.into_iter())); - assert!(res.iter().all(Result::is_ok)); - - assert_eq!(pool.validated_pool().status().future, 0); - assert_eq!(pool.validated_pool().status().ready, number as usize); - - // Prune all transactions. - let block_num = 6; - block_on(pool.prune_tags( - &BlockId::Number(block_num), - tags, - vec![], - )).expect("Prune failed"); - - // pool is empty - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 0); + let source = TransactionSource::External; + let mut futures = Vec::new(); + let mut tags = Vec::new(); + + for nonce in 1..=number { + let xt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce, + }); + + tags.push(to_tag( + nonce, + AccountId::from_h256(H256::from_low_u64_be(1)), + )); + futures.push(pool.submit_one(&BlockId::Number(1), source, xt)); + } + + let res = block_on(futures::future::join_all(futures.into_iter())); + assert!(res.iter().all(Result::is_ok)); + + assert_eq!(pool.validated_pool().status().future, 0); + assert_eq!(pool.validated_pool().status().ready, number as usize); + + // Prune all transactions. 
+ let block_num = 6; + block_on(pool.prune_tags(&BlockId::Number(block_num), tags, vec![])).expect("Prune failed"); + + // pool is empty + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 0); } fn benchmark_main(c: &mut Criterion) { - - c.bench_function("sequential 50 tx", |b| { - b.iter(|| { - bench_configured(Pool::new(Default::default(), TestApi::new_dependant().into()), 50); - }); - }); - - c.bench_function("random 100 tx", |b| { - b.iter(|| { - bench_configured(Pool::new(Default::default(), TestApi::default().into()), 100); - }); - }); + c.bench_function("sequential 50 tx", |b| { + b.iter(|| { + bench_configured( + Pool::new(Default::default(), TestApi::new_dependant().into()), + 50, + ); + }); + }); + + c.bench_function("random 100 tx", |b| { + b.iter(|| { + bench_configured( + Pool::new(Default::default(), TestApi::default().into()), + 100, + ); + }); + }); } criterion_group!(benches, benchmark_main); diff --git a/client/transaction-pool/graph/src/base_pool.rs b/client/transaction-pool/graph/src/base_pool.rs index 38151e9bfd..7783decbd6 100644 --- a/client/transaction-pool/graph/src/base_pool.rs +++ b/client/transaction-pool/graph/src/base_pool.rs @@ -18,24 +18,17 @@ //! //! For a more full-featured pool, have a look at the `pool` module. 
-use std::{ - collections::HashSet, - fmt, - hash, - sync::Arc, -}; +use std::{collections::HashSet, fmt, hash, sync::Arc}; -use log::{trace, debug, warn}; +use log::{debug, trace, warn}; use serde::Serialize; use sp_core::hexdisplay::HexDisplay; use sp_runtime::traits::Member; use sp_runtime::transaction_validity::{ - TransactionTag as Tag, - TransactionLongevity as Longevity, - TransactionPriority as Priority, - TransactionSource as Source, + TransactionLongevity as Longevity, TransactionPriority as Priority, + TransactionSource as Source, TransactionTag as Tag, }; -use sp_transaction_pool::{error, PoolStatus, InPoolTransaction}; +use sp_transaction_pool::{error, InPoolTransaction, PoolStatus}; use crate::future::{FutureTransactions, WaitingTransaction}; use crate::ready::ReadyTransactions; @@ -43,162 +36,163 @@ use crate::ready::ReadyTransactions; /// Successful import result. #[derive(Debug, PartialEq, Eq)] pub enum Imported { - /// Transaction was successfully imported to Ready queue. - Ready { - /// Hash of transaction that was successfully imported. - hash: Hash, - /// Transactions that got promoted from the Future queue. - promoted: Vec, - /// Transactions that failed to be promoted from the Future queue and are now discarded. - failed: Vec, - /// Transactions removed from the Ready pool (replaced). - removed: Vec>>, - }, - /// Transaction was successfully imported to Future queue. - Future { - /// Hash of transaction that was successfully imported. - hash: Hash, - } + /// Transaction was successfully imported to Ready queue. + Ready { + /// Hash of transaction that was successfully imported. + hash: Hash, + /// Transactions that got promoted from the Future queue. + promoted: Vec, + /// Transactions that failed to be promoted from the Future queue and are now discarded. + failed: Vec, + /// Transactions removed from the Ready pool (replaced). + removed: Vec>>, + }, + /// Transaction was successfully imported to Future queue. 
+ Future { + /// Hash of transaction that was successfully imported. + hash: Hash, + }, } impl Imported { - /// Returns the hash of imported transaction. - pub fn hash(&self) -> &Hash { - use self::Imported::*; - match *self { - Ready { ref hash, .. } => hash, - Future { ref hash, .. } => hash, - } - } + /// Returns the hash of imported transaction. + pub fn hash(&self) -> &Hash { + use self::Imported::*; + match *self { + Ready { ref hash, .. } => hash, + Future { ref hash, .. } => hash, + } + } } /// Status of pruning the queue. #[derive(Debug)] pub struct PruneStatus { - /// A list of imports that satisfying the tag triggered. - pub promoted: Vec>, - /// A list of transactions that failed to be promoted and now are discarded. - pub failed: Vec, - /// A list of transactions that got pruned from the ready queue. - pub pruned: Vec>>, + /// A list of imports that satisfying the tag triggered. + pub promoted: Vec>, + /// A list of transactions that failed to be promoted and now are discarded. + pub failed: Vec, + /// A list of transactions that got pruned from the ready queue. + pub pruned: Vec>>, } /// Immutable transaction #[cfg_attr(test, derive(Clone))] #[derive(PartialEq, Eq, parity_util_mem::MallocSizeOf)] pub struct Transaction { - /// Raw extrinsic representing that transaction. - pub data: Extrinsic, - /// Number of bytes encoding of the transaction requires. - pub bytes: usize, - /// Transaction hash (unique) - pub hash: Hash, - /// Transaction priority (higher = better) - pub priority: Priority, - /// At which block the transaction becomes invalid? - pub valid_till: Longevity, - /// Tags required by the transaction. - pub requires: Vec, - /// Tags that this transaction provides. - pub provides: Vec, - /// Should that transaction be propagated. - pub propagate: bool, - /// Source of that transaction. - pub source: Source, + /// Raw extrinsic representing that transaction. + pub data: Extrinsic, + /// Number of bytes encoding of the transaction requires. 
+ pub bytes: usize, + /// Transaction hash (unique) + pub hash: Hash, + /// Transaction priority (higher = better) + pub priority: Priority, + /// At which block the transaction becomes invalid? + pub valid_till: Longevity, + /// Tags required by the transaction. + pub requires: Vec, + /// Tags that this transaction provides. + pub provides: Vec, + /// Should that transaction be propagated. + pub propagate: bool, + /// Source of that transaction. + pub source: Source, } impl AsRef for Transaction { - fn as_ref(&self) -> &Extrinsic { - &self.data - } + fn as_ref(&self) -> &Extrinsic { + &self.data + } } impl InPoolTransaction for Transaction { - type Transaction = Extrinsic; - type Hash = Hash; + type Transaction = Extrinsic; + type Hash = Hash; - fn data(&self) -> &Extrinsic { - &self.data - } + fn data(&self) -> &Extrinsic { + &self.data + } - fn hash(&self) -> &Hash { - &self.hash - } + fn hash(&self) -> &Hash { + &self.hash + } - fn priority(&self) -> &Priority { - &self.priority - } + fn priority(&self) -> &Priority { + &self.priority + } - fn longevity(&self) ->&Longevity { - &self.valid_till - } + fn longevity(&self) -> &Longevity { + &self.valid_till + } - fn requires(&self) -> &[Tag] { - &self.requires - } + fn requires(&self) -> &[Tag] { + &self.requires + } - fn provides(&self) -> &[Tag] { - &self.provides - } + fn provides(&self) -> &[Tag] { + &self.provides + } - fn is_propagable(&self) -> bool { - self.propagate - } + fn is_propagable(&self) -> bool { + self.propagate + } } impl Transaction { - /// Explicit transaction clone. - /// - /// Transaction should be cloned only if absolutely necessary && we want - /// every reason to be commented. That's why we `Transaction` is not `Clone`, - /// but there's explicit `duplicate` method. 
- pub fn duplicate(&self) -> Self { - Transaction { - data: self.data.clone(), - bytes: self.bytes.clone(), - hash: self.hash.clone(), - priority: self.priority.clone(), - source: self.source, - valid_till: self.valid_till.clone(), - requires: self.requires.clone(), - provides: self.provides.clone(), - propagate: self.propagate, - } - } + /// Explicit transaction clone. + /// + /// Transaction should be cloned only if absolutely necessary && we want + /// every reason to be commented. That's why we `Transaction` is not `Clone`, + /// but there's explicit `duplicate` method. + pub fn duplicate(&self) -> Self { + Transaction { + data: self.data.clone(), + bytes: self.bytes.clone(), + hash: self.hash.clone(), + priority: self.priority.clone(), + source: self.source, + valid_till: self.valid_till.clone(), + requires: self.requires.clone(), + provides: self.provides.clone(), + propagate: self.propagate, + } + } } -impl fmt::Debug for Transaction where - Hash: fmt::Debug, - Extrinsic: fmt::Debug, +impl fmt::Debug for Transaction +where + Hash: fmt::Debug, + Extrinsic: fmt::Debug, { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - fn print_tags(fmt: &mut fmt::Formatter, tags: &[Tag]) -> fmt::Result { - let mut it = tags.iter(); - if let Some(t) = it.next() { - write!(fmt, "{}", HexDisplay::from(t))?; - } - for t in it { - write!(fmt, ",{}", HexDisplay::from(t))?; - } - Ok(()) - } - - write!(fmt, "Transaction {{ ")?; - write!(fmt, "hash: {:?}, ", &self.hash)?; - write!(fmt, "priority: {:?}, ", &self.priority)?; - write!(fmt, "valid_till: {:?}, ", &self.valid_till)?; - write!(fmt, "bytes: {:?}, ", &self.bytes)?; - write!(fmt, "propagate: {:?}, ", &self.propagate)?; - write!(fmt, "source: {:?}, ", &self.source)?; - write!(fmt, "requires: [")?; - print_tags(fmt, &self.requires)?; - write!(fmt, "], provides: [")?; - print_tags(fmt, &self.provides)?; - write!(fmt, "], ")?; - write!(fmt, "data: {:?}", &self.data)?; - write!(fmt, "}}")?; - Ok(()) - } + fn fmt(&self, 
fmt: &mut fmt::Formatter) -> fmt::Result { + fn print_tags(fmt: &mut fmt::Formatter, tags: &[Tag]) -> fmt::Result { + let mut it = tags.iter(); + if let Some(t) = it.next() { + write!(fmt, "{}", HexDisplay::from(t))?; + } + for t in it { + write!(fmt, ",{}", HexDisplay::from(t))?; + } + Ok(()) + } + + write!(fmt, "Transaction {{ ")?; + write!(fmt, "hash: {:?}, ", &self.hash)?; + write!(fmt, "priority: {:?}, ", &self.priority)?; + write!(fmt, "valid_till: {:?}, ", &self.valid_till)?; + write!(fmt, "bytes: {:?}, ", &self.bytes)?; + write!(fmt, "propagate: {:?}, ", &self.propagate)?; + write!(fmt, "source: {:?}, ", &self.source)?; + write!(fmt, "requires: [")?; + print_tags(fmt, &self.requires)?; + write!(fmt, "], provides: [")?; + print_tags(fmt, &self.provides)?; + write!(fmt, "], ")?; + write!(fmt, "data: {:?}", &self.data)?; + write!(fmt, "}}")?; + Ok(()) + } } /// Store last pruned tags for given number of invocations. @@ -217,981 +211,1036 @@ const RECENTLY_PRUNED_TAGS: usize = 2; #[derive(Debug)] #[cfg_attr(not(target_os = "unknown"), derive(parity_util_mem::MallocSizeOf))] pub struct BasePool { - reject_future_transactions: bool, - future: FutureTransactions, - ready: ReadyTransactions, - /// Store recently pruned tags (for last two invocations). - /// - /// This is used to make sure we don't accidentally put - /// transactions to future in case they were just stuck in verification. - recently_pruned: [HashSet; RECENTLY_PRUNED_TAGS], - recently_pruned_index: usize, + reject_future_transactions: bool, + future: FutureTransactions, + ready: ReadyTransactions, + /// Store recently pruned tags (for last two invocations). + /// + /// This is used to make sure we don't accidentally put + /// transactions to future in case they were just stuck in verification. 
+ recently_pruned: [HashSet; RECENTLY_PRUNED_TAGS], + recently_pruned_index: usize, } impl Default for BasePool { - fn default() -> Self { - Self::new(false) - } + fn default() -> Self { + Self::new(false) + } } impl BasePool { - /// Create new pool given reject_future_transactions flag. - pub fn new(reject_future_transactions: bool) -> Self { - BasePool { - reject_future_transactions, - future: Default::default(), - ready: Default::default(), - recently_pruned: Default::default(), - recently_pruned_index: 0, - } - } - - /// Temporary enables future transactions, runs closure and then restores - /// `reject_future_transactions` flag back to previous value. - /// - /// The closure accepts the mutable reference to the pool and original value - /// of the `reject_future_transactions` flag. - pub(crate) fn with_futures_enabled(&mut self, closure: impl FnOnce(&mut Self, bool) -> T) -> T { - let previous = self.reject_future_transactions; - self.reject_future_transactions = false; - let return_value = closure(self, previous); - self.reject_future_transactions = previous; - return_value - } - - /// Imports transaction to the pool. - /// - /// The pool consists of two parts: Future and Ready. - /// The former contains transactions that require some tags that are not yet provided by - /// other transactions in the pool. - /// The latter contains transactions that have all the requirements satisfied and are - /// ready to be included in the block. 
- pub fn import( - &mut self, - tx: Transaction, - ) -> error::Result> { - if self.future.contains(&tx.hash) || self.ready.contains(&tx.hash) { - return Err(error::Error::AlreadyImported(Box::new(tx.hash.clone()))) - } - - let tx = WaitingTransaction::new( - tx, - self.ready.provided_tags(), - &self.recently_pruned, - ); - trace!(target: "txpool", "[{:?}] {:?}", tx.transaction.hash, tx); - debug!( - target: "txpool", - "[{:?}] Importing to {}", - tx.transaction.hash, - if tx.is_ready() { "ready" } else { "future" } - ); - - // If all tags are not satisfied import to future. - if !tx.is_ready() { - if self.reject_future_transactions { - return Err(error::Error::RejectedFutureTransaction); - } - - let hash = tx.transaction.hash.clone(); - self.future.import(tx); - return Ok(Imported::Future { hash }); - } - - self.import_to_ready(tx) - } - - /// Imports transaction to ready queue. - /// - /// NOTE the transaction has to have all requirements satisfied. - fn import_to_ready(&mut self, tx: WaitingTransaction) -> error::Result> { - let hash = tx.transaction.hash.clone(); - let mut promoted = vec![]; - let mut failed = vec![]; - let mut removed = vec![]; - - let mut first = true; - let mut to_import = vec![tx]; - - loop { - // take first transaction from the list - let tx = match to_import.pop() { - Some(tx) => tx, - None => break, - }; - - // find transactions in Future that it unlocks - to_import.append(&mut self.future.satisfy_tags(&tx.transaction.provides)); - - // import this transaction - let current_hash = tx.transaction.hash.clone(); - match self.ready.import(tx) { - Ok(mut replaced) => { - if !first { - promoted.push(current_hash); - } - // The transactions were removed from the ready pool. We might attempt to re-import them. - removed.append(&mut replaced); - }, - // transaction failed to be imported. 
- Err(e) => if first { - debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); - return Err(e) - } else { - failed.push(current_hash); - }, - } - first = false; - } - - // An edge case when importing transaction caused - // some future transactions to be imported and that - // future transactions pushed out current transaction. - // This means that there is a cycle and the transactions should - // be moved back to future, since we can't resolve it. - if removed.iter().any(|tx| tx.hash == hash) { - // We still need to remove all transactions that we promoted - // since they depend on each other and will never get to the best iterator. - self.ready.remove_subtree(&promoted); - - debug!(target: "txpool", "[{:?}] Cycle detected, bailing.", hash); - return Err(error::Error::CycleDetected) - } - - Ok(Imported::Ready { - hash, - promoted, - failed, - removed, - }) - } - - /// Returns an iterator over ready transactions in the pool. - pub fn ready(&self) -> impl Iterator>> { - self.ready.get() - } - - /// Returns an iterator over future transactions in the pool. - pub fn futures(&self) -> impl Iterator> { - self.future.all() - } - - /// Returns pool transactions given list of hashes. - /// - /// Includes both ready and future pool. For every hash in the `hashes` - /// iterator an `Option` is produced (so the resulting `Vec` always have the same length). - pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { - let ready = self.ready.by_hashes(hashes); - let future = self.future.by_hashes(hashes); - - ready - .into_iter() - .zip(future) - .map(|(a, b)| a.or(b)) - .collect() - } - - /// Returns pool transaction by hash. - pub fn ready_by_hash(&self, hash: &Hash) -> Option>> { - self.ready.by_hash(hash) - } - - /// Makes sure that the transactions in the queues stay within provided limits. - /// - /// Removes and returns worst transactions from the queues and all transactions that depend on them. 
- /// Technically the worst transaction should be evaluated by computing the entire pending set. - /// We use a simplified approach to remove the transaction that occupies the pool for the longest time. - pub fn enforce_limits(&mut self, ready: &Limit, future: &Limit) -> Vec>> { - let mut removed = vec![]; - - while ready.is_exceeded(self.ready.len(), self.ready.bytes()) { - // find the worst transaction - let minimal = self.ready - .fold(|minimal, current| { - let transaction = ¤t.transaction; - match minimal { - None => Some(transaction.clone()), - Some(ref tx) if tx.insertion_id > transaction.insertion_id => { - Some(transaction.clone()) - }, - other => other, - } - }); - - if let Some(minimal) = minimal { - removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) - } else { - break; - } - } - - while future.is_exceeded(self.future.len(), self.future.bytes()) { - // find the worst transaction - let minimal = self.future - .fold(|minimal, current| { - match minimal { - None => Some(current.clone()), - Some(ref tx) if tx.imported_at > current.imported_at => { - Some(current.clone()) - }, - other => other, - } - }); - - if let Some(minimal) = minimal { - removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) - } else { - break; - } - } - - removed - } - - /// Removes all transactions represented by the hashes and all other transactions - /// that depend on them. - /// - /// Returns a list of actually removed transactions. - /// NOTE some transactions might still be valid, but were just removed because - /// they were part of a chain, you may attempt to re-import them later. - /// NOTE If you want to remove ready transactions that were already used - /// and you don't want them to be stored in the pool use `prune_tags` method. 
- pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec>> { - let mut removed = self.ready.remove_subtree(hashes); - removed.extend(self.future.remove(hashes)); - removed - } - - /// Removes and returns all transactions from the future queue. - pub fn clear_future(&mut self) -> Vec>> { - self.future.clear() - } - - /// Prunes transactions that provide given list of tags. - /// - /// This will cause all transactions that provide these tags to be removed from the pool, - /// but unlike `remove_subtree`, dependent transactions are not touched. - /// Additional transactions from future queue might be promoted to ready if you satisfy tags - /// that the pool didn't previously know about. - pub fn prune_tags(&mut self, tags: impl IntoIterator) -> PruneStatus { - let mut to_import = vec![]; - let mut pruned = vec![]; - let recently_pruned = &mut self.recently_pruned[self.recently_pruned_index]; - self.recently_pruned_index = (self.recently_pruned_index + 1) % RECENTLY_PRUNED_TAGS; - recently_pruned.clear(); - - for tag in tags { - // make sure to promote any future transactions that could be unlocked - to_import.append(&mut self.future.satisfy_tags(std::iter::once(&tag))); - // and actually prune transactions in ready queue - pruned.append(&mut self.ready.prune_tags(tag.clone())); - // store the tags for next submission - recently_pruned.insert(tag); - } - - let mut promoted = vec![]; - let mut failed = vec![]; - for tx in to_import { - let hash = tx.transaction.hash.clone(); - match self.import_to_ready(tx) { - Ok(res) => promoted.push(res), - Err(e) => { - warn!(target: "txpool", "[{:?}] Failed to promote during pruning: {:?}", hash, e); - failed.push(hash) - }, - } - } - - PruneStatus { - pruned, - failed, - promoted, - } - } - - /// Get pool status. 
- pub fn status(&self) -> PoolStatus { - PoolStatus { - ready: self.ready.len(), - ready_bytes: self.ready.bytes(), - future: self.future.len(), - future_bytes: self.future.bytes(), - } - } + /// Create new pool given reject_future_transactions flag. + pub fn new(reject_future_transactions: bool) -> Self { + BasePool { + reject_future_transactions, + future: Default::default(), + ready: Default::default(), + recently_pruned: Default::default(), + recently_pruned_index: 0, + } + } + + /// Temporary enables future transactions, runs closure and then restores + /// `reject_future_transactions` flag back to previous value. + /// + /// The closure accepts the mutable reference to the pool and original value + /// of the `reject_future_transactions` flag. + pub(crate) fn with_futures_enabled( + &mut self, + closure: impl FnOnce(&mut Self, bool) -> T, + ) -> T { + let previous = self.reject_future_transactions; + self.reject_future_transactions = false; + let return_value = closure(self, previous); + self.reject_future_transactions = previous; + return_value + } + + /// Imports transaction to the pool. + /// + /// The pool consists of two parts: Future and Ready. + /// The former contains transactions that require some tags that are not yet provided by + /// other transactions in the pool. + /// The latter contains transactions that have all the requirements satisfied and are + /// ready to be included in the block. + pub fn import(&mut self, tx: Transaction) -> error::Result> { + if self.future.contains(&tx.hash) || self.ready.contains(&tx.hash) { + return Err(error::Error::AlreadyImported(Box::new(tx.hash.clone()))); + } + + let tx = WaitingTransaction::new(tx, self.ready.provided_tags(), &self.recently_pruned); + trace!(target: "txpool", "[{:?}] {:?}", tx.transaction.hash, tx); + debug!( + target: "txpool", + "[{:?}] Importing to {}", + tx.transaction.hash, + if tx.is_ready() { "ready" } else { "future" } + ); + + // If all tags are not satisfied import to future. 
+ if !tx.is_ready() { + if self.reject_future_transactions { + return Err(error::Error::RejectedFutureTransaction); + } + + let hash = tx.transaction.hash.clone(); + self.future.import(tx); + return Ok(Imported::Future { hash }); + } + + self.import_to_ready(tx) + } + + /// Imports transaction to ready queue. + /// + /// NOTE the transaction has to have all requirements satisfied. + fn import_to_ready( + &mut self, + tx: WaitingTransaction, + ) -> error::Result> { + let hash = tx.transaction.hash.clone(); + let mut promoted = vec![]; + let mut failed = vec![]; + let mut removed = vec![]; + + let mut first = true; + let mut to_import = vec![tx]; + + loop { + // take first transaction from the list + let tx = match to_import.pop() { + Some(tx) => tx, + None => break, + }; + + // find transactions in Future that it unlocks + to_import.append(&mut self.future.satisfy_tags(&tx.transaction.provides)); + + // import this transaction + let current_hash = tx.transaction.hash.clone(); + match self.ready.import(tx) { + Ok(mut replaced) => { + if !first { + promoted.push(current_hash); + } + // The transactions were removed from the ready pool. We might attempt to re-import them. + removed.append(&mut replaced); + } + // transaction failed to be imported. + Err(e) => { + if first { + debug!(target: "txpool", "[{:?}] Error importing: {:?}", current_hash, e); + return Err(e); + } else { + failed.push(current_hash); + } + } + } + first = false; + } + + // An edge case when importing transaction caused + // some future transactions to be imported and that + // future transactions pushed out current transaction. + // This means that there is a cycle and the transactions should + // be moved back to future, since we can't resolve it. + if removed.iter().any(|tx| tx.hash == hash) { + // We still need to remove all transactions that we promoted + // since they depend on each other and will never get to the best iterator. 
+ self.ready.remove_subtree(&promoted); + + debug!(target: "txpool", "[{:?}] Cycle detected, bailing.", hash); + return Err(error::Error::CycleDetected); + } + + Ok(Imported::Ready { + hash, + promoted, + failed, + removed, + }) + } + + /// Returns an iterator over ready transactions in the pool. + pub fn ready(&self) -> impl Iterator>> { + self.ready.get() + } + + /// Returns an iterator over future transactions in the pool. + pub fn futures(&self) -> impl Iterator> { + self.future.all() + } + + /// Returns pool transactions given list of hashes. + /// + /// Includes both ready and future pool. For every hash in the `hashes` + /// iterator an `Option` is produced (so the resulting `Vec` always have the same length). + pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { + let ready = self.ready.by_hashes(hashes); + let future = self.future.by_hashes(hashes); + + ready + .into_iter() + .zip(future) + .map(|(a, b)| a.or(b)) + .collect() + } + + /// Returns pool transaction by hash. + pub fn ready_by_hash(&self, hash: &Hash) -> Option>> { + self.ready.by_hash(hash) + } + + /// Makes sure that the transactions in the queues stay within provided limits. + /// + /// Removes and returns worst transactions from the queues and all transactions that depend on them. + /// Technically the worst transaction should be evaluated by computing the entire pending set. + /// We use a simplified approach to remove the transaction that occupies the pool for the longest time. 
+ pub fn enforce_limits( + &mut self, + ready: &Limit, + future: &Limit, + ) -> Vec>> { + let mut removed = vec![]; + + while ready.is_exceeded(self.ready.len(), self.ready.bytes()) { + // find the worst transaction + let minimal = self.ready.fold(|minimal, current| { + let transaction = ¤t.transaction; + match minimal { + None => Some(transaction.clone()), + Some(ref tx) if tx.insertion_id > transaction.insertion_id => { + Some(transaction.clone()) + } + other => other, + } + }); + + if let Some(minimal) = minimal { + removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) + } else { + break; + } + } + + while future.is_exceeded(self.future.len(), self.future.bytes()) { + // find the worst transaction + let minimal = self.future.fold(|minimal, current| match minimal { + None => Some(current.clone()), + Some(ref tx) if tx.imported_at > current.imported_at => Some(current.clone()), + other => other, + }); + + if let Some(minimal) = minimal { + removed.append(&mut self.remove_subtree(&[minimal.transaction.hash.clone()])) + } else { + break; + } + } + + removed + } + + /// Removes all transactions represented by the hashes and all other transactions + /// that depend on them. + /// + /// Returns a list of actually removed transactions. + /// NOTE some transactions might still be valid, but were just removed because + /// they were part of a chain, you may attempt to re-import them later. + /// NOTE If you want to remove ready transactions that were already used + /// and you don't want them to be stored in the pool use `prune_tags` method. + pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec>> { + let mut removed = self.ready.remove_subtree(hashes); + removed.extend(self.future.remove(hashes)); + removed + } + + /// Removes and returns all transactions from the future queue. + pub fn clear_future(&mut self) -> Vec>> { + self.future.clear() + } + + /// Prunes transactions that provide given list of tags. 
+ /// + /// This will cause all transactions that provide these tags to be removed from the pool, + /// but unlike `remove_subtree`, dependent transactions are not touched. + /// Additional transactions from future queue might be promoted to ready if you satisfy tags + /// that the pool didn't previously know about. + pub fn prune_tags(&mut self, tags: impl IntoIterator) -> PruneStatus { + let mut to_import = vec![]; + let mut pruned = vec![]; + let recently_pruned = &mut self.recently_pruned[self.recently_pruned_index]; + self.recently_pruned_index = (self.recently_pruned_index + 1) % RECENTLY_PRUNED_TAGS; + recently_pruned.clear(); + + for tag in tags { + // make sure to promote any future transactions that could be unlocked + to_import.append(&mut self.future.satisfy_tags(std::iter::once(&tag))); + // and actually prune transactions in ready queue + pruned.append(&mut self.ready.prune_tags(tag.clone())); + // store the tags for next submission + recently_pruned.insert(tag); + } + + let mut promoted = vec![]; + let mut failed = vec![]; + for tx in to_import { + let hash = tx.transaction.hash.clone(); + match self.import_to_ready(tx) { + Ok(res) => promoted.push(res), + Err(e) => { + warn!(target: "txpool", "[{:?}] Failed to promote during pruning: {:?}", hash, e); + failed.push(hash) + } + } + } + + PruneStatus { + pruned, + failed, + promoted, + } + } + + /// Get pool status. + pub fn status(&self) -> PoolStatus { + PoolStatus { + ready: self.ready.len(), + ready_bytes: self.ready.bytes(), + future: self.future.len(), + future_bytes: self.future.bytes(), + } + } } /// Queue limits #[derive(Debug, Clone)] pub struct Limit { - /// Maximal number of transactions in the queue. - pub count: usize, - /// Maximal size of encodings of all transactions in the queue. - pub total_bytes: usize, + /// Maximal number of transactions in the queue. + pub count: usize, + /// Maximal size of encodings of all transactions in the queue. 
+ pub total_bytes: usize, } impl Limit { - /// Returns true if any of the provided values exceeds the limit. - pub fn is_exceeded(&self, count: usize, bytes: usize) -> bool { - self.count < count || self.total_bytes < bytes - } + /// Returns true if any of the provided values exceeds the limit. + pub fn is_exceeded(&self, count: usize, bytes: usize) -> bool { + self.count < count || self.total_bytes < bytes + } } #[cfg(test)] mod tests { - use super::*; - - type Hash = u64; - - fn pool() -> BasePool> { - BasePool::default() - } - - #[test] - fn should_import_transaction_to_ready() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1u64, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - - // then - assert_eq!(pool.ready().count(), 1); - assert_eq!(pool.ready.len(), 1); - } - - #[test] - fn should_not_import_same_transaction_twice() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap_err(); - - // then - assert_eq!(pool.ready().count(), 1); - assert_eq!(pool.ready.len(), 1); - } - - - #[test] - fn should_import_transaction_to_future_and_promote_it_later() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - 
assert_eq!(pool.ready.len(), 0); - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); - - // then - assert_eq!(pool.ready().count(), 2); - assert_eq!(pool.ready.len(), 2); - } - - #[test] - fn should_promote_a_subgraph() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![3], vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![4]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - - let res = pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0], vec![4]], - propagate: true, - source: Source::External, - }).unwrap(); - - // then - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - - assert_eq!(it.next(), Some(5)); - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next(), Some(2)); - assert_eq!(it.next(), Some(4)); - assert_eq!(it.next(), Some(3)); - assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 5, - promoted: 
vec![1, 2, 3, 4], - failed: vec![], - removed: vec![], - }); - } - - #[test] - fn should_handle_a_cycle() { - // given - let mut pool = pool(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - - // when - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); - - // then - { - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), None); - } - // all transactions occupy the Future queue - it's fine - assert_eq!(pool.future.len(), 3); - - // let's close the cycle with one additional transaction - let res = pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 50u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), Some(4)); - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next(), Some(3)); - assert_eq!(it.next(), None); - assert_eq!(res, Imported::Ready { - hash: 4, - promoted: vec![1, 3], - failed: vec![2], - removed: vec![], - }); - assert_eq!(pool.future.len(), 0); - } - - #[test] - fn should_handle_a_cycle_with_low_priority() { - // given - let mut pool = pool(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: 
vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); - assert_eq!(pool.ready().count(), 0); - assert_eq!(pool.ready.len(), 0); - - // when - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap(); - - // then - { - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), None); - } - // all transactions occupy the Future queue - it's fine - assert_eq!(pool.future.len(), 3); - - // let's close the cycle with one additional transaction - let err = pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1u64, // lower priority than Tx(2) - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0]], - propagate: true, - source: Source::External, - }).unwrap_err(); - let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); - assert_eq!(it.next(), None); - assert_eq!(pool.ready.len(), 0); - assert_eq!(pool.future.len(), 0); - if let error::Error::CycleDetected = err { - } else { - assert!(false, "Invalid error kind: {:?}", err); - } - } - - #[test] - fn can_track_heap_size() { - let mut pool = pool(); - pool.import(Transaction { - data: vec![5u8; 1024], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0], vec![4]], - propagate: true, - source: Source::External, - }).expect("import 1 should be ok"); - pool.import(Transaction { - data: vec![3u8; 1024], - bytes: 1, - hash: 7, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![2], vec![7]], - propagate: true, - source: Source::External, - }).expect("import 2 should be ok"); 
- - assert!(parity_util_mem::malloc_size(&pool) > 5000); - } - - #[test] - fn should_remove_invalid_transactions() { - // given - let mut pool = pool(); - pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![0], vec![4]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![3], vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![4]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); - // future - pool.import(Transaction { - data: vec![6u8], - bytes: 1, - hash: 6, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![11]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); - assert_eq!(pool.ready().count(), 5); - assert_eq!(pool.future.len(), 1); - - // when - pool.remove_subtree(&[6, 1]); - - // then - assert_eq!(pool.ready().count(), 1); - assert_eq!(pool.future.len(), 0); - } - - #[test] - fn should_prune_ready_transactions() { - // given - let mut pool = pool(); - // future (waiting for 0) - pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: 
vec![vec![100]], - propagate: true, - source: Source::External, - }).unwrap(); - // ready - pool.import(Transaction { - data: vec![1u8], - bytes: 1, - hash: 1, - priority: 5u64, - valid_till: 64u64, - requires: vec![], - provides: vec![vec![1]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![2u8], - bytes: 1, - hash: 2, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![2]], - provides: vec![vec![3]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![3u8], - bytes: 1, - hash: 3, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![1]], - provides: vec![vec![2]], - propagate: true, - source: Source::External, - }).unwrap(); - pool.import(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![2]], - provides: vec![vec![4]], - propagate: true, - source: Source::External, - }).unwrap(); - - assert_eq!(pool.ready().count(), 4); - assert_eq!(pool.future.len(), 1); - - // when - let result = pool.prune_tags(vec![vec![0], vec![2]]); - - // then - assert_eq!(result.pruned.len(), 2); - assert_eq!(result.failed.len(), 0); - assert_eq!(result.promoted[0], Imported::Ready { - hash: 5, - promoted: vec![], - failed: vec![], - removed: vec![], - }); - assert_eq!(result.promoted.len(), 1); - assert_eq!(pool.future.len(), 0); - assert_eq!(pool.ready.len(), 3); - assert_eq!(pool.ready().count(), 3); - } - - #[test] - fn transaction_debug() { - assert_eq!( - format!("{:?}", Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![2]], - provides: vec![vec![4]], - propagate: true, - source: Source::External, - }), - "Transaction { \ + use super::*; + + type Hash = u64; + + fn pool() -> BasePool> { + BasePool::default() + } + + #[test] + fn should_import_transaction_to_ready() { + // given + let mut pool = pool(); + 
+ // when + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1u64, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + propagate: true, + source: Source::External, + }) + .unwrap(); + + // then + assert_eq!(pool.ready().count(), 1); + assert_eq!(pool.ready.len(), 1); + } + + #[test] + fn should_not_import_same_transaction_twice() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + propagate: true, + source: Source::External, + }) + .unwrap_err(); + + // then + assert_eq!(pool.ready().count(), 1); + assert_eq!(pool.ready.len(), 1); + } + + #[test] + fn should_import_transaction_to_future_and_promote_it_later() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + propagate: true, + source: Source::External, + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0]], + propagate: true, + source: Source::External, + }) + .unwrap(); + + // then + assert_eq!(pool.ready().count(), 2); + assert_eq!(pool.ready.len(), 2); + } + + #[test] + fn should_promote_a_subgraph() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + propagate: true, + source: 
Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![3], vec![2]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![4]], + provides: vec![], + propagate: true, + source: Source::External, + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + let res = pool + .import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0], vec![4]], + propagate: true, + source: Source::External, + }) + .unwrap(); + + // then + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + + assert_eq!(it.next(), Some(5)); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(2)); + assert_eq!(it.next(), Some(4)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + assert_eq!( + res, + Imported::Ready { + hash: 5, + promoted: vec![1, 2, 3, 4], + failed: vec![], + removed: vec![], + } + ); + } + + #[test] + fn should_handle_a_cycle() { + // given + let mut pool = pool(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![2]], + propagate: true, + source: Source::External, + }) + .unwrap(); + 
assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + // when + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![vec![0]], + propagate: true, + source: Source::External, + }) + .unwrap(); + + // then + { + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), None); + } + // all transactions occupy the Future queue - it's fine + assert_eq!(pool.future.len(), 3); + + // let's close the cycle with one additional transaction + let res = pool + .import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 50u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0]], + propagate: true, + source: Source::External, + }) + .unwrap(); + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), Some(4)); + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), None); + assert_eq!( + res, + Imported::Ready { + hash: 4, + promoted: vec![1, 3], + failed: vec![2], + removed: vec![], + } + ); + assert_eq!(pool.future.len(), 0); + } + + #[test] + fn should_handle_a_cycle_with_low_priority() { + // given + let mut pool = pool(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![2]], + propagate: true, + source: Source::External, + }) + .unwrap(); + assert_eq!(pool.ready().count(), 0); + assert_eq!(pool.ready.len(), 0); + + // when + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![vec![0]], + propagate: true, + 
source: Source::External, + }) + .unwrap(); + + // then + { + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), None); + } + // all transactions occupy the Future queue - it's fine + assert_eq!(pool.future.len(), 3); + + // let's close the cycle with one additional transaction + let err = pool + .import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1u64, // lower priority than Tx(2) + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0]], + propagate: true, + source: Source::External, + }) + .unwrap_err(); + let mut it = pool.ready().into_iter().map(|tx| tx.data[0]); + assert_eq!(it.next(), None); + assert_eq!(pool.ready.len(), 0); + assert_eq!(pool.future.len(), 0); + if let error::Error::CycleDetected = err { + } else { + assert!(false, "Invalid error kind: {:?}", err); + } + } + + #[test] + fn can_track_heap_size() { + let mut pool = pool(); + pool.import(Transaction { + data: vec![5u8; 1024], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0], vec![4]], + propagate: true, + source: Source::External, + }) + .expect("import 1 should be ok"); + pool.import(Transaction { + data: vec![3u8; 1024], + bytes: 1, + hash: 7, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![2], vec![7]], + propagate: true, + source: Source::External, + }) + .expect("import 2 should be ok"); + + assert!(parity_util_mem::malloc_size(&pool) > 5000); + } + + #[test] + fn should_remove_invalid_transactions() { + // given + let mut pool = pool(); + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![0], vec![4]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![1]], + 
propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![3], vec![2]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![4]], + provides: vec![], + propagate: true, + source: Source::External, + }) + .unwrap(); + // future + pool.import(Transaction { + data: vec![6u8], + bytes: 1, + hash: 6, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![11]], + provides: vec![], + propagate: true, + source: Source::External, + }) + .unwrap(); + assert_eq!(pool.ready().count(), 5); + assert_eq!(pool.future.len(), 1); + + // when + pool.remove_subtree(&[6, 1]); + + // then + assert_eq!(pool.ready().count(), 1); + assert_eq!(pool.future.len(), 0); + } + + #[test] + fn should_prune_ready_transactions() { + // given + let mut pool = pool(); + // future (waiting for 0) + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![vec![100]], + propagate: true, + source: Source::External, + }) + .unwrap(); + // ready + pool.import(Transaction { + data: vec![1u8], + bytes: 1, + hash: 1, + priority: 5u64, + valid_till: 64u64, + requires: vec![], + provides: vec![vec![1]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![2u8], + bytes: 1, + hash: 2, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![2]], + provides: vec![vec![3]], + propagate: true, + source: Source::External, + }) + 
.unwrap(); + pool.import(Transaction { + data: vec![3u8], + bytes: 1, + hash: 3, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![1]], + provides: vec![vec![2]], + propagate: true, + source: Source::External, + }) + .unwrap(); + pool.import(Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![2]], + provides: vec![vec![4]], + propagate: true, + source: Source::External, + }) + .unwrap(); + + assert_eq!(pool.ready().count(), 4); + assert_eq!(pool.future.len(), 1); + + // when + let result = pool.prune_tags(vec![vec![0], vec![2]]); + + // then + assert_eq!(result.pruned.len(), 2); + assert_eq!(result.failed.len(), 0); + assert_eq!( + result.promoted[0], + Imported::Ready { + hash: 5, + promoted: vec![], + failed: vec![], + removed: vec![], + } + ); + assert_eq!(result.promoted.len(), 1); + assert_eq!(pool.future.len(), 0); + assert_eq!(pool.ready.len(), 3); + assert_eq!(pool.ready().count(), 3); + } + + #[test] + fn transaction_debug() { + assert_eq!( + format!( + "{:?}", + Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![2]], + provides: vec![vec![4]], + propagate: true, + source: Source::External, + } + ), + "Transaction { \ hash: 4, priority: 1000, valid_till: 64, bytes: 1, propagate: true, \ -source: TransactionSource::External, requires: [03,02], provides: [04], data: [4]}".to_owned() - ); - } - - #[test] - fn transaction_propagation() { - assert_eq!(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![2]], - provides: vec![vec![4]], - propagate: true, - source: Source::External, - }.is_propagable(), true); - - assert_eq!(Transaction { - data: vec![4u8], - bytes: 1, - hash: 4, - priority: 1_000u64, - valid_till: 64u64, - requires: vec![vec![3], vec![2]], - provides: vec![vec![4]], - propagate: false, - source: 
Source::External, - }.is_propagable(), false); - } - - #[test] - fn should_reject_future_transactions() { - // given - let mut pool = pool(); - - // when - pool.reject_future_transactions = true; - - // then - let err = pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![], - propagate: true, - source: Source::External, - }); - - if let Err(error::Error::RejectedFutureTransaction) = err { - } else { - assert!(false, "Invalid error kind: {:?}", err); - } - } - - #[test] - fn should_clear_future_queue() { - // given - let mut pool = pool(); - - // when - pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); - - // then - assert_eq!(pool.future.len(), 1); - - // and then when - assert_eq!(pool.clear_future().len(), 1); - - // then - assert_eq!(pool.future.len(), 0); - } - - #[test] - fn should_accept_future_transactions_when_explicitly_asked_to() { - // given - let mut pool = pool(); - pool.reject_future_transactions = true; - - // when - let flag_value = pool.with_futures_enabled(|pool, flag| { - pool.import(Transaction { - data: vec![5u8], - bytes: 1, - hash: 5, - priority: 5u64, - valid_till: 64u64, - requires: vec![vec![0]], - provides: vec![], - propagate: true, - source: Source::External, - }).unwrap(); - - flag - }); - - // then - assert_eq!(flag_value, true); - assert_eq!(pool.reject_future_transactions, true); - assert_eq!(pool.future.len(), 1); - } +source: TransactionSource::External, requires: [03,02], provides: [04], data: [4]}" + .to_owned() + ); + } + + #[test] + fn transaction_propagation() { + assert_eq!( + Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![2]], + provides: vec![vec![4]], + propagate: true, + source: 
Source::External, + } + .is_propagable(), + true + ); + + assert_eq!( + Transaction { + data: vec![4u8], + bytes: 1, + hash: 4, + priority: 1_000u64, + valid_till: 64u64, + requires: vec![vec![3], vec![2]], + provides: vec![vec![4]], + propagate: false, + source: Source::External, + } + .is_propagable(), + false + ); + } + + #[test] + fn should_reject_future_transactions() { + // given + let mut pool = pool(); + + // when + pool.reject_future_transactions = true; + + // then + let err = pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![], + propagate: true, + source: Source::External, + }); + + if let Err(error::Error::RejectedFutureTransaction) = err { + } else { + assert!(false, "Invalid error kind: {:?}", err); + } + } + + #[test] + fn should_clear_future_queue() { + // given + let mut pool = pool(); + + // when + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![], + propagate: true, + source: Source::External, + }) + .unwrap(); + + // then + assert_eq!(pool.future.len(), 1); + + // and then when + assert_eq!(pool.clear_future().len(), 1); + + // then + assert_eq!(pool.future.len(), 0); + } + + #[test] + fn should_accept_future_transactions_when_explicitly_asked_to() { + // given + let mut pool = pool(); + pool.reject_future_transactions = true; + + // when + let flag_value = pool.with_futures_enabled(|pool, flag| { + pool.import(Transaction { + data: vec![5u8], + bytes: 1, + hash: 5, + priority: 5u64, + valid_till: 64u64, + requires: vec![vec![0]], + provides: vec![], + propagate: true, + source: Source::External, + }) + .unwrap(); + + flag + }); + + // then + assert_eq!(flag_value, true); + assert_eq!(pool.reject_future_transactions, true); + assert_eq!(pool.future.len(), 1); + } } diff --git a/client/transaction-pool/graph/src/future.rs 
b/client/transaction-pool/graph/src/future.rs index 76181c837f..b286a72ede 100644 --- a/client/transaction-pool/graph/src/future.rs +++ b/client/transaction-pool/graph/src/future.rs @@ -15,16 +15,13 @@ // along with Substrate. If not, see . use std::{ - collections::{HashMap, HashSet}, - fmt, - hash, - sync::Arc, + collections::{HashMap, HashSet}, + fmt, hash, + sync::Arc, }; use sp_core::hexdisplay::HexDisplay; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, -}; +use sp_runtime::transaction_validity::TransactionTag as Tag; use wasm_timer::Instant; use crate::base_pool::Transaction; @@ -32,78 +29,80 @@ use crate::base_pool::Transaction; #[cfg_attr(not(target_os = "unknown"), derive(parity_util_mem::MallocSizeOf))] /// Transaction with partially satisfied dependencies. pub struct WaitingTransaction { - /// Transaction details. - pub transaction: Arc>, - /// Tags that are required and have not been satisfied yet by other transactions in the pool. - pub missing_tags: HashSet, - /// Time of import to the Future Queue. - pub imported_at: Instant, + /// Transaction details. + pub transaction: Arc>, + /// Tags that are required and have not been satisfied yet by other transactions in the pool. + pub missing_tags: HashSet, + /// Time of import to the Future Queue. 
+ pub imported_at: Instant, } impl fmt::Debug for WaitingTransaction { - fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { - write!(fmt, "WaitingTransaction {{ ")?; - write!(fmt, "imported_at: {:?}, ", self.imported_at)?; - write!(fmt, "transaction: {:?}, ", self.transaction)?; - write!(fmt, "missing_tags: {{")?; - let mut it = self.missing_tags.iter().map(|tag| HexDisplay::from(tag)); - if let Some(tag) = it.next() { - write!(fmt, "{}", tag)?; - } - for tag in it { - write!(fmt, ", {}", tag)?; - } - write!(fmt, " }}}}") - } + fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result { + write!(fmt, "WaitingTransaction {{ ")?; + write!(fmt, "imported_at: {:?}, ", self.imported_at)?; + write!(fmt, "transaction: {:?}, ", self.transaction)?; + write!(fmt, "missing_tags: {{")?; + let mut it = self.missing_tags.iter().map(|tag| HexDisplay::from(tag)); + if let Some(tag) = it.next() { + write!(fmt, "{}", tag)?; + } + for tag in it { + write!(fmt, ", {}", tag)?; + } + write!(fmt, " }}}}") + } } impl Clone for WaitingTransaction { - fn clone(&self) -> Self { - WaitingTransaction { - transaction: self.transaction.clone(), - missing_tags: self.missing_tags.clone(), - imported_at: self.imported_at.clone(), - } - } + fn clone(&self) -> Self { + WaitingTransaction { + transaction: self.transaction.clone(), + missing_tags: self.missing_tags.clone(), + imported_at: self.imported_at.clone(), + } + } } impl WaitingTransaction { - /// Creates a new `WaitingTransaction`. - /// - /// Computes the set of missing tags based on the requirements and tags that - /// are provided by all transactions in the ready queue. - pub fn new( - transaction: Transaction, - provided: &HashMap, - recently_pruned: &[HashSet], - ) -> Self { - let missing_tags = transaction.requires - .iter() - .filter(|tag| { - // is true if the tag is already satisfied either via transaction in the pool - // or one that was recently included. 
- let is_provided = provided.contains_key(&**tag) || recently_pruned.iter().any(|x| x.contains(&**tag)); - !is_provided - }) - .cloned() - .collect(); - - WaitingTransaction { - transaction: Arc::new(transaction), - missing_tags, - imported_at: Instant::now(), - } - } - - /// Marks the tag as satisfied. - pub fn satisfy_tag(&mut self, tag: &Tag) { - self.missing_tags.remove(tag); - } - - /// Returns true if transaction has all requirements satisfied. - pub fn is_ready(&self) -> bool { - self.missing_tags.is_empty() - } + /// Creates a new `WaitingTransaction`. + /// + /// Computes the set of missing tags based on the requirements and tags that + /// are provided by all transactions in the ready queue. + pub fn new( + transaction: Transaction, + provided: &HashMap, + recently_pruned: &[HashSet], + ) -> Self { + let missing_tags = transaction + .requires + .iter() + .filter(|tag| { + // is true if the tag is already satisfied either via transaction in the pool + // or one that was recently included. + let is_provided = provided.contains_key(&**tag) + || recently_pruned.iter().any(|x| x.contains(&**tag)); + !is_provided + }) + .cloned() + .collect(); + + WaitingTransaction { + transaction: Arc::new(transaction), + missing_tags, + imported_at: Instant::now(), + } + } + + /// Marks the tag as satisfied. + pub fn satisfy_tag(&mut self, tag: &Tag) { + self.missing_tags.remove(tag); + } + + /// Returns true if transaction has all requirements satisfied. + pub fn is_ready(&self) -> bool { + self.missing_tags.is_empty() + } } /// A pool of transactions that are not yet ready to be included in the block. 
@@ -113,19 +112,19 @@ impl WaitingTransaction { #[derive(Debug)] #[cfg_attr(not(target_os = "unknown"), derive(parity_util_mem::MallocSizeOf))] pub struct FutureTransactions { - /// tags that are not yet provided by any transaction and we await for them - wanted_tags: HashMap>, - /// Transactions waiting for a particular other transaction - waiting: HashMap>, + /// tags that are not yet provided by any transaction and we await for them + wanted_tags: HashMap>, + /// Transactions waiting for a particular other transaction + waiting: HashMap>, } impl Default for FutureTransactions { - fn default() -> Self { - FutureTransactions { - wanted_tags: Default::default(), - waiting: Default::default(), - } - } + fn default() -> Self { + FutureTransactions { + wanted_tags: Default::default(), + waiting: Default::default(), + } + } } const WAITING_PROOF: &str = r"# @@ -136,141 +135,159 @@ qed #"; impl FutureTransactions { - /// Import transaction to Future queue. - /// - /// Only transactions that don't have all their tags satisfied should occupy - /// the Future queue. - /// As soon as required tags are provided by some other transactions that are ready - /// we should remove the transactions from here and move them to the Ready queue. - pub fn import(&mut self, tx: WaitingTransaction) { - assert!(!tx.is_ready(), "Transaction is ready."); - assert!(!self.waiting.contains_key(&tx.transaction.hash), "Transaction is already imported."); - - // Add all tags that are missing - for tag in &tx.missing_tags { - let entry = self.wanted_tags.entry(tag.clone()).or_insert_with(HashSet::new); - entry.insert(tx.transaction.hash.clone()); - } - - // Add the transaction to a by-hash waiting map - self.waiting.insert(tx.transaction.hash.clone(), tx); - } - - /// Returns true if given hash is part of the queue. 
- pub fn contains(&self, hash: &Hash) -> bool { - self.waiting.contains_key(hash) - } - - /// Returns a list of known transactions - pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { - hashes.iter().map(|h| self.waiting.get(h).map(|x| x.transaction.clone())).collect() - } - - /// Satisfies provided tags in transactions that are waiting for them. - /// - /// Returns (and removes) transactions that became ready after their last tag got - /// satisfied and now we can remove them from Future and move to Ready queue. - pub fn satisfy_tags>(&mut self, tags: impl IntoIterator) -> Vec> { - let mut became_ready = vec![]; - - for tag in tags { - if let Some(hashes) = self.wanted_tags.remove(tag.as_ref()) { - for hash in hashes { - let is_ready = { - let tx = self.waiting.get_mut(&hash).expect(WAITING_PROOF); - tx.satisfy_tag(tag.as_ref()); - tx.is_ready() - }; - - if is_ready { - let tx = self.waiting.remove(&hash).expect(WAITING_PROOF); - became_ready.push(tx); - } - } - } - } - - became_ready - } - - /// Removes transactions for given list of hashes. - /// - /// Returns a list of actually removed transactions. - pub fn remove(&mut self, hashes: &[Hash]) -> Vec>> { - let mut removed = vec![]; - for hash in hashes { - if let Some(waiting_tx) = self.waiting.remove(hash) { - // remove from wanted_tags as well - for tag in waiting_tx.missing_tags { - let remove = if let Some(wanted) = self.wanted_tags.get_mut(&tag) { - wanted.remove(hash); - wanted.is_empty() - } else { false }; - if remove { - self.wanted_tags.remove(&tag); - } - } - // add to result - removed.push(waiting_tx.transaction) - } - } - removed - } - - /// Fold a list of future transactions to compute a single value. 
- pub fn fold, &WaitingTransaction) -> Option>(&mut self, f: F) -> Option { - self.waiting - .values() - .fold(None, f) - } - - /// Returns iterator over all future transactions - pub fn all(&self) -> impl Iterator> { - self.waiting.values().map(|waiting| &*waiting.transaction) - } - - /// Removes and returns all future transactions. - pub fn clear(&mut self) -> Vec>> { - self.wanted_tags.clear(); - self.waiting.drain().map(|(_, tx)| tx.transaction).collect() - } - - /// Returns number of transactions in the Future queue. - pub fn len(&self) -> usize { - self.waiting.len() - } - - /// Returns sum of encoding lengths of all transactions in this queue. - pub fn bytes(&self) -> usize { - self.waiting.values().fold(0, |acc, tx| acc + tx.transaction.bytes) - } + /// Import transaction to Future queue. + /// + /// Only transactions that don't have all their tags satisfied should occupy + /// the Future queue. + /// As soon as required tags are provided by some other transactions that are ready + /// we should remove the transactions from here and move them to the Ready queue. + pub fn import(&mut self, tx: WaitingTransaction) { + assert!(!tx.is_ready(), "Transaction is ready."); + assert!( + !self.waiting.contains_key(&tx.transaction.hash), + "Transaction is already imported." + ); + + // Add all tags that are missing + for tag in &tx.missing_tags { + let entry = self + .wanted_tags + .entry(tag.clone()) + .or_insert_with(HashSet::new); + entry.insert(tx.transaction.hash.clone()); + } + + // Add the transaction to a by-hash waiting map + self.waiting.insert(tx.transaction.hash.clone(), tx); + } + + /// Returns true if given hash is part of the queue. 
+ pub fn contains(&self, hash: &Hash) -> bool { + self.waiting.contains_key(hash) + } + + /// Returns a list of known transactions + pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { + hashes + .iter() + .map(|h| self.waiting.get(h).map(|x| x.transaction.clone())) + .collect() + } + + /// Satisfies provided tags in transactions that are waiting for them. + /// + /// Returns (and removes) transactions that became ready after their last tag got + /// satisfied and now we can remove them from Future and move to Ready queue. + pub fn satisfy_tags>( + &mut self, + tags: impl IntoIterator, + ) -> Vec> { + let mut became_ready = vec![]; + + for tag in tags { + if let Some(hashes) = self.wanted_tags.remove(tag.as_ref()) { + for hash in hashes { + let is_ready = { + let tx = self.waiting.get_mut(&hash).expect(WAITING_PROOF); + tx.satisfy_tag(tag.as_ref()); + tx.is_ready() + }; + + if is_ready { + let tx = self.waiting.remove(&hash).expect(WAITING_PROOF); + became_ready.push(tx); + } + } + } + } + + became_ready + } + + /// Removes transactions for given list of hashes. + /// + /// Returns a list of actually removed transactions. + pub fn remove(&mut self, hashes: &[Hash]) -> Vec>> { + let mut removed = vec![]; + for hash in hashes { + if let Some(waiting_tx) = self.waiting.remove(hash) { + // remove from wanted_tags as well + for tag in waiting_tx.missing_tags { + let remove = if let Some(wanted) = self.wanted_tags.get_mut(&tag) { + wanted.remove(hash); + wanted.is_empty() + } else { + false + }; + if remove { + self.wanted_tags.remove(&tag); + } + } + // add to result + removed.push(waiting_tx.transaction) + } + } + removed + } + + /// Fold a list of future transactions to compute a single value. 
+ pub fn fold, &WaitingTransaction) -> Option>( + &mut self, + f: F, + ) -> Option { + self.waiting.values().fold(None, f) + } + + /// Returns iterator over all future transactions + pub fn all(&self) -> impl Iterator> { + self.waiting.values().map(|waiting| &*waiting.transaction) + } + + /// Removes and returns all future transactions. + pub fn clear(&mut self) -> Vec>> { + self.wanted_tags.clear(); + self.waiting.drain().map(|(_, tx)| tx.transaction).collect() + } + + /// Returns number of transactions in the Future queue. + pub fn len(&self) -> usize { + self.waiting.len() + } + + /// Returns sum of encoding lengths of all transactions in this queue. + pub fn bytes(&self) -> usize { + self.waiting + .values() + .fold(0, |acc, tx| acc + tx.transaction.bytes) + } } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::transaction_validity::TransactionSource; - - #[test] - fn can_track_heap_size() { - let mut future = FutureTransactions::default(); - future.import(WaitingTransaction { - transaction: Transaction { - data: vec![0u8; 1024], - bytes: 1, - hash: 1, - priority: 1, - valid_till: 2, - requires: vec![vec![1], vec![2]], - provides: vec![vec![3], vec![4]], - propagate: true, - source: TransactionSource::External, - }.into(), - missing_tags: vec![vec![1u8], vec![2u8]].into_iter().collect(), - imported_at: std::time::Instant::now(), - }); - - // data is at least 1024! 
- assert!(parity_util_mem::malloc_size(&future) > 1024); - } + use super::*; + use sp_runtime::transaction_validity::TransactionSource; + + #[test] + fn can_track_heap_size() { + let mut future = FutureTransactions::default(); + future.import(WaitingTransaction { + transaction: Transaction { + data: vec![0u8; 1024], + bytes: 1, + hash: 1, + priority: 1, + valid_till: 2, + requires: vec![vec![1], vec![2]], + provides: vec![vec![3], vec![4]], + propagate: true, + source: TransactionSource::External, + } + .into(), + missing_tags: vec![vec![1u8], vec![2u8]].into_iter().collect(), + imported_at: std::time::Instant::now(), + }); + + // data is at least 1024! + assert!(parity_util_mem::malloc_size(&future) > 1024); + } } diff --git a/client/transaction-pool/graph/src/lib.rs b/client/transaction-pool/graph/src/lib.rs index ed10ef38d2..7e3da22f36 100644 --- a/client/transaction-pool/graph/src/lib.rs +++ b/client/transaction-pool/graph/src/lib.rs @@ -36,8 +36,6 @@ pub mod watcher; pub use self::base_pool::Transaction; pub use self::pool::{ - Pool, - Options, ChainApi, EventStream, ExtrinsicFor, - BlockHash, ExHash, NumberFor, TransactionFor, - ValidatedTransaction, + BlockHash, ChainApi, EventStream, ExHash, ExtrinsicFor, NumberFor, Options, Pool, + TransactionFor, ValidatedTransaction, }; diff --git a/client/transaction-pool/graph/src/listener.rs b/client/transaction-pool/graph/src/listener.rs index 92ba71007e..2d33bbdbb8 100644 --- a/client/transaction-pool/graph/src/listener.rs +++ b/client/transaction-pool/graph/src/listener.rs @@ -1,4 +1,3 @@ - // Copyright 2018-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. @@ -15,124 +14,131 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::{ - collections::HashMap, hash, -}; +use crate::{watcher, BlockHash, ChainApi}; use linked_hash_map::LinkedHashMap; -use serde::Serialize; -use crate::{watcher, ChainApi, BlockHash}; use log::{debug, trace, warn}; +use serde::Serialize; use sp_runtime::traits; +use std::{collections::HashMap, hash}; /// Extrinsic pool default listener. pub struct Listener { - watchers: HashMap>>, - finality_watchers: LinkedHashMap, Vec>, + watchers: HashMap>>, + finality_watchers: LinkedHashMap, Vec>, } /// Maximum number of blocks awaiting finality at any time. const MAX_FINALITY_WATCHERS: usize = 512; impl Default for Listener { - fn default() -> Self { - Listener { - watchers: Default::default(), - finality_watchers: Default::default(), - } - } + fn default() -> Self { + Listener { + watchers: Default::default(), + finality_watchers: Default::default(), + } + } } impl Listener { - fn fire(&mut self, hash: &H, fun: F) where F: FnOnce(&mut watcher::Sender>) { - let clean = if let Some(h) = self.watchers.get_mut(hash) { - fun(h); - h.is_done() - } else { - false - }; - - if clean { - self.watchers.remove(hash); - } - } - - /// Creates a new watcher for given verified extrinsic. - /// - /// The watcher can be used to subscribe to life-cycle events of that extrinsic. - pub fn create_watcher(&mut self, hash: H) -> watcher::Watcher> { - let sender = self.watchers.entry(hash.clone()).or_insert_with(watcher::Sender::default); - sender.new_watcher(hash) - } - - /// Notify the listeners about extrinsic broadcast. - pub fn broadcasted(&mut self, hash: &H, peers: Vec) { - trace!(target: "txpool", "[{:?}] Broadcasted", hash); - self.fire(hash, |watcher| watcher.broadcast(peers)); - } - - /// New transaction was added to the ready pool or promoted from the future pool. 
- pub fn ready(&mut self, tx: &H, old: Option<&H>) { - trace!(target: "txpool", "[{:?}] Ready (replaced: {:?})", tx, old); - self.fire(tx, |watcher| watcher.ready()); - if let Some(old) = old { - self.fire(old, |watcher| watcher.usurped(tx.clone())); - } - } - - /// New transaction was added to the future pool. - pub fn future(&mut self, tx: &H) { - trace!(target: "txpool", "[{:?}] Future", tx); - self.fire(tx, |watcher| watcher.future()); - } - - /// Transaction was dropped from the pool because of the limit. - pub fn dropped(&mut self, tx: &H, by: Option<&H>) { - trace!(target: "txpool", "[{:?}] Dropped (replaced by {:?})", tx, by); - self.fire(tx, |watcher| match by { - Some(t) => watcher.usurped(t.clone()), - None => watcher.dropped(), - }) - } - - /// Transaction was removed as invalid. - pub fn invalid(&mut self, tx: &H, warn: bool) { - if warn { - warn!(target: "txpool", "Extrinsic invalid: {:?}", tx); - } else { - debug!(target: "txpool", "Extrinsic invalid: {:?}", tx); - } - self.fire(tx, |watcher| watcher.invalid()); - } - - /// Transaction was pruned from the pool. - pub fn pruned(&mut self, block_hash: BlockHash, tx: &H) { - debug!(target: "txpool", "[{:?}] Pruned at {:?}", tx, block_hash); - self.fire(tx, |s| s.in_block(block_hash)); - self.finality_watchers.entry(block_hash).or_insert(vec![]).push(tx.clone()); - - while self.finality_watchers.len() > MAX_FINALITY_WATCHERS { - if let Some((hash, txs)) = self.finality_watchers.pop_front() { - for tx in txs { - self.fire(&tx, |s| s.finality_timeout(hash.clone())); - } - } - } - } - - /// The block this transaction was included in has been retracted. 
- pub fn retracted(&mut self, block_hash: BlockHash) { - if let Some(hashes) = self.finality_watchers.remove(&block_hash) { - for hash in hashes { - self.fire(&hash, |s| s.retracted(block_hash)) - } - } - } - - /// Notify all watchers that transactions have been finalized - pub fn finalized(&mut self, block_hash: BlockHash, txs: Vec) { - self.finality_watchers.remove(&block_hash); - for h in txs { - self.fire(&h, |s| s.finalized(block_hash.clone())) - } - } + fn fire(&mut self, hash: &H, fun: F) + where + F: FnOnce(&mut watcher::Sender>), + { + let clean = if let Some(h) = self.watchers.get_mut(hash) { + fun(h); + h.is_done() + } else { + false + }; + + if clean { + self.watchers.remove(hash); + } + } + + /// Creates a new watcher for given verified extrinsic. + /// + /// The watcher can be used to subscribe to life-cycle events of that extrinsic. + pub fn create_watcher(&mut self, hash: H) -> watcher::Watcher> { + let sender = self + .watchers + .entry(hash.clone()) + .or_insert_with(watcher::Sender::default); + sender.new_watcher(hash) + } + + /// Notify the listeners about extrinsic broadcast. + pub fn broadcasted(&mut self, hash: &H, peers: Vec) { + trace!(target: "txpool", "[{:?}] Broadcasted", hash); + self.fire(hash, |watcher| watcher.broadcast(peers)); + } + + /// New transaction was added to the ready pool or promoted from the future pool. + pub fn ready(&mut self, tx: &H, old: Option<&H>) { + trace!(target: "txpool", "[{:?}] Ready (replaced: {:?})", tx, old); + self.fire(tx, |watcher| watcher.ready()); + if let Some(old) = old { + self.fire(old, |watcher| watcher.usurped(tx.clone())); + } + } + + /// New transaction was added to the future pool. + pub fn future(&mut self, tx: &H) { + trace!(target: "txpool", "[{:?}] Future", tx); + self.fire(tx, |watcher| watcher.future()); + } + + /// Transaction was dropped from the pool because of the limit. 
+ pub fn dropped(&mut self, tx: &H, by: Option<&H>) { + trace!(target: "txpool", "[{:?}] Dropped (replaced by {:?})", tx, by); + self.fire(tx, |watcher| match by { + Some(t) => watcher.usurped(t.clone()), + None => watcher.dropped(), + }) + } + + /// Transaction was removed as invalid. + pub fn invalid(&mut self, tx: &H, warn: bool) { + if warn { + warn!(target: "txpool", "Extrinsic invalid: {:?}", tx); + } else { + debug!(target: "txpool", "Extrinsic invalid: {:?}", tx); + } + self.fire(tx, |watcher| watcher.invalid()); + } + + /// Transaction was pruned from the pool. + pub fn pruned(&mut self, block_hash: BlockHash, tx: &H) { + debug!(target: "txpool", "[{:?}] Pruned at {:?}", tx, block_hash); + self.fire(tx, |s| s.in_block(block_hash)); + self.finality_watchers + .entry(block_hash) + .or_insert(vec![]) + .push(tx.clone()); + + while self.finality_watchers.len() > MAX_FINALITY_WATCHERS { + if let Some((hash, txs)) = self.finality_watchers.pop_front() { + for tx in txs { + self.fire(&tx, |s| s.finality_timeout(hash.clone())); + } + } + } + } + + /// The block this transaction was included in has been retracted. + pub fn retracted(&mut self, block_hash: BlockHash) { + if let Some(hashes) = self.finality_watchers.remove(&block_hash) { + for hash in hashes { + self.fire(&hash, |s| s.retracted(block_hash)) + } + } + } + + /// Notify all watchers that transactions have been finalized + pub fn finalized(&mut self, block_hash: BlockHash, txs: Vec) { + self.finality_watchers.remove(&block_hash); + for h in txs { + self.fire(&h, |s| s.finalized(block_hash.clone())) + } + } } diff --git a/client/transaction-pool/graph/src/pool.rs b/client/transaction-pool/graph/src/pool.rs index 0b817b155d..cd56283c70 100644 --- a/client/transaction-pool/graph/src/pool.rs +++ b/client/transaction-pool/graph/src/pool.rs @@ -14,11 +14,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::{ - hash, - collections::HashMap, - sync::Arc, -}; +use std::{collections::HashMap, hash, sync::Arc}; use crate::base_pool as base; use crate::watcher::Watcher; @@ -26,15 +22,15 @@ use serde::Serialize; use futures::{Future, FutureExt}; use sp_runtime::{ - generic::BlockId, - traits::{self, SaturatedConversion}, - transaction_validity::{ - TransactionValidity, TransactionTag as Tag, TransactionValidityError, TransactionSource, - }, + generic::BlockId, + traits::{self, SaturatedConversion}, + transaction_validity::{ + TransactionSource, TransactionTag as Tag, TransactionValidity, TransactionValidityError, + }, }; use sp_transaction_pool::error; -use wasm_timer::Instant; use sp_utils::mpsc::TracingUnboundedReceiver; +use wasm_timer::Instant; use crate::validated_pool::ValidatedPool; pub use crate::validated_pool::ValidatedTransaction; @@ -53,940 +49,1097 @@ pub type NumberFor = traits::NumberFor<::Block>; /// A type of transaction stored in the pool pub type TransactionFor = Arc, ExtrinsicFor>>; /// A type of validated transaction stored in the pool. -pub type ValidatedTransactionFor = ValidatedTransaction< - ExHash, - ExtrinsicFor, - ::Error, ->; +pub type ValidatedTransactionFor = + ValidatedTransaction, ExtrinsicFor, ::Error>; /// Concrete extrinsic validation and query logic. pub trait ChainApi: Send + Sync { - /// Block type. - type Block: traits::Block; - /// Transaction Hash type - type Hash: hash::Hash + Eq + traits::Member + Serialize; - /// Error type. - type Error: From + error::IntoPoolError; - /// Validate transaction future. - type ValidationFuture: Future> + Send + Unpin; - /// Body future (since block body might be remote) - type BodyFuture: Future::Extrinsic>>, Self::Error>> + Unpin + Send + 'static; - - /// Verify extrinsic at given block. - fn validate_transaction( - &self, - at: &BlockId, - source: TransactionSource, - uxt: ExtrinsicFor, - ) -> Self::ValidationFuture; - - /// Returns a block number given the block id. 
- fn block_id_to_number(&self, at: &BlockId) -> Result>, Self::Error>; - - /// Returns a block hash given the block id. - fn block_id_to_hash(&self, at: &BlockId) -> Result>, Self::Error>; - - /// Returns hash and encoding length of the extrinsic. - fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize); - - /// Returns a block body given the block id. - fn block_body(&self, at: &BlockId) -> Self::BodyFuture; + /// Block type. + type Block: traits::Block; + /// Transaction Hash type + type Hash: hash::Hash + Eq + traits::Member + Serialize; + /// Error type. + type Error: From + error::IntoPoolError; + /// Validate transaction future. + type ValidationFuture: Future> + Send + Unpin; + /// Body future (since block body might be remote) + type BodyFuture: Future::Extrinsic>>, Self::Error>> + + Unpin + + Send + + 'static; + + /// Verify extrinsic at given block. + fn validate_transaction( + &self, + at: &BlockId, + source: TransactionSource, + uxt: ExtrinsicFor, + ) -> Self::ValidationFuture; + + /// Returns a block number given the block id. + fn block_id_to_number( + &self, + at: &BlockId, + ) -> Result>, Self::Error>; + + /// Returns a block hash given the block id. + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> Result>, Self::Error>; + + /// Returns hash and encoding length of the extrinsic. + fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize); + + /// Returns a block body given the block id. + fn block_body(&self, at: &BlockId) -> Self::BodyFuture; } /// Pool configuration options. #[derive(Debug, Clone)] pub struct Options { - /// Ready queue limits. - pub ready: base::Limit, - /// Future queue limits. - pub future: base::Limit, - /// Reject future transactions. - pub reject_future_transactions: bool, + /// Ready queue limits. + pub ready: base::Limit, + /// Future queue limits. + pub future: base::Limit, + /// Reject future transactions. 
+ pub reject_future_transactions: bool, } impl Default for Options { - fn default() -> Self { - Options { - ready: base::Limit { - count: 8192, - total_bytes: 20 * 1024 * 1024, - }, - future: base::Limit { - count: 512, - total_bytes: 1 * 1024 * 1024, - }, - reject_future_transactions: false, - } - } + fn default() -> Self { + Options { + ready: base::Limit { + count: 8192, + total_bytes: 20 * 1024 * 1024, + }, + future: base::Limit { + count: 512, + total_bytes: 1 * 1024 * 1024, + }, + reject_future_transactions: false, + } + } } /// Extrinsics pool that performs validation. pub struct Pool { - validated_pool: Arc>, + validated_pool: Arc>, } #[cfg(not(target_os = "unknown"))] impl parity_util_mem::MallocSizeOf for Pool where - B::Hash: parity_util_mem::MallocSizeOf, - ExtrinsicFor: parity_util_mem::MallocSizeOf, + B::Hash: parity_util_mem::MallocSizeOf, + ExtrinsicFor: parity_util_mem::MallocSizeOf, { - fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - self.validated_pool.size_of(ops) - } + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + self.validated_pool.size_of(ops) + } } impl Pool { - /// Create a new transaction pool. 
- pub fn new(options: Options, api: Arc) -> Self { - Pool { - validated_pool: Arc::new(ValidatedPool::new(options, api)), - } - } - - /// Imports a bunch of unverified extrinsics to the pool - pub async fn submit_at( - &self, - at: &BlockId, - source: TransactionSource, - xts: T, - force: bool, - ) -> Result, B::Error>>, B::Error> where - T: IntoIterator>, - { - let validated_pool = self.validated_pool.clone(); - let xts = xts.into_iter().map(|xt| (source, xt)); - self.verify(at, xts, force) - .map(move |validated_transactions| validated_transactions - .map(|validated_transactions| validated_pool.submit(validated_transactions - .into_iter() - .map(|(_, tx)| tx)))) - .await - } - - /// Imports one unverified extrinsic to the pool - pub async fn submit_one( - &self, - at: &BlockId, - source: TransactionSource, - xt: ExtrinsicFor, - ) -> Result, B::Error> { - self.submit_at(at, source, std::iter::once(xt), false) - .map(|import_result| import_result.and_then(|mut import_result| import_result - .pop() - .expect("One extrinsic passed; one result returned; qed") - )) - .await - } - - /// Import a single extrinsic and starts to watch their progress in the pool. - pub async fn submit_and_watch( - &self, - at: &BlockId, - source: TransactionSource, - xt: ExtrinsicFor, - ) -> Result, BlockHash>, B::Error> { - let block_number = self.resolve_block_number(at)?; - let (_, tx) = self.verify_one( - at, block_number, source, xt, false - ).await; - self.validated_pool.submit_and_watch(tx) - } - - /// Resubmit some transaction that were validated elsewhere. - pub fn resubmit( - &self, - revalidated_transactions: HashMap, ValidatedTransactionFor>, - ) { - - let now = Instant::now(); - self.validated_pool.resubmit(revalidated_transactions); - log::debug!(target: "txpool", - "Resubmitted. Took {} ms. Status: {:?}", - now.elapsed().as_millis(), - self.validated_pool.status() - ); - } - - /// Prunes known ready transactions. 
- /// - /// Used to clear the pool from transactions that were part of recently imported block. - /// The main difference from the `prune` is that we do not revalidate any transactions - /// and ignore unknown passed hashes. - pub fn prune_known(&self, at: &BlockId, hashes: &[ExHash]) -> Result<(), B::Error> { - // Get details of all extrinsics that are already in the pool - let in_pool_tags = self.validated_pool.extrinsics_tags(hashes) - .into_iter().filter_map(|x| x).flat_map(|x| x); - - // Prune all transactions that provide given tags - let prune_status = self.validated_pool.prune_tags(in_pool_tags)?; - let pruned_transactions = hashes.into_iter().cloned() - .chain(prune_status.pruned.iter().map(|tx| tx.hash.clone())); - self.validated_pool.fire_pruned(at, pruned_transactions) - } - - /// Prunes ready transactions. - /// - /// Used to clear the pool from transactions that were part of recently imported block. - /// To perform pruning we need the tags that each extrinsic provides and to avoid calling - /// into runtime too often we first lookup all extrinsics that are in the pool and get - /// their provided tags from there. Otherwise we query the runtime at the `parent` block. 
- pub async fn prune( - &self, - at: &BlockId, - parent: &BlockId, - extrinsics: &[ExtrinsicFor], - ) -> Result<(), B::Error> { - log::debug!( - target: "txpool", - "Starting pruning of block {:?} (extrinsics: {})", - at, - extrinsics.len() - ); - // Get details of all extrinsics that are already in the pool - let in_pool_hashes = extrinsics.iter().map(|extrinsic| self.hash_of(extrinsic)).collect::>(); - let in_pool_tags = self.validated_pool.extrinsics_tags(&in_pool_hashes); - - // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option>)`) - let all = extrinsics.iter().zip(in_pool_tags.into_iter()); - - let mut future_tags = Vec::new(); - for (extrinsic, in_pool_tags) in all { - match in_pool_tags { - // reuse the tags for extrinsics that were found in the pool - Some(tags) => future_tags.extend(tags), - // if it's not found in the pool query the runtime at parent block - // to get validity info and tags that the extrinsic provides. - None => { - let validity = self.validated_pool.api() - .validate_transaction(parent, TransactionSource::InBlock, extrinsic.clone()) - .await; - - if let Ok(Ok(validity)) = validity { - future_tags.extend(validity.provides); - } - }, - } - } - - self.prune_tags(at, future_tags, in_pool_hashes).await - } - - /// Prunes ready transactions that provide given list of tags. - /// - /// Given tags are assumed to be always provided now, so all transactions - /// in the Future Queue that require that particular tag (and have other - /// requirements satisfied) are promoted to Ready Queue. - /// - /// Moreover for each provided tag we remove transactions in the pool that: - /// 1. Provide that tag directly - /// 2. Are a dependency of pruned transaction. - /// - /// Returns transactions that have been removed from the pool and must be reverified - /// before reinserting to the pool. 
- /// - /// By removing predecessor transactions as well we might actually end up - /// pruning too much, so all removed transactions are reverified against - /// the runtime (`validate_transaction`) to make sure they are invalid. - /// - /// However we avoid revalidating transactions that are contained within - /// the second parameter of `known_imported_hashes`. These transactions - /// (if pruned) are not revalidated and become temporarily banned to - /// prevent importing them in the (near) future. - pub async fn prune_tags( - &self, - at: &BlockId, - tags: impl IntoIterator, - known_imported_hashes: impl IntoIterator> + Clone, - ) -> Result<(), B::Error> { - log::debug!(target: "txpool", "Pruning at {:?}", at); - // Prune all transactions that provide given tags - let prune_status = match self.validated_pool.prune_tags(tags) { - Ok(prune_status) => prune_status, - Err(e) => return Err(e), - }; - - // Make sure that we don't revalidate extrinsics that were part of the recently - // imported block. This is especially important for UTXO-like chains cause the - // inputs are pruned so such transaction would go to future again. - self.validated_pool.ban(&Instant::now(), known_imported_hashes.clone().into_iter()); - - // Try to re-validate pruned transactions since some of them might be still valid. - // note that `known_imported_hashes` will be rejected here due to temporary ban. - let pruned_hashes = prune_status.pruned - .iter() - .map(|tx| tx.hash.clone()).collect::>(); - let pruned_transactions = prune_status.pruned - .into_iter() - .map(|tx| (tx.source, tx.data.clone())); - - let reverified_transactions = self.verify(at, pruned_transactions, false).await?; - - log::trace!(target: "txpool", "Pruning at {:?}. 
Resubmitting transactions.", at); - // And finally - submit reverified transactions back to the pool - - self.validated_pool.resubmit_pruned( - &at, - known_imported_hashes, - pruned_hashes, - reverified_transactions.into_iter().map(|(_, xt)| xt).collect(), - ) - } - - /// Returns transaction hash - pub fn hash_of(&self, xt: &ExtrinsicFor) -> ExHash { - self.validated_pool.api().hash_and_length(xt).0 - } - - /// Resolves block number by id. - fn resolve_block_number(&self, at: &BlockId) -> Result, B::Error> { - self.validated_pool.api().block_id_to_number(at) - .and_then(|number| number.ok_or_else(|| - error::Error::InvalidBlockId(format!("{:?}", at)).into())) - } - - /// Returns future that validates a bunch of transactions at given block. - async fn verify( - &self, - at: &BlockId, - xts: impl IntoIterator)>, - force: bool, - ) -> Result, ValidatedTransactionFor>, B::Error> { - // we need a block number to compute tx validity - let block_number = self.resolve_block_number(at)?; - let mut result = HashMap::new(); - - for (hash, validated_tx) in - futures::future::join_all( - xts.into_iter() - .map(|(source, xt)| self.verify_one(at, block_number, source, xt, force)) - ) - .await - { - result.insert(hash, validated_tx); - } - - Ok(result) - } - - /// Returns future that validates single transaction at given block. 
- async fn verify_one( - &self, - block_id: &BlockId, - block_number: NumberFor, - source: TransactionSource, - xt: ExtrinsicFor, - force: bool, - ) -> (ExHash, ValidatedTransactionFor) { - let (hash, bytes) = self.validated_pool.api().hash_and_length(&xt); - if !force && self.validated_pool.is_banned(&hash) { - return ( - hash.clone(), - ValidatedTransaction::Invalid(hash, error::Error::TemporarilyBanned.into()), - ) - } - - let validation_result = self.validated_pool.api().validate_transaction( - block_id, - source, - xt.clone(), - ).await; - - let status = match validation_result { - Ok(status) => status, - Err(e) => return (hash.clone(), ValidatedTransaction::Invalid(hash, e)), - }; - - let validity = match status { - Ok(validity) => { - if validity.provides.is_empty() { - ValidatedTransaction::Invalid(hash.clone(), error::Error::NoTagsProvided.into()) - } else { - ValidatedTransaction::valid_at( - block_number.saturated_into::(), - hash.clone(), - source, - xt, - bytes, - validity, - ) - } - }, - Err(TransactionValidityError::Invalid(e)) => - ValidatedTransaction::Invalid(hash.clone(), error::Error::InvalidTransaction(e).into()), - Err(TransactionValidityError::Unknown(e)) => - ValidatedTransaction::Unknown(hash.clone(), error::Error::UnknownTransaction(e).into()), - }; - - (hash, validity) - } - - /// get a reference to the underlying validated pool. - pub fn validated_pool(&self) -> &ValidatedPool { - &self.validated_pool - } + /// Create a new transaction pool. 
+ pub fn new(options: Options, api: Arc) -> Self { + Pool { + validated_pool: Arc::new(ValidatedPool::new(options, api)), + } + } + + /// Imports a bunch of unverified extrinsics to the pool + pub async fn submit_at( + &self, + at: &BlockId, + source: TransactionSource, + xts: T, + force: bool, + ) -> Result, B::Error>>, B::Error> + where + T: IntoIterator>, + { + let validated_pool = self.validated_pool.clone(); + let xts = xts.into_iter().map(|xt| (source, xt)); + self.verify(at, xts, force) + .map(move |validated_transactions| { + validated_transactions.map(|validated_transactions| { + validated_pool.submit(validated_transactions.into_iter().map(|(_, tx)| tx)) + }) + }) + .await + } + + /// Imports one unverified extrinsic to the pool + pub async fn submit_one( + &self, + at: &BlockId, + source: TransactionSource, + xt: ExtrinsicFor, + ) -> Result, B::Error> { + self.submit_at(at, source, std::iter::once(xt), false) + .map(|import_result| { + import_result.and_then(|mut import_result| { + import_result + .pop() + .expect("One extrinsic passed; one result returned; qed") + }) + }) + .await + } + + /// Import a single extrinsic and starts to watch their progress in the pool. + pub async fn submit_and_watch( + &self, + at: &BlockId, + source: TransactionSource, + xt: ExtrinsicFor, + ) -> Result, BlockHash>, B::Error> { + let block_number = self.resolve_block_number(at)?; + let (_, tx) = self.verify_one(at, block_number, source, xt, false).await; + self.validated_pool.submit_and_watch(tx) + } + + /// Resubmit some transaction that were validated elsewhere. + pub fn resubmit( + &self, + revalidated_transactions: HashMap, ValidatedTransactionFor>, + ) { + let now = Instant::now(); + self.validated_pool.resubmit(revalidated_transactions); + log::debug!(target: "txpool", + "Resubmitted. Took {} ms. Status: {:?}", + now.elapsed().as_millis(), + self.validated_pool.status() + ); + } + + /// Prunes known ready transactions. 
+ /// + /// Used to clear the pool from transactions that were part of recently imported block. + /// The main difference from the `prune` is that we do not revalidate any transactions + /// and ignore unknown passed hashes. + pub fn prune_known( + &self, + at: &BlockId, + hashes: &[ExHash], + ) -> Result<(), B::Error> { + // Get details of all extrinsics that are already in the pool + let in_pool_tags = self + .validated_pool + .extrinsics_tags(hashes) + .into_iter() + .filter_map(|x| x) + .flat_map(|x| x); + + // Prune all transactions that provide given tags + let prune_status = self.validated_pool.prune_tags(in_pool_tags)?; + let pruned_transactions = hashes + .into_iter() + .cloned() + .chain(prune_status.pruned.iter().map(|tx| tx.hash.clone())); + self.validated_pool.fire_pruned(at, pruned_transactions) + } + + /// Prunes ready transactions. + /// + /// Used to clear the pool from transactions that were part of recently imported block. + /// To perform pruning we need the tags that each extrinsic provides and to avoid calling + /// into runtime too often we first lookup all extrinsics that are in the pool and get + /// their provided tags from there. Otherwise we query the runtime at the `parent` block. 
+ pub async fn prune( + &self, + at: &BlockId, + parent: &BlockId, + extrinsics: &[ExtrinsicFor], + ) -> Result<(), B::Error> { + log::debug!( + target: "txpool", + "Starting pruning of block {:?} (extrinsics: {})", + at, + extrinsics.len() + ); + // Get details of all extrinsics that are already in the pool + let in_pool_hashes = extrinsics + .iter() + .map(|extrinsic| self.hash_of(extrinsic)) + .collect::>(); + let in_pool_tags = self.validated_pool.extrinsics_tags(&in_pool_hashes); + + // Zip the ones from the pool with the full list (we get pairs `(Extrinsic, Option>)`) + let all = extrinsics.iter().zip(in_pool_tags.into_iter()); + + let mut future_tags = Vec::new(); + for (extrinsic, in_pool_tags) in all { + match in_pool_tags { + // reuse the tags for extrinsics that were found in the pool + Some(tags) => future_tags.extend(tags), + // if it's not found in the pool query the runtime at parent block + // to get validity info and tags that the extrinsic provides. + None => { + let validity = self + .validated_pool + .api() + .validate_transaction(parent, TransactionSource::InBlock, extrinsic.clone()) + .await; + + if let Ok(Ok(validity)) = validity { + future_tags.extend(validity.provides); + } + } + } + } + + self.prune_tags(at, future_tags, in_pool_hashes).await + } + + /// Prunes ready transactions that provide given list of tags. + /// + /// Given tags are assumed to be always provided now, so all transactions + /// in the Future Queue that require that particular tag (and have other + /// requirements satisfied) are promoted to Ready Queue. + /// + /// Moreover for each provided tag we remove transactions in the pool that: + /// 1. Provide that tag directly + /// 2. Are a dependency of pruned transaction. + /// + /// Returns transactions that have been removed from the pool and must be reverified + /// before reinserting to the pool. 
+ /// + /// By removing predecessor transactions as well we might actually end up + /// pruning too much, so all removed transactions are reverified against + /// the runtime (`validate_transaction`) to make sure they are invalid. + /// + /// However we avoid revalidating transactions that are contained within + /// the second parameter of `known_imported_hashes`. These transactions + /// (if pruned) are not revalidated and become temporarily banned to + /// prevent importing them in the (near) future. + pub async fn prune_tags( + &self, + at: &BlockId, + tags: impl IntoIterator, + known_imported_hashes: impl IntoIterator> + Clone, + ) -> Result<(), B::Error> { + log::debug!(target: "txpool", "Pruning at {:?}", at); + // Prune all transactions that provide given tags + let prune_status = match self.validated_pool.prune_tags(tags) { + Ok(prune_status) => prune_status, + Err(e) => return Err(e), + }; + + // Make sure that we don't revalidate extrinsics that were part of the recently + // imported block. This is especially important for UTXO-like chains cause the + // inputs are pruned so such transaction would go to future again. + self.validated_pool + .ban(&Instant::now(), known_imported_hashes.clone().into_iter()); + + // Try to re-validate pruned transactions since some of them might be still valid. + // note that `known_imported_hashes` will be rejected here due to temporary ban. + let pruned_hashes = prune_status + .pruned + .iter() + .map(|tx| tx.hash.clone()) + .collect::>(); + let pruned_transactions = prune_status + .pruned + .into_iter() + .map(|tx| (tx.source, tx.data.clone())); + + let reverified_transactions = self.verify(at, pruned_transactions, false).await?; + + log::trace!(target: "txpool", "Pruning at {:?}. 
Resubmitting transactions.", at); + // And finally - submit reverified transactions back to the pool + + self.validated_pool.resubmit_pruned( + &at, + known_imported_hashes, + pruned_hashes, + reverified_transactions + .into_iter() + .map(|(_, xt)| xt) + .collect(), + ) + } + + /// Returns transaction hash + pub fn hash_of(&self, xt: &ExtrinsicFor) -> ExHash { + self.validated_pool.api().hash_and_length(xt).0 + } + + /// Resolves block number by id. + fn resolve_block_number(&self, at: &BlockId) -> Result, B::Error> { + self.validated_pool + .api() + .block_id_to_number(at) + .and_then(|number| { + number.ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into()) + }) + } + + /// Returns future that validates a bunch of transactions at given block. + async fn verify( + &self, + at: &BlockId, + xts: impl IntoIterator)>, + force: bool, + ) -> Result, ValidatedTransactionFor>, B::Error> { + // we need a block number to compute tx validity + let block_number = self.resolve_block_number(at)?; + let mut result = HashMap::new(); + + for (hash, validated_tx) in futures::future::join_all( + xts.into_iter() + .map(|(source, xt)| self.verify_one(at, block_number, source, xt, force)), + ) + .await + { + result.insert(hash, validated_tx); + } + + Ok(result) + } + + /// Returns future that validates single transaction at given block. 
+ async fn verify_one( + &self, + block_id: &BlockId, + block_number: NumberFor, + source: TransactionSource, + xt: ExtrinsicFor, + force: bool, + ) -> (ExHash, ValidatedTransactionFor) { + let (hash, bytes) = self.validated_pool.api().hash_and_length(&xt); + if !force && self.validated_pool.is_banned(&hash) { + return ( + hash.clone(), + ValidatedTransaction::Invalid(hash, error::Error::TemporarilyBanned.into()), + ); + } + + let validation_result = self + .validated_pool + .api() + .validate_transaction(block_id, source, xt.clone()) + .await; + + let status = match validation_result { + Ok(status) => status, + Err(e) => return (hash.clone(), ValidatedTransaction::Invalid(hash, e)), + }; + + let validity = match status { + Ok(validity) => { + if validity.provides.is_empty() { + ValidatedTransaction::Invalid(hash.clone(), error::Error::NoTagsProvided.into()) + } else { + ValidatedTransaction::valid_at( + block_number.saturated_into::(), + hash.clone(), + source, + xt, + bytes, + validity, + ) + } + } + Err(TransactionValidityError::Invalid(e)) => ValidatedTransaction::Invalid( + hash.clone(), + error::Error::InvalidTransaction(e).into(), + ), + Err(TransactionValidityError::Unknown(e)) => ValidatedTransaction::Unknown( + hash.clone(), + error::Error::UnknownTransaction(e).into(), + ), + }; + + (hash, validity) + } + + /// get a reference to the underlying validated pool. 
+ pub fn validated_pool(&self) -> &ValidatedPool { + &self.validated_pool + } } impl Clone for Pool { - fn clone(&self) -> Self { - Self { - validated_pool: self.validated_pool.clone(), - } - } + fn clone(&self) -> Self { + Self { + validated_pool: self.validated_pool.clone(), + } + } } #[cfg(test)] mod tests { - use std::collections::{HashMap, HashSet}; - use parking_lot::Mutex; - use futures::executor::block_on; - use super::*; - use sp_transaction_pool::TransactionStatus; - use sp_runtime::transaction_validity::{ValidTransaction, InvalidTransaction, TransactionSource}; - use codec::Encode; - use substrate_test_runtime::{Block, Extrinsic, Transfer, H256, AccountId}; - use assert_matches::assert_matches; - use wasm_timer::Instant; - use crate::base_pool::Limit; - - const INVALID_NONCE: u64 = 254; - const SOURCE: TransactionSource = TransactionSource::External; - - #[derive(Clone, Debug, Default)] - struct TestApi { - delay: Arc>>>, - invalidate: Arc>>, - clear_requirements: Arc>>, - add_requirements: Arc>>, - } - - impl ChainApi for TestApi { - type Block = Block; - type Hash = u64; - type Error = error::Error; - type ValidationFuture = futures::future::Ready>; - type BodyFuture = futures::future::Ready>>>; - - /// Verify extrinsic at given block. - fn validate_transaction( - &self, - at: &BlockId, - _source: TransactionSource, - uxt: ExtrinsicFor, - ) -> Self::ValidationFuture { - let hash = self.hash_and_length(&uxt).0; - let block_number = self.block_id_to_number(at).unwrap().unwrap(); - let nonce = uxt.transfer().nonce; - - // This is used to control the test flow. 
- if nonce > 0 { - let opt = self.delay.lock().take(); - if let Some(delay) = opt { - if delay.recv().is_err() { - println!("Error waiting for delay!"); - } - } - } - - if self.invalidate.lock().contains(&hash) { - return futures::future::ready(Ok(InvalidTransaction::Custom(0).into())); - } - - futures::future::ready(if nonce < block_number { - Ok(InvalidTransaction::Stale.into()) - } else { - let mut transaction = ValidTransaction { - priority: 4, - requires: if nonce > block_number { vec![vec![nonce as u8 - 1]] } else { vec![] }, - provides: if nonce == INVALID_NONCE { vec![] } else { vec![vec![nonce as u8]] }, - longevity: 3, - propagate: true, - }; - - if self.clear_requirements.lock().contains(&hash) { - transaction.requires.clear(); - } - - if self.add_requirements.lock().contains(&hash) { - transaction.requires.push(vec![128]); - } - - Ok(Ok(transaction)) - }) - } - - /// Returns a block number given the block id. - fn block_id_to_number(&self, at: &BlockId) -> Result>, Self::Error> { - Ok(match at { - BlockId::Number(num) => Some(*num), - BlockId::Hash(_) => None, - }) - } - - /// Returns a block hash given the block id. - fn block_id_to_hash(&self, at: &BlockId) -> Result>, Self::Error> { - Ok(match at { - BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), - BlockId::Hash(_) => None, - }) - } - - /// Hash the extrinsic. 
- fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize) { - let len = uxt.encode().len(); - ( - (H256::from(uxt.transfer().from.clone()).to_low_u64_be() << 5) + uxt.transfer().nonce, - len - ) - } - - fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { - futures::future::ready(Ok(None)) - } - } - - fn uxt(transfer: Transfer) -> Extrinsic { - Extrinsic::Transfer { - transfer, - signature: Default::default(), - exhaust_resources_when_not_first: false, - } - } - - fn pool() -> Pool { - Pool::new(Default::default(), TestApi::default().into()) - } - - #[test] - fn should_validate_and_import_transaction() { - // given - let pool = pool(); - - // when - let hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - - // then - assert_eq!(pool.validated_pool().ready().map(|v| v.hash).collect::>(), vec![hash]); - } - - #[test] - fn should_reject_if_temporarily_banned() { - // given - let pool = pool(); - let uxt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - - // when - pool.validated_pool.rotator().ban(&Instant::now(), vec![pool.hash_of(&uxt)]); - let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 0); - - // then - assert_matches!(res.unwrap_err(), error::Error::TemporarilyBanned); - } - - #[test] - fn should_notify_about_pool_events() { - let stream = { - // given - let pool = pool(); - let stream = pool.validated_pool().import_notification_stream(); - - // when - let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: 
AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); - // future doesn't count - let _hash = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }))).unwrap(); - - assert_eq!(pool.validated_pool().status().ready, 2); - assert_eq!(pool.validated_pool().status().future, 1); - stream - }; - - // then - let mut it = futures::executor::block_on_stream(stream); - assert_eq!(it.next(), Some(32)); - assert_eq!(it.next(), Some(33)); - assert_eq!(it.next(), None); - } - - #[test] - fn should_clear_stale_transactions() { - // given - let pool = pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); - let hash3 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 3, - }))).unwrap(); - - // when - pool.validated_pool.clear_stale(&BlockId::Number(5)).unwrap(); - - // then - assert_eq!(pool.validated_pool().ready().count(), 0); - assert_eq!(pool.validated_pool().status().future, 0); - assert_eq!(pool.validated_pool().status().ready, 0); - // make sure they are temporarily banned as well - 
assert!(pool.validated_pool.rotator().is_banned(&hash1)); - assert!(pool.validated_pool.rotator().is_banned(&hash2)); - assert!(pool.validated_pool.rotator().is_banned(&hash3)); - } - - #[test] - fn should_ban_mined_transactions() { - // given - let pool = pool(); - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - - // when - block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()])).unwrap(); - - // then - assert!(pool.validated_pool.rotator().is_banned(&hash1)); - } - - #[test] - fn should_limit_futures() { - // given - let limit = Limit { - count: 100, - total_bytes: 200, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }, TestApi::default().into()); - - let hash1 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); - assert_eq!(pool.validated_pool().status().future, 1); - - // when - let hash2 = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(2)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 10, - }))).unwrap(); - - // then - assert_eq!(pool.validated_pool().status().future, 1); - assert!(pool.validated_pool.rotator().is_banned(&hash1)); - assert!(!pool.validated_pool.rotator().is_banned(&hash2)); - } - - #[test] - fn should_error_if_reject_immediately() { - // given - let limit = Limit { - count: 100, - total_bytes: 10, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }, TestApi::default().into()); - - // when - block_on(pool.submit_one(&BlockId::Number(0), 
SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap_err(); - - // then - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 0); - } - - #[test] - fn should_reject_transactions_with_no_provides() { - // given - let pool = pool(); - - // when - let err = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: INVALID_NONCE, - }))).unwrap_err(); - - // then - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 0); - assert_matches!(err, error::Error::NoTagsProvided); - } - - mod listener { - use super::*; - - #[test] - fn should_trigger_ready_and_finalized() { - // given - let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 1); - assert_eq!(pool.validated_pool().status().future, 0); - - // when - block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![])).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 0); - - // then - let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into()))); - } - - #[test] - fn should_trigger_ready_and_finalized_when_pruning_via_hash() { - // given - let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: 
AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 1); - assert_eq!(pool.validated_pool().status().future, 0); - - // when - block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![2u64])).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 0); - - // then - let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into()))); - } - - #[test] - fn should_trigger_future_and_ready_after_promoted() { - // given - let pool = pool(); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }))).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 1); - - // when - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }))).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 2); - - // then - let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(TransactionStatus::Future)); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - } - - #[test] - fn should_trigger_invalid_and_ban() { - // given - let pool = pool(); - let uxt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), 
SOURCE, uxt)).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 1); - - // when - pool.validated_pool.remove_invalid(&[*watcher.hash()]); - - - // then - let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::Invalid)); - assert_eq!(stream.next(), None); - } - - #[test] - fn should_trigger_broadcasted() { - // given - let pool = pool(); - let uxt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 1); - - // when - let mut map = HashMap::new(); - let peers = vec!["a".into(), "b".into(), "c".into()]; - map.insert(*watcher.hash(), peers.clone()); - pool.validated_pool().on_broadcasted(map); - - - // then - let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::Broadcast(peers))); - } - - #[test] - fn should_trigger_dropped() { - // given - let limit = Limit { - count: 1, - total_bytes: 1000, - }; - let pool = Pool::new(Options { - ready: limit.clone(), - future: limit.clone(), - ..Default::default() - }, TestApi::default().into()); - - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 0, - }); - let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 1); - - // when - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(2)), - to: AccountId::from_h256(H256::from_low_u64_be(1)), - amount: 4, - nonce: 1, - }); - 
block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 1); - - // then - let mut stream = futures::executor::block_on_stream(watcher.into_stream()); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::Dropped)); - } - - #[test] - fn should_handle_pruning_in_the_middle_of_import() { - // given - let (ready, is_ready) = std::sync::mpsc::sync_channel(0); - let (tx, rx) = std::sync::mpsc::sync_channel(1); - let mut api = TestApi::default(); - api.delay = Arc::new(Mutex::new(rx.into())); - let pool = Arc::new(Pool::new(Default::default(), api.into())); - - // when - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 5, - nonce: 1, - }); - - // This transaction should go to future, since we use `nonce: 1` - let pool2 = pool.clone(); - std::thread::spawn(move || { - block_on(pool2.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); - ready.send(()).unwrap(); - }); - - // But now before the previous one is imported we import - // the one that it depends on. - let xt = uxt(Transfer { - from: AccountId::from_h256(H256::from_low_u64_be(1)), - to: AccountId::from_h256(H256::from_low_u64_be(2)), - amount: 4, - nonce: 0, - }); - // The tag the above transaction provides (TestApi is using just nonce as u8) - let provides = vec![0_u8]; - block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 1); - - // Now block import happens before the second transaction is able to finish verification. 
- block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap(); - assert_eq!(pool.validated_pool().status().ready, 0); - - - // so when we release the verification of the previous one it will have - // something in `requires`, but should go to ready directly, since the previous transaction was imported - // correctly. - tx.send(()).unwrap(); - - // then - is_ready.recv().unwrap(); // wait for finish - assert_eq!(pool.validated_pool().status().ready, 1); - assert_eq!(pool.validated_pool().status().future, 0); - } - } + use super::*; + use crate::base_pool::Limit; + use assert_matches::assert_matches; + use codec::Encode; + use futures::executor::block_on; + use parking_lot::Mutex; + use sp_runtime::transaction_validity::{ + InvalidTransaction, TransactionSource, ValidTransaction, + }; + use sp_transaction_pool::TransactionStatus; + use std::collections::{HashMap, HashSet}; + use substrate_test_runtime::{AccountId, Block, Extrinsic, Transfer, H256}; + use wasm_timer::Instant; + + const INVALID_NONCE: u64 = 254; + const SOURCE: TransactionSource = TransactionSource::External; + + #[derive(Clone, Debug, Default)] + struct TestApi { + delay: Arc>>>, + invalidate: Arc>>, + clear_requirements: Arc>>, + add_requirements: Arc>>, + } + + impl ChainApi for TestApi { + type Block = Block; + type Hash = u64; + type Error = error::Error; + type ValidationFuture = futures::future::Ready>; + type BodyFuture = futures::future::Ready>>>; + + /// Verify extrinsic at given block. + fn validate_transaction( + &self, + at: &BlockId, + _source: TransactionSource, + uxt: ExtrinsicFor, + ) -> Self::ValidationFuture { + let hash = self.hash_and_length(&uxt).0; + let block_number = self.block_id_to_number(at).unwrap().unwrap(); + let nonce = uxt.transfer().nonce; + + // This is used to control the test flow. 
+ if nonce > 0 { + let opt = self.delay.lock().take(); + if let Some(delay) = opt { + if delay.recv().is_err() { + println!("Error waiting for delay!"); + } + } + } + + if self.invalidate.lock().contains(&hash) { + return futures::future::ready(Ok(InvalidTransaction::Custom(0).into())); + } + + futures::future::ready(if nonce < block_number { + Ok(InvalidTransaction::Stale.into()) + } else { + let mut transaction = ValidTransaction { + priority: 4, + requires: if nonce > block_number { + vec![vec![nonce as u8 - 1]] + } else { + vec![] + }, + provides: if nonce == INVALID_NONCE { + vec![] + } else { + vec![vec![nonce as u8]] + }, + longevity: 3, + propagate: true, + }; + + if self.clear_requirements.lock().contains(&hash) { + transaction.requires.clear(); + } + + if self.add_requirements.lock().contains(&hash) { + transaction.requires.push(vec![128]); + } + + Ok(Ok(transaction)) + }) + } + + /// Returns a block number given the block id. + fn block_id_to_number( + &self, + at: &BlockId, + ) -> Result>, Self::Error> { + Ok(match at { + BlockId::Number(num) => Some(*num), + BlockId::Hash(_) => None, + }) + } + + /// Returns a block hash given the block id. + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> Result>, Self::Error> { + Ok(match at { + BlockId::Number(num) => Some(H256::from_low_u64_be(*num)).into(), + BlockId::Hash(_) => None, + }) + } + + /// Hash the extrinsic. 
+ fn hash_and_length(&self, uxt: &ExtrinsicFor) -> (Self::Hash, usize) { + let len = uxt.encode().len(); + ( + (H256::from(uxt.transfer().from.clone()).to_low_u64_be() << 5) + + uxt.transfer().nonce, + len, + ) + } + + fn block_body(&self, _id: &BlockId) -> Self::BodyFuture { + futures::future::ready(Ok(None)) + } + } + + fn uxt(transfer: Transfer) -> Extrinsic { + Extrinsic::Transfer { + transfer, + signature: Default::default(), + exhaust_resources_when_not_first: false, + } + } + + fn pool() -> Pool { + Pool::new(Default::default(), TestApi::default().into()) + } + + #[test] + fn should_validate_and_import_transaction() { + // given + let pool = pool(); + + // when + let hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + + // then + assert_eq!( + pool.validated_pool() + .ready() + .map(|v| v.hash) + .collect::>(), + vec![hash] + ); + } + + #[test] + fn should_reject_if_temporarily_banned() { + // given + let pool = pool(); + let uxt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + + // when + pool.validated_pool + .rotator() + .ban(&Instant::now(), vec![pool.hash_of(&uxt)]); + let res = block_on(pool.submit_one(&BlockId::Number(0), SOURCE, uxt)); + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 0); + + // then + assert_matches!(res.unwrap_err(), error::Error::TemporarilyBanned); + } + + #[test] + fn should_notify_about_pool_events() { + let stream = { + // given + let pool = pool(); + let stream = pool.validated_pool().import_notification_stream(); + + // when + let _hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + 
to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + let _hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); + // future doesn't count + let _hash = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + )) + .unwrap(); + + assert_eq!(pool.validated_pool().status().ready, 2); + assert_eq!(pool.validated_pool().status().future, 1); + stream + }; + + // then + let mut it = futures::executor::block_on_stream(stream); + assert_eq!(it.next(), Some(32)); + assert_eq!(it.next(), Some(33)); + assert_eq!(it.next(), None); + } + + #[test] + fn should_clear_stale_transactions() { + // given + let pool = pool(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + let hash2 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); + let hash3 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 3, + }), + )) + .unwrap(); + + // when + pool.validated_pool + .clear_stale(&BlockId::Number(5)) + .unwrap(); + + // then + assert_eq!(pool.validated_pool().ready().count(), 0); + assert_eq!(pool.validated_pool().status().future, 0); + 
assert_eq!(pool.validated_pool().status().ready, 0); + // make sure they are temporarily banned as well + assert!(pool.validated_pool.rotator().is_banned(&hash1)); + assert!(pool.validated_pool.rotator().is_banned(&hash2)); + assert!(pool.validated_pool.rotator().is_banned(&hash3)); + } + + #[test] + fn should_ban_mined_transactions() { + // given + let pool = pool(); + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + + // when + block_on(pool.prune_tags(&BlockId::Number(1), vec![vec![0]], vec![hash1.clone()])).unwrap(); + + // then + assert!(pool.validated_pool.rotator().is_banned(&hash1)); + } + + #[test] + fn should_limit_futures() { + // given + let limit = Limit { + count: 100, + total_bytes: 200, + }; + let pool = Pool::new( + Options { + ready: limit.clone(), + future: limit.clone(), + ..Default::default() + }, + TestApi::default().into(), + ); + + let hash1 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); + assert_eq!(pool.validated_pool().status().future, 1); + + // when + let hash2 = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(2)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 10, + }), + )) + .unwrap(); + + // then + assert_eq!(pool.validated_pool().status().future, 1); + assert!(pool.validated_pool.rotator().is_banned(&hash1)); + assert!(!pool.validated_pool.rotator().is_banned(&hash2)); + } + + #[test] + fn should_error_if_reject_immediately() { + // given + let limit = Limit { + count: 100, + total_bytes: 10, + }; + let pool = Pool::new( + Options { + ready: 
limit.clone(), + future: limit.clone(), + ..Default::default() + }, + TestApi::default().into(), + ); + + // when + block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap_err(); + + // then + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 0); + } + + #[test] + fn should_reject_transactions_with_no_provides() { + // given + let pool = pool(); + + // when + let err = block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: INVALID_NONCE, + }), + )) + .unwrap_err(); + + // then + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 0); + assert_matches!(err, error::Error::NoTagsProvided); + } + + mod listener { + use super::*; + + #[test] + fn should_trigger_ready_and_finalized() { + // given + let pool = pool(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + assert_eq!(pool.validated_pool().status().ready, 1); + assert_eq!(pool.validated_pool().status().future, 0); + + // when + block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![])).unwrap(); + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 0); + + // then + let mut stream = futures::executor::block_on_stream(watcher.into_stream()); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!( + stream.next(), + Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())) + ); + } + + 
#[test] + fn should_trigger_ready_and_finalized_when_pruning_via_hash() { + // given + let pool = pool(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + assert_eq!(pool.validated_pool().status().ready, 1); + assert_eq!(pool.validated_pool().status().future, 0); + + // when + block_on(pool.prune_tags(&BlockId::Number(2), vec![vec![0u8]], vec![2u64])).unwrap(); + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 0); + + // then + let mut stream = futures::executor::block_on_stream(watcher.into_stream()); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!( + stream.next(), + Some(TransactionStatus::InBlock(H256::from_low_u64_be(2).into())) + ); + } + + #[test] + fn should_trigger_future_and_ready_after_promoted() { + // given + let pool = pool(); + let watcher = block_on(pool.submit_and_watch( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }), + )) + .unwrap(); + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 1); + + // when + block_on(pool.submit_one( + &BlockId::Number(0), + SOURCE, + uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }), + )) + .unwrap(); + assert_eq!(pool.validated_pool().status().ready, 2); + + // then + let mut stream = futures::executor::block_on_stream(watcher.into_stream()); + assert_eq!(stream.next(), Some(TransactionStatus::Future)); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + } + + #[test] + fn should_trigger_invalid_and_ban() { + // given + 
let pool = pool(); + let uxt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let watcher = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + assert_eq!(pool.validated_pool().status().ready, 1); + + // when + pool.validated_pool.remove_invalid(&[*watcher.hash()]); + + // then + let mut stream = futures::executor::block_on_stream(watcher.into_stream()); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Invalid)); + assert_eq!(stream.next(), None); + } + + #[test] + fn should_trigger_broadcasted() { + // given + let pool = pool(); + let uxt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let watcher = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, uxt)).unwrap(); + assert_eq!(pool.validated_pool().status().ready, 1); + + // when + let mut map = HashMap::new(); + let peers = vec!["a".into(), "b".into(), "c".into()]; + map.insert(*watcher.hash(), peers.clone()); + pool.validated_pool().on_broadcasted(map); + + // then + let mut stream = futures::executor::block_on_stream(watcher.into_stream()); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Broadcast(peers))); + } + + #[test] + fn should_trigger_dropped() { + // given + let limit = Limit { + count: 1, + total_bytes: 1000, + }; + let pool = Pool::new( + Options { + ready: limit.clone(), + future: limit.clone(), + ..Default::default() + }, + TestApi::default().into(), + ); + + let xt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 0, + }); + let watcher = block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, xt)).unwrap(); + 
assert_eq!(pool.validated_pool().status().ready, 1); + + // when + let xt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(2)), + to: AccountId::from_h256(H256::from_low_u64_be(1)), + amount: 4, + nonce: 1, + }); + block_on(pool.submit_one(&BlockId::Number(1), SOURCE, xt)).unwrap(); + assert_eq!(pool.validated_pool().status().ready, 1); + + // then + let mut stream = futures::executor::block_on_stream(watcher.into_stream()); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::Dropped)); + } + + #[test] + fn should_handle_pruning_in_the_middle_of_import() { + // given + let (ready, is_ready) = std::sync::mpsc::sync_channel(0); + let (tx, rx) = std::sync::mpsc::sync_channel(1); + let mut api = TestApi::default(); + api.delay = Arc::new(Mutex::new(rx.into())); + let pool = Arc::new(Pool::new(Default::default(), api.into())); + + // when + let xt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 5, + nonce: 1, + }); + + // This transaction should go to future, since we use `nonce: 1` + let pool2 = pool.clone(); + std::thread::spawn(move || { + block_on(pool2.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + ready.send(()).unwrap(); + }); + + // But now before the previous one is imported we import + // the one that it depends on. + let xt = uxt(Transfer { + from: AccountId::from_h256(H256::from_low_u64_be(1)), + to: AccountId::from_h256(H256::from_low_u64_be(2)), + amount: 4, + nonce: 0, + }); + // The tag the above transaction provides (TestApi is using just nonce as u8) + let provides = vec![0_u8]; + block_on(pool.submit_one(&BlockId::Number(0), SOURCE, xt)).unwrap(); + assert_eq!(pool.validated_pool().status().ready, 1); + + // Now block import happens before the second transaction is able to finish verification. 
+ block_on(pool.prune_tags(&BlockId::Number(1), vec![provides], vec![])).unwrap(); + assert_eq!(pool.validated_pool().status().ready, 0); + + // so when we release the verification of the previous one it will have + // something in `requires`, but should go to ready directly, since the previous transaction was imported + // correctly. + tx.send(()).unwrap(); + + // then + is_ready.recv().unwrap(); // wait for finish + assert_eq!(pool.validated_pool().status().ready, 1); + assert_eq!(pool.validated_pool().status().future, 0); + } + } } diff --git a/client/transaction-pool/graph/src/ready.rs b/client/transaction-pool/graph/src/ready.rs index c856535a61..7ddf6cd8b7 100644 --- a/client/transaction-pool/graph/src/ready.rs +++ b/client/transaction-pool/graph/src/ready.rs @@ -15,86 +15,91 @@ // along with Substrate. If not, see . use std::{ - collections::{HashMap, HashSet, BTreeSet}, - cmp, - hash, - sync::Arc, + cmp, + collections::{BTreeSet, HashMap, HashSet}, + hash, + sync::Arc, }; -use serde::Serialize; use log::trace; use parking_lot::RwLock; +use serde::Serialize; use sp_runtime::traits::Member; -use sp_runtime::transaction_validity::{ - TransactionTag as Tag, -}; +use sp_runtime::transaction_validity::TransactionTag as Tag; use sp_transaction_pool::error; -use crate::future::WaitingTransaction; use crate::base_pool::Transaction; +use crate::future::WaitingTransaction; /// An in-pool transaction reference. /// /// Should be cheap to clone. #[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct TransactionRef { - /// The actual transaction data. - pub transaction: Arc>, - /// Unique id when transaction was inserted into the pool. - pub insertion_id: u64, + /// The actual transaction data. + pub transaction: Arc>, + /// Unique id when transaction was inserted into the pool. 
+ pub insertion_id: u64, } impl Clone for TransactionRef { - fn clone(&self) -> Self { - TransactionRef { - transaction: self.transaction.clone(), - insertion_id: self.insertion_id, - } - } + fn clone(&self) -> Self { + TransactionRef { + transaction: self.transaction.clone(), + insertion_id: self.insertion_id, + } + } } impl Ord for TransactionRef { - fn cmp(&self, other: &Self) -> cmp::Ordering { - self.transaction.priority.cmp(&other.transaction.priority) - .then_with(|| other.transaction.valid_till.cmp(&self.transaction.valid_till)) - .then_with(|| other.insertion_id.cmp(&self.insertion_id)) - } + fn cmp(&self, other: &Self) -> cmp::Ordering { + self.transaction + .priority + .cmp(&other.transaction.priority) + .then_with(|| { + other + .transaction + .valid_till + .cmp(&self.transaction.valid_till) + }) + .then_with(|| other.insertion_id.cmp(&self.insertion_id)) + } } impl PartialOrd for TransactionRef { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl PartialEq for TransactionRef { - fn eq(&self, other: &Self) -> bool { - self.cmp(other) == cmp::Ordering::Equal - } + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == cmp::Ordering::Equal + } } impl Eq for TransactionRef {} #[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct ReadyTx { - /// A reference to a transaction - pub transaction: TransactionRef, - /// A list of transactions that get unlocked by this one - pub unlocks: Vec, - /// How many required tags are provided inherently - /// - /// Some transactions might be already pruned from the queue, - /// so when we compute ready set we may consider this transactions ready earlier. 
- pub requires_offset: usize, + /// A reference to a transaction + pub transaction: TransactionRef, + /// A list of transactions that get unlocked by this one + pub unlocks: Vec, + /// How many required tags are provided inherently + /// + /// Some transactions might be already pruned from the queue, + /// so when we compute ready set we may consider this transactions ready earlier. + pub requires_offset: usize, } impl Clone for ReadyTx { - fn clone(&self) -> Self { - ReadyTx { - transaction: self.transaction.clone(), - unlocks: self.unlocks.clone(), - requires_offset: self.requires_offset, - } - } + fn clone(&self) -> Self { + ReadyTx { + transaction: self.transaction.clone(), + unlocks: self.unlocks.clone(), + requires_offset: self.requires_offset, + } + } } const HASH_READY: &str = r#" @@ -106,631 +111,651 @@ qed #[derive(Debug, parity_util_mem::MallocSizeOf)] pub struct ReadyTransactions { - /// Insertion id - insertion_id: u64, - /// tags that are provided by Ready transactions - provided_tags: HashMap, - /// Transactions that are ready (i.e. don't have any requirements external to the pool) - ready: Arc>>>, - /// Best transactions that are ready to be included to the block without any other previous transaction. - best: BTreeSet>, + /// Insertion id + insertion_id: u64, + /// tags that are provided by Ready transactions + provided_tags: HashMap, + /// Transactions that are ready (i.e. don't have any requirements external to the pool) + ready: Arc>>>, + /// Best transactions that are ready to be included to the block without any other previous transaction. 
+ best: BTreeSet>, } impl Default for ReadyTransactions { - fn default() -> Self { - ReadyTransactions { - insertion_id: Default::default(), - provided_tags: Default::default(), - ready: Default::default(), - best: Default::default(), - } - } + fn default() -> Self { + ReadyTransactions { + insertion_id: Default::default(), + provided_tags: Default::default(), + ready: Default::default(), + best: Default::default(), + } + } } impl ReadyTransactions { - /// Borrows a map of tags that are provided by transactions in this queue. - pub fn provided_tags(&self) -> &HashMap { - &self.provided_tags - } - - /// Returns an iterator of ready transactions. - /// - /// Transactions are returned in order: - /// 1. First by the dependencies: - /// - never return transaction that requires a tag, which was not provided by one of the previously returned transactions - /// 2. Then by priority: - /// - If there are two transactions with all requirements satisfied the one with higher priority goes first. - /// 3. Then by the ttl that's left - /// - transactions that are valid for a shorter time go first - /// 4. Lastly we sort by the time in the queue - /// - transactions that are longer in the queue go first - pub fn get(&self) -> impl Iterator>> { - BestIterator { - all: self.ready.clone(), - best: self.best.clone(), - awaiting: Default::default(), - } - } - - /// Imports transactions to the pool of ready transactions. - /// - /// The transaction needs to have all tags satisfied (be ready) by transactions - /// that are in this queue. - /// Returns transactions that were replaced by the one imported. - pub fn import( - &mut self, - tx: WaitingTransaction, - ) -> error::Result>>> { - assert!( - tx.is_ready(), - "Only ready transactions can be imported. 
Missing: {:?}", tx.missing_tags - ); - assert!(!self.ready.read().contains_key(&tx.transaction.hash), "Transaction is already imported."); - - self.insertion_id += 1; - let insertion_id = self.insertion_id; - let hash = tx.transaction.hash.clone(); - let transaction = tx.transaction; - - let (replaced, unlocks) = self.replace_previous(&transaction)?; - - let mut goes_to_best = true; - let mut ready = self.ready.write(); - let mut requires_offset = 0; - // Add links to transactions that unlock the current one - for tag in &transaction.requires { - // Check if the transaction that satisfies the tag is still in the queue. - if let Some(other) = self.provided_tags.get(tag) { - let tx = ready.get_mut(other).expect(HASH_READY); - tx.unlocks.push(hash.clone()); - // this transaction depends on some other, so it doesn't go to best directly. - goes_to_best = false; - } else { - requires_offset += 1; - } - } - - // update provided_tags - // call to replace_previous guarantees that we will be overwriting - // only entries that have been removed. - for tag in &transaction.provides { - self.provided_tags.insert(tag.clone(), hash.clone()); - } - - let transaction = TransactionRef { - insertion_id, - transaction - }; - - // insert to best if it doesn't require any other transaction to be included before it - if goes_to_best { - self.best.insert(transaction.clone()); - } - - // insert to Ready - ready.insert(hash, ReadyTx { - transaction, - unlocks, - requires_offset, - }); - - Ok(replaced) - } - - /// Fold a list of ready transactions to compute a single value. - pub fn fold, &ReadyTx) -> Option>(&mut self, f: F) -> Option { - self.ready - .read() - .values() - .fold(None, f) - } - - /// Returns true if given hash is part of the queue. 
- pub fn contains(&self, hash: &Hash) -> bool { - self.ready.read().contains_key(hash) - } - - /// Retrive transaction by hash - pub fn by_hash(&self, hash: &Hash) -> Option>> { - self.by_hashes(&[hash.clone()]).into_iter().next().unwrap_or(None) - } - - /// Retrieve transactions by hash - pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { - let ready = self.ready.read(); - hashes.iter().map(|hash| { - ready.get(hash).map(|x| x.transaction.transaction.clone()) - }).collect() - } - - /// Removes a subtree of transactions from the ready pool. - /// - /// NOTE removing a transaction will also cause a removal of all transactions that depend on that one - /// (i.e. the entire subgraph that this transaction is a start of will be removed). - /// All removed transactions are returned. - pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec>> { - let to_remove = hashes.iter().cloned().collect::>(); - self.remove_subtree_with_tag_filter(to_remove, None) - } - - /// Removes a subtrees of transactions trees starting from roots given in `to_remove`. - /// - /// We proceed with a particular branch only if there is at least one provided tag - /// that is not part of `provides_tag_filter`. I.e. the filter contains tags - /// that will stay in the pool, so that we can early exit and avoid descending. 
- fn remove_subtree_with_tag_filter( - &mut self, - mut to_remove: Vec, - provides_tag_filter: Option>, - ) -> Vec>> { - let mut removed = vec![]; - let mut ready = self.ready.write(); - loop { - let hash = match to_remove.pop() { - Some(hash) => hash, - None => return removed, - }; - - if let Some(mut tx) = ready.remove(&hash) { - let invalidated = tx.transaction.transaction.provides - .iter() - .filter(|tag| provides_tag_filter - .as_ref() - .map(|filter| !filter.contains(&**tag)) - .unwrap_or(true) - ); - - let mut removed_some_tags = false; - // remove entries from provided_tags - for tag in invalidated { - removed_some_tags = true; - self.provided_tags.remove(tag); - } - - // remove from unlocks - for tag in &tx.transaction.transaction.requires { - if let Some(hash) = self.provided_tags.get(tag) { - if let Some(tx) = ready.get_mut(hash) { - remove_item(&mut tx.unlocks, &hash); - } - } - } - - // remove from best - self.best.remove(&tx.transaction); - - if removed_some_tags { - // remove all transactions that the current one unlocks - to_remove.append(&mut tx.unlocks); - } - - // add to removed - trace!(target: "txpool", "[{:?}] Removed as part of the subtree.", hash); - removed.push(tx.transaction.transaction); - } - } - } - - /// Removes transactions that provide given tag. - /// - /// All transactions that lead to a transaction, which provides this tag - /// are going to be removed from the queue, but no other transactions are touched - - /// i.e. all other subgraphs starting from given tag are still considered valid & ready. 
- pub fn prune_tags(&mut self, tag: Tag) -> Vec>> { - let mut removed = vec![]; - let mut to_remove = vec![tag]; - - loop { - let tag = match to_remove.pop() { - Some(tag) => tag, - None => return removed, - }; - - let res = self.provided_tags.remove(&tag) - .and_then(|hash| self.ready.write().remove(&hash)); - - if let Some(tx) = res { - let unlocks = tx.unlocks; - let tx = tx.transaction.transaction; - - // prune previous transactions as well - { - let hash = &tx.hash; - let mut ready = self.ready.write(); - let mut find_previous = |tag| -> Option> { - let prev_hash = self.provided_tags.get(tag)?; - let tx2 = ready.get_mut(&prev_hash)?; - remove_item(&mut tx2.unlocks, hash); - // We eagerly prune previous transactions as well. - // But it might not always be good. - // Possible edge case: - // - tx provides two tags - // - the second tag enables some subgraph we don't know of yet - // - we will prune the transaction - // - when we learn about the subgraph it will go to future - // - we will have to wait for re-propagation of that transaction - // Alternatively the caller may attempt to re-import these transactions. - if tx2.unlocks.is_empty() { - Some(tx2.transaction.transaction.provides.clone()) - } else { - None - } - }; - - // find previous transactions - for tag in &tx.requires { - if let Some(mut tags_to_remove) = find_previous(tag) { - to_remove.append(&mut tags_to_remove); - } - } - } - - // add the transactions that just got unlocked to `best` - for hash in unlocks { - if let Some(tx) = self.ready.write().get_mut(&hash) { - tx.requires_offset += 1; - // this transaction is ready - if tx.requires_offset == tx.transaction.transaction.requires.len() { - self.best.insert(tx.transaction.clone()); - } - } - } - - // we also need to remove all other tags that this transaction provides, - // but since all the hard work is done, we only clear the provided_tag -> hash - // mapping. 
- let current_tag = &tag; - for tag in &tx.provides { - let removed = self.provided_tags.remove(tag); - assert_eq!( + /// Borrows a map of tags that are provided by transactions in this queue. + pub fn provided_tags(&self) -> &HashMap { + &self.provided_tags + } + + /// Returns an iterator of ready transactions. + /// + /// Transactions are returned in order: + /// 1. First by the dependencies: + /// - never return transaction that requires a tag, which was not provided by one of the previously returned transactions + /// 2. Then by priority: + /// - If there are two transactions with all requirements satisfied the one with higher priority goes first. + /// 3. Then by the ttl that's left + /// - transactions that are valid for a shorter time go first + /// 4. Lastly we sort by the time in the queue + /// - transactions that are longer in the queue go first + pub fn get(&self) -> impl Iterator>> { + BestIterator { + all: self.ready.clone(), + best: self.best.clone(), + awaiting: Default::default(), + } + } + + /// Imports transactions to the pool of ready transactions. + /// + /// The transaction needs to have all tags satisfied (be ready) by transactions + /// that are in this queue. + /// Returns transactions that were replaced by the one imported. + pub fn import( + &mut self, + tx: WaitingTransaction, + ) -> error::Result>>> { + assert!( + tx.is_ready(), + "Only ready transactions can be imported. Missing: {:?}", + tx.missing_tags + ); + assert!( + !self.ready.read().contains_key(&tx.transaction.hash), + "Transaction is already imported." 
+ ); + + self.insertion_id += 1; + let insertion_id = self.insertion_id; + let hash = tx.transaction.hash.clone(); + let transaction = tx.transaction; + + let (replaced, unlocks) = self.replace_previous(&transaction)?; + + let mut goes_to_best = true; + let mut ready = self.ready.write(); + let mut requires_offset = 0; + // Add links to transactions that unlock the current one + for tag in &transaction.requires { + // Check if the transaction that satisfies the tag is still in the queue. + if let Some(other) = self.provided_tags.get(tag) { + let tx = ready.get_mut(other).expect(HASH_READY); + tx.unlocks.push(hash.clone()); + // this transaction depends on some other, so it doesn't go to best directly. + goes_to_best = false; + } else { + requires_offset += 1; + } + } + + // update provided_tags + // call to replace_previous guarantees that we will be overwriting + // only entries that have been removed. + for tag in &transaction.provides { + self.provided_tags.insert(tag.clone(), hash.clone()); + } + + let transaction = TransactionRef { + insertion_id, + transaction, + }; + + // insert to best if it doesn't require any other transaction to be included before it + if goes_to_best { + self.best.insert(transaction.clone()); + } + + // insert to Ready + ready.insert( + hash, + ReadyTx { + transaction, + unlocks, + requires_offset, + }, + ); + + Ok(replaced) + } + + /// Fold a list of ready transactions to compute a single value. + pub fn fold, &ReadyTx) -> Option>( + &mut self, + f: F, + ) -> Option { + self.ready.read().values().fold(None, f) + } + + /// Returns true if given hash is part of the queue. 
+ pub fn contains(&self, hash: &Hash) -> bool { + self.ready.read().contains_key(hash) + } + + /// Retrive transaction by hash + pub fn by_hash(&self, hash: &Hash) -> Option>> { + self.by_hashes(&[hash.clone()]) + .into_iter() + .next() + .unwrap_or(None) + } + + /// Retrieve transactions by hash + pub fn by_hashes(&self, hashes: &[Hash]) -> Vec>>> { + let ready = self.ready.read(); + hashes + .iter() + .map(|hash| ready.get(hash).map(|x| x.transaction.transaction.clone())) + .collect() + } + + /// Removes a subtree of transactions from the ready pool. + /// + /// NOTE removing a transaction will also cause a removal of all transactions that depend on that one + /// (i.e. the entire subgraph that this transaction is a start of will be removed). + /// All removed transactions are returned. + pub fn remove_subtree(&mut self, hashes: &[Hash]) -> Vec>> { + let to_remove = hashes.iter().cloned().collect::>(); + self.remove_subtree_with_tag_filter(to_remove, None) + } + + /// Removes a subtrees of transactions trees starting from roots given in `to_remove`. + /// + /// We proceed with a particular branch only if there is at least one provided tag + /// that is not part of `provides_tag_filter`. I.e. the filter contains tags + /// that will stay in the pool, so that we can early exit and avoid descending. 
+ fn remove_subtree_with_tag_filter( + &mut self, + mut to_remove: Vec, + provides_tag_filter: Option>, + ) -> Vec>> { + let mut removed = vec![]; + let mut ready = self.ready.write(); + loop { + let hash = match to_remove.pop() { + Some(hash) => hash, + None => return removed, + }; + + if let Some(mut tx) = ready.remove(&hash) { + let invalidated = tx.transaction.transaction.provides.iter().filter(|tag| { + provides_tag_filter + .as_ref() + .map(|filter| !filter.contains(&**tag)) + .unwrap_or(true) + }); + + let mut removed_some_tags = false; + // remove entries from provided_tags + for tag in invalidated { + removed_some_tags = true; + self.provided_tags.remove(tag); + } + + // remove from unlocks + for tag in &tx.transaction.transaction.requires { + if let Some(hash) = self.provided_tags.get(tag) { + if let Some(tx) = ready.get_mut(hash) { + remove_item(&mut tx.unlocks, &hash); + } + } + } + + // remove from best + self.best.remove(&tx.transaction); + + if removed_some_tags { + // remove all transactions that the current one unlocks + to_remove.append(&mut tx.unlocks); + } + + // add to removed + trace!(target: "txpool", "[{:?}] Removed as part of the subtree.", hash); + removed.push(tx.transaction.transaction); + } + } + } + + /// Removes transactions that provide given tag. + /// + /// All transactions that lead to a transaction, which provides this tag + /// are going to be removed from the queue, but no other transactions are touched - + /// i.e. all other subgraphs starting from given tag are still considered valid & ready. 
+ pub fn prune_tags(&mut self, tag: Tag) -> Vec>> { + let mut removed = vec![]; + let mut to_remove = vec![tag]; + + loop { + let tag = match to_remove.pop() { + Some(tag) => tag, + None => return removed, + }; + + let res = self + .provided_tags + .remove(&tag) + .and_then(|hash| self.ready.write().remove(&hash)); + + if let Some(tx) = res { + let unlocks = tx.unlocks; + let tx = tx.transaction.transaction; + + // prune previous transactions as well + { + let hash = &tx.hash; + let mut ready = self.ready.write(); + let mut find_previous = |tag| -> Option> { + let prev_hash = self.provided_tags.get(tag)?; + let tx2 = ready.get_mut(&prev_hash)?; + remove_item(&mut tx2.unlocks, hash); + // We eagerly prune previous transactions as well. + // But it might not always be good. + // Possible edge case: + // - tx provides two tags + // - the second tag enables some subgraph we don't know of yet + // - we will prune the transaction + // - when we learn about the subgraph it will go to future + // - we will have to wait for re-propagation of that transaction + // Alternatively the caller may attempt to re-import these transactions. + if tx2.unlocks.is_empty() { + Some(tx2.transaction.transaction.provides.clone()) + } else { + None + } + }; + + // find previous transactions + for tag in &tx.requires { + if let Some(mut tags_to_remove) = find_previous(tag) { + to_remove.append(&mut tags_to_remove); + } + } + } + + // add the transactions that just got unlocked to `best` + for hash in unlocks { + if let Some(tx) = self.ready.write().get_mut(&hash) { + tx.requires_offset += 1; + // this transaction is ready + if tx.requires_offset == tx.transaction.transaction.requires.len() { + self.best.insert(tx.transaction.clone()); + } + } + } + + // we also need to remove all other tags that this transaction provides, + // but since all the hard work is done, we only clear the provided_tag -> hash + // mapping. 
+ let current_tag = &tag; + for tag in &tx.provides { + let removed = self.provided_tags.remove(tag); + assert_eq!( removed.as_ref(), if current_tag == tag { None } else { Some(&tx.hash) }, "The pool contains exactly one transaction providing given tag; the removed transaction claims to provide that tag, so it has to be mapped to it's hash; qed" ); - } - - removed.push(tx); - } - } - } - - /// Checks if the transaction is providing the same tags as other transactions. - /// - /// In case that's true it determines if the priority of transactions that - /// we are about to replace is lower than the priority of the replacement transaction. - /// We remove/replace old transactions in case they have lower priority. - /// - /// In case replacement is successful returns a list of removed transactions - /// and a list of hashes that are still in pool and gets unlocked by the new transaction. - fn replace_previous( - &mut self, - tx: &Transaction, - ) -> error::Result< - (Vec>>, Vec) - > { - let (to_remove, unlocks) = { - // check if we are replacing a transaction - let replace_hashes = tx.provides - .iter() - .filter_map(|tag| self.provided_tags.get(tag)) - .collect::>(); - - // early exit if we are not replacing anything. - if replace_hashes.is_empty() { - return Ok((vec![], vec![])); - } - - // now check if collective priority is lower than the replacement transaction. 
- let old_priority = { - let ready = self.ready.read(); - replace_hashes - .iter() - .filter_map(|hash| ready.get(hash)) - .fold(0u64, |total, tx| - total.saturating_add(tx.transaction.transaction.priority) - ) - }; - - // bail - the transaction has too low priority to replace the old ones - if old_priority >= tx.priority { - return Err(error::Error::TooLowPriority { old: old_priority, new: tx.priority }) - } - - // construct a list of unlocked transactions - let unlocks = { - let ready = self.ready.read(); - replace_hashes - .iter() - .filter_map(|hash| ready.get(hash)) - .fold(vec![], |mut list, tx| { - list.extend(tx.unlocks.iter().cloned()); - list - }) - }; - - ( - replace_hashes.into_iter().cloned().collect::>(), - unlocks - ) - }; - - let new_provides = tx.provides.iter().cloned().collect::>(); - let removed = self.remove_subtree_with_tag_filter(to_remove, Some(new_provides)); - - Ok(( - removed, - unlocks - )) - } - - /// Returns number of transactions in this queue. - pub fn len(&self) -> usize { - self.ready.read().len() - } - - /// Returns sum of encoding lengths of all transactions in this queue. - pub fn bytes(&self) -> usize { - self.ready.read().values().fold(0, |acc, tx| acc + tx.transaction.transaction.bytes) - } + } + + removed.push(tx); + } + } + } + + /// Checks if the transaction is providing the same tags as other transactions. + /// + /// In case that's true it determines if the priority of transactions that + /// we are about to replace is lower than the priority of the replacement transaction. + /// We remove/replace old transactions in case they have lower priority. + /// + /// In case replacement is successful returns a list of removed transactions + /// and a list of hashes that are still in pool and gets unlocked by the new transaction. 
+ fn replace_previous( + &mut self, + tx: &Transaction, + ) -> error::Result<(Vec>>, Vec)> { + let (to_remove, unlocks) = { + // check if we are replacing a transaction + let replace_hashes = tx + .provides + .iter() + .filter_map(|tag| self.provided_tags.get(tag)) + .collect::>(); + + // early exit if we are not replacing anything. + if replace_hashes.is_empty() { + return Ok((vec![], vec![])); + } + + // now check if collective priority is lower than the replacement transaction. + let old_priority = { + let ready = self.ready.read(); + replace_hashes + .iter() + .filter_map(|hash| ready.get(hash)) + .fold(0u64, |total, tx| { + total.saturating_add(tx.transaction.transaction.priority) + }) + }; + + // bail - the transaction has too low priority to replace the old ones + if old_priority >= tx.priority { + return Err(error::Error::TooLowPriority { + old: old_priority, + new: tx.priority, + }); + } + + // construct a list of unlocked transactions + let unlocks = { + let ready = self.ready.read(); + replace_hashes + .iter() + .filter_map(|hash| ready.get(hash)) + .fold(vec![], |mut list, tx| { + list.extend(tx.unlocks.iter().cloned()); + list + }) + }; + + ( + replace_hashes.into_iter().cloned().collect::>(), + unlocks, + ) + }; + + let new_provides = tx.provides.iter().cloned().collect::>(); + let removed = self.remove_subtree_with_tag_filter(to_remove, Some(new_provides)); + + Ok((removed, unlocks)) + } + + /// Returns number of transactions in this queue. + pub fn len(&self) -> usize { + self.ready.read().len() + } + + /// Returns sum of encoding lengths of all transactions in this queue. + pub fn bytes(&self) -> usize { + self.ready + .read() + .values() + .fold(0, |acc, tx| acc + tx.transaction.transaction.bytes) + } } /// Iterator of ready transactions ordered by priority. 
pub struct BestIterator { - all: Arc>>>, - awaiting: HashMap)>, - best: BTreeSet>, + all: Arc>>>, + awaiting: HashMap)>, + best: BTreeSet>, } impl BestIterator { - /// Depending on number of satisfied requirements insert given ref - /// either to awaiting set or to best set. - fn best_or_awaiting(&mut self, satisfied: usize, tx_ref: TransactionRef) { - if satisfied >= tx_ref.transaction.requires.len() { - // If we have satisfied all deps insert to best - self.best.insert(tx_ref); - - } else { - // otherwise we're still awaiting for some deps - self.awaiting.insert(tx_ref.transaction.hash.clone(), (satisfied, tx_ref)); - } - } + /// Depending on number of satisfied requirements insert given ref + /// either to awaiting set or to best set. + fn best_or_awaiting(&mut self, satisfied: usize, tx_ref: TransactionRef) { + if satisfied >= tx_ref.transaction.requires.len() { + // If we have satisfied all deps insert to best + self.best.insert(tx_ref); + } else { + // otherwise we're still awaiting for some deps + self.awaiting + .insert(tx_ref.transaction.hash.clone(), (satisfied, tx_ref)); + } + } } impl Iterator for BestIterator { - type Item = Arc>; - - fn next(&mut self) -> Option { - loop { - let best = self.best.iter().next_back()?.clone(); - let best = self.best.take(&best)?; - - let next = self.all.read().get(&best.transaction.hash).cloned(); - let ready = match next { - Some(ready) => ready, - // The transaction is not in all, maybe it was removed in the meantime? - None => continue, - }; - - // Insert transactions that just got unlocked. 
- for hash in &ready.unlocks { - // first check local awaiting transactions - let res = if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) { - satisfied += 1; - Some((satisfied, tx_ref)) - // then get from the pool - } else if let Some(next) = self.all.read().get(hash) { - Some((next.requires_offset + 1, next.transaction.clone())) - } else { - None - }; - - if let Some((satisfied, tx_ref)) = res { - self.best_or_awaiting(satisfied, tx_ref) - } - } - - return Some(best.transaction.clone()) - } - } + type Item = Arc>; + + fn next(&mut self) -> Option { + loop { + let best = self.best.iter().next_back()?.clone(); + let best = self.best.take(&best)?; + + let next = self.all.read().get(&best.transaction.hash).cloned(); + let ready = match next { + Some(ready) => ready, + // The transaction is not in all, maybe it was removed in the meantime? + None => continue, + }; + + // Insert transactions that just got unlocked. + for hash in &ready.unlocks { + // first check local awaiting transactions + let res = if let Some((mut satisfied, tx_ref)) = self.awaiting.remove(hash) { + satisfied += 1; + Some((satisfied, tx_ref)) + // then get from the pool + } else if let Some(next) = self.all.read().get(hash) { + Some((next.requires_offset + 1, next.transaction.clone())) + } else { + None + }; + + if let Some((satisfied, tx_ref)) = res { + self.best_or_awaiting(satisfied, tx_ref) + } + } + + return Some(best.transaction.clone()); + } + } } // See: https://github.com/rust-lang/rust/issues/40062 fn remove_item(vec: &mut Vec, item: &T) { - if let Some(idx) = vec.iter().position(|i| i == item) { - vec.swap_remove(idx); - } + if let Some(idx) = vec.iter().position(|i| i == item) { + vec.swap_remove(idx); + } } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::transaction_validity::TransactionSource as Source; - - fn tx(id: u8) -> Transaction> { - Transaction { - data: vec![id], - bytes: 1, - hash: id as u64, - priority: 1, - valid_till: 2, - requires: vec![vec![1], 
vec![2]], - provides: vec![vec![3], vec![4]], - propagate: true, - source: Source::External, - } - } - - fn import( - ready: &mut ReadyTransactions, - tx: Transaction - ) -> error::Result>>> { - let x = WaitingTransaction::new(tx, ready.provided_tags(), &[]); - ready.import(x) - } - - #[test] - fn should_replace_transaction_that_provides_the_same_tag() { - // given - let mut ready = ReadyTransactions::default(); - let mut tx1 = tx(1); - tx1.requires.clear(); - let mut tx2 = tx(2); - tx2.requires.clear(); - tx2.provides = vec![vec![3]]; - let mut tx3 = tx(3); - tx3.requires.clear(); - tx3.provides = vec![vec![4]]; - - // when - import(&mut ready, tx2).unwrap(); - import(&mut ready, tx3).unwrap(); - assert_eq!(ready.get().count(), 2); - - // too low priority - import(&mut ready, tx1.clone()).unwrap_err(); - - tx1.priority = 10; - import(&mut ready, tx1).unwrap(); - - // then - assert_eq!(ready.get().count(), 1); - } - - #[test] - fn should_replace_multiple_transactions_correctly() { - // given - let mut ready = ReadyTransactions::default(); - let mut tx0 = tx(0); - tx0.requires = vec![]; - tx0.provides = vec![vec![0]]; - let mut tx1 = tx(1); - tx1.requires = vec![]; - tx1.provides = vec![vec![1]]; - let mut tx2 = tx(2); - tx2.requires = vec![vec![0], vec![1]]; - tx2.provides = vec![vec![2], vec![3]]; - let mut tx3 = tx(3); - tx3.requires = vec![vec![2]]; - tx3.provides = vec![vec![4]]; - let mut tx4 = tx(4); - tx4.requires = vec![vec![3]]; - tx4.provides = vec![vec![5]]; - // replacement - let mut tx2_2 = tx(5); - tx2_2.requires = vec![vec![0], vec![1]]; - tx2_2.provides = vec![vec![2]]; - tx2_2.priority = 10; - - for tx in vec![tx0, tx1, tx2, tx3, tx4] { - import(&mut ready, tx).unwrap(); - } - assert_eq!(ready.get().count(), 5); - - // when - import(&mut ready, tx2_2).unwrap(); - - // then - assert_eq!(ready.get().count(), 3); - } - - #[test] - fn should_return_best_transactions_in_correct_order() { - // given - let mut ready = ReadyTransactions::default(); - let 
mut tx1 = tx(1); - tx1.requires.clear(); - let mut tx2 = tx(2); - tx2.requires = tx1.provides.clone(); - tx2.provides = vec![vec![106]]; - let mut tx3 = tx(3); - tx3.requires = vec![tx1.provides[0].clone(), vec![106]]; - tx3.provides = vec![]; - let mut tx4 = tx(4); - tx4.requires = vec![tx1.provides[0].clone()]; - tx4.provides = vec![]; - let tx5 = Transaction { - data: vec![5], - bytes: 1, - hash: 5, - priority: 1, - valid_till: u64::max_value(), // use the max_value() here for testing. - requires: vec![tx1.provides[0].clone()], - provides: vec![], - propagate: true, - source: Source::External, - }; - - // when - for tx in vec![tx1, tx2, tx3, tx4, tx5] { - import(&mut ready, tx).unwrap(); - } - - // then - assert_eq!(ready.best.len(), 1); - - let mut it = ready.get().map(|tx| tx.data[0]); - - assert_eq!(it.next(), Some(1)); - assert_eq!(it.next(), Some(2)); - assert_eq!(it.next(), Some(3)); - assert_eq!(it.next(), Some(4)); - assert_eq!(it.next(), Some(5)); - assert_eq!(it.next(), None); - } - - #[test] - fn can_report_heap_size() { - let mut ready = ReadyTransactions::default(); - let tx = Transaction { - data: vec![5], - bytes: 1, - hash: 5, - priority: 1, - valid_till: u64::max_value(), // use the max_value() here for testing. 
- requires: vec![], - provides: vec![], - propagate: true, - source: Source::External, - }; - import(&mut ready, tx).unwrap(); - - assert!(parity_util_mem::malloc_size(&ready) > 200); - } - - #[test] - fn should_order_refs() { - let mut id = 1; - let mut with_priority = |priority, longevity| { - id += 1; - let mut tx = tx(id); - tx.priority = priority; - tx.valid_till = longevity; - tx - }; - // higher priority = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(2, 3)), - insertion_id: 2, - }); - // lower validity = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 2)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); - // lower insertion_id = better - assert!(TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 1, - } > TransactionRef { - transaction: Arc::new(with_priority(3, 3)), - insertion_id: 2, - }); - } + use super::*; + use sp_runtime::transaction_validity::TransactionSource as Source; + + fn tx(id: u8) -> Transaction> { + Transaction { + data: vec![id], + bytes: 1, + hash: id as u64, + priority: 1, + valid_till: 2, + requires: vec![vec![1], vec![2]], + provides: vec![vec![3], vec![4]], + propagate: true, + source: Source::External, + } + } + + fn import( + ready: &mut ReadyTransactions, + tx: Transaction, + ) -> error::Result>>> { + let x = WaitingTransaction::new(tx, ready.provided_tags(), &[]); + ready.import(x) + } + + #[test] + fn should_replace_transaction_that_provides_the_same_tag() { + // given + let mut ready = ReadyTransactions::default(); + let mut tx1 = tx(1); + tx1.requires.clear(); + let mut tx2 = tx(2); + tx2.requires.clear(); + tx2.provides = vec![vec![3]]; + let mut tx3 = tx(3); + tx3.requires.clear(); + tx3.provides = vec![vec![4]]; + + // when + import(&mut ready, tx2).unwrap(); + import(&mut ready, tx3).unwrap(); + 
assert_eq!(ready.get().count(), 2); + + // too low priority + import(&mut ready, tx1.clone()).unwrap_err(); + + tx1.priority = 10; + import(&mut ready, tx1).unwrap(); + + // then + assert_eq!(ready.get().count(), 1); + } + + #[test] + fn should_replace_multiple_transactions_correctly() { + // given + let mut ready = ReadyTransactions::default(); + let mut tx0 = tx(0); + tx0.requires = vec![]; + tx0.provides = vec![vec![0]]; + let mut tx1 = tx(1); + tx1.requires = vec![]; + tx1.provides = vec![vec![1]]; + let mut tx2 = tx(2); + tx2.requires = vec![vec![0], vec![1]]; + tx2.provides = vec![vec![2], vec![3]]; + let mut tx3 = tx(3); + tx3.requires = vec![vec![2]]; + tx3.provides = vec![vec![4]]; + let mut tx4 = tx(4); + tx4.requires = vec![vec![3]]; + tx4.provides = vec![vec![5]]; + // replacement + let mut tx2_2 = tx(5); + tx2_2.requires = vec![vec![0], vec![1]]; + tx2_2.provides = vec![vec![2]]; + tx2_2.priority = 10; + + for tx in vec![tx0, tx1, tx2, tx3, tx4] { + import(&mut ready, tx).unwrap(); + } + assert_eq!(ready.get().count(), 5); + + // when + import(&mut ready, tx2_2).unwrap(); + + // then + assert_eq!(ready.get().count(), 3); + } + + #[test] + fn should_return_best_transactions_in_correct_order() { + // given + let mut ready = ReadyTransactions::default(); + let mut tx1 = tx(1); + tx1.requires.clear(); + let mut tx2 = tx(2); + tx2.requires = tx1.provides.clone(); + tx2.provides = vec![vec![106]]; + let mut tx3 = tx(3); + tx3.requires = vec![tx1.provides[0].clone(), vec![106]]; + tx3.provides = vec![]; + let mut tx4 = tx(4); + tx4.requires = vec![tx1.provides[0].clone()]; + tx4.provides = vec![]; + let tx5 = Transaction { + data: vec![5], + bytes: 1, + hash: 5, + priority: 1, + valid_till: u64::max_value(), // use the max_value() here for testing. 
+ requires: vec![tx1.provides[0].clone()], + provides: vec![], + propagate: true, + source: Source::External, + }; + + // when + for tx in vec![tx1, tx2, tx3, tx4, tx5] { + import(&mut ready, tx).unwrap(); + } + + // then + assert_eq!(ready.best.len(), 1); + + let mut it = ready.get().map(|tx| tx.data[0]); + + assert_eq!(it.next(), Some(1)); + assert_eq!(it.next(), Some(2)); + assert_eq!(it.next(), Some(3)); + assert_eq!(it.next(), Some(4)); + assert_eq!(it.next(), Some(5)); + assert_eq!(it.next(), None); + } + + #[test] + fn can_report_heap_size() { + let mut ready = ReadyTransactions::default(); + let tx = Transaction { + data: vec![5], + bytes: 1, + hash: 5, + priority: 1, + valid_till: u64::max_value(), // use the max_value() here for testing. + requires: vec![], + provides: vec![], + propagate: true, + source: Source::External, + }; + import(&mut ready, tx).unwrap(); + + assert!(parity_util_mem::malloc_size(&ready) > 200); + } + + #[test] + fn should_order_refs() { + let mut id = 1; + let mut with_priority = |priority, longevity| { + id += 1; + let mut tx = tx(id); + tx.priority = priority; + tx.valid_till = longevity; + tx + }; + // higher priority = better + assert!( + TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 1, + } > TransactionRef { + transaction: Arc::new(with_priority(2, 3)), + insertion_id: 2, + } + ); + // lower validity = better + assert!( + TransactionRef { + transaction: Arc::new(with_priority(3, 2)), + insertion_id: 1, + } > TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 2, + } + ); + // lower insertion_id = better + assert!( + TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 1, + } > TransactionRef { + transaction: Arc::new(with_priority(3, 3)), + insertion_id: 2, + } + ); + } } diff --git a/client/transaction-pool/graph/src/rotator.rs b/client/transaction-pool/graph/src/rotator.rs index be96174d1d..1ff87beee6 100644 --- 
a/client/transaction-pool/graph/src/rotator.rs +++ b/client/transaction-pool/graph/src/rotator.rs @@ -19,13 +19,8 @@ //! Keeps only recent extrinsic and discard the ones kept for a significant amount of time. //! Discarded extrinsics are banned so that they don't get re-imported again. -use std::{ - collections::HashMap, - hash, - iter, - time::Duration, -}; use parking_lot::RwLock; +use std::{collections::HashMap, hash, iter, time::Duration}; use wasm_timer::Instant; use crate::base_pool::Transaction; @@ -38,178 +33,181 @@ const EXPECTED_SIZE: usize = 2048; /// Extrinsics that occupy the pool for too long are culled and temporarily banned from entering /// the pool again. pub struct PoolRotator { - /// How long the extrinsic is banned for. - ban_time: Duration, - /// Currently banned extrinsics. - banned_until: RwLock>, + /// How long the extrinsic is banned for. + ban_time: Duration, + /// Currently banned extrinsics. + banned_until: RwLock>, } impl Default for PoolRotator { - fn default() -> Self { - PoolRotator { - ban_time: Duration::from_secs(60 * 30), - banned_until: Default::default(), - } - } + fn default() -> Self { + PoolRotator { + ban_time: Duration::from_secs(60 * 30), + banned_until: Default::default(), + } + } } impl PoolRotator { - /// Returns `true` if extrinsic hash is currently banned. - pub fn is_banned(&self, hash: &Hash) -> bool { - self.banned_until.read().contains_key(hash) - } - - /// Bans given set of hashes. - pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { - let mut banned = self.banned_until.write(); - - for hash in hashes { - banned.insert(hash, *now + self.ban_time); - } - - if banned.len() > 2 * EXPECTED_SIZE { - while banned.len() > EXPECTED_SIZE { - if let Some(key) = banned.keys().next().cloned() { - banned.remove(&key); - } - } - } - } - - - /// Bans extrinsic if it's stale. - /// - /// Returns `true` if extrinsic is stale and got banned. 
- pub fn ban_if_stale(&self, now: &Instant, current_block: u64, xt: &Transaction) -> bool { - if xt.valid_till > current_block { - return false; - } - - self.ban(now, iter::once(xt.hash.clone())); - true - } - - /// Removes timed bans. - pub fn clear_timeouts(&self, now: &Instant) { - let mut banned = self.banned_until.write(); - - banned.retain(|_, &mut v| v >= *now); - } + /// Returns `true` if extrinsic hash is currently banned. + pub fn is_banned(&self, hash: &Hash) -> bool { + self.banned_until.read().contains_key(hash) + } + + /// Bans given set of hashes. + pub fn ban(&self, now: &Instant, hashes: impl IntoIterator) { + let mut banned = self.banned_until.write(); + + for hash in hashes { + banned.insert(hash, *now + self.ban_time); + } + + if banned.len() > 2 * EXPECTED_SIZE { + while banned.len() > EXPECTED_SIZE { + if let Some(key) = banned.keys().next().cloned() { + banned.remove(&key); + } + } + } + } + + /// Bans extrinsic if it's stale. + /// + /// Returns `true` if extrinsic is stale and got banned. + pub fn ban_if_stale( + &self, + now: &Instant, + current_block: u64, + xt: &Transaction, + ) -> bool { + if xt.valid_till > current_block { + return false; + } + + self.ban(now, iter::once(xt.hash.clone())); + true + } + + /// Removes timed bans. 
+ pub fn clear_timeouts(&self, now: &Instant) { + let mut banned = self.banned_until.write(); + + banned.retain(|_, &mut v| v >= *now); + } } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::transaction_validity::TransactionSource; - - type Hash = u64; - type Ex = (); - - fn rotator() -> PoolRotator { - PoolRotator { - ban_time: Duration::from_millis(10), - ..Default::default() - } - } - - fn tx() -> (Hash, Transaction) { - let hash = 5u64; - let tx = Transaction { - data: (), - bytes: 1, - hash: hash.clone(), - priority: 5, - valid_till: 1, - requires: vec![], - provides: vec![], - propagate: true, - source: TransactionSource::External, - }; - - (hash, tx) - } - - #[test] - fn should_not_ban_if_not_stale() { - // given - let (hash, tx) = tx(); - let rotator = rotator(); - assert!(!rotator.is_banned(&hash)); - let now = Instant::now(); - let past_block = 0; - - // when - assert!(!rotator.ban_if_stale(&now, past_block, &tx)); - - // then - assert!(!rotator.is_banned(&hash)); - } - - #[test] - fn should_ban_stale_extrinsic() { - // given - let (hash, tx) = tx(); - let rotator = rotator(); - assert!(!rotator.is_banned(&hash)); - - // when - assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); - - // then - assert!(rotator.is_banned(&hash)); - } - - - #[test] - fn should_clear_banned() { - // given - let (hash, tx) = tx(); - let rotator = rotator(); - assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); - assert!(rotator.is_banned(&hash)); - - // when - let future = Instant::now() + rotator.ban_time + rotator.ban_time; - rotator.clear_timeouts(&future); - - // then - assert!(!rotator.is_banned(&hash)); - } - - #[test] - fn should_garbage_collect() { - // given - fn tx_with(i: u64, valid_till: u64) -> Transaction { - let hash = i; - Transaction { - data: (), - bytes: 2, - hash, - priority: 5, - valid_till, - requires: vec![], - provides: vec![], - propagate: true, - source: TransactionSource::External, - } - } - - let rotator = rotator(); - - let now = 
Instant::now(); - let past_block = 0; - - // when - for i in 0..2*EXPECTED_SIZE { - let tx = tx_with(i as u64, past_block); - assert!(rotator.ban_if_stale(&now, past_block, &tx)); - } - assert_eq!(rotator.banned_until.read().len(), 2*EXPECTED_SIZE); - - // then - let tx = tx_with(2*EXPECTED_SIZE as u64, past_block); - // trigger a garbage collection - assert!(rotator.ban_if_stale(&now, past_block, &tx)); - assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); - } + use super::*; + use sp_runtime::transaction_validity::TransactionSource; + + type Hash = u64; + type Ex = (); + + fn rotator() -> PoolRotator { + PoolRotator { + ban_time: Duration::from_millis(10), + ..Default::default() + } + } + + fn tx() -> (Hash, Transaction) { + let hash = 5u64; + let tx = Transaction { + data: (), + bytes: 1, + hash: hash.clone(), + priority: 5, + valid_till: 1, + requires: vec![], + provides: vec![], + propagate: true, + source: TransactionSource::External, + }; + + (hash, tx) + } + + #[test] + fn should_not_ban_if_not_stale() { + // given + let (hash, tx) = tx(); + let rotator = rotator(); + assert!(!rotator.is_banned(&hash)); + let now = Instant::now(); + let past_block = 0; + + // when + assert!(!rotator.ban_if_stale(&now, past_block, &tx)); + + // then + assert!(!rotator.is_banned(&hash)); + } + + #[test] + fn should_ban_stale_extrinsic() { + // given + let (hash, tx) = tx(); + let rotator = rotator(); + assert!(!rotator.is_banned(&hash)); + + // when + assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); + + // then + assert!(rotator.is_banned(&hash)); + } + + #[test] + fn should_clear_banned() { + // given + let (hash, tx) = tx(); + let rotator = rotator(); + assert!(rotator.ban_if_stale(&Instant::now(), 1, &tx)); + assert!(rotator.is_banned(&hash)); + + // when + let future = Instant::now() + rotator.ban_time + rotator.ban_time; + rotator.clear_timeouts(&future); + + // then + assert!(!rotator.is_banned(&hash)); + } + + #[test] + fn should_garbage_collect() { + 
// given + fn tx_with(i: u64, valid_till: u64) -> Transaction { + let hash = i; + Transaction { + data: (), + bytes: 2, + hash, + priority: 5, + valid_till, + requires: vec![], + provides: vec![], + propagate: true, + source: TransactionSource::External, + } + } + + let rotator = rotator(); + + let now = Instant::now(); + let past_block = 0; + + // when + for i in 0..2 * EXPECTED_SIZE { + let tx = tx_with(i as u64, past_block); + assert!(rotator.ban_if_stale(&now, past_block, &tx)); + } + assert_eq!(rotator.banned_until.read().len(), 2 * EXPECTED_SIZE); + + // then + let tx = tx_with(2 * EXPECTED_SIZE as u64, past_block); + // trigger a garbage collection + assert!(rotator.ban_if_stale(&now, past_block, &tx)); + assert_eq!(rotator.banned_until.read().len(), EXPECTED_SIZE); + } } diff --git a/client/transaction-pool/graph/src/validated_pool.rs b/client/transaction-pool/graph/src/validated_pool.rs index 2ff2acfe24..31ead83df2 100644 --- a/client/transaction-pool/graph/src/validated_pool.rs +++ b/client/transaction-pool/graph/src/validated_pool.rs @@ -15,590 +15,626 @@ // along with Substrate. If not, see . 
use std::{ - collections::{HashSet, HashMap}, - hash, - sync::Arc, + collections::{HashMap, HashSet}, + hash, + sync::Arc, }; -use crate::{base_pool as base, BlockHash}; use crate::listener::Listener; use crate::rotator::PoolRotator; use crate::watcher::Watcher; -use serde::Serialize; +use crate::{base_pool as base, BlockHash}; use log::{debug, warn}; +use serde::Serialize; use parking_lot::{Mutex, RwLock}; use sp_runtime::{ - generic::BlockId, - traits::{self, SaturatedConversion}, - transaction_validity::{TransactionTag as Tag, ValidTransaction, TransactionSource}, + generic::BlockId, + traits::{self, SaturatedConversion}, + transaction_validity::{TransactionSource, TransactionTag as Tag, ValidTransaction}, }; use sp_transaction_pool::{error, PoolStatus}; -use wasm_timer::Instant; use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; +use wasm_timer::Instant; use crate::base_pool::PruneStatus; -use crate::pool::{EventStream, Options, ChainApi, ExHash, ExtrinsicFor, TransactionFor}; +use crate::pool::{ChainApi, EventStream, ExHash, ExtrinsicFor, Options, TransactionFor}; /// Pre-validated transaction. Validated pool only accepts transactions wrapped in this enum. #[derive(Debug)] pub enum ValidatedTransaction { - /// Transaction that has been validated successfully. - Valid(base::Transaction), - /// Transaction that is invalid. - Invalid(Hash, Error), - /// Transaction which validity can't be determined. - /// - /// We're notifying watchers about failure, if 'unknown' transaction is submitted. - Unknown(Hash, Error), + /// Transaction that has been validated successfully. + Valid(base::Transaction), + /// Transaction that is invalid. + Invalid(Hash, Error), + /// Transaction which validity can't be determined. + /// + /// We're notifying watchers about failure, if 'unknown' transaction is submitted. + Unknown(Hash, Error), } impl ValidatedTransaction { - /// Consume validity result, transaction data and produce ValidTransaction. 
- pub fn valid_at( - at: u64, - hash: Hash, - source: TransactionSource, - data: Ex, - bytes: usize, - validity: ValidTransaction, - ) -> Self { - Self::Valid(base::Transaction { - data, - bytes, - hash, - source, - priority: validity.priority, - requires: validity.requires, - provides: validity.provides, - propagate: validity.propagate, - valid_till: at - .saturated_into::() - .saturating_add(validity.longevity), - }) - } + /// Consume validity result, transaction data and produce ValidTransaction. + pub fn valid_at( + at: u64, + hash: Hash, + source: TransactionSource, + data: Ex, + bytes: usize, + validity: ValidTransaction, + ) -> Self { + Self::Valid(base::Transaction { + data, + bytes, + hash, + source, + priority: validity.priority, + requires: validity.requires, + provides: validity.provides, + propagate: validity.propagate, + valid_till: at + .saturated_into::() + .saturating_add(validity.longevity), + }) + } } /// A type of validated transaction stored in the pool. -pub type ValidatedTransactionFor = ValidatedTransaction< - ExHash, - ExtrinsicFor, - ::Error, ->; +pub type ValidatedTransactionFor = + ValidatedTransaction, ExtrinsicFor, ::Error>; /// Pool that deals with validated transactions. 
pub struct ValidatedPool { - api: Arc, - options: Options, - listener: RwLock, B>>, - pool: RwLock, - ExtrinsicFor, - >>, - import_notification_sinks: Mutex>>>, - rotator: PoolRotator>, + api: Arc, + options: Options, + listener: RwLock, B>>, + pool: RwLock, ExtrinsicFor>>, + import_notification_sinks: Mutex>>>, + rotator: PoolRotator>, } #[cfg(not(target_os = "unknown"))] impl parity_util_mem::MallocSizeOf for ValidatedPool where - B::Hash: parity_util_mem::MallocSizeOf, - ExtrinsicFor: parity_util_mem::MallocSizeOf, + B::Hash: parity_util_mem::MallocSizeOf, + ExtrinsicFor: parity_util_mem::MallocSizeOf, { - fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - // other entries insignificant or non-primary references - self.pool.size_of(ops) - } + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + // other entries insignificant or non-primary references + self.pool.size_of(ops) + } } impl ValidatedPool { - /// Create a new transaction pool. - pub fn new(options: Options, api: Arc) -> Self { - let base_pool = base::BasePool::new(options.reject_future_transactions); - ValidatedPool { - options, - listener: Default::default(), - api, - pool: RwLock::new(base_pool), - import_notification_sinks: Default::default(), - rotator: Default::default(), - } - } - - /// Bans given set of hashes. - pub fn ban(&self, now: &Instant, hashes: impl IntoIterator>) { - self.rotator.ban(now, hashes) - } - - /// Returns true if transaction with given hash is currently banned from the pool. - pub fn is_banned(&self, hash: &ExHash) -> bool { - self.rotator.is_banned(hash) - } - - /// Imports a bunch of pre-validated transactions to the pool. 
- pub fn submit(&self, txs: T) -> Vec, B::Error>> where - T: IntoIterator> - { - let results = txs.into_iter() - .map(|validated_tx| self.submit_one(validated_tx)) - .collect::>(); - - // only enforce limits if there is at least one imported transaction - let removed = if results.iter().any(|res| res.is_ok()) { - self.enforce_limits() - } else { - Default::default() - }; - - results.into_iter().map(|res| match res { - Ok(ref hash) if removed.contains(hash) => Err(error::Error::ImmediatelyDropped.into()), - other => other, - }).collect() - } - - /// Submit single pre-validated transaction to the pool. - fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { - match tx { - ValidatedTransaction::Valid(tx) => { - let imported = self.pool.write().import(tx)?; - - if let base::Imported::Ready { ref hash, .. } = imported { - self.import_notification_sinks.lock() - .retain(|sink| sink.unbounded_send(hash.clone()).is_ok()); - } - - let mut listener = self.listener.write(); - fire_events(&mut *listener, &imported); - Ok(imported.hash().clone()) - }, - ValidatedTransaction::Invalid(hash, err) => { - self.rotator.ban(&Instant::now(), std::iter::once(hash)); - Err(err.into()) - }, - ValidatedTransaction::Unknown(hash, err) => { - self.listener.write().invalid(&hash, false); - Err(err.into()) - }, - } - } - - fn enforce_limits(&self) -> HashSet> { - let status = self.pool.read().status(); - let ready_limit = &self.options.ready; - let future_limit = &self.options.future; - - debug!(target: "txpool", "Pool Status: {:?}", status); - if ready_limit.is_exceeded(status.ready, status.ready_bytes) - || future_limit.is_exceeded(status.future, status.future_bytes) - { - debug!( - target: "txpool", - "Enforcing limits ({}/{}kB ready, {}/{}kB future", - ready_limit.count, ready_limit.total_bytes / 1024, - future_limit.count, future_limit.total_bytes / 1024, - ); - - // clean up the pool - let removed = { - let mut pool = self.pool.write(); - let removed = 
pool.enforce_limits(ready_limit, future_limit) - .into_iter().map(|x| x.hash.clone()).collect::>(); - // ban all removed transactions - self.rotator.ban(&Instant::now(), removed.iter().map(|x| x.clone())); - removed - }; - // run notifications - debug!(target: "txpool", "Enforcing limits: {} dropped", removed.len()); - let mut listener = self.listener.write(); - for h in &removed { - listener.dropped(h, None); - } - - removed - } else { - Default::default() - } - } - - /// Import a single extrinsic and starts to watch their progress in the pool. - pub fn submit_and_watch( - &self, - tx: ValidatedTransactionFor, - ) -> Result, BlockHash>, B::Error> { - match tx { - ValidatedTransaction::Valid(tx) => { - let hash = self.api.hash_and_length(&tx.data).0; - let watcher = self.listener.write().create_watcher(hash); - self.submit(std::iter::once(ValidatedTransaction::Valid(tx))) - .pop() - .expect("One extrinsic passed; one result returned; qed") - .map(|_| watcher) - }, - ValidatedTransaction::Invalid(hash, err) => { - self.rotator.ban(&Instant::now(), std::iter::once(hash)); - Err(err.into()) - }, - ValidatedTransaction::Unknown(_, err) => Err(err.into()), - } - } - - /// Resubmits revalidated transactions back to the pool. - /// - /// Removes and then submits passed transactions and all dependent transactions. - /// Transactions that are missing from the pool are not submitted. 
- pub fn resubmit(&self, mut updated_transactions: HashMap, ValidatedTransactionFor>) { - #[derive(Debug, Clone, Copy, PartialEq)] - enum Status { Future, Ready, Failed, Dropped }; - - let (mut initial_statuses, final_statuses) = { - let mut pool = self.pool.write(); - - // remove all passed transactions from the ready/future queues - // (this may remove additional transactions as well) - // - // for every transaction that has an entry in the `updated_transactions`, - // we store updated validation result in txs_to_resubmit - // for every transaction that has no entry in the `updated_transactions`, - // we store last validation result (i.e. the pool entry) in txs_to_resubmit - let mut initial_statuses = HashMap::new(); - let mut txs_to_resubmit = Vec::with_capacity(updated_transactions.len()); - while !updated_transactions.is_empty() { - let hash = updated_transactions.keys().next().cloned().expect("transactions is not empty; qed"); - - // note we are not considering tx with hash invalid here - we just want - // to remove it along with dependent transactions and `remove_subtree()` - // does exactly what we need - let removed = pool.remove_subtree(&[hash.clone()]); - for removed_tx in removed { - let removed_hash = removed_tx.hash.clone(); - let updated_transaction = updated_transactions.remove(&removed_hash); - let tx_to_resubmit = if let Some(updated_tx) = updated_transaction { - updated_tx - } else { - // in most cases we'll end up in successful `try_unwrap`, but if not - // we still need to reinsert transaction back to the pool => duplicate call - let transaction = match Arc::try_unwrap(removed_tx) { - Ok(transaction) => transaction, - Err(transaction) => transaction.duplicate(), - }; - ValidatedTransaction::Valid(transaction) - }; - - initial_statuses.insert(removed_hash.clone(), Status::Ready); - txs_to_resubmit.push((removed_hash, tx_to_resubmit)); - } - // make sure to remove the hash even if it's not present in the pool any more. 
- updated_transactions.remove(&hash); - } - - // if we're rejecting future transactions, then insertion order matters here: - // if tx1 depends on tx2, then if tx1 is inserted before tx2, then it goes - // to the future queue and gets rejected immediately - // => let's temporary stop rejection and clear future queue before return - pool.with_futures_enabled(|pool, reject_future_transactions| { - // now resubmit all removed transactions back to the pool - let mut final_statuses = HashMap::new(); - for (hash, tx_to_resubmit) in txs_to_resubmit { - match tx_to_resubmit { - ValidatedTransaction::Valid(tx) => match pool.import(tx) { - Ok(imported) => match imported { - base::Imported::Ready { promoted, failed, removed, .. } => { - final_statuses.insert(hash, Status::Ready); - for hash in promoted { - final_statuses.insert(hash, Status::Ready); - } - for hash in failed { - final_statuses.insert(hash, Status::Failed); - } - for tx in removed { - final_statuses.insert(tx.hash.clone(), Status::Dropped); - } - }, - base::Imported::Future { .. 
} => { - final_statuses.insert(hash, Status::Future); - }, - }, - Err(err) => { - // we do not want to fail if single transaction import has failed - // nor we do want to propagate this error, because it could tx unknown to caller - // => let's just notify listeners (and issue debug message) - warn!( - target: "txpool", - "[{:?}] Removing invalid transaction from update: {}", - hash, - err, - ); - final_statuses.insert(hash, Status::Failed); - }, - }, - ValidatedTransaction::Invalid(_, _) | ValidatedTransaction::Unknown(_, _) => { - final_statuses.insert(hash, Status::Failed); - }, - } - } - - // if the pool is configured to reject future transactions, let's clear the future - // queue, updating final statuses as required - if reject_future_transactions { - for future_tx in pool.clear_future() { - final_statuses.insert(future_tx.hash.clone(), Status::Dropped); - } - } - - (initial_statuses, final_statuses) - }) - }; - - // and now let's notify listeners about status changes - let mut listener = self.listener.write(); - for (hash, final_status) in final_statuses { - let initial_status = initial_statuses.remove(&hash); - if initial_status.is_none() || Some(final_status) != initial_status { - match final_status { - Status::Future => listener.future(&hash), - Status::Ready => listener.ready(&hash, None), - Status::Dropped => listener.dropped(&hash, None), - Status::Failed => listener.invalid(&hash, initial_status.is_some()), - } - } - } - } - - /// For each extrinsic, returns tags that it provides (if known), or None (if it is unknown). 
- pub fn extrinsics_tags(&self, hashes: &[ExHash]) -> Vec>> { - self.pool.read().by_hashes(&hashes) - .into_iter() - .map(|existing_in_pool| existing_in_pool - .map(|transaction| transaction.provides.iter().cloned().collect())) - .collect() - } - - /// Get ready transaction by hash - pub fn ready_by_hash(&self, hash: &ExHash) -> Option> { - self.pool.read().ready_by_hash(hash) - } - - /// Prunes ready transactions that provide given list of tags. - pub fn prune_tags( - &self, - tags: impl IntoIterator, - ) -> Result, ExtrinsicFor>, B::Error> { - // Perform tag-based pruning in the base pool - let status = self.pool.write().prune_tags(tags); - // Notify event listeners of all transactions - // that were promoted to `Ready` or were dropped. - { - let mut listener = self.listener.write(); - for promoted in &status.promoted { - fire_events(&mut *listener, promoted); - } - for f in &status.failed { - listener.dropped(f, None); - } - } - - Ok(status) - } - - /// Resubmit transactions that have been revalidated after prune_tags call. - pub fn resubmit_pruned( - &self, - at: &BlockId, - known_imported_hashes: impl IntoIterator> + Clone, - pruned_hashes: Vec>, - pruned_xts: Vec>, - ) -> Result<(), B::Error> { - debug_assert_eq!(pruned_hashes.len(), pruned_xts.len()); - - // Resubmit pruned transactions - let results = self.submit(pruned_xts); - - // Collect the hashes of transactions that now became invalid (meaning that they are successfully pruned). - let hashes = results - .into_iter() - .enumerate() - .filter_map(|(idx, r)| match r.map_err(error::IntoPoolError::into_pool_error) { - Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx].clone()), - _ => None, - }); - // Fire `pruned` notifications for collected hashes and make sure to include - // `known_imported_hashes` since they were just imported as part of the block. 
- let hashes = hashes.chain(known_imported_hashes.into_iter()); - self.fire_pruned(at, hashes)?; - - // perform regular cleanup of old transactions in the pool - // and update temporary bans. - self.clear_stale(at)?; - Ok(()) - } - - /// Fire notifications for pruned transactions. - pub fn fire_pruned( - &self, - at: &BlockId, - hashes: impl Iterator>, - ) -> Result<(), B::Error> { - let header_hash = self.api.block_id_to_hash(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())?; - let mut listener = self.listener.write(); - let mut set = HashSet::with_capacity(hashes.size_hint().0); - for h in hashes { - // `hashes` has possibly duplicate hashes. - // we'd like to send out the `InBlock` notification only once. - if !set.contains(&h) { - listener.pruned(header_hash, &h); - set.insert(h); - } - } - Ok(()) - } - - /// Removes stale transactions from the pool. - /// - /// Stale transactions are transaction beyond their longevity period. - /// Note this function does not remove transactions that are already included in the chain. - /// See `prune_tags` if you want this. - pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { - let block_number = self.api.block_id_to_number(at)? - .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())? - .saturated_into::(); - let now = Instant::now(); - let to_remove = { - self.ready() - .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) - .map(|tx| tx.hash.clone()) - .collect::>() - }; - let futures_to_remove: Vec> = { - let p = self.pool.read(); - let mut hashes = Vec::new(); - for tx in p.futures() { - if self.rotator.ban_if_stale(&now, block_number, &tx) { - hashes.push(tx.hash.clone()); - } - } - hashes - }; - // removing old transactions - self.remove_invalid(&to_remove); - self.remove_invalid(&futures_to_remove); - // clear banned transactions timeouts - self.rotator.clear_timeouts(&now); - - Ok(()) - } - - /// Get rotator reference. 
- #[cfg(test)] - pub fn rotator(&self) -> &PoolRotator> { - &self.rotator - } - - /// Get api reference. - pub fn api(&self) -> &B { - &self.api - } - - /// Return an event stream of notifications for when transactions are imported to the pool. - /// - /// Consumers of this stream should use the `ready` method to actually get the - /// pending transactions in the right order. - pub fn import_notification_stream(&self) -> EventStream> { - let (sink, stream) = tracing_unbounded("mpsc_import_notifications"); - self.import_notification_sinks.lock().push(sink); - stream - } - - /// Invoked when extrinsics are broadcasted. - pub fn on_broadcasted(&self, propagated: HashMap, Vec>) { - let mut listener = self.listener.write(); - for (hash, peers) in propagated.into_iter() { - listener.broadcasted(&hash, peers); - } - } - - /// Remove a subtree of transactions from the pool and mark them invalid. - /// - /// The transactions passed as an argument will be additionally banned - /// to prevent them from entering the pool right away. - /// Note this is not the case for the dependent transactions - those may - /// still be valid so we want to be able to re-import them. - pub fn remove_invalid(&self, hashes: &[ExHash]) -> Vec> { - // early exit in case there is no invalid transactions. - if hashes.is_empty() { - return vec![]; - } - - debug!(target: "txpool", "Removing invalid transactions: {:?}", hashes); - - // temporarily ban invalid transactions - self.rotator.ban(&Instant::now(), hashes.iter().cloned()); - - let invalid = self.pool.write().remove_subtree(hashes); - - debug!(target: "txpool", "Removed invalid transactions: {:?}", invalid); - - let mut listener = self.listener.write(); - for tx in &invalid { - listener.invalid(&tx.hash, true); - } - - invalid - } - - /// Get an iterator for ready transactions ordered by priority - pub fn ready(&self) -> impl Iterator> + Send { - self.pool.read().ready() - } - - /// Returns pool status. 
- pub fn status(&self) -> PoolStatus { - self.pool.read().status() - } - - /// Notify all watchers that transactions in the block with hash have been finalized - pub async fn on_block_finalized(&self, block_hash: BlockHash) -> Result<(), B::Error> { - debug!(target: "txpool", "Attempting to notify watchers of finalization for {}", block_hash); - // fetch all extrinsic hashes - if let Some(txs) = self.api.block_body(&BlockId::Hash(block_hash.clone())).await? { - let tx_hashes = txs.into_iter() - .map(|tx| self.api.hash_and_length(&tx).0) - .collect::>(); - // notify the watcher that these extrinsics have been finalized - self.listener.write().finalized(block_hash, tx_hashes); - } - - Ok(()) - } - - /// Notify the listener of retracted blocks - pub fn on_block_retracted(&self, block_hash: BlockHash) { - self.listener.write().retracted(block_hash) - } + /// Create a new transaction pool. + pub fn new(options: Options, api: Arc) -> Self { + let base_pool = base::BasePool::new(options.reject_future_transactions); + ValidatedPool { + options, + listener: Default::default(), + api, + pool: RwLock::new(base_pool), + import_notification_sinks: Default::default(), + rotator: Default::default(), + } + } + + /// Bans given set of hashes. + pub fn ban(&self, now: &Instant, hashes: impl IntoIterator>) { + self.rotator.ban(now, hashes) + } + + /// Returns true if transaction with given hash is currently banned from the pool. + pub fn is_banned(&self, hash: &ExHash) -> bool { + self.rotator.is_banned(hash) + } + + /// Imports a bunch of pre-validated transactions to the pool. 
+ pub fn submit(&self, txs: T) -> Vec, B::Error>> + where + T: IntoIterator>, + { + let results = txs + .into_iter() + .map(|validated_tx| self.submit_one(validated_tx)) + .collect::>(); + + // only enforce limits if there is at least one imported transaction + let removed = if results.iter().any(|res| res.is_ok()) { + self.enforce_limits() + } else { + Default::default() + }; + + results + .into_iter() + .map(|res| match res { + Ok(ref hash) if removed.contains(hash) => { + Err(error::Error::ImmediatelyDropped.into()) + } + other => other, + }) + .collect() + } + + /// Submit single pre-validated transaction to the pool. + fn submit_one(&self, tx: ValidatedTransactionFor) -> Result, B::Error> { + match tx { + ValidatedTransaction::Valid(tx) => { + let imported = self.pool.write().import(tx)?; + + if let base::Imported::Ready { ref hash, .. } = imported { + self.import_notification_sinks + .lock() + .retain(|sink| sink.unbounded_send(hash.clone()).is_ok()); + } + + let mut listener = self.listener.write(); + fire_events(&mut *listener, &imported); + Ok(imported.hash().clone()) + } + ValidatedTransaction::Invalid(hash, err) => { + self.rotator.ban(&Instant::now(), std::iter::once(hash)); + Err(err.into()) + } + ValidatedTransaction::Unknown(hash, err) => { + self.listener.write().invalid(&hash, false); + Err(err.into()) + } + } + } + + fn enforce_limits(&self) -> HashSet> { + let status = self.pool.read().status(); + let ready_limit = &self.options.ready; + let future_limit = &self.options.future; + + debug!(target: "txpool", "Pool Status: {:?}", status); + if ready_limit.is_exceeded(status.ready, status.ready_bytes) + || future_limit.is_exceeded(status.future, status.future_bytes) + { + debug!( + target: "txpool", + "Enforcing limits ({}/{}kB ready, {}/{}kB future", + ready_limit.count, ready_limit.total_bytes / 1024, + future_limit.count, future_limit.total_bytes / 1024, + ); + + // clean up the pool + let removed = { + let mut pool = self.pool.write(); + let 
removed = pool + .enforce_limits(ready_limit, future_limit) + .into_iter() + .map(|x| x.hash.clone()) + .collect::>(); + // ban all removed transactions + self.rotator + .ban(&Instant::now(), removed.iter().map(|x| x.clone())); + removed + }; + // run notifications + debug!(target: "txpool", "Enforcing limits: {} dropped", removed.len()); + let mut listener = self.listener.write(); + for h in &removed { + listener.dropped(h, None); + } + + removed + } else { + Default::default() + } + } + + /// Import a single extrinsic and starts to watch their progress in the pool. + pub fn submit_and_watch( + &self, + tx: ValidatedTransactionFor, + ) -> Result, BlockHash>, B::Error> { + match tx { + ValidatedTransaction::Valid(tx) => { + let hash = self.api.hash_and_length(&tx.data).0; + let watcher = self.listener.write().create_watcher(hash); + self.submit(std::iter::once(ValidatedTransaction::Valid(tx))) + .pop() + .expect("One extrinsic passed; one result returned; qed") + .map(|_| watcher) + } + ValidatedTransaction::Invalid(hash, err) => { + self.rotator.ban(&Instant::now(), std::iter::once(hash)); + Err(err.into()) + } + ValidatedTransaction::Unknown(_, err) => Err(err.into()), + } + } + + /// Resubmits revalidated transactions back to the pool. + /// + /// Removes and then submits passed transactions and all dependent transactions. + /// Transactions that are missing from the pool are not submitted. 
+ pub fn resubmit( + &self, + mut updated_transactions: HashMap, ValidatedTransactionFor>, + ) { + #[derive(Debug, Clone, Copy, PartialEq)] + enum Status { + Future, + Ready, + Failed, + Dropped, + }; + + let (mut initial_statuses, final_statuses) = { + let mut pool = self.pool.write(); + + // remove all passed transactions from the ready/future queues + // (this may remove additional transactions as well) + // + // for every transaction that has an entry in the `updated_transactions`, + // we store updated validation result in txs_to_resubmit + // for every transaction that has no entry in the `updated_transactions`, + // we store last validation result (i.e. the pool entry) in txs_to_resubmit + let mut initial_statuses = HashMap::new(); + let mut txs_to_resubmit = Vec::with_capacity(updated_transactions.len()); + while !updated_transactions.is_empty() { + let hash = updated_transactions + .keys() + .next() + .cloned() + .expect("transactions is not empty; qed"); + + // note we are not considering tx with hash invalid here - we just want + // to remove it along with dependent transactions and `remove_subtree()` + // does exactly what we need + let removed = pool.remove_subtree(&[hash.clone()]); + for removed_tx in removed { + let removed_hash = removed_tx.hash.clone(); + let updated_transaction = updated_transactions.remove(&removed_hash); + let tx_to_resubmit = if let Some(updated_tx) = updated_transaction { + updated_tx + } else { + // in most cases we'll end up in successful `try_unwrap`, but if not + // we still need to reinsert transaction back to the pool => duplicate call + let transaction = match Arc::try_unwrap(removed_tx) { + Ok(transaction) => transaction, + Err(transaction) => transaction.duplicate(), + }; + ValidatedTransaction::Valid(transaction) + }; + + initial_statuses.insert(removed_hash.clone(), Status::Ready); + txs_to_resubmit.push((removed_hash, tx_to_resubmit)); + } + // make sure to remove the hash even if it's not present in the pool any 
more. + updated_transactions.remove(&hash); + } + + // if we're rejecting future transactions, then insertion order matters here: + // if tx1 depends on tx2, then if tx1 is inserted before tx2, then it goes + // to the future queue and gets rejected immediately + // => let's temporary stop rejection and clear future queue before return + pool.with_futures_enabled(|pool, reject_future_transactions| { + // now resubmit all removed transactions back to the pool + let mut final_statuses = HashMap::new(); + for (hash, tx_to_resubmit) in txs_to_resubmit { + match tx_to_resubmit { + ValidatedTransaction::Valid(tx) => match pool.import(tx) { + Ok(imported) => match imported { + base::Imported::Ready { + promoted, + failed, + removed, + .. + } => { + final_statuses.insert(hash, Status::Ready); + for hash in promoted { + final_statuses.insert(hash, Status::Ready); + } + for hash in failed { + final_statuses.insert(hash, Status::Failed); + } + for tx in removed { + final_statuses.insert(tx.hash.clone(), Status::Dropped); + } + } + base::Imported::Future { .. 
} => { + final_statuses.insert(hash, Status::Future); + } + }, + Err(err) => { + // we do not want to fail if single transaction import has failed + // nor we do want to propagate this error, because it could tx unknown to caller + // => let's just notify listeners (and issue debug message) + warn!( + target: "txpool", + "[{:?}] Removing invalid transaction from update: {}", + hash, + err, + ); + final_statuses.insert(hash, Status::Failed); + } + }, + ValidatedTransaction::Invalid(_, _) + | ValidatedTransaction::Unknown(_, _) => { + final_statuses.insert(hash, Status::Failed); + } + } + } + + // if the pool is configured to reject future transactions, let's clear the future + // queue, updating final statuses as required + if reject_future_transactions { + for future_tx in pool.clear_future() { + final_statuses.insert(future_tx.hash.clone(), Status::Dropped); + } + } + + (initial_statuses, final_statuses) + }) + }; + + // and now let's notify listeners about status changes + let mut listener = self.listener.write(); + for (hash, final_status) in final_statuses { + let initial_status = initial_statuses.remove(&hash); + if initial_status.is_none() || Some(final_status) != initial_status { + match final_status { + Status::Future => listener.future(&hash), + Status::Ready => listener.ready(&hash, None), + Status::Dropped => listener.dropped(&hash, None), + Status::Failed => listener.invalid(&hash, initial_status.is_some()), + } + } + } + } + + /// For each extrinsic, returns tags that it provides (if known), or None (if it is unknown). 
+ pub fn extrinsics_tags(&self, hashes: &[ExHash]) -> Vec>> { + self.pool + .read() + .by_hashes(&hashes) + .into_iter() + .map(|existing_in_pool| { + existing_in_pool.map(|transaction| transaction.provides.iter().cloned().collect()) + }) + .collect() + } + + /// Get ready transaction by hash + pub fn ready_by_hash(&self, hash: &ExHash) -> Option> { + self.pool.read().ready_by_hash(hash) + } + + /// Prunes ready transactions that provide given list of tags. + pub fn prune_tags( + &self, + tags: impl IntoIterator, + ) -> Result, ExtrinsicFor>, B::Error> { + // Perform tag-based pruning in the base pool + let status = self.pool.write().prune_tags(tags); + // Notify event listeners of all transactions + // that were promoted to `Ready` or were dropped. + { + let mut listener = self.listener.write(); + for promoted in &status.promoted { + fire_events(&mut *listener, promoted); + } + for f in &status.failed { + listener.dropped(f, None); + } + } + + Ok(status) + } + + /// Resubmit transactions that have been revalidated after prune_tags call. + pub fn resubmit_pruned( + &self, + at: &BlockId, + known_imported_hashes: impl IntoIterator> + Clone, + pruned_hashes: Vec>, + pruned_xts: Vec>, + ) -> Result<(), B::Error> { + debug_assert_eq!(pruned_hashes.len(), pruned_xts.len()); + + // Resubmit pruned transactions + let results = self.submit(pruned_xts); + + // Collect the hashes of transactions that now became invalid (meaning that they are successfully pruned). + let hashes = results.into_iter().enumerate().filter_map(|(idx, r)| { + match r.map_err(error::IntoPoolError::into_pool_error) { + Err(Ok(error::Error::InvalidTransaction(_))) => Some(pruned_hashes[idx].clone()), + _ => None, + } + }); + // Fire `pruned` notifications for collected hashes and make sure to include + // `known_imported_hashes` since they were just imported as part of the block. 
+ let hashes = hashes.chain(known_imported_hashes.into_iter()); + self.fire_pruned(at, hashes)?; + + // perform regular cleanup of old transactions in the pool + // and update temporary bans. + self.clear_stale(at)?; + Ok(()) + } + + /// Fire notifications for pruned transactions. + pub fn fire_pruned( + &self, + at: &BlockId, + hashes: impl Iterator>, + ) -> Result<(), B::Error> { + let header_hash = self + .api + .block_id_to_hash(at)? + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())?; + let mut listener = self.listener.write(); + let mut set = HashSet::with_capacity(hashes.size_hint().0); + for h in hashes { + // `hashes` has possibly duplicate hashes. + // we'd like to send out the `InBlock` notification only once. + if !set.contains(&h) { + listener.pruned(header_hash, &h); + set.insert(h); + } + } + Ok(()) + } + + /// Removes stale transactions from the pool. + /// + /// Stale transactions are transaction beyond their longevity period. + /// Note this function does not remove transactions that are already included in the chain. + /// See `prune_tags` if you want this. + pub fn clear_stale(&self, at: &BlockId) -> Result<(), B::Error> { + let block_number = self + .api + .block_id_to_number(at)? + .ok_or_else(|| error::Error::InvalidBlockId(format!("{:?}", at)).into())? + .saturated_into::(); + let now = Instant::now(); + let to_remove = { + self.ready() + .filter(|tx| self.rotator.ban_if_stale(&now, block_number, &tx)) + .map(|tx| tx.hash.clone()) + .collect::>() + }; + let futures_to_remove: Vec> = { + let p = self.pool.read(); + let mut hashes = Vec::new(); + for tx in p.futures() { + if self.rotator.ban_if_stale(&now, block_number, &tx) { + hashes.push(tx.hash.clone()); + } + } + hashes + }; + // removing old transactions + self.remove_invalid(&to_remove); + self.remove_invalid(&futures_to_remove); + // clear banned transactions timeouts + self.rotator.clear_timeouts(&now); + + Ok(()) + } + + /// Get rotator reference. 
+ #[cfg(test)] + pub fn rotator(&self) -> &PoolRotator> { + &self.rotator + } + + /// Get api reference. + pub fn api(&self) -> &B { + &self.api + } + + /// Return an event stream of notifications for when transactions are imported to the pool. + /// + /// Consumers of this stream should use the `ready` method to actually get the + /// pending transactions in the right order. + pub fn import_notification_stream(&self) -> EventStream> { + let (sink, stream) = tracing_unbounded("mpsc_import_notifications"); + self.import_notification_sinks.lock().push(sink); + stream + } + + /// Invoked when extrinsics are broadcasted. + pub fn on_broadcasted(&self, propagated: HashMap, Vec>) { + let mut listener = self.listener.write(); + for (hash, peers) in propagated.into_iter() { + listener.broadcasted(&hash, peers); + } + } + + /// Remove a subtree of transactions from the pool and mark them invalid. + /// + /// The transactions passed as an argument will be additionally banned + /// to prevent them from entering the pool right away. + /// Note this is not the case for the dependent transactions - those may + /// still be valid so we want to be able to re-import them. + pub fn remove_invalid(&self, hashes: &[ExHash]) -> Vec> { + // early exit in case there is no invalid transactions. + if hashes.is_empty() { + return vec![]; + } + + debug!(target: "txpool", "Removing invalid transactions: {:?}", hashes); + + // temporarily ban invalid transactions + self.rotator.ban(&Instant::now(), hashes.iter().cloned()); + + let invalid = self.pool.write().remove_subtree(hashes); + + debug!(target: "txpool", "Removed invalid transactions: {:?}", invalid); + + let mut listener = self.listener.write(); + for tx in &invalid { + listener.invalid(&tx.hash, true); + } + + invalid + } + + /// Get an iterator for ready transactions ordered by priority + pub fn ready(&self) -> impl Iterator> + Send { + self.pool.read().ready() + } + + /// Returns pool status. 
+ pub fn status(&self) -> PoolStatus { + self.pool.read().status() + } + + /// Notify all watchers that transactions in the block with hash have been finalized + pub async fn on_block_finalized(&self, block_hash: BlockHash) -> Result<(), B::Error> { + debug!(target: "txpool", "Attempting to notify watchers of finalization for {}", block_hash); + // fetch all extrinsic hashes + if let Some(txs) = self + .api + .block_body(&BlockId::Hash(block_hash.clone())) + .await? + { + let tx_hashes = txs + .into_iter() + .map(|tx| self.api.hash_and_length(&tx).0) + .collect::>(); + // notify the watcher that these extrinsics have been finalized + self.listener.write().finalized(block_hash, tx_hashes); + } + + Ok(()) + } + + /// Notify the listener of retracted blocks + pub fn on_block_retracted(&self, block_hash: BlockHash) { + self.listener.write().retracted(block_hash) + } } -fn fire_events( - listener: &mut Listener, - imported: &base::Imported, -) where - H: hash::Hash + Eq + traits::Member + Serialize, - B: ChainApi, +fn fire_events(listener: &mut Listener, imported: &base::Imported) +where + H: hash::Hash + Eq + traits::Member + Serialize, + B: ChainApi, { - match *imported { - base::Imported::Ready { ref promoted, ref failed, ref removed, ref hash } => { - listener.ready(hash, None); - for f in failed { - listener.invalid(f, true); - } - for r in removed { - listener.dropped(&r.hash, Some(hash)); - } - for p in promoted { - listener.ready(p, None); - } - }, - base::Imported::Future { ref hash } => { - listener.future(hash) - }, - } + match *imported { + base::Imported::Ready { + ref promoted, + ref failed, + ref removed, + ref hash, + } => { + listener.ready(hash, None); + for f in failed { + listener.invalid(f, true); + } + for r in removed { + listener.dropped(&r.hash, Some(hash)); + } + for p in promoted { + listener.ready(p, None); + } + } + base::Imported::Future { ref hash } => listener.future(hash), + } } diff --git a/client/transaction-pool/graph/src/watcher.rs 
b/client/transaction-pool/graph/src/watcher.rs index d54cc2718b..c61ede8bcb 100644 --- a/client/transaction-pool/graph/src/watcher.rs +++ b/client/transaction-pool/graph/src/watcher.rs @@ -18,120 +18,118 @@ use futures::Stream; use sp_transaction_pool::TransactionStatus; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; /// Extrinsic watcher. /// /// Represents a stream of status updates for particular extrinsic. #[derive(Debug)] pub struct Watcher { - receiver: TracingUnboundedReceiver>, - hash: H, + receiver: TracingUnboundedReceiver>, + hash: H, } impl Watcher { - /// Returns the transaction hash. - pub fn hash(&self) -> &H { - &self.hash - } - - /// Pipe the notifications to given sink. - /// - /// Make sure to drive the future to completion. - pub fn into_stream(self) -> impl Stream> { - self.receiver - } + /// Returns the transaction hash. + pub fn hash(&self) -> &H { + &self.hash + } + + /// Pipe the notifications to given sink. + /// + /// Make sure to drive the future to completion. + pub fn into_stream(self) -> impl Stream> { + self.receiver + } } /// Sender part of the watcher. Exposed only for testing purposes. #[derive(Debug)] pub struct Sender { - receivers: Vec>>, - is_finalized: bool, + receivers: Vec>>, + is_finalized: bool, } impl Default for Sender { - fn default() -> Self { - Sender { - receivers: Default::default(), - is_finalized: false, - } - } + fn default() -> Self { + Sender { + receivers: Default::default(), + is_finalized: false, + } + } } impl Sender { - /// Add a new watcher to this sender object. - pub fn new_watcher(&mut self, hash: H) -> Watcher { - let (tx, receiver) = tracing_unbounded("mpsc_txpool_watcher"); - self.receivers.push(tx); - Watcher { - receiver, - hash, - } - } - - /// Transaction became ready. 
- pub fn ready(&mut self) { - self.send(TransactionStatus::Ready) - } - - /// Transaction was moved to future. - pub fn future(&mut self) { - self.send(TransactionStatus::Future) - } - - /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. - pub fn usurped(&mut self, hash: H) { - self.send(TransactionStatus::Usurped(hash)); - self.is_finalized = true; - } - - /// Extrinsic has been included in block with given hash. - pub fn in_block(&mut self, hash: BH) { - self.send(TransactionStatus::InBlock(hash)); - } - - /// Extrinsic has been finalized by a finality gadget. - pub fn finalized(&mut self, hash: BH) { - self.send(TransactionStatus::Finalized(hash)); - self.is_finalized = true; - } - - /// The block this extrinsic was included in has been retracted - pub fn finality_timeout(&mut self, hash: BH) { - self.send(TransactionStatus::FinalityTimeout(hash)); - self.is_finalized = true; - } - - /// The block this extrinsic was included in has been retracted - pub fn retracted(&mut self, hash: BH) { - self.send(TransactionStatus::Retracted(hash)); - } - - /// Extrinsic has been marked as invalid by the block builder. - pub fn invalid(&mut self) { - self.send(TransactionStatus::Invalid); - // we mark as finalized as there are no more notifications - self.is_finalized = true; - } - - /// Transaction has been dropped from the pool because of the limit. - pub fn dropped(&mut self) { - self.send(TransactionStatus::Dropped); - self.is_finalized = true; - } - - /// The extrinsic has been broadcast to the given peers. - pub fn broadcast(&mut self, peers: Vec) { - self.send(TransactionStatus::Broadcast(peers)) - } - - /// Returns true if the are no more listeners for this extrinsic or it was finalized. 
- pub fn is_done(&self) -> bool { - self.is_finalized || self.receivers.is_empty() - } - - fn send(&mut self, status: TransactionStatus) { - self.receivers.retain(|sender| sender.unbounded_send(status.clone()).is_ok()) - } + /// Add a new watcher to this sender object. + pub fn new_watcher(&mut self, hash: H) -> Watcher { + let (tx, receiver) = tracing_unbounded("mpsc_txpool_watcher"); + self.receivers.push(tx); + Watcher { receiver, hash } + } + + /// Transaction became ready. + pub fn ready(&mut self) { + self.send(TransactionStatus::Ready) + } + + /// Transaction was moved to future. + pub fn future(&mut self) { + self.send(TransactionStatus::Future) + } + + /// Some state change (perhaps another extrinsic was included) rendered this extrinsic invalid. + pub fn usurped(&mut self, hash: H) { + self.send(TransactionStatus::Usurped(hash)); + self.is_finalized = true; + } + + /// Extrinsic has been included in block with given hash. + pub fn in_block(&mut self, hash: BH) { + self.send(TransactionStatus::InBlock(hash)); + } + + /// Extrinsic has been finalized by a finality gadget. + pub fn finalized(&mut self, hash: BH) { + self.send(TransactionStatus::Finalized(hash)); + self.is_finalized = true; + } + + /// The block this extrinsic was included in has been retracted + pub fn finality_timeout(&mut self, hash: BH) { + self.send(TransactionStatus::FinalityTimeout(hash)); + self.is_finalized = true; + } + + /// The block this extrinsic was included in has been retracted + pub fn retracted(&mut self, hash: BH) { + self.send(TransactionStatus::Retracted(hash)); + } + + /// Extrinsic has been marked as invalid by the block builder. + pub fn invalid(&mut self) { + self.send(TransactionStatus::Invalid); + // we mark as finalized as there are no more notifications + self.is_finalized = true; + } + + /// Transaction has been dropped from the pool because of the limit. 
+ pub fn dropped(&mut self) { + self.send(TransactionStatus::Dropped); + self.is_finalized = true; + } + + /// The extrinsic has been broadcast to the given peers. + pub fn broadcast(&mut self, peers: Vec) { + self.send(TransactionStatus::Broadcast(peers)) + } + + /// Returns true if the are no more listeners for this extrinsic or it was finalized. + pub fn is_done(&self) -> bool { + self.is_finalized || self.receivers.is_empty() + } + + fn send(&mut self, status: TransactionStatus) { + self.receivers + .retain(|sender| sender.unbounded_send(status.clone()).is_ok()) + } } diff --git a/client/transaction-pool/src/api.rs b/client/transaction-pool/src/api.rs index 2e590ccad8..fbe0fb9011 100644 --- a/client/transaction-pool/src/api.rs +++ b/client/transaction-pool/src/api.rs @@ -16,239 +16,284 @@ //! Chain api required for the transaction pool. -use std::{marker::PhantomData, pin::Pin, sync::Arc}; use codec::{Decode, Encode}; use futures::{ - channel::oneshot, executor::{ThreadPool, ThreadPoolBuilder}, future::{Future, FutureExt, ready, Ready}, + channel::oneshot, + executor::{ThreadPool, ThreadPoolBuilder}, + future::{ready, Future, FutureExt, Ready}, }; +use std::{marker::PhantomData, pin::Pin, sync::Arc}; use sc_client_api::{ - blockchain::HeaderBackend, - light::{Fetcher, RemoteCallRequest, RemoteBodyRequest}, - BlockBackend, + blockchain::HeaderBackend, + light::{Fetcher, RemoteBodyRequest, RemoteCallRequest}, + BlockBackend, }; +use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_runtime::{ - generic::BlockId, traits::{self, Block as BlockT, BlockIdTo, Header as HeaderT, Hash as HashT}, - transaction_validity::{TransactionValidity, TransactionSource}, + generic::BlockId, + traits::{self, Block as BlockT, BlockIdTo, Hash as HashT, Header as HeaderT}, + transaction_validity::{TransactionSource, TransactionValidity}, }; use sp_transaction_pool::runtime_api::TaggedTransactionQueue; -use sp_api::{ProvideRuntimeApi, ApiExt}; use crate::error::{self, Error}; /// The 
transaction pool logic for full client. pub struct FullChainApi { - client: Arc, - pool: ThreadPool, - _marker: PhantomData, + client: Arc, + pool: ThreadPool, + _marker: PhantomData, } -impl FullChainApi where - Block: BlockT, - Client: ProvideRuntimeApi + BlockIdTo, +impl FullChainApi +where + Block: BlockT, + Client: ProvideRuntimeApi + BlockIdTo, { - /// Create new transaction pool logic. - pub fn new(client: Arc) -> Self { - FullChainApi { - client, - pool: ThreadPoolBuilder::new() - .pool_size(2) - .name_prefix("txpool-verifier") - .create() - .expect("Failed to spawn verifier threads, that are critical for node operation."), - _marker: Default::default() - } - } + /// Create new transaction pool logic. + pub fn new(client: Arc) -> Self { + FullChainApi { + client, + pool: ThreadPoolBuilder::new() + .pool_size(2) + .name_prefix("txpool-verifier") + .create() + .expect("Failed to spawn verifier threads, that are critical for node operation."), + _marker: Default::default(), + } + } } -impl sc_transaction_graph::ChainApi for FullChainApi where - Block: BlockT, - Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, - Client: Send + Sync + 'static, - Client::Api: TaggedTransactionQueue, - sp_api::ApiErrorFor: Send, +impl sc_transaction_graph::ChainApi for FullChainApi +where + Block: BlockT, + Client: ProvideRuntimeApi + BlockBackend + BlockIdTo, + Client: Send + Sync + 'static, + Client::Api: TaggedTransactionQueue, + sp_api::ApiErrorFor: Send, { - type Block = Block; - type Hash = Block::Hash; - type Error = error::Error; - type ValidationFuture = Pin> + Send>>; - type BodyFuture = Ready::Extrinsic>>>>; - - fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - ready(self.client.block_body(&id).map_err(|e| error::Error::from(e))) - } - - fn validate_transaction( - &self, - at: &BlockId, - source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor, - ) -> Self::ValidationFuture { - let (tx, rx) = oneshot::channel(); - let client = 
self.client.clone(); - let at = at.clone(); - - self.pool.spawn_ok(futures_diagnose::diagnose("validate-transaction", async move { - let span = tracing::span!(tracing::Level::DEBUG, "validate_transaction::check_version"); - let guard = span.enter(); - let runtime_api = client.runtime_api(); - let has_v2 = runtime_api - .has_api_with::, _>( - &at, |v| v >= 2, - ) - .unwrap_or_default(); - std::mem::drop(guard); - let span = tracing::span!(tracing::Level::DEBUG, "validate_transaction"); - let _guard = span.enter(); - let res = if has_v2 { - runtime_api.validate_transaction(&at, source, uxt) - } else { - #[allow(deprecated)] // old validate_transaction - runtime_api.validate_transaction_before_version_2(&at, uxt) - }; - let res = res.map_err(|e| Error::RuntimeApi(format!("{:?}", e))); - if let Err(e) = tx.send(res) { - log::warn!("Unable to send a validate transaction result: {:?}", e); - } - })); - - Box::pin(async move { - match rx.await { - Ok(r) => r, - Err(_) => Err(Error::RuntimeApi("Validation was canceled".into())), - } - }) - } - - fn block_id_to_number( - &self, - at: &BlockId, - ) -> error::Result>> { - self.client.to_number(at).map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) - } - - fn block_id_to_hash( - &self, - at: &BlockId, - ) -> error::Result>> { - self.client.to_hash(at).map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) - } - - fn hash_and_length(&self, ex: &sc_transaction_graph::ExtrinsicFor) -> (Self::Hash, usize) { - ex.using_encoded(|x| { - ( as traits::Hash>::hash(x), x.len()) - }) - } + type Block = Block; + type Hash = Block::Hash; + type Error = error::Error; + type ValidationFuture = + Pin> + Send>>; + type BodyFuture = Ready::Extrinsic>>>>; + + fn block_body(&self, id: &BlockId) -> Self::BodyFuture { + ready( + self.client + .block_body(&id) + .map_err(|e| error::Error::from(e)), + ) + } + + fn validate_transaction( + &self, + at: &BlockId, + source: TransactionSource, + uxt: sc_transaction_graph::ExtrinsicFor, + ) -> 
Self::ValidationFuture { + let (tx, rx) = oneshot::channel(); + let client = self.client.clone(); + let at = at.clone(); + + self.pool.spawn_ok(futures_diagnose::diagnose( + "validate-transaction", + async move { + let span = + tracing::span!(tracing::Level::DEBUG, "validate_transaction::check_version"); + let guard = span.enter(); + let runtime_api = client.runtime_api(); + let has_v2 = runtime_api + .has_api_with::, _>( + &at, + |v| v >= 2, + ) + .unwrap_or_default(); + std::mem::drop(guard); + let span = tracing::span!(tracing::Level::DEBUG, "validate_transaction"); + let _guard = span.enter(); + let res = if has_v2 { + runtime_api.validate_transaction(&at, source, uxt) + } else { + #[allow(deprecated)] // old validate_transaction + runtime_api.validate_transaction_before_version_2(&at, uxt) + }; + let res = res.map_err(|e| Error::RuntimeApi(format!("{:?}", e))); + if let Err(e) = tx.send(res) { + log::warn!("Unable to send a validate transaction result: {:?}", e); + } + }, + )); + + Box::pin(async move { + match rx.await { + Ok(r) => r, + Err(_) => Err(Error::RuntimeApi("Validation was canceled".into())), + } + }) + } + + fn block_id_to_number( + &self, + at: &BlockId, + ) -> error::Result>> { + self.client + .to_number(at) + .map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) + } + + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> error::Result>> { + self.client + .to_hash(at) + .map_err(|e| Error::BlockIdConversion(format!("{:?}", e))) + } + + fn hash_and_length( + &self, + ex: &sc_transaction_graph::ExtrinsicFor, + ) -> (Self::Hash, usize) { + ex.using_encoded(|x| ( as traits::Hash>::hash(x), x.len())) + } } /// The transaction pool logic for light client. 
pub struct LightChainApi { - client: Arc, - fetcher: Arc, - _phantom: PhantomData, + client: Arc, + fetcher: Arc, + _phantom: PhantomData, } -impl LightChainApi where - Block: BlockT, - Client: HeaderBackend, - F: Fetcher, +impl LightChainApi +where + Block: BlockT, + Client: HeaderBackend, + F: Fetcher, { - /// Create new transaction pool logic. - pub fn new(client: Arc, fetcher: Arc) -> Self { - LightChainApi { - client, - fetcher, - _phantom: Default::default(), - } - } + /// Create new transaction pool logic. + pub fn new(client: Arc, fetcher: Arc) -> Self { + LightChainApi { + client, + fetcher, + _phantom: Default::default(), + } + } } -impl sc_transaction_graph::ChainApi for LightChainApi where - Block: BlockT, - Client: HeaderBackend + 'static, - F: Fetcher + 'static, +impl sc_transaction_graph::ChainApi for LightChainApi +where + Block: BlockT, + Client: HeaderBackend + 'static, + F: Fetcher + 'static, { - type Block = Block; - type Hash = Block::Hash; - type Error = error::Error; - type ValidationFuture = Box> + Send + Unpin>; - type BodyFuture = Pin::Extrinsic>>>> + Send>>; - - fn validate_transaction( - &self, - at: &BlockId, - source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor, - ) -> Self::ValidationFuture { - let header_hash = self.client.expect_block_hash_from_id(at); - let header_and_hash = header_hash - .and_then(|header_hash| self.client.expect_header(BlockId::Hash(header_hash)) - .map(|header| (header_hash, header))); - let (block, header) = match header_and_hash { - Ok((header_hash, header)) => (header_hash, header), - Err(err) => return Box::new(ready(Err(err.into()))), - }; - let remote_validation_request = self.fetcher.remote_call(RemoteCallRequest { - block, - header, - method: "TaggedTransactionQueue_validate_transaction".into(), - call_data: (source, uxt).encode(), - retry_count: None, - }); - let remote_validation_request = remote_validation_request.then(move |result| { - let result: error::Result = result - 
.map_err(Into::into) - .and_then(|result| Decode::decode(&mut &result[..]) - .map_err(|e| Error::RuntimeApi( - format!("Error decoding tx validation result: {:?}", e) - )) - ); - ready(result) - }); - - Box::new(remote_validation_request) - } - - fn block_id_to_number(&self, at: &BlockId) -> error::Result>> { - Ok(self.client.block_number_from_id(at)?) - } - - fn block_id_to_hash(&self, at: &BlockId) -> error::Result>> { - Ok(self.client.block_hash_from_id(at)?) - } - - fn hash_and_length(&self, ex: &sc_transaction_graph::ExtrinsicFor) -> (Self::Hash, usize) { - ex.using_encoded(|x| { - (<::Hashing as HashT>::hash(x), x.len()) - }) - } - - fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - let header = self.client.header(*id) - .and_then(|h| h.ok_or(sp_blockchain::Error::UnknownBlock(format!("{}", id)))); - let header = match header { - Ok(header) => header, - Err(err) => { - log::warn!(target: "txpool", "Failed to query header: {:?}", err); - return Box::pin(ready(Ok(None))); - } - }; - - let fetcher = self.fetcher.clone(); - async move { - let transactions = fetcher.remote_body({ - RemoteBodyRequest { - header, - retry_count: None, - } - }) - .await - .unwrap_or_else(|e| { - log::warn!(target: "txpool", "Failed to fetch block body: {:?}", e); - Vec::new() - }); - - Ok(Some(transactions)) - }.boxed() - } + type Block = Block; + type Hash = Block::Hash; + type Error = error::Error; + type ValidationFuture = + Box> + Send + Unpin>; + type BodyFuture = Pin< + Box< + dyn Future::Extrinsic>>>> + + Send, + >, + >; + + fn validate_transaction( + &self, + at: &BlockId, + source: TransactionSource, + uxt: sc_transaction_graph::ExtrinsicFor, + ) -> Self::ValidationFuture { + let header_hash = self.client.expect_block_hash_from_id(at); + let header_and_hash = header_hash.and_then(|header_hash| { + self.client + .expect_header(BlockId::Hash(header_hash)) + .map(|header| (header_hash, header)) + }); + let (block, header) = match header_and_hash { + Ok((header_hash, 
header)) => (header_hash, header), + Err(err) => return Box::new(ready(Err(err.into()))), + }; + let remote_validation_request = self.fetcher.remote_call(RemoteCallRequest { + block, + header, + method: "TaggedTransactionQueue_validate_transaction".into(), + call_data: (source, uxt).encode(), + retry_count: None, + }); + let remote_validation_request = remote_validation_request.then(move |result| { + let result: error::Result = + result.map_err(Into::into).and_then(|result| { + Decode::decode(&mut &result[..]).map_err(|e| { + Error::RuntimeApi(format!("Error decoding tx validation result: {:?}", e)) + }) + }); + ready(result) + }); + + Box::new(remote_validation_request) + } + + fn block_id_to_number( + &self, + at: &BlockId, + ) -> error::Result>> { + Ok(self.client.block_number_from_id(at)?) + } + + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> error::Result>> { + Ok(self.client.block_hash_from_id(at)?) + } + + fn hash_and_length( + &self, + ex: &sc_transaction_graph::ExtrinsicFor, + ) -> (Self::Hash, usize) { + ex.using_encoded(|x| { + ( + <::Hashing as HashT>::hash(x), + x.len(), + ) + }) + } + + fn block_body(&self, id: &BlockId) -> Self::BodyFuture { + let header = self + .client + .header(*id) + .and_then(|h| h.ok_or(sp_blockchain::Error::UnknownBlock(format!("{}", id)))); + let header = match header { + Ok(header) => header, + Err(err) => { + log::warn!(target: "txpool", "Failed to query header: {:?}", err); + return Box::pin(ready(Ok(None))); + } + }; + + let fetcher = self.fetcher.clone(); + async move { + let transactions = fetcher + .remote_body({ + RemoteBodyRequest { + header, + retry_count: None, + } + }) + .await + .unwrap_or_else(|e| { + log::warn!(target: "txpool", "Failed to fetch block body: {:?}", e); + Vec::new() + }); + + Ok(Some(transactions)) + } + .boxed() + } } diff --git a/client/transaction-pool/src/error.rs b/client/transaction-pool/src/error.rs index fa48b387c4..3b36e08af5 100644 --- a/client/transaction-pool/src/error.rs +++ 
b/client/transaction-pool/src/error.rs @@ -24,34 +24,34 @@ pub type Result = std::result::Result; /// Transaction pool error type. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Pool error. - Pool(TxPoolError), - /// Blockchain error. - Blockchain(sp_blockchain::Error), - /// Error while converting a `BlockId`. - #[from(ignore)] - BlockIdConversion(String), - /// Error while calling the runtime api. - #[from(ignore)] - RuntimeApi(String), + /// Pool error. + Pool(TxPoolError), + /// Blockchain error. + Blockchain(sp_blockchain::Error), + /// Error while converting a `BlockId`. + #[from(ignore)] + BlockIdConversion(String), + /// Error while calling the runtime api. + #[from(ignore)] + RuntimeApi(String), } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Pool(ref err) => Some(err), - Error::Blockchain(ref err) => Some(err), - Error::BlockIdConversion(_) => None, - Error::RuntimeApi(_) => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Pool(ref err) => Some(err), + Error::Blockchain(ref err) => Some(err), + Error::BlockIdConversion(_) => None, + Error::RuntimeApi(_) => None, + } + } } impl sp_transaction_pool::error::IntoPoolError for Error { - fn into_pool_error(self) -> std::result::Result { - match self { - Error::Pool(e) => Ok(e), - e => Err(e), - } - } + fn into_pool_error(self) -> std::result::Result { + match self { + Error::Pool(e) => Ok(e), + e => Err(e), + } + } } diff --git a/client/transaction-pool/src/lib.rs b/client/transaction-pool/src/lib.rs index e095191c57..f36ee55513 100644 --- a/client/transaction-pool/src/lib.rs +++ b/client/transaction-pool/src/lib.rs @@ -16,511 +16,548 @@ //! Substrate transaction pool implementation. 
-#![recursion_limit="256"] +#![recursion_limit = "256"] #![warn(missing_docs)] #![warn(unused_extern_crates)] mod api; -mod revalidation; mod metrics; +mod revalidation; pub mod error; #[cfg(any(feature = "test-helpers", test))] pub mod testing; -pub use sc_transaction_graph as txpool; pub use crate::api::{FullChainApi, LightChainApi}; +pub use sc_transaction_graph as txpool; -use std::{collections::HashMap, sync::Arc, pin::Pin}; -use futures::{prelude::*, future::ready, channel::oneshot}; +use futures::{channel::oneshot, future::ready, prelude::*}; use parking_lot::Mutex; +use std::{collections::HashMap, pin::Pin, sync::Arc}; use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, NumberFor, AtLeast32Bit, Extrinsic, Zero}, + generic::BlockId, + traits::{AtLeast32Bit, Block as BlockT, Extrinsic, NumberFor, Zero}, }; use sp_transaction_pool::{ - TransactionPool, PoolStatus, ImportNotificationStream, TxHash, TransactionFor, - TransactionStatusStreamFor, MaintainedTransactionPool, PoolFuture, ChainEvent, - TransactionSource, + ChainEvent, ImportNotificationStream, MaintainedTransactionPool, PoolFuture, PoolStatus, + TransactionFor, TransactionPool, TransactionSource, TransactionStatusStreamFor, TxHash, }; use wasm_timer::Instant; -use prometheus_endpoint::Registry as PrometheusRegistry; use crate::metrics::MetricsLink as PrometheusMetrics; +use prometheus_endpoint::Registry as PrometheusRegistry; -type BoxedReadyIterator = Box>> + Send>; +type BoxedReadyIterator = + Box>> + Send>; -type ReadyIteratorFor = BoxedReadyIterator, sc_transaction_graph::ExtrinsicFor>; +type ReadyIteratorFor = BoxedReadyIterator< + sc_transaction_graph::ExHash, + sc_transaction_graph::ExtrinsicFor, +>; -type PolledIterator = Pin> + Send>>; +type PolledIterator = Pin> + Send>>; /// Basic implementation of transaction pool that can be customized by providing PoolApi. 
pub struct BasicPool - where - Block: BlockT, - PoolApi: sc_transaction_graph::ChainApi, +where + Block: BlockT, + PoolApi: sc_transaction_graph::ChainApi, { - pool: Arc>, - api: Arc, - revalidation_strategy: Arc>>>, - revalidation_queue: Arc>, - ready_poll: Arc, Block>>>, - metrics: PrometheusMetrics, + pool: Arc>, + api: Arc, + revalidation_strategy: Arc>>>, + revalidation_queue: Arc>, + ready_poll: Arc, Block>>>, + metrics: PrometheusMetrics, } struct ReadyPoll { - updated_at: NumberFor, - pollers: Vec<(NumberFor, oneshot::Sender)>, + updated_at: NumberFor, + pollers: Vec<(NumberFor, oneshot::Sender)>, } impl Default for ReadyPoll { - fn default() -> Self { - Self { - updated_at: NumberFor::::zero(), - pollers: Default::default(), - } - } + fn default() -> Self { + Self { + updated_at: NumberFor::::zero(), + pollers: Default::default(), + } + } } impl ReadyPoll { - fn trigger(&mut self, number: NumberFor, iterator_factory: impl Fn() -> T) { - self.updated_at = number; - - let mut idx = 0; - while idx < self.pollers.len() { - if self.pollers[idx].0 <= number { - let poller_sender = self.pollers.swap_remove(idx); - log::debug!(target: "txpool", "Sending ready signal at block {}", number); - let _ = poller_sender.1.send(iterator_factory()); - } else { - idx += 1; - } - } - } - - fn add(&mut self, number: NumberFor) -> oneshot::Receiver { - let (sender, receiver) = oneshot::channel(); - self.pollers.push((number, sender)); - receiver - } - - fn updated_at(&self) -> NumberFor { - self.updated_at - } + fn trigger(&mut self, number: NumberFor, iterator_factory: impl Fn() -> T) { + self.updated_at = number; + + let mut idx = 0; + while idx < self.pollers.len() { + if self.pollers[idx].0 <= number { + let poller_sender = self.pollers.swap_remove(idx); + log::debug!(target: "txpool", "Sending ready signal at block {}", number); + let _ = poller_sender.1.send(iterator_factory()); + } else { + idx += 1; + } + } + } + + fn add(&mut self, number: NumberFor) -> 
oneshot::Receiver { + let (sender, receiver) = oneshot::channel(); + self.pollers.push((number, sender)); + receiver + } + + fn updated_at(&self) -> NumberFor { + self.updated_at + } } #[cfg(not(target_os = "unknown"))] impl parity_util_mem::MallocSizeOf for BasicPool where - PoolApi: sc_transaction_graph::ChainApi, - PoolApi::Hash: parity_util_mem::MallocSizeOf, - Block: BlockT, + PoolApi: sc_transaction_graph::ChainApi, + PoolApi::Hash: parity_util_mem::MallocSizeOf, + Block: BlockT, { - fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - // other entries insignificant or non-primary references - self.pool.size_of(ops) - } + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + // other entries insignificant or non-primary references + self.pool.size_of(ops) + } } /// Type of revalidation. pub enum RevalidationType { - /// Light revalidation type. - /// - /// During maintenance, transaction pool makes periodic revalidation - /// of all transactions depending on number of blocks or time passed. - /// Also this kind of revalidation does not resubmit transactions from - /// retracted blocks, since it is too expensive. - Light, - - /// Full revalidation type. - /// - /// During maintenance, transaction pool revalidates some fixed amount of - /// transactions from the pool of valid transactions. - Full, + /// Light revalidation type. + /// + /// During maintenance, transaction pool makes periodic revalidation + /// of all transactions depending on number of blocks or time passed. + /// Also this kind of revalidation does not resubmit transactions from + /// retracted blocks, since it is too expensive. + Light, + + /// Full revalidation type. + /// + /// During maintenance, transaction pool revalidates some fixed amount of + /// transactions from the pool of valid transactions. 
+ Full, } impl BasicPool - where - Block: BlockT, - PoolApi: sc_transaction_graph::ChainApi + 'static, +where + Block: BlockT, + PoolApi: sc_transaction_graph::ChainApi + 'static, { - /// Create new basic transaction pool with provided api. - /// - /// It will also optionally return background task that might be started by the - /// caller. - pub fn new( - options: sc_transaction_graph::Options, - pool_api: Arc, - prometheus: Option<&PrometheusRegistry>, - ) -> (Self, Option + Send>>>) { - Self::with_revalidation_type(options, pool_api, prometheus, RevalidationType::Full) - } - - /// Create new basic transaction pool with provided api, for tests. - #[cfg(test)] - pub fn new_test( - pool_api: Arc, - ) -> (Self, Pin + Send>>, intervalier::BackSignalControl) { - let pool = Arc::new(sc_transaction_graph::Pool::new(Default::default(), pool_api.clone())); - let (revalidation_queue, background_task, notifier) = - revalidation::RevalidationQueue::new_test(pool_api.clone(), pool.clone()); - ( - BasicPool { - api: pool_api, - pool, - revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), - ready_poll: Default::default(), - metrics: Default::default(), - }, - background_task, - notifier, - ) - } - - /// Create new basic transaction pool with provided api and custom - /// revalidation type. 
- pub fn with_revalidation_type( - options: sc_transaction_graph::Options, - pool_api: Arc, - prometheus: Option<&PrometheusRegistry>, - revalidation_type: RevalidationType, - ) -> (Self, Option + Send>>>) { - let pool = Arc::new(sc_transaction_graph::Pool::new(options, pool_api.clone())); - let (revalidation_queue, background_task) = match revalidation_type { - RevalidationType::Light => (revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), None), - RevalidationType::Full => { - let (queue, background) = revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); - (queue, Some(background)) - }, - }; - - ( - BasicPool { - api: pool_api, - pool, - revalidation_queue: Arc::new(revalidation_queue), - revalidation_strategy: Arc::new(Mutex::new( - match revalidation_type { - RevalidationType::Light => RevalidationStrategy::Light(RevalidationStatus::NotScheduled), - RevalidationType::Full => RevalidationStrategy::Always, - } - )), - ready_poll: Default::default(), - metrics: PrometheusMetrics::new(prometheus), - }, - background_task, - ) - } - - /// Gets shared reference to the underlying pool. - pub fn pool(&self) -> &Arc> { - &self.pool - } + /// Create new basic transaction pool with provided api. + /// + /// It will also optionally return background task that might be started by the + /// caller. + pub fn new( + options: sc_transaction_graph::Options, + pool_api: Arc, + prometheus: Option<&PrometheusRegistry>, + ) -> (Self, Option + Send>>>) { + Self::with_revalidation_type(options, pool_api, prometheus, RevalidationType::Full) + } + + /// Create new basic transaction pool with provided api, for tests. 
+ #[cfg(test)] + pub fn new_test( + pool_api: Arc, + ) -> ( + Self, + Pin + Send>>, + intervalier::BackSignalControl, + ) { + let pool = Arc::new(sc_transaction_graph::Pool::new( + Default::default(), + pool_api.clone(), + )); + let (revalidation_queue, background_task, notifier) = + revalidation::RevalidationQueue::new_test(pool_api.clone(), pool.clone()); + ( + BasicPool { + api: pool_api, + pool, + revalidation_queue: Arc::new(revalidation_queue), + revalidation_strategy: Arc::new(Mutex::new(RevalidationStrategy::Always)), + ready_poll: Default::default(), + metrics: Default::default(), + }, + background_task, + notifier, + ) + } + + /// Create new basic transaction pool with provided api and custom + /// revalidation type. + pub fn with_revalidation_type( + options: sc_transaction_graph::Options, + pool_api: Arc, + prometheus: Option<&PrometheusRegistry>, + revalidation_type: RevalidationType, + ) -> (Self, Option + Send>>>) { + let pool = Arc::new(sc_transaction_graph::Pool::new(options, pool_api.clone())); + let (revalidation_queue, background_task) = match revalidation_type { + RevalidationType::Light => ( + revalidation::RevalidationQueue::new(pool_api.clone(), pool.clone()), + None, + ), + RevalidationType::Full => { + let (queue, background) = + revalidation::RevalidationQueue::new_background(pool_api.clone(), pool.clone()); + (queue, Some(background)) + } + }; + + ( + BasicPool { + api: pool_api, + pool, + revalidation_queue: Arc::new(revalidation_queue), + revalidation_strategy: Arc::new(Mutex::new(match revalidation_type { + RevalidationType::Light => { + RevalidationStrategy::Light(RevalidationStatus::NotScheduled) + } + RevalidationType::Full => RevalidationStrategy::Always, + })), + ready_poll: Default::default(), + metrics: PrometheusMetrics::new(prometheus), + }, + background_task, + ) + } + + /// Gets shared reference to the underlying pool. 
+ pub fn pool(&self) -> &Arc> { + &self.pool + } } impl TransactionPool for BasicPool - where - Block: BlockT, - PoolApi: 'static + sc_transaction_graph::ChainApi, +where + Block: BlockT, + PoolApi: 'static + sc_transaction_graph::ChainApi, { - type Block = PoolApi::Block; - type Hash = sc_transaction_graph::ExHash; - type InPoolTransaction = sc_transaction_graph::base_pool::Transaction, TransactionFor>; - type Error = PoolApi::Error; - - fn submit_at( - &self, - at: &BlockId, - source: TransactionSource, - xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error> { - let pool = self.pool.clone(); - let at = *at; - - self.metrics.report(|metrics| metrics.validations_scheduled.inc_by(xts.len() as u64)); - - let metrics = self.metrics.clone(); - async move { - let tx_count = xts.len(); - let res = pool.submit_at(&at, source, xts, false).await; - metrics.report(|metrics| metrics.validations_finished.inc_by(tx_count as u64)); - res - }.boxed() - } - - fn submit_one( - &self, - at: &BlockId, - source: TransactionSource, - xt: TransactionFor, - ) -> PoolFuture, Self::Error> { - let pool = self.pool.clone(); - let at = *at; - - self.metrics.report(|metrics| metrics.validations_scheduled.inc()); - - let metrics = self.metrics.clone(); - async move { - let res = pool.submit_one(&at, source, xt).await; - - metrics.report(|metrics| metrics.validations_finished.inc()); - res - - }.boxed() - } - - fn submit_and_watch( - &self, - at: &BlockId, - source: TransactionSource, - xt: TransactionFor, - ) -> PoolFuture>, Self::Error> { - let at = *at; - let pool = self.pool.clone(); - - self.metrics.report(|metrics| metrics.validations_scheduled.inc()); - - let metrics = self.metrics.clone(); - async move { - let result = pool.submit_and_watch(&at, source, xt) - .map(|result| result.map(|watcher| Box::new(watcher.into_stream()) as _)) - .await; - - metrics.report(|metrics| metrics.validations_finished.inc()); - - result - }.boxed() - } - - fn remove_invalid(&self, hashes: &[TxHash]) -> 
Vec> { - self.pool.validated_pool().remove_invalid(hashes) - } - - fn status(&self) -> PoolStatus { - self.pool.validated_pool().status() - } - - fn import_notification_stream(&self) -> ImportNotificationStream> { - self.pool.validated_pool().import_notification_stream() - } - - fn hash_of(&self, xt: &TransactionFor) -> TxHash { - self.pool.hash_of(xt) - } - - fn on_broadcasted(&self, propagations: HashMap, Vec>) { - self.pool.validated_pool().on_broadcasted(propagations) - } - - fn ready_transaction(&self, hash: &TxHash) -> Option> { - self.pool.validated_pool().ready_by_hash(hash) - } - - fn ready_at(&self, at: NumberFor) -> PolledIterator { - if self.ready_poll.lock().updated_at() >= at { - let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); - return Box::pin(futures::future::ready(iterator)); - } - - Box::pin( - self.ready_poll - .lock() - .add(at) - .map(|received| received.unwrap_or_else(|e| { - log::warn!("Error receiving pending set: {:?}", e); - Box::new(vec![].into_iter()) - })) - ) - } - - fn ready(&self) -> ReadyIteratorFor { - Box::new(self.pool.validated_pool().ready()) - } + type Block = PoolApi::Block; + type Hash = sc_transaction_graph::ExHash; + type InPoolTransaction = + sc_transaction_graph::base_pool::Transaction, TransactionFor>; + type Error = PoolApi::Error; + + fn submit_at( + &self, + at: &BlockId, + source: TransactionSource, + xts: Vec>, + ) -> PoolFuture, Self::Error>>, Self::Error> { + let pool = self.pool.clone(); + let at = *at; + + self.metrics + .report(|metrics| metrics.validations_scheduled.inc_by(xts.len() as u64)); + + let metrics = self.metrics.clone(); + async move { + let tx_count = xts.len(); + let res = pool.submit_at(&at, source, xts, false).await; + metrics.report(|metrics| metrics.validations_finished.inc_by(tx_count as u64)); + res + } + .boxed() + } + + fn submit_one( + &self, + at: &BlockId, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture, Self::Error> { + let pool = 
self.pool.clone(); + let at = *at; + + self.metrics + .report(|metrics| metrics.validations_scheduled.inc()); + + let metrics = self.metrics.clone(); + async move { + let res = pool.submit_one(&at, source, xt).await; + + metrics.report(|metrics| metrics.validations_finished.inc()); + res + } + .boxed() + } + + fn submit_and_watch( + &self, + at: &BlockId, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture>, Self::Error> { + let at = *at; + let pool = self.pool.clone(); + + self.metrics + .report(|metrics| metrics.validations_scheduled.inc()); + + let metrics = self.metrics.clone(); + async move { + let result = pool + .submit_and_watch(&at, source, xt) + .map(|result| result.map(|watcher| Box::new(watcher.into_stream()) as _)) + .await; + + metrics.report(|metrics| metrics.validations_finished.inc()); + + result + } + .boxed() + } + + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec> { + self.pool.validated_pool().remove_invalid(hashes) + } + + fn status(&self) -> PoolStatus { + self.pool.validated_pool().status() + } + + fn import_notification_stream(&self) -> ImportNotificationStream> { + self.pool.validated_pool().import_notification_stream() + } + + fn hash_of(&self, xt: &TransactionFor) -> TxHash { + self.pool.hash_of(xt) + } + + fn on_broadcasted(&self, propagations: HashMap, Vec>) { + self.pool.validated_pool().on_broadcasted(propagations) + } + + fn ready_transaction(&self, hash: &TxHash) -> Option> { + self.pool.validated_pool().ready_by_hash(hash) + } + + fn ready_at(&self, at: NumberFor) -> PolledIterator { + if self.ready_poll.lock().updated_at() >= at { + let iterator: ReadyIteratorFor = Box::new(self.pool.validated_pool().ready()); + return Box::pin(futures::future::ready(iterator)); + } + + Box::pin(self.ready_poll.lock().add(at).map(|received| { + received.unwrap_or_else(|e| { + log::warn!("Error receiving pending set: {:?}", e); + Box::new(vec![].into_iter()) + }) + })) + } + + fn ready(&self) -> ReadyIteratorFor { + 
Box::new(self.pool.validated_pool().ready()) + } } #[cfg_attr(test, derive(Debug))] enum RevalidationStatus { - /// The revalidation has never been completed. - NotScheduled, - /// The revalidation is scheduled. - Scheduled(Option, Option), - /// The revalidation is in progress. - InProgress, + /// The revalidation has never been completed. + NotScheduled, + /// The revalidation is scheduled. + Scheduled(Option, Option), + /// The revalidation is in progress. + InProgress, } enum RevalidationStrategy { - Always, - Light(RevalidationStatus), + Always, + Light(RevalidationStatus), } struct RevalidationAction { - revalidate: bool, - resubmit: bool, + revalidate: bool, + resubmit: bool, } impl RevalidationStrategy { - pub fn clear(&mut self) { - if let Self::Light(status) = self { - status.clear() - } - } - - pub fn next( - &mut self, - block: N, - revalidate_time_period: Option, - revalidate_block_period: Option, - ) -> RevalidationAction { - match self { - Self::Light(status) => RevalidationAction { - revalidate: status.next_required( - block, - revalidate_time_period, - revalidate_block_period, - ), - resubmit: false, - }, - Self::Always => RevalidationAction { - revalidate: true, - resubmit: true, - } - } - } + pub fn clear(&mut self) { + if let Self::Light(status) = self { + status.clear() + } + } + + pub fn next( + &mut self, + block: N, + revalidate_time_period: Option, + revalidate_block_period: Option, + ) -> RevalidationAction { + match self { + Self::Light(status) => RevalidationAction { + revalidate: status.next_required( + block, + revalidate_time_period, + revalidate_block_period, + ), + resubmit: false, + }, + Self::Always => RevalidationAction { + revalidate: true, + resubmit: true, + }, + } + } } impl RevalidationStatus { - /// Called when revalidation is completed. - pub fn clear(&mut self) { - *self = Self::NotScheduled; - } - - /// Returns true if revalidation is required. 
- pub fn next_required( - &mut self, - block: N, - revalidate_time_period: Option, - revalidate_block_period: Option, - ) -> bool { - match *self { - Self::NotScheduled => { - *self = Self::Scheduled( - revalidate_time_period.map(|period| Instant::now() + period), - revalidate_block_period.map(|period| block + period), - ); - false - } - Self::Scheduled(revalidate_at_time, revalidate_at_block) => { - let is_required = revalidate_at_time.map(|at| Instant::now() >= at).unwrap_or(false) - || revalidate_at_block.map(|at| block >= at).unwrap_or(false); - if is_required { - *self = Self::InProgress; - } - is_required - } - Self::InProgress => false, - } - } + /// Called when revalidation is completed. + pub fn clear(&mut self) { + *self = Self::NotScheduled; + } + + /// Returns true if revalidation is required. + pub fn next_required( + &mut self, + block: N, + revalidate_time_period: Option, + revalidate_block_period: Option, + ) -> bool { + match *self { + Self::NotScheduled => { + *self = Self::Scheduled( + revalidate_time_period.map(|period| Instant::now() + period), + revalidate_block_period.map(|period| block + period), + ); + false + } + Self::Scheduled(revalidate_at_time, revalidate_at_block) => { + let is_required = revalidate_at_time + .map(|at| Instant::now() >= at) + .unwrap_or(false) + || revalidate_at_block.map(|at| block >= at).unwrap_or(false); + if is_required { + *self = Self::InProgress; + } + is_required + } + Self::InProgress => false, + } + } } impl MaintainedTransactionPool for BasicPool - where - Block: BlockT, - PoolApi: 'static + sc_transaction_graph::ChainApi, +where + Block: BlockT, + PoolApi: 'static + sc_transaction_graph::ChainApi, { - fn maintain(&self, event: ChainEvent) -> Pin + Send>> { - match event { - ChainEvent::NewBlock { id, retracted, .. 
} => { - let id = id.clone(); - let pool = self.pool.clone(); - let api = self.api.clone(); - - let block_number = match api.block_id_to_number(&id) { - Ok(Some(number)) => number, - _ => { - log::trace!(target: "txpool", "Skipping chain event - no number for that block {:?}", id); - return Box::pin(ready(())); - } - }; - - let next_action = self.revalidation_strategy.lock().next( - block_number, - Some(std::time::Duration::from_secs(60)), - Some(20.into()), - ); - let revalidation_strategy = self.revalidation_strategy.clone(); - let retracted = retracted.clone(); - let revalidation_queue = self.revalidation_queue.clone(); - let ready_poll = self.ready_poll.clone(); - - async move { - // We don't query block if we won't prune anything - if !pool.validated_pool().status().is_empty() { - let hashes = api.block_body(&id).await - .unwrap_or_else(|e| { - log::warn!("Prune known transactions: error request {:?}!", e); - None - }) - .unwrap_or_default() - .into_iter() - .map(|tx| pool.hash_of(&tx)) - .collect::>(); - - if let Err(e) = pool.prune_known(&id, &hashes) { - log::error!("Cannot prune known in the pool {:?}!", e); - } - } - - let extra_pool = pool.clone(); - // After #5200 lands, this arguably might be moved to the handler of "all blocks notification". 
- ready_poll.lock().trigger(block_number, move || Box::new(extra_pool.validated_pool().ready())); - - if next_action.resubmit { - let mut resubmit_transactions = Vec::new(); - - for retracted_hash in retracted { - // notify txs awaiting finality that it has been retracted - pool.validated_pool().on_block_retracted(retracted_hash.clone()); - - let block_transactions = api.block_body(&BlockId::hash(retracted_hash.clone())).await - .unwrap_or_else(|e| { - log::warn!("Failed to fetch block body {:?}!", e); - None - }) - .unwrap_or_default() - .into_iter() - .filter(|tx| tx.is_signed().unwrap_or(true)); - - resubmit_transactions.extend(block_transactions); - } - if let Err(e) = pool.submit_at( - &id, - // These transactions are coming from retracted blocks, we should - // simply consider them external. - TransactionSource::External, - resubmit_transactions, - true - ).await { - log::debug!( - target: "txpool", - "[{:?}] Error re-submitting transactions: {:?}", id, e - ) - } - } - - if next_action.revalidate { - let hashes = pool.validated_pool().ready().map(|tx| tx.hash.clone()).collect(); - revalidation_queue.revalidate_later(block_number, hashes).await; - } - - revalidation_strategy.lock().clear(); - }.boxed() - } - ChainEvent::Finalized { hash } => { - let pool = self.pool.clone(); - async move { + fn maintain(&self, event: ChainEvent) -> Pin + Send>> { + match event { + ChainEvent::NewBlock { id, retracted, .. 
} => { + let id = id.clone(); + let pool = self.pool.clone(); + let api = self.api.clone(); + + let block_number = match api.block_id_to_number(&id) { + Ok(Some(number)) => number, + _ => { + log::trace!(target: "txpool", "Skipping chain event - no number for that block {:?}", id); + return Box::pin(ready(())); + } + }; + + let next_action = self.revalidation_strategy.lock().next( + block_number, + Some(std::time::Duration::from_secs(60)), + Some(20.into()), + ); + let revalidation_strategy = self.revalidation_strategy.clone(); + let retracted = retracted.clone(); + let revalidation_queue = self.revalidation_queue.clone(); + let ready_poll = self.ready_poll.clone(); + + async move { + // We don't query block if we won't prune anything + if !pool.validated_pool().status().is_empty() { + let hashes = api + .block_body(&id) + .await + .unwrap_or_else(|e| { + log::warn!("Prune known transactions: error request {:?}!", e); + None + }) + .unwrap_or_default() + .into_iter() + .map(|tx| pool.hash_of(&tx)) + .collect::>(); + + if let Err(e) = pool.prune_known(&id, &hashes) { + log::error!("Cannot prune known in the pool {:?}!", e); + } + } + + let extra_pool = pool.clone(); + // After #5200 lands, this arguably might be moved to the handler of "all blocks notification". 
+ ready_poll.lock().trigger(block_number, move || { + Box::new(extra_pool.validated_pool().ready()) + }); + + if next_action.resubmit { + let mut resubmit_transactions = Vec::new(); + + for retracted_hash in retracted { + // notify txs awaiting finality that it has been retracted + pool.validated_pool() + .on_block_retracted(retracted_hash.clone()); + + let block_transactions = api + .block_body(&BlockId::hash(retracted_hash.clone())) + .await + .unwrap_or_else(|e| { + log::warn!("Failed to fetch block body {:?}!", e); + None + }) + .unwrap_or_default() + .into_iter() + .filter(|tx| tx.is_signed().unwrap_or(true)); + + resubmit_transactions.extend(block_transactions); + } + if let Err(e) = pool + .submit_at( + &id, + // These transactions are coming from retracted blocks, we should + // simply consider them external. + TransactionSource::External, + resubmit_transactions, + true, + ) + .await + { + log::debug!( + target: "txpool", + "[{:?}] Error re-submitting transactions: {:?}", id, e + ) + } + } + + if next_action.revalidate { + let hashes = pool + .validated_pool() + .ready() + .map(|tx| tx.hash.clone()) + .collect(); + revalidation_queue + .revalidate_later(block_number, hashes) + .await; + } + + revalidation_strategy.lock().clear(); + } + .boxed() + } + ChainEvent::Finalized { hash } => { + let pool = self.pool.clone(); + async move { if let Err(e) = pool.validated_pool().on_block_finalized(hash).await { log::warn!( target: "txpool", @@ -529,7 +566,7 @@ impl MaintainedTransactionPool for BasicPool ) } }.boxed() - } - } - } + } + } + } } diff --git a/client/transaction-pool/src/metrics.rs b/client/transaction-pool/src/metrics.rs index 78e49b3ca5..137a0237ef 100644 --- a/client/transaction-pool/src/metrics.rs +++ b/client/transaction-pool/src/metrics.rs @@ -24,46 +24,46 @@ use prometheus_endpoint::{register, Counter, PrometheusError, Registry, U64}; pub struct MetricsLink(Arc>); impl MetricsLink { - pub fn new(registry: Option<&Registry>) -> Self { - 
Self(Arc::new( - registry.and_then(|registry| - Metrics::register(registry) - .map_err(|err| { log::warn!("Failed to register prometheus metrics: {}", err); }) - .ok() - ) - )) - } + pub fn new(registry: Option<&Registry>) -> Self { + Self(Arc::new(registry.and_then(|registry| { + Metrics::register(registry) + .map_err(|err| { + log::warn!("Failed to register prometheus metrics: {}", err); + }) + .ok() + }))) + } - pub fn report(&self, do_this: impl FnOnce(&Metrics)) { - if let Some(metrics) = self.0.as_ref() { - do_this(metrics); - } - } + pub fn report(&self, do_this: impl FnOnce(&Metrics)) { + if let Some(metrics) = self.0.as_ref() { + do_this(metrics); + } + } } /// Transaction pool Prometheus metrics. pub struct Metrics { - pub validations_scheduled: Counter, - pub validations_finished: Counter, + pub validations_scheduled: Counter, + pub validations_finished: Counter, } impl Metrics { - pub fn register(registry: &Registry) -> Result { - Ok(Self { - validations_scheduled: register( - Counter::new( - "sub_txpool_validations_scheduled", - "Total number of transactions scheduled for validation", - )?, - registry, - )?, - validations_finished: register( - Counter::new( - "sub_txpool_validations_finished", - "Total number of transactions that finished validation", - )?, - registry, - )?, - }) - } + pub fn register(registry: &Registry) -> Result { + Ok(Self { + validations_scheduled: register( + Counter::new( + "sub_txpool_validations_scheduled", + "Total number of transactions scheduled for validation", + )?, + registry, + )?, + validations_finished: register( + Counter::new( + "sub_txpool_validations_finished", + "Total number of transactions that finished validation", + )?, + registry, + )?, + }) + } } diff --git a/client/transaction-pool/src/revalidation.rs b/client/transaction-pool/src/revalidation.rs index f203bf08a0..68503ec547 100644 --- a/client/transaction-pool/src/revalidation.rs +++ b/client/transaction-pool/src/revalidation.rs @@ -16,13 +16,17 @@ //! 
Pool periodic revalidation. -use std::{sync::Arc, pin::Pin, collections::{HashMap, HashSet, BTreeMap}}; +use std::{ + collections::{BTreeMap, HashMap, HashSet}, + pin::Pin, + sync::Arc, +}; -use sc_transaction_graph::{ChainApi, Pool, ExHash, NumberFor, ValidatedTransaction}; -use sp_runtime::traits::{Zero, SaturatedConversion}; +use sc_transaction_graph::{ChainApi, ExHash, NumberFor, Pool, ValidatedTransaction}; use sp_runtime::generic::BlockId; +use sp_runtime::traits::{SaturatedConversion, Zero}; use sp_runtime::transaction_validity::TransactionValidityError; -use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use futures::prelude::*; use std::time::Duration; @@ -36,19 +40,19 @@ const BACKGROUND_REVALIDATION_BATCH_SIZE: usize = 20; /// Payload from queue to worker. struct WorkerPayload { - at: NumberFor, - transactions: Vec>, + at: NumberFor, + transactions: Vec>, } /// Async revalidation worker. /// /// Implements future and can be spawned in place or in background. struct RevalidationWorker { - api: Arc, - pool: Arc>, - best_block: NumberFor, - block_ordered: BTreeMap, HashSet>>, - members: HashMap, NumberFor>, + api: Arc, + pool: Arc>, + best_block: NumberFor, + block_ordered: BTreeMap, HashSet>>, + members: HashMap, NumberFor>, } impl Unpin for RevalidationWorker {} @@ -58,321 +62,338 @@ impl Unpin for RevalidationWorker {} /// Each transaction is validated against chain, and invalid are /// removed from the `pool`, while valid are resubmitted. 
async fn batch_revalidate( - pool: Arc>, - api: Arc, - at: NumberFor, - batch: impl IntoIterator>, + pool: Arc>, + api: Arc, + at: NumberFor, + batch: impl IntoIterator>, ) { - let mut invalid_hashes = Vec::new(); - let mut revalidated = HashMap::new(); - - for ext_hash in batch { - let ext = match pool.validated_pool().ready_by_hash(&ext_hash) { - Some(ext) => ext, - None => continue, - }; - - match api.validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()).await { - Ok(Err(TransactionValidityError::Invalid(err))) => { - log::debug!(target: "txpool", "[{:?}]: Revalidation: invalid {:?}", ext_hash, err); - invalid_hashes.push(ext_hash); - }, - Ok(Err(TransactionValidityError::Unknown(err))) => { - // skipping unknown, they might be pushed by valid or invalid transaction - // when latter resubmitted. - log::trace!(target: "txpool", "[{:?}]: Unknown during revalidation: {:?}", ext_hash, err); - }, - Ok(Ok(validity)) => { - revalidated.insert( - ext_hash.clone(), - ValidatedTransaction::valid_at( - at.saturated_into::(), - ext_hash, - ext.source, - ext.data.clone(), - api.hash_and_length(&ext.data).1, - validity, - ) - ); - }, - Err(validation_err) => { - log::debug!( - target: "txpool", - "[{:?}]: Error during revalidation: {:?}. 
Removing.", - ext_hash, - validation_err - ); - invalid_hashes.push(ext_hash); - } - } - } - - pool.validated_pool().remove_invalid(&invalid_hashes); - if revalidated.len() > 0 { - pool.resubmit(revalidated); - } + let mut invalid_hashes = Vec::new(); + let mut revalidated = HashMap::new(); + + for ext_hash in batch { + let ext = match pool.validated_pool().ready_by_hash(&ext_hash) { + Some(ext) => ext, + None => continue, + }; + + match api + .validate_transaction(&BlockId::Number(at), ext.source, ext.data.clone()) + .await + { + Ok(Err(TransactionValidityError::Invalid(err))) => { + log::debug!(target: "txpool", "[{:?}]: Revalidation: invalid {:?}", ext_hash, err); + invalid_hashes.push(ext_hash); + } + Ok(Err(TransactionValidityError::Unknown(err))) => { + // skipping unknown, they might be pushed by valid or invalid transaction + // when latter resubmitted. + log::trace!(target: "txpool", "[{:?}]: Unknown during revalidation: {:?}", ext_hash, err); + } + Ok(Ok(validity)) => { + revalidated.insert( + ext_hash.clone(), + ValidatedTransaction::valid_at( + at.saturated_into::(), + ext_hash, + ext.source, + ext.data.clone(), + api.hash_and_length(&ext.data).1, + validity, + ), + ); + } + Err(validation_err) => { + log::debug!( + target: "txpool", + "[{:?}]: Error during revalidation: {:?}. 
Removing.", + ext_hash, + validation_err + ); + invalid_hashes.push(ext_hash); + } + } + } + + pool.validated_pool().remove_invalid(&invalid_hashes); + if revalidated.len() > 0 { + pool.resubmit(revalidated); + } } impl RevalidationWorker { - fn new( - api: Arc, - pool: Arc>, - ) -> Self { - Self { - api, - pool, - block_ordered: Default::default(), - members: Default::default(), - best_block: Zero::zero(), - } - } - - fn prepare_batch(&mut self) -> Vec> { - let mut queued_exts = Vec::new(); - let mut left = BACKGROUND_REVALIDATION_BATCH_SIZE; - - // Take maximum of count transaction by order - // which they got into the pool - while left > 0 { - let first_block = match self.block_ordered.keys().next().cloned() { - Some(bn) => bn, - None => break, - }; - let mut block_drained = false; - if let Some(extrinsics) = self.block_ordered.get_mut(&first_block) { - let to_queue = extrinsics.iter().take(left).cloned().collect::>(); - if to_queue.len() == extrinsics.len() { - block_drained = true; - } else { - for xt in &to_queue { - extrinsics.remove(xt); - } - } - left -= to_queue.len(); - queued_exts.extend(to_queue); - } - - if block_drained { - self.block_ordered.remove(&first_block); - } - } - - for hash in queued_exts.iter() { - self.members.remove(hash); - } - - queued_exts - } - - fn len(&self) -> usize { - self.block_ordered.iter().map(|b| b.1.len()).sum() - } - - fn push(&mut self, worker_payload: WorkerPayload) { - // we don't add something that already scheduled for revalidation - let transactions = worker_payload.transactions; - let block_number = worker_payload.at; - - for ext_hash in transactions { - // we don't add something that already scheduled for revalidation - if self.members.contains_key(&ext_hash) { - log::debug!( - target: "txpool", - "[{:?}] Skipped adding for revalidation: Already there.", - ext_hash, - ); - - continue; - } - - self.block_ordered.entry(block_number) - .and_modify(|value| { value.insert(ext_hash.clone()); }) - .or_insert_with(|| { - 
let mut bt = HashSet::new(); - bt.insert(ext_hash.clone()); - bt - }); - self.members.insert(ext_hash.clone(), block_number); - } - } - - /// Background worker main loop. - /// - /// It does two things: periodically tries to process some transactions - /// from the queue and also accepts messages to enqueue some more - /// transactions from the pool. - pub async fn run( - mut self, - from_queue: TracingUnboundedReceiver>, - interval: R, - ) where R: Send, R::Guard: Send - { - let interval = interval.into_stream().fuse(); - let from_queue = from_queue.fuse(); - futures::pin_mut!(interval, from_queue); - let this = &mut self; - - loop { - futures::select! { - _guard = interval.next() => { - let next_batch = this.prepare_batch(); - let batch_len = next_batch.len(); - - batch_revalidate(this.pool.clone(), this.api.clone(), this.best_block, next_batch).await; - - #[cfg(test)] - { - use intervalier::Guard; - // only trigger test events if something was processed - if batch_len == 0 { - _guard.expect("Always some() in tests").skip(); - } - } - - if batch_len > 0 || this.len() > 0 { - log::debug!( - target: "txpool", - "Revalidated {} transactions. Left in the queue for revalidation: {}.", - batch_len, - this.len(), - ); - } - }, - workload = from_queue.next() => { - match workload { - Some(worker_payload) => { - this.best_block = worker_payload.at; - this.push(worker_payload); - continue; - }, - // R.I.P. worker! 
- None => break, - } - } - } - } - } + fn new(api: Arc, pool: Arc>) -> Self { + Self { + api, + pool, + block_ordered: Default::default(), + members: Default::default(), + best_block: Zero::zero(), + } + } + + fn prepare_batch(&mut self) -> Vec> { + let mut queued_exts = Vec::new(); + let mut left = BACKGROUND_REVALIDATION_BATCH_SIZE; + + // Take maximum of count transaction by order + // which they got into the pool + while left > 0 { + let first_block = match self.block_ordered.keys().next().cloned() { + Some(bn) => bn, + None => break, + }; + let mut block_drained = false; + if let Some(extrinsics) = self.block_ordered.get_mut(&first_block) { + let to_queue = extrinsics.iter().take(left).cloned().collect::>(); + if to_queue.len() == extrinsics.len() { + block_drained = true; + } else { + for xt in &to_queue { + extrinsics.remove(xt); + } + } + left -= to_queue.len(); + queued_exts.extend(to_queue); + } + + if block_drained { + self.block_ordered.remove(&first_block); + } + } + + for hash in queued_exts.iter() { + self.members.remove(hash); + } + + queued_exts + } + + fn len(&self) -> usize { + self.block_ordered.iter().map(|b| b.1.len()).sum() + } + + fn push(&mut self, worker_payload: WorkerPayload) { + // we don't add something that already scheduled for revalidation + let transactions = worker_payload.transactions; + let block_number = worker_payload.at; + + for ext_hash in transactions { + // we don't add something that already scheduled for revalidation + if self.members.contains_key(&ext_hash) { + log::debug!( + target: "txpool", + "[{:?}] Skipped adding for revalidation: Already there.", + ext_hash, + ); + + continue; + } + + self.block_ordered + .entry(block_number) + .and_modify(|value| { + value.insert(ext_hash.clone()); + }) + .or_insert_with(|| { + let mut bt = HashSet::new(); + bt.insert(ext_hash.clone()); + bt + }); + self.members.insert(ext_hash.clone(), block_number); + } + } + + /// Background worker main loop. 
+ /// + /// It does two things: periodically tries to process some transactions + /// from the queue and also accepts messages to enqueue some more + /// transactions from the pool. + pub async fn run( + mut self, + from_queue: TracingUnboundedReceiver>, + interval: R, + ) where + R: Send, + R::Guard: Send, + { + let interval = interval.into_stream().fuse(); + let from_queue = from_queue.fuse(); + futures::pin_mut!(interval, from_queue); + let this = &mut self; + + loop { + futures::select! { + _guard = interval.next() => { + let next_batch = this.prepare_batch(); + let batch_len = next_batch.len(); + + batch_revalidate(this.pool.clone(), this.api.clone(), this.best_block, next_batch).await; + + #[cfg(test)] + { + use intervalier::Guard; + // only trigger test events if something was processed + if batch_len == 0 { + _guard.expect("Always some() in tests").skip(); + } + } + + if batch_len > 0 || this.len() > 0 { + log::debug!( + target: "txpool", + "Revalidated {} transactions. Left in the queue for revalidation: {}.", + batch_len, + this.len(), + ); + } + }, + workload = from_queue.next() => { + match workload { + Some(worker_payload) => { + this.best_block = worker_payload.at; + this.push(worker_payload); + continue; + }, + // R.I.P. worker! + None => break, + } + } + } + } + } } - /// Revalidation queue. /// /// Can be configured background (`new_background`) /// or immediate (just `new`). pub struct RevalidationQueue { - pool: Arc>, - api: Arc, - background: Option>>, + pool: Arc>, + api: Arc, + background: Option>>, } impl RevalidationQueue where - Api: 'static, + Api: 'static, { - /// New revalidation queue without background worker. 
- pub fn new(api: Arc, pool: Arc>) -> Self { - Self { - api, - pool, - background: None, - } - } - - pub fn new_with_interval( - api: Arc, - pool: Arc>, - interval: R, - ) -> (Self, Pin + Send>>) - where R: Send + 'static, R::Guard: Send - { - let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue"); - - let worker = RevalidationWorker::new(api.clone(), pool.clone()); - - let queue = - Self { - api, - pool, - background: Some(to_worker), - }; - - (queue, worker.run(from_queue, interval).boxed()) - } - - /// New revalidation queue with background worker. - pub fn new_background(api: Arc, pool: Arc>) -> - (Self, Pin + Send>>) - { - Self::new_with_interval(api, pool, intervalier::Interval::new(BACKGROUND_REVALIDATION_INTERVAL)) - } - - /// New revalidation queue with background worker and test signal. - #[cfg(test)] - pub fn new_test(api: Arc, pool: Arc>) -> - (Self, Pin + Send>>, intervalier::BackSignalControl) - { - let (interval, notifier) = intervalier::BackSignalInterval::new(BACKGROUND_REVALIDATION_INTERVAL); - let (queue, background) = Self::new_with_interval(api, pool, interval); - - (queue, background, notifier) - } - - /// Queue some transaction for later revalidation. - /// - /// If queue configured with background worker, this will return immediately. - /// If queue configured without background worker, this will resolve after - /// revalidation is actually done. 
- pub async fn revalidate_later(&self, at: NumberFor, transactions: Vec>) { - if transactions.len() > 0 { - log::debug!(target: "txpool", "Added {} transactions to revalidation queue", transactions.len()); - } - - if let Some(ref to_worker) = self.background { - if let Err(e) = to_worker.unbounded_send(WorkerPayload { at, transactions }) { - log::warn!(target: "txpool", "Failed to update background worker: {:?}", e); - } - return; - } else { - let pool = self.pool.clone(); - let api = self.api.clone(); - batch_revalidate(pool, api, at, transactions).await - } - } + /// New revalidation queue without background worker. + pub fn new(api: Arc, pool: Arc>) -> Self { + Self { + api, + pool, + background: None, + } + } + + pub fn new_with_interval( + api: Arc, + pool: Arc>, + interval: R, + ) -> (Self, Pin + Send>>) + where + R: Send + 'static, + R::Guard: Send, + { + let (to_worker, from_queue) = tracing_unbounded("mpsc_revalidation_queue"); + + let worker = RevalidationWorker::new(api.clone(), pool.clone()); + + let queue = Self { + api, + pool, + background: Some(to_worker), + }; + + (queue, worker.run(from_queue, interval).boxed()) + } + + /// New revalidation queue with background worker. + pub fn new_background( + api: Arc, + pool: Arc>, + ) -> (Self, Pin + Send>>) { + Self::new_with_interval( + api, + pool, + intervalier::Interval::new(BACKGROUND_REVALIDATION_INTERVAL), + ) + } + + /// New revalidation queue with background worker and test signal. + #[cfg(test)] + pub fn new_test( + api: Arc, + pool: Arc>, + ) -> ( + Self, + Pin + Send>>, + intervalier::BackSignalControl, + ) { + let (interval, notifier) = + intervalier::BackSignalInterval::new(BACKGROUND_REVALIDATION_INTERVAL); + let (queue, background) = Self::new_with_interval(api, pool, interval); + + (queue, background, notifier) + } + + /// Queue some transaction for later revalidation. + /// + /// If queue configured with background worker, this will return immediately. 
+ /// If queue configured without background worker, this will resolve after + /// revalidation is actually done. + pub async fn revalidate_later(&self, at: NumberFor, transactions: Vec>) { + if transactions.len() > 0 { + log::debug!(target: "txpool", "Added {} transactions to revalidation queue", transactions.len()); + } + + if let Some(ref to_worker) = self.background { + if let Err(e) = to_worker.unbounded_send(WorkerPayload { at, transactions }) { + log::warn!(target: "txpool", "Failed to update background worker: {:?}", e); + } + return; + } else { + let pool = self.pool.clone(); + let api = self.api.clone(); + batch_revalidate(pool, api, at, transactions).await + } + } } #[cfg(test)] mod tests { - use super::*; - use sc_transaction_graph::Pool; - use sp_transaction_pool::TransactionSource; - use substrate_test_runtime_transaction_pool::{TestApi, uxt}; - use futures::executor::block_on; - use substrate_test_runtime_client::{ - AccountKeyring::*, - }; - - fn setup() -> (Arc, Pool) { - let test_api = Arc::new(TestApi::empty()); - let pool = Pool::new(Default::default(), test_api.clone()); - (test_api, pool) - } - - #[test] - fn smoky() { - let (api, pool) = setup(); - let pool = Arc::new(pool); - let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); - - let uxt = uxt(Alice, 0); - let uxt_hash = block_on( - pool.submit_one(&BlockId::number(0), TransactionSource::External, uxt.clone()) - ).expect("Should be valid"); - - block_on(queue.revalidate_later(0, vec![uxt_hash])); - - // revalidated in sync offload 2nd time - assert_eq!(api.validation_requests().len(), 2); - // number of ready - assert_eq!(pool.validated_pool().status().ready, 1); - } + use super::*; + use futures::executor::block_on; + use sc_transaction_graph::Pool; + use sp_transaction_pool::TransactionSource; + use substrate_test_runtime_client::AccountKeyring::*; + use substrate_test_runtime_transaction_pool::{uxt, TestApi}; + + fn setup() -> (Arc, Pool) { + let test_api = 
Arc::new(TestApi::empty()); + let pool = Pool::new(Default::default(), test_api.clone()); + (test_api, pool) + } + + #[test] + fn smoky() { + let (api, pool) = setup(); + let pool = Arc::new(pool); + let queue = Arc::new(RevalidationQueue::new(api.clone(), pool.clone())); + + let uxt = uxt(Alice, 0); + let uxt_hash = block_on(pool.submit_one( + &BlockId::number(0), + TransactionSource::External, + uxt.clone(), + )) + .expect("Should be valid"); + + block_on(queue.revalidate_later(0, vec![uxt_hash])); + + // revalidated in sync offload 2nd time + assert_eq!(api.validation_requests().len(), 2); + // number of ready + assert_eq!(pool.validated_pool().status().ready, 1); + } } diff --git a/client/transaction-pool/src/testing/pool.rs b/client/transaction-pool/src/testing/pool.rs index 45fb6f42c3..32e8cbf02a 100644 --- a/client/transaction-pool/src/testing/pool.rs +++ b/client/transaction-pool/src/testing/pool.rs @@ -15,691 +15,752 @@ // along with Substrate. If not, see . use crate::*; -use sp_transaction_pool::TransactionStatus; +use codec::Encode; use futures::executor::block_on; -use txpool::{self, Pool}; +use futures::{prelude::*, task::Poll}; use sp_runtime::{ - generic::BlockId, - transaction_validity::{ValidTransaction, TransactionSource, InvalidTransaction}, + generic::BlockId, + transaction_validity::{InvalidTransaction, TransactionSource, ValidTransaction}, }; +use sp_transaction_pool::TransactionStatus; use substrate_test_runtime_client::{ - runtime::{Block, Hash, Index, Header, Extrinsic, Transfer}, - AccountKeyring::*, + runtime::{Block, Extrinsic, Hash, Header, Index, Transfer}, + AccountKeyring::*, }; -use substrate_test_runtime_transaction_pool::{TestApi, uxt}; -use futures::{prelude::*, task::Poll}; -use codec::Encode; +use substrate_test_runtime_transaction_pool::{uxt, TestApi}; +use txpool::{self, Pool}; fn pool() -> Pool { - Pool::new(Default::default(), TestApi::with_alice_nonce(209).into()) + Pool::new(Default::default(), 
TestApi::with_alice_nonce(209).into()) } fn maintained_pool() -> ( - BasicPool, - futures::executor::ThreadPool, - intervalier::BackSignalControl, + BasicPool, + futures::executor::ThreadPool, + intervalier::BackSignalControl, ) { - let (pool, background_task, notifier) = BasicPool::new_test( - std::sync::Arc::new(TestApi::with_alice_nonce(209)) - ); + let (pool, background_task, notifier) = + BasicPool::new_test(std::sync::Arc::new(TestApi::with_alice_nonce(209))); - let thread_pool = futures::executor::ThreadPool::new().unwrap(); - thread_pool.spawn_ok(background_task); - (pool, thread_pool, notifier) + let thread_pool = futures::executor::ThreadPool::new().unwrap(); + thread_pool.spawn_ok(background_task); + (pool, thread_pool, notifier) } fn header(number: u64) -> Header { - Header { - number, - digest: Default::default(), - extrinsics_root: Default::default(), - parent_hash: Default::default(), - state_root: Default::default(), - } + Header { + number, + digest: Default::default(), + extrinsics_root: Default::default(), + parent_hash: Default::default(), + state_root: Default::default(), + } } const SOURCE: TransactionSource = TransactionSource::External; #[test] fn submission_should_work() { - let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); - - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209]); + let pool = pool(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, vec![209]); } #[test] fn multiple_submission_should_work() { - let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); - - let pending: Vec<_> = 
pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209, 210]); + let pool = pool(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, vec![209, 210]); } #[test] fn early_nonce_should_be_culled() { - let pool = pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 208))).unwrap(); - - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, Vec::::new()); + let pool = pool(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 208))).unwrap(); + + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, Vec::::new()); } #[test] fn late_nonce_should_be_queued() { - let pool = pool(); - - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, Vec::::new()); - - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209, 210]); + let pool = pool(); + + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, Vec::::new()); + + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, vec![209, 210]); } 
#[test] fn prune_tags_should_work() { - let pool = pool(); - let hash209 = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); - - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![209, 210]); - - block_on( - pool.prune_tags( - &BlockId::number(1), - vec![vec![209]], - vec![hash209], - ) - ).expect("Prune tags"); - - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![210]); + let pool = pool(); + let hash209 = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).unwrap(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).unwrap(); + + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, vec![209, 210]); + + block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![hash209])) + .expect("Prune tags"); + + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, vec![210]); } #[test] fn should_ban_invalid_transactions() { - let pool = pool(); - let uxt = uxt(Alice, 209); - let hash = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap(); - pool.validated_pool().remove_invalid(&[hash]); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); - - // when - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, Vec::::new()); - - // then - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); + let pool = pool(); + let uxt = uxt(Alice, 209); + let hash = block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap(); + pool.validated_pool().remove_invalid(&[hash]); 
+ block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); + + // when + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, Vec::::new()); + + // then + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt.clone())).unwrap_err(); } #[test] fn should_correctly_prune_transactions_providing_more_than_one_tag() { - let api = Arc::new(TestApi::with_alice_nonce(209)); - api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| { - v.provides.push(vec![155]); - })); - let pool = Pool::new(Default::default(), api.clone()); - let xt = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); - assert_eq!(pool.validated_pool().status().ready, 1); - - // remove the transaction that just got imported. - api.increment_nonce(Alice.into()); - block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![])).expect("1. Pruned"); - assert_eq!(pool.validated_pool().status().ready, 0); - // it's re-imported to future - assert_eq!(pool.validated_pool().status().future, 1); - - // so now let's insert another transaction that also provides the 155 - api.increment_nonce(Alice.into()); - let xt = uxt(Alice, 211); - block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt.clone())).expect("2. Imported"); - assert_eq!(pool.validated_pool().status().ready, 1); - assert_eq!(pool.validated_pool().status().future, 1); - let pending: Vec<_> = pool.validated_pool().ready().map(|a| a.data.transfer().nonce).collect(); - assert_eq!(pending, vec![211]); - - // prune it and make sure the pool is empty - api.increment_nonce(Alice.into()); - block_on(pool.prune_tags(&BlockId::number(3), vec![vec![155]], vec![])).expect("2. 
Pruned"); - assert_eq!(pool.validated_pool().status().ready, 0); - assert_eq!(pool.validated_pool().status().future, 2); + let api = Arc::new(TestApi::with_alice_nonce(209)); + api.set_valid_modifier(Box::new(|v: &mut ValidTransaction| { + v.provides.push(vec![155]); + })); + let pool = Pool::new(Default::default(), api.clone()); + let xt = uxt(Alice, 209); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + assert_eq!(pool.validated_pool().status().ready, 1); + + // remove the transaction that just got imported. + api.increment_nonce(Alice.into()); + block_on(pool.prune_tags(&BlockId::number(1), vec![vec![209]], vec![])).expect("1. Pruned"); + assert_eq!(pool.validated_pool().status().ready, 0); + // it's re-imported to future + assert_eq!(pool.validated_pool().status().future, 1); + + // so now let's insert another transaction that also provides the 155 + api.increment_nonce(Alice.into()); + let xt = uxt(Alice, 211); + block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt.clone())).expect("2. Imported"); + assert_eq!(pool.validated_pool().status().ready, 1); + assert_eq!(pool.validated_pool().status().future, 1); + let pending: Vec<_> = pool + .validated_pool() + .ready() + .map(|a| a.data.transfer().nonce) + .collect(); + assert_eq!(pending, vec![211]); + + // prune it and make sure the pool is empty + api.increment_nonce(Alice.into()); + block_on(pool.prune_tags(&BlockId::number(3), vec![vec![155]], vec![])).expect("2. 
Pruned"); + assert_eq!(pool.validated_pool().status().ready, 0); + assert_eq!(pool.validated_pool().status().future, 2); } fn block_event(id: u64) -> ChainEvent { - ChainEvent::NewBlock { - id: BlockId::number(id), - is_new_best: true, - retracted: vec![], - header: header(id), - } + ChainEvent::NewBlock { + id: BlockId::number(id), + is_new_best: true, + retracted: vec![], + header: header(id), + } } fn block_event_with_retracted(id: u64, retracted: Vec) -> ChainEvent { - ChainEvent::NewBlock { - id: BlockId::number(id), - is_new_best: true, - retracted: retracted, - header: header(id), - } + ChainEvent::NewBlock { + id: BlockId::number(id), + is_new_best: true, + retracted: retracted, + header: header(id), + } } - #[test] fn should_prune_old_during_maintenance() { - let xt = uxt(Alice, 209); + let xt = uxt(Alice, 209); - let (pool, _guard, _notifier) = maintained_pool(); + let (pool, _guard, _notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 1); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 1); - pool.api.push_block(1, vec![xt.clone()]); + pool.api.push_block(1, vec![xt.clone()]); - block_on(pool.maintain(block_event(1))); - assert_eq!(pool.status().ready, 0); + block_on(pool.maintain(block_event(1))); + assert_eq!(pool.status().ready, 0); } #[test] fn should_revalidate_during_maintenance() { - let xt1 = uxt(Alice, 209); - let xt2 = uxt(Alice, 210); + let xt1 = uxt(Alice, 209); + let xt2 = uxt(Alice, 210); - let (pool, _guard, mut notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt2.clone())).expect("2. 
Imported"); - assert_eq!(pool.status().ready, 2); - assert_eq!(pool.api.validation_requests().len(), 2); + let (pool, _guard, mut notifier) = maintained_pool(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt2.clone())).expect("2. Imported"); + assert_eq!(pool.status().ready, 2); + assert_eq!(pool.api.validation_requests().len(), 2); - pool.api.push_block(1, vec![xt1.clone()]); + pool.api.push_block(1, vec![xt1.clone()]); - block_on(pool.maintain(block_event(1))); - assert_eq!(pool.status().ready, 1); - block_on(notifier.next()); + block_on(pool.maintain(block_event(1))); + assert_eq!(pool.status().ready, 1); + block_on(notifier.next()); - // test that pool revalidated transaction that left ready and not included in the block - assert_eq!(pool.api.validation_requests().len(), 3); + // test that pool revalidated transaction that left ready and not included in the block + assert_eq!(pool.api.validation_requests().len(), 3); } #[test] fn should_resubmit_from_retracted_during_maintenance() { - let xt = uxt(Alice, 209); - let retracted_hash = Hash::random(); + let xt = uxt(Alice, 209); + let retracted_hash = Hash::random(); - let (pool, _guard, _notifier) = maintained_pool(); + let (pool, _guard, _notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 1); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. 
Imported"); + assert_eq!(pool.status().ready, 1); - pool.api.push_block(1, vec![]); - pool.api.push_fork_block(retracted_hash, vec![xt.clone()]); + pool.api.push_block(1, vec![]); + pool.api.push_fork_block(retracted_hash, vec![xt.clone()]); - let event = block_event_with_retracted(1, vec![retracted_hash]); + let event = block_event_with_retracted(1, vec![retracted_hash]); - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 1); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 1); } #[test] fn should_not_retain_invalid_hashes_from_retracted() { - let xt = uxt(Alice, 209); - let retracted_hash = Hash::random(); + let xt = uxt(Alice, 209); + let retracted_hash = Hash::random(); - let (pool, _guard, mut notifier) = maintained_pool(); + let (pool, _guard, mut notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 1); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 1); - pool.api.push_block(1, vec![]); - pool.api.push_fork_block(retracted_hash, vec![xt.clone()]); - pool.api.add_invalid(&xt); + pool.api.push_block(1, vec![]); + pool.api.push_fork_block(retracted_hash, vec![xt.clone()]); + pool.api.add_invalid(&xt); - let event = block_event_with_retracted(1, vec![retracted_hash]); + let event = block_event_with_retracted(1, vec![retracted_hash]); - block_on(pool.maintain(event)); - block_on(notifier.next()); + block_on(pool.maintain(event)); + block_on(notifier.next()); - assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().ready, 0); } #[test] fn should_revalidate_transaction_multiple_times() { - let xt = uxt(Alice, 209); + let xt = uxt(Alice, 209); - let (pool, _guard, mut notifier) = maintained_pool(); + let (pool, _guard, mut notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. 
Imported"); - assert_eq!(pool.status().ready, 1); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 1); - pool.api.push_block(1, vec![xt.clone()]); + pool.api.push_block(1, vec![xt.clone()]); - block_on(pool.maintain(block_event(1))); + block_on(pool.maintain(block_event(1))); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 1); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 1); - pool.api.push_block(2, vec![]); - pool.api.add_invalid(&xt); + pool.api.push_block(2, vec![]); + pool.api.add_invalid(&xt); - block_on(pool.maintain(block_event(2))); - block_on(notifier.next()); + block_on(pool.maintain(block_event(2))); + block_on(notifier.next()); - assert_eq!(pool.status().ready, 0); + assert_eq!(pool.status().ready, 0); } #[test] fn should_revalidate_across_many_blocks() { - let xt1 = uxt(Alice, 209); - let xt2 = uxt(Alice, 210); - let xt3 = uxt(Alice, 211); + let xt1 = uxt(Alice, 209); + let xt2 = uxt(Alice, 210); + let xt3 = uxt(Alice, 211); - let (pool, _guard, mut notifier) = maintained_pool(); + let (pool, _guard, mut notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt2.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 2); + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt2.clone())).expect("1. 
Imported"); + assert_eq!(pool.status().ready, 2); - pool.api.push_block(1, vec![]); - block_on(pool.maintain(block_event(1))); - block_on(notifier.next()); + pool.api.push_block(1, vec![]); + block_on(pool.maintain(block_event(1))); + block_on(notifier.next()); - block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt3.clone())).expect("1. Imported"); - assert_eq!(pool.status().ready, 3); + block_on(pool.submit_one(&BlockId::number(2), SOURCE, xt3.clone())).expect("1. Imported"); + assert_eq!(pool.status().ready, 3); - pool.api.push_block(2, vec![xt1.clone()]); - block_on(pool.maintain(block_event(2))); - block_on(notifier.next()); + pool.api.push_block(2, vec![xt1.clone()]); + block_on(pool.maintain(block_event(2))); + block_on(notifier.next()); - assert_eq!(pool.status().ready, 2); - // xt1 and xt2 validated twice, then xt3 once, then xt2 and xt3 again - assert_eq!(pool.api.validation_requests().len(), 7); + assert_eq!(pool.status().ready, 2); + // xt1 and xt2 validated twice, then xt3 once, then xt2 and xt3 again + assert_eq!(pool.api.validation_requests().len(), 7); } - #[test] fn should_push_watchers_during_maintaince() { - fn alice_uxt(nonce: u64) -> Extrinsic { - uxt(Alice, 209 + nonce) - } - - // given - let (pool, _guard, mut notifier) = maintained_pool(); - - let tx0 = alice_uxt(0); - let watcher0 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone()) - ).unwrap(); - let tx1 = alice_uxt(1); - let watcher1 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone()) - ).unwrap(); - let tx2 = alice_uxt(2); - let watcher2 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone()) - ).unwrap(); - let tx3 = alice_uxt(3); - let watcher3 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone()) - ).unwrap(); - let tx4 = alice_uxt(4); - let watcher4 = block_on( - pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone()) - ).unwrap(); - assert_eq!(pool.status().ready, 5); - - // 
when - pool.api.add_invalid(&tx3); - pool.api.add_invalid(&tx4); - - // clear timer events if any - block_on(pool.maintain(block_event(0))); - block_on(notifier.next()); - - // then - // hash3 is now invalid - // hash4 is now invalid - assert_eq!(pool.status().ready, 3); - assert_eq!( - futures::executor::block_on_stream(watcher3).collect::>(), - vec![TransactionStatus::Ready, TransactionStatus::Invalid], - ); - assert_eq!( - futures::executor::block_on_stream(watcher4).collect::>(), - vec![TransactionStatus::Ready, TransactionStatus::Invalid], - ); - - // when - let header_hash = pool.api.push_block(1, vec![tx0, tx1, tx2]).hash(); - block_on(pool.maintain(block_event(1))); - - let event = ChainEvent::Finalized { hash: header_hash.clone() }; - block_on(pool.maintain(event)); - - // then - // events for hash0 are: Ready, InBlock - // events for hash1 are: Ready, InBlock - // events for hash2 are: Ready, InBlock - assert_eq!( - futures::executor::block_on_stream(watcher0).collect::>(), - vec![TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), TransactionStatus::Finalized(header_hash.clone())], - ); - assert_eq!( - futures::executor::block_on_stream(watcher1).collect::>(), - vec![TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), TransactionStatus::Finalized(header_hash.clone())], - ); - assert_eq!( - futures::executor::block_on_stream(watcher2).collect::>(), - vec![TransactionStatus::Ready, TransactionStatus::InBlock(header_hash.clone()), TransactionStatus::Finalized(header_hash.clone())], - ); + fn alice_uxt(nonce: u64) -> Extrinsic { + uxt(Alice, 209 + nonce) + } + + // given + let (pool, _guard, mut notifier) = maintained_pool(); + + let tx0 = alice_uxt(0); + let watcher0 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx0.clone())).unwrap(); + let tx1 = alice_uxt(1); + let watcher1 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx1.clone())).unwrap(); + let tx2 = alice_uxt(2); + let 
watcher2 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx2.clone())).unwrap(); + let tx3 = alice_uxt(3); + let watcher3 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx3.clone())).unwrap(); + let tx4 = alice_uxt(4); + let watcher4 = + block_on(pool.submit_and_watch(&BlockId::Number(0), SOURCE, tx4.clone())).unwrap(); + assert_eq!(pool.status().ready, 5); + + // when + pool.api.add_invalid(&tx3); + pool.api.add_invalid(&tx4); + + // clear timer events if any + block_on(pool.maintain(block_event(0))); + block_on(notifier.next()); + + // then + // hash3 is now invalid + // hash4 is now invalid + assert_eq!(pool.status().ready, 3); + assert_eq!( + futures::executor::block_on_stream(watcher3).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Invalid], + ); + assert_eq!( + futures::executor::block_on_stream(watcher4).collect::>(), + vec![TransactionStatus::Ready, TransactionStatus::Invalid], + ); + + // when + let header_hash = pool.api.push_block(1, vec![tx0, tx1, tx2]).hash(); + block_on(pool.maintain(block_event(1))); + + let event = ChainEvent::Finalized { + hash: header_hash.clone(), + }; + block_on(pool.maintain(event)); + + // then + // events for hash0 are: Ready, InBlock + // events for hash1 are: Ready, InBlock + // events for hash2 are: Ready, InBlock + assert_eq!( + futures::executor::block_on_stream(watcher0).collect::>(), + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock(header_hash.clone()), + TransactionStatus::Finalized(header_hash.clone()) + ], + ); + assert_eq!( + futures::executor::block_on_stream(watcher1).collect::>(), + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock(header_hash.clone()), + TransactionStatus::Finalized(header_hash.clone()) + ], + ); + assert_eq!( + futures::executor::block_on_stream(watcher2).collect::>(), + vec![ + TransactionStatus::Ready, + TransactionStatus::InBlock(header_hash.clone()), + TransactionStatus::Finalized(header_hash.clone()) + ], + 
); } #[test] fn can_track_heap_size() { - let (pool, _guard, _notifier) = maintained_pool(); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 211))).expect("1. Imported"); - block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 212))).expect("1. Imported"); + let (pool, _guard, _notifier) = maintained_pool(); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 209))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 210))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 211))).expect("1. Imported"); + block_on(pool.submit_one(&BlockId::number(0), SOURCE, uxt(Alice, 212))).expect("1. Imported"); - assert!(parity_util_mem::malloc_size(&pool) > 3000); + assert!(parity_util_mem::malloc_size(&pool) > 3000); } #[test] fn finalization() { - let xt = uxt(Alice, 209); - let api = TestApi::with_alice_nonce(209); - api.push_block(1, vec![]); - let (pool, _background, _) = BasicPool::new_test(api.into()); - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) - ).expect("1. 
Imported"); - pool.api.push_block(2, vec![xt.clone()]); - - let header = pool.api.chain().read().header_by_number.get(&2).cloned().unwrap(); - let event = ChainEvent::NewBlock { - id: BlockId::Hash(header.hash()), - is_new_best: true, - header: header.clone(), - retracted: vec![] - }; - block_on(pool.maintain(event)); - - let event = ChainEvent::Finalized { hash: header.hash() }; - block_on(pool.maintain(event)); - - let mut stream = futures::executor::block_on_stream(watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(header.hash()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(header.hash()))); - assert_eq!(stream.next(), None); + let xt = uxt(Alice, 209); + let api = TestApi::with_alice_nonce(209); + api.push_block(1, vec![]); + let (pool, _background, _) = BasicPool::new_test(api.into()); + let watcher = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + .expect("1. Imported"); + pool.api.push_block(2, vec![xt.clone()]); + + let header = pool + .api + .chain() + .read() + .header_by_number + .get(&2) + .cloned() + .unwrap(); + let event = ChainEvent::NewBlock { + id: BlockId::Hash(header.hash()), + is_new_best: true, + header: header.clone(), + retracted: vec![], + }; + block_on(pool.maintain(event)); + + let event = ChainEvent::Finalized { + hash: header.hash(), + }; + block_on(pool.maintain(event)); + + let mut stream = futures::executor::block_on_stream(watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!( + stream.next(), + Some(TransactionStatus::InBlock(header.hash())) + ); + assert_eq!( + stream.next(), + Some(TransactionStatus::Finalized(header.hash())) + ); + assert_eq!(stream.next(), None); } #[test] fn fork_aware_finalization() { - let api = TestApi::empty(); - // starting block A1 (last finalized.) 
- api.push_block(1, vec![]); - - let (pool, _background, _) = BasicPool::new_test(api.into()); - let mut canon_watchers = vec![]; - - let from_alice = uxt(Alice, 1); - let from_dave = uxt(Dave, 2); - let from_bob = uxt(Bob, 1); - let from_charlie = uxt(Charlie, 1); - pool.api.increment_nonce(Alice.into()); - pool.api.increment_nonce(Dave.into()); - pool.api.increment_nonce(Charlie.into()); - pool.api.increment_nonce(Bob.into()); - - let from_dave_watcher; - let from_bob_watcher; - let b1; - let d1; - let c2; - let d2; - - - // block B1 - { - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone()) - ).expect("1. Imported"); - let header = pool.api.push_block(2, vec![from_alice.clone()]); - canon_watchers.push((watcher, header.hash())); - assert_eq!(pool.status().ready, 1); - - let event = ChainEvent::NewBlock { - id: BlockId::Number(2), - is_new_best: true, - header: header.clone(), - retracted: vec![], - }; - b1 = header.hash(); - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - let event = ChainEvent::Finalized { hash: b1 }; - block_on(pool.maintain(event)); - } - - // block C2 - { - let header = pool.api.push_fork_block_with_parent(b1, vec![from_dave.clone()]); - from_dave_watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone()) - ).expect("1. Imported"); - assert_eq!(pool.status().ready, 1); - let event = ChainEvent::NewBlock { - id: BlockId::Hash(header.hash()), - is_new_best: true, - header: header.clone(), - retracted: vec![] - }; - c2 = header.hash(); - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - } - - // block D2 - { - from_bob_watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone()) - ).expect("1. 
Imported"); - assert_eq!(pool.status().ready, 1); - let header = pool.api.push_fork_block_with_parent(c2, vec![from_bob.clone()]); - - let event = ChainEvent::NewBlock { - id: BlockId::Hash(header.hash()), - is_new_best: true, - header: header.clone(), - retracted: vec![] - }; - d2 = header.hash(); - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - } - - // block C1 - { - let watcher = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone()) - ).expect("1.Imported"); - assert_eq!(pool.status().ready, 1); - let header = pool.api.push_block(3, vec![from_charlie.clone()]); - - canon_watchers.push((watcher, header.hash())); - let event = ChainEvent::NewBlock { - id: BlockId::Number(3), - is_new_best: true, - header: header.clone(), - retracted: vec![c2, d2], - }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 2); - let event = ChainEvent::Finalized { hash: header.hash() }; - block_on(pool.maintain(event)); - } - - // block D1 - { - let xt = uxt(Eve, 0); - let w = block_on( - pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone()) - ).expect("1. 
Imported"); - assert_eq!(pool.status().ready, 3); - let header = pool.api.push_block(4, vec![xt.clone()]); - canon_watchers.push((w, header.hash())); - - let event = ChainEvent::NewBlock { - id: BlockId::Hash(header.hash()), - is_new_best: true, - header: header.clone(), - retracted: vec![] - }; - d1 = header.hash(); - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 2); - let event = ChainEvent::Finalized { hash: d1 }; - block_on(pool.maintain(event)); - } - - let e1; - - // block e1 - { - let header = pool.api.push_block(5, vec![from_dave, from_bob]); - e1 = header.hash(); - let event = ChainEvent::NewBlock { - id: BlockId::Hash(header.hash()), - is_new_best: true, - header: header.clone(), - retracted: vec![] - }; - block_on(pool.maintain(event)); - assert_eq!(pool.status().ready, 0); - block_on(pool.maintain(ChainEvent::Finalized { hash: e1 })); - } - - - for (canon_watcher, h) in canon_watchers { - let mut stream = futures::executor::block_on_stream(canon_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h.clone()))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(h))); - assert_eq!(stream.next(), None); - } - - - { - let mut stream= futures::executor::block_on_stream(from_dave_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2.clone()))); - assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); - assert_eq!(stream.next(), None); - } - - { - let mut stream= futures::executor::block_on_stream(from_bob_watcher); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2.clone()))); - 
assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); - assert_eq!(stream.next(), Some(TransactionStatus::Ready)); - assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); - assert_eq!(stream.next(), Some(TransactionStatus::Finalized(e1.clone()))); - assert_eq!(stream.next(), None); - } + let api = TestApi::empty(); + // starting block A1 (last finalized.) + api.push_block(1, vec![]); + + let (pool, _background, _) = BasicPool::new_test(api.into()); + let mut canon_watchers = vec![]; + + let from_alice = uxt(Alice, 1); + let from_dave = uxt(Dave, 2); + let from_bob = uxt(Bob, 1); + let from_charlie = uxt(Charlie, 1); + pool.api.increment_nonce(Alice.into()); + pool.api.increment_nonce(Dave.into()); + pool.api.increment_nonce(Charlie.into()); + pool.api.increment_nonce(Bob.into()); + + let from_dave_watcher; + let from_bob_watcher; + let b1; + let d1; + let c2; + let d2; + + // block B1 + { + let watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_alice.clone())) + .expect("1. Imported"); + let header = pool.api.push_block(2, vec![from_alice.clone()]); + canon_watchers.push((watcher, header.hash())); + assert_eq!(pool.status().ready, 1); + + let event = ChainEvent::NewBlock { + id: BlockId::Number(2), + is_new_best: true, + header: header.clone(), + retracted: vec![], + }; + b1 = header.hash(); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + let event = ChainEvent::Finalized { hash: b1 }; + block_on(pool.maintain(event)); + } + + // block C2 + { + let header = pool + .api + .push_fork_block_with_parent(b1, vec![from_dave.clone()]); + from_dave_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_dave.clone())) + .expect("1. 
Imported"); + assert_eq!(pool.status().ready, 1); + let event = ChainEvent::NewBlock { + id: BlockId::Hash(header.hash()), + is_new_best: true, + header: header.clone(), + retracted: vec![], + }; + c2 = header.hash(); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + // block D2 + { + from_bob_watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_bob.clone())) + .expect("1. Imported"); + assert_eq!(pool.status().ready, 1); + let header = pool + .api + .push_fork_block_with_parent(c2, vec![from_bob.clone()]); + + let event = ChainEvent::NewBlock { + id: BlockId::Hash(header.hash()), + is_new_best: true, + header: header.clone(), + retracted: vec![], + }; + d2 = header.hash(); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + } + + // block C1 + { + let watcher = + block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, from_charlie.clone())) + .expect("1.Imported"); + assert_eq!(pool.status().ready, 1); + let header = pool.api.push_block(3, vec![from_charlie.clone()]); + + canon_watchers.push((watcher, header.hash())); + let event = ChainEvent::NewBlock { + id: BlockId::Number(3), + is_new_best: true, + header: header.clone(), + retracted: vec![c2, d2], + }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 2); + let event = ChainEvent::Finalized { + hash: header.hash(), + }; + block_on(pool.maintain(event)); + } + + // block D1 + { + let xt = uxt(Eve, 0); + let w = block_on(pool.submit_and_watch(&BlockId::number(1), SOURCE, xt.clone())) + .expect("1. 
Imported"); + assert_eq!(pool.status().ready, 3); + let header = pool.api.push_block(4, vec![xt.clone()]); + canon_watchers.push((w, header.hash())); + + let event = ChainEvent::NewBlock { + id: BlockId::Hash(header.hash()), + is_new_best: true, + header: header.clone(), + retracted: vec![], + }; + d1 = header.hash(); + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 2); + let event = ChainEvent::Finalized { hash: d1 }; + block_on(pool.maintain(event)); + } + + let e1; + + // block e1 + { + let header = pool.api.push_block(5, vec![from_dave, from_bob]); + e1 = header.hash(); + let event = ChainEvent::NewBlock { + id: BlockId::Hash(header.hash()), + is_new_best: true, + header: header.clone(), + retracted: vec![], + }; + block_on(pool.maintain(event)); + assert_eq!(pool.status().ready, 0); + block_on(pool.maintain(ChainEvent::Finalized { hash: e1 })); + } + + for (canon_watcher, h) in canon_watchers { + let mut stream = futures::executor::block_on_stream(canon_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(h.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Finalized(h))); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_dave_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(c2.clone()))); + assert_eq!(stream.next(), Some(TransactionStatus::Retracted(c2))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); + assert_eq!( + stream.next(), + Some(TransactionStatus::Finalized(e1.clone())) + ); + assert_eq!(stream.next(), None); + } + + { + let mut stream = futures::executor::block_on_stream(from_bob_watcher); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(d2.clone()))); + 
assert_eq!(stream.next(), Some(TransactionStatus::Retracted(d2))); + assert_eq!(stream.next(), Some(TransactionStatus::Ready)); + assert_eq!(stream.next(), Some(TransactionStatus::InBlock(e1))); + assert_eq!( + stream.next(), + Some(TransactionStatus::Finalized(e1.clone())) + ); + assert_eq!(stream.next(), None); + } } #[test] fn ready_set_should_not_resolve_before_block_update() { - let (pool, _guard, _notifier) = maintained_pool(); - let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + let (pool, _guard, _notifier) = maintained_pool(); + let xt1 = uxt(Alice, 209); + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); - assert!(pool.ready_at(1).now_or_never().is_none()); + assert!(pool.ready_at(1).now_or_never().is_none()); } #[test] fn ready_set_should_resolve_after_block_update() { - let (pool, _guard, _notifier) = maintained_pool(); - pool.api.push_block(1, vec![]); + let (pool, _guard, _notifier) = maintained_pool(); + pool.api.push_block(1, vec![]); - let xt1 = uxt(Alice, 209); + let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); - block_on(pool.maintain(block_event(1))); + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); + block_on(pool.maintain(block_event(1))); - assert!(pool.ready_at(1).now_or_never().is_some()); + assert!(pool.ready_at(1).now_or_never().is_some()); } #[test] fn ready_set_should_eventually_resolve_when_block_update_arrives() { - let (pool, _guard, _notifier) = maintained_pool(); - pool.api.push_block(1, vec![]); + let (pool, _guard, _notifier) = maintained_pool(); + pool.api.push_block(1, vec![]); - let xt1 = uxt(Alice, 209); + let xt1 = uxt(Alice, 209); - block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. 
Imported"); + block_on(pool.submit_one(&BlockId::number(1), SOURCE, xt1.clone())).expect("1. Imported"); - let noop_waker = futures::task::noop_waker(); - let mut context = futures::task::Context::from_waker(&noop_waker); + let noop_waker = futures::task::noop_waker(); + let mut context = futures::task::Context::from_waker(&noop_waker); - let mut ready_set_future = pool.ready_at(1); - if let Poll::Ready(_) = ready_set_future.poll_unpin(&mut context) { - panic!("Ready set should not be ready before block update!"); - } + let mut ready_set_future = pool.ready_at(1); + if let Poll::Ready(_) = ready_set_future.poll_unpin(&mut context) { + panic!("Ready set should not be ready before block update!"); + } - block_on(pool.maintain(block_event(1))); + block_on(pool.maintain(block_event(1))); - match ready_set_future.poll_unpin(&mut context) { - Poll::Pending => { - panic!("Ready set should become ready after block update!"); - }, - Poll::Ready(iterator) => { - let data = iterator.collect::>(); - assert_eq!(data.len(), 1); - } - } + match ready_set_future.poll_unpin(&mut context) { + Poll::Pending => { + panic!("Ready set should become ready after block update!"); + } + Poll::Ready(iterator) => { + let data = iterator.collect::>(); + assert_eq!(data.len(), 1); + } + } } #[test] fn should_not_accept_old_signatures() { - use std::convert::TryFrom; - - let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new_test(Arc::new(FullChainApi::new(client))).0 - ); - - let transfer = Transfer { - from: Alice.into(), - to: Bob.into(), - nonce: 0, - amount: 1, - }; - let _bytes: sp_core::sr25519::Signature = transfer.using_encoded(|e| Alice.sign(e)).into(); - - // generated with schnorrkel 0.1.1 from `_bytes` - let old_singature = sp_core::sr25519::Signature::try_from(&hex::decode( + use std::convert::TryFrom; + + let client = Arc::new(substrate_test_runtime_client::new()); + let pool = 
Arc::new(BasicPool::new_test(Arc::new(FullChainApi::new(client))).0); + + let transfer = Transfer { + from: Alice.into(), + to: Bob.into(), + nonce: 0, + amount: 1, + }; + let _bytes: sp_core::sr25519::Signature = transfer.using_encoded(|e| Alice.sign(e)).into(); + + // generated with schnorrkel 0.1.1 from `_bytes` + let old_singature = sp_core::sr25519::Signature::try_from(&hex::decode( "c427eb672e8c441c86d31f1a81b22b43102058e9ce237cabe9897ea5099ffd426cd1c6a1f4f2869c3df57901d36bedcb295657adb3a4355add86ed234eb83108" ).expect("hex invalid")[..]).expect("signature construction failed"); - let xt = Extrinsic::Transfer { transfer, signature: old_singature, exhaust_resources_when_not_first: false }; - - assert_matches::assert_matches!( - block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())), - Err(error::Error::Pool( - sp_transaction_pool::error::Error::InvalidTransaction(InvalidTransaction::BadProof) - )), - "Should be invalid transaction with bad proof", - ); + let xt = Extrinsic::Transfer { + transfer, + signature: old_singature, + exhaust_resources_when_not_first: false, + }; + + assert_matches::assert_matches!( + block_on(pool.submit_one(&BlockId::number(0), SOURCE, xt.clone())), + Err(error::Error::Pool( + sp_transaction_pool::error::Error::InvalidTransaction(InvalidTransaction::BadProof) + )), + "Should be invalid transaction with bad proof", + ); } diff --git a/frame/assets/src/lib.rs b/frame/assets/src/lib.rs index 15726c9bcb..0f399d513c 100644 --- a/frame/assets/src/lib.rs +++ b/frame/assets/src/lib.rs @@ -132,272 +132,293 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{Parameter, decl_module, decl_event, decl_storage, decl_error, ensure}; use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; -use sp_runtime::traits::{Member, AtLeast32Bit, Zero, StaticLookup}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, Parameter}; use frame_system::{self as system, ensure_signed}; use sp_runtime::traits::One; +use sp_runtime::traits::{AtLeast32Bit, Member, StaticLookup, Zero}; /// The module configuration trait. pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// The units in which we record balances. - type Balance: Member + Parameter + AtLeast32Bit + Default + Copy; + /// The units in which we record balances. + type Balance: Member + Parameter + AtLeast32Bit + Default + Copy; - /// The arithmetic type of asset identifier. - type AssetId: Parameter + AtLeast32Bit + Default + Copy; + /// The arithmetic type of asset identifier. + type AssetId: Parameter + AtLeast32Bit + Default + Copy; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - /// Issue a new class of fungible assets. There are, and will only ever be, `total` - /// such assets and they'll all belong to the `origin` initially. It will have an - /// identifier `AssetId` instance: this will be specified in the `Issued` event. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn issue(origin, #[compact] total: T::Balance) { - let origin = ensure_signed(origin)?; - - let id = Self::next_asset_id(); - >::mutate(|id| *id += One::one()); - - >::insert((id, &origin), total); - >::insert(id, total); - - Self::deposit_event(RawEvent::Issued(id, origin, total)); - } - - /// Move some assets from one holder to another. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn transfer(origin, - #[compact] id: T::AssetId, - target: ::Source, - #[compact] amount: T::Balance - ) { - let origin = ensure_signed(origin)?; - let origin_account = (id, origin.clone()); - let origin_balance = >::get(&origin_account); - let target = T::Lookup::lookup(target)?; - ensure!(!amount.is_zero(), Error::::AmountZero); - ensure!(origin_balance >= amount, Error::::BalanceLow); - - Self::deposit_event(RawEvent::Transferred(id, origin, target.clone(), amount)); - >::insert(origin_account, origin_balance - amount); - >::mutate((id, target), |balance| *balance += amount); - } - - /// Destroy any assets of `id` owned by `origin`. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn destroy(origin, #[compact] id: T::AssetId) { - let origin = ensure_signed(origin)?; - let balance = >::take((id, &origin)); - ensure!(!balance.is_zero(), Error::::BalanceZero); - - >::mutate(id, |total_supply| *total_supply -= balance); - Self::deposit_event(RawEvent::Destroyed(id, origin, balance)); - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + /// Issue a new class of fungible assets. There are, and will only ever be, `total` + /// such assets and they'll all belong to the `origin` initially. It will have an + /// identifier `AssetId` instance: this will be specified in the `Issued` event. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn issue(origin, #[compact] total: T::Balance) { + let origin = ensure_signed(origin)?; + + let id = Self::next_asset_id(); + >::mutate(|id| *id += One::one()); + + >::insert((id, &origin), total); + >::insert(id, total); + + Self::deposit_event(RawEvent::Issued(id, origin, total)); + } + + /// Move some assets from one holder to another. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn transfer(origin, + #[compact] id: T::AssetId, + target: ::Source, + #[compact] amount: T::Balance + ) { + let origin = ensure_signed(origin)?; + let origin_account = (id, origin.clone()); + let origin_balance = >::get(&origin_account); + let target = T::Lookup::lookup(target)?; + ensure!(!amount.is_zero(), Error::::AmountZero); + ensure!(origin_balance >= amount, Error::::BalanceLow); + + Self::deposit_event(RawEvent::Transferred(id, origin, target.clone(), amount)); + >::insert(origin_account, origin_balance - amount); + >::mutate((id, target), |balance| *balance += amount); + } + + /// Destroy any assets of `id` owned by `origin`. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn destroy(origin, #[compact] id: T::AssetId) { + let origin = ensure_signed(origin)?; + let balance = >::take((id, &origin)); + ensure!(!balance.is_zero(), Error::::BalanceZero); + + >::mutate(id, |total_supply| *total_supply -= balance); + Self::deposit_event(RawEvent::Destroyed(id, origin, balance)); + } + } } decl_event! { - pub enum Event where - ::AccountId, - ::Balance, - ::AssetId, - { - /// Some assets were issued. - Issued(AssetId, AccountId, Balance), - /// Some assets were transferred. - Transferred(AssetId, AccountId, AccountId, Balance), - /// Some assets were destroyed. - Destroyed(AssetId, AccountId, Balance), - } + pub enum Event where + ::AccountId, + ::Balance, + ::AssetId, + { + /// Some assets were issued. + Issued(AssetId, AccountId, Balance), + /// Some assets were transferred. + Transferred(AssetId, AccountId, AccountId, Balance), + /// Some assets were destroyed. + Destroyed(AssetId, AccountId, Balance), + } } decl_error! 
{ - pub enum Error for Module { - /// Transfer amount should be non-zero - AmountZero, - /// Account balance must be greater than or equal to the transfer amount - BalanceLow, - /// Balance should be non-zero - BalanceZero, - } + pub enum Error for Module { + /// Transfer amount should be non-zero + AmountZero, + /// Account balance must be greater than or equal to the transfer amount + BalanceLow, + /// Balance should be non-zero + BalanceZero, + } } decl_storage! { - trait Store for Module as Assets { - /// The number of units of assets held by any given account. - Balances: map hasher(blake2_128_concat) (T::AssetId, T::AccountId) => T::Balance; - /// The next asset identifier up for grabs. - NextAssetId get(fn next_asset_id): T::AssetId; - /// The total unit supply of an asset. - TotalSupply: map hasher(twox_64_concat) T::AssetId => T::Balance; - } + trait Store for Module as Assets { + /// The number of units of assets held by any given account. + Balances: map hasher(blake2_128_concat) (T::AssetId, T::AccountId) => T::Balance; + /// The next asset identifier up for grabs. + NextAssetId get(fn next_asset_id): T::AssetId; + /// The total unit supply of an asset. + TotalSupply: map hasher(twox_64_concat) T::AssetId => T::Balance; + } } // The main implementation block for the module. impl Module { - // Public immutables + // Public immutables - /// Get the asset `id` balance of `who`. - pub fn balance(id: T::AssetId, who: T::AccountId) -> T::Balance { - >::get((id, who)) - } + /// Get the asset `id` balance of `who`. + pub fn balance(id: T::AssetId, who: T::AccountId) -> T::Balance { + >::get((id, who)) + } - /// Get the total supply of an asset `id`. - pub fn total_supply(id: T::AssetId) -> T::Balance { - >::get(id) - } + /// Get the total supply of an asset `id`. 
+ pub fn total_supply(id: T::AssetId) -> T::Balance { + >::get(id) + } } #[cfg(test)] mod tests { - use super::*; - - use frame_support::{impl_outer_origin, assert_ok, assert_noop, parameter_types, weights::Weight}; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type Call = (); - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - impl Trait for Test { - type Event = (); - type Balance = u64; - type AssetId = u32; - } - type Assets = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. 
- fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() - } - - #[test] - fn issuing_asset_units_to_issuer_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - }); - } - - #[test] - fn querying_total_supply_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 19); - assert_eq!(Assets::balance(0, 3), 31); - assert_ok!(Assets::destroy(Origin::signed(3), 0)); - assert_eq!(Assets::total_supply(0), 69); - }); - } - - #[test] - fn transferring_amount_above_available_balance_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 50); - }); - } - - #[test] - fn transferring_amount_more_than_available_balance_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); - assert_eq!(Assets::balance(0, 1), 50); - assert_eq!(Assets::balance(0, 2), 50); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); - assert_eq!(Assets::balance(0, 1), 0); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 1, 50), Error::::BalanceLow); - }); - } - - #[test] - fn transferring_less_than_one_unit_should_not_work() { - new_test_ext().execute_with(|| { - 
assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 0), Error::::AmountZero); - }); - } - - #[test] - fn transferring_more_units_than_total_supply_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_noop!(Assets::transfer(Origin::signed(1), 0, 2, 101), Error::::BalanceLow); - }); - } - - #[test] - fn destroying_asset_balance_with_positive_balance_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 1), 100); - assert_ok!(Assets::destroy(Origin::signed(1), 0)); - }); - } - - #[test] - fn destroying_asset_balance_with_zero_balance_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(Assets::issue(Origin::signed(1), 100)); - assert_eq!(Assets::balance(0, 2), 0); - assert_noop!(Assets::destroy(Origin::signed(2), 0), Error::::BalanceZero); - }); - } + use super::*; + + use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, parameter_types, weights::Weight, + }; + use sp_core::H256; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type Call = (); + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + impl Trait for Test { + type Event = (); + type Balance = u64; + type AssetId = u32; + } + type Assets = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. 
+ fn new_test_ext() -> sp_io::TestExternalities { + frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() + } + + #[test] + fn issuing_asset_units_to_issuer_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + }); + } + + #[test] + fn querying_total_supply_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 50); + assert_ok!(Assets::transfer(Origin::signed(2), 0, 3, 31)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 19); + assert_eq!(Assets::balance(0, 3), 31); + assert_ok!(Assets::destroy(Origin::signed(3), 0)); + assert_eq!(Assets::total_supply(0), 69); + }); + } + + #[test] + fn transferring_amount_above_available_balance_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 50); + }); + } + + #[test] + fn transferring_amount_more_than_available_balance_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::transfer(Origin::signed(1), 0, 2, 50)); + assert_eq!(Assets::balance(0, 1), 50); + assert_eq!(Assets::balance(0, 2), 50); + assert_ok!(Assets::destroy(Origin::signed(1), 0)); + assert_eq!(Assets::balance(0, 1), 0); + assert_noop!( + Assets::transfer(Origin::signed(1), 0, 1, 50), + Error::::BalanceLow + ); + }); + } + + #[test] + fn transferring_less_than_one_unit_should_not_work() { + new_test_ext().execute_with(|| { + 
assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_noop!( + Assets::transfer(Origin::signed(1), 0, 2, 0), + Error::::AmountZero + ); + }); + } + + #[test] + fn transferring_more_units_than_total_supply_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_noop!( + Assets::transfer(Origin::signed(1), 0, 2, 101), + Error::::BalanceLow + ); + }); + } + + #[test] + fn destroying_asset_balance_with_positive_balance_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 1), 100); + assert_ok!(Assets::destroy(Origin::signed(1), 0)); + }); + } + + #[test] + fn destroying_asset_balance_with_zero_balance_should_not_work() { + new_test_ext().execute_with(|| { + assert_ok!(Assets::issue(Origin::signed(1), 100)); + assert_eq!(Assets::balance(0, 2), 0); + assert_noop!( + Assets::destroy(Origin::signed(2), 0), + Error::::BalanceZero + ); + }); + } } diff --git a/frame/aura/src/lib.rs b/frame/aura/src/lib.rs index e2aafde9ef..df9cf9ad1b 100644 --- a/frame/aura/src/lib.rs +++ b/frame/aura/src/lib.rs @@ -45,187 +45,202 @@ use pallet_timestamp; -use sp_std::{result, prelude::*}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use frame_support::{ - decl_storage, decl_module, Parameter, traits::{Get, FindAuthor}, - ConsensusEngineId, + decl_module, decl_storage, + traits::{FindAuthor, Get}, + ConsensusEngineId, Parameter, }; +use sp_consensus_aura::{ + inherents::{AuraInherentData, INHERENT_IDENTIFIER}, + AuthorityIndex, ConsensusLog, AURA_ENGINE_ID, +}; +use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; use sp_runtime::{ - RuntimeAppPublic, - traits::{SaturatedConversion, Saturating, Zero, Member, IsMember}, generic::DigestItem, + generic::DigestItem, + traits::{IsMember, Member, SaturatedConversion, 
Saturating, Zero}, + RuntimeAppPublic, }; +use sp_std::{prelude::*, result}; use sp_timestamp::OnTimestampSet; -use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; -use sp_consensus_aura::{ - AURA_ENGINE_ID, ConsensusLog, AuthorityIndex, - inherents::{INHERENT_IDENTIFIER, AuraInherentData}, -}; mod mock; mod tests; pub trait Trait: pallet_timestamp::Trait { - /// The identifier type for an authority. - type AuthorityId: Member + Parameter + RuntimeAppPublic + Default; + /// The identifier type for an authority. + type AuthorityId: Member + Parameter + RuntimeAppPublic + Default; } decl_storage! { - trait Store for Module as Aura { - /// The last timestamp. - LastTimestamp get(fn last) build(|_| 0.into()): T::Moment; - - /// The current authorities - pub Authorities get(fn authorities): Vec; - } - add_extra_genesis { - config(authorities): Vec; - build(|config| Module::::initialize_authorities(&config.authorities)) - } + trait Store for Module as Aura { + /// The last timestamp. + LastTimestamp get(fn last) build(|_| 0.into()): T::Moment; + + /// The current authorities + pub Authorities get(fn authorities): Vec; + } + add_extra_genesis { + config(authorities): Vec; + build(|config| Module::::initialize_authorities(&config.authorities)) + } } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { } + pub struct Module for enum Call where origin: T::Origin { } } impl Module { - fn change_authorities(new: Vec) { - >::put(&new); - - let log: DigestItem = DigestItem::Consensus( - AURA_ENGINE_ID, - ConsensusLog::AuthoritiesChange(new).encode() - ); - >::deposit_log(log.into()); - } - - fn initialize_authorities(authorities: &[T::AuthorityId]) { - if !authorities.is_empty() { - assert!(>::get().is_empty(), "Authorities are already initialized!"); - >::put(authorities); - } - } + fn change_authorities(new: Vec) { + >::put(&new); + + let log: DigestItem = DigestItem::Consensus( + AURA_ENGINE_ID, + ConsensusLog::AuthoritiesChange(new).encode(), + ); + >::deposit_log(log.into()); + } + + fn initialize_authorities(authorities: &[T::AuthorityId]) { + if !authorities.is_empty() { + assert!( + >::get().is_empty(), + "Authorities are already initialized!" + ); + >::put(authorities); + } + } } impl sp_runtime::BoundToRuntimeAppPublic for Module { - type Public = T::AuthorityId; + type Public = T::AuthorityId; } impl pallet_session::OneSessionHandler for Module { - type Key = T::AuthorityId; - - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator - { - let authorities = validators.map(|(_, k)| k).collect::>(); - Self::initialize_authorities(&authorities); - } - - fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) - where I: Iterator - { - // instant changes - if changed { - let next_authorities = validators.map(|(_, k)| k).collect::>(); - let last_authorities = >::authorities(); - if next_authorities != last_authorities { - Self::change_authorities(next_authorities); - } - } - } - - fn on_disabled(i: usize) { - let log: DigestItem = DigestItem::Consensus( - AURA_ENGINE_ID, - ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), - ); - - >::deposit_log(log.into()); - } + type Key = T::AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: 
Iterator, + { + let authorities = validators.map(|(_, k)| k).collect::>(); + Self::initialize_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) + where + I: Iterator, + { + // instant changes + if changed { + let next_authorities = validators.map(|(_, k)| k).collect::>(); + let last_authorities = >::authorities(); + if next_authorities != last_authorities { + Self::change_authorities(next_authorities); + } + } + } + + fn on_disabled(i: usize) { + let log: DigestItem = DigestItem::Consensus( + AURA_ENGINE_ID, + ConsensusLog::::OnDisabled(i as AuthorityIndex).encode(), + ); + + >::deposit_log(log.into()); + } } impl FindAuthor for Module { - fn find_author<'a, I>(digests: I) -> Option where - I: 'a + IntoIterator - { - for (id, mut data) in digests.into_iter() { - if id == AURA_ENGINE_ID { - if let Ok(slot_num) = u64::decode(&mut data) { - let author_index = slot_num % Self::authorities().len() as u64; - return Some(author_index as u32) - } - } - } - - None - } + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, + { + for (id, mut data) in digests.into_iter() { + if id == AURA_ENGINE_ID { + if let Ok(slot_num) = u64::decode(&mut data) { + let author_index = slot_num % Self::authorities().len() as u64; + return Some(author_index as u32); + } + } + } + + None + } } impl IsMember for Module { - fn is_member(authority_id: &T::AuthorityId) -> bool { - Self::authorities() - .iter() - .any(|id| id == authority_id) - } + fn is_member(authority_id: &T::AuthorityId) -> bool { + Self::authorities().iter().any(|id| id == authority_id) + } } impl Module { - /// Determine the Aura slot-duration based on the Timestamp module configuration. - pub fn slot_duration() -> T::Moment { - // we double the minimum block-period so each author can always propose within - // the majority of its slot. 
- ::MinimumPeriod::get().saturating_mul(2.into()) - } - - fn on_timestamp_set(now: T::Moment, slot_duration: T::Moment) { - let last = Self::last(); - ::LastTimestamp::put(now); - - if last.is_zero() { - return; - } - - assert!(!slot_duration.is_zero(), "Aura slot duration cannot be zero."); - - let last_slot = last / slot_duration; - let cur_slot = now / slot_duration; - - assert!(last_slot < cur_slot, "Only one block may be authored per slot."); - - // TODO [#3398] Generate offence report for all authorities that skipped their slots. - } + /// Determine the Aura slot-duration based on the Timestamp module configuration. + pub fn slot_duration() -> T::Moment { + // we double the minimum block-period so each author can always propose within + // the majority of its slot. + ::MinimumPeriod::get().saturating_mul(2.into()) + } + + fn on_timestamp_set(now: T::Moment, slot_duration: T::Moment) { + let last = Self::last(); + ::LastTimestamp::put(now); + + if last.is_zero() { + return; + } + + assert!( + !slot_duration.is_zero(), + "Aura slot duration cannot be zero." + ); + + let last_slot = last / slot_duration; + let cur_slot = now / slot_duration; + + assert!( + last_slot < cur_slot, + "Only one block may be authored per slot." + ); + + // TODO [#3398] Generate offence report for all authorities that skipped their slots. + } } impl OnTimestampSet for Module { - fn on_timestamp_set(moment: T::Moment) { - Self::on_timestamp_set(moment, Self::slot_duration()) - } + fn on_timestamp_set(moment: T::Moment) { + Self::on_timestamp_set(moment, Self::slot_duration()) + } } impl ProvideInherent for Module { - type Call = pallet_timestamp::Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_: &InherentData) -> Option { - None - } - - /// Verify the validity of the inherent using the timestamp. 
- fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - let timestamp = match call { - pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), - _ => return Ok(()), - }; - - let timestamp_based_slot = timestamp / Self::slot_duration(); - - let seal_slot = data.aura_inherent_data()?.saturated_into(); - - if timestamp_based_slot == seal_slot { - Ok(()) - } else { - Err(sp_inherents::Error::from("timestamp set in block doesn't match slot in seal").into()) - } - } + type Call = pallet_timestamp::Call; + type Error = MakeFatalError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_: &InherentData) -> Option { + None + } + + /// Verify the validity of the inherent using the timestamp. + fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + let timestamp = match call { + pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), + _ => return Ok(()), + }; + + let timestamp_based_slot = timestamp / Self::slot_duration(); + + let seal_slot = data.aura_inherent_data()?.saturated_into(); + + if timestamp_based_slot == seal_slot { + Ok(()) + } else { + Err( + sp_inherents::Error::from("timestamp set in block doesn't match slot in seal") + .into(), + ) + } + } } diff --git a/frame/aura/src/mock.rs b/frame/aura/src/mock.rs index 2716806b6e..2ed02a8fa3 100644 --- a/frame/aura/src/mock.rs +++ b/frame/aura/src/mock.rs @@ -18,18 +18,19 @@ #![cfg(test)] -use crate::{Trait, Module, GenesisConfig}; +use crate::{GenesisConfig, Module, Trait}; +use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; use sp_consensus_aura::ed25519::AuthorityId; +use sp_core::H256; +use sp_io; use sp_runtime::{ - traits::IdentityLookup, Perbill, - testing::{Header, UintAuthorityId}, + testing::{Header, UintAuthorityId}, + traits::IdentityLookup, + Perbill, }; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; -use sp_io; 
-use sp_core::H256; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. @@ -37,52 +38,59 @@ impl_outer_origin!{ pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MinimumPeriod: u64 = 1; + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const MinimumPeriod: u64 = 1; } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = 
(); + type OnKilledAccount = (); } impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = Aura; - type MinimumPeriod = MinimumPeriod; + type Moment = u64; + type OnTimestampSet = Aura; + type MinimumPeriod = MinimumPeriod; } impl Trait for Test { - type AuthorityId = AuthorityId; + type AuthorityId = AuthorityId; } pub fn new_test_ext(authorities: Vec) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig::{ - authorities: authorities.into_iter().map(|a| UintAuthorityId(a).to_public_key()).collect(), - }.assimilate_storage(&mut t).unwrap(); - t.into() + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + GenesisConfig:: { + authorities: authorities + .into_iter() + .map(|a| UintAuthorityId(a).to_public_key()) + .collect(), + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() } pub type Aura = Module; diff --git a/frame/aura/src/tests.rs b/frame/aura/src/tests.rs index a7cb5503c4..7ba2b21e43 100644 --- a/frame/aura/src/tests.rs +++ b/frame/aura/src/tests.rs @@ -18,12 +18,12 @@ #![cfg(test)] -use crate::mock::{Aura, new_test_ext}; +use crate::mock::{new_test_ext, Aura}; #[test] fn initial_values() { - new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { - assert_eq!(Aura::last(), 0u64); - assert_eq!(Aura::authorities().len(), 4); - }); + new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { + assert_eq!(Aura::last(), 0u64); + assert_eq!(Aura::authorities().len(), 4); + }); } diff --git a/frame/authority-discovery/src/lib.rs b/frame/authority-discovery/src/lib.rs index b3edce4818..f72f734492 100644 --- a/frame/authority-discovery/src/lib.rs +++ b/frame/authority-discovery/src/lib.rs @@ -22,227 +22,240 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; use frame_support::{decl_module, decl_storage}; use sp_authority_discovery::AuthorityId; +use sp_std::prelude::*; /// The module's config trait. pub trait Trait: frame_system::Trait + pallet_session::Trait {} decl_storage! { - trait Store for Module as AuthorityDiscovery { - /// Keys of the current authority set. - Keys get(fn keys): Vec; - } - add_extra_genesis { - config(keys): Vec; - build(|config| Module::::initialize_keys(&config.keys)) - } + trait Store for Module as AuthorityDiscovery { + /// Keys of the current authority set. + Keys get(fn keys): Vec; + } + add_extra_genesis { + config(keys): Vec; + build(|config| Module::::initialize_keys(&config.keys)) + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - } + pub struct Module for enum Call where origin: T::Origin { + } } impl Module { - /// Retrieve authority identifiers of the current authority set. - pub fn authorities() -> Vec { - Keys::get() - } - - fn initialize_keys(keys: &[AuthorityId]) { - if !keys.is_empty() { - assert!(Keys::get().is_empty(), "Keys are already initialized!"); - Keys::put(keys); - } - } + /// Retrieve authority identifiers of the current authority set. 
+ pub fn authorities() -> Vec { + Keys::get() + } + + fn initialize_keys(keys: &[AuthorityId]) { + if !keys.is_empty() { + assert!(Keys::get().is_empty(), "Keys are already initialized!"); + Keys::put(keys); + } + } } impl sp_runtime::BoundToRuntimeAppPublic for Module { - type Public = AuthorityId; + type Public = AuthorityId; } impl pallet_session::OneSessionHandler for Module { - type Key = AuthorityId; - - fn on_genesis_session<'a, I: 'a>(authorities: I) - where - I: Iterator, - { - let keys = authorities.map(|x| x.1).collect::>(); - Self::initialize_keys(&keys); - } - - fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) - where - I: Iterator, - { - // Remember who the authorities are for the new session. - if changed { - Keys::put(validators.map(|x| x.1).collect::>()); - } - } - - fn on_disabled(_i: usize) { - // ignore - } + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(authorities: I) + where + I: Iterator, + { + let keys = authorities.map(|x| x.1).collect::>(); + Self::initialize_keys(&keys); + } + + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) + where + I: Iterator, + { + // Remember who the authorities are for the new session. + if changed { + Keys::put(validators.map(|x| x.1).collect::>()); + } + } + + fn on_disabled(_i: usize) { + // ignore + } } #[cfg(test)] mod tests { - use super::*; - use sp_authority_discovery::{AuthorityPair}; - use sp_application_crypto::Pair; - use sp_core::{crypto::key_types, H256}; - use sp_io::TestExternalities; - use sp_runtime::{ - testing::{Header, UintAuthorityId}, traits::{ConvertInto, IdentityLookup, OpaqueKeys}, - Perbill, KeyTypeId, - }; - use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; - - type AuthorityDiscovery = Module; - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - impl Trait for Test {} - - parameter_types! 
{ - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); - } - - impl pallet_session::Trait for Test { - type SessionManager = (); - type Keys = UintAuthorityId; - type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionHandler = TestSessionHandler; - type Event = (); - type ValidatorId = AuthorityId; - type ValidatorIdOf = ConvertInto; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type NextSessionRotation = pallet_session::PeriodicSessions; - } - - impl pallet_session::historical::Trait for Test { - type FullIdentification = (); - type FullIdentificationOf = (); - } - - pub type BlockNumber = u64; - - parameter_types! { - pub const Period: BlockNumber = 1; - pub const Offset: BlockNumber = 0; - pub const UncleGenerations: u64 = 0; - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = BlockNumber; - type Call = (); - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AuthorityId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - - impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} - } - - pub struct TestSessionHandler; - impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [KeyTypeId] = &[key_types::DUMMY]; - - fn on_new_session( - _changed: bool, - _validators: &[(AuthorityId, Ks)], - _queued_validators: &[(AuthorityId, Ks)], - ) { - } - - fn on_disabled(_validator_index: usize) {} - - fn on_genesis_session(_validators: &[(AuthorityId, Ks)]) {} - } - - #[test] - fn authorities_returns_current_authority_set() { - // The whole authority discovery module ignores account ids, but we still need it for - // `pallet_session::OneSessionHandler::on_new_session`, thus its safe to use the same value everywhere. - let account_id = AuthorityPair::from_seed_slice(vec![10; 32].as_ref()).unwrap().public(); - - let first_authorities: Vec = vec![0, 1].into_iter() - .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) - .map(AuthorityId::from) - .collect(); - - let second_authorities: Vec = vec![2, 3].into_iter() - .map(|i| AuthorityPair::from_seed_slice(vec![i; 32].as_ref()).unwrap().public()) - .map(AuthorityId::from) - .collect(); - - // Needed for `pallet_session::OneSessionHandler::on_new_session`. - let second_authorities_and_account_ids: Vec<(&AuthorityId, AuthorityId)> = second_authorities.clone() - .into_iter() - .map(|id| (&account_id, id)) - .collect(); - - // Build genesis. - let mut t = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - - GenesisConfig { - keys: vec![], - } - .assimilate_storage::(&mut t) - .unwrap(); - - // Create externalities. 
- let mut externalities = TestExternalities::new(t); - - externalities.execute_with(|| { - use pallet_session::OneSessionHandler; - - AuthorityDiscovery::on_genesis_session( - first_authorities.iter().map(|id| (id, id.clone())) - ); - assert_eq!(first_authorities, AuthorityDiscovery::authorities()); - - // When `changed` set to false, the authority set should not be updated. - AuthorityDiscovery::on_new_session( - false, - second_authorities_and_account_ids.clone().into_iter(), - vec![].into_iter(), - ); - assert_eq!(first_authorities, AuthorityDiscovery::authorities()); - - // When `changed` set to true, the authority set should be updated. - AuthorityDiscovery::on_new_session( - true, - second_authorities_and_account_ids.into_iter(), - vec![].into_iter(), - ); - assert_eq!(second_authorities, AuthorityDiscovery::authorities()); - }); - } + use super::*; + use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; + use sp_application_crypto::Pair; + use sp_authority_discovery::AuthorityPair; + use sp_core::{crypto::key_types, H256}; + use sp_io::TestExternalities; + use sp_runtime::{ + testing::{Header, UintAuthorityId}, + traits::{ConvertInto, IdentityLookup, OpaqueKeys}, + KeyTypeId, Perbill, + }; + + type AuthorityDiscovery = Module; + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + impl Trait for Test {} + + parameter_types! 
{ + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); + } + + impl pallet_session::Trait for Test { + type SessionManager = (); + type Keys = UintAuthorityId; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionHandler = TestSessionHandler; + type Event = (); + type ValidatorId = AuthorityId; + type ValidatorIdOf = ConvertInto; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = pallet_session::PeriodicSessions; + } + + impl pallet_session::historical::Trait for Test { + type FullIdentification = (); + type FullIdentificationOf = (); + } + + pub type BlockNumber = u64; + + parameter_types! { + pub const Period: BlockNumber = 1; + pub const Offset: BlockNumber = 0; + pub const UncleGenerations: u64 = 0; + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = BlockNumber; + type Call = (); + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AuthorityId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + + impl_outer_origin! 
{ + pub enum Origin for Test where system = frame_system {} + } + + pub struct TestSessionHandler; + impl pallet_session::SessionHandler for TestSessionHandler { + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[key_types::DUMMY]; + + fn on_new_session( + _changed: bool, + _validators: &[(AuthorityId, Ks)], + _queued_validators: &[(AuthorityId, Ks)], + ) { + } + + fn on_disabled(_validator_index: usize) {} + + fn on_genesis_session(_validators: &[(AuthorityId, Ks)]) {} + } + + #[test] + fn authorities_returns_current_authority_set() { + // The whole authority discovery module ignores account ids, but we still need it for + // `pallet_session::OneSessionHandler::on_new_session`, thus its safe to use the same value everywhere. + let account_id = AuthorityPair::from_seed_slice(vec![10; 32].as_ref()) + .unwrap() + .public(); + + let first_authorities: Vec = vec![0, 1] + .into_iter() + .map(|i| { + AuthorityPair::from_seed_slice(vec![i; 32].as_ref()) + .unwrap() + .public() + }) + .map(AuthorityId::from) + .collect(); + + let second_authorities: Vec = vec![2, 3] + .into_iter() + .map(|i| { + AuthorityPair::from_seed_slice(vec![i; 32].as_ref()) + .unwrap() + .public() + }) + .map(AuthorityId::from) + .collect(); + + // Needed for `pallet_session::OneSessionHandler::on_new_session`. + let second_authorities_and_account_ids: Vec<(&AuthorityId, AuthorityId)> = + second_authorities + .clone() + .into_iter() + .map(|id| (&account_id, id)) + .collect(); + + // Build genesis. + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + GenesisConfig { keys: vec![] } + .assimilate_storage::(&mut t) + .unwrap(); + + // Create externalities. 
+ let mut externalities = TestExternalities::new(t); + + externalities.execute_with(|| { + use pallet_session::OneSessionHandler; + + AuthorityDiscovery::on_genesis_session( + first_authorities.iter().map(|id| (id, id.clone())), + ); + assert_eq!(first_authorities, AuthorityDiscovery::authorities()); + + // When `changed` set to false, the authority set should not be updated. + AuthorityDiscovery::on_new_session( + false, + second_authorities_and_account_ids.clone().into_iter(), + vec![].into_iter(), + ); + assert_eq!(first_authorities, AuthorityDiscovery::authorities()); + + // When `changed` set to true, the authority set should be updated. + AuthorityDiscovery::on_new_session( + true, + second_authorities_and_account_ids.into_iter(), + vec![].into_iter(), + ); + assert_eq!(second_authorities, AuthorityDiscovery::authorities()); + }); + } } diff --git a/frame/authorship/src/lib.rs b/frame/authorship/src/lib.rs index fac4b7d482..2b624bb838 100644 --- a/frame/authorship/src/lib.rs +++ b/frame/authorship/src/lib.rs @@ -20,78 +20,78 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{result, prelude::*}; -use sp_std::collections::btree_set::BTreeSet; -use frame_support::{decl_module, decl_storage, decl_error, dispatch, ensure}; -use frame_support::traits::{FindAuthor, VerifySeal, Get}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +use frame_support::traits::{FindAuthor, Get, VerifySeal}; +use frame_support::weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_module, decl_storage, dispatch, ensure}; use frame_system::ensure_none; +use sp_authorship::{InherentError, UnclesInherentData, INHERENT_IDENTIFIER}; +use sp_inherents::{InherentData, InherentIdentifier, ProvideInherent}; use sp_runtime::traits::{Header as HeaderT, One, Zero}; -use frame_support::weights::{Weight, MINIMUM_WEIGHT, SimpleDispatchInfo}; -use sp_inherents::{InherentIdentifier, ProvideInherent, InherentData}; -use 
sp_authorship::{INHERENT_IDENTIFIER, UnclesInherentData, InherentError}; +use sp_std::collections::btree_set::BTreeSet; +use sp_std::{prelude::*, result}; const MAX_UNCLES: usize = 10; pub trait Trait: frame_system::Trait { - /// Find the author of a block. - type FindAuthor: FindAuthor; - /// The number of blocks back we should accept uncles. - /// This means that we will deal with uncle-parents that are - /// `UncleGenerations + 1` before `now`. - type UncleGenerations: Get; - /// A filter for uncles within a block. This is for implementing - /// further constraints on what uncles can be included, other than their ancestry. - /// - /// For PoW, as long as the seals are checked, there is no need to use anything - /// but the `VerifySeal` implementation as the filter. This is because the cost of making many equivocating - /// uncles is high. - /// - /// For PoS, there is no such limitation, so a further constraint must be imposed - /// beyond a seal check in order to prevent an arbitrary number of - /// equivocating uncles from being included. - /// - /// The `OnePerAuthorPerHeight` filter is good for many slot-based PoS - /// engines. - type FilterUncle: FilterUncle; - /// An event handler for authored blocks. - type EventHandler: EventHandler; + /// Find the author of a block. + type FindAuthor: FindAuthor; + /// The number of blocks back we should accept uncles. + /// This means that we will deal with uncle-parents that are + /// `UncleGenerations + 1` before `now`. + type UncleGenerations: Get; + /// A filter for uncles within a block. This is for implementing + /// further constraints on what uncles can be included, other than their ancestry. + /// + /// For PoW, as long as the seals are checked, there is no need to use anything + /// but the `VerifySeal` implementation as the filter. This is because the cost of making many equivocating + /// uncles is high. 
+ /// + /// For PoS, there is no such limitation, so a further constraint must be imposed + /// beyond a seal check in order to prevent an arbitrary number of + /// equivocating uncles from being included. + /// + /// The `OnePerAuthorPerHeight` filter is good for many slot-based PoS + /// engines. + type FilterUncle: FilterUncle; + /// An event handler for authored blocks. + type EventHandler: EventHandler; } /// An event handler for the authorship module. There is a dummy implementation /// for `()`, which does nothing. #[impl_trait_for_tuples::impl_for_tuples(30)] pub trait EventHandler { - /// Note that the given account ID is the author of the current block. - fn note_author(author: Author); + /// Note that the given account ID is the author of the current block. + fn note_author(author: Author); - /// Note that the given account ID authored the given uncle, and how many - /// blocks older than the current block it is (age >= 0, so siblings are allowed) - fn note_uncle(author: Author, age: BlockNumber); + /// Note that the given account ID authored the given uncle, and how many + /// blocks older than the current block it is (age >= 0, so siblings are allowed) + fn note_uncle(author: Author, age: BlockNumber); } /// Additional filtering on uncles that pass preliminary ancestry checks. /// /// This should do work such as checking seals pub trait FilterUncle { - /// An accumulator of data about uncles included. - /// - /// In practice, this is used to validate uncles against others in the same block. - type Accumulator: Default; - - /// Do additional filtering on a seal-checked uncle block, with the accumulated - /// filter. - fn filter_uncle(header: &Header, acc: &mut Self::Accumulator) - -> Result, &'static str>; + /// An accumulator of data about uncles included. + /// + /// In practice, this is used to validate uncles against others in the same block. 
+ type Accumulator: Default; + + /// Do additional filtering on a seal-checked uncle block, with the accumulated + /// filter. + fn filter_uncle( + header: &Header, + acc: &mut Self::Accumulator, + ) -> Result, &'static str>; } impl FilterUncle for () { - type Accumulator = (); - fn filter_uncle(_: &H, _acc: &mut Self::Accumulator) - -> Result, &'static str> - { - Ok(None) - } + type Accumulator = (); + fn filter_uncle(_: &H, _acc: &mut Self::Accumulator) -> Result, &'static str> { + Ok(None) + } } /// A filter on uncles which verifies seals and does no additional checks. @@ -99,16 +99,12 @@ impl FilterUncle for () { /// equivocating is high. pub struct SealVerify(sp_std::marker::PhantomData); -impl> FilterUncle - for SealVerify -{ - type Accumulator = (); +impl> FilterUncle for SealVerify { + type Accumulator = (); - fn filter_uncle(header: &Header, _acc: &mut ()) - -> Result, &'static str> - { - T::verify_seal(header) - } + fn filter_uncle(header: &Header, _acc: &mut ()) -> Result, &'static str> { + T::verify_seal(header) + } } /// A filter on uncles which verifies seals and ensures that there is only @@ -117,616 +113,627 @@ impl> FilterUncle /// This does O(n log n) work in the number of uncles included. 
pub struct OnePerAuthorPerHeight(sp_std::marker::PhantomData<(T, N)>); -impl FilterUncle - for OnePerAuthorPerHeight +impl FilterUncle for OnePerAuthorPerHeight where - Header: HeaderT + PartialEq, - Header::Number: Ord, - Author: Clone + PartialEq + Ord, - T: VerifySeal, + Header: HeaderT + PartialEq, + Header::Number: Ord, + Author: Clone + PartialEq + Ord, + T: VerifySeal, { - type Accumulator = BTreeSet<(Header::Number, Author)>; - - fn filter_uncle(header: &Header, acc: &mut Self::Accumulator) - -> Result, &'static str> - { - let author = T::verify_seal(header)?; - let number = header.number(); - - if let Some(ref author) = author { - if !acc.insert((number.clone(), author.clone())) { - return Err("more than one uncle per number per author included"); - } - } - - Ok(author) - } + type Accumulator = BTreeSet<(Header::Number, Author)>; + + fn filter_uncle( + header: &Header, + acc: &mut Self::Accumulator, + ) -> Result, &'static str> { + let author = T::verify_seal(header)?; + let number = header.number(); + + if let Some(ref author) = author { + if !acc.insert((number.clone(), author.clone())) { + return Err("more than one uncle per number per author included"); + } + } + + Ok(author) + } } #[derive(Encode, Decode, sp_runtime::RuntimeDebug)] #[cfg_attr(any(feature = "std", test), derive(PartialEq))] enum UncleEntryItem { - InclusionHeight(BlockNumber), - Uncle(Hash, Option), + InclusionHeight(BlockNumber), + Uncle(Hash, Option), } decl_storage! { - trait Store for Module as Authorship { - /// Uncles - Uncles: Vec>; - /// Author of current block. - Author: Option; - /// Whether uncles were already set in this block. - DidSetUncles: bool; - } + trait Store for Module as Authorship { + /// Uncles + Uncles: Vec>; + /// Author of current block. + Author: Option; + /// Whether uncles were already set in this block. + DidSetUncles: bool; + } } decl_error! { - /// Error for the authorship module. - pub enum Error for Module { - /// The uncle parent not in the chain. 
- InvalidUncleParent, - /// Uncles already set in the block. - UnclesAlreadySet, - /// Too many uncles. - TooManyUncles, - /// The uncle is genesis. - GenesisUncle, - /// The uncle is too high in chain. - TooHighUncle, - /// The uncle is already included. - UncleAlreadyIncluded, - /// The uncle isn't recent enough to be included. - OldUncle, - } + /// Error for the authorship module. + pub enum Error for Module { + /// The uncle parent not in the chain. + InvalidUncleParent, + /// Uncles already set in the block. + UnclesAlreadySet, + /// Too many uncles. + TooManyUncles, + /// The uncle is genesis. + GenesisUncle, + /// The uncle is too high in chain. + TooHighUncle, + /// The uncle is already included. + UncleAlreadyIncluded, + /// The uncle isn't recent enough to be included. + OldUncle, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn on_initialize(now: T::BlockNumber) -> Weight { - let uncle_generations = T::UncleGenerations::get(); - // prune uncles that are older than the allowed number of generations. - if uncle_generations <= now { - let minimum_height = now - uncle_generations; - Self::prune_old_uncles(minimum_height) - } - - ::DidSetUncles::put(false); - - T::EventHandler::note_author(Self::author()); - - MINIMUM_WEIGHT - } - - fn on_finalize() { - // ensure we never go to trie with these values. - ::Author::kill(); - ::DidSetUncles::kill(); - } - - /// Provide a set of uncles. - #[weight = SimpleDispatchInfo::FixedMandatory(MINIMUM_WEIGHT)] - fn set_uncles(origin, new_uncles: Vec) -> dispatch::DispatchResult { - ensure_none(origin)?; - ensure!(new_uncles.len() <= MAX_UNCLES, Error::::TooManyUncles); - - if ::DidSetUncles::get() { - Err(Error::::UnclesAlreadySet)? 
- } - ::DidSetUncles::put(true); - - Self::verify_and_import_uncles(new_uncles) - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn on_initialize(now: T::BlockNumber) -> Weight { + let uncle_generations = T::UncleGenerations::get(); + // prune uncles that are older than the allowed number of generations. + if uncle_generations <= now { + let minimum_height = now - uncle_generations; + Self::prune_old_uncles(minimum_height) + } + + ::DidSetUncles::put(false); + + T::EventHandler::note_author(Self::author()); + + MINIMUM_WEIGHT + } + + fn on_finalize() { + // ensure we never go to trie with these values. + ::Author::kill(); + ::DidSetUncles::kill(); + } + + /// Provide a set of uncles. + #[weight = SimpleDispatchInfo::FixedMandatory(MINIMUM_WEIGHT)] + fn set_uncles(origin, new_uncles: Vec) -> dispatch::DispatchResult { + ensure_none(origin)?; + ensure!(new_uncles.len() <= MAX_UNCLES, Error::::TooManyUncles); + + if ::DidSetUncles::get() { + Err(Error::::UnclesAlreadySet)? + } + ::DidSetUncles::put(true); + + Self::verify_and_import_uncles(new_uncles) + } + } } impl Module { - /// Fetch the author of the block. - /// - /// This is safe to invoke in `on_initialize` implementations, as well - /// as afterwards. - pub fn author() -> T::AccountId { - // Check the memoized storage value. 
- if let Some(author) = ::Author::get() { - return author; - } - - let digest = >::digest(); - let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); - if let Some(author) = T::FindAuthor::find_author(pre_runtime_digests) { - ::Author::put(&author); - author - } else { - Default::default() - } - } - - fn verify_and_import_uncles(new_uncles: Vec) -> dispatch::DispatchResult { - let now = >::block_number(); - - let mut uncles = ::Uncles::get(); - uncles.push(UncleEntryItem::InclusionHeight(now)); - - let mut acc: >::Accumulator = Default::default(); - - for uncle in new_uncles { - let prev_uncles = uncles.iter().filter_map(|entry| - match entry { - UncleEntryItem::InclusionHeight(_) => None, - UncleEntryItem::Uncle(h, _) => Some(h), - }); - let author = Self::verify_uncle(&uncle, prev_uncles, &mut acc)?; - let hash = uncle.hash(); - - T::EventHandler::note_uncle( - author.clone().unwrap_or_default(), - now - uncle.number().clone(), - ); - uncles.push(UncleEntryItem::Uncle(hash, author)); - } - - ::Uncles::put(&uncles); - Ok(()) - } - - fn verify_uncle<'a, I: IntoIterator>( - uncle: &T::Header, - existing_uncles: I, - accumulator: &mut >::Accumulator, - ) -> Result, dispatch::DispatchError> - { - let now = >::block_number(); - - let (minimum_height, maximum_height) = { - let uncle_generations = T::UncleGenerations::get(); - let min = if now >= uncle_generations { - now - uncle_generations - } else { - Zero::zero() - }; - - (min, now) - }; - - let hash = uncle.hash(); - - if uncle.number() < &One::one() { - return Err(Error::::GenesisUncle.into()); - } - - if uncle.number() > &maximum_height { - return Err(Error::::TooHighUncle.into()); - } - - { - let parent_number = uncle.number().clone() - One::one(); - let parent_hash = >::block_hash(&parent_number); - if &parent_hash != uncle.parent_hash() { - return Err(Error::::InvalidUncleParent.into()); - } - } - - if uncle.number() < &minimum_height { - return Err(Error::::OldUncle.into()); - } - - 
let duplicate = existing_uncles.into_iter().find(|h| **h == hash).is_some(); - let in_chain = >::block_hash(uncle.number()) == hash; - - if duplicate || in_chain { - return Err(Error::::UncleAlreadyIncluded.into()) - } - - // check uncle validity. - T::FilterUncle::filter_uncle(&uncle, accumulator).map_err(|e| Into::into(e)) - } - - fn prune_old_uncles(minimum_height: T::BlockNumber) { - let mut uncles = ::Uncles::get(); - let prune_entries = uncles.iter().take_while(|item| match item { - UncleEntryItem::Uncle(_, _) => true, - UncleEntryItem::InclusionHeight(height) => height < &minimum_height, - }); - let prune_index = prune_entries.count(); - - let _ = uncles.drain(..prune_index); - ::Uncles::put(uncles); - } + /// Fetch the author of the block. + /// + /// This is safe to invoke in `on_initialize` implementations, as well + /// as afterwards. + pub fn author() -> T::AccountId { + // Check the memoized storage value. + if let Some(author) = ::Author::get() { + return author; + } + + let digest = >::digest(); + let pre_runtime_digests = digest.logs.iter().filter_map(|d| d.as_pre_runtime()); + if let Some(author) = T::FindAuthor::find_author(pre_runtime_digests) { + ::Author::put(&author); + author + } else { + Default::default() + } + } + + fn verify_and_import_uncles(new_uncles: Vec) -> dispatch::DispatchResult { + let now = >::block_number(); + + let mut uncles = ::Uncles::get(); + uncles.push(UncleEntryItem::InclusionHeight(now)); + + let mut acc: >::Accumulator = Default::default(); + + for uncle in new_uncles { + let prev_uncles = uncles.iter().filter_map(|entry| match entry { + UncleEntryItem::InclusionHeight(_) => None, + UncleEntryItem::Uncle(h, _) => Some(h), + }); + let author = Self::verify_uncle(&uncle, prev_uncles, &mut acc)?; + let hash = uncle.hash(); + + T::EventHandler::note_uncle( + author.clone().unwrap_or_default(), + now - uncle.number().clone(), + ); + uncles.push(UncleEntryItem::Uncle(hash, author)); + } + + ::Uncles::put(&uncles); + Ok(()) 
+ } + + fn verify_uncle<'a, I: IntoIterator>( + uncle: &T::Header, + existing_uncles: I, + accumulator: &mut >::Accumulator, + ) -> Result, dispatch::DispatchError> { + let now = >::block_number(); + + let (minimum_height, maximum_height) = { + let uncle_generations = T::UncleGenerations::get(); + let min = if now >= uncle_generations { + now - uncle_generations + } else { + Zero::zero() + }; + + (min, now) + }; + + let hash = uncle.hash(); + + if uncle.number() < &One::one() { + return Err(Error::::GenesisUncle.into()); + } + + if uncle.number() > &maximum_height { + return Err(Error::::TooHighUncle.into()); + } + + { + let parent_number = uncle.number().clone() - One::one(); + let parent_hash = >::block_hash(&parent_number); + if &parent_hash != uncle.parent_hash() { + return Err(Error::::InvalidUncleParent.into()); + } + } + + if uncle.number() < &minimum_height { + return Err(Error::::OldUncle.into()); + } + + let duplicate = existing_uncles.into_iter().find(|h| **h == hash).is_some(); + let in_chain = >::block_hash(uncle.number()) == hash; + + if duplicate || in_chain { + return Err(Error::::UncleAlreadyIncluded.into()); + } + + // check uncle validity. 
+ T::FilterUncle::filter_uncle(&uncle, accumulator).map_err(|e| Into::into(e)) + } + + fn prune_old_uncles(minimum_height: T::BlockNumber) { + let mut uncles = ::Uncles::get(); + let prune_entries = uncles.iter().take_while(|item| match item { + UncleEntryItem::Uncle(_, _) => true, + UncleEntryItem::InclusionHeight(height) => height < &minimum_height, + }); + let prune_index = prune_entries.count(); + + let _ = uncles.drain(..prune_index); + ::Uncles::put(uncles); + } } impl ProvideInherent for Module { - type Call = Call; - type Error = InherentError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let uncles = data.uncles().unwrap_or_default(); - let mut set_uncles = Vec::new(); - - if !uncles.is_empty() { - let prev_uncles = ::Uncles::get(); - let mut existing_hashes: Vec<_> = prev_uncles.into_iter().filter_map(|entry| - match entry { - UncleEntryItem::InclusionHeight(_) => None, - UncleEntryItem::Uncle(h, _) => Some(h), - } - ).collect(); - - let mut acc: >::Accumulator = Default::default(); - - for uncle in uncles { - match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) { - Ok(_) => { - let hash = uncle.hash(); - set_uncles.push(uncle); - existing_hashes.push(hash); - - if set_uncles.len() == MAX_UNCLES { - break - } - } - Err(_) => { - // skip this uncle - } - } - } - } - - if set_uncles.is_empty() { - None - } else { - Some(Call::set_uncles(set_uncles)) - } - } - - fn check_inherent(call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { - match call { - Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES => { - Err(InherentError::Uncles(Error::::TooManyUncles.as_str().into())) - }, - _ => { - Ok(()) - }, - } - } + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let uncles = data.uncles().unwrap_or_default(); + let 
mut set_uncles = Vec::new(); + + if !uncles.is_empty() { + let prev_uncles = ::Uncles::get(); + let mut existing_hashes: Vec<_> = prev_uncles + .into_iter() + .filter_map(|entry| match entry { + UncleEntryItem::InclusionHeight(_) => None, + UncleEntryItem::Uncle(h, _) => Some(h), + }) + .collect(); + + let mut acc: >::Accumulator = Default::default(); + + for uncle in uncles { + match Self::verify_uncle(&uncle, &existing_hashes, &mut acc) { + Ok(_) => { + let hash = uncle.hash(); + set_uncles.push(uncle); + existing_hashes.push(hash); + + if set_uncles.len() == MAX_UNCLES { + break; + } + } + Err(_) => { + // skip this uncle + } + } + } + } + + if set_uncles.is_empty() { + None + } else { + Some(Call::set_uncles(set_uncles)) + } + } + + fn check_inherent(call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { + match call { + Call::set_uncles(ref uncles) if uncles.len() > MAX_UNCLES => Err( + InherentError::Uncles(Error::::TooManyUncles.as_str().into()), + ), + _ => Ok(()), + } + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::H256; - use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup}, testing::Header, generic::DigestItem, Perbill, - }; - use frame_support::{parameter_types, impl_outer_origin, ConsensusEngineId, weights::Weight}; - - impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - - parameter_types! { - pub const UncleGenerations: u64 = 5; - } - - impl Trait for Test { - type FindAuthor = AuthorGiven; - type UncleGenerations = UncleGenerations; - type FilterUncle = SealVerify; - type EventHandler = (); - } - - type System = frame_system::Module; - type Authorship = Module; - - const TEST_ID: ConsensusEngineId = [1, 2, 3, 4]; - - pub struct AuthorGiven; - - impl FindAuthor for AuthorGiven { - fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator - { - for (id, data) in digests { - if id == TEST_ID { - return u64::decode(&mut &data[..]).ok(); - } - } - - None - } - } - - pub struct VerifyBlock; - - impl VerifySeal for VerifyBlock { - fn verify_seal(header: &Header) -> Result, &'static str> { - let pre_runtime_digests = header.digest.logs.iter().filter_map(|d| d.as_pre_runtime()); - let seals = header.digest.logs.iter().filter_map(|d| d.as_seal()); - - let author = match AuthorGiven::find_author(pre_runtime_digests) { - None => return Err("no author"), - Some(author) => author, - }; - - for (id, seal) in seals { - if id == TEST_ID { - match u64::decode(&mut &seal[..]) { - Err(_) => return Err("wrong seal"), - Ok(a) => { - 
if a != author { - return Err("wrong author in seal"); - } - break - } - } - } - } - - Ok(Some(author)) - } - } - - fn seal_header(mut header: Header, author: u64) -> Header { - { - let digest = header.digest_mut(); - digest.logs.push(DigestItem::PreRuntime(TEST_ID, author.encode())); - digest.logs.push(DigestItem::Seal(TEST_ID, author.encode())); - } - - header - } - - fn create_header(number: u64, parent_hash: H256, state_root: H256) -> Header { - Header::new( - number, - Default::default(), - state_root, - parent_hash, - Default::default(), - ) - } - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - t.into() - } - - #[test] - fn prune_old_uncles_works() { - use UncleEntryItem::*; - new_test_ext().execute_with(|| { - let hash = Default::default(); - let author = Default::default(); - let uncles = vec![ - InclusionHeight(1u64), Uncle(hash, Some(author)), Uncle(hash, None), Uncle(hash, None), - InclusionHeight(2u64), Uncle(hash, None), - InclusionHeight(3u64), Uncle(hash, None), - ]; - - ::Uncles::put(uncles); - Authorship::prune_old_uncles(3); - - let uncles = ::Uncles::get(); - assert_eq!(uncles, vec![InclusionHeight(3u64), Uncle(hash, None)]); - }) - } - - #[test] - fn rejects_bad_uncles() { - new_test_ext().execute_with(|| { - let author_a = 69; - - struct CanonChain { - inner: Vec
, - } - - impl CanonChain { - fn best_hash(&self) -> H256 { - self.inner.last().unwrap().hash() - } - - fn canon_hash(&self, index: usize) -> H256 { - self.inner[index].hash() - } - - fn header(&self, index: usize) -> &Header { - &self.inner[index] - } - - fn push(&mut self, header: Header) { - self.inner.push(header) - } - } - - let mut canon_chain = CanonChain { - inner: vec![seal_header(create_header(0, Default::default(), Default::default()), 999)], - }; - - let initialize_block = |number, hash: H256| System::initialize( - &number, - &hash, - &Default::default(), - &Default::default(), - Default::default() - ); - - for number in 1..8 { - initialize_block(number, canon_chain.best_hash()); - let header = seal_header(System::finalize(), author_a); - canon_chain.push(header); - } - - // initialize so system context is set up correctly. - initialize_block(8, canon_chain.best_hash()); - - // 2 of the same uncle at once - { - let uncle_a = seal_header( - create_header(3, canon_chain.canon_hash(2), [1; 32].into()), - author_a, - ); - assert_eq!( - Authorship::verify_and_import_uncles(vec![uncle_a.clone(), uncle_a.clone()]), - Err(Error::::UncleAlreadyIncluded.into()), - ); - } - - // 2 of the same uncle at different times. - { - let uncle_a = seal_header( - create_header(3, canon_chain.canon_hash(2), [1; 32].into()), - author_a, - ); - - assert!(Authorship::verify_and_import_uncles(vec![uncle_a.clone()]).is_ok()); - - assert_eq!( - Authorship::verify_and_import_uncles(vec![uncle_a.clone()]), - Err(Error::::UncleAlreadyIncluded.into()), - ); - } - - // same uncle as ancestor. - { - let uncle_clone = canon_chain.header(5).clone(); - - assert_eq!( - Authorship::verify_and_import_uncles(vec![uncle_clone]), - Err(Error::::UncleAlreadyIncluded.into()), - ); - } - - // uncle without valid seal. 
- { - let unsealed = create_header(3, canon_chain.canon_hash(2), [2; 32].into()); - assert_eq!( - Authorship::verify_and_import_uncles(vec![unsealed]), - Err("no author".into()), - ); - } - - // old uncles can't get in. - { - assert_eq!(System::block_number(), 8); - - let gen_2 = seal_header( - create_header(2, canon_chain.canon_hash(1), [3; 32].into()), - author_a, - ); - - assert_eq!( - Authorship::verify_and_import_uncles(vec![gen_2]), - Err(Error::::OldUncle.into()), - ); - } - - // siblings are also allowed - { - let other_8 = seal_header( - create_header(8, canon_chain.canon_hash(7), [1; 32].into()), - author_a, - ); - - assert!(Authorship::verify_and_import_uncles(vec![other_8]).is_ok()); - } - }); - } - - #[test] - fn sets_author_lazily() { - new_test_ext().execute_with(|| { - let author = 42; - let mut header = seal_header( - create_header(1, Default::default(), [1; 32].into()), - author, - ); - - header.digest_mut().pop(); // pop the seal off. - System::initialize( - &1, - &Default::default(), - &Default::default(), - header.digest(), - Default::default(), - ); - - assert_eq!(Authorship::author(), author); - }); - } - - #[test] - fn one_uncle_per_author_per_number() { - type Filter = OnePerAuthorPerHeight; - - let author_a = 42; - let author_b = 43; - - let mut acc: >::Accumulator = Default::default(); - let header_a1 = seal_header( - create_header(1, Default::default(), [1; 32].into()), - author_a, - ); - let header_b1 = seal_header( - create_header(1, Default::default(), [1; 32].into()), - author_b, - ); - - let header_a2_1 = seal_header( - create_header(2, Default::default(), [1; 32].into()), - author_a, - ); - let header_a2_2 = seal_header( - create_header(2, Default::default(), [2; 32].into()), - author_a, - ); - - let mut check_filter = move |uncle| { - Filter::filter_uncle(uncle, &mut acc) - }; - - // same height, different author is OK. 
- assert_eq!(check_filter(&header_a1), Ok(Some(author_a))); - assert_eq!(check_filter(&header_b1), Ok(Some(author_b))); - - // same author, different height. - assert_eq!(check_filter(&header_a2_1), Ok(Some(author_a))); - - // same author, same height (author a, height 2) - assert!(check_filter(&header_a2_2).is_err()); - } + use super::*; + use frame_support::{impl_outer_origin, parameter_types, weights::Weight, ConsensusEngineId}; + use sp_core::H256; + use sp_runtime::{ + generic::DigestItem, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + + parameter_types! 
{ + pub const UncleGenerations: u64 = 5; + } + + impl Trait for Test { + type FindAuthor = AuthorGiven; + type UncleGenerations = UncleGenerations; + type FilterUncle = SealVerify; + type EventHandler = (); + } + + type System = frame_system::Module; + type Authorship = Module; + + const TEST_ID: ConsensusEngineId = [1, 2, 3, 4]; + + pub struct AuthorGiven; + + impl FindAuthor for AuthorGiven { + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, + { + for (id, data) in digests { + if id == TEST_ID { + return u64::decode(&mut &data[..]).ok(); + } + } + + None + } + } + + pub struct VerifyBlock; + + impl VerifySeal for VerifyBlock { + fn verify_seal(header: &Header) -> Result, &'static str> { + let pre_runtime_digests = header.digest.logs.iter().filter_map(|d| d.as_pre_runtime()); + let seals = header.digest.logs.iter().filter_map(|d| d.as_seal()); + + let author = match AuthorGiven::find_author(pre_runtime_digests) { + None => return Err("no author"), + Some(author) => author, + }; + + for (id, seal) in seals { + if id == TEST_ID { + match u64::decode(&mut &seal[..]) { + Err(_) => return Err("wrong seal"), + Ok(a) => { + if a != author { + return Err("wrong author in seal"); + } + break; + } + } + } + } + + Ok(Some(author)) + } + } + + fn seal_header(mut header: Header, author: u64) -> Header { + { + let digest = header.digest_mut(); + digest + .logs + .push(DigestItem::PreRuntime(TEST_ID, author.encode())); + digest.logs.push(DigestItem::Seal(TEST_ID, author.encode())); + } + + header + } + + fn create_header(number: u64, parent_hash: H256, state_root: H256) -> Header { + Header::new( + number, + Default::default(), + state_root, + parent_hash, + Default::default(), + ) + } + + fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + t.into() + } + + #[test] + fn prune_old_uncles_works() { + use UncleEntryItem::*; + new_test_ext().execute_with(|| { + let hash = 
Default::default(); + let author = Default::default(); + let uncles = vec![ + InclusionHeight(1u64), + Uncle(hash, Some(author)), + Uncle(hash, None), + Uncle(hash, None), + InclusionHeight(2u64), + Uncle(hash, None), + InclusionHeight(3u64), + Uncle(hash, None), + ]; + + ::Uncles::put(uncles); + Authorship::prune_old_uncles(3); + + let uncles = ::Uncles::get(); + assert_eq!(uncles, vec![InclusionHeight(3u64), Uncle(hash, None)]); + }) + } + + #[test] + fn rejects_bad_uncles() { + new_test_ext().execute_with(|| { + let author_a = 69; + + struct CanonChain { + inner: Vec
, + } + + impl CanonChain { + fn best_hash(&self) -> H256 { + self.inner.last().unwrap().hash() + } + + fn canon_hash(&self, index: usize) -> H256 { + self.inner[index].hash() + } + + fn header(&self, index: usize) -> &Header { + &self.inner[index] + } + + fn push(&mut self, header: Header) { + self.inner.push(header) + } + } + + let mut canon_chain = CanonChain { + inner: vec![seal_header( + create_header(0, Default::default(), Default::default()), + 999, + )], + }; + + let initialize_block = |number, hash: H256| { + System::initialize( + &number, + &hash, + &Default::default(), + &Default::default(), + Default::default(), + ) + }; + + for number in 1..8 { + initialize_block(number, canon_chain.best_hash()); + let header = seal_header(System::finalize(), author_a); + canon_chain.push(header); + } + + // initialize so system context is set up correctly. + initialize_block(8, canon_chain.best_hash()); + + // 2 of the same uncle at once + { + let uncle_a = seal_header( + create_header(3, canon_chain.canon_hash(2), [1; 32].into()), + author_a, + ); + assert_eq!( + Authorship::verify_and_import_uncles(vec![uncle_a.clone(), uncle_a.clone()]), + Err(Error::::UncleAlreadyIncluded.into()), + ); + } + + // 2 of the same uncle at different times. + { + let uncle_a = seal_header( + create_header(3, canon_chain.canon_hash(2), [1; 32].into()), + author_a, + ); + + assert!(Authorship::verify_and_import_uncles(vec![uncle_a.clone()]).is_ok()); + + assert_eq!( + Authorship::verify_and_import_uncles(vec![uncle_a.clone()]), + Err(Error::::UncleAlreadyIncluded.into()), + ); + } + + // same uncle as ancestor. + { + let uncle_clone = canon_chain.header(5).clone(); + + assert_eq!( + Authorship::verify_and_import_uncles(vec![uncle_clone]), + Err(Error::::UncleAlreadyIncluded.into()), + ); + } + + // uncle without valid seal. 
+ { + let unsealed = create_header(3, canon_chain.canon_hash(2), [2; 32].into()); + assert_eq!( + Authorship::verify_and_import_uncles(vec![unsealed]), + Err("no author".into()), + ); + } + + // old uncles can't get in. + { + assert_eq!(System::block_number(), 8); + + let gen_2 = seal_header( + create_header(2, canon_chain.canon_hash(1), [3; 32].into()), + author_a, + ); + + assert_eq!( + Authorship::verify_and_import_uncles(vec![gen_2]), + Err(Error::::OldUncle.into()), + ); + } + + // siblings are also allowed + { + let other_8 = seal_header( + create_header(8, canon_chain.canon_hash(7), [1; 32].into()), + author_a, + ); + + assert!(Authorship::verify_and_import_uncles(vec![other_8]).is_ok()); + } + }); + } + + #[test] + fn sets_author_lazily() { + new_test_ext().execute_with(|| { + let author = 42; + let mut header = + seal_header(create_header(1, Default::default(), [1; 32].into()), author); + + header.digest_mut().pop(); // pop the seal off. + System::initialize( + &1, + &Default::default(), + &Default::default(), + header.digest(), + Default::default(), + ); + + assert_eq!(Authorship::author(), author); + }); + } + + #[test] + fn one_uncle_per_author_per_number() { + type Filter = OnePerAuthorPerHeight; + + let author_a = 42; + let author_b = 43; + + let mut acc: >::Accumulator = Default::default(); + let header_a1 = seal_header( + create_header(1, Default::default(), [1; 32].into()), + author_a, + ); + let header_b1 = seal_header( + create_header(1, Default::default(), [1; 32].into()), + author_b, + ); + + let header_a2_1 = seal_header( + create_header(2, Default::default(), [1; 32].into()), + author_a, + ); + let header_a2_2 = seal_header( + create_header(2, Default::default(), [2; 32].into()), + author_a, + ); + + let mut check_filter = move |uncle| Filter::filter_uncle(uncle, &mut acc); + + // same height, different author is OK. 
+ assert_eq!(check_filter(&header_a1), Ok(Some(author_a))); + assert_eq!(check_filter(&header_b1), Ok(Some(author_b))); + + // same author, different height. + assert_eq!(check_filter(&header_a2_1), Ok(Some(author_a))); + + // same author, same height (author a, height 2) + assert!(check_filter(&header_a2_2).is_err()); + } } diff --git a/frame/babe/src/lib.rs b/frame/babe/src/lib.rs index ea2f97e7a9..f0c82d6cdf 100644 --- a/frame/babe/src/lib.rs +++ b/frame/babe/src/lib.rs @@ -22,28 +22,29 @@ use pallet_timestamp; -use sp_std::{result, prelude::*}; use frame_support::{ - decl_storage, decl_module, traits::{FindAuthor, Get, Randomness as RandomnessT}, - weights::{Weight, MINIMUM_WEIGHT}, + decl_module, decl_storage, + traits::{FindAuthor, Get, Randomness as RandomnessT}, + weights::{Weight, MINIMUM_WEIGHT}, }; -use sp_timestamp::OnTimestampSet; +use sp_runtime::traits::{Hash, IsMember, One, SaturatedConversion, Saturating}; use sp_runtime::{generic::DigestItem, ConsensusEngineId, Perbill}; -use sp_runtime::traits::{IsMember, SaturatedConversion, Saturating, Hash, One}; use sp_staking::{ - SessionIndex, - offence::{Offence, Kind}, + offence::{Kind, Offence}, + SessionIndex, }; +use sp_std::{prelude::*, result}; +use sp_timestamp::OnTimestampSet; -use codec::{Encode, Decode}; -use sp_inherents::{InherentIdentifier, InherentData, ProvideInherent, MakeFatalError}; +use codec::{Decode, Encode}; use sp_consensus_babe::{ - BABE_ENGINE_ID, ConsensusLog, BabeAuthorityWeight, SlotNumber, - inherents::{INHERENT_IDENTIFIER, BabeInherentData}, - digests::{NextEpochDescriptor, RawPreDigest}, + digests::{NextEpochDescriptor, RawPreDigest}, + inherents::{BabeInherentData, INHERENT_IDENTIFIER}, + BabeAuthorityWeight, ConsensusLog, SlotNumber, BABE_ENGINE_ID, }; +pub use sp_consensus_babe::{AuthorityId, PUBLIC_KEY_LENGTH, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH}; use sp_consensus_vrf::schnorrkel; -pub use sp_consensus_babe::{AuthorityId, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH, 
PUBLIC_KEY_LENGTH}; +use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; #[cfg(all(feature = "std", test))] mod tests; @@ -52,29 +53,29 @@ mod tests; mod mock; pub trait Trait: pallet_timestamp::Trait { - /// The amount of time, in slots, that each epoch should last. - type EpochDuration: Get; - - /// The expected average block time at which BABE should be creating - /// blocks. Since BABE is probabilistic it is not trivial to figure out - /// what the expected average block time should be based on the slot - /// duration and the security parameter `c` (where `1 - c` represents - /// the probability of a slot being empty). - type ExpectedBlockTime: Get; - - /// BABE requires some logic to be triggered on every block to query for whether an epoch - /// has ended and to perform the transition to the next epoch. - /// - /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be used - /// when no other module is responsible for changing authority set. - type EpochChangeTrigger: EpochChangeTrigger; + /// The amount of time, in slots, that each epoch should last. + type EpochDuration: Get; + + /// The expected average block time at which BABE should be creating + /// blocks. Since BABE is probabilistic it is not trivial to figure out + /// what the expected average block time should be based on the slot + /// duration and the security parameter `c` (where `1 - c` represents + /// the probability of a slot being empty). + type ExpectedBlockTime: Get; + + /// BABE requires some logic to be triggered on every block to query for whether an epoch + /// has ended and to perform the transition to the next epoch. + /// + /// Typically, the `ExternalTrigger` type should be used. An internal trigger should only be used + /// when no other module is responsible for changing authority set. + type EpochChangeTrigger: EpochChangeTrigger; } /// Trigger an epoch change, if any should take place. 
pub trait EpochChangeTrigger { - /// Trigger an epoch change, if any should take place. This should be called - /// during every block, after initialization is done. - fn trigger(now: T::BlockNumber); + /// Trigger an epoch change, if any should take place. This should be called + /// during every block, after initialization is done. + fn trigger(now: T::BlockNumber); } /// A type signifying to BABE that an external trigger @@ -82,7 +83,7 @@ pub trait EpochChangeTrigger { pub struct ExternalTrigger; impl EpochChangeTrigger for ExternalTrigger { - fn trigger(_: T::BlockNumber) { } // nothing - trigger is external. + fn trigger(_: T::BlockNumber) {} // nothing - trigger is external. } /// A type signifying to BABE that it should perform epoch changes @@ -90,14 +91,14 @@ impl EpochChangeTrigger for ExternalTrigger { pub struct SameAuthoritiesForever; impl EpochChangeTrigger for SameAuthoritiesForever { - fn trigger(now: T::BlockNumber) { - if >::should_epoch_change(now) { - let authorities = >::authorities(); - let next_authorities = authorities.clone(); - - >::enact_epoch_change(authorities, next_authorities); - } - } + fn trigger(now: T::BlockNumber) { + if >::should_epoch_change(now) { + let authorities = >::authorities(); + let next_authorities = authorities.clone(); + + >::enact_epoch_change(authorities, next_authorities); + } + } } const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; @@ -105,151 +106,152 @@ const UNDER_CONSTRUCTION_SEGMENT_LENGTH: usize = 256; type MaybeVrf = Option; decl_storage! { - trait Store for Module as Babe { - /// Current epoch index. - pub EpochIndex get(fn epoch_index): u64; - - /// Current epoch authorities. - pub Authorities get(fn authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; - - /// The slot at which the first epoch actually started. This is 0 - /// until the first block of the chain. - pub GenesisSlot get(fn genesis_slot): u64; - - /// Current slot number. 
- pub CurrentSlot get(fn current_slot): u64; - - /// The epoch randomness for the *current* epoch. - /// - /// # Security - /// - /// This MUST NOT be used for gambling, as it can be influenced by a - /// malicious validator in the short term. It MAY be used in many - /// cryptographic protocols, however, so long as one remembers that this - /// (like everything else on-chain) it is public. For example, it can be - /// used where a number is needed that cannot have been chosen by an - /// adversary, for purposes such as public-coin zero-knowledge proofs. - // NOTE: the following fields don't use the constants to define the - // array size because the metadata API currently doesn't resolve the - // variable to its underlying value. - pub Randomness get(fn randomness): schnorrkel::Randomness; - - /// Next epoch randomness. - NextRandomness: schnorrkel::Randomness; - - /// Randomness under construction. - /// - /// We make a tradeoff between storage accesses and list length. - /// We store the under-construction randomness in segments of up to - /// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`. - /// - /// Once a segment reaches this length, we begin the next one. - /// We reset all segments and return to `0` at the beginning of every - /// epoch. - SegmentIndex build(|_| 0): u32; - UnderConstruction: map hasher(twox_64_concat) u32 => Vec; - - /// Temporary value (cleared at block finalization) which is `Some` - /// if per-block initialization has already been called for current block. - Initialized get(fn initialized): Option; - - /// How late the current block is compared to its parent. - /// - /// This entry is populated as part of block execution and is cleaned up - /// on block finalization. Querying this storage entry outside of block - /// execution context should always yield zero. 
- Lateness get(fn lateness): T::BlockNumber; - } - add_extra_genesis { - config(authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; - build(|config| Module::::initialize_authorities(&config.authorities)) - } + trait Store for Module as Babe { + /// Current epoch index. + pub EpochIndex get(fn epoch_index): u64; + + /// Current epoch authorities. + pub Authorities get(fn authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; + + /// The slot at which the first epoch actually started. This is 0 + /// until the first block of the chain. + pub GenesisSlot get(fn genesis_slot): u64; + + /// Current slot number. + pub CurrentSlot get(fn current_slot): u64; + + /// The epoch randomness for the *current* epoch. + /// + /// # Security + /// + /// This MUST NOT be used for gambling, as it can be influenced by a + /// malicious validator in the short term. It MAY be used in many + /// cryptographic protocols, however, so long as one remembers that this + /// (like everything else on-chain) it is public. For example, it can be + /// used where a number is needed that cannot have been chosen by an + /// adversary, for purposes such as public-coin zero-knowledge proofs. + // NOTE: the following fields don't use the constants to define the + // array size because the metadata API currently doesn't resolve the + // variable to its underlying value. + pub Randomness get(fn randomness): schnorrkel::Randomness; + + /// Next epoch randomness. + NextRandomness: schnorrkel::Randomness; + + /// Randomness under construction. + /// + /// We make a tradeoff between storage accesses and list length. + /// We store the under-construction randomness in segments of up to + /// `UNDER_CONSTRUCTION_SEGMENT_LENGTH`. + /// + /// Once a segment reaches this length, we begin the next one. + /// We reset all segments and return to `0` at the beginning of every + /// epoch. 
+ SegmentIndex build(|_| 0): u32; + UnderConstruction: map hasher(twox_64_concat) u32 => Vec; + + /// Temporary value (cleared at block finalization) which is `Some` + /// if per-block initialization has already been called for current block. + Initialized get(fn initialized): Option; + + /// How late the current block is compared to its parent. + /// + /// This entry is populated as part of block execution and is cleaned up + /// on block finalization. Querying this storage entry outside of block + /// execution context should always yield zero. + Lateness get(fn lateness): T::BlockNumber; + } + add_extra_genesis { + config(authorities): Vec<(AuthorityId, BabeAuthorityWeight)>; + build(|config| Module::::initialize_authorities(&config.authorities)) + } } decl_module! { - /// The BABE Pallet - pub struct Module for enum Call where origin: T::Origin { - /// The number of **slots** that an epoch takes. We couple sessions to - /// epochs, i.e. we start a new session once the new epoch begins. - const EpochDuration: u64 = T::EpochDuration::get(); - - /// The expected average block time at which BABE should be creating - /// blocks. Since BABE is probabilistic it is not trivial to figure out - /// what the expected average block time should be based on the slot - /// duration and the security parameter `c` (where `1 - c` represents - /// the probability of a slot being empty). - const ExpectedBlockTime: T::Moment = T::ExpectedBlockTime::get(); - - /// Initialization - fn on_initialize(now: T::BlockNumber) -> Weight { - Self::do_initialize(now); - - MINIMUM_WEIGHT - } - - /// Block finalization - fn on_finalize() { - // at the end of the block, we can safely include the new VRF output - // from this block into the under-construction randomness. If we've determined - // that this block was the first in a new epoch, the changeover logic has - // already occurred at this point, so the under-construction randomness - // will only contain outputs from the right epoch. 
- if let Some(Some(vrf_output)) = Initialized::take() { - Self::deposit_vrf_output(&vrf_output); - } - - // remove temporary "environment" entry from storage - Lateness::::kill(); - } - } + /// The BABE Pallet + pub struct Module for enum Call where origin: T::Origin { + /// The number of **slots** that an epoch takes. We couple sessions to + /// epochs, i.e. we start a new session once the new epoch begins. + const EpochDuration: u64 = T::EpochDuration::get(); + + /// The expected average block time at which BABE should be creating + /// blocks. Since BABE is probabilistic it is not trivial to figure out + /// what the expected average block time should be based on the slot + /// duration and the security parameter `c` (where `1 - c` represents + /// the probability of a slot being empty). + const ExpectedBlockTime: T::Moment = T::ExpectedBlockTime::get(); + + /// Initialization + fn on_initialize(now: T::BlockNumber) -> Weight { + Self::do_initialize(now); + + MINIMUM_WEIGHT + } + + /// Block finalization + fn on_finalize() { + // at the end of the block, we can safely include the new VRF output + // from this block into the under-construction randomness. If we've determined + // that this block was the first in a new epoch, the changeover logic has + // already occurred at this point, so the under-construction randomness + // will only contain outputs from the right epoch. 
+ if let Some(Some(vrf_output)) = Initialized::take() { + Self::deposit_vrf_output(&vrf_output); + } + + // remove temporary "environment" entry from storage + Lateness::::kill(); + } + } } impl RandomnessT<::Hash> for Module { - fn random(subject: &[u8]) -> T::Hash { - let mut subject = subject.to_vec(); - subject.reserve(VRF_OUTPUT_LENGTH); - subject.extend_from_slice(&Self::randomness()[..]); + fn random(subject: &[u8]) -> T::Hash { + let mut subject = subject.to_vec(); + subject.reserve(VRF_OUTPUT_LENGTH); + subject.extend_from_slice(&Self::randomness()[..]); - ::Hashing::hash(&subject[..]) - } + ::Hashing::hash(&subject[..]) + } } /// A BABE public key pub type BabeKey = [u8; PUBLIC_KEY_LENGTH]; impl FindAuthor for Module { - fn find_author<'a, I>(digests: I) -> Option where - I: 'a + IntoIterator - { - for (id, mut data) in digests.into_iter() { - if id == BABE_ENGINE_ID { - let pre_digest: RawPreDigest = RawPreDigest::decode(&mut data).ok()?; - return Some(pre_digest.authority_index()) - } - } - - return None; - } + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, + { + for (id, mut data) in digests.into_iter() { + if id == BABE_ENGINE_ID { + let pre_digest: RawPreDigest = RawPreDigest::decode(&mut data).ok()?; + return Some(pre_digest.authority_index()); + } + } + + return None; + } } impl IsMember for Module { - fn is_member(authority_id: &AuthorityId) -> bool { - >::authorities() - .iter() - .any(|id| &id.0 == authority_id) - } + fn is_member(authority_id: &AuthorityId) -> bool { + >::authorities() + .iter() + .any(|id| &id.0 == authority_id) + } } impl pallet_session::ShouldEndSession for Module { - fn should_end_session(now: T::BlockNumber) -> bool { - // it might be (and it is in current implementation) that session module is calling - // should_end_session() from it's own on_initialize() handler - // => because pallet_session on_initialize() is called earlier than ours, let's ensure - // that we have synced with digest 
before checking if session should be ended. - Self::do_initialize(now); - - Self::should_epoch_change(now) - } + fn should_end_session(now: T::BlockNumber) -> bool { + // it might be (and it is in current implementation) that session module is calling + // should_end_session() from it's own on_initialize() handler + // => because pallet_session on_initialize() is called earlier than ours, let's ensure + // that we have synced with digest before checking if session should be ended. + Self::do_initialize(now); + + Self::should_epoch_change(now) + } } // TODO [slashing]: @marcio use this, remove the dead_code annotation. @@ -257,302 +259,306 @@ impl pallet_session::ShouldEndSession for Module { /// /// When a validator released two or more blocks at the same slot. struct BabeEquivocationOffence { - /// A babe slot number in which this incident happened. - slot: u64, - /// The session index in which the incident happened. - session_index: SessionIndex, - /// The size of the validator set at the time of the offence. - validator_set_count: u32, - /// The authority that produced the equivocation. - offender: FullIdentification, + /// A babe slot number in which this incident happened. + slot: u64, + /// The session index in which the incident happened. + session_index: SessionIndex, + /// The size of the validator set at the time of the offence. + validator_set_count: u32, + /// The authority that produced the equivocation. 
+ offender: FullIdentification, } -impl Offence for BabeEquivocationOffence { - const ID: Kind = *b"babe:equivocatio"; - type TimeSlot = u64; - - fn offenders(&self) -> Vec { - vec![self.offender.clone()] - } - - fn session_index(&self) -> SessionIndex { - self.session_index - } - - fn validator_set_count(&self) -> u32 { - self.validator_set_count - } - - fn time_slot(&self) -> Self::TimeSlot { - self.slot - } - - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill { - // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); - // _ ^ 2 - x.square() - } +impl Offence + for BabeEquivocationOffence +{ + const ID: Kind = *b"babe:equivocatio"; + type TimeSlot = u64; + + fn offenders(&self) -> Vec { + vec![self.offender.clone()] + } + + fn session_index(&self) -> SessionIndex { + self.session_index + } + + fn validator_set_count(&self) -> u32 { + self.validator_set_count + } + + fn time_slot(&self) -> Self::TimeSlot { + self.slot + } + + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + // the formula is min((3k / n)^2, 1) + let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + // _ ^ 2 + x.square() + } } impl Module { - /// Determine the BABE slot duration based on the Timestamp module configuration. - pub fn slot_duration() -> T::Moment { - // we double the minimum block-period so each author can always propose within - // the majority of their slot. - ::MinimumPeriod::get().saturating_mul(2.into()) - } - - /// Determine whether an epoch change should take place at this block. - /// Assumes that initialization has already taken place. - pub fn should_epoch_change(now: T::BlockNumber) -> bool { - // The epoch has technically ended during the passage of time - // between this block and the last, but we have to "end" the epoch now, - // since there is no earlier possible block we could have done it. 
- // - // The exception is for block 1: the genesis has slot 0, so we treat - // epoch 0 as having started at the slot of block 1. We want to use - // the same randomness and validator set as signalled in the genesis, - // so we don't rotate the epoch. - now != One::one() && { - let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); - diff >= T::EpochDuration::get() - } - } - - /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. - /// - /// Returns None if the prediction is in the past; This implies an error internally in the Babe - /// and should not happen under normal circumstances. - /// - /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot - /// number will grow while the block number will not. Hence, the result can be interpreted as an - /// upper bound. - // -------------- IMPORTANT NOTE -------------- - // This implementation is linked to how [`should_epoch_change`] is working. This might need to - // be updated accordingly, if the underlying mechanics of slot and epochs change. - pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { - let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); - next_slot - .checked_sub(CurrentSlot::get()) - .map(|slots_remaining| { - // This is a best effort guess. Drifts in the slot/block ratio will cause errors here. - let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); - now.saturating_add(blocks_remaining) - }) - } - - /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, - /// and the caller is the only caller of this function. - /// - /// Typically, this is not handled directly by the user, but by higher-level validator-set manager logic like - /// `pallet-session`. 
- pub fn enact_epoch_change( - authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - next_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - ) { - // PRECONDITION: caller has done initialization and is guaranteed - // by the session module to be called before this. - debug_assert!(Self::initialized().is_some()); - - // Update epoch index - let epoch_index = EpochIndex::get() - .checked_add(1) - .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - - EpochIndex::put(epoch_index); - Authorities::put(authorities); - - // Update epoch randomness. - let next_epoch_index = epoch_index - .checked_add(1) - .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); - - // Returns randomness for the current epoch and computes the *next* - // epoch randomness. - let randomness = Self::randomness_change_epoch(next_epoch_index); - Randomness::put(randomness); - - // After we update the current epoch, we signal the *next* epoch change - // so that nodes can track changes. - let next_randomness = NextRandomness::get(); - - let next = NextEpochDescriptor { - authorities: next_authorities, - randomness: next_randomness, - }; - - Self::deposit_consensus(ConsensusLog::NextEpochData(next)) - } - - // finds the start slot of the current epoch. only guaranteed to - // give correct results after `do_initialize` of the first block - // in the chain (as its result is based off of `GenesisSlot`). - pub fn current_epoch_start() -> SlotNumber { - (EpochIndex::get() * T::EpochDuration::get()) + GenesisSlot::get() - } - - fn deposit_consensus(new: U) { - let log: DigestItem = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); - >::deposit_log(log.into()) - } - - fn deposit_vrf_output(vrf_output: &schnorrkel::RawVRFOutput) { - let segment_idx = ::get(); - let mut segment = ::get(&segment_idx); - if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { - // push onto current segment: not full. 
- segment.push(*vrf_output); - ::insert(&segment_idx, &segment); - } else { - // move onto the next segment and update the index. - let segment_idx = segment_idx + 1; - ::insert(&segment_idx, &vec![vrf_output.clone()]); - ::put(&segment_idx); - } - } - - fn do_initialize(now: T::BlockNumber) { - // since do_initialize can be called twice (if session module is present) - // => let's ensure that we only modify the storage once per block - let initialized = Self::initialized().is_some(); - if initialized { - return; - } - - let maybe_pre_digest: Option = >::digest() - .logs - .iter() - .filter_map(|s| s.as_pre_runtime()) - .filter_map(|(id, mut data)| if id == BABE_ENGINE_ID { - RawPreDigest::decode(&mut data).ok() - } else { - None - }) - .next(); - - let maybe_vrf = maybe_pre_digest.and_then(|digest| { - // on the first non-zero block (i.e. block #1) - // this is where the first epoch (epoch #0) actually starts. - // we need to adjust internal storage accordingly. - if GenesisSlot::get() == 0 { - GenesisSlot::put(digest.slot_number()); - debug_assert_ne!(GenesisSlot::get(), 0); - - // deposit a log because this is the first block in epoch #0 - // we use the same values as genesis because we haven't collected any - // randomness yet. 
- let next = NextEpochDescriptor { - authorities: Self::authorities(), - randomness: Self::randomness(), - }; - - Self::deposit_consensus(ConsensusLog::NextEpochData(next)) - } - - // the slot number of the current block being initialized - let current_slot = digest.slot_number(); - - // how many slots were skipped between current and last block - let lateness = current_slot.saturating_sub(CurrentSlot::get() + 1); - let lateness = T::BlockNumber::from(lateness as u32); - - Lateness::::put(lateness); - CurrentSlot::put(current_slot); - - if let RawPreDigest::Primary(primary) = digest { - // place the VRF output into the `Initialized` storage item - // and it'll be put onto the under-construction randomness - // later, once we've decided which epoch this block is in. - Some(primary.vrf_output) - } else { - None - } - }); - - Initialized::put(maybe_vrf); - - // enact epoch change, if necessary. - T::EpochChangeTrigger::trigger::(now) - } - - /// Call this function exactly once when an epoch changes, to update the - /// randomness. Returns the new randomness. - fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { - let this_randomness = NextRandomness::get(); - let segment_idx: u32 = ::mutate(|s| sp_std::mem::replace(s, 0)); - - // overestimate to the segment being full. - let rho_size = segment_idx.saturating_add(1) as usize * UNDER_CONSTRUCTION_SEGMENT_LENGTH; - - let next_randomness = compute_randomness( - this_randomness, - next_epoch_index, - (0..segment_idx).flat_map(|i| ::take(&i)), - Some(rho_size), - ); - NextRandomness::put(&next_randomness); - this_randomness - } - - fn initialize_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) { - if !authorities.is_empty() { - assert!(Authorities::get().is_empty(), "Authorities are already initialized!"); - Authorities::put(authorities); - } - } + /// Determine the BABE slot duration based on the Timestamp module configuration. 
+ pub fn slot_duration() -> T::Moment { + // we double the minimum block-period so each author can always propose within + // the majority of their slot. + ::MinimumPeriod::get().saturating_mul(2.into()) + } + + /// Determine whether an epoch change should take place at this block. + /// Assumes that initialization has already taken place. + pub fn should_epoch_change(now: T::BlockNumber) -> bool { + // The epoch has technically ended during the passage of time + // between this block and the last, but we have to "end" the epoch now, + // since there is no earlier possible block we could have done it. + // + // The exception is for block 1: the genesis has slot 0, so we treat + // epoch 0 as having started at the slot of block 1. We want to use + // the same randomness and validator set as signalled in the genesis, + // so we don't rotate the epoch. + now != One::one() && { + let diff = CurrentSlot::get().saturating_sub(Self::current_epoch_start()); + diff >= T::EpochDuration::get() + } + } + + /// Return the _best guess_ block number, at which the next epoch change is predicted to happen. + /// + /// Returns None if the prediction is in the past; This implies an error internally in the Babe + /// and should not happen under normal circumstances. + /// + /// In other word, this is only accurate if no slots are missed. Given missed slots, the slot + /// number will grow while the block number will not. Hence, the result can be interpreted as an + /// upper bound. + // -------------- IMPORTANT NOTE -------------- + // This implementation is linked to how [`should_epoch_change`] is working. This might need to + // be updated accordingly, if the underlying mechanics of slot and epochs change. + pub fn next_expected_epoch_change(now: T::BlockNumber) -> Option { + let next_slot = Self::current_epoch_start().saturating_add(T::EpochDuration::get()); + next_slot + .checked_sub(CurrentSlot::get()) + .map(|slots_remaining| { + // This is a best effort guess. 
Drifts in the slot/block ratio will cause errors here. + let blocks_remaining: T::BlockNumber = slots_remaining.saturated_into(); + now.saturating_add(blocks_remaining) + }) + } + + /// DANGEROUS: Enact an epoch change. Should be done on every block where `should_epoch_change` has returned `true`, + /// and the caller is the only caller of this function. + /// + /// Typically, this is not handled directly by the user, but by higher-level validator-set manager logic like + /// `pallet-session`. + pub fn enact_epoch_change( + authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + next_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + ) { + // PRECONDITION: caller has done initialization and is guaranteed + // by the session module to be called before this. + debug_assert!(Self::initialized().is_some()); + + // Update epoch index + let epoch_index = EpochIndex::get() + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + + EpochIndex::put(epoch_index); + Authorities::put(authorities); + + // Update epoch randomness. + let next_epoch_index = epoch_index + .checked_add(1) + .expect("epoch indices will never reach 2^64 before the death of the universe; qed"); + + // Returns randomness for the current epoch and computes the *next* + // epoch randomness. + let randomness = Self::randomness_change_epoch(next_epoch_index); + Randomness::put(randomness); + + // After we update the current epoch, we signal the *next* epoch change + // so that nodes can track changes. + let next_randomness = NextRandomness::get(); + + let next = NextEpochDescriptor { + authorities: next_authorities, + randomness: next_randomness, + }; + + Self::deposit_consensus(ConsensusLog::NextEpochData(next)) + } + + // finds the start slot of the current epoch. only guaranteed to + // give correct results after `do_initialize` of the first block + // in the chain (as its result is based off of `GenesisSlot`). 
+ pub fn current_epoch_start() -> SlotNumber { + (EpochIndex::get() * T::EpochDuration::get()) + GenesisSlot::get() + } + + fn deposit_consensus(new: U) { + let log: DigestItem = DigestItem::Consensus(BABE_ENGINE_ID, new.encode()); + >::deposit_log(log.into()) + } + + fn deposit_vrf_output(vrf_output: &schnorrkel::RawVRFOutput) { + let segment_idx = ::get(); + let mut segment = ::get(&segment_idx); + if segment.len() < UNDER_CONSTRUCTION_SEGMENT_LENGTH { + // push onto current segment: not full. + segment.push(*vrf_output); + ::insert(&segment_idx, &segment); + } else { + // move onto the next segment and update the index. + let segment_idx = segment_idx + 1; + ::insert(&segment_idx, &vec![vrf_output.clone()]); + ::put(&segment_idx); + } + } + + fn do_initialize(now: T::BlockNumber) { + // since do_initialize can be called twice (if session module is present) + // => let's ensure that we only modify the storage once per block + let initialized = Self::initialized().is_some(); + if initialized { + return; + } + + let maybe_pre_digest: Option = >::digest() + .logs + .iter() + .filter_map(|s| s.as_pre_runtime()) + .filter_map(|(id, mut data)| { + if id == BABE_ENGINE_ID { + RawPreDigest::decode(&mut data).ok() + } else { + None + } + }) + .next(); + + let maybe_vrf = maybe_pre_digest.and_then(|digest| { + // on the first non-zero block (i.e. block #1) + // this is where the first epoch (epoch #0) actually starts. + // we need to adjust internal storage accordingly. + if GenesisSlot::get() == 0 { + GenesisSlot::put(digest.slot_number()); + debug_assert_ne!(GenesisSlot::get(), 0); + + // deposit a log because this is the first block in epoch #0 + // we use the same values as genesis because we haven't collected any + // randomness yet. 
+ let next = NextEpochDescriptor { + authorities: Self::authorities(), + randomness: Self::randomness(), + }; + + Self::deposit_consensus(ConsensusLog::NextEpochData(next)) + } + + // the slot number of the current block being initialized + let current_slot = digest.slot_number(); + + // how many slots were skipped between current and last block + let lateness = current_slot.saturating_sub(CurrentSlot::get() + 1); + let lateness = T::BlockNumber::from(lateness as u32); + + Lateness::::put(lateness); + CurrentSlot::put(current_slot); + + if let RawPreDigest::Primary(primary) = digest { + // place the VRF output into the `Initialized` storage item + // and it'll be put onto the under-construction randomness + // later, once we've decided which epoch this block is in. + Some(primary.vrf_output) + } else { + None + } + }); + + Initialized::put(maybe_vrf); + + // enact epoch change, if necessary. + T::EpochChangeTrigger::trigger::(now) + } + + /// Call this function exactly once when an epoch changes, to update the + /// randomness. Returns the new randomness. + fn randomness_change_epoch(next_epoch_index: u64) -> schnorrkel::Randomness { + let this_randomness = NextRandomness::get(); + let segment_idx: u32 = ::mutate(|s| sp_std::mem::replace(s, 0)); + + // overestimate to the segment being full. + let rho_size = segment_idx.saturating_add(1) as usize * UNDER_CONSTRUCTION_SEGMENT_LENGTH; + + let next_randomness = compute_randomness( + this_randomness, + next_epoch_index, + (0..segment_idx).flat_map(|i| ::take(&i)), + Some(rho_size), + ); + NextRandomness::put(&next_randomness); + this_randomness + } + + fn initialize_authorities(authorities: &[(AuthorityId, BabeAuthorityWeight)]) { + if !authorities.is_empty() { + assert!( + Authorities::get().is_empty(), + "Authorities are already initialized!" 
+ ); + Authorities::put(authorities); + } + } } impl OnTimestampSet for Module { - fn on_timestamp_set(_moment: T::Moment) { } + fn on_timestamp_set(_moment: T::Moment) {} } impl frame_support::traits::EstimateNextSessionRotation for Module { - fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { - Self::next_expected_epoch_change(now) - } + fn estimate_next_session_rotation(now: T::BlockNumber) -> Option { + Self::next_expected_epoch_change(now) + } } impl frame_support::traits::Lateness for Module { - fn lateness(&self) -> T::BlockNumber { - Self::lateness() - } + fn lateness(&self) -> T::BlockNumber { + Self::lateness() + } } impl sp_runtime::BoundToRuntimeAppPublic for Module { - type Public = AuthorityId; + type Public = AuthorityId; } impl pallet_session::OneSessionHandler for Module { - type Key = AuthorityId; - - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator - { - let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - Self::initialize_authorities(&authorities); - } - - fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) - where I: Iterator - { - let authorities = validators.map(|(_account, k)| { - (k, 1) - }).collect::>(); - - let next_authorities = queued_validators.map(|(_account, k)| { - (k, 1) - }).collect::>(); - - Self::enact_epoch_change(authorities, next_authorities) - } - - fn on_disabled(i: usize) { - Self::deposit_consensus(ConsensusLog::OnDisabled(i as u32)) - } + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + Self::initialize_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_account, k)| (k, 1)).collect::>(); + + let next_authorities = queued_validators + .map(|(_account, k)| (k, 1)) + .collect::>(); + + 
Self::enact_epoch_change(authorities, next_authorities) + } + + fn on_disabled(i: usize) { + Self::deposit_consensus(ConsensusLog::OnDisabled(i as u32)) + } } // compute randomness for a new epoch. rho is the concatenation of all @@ -560,44 +566,47 @@ impl pallet_session::OneSessionHandler for Module { // // an optional size hint as to how many VRF outputs there were may be provided. fn compute_randomness( - last_epoch_randomness: schnorrkel::Randomness, - epoch_index: u64, - rho: impl Iterator, - rho_size_hint: Option, + last_epoch_randomness: schnorrkel::Randomness, + epoch_index: u64, + rho: impl Iterator, + rho_size_hint: Option, ) -> schnorrkel::Randomness { - let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); - s.extend_from_slice(&last_epoch_randomness); - s.extend_from_slice(&epoch_index.to_le_bytes()); + let mut s = Vec::with_capacity(40 + rho_size_hint.unwrap_or(0) * VRF_OUTPUT_LENGTH); + s.extend_from_slice(&last_epoch_randomness); + s.extend_from_slice(&epoch_index.to_le_bytes()); - for vrf_output in rho { - s.extend_from_slice(&vrf_output[..]); - } + for vrf_output in rho { + s.extend_from_slice(&vrf_output[..]); + } - sp_io::hashing::blake2_256(&s) + sp_io::hashing::blake2_256(&s) } impl ProvideInherent for Module { - type Call = pallet_timestamp::Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_: &InherentData) -> Option { - None - } - - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - let timestamp = match call { - pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), - _ => return Ok(()), - }; - - let timestamp_based_slot = (timestamp / Self::slot_duration()).saturated_into::(); - let seal_slot = data.babe_inherent_data()?; - - if timestamp_based_slot == seal_slot { - Ok(()) - } else { - Err(sp_inherents::Error::from("timestamp set in block doesn't match slot in 
seal").into()) - } - } + type Call = pallet_timestamp::Call; + type Error = MakeFatalError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_: &InherentData) -> Option { + None + } + + fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + let timestamp = match call { + pallet_timestamp::Call::set(ref timestamp) => timestamp.clone(), + _ => return Ok(()), + }; + + let timestamp_based_slot = (timestamp / Self::slot_duration()).saturated_into::(); + let seal_slot = data.babe_inherent_data()?; + + if timestamp_based_slot == seal_slot { + Ok(()) + } else { + Err( + sp_inherents::Error::from("timestamp set in block doesn't match slot in seal") + .into(), + ) + } + } } diff --git a/frame/babe/src/mock.rs b/frame/babe/src/mock.rs index 1c7d02a56c..a3d3db9642 100644 --- a/frame/babe/src/mock.rs +++ b/frame/babe/src/mock.rs @@ -16,25 +16,24 @@ //! Test utilities +use super::{CurrentSlot, GenesisConfig, Module, Trait}; use codec::Encode; -use super::{Trait, Module, GenesisConfig, CurrentSlot}; -use sp_runtime::{ - Perbill, impl_opaque_keys, - testing::{Header, UintAuthorityId, Digest, DigestItem}, - traits::IdentityLookup, -}; -use frame_system::InitKind; use frame_support::{ - impl_outer_origin, parameter_types, StorageValue, - traits::OnInitialize, - weights::Weight, + impl_outer_origin, parameter_types, traits::OnInitialize, weights::Weight, StorageValue, }; -use sp_io; -use sp_core::H256; +use frame_system::InitKind; use sp_consensus_vrf::schnorrkel::{RawVRFOutput, RawVRFProof}; +use sp_core::H256; +use sp_io; +use sp_runtime::{ + impl_opaque_keys, + testing::{Digest, DigestItem, Header, UintAuthorityId}, + traits::IdentityLookup, + Perbill, +}; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} +impl_outer_origin! 
{ + pub enum Origin for Test where system = frame_system {} } type DummyValidatorId = u64; @@ -44,113 +43,126 @@ type DummyValidatorId = u64; pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MinimumPeriod: u64 = 1; - pub const EpochDuration: u64 = 3; - pub const ExpectedBlockTime: u64 = 1; - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const MinimumPeriod: u64 = 1; + pub const EpochDuration: u64 = 3; + pub const ExpectedBlockTime: u64 = 1; + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(16); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Version = (); - type Hashing = sp_runtime::traits::BlakeTwo256; - type AccountId = DummyValidatorId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Version = (); + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = DummyValidatorId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type 
AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl_opaque_keys! { - pub struct MockSessionKeys { - pub dummy: UintAuthorityId, - } + pub struct MockSessionKeys { + pub dummy: UintAuthorityId, + } } impl pallet_session::Trait for Test { - type Event = (); - type ValidatorId = ::AccountId; - type ShouldEndSession = Babe; - type SessionHandler = (Babe,); - type SessionManager = (); - type ValidatorIdOf = (); - type Keys = MockSessionKeys; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type NextSessionRotation = Babe; + type Event = (); + type ValidatorId = ::AccountId; + type ShouldEndSession = Babe; + type SessionHandler = (Babe,); + type SessionManager = (); + type ValidatorIdOf = (); + type Keys = MockSessionKeys; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = Babe; } impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = Babe; - type MinimumPeriod = MinimumPeriod; + type Moment = u64; + type OnTimestampSet = Babe; + type MinimumPeriod = MinimumPeriod; } impl Trait for Test { - type EpochDuration = EpochDuration; - type ExpectedBlockTime = ExpectedBlockTime; - type EpochChangeTrigger = crate::ExternalTrigger; + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + type EpochChangeTrigger = crate::ExternalTrigger; } pub fn new_test_ext(authorities: Vec) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig { - authorities: authorities.into_iter().map(|a| (UintAuthorityId(a).to_public_key(), 1)).collect(), - }.assimilate_storage::(&mut t).unwrap(); - t.into() + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + GenesisConfig { + authorities: authorities + .into_iter() + .map(|a| 
(UintAuthorityId(a).to_public_key(), 1)) + .collect(), + } + .assimilate_storage::(&mut t) + .unwrap(); + t.into() } pub fn go_to_block(n: u64, s: u64) { - let pre_digest = make_pre_digest(0, s, RawVRFOutput([1; 32]), RawVRFProof([0xff; 64])); - System::initialize(&n, &Default::default(), &Default::default(), &pre_digest, InitKind::Full); - System::set_block_number(n); - if s > 1 { - CurrentSlot::put(s); - } - // includes a call into `Babe::do_initialize`. - Session::on_initialize(n); + let pre_digest = make_pre_digest(0, s, RawVRFOutput([1; 32]), RawVRFProof([0xff; 64])); + System::initialize( + &n, + &Default::default(), + &Default::default(), + &pre_digest, + InitKind::Full, + ); + System::set_block_number(n); + if s > 1 { + CurrentSlot::put(s); + } + // includes a call into `Babe::do_initialize`. + Session::on_initialize(n); } /// Slots will grow accordingly to blocks pub fn progress_to_block(n: u64) { - let mut slot = Babe::current_slot() + 1; - for i in System::block_number()+1..=n { - go_to_block(i, slot); - slot += 1; - } + let mut slot = Babe::current_slot() + 1; + for i in System::block_number() + 1..=n { + go_to_block(i, slot); + slot += 1; + } } pub fn make_pre_digest( - authority_index: sp_consensus_babe::AuthorityIndex, - slot_number: sp_consensus_babe::SlotNumber, - vrf_output: RawVRFOutput, - vrf_proof: RawVRFProof, + authority_index: sp_consensus_babe::AuthorityIndex, + slot_number: sp_consensus_babe::SlotNumber, + vrf_output: RawVRFOutput, + vrf_proof: RawVRFProof, ) -> Digest { - let digest_data = sp_consensus_babe::digests::RawPreDigest::Primary( - sp_consensus_babe::digests::RawPrimaryPreDigest { - authority_index, - slot_number, - vrf_output, - vrf_proof, - } - ); - let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); - Digest { logs: vec![log] } + let digest_data = sp_consensus_babe::digests::RawPreDigest::Primary( + sp_consensus_babe::digests::RawPrimaryPreDigest { + authority_index, + slot_number, + 
vrf_output, + vrf_proof, + }, + ); + let log = DigestItem::PreRuntime(sp_consensus_babe::BABE_ENGINE_ID, digest_data.encode()); + Digest { logs: vec![log] } } pub type System = frame_system::Module; diff --git a/frame/babe/src/tests.rs b/frame/babe/src/tests.rs index 24aba10017..08d068b5c8 100644 --- a/frame/babe/src/tests.rs +++ b/frame/babe/src/tests.rs @@ -17,120 +17,122 @@ //! Consensus extension module tests for BABE consensus. use super::*; -use mock::*; use frame_support::traits::OnFinalize; +use mock::*; use pallet_session::ShouldEndSession; use sp_consensus_vrf::schnorrkel::{RawVRFOutput, RawVRFProof}; const EMPTY_RANDOMNESS: [u8; 32] = [ - 74, 25, 49, 128, 53, 97, 244, 49, - 222, 202, 176, 2, 231, 66, 95, 10, - 133, 49, 213, 228, 86, 161, 164, 127, - 217, 153, 138, 37, 48, 192, 248, 0, + 74, 25, 49, 128, 53, 97, 244, 49, 222, 202, 176, 2, 231, 66, 95, 10, 133, 49, 213, 228, 86, + 161, 164, 127, 217, 153, 138, 37, 48, 192, 248, 0, ]; #[test] fn empty_randomness_is_correct() { - let s = compute_randomness([0; RANDOMNESS_LENGTH], 0, std::iter::empty(), None); - assert_eq!(s, EMPTY_RANDOMNESS); + let s = compute_randomness([0; RANDOMNESS_LENGTH], 0, std::iter::empty(), None); + assert_eq!(s, EMPTY_RANDOMNESS); } #[test] fn initial_values() { - new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { - assert_eq!(Babe::authorities().len(), 4) - }) + new_test_ext(vec![0, 1, 2, 3]).execute_with(|| assert_eq!(Babe::authorities().len(), 4)) } #[test] fn check_module() { - new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { - assert!(!Babe::should_end_session(0), "Genesis does not change sessions"); - assert!(!Babe::should_end_session(200000), - "BABE does not include the block number in epoch calculations"); - }) + new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { + assert!( + !Babe::should_end_session(0), + "Genesis does not change sessions" + ); + assert!( + !Babe::should_end_session(200000), + "BABE does not include the block number in epoch calculations" + ); + }) } 
#[test] fn first_block_epoch_zero_start() { - new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { - let genesis_slot = 100; - let first_vrf = RawVRFOutput([1; 32]); - let pre_digest = make_pre_digest( - 0, - genesis_slot, - first_vrf.clone(), - RawVRFProof([0xff; 64]), - ); - - assert_eq!(Babe::genesis_slot(), 0); - System::initialize( - &1, - &Default::default(), - &Default::default(), - &pre_digest, - Default::default(), - ); - - // see implementation of the function for details why: we issue an - // epoch-change digest but don't do it via the normal session mechanism. - assert!(!Babe::should_end_session(1)); - assert_eq!(Babe::genesis_slot(), genesis_slot); - assert_eq!(Babe::current_slot(), genesis_slot); - assert_eq!(Babe::epoch_index(), 0); - - Babe::on_finalize(1); - let header = System::finalize(); - - assert_eq!(SegmentIndex::get(), 0); - assert_eq!(UnderConstruction::get(0), vec![first_vrf]); - assert_eq!(Babe::randomness(), [0; 32]); - assert_eq!(NextRandomness::get(), [0; 32]); - - assert_eq!(header.digest.logs.len(), 2); - assert_eq!(pre_digest.logs.len(), 1); - assert_eq!(header.digest.logs[0], pre_digest.logs[0]); - - let authorities = Babe::authorities(); - let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( - sp_consensus_babe::digests::NextEpochDescriptor { - authorities, - randomness: Babe::randomness(), - } - ); - let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); - - // first epoch descriptor has same info as last. 
- assert_eq!(header.digest.logs[1], consensus_digest.clone()) - }) + new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { + let genesis_slot = 100; + let first_vrf = RawVRFOutput([1; 32]); + let pre_digest = + make_pre_digest(0, genesis_slot, first_vrf.clone(), RawVRFProof([0xff; 64])); + + assert_eq!(Babe::genesis_slot(), 0); + System::initialize( + &1, + &Default::default(), + &Default::default(), + &pre_digest, + Default::default(), + ); + + // see implementation of the function for details why: we issue an + // epoch-change digest but don't do it via the normal session mechanism. + assert!(!Babe::should_end_session(1)); + assert_eq!(Babe::genesis_slot(), genesis_slot); + assert_eq!(Babe::current_slot(), genesis_slot); + assert_eq!(Babe::epoch_index(), 0); + + Babe::on_finalize(1); + let header = System::finalize(); + + assert_eq!(SegmentIndex::get(), 0); + assert_eq!(UnderConstruction::get(0), vec![first_vrf]); + assert_eq!(Babe::randomness(), [0; 32]); + assert_eq!(NextRandomness::get(), [0; 32]); + + assert_eq!(header.digest.logs.len(), 2); + assert_eq!(pre_digest.logs.len(), 1); + assert_eq!(header.digest.logs[0], pre_digest.logs[0]); + + let authorities = Babe::authorities(); + let consensus_log = sp_consensus_babe::ConsensusLog::NextEpochData( + sp_consensus_babe::digests::NextEpochDescriptor { + authorities, + randomness: Babe::randomness(), + }, + ); + let consensus_digest = DigestItem::Consensus(BABE_ENGINE_ID, consensus_log.encode()); + + // first epoch descriptor has same info as last. 
+ assert_eq!(header.digest.logs[1], consensus_digest.clone()) + }) } #[test] fn authority_index() { - new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { - assert_eq!( - Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), None, - "Trivially invalid authorities are ignored") - }) + new_test_ext(vec![0, 1, 2, 3]).execute_with(|| { + assert_eq!( + Babe::find_author((&[(BABE_ENGINE_ID, &[][..])]).into_iter().cloned()), + None, + "Trivially invalid authorities are ignored" + ) + }) } #[test] fn can_predict_next_epoch_change() { - new_test_ext(vec![]).execute_with(|| { - assert_eq!(::EpochDuration::get(), 3); - // this sets the genesis slot to 6; - go_to_block(1, 6); - assert_eq!(Babe::genesis_slot(), 6); - assert_eq!(Babe::current_slot(), 6); - assert_eq!(Babe::epoch_index(), 0); - - progress_to_block(5); - - assert_eq!(Babe::epoch_index(), 5 / 3); - assert_eq!(Babe::current_slot(), 10); - - // next epoch change will be at - assert_eq!(Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now - assert_eq!(Babe::next_expected_epoch_change(System::block_number()), Some(5 + 2)); - }) + new_test_ext(vec![]).execute_with(|| { + assert_eq!(::EpochDuration::get(), 3); + // this sets the genesis slot to 6; + go_to_block(1, 6); + assert_eq!(Babe::genesis_slot(), 6); + assert_eq!(Babe::current_slot(), 6); + assert_eq!(Babe::epoch_index(), 0); + + progress_to_block(5); + + assert_eq!(Babe::epoch_index(), 5 / 3); + assert_eq!(Babe::current_slot(), 10); + + // next epoch change will be at + assert_eq!(Babe::current_epoch_start(), 9); // next change will be 12, 2 slots from now + assert_eq!( + Babe::next_expected_epoch_change(System::block_number()), + Some(5 + 2) + ); + }) } diff --git a/frame/balances/src/benchmarking.rs b/frame/balances/src/benchmarking.rs index 3c2067559f..4a6f7f7420 100644 --- a/frame/balances/src/benchmarking.rs +++ b/frame/balances/src/benchmarking.rs @@ -20,8 +20,8 @@ use super::*; +use frame_benchmarking::{account, 
benchmarks}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; use sp_runtime::traits::Bounded; use crate::Module as Balances; @@ -31,136 +31,136 @@ const MAX_EXISTENTIAL_DEPOSIT: u32 = 1000; const MAX_USER_INDEX: u32 = 1000; benchmarks! { - _ { - let e in 2 .. MAX_EXISTENTIAL_DEPOSIT => (); - let u in 1 .. MAX_USER_INDEX => (); - } - - // Benchmark `transfer` extrinsic with the worst possible conditions: - // * Transfer will kill the sender account. - // * Transfer will create the recipient account. - transfer { - let u in ...; - let e in ...; - - let existential_deposit = T::ExistentialDeposit::get(); - let caller = account("caller", u, SEED); - - // Give some multiple of the existential deposit + creation fee + transfer fee - let balance = existential_deposit.saturating_mul(e.into()); - let _ = as Currency<_>>::make_free_balance_be(&caller, balance); - - // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. - let recipient: T::AccountId = account("recipient", u, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); - let transfer_amount = existential_deposit.saturating_mul((e - 1).into()) + 1.into(); - }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) - verify { - assert_eq!(Balances::::free_balance(&recipient), transfer_amount); - } - - // Benchmark `transfer` with the best possible condition: - // * Both accounts exist and will continue to exist. - transfer_best_case { - let u in ...; - let e in ...; - - let caller = account("caller", u, SEED); - let recipient: T::AccountId = account("recipient", u, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); - - // Give the sender account max funds for transfer (their account will never reasonably be killed). 
- let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); - - // Give the recipient account existential deposit (thus their account already exists). - let existential_deposit = T::ExistentialDeposit::get(); - let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); - let transfer_amount = existential_deposit.saturating_mul(e.into()); - }: transfer(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) - - // Benchmark `transfer_keep_alive` with the worst possible condition: - // * The recipient account is created. - transfer_keep_alive { - let u in ...; - let e in ...; - - let caller = account("caller", u, SEED); - let recipient = account("recipient", u, SEED); - let recipient_lookup: ::Source = T::Lookup::unlookup(recipient); - - // Give the sender account max funds, thus a transfer will not kill account. - let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); - let existential_deposit = T::ExistentialDeposit::get(); - let transfer_amount = existential_deposit.saturating_mul(e.into()); - }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) - - // Benchmark `set_balance` coming from ROOT account. This always creates an account. - set_balance { - let u in ...; - let e in ...; - - let user: T::AccountId = account("user", u, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); - - // Give the user some initial balance. - let existential_deposit = T::ExistentialDeposit::get(); - let balance_amount = existential_deposit.saturating_mul(e.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); - }: _(RawOrigin::Root, user_lookup, balance_amount, balance_amount) - - // Benchmark `set_balance` coming from ROOT account. This always kills an account. 
- set_balance_killing { - let u in ...; - let e in ...; - - let user: T::AccountId = account("user", u, SEED); - let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); - - // Give the user some initial balance. - let existential_deposit = T::ExistentialDeposit::get(); - let balance_amount = existential_deposit.saturating_mul(e.into()); - let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); - }: set_balance(RawOrigin::Root, user_lookup, 0.into(), 0.into()) + _ { + let e in 2 .. MAX_EXISTENTIAL_DEPOSIT => (); + let u in 1 .. MAX_USER_INDEX => (); + } + + // Benchmark `transfer` extrinsic with the worst possible conditions: + // * Transfer will kill the sender account. + // * Transfer will create the recipient account. + transfer { + let u in ...; + let e in ...; + + let existential_deposit = T::ExistentialDeposit::get(); + let caller = account("caller", u, SEED); + + // Give some multiple of the existential deposit + creation fee + transfer fee + let balance = existential_deposit.saturating_mul(e.into()); + let _ = as Currency<_>>::make_free_balance_be(&caller, balance); + + // Transfer `e - 1` existential deposits + 1 unit, which guarantees to create one account, and reap this user. + let recipient: T::AccountId = account("recipient", u, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + let transfer_amount = existential_deposit.saturating_mul((e - 1).into()) + 1.into(); + }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + verify { + assert_eq!(Balances::::free_balance(&recipient), transfer_amount); + } + + // Benchmark `transfer` with the best possible condition: + // * Both accounts exist and will continue to exist. 
+ transfer_best_case { + let u in ...; + let e in ...; + + let caller = account("caller", u, SEED); + let recipient: T::AccountId = account("recipient", u, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient.clone()); + + // Give the sender account max funds for transfer (their account will never reasonably be killed). + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); + + // Give the recipient account existential deposit (thus their account already exists). + let existential_deposit = T::ExistentialDeposit::get(); + let _ = as Currency<_>>::make_free_balance_be(&recipient, existential_deposit); + let transfer_amount = existential_deposit.saturating_mul(e.into()); + }: transfer(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + + // Benchmark `transfer_keep_alive` with the worst possible condition: + // * The recipient account is created. + transfer_keep_alive { + let u in ...; + let e in ...; + + let caller = account("caller", u, SEED); + let recipient = account("recipient", u, SEED); + let recipient_lookup: ::Source = T::Lookup::unlookup(recipient); + + // Give the sender account max funds, thus a transfer will not kill account. + let _ = as Currency<_>>::make_free_balance_be(&caller, T::Balance::max_value()); + let existential_deposit = T::ExistentialDeposit::get(); + let transfer_amount = existential_deposit.saturating_mul(e.into()); + }: _(RawOrigin::Signed(caller), recipient_lookup, transfer_amount) + + // Benchmark `set_balance` coming from ROOT account. This always creates an account. + set_balance { + let u in ...; + let e in ...; + + let user: T::AccountId = account("user", u, SEED); + let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + + // Give the user some initial balance. 
+ let existential_deposit = T::ExistentialDeposit::get(); + let balance_amount = existential_deposit.saturating_mul(e.into()); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + }: _(RawOrigin::Root, user_lookup, balance_amount, balance_amount) + + // Benchmark `set_balance` coming from ROOT account. This always kills an account. + set_balance_killing { + let u in ...; + let e in ...; + + let user: T::AccountId = account("user", u, SEED); + let user_lookup: ::Source = T::Lookup::unlookup(user.clone()); + + // Give the user some initial balance. + let existential_deposit = T::ExistentialDeposit::get(); + let balance_amount = existential_deposit.saturating_mul(e.into()); + let _ = as Currency<_>>::make_free_balance_be(&user, balance_amount); + }: set_balance(RawOrigin::Root, user_lookup, 0.into(), 0.into()) } #[cfg(test)] mod tests { - use super::*; - use crate::tests_composite::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn transfer() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_transfer::()); - }); - } - - #[test] - fn transfer_best_case() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_transfer_best_case::()); - }); - } - - #[test] - fn transfer_keep_alive() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_transfer_keep_alive::()); - }); - } - - #[test] - fn transfer_set_balance() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_set_balance::()); - }); - } - - #[test] - fn transfer_set_balance_killing() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(test_benchmark_set_balance_killing::()); - }); - } + use super::*; + use crate::tests_composite::{ExtBuilder, Test}; + use frame_support::assert_ok; + + #[test] + fn transfer() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_transfer::()); + }); + } + + #[test] + fn transfer_best_case() { + 
ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_transfer_best_case::()); + }); + } + + #[test] + fn transfer_keep_alive() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_transfer_keep_alive::()); + }); + } + + #[test] + fn transfer_set_balance() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_set_balance::()); + }); + } + + #[test] + fn transfer_set_balance_killing() { + ExtBuilder::default().build().execute_with(|| { + assert_ok!(test_benchmark_set_balance_killing::()); + }); + } } diff --git a/frame/balances/src/lib.rs b/frame/balances/src/lib.rs index 98d6a93738..f060a49756 100644 --- a/frame/balances/src/lib.rs +++ b/frame/balances/src/lib.rs @@ -150,67 +150,80 @@ #[macro_use] mod tests; -mod tests_local; -mod tests_composite; mod benchmarking; +mod tests_composite; +mod tests_local; -use sp_std::prelude::*; -use sp_std::{cmp, result, mem, fmt::Debug, ops::BitOr, convert::Infallible}; -use codec::{Codec, Encode, Decode}; +use codec::{Codec, Decode, Encode}; use frame_support::{ - StorageValue, Parameter, decl_event, decl_storage, decl_module, decl_error, ensure, - traits::{ - Currency, OnKilledAccount, OnUnbalanced, TryDrop, StoredMap, - WithdrawReason, WithdrawReasons, LockIdentifier, LockableCurrency, ExistenceRequirement, - Imbalance, SignedImbalance, ReservableCurrency, Get, ExistenceRequirement::KeepAlive, - ExistenceRequirement::AllowDeath, IsDeadAccount, BalanceStatus as Status, - } + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{ + BalanceStatus as Status, Currency, ExistenceRequirement, ExistenceRequirement::AllowDeath, + ExistenceRequirement::KeepAlive, Get, Imbalance, IsDeadAccount, LockIdentifier, + LockableCurrency, OnKilledAccount, OnUnbalanced, ReservableCurrency, SignedImbalance, + StoredMap, TryDrop, WithdrawReason, WithdrawReasons, + }, + Parameter, StorageValue, }; +use frame_system::{self as system, ensure_root, ensure_signed}; 
use sp_runtime::{ - RuntimeDebug, DispatchResult, DispatchError, - traits::{ - Zero, AtLeast32Bit, StaticLookup, Member, CheckedAdd, CheckedSub, - MaybeSerializeDeserialize, Saturating, Bounded, - }, + traits::{ + AtLeast32Bit, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Member, + Saturating, StaticLookup, Zero, + }, + DispatchError, DispatchResult, RuntimeDebug, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use sp_std::prelude::*; +use sp_std::{cmp, convert::Infallible, fmt::Debug, mem, ops::BitOr, result}; -pub use self::imbalances::{PositiveImbalance, NegativeImbalance}; +pub use self::imbalances::{NegativeImbalance, PositiveImbalance}; pub trait Subtrait: frame_system::Trait { - /// The balance of an account. - type Balance: Parameter + Member + AtLeast32Bit + Codec + Default + Copy + - MaybeSerializeDeserialize + Debug; - - /// The minimum amount required to keep an account open. - type ExistentialDeposit: Get; - - /// The means of storing the balances of an account. - type AccountStore: StoredMap>; + /// The balance of an account. + type Balance: Parameter + + Member + + AtLeast32Bit + + Codec + + Default + + Copy + + MaybeSerializeDeserialize + + Debug; + + /// The minimum amount required to keep an account open. + type ExistentialDeposit: Get; + + /// The means of storing the balances of an account. + type AccountStore: StoredMap>; } pub trait Trait: frame_system::Trait { - /// The balance of an account. - type Balance: Parameter + Member + AtLeast32Bit + Codec + Default + Copy + - MaybeSerializeDeserialize + Debug; - - /// Handler for the unbalanced reduction when removing a dust account. - type DustRemoval: OnUnbalanced>; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The minimum amount required to keep an account open. - type ExistentialDeposit: Get; - - /// The means of storing the balances of an account. - type AccountStore: StoredMap>; + /// The balance of an account. 
+ type Balance: Parameter + + Member + + AtLeast32Bit + + Codec + + Default + + Copy + + MaybeSerializeDeserialize + + Debug; + + /// Handler for the unbalanced reduction when removing a dust account. + type DustRemoval: OnUnbalanced>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// The minimum amount required to keep an account open. + type ExistentialDeposit: Get; + + /// The means of storing the balances of an account. + type AccountStore: StoredMap>; } impl, I: Instance> Subtrait for T { - type Balance = T::Balance; - type ExistentialDeposit = T::ExistentialDeposit; - type AccountStore = T::AccountStore; + type Balance = T::Balance; + type ExistentialDeposit = T::ExistentialDeposit; + type AccountStore = T::AccountStore; } decl_event!( @@ -233,111 +246,113 @@ decl_event!( ); decl_error! { - pub enum Error for Module, I: Instance> { - /// Vesting balance too high to send value - VestingBalance, - /// Account liquidity restrictions prevent withdrawal - LiquidityRestrictions, - /// Got an overflow after adding - Overflow, - /// Balance too low to send value - InsufficientBalance, - /// Value too low to create account due to existential deposit - ExistentialDeposit, - /// Transfer/payment would kill account - KeepAlive, - /// A vesting schedule already exists for this account - ExistingVestingSchedule, - /// Beneficiary account must pre-exist - DeadAccount, - } + pub enum Error for Module, I: Instance> { + /// Vesting balance too high to send value + VestingBalance, + /// Account liquidity restrictions prevent withdrawal + LiquidityRestrictions, + /// Got an overflow after adding + Overflow, + /// Balance too low to send value + InsufficientBalance, + /// Value too low to create account due to existential deposit + ExistentialDeposit, + /// Transfer/payment would kill account + KeepAlive, + /// A vesting schedule already exists for this account + ExistingVestingSchedule, + /// Beneficiary account must pre-exist + DeadAccount, + } } 
/// Simplified reasons for withdrawing balance. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] pub enum Reasons { - /// Paying system transaction fees. - Fee = 0, - /// Any reason other than paying system transaction fees. - Misc = 1, - /// Any reason at all. - All = 2, + /// Paying system transaction fees. + Fee = 0, + /// Any reason other than paying system transaction fees. + Misc = 1, + /// Any reason at all. + All = 2, } impl From for Reasons { - fn from(r: WithdrawReasons) -> Reasons { - if r == WithdrawReasons::from(WithdrawReason::TransactionPayment) { - Reasons::Fee - } else if r.contains(WithdrawReason::TransactionPayment) { - Reasons::All - } else { - Reasons::Misc - } - } + fn from(r: WithdrawReasons) -> Reasons { + if r == WithdrawReasons::from(WithdrawReason::TransactionPayment) { + Reasons::Fee + } else if r.contains(WithdrawReason::TransactionPayment) { + Reasons::All + } else { + Reasons::Misc + } + } } impl BitOr for Reasons { - type Output = Reasons; - fn bitor(self, other: Reasons) -> Reasons { - if self == other { return self } - Reasons::All - } + type Output = Reasons; + fn bitor(self, other: Reasons) -> Reasons { + if self == other { + return self; + } + Reasons::All + } } /// A single lock on a balance. There can be many of these on an account and they "overlap", so the /// same balance is frozen by multiple locks. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct BalanceLock { - /// An identifier for this lock. Only one lock may be in existence for each identifier. - pub id: LockIdentifier, - /// The amount which the free balance may not drop below when this lock is in effect. - pub amount: Balance, - /// If true, then the lock remains in effect even for payment of transaction fees. - pub reasons: Reasons, + /// An identifier for this lock. Only one lock may be in existence for each identifier. 
+ pub id: LockIdentifier, + /// The amount which the free balance may not drop below when this lock is in effect. + pub amount: Balance, + /// If true, then the lock remains in effect even for payment of transaction fees. + pub reasons: Reasons, } /// All balance information for an account. #[derive(Encode, Decode, Clone, PartialEq, Eq, Default, RuntimeDebug)] pub struct AccountData { - /// Non-reserved part of the balance. There may still be restrictions on this, but it is the - /// total pool what may in principle be transferred, reserved and used for tipping. - /// - /// This is the only balance that matters in terms of most operations on tokens. It - /// alone is used to determine the balance when in the contract execution environment. - pub free: Balance, - /// Balance which is reserved and may not be used at all. - /// - /// This can still get slashed, but gets slashed last of all. - /// - /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens - /// that are still 'owned' by the account holder, but which are suspendable. - pub reserved: Balance, - /// The amount that `free` may not drop below when withdrawing for *anything except transaction - /// fee payment*. - pub misc_frozen: Balance, - /// The amount that `free` may not drop below when withdrawing specifically for transaction - /// fee payment. - pub fee_frozen: Balance, + /// Non-reserved part of the balance. There may still be restrictions on this, but it is the + /// total pool what may in principle be transferred, reserved and used for tipping. + /// + /// This is the only balance that matters in terms of most operations on tokens. It + /// alone is used to determine the balance when in the contract execution environment. + pub free: Balance, + /// Balance which is reserved and may not be used at all. + /// + /// This can still get slashed, but gets slashed last of all. 
+ /// + /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens + /// that are still 'owned' by the account holder, but which are suspendable. + pub reserved: Balance, + /// The amount that `free` may not drop below when withdrawing for *anything except transaction + /// fee payment*. + pub misc_frozen: Balance, + /// The amount that `free` may not drop below when withdrawing specifically for transaction + /// fee payment. + pub fee_frozen: Balance, } impl AccountData { - /// How much this account's balance can be reduced for the given `reasons`. - fn usable(&self, reasons: Reasons) -> Balance { - self.free.saturating_sub(self.frozen(reasons)) - } - /// The amount that this account's free balance may not be reduced beyond for the given - /// `reasons`. - fn frozen(&self, reasons: Reasons) -> Balance { - match reasons { - Reasons::All => self.misc_frozen.max(self.fee_frozen), - Reasons::Misc => self.misc_frozen, - Reasons::Fee => self.fee_frozen, - } - } - /// The total balance in this account including any that is reserved and ignoring any frozen. - fn total(&self) -> Balance { - self.free.saturating_add(self.reserved) - } + /// How much this account's balance can be reduced for the given `reasons`. + fn usable(&self, reasons: Reasons) -> Balance { + self.free.saturating_sub(self.frozen(reasons)) + } + /// The amount that this account's free balance may not be reduced beyond for the given + /// `reasons`. + fn frozen(&self, reasons: Reasons) -> Balance { + match reasons { + Reasons::All => self.misc_frozen.max(self.fee_frozen), + Reasons::Misc => self.misc_frozen, + Reasons::Fee => self.fee_frozen, + } + } + /// The total balance in this account including any that is reserved and ignoring any frozen. + fn total(&self) -> Balance { + self.free.saturating_add(self.reserved) + } } // A value placed in storage that represents the current version of the Balances storage. 
@@ -345,473 +360,474 @@ impl AccountData { // storage migration logic. This should match directly with the semantic versions of the Rust crate. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] enum Releases { - V1_0_0, - V2_0_0, + V1_0_0, + V2_0_0, } impl Default for Releases { - fn default() -> Self { - Releases::V1_0_0 - } + fn default() -> Self { + Releases::V1_0_0 + } } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Balances { - /// The total units issued in the system. - pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { - config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n) - }): T::Balance; - - /// The balance of an account. - /// - /// NOTE: THIS MAY NEVER BE IN EXISTENCE AND YET HAVE A `total().is_zero()`. If the total - /// is ever zero, then the entry *MUST* be removed. - /// - /// NOTE: This is only used in the case that this module is used to store balances. - pub Account: map hasher(blake2_128_concat) T::AccountId => AccountData; - - /// Any liquidity locks on some account balances. - /// NOTE: Should only be accessed when setting, changing and freeing a lock. - pub Locks get(fn locks): map hasher(blake2_128_concat) T::AccountId => Vec>; - - /// Storage version of the pallet. - /// - /// This is set to v2.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V2_0_0): Releases; - } - add_extra_genesis { - config(balances): Vec<(T::AccountId, T::Balance)>; - // ^^ begin, length, amount liquid at genesis - build(|config: &GenesisConfig| { - assert!( - >::ExistentialDeposit::get() > Zero::zero(), - "The existential deposit should be greater than zero." - ); - for (_, balance) in &config.balances { - assert!( - *balance >= >::ExistentialDeposit::get(), - "the balance of any account should always be more than existential deposit.", - ) - } - for &(ref who, free) in config.balances.iter() { - T::AccountStore::insert(who, AccountData { free, .. 
Default::default() }); - } - }); - } + trait Store for Module, I: Instance=DefaultInstance> as Balances { + /// The total units issued in the system. + pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { + config.balances.iter().fold(Zero::zero(), |acc: T::Balance, &(_, n)| acc + n) + }): T::Balance; + + /// The balance of an account. + /// + /// NOTE: THIS MAY NEVER BE IN EXISTENCE AND YET HAVE A `total().is_zero()`. If the total + /// is ever zero, then the entry *MUST* be removed. + /// + /// NOTE: This is only used in the case that this module is used to store balances. + pub Account: map hasher(blake2_128_concat) T::AccountId => AccountData; + + /// Any liquidity locks on some account balances. + /// NOTE: Should only be accessed when setting, changing and freeing a lock. + pub Locks get(fn locks): map hasher(blake2_128_concat) T::AccountId => Vec>; + + /// Storage version of the pallet. + /// + /// This is set to v2.0.0 for new networks. + StorageVersion build(|_: &GenesisConfig| Releases::V2_0_0): Releases; + } + add_extra_genesis { + config(balances): Vec<(T::AccountId, T::Balance)>; + // ^^ begin, length, amount liquid at genesis + build(|config: &GenesisConfig| { + assert!( + >::ExistentialDeposit::get() > Zero::zero(), + "The existential deposit should be greater than zero." + ); + for (_, balance) in &config.balances { + assert!( + *balance >= >::ExistentialDeposit::get(), + "the balance of any account should always be more than existential deposit.", + ) + } + for &(ref who, free) in config.balances.iter() { + T::AccountStore::insert(who, AccountData { free, .. Default::default() }); + } + }); + } } decl_module! { - pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum amount required to keep an account open. 
- const ExistentialDeposit: T::Balance = T::ExistentialDeposit::get(); - - fn deposit_event() = default; - - /// Transfer some liquid free balance to another account. - /// - /// `transfer` will set the `FreeBalance` of the sender and receiver. - /// It will decrease the total issuance of the system by the `TransferFee`. - /// If the sender's account is below the existential deposit as a result - /// of the transfer, the account will be reaped. - /// - /// The dispatch origin for this call must be `Signed` by the transactor. - /// - /// # - /// - Dependent on arguments but not critical, given proper implementations for - /// input config types. See related functions below. - /// - It contains a limited number of reads and writes internally and no complex computation. - /// - /// Related functions: - /// - /// - `ensure_can_withdraw` is always called internally but has a bounded complexity. - /// - Transferring balances to accounts that did not exist before will cause - /// `T::OnNewAccount::on_new_account` to be called. - /// - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`. - /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional - /// check that the transfer will not kill the origin account. - /// - /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 200_000_000] - pub fn transfer( - origin, - dest: ::Source, - #[compact] value: T::Balance - ) { - let transactor = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?; - } - - /// Set the balances of a given account. - /// - /// This will alter `FreeBalance` and `ReservedBalance` in storage. it will - /// also decrease the total issuance of the system (`TotalIssuance`). - /// If the new free or reserved balance is below the existential deposit, - /// it will reset the account nonce (`frame_system::AccountNonce`). 
- /// - /// The dispatch origin for this call is `root`. - /// - /// # - /// - Independent of the arguments. - /// - Contains a limited number of reads and writes. - /// # - #[weight = T::DbWeight::get().reads_writes(1, 1) + 100_000_000] - fn set_balance( - origin, - who: ::Source, - #[compact] new_free: T::Balance, - #[compact] new_reserved: T::Balance - ) { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - let existential_deposit = T::ExistentialDeposit::get(); - - let wipeout = new_free + new_reserved < existential_deposit; - let new_free = if wipeout { Zero::zero() } else { new_free }; - let new_reserved = if wipeout { Zero::zero() } else { new_reserved }; - - let (free, reserved) = Self::mutate_account(&who, |account| { - if new_free > account.free { - mem::drop(PositiveImbalance::::new(new_free - account.free)); - } else if new_free < account.free { - mem::drop(NegativeImbalance::::new(account.free - new_free)); - } - - if new_reserved > account.reserved { - mem::drop(PositiveImbalance::::new(new_reserved - account.reserved)); - } else if new_reserved < account.reserved { - mem::drop(NegativeImbalance::::new(account.reserved - new_reserved)); - } - - account.free = new_free; - account.reserved = new_reserved; - - (account.free, account.reserved) - }); - Self::deposit_event(RawEvent::BalanceSet(who, free, reserved)); - } - - /// Exactly as `transfer`, except the origin must be root and the source account may be - /// specified. - /// # - /// - Same as transfer, but additional read and write because the source account is - /// not assumed to be in the overlay. 
- /// # - #[weight = T::DbWeight::get().reads_writes(2, 2) + 200_000_000] - pub fn force_transfer( - origin, - source: ::Source, - dest: ::Source, - #[compact] value: T::Balance - ) { - ensure_root(origin)?; - let source = T::Lookup::lookup(source)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?; - } - - /// Same as the [`transfer`] call, but with a check that the transfer will not kill the - /// origin account. - /// - /// 99% of the time you want [`transfer`] instead. - /// - /// [`transfer`]: struct.Module.html#method.transfer - #[weight = T::DbWeight::get().reads_writes(1, 1) + 150_000_000] - pub fn transfer_keep_alive( - origin, - dest: ::Source, - #[compact] value: T::Balance - ) { - let transactor = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - >::transfer(&transactor, &dest, value, KeepAlive)?; - } - } + pub struct Module, I: Instance = DefaultInstance> for enum Call where origin: T::Origin { + type Error = Error; + + /// The minimum amount required to keep an account open. + const ExistentialDeposit: T::Balance = T::ExistentialDeposit::get(); + + fn deposit_event() = default; + + /// Transfer some liquid free balance to another account. + /// + /// `transfer` will set the `FreeBalance` of the sender and receiver. + /// It will decrease the total issuance of the system by the `TransferFee`. + /// If the sender's account is below the existential deposit as a result + /// of the transfer, the account will be reaped. + /// + /// The dispatch origin for this call must be `Signed` by the transactor. + /// + /// # + /// - Dependent on arguments but not critical, given proper implementations for + /// input config types. See related functions below. + /// - It contains a limited number of reads and writes internally and no complex computation. + /// + /// Related functions: + /// + /// - `ensure_can_withdraw` is always called internally but has a bounded complexity. 
+ /// - Transferring balances to accounts that did not exist before will cause + /// `T::OnNewAccount::on_new_account` to be called. + /// - Removing enough funds from an account will trigger `T::DustRemoval::on_unbalanced`. + /// - `transfer_keep_alive` works the same way as `transfer`, but has an additional + /// check that the transfer will not kill the origin account. + /// + /// # + #[weight = T::DbWeight::get().reads_writes(1, 1) + 200_000_000] + pub fn transfer( + origin, + dest: ::Source, + #[compact] value: T::Balance + ) { + let transactor = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&transactor, &dest, value, ExistenceRequirement::AllowDeath)?; + } + + /// Set the balances of a given account. + /// + /// This will alter `FreeBalance` and `ReservedBalance` in storage. it will + /// also decrease the total issuance of the system (`TotalIssuance`). + /// If the new free or reserved balance is below the existential deposit, + /// it will reset the account nonce (`frame_system::AccountNonce`). + /// + /// The dispatch origin for this call is `root`. + /// + /// # + /// - Independent of the arguments. + /// - Contains a limited number of reads and writes. 
+ /// # + #[weight = T::DbWeight::get().reads_writes(1, 1) + 100_000_000] + fn set_balance( + origin, + who: ::Source, + #[compact] new_free: T::Balance, + #[compact] new_reserved: T::Balance + ) { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let existential_deposit = T::ExistentialDeposit::get(); + + let wipeout = new_free + new_reserved < existential_deposit; + let new_free = if wipeout { Zero::zero() } else { new_free }; + let new_reserved = if wipeout { Zero::zero() } else { new_reserved }; + + let (free, reserved) = Self::mutate_account(&who, |account| { + if new_free > account.free { + mem::drop(PositiveImbalance::::new(new_free - account.free)); + } else if new_free < account.free { + mem::drop(NegativeImbalance::::new(account.free - new_free)); + } + + if new_reserved > account.reserved { + mem::drop(PositiveImbalance::::new(new_reserved - account.reserved)); + } else if new_reserved < account.reserved { + mem::drop(NegativeImbalance::::new(account.reserved - new_reserved)); + } + + account.free = new_free; + account.reserved = new_reserved; + + (account.free, account.reserved) + }); + Self::deposit_event(RawEvent::BalanceSet(who, free, reserved)); + } + + /// Exactly as `transfer`, except the origin must be root and the source account may be + /// specified. + /// # + /// - Same as transfer, but additional read and write because the source account is + /// not assumed to be in the overlay. + /// # + #[weight = T::DbWeight::get().reads_writes(2, 2) + 200_000_000] + pub fn force_transfer( + origin, + source: ::Source, + dest: ::Source, + #[compact] value: T::Balance + ) { + ensure_root(origin)?; + let source = T::Lookup::lookup(source)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&source, &dest, value, ExistenceRequirement::AllowDeath)?; + } + + /// Same as the [`transfer`] call, but with a check that the transfer will not kill the + /// origin account. + /// + /// 99% of the time you want [`transfer`] instead. 
+ /// + /// [`transfer`]: struct.Module.html#method.transfer + #[weight = T::DbWeight::get().reads_writes(1, 1) + 150_000_000] + pub fn transfer_keep_alive( + origin, + dest: ::Source, + #[compact] value: T::Balance + ) { + let transactor = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + >::transfer(&transactor, &dest, value, KeepAlive)?; + } + } } impl, I: Instance> Module { - // PRIVATE MUTABLES - - /// Get the free balance of an account. - pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).free - } - - /// Get the balance of an account that can be used for transfers, reservations, or any other - /// non-locking, non-transaction-fee activity. Will be at most `free_balance`. - pub fn usable_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).usable(Reasons::Misc) - } - - /// Get the balance of an account that can be used for paying transaction fees (not tipping, - /// or any other kind of fees, though). Will be at most `free_balance`. - pub fn usable_balance_for_fees(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).usable(Reasons::Fee) - } - - /// Get the reserved balance of an account. - pub fn reserved_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { - Self::account(who.borrow()).reserved - } - - /// Get both the free and reserved balances of an account. - fn account(who: &T::AccountId) -> AccountData { - T::AccountStore::get(&who) - } - - /// Places the `free` and `reserved` parts of `new` into `account`. Also does any steps needed - /// after mutating an account. This includes DustRemoval unbalancing, in the case than the `new` - /// account's total balance is non-zero but below ED. - /// - /// Returns the final free balance, iff the account was previously of total balance zero, known - /// as its "endowment". 
- fn post_mutation( - who: &T::AccountId, - new: AccountData, - ) -> Option> { - let total = new.total(); - if total < T::ExistentialDeposit::get() { - if !total.is_zero() { - T::DustRemoval::on_unbalanced(NegativeImbalance::new(total)); - Self::deposit_event(RawEvent::DustLost(who.clone(), total)); - } - None - } else { - Some(new) - } - } - - /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce - /// `ExistentialDeposit` law, annulling the account as needed. - /// - /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used - /// when it is known that the account already exists. - /// - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - fn mutate_account( - who: &T::AccountId, - f: impl FnOnce(&mut AccountData) -> R - ) -> R { - Self::try_mutate_account(who, |a| -> Result { Ok(f(a)) }) - .expect("Error is infallible; qed") - } - - /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce - /// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the - /// result of `f` is an `Err`. - /// - /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used - /// when it is known that the account already exists. - /// - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. 
- fn try_mutate_account( - who: &T::AccountId, - f: impl FnOnce(&mut AccountData) -> Result - ) -> Result { - T::AccountStore::try_mutate_exists(who, |maybe_account| { - let mut account = maybe_account.take().unwrap_or_default(); - let was_zero = account.total().is_zero(); - f(&mut account).map(move |result| { - let maybe_endowed = if was_zero { Some(account.free) } else { None }; - *maybe_account = Self::post_mutation(who, account); - (maybe_endowed, result) - }) - }).map(|(maybe_endowed, result)| { - if let Some(endowed) = maybe_endowed { - Self::deposit_event(RawEvent::Endowed(who.clone(), endowed)); - } - result - }) - } - - /// Update the account entry for `who`, given the locks. - fn update_locks(who: &T::AccountId, locks: &[BalanceLock]) { - Self::mutate_account(who, |b| { - b.misc_frozen = Zero::zero(); - b.fee_frozen = Zero::zero(); - for l in locks.iter() { - if l.reasons == Reasons::All || l.reasons == Reasons::Misc { - b.misc_frozen = b.misc_frozen.max(l.amount); - } - if l.reasons == Reasons::All || l.reasons == Reasons::Fee { - b.fee_frozen = b.fee_frozen.max(l.amount); - } - } - }); - - let existed = Locks::::contains_key(who); - if locks.is_empty() { - Locks::::remove(who); - if existed { - // TODO: use Locks::::hashed_key - // https://github.com/paritytech/substrate/issues/4969 - system::Module::::dec_ref(who); - } - } else { - Locks::::insert(who, locks); - if !existed { - system::Module::::inc_ref(who); - } - } - } + // PRIVATE MUTABLES + + /// Get the free balance of an account. + pub fn free_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + Self::account(who.borrow()).free + } + + /// Get the balance of an account that can be used for transfers, reservations, or any other + /// non-locking, non-transaction-fee activity. Will be at most `free_balance`. 
+ pub fn usable_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + Self::account(who.borrow()).usable(Reasons::Misc) + } + + /// Get the balance of an account that can be used for paying transaction fees (not tipping, + /// or any other kind of fees, though). Will be at most `free_balance`. + pub fn usable_balance_for_fees(who: impl sp_std::borrow::Borrow) -> T::Balance { + Self::account(who.borrow()).usable(Reasons::Fee) + } + + /// Get the reserved balance of an account. + pub fn reserved_balance(who: impl sp_std::borrow::Borrow) -> T::Balance { + Self::account(who.borrow()).reserved + } + + /// Get both the free and reserved balances of an account. + fn account(who: &T::AccountId) -> AccountData { + T::AccountStore::get(&who) + } + + /// Places the `free` and `reserved` parts of `new` into `account`. Also does any steps needed + /// after mutating an account. This includes DustRemoval unbalancing, in the case than the `new` + /// account's total balance is non-zero but below ED. + /// + /// Returns the final free balance, iff the account was previously of total balance zero, known + /// as its "endowment". + fn post_mutation( + who: &T::AccountId, + new: AccountData, + ) -> Option> { + let total = new.total(); + if total < T::ExistentialDeposit::get() { + if !total.is_zero() { + T::DustRemoval::on_unbalanced(NegativeImbalance::new(total)); + Self::deposit_event(RawEvent::DustLost(who.clone(), total)); + } + None + } else { + Some(new) + } + } + + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce + /// `ExistentialDeposit` law, annulling the account as needed. + /// + /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used + /// when it is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. 
+ fn mutate_account( + who: &T::AccountId, + f: impl FnOnce(&mut AccountData) -> R, + ) -> R { + Self::try_mutate_account(who, |a| -> Result { Ok(f(a)) }) + .expect("Error is infallible; qed") + } + + /// Mutate an account to some new value, or delete it entirely with `None`. Will enforce + /// `ExistentialDeposit` law, annulling the account as needed. This will do nothing if the + /// result of `f` is an `Err`. + /// + /// NOTE: Doesn't do any preparatory work for creating a new account, so should only be used + /// when it is known that the account already exists. + /// + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + fn try_mutate_account( + who: &T::AccountId, + f: impl FnOnce(&mut AccountData) -> Result, + ) -> Result { + T::AccountStore::try_mutate_exists(who, |maybe_account| { + let mut account = maybe_account.take().unwrap_or_default(); + let was_zero = account.total().is_zero(); + f(&mut account).map(move |result| { + let maybe_endowed = if was_zero { Some(account.free) } else { None }; + *maybe_account = Self::post_mutation(who, account); + (maybe_endowed, result) + }) + }) + .map(|(maybe_endowed, result)| { + if let Some(endowed) = maybe_endowed { + Self::deposit_event(RawEvent::Endowed(who.clone(), endowed)); + } + result + }) + } + + /// Update the account entry for `who`, given the locks. 
+ fn update_locks(who: &T::AccountId, locks: &[BalanceLock]) { + Self::mutate_account(who, |b| { + b.misc_frozen = Zero::zero(); + b.fee_frozen = Zero::zero(); + for l in locks.iter() { + if l.reasons == Reasons::All || l.reasons == Reasons::Misc { + b.misc_frozen = b.misc_frozen.max(l.amount); + } + if l.reasons == Reasons::All || l.reasons == Reasons::Fee { + b.fee_frozen = b.fee_frozen.max(l.amount); + } + } + }); + + let existed = Locks::::contains_key(who); + if locks.is_empty() { + Locks::::remove(who); + if existed { + // TODO: use Locks::::hashed_key + // https://github.com/paritytech/substrate/issues/4969 + system::Module::::dec_ref(who); + } + } else { + Locks::::insert(who, locks); + if !existed { + system::Module::::inc_ref(who); + } + } + } } // wrapping these imbalances in a private module is necessary to ensure absolute privacy // of the inner member. mod imbalances { - use super::{ - result, Subtrait, DefaultInstance, Imbalance, Trait, Zero, Instance, Saturating, - StorageValue, TryDrop, - }; - use sp_std::mem; - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been created without any equal and opposite accounting. - #[must_use] - pub struct PositiveImbalance, I: Instance=DefaultInstance>(T::Balance); - - impl, I: Instance> PositiveImbalance { - /// Create a new positive imbalance from a balance. - pub fn new(amount: T::Balance) -> Self { - PositiveImbalance(amount) - } - } - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been destroyed without any equal and opposite accounting. - #[must_use] - pub struct NegativeImbalance, I: Instance=DefaultInstance>(T::Balance); - - impl, I: Instance> NegativeImbalance { - /// Create a new negative imbalance from a balance. 
- pub fn new(amount: T::Balance) -> Self { - NegativeImbalance(amount) - } - } - - impl, I: Instance> TryDrop for PositiveImbalance { - fn try_drop(self) -> result::Result<(), Self> { - self.drop_zero() - } - } - - impl, I: Instance> Imbalance for PositiveImbalance { - type Opposite = NegativeImbalance; - - fn zero() -> Self { - Self(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - - mem::forget(self); - (Self(first), Self(second)) - } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> result::Result { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a >= b { - Ok(Self(a - b)) - } else { - Err(NegativeImbalance::new(b - a)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } - } - - impl, I: Instance> TryDrop for NegativeImbalance { - fn try_drop(self) -> result::Result<(), Self> { - self.drop_zero() - } - } - - impl, I: Instance> Imbalance for NegativeImbalance { - type Opposite = PositiveImbalance; - - fn zero() -> Self { - Self(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - - mem::forget(self); - (Self(first), Self(second)) - } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> 
result::Result { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a >= b { - Ok(Self(a - b)) - } else { - Err(PositiveImbalance::new(b - a)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } - } - - impl, I: Instance> Drop for PositiveImbalance { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - , I>>::mutate( - |v| *v = v.saturating_add(self.0) - ); - } - } - - impl, I: Instance> Drop for NegativeImbalance { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - , I>>::mutate( - |v| *v = v.saturating_sub(self.0) - ); - } - } + use super::{ + result, DefaultInstance, Imbalance, Instance, Saturating, StorageValue, Subtrait, Trait, + TryDrop, Zero, + }; + use sp_std::mem; + + /// Opaque, move-only struct with private fields that serves as a token denoting that + /// funds have been created without any equal and opposite accounting. + #[must_use] + pub struct PositiveImbalance, I: Instance = DefaultInstance>(T::Balance); + + impl, I: Instance> PositiveImbalance { + /// Create a new positive imbalance from a balance. + pub fn new(amount: T::Balance) -> Self { + PositiveImbalance(amount) + } + } + + /// Opaque, move-only struct with private fields that serves as a token denoting that + /// funds have been destroyed without any equal and opposite accounting. + #[must_use] + pub struct NegativeImbalance, I: Instance = DefaultInstance>(T::Balance); + + impl, I: Instance> NegativeImbalance { + /// Create a new negative imbalance from a balance. 
+ pub fn new(amount: T::Balance) -> Self { + NegativeImbalance(amount) + } + } + + impl, I: Instance> TryDrop for PositiveImbalance { + fn try_drop(self) -> result::Result<(), Self> { + self.drop_zero() + } + } + + impl, I: Instance> Imbalance for PositiveImbalance { + type Opposite = NegativeImbalance; + + fn zero() -> Self { + Self(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + + mem::forget(self); + (Self(first), Self(second)) + } + fn merge(mut self, other: Self) -> Self { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + + self + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + } + fn offset(self, other: Self::Opposite) -> result::Result { + let (a, b) = (self.0, other.0); + mem::forget((self, other)); + + if a >= b { + Ok(Self(a - b)) + } else { + Err(NegativeImbalance::new(b - a)) + } + } + fn peek(&self) -> T::Balance { + self.0.clone() + } + } + + impl, I: Instance> TryDrop for NegativeImbalance { + fn try_drop(self) -> result::Result<(), Self> { + self.drop_zero() + } + } + + impl, I: Instance> Imbalance for NegativeImbalance { + type Opposite = PositiveImbalance; + + fn zero() -> Self { + Self(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + + mem::forget(self); + (Self(first), Self(second)) + } + fn merge(mut self, other: Self) -> Self { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + + self + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + } + fn offset(self, other: Self::Opposite) -> 
result::Result { + let (a, b) = (self.0, other.0); + mem::forget((self, other)); + + if a >= b { + Ok(Self(a - b)) + } else { + Err(PositiveImbalance::new(b - a)) + } + } + fn peek(&self) -> T::Balance { + self.0.clone() + } + } + + impl, I: Instance> Drop for PositiveImbalance { + /// Basic drop handler will just square up the total issuance. + fn drop(&mut self) { + , I>>::mutate(|v| { + *v = v.saturating_add(self.0) + }); + } + } + + impl, I: Instance> Drop for NegativeImbalance { + /// Basic drop handler will just square up the total issuance. + fn drop(&mut self) { + , I>>::mutate(|v| { + *v = v.saturating_sub(self.0) + }); + } + } } // TODO: #2052 @@ -827,390 +843,484 @@ mod imbalances { // depends on the Imbalance type (DustRemoval) is placed in its own pallet. struct ElevatedTrait, I: Instance>(T, I); impl, I: Instance> Clone for ElevatedTrait { - fn clone(&self) -> Self { unimplemented!() } + fn clone(&self) -> Self { + unimplemented!() + } } impl, I: Instance> PartialEq for ElevatedTrait { - fn eq(&self, _: &Self) -> bool { unimplemented!() } + fn eq(&self, _: &Self) -> bool { + unimplemented!() + } } impl, I: Instance> Eq for ElevatedTrait {} impl, I: Instance> frame_system::Trait for ElevatedTrait { - type Origin = T::Origin; - type Call = T::Call; - type Index = T::Index; - type BlockNumber = T::BlockNumber; - type Hash = T::Hash; - type Hashing = T::Hashing; - type AccountId = T::AccountId; - type Lookup = T::Lookup; - type Header = T::Header; - type Event = (); - type BlockHashCount = T::BlockHashCount; - type MaximumBlockWeight = T::MaximumBlockWeight; - type DbWeight = T::DbWeight; - type MaximumBlockLength = T::MaximumBlockLength; - type AvailableBlockRatio = T::AvailableBlockRatio; - type Version = T::Version; - type ModuleToIndex = T::ModuleToIndex; - type OnNewAccount = T::OnNewAccount; - type OnKilledAccount = T::OnKilledAccount; - type AccountData = T::AccountData; + type Origin = T::Origin; + type Call = T::Call; + type Index = T::Index; + 
type BlockNumber = T::BlockNumber; + type Hash = T::Hash; + type Hashing = T::Hashing; + type AccountId = T::AccountId; + type Lookup = T::Lookup; + type Header = T::Header; + type Event = (); + type BlockHashCount = T::BlockHashCount; + type MaximumBlockWeight = T::MaximumBlockWeight; + type DbWeight = T::DbWeight; + type MaximumBlockLength = T::MaximumBlockLength; + type AvailableBlockRatio = T::AvailableBlockRatio; + type Version = T::Version; + type ModuleToIndex = T::ModuleToIndex; + type OnNewAccount = T::OnNewAccount; + type OnKilledAccount = T::OnKilledAccount; + type AccountData = T::AccountData; } impl, I: Instance> Trait for ElevatedTrait { - type Balance = T::Balance; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = T::ExistentialDeposit; - type AccountStore = T::AccountStore; + type Balance = T::Balance; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = T::ExistentialDeposit; + type AccountStore = T::AccountStore; } -impl, I: Instance> Currency for Module where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: Instance> Currency for Module +where + T::Balance: MaybeSerializeDeserialize + Debug, { - type Balance = T::Balance; - type PositiveImbalance = PositiveImbalance; - type NegativeImbalance = NegativeImbalance; - - fn total_balance(who: &T::AccountId) -> Self::Balance { - Self::account(who).total() - } - - // Check if `value` amount of free balance can be slashed from `who`. - fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { return true } - Self::free_balance(who) >= value - } - - fn total_issuance() -> Self::Balance { - >::get() - } - - fn minimum_balance() -> Self::Balance { - T::ExistentialDeposit::get() - } - - // Burn funds from the total issuance, returning a positive imbalance for the amount burned. - // Is a no-op if amount to be burned is zero. 
- fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { - if amount.is_zero() { return PositiveImbalance::zero() } - >::mutate(|issued| { - *issued = issued.checked_sub(&amount).unwrap_or_else(|| { - amount = *issued; - Zero::zero() - }); - }); - PositiveImbalance::new(amount) - } - - // Create new funds into the total issuance, returning a negative imbalance - // for the amount issued. - // Is a no-op if amount to be issued it zero. - fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { - if amount.is_zero() { return NegativeImbalance::zero() } - >::mutate(|issued| - *issued = issued.checked_add(&amount).unwrap_or_else(|| { - amount = Self::Balance::max_value() - *issued; - Self::Balance::max_value() - }) - ); - NegativeImbalance::new(amount) - } - - fn free_balance(who: &T::AccountId) -> Self::Balance { - Self::account(who).free - } - - // Ensure that an account can withdraw from their free balance given any existing withdrawal - // restrictions like locks and vesting balance. - // Is a no-op if amount to be withdrawn is zero. - // - // # - // Despite iterating over a list of locks, they are limited by the number of - // lock IDs, which means the number of runtime modules that intend to use and create locks. - // # - fn ensure_can_withdraw( - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - new_balance: T::Balance, - ) -> DispatchResult { - if amount.is_zero() { return Ok(()) } - let min_balance = Self::account(who).frozen(reasons.into()); - ensure!(new_balance >= min_balance, Error::::LiquidityRestrictions); - Ok(()) - } - - // Transfer some free balance from `transactor` to `dest`, respecting existence requirements. - // Is a no-op if value to be transferred is zero or the `transactor` is the same as `dest`. 
- fn transfer( - transactor: &T::AccountId, - dest: &T::AccountId, - value: Self::Balance, - existence_requirement: ExistenceRequirement, - ) -> DispatchResult { - if value.is_zero() || transactor == dest { return Ok(()) } - - Self::try_mutate_account(dest, |to_account| -> DispatchResult { - Self::try_mutate_account(transactor, |from_account| -> DispatchResult { - from_account.free = from_account.free.checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; - - // NOTE: total stake being stored in the same type means that this could never overflow - // but better to be safe than sorry. - to_account.free = to_account.free.checked_add(&value).ok_or(Error::::Overflow)?; - - let ed = T::ExistentialDeposit::get(); - ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); - - Self::ensure_can_withdraw( - transactor, - value, - WithdrawReason::Transfer.into(), - from_account.free, - )?; - - let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; - let allow_death = allow_death && system::Module::::allow_death(transactor); - ensure!(allow_death || from_account.free >= ed, Error::::KeepAlive); - - Ok(()) - }) - })?; - - // Emit transfer event. - Self::deposit_event(RawEvent::Transfer(transactor.clone(), dest.clone(), value)); - - Ok(()) - } - - /// Slash a target account `who`, returning the negative imbalance created and any left over - /// amount that could not be slashed. - /// - /// Is a no-op if `value` to be slashed is zero. - /// - /// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn - /// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having - /// to draw from reserved funds, however we err on the side of punishment if things are inconsistent - /// or `can_slash` wasn't used appropriately. 
- fn slash( - who: &T::AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } - - Self::mutate_account(who, |account| { - let free_slash = cmp::min(account.free, value); - account.free -= free_slash; - - let remaining_slash = value - free_slash; - if !remaining_slash.is_zero() { - let reserved_slash = cmp::min(account.reserved, remaining_slash); - account.reserved -= reserved_slash; - (NegativeImbalance::new(free_slash + reserved_slash), remaining_slash - reserved_slash) - } else { - (NegativeImbalance::new(value), Zero::zero()) - } - }) - } - - /// Deposit some `value` into the free balance of an existing target account `who`. - /// - /// Is a no-op if the `value` to be deposited is zero. - fn deposit_into_existing( - who: &T::AccountId, - value: Self::Balance - ) -> Result { - if value.is_zero() { return Ok(PositiveImbalance::zero()) } - - Self::try_mutate_account(who, |account| -> Result { - ensure!(!account.total().is_zero(), Error::::DeadAccount); - account.free = account.free.checked_add(&value).ok_or(Error::::Overflow)?; - Ok(PositiveImbalance::new(value)) - }) - } - - /// Deposit some `value` into the free balance of `who`, possibly creating a new account. - /// - /// This function is a no-op if: - /// - the `value` to be deposited is zero; or - /// - if the `value` to be deposited is less than the ED and the account does not yet exist; or - /// - `value` is so large it would cause the balance of `who` to overflow. - fn deposit_creating( - who: &T::AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance { - if value.is_zero() { return Self::PositiveImbalance::zero() } - - Self::try_mutate_account(who, |account| -> Result { - // bail if not yet created and this operation wouldn't be enough to create it. 
- let ed = T::ExistentialDeposit::get(); - ensure!(value >= ed || !account.total().is_zero(), Self::PositiveImbalance::zero()); - - // defensive only: overflow should never happen, however in case it does, then this - // operation is a no-op. - account.free = account.free.checked_add(&value).ok_or(Self::PositiveImbalance::zero())?; - - Ok(PositiveImbalance::new(value)) - }).unwrap_or_else(|x| x) - } - - /// Withdraw some free balance from an account, respecting existence requirements. - /// - /// Is a no-op if value to be withdrawn is zero. - fn withdraw( - who: &T::AccountId, - value: Self::Balance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result { - if value.is_zero() { return Ok(NegativeImbalance::zero()); } - - Self::try_mutate_account(who, |account| - -> Result - { - let new_free_account = account.free.checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; - - // bail if we need to keep the account alive and this would kill it. - let ed = T::ExistentialDeposit::get(); - let would_be_dead = new_free_account + account.reserved < ed; - let would_kill = would_be_dead && account.free + account.reserved >= ed; - ensure!(liveness == AllowDeath || !would_kill, Error::::KeepAlive); - - Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; - - account.free = new_free_account; - - Ok(NegativeImbalance::new(value)) - }) - } - - /// Force the new free balance of a target account `who` to some new value `balance`. - fn make_free_balance_be(who: &T::AccountId, value: Self::Balance) - -> SignedImbalance - { - Self::try_mutate_account(who, |account| - -> Result, ()> - { - let ed = T::ExistentialDeposit::get(); - // If we're attempting to set an existing account to less than ED, then - // bypass the entire operation. 
It's a no-op if you follow it through, but - // since this is an instance where we might account for a negative imbalance - // (in the dust cleaner of set_account) before we account for its actual - // equal and opposite cause (returned as an Imbalance), then in the - // instance that there's no other accounts on the system at all, we might - // underflow the issuance and our arithmetic will be off. - ensure!(value + account.reserved >= ed || !account.total().is_zero(), ()); - - let imbalance = if account.free <= value { - SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) - } else { - SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) - }; - account.free = value; - Ok(imbalance) - }).unwrap_or(SignedImbalance::Positive(Self::PositiveImbalance::zero())) - } + type Balance = T::Balance; + type PositiveImbalance = PositiveImbalance; + type NegativeImbalance = NegativeImbalance; + + fn total_balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).total() + } + + // Check if `value` amount of free balance can be slashed from `who`. + fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { + if value.is_zero() { + return true; + } + Self::free_balance(who) >= value + } + + fn total_issuance() -> Self::Balance { + >::get() + } + + fn minimum_balance() -> Self::Balance { + T::ExistentialDeposit::get() + } + + // Burn funds from the total issuance, returning a positive imbalance for the amount burned. + // Is a no-op if amount to be burned is zero. + fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { + if amount.is_zero() { + return PositiveImbalance::zero(); + } + >::mutate(|issued| { + *issued = issued.checked_sub(&amount).unwrap_or_else(|| { + amount = *issued; + Zero::zero() + }); + }); + PositiveImbalance::new(amount) + } + + // Create new funds into the total issuance, returning a negative imbalance + // for the amount issued. + // Is a no-op if amount to be issued it zero. 
+ fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { + if amount.is_zero() { + return NegativeImbalance::zero(); + } + >::mutate(|issued| { + *issued = issued.checked_add(&amount).unwrap_or_else(|| { + amount = Self::Balance::max_value() - *issued; + Self::Balance::max_value() + }) + }); + NegativeImbalance::new(amount) + } + + fn free_balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).free + } + + // Ensure that an account can withdraw from their free balance given any existing withdrawal + // restrictions like locks and vesting balance. + // Is a no-op if amount to be withdrawn is zero. + // + // # + // Despite iterating over a list of locks, they are limited by the number of + // lock IDs, which means the number of runtime modules that intend to use and create locks. + // # + fn ensure_can_withdraw( + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + new_balance: T::Balance, + ) -> DispatchResult { + if amount.is_zero() { + return Ok(()); + } + let min_balance = Self::account(who).frozen(reasons.into()); + ensure!( + new_balance >= min_balance, + Error::::LiquidityRestrictions + ); + Ok(()) + } + + // Transfer some free balance from `transactor` to `dest`, respecting existence requirements. + // Is a no-op if value to be transferred is zero or the `transactor` is the same as `dest`. + fn transfer( + transactor: &T::AccountId, + dest: &T::AccountId, + value: Self::Balance, + existence_requirement: ExistenceRequirement, + ) -> DispatchResult { + if value.is_zero() || transactor == dest { + return Ok(()); + } + + Self::try_mutate_account(dest, |to_account| -> DispatchResult { + Self::try_mutate_account(transactor, |from_account| -> DispatchResult { + from_account.free = from_account + .free + .checked_sub(&value) + .ok_or(Error::::InsufficientBalance)?; + + // NOTE: total stake being stored in the same type means that this could never overflow + // but better to be safe than sorry. 
+ to_account.free = to_account + .free + .checked_add(&value) + .ok_or(Error::::Overflow)?; + + let ed = T::ExistentialDeposit::get(); + ensure!(to_account.total() >= ed, Error::::ExistentialDeposit); + + Self::ensure_can_withdraw( + transactor, + value, + WithdrawReason::Transfer.into(), + from_account.free, + )?; + + let allow_death = existence_requirement == ExistenceRequirement::AllowDeath; + let allow_death = allow_death && system::Module::::allow_death(transactor); + ensure!( + allow_death || from_account.free >= ed, + Error::::KeepAlive + ); + + Ok(()) + }) + })?; + + // Emit transfer event. + Self::deposit_event(RawEvent::Transfer(transactor.clone(), dest.clone(), value)); + + Ok(()) + } + + /// Slash a target account `who`, returning the negative imbalance created and any left over + /// amount that could not be slashed. + /// + /// Is a no-op if `value` to be slashed is zero. + /// + /// NOTE: `slash()` prefers free balance, but assumes that reserve balance can be drawn + /// from in extreme circumstances. `can_slash()` should be used prior to `slash()` to avoid having + /// to draw from reserved funds, however we err on the side of punishment if things are inconsistent + /// or `can_slash` wasn't used appropriately. + fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()); + } + + Self::mutate_account(who, |account| { + let free_slash = cmp::min(account.free, value); + account.free -= free_slash; + + let remaining_slash = value - free_slash; + if !remaining_slash.is_zero() { + let reserved_slash = cmp::min(account.reserved, remaining_slash); + account.reserved -= reserved_slash; + ( + NegativeImbalance::new(free_slash + reserved_slash), + remaining_slash - reserved_slash, + ) + } else { + (NegativeImbalance::new(value), Zero::zero()) + } + }) + } + + /// Deposit some `value` into the free balance of an existing target account `who`. 
+ /// + /// Is a no-op if the `value` to be deposited is zero. + fn deposit_into_existing( + who: &T::AccountId, + value: Self::Balance, + ) -> Result { + if value.is_zero() { + return Ok(PositiveImbalance::zero()); + } + + Self::try_mutate_account( + who, + |account| -> Result { + ensure!(!account.total().is_zero(), Error::::DeadAccount); + account.free = account + .free + .checked_add(&value) + .ok_or(Error::::Overflow)?; + Ok(PositiveImbalance::new(value)) + }, + ) + } + + /// Deposit some `value` into the free balance of `who`, possibly creating a new account. + /// + /// This function is a no-op if: + /// - the `value` to be deposited is zero; or + /// - if the `value` to be deposited is less than the ED and the account does not yet exist; or + /// - `value` is so large it would cause the balance of `who` to overflow. + fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { + if value.is_zero() { + return Self::PositiveImbalance::zero(); + } + + Self::try_mutate_account( + who, + |account| -> Result { + // bail if not yet created and this operation wouldn't be enough to create it. + let ed = T::ExistentialDeposit::get(); + ensure!( + value >= ed || !account.total().is_zero(), + Self::PositiveImbalance::zero() + ); + + // defensive only: overflow should never happen, however in case it does, then this + // operation is a no-op. + account.free = account + .free + .checked_add(&value) + .ok_or(Self::PositiveImbalance::zero())?; + + Ok(PositiveImbalance::new(value)) + }, + ) + .unwrap_or_else(|x| x) + } + + /// Withdraw some free balance from an account, respecting existence requirements. + /// + /// Is a no-op if value to be withdrawn is zero. 
+ fn withdraw( + who: &T::AccountId, + value: Self::Balance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> result::Result { + if value.is_zero() { + return Ok(NegativeImbalance::zero()); + } + + Self::try_mutate_account( + who, + |account| -> Result { + let new_free_account = account + .free + .checked_sub(&value) + .ok_or(Error::::InsufficientBalance)?; + + // bail if we need to keep the account alive and this would kill it. + let ed = T::ExistentialDeposit::get(); + let would_be_dead = new_free_account + account.reserved < ed; + let would_kill = would_be_dead && account.free + account.reserved >= ed; + ensure!( + liveness == AllowDeath || !would_kill, + Error::::KeepAlive + ); + + Self::ensure_can_withdraw(who, value, reasons, new_free_account)?; + + account.free = new_free_account; + + Ok(NegativeImbalance::new(value)) + }, + ) + } + + /// Force the new free balance of a target account `who` to some new value `balance`. + fn make_free_balance_be( + who: &T::AccountId, + value: Self::Balance, + ) -> SignedImbalance { + Self::try_mutate_account( + who, + |account| -> Result, ()> { + let ed = T::ExistentialDeposit::get(); + // If we're attempting to set an existing account to less than ED, then + // bypass the entire operation. It's a no-op if you follow it through, but + // since this is an instance where we might account for a negative imbalance + // (in the dust cleaner of set_account) before we account for its actual + // equal and opposite cause (returned as an Imbalance), then in the + // instance that there's no other accounts on the system at all, we might + // underflow the issuance and our arithmetic will be off. 
+ ensure!( + value + account.reserved >= ed || !account.total().is_zero(), + () + ); + + let imbalance = if account.free <= value { + SignedImbalance::Positive(PositiveImbalance::new(value - account.free)) + } else { + SignedImbalance::Negative(NegativeImbalance::new(account.free - value)) + }; + account.free = value; + Ok(imbalance) + }, + ) + .unwrap_or(SignedImbalance::Positive(Self::PositiveImbalance::zero())) + } } -impl, I: Instance> ReservableCurrency for Module where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: Instance> ReservableCurrency for Module +where + T::Balance: MaybeSerializeDeserialize + Debug, { - /// Check if `who` can reserve `value` from their free balance. - /// - /// Always `true` if value to be reserved is zero. - fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { - if value.is_zero() { return true } - Self::account(who).free - .checked_sub(&value) - .map_or(false, |new_balance| - Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve.into(), new_balance).is_ok() - ) - } - - fn reserved_balance(who: &T::AccountId) -> Self::Balance { - Self::account(who).reserved - } - - /// Move `value` from the free balance from `who` to their reserved balance. - /// - /// Is a no-op if value to be reserved is zero. - fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { - if value.is_zero() { return Ok(()) } - - Self::try_mutate_account(who, |account| -> DispatchResult { - account.free = account.free.checked_sub(&value).ok_or(Error::::InsufficientBalance)?; - account.reserved = account.reserved.checked_add(&value).ok_or(Error::::Overflow)?; - Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve.into(), account.free) - }) - } - - /// Unreserve some funds, returning any amount that was unable to be unreserved. - /// - /// Is a no-op if the value to be unreserved is zero. 
- fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { - if value.is_zero() { return Zero::zero() } - - Self::mutate_account(who, |account| { - let actual = cmp::min(account.reserved, value); - account.reserved -= actual; - // defensive only: this can never fail since total issuance which is at least free+reserved - // fits into the same data type. - account.free = account.free.saturating_add(actual); - value - actual - }) - } - - /// Slash from reserved balance, returning the negative imbalance created, - /// and any amount that was unable to be slashed. - /// - /// Is a no-op if the value to be slashed is zero. - fn slash_reserved( - who: &T::AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance) { - if value.is_zero() { return (NegativeImbalance::zero(), Zero::zero()) } - - Self::mutate_account(who, |account| { - // underflow should never happen, but it if does, there's nothing to be done here. - let actual = cmp::min(account.reserved, value); - account.reserved -= actual; - (NegativeImbalance::new(actual), value - actual) - }) - } - - /// Move the reserved balance of one account into the balance of another, according to `status`. - /// - /// Is a no-op if: - /// - the value to be moved is zero; or - /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. 
- fn repatriate_reserved( - slashed: &T::AccountId, - beneficiary: &T::AccountId, - value: Self::Balance, - status: Status, - ) -> Result { - if value.is_zero() { return Ok(Zero::zero()) } - - if slashed == beneficiary { - return match status { - Status::Free => Ok(Self::unreserve(slashed, value)), - Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), - }; - } - - Self::try_mutate_account(beneficiary, |to_account| -> Result { - ensure!(!to_account.total().is_zero(), Error::::DeadAccount); - Self::try_mutate_account(slashed, |from_account| -> Result { - let actual = cmp::min(from_account.reserved, value); - match status { - Status::Free => to_account.free = to_account.free.checked_add(&actual).ok_or(Error::::Overflow)?, - Status::Reserved => to_account.reserved = to_account.reserved.checked_add(&actual).ok_or(Error::::Overflow)?, - } - from_account.reserved -= actual; - Ok(value - actual) - }) - }) - } + /// Check if `who` can reserve `value` from their free balance. + /// + /// Always `true` if value to be reserved is zero. + fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { + if value.is_zero() { + return true; + } + Self::account(who) + .free + .checked_sub(&value) + .map_or(false, |new_balance| { + Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve.into(), new_balance) + .is_ok() + }) + } + + fn reserved_balance(who: &T::AccountId) -> Self::Balance { + Self::account(who).reserved + } + + /// Move `value` from the free balance from `who` to their reserved balance. + /// + /// Is a no-op if value to be reserved is zero. 
+ fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { + if value.is_zero() { + return Ok(()); + } + + Self::try_mutate_account(who, |account| -> DispatchResult { + account.free = account + .free + .checked_sub(&value) + .ok_or(Error::::InsufficientBalance)?; + account.reserved = account + .reserved + .checked_add(&value) + .ok_or(Error::::Overflow)?; + Self::ensure_can_withdraw(who, value, WithdrawReason::Reserve.into(), account.free) + }) + } + + /// Unreserve some funds, returning any amount that was unable to be unreserved. + /// + /// Is a no-op if the value to be unreserved is zero. + fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { + if value.is_zero() { + return Zero::zero(); + } + + Self::mutate_account(who, |account| { + let actual = cmp::min(account.reserved, value); + account.reserved -= actual; + // defensive only: this can never fail since total issuance which is at least free+reserved + // fits into the same data type. + account.free = account.free.saturating_add(actual); + value - actual + }) + } + + /// Slash from reserved balance, returning the negative imbalance created, + /// and any amount that was unable to be slashed. + /// + /// Is a no-op if the value to be slashed is zero. + fn slash_reserved( + who: &T::AccountId, + value: Self::Balance, + ) -> (Self::NegativeImbalance, Self::Balance) { + if value.is_zero() { + return (NegativeImbalance::zero(), Zero::zero()); + } + + Self::mutate_account(who, |account| { + // underflow should never happen, but it if does, there's nothing to be done here. + let actual = cmp::min(account.reserved, value); + account.reserved -= actual; + (NegativeImbalance::new(actual), value - actual) + }) + } + + /// Move the reserved balance of one account into the balance of another, according to `status`. + /// + /// Is a no-op if: + /// - the value to be moved is zero; or + /// - the `slashed` id equal to `beneficiary` and the `status` is `Reserved`. 
+ fn repatriate_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: Self::Balance, + status: Status, + ) -> Result { + if value.is_zero() { + return Ok(Zero::zero()); + } + + if slashed == beneficiary { + return match status { + Status::Free => Ok(Self::unreserve(slashed, value)), + Status::Reserved => Ok(value.saturating_sub(Self::reserved_balance(slashed))), + }; + } + + Self::try_mutate_account( + beneficiary, + |to_account| -> Result { + ensure!(!to_account.total().is_zero(), Error::::DeadAccount); + Self::try_mutate_account( + slashed, + |from_account| -> Result { + let actual = cmp::min(from_account.reserved, value); + match status { + Status::Free => { + to_account.free = to_account + .free + .checked_add(&actual) + .ok_or(Error::::Overflow)? + } + Status::Reserved => { + to_account.reserved = to_account + .reserved + .checked_add(&actual) + .ok_or(Error::::Overflow)? + } + } + from_account.reserved -= actual; + Ok(value - actual) + }, + ) + }, + ) + } } /// Implement `OnKilledAccount` to remove the local account, if using local account storage. @@ -1219,82 +1329,92 @@ impl, I: Instance> ReservableCurrency for Module /// if you're using the local balance storage. **If you're using the composite system account /// storage (which is the default in most examples and tests) then there's no need.** impl, I: Instance> OnKilledAccount for Module { - fn on_killed_account(who: &T::AccountId) { - Account::::remove(who); - } + fn on_killed_account(who: &T::AccountId) { + Account::::remove(who); + } } impl, I: Instance> LockableCurrency for Module where - T::Balance: MaybeSerializeDeserialize + Debug + T::Balance: MaybeSerializeDeserialize + Debug, { - type Moment = T::BlockNumber; - - // Set a lock on the balance of `who`. - // Is a no-op if lock amount is zero or `reasons` `is_none()`. 
- fn set_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - if amount.is_zero() || reasons.is_none() { return } - let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who).into_iter().filter_map(|l| - if l.id == id { - new_lock.take() - } else { - Some(l) - }).collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - Self::update_locks(who, &locks[..]); - } - - // Extend a lock on the balance of `who`. - // Is a no-op if lock amount is zero or `reasons` `is_none()`. - fn extend_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - if amount.is_zero() || reasons.is_none() { return } - let mut new_lock = Some(BalanceLock { id, amount, reasons: reasons.into() }); - let mut locks = Self::locks(who).into_iter().filter_map(|l| - if l.id == id { - new_lock.take().map(|nl| { - BalanceLock { - id: l.id, - amount: l.amount.max(nl.amount), - reasons: l.reasons | nl.reasons, - } - }) - } else { - Some(l) - }).collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - Self::update_locks(who, &locks[..]); - } - - fn remove_lock( - id: LockIdentifier, - who: &T::AccountId, - ) { - let mut locks = Self::locks(who); - locks.retain(|l| l.id != id); - Self::update_locks(who, &locks[..]); - } + type Moment = T::BlockNumber; + + // Set a lock on the balance of `who`. + // Is a no-op if lock amount is zero or `reasons` `is_none()`. 
+ fn set_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + if amount.is_zero() || reasons.is_none() { + return; + } + let mut new_lock = Some(BalanceLock { + id, + amount, + reasons: reasons.into(), + }); + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + Self::update_locks(who, &locks[..]); + } + + // Extend a lock on the balance of `who`. + // Is a no-op if lock amount is zero or `reasons` `is_none()`. + fn extend_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + if amount.is_zero() || reasons.is_none() { + return; + } + let mut new_lock = Some(BalanceLock { + id, + amount, + reasons: reasons.into(), + }); + let mut locks = Self::locks(who) + .into_iter() + .filter_map(|l| { + if l.id == id { + new_lock.take().map(|nl| BalanceLock { + id: l.id, + amount: l.amount.max(nl.amount), + reasons: l.reasons | nl.reasons, + }) + } else { + Some(l) + } + }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + Self::update_locks(who, &locks[..]); + } + + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { + let mut locks = Self::locks(who); + locks.retain(|l| l.id != id); + Self::update_locks(who, &locks[..]); + } } -impl, I: Instance> IsDeadAccount for Module where - T::Balance: MaybeSerializeDeserialize + Debug +impl, I: Instance> IsDeadAccount for Module +where + T::Balance: MaybeSerializeDeserialize + Debug, { - fn is_dead_account(who: &T::AccountId) -> bool { - // this should always be exactly equivalent to `Self::account(who).total().is_zero()` - !T::AccountStore::is_explicit(who) - } + fn is_dead_account(who: &T::AccountId) -> bool { + // this should always be exactly equivalent to `Self::account(who).total().is_zero()` + !T::AccountStore::is_explicit(who) + } } diff --git 
a/frame/balances/src/tests.rs b/frame/balances/src/tests.rs index 7ff7ec0fc4..e856c29bab 100644 --- a/frame/balances/src/tests.rs +++ b/frame/balances/src/tests.rs @@ -21,14 +21,13 @@ #[derive(Debug)] pub struct CallWithDispatchInfo; impl sp_runtime::traits::Dispatchable for CallWithDispatchInfo { - type Origin = (); - type Trait = (); - type Info = frame_support::weights::DispatchInfo; - type PostInfo = frame_support::weights::PostDispatchInfo; - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); - } + type Origin = (); + type Trait = (); + type Info = frame_support::weights::DispatchInfo; + type PostInfo = frame_support::weights::PostDispatchInfo; + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + panic!("Do not use dummy implementation for dispatch."); + } } #[macro_export] diff --git a/frame/balances/src/tests_composite.rs b/frame/balances/src/tests_composite.rs index 72668ad0d8..a240c07c65 100644 --- a/frame/balances/src/tests_composite.rs +++ b/frame/balances/src/tests_composite.rs @@ -18,126 +18,132 @@ #![cfg(test)] -use sp_runtime::{ - Perbill, - traits::{ConvertInto, IdentityLookup}, - testing::Header, -}; +use crate::{decl_tests, tests::CallWithDispatchInfo, GenesisConfig, Module, Trait}; +use frame_support::traits::Get; +use frame_support::weights::{DispatchInfo, Weight}; +use frame_support::{impl_outer_origin, parameter_types}; use sp_core::H256; use sp_io; -use frame_support::{impl_outer_origin, parameter_types}; -use frame_support::traits::Get; -use frame_support::weights::{Weight, DispatchInfo}; +use sp_runtime::{ + testing::Header, + traits::{ConvertInto, IdentityLookup}, + Perbill, +}; use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Test {} +impl_outer_origin! 
{ + pub enum Origin for Test {} } thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); + static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); } pub struct ExistentialDeposit; impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } + fn get() -> u64 { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) + } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = CallWithDispatchInfo; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = super::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = CallWithDispatchInfo; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = 
MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = super::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! { - pub const TransactionBaseFee: u64 = 0; - pub const TransactionByteFee: u64 = 1; + pub const TransactionBaseFee: u64 = 0; + pub const TransactionByteFee: u64 = 1; } impl pallet_transaction_payment::Trait for Test { - type Currency = Module; - type OnTransactionPayment = (); - type TransactionBaseFee = TransactionBaseFee; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = ConvertInto; - type FeeMultiplierUpdate = (); + type Currency = Module; + type OnTransactionPayment = (); + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = ConvertInto; + type FeeMultiplierUpdate = (); } impl Trait for Test { - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = system::Module; + type Balance = u64; + type DustRemoval = (); + type Event = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = system::Module; } pub struct ExtBuilder { - existential_deposit: u64, - monied: bool, + existential_deposit: u64, + monied: bool, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - existential_deposit: 1, - monied: false, - } - } + fn default() -> Self { + Self { + existential_deposit: 1, + monied: false, + } + } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn monied(mut self, monied: bool) -> Self { - self.monied = monied; - self - } - pub fn set_associated_consts(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - } - pub fn build(self) -> sp_io::TestExternalities { - self.set_associated_consts(); - let mut t = 
frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { - balances: if self.monied { - vec![ - (1, 10 * self.existential_deposit), - (2, 20 * self.existential_deposit), - (3, 30 * self.existential_deposit), - (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) - ] - } else { - vec![] - }, - }.assimilate_storage(&mut t).unwrap(); - t.into() - } + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn monied(mut self, monied: bool) -> Self { + self.monied = monied; + self + } + pub fn set_associated_consts(&self) { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + } + pub fn build(self) -> sp_io::TestExternalities { + self.set_associated_consts(); + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + GenesisConfig:: { + balances: if self.monied { + vec![ + (1, 10 * self.existential_deposit), + (2, 20 * self.existential_deposit), + (3, 30 * self.existential_deposit), + (4, 40 * self.existential_deposit), + (12, 10 * self.existential_deposit), + ] + } else { + vec![] + }, + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } } -decl_tests!{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } +decl_tests! 
{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } diff --git a/frame/balances/src/tests_local.rs b/frame/balances/src/tests_local.rs index aab275c781..462ae6fc7c 100644 --- a/frame/balances/src/tests_local.rs +++ b/frame/balances/src/tests_local.rs @@ -18,134 +18,141 @@ #![cfg(test)] -use sp_runtime::{ - Perbill, - traits::{ConvertInto, IdentityLookup}, - testing::Header, -}; +use crate::{decl_tests, tests::CallWithDispatchInfo, GenesisConfig, Module, Trait}; +use frame_support::traits::{Get, StorageMapShim}; +use frame_support::weights::{DispatchInfo, Weight}; +use frame_support::{impl_outer_origin, parameter_types}; use sp_core::H256; use sp_io; -use frame_support::{impl_outer_origin, parameter_types}; -use frame_support::traits::{Get, StorageMapShim}; -use frame_support::weights::{Weight, DispatchInfo}; +use sp_runtime::{ + testing::Header, + traits::{ConvertInto, IdentityLookup}, + Perbill, +}; use std::cell::RefCell; -use crate::{GenesisConfig, Module, Trait, decl_tests, tests::CallWithDispatchInfo}; use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Test {} +impl_outer_origin! { + pub enum Origin for Test {} } thread_local! { - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); + static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); } pub struct ExistentialDeposit; impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } + fn get() -> u64 { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) + } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Test; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = CallWithDispatchInfo; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = super::AccountData; - type OnNewAccount = (); - type OnKilledAccount = Module; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = CallWithDispatchInfo; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = super::AccountData; + type OnNewAccount = (); + type OnKilledAccount = Module; } parameter_types! 
{ - pub const TransactionBaseFee: u64 = 0; - pub const TransactionByteFee: u64 = 1; + pub const TransactionBaseFee: u64 = 0; + pub const TransactionByteFee: u64 = 1; } impl pallet_transaction_payment::Trait for Test { - type Currency = Module; - type OnTransactionPayment = (); - type TransactionBaseFee = TransactionBaseFee; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = ConvertInto; - type FeeMultiplierUpdate = (); + type Currency = Module; + type OnTransactionPayment = (); + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = ConvertInto; + type FeeMultiplierUpdate = (); } impl Trait for Test { - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = StorageMapShim< - super::Account, - system::CallOnCreatedAccount, - system::CallKillAccount, - u64, super::AccountData - >; + type Balance = u64; + type DustRemoval = (); + type Event = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = StorageMapShim< + super::Account, + system::CallOnCreatedAccount, + system::CallKillAccount, + u64, + super::AccountData, + >; } pub struct ExtBuilder { - existential_deposit: u64, - monied: bool, + existential_deposit: u64, + monied: bool, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - existential_deposit: 1, - monied: false, - } - } + fn default() -> Self { + Self { + existential_deposit: 1, + monied: false, + } + } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn monied(mut self, monied: bool) -> Self { - self.monied = monied; - if self.existential_deposit == 0 { - self.existential_deposit = 1; - } - self - } - pub fn set_associated_consts(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - } - pub fn build(self) -> 
sp_io::TestExternalities { - self.set_associated_consts(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { - balances: if self.monied { - vec![ - (1, 10 * self.existential_deposit), - (2, 20 * self.existential_deposit), - (3, 30 * self.existential_deposit), - (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) - ] - } else { - vec![] - }, - }.assimilate_storage(&mut t).unwrap(); - t.into() - } + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn monied(mut self, monied: bool) -> Self { + self.monied = monied; + if self.existential_deposit == 0 { + self.existential_deposit = 1; + } + self + } + pub fn set_associated_consts(&self) { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + } + pub fn build(self) -> sp_io::TestExternalities { + self.set_associated_consts(); + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + GenesisConfig:: { + balances: if self.monied { + vec![ + (1, 10 * self.existential_deposit), + (2, 20 * self.existential_deposit), + (3, 30 * self.existential_deposit), + (4, 40 * self.existential_deposit), + (12, 10 * self.existential_deposit), + ] + } else { + vec![] + }, + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } } -decl_tests!{ Test, ExtBuilder, EXISTENTIAL_DEPOSIT } +decl_tests! { Test, ExtBuilder, EXISTENTIAL_DEPOSIT } diff --git a/frame/benchmark/src/benchmarking.rs b/frame/benchmark/src/benchmarking.rs index 1e4740da2c..79c591a543 100644 --- a/frame/benchmark/src/benchmarking.rs +++ b/frame/benchmark/src/benchmarking.rs @@ -20,114 +20,114 @@ use super::*; +use frame_benchmarking::{account, benchmarks}; use frame_system::RawOrigin; use sp_std::prelude::*; -use frame_benchmarking::{benchmarks, account}; use crate::Module as Benchmark; const SEED: u32 = 0; benchmarks! { - _ { - let m in 1 .. 
1000 => { - let origin = RawOrigin::Signed(account("member", m, SEED)); - Benchmark::::add_member_list(origin.into())? - }; - let i in 1 .. 1000 => { - MyMap::insert(i, i); - }; - let d in 1 .. 1000 => { - for i in 0..d { - for j in 0..100 { - MyDoubleMap::insert(i, j, d); - } - } - }; - } - - add_member_list { - let m in ...; - }: _(RawOrigin::Signed(account("member", m + 1, SEED))) - - append_member_list { - let m in ...; - }: _(RawOrigin::Signed(account("member", m + 1, SEED))) - - read_value { - let n in 1 .. 1000; - MyValue::put(n); - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - put_value { - let n in 1 .. 1000; - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - exists_value { - let n in 1 .. 1000; - MyValue::put(n); - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - remove_value { - let i in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), i) - - read_map { - let i in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), i) - - insert_map { - let n in 1 .. 1000; - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - contains_key_map { - let i in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), i) - - remove_prefix { - let d in ...; - }: _(RawOrigin::Signed(account("user", 0, SEED)), d) - - do_nothing { - let n in 1 .. 1000; - }: _(RawOrigin::Signed(account("user", 0, SEED)), n) - - encode_accounts { - let a in 1 .. 1000; - let mut accounts = Vec::new(); - for _ in 0..a { - accounts.push(account::("encode", a, SEED)); - } - }: _(RawOrigin::Signed(account("user", 0, SEED)), accounts) - - decode_accounts { - let a in 1 .. 1000; - let mut accounts = Vec::new(); - for _ in 0..a { - accounts.push(account::("encode", a, SEED)); - } - let bytes = accounts.encode(); - }: _(RawOrigin::Signed(account("user", 0, SEED)), bytes) - - // Custom implementation to handle benchmarking of storage recalculation. 
- // Puts `repeat` number of items into random storage keys, and then times how - // long it takes to recalculate the storage root. - storage_root { - let z in 0 .. 10000; - }: { - for index in 0 .. z { - let random = (index).using_encoded(sp_io::hashing::blake2_256); - sp_io::storage::set(&random, &random); - } - } - - // Custom implementation to handle benchmarking of calling a host function. - // Will check how long it takes to call `current_time()`. - current_time { - let z in 0 .. 1000; - }: { - for _ in 0 .. z { - let _ = frame_benchmarking::benchmarking::current_time(); - } - } + _ { + let m in 1 .. 1000 => { + let origin = RawOrigin::Signed(account("member", m, SEED)); + Benchmark::::add_member_list(origin.into())? + }; + let i in 1 .. 1000 => { + MyMap::insert(i, i); + }; + let d in 1 .. 1000 => { + for i in 0..d { + for j in 0..100 { + MyDoubleMap::insert(i, j, d); + } + } + }; + } + + add_member_list { + let m in ...; + }: _(RawOrigin::Signed(account("member", m + 1, SEED))) + + append_member_list { + let m in ...; + }: _(RawOrigin::Signed(account("member", m + 1, SEED))) + + read_value { + let n in 1 .. 1000; + MyValue::put(n); + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + put_value { + let n in 1 .. 1000; + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + exists_value { + let n in 1 .. 1000; + MyValue::put(n); + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + remove_value { + let i in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), i) + + read_map { + let i in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), i) + + insert_map { + let n in 1 .. 1000; + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + contains_key_map { + let i in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), i) + + remove_prefix { + let d in ...; + }: _(RawOrigin::Signed(account("user", 0, SEED)), d) + + do_nothing { + let n in 1 .. 
1000; + }: _(RawOrigin::Signed(account("user", 0, SEED)), n) + + encode_accounts { + let a in 1 .. 1000; + let mut accounts = Vec::new(); + for _ in 0..a { + accounts.push(account::("encode", a, SEED)); + } + }: _(RawOrigin::Signed(account("user", 0, SEED)), accounts) + + decode_accounts { + let a in 1 .. 1000; + let mut accounts = Vec::new(); + for _ in 0..a { + accounts.push(account::("encode", a, SEED)); + } + let bytes = accounts.encode(); + }: _(RawOrigin::Signed(account("user", 0, SEED)), bytes) + + // Custom implementation to handle benchmarking of storage recalculation. + // Puts `repeat` number of items into random storage keys, and then times how + // long it takes to recalculate the storage root. + storage_root { + let z in 0 .. 10000; + }: { + for index in 0 .. z { + let random = (index).using_encoded(sp_io::hashing::blake2_256); + sp_io::storage::set(&random, &random); + } + } + + // Custom implementation to handle benchmarking of calling a host function. + // Will check how long it takes to call `current_time()`. + current_time { + let z in 0 .. 1000; + }: { + for _ in 0 .. z { + let _ = frame_benchmarking::benchmarking::current_time(); + } + } } diff --git a/frame/benchmark/src/lib.rs b/frame/benchmark/src/lib.rs index 24b0e43310..c61a621e46 100644 --- a/frame/benchmark/src/lib.rs +++ b/frame/benchmark/src/lib.rs @@ -20,172 +20,176 @@ #![cfg_attr(not(feature = "std"), no_std)] -use frame_support::{decl_module, decl_storage, decl_event, decl_error}; -use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; +use codec::{Decode, Encode}; use frame_support::traits::Currency; +use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage}; use frame_system::{self as system, ensure_signed}; -use codec::{Encode, Decode}; use sp_std::prelude::Vec; mod benchmarking; /// Type alias for currency balance. 
-pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// The pallet's configuration trait. pub trait Trait: system::Trait { - type Event: From> + Into<::Event>; - type Currency: Currency; + type Event: From> + Into<::Event>; + type Currency: Currency; } // This pallet's storage items. decl_storage! { - trait Store for Module as Benchmark { - MyMemberList: Vec; - MyMemberMap: map hasher(blake2_128_concat) T::AccountId => bool; - MyValue: u32; - MyMap: map hasher(twox_64_concat) u32 => u32; - MyDoubleMap: double_map hasher(twox_64_concat) u32, hasher(identity) u32 => u32; - } + trait Store for Module as Benchmark { + MyMemberList: Vec; + MyMemberMap: map hasher(blake2_128_concat) T::AccountId => bool; + MyValue: u32; + MyMap: map hasher(twox_64_concat) u32 => u32; + MyDoubleMap: double_map hasher(twox_64_concat) u32, hasher(identity) u32 => u32; + } } // The pallet's events decl_event!( - pub enum Event where AccountId = ::AccountId { - Dummy(u32, AccountId), - } + pub enum Event + where + AccountId = ::AccountId, + { + Dummy(u32, AccountId), + } ); // The pallet's errors decl_error! { - pub enum Error for Module { - } + pub enum Error for Module { + } } // The pallet's dispatchable functions. decl_module! { - /// The module declaration. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Do nothing. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn do_nothing(_origin, input: u32) { - if input > 0 { - return Ok(()); - } - } - - /// Read a value from storage value `repeat` number of times. - /// Note the first `get()` read here will pull from the underlying - /// storage database, however, the `repeat` calls will all pull from the - /// storage overlay cache. You must consider this when analyzing the - /// results of the benchmark. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn read_value(_origin, repeat: u32) { - for _ in 0..repeat { - MyValue::get(); - } - } - - /// Put a value into a storage value. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn put_value(_origin, repeat: u32) { - for r in 0..repeat { - MyValue::put(r); - } - } - - /// Read a value from storage `repeat` number of times. - /// Note the first `exists()` read here will pull from the underlying - /// storage database, however, the `repeat` calls will all pull from the - /// storage overlay cache. You must consider this when analyzing the - /// results of the benchmark. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn exists_value(_origin, repeat: u32) { - for _ in 0..repeat { - MyValue::exists(); - } - } - - /// Remove a value from storage `repeat` number of times. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn remove_value(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::remove(r); - } - } - - /// Read a value from storage map `repeat` number of times. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn read_map(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::get(r); - } - } - - /// Insert a value into a map. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn insert_map(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::insert(r, r); - } - } - - /// Check is a map contains a value `repeat` number of times. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn contains_key_map(_origin, repeat: u32) { - for r in 0..repeat { - MyMap::contains_key(r); - } - } - - /// Read a value from storage `repeat` number of times. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn remove_prefix(_origin, repeat: u32) { - for r in 0..repeat { - MyDoubleMap::remove_prefix(r); - } - } - - /// Add user to the list. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn add_member_list(origin) { - let who = ensure_signed(origin)?; - MyMemberList::::mutate(|x| x.push(who)); - } - - /// Append user to the list. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn append_member_list(origin) { - let who = ensure_signed(origin)?; - MyMemberList::::append(&[who])?; - } - - /// Encode a vector of accounts to bytes. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn encode_accounts(_origin, accounts: Vec) { - let bytes = accounts.encode(); - - // In an attempt to tell the compiler not to optimize away this benchmark, we will use - // the result of encoding the accounts. - if bytes.is_empty() { - frame_support::print("You are encoding zero accounts."); - } - } - - /// Decode bytes into a vector of accounts. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn decode_accounts(_origin, bytes: Vec) { - let accounts: Vec = Decode::decode(&mut bytes.as_slice()).map_err(|_| "Could not decode")?; - - // In an attempt to tell the compiler not to optimize away this benchmark, we will use - // the result of decoding the bytes. - if accounts.is_empty() { - frame_support::print("You are decoding zero bytes."); - } - } - } + /// The module declaration. + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Do nothing. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn do_nothing(_origin, input: u32) { + if input > 0 { + return Ok(()); + } + } + + /// Read a value from storage value `repeat` number of times. + /// Note the first `get()` read here will pull from the underlying + /// storage database, however, the `repeat` calls will all pull from the + /// storage overlay cache. You must consider this when analyzing the + /// results of the benchmark. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn read_value(_origin, repeat: u32) { + for _ in 0..repeat { + MyValue::get(); + } + } + + /// Put a value into a storage value. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn put_value(_origin, repeat: u32) { + for r in 0..repeat { + MyValue::put(r); + } + } + + /// Read a value from storage `repeat` number of times. + /// Note the first `exists()` read here will pull from the underlying + /// storage database, however, the `repeat` calls will all pull from the + /// storage overlay cache. You must consider this when analyzing the + /// results of the benchmark. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn exists_value(_origin, repeat: u32) { + for _ in 0..repeat { + MyValue::exists(); + } + } + + /// Remove a value from storage `repeat` number of times. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn remove_value(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::remove(r); + } + } + + /// Read a value from storage map `repeat` number of times. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn read_map(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::get(r); + } + } + + /// Insert a value into a map. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn insert_map(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::insert(r, r); + } + } + + /// Check is a map contains a value `repeat` number of times. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn contains_key_map(_origin, repeat: u32) { + for r in 0..repeat { + MyMap::contains_key(r); + } + } + + /// Read a value from storage `repeat` number of times. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn remove_prefix(_origin, repeat: u32) { + for r in 0..repeat { + MyDoubleMap::remove_prefix(r); + } + } + + /// Add user to the list. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn add_member_list(origin) { + let who = ensure_signed(origin)?; + MyMemberList::::mutate(|x| x.push(who)); + } + + /// Append user to the list. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn append_member_list(origin) { + let who = ensure_signed(origin)?; + MyMemberList::::append(&[who])?; + } + + /// Encode a vector of accounts to bytes. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn encode_accounts(_origin, accounts: Vec) { + let bytes = accounts.encode(); + + // In an attempt to tell the compiler not to optimize away this benchmark, we will use + // the result of encoding the accounts. + if bytes.is_empty() { + frame_support::print("You are encoding zero accounts."); + } + } + + /// Decode bytes into a vector of accounts. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn decode_accounts(_origin, bytes: Vec) { + let accounts: Vec = Decode::decode(&mut bytes.as_slice()).map_err(|_| "Could not decode")?; + + // In an attempt to tell the compiler not to optimize away this benchmark, we will use + // the result of decoding the bytes. + if accounts.is_empty() { + frame_support::print("You are decoding zero bytes."); + } + } + } } diff --git a/frame/benchmarking/src/analysis.rs b/frame/benchmarking/src/analysis.rs index fdf1210832..06c79e00e6 100644 --- a/frame/benchmarking/src/analysis.rs +++ b/frame/benchmarking/src/analysis.rs @@ -16,228 +16,339 @@ //! Tools for analysing the benchmark results. 
-use std::collections::BTreeMap; -use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel}; use crate::BenchmarkResults; +use linregress::{FormulaRegressionBuilder, RegressionDataBuilder, RegressionModel}; +use std::collections::BTreeMap; pub struct Analysis { - base: u128, - slopes: Vec, - names: Vec, - value_dists: Option, u128, u128)>>, - model: Option, + base: u128, + slopes: Vec, + names: Vec, + value_dists: Option, u128, u128)>>, + model: Option, } impl Analysis { - pub fn median_slopes(r: &Vec) -> Option { - let results = r[0].0.iter().enumerate().map(|(i, &(param, _))| { - let mut counted = BTreeMap::, usize>::new(); - for (params, _, _) in r.iter() { - let mut p = params.iter().map(|x| x.1).collect::>(); - p[i] = 0; - *counted.entry(p).or_default() += 1; - } - let others: Vec = counted.iter().max_by_key(|i| i.1).expect("r is not empty; qed").0.clone(); - let values = r.iter() - .filter(|v| - v.0.iter() - .map(|x| x.1) - .zip(others.iter()) - .enumerate() - .all(|(j, (v1, v2))| j == i || v1 == *v2) - ).map(|(ps, v, _)| (ps[i].1, *v)) - .collect::>(); - (format!("{:?}", param), i, others, values) - }).collect::>(); - - let models = results.iter().map(|(_, _, _, ref values)| { - let mut slopes = vec![]; - for (i, &(x1, y1)) in values.iter().enumerate() { - for &(x2, y2) in values.iter().skip(i + 1) { - if x1 != x2 { - slopes.push((y1 as f64 - y2 as f64) / (x1 as f64 - x2 as f64)); - } - } - } - slopes.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); - let slope = slopes[slopes.len() / 2]; - - let mut offsets = vec![]; - for &(x, y) in values.iter() { - offsets.push(y as f64 - slope * x as f64); - } - offsets.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); - let offset = offsets[offsets.len() / 2]; - - (offset, slope) - }).collect::>(); - - let models = models.iter() - .zip(results.iter()) - .map(|((offset, slope), (_, i, others, _))| { - let over = others.iter() - .enumerate() - .filter(|(j, 
_)| j != i) - .map(|(j, v)| models[j].1 * *v as f64) - .fold(0f64, |acc, i| acc + i); - (*offset - over, *slope) - }) - .collect::>(); - - let base = models[0].0.max(0f64) as u128; - let slopes = models.iter().map(|x| x.1.max(0f64) as u128).collect::>(); - - Some(Self { - base, - slopes, - names: results.into_iter().map(|x| x.0).collect::>(), - value_dists: None, - model: None, - }) - } - - pub fn min_squares_iqr(r: &Vec) -> Option { - let mut results = BTreeMap::, Vec>::new(); - for &(ref params, t, _) in r.iter() { - let p = params.iter().map(|x| x.1).collect::>(); - results.entry(p).or_default().push(t); - } - for (_, rs) in results.iter_mut() { - rs.sort(); - let ql = rs.len() / 4; - *rs = rs[ql..rs.len() - ql].to_vec(); - } - - let mut data = vec![("Y", results.iter().flat_map(|x| x.1.iter().map(|v| *v as f64)).collect())]; - - let names = r[0].0.iter().map(|x| format!("{:?}", x.0)).collect::>(); - data.extend(names.iter() - .enumerate() - .map(|(i, p)| ( - p.as_str(), - results.iter() - .flat_map(|x| Some(x.0[i] as f64) - .into_iter() - .cycle() - .take(x.1.len()) - ).collect::>() - )) - ); - - let data = RegressionDataBuilder::new().build_from(data).ok()?; - - let model = FormulaRegressionBuilder::new() - .data(&data) - .formula(format!("Y ~ {}", names.join(" + "))) - .fit() - .ok()?; - - let slopes = model.parameters.regressor_values.iter() - .enumerate() - .map(|(_, x)| (*x + 0.5) as u128) - .collect(); - - let value_dists = results.iter().map(|(p, vs)| { - let total = vs.iter() - .fold(0u128, |acc, v| acc + *v); - let mean = total / vs.len() as u128; - let sum_sq_diff = vs.iter() - .fold(0u128, |acc, v| { - let d = mean.max(*v) - mean.min(*v); - acc + d * d - }); - let stddev = (sum_sq_diff as f64 / vs.len() as f64).sqrt() as u128; - (p.clone(), mean, stddev) - }).collect::>(); - - Some(Self { - base: (model.parameters.intercept_value + 0.5) as u128, - slopes, - names, - value_dists: Some(value_dists), - model: Some(model), - }) - } + pub fn 
median_slopes(r: &Vec) -> Option { + let results = r[0] + .0 + .iter() + .enumerate() + .map(|(i, &(param, _))| { + let mut counted = BTreeMap::, usize>::new(); + for (params, _, _) in r.iter() { + let mut p = params.iter().map(|x| x.1).collect::>(); + p[i] = 0; + *counted.entry(p).or_default() += 1; + } + let others: Vec = counted + .iter() + .max_by_key(|i| i.1) + .expect("r is not empty; qed") + .0 + .clone(); + let values = r + .iter() + .filter(|v| { + v.0.iter() + .map(|x| x.1) + .zip(others.iter()) + .enumerate() + .all(|(j, (v1, v2))| j == i || v1 == *v2) + }) + .map(|(ps, v, _)| (ps[i].1, *v)) + .collect::>(); + (format!("{:?}", param), i, others, values) + }) + .collect::>(); + + let models = results + .iter() + .map(|(_, _, _, ref values)| { + let mut slopes = vec![]; + for (i, &(x1, y1)) in values.iter().enumerate() { + for &(x2, y2) in values.iter().skip(i + 1) { + if x1 != x2 { + slopes.push((y1 as f64 - y2 as f64) / (x1 as f64 - x2 as f64)); + } + } + } + slopes.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let slope = slopes[slopes.len() / 2]; + + let mut offsets = vec![]; + for &(x, y) in values.iter() { + offsets.push(y as f64 - slope * x as f64); + } + offsets.sort_by(|a, b| a.partial_cmp(b).expect("values well defined; qed")); + let offset = offsets[offsets.len() / 2]; + + (offset, slope) + }) + .collect::>(); + + let models = models + .iter() + .zip(results.iter()) + .map(|((offset, slope), (_, i, others, _))| { + let over = others + .iter() + .enumerate() + .filter(|(j, _)| j != i) + .map(|(j, v)| models[j].1 * *v as f64) + .fold(0f64, |acc, i| acc + i); + (*offset - over, *slope) + }) + .collect::>(); + + let base = models[0].0.max(0f64) as u128; + let slopes = models + .iter() + .map(|x| x.1.max(0f64) as u128) + .collect::>(); + + Some(Self { + base, + slopes, + names: results.into_iter().map(|x| x.0).collect::>(), + value_dists: None, + model: None, + }) + } + + pub fn min_squares_iqr(r: &Vec) -> Option { + let mut 
results = BTreeMap::, Vec>::new(); + for &(ref params, t, _) in r.iter() { + let p = params.iter().map(|x| x.1).collect::>(); + results.entry(p).or_default().push(t); + } + for (_, rs) in results.iter_mut() { + rs.sort(); + let ql = rs.len() / 4; + *rs = rs[ql..rs.len() - ql].to_vec(); + } + + let mut data = vec![( + "Y", + results + .iter() + .flat_map(|x| x.1.iter().map(|v| *v as f64)) + .collect(), + )]; + + let names = r[0] + .0 + .iter() + .map(|x| format!("{:?}", x.0)) + .collect::>(); + data.extend(names.iter().enumerate().map(|(i, p)| { + ( + p.as_str(), + results + .iter() + .flat_map(|x| Some(x.0[i] as f64).into_iter().cycle().take(x.1.len())) + .collect::>(), + ) + })); + + let data = RegressionDataBuilder::new().build_from(data).ok()?; + + let model = FormulaRegressionBuilder::new() + .data(&data) + .formula(format!("Y ~ {}", names.join(" + "))) + .fit() + .ok()?; + + let slopes = model + .parameters + .regressor_values + .iter() + .enumerate() + .map(|(_, x)| (*x + 0.5) as u128) + .collect(); + + let value_dists = results + .iter() + .map(|(p, vs)| { + let total = vs.iter().fold(0u128, |acc, v| acc + *v); + let mean = total / vs.len() as u128; + let sum_sq_diff = vs.iter().fold(0u128, |acc, v| { + let d = mean.max(*v) - mean.min(*v); + acc + d * d + }); + let stddev = (sum_sq_diff as f64 / vs.len() as f64).sqrt() as u128; + (p.clone(), mean, stddev) + }) + .collect::>(); + + Some(Self { + base: (model.parameters.intercept_value + 0.5) as u128, + slopes, + names, + value_dists: Some(value_dists), + model: Some(model), + }) + } } fn ms(mut nanos: u128) -> String { - let mut x = 100_000u128; - while x > 1 { - if nanos > x * 1_000 { - nanos = nanos / x * x; - break; - } - x /= 10; - } - format!("{}", nanos as f64 / 1_000f64) + let mut x = 100_000u128; + while x > 1 { + if nanos > x * 1_000 { + nanos = nanos / x * x; + break; + } + x /= 10; + } + format!("{}", nanos as f64 / 1_000f64) } impl std::fmt::Display for Analysis { - fn fmt(&self, f: &mut 
std::fmt::Formatter) -> std::fmt::Result { - if let Some(ref value_dists) = self.value_dists { - writeln!(f, "\nData points distribution:")?; - writeln!(f, "{} mean µs sigma µs %", self.names.iter().map(|p| format!("{:>5}", p)).collect::>().join(" "))?; - for (param_values, mean, sigma) in value_dists.iter() { - writeln!(f, "{} {:>8} {:>8} {:>3}.{}%", - param_values.iter().map(|v| format!("{:>5}", v)).collect::>().join(" "), - ms(*mean), - ms(*sigma), - (sigma * 100 / mean), - (sigma * 1000 / mean % 10) - )?; - } - } - if let Some(ref model) = self.model { - writeln!(f, "\nQuality and confidence:")?; - writeln!(f, "param error")?; - for (p, se) in self.names.iter().zip(model.se.regressor_values.iter()) { - writeln!(f, "{} {:>8}", p, ms(*se as u128))?; - } - } - - writeln!(f, "\nModel:")?; - writeln!(f, "Time ~= {:>8}", ms(self.base))?; - for (&t, n) in self.slopes.iter().zip(self.names.iter()) { - writeln!(f, " + {} {:>8}", n, ms(t))?; - } - writeln!(f, " µs") - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + if let Some(ref value_dists) = self.value_dists { + writeln!(f, "\nData points distribution:")?; + writeln!( + f, + "{} mean µs sigma µs %", + self.names + .iter() + .map(|p| format!("{:>5}", p)) + .collect::>() + .join(" ") + )?; + for (param_values, mean, sigma) in value_dists.iter() { + writeln!( + f, + "{} {:>8} {:>8} {:>3}.{}%", + param_values + .iter() + .map(|v| format!("{:>5}", v)) + .collect::>() + .join(" "), + ms(*mean), + ms(*sigma), + (sigma * 100 / mean), + (sigma * 1000 / mean % 10) + )?; + } + } + if let Some(ref model) = self.model { + writeln!(f, "\nQuality and confidence:")?; + writeln!(f, "param error")?; + for (p, se) in self.names.iter().zip(model.se.regressor_values.iter()) { + writeln!(f, "{} {:>8}", p, ms(*se as u128))?; + } + } + + writeln!(f, "\nModel:")?; + writeln!(f, "Time ~= {:>8}", ms(self.base))?; + for (&t, n) in self.slopes.iter().zip(self.names.iter()) { + writeln!(f, " + {} {:>8}", n, ms(t))?; + } + 
writeln!(f, " µs") + } } #[cfg(test)] mod tests { - use super::*; - use crate::BenchmarkParameter; - - #[test] - fn analysis_median_slopes_should_work() { - let a = Analysis::median_slopes(&vec![ - (vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0), - (vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0), - (vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0), - ]).unwrap(); - assert_eq!(a.base, 10_000_000); - assert_eq!(a.slopes, vec![1_000_000, 100_000]); - } - - #[test] - fn analysis_median_min_squares_should_work() { - let a = Analysis::min_squares_iqr(&vec![ - (vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], 11_500_000, 0), - (vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], 12_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], 13_500_000, 0), - (vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], 14_500_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], 13_100_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], 13_300_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], 13_700_000, 0), - (vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], 14_000_000, 0), - ]).unwrap(); - assert_eq!(a.base, 10_000_000); - assert_eq!(a.slopes, vec![1_000_000, 100_000]); - } + use super::*; + use crate::BenchmarkParameter; + + #[test] + fn analysis_median_slopes_should_work() { + let a = Analysis::median_slopes(&vec![ + ( + vec![(BenchmarkParameter::n, 1), 
(BenchmarkParameter::m, 5)], + 11_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], + 12_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], + 13_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], + 14_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], + 13_100_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], + 13_300_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], + 13_700_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], + 14_000_000, + 0, + ), + ]) + .unwrap(); + assert_eq!(a.base, 10_000_000); + assert_eq!(a.slopes, vec![1_000_000, 100_000]); + } + + #[test] + fn analysis_median_min_squares_should_work() { + let a = Analysis::min_squares_iqr(&vec![ + ( + vec![(BenchmarkParameter::n, 1), (BenchmarkParameter::m, 5)], + 11_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 2), (BenchmarkParameter::m, 5)], + 12_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 5)], + 13_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 4), (BenchmarkParameter::m, 5)], + 14_500_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 1)], + 13_100_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 3)], + 13_300_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 7)], + 13_700_000, + 0, + ), + ( + vec![(BenchmarkParameter::n, 3), (BenchmarkParameter::m, 10)], + 14_000_000, + 0, + ), + ]) + .unwrap(); + assert_eq!(a.base, 10_000_000); + assert_eq!(a.slopes, vec![1_000_000, 100_000]); + } } diff --git a/frame/benchmarking/src/lib.rs b/frame/benchmarking/src/lib.rs index 6bb10f3d97..c416384817 100644 --- a/frame/benchmarking/src/lib.rs +++ b/frame/benchmarking/src/lib.rs @@ -18,18 +18,18 @@ #![cfg_attr(not(feature = "std"), no_std)] 
-mod tests; -mod utils; #[cfg(feature = "std")] mod analysis; +mod tests; +mod utils; -pub use utils::*; #[cfg(feature = "std")] pub use analysis::Analysis; +pub use paste; #[doc(hidden)] pub use sp_io::storage::root as storage_root; pub use sp_runtime::traits::{Dispatchable, Zero}; -pub use paste; +pub use utils::*; /// Construct pallet benchmarks for weighing dispatchables. /// @@ -995,7 +995,6 @@ macro_rules! impl_benchmark_tests { }; } - /// This macro adds pallet benchmarks to a `Vec` object. /// /// First create an object that holds in the input parameters for the benchmark: diff --git a/frame/benchmarking/src/tests.rs b/frame/benchmarking/src/tests.rs index 4b26ec732d..10b26bba12 100644 --- a/frame/benchmarking/src/tests.rs +++ b/frame/benchmarking/src/tests.rs @@ -20,207 +20,222 @@ use super::*; use codec::Decode; -use sp_std::prelude::*; -use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::{H256, Header}}; use frame_support::{ - dispatch::DispatchResult, - weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, - decl_module, decl_storage, impl_outer_origin, assert_ok, assert_err, ensure + assert_err, assert_ok, decl_module, decl_storage, + dispatch::DispatchResult, + ensure, impl_outer_origin, + weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, +}; +use frame_system::{ensure_none, ensure_signed, RawOrigin}; +use sp_runtime::{ + testing::{Header, H256}, + traits::{BlakeTwo256, IdentityLookup}, }; -use frame_system::{RawOrigin, ensure_signed, ensure_none}; +use sp_std::prelude::*; decl_storage! { - trait Store for Module as Test { - Value get(fn value): Option; - } + trait Store for Module as Test { + Value get(fn value): Option; + } } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn set_value(origin, n: u32) -> DispatchResult { - let _sender = ensure_signed(origin)?; - Value::put(n); - Ok(()) - } - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn dummy(origin, _n: u32) -> DispatchResult { - let _sender = ensure_none(origin)?; - Ok(()) - } - } + pub struct Module for enum Call where origin: T::Origin { + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn set_value(origin, n: u32) -> DispatchResult { + let _sender = ensure_signed(origin)?; + Value::put(n); + Ok(()) + } + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn dummy(origin, _n: u32) -> DispatchResult { + let _sender = ensure_none(origin)?; + Ok(()) + } + } } impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } pub trait Trait { - type Event; - type BlockNumber; - type AccountId: 'static + Default + Decode; - type Origin: From> + Into, Self::Origin>>; + type Event; + type BlockNumber; + type AccountId: 'static + Default + Decode; + type Origin: From> + + Into, Self::Origin>>; } #[derive(Clone, Eq, PartialEq)] pub struct Test; impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type MaximumBlockLength = (); - type AvailableBlockRatio = (); - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + 
type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = (); + type MaximumBlockWeight = (); + type DbWeight = (); + type MaximumBlockLength = (); + type AvailableBlockRatio = (); + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl Trait for Test { - type Event = (); - type BlockNumber = u32; - type Origin = Origin; - type AccountId = u64; + type Event = (); + type BlockNumber = u32; + type Origin = Origin; + type AccountId = u64; } // This function basically just builds a genesis storage key/value store according to // our desired mockup. fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default().build_storage::().unwrap().into() + frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() } -benchmarks!{ - _ { - // Define a common range for `b`. - let b in 1 .. 1000 => (); - } - - set_value { - let b in ...; - let caller = account::("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - verify { - assert_eq!(Value::get(), Some(b)); - } - - other_name { - let b in ...; - }: dummy (RawOrigin::None, b.into()) - - sort_vector { - let x in 1 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); - } - }: { - m.sort(); - } verify { - ensure!(m[0] == 0, "You forgot to sort!") - } - - bad_origin { - let b in ...; - let caller = account::("caller", 0, 0); - }: dummy (RawOrigin::Signed(caller), b.into()) - - bad_verify { - let x in 1 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); - } - }: { } - verify { - ensure!(m[0] == 0, "You forgot to sort!") - } +benchmarks! { + _ { + // Define a common range for `b`. + let b in 1 .. 
1000 => (); + } + + set_value { + let b in ...; + let caller = account::("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + verify { + assert_eq!(Value::get(), Some(b)); + } + + other_name { + let b in ...; + }: dummy (RawOrigin::None, b.into()) + + sort_vector { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + m.sort(); + } verify { + ensure!(m[0] == 0, "You forgot to sort!") + } + + bad_origin { + let b in ...; + let caller = account::("caller", 0, 0); + }: dummy (RawOrigin::Signed(caller), b.into()) + + bad_verify { + let x in 1 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { } + verify { + ensure!(m[0] == 0, "You forgot to sort!") + } } #[test] fn benchmarks_macro_works() { - // Check benchmark creation for `set_value`. - let selected_benchmark = SelectedBenchmark::set_value; - - let components = >::components(&selected_benchmark); - assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); - - let closure = >::instance( - &selected_benchmark, - &[(BenchmarkParameter::b, 1)], - ).expect("failed to create closure"); - - new_test_ext().execute_with(|| { - assert_eq!(closure(), Ok(())); - }); + // Check benchmark creation for `set_value`. + let selected_benchmark = SelectedBenchmark::set_value; + + let components = + >::components(&selected_benchmark); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + + let closure = >::instance( + &selected_benchmark, + &[(BenchmarkParameter::b, 1)], + ) + .expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_eq!(closure(), Ok(())); + }); } #[test] fn benchmarks_macro_rename_works() { - // Check benchmark creation for `other_dummy`. 
- let selected_benchmark = SelectedBenchmark::other_name; - let components = >::components(&selected_benchmark); - assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); - - let closure = >::instance( - &selected_benchmark, - &[(BenchmarkParameter::b, 1)], - ).expect("failed to create closure"); - - new_test_ext().execute_with(|| { - assert_ok!(closure()); - }); + // Check benchmark creation for `other_dummy`. + let selected_benchmark = SelectedBenchmark::other_name; + let components = + >::components(&selected_benchmark); + assert_eq!(components, vec![(BenchmarkParameter::b, 1, 1000)]); + + let closure = >::instance( + &selected_benchmark, + &[(BenchmarkParameter::b, 1)], + ) + .expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); } #[test] fn benchmarks_macro_works_for_non_dispatchable() { - let selected_benchmark = SelectedBenchmark::sort_vector; + let selected_benchmark = SelectedBenchmark::sort_vector; - let components = >::components(&selected_benchmark); - assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); + let components = + >::components(&selected_benchmark); + assert_eq!(components, vec![(BenchmarkParameter::x, 1, 10000)]); - let closure = >::instance( - &selected_benchmark, - &[(BenchmarkParameter::x, 1)], - ).expect("failed to create closure"); + let closure = >::instance( + &selected_benchmark, + &[(BenchmarkParameter::x, 1)], + ) + .expect("failed to create closure"); - assert_eq!(closure(), Ok(())); + assert_eq!(closure(), Ok(())); } #[test] fn benchmarks_macro_verify_works() { - // Check postcondition for benchmark `set_value` is valid. - let selected_benchmark = SelectedBenchmark::set_value; - - let closure = >::verify( - &selected_benchmark, - &[(BenchmarkParameter::b, 1)], - ).expect("failed to create closure"); - - new_test_ext().execute_with(|| { - assert_ok!(closure()); - }); + // Check postcondition for benchmark `set_value` is valid. 
+ let selected_benchmark = SelectedBenchmark::set_value; + + let closure = >::verify( + &selected_benchmark, + &[(BenchmarkParameter::b, 1)], + ) + .expect("failed to create closure"); + + new_test_ext().execute_with(|| { + assert_ok!(closure()); + }); } #[test] fn benchmarks_generate_unit_tests() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_value::()); - assert_ok!(test_benchmark_other_name::()); - assert_ok!(test_benchmark_sort_vector::()); - assert_err!(test_benchmark_bad_origin::(), "Bad origin"); - assert_err!(test_benchmark_bad_verify::(), "You forgot to sort!"); - }); + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set_value::()); + assert_ok!(test_benchmark_other_name::()); + assert_ok!(test_benchmark_sort_vector::()); + assert_err!(test_benchmark_bad_origin::(), "Bad origin"); + assert_err!(test_benchmark_bad_verify::(), "You forgot to sort!"); + }); } diff --git a/frame/benchmarking/src/utils.rs b/frame/benchmarking/src/utils.rs index 41b968fbfc..7e81700d2b 100644 --- a/frame/benchmarking/src/utils.rs +++ b/frame/benchmarking/src/utils.rs @@ -16,28 +16,53 @@ //! Interfaces, types and utils for benchmarking a FRAME runtime. -use codec::{Encode, Decode}; -use sp_std::{vec::Vec, prelude::Box}; +use codec::{Decode, Encode}; use sp_io::hashing::blake2_256; use sp_runtime::RuntimeString; +use sp_std::{prelude::Box, vec::Vec}; /// An alphabet of possible parameters to use for benchmarking. #[derive(Encode, Decode, Clone, Copy, PartialEq, Debug)] #[allow(missing_docs)] #[allow(non_camel_case_types)] pub enum BenchmarkParameter { - a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, + a, + b, + c, + d, + e, + f, + g, + h, + i, + j, + k, + l, + m, + n, + o, + p, + q, + r, + s, + t, + u, + v, + w, + x, + y, + z, } /// The results of a single of benchmark. #[derive(Encode, Decode, Clone, PartialEq, Debug)] pub struct BenchmarkBatch { - /// The pallet containing this benchmark. 
- pub pallet: Vec, - /// The extrinsic (or benchmark name) of this benchmark. - pub benchmark: Vec, - /// The results from this benchmark. - pub results: Vec, + /// The pallet containing this benchmark. + pub pallet: Vec, + /// The extrinsic (or benchmark name) of this benchmark. + pub benchmark: Vec, + /// The results from this benchmark. + pub results: Vec, } /// Results from running benchmarks on a FRAME pallet. @@ -46,94 +71,111 @@ pub struct BenchmarkBatch { pub type BenchmarkResults = (Vec<(BenchmarkParameter, u32)>, u128, u128); sp_api::decl_runtime_apis! { - /// Runtime api for benchmarking a FRAME runtime. - pub trait Benchmark { - /// Dispatch the given benchmark. - fn dispatch_benchmark( - pallet: Vec, - benchmark: Vec, - lowest_range_values: Vec, - highest_range_values: Vec, - steps: Vec, - repeat: u32, - ) -> Result, RuntimeString>; - } + /// Runtime api for benchmarking a FRAME runtime. + pub trait Benchmark { + /// Dispatch the given benchmark. + fn dispatch_benchmark( + pallet: Vec, + benchmark: Vec, + lowest_range_values: Vec, + highest_range_values: Vec, + steps: Vec, + repeat: u32, + ) -> Result, RuntimeString>; + } } /// Interface that provides functions for benchmarking the runtime. #[sp_runtime_interface::runtime_interface] pub trait Benchmarking { - /// Get the number of nanoseconds passed since the UNIX epoch - /// - /// WARNING! This is a non-deterministic call. Do not use this within - /// consensus critical logic. - fn current_time() -> u128 { - std::time::SystemTime::now().duration_since(std::time::SystemTime::UNIX_EPOCH) - .expect("Unix time doesn't go backwards; qed") - .as_nanos() - } - - /// Reset the trie database to the genesis state. - fn wipe_db(&mut self) { - self.wipe() - } - - /// Commit pending storage changes to the trie database and clear the database cache. - fn commit_db(&mut self) { - self.commit() - } + /// Get the number of nanoseconds passed since the UNIX epoch + /// + /// WARNING! This is a non-deterministic call. 
Do not use this within + /// consensus critical logic. + fn current_time() -> u128 { + std::time::SystemTime::now() + .duration_since(std::time::SystemTime::UNIX_EPOCH) + .expect("Unix time doesn't go backwards; qed") + .as_nanos() + } + + /// Reset the trie database to the genesis state. + fn wipe_db(&mut self) { + self.wipe() + } + + /// Commit pending storage changes to the trie database and clear the database cache. + fn commit_db(&mut self) { + self.commit() + } } /// The pallet benchmarking trait. pub trait Benchmarking { - /// Get the benchmarks available for this pallet. Generally there is one benchmark per - /// extrinsic, so these are sometimes just called "extrinsics". - fn benchmarks() -> Vec<&'static [u8]>; - - /// Run the benchmarks for this pallet. - /// - /// Parameters - /// - `name`: The name of extrinsic function or benchmark you want to benchmark encoded as - /// bytes. - /// - `steps`: The number of sample points you want to take across the range of parameters. - /// - `lowest_range_values`: The lowest number for each range of parameters. - /// - `highest_range_values`: The highest number for each range of parameters. - /// - `repeat`: The number of times you want to repeat a benchmark. - fn run_benchmark( - name: &[u8], - lowest_range_values: &[u32], - highest_range_values: &[u32], - steps: &[u32], - repeat: u32, - ) -> Result, &'static str>; + /// Get the benchmarks available for this pallet. Generally there is one benchmark per + /// extrinsic, so these are sometimes just called "extrinsics". + fn benchmarks() -> Vec<&'static [u8]>; + + /// Run the benchmarks for this pallet. + /// + /// Parameters + /// - `name`: The name of extrinsic function or benchmark you want to benchmark encoded as + /// bytes. + /// - `steps`: The number of sample points you want to take across the range of parameters. + /// - `lowest_range_values`: The lowest number for each range of parameters. 
+ /// - `highest_range_values`: The highest number for each range of parameters. + /// - `repeat`: The number of times you want to repeat a benchmark. + fn run_benchmark( + name: &[u8], + lowest_range_values: &[u32], + highest_range_values: &[u32], + steps: &[u32], + repeat: u32, + ) -> Result, &'static str>; } /// The required setup for creating a benchmark. pub trait BenchmarkingSetup { - /// Return the components and their ranges which should be tested in this benchmark. - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; - - /// Set up the storage, and prepare a closure to run the benchmark. - fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; - - /// Set up the storage, and prepare a closure to test and verify the benchmark - fn verify(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; + /// Return the components and their ranges which should be tested in this benchmark. + fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; + + /// Set up the storage, and prepare a closure to run the benchmark. + fn instance( + &self, + components: &[(BenchmarkParameter, u32)], + ) -> Result Result<(), &'static str>>, &'static str>; + + /// Set up the storage, and prepare a closure to test and verify the benchmark + fn verify( + &self, + components: &[(BenchmarkParameter, u32)], + ) -> Result Result<(), &'static str>>, &'static str>; } /// The required setup for creating a benchmark. pub trait BenchmarkingSetupInstance { - /// Return the components and their ranges which should be tested in this benchmark. - fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; - - /// Set up the storage, and prepare a closure to run the benchmark. 
- fn instance(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; - - /// Set up the storage, and prepare a closure to test and verify the benchmark - fn verify(&self, components: &[(BenchmarkParameter, u32)]) -> Result Result<(), &'static str>>, &'static str>; + /// Return the components and their ranges which should be tested in this benchmark. + fn components(&self) -> Vec<(BenchmarkParameter, u32, u32)>; + + /// Set up the storage, and prepare a closure to run the benchmark. + fn instance( + &self, + components: &[(BenchmarkParameter, u32)], + ) -> Result Result<(), &'static str>>, &'static str>; + + /// Set up the storage, and prepare a closure to test and verify the benchmark + fn verify( + &self, + components: &[(BenchmarkParameter, u32)], + ) -> Result Result<(), &'static str>>, &'static str>; } /// Grab an account, seeded by a name and index. -pub fn account(name: &'static str, index: u32, seed: u32) -> AccountId { - let entropy = (name, index, seed).using_encoded(blake2_256); - AccountId::decode(&mut &entropy[..]).unwrap_or_default() +pub fn account( + name: &'static str, + index: u32, + seed: u32, +) -> AccountId { + let entropy = (name, index, seed).using_encoded(blake2_256); + AccountId::decode(&mut &entropy[..]).unwrap_or_default() } diff --git a/frame/collective/src/benchmarking.rs b/frame/collective/src/benchmarking.rs index edef5e2e24..82bce6efce 100644 --- a/frame/collective/src/benchmarking.rs +++ b/frame/collective/src/benchmarking.rs @@ -18,193 +18,193 @@ use super::*; +use frame_benchmarking::{account, benchmarks_instance}; use frame_system::RawOrigin as SystemOrigin; -use frame_benchmarking::{benchmarks_instance, account}; -use frame_system::Module as System; use crate::Module as Collective; +use frame_system::Module as System; const SEED: u32 = 0; benchmarks_instance! { - _{ - // User account seed. - let u in 1 .. 1000 => (); - // Old members. - let n in 1 .. 1000 => (); - // New members. 
- let m in 1 .. 1000 => (); - // Existing proposals. - let p in 1 .. 100 => (); - } + _{ + // User account seed. + let u in 1 .. 1000 => (); + // Old members. + let n in 1 .. 1000 => (); + // New members. + let m in 1 .. 1000 => (); + // Existing proposals. + let p in 1 .. 100 => (); + } - set_members { - let m in ...; - let n in ...; + set_members { + let m in ...; + let n in ...; - // Construct `new_members`. - // It should influence timing since it will sort this vector. - let mut new_members = vec![]; - for i in 0 .. m { - let member = account("member", i, SEED); - new_members.push(member); - } + // Construct `new_members`. + // It should influence timing since it will sort this vector. + let mut new_members = vec![]; + for i in 0 .. m { + let member = account("member", i, SEED); + new_members.push(member); + } - // Set old members. - // We compute the difference of old and new members, so it should influence timing. - let mut old_members = vec![]; - for i in 0 .. n { - let old_member = account("old member", i, SEED); - old_members.push(old_member); - } + // Set old members. + // We compute the difference of old and new members, so it should influence timing. + let mut old_members = vec![]; + for i in 0 .. 
n { + let old_member = account("old member", i, SEED); + old_members.push(old_member); + } - let prime = Some(account("prime", 0, SEED)); + let prime = Some(account("prime", 0, SEED)); - Collective::::set_members(SystemOrigin::Root.into(), old_members, prime.clone())?; + Collective::::set_members(SystemOrigin::Root.into(), old_members, prime.clone())?; - }: _(SystemOrigin::Root, new_members.clone(), prime) - verify { - new_members.sort(); - assert_eq!(Collective::::members(), new_members); - } + }: _(SystemOrigin::Root, new_members.clone(), prime) + verify { + new_members.sort(); + assert_eq!(Collective::::members(), new_members); + } - execute { - let u in ...; + execute { + let u in ...; - let caller: T::AccountId = account("caller", u, SEED); - let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); + let caller: T::AccountId = account("caller", u, SEED); + let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); - Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; + Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; - }: _(SystemOrigin::Signed(caller), Box::new(proposal)) + }: _(SystemOrigin::Signed(caller), Box::new(proposal)) - propose { - let u in ...; + propose { + let u in ...; - let caller: T::AccountId = account("caller", u, SEED); - let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); + let caller: T::AccountId = account("caller", u, SEED); + let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); - Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; + Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; - let member_count = 0; + let member_count = 0; - }: _(SystemOrigin::Signed(caller), member_count, Box::new(proposal.into())) + }: _(SystemOrigin::Signed(caller), member_count, 
Box::new(proposal.into())) - propose_else_branch { - let u in ...; - let p in ...; + propose_else_branch { + let u in ...; + let p in ...; - let caller: T::AccountId = account("caller", u, SEED); - let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); + let caller: T::AccountId = account("caller", u, SEED); + let proposal: T::Proposal = Call::::close(Default::default(), Default::default()).into(); - Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; + Collective::::set_members(SystemOrigin::Root.into(), vec![caller.clone()], None)?; - let member_count = 3; + let member_count = 3; - // Add previous proposals. - for i in 0 .. p { - let proposal: T::Proposal = Call::::close(Default::default(), (i + 1).into()).into(); - Collective::::propose(SystemOrigin::Signed(caller.clone()).into(), member_count.clone(), Box::new(proposal.into()))?; - } + // Add previous proposals. + for i in 0 .. p { + let proposal: T::Proposal = Call::::close(Default::default(), (i + 1).into()).into(); + Collective::::propose(SystemOrigin::Signed(caller.clone()).into(), member_count.clone(), Box::new(proposal.into()))?; + } - }: propose(SystemOrigin::Signed(caller), member_count, Box::new(proposal.into())) + }: propose(SystemOrigin::Signed(caller), member_count, Box::new(proposal.into())) - vote { - let u in ...; + vote { + let u in ...; - let caller1: T::AccountId = account("caller1", u, SEED); - let caller2: T::AccountId = account("caller2", u, SEED); + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); - let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); - let proposal_hash = T::Hashing::hash_of(&proposal); + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); - Collective::::set_members(SystemOrigin::Root.into(), 
vec![caller1.clone(), caller2.clone()], None)?; + Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; - let member_count = 3; - Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + let member_count = 3; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; - let index = 0; - let approve = true; + let index = 0; + let approve = true; - }: _(SystemOrigin::Signed(caller2), proposal_hash, index, approve) + }: _(SystemOrigin::Signed(caller2), proposal_hash, index, approve) - vote_not_approve { - let u in ...; + vote_not_approve { + let u in ...; - let caller1: T::AccountId = account("caller1", u, SEED); - let caller2: T::AccountId = account("caller2", u, SEED); + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); - let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); - let proposal_hash = T::Hashing::hash_of(&proposal); + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); - Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; + Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; - let member_count = 3; - Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + let member_count = 3; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; - let index = 0; - let approve = false; + let index = 0; + let approve = false; - }: vote(SystemOrigin::Signed(caller2), proposal_hash, index, approve) + }: vote(SystemOrigin::Signed(caller2), proposal_hash, index, approve) - vote_approved { - let u in ...; + vote_approved { + let u in ...; - let caller1: T::AccountId = account("caller1", 
u, SEED); - let caller2: T::AccountId = account("caller2", u, SEED); + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); - let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); - let proposal_hash = T::Hashing::hash_of(&proposal); + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); - Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; + Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; - let member_count = 2; - Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + let member_count = 2; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; - let index = 0; - let approve = true; + let index = 0; + let approve = true; - }: vote(SystemOrigin::Signed(caller2), proposal_hash, index, approve) + }: vote(SystemOrigin::Signed(caller2), proposal_hash, index, approve) - close { - let u in ...; + close { + let u in ...; - let caller1: T::AccountId = account("caller1", u, SEED); - let caller2: T::AccountId = account("caller2", u, SEED); + let caller1: T::AccountId = account("caller1", u, SEED); + let caller2: T::AccountId = account("caller2", u, SEED); - let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); - let proposal_hash = T::Hashing::hash_of(&proposal); + let proposal: Box = Box::new(Call::::close(Default::default(), Default::default()).into()); + let proposal_hash = T::Hashing::hash_of(&proposal); - Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; - let member_count = 2; - Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; + 
Collective::::set_members(SystemOrigin::Root.into(), vec![caller1.clone(), caller2.clone()], None)?; + let member_count = 2; + Collective::::propose(SystemOrigin::Signed(caller1.clone()).into(), member_count, proposal)?; - let index = 0; - let approve = true; + let index = 0; + let approve = true; - let vote_end = T::MotionDuration::get() + 1u32.into(); - System::::set_block_number(vote_end); + let vote_end = T::MotionDuration::get() + 1u32.into(); + System::::set_block_number(vote_end); - }: _(SystemOrigin::Signed(caller2), proposal_hash, index) + }: _(SystemOrigin::Signed(caller2), proposal_hash, index) } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_members::()); - assert_ok!(test_benchmark_execute::()); - assert_ok!(test_benchmark_propose::()); - assert_ok!(test_benchmark_propose_else_branch::()); - assert_ok!(test_benchmark_vote::()); - assert_ok!(test_benchmark_vote_not_approve::()); - assert_ok!(test_benchmark_vote_approved::()); - assert_ok!(test_benchmark_close::()); - }); - } + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set_members::()); + assert_ok!(test_benchmark_execute::()); + assert_ok!(test_benchmark_propose::()); + assert_ok!(test_benchmark_propose_else_branch::()); + assert_ok!(test_benchmark_vote::()); + assert_ok!(test_benchmark_vote_not_approve::()); + assert_ok!(test_benchmark_vote_approved::()); + assert_ok!(test_benchmark_close::()); + }); + } } diff --git a/frame/collective/src/lib.rs b/frame/collective/src/lib.rs index b5626ae4a6..3933b6c44d 100644 --- a/frame/collective/src/lib.rs +++ b/frame/collective/src/lib.rs @@ -34,19 +34,21 @@ //! 
If there are not, or if no prime is set, then the motion is dropped without being executed. #![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_std::{prelude::*, result}; -use sp_core::u32_trait::Value as U32; -use sp_runtime::RuntimeDebug; -use sp_runtime::traits::Hash; use frame_support::weights::SimpleDispatchInfo; use frame_support::{ - dispatch::{Dispatchable, Parameter}, codec::{Encode, Decode}, - traits::{Get, ChangeMembers, InitializeMembers, EnsureOrigin}, decl_module, decl_event, - decl_storage, decl_error, ensure, + codec::{Decode, Encode}, + decl_error, decl_event, decl_module, decl_storage, + dispatch::{Dispatchable, Parameter}, + ensure, + traits::{ChangeMembers, EnsureOrigin, Get, InitializeMembers}, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed}; +use sp_core::u32_trait::Value as U32; +use sp_runtime::traits::Hash; +use sp_runtime::RuntimeDebug; +use sp_std::{prelude::*, result}; #[cfg(feature = "runtime-benchmarks")] mod benchmarking; @@ -60,525 +62,545 @@ pub type ProposalIndex = u32; /// vote exactly once, therefore also the number of votes for any given motion. pub type MemberCount = u32; -pub trait Trait: frame_system::Trait { - /// The outer origin type. - type Origin: From>; +pub trait Trait: frame_system::Trait { + /// The outer origin type. + type Origin: From>; - /// The outer call dispatch type. - type Proposal: Parameter + Dispatchable>::Origin> + From>; + /// The outer call dispatch type. + type Proposal: Parameter + + Dispatchable>::Origin> + + From>; - /// The outer event type. - type Event: From> + Into<::Event>; + /// The outer event type. + type Event: From> + Into<::Event>; - /// The time-out for council motions. - type MotionDuration: Get; + /// The time-out for council motions. + type MotionDuration: Get; } /// Origin for the collective module. 
#[derive(PartialEq, Eq, Clone, RuntimeDebug)] pub enum RawOrigin { - /// It has been condoned by a given number of members of the collective from a given total. - Members(MemberCount, MemberCount), - /// It has been condoned by a single member of the collective. - Member(AccountId), - /// Dummy to manage the fact we have instancing. - _Phantom(sp_std::marker::PhantomData), + /// It has been condoned by a given number of members of the collective from a given total. + Members(MemberCount, MemberCount), + /// It has been condoned by a single member of the collective. + Member(AccountId), + /// Dummy to manage the fact we have instancing. + _Phantom(sp_std::marker::PhantomData), } /// Origin for the collective module. -pub type Origin = RawOrigin<::AccountId, I>; +pub type Origin = RawOrigin<::AccountId, I>; #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] /// Info for keeping track of a motion being voted on. pub struct Votes { - /// The proposal's unique index. - index: ProposalIndex, - /// The number of approval votes that are needed to pass the motion. - threshold: MemberCount, - /// The current set of voters that approved it. - ayes: Vec, - /// The current set of voters that rejected it. - nays: Vec, - /// The hard end time of this vote. - end: BlockNumber, + /// The proposal's unique index. + index: ProposalIndex, + /// The number of approval votes that are needed to pass the motion. + threshold: MemberCount, + /// The current set of voters that approved it. + ayes: Vec, + /// The current set of voters that rejected it. + nays: Vec, + /// The hard end time of this vote. + end: BlockNumber, } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Collective { - /// The hashes of the active proposals. - pub Proposals get(fn proposals): Vec; - /// Actual proposal for a given hash, if it's current. 
- pub ProposalOf get(fn proposal_of): - map hasher(identity) T::Hash => Option<>::Proposal>; - /// Votes on a given proposal, if it is ongoing. - pub Voting get(fn voting): - map hasher(identity) T::Hash => Option>; - /// Proposals so far. - pub ProposalCount get(fn proposal_count): u32; - /// The current members of the collective. This is stored sorted (just by value). - pub Members get(fn members): Vec; - /// The member who provides the default vote for any other members that do not vote before - /// the timeout. If None, then no member has that privilege. - pub Prime get(fn prime): Option; - } - add_extra_genesis { - config(phantom): sp_std::marker::PhantomData; - config(members): Vec; - build(|config| Module::::initialize_members(&config.members)) - } + trait Store for Module, I: Instance=DefaultInstance> as Collective { + /// The hashes of the active proposals. + pub Proposals get(fn proposals): Vec; + /// Actual proposal for a given hash, if it's current. + pub ProposalOf get(fn proposal_of): + map hasher(identity) T::Hash => Option<>::Proposal>; + /// Votes on a given proposal, if it is ongoing. + pub Voting get(fn voting): + map hasher(identity) T::Hash => Option>; + /// Proposals so far. + pub ProposalCount get(fn proposal_count): u32; + /// The current members of the collective. This is stored sorted (just by value). + pub Members get(fn members): Vec; + /// The member who provides the default vote for any other members that do not vote before + /// the timeout. If None, then no member has that privilege. + pub Prime get(fn prime): Option; + } + add_extra_genesis { + config(phantom): sp_std::marker::PhantomData; + config(members): Vec; + build(|config| Module::::initialize_members(&config.members)) + } } decl_event! { - pub enum Event where - ::Hash, - ::AccountId, - { - /// A motion (given hash) has been proposed (by given account) with a threshold (given - /// `MemberCount`). 
- Proposed(AccountId, ProposalIndex, Hash, MemberCount), - /// A motion (given hash) has been voted on by given account, leaving - /// a tally (yes votes and no votes given respectively as `MemberCount`). - Voted(AccountId, Hash, bool, MemberCount, MemberCount), - /// A motion was approved by the required threshold. - Approved(Hash), - /// A motion was not approved by the required threshold. - Disapproved(Hash), - /// A motion was executed; `bool` is true if returned without error. - Executed(Hash, bool), - /// A single member did some action; `bool` is true if returned without error. - MemberExecuted(Hash, bool), - /// A proposal was closed after its duration was up. - Closed(Hash, MemberCount, MemberCount), - } + pub enum Event where + ::Hash, + ::AccountId, + { + /// A motion (given hash) has been proposed (by given account) with a threshold (given + /// `MemberCount`). + Proposed(AccountId, ProposalIndex, Hash, MemberCount), + /// A motion (given hash) has been voted on by given account, leaving + /// a tally (yes votes and no votes given respectively as `MemberCount`). + Voted(AccountId, Hash, bool, MemberCount, MemberCount), + /// A motion was approved by the required threshold. + Approved(Hash), + /// A motion was not approved by the required threshold. + Disapproved(Hash), + /// A motion was executed; `bool` is true if returned without error. + Executed(Hash, bool), + /// A single member did some action; `bool` is true if returned without error. + MemberExecuted(Hash, bool), + /// A proposal was closed after its duration was up. + Closed(Hash, MemberCount, MemberCount), + } } decl_error! { - pub enum Error for Module, I: Instance> { - /// Account is not a member - NotMember, - /// Duplicate proposals not allowed - DuplicateProposal, - /// Proposal must exist - ProposalMissing, - /// Mismatched index - WrongIndex, - /// Duplicate vote ignored - DuplicateVote, - /// Members are already initialized! 
- AlreadyInitialized, - /// The close call is made too early, before the end of the voting. - TooEarly, - } + pub enum Error for Module, I: Instance> { + /// Account is not a member + NotMember, + /// Duplicate proposals not allowed + DuplicateProposal, + /// Proposal must exist + ProposalMissing, + /// Mismatched index + WrongIndex, + /// Duplicate vote ignored + DuplicateVote, + /// Members are already initialized! + AlreadyInitialized, + /// The close call is made too early, before the end of the voting. + TooEarly, + } } // Note: this module is not benchmarked. The weights are obtained based on the similarity of the // executed logic with other democracy function. Note that councillor operations are assigned to the // operational class. decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Set the collective's membership. - /// - /// - `new_members`: The new member list. Be nice to the chain and - // provide it sorted. - /// - `prime`: The prime member whose vote sets the default. - /// - /// Requires root origin. - #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] - fn set_members(origin, new_members: Vec, prime: Option) { - ensure_root(origin)?; - let mut new_members = new_members; - new_members.sort(); - let old = Members::::get(); - >::set_members_sorted(&new_members[..], &old); - Prime::::set(prime); - } - - /// Dispatch a proposal from a member using the `Member` origin. - /// - /// Origin must be a member of the collective. 
- #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] - fn execute(origin, proposal: Box<>::Proposal>) { - let who = ensure_signed(origin)?; - ensure!(Self::is_member(&who), Error::::NotMember); - - let proposal_hash = T::Hashing::hash_of(&proposal); - let ok = proposal.dispatch(RawOrigin::Member(who).into()).is_ok(); - Self::deposit_event(RawEvent::MemberExecuted(proposal_hash, ok)); - } - - /// # - /// - Bounded storage reads and writes. - /// - Argument `threshold` has bearing on weight. - /// # - #[weight = SimpleDispatchInfo::FixedOperational(5_000_000_000)] - fn propose(origin, #[compact] threshold: MemberCount, proposal: Box<>::Proposal>) { - let who = ensure_signed(origin)?; - ensure!(Self::is_member(&who), Error::::NotMember); - - let proposal_hash = T::Hashing::hash_of(&proposal); - - ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); - - if threshold < 2 { - let seats = Self::members().len() as MemberCount; - let ok = proposal.dispatch(RawOrigin::Members(1, seats).into()).is_ok(); - Self::deposit_event(RawEvent::Executed(proposal_hash, ok)); - } else { - let index = Self::proposal_count(); - >::mutate(|i| *i += 1); - >::mutate(|proposals| proposals.push(proposal_hash)); - >::insert(proposal_hash, *proposal); - let end = system::Module::::block_number() + T::MotionDuration::get(); - let votes = Votes { index, threshold, ayes: vec![who.clone()], nays: vec![], end }; - >::insert(proposal_hash, votes); - - Self::deposit_event(RawEvent::Proposed(who, index, proposal_hash, threshold)); - } - } - - /// # - /// - Bounded storage read and writes. - /// - Will be slightly heavier if the proposal is approved / disapproved after the vote. 
- /// # - #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] - fn vote(origin, proposal: T::Hash, #[compact] index: ProposalIndex, approve: bool) { - let who = ensure_signed(origin)?; - ensure!(Self::is_member(&who), Error::::NotMember); - - let mut voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; - ensure!(voting.index == index, Error::::WrongIndex); - - let position_yes = voting.ayes.iter().position(|a| a == &who); - let position_no = voting.nays.iter().position(|a| a == &who); - - if approve { - if position_yes.is_none() { - voting.ayes.push(who.clone()); - } else { - Err(Error::::DuplicateVote)? - } - if let Some(pos) = position_no { - voting.nays.swap_remove(pos); - } - } else { - if position_no.is_none() { - voting.nays.push(who.clone()); - } else { - Err(Error::::DuplicateVote)? - } - if let Some(pos) = position_yes { - voting.ayes.swap_remove(pos); - } - } - - let yes_votes = voting.ayes.len() as MemberCount; - let no_votes = voting.nays.len() as MemberCount; - Self::deposit_event(RawEvent::Voted(who, proposal, approve, yes_votes, no_votes)); - - let seats = Self::members().len() as MemberCount; - - let approved = yes_votes >= voting.threshold; - let disapproved = seats.saturating_sub(no_votes) < voting.threshold; - if approved || disapproved { - Self::finalize_proposal(approved, seats, voting, proposal); - } else { - Voting::::insert(&proposal, voting); - } - } - - /// May be called by any signed account after the voting duration has ended in order to - /// finish voting and close the proposal. - /// - /// Abstentions are counted as rejections unless there is a prime member set and the prime - /// member cast an approval. - /// - /// - the weight of `proposal` preimage. - /// - up to three events deposited. - /// - one read, two removals, one mutation. (plus three static reads.) 
- /// - computation and i/o `O(P + L + M)` where: - /// - `M` is number of members, - /// - `P` is number of active proposals, - /// - `L` is the encoded length of `proposal` preimage. - #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] - fn close(origin, proposal: T::Hash, #[compact] index: ProposalIndex) { - let _ = ensure_signed(origin)?; - - let voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; - ensure!(voting.index == index, Error::::WrongIndex); - ensure!(system::Module::::block_number() >= voting.end, Error::::TooEarly); - - // default to true only if there's a prime and they voted in favour. - let default = Self::prime().map_or( - false, - |who| voting.ayes.iter().any(|a| a == &who), - ); - - let mut no_votes = voting.nays.len() as MemberCount; - let mut yes_votes = voting.ayes.len() as MemberCount; - let seats = Self::members().len() as MemberCount; - let abstentions = seats - (yes_votes + no_votes); - match default { - true => yes_votes += abstentions, - false => no_votes += abstentions, - } - - Self::deposit_event(RawEvent::Closed(proposal, yes_votes, no_votes)); - Self::finalize_proposal(yes_votes >= voting.threshold, seats, voting, proposal); - } - } + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: ::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Set the collective's membership. + /// + /// - `new_members`: The new member list. Be nice to the chain and + // provide it sorted. + /// - `prime`: The prime member whose vote sets the default. + /// + /// Requires root origin. + #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] + fn set_members(origin, new_members: Vec, prime: Option) { + ensure_root(origin)?; + let mut new_members = new_members; + new_members.sort(); + let old = Members::::get(); + >::set_members_sorted(&new_members[..], &old); + Prime::::set(prime); + } + + /// Dispatch a proposal from a member using the `Member` origin. 
+ /// + /// Origin must be a member of the collective. + #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] + fn execute(origin, proposal: Box<>::Proposal>) { + let who = ensure_signed(origin)?; + ensure!(Self::is_member(&who), Error::::NotMember); + + let proposal_hash = T::Hashing::hash_of(&proposal); + let ok = proposal.dispatch(RawOrigin::Member(who).into()).is_ok(); + Self::deposit_event(RawEvent::MemberExecuted(proposal_hash, ok)); + } + + /// # + /// - Bounded storage reads and writes. + /// - Argument `threshold` has bearing on weight. + /// # + #[weight = SimpleDispatchInfo::FixedOperational(5_000_000_000)] + fn propose(origin, #[compact] threshold: MemberCount, proposal: Box<>::Proposal>) { + let who = ensure_signed(origin)?; + ensure!(Self::is_member(&who), Error::::NotMember); + + let proposal_hash = T::Hashing::hash_of(&proposal); + + ensure!(!>::contains_key(proposal_hash), Error::::DuplicateProposal); + + if threshold < 2 { + let seats = Self::members().len() as MemberCount; + let ok = proposal.dispatch(RawOrigin::Members(1, seats).into()).is_ok(); + Self::deposit_event(RawEvent::Executed(proposal_hash, ok)); + } else { + let index = Self::proposal_count(); + >::mutate(|i| *i += 1); + >::mutate(|proposals| proposals.push(proposal_hash)); + >::insert(proposal_hash, *proposal); + let end = system::Module::::block_number() + T::MotionDuration::get(); + let votes = Votes { index, threshold, ayes: vec![who.clone()], nays: vec![], end }; + >::insert(proposal_hash, votes); + + Self::deposit_event(RawEvent::Proposed(who, index, proposal_hash, threshold)); + } + } + + /// # + /// - Bounded storage read and writes. + /// - Will be slightly heavier if the proposal is approved / disapproved after the vote. 
+ /// # + #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] + fn vote(origin, proposal: T::Hash, #[compact] index: ProposalIndex, approve: bool) { + let who = ensure_signed(origin)?; + ensure!(Self::is_member(&who), Error::::NotMember); + + let mut voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; + ensure!(voting.index == index, Error::::WrongIndex); + + let position_yes = voting.ayes.iter().position(|a| a == &who); + let position_no = voting.nays.iter().position(|a| a == &who); + + if approve { + if position_yes.is_none() { + voting.ayes.push(who.clone()); + } else { + Err(Error::::DuplicateVote)? + } + if let Some(pos) = position_no { + voting.nays.swap_remove(pos); + } + } else { + if position_no.is_none() { + voting.nays.push(who.clone()); + } else { + Err(Error::::DuplicateVote)? + } + if let Some(pos) = position_yes { + voting.ayes.swap_remove(pos); + } + } + + let yes_votes = voting.ayes.len() as MemberCount; + let no_votes = voting.nays.len() as MemberCount; + Self::deposit_event(RawEvent::Voted(who, proposal, approve, yes_votes, no_votes)); + + let seats = Self::members().len() as MemberCount; + + let approved = yes_votes >= voting.threshold; + let disapproved = seats.saturating_sub(no_votes) < voting.threshold; + if approved || disapproved { + Self::finalize_proposal(approved, seats, voting, proposal); + } else { + Voting::::insert(&proposal, voting); + } + } + + /// May be called by any signed account after the voting duration has ended in order to + /// finish voting and close the proposal. + /// + /// Abstentions are counted as rejections unless there is a prime member set and the prime + /// member cast an approval. + /// + /// - the weight of `proposal` preimage. + /// - up to three events deposited. + /// - one read, two removals, one mutation. (plus three static reads.) 
+ /// - computation and i/o `O(P + L + M)` where: + /// - `M` is number of members, + /// - `P` is number of active proposals, + /// - `L` is the encoded length of `proposal` preimage. + #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] + fn close(origin, proposal: T::Hash, #[compact] index: ProposalIndex) { + let _ = ensure_signed(origin)?; + + let voting = Self::voting(&proposal).ok_or(Error::::ProposalMissing)?; + ensure!(voting.index == index, Error::::WrongIndex); + ensure!(system::Module::::block_number() >= voting.end, Error::::TooEarly); + + // default to true only if there's a prime and they voted in favour. + let default = Self::prime().map_or( + false, + |who| voting.ayes.iter().any(|a| a == &who), + ); + + let mut no_votes = voting.nays.len() as MemberCount; + let mut yes_votes = voting.ayes.len() as MemberCount; + let seats = Self::members().len() as MemberCount; + let abstentions = seats - (yes_votes + no_votes); + match default { + true => yes_votes += abstentions, + false => no_votes += abstentions, + } + + Self::deposit_event(RawEvent::Closed(proposal, yes_votes, no_votes)); + Self::finalize_proposal(yes_votes >= voting.threshold, seats, voting, proposal); + } + } } impl, I: Instance> Module { - pub fn is_member(who: &T::AccountId) -> bool { - Self::members().contains(who) - } - - /// Weight: - /// If `approved`: - /// - the weight of `proposal` preimage. - /// - two events deposited. - /// - two removals, one mutation. - /// - computation and i/o `O(P + L)` where: - /// - `P` is number of active proposals, - /// - `L` is the encoded length of `proposal` preimage. - /// - /// If not `approved`: - /// - one event deposited. - /// Two removals, one mutation. 
- /// Computation and i/o `O(P)` where: - /// - `P` is number of active proposals - fn finalize_proposal( - approved: bool, - seats: MemberCount, - voting: Votes, - proposal: T::Hash, - ) { - if approved { - Self::deposit_event(RawEvent::Approved(proposal)); - - // execute motion, assuming it exists. - if let Some(p) = ProposalOf::::take(&proposal) { - let origin = RawOrigin::Members(voting.threshold, seats).into(); - let ok = p.dispatch(origin).is_ok(); - Self::deposit_event(RawEvent::Executed(proposal, ok)); - } - } else { - // disapproved - ProposalOf::::remove(&proposal); - Self::deposit_event(RawEvent::Disapproved(proposal)); - } - - // remove vote - Voting::::remove(&proposal); - Proposals::::mutate(|proposals| proposals.retain(|h| h != &proposal)); - } + pub fn is_member(who: &T::AccountId) -> bool { + Self::members().contains(who) + } + + /// Weight: + /// If `approved`: + /// - the weight of `proposal` preimage. + /// - two events deposited. + /// - two removals, one mutation. + /// - computation and i/o `O(P + L)` where: + /// - `P` is number of active proposals, + /// - `L` is the encoded length of `proposal` preimage. + /// + /// If not `approved`: + /// - one event deposited. + /// Two removals, one mutation. + /// Computation and i/o `O(P)` where: + /// - `P` is number of active proposals + fn finalize_proposal( + approved: bool, + seats: MemberCount, + voting: Votes, + proposal: T::Hash, + ) { + if approved { + Self::deposit_event(RawEvent::Approved(proposal)); + + // execute motion, assuming it exists. 
+ if let Some(p) = ProposalOf::::take(&proposal) { + let origin = RawOrigin::Members(voting.threshold, seats).into(); + let ok = p.dispatch(origin).is_ok(); + Self::deposit_event(RawEvent::Executed(proposal, ok)); + } + } else { + // disapproved + ProposalOf::::remove(&proposal); + Self::deposit_event(RawEvent::Disapproved(proposal)); + } + + // remove vote + Voting::::remove(&proposal); + Proposals::::mutate(|proposals| proposals.retain(|h| h != &proposal)); + } } impl, I: Instance> ChangeMembers for Module { - fn change_members_sorted( - _incoming: &[T::AccountId], - outgoing: &[T::AccountId], - new: &[T::AccountId], - ) { - // remove accounts from all current voting in motions. - let mut outgoing = outgoing.to_vec(); - outgoing.sort_unstable(); - for h in Self::proposals().into_iter() { - >::mutate(h, |v| - if let Some(mut votes) = v.take() { - votes.ayes = votes.ayes.into_iter() - .filter(|i| outgoing.binary_search(i).is_err()) - .collect(); - votes.nays = votes.nays.into_iter() - .filter(|i| outgoing.binary_search(i).is_err()) - .collect(); - *v = Some(votes); - } - ); - } - Members::::put(new); - Prime::::kill(); - } - - fn set_prime(prime: Option) { - Prime::::set(prime); - } + fn change_members_sorted( + _incoming: &[T::AccountId], + outgoing: &[T::AccountId], + new: &[T::AccountId], + ) { + // remove accounts from all current voting in motions. 
+ let mut outgoing = outgoing.to_vec(); + outgoing.sort_unstable(); + for h in Self::proposals().into_iter() { + >::mutate(h, |v| { + if let Some(mut votes) = v.take() { + votes.ayes = votes + .ayes + .into_iter() + .filter(|i| outgoing.binary_search(i).is_err()) + .collect(); + votes.nays = votes + .nays + .into_iter() + .filter(|i| outgoing.binary_search(i).is_err()) + .collect(); + *v = Some(votes); + } + }); + } + Members::::put(new); + Prime::::kill(); + } + + fn set_prime(prime: Option) { + Prime::::set(prime); + } } impl, I: Instance> InitializeMembers for Module { - fn initialize_members(members: &[T::AccountId]) { - if !members.is_empty() { - assert!(>::get().is_empty(), "Members are already initialized!"); - >::put(members); - } - } + fn initialize_members(members: &[T::AccountId]) { + if !members.is_empty() { + assert!( + >::get().is_empty(), + "Members are already initialized!" + ); + >::put(members); + } + } } /// Ensure that the origin `o` represents at least `n` members. Returns `Ok` or an `Err` /// otherwise. 
-pub fn ensure_members(o: OuterOrigin, n: MemberCount) - -> result::Result +pub fn ensure_members( + o: OuterOrigin, + n: MemberCount, +) -> result::Result where - OuterOrigin: Into, OuterOrigin>> + OuterOrigin: Into, OuterOrigin>>, { - match o.into() { - Ok(RawOrigin::Members(x, _)) if x >= n => Ok(n), - _ => Err("bad origin: expected to be a threshold number of members"), - } + match o.into() { + Ok(RawOrigin::Members(x, _)) if x >= n => Ok(n), + _ => Err("bad origin: expected to be a threshold number of members"), + } } -pub struct EnsureMember(sp_std::marker::PhantomData<(AccountId, I)>); +pub struct EnsureMember( + sp_std::marker::PhantomData<(AccountId, I)>, +); impl< - O: Into, O>> + From>, - AccountId: Default, - I, -> EnsureOrigin for EnsureMember { - type Success = AccountId; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Member(id) => Ok(id), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - O::from(RawOrigin::Member(Default::default())) - } + O: Into, O>> + From>, + AccountId: Default, + I, + > EnsureOrigin for EnsureMember +{ + type Success = AccountId; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::Member(id) => Ok(id), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Member(Default::default())) + } } -pub struct EnsureMembers(sp_std::marker::PhantomData<(N, AccountId, I)>); +pub struct EnsureMembers( + sp_std::marker::PhantomData<(N, AccountId, I)>, +); impl< - O: Into, O>> + From>, - N: U32, - AccountId, - I, -> EnsureOrigin for EnsureMembers { - type Success = (MemberCount, MemberCount); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Members(n, m) if n >= N::VALUE => Ok((n, m)), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - 
O::from(RawOrigin::Members(N::VALUE, N::VALUE)) - } + O: Into, O>> + From>, + N: U32, + AccountId, + I, + > EnsureOrigin for EnsureMembers +{ + type Success = (MemberCount, MemberCount); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::Members(n, m) if n >= N::VALUE => Ok((n, m)), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Members(N::VALUE, N::VALUE)) + } } -pub struct EnsureProportionMoreThan( - sp_std::marker::PhantomData<(N, D, AccountId, I)> +pub struct EnsureProportionMoreThan( + sp_std::marker::PhantomData<(N, D, AccountId, I)>, ); impl< - O: Into, O>> + From>, - N: U32, - D: U32, - AccountId, - I, -> EnsureOrigin for EnsureProportionMoreThan { - type Success = (); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Members(n, m) if n * D::VALUE > N::VALUE * m => Ok(()), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - O::from(RawOrigin::Members(1u32, 0u32)) - } + O: Into, O>> + From>, + N: U32, + D: U32, + AccountId, + I, + > EnsureOrigin for EnsureProportionMoreThan +{ + type Success = (); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::Members(n, m) if n * D::VALUE > N::VALUE * m => Ok(()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Members(1u32, 0u32)) + } } -pub struct EnsureProportionAtLeast( - sp_std::marker::PhantomData<(N, D, AccountId, I)> +pub struct EnsureProportionAtLeast( + sp_std::marker::PhantomData<(N, D, AccountId, I)>, ); impl< - O: Into, O>> + From>, - N: U32, - D: U32, - AccountId, - I, -> EnsureOrigin for EnsureProportionAtLeast { - type Success = (); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Members(n, m) if n * D::VALUE >= N::VALUE * m => Ok(()), - r => Err(O::from(r)), - }) - 
} - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - O::from(RawOrigin::Members(0u32, 0u32)) - } + O: Into, O>> + From>, + N: U32, + D: U32, + AccountId, + I, + > EnsureOrigin for EnsureProportionAtLeast +{ + type Success = (); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::Members(n, m) if n * D::VALUE >= N::VALUE * m => Ok(()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Members(0u32, 0u32)) + } } #[cfg(test)] mod tests { - use super::*; - use frame_support::{Hashable, assert_ok, assert_noop, parameter_types, weights::Weight}; - use frame_system::{self as system, EventRecord, Phase}; - use hex_literal::hex; - use sp_core::H256; - use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, testing::Header, - BuildStorage, - }; - use crate as collective; - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const MotionDuration: u64 = 3; - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - impl Trait for Test { - type Origin = Origin; - type Proposal = Call; - type Event = Event; - type MotionDuration = MotionDuration; - } - impl Trait for Test { - type Origin = Origin; - 
type Proposal = Call; - type Event = Event; - type MotionDuration = MotionDuration; - } - - pub type Block = sp_runtime::generic::Block; - pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; - - frame_support::construct_runtime!( + use super::*; + use crate as collective; + use frame_support::{assert_noop, assert_ok, parameter_types, weights::Weight, Hashable}; + use frame_system::{self as system, EventRecord, Phase}; + use hex_literal::hex; + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, Block as BlockT, IdentityLookup}, + BuildStorage, Perbill, + }; + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const MotionDuration: u64 = 3; + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + impl Trait for Test { + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = MotionDuration; + } + impl Trait for Test { + type Origin = Origin; + type Proposal = Call; + type Event = Event; + type MotionDuration = MotionDuration; + } + + pub type Block = sp_runtime::generic::Block; + pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + + frame_support::construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, @@ -590,392 +612,641 
@@ mod tests { } ); - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut ext: sp_io::TestExternalities = GenesisConfig { - collective_Instance1: Some(collective::GenesisConfig { - members: vec![1, 2, 3], - phantom: Default::default(), - }), - collective: None, - }.build_storage().unwrap().into(); - ext.execute_with(|| System::set_block_number(1)); - ext - } - - #[test] - fn motions_basic_environment_works() { - new_test_ext().execute_with(|| { - assert_eq!(Collective::members(), vec![1, 2, 3]); - assert_eq!(Collective::proposals(), Vec::::new()); - }); - } - - fn make_proposal(value: u64) -> Call { - Call::System(frame_system::Call::remark(value.encode())) - } - - #[test] - fn close_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash = BlakeTwo256::hash_of(&proposal); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - - System::set_block_number(3); - assert_noop!( - Collective::close(Origin::signed(4), hash.clone(), 0), - Error::::TooEarly - ); - - System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), - record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) - ]); - }); - } - - #[test] - fn close_with_prime_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::ROOT, vec![1, 2, 3], Some(3))); - - assert_ok!(Collective::propose(Origin::signed(1), 3, 
Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - - System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 2, 1))), - record(Event::collective_Instance1(RawEvent::Disapproved(hash.clone()))) - ]); - }); - } - - #[test] - fn close_with_voting_prime_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::set_members(Origin::ROOT, vec![1, 2, 3], Some(1))); - - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - - System::set_block_number(4); - assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); - - let record = |event| EventRecord { phase: Phase::Initialization, event, topics: vec![] }; - assert_eq!(System::events(), vec![ - record(Event::collective_Instance1(RawEvent::Proposed(1, 0, hash.clone(), 3))), - record(Event::collective_Instance1(RawEvent::Voted(2, hash.clone(), true, 2, 0))), - record(Event::collective_Instance1(RawEvent::Closed(hash.clone(), 3, 0))), - record(Event::collective_Instance1(RawEvent::Approved(hash.clone()))), - record(Event::collective_Instance1(RawEvent::Executed(hash.clone(), false))) - ]); - }); - } - - #[test] - fn removal_of_old_voters_votes_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash = BlakeTwo256::hash_of(&proposal); - let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - 
assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) - ); - Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) - ); - - let proposal = make_proposal(69); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(2), 2, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) - ); - Collective::change_members_sorted(&[], &[3], &[2, 4]); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) - ); - }); - } - - #[test] - fn removal_of_old_voters_votes_works_with_set_members() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash = BlakeTwo256::hash_of(&proposal); - let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1, 2], nays: vec![], end }) - ); - assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 3, 4], None)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![2], nays: vec![], end }) - ); - - let proposal = make_proposal(69); - let hash = BlakeTwo256::hash_of(&proposal); - assert_ok!(Collective::propose(Origin::signed(2), 2, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![3], end }) - ); - 
assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 4], None)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 1, threshold: 2, ayes: vec![2], nays: vec![], end }) - ); - }); - } - - #[test] - fn propose_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash = proposal.blake2_256().into(); - let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_eq!(Collective::proposals(), vec![hash]); - assert_eq!(Collective::proposal_of(&hash), Some(proposal)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 3, ayes: vec![1], nays: vec![], end }) - ); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 3, - )), - topics: vec![], - } - ]); - }); - } - - #[test] - fn motions_ignoring_non_collective_proposals_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - assert_noop!( - Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone())), - Error::::NotMember - ); - }); - } - - #[test] - fn motions_ignoring_non_collective_votes_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_noop!( - Collective::vote(Origin::signed(42), hash.clone(), 0, true), - Error::::NotMember, - ); - }); - } - - #[test] - fn motions_ignoring_bad_index_collective_vote_works() { - new_test_ext().execute_with(|| { - System::set_block_number(3); - let proposal = make_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_noop!( - Collective::vote(Origin::signed(2), hash.clone(), 
1, true), - Error::::WrongIndex, - ); - }); - } - - #[test] - fn motions_revoting_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - let end = 4; - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()))); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 2, ayes: vec![1], nays: vec![], end }) - ); - assert_noop!( - Collective::vote(Origin::signed(1), hash.clone(), 0, true), - Error::::DuplicateVote, - ); - assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, false)); - assert_eq!( - Collective::voting(&hash), - Some(Votes { index: 0, threshold: 2, ayes: vec![], nays: vec![1], end }) - ); - assert_noop!( - Collective::vote(Origin::signed(1), hash.clone(), 0, false), - Error::::DuplicateVote, - ); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 2, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( - 1, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - false, - 0, - 1, - )), - topics: vec![], - } - ]); - }); - } - - #[test] - fn motions_reproposing_disapproved_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); - assert_eq!(Collective::proposals(), vec![]); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()))); - assert_eq!(Collective::proposals(), vec![hash]); - }); - } - - #[test] - fn motions_disapproval_works() { - new_test_ext().execute_with(|| { - let 
proposal = make_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 3, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1( - RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 3, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( - 2, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - false, - 1, - 1, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Disapproved( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - )), - topics: vec![], - } - ]); - }); - } - - #[test] - fn motions_approval_works() { - new_test_ext().execute_with(|| { - let proposal = make_proposal(42); - let hash: H256 = proposal.blake2_256().into(); - assert_ok!(Collective::propose(Origin::signed(1), 2, Box::new(proposal.clone()))); - assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Proposed( - 1, - 0, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - 2, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Voted( - 2, - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - true, - 2, - 0, - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Approved( - 
hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - )), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: Event::collective_Instance1(RawEvent::Executed( - hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"].into(), - false, - )), - topics: vec![], - } - ]); - }); - } + pub fn new_test_ext() -> sp_io::TestExternalities { + let mut ext: sp_io::TestExternalities = GenesisConfig { + collective_Instance1: Some(collective::GenesisConfig { + members: vec![1, 2, 3], + phantom: Default::default(), + }), + collective: None, + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(|| System::set_block_number(1)); + ext + } + + #[test] + fn motions_basic_environment_works() { + new_test_ext().execute_with(|| { + assert_eq!(Collective::members(), vec![1, 2, 3]); + assert_eq!(Collective::proposals(), Vec::::new()); + }); + } + + fn make_proposal(value: u64) -> Call { + Call::System(frame_system::Call::remark(value.encode())) + } + + #[test] + fn close_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + + System::set_block_number(3); + assert_noop!( + Collective::close(Origin::signed(4), hash.clone(), 0), + Error::::TooEarly + ); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); + + let record = |event| EventRecord { + phase: Phase::Initialization, + event, + topics: vec![], + }; + assert_eq!( + System::events(), + vec![ + record(Event::collective_Instance1(RawEvent::Proposed( + 1, + 0, + hash.clone(), + 3 + ))), + record(Event::collective_Instance1(RawEvent::Voted( + 2, + hash.clone(), + true, + 2, + 0 + ))), + record(Event::collective_Instance1(RawEvent::Closed( + hash.clone(), + 2, + 1 + ))), 
+ record(Event::collective_Instance1(RawEvent::Disapproved( + hash.clone() + ))) + ] + ); + }); + } + + #[test] + fn close_with_prime_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(Collective::set_members( + Origin::ROOT, + vec![1, 2, 3], + Some(3) + )); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); + + let record = |event| EventRecord { + phase: Phase::Initialization, + event, + topics: vec![], + }; + assert_eq!( + System::events(), + vec![ + record(Event::collective_Instance1(RawEvent::Proposed( + 1, + 0, + hash.clone(), + 3 + ))), + record(Event::collective_Instance1(RawEvent::Voted( + 2, + hash.clone(), + true, + 2, + 0 + ))), + record(Event::collective_Instance1(RawEvent::Closed( + hash.clone(), + 2, + 1 + ))), + record(Event::collective_Instance1(RawEvent::Disapproved( + hash.clone() + ))) + ] + ); + }); + } + + #[test] + fn close_with_voting_prime_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(Collective::set_members( + Origin::ROOT, + vec![1, 2, 3], + Some(1) + )); + + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + + System::set_block_number(4); + assert_ok!(Collective::close(Origin::signed(4), hash.clone(), 0)); + + let record = |event| EventRecord { + phase: Phase::Initialization, + event, + topics: vec![], + }; + assert_eq!( + System::events(), + vec![ + record(Event::collective_Instance1(RawEvent::Proposed( + 1, + 0, + hash.clone(), + 3 + ))), + record(Event::collective_Instance1(RawEvent::Voted( + 2, + hash.clone(), + true, + 
2, + 0 + ))), + record(Event::collective_Instance1(RawEvent::Closed( + hash.clone(), + 3, + 0 + ))), + record(Event::collective_Instance1(RawEvent::Approved( + hash.clone() + ))), + record(Event::collective_Instance1(RawEvent::Executed( + hash.clone(), + false + ))) + ] + ); + }); + } + + #[test] + fn removal_of_old_voters_votes_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + let end = 4; + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 0, + threshold: 3, + ayes: vec![1, 2], + nays: vec![], + end + }) + ); + Collective::change_members_sorted(&[4], &[1], &[2, 3, 4]); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 0, + threshold: 3, + ayes: vec![2], + nays: vec![], + end + }) + ); + + let proposal = make_proposal(69); + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(Collective::propose( + Origin::signed(2), + 2, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 1, + threshold: 2, + ayes: vec![2], + nays: vec![3], + end + }) + ); + Collective::change_members_sorted(&[], &[3], &[2, 4]); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 1, + threshold: 2, + ayes: vec![2], + nays: vec![], + end + }) + ); + }); + } + + #[test] + fn removal_of_old_voters_votes_works_with_set_members() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash = BlakeTwo256::hash_of(&proposal); + let end = 4; + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { 
+ index: 0, + threshold: 3, + ayes: vec![1, 2], + nays: vec![], + end + }) + ); + assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 3, 4], None)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 0, + threshold: 3, + ayes: vec![2], + nays: vec![], + end + }) + ); + + let proposal = make_proposal(69); + let hash = BlakeTwo256::hash_of(&proposal); + assert_ok!(Collective::propose( + Origin::signed(2), + 2, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(3), hash.clone(), 1, false)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 1, + threshold: 2, + ayes: vec![2], + nays: vec![3], + end + }) + ); + assert_ok!(Collective::set_members(Origin::ROOT, vec![2, 4], None)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 1, + threshold: 2, + ayes: vec![2], + nays: vec![], + end + }) + ); + }); + } + + #[test] + fn propose_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash = proposal.blake2_256().into(); + let end = 4; + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_eq!(Collective::proposals(), vec![hash]); + assert_eq!(Collective::proposal_of(&hash), Some(proposal)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 0, + threshold: 3, + ayes: vec![1], + nays: vec![], + end + }) + ); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Proposed( + 1, + 0, + hex!["68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35"] + .into(), + 3, + )), + topics: vec![], + }] + ); + }); + } + + #[test] + fn motions_ignoring_non_collective_proposals_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + assert_noop!( + Collective::propose(Origin::signed(42), 3, Box::new(proposal.clone())), + Error::::NotMember + ); + }); + } + + #[test] + fn 
motions_ignoring_non_collective_votes_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_noop!( + Collective::vote(Origin::signed(42), hash.clone(), 0, true), + Error::::NotMember, + ); + }); + } + + #[test] + fn motions_ignoring_bad_index_collective_vote_works() { + new_test_ext().execute_with(|| { + System::set_block_number(3); + let proposal = make_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_noop!( + Collective::vote(Origin::signed(2), hash.clone(), 1, true), + Error::::WrongIndex, + ); + }); + } + + #[test] + fn motions_revoting_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + let end = 4; + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()) + )); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 0, + threshold: 2, + ayes: vec![1], + nays: vec![], + end + }) + ); + assert_noop!( + Collective::vote(Origin::signed(1), hash.clone(), 0, true), + Error::::DuplicateVote, + ); + assert_ok!(Collective::vote(Origin::signed(1), hash.clone(), 0, false)); + assert_eq!( + Collective::voting(&hash), + Some(Votes { + index: 0, + threshold: 2, + ayes: vec![], + nays: vec![1], + end + }) + ); + assert_noop!( + Collective::vote(Origin::signed(1), hash.clone(), 0, false), + Error::::DuplicateVote, + ); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Proposed( + 1, + 0, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + 2, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: 
Event::collective_Instance1(RawEvent::Voted( + 1, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + false, + 0, + 1, + )), + topics: vec![], + } + ] + ); + }); + } + + #[test] + fn motions_reproposing_disapproved_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); + assert_eq!(Collective::proposals(), vec![]); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()) + )); + assert_eq!(Collective::proposals(), vec![hash]); + }); + } + + #[test] + fn motions_disapproval_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let hash: H256 = proposal.blake2_256().into(); + assert_ok!(Collective::propose( + Origin::signed(1), + 3, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, false)); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Proposed( + 1, + 0, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + 3, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Voted( + 2, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + false, + 1, + 1, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Disapproved( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + )), + topics: vec![], + } + ] + ); + }); + } + + #[test] + fn motions_approval_works() { + new_test_ext().execute_with(|| { + let proposal = make_proposal(42); + let 
hash: H256 = proposal.blake2_256().into(); + assert_ok!(Collective::propose( + Origin::signed(1), + 2, + Box::new(proposal.clone()) + )); + assert_ok!(Collective::vote(Origin::signed(2), hash.clone(), 0, true)); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Proposed( + 1, + 0, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + 2, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Voted( + 2, + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + true, + 2, + 0, + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Approved( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: Event::collective_Instance1(RawEvent::Executed( + hex![ + "68eea8f20b542ec656c6ac2d10435ae3bd1729efc34d1354ab85af840aad2d35" + ] + .into(), + false, + )), + topics: vec![], + } + ] + ); + }); + } } diff --git a/frame/contracts/common/src/lib.rs b/frame/contracts/common/src/lib.rs index 6a74a417fa..ff8e6b65f5 100644 --- a/frame/contracts/common/src/lib.rs +++ b/frame/contracts/common/src/lib.rs @@ -26,22 +26,22 @@ pub type GetStorageResult = Result>, ContractAccessError>; /// The possible errors that can happen querying the storage of a contract. #[derive(Eq, PartialEq, codec::Encode, codec::Decode, sp_runtime::RuntimeDebug)] pub enum ContractAccessError { - /// The given address doesn't point to a contract. - DoesntExist, - /// The specified contract is a tombstone and thus cannot have any storage. - IsTombstone, + /// The given address doesn't point to a contract. + DoesntExist, + /// The specified contract is a tombstone and thus cannot have any storage. 
+ IsTombstone, } /// A result type of a `rent_projection` call. pub type RentProjectionResult = - Result, ContractAccessError>; + Result, ContractAccessError>; #[derive(Eq, PartialEq, codec::Encode, codec::Decode, sp_runtime::RuntimeDebug)] pub enum RentProjection { - /// Eviction is projected to happen at the specified block number. - EvictionAt(BlockNumber), - /// No eviction is scheduled. - /// - /// E.g. because the contract accumulated enough funds to offset the rent storage costs. - NoEviction, + /// Eviction is projected to happen at the specified block number. + EvictionAt(BlockNumber), + /// No eviction is scheduled. + /// + /// E.g. because the contract accumulated enough funds to offset the rent storage costs. + NoEviction, } diff --git a/frame/contracts/rpc/runtime-api/src/lib.rs b/frame/contracts/rpc/runtime-api/src/lib.rs index 6fb629b024..94e03703d4 100644 --- a/frame/contracts/rpc/runtime-api/src/lib.rs +++ b/frame/contracts/rpc/runtime-api/src/lib.rs @@ -30,56 +30,56 @@ use sp_std::vec::Vec; /// A result of execution of a contract. #[derive(Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub enum ContractExecResult { - /// The contract returned successfully. - /// - /// There is a status code and, optionally, some data returned by the contract. - Success { - /// Status code returned by the contract. - status: u8, - /// Output data returned by the contract. - /// - /// Can be empty. - data: Vec, - }, - /// The contract execution either trapped or returned an error. - Error, + /// The contract returned successfully. + /// + /// There is a status code and, optionally, some data returned by the contract. + Success { + /// Status code returned by the contract. + status: u8, + /// Output data returned by the contract. + /// + /// Can be empty. + data: Vec, + }, + /// The contract execution either trapped or returned an error. + Error, } sp_api::decl_runtime_apis! { - /// The API to interact with contracts without using executive. 
- pub trait ContractsApi where - AccountId: Codec, - Balance: Codec, - BlockNumber: Codec, - { - /// Perform a call from a specified account to a given contract. - /// - /// See the contracts' `call` dispatchable function for more details. - fn call( - origin: AccountId, - dest: AccountId, - value: Balance, - gas_limit: u64, - input_data: Vec, - ) -> ContractExecResult; + /// The API to interact with contracts without using executive. + pub trait ContractsApi where + AccountId: Codec, + Balance: Codec, + BlockNumber: Codec, + { + /// Perform a call from a specified account to a given contract. + /// + /// See the contracts' `call` dispatchable function for more details. + fn call( + origin: AccountId, + dest: AccountId, + value: Balance, + gas_limit: u64, + input_data: Vec, + ) -> ContractExecResult; - /// Query a given storage key in a given contract. - /// - /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the - /// specified account and `Ok(None)` if it doesn't. If the account specified by the address - /// doesn't exist, or doesn't have a contract or if the contract is a tombstone, then `Err` - /// is returned. - fn get_storage( - address: AccountId, - key: [u8; 32], - ) -> GetStorageResult; + /// Query a given storage key in a given contract. + /// + /// Returns `Ok(Some(Vec))` if the storage value exists under the given key in the + /// specified account and `Ok(None)` if it doesn't. If the account specified by the address + /// doesn't exist, or doesn't have a contract or if the contract is a tombstone, then `Err` + /// is returned. + fn get_storage( + address: AccountId, + key: [u8; 32], + ) -> GetStorageResult; - /// Returns the projected time a given contract will be able to sustain paying its rent. - /// - /// The returned projection is relevant for the current block, i.e. it is as if the contract - /// was accessed at the current block. - /// - /// Returns `Err` if the contract is in a tombstone state or doesn't exist. 
- fn rent_projection(address: AccountId) -> RentProjectionResult; - } + /// Returns the projected time a given contract will be able to sustain paying its rent. + /// + /// The returned projection is relevant for the current block, i.e. it is as if the contract + /// was accessed at the current block. + /// + /// Returns `Err` if the contract is in a tombstone state or doesn't exist. + fn rent_projection(address: AccountId) -> RentProjectionResult; + } } diff --git a/frame/contracts/rpc/src/lib.rs b/frame/contracts/rpc/src/lib.rs index 52dddb177b..c20256826e 100644 --- a/frame/contracts/rpc/src/lib.rs +++ b/frame/contracts/rpc/src/lib.rs @@ -28,13 +28,13 @@ use sp_blockchain::HeaderBackend; use sp_core::{Bytes, H256}; use sp_rpc::number; use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Header as HeaderT}, + generic::BlockId, + traits::{Block as BlockT, Header as HeaderT}, }; pub use self::gen_client::Client as ContractsClient; pub use pallet_contracts_rpc_runtime_api::{ - self as runtime_api, ContractExecResult, ContractsApi as ContractsRuntimeApi, + self as runtime_api, ContractExecResult, ContractsApi as ContractsRuntimeApi, }; const RUNTIME_ERROR: i64 = 1; @@ -53,21 +53,21 @@ const GAS_PER_SECOND: u64 = 1_000_000_000; /// A private newtype for converting `ContractAccessError` into an RPC error. 
struct ContractAccessError(pallet_contracts_primitives::ContractAccessError); impl From for Error { - fn from(e: ContractAccessError) -> Error { - use pallet_contracts_primitives::ContractAccessError::*; - match e.0 { - DoesntExist => Error { - code: ErrorCode::ServerError(CONTRACT_DOESNT_EXIST), - message: "The specified contract doesn't exist.".into(), - data: None, - }, - IsTombstone => Error { - code: ErrorCode::ServerError(CONTRACT_IS_A_TOMBSTONE), - message: "The contract is a tombstone and doesn't have any storage.".into(), - data: None, - }, - } - } + fn from(e: ContractAccessError) -> Error { + use pallet_contracts_primitives::ContractAccessError::*; + match e.0 { + DoesntExist => Error { + code: ErrorCode::ServerError(CONTRACT_DOESNT_EXIST), + message: "The specified contract doesn't exist.".into(), + data: None, + }, + IsTombstone => Error { + code: ErrorCode::ServerError(CONTRACT_IS_A_TOMBSTONE), + message: "The contract is a tombstone and doesn't have any storage.".into(), + data: None, + }, + } + } } /// A struct that encodes RPC parameters required for a call to a smart-contract. 
@@ -75,11 +75,11 @@ impl From for Error { #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] pub struct CallRequest { - origin: AccountId, - dest: AccountId, - value: Balance, - gas_limit: number::NumberOrHex, - input_data: Bytes, + origin: AccountId, + dest: AccountId, + value: Balance, + gas_limit: number::NumberOrHex, + input_data: Bytes, } /// An RPC serializable result of contract execution @@ -87,209 +87,209 @@ pub struct CallRequest { #[serde(deny_unknown_fields)] #[serde(rename_all = "camelCase")] pub enum RpcContractExecResult { - /// Successful execution - Success { - /// Status code - status: u8, - /// Output data - data: Bytes, - }, - /// Error execution - Error(()), + /// Successful execution + Success { + /// Status code + status: u8, + /// Output data + data: Bytes, + }, + /// Error execution + Error(()), } impl From for RpcContractExecResult { - fn from(r: ContractExecResult) -> Self { - match r { - ContractExecResult::Success { status, data } => RpcContractExecResult::Success { - status, - data: data.into(), - }, - ContractExecResult::Error => RpcContractExecResult::Error(()), - } - } + fn from(r: ContractExecResult) -> Self { + match r { + ContractExecResult::Success { status, data } => RpcContractExecResult::Success { + status, + data: data.into(), + }, + ContractExecResult::Error => RpcContractExecResult::Error(()), + } + } } /// Contracts RPC methods. #[rpc] pub trait ContractsApi { - /// Executes a call to a contract. - /// - /// This call is performed locally without submitting any transactions. Thus executing this - /// won't change any state. Nonetheless, the calling state-changing contracts is still possible. - /// - /// This method is useful for calling getter-like methods on contracts. 
- #[rpc(name = "contracts_call")] - fn call( - &self, - call_request: CallRequest, - at: Option, - ) -> Result; - - /// Returns the value under a specified storage `key` in a contract given by `address` param, - /// or `None` if it is not set. - #[rpc(name = "contracts_getStorage")] - fn get_storage( - &self, - address: AccountId, - key: H256, - at: Option, - ) -> Result>; - - /// Returns the projected time a given contract will be able to sustain paying its rent. - /// - /// The returned projection is relevant for the given block, i.e. it is as if the contract was - /// accessed at the beginning of that block. - /// - /// Returns `None` if the contract is exempted from rent. - #[rpc(name = "contracts_rentProjection")] - fn rent_projection( - &self, - address: AccountId, - at: Option, - ) -> Result>; + /// Executes a call to a contract. + /// + /// This call is performed locally without submitting any transactions. Thus executing this + /// won't change any state. Nonetheless, the calling state-changing contracts is still possible. + /// + /// This method is useful for calling getter-like methods on contracts. + #[rpc(name = "contracts_call")] + fn call( + &self, + call_request: CallRequest, + at: Option, + ) -> Result; + + /// Returns the value under a specified storage `key` in a contract given by `address` param, + /// or `None` if it is not set. + #[rpc(name = "contracts_getStorage")] + fn get_storage( + &self, + address: AccountId, + key: H256, + at: Option, + ) -> Result>; + + /// Returns the projected time a given contract will be able to sustain paying its rent. + /// + /// The returned projection is relevant for the given block, i.e. it is as if the contract was + /// accessed at the beginning of that block. + /// + /// Returns `None` if the contract is exempted from rent. + #[rpc(name = "contracts_rentProjection")] + fn rent_projection( + &self, + address: AccountId, + at: Option, + ) -> Result>; } /// An implementation of contract specific RPC methods. 
pub struct Contracts { - client: Arc, - _marker: std::marker::PhantomData, + client: Arc, + _marker: std::marker::PhantomData, } impl Contracts { - /// Create new `Contracts` with the given reference to the client. - pub fn new(client: Arc) -> Self { - Contracts { - client, - _marker: Default::default(), - } - } + /// Create new `Contracts` with the given reference to the client. + pub fn new(client: Arc) -> Self { + Contracts { + client, + _marker: Default::default(), + } + } } impl - ContractsApi< - ::Hash, - <::Header as HeaderT>::Number, - AccountId, - Balance, - > for Contracts + ContractsApi< + ::Hash, + <::Header as HeaderT>::Number, + AccountId, + Balance, + > for Contracts where - Block: BlockT, - C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - C::Api: ContractsRuntimeApi< - Block, - AccountId, - Balance, - <::Header as HeaderT>::Number, - >, - AccountId: Codec, - Balance: Codec, + Block: BlockT, + C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + C::Api: ContractsRuntimeApi< + Block, + AccountId, + Balance, + <::Header as HeaderT>::Number, + >, + AccountId: Codec, + Balance: Codec, { - fn call( - &self, - call_request: CallRequest, - at: Option<::Hash>, - ) -> Result { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| + fn call( + &self, + call_request: CallRequest, + at: Option<::Hash>, + ) -> Result { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. 
self.client.info().best_hash)); - let CallRequest { - origin, - dest, - value, - gas_limit, - input_data, - } = call_request; - let gas_limit = gas_limit.to_number().map_err(|e| Error { - code: ErrorCode::InvalidParams, - message: e, - data: None, - })?; - - let max_gas_limit = 5 * GAS_PER_SECOND; - if gas_limit > max_gas_limit { - return Err(Error { - code: ErrorCode::InvalidParams, - message: format!( - "Requested gas limit is greater than maximum allowed: {} > {}", - gas_limit, max_gas_limit - ), - data: None, - }); - } - - let exec_result = api - .call(&at, origin, dest, value, gas_limit, input_data.to_vec()) - .map_err(|e| runtime_error_into_rpc_err(e))?; - - Ok(exec_result.into()) - } - - fn get_storage( - &self, - address: AccountId, - key: H256, - at: Option<::Hash>, - ) -> Result> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| + let CallRequest { + origin, + dest, + value, + gas_limit, + input_data, + } = call_request; + let gas_limit = gas_limit.to_number().map_err(|e| Error { + code: ErrorCode::InvalidParams, + message: e, + data: None, + })?; + + let max_gas_limit = 5 * GAS_PER_SECOND; + if gas_limit > max_gas_limit { + return Err(Error { + code: ErrorCode::InvalidParams, + message: format!( + "Requested gas limit is greater than maximum allowed: {} > {}", + gas_limit, max_gas_limit + ), + data: None, + }); + } + + let exec_result = api + .call(&at, origin, dest, value, gas_limit, input_data.to_vec()) + .map_err(|e| runtime_error_into_rpc_err(e))?; + + Ok(exec_result.into()) + } + + fn get_storage( + &self, + address: AccountId, + key: H256, + at: Option<::Hash>, + ) -> Result> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash)); - let result = api - .get_storage(&at, address, key.into()) - .map_err(|e| runtime_error_into_rpc_err(e))? - .map_err(ContractAccessError)? 
- .map(Bytes); - - Ok(result) - } - - fn rent_projection( - &self, - address: AccountId, - at: Option<::Hash>, - ) -> Result::Header as HeaderT>::Number>> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| + let result = api + .get_storage(&at, address, key.into()) + .map_err(|e| runtime_error_into_rpc_err(e))? + .map_err(ContractAccessError)? + .map(Bytes); + + Ok(result) + } + + fn rent_projection( + &self, + address: AccountId, + at: Option<::Hash>, + ) -> Result::Header as HeaderT>::Number>> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. self.client.info().best_hash)); - let result = api - .rent_projection(&at, address) - .map_err(|e| runtime_error_into_rpc_err(e))? - .map_err(ContractAccessError)?; + let result = api + .rent_projection(&at, address) + .map_err(|e| runtime_error_into_rpc_err(e))? + .map_err(ContractAccessError)?; - Ok(match result { - RentProjection::NoEviction => None, - RentProjection::EvictionAt(block_num) => Some(block_num), - }) - } + Ok(match result { + RentProjection::NoEviction => None, + RentProjection::EvictionAt(block_num) => Some(block_num), + }) + } } /// Converts a runtime trap into an RPC error. 
fn runtime_error_into_rpc_err(err: impl std::fmt::Debug) -> Error { - Error { - code: ErrorCode::ServerError(RUNTIME_ERROR), - message: "Runtime trapped".into(), - data: Some(format!("{:?}", err).into()), - } + Error { + code: ErrorCode::ServerError(RUNTIME_ERROR), + message: "Runtime trapped".into(), + data: Some(format!("{:?}", err).into()), + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn should_serialize_deserialize_properly() { - fn test(expected: &str) { - let res: RpcContractExecResult = serde_json::from_str(expected).unwrap(); - let actual = serde_json::to_string(&res).unwrap(); - assert_eq!(actual, expected); - } - - test(r#"{"success":{"status":5,"data":"0x1234"}}"#); - test(r#"{"error":null}"#); - } + use super::*; + + #[test] + fn should_serialize_deserialize_properly() { + fn test(expected: &str) { + let res: RpcContractExecResult = serde_json::from_str(expected).unwrap(); + let actual = serde_json::to_string(&res).unwrap(); + assert_eq!(actual, expected); + } + + test(r#"{"success":{"status":5,"data":"0x1234"}}"#); + test(r#"{"error":null}"#); + } } diff --git a/frame/contracts/src/account_db.rs b/frame/contracts/src/account_db.rs index 165581e676..6a12ec4edb 100644 --- a/frame/contracts/src/account_db.rs +++ b/frame/contracts/src/account_db.rs @@ -17,373 +17,391 @@ //! Auxiliaries to help with managing partial changes to accounts state. 
use super::{ - AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, - TrieIdGenerator, + AliveContractInfo, BalanceOf, CodeHash, ContractInfo, ContractInfoOf, Trait, TrieId, + TrieIdGenerator, }; use crate::exec::StorageKey; -use sp_std::cell::RefCell; -use sp_std::collections::btree_map::{BTreeMap, Entry}; -use sp_std::prelude::*; -use sp_io::hashing::blake2_256; -use sp_runtime::traits::{Bounded, Zero}; use frame_support::traits::{Currency, Get, Imbalance, SignedImbalance}; use frame_support::{storage::child, StorageMap}; use frame_system; +use sp_io::hashing::blake2_256; +use sp_runtime::traits::{Bounded, Zero}; +use sp_std::cell::RefCell; +use sp_std::collections::btree_map::{BTreeMap, Entry}; +use sp_std::prelude::*; // Note: we don't provide Option because we can't create // the trie_id in the overlay, thus we provide an overlay on the fields // specifically. pub struct ChangeEntry { - /// If Some(_), then the account balance is modified to the value. If None and `reset` is false, - /// the balance unmodified. If None and `reset` is true, the balance is reset to 0. - balance: Option>, - /// If Some(_), then a contract is instantiated with the code hash. If None and `reset` is false, - /// then the contract code is unmodified. If None and `reset` is true, the contract is deleted. - code_hash: Option>, - /// If Some(_), then the rent allowance is set to the value. If None and `reset` is false, then - /// the rent allowance is unmodified. If None and `reset` is true, the contract is deleted. - rent_allowance: Option>, - storage: BTreeMap>>, - /// If true, indicates that the existing contract and all its storage entries should be removed - /// and replaced with the fields on this change entry. Otherwise, the fields on this change - /// entry are updates merged into the existing contract info and storage. - reset: bool, + /// If Some(_), then the account balance is modified to the value. 
If None and `reset` is false, + /// the balance unmodified. If None and `reset` is true, the balance is reset to 0. + balance: Option>, + /// If Some(_), then a contract is instantiated with the code hash. If None and `reset` is false, + /// then the contract code is unmodified. If None and `reset` is true, the contract is deleted. + code_hash: Option>, + /// If Some(_), then the rent allowance is set to the value. If None and `reset` is false, then + /// the rent allowance is unmodified. If None and `reset` is true, the contract is deleted. + rent_allowance: Option>, + storage: BTreeMap>>, + /// If true, indicates that the existing contract and all its storage entries should be removed + /// and replaced with the fields on this change entry. Otherwise, the fields on this change + /// entry are updates merged into the existing contract info and storage. + reset: bool, } impl ChangeEntry { - fn balance(&self) -> Option> { - self.balance.or_else(|| { - if self.reset { - Some(>::zero()) - } else { - None - } - }) - } - - fn code_hash(&self) -> Option>> { - if self.reset { - Some(self.code_hash) - } else { - self.code_hash.map(Some) - } - } - - fn rent_allowance(&self) -> Option>> { - if self.reset { - Some(self.rent_allowance) - } else { - self.rent_allowance.map(Some) - } - } - - fn storage(&self, location: &StorageKey) -> Option>> { - let value = self.storage.get(location).cloned(); - if self.reset { - Some(value.unwrap_or(None)) - } else { - value - } - } + fn balance(&self) -> Option> { + self.balance.or_else(|| { + if self.reset { + Some(>::zero()) + } else { + None + } + }) + } + + fn code_hash(&self) -> Option>> { + if self.reset { + Some(self.code_hash) + } else { + self.code_hash.map(Some) + } + } + + fn rent_allowance(&self) -> Option>> { + if self.reset { + Some(self.rent_allowance) + } else { + self.rent_allowance.map(Some) + } + } + + fn storage(&self, location: &StorageKey) -> Option>> { + let value = self.storage.get(location).cloned(); + if self.reset 
{ + Some(value.unwrap_or(None)) + } else { + value + } + } } // Cannot derive(Default) since it erroneously bounds T by Default. impl Default for ChangeEntry { - fn default() -> Self { - ChangeEntry { - rent_allowance: Default::default(), - balance: Default::default(), - code_hash: Default::default(), - storage: Default::default(), - reset: false, - } - } + fn default() -> Self { + ChangeEntry { + rent_allowance: Default::default(), + balance: Default::default(), + code_hash: Default::default(), + storage: Default::default(), + reset: false, + } + } } pub type ChangeSet = BTreeMap<::AccountId, ChangeEntry>; pub trait AccountDb { - /// Account is used when overlayed otherwise trie_id must be provided. - /// This is for performance reason. - /// - /// Trie id is None iff account doesn't have an associated trie id in >. - /// Because DirectAccountDb bypass the lookup for this association. - fn get_storage(&self, account: &T::AccountId, trie_id: Option<&TrieId>, location: &StorageKey) -> Option>; - /// If account has an alive contract then return the code hash associated. - fn get_code_hash(&self, account: &T::AccountId) -> Option>; - /// If account has an alive contract then return the rent allowance associated. - fn get_rent_allowance(&self, account: &T::AccountId) -> Option>; - /// Returns false iff account has no alive contract nor tombstone. - fn contract_exists(&self, account: &T::AccountId) -> bool; - fn get_balance(&self, account: &T::AccountId) -> BalanceOf; - - fn commit(&mut self, change_set: ChangeSet); + /// Account is used when overlayed otherwise trie_id must be provided. + /// This is for performance reason. + /// + /// Trie id is None iff account doesn't have an associated trie id in >. + /// Because DirectAccountDb bypass the lookup for this association. + fn get_storage( + &self, + account: &T::AccountId, + trie_id: Option<&TrieId>, + location: &StorageKey, + ) -> Option>; + /// If account has an alive contract then return the code hash associated. 
+ fn get_code_hash(&self, account: &T::AccountId) -> Option>; + /// If account has an alive contract then return the rent allowance associated. + fn get_rent_allowance(&self, account: &T::AccountId) -> Option>; + /// Returns false iff account has no alive contract nor tombstone. + fn contract_exists(&self, account: &T::AccountId) -> bool; + fn get_balance(&self, account: &T::AccountId) -> BalanceOf; + + fn commit(&mut self, change_set: ChangeSet); } pub struct DirectAccountDb; impl AccountDb for DirectAccountDb { - fn get_storage( - &self, - _account: &T::AccountId, - trie_id: Option<&TrieId>, - location: &StorageKey - ) -> Option> { - trie_id.and_then(|id| child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location))) - } - fn get_code_hash(&self, account: &T::AccountId) -> Option> { - >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) - } - fn get_rent_allowance(&self, account: &T::AccountId) -> Option> { - >::get(account).and_then(|i| i.as_alive().map(|i| i.rent_allowance)) - } - fn contract_exists(&self, account: &T::AccountId) -> bool { - >::contains_key(account) - } - fn get_balance(&self, account: &T::AccountId) -> BalanceOf { - T::Currency::free_balance(account) - } - fn commit(&mut self, s: ChangeSet) { - let mut total_imbalance = SignedImbalance::zero(); - for (address, changed) in s.into_iter() { - if let Some(balance) = changed.balance() { - let imbalance = T::Currency::make_free_balance_be(&address, balance); - total_imbalance = total_imbalance.merge(imbalance); - } - - if changed.code_hash().is_some() - || changed.rent_allowance().is_some() - || !changed.storage.is_empty() - || changed.reset - { - let old_info = match >::get(&address) { - Some(ContractInfo::Alive(alive)) => Some(alive), - None => None, - // Cannot commit changes to tombstone contract - Some(ContractInfo::Tombstone(_)) => continue, - }; - - let mut new_info = match (changed.reset, old_info.clone(), changed.code_hash) { - // Existing contract is being modified. 
- (false, Some(info), _) => info, - // Existing contract is being removed. - (true, Some(info), None) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); - >::remove(&address); - continue; - } - // Existing contract is being replaced by a new one. - (true, Some(info), Some(code_hash)) => { - child::kill_storage(&info.trie_id, info.child_trie_unique_id()); - AliveContractInfo:: { - code_hash, - storage_size: T::StorageSizeOffset::get(), - trie_id: ::TrieIdGenerator::trie_id(&address), - deduct_block: >::block_number(), - rent_allowance: >::max_value(), - last_write: None, - } - } - // New contract is being instantiated. - (_, None, Some(code_hash)) => { - AliveContractInfo:: { - code_hash, - storage_size: T::StorageSizeOffset::get(), - trie_id: ::TrieIdGenerator::trie_id(&address), - deduct_block: >::block_number(), - rent_allowance: >::max_value(), - last_write: None, - } - } - // There is no existing at the address nor a new one to be instantiated. - (_, None, None) => continue, - }; - - if let Some(rent_allowance) = changed.rent_allowance { - new_info.rent_allowance = rent_allowance; - } - - if let Some(code_hash) = changed.code_hash { - new_info.code_hash = code_hash; - } - - if !changed.storage.is_empty() { - new_info.last_write = Some(>::block_number()); - } - - for (k, v) in changed.storage.into_iter() { - if let Some(value) = child::get_raw( - &new_info.trie_id[..], - new_info.child_trie_unique_id(), - &blake2_256(&k), - ) { - new_info.storage_size -= value.len() as u32; - } - if let Some(value) = v { - new_info.storage_size += value.len() as u32; - child::put_raw(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k), &value[..]); - } else { - child::kill(&new_info.trie_id[..], new_info.child_trie_unique_id(), &blake2_256(&k)); - } - } - - if old_info - .map(|old_info| old_info != new_info) - .unwrap_or(true) - { - >::insert(&address, ContractInfo::Alive(new_info)); - } - } - } - - match total_imbalance { - // If we've 
detected a positive imbalance as a result of our contract-level machinations - // then it's indicative of a buggy contracts system. - // Panicking is far from ideal as it opens up a DoS attack on block validators, however - // it's a less bad option than allowing arbitrary value to be created. - SignedImbalance::Positive(ref p) if !p.peek().is_zero() => - panic!("contract subsystem resulting in positive imbalance!"), - _ => {} - } - } + fn get_storage( + &self, + _account: &T::AccountId, + trie_id: Option<&TrieId>, + location: &StorageKey, + ) -> Option> { + trie_id.and_then(|id| { + child::get_raw(id, crate::trie_unique_id(&id[..]), &blake2_256(location)) + }) + } + fn get_code_hash(&self, account: &T::AccountId) -> Option> { + >::get(account).and_then(|i| i.as_alive().map(|i| i.code_hash)) + } + fn get_rent_allowance(&self, account: &T::AccountId) -> Option> { + >::get(account).and_then(|i| i.as_alive().map(|i| i.rent_allowance)) + } + fn contract_exists(&self, account: &T::AccountId) -> bool { + >::contains_key(account) + } + fn get_balance(&self, account: &T::AccountId) -> BalanceOf { + T::Currency::free_balance(account) + } + fn commit(&mut self, s: ChangeSet) { + let mut total_imbalance = SignedImbalance::zero(); + for (address, changed) in s.into_iter() { + if let Some(balance) = changed.balance() { + let imbalance = T::Currency::make_free_balance_be(&address, balance); + total_imbalance = total_imbalance.merge(imbalance); + } + + if changed.code_hash().is_some() + || changed.rent_allowance().is_some() + || !changed.storage.is_empty() + || changed.reset + { + let old_info = match >::get(&address) { + Some(ContractInfo::Alive(alive)) => Some(alive), + None => None, + // Cannot commit changes to tombstone contract + Some(ContractInfo::Tombstone(_)) => continue, + }; + + let mut new_info = match (changed.reset, old_info.clone(), changed.code_hash) { + // Existing contract is being modified. 
+ (false, Some(info), _) => info, + // Existing contract is being removed. + (true, Some(info), None) => { + child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + >::remove(&address); + continue; + } + // Existing contract is being replaced by a new one. + (true, Some(info), Some(code_hash)) => { + child::kill_storage(&info.trie_id, info.child_trie_unique_id()); + AliveContractInfo:: { + code_hash, + storage_size: T::StorageSizeOffset::get(), + trie_id: ::TrieIdGenerator::trie_id(&address), + deduct_block: >::block_number(), + rent_allowance: >::max_value(), + last_write: None, + } + } + // New contract is being instantiated. + (_, None, Some(code_hash)) => AliveContractInfo:: { + code_hash, + storage_size: T::StorageSizeOffset::get(), + trie_id: ::TrieIdGenerator::trie_id(&address), + deduct_block: >::block_number(), + rent_allowance: >::max_value(), + last_write: None, + }, + // There is no existing at the address nor a new one to be instantiated. + (_, None, None) => continue, + }; + + if let Some(rent_allowance) = changed.rent_allowance { + new_info.rent_allowance = rent_allowance; + } + + if let Some(code_hash) = changed.code_hash { + new_info.code_hash = code_hash; + } + + if !changed.storage.is_empty() { + new_info.last_write = Some(>::block_number()); + } + + for (k, v) in changed.storage.into_iter() { + if let Some(value) = child::get_raw( + &new_info.trie_id[..], + new_info.child_trie_unique_id(), + &blake2_256(&k), + ) { + new_info.storage_size -= value.len() as u32; + } + if let Some(value) = v { + new_info.storage_size += value.len() as u32; + child::put_raw( + &new_info.trie_id[..], + new_info.child_trie_unique_id(), + &blake2_256(&k), + &value[..], + ); + } else { + child::kill( + &new_info.trie_id[..], + new_info.child_trie_unique_id(), + &blake2_256(&k), + ); + } + } + + if old_info + .map(|old_info| old_info != new_info) + .unwrap_or(true) + { + >::insert(&address, ContractInfo::Alive(new_info)); + } + } + } + + match total_imbalance 
{ + // If we've detected a positive imbalance as a result of our contract-level machinations + // then it's indicative of a buggy contracts system. + // Panicking is far from ideal as it opens up a DoS attack on block validators, however + // it's a less bad option than allowing arbitrary value to be created. + SignedImbalance::Positive(ref p) if !p.peek().is_zero() => { + panic!("contract subsystem resulting in positive imbalance!") + } + _ => {} + } + } } pub struct OverlayAccountDb<'a, T: Trait + 'a> { - local: RefCell>, - underlying: &'a dyn AccountDb, + local: RefCell>, + underlying: &'a dyn AccountDb, } impl<'a, T: Trait> OverlayAccountDb<'a, T> { - pub fn new(underlying: &'a dyn AccountDb) -> OverlayAccountDb<'a, T> { - OverlayAccountDb { - local: RefCell::new(ChangeSet::new()), - underlying, - } - } - - pub fn into_change_set(self) -> ChangeSet { - self.local.into_inner() - } - - pub fn set_storage( - &mut self, - account: &T::AccountId, - location: StorageKey, - value: Option>, - ) { - self.local.borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .storage - .insert(location, value); - } - - /// Return an error if contract already exists (either if it is alive or tombstone) - pub fn instantiate_contract( - &mut self, - account: &T::AccountId, - code_hash: CodeHash, - ) -> Result<(), &'static str> { - if self.contract_exists(account) { - return Err("Alive contract or tombstone already exists"); - } - - let mut local = self.local.borrow_mut(); - let contract = local.entry(account.clone()).or_insert_with(|| Default::default()); - - contract.code_hash = Some(code_hash); - contract.rent_allowance = Some(>::max_value()); - - Ok(()) - } - - /// Mark a contract as deleted. 
- pub fn destroy_contract(&mut self, account: &T::AccountId) { - let mut local = self.local.borrow_mut(); - local.insert( - account.clone(), - ChangeEntry { - reset: true, - ..Default::default() - } - ); - } - - /// Assume contract exists - pub fn set_rent_allowance(&mut self, account: &T::AccountId, rent_allowance: BalanceOf) { - self.local - .borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .rent_allowance = Some(rent_allowance); - } - pub fn set_balance(&mut self, account: &T::AccountId, balance: BalanceOf) { - self.local - .borrow_mut() - .entry(account.clone()) - .or_insert(Default::default()) - .balance = Some(balance); - } + pub fn new(underlying: &'a dyn AccountDb) -> OverlayAccountDb<'a, T> { + OverlayAccountDb { + local: RefCell::new(ChangeSet::new()), + underlying, + } + } + + pub fn into_change_set(self) -> ChangeSet { + self.local.into_inner() + } + + pub fn set_storage( + &mut self, + account: &T::AccountId, + location: StorageKey, + value: Option>, + ) { + self.local + .borrow_mut() + .entry(account.clone()) + .or_insert(Default::default()) + .storage + .insert(location, value); + } + + /// Return an error if contract already exists (either if it is alive or tombstone) + pub fn instantiate_contract( + &mut self, + account: &T::AccountId, + code_hash: CodeHash, + ) -> Result<(), &'static str> { + if self.contract_exists(account) { + return Err("Alive contract or tombstone already exists"); + } + + let mut local = self.local.borrow_mut(); + let contract = local + .entry(account.clone()) + .or_insert_with(|| Default::default()); + + contract.code_hash = Some(code_hash); + contract.rent_allowance = Some(>::max_value()); + + Ok(()) + } + + /// Mark a contract as deleted. 
+ pub fn destroy_contract(&mut self, account: &T::AccountId) { + let mut local = self.local.borrow_mut(); + local.insert( + account.clone(), + ChangeEntry { + reset: true, + ..Default::default() + }, + ); + } + + /// Assume contract exists + pub fn set_rent_allowance(&mut self, account: &T::AccountId, rent_allowance: BalanceOf) { + self.local + .borrow_mut() + .entry(account.clone()) + .or_insert(Default::default()) + .rent_allowance = Some(rent_allowance); + } + pub fn set_balance(&mut self, account: &T::AccountId, balance: BalanceOf) { + self.local + .borrow_mut() + .entry(account.clone()) + .or_insert(Default::default()) + .balance = Some(balance); + } } impl<'a, T: Trait> AccountDb for OverlayAccountDb<'a, T> { - fn get_storage( - &self, - account: &T::AccountId, - trie_id: Option<&TrieId>, - location: &StorageKey - ) -> Option> { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.storage(location)) - .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, location)) - } - fn get_code_hash(&self, account: &T::AccountId) -> Option> { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.code_hash()) - .unwrap_or_else(|| self.underlying.get_code_hash(account)) - } - fn get_rent_allowance(&self, account: &T::AccountId) -> Option> { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.rent_allowance()) - .unwrap_or_else(|| self.underlying.get_rent_allowance(account)) - } - fn contract_exists(&self, account: &T::AccountId) -> bool { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.code_hash().map(|code_hash| code_hash.is_some())) - .unwrap_or_else(|| self.underlying.contract_exists(account)) - } - fn get_balance(&self, account: &T::AccountId) -> BalanceOf { - self.local - .borrow() - .get(account) - .and_then(|changes| changes.balance()) - .unwrap_or_else(|| self.underlying.get_balance(account)) - } - fn commit(&mut self, s: ChangeSet) { - let mut local = self.local.borrow_mut(); - 
- for (address, changed) in s.into_iter() { - match local.entry(address) { - Entry::Occupied(e) => { - let mut value = e.into_mut(); - if changed.reset { - *value = changed; - } else { - value.balance = changed.balance.or(value.balance); - value.code_hash = changed.code_hash.or(value.code_hash); - value.rent_allowance = changed.rent_allowance.or(value.rent_allowance); - value.storage.extend(changed.storage.into_iter()); - } - } - Entry::Vacant(e) => { - e.insert(changed); - } - } - } - } + fn get_storage( + &self, + account: &T::AccountId, + trie_id: Option<&TrieId>, + location: &StorageKey, + ) -> Option> { + self.local + .borrow() + .get(account) + .and_then(|changes| changes.storage(location)) + .unwrap_or_else(|| self.underlying.get_storage(account, trie_id, location)) + } + fn get_code_hash(&self, account: &T::AccountId) -> Option> { + self.local + .borrow() + .get(account) + .and_then(|changes| changes.code_hash()) + .unwrap_or_else(|| self.underlying.get_code_hash(account)) + } + fn get_rent_allowance(&self, account: &T::AccountId) -> Option> { + self.local + .borrow() + .get(account) + .and_then(|changes| changes.rent_allowance()) + .unwrap_or_else(|| self.underlying.get_rent_allowance(account)) + } + fn contract_exists(&self, account: &T::AccountId) -> bool { + self.local + .borrow() + .get(account) + .and_then(|changes| changes.code_hash().map(|code_hash| code_hash.is_some())) + .unwrap_or_else(|| self.underlying.contract_exists(account)) + } + fn get_balance(&self, account: &T::AccountId) -> BalanceOf { + self.local + .borrow() + .get(account) + .and_then(|changes| changes.balance()) + .unwrap_or_else(|| self.underlying.get_balance(account)) + } + fn commit(&mut self, s: ChangeSet) { + let mut local = self.local.borrow_mut(); + + for (address, changed) in s.into_iter() { + match local.entry(address) { + Entry::Occupied(e) => { + let mut value = e.into_mut(); + if changed.reset { + *value = changed; + } else { + value.balance = 
changed.balance.or(value.balance); + value.code_hash = changed.code_hash.or(value.code_hash); + value.rent_allowance = changed.rent_allowance.or(value.rent_allowance); + value.storage.extend(changed.storage.into_iter()); + } + } + Entry::Vacant(e) => { + e.insert(changed); + } + } + } + } } diff --git a/frame/contracts/src/exec.rs b/frame/contracts/src/exec.rs index 402622331d..01cef0ecd6 100644 --- a/frame/contracts/src/exec.rs +++ b/frame/contracts/src/exec.rs @@ -14,18 +14,20 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use super::{CodeHash, Config, ContractAddressFor, Event, RawEvent, Trait, - TrieId, BalanceOf, ContractInfo}; +use super::{ + BalanceOf, CodeHash, Config, ContractAddressFor, ContractInfo, Event, RawEvent, Trait, TrieId, +}; use crate::account_db::{AccountDb, DirectAccountDb, OverlayAccountDb}; -use crate::gas::{Gas, GasMeter, Token, approx_gas_for_balance}; +use crate::gas::{approx_gas_for_balance, Gas, GasMeter, Token}; use crate::rent; -use sp_std::prelude::*; -use sp_runtime::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; use frame_support::{ - storage::unhashed, dispatch::DispatchError, - traits::{WithdrawReason, Currency, Time, Randomness}, + dispatch::DispatchError, + storage::unhashed, + traits::{Currency, Randomness, Time, WithdrawReason}, }; +use sp_runtime::traits::{Bounded, CheckedAdd, CheckedSub, Zero}; +use sp_std::prelude::*; pub type AccountIdOf = ::AccountId; pub type CallOf = ::Call; @@ -49,15 +51,15 @@ pub const STATUS_SUCCESS: StatusCode = 0; /// Output of a contract call or instantiation which ran to completion. #[cfg_attr(test, derive(PartialEq, Eq, Debug))] pub struct ExecReturnValue { - pub status: StatusCode, - pub data: Vec, + pub status: StatusCode, + pub data: Vec, } impl ExecReturnValue { - /// Returns whether the call or instantiation exited with a successful status code. 
- pub fn is_success(&self) -> bool { - self.status == STATUS_SUCCESS - } + /// Returns whether the call or instantiation exited with a successful status code. + pub fn is_success(&self) -> bool { + self.status == STATUS_SUCCESS + } } /// An error indicating some failure to execute a contract call or instantiation. This can include @@ -66,10 +68,10 @@ impl ExecReturnValue { /// non-existent destination contract, etc.). #[cfg_attr(test, derive(sp_runtime::RuntimeDebug))] pub struct ExecError { - pub reason: DispatchError, - /// This is an allocated buffer that may be reused. The buffer must be cleared explicitly - /// before reuse. - pub buffer: Vec, + pub reason: DispatchError, + /// This is an allocated buffer that may be reused. The buffer must be cleared explicitly + /// before reuse. + pub buffer: Vec, } pub type ExecResult = Result; @@ -80,14 +82,17 @@ pub type ExecResult = Result; /// ownership of buffer unless there is an error. #[macro_export] macro_rules! try_or_exec_error { - ($e:expr, $buffer:expr) => { - match $e { - Ok(val) => val, - Err(reason) => return Err( - $crate::exec::ExecError { reason: reason.into(), buffer: $buffer } - ), - } - } + ($e:expr, $buffer:expr) => { + match $e { + Ok(val) => val, + Err(reason) => { + return Err($crate::exec::ExecError { + reason: reason.into(), + buffer: $buffer, + }) + } + } + }; } /// An interface that provides access to the external environment in which the @@ -96,126 +101,126 @@ macro_rules! try_or_exec_error { /// This interface is specialized to an account of the executing code, so all /// operations are implicitly performed on that account. pub trait Ext { - type T: Trait; - - /// Returns the storage entry of the executing account by the given `key`. - /// - /// Returns `None` if the `key` wasn't previously set by `set_storage` or - /// was deleted. - fn get_storage(&self, key: &StorageKey) -> Option>; - - /// Sets the storage entry by the given key to the specified value. 
If `value` is `None` then - /// the storage entry is deleted. Returns an Err if the value size is too large. - fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str>; - - /// Instantiate a contract from the given code. - /// - /// The newly created account will be associated with `code`. `value` specifies the amount of value - /// transferred from this to the newly created account (also known as endowment). - fn instantiate( - &mut self, - code: &CodeHash, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; - - /// Transfer some amount of funds into the specified account. - fn transfer( - &mut self, - to: &AccountIdOf, - value: BalanceOf, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError>; - - /// Transfer all funds to `beneficiary` and delete the contract. - fn terminate( - &mut self, - beneficiary: &AccountIdOf, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError>; - - /// Call (possibly transferring some amount of funds) into the specified account. - fn call( - &mut self, - to: &AccountIdOf, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> ExecResult; - - /// Notes a call dispatch. - fn note_dispatch_call(&mut self, call: CallOf); - - /// Notes a restoration request. - fn note_restore_to( - &mut self, - dest: AccountIdOf, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, - ); - - /// Returns a reference to the account id of the caller. - fn caller(&self) -> &AccountIdOf; - - /// Returns a reference to the account id of the current contract. - fn address(&self) -> &AccountIdOf; - - /// Returns the balance of the current contract. - /// - /// The `value_transferred` is already added. - fn balance(&self) -> BalanceOf; - - /// Returns the value transferred along with this call or as endowment. 
- fn value_transferred(&self) -> BalanceOf; - - /// Returns a reference to the timestamp of the current block - fn now(&self) -> &MomentOf; - - /// Returns the minimum balance that is required for creating an account. - fn minimum_balance(&self) -> BalanceOf; - - /// Returns the deposit required to create a tombstone upon contract eviction. - fn tombstone_deposit(&self) -> BalanceOf; - - /// Returns a random number for the current block with the given subject. - fn random(&self, subject: &[u8]) -> SeedOf; - - /// Deposit an event with the given topics. - /// - /// There should not be any duplicates in `topics`. - fn deposit_event(&mut self, topics: Vec>, data: Vec); - - /// Set rent allowance of the contract - fn set_rent_allowance(&mut self, rent_allowance: BalanceOf); - - /// Rent allowance of the contract - fn rent_allowance(&self) -> BalanceOf; - - /// Returns the current block number. - fn block_number(&self) -> BlockNumberOf; - - /// Returns the maximum allowed size of a storage item. - fn max_value_size(&self) -> u32; - - /// Returns the value of runtime under the given key. - /// - /// Returns `None` if the value doesn't exist. - fn get_runtime_storage(&self, key: &[u8]) -> Option>; + type T: Trait; + + /// Returns the storage entry of the executing account by the given `key`. + /// + /// Returns `None` if the `key` wasn't previously set by `set_storage` or + /// was deleted. + fn get_storage(&self, key: &StorageKey) -> Option>; + + /// Sets the storage entry by the given key to the specified value. If `value` is `None` then + /// the storage entry is deleted. Returns an Err if the value size is too large. + fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str>; + + /// Instantiate a contract from the given code. + /// + /// The newly created account will be associated with `code`. `value` specifies the amount of value + /// transferred from this to the newly created account (also known as endowment). 
+ fn instantiate( + &mut self, + code: &CodeHash, + value: BalanceOf, + gas_meter: &mut GasMeter, + input_data: Vec, + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError>; + + /// Transfer some amount of funds into the specified account. + fn transfer( + &mut self, + to: &AccountIdOf, + value: BalanceOf, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError>; + + /// Transfer all funds to `beneficiary` and delete the contract. + fn terminate( + &mut self, + beneficiary: &AccountIdOf, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError>; + + /// Call (possibly transferring some amount of funds) into the specified account. + fn call( + &mut self, + to: &AccountIdOf, + value: BalanceOf, + gas_meter: &mut GasMeter, + input_data: Vec, + ) -> ExecResult; + + /// Notes a call dispatch. + fn note_dispatch_call(&mut self, call: CallOf); + + /// Notes a restoration request. + fn note_restore_to( + &mut self, + dest: AccountIdOf, + code_hash: CodeHash, + rent_allowance: BalanceOf, + delta: Vec, + ); + + /// Returns a reference to the account id of the caller. + fn caller(&self) -> &AccountIdOf; + + /// Returns a reference to the account id of the current contract. + fn address(&self) -> &AccountIdOf; + + /// Returns the balance of the current contract. + /// + /// The `value_transferred` is already added. + fn balance(&self) -> BalanceOf; + + /// Returns the value transferred along with this call or as endowment. + fn value_transferred(&self) -> BalanceOf; + + /// Returns a reference to the timestamp of the current block + fn now(&self) -> &MomentOf; + + /// Returns the minimum balance that is required for creating an account. + fn minimum_balance(&self) -> BalanceOf; + + /// Returns the deposit required to create a tombstone upon contract eviction. + fn tombstone_deposit(&self) -> BalanceOf; + + /// Returns a random number for the current block with the given subject. 
+ fn random(&self, subject: &[u8]) -> SeedOf; + + /// Deposit an event with the given topics. + /// + /// There should not be any duplicates in `topics`. + fn deposit_event(&mut self, topics: Vec>, data: Vec); + + /// Set rent allowance of the contract + fn set_rent_allowance(&mut self, rent_allowance: BalanceOf); + + /// Rent allowance of the contract + fn rent_allowance(&self) -> BalanceOf; + + /// Returns the current block number. + fn block_number(&self) -> BlockNumberOf; + + /// Returns the maximum allowed size of a storage item. + fn max_value_size(&self) -> u32; + + /// Returns the value of runtime under the given key. + /// + /// Returns `None` if the value doesn't exist. + fn get_runtime_storage(&self, key: &[u8]) -> Option>; } /// Loader is a companion of the `Vm` trait. It loads an appropriate abstract /// executable to be executed by an accompanying `Vm` implementation. pub trait Loader { - type Executable; - - /// Load the initializer portion of the code specified by the `code_hash`. This - /// executable is called upon instantiation. - fn load_init(&self, code_hash: &CodeHash) -> Result; - /// Load the main portion of the code specified by the `code_hash`. This executable - /// is called for each call to a contract. - fn load_main(&self, code_hash: &CodeHash) -> Result; + type Executable; + + /// Load the initializer portion of the code specified by the `code_hash`. This + /// executable is called upon instantiation. + fn load_init(&self, code_hash: &CodeHash) -> Result; + /// Load the main portion of the code specified by the `code_hash`. This executable + /// is called for each call to a contract. + fn load_main(&self, code_hash: &CodeHash) -> Result; } /// A trait that represent a virtual machine. @@ -227,407 +232,408 @@ pub trait Loader { /// Execution of code can end by either implicit termination (that is, reached the end of /// executable), explicit termination via returning a buffer or termination due to a trap. 
pub trait Vm { - type Executable; - - fn execute>( - &self, - exec: &Self::Executable, - ext: E, - input_data: Vec, - gas_meter: &mut GasMeter, - ) -> ExecResult; + type Executable; + + fn execute>( + &self, + exec: &Self::Executable, + ext: E, + input_data: Vec, + gas_meter: &mut GasMeter, + ) -> ExecResult; } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum ExecFeeToken { - /// Base fee charged for a call. - Call, - /// Base fee charged for a instantiate. - Instantiate, + /// Base fee charged for a call. + Call, + /// Base fee charged for a instantiate. + Instantiate, } impl Token for ExecFeeToken { - type Metadata = Config; - #[inline] - fn calculate_amount(&self, metadata: &Config) -> Gas { - match *self { - ExecFeeToken::Call => metadata.schedule.call_base_cost, - ExecFeeToken::Instantiate => metadata.schedule.instantiate_base_cost, - } - } + type Metadata = Config; + #[inline] + fn calculate_amount(&self, metadata: &Config) -> Gas { + match *self { + ExecFeeToken::Call => metadata.schedule.call_base_cost, + ExecFeeToken::Instantiate => metadata.schedule.instantiate_base_cost, + } + } } #[cfg_attr(any(feature = "std", test), derive(PartialEq, Eq, Clone))] #[derive(sp_runtime::RuntimeDebug)] pub enum DeferredAction { - DepositEvent { - /// A list of topics this event will be deposited with. - topics: Vec, - /// The event to deposit. - event: Event, - }, - DispatchRuntimeCall { - /// The account id of the contract who dispatched this call. - origin: T::AccountId, - /// The call to dispatch. - call: ::Call, - }, - RestoreTo { - /// The account id of the contract which is removed during the restoration and transfers - /// its storage to the restored contract. - donor: T::AccountId, - /// The account id of the restored contract. - dest: T::AccountId, - /// The code hash of the restored contract. - code_hash: CodeHash, - /// The initial rent allowance to set. - rent_allowance: BalanceOf, - /// The keys to delete upon restoration. 
- delta: Vec, - }, + DepositEvent { + /// A list of topics this event will be deposited with. + topics: Vec, + /// The event to deposit. + event: Event, + }, + DispatchRuntimeCall { + /// The account id of the contract who dispatched this call. + origin: T::AccountId, + /// The call to dispatch. + call: ::Call, + }, + RestoreTo { + /// The account id of the contract which is removed during the restoration and transfers + /// its storage to the restored contract. + donor: T::AccountId, + /// The account id of the restored contract. + dest: T::AccountId, + /// The code hash of the restored contract. + code_hash: CodeHash, + /// The initial rent allowance to set. + rent_allowance: BalanceOf, + /// The keys to delete upon restoration. + delta: Vec, + }, } pub struct ExecutionContext<'a, T: Trait + 'a, V, L> { - pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, - pub self_account: T::AccountId, - pub self_trie_id: Option, - pub overlay: OverlayAccountDb<'a, T>, - pub depth: usize, - pub deferred: Vec>, - pub config: &'a Config, - pub vm: &'a V, - pub loader: &'a L, - pub timestamp: MomentOf, - pub block_number: T::BlockNumber, + pub caller: Option<&'a ExecutionContext<'a, T, V, L>>, + pub self_account: T::AccountId, + pub self_trie_id: Option, + pub overlay: OverlayAccountDb<'a, T>, + pub depth: usize, + pub deferred: Vec>, + pub config: &'a Config, + pub vm: &'a V, + pub loader: &'a L, + pub timestamp: MomentOf, + pub block_number: T::BlockNumber, } impl<'a, T, E, V, L> ExecutionContext<'a, T, V, L> where - T: Trait, - L: Loader, - V: Vm, + T: Trait, + L: Loader, + V: Vm, { - /// Create the top level execution context. - /// - /// The specified `origin` address will be used as `sender` for. The `origin` must be a regular - /// account (not a contract). 
- pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { - ExecutionContext { - caller: None, - self_trie_id: None, - self_account: origin, - overlay: OverlayAccountDb::::new(&DirectAccountDb), - depth: 0, - deferred: Vec::new(), - config: &cfg, - vm: &vm, - loader: &loader, - timestamp: T::Time::now(), - block_number: >::block_number(), - } - } - - fn nested<'b, 'c: 'b>(&'c self, dest: T::AccountId, trie_id: Option) - -> ExecutionContext<'b, T, V, L> - { - ExecutionContext { - caller: Some(self), - self_trie_id: trie_id, - self_account: dest, - overlay: OverlayAccountDb::new(&self.overlay), - depth: self.depth + 1, - deferred: Vec::new(), - config: self.config, - vm: self.vm, - loader: self.loader, - timestamp: self.timestamp.clone(), - block_number: self.block_number.clone(), - } - } - - /// Transfer balance to `dest` without calling any contract code. - pub fn transfer( - &mut self, - dest: T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter - ) -> Result<(), DispatchError> { - transfer( - gas_meter, - TransferCause::Call, - &self.self_account.clone(), - &dest, - value, - self, - ) - } - - /// Make a call to the specified address, optionally transferring some funds. 
- pub fn call( - &mut self, - dest: T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> ExecResult { - if self.depth == self.config.max_depth as usize { - return Err(ExecError { - reason: "reached maximum depth, cannot make a call".into(), - buffer: input_data, - }); - } - - if gas_meter - .charge(self.config, ExecFeeToken::Call) - .is_out_of_gas() - { - return Err(ExecError { - reason: "not enough gas to pay base call fee".into(), - buffer: input_data, - }); - } - - // Assumption: `collect_rent` doesn't collide with overlay because - // `collect_rent` will be done on first call and destination contract and balance - // cannot be changed before the first call - let contract_info = rent::collect_rent::(&dest); - - // Calls to dead contracts always fail. - if let Some(ContractInfo::Tombstone(_)) = contract_info { - return Err(ExecError { - reason: "contract has been evicted".into(), - buffer: input_data, - }); - }; - - let caller = self.self_account.clone(); - let dest_trie_id = contract_info.and_then(|i| i.as_alive().map(|i| i.trie_id.clone())); - - self.with_nested_context(dest.clone(), dest_trie_id, |nested| { - if value > BalanceOf::::zero() { - try_or_exec_error!( - transfer( - gas_meter, - TransferCause::Call, - &caller, - &dest, - value, - nested, - ), - input_data - ); - } - - // If code_hash is not none, then the destination account is a live contract, otherwise - // it is a regular account since tombstone accounts have already been rejected. 
- match nested.overlay.get_code_hash(&dest) { - Some(dest_code_hash) => { - let executable = try_or_exec_error!( - nested.loader.load_main(&dest_code_hash), - input_data - ); - let output = nested.vm - .execute( - &executable, - nested.new_call_context(caller, value), - input_data, - gas_meter, - )?; - - Ok(output) - } - None => Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }), - } - }) - } - - pub fn instantiate( - &mut self, - endowment: BalanceOf, - gas_meter: &mut GasMeter, - code_hash: &CodeHash, - input_data: Vec, - ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { - if self.depth == self.config.max_depth as usize { - return Err(ExecError { - reason: "reached maximum depth, cannot instantiate".into(), - buffer: input_data, - }); - } - - if gas_meter - .charge(self.config, ExecFeeToken::Instantiate) - .is_out_of_gas() - { - return Err(ExecError { - reason: "not enough gas to pay base instantiate fee".into(), - buffer: input_data, - }); - } - - let caller = self.self_account.clone(); - let dest = T::DetermineContractAddress::contract_address_for( - code_hash, - &input_data, - &caller, - ); - - // TrieId has not been generated yet and storage is empty since contract is new. - let dest_trie_id = None; - - let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { - try_or_exec_error!( - nested.overlay.instantiate_contract(&dest, code_hash.clone()), - input_data - ); - - // Send funds unconditionally here. If the `endowment` is below existential_deposit - // then error will be returned here. - try_or_exec_error!( - transfer( - gas_meter, - TransferCause::Instantiate, - &caller, - &dest, - endowment, - nested, - ), - input_data - ); - - let executable = try_or_exec_error!( - nested.loader.load_init(&code_hash), - input_data - ); - let output = nested.vm - .execute( - &executable, - nested.new_call_context(caller.clone(), endowment), - input_data, - gas_meter, - )?; - - // Error out if insufficient remaining balance. 
- if nested.overlay.get_balance(&dest) < nested.config.existential_deposit { - return Err(ExecError { - reason: "insufficient remaining balance".into(), - buffer: output.data, - }); - } - - // Deposit an instantiation event. - nested.deferred.push(DeferredAction::DepositEvent { - event: RawEvent::Instantiated(caller.clone(), dest.clone()), - topics: Vec::new(), - }); - - Ok(output) - })?; - - Ok((dest, output)) - } - - pub fn terminate( - &mut self, - beneficiary: &T::AccountId, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - let self_id = self.self_account.clone(); - let value = self.overlay.get_balance(&self_id); - if let Some(caller) = self.caller { - if caller.is_live(&self_id) { - return Err(DispatchError::Other( - "Cannot terminate a contract that is present on the call stack", - )); - } - } - transfer( - gas_meter, - TransferCause::Terminate, - &self_id, - beneficiary, - value, - self, - )?; - self.overlay.destroy_contract(&self_id); - Ok(()) - } - - fn new_call_context<'b>( - &'b mut self, - caller: T::AccountId, - value: BalanceOf, - ) -> CallContext<'b, 'a, T, V, L> { - let timestamp = self.timestamp.clone(); - let block_number = self.block_number.clone(); - CallContext { - ctx: self, - caller, - value_transferred: value, - timestamp, - block_number, - } - } - - fn with_nested_context(&mut self, dest: T::AccountId, trie_id: Option, func: F) - -> ExecResult - where F: FnOnce(&mut ExecutionContext) -> ExecResult - { - let (output, change_set, deferred) = { - let mut nested = self.nested(dest, trie_id); - let output = func(&mut nested)?; - (output, nested.overlay.into_change_set(), nested.deferred) - }; - - if output.is_success() { - self.overlay.commit(change_set); - self.deferred.extend(deferred); - } - - Ok(output) - } - - /// Returns whether a contract, identified by address, is currently live in the execution - /// stack, meaning it is in the middle of an execution. 
- fn is_live(&self, account: &T::AccountId) -> bool { - &self.self_account == account || - self.caller.map_or(false, |caller| caller.is_live(account)) - } + /// Create the top level execution context. + /// + /// The specified `origin` address will be used as `sender` for. The `origin` must be a regular + /// account (not a contract). + pub fn top_level(origin: T::AccountId, cfg: &'a Config, vm: &'a V, loader: &'a L) -> Self { + ExecutionContext { + caller: None, + self_trie_id: None, + self_account: origin, + overlay: OverlayAccountDb::::new(&DirectAccountDb), + depth: 0, + deferred: Vec::new(), + config: &cfg, + vm: &vm, + loader: &loader, + timestamp: T::Time::now(), + block_number: >::block_number(), + } + } + + fn nested<'b, 'c: 'b>( + &'c self, + dest: T::AccountId, + trie_id: Option, + ) -> ExecutionContext<'b, T, V, L> { + ExecutionContext { + caller: Some(self), + self_trie_id: trie_id, + self_account: dest, + overlay: OverlayAccountDb::new(&self.overlay), + depth: self.depth + 1, + deferred: Vec::new(), + config: self.config, + vm: self.vm, + loader: self.loader, + timestamp: self.timestamp.clone(), + block_number: self.block_number.clone(), + } + } + + /// Transfer balance to `dest` without calling any contract code. + pub fn transfer( + &mut self, + dest: T::AccountId, + value: BalanceOf, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + transfer( + gas_meter, + TransferCause::Call, + &self.self_account.clone(), + &dest, + value, + self, + ) + } + + /// Make a call to the specified address, optionally transferring some funds. 
+ pub fn call( + &mut self, + dest: T::AccountId, + value: BalanceOf, + gas_meter: &mut GasMeter, + input_data: Vec, + ) -> ExecResult { + if self.depth == self.config.max_depth as usize { + return Err(ExecError { + reason: "reached maximum depth, cannot make a call".into(), + buffer: input_data, + }); + } + + if gas_meter + .charge(self.config, ExecFeeToken::Call) + .is_out_of_gas() + { + return Err(ExecError { + reason: "not enough gas to pay base call fee".into(), + buffer: input_data, + }); + } + + // Assumption: `collect_rent` doesn't collide with overlay because + // `collect_rent` will be done on first call and destination contract and balance + // cannot be changed before the first call + let contract_info = rent::collect_rent::(&dest); + + // Calls to dead contracts always fail. + if let Some(ContractInfo::Tombstone(_)) = contract_info { + return Err(ExecError { + reason: "contract has been evicted".into(), + buffer: input_data, + }); + }; + + let caller = self.self_account.clone(); + let dest_trie_id = contract_info.and_then(|i| i.as_alive().map(|i| i.trie_id.clone())); + + self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + if value > BalanceOf::::zero() { + try_or_exec_error!( + transfer( + gas_meter, + TransferCause::Call, + &caller, + &dest, + value, + nested, + ), + input_data + ); + } + + // If code_hash is not none, then the destination account is a live contract, otherwise + // it is a regular account since tombstone accounts have already been rejected. 
+ match nested.overlay.get_code_hash(&dest) { + Some(dest_code_hash) => { + let executable = + try_or_exec_error!(nested.loader.load_main(&dest_code_hash), input_data); + let output = nested.vm.execute( + &executable, + nested.new_call_context(caller, value), + input_data, + gas_meter, + )?; + + Ok(output) + } + None => Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }), + } + }) + } + + pub fn instantiate( + &mut self, + endowment: BalanceOf, + gas_meter: &mut GasMeter, + code_hash: &CodeHash, + input_data: Vec, + ) -> Result<(T::AccountId, ExecReturnValue), ExecError> { + if self.depth == self.config.max_depth as usize { + return Err(ExecError { + reason: "reached maximum depth, cannot instantiate".into(), + buffer: input_data, + }); + } + + if gas_meter + .charge(self.config, ExecFeeToken::Instantiate) + .is_out_of_gas() + { + return Err(ExecError { + reason: "not enough gas to pay base instantiate fee".into(), + buffer: input_data, + }); + } + + let caller = self.self_account.clone(); + let dest = + T::DetermineContractAddress::contract_address_for(code_hash, &input_data, &caller); + + // TrieId has not been generated yet and storage is empty since contract is new. + let dest_trie_id = None; + + let output = self.with_nested_context(dest.clone(), dest_trie_id, |nested| { + try_or_exec_error!( + nested + .overlay + .instantiate_contract(&dest, code_hash.clone()), + input_data + ); + + // Send funds unconditionally here. If the `endowment` is below existential_deposit + // then error will be returned here. + try_or_exec_error!( + transfer( + gas_meter, + TransferCause::Instantiate, + &caller, + &dest, + endowment, + nested, + ), + input_data + ); + + let executable = try_or_exec_error!(nested.loader.load_init(&code_hash), input_data); + let output = nested.vm.execute( + &executable, + nested.new_call_context(caller.clone(), endowment), + input_data, + gas_meter, + )?; + + // Error out if insufficient remaining balance. 
+ if nested.overlay.get_balance(&dest) < nested.config.existential_deposit { + return Err(ExecError { + reason: "insufficient remaining balance".into(), + buffer: output.data, + }); + } + + // Deposit an instantiation event. + nested.deferred.push(DeferredAction::DepositEvent { + event: RawEvent::Instantiated(caller.clone(), dest.clone()), + topics: Vec::new(), + }); + + Ok(output) + })?; + + Ok((dest, output)) + } + + pub fn terminate( + &mut self, + beneficiary: &T::AccountId, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + let self_id = self.self_account.clone(); + let value = self.overlay.get_balance(&self_id); + if let Some(caller) = self.caller { + if caller.is_live(&self_id) { + return Err(DispatchError::Other( + "Cannot terminate a contract that is present on the call stack", + )); + } + } + transfer( + gas_meter, + TransferCause::Terminate, + &self_id, + beneficiary, + value, + self, + )?; + self.overlay.destroy_contract(&self_id); + Ok(()) + } + + fn new_call_context<'b>( + &'b mut self, + caller: T::AccountId, + value: BalanceOf, + ) -> CallContext<'b, 'a, T, V, L> { + let timestamp = self.timestamp.clone(); + let block_number = self.block_number.clone(); + CallContext { + ctx: self, + caller, + value_transferred: value, + timestamp, + block_number, + } + } + + fn with_nested_context( + &mut self, + dest: T::AccountId, + trie_id: Option, + func: F, + ) -> ExecResult + where + F: FnOnce(&mut ExecutionContext) -> ExecResult, + { + let (output, change_set, deferred) = { + let mut nested = self.nested(dest, trie_id); + let output = func(&mut nested)?; + (output, nested.overlay.into_change_set(), nested.deferred) + }; + + if output.is_success() { + self.overlay.commit(change_set); + self.deferred.extend(deferred); + } + + Ok(output) + } + + /// Returns whether a contract, identified by address, is currently live in the execution + /// stack, meaning it is in the middle of an execution. 
+ fn is_live(&self, account: &T::AccountId) -> bool { + &self.self_account == account || self.caller.map_or(false, |caller| caller.is_live(account)) + } } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum TransferFeeKind { - ContractInstantiate, - Transfer, + ContractInstantiate, + Transfer, } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub struct TransferFeeToken { - kind: TransferFeeKind, - gas_price: Balance, + kind: TransferFeeKind, + gas_price: Balance, } impl Token for TransferFeeToken> { - type Metadata = Config; - - #[inline] - fn calculate_amount(&self, metadata: &Config) -> Gas { - let balance_fee = match self.kind { - TransferFeeKind::ContractInstantiate => metadata.contract_account_instantiate_fee, - TransferFeeKind::Transfer => return metadata.schedule.transfer_cost, - }; - approx_gas_for_balance(self.gas_price, balance_fee) - } + type Metadata = Config; + + #[inline] + fn calculate_amount(&self, metadata: &Config) -> Gas { + let balance_fee = match self.kind { + TransferFeeKind::ContractInstantiate => metadata.contract_account_instantiate_fee, + TransferFeeKind::Transfer => return metadata.schedule.transfer_cost, + }; + approx_gas_for_balance(self.gas_price, balance_fee) + } } /// Describes possible transfer causes. enum TransferCause { - Call, - Instantiate, - Terminate, + Call, + Instantiate, + Terminate, } /// Transfer some funds from `transactor` to `dest`. @@ -647,227 +653,237 @@ enum TransferCause { /// can go below existential deposit, essentially giving a contract /// the chance to give up it's life. 
fn transfer<'a, T: Trait, V: Vm, L: Loader>( - gas_meter: &mut GasMeter, - cause: TransferCause, - transactor: &T::AccountId, - dest: &T::AccountId, - value: BalanceOf, - ctx: &mut ExecutionContext<'a, T, V, L>, + gas_meter: &mut GasMeter, + cause: TransferCause, + transactor: &T::AccountId, + dest: &T::AccountId, + value: BalanceOf, + ctx: &mut ExecutionContext<'a, T, V, L>, ) -> Result<(), DispatchError> { - use self::TransferCause::*; - use self::TransferFeeKind::*; - - let token = { - let kind: TransferFeeKind = match cause { - // If this function is called from `Instantiate` routine, then we always - // charge contract account creation fee. - Instantiate => ContractInstantiate, - - // Otherwise the fee is to transfer to an account. - Call | Terminate => TransferFeeKind::Transfer, - }; - TransferFeeToken { - kind, - gas_price: gas_meter.gas_price(), - } - }; - - if gas_meter.charge(ctx.config, token).is_out_of_gas() { - Err("not enough gas to pay transfer fee")? - } - - // We allow balance to go below the existential deposit here: - let from_balance = ctx.overlay.get_balance(transactor); - let new_from_balance = match from_balance.checked_sub(&value) { - Some(b) => b, - None => Err("balance too low to send value")?, - }; - let to_balance = ctx.overlay.get_balance(dest); - if to_balance.is_zero() && value < ctx.config.existential_deposit { - Err("value too low to create account")? 
- } - - // Only ext_terminate is allowed to bring the sender below the existential deposit - let required_balance = match cause { - Terminate => 0.into(), - _ => ctx.config.existential_deposit - }; - - T::Currency::ensure_can_withdraw( - transactor, - value, - WithdrawReason::Transfer.into(), - new_from_balance.checked_sub(&required_balance) - .ok_or("brings sender below existential deposit")?, - )?; - - let new_to_balance = match to_balance.checked_add(&value) { - Some(b) => b, - None => Err("destination balance too high to receive value")?, - }; - - if transactor != dest { - ctx.overlay.set_balance(transactor, new_from_balance); - ctx.overlay.set_balance(dest, new_to_balance); - ctx.deferred.push(DeferredAction::DepositEvent { - event: RawEvent::Transfer(transactor.clone(), dest.clone(), value), - topics: Vec::new(), - }); - } - - Ok(()) + use self::TransferCause::*; + use self::TransferFeeKind::*; + + let token = { + let kind: TransferFeeKind = match cause { + // If this function is called from `Instantiate` routine, then we always + // charge contract account creation fee. + Instantiate => ContractInstantiate, + + // Otherwise the fee is to transfer to an account. + Call | Terminate => TransferFeeKind::Transfer, + }; + TransferFeeToken { + kind, + gas_price: gas_meter.gas_price(), + } + }; + + if gas_meter.charge(ctx.config, token).is_out_of_gas() { + Err("not enough gas to pay transfer fee")? + } + + // We allow balance to go below the existential deposit here: + let from_balance = ctx.overlay.get_balance(transactor); + let new_from_balance = match from_balance.checked_sub(&value) { + Some(b) => b, + None => Err("balance too low to send value")?, + }; + let to_balance = ctx.overlay.get_balance(dest); + if to_balance.is_zero() && value < ctx.config.existential_deposit { + Err("value too low to create account")? 
+ } + + // Only ext_terminate is allowed to bring the sender below the existential deposit + let required_balance = match cause { + Terminate => 0.into(), + _ => ctx.config.existential_deposit, + }; + + T::Currency::ensure_can_withdraw( + transactor, + value, + WithdrawReason::Transfer.into(), + new_from_balance + .checked_sub(&required_balance) + .ok_or("brings sender below existential deposit")?, + )?; + + let new_to_balance = match to_balance.checked_add(&value) { + Some(b) => b, + None => Err("destination balance too high to receive value")?, + }; + + if transactor != dest { + ctx.overlay.set_balance(transactor, new_from_balance); + ctx.overlay.set_balance(dest, new_to_balance); + ctx.deferred.push(DeferredAction::DepositEvent { + event: RawEvent::Transfer(transactor.clone(), dest.clone(), value), + topics: Vec::new(), + }); + } + + Ok(()) } struct CallContext<'a, 'b: 'a, T: Trait + 'b, V: Vm + 'b, L: Loader> { - ctx: &'a mut ExecutionContext<'b, T, V, L>, - caller: T::AccountId, - value_transferred: BalanceOf, - timestamp: MomentOf, - block_number: T::BlockNumber, + ctx: &'a mut ExecutionContext<'b, T, V, L>, + caller: T::AccountId, + value_transferred: BalanceOf, + timestamp: MomentOf, + block_number: T::BlockNumber, } impl<'a, 'b: 'a, T, E, V, L> Ext for CallContext<'a, 'b, T, V, L> where - T: Trait + 'b, - V: Vm, - L: Loader, + T: Trait + 'b, + V: Vm, + L: Loader, { - type T = T; - - fn get_storage(&self, key: &StorageKey) -> Option> { - self.ctx.overlay.get_storage(&self.ctx.self_account, self.ctx.self_trie_id.as_ref(), key) - } - - fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str> { - if let Some(ref value) = value { - if self.max_value_size() < value.len() as u32 { - return Err("value size exceeds maximum"); - } - } - - self.ctx - .overlay - .set_storage(&self.ctx.self_account, key, value); - Ok(()) - } - - fn instantiate( - &mut self, - code_hash: &CodeHash, - endowment: BalanceOf, - gas_meter: &mut GasMeter, - 
input_data: Vec, - ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { - self.ctx.instantiate(endowment, gas_meter, code_hash, input_data) - } - - fn transfer( - &mut self, - to: &T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - self.ctx.transfer(to.clone(), value, gas_meter) - } - - fn terminate( - &mut self, - beneficiary: &AccountIdOf, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - self.ctx.terminate(beneficiary, gas_meter) - } - - fn call( - &mut self, - to: &T::AccountId, - value: BalanceOf, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> ExecResult { - self.ctx.call(to.clone(), value, gas_meter, input_data) - } - - fn note_dispatch_call(&mut self, call: CallOf) { - self.ctx.deferred.push(DeferredAction::DispatchRuntimeCall { - origin: self.ctx.self_account.clone(), - call, - }); - } - - fn note_restore_to( - &mut self, - dest: AccountIdOf, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec, - ) { - self.ctx.deferred.push(DeferredAction::RestoreTo { - donor: self.ctx.self_account.clone(), - dest, - code_hash, - rent_allowance, - delta, - }); - } - - fn address(&self) -> &T::AccountId { - &self.ctx.self_account - } - - fn caller(&self) -> &T::AccountId { - &self.caller - } - - fn balance(&self) -> BalanceOf { - self.ctx.overlay.get_balance(&self.ctx.self_account) - } - - fn value_transferred(&self) -> BalanceOf { - self.value_transferred - } - - fn random(&self, subject: &[u8]) -> SeedOf { - T::Randomness::random(subject) - } - - fn now(&self) -> &MomentOf { - &self.timestamp - } - - fn minimum_balance(&self) -> BalanceOf { - self.ctx.config.existential_deposit - } - - fn tombstone_deposit(&self) -> BalanceOf { - self.ctx.config.tombstone_deposit - } - - fn deposit_event(&mut self, topics: Vec, data: Vec) { - self.ctx.deferred.push(DeferredAction::DepositEvent { - topics, - event: RawEvent::ContractExecution(self.ctx.self_account.clone(), data), - }); - } - - fn 
set_rent_allowance(&mut self, rent_allowance: BalanceOf) { - self.ctx.overlay.set_rent_allowance(&self.ctx.self_account, rent_allowance) - } - - fn rent_allowance(&self) -> BalanceOf { - self.ctx.overlay.get_rent_allowance(&self.ctx.self_account) - .unwrap_or(>::max_value()) // Must never be triggered actually - } - - fn block_number(&self) -> T::BlockNumber { self.block_number } - - fn max_value_size(&self) -> u32 { - self.ctx.config.max_value_size - } - - fn get_runtime_storage(&self, key: &[u8]) -> Option> { - unhashed::get_raw(&key) - } + type T = T; + + fn get_storage(&self, key: &StorageKey) -> Option> { + self.ctx + .overlay + .get_storage(&self.ctx.self_account, self.ctx.self_trie_id.as_ref(), key) + } + + fn set_storage(&mut self, key: StorageKey, value: Option>) -> Result<(), &'static str> { + if let Some(ref value) = value { + if self.max_value_size() < value.len() as u32 { + return Err("value size exceeds maximum"); + } + } + + self.ctx + .overlay + .set_storage(&self.ctx.self_account, key, value); + Ok(()) + } + + fn instantiate( + &mut self, + code_hash: &CodeHash, + endowment: BalanceOf, + gas_meter: &mut GasMeter, + input_data: Vec, + ) -> Result<(AccountIdOf, ExecReturnValue), ExecError> { + self.ctx + .instantiate(endowment, gas_meter, code_hash, input_data) + } + + fn transfer( + &mut self, + to: &T::AccountId, + value: BalanceOf, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + self.ctx.transfer(to.clone(), value, gas_meter) + } + + fn terminate( + &mut self, + beneficiary: &AccountIdOf, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + self.ctx.terminate(beneficiary, gas_meter) + } + + fn call( + &mut self, + to: &T::AccountId, + value: BalanceOf, + gas_meter: &mut GasMeter, + input_data: Vec, + ) -> ExecResult { + self.ctx.call(to.clone(), value, gas_meter, input_data) + } + + fn note_dispatch_call(&mut self, call: CallOf) { + self.ctx.deferred.push(DeferredAction::DispatchRuntimeCall { + origin: 
self.ctx.self_account.clone(), + call, + }); + } + + fn note_restore_to( + &mut self, + dest: AccountIdOf, + code_hash: CodeHash, + rent_allowance: BalanceOf, + delta: Vec, + ) { + self.ctx.deferred.push(DeferredAction::RestoreTo { + donor: self.ctx.self_account.clone(), + dest, + code_hash, + rent_allowance, + delta, + }); + } + + fn address(&self) -> &T::AccountId { + &self.ctx.self_account + } + + fn caller(&self) -> &T::AccountId { + &self.caller + } + + fn balance(&self) -> BalanceOf { + self.ctx.overlay.get_balance(&self.ctx.self_account) + } + + fn value_transferred(&self) -> BalanceOf { + self.value_transferred + } + + fn random(&self, subject: &[u8]) -> SeedOf { + T::Randomness::random(subject) + } + + fn now(&self) -> &MomentOf { + &self.timestamp + } + + fn minimum_balance(&self) -> BalanceOf { + self.ctx.config.existential_deposit + } + + fn tombstone_deposit(&self) -> BalanceOf { + self.ctx.config.tombstone_deposit + } + + fn deposit_event(&mut self, topics: Vec, data: Vec) { + self.ctx.deferred.push(DeferredAction::DepositEvent { + topics, + event: RawEvent::ContractExecution(self.ctx.self_account.clone(), data), + }); + } + + fn set_rent_allowance(&mut self, rent_allowance: BalanceOf) { + self.ctx + .overlay + .set_rent_allowance(&self.ctx.self_account, rent_allowance) + } + + fn rent_allowance(&self) -> BalanceOf { + self.ctx + .overlay + .get_rent_allowance(&self.ctx.self_account) + .unwrap_or(>::max_value()) // Must never be triggered actually + } + + fn block_number(&self) -> T::BlockNumber { + self.block_number + } + + fn max_value_size(&self) -> u32 { + self.ctx.config.max_value_size + } + + fn get_runtime_storage(&self, key: &[u8]) -> Option> { + unhashed::get_raw(&key) + } } /// These tests exercise the executive layer. @@ -882,895 +898,938 @@ where /// - executive layer doesn't alter any storage! 
#[cfg(test)] mod tests { - use super::{ - BalanceOf, ExecFeeToken, ExecutionContext, Ext, Loader, TransferFeeKind, TransferFeeToken, - Vm, ExecResult, RawEvent, DeferredAction, - }; - use crate::{ - account_db::AccountDb, gas::GasMeter, tests::{ExtBuilder, Test}, - exec::{ExecReturnValue, ExecError, STATUS_SUCCESS}, CodeHash, Config, - }; - use std::{cell::RefCell, rc::Rc, collections::HashMap, marker::PhantomData}; - use assert_matches::assert_matches; - use sp_runtime::DispatchError; - - const ALICE: u64 = 1; - const BOB: u64 = 2; - const CHARLIE: u64 = 3; - - impl<'a, T, V, L> ExecutionContext<'a, T, V, L> - where T: crate::Trait - { - fn events(&self) -> Vec> { - self.deferred - .iter() - .filter(|action| match *action { - DeferredAction::DepositEvent { .. } => true, - _ => false, - }) - .cloned() - .collect() - } - } - - struct MockCtx<'a> { - ext: &'a mut dyn Ext, - input_data: Vec, - gas_meter: &'a mut GasMeter, - } - - #[derive(Clone)] - struct MockExecutable<'a>(Rc ExecResult + 'a>); - - impl<'a> MockExecutable<'a> { - fn new(f: impl Fn(MockCtx) -> ExecResult + 'a) -> Self { - MockExecutable(Rc::new(f)) - } - } - - struct MockLoader<'a> { - map: HashMap, MockExecutable<'a>>, - counter: u64, - } - - impl<'a> MockLoader<'a> { - fn empty() -> Self { - MockLoader { - map: HashMap::new(), - counter: 0, - } - } - - fn insert(&mut self, f: impl Fn(MockCtx) -> ExecResult + 'a) -> CodeHash { - // Generate code hashes as monotonically increasing values. 
- let code_hash = ::Hash::from_low_u64_be(self.counter); - - self.counter += 1; - self.map.insert(code_hash, MockExecutable::new(f)); - code_hash - } - } - - struct MockVm<'a> { - _marker: PhantomData<&'a ()>, - } - - impl<'a> MockVm<'a> { - fn new() -> Self { - MockVm { _marker: PhantomData } - } - } - - impl<'a> Loader for MockLoader<'a> { - type Executable = MockExecutable<'a>; - - fn load_init(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") - } - fn load_main(&self, code_hash: &CodeHash) -> Result { - self.map - .get(code_hash) - .cloned() - .ok_or_else(|| "code not found") - } - } - - impl<'a> Vm for MockVm<'a> { - type Executable = MockExecutable<'a>; - - fn execute>( - &self, - exec: &MockExecutable, - mut ext: E, - input_data: Vec, - gas_meter: &mut GasMeter, - ) -> ExecResult { - (exec.0)(MockCtx { - ext: &mut ext, - input_data, - gas_meter, - }) - } - } - - fn exec_success() -> ExecResult { - Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }) - } - - #[test] - fn it_works() { - let value = Default::default(); - let mut gas_meter = GasMeter::::with_limit(10000, 1); - let data = vec![]; - - let vm = MockVm::new(); - - let test_data = Rc::new(RefCell::new(vec![0usize])); - - let mut loader = MockLoader::empty(); - let exec_ch = loader.insert(|_ctx| { - test_data.borrow_mut().push(1); - exec_success() - }); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, exec_ch).unwrap(); - - assert_matches!( - ctx.call(BOB, value, &mut gas_meter, data), - Ok(_) - ); - }); - - assert_eq!(&*test_data.borrow(), &vec![0, 1]); - } - - #[test] - fn base_fees() { - let origin = ALICE; - let dest = BOB; - - // This test verifies that base fee for call is taken. 
- ExtBuilder::default().build().execute_with(|| { - let vm = MockVm::new(); - let loader = MockLoader::empty(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.call(dest, 0, &mut gas_meter, vec![]); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!(toks, ExecFeeToken::Call,); - }); - - // This test verifies that base fee for instantiation is taken. - ExtBuilder::default().build().execute_with(|| { - let mut loader = MockLoader::empty(); - let code = loader.insert(|_| exec_success()); - - let vm = MockVm::new(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - - ctx.overlay.set_balance(&origin, 100); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.instantiate(1, &mut gas_meter, &code, vec![]); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!(toks, ExecFeeToken::Instantiate,); - }); - } - - #[test] - fn transfer_works() { - // This test verifies that a contract is able to transfer - // some funds to another account. 
- let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let loader = MockLoader::empty(); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); - - let output = ctx.call( - dest, - 55, - &mut GasMeter::::with_limit(1000, 1), - vec![], - ).unwrap(); - - assert!(output.is_success()); - assert_eq!(ctx.overlay.get_balance(&origin), 45); - assert_eq!(ctx.overlay.get_balance(&dest), 55); - }); - } - - #[test] - fn changes_are_reverted_on_failing_call() { - // This test verifies that a contract is able to transfer - // some funds to another account. - let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( - |_| Ok(ExecReturnValue { status: 1, data: Vec::new() }) - ); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); - - let output = ctx.call( - dest, - 55, - &mut GasMeter::::with_limit(1000, 1), - vec![], - ).unwrap(); - - assert!(!output.is_success()); - assert_eq!(ctx.overlay.get_balance(&origin), 100); - assert_eq!(ctx.overlay.get_balance(&dest), 0); - }); - } - - #[test] - fn transfer_fees() { - let origin = ALICE; - let dest = BOB; - - // This test sends 50 units of currency to a non-existent account. - // This should lead to creation of a new account thus - // a fee should be charged. 
- ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let vm = MockVm::new(); - let loader = MockLoader::empty(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 0); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.call(dest, 50, &mut gas_meter, vec![]); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!( - toks, - ExecFeeToken::Call, - TransferFeeToken { - kind: TransferFeeKind::Transfer, - gas_price: 1u64 - }, - ); - }); - - // This one is similar to the previous one but transfer to an existing account. - // In this test we expect that a regular transfer fee is charged. - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let vm = MockVm::new(); - let loader = MockLoader::empty(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 15); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.call(dest, 50, &mut gas_meter, vec![]); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!( - toks, - ExecFeeToken::Call, - TransferFeeToken { - kind: TransferFeeKind::Transfer, - gas_price: 1u64 - }, - ); - }); - - // This test sends 50 units of currency as an endowment to a newly - // instantiated contract. 
- ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let mut loader = MockLoader::empty(); - let code = loader.insert(|_| exec_success()); - - let vm = MockVm::new(); - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - - ctx.overlay.set_balance(&origin, 100); - ctx.overlay.set_balance(&dest, 15); - - let mut gas_meter = GasMeter::::with_limit(1000, 1); - - let result = ctx.instantiate(50, &mut gas_meter, &code, vec![]); - assert_matches!(result, Ok(_)); - - let mut toks = gas_meter.tokens().iter(); - match_tokens!( - toks, - ExecFeeToken::Instantiate, - TransferFeeToken { - kind: TransferFeeKind::ContractInstantiate, - gas_price: 1u64 - }, - ); - }); - } - - #[test] - fn balance_too_low() { - // This test verifies that a contract can't send value if it's - // balance is too low. - let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let loader = MockLoader::empty(); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.set_balance(&origin, 0); - - let result = ctx.call( - dest, - 100, - &mut GasMeter::::with_limit(1000, 1), - vec![], - ); - - assert_matches!( - result, - Err(ExecError { - reason: DispatchError::Other("balance too low to send value"), - buffer: _, - }) - ); - assert_eq!(ctx.overlay.get_balance(&origin), 0); - assert_eq!(ctx.overlay.get_balance(&dest), 0); - }); - } - - #[test] - fn output_is_returned_on_success() { - // Verifies that if a contract returns data with a successful exit status, this data - // is returned from the execution context. 
- let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( - |_| Ok(ExecReturnValue { status: STATUS_SUCCESS, data: vec![1, 2, 3, 4] }) - ); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); - - let result = ctx.call( - dest, - 0, - &mut GasMeter::::with_limit(1000, 1), - vec![], - ); - - let output = result.unwrap(); - assert!(output.is_success()); - assert_eq!(output.data, vec![1, 2, 3, 4]); - }); - } - - #[test] - fn output_is_returned_on_failure() { - // Verifies that if a contract returns data with a failing exit status, this data - // is returned from the execution context. - let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let return_ch = loader.insert( - |_| Ok(ExecReturnValue { status: 1, data: vec![1, 2, 3, 4] }) - ); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); - - let result = ctx.call( - dest, - 0, - &mut GasMeter::::with_limit(1000, 1), - vec![], - ); - - let output = result.unwrap(); - assert!(!output.is_success()); - assert_eq!(output.data, vec![1, 2, 3, 4]); - }); - } - - #[test] - fn input_data_to_call() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let input_data_ch = loader.insert(|ctx| { - assert_eq!(ctx.input_data, &[1, 2, 3, 4]); - exec_success() - }); - - // This one tests passing the input data into a contract via call. 
- ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, input_data_ch).unwrap(); - - let result = ctx.call( - BOB, - 0, - &mut GasMeter::::with_limit(10000, 1), - vec![1, 2, 3, 4], - ); - assert_matches!(result, Ok(_)); - }); - } - - #[test] - fn input_data_to_instantiate() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let input_data_ch = loader.insert(|ctx| { - assert_eq!(ctx.input_data, &[1, 2, 3, 4]); - exec_success() - }); - - // This one tests passing the input data into a contract via instantiate. - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - - ctx.overlay.set_balance(&ALICE, 100); - - let result = ctx.instantiate( - 1, - &mut GasMeter::::with_limit(10000, 1), - &input_data_ch, - vec![1, 2, 3, 4], - ); - assert_matches!(result, Ok(_)); - }); - } - - #[test] - fn max_depth() { - // This test verifies that when we reach the maximal depth creation of an - // yet another context fails. - let value = Default::default(); - let reached_bottom = RefCell::new(false); - - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let recurse_ch = loader.insert(|ctx| { - // Try to call into yourself. - let r = ctx.ext.call(&BOB, 0, ctx.gas_meter, vec![]); - - let mut reached_bottom = reached_bottom.borrow_mut(); - if !*reached_bottom { - // We are first time here, it means we just reached bottom. - // Verify that we've got proper error and set `reached_bottom`. - assert_matches!( - r, - Err(ExecError { - reason: DispatchError::Other("reached maximum depth, cannot make a call"), - buffer: _, - }) - ); - *reached_bottom = true; - } else { - // We just unwinding stack here. 
- assert_matches!(r, Ok(_)); - } - - exec_success() - }); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&BOB, 1); - ctx.overlay.instantiate_contract(&BOB, recurse_ch).unwrap(); - - let result = ctx.call( - BOB, - value, - &mut GasMeter::::with_limit(100000, 1), - vec![], - ); - - assert_matches!(result, Ok(_)); - }); - } - - #[test] - fn caller_returns_proper_values() { - let origin = ALICE; - let dest = BOB; - - let vm = MockVm::new(); - - let witnessed_caller_bob = RefCell::new(None::); - let witnessed_caller_charlie = RefCell::new(None::); - - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { - // Record the caller for bob. - *witnessed_caller_bob.borrow_mut() = Some(*ctx.ext.caller()); - - // Call into CHARLIE contract. - assert_matches!( - ctx.ext.call(&CHARLIE, 0, ctx.gas_meter, vec![]), - Ok(_) - ); - exec_success() - }); - let charlie_ch = loader.insert(|ctx| { - // Record the caller for charlie. - *witnessed_caller_charlie.borrow_mut() = Some(*ctx.ext.caller()); - exec_success() - }); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - - let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&dest, bob_ch).unwrap(); - ctx.overlay.instantiate_contract(&CHARLIE, charlie_ch).unwrap(); - - let result = ctx.call( - dest, - 0, - &mut GasMeter::::with_limit(10000, 1), - vec![], - ); - - assert_matches!(result, Ok(_)); - }); - - assert_eq!(&*witnessed_caller_bob.borrow(), &Some(origin)); - assert_eq!(&*witnessed_caller_charlie.borrow(), &Some(dest)); - } - - #[test] - fn address_returns_proper_values() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let bob_ch = loader.insert(|ctx| { - // Verify that address matches BOB. - assert_eq!(*ctx.ext.address(), BOB); - - // Call into charlie contract. 
- assert_matches!( - ctx.ext.call(&CHARLIE, 0, ctx.gas_meter, vec![]), - Ok(_) - ); - exec_success() - }); - let charlie_ch = loader.insert(|ctx| { - assert_eq!(*ctx.ext.address(), CHARLIE); - exec_success() - }); - - ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.instantiate_contract(&BOB, bob_ch).unwrap(); - ctx.overlay.instantiate_contract(&CHARLIE, charlie_ch).unwrap(); - - let result = ctx.call( - BOB, - 0, - &mut GasMeter::::with_limit(10000, 1), - vec![], - ); - - assert_matches!(result, Ok(_)); - }); - } - - #[test] - fn refuse_instantiate_with_value_below_existential_deposit() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| exec_success()); - - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - - assert_matches!( - ctx.instantiate( - 0, // <- zero endowment - &mut GasMeter::::with_limit(10000, 1), - &dummy_ch, - vec![], - ), - Err(_) - ); - }); - } - - #[test] - fn instantiation_work_with_success_output() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( - |_| Ok(ExecReturnValue { status: STATUS_SUCCESS, data: vec![80, 65, 83, 83] }) - ); - - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - - let instantiated_contract_address = assert_matches!( - ctx.instantiate( - 100, - &mut GasMeter::::with_limit(10000, 1), - &dummy_ch, - vec![], - ), - Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address - ); - - // Check that the newly created account has the expected code hash and - // there are instantiation event. 
- assert_eq!(ctx.overlay.get_code_hash(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&ctx.events(), &[ - DeferredAction::DepositEvent { - event: RawEvent::Transfer(ALICE, instantiated_contract_address, 100), - topics: Vec::new(), - }, - DeferredAction::DepositEvent { - event: RawEvent::Instantiated(ALICE, instantiated_contract_address), - topics: Vec::new(), - } - ]); - }); - } - - #[test] - fn instantiation_fails_with_failing_output() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( - |_| Ok(ExecReturnValue { status: 1, data: vec![70, 65, 73, 76] }) - ); - - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - - let instantiated_contract_address = assert_matches!( - ctx.instantiate( - 100, - &mut GasMeter::::with_limit(10000, 1), - &dummy_ch, - vec![], - ), - Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address - ); - - // Check that the account has not been created. - assert!(ctx.overlay.get_code_hash(&instantiated_contract_address).is_none()); - assert!(ctx.events().is_empty()); - }); - } - - #[test] - fn instantiation_from_contract() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert(|_| exec_success()); - let instantiated_contract_address = Rc::new(RefCell::new(None::)); - let instantiator_ch = loader.insert({ - let dummy_ch = dummy_ch.clone(); - let instantiated_contract_address = Rc::clone(&instantiated_contract_address); - move |ctx| { - // Instantiate a contract and save it's address in `instantiated_contract_address`. 
- let (address, output) = ctx.ext.instantiate( - &dummy_ch, - 15u64, - ctx.gas_meter, - vec![] - ).unwrap(); - - *instantiated_contract_address.borrow_mut() = address.into(); - Ok(output) - } - }); - - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - ctx.overlay.set_balance(&BOB, 100); - ctx.overlay.instantiate_contract(&BOB, instantiator_ch).unwrap(); - - assert_matches!( - ctx.call(BOB, 20, &mut GasMeter::::with_limit(1000, 1), vec![]), - Ok(_) - ); - - let instantiated_contract_address = instantiated_contract_address.borrow().as_ref().unwrap().clone(); - - // Check that the newly created account has the expected code hash and - // there are instantiation event. - assert_eq!(ctx.overlay.get_code_hash(&instantiated_contract_address).unwrap(), dummy_ch); - assert_eq!(&ctx.events(), &[ - DeferredAction::DepositEvent { - event: RawEvent::Transfer(ALICE, BOB, 20), - topics: Vec::new(), - }, - DeferredAction::DepositEvent { - event: RawEvent::Transfer(BOB, instantiated_contract_address, 15), - topics: Vec::new(), - }, - DeferredAction::DepositEvent { - event: RawEvent::Instantiated(BOB, instantiated_contract_address), - topics: Vec::new(), - }, - ]); - }); - } - - #[test] - fn instantiation_traps() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - let dummy_ch = loader.insert( - |_| Err(ExecError { reason: "It's a trap!".into(), buffer: Vec::new() }) - ); - let instantiator_ch = loader.insert({ - let dummy_ch = dummy_ch.clone(); - move |ctx| { - // Instantiate a contract and save it's address in `instantiated_contract_address`. 
- assert_matches!( - ctx.ext.instantiate( - &dummy_ch, - 15u64, - ctx.gas_meter, - vec![] - ), - Err(ExecError { reason: DispatchError::Other("It's a trap!"), buffer: _ }) - ); - - exec_success() - } - }); - - ExtBuilder::default().existential_deposit(15).build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - ctx.overlay.set_balance(&BOB, 100); - ctx.overlay.instantiate_contract(&BOB, instantiator_ch).unwrap(); - - assert_matches!( - ctx.call(BOB, 20, &mut GasMeter::::with_limit(1000, 1), vec![]), - Ok(_) - ); - - // The contract wasn't instantiated so we don't expect to see an instantiation - // event here. - assert_eq!(&ctx.events(), &[ - DeferredAction::DepositEvent { - event: RawEvent::Transfer(ALICE, BOB, 20), - topics: Vec::new(), - }, - ]); - }); - } - - #[test] - fn termination_from_instantiate_fails() { - let vm = MockVm::new(); - - let mut loader = MockLoader::empty(); - - let terminate_ch = loader.insert(|mut ctx| { - ctx.ext.terminate(&ALICE, &mut ctx.gas_meter).unwrap(); - exec_success() - }); - - ExtBuilder::default() - .existential_deposit(15) - .build() - .execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - ctx.overlay.set_balance(&ALICE, 1000); - - assert_matches!( - ctx.instantiate( - 100, - &mut GasMeter::::with_limit(10000, 1), - &terminate_ch, - vec![], - ), - Err(ExecError { - reason: DispatchError::Other("insufficient remaining balance"), - buffer - }) if buffer == Vec::::new() - ); - - assert_eq!( - &ctx.events(), - &[] - ); - }); - } - - #[test] - fn rent_allowance() { - let vm = MockVm::new(); - let mut loader = MockLoader::empty(); - let rent_allowance_ch = loader.insert(|ctx| { - assert_eq!(ctx.ext.rent_allowance(), >::max_value()); - ctx.ext.set_rent_allowance(10); - assert_eq!(ctx.ext.rent_allowance(), 10); - exec_success() - }); - - 
ExtBuilder::default().build().execute_with(|| { - let cfg = Config::preload(); - let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); - - ctx.overlay.set_balance(&ALICE, 100); - - let result = ctx.instantiate( - 1, - &mut GasMeter::::with_limit(10000, 1), - &rent_allowance_ch, - vec![], - ); - assert_matches!(result, Ok(_)); - }); - } + use super::{ + BalanceOf, DeferredAction, ExecFeeToken, ExecResult, ExecutionContext, Ext, Loader, + RawEvent, TransferFeeKind, TransferFeeToken, Vm, + }; + use crate::{ + account_db::AccountDb, + exec::{ExecError, ExecReturnValue, STATUS_SUCCESS}, + gas::GasMeter, + tests::{ExtBuilder, Test}, + CodeHash, Config, + }; + use assert_matches::assert_matches; + use sp_runtime::DispatchError; + use std::{cell::RefCell, collections::HashMap, marker::PhantomData, rc::Rc}; + + const ALICE: u64 = 1; + const BOB: u64 = 2; + const CHARLIE: u64 = 3; + + impl<'a, T, V, L> ExecutionContext<'a, T, V, L> + where + T: crate::Trait, + { + fn events(&self) -> Vec> { + self.deferred + .iter() + .filter(|action| match *action { + DeferredAction::DepositEvent { .. } => true, + _ => false, + }) + .cloned() + .collect() + } + } + + struct MockCtx<'a> { + ext: &'a mut dyn Ext, + input_data: Vec, + gas_meter: &'a mut GasMeter, + } + + #[derive(Clone)] + struct MockExecutable<'a>(Rc ExecResult + 'a>); + + impl<'a> MockExecutable<'a> { + fn new(f: impl Fn(MockCtx) -> ExecResult + 'a) -> Self { + MockExecutable(Rc::new(f)) + } + } + + struct MockLoader<'a> { + map: HashMap, MockExecutable<'a>>, + counter: u64, + } + + impl<'a> MockLoader<'a> { + fn empty() -> Self { + MockLoader { + map: HashMap::new(), + counter: 0, + } + } + + fn insert(&mut self, f: impl Fn(MockCtx) -> ExecResult + 'a) -> CodeHash { + // Generate code hashes as monotonically increasing values. 
+ let code_hash = ::Hash::from_low_u64_be(self.counter); + + self.counter += 1; + self.map.insert(code_hash, MockExecutable::new(f)); + code_hash + } + } + + struct MockVm<'a> { + _marker: PhantomData<&'a ()>, + } + + impl<'a> MockVm<'a> { + fn new() -> Self { + MockVm { + _marker: PhantomData, + } + } + } + + impl<'a> Loader for MockLoader<'a> { + type Executable = MockExecutable<'a>; + + fn load_init(&self, code_hash: &CodeHash) -> Result { + self.map + .get(code_hash) + .cloned() + .ok_or_else(|| "code not found") + } + fn load_main(&self, code_hash: &CodeHash) -> Result { + self.map + .get(code_hash) + .cloned() + .ok_or_else(|| "code not found") + } + } + + impl<'a> Vm for MockVm<'a> { + type Executable = MockExecutable<'a>; + + fn execute>( + &self, + exec: &MockExecutable, + mut ext: E, + input_data: Vec, + gas_meter: &mut GasMeter, + ) -> ExecResult { + (exec.0)(MockCtx { + ext: &mut ext, + input_data, + gas_meter, + }) + } + } + + fn exec_success() -> ExecResult { + Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }) + } + + #[test] + fn it_works() { + let value = Default::default(); + let mut gas_meter = GasMeter::::with_limit(10000, 1); + let data = vec![]; + + let vm = MockVm::new(); + + let test_data = Rc::new(RefCell::new(vec![0usize])); + + let mut loader = MockLoader::empty(); + let exec_ch = loader.insert(|_ctx| { + test_data.borrow_mut().push(1); + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.instantiate_contract(&BOB, exec_ch).unwrap(); + + assert_matches!(ctx.call(BOB, value, &mut gas_meter, data), Ok(_)); + }); + + assert_eq!(&*test_data.borrow(), &vec![0, 1]); + } + + #[test] + fn base_fees() { + let origin = ALICE; + let dest = BOB; + + // This test verifies that base fee for call is taken. 
+ ExtBuilder::default().build().execute_with(|| { + let vm = MockVm::new(); + let loader = MockLoader::empty(); + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.set_balance(&origin, 100); + ctx.overlay.set_balance(&dest, 0); + + let mut gas_meter = GasMeter::::with_limit(1000, 1); + + let result = ctx.call(dest, 0, &mut gas_meter, vec![]); + assert_matches!(result, Ok(_)); + + let mut toks = gas_meter.tokens().iter(); + match_tokens!(toks, ExecFeeToken::Call,); + }); + + // This test verifies that base fee for instantiation is taken. + ExtBuilder::default().build().execute_with(|| { + let mut loader = MockLoader::empty(); + let code = loader.insert(|_| exec_success()); + + let vm = MockVm::new(); + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + + ctx.overlay.set_balance(&origin, 100); + + let mut gas_meter = GasMeter::::with_limit(1000, 1); + + let result = ctx.instantiate(1, &mut gas_meter, &code, vec![]); + assert_matches!(result, Ok(_)); + + let mut toks = gas_meter.tokens().iter(); + match_tokens!(toks, ExecFeeToken::Instantiate,); + }); + } + + #[test] + fn transfer_works() { + // This test verifies that a contract is able to transfer + // some funds to another account. 
+ let origin = ALICE; + let dest = BOB; + + let vm = MockVm::new(); + let loader = MockLoader::empty(); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.set_balance(&origin, 100); + ctx.overlay.set_balance(&dest, 0); + + let output = ctx + .call(dest, 55, &mut GasMeter::::with_limit(1000, 1), vec![]) + .unwrap(); + + assert!(output.is_success()); + assert_eq!(ctx.overlay.get_balance(&origin), 45); + assert_eq!(ctx.overlay.get_balance(&dest), 55); + }); + } + + #[test] + fn changes_are_reverted_on_failing_call() { + // This test verifies that a contract is able to transfer + // some funds to another account. + let origin = ALICE; + let dest = BOB; + + let vm = MockVm::new(); + let mut loader = MockLoader::empty(); + let return_ch = loader.insert(|_| { + Ok(ExecReturnValue { + status: 1, + data: Vec::new(), + }) + }); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); + ctx.overlay.set_balance(&origin, 100); + ctx.overlay.set_balance(&dest, 0); + + let output = ctx + .call(dest, 55, &mut GasMeter::::with_limit(1000, 1), vec![]) + .unwrap(); + + assert!(!output.is_success()); + assert_eq!(ctx.overlay.get_balance(&origin), 100); + assert_eq!(ctx.overlay.get_balance(&dest), 0); + }); + } + + #[test] + fn transfer_fees() { + let origin = ALICE; + let dest = BOB; + + // This test sends 50 units of currency to a non-existent account. + // This should lead to creation of a new account thus + // a fee should be charged. 
+ ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let vm = MockVm::new(); + let loader = MockLoader::empty(); + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.set_balance(&origin, 100); + ctx.overlay.set_balance(&dest, 0); + + let mut gas_meter = GasMeter::::with_limit(1000, 1); + + let result = ctx.call(dest, 50, &mut gas_meter, vec![]); + assert_matches!(result, Ok(_)); + + let mut toks = gas_meter.tokens().iter(); + match_tokens!( + toks, + ExecFeeToken::Call, + TransferFeeToken { + kind: TransferFeeKind::Transfer, + gas_price: 1u64 + }, + ); + }); + + // This one is similar to the previous one but transfer to an existing account. + // In this test we expect that a regular transfer fee is charged. + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let vm = MockVm::new(); + let loader = MockLoader::empty(); + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.set_balance(&origin, 100); + ctx.overlay.set_balance(&dest, 15); + + let mut gas_meter = GasMeter::::with_limit(1000, 1); + + let result = ctx.call(dest, 50, &mut gas_meter, vec![]); + assert_matches!(result, Ok(_)); + + let mut toks = gas_meter.tokens().iter(); + match_tokens!( + toks, + ExecFeeToken::Call, + TransferFeeToken { + kind: TransferFeeKind::Transfer, + gas_price: 1u64 + }, + ); + }); + + // This test sends 50 units of currency as an endowment to a newly + // instantiated contract. 
+ ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let mut loader = MockLoader::empty(); + let code = loader.insert(|_| exec_success()); + + let vm = MockVm::new(); + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + + ctx.overlay.set_balance(&origin, 100); + ctx.overlay.set_balance(&dest, 15); + + let mut gas_meter = GasMeter::::with_limit(1000, 1); + + let result = ctx.instantiate(50, &mut gas_meter, &code, vec![]); + assert_matches!(result, Ok(_)); + + let mut toks = gas_meter.tokens().iter(); + match_tokens!( + toks, + ExecFeeToken::Instantiate, + TransferFeeToken { + kind: TransferFeeKind::ContractInstantiate, + gas_price: 1u64 + }, + ); + }); + } + + #[test] + fn balance_too_low() { + // This test verifies that a contract can't send value if it's + // balance is too low. + let origin = ALICE; + let dest = BOB; + + let vm = MockVm::new(); + let loader = MockLoader::empty(); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.set_balance(&origin, 0); + + let result = ctx.call( + dest, + 100, + &mut GasMeter::::with_limit(1000, 1), + vec![], + ); + + assert_matches!( + result, + Err(ExecError { + reason: DispatchError::Other("balance too low to send value"), + buffer: _, + }) + ); + assert_eq!(ctx.overlay.get_balance(&origin), 0); + assert_eq!(ctx.overlay.get_balance(&dest), 0); + }); + } + + #[test] + fn output_is_returned_on_success() { + // Verifies that if a contract returns data with a successful exit status, this data + // is returned from the execution context. 
+ let origin = ALICE; + let dest = BOB; + + let vm = MockVm::new(); + let mut loader = MockLoader::empty(); + let return_ch = loader.insert(|_| { + Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: vec![1, 2, 3, 4], + }) + }); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); + + let result = ctx.call(dest, 0, &mut GasMeter::::with_limit(1000, 1), vec![]); + + let output = result.unwrap(); + assert!(output.is_success()); + assert_eq!(output.data, vec![1, 2, 3, 4]); + }); + } + + #[test] + fn output_is_returned_on_failure() { + // Verifies that if a contract returns data with a failing exit status, this data + // is returned from the execution context. + let origin = ALICE; + let dest = BOB; + + let vm = MockVm::new(); + let mut loader = MockLoader::empty(); + let return_ch = loader.insert(|_| { + Ok(ExecReturnValue { + status: 1, + data: vec![1, 2, 3, 4], + }) + }); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.instantiate_contract(&BOB, return_ch).unwrap(); + + let result = ctx.call(dest, 0, &mut GasMeter::::with_limit(1000, 1), vec![]); + + let output = result.unwrap(); + assert!(!output.is_success()); + assert_eq!(output.data, vec![1, 2, 3, 4]); + }); + } + + #[test] + fn input_data_to_call() { + let vm = MockVm::new(); + let mut loader = MockLoader::empty(); + let input_data_ch = loader.insert(|ctx| { + assert_eq!(ctx.input_data, &[1, 2, 3, 4]); + exec_success() + }); + + // This one tests passing the input data into a contract via call. 
+ ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay + .instantiate_contract(&BOB, input_data_ch) + .unwrap(); + + let result = ctx.call( + BOB, + 0, + &mut GasMeter::::with_limit(10000, 1), + vec![1, 2, 3, 4], + ); + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn input_data_to_instantiate() { + let vm = MockVm::new(); + let mut loader = MockLoader::empty(); + let input_data_ch = loader.insert(|ctx| { + assert_eq!(ctx.input_data, &[1, 2, 3, 4]); + exec_success() + }); + + // This one tests passing the input data into a contract via instantiate. + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + + ctx.overlay.set_balance(&ALICE, 100); + + let result = ctx.instantiate( + 1, + &mut GasMeter::::with_limit(10000, 1), + &input_data_ch, + vec![1, 2, 3, 4], + ); + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn max_depth() { + // This test verifies that when we reach the maximal depth creation of an + // yet another context fails. + let value = Default::default(); + let reached_bottom = RefCell::new(false); + + let vm = MockVm::new(); + let mut loader = MockLoader::empty(); + let recurse_ch = loader.insert(|ctx| { + // Try to call into yourself. + let r = ctx.ext.call(&BOB, 0, ctx.gas_meter, vec![]); + + let mut reached_bottom = reached_bottom.borrow_mut(); + if !*reached_bottom { + // We are first time here, it means we just reached bottom. + // Verify that we've got proper error and set `reached_bottom`. + assert_matches!( + r, + Err(ExecError { + reason: DispatchError::Other("reached maximum depth, cannot make a call"), + buffer: _, + }) + ); + *reached_bottom = true; + } else { + // We just unwinding stack here. 
+ assert_matches!(r, Ok(_)); + } + + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&BOB, 1); + ctx.overlay.instantiate_contract(&BOB, recurse_ch).unwrap(); + + let result = ctx.call( + BOB, + value, + &mut GasMeter::::with_limit(100000, 1), + vec![], + ); + + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn caller_returns_proper_values() { + let origin = ALICE; + let dest = BOB; + + let vm = MockVm::new(); + + let witnessed_caller_bob = RefCell::new(None::); + let witnessed_caller_charlie = RefCell::new(None::); + + let mut loader = MockLoader::empty(); + let bob_ch = loader.insert(|ctx| { + // Record the caller for bob. + *witnessed_caller_bob.borrow_mut() = Some(*ctx.ext.caller()); + + // Call into CHARLIE contract. + assert_matches!(ctx.ext.call(&CHARLIE, 0, ctx.gas_meter, vec![]), Ok(_)); + exec_success() + }); + let charlie_ch = loader.insert(|ctx| { + // Record the caller for charlie. + *witnessed_caller_charlie.borrow_mut() = Some(*ctx.ext.caller()); + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + + let mut ctx = ExecutionContext::top_level(origin, &cfg, &vm, &loader); + ctx.overlay.instantiate_contract(&dest, bob_ch).unwrap(); + ctx.overlay + .instantiate_contract(&CHARLIE, charlie_ch) + .unwrap(); + + let result = ctx.call(dest, 0, &mut GasMeter::::with_limit(10000, 1), vec![]); + + assert_matches!(result, Ok(_)); + }); + + assert_eq!(&*witnessed_caller_bob.borrow(), &Some(origin)); + assert_eq!(&*witnessed_caller_charlie.borrow(), &Some(dest)); + } + + #[test] + fn address_returns_proper_values() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let bob_ch = loader.insert(|ctx| { + // Verify that address matches BOB. + assert_eq!(*ctx.ext.address(), BOB); + + // Call into charlie contract. 
+ assert_matches!(ctx.ext.call(&CHARLIE, 0, ctx.gas_meter, vec![]), Ok(_)); + exec_success() + }); + let charlie_ch = loader.insert(|ctx| { + assert_eq!(*ctx.ext.address(), CHARLIE); + exec_success() + }); + + ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.instantiate_contract(&BOB, bob_ch).unwrap(); + ctx.overlay + .instantiate_contract(&CHARLIE, charlie_ch) + .unwrap(); + + let result = ctx.call(BOB, 0, &mut GasMeter::::with_limit(10000, 1), vec![]); + + assert_matches!(result, Ok(_)); + }); + } + + #[test] + fn refuse_instantiate_with_value_below_existential_deposit() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| exec_success()); + + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + + assert_matches!( + ctx.instantiate( + 0, // <- zero endowment + &mut GasMeter::::with_limit(10000, 1), + &dummy_ch, + vec![], + ), + Err(_) + ); + }); + } + + #[test] + fn instantiation_work_with_success_output() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| { + Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: vec![80, 65, 83, 83], + }) + }); + + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + + let instantiated_contract_address = assert_matches!( + ctx.instantiate( + 100, + &mut GasMeter::::with_limit(10000, 1), + &dummy_ch, + vec![], + ), + Ok((address, ref output)) if output.data == vec![80, 65, 83, 83] => address + ); + + // Check that the newly created account has the expected code hash and + // there are instantiation event. 
+ assert_eq!( + ctx.overlay + .get_code_hash(&instantiated_contract_address) + .unwrap(), + dummy_ch + ); + assert_eq!( + &ctx.events(), + &[ + DeferredAction::DepositEvent { + event: RawEvent::Transfer(ALICE, instantiated_contract_address, 100), + topics: Vec::new(), + }, + DeferredAction::DepositEvent { + event: RawEvent::Instantiated(ALICE, instantiated_contract_address), + topics: Vec::new(), + } + ] + ); + }); + } + + #[test] + fn instantiation_fails_with_failing_output() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| { + Ok(ExecReturnValue { + status: 1, + data: vec![70, 65, 73, 76], + }) + }); + + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + + let instantiated_contract_address = assert_matches!( + ctx.instantiate( + 100, + &mut GasMeter::::with_limit(10000, 1), + &dummy_ch, + vec![], + ), + Ok((address, ref output)) if output.data == vec![70, 65, 73, 76] => address + ); + + // Check that the account has not been created. + assert!(ctx + .overlay + .get_code_hash(&instantiated_contract_address) + .is_none()); + assert!(ctx.events().is_empty()); + }); + } + + #[test] + fn instantiation_from_contract() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| exec_success()); + let instantiated_contract_address = Rc::new(RefCell::new(None::)); + let instantiator_ch = loader.insert({ + let dummy_ch = dummy_ch.clone(); + let instantiated_contract_address = Rc::clone(&instantiated_contract_address); + move |ctx| { + // Instantiate a contract and save it's address in `instantiated_contract_address`. 
+ let (address, output) = ctx + .ext + .instantiate(&dummy_ch, 15u64, ctx.gas_meter, vec![]) + .unwrap(); + + *instantiated_contract_address.borrow_mut() = address.into(); + Ok(output) + } + }); + + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + ctx.overlay.set_balance(&BOB, 100); + ctx.overlay + .instantiate_contract(&BOB, instantiator_ch) + .unwrap(); + + assert_matches!( + ctx.call(BOB, 20, &mut GasMeter::::with_limit(1000, 1), vec![]), + Ok(_) + ); + + let instantiated_contract_address = instantiated_contract_address + .borrow() + .as_ref() + .unwrap() + .clone(); + + // Check that the newly created account has the expected code hash and + // there are instantiation event. + assert_eq!( + ctx.overlay + .get_code_hash(&instantiated_contract_address) + .unwrap(), + dummy_ch + ); + assert_eq!( + &ctx.events(), + &[ + DeferredAction::DepositEvent { + event: RawEvent::Transfer(ALICE, BOB, 20), + topics: Vec::new(), + }, + DeferredAction::DepositEvent { + event: RawEvent::Transfer(BOB, instantiated_contract_address, 15), + topics: Vec::new(), + }, + DeferredAction::DepositEvent { + event: RawEvent::Instantiated(BOB, instantiated_contract_address), + topics: Vec::new(), + }, + ] + ); + }); + } + + #[test] + fn instantiation_traps() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + let dummy_ch = loader.insert(|_| { + Err(ExecError { + reason: "It's a trap!".into(), + buffer: Vec::new(), + }) + }); + let instantiator_ch = loader.insert({ + let dummy_ch = dummy_ch.clone(); + move |ctx| { + // Instantiate a contract and save it's address in `instantiated_contract_address`. 
+ assert_matches!( + ctx.ext.instantiate(&dummy_ch, 15u64, ctx.gas_meter, vec![]), + Err(ExecError { + reason: DispatchError::Other("It's a trap!"), + buffer: _, + }) + ); + + exec_success() + } + }); + + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + ctx.overlay.set_balance(&BOB, 100); + ctx.overlay + .instantiate_contract(&BOB, instantiator_ch) + .unwrap(); + + assert_matches!( + ctx.call(BOB, 20, &mut GasMeter::::with_limit(1000, 1), vec![]), + Ok(_) + ); + + // The contract wasn't instantiated so we don't expect to see an instantiation + // event here. + assert_eq!( + &ctx.events(), + &[DeferredAction::DepositEvent { + event: RawEvent::Transfer(ALICE, BOB, 20), + topics: Vec::new(), + },] + ); + }); + } + + #[test] + fn termination_from_instantiate_fails() { + let vm = MockVm::new(); + + let mut loader = MockLoader::empty(); + + let terminate_ch = loader.insert(|mut ctx| { + ctx.ext.terminate(&ALICE, &mut ctx.gas_meter).unwrap(); + exec_success() + }); + + ExtBuilder::default() + .existential_deposit(15) + .build() + .execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + ctx.overlay.set_balance(&ALICE, 1000); + + assert_matches!( + ctx.instantiate( + 100, + &mut GasMeter::::with_limit(10000, 1), + &terminate_ch, + vec![], + ), + Err(ExecError { + reason: DispatchError::Other("insufficient remaining balance"), + buffer + }) if buffer == Vec::::new() + ); + + assert_eq!(&ctx.events(), &[]); + }); + } + + #[test] + fn rent_allowance() { + let vm = MockVm::new(); + let mut loader = MockLoader::empty(); + let rent_allowance_ch = loader.insert(|ctx| { + assert_eq!(ctx.ext.rent_allowance(), >::max_value()); + ctx.ext.set_rent_allowance(10); + assert_eq!(ctx.ext.rent_allowance(), 10); + exec_success() + }); + + 
ExtBuilder::default().build().execute_with(|| { + let cfg = Config::preload(); + let mut ctx = ExecutionContext::top_level(ALICE, &cfg, &vm, &loader); + + ctx.overlay.set_balance(&ALICE, 100); + + let result = ctx.instantiate( + 1, + &mut GasMeter::::with_limit(10000, 1), + &rent_allowance_ch, + vec![], + ); + assert_matches!(result, Ok(_)); + }); + } } diff --git a/frame/contracts/src/gas.rs b/frame/contracts/src/gas.rs index 362f15f3aa..afabe454cf 100644 --- a/frame/contracts/src/gas.rs +++ b/frame/contracts/src/gas.rs @@ -14,15 +14,16 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use crate::{GasSpent, Module, Trait, BalanceOf, NegativeImbalanceOf}; -use sp_std::convert::TryFrom; -use sp_runtime::traits::{ - CheckedMul, Zero, SaturatedConversion, AtLeast32Bit, UniqueSaturatedInto, -}; +use crate::{BalanceOf, GasSpent, Module, NegativeImbalanceOf, Trait}; use frame_support::{ - traits::{Currency, ExistenceRequirement, Imbalance, OnUnbalanced, WithdrawReason}, StorageValue, - dispatch::DispatchError, + dispatch::DispatchError, + traits::{Currency, ExistenceRequirement, Imbalance, OnUnbalanced, WithdrawReason}, + StorageValue, }; +use sp_runtime::traits::{ + AtLeast32Bit, CheckedMul, SaturatedConversion, UniqueSaturatedInto, Zero, +}; +use sp_std::convert::TryFrom; #[cfg(test)] use std::{any::Any, fmt::Debug}; @@ -34,17 +35,17 @@ pub type Gas = u64; #[must_use] #[derive(Debug, PartialEq, Eq)] pub enum GasMeterResult { - Proceed, - OutOfGas, + Proceed, + OutOfGas, } impl GasMeterResult { - pub fn is_out_of_gas(&self) -> bool { - match *self { - GasMeterResult::OutOfGas => true, - GasMeterResult::Proceed => false, - } - } + pub fn is_out_of_gas(&self) -> bool { + match *self { + GasMeterResult::OutOfGas => true, + GasMeterResult::Proceed => false, + } + } } #[cfg(not(test))] @@ -64,135 +65,135 @@ impl TestAuxiliaries for T {} /// for consistency). 
If inlined there should be no observable difference compared /// to a hand-written code. pub trait Token: Copy + Clone + TestAuxiliaries { - /// Metadata type, which the token can require for calculating the amount - /// of gas to charge. Can be a some configuration type or - /// just the `()`. - type Metadata; - - /// Calculate amount of gas that should be taken by this token. - /// - /// This function should be really lightweight and must not fail. It is not - /// expected that implementors will query the storage or do any kinds of heavy operations. - /// - /// That said, implementors of this function still can run into overflows - /// while calculating the amount. In this case it is ok to use saturating operations - /// since on overflow they will return `max_value` which should consume all gas. - fn calculate_amount(&self, metadata: &Self::Metadata) -> Gas; + /// Metadata type, which the token can require for calculating the amount + /// of gas to charge. Can be a some configuration type or + /// just the `()`. + type Metadata; + + /// Calculate amount of gas that should be taken by this token. + /// + /// This function should be really lightweight and must not fail. It is not + /// expected that implementors will query the storage or do any kinds of heavy operations. + /// + /// That said, implementors of this function still can run into overflows + /// while calculating the amount. In this case it is ok to use saturating operations + /// since on overflow they will return `max_value` which should consume all gas. + fn calculate_amount(&self, metadata: &Self::Metadata) -> Gas; } /// A wrapper around a type-erased trait object of what used to be a `Token`. #[cfg(test)] pub struct ErasedToken { - pub description: String, - pub token: Box, + pub description: String, + pub token: Box, } pub struct GasMeter { - limit: Gas, - /// Amount of gas left from initial gas limit. Can reach zero. 
- gas_left: Gas, - gas_price: BalanceOf, + limit: Gas, + /// Amount of gas left from initial gas limit. Can reach zero. + gas_left: Gas, + gas_price: BalanceOf, - #[cfg(test)] - tokens: Vec, + #[cfg(test)] + tokens: Vec, } impl GasMeter { - pub fn with_limit(gas_limit: Gas, gas_price: BalanceOf) -> GasMeter { - GasMeter { - limit: gas_limit, - gas_left: gas_limit, - gas_price, - #[cfg(test)] - tokens: Vec::new(), - } - } - - /// Account for used gas. - /// - /// Amount is calculated by the given `token`. - /// - /// Returns `OutOfGas` if there is not enough gas or addition of the specified - /// amount of gas has lead to overflow. On success returns `Proceed`. - /// - /// NOTE that amount is always consumed, i.e. if there is not enough gas - /// then the counter will be set to zero. - #[inline] - pub fn charge>( - &mut self, - metadata: &Tok::Metadata, - token: Tok, - ) -> GasMeterResult { - #[cfg(test)] - { - // Unconditionally add the token to the storage. - let erased_tok = ErasedToken { - description: format!("{:?}", token), - token: Box::new(token), - }; - self.tokens.push(erased_tok); - } - - let amount = token.calculate_amount(metadata); - let new_value = match self.gas_left.checked_sub(amount) { - None => None, - Some(val) => Some(val), - }; - - // We always consume the gas even if there is not enough gas. - self.gas_left = new_value.unwrap_or_else(Zero::zero); - - match new_value { - Some(_) => GasMeterResult::Proceed, - None => GasMeterResult::OutOfGas, - } - } - - /// Allocate some amount of gas and perform some work with - /// a newly created nested gas meter. - /// - /// Invokes `f` with either the gas meter that has `amount` gas left or - /// with `None`, if this gas meter has not enough gas to allocate given `amount`. - /// - /// All unused gas in the nested gas meter is returned to this gas meter. 
- pub fn with_nested>) -> R>( - &mut self, - amount: Gas, - f: F, - ) -> R { - // NOTE that it is ok to allocate all available gas since it still ensured - // by `charge` that it doesn't reach zero. - if self.gas_left < amount { - f(None) - } else { - self.gas_left = self.gas_left - amount; - let mut nested = GasMeter::with_limit(amount, self.gas_price); - - let r = f(Some(&mut nested)); - - self.gas_left = self.gas_left + nested.gas_left; - - r - } - } - - pub fn gas_price(&self) -> BalanceOf { - self.gas_price - } - - /// Returns how much gas left from the initial budget. - pub fn gas_left(&self) -> Gas { - self.gas_left - } - - /// Returns how much gas was spent. - fn spent(&self) -> Gas { - self.limit - self.gas_left - } - - #[cfg(test)] - pub fn tokens(&self) -> &[ErasedToken] { - &self.tokens - } + pub fn with_limit(gas_limit: Gas, gas_price: BalanceOf) -> GasMeter { + GasMeter { + limit: gas_limit, + gas_left: gas_limit, + gas_price, + #[cfg(test)] + tokens: Vec::new(), + } + } + + /// Account for used gas. + /// + /// Amount is calculated by the given `token`. + /// + /// Returns `OutOfGas` if there is not enough gas or addition of the specified + /// amount of gas has lead to overflow. On success returns `Proceed`. + /// + /// NOTE that amount is always consumed, i.e. if there is not enough gas + /// then the counter will be set to zero. + #[inline] + pub fn charge>( + &mut self, + metadata: &Tok::Metadata, + token: Tok, + ) -> GasMeterResult { + #[cfg(test)] + { + // Unconditionally add the token to the storage. + let erased_tok = ErasedToken { + description: format!("{:?}", token), + token: Box::new(token), + }; + self.tokens.push(erased_tok); + } + + let amount = token.calculate_amount(metadata); + let new_value = match self.gas_left.checked_sub(amount) { + None => None, + Some(val) => Some(val), + }; + + // We always consume the gas even if there is not enough gas. 
+ self.gas_left = new_value.unwrap_or_else(Zero::zero); + + match new_value { + Some(_) => GasMeterResult::Proceed, + None => GasMeterResult::OutOfGas, + } + } + + /// Allocate some amount of gas and perform some work with + /// a newly created nested gas meter. + /// + /// Invokes `f` with either the gas meter that has `amount` gas left or + /// with `None`, if this gas meter has not enough gas to allocate given `amount`. + /// + /// All unused gas in the nested gas meter is returned to this gas meter. + pub fn with_nested>) -> R>( + &mut self, + amount: Gas, + f: F, + ) -> R { + // NOTE that it is ok to allocate all available gas since it still ensured + // by `charge` that it doesn't reach zero. + if self.gas_left < amount { + f(None) + } else { + self.gas_left = self.gas_left - amount; + let mut nested = GasMeter::with_limit(amount, self.gas_price); + + let r = f(Some(&mut nested)); + + self.gas_left = self.gas_left + nested.gas_left; + + r + } + } + + pub fn gas_price(&self) -> BalanceOf { + self.gas_price + } + + /// Returns how much gas left from the initial budget. + pub fn gas_left(&self) -> Gas { + self.gas_left + } + + /// Returns how much gas was spent. + fn spent(&self) -> Gas { + self.limit - self.gas_left + } + + #[cfg(test)] + pub fn tokens(&self) -> &[ErasedToken] { + &self.tokens + } } /// Buy the given amount of gas. @@ -200,61 +201,63 @@ impl GasMeter { /// Cost is calculated by multiplying the gas cost (taken from the storage) by the `gas_limit`. /// The funds are deducted from `transactor`. pub fn buy_gas( - transactor: &T::AccountId, - gas_limit: Gas, + transactor: &T::AccountId, + gas_limit: Gas, ) -> Result<(GasMeter, NegativeImbalanceOf), DispatchError> { - // Buy the specified amount of gas. - let gas_price = >::gas_price(); - let cost = if gas_price.is_zero() { - >::zero() - } else { - as TryFrom>::try_from(gas_limit).ok() - .and_then(|gas_limit| gas_price.checked_mul(&gas_limit)) - .ok_or("overflow multiplying gas limit by price")? 
- }; - - let imbalance = T::Currency::withdraw( - transactor, - cost, - WithdrawReason::Fee.into(), - ExistenceRequirement::KeepAlive - )?; - - Ok((GasMeter::with_limit(gas_limit, gas_price), imbalance)) + // Buy the specified amount of gas. + let gas_price = >::gas_price(); + let cost = if gas_price.is_zero() { + >::zero() + } else { + as TryFrom>::try_from(gas_limit) + .ok() + .and_then(|gas_limit| gas_price.checked_mul(&gas_limit)) + .ok_or("overflow multiplying gas limit by price")? + }; + + let imbalance = T::Currency::withdraw( + transactor, + cost, + WithdrawReason::Fee.into(), + ExistenceRequirement::KeepAlive, + )?; + + Ok((GasMeter::with_limit(gas_limit, gas_price), imbalance)) } /// Refund the unused gas. pub fn refund_unused_gas( - transactor: &T::AccountId, - gas_meter: GasMeter, - imbalance: NegativeImbalanceOf, + transactor: &T::AccountId, + gas_meter: GasMeter, + imbalance: NegativeImbalanceOf, ) { - let gas_spent = gas_meter.spent(); - let gas_left = gas_meter.gas_left(); - - // Increase total spent gas. - // This cannot overflow, since `gas_spent` is never greater than `block_gas_limit`, which - // also has Gas type. - GasSpent::mutate(|block_gas_spent| *block_gas_spent += gas_spent); - - // Refund gas left by the price it was bought at. - let refund = gas_meter.gas_price * gas_left.unique_saturated_into(); - let refund_imbalance = T::Currency::deposit_creating(transactor, refund); - if let Ok(imbalance) = imbalance.offset(refund_imbalance) { - T::GasPayment::on_unbalanced(imbalance); - } + let gas_spent = gas_meter.spent(); + let gas_left = gas_meter.gas_left(); + + // Increase total spent gas. + // This cannot overflow, since `gas_spent` is never greater than `block_gas_limit`, which + // also has Gas type. + GasSpent::mutate(|block_gas_spent| *block_gas_spent += gas_spent); + + // Refund gas left by the price it was bought at. 
+ let refund = gas_meter.gas_price * gas_left.unique_saturated_into(); + let refund_imbalance = T::Currency::deposit_creating(transactor, refund); + if let Ok(imbalance) = imbalance.offset(refund_imbalance) { + T::GasPayment::on_unbalanced(imbalance); + } } /// A little handy utility for converting a value in balance units into approximate value in gas units /// at the given gas price. pub fn approx_gas_for_balance(gas_price: Balance, balance: Balance) -> Gas - where Balance: AtLeast32Bit +where + Balance: AtLeast32Bit, { - if gas_price.is_zero() { - Zero::zero() - } else { - (balance / gas_price).saturated_into::() - } + if gas_price.is_zero() { + Zero::zero() + } else { + (balance / gas_price).saturated_into::() + } } /// A simple utility macro that helps to match against a @@ -297,111 +300,117 @@ macro_rules! match_tokens { #[cfg(test)] mod tests { - use super::{GasMeter, Token}; - use crate::{tests::Test, gas::approx_gas_for_balance}; - - /// A trivial token that charges the specified number of gas units. - #[derive(Copy, Clone, PartialEq, Eq, Debug)] - struct SimpleToken(u64); - impl Token for SimpleToken { - type Metadata = (); - fn calculate_amount(&self, _metadata: &()) -> u64 { self.0 } - } - - struct MultiplierTokenMetadata { - multiplier: u64, - } - /// A simple token that charges for the given amount multiplied to - /// a multiplier taken from a given metadata. - #[derive(Copy, Clone, PartialEq, Eq, Debug)] - struct MultiplierToken(u64); - - impl Token for MultiplierToken { - type Metadata = MultiplierTokenMetadata; - fn calculate_amount(&self, metadata: &MultiplierTokenMetadata) -> u64 { - // Probably you want to use saturating mul in production code. 
- self.0 * metadata.multiplier - } - } - - #[test] - fn it_works() { - let gas_meter = GasMeter::::with_limit(50000, 10); - assert_eq!(gas_meter.gas_left(), 50000); - } - - #[test] - fn simple() { - let mut gas_meter = GasMeter::::with_limit(50000, 10); - - let result = gas_meter - .charge(&MultiplierTokenMetadata { multiplier: 3 }, MultiplierToken(10)); - assert!(!result.is_out_of_gas()); - - assert_eq!(gas_meter.gas_left(), 49_970); - assert_eq!(gas_meter.spent(), 30); - assert_eq!(gas_meter.gas_price(), 10); - } - - #[test] - fn tracing() { - let mut gas_meter = GasMeter::::with_limit(50000, 10); - assert!(!gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); - assert!(!gas_meter - .charge(&MultiplierTokenMetadata { multiplier: 3 }, MultiplierToken(10)) - .is_out_of_gas()); - - let mut tokens = gas_meter.tokens()[0..2].iter(); - match_tokens!(tokens, SimpleToken(1), MultiplierToken(10),); - } - - // This test makes sure that nothing can be executed if there is no gas. - #[test] - fn refuse_to_execute_anything_if_zero() { - let mut gas_meter = GasMeter::::with_limit(0, 10); - assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); - } - - // Make sure that if the gas meter is charged by exceeding amount then not only an error - // returned for that charge, but also for all consequent charges. - // - // This is not strictly necessary, because the execution should be interrupted immediately - // if the gas meter runs out of gas. However, this is just a nice property to have. - #[test] - fn overcharge_is_unrecoverable() { - let mut gas_meter = GasMeter::::with_limit(200, 10); - - // The first charge is should lead to OOG. - assert!(gas_meter.charge(&(), SimpleToken(300)).is_out_of_gas()); - - // The gas meter is emptied at this moment, so this should also fail. - assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); - } - - - // Charging the exact amount that the user paid for should be - // possible. 
- #[test] - fn charge_exact_amount() { - let mut gas_meter = GasMeter::::with_limit(25, 10); - assert!(!gas_meter.charge(&(), SimpleToken(25)).is_out_of_gas()); - } - - // A unit test for `fn approx_gas_for_balance()`, and makes - // sure setting gas_price 0 does not cause `div by zero` error. - #[test] - fn approx_gas_for_balance_works() { - let tests = vec![ - (approx_gas_for_balance(0_u64, 123), 0), - (approx_gas_for_balance(0_u64, 456), 0), - (approx_gas_for_balance(1_u64, 123), 123), - (approx_gas_for_balance(1_u64, 456), 456), - (approx_gas_for_balance(100_u64, 900), 9), - (approx_gas_for_balance(123_u64, 900), 7), - ]; - - for (lhs, rhs) in tests { - assert_eq!(lhs, rhs); - } - } + use super::{GasMeter, Token}; + use crate::{gas::approx_gas_for_balance, tests::Test}; + + /// A trivial token that charges the specified number of gas units. + #[derive(Copy, Clone, PartialEq, Eq, Debug)] + struct SimpleToken(u64); + impl Token for SimpleToken { + type Metadata = (); + fn calculate_amount(&self, _metadata: &()) -> u64 { + self.0 + } + } + + struct MultiplierTokenMetadata { + multiplier: u64, + } + /// A simple token that charges for the given amount multiplied to + /// a multiplier taken from a given metadata. + #[derive(Copy, Clone, PartialEq, Eq, Debug)] + struct MultiplierToken(u64); + + impl Token for MultiplierToken { + type Metadata = MultiplierTokenMetadata; + fn calculate_amount(&self, metadata: &MultiplierTokenMetadata) -> u64 { + // Probably you want to use saturating mul in production code. 
+ self.0 * metadata.multiplier + } + } + + #[test] + fn it_works() { + let gas_meter = GasMeter::::with_limit(50000, 10); + assert_eq!(gas_meter.gas_left(), 50000); + } + + #[test] + fn simple() { + let mut gas_meter = GasMeter::::with_limit(50000, 10); + + let result = gas_meter.charge( + &MultiplierTokenMetadata { multiplier: 3 }, + MultiplierToken(10), + ); + assert!(!result.is_out_of_gas()); + + assert_eq!(gas_meter.gas_left(), 49_970); + assert_eq!(gas_meter.spent(), 30); + assert_eq!(gas_meter.gas_price(), 10); + } + + #[test] + fn tracing() { + let mut gas_meter = GasMeter::::with_limit(50000, 10); + assert!(!gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + assert!(!gas_meter + .charge( + &MultiplierTokenMetadata { multiplier: 3 }, + MultiplierToken(10) + ) + .is_out_of_gas()); + + let mut tokens = gas_meter.tokens()[0..2].iter(); + match_tokens!(tokens, SimpleToken(1), MultiplierToken(10),); + } + + // This test makes sure that nothing can be executed if there is no gas. + #[test] + fn refuse_to_execute_anything_if_zero() { + let mut gas_meter = GasMeter::::with_limit(0, 10); + assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + } + + // Make sure that if the gas meter is charged by exceeding amount then not only an error + // returned for that charge, but also for all consequent charges. + // + // This is not strictly necessary, because the execution should be interrupted immediately + // if the gas meter runs out of gas. However, this is just a nice property to have. + #[test] + fn overcharge_is_unrecoverable() { + let mut gas_meter = GasMeter::::with_limit(200, 10); + + // The first charge is should lead to OOG. + assert!(gas_meter.charge(&(), SimpleToken(300)).is_out_of_gas()); + + // The gas meter is emptied at this moment, so this should also fail. + assert!(gas_meter.charge(&(), SimpleToken(1)).is_out_of_gas()); + } + + // Charging the exact amount that the user paid for should be + // possible. 
+ #[test] + fn charge_exact_amount() { + let mut gas_meter = GasMeter::::with_limit(25, 10); + assert!(!gas_meter.charge(&(), SimpleToken(25)).is_out_of_gas()); + } + + // A unit test for `fn approx_gas_for_balance()`, and makes + // sure setting gas_price 0 does not cause `div by zero` error. + #[test] + fn approx_gas_for_balance_works() { + let tests = vec![ + (approx_gas_for_balance(0_u64, 123), 0), + (approx_gas_for_balance(0_u64, 456), 0), + (approx_gas_for_balance(1_u64, 123), 123), + (approx_gas_for_balance(1_u64, 456), 456), + (approx_gas_for_balance(100_u64, 900), 9), + (approx_gas_for_balance(123_u64, 900), 7), + ]; + + for (lhs, rhs) in tests { + assert_eq!(lhs, rhs); + } + } } diff --git a/frame/contracts/src/lib.rs b/frame/contracts/src/lib.rs index 91f06d5607..92927e8721 100644 --- a/frame/contracts/src/lib.rs +++ b/frame/contracts/src/lib.rs @@ -93,188 +93,194 @@ mod gas; mod account_db; mod exec; -mod wasm; mod rent; +mod wasm; #[cfg(test)] mod tests; -use crate::exec::ExecutionContext; use crate::account_db::{AccountDb, DirectAccountDb}; +use crate::exec::ExecutionContext; use crate::wasm::{WasmLoader, WasmVm}; +pub use crate::exec::{ExecError, ExecResult, ExecReturnValue, StatusCode}; pub use crate::gas::{Gas, GasMeter}; -pub use crate::exec::{ExecResult, ExecReturnValue, ExecError, StatusCode}; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use sp_core::crypto::UncheckedFrom; -use sp_std::{prelude::*, marker::PhantomData, fmt::Debug}; -use codec::{Codec, Encode, Decode}; -use sp_io::hashing::blake2_256; -use sp_runtime::{ - traits::{ - Hash, StaticLookup, Zero, MaybeSerializeDeserialize, Member, SignedExtension, - DispatchInfoOf, - }, - transaction_validity::{ - ValidTransaction, InvalidTransaction, TransactionValidity, TransactionValidityError, - }, - RuntimeDebug, -}; +use codec::{Codec, Decode, Encode}; use frame_support::dispatch::{DispatchResult, Dispatchable}; +use frame_support::traits::{Currency, Get, OnUnbalanced, 
Randomness, Time}; use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, storage::child, - parameter_types, IsSubType, + decl_error, decl_event, decl_module, decl_storage, parameter_types, storage::child, IsSubType, + Parameter, }; -use frame_support::traits::{OnUnbalanced, Currency, Get, Time, Randomness}; -use frame_system::{self as system, ensure_signed, RawOrigin, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed, RawOrigin}; +use pallet_contracts_primitives::{ContractAccessError, RentProjection}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use sp_core::crypto::UncheckedFrom; use sp_core::storage::well_known_keys::CHILD_STORAGE_KEY_PREFIX; -use pallet_contracts_primitives::{RentProjection, ContractAccessError}; +use sp_io::hashing::blake2_256; +use sp_runtime::{ + traits::{ + DispatchInfoOf, Hash, MaybeSerializeDeserialize, Member, SignedExtension, StaticLookup, + Zero, + }, + transaction_validity::{ + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + }, + RuntimeDebug, +}; +use sp_std::{fmt::Debug, marker::PhantomData, prelude::*}; pub type CodeHash = ::Hash; pub type TrieId = Vec; /// A function that generates an `AccountId` for a contract upon instantiation. pub trait ContractAddressFor { - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; + fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &AccountId) -> AccountId; } /// A function that returns the fee for dispatching a `Call`. pub trait ComputeDispatchFee { - fn compute_dispatch_fee(call: &Call) -> Balance; + fn compute_dispatch_fee(call: &Call) -> Balance; } /// Information for managing an account and its sub trie abstraction. 
/// This is the required info to cache for an account #[derive(Encode, Decode, RuntimeDebug)] pub enum ContractInfo { - Alive(AliveContractInfo), - Tombstone(TombstoneContractInfo), + Alive(AliveContractInfo), + Tombstone(TombstoneContractInfo), } impl ContractInfo { - /// If contract is alive then return some alive info - pub fn get_alive(self) -> Option> { - if let ContractInfo::Alive(alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some reference to alive info - pub fn as_alive(&self) -> Option<&AliveContractInfo> { - if let ContractInfo::Alive(ref alive) = self { - Some(alive) - } else { - None - } - } - /// If contract is alive then return some mutable reference to alive info - pub fn as_alive_mut(&mut self) -> Option<&mut AliveContractInfo> { - if let ContractInfo::Alive(ref mut alive) = self { - Some(alive) - } else { - None - } - } - - /// If contract is tombstone then return some tombstone info - pub fn get_tombstone(self) -> Option> { - if let ContractInfo::Tombstone(tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some reference to tombstone info - pub fn as_tombstone(&self) -> Option<&TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref tombstone) = self { - Some(tombstone) - } else { - None - } - } - /// If contract is tombstone then return some mutable reference to tombstone info - pub fn as_tombstone_mut(&mut self) -> Option<&mut TombstoneContractInfo> { - if let ContractInfo::Tombstone(ref mut tombstone) = self { - Some(tombstone) - } else { - None - } - } + /// If contract is alive then return some alive info + pub fn get_alive(self) -> Option> { + if let ContractInfo::Alive(alive) = self { + Some(alive) + } else { + None + } + } + /// If contract is alive then return some reference to alive info + pub fn as_alive(&self) -> Option<&AliveContractInfo> { + if let ContractInfo::Alive(ref alive) = self { + Some(alive) + } else { + None + } + 
} + /// If contract is alive then return some mutable reference to alive info + pub fn as_alive_mut(&mut self) -> Option<&mut AliveContractInfo> { + if let ContractInfo::Alive(ref mut alive) = self { + Some(alive) + } else { + None + } + } + + /// If contract is tombstone then return some tombstone info + pub fn get_tombstone(self) -> Option> { + if let ContractInfo::Tombstone(tombstone) = self { + Some(tombstone) + } else { + None + } + } + /// If contract is tombstone then return some reference to tombstone info + pub fn as_tombstone(&self) -> Option<&TombstoneContractInfo> { + if let ContractInfo::Tombstone(ref tombstone) = self { + Some(tombstone) + } else { + None + } + } + /// If contract is tombstone then return some mutable reference to tombstone info + pub fn as_tombstone_mut(&mut self) -> Option<&mut TombstoneContractInfo> { + if let ContractInfo::Tombstone(ref mut tombstone) = self { + Some(tombstone) + } else { + None + } + } } pub type AliveContractInfo = - RawAliveContractInfo, BalanceOf, ::BlockNumber>; + RawAliveContractInfo, BalanceOf, ::BlockNumber>; /// Information for managing an account and its sub trie abstraction. /// This is the required info to cache for an account. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct RawAliveContractInfo { - /// Unique ID for the subtree encoded as a bytes vector. - pub trie_id: TrieId, - /// The size of stored value in octet. - pub storage_size: u32, - /// The code associated with a given account. - pub code_hash: CodeHash, - /// Pay rent at most up to this value. - pub rent_allowance: Balance, - /// Last block rent has been payed. - pub deduct_block: BlockNumber, - /// Last block child storage has been written. - pub last_write: Option, + /// Unique ID for the subtree encoded as a bytes vector. + pub trie_id: TrieId, + /// The size of stored value in octet. + pub storage_size: u32, + /// The code associated with a given account. 
+ pub code_hash: CodeHash, + /// Pay rent at most up to this value. + pub rent_allowance: Balance, + /// Last block rent has been payed. + pub deduct_block: BlockNumber, + /// Last block child storage has been written. + pub last_write: Option, } impl RawAliveContractInfo { - /// Associated child trie unique id is built from the hash part of the trie id. - pub fn child_trie_unique_id(&self) -> child::ChildInfo { - trie_unique_id(&self.trie_id[..]) - } + /// Associated child trie unique id is built from the hash part of the trie id. + pub fn child_trie_unique_id(&self) -> child::ChildInfo { + trie_unique_id(&self.trie_id[..]) + } } /// Associated child trie unique id is built from the hash part of the trie id. pub(crate) fn trie_unique_id(trie_id: &[u8]) -> child::ChildInfo { - let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); - child::ChildInfo::new_default(&trie_id[start ..]) + let start = CHILD_STORAGE_KEY_PREFIX.len() + b"default:".len(); + child::ChildInfo::new_default(&trie_id[start..]) } pub type TombstoneContractInfo = - RawTombstoneContractInfo<::Hash, ::Hashing>; + RawTombstoneContractInfo<::Hash, ::Hashing>; #[derive(Encode, Decode, PartialEq, Eq, RuntimeDebug)] pub struct RawTombstoneContractInfo(H, PhantomData); impl RawTombstoneContractInfo where - H: Member + MaybeSerializeDeserialize+ Debug - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default - + sp_std::hash::Hash + Codec, - Hasher: Hash, + H: Member + + MaybeSerializeDeserialize + + Debug + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Default + + sp_std::hash::Hash + + Codec, + Hasher: Hash, { - fn new(storage_root: &[u8], code_hash: H) -> Self { - let mut buf = Vec::new(); - storage_root.using_encoded(|encoded| buf.extend_from_slice(encoded)); - buf.extend_from_slice(code_hash.as_ref()); - RawTombstoneContractInfo(::hash(&buf[..]), PhantomData) - } + fn new(storage_root: &[u8], code_hash: H) -> Self { + let mut buf = Vec::new(); + storage_root.using_encoded(|encoded| 
buf.extend_from_slice(encoded)); + buf.extend_from_slice(code_hash.as_ref()); + RawTombstoneContractInfo(::hash(&buf[..]), PhantomData) + } } /// Get a trie id (trie id must be unique and collision resistant depending upon its context). /// Note that it is different than encode because trie id should be collision resistant /// (being a proper unique identifier). pub trait TrieIdGenerator { - /// Get a trie id for an account, using reference to parent account trie id to ensure - /// uniqueness of trie id. - /// - /// The implementation must ensure every new trie id is unique: two consecutive calls with the - /// same parameter needs to return different trie id values. - /// - /// Also, the implementation is responsible for ensuring that `TrieId` starts with - /// `:child_storage:`. - /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 - fn trie_id(account_id: &AccountId) -> TrieId; + /// Get a trie id for an account, using reference to parent account trie id to ensure + /// uniqueness of trie id. + /// + /// The implementation must ensure every new trie id is unique: two consecutive calls with the + /// same parameter needs to return different trie id values. + /// + /// Also, the implementation is responsible for ensuring that `TrieId` starts with + /// `:child_storage:`. + /// TODO: We want to change this, see https://github.com/paritytech/substrate/issues/2325 + fn trie_id(account_id: &AccountId) -> TrieId; } /// Get trie id from `account_id`. @@ -284,149 +290,153 @@ pub struct TrieIdFromParentCounter(PhantomData); /// accountid_counter`. impl TrieIdGenerator for TrieIdFromParentCounter where - T::AccountId: AsRef<[u8]> + T::AccountId: AsRef<[u8]>, { - fn trie_id(account_id: &T::AccountId) -> TrieId { - // Note that skipping a value due to error is not an issue here. - // We only need uniqueness, not sequence. 
- let new_seed = AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - let mut buf = Vec::new(); - buf.extend_from_slice(account_id.as_ref()); - buf.extend_from_slice(&new_seed.to_le_bytes()[..]); - - // TODO: see https://github.com/paritytech/substrate/issues/2325 - CHILD_STORAGE_KEY_PREFIX.iter() - .chain(b"default:") - .chain(T::Hashing::hash(&buf[..]).as_ref().iter()) - .cloned() - .collect() - } + fn trie_id(account_id: &T::AccountId) -> TrieId { + // Note that skipping a value due to error is not an issue here. + // We only need uniqueness, not sequence. + let new_seed = AccountCounter::mutate(|v| { + *v = v.wrapping_add(1); + *v + }); + + let mut buf = Vec::new(); + buf.extend_from_slice(account_id.as_ref()); + buf.extend_from_slice(&new_seed.to_le_bytes()[..]); + + // TODO: see https://github.com/paritytech/substrate/issues/2325 + CHILD_STORAGE_KEY_PREFIX + .iter() + .chain(b"default:") + .chain(T::Hashing::hash(&buf[..]).as_ref().iter()) + .cloned() + .collect() + } } -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; pub type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; parameter_types! { - /// A reasonable default value for [`Trait::SignedClaimedHandicap`]. - pub const DefaultSignedClaimHandicap: u32 = 2; - /// A reasonable default value for [`Trait::TombstoneDeposit`]. - pub const DefaultTombstoneDeposit: u32 = 16; - /// A reasonable default value for [`Trait::StorageSizeOffset`]. - pub const DefaultStorageSizeOffset: u32 = 8; - /// A reasonable default value for [`Trait::RentByteFee`]. - pub const DefaultRentByteFee: u32 = 4; - /// A reasonable default value for [`Trait::RentDepositOffset`]. - pub const DefaultRentDepositOffset: u32 = 1000; - /// A reasonable default value for [`Trait::SurchargeReward`]. 
- pub const DefaultSurchargeReward: u32 = 150; - /// A reasonable default value for [`Trait::TransferFee`]. - pub const DefaultTransferFee: u32 = 0; - /// A reasonable default value for [`Trait::InstantiationFee`]. - pub const DefaultInstantiationFee: u32 = 0; - /// A reasonable default value for [`Trait::TransactionBaseFee`]. - pub const DefaultTransactionBaseFee: u32 = 0; - /// A reasonable default value for [`Trait::TransactionByteFee`]. - pub const DefaultTransactionByteFee: u32 = 0; - /// A reasonable default value for [`Trait::ContractFee`]. - pub const DefaultContractFee: u32 = 21; - /// A reasonable default value for [`Trait::CallBaseFee`]. - pub const DefaultCallBaseFee: u32 = 1000; - /// A reasonable default value for [`Trait::InstantiateBaseFee`]. - pub const DefaultInstantiateBaseFee: u32 = 1000; - /// A reasonable default value for [`Trait::MaxDepth`]. - pub const DefaultMaxDepth: u32 = 32; - /// A reasonable default value for [`Trait::MaxValueSize`]. - pub const DefaultMaxValueSize: u32 = 16_384; - /// A reasonable default value for [`Trait::BlockGasLimit`]. - pub const DefaultBlockGasLimit: u32 = 10_000_000; + /// A reasonable default value for [`Trait::SignedClaimedHandicap`]. + pub const DefaultSignedClaimHandicap: u32 = 2; + /// A reasonable default value for [`Trait::TombstoneDeposit`]. + pub const DefaultTombstoneDeposit: u32 = 16; + /// A reasonable default value for [`Trait::StorageSizeOffset`]. + pub const DefaultStorageSizeOffset: u32 = 8; + /// A reasonable default value for [`Trait::RentByteFee`]. + pub const DefaultRentByteFee: u32 = 4; + /// A reasonable default value for [`Trait::RentDepositOffset`]. + pub const DefaultRentDepositOffset: u32 = 1000; + /// A reasonable default value for [`Trait::SurchargeReward`]. + pub const DefaultSurchargeReward: u32 = 150; + /// A reasonable default value for [`Trait::TransferFee`]. + pub const DefaultTransferFee: u32 = 0; + /// A reasonable default value for [`Trait::InstantiationFee`]. 
+ pub const DefaultInstantiationFee: u32 = 0; + /// A reasonable default value for [`Trait::TransactionBaseFee`]. + pub const DefaultTransactionBaseFee: u32 = 0; + /// A reasonable default value for [`Trait::TransactionByteFee`]. + pub const DefaultTransactionByteFee: u32 = 0; + /// A reasonable default value for [`Trait::ContractFee`]. + pub const DefaultContractFee: u32 = 21; + /// A reasonable default value for [`Trait::CallBaseFee`]. + pub const DefaultCallBaseFee: u32 = 1000; + /// A reasonable default value for [`Trait::InstantiateBaseFee`]. + pub const DefaultInstantiateBaseFee: u32 = 1000; + /// A reasonable default value for [`Trait::MaxDepth`]. + pub const DefaultMaxDepth: u32 = 32; + /// A reasonable default value for [`Trait::MaxValueSize`]. + pub const DefaultMaxValueSize: u32 = 16_384; + /// A reasonable default value for [`Trait::BlockGasLimit`]. + pub const DefaultBlockGasLimit: u32 = 10_000_000; } pub trait Trait: frame_system::Trait { - type Currency: Currency; - type Time: Time; - type Randomness: Randomness; + type Currency: Currency; + type Time: Time; + type Randomness: Randomness; - /// The outer call dispatch type. - type Call: Parameter + Dispatchable::Origin> + IsSubType, Self>; + /// The outer call dispatch type. + type Call: Parameter + + Dispatchable::Origin> + + IsSubType, Self>; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// A function type to get the contract address given the instantiator. - type DetermineContractAddress: ContractAddressFor, Self::AccountId>; + /// A function type to get the contract address given the instantiator. + type DetermineContractAddress: ContractAddressFor, Self::AccountId>; - /// A function type that computes the fee for dispatching the given `Call`. - /// - /// It is recommended (though not required) for this function to return a fee that would be - /// taken by the Executive module for regular dispatch. 
- type ComputeDispatchFee: ComputeDispatchFee<::Call, BalanceOf>; + /// A function type that computes the fee for dispatching the given `Call`. + /// + /// It is recommended (though not required) for this function to return a fee that would be + /// taken by the Executive module for regular dispatch. + type ComputeDispatchFee: ComputeDispatchFee<::Call, BalanceOf>; - /// trie id generator - type TrieIdGenerator: TrieIdGenerator; + /// trie id generator + type TrieIdGenerator: TrieIdGenerator; - /// Handler for the unbalanced reduction when making a gas payment. - type GasPayment: OnUnbalanced>; + /// Handler for the unbalanced reduction when making a gas payment. + type GasPayment: OnUnbalanced>; - /// Handler for rent payments. - type RentPayment: OnUnbalanced>; + /// Handler for rent payments. + type RentPayment: OnUnbalanced>; - /// Number of block delay an extrinsic claim surcharge has. - /// - /// When claim surcharge is called by an extrinsic the rent is checked - /// for current_block - delay - type SignedClaimHandicap: Get; + /// Number of block delay an extrinsic claim surcharge has. + /// + /// When claim surcharge is called by an extrinsic the rent is checked + /// for current_block - delay + type SignedClaimHandicap: Get; - /// The minimum amount required to generate a tombstone. - type TombstoneDeposit: Get>; + /// The minimum amount required to generate a tombstone. + type TombstoneDeposit: Get>; - /// Size of a contract at the time of instantiation. This is a simple way to ensure - /// that empty contracts eventually gets deleted. - type StorageSizeOffset: Get; + /// Size of a contract at the time of instantiation. This is a simple way to ensure + /// that empty contracts eventually gets deleted. + type StorageSizeOffset: Get; - /// Price of a byte of storage per one block interval. Should be greater than 0. - type RentByteFee: Get>; + /// Price of a byte of storage per one block interval. Should be greater than 0. 
+ type RentByteFee: Get>; - /// The amount of funds a contract should deposit in order to offset - /// the cost of one byte. - /// - /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, - /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. - /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, - /// then it would pay 500 BU/day. - type RentDepositOffset: Get>; + /// The amount of funds a contract should deposit in order to offset + /// the cost of one byte. + /// + /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, + /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. + /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, + /// then it would pay 500 BU/day. + type RentDepositOffset: Get>; - /// Reward that is received by the party whose touch has led - /// to removal of a contract. - type SurchargeReward: Get>; + /// Reward that is received by the party whose touch has led + /// to removal of a contract. + type SurchargeReward: Get>; - /// The fee to be paid for making a transaction; the base. - type TransactionBaseFee: Get>; + /// The fee to be paid for making a transaction; the base. + type TransactionBaseFee: Get>; - /// The fee to be paid for making a transaction; the per-byte portion. - type TransactionByteFee: Get>; + /// The fee to be paid for making a transaction; the per-byte portion. + type TransactionByteFee: Get>; - /// The fee required to instantiate a contract instance. - type ContractFee: Get>; + /// The fee required to instantiate a contract instance. + type ContractFee: Get>; - /// The base fee charged for calling into a contract. - type CallBaseFee: Get; + /// The base fee charged for calling into a contract. + type CallBaseFee: Get; - /// The base fee charged for instantiating a contract. 
- type InstantiateBaseFee: Get; + /// The base fee charged for instantiating a contract. + type InstantiateBaseFee: Get; - /// The maximum nesting level of a call/instantiate stack. - type MaxDepth: Get; + /// The maximum nesting level of a call/instantiate stack. + type MaxDepth: Get; - /// The maximum size of a storage value in bytes. - type MaxValueSize: Get; + /// The maximum size of a storage value in bytes. + type MaxValueSize: Get; - /// The maximum amount of gas that could be expended per block. - type BlockGasLimit: Get; + /// The maximum amount of gas that could be expended per block. + type BlockGasLimit: Get; } /// Simple contract address determiner. @@ -438,515 +448,530 @@ pub trait Trait: frame_system::Trait { pub struct SimpleAddressDeterminer(PhantomData); impl ContractAddressFor, T::AccountId> for SimpleAddressDeterminer where - T::AccountId: UncheckedFrom + AsRef<[u8]> + T::AccountId: UncheckedFrom + AsRef<[u8]>, { - fn contract_address_for(code_hash: &CodeHash, data: &[u8], origin: &T::AccountId) -> T::AccountId { - let data_hash = T::Hashing::hash(data); - - let mut buf = Vec::new(); - buf.extend_from_slice(code_hash.as_ref()); - buf.extend_from_slice(data_hash.as_ref()); - buf.extend_from_slice(origin.as_ref()); - - UncheckedFrom::unchecked_from(T::Hashing::hash(&buf[..])) - } + fn contract_address_for( + code_hash: &CodeHash, + data: &[u8], + origin: &T::AccountId, + ) -> T::AccountId { + let data_hash = T::Hashing::hash(data); + + let mut buf = Vec::new(); + buf.extend_from_slice(code_hash.as_ref()); + buf.extend_from_slice(data_hash.as_ref()); + buf.extend_from_slice(origin.as_ref()); + + UncheckedFrom::unchecked_from(T::Hashing::hash(&buf[..])) + } } /// The default dispatch fee computor computes the fee in the same way that /// the implementation of `ChargeTransactionPayment` for the Balances module does. Note that this only takes a fixed /// fee based on size. Unlike the balances module, weight-fee is applied. 
pub struct DefaultDispatchFeeComputor(PhantomData); -impl ComputeDispatchFee<::Call, BalanceOf> for DefaultDispatchFeeComputor { - fn compute_dispatch_fee(call: &::Call) -> BalanceOf { - let encoded_len = call.using_encoded(|encoded| encoded.len() as u32); - let base_fee = T::TransactionBaseFee::get(); - let byte_fee = T::TransactionByteFee::get(); - base_fee + byte_fee * encoded_len.into() - } +impl ComputeDispatchFee<::Call, BalanceOf> + for DefaultDispatchFeeComputor +{ + fn compute_dispatch_fee(call: &::Call) -> BalanceOf { + let encoded_len = call.using_encoded(|encoded| encoded.len() as u32); + let base_fee = T::TransactionBaseFee::get(); + let byte_fee = T::TransactionByteFee::get(); + base_fee + byte_fee * encoded_len.into() + } } decl_error! { - /// Error for the contracts module. - pub enum Error for Module { - /// A new schedule must have a greater version than the current one. - InvalidScheduleVersion, - /// An origin must be signed or inherent and auxiliary sender only provided on inherent. - InvalidSurchargeClaim, - /// Cannot restore from nonexisting or tombstone contract. - InvalidSourceContract, - /// Cannot restore to nonexisting or alive contract. - InvalidDestinationContract, - /// Tombstones don't match. - InvalidTombstone, - /// An origin TrieId written in the current block. - InvalidContractOrigin - } + /// Error for the contracts module. + pub enum Error for Module { + /// A new schedule must have a greater version than the current one. + InvalidScheduleVersion, + /// An origin must be signed or inherent and auxiliary sender only provided on inherent. + InvalidSurchargeClaim, + /// Cannot restore from nonexisting or tombstone contract. + InvalidSourceContract, + /// Cannot restore to nonexisting or alive contract. + InvalidDestinationContract, + /// Tombstones don't match. + InvalidTombstone, + /// An origin TrieId written in the current block. + InvalidContractOrigin + } } decl_module! { - /// Contracts module. 
- pub struct Module for enum Call where origin: ::Origin { - type Error = Error; - - /// Number of block delay an extrinsic claim surcharge has. - /// - /// When claim surcharge is called by an extrinsic the rent is checked - /// for current_block - delay - const SignedClaimHandicap: T::BlockNumber = T::SignedClaimHandicap::get(); - - /// The minimum amount required to generate a tombstone. - const TombstoneDeposit: BalanceOf = T::TombstoneDeposit::get(); - - /// Size of a contract at the time of instantiation. This is a simple way to ensure that - /// empty contracts eventually gets deleted. - const StorageSizeOffset: u32 = T::StorageSizeOffset::get(); - - /// Price of a byte of storage per one block interval. Should be greater than 0. - const RentByteFee: BalanceOf = T::RentByteFee::get(); - - /// The amount of funds a contract should deposit in order to offset - /// the cost of one byte. - /// - /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, - /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. - /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, - /// then it would pay 500 BU/day. - const RentDepositOffset: BalanceOf = T::RentDepositOffset::get(); - - /// Reward that is received by the party whose touch has led - /// to removal of a contract. - const SurchargeReward: BalanceOf = T::SurchargeReward::get(); - - /// The fee to be paid for making a transaction; the base. - const TransactionBaseFee: BalanceOf = T::TransactionBaseFee::get(); - - /// The fee to be paid for making a transaction; the per-byte portion. - const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); - - /// The fee required to instantiate a contract instance. A reasonable default value - /// is 21. - const ContractFee: BalanceOf = T::ContractFee::get(); - - /// The base fee charged for calling into a contract. A reasonable default - /// value is 135. 
- const CallBaseFee: Gas = T::CallBaseFee::get(); - - /// The base fee charged for instantiating a contract. A reasonable default value - /// is 175. - const InstantiateBaseFee: Gas = T::InstantiateBaseFee::get(); - - /// The maximum nesting level of a call/instantiate stack. A reasonable default - /// value is 100. - const MaxDepth: u32 = T::MaxDepth::get(); - - /// The maximum size of a storage value in bytes. A reasonable default is 16 KiB. - const MaxValueSize: u32 = T::MaxValueSize::get(); - - /// The maximum amount of gas that could be expended per block. A reasonable - /// default value is 10_000_000. - const BlockGasLimit: Gas = T::BlockGasLimit::get(); - - fn deposit_event() = default; - - /// Updates the schedule for metering contracts. - /// - /// The schedule must have a greater version than the stored schedule. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn update_schedule(origin, schedule: Schedule) -> DispatchResult { - ensure_root(origin)?; - if >::current_schedule().version >= schedule.version { - Err(Error::::InvalidScheduleVersion)? - } - - Self::deposit_event(RawEvent::ScheduleUpdated(schedule.version)); - CurrentSchedule::put(schedule); - - Ok(()) - } - - /// Stores the given binary Wasm code into the chain's storage and returns its `codehash`. - /// You can instantiate contracts only with stored code. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn put_code( - origin, - #[compact] gas_limit: Gas, - code: Vec - ) -> DispatchResult { - let origin = ensure_signed(origin)?; - - let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; - - let schedule = >::current_schedule(); - let result = wasm::save_code::(code, &mut gas_meter, &schedule); - if let Ok(code_hash) = result { - Self::deposit_event(RawEvent::CodeStored(code_hash)); - } - - gas::refund_unused_gas::(&origin, gas_meter, imbalance); - - result.map(|_| ()).map_err(Into::into) - } - - /// Makes a call to an account, optionally transferring some balance. - /// - /// * If the account is a smart-contract account, the associated code will be - /// executed and any value will be transferred. - /// * If the account is a regular account, any value will be transferred. - /// * If no account exists and the call value is not less than `existential_deposit`, - /// a regular account will be created and any value will be transferred. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn call( - origin, - dest: ::Source, - #[compact] value: BalanceOf, - #[compact] gas_limit: Gas, - data: Vec - ) -> DispatchResult { - let origin = ensure_signed(origin)?; - let dest = T::Lookup::lookup(dest)?; - - Self::bare_call(origin, dest, value, gas_limit, data) - .map(|_| ()) - .map_err(|e| e.reason.into()) - } - - /// Instantiates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance. - /// - /// Instantiation is executed as follows: - /// - /// - The destination address is computed based on the sender and hash of the code. - /// - The smart-contract account is created at the computed address. - /// - The `ctor_code` is executed in the context of the newly-created account. Buffer returned - /// after the execution is saved as the `code` of the account. That code will be invoked - /// upon any call received by this account. 
- /// - The contract is initialized. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn instantiate( - origin, - #[compact] endowment: BalanceOf, - #[compact] gas_limit: Gas, - code_hash: CodeHash, - data: Vec - ) -> DispatchResult { - let origin = ensure_signed(origin)?; - - Self::execute_wasm(origin, gas_limit, |ctx, gas_meter| { - ctx.instantiate(endowment, gas_meter, &code_hash, data) - .map(|(_address, output)| output) - }) - .map(|_| ()) - .map_err(|e| e.reason.into()) - } - - /// Allows block producers to claim a small reward for evicting a contract. If a block producer - /// fails to do so, a regular users will be allowed to claim the reward. - /// - /// If contract is not evicted as a result of this call, no actions are taken and - /// the sender is not eligible for the reward. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn claim_surcharge(origin, dest: T::AccountId, aux_sender: Option) { - let origin = origin.into(); - let (signed, rewarded) = match (origin, aux_sender) { - (Ok(frame_system::RawOrigin::Signed(account)), None) => { - (true, account) - }, - (Ok(frame_system::RawOrigin::None), Some(aux_sender)) => { - (false, aux_sender) - }, - _ => Err(Error::::InvalidSurchargeClaim)?, - }; - - // Add some advantage for block producers (who send unsigned extrinsics) by - // adding a handicap: for signed extrinsics we use a slightly older block number - // for the eviction check. This can be viewed as if we pushed regular users back in past. - let handicap = if signed { - T::SignedClaimHandicap::get() - } else { - Zero::zero() - }; - - // If poking the contract has lead to eviction of the contract, give out the rewards. - if rent::snitch_contract_should_be_evicted::(&dest, handicap) { - T::Currency::deposit_into_existing(&rewarded, T::SurchargeReward::get())?; - } - } - - fn on_finalize() { - GasSpent::kill(); - } - } + /// Contracts module. 
+ pub struct Module for enum Call where origin: ::Origin { + type Error = Error; + + /// Number of block delay an extrinsic claim surcharge has. + /// + /// When claim surcharge is called by an extrinsic the rent is checked + /// for current_block - delay + const SignedClaimHandicap: T::BlockNumber = T::SignedClaimHandicap::get(); + + /// The minimum amount required to generate a tombstone. + const TombstoneDeposit: BalanceOf = T::TombstoneDeposit::get(); + + /// Size of a contract at the time of instantiation. This is a simple way to ensure that + /// empty contracts eventually gets deleted. + const StorageSizeOffset: u32 = T::StorageSizeOffset::get(); + + /// Price of a byte of storage per one block interval. Should be greater than 0. + const RentByteFee: BalanceOf = T::RentByteFee::get(); + + /// The amount of funds a contract should deposit in order to offset + /// the cost of one byte. + /// + /// Let's suppose the deposit is 1,000 BU (balance units)/byte and the rent is 1 BU/byte/day, + /// then a contract with 1,000,000 BU that uses 1,000 bytes of storage would pay no rent. + /// But if the balance reduced to 500,000 BU and the storage stayed the same at 1,000, + /// then it would pay 500 BU/day. + const RentDepositOffset: BalanceOf = T::RentDepositOffset::get(); + + /// Reward that is received by the party whose touch has led + /// to removal of a contract. + const SurchargeReward: BalanceOf = T::SurchargeReward::get(); + + /// The fee to be paid for making a transaction; the base. + const TransactionBaseFee: BalanceOf = T::TransactionBaseFee::get(); + + /// The fee to be paid for making a transaction; the per-byte portion. + const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); + + /// The fee required to instantiate a contract instance. A reasonable default value + /// is 21. + const ContractFee: BalanceOf = T::ContractFee::get(); + + /// The base fee charged for calling into a contract. A reasonable default + /// value is 135. 
+ const CallBaseFee: Gas = T::CallBaseFee::get(); + + /// The base fee charged for instantiating a contract. A reasonable default value + /// is 175. + const InstantiateBaseFee: Gas = T::InstantiateBaseFee::get(); + + /// The maximum nesting level of a call/instantiate stack. A reasonable default + /// value is 100. + const MaxDepth: u32 = T::MaxDepth::get(); + + /// The maximum size of a storage value in bytes. A reasonable default is 16 KiB. + const MaxValueSize: u32 = T::MaxValueSize::get(); + + /// The maximum amount of gas that could be expended per block. A reasonable + /// default value is 10_000_000. + const BlockGasLimit: Gas = T::BlockGasLimit::get(); + + fn deposit_event() = default; + + /// Updates the schedule for metering contracts. + /// + /// The schedule must have a greater version than the stored schedule. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn update_schedule(origin, schedule: Schedule) -> DispatchResult { + ensure_root(origin)?; + if >::current_schedule().version >= schedule.version { + Err(Error::::InvalidScheduleVersion)? + } + + Self::deposit_event(RawEvent::ScheduleUpdated(schedule.version)); + CurrentSchedule::put(schedule); + + Ok(()) + } + + /// Stores the given binary Wasm code into the chain's storage and returns its `codehash`. + /// You can instantiate contracts only with stored code. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn put_code( + origin, + #[compact] gas_limit: Gas, + code: Vec + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let (mut gas_meter, imbalance) = gas::buy_gas::(&origin, gas_limit)?; + + let schedule = >::current_schedule(); + let result = wasm::save_code::(code, &mut gas_meter, &schedule); + if let Ok(code_hash) = result { + Self::deposit_event(RawEvent::CodeStored(code_hash)); + } + + gas::refund_unused_gas::(&origin, gas_meter, imbalance); + + result.map(|_| ()).map_err(Into::into) + } + + /// Makes a call to an account, optionally transferring some balance. + /// + /// * If the account is a smart-contract account, the associated code will be + /// executed and any value will be transferred. + /// * If the account is a regular account, any value will be transferred. + /// * If no account exists and the call value is not less than `existential_deposit`, + /// a regular account will be created and any value will be transferred. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn call( + origin, + dest: ::Source, + #[compact] value: BalanceOf, + #[compact] gas_limit: Gas, + data: Vec + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + let dest = T::Lookup::lookup(dest)?; + + Self::bare_call(origin, dest, value, gas_limit, data) + .map(|_| ()) + .map_err(|e| e.reason.into()) + } + + /// Instantiates a new contract from the `codehash` generated by `put_code`, optionally transferring some balance. + /// + /// Instantiation is executed as follows: + /// + /// - The destination address is computed based on the sender and hash of the code. + /// - The smart-contract account is created at the computed address. + /// - The `ctor_code` is executed in the context of the newly-created account. Buffer returned + /// after the execution is saved as the `code` of the account. That code will be invoked + /// upon any call received by this account. 
+ /// - The contract is initialized. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn instantiate( + origin, + #[compact] endowment: BalanceOf, + #[compact] gas_limit: Gas, + code_hash: CodeHash, + data: Vec + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + Self::execute_wasm(origin, gas_limit, |ctx, gas_meter| { + ctx.instantiate(endowment, gas_meter, &code_hash, data) + .map(|(_address, output)| output) + }) + .map(|_| ()) + .map_err(|e| e.reason.into()) + } + + /// Allows block producers to claim a small reward for evicting a contract. If a block producer + /// fails to do so, a regular users will be allowed to claim the reward. + /// + /// If contract is not evicted as a result of this call, no actions are taken and + /// the sender is not eligible for the reward. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn claim_surcharge(origin, dest: T::AccountId, aux_sender: Option) { + let origin = origin.into(); + let (signed, rewarded) = match (origin, aux_sender) { + (Ok(frame_system::RawOrigin::Signed(account)), None) => { + (true, account) + }, + (Ok(frame_system::RawOrigin::None), Some(aux_sender)) => { + (false, aux_sender) + }, + _ => Err(Error::::InvalidSurchargeClaim)?, + }; + + // Add some advantage for block producers (who send unsigned extrinsics) by + // adding a handicap: for signed extrinsics we use a slightly older block number + // for the eviction check. This can be viewed as if we pushed regular users back in past. + let handicap = if signed { + T::SignedClaimHandicap::get() + } else { + Zero::zero() + }; + + // If poking the contract has lead to eviction of the contract, give out the rewards. + if rent::snitch_contract_should_be_evicted::(&dest, handicap) { + T::Currency::deposit_into_existing(&rewarded, T::SurchargeReward::get())?; + } + } + + fn on_finalize() { + GasSpent::kill(); + } + } } /// Public APIs provided by the contracts module. 
impl Module { - /// Perform a call to a specified contract. - /// - /// This function is similar to `Self::call`, but doesn't perform any address lookups and better - /// suitable for calling directly from Rust. - pub fn bare_call( - origin: T::AccountId, - dest: T::AccountId, - value: BalanceOf, - gas_limit: Gas, - input_data: Vec, - ) -> ExecResult { - Self::execute_wasm(origin, gas_limit, |ctx, gas_meter| { - ctx.call(dest, value, gas_meter, input_data) - }) - } - - /// Query storage of a specified contract under a specified key. - pub fn get_storage( - address: T::AccountId, - key: [u8; 32], - ) -> sp_std::result::Result>, ContractAccessError> { - let contract_info = >::get(&address) - .ok_or(ContractAccessError::DoesntExist)? - .get_alive() - .ok_or(ContractAccessError::IsTombstone)?; - - let maybe_value = AccountDb::::get_storage( - &DirectAccountDb, - &address, - Some(&contract_info.trie_id), - &key, - ); - Ok(maybe_value) - } - - pub fn rent_projection( - address: T::AccountId, - ) -> sp_std::result::Result, ContractAccessError> { - rent::compute_rent_projection::(&address) - } + /// Perform a call to a specified contract. + /// + /// This function is similar to `Self::call`, but doesn't perform any address lookups and better + /// suitable for calling directly from Rust. + pub fn bare_call( + origin: T::AccountId, + dest: T::AccountId, + value: BalanceOf, + gas_limit: Gas, + input_data: Vec, + ) -> ExecResult { + Self::execute_wasm(origin, gas_limit, |ctx, gas_meter| { + ctx.call(dest, value, gas_meter, input_data) + }) + } + + /// Query storage of a specified contract under a specified key. + pub fn get_storage( + address: T::AccountId, + key: [u8; 32], + ) -> sp_std::result::Result>, ContractAccessError> { + let contract_info = >::get(&address) + .ok_or(ContractAccessError::DoesntExist)? 
+ .get_alive() + .ok_or(ContractAccessError::IsTombstone)?; + + let maybe_value = AccountDb::::get_storage( + &DirectAccountDb, + &address, + Some(&contract_info.trie_id), + &key, + ); + Ok(maybe_value) + } + + pub fn rent_projection( + address: T::AccountId, + ) -> sp_std::result::Result, ContractAccessError> { + rent::compute_rent_projection::(&address) + } } impl Module { - fn execute_wasm( - origin: T::AccountId, - gas_limit: Gas, - func: impl FnOnce(&mut ExecutionContext, &mut GasMeter) -> ExecResult - ) -> ExecResult { - // Pay for the gas upfront. - // - // NOTE: it is very important to avoid any state changes before - // paying for the gas. - let (mut gas_meter, imbalance) = - try_or_exec_error!( - gas::buy_gas::(&origin, gas_limit), - // We don't have a spare buffer here in the first place, so create a new empty one. - Vec::new() - ); - - let cfg = Config::preload(); - let vm = WasmVm::new(&cfg.schedule); - let loader = WasmLoader::new(&cfg.schedule); - let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); - - let result = func(&mut ctx, &mut gas_meter); - - if result.as_ref().map(|output| output.is_success()).unwrap_or(false) { - // Commit all changes that made it thus far into the persistent storage. - DirectAccountDb.commit(ctx.overlay.into_change_set()); - } - - // Refund cost of the unused gas. - // - // NOTE: This should go after the commit to the storage, since the storage changes - // can alter the balance of the caller. - gas::refund_unused_gas::(&origin, gas_meter, imbalance); - - // Execute deferred actions. 
- ctx.deferred.into_iter().for_each(|deferred| { - use self::exec::DeferredAction::*; - match deferred { - DepositEvent { - topics, - event, - } => >::deposit_event_indexed( - &*topics, - ::Event::from(event).into(), - ), - DispatchRuntimeCall { - origin: who, - call, - } => { - let result = call.dispatch(RawOrigin::Signed(who.clone()).into()); - Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); - } - RestoreTo { - donor, - dest, - code_hash, - rent_allowance, - delta, - } => { - let result = Self::restore_to( - donor.clone(), dest.clone(), code_hash.clone(), rent_allowance.clone(), delta - ); - Self::deposit_event( - RawEvent::Restored(donor, dest, code_hash, rent_allowance, result.is_ok()) - ); - } - } - }); - - result - } - - fn restore_to( - origin: T::AccountId, - dest: T::AccountId, - code_hash: CodeHash, - rent_allowance: BalanceOf, - delta: Vec - ) -> DispatchResult { - let mut origin_contract = >::get(&origin) - .and_then(|c| c.get_alive()) - .ok_or(Error::::InvalidSourceContract)?; - - let current_block = >::block_number(); - - if origin_contract.last_write == Some(current_block) { - Err(Error::::InvalidContractOrigin)? - } - - let dest_tombstone = >::get(&dest) - .and_then(|c| c.get_tombstone()) - .ok_or(Error::::InvalidDestinationContract)?; - - let last_write = if !delta.is_empty() { - Some(current_block) - } else { - origin_contract.last_write - }; - - let key_values_taken = delta.iter() - .filter_map(|key| { - child::get_raw( - &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), - &blake2_256(key), - ).map(|value| { - child::kill( - &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), - &blake2_256(key), - ); - - (key, value) - }) - }) - .collect::>(); - - let tombstone = >::new( - // This operation is cheap enough because last_write (delta not included) - // is not this block as it has been checked earlier. 
- &child::child_root( - &origin_contract.trie_id, - )[..], - code_hash, - ); - - if tombstone != dest_tombstone { - for (key, value) in key_values_taken { - child::put_raw( - &origin_contract.trie_id, - origin_contract.child_trie_unique_id(), - &blake2_256(key), - &value, - ); - } - - return Err(Error::::InvalidTombstone.into()); - } - - origin_contract.storage_size -= key_values_taken.iter() - .map(|(_, value)| value.len() as u32) - .sum::(); - - >::remove(&origin); - >::insert(&dest, ContractInfo::Alive(RawAliveContractInfo { - trie_id: origin_contract.trie_id, - storage_size: origin_contract.storage_size, - code_hash, - rent_allowance, - deduct_block: current_block, - last_write, - })); - - let origin_free_balance = T::Currency::free_balance(&origin); - T::Currency::make_free_balance_be(&origin, >::zero()); - T::Currency::deposit_creating(&dest, origin_free_balance); - - Ok(()) - } + fn execute_wasm( + origin: T::AccountId, + gas_limit: Gas, + func: impl FnOnce(&mut ExecutionContext, &mut GasMeter) -> ExecResult, + ) -> ExecResult { + // Pay for the gas upfront. + // + // NOTE: it is very important to avoid any state changes before + // paying for the gas. + let (mut gas_meter, imbalance) = try_or_exec_error!( + gas::buy_gas::(&origin, gas_limit), + // We don't have a spare buffer here in the first place, so create a new empty one. + Vec::new() + ); + + let cfg = Config::preload(); + let vm = WasmVm::new(&cfg.schedule); + let loader = WasmLoader::new(&cfg.schedule); + let mut ctx = ExecutionContext::top_level(origin.clone(), &cfg, &vm, &loader); + + let result = func(&mut ctx, &mut gas_meter); + + if result + .as_ref() + .map(|output| output.is_success()) + .unwrap_or(false) + { + // Commit all changes that made it thus far into the persistent storage. + DirectAccountDb.commit(ctx.overlay.into_change_set()); + } + + // Refund cost of the unused gas. 
+ // + // NOTE: This should go after the commit to the storage, since the storage changes + // can alter the balance of the caller. + gas::refund_unused_gas::(&origin, gas_meter, imbalance); + + // Execute deferred actions. + ctx.deferred.into_iter().for_each(|deferred| { + use self::exec::DeferredAction::*; + match deferred { + DepositEvent { topics, event } => >::deposit_event_indexed( + &*topics, + ::Event::from(event).into(), + ), + DispatchRuntimeCall { origin: who, call } => { + let result = call.dispatch(RawOrigin::Signed(who.clone()).into()); + Self::deposit_event(RawEvent::Dispatched(who, result.is_ok())); + } + RestoreTo { + donor, + dest, + code_hash, + rent_allowance, + delta, + } => { + let result = Self::restore_to( + donor.clone(), + dest.clone(), + code_hash.clone(), + rent_allowance.clone(), + delta, + ); + Self::deposit_event(RawEvent::Restored( + donor, + dest, + code_hash, + rent_allowance, + result.is_ok(), + )); + } + } + }); + + result + } + + fn restore_to( + origin: T::AccountId, + dest: T::AccountId, + code_hash: CodeHash, + rent_allowance: BalanceOf, + delta: Vec, + ) -> DispatchResult { + let mut origin_contract = >::get(&origin) + .and_then(|c| c.get_alive()) + .ok_or(Error::::InvalidSourceContract)?; + + let current_block = >::block_number(); + + if origin_contract.last_write == Some(current_block) { + Err(Error::::InvalidContractOrigin)? 
+ } + + let dest_tombstone = >::get(&dest) + .and_then(|c| c.get_tombstone()) + .ok_or(Error::::InvalidDestinationContract)?; + + let last_write = if !delta.is_empty() { + Some(current_block) + } else { + origin_contract.last_write + }; + + let key_values_taken = delta + .iter() + .filter_map(|key| { + child::get_raw( + &origin_contract.trie_id, + origin_contract.child_trie_unique_id(), + &blake2_256(key), + ) + .map(|value| { + child::kill( + &origin_contract.trie_id, + origin_contract.child_trie_unique_id(), + &blake2_256(key), + ); + + (key, value) + }) + }) + .collect::>(); + + let tombstone = >::new( + // This operation is cheap enough because last_write (delta not included) + // is not this block as it has been checked earlier. + &child::child_root(&origin_contract.trie_id)[..], + code_hash, + ); + + if tombstone != dest_tombstone { + for (key, value) in key_values_taken { + child::put_raw( + &origin_contract.trie_id, + origin_contract.child_trie_unique_id(), + &blake2_256(key), + &value, + ); + } + + return Err(Error::::InvalidTombstone.into()); + } + + origin_contract.storage_size -= key_values_taken + .iter() + .map(|(_, value)| value.len() as u32) + .sum::(); + + >::remove(&origin); + >::insert( + &dest, + ContractInfo::Alive(RawAliveContractInfo { + trie_id: origin_contract.trie_id, + storage_size: origin_contract.storage_size, + code_hash, + rent_allowance, + deduct_block: current_block, + last_write, + }), + ); + + let origin_free_balance = T::Currency::free_balance(&origin); + T::Currency::make_free_balance_be(&origin, >::zero()); + T::Currency::deposit_creating(&dest, origin_free_balance); + + Ok(()) + } } decl_event! { - pub enum Event - where - Balance = BalanceOf, - ::AccountId, - ::Hash - { - /// Transfer happened `from` to `to` with given `value` as part of a `call` or `instantiate`. - Transfer(AccountId, AccountId, Balance), - - /// Contract deployed by address at the specified address. 
- Instantiated(AccountId, AccountId), - - /// Contract has been evicted and is now in tombstone state. - /// - /// # Params - /// - /// - `contract`: `AccountId`: The account ID of the evicted contract. - /// - `tombstone`: `bool`: True if the evicted contract left behind a tombstone. - Evicted(AccountId, bool), - - /// Restoration for a contract has been initiated. - /// - /// # Params - /// - /// - `donor`: `AccountId`: Account ID of the restoring contract - /// - `dest`: `AccountId`: Account ID of the restored contract - /// - `code_hash`: `Hash`: Code hash of the restored contract - /// - `rent_allowance: `Balance`: Rent allowance of the restored contract - /// - `success`: `bool`: True if the restoration was successful - Restored(AccountId, AccountId, Hash, Balance, bool), - - /// Code with the specified hash has been stored. - CodeStored(Hash), - - /// Triggered when the current schedule is updated. - ScheduleUpdated(u32), - - /// A call was dispatched from the given account. The bool signals whether it was - /// successful execution or not. - Dispatched(AccountId, bool), - - /// An event deposited upon execution of a contract from the account. - ContractExecution(AccountId, Vec), - } + pub enum Event + where + Balance = BalanceOf, + ::AccountId, + ::Hash + { + /// Transfer happened `from` to `to` with given `value` as part of a `call` or `instantiate`. + Transfer(AccountId, AccountId, Balance), + + /// Contract deployed by address at the specified address. + Instantiated(AccountId, AccountId), + + /// Contract has been evicted and is now in tombstone state. + /// + /// # Params + /// + /// - `contract`: `AccountId`: The account ID of the evicted contract. + /// - `tombstone`: `bool`: True if the evicted contract left behind a tombstone. + Evicted(AccountId, bool), + + /// Restoration for a contract has been initiated. 
+ /// + /// # Params + /// + /// - `donor`: `AccountId`: Account ID of the restoring contract + /// - `dest`: `AccountId`: Account ID of the restored contract + /// - `code_hash`: `Hash`: Code hash of the restored contract + /// - `rent_allowance: `Balance`: Rent allowance of the restored contract + /// - `success`: `bool`: True if the restoration was successful + Restored(AccountId, AccountId, Hash, Balance, bool), + + /// Code with the specified hash has been stored. + CodeStored(Hash), + + /// Triggered when the current schedule is updated. + ScheduleUpdated(u32), + + /// A call was dispatched from the given account. The bool signals whether it was + /// successful execution or not. + Dispatched(AccountId, bool), + + /// An event deposited upon execution of a contract from the account. + ContractExecution(AccountId, Vec), + } } decl_storage! { - trait Store for Module as Contracts { - /// Gas spent so far in this block. - GasSpent get(fn gas_spent): Gas; - /// Current cost schedule for contracts. - CurrentSchedule get(fn current_schedule) config(): Schedule = Schedule::default(); - /// A mapping from an original code hash to the original code, untouched by instrumentation. - pub PristineCode: map hasher(identity) CodeHash => Option>; - /// A mapping between an original code hash and instrumented wasm code, ready for execution. - pub CodeStorage: map hasher(identity) CodeHash => Option; - /// The subtrie counter. - pub AccountCounter: u64 = 0; - /// The code associated with a given account. - pub ContractInfoOf: map hasher(twox_64_concat) T::AccountId => Option>; - /// The price of one unit of gas. - GasPrice get(fn gas_price) config(): BalanceOf = 1.into(); - } + trait Store for Module as Contracts { + /// Gas spent so far in this block. + GasSpent get(fn gas_spent): Gas; + /// Current cost schedule for contracts. 
+ CurrentSchedule get(fn current_schedule) config(): Schedule = Schedule::default(); + /// A mapping from an original code hash to the original code, untouched by instrumentation. + pub PristineCode: map hasher(identity) CodeHash => Option>; + /// A mapping between an original code hash and instrumented wasm code, ready for execution. + pub CodeStorage: map hasher(identity) CodeHash => Option; + /// The subtrie counter. + pub AccountCounter: u64 = 0; + /// The code associated with a given account. + pub ContractInfoOf: map hasher(twox_64_concat) T::AccountId => Option>; + /// The price of one unit of gas. + GasPrice get(fn gas_price) config(): BalanceOf = 1.into(); + } } /// In-memory cache of configuration values. @@ -954,117 +979,117 @@ decl_storage! { /// We assume that these values can't be changed in the /// course of transaction execution. pub struct Config { - pub schedule: Schedule, - pub existential_deposit: BalanceOf, - pub tombstone_deposit: BalanceOf, - pub max_depth: u32, - pub max_value_size: u32, - pub contract_account_instantiate_fee: BalanceOf, + pub schedule: Schedule, + pub existential_deposit: BalanceOf, + pub tombstone_deposit: BalanceOf, + pub max_depth: u32, + pub max_value_size: u32, + pub contract_account_instantiate_fee: BalanceOf, } impl Config { - fn preload() -> Config { - Config { - schedule: >::current_schedule(), - existential_deposit: T::Currency::minimum_balance(), - tombstone_deposit: T::TombstoneDeposit::get(), - max_depth: T::MaxDepth::get(), - max_value_size: T::MaxValueSize::get(), - contract_account_instantiate_fee: T::ContractFee::get(), - } - } + fn preload() -> Config { + Config { + schedule: >::current_schedule(), + existential_deposit: T::Currency::minimum_balance(), + tombstone_deposit: T::TombstoneDeposit::get(), + max_depth: T::MaxDepth::get(), + max_value_size: T::MaxValueSize::get(), + contract_account_instantiate_fee: T::ContractFee::get(), + } + } } /// Definition of the cost schedule and other parameterizations 
for wasm vm. #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] pub struct Schedule { - /// Version of the schedule. - pub version: u32, + /// Version of the schedule. + pub version: u32, - /// Cost of putting a byte of code into storage. - pub put_code_per_byte_cost: Gas, + /// Cost of putting a byte of code into storage. + pub put_code_per_byte_cost: Gas, - /// Gas cost of a growing memory by single page. - pub grow_mem_cost: Gas, + /// Gas cost of a growing memory by single page. + pub grow_mem_cost: Gas, - /// Gas cost of a regular operation. - pub regular_op_cost: Gas, + /// Gas cost of a regular operation. + pub regular_op_cost: Gas, - /// Gas cost per one byte returned. - pub return_data_per_byte_cost: Gas, + /// Gas cost per one byte returned. + pub return_data_per_byte_cost: Gas, - /// Gas cost to deposit an event; the per-byte portion. - pub event_data_per_byte_cost: Gas, + /// Gas cost to deposit an event; the per-byte portion. + pub event_data_per_byte_cost: Gas, - /// Gas cost to deposit an event; the cost per topic. - pub event_per_topic_cost: Gas, + /// Gas cost to deposit an event; the cost per topic. + pub event_per_topic_cost: Gas, - /// Gas cost to deposit an event; the base. - pub event_base_cost: Gas, + /// Gas cost to deposit an event; the base. + pub event_base_cost: Gas, - /// Base gas cost to call into a contract. - pub call_base_cost: Gas, + /// Base gas cost to call into a contract. + pub call_base_cost: Gas, - /// Base gas cost to instantiate a contract. - pub instantiate_base_cost: Gas, + /// Base gas cost to instantiate a contract. + pub instantiate_base_cost: Gas, - /// Gas cost per one byte read from the sandbox memory. - pub sandbox_data_read_cost: Gas, + /// Gas cost per one byte read from the sandbox memory. + pub sandbox_data_read_cost: Gas, - /// Gas cost per one byte written to the sandbox memory. 
- pub sandbox_data_write_cost: Gas, + /// Gas cost per one byte written to the sandbox memory. + pub sandbox_data_write_cost: Gas, - /// Cost for a simple balance transfer. - pub transfer_cost: Gas, + /// Cost for a simple balance transfer. + pub transfer_cost: Gas, - /// The maximum number of topics supported by an event. - pub max_event_topics: u32, + /// The maximum number of topics supported by an event. + pub max_event_topics: u32, - /// Maximum allowed stack height. - /// - /// See https://wiki.parity.io/WebAssembly-StackHeight to find out - /// how the stack frame cost is calculated. - pub max_stack_height: u32, + /// Maximum allowed stack height. + /// + /// See https://wiki.parity.io/WebAssembly-StackHeight to find out + /// how the stack frame cost is calculated. + pub max_stack_height: u32, - /// Maximum number of memory pages allowed for a contract. - pub max_memory_pages: u32, + /// Maximum number of memory pages allowed for a contract. + pub max_memory_pages: u32, - /// Maximum allowed size of a declared table. - pub max_table_size: u32, + /// Maximum allowed size of a declared table. + pub max_table_size: u32, - /// Whether the `ext_println` function is allowed to be used contracts. - /// MUST only be enabled for `dev` chains, NOT for production chains - pub enable_println: bool, + /// Whether the `ext_println` function is allowed to be used contracts. + /// MUST only be enabled for `dev` chains, NOT for production chains + pub enable_println: bool, - /// The maximum length of a subject used for PRNG generation. - pub max_subject_len: u32, + /// The maximum length of a subject used for PRNG generation. 
+ pub max_subject_len: u32, } impl Default for Schedule { - fn default() -> Schedule { - Schedule { - version: 0, - put_code_per_byte_cost: 1, - grow_mem_cost: 1, - regular_op_cost: 1, - return_data_per_byte_cost: 1, - event_data_per_byte_cost: 1, - event_per_topic_cost: 1, - event_base_cost: 1, - call_base_cost: 135, - instantiate_base_cost: 175, - sandbox_data_read_cost: 1, - sandbox_data_write_cost: 1, - transfer_cost: 100, - max_event_topics: 4, - max_stack_height: 64 * 1024, - max_memory_pages: 16, - max_table_size: 16 * 1024, - enable_println: false, - max_subject_len: 32, - } - } + fn default() -> Schedule { + Schedule { + version: 0, + put_code_per_byte_cost: 1, + grow_mem_cost: 1, + regular_op_cost: 1, + return_data_per_byte_cost: 1, + event_data_per_byte_cost: 1, + event_per_topic_cost: 1, + event_base_cost: 1, + call_base_cost: 135, + instantiate_base_cost: 175, + sandbox_data_read_cost: 1, + sandbox_data_write_cost: 1, + transfer_cost: 100, + max_event_topics: 4, + max_stack_height: 64 * 1024, + max_memory_pages: 16, + max_table_size: 16 * 1024, + enable_println: false, + max_subject_len: 32, + } + } } /// `SignedExtension` that checks if a transaction would exhausts the block gas limit. 
@@ -1072,62 +1097,64 @@ impl Default for Schedule { pub struct CheckBlockGasLimit(PhantomData); impl Default for CheckBlockGasLimit { - fn default() -> Self { - Self(PhantomData) - } + fn default() -> Self { + Self(PhantomData) + } } impl sp_std::fmt::Debug for CheckBlockGasLimit { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckBlockGasLimit") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckBlockGasLimit") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } impl SignedExtension for CheckBlockGasLimit { - const IDENTIFIER: &'static str = "CheckBlockGasLimit"; - type AccountId = T::AccountId; - type Call = ::Call; - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - - fn validate( - &self, - _: &Self::AccountId, - call: &Self::Call, - _: &DispatchInfoOf, - _: usize, - ) -> TransactionValidity { - let call = match call.is_sub_type() { - Some(call) => call, - None => return Ok(ValidTransaction::default()), - }; - - match call { - Call::claim_surcharge(_, _) | Call::update_schedule(_) => - Ok(ValidTransaction::default()), - Call::put_code(gas_limit, _) - | Call::call(_, _, gas_limit, _) - | Call::instantiate(_, gas_limit, _, _) - => { - // Check if the specified amount of gas is available in the current block. - // This cannot underflow since `gas_spent` is never greater than `T::BlockGasLimit`. 
- let gas_available = T::BlockGasLimit::get() - >::gas_spent(); - if *gas_limit > gas_available { - // gas limit reached, revert the transaction and retry again in the future - InvalidTransaction::ExhaustsResources.into() - } else { - Ok(ValidTransaction::default()) - } - }, - Call::__PhantomItem(_, _) => unreachable!("Variant is never constructed"), - } - } + const IDENTIFIER: &'static str = "CheckBlockGasLimit"; + type AccountId = T::AccountId; + type Call = ::Call; + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + + fn validate( + &self, + _: &Self::AccountId, + call: &Self::Call, + _: &DispatchInfoOf, + _: usize, + ) -> TransactionValidity { + let call = match call.is_sub_type() { + Some(call) => call, + None => return Ok(ValidTransaction::default()), + }; + + match call { + Call::claim_surcharge(_, _) | Call::update_schedule(_) => { + Ok(ValidTransaction::default()) + } + Call::put_code(gas_limit, _) + | Call::call(_, _, gas_limit, _) + | Call::instantiate(_, gas_limit, _, _) => { + // Check if the specified amount of gas is available in the current block. + // This cannot underflow since `gas_spent` is never greater than `T::BlockGasLimit`. + let gas_available = T::BlockGasLimit::get() - >::gas_spent(); + if *gas_limit > gas_available { + // gas limit reached, revert the transaction and retry again in the future + InvalidTransaction::ExhaustsResources.into() + } else { + Ok(ValidTransaction::default()) + } + } + Call::__PhantomItem(_, _) => unreachable!("Variant is never constructed"), + } + } } diff --git a/frame/contracts/src/rent.rs b/frame/contracts/src/rent.rs index 8b6825419c..d0e62923a8 100644 --- a/frame/contracts/src/rent.rs +++ b/frame/contracts/src/rent.rs @@ -17,8 +17,8 @@ //! A module responsible for computing the right amount of weight and charging it. 
use crate::{ - AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, RawEvent, - TombstoneContractInfo, Trait, + AliveContractInfo, BalanceOf, ContractInfo, ContractInfoOf, Module, RawEvent, + TombstoneContractInfo, Trait, }; use frame_support::storage::child; use frame_support::traits::{Currency, ExistenceRequirement, Get, OnUnbalanced, WithdrawReason}; @@ -31,53 +31,53 @@ use sp_runtime::traits::{Bounded, CheckedDiv, CheckedMul, SaturatedConversion, S /// This amount respects the contract's rent allowance and the subsistence deposit. /// Because of that, charging the amount cannot remove the contract. struct OutstandingAmount { - amount: BalanceOf, + amount: BalanceOf, } impl OutstandingAmount { - /// Create the new outstanding amount. - /// - /// The amount should be always withdrawable and it should not kill the account. - fn new(amount: BalanceOf) -> Self { - Self { amount } - } - - /// Returns the amount this instance wraps. - fn peek(&self) -> BalanceOf { - self.amount - } - - /// Withdraws the outstanding amount from the given account. - fn withdraw(self, account: &T::AccountId) { - if let Ok(imbalance) = T::Currency::withdraw( - account, - self.amount, - WithdrawReason::Fee.into(), - ExistenceRequirement::KeepAlive, - ) { - // This should never fail. However, let's err on the safe side. - T::RentPayment::on_unbalanced(imbalance); - } - } + /// Create the new outstanding amount. + /// + /// The amount should be always withdrawable and it should not kill the account. + fn new(amount: BalanceOf) -> Self { + Self { amount } + } + + /// Returns the amount this instance wraps. + fn peek(&self) -> BalanceOf { + self.amount + } + + /// Withdraws the outstanding amount from the given account. + fn withdraw(self, account: &T::AccountId) { + if let Ok(imbalance) = T::Currency::withdraw( + account, + self.amount, + WithdrawReason::Fee.into(), + ExistenceRequirement::KeepAlive, + ) { + // This should never fail. However, let's err on the safe side. 
+ T::RentPayment::on_unbalanced(imbalance); + } + } } enum Verdict { - /// The contract is exempted from paying rent. - /// - /// For example, it already paid its rent in the current block, or it has enough deposit for not - /// paying rent at all. - Exempt, - /// Funds dropped below the subsistence deposit. - /// - /// Remove the contract along with it's storage. - Kill, - /// The contract cannot afford payment within its rent budget so it gets evicted. However, - /// because its balance is greater than the subsistence threshold it leaves a tombstone. - Evict { - amount: Option>, - }, - /// Everything is OK, we just only take some charge. - Charge { amount: OutstandingAmount }, + /// The contract is exempted from paying rent. + /// + /// For example, it already paid its rent in the current block, or it has enough deposit for not + /// paying rent at all. + Exempt, + /// Funds dropped below the subsistence deposit. + /// + /// Remove the contract along with it's storage. + Kill, + /// The contract cannot afford payment within its rent budget so it gets evicted. However, + /// because its balance is greater than the subsistence threshold it leaves a tombstone. + Evict { + amount: Option>, + }, + /// Everything is OK, we just only take some charge. + Charge { amount: OutstandingAmount }, } /// Returns a fee charged per block from the contract. @@ -85,19 +85,19 @@ enum Verdict { /// This function accounts for the storage rent deposit. I.e. if the contract possesses enough funds /// then the fee can drop to zero. 
fn compute_fee_per_block( - balance: &BalanceOf, - contract: &AliveContractInfo, + balance: &BalanceOf, + contract: &AliveContractInfo, ) -> BalanceOf { - let free_storage = balance - .checked_div(&T::RentDepositOffset::get()) - .unwrap_or_else(Zero::zero); + let free_storage = balance + .checked_div(&T::RentDepositOffset::get()) + .unwrap_or_else(Zero::zero); - let effective_storage_size = - >::from(contract.storage_size).saturating_sub(free_storage); + let effective_storage_size = + >::from(contract.storage_size).saturating_sub(free_storage); - effective_storage_size - .checked_mul(&T::RentByteFee::get()) - .unwrap_or(>::max_value()) + effective_storage_size + .checked_mul(&T::RentByteFee::get()) + .unwrap_or(>::max_value()) } /// Subsistence threshold is the extension of the minimum balance (aka existential deposit) by the @@ -105,7 +105,7 @@ fn compute_fee_per_block( /// /// Rent mechanism cannot make the balance lower than subsistence threshold. fn subsistence_threshold() -> BalanceOf { - T::Currency::minimum_balance() + T::TombstoneDeposit::get() + T::Currency::minimum_balance() + T::TombstoneDeposit::get() } /// Returns amount of funds available to consume by rent mechanism. @@ -115,19 +115,19 @@ fn subsistence_threshold() -> BalanceOf { /// /// In case the balance is below the subsistence threshold, this function returns `None`. 
fn rent_budget( - balance: &BalanceOf, - contract: &AliveContractInfo, + balance: &BalanceOf, + contract: &AliveContractInfo, ) -> Option> { - let subsistence_threshold = subsistence_threshold::(); - if *balance < subsistence_threshold { - return None; - } - - let rent_allowed_to_charge = *balance - subsistence_threshold; - Some(>::min( - contract.rent_allowance, - rent_allowed_to_charge, - )) + let subsistence_threshold = subsistence_threshold::(); + if *balance < subsistence_threshold { + return None; + } + + let rent_allowed_to_charge = *balance - subsistence_threshold; + Some(>::min( + contract.rent_allowance, + rent_allowed_to_charge, + )) } /// Consider the case for rent payment of the given account and returns a `Verdict`. @@ -135,135 +135,135 @@ fn rent_budget( /// Use `handicap` in case you want to change the reference block number. (To get more details see /// `snitch_contract_should_be_evicted` ). fn consider_case( - account: &T::AccountId, - current_block_number: T::BlockNumber, - handicap: T::BlockNumber, - contract: &AliveContractInfo, + account: &T::AccountId, + current_block_number: T::BlockNumber, + handicap: T::BlockNumber, + contract: &AliveContractInfo, ) -> Verdict { - // How much block has passed since the last deduction for the contract. - let blocks_passed = { - // Calculate an effective block number, i.e. after adjusting for handicap. - let effective_block_number = current_block_number.saturating_sub(handicap); - effective_block_number.saturating_sub(contract.deduct_block) - }; - if blocks_passed.is_zero() { - // Rent has already been paid - return Verdict::Exempt; - } - - let balance = T::Currency::free_balance(account); - - // An amount of funds to charge per block for storage taken up by the contract. - let fee_per_block = compute_fee_per_block::(&balance, contract); - if fee_per_block.is_zero() { - // The rent deposit offset reduced the fee to 0. This means that the contract - // gets the rent for free. 
- return Verdict::Exempt; - } - - let rent_budget = match rent_budget::(&balance, contract) { - Some(rent_budget) => rent_budget, - None => { - // The contract's balance is already below subsistence threshold. That indicates that - // the contract cannot afford to leave a tombstone. - // - // So cleanly wipe the contract. - return Verdict::Kill; - } - }; - - let dues = fee_per_block - .checked_mul(&blocks_passed.saturated_into::().into()) - .unwrap_or(>::max_value()); - let insufficient_rent = rent_budget < dues; - - // If the rent payment cannot be withdrawn due to locks on the account balance, then evict the - // account. - // - // NOTE: This seems problematic because it provides a way to tombstone an account while - // avoiding the last rent payment. In effect, someone could retroactively set rent_allowance - // for their contract to 0. - let dues_limited = dues.min(rent_budget); - let can_withdraw_rent = T::Currency::ensure_can_withdraw( - account, - dues_limited, - WithdrawReason::Fee.into(), - balance.saturating_sub(dues_limited), - ) - .is_ok(); - - if insufficient_rent || !can_withdraw_rent { - // The contract cannot afford the rent payment and has a balance above the subsistence - // threshold, so it leaves a tombstone. - let amount = if can_withdraw_rent { - Some(OutstandingAmount::new(dues_limited)) - } else { - None - }; - return Verdict::Evict { amount }; - } - - return Verdict::Charge { - // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. - amount: OutstandingAmount::new(dues_limited), - }; + // How much block has passed since the last deduction for the contract. + let blocks_passed = { + // Calculate an effective block number, i.e. after adjusting for handicap. 
+ let effective_block_number = current_block_number.saturating_sub(handicap); + effective_block_number.saturating_sub(contract.deduct_block) + }; + if blocks_passed.is_zero() { + // Rent has already been paid + return Verdict::Exempt; + } + + let balance = T::Currency::free_balance(account); + + // An amount of funds to charge per block for storage taken up by the contract. + let fee_per_block = compute_fee_per_block::(&balance, contract); + if fee_per_block.is_zero() { + // The rent deposit offset reduced the fee to 0. This means that the contract + // gets the rent for free. + return Verdict::Exempt; + } + + let rent_budget = match rent_budget::(&balance, contract) { + Some(rent_budget) => rent_budget, + None => { + // The contract's balance is already below subsistence threshold. That indicates that + // the contract cannot afford to leave a tombstone. + // + // So cleanly wipe the contract. + return Verdict::Kill; + } + }; + + let dues = fee_per_block + .checked_mul(&blocks_passed.saturated_into::().into()) + .unwrap_or(>::max_value()); + let insufficient_rent = rent_budget < dues; + + // If the rent payment cannot be withdrawn due to locks on the account balance, then evict the + // account. + // + // NOTE: This seems problematic because it provides a way to tombstone an account while + // avoiding the last rent payment. In effect, someone could retroactively set rent_allowance + // for their contract to 0. + let dues_limited = dues.min(rent_budget); + let can_withdraw_rent = T::Currency::ensure_can_withdraw( + account, + dues_limited, + WithdrawReason::Fee.into(), + balance.saturating_sub(dues_limited), + ) + .is_ok(); + + if insufficient_rent || !can_withdraw_rent { + // The contract cannot afford the rent payment and has a balance above the subsistence + // threshold, so it leaves a tombstone. 
+ let amount = if can_withdraw_rent { + Some(OutstandingAmount::new(dues_limited)) + } else { + None + }; + return Verdict::Evict { amount }; + } + + return Verdict::Charge { + // We choose to use `dues_limited` here instead of `dues` just to err on the safer side. + amount: OutstandingAmount::new(dues_limited), + }; } /// Enacts the given verdict and returns the updated `ContractInfo`. /// /// `alive_contract_info` should be from the same address as `account`. fn enact_verdict( - account: &T::AccountId, - alive_contract_info: AliveContractInfo, - current_block_number: T::BlockNumber, - verdict: Verdict, + account: &T::AccountId, + alive_contract_info: AliveContractInfo, + current_block_number: T::BlockNumber, + verdict: Verdict, ) -> Option> { - match verdict { - Verdict::Exempt => return Some(ContractInfo::Alive(alive_contract_info)), - Verdict::Kill => { - >::remove(account); - child::kill_storage( - &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), - ); - >::deposit_event(RawEvent::Evicted(account.clone(), false)); - None - } - Verdict::Evict { amount } => { - if let Some(amount) = amount { - amount.withdraw(account); - } - - // Note: this operation is heavy. 
- let child_storage_root = child::child_root(&alive_contract_info.trie_id); - - let tombstone = >::new( - &child_storage_root[..], - alive_contract_info.code_hash, - ); - let tombstone_info = ContractInfo::Tombstone(tombstone); - >::insert(account, &tombstone_info); - - child::kill_storage( - &alive_contract_info.trie_id, - alive_contract_info.child_trie_unique_id(), - ); - - >::deposit_event(RawEvent::Evicted(account.clone(), true)); - Some(tombstone_info) - } - Verdict::Charge { amount } => { - let contract_info = ContractInfo::Alive(AliveContractInfo:: { - rent_allowance: alive_contract_info.rent_allowance - amount.peek(), - deduct_block: current_block_number, - ..alive_contract_info - }); - >::insert(account, &contract_info); - - amount.withdraw(account); - Some(contract_info) - } - } + match verdict { + Verdict::Exempt => return Some(ContractInfo::Alive(alive_contract_info)), + Verdict::Kill => { + >::remove(account); + child::kill_storage( + &alive_contract_info.trie_id, + alive_contract_info.child_trie_unique_id(), + ); + >::deposit_event(RawEvent::Evicted(account.clone(), false)); + None + } + Verdict::Evict { amount } => { + if let Some(amount) = amount { + amount.withdraw(account); + } + + // Note: this operation is heavy. 
+ let child_storage_root = child::child_root(&alive_contract_info.trie_id); + + let tombstone = >::new( + &child_storage_root[..], + alive_contract_info.code_hash, + ); + let tombstone_info = ContractInfo::Tombstone(tombstone); + >::insert(account, &tombstone_info); + + child::kill_storage( + &alive_contract_info.trie_id, + alive_contract_info.child_trie_unique_id(), + ); + + >::deposit_event(RawEvent::Evicted(account.clone(), true)); + Some(tombstone_info) + } + Verdict::Charge { amount } => { + let contract_info = ContractInfo::Alive(AliveContractInfo:: { + rent_allowance: alive_contract_info.rent_allowance - amount.peek(), + deduct_block: current_block_number, + ..alive_contract_info + }); + >::insert(account, &contract_info); + + amount.withdraw(account); + Some(contract_info) + } + } } /// Make account paying the rent for the current block number @@ -271,20 +271,20 @@ fn enact_verdict( /// NOTE this function performs eviction eagerly. All changes are read and written directly to /// storage. 
pub fn collect_rent(account: &T::AccountId) -> Option> { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return contract_info, - Some(ContractInfo::Alive(contract)) => contract, - }; - - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - enact_verdict(account, alive_contract_info, current_block_number, verdict) + let contract_info = >::get(account); + let alive_contract_info = match contract_info { + None | Some(ContractInfo::Tombstone(_)) => return contract_info, + Some(ContractInfo::Alive(contract)) => contract, + }; + + let current_block_number = >::block_number(); + let verdict = consider_case::( + account, + current_block_number, + Zero::zero(), + &alive_contract_info, + ); + enact_verdict(account, alive_contract_info, current_block_number, verdict) } /// Process a report that a contract under the given address should be evicted. @@ -300,30 +300,30 @@ pub fn collect_rent(account: &T::AccountId) -> Option> /// NOTE this function performs eviction eagerly. All changes are read and written directly to /// storage. pub fn snitch_contract_should_be_evicted( - account: &T::AccountId, - handicap: T::BlockNumber, + account: &T::AccountId, + handicap: T::BlockNumber, ) -> bool { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return false, - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - handicap, - &alive_contract_info, - ); - - // Enact the verdict only if the contract gets removed. - match verdict { - Verdict::Kill | Verdict::Evict { .. 
} => { - enact_verdict(account, alive_contract_info, current_block_number, verdict); - true - } - _ => false, - } + let contract_info = >::get(account); + let alive_contract_info = match contract_info { + None | Some(ContractInfo::Tombstone(_)) => return false, + Some(ContractInfo::Alive(contract)) => contract, + }; + let current_block_number = >::block_number(); + let verdict = consider_case::( + account, + current_block_number, + handicap, + &alive_contract_info, + ); + + // Enact the verdict only if the contract gets removed. + match verdict { + Verdict::Kill | Verdict::Evict { .. } => { + enact_verdict(account, alive_contract_info, current_block_number, verdict); + true + } + _ => false, + } } /// Returns the projected time a given contract will be able to sustain paying its rent. The @@ -338,56 +338,56 @@ pub fn snitch_contract_should_be_evicted( /// compute the projection. This function is only used for implementation of an RPC method through /// `RuntimeApi` meaning that the changes will be discarded anyway. pub fn compute_rent_projection( - account: &T::AccountId, + account: &T::AccountId, ) -> RentProjectionResult { - let contract_info = >::get(account); - let alive_contract_info = match contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - let current_block_number = >::block_number(); - let verdict = consider_case::( - account, - current_block_number, - Zero::zero(), - &alive_contract_info, - ); - let new_contract_info = - enact_verdict(account, alive_contract_info, current_block_number, verdict); - - // Check what happened after enaction of the verdict. - let alive_contract_info = match new_contract_info { - None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), - Some(ContractInfo::Alive(contract)) => contract, - }; - - // Compute how much would the fee per block be with the *updated* balance. 
- let balance = T::Currency::free_balance(account); - let fee_per_block = compute_fee_per_block::(&balance, &alive_contract_info); - if fee_per_block.is_zero() { - return Ok(RentProjection::NoEviction); - } - - // Then compute how much the contract will sustain under these circumstances. - let rent_budget = rent_budget::(&balance, &alive_contract_info).expect( - "the contract exists and in the alive state; + let contract_info = >::get(account); + let alive_contract_info = match contract_info { + None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), + Some(ContractInfo::Alive(contract)) => contract, + }; + let current_block_number = >::block_number(); + let verdict = consider_case::( + account, + current_block_number, + Zero::zero(), + &alive_contract_info, + ); + let new_contract_info = + enact_verdict(account, alive_contract_info, current_block_number, verdict); + + // Check what happened after enaction of the verdict. + let alive_contract_info = match new_contract_info { + None | Some(ContractInfo::Tombstone(_)) => return Err(ContractAccessError::IsTombstone), + Some(ContractInfo::Alive(contract)) => contract, + }; + + // Compute how much would the fee per block be with the *updated* balance. + let balance = T::Currency::free_balance(account); + let fee_per_block = compute_fee_per_block::(&balance, &alive_contract_info); + if fee_per_block.is_zero() { + return Ok(RentProjection::NoEviction); + } + + // Then compute how much the contract will sustain under these circumstances. + let rent_budget = rent_budget::(&balance, &alive_contract_info).expect( + "the contract exists and in the alive state; the updated balance must be greater than subsistence deposit; this function doesn't return `None`; qed ", - ); - let blocks_left = match rent_budget.checked_div(&fee_per_block) { - Some(blocks_left) => blocks_left, - None => { - // `fee_per_block` is not zero here, so `checked_div` can return `None` if - // there is an overflow. 
This cannot happen with integers though. Return - // `NoEviction` here just in case. - return Ok(RentProjection::NoEviction); - } - }; - - let blocks_left = blocks_left.saturated_into::().into(); - Ok(RentProjection::EvictionAt( - current_block_number + blocks_left, - )) + ); + let blocks_left = match rent_budget.checked_div(&fee_per_block) { + Some(blocks_left) => blocks_left, + None => { + // `fee_per_block` is not zero here, so `checked_div` can return `None` if + // there is an overflow. This cannot happen with integers though. Return + // `NoEviction` here just in case. + return Ok(RentProjection::NoEviction); + } + }; + + let blocks_left = blocks_left.saturated_into::().into(); + Ok(RentProjection::EvictionAt( + current_block_number + blocks_left, + )) } diff --git a/frame/contracts/src/tests.rs b/frame/contracts/src/tests.rs index 2bcd708904..19be8122e1 100644 --- a/frame/contracts/src/tests.rs +++ b/frame/contracts/src/tests.rs @@ -20,161 +20,175 @@ #![allow(unused)] use crate::{ - BalanceOf, ComputeDispatchFee, ContractAddressFor, ContractInfo, ContractInfoOf, GenesisConfig, - Module, RawAliveContractInfo, RawEvent, Trait, TrieId, TrieIdFromParentCounter, Schedule, - TrieIdGenerator, CheckBlockGasLimit, account_db::{AccountDb, DirectAccountDb, OverlayAccountDb}, + account_db::{AccountDb, DirectAccountDb, OverlayAccountDb}, + BalanceOf, CheckBlockGasLimit, ComputeDispatchFee, ContractAddressFor, ContractInfo, + ContractInfoOf, GenesisConfig, Module, RawAliveContractInfo, RawEvent, Schedule, Trait, TrieId, + TrieIdFromParentCounter, TrieIdGenerator, }; use assert_matches::assert_matches; -use hex_literal::*; use codec::{Decode, Encode, KeyedVec}; -use sp_runtime::{ - Perbill, BuildStorage, transaction_validity::{InvalidTransaction, ValidTransaction}, - traits::{BlakeTwo256, Hash, IdentityLookup, SignedExtension}, - testing::{Digest, DigestItem, Header, UintAuthorityId, H256}, -}; use frame_support::{ - assert_ok, assert_err, impl_outer_dispatch, 
impl_outer_event, impl_outer_origin, parameter_types, - storage::child, StorageMap, StorageValue, traits::{Currency, Get}, - weights::{DispatchInfo, DispatchClass, Weight}, + assert_err, assert_ok, impl_outer_dispatch, impl_outer_event, impl_outer_origin, + parameter_types, + storage::child, + traits::{Currency, Get}, + weights::{DispatchClass, DispatchInfo, Weight}, + StorageMap, StorageValue, }; -use std::{cell::RefCell, sync::atomic::{AtomicUsize, Ordering}}; -use sp_core::storage::well_known_keys; use frame_system::{self as system, EventRecord, Phase}; +use hex_literal::*; +use sp_core::storage::well_known_keys; +use sp_runtime::{ + testing::{Digest, DigestItem, Header, UintAuthorityId, H256}, + traits::{BlakeTwo256, Hash, IdentityLookup, SignedExtension}, + transaction_validity::{InvalidTransaction, ValidTransaction}, + BuildStorage, Perbill, +}; +use std::{ + cell::RefCell, + sync::atomic::{AtomicUsize, Ordering}, +}; mod contracts { - // Re-export contents of the root. This basically - // needs to give a name for the current crate. - // This hack is required for `impl_outer_event!`. - pub use super::super::*; - use frame_support::impl_outer_event; + // Re-export contents of the root. This basically + // needs to give a name for the current crate. + // This hack is required for `impl_outer_event!`. + pub use super::super::*; + use frame_support::impl_outer_event; } use pallet_balances as balances; impl_outer_event! { - pub enum MetaEvent for Test { - system, - balances, - contracts, - } + pub enum MetaEvent for Test { + system, + balances, + contracts, + } } impl_outer_origin! { - pub enum Origin for Test where system = frame_system { } + pub enum Origin for Test where system = frame_system { } } impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - balances::Balances, - contracts::Contracts, - } + pub enum Call for Test where origin: Origin { + balances::Balances, + contracts::Contracts, + } } thread_local! 
{ - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - static TRANSFER_FEE: RefCell = RefCell::new(0); - static INSTANTIATION_FEE: RefCell = RefCell::new(0); - static BLOCK_GAS_LIMIT: RefCell = RefCell::new(0); + static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); + static TRANSFER_FEE: RefCell = RefCell::new(0); + static INSTANTIATION_FEE: RefCell = RefCell::new(0); + static BLOCK_GAS_LIMIT: RefCell = RefCell::new(0); } pub struct ExistentialDeposit; impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } + fn get() -> u64 { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) + } } pub struct TransferFee; impl Get for TransferFee { - fn get() -> u64 { TRANSFER_FEE.with(|v| *v.borrow()) } + fn get() -> u64 { + TRANSFER_FEE.with(|v| *v.borrow()) + } } pub struct BlockGasLimit; impl Get for BlockGasLimit { - fn get() -> u64 { BLOCK_GAS_LIMIT.with(|v| *v.borrow()) } + fn get() -> u64 { + BLOCK_GAS_LIMIT.with(|v| *v.borrow()) + } } #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = MetaEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = MetaEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = MetaEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type Event = MetaEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! 
{ - pub const MinimumPeriod: u64 = 1; + pub const MinimumPeriod: u64 = 1; } impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; } parameter_types! { - pub const SignedClaimHandicap: u64 = 2; - pub const TombstoneDeposit: u64 = 16; - pub const StorageSizeOffset: u32 = 8; - pub const RentByteFee: u64 = 4; - pub const RentDepositOffset: u64 = 10_000; - pub const SurchargeReward: u64 = 150; - pub const TransactionBaseFee: u64 = 2; - pub const TransactionByteFee: u64 = 6; - pub const ContractFee: u64 = 21; - pub const CallBaseFee: u64 = 135; - pub const InstantiateBaseFee: u64 = 175; - pub const MaxDepth: u32 = 100; - pub const MaxValueSize: u32 = 16_384; + pub const SignedClaimHandicap: u64 = 2; + pub const TombstoneDeposit: u64 = 16; + pub const StorageSizeOffset: u32 = 8; + pub const RentByteFee: u64 = 4; + pub const RentDepositOffset: u64 = 10_000; + pub const SurchargeReward: u64 = 150; + pub const TransactionBaseFee: u64 = 2; + pub const TransactionByteFee: u64 = 6; + pub const ContractFee: u64 = 21; + pub const CallBaseFee: u64 = 135; + pub const InstantiateBaseFee: u64 = 175; + pub const MaxDepth: u32 = 100; + pub const MaxValueSize: u32 = 16_384; } impl Trait for Test { - type Currency = Balances; - type Time = Timestamp; - type Randomness = Randomness; - type Call = Call; - type DetermineContractAddress = DummyContractAddressFor; - type Event = MetaEvent; - type ComputeDispatchFee = DummyComputeDispatchFee; - type TrieIdGenerator = DummyTrieIdGenerator; - type GasPayment = (); - type RentPayment = (); - type SignedClaimHandicap = SignedClaimHandicap; - type TombstoneDeposit = TombstoneDeposit; - type StorageSizeOffset = StorageSizeOffset; - type RentByteFee = RentByteFee; - type RentDepositOffset = RentDepositOffset; - type SurchargeReward = SurchargeReward; - type TransactionBaseFee = 
TransactionBaseFee; - type TransactionByteFee = TransactionByteFee; - type ContractFee = ContractFee; - type CallBaseFee = CallBaseFee; - type InstantiateBaseFee = InstantiateBaseFee; - type MaxDepth = MaxDepth; - type MaxValueSize = MaxValueSize; - type BlockGasLimit = BlockGasLimit; + type Currency = Balances; + type Time = Timestamp; + type Randomness = Randomness; + type Call = Call; + type DetermineContractAddress = DummyContractAddressFor; + type Event = MetaEvent; + type ComputeDispatchFee = DummyComputeDispatchFee; + type TrieIdGenerator = DummyTrieIdGenerator; + type GasPayment = (); + type RentPayment = (); + type SignedClaimHandicap = SignedClaimHandicap; + type TombstoneDeposit = TombstoneDeposit; + type StorageSizeOffset = StorageSizeOffset; + type RentByteFee = RentByteFee; + type RentDepositOffset = RentDepositOffset; + type SurchargeReward = SurchargeReward; + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type ContractFee = ContractFee; + type CallBaseFee = CallBaseFee; + type InstantiateBaseFee = InstantiateBaseFee; + type MaxDepth = MaxDepth; + type MaxValueSize = MaxValueSize; + type BlockGasLimit = BlockGasLimit; } type Balances = pallet_balances::Module; @@ -185,36 +199,36 @@ type Randomness = pallet_randomness_collective_flip::Module; pub struct DummyContractAddressFor; impl ContractAddressFor for DummyContractAddressFor { - fn contract_address_for(_code_hash: &H256, _data: &[u8], origin: &u64) -> u64 { - *origin + 1 - } + fn contract_address_for(_code_hash: &H256, _data: &[u8], origin: &u64) -> u64 { + *origin + 1 + } } pub struct DummyTrieIdGenerator; impl TrieIdGenerator for DummyTrieIdGenerator { - fn trie_id(account_id: &u64) -> TrieId { - use sp_core::storage::well_known_keys; - - let new_seed = super::AccountCounter::mutate(|v| { - *v = v.wrapping_add(1); - *v - }); - - // TODO: see https://github.com/paritytech/substrate/issues/2325 - let mut res = vec![]; - 
res.extend_from_slice(well_known_keys::CHILD_STORAGE_KEY_PREFIX); - res.extend_from_slice(b"default:"); - res.extend_from_slice(&new_seed.to_le_bytes()); - res.extend_from_slice(&account_id.to_le_bytes()); - res - } + fn trie_id(account_id: &u64) -> TrieId { + use sp_core::storage::well_known_keys; + + let new_seed = super::AccountCounter::mutate(|v| { + *v = v.wrapping_add(1); + *v + }); + + // TODO: see https://github.com/paritytech/substrate/issues/2325 + let mut res = vec![]; + res.extend_from_slice(well_known_keys::CHILD_STORAGE_KEY_PREFIX); + res.extend_from_slice(b"default:"); + res.extend_from_slice(&new_seed.to_le_bytes()); + res.extend_from_slice(&account_id.to_le_bytes()); + res + } } pub struct DummyComputeDispatchFee; impl ComputeDispatchFee for DummyComputeDispatchFee { - fn compute_dispatch_fee(call: &Call) -> u64 { - 69 - } + fn compute_dispatch_fee(call: &Call) -> u64 { + 69 + } } const ALICE: u64 = 1; @@ -223,694 +237,871 @@ const CHARLIE: u64 = 3; const DJANGO: u64 = 4; pub struct ExtBuilder { - existential_deposit: u64, - gas_price: u64, - block_gas_limit: u64, - transfer_fee: u64, - instantiation_fee: u64, + existential_deposit: u64, + gas_price: u64, + block_gas_limit: u64, + transfer_fee: u64, + instantiation_fee: u64, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - existential_deposit: 1, - gas_price: 2, - block_gas_limit: 100_000_000, - transfer_fee: 0, - instantiation_fee: 0, - } - } + fn default() -> Self { + Self { + existential_deposit: 1, + gas_price: 2, + block_gas_limit: 100_000_000, + transfer_fee: 0, + instantiation_fee: 0, + } + } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn gas_price(mut self, gas_price: u64) -> Self { - self.gas_price = gas_price; - self - } - pub fn block_gas_limit(mut self, block_gas_limit: u64) -> Self { - self.block_gas_limit = block_gas_limit; - self - } - pub fn 
transfer_fee(mut self, transfer_fee: u64) -> Self { - self.transfer_fee = transfer_fee; - self - } - pub fn instantiation_fee(mut self, instantiation_fee: u64) -> Self { - self.instantiation_fee = instantiation_fee; - self - } - pub fn set_associated_consts(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - TRANSFER_FEE.with(|v| *v.borrow_mut() = self.transfer_fee); - INSTANTIATION_FEE.with(|v| *v.borrow_mut() = self.instantiation_fee); - BLOCK_GAS_LIMIT.with(|v| *v.borrow_mut() = self.block_gas_limit); - } - pub fn build(self) -> sp_io::TestExternalities { - self.set_associated_consts(); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig:: { - current_schedule: Schedule { - enable_println: true, - ..Default::default() - }, - gas_price: self.gas_price, - }.assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn gas_price(mut self, gas_price: u64) -> Self { + self.gas_price = gas_price; + self + } + pub fn block_gas_limit(mut self, block_gas_limit: u64) -> Self { + self.block_gas_limit = block_gas_limit; + self + } + pub fn transfer_fee(mut self, transfer_fee: u64) -> Self { + self.transfer_fee = transfer_fee; + self + } + pub fn instantiation_fee(mut self, instantiation_fee: u64) -> Self { + self.instantiation_fee = instantiation_fee; + self + } + pub fn set_associated_consts(&self) { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + TRANSFER_FEE.with(|v| *v.borrow_mut() = self.transfer_fee); + INSTANTIATION_FEE.with(|v| *v.borrow_mut() = self.instantiation_fee); + BLOCK_GAS_LIMIT.with(|v| *v.borrow_mut() = 
self.block_gas_limit); + } + pub fn build(self) -> sp_io::TestExternalities { + self.set_associated_consts(); + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { balances: vec![] } + .assimilate_storage(&mut t) + .unwrap(); + GenesisConfig:: { + current_schedule: Schedule { + enable_println: true, + ..Default::default() + }, + gas_price: self.gas_price, + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } } /// Generate Wasm binary and code hash from wabt source. -fn compile_module(wabt_module: &str) - -> Result<(Vec, ::Output), wabt::Error> - where T: frame_system::Trait +fn compile_module( + wabt_module: &str, +) -> Result<(Vec, ::Output), wabt::Error> +where + T: frame_system::Trait, { - let wasm = wabt::wat2wasm(wabt_module)?; - let code_hash = T::Hashing::hash(&wasm); - Ok((wasm, code_hash)) + let wasm = wabt::wat2wasm(wabt_module)?; + let code_hash = T::Hashing::hash(&wasm); + Ok((wasm, code_hash)) } // Perform a simple transfer to a non-existent account supplying way more gas than needed. // Then we check that the all unused gas is refunded. #[test] fn refunds_unused_gas() { - ExtBuilder::default().gas_price(2).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 100_000_000); - - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, Vec::new())); - - // 2 * 135 - gas price multiplied by the call base fee. - assert_eq!(Balances::free_balance(ALICE), 100_000_000 - (2 * 135)); - }); + ExtBuilder::default().gas_price(2).build().execute_with(|| { + Balances::deposit_creating(&ALICE, 100_000_000); + + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + Vec::new() + )); + + // 2 * 135 - gas price multiplied by the call base fee. 
+ assert_eq!(Balances::free_balance(ALICE), 100_000_000 - (2 * 135)); + }); } #[test] fn account_removal_does_not_remove_storage() { - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - let trie_id1 = ::TrieIdGenerator::trie_id(&1); - let trie_id2 = ::TrieIdGenerator::trie_id(&2); - let key1 = &[1; 32]; - let key2 = &[2; 32]; - - // Set up two accounts with free balance above the existential threshold. - { - Balances::deposit_creating(&1, 110); - ContractInfoOf::::insert(1, &ContractInfo::Alive(RawAliveContractInfo { - trie_id: trie_id1.clone(), - storage_size: ::StorageSizeOffset::get(), - deduct_block: System::block_number(), - code_hash: H256::repeat_byte(1), - rent_allowance: 40, - last_write: None, - })); - - let mut overlay = OverlayAccountDb::::new(&DirectAccountDb); - overlay.set_storage(&1, key1.clone(), Some(b"1".to_vec())); - overlay.set_storage(&1, key2.clone(), Some(b"2".to_vec())); - DirectAccountDb.commit(overlay.into_change_set()); - - Balances::deposit_creating(&2, 110); - ContractInfoOf::::insert(2, &ContractInfo::Alive(RawAliveContractInfo { - trie_id: trie_id2.clone(), - storage_size: ::StorageSizeOffset::get(), - deduct_block: System::block_number(), - code_hash: H256::repeat_byte(2), - rent_allowance: 40, - last_write: None, - })); - - let mut overlay = OverlayAccountDb::::new(&DirectAccountDb); - overlay.set_storage(&2, key1.clone(), Some(b"3".to_vec())); - overlay.set_storage(&2, key2.clone(), Some(b"4".to_vec())); - DirectAccountDb.commit(overlay.into_change_set()); - } - - // Transfer funds from account 1 of such amount that after this transfer - // the balance of account 1 will be below the existential threshold. - // - // This does not remove the contract storage as we are not notified about a - // account removal. This cannot happen in reality because a contract can only - // remove itself by `ext_terminate`. There is no external event that can remove - // the account appart from that. 
- assert_ok!(Balances::transfer(Origin::signed(1), 2, 20)); - - // Verify that no entries are removed. - { - assert_eq!( - >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1), - Some(b"1".to_vec()) - ); - assert_eq!( - >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2), - Some(b"2".to_vec()) - ); - - assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), - Some(b"3".to_vec()) - ); - assert_eq!( - >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key2), - Some(b"4".to_vec()) - ); - } - }); + ExtBuilder::default() + .existential_deposit(100) + .build() + .execute_with(|| { + let trie_id1 = ::TrieIdGenerator::trie_id(&1); + let trie_id2 = ::TrieIdGenerator::trie_id(&2); + let key1 = &[1; 32]; + let key2 = &[2; 32]; + + // Set up two accounts with free balance above the existential threshold. + { + Balances::deposit_creating(&1, 110); + ContractInfoOf::::insert( + 1, + &ContractInfo::Alive(RawAliveContractInfo { + trie_id: trie_id1.clone(), + storage_size: ::StorageSizeOffset::get(), + deduct_block: System::block_number(), + code_hash: H256::repeat_byte(1), + rent_allowance: 40, + last_write: None, + }), + ); + + let mut overlay = OverlayAccountDb::::new(&DirectAccountDb); + overlay.set_storage(&1, key1.clone(), Some(b"1".to_vec())); + overlay.set_storage(&1, key2.clone(), Some(b"2".to_vec())); + DirectAccountDb.commit(overlay.into_change_set()); + + Balances::deposit_creating(&2, 110); + ContractInfoOf::::insert( + 2, + &ContractInfo::Alive(RawAliveContractInfo { + trie_id: trie_id2.clone(), + storage_size: ::StorageSizeOffset::get(), + deduct_block: System::block_number(), + code_hash: H256::repeat_byte(2), + rent_allowance: 40, + last_write: None, + }), + ); + + let mut overlay = OverlayAccountDb::::new(&DirectAccountDb); + overlay.set_storage(&2, key1.clone(), Some(b"3".to_vec())); + overlay.set_storage(&2, key2.clone(), Some(b"4".to_vec())); + DirectAccountDb.commit(overlay.into_change_set()); + } + + // Transfer funds from 
account 1 of such amount that after this transfer + // the balance of account 1 will be below the existential threshold. + // + // This does not remove the contract storage as we are not notified about a + // account removal. This cannot happen in reality because a contract can only + // remove itself by `ext_terminate`. There is no external event that can remove + // the account appart from that. + assert_ok!(Balances::transfer(Origin::signed(1), 2, 20)); + + // Verify that no entries are removed. + { + assert_eq!( + >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key1), + Some(b"1".to_vec()) + ); + assert_eq!( + >::get_storage(&DirectAccountDb, &1, Some(&trie_id1), key2), + Some(b"2".to_vec()) + ); + + assert_eq!( + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key1), + Some(b"3".to_vec()) + ); + assert_eq!( + >::get_storage(&DirectAccountDb, &2, Some(&trie_id2), key2), + Some(b"4".to_vec()) + ); + } + }); } #[test] fn instantiate_and_call_and_deposit_event() { - let (wasm, code_hash) = compile_module::(&load_wasm("return_from_start_fn.wat")) - .unwrap(); - - ExtBuilder::default().existential_deposit(100).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Check at the end to get hash on error easily - let creation = Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, - code_hash.into(), - vec![], - ); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: 
MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, 100) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::ContractExecution(BOB, vec![1, 2, 3, 4])), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), - topics: vec![], - } - ]); - - assert_ok!(creation); - assert!(ContractInfoOf::::contains_key(BOB)); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("return_from_start_fn.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(100) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Check at the end to get hash on error easily + let creation = Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + vec![], + ); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + 1, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(BOB, 100)), + topics: vec![], + }, + 
EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::ContractExecution( + BOB, + vec![1, 2, 3, 4] + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), + topics: vec![], + } + ] + ); + + assert_ok!(creation); + assert!(ContractInfoOf::::contains_key(BOB)); + }); } #[test] fn dispatch_call() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let (wasm, code_hash) = compile_module::(&load_wasm("dispatch_call.wat")) - .unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Let's keep this assert even though it's redundant. If you ever need to update the - // wasm source this test will fail and will show you the actual hash. 
- assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - ]); - - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, - code_hash.into(), - vec![], - )); - - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, // newly created account - 0, - 100_000, - vec![], - )); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, 100) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), - topics: vec![], - }, - - // Dispatching the call. 
- EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(CHARLIE, 50) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Transfer(BOB, CHARLIE, 50) - ), - topics: vec![], - }, - - // Event emitted as a result of dispatch. - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Dispatched(BOB, true)), - topics: vec![], - } - ]); - }); + // This test can fail due to the encoding changes. In case it becomes too annoying + // let's rewrite so as we use this module controlled call or we serialize it in runtime. + let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer( + CHARLIE, 50, + ))); + assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); + + let (wasm, code_hash) = compile_module::(&load_wasm("dispatch_call.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Let's keep this assert even though it's redundant. If you ever need to update the + // wasm source this test will fail and will show you the actual hash. 
+ assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + 1, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + topics: vec![], + }, + ] + ); + + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + vec![], + )); + + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, // newly created account + 0, + 100_000, + vec![], + )); + + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + 1, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(BOB, 100)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), + topics: vec![], + }, + // Dispatching the call. 
+ EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(CHARLIE, 50)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Transfer( + BOB, CHARLIE, 50 + )), + topics: vec![], + }, + // Event emitted as a result of dispatch. + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Dispatched(BOB, true)), + topics: vec![], + } + ] + ); + }); } #[test] fn dispatch_call_not_dispatched_after_top_level_transaction_failure() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. - let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let (wasm, code_hash) = compile_module::(&load_wasm("dispatch_call_then_trap.wat")) - .unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Let's keep this assert even though it's redundant. If you ever need to update the - // wasm source this test will fail and will show you the actual hash. 
- assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - ]); - - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, - code_hash.into(), - vec![], - )); - - // Call the newly instantiated contract. The contract is expected to dispatch a call - // and then trap. - assert_err!( - Contracts::call( - Origin::signed(ALICE), - BOB, // newly created account - 0, - 100_000, - vec![], - ), - "contract trapped during execution" - ); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances( - pallet_balances::RawEvent::Endowed(BOB, 100) - ), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), - topics: vec![], - }, - // ABSENCE of events which would be caused by dispatched Balances::transfer call - ]); - }); 
+ // This test can fail due to the encoding changes. In case it becomes too annoying + // let's rewrite so as we use this module controlled call or we serialize it in runtime. + let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer( + CHARLIE, 50, + ))); + assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); + + let (wasm, code_hash) = + compile_module::(&load_wasm("dispatch_call_then_trap.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Let's keep this assert even though it's redundant. If you ever need to update the + // wasm source this test will fail and will show you the actual hash. + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + 1, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + topics: vec![], + }, + ] + ); + + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + vec![], + )); + + // Call the newly instantiated contract. The contract is expected to dispatch a call + // and then trap. 
+ assert_err!( + Contracts::call( + Origin::signed(ALICE), + BOB, // newly created account + 0, + 100_000, + vec![], + ), + "contract trapped during execution" + ); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + 1, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(BOB)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(BOB, 100)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Transfer(ALICE, BOB, 100)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Instantiated(ALICE, BOB)), + topics: vec![], + }, + // ABSENCE of events which would be caused by dispatched Balances::transfer call + ] + ); + }); } #[test] fn run_out_of_gas() { - let (wasm, code_hash) = compile_module::(&load_wasm("run_out_of_gas.wat")) - .unwrap(); - - ExtBuilder::default() - .existential_deposit(50) - .build() - .execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, - code_hash.into(), - vec![], - )); - - // Call the contract with a fixed gas limit. It must run out of gas because it just - // loops forever. 
- assert_err!( - Contracts::call( - Origin::signed(ALICE), - BOB, // newly created account - 0, - 1000, - vec![], - ), - "ran out of gas during contract execution" - ); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("run_out_of_gas.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + vec![], + )); + + // Call the contract with a fixed gas limit. It must run out of gas because it just + // loops forever. + assert_err!( + Contracts::call( + Origin::signed(ALICE), + BOB, // newly created account + 0, + 1000, + vec![], + ), + "ran out of gas during contract execution" + ); + }); } /// Input data for each call in set_rent code mod call { - pub fn set_storage_4_byte() -> Vec { vec![] } - pub fn remove_storage_4_byte() -> Vec { vec![0] } - pub fn transfer() -> Vec { vec![0, 0] } - pub fn null() -> Vec { vec![0, 0, 0] } + pub fn set_storage_4_byte() -> Vec { + vec![] + } + pub fn remove_storage_4_byte() -> Vec { + vec![0] + } + pub fn transfer() -> Vec { + vec![0, 0] + } + pub fn null() -> Vec { + vec![0, 0, 0] + } } /// Test correspondence of set_rent code and its hash. /// Also test that encoded extrinsic in code correspond to the correct transfer #[test] fn test_set_rent_code_and_hash() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. 
- let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer(CHARLIE, 50))); - assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); - - let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // If you ever need to update the wasm source this test will fail - // and will show you the actual hash. - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), - topics: vec![], - }, - ]); - }); + // This test can fail due to the encoding changes. In case it becomes too annoying + // let's rewrite so as we use this module controlled call or we serialize it in runtime. + let encoded = Encode::encode(&Call::Balances(pallet_balances::Call::transfer( + CHARLIE, 50, + ))); + assert_eq!(&encoded[..], &hex!("00000300000000000000C8")[..]); + + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // If you ever need to update the wasm source this test will fail + // and will show you the actual hash. 
+ assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + 1, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored(code_hash.into())), + topics: vec![], + }, + ] + ); + }); } #[test] fn storage_size() { - let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); - - // Storage size - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - 100_000, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.storage_size, ::StorageSizeOffset::get() + 4); - - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::set_storage_4_byte())); - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.storage_size, ::StorageSizeOffset::get() + 4 + 4); - - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::remove_storage_4_byte())); - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.storage_size, ::StorageSizeOffset::get() + 4); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); + + // Storage size + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + 
assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 30_000, + 100_000, + code_hash.into(), + ::Balance::from(1_000u32).encode() // rent allowance + )); + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!( + bob_contract.storage_size, + ::StorageSizeOffset::get() + 4 + ); + + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::set_storage_4_byte() + )); + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!( + bob_contract.storage_size, + ::StorageSizeOffset::get() + 4 + 4 + ); + + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::remove_storage_4_byte() + )); + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!( + bob_contract.storage_size, + ::StorageSizeOffset::get() + 4 + ); + }); } fn initialize_block(number: u64) { - System::initialize( - &number, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - Default::default(), - ); + System::initialize( + &number, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + Default::default(), + ); } #[test] fn deduct_blocks() { - let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - 100_000, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000); - - // Advance 4 blocks - initialize_block(5); - - // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null())); 
- - // Check result - let rent = (8 + 4 - 3) // storage size = size_offset + deploy_set_storage - deposit_offset + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 30_000, + 100_000, + code_hash.into(), + ::Balance::from(1_000u32).encode() // rent allowance + )); + + // Check creation + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, 1_000); + + // Advance 4 blocks + initialize_block(5); + + // Trigger rent through call + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::null() + )); + + // Check result + let rent = (8 + 4 - 3) // storage size = size_offset + deploy_set_storage - deposit_offset * 4 // rent byte price * 4; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent); - assert_eq!(bob_contract.deduct_block, 5); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent); - - // Advance 7 blocks more - initialize_block(12); - - // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null())); - - // Check result - let rent_2 = (8 + 4 - 2) // storage size = size_offset + deploy_set_storage - deposit_offset + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, 1_000 - rent); + assert_eq!(bob_contract.deduct_block, 5); + assert_eq!(Balances::free_balance(BOB), 30_000 - rent); + + // Advance 7 blocks more + initialize_block(12); + + // Trigger rent through call + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, 
+ 100_000, + call::null() + )); + + // Check result + let rent_2 = (8 + 4 - 2) // storage size = size_offset + deploy_set_storage - deposit_offset * 4 // rent byte price * 7; // blocks to rent - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); - assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); - - // Second call on same block should have no effect on rent - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null())); - - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); - assert_eq!(bob_contract.deduct_block, 12); - assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); - }); + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); + assert_eq!(bob_contract.deduct_block, 12); + assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); + + // Second call on same block should have no effect on rent + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::null() + )); + + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, 1_000 - rent - rent_2); + assert_eq!(bob_contract.deduct_block, 12); + assert_eq!(Balances::free_balance(BOB), 30_000 - rent - rent_2); + }); } #[test] fn call_contract_removals() { - removals(|| { - // Call on already-removed account might fail, and this is fine. - Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()); - true - }); + removals(|| { + // Call on already-removed account might fail, and this is fine. 
+ Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()); + true + }); } #[test] fn inherent_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok()); + removals(|| Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok()); } #[test] fn signed_claim_surcharge_contract_removals() { - removals(|| Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok()); + removals(|| Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok()); } #[test] fn claim_surcharge_malus() { - // Test surcharge malus for inherent - claim_surcharge(4, || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), true); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), false); - - // Test surcharge malus for signed - claim_surcharge(4, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), true); - claim_surcharge(3, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(2, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); - claim_surcharge(1, || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), false); + // Test surcharge malus for inherent + claim_surcharge( + 4, + || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), + true, + ); + claim_surcharge( + 3, + || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), + true, + ); + claim_surcharge( + 2, + || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), + true, + ); + claim_surcharge( + 1, + || Contracts::claim_surcharge(Origin::NONE, BOB, Some(ALICE)).is_ok(), + false, + ); + + // Test surcharge malus for signed + claim_surcharge( 
+ 4, + || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), + true, + ); + claim_surcharge( + 3, + || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), + false, + ); + claim_surcharge( + 2, + || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), + false, + ); + claim_surcharge( + 1, + || Contracts::claim_surcharge(Origin::signed(ALICE), BOB, None).is_ok(), + false, + ); } /// Claim surcharge with the given trigger_call at the given blocks. /// If `removes` is true then assert that the contract is a tombstone. fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) { - let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Advance blocks - initialize_block(blocks); - - // Trigger rent through call - assert!(trigger_call()); - - if removes { - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - } else { - assert!(ContractInfoOf::::get(BOB).unwrap().get_alive().is_some()); - } - }); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + ::Balance::from(1_000u32).encode() // rent allowance + )); + + // Advance blocks + initialize_block(blocks); + + // Trigger rent through call + assert!(trigger_call()); + + if removes { + 
assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_tombstone() + .is_some()); + } else { + assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .is_some()); + } + }); } /// Test for all kind of removals for the given trigger: @@ -918,816 +1109,1012 @@ fn claim_surcharge(blocks: u64, trigger_call: impl Fn() -> bool, removes: bool) /// * if allowance is exceeded /// * if balance is reached and balance < subsistence threshold fn removals(trigger_call: impl Fn() -> bool) { - let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); - - // Balance reached and superior to subsistence threshold - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - let subsistence_threshold = 50 /*existential_deposit*/ + 16 /*tombstone_deposit*/; - - // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!(ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap().rent_allowance, 1_000); - assert_eq!(Balances::free_balance(BOB), 100); - - // Advance blocks - initialize_block(10); - - // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); - - // Advance blocks - initialize_block(20); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), subsistence_threshold); - }); - - // Allowance exceeded - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - 
assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 1_000, - 100_000, code_hash.into(), - ::Balance::from(100u32).encode() // rent allowance - )); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!(ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap().rent_allowance, 100); - assert_eq!(Balances::free_balance(BOB), 1_000); - - // Advance blocks - initialize_block(10); - - // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - // Balance should be initial balance - initial rent_allowance - assert_eq!(Balances::free_balance(BOB), 900); - - // Advance blocks - initialize_block(20); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(Balances::free_balance(BOB), 900); - }); - - // Balance reached and inferior to subsistence threshold - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 50+Balances::minimum_balance(), - 100_000, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert_eq!(ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap().rent_allowance, 1_000); - assert_eq!(Balances::free_balance(BOB), 50 + Balances::minimum_balance()); - - // Transfer funds - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::transfer())); - assert_eq!(ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap().rent_allowance, 1_000); - assert_eq!(Balances::free_balance(BOB), Balances::minimum_balance()); - - // Advance blocks - 
initialize_block(10); - - // Trigger rent through call - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).is_none()); - assert_eq!(Balances::free_balance(BOB), Balances::minimum_balance()); - - // Advance blocks - initialize_block(20); - - // Trigger rent must have no effect - assert!(trigger_call()); - assert!(ContractInfoOf::::get(BOB).is_none()); - assert_eq!(Balances::free_balance(BOB), Balances::minimum_balance()); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); + + // Balance reached and superior to subsistence threshold + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + wasm.clone() + )); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + ::Balance::from(1_000u32).encode() // rent allowance + )); + + let subsistence_threshold = 50 /*existential_deposit*/ + 16 /*tombstone_deposit*/; + + // Trigger rent must have no effect + assert!(trigger_call()); + assert_eq!( + ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap() + .rent_allowance, + 1_000 + ); + assert_eq!(Balances::free_balance(BOB), 100); + + // Advance blocks + initialize_block(10); + + // Trigger rent through call + assert!(trigger_call()); + assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_tombstone() + .is_some()); + assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + + // Advance blocks + initialize_block(20); + + // Trigger rent must have no effect + assert!(trigger_call()); + assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_tombstone() + .is_some()); + assert_eq!(Balances::free_balance(BOB), subsistence_threshold); + }); + + // Allowance exceeded + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + 
assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + wasm.clone() + )); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 1_000, + 100_000, + code_hash.into(), + ::Balance::from(100u32).encode() // rent allowance + )); + + // Trigger rent must have no effect + assert!(trigger_call()); + assert_eq!( + ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap() + .rent_allowance, + 100 + ); + assert_eq!(Balances::free_balance(BOB), 1_000); + + // Advance blocks + initialize_block(10); + + // Trigger rent through call + assert!(trigger_call()); + assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_tombstone() + .is_some()); + // Balance should be initial balance - initial rent_allowance + assert_eq!(Balances::free_balance(BOB), 900); + + // Advance blocks + initialize_block(20); + + // Trigger rent must have no effect + assert!(trigger_call()); + assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_tombstone() + .is_some()); + assert_eq!(Balances::free_balance(BOB), 900); + }); + + // Balance reached and inferior to subsistence threshold + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + wasm.clone() + )); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 50 + Balances::minimum_balance(), + 100_000, + code_hash.into(), + ::Balance::from(1_000u32).encode() // rent allowance + )); + + // Trigger rent must have no effect + assert!(trigger_call()); + assert_eq!( + ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap() + .rent_allowance, + 1_000 + ); + assert_eq!( + Balances::free_balance(BOB), + 50 + Balances::minimum_balance() + ); + + // Transfer funds + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::transfer() + )); + assert_eq!( + ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap() + 
.rent_allowance, + 1_000 + ); + assert_eq!(Balances::free_balance(BOB), Balances::minimum_balance()); + + // Advance blocks + initialize_block(10); + + // Trigger rent through call + assert!(trigger_call()); + assert!(ContractInfoOf::::get(BOB).is_none()); + assert_eq!(Balances::free_balance(BOB), Balances::minimum_balance()); + + // Advance blocks + initialize_block(20); + + // Trigger rent must have no effect + assert!(trigger_call()); + assert!(ContractInfoOf::::get(BOB).is_none()); + assert_eq!(Balances::free_balance(BOB), Balances::minimum_balance()); + }); } #[test] fn call_removed_contract() { - let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); - - // Balance reached and superior to subsistence threshold - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm.clone())); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, code_hash.into(), - ::Balance::from(1_000u32).encode() // rent allowance - )); - - // Calling contract should succeed. - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null())); - - // Advance blocks - initialize_block(10); - - // Calling contract should remove contract and fail. - assert_err!( - Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()), - "contract has been evicted" - ); - // Calling a contract that is about to evict shall emit an event. - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), - topics: vec![], - }, - ]); - - // Subsequent contract calls should also fail. 
- assert_err!( - Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()), - "contract has been evicted" - ); - }) + let (wasm, code_hash) = compile_module::(&load_wasm("set_rent.wat")).unwrap(); + + // Balance reached and superior to subsistence threshold + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + wasm.clone() + )); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + ::Balance::from(1_000u32).encode() // rent allowance + )); + + // Calling contract should succeed. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::null() + )); + + // Advance blocks + initialize_block(10); + + // Calling contract should remove contract and fail. + assert_err!( + Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()), + "contract has been evicted" + ); + // Calling a contract that is about to evict shall emit an event. + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), + topics: vec![], + },] + ); + + // Subsequent contract calls should also fail. 
+ assert_err!( + Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()), + "contract has been evicted" + ); + }) } #[test] fn default_rent_allowance_on_instantiate() { - let (wasm, code_hash) = compile_module::( - &load_wasm("check_default_rent_allowance.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - 100_000, - code_hash.into(), - vec![], - )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); - - // Advance blocks - initialize_block(5); - - // Trigger rent through call - assert_ok!(Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null())); - - // Check contract is still alive - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive(); - assert!(bob_contract.is_some()) - }); + let (wasm, code_hash) = + compile_module::(&load_wasm("check_default_rent_allowance.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 30_000, + 100_000, + code_hash.into(), + vec![], + )); + + // Check creation + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, >::max_value()); + + // Advance blocks + initialize_block(5); + + // Trigger rent through call + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::null() + )); + + // Check contract is still alive + let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive(); + 
assert!(bob_contract.is_some()) + }); } #[test] fn restorations_dirty_storage_and_different_storage() { - restoration(true, true); + restoration(true, true); } #[test] fn restorations_dirty_storage() { - restoration(false, true); + restoration(false, true); } #[test] fn restoration_different_storage() { - restoration(true, false); + restoration(true, false); } #[test] fn restoration_success() { - restoration(false, false); + restoration(false, false); } fn restoration(test_different_storage: bool, test_restore_to_with_dirty_storage: bool) { - let (set_rent_wasm, set_rent_code_hash) = - compile_module::(&load_wasm("set_rent.wat")).unwrap(); - let (restoration_wasm, restoration_code_hash) = - compile_module::(&load_wasm("restoration.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, restoration_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, set_rent_wasm)); - - // If you ever need to update the wasm source this test will fail - // and will show you the actual hash. - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(1, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(restoration_code_hash.into())), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::CodeStored(set_rent_code_hash.into())), - topics: vec![], - }, - ]); - - // Create an account with address `BOB` with code `CODE_SET_RENT`. - // The input parameter sets the rent allowance to 0. 
- assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - 100_000, - set_rent_code_hash.into(), - ::Balance::from(0u32).encode() - )); - - // Check if `BOB` was created successfully and that the rent allowance is - // set to 0. - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 0); - - if test_different_storage { - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, 0, 100_000, - call::set_storage_4_byte()) - ); - } - - // Advance 4 blocks, to the 5th. - initialize_block(5); - - /// Preserve `BOB`'s code hash for later introspection. - let bob_code_hash = ContractInfoOf::::get(BOB).unwrap() - .get_alive().unwrap().code_hash; - // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 0 - // we expect that it will get removed leaving tombstone. - assert_err!( - Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()), - "contract has been evicted" - ); - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Evicted(BOB.clone(), true) - ), - topics: vec![], - }, - ]); - - /// Create another account with the address `DJANGO` with `CODE_RESTORATION`. - /// - /// Note that we can't use `ALICE` for creating `DJANGO` so we create yet another - /// account `CHARLIE` and create `DJANGO` with it. - Balances::deposit_creating(&CHARLIE, 1_000_000); - assert_ok!(Contracts::instantiate( - Origin::signed(CHARLIE), - 30_000, - 100_000, - restoration_code_hash.into(), - ::Balance::from(0u32).encode() - )); - - // Before performing a call to `DJANGO` save its original trie id. - let django_trie_id = ContractInfoOf::::get(DJANGO).unwrap() - .get_alive().unwrap().trie_id; - - if !test_restore_to_with_dirty_storage { - // Advance 1 block, to the 6th. - initialize_block(6); - } - - // Perform a call to `DJANGO`. 
This should either perform restoration successfully or - // fail depending on the test parameters. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - DJANGO, - 0, - 100_000, - vec![], - )); - - if test_different_storage || test_restore_to_with_dirty_storage { - // Parametrization of the test imply restoration failure. Check that `DJANGO` aka - // restoration contract is still in place and also that `BOB` doesn't exist. - assert!(ContractInfoOf::::get(BOB).unwrap().get_tombstone().is_some()); - let django_contract = ContractInfoOf::::get(DJANGO).unwrap() - .get_alive().unwrap(); - assert_eq!(django_contract.storage_size, 16); - assert_eq!(django_contract.trie_id, django_trie_id); - assert_eq!(django_contract.deduct_block, System::block_number()); - match (test_different_storage, test_restore_to_with_dirty_storage) { - (true, false) => { - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Restored(DJANGO, BOB, bob_code_hash, 50, false) - ), - topics: vec![], - }, - ]); - } - (_, true) => { - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(CHARLIE)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(CHARLIE, 1_000_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(frame_system::RawEvent::NewAccount(DJANGO)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::balances(pallet_balances::RawEvent::Endowed(DJANGO, 30_000)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Transfer(CHARLIE, DJANGO, 30_000)), - topics: vec![], - }, - 
EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Instantiated(CHARLIE, DJANGO)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts(RawEvent::Restored( - DJANGO, - BOB, - bob_code_hash, - 50, - false, - )), - topics: vec![], - }, - ]); - } - _ => unreachable!(), - } - } else { - // Here we expect that the restoration is succeeded. Check that the restoration - // contract `DJANGO` ceased to exist and that `BOB` returned back. - println!("{:?}", ContractInfoOf::::get(BOB)); - let bob_contract = ContractInfoOf::::get(BOB).unwrap() - .get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, 50); - assert_eq!(bob_contract.storage_size, 12); - assert_eq!(bob_contract.trie_id, django_trie_id); - assert_eq!(bob_contract.deduct_block, System::block_number()); - assert!(ContractInfoOf::::get(DJANGO).is_none()); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::system(system::RawEvent::KilledAccount(DJANGO)), - topics: vec![], - }, - EventRecord { - phase: Phase::Initialization, - event: MetaEvent::contracts( - RawEvent::Restored(DJANGO, BOB, bob_contract.code_hash, 50, true) - ), - topics: vec![], - }, - ]); - } - }); + let (set_rent_wasm, set_rent_code_hash) = + compile_module::(&load_wasm("set_rent.wat")).unwrap(); + let (restoration_wasm, restoration_code_hash) = + compile_module::(&load_wasm("restoration.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + restoration_wasm + )); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + set_rent_wasm + )); + + // If you ever need to update the wasm source this test will fail + // and will show you the actual hash. 
+ assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount(1)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + 1, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored( + restoration_code_hash.into() + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::CodeStored( + set_rent_code_hash.into() + )), + topics: vec![], + }, + ] + ); + + // Create an account with address `BOB` with code `CODE_SET_RENT`. + // The input parameter sets the rent allowance to 0. + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 30_000, + 100_000, + set_rent_code_hash.into(), + ::Balance::from(0u32).encode() + )); + + // Check if `BOB` was created successfully and that the rent allowance is + // set to 0. + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, 0); + + if test_different_storage { + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + call::set_storage_4_byte() + )); + } + + // Advance 4 blocks, to the 5th. + initialize_block(5); + + /// Preserve `BOB`'s code hash for later introspection. + let bob_code_hash = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap() + .code_hash; + // Call `BOB`, which makes it pay rent. Since the rent allowance is set to 0 + // we expect that it will get removed leaving tombstone. 
+ assert_err!( + Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, call::null()), + "contract has been evicted" + ); + assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_tombstone() + .is_some()); + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Evicted(BOB.clone(), true)), + topics: vec![], + },] + ); + + /// Create another account with the address `DJANGO` with `CODE_RESTORATION`. + /// + /// Note that we can't use `ALICE` for creating `DJANGO` so we create yet another + /// account `CHARLIE` and create `DJANGO` with it. + Balances::deposit_creating(&CHARLIE, 1_000_000); + assert_ok!(Contracts::instantiate( + Origin::signed(CHARLIE), + 30_000, + 100_000, + restoration_code_hash.into(), + ::Balance::from(0u32).encode() + )); + + // Before performing a call to `DJANGO` save its original trie id. + let django_trie_id = ContractInfoOf::::get(DJANGO) + .unwrap() + .get_alive() + .unwrap() + .trie_id; + + if !test_restore_to_with_dirty_storage { + // Advance 1 block, to the 6th. + initialize_block(6); + } + + // Perform a call to `DJANGO`. This should either perform restoration successfully or + // fail depending on the test parameters. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + DJANGO, + 0, + 100_000, + vec![], + )); + + if test_different_storage || test_restore_to_with_dirty_storage { + // Parametrization of the test imply restoration failure. Check that `DJANGO` aka + // restoration contract is still in place and also that `BOB` doesn't exist. 
+ assert!(ContractInfoOf::::get(BOB) + .unwrap() + .get_tombstone() + .is_some()); + let django_contract = ContractInfoOf::::get(DJANGO) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(django_contract.storage_size, 16); + assert_eq!(django_contract.trie_id, django_trie_id); + assert_eq!(django_contract.deduct_block, System::block_number()); + match (test_different_storage, test_restore_to_with_dirty_storage) { + (true, false) => { + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Restored( + DJANGO, + BOB, + bob_code_hash, + 50, + false + )), + topics: vec![], + },] + ); + } + (_, true) => { + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Evicted(BOB, true)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount( + CHARLIE + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + CHARLIE, 1_000_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(frame_system::RawEvent::NewAccount( + DJANGO + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::balances(pallet_balances::RawEvent::Endowed( + DJANGO, 30_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Transfer( + CHARLIE, DJANGO, 30_000 + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Instantiated( + CHARLIE, DJANGO + )), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Restored( + DJANGO, + BOB, + bob_code_hash, + 50, + false, + )), + topics: vec![], + }, + ] + ); + } + _ => unreachable!(), + } + } 
else { + // Here we expect that the restoration is succeeded. Check that the restoration + // contract `DJANGO` ceased to exist and that `BOB` returned back. + println!("{:?}", ContractInfoOf::::get(BOB)); + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, 50); + assert_eq!(bob_contract.storage_size, 12); + assert_eq!(bob_contract.trie_id, django_trie_id); + assert_eq!(bob_contract.deduct_block, System::block_number()); + assert!(ContractInfoOf::::get(DJANGO).is_none()); + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::system(system::RawEvent::KilledAccount(DJANGO)), + topics: vec![], + }, + EventRecord { + phase: Phase::Initialization, + event: MetaEvent::contracts(RawEvent::Restored( + DJANGO, + BOB, + bob_contract.code_hash, + 50, + true + )), + topics: vec![], + }, + ] + ); + } + }); } #[test] fn storage_max_value_limit() { - let (wasm, code_hash) = compile_module::(&load_wasm("storage_size.wat")) - .unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 30_000, - 100_000, - code_hash.into(), - vec![], - )); - - // Check creation - let bob_contract = ContractInfoOf::::get(BOB).unwrap().get_alive().unwrap(); - assert_eq!(bob_contract.rent_allowance, >::max_value()); - - // Call contract with allowed storage value. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - Encode::encode(&self::MaxValueSize::get()), - )); - - // Call contract with too large a storage value. 
- assert_err!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - Encode::encode(&(self::MaxValueSize::get() + 1)), - ), - "contract trapped during execution" - ); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("storage_size.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 30_000, + 100_000, + code_hash.into(), + vec![], + )); + + // Check creation + let bob_contract = ContractInfoOf::::get(BOB) + .unwrap() + .get_alive() + .unwrap(); + assert_eq!(bob_contract.rent_allowance, >::max_value()); + + // Call contract with allowed storage value. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + Encode::encode(&self::MaxValueSize::get()), + )); + + // Call contract with too large a storage value. 
+ assert_err!( + Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + Encode::encode(&(self::MaxValueSize::get() + 1)), + ), + "contract trapped during execution" + ); + }); } #[test] fn deploy_and_call_other_contract() { - let (callee_wasm, callee_code_hash) = - compile_module::(&load_wasm("return_with_data.wat")).unwrap(); - let (caller_wasm, caller_code_hash) = - compile_module::(&load_wasm("caller_contract.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, callee_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, caller_wasm)); - - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - 100_000, - caller_code_hash.into(), - vec![], - )); - - // Call BOB contract, which attempts to instantiate and call the callee contract and - // makes various assertions on the results from those calls. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 200_000, - callee_code_hash.as_ref().to_vec(), - )); - }); + let (callee_wasm, callee_code_hash) = + compile_module::(&load_wasm("return_with_data.wat")).unwrap(); + let (caller_wasm, caller_code_hash) = + compile_module::(&load_wasm("caller_contract.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + callee_wasm + )); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + caller_wasm + )); + + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + caller_code_hash.into(), + vec![], + )); + + // Call BOB contract, which attempts to instantiate and call the callee contract and + // makes various assertions on the results from those calls. 
+ assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 200_000, + callee_code_hash.as_ref().to_vec(), + )); + }); } #[test] fn deploy_works_without_gas_price() { - let (wasm, code_hash) = compile_module::(&load_wasm("get_runtime_storage.wat")) - .unwrap(); - ExtBuilder::default().existential_deposit(50).gas_price(0).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, - code_hash.into(), - vec![], - )); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("get_runtime_storage.wat")).unwrap(); + ExtBuilder::default() + .existential_deposit(50) + .gas_price(0) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + vec![], + )); + }); } #[test] fn cannot_self_destruct_through_draning() { - let (wasm, code_hash) = compile_module::(&load_wasm("drain.wat")).unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - 100_000, - code_hash.into(), - vec![], - )); - - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB with no input data, forcing it to run until out-of-balance - // and eventually trapping because below existential deposit. 
- assert_err!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - vec![], - ), - "contract trapped during execution" - ); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("drain.wat")).unwrap(); + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + code_hash.into(), + vec![], + )); + + // Check that the BOB contract has been instantiated. + assert_matches!( + ContractInfoOf::::get(BOB), + Some(ContractInfo::Alive(_)) + ); + + // Call BOB with no input data, forcing it to run until out-of-balance + // and eventually trapping because below existential deposit. + assert_err!( + Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, vec![],), + "contract trapped during execution" + ); + }); } #[test] fn cannot_self_destruct_while_live() { - let (wasm, code_hash) = compile_module::(&load_wasm("self_destruct.wat")) - .unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - 100_000, - code_hash.into(), - vec![], - )); - - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB with input data, forcing it make a recursive call to itself to - // self-destruct, resulting in a trap. - assert_err!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - vec![0], - ), - "contract trapped during execution" - ); - - // Check that BOB is still alive. 
- assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("self_destruct.wat")).unwrap(); + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + code_hash.into(), + vec![], + )); + + // Check that the BOB contract has been instantiated. + assert_matches!( + ContractInfoOf::::get(BOB), + Some(ContractInfo::Alive(_)) + ); + + // Call BOB with input data, forcing it make a recursive call to itself to + // self-destruct, resulting in a trap. + assert_err!( + Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, vec![0],), + "contract trapped during execution" + ); + + // Check that BOB is still alive. + assert_matches!( + ContractInfoOf::::get(BOB), + Some(ContractInfo::Alive(_)) + ); + }); } #[test] fn self_destruct_works() { - let (wasm, code_hash) = compile_module::(&load_wasm("self_destruct.wat")) - .unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Instantiate the BOB contract. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - 100_000, - code_hash.into(), - vec![], - )); - - // Check that the BOB contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(BOB), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB without input data which triggers termination. 
- assert_matches!( - Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - vec![], - ), - Ok(()) - ); - - // Check that account is gone - assert!(ContractInfoOf::::get(BOB).is_none()); - - // check that the beneficiary (django) got remaining balance - assert_eq!(Balances::free_balance(DJANGO), 100_000); - }); + let (wasm, code_hash) = compile_module::(&load_wasm("self_destruct.wat")).unwrap(); + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Instantiate the BOB contract. + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + code_hash.into(), + vec![], + )); + + // Check that the BOB contract has been instantiated. + assert_matches!( + ContractInfoOf::::get(BOB), + Some(ContractInfo::Alive(_)) + ); + + // Call BOB without input data which triggers termination. + assert_matches!( + Contracts::call(Origin::signed(ALICE), BOB, 0, 100_000, vec![],), + Ok(()) + ); + + // Check that account is gone + assert!(ContractInfoOf::::get(BOB).is_none()); + + // check that the beneficiary (django) got remaining balance + assert_eq!(Balances::free_balance(DJANGO), 100_000); + }); } // This tests that one contract cannot prevent another from self-destructing by sending it // additional funds after it has been drained. 
#[test] fn destroy_contract_and_transfer_funds() { - let (callee_wasm, callee_code_hash) = - compile_module::(&load_wasm("self_destruct.wat")).unwrap(); - let (caller_wasm, caller_code_hash) = - compile_module::(&load_wasm("destroy_and_transfer.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - // Create - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, callee_wasm)); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, caller_wasm)); - - // This deploys the BOB contract, which in turn deploys the CHARLIE contract during - // construction. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 200_000, - 100_000, - caller_code_hash.into(), - callee_code_hash.as_ref().to_vec(), - )); - - // Check that the CHARLIE contract has been instantiated. - assert_matches!( - ContractInfoOf::::get(CHARLIE), - Some(ContractInfo::Alive(_)) - ); - - // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - CHARLIE.encode(), - )); - - // Check that CHARLIE has moved on to the great beyond (ie. died). - assert!(ContractInfoOf::::get(CHARLIE).is_none()); - }); + let (callee_wasm, callee_code_hash) = + compile_module::(&load_wasm("self_destruct.wat")).unwrap(); + let (caller_wasm, caller_code_hash) = + compile_module::(&load_wasm("destroy_and_transfer.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + // Create + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + callee_wasm + )); + assert_ok!(Contracts::put_code( + Origin::signed(ALICE), + 100_000, + caller_wasm + )); + + // This deploys the BOB contract, which in turn deploys the CHARLIE contract during + // construction. 
+ assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 200_000, + 100_000, + caller_code_hash.into(), + callee_code_hash.as_ref().to_vec(), + )); + + // Check that the CHARLIE contract has been instantiated. + assert_matches!( + ContractInfoOf::::get(CHARLIE), + Some(ContractInfo::Alive(_)) + ); + + // Call BOB, which calls CHARLIE, forcing CHARLIE to self-destruct. + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + CHARLIE.encode(), + )); + + // Check that CHARLIE has moved on to the great beyond (ie. died). + assert!(ContractInfoOf::::get(CHARLIE).is_none()); + }); } #[test] fn cannot_self_destruct_in_constructor() { - let (wasm, code_hash) = - compile_module::(&load_wasm("self_destructing_constructor.wat")).unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Fail to instantiate the BOB because the call that is issued in the deploy - // function exhausts all balances which puts it below the existential deposit. - assert_err!( - Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - 100_000, - code_hash.into(), - vec![], - ), - "contract trapped during execution" - ); - }); + let (wasm, code_hash) = + compile_module::(&load_wasm("self_destructing_constructor.wat")).unwrap(); + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Fail to instantiate the BOB because the call that is issued in the deploy + // function exhausts all balances which puts it below the existential deposit. 
+ assert_err!( + Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + code_hash.into(), + vec![], + ), + "contract trapped during execution" + ); + }); } #[test] fn check_block_gas_limit_works() { - ExtBuilder::default().block_gas_limit(50).build().execute_with(|| { - let info = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: true }; - let check = CheckBlockGasLimit::(Default::default()); - let call: Call = crate::Call::put_code(1000, vec![]).into(); - - assert_eq!( - check.validate(&0, &call, &info, 0), InvalidTransaction::ExhaustsResources.into(), - ); - - let call: Call = crate::Call::update_schedule(Default::default()).into(); - assert_eq!(check.validate(&0, &call, &info, 0), Ok(Default::default())); - }); + ExtBuilder::default() + .block_gas_limit(50) + .build() + .execute_with(|| { + let info = DispatchInfo { + weight: 100, + class: DispatchClass::Normal, + pays_fee: true, + }; + let check = CheckBlockGasLimit::(Default::default()); + let call: Call = crate::Call::put_code(1000, vec![]).into(); + + assert_eq!( + check.validate(&0, &call, &info, 0), + InvalidTransaction::ExhaustsResources.into(), + ); + + let call: Call = crate::Call::update_schedule(Default::default()).into(); + assert_eq!(check.validate(&0, &call, &info, 0), Ok(Default::default())); + }); } #[test] fn get_runtime_storage() { - let (wasm, code_hash) = compile_module::(&load_wasm("get_runtime_storage.wat")) - .unwrap(); - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - - frame_support::storage::unhashed::put_raw( - &[1, 2, 3, 4], - 0x14144020u32.to_le_bytes().to_vec().as_ref() - ); - - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100, - 100_000, - code_hash.into(), - vec![], - )); - assert_ok!(Contracts::call( - Origin::signed(ALICE), - BOB, - 0, - 100_000, - vec![], - )); - }); + let (wasm, 
code_hash) = compile_module::(&load_wasm("get_runtime_storage.wat")).unwrap(); + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + + frame_support::storage::unhashed::put_raw( + &[1, 2, 3, 4], + 0x14144020u32.to_le_bytes().to_vec().as_ref(), + ); + + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100, + 100_000, + code_hash.into(), + vec![], + )); + assert_ok!(Contracts::call( + Origin::signed(ALICE), + BOB, + 0, + 100_000, + vec![], + )); + }); } #[test] fn crypto_hashes() { - let (wasm, code_hash) = compile_module::(&load_wasm("crypto_hashes.wat")).unwrap(); - - ExtBuilder::default().existential_deposit(50).build().execute_with(|| { - Balances::deposit_creating(&ALICE, 1_000_000); - assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); - - // Instantiate the CRYPTO_HASHES contract. - assert_ok!(Contracts::instantiate( - Origin::signed(ALICE), - 100_000, - 100_000, - code_hash.into(), - vec![], - )); - // Perform the call. - let input = b"_DEAD_BEEF"; - use sp_io::hashing::*; - // Wraps a hash function into a more dynamic form usable for testing. - macro_rules! dyn_hash_fn { - ($name:ident) => { - Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) - }; - } - // All hash functions and their associated output byte lengths. - let test_cases: &[(Box Box<[u8]>>, usize)] = &[ - (dyn_hash_fn!(sha2_256), 32), - (dyn_hash_fn!(keccak_256), 32), - (dyn_hash_fn!(blake2_256), 32), - (dyn_hash_fn!(blake2_128), 16), - ]; - // Test the given hash functions for the input: "_DEAD_BEEF" - for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { - // We offset data in the contract tables by 1. 
- let mut params = vec![(n + 1) as u8]; - params.extend_from_slice(input); - let result = >::bare_call( - ALICE, - BOB, - 0, - 100_000, - params, - ).unwrap(); - assert_eq!(result.status, 0); - let expected = hash_fn(input.as_ref()); - assert_eq!(&result.data[..*expected_size], &*expected); - } - }) + let (wasm, code_hash) = compile_module::(&load_wasm("crypto_hashes.wat")).unwrap(); + + ExtBuilder::default() + .existential_deposit(50) + .build() + .execute_with(|| { + Balances::deposit_creating(&ALICE, 1_000_000); + assert_ok!(Contracts::put_code(Origin::signed(ALICE), 100_000, wasm)); + + // Instantiate the CRYPTO_HASHES contract. + assert_ok!(Contracts::instantiate( + Origin::signed(ALICE), + 100_000, + 100_000, + code_hash.into(), + vec![], + )); + // Perform the call. + let input = b"_DEAD_BEEF"; + use sp_io::hashing::*; + // Wraps a hash function into a more dynamic form usable for testing. + macro_rules! dyn_hash_fn { + ($name:ident) => { + Box::new(|input| $name(input).as_ref().to_vec().into_boxed_slice()) + }; + } + // All hash functions and their associated output byte lengths. + let test_cases: &[(Box Box<[u8]>>, usize)] = &[ + (dyn_hash_fn!(sha2_256), 32), + (dyn_hash_fn!(keccak_256), 32), + (dyn_hash_fn!(blake2_256), 32), + (dyn_hash_fn!(blake2_128), 16), + ]; + // Test the given hash functions for the input: "_DEAD_BEEF" + for (n, (hash_fn, expected_size)) in test_cases.iter().enumerate() { + // We offset data in the contract tables by 1. 
+ let mut params = vec![(n + 1) as u8]; + params.extend_from_slice(input); + let result = >::bare_call(ALICE, BOB, 0, 100_000, params).unwrap(); + assert_eq!(result.status, 0); + let expected = hash_fn(input.as_ref()); + assert_eq!(&result.data[..*expected_size], &*expected); + } + }) } fn load_wasm(file_name: &str) -> String { - let path = ["tests/", file_name].concat(); - std::fs::read_to_string(&path).expect(&format!("Unable to read {} file", path)) + let path = ["tests/", file_name].concat(); + std::fs::read_to_string(&path).expect(&format!("Unable to read {} file", path)) } diff --git a/frame/contracts/src/wasm/code_cache.rs b/frame/contracts/src/wasm/code_cache.rs index cb942a2589..fdeecb5f85 100644 --- a/frame/contracts/src/wasm/code_cache.rs +++ b/frame/contracts/src/wasm/code_cache.rs @@ -29,9 +29,9 @@ use crate::gas::{Gas, GasMeter, Token}; use crate::wasm::{prepare, runtime::Env, PrefabWasmModule}; use crate::{CodeHash, CodeStorage, PristineCode, Schedule, Trait}; -use sp_std::prelude::*; -use sp_runtime::traits::{Hash, Bounded}; use frame_support::StorageMap; +use sp_runtime::traits::{Bounded, Hash}; +use sp_std::prelude::*; /// Gas metering token that used for charging storing code into the code storage. /// @@ -41,14 +41,14 @@ use frame_support::StorageMap; pub struct PutCodeToken(u32); impl Token for PutCodeToken { - type Metadata = Schedule; + type Metadata = Schedule; - fn calculate_amount(&self, metadata: &Schedule) -> Gas { - metadata - .put_code_per_byte_cost - .checked_mul(self.0.into()) - .unwrap_or_else(|| Bounded::max_value()) - } + fn calculate_amount(&self, metadata: &Schedule) -> Gas { + metadata + .put_code_per_byte_cost + .checked_mul(self.0.into()) + .unwrap_or_else(|| Bounded::max_value()) + } } /// Put code in the storage. The hash of code is used as a key and is returned @@ -56,26 +56,26 @@ impl Token for PutCodeToken { /// /// This function instruments the given code and caches it in the storage. 
pub fn save( - original_code: Vec, - gas_meter: &mut GasMeter, - schedule: &Schedule, + original_code: Vec, + gas_meter: &mut GasMeter, + schedule: &Schedule, ) -> Result, &'static str> { - // The first time instrumentation is on the user. However, consequent reinstrumentation - // due to the schedule changes is on governance system. - if gas_meter - .charge(schedule, PutCodeToken(original_code.len() as u32)) - .is_out_of_gas() - { - return Err("there is not enough gas for storing the code"); - } + // The first time instrumentation is on the user. However, consequent reinstrumentation + // due to the schedule changes is on governance system. + if gas_meter + .charge(schedule, PutCodeToken(original_code.len() as u32)) + .is_out_of_gas() + { + return Err("there is not enough gas for storing the code"); + } - let prefab_module = prepare::prepare_contract::(&original_code, schedule)?; - let code_hash = T::Hashing::hash(&original_code); + let prefab_module = prepare::prepare_contract::(&original_code, schedule)?; + let code_hash = T::Hashing::hash(&original_code); - >::insert(code_hash, prefab_module); - >::insert(code_hash, original_code); + >::insert(code_hash, prefab_module); + >::insert(code_hash, original_code); - Ok(code_hash) + Ok(code_hash) } /// Load code with the given code hash. @@ -84,21 +84,20 @@ pub fn save( /// the current one given as an argument, then this function will perform /// re-instrumentation and update the cache in the storage. pub fn load( - code_hash: &CodeHash, - schedule: &Schedule, + code_hash: &CodeHash, + schedule: &Schedule, ) -> Result { - let mut prefab_module = - >::get(code_hash).ok_or_else(|| "code is not found")?; + let mut prefab_module = >::get(code_hash).ok_or_else(|| "code is not found")?; - if prefab_module.schedule_version < schedule.version { - // The current schedule version is greater than the version of the one cached - // in the storage. - // - // We need to re-instrument the code with the latest schedule here. 
- let original_code = - >::get(code_hash).ok_or_else(|| "pristine code is not found")?; - prefab_module = prepare::prepare_contract::(&original_code, schedule)?; - >::insert(&code_hash, &prefab_module); - } - Ok(prefab_module) + if prefab_module.schedule_version < schedule.version { + // The current schedule version is greater than the version of the one cached + // in the storage. + // + // We need to re-instrument the code with the latest schedule here. + let original_code = + >::get(code_hash).ok_or_else(|| "pristine code is not found")?; + prefab_module = prepare::prepare_contract::(&original_code, schedule)?; + >::insert(&code_hash, &prefab_module); + } + Ok(prefab_module) } diff --git a/frame/contracts/src/wasm/env_def/macros.rs b/frame/contracts/src/wasm/env_def/macros.rs index 335d35f1e7..c2b3f41b34 100644 --- a/frame/contracts/src/wasm/env_def/macros.rs +++ b/frame/contracts/src/wasm/env_def/macros.rs @@ -96,9 +96,9 @@ macro_rules! unmarshall_then_body { #[inline(always)] pub fn constrain_closure(f: F) -> F where - F: FnOnce() -> Result, + F: FnOnce() -> Result, { - f + f } #[macro_export] @@ -193,131 +193,139 @@ macro_rules! 
define_env { #[cfg(test)] mod tests { - use parity_wasm::elements::FunctionType; - use parity_wasm::elements::ValueType; - use sp_runtime::traits::Zero; - use sp_sandbox::{ReturnValue, Value}; - use crate::wasm::tests::MockExt; - use crate::wasm::Runtime; - use crate::exec::Ext; - use crate::gas::Gas; - - #[test] - fn macro_unmarshall_then_body_then_marshall_value_or_trap() { - fn test_value( - _ctx: &mut u32, - args: &[sp_sandbox::Value], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - _ctx, - (a: u32, b: u32) -> u32 => { - if b == 0 { - Err(sp_sandbox::HostError) - } else { - Ok(a / b) - } - } - ) - } - - let ctx = &mut 0; - assert_eq!( - test_value(ctx, &[Value::I32(15), Value::I32(3)]).unwrap(), - ReturnValue::Value(Value::I32(5)), - ); - assert!(test_value(ctx, &[Value::I32(15), Value::I32(0)]).is_err()); - } - - #[test] - fn macro_unmarshall_then_body_then_marshall_unit() { - fn test_unit( - ctx: &mut u32, - args: &[sp_sandbox::Value], - ) -> Result { - let mut args = args.iter(); - unmarshall_then_body_then_marshall!( - args, - ctx, - (a: u32, b: u32) => { - *ctx = a + b; - Ok(()) - } - ) - } - - let ctx = &mut 0; - let result = test_unit(ctx, &[Value::I32(2), Value::I32(3)]).unwrap(); - assert_eq!(result, ReturnValue::Unit); - assert_eq!(*ctx, 5); - } - - #[test] - fn macro_define_func() { - define_func!( ext_gas (_ctx, amount: u32) => { - let amount = Gas::from(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(sp_sandbox::HostError) - } - }); - let _f: fn(&mut Runtime, &[sp_sandbox::Value]) - -> Result = ext_gas::; - } - - #[test] - fn macro_gen_signature() { - assert_eq!( - gen_signature!((i32)), - FunctionType::new(vec![ValueType::I32], None), - ); - - assert_eq!( - gen_signature!( (i32, u32) -> u32 ), - FunctionType::new(vec![ValueType::I32, ValueType::I32], Some(ValueType::I32)), - ); - } - - #[test] - fn macro_unmarshall_then_body() { - let args = vec![Value::I32(5), Value::I32(3)]; - let mut 
args = args.iter(); - - let ctx: &mut u32 = &mut 0; - - let r = unmarshall_then_body!( - { - *ctx = a + b; - a * b - }, - ctx, - args, - a: u32, - b: u32 - ); - - assert_eq!(*ctx, 8); - assert_eq!(r, 15); - } - - #[test] - fn macro_define_env() { - use crate::wasm::env_def::ImportSatisfyCheck; - - define_env!(Env, , - ext_gas( _ctx, amount: u32 ) => { - let amount = Gas::from(amount); - if !amount.is_zero() { - Ok(()) - } else { - Err(sp_sandbox::HostError) - } - }, - ); - - assert!(Env::can_satisfy(b"ext_gas", &FunctionType::new(vec![ValueType::I32], None))); - assert!(!Env::can_satisfy(b"not_exists", &FunctionType::new(vec![], None))); - } + use crate::exec::Ext; + use crate::gas::Gas; + use crate::wasm::tests::MockExt; + use crate::wasm::Runtime; + use parity_wasm::elements::FunctionType; + use parity_wasm::elements::ValueType; + use sp_runtime::traits::Zero; + use sp_sandbox::{ReturnValue, Value}; + + #[test] + fn macro_unmarshall_then_body_then_marshall_value_or_trap() { + fn test_value( + _ctx: &mut u32, + args: &[sp_sandbox::Value], + ) -> Result { + let mut args = args.iter(); + unmarshall_then_body_then_marshall!( + args, + _ctx, + (a: u32, b: u32) -> u32 => { + if b == 0 { + Err(sp_sandbox::HostError) + } else { + Ok(a / b) + } + } + ) + } + + let ctx = &mut 0; + assert_eq!( + test_value(ctx, &[Value::I32(15), Value::I32(3)]).unwrap(), + ReturnValue::Value(Value::I32(5)), + ); + assert!(test_value(ctx, &[Value::I32(15), Value::I32(0)]).is_err()); + } + + #[test] + fn macro_unmarshall_then_body_then_marshall_unit() { + fn test_unit( + ctx: &mut u32, + args: &[sp_sandbox::Value], + ) -> Result { + let mut args = args.iter(); + unmarshall_then_body_then_marshall!( + args, + ctx, + (a: u32, b: u32) => { + *ctx = a + b; + Ok(()) + } + ) + } + + let ctx = &mut 0; + let result = test_unit(ctx, &[Value::I32(2), Value::I32(3)]).unwrap(); + assert_eq!(result, ReturnValue::Unit); + assert_eq!(*ctx, 5); + } + + #[test] + fn macro_define_func() { + define_func!( 
ext_gas (_ctx, amount: u32) => { + let amount = Gas::from(amount); + if !amount.is_zero() { + Ok(()) + } else { + Err(sp_sandbox::HostError) + } + }); + let _f: fn( + &mut Runtime, + &[sp_sandbox::Value], + ) -> Result = ext_gas::; + } + + #[test] + fn macro_gen_signature() { + assert_eq!( + gen_signature!((i32)), + FunctionType::new(vec![ValueType::I32], None), + ); + + assert_eq!( + gen_signature!( (i32, u32) -> u32 ), + FunctionType::new(vec![ValueType::I32, ValueType::I32], Some(ValueType::I32)), + ); + } + + #[test] + fn macro_unmarshall_then_body() { + let args = vec![Value::I32(5), Value::I32(3)]; + let mut args = args.iter(); + + let ctx: &mut u32 = &mut 0; + + let r = unmarshall_then_body!( + { + *ctx = a + b; + a * b + }, + ctx, + args, + a: u32, + b: u32 + ); + + assert_eq!(*ctx, 8); + assert_eq!(r, 15); + } + + #[test] + fn macro_define_env() { + use crate::wasm::env_def::ImportSatisfyCheck; + + define_env!(Env, , + ext_gas( _ctx, amount: u32 ) => { + let amount = Gas::from(amount); + if !amount.is_zero() { + Ok(()) + } else { + Err(sp_sandbox::HostError) + } + }, + ); + + assert!(Env::can_satisfy( + b"ext_gas", + &FunctionType::new(vec![ValueType::I32], None) + )); + assert!(!Env::can_satisfy( + b"not_exists", + &FunctionType::new(vec![], None) + )); + } } diff --git a/frame/contracts/src/wasm/env_def/mod.rs b/frame/contracts/src/wasm/env_def/mod.rs index 7b67f74ec9..9cc9174478 100644 --- a/frame/contracts/src/wasm/env_def/mod.rs +++ b/frame/contracts/src/wasm/env_def/mod.rs @@ -17,70 +17,69 @@ use super::Runtime; use crate::exec::Ext; -use sp_sandbox::Value; use parity_wasm::elements::{FunctionType, ValueType}; +use sp_sandbox::Value; #[macro_use] pub(crate) mod macros; pub trait ConvertibleToWasm: Sized { - const VALUE_TYPE: ValueType; - type NativeType; - fn to_typed_value(self) -> Value; - fn from_typed_value(_: Value) -> Option; + const VALUE_TYPE: ValueType; + type NativeType; + fn to_typed_value(self) -> Value; + fn from_typed_value(_: Value) -> 
Option; } impl ConvertibleToWasm for i32 { - type NativeType = i32; - const VALUE_TYPE: ValueType = ValueType::I32; - fn to_typed_value(self) -> Value { - Value::I32(self) - } - fn from_typed_value(v: Value) -> Option { - v.as_i32() - } + type NativeType = i32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_typed_value(self) -> Value { + Value::I32(self) + } + fn from_typed_value(v: Value) -> Option { + v.as_i32() + } } impl ConvertibleToWasm for u32 { - type NativeType = u32; - const VALUE_TYPE: ValueType = ValueType::I32; - fn to_typed_value(self) -> Value { - Value::I32(self as i32) - } - fn from_typed_value(v: Value) -> Option { - match v { - Value::I32(v) => Some(v as u32), - _ => None, - } - } + type NativeType = u32; + const VALUE_TYPE: ValueType = ValueType::I32; + fn to_typed_value(self) -> Value { + Value::I32(self as i32) + } + fn from_typed_value(v: Value) -> Option { + match v { + Value::I32(v) => Some(v as u32), + _ => None, + } + } } impl ConvertibleToWasm for u64 { - type NativeType = u64; - const VALUE_TYPE: ValueType = ValueType::I64; - fn to_typed_value(self) -> Value { - Value::I64(self as i64) - } - fn from_typed_value(v: Value) -> Option { - match v { - Value::I64(v) => Some(v as u64), - _ => None, - } - } + type NativeType = u64; + const VALUE_TYPE: ValueType = ValueType::I64; + fn to_typed_value(self) -> Value { + Value::I64(self as i64) + } + fn from_typed_value(v: Value) -> Option { + match v { + Value::I64(v) => Some(v as u64), + _ => None, + } + } } -pub(crate) type HostFunc = - fn( - &mut Runtime, - &[sp_sandbox::Value] - ) -> Result; +pub(crate) type HostFunc = fn( + &mut Runtime, + &[sp_sandbox::Value], +) -> Result; pub(crate) trait FunctionImplProvider { - fn impls)>(f: &mut F); + fn impls)>(f: &mut F); } /// This trait can be used to check whether the host environment can satisfy /// a requested function import. 
pub trait ImportSatisfyCheck { - /// Returns `true` if the host environment contains a function with - /// the specified name and its type matches to the given type, or `false` - /// otherwise. - fn can_satisfy(name: &[u8], func_type: &FunctionType) -> bool; + /// Returns `true` if the host environment contains a function with + /// the specified name and its type matches to the given type, or `false` + /// otherwise. + fn can_satisfy(name: &[u8], func_type: &FunctionType) -> bool; } diff --git a/frame/contracts/src/wasm/mod.rs b/frame/contracts/src/wasm/mod.rs index 8911fb72b6..eec4215c98 100644 --- a/frame/contracts/src/wasm/mod.rs +++ b/frame/contracts/src/wasm/mod.rs @@ -17,14 +17,14 @@ //! This module provides a means for executing contracts //! represented in wasm. -use crate::{CodeHash, Schedule, Trait}; -use crate::wasm::env_def::FunctionImplProvider; -use crate::exec::{Ext, ExecResult}; +use crate::exec::{ExecResult, Ext}; use crate::gas::GasMeter; +use crate::wasm::env_def::FunctionImplProvider; +use crate::{CodeHash, Schedule, Trait}; -use sp_std::prelude::*; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_sandbox; +use sp_std::prelude::*; #[macro_use] mod env_def; @@ -32,480 +32,476 @@ mod code_cache; mod prepare; mod runtime; -use self::runtime::{to_execution_result, Runtime}; use self::code_cache::load as load_code; +use self::runtime::{to_execution_result, Runtime}; pub use self::code_cache::save as save_code; /// A prepared wasm module ready for execution. #[derive(Clone, Encode, Decode)] pub struct PrefabWasmModule { - /// Version of the schedule with which the code was instrumented. - #[codec(compact)] - schedule_version: u32, - #[codec(compact)] - initial: u32, - #[codec(compact)] - maximum: u32, - /// This field is reserved for future evolution of format. - /// - /// Basically, for now this field will be serialized as `None`. In the future - /// we would be able to extend this structure with. 
- _reserved: Option<()>, - /// Code instrumented with the latest schedule. - code: Vec, + /// Version of the schedule with which the code was instrumented. + #[codec(compact)] + schedule_version: u32, + #[codec(compact)] + initial: u32, + #[codec(compact)] + maximum: u32, + /// This field is reserved for future evolution of format. + /// + /// Basically, for now this field will be serialized as `None`. In the future + /// we would be able to extend this structure with. + _reserved: Option<()>, + /// Code instrumented with the latest schedule. + code: Vec, } /// Wasm executable loaded by `WasmLoader` and executed by `WasmVm`. pub struct WasmExecutable { - entrypoint_name: &'static str, - prefab_module: PrefabWasmModule, + entrypoint_name: &'static str, + prefab_module: PrefabWasmModule, } /// Loader which fetches `WasmExecutable` from the code cache. pub struct WasmLoader<'a> { - schedule: &'a Schedule, + schedule: &'a Schedule, } impl<'a> WasmLoader<'a> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmLoader { schedule } - } + pub fn new(schedule: &'a Schedule) -> Self { + WasmLoader { schedule } + } } impl<'a, T: Trait> crate::exec::Loader for WasmLoader<'a> { - type Executable = WasmExecutable; - - fn load_init(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: "deploy", - prefab_module, - }) - } - fn load_main(&self, code_hash: &CodeHash) -> Result { - let prefab_module = load_code::(code_hash, self.schedule)?; - Ok(WasmExecutable { - entrypoint_name: "call", - prefab_module, - }) - } + type Executable = WasmExecutable; + + fn load_init(&self, code_hash: &CodeHash) -> Result { + let prefab_module = load_code::(code_hash, self.schedule)?; + Ok(WasmExecutable { + entrypoint_name: "deploy", + prefab_module, + }) + } + fn load_main(&self, code_hash: &CodeHash) -> Result { + let prefab_module = load_code::(code_hash, self.schedule)?; + Ok(WasmExecutable { + 
entrypoint_name: "call", + prefab_module, + }) + } } /// Implementation of `Vm` that takes `WasmExecutable` and executes it. pub struct WasmVm<'a> { - schedule: &'a Schedule, + schedule: &'a Schedule, } impl<'a> WasmVm<'a> { - pub fn new(schedule: &'a Schedule) -> Self { - WasmVm { schedule } - } + pub fn new(schedule: &'a Schedule) -> Self { + WasmVm { schedule } + } } impl<'a, T: Trait> crate::exec::Vm for WasmVm<'a> { - type Executable = WasmExecutable; - - fn execute>( - &self, - exec: &WasmExecutable, - mut ext: E, - input_data: Vec, - gas_meter: &mut GasMeter, - ) -> ExecResult { - let memory = - sp_sandbox::Memory::new(exec.prefab_module.initial, Some(exec.prefab_module.maximum)) - .unwrap_or_else(|_| { - // unlike `.expect`, explicit panic preserves the source location. - // Needed as we can't use `RUST_BACKTRACE` in here. - panic!( + type Executable = WasmExecutable; + + fn execute>( + &self, + exec: &WasmExecutable, + mut ext: E, + input_data: Vec, + gas_meter: &mut GasMeter, + ) -> ExecResult { + let memory = + sp_sandbox::Memory::new(exec.prefab_module.initial, Some(exec.prefab_module.maximum)) + .unwrap_or_else(|_| { + // unlike `.expect`, explicit panic preserves the source location. + // Needed as we can't use `RUST_BACKTRACE` in here. + panic!( "exec.prefab_module.initial can't be greater than exec.prefab_module.maximum; thus Memory::new must not fail; qed" ) - }); - - let mut imports = sp_sandbox::EnvironmentDefinitionBuilder::new(); - imports.add_memory("env", "memory", memory.clone()); - runtime::Env::impls(&mut |name, func_ptr| { - imports.add_host_func("env", name, func_ptr); - }); - - let mut runtime = Runtime::new( - &mut ext, - input_data, - &self.schedule, - memory, - gas_meter, - ); - - // Instantiate the instance from the instrumented module code and invoke the contract - // entrypoint. 
- let result = sp_sandbox::Instance::new(&exec.prefab_module.code, &imports, &mut runtime) - .and_then(|mut instance| instance.invoke(exec.entrypoint_name, &[], &mut runtime)); - to_execution_result(runtime, result) - } + }); + + let mut imports = sp_sandbox::EnvironmentDefinitionBuilder::new(); + imports.add_memory("env", "memory", memory.clone()); + runtime::Env::impls(&mut |name, func_ptr| { + imports.add_host_func("env", name, func_ptr); + }); + + let mut runtime = Runtime::new(&mut ext, input_data, &self.schedule, memory, gas_meter); + + // Instantiate the instance from the instrumented module code and invoke the contract + // entrypoint. + let result = sp_sandbox::Instance::new(&exec.prefab_module.code, &imports, &mut runtime) + .and_then(|mut instance| instance.invoke(exec.entrypoint_name, &[], &mut runtime)); + to_execution_result(runtime, result) + } } #[cfg(test)] mod tests { - use super::*; - use std::collections::HashMap; - use std::cell::RefCell; - use sp_core::H256; - use crate::exec::{Ext, StorageKey, ExecError, ExecReturnValue, STATUS_SUCCESS}; - use crate::gas::{Gas, GasMeter}; - use crate::tests::{Test, Call}; - use crate::wasm::prepare::prepare_contract; - use crate::CodeHash; - use wabt; - use hex_literal::hex; - use assert_matches::assert_matches; - use sp_runtime::DispatchError; - - #[derive(Debug, PartialEq, Eq)] - struct DispatchEntry(Call); - - #[derive(Debug, PartialEq, Eq)] - struct RestoreEntry { - dest: u64, - code_hash: H256, - rent_allowance: u64, - delta: Vec, - } - - #[derive(Debug, PartialEq, Eq)] - struct InstantiateEntry { - code_hash: H256, - endowment: u64, - data: Vec, - gas_left: u64, - } - - #[derive(Debug, PartialEq, Eq)] - struct TerminationEntry { - beneficiary: u64, - gas_left: u64, - } - - #[derive(Debug, PartialEq, Eq)] - struct TransferEntry { - to: u64, - value: u64, - data: Vec, - gas_left: u64, - } - - #[derive(Default)] - pub struct MockExt { - storage: HashMap>, - rent_allowance: u64, - instantiates: Vec, - 
terminations: Vec, - transfers: Vec, - dispatches: Vec, - restores: Vec, - // (topics, data) - events: Vec<(Vec, Vec)>, - next_account_id: u64, - - /// Runtime storage keys works the following way. - /// - /// - If the test code requests a value and it doesn't exist in this storage map then a - /// panic happens. - /// - If the value does exist it is returned and then removed from the map. So a panic - /// happens if the same value is requested for the second time. - /// - /// This behavior is used to prevent mixing up an access to unexpected location and empty - /// cell. - runtime_storage_keys: RefCell, Option>>>, - } - - impl Ext for MockExt { - type T = Test; - - fn get_storage(&self, key: &StorageKey) -> Option> { - self.storage.get(key).cloned() - } - fn set_storage(&mut self, key: StorageKey, value: Option>) - -> Result<(), &'static str> - { - *self.storage.entry(key).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); - Ok(()) - } - fn instantiate( - &mut self, - code_hash: &CodeHash, - endowment: u64, - gas_meter: &mut GasMeter, - data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { - self.instantiates.push(InstantiateEntry { - code_hash: code_hash.clone(), - endowment, - data: data.to_vec(), - gas_left: gas_meter.gas_left(), - }); - let address = self.next_account_id; - self.next_account_id += 1; - - Ok(( - address, - ExecReturnValue { - status: STATUS_SUCCESS, - data: Vec::new(), - }, - )) - } - fn transfer( - &mut self, - to: &u64, - value: u64, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - self.transfers.push(TransferEntry { - to: *to, - value, - data: Vec::new(), - gas_left: gas_meter.gas_left(), - }); - Ok(()) - } - fn call( - &mut self, - to: &u64, - value: u64, - gas_meter: &mut GasMeter, - data: Vec, - ) -> ExecResult { - self.transfers.push(TransferEntry { - to: *to, - value, - data: data, - gas_left: gas_meter.gas_left(), - }); - // Assume for now that it was just a plain transfer. 
- // TODO: Add tests for different call outcomes. - Ok(ExecReturnValue { status: STATUS_SUCCESS, data: Vec::new() }) - } - fn terminate( - &mut self, - beneficiary: &u64, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - self.terminations.push(TerminationEntry { - beneficiary: *beneficiary, - gas_left: gas_meter.gas_left(), - }); - Ok(()) - } - fn note_dispatch_call(&mut self, call: Call) { - self.dispatches.push(DispatchEntry(call)); - } - fn note_restore_to( - &mut self, - dest: u64, - code_hash: H256, - rent_allowance: u64, - delta: Vec, - ) { - self.restores.push(RestoreEntry { - dest, - code_hash, - rent_allowance, - delta, - }); - } - fn caller(&self) -> &u64 { - &42 - } - fn address(&self) -> &u64 { - &69 - } - fn balance(&self) -> u64 { - 228 - } - fn value_transferred(&self) -> u64 { - 1337 - } - - fn now(&self) -> &u64 { - &1111 - } - - fn minimum_balance(&self) -> u64 { - 666 - } - - fn tombstone_deposit(&self) -> u64 { - 16 - } - - fn random(&self, subject: &[u8]) -> H256 { - H256::from_slice(subject) - } - - fn deposit_event(&mut self, topics: Vec, data: Vec) { - self.events.push((topics, data)) - } - - fn set_rent_allowance(&mut self, rent_allowance: u64) { - self.rent_allowance = rent_allowance; - } - - fn rent_allowance(&self) -> u64 { - self.rent_allowance - } - - fn block_number(&self) -> u64 { 121 } - - fn max_value_size(&self) -> u32 { 16_384 } - - fn get_runtime_storage(&self, key: &[u8]) -> Option> { - let opt_value = self.runtime_storage_keys - .borrow_mut() - .remove(key); - opt_value.unwrap_or_else(|| - panic!( - "{:?} doesn't exist. 
values that do exist {:?}", - key, - self.runtime_storage_keys - ) - ) - } - } - - impl Ext for &mut MockExt { - type T = ::T; - - fn get_storage(&self, key: &[u8; 32]) -> Option> { - (**self).get_storage(key) - } - fn set_storage(&mut self, key: [u8; 32], value: Option>) - -> Result<(), &'static str> - { - (**self).set_storage(key, value) - } - fn instantiate( - &mut self, - code: &CodeHash, - value: u64, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> Result<(u64, ExecReturnValue), ExecError> { - (**self).instantiate(code, value, gas_meter, input_data) - } - fn transfer( - &mut self, - to: &u64, - value: u64, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - (**self).transfer(to, value, gas_meter) - } - fn terminate( - &mut self, - beneficiary: &u64, - gas_meter: &mut GasMeter, - ) -> Result<(), DispatchError> { - (**self).terminate(beneficiary, gas_meter) - } - fn call( - &mut self, - to: &u64, - value: u64, - gas_meter: &mut GasMeter, - input_data: Vec, - ) -> ExecResult { - (**self).call(to, value, gas_meter, input_data) - } - fn note_dispatch_call(&mut self, call: Call) { - (**self).note_dispatch_call(call) - } - fn note_restore_to( - &mut self, - dest: u64, - code_hash: H256, - rent_allowance: u64, - delta: Vec, - ) { - (**self).note_restore_to( - dest, - code_hash, - rent_allowance, - delta, - ) - } - fn caller(&self) -> &u64 { - (**self).caller() - } - fn address(&self) -> &u64 { - (**self).address() - } - fn balance(&self) -> u64 { - (**self).balance() - } - fn value_transferred(&self) -> u64 { - (**self).value_transferred() - } - fn now(&self) -> &u64 { - (**self).now() - } - fn minimum_balance(&self) -> u64 { - (**self).minimum_balance() - } - fn tombstone_deposit(&self) -> u64 { - (**self).tombstone_deposit() - } - fn random(&self, subject: &[u8]) -> H256 { - (**self).random(subject) - } - fn deposit_event(&mut self, topics: Vec, data: Vec) { - (**self).deposit_event(topics, data) - } - fn set_rent_allowance(&mut self, 
rent_allowance: u64) { - (**self).set_rent_allowance(rent_allowance) - } - fn rent_allowance(&self) -> u64 { - (**self).rent_allowance() - } - fn block_number(&self) -> u64 { - (**self).block_number() - } - fn max_value_size(&self) -> u32 { - (**self).max_value_size() - } - fn get_runtime_storage(&self, key: &[u8]) -> Option> { - (**self).get_runtime_storage(key) - } - } - - fn execute( - wat: &str, - input_data: Vec, - ext: E, - gas_meter: &mut GasMeter, - ) -> ExecResult { - use crate::exec::Vm; - - let wasm = wabt::wat2wasm(wat).unwrap(); - let schedule = crate::Schedule::default(); - let prefab_module = - prepare_contract::(&wasm, &schedule).unwrap(); - - let exec = WasmExecutable { - // Use a "call" convention. - entrypoint_name: "call", - prefab_module, - }; - - let cfg = Default::default(); - let vm = WasmVm::new(&cfg); - - vm.execute(&exec, ext, input_data, gas_meter) - } - - const CODE_TRANSFER: &str = r#" + use super::*; + use crate::exec::{ExecError, ExecReturnValue, Ext, StorageKey, STATUS_SUCCESS}; + use crate::gas::{Gas, GasMeter}; + use crate::tests::{Call, Test}; + use crate::wasm::prepare::prepare_contract; + use crate::CodeHash; + use assert_matches::assert_matches; + use hex_literal::hex; + use sp_core::H256; + use sp_runtime::DispatchError; + use std::cell::RefCell; + use std::collections::HashMap; + use wabt; + + #[derive(Debug, PartialEq, Eq)] + struct DispatchEntry(Call); + + #[derive(Debug, PartialEq, Eq)] + struct RestoreEntry { + dest: u64, + code_hash: H256, + rent_allowance: u64, + delta: Vec, + } + + #[derive(Debug, PartialEq, Eq)] + struct InstantiateEntry { + code_hash: H256, + endowment: u64, + data: Vec, + gas_left: u64, + } + + #[derive(Debug, PartialEq, Eq)] + struct TerminationEntry { + beneficiary: u64, + gas_left: u64, + } + + #[derive(Debug, PartialEq, Eq)] + struct TransferEntry { + to: u64, + value: u64, + data: Vec, + gas_left: u64, + } + + #[derive(Default)] + pub struct MockExt { + storage: HashMap>, + rent_allowance: 
u64, + instantiates: Vec, + terminations: Vec, + transfers: Vec, + dispatches: Vec, + restores: Vec, + // (topics, data) + events: Vec<(Vec, Vec)>, + next_account_id: u64, + + /// Runtime storage keys works the following way. + /// + /// - If the test code requests a value and it doesn't exist in this storage map then a + /// panic happens. + /// - If the value does exist it is returned and then removed from the map. So a panic + /// happens if the same value is requested for the second time. + /// + /// This behavior is used to prevent mixing up an access to unexpected location and empty + /// cell. + runtime_storage_keys: RefCell, Option>>>, + } + + impl Ext for MockExt { + type T = Test; + + fn get_storage(&self, key: &StorageKey) -> Option> { + self.storage.get(key).cloned() + } + fn set_storage( + &mut self, + key: StorageKey, + value: Option>, + ) -> Result<(), &'static str> { + *self.storage.entry(key).or_insert(Vec::new()) = value.unwrap_or(Vec::new()); + Ok(()) + } + fn instantiate( + &mut self, + code_hash: &CodeHash, + endowment: u64, + gas_meter: &mut GasMeter, + data: Vec, + ) -> Result<(u64, ExecReturnValue), ExecError> { + self.instantiates.push(InstantiateEntry { + code_hash: code_hash.clone(), + endowment, + data: data.to_vec(), + gas_left: gas_meter.gas_left(), + }); + let address = self.next_account_id; + self.next_account_id += 1; + + Ok(( + address, + ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }, + )) + } + fn transfer( + &mut self, + to: &u64, + value: u64, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + self.transfers.push(TransferEntry { + to: *to, + value, + data: Vec::new(), + gas_left: gas_meter.gas_left(), + }); + Ok(()) + } + fn call( + &mut self, + to: &u64, + value: u64, + gas_meter: &mut GasMeter, + data: Vec, + ) -> ExecResult { + self.transfers.push(TransferEntry { + to: *to, + value, + data: data, + gas_left: gas_meter.gas_left(), + }); + // Assume for now that it was just a plain transfer. 
+ // TODO: Add tests for different call outcomes. + Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }) + } + fn terminate( + &mut self, + beneficiary: &u64, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + self.terminations.push(TerminationEntry { + beneficiary: *beneficiary, + gas_left: gas_meter.gas_left(), + }); + Ok(()) + } + fn note_dispatch_call(&mut self, call: Call) { + self.dispatches.push(DispatchEntry(call)); + } + fn note_restore_to( + &mut self, + dest: u64, + code_hash: H256, + rent_allowance: u64, + delta: Vec, + ) { + self.restores.push(RestoreEntry { + dest, + code_hash, + rent_allowance, + delta, + }); + } + fn caller(&self) -> &u64 { + &42 + } + fn address(&self) -> &u64 { + &69 + } + fn balance(&self) -> u64 { + 228 + } + fn value_transferred(&self) -> u64 { + 1337 + } + + fn now(&self) -> &u64 { + &1111 + } + + fn minimum_balance(&self) -> u64 { + 666 + } + + fn tombstone_deposit(&self) -> u64 { + 16 + } + + fn random(&self, subject: &[u8]) -> H256 { + H256::from_slice(subject) + } + + fn deposit_event(&mut self, topics: Vec, data: Vec) { + self.events.push((topics, data)) + } + + fn set_rent_allowance(&mut self, rent_allowance: u64) { + self.rent_allowance = rent_allowance; + } + + fn rent_allowance(&self) -> u64 { + self.rent_allowance + } + + fn block_number(&self) -> u64 { + 121 + } + + fn max_value_size(&self) -> u32 { + 16_384 + } + + fn get_runtime_storage(&self, key: &[u8]) -> Option> { + let opt_value = self.runtime_storage_keys.borrow_mut().remove(key); + opt_value.unwrap_or_else(|| { + panic!( + "{:?} doesn't exist. 
values that do exist {:?}", + key, self.runtime_storage_keys + ) + }) + } + } + + impl Ext for &mut MockExt { + type T = ::T; + + fn get_storage(&self, key: &[u8; 32]) -> Option> { + (**self).get_storage(key) + } + fn set_storage( + &mut self, + key: [u8; 32], + value: Option>, + ) -> Result<(), &'static str> { + (**self).set_storage(key, value) + } + fn instantiate( + &mut self, + code: &CodeHash, + value: u64, + gas_meter: &mut GasMeter, + input_data: Vec, + ) -> Result<(u64, ExecReturnValue), ExecError> { + (**self).instantiate(code, value, gas_meter, input_data) + } + fn transfer( + &mut self, + to: &u64, + value: u64, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + (**self).transfer(to, value, gas_meter) + } + fn terminate( + &mut self, + beneficiary: &u64, + gas_meter: &mut GasMeter, + ) -> Result<(), DispatchError> { + (**self).terminate(beneficiary, gas_meter) + } + fn call( + &mut self, + to: &u64, + value: u64, + gas_meter: &mut GasMeter, + input_data: Vec, + ) -> ExecResult { + (**self).call(to, value, gas_meter, input_data) + } + fn note_dispatch_call(&mut self, call: Call) { + (**self).note_dispatch_call(call) + } + fn note_restore_to( + &mut self, + dest: u64, + code_hash: H256, + rent_allowance: u64, + delta: Vec, + ) { + (**self).note_restore_to(dest, code_hash, rent_allowance, delta) + } + fn caller(&self) -> &u64 { + (**self).caller() + } + fn address(&self) -> &u64 { + (**self).address() + } + fn balance(&self) -> u64 { + (**self).balance() + } + fn value_transferred(&self) -> u64 { + (**self).value_transferred() + } + fn now(&self) -> &u64 { + (**self).now() + } + fn minimum_balance(&self) -> u64 { + (**self).minimum_balance() + } + fn tombstone_deposit(&self) -> u64 { + (**self).tombstone_deposit() + } + fn random(&self, subject: &[u8]) -> H256 { + (**self).random(subject) + } + fn deposit_event(&mut self, topics: Vec, data: Vec) { + (**self).deposit_event(topics, data) + } + fn set_rent_allowance(&mut self, rent_allowance: 
u64) { + (**self).set_rent_allowance(rent_allowance) + } + fn rent_allowance(&self) -> u64 { + (**self).rent_allowance() + } + fn block_number(&self) -> u64 { + (**self).block_number() + } + fn max_value_size(&self) -> u32 { + (**self).max_value_size() + } + fn get_runtime_storage(&self, key: &[u8]) -> Option> { + (**self).get_runtime_storage(key) + } + } + + fn execute( + wat: &str, + input_data: Vec, + ext: E, + gas_meter: &mut GasMeter, + ) -> ExecResult { + use crate::exec::Vm; + + let wasm = wabt::wat2wasm(wat).unwrap(); + let schedule = crate::Schedule::default(); + let prefab_module = prepare_contract::(&wasm, &schedule).unwrap(); + + let exec = WasmExecutable { + // Use a "call" convention. + entrypoint_name: "call", + prefab_module, + }; + + let cfg = Default::default(); + let vm = WasmVm::new(&cfg); + + vm.execute(&exec, ext, input_data, gas_meter) + } + + const CODE_TRANSFER: &str = r#" (module ;; ext_transfer( ;; account_ptr: u32, @@ -537,28 +533,29 @@ mod tests { ) "#; - #[test] - fn contract_transfer() { - let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_TRANSFER, - vec![], - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 7, - value: 153, - data: Vec::new(), - gas_left: 49978, - }] - ); - } - - const CODE_CALL: &str = r#" + #[test] + fn contract_transfer() { + let mut mock_ext = MockExt::default(); + let _ = execute( + CODE_TRANSFER, + vec![], + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.transfers, + &[TransferEntry { + to: 7, + value: 153, + data: Vec::new(), + gas_left: 49978, + }] + ); + } + + const CODE_CALL: &str = r#" (module ;; ext_call( ;; callee_ptr: u32, @@ -597,28 +594,29 @@ mod tests { ) "#; - #[test] - fn contract_call() { - let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_CALL, - vec![], - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - 
assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 9, - value: 6, - data: vec![1, 2, 3, 4], - gas_left: 49971, - }] - ); - } - - const CODE_INSTANTIATE: &str = r#" + #[test] + fn contract_call() { + let mut mock_ext = MockExt::default(); + let _ = execute( + CODE_CALL, + vec![], + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.transfers, + &[TransferEntry { + to: 9, + value: 6, + data: vec![1, 2, 3, 4], + gas_left: 49971, + }] + ); + } + + const CODE_INSTANTIATE: &str = r#" (module ;; ext_instantiate( ;; code_ptr: u32, @@ -659,28 +657,29 @@ mod tests { ) "#; - #[test] - fn contract_instantiate() { - let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_INSTANTIATE, - vec![], - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!( - &mock_ext.instantiates, - &[InstantiateEntry { - code_hash: [0x11; 32].into(), - endowment: 3, - data: vec![1, 2, 3, 4], - gas_left: 49947, - }] - ); - } - - const CODE_TERMINATE: &str = r#" + #[test] + fn contract_instantiate() { + let mut mock_ext = MockExt::default(); + let _ = execute( + CODE_INSTANTIATE, + vec![], + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.instantiates, + &[InstantiateEntry { + code_hash: [0x11; 32].into(), + endowment: 3, + data: vec![1, 2, 3, 4], + gas_left: 49947, + }] + ); + } + + const CODE_TERMINATE: &str = r#" (module ;; ext_terminate( ;; beneficiary_ptr: u32, @@ -702,26 +701,27 @@ mod tests { ) "#; - #[test] - fn contract_terminate() { - let mut mock_ext = MockExt::default(); - execute( - CODE_TERMINATE, - vec![], - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!( - &mock_ext.terminations, - &[TerminationEntry { - beneficiary: 0x09, - gas_left: 49989, - }] - ); - } - - const CODE_TRANSFER_LIMITED_GAS: &str = r#" + #[test] + fn contract_terminate() { + let mut mock_ext = MockExt::default(); + execute( + 
CODE_TERMINATE, + vec![], + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.terminations, + &[TerminationEntry { + beneficiary: 0x09, + gas_left: 49989, + }] + ); + } + + const CODE_TRANSFER_LIMITED_GAS: &str = r#" (module ;; ext_call( ;; callee_ptr: u32, @@ -760,28 +760,29 @@ mod tests { ) "#; - #[test] - fn contract_call_limited_gas() { - let mut mock_ext = MockExt::default(); - let _ = execute( - &CODE_TRANSFER_LIMITED_GAS, - vec![], - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!( - &mock_ext.transfers, - &[TransferEntry { - to: 9, - value: 6, - data: vec![1, 2, 3, 4], - gas_left: 228, - }] - ); - } - - const CODE_GET_STORAGE: &str = r#" + #[test] + fn contract_call_limited_gas() { + let mut mock_ext = MockExt::default(); + let _ = execute( + &CODE_TRANSFER_LIMITED_GAS, + vec![], + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.transfers, + &[TransferEntry { + to: 9, + value: 6, + data: vec![1, 2, 3, 4], + gas_left: 228, + }] + ); + } + + const CODE_GET_STORAGE: &str = r#" (module (import "env" "ext_get_storage" (func $ext_get_storage (param i32) (result i32))) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -849,26 +850,31 @@ mod tests { ) "#; - #[test] - fn get_storage_puts_data_into_scratch_buf() { - let mut mock_ext = MockExt::default(); - mock_ext - .storage - .insert([0x11; 32], [0x22; 32].to_vec()); - - let output = execute( - CODE_GET_STORAGE, - vec![], - mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!(output, ExecReturnValue { status: STATUS_SUCCESS, data: [0x22; 32].to_vec() }); - } - - /// calls `ext_caller`, loads the address from the scratch buffer and - /// compares it with the constant 42. 
- const CODE_CALLER: &str = r#" + #[test] + fn get_storage_puts_data_into_scratch_buf() { + let mut mock_ext = MockExt::default(); + mock_ext.storage.insert([0x11; 32], [0x22; 32].to_vec()); + + let output = execute( + CODE_GET_STORAGE, + vec![], + mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + output, + ExecReturnValue { + status: STATUS_SUCCESS, + data: [0x22; 32].to_vec() + } + ); + } + + /// calls `ext_caller`, loads the address from the scratch buffer and + /// compares it with the constant 42. + const CODE_CALLER: &str = r#" (module (import "env" "ext_caller" (func $ext_caller)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -918,19 +924,20 @@ mod tests { ) "#; - #[test] - fn caller() { - let _ = execute( - CODE_CALLER, - vec![], - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - } - - /// calls `ext_address`, loads the address from the scratch buffer and - /// compares it with the constant 69. - const CODE_ADDRESS: &str = r#" + #[test] + fn caller() { + let _ = execute( + CODE_CALLER, + vec![], + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + } + + /// calls `ext_address`, loads the address from the scratch buffer and + /// compares it with the constant 69. 
+ const CODE_ADDRESS: &str = r#" (module (import "env" "ext_address" (func $ext_address)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -980,17 +987,18 @@ mod tests { ) "#; - #[test] - fn address() { - let _ = execute( - CODE_ADDRESS, - vec![], - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - } - - const CODE_BALANCE: &str = r#" + #[test] + fn address() { + let _ = execute( + CODE_ADDRESS, + vec![], + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + } + + const CODE_BALANCE: &str = r#" (module (import "env" "ext_balance" (func $ext_balance)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1039,18 +1047,13 @@ mod tests { ) "#; - #[test] - fn balance() { - let mut gas_meter = GasMeter::with_limit(50_000, 1); - let _ = execute( - CODE_BALANCE, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); - } - - const CODE_GAS_PRICE: &str = r#" + #[test] + fn balance() { + let mut gas_meter = GasMeter::with_limit(50_000, 1); + let _ = execute(CODE_BALANCE, vec![], MockExt::default(), &mut gas_meter).unwrap(); + } + + const CODE_GAS_PRICE: &str = r#" (module (import "env" "ext_gas_price" (func $ext_gas_price)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1099,18 +1102,13 @@ mod tests { ) "#; - #[test] - fn gas_price() { - let mut gas_meter = GasMeter::with_limit(50_000, 1312); - let _ = execute( - CODE_GAS_PRICE, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); - } - - const CODE_GAS_LEFT: &str = r#" + #[test] + fn gas_price() { + let mut gas_meter = GasMeter::with_limit(50_000, 1312); + let _ = execute(CODE_GAS_PRICE, vec![], MockExt::default(), &mut gas_meter).unwrap(); + } + + const CODE_GAS_LEFT: &str = r#" (module (import "env" "ext_gas_left" (func $ext_gas_left)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1157,23 +1155,21 @@ mod tests { ) "#; - #[test] - fn gas_left() { 
- let mut gas_meter = GasMeter::with_limit(50_000, 1312); + #[test] + fn gas_left() { + let mut gas_meter = GasMeter::with_limit(50_000, 1312); - let output = execute( - CODE_GAS_LEFT, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); + let output = execute(CODE_GAS_LEFT, vec![], MockExt::default(), &mut gas_meter).unwrap(); - let gas_left = Gas::decode(&mut output.data.as_slice()).unwrap(); - assert!(gas_left < 50_000, "gas_left must be less than initial"); - assert!(gas_left > gas_meter.gas_left(), "gas_left must be greater than final"); - } + let gas_left = Gas::decode(&mut output.data.as_slice()).unwrap(); + assert!(gas_left < 50_000, "gas_left must be less than initial"); + assert!( + gas_left > gas_meter.gas_left(), + "gas_left must be greater than final" + ); + } - const CODE_VALUE_TRANSFERRED: &str = r#" + const CODE_VALUE_TRANSFERRED: &str = r#" (module (import "env" "ext_value_transferred" (func $ext_value_transferred)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1222,18 +1218,19 @@ mod tests { ) "#; - #[test] - fn value_transferred() { - let mut gas_meter = GasMeter::with_limit(50_000, 1); - let _ = execute( - CODE_VALUE_TRANSFERRED, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); - } - - const CODE_DISPATCH_CALL: &str = r#" + #[test] + fn value_transferred() { + let mut gas_meter = GasMeter::with_limit(50_000, 1); + let _ = execute( + CODE_VALUE_TRANSFERRED, + vec![], + MockExt::default(), + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_DISPATCH_CALL: &str = r#" (module (import "env" "ext_dispatch_call" (func $ext_dispatch_call (param i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1250,28 +1247,29 @@ mod tests { ) "#; - #[test] - fn dispatch_call() { - // This test can fail due to the encoding changes. In case it becomes too annoying - // let's rewrite so as we use this module controlled call or we serialize it in runtime. 
- - let mut mock_ext = MockExt::default(); - let _ = execute( - CODE_DISPATCH_CALL, - vec![], - &mut mock_ext, - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!( - &mock_ext.dispatches, - &[DispatchEntry( - Call::Balances(pallet_balances::Call::set_balance(42, 1337, 0)), - )] - ); - } - - const CODE_RETURN_FROM_START_FN: &str = r#" + #[test] + fn dispatch_call() { + // This test can fail due to the encoding changes. In case it becomes too annoying + // let's rewrite so as we use this module controlled call or we serialize it in runtime. + + let mut mock_ext = MockExt::default(); + let _ = execute( + CODE_DISPATCH_CALL, + vec![], + &mut mock_ext, + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + &mock_ext.dispatches, + &[DispatchEntry(Call::Balances( + pallet_balances::Call::set_balance(42, 1337, 0) + ),)] + ); + } + + const CODE_RETURN_FROM_START_FN: &str = r#" (module (import "env" "ext_return" (func $ext_return (param i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1294,19 +1292,26 @@ mod tests { ) "#; - #[test] - fn return_from_start_fn() { - let output = execute( - CODE_RETURN_FROM_START_FN, - vec![], - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!(output, ExecReturnValue { status: STATUS_SUCCESS, data: vec![1, 2, 3, 4] }); - } - - const CODE_TIMESTAMP_NOW: &str = r#" + #[test] + fn return_from_start_fn() { + let output = execute( + CODE_RETURN_FROM_START_FN, + vec![], + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + output, + ExecReturnValue { + status: STATUS_SUCCESS, + data: vec![1, 2, 3, 4] + } + ); + } + + const CODE_TIMESTAMP_NOW: &str = r#" (module (import "env" "ext_now" (func $ext_now)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1355,18 +1360,19 @@ mod tests { ) "#; - #[test] - fn now() { - let mut gas_meter = GasMeter::with_limit(50_000, 1); - let _ = execute( - CODE_TIMESTAMP_NOW, 
- vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); - } - - const CODE_MINIMUM_BALANCE: &str = r#" + #[test] + fn now() { + let mut gas_meter = GasMeter::with_limit(50_000, 1); + let _ = execute( + CODE_TIMESTAMP_NOW, + vec![], + MockExt::default(), + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_MINIMUM_BALANCE: &str = r#" (module (import "env" "ext_minimum_balance" (func $ext_minimum_balance)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1414,18 +1420,19 @@ mod tests { ) "#; - #[test] - fn minimum_balance() { - let mut gas_meter = GasMeter::with_limit(50_000, 1); - let _ = execute( - CODE_MINIMUM_BALANCE, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); - } - - const CODE_TOMBSTONE_DEPOSIT: &str = r#" + #[test] + fn minimum_balance() { + let mut gas_meter = GasMeter::with_limit(50_000, 1); + let _ = execute( + CODE_MINIMUM_BALANCE, + vec![], + MockExt::default(), + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_TOMBSTONE_DEPOSIT: &str = r#" (module (import "env" "ext_tombstone_deposit" (func $ext_tombstone_deposit)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1473,18 +1480,19 @@ mod tests { ) "#; - #[test] - fn tombstone_deposit() { - let mut gas_meter = GasMeter::with_limit(50_000, 1); - let _ = execute( - CODE_TOMBSTONE_DEPOSIT, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); - } - - const CODE_RANDOM: &str = r#" + #[test] + fn tombstone_deposit() { + let mut gas_meter = GasMeter::with_limit(50_000, 1); + let _ = execute( + CODE_TOMBSTONE_DEPOSIT, + vec![], + MockExt::default(), + &mut gas_meter, + ) + .unwrap(); + } + + const CODE_RANDOM: &str = r#" (module (import "env" "ext_random" (func $ext_random (param i32 i32))) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1541,28 +1549,24 @@ mod tests { ) "#; - #[test] - fn random() { - let mut gas_meter = GasMeter::with_limit(50_000, 1); - - let output = execute( - 
CODE_RANDOM, - vec![], - MockExt::default(), - &mut gas_meter, - ).unwrap(); - - // The mock ext just returns the same data that was passed as the subject. - assert_eq!( - output, - ExecReturnValue { - status: STATUS_SUCCESS, - data: hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F").to_vec(), - }, - ); - } - - const CODE_DEPOSIT_EVENT: &str = r#" + #[test] + fn random() { + let mut gas_meter = GasMeter::with_limit(50_000, 1); + + let output = execute(CODE_RANDOM, vec![], MockExt::default(), &mut gas_meter).unwrap(); + + // The mock ext just returns the same data that was passed as the subject. + assert_eq!( + output, + ExecReturnValue { + status: STATUS_SUCCESS, + data: hex!("000102030405060708090A0B0C0D0E0F000102030405060708090A0B0C0D0E0F") + .to_vec(), + }, + ); + } + + const CODE_DEPOSIT_EVENT: &str = r#" (module (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1585,26 +1589,24 @@ mod tests { ) "#; - #[test] - fn deposit_event() { - let mut mock_ext = MockExt::default(); - let mut gas_meter = GasMeter::with_limit(50_000, 1); - let _ = execute( - CODE_DEPOSIT_EVENT, - vec![], - &mut mock_ext, - &mut gas_meter - ).unwrap(); - - assert_eq!(mock_ext.events, vec![ - (vec![H256::repeat_byte(0x33)], - vec![0x00, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00]) - ]); - - assert_eq!(gas_meter.gas_left(), 49934); - } - - const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" + #[test] + fn deposit_event() { + let mut mock_ext = MockExt::default(); + let mut gas_meter = GasMeter::with_limit(50_000, 1); + let _ = execute(CODE_DEPOSIT_EVENT, vec![], &mut mock_ext, &mut gas_meter).unwrap(); + + assert_eq!( + mock_ext.events, + vec![( + vec![H256::repeat_byte(0x33)], + vec![0x00, 0x01, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe5, 0x14, 0x00] + )] + ); + + assert_eq!(gas_meter.gas_left(), 49934); + } + + const CODE_DEPOSIT_EVENT_MAX_TOPICS: &str = r#" 
(module (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1631,25 +1633,26 @@ mod tests { ) "#; - #[test] - fn deposit_event_max_topics() { - // Checks that the runtime traps if there are more than `max_topic_events` topics. - let mut gas_meter = GasMeter::with_limit(50_000, 1); - - assert_matches!( - execute( - CODE_DEPOSIT_EVENT_MAX_TOPICS, - vec![], - MockExt::default(), - &mut gas_meter - ), - Err(ExecError { - reason: DispatchError::Other("contract trapped during execution"), buffer: _ - }) - ); - } - - const CODE_DEPOSIT_EVENT_DUPLICATES: &str = r#" + #[test] + fn deposit_event_max_topics() { + // Checks that the runtime traps if there are more than `max_topic_events` topics. + let mut gas_meter = GasMeter::with_limit(50_000, 1); + + assert_matches!( + execute( + CODE_DEPOSIT_EVENT_MAX_TOPICS, + vec![], + MockExt::default(), + &mut gas_meter + ), + Err(ExecError { + reason: DispatchError::Other("contract trapped during execution"), + buffer: _, + }) + ); + } + + const CODE_DEPOSIT_EVENT_DUPLICATES: &str = r#" (module (import "env" "ext_deposit_event" (func $ext_deposit_event (param i32 i32 i32 i32))) (import "env" "memory" (memory 1 1)) @@ -1675,25 +1678,28 @@ mod tests { ) "#; - #[test] - fn deposit_event_duplicates() { - // Checks that the runtime traps if there are duplicates. - let mut gas_meter = GasMeter::with_limit(50_000, 1); - - assert_matches!( - execute( - CODE_DEPOSIT_EVENT_DUPLICATES, - vec![], - MockExt::default(), - &mut gas_meter - ), - Err(ExecError { reason: DispatchError::Other("contract trapped during execution"), buffer: _ }) - ); - } - - /// calls `ext_block_number`, loads the current block number from the scratch buffer and - /// compares it with the constant 121. - const CODE_BLOCK_NUMBER: &str = r#" + #[test] + fn deposit_event_duplicates() { + // Checks that the runtime traps if there are duplicates. 
+ let mut gas_meter = GasMeter::with_limit(50_000, 1); + + assert_matches!( + execute( + CODE_DEPOSIT_EVENT_DUPLICATES, + vec![], + MockExt::default(), + &mut gas_meter + ), + Err(ExecError { + reason: DispatchError::Other("contract trapped during execution"), + buffer: _, + }) + ); + } + + /// calls `ext_block_number`, loads the current block number from the scratch buffer and + /// compares it with the constant 121. + const CODE_BLOCK_NUMBER: &str = r#" (module (import "env" "ext_block_number" (func $ext_block_number)) (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1743,18 +1749,19 @@ mod tests { ) "#; - #[test] - fn block_number() { - let _ = execute( - CODE_BLOCK_NUMBER, - vec![], - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - } - - // asserts that the size of the input data is 4. - const CODE_SIMPLE_ASSERT: &str = r#" + #[test] + fn block_number() { + let _ = execute( + CODE_BLOCK_NUMBER, + vec![], + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + } + + // asserts that the size of the input data is 4. 
+ const CODE_SIMPLE_ASSERT: &str = r#" (module (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) @@ -1780,38 +1787,41 @@ mod tests { ) "#; - #[test] - fn output_buffer_capacity_preserved_on_success() { - let mut input_data = Vec::with_capacity(1_234); - input_data.extend_from_slice(&[1, 2, 3, 4][..]); - - let output = execute( - CODE_SIMPLE_ASSERT, - input_data, - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!(output.data.len(), 0); - assert_eq!(output.data.capacity(), 1_234); - } - - #[test] - fn output_buffer_capacity_preserved_on_failure() { - let mut input_data = Vec::with_capacity(1_234); - input_data.extend_from_slice(&[1, 2, 3, 4, 5][..]); - - let error = execute( - CODE_SIMPLE_ASSERT, - input_data, - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).err().unwrap(); - - assert_eq!(error.buffer.capacity(), 1_234); - } - - const CODE_RETURN_WITH_DATA: &str = r#" + #[test] + fn output_buffer_capacity_preserved_on_success() { + let mut input_data = Vec::with_capacity(1_234); + input_data.extend_from_slice(&[1, 2, 3, 4][..]); + + let output = execute( + CODE_SIMPLE_ASSERT, + input_data, + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!(output.data.len(), 0); + assert_eq!(output.data.capacity(), 1_234); + } + + #[test] + fn output_buffer_capacity_preserved_on_failure() { + let mut input_data = Vec::with_capacity(1_234); + input_data.extend_from_slice(&[1, 2, 3, 4, 5][..]); + + let error = execute( + CODE_SIMPLE_ASSERT, + input_data, + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .err() + .unwrap(); + + assert_eq!(error.buffer.capacity(), 1_234); + } + + const CODE_RETURN_WITH_DATA: &str = r#" (module (import "env" "ext_scratch_size" (func $ext_scratch_size (result i32))) (import "env" "ext_scratch_read" (func $ext_scratch_read (param i32 i32 i32))) @@ -1853,33 +1863,47 @@ mod tests { ) "#; - #[test] - fn 
return_with_success_status() { - let output = execute( - CODE_RETURN_WITH_DATA, - hex!("00112233445566778899").to_vec(), - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!(output, ExecReturnValue { status: 0, data: hex!("445566778899").to_vec() }); - assert!(output.is_success()); - } - - #[test] - fn return_with_failure_status() { - let output = execute( - CODE_RETURN_WITH_DATA, - hex!("112233445566778899").to_vec(), - MockExt::default(), - &mut GasMeter::with_limit(50_000, 1), - ).unwrap(); - - assert_eq!(output, ExecReturnValue { status: 17, data: hex!("5566778899").to_vec() }); - assert!(!output.is_success()); - } - - const CODE_GET_RUNTIME_STORAGE: &str = r#" + #[test] + fn return_with_success_status() { + let output = execute( + CODE_RETURN_WITH_DATA, + hex!("00112233445566778899").to_vec(), + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + output, + ExecReturnValue { + status: 0, + data: hex!("445566778899").to_vec() + } + ); + assert!(output.is_success()); + } + + #[test] + fn return_with_failure_status() { + let output = execute( + CODE_RETURN_WITH_DATA, + hex!("112233445566778899").to_vec(), + MockExt::default(), + &mut GasMeter::with_limit(50_000, 1), + ) + .unwrap(); + + assert_eq!( + output, + ExecReturnValue { + status: 17, + data: hex!("5566778899").to_vec() + } + ); + assert!(!output.is_success()); + } + + const CODE_GET_RUNTIME_STORAGE: &str = r#" (module (import "env" "ext_get_runtime_storage" (func $ext_get_runtime_storage (param i32 i32) (result i32)) @@ -1956,25 +1980,23 @@ mod tests { ) "#; - #[test] - fn get_runtime_storage() { - let mut gas_meter = GasMeter::with_limit(50_000, 1); - let mock_ext = MockExt::default(); - - // "\01\02\03\04" - Some(0x14144020) - // "\02\03\04\05" - None - *mock_ext.runtime_storage_keys.borrow_mut() = [ - ([1, 2, 3, 4].to_vec(), Some(0x14144020u32.to_le_bytes().to_vec())), - ([2, 3, 4, 5].to_vec().to_vec(), None), - ] - .iter() 
- .cloned() - .collect(); - let _ = execute( - CODE_GET_RUNTIME_STORAGE, - vec![], - mock_ext, - &mut gas_meter, - ).unwrap(); - } + #[test] + fn get_runtime_storage() { + let mut gas_meter = GasMeter::with_limit(50_000, 1); + let mock_ext = MockExt::default(); + + // "\01\02\03\04" - Some(0x14144020) + // "\02\03\04\05" - None + *mock_ext.runtime_storage_keys.borrow_mut() = [ + ( + [1, 2, 3, 4].to_vec(), + Some(0x14144020u32.to_le_bytes().to_vec()), + ), + ([2, 3, 4, 5].to_vec().to_vec(), None), + ] + .iter() + .cloned() + .collect(); + let _ = execute(CODE_GET_RUNTIME_STORAGE, vec![], mock_ext, &mut gas_meter).unwrap(); + } } diff --git a/frame/contracts/src/wasm/prepare.rs b/frame/contracts/src/wasm/prepare.rs index ba934f353e..830afa6e6c 100644 --- a/frame/contracts/src/wasm/prepare.rs +++ b/frame/contracts/src/wasm/prepare.rs @@ -22,304 +22,302 @@ use crate::wasm::env_def::ImportSatisfyCheck; use crate::wasm::PrefabWasmModule; use crate::Schedule; -use parity_wasm::elements::{self, Internal, External, MemoryType, Type, ValueType}; +use parity_wasm::elements::{self, External, Internal, MemoryType, Type, ValueType}; use pwasm_utils; use pwasm_utils::rules; +use sp_runtime::traits::SaturatedConversion; use sp_std::prelude::*; -use sp_runtime::traits::{SaturatedConversion}; struct ContractModule<'a> { - /// A deserialized module. The module is valid (this is Guaranteed by `new` method). - module: elements::Module, - schedule: &'a Schedule, + /// A deserialized module. The module is valid (this is Guaranteed by `new` method). + module: elements::Module, + schedule: &'a Schedule, } impl<'a> ContractModule<'a> { - /// Creates a new instance of `ContractModule`. - /// - /// Returns `Err` if the `original_code` couldn't be decoded or - /// if it contains an invalid module. 
- fn new( - original_code: &[u8], - schedule: &'a Schedule, - ) -> Result { - use wasmi_validation::{validate_module, PlainValidator}; - - let module = - elements::deserialize_buffer(original_code).map_err(|_| "Can't decode wasm code")?; - - // Make sure that the module is valid. - validate_module::(&module).map_err(|_| "Module is not valid")?; - - // Return a `ContractModule` instance with - // __valid__ module. - Ok(ContractModule { - module, - schedule, - }) - } - - /// Ensures that module doesn't declare internal memories. - /// - /// In this runtime we only allow wasm module to import memory from the environment. - /// Memory section contains declarations of internal linear memories, so if we find one - /// we reject such a module. - fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { - if self.module - .memory_section() - .map_or(false, |ms| ms.entries().len() > 0) - { - return Err("module declares internal memory"); - } - Ok(()) - } - - /// Ensures that tables declared in the module are not too big. - fn ensure_table_size_limit(&self, limit: u32) -> Result<(), &'static str> { - if let Some(table_section) = self.module.table_section() { - // In Wasm MVP spec, there may be at most one table declared. Double check this - // explicitly just in case the Wasm version changes. - if table_section.entries().len() > 1 { - return Err("multiple tables declared"); - } - if let Some(table_type) = table_section.entries().first() { - // Check the table's initial size as there is no instruction or environment function - // capable of growing the table. - if table_type.limits().initial() > limit { - return Err("table exceeds maximum size allowed") - } - } - } - Ok(()) - } - - /// Ensures that no floating point types are in use. 
- fn ensure_no_floating_types(&self) -> Result<(), &'static str> { - if let Some(global_section) = self.module.global_section() { - for global in global_section.entries() { - match global.global_type().content_type() { - ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in globals is forbidden"), - _ => {} - } - } - } - - if let Some(code_section) = self.module.code_section() { - for func_body in code_section.bodies() { - for local in func_body.locals() { - match local.value_type() { - ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in locals is forbidden"), - _ => {} - } - } - } - } - - if let Some(type_section) = self.module.type_section() { - for wasm_type in type_section.types() { - match wasm_type { - Type::Function(func_type) => { - let return_type = func_type.return_type(); - for value_type in func_type.params().iter().chain(return_type.iter()) { - match value_type { - ValueType::F32 | ValueType::F64 => - return Err("use of floating point type in function types is forbidden"), - _ => {} - } - } - } - } - } - } - - Ok(()) - } - - fn inject_gas_metering(self) -> Result { - let gas_rules = - rules::Set::new( - self.schedule.regular_op_cost.clone().saturated_into(), - Default::default(), - ) - .with_grow_cost(self.schedule.grow_mem_cost.clone().saturated_into()) - .with_forbidden_floats(); - - let contract_module = pwasm_utils::inject_gas_counter(self.module, &gas_rules) - .map_err(|_| "gas instrumentation failed")?; - Ok(ContractModule { - module: contract_module, - schedule: self.schedule, - }) - } - - fn inject_stack_height_metering(self) -> Result { - let contract_module = - pwasm_utils::stack_height::inject_limiter(self.module, self.schedule.max_stack_height) - .map_err(|_| "stack height instrumentation failed")?; - Ok(ContractModule { - module: contract_module, - schedule: self.schedule, - }) - } - - /// Check that the module has required exported functions. 
For now - /// these are just entrypoints: - /// - /// - 'call' - /// - 'deploy' - /// - /// Any other exports are not allowed. - fn scan_exports(&self) -> Result<(), &'static str> { - let mut deploy_found = false; - let mut call_found = false; - - let module = &self.module; - - let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let export_entries = module - .export_section() - .map(|is| is.entries()) - .unwrap_or(&[]); - let func_entries = module - .function_section() - .map(|fs| fs.entries()) - .unwrap_or(&[]); - - // Function index space consists of imported function following by - // declared functions. Calculate the total number of imported functions so - // we can use it to convert indexes from function space to declared function space. - let fn_space_offset = module - .import_section() - .map(|is| is.entries()) - .unwrap_or(&[]) - .iter() - .filter(|entry| { - match *entry.external() { - External::Function(_) => true, - _ => false, - } - }) - .count(); - - for export in export_entries { - match export.field() { - "call" => call_found = true, - "deploy" => deploy_found = true, - _ => return Err("unknown export: expecting only deploy and call functions"), - } - - // Then check the export kind. "call" and "deploy" are - // functions. - let fn_idx = match export.internal() { - Internal::Function(ref fn_idx) => *fn_idx, - _ => return Err("expected a function"), - }; - - // convert index from function index space to declared index space. - let fn_idx = match fn_idx.checked_sub(fn_space_offset as u32) { - Some(fn_idx) => fn_idx, - None => { - // Underflow here means fn_idx points to imported function which we don't allow! - return Err("entry point points to an imported function"); - } - }; - - // Then check the signature. - // Both "call" and "deploy" has a [] -> [] or [] -> [i32] function type. - // - // The [] -> [] signature predates the [] -> [i32] signature and is supported for - // backwards compatibility. 
This will likely be removed once ink! is updated to - // generate modules with the new function signatures. - let func_ty_idx = func_entries.get(fn_idx as usize) - .ok_or_else(|| "export refers to non-existent function")? - .type_ref(); - let Type::Function(ref func_ty) = types - .get(func_ty_idx as usize) - .ok_or_else(|| "function has a non-existent type")?; - if !func_ty.params().is_empty() || - !(func_ty.return_type().is_none() || - func_ty.return_type() == Some(ValueType::I32)) { - return Err("entry point has wrong signature"); - } - } - - if !deploy_found { - return Err("deploy function isn't exported"); - } - if !call_found { - return Err("call function isn't exported"); - } - - Ok(()) - } - - /// Scan an import section if any. - /// - /// This accomplishes two tasks: - /// - /// - checks any imported function against defined host functions set, incl. - /// their signatures. - /// - if there is a memory import, returns it's descriptor - fn scan_imports(&self) -> Result, &'static str> { - let module = &self.module; - - let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); - let import_entries = module - .import_section() - .map(|is| is.entries()) - .unwrap_or(&[]); - - let mut imported_mem_type = None; - - for import in import_entries { - if import.module() != "env" { - // This import tries to import something from non-"env" module, - // but all imports are located in "env" at the moment. 
- return Err("module has imports from a non-'env' namespace"); - } - - let type_idx = match import.external() { - &External::Table(_) => return Err("Cannot import tables"), - &External::Global(_) => return Err("Cannot import globals"), - &External::Function(ref type_idx) => type_idx, - &External::Memory(ref memory_type) => { - if import.field() != "memory" { - return Err("Memory import must have the field name 'memory'") - } - if imported_mem_type.is_some() { - return Err("Multiple memory imports defined") - } - imported_mem_type = Some(memory_type); - continue; - } - }; - - let Type::Function(ref func_ty) = types - .get(*type_idx as usize) - .ok_or_else(|| "validation: import entry points to a non-existent type")?; - - // We disallow importing `ext_println` unless debug features are enabled, - // which should only be allowed on a dev chain - if !self.schedule.enable_println && import.field().as_bytes() == b"ext_println" { - return Err("module imports `ext_println` but debug features disabled"); - } - - // We disallow importing `gas` function here since it is treated as implementation detail. - if import.field().as_bytes() == b"gas" - || !C::can_satisfy(import.field().as_bytes(), func_ty) - { - return Err("module imports a non-existent function"); - } - } - Ok(imported_mem_type) - } - - fn into_wasm_code(self) -> Result, &'static str> { - elements::serialize(self.module) - .map_err(|_| "error serializing instrumented module") - } + /// Creates a new instance of `ContractModule`. + /// + /// Returns `Err` if the `original_code` couldn't be decoded or + /// if it contains an invalid module. + fn new(original_code: &[u8], schedule: &'a Schedule) -> Result { + use wasmi_validation::{validate_module, PlainValidator}; + + let module = + elements::deserialize_buffer(original_code).map_err(|_| "Can't decode wasm code")?; + + // Make sure that the module is valid. 
+ validate_module::(&module).map_err(|_| "Module is not valid")?; + + // Return a `ContractModule` instance with + // __valid__ module. + Ok(ContractModule { module, schedule }) + } + + /// Ensures that module doesn't declare internal memories. + /// + /// In this runtime we only allow wasm module to import memory from the environment. + /// Memory section contains declarations of internal linear memories, so if we find one + /// we reject such a module. + fn ensure_no_internal_memory(&self) -> Result<(), &'static str> { + if self + .module + .memory_section() + .map_or(false, |ms| ms.entries().len() > 0) + { + return Err("module declares internal memory"); + } + Ok(()) + } + + /// Ensures that tables declared in the module are not too big. + fn ensure_table_size_limit(&self, limit: u32) -> Result<(), &'static str> { + if let Some(table_section) = self.module.table_section() { + // In Wasm MVP spec, there may be at most one table declared. Double check this + // explicitly just in case the Wasm version changes. + if table_section.entries().len() > 1 { + return Err("multiple tables declared"); + } + if let Some(table_type) = table_section.entries().first() { + // Check the table's initial size as there is no instruction or environment function + // capable of growing the table. + if table_type.limits().initial() > limit { + return Err("table exceeds maximum size allowed"); + } + } + } + Ok(()) + } + + /// Ensures that no floating point types are in use. 
+ fn ensure_no_floating_types(&self) -> Result<(), &'static str> { + if let Some(global_section) = self.module.global_section() { + for global in global_section.entries() { + match global.global_type().content_type() { + ValueType::F32 | ValueType::F64 => { + return Err("use of floating point type in globals is forbidden") + } + _ => {} + } + } + } + + if let Some(code_section) = self.module.code_section() { + for func_body in code_section.bodies() { + for local in func_body.locals() { + match local.value_type() { + ValueType::F32 | ValueType::F64 => { + return Err("use of floating point type in locals is forbidden") + } + _ => {} + } + } + } + } + + if let Some(type_section) = self.module.type_section() { + for wasm_type in type_section.types() { + match wasm_type { + Type::Function(func_type) => { + let return_type = func_type.return_type(); + for value_type in func_type.params().iter().chain(return_type.iter()) { + match value_type { + ValueType::F32 | ValueType::F64 => { + return Err( + "use of floating point type in function types is forbidden", + ) + } + _ => {} + } + } + } + } + } + } + + Ok(()) + } + + fn inject_gas_metering(self) -> Result { + let gas_rules = rules::Set::new( + self.schedule.regular_op_cost.clone().saturated_into(), + Default::default(), + ) + .with_grow_cost(self.schedule.grow_mem_cost.clone().saturated_into()) + .with_forbidden_floats(); + + let contract_module = pwasm_utils::inject_gas_counter(self.module, &gas_rules) + .map_err(|_| "gas instrumentation failed")?; + Ok(ContractModule { + module: contract_module, + schedule: self.schedule, + }) + } + + fn inject_stack_height_metering(self) -> Result { + let contract_module = + pwasm_utils::stack_height::inject_limiter(self.module, self.schedule.max_stack_height) + .map_err(|_| "stack height instrumentation failed")?; + Ok(ContractModule { + module: contract_module, + schedule: self.schedule, + }) + } + + /// Check that the module has required exported functions. 
For now + /// these are just entrypoints: + /// + /// - 'call' + /// - 'deploy' + /// + /// Any other exports are not allowed. + fn scan_exports(&self) -> Result<(), &'static str> { + let mut deploy_found = false; + let mut call_found = false; + + let module = &self.module; + + let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); + let export_entries = module + .export_section() + .map(|is| is.entries()) + .unwrap_or(&[]); + let func_entries = module + .function_section() + .map(|fs| fs.entries()) + .unwrap_or(&[]); + + // Function index space consists of imported function following by + // declared functions. Calculate the total number of imported functions so + // we can use it to convert indexes from function space to declared function space. + let fn_space_offset = module + .import_section() + .map(|is| is.entries()) + .unwrap_or(&[]) + .iter() + .filter(|entry| match *entry.external() { + External::Function(_) => true, + _ => false, + }) + .count(); + + for export in export_entries { + match export.field() { + "call" => call_found = true, + "deploy" => deploy_found = true, + _ => return Err("unknown export: expecting only deploy and call functions"), + } + + // Then check the export kind. "call" and "deploy" are + // functions. + let fn_idx = match export.internal() { + Internal::Function(ref fn_idx) => *fn_idx, + _ => return Err("expected a function"), + }; + + // convert index from function index space to declared index space. + let fn_idx = match fn_idx.checked_sub(fn_space_offset as u32) { + Some(fn_idx) => fn_idx, + None => { + // Underflow here means fn_idx points to imported function which we don't allow! + return Err("entry point points to an imported function"); + } + }; + + // Then check the signature. + // Both "call" and "deploy" has a [] -> [] or [] -> [i32] function type. + // + // The [] -> [] signature predates the [] -> [i32] signature and is supported for + // backwards compatibility. This will likely be removed once ink! 
is updated to + // generate modules with the new function signatures. + let func_ty_idx = func_entries + .get(fn_idx as usize) + .ok_or_else(|| "export refers to non-existent function")? + .type_ref(); + let Type::Function(ref func_ty) = types + .get(func_ty_idx as usize) + .ok_or_else(|| "function has a non-existent type")?; + if !func_ty.params().is_empty() + || !(func_ty.return_type().is_none() + || func_ty.return_type() == Some(ValueType::I32)) + { + return Err("entry point has wrong signature"); + } + } + + if !deploy_found { + return Err("deploy function isn't exported"); + } + if !call_found { + return Err("call function isn't exported"); + } + + Ok(()) + } + + /// Scan an import section if any. + /// + /// This accomplishes two tasks: + /// + /// - checks any imported function against defined host functions set, incl. + /// their signatures. + /// - if there is a memory import, returns it's descriptor + fn scan_imports(&self) -> Result, &'static str> { + let module = &self.module; + + let types = module.type_section().map(|ts| ts.types()).unwrap_or(&[]); + let import_entries = module + .import_section() + .map(|is| is.entries()) + .unwrap_or(&[]); + + let mut imported_mem_type = None; + + for import in import_entries { + if import.module() != "env" { + // This import tries to import something from non-"env" module, + // but all imports are located in "env" at the moment. 
+ return Err("module has imports from a non-'env' namespace"); + } + + let type_idx = match import.external() { + &External::Table(_) => return Err("Cannot import tables"), + &External::Global(_) => return Err("Cannot import globals"), + &External::Function(ref type_idx) => type_idx, + &External::Memory(ref memory_type) => { + if import.field() != "memory" { + return Err("Memory import must have the field name 'memory'"); + } + if imported_mem_type.is_some() { + return Err("Multiple memory imports defined"); + } + imported_mem_type = Some(memory_type); + continue; + } + }; + + let Type::Function(ref func_ty) = types + .get(*type_idx as usize) + .ok_or_else(|| "validation: import entry points to a non-existent type")?; + + // We disallow importing `ext_println` unless debug features are enabled, + // which should only be allowed on a dev chain + if !self.schedule.enable_println && import.field().as_bytes() == b"ext_println" { + return Err("module imports `ext_println` but debug features disabled"); + } + + // We disallow importing `gas` function here since it is treated as implementation detail. + if import.field().as_bytes() == b"gas" + || !C::can_satisfy(import.field().as_bytes(), func_ty) + { + return Err("module imports a non-existent function"); + } + } + Ok(imported_mem_type) + } + + fn into_wasm_code(self) -> Result, &'static str> { + elements::serialize(self.module).map_err(|_| "error serializing instrumented module") + } } /// Loads the given module given in `original_code`, performs some checks on it and @@ -334,79 +332,79 @@ impl<'a> ContractModule<'a> { /// /// The preprocessing includes injecting code for gas metering and metering the height of stack. 
pub fn prepare_contract( - original_code: &[u8], - schedule: &Schedule, + original_code: &[u8], + schedule: &Schedule, ) -> Result { - let mut contract_module = ContractModule::new(original_code, schedule)?; - contract_module.scan_exports()?; - contract_module.ensure_no_internal_memory()?; - contract_module.ensure_table_size_limit(schedule.max_table_size)?; - contract_module.ensure_no_floating_types()?; - - struct MemoryDefinition { - initial: u32, - maximum: u32, - } - - let memory_def = if let Some(memory_type) = contract_module.scan_imports::()? { - // Inspect the module to extract the initial and maximum page count. - let limits = memory_type.limits(); - match (limits.initial(), limits.maximum()) { - (initial, Some(maximum)) if initial > maximum => { - return Err( - "Requested initial number of pages should not exceed the requested maximum", - ); - } - (_, Some(maximum)) if maximum > schedule.max_memory_pages => { - return Err("Maximum number of pages should not exceed the configured maximum."); - } - (initial, Some(maximum)) => MemoryDefinition { initial, maximum }, - (_, None) => { - // Maximum number of pages should be always declared. - // This isn't a hard requirement and can be treated as a maximum set - // to configured maximum. - return Err("Maximum number of pages should be always declared."); - } - } - } else { - // If none memory imported then just crate an empty placeholder. - // Any access to it will lead to out of bounds trap. - MemoryDefinition { - initial: 0, - maximum: 0, - } - }; - - contract_module = contract_module - .inject_gas_metering()? 
- .inject_stack_height_metering()?; - - Ok(PrefabWasmModule { - schedule_version: schedule.version, - initial: memory_def.initial, - maximum: memory_def.maximum, - _reserved: None, - code: contract_module.into_wasm_code()?, - }) + let mut contract_module = ContractModule::new(original_code, schedule)?; + contract_module.scan_exports()?; + contract_module.ensure_no_internal_memory()?; + contract_module.ensure_table_size_limit(schedule.max_table_size)?; + contract_module.ensure_no_floating_types()?; + + struct MemoryDefinition { + initial: u32, + maximum: u32, + } + + let memory_def = if let Some(memory_type) = contract_module.scan_imports::()? { + // Inspect the module to extract the initial and maximum page count. + let limits = memory_type.limits(); + match (limits.initial(), limits.maximum()) { + (initial, Some(maximum)) if initial > maximum => { + return Err( + "Requested initial number of pages should not exceed the requested maximum", + ); + } + (_, Some(maximum)) if maximum > schedule.max_memory_pages => { + return Err("Maximum number of pages should not exceed the configured maximum."); + } + (initial, Some(maximum)) => MemoryDefinition { initial, maximum }, + (_, None) => { + // Maximum number of pages should be always declared. + // This isn't a hard requirement and can be treated as a maximum set + // to configured maximum. + return Err("Maximum number of pages should be always declared."); + } + } + } else { + // If none memory imported then just crate an empty placeholder. + // Any access to it will lead to out of bounds trap. + MemoryDefinition { + initial: 0, + maximum: 0, + } + }; + + contract_module = contract_module + .inject_gas_metering()? 
+ .inject_stack_height_metering()?; + + Ok(PrefabWasmModule { + schedule_version: schedule.version, + initial: memory_def.initial, + maximum: memory_def.maximum, + _reserved: None, + code: contract_module.into_wasm_code()?, + }) } #[cfg(test)] mod tests { - use super::*; - use crate::exec::Ext; - use std::fmt; - use wabt; - use assert_matches::assert_matches; - - impl fmt::Debug for PrefabWasmModule { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "PreparedContract {{ .. }}") - } - } - - // Define test environment for tests. We need ImportSatisfyCheck - // implementation from it. So actual implementations doesn't matter. - define_env!(TestEnv, , + use super::*; + use crate::exec::Ext; + use assert_matches::assert_matches; + use std::fmt; + use wabt; + + impl fmt::Debug for PrefabWasmModule { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "PreparedContract {{ .. }}") + } + } + + // Define test environment for tests. We need ImportSatisfyCheck + // implementation from it. So actual implementations doesn't matter. + define_env!(TestEnv, , panic(_ctx) => { unreachable!(); }, // gas is an implementation defined function and a contract can't import it. @@ -417,7 +415,7 @@ mod tests { ext_println(_ctx, _ptr: u32, _len: u32) => { unreachable!(); }, ); - macro_rules! prepare_test { + macro_rules! prepare_test { ($name:ident, $wat:expr, $($expected:tt)*) => { #[test] fn $name() { @@ -429,8 +427,9 @@ mod tests { }; } - prepare_test!(no_floats, - r#" + prepare_test!( + no_floats, + r#" (module (func (export "call") (drop @@ -442,20 +441,21 @@ mod tests { ) (func (export "deploy")) )"#, - Err("gas instrumentation failed") - ); + Err("gas instrumentation failed") + ); - mod memories { - use super::*; + mod memories { + use super::*; - // Tests below assumes that maximum page number is configured to a certain number. 
- #[test] - fn assume_memory_size() { - assert_eq!(Schedule::default().max_memory_pages, 16); - } + // Tests below assumes that maximum page number is configured to a certain number. + #[test] + fn assume_memory_size() { + assert_eq!(Schedule::default().max_memory_pages, 16); + } - prepare_test!(memory_with_one_page, - r#" + prepare_test!( + memory_with_one_page, + r#" (module (import "env" "memory" (memory 1 1)) @@ -463,11 +463,12 @@ mod tests { (func (export "deploy")) ) "#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(internal_memory_declaration, - r#" + prepare_test!( + internal_memory_declaration, + r#" (module (memory 1 1) @@ -475,22 +476,24 @@ mod tests { (func (export "deploy")) ) "#, - Err("module declares internal memory") - ); + Err("module declares internal memory") + ); - prepare_test!(no_memory_import, - r#" + prepare_test!( + no_memory_import, + r#" (module ;; no memory imported (func (export "call")) (func (export "deploy")) )"#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(initial_exceeds_maximum, - r#" + prepare_test!( + initial_exceeds_maximum, + r#" (module (import "env" "memory" (memory 16 1)) @@ -498,11 +501,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("Module is not valid") - ); + Err("Module is not valid") + ); - prepare_test!(no_maximum, - r#" + prepare_test!( + no_maximum, + r#" (module (import "env" "memory" (memory 1)) @@ -510,11 +514,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("Maximum number of pages should be always declared.") - ); + Err("Maximum number of pages should be always declared.") + ); - prepare_test!(requested_maximum_exceeds_configured_maximum, - r#" + prepare_test!( + requested_maximum_exceeds_configured_maximum, + r#" (module (import "env" "memory" (memory 1 17)) @@ -522,11 +527,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("Maximum number of pages should not exceed the configured maximum.") - ); + Err("Maximum number of pages should not exceed the configured maximum.") + ); - 
prepare_test!(field_name_not_memory, - r#" + prepare_test!( + field_name_not_memory, + r#" (module (import "env" "forgetit" (memory 1 1)) @@ -534,11 +540,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("Memory import must have the field name 'memory'") - ); + Err("Memory import must have the field name 'memory'") + ); - prepare_test!(multiple_memory_imports, - r#" + prepare_test!( + multiple_memory_imports, + r#" (module (import "env" "memory" (memory 1 1)) (import "env" "memory" (memory 1 1)) @@ -547,11 +554,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("Module is not valid") - ); + Err("Module is not valid") + ); - prepare_test!(table_import, - r#" + prepare_test!( + table_import, + r#" (module (import "env" "table" (table 1 anyfunc)) @@ -559,42 +567,45 @@ mod tests { (func (export "deploy")) ) "#, - Err("Cannot import tables") - ); + Err("Cannot import tables") + ); - prepare_test!(global_import, - r#" + prepare_test!( + global_import, + r#" (module (global $g (import "env" "global") i32) (func (export "call")) (func (export "deploy")) ) "#, - Err("Cannot import globals") - ); - } - - mod tables { - use super::*; - - // Tests below assumes that maximum table size is configured to a certain number. - #[test] - fn assume_table_size() { - assert_eq!(Schedule::default().max_table_size, 16384); - } - - prepare_test!(no_tables, - r#" + Err("Cannot import globals") + ); + } + + mod tables { + use super::*; + + // Tests below assumes that maximum table size is configured to a certain number. 
+ #[test] + fn assume_table_size() { + assert_eq!(Schedule::default().max_table_size, 16384); + } + + prepare_test!( + no_tables, + r#" (module (func (export "call")) (func (export "deploy")) ) "#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(table_valid_size, - r#" + prepare_test!( + table_valid_size, + r#" (module (table 10000 funcref) @@ -602,26 +613,28 @@ mod tests { (func (export "deploy")) ) "#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(table_too_big, - r#" + prepare_test!( + table_too_big, + r#" (module (table 20000 funcref) (func (export "call")) (func (export "deploy")) )"#, - Err("table exceeds maximum size allowed") - ); - } + Err("table exceeds maximum size allowed") + ); + } - mod imports { - use super::*; + mod imports { + use super::*; - prepare_test!(can_import_legit_function, - r#" + prepare_test!( + can_import_legit_function, + r#" (module (import "env" "nop" (func (param i64))) @@ -629,13 +642,14 @@ mod tests { (func (export "deploy")) ) "#, - Ok(_) - ); - - // even though gas is defined the contract can't import it since - // it is an implementation defined. - prepare_test!(can_not_import_gas_function, - r#" + Ok(_) + ); + + // even though gas is defined the contract can't import it since + // it is an implementation defined. + prepare_test!( + can_not_import_gas_function, + r#" (module (import "env" "gas" (func (param i32))) @@ -643,12 +657,13 @@ mod tests { (func (export "deploy")) ) "#, - Err("module imports a non-existent function") - ); + Err("module imports a non-existent function") + ); - // nothing can be imported from non-"env" module for now. - prepare_test!(non_env_import, - r#" + // nothing can be imported from non-"env" module for now. 
+ prepare_test!( + non_env_import, + r#" (module (import "another_module" "memory" (memory 1 1)) @@ -656,12 +671,13 @@ mod tests { (func (export "deploy")) ) "#, - Err("module has imports from a non-'env' namespace") - ); + Err("module has imports from a non-'env' namespace") + ); - // wrong signature - prepare_test!(wrong_signature, - r#" + // wrong signature + prepare_test!( + wrong_signature, + r#" (module (import "env" "gas" (func (param i64))) @@ -669,11 +685,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("module imports a non-existent function") - ); + Err("module imports a non-existent function") + ); - prepare_test!(unknown_func_name, - r#" + prepare_test!( + unknown_func_name, + r#" (module (import "env" "unknown_func" (func)) @@ -681,11 +698,12 @@ mod tests { (func (export "deploy")) ) "#, - Err("module imports a non-existent function") - ); + Err("module imports a non-existent function") + ); - prepare_test!(ext_println_debug_disabled, - r#" + prepare_test!( + ext_println_debug_disabled, + r#" (module (import "env" "ext_println" (func $ext_println (param i32 i32))) @@ -693,62 +711,69 @@ mod tests { (func (export "deploy")) ) "#, - Err("module imports `ext_println` but debug features disabled") - ); - - #[test] - fn ext_println_debug_enabled() { - let wasm = wabt::Wat2Wasm::new().validate(false).convert( - r#" + Err("module imports `ext_println` but debug features disabled") + ); + + #[test] + fn ext_println_debug_enabled() { + let wasm = wabt::Wat2Wasm::new() + .validate(false) + .convert( + r#" (module (import "env" "ext_println" (func $ext_println (param i32 i32))) (func (export "call")) (func (export "deploy")) ) - "# - ).unwrap(); - let mut schedule = Schedule::default(); - schedule.enable_println = true; - let r = prepare_contract::(wasm.as_ref(), &schedule); - assert_matches!(r, Ok(_)); - } - } - - mod entrypoints { - use super::*; - - prepare_test!(it_works, - r#" + "#, + ) + .unwrap(); + let mut schedule = Schedule::default(); + 
schedule.enable_println = true; + let r = prepare_contract::(wasm.as_ref(), &schedule); + assert_matches!(r, Ok(_)); + } + } + + mod entrypoints { + use super::*; + + prepare_test!( + it_works, + r#" (module (func (export "call")) (func (export "deploy")) ) "#, - Ok(_) - ); + Ok(_) + ); - prepare_test!(omit_deploy, - r#" + prepare_test!( + omit_deploy, + r#" (module (func (export "call")) ) "#, - Err("deploy function isn't exported") - ); + Err("deploy function isn't exported") + ); - prepare_test!(omit_call, - r#" + prepare_test!( + omit_call, + r#" (module (func (export "deploy")) ) "#, - Err("call function isn't exported") - ); + Err("call function isn't exported") + ); - // Try to use imported function as an entry point. - prepare_test!(try_sneak_export_as_entrypoint, - r#" + // Try to use imported function as an entry point. + prepare_test!( + try_sneak_export_as_entrypoint, + r#" (module (import "env" "panic" (func)) @@ -757,83 +782,90 @@ mod tests { (export "call" (func 0)) ) "#, - Err("entry point points to an imported function") - ); + Err("entry point points to an imported function") + ); - // Try to use imported function as an entry point. - prepare_test!(try_sneak_export_as_global, - r#" + // Try to use imported function as an entry point. 
+ prepare_test!( + try_sneak_export_as_global, + r#" (module (func (export "deploy")) (global (export "call") i32 (i32.const 0)) ) "#, - Err("expected a function") - ); + Err("expected a function") + ); - prepare_test!(wrong_signature, - r#" + prepare_test!( + wrong_signature, + r#" (module (func (export "deploy")) (func (export "call") (param i32)) ) "#, - Err("entry point has wrong signature") - ); + Err("entry point has wrong signature") + ); - prepare_test!(unknown_exports, - r#" + prepare_test!( + unknown_exports, + r#" (module (func (export "call")) (func (export "deploy")) (func (export "whatevs")) ) "#, - Err("unknown export: expecting only deploy and call functions") - ); + Err("unknown export: expecting only deploy and call functions") + ); - prepare_test!(global_float, - r#" + prepare_test!( + global_float, + r#" (module (global $x f32 (f32.const 0)) (func (export "call")) (func (export "deploy")) ) "#, - Err("use of floating point type in globals is forbidden") - ); + Err("use of floating point type in globals is forbidden") + ); - prepare_test!(local_float, - r#" + prepare_test!( + local_float, + r#" (module (func $foo (local f32)) (func (export "call")) (func (export "deploy")) ) "#, - Err("use of floating point type in locals is forbidden") - ); + Err("use of floating point type in locals is forbidden") + ); - prepare_test!(param_float, - r#" + prepare_test!( + param_float, + r#" (module (func $foo (param f32)) (func (export "call")) (func (export "deploy")) ) "#, - Err("use of floating point type in function types is forbidden") - ); + Err("use of floating point type in function types is forbidden") + ); - prepare_test!(result_float, - r#" + prepare_test!( + result_float, + r#" (module (func $foo (result f32) (f32.const 0)) (func (export "call")) (func (export "deploy")) ) "#, - Err("use of floating point type in function types is forbidden") - ); - } + Err("use of floating point type in function types is forbidden") + ); + } } diff --git 
a/frame/contracts/src/wasm/runtime.rs b/frame/contracts/src/wasm/runtime.rs index 7cede5542f..8274c9fdfe 100644 --- a/frame/contracts/src/wasm/runtime.rs +++ b/frame/contracts/src/wasm/runtime.rs @@ -16,22 +16,17 @@ //! Environment definition of the wasm smart-contract runtime. -use crate::{Schedule, Trait, CodeHash, ComputeDispatchFee, BalanceOf}; use crate::exec::{ - Ext, ExecResult, ExecError, ExecReturnValue, StorageKey, TopicOf, STATUS_SUCCESS, + ExecError, ExecResult, ExecReturnValue, Ext, StorageKey, TopicOf, STATUS_SUCCESS, }; -use crate::gas::{Gas, GasMeter, Token, GasMeterResult, approx_gas_for_balance}; -use sp_sandbox; -use frame_system; -use sp_std::{prelude::*, mem, convert::TryInto}; +use crate::gas::{approx_gas_for_balance, Gas, GasMeter, GasMeterResult, Token}; +use crate::{BalanceOf, CodeHash, ComputeDispatchFee, Schedule, Trait}; use codec::{Decode, Encode}; +use frame_system; +use sp_io::hashing::{blake2_128, blake2_256, keccak_256, sha2_256}; use sp_runtime::traits::{Bounded, SaturatedConversion}; -use sp_io::hashing::{ - keccak_256, - blake2_256, - blake2_128, - sha2_256, -}; +use sp_sandbox; +use sp_std::{convert::TryInto, mem, prelude::*}; /// The value returned from ext_call and ext_instantiate contract external functions if the call or /// instantiation traps. This value is chosen as if the execution does not trap, the return value @@ -43,181 +38,193 @@ const TRAP_RETURN_CODE: u32 = 0x0100; /// In this runtime traps used not only for signaling about errors but also /// to just terminate quickly in some cases. enum SpecialTrap { - /// Signals that trap was generated in response to call `ext_return` host function. - Return(Vec), - /// Signals that trap was generated because the contract exhausted its gas limit. - OutOfGas, - /// Signals that a trap was generated in response to a succesful call to the - /// `ext_terminate` host function. - Termination, + /// Signals that trap was generated in response to call `ext_return` host function. 
+ Return(Vec), + /// Signals that trap was generated because the contract exhausted its gas limit. + OutOfGas, + /// Signals that a trap was generated in response to a succesful call to the + /// `ext_terminate` host function. + Termination, } /// Can only be used for one call. pub(crate) struct Runtime<'a, E: Ext + 'a> { - ext: &'a mut E, - scratch_buf: Vec, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - special_trap: Option, + ext: &'a mut E, + scratch_buf: Vec, + schedule: &'a Schedule, + memory: sp_sandbox::Memory, + gas_meter: &'a mut GasMeter, + special_trap: Option, } impl<'a, E: Ext + 'a> Runtime<'a, E> { - pub(crate) fn new( - ext: &'a mut E, - input_data: Vec, - schedule: &'a Schedule, - memory: sp_sandbox::Memory, - gas_meter: &'a mut GasMeter, - ) -> Self { - Runtime { - ext, - // Put the input data into the scratch buffer immediately. - scratch_buf: input_data, - schedule, - memory, - gas_meter, - special_trap: None, - } - } + pub(crate) fn new( + ext: &'a mut E, + input_data: Vec, + schedule: &'a Schedule, + memory: sp_sandbox::Memory, + gas_meter: &'a mut GasMeter, + ) -> Self { + Runtime { + ext, + // Put the input data into the scratch buffer immediately. + scratch_buf: input_data, + schedule, + memory, + gas_meter, + special_trap: None, + } + } } pub(crate) fn to_execution_result( - runtime: Runtime, - sandbox_result: Result, + runtime: Runtime, + sandbox_result: Result, ) -> ExecResult { - match runtime.special_trap { - // The trap was the result of the execution `return` host function. 
- Some(SpecialTrap::Return(data)) => { - return Ok(ExecReturnValue { - status: STATUS_SUCCESS, - data, - }) - }, - Some(SpecialTrap::Termination) => { - return Ok(ExecReturnValue { - status: STATUS_SUCCESS, - data: Vec::new(), - }) - }, - Some(SpecialTrap::OutOfGas) => { - return Err(ExecError { - reason: "ran out of gas during contract execution".into(), - buffer: runtime.scratch_buf, - }) - }, - None => (), - } - - // Check the exact type of the error. - match sandbox_result { - // No traps were generated. Proceed normally. - Ok(sp_sandbox::ReturnValue::Unit) => { - let mut buffer = runtime.scratch_buf; - buffer.clear(); - Ok(ExecReturnValue { status: STATUS_SUCCESS, data: buffer }) - } - Ok(sp_sandbox::ReturnValue::Value(sp_sandbox::Value::I32(exit_code))) => { - let status = (exit_code & 0xFF).try_into() - .expect("exit_code is masked into the range of a u8; qed"); - Ok(ExecReturnValue { status, data: runtime.scratch_buf }) - } - // This should never happen as the return type of exported functions should have been - // validated by the code preparation process. However, because panics are really - // undesirable in the runtime code, we treat this as a trap for now. Eventually, we might - // want to revisit this. - Ok(_) => Err(ExecError { reason: "return type error".into(), buffer: runtime.scratch_buf }), - // `Error::Module` is returned only if instantiation or linking failed (i.e. - // wasm binary tried to import a function that is not provided by the host). - // This shouldn't happen because validation process ought to reject such binaries. - // - // Because panics are really undesirable in the runtime code, we treat this as - // a trap for now. Eventually, we might want to revisit this. - Err(sp_sandbox::Error::Module) => - Err(ExecError { reason: "validation error".into(), buffer: runtime.scratch_buf }), - // Any other kind of a trap should result in a failure. 
- Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => - Err(ExecError { reason: "contract trapped during execution".into(), buffer: runtime.scratch_buf }), - } + match runtime.special_trap { + // The trap was the result of the execution `return` host function. + Some(SpecialTrap::Return(data)) => { + return Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data, + }) + } + Some(SpecialTrap::Termination) => { + return Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: Vec::new(), + }) + } + Some(SpecialTrap::OutOfGas) => { + return Err(ExecError { + reason: "ran out of gas during contract execution".into(), + buffer: runtime.scratch_buf, + }) + } + None => (), + } + + // Check the exact type of the error. + match sandbox_result { + // No traps were generated. Proceed normally. + Ok(sp_sandbox::ReturnValue::Unit) => { + let mut buffer = runtime.scratch_buf; + buffer.clear(); + Ok(ExecReturnValue { + status: STATUS_SUCCESS, + data: buffer, + }) + } + Ok(sp_sandbox::ReturnValue::Value(sp_sandbox::Value::I32(exit_code))) => { + let status = (exit_code & 0xFF) + .try_into() + .expect("exit_code is masked into the range of a u8; qed"); + Ok(ExecReturnValue { + status, + data: runtime.scratch_buf, + }) + } + // This should never happen as the return type of exported functions should have been + // validated by the code preparation process. However, because panics are really + // undesirable in the runtime code, we treat this as a trap for now. Eventually, we might + // want to revisit this. + Ok(_) => Err(ExecError { + reason: "return type error".into(), + buffer: runtime.scratch_buf, + }), + // `Error::Module` is returned only if instantiation or linking failed (i.e. + // wasm binary tried to import a function that is not provided by the host). + // This shouldn't happen because validation process ought to reject such binaries. + // + // Because panics are really undesirable in the runtime code, we treat this as + // a trap for now. 
Eventually, we might want to revisit this. + Err(sp_sandbox::Error::Module) => Err(ExecError { + reason: "validation error".into(), + buffer: runtime.scratch_buf, + }), + // Any other kind of a trap should result in a failure. + Err(sp_sandbox::Error::Execution) | Err(sp_sandbox::Error::OutOfBounds) => Err(ExecError { + reason: "contract trapped during execution".into(), + buffer: runtime.scratch_buf, + }), + } } #[cfg_attr(test, derive(Debug, PartialEq, Eq))] #[derive(Copy, Clone)] pub enum RuntimeToken { - /// Explicit call to the `gas` function. Charge the gas meter - /// with the value provided. - Explicit(u32), - /// The given number of bytes is read from the sandbox memory. - ReadMemory(u32), - /// The given number of bytes is written to the sandbox memory. - WriteMemory(u32), - /// The given number of bytes is read from the sandbox memory and - /// is returned as the return data buffer of the call. - ReturnData(u32), - /// Dispatch fee calculated by `T::ComputeDispatchFee`. - ComputedDispatchFee(Gas), - /// (topic_count, data_bytes): A buffer of the given size is posted as an event indexed with the - /// given number of topics. - DepositEvent(u32, u32), + /// Explicit call to the `gas` function. Charge the gas meter + /// with the value provided. + Explicit(u32), + /// The given number of bytes is read from the sandbox memory. + ReadMemory(u32), + /// The given number of bytes is written to the sandbox memory. + WriteMemory(u32), + /// The given number of bytes is read from the sandbox memory and + /// is returned as the return data buffer of the call. + ReturnData(u32), + /// Dispatch fee calculated by `T::ComputeDispatchFee`. + ComputedDispatchFee(Gas), + /// (topic_count, data_bytes): A buffer of the given size is posted as an event indexed with the + /// given number of topics. 
+ DepositEvent(u32, u32), } impl Token for RuntimeToken { - type Metadata = Schedule; - - fn calculate_amount(&self, metadata: &Schedule) -> Gas { - use self::RuntimeToken::*; - let value = match *self { - Explicit(amount) => Some(amount.into()), - ReadMemory(byte_count) => metadata - .sandbox_data_read_cost - .checked_mul(byte_count.into()), - WriteMemory(byte_count) => metadata - .sandbox_data_write_cost - .checked_mul(byte_count.into()), - ReturnData(byte_count) => metadata - .return_data_per_byte_cost - .checked_mul(byte_count.into()), - DepositEvent(topic_count, data_byte_count) => { - let data_cost = metadata - .event_data_per_byte_cost - .checked_mul(data_byte_count.into()); - - let topics_cost = metadata - .event_per_topic_cost - .checked_mul(topic_count.into()); - - data_cost - .and_then(|data_cost| { - topics_cost.and_then(|topics_cost| { - data_cost.checked_add(topics_cost) - }) - }) - .and_then(|data_and_topics_cost| - data_and_topics_cost.checked_add(metadata.event_base_cost) - ) - }, - ComputedDispatchFee(gas) => Some(gas), - }; - - value.unwrap_or_else(|| Bounded::max_value()) - } + type Metadata = Schedule; + + fn calculate_amount(&self, metadata: &Schedule) -> Gas { + use self::RuntimeToken::*; + let value = match *self { + Explicit(amount) => Some(amount.into()), + ReadMemory(byte_count) => metadata + .sandbox_data_read_cost + .checked_mul(byte_count.into()), + WriteMemory(byte_count) => metadata + .sandbox_data_write_cost + .checked_mul(byte_count.into()), + ReturnData(byte_count) => metadata + .return_data_per_byte_cost + .checked_mul(byte_count.into()), + DepositEvent(topic_count, data_byte_count) => { + let data_cost = metadata + .event_data_per_byte_cost + .checked_mul(data_byte_count.into()); + + let topics_cost = metadata + .event_per_topic_cost + .checked_mul(topic_count.into()); + + data_cost + .and_then(|data_cost| { + topics_cost.and_then(|topics_cost| data_cost.checked_add(topics_cost)) + }) + .and_then(|data_and_topics_cost| { + 
data_and_topics_cost.checked_add(metadata.event_base_cost) + }) + } + ComputedDispatchFee(gas) => Some(gas), + }; + + value.unwrap_or_else(|| Bounded::max_value()) + } } /// Charge the gas meter with the specified token. /// /// Returns `Err(HostError)` if there is not enough gas. fn charge_gas>( - gas_meter: &mut GasMeter, - metadata: &Tok::Metadata, - special_trap: &mut Option, - token: Tok, + gas_meter: &mut GasMeter, + metadata: &Tok::Metadata, + special_trap: &mut Option, + token: Tok, ) -> Result<(), sp_sandbox::HostError> { - match gas_meter.charge(metadata, token) { - GasMeterResult::Proceed => Ok(()), - GasMeterResult::OutOfGas => { - *special_trap = Some(SpecialTrap::OutOfGas); - Err(sp_sandbox::HostError) - }, - } + match gas_meter.charge(metadata, token) { + GasMeterResult::Proceed => Ok(()), + GasMeterResult::OutOfGas => { + *special_trap = Some(SpecialTrap::OutOfGas); + Err(sp_sandbox::HostError) + } + } } /// Read designated chunk from the sandbox memory, consuming an appropriate amount of @@ -229,20 +236,22 @@ fn charge_gas>( /// - out of gas /// - requested buffer is not within the bounds of the sandbox memory. 
fn read_sandbox_memory( - ctx: &mut Runtime, - ptr: u32, - len: u32, + ctx: &mut Runtime, + ptr: u32, + len: u32, ) -> Result, sp_sandbox::HostError> { - charge_gas( - ctx.gas_meter, - ctx.schedule, - &mut ctx.special_trap, - RuntimeToken::ReadMemory(len), - )?; - - let mut buf = vec![0u8; len as usize]; - ctx.memory.get(ptr, buf.as_mut_slice()).map_err(|_| sp_sandbox::HostError)?; - Ok(buf) + charge_gas( + ctx.gas_meter, + ctx.schedule, + &mut ctx.special_trap, + RuntimeToken::ReadMemory(len), + )?; + + let mut buf = vec![0u8; len as usize]; + ctx.memory + .get(ptr, buf.as_mut_slice()) + .map_err(|_| sp_sandbox::HostError)?; + Ok(buf) } /// Read designated chunk from the sandbox memory into the scratch buffer, consuming an @@ -254,20 +263,22 @@ fn read_sandbox_memory( /// - out of gas /// - requested buffer is not within the bounds of the sandbox memory. fn read_sandbox_memory_into_scratch( - ctx: &mut Runtime, - ptr: u32, - len: u32, + ctx: &mut Runtime, + ptr: u32, + len: u32, ) -> Result<(), sp_sandbox::HostError> { - charge_gas( - ctx.gas_meter, - ctx.schedule, - &mut ctx.special_trap, - RuntimeToken::ReadMemory(len), - )?; - - ctx.scratch_buf.resize(len as usize, 0); - ctx.memory.get(ptr, ctx.scratch_buf.as_mut_slice()).map_err(|_| sp_sandbox::HostError)?; - Ok(()) + charge_gas( + ctx.gas_meter, + ctx.schedule, + &mut ctx.special_trap, + RuntimeToken::ReadMemory(len), + )?; + + ctx.scratch_buf.resize(len as usize, 0); + ctx.memory + .get(ptr, ctx.scratch_buf.as_mut_slice()) + .map_err(|_| sp_sandbox::HostError)?; + Ok(()) } /// Read designated chunk from the sandbox memory into the supplied buffer, consuming @@ -279,18 +290,18 @@ fn read_sandbox_memory_into_scratch( /// - out of gas /// - requested buffer is not within the bounds of the sandbox memory. 
fn read_sandbox_memory_into_buf( - ctx: &mut Runtime, - ptr: u32, - buf: &mut [u8], + ctx: &mut Runtime, + ptr: u32, + buf: &mut [u8], ) -> Result<(), sp_sandbox::HostError> { - charge_gas( - ctx.gas_meter, - ctx.schedule, - &mut ctx.special_trap, - RuntimeToken::ReadMemory(buf.len() as u32), - )?; - - ctx.memory.get(ptr, buf).map_err(Into::into) + charge_gas( + ctx.gas_meter, + ctx.schedule, + &mut ctx.special_trap, + RuntimeToken::ReadMemory(buf.len() as u32), + )?; + + ctx.memory.get(ptr, buf).map_err(Into::into) } /// Read designated chunk from the sandbox memory, consuming an appropriate amount of @@ -303,12 +314,12 @@ fn read_sandbox_memory_into_buf( /// - requested buffer is not within the bounds of the sandbox memory. /// - the buffer contents cannot be decoded as the required type. fn read_sandbox_memory_as( - ctx: &mut Runtime, - ptr: u32, - len: u32, + ctx: &mut Runtime, + ptr: u32, + len: u32, ) -> Result { - let buf = read_sandbox_memory(ctx, ptr, len)?; - D::decode(&mut &buf[..]).map_err(|_| sp_sandbox::HostError) + let buf = read_sandbox_memory(ctx, ptr, len)?; + D::decode(&mut &buf[..]).map_err(|_| sp_sandbox::HostError) } /// Write the given buffer to the designated location in the sandbox memory, consuming @@ -320,23 +331,23 @@ fn read_sandbox_memory_as( /// - out of gas /// - designated area is not within the bounds of the sandbox memory. 
fn write_sandbox_memory( - schedule: &Schedule, - special_trap: &mut Option, - gas_meter: &mut GasMeter, - memory: &sp_sandbox::Memory, - ptr: u32, - buf: &[u8], + schedule: &Schedule, + special_trap: &mut Option, + gas_meter: &mut GasMeter, + memory: &sp_sandbox::Memory, + ptr: u32, + buf: &[u8], ) -> Result<(), sp_sandbox::HostError> { - charge_gas( - gas_meter, - schedule, - special_trap, - RuntimeToken::WriteMemory(buf.len() as u32), - )?; + charge_gas( + gas_meter, + schedule, + special_trap, + RuntimeToken::WriteMemory(buf.len() as u32), + )?; - memory.set(ptr, buf)?; + memory.set(ptr, buf)?; - Ok(()) + Ok(()) } // *********************************************************** @@ -1130,32 +1141,32 @@ define_env!(Env, , /// /// The `input` and `output` buffers may overlap. fn compute_hash_on_intermediate_buffer( - ctx: &mut Runtime, - hash_fn: F, - input_ptr: u32, - input_len: u32, - output_ptr: u32, + ctx: &mut Runtime, + hash_fn: F, + input_ptr: u32, + input_len: u32, + output_ptr: u32, ) -> Result<(), sp_sandbox::HostError> where - E: Ext, - F: FnOnce(&[u8]) -> R, - R: AsRef<[u8]>, + E: Ext, + F: FnOnce(&[u8]) -> R, + R: AsRef<[u8]>, { - // Copy the input buffer directly into the scratch buffer to avoid - // heap allocations. - let input = read_sandbox_memory(ctx, input_ptr, input_len)?; - // Compute the hash on the scratch buffer using the given hash function. - let hash = hash_fn(&input); - // Write the resulting hash back into the sandboxed output buffer. - write_sandbox_memory( - ctx.schedule, - &mut ctx.special_trap, - ctx.gas_meter, - &ctx.memory, - output_ptr, - hash.as_ref(), - )?; - Ok(()) + // Copy the input buffer directly into the scratch buffer to avoid + // heap allocations. + let input = read_sandbox_memory(ctx, input_ptr, input_len)?; + // Compute the hash on the scratch buffer using the given hash function. + let hash = hash_fn(&input); + // Write the resulting hash back into the sandboxed output buffer. 
+ write_sandbox_memory( + ctx.schedule, + &mut ctx.special_trap, + ctx.gas_meter, + &ctx.memory, + output_ptr, + hash.as_ref(), + )?; + Ok(()) } /// Finds duplicates in a given vector. @@ -1163,15 +1174,11 @@ where /// This function has complexity of O(n log n) and no additional memory is required, although /// the order of items is not preserved. fn has_duplicates>(items: &mut Vec) -> bool { - // Sort the vector - items.sort_unstable_by(|a, b| { - Ord::cmp(a.as_ref(), b.as_ref()) - }); - // And then find any two consecutive equal elements. - items.windows(2).any(|w| { - match w { - &[ref a, ref b] => a == b, - _ => false, - } - }) + // Sort the vector + items.sort_unstable_by(|a, b| Ord::cmp(a.as_ref(), b.as_ref())); + // And then find any two consecutive equal elements. + items.windows(2).any(|w| match w { + &[ref a, ref b] => a == b, + _ => false, + }) } diff --git a/frame/democracy/src/benchmarking.rs b/frame/democracy/src/benchmarking.rs index 6165a4f897..a2ec599ec0 100644 --- a/frame/democracy/src/benchmarking.rs +++ b/frame/democracy/src/benchmarking.rs @@ -18,9 +18,9 @@ use super::*; -use frame_benchmarking::{benchmarks, account}; -use frame_support::traits::{Currency, Get, EnsureOrigin}; -use frame_system::{RawOrigin, Module as System, self}; +use frame_benchmarking::{account, benchmarks}; +use frame_support::traits::{Currency, EnsureOrigin, Get}; +use frame_system::{self, Module as System, RawOrigin}; use sp_runtime::traits::{Bounded, One}; use crate::Module as Democracy; @@ -34,455 +34,450 @@ const MAX_VETOERS: u32 = 100; const MAX_BYTES: u32 = 16_384; fn funded_account(name: &'static str, index: u32) -> T::AccountId { - let caller: T::AccountId = account(name, index, SEED); - T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - caller + let caller: T::AccountId = account(name, index, SEED); + T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + caller } fn add_proposal(n: u32) -> Result { - let other = 
funded_account::("proposer", n); - let value = T::MinimumDeposit::get(); - let proposal_hash: T::Hash = T::Hashing::hash_of(&n); + let other = funded_account::("proposer", n); + let value = T::MinimumDeposit::get(); + let proposal_hash: T::Hash = T::Hashing::hash_of(&n); - Democracy::::propose(RawOrigin::Signed(other).into(), proposal_hash, value.into())?; + Democracy::::propose(RawOrigin::Signed(other).into(), proposal_hash, value.into())?; - Ok(proposal_hash) + Ok(proposal_hash) } fn add_referendum(n: u32) -> Result { - let proposal_hash = add_proposal::(n)?; - let vote_threshold = VoteThreshold::SimpleMajority; - - Democracy::::inject_referendum( - 0.into(), - proposal_hash, - vote_threshold, - 0.into(), - ); - let referendum_index: ReferendumIndex = ReferendumCount::get() - 1; - let _ = T::Scheduler::schedule_named( - (DEMOCRACY_ID, referendum_index), - 0.into(), - None, - 63, - Call::enact_proposal(proposal_hash, referendum_index).into(), - ); - Ok(referendum_index) + let proposal_hash = add_proposal::(n)?; + let vote_threshold = VoteThreshold::SimpleMajority; + + Democracy::::inject_referendum(0.into(), proposal_hash, vote_threshold, 0.into()); + let referendum_index: ReferendumIndex = ReferendumCount::get() - 1; + let _ = T::Scheduler::schedule_named( + (DEMOCRACY_ID, referendum_index), + 0.into(), + None, + 63, + Call::enact_proposal(proposal_hash, referendum_index).into(), + ); + Ok(referendum_index) } fn account_vote() -> AccountVote> { - let v = Vote { - aye: true, - conviction: Conviction::Locked1x, - }; - - AccountVote::Standard { - vote: v, - balance: BalanceOf::::one(), - } + let v = Vote { + aye: true, + conviction: Conviction::Locked1x, + }; + + AccountVote::Standard { + vote: v, + balance: BalanceOf::::one(), + } } fn open_activate_proxy(u: u32) -> Result { - let caller = funded_account::("caller", u); - let proxy = funded_account::("proxy", u); + let caller = funded_account::("caller", u); + let proxy = funded_account::("proxy", u); - 
Democracy::::open_proxy(RawOrigin::Signed(proxy.clone()).into(), caller.clone())?; - Democracy::::activate_proxy(RawOrigin::Signed(caller).into(), proxy.clone())?; + Democracy::::open_proxy(RawOrigin::Signed(proxy.clone()).into(), caller.clone())?; + Democracy::::activate_proxy(RawOrigin::Signed(caller).into(), proxy.clone())?; - Ok(proxy) + Ok(proxy) } benchmarks! { - _ { } + _ { } - propose { - let p in 1 .. MAX_PROPOSALS; + propose { + let p in 1 .. MAX_PROPOSALS; - // Add p proposals - for i in 0 .. p { - add_proposal::(i)?; - } + // Add p proposals + for i in 0 .. p { + add_proposal::(i)?; + } - let caller = funded_account::("caller", 0); - let proposal_hash: T::Hash = T::Hashing::hash_of(&p); - let value = T::MinimumDeposit::get(); - }: _(RawOrigin::Signed(caller), proposal_hash, value.into()) + let caller = funded_account::("caller", 0); + let proposal_hash: T::Hash = T::Hashing::hash_of(&p); + let value = T::MinimumDeposit::get(); + }: _(RawOrigin::Signed(caller), proposal_hash, value.into()) - second { - let s in 0 .. MAX_SECONDERS; + second { + let s in 0 .. MAX_SECONDERS; - let caller = funded_account::("caller", 0); - let proposal_hash = add_proposal::(s)?; + let caller = funded_account::("caller", 0); + let proposal_hash = add_proposal::(s)?; - // Create s existing "seconds" - for i in 0 .. s { - let seconder = funded_account::("seconder", i); - Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; - } + // Create s existing "seconds" + for i in 0 .. s { + let seconder = funded_account::("seconder", i); + Democracy::::second(RawOrigin::Signed(seconder).into(), 0)?; + } - }: _(RawOrigin::Signed(caller), 0) + }: _(RawOrigin::Signed(caller), 0) - vote { - let r in 1 .. MAX_REFERENDUMS; + vote { + let r in 1 .. MAX_REFERENDUMS; - let caller = funded_account::("caller", 0); - let account_vote = account_vote::(); + let caller = funded_account::("caller", 0); + let account_vote = account_vote::(); - for i in 0 .. 
r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; - } + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; + } - let referendum_index = r - 1; + let referendum_index = r - 1; - }: _(RawOrigin::Signed(caller), referendum_index, account_vote) + }: _(RawOrigin::Signed(caller), referendum_index, account_vote) - proxy_vote { - let r in 1 .. MAX_REFERENDUMS; + proxy_vote { + let r in 1 .. MAX_REFERENDUMS; - let caller = funded_account::("caller", r); - let proxy = open_activate_proxy::(r)?; - let account_vote = account_vote::(); + let caller = funded_account::("caller", r); + let proxy = open_activate_proxy::(r)?; + let account_vote = account_vote::(); - for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; - } + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; + } - let referendum_index = r - 1; + let referendum_index = r - 1; - }: _(RawOrigin::Signed(proxy), referendum_index, account_vote) + }: _(RawOrigin::Signed(proxy), referendum_index, account_vote) - emergency_cancel { - let u in 1 .. MAX_USERS; + emergency_cancel { + let u in 1 .. MAX_USERS; - let referendum_index = add_referendum::(u)?; - let origin = T::CancellationOrigin::successful_origin(); - let call = Call::::emergency_cancel(referendum_index); - }: { - let _ = call.dispatch(origin)?; - } + let referendum_index = add_referendum::(u)?; + let origin = T::CancellationOrigin::successful_origin(); + let call = Call::::emergency_cancel(referendum_index); + }: { + let _ = call.dispatch(origin)?; + } - external_propose { - let u in 1 .. MAX_USERS; + external_propose { + let u in 1 .. 
MAX_USERS; - let origin = T::ExternalOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&u); - let call = Call::::external_propose(proposal_hash); - }: { - let _ = call.dispatch(origin)?; - } + let origin = T::ExternalOrigin::successful_origin(); + let proposal_hash = T::Hashing::hash_of(&u); + let call = Call::::external_propose(proposal_hash); + }: { + let _ = call.dispatch(origin)?; + } - external_propose_majority { - let u in 1 .. MAX_USERS; + external_propose_majority { + let u in 1 .. MAX_USERS; - let origin = T::ExternalMajorityOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&u); - let call = Call::::external_propose_majority(proposal_hash); + let origin = T::ExternalMajorityOrigin::successful_origin(); + let proposal_hash = T::Hashing::hash_of(&u); + let call = Call::::external_propose_majority(proposal_hash); - }: { - let _ = call.dispatch(origin)?; - } + }: { + let _ = call.dispatch(origin)?; + } - external_propose_default { - let u in 1 .. MAX_USERS; + external_propose_default { + let u in 1 .. MAX_USERS; - let origin = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash = T::Hashing::hash_of(&u); - let call = Call::::external_propose_default(proposal_hash); + let origin = T::ExternalDefaultOrigin::successful_origin(); + let proposal_hash = T::Hashing::hash_of(&u); + let call = Call::::external_propose_default(proposal_hash); - }: { - let _ = call.dispatch(origin)?; - } + }: { + let _ = call.dispatch(origin)?; + } - fast_track { - let u in 1 .. MAX_USERS; + fast_track { + let u in 1 .. 
MAX_USERS; - let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - let proposal_hash: T::Hash = T::Hashing::hash_of(&u); - Democracy::::external_propose_default(origin_propose, proposal_hash.clone())?; + let origin_propose = T::ExternalDefaultOrigin::successful_origin(); + let proposal_hash: T::Hash = T::Hashing::hash_of(&u); + Democracy::::external_propose_default(origin_propose, proposal_hash.clone())?; - let origin_fast_track = T::FastTrackOrigin::successful_origin(); - let voting_period = T::FastTrackVotingPeriod::get(); - let delay = 0; - let call = Call::::fast_track(proposal_hash, voting_period.into(), delay.into()); + let origin_fast_track = T::FastTrackOrigin::successful_origin(); + let voting_period = T::FastTrackVotingPeriod::get(); + let delay = 0; + let call = Call::::fast_track(proposal_hash, voting_period.into(), delay.into()); - }: { - let _ = call.dispatch(origin_fast_track)?; - } + }: { + let _ = call.dispatch(origin_fast_track)?; + } - veto_external { - // Existing veto-ers - let v in 0 .. MAX_VETOERS; + veto_external { + // Existing veto-ers + let v in 0 .. MAX_VETOERS; - let proposal_hash: T::Hash = T::Hashing::hash_of(&v); + let proposal_hash: T::Hash = T::Hashing::hash_of(&v); - let origin_propose = T::ExternalDefaultOrigin::successful_origin(); - Democracy::::external_propose_default(origin_propose, proposal_hash.clone())?; + let origin_propose = T::ExternalDefaultOrigin::successful_origin(); + Democracy::::external_propose_default(origin_propose, proposal_hash.clone())?; - let mut vetoers: Vec = Vec::new(); - for i in 0 .. v { - vetoers.push(account("vetoer", i, SEED)); - } - Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); + let mut vetoers: Vec = Vec::new(); + for i in 0 .. 
v { + vetoers.push(account("vetoer", i, SEED)); + } + Blacklist::::insert(proposal_hash, (T::BlockNumber::zero(), vetoers)); - let call = Call::::veto_external(proposal_hash); - let origin = T::VetoOrigin::successful_origin(); - }: { - let _ = call.dispatch(origin)?; - } + let call = Call::::veto_external(proposal_hash); + let origin = T::VetoOrigin::successful_origin(); + }: { + let _ = call.dispatch(origin)?; + } - cancel_referendum { - let u in 1 .. MAX_USERS; + cancel_referendum { + let u in 1 .. MAX_USERS; - let referendum_index = add_referendum::(u)?; - }: _(RawOrigin::Root, referendum_index) + let referendum_index = add_referendum::(u)?; + }: _(RawOrigin::Root, referendum_index) - cancel_queued { - let u in 1 .. MAX_USERS; + cancel_queued { + let u in 1 .. MAX_USERS; - let referendum_index = add_referendum::(u)?; - }: _(RawOrigin::Root, referendum_index) + let referendum_index = add_referendum::(u)?; + }: _(RawOrigin::Root, referendum_index) - open_proxy { - let u in 1 .. MAX_USERS; + open_proxy { + let u in 1 .. MAX_USERS; - let caller: T::AccountId = funded_account::("caller", u); - let proxy: T::AccountId = funded_account::("proxy", u); + let caller: T::AccountId = funded_account::("caller", u); + let proxy: T::AccountId = funded_account::("proxy", u); - }: _(RawOrigin::Signed(proxy), caller) + }: _(RawOrigin::Signed(proxy), caller) - activate_proxy { - let u in 1 .. MAX_USERS; + activate_proxy { + let u in 1 .. MAX_USERS; - let caller: T::AccountId = funded_account::("caller", u); - let proxy: T::AccountId = funded_account::("proxy", u); - Democracy::::open_proxy(RawOrigin::Signed(proxy.clone()).into(), caller.clone())?; + let caller: T::AccountId = funded_account::("caller", u); + let proxy: T::AccountId = funded_account::("proxy", u); + Democracy::::open_proxy(RawOrigin::Signed(proxy.clone()).into(), caller.clone())?; - }: _(RawOrigin::Signed(caller), proxy) + }: _(RawOrigin::Signed(caller), proxy) - close_proxy { - let u in 1 .. 
MAX_USERS; + close_proxy { + let u in 1 .. MAX_USERS; - let proxy = open_activate_proxy::(u)?; + let proxy = open_activate_proxy::(u)?; - }: _(RawOrigin::Signed(proxy)) + }: _(RawOrigin::Signed(proxy)) - deactivate_proxy { - let u in 1 .. MAX_USERS; + deactivate_proxy { + let u in 1 .. MAX_USERS; - let caller = funded_account::("caller", u); - let proxy = open_activate_proxy::(u)?; + let caller = funded_account::("caller", u); + let proxy = open_activate_proxy::(u)?; - }: _(RawOrigin::Signed(caller), proxy) + }: _(RawOrigin::Signed(caller), proxy) - delegate { - let u in 1 .. MAX_USERS; + delegate { + let u in 1 .. MAX_USERS; - let caller = funded_account::("caller", u); - let d: T::AccountId = funded_account::("delegate", u); - let balance = 1u32; + let caller = funded_account::("caller", u); + let d: T::AccountId = funded_account::("delegate", u); + let balance = 1u32; - }: _(RawOrigin::Signed(caller), d.into(), Conviction::Locked1x, balance.into()) + }: _(RawOrigin::Signed(caller), d.into(), Conviction::Locked1x, balance.into()) - undelegate { - let r in 1 .. MAX_REFERENDUMS; + undelegate { + let r in 1 .. MAX_REFERENDUMS; - let other = funded_account::("other", 0); - let account_vote = account_vote::(); + let other = funded_account::("other", 0); + let account_vote = account_vote::(); - for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; - } + for i in 0 .. 
r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; + } - let delegator = funded_account::("delegator", r); - let conviction = Conviction::Locked1x; - let balance = 1u32; + let delegator = funded_account::("delegator", r); + let conviction = Conviction::Locked1x; + let balance = 1u32; - Democracy::::delegate(RawOrigin::Signed(delegator.clone()).into(), other.clone().into(), conviction, balance.into())?; + Democracy::::delegate(RawOrigin::Signed(delegator.clone()).into(), other.clone().into(), conviction, balance.into())?; - }: _(RawOrigin::Signed(delegator)) + }: _(RawOrigin::Signed(delegator)) - clear_public_proposals { - let p in 0 .. MAX_PROPOSALS; + clear_public_proposals { + let p in 0 .. MAX_PROPOSALS; - for i in 0 .. p { - add_proposal::(i)?; - } + for i in 0 .. p { + add_proposal::(i)?; + } - }: _(RawOrigin::Root) + }: _(RawOrigin::Root) - note_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; + note_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; - let caller = funded_account::("caller", b); - let encoded_proposal = vec![0; b as usize]; - }: _(RawOrigin::Signed(caller), encoded_proposal) + let caller = funded_account::("caller", b); + let encoded_proposal = vec![0; b as usize]; + }: _(RawOrigin::Signed(caller), encoded_proposal) - note_imminent_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; + note_imminent_preimage { + // Num of bytes in encoded proposal + let b in 0 .. 
MAX_BYTES; - // d + 1 to include the one we are testing - let encoded_proposal = vec![0; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - let block_number = T::BlockNumber::one(); - Preimages::::insert(&proposal_hash, PreimageStatus::Missing(block_number)); + // d + 1 to include the one we are testing + let encoded_proposal = vec![0; b as usize]; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + let block_number = T::BlockNumber::one(); + Preimages::::insert(&proposal_hash, PreimageStatus::Missing(block_number)); - let caller = funded_account::("caller", b); - let encoded_proposal = vec![0; b as usize]; - }: _(RawOrigin::Signed(caller), encoded_proposal) + let caller = funded_account::("caller", b); + let encoded_proposal = vec![0; b as usize]; + }: _(RawOrigin::Signed(caller), encoded_proposal) - reap_preimage { - // Num of bytes in encoded proposal - let b in 0 .. MAX_BYTES; + reap_preimage { + // Num of bytes in encoded proposal + let b in 0 .. MAX_BYTES; - let encoded_proposal = vec![0; b as usize]; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + let encoded_proposal = vec![0; b as usize]; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - let caller = funded_account::("caller", b); - Democracy::::note_preimage(RawOrigin::Signed(caller.clone()).into(), encoded_proposal.clone())?; + let caller = funded_account::("caller", b); + Democracy::::note_preimage(RawOrigin::Signed(caller.clone()).into(), encoded_proposal.clone())?; - // We need to set this otherwise we get `Early` error. - let block_number = T::VotingPeriod::get() + T::EnactmentPeriod::get() + T::BlockNumber::one(); - System::::set_block_number(block_number.into()); + // We need to set this otherwise we get `Early` error. 
+ let block_number = T::VotingPeriod::get() + T::EnactmentPeriod::get() + T::BlockNumber::one(); + System::::set_block_number(block_number.into()); - }: _(RawOrigin::Signed(caller), proposal_hash) + }: _(RawOrigin::Signed(caller), proposal_hash) - unlock { - let u in 1 .. MAX_USERS; + unlock { + let u in 1 .. MAX_USERS; - let caller = funded_account::("caller", u); - let locked_until = T::BlockNumber::zero(); - Locks::::insert(&caller, locked_until); + let caller = funded_account::("caller", u); + let locked_until = T::BlockNumber::zero(); + Locks::::insert(&caller, locked_until); - T::Currency::extend_lock( - DEMOCRACY_ID, - &caller, - Bounded::max_value(), - WithdrawReason::Transfer.into() - ); + T::Currency::extend_lock( + DEMOCRACY_ID, + &caller, + Bounded::max_value(), + WithdrawReason::Transfer.into() + ); - let other = caller.clone(); + let other = caller.clone(); - }: _(RawOrigin::Signed(caller), other) + }: _(RawOrigin::Signed(caller), other) - remove_vote { - let r in 1 .. MAX_REFERENDUMS; + remove_vote { + let r in 1 .. MAX_REFERENDUMS; - let caller = funded_account::("caller", 0); - let account_vote = account_vote::(); + let caller = funded_account::("caller", 0); + let account_vote = account_vote::(); - for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; - } + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(caller.clone()).into(), ref_idx, account_vote.clone())?; + } - let referendum_index = r - 1; + let referendum_index = r - 1; - }: _(RawOrigin::Signed(caller), referendum_index) + }: _(RawOrigin::Signed(caller), referendum_index) - remove_other_vote { - let r in 1 .. MAX_REFERENDUMS; + remove_other_vote { + let r in 1 .. 
MAX_REFERENDUMS; - let other = funded_account::("other", r); - let account_vote = account_vote::(); + let other = funded_account::("other", r); + let account_vote = account_vote::(); - for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; - } + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; + } - let referendum_index = r - 1; - ReferendumInfoOf::::insert( - referendum_index, - ReferendumInfo::Finished { end: T::BlockNumber::zero(), approved: true } - ); - let caller = funded_account::("caller", r); + let referendum_index = r - 1; + ReferendumInfoOf::::insert( + referendum_index, + ReferendumInfo::Finished { end: T::BlockNumber::zero(), approved: true } + ); + let caller = funded_account::("caller", r); - System::::set_block_number(T::EnactmentPeriod::get() * 10u32.into()); + System::::set_block_number(T::EnactmentPeriod::get() * 10u32.into()); - }: _(RawOrigin::Signed(caller), other, referendum_index) + }: _(RawOrigin::Signed(caller), other, referendum_index) - proxy_delegate { - let u in 1 .. MAX_USERS; + proxy_delegate { + let u in 1 .. MAX_USERS; - let other: T::AccountId = account("other", u, SEED); - let proxy = open_activate_proxy::(u)?; - let conviction = Conviction::Locked1x; - let balance = 1u32; + let other: T::AccountId = account("other", u, SEED); + let proxy = open_activate_proxy::(u)?; + let conviction = Conviction::Locked1x; + let balance = 1u32; - }: _(RawOrigin::Signed(proxy), other, conviction, balance.into()) + }: _(RawOrigin::Signed(proxy), other, conviction, balance.into()) - proxy_undelegate { - let r in 1 .. MAX_REFERENDUMS; + proxy_undelegate { + let r in 1 .. 
MAX_REFERENDUMS; - let other = funded_account::("other", 0); - let account_vote = account_vote::(); + let other = funded_account::("other", 0); + let account_vote = account_vote::(); - for i in 0 .. r { - let ref_idx = add_referendum::(i)?; - Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; - } + for i in 0 .. r { + let ref_idx = add_referendum::(i)?; + Democracy::::vote(RawOrigin::Signed(other.clone()).into(), ref_idx, account_vote.clone())?; + } - let proxy = open_activate_proxy::(r)?; - let conviction = Conviction::Locked1x; - let balance = 1u32; - Democracy::::proxy_delegate(RawOrigin::Signed(proxy.clone()).into(), other, conviction, balance.into())?; + let proxy = open_activate_proxy::(r)?; + let conviction = Conviction::Locked1x; + let balance = 1u32; + Democracy::::proxy_delegate(RawOrigin::Signed(proxy.clone()).into(), other, conviction, balance.into())?; - }: _(RawOrigin::Signed(proxy)) + }: _(RawOrigin::Signed(proxy)) - proxy_remove_vote { - let u in 1 .. MAX_USERS; + proxy_remove_vote { + let u in 1 .. 
MAX_USERS; - let referendum_index = add_referendum::(u)?; - let account_vote = account_vote::(); - let proxy = open_activate_proxy::(u)?; + let referendum_index = add_referendum::(u)?; + let account_vote = account_vote::(); + let proxy = open_activate_proxy::(u)?; - Democracy::::proxy_vote(RawOrigin::Signed(proxy.clone()).into(), referendum_index, account_vote)?; + Democracy::::proxy_vote(RawOrigin::Signed(proxy.clone()).into(), referendum_index, account_vote)?; - }: _(RawOrigin::Signed(proxy), referendum_index) + }: _(RawOrigin::Signed(proxy), referendum_index) } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose::()); - assert_ok!(test_benchmark_second::()); - assert_ok!(test_benchmark_vote::()); - assert_ok!(test_benchmark_proxy_vote::()); - assert_ok!(test_benchmark_emergency_cancel::()); - assert_ok!(test_benchmark_external_propose::()); - assert_ok!(test_benchmark_external_propose_majority::()); - assert_ok!(test_benchmark_external_propose_default::()); - assert_ok!(test_benchmark_fast_track::()); - assert_ok!(test_benchmark_veto_external::()); - assert_ok!(test_benchmark_cancel_referendum::()); - assert_ok!(test_benchmark_cancel_queued::()); - assert_ok!(test_benchmark_open_proxy::()); - assert_ok!(test_benchmark_activate_proxy::()); - assert_ok!(test_benchmark_close_proxy::()); - assert_ok!(test_benchmark_deactivate_proxy::()); - assert_ok!(test_benchmark_delegate::()); - assert_ok!(test_benchmark_undelegate::()); - assert_ok!(test_benchmark_clear_public_proposals::()); - assert_ok!(test_benchmark_note_preimage::()); - assert_ok!(test_benchmark_note_imminent_preimage::()); - assert_ok!(test_benchmark_reap_preimage::()); - assert_ok!(test_benchmark_unlock::()); - assert_ok!(test_benchmark_remove_vote::()); - assert_ok!(test_benchmark_remove_other_vote::()); - 
assert_ok!(test_benchmark_proxy_delegate::()); - assert_ok!(test_benchmark_proxy_undelegate::()); - assert_ok!(test_benchmark_proxy_remove_vote::()); - }); - } + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_propose::()); + assert_ok!(test_benchmark_second::()); + assert_ok!(test_benchmark_vote::()); + assert_ok!(test_benchmark_proxy_vote::()); + assert_ok!(test_benchmark_emergency_cancel::()); + assert_ok!(test_benchmark_external_propose::()); + assert_ok!(test_benchmark_external_propose_majority::()); + assert_ok!(test_benchmark_external_propose_default::()); + assert_ok!(test_benchmark_fast_track::()); + assert_ok!(test_benchmark_veto_external::()); + assert_ok!(test_benchmark_cancel_referendum::()); + assert_ok!(test_benchmark_cancel_queued::()); + assert_ok!(test_benchmark_open_proxy::()); + assert_ok!(test_benchmark_activate_proxy::()); + assert_ok!(test_benchmark_close_proxy::()); + assert_ok!(test_benchmark_deactivate_proxy::()); + assert_ok!(test_benchmark_delegate::()); + assert_ok!(test_benchmark_undelegate::()); + assert_ok!(test_benchmark_clear_public_proposals::()); + assert_ok!(test_benchmark_note_preimage::()); + assert_ok!(test_benchmark_note_imminent_preimage::()); + assert_ok!(test_benchmark_reap_preimage::()); + assert_ok!(test_benchmark_unlock::()); + assert_ok!(test_benchmark_remove_vote::()); + assert_ok!(test_benchmark_remove_other_vote::()); + assert_ok!(test_benchmark_proxy_delegate::()); + assert_ok!(test_benchmark_proxy_undelegate::()); + assert_ok!(test_benchmark_proxy_remove_vote::()); + }); + } } diff --git a/frame/democracy/src/conviction.rs b/frame/democracy/src/conviction.rs index a057ee2a35..62570d9d9f 100644 --- a/frame/democracy/src/conviction.rs +++ b/frame/democracy/src/conviction.rs @@ -16,98 +16,104 @@ //! The conviction datatype. 
-use sp_std::{result::Result, convert::TryFrom}; -use sp_runtime::{RuntimeDebug, traits::{Zero, Bounded, CheckedMul, CheckedDiv}}; -use codec::{Encode, Decode}; use crate::types::Delegations; +use codec::{Decode, Encode}; +use sp_runtime::{ + traits::{Bounded, CheckedDiv, CheckedMul, Zero}, + RuntimeDebug, +}; +use sp_std::{convert::TryFrom, result::Result}; /// A value denoting the strength of conviction of a vote. #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Ord, PartialOrd, RuntimeDebug)] pub enum Conviction { - /// 0.1x votes, unlocked. - None, - /// 1x votes, locked for an enactment period following a successful vote. - Locked1x, - /// 2x votes, locked for 2x enactment periods following a successful vote. - Locked2x, - /// 3x votes, locked for 4x... - Locked3x, - /// 4x votes, locked for 8x... - Locked4x, - /// 5x votes, locked for 16x... - Locked5x, - /// 6x votes, locked for 32x... - Locked6x, + /// 0.1x votes, unlocked. + None, + /// 1x votes, locked for an enactment period following a successful vote. + Locked1x, + /// 2x votes, locked for 2x enactment periods following a successful vote. + Locked2x, + /// 3x votes, locked for 4x... + Locked3x, + /// 4x votes, locked for 8x... + Locked4x, + /// 5x votes, locked for 16x... + Locked5x, + /// 6x votes, locked for 32x... 
+ Locked6x, } impl Default for Conviction { - fn default() -> Self { - Conviction::None - } + fn default() -> Self { + Conviction::None + } } impl From for u8 { - fn from(c: Conviction) -> u8 { - match c { - Conviction::None => 0, - Conviction::Locked1x => 1, - Conviction::Locked2x => 2, - Conviction::Locked3x => 3, - Conviction::Locked4x => 4, - Conviction::Locked5x => 5, - Conviction::Locked6x => 6, - } - } + fn from(c: Conviction) -> u8 { + match c { + Conviction::None => 0, + Conviction::Locked1x => 1, + Conviction::Locked2x => 2, + Conviction::Locked3x => 3, + Conviction::Locked4x => 4, + Conviction::Locked5x => 5, + Conviction::Locked6x => 6, + } + } } impl TryFrom for Conviction { - type Error = (); - fn try_from(i: u8) -> Result { - Ok(match i { - 0 => Conviction::None, - 1 => Conviction::Locked1x, - 2 => Conviction::Locked2x, - 3 => Conviction::Locked3x, - 4 => Conviction::Locked4x, - 5 => Conviction::Locked5x, - 6 => Conviction::Locked6x, - _ => return Err(()), - }) - } + type Error = (); + fn try_from(i: u8) -> Result { + Ok(match i { + 0 => Conviction::None, + 1 => Conviction::Locked1x, + 2 => Conviction::Locked2x, + 3 => Conviction::Locked3x, + 4 => Conviction::Locked4x, + 5 => Conviction::Locked5x, + 6 => Conviction::Locked6x, + _ => return Err(()), + }) + } } impl Conviction { - /// The amount of time (in number of periods) that our conviction implies a successful voter's - /// balance should be locked for. - pub fn lock_periods(self) -> u32 { - match self { - Conviction::None => 0, - Conviction::Locked1x => 1, - Conviction::Locked2x => 2, - Conviction::Locked3x => 4, - Conviction::Locked4x => 8, - Conviction::Locked5x => 16, - Conviction::Locked6x => 32, - } - } + /// The amount of time (in number of periods) that our conviction implies a successful voter's + /// balance should be locked for. 
+ pub fn lock_periods(self) -> u32 { + match self { + Conviction::None => 0, + Conviction::Locked1x => 1, + Conviction::Locked2x => 2, + Conviction::Locked3x => 4, + Conviction::Locked4x => 8, + Conviction::Locked5x => 16, + Conviction::Locked6x => 32, + } + } - /// The votes of a voter of the given `balance` with our conviction. - pub fn votes< - B: From + Zero + Copy + CheckedMul + CheckedDiv + Bounded - >(self, capital: B) -> Delegations { - let votes = match self { - Conviction::None => capital.checked_div(&10u8.into()).unwrap_or_else(Zero::zero), - x => capital.checked_mul(&u8::from(x).into()).unwrap_or_else(B::max_value), - }; - Delegations { votes, capital } - } + /// The votes of a voter of the given `balance` with our conviction. + pub fn votes + Zero + Copy + CheckedMul + CheckedDiv + Bounded>( + self, + capital: B, + ) -> Delegations { + let votes = match self { + Conviction::None => capital.checked_div(&10u8.into()).unwrap_or_else(Zero::zero), + x => capital + .checked_mul(&u8::from(x).into()) + .unwrap_or_else(B::max_value), + }; + Delegations { votes, capital } + } } impl Bounded for Conviction { - fn min_value() -> Self { - Conviction::None - } - fn max_value() -> Self { - Conviction::Locked6x - } + fn min_value() -> Self { + Conviction::None + } + fn max_value() -> Self { + Conviction::Locked6x + } } diff --git a/frame/democracy/src/lib.rs b/frame/democracy/src/lib.rs index a76567ba27..f9fa939f10 100644 --- a/frame/democracy/src/lib.rs +++ b/frame/democracy/src/lib.rs @@ -160,33 +160,34 @@ //! - `cancel_queued` - Cancels a proposal that is queued for enactment. //! - `clear_public_proposal` - Removes all public proposals. 
-#![recursion_limit="128"] +#![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_runtime::{ - DispatchResult, DispatchError, RuntimeDebug, - traits::{Zero, Hash, Dispatchable, Saturating}, -}; -use codec::{Ref, Encode, Decode}; +use codec::{Decode, Encode, Ref}; use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, ensure, Parameter, - weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, - traits::{ - Currency, ReservableCurrency, LockableCurrency, WithdrawReason, LockIdentifier, Get, - OnUnbalanced, BalanceStatus, schedule::Named as ScheduleNamed, EnsureOrigin - } + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{ + schedule::Named as ScheduleNamed, BalanceStatus, Currency, EnsureOrigin, Get, + LockIdentifier, LockableCurrency, OnUnbalanced, ReservableCurrency, WithdrawReason, + }, + weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, + Parameter, +}; +use frame_system::{self as system, ensure_root, ensure_signed}; +use sp_runtime::{ + traits::{Dispatchable, Hash, Saturating, Zero}, + DispatchError, DispatchResult, RuntimeDebug, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use sp_std::prelude::*; -mod vote_threshold; -mod vote; mod conviction; mod types; -pub use vote_threshold::{Approved, VoteThreshold}; -pub use vote::{Vote, AccountVote, Voting}; +mod vote; +mod vote_threshold; pub use conviction::Conviction; -pub use types::{ReferendumInfo, ReferendumStatus, ProxyState, Tally, UnvoteScope, Delegations}; +pub use types::{Delegations, ProxyState, ReferendumInfo, ReferendumStatus, Tally, UnvoteScope}; +pub use vote::{AccountVote, Vote, Voting}; +pub use vote_threshold::{Approved, VoteThreshold}; #[cfg(test)] mod tests; @@ -202,1420 +203,1492 @@ pub type PropIndex = u32; /// A referendum index. 
pub type ReferendumIndex = u32; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: frame_system::Trait + Sized { - type Proposal: Parameter + Dispatchable + From>; - type Event: From> + Into<::Event>; + type Proposal: Parameter + Dispatchable + From>; + type Event: From> + Into<::Event>; - /// Currency type for this module. - type Currency: ReservableCurrency - + LockableCurrency; + /// Currency type for this module. + type Currency: ReservableCurrency + + LockableCurrency; - /// The minimum period of locking and the period between a proposal being approved and enacted. - /// - /// It should generally be a little more than the unstake period to ensure that - /// voting stakers have an opportunity to remove themselves from the system in the case where - /// they are on the losing side of a vote. - type EnactmentPeriod: Get; + /// The minimum period of locking and the period between a proposal being approved and enacted. + /// + /// It should generally be a little more than the unstake period to ensure that + /// voting stakers have an opportunity to remove themselves from the system in the case where + /// they are on the losing side of a vote. + type EnactmentPeriod: Get; - /// How often (in blocks) new public referenda are launched. - type LaunchPeriod: Get; + /// How often (in blocks) new public referenda are launched. + type LaunchPeriod: Get; - /// How often (in blocks) to check for new votes. - type VotingPeriod: Get; + /// How often (in blocks) to check for new votes. + type VotingPeriod: Get; - /// The minimum amount to be used as a deposit for a public referendum proposal. - type MinimumDeposit: Get>; + /// The minimum amount to be used as a deposit for a public referendum proposal. 
+ type MinimumDeposit: Get>; - /// Origin from which the next tabled referendum may be forced. This is a normal - /// "super-majority-required" referendum. - type ExternalOrigin: EnsureOrigin; + /// Origin from which the next tabled referendum may be forced. This is a normal + /// "super-majority-required" referendum. + type ExternalOrigin: EnsureOrigin; - /// Origin from which the next tabled referendum may be forced; this allows for the tabling of - /// a majority-carries referendum. - type ExternalMajorityOrigin: EnsureOrigin; + /// Origin from which the next tabled referendum may be forced; this allows for the tabling of + /// a majority-carries referendum. + type ExternalMajorityOrigin: EnsureOrigin; - /// Origin from which the next tabled referendum may be forced; this allows for the tabling of - /// a negative-turnout-bias (default-carries) referendum. - type ExternalDefaultOrigin: EnsureOrigin; + /// Origin from which the next tabled referendum may be forced; this allows for the tabling of + /// a negative-turnout-bias (default-carries) referendum. + type ExternalDefaultOrigin: EnsureOrigin; - /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to - /// vote according to the `FastTrackVotingPeriod` asynchronously in a similar manner to the - /// emergency origin. It retains its threshold method. - type FastTrackOrigin: EnsureOrigin; + /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to + /// vote according to the `FastTrackVotingPeriod` asynchronously in a similar manner to the + /// emergency origin. It retains its threshold method. + type FastTrackOrigin: EnsureOrigin; - /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to - /// vote immediately and asynchronously in a similar manner to the emergency origin. It retains - /// its threshold method. 
- type InstantOrigin: EnsureOrigin; + /// Origin from which the next majority-carries (or more permissive) referendum may be tabled to + /// vote immediately and asynchronously in a similar manner to the emergency origin. It retains + /// its threshold method. + type InstantOrigin: EnsureOrigin; - /// Indicator for whether an emergency origin is even allowed to happen. Some chains may want - /// to set this permanently to `false`, others may want to condition it on things such as - /// an upgrade having happened recently. - type InstantAllowed: Get; + /// Indicator for whether an emergency origin is even allowed to happen. Some chains may want + /// to set this permanently to `false`, others may want to condition it on things such as + /// an upgrade having happened recently. + type InstantAllowed: Get; - /// Minimum voting period allowed for a fast-track referendum. - type FastTrackVotingPeriod: Get; + /// Minimum voting period allowed for a fast-track referendum. + type FastTrackVotingPeriod: Get; - /// Origin from which any referendum may be cancelled in an emergency. - type CancellationOrigin: EnsureOrigin; + /// Origin from which any referendum may be cancelled in an emergency. + type CancellationOrigin: EnsureOrigin; - /// Origin for anyone able to veto proposals. - type VetoOrigin: EnsureOrigin; + /// Origin for anyone able to veto proposals. + type VetoOrigin: EnsureOrigin; - /// Period in blocks where an external proposal may not be re-submitted after being vetoed. - type CooloffPeriod: Get; + /// Period in blocks where an external proposal may not be re-submitted after being vetoed. + type CooloffPeriod: Get; - /// The amount of balance that must be deposited per byte of preimage stored. - type PreimageByteDeposit: Get>; + /// The amount of balance that must be deposited per byte of preimage stored. + type PreimageByteDeposit: Get>; - /// Handler for the unbalanced reduction when slashing a preimage deposit. 
- type Slash: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing a preimage deposit. + type Slash: OnUnbalanced>; - /// The Scheduler. - type Scheduler: ScheduleNamed; + /// The Scheduler. + type Scheduler: ScheduleNamed; } #[derive(Clone, Encode, Decode, RuntimeDebug)] pub enum PreimageStatus { - /// The preimage is imminently needed at the argument. - Missing(BlockNumber), - /// The preimage is available. - Available { - data: Vec, - provider: AccountId, - deposit: Balance, - since: BlockNumber, - /// None if it's not imminent. - expiry: Option, - }, + /// The preimage is imminently needed at the argument. + Missing(BlockNumber), + /// The preimage is available. + Available { + data: Vec, + provider: AccountId, + deposit: Balance, + since: BlockNumber, + /// None if it's not imminent. + expiry: Option, + }, } impl PreimageStatus { - fn to_missing_expiry(self) -> Option { - match self { - PreimageStatus::Missing(expiry) => Some(expiry), - _ => None, - } - } + fn to_missing_expiry(self) -> Option { + match self { + PreimageStatus::Missing(expiry) => Some(expiry), + _ => None, + } + } } decl_storage! { - trait Store for Module as Democracy { - // TODO: Refactor public proposal queue into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 - /// The number of (public) proposals that have been made so far. - pub PublicPropCount get(fn public_prop_count) build(|_| 0 as PropIndex) : PropIndex; - /// The public proposals. Unsorted. The second item is the proposal's hash. - pub PublicProps get(fn public_props): Vec<(PropIndex, T::Hash, T::AccountId)>; - /// Those who have locked a deposit. - pub DepositOf get(fn deposit_of): - map hasher(twox_64_concat) PropIndex => Option<(BalanceOf, Vec)>; - - /// Map of hashes to the proposal preimage, along with who registered it and their deposit. - /// The block number is the block at which it was deposited. - // TODO: Refactor Preimages into its own pallet. 
- // https://github.com/paritytech/substrate/issues/5322 - pub Preimages: - map hasher(identity) T::Hash - => Option, T::BlockNumber>>; - - /// The next free referendum index, aka the number of referenda started so far. - pub ReferendumCount get(fn referendum_count) build(|_| 0 as ReferendumIndex): ReferendumIndex; - /// The lowest referendum index representing an unbaked referendum. Equal to - /// `ReferendumCount` if there isn't a unbaked referendum. - pub LowestUnbaked get(fn lowest_unbaked) build(|_| 0 as ReferendumIndex): ReferendumIndex; - - /// Information concerning any given referendum. - pub ReferendumInfoOf get(fn referendum_info): - map hasher(twox_64_concat) ReferendumIndex - => Option>>; - - /// All votes for a particular voter. We store the balance for the number of votes that we - /// have recorded. The second item is the total amount of delegations, that will be added. - pub VotingOf: map hasher(twox_64_concat) T::AccountId => Voting, T::AccountId, T::BlockNumber>; - - /// Who is able to vote for whom. Value is the fund-holding account, key is the - /// vote-transaction-sending account. - // TODO: Refactor proxy into its own pallet. - // https://github.com/paritytech/substrate/issues/5322 - pub Proxy get(fn proxy): map hasher(twox_64_concat) T::AccountId => Option>; - - /// Accounts for which there are locks in action which may be removed at some point in the - /// future. The value is the block number at which the lock expires and may be removed. - pub Locks get(locks): map hasher(twox_64_concat) T::AccountId => Option; - - /// True if the last referendum tabled was submitted externally. False if it was a public - /// proposal. - // TODO: There should be any number of tabling origins, not just public and "external" (council). - // https://github.com/paritytech/substrate/issues/5322 - pub LastTabledWasExternal: bool; - - /// The referendum to be tabled whenever it would be valid to table an external proposal. 
- /// This happens when a referendum needs to be tabled and one of two conditions are met: - /// - `LastTabledWasExternal` is `false`; or - /// - `PublicProps` is empty. - pub NextExternal: Option<(T::Hash, VoteThreshold)>; - - /// A record of who vetoed what. Maps proposal hash to a possible existent block number - /// (until when it may not be resubmitted) and who vetoed it. - pub Blacklist get(fn blacklist): - map hasher(identity) T::Hash => Option<(T::BlockNumber, Vec)>; - - /// Record of all proposals that have been subject to emergency cancellation. - pub Cancellations: map hasher(identity) T::Hash => bool; - } + trait Store for Module as Democracy { + // TODO: Refactor public proposal queue into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 + /// The number of (public) proposals that have been made so far. + pub PublicPropCount get(fn public_prop_count) build(|_| 0 as PropIndex) : PropIndex; + /// The public proposals. Unsorted. The second item is the proposal's hash. + pub PublicProps get(fn public_props): Vec<(PropIndex, T::Hash, T::AccountId)>; + /// Those who have locked a deposit. + pub DepositOf get(fn deposit_of): + map hasher(twox_64_concat) PropIndex => Option<(BalanceOf, Vec)>; + + /// Map of hashes to the proposal preimage, along with who registered it and their deposit. + /// The block number is the block at which it was deposited. + // TODO: Refactor Preimages into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 + pub Preimages: + map hasher(identity) T::Hash + => Option, T::BlockNumber>>; + + /// The next free referendum index, aka the number of referenda started so far. + pub ReferendumCount get(fn referendum_count) build(|_| 0 as ReferendumIndex): ReferendumIndex; + /// The lowest referendum index representing an unbaked referendum. Equal to + /// `ReferendumCount` if there isn't a unbaked referendum. 
+ pub LowestUnbaked get(fn lowest_unbaked) build(|_| 0 as ReferendumIndex): ReferendumIndex; + + /// Information concerning any given referendum. + pub ReferendumInfoOf get(fn referendum_info): + map hasher(twox_64_concat) ReferendumIndex + => Option>>; + + /// All votes for a particular voter. We store the balance for the number of votes that we + /// have recorded. The second item is the total amount of delegations, that will be added. + pub VotingOf: map hasher(twox_64_concat) T::AccountId => Voting, T::AccountId, T::BlockNumber>; + + /// Who is able to vote for whom. Value is the fund-holding account, key is the + /// vote-transaction-sending account. + // TODO: Refactor proxy into its own pallet. + // https://github.com/paritytech/substrate/issues/5322 + pub Proxy get(fn proxy): map hasher(twox_64_concat) T::AccountId => Option>; + + /// Accounts for which there are locks in action which may be removed at some point in the + /// future. The value is the block number at which the lock expires and may be removed. + pub Locks get(locks): map hasher(twox_64_concat) T::AccountId => Option; + + /// True if the last referendum tabled was submitted externally. False if it was a public + /// proposal. + // TODO: There should be any number of tabling origins, not just public and "external" (council). + // https://github.com/paritytech/substrate/issues/5322 + pub LastTabledWasExternal: bool; + + /// The referendum to be tabled whenever it would be valid to table an external proposal. + /// This happens when a referendum needs to be tabled and one of two conditions are met: + /// - `LastTabledWasExternal` is `false`; or + /// - `PublicProps` is empty. + pub NextExternal: Option<(T::Hash, VoteThreshold)>; + + /// A record of who vetoed what. Maps proposal hash to a possible existent block number + /// (until when it may not be resubmitted) and who vetoed it. 
+ pub Blacklist get(fn blacklist): + map hasher(identity) T::Hash => Option<(T::BlockNumber, Vec)>; + + /// Record of all proposals that have been subject to emergency cancellation. + pub Cancellations: map hasher(identity) T::Hash => bool; + } } decl_event! { - pub enum Event where - Balance = BalanceOf, - ::AccountId, - ::Hash, - ::BlockNumber, - { - /// A motion has been proposed by a public account. - Proposed(PropIndex, Balance), - /// A public proposal has been tabled for referendum vote. - Tabled(PropIndex, Balance, Vec), - /// An external proposal has been tabled. - ExternalTabled, - /// A referendum has begun. - Started(ReferendumIndex, VoteThreshold), - /// A proposal has been approved by referendum. - Passed(ReferendumIndex), - /// A proposal has been rejected by referendum. - NotPassed(ReferendumIndex), - /// A referendum has been cancelled. - Cancelled(ReferendumIndex), - /// A proposal has been enacted. - Executed(ReferendumIndex, bool), - /// An account has delegated their vote to another account. - Delegated(AccountId, AccountId), - /// An account has cancelled a previous delegation operation. - Undelegated(AccountId), - /// An external proposal has been vetoed. - Vetoed(AccountId, Hash, BlockNumber), - /// A proposal's preimage was noted, and the deposit taken. - PreimageNoted(Hash, AccountId, Balance), - /// A proposal preimage was removed and used (the deposit was returned). - PreimageUsed(Hash, AccountId, Balance), - /// A proposal could not be executed because its preimage was invalid. - PreimageInvalid(Hash, ReferendumIndex), - /// A proposal could not be executed because its preimage was missing. - PreimageMissing(Hash, ReferendumIndex), - /// A registered preimage was removed and the deposit collected by the reaper (last item). - PreimageReaped(Hash, AccountId, Balance, AccountId), - /// An account has been unlocked successfully. 
- Unlocked(AccountId), - } + pub enum Event where + Balance = BalanceOf, + ::AccountId, + ::Hash, + ::BlockNumber, + { + /// A motion has been proposed by a public account. + Proposed(PropIndex, Balance), + /// A public proposal has been tabled for referendum vote. + Tabled(PropIndex, Balance, Vec), + /// An external proposal has been tabled. + ExternalTabled, + /// A referendum has begun. + Started(ReferendumIndex, VoteThreshold), + /// A proposal has been approved by referendum. + Passed(ReferendumIndex), + /// A proposal has been rejected by referendum. + NotPassed(ReferendumIndex), + /// A referendum has been cancelled. + Cancelled(ReferendumIndex), + /// A proposal has been enacted. + Executed(ReferendumIndex, bool), + /// An account has delegated their vote to another account. + Delegated(AccountId, AccountId), + /// An account has cancelled a previous delegation operation. + Undelegated(AccountId), + /// An external proposal has been vetoed. + Vetoed(AccountId, Hash, BlockNumber), + /// A proposal's preimage was noted, and the deposit taken. + PreimageNoted(Hash, AccountId, Balance), + /// A proposal preimage was removed and used (the deposit was returned). + PreimageUsed(Hash, AccountId, Balance), + /// A proposal could not be executed because its preimage was invalid. + PreimageInvalid(Hash, ReferendumIndex), + /// A proposal could not be executed because its preimage was missing. + PreimageMissing(Hash, ReferendumIndex), + /// A registered preimage was removed and the deposit collected by the reaper (last item). + PreimageReaped(Hash, AccountId, Balance, AccountId), + /// An account has been unlocked successfully. + Unlocked(AccountId), + } } decl_error! 
{ - pub enum Error for Module { - /// Value too low - ValueLow, - /// Proposal does not exist - ProposalMissing, - /// Not a proxy - NotProxy, - /// Unknown index - BadIndex, - /// Cannot cancel the same proposal twice - AlreadyCanceled, - /// Proposal already made - DuplicateProposal, - /// Proposal still blacklisted - ProposalBlacklisted, - /// Next external proposal not simple majority - NotSimpleMajority, - /// Invalid hash - InvalidHash, - /// No external proposal - NoProposal, - /// Identity may not veto a proposal twice - AlreadyVetoed, - /// Already a proxy - AlreadyProxy, - /// Wrong proxy - WrongProxy, - /// Not delegated - NotDelegated, - /// Preimage already noted - DuplicatePreimage, - /// Not imminent - NotImminent, - /// Too early - TooEarly, - /// Imminent - Imminent, - /// Preimage not found - PreimageMissing, - /// Vote given for invalid referendum - ReferendumInvalid, - /// Invalid preimage - PreimageInvalid, - /// No proposals waiting - NoneWaiting, - /// The target account does not have a lock. - NotLocked, - /// The lock on the account to be unlocked has not yet expired. - NotExpired, - /// A proxy-pairing was attempted to an account that was not open. - NotOpen, - /// A proxy-pairing was attempted to an account that was open to another account. - WrongOpen, - /// A proxy-de-pairing was attempted to an account that was not active. - NotActive, - /// The given account did not vote on the referendum. - NotVoter, - /// The actor has no permission to conduct the action. - NoPermission, - /// The account is already delegating. - AlreadyDelegating, - /// An unexpected integer overflow occurred. - Overflow, - /// An unexpected integer underflow occurred. - Underflow, - /// Too high a balance was provided that the account cannot afford. - InsufficientFunds, - /// The account is not currently delegating. 
- NotDelegating, - /// The account currently has votes attached to it and the operation cannot succeed until - /// these are removed, either through `unvote` or `reap_vote`. - VotesExist, - /// The instant referendum origin is currently disallowed. - InstantNotAllowed, - /// Delegation to oneself makes no sense. - Nonsense, - } + pub enum Error for Module { + /// Value too low + ValueLow, + /// Proposal does not exist + ProposalMissing, + /// Not a proxy + NotProxy, + /// Unknown index + BadIndex, + /// Cannot cancel the same proposal twice + AlreadyCanceled, + /// Proposal already made + DuplicateProposal, + /// Proposal still blacklisted + ProposalBlacklisted, + /// Next external proposal not simple majority + NotSimpleMajority, + /// Invalid hash + InvalidHash, + /// No external proposal + NoProposal, + /// Identity may not veto a proposal twice + AlreadyVetoed, + /// Already a proxy + AlreadyProxy, + /// Wrong proxy + WrongProxy, + /// Not delegated + NotDelegated, + /// Preimage already noted + DuplicatePreimage, + /// Not imminent + NotImminent, + /// Too early + TooEarly, + /// Imminent + Imminent, + /// Preimage not found + PreimageMissing, + /// Vote given for invalid referendum + ReferendumInvalid, + /// Invalid preimage + PreimageInvalid, + /// No proposals waiting + NoneWaiting, + /// The target account does not have a lock. + NotLocked, + /// The lock on the account to be unlocked has not yet expired. + NotExpired, + /// A proxy-pairing was attempted to an account that was not open. + NotOpen, + /// A proxy-pairing was attempted to an account that was open to another account. + WrongOpen, + /// A proxy-de-pairing was attempted to an account that was not active. + NotActive, + /// The given account did not vote on the referendum. + NotVoter, + /// The actor has no permission to conduct the action. + NoPermission, + /// The account is already delegating. + AlreadyDelegating, + /// An unexpected integer overflow occurred. 
+ Overflow, + /// An unexpected integer underflow occurred. + Underflow, + /// Too high a balance was provided that the account cannot afford. + InsufficientFunds, + /// The account is not currently delegating. + NotDelegating, + /// The account currently has votes attached to it and the operation cannot succeed until + /// these are removed, either through `unvote` or `reap_vote`. + VotesExist, + /// The instant referendum origin is currently disallowed. + InstantNotAllowed, + /// Delegation to oneself makes no sense. + Nonsense, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum period of locking and the period between a proposal being approved and enacted. - /// - /// It should generally be a little more than the unstake period to ensure that - /// voting stakers have an opportunity to remove themselves from the system in the case where - /// they are on the losing side of a vote. - const EnactmentPeriod: T::BlockNumber = T::EnactmentPeriod::get(); - - /// How often (in blocks) new public referenda are launched. - const LaunchPeriod: T::BlockNumber = T::LaunchPeriod::get(); - - /// How often (in blocks) to check for new votes. - const VotingPeriod: T::BlockNumber = T::VotingPeriod::get(); - - /// The minimum amount to be used as a deposit for a public referendum proposal. - const MinimumDeposit: BalanceOf = T::MinimumDeposit::get(); - - /// Minimum voting period allowed for an emergency referendum. - const FastTrackVotingPeriod: T::BlockNumber = T::FastTrackVotingPeriod::get(); - - /// Period in blocks where an external proposal may not be re-submitted after being vetoed. - const CooloffPeriod: T::BlockNumber = T::CooloffPeriod::get(); - - /// The amount of balance that must be deposited per byte of preimage stored. 
- const PreimageByteDeposit: BalanceOf = T::PreimageByteDeposit::get(); - - fn deposit_event() = default; - - fn on_runtime_upgrade() -> Weight { - Self::migrate(); - - MINIMUM_WEIGHT - } - - /// Propose a sensitive action to be taken. - /// - /// The dispatch origin of this call must be _Signed_ and the sender must - /// have funds to cover the deposit. - /// - /// - `proposal_hash`: The hash of the proposal preimage. - /// - `value`: The amount of deposit (must be at least `MinimumDeposit`). - /// - /// Emits `Proposed`. - /// - /// # - /// - `O(P)` - /// - P is the number proposals in the `PublicProps` vec. - /// - Two DB changes, one DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] - fn propose(origin, - proposal_hash: T::Hash, - #[compact] value: BalanceOf - ) { - let who = ensure_signed(origin)?; - ensure!(value >= T::MinimumDeposit::get(), Error::::ValueLow); - T::Currency::reserve(&who, value)?; - - let index = Self::public_prop_count(); - PublicPropCount::put(index + 1); - >::insert(index, (value, &[&who][..])); - - let new_prop = (index, proposal_hash, who); - >::append_or_put(&[Ref::from(&new_prop)][..]); - - Self::deposit_event(RawEvent::Proposed(index, value)); - } - - /// Signals agreement with a particular proposal. - /// - /// The dispatch origin of this call must be _Signed_ and the sender - /// must have funds to cover the deposit, equal to the original deposit. - /// - /// - `proposal`: The index of the proposal to second. - /// - /// # - /// - `O(S)`. - /// - S is the number of seconds a proposal already has. - /// - One DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] - fn second(origin, #[compact] proposal: PropIndex) { - let who = ensure_signed(origin)?; - let mut deposit = Self::deposit_of(proposal) - .ok_or(Error::::ProposalMissing)?; - T::Currency::reserve(&who, deposit.0)?; - deposit.1.push(who); - >::insert(proposal, deposit); - } - - /// Vote in a referendum. 
If `vote.is_aye()`, the vote is to enact the proposal; - /// otherwise it is a vote to keep the status quo. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `ref_index`: The index of the referendum to vote for. - /// - `vote`: The vote configuration. - /// - /// # - /// - `O(R)`. - /// - R is the number of referendums the voter has voted on. - /// - One DB change, one DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] - fn vote(origin, - #[compact] ref_index: ReferendumIndex, - vote: AccountVote>, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::try_vote(&who, ref_index, vote) - } - - /// Vote in a referendum on behalf of a stash. If `vote.is_aye()`, the vote is to enact - /// the proposal; otherwise it is a vote to keep the status quo. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `ref_index`: The index of the referendum to proxy vote for. - /// - `vote`: The vote configuration. - /// - /// # - /// - `O(1)`. - /// - One DB change, one DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] - fn proxy_vote(origin, - #[compact] ref_index: ReferendumIndex, - vote: AccountVote>, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - let voter = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; - Self::try_vote(&voter, ref_index, vote) - } - - /// Schedule an emergency cancellation of a referendum. Cannot happen twice to the same - /// referendum. - /// - /// The dispatch origin of this call must be `CancellationOrigin`. - /// - /// -`ref_index`: The index of the referendum to cancel. - /// - /// # - /// - `O(1)`. 
- /// # - #[weight = SimpleDispatchInfo::FixedOperational(500_000_000)] - fn emergency_cancel(origin, ref_index: ReferendumIndex) { - T::CancellationOrigin::ensure_origin(origin)?; - - let status = Self::referendum_status(ref_index)?; - let h = status.proposal_hash; - ensure!(!>::contains_key(h), Error::::AlreadyCanceled); - - >::insert(h, true); - Self::internal_cancel_referendum(ref_index); - } - - /// Schedule a referendum to be tabled once it is legal to schedule an external - /// referendum. - /// - /// The dispatch origin of this call must be `ExternalOrigin`. - /// - /// - `proposal_hash`: The preimage hash of the proposal. - /// - /// # - /// - `O(1)`. - /// - One DB change. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] - fn external_propose(origin, proposal_hash: T::Hash) { - T::ExternalOrigin::ensure_origin(origin)?; - ensure!(!>::exists(), Error::::DuplicateProposal); - if let Some((until, _)) = >::get(proposal_hash) { - ensure!( - >::block_number() >= until, - Error::::ProposalBlacklisted, - ); - } - >::put((proposal_hash, VoteThreshold::SuperMajorityApprove)); - } - - /// Schedule a majority-carries referendum to be tabled next once it is legal to schedule - /// an external referendum. - /// - /// The dispatch of this call must be `ExternalMajorityOrigin`. - /// - /// - `proposal_hash`: The preimage hash of the proposal. - /// - /// Unlike `external_propose`, blacklisting has no effect on this and it may replace a - /// pre-scheduled `external_propose` call. - /// - /// # - /// - `O(1)`. - /// - One DB change. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] - fn external_propose_majority(origin, proposal_hash: T::Hash) { - T::ExternalMajorityOrigin::ensure_origin(origin)?; - >::put((proposal_hash, VoteThreshold::SimpleMajority)); - } - - /// Schedule a negative-turnout-bias referendum to be tabled next once it is legal to - /// schedule an external referendum. 
- /// - /// The dispatch of this call must be `ExternalDefaultOrigin`. - /// - /// - `proposal_hash`: The preimage hash of the proposal. - /// - /// Unlike `external_propose`, blacklisting has no effect on this and it may replace a - /// pre-scheduled `external_propose` call. - /// - /// # - /// - `O(1)`. - /// - One DB change. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] - fn external_propose_default(origin, proposal_hash: T::Hash) { - T::ExternalDefaultOrigin::ensure_origin(origin)?; - >::put((proposal_hash, VoteThreshold::SuperMajorityAgainst)); - } - - /// Schedule the currently externally-proposed majority-carries referendum to be tabled - /// immediately. If there is no externally-proposed referendum currently, or if there is one - /// but it is not a majority-carries referendum then it fails. - /// - /// The dispatch of this call must be `FastTrackOrigin`. - /// - /// - `proposal_hash`: The hash of the current external proposal. - /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to - /// `FastTrackVotingPeriod` if too low. - /// - `delay`: The number of block after voting has ended in approval and this should be - /// enacted. This doesn't have a minimum amount. - /// - /// Emits `Started`. - /// - /// # - /// - One DB clear. - /// - One DB change. - /// - One extra DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] - fn fast_track(origin, - proposal_hash: T::Hash, - voting_period: T::BlockNumber, - delay: T::BlockNumber, - ) { - // Rather complicated bit of code to ensure that either: - // - `voting_period` is at least `FastTrackVotingPeriod` and `origin` is `FastTrackOrigin`; or - // - `InstantAllowed` is `true` and `origin` is `InstantOrigin`. 
- let maybe_ensure_instant = if voting_period < T::FastTrackVotingPeriod::get() { - Some(origin) - } else { - if let Err(origin) = T::FastTrackOrigin::try_origin(origin) { - Some(origin) - } else { - None - } - }; - if let Some(ensure_instant) = maybe_ensure_instant { - T::InstantOrigin::ensure_origin(ensure_instant)?; - ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); - } - - let (e_proposal_hash, threshold) = >::get() - .ok_or(Error::::ProposalMissing)?; - ensure!( - threshold != VoteThreshold::SuperMajorityApprove, - Error::::NotSimpleMajority, - ); - ensure!(proposal_hash == e_proposal_hash, Error::::InvalidHash); - - >::kill(); - let now = >::block_number(); - Self::inject_referendum(now + voting_period, proposal_hash, threshold, delay); - } - - /// Veto and blacklist the external proposal hash. - /// - /// The dispatch origin of this call must be `VetoOrigin`. - /// - /// - `proposal_hash`: The preimage hash of the proposal to veto and blacklist. - /// - /// Emits `Vetoed`. - /// - /// # - /// - Two DB entries. - /// - One DB clear. - /// - Performs a binary search on `existing_vetoers` which should not - /// be very large. 
- /// - O(log v), v is number of `existing_vetoers` - /// # - #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] - fn veto_external(origin, proposal_hash: T::Hash) { - let who = T::VetoOrigin::ensure_origin(origin)?; - - if let Some((e_proposal_hash, _)) = >::get() { - ensure!(proposal_hash == e_proposal_hash, Error::::ProposalMissing); - } else { - Err(Error::::NoProposal)?; - } - - let mut existing_vetoers = >::get(&proposal_hash) - .map(|pair| pair.1) - .unwrap_or_else(Vec::new); - let insert_position = existing_vetoers.binary_search(&who) - .err().ok_or(Error::::AlreadyVetoed)?; - - existing_vetoers.insert(insert_position, who.clone()); - let until = >::block_number() + T::CooloffPeriod::get(); - >::insert(&proposal_hash, (until, existing_vetoers)); - - Self::deposit_event(RawEvent::Vetoed(who, proposal_hash, until)); - >::kill(); - } - - /// Remove a referendum. - /// - /// The dispatch origin of this call must be _Root_. - /// - /// - `ref_index`: The index of the referendum to cancel. - /// - /// # - /// - `O(1)`. - /// # - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn cancel_referendum(origin, #[compact] ref_index: ReferendumIndex) { - ensure_root(origin)?; - Self::internal_cancel_referendum(ref_index); - } - - /// Cancel a proposal queued for enactment. - /// - /// The dispatch origin of this call must be _Root_. - /// - /// - `which`: The index of the referendum to cancel. - /// - /// # - /// - One DB change. - /// - O(d) where d is the items in the dispatch queue. - /// # - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn cancel_queued(origin, which: ReferendumIndex) { - ensure_root(origin)?; - T::Scheduler::cancel_named((DEMOCRACY_ID, which)) - .map_err(|_| Error::::ProposalMissing)?; - } - - fn on_initialize(n: T::BlockNumber) -> Weight { - if let Err(e) = Self::begin_block(n) { - sp_runtime::print(e); - } - - MINIMUM_WEIGHT - } - - /// Specify a proxy that is already open to us. Called by the stash. 
- /// - /// NOTE: Used to be called `set_proxy`. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `proxy`: The account that will be activated as proxy. - /// - /// # - /// - One extra DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn activate_proxy(origin, proxy: T::AccountId) { - let who = ensure_signed(origin)?; - Proxy::::try_mutate(&proxy, |a| match a.take() { - None => Err(Error::::NotOpen), - Some(ProxyState::Active(_)) => Err(Error::::AlreadyProxy), - Some(ProxyState::Open(x)) if &x == &who => { - *a = Some(ProxyState::Active(who)); - Ok(()) - } - Some(ProxyState::Open(_)) => Err(Error::::WrongOpen), - })?; - } - - /// Clear the proxy. Called by the proxy. - /// - /// NOTE: Used to be called `resign_proxy`. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// # - /// - One DB clear. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn close_proxy(origin) { - let who = ensure_signed(origin)?; - Proxy::::mutate(&who, |a| { - if a.is_some() { - system::Module::::dec_ref(&who); - } - *a = None; - }); - } - - /// Deactivate the proxy, but leave open to this account. Called by the stash. - /// - /// The proxy must already be active. - /// - /// NOTE: Used to be called `remove_proxy`. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `proxy`: The account that will be deactivated as proxy. - /// - /// # - /// - One DB clear. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn deactivate_proxy(origin, proxy: T::AccountId) { - let who = ensure_signed(origin)?; - Proxy::::try_mutate(&proxy, |a| match a.take() { - None | Some(ProxyState::Open(_)) => Err(Error::::NotActive), - Some(ProxyState::Active(x)) if &x == &who => { - *a = Some(ProxyState::Open(who)); - Ok(()) - } - Some(ProxyState::Active(_)) => Err(Error::::WrongProxy), - })?; - } - - /// Delegate the voting power (with some given conviction) of the sending account. 
- /// - /// The balance delegated is locked for as long as it's delegated, and thereafter for the - /// time appropriate for the conviction's lock period. - /// - /// The dispatch origin of this call must be _Signed_, and the signing account must either: - /// - be delegating already; or - /// - have no voting activity (if there is, then it will need to be removed/consolidated - /// through `reap_vote` or `unvote`). - /// - /// - `to`: The account whose voting the `target` account's voting power will follow. - /// - `conviction`: The conviction that will be attached to the delegated votes. When the - /// account is undelegated, the funds will be locked for the corresponding period. - /// - `balance`: The amount of the account's balance to be used in delegating. This must - /// not be more than the account's current balance. - /// - /// Emits `Delegated`. - /// - /// # - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - pub fn delegate(origin, to: T::AccountId, conviction: Conviction, balance: BalanceOf) { - let who = ensure_signed(origin)?; - Self::try_delegate(who, to, conviction, balance)?; - } - - /// Undelegate the voting power of the sending account. - /// - /// Tokens may be unlocked following once an amount of time consistent with the lock period - /// of the conviction with which the delegation was issued. - /// - /// The dispatch origin of this call must be _Signed_ and the signing account must be - /// currently delegating. - /// - /// Emits `Undelegated`. - /// - /// # - /// - O(1). - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn undelegate(origin) { - let who = ensure_signed(origin)?; - Self::try_undelegate(who)?; - } - - /// Clears all public proposals. - /// - /// The dispatch origin of this call must be _Root_. - /// - /// # - /// - `O(1)`. - /// - One DB clear. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn clear_public_proposals(origin) { - ensure_root(origin)?; - - >::kill(); - } - - /// Register the preimage for an upcoming proposal. This doesn't require the proposal to be - /// in the dispatch queue but does require a deposit, returned once enacted. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `encoded_proposal`: The preimage of a proposal. - /// - /// Emits `PreimageNoted`. - /// - /// # - /// - Dependent on the size of `encoded_proposal` but protected by a - /// required deposit. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn note_preimage(origin, encoded_proposal: Vec) { - let who = ensure_signed(origin)?; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - ensure!(!>::contains_key(&proposal_hash), Error::::DuplicatePreimage); - - let deposit = >::from(encoded_proposal.len() as u32) - .saturating_mul(T::PreimageByteDeposit::get()); - T::Currency::reserve(&who, deposit)?; - - let now = >::block_number(); - let a = PreimageStatus::Available { - data: encoded_proposal, - provider: who.clone(), - deposit, - since: now, - expiry: None, - }; - >::insert(proposal_hash, a); - - Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, deposit)); - } - - /// Register the preimage for an upcoming proposal. This requires the proposal to be - /// in the dispatch queue. No deposit is needed. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `encoded_proposal`: The preimage of a proposal. - /// - /// Emits `PreimageNoted`. - /// - /// # - /// - Dependent on the size of `encoded_proposal` and length of dispatch queue. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn note_imminent_preimage(origin, encoded_proposal: Vec) { - let who = ensure_signed(origin)?; - let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); - let status = Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?; - let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?; - - let now = >::block_number(); - let free = >::zero(); - let a = PreimageStatus::Available { - data: encoded_proposal, - provider: who.clone(), - deposit: Zero::zero(), - since: now, - expiry: Some(expiry), - }; - >::insert(proposal_hash, a); - - Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, free)); - } - - /// Remove an expired proposal preimage and collect the deposit. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `proposal_hash`: The preimage hash of a proposal. - /// - /// This will only work after `VotingPeriod` blocks from the time that the preimage was - /// noted, if it's the same account doing it. If it's a different account, then it'll only - /// work an additional `EnactmentPeriod` later. - /// - /// Emits `PreimageReaped`. - /// - /// # - /// - One DB clear. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn reap_preimage(origin, proposal_hash: T::Hash) { - let who = ensure_signed(origin)?; - let (provider, deposit, since, expiry) = >::get(&proposal_hash) - .and_then(|m| match m { - PreimageStatus::Available { provider, deposit, since, expiry, .. 
} - => Some((provider, deposit, since, expiry)), - _ => None, - }).ok_or(Error::::PreimageMissing)?; - - let now = >::block_number(); - let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); - let additional = if who == provider { Zero::zero() } else { enactment }; - ensure!(now >= since + voting + additional, Error::::TooEarly); - ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); - - let _ = T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); - >::remove(&proposal_hash); - Self::deposit_event(RawEvent::PreimageReaped(proposal_hash, provider, deposit, who)); - } - - /// Unlock tokens that have an expired lock. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `target`: The account to remove the lock on. - /// - /// # - /// - `O(1)`. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn unlock(origin, target: T::AccountId) { - ensure_signed(origin)?; - Self::update_lock(&target); - } - - /// Become a proxy. - /// - /// This must be called prior to a later `activate_proxy`. - /// - /// Origin must be a Signed. - /// - /// - `target`: The account whose votes will later be proxied. - /// - /// `close_proxy` must be called before the account can be destroyed. - /// - /// # - /// - One extra DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn open_proxy(origin, target: T::AccountId) { - let who = ensure_signed(origin)?; - Proxy::::mutate(&who, |a| { - if a.is_none() { - system::Module::::inc_ref(&who); - } - *a = Some(ProxyState::Open(target)); - }); - } - - /// Remove a vote for a referendum. 
- /// - /// If: - /// - the referendum was cancelled, or - /// - the referendum is ongoing, or - /// - the referendum has ended such that - /// - the vote of the account was in opposition to the result; or - /// - there was no conviction to the account's vote; or - /// - the account made a split vote - /// ...then the vote is removed cleanly and a following call to `unlock` may result in more - /// funds being available. - /// - /// If, however, the referendum has ended and: - /// - it finished corresponding to the vote of the account, and - /// - the account made a standard vote with conviction, and - /// - the lock period of the conviction is not over - /// ...then the lock will be aggregated into the overall account's lock, which may involve - /// *overlocking* (where the two locks are combined into a single lock that is the maximum - /// of both the amount locked and the time is it locked for). - /// - /// The dispatch origin of this call must be _Signed_, and the signer must have a vote - /// registered for referendum `index`. - /// - /// - `index`: The index of referendum of the vote to be removed. - /// - /// # - /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn remove_vote(origin, index: ReferendumIndex) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::try_remove_vote(&who, index, UnvoteScope::Any) - } - - /// Remove a vote for a referendum. - /// - /// If the `target` is equal to the signer, then this function is exactly equivalent to - /// `remove_vote`. If not equal to the signer, then the vote must have expired, - /// either because the referendum was cancelled, because the voter lost the referendum or - /// because the conviction period is over. - /// - /// The dispatch origin of this call must be _Signed_. - /// - /// - `target`: The account of the vote to be removed; this account must have voted for - /// referendum `index`. 
- /// - `index`: The index of referendum of the vote to be removed. - /// - /// # - /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn remove_other_vote(origin, target: T::AccountId, index: ReferendumIndex) -> DispatchResult { - let who = ensure_signed(origin)?; - let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; - Self::try_remove_vote(&target, index, scope)?; - Ok(()) - } - - /// Delegate the voting power (with some given conviction) of a proxied account. - /// - /// The balance delegated is locked for as long as it's delegated, and thereafter for the - /// time appropriate for the conviction's lock period. - /// - /// The dispatch origin of this call must be _Signed_, and the signing account must have - /// been set as the proxy account for `target`. - /// - /// - `target`: The account whole voting power shall be delegated and whose balance locked. - /// This account must either: - /// - be delegating already; or - /// - have no voting activity (if there is, then it will need to be removed/consolidated - /// through `reap_vote` or `unvote`). - /// - `to`: The account whose voting the `target` account's voting power will follow. - /// - `conviction`: The conviction that will be attached to the delegated votes. When the - /// account is undelegated, the funds will be locked for the corresponding period. - /// - `balance`: The amount of the account's balance to be used in delegating. This must - /// not be more than the account's current balance. - /// - /// Emits `Delegated`. 
- /// - /// # - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - pub fn proxy_delegate(origin, - to: T::AccountId, - conviction: Conviction, - balance: BalanceOf, - ) { - let who = ensure_signed(origin)?; - let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; - Self::try_delegate(target, to, conviction, balance)?; - } - - /// Undelegate the voting power of a proxied account. - /// - /// Tokens may be unlocked following once an amount of time consistent with the lock period - /// of the conviction with which the delegation was issued. - /// - /// The dispatch origin of this call must be _Signed_ and the signing account must be a - /// proxy for some other account which is currently delegating. - /// - /// Emits `Undelegated`. - /// - /// # - /// - O(1). - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn proxy_undelegate(origin) { - let who = ensure_signed(origin)?; - let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; - Self::try_undelegate(target)?; - } - - /// Remove a proxied vote for a referendum. - /// - /// Exactly equivalent to `remove_vote` except that it operates on the account that the - /// sender is a proxy for. - /// - /// The dispatch origin of this call must be _Signed_ and the signing account must be a - /// proxy for some other account which has a registered vote for the referendum of `index`. - /// - /// - `index`: The index of referendum of the vote to be removed. - /// - /// # - /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn proxy_remove_vote(origin, index: ReferendumIndex) -> DispatchResult { - let who = ensure_signed(origin)?; - let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; - Self::try_remove_vote(&target, index, UnvoteScope::Any) - } - - /// Enact a proposal from a referendum. 
For now we just make the weight be the maximum. - #[weight = SimpleDispatchInfo::MaxNormal] - fn enact_proposal(origin, proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { - ensure_root(origin)?; - Self::do_enact_proposal(proposal_hash, index) - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + /// The minimum period of locking and the period between a proposal being approved and enacted. + /// + /// It should generally be a little more than the unstake period to ensure that + /// voting stakers have an opportunity to remove themselves from the system in the case where + /// they are on the losing side of a vote. + const EnactmentPeriod: T::BlockNumber = T::EnactmentPeriod::get(); + + /// How often (in blocks) new public referenda are launched. + const LaunchPeriod: T::BlockNumber = T::LaunchPeriod::get(); + + /// How often (in blocks) to check for new votes. + const VotingPeriod: T::BlockNumber = T::VotingPeriod::get(); + + /// The minimum amount to be used as a deposit for a public referendum proposal. + const MinimumDeposit: BalanceOf = T::MinimumDeposit::get(); + + /// Minimum voting period allowed for an emergency referendum. + const FastTrackVotingPeriod: T::BlockNumber = T::FastTrackVotingPeriod::get(); + + /// Period in blocks where an external proposal may not be re-submitted after being vetoed. + const CooloffPeriod: T::BlockNumber = T::CooloffPeriod::get(); + + /// The amount of balance that must be deposited per byte of preimage stored. + const PreimageByteDeposit: BalanceOf = T::PreimageByteDeposit::get(); + + fn deposit_event() = default; + + fn on_runtime_upgrade() -> Weight { + Self::migrate(); + + MINIMUM_WEIGHT + } + + /// Propose a sensitive action to be taken. + /// + /// The dispatch origin of this call must be _Signed_ and the sender must + /// have funds to cover the deposit. + /// + /// - `proposal_hash`: The hash of the proposal preimage. 
+ /// - `value`: The amount of deposit (must be at least `MinimumDeposit`). + /// + /// Emits `Proposed`. + /// + /// # + /// - `O(P)` + /// - P is the number proposals in the `PublicProps` vec. + /// - Two DB changes, one DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] + fn propose(origin, + proposal_hash: T::Hash, + #[compact] value: BalanceOf + ) { + let who = ensure_signed(origin)?; + ensure!(value >= T::MinimumDeposit::get(), Error::::ValueLow); + T::Currency::reserve(&who, value)?; + + let index = Self::public_prop_count(); + PublicPropCount::put(index + 1); + >::insert(index, (value, &[&who][..])); + + let new_prop = (index, proposal_hash, who); + >::append_or_put(&[Ref::from(&new_prop)][..]); + + Self::deposit_event(RawEvent::Proposed(index, value)); + } + + /// Signals agreement with a particular proposal. + /// + /// The dispatch origin of this call must be _Signed_ and the sender + /// must have funds to cover the deposit, equal to the original deposit. + /// + /// - `proposal`: The index of the proposal to second. + /// + /// # + /// - `O(S)`. + /// - S is the number of seconds a proposal already has. + /// - One DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] + fn second(origin, #[compact] proposal: PropIndex) { + let who = ensure_signed(origin)?; + let mut deposit = Self::deposit_of(proposal) + .ok_or(Error::::ProposalMissing)?; + T::Currency::reserve(&who, deposit.0)?; + deposit.1.push(who); + >::insert(proposal, deposit); + } + + /// Vote in a referendum. If `vote.is_aye()`, the vote is to enact the proposal; + /// otherwise it is a vote to keep the status quo. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `ref_index`: The index of the referendum to vote for. + /// - `vote`: The vote configuration. + /// + /// # + /// - `O(R)`. + /// - R is the number of referendums the voter has voted on. + /// - One DB change, one DB entry. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] + fn vote(origin, + #[compact] ref_index: ReferendumIndex, + vote: AccountVote>, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::try_vote(&who, ref_index, vote) + } + + /// Vote in a referendum on behalf of a stash. If `vote.is_aye()`, the vote is to enact + /// the proposal; otherwise it is a vote to keep the status quo. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `ref_index`: The index of the referendum to proxy vote for. + /// - `vote`: The vote configuration. + /// + /// # + /// - `O(1)`. + /// - One DB change, one DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] + fn proxy_vote(origin, + #[compact] ref_index: ReferendumIndex, + vote: AccountVote>, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + let voter = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::try_vote(&voter, ref_index, vote) + } + + /// Schedule an emergency cancellation of a referendum. Cannot happen twice to the same + /// referendum. + /// + /// The dispatch origin of this call must be `CancellationOrigin`. + /// + /// -`ref_index`: The index of the referendum to cancel. + /// + /// # + /// - `O(1)`. + /// # + #[weight = SimpleDispatchInfo::FixedOperational(500_000_000)] + fn emergency_cancel(origin, ref_index: ReferendumIndex) { + T::CancellationOrigin::ensure_origin(origin)?; + + let status = Self::referendum_status(ref_index)?; + let h = status.proposal_hash; + ensure!(!>::contains_key(h), Error::::AlreadyCanceled); + + >::insert(h, true); + Self::internal_cancel_referendum(ref_index); + } + + /// Schedule a referendum to be tabled once it is legal to schedule an external + /// referendum. + /// + /// The dispatch origin of this call must be `ExternalOrigin`. + /// + /// - `proposal_hash`: The preimage hash of the proposal. + /// + /// # + /// - `O(1)`. + /// - One DB change. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] + fn external_propose(origin, proposal_hash: T::Hash) { + T::ExternalOrigin::ensure_origin(origin)?; + ensure!(!>::exists(), Error::::DuplicateProposal); + if let Some((until, _)) = >::get(proposal_hash) { + ensure!( + >::block_number() >= until, + Error::::ProposalBlacklisted, + ); + } + >::put((proposal_hash, VoteThreshold::SuperMajorityApprove)); + } + + /// Schedule a majority-carries referendum to be tabled next once it is legal to schedule + /// an external referendum. + /// + /// The dispatch of this call must be `ExternalMajorityOrigin`. + /// + /// - `proposal_hash`: The preimage hash of the proposal. + /// + /// Unlike `external_propose`, blacklisting has no effect on this and it may replace a + /// pre-scheduled `external_propose` call. + /// + /// # + /// - `O(1)`. + /// - One DB change. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] + fn external_propose_majority(origin, proposal_hash: T::Hash) { + T::ExternalMajorityOrigin::ensure_origin(origin)?; + >::put((proposal_hash, VoteThreshold::SimpleMajority)); + } + + /// Schedule a negative-turnout-bias referendum to be tabled next once it is legal to + /// schedule an external referendum. + /// + /// The dispatch of this call must be `ExternalDefaultOrigin`. + /// + /// - `proposal_hash`: The preimage hash of the proposal. + /// + /// Unlike `external_propose`, blacklisting has no effect on this and it may replace a + /// pre-scheduled `external_propose` call. + /// + /// # + /// - `O(1)`. + /// - One DB change. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000_000)] + fn external_propose_default(origin, proposal_hash: T::Hash) { + T::ExternalDefaultOrigin::ensure_origin(origin)?; + >::put((proposal_hash, VoteThreshold::SuperMajorityAgainst)); + } + + /// Schedule the currently externally-proposed majority-carries referendum to be tabled + /// immediately. 
If there is no externally-proposed referendum currently, or if there is one + /// but it is not a majority-carries referendum then it fails. + /// + /// The dispatch of this call must be `FastTrackOrigin`. + /// + /// - `proposal_hash`: The hash of the current external proposal. + /// - `voting_period`: The period that is allowed for voting on this proposal. Increased to + /// `FastTrackVotingPeriod` if too low. + /// - `delay`: The number of block after voting has ended in approval and this should be + /// enacted. This doesn't have a minimum amount. + /// + /// Emits `Started`. + /// + /// # + /// - One DB clear. + /// - One DB change. + /// - One extra DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] + fn fast_track(origin, + proposal_hash: T::Hash, + voting_period: T::BlockNumber, + delay: T::BlockNumber, + ) { + // Rather complicated bit of code to ensure that either: + // - `voting_period` is at least `FastTrackVotingPeriod` and `origin` is `FastTrackOrigin`; or + // - `InstantAllowed` is `true` and `origin` is `InstantOrigin`. + let maybe_ensure_instant = if voting_period < T::FastTrackVotingPeriod::get() { + Some(origin) + } else { + if let Err(origin) = T::FastTrackOrigin::try_origin(origin) { + Some(origin) + } else { + None + } + }; + if let Some(ensure_instant) = maybe_ensure_instant { + T::InstantOrigin::ensure_origin(ensure_instant)?; + ensure!(T::InstantAllowed::get(), Error::::InstantNotAllowed); + } + + let (e_proposal_hash, threshold) = >::get() + .ok_or(Error::::ProposalMissing)?; + ensure!( + threshold != VoteThreshold::SuperMajorityApprove, + Error::::NotSimpleMajority, + ); + ensure!(proposal_hash == e_proposal_hash, Error::::InvalidHash); + + >::kill(); + let now = >::block_number(); + Self::inject_referendum(now + voting_period, proposal_hash, threshold, delay); + } + + /// Veto and blacklist the external proposal hash. + /// + /// The dispatch origin of this call must be `VetoOrigin`. 
+ /// + /// - `proposal_hash`: The preimage hash of the proposal to veto and blacklist. + /// + /// Emits `Vetoed`. + /// + /// # + /// - Two DB entries. + /// - One DB clear. + /// - Performs a binary search on `existing_vetoers` which should not + /// be very large. + /// - O(log v), v is number of `existing_vetoers` + /// # + #[weight = SimpleDispatchInfo::FixedNormal(200_000_000)] + fn veto_external(origin, proposal_hash: T::Hash) { + let who = T::VetoOrigin::ensure_origin(origin)?; + + if let Some((e_proposal_hash, _)) = >::get() { + ensure!(proposal_hash == e_proposal_hash, Error::::ProposalMissing); + } else { + Err(Error::::NoProposal)?; + } + + let mut existing_vetoers = >::get(&proposal_hash) + .map(|pair| pair.1) + .unwrap_or_else(Vec::new); + let insert_position = existing_vetoers.binary_search(&who) + .err().ok_or(Error::::AlreadyVetoed)?; + + existing_vetoers.insert(insert_position, who.clone()); + let until = >::block_number() + T::CooloffPeriod::get(); + >::insert(&proposal_hash, (until, existing_vetoers)); + + Self::deposit_event(RawEvent::Vetoed(who, proposal_hash, until)); + >::kill(); + } + + /// Remove a referendum. + /// + /// The dispatch origin of this call must be _Root_. + /// + /// - `ref_index`: The index of the referendum to cancel. + /// + /// # + /// - `O(1)`. + /// # + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn cancel_referendum(origin, #[compact] ref_index: ReferendumIndex) { + ensure_root(origin)?; + Self::internal_cancel_referendum(ref_index); + } + + /// Cancel a proposal queued for enactment. + /// + /// The dispatch origin of this call must be _Root_. + /// + /// - `which`: The index of the referendum to cancel. + /// + /// # + /// - One DB change. + /// - O(d) where d is the items in the dispatch queue. 
+ /// # + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn cancel_queued(origin, which: ReferendumIndex) { + ensure_root(origin)?; + T::Scheduler::cancel_named((DEMOCRACY_ID, which)) + .map_err(|_| Error::::ProposalMissing)?; + } + + fn on_initialize(n: T::BlockNumber) -> Weight { + if let Err(e) = Self::begin_block(n) { + sp_runtime::print(e); + } + + MINIMUM_WEIGHT + } + + /// Specify a proxy that is already open to us. Called by the stash. + /// + /// NOTE: Used to be called `set_proxy`. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `proxy`: The account that will be activated as proxy. + /// + /// # + /// - One extra DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn activate_proxy(origin, proxy: T::AccountId) { + let who = ensure_signed(origin)?; + Proxy::::try_mutate(&proxy, |a| match a.take() { + None => Err(Error::::NotOpen), + Some(ProxyState::Active(_)) => Err(Error::::AlreadyProxy), + Some(ProxyState::Open(x)) if &x == &who => { + *a = Some(ProxyState::Active(who)); + Ok(()) + } + Some(ProxyState::Open(_)) => Err(Error::::WrongOpen), + })?; + } + + /// Clear the proxy. Called by the proxy. + /// + /// NOTE: Used to be called `resign_proxy`. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// # + /// - One DB clear. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn close_proxy(origin) { + let who = ensure_signed(origin)?; + Proxy::::mutate(&who, |a| { + if a.is_some() { + system::Module::::dec_ref(&who); + } + *a = None; + }); + } + + /// Deactivate the proxy, but leave open to this account. Called by the stash. + /// + /// The proxy must already be active. + /// + /// NOTE: Used to be called `remove_proxy`. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `proxy`: The account that will be deactivated as proxy. + /// + /// # + /// - One DB clear. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn deactivate_proxy(origin, proxy: T::AccountId) { + let who = ensure_signed(origin)?; + Proxy::::try_mutate(&proxy, |a| match a.take() { + None | Some(ProxyState::Open(_)) => Err(Error::::NotActive), + Some(ProxyState::Active(x)) if &x == &who => { + *a = Some(ProxyState::Open(who)); + Ok(()) + } + Some(ProxyState::Active(_)) => Err(Error::::WrongProxy), + })?; + } + + /// Delegate the voting power (with some given conviction) of the sending account. + /// + /// The balance delegated is locked for as long as it's delegated, and thereafter for the + /// time appropriate for the conviction's lock period. + /// + /// The dispatch origin of this call must be _Signed_, and the signing account must either: + /// - be delegating already; or + /// - have no voting activity (if there is, then it will need to be removed/consolidated + /// through `reap_vote` or `unvote`). + /// + /// - `to`: The account whose voting the `target` account's voting power will follow. + /// - `conviction`: The conviction that will be attached to the delegated votes. When the + /// account is undelegated, the funds will be locked for the corresponding period. + /// - `balance`: The amount of the account's balance to be used in delegating. This must + /// not be more than the account's current balance. + /// + /// Emits `Delegated`. + /// + /// # + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + pub fn delegate(origin, to: T::AccountId, conviction: Conviction, balance: BalanceOf) { + let who = ensure_signed(origin)?; + Self::try_delegate(who, to, conviction, balance)?; + } + + /// Undelegate the voting power of the sending account. + /// + /// Tokens may be unlocked following once an amount of time consistent with the lock period + /// of the conviction with which the delegation was issued. + /// + /// The dispatch origin of this call must be _Signed_ and the signing account must be + /// currently delegating. 
+ /// + /// Emits `Undelegated`. + /// + /// # + /// - O(1). + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn undelegate(origin) { + let who = ensure_signed(origin)?; + Self::try_undelegate(who)?; + } + + /// Clears all public proposals. + /// + /// The dispatch origin of this call must be _Root_. + /// + /// # + /// - `O(1)`. + /// - One DB clear. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn clear_public_proposals(origin) { + ensure_root(origin)?; + + >::kill(); + } + + /// Register the preimage for an upcoming proposal. This doesn't require the proposal to be + /// in the dispatch queue but does require a deposit, returned once enacted. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `encoded_proposal`: The preimage of a proposal. + /// + /// Emits `PreimageNoted`. + /// + /// # + /// - Dependent on the size of `encoded_proposal` but protected by a + /// required deposit. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn note_preimage(origin, encoded_proposal: Vec) { + let who = ensure_signed(origin)?; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + ensure!(!>::contains_key(&proposal_hash), Error::::DuplicatePreimage); + + let deposit = >::from(encoded_proposal.len() as u32) + .saturating_mul(T::PreimageByteDeposit::get()); + T::Currency::reserve(&who, deposit)?; + + let now = >::block_number(); + let a = PreimageStatus::Available { + data: encoded_proposal, + provider: who.clone(), + deposit, + since: now, + expiry: None, + }; + >::insert(proposal_hash, a); + + Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, deposit)); + } + + /// Register the preimage for an upcoming proposal. This requires the proposal to be + /// in the dispatch queue. No deposit is needed. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `encoded_proposal`: The preimage of a proposal. + /// + /// Emits `PreimageNoted`. 
+ /// + /// # + /// - Dependent on the size of `encoded_proposal` and length of dispatch queue. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn note_imminent_preimage(origin, encoded_proposal: Vec) { + let who = ensure_signed(origin)?; + let proposal_hash = T::Hashing::hash(&encoded_proposal[..]); + let status = Preimages::::get(&proposal_hash).ok_or(Error::::NotImminent)?; + let expiry = status.to_missing_expiry().ok_or(Error::::DuplicatePreimage)?; + + let now = >::block_number(); + let free = >::zero(); + let a = PreimageStatus::Available { + data: encoded_proposal, + provider: who.clone(), + deposit: Zero::zero(), + since: now, + expiry: Some(expiry), + }; + >::insert(proposal_hash, a); + + Self::deposit_event(RawEvent::PreimageNoted(proposal_hash, who, free)); + } + + /// Remove an expired proposal preimage and collect the deposit. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `proposal_hash`: The preimage hash of a proposal. + /// + /// This will only work after `VotingPeriod` blocks from the time that the preimage was + /// noted, if it's the same account doing it. If it's a different account, then it'll only + /// work an additional `EnactmentPeriod` later. + /// + /// Emits `PreimageReaped`. + /// + /// # + /// - One DB clear. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn reap_preimage(origin, proposal_hash: T::Hash) { + let who = ensure_signed(origin)?; + let (provider, deposit, since, expiry) = >::get(&proposal_hash) + .and_then(|m| match m { + PreimageStatus::Available { provider, deposit, since, expiry, .. 
} + => Some((provider, deposit, since, expiry)), + _ => None, + }).ok_or(Error::::PreimageMissing)?; + + let now = >::block_number(); + let (voting, enactment) = (T::VotingPeriod::get(), T::EnactmentPeriod::get()); + let additional = if who == provider { Zero::zero() } else { enactment }; + ensure!(now >= since + voting + additional, Error::::TooEarly); + ensure!(expiry.map_or(true, |e| now > e), Error::::Imminent); + + let _ = T::Currency::repatriate_reserved(&provider, &who, deposit, BalanceStatus::Free); + >::remove(&proposal_hash); + Self::deposit_event(RawEvent::PreimageReaped(proposal_hash, provider, deposit, who)); + } + + /// Unlock tokens that have an expired lock. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `target`: The account to remove the lock on. + /// + /// # + /// - `O(1)`. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn unlock(origin, target: T::AccountId) { + ensure_signed(origin)?; + Self::update_lock(&target); + } + + /// Become a proxy. + /// + /// This must be called prior to a later `activate_proxy`. + /// + /// Origin must be a Signed. + /// + /// - `target`: The account whose votes will later be proxied. + /// + /// `close_proxy` must be called before the account can be destroyed. + /// + /// # + /// - One extra DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn open_proxy(origin, target: T::AccountId) { + let who = ensure_signed(origin)?; + Proxy::::mutate(&who, |a| { + if a.is_none() { + system::Module::::inc_ref(&who); + } + *a = Some(ProxyState::Open(target)); + }); + } + + /// Remove a vote for a referendum. 
+ /// + /// If: + /// - the referendum was cancelled, or + /// - the referendum is ongoing, or + /// - the referendum has ended such that + /// - the vote of the account was in opposition to the result; or + /// - there was no conviction to the account's vote; or + /// - the account made a split vote + /// ...then the vote is removed cleanly and a following call to `unlock` may result in more + /// funds being available. + /// + /// If, however, the referendum has ended and: + /// - it finished corresponding to the vote of the account, and + /// - the account made a standard vote with conviction, and + /// - the lock period of the conviction is not over + /// ...then the lock will be aggregated into the overall account's lock, which may involve + /// *overlocking* (where the two locks are combined into a single lock that is the maximum + /// of both the amount locked and the time is it locked for). + /// + /// The dispatch origin of this call must be _Signed_, and the signer must have a vote + /// registered for referendum `index`. + /// + /// - `index`: The index of referendum of the vote to be removed. + /// + /// # + /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn remove_vote(origin, index: ReferendumIndex) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::try_remove_vote(&who, index, UnvoteScope::Any) + } + + /// Remove a vote for a referendum. + /// + /// If the `target` is equal to the signer, then this function is exactly equivalent to + /// `remove_vote`. If not equal to the signer, then the vote must have expired, + /// either because the referendum was cancelled, because the voter lost the referendum or + /// because the conviction period is over. + /// + /// The dispatch origin of this call must be _Signed_. + /// + /// - `target`: The account of the vote to be removed; this account must have voted for + /// referendum `index`. 
+ /// - `index`: The index of referendum of the vote to be removed. + /// + /// # + /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn remove_other_vote(origin, target: T::AccountId, index: ReferendumIndex) -> DispatchResult { + let who = ensure_signed(origin)?; + let scope = if target == who { UnvoteScope::Any } else { UnvoteScope::OnlyExpired }; + Self::try_remove_vote(&target, index, scope)?; + Ok(()) + } + + /// Delegate the voting power (with some given conviction) of a proxied account. + /// + /// The balance delegated is locked for as long as it's delegated, and thereafter for the + /// time appropriate for the conviction's lock period. + /// + /// The dispatch origin of this call must be _Signed_, and the signing account must have + /// been set as the proxy account for `target`. + /// + /// - `target`: The account whole voting power shall be delegated and whose balance locked. + /// This account must either: + /// - be delegating already; or + /// - have no voting activity (if there is, then it will need to be removed/consolidated + /// through `reap_vote` or `unvote`). + /// - `to`: The account whose voting the `target` account's voting power will follow. + /// - `conviction`: The conviction that will be attached to the delegated votes. When the + /// account is undelegated, the funds will be locked for the corresponding period. + /// - `balance`: The amount of the account's balance to be used in delegating. This must + /// not be more than the account's current balance. + /// + /// Emits `Delegated`. 
+ /// + /// # + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + pub fn proxy_delegate(origin, + to: T::AccountId, + conviction: Conviction, + balance: BalanceOf, + ) { + let who = ensure_signed(origin)?; + let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::try_delegate(target, to, conviction, balance)?; + } + + /// Undelegate the voting power of a proxied account. + /// + /// Tokens may be unlocked following once an amount of time consistent with the lock period + /// of the conviction with which the delegation was issued. + /// + /// The dispatch origin of this call must be _Signed_ and the signing account must be a + /// proxy for some other account which is currently delegating. + /// + /// Emits `Undelegated`. + /// + /// # + /// - O(1). + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn proxy_undelegate(origin) { + let who = ensure_signed(origin)?; + let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::try_undelegate(target)?; + } + + /// Remove a proxied vote for a referendum. + /// + /// Exactly equivalent to `remove_vote` except that it operates on the account that the + /// sender is a proxy for. + /// + /// The dispatch origin of this call must be _Signed_ and the signing account must be a + /// proxy for some other account which has a registered vote for the referendum of `index`. + /// + /// - `index`: The index of referendum of the vote to be removed. + /// + /// # + /// - `O(R + log R)` where R is the number of referenda that `target` has voted on. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn proxy_remove_vote(origin, index: ReferendumIndex) -> DispatchResult { + let who = ensure_signed(origin)?; + let target = Self::proxy(who).and_then(|a| a.as_active()).ok_or(Error::::NotProxy)?; + Self::try_remove_vote(&target, index, UnvoteScope::Any) + } + + /// Enact a proposal from a referendum. 
For now we just make the weight be the maximum. + #[weight = SimpleDispatchInfo::MaxNormal] + fn enact_proposal(origin, proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { + ensure_root(origin)?; + Self::do_enact_proposal(proposal_hash, index) + } + } } impl Module { - fn migrate() { - use frame_support::{Twox64Concat, migration::{StorageKeyIterator, remove_storage_prefix}}; - remove_storage_prefix(b"Democracy", b"VotesOf", &[]); - remove_storage_prefix(b"Democracy", b"VotersFor", &[]); - remove_storage_prefix(b"Democracy", b"Delegations", &[]); - for (who, (end, proposal_hash, threshold, delay)) - in StorageKeyIterator::< - ReferendumIndex, - (T::BlockNumber, T::Hash, VoteThreshold, T::BlockNumber), - Twox64Concat, - >::new(b"Democracy", b"ReferendumInfoOf").drain() - { - let status = ReferendumStatus { - end, proposal_hash, threshold, delay, tally: Tally::default() - }; - ReferendumInfoOf::::insert(who, ReferendumInfo::Ongoing(status)) - } - } - - // exposed immutables. - - /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal - /// index. - pub fn backing_for(proposal: PropIndex) -> Option> { - Self::deposit_of(proposal).map(|(d, l)| d * (l.len() as u32).into()) - } - - /// Get all referenda ready for tally at block `n`. - pub fn maturing_referenda_at( - n: T::BlockNumber - ) -> Vec<(ReferendumIndex, ReferendumStatus>)> { - let next = Self::lowest_unbaked(); - let last = Self::referendum_count(); - (next..last).into_iter() - .map(|i| (i, Self::referendum_info(i))) - .filter_map(|(i, maybe_info)| match maybe_info { - Some(ReferendumInfo::Ongoing(status)) => Some((i, status)), - _ => None, - }) - .filter(|(_, status)| status.end == n) - .collect() - } - - // Exposed mutables. 
- - #[cfg(feature = "std")] - pub fn force_proxy(stash: T::AccountId, proxy: T::AccountId) { - Proxy::::mutate(&proxy, |o| { - if o.is_none() { - system::Module::::inc_ref(&proxy); - } - *o = Some(ProxyState::Active(stash)) - }) - } - - /// Start a referendum. - pub fn internal_start_referendum( - proposal_hash: T::Hash, - threshold: VoteThreshold, - delay: T::BlockNumber - ) -> ReferendumIndex { - >::inject_referendum( - >::block_number() + T::VotingPeriod::get(), - proposal_hash, - threshold, - delay - ) - } - - /// Remove a referendum. - pub fn internal_cancel_referendum(ref_index: ReferendumIndex) { - Self::deposit_event(RawEvent::Cancelled(ref_index)); - ReferendumInfoOf::::remove(ref_index); - } - - // private. - - /// Ok if the given referendum is active, Err otherwise - fn ensure_ongoing(r: ReferendumInfo>) - -> Result>, DispatchError> - { - match r { - ReferendumInfo::Ongoing(s) => Ok(s), - _ => Err(Error::::ReferendumInvalid.into()), - } - } - - fn referendum_status(ref_index: ReferendumIndex) - -> Result>, DispatchError> - { - let info = ReferendumInfoOf::::get(ref_index) - .ok_or(Error::::ReferendumInvalid)?; - Self::ensure_ongoing(info) - } - - /// Actually enact a vote, if legit. - fn try_vote(who: &T::AccountId, ref_index: ReferendumIndex, vote: AccountVote>) -> DispatchResult { - let mut status = Self::referendum_status(ref_index)?; - ensure!(vote.balance() <= T::Currency::free_balance(who), Error::::InsufficientFunds); - VotingOf::::try_mutate(who, |voting| -> DispatchResult { - if let Voting::Direct { ref mut votes, delegations, .. } = voting { - match votes.binary_search_by_key(&ref_index, |i| i.0) { - Ok(i) => { - // Shouldn't be possible to fail, but we handle it gracefully. 
- status.tally.remove(votes[i].1).ok_or(Error::::Underflow)?; - if let Some(approve) = votes[i].1.as_standard() { - status.tally.reduce(approve, *delegations); - } - votes[i].1 = vote; - } - Err(i) => votes.insert(i, (ref_index, vote)), - } - // Shouldn't be possible to fail, but we handle it gracefully. - status.tally.add(vote).ok_or(Error::::Overflow)?; - if let Some(approve) = vote.as_standard() { - status.tally.increase(approve, *delegations); - } - Ok(()) - } else { - Err(Error::::AlreadyDelegating.into()) - } - })?; - // Extend the lock to `balance` (rather than setting it) since we don't know what other - // votes are in place. - T::Currency::extend_lock( - DEMOCRACY_ID, - who, - vote.balance(), - WithdrawReason::Transfer.into() - ); - ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); - Ok(()) - } - - /// Remove the account's vote for the given referendum if possible. This is possible when: - /// - The referendum has not finished. - /// - The referendum has finished and the voter lost their direction. - /// - The referendum has finished and the voter's lock period is up. - /// - /// This will generally be combined with a call to `unlock`. - fn try_remove_vote(who: &T::AccountId, ref_index: ReferendumIndex, scope: UnvoteScope) -> DispatchResult { - let info = ReferendumInfoOf::::get(ref_index); - VotingOf::::try_mutate(who, |voting| -> DispatchResult { - if let Voting::Direct { ref mut votes, delegations, ref mut prior } = voting { - let i = votes.binary_search_by_key(&ref_index, |i| i.0).map_err(|_| Error::::NotVoter)?; - match info { - Some(ReferendumInfo::Ongoing(mut status)) => { - ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); - // Shouldn't be possible to fail, but we handle it gracefully. 
- status.tally.remove(votes[i].1).ok_or(Error::::Underflow)?; - if let Some(approve) = votes[i].1.as_standard() { - status.tally.reduce(approve, *delegations); - } - ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); - } - Some(ReferendumInfo::Finished{end, approved}) => - if let Some((lock_periods, balance)) = votes[i].1.locked_if(approved) { - let unlock_at = end + T::EnactmentPeriod::get() * lock_periods.into(); - let now = system::Module::::block_number(); - if now < unlock_at { - ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); - prior.accumulate(unlock_at, balance) - } - }, - None => {} // Referendum was cancelled. - } - votes.remove(i); - } - Ok(()) - })?; - Ok(()) - } - - fn increase_upstream_delegation(who: &T::AccountId, amount: Delegations>) { - VotingOf::::mutate(who, |voting| match voting { - Voting::Delegating { delegations, .. } => - // We don't support second level delegating, so we don't need to do anything more. - *delegations = delegations.saturating_add(amount), - Voting::Direct { votes, delegations, .. } => { - *delegations = delegations.saturating_add(amount); - for &(ref_index, account_vote) in votes.iter() { - if let AccountVote::Standard { vote, .. } = account_vote { - ReferendumInfoOf::::mutate(ref_index, |maybe_info| - if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { - status.tally.increase(vote.aye, amount); - } - ); - } - } - } - }) - } - - fn reduce_upstream_delegation(who: &T::AccountId, amount: Delegations>) { - VotingOf::::mutate(who, |voting| match voting { - Voting::Delegating { delegations, .. } => - // We don't support second level delegating, so we don't need to do anything more. - *delegations = delegations.saturating_sub(amount), - Voting::Direct { votes, delegations, .. } => { - *delegations = delegations.saturating_sub(amount); - for &(ref_index, account_vote) in votes.iter() { - if let AccountVote::Standard { vote, .. 
} = account_vote { - ReferendumInfoOf::::mutate(ref_index, |maybe_info| - if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { - status.tally.reduce(vote.aye, amount); - } - ); - } - } - } - }) - } - - /// Attempt to delegate `balance` times `conviction` of voting power from `who` to `target`. - fn try_delegate( - who: T::AccountId, - target: T::AccountId, - conviction: Conviction, - balance: BalanceOf, - ) -> DispatchResult { - ensure!(who != target, Error::::Nonsense); - ensure!(balance <= T::Currency::free_balance(&who), Error::::InsufficientFunds); - VotingOf::::try_mutate(&who, |voting| -> DispatchResult { - let mut old = Voting::Delegating { - balance, - target: target.clone(), - conviction, - delegations: Default::default(), - prior: Default::default(), - }; - sp_std::mem::swap(&mut old, voting); - match old { - Voting::Delegating { balance, target, conviction, delegations, prior, .. } => { - // remove any delegation votes to our current target. - Self::reduce_upstream_delegation(&target, conviction.votes(balance)); - voting.set_common(delegations, prior); - } - Voting::Direct { votes, delegations, prior } => { - // here we just ensure that we're currently idling with no votes recorded. - ensure!(votes.is_empty(), Error::::VotesExist); - voting.set_common(delegations, prior); - } - } - Self::increase_upstream_delegation(&target, conviction.votes(balance)); - // Extend the lock to `balance` (rather than setting it) since we don't know what other - // votes are in place. - T::Currency::extend_lock( - DEMOCRACY_ID, - &who, - balance, - WithdrawReason::Transfer.into() - ); - Ok(()) - })?; - Self::deposit_event(Event::::Delegated(who, target)); - Ok(()) - } - - /// Attempt to end the current delegation. 
- fn try_undelegate(who: T::AccountId) -> DispatchResult { - VotingOf::::try_mutate(&who, |voting| -> DispatchResult { - let mut old = Voting::default(); - sp_std::mem::swap(&mut old, voting); - match old { - Voting::Delegating { - balance, - target, - conviction, - delegations, - mut prior, - } => { - // remove any delegation votes to our current target. - Self::reduce_upstream_delegation(&target, conviction.votes(balance)); - let now = system::Module::::block_number(); - let lock_periods = conviction.lock_periods().into(); - prior.accumulate(now + T::EnactmentPeriod::get() * lock_periods, balance); - voting.set_common(delegations, prior); - } - Voting::Direct { .. } => { - return Err(Error::::NotDelegating.into()) - } - } - Ok(()) - })?; - Self::deposit_event(Event::::Undelegated(who)); - Ok(()) - } - - /// Rejig the lock on an account. It will never get more stringent (since that would indicate - /// a security hole) but may be reduced from what they are currently. - fn update_lock(who: &T::AccountId) { - let lock_needed = VotingOf::::mutate(who, |voting| { - voting.rejig(system::Module::::block_number()); - voting.locked_balance() - }); - if lock_needed.is_zero() { - T::Currency::remove_lock(DEMOCRACY_ID, who); - } else { - T::Currency::set_lock(DEMOCRACY_ID, who, lock_needed, WithdrawReason::Transfer.into()); - } - } - - /// Start a referendum - fn inject_referendum( - end: T::BlockNumber, - proposal_hash: T::Hash, - threshold: VoteThreshold, - delay: T::BlockNumber, - ) -> ReferendumIndex { - let ref_index = Self::referendum_count(); - ReferendumCount::put(ref_index + 1); - let status = ReferendumStatus { end, proposal_hash, threshold, delay, tally: Default::default() }; - let item = ReferendumInfo::Ongoing(status); - >::insert(ref_index, item); - Self::deposit_event(RawEvent::Started(ref_index, threshold)); - ref_index - } - - /// Table the next waiting proposal for a vote. 
- fn launch_next(now: T::BlockNumber) -> DispatchResult { - if LastTabledWasExternal::take() { - Self::launch_public(now).or_else(|_| Self::launch_external(now)) - } else { - Self::launch_external(now).or_else(|_| Self::launch_public(now)) - }.map_err(|_| Error::::NoneWaiting.into()) - } - - /// Table the waiting external proposal for a vote, if there is one. - fn launch_external(now: T::BlockNumber) -> DispatchResult { - if let Some((proposal, threshold)) = >::take() { - LastTabledWasExternal::put(true); - Self::deposit_event(RawEvent::ExternalTabled); - Self::inject_referendum( - now + T::VotingPeriod::get(), - proposal, - threshold, - T::EnactmentPeriod::get(), - ); - Ok(()) - } else { - Err(Error::::NoneWaiting)? - } - } - - /// Table the waiting public proposal with the highest backing for a vote. - fn launch_public(now: T::BlockNumber) -> DispatchResult { - let mut public_props = Self::public_props(); - if let Some((winner_index, _)) = public_props.iter() + fn migrate() { + use frame_support::{ + migration::{remove_storage_prefix, StorageKeyIterator}, + Twox64Concat, + }; + remove_storage_prefix(b"Democracy", b"VotesOf", &[]); + remove_storage_prefix(b"Democracy", b"VotersFor", &[]); + remove_storage_prefix(b"Democracy", b"Delegations", &[]); + for (who, (end, proposal_hash, threshold, delay)) in + StorageKeyIterator::< + ReferendumIndex, + (T::BlockNumber, T::Hash, VoteThreshold, T::BlockNumber), + Twox64Concat, + >::new(b"Democracy", b"ReferendumInfoOf") + .drain() + { + let status = ReferendumStatus { + end, + proposal_hash, + threshold, + delay, + tally: Tally::default(), + }; + ReferendumInfoOf::::insert(who, ReferendumInfo::Ongoing(status)) + } + } + + // exposed immutables. + + /// Get the amount locked in support of `proposal`; `None` if proposal isn't a valid proposal + /// index. 
+ pub fn backing_for(proposal: PropIndex) -> Option> { + Self::deposit_of(proposal).map(|(d, l)| d * (l.len() as u32).into()) + } + + /// Get all referenda ready for tally at block `n`. + pub fn maturing_referenda_at( + n: T::BlockNumber, + ) -> Vec<( + ReferendumIndex, + ReferendumStatus>, + )> { + let next = Self::lowest_unbaked(); + let last = Self::referendum_count(); + (next..last) + .into_iter() + .map(|i| (i, Self::referendum_info(i))) + .filter_map(|(i, maybe_info)| match maybe_info { + Some(ReferendumInfo::Ongoing(status)) => Some((i, status)), + _ => None, + }) + .filter(|(_, status)| status.end == n) + .collect() + } + + // Exposed mutables. + + #[cfg(feature = "std")] + pub fn force_proxy(stash: T::AccountId, proxy: T::AccountId) { + Proxy::::mutate(&proxy, |o| { + if o.is_none() { + system::Module::::inc_ref(&proxy); + } + *o = Some(ProxyState::Active(stash)) + }) + } + + /// Start a referendum. + pub fn internal_start_referendum( + proposal_hash: T::Hash, + threshold: VoteThreshold, + delay: T::BlockNumber, + ) -> ReferendumIndex { + >::inject_referendum( + >::block_number() + T::VotingPeriod::get(), + proposal_hash, + threshold, + delay, + ) + } + + /// Remove a referendum. + pub fn internal_cancel_referendum(ref_index: ReferendumIndex) { + Self::deposit_event(RawEvent::Cancelled(ref_index)); + ReferendumInfoOf::::remove(ref_index); + } + + // private. + + /// Ok if the given referendum is active, Err otherwise + fn ensure_ongoing( + r: ReferendumInfo>, + ) -> Result>, DispatchError> { + match r { + ReferendumInfo::Ongoing(s) => Ok(s), + _ => Err(Error::::ReferendumInvalid.into()), + } + } + + fn referendum_status( + ref_index: ReferendumIndex, + ) -> Result>, DispatchError> { + let info = ReferendumInfoOf::::get(ref_index).ok_or(Error::::ReferendumInvalid)?; + Self::ensure_ongoing(info) + } + + /// Actually enact a vote, if legit. 
+ fn try_vote( + who: &T::AccountId, + ref_index: ReferendumIndex, + vote: AccountVote>, + ) -> DispatchResult { + let mut status = Self::referendum_status(ref_index)?; + ensure!( + vote.balance() <= T::Currency::free_balance(who), + Error::::InsufficientFunds + ); + VotingOf::::try_mutate(who, |voting| -> DispatchResult { + if let Voting::Direct { + ref mut votes, + delegations, + .. + } = voting + { + match votes.binary_search_by_key(&ref_index, |i| i.0) { + Ok(i) => { + // Shouldn't be possible to fail, but we handle it gracefully. + status + .tally + .remove(votes[i].1) + .ok_or(Error::::Underflow)?; + if let Some(approve) = votes[i].1.as_standard() { + status.tally.reduce(approve, *delegations); + } + votes[i].1 = vote; + } + Err(i) => votes.insert(i, (ref_index, vote)), + } + // Shouldn't be possible to fail, but we handle it gracefully. + status.tally.add(vote).ok_or(Error::::Overflow)?; + if let Some(approve) = vote.as_standard() { + status.tally.increase(approve, *delegations); + } + Ok(()) + } else { + Err(Error::::AlreadyDelegating.into()) + } + })?; + // Extend the lock to `balance` (rather than setting it) since we don't know what other + // votes are in place. + T::Currency::extend_lock( + DEMOCRACY_ID, + who, + vote.balance(), + WithdrawReason::Transfer.into(), + ); + ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); + Ok(()) + } + + /// Remove the account's vote for the given referendum if possible. This is possible when: + /// - The referendum has not finished. + /// - The referendum has finished and the voter lost their direction. + /// - The referendum has finished and the voter's lock period is up. + /// + /// This will generally be combined with a call to `unlock`. 
+ fn try_remove_vote( + who: &T::AccountId, + ref_index: ReferendumIndex, + scope: UnvoteScope, + ) -> DispatchResult { + let info = ReferendumInfoOf::::get(ref_index); + VotingOf::::try_mutate(who, |voting| -> DispatchResult { + if let Voting::Direct { + ref mut votes, + delegations, + ref mut prior, + } = voting + { + let i = votes + .binary_search_by_key(&ref_index, |i| i.0) + .map_err(|_| Error::::NotVoter)?; + match info { + Some(ReferendumInfo::Ongoing(mut status)) => { + ensure!(matches!(scope, UnvoteScope::Any), Error::::NoPermission); + // Shouldn't be possible to fail, but we handle it gracefully. + status + .tally + .remove(votes[i].1) + .ok_or(Error::::Underflow)?; + if let Some(approve) = votes[i].1.as_standard() { + status.tally.reduce(approve, *delegations); + } + ReferendumInfoOf::::insert(ref_index, ReferendumInfo::Ongoing(status)); + } + Some(ReferendumInfo::Finished { end, approved }) => { + if let Some((lock_periods, balance)) = votes[i].1.locked_if(approved) { + let unlock_at = end + T::EnactmentPeriod::get() * lock_periods.into(); + let now = system::Module::::block_number(); + if now < unlock_at { + ensure!( + matches!(scope, UnvoteScope::Any), + Error::::NoPermission + ); + prior.accumulate(unlock_at, balance) + } + } + } + None => {} // Referendum was cancelled. + } + votes.remove(i); + } + Ok(()) + })?; + Ok(()) + } + + fn increase_upstream_delegation(who: &T::AccountId, amount: Delegations>) { + VotingOf::::mutate(who, |voting| match voting { + Voting::Delegating { delegations, .. } => + // We don't support second level delegating, so we don't need to do anything more. + { + *delegations = delegations.saturating_add(amount) + } + Voting::Direct { + votes, delegations, .. + } => { + *delegations = delegations.saturating_add(amount); + for &(ref_index, account_vote) in votes.iter() { + if let AccountVote::Standard { vote, .. 
} = account_vote { + ReferendumInfoOf::::mutate(ref_index, |maybe_info| { + if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { + status.tally.increase(vote.aye, amount); + } + }); + } + } + } + }) + } + + fn reduce_upstream_delegation(who: &T::AccountId, amount: Delegations>) { + VotingOf::::mutate(who, |voting| match voting { + Voting::Delegating { delegations, .. } => + // We don't support second level delegating, so we don't need to do anything more. + { + *delegations = delegations.saturating_sub(amount) + } + Voting::Direct { + votes, delegations, .. + } => { + *delegations = delegations.saturating_sub(amount); + for &(ref_index, account_vote) in votes.iter() { + if let AccountVote::Standard { vote, .. } = account_vote { + ReferendumInfoOf::::mutate(ref_index, |maybe_info| { + if let Some(ReferendumInfo::Ongoing(ref mut status)) = maybe_info { + status.tally.reduce(vote.aye, amount); + } + }); + } + } + } + }) + } + + /// Attempt to delegate `balance` times `conviction` of voting power from `who` to `target`. + fn try_delegate( + who: T::AccountId, + target: T::AccountId, + conviction: Conviction, + balance: BalanceOf, + ) -> DispatchResult { + ensure!(who != target, Error::::Nonsense); + ensure!( + balance <= T::Currency::free_balance(&who), + Error::::InsufficientFunds + ); + VotingOf::::try_mutate(&who, |voting| -> DispatchResult { + let mut old = Voting::Delegating { + balance, + target: target.clone(), + conviction, + delegations: Default::default(), + prior: Default::default(), + }; + sp_std::mem::swap(&mut old, voting); + match old { + Voting::Delegating { + balance, + target, + conviction, + delegations, + prior, + .. + } => { + // remove any delegation votes to our current target. + Self::reduce_upstream_delegation(&target, conviction.votes(balance)); + voting.set_common(delegations, prior); + } + Voting::Direct { + votes, + delegations, + prior, + } => { + // here we just ensure that we're currently idling with no votes recorded. 
+ ensure!(votes.is_empty(), Error::::VotesExist); + voting.set_common(delegations, prior); + } + } + Self::increase_upstream_delegation(&target, conviction.votes(balance)); + // Extend the lock to `balance` (rather than setting it) since we don't know what other + // votes are in place. + T::Currency::extend_lock(DEMOCRACY_ID, &who, balance, WithdrawReason::Transfer.into()); + Ok(()) + })?; + Self::deposit_event(Event::::Delegated(who, target)); + Ok(()) + } + + /// Attempt to end the current delegation. + fn try_undelegate(who: T::AccountId) -> DispatchResult { + VotingOf::::try_mutate(&who, |voting| -> DispatchResult { + let mut old = Voting::default(); + sp_std::mem::swap(&mut old, voting); + match old { + Voting::Delegating { + balance, + target, + conviction, + delegations, + mut prior, + } => { + // remove any delegation votes to our current target. + Self::reduce_upstream_delegation(&target, conviction.votes(balance)); + let now = system::Module::::block_number(); + let lock_periods = conviction.lock_periods().into(); + prior.accumulate(now + T::EnactmentPeriod::get() * lock_periods, balance); + voting.set_common(delegations, prior); + } + Voting::Direct { .. } => return Err(Error::::NotDelegating.into()), + } + Ok(()) + })?; + Self::deposit_event(Event::::Undelegated(who)); + Ok(()) + } + + /// Rejig the lock on an account. It will never get more stringent (since that would indicate + /// a security hole) but may be reduced from what they are currently. 
+ fn update_lock(who: &T::AccountId) { + let lock_needed = VotingOf::::mutate(who, |voting| { + voting.rejig(system::Module::::block_number()); + voting.locked_balance() + }); + if lock_needed.is_zero() { + T::Currency::remove_lock(DEMOCRACY_ID, who); + } else { + T::Currency::set_lock( + DEMOCRACY_ID, + who, + lock_needed, + WithdrawReason::Transfer.into(), + ); + } + } + + /// Start a referendum + fn inject_referendum( + end: T::BlockNumber, + proposal_hash: T::Hash, + threshold: VoteThreshold, + delay: T::BlockNumber, + ) -> ReferendumIndex { + let ref_index = Self::referendum_count(); + ReferendumCount::put(ref_index + 1); + let status = ReferendumStatus { + end, + proposal_hash, + threshold, + delay, + tally: Default::default(), + }; + let item = ReferendumInfo::Ongoing(status); + >::insert(ref_index, item); + Self::deposit_event(RawEvent::Started(ref_index, threshold)); + ref_index + } + + /// Table the next waiting proposal for a vote. + fn launch_next(now: T::BlockNumber) -> DispatchResult { + if LastTabledWasExternal::take() { + Self::launch_public(now).or_else(|_| Self::launch_external(now)) + } else { + Self::launch_external(now).or_else(|_| Self::launch_public(now)) + } + .map_err(|_| Error::::NoneWaiting.into()) + } + + /// Table the waiting external proposal for a vote, if there is one. + fn launch_external(now: T::BlockNumber) -> DispatchResult { + if let Some((proposal, threshold)) = >::take() { + LastTabledWasExternal::put(true); + Self::deposit_event(RawEvent::ExternalTabled); + Self::inject_referendum( + now + T::VotingPeriod::get(), + proposal, + threshold, + T::EnactmentPeriod::get(), + ); + Ok(()) + } else { + Err(Error::::NoneWaiting)? + } + } + + /// Table the waiting public proposal with the highest backing for a vote. 
+ fn launch_public(now: T::BlockNumber) -> DispatchResult { + let mut public_props = Self::public_props(); + if let Some((winner_index, _)) = public_props.iter() .enumerate() .max_by_key(|x| Self::backing_for((x.1).0).unwrap_or_else(Zero::zero) /* ^^ defensive only: All current public proposals have an amount locked*/) @@ -1640,81 +1713,96 @@ impl Module { } else { Err(Error::::NoneWaiting)? } - } - - fn do_enact_proposal(proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { - let preimage = >::take(&proposal_hash); - if let Some(PreimageStatus::Available { data, provider, deposit, .. }) = preimage { - if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { - let _ = T::Currency::unreserve(&provider, deposit); - Self::deposit_event(RawEvent::PreimageUsed(proposal_hash, provider, deposit)); - - let ok = proposal.dispatch(frame_system::RawOrigin::Root.into()).is_ok(); - Self::deposit_event(RawEvent::Executed(index, ok)); - - Ok(()) - } else { - T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); - Self::deposit_event(RawEvent::PreimageInvalid(proposal_hash, index)); - Err(Error::::PreimageInvalid.into()) - } - } else { - Self::deposit_event(RawEvent::PreimageMissing(proposal_hash, index)); - Err(Error::::PreimageMissing.into()) - } - } - - fn bake_referendum( - now: T::BlockNumber, - index: ReferendumIndex, - status: ReferendumStatus>, - ) -> Result { - let total_issuance = T::Currency::total_issuance(); - let approved = status.threshold.approved(status.tally, total_issuance); - - if approved { - Self::deposit_event(RawEvent::Passed(index)); - if status.delay.is_zero() { - let _ = Self::do_enact_proposal(status.proposal_hash, index); - } else { - let when = now + status.delay; - // Note that we need the preimage now. - Preimages::::mutate_exists(&status.proposal_hash, |maybe_pre| match *maybe_pre { - Some(PreimageStatus::Available { ref mut expiry, .. 
}) => *expiry = Some(when), - ref mut a => *a = Some(PreimageStatus::Missing(when)), - }); - - if T::Scheduler::schedule_named( - (DEMOCRACY_ID, index), - when, - None, - 63, - Call::enact_proposal(status.proposal_hash, index).into(), - ).is_err() { - frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); - } - } - } else { - Self::deposit_event(RawEvent::NotPassed(index)); - } - - Ok(approved) - } - - /// Current era is ending; we should finish up any proposals. - fn begin_block(now: T::BlockNumber) -> DispatchResult { - // pick out another public referendum if it's time. - if (now % T::LaunchPeriod::get()).is_zero() { - // Errors come from the queue being empty. we don't really care about that, and even if - // we did, there is nothing we can do here. - let _ = Self::launch_next(now); - } - - // tally up votes for any expiring referenda. - for (index, info) in Self::maturing_referenda_at(now).into_iter() { - let approved = Self::bake_referendum(now, index, info)?; - ReferendumInfoOf::::insert(index, ReferendumInfo::Finished { end: now, approved }); - } - Ok(()) - } + } + + fn do_enact_proposal(proposal_hash: T::Hash, index: ReferendumIndex) -> DispatchResult { + let preimage = >::take(&proposal_hash); + if let Some(PreimageStatus::Available { + data, + provider, + deposit, + .. 
+ }) = preimage + { + if let Ok(proposal) = T::Proposal::decode(&mut &data[..]) { + let _ = T::Currency::unreserve(&provider, deposit); + Self::deposit_event(RawEvent::PreimageUsed(proposal_hash, provider, deposit)); + + let ok = proposal + .dispatch(frame_system::RawOrigin::Root.into()) + .is_ok(); + Self::deposit_event(RawEvent::Executed(index, ok)); + + Ok(()) + } else { + T::Slash::on_unbalanced(T::Currency::slash_reserved(&provider, deposit).0); + Self::deposit_event(RawEvent::PreimageInvalid(proposal_hash, index)); + Err(Error::::PreimageInvalid.into()) + } + } else { + Self::deposit_event(RawEvent::PreimageMissing(proposal_hash, index)); + Err(Error::::PreimageMissing.into()) + } + } + + fn bake_referendum( + now: T::BlockNumber, + index: ReferendumIndex, + status: ReferendumStatus>, + ) -> Result { + let total_issuance = T::Currency::total_issuance(); + let approved = status.threshold.approved(status.tally, total_issuance); + + if approved { + Self::deposit_event(RawEvent::Passed(index)); + if status.delay.is_zero() { + let _ = Self::do_enact_proposal(status.proposal_hash, index); + } else { + let when = now + status.delay; + // Note that we need the preimage now. + Preimages::::mutate_exists( + &status.proposal_hash, + |maybe_pre| match *maybe_pre { + Some(PreimageStatus::Available { ref mut expiry, .. }) => { + *expiry = Some(when) + } + ref mut a => *a = Some(PreimageStatus::Missing(when)), + }, + ); + + if T::Scheduler::schedule_named( + (DEMOCRACY_ID, index), + when, + None, + 63, + Call::enact_proposal(status.proposal_hash, index).into(), + ) + .is_err() + { + frame_support::print("LOGIC ERROR: bake_referendum/schedule_named failed"); + } + } + } else { + Self::deposit_event(RawEvent::NotPassed(index)); + } + + Ok(approved) + } + + /// Current era is ending; we should finish up any proposals. + fn begin_block(now: T::BlockNumber) -> DispatchResult { + // pick out another public referendum if it's time. 
+ if (now % T::LaunchPeriod::get()).is_zero() { + // Errors come from the queue being empty. we don't really care about that, and even if + // we did, there is nothing we can do here. + let _ = Self::launch_next(now); + } + + // tally up votes for any expiring referenda. + for (index, info) in Self::maturing_referenda_at(now).into_iter() { + let approved = Self::bake_referendum(now, index, info)?; + ReferendumInfoOf::::insert(index, ReferendumInfo::Finished { end: now, approved }); + } + Ok(()) + } } diff --git a/frame/democracy/src/tests.rs b/frame/democracy/src/tests.rs index 4d540f63d5..486408e8a1 100644 --- a/frame/democracy/src/tests.rs +++ b/frame/democracy/src/tests.rs @@ -17,19 +17,22 @@ //! The crate's tests. use super::*; -use std::cell::RefCell; use codec::Encode; use frame_support::{ - impl_outer_origin, impl_outer_dispatch, assert_noop, assert_ok, parameter_types, - ord_parameter_types, traits::{Contains, OnInitialize}, weights::Weight, + assert_noop, assert_ok, impl_outer_dispatch, impl_outer_origin, ord_parameter_types, + parameter_types, + traits::{Contains, OnInitialize}, + weights::Weight, }; +use frame_system::EnsureSignedBy; +use pallet_balances::{BalanceLock, Error as BalancesError}; use sp_core::H256; use sp_runtime::{ - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, - testing::Header, Perbill, + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, }; -use pallet_balances::{BalanceLock, Error as BalancesError}; -use frame_system::EnsureSignedBy; +use std::cell::RefCell; mod cancellation; mod delegation; @@ -42,138 +45,158 @@ mod public_proposals; mod scheduling; mod voting; -const AYE: Vote = Vote { aye: true, conviction: Conviction::None }; -const NAY: Vote = Vote { aye: false, conviction: Conviction::None }; -const BIG_AYE: Vote = Vote { aye: true, conviction: Conviction::Locked1x }; -const BIG_NAY: Vote = Vote { aye: false, conviction: Conviction::Locked1x }; +const AYE: Vote = Vote { + aye: true, + conviction: 
Conviction::None, +}; +const NAY: Vote = Vote { + aye: false, + conviction: Conviction::None, +}; +const BIG_AYE: Vote = Vote { + aye: true, + conviction: Conviction::Locked1x, +}; +const BIG_NAY: Vote = Vote { + aye: false, + conviction: Conviction::Locked1x, +}; impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - pallet_balances::Balances, - democracy::Democracy, - } + pub enum Call for Test where origin: Origin { + pallet_balances::Balances, + democracy::Democracy, + } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = Call; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type 
Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! { - pub const ExistentialDeposit: u64 = 1; - pub const MaximumWeight: u32 = 1000000; + pub const ExistentialDeposit: u64 = 1; + pub const MaximumWeight: u32 = 1000000; } impl pallet_scheduler::Trait for Test { - type Event = (); - type Origin = Origin; - type Call = Call; - type MaximumWeight = MaximumWeight; + type Event = (); + type Origin = Origin; + type Call = Call; + type MaximumWeight = MaximumWeight; } impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const LaunchPeriod: u64 = 2; - pub const VotingPeriod: u64 = 2; - pub const FastTrackVotingPeriod: u64 = 2; - pub const MinimumDeposit: u64 = 1; - pub const EnactmentPeriod: u64 = 2; - pub const CooloffPeriod: u64 = 2; + pub const LaunchPeriod: u64 = 2; + pub const VotingPeriod: u64 = 2; + pub const FastTrackVotingPeriod: u64 = 2; + pub const MinimumDeposit: u64 = 1; + pub const EnactmentPeriod: u64 = 2; + pub const CooloffPeriod: u64 = 2; } ord_parameter_types! 
{ - pub const One: u64 = 1; - pub const Two: u64 = 2; - pub const Three: u64 = 3; - pub const Four: u64 = 4; - pub const Five: u64 = 5; - pub const Six: u64 = 6; + pub const One: u64 = 1; + pub const Two: u64 = 2; + pub const Three: u64 = 3; + pub const Four: u64 = 4; + pub const Five: u64 = 5; + pub const Six: u64 = 6; } pub struct OneToFive; impl Contains for OneToFive { - fn sorted_members() -> Vec { - vec![1, 2, 3, 4, 5] - } - #[cfg(feature = "runtime-benchmarks")] - fn add(_m: &u64) {} + fn sorted_members() -> Vec { + vec![1, 2, 3, 4, 5] + } + #[cfg(feature = "runtime-benchmarks")] + fn add(_m: &u64) {} } thread_local! { - static PREIMAGE_BYTE_DEPOSIT: RefCell = RefCell::new(0); - static INSTANT_ALLOWED: RefCell = RefCell::new(false); + static PREIMAGE_BYTE_DEPOSIT: RefCell = RefCell::new(0); + static INSTANT_ALLOWED: RefCell = RefCell::new(false); } pub struct PreimageByteDeposit; impl Get for PreimageByteDeposit { - fn get() -> u64 { PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow()) } + fn get() -> u64 { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow()) + } } pub struct InstantAllowed; impl Get for InstantAllowed { - fn get() -> bool { INSTANT_ALLOWED.with(|v| *v.borrow()) } + fn get() -> bool { + INSTANT_ALLOWED.with(|v| *v.borrow()) + } } impl super::Trait for Test { - type Proposal = Call; - type Event = (); - type Currency = pallet_balances::Module; - type EnactmentPeriod = EnactmentPeriod; - type LaunchPeriod = LaunchPeriod; - type VotingPeriod = VotingPeriod; - type FastTrackVotingPeriod = FastTrackVotingPeriod; - type MinimumDeposit = MinimumDeposit; - type ExternalOrigin = EnsureSignedBy; - type ExternalMajorityOrigin = EnsureSignedBy; - type ExternalDefaultOrigin = EnsureSignedBy; - type FastTrackOrigin = EnsureSignedBy; - type CancellationOrigin = EnsureSignedBy; - type VetoOrigin = EnsureSignedBy; - type CooloffPeriod = CooloffPeriod; - type PreimageByteDeposit = PreimageByteDeposit; - type Slash = (); - type InstantOrigin = EnsureSignedBy; - type 
InstantAllowed = InstantAllowed; - type Scheduler = Scheduler; + type Proposal = Call; + type Event = (); + type Currency = pallet_balances::Module; + type EnactmentPeriod = EnactmentPeriod; + type LaunchPeriod = LaunchPeriod; + type VotingPeriod = VotingPeriod; + type FastTrackVotingPeriod = FastTrackVotingPeriod; + type MinimumDeposit = MinimumDeposit; + type ExternalOrigin = EnsureSignedBy; + type ExternalMajorityOrigin = EnsureSignedBy; + type ExternalDefaultOrigin = EnsureSignedBy; + type FastTrackOrigin = EnsureSignedBy; + type CancellationOrigin = EnsureSignedBy; + type VetoOrigin = EnsureSignedBy; + type CooloffPeriod = CooloffPeriod; + type PreimageByteDeposit = PreimageByteDeposit; + type Slash = (); + type InstantOrigin = EnsureSignedBy; + type InstantAllowed = InstantAllowed; + type Scheduler = Scheduler; } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisConfig::default().assimilate_storage(&mut t).unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } type System = frame_system::Module; @@ -183,83 +206,91 @@ type Democracy = Module; #[test] fn params_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(Democracy::referendum_count(), 0); - assert_eq!(Balances::free_balance(42), 0); - assert_eq!(Balances::total_issuance(), 210); - }); 
+ new_test_ext().execute_with(|| { + assert_eq!(Democracy::referendum_count(), 0); + assert_eq!(Balances::free_balance(42), 0); + assert_eq!(Balances::total_issuance(), 210); + }); } fn set_balance_proposal(value: u64) -> Vec { - Call::Balances(pallet_balances::Call::set_balance(42, value, 0)).encode() + Call::Balances(pallet_balances::Call::set_balance(42, value, 0)).encode() } fn set_balance_proposal_hash(value: u64) -> H256 { - BlakeTwo256::hash(&set_balance_proposal(value)[..]) + BlakeTwo256::hash(&set_balance_proposal(value)[..]) } fn set_balance_proposal_hash_and_note(value: u64) -> H256 { - let p = set_balance_proposal(value); - let h = BlakeTwo256::hash(&p[..]); - match Democracy::note_preimage(Origin::signed(6), p) { - Ok(_) => (), - Err(x) if x == Error::::DuplicatePreimage.into() => (), - Err(x) => panic!(x), - } - h + let p = set_balance_proposal(value); + let h = BlakeTwo256::hash(&p[..]); + match Democracy::note_preimage(Origin::signed(6), p) { + Ok(_) => (), + Err(x) if x == Error::::DuplicatePreimage.into() => (), + Err(x) => panic!(x), + } + h } fn propose_set_balance(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash(value), - delay - ) + Democracy::propose(Origin::signed(who), set_balance_proposal_hash(value), delay) } fn propose_set_balance_and_note(who: u64, value: u64, delay: u64) -> DispatchResult { - Democracy::propose( - Origin::signed(who), - set_balance_proposal_hash_and_note(value), - delay - ) + Democracy::propose( + Origin::signed(who), + set_balance_proposal_hash_and_note(value), + delay, + ) } fn next_block() { - System::set_block_number(System::block_number() + 1); - Scheduler::on_initialize(System::block_number()); - assert_eq!(Democracy::begin_block(System::block_number()), Ok(())); + System::set_block_number(System::block_number() + 1); + Scheduler::on_initialize(System::block_number()); + assert_eq!(Democracy::begin_block(System::block_number()), 
Ok(())); } fn fast_forward_to(n: u64) { - while System::block_number() < n { - next_block(); - } + while System::block_number() < n { + next_block(); + } } fn begin_referendum() -> ReferendumIndex { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - fast_forward_to(2); - 0 + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + fast_forward_to(2); + 0 } fn aye(who: u64) -> AccountVote { - AccountVote::Standard { vote: AYE, balance: Balances::free_balance(&who) } + AccountVote::Standard { + vote: AYE, + balance: Balances::free_balance(&who), + } } fn nay(who: u64) -> AccountVote { - AccountVote::Standard { vote: NAY, balance: Balances::free_balance(&who) } + AccountVote::Standard { + vote: NAY, + balance: Balances::free_balance(&who), + } } fn big_aye(who: u64) -> AccountVote { - AccountVote::Standard { vote: BIG_AYE, balance: Balances::free_balance(&who) } + AccountVote::Standard { + vote: BIG_AYE, + balance: Balances::free_balance(&who), + } } fn big_nay(who: u64) -> AccountVote { - AccountVote::Standard { vote: BIG_NAY, balance: Balances::free_balance(&who) } + AccountVote::Standard { + vote: BIG_NAY, + balance: Balances::free_balance(&who), + } } fn tally(r: ReferendumIndex) -> Tally { - Democracy::referendum_status(r).unwrap().tally + Democracy::referendum_status(r).unwrap().tally } diff --git a/frame/democracy/src/tests/cancellation.rs b/frame/democracy/src/tests/cancellation.rs index 998b0c14d8..a1cfbd293b 100644 --- a/frame/democracy/src/tests/cancellation.rs +++ b/frame/democracy/src/tests/cancellation.rs @@ -20,72 +20,75 @@ use super::*; #[test] fn cancel_referendum_should_work() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::cancel_referendum(Origin::ROOT, r.into())); - - next_block(); 
- next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::cancel_referendum(Origin::ROOT, r.into())); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); } #[test] fn cancel_queued_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); - // start of 2 => next referendum scheduled. - fast_forward_to(2); + // start of 2 => next referendum scheduled. + fast_forward_to(2); - assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); - fast_forward_to(4); + fast_forward_to(4); - assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); + assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); - assert_noop!(Democracy::cancel_queued(Origin::ROOT, 1), Error::::ProposalMissing); - assert_ok!(Democracy::cancel_queued(Origin::ROOT, 0)); - assert!(pallet_scheduler::Agenda::::get(6)[0].is_none()); - }); + assert_noop!( + Democracy::cancel_queued(Origin::ROOT, 1), + Error::::ProposalMissing + ); + assert_ok!(Democracy::cancel_queued(Origin::ROOT, 0)); + assert!(pallet_scheduler::Agenda::::get(6)[0].is_none()); + }); } #[test] fn emergency_cancel_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 2 - ); - assert!(Democracy::referendum_status(r).is_ok()); - - assert_noop!(Democracy::emergency_cancel(Origin::signed(3), r), BadOrigin); - 
assert_ok!(Democracy::emergency_cancel(Origin::signed(4), r)); - assert!(Democracy::referendum_info(r).is_none()); - - // some time later... - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 2 - ); - assert!(Democracy::referendum_status(r).is_ok()); - assert_noop!( - Democracy::emergency_cancel(Origin::signed(4), r), - Error::::AlreadyCanceled, - ); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 2, + ); + assert!(Democracy::referendum_status(r).is_ok()); + + assert_noop!(Democracy::emergency_cancel(Origin::signed(3), r), BadOrigin); + assert_ok!(Democracy::emergency_cancel(Origin::signed(4), r)); + assert!(Democracy::referendum_info(r).is_none()); + + // some time later... + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 2, + ); + assert!(Democracy::referendum_status(r).is_ok()); + assert_noop!( + Democracy::emergency_cancel(Origin::signed(4), r), + Error::::AlreadyCanceled, + ); + }); } diff --git a/frame/democracy/src/tests/delegation.rs b/frame/democracy/src/tests/delegation.rs index 061a48b587..002e4bf712 100644 --- a/frame/democracy/src/tests/delegation.rs +++ b/frame/democracy/src/tests/delegation.rs @@ -20,159 +20,333 @@ use super::*; #[test] fn single_proposal_should_work_with_delegation() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - - // Delegate first vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); - - // Delegate a second vote. 
- assert_ok!(Democracy::delegate(Origin::signed(3), 1, Conviction::None, 30)); - assert_eq!(tally(r), Tally { ayes: 6, nays: 0, turnout: 60 }); - - // Reduce first vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 10)); - assert_eq!(tally(r), Tally { ayes: 5, nays: 0, turnout: 50 }); - - // Second vote delegates to first; we don't do tiered delegation, so it doesn't get used. - assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); - assert_eq!(tally(r), Tally { ayes: 2, nays: 0, turnout: 20 }); - - // Main voter cancels their vote - assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); - assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); - - // First delegator delegates half funds with conviction; nothing changes yet. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked1x, 10)); - assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); - - // Main voter reinstates their vote - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_eq!(tally(r), Tally { ayes: 11, nays: 0, turnout: 20 }); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + + // Delegate first vote. + assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::None, + 20 + )); + let r = 0; + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_eq!( + tally(r), + Tally { + ayes: 3, + nays: 0, + turnout: 30 + } + ); + + // Delegate a second vote. + assert_ok!(Democracy::delegate( + Origin::signed(3), + 1, + Conviction::None, + 30 + )); + assert_eq!( + tally(r), + Tally { + ayes: 6, + nays: 0, + turnout: 60 + } + ); + + // Reduce first vote. 
+ assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::None, + 10 + )); + assert_eq!( + tally(r), + Tally { + ayes: 5, + nays: 0, + turnout: 50 + } + ); + + // Second vote delegates to first; we don't do tiered delegation, so it doesn't get used. + assert_ok!(Democracy::delegate( + Origin::signed(3), + 2, + Conviction::None, + 30 + )); + assert_eq!( + tally(r), + Tally { + ayes: 2, + nays: 0, + turnout: 20 + } + ); + + // Main voter cancels their vote + assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); + assert_eq!( + tally(r), + Tally { + ayes: 0, + nays: 0, + turnout: 0 + } + ); + + // First delegator delegates half funds with conviction; nothing changes yet. + assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::Locked1x, + 10 + )); + assert_eq!( + tally(r), + Tally { + ayes: 0, + nays: 0, + turnout: 0 + } + ); + + // Main voter reinstates their vote + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_eq!( + tally(r), + Tally { + ayes: 11, + nays: 0, + turnout: 20 + } + ); + }); } #[test] fn self_delegation_not_allowed() { - new_test_ext().execute_with(|| { - assert_noop!( - Democracy::delegate(Origin::signed(1), 1, Conviction::None, 10), - Error::::Nonsense, - ); - }); + new_test_ext().execute_with(|| { + assert_noop!( + Democracy::delegate(Origin::signed(1), 1, Conviction::None, 10), + Error::::Nonsense, + ); + }); } #[test] fn cyclic_delegation_should_unwind() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - - // Check behavior with cycle. 
- assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); - assert_ok!(Democracy::delegate(Origin::signed(3), 2, Conviction::None, 30)); - assert_ok!(Democracy::delegate(Origin::signed(1), 3, Conviction::None, 10)); - let r = 0; - assert_ok!(Democracy::undelegate(Origin::signed(3))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); - assert_ok!(Democracy::undelegate(Origin::signed(1))); - assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); - - // Delegated vote is counted. - assert_eq!(tally(r), Tally { ayes: 3, nays: 3, turnout: 60 }); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + + // Check behavior with cycle. + assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::None, + 20 + )); + assert_ok!(Democracy::delegate( + Origin::signed(3), + 2, + Conviction::None, + 30 + )); + assert_ok!(Democracy::delegate( + Origin::signed(1), + 3, + Conviction::None, + 10 + )); + let r = 0; + assert_ok!(Democracy::undelegate(Origin::signed(3))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); + assert_ok!(Democracy::undelegate(Origin::signed(1))); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); + + // Delegated vote is counted. + assert_eq!( + tally(r), + Tally { + ayes: 3, + nays: 3, + turnout: 60 + } + ); + }); } #[test] fn single_proposal_should_work_with_vote_and_delegation() { - // If transactor already voted, delegated vote is overwritten. - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::vote(Origin::signed(2), r, nay(2))); - assert_eq!(tally(r), Tally { ayes: 1, nays: 2, turnout: 30 }); - - // Delegate vote. 
- assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); - // Delegated vote replaces the explicit vote. - assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); - }); + // If transactor already voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + + let r = 0; + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, nay(2))); + assert_eq!( + tally(r), + Tally { + ayes: 1, + nays: 2, + turnout: 30 + } + ); + + // Delegate vote. + assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); + assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::None, + 20 + )); + // Delegated vote replaces the explicit vote. + assert_eq!( + tally(r), + Tally { + ayes: 3, + nays: 0, + turnout: 30 + } + ); + }); } #[test] fn single_proposal_should_work_with_undelegation() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - // Delegate and undelegate vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); - assert_ok!(Democracy::undelegate(Origin::signed(2))); - - fast_forward_to(2); - let r = 0; - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - - // Delegated vote is not counted. - assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + // Delegate and undelegate vote. + assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::None, + 20 + )); + assert_ok!(Democracy::undelegate(Origin::signed(2))); + + fast_forward_to(2); + let r = 0; + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + // Delegated vote is not counted. 
+ assert_eq!( + tally(r), + Tally { + ayes: 1, + nays: 0, + turnout: 10 + } + ); + }); } #[test] fn single_proposal_should_work_with_delegation_and_vote() { - // If transactor voted, delegated vote is overwritten. - new_test_ext().execute_with(|| { - let r = begin_referendum(); - // Delegate, undelegate and vote. - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::None, 20)); - assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); - assert_ok!(Democracy::undelegate(Origin::signed(2))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); - // Delegated vote is not counted. - assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); - }); + // If transactor voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + let r = begin_referendum(); + // Delegate, undelegate and vote. + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::None, + 20 + )); + assert_eq!( + tally(r), + Tally { + ayes: 3, + nays: 0, + turnout: 30 + } + ); + assert_ok!(Democracy::undelegate(Origin::signed(2))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + // Delegated vote is not counted. + assert_eq!( + tally(r), + Tally { + ayes: 3, + nays: 0, + turnout: 30 + } + ); + }); } #[test] fn conviction_should_be_honored_in_delegation() { - // If transactor voted, delegated vote is overwritten. - new_test_ext().execute_with(|| { - let r = begin_referendum(); - // Delegate, undelegate and vote. - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - // Delegated vote is huge. - assert_eq!(tally(r), Tally { ayes: 121, nays: 0, turnout: 30 }); - }); + // If transactor voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + let r = begin_referendum(); + // Delegate, undelegate and vote. 
+ assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::Locked6x, + 20 + )); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + // Delegated vote is huge. + assert_eq!( + tally(r), + Tally { + ayes: 121, + nays: 0, + turnout: 30 + } + ); + }); } #[test] fn split_vote_delegation_should_be_ignored() { - // If transactor voted, delegated vote is overwritten. - new_test_ext().execute_with(|| { - let r = begin_referendum(); - assert_ok!(Democracy::delegate(Origin::signed(2), 1, Conviction::Locked6x, 20)); - assert_ok!(Democracy::vote(Origin::signed(1), r, AccountVote::Split { aye: 10, nay: 0 })); - // Delegated vote is huge. - assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); - }); + // If transactor voted, delegated vote is overwritten. + new_test_ext().execute_with(|| { + let r = begin_referendum(); + assert_ok!(Democracy::delegate( + Origin::signed(2), + 1, + Conviction::Locked6x, + 20 + )); + assert_ok!(Democracy::vote( + Origin::signed(1), + r, + AccountVote::Split { aye: 10, nay: 0 } + )); + // Delegated vote is huge. 
+ assert_eq!( + tally(r), + Tally { + ayes: 1, + nays: 0, + turnout: 10 + } + ); + }); } diff --git a/frame/democracy/src/tests/external_proposing.rs b/frame/democracy/src/tests/external_proposing.rs index a249a806ee..854c568694 100644 --- a/frame/democracy/src/tests/external_proposing.rs +++ b/frame/democracy/src/tests/external_proposing.rs @@ -20,270 +20,296 @@ use super::*; #[test] fn veto_external_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - )); - assert!(>::exists()); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); + assert!(>::exists()); - let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::veto_external(Origin::signed(3), h.clone())); - // cancelled. - assert!(!>::exists()); - // fails - same proposal can't be resubmitted. - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); + let h = set_balance_proposal_hash_and_note(2); + assert_ok!(Democracy::veto_external(Origin::signed(3), h.clone())); + // cancelled. + assert!(!>::exists()); + // fails - same proposal can't be resubmitted. + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::::ProposalBlacklisted + ); - fast_forward_to(1); - // fails as we're still in cooloff period. - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); + fast_forward_to(1); + // fails as we're still in cooloff period. + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::::ProposalBlacklisted + ); - fast_forward_to(2); - // works; as we're out of the cooloff period. 
- assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - )); - assert!(>::exists()); + fast_forward_to(2); + // works; as we're out of the cooloff period. + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); + assert!(>::exists()); - // 3 can't veto the same thing twice. - assert_noop!( - Democracy::veto_external(Origin::signed(3), h.clone()), - Error::::AlreadyVetoed - ); + // 3 can't veto the same thing twice. + assert_noop!( + Democracy::veto_external(Origin::signed(3), h.clone()), + Error::::AlreadyVetoed + ); - // 4 vetoes. - assert_ok!(Democracy::veto_external(Origin::signed(4), h.clone())); - // cancelled again. - assert!(!>::exists()); + // 4 vetoes. + assert_ok!(Democracy::veto_external(Origin::signed(4), h.clone())); + // cancelled again. + assert!(!>::exists()); - fast_forward_to(3); - // same proposal fails as we're still in cooloff - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(2), - ), Error::::ProposalBlacklisted); - // different proposal works fine. - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(3), - )); - }); + fast_forward_to(3); + // same proposal fails as we're still in cooloff + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(2),), + Error::::ProposalBlacklisted + ); + // different proposal works fine. 
+ assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); + }); } #[test] fn external_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_noop!( - Democracy::external_propose( - Origin::signed(1), - set_balance_proposal_hash(2), - ), - BadOrigin, - ); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2), - )); - assert_noop!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash(1), - ), Error::::DuplicateProposal); - fast_forward_to(2); - assert_eq!( - Democracy::referendum_status(0), - Ok(ReferendumStatus { - end: 4, - proposal_hash: set_balance_proposal_hash(2), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_noop!( + Democracy::external_propose(Origin::signed(1), set_balance_proposal_hash(2),), + BadOrigin, + ); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2), + )); + assert_noop!( + Democracy::external_propose(Origin::signed(2), set_balance_proposal_hash(1),), + Error::::DuplicateProposal + ); + fast_forward_to(2); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash(2), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + }); } #[test] fn external_majority_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_noop!( - Democracy::external_propose_majority( - Origin::signed(1), - set_balance_proposal_hash(2) - ), - BadOrigin, - ); - assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), - set_balance_proposal_hash_and_note(2) - )); - fast_forward_to(2); - assert_eq!( - 
Democracy::referendum_status(0), - Ok(ReferendumStatus { - end: 4, - proposal_hash: set_balance_proposal_hash(2), - threshold: VoteThreshold::SimpleMajority, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_noop!( + Democracy::external_propose_majority(Origin::signed(1), set_balance_proposal_hash(2)), + BadOrigin, + ); + assert_ok!(Democracy::external_propose_majority( + Origin::signed(3), + set_balance_proposal_hash_and_note(2) + )); + fast_forward_to(2); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash(2), + threshold: VoteThreshold::SimpleMajority, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + }); } #[test] fn external_default_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_noop!( - Democracy::external_propose_default( - Origin::signed(3), - set_balance_proposal_hash(2) - ), - BadOrigin, - ); - assert_ok!(Democracy::external_propose_default( - Origin::signed(1), - set_balance_proposal_hash_and_note(2) - )); - fast_forward_to(2); - assert_eq!( - Democracy::referendum_status(0), - Ok(ReferendumStatus { - end: 4, - proposal_hash: set_balance_proposal_hash(2), - threshold: VoteThreshold::SuperMajorityAgainst, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_noop!( + Democracy::external_propose_default(Origin::signed(3), set_balance_proposal_hash(2)), + BadOrigin, + ); + assert_ok!(Democracy::external_propose_default( + Origin::signed(1), + set_balance_proposal_hash_and_note(2) + )); + fast_forward_to(2); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash(2), + threshold: VoteThreshold::SuperMajorityAgainst, + delay: 2, + tally: Tally 
{ + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + }); } - #[test] fn external_and_public_interleaving_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(1), - )); - assert_ok!(propose_set_balance_and_note(6, 2, 2)); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(1), + )); + assert_ok!(propose_set_balance_and_note(6, 2, 2)); - fast_forward_to(2); + fast_forward_to(2); - // both waiting: external goes first. - assert_eq!( - Democracy::referendum_status(0), - Ok(ReferendumStatus { - end: 4, - proposal_hash: set_balance_proposal_hash_and_note(1), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - // replenish external - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(3), - )); + // both waiting: external goes first. + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash_and_note(1), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + // replenish external + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(3), + )); - fast_forward_to(4); + fast_forward_to(4); - // both waiting: public goes next. - assert_eq!( - Democracy::referendum_status(1), - Ok(ReferendumStatus { - end: 6, - proposal_hash: set_balance_proposal_hash_and_note(2), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - // don't replenish public + // both waiting: public goes next. 
+ assert_eq!( + Democracy::referendum_status(1), + Ok(ReferendumStatus { + end: 6, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + // don't replenish public - fast_forward_to(6); + fast_forward_to(6); - // it's external "turn" again, though since public is empty that doesn't really matter - assert_eq!( - Democracy::referendum_status(2), - Ok(ReferendumStatus { - end: 8, - proposal_hash: set_balance_proposal_hash_and_note(3), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - // replenish external - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(5), - )); + // it's external "turn" again, though since public is empty that doesn't really matter + assert_eq!( + Democracy::referendum_status(2), + Ok(ReferendumStatus { + end: 8, + proposal_hash: set_balance_proposal_hash_and_note(3), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + // replenish external + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(5), + )); - fast_forward_to(8); + fast_forward_to(8); - // external goes again because there's no public waiting. - assert_eq!( - Democracy::referendum_status(3), - Ok(ReferendumStatus { - end: 10, - proposal_hash: set_balance_proposal_hash_and_note(5), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - // replenish both - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(7), - )); - assert_ok!(propose_set_balance_and_note(6, 4, 2)); + // external goes again because there's no public waiting. 
+ assert_eq!( + Democracy::referendum_status(3), + Ok(ReferendumStatus { + end: 10, + proposal_hash: set_balance_proposal_hash_and_note(5), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + // replenish both + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(7), + )); + assert_ok!(propose_set_balance_and_note(6, 4, 2)); - fast_forward_to(10); + fast_forward_to(10); - // public goes now since external went last time. - assert_eq!( - Democracy::referendum_status(4), - Ok(ReferendumStatus { - end: 12, - proposal_hash: set_balance_proposal_hash_and_note(4), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - // replenish public again - assert_ok!(propose_set_balance_and_note(6, 6, 2)); - // cancel external - let h = set_balance_proposal_hash_and_note(7); - assert_ok!(Democracy::veto_external(Origin::signed(3), h)); + // public goes now since external went last time. + assert_eq!( + Democracy::referendum_status(4), + Ok(ReferendumStatus { + end: 12, + proposal_hash: set_balance_proposal_hash_and_note(4), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + // replenish public again + assert_ok!(propose_set_balance_and_note(6, 6, 2)); + // cancel external + let h = set_balance_proposal_hash_and_note(7); + assert_ok!(Democracy::veto_external(Origin::signed(3), h)); - fast_forward_to(12); + fast_forward_to(12); - // public goes again now since there's no external waiting. - assert_eq!( - Democracy::referendum_status(5), - Ok(ReferendumStatus { - end: 14, - proposal_hash: set_balance_proposal_hash_and_note(6), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - }); + // public goes again now since there's no external waiting. 
+ assert_eq!( + Democracy::referendum_status(5), + Ok(ReferendumStatus { + end: 14, + proposal_hash: set_balance_proposal_hash_and_note(6), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + }); } diff --git a/frame/democracy/src/tests/fast_tracking.rs b/frame/democracy/src/tests/fast_tracking.rs index 5ce9b15baf..97bd51361b 100644 --- a/frame/democracy/src/tests/fast_tracking.rs +++ b/frame/democracy/src/tests/fast_tracking.rs @@ -20,69 +20,86 @@ use super::*; #[test] fn fast_track_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing); - assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), - set_balance_proposal_hash_and_note(2) - )); - assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); - assert_ok!(Democracy::fast_track(Origin::signed(5), h, 2, 0)); - assert_eq!( - Democracy::referendum_status(0), - Ok(ReferendumStatus { - end: 2, - proposal_hash: set_balance_proposal_hash_and_note(2), - threshold: VoteThreshold::SimpleMajority, - delay: 0, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + let h = set_balance_proposal_hash_and_note(2); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::::ProposalMissing + ); + assert_ok!(Democracy::external_propose_majority( + Origin::signed(3), + set_balance_proposal_hash_and_note(2) + )); + assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); + assert_ok!(Democracy::fast_track(Origin::signed(5), h, 2, 0)); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 2, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SimpleMajority, + delay: 0, + tally: 
Tally { + ayes: 0, + nays: 0, + turnout: 0 + }, + }) + ); + }); } #[test] fn instant_referendum_works() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 3, 2), Error::::ProposalMissing); - assert_ok!(Democracy::external_propose_majority( - Origin::signed(3), - set_balance_proposal_hash_and_note(2) - )); - assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); - assert_noop!(Democracy::fast_track(Origin::signed(5), h, 1, 0), BadOrigin); - assert_noop!(Democracy::fast_track(Origin::signed(6), h, 1, 0), Error::::InstantNotAllowed); - INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); - assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); - assert_eq!( - Democracy::referendum_status(0), - Ok(ReferendumStatus { - end: 1, - proposal_hash: set_balance_proposal_hash_and_note(2), - threshold: VoteThreshold::SimpleMajority, - delay: 0, - tally: Tally { ayes: 0, nays: 0, turnout: 0 }, - }) - ); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + let h = set_balance_proposal_hash_and_note(2); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::::ProposalMissing + ); + assert_ok!(Democracy::external_propose_majority( + Origin::signed(3), + set_balance_proposal_hash_and_note(2) + )); + assert_noop!(Democracy::fast_track(Origin::signed(1), h, 3, 2), BadOrigin); + assert_noop!(Democracy::fast_track(Origin::signed(5), h, 1, 0), BadOrigin); + assert_noop!( + Democracy::fast_track(Origin::signed(6), h, 1, 0), + Error::::InstantNotAllowed + ); + INSTANT_ALLOWED.with(|v| *v.borrow_mut() = true); + assert_ok!(Democracy::fast_track(Origin::signed(6), h, 1, 0)); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 1, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SimpleMajority, + delay: 0, + tally: Tally { + ayes: 0, + 
nays: 0, + turnout: 0 + }, + }) + ); + }); } #[test] fn fast_track_referendum_fails_when_no_simple_majority() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let h = set_balance_proposal_hash_and_note(2); - assert_ok!(Democracy::external_propose( - Origin::signed(2), - set_balance_proposal_hash_and_note(2) - )); - assert_noop!( - Democracy::fast_track(Origin::signed(5), h, 3, 2), - Error::::NotSimpleMajority - ); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + let h = set_balance_proposal_hash_and_note(2); + assert_ok!(Democracy::external_propose( + Origin::signed(2), + set_balance_proposal_hash_and_note(2) + )); + assert_noop!( + Democracy::fast_track(Origin::signed(5), h, 3, 2), + Error::::NotSimpleMajority + ); + }); } diff --git a/frame/democracy/src/tests/lock_voting.rs b/frame/democracy/src/tests/lock_voting.rs index e83d974a8d..cb8c231fa6 100644 --- a/frame/democracy/src/tests/lock_voting.rs +++ b/frame/democracy/src/tests/lock_voting.rs @@ -20,344 +20,396 @@ use super::*; use std::convert::TryFrom; fn aye(x: u8, balance: u64) -> AccountVote { - AccountVote::Standard { - vote: Vote { aye: true, conviction: Conviction::try_from(x).unwrap() }, - balance - } + AccountVote::Standard { + vote: Vote { + aye: true, + conviction: Conviction::try_from(x).unwrap(), + }, + balance, + } } fn nay(x: u8, balance: u64) -> AccountVote { - AccountVote::Standard { - vote: Vote { aye: false, conviction: Conviction::try_from(x).unwrap() }, - balance - } + AccountVote::Standard { + vote: Vote { + aye: false, + conviction: Conviction::try_from(x).unwrap(), + }, + balance, + } } fn the_lock(amount: u64) -> BalanceLock { - BalanceLock { - id: DEMOCRACY_ID, - amount, - reasons: pallet_balances::Reasons::Misc, - } + BalanceLock { + id: DEMOCRACY_ID, + amount, + reasons: pallet_balances::Reasons::Misc, + } } #[test] fn lock_voting_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let r = 
Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); - assert_ok!(Democracy::vote(Origin::signed(4), r, aye(2, 40))); - assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); - assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); - - // All balances are currently locked. - for i in 1..=5 { - assert_eq!(Balances::locks(i), vec![the_lock(i * 10)]); - } - - fast_forward_to(2); - - // Referendum passed; 1 and 5 didn't get their way and can now reap and unlock. - assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); - assert_ok!(Democracy::unlock(Origin::signed(1), 1)); - // Anyone can reap and unlock anyone else's in this context. - assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 5, r)); - assert_ok!(Democracy::unlock(Origin::signed(2), 5)); - - // 2, 3, 4 got their way with the vote, so they cannot be reaped by others. - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 2, r), Error::::NoPermission); - // However, they can be unvoted by the owner, though it will make no difference to the lock. - assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); - assert_ok!(Democracy::unlock(Origin::signed(2), 2)); - - assert_eq!(Balances::locks(1), vec![]); - assert_eq!(Balances::locks(2), vec![the_lock(20)]); - assert_eq!(Balances::locks(3), vec![the_lock(30)]); - assert_eq!(Balances::locks(4), vec![the_lock(40)]); - assert_eq!(Balances::locks(5), vec![]); - assert_eq!(Balances::free_balance(42), 2); - - - fast_forward_to(5); - // No change yet... 
- assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 4, r), Error::::NoPermission); - assert_ok!(Democracy::unlock(Origin::signed(1), 4)); - assert_eq!(Balances::locks(4), vec![the_lock(40)]); - fast_forward_to(6); - // 4 should now be able to reap and unlock - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 4, r)); - assert_ok!(Democracy::unlock(Origin::signed(1), 4)); - assert_eq!(Balances::locks(4), vec![]); - - fast_forward_to(9); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 3, r), Error::::NoPermission); - assert_ok!(Democracy::unlock(Origin::signed(1), 3)); - assert_eq!(Balances::locks(3), vec![the_lock(30)]); - fast_forward_to(10); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 3, r)); - assert_ok!(Democracy::unlock(Origin::signed(1), 3)); - assert_eq!(Balances::locks(3), vec![]); - - // 2 doesn't need to reap_vote here because it was already done before. - fast_forward_to(17); - assert_ok!(Democracy::unlock(Origin::signed(1), 2)); - assert_eq!(Balances::locks(2), vec![the_lock(20)]); - fast_forward_to(18); - assert_ok!(Democracy::unlock(Origin::signed(1), 2)); - assert_eq!(Balances::locks(2), vec![]); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); + assert_ok!(Democracy::vote(Origin::signed(4), r, aye(2, 40))); + assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); + assert_eq!( + tally(r), + Tally { + ayes: 250, + nays: 100, + turnout: 150 + } + ); + + // All balances are currently locked. 
+ for i in 1..=5 { + assert_eq!(Balances::locks(i), vec![the_lock(i * 10)]); + } + + fast_forward_to(2); + + // Referendum passed; 1 and 5 didn't get their way and can now reap and unlock. + assert_ok!(Democracy::remove_vote(Origin::signed(1), r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 1)); + // Anyone can reap and unlock anyone else's in this context. + assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 5, r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 5)); + + // 2, 3, 4 got their way with the vote, so they cannot be reaped by others. + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 2, r), + Error::::NoPermission + ); + // However, they can be unvoted by the owner, though it will make no difference to the lock. + assert_ok!(Democracy::remove_vote(Origin::signed(2), r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 2)); + + assert_eq!(Balances::locks(1), vec![]); + assert_eq!(Balances::locks(2), vec![the_lock(20)]); + assert_eq!(Balances::locks(3), vec![the_lock(30)]); + assert_eq!(Balances::locks(4), vec![the_lock(40)]); + assert_eq!(Balances::locks(5), vec![]); + assert_eq!(Balances::free_balance(42), 2); + + fast_forward_to(5); + // No change yet... 
+ assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 4, r), + Error::::NoPermission + ); + assert_ok!(Democracy::unlock(Origin::signed(1), 4)); + assert_eq!(Balances::locks(4), vec![the_lock(40)]); + fast_forward_to(6); + // 4 should now be able to reap and unlock + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 4, r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 4)); + assert_eq!(Balances::locks(4), vec![]); + + fast_forward_to(9); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 3, r), + Error::::NoPermission + ); + assert_ok!(Democracy::unlock(Origin::signed(1), 3)); + assert_eq!(Balances::locks(3), vec![the_lock(30)]); + fast_forward_to(10); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 3, r)); + assert_ok!(Democracy::unlock(Origin::signed(1), 3)); + assert_eq!(Balances::locks(3), vec![]); + + // 2 doesn't need to reap_vote here because it was already done before. + fast_forward_to(17); + assert_ok!(Democracy::unlock(Origin::signed(1), 2)); + assert_eq!(Balances::locks(2), vec![the_lock(20)]); + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(1), 2)); + assert_eq!(Balances::locks(2), vec![]); + }); } #[test] fn no_locks_without_conviction_should_work() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0, - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(0, 10))); - - fast_forward_to(2); - - assert_eq!(Balances::free_balance(42), 2); - assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 1, r)); - assert_ok!(Democracy::unlock(Origin::signed(2), 1)); - assert_eq!(Balances::locks(1), vec![]); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + 
assert_ok!(Democracy::vote(Origin::signed(1), r, aye(0, 10))); + + fast_forward_to(2); + + assert_eq!(Balances::free_balance(42), 2); + assert_ok!(Democracy::remove_other_vote(Origin::signed(2), 1, r)); + assert_ok!(Democracy::unlock(Origin::signed(2), 1)); + assert_eq!(Balances::locks(1), vec![]); + }); } #[test] fn lock_voting_should_work_with_delegation() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); - assert_ok!(Democracy::delegate(Origin::signed(4), 2, Conviction::Locked2x, 40)); - assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); - - assert_eq!(tally(r), Tally { ayes: 250, nays: 100, turnout: 150 }); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(5, 10))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(4, 20))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3, 30))); + assert_ok!(Democracy::delegate( + Origin::signed(4), + 2, + Conviction::Locked2x, + 40 + )); + assert_ok!(Democracy::vote(Origin::signed(5), r, nay(1, 50))); + + assert_eq!( + tally(r), + Tally { + ayes: 250, + nays: 100, + turnout: 150 + } + ); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 2); + }); } fn setup_three_referenda() -> (u32, u32, u32) { - System::set_block_number(0); - let r1 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(5), r1, 
aye(4, 10))); - - let r2 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(5), r2, aye(3, 20))); - - let r3 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(5), r3, aye(2, 50))); - - fast_forward_to(2); - - (r1, r2, r3) + System::set_block_number(0); + let r1 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r1, aye(4, 10))); + + let r2 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r2, aye(3, 20))); + + let r3 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r3, aye(2, 50))); + + fast_forward_to(2); + + (r1, r2, r3) } #[test] fn prior_lockvotes_should_be_enforced() { - new_test_ext().execute_with(|| { - let r = setup_three_referenda(); - // r.0 locked 10 until #18. - // r.1 locked 20 until #10. - // r.2 locked 50 until #6. 
- - fast_forward_to(5); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2), Error::::NoPermission); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(50)]); - fast_forward_to(6); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(20)]); - fast_forward_to(9); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1), Error::::NoPermission); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(20)]); - fast_forward_to(10); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(10)]); - fast_forward_to(17); - assert_noop!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0), Error::::NoPermission); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(10)]); - fast_forward_to(18); - assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![]); - }); + new_test_ext().execute_with(|| { + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. 
+ + fast_forward_to(5); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.2), + Error::::NoPermission + ); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(50)]); + fast_forward_to(6); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.2)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + fast_forward_to(9); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.1), + Error::::NoPermission + ); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + fast_forward_to(10); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.1)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + fast_forward_to(17); + assert_noop!( + Democracy::remove_other_vote(Origin::signed(1), 5, r.0), + Error::::NoPermission + ); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + fast_forward_to(18); + assert_ok!(Democracy::remove_other_vote(Origin::signed(1), 5, r.0)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); } #[test] fn single_consolidation_of_lockvotes_should_work_as_before() { - new_test_ext().execute_with(|| { - let r = setup_three_referenda(); - // r.0 locked 10 until #18. - // r.1 locked 20 until #10. - // r.2 locked 50 until #6. 
- - fast_forward_to(5); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(50)]); - fast_forward_to(6); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(20)]); - - fast_forward_to(9); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(20)]); - fast_forward_to(10); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(10)]); - - fast_forward_to(17); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![the_lock(10)]); - fast_forward_to(18); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![]); - }); + new_test_ext().execute_with(|| { + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. 
+ + fast_forward_to(5); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(50)]); + fast_forward_to(6); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + + fast_forward_to(9); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(20)]); + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + + fast_forward_to(17); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![the_lock(10)]); + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); } #[test] fn multi_consolidation_of_lockvotes_should_be_conservative() { - new_test_ext().execute_with(|| { - let r = setup_three_referenda(); - // r.0 locked 10 until #18. - // r.1 locked 20 until #10. - // r.2 locked 50 until #6. - - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); - - fast_forward_to(6); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount >= 20); - - fast_forward_to(10); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount >= 10); - - fast_forward_to(18); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![]); - }); + new_test_ext().execute_with(|| { + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. 
+ + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + + fast_forward_to(6); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 20); + + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); } #[test] fn locks_should_persist_from_voting_to_delegation() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SimpleMajority, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(5), r, aye(4, 10))); - fast_forward_to(2); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); - // locked 10 until #18. - - assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked3x, 20)); - // locked 20. 
- assert!(Balances::locks(5)[0].amount == 20); - - assert_ok!(Democracy::undelegate(Origin::signed(5))); - // locked 20 until #10 - - fast_forward_to(9); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount == 20); - - fast_forward_to(10); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount >= 10); - - fast_forward_to(17); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount >= 10); - - fast_forward_to(18); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![]); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SimpleMajority, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r, aye(4, 10))); + fast_forward_to(2); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); + // locked 10 until #18. + + assert_ok!(Democracy::delegate( + Origin::signed(5), + 1, + Conviction::Locked3x, + 20 + )); + // locked 20. 
+ assert!(Balances::locks(5)[0].amount == 20); + + assert_ok!(Democracy::undelegate(Origin::signed(5))); + // locked 20 until #10 + + fast_forward_to(9); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount == 20); + + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(17); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); } #[test] fn locks_should_persist_from_delegation_to_voting() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(Democracy::delegate(Origin::signed(5), 1, Conviction::Locked5x, 5)); - assert_ok!(Democracy::undelegate(Origin::signed(5))); - // locked 5 until #32 - - let r = setup_three_referenda(); - // r.0 locked 10 until #18. - // r.1 locked 20 until #10. - // r.2 locked 50 until #6. 
- - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); - - fast_forward_to(6); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount >= 20); - - fast_forward_to(10); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount >= 10); - - fast_forward_to(18); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert!(Balances::locks(5)[0].amount >= 5); - - fast_forward_to(32); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![]); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(Democracy::delegate( + Origin::signed(5), + 1, + Conviction::Locked5x, + 5 + )); + assert_ok!(Democracy::undelegate(Origin::signed(5))); + // locked 5 until #32 + + let r = setup_three_referenda(); + // r.0 locked 10 until #18. + // r.1 locked 20 until #10. + // r.2 locked 50 until #6. 
+ + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.2)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.1)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r.0)); + + fast_forward_to(6); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 20); + + fast_forward_to(10); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 10); + + fast_forward_to(18); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert!(Balances::locks(5)[0].amount >= 5); + + fast_forward_to(32); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); } diff --git a/frame/democracy/src/tests/preimage.rs b/frame/democracy/src/tests/preimage.rs index 7d977b0ba8..ac0d52f340 100644 --- a/frame/democracy/src/tests/preimage.rs +++ b/frame/democracy/src/tests/preimage.rs @@ -20,138 +20,156 @@ use super::*; #[test] fn missing_preimage_should_fail() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); } #[test] fn preimage_deposit_should_be_required_and_returned() { - new_test_ext().execute_with(|| { - // fee of 100 is too much. - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); - assert_noop!( - Democracy::note_preimage(Origin::signed(6), vec![0; 500]), - BalancesError::::InsufficientBalance, - ); - // fee of 1 is reasonable. 
- PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + // fee of 100 is too much. + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 100); + assert_noop!( + Democracy::note_preimage(Origin::signed(6), vec![0; 500]), + BalancesError::::InsufficientBalance, + ); + // fee of 1 is reasonable. + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + next_block(); + + assert_eq!(Balances::reserved_balance(6), 0); + assert_eq!(Balances::free_balance(6), 60); + assert_eq!(Balances::free_balance(42), 2); + }); } #[test] fn preimage_deposit_should_be_reapable_earlier_by_owner() { - new_test_ext().execute_with(|| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2))); - - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - assert_noop!( - Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2)), - Error::::TooEarly - ); - next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2))); - - assert_eq!(Balances::free_balance(6), 60); - assert_eq!(Balances::reserved_balance(6), 0); - }); + new_test_ext().execute_with(|| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + 
assert_ok!(Democracy::note_preimage( + Origin::signed(6), + set_balance_proposal(2) + )); + + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(6), set_balance_proposal_hash(2)), + Error::::TooEarly + ); + next_block(); + assert_ok!(Democracy::reap_preimage( + Origin::signed(6), + set_balance_proposal_hash(2) + )); + + assert_eq!(Balances::free_balance(6), 60); + assert_eq!(Balances::reserved_balance(6), 0); + }); } #[test] fn preimage_deposit_should_be_reapable() { - new_test_ext().execute_with(|| { - assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), - Error::::PreimageMissing - ); - - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - assert_ok!(Democracy::note_preimage(Origin::signed(6), set_balance_proposal(2))); - assert_eq!(Balances::reserved_balance(6), 12); - - next_block(); - next_block(); - next_block(); - assert_noop!( - Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), - Error::::TooEarly - ); - - next_block(); - assert_ok!(Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2))); - assert_eq!(Balances::reserved_balance(6), 0); - assert_eq!(Balances::free_balance(6), 48); - assert_eq!(Balances::free_balance(5), 62); - }); + new_test_ext().execute_with(|| { + assert_noop!( + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), + Error::::PreimageMissing + ); + + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + assert_ok!(Democracy::note_preimage( + Origin::signed(6), + set_balance_proposal(2) + )); + assert_eq!(Balances::reserved_balance(6), 12); + + next_block(); + next_block(); + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(5), set_balance_proposal_hash(2)), + Error::::TooEarly + ); + + next_block(); + assert_ok!(Democracy::reap_preimage( + Origin::signed(5), + set_balance_proposal_hash(2) + )); + assert_eq!(Balances::reserved_balance(6), 
0); + assert_eq!(Balances::free_balance(6), 48); + assert_eq!(Balances::free_balance(5), 62); + }); } #[test] fn noting_imminent_preimage_for_free_should_work() { - new_test_ext().execute_with(|| { - PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash(2), - VoteThreshold::SuperMajorityApprove, - 1 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - - assert_noop!( - Democracy::note_imminent_preimage(Origin::signed(7), set_balance_proposal(2)), - Error::::NotImminent - ); - - next_block(); - - // Now we're in the dispatch queue it's all good. - assert_ok!(Democracy::note_imminent_preimage(Origin::signed(7), set_balance_proposal(2))); - - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + PREIMAGE_BYTE_DEPOSIT.with(|v| *v.borrow_mut() = 1); + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash(2), + VoteThreshold::SuperMajorityApprove, + 1, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_noop!( + Democracy::note_imminent_preimage(Origin::signed(7), set_balance_proposal(2)), + Error::::NotImminent + ); + + next_block(); + + // Now we're in the dispatch queue it's all good. 
+ assert_ok!(Democracy::note_imminent_preimage( + Origin::signed(7), + set_balance_proposal(2) + )); + + next_block(); + + assert_eq!(Balances::free_balance(42), 2); + }); } #[test] fn reaping_imminent_preimage_should_fail() { - new_test_ext().execute_with(|| { - let h = set_balance_proposal_hash_and_note(2); - let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - next_block(); - next_block(); - assert_noop!(Democracy::reap_preimage(Origin::signed(6), h), Error::::Imminent); - }); + new_test_ext().execute_with(|| { + let h = set_balance_proposal_hash_and_note(2); + let r = Democracy::inject_referendum(3, h, VoteThreshold::SuperMajorityApprove, 1); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + next_block(); + next_block(); + assert_noop!( + Democracy::reap_preimage(Origin::signed(6), h), + Error::::Imminent + ); + }); } diff --git a/frame/democracy/src/tests/proxying.rs b/frame/democracy/src/tests/proxying.rs index 412adf6be0..941c714e36 100644 --- a/frame/democracy/src/tests/proxying.rs +++ b/frame/democracy/src/tests/proxying.rs @@ -20,85 +20,129 @@ use super::*; #[test] fn proxy_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(Democracy::proxy(10), None); - assert!(System::allow_death(&10)); - - assert_noop!(Democracy::activate_proxy(Origin::signed(1), 10), Error::::NotOpen); - - assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); - assert!(!System::allow_death(&10)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); - - assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::WrongOpen); - assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); - - // Can't set when already set. - assert_noop!(Democracy::activate_proxy(Origin::signed(2), 10), Error::::AlreadyProxy); - - // But this works because 11 isn't proxying. 
- assert_ok!(Democracy::open_proxy(Origin::signed(11), 2)); - assert_ok!(Democracy::activate_proxy(Origin::signed(2), 11)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); - assert_eq!(Democracy::proxy(11), Some(ProxyState::Active(2))); - - // 2 cannot fire 1's proxy: - assert_noop!(Democracy::deactivate_proxy(Origin::signed(2), 10), Error::::WrongProxy); - - // 1 deactivates their proxy: - assert_ok!(Democracy::deactivate_proxy(Origin::signed(1), 10)); - assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); - // but the proxy account cannot be killed until the proxy is closed. - assert!(!System::allow_death(&10)); - - // and then 10 closes it completely: - assert_ok!(Democracy::close_proxy(Origin::signed(10))); - assert_eq!(Democracy::proxy(10), None); - assert!(System::allow_death(&10)); - - // 11 just closes without 2's "permission". - assert_ok!(Democracy::close_proxy(Origin::signed(11))); - assert_eq!(Democracy::proxy(11), None); - assert!(System::allow_death(&11)); - }); + new_test_ext().execute_with(|| { + assert_eq!(Democracy::proxy(10), None); + assert!(System::allow_death(&10)); + + assert_noop!( + Democracy::activate_proxy(Origin::signed(1), 10), + Error::::NotOpen + ); + + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert!(!System::allow_death(&10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); + + assert_noop!( + Democracy::activate_proxy(Origin::signed(2), 10), + Error::::WrongOpen + ); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); + + // Can't set when already set. + assert_noop!( + Democracy::activate_proxy(Origin::signed(2), 10), + Error::::AlreadyProxy + ); + + // But this works because 11 isn't proxying. 
+ assert_ok!(Democracy::open_proxy(Origin::signed(11), 2)); + assert_ok!(Democracy::activate_proxy(Origin::signed(2), 11)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Active(1))); + assert_eq!(Democracy::proxy(11), Some(ProxyState::Active(2))); + + // 2 cannot fire 1's proxy: + assert_noop!( + Democracy::deactivate_proxy(Origin::signed(2), 10), + Error::::WrongProxy + ); + + // 1 deactivates their proxy: + assert_ok!(Democracy::deactivate_proxy(Origin::signed(1), 10)); + assert_eq!(Democracy::proxy(10), Some(ProxyState::Open(1))); + // but the proxy account cannot be killed until the proxy is closed. + assert!(!System::allow_death(&10)); + + // and then 10 closes it completely: + assert_ok!(Democracy::close_proxy(Origin::signed(10))); + assert_eq!(Democracy::proxy(10), None); + assert!(System::allow_death(&10)); + + // 11 just closes without 2's "permission". + assert_ok!(Democracy::close_proxy(Origin::signed(11))); + assert_eq!(Democracy::proxy(11), None); + assert!(System::allow_death(&11)); + }); } #[test] fn voting_and_removing_votes_should_work_with_proxy() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - - fast_forward_to(2); - let r = 0; - assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); - assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); - - assert_ok!(Democracy::proxy_vote(Origin::signed(10), r, aye(1))); - assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); - - assert_ok!(Democracy::proxy_remove_vote(Origin::signed(10), r)); - assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + + fast_forward_to(2); + let r = 0; + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); + + assert_ok!(Democracy::proxy_vote(Origin::signed(10), r, aye(1))); + 
assert_eq!( + tally(r), + Tally { + ayes: 1, + nays: 0, + turnout: 10 + } + ); + + assert_ok!(Democracy::proxy_remove_vote(Origin::signed(10), r)); + assert_eq!( + tally(r), + Tally { + ayes: 0, + nays: 0, + turnout: 0 + } + ); + }); } #[test] fn delegation_and_undelegation_should_work_with_proxy() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - fast_forward_to(2); - let r = 0; - assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); - assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); - - assert_ok!(Democracy::proxy_delegate(Origin::signed(10), 2, Conviction::None, 10)); - assert_eq!(tally(r), Tally { ayes: 3, nays: 0, turnout: 30 }); - - assert_ok!(Democracy::proxy_undelegate(Origin::signed(10))); - assert_eq!(tally(r), Tally { ayes: 2, nays: 0, turnout: 20 }); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + fast_forward_to(2); + let r = 0; + assert_ok!(Democracy::open_proxy(Origin::signed(10), 1)); + assert_ok!(Democracy::activate_proxy(Origin::signed(1), 10)); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + + assert_ok!(Democracy::proxy_delegate( + Origin::signed(10), + 2, + Conviction::None, + 10 + )); + assert_eq!( + tally(r), + Tally { + ayes: 3, + nays: 0, + turnout: 30 + } + ); + + assert_ok!(Democracy::proxy_undelegate(Origin::signed(10))); + assert_eq!( + tally(r), + Tally { + ayes: 2, + nays: 0, + turnout: 20 + } + ); + }); } - diff --git a/frame/democracy/src/tests/public_proposals.rs b/frame/democracy/src/tests/public_proposals.rs index 04246e86f1..cab5593178 100644 --- a/frame/democracy/src/tests/public_proposals.rs +++ b/frame/democracy/src/tests/public_proposals.rs @@ -20,79 +20,85 @@ use super::*; #[test] fn backing_for_should_work() { - new_test_ext().execute_with(|| { - 
assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); - assert_eq!(Democracy::backing_for(0), Some(2)); - assert_eq!(Democracy::backing_for(1), Some(4)); - assert_eq!(Democracy::backing_for(2), Some(3)); - }); + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance_and_note(1, 3, 3)); + assert_eq!(Democracy::backing_for(0), Some(2)); + assert_eq!(Democracy::backing_for(1), Some(4)); + assert_eq!(Democracy::backing_for(2), Some(3)); + }); } #[test] fn deposit_for_proposals_should_be_taken() { - new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::free_balance(2), 15); - assert_eq!(Balances::free_balance(5), 35); - }); + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::free_balance(2), 15); + assert_eq!(Balances::free_balance(5), 35); + }); } #[test] fn deposit_for_proposals_should_be_returned() { - new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(1, 2, 5)); - assert_ok!(Democracy::second(Origin::signed(2), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - assert_ok!(Democracy::second(Origin::signed(5), 0)); - 
fast_forward_to(3); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 20); - assert_eq!(Balances::free_balance(5), 50); - }); + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(1, 2, 5)); + assert_ok!(Democracy::second(Origin::signed(2), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + assert_ok!(Democracy::second(Origin::signed(5), 0)); + fast_forward_to(3); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 20); + assert_eq!(Balances::free_balance(5), 50); + }); } #[test] fn proposal_with_deposit_below_minimum_should_not_work() { - new_test_ext().execute_with(|| { - assert_noop!(propose_set_balance(1, 2, 0), Error::::ValueLow); - }); + new_test_ext().execute_with(|| { + assert_noop!(propose_set_balance(1, 2, 0), Error::::ValueLow); + }); } #[test] fn poor_proposer_should_not_work() { - new_test_ext().execute_with(|| { - assert_noop!(propose_set_balance(1, 2, 11), BalancesError::::InsufficientBalance); - }); + new_test_ext().execute_with(|| { + assert_noop!( + propose_set_balance(1, 2, 11), + BalancesError::::InsufficientBalance + ); + }); } #[test] fn poor_seconder_should_not_work() { - new_test_ext().execute_with(|| { - assert_ok!(propose_set_balance_and_note(2, 2, 11)); - assert_noop!(Democracy::second(Origin::signed(1), 0), BalancesError::::InsufficientBalance); - }); + new_test_ext().execute_with(|| { + assert_ok!(propose_set_balance_and_note(2, 2, 11)); + assert_noop!( + Democracy::second(Origin::signed(1), 0), + BalancesError::::InsufficientBalance + ); + }); } #[test] fn runners_up_should_come_after() { - new_test_ext().execute_with(|| { - System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 2)); - assert_ok!(propose_set_balance_and_note(1, 4, 4)); - assert_ok!(propose_set_balance_and_note(1, 3, 3)); - fast_forward_to(2); - assert_ok!(Democracy::vote(Origin::signed(1), 0, 
aye(1))); - fast_forward_to(4); - assert_ok!(Democracy::vote(Origin::signed(1), 1, aye(1))); - fast_forward_to(6); - assert_ok!(Democracy::vote(Origin::signed(1), 2, aye(1))); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 2)); + assert_ok!(propose_set_balance_and_note(1, 4, 4)); + assert_ok!(propose_set_balance_and_note(1, 3, 3)); + fast_forward_to(2); + assert_ok!(Democracy::vote(Origin::signed(1), 0, aye(1))); + fast_forward_to(4); + assert_ok!(Democracy::vote(Origin::signed(1), 1, aye(1))); + fast_forward_to(6); + assert_ok!(Democracy::vote(Origin::signed(1), 2, aye(1))); + }); } diff --git a/frame/democracy/src/tests/scheduling.rs b/frame/democracy/src/tests/scheduling.rs index db9724dedd..d766feef41 100644 --- a/frame/democracy/src/tests/scheduling.rs +++ b/frame/democracy/src/tests/scheduling.rs @@ -20,92 +20,127 @@ use super::*; #[test] fn simple_passing_should_work() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_eq!(tally(r), Tally { ayes: 1, nays: 0, turnout: 10 }); - next_block(); - next_block(); - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_eq!( + tally(r), + Tally { + ayes: 1, + nays: 0, + turnout: 10 + } + ); + next_block(); + next_block(); + assert_eq!(Balances::free_balance(42), 2); + }); } #[test] fn simple_failing_should_work() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(1), 
r, nay(1))); - assert_eq!(tally(r), Tally { ayes: 0, nays: 1, turnout: 10 }); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, nay(1))); + assert_eq!( + tally(r), + Tally { + ayes: 0, + nays: 1, + turnout: 10 + } + ); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); } #[test] fn ooo_inject_referendums_should_work() { - new_test_ext().execute_with(|| { - let r1 = Democracy::inject_referendum( - 3, - set_balance_proposal_hash_and_note(3), - VoteThreshold::SuperMajorityApprove, - 0 - ); - let r2 = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - - assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); - assert_eq!(tally(r2), Tally { ayes: 1, nays: 0, turnout: 10 }); - - next_block(); - assert_eq!(Balances::free_balance(42), 2); - - assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); - assert_eq!(tally(r1), Tally { ayes: 1, nays: 0, turnout: 10 }); - - next_block(); - assert_eq!(Balances::free_balance(42), 3); - }); + new_test_ext().execute_with(|| { + let r1 = Democracy::inject_referendum( + 3, + set_balance_proposal_hash_and_note(3), + VoteThreshold::SuperMajorityApprove, + 0, + ); + let r2 = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + + assert_ok!(Democracy::vote(Origin::signed(1), r2, aye(1))); + assert_eq!( + tally(r2), + Tally { + ayes: 1, + nays: 0, + turnout: 10 + } + ); + + next_block(); + assert_eq!(Balances::free_balance(42), 2); + + assert_ok!(Democracy::vote(Origin::signed(1), r1, aye(1))); + assert_eq!( + tally(r1), + Tally { + ayes: 1, + nays: 0, + turnout: 10 + } + ); + + next_block(); + 
assert_eq!(Balances::free_balance(42), 3); + }); } #[test] fn delayed_enactment_should_work() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 1 - ); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); - assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); - assert_ok!(Democracy::vote(Origin::signed(4), r, aye(4))); - assert_ok!(Democracy::vote(Origin::signed(5), r, aye(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, aye(6))); - - assert_eq!(tally(r), Tally { ayes: 21, nays: 0, turnout: 210 }); - - next_block(); - assert_eq!(Balances::free_balance(42), 0); - - next_block(); - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 1, + ); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, aye(2))); + assert_ok!(Democracy::vote(Origin::signed(3), r, aye(3))); + assert_ok!(Democracy::vote(Origin::signed(4), r, aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, aye(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, aye(6))); + + assert_eq!( + tally(r), + Tally { + ayes: 21, + nays: 0, + turnout: 210 + } + ); + + next_block(); + assert_eq!(Balances::free_balance(42), 0); + + next_block(); + assert_eq!(Balances::free_balance(42), 2); + }); } diff --git a/frame/democracy/src/tests/voting.rs b/frame/democracy/src/tests/voting.rs index 43aed29a32..d9e6a47f93 100644 --- a/frame/democracy/src/tests/voting.rs +++ b/frame/democracy/src/tests/voting.rs @@ -20,146 +20,191 @@ use super::*; #[test] fn overvoting_should_fail() { - new_test_ext().execute_with(|| { - let r = begin_referendum(); - assert_noop!(Democracy::vote(Origin::signed(1), r, aye(2)), 
Error::::InsufficientFunds); - }); + new_test_ext().execute_with(|| { + let r = begin_referendum(); + assert_noop!( + Democracy::vote(Origin::signed(1), r, aye(2)), + Error::::InsufficientFunds + ); + }); } #[test] fn split_voting_should_work() { - new_test_ext().execute_with(|| { - let r = begin_referendum(); - let v = AccountVote::Split { aye: 40, nay: 20 }; - assert_noop!(Democracy::vote(Origin::signed(5), r, v), Error::::InsufficientFunds); - let v = AccountVote::Split { aye: 30, nay: 20 }; - assert_ok!(Democracy::vote(Origin::signed(5), r, v)); - - assert_eq!(tally(r), Tally { ayes: 3, nays: 2, turnout: 50 }); - }); + new_test_ext().execute_with(|| { + let r = begin_referendum(); + let v = AccountVote::Split { aye: 40, nay: 20 }; + assert_noop!( + Democracy::vote(Origin::signed(5), r, v), + Error::::InsufficientFunds + ); + let v = AccountVote::Split { aye: 30, nay: 20 }; + assert_ok!(Democracy::vote(Origin::signed(5), r, v)); + + assert_eq!( + tally(r), + Tally { + ayes: 3, + nays: 2, + turnout: 50 + } + ); + }); } #[test] fn split_vote_cancellation_should_work() { - new_test_ext().execute_with(|| { - let r = begin_referendum(); - let v = AccountVote::Split { aye: 30, nay: 20 }; - assert_ok!(Democracy::vote(Origin::signed(5), r, v)); - assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); - assert_eq!(tally(r), Tally { ayes: 0, nays: 0, turnout: 0 }); - assert_ok!(Democracy::unlock(Origin::signed(5), 5)); - assert_eq!(Balances::locks(5), vec![]); - }); + new_test_ext().execute_with(|| { + let r = begin_referendum(); + let v = AccountVote::Split { aye: 30, nay: 20 }; + assert_ok!(Democracy::vote(Origin::signed(5), r, v)); + assert_ok!(Democracy::remove_vote(Origin::signed(5), r)); + assert_eq!( + tally(r), + Tally { + ayes: 0, + nays: 0, + turnout: 0 + } + ); + assert_ok!(Democracy::unlock(Origin::signed(5), 5)); + assert_eq!(Balances::locks(5), vec![]); + }); } #[test] fn single_proposal_should_work() { - new_test_ext().execute_with(|| { - 
System::set_block_number(0); - assert_ok!(propose_set_balance_and_note(1, 2, 1)); - let r = 0; - assert!(Democracy::referendum_info(r).is_none()); - - // start of 2 => next referendum scheduled. - fast_forward_to(2); - assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); - - assert_eq!(Democracy::referendum_count(), 1); - assert_eq!( - Democracy::referendum_status(0), - Ok(ReferendumStatus { - end: 4, - proposal_hash: set_balance_proposal_hash_and_note(2), - threshold: VoteThreshold::SuperMajorityApprove, - delay: 2, - tally: Tally { ayes: 1, nays: 0, turnout: 10 }, - }) - ); - - fast_forward_to(3); - - // referendum still running - assert!(Democracy::referendum_status(0).is_ok()); - - // referendum runs during 2 and 3, ends @ start of 4. - fast_forward_to(4); - - assert!(Democracy::referendum_status(0).is_err()); - assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); - - // referendum passes and wait another two blocks for enactment. - fast_forward_to(6); - - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + System::set_block_number(0); + assert_ok!(propose_set_balance_and_note(1, 2, 1)); + let r = 0; + assert!(Democracy::referendum_info(r).is_none()); + + // start of 2 => next referendum scheduled. + fast_forward_to(2); + assert_ok!(Democracy::vote(Origin::signed(1), r, aye(1))); + + assert_eq!(Democracy::referendum_count(), 1); + assert_eq!( + Democracy::referendum_status(0), + Ok(ReferendumStatus { + end: 4, + proposal_hash: set_balance_proposal_hash_and_note(2), + threshold: VoteThreshold::SuperMajorityApprove, + delay: 2, + tally: Tally { + ayes: 1, + nays: 0, + turnout: 10 + }, + }) + ); + + fast_forward_to(3); + + // referendum still running + assert!(Democracy::referendum_status(0).is_ok()); + + // referendum runs during 2 and 3, ends @ start of 4. 
+ fast_forward_to(4); + + assert!(Democracy::referendum_status(0).is_err()); + assert!(pallet_scheduler::Agenda::::get(6)[0].is_some()); + + // referendum passes and wait another two blocks for enactment. + fast_forward_to(6); + + assert_eq!(Balances::free_balance(42), 2); + }); } #[test] fn controversial_voting_should_work() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - - assert_ok!(Democracy::vote(Origin::signed(1), r, big_aye(1))); - assert_ok!(Democracy::vote(Origin::signed(2), r, big_nay(2))); - assert_ok!(Democracy::vote(Origin::signed(3), r, big_nay(3))); - assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); - assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); - - assert_eq!(tally(r), Tally { ayes: 110, nays: 100, turnout: 210 }); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + + assert_ok!(Democracy::vote(Origin::signed(1), r, big_aye(1))); + assert_ok!(Democracy::vote(Origin::signed(2), r, big_nay(2))); + assert_ok!(Democracy::vote(Origin::signed(3), r, big_nay(3))); + assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + + assert_eq!( + tally(r), + Tally { + ayes: 110, + nays: 100, + turnout: 210 + } + ); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 2); + }); } #[test] fn controversial_low_turnout_voting_should_work() { - new_test_ext().execute_with(|| { - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - 
VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); - - assert_eq!(tally(r), Tally { ayes: 60, nays: 50, turnout: 110 }); - - next_block(); - next_block(); - - assert_eq!(Balances::free_balance(42), 0); - }); + new_test_ext().execute_with(|| { + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + + assert_eq!( + tally(r), + Tally { + ayes: 60, + nays: 50, + turnout: 110 + } + ); + + next_block(); + next_block(); + + assert_eq!(Balances::free_balance(42), 0); + }); } #[test] fn passing_low_turnout_voting_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(42), 0); - assert_eq!(Balances::total_issuance(), 210); - - let r = Democracy::inject_referendum( - 2, - set_balance_proposal_hash_and_note(2), - VoteThreshold::SuperMajorityApprove, - 0 - ); - assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); - assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); - assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); - assert_eq!(tally(r), Tally { ayes: 100, nays: 50, turnout: 150 }); - - next_block(); - next_block(); - assert_eq!(Balances::free_balance(42), 2); - }); + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(42), 0); + assert_eq!(Balances::total_issuance(), 210); + + let r = Democracy::inject_referendum( + 2, + set_balance_proposal_hash_and_note(2), + VoteThreshold::SuperMajorityApprove, + 0, + ); + assert_ok!(Democracy::vote(Origin::signed(4), r, big_aye(4))); + assert_ok!(Democracy::vote(Origin::signed(5), r, big_nay(5))); + assert_ok!(Democracy::vote(Origin::signed(6), r, big_aye(6))); + assert_eq!( + tally(r), + Tally { + ayes: 100, + nays: 50, + 
turnout: 150 + } + ); + + next_block(); + next_block(); + assert_eq!(Balances::free_balance(42), 2); + }); } diff --git a/frame/democracy/src/types.rs b/frame/democracy/src/types.rs index 3454326364..0ff67a647c 100644 --- a/frame/democracy/src/types.rs +++ b/frame/democracy/src/types.rs @@ -16,209 +16,222 @@ //! Miscellaneous additional datatypes. -use codec::{Encode, Decode}; +use crate::{AccountVote, Conviction, Vote, VoteThreshold}; +use codec::{Decode, Encode}; +use sp_runtime::traits::{ + Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedSub, Saturating, Zero, +}; use sp_runtime::RuntimeDebug; -use sp_runtime::traits::{Zero, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, Saturating}; -use crate::{Vote, VoteThreshold, AccountVote, Conviction}; /// Info regarding an ongoing referendum. #[derive(Encode, Decode, Default, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Tally { - /// The number of aye votes, expressed in terms of post-conviction lock-vote. - pub (crate) ayes: Balance, - /// The number of nay votes, expressed in terms of post-conviction lock-vote. - pub (crate) nays: Balance, - /// The amount of funds currently expressing its opinion. Pre-conviction. - pub (crate) turnout: Balance, + /// The number of aye votes, expressed in terms of post-conviction lock-vote. + pub(crate) ayes: Balance, + /// The number of nay votes, expressed in terms of post-conviction lock-vote. + pub(crate) nays: Balance, + /// The amount of funds currently expressing its opinion. Pre-conviction. + pub(crate) turnout: Balance, } /// Amount of votes and capital placed in delegation for an account. #[derive(Encode, Decode, Default, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Delegations { - /// The number of votes (this is post-conviction). - pub (crate) votes: Balance, - /// The amount of raw capital, used for the turnout. - pub (crate) capital: Balance, + /// The number of votes (this is post-conviction). 
+ pub(crate) votes: Balance, + /// The amount of raw capital, used for the turnout. + pub(crate) capital: Balance, } impl Saturating for Delegations { - fn saturating_add(self, o: Self) -> Self { - Self { - votes: self.votes.saturating_add(o.votes), - capital: self.capital.saturating_add(o.capital), - } - } - - fn saturating_sub(self, o: Self) -> Self { - Self { - votes: self.votes.saturating_sub(o.votes), - capital: self.capital.saturating_sub(o.capital), - } - } - - fn saturating_mul(self, o: Self) -> Self { - Self { - votes: self.votes.saturating_mul(o.votes), - capital: self.capital.saturating_mul(o.capital), - } - } - - fn saturating_pow(self, exp: usize) -> Self { - Self { - votes: self.votes.saturating_pow(exp), - capital: self.capital.saturating_pow(exp), - } - } + fn saturating_add(self, o: Self) -> Self { + Self { + votes: self.votes.saturating_add(o.votes), + capital: self.capital.saturating_add(o.capital), + } + } + + fn saturating_sub(self, o: Self) -> Self { + Self { + votes: self.votes.saturating_sub(o.votes), + capital: self.capital.saturating_sub(o.capital), + } + } + + fn saturating_mul(self, o: Self) -> Self { + Self { + votes: self.votes.saturating_mul(o.votes), + capital: self.capital.saturating_mul(o.capital), + } + } + + fn saturating_pow(self, exp: usize) -> Self { + Self { + votes: self.votes.saturating_pow(exp), + capital: self.capital.saturating_pow(exp), + } + } } impl< - Balance: From + Zero + Copy + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Bounded + - Saturating -> Tally { - /// Create a new tally. - pub fn new( - vote: Vote, - balance: Balance, - ) -> Self { - let Delegations { votes, capital } = vote.conviction.votes(balance); - Self { - ayes: if vote.aye { votes } else { Zero::zero() }, - nays: if vote.aye { Zero::zero() } else { votes }, - turnout: capital, - } - } - - /// Add an account's vote into the tally. 
- pub fn add( - &mut self, - vote: AccountVote, - ) -> Option<()> { - match vote { - AccountVote::Standard { vote, balance } => { - let Delegations { votes, capital } = vote.conviction.votes(balance); - self.turnout = self.turnout.checked_add(&capital)?; - match vote.aye { - true => self.ayes = self.ayes.checked_add(&votes)?, - false => self.nays = self.nays.checked_add(&votes)?, - } - } - AccountVote::Split { aye, nay } => { - let aye = Conviction::None.votes(aye); - let nay = Conviction::None.votes(nay); - self.turnout = self.turnout.checked_add(&aye.capital)?.checked_add(&nay.capital)?; - self.ayes = self.ayes.checked_add(&aye.votes)?; - self.nays = self.nays.checked_add(&nay.votes)?; - } - } - Some(()) - } - - /// Remove an account's vote from the tally. - pub fn remove( - &mut self, - vote: AccountVote, - ) -> Option<()> { - match vote { - AccountVote::Standard { vote, balance } => { - let Delegations { votes, capital } = vote.conviction.votes(balance); - self.turnout = self.turnout.checked_sub(&capital)?; - match vote.aye { - true => self.ayes = self.ayes.checked_sub(&votes)?, - false => self.nays = self.nays.checked_sub(&votes)?, - } - } - AccountVote::Split { aye, nay } => { - let aye = Conviction::None.votes(aye); - let nay = Conviction::None.votes(nay); - self.turnout = self.turnout.checked_sub(&aye.capital)?.checked_sub(&nay.capital)?; - self.ayes = self.ayes.checked_sub(&aye.votes)?; - self.nays = self.nays.checked_sub(&nay.votes)?; - } - } - Some(()) - } - - /// Increment some amount of votes. - pub fn increase(&mut self, approve: bool, delegations: Delegations) -> Option<()> { - self.turnout = self.turnout.saturating_add(delegations.capital); - match approve { - true => self.ayes = self.ayes.saturating_add(delegations.votes), - false => self.nays = self.nays.saturating_add(delegations.votes), - } - Some(()) - } - - /// Decrement some amount of votes. 
- pub fn reduce(&mut self, approve: bool, delegations: Delegations) -> Option<()> { - self.turnout = self.turnout.saturating_sub(delegations.capital); - match approve { - true => self.ayes = self.ayes.saturating_sub(delegations.votes), - false => self.nays = self.nays.saturating_sub(delegations.votes), - } - Some(()) - } + Balance: From + + Zero + + Copy + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Bounded + + Saturating, + > Tally +{ + /// Create a new tally. + pub fn new(vote: Vote, balance: Balance) -> Self { + let Delegations { votes, capital } = vote.conviction.votes(balance); + Self { + ayes: if vote.aye { votes } else { Zero::zero() }, + nays: if vote.aye { Zero::zero() } else { votes }, + turnout: capital, + } + } + + /// Add an account's vote into the tally. + pub fn add(&mut self, vote: AccountVote) -> Option<()> { + match vote { + AccountVote::Standard { vote, balance } => { + let Delegations { votes, capital } = vote.conviction.votes(balance); + self.turnout = self.turnout.checked_add(&capital)?; + match vote.aye { + true => self.ayes = self.ayes.checked_add(&votes)?, + false => self.nays = self.nays.checked_add(&votes)?, + } + } + AccountVote::Split { aye, nay } => { + let aye = Conviction::None.votes(aye); + let nay = Conviction::None.votes(nay); + self.turnout = self + .turnout + .checked_add(&aye.capital)? + .checked_add(&nay.capital)?; + self.ayes = self.ayes.checked_add(&aye.votes)?; + self.nays = self.nays.checked_add(&nay.votes)?; + } + } + Some(()) + } + + /// Remove an account's vote from the tally. 
+ pub fn remove(&mut self, vote: AccountVote) -> Option<()> { + match vote { + AccountVote::Standard { vote, balance } => { + let Delegations { votes, capital } = vote.conviction.votes(balance); + self.turnout = self.turnout.checked_sub(&capital)?; + match vote.aye { + true => self.ayes = self.ayes.checked_sub(&votes)?, + false => self.nays = self.nays.checked_sub(&votes)?, + } + } + AccountVote::Split { aye, nay } => { + let aye = Conviction::None.votes(aye); + let nay = Conviction::None.votes(nay); + self.turnout = self + .turnout + .checked_sub(&aye.capital)? + .checked_sub(&nay.capital)?; + self.ayes = self.ayes.checked_sub(&aye.votes)?; + self.nays = self.nays.checked_sub(&nay.votes)?; + } + } + Some(()) + } + + /// Increment some amount of votes. + pub fn increase(&mut self, approve: bool, delegations: Delegations) -> Option<()> { + self.turnout = self.turnout.saturating_add(delegations.capital); + match approve { + true => self.ayes = self.ayes.saturating_add(delegations.votes), + false => self.nays = self.nays.saturating_add(delegations.votes), + } + Some(()) + } + + /// Decrement some amount of votes. + pub fn reduce(&mut self, approve: bool, delegations: Delegations) -> Option<()> { + self.turnout = self.turnout.saturating_sub(delegations.capital); + match approve { + true => self.ayes = self.ayes.saturating_sub(delegations.votes), + false => self.nays = self.nays.saturating_sub(delegations.votes), + } + Some(()) + } } /// Info regarding an ongoing referendum. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct ReferendumStatus { - /// When voting on this referendum will end. - pub (crate) end: BlockNumber, - /// The hash of the proposal being voted on. - pub (crate) proposal_hash: Hash, - /// The thresholding mechanism to determine whether it passed. - pub (crate) threshold: VoteThreshold, - /// The delay (in blocks) to wait after a successful referendum before deploying. 
- pub (crate) delay: BlockNumber, - /// The current tally of votes in this referendum. - pub (crate) tally: Tally, + /// When voting on this referendum will end. + pub(crate) end: BlockNumber, + /// The hash of the proposal being voted on. + pub(crate) proposal_hash: Hash, + /// The thresholding mechanism to determine whether it passed. + pub(crate) threshold: VoteThreshold, + /// The delay (in blocks) to wait after a successful referendum before deploying. + pub(crate) delay: BlockNumber, + /// The current tally of votes in this referendum. + pub(crate) tally: Tally, } /// Info regarding a referendum, present or past. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub enum ReferendumInfo { - /// Referendum is happening, the arg is the block number at which it will end. - Ongoing(ReferendumStatus), - /// Referendum finished at `end`, and has been `approved` or rejected. - Finished{approved: bool, end: BlockNumber}, + /// Referendum is happening, the arg is the block number at which it will end. + Ongoing(ReferendumStatus), + /// Referendum finished at `end`, and has been `approved` or rejected. + Finished { approved: bool, end: BlockNumber }, } impl ReferendumInfo { - /// Create a new instance. - pub fn new( - end: BlockNumber, - proposal_hash: Hash, - threshold: VoteThreshold, - delay: BlockNumber, - ) -> Self { - let s = ReferendumStatus{ end, proposal_hash, threshold, delay, tally: Tally::default() }; - ReferendumInfo::Ongoing(s) - } + /// Create a new instance. + pub fn new( + end: BlockNumber, + proposal_hash: Hash, + threshold: VoteThreshold, + delay: BlockNumber, + ) -> Self { + let s = ReferendumStatus { + end, + proposal_hash, + threshold, + delay, + tally: Tally::default(), + }; + ReferendumInfo::Ongoing(s) + } } /// State of a proxy voting account. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub enum ProxyState { - /// Account is open to becoming a proxy but is not yet assigned. 
- Open(AccountId), - /// Account is actively being a proxy. - Active(AccountId), + /// Account is open to becoming a proxy but is not yet assigned. + Open(AccountId), + /// Account is actively being a proxy. + Active(AccountId), } impl ProxyState { - pub (crate) fn as_active(self) -> Option { - match self { - ProxyState::Active(a) => Some(a), - ProxyState::Open(_) => None, - } - } + pub(crate) fn as_active(self) -> Option { + match self { + ProxyState::Active(a) => Some(a), + ProxyState::Open(_) => None, + } + } } /// Whether an `unvote` operation is able to make actions that are not strictly always in the /// interest of an account. pub enum UnvoteScope { - /// Permitted to do everything. - Any, - /// Permitted to do only the changes that do not need the owner's permission. - OnlyExpired, + /// Permitted to do everything. + Any, + /// Permitted to do only the changes that do not need the owner's permission. + OnlyExpired, } diff --git a/frame/democracy/src/vote.rs b/frame/democracy/src/vote.rs index a41eb342aa..5fd11a95de 100644 --- a/frame/democracy/src/vote.rs +++ b/frame/democracy/src/vote.rs @@ -16,75 +16,79 @@ //! The vote datatype. -use sp_std::{prelude::*, result::Result, convert::TryFrom}; -use codec::{Encode, EncodeLike, Decode, Output, Input}; -use sp_runtime::{RuntimeDebug, traits::{Saturating, Zero}}; -use crate::{Conviction, ReferendumIndex, Delegations}; +use crate::{Conviction, Delegations, ReferendumIndex}; +use codec::{Decode, Encode, EncodeLike, Input, Output}; +use sp_runtime::{ + traits::{Saturating, Zero}, + RuntimeDebug, +}; +use sp_std::{convert::TryFrom, prelude::*, result::Result}; /// A number of lock periods, plus a vote, one way or the other. 
#[derive(Copy, Clone, Eq, PartialEq, Default, RuntimeDebug)] pub struct Vote { - pub aye: bool, - pub conviction: Conviction, + pub aye: bool, + pub conviction: Conviction, } impl Encode for Vote { - fn encode_to(&self, output: &mut T) { - output.push_byte(u8::from(self.conviction) | if self.aye { 0b1000_0000 } else { 0 }); - } + fn encode_to(&self, output: &mut T) { + output.push_byte(u8::from(self.conviction) | if self.aye { 0b1000_0000 } else { 0 }); + } } impl EncodeLike for Vote {} impl Decode for Vote { - fn decode(input: &mut I) -> Result { - let b = input.read_byte()?; - Ok(Vote { - aye: (b & 0b1000_0000) == 0b1000_0000, - conviction: Conviction::try_from(b & 0b0111_1111) - .map_err(|_| codec::Error::from("Invalid conviction"))?, - }) - } + fn decode(input: &mut I) -> Result { + let b = input.read_byte()?; + Ok(Vote { + aye: (b & 0b1000_0000) == 0b1000_0000, + conviction: Conviction::try_from(b & 0b0111_1111) + .map_err(|_| codec::Error::from("Invalid conviction"))?, + }) + } } /// A vote for a referendum of a particular account. #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, RuntimeDebug)] pub enum AccountVote { - /// A standard vote, one-way (approve or reject) with a given amount of conviction. - Standard { vote: Vote, balance: Balance }, - /// A split vote with balances given for both ways, and with no conviction, useful for - /// parachains when voting. - Split { aye: Balance, nay: Balance }, + /// A standard vote, one-way (approve or reject) with a given amount of conviction. + Standard { vote: Vote, balance: Balance }, + /// A split vote with balances given for both ways, and with no conviction, useful for + /// parachains when voting. + Split { aye: Balance, nay: Balance }, } impl AccountVote { - /// Returns `Some` of the lock periods that the account is locked for, assuming that the - /// referendum passed iff `approved` is `true`. 
- pub fn locked_if(self, approved: bool) -> Option<(u32, Balance)> { - // winning side: can only be removed after the lock period ends. - match self { - AccountVote::Standard { vote, balance } if vote.aye == approved => - Some((vote.conviction.lock_periods(), balance)), - _ => None, - } - } - - /// The total balance involved in this vote. - pub fn balance(self) -> Balance { - match self { - AccountVote::Standard { balance, .. } => balance, - AccountVote::Split { aye, nay } => aye.saturating_add(nay), - } - } - - /// Returns `Some` with whether the vote is an aye vote if it is standard, otherwise `None` if - /// it is split. - pub fn as_standard(self) -> Option { - match self { - AccountVote::Standard { vote, .. } => Some(vote.aye), - _ => None, - } - } + /// Returns `Some` of the lock periods that the account is locked for, assuming that the + /// referendum passed iff `approved` is `true`. + pub fn locked_if(self, approved: bool) -> Option<(u32, Balance)> { + // winning side: can only be removed after the lock period ends. + match self { + AccountVote::Standard { vote, balance } if vote.aye == approved => { + Some((vote.conviction.lock_periods(), balance)) + } + _ => None, + } + } + + /// The total balance involved in this vote. + pub fn balance(self) -> Balance { + match self { + AccountVote::Standard { balance, .. } => balance, + AccountVote::Split { aye, nay } => aye.saturating_add(nay), + } + } + + /// Returns `Some` with whether the vote is an aye vote if it is standard, otherwise `None` if + /// it is split. + pub fn as_standard(self) -> Option { + match self { + AccountVote::Standard { vote, .. } => Some(vote.aye), + _ => None, + } + } } /// A "prior" lock, i.e. a lock for some now-forgotten reason. @@ -92,90 +96,101 @@ impl AccountVote { pub struct PriorLock(BlockNumber, Balance); impl PriorLock { - /// Accumulates an additional lock. 
- pub fn accumulate(&mut self, until: BlockNumber, amount: Balance) { - self.0 = self.0.max(until); - self.1 = self.1.max(amount); - } - - pub fn locked(&self) -> Balance { - self.1 - } - - pub fn rejig(&mut self, now: BlockNumber) { - if now >= self.0 { - self.0 = Zero::zero(); - self.1 = Zero::zero(); - } - } + /// Accumulates an additional lock. + pub fn accumulate(&mut self, until: BlockNumber, amount: Balance) { + self.0 = self.0.max(until); + self.1 = self.1.max(amount); + } + + pub fn locked(&self) -> Balance { + self.1 + } + + pub fn rejig(&mut self, now: BlockNumber) { + if now >= self.0 { + self.0 = Zero::zero(); + self.1 = Zero::zero(); + } + } } /// An indicator for what an account is doing; it can either be delegating or voting. #[derive(Encode, Decode, Clone, Eq, PartialEq, RuntimeDebug)] pub enum Voting { - /// The account is voting directly. `delegations` is the total amount of post-conviction voting - /// weight that it controls from those that have delegated to it. - Direct { - /// The current votes of the account. - votes: Vec<(ReferendumIndex, AccountVote)>, - /// The total amount of delegations that this account has received. - delegations: Delegations, - /// Any pre-existing locks from past voting/delegating activity. - prior: PriorLock, - }, - /// The account is delegating `balance` of its balance to a `target` account with `conviction`. - Delegating { - balance: Balance, - target: AccountId, - conviction: Conviction, - /// The total amount of delegations that this account has received. - delegations: Delegations, - /// Any pre-existing locks from past voting/delegating activity. - prior: PriorLock, - }, + /// The account is voting directly. `delegations` is the total amount of post-conviction voting + /// weight that it controls from those that have delegated to it. + Direct { + /// The current votes of the account. + votes: Vec<(ReferendumIndex, AccountVote)>, + /// The total amount of delegations that this account has received. 
+ delegations: Delegations, + /// Any pre-existing locks from past voting/delegating activity. + prior: PriorLock, + }, + /// The account is delegating `balance` of its balance to a `target` account with `conviction`. + Delegating { + balance: Balance, + target: AccountId, + conviction: Conviction, + /// The total amount of delegations that this account has received. + delegations: Delegations, + /// Any pre-existing locks from past voting/delegating activity. + prior: PriorLock, + }, } -impl Default for Voting { - fn default() -> Self { - Voting::Direct { - votes: Vec::new(), - delegations: Default::default(), - prior: PriorLock(Zero::zero(), Default::default()), - } - } +impl Default + for Voting +{ + fn default() -> Self { + Voting::Direct { + votes: Vec::new(), + delegations: Default::default(), + prior: PriorLock(Zero::zero(), Default::default()), + } + } } -impl< - Balance: Saturating + Ord + Zero + Copy, - BlockNumber: Ord + Copy + Zero, - AccountId, -> Voting { - pub fn rejig(&mut self, now: BlockNumber) { - match self { - Voting::Direct { prior, .. } => prior, - Voting::Delegating { prior, .. } => prior, - }.rejig(now); - } - - /// The amount of this account's balance that much currently be locked due to voting. - pub fn locked_balance(&self) -> Balance { - match self { - Voting::Direct { votes, prior, .. } => votes.iter() - .map(|i| i.1.balance()) - .fold(prior.locked(), |a, i| a.max(i)), - Voting::Delegating { balance, .. } => *balance, - } - } - - pub fn set_common(&mut self, - delegations: Delegations, - prior: PriorLock - ) { - let (d, p) = match self { - Voting::Direct { ref mut delegations, ref mut prior, .. } => (delegations, prior), - Voting::Delegating { ref mut delegations, ref mut prior, .. } => (delegations, prior), - }; - *d = delegations; - *p = prior; - } +impl + Voting +{ + pub fn rejig(&mut self, now: BlockNumber) { + match self { + Voting::Direct { prior, .. } => prior, + Voting::Delegating { prior, .. 
} => prior, + } + .rejig(now); + } + + /// The amount of this account's balance that much currently be locked due to voting. + pub fn locked_balance(&self) -> Balance { + match self { + Voting::Direct { votes, prior, .. } => votes + .iter() + .map(|i| i.1.balance()) + .fold(prior.locked(), |a, i| a.max(i)), + Voting::Delegating { balance, .. } => *balance, + } + } + + pub fn set_common( + &mut self, + delegations: Delegations, + prior: PriorLock, + ) { + let (d, p) = match self { + Voting::Direct { + ref mut delegations, + ref mut prior, + .. + } => (delegations, prior), + Voting::Delegating { + ref mut delegations, + ref mut prior, + .. + } => (delegations, prior), + }; + *d = delegations; + *p = prior; + } } diff --git a/frame/democracy/src/vote_threshold.rs b/frame/democracy/src/vote_threshold.rs index fd976b4400..e261a84d5e 100644 --- a/frame/democracy/src/vote_threshold.rs +++ b/frame/democracy/src/vote_threshold.rs @@ -16,85 +16,116 @@ //! Voting thresholds. -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; -use sp_runtime::traits::{Zero, IntegerSquareRoot}; -use sp_std::ops::{Add, Mul, Div, Rem}; use crate::Tally; +use codec::{Decode, Encode}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use sp_runtime::traits::{IntegerSquareRoot, Zero}; +use sp_std::ops::{Add, Div, Mul, Rem}; /// A means of determining if a vote is past pass threshold. #[derive(Clone, Copy, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum VoteThreshold { - /// A supermajority of approvals is needed to pass this vote. - SuperMajorityApprove, - /// A supermajority of rejects is needed to fail this vote. - SuperMajorityAgainst, - /// A simple majority of approvals is needed to pass this vote. - SimpleMajority, + /// A supermajority of approvals is needed to pass this vote. 
+ SuperMajorityApprove, + /// A supermajority of rejects is needed to fail this vote. + SuperMajorityAgainst, + /// A simple majority of approvals is needed to pass this vote. + SimpleMajority, } pub trait Approved { - /// Given a `tally` of votes and a total size of `electorate`, this returns `true` if the - /// overall outcome is in favor of approval according to `self`'s threshold method. - fn approved(&self, tally: Tally, electorate: Balance) -> bool; + /// Given a `tally` of votes and a total size of `electorate`, this returns `true` if the + /// overall outcome is in favor of approval according to `self`'s threshold method. + fn approved(&self, tally: Tally, electorate: Balance) -> bool; } /// Return `true` iff `n1 / d1 < n2 / d2`. `d1` and `d2` may not be zero. -fn compare_rationals + Div + Rem + Ord + Copy>(mut n1: T, mut d1: T, mut n2: T, mut d2: T) -> bool { - // Uses a continued fractional representation for a non-overflowing compare. - // Detailed at https://janmr.com/blog/2014/05/comparing-rational-numbers-without-overflow/. - loop { - let q1 = n1 / d1; - let q2 = n2 / d2; - if q1 < q2 { - return true; - } - if q2 < q1 { - return false; - } - let r1 = n1 % d1; - let r2 = n2 % d2; - if r2.is_zero() { - return false; - } - if r1.is_zero() { - return true; - } - n1 = d2; - n2 = d1; - d1 = r2; - d2 = r1; - } +fn compare_rationals< + T: Zero + Mul + Div + Rem + Ord + Copy, +>( + mut n1: T, + mut d1: T, + mut n2: T, + mut d2: T, +) -> bool { + // Uses a continued fractional representation for a non-overflowing compare. + // Detailed at https://janmr.com/blog/2014/05/comparing-rational-numbers-without-overflow/. 
+ loop { + let q1 = n1 / d1; + let q2 = n2 / d2; + if q1 < q2 { + return true; + } + if q2 < q1 { + return false; + } + let r1 = n1 % d1; + let r2 = n2 % d2; + if r2.is_zero() { + return false; + } + if r1.is_zero() { + return true; + } + n1 = d2; + n2 = d1; + d1 = r2; + d2 = r1; + } } impl< - Balance: IntegerSquareRoot + Zero + Ord + Add - + Mul + Div - + Rem + Copy, -> Approved for VoteThreshold { - fn approved(&self, tally: Tally, electorate: Balance) -> bool { - let sqrt_voters = tally.turnout.integer_sqrt(); - let sqrt_electorate = electorate.integer_sqrt(); - if sqrt_voters.is_zero() { return false; } - match *self { - VoteThreshold::SuperMajorityApprove => - compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate), - VoteThreshold::SuperMajorityAgainst => - compare_rationals(tally.nays, sqrt_electorate, tally.ayes, sqrt_voters), - VoteThreshold::SimpleMajority => tally.ayes > tally.nays, - } - } + Balance: IntegerSquareRoot + + Zero + + Ord + + Add + + Mul + + Div + + Rem + + Copy, + > Approved for VoteThreshold +{ + fn approved(&self, tally: Tally, electorate: Balance) -> bool { + let sqrt_voters = tally.turnout.integer_sqrt(); + let sqrt_electorate = electorate.integer_sqrt(); + if sqrt_voters.is_zero() { + return false; + } + match *self { + VoteThreshold::SuperMajorityApprove => { + compare_rationals(tally.nays, sqrt_voters, tally.ayes, sqrt_electorate) + } + VoteThreshold::SuperMajorityAgainst => { + compare_rationals(tally.nays, sqrt_electorate, tally.ayes, sqrt_voters) + } + VoteThreshold::SimpleMajority => tally.ayes > tally.nays, + } + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn should_work() { - assert!(!VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 60, nays: 50, turnout: 110}, 210)); - assert!(VoteThreshold::SuperMajorityApprove.approved(Tally{ayes: 100, nays: 50, turnout: 150}, 210)); - } + #[test] + fn should_work() { + assert!(!VoteThreshold::SuperMajorityApprove.approved( + Tally { + 
ayes: 60, + nays: 50, + turnout: 110 + }, + 210 + )); + assert!(VoteThreshold::SuperMajorityApprove.approved( + Tally { + ayes: 100, + nays: 50, + turnout: 150 + }, + 210 + )); + } } diff --git a/frame/elections-phragmen/src/lib.rs b/frame/elections-phragmen/src/lib.rs index 610f008457..f85e302e28 100644 --- a/frame/elections-phragmen/src/lib.rs +++ b/frame/elections-phragmen/src/lib.rs @@ -82,437 +82,450 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_runtime::{ - print, DispatchResult, DispatchError, Perbill, traits::{Zero, StaticLookup, Convert}, -}; use frame_support::{ - decl_storage, decl_event, ensure, decl_module, decl_error, - weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, storage::{StorageMap, IterableStorageMap}, - traits::{ - Currency, Get, LockableCurrency, LockIdentifier, ReservableCurrency, WithdrawReasons, - ChangeMembers, OnUnbalanced, WithdrawReason, Contains, BalanceStatus, InitializeMembers, - } + decl_error, decl_event, decl_module, decl_storage, ensure, + storage::{IterableStorageMap, StorageMap}, + traits::{ + BalanceStatus, ChangeMembers, Contains, Currency, Get, InitializeMembers, LockIdentifier, + LockableCurrency, OnUnbalanced, ReservableCurrency, WithdrawReason, WithdrawReasons, + }, + weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, }; -use sp_phragmen::{build_support_map, ExtendedBalance, VoteWeight, PhragmenResult}; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed}; +use sp_phragmen::{build_support_map, ExtendedBalance, PhragmenResult, VoteWeight}; +use sp_runtime::{ + print, + traits::{Convert, StaticLookup, Zero}, + DispatchError, DispatchResult, Perbill, +}; +use sp_std::prelude::*; const MODULE_ID: LockIdentifier = *b"phrelect"; /// The maximum votes allowed per voter. 
pub const MAXIMUM_VOTE: usize = 16; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: frame_system::Trait { - /// The overarching event type.c - type Event: From> + Into<::Event>; + /// The overarching event type.c + type Event: From> + Into<::Event>; - /// The currency that people are electing with. - type Currency: - LockableCurrency + - ReservableCurrency; + /// The currency that people are electing with. + type Currency: LockableCurrency + + ReservableCurrency; - /// What to do when the members change. - type ChangeMembers: ChangeMembers; + /// What to do when the members change. + type ChangeMembers: ChangeMembers; - /// What to do with genesis members - type InitializeMembers: InitializeMembers; + /// What to do with genesis members + type InitializeMembers: InitializeMembers; - /// Convert a balance into a number used for election calculation. - /// This must fit into a `u64` but is allowed to be sensibly lossy. - type CurrencyToVote: Convert, VoteWeight> + Convert>; + /// Convert a balance into a number used for election calculation. + /// This must fit into a `u64` but is allowed to be sensibly lossy. + type CurrencyToVote: Convert, VoteWeight> + + Convert>; - /// How much should be locked up in order to submit one's candidacy. - type CandidacyBond: Get>; + /// How much should be locked up in order to submit one's candidacy. + type CandidacyBond: Get>; - /// How much should be locked up in order to be able to submit votes. - type VotingBond: Get>; + /// How much should be locked up in order to be able to submit votes. 
+ type VotingBond: Get>; - /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up) - type LoserCandidate: OnUnbalanced>; + /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner-up) + type LoserCandidate: OnUnbalanced>; - /// Handler for the unbalanced reduction when a reporter has submitted a bad defunct report. - type BadReport: OnUnbalanced>; + /// Handler for the unbalanced reduction when a reporter has submitted a bad defunct report. + type BadReport: OnUnbalanced>; - /// Handler for the unbalanced reduction when a member has been kicked. - type KickedMember: OnUnbalanced>; + /// Handler for the unbalanced reduction when a member has been kicked. + type KickedMember: OnUnbalanced>; - /// Number of members to elect. - type DesiredMembers: Get; + /// Number of members to elect. + type DesiredMembers: Get; - /// Number of runners_up to keep. - type DesiredRunnersUp: Get; + /// Number of runners_up to keep. + type DesiredRunnersUp: Get; - /// How long each seat is kept. This defines the next block number at which an election - /// round will happen. If set to zero, no elections are ever triggered and the module will - /// be in passive mode. - type TermDuration: Get; + /// How long each seat is kept. This defines the next block number at which an election + /// round will happen. If set to zero, no elections are ever triggered and the module will + /// be in passive mode. + type TermDuration: Get; } decl_storage! { - trait Store for Module as PhragmenElection { - // ---- State - /// The current elected membership. Sorted based on account id. - pub Members get(fn members): Vec<(T::AccountId, BalanceOf)>; - /// The current runners_up. Sorted based on low to high merit (worse to best runner). - pub RunnersUp get(fn runners_up): Vec<(T::AccountId, BalanceOf)>; - /// The total number of vote rounds that have happened, excluding the upcoming one. 
- pub ElectionRounds get(fn election_rounds): u32 = Zero::zero(); - - /// Votes and locked stake of a particular voter. - pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); - - /// The present candidate list. Sorted based on account-id. A current member or runner-up - /// can never enter this vector and is always implicitly assumed to be a candidate. - pub Candidates get(fn candidates): Vec; - } add_extra_genesis { - config(members): Vec<(T::AccountId, BalanceOf)>; - build(|config: &GenesisConfig| { - let members = config.members.iter().map(|(ref member, ref stake)| { - // make sure they have enough stake - assert!( - T::Currency::free_balance(member) >= *stake, - "Genesis member does not have enough stake", - ); - - // reserve candidacy bond and set as members. - T::Currency::reserve(&member, T::CandidacyBond::get()) - .expect("Genesis member does not have enough balance to be a candidate"); - - // Note: all members will only vote for themselves, hence they must be given exactly - // their own stake as total backing. Any sane election should behave as such. - // Nonetheless, stakes will be updated for term 1 onwards according to the election. - Members::::mutate(|members| { - match members.binary_search_by(|(a, _b)| a.cmp(member)) { - Ok(_) => panic!("Duplicate member in elections phragmen genesis: {}", member), - Err(pos) => members.insert(pos, (member.clone(), *stake)), - } - }); - - // set self-votes to make persistent. - >::vote( - T::Origin::from(Some(member.clone()).into()), - vec![member.clone()], - *stake, - ).expect("Genesis member could not vote."); - - member.clone() - }).collect::>(); - - // report genesis members to upstream, if any. - T::InitializeMembers::initialize_members(&members); - }) - } + trait Store for Module as PhragmenElection { + // ---- State + /// The current elected membership. Sorted based on account id. + pub Members get(fn members): Vec<(T::AccountId, BalanceOf)>; + /// The current runners_up. 
Sorted based on low to high merit (worse to best runner). + pub RunnersUp get(fn runners_up): Vec<(T::AccountId, BalanceOf)>; + /// The total number of vote rounds that have happened, excluding the upcoming one. + pub ElectionRounds get(fn election_rounds): u32 = Zero::zero(); + + /// Votes and locked stake of a particular voter. + pub Voting get(fn voting): map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); + + /// The present candidate list. Sorted based on account-id. A current member or runner-up + /// can never enter this vector and is always implicitly assumed to be a candidate. + pub Candidates get(fn candidates): Vec; + } add_extra_genesis { + config(members): Vec<(T::AccountId, BalanceOf)>; + build(|config: &GenesisConfig| { + let members = config.members.iter().map(|(ref member, ref stake)| { + // make sure they have enough stake + assert!( + T::Currency::free_balance(member) >= *stake, + "Genesis member does not have enough stake", + ); + + // reserve candidacy bond and set as members. + T::Currency::reserve(&member, T::CandidacyBond::get()) + .expect("Genesis member does not have enough balance to be a candidate"); + + // Note: all members will only vote for themselves, hence they must be given exactly + // their own stake as total backing. Any sane election should behave as such. + // Nonetheless, stakes will be updated for term 1 onwards according to the election. + Members::::mutate(|members| { + match members.binary_search_by(|(a, _b)| a.cmp(member)) { + Ok(_) => panic!("Duplicate member in elections phragmen genesis: {}", member), + Err(pos) => members.insert(pos, (member.clone(), *stake)), + } + }); + + // set self-votes to make persistent. + >::vote( + T::Origin::from(Some(member.clone()).into()), + vec![member.clone()], + *stake, + ).expect("Genesis member could not vote."); + + member.clone() + }).collect::>(); + + // report genesis members to upstream, if any. 
+ T::InitializeMembers::initialize_members(&members); + }) + } } decl_error! { - /// Error for the elections-phragmen module. - pub enum Error for Module { - /// Cannot vote when no candidates or members exist. - UnableToVote, - /// Must vote for at least one candidate. - NoVotes, - /// Cannot vote more than candidates. - TooManyVotes, - /// Cannot vote more than maximum allowed. - MaximumVotesExceeded, - /// Cannot vote with stake less than minimum balance. - LowBalance, - /// Voter can not pay voting bond. - UnableToPayBond, - /// Must be a voter. - MustBeVoter, - /// Cannot report self. - ReportSelf, - /// Duplicated candidate submission. - DuplicatedCandidate, - /// Member cannot re-submit candidacy. - MemberSubmit, - /// Runner cannot re-submit candidacy. - RunnerSubmit, - /// Candidate does not have enough funds. - InsufficientCandidateFunds, - /// Origin is not a candidate, member or a runner up. - InvalidOrigin, - /// Not a member. - NotMember, - } + /// Error for the elections-phragmen module. + pub enum Error for Module { + /// Cannot vote when no candidates or members exist. + UnableToVote, + /// Must vote for at least one candidate. + NoVotes, + /// Cannot vote more than candidates. + TooManyVotes, + /// Cannot vote more than maximum allowed. + MaximumVotesExceeded, + /// Cannot vote with stake less than minimum balance. + LowBalance, + /// Voter can not pay voting bond. + UnableToPayBond, + /// Must be a voter. + MustBeVoter, + /// Cannot report self. + ReportSelf, + /// Duplicated candidate submission. + DuplicatedCandidate, + /// Member cannot re-submit candidacy. + MemberSubmit, + /// Runner cannot re-submit candidacy. + RunnerSubmit, + /// Candidate does not have enough funds. + InsufficientCandidateFunds, + /// Origin is not a candidate, member or a runner up. + InvalidOrigin, + /// Not a member. 
+ NotMember, + } } mod migration { - use super::*; - use frame_support::{migration::{StorageKeyIterator, take_storage_item}, Twox64Concat}; - pub fn migrate() { - for (who, votes) in StorageKeyIterator - ::, Twox64Concat> - ::new(b"PhragmenElection", b"VotesOf") - .drain() - { - if let Some(stake) = take_storage_item::<_, BalanceOf, Twox64Concat>(b"PhragmenElection", b"StakeOf", &who) { - Voting::::insert(who, (stake, votes)); - } - } - } + use super::*; + use frame_support::{ + migration::{take_storage_item, StorageKeyIterator}, + Twox64Concat, + }; + pub fn migrate() { + for (who, votes) in + StorageKeyIterator::, Twox64Concat>::new( + b"PhragmenElection", + b"VotesOf", + ) + .drain() + { + if let Some(stake) = take_storage_item::<_, BalanceOf, Twox64Concat>( + b"PhragmenElection", + b"StakeOf", + &who, + ) { + Voting::::insert(who, (stake, votes)); + } + } + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - fn on_runtime_upgrade() -> Weight { - migration::migrate::(); - - MINIMUM_WEIGHT - } - - const CandidacyBond: BalanceOf = T::CandidacyBond::get(); - const VotingBond: BalanceOf = T::VotingBond::get(); - const DesiredMembers: u32 = T::DesiredMembers::get(); - const DesiredRunnersUp: u32 = T::DesiredRunnersUp::get(); - const TermDuration: T::BlockNumber = T::TermDuration::get(); - - /// Vote for a set of candidates for the upcoming round of election. - /// - /// The `votes` should: - /// - not be empty. - /// - be less than the number of candidates. - /// - /// Upon voting, `value` units of `who`'s balance is locked and a bond amount is reserved. - /// It is the responsibility of the caller to not place all of their balance into the lock - /// and keep some for further transactions. - /// - /// # - /// #### State - /// Reads: O(1) - /// Writes: O(V) given `V` votes. V is bounded by 16. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn vote(origin, votes: Vec, #[compact] value: BalanceOf) { - let who = ensure_signed(origin)?; - - let candidates_count = >::decode_len().unwrap_or(0) as usize; - let members_count = >::decode_len().unwrap_or(0) as usize; - // addition is valid: candidates and members never overlap. - let allowed_votes = candidates_count + members_count; - - ensure!(!allowed_votes.is_zero(), Error::::UnableToVote); - ensure!(votes.len() <= allowed_votes, Error::::TooManyVotes); - ensure!(votes.len() <= MAXIMUM_VOTE, Error::::MaximumVotesExceeded); - ensure!(!votes.is_empty(), Error::::NoVotes); - - ensure!( - value > T::Currency::minimum_balance(), - Error::::LowBalance, - ); - - if !Self::is_voter(&who) { - // first time voter. Reserve bond. - T::Currency::reserve(&who, T::VotingBond::get()) - .map_err(|_| Error::::UnableToPayBond)?; - } - // Amount to be locked up. - let locked_balance = value.min(T::Currency::total_balance(&who)); - - // lock - T::Currency::set_lock( - MODULE_ID, - &who, - locked_balance, - WithdrawReasons::except(WithdrawReason::TransactionPayment), - ); - - Voting::::insert(&who, (locked_balance, votes)); - } - - /// Remove `origin` as a voter. This removes the lock and returns the bond. - /// - /// # - /// #### State - /// Reads: O(1) - /// Writes: O(1) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn remove_voter(origin) { - let who = ensure_signed(origin)?; - - ensure!(Self::is_voter(&who), Error::::MustBeVoter); - - Self::do_remove_voter(&who, true); - } - - /// Report `target` for being an defunct voter. In case of a valid report, the reporter is - /// rewarded by the bond amount of `target`. Otherwise, the reporter itself is removed and - /// their bond is slashed. - /// - /// A defunct voter is defined to be: - /// - a voter whose current submitted votes are all invalid. i.e. all of them are no - /// longer a candidate nor an active member. 
- /// - /// # - /// #### State - /// Reads: O(NLogM) given M current candidates and N votes for `target`. - /// Writes: O(1) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(1_000_000_000)] - fn report_defunct_voter(origin, target: ::Source) { - let reporter = ensure_signed(origin)?; - let target = T::Lookup::lookup(target)?; - - ensure!(reporter != target, Error::::ReportSelf); - ensure!(Self::is_voter(&reporter), Error::::MustBeVoter); - - // Checking if someone is a candidate and a member here is O(LogN), making the whole - // function O(MLonN) with N candidates in total and M of them being voted by `target`. - // We could easily add another mapping to be able to check if someone is a candidate in - // `O(1)` but that would make the process of removing candidates at the end of each - // round slightly harder. Note that for now we have a bound of number of votes (`N`). - let valid = Self::is_defunct_voter(&target); - if valid { - // reporter will get the voting bond of the target - T::Currency::repatriate_reserved(&target, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; - // remove the target. They are defunct. - Self::do_remove_voter(&target, false); - } else { - // slash the bond of the reporter. - let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; - T::BadReport::on_unbalanced(imbalance); - // remove the reporter. - Self::do_remove_voter(&reporter, false); - } - Self::deposit_event(RawEvent::VoterReported(target, reporter, valid)); - } - - - /// Submit oneself for candidacy. - /// - /// A candidate will either: - /// - Lose at the end of the term and forfeit their deposit. - /// - Win and become a member. Members will eventually get their stash back. - /// - Become a runner-up. Runners-ups are reserved members in case one gets forcefully - /// removed. - /// - /// # - /// #### State - /// Reads: O(LogN) Given N candidates. 
- /// Writes: O(1) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn submit_candidacy(origin) { - let who = ensure_signed(origin)?; - - let is_candidate = Self::is_candidate(&who); - ensure!(is_candidate.is_err(), Error::::DuplicatedCandidate); - // assured to be an error, error always contains the index. - let index = is_candidate.unwrap_err(); - - ensure!(!Self::is_member(&who), Error::::MemberSubmit); - ensure!(!Self::is_runner(&who), Error::::RunnerSubmit); - - T::Currency::reserve(&who, T::CandidacyBond::get()) - .map_err(|_| Error::::InsufficientCandidateFunds)?; - - >::mutate(|c| c.insert(index, who)); - } - - /// Renounce one's intention to be a candidate for the next election round. 3 potential - /// outcomes exist: - /// - `origin` is a candidate and not elected in any set. In this case, the bond is - /// unreserved, returned and origin is removed as a candidate. - /// - `origin` is a current runner up. In this case, the bond is unreserved, returned and - /// origin is removed as a runner. - /// - `origin` is a current member. In this case, the bond is unreserved and origin is - /// removed as a member, consequently not being a candidate for the next round anymore. - /// Similar to [`remove_voter`], if replacement runners exists, they are immediately used. - #[weight = SimpleDispatchInfo::FixedOperational(2_000_000_000)] - fn renounce_candidacy(origin) { - let who = ensure_signed(origin)?; - - // NOTE: this function attempts the 3 conditions (being a candidate, member, runner) and - // fails if none are matched. Unlike other Palette functions and modules where checks - // happen first and then execution happens, this function is written the other way - // around. The main intention is that reading all of the candidates, members and runners - // from storage is expensive. Furthermore, we know (soft proof) that they are always - // mutually exclusive. Hence, we try one, and only then decode more storage. 
- - if let Ok(_replacement) = Self::remove_and_replace_member(&who) { - T::Currency::unreserve(&who, T::CandidacyBond::get()); - Self::deposit_event(RawEvent::MemberRenounced(who.clone())); - - // safety guard to make sure we do only one arm. Better to read runners later. - return Ok(()); - } - - let mut runners_up_with_stake = Self::runners_up(); - if let Some(index) = runners_up_with_stake.iter() - .position(|(ref r, ref _s)| r == &who) - { - runners_up_with_stake.remove(index); - // unreserve the bond - T::Currency::unreserve(&who, T::CandidacyBond::get()); - // update storage. - >::put(runners_up_with_stake); - // safety guard to make sure we do only one arm. Better to read runners later. - return Ok(()); - } - - let mut candidates = Self::candidates(); - if let Ok(index) = candidates.binary_search(&who) { - candidates.remove(index); - // unreserve the bond - T::Currency::unreserve(&who, T::CandidacyBond::get()); - // update storage. - >::put(candidates); - // safety guard to make sure we do only one arm. Better to read runners later. - return Ok(()); - } - - Err(Error::::InvalidOrigin)? - } - - /// Remove a particular member from the set. This is effective immediately and the bond of - /// the outgoing member is slashed. - /// - /// If a runner-up is available, then the best runner-up will be removed and replaces the - /// outgoing member. Otherwise, a new phragmen round is started. - /// - /// Note that this does not affect the designated block number of the next election. 
- /// - /// # - /// #### State - /// Reads: O(do_phragmen) - /// Writes: O(do_phragmen) - /// # - #[weight = SimpleDispatchInfo::FixedOperational(2_000_000_000)] - fn remove_member(origin, who: ::Source) -> DispatchResult { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - - Self::remove_and_replace_member(&who).map(|had_replacement| { - let (imbalance, _) = T::Currency::slash_reserved(&who, T::CandidacyBond::get()); - T::KickedMember::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::MemberKicked(who.clone())); - - if !had_replacement { - Self::do_phragmen(); - } - }) - } - - /// What to do at the end of each block. Checks if an election needs to happen or not. - fn on_initialize(n: T::BlockNumber) -> Weight { - if let Err(e) = Self::end_block(n) { - print("Guru meditation"); - print(e); - } - - MINIMUM_WEIGHT - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + fn on_runtime_upgrade() -> Weight { + migration::migrate::(); + + MINIMUM_WEIGHT + } + + const CandidacyBond: BalanceOf = T::CandidacyBond::get(); + const VotingBond: BalanceOf = T::VotingBond::get(); + const DesiredMembers: u32 = T::DesiredMembers::get(); + const DesiredRunnersUp: u32 = T::DesiredRunnersUp::get(); + const TermDuration: T::BlockNumber = T::TermDuration::get(); + + /// Vote for a set of candidates for the upcoming round of election. + /// + /// The `votes` should: + /// - not be empty. + /// - be less than the number of candidates. + /// + /// Upon voting, `value` units of `who`'s balance is locked and a bond amount is reserved. + /// It is the responsibility of the caller to not place all of their balance into the lock + /// and keep some for further transactions. + /// + /// # + /// #### State + /// Reads: O(1) + /// Writes: O(V) given `V` votes. V is bounded by 16. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn vote(origin, votes: Vec, #[compact] value: BalanceOf) { + let who = ensure_signed(origin)?; + + let candidates_count = >::decode_len().unwrap_or(0) as usize; + let members_count = >::decode_len().unwrap_or(0) as usize; + // addition is valid: candidates and members never overlap. + let allowed_votes = candidates_count + members_count; + + ensure!(!allowed_votes.is_zero(), Error::::UnableToVote); + ensure!(votes.len() <= allowed_votes, Error::::TooManyVotes); + ensure!(votes.len() <= MAXIMUM_VOTE, Error::::MaximumVotesExceeded); + ensure!(!votes.is_empty(), Error::::NoVotes); + + ensure!( + value > T::Currency::minimum_balance(), + Error::::LowBalance, + ); + + if !Self::is_voter(&who) { + // first time voter. Reserve bond. + T::Currency::reserve(&who, T::VotingBond::get()) + .map_err(|_| Error::::UnableToPayBond)?; + } + // Amount to be locked up. + let locked_balance = value.min(T::Currency::total_balance(&who)); + + // lock + T::Currency::set_lock( + MODULE_ID, + &who, + locked_balance, + WithdrawReasons::except(WithdrawReason::TransactionPayment), + ); + + Voting::::insert(&who, (locked_balance, votes)); + } + + /// Remove `origin` as a voter. This removes the lock and returns the bond. + /// + /// # + /// #### State + /// Reads: O(1) + /// Writes: O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn remove_voter(origin) { + let who = ensure_signed(origin)?; + + ensure!(Self::is_voter(&who), Error::::MustBeVoter); + + Self::do_remove_voter(&who, true); + } + + /// Report `target` for being an defunct voter. In case of a valid report, the reporter is + /// rewarded by the bond amount of `target`. Otherwise, the reporter itself is removed and + /// their bond is slashed. + /// + /// A defunct voter is defined to be: + /// - a voter whose current submitted votes are all invalid. i.e. all of them are no + /// longer a candidate nor an active member. 
+ /// + /// # + /// #### State + /// Reads: O(NLogM) given M current candidates and N votes for `target`. + /// Writes: O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(1_000_000_000)] + fn report_defunct_voter(origin, target: ::Source) { + let reporter = ensure_signed(origin)?; + let target = T::Lookup::lookup(target)?; + + ensure!(reporter != target, Error::::ReportSelf); + ensure!(Self::is_voter(&reporter), Error::::MustBeVoter); + + // Checking if someone is a candidate and a member here is O(LogN), making the whole + // function O(MLonN) with N candidates in total and M of them being voted by `target`. + // We could easily add another mapping to be able to check if someone is a candidate in + // `O(1)` but that would make the process of removing candidates at the end of each + // round slightly harder. Note that for now we have a bound of number of votes (`N`). + let valid = Self::is_defunct_voter(&target); + if valid { + // reporter will get the voting bond of the target + T::Currency::repatriate_reserved(&target, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; + // remove the target. They are defunct. + Self::do_remove_voter(&target, false); + } else { + // slash the bond of the reporter. + let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; + T::BadReport::on_unbalanced(imbalance); + // remove the reporter. + Self::do_remove_voter(&reporter, false); + } + Self::deposit_event(RawEvent::VoterReported(target, reporter, valid)); + } + + + /// Submit oneself for candidacy. + /// + /// A candidate will either: + /// - Lose at the end of the term and forfeit their deposit. + /// - Win and become a member. Members will eventually get their stash back. + /// - Become a runner-up. Runners-ups are reserved members in case one gets forcefully + /// removed. + /// + /// # + /// #### State + /// Reads: O(LogN) Given N candidates. 
+ /// Writes: O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn submit_candidacy(origin) { + let who = ensure_signed(origin)?; + + let is_candidate = Self::is_candidate(&who); + ensure!(is_candidate.is_err(), Error::::DuplicatedCandidate); + // assured to be an error, error always contains the index. + let index = is_candidate.unwrap_err(); + + ensure!(!Self::is_member(&who), Error::::MemberSubmit); + ensure!(!Self::is_runner(&who), Error::::RunnerSubmit); + + T::Currency::reserve(&who, T::CandidacyBond::get()) + .map_err(|_| Error::::InsufficientCandidateFunds)?; + + >::mutate(|c| c.insert(index, who)); + } + + /// Renounce one's intention to be a candidate for the next election round. 3 potential + /// outcomes exist: + /// - `origin` is a candidate and not elected in any set. In this case, the bond is + /// unreserved, returned and origin is removed as a candidate. + /// - `origin` is a current runner up. In this case, the bond is unreserved, returned and + /// origin is removed as a runner. + /// - `origin` is a current member. In this case, the bond is unreserved and origin is + /// removed as a member, consequently not being a candidate for the next round anymore. + /// Similar to [`remove_voter`], if replacement runners exists, they are immediately used. + #[weight = SimpleDispatchInfo::FixedOperational(2_000_000_000)] + fn renounce_candidacy(origin) { + let who = ensure_signed(origin)?; + + // NOTE: this function attempts the 3 conditions (being a candidate, member, runner) and + // fails if none are matched. Unlike other Palette functions and modules where checks + // happen first and then execution happens, this function is written the other way + // around. The main intention is that reading all of the candidates, members and runners + // from storage is expensive. Furthermore, we know (soft proof) that they are always + // mutually exclusive. Hence, we try one, and only then decode more storage. 
+ + if let Ok(_replacement) = Self::remove_and_replace_member(&who) { + T::Currency::unreserve(&who, T::CandidacyBond::get()); + Self::deposit_event(RawEvent::MemberRenounced(who.clone())); + + // safety guard to make sure we do only one arm. Better to read runners later. + return Ok(()); + } + + let mut runners_up_with_stake = Self::runners_up(); + if let Some(index) = runners_up_with_stake.iter() + .position(|(ref r, ref _s)| r == &who) + { + runners_up_with_stake.remove(index); + // unreserve the bond + T::Currency::unreserve(&who, T::CandidacyBond::get()); + // update storage. + >::put(runners_up_with_stake); + // safety guard to make sure we do only one arm. Better to read runners later. + return Ok(()); + } + + let mut candidates = Self::candidates(); + if let Ok(index) = candidates.binary_search(&who) { + candidates.remove(index); + // unreserve the bond + T::Currency::unreserve(&who, T::CandidacyBond::get()); + // update storage. + >::put(candidates); + // safety guard to make sure we do only one arm. Better to read runners later. + return Ok(()); + } + + Err(Error::::InvalidOrigin)? + } + + /// Remove a particular member from the set. This is effective immediately and the bond of + /// the outgoing member is slashed. + /// + /// If a runner-up is available, then the best runner-up will be removed and replaces the + /// outgoing member. Otherwise, a new phragmen round is started. + /// + /// Note that this does not affect the designated block number of the next election. 
+ /// + /// # + /// #### State + /// Reads: O(do_phragmen) + /// Writes: O(do_phragmen) + /// # + #[weight = SimpleDispatchInfo::FixedOperational(2_000_000_000)] + fn remove_member(origin, who: ::Source) -> DispatchResult { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + + Self::remove_and_replace_member(&who).map(|had_replacement| { + let (imbalance, _) = T::Currency::slash_reserved(&who, T::CandidacyBond::get()); + T::KickedMember::on_unbalanced(imbalance); + Self::deposit_event(RawEvent::MemberKicked(who.clone())); + + if !had_replacement { + Self::do_phragmen(); + } + }) + } + + /// What to do at the end of each block. Checks if an election needs to happen or not. + fn on_initialize(n: T::BlockNumber) -> Weight { + if let Err(e) = Self::end_block(n) { + print("Guru meditation"); + print(e); + } + + MINIMUM_WEIGHT + } + } } decl_event!( @@ -537,509 +550,540 @@ decl_event!( ); impl Module { - /// Attempts to remove a member `who`. If a runner up exists, it is used as the replacement. - /// Otherwise, `Ok(false)` is returned to signal the caller. - /// - /// In both cases, [`Members`], [`ElectionRounds`] and [`RunnersUp`] storage are updated - /// accordingly. Furthermore, the membership change is reported. - /// - /// O(phragmen) in the worse case. 
- fn remove_and_replace_member(who: &T::AccountId) -> Result { - let mut members_with_stake = Self::members(); - if let Ok(index) = members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(who)) { - members_with_stake.remove(index); - - let next_up = >::mutate(|runners_up| runners_up.pop()); - let maybe_replacement = next_up.and_then(|(replacement, stake)| - members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(&replacement)) - .err() - .map(|index| { - members_with_stake.insert(index, (replacement.clone(), stake)); - replacement - }) - ); - - >::put(&members_with_stake); - let members = members_with_stake.into_iter().map(|m| m.0).collect::>(); - let result = Ok(maybe_replacement.is_some()); - let old = [who.clone()]; - match maybe_replacement { - Some(new) => T::ChangeMembers::change_members_sorted(&[new], &old, &members), - None => T::ChangeMembers::change_members_sorted(&[], &old, &members), - } - result - } else { - Err(Error::::NotMember)? - } - } - - /// Check if `who` is a candidate. It returns the insert index if the element does not exists as - /// an error. - /// - /// State: O(LogN) given N candidates. - fn is_candidate(who: &T::AccountId) -> Result<(), usize> { - Self::candidates().binary_search(who).map(|_| ()) - } - - /// Check if `who` is a voter. It may or may not be a _current_ one. - /// - /// State: O(1). - fn is_voter(who: &T::AccountId) -> bool { - Voting::::contains_key(who) - } - - /// Check if `who` is currently an active member. - /// - /// Limited number of members. Binary search. Constant time factor. O(1) - fn is_member(who: &T::AccountId) -> bool { - Self::members().binary_search_by(|(a, _b)| a.cmp(who)).is_ok() - } - - /// Check if `who` is currently an active runner. - /// - /// Limited number of runners-up. Binary search. Constant time factor. O(1) - fn is_runner(who: &T::AccountId) -> bool { - Self::runners_up().iter().position(|(a, _b)| a == who).is_some() - } - - /// Returns number of desired members. 
- fn desired_members() -> u32 { - T::DesiredMembers::get() - } - - /// Returns number of desired runners up. - fn desired_runners_up() -> u32 { - T::DesiredRunnersUp::get() - } - - /// Returns the term duration - fn term_duration() -> T::BlockNumber { - T::TermDuration::get() - } - - /// Get the members' account ids. - fn members_ids() -> Vec { - Self::members().into_iter().map(|(m, _)| m).collect::>() - } - - /// The the runners' up account ids. - fn runners_up_ids() -> Vec { - Self::runners_up().into_iter().map(|(r, _)| r).collect::>() - } - - /// Check if `who` is a defunct voter. - /// - /// Note that false is returned if `who` is not a voter at all. - /// - /// O(NLogM) with M candidates and `who` having voted for `N` of them. - fn is_defunct_voter(who: &T::AccountId) -> bool { - if Self::is_voter(who) { - Self::votes_of(who) - .iter() - .all(|v| !Self::is_member(v) && !Self::is_runner(v) && !Self::is_candidate(v).is_ok()) - } else { - false - } - } - - /// Remove a certain someone as a voter. - /// - /// This will clean always clean the storage associated with the voter, and remove the balance - /// lock. Optionally, it would also return the reserved voting bond if indicated by `unreserve`. - fn do_remove_voter(who: &T::AccountId, unreserve: bool) { - // remove storage and lock. - Voting::::remove(who); - T::Currency::remove_lock(MODULE_ID, who); - - if unreserve { - T::Currency::unreserve(who, T::VotingBond::get()); - } - } - - /// The locked stake of a voter. - fn locked_stake_of(who: &T::AccountId) -> BalanceOf { - Voting::::get(who).0 - } - - /// The locked stake of a voter. - fn votes_of(who: &T::AccountId) -> Vec { - Voting::::get(who).1 - } - - /// Check there's nothing to do this block. - /// - /// Runs phragmen election and cleans all the previous candidate state. The voter state is NOT - /// cleaned and voters must themselves submit a transaction to retract. 
- fn end_block(block_number: T::BlockNumber) -> DispatchResult { - if !Self::term_duration().is_zero() { - if (block_number % Self::term_duration()).is_zero() { - Self::do_phragmen(); - } - } - Ok(()) - } - - /// Run the phragmen election with all required side processes and state updates. - /// - /// Calls the appropriate `ChangeMembers` function variant internally. - /// - /// # - /// #### State - /// Reads: O(C + V*E) where C = candidates, V voters and E votes per voter exits. - /// Writes: O(M + R) with M desired members and R runners_up. - /// # - fn do_phragmen() { - let desired_seats = Self::desired_members() as usize; - let desired_runners_up = Self::desired_runners_up() as usize; - let num_to_elect = desired_runners_up + desired_seats; - - let mut candidates = Self::candidates(); - // candidates who explicitly called `submit_candidacy`. Only these folks are at risk of - // losing their bond. - let exposed_candidates = candidates.clone(); - // current members are always a candidate for the next round as well. - // this is guaranteed to not create any duplicates. - candidates.append(&mut Self::members_ids()); - // previous runners_up are also always candidates for the next round. - candidates.append(&mut Self::runners_up_ids()); - - // helper closures to deal with balance/stake. 
- let to_votes = |b: BalanceOf| -> VoteWeight { - , VoteWeight>>::convert(b) - }; - let to_balance = |e: ExtendedBalance| -> BalanceOf { - >>::convert(e) - }; - let stake_of = |who: &T::AccountId| -> VoteWeight { - to_votes(Self::locked_stake_of(who)) - }; - - let voters_and_votes = Voting::::iter() - .map(|(voter, (stake, targets))| { (voter, to_votes(stake), targets) }) - .collect::>(); - let maybe_phragmen_result = sp_phragmen::elect::( - num_to_elect, - 0, - candidates, - voters_and_votes.clone(), - ); - - if let Some(PhragmenResult { winners, assignments }) = maybe_phragmen_result { - let old_members_ids = >::take().into_iter() - .map(|(m, _)| m) - .collect::>(); - let old_runners_up_ids = >::take().into_iter() - .map(|(r, _)| r) - .collect::>(); - - // filter out those who had literally no votes at all. - // AUDIT/NOTE: the need to do this is because all candidates, even those who have no - // vote are still considered by phragmen and when good candidates are scarce, then these - // cheap ones might get elected. We might actually want to remove the filter and allow - // zero-voted candidates to also make it to the membership set. - let new_set_with_approval = winners; - let new_set = new_set_with_approval - .into_iter() - .filter_map(|(m, a)| if a.is_zero() { None } else { Some(m) } ) - .collect::>(); - - let staked_assignments = sp_phragmen::assignment_ratio_to_staked( - assignments, - stake_of, - ); - - let (support_map, _) = build_support_map::(&new_set, &staked_assignments); - - let new_set_with_stake = new_set - .into_iter() - .map(|ref m| { - let support = support_map.get(m) - .expect( - "entire new_set was given to build_support_map; en entry must be \ - created for each item; qed" - ); - (m.clone(), to_balance(support.total)) - }) - .collect::)>>(); - - // split new set into winners and runners up. 
- let split_point = desired_seats.min(new_set_with_stake.len()); - let mut new_members = (&new_set_with_stake[..split_point]).to_vec(); - - // save the runners up as-is. They are sorted based on desirability. - // save the members, sorted based on account id. - new_members.sort_by(|i, j| i.0.cmp(&j.0)); - - let mut prime_votes: Vec<_> = new_members.iter().map(|c| (&c.0, VoteWeight::zero())).collect(); - for (_, stake, targets) in voters_and_votes.into_iter() { - for (votes, who) in targets.iter() - .enumerate() - .map(|(votes, who)| ((MAXIMUM_VOTE - votes) as u32, who)) - { - if let Ok(i) = prime_votes.binary_search_by_key(&who, |k| k.0) { - prime_votes[i].1 += stake * votes as VoteWeight; - } - } - } - let prime = prime_votes.into_iter().max_by_key(|x| x.1).map(|x| x.0.clone()); - - // new_members_ids is sorted by account id. - let new_members_ids = new_members - .iter() - .map(|(m, _)| m.clone()) - .collect::>(); - - let new_runners_up = &new_set_with_stake[split_point..] - .into_iter() - .cloned() - .rev() - .collect::)>>(); - // new_runners_up remains sorted by desirability. - let new_runners_up_ids = new_runners_up - .iter() - .map(|(r, _)| r.clone()) - .collect::>(); - - // report member changes. We compute diff because we need the outgoing list. - let (incoming, outgoing) = T::ChangeMembers::compute_members_diff( - &new_members_ids, - &old_members_ids, - ); - T::ChangeMembers::change_members_sorted( - &incoming, - &outgoing.clone(), - &new_members_ids, - ); - T::ChangeMembers::set_prime(prime); - - // outgoing candidates lose their bond. - let mut to_burn_bond = outgoing.to_vec(); - - // compute the outgoing of runners up as well and append them to the `to_burn_bond` - { - let (_, outgoing) = T::ChangeMembers::compute_members_diff( - &new_runners_up_ids, - &old_runners_up_ids, - ); - to_burn_bond.extend(outgoing); - } - - // Burn loser bond. members list is sorted. O(NLogM) (N candidates, M members) - // runner up list is not sorted. 
O(K*N) given K runner ups. Overall: O(NLogM + N*K) - // both the member and runner counts are bounded. - exposed_candidates.into_iter().for_each(|c| { - // any candidate who is not a member and not a runner up. - if new_members.binary_search_by_key(&c, |(m, _)| m.clone()).is_err() - && !new_runners_up_ids.contains(&c) - { - let (imbalance, _) = T::Currency::slash_reserved(&c, T::CandidacyBond::get()); - T::LoserCandidate::on_unbalanced(imbalance); - } - }); - - // Burn outgoing bonds - to_burn_bond.into_iter().for_each(|x| { - let (imbalance, _) = T::Currency::slash_reserved(&x, T::CandidacyBond::get()); - T::LoserCandidate::on_unbalanced(imbalance); - }); - - >::put(&new_members); - >::put(new_runners_up); - - Self::deposit_event(RawEvent::NewTerm(new_members.clone().to_vec())); - } else { - Self::deposit_event(RawEvent::EmptyTerm); - } - - // clean candidates. - >::kill(); - - ElectionRounds::mutate(|v| *v += 1); - } + /// Attempts to remove a member `who`. If a runner up exists, it is used as the replacement. + /// Otherwise, `Ok(false)` is returned to signal the caller. + /// + /// In both cases, [`Members`], [`ElectionRounds`] and [`RunnersUp`] storage are updated + /// accordingly. Furthermore, the membership change is reported. + /// + /// O(phragmen) in the worse case. 
+ fn remove_and_replace_member(who: &T::AccountId) -> Result { + let mut members_with_stake = Self::members(); + if let Ok(index) = members_with_stake.binary_search_by(|(ref m, ref _s)| m.cmp(who)) { + members_with_stake.remove(index); + + let next_up = >::mutate(|runners_up| runners_up.pop()); + let maybe_replacement = next_up.and_then(|(replacement, stake)| { + members_with_stake + .binary_search_by(|(ref m, ref _s)| m.cmp(&replacement)) + .err() + .map(|index| { + members_with_stake.insert(index, (replacement.clone(), stake)); + replacement + }) + }); + + >::put(&members_with_stake); + let members = members_with_stake + .into_iter() + .map(|m| m.0) + .collect::>(); + let result = Ok(maybe_replacement.is_some()); + let old = [who.clone()]; + match maybe_replacement { + Some(new) => T::ChangeMembers::change_members_sorted(&[new], &old, &members), + None => T::ChangeMembers::change_members_sorted(&[], &old, &members), + } + result + } else { + Err(Error::::NotMember)? + } + } + + /// Check if `who` is a candidate. It returns the insert index if the element does not exists as + /// an error. + /// + /// State: O(LogN) given N candidates. + fn is_candidate(who: &T::AccountId) -> Result<(), usize> { + Self::candidates().binary_search(who).map(|_| ()) + } + + /// Check if `who` is a voter. It may or may not be a _current_ one. + /// + /// State: O(1). + fn is_voter(who: &T::AccountId) -> bool { + Voting::::contains_key(who) + } + + /// Check if `who` is currently an active member. + /// + /// Limited number of members. Binary search. Constant time factor. O(1) + fn is_member(who: &T::AccountId) -> bool { + Self::members() + .binary_search_by(|(a, _b)| a.cmp(who)) + .is_ok() + } + + /// Check if `who` is currently an active runner. + /// + /// Limited number of runners-up. Binary search. Constant time factor. 
O(1) + fn is_runner(who: &T::AccountId) -> bool { + Self::runners_up() + .iter() + .position(|(a, _b)| a == who) + .is_some() + } + + /// Returns number of desired members. + fn desired_members() -> u32 { + T::DesiredMembers::get() + } + + /// Returns number of desired runners up. + fn desired_runners_up() -> u32 { + T::DesiredRunnersUp::get() + } + + /// Returns the term duration + fn term_duration() -> T::BlockNumber { + T::TermDuration::get() + } + + /// Get the members' account ids. + fn members_ids() -> Vec { + Self::members() + .into_iter() + .map(|(m, _)| m) + .collect::>() + } + + /// The the runners' up account ids. + fn runners_up_ids() -> Vec { + Self::runners_up() + .into_iter() + .map(|(r, _)| r) + .collect::>() + } + + /// Check if `who` is a defunct voter. + /// + /// Note that false is returned if `who` is not a voter at all. + /// + /// O(NLogM) with M candidates and `who` having voted for `N` of them. + fn is_defunct_voter(who: &T::AccountId) -> bool { + if Self::is_voter(who) { + Self::votes_of(who).iter().all(|v| { + !Self::is_member(v) && !Self::is_runner(v) && !Self::is_candidate(v).is_ok() + }) + } else { + false + } + } + + /// Remove a certain someone as a voter. + /// + /// This will clean always clean the storage associated with the voter, and remove the balance + /// lock. Optionally, it would also return the reserved voting bond if indicated by `unreserve`. + fn do_remove_voter(who: &T::AccountId, unreserve: bool) { + // remove storage and lock. + Voting::::remove(who); + T::Currency::remove_lock(MODULE_ID, who); + + if unreserve { + T::Currency::unreserve(who, T::VotingBond::get()); + } + } + + /// The locked stake of a voter. + fn locked_stake_of(who: &T::AccountId) -> BalanceOf { + Voting::::get(who).0 + } + + /// The locked stake of a voter. + fn votes_of(who: &T::AccountId) -> Vec { + Voting::::get(who).1 + } + + /// Check there's nothing to do this block. 
+ /// + /// Runs phragmen election and cleans all the previous candidate state. The voter state is NOT + /// cleaned and voters must themselves submit a transaction to retract. + fn end_block(block_number: T::BlockNumber) -> DispatchResult { + if !Self::term_duration().is_zero() { + if (block_number % Self::term_duration()).is_zero() { + Self::do_phragmen(); + } + } + Ok(()) + } + + /// Run the phragmen election with all required side processes and state updates. + /// + /// Calls the appropriate `ChangeMembers` function variant internally. + /// + /// # + /// #### State + /// Reads: O(C + V*E) where C = candidates, V voters and E votes per voter exits. + /// Writes: O(M + R) with M desired members and R runners_up. + /// # + fn do_phragmen() { + let desired_seats = Self::desired_members() as usize; + let desired_runners_up = Self::desired_runners_up() as usize; + let num_to_elect = desired_runners_up + desired_seats; + + let mut candidates = Self::candidates(); + // candidates who explicitly called `submit_candidacy`. Only these folks are at risk of + // losing their bond. + let exposed_candidates = candidates.clone(); + // current members are always a candidate for the next round as well. + // this is guaranteed to not create any duplicates. + candidates.append(&mut Self::members_ids()); + // previous runners_up are also always candidates for the next round. + candidates.append(&mut Self::runners_up_ids()); + + // helper closures to deal with balance/stake. 
+ let to_votes = |b: BalanceOf| -> VoteWeight { + , VoteWeight>>::convert(b) + }; + let to_balance = |e: ExtendedBalance| -> BalanceOf { + >>::convert(e) + }; + let stake_of = |who: &T::AccountId| -> VoteWeight { to_votes(Self::locked_stake_of(who)) }; + + let voters_and_votes = Voting::::iter() + .map(|(voter, (stake, targets))| (voter, to_votes(stake), targets)) + .collect::>(); + let maybe_phragmen_result = sp_phragmen::elect::( + num_to_elect, + 0, + candidates, + voters_and_votes.clone(), + ); + + if let Some(PhragmenResult { + winners, + assignments, + }) = maybe_phragmen_result + { + let old_members_ids = >::take() + .into_iter() + .map(|(m, _)| m) + .collect::>(); + let old_runners_up_ids = >::take() + .into_iter() + .map(|(r, _)| r) + .collect::>(); + + // filter out those who had literally no votes at all. + // AUDIT/NOTE: the need to do this is because all candidates, even those who have no + // vote are still considered by phragmen and when good candidates are scarce, then these + // cheap ones might get elected. We might actually want to remove the filter and allow + // zero-voted candidates to also make it to the membership set. + let new_set_with_approval = winners; + let new_set = new_set_with_approval + .into_iter() + .filter_map(|(m, a)| if a.is_zero() { None } else { Some(m) }) + .collect::>(); + + let staked_assignments = sp_phragmen::assignment_ratio_to_staked(assignments, stake_of); + + let (support_map, _) = build_support_map::(&new_set, &staked_assignments); + + let new_set_with_stake = new_set + .into_iter() + .map(|ref m| { + let support = support_map.get(m).expect( + "entire new_set was given to build_support_map; en entry must be \ + created for each item; qed", + ); + (m.clone(), to_balance(support.total)) + }) + .collect::)>>(); + + // split new set into winners and runners up. 
+ let split_point = desired_seats.min(new_set_with_stake.len()); + let mut new_members = (&new_set_with_stake[..split_point]).to_vec(); + + // save the runners up as-is. They are sorted based on desirability. + // save the members, sorted based on account id. + new_members.sort_by(|i, j| i.0.cmp(&j.0)); + + let mut prime_votes: Vec<_> = new_members + .iter() + .map(|c| (&c.0, VoteWeight::zero())) + .collect(); + for (_, stake, targets) in voters_and_votes.into_iter() { + for (votes, who) in targets + .iter() + .enumerate() + .map(|(votes, who)| ((MAXIMUM_VOTE - votes) as u32, who)) + { + if let Ok(i) = prime_votes.binary_search_by_key(&who, |k| k.0) { + prime_votes[i].1 += stake * votes as VoteWeight; + } + } + } + let prime = prime_votes + .into_iter() + .max_by_key(|x| x.1) + .map(|x| x.0.clone()); + + // new_members_ids is sorted by account id. + let new_members_ids = new_members + .iter() + .map(|(m, _)| m.clone()) + .collect::>(); + + let new_runners_up = &new_set_with_stake[split_point..] + .into_iter() + .cloned() + .rev() + .collect::)>>(); + // new_runners_up remains sorted by desirability. + let new_runners_up_ids = new_runners_up + .iter() + .map(|(r, _)| r.clone()) + .collect::>(); + + // report member changes. We compute diff because we need the outgoing list. + let (incoming, outgoing) = + T::ChangeMembers::compute_members_diff(&new_members_ids, &old_members_ids); + T::ChangeMembers::change_members_sorted(&incoming, &outgoing.clone(), &new_members_ids); + T::ChangeMembers::set_prime(prime); + + // outgoing candidates lose their bond. + let mut to_burn_bond = outgoing.to_vec(); + + // compute the outgoing of runners up as well and append them to the `to_burn_bond` + { + let (_, outgoing) = T::ChangeMembers::compute_members_diff( + &new_runners_up_ids, + &old_runners_up_ids, + ); + to_burn_bond.extend(outgoing); + } + + // Burn loser bond. members list is sorted. O(NLogM) (N candidates, M members) + // runner up list is not sorted. 
O(K*N) given K runner ups. Overall: O(NLogM + N*K) + // both the member and runner counts are bounded. + exposed_candidates.into_iter().for_each(|c| { + // any candidate who is not a member and not a runner up. + if new_members + .binary_search_by_key(&c, |(m, _)| m.clone()) + .is_err() + && !new_runners_up_ids.contains(&c) + { + let (imbalance, _) = T::Currency::slash_reserved(&c, T::CandidacyBond::get()); + T::LoserCandidate::on_unbalanced(imbalance); + } + }); + + // Burn outgoing bonds + to_burn_bond.into_iter().for_each(|x| { + let (imbalance, _) = T::Currency::slash_reserved(&x, T::CandidacyBond::get()); + T::LoserCandidate::on_unbalanced(imbalance); + }); + + >::put(&new_members); + >::put(new_runners_up); + + Self::deposit_event(RawEvent::NewTerm(new_members.clone().to_vec())); + } else { + Self::deposit_event(RawEvent::EmptyTerm); + } + + // clean candidates. + >::kill(); + + ElectionRounds::mutate(|v| *v += 1); + } } impl Contains for Module { - fn contains(who: &T::AccountId) -> bool { - Self::is_member(who) - } - fn sorted_members() -> Vec { Self::members_ids() } - - // A special function to populate members in this pallet for passing Origin - // checks in runtime benchmarking. - #[cfg(feature = "runtime-benchmarks")] - fn add(who: &T::AccountId) { - Members::::mutate(|members| { - match members.binary_search_by(|(a, _b)| a.cmp(who)) { - Ok(_) => (), - Err(pos) => members.insert(pos, (who.clone(), BalanceOf::::default())), - } - }) - } + fn contains(who: &T::AccountId) -> bool { + Self::is_member(who) + } + fn sorted_members() -> Vec { + Self::members_ids() + } + + // A special function to populate members in this pallet for passing Origin + // checks in runtime benchmarking. 
+ #[cfg(feature = "runtime-benchmarks")] + fn add(who: &T::AccountId) { + Members::::mutate( + |members| match members.binary_search_by(|(a, _b)| a.cmp(who)) { + Ok(_) => (), + Err(pos) => members.insert(pos, (who.clone(), BalanceOf::::default())), + }, + ) + } } #[cfg(test)] mod tests { - use super::*; - use std::cell::RefCell; - use frame_support::{assert_ok, assert_noop, parameter_types, weights::Weight}; - use substrate_test_utils::assert_eq_uvec; - use sp_core::H256; - use sp_runtime::{ - Perbill, testing::Header, BuildStorage, - traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, - }; - use crate as elections_phragmen; - use frame_system as system; - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - - parameter_types! { - pub const ExistentialDeposit: u64 = 1; -} - - impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = frame_system::Module; -} - - parameter_types! { - pub const CandidacyBond: u64 = 3; - } - - thread_local! 
{ - static VOTING_BOND: RefCell = RefCell::new(2); - static DESIRED_MEMBERS: RefCell = RefCell::new(2); - static DESIRED_RUNNERS_UP: RefCell = RefCell::new(2); - static TERM_DURATION: RefCell = RefCell::new(5); - } - - pub struct VotingBond; - impl Get for VotingBond { - fn get() -> u64 { VOTING_BOND.with(|v| *v.borrow()) } - } - - pub struct DesiredMembers; - impl Get for DesiredMembers { - fn get() -> u32 { DESIRED_MEMBERS.with(|v| *v.borrow()) } - } - - pub struct DesiredRunnersUp; - impl Get for DesiredRunnersUp { - fn get() -> u32 { DESIRED_RUNNERS_UP.with(|v| *v.borrow()) } - } - - pub struct TermDuration; - impl Get for TermDuration { - fn get() -> u64 { TERM_DURATION.with(|v| *v.borrow()) } - } - - thread_local! { - pub static MEMBERS: RefCell> = RefCell::new(vec![]); - pub static PRIME: RefCell> = RefCell::new(None); - } - - pub struct TestChangeMembers; - impl ChangeMembers for TestChangeMembers { - fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - // new, incoming, outgoing must be sorted. 
- let mut new_sorted = new.to_vec(); - new_sorted.sort(); - assert_eq!(new, &new_sorted[..]); - - let mut incoming_sorted = incoming.to_vec(); - incoming_sorted.sort(); - assert_eq!(incoming, &incoming_sorted[..]); - - let mut outgoing_sorted = outgoing.to_vec(); - outgoing_sorted.sort(); - assert_eq!(outgoing, &outgoing_sorted[..]); - - // incoming and outgoing must be disjoint - for x in incoming.iter() { - assert!(outgoing.binary_search(x).is_err()); - } - - let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); - old_plus_incoming.extend_from_slice(incoming); - old_plus_incoming.sort(); - - let mut new_plus_outgoing = new.to_vec(); - new_plus_outgoing.extend_from_slice(outgoing); - new_plus_outgoing.sort(); - - assert_eq!(old_plus_incoming, new_plus_outgoing); - - MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); - PRIME.with(|p| *p.borrow_mut() = None); - } - - fn set_prime(who: Option) { - PRIME.with(|p| *p.borrow_mut() = who); - } - } - - /// Simple structure that exposes how u64 currency can be represented as... u64. 
- pub struct CurrencyToVoteHandler; - impl Convert for CurrencyToVoteHandler { - fn convert(x: u64) -> u64 { x } - } - impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u64 { - x as u64 - } - } - - impl Trait for Test { - type Event = Event; - type Currency = Balances; - type CurrencyToVote = CurrencyToVoteHandler; - type ChangeMembers = TestChangeMembers; - type InitializeMembers = (); - type CandidacyBond = CandidacyBond; - type VotingBond = VotingBond; - type TermDuration = TermDuration; - type DesiredMembers = DesiredMembers; - type DesiredRunnersUp = DesiredRunnersUp; - type LoserCandidate = (); - type KickedMember = (); - type BadReport = (); - } - - pub type Block = sp_runtime::generic::Block; - pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; - - frame_support::construct_runtime!( + use super::*; + use crate as elections_phragmen; + use frame_support::{assert_noop, assert_ok, parameter_types, weights::Weight}; + use frame_system as system; + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, Block as BlockT, IdentityLookup}, + BuildStorage, Perbill, + }; + use std::cell::RefCell; + use substrate_test_utils::assert_eq_uvec; + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + } + + parameter_types! { + pub const ExistentialDeposit: u64 = 1; + } + + impl pallet_balances::Trait for Test { + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = frame_system::Module; + } + + parameter_types! { + pub const CandidacyBond: u64 = 3; + } + + thread_local! { + static VOTING_BOND: RefCell = RefCell::new(2); + static DESIRED_MEMBERS: RefCell = RefCell::new(2); + static DESIRED_RUNNERS_UP: RefCell = RefCell::new(2); + static TERM_DURATION: RefCell = RefCell::new(5); + } + + pub struct VotingBond; + impl Get for VotingBond { + fn get() -> u64 { + VOTING_BOND.with(|v| *v.borrow()) + } + } + + pub struct DesiredMembers; + impl Get for DesiredMembers { + fn get() -> u32 { + DESIRED_MEMBERS.with(|v| *v.borrow()) + } + } + + pub struct DesiredRunnersUp; + impl Get for DesiredRunnersUp { + fn get() -> u32 { + DESIRED_RUNNERS_UP.with(|v| *v.borrow()) + } + } + + pub struct TermDuration; + impl Get for TermDuration { + fn get() -> u64 { + TERM_DURATION.with(|v| *v.borrow()) + } + } + + thread_local! 
{ + pub static MEMBERS: RefCell> = RefCell::new(vec![]); + pub static PRIME: RefCell> = RefCell::new(None); + } + + pub struct TestChangeMembers; + impl ChangeMembers for TestChangeMembers { + fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { + // new, incoming, outgoing must be sorted. + let mut new_sorted = new.to_vec(); + new_sorted.sort(); + assert_eq!(new, &new_sorted[..]); + + let mut incoming_sorted = incoming.to_vec(); + incoming_sorted.sort(); + assert_eq!(incoming, &incoming_sorted[..]); + + let mut outgoing_sorted = outgoing.to_vec(); + outgoing_sorted.sort(); + assert_eq!(outgoing, &outgoing_sorted[..]); + + // incoming and outgoing must be disjoint + for x in incoming.iter() { + assert!(outgoing.binary_search(x).is_err()); + } + + let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); + old_plus_incoming.extend_from_slice(incoming); + old_plus_incoming.sort(); + + let mut new_plus_outgoing = new.to_vec(); + new_plus_outgoing.extend_from_slice(outgoing); + new_plus_outgoing.sort(); + + assert_eq!(old_plus_incoming, new_plus_outgoing); + + MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); + PRIME.with(|p| *p.borrow_mut() = None); + } + + fn set_prime(who: Option) { + PRIME.with(|p| *p.borrow_mut() = who); + } + } + + /// Simple structure that exposes how u64 currency can be represented as... u64. 
+ pub struct CurrencyToVoteHandler; + impl Convert for CurrencyToVoteHandler { + fn convert(x: u64) -> u64 { + x + } + } + impl Convert for CurrencyToVoteHandler { + fn convert(x: u128) -> u64 { + x as u64 + } + } + + impl Trait for Test { + type Event = Event; + type Currency = Balances; + type CurrencyToVote = CurrencyToVoteHandler; + type ChangeMembers = TestChangeMembers; + type InitializeMembers = (); + type CandidacyBond = CandidacyBond; + type VotingBond = VotingBond; + type TermDuration = TermDuration; + type DesiredMembers = DesiredMembers; + type DesiredRunnersUp = DesiredRunnersUp; + type LoserCandidate = (); + type KickedMember = (); + type BadReport = (); + } + + pub type Block = sp_runtime::generic::Block; + pub type UncheckedExtrinsic = sp_runtime::generic::UncheckedExtrinsic; + + frame_support::construct_runtime!( pub enum Test where Block = Block, NodeBlock = Block, @@ -1051,1314 +1095,1371 @@ mod tests { } ); - pub struct ExtBuilder { - genesis_members: Vec<(u64, u64)>, - balance_factor: u64, - voter_bond: u64, - term_duration: u64, - desired_runners_up: u32, - } - - impl Default for ExtBuilder { - fn default() -> Self { - Self { - genesis_members: vec![], - balance_factor: 1, - voter_bond: 2, - desired_runners_up: 0, - term_duration: 5, - } - } - } - - impl ExtBuilder { - pub fn voter_bond(mut self, fee: u64) -> Self { - self.voter_bond = fee; - self - } - pub fn desired_runners_up(mut self, count: u32) -> Self { - self.desired_runners_up = count; - self - } - pub fn term_duration(mut self, duration: u64) -> Self { - self.term_duration = duration; - self - } - pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { - self.genesis_members = members; - self - } - pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - VOTING_BOND.with(|v| *v.borrow_mut() = self.voter_bond); - TERM_DURATION.with(|v| *v.borrow_mut() = self.term_duration); - DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = self.desired_runners_up); - MEMBERS.with(|m| 
*m.borrow_mut() = self.genesis_members.iter().map(|(m, _)| m.clone()).collect::>()); - let mut ext: sp_io::TestExternalities = GenesisConfig { - pallet_balances: Some(pallet_balances::GenesisConfig::{ - balances: vec![ - (1, 10 * self.balance_factor), - (2, 20 * self.balance_factor), - (3, 30 * self.balance_factor), - (4, 40 * self.balance_factor), - (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) - ], - }), - elections_phragmen: Some(elections_phragmen::GenesisConfig:: { - members: self.genesis_members - }), - }.build_storage().unwrap().into(); - ext.execute_with(pre_conditions); - ext.execute_with(test); - ext.execute_with(post_conditions) - } - } - - fn all_voters() -> Vec { - Voting::::iter().map(|(v, _)| v).collect::>() - } - - fn balances(who: &u64) -> (u64, u64) { - (Balances::free_balance(who), Balances::reserved_balance(who)) - } - - fn has_lock(who: &u64) -> u64 { - let lock = Balances::locks(who)[0].clone(); - assert_eq!(lock.id, MODULE_ID); - lock.amount - } - - fn intersects(a: &[T], b: &[T]) -> bool { - a.iter().any(|e| b.contains(e)) - } - - fn ensure_members_sorted() { - let mut members = Elections::members().clone(); - members.sort(); - assert_eq!(Elections::members(), members); - } - - fn ensure_candidates_sorted() { - let mut candidates = Elections::candidates().clone(); - candidates.sort(); - assert_eq!(Elections::candidates(), candidates); - } - - fn ensure_members_has_approval_stake() { - // we filter members that have no approval state. This means that even we have more seats - // than candidates, we will never ever chose a member with no votes. - assert!( - Elections::members().iter().chain( - Elections::runners_up().iter() - ).all(|(_, s)| *s != Zero::zero()) - ); - } - - fn ensure_member_candidates_runners_up_disjoint() { - // members, candidates and runners-up must always be disjoint sets. 
- assert!(!intersects(&Elections::members_ids(), &Elections::candidates())); - assert!(!intersects(&Elections::members_ids(), &Elections::runners_up_ids())); - assert!(!intersects(&Elections::candidates(), &Elections::runners_up_ids())); - } - - fn pre_conditions() { - System::set_block_number(1); - ensure_members_sorted(); - ensure_candidates_sorted(); - } - - fn post_conditions() { - ensure_members_sorted(); - ensure_candidates_sorted(); - ensure_member_candidates_runners_up_disjoint(); - ensure_members_has_approval_stake(); - } - - #[test] - fn params_should_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::desired_members(), 2); - assert_eq!(Elections::term_duration(), 5); - assert_eq!(Elections::election_rounds(), 0); - - assert_eq!(Elections::members(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); - - assert_eq!(Elections::candidates(), vec![]); - assert_eq!(>::decode_len().unwrap(), 0); - assert!(Elections::is_candidate(&1).is_err()); - - assert_eq!(all_voters(), vec![]); - assert_eq!(Elections::votes_of(&1), vec![]); - }); - } - - #[test] - fn genesis_members_should_work() { - ExtBuilder::default().genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| { - System::set_block_number(1); - assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); - - assert_eq!(Elections::voting(1), (10, vec![1])); - assert_eq!(Elections::voting(2), (20, vec![2])); - - // they will persist since they have self vote. 
- System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![1, 2]); - }) - } - - #[test] - fn genesis_members_unsorted_should_work() { - ExtBuilder::default().genesis_members(vec![(2, 20), (1, 10)]).build_and_execute(|| { - System::set_block_number(1); - assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); - - assert_eq!(Elections::voting(1), (10, vec![1])); - assert_eq!(Elections::voting(2), (20, vec![2])); - - // they will persist since they have self vote. - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![1, 2]); - }) - } - - #[test] - #[should_panic = "Genesis member does not have enough stake"] - fn genesis_members_cannot_over_stake_0() { - // 10 cannot lock 20 as their stake and extra genesis will panic. - ExtBuilder::default().genesis_members(vec![(1, 20), (2, 20)]).build_and_execute(|| {}); - } - - #[test] - #[should_panic] - fn genesis_members_cannot_over_stake_1() { - // 10 cannot reserve 20 as voting bond and extra genesis will panic. 
- ExtBuilder::default().voter_bond(20).genesis_members(vec![(1, 10), (2, 20)]).build_and_execute(|| {}); - } - - #[test] - #[should_panic = "Duplicate member in elections phragmen genesis: 2"] - fn genesis_members_cannot_be_duplicate() { - ExtBuilder::default().genesis_members(vec![(1, 10), (2, 10), (2, 10)]).build_and_execute(|| {}); - } - - #[test] - fn term_duration_zero_is_passive() { - ExtBuilder::default() - .term_duration(0) - .build_and_execute(|| - { - assert_eq!(Elections::term_duration(), 0); - assert_eq!(Elections::desired_members(), 2); - assert_eq!(Elections::election_rounds(), 0); - - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); - assert_eq!(Elections::candidates(), vec![]); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); - assert_eq!(Elections::candidates(), vec![]); - }); - } - - #[test] - fn simple_candidate_submission_should_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert!(Elections::is_candidate(&1).is_err()); - assert!(Elections::is_candidate(&2).is_err()); - - assert_eq!(balances(&1), (10, 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(1))); - assert_eq!(balances(&1), (7, 3)); - - assert_eq!(Elections::candidates(), vec![1]); - - assert!(Elections::is_candidate(&1).is_ok()); - assert!(Elections::is_candidate(&2).is_err()); - - assert_eq!(balances(&2), (20, 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - assert_eq!(balances(&2), (17, 3)); - - assert_eq!(Elections::candidates(), vec![1, 2]); - - assert!(Elections::is_candidate(&1).is_ok()); - assert!(Elections::is_candidate(&2).is_ok()); - }); - } - - #[test] - fn simple_candidate_submission_with_no_votes_should_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), 
Vec::::new()); - - assert_ok!(Elections::submit_candidacy(Origin::signed(1))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert!(Elections::is_candidate(&1).is_ok()); - assert!(Elections::is_candidate(&2).is_ok()); - assert_eq!(Elections::candidates(), vec![1, 2]); - - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert!(Elections::is_candidate(&1).is_err()); - assert!(Elections::is_candidate(&2).is_err()); - assert_eq!(Elections::candidates(), vec![]); - - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::runners_up(), vec![]); - }); - } - - #[test] - fn dupe_candidate_submission_should_not_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_ok!(Elections::submit_candidacy(Origin::signed(1))); - assert_eq!(Elections::candidates(), vec![1]); - assert_noop!( - Elections::submit_candidacy(Origin::signed(1)), - Error::::DuplicatedCandidate, - ); - }); - } - - #[test] - fn member_candidacy_submission_should_not_work() { - // critically important to make sure that outgoing candidates and losers are not mixed up. 
- ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![5]); - assert_eq!(Elections::runners_up(), vec![]); - assert_eq!(Elections::candidates(), vec![]); - - assert_noop!( - Elections::submit_candidacy(Origin::signed(5)), - Error::::MemberSubmit, - ); - }); - } - - #[test] - fn runner_candidate_submission_should_not_work() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![5, 4], 20)); - assert_ok!(Elections::vote(Origin::signed(1), vec![3], 10)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![3]); - - assert_noop!( - Elections::submit_candidacy(Origin::signed(3)), - Error::::RunnerSubmit, - ); - }); - } - - #[test] - fn poor_candidate_submission_should_not_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_noop!( - Elections::submit_candidacy(Origin::signed(7)), - Error::::InsufficientCandidateFunds, - ); - }); - } - - #[test] - fn simple_voting_should_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_eq!(balances(&2), (20, 0)); - - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - - assert_eq!(balances(&2), (18, 2)); - assert_eq!(has_lock(&2), 20); - }); - } - - #[test] - fn can_vote_with_custom_stake() { 
- ExtBuilder::default().build_and_execute(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_eq!(balances(&2), (20, 0)); - - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 12)); - - assert_eq!(balances(&2), (18, 2)); - assert_eq!(has_lock(&2), 12); - }); - } - - #[test] - fn can_update_votes_and_stake() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(balances(&2), (20, 0)); - - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - - assert_eq!(balances(&2), (18, 2)); - assert_eq!(has_lock(&2), 20); - assert_eq!(Elections::locked_stake_of(&2), 20); - - // can update; different stake; different lock and reserve. - assert_ok!(Elections::vote(Origin::signed(2), vec![5, 4], 15)); - assert_eq!(balances(&2), (18, 2)); - assert_eq!(has_lock(&2), 15); - assert_eq!(Elections::locked_stake_of(&2), 15); - }); - } - - #[test] - fn cannot_vote_for_no_candidate() { - ExtBuilder::default().build_and_execute(|| { - assert_noop!( - Elections::vote(Origin::signed(2), vec![], 20), - Error::::UnableToVote, - ); - }); - } - - #[test] - fn can_vote_for_old_members_even_when_no_new_candidates() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 20)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); - - assert_ok!(Elections::vote(Origin::signed(3), vec![4, 5], 10)); - }); - } - - #[test] - fn prime_works() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - 
assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - - assert_ok!(Elections::vote(Origin::signed(1), vec![4, 3], 10)); - assert_ok!(Elections::vote(Origin::signed(2), vec![4], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); - - assert_ok!(Elections::vote(Origin::signed(3), vec![4, 5], 10)); - assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); - }); - } - - #[test] - fn prime_votes_for_exiting_members_are_removed() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - - assert_ok!(Elections::vote(Origin::signed(1), vec![4, 3], 10)); - assert_ok!(Elections::vote(Origin::signed(2), vec![4], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - assert_ok!(Elections::renounce_candidacy(Origin::signed(4))); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![3, 5]); - assert_eq!(Elections::candidates(), vec![]); - - assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); - }); - } - - #[test] - fn cannot_vote_for_more_than_candidates() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_noop!( - Elections::vote(Origin::signed(2), 
vec![10, 20, 30], 20), - Error::::TooManyVotes, - ); - }); - } - - #[test] - fn cannot_vote_for_less_than_ed() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_noop!( - Elections::vote(Origin::signed(2), vec![4], 1), - Error::::LowBalance, - ); - }) - } - - #[test] - fn can_vote_for_more_than_total_balance_but_moot() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 30)); - // you can lie but won't get away with it. - assert_eq!(Elections::locked_stake_of(&2), 20); - assert_eq!(has_lock(&2), 20); - }); - } - - #[test] - fn remove_voter_should_work() { - ExtBuilder::default().voter_bond(8).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![5], 30)); - - assert_eq_uvec!(all_voters(), vec![2, 3]); - assert_eq!(Elections::locked_stake_of(&2), 20); - assert_eq!(Elections::locked_stake_of(&3), 30); - assert_eq!(Elections::votes_of(&2), vec![5]); - assert_eq!(Elections::votes_of(&3), vec![5]); - - assert_ok!(Elections::remove_voter(Origin::signed(2))); - - assert_eq_uvec!(all_voters(), vec![3]); - assert_eq!(Elections::votes_of(&2), vec![]); - assert_eq!(Elections::locked_stake_of(&2), 0); - - assert_eq!(balances(&2), (20, 0)); - assert_eq!(Balances::locks(&2).len(), 0); - }); - } - - #[test] - fn non_voter_remove_should_not_work() { - ExtBuilder::default().build_and_execute(|| { - assert_noop!(Elections::remove_voter(Origin::signed(3)), Error::::MustBeVoter); - }); - } - - #[test] - fn dupe_remove_should_fail() { - ExtBuilder::default().build_and_execute(|| { - 
assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - - assert_ok!(Elections::remove_voter(Origin::signed(2))); - assert_eq!(all_voters(), vec![]); - - assert_noop!(Elections::remove_voter(Origin::signed(2)), Error::::MustBeVoter); - }); - } - - #[test] - fn removed_voter_should_not_be_counted() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - - assert_ok!(Elections::remove_voter(Origin::signed(4))); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![3, 5]); - }); - } - - #[test] - fn reporter_must_be_voter() { - ExtBuilder::default().build_and_execute(|| { - assert_noop!( - Elections::report_defunct_voter(Origin::signed(1), 2), - Error::::MustBeVoter, - ); - }); - } - - #[test] - fn can_detect_defunct_voter() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(6))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 20)); - assert_ok!(Elections::vote(Origin::signed(6), vec![6], 30)); - // will be soon a defunct voter. 
- assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![6]); - assert_eq!(Elections::candidates(), vec![]); - - // all of them have a member or runner-up that they voted for. - assert_eq!(Elections::is_defunct_voter(&5), false); - assert_eq!(Elections::is_defunct_voter(&4), false); - assert_eq!(Elections::is_defunct_voter(&2), false); - assert_eq!(Elections::is_defunct_voter(&6), false); - - // defunct - assert_eq!(Elections::is_defunct_voter(&3), true); - - assert_ok!(Elections::submit_candidacy(Origin::signed(1))); - assert_ok!(Elections::vote(Origin::signed(1), vec![1], 10)); - - // has a candidate voted for. - assert_eq!(Elections::is_defunct_voter(&1), false); - - }); - } - - #[test] - fn report_voter_should_work_and_earn_reward() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 20)); - // will be soon a defunct voter. 
- assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); - - assert_eq!(balances(&3), (28, 2)); - assert_eq!(balances(&5), (45, 5)); - - assert_ok!(Elections::report_defunct_voter(Origin::signed(5), 3)); - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::VoterReported(3, 5, true)) - })); - - assert_eq!(balances(&3), (28, 0)); - assert_eq!(balances(&5), (47, 5)); - }); - } - - #[test] - fn report_voter_should_slash_when_bad_report() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::candidates(), vec![]); - - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&5), (45, 5)); - - assert_ok!(Elections::report_defunct_voter(Origin::signed(5), 4)); - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::VoterReported(4, 5, false)) - })); - - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&5), (45, 3)); - }); - } - - - #[test] - fn simple_voting_rounds_should_work() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 15)); - 
assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - - assert_eq_uvec!(all_voters(), vec![2, 3, 4]); - - assert_eq!(Elections::votes_of(&2), vec![5]); - assert_eq!(Elections::votes_of(&3), vec![3]); - assert_eq!(Elections::votes_of(&4), vec![4]); - - assert_eq!(Elections::candidates(), vec![3, 4, 5]); - assert_eq!(>::decode_len().unwrap(), 3); - - assert_eq!(Elections::election_rounds(), 0); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(3, 30), (5, 20)]); - assert_eq!(Elections::runners_up(), vec![]); - assert_eq_uvec!(all_voters(), vec![2, 3, 4]); - assert_eq!(Elections::candidates(), vec![]); - assert_eq!(>::decode_len().unwrap(), 0); - - assert_eq!(Elections::election_rounds(), 1); - }); - } - - #[test] - fn defunct_voter_will_be_counted() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - - // This guy's vote is pointless for this round. - assert_ok!(Elections::vote(Origin::signed(3), vec![4], 30)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(5, 50)]); - assert_eq!(Elections::election_rounds(), 1); - - // but now it has a valid target. - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - System::set_block_number(10); - assert_ok!(Elections::end_block(System::block_number())); - - // candidate 4 is affected by an old vote. 
- assert_eq!(Elections::members(), vec![(4, 30), (5, 50)]); - assert_eq!(Elections::election_rounds(), 2); - assert_eq_uvec!(all_voters(), vec![3, 5]); - }); - } - - #[test] - fn only_desired_seats_are_chosen() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::election_rounds(), 1); - assert_eq!(Elections::members_ids(), vec![4, 5]); - }); - } - - #[test] - fn phragmen_should_not_self_vote() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::candidates(), vec![]); - assert_eq!(Elections::election_rounds(), 1); - assert_eq!(Elections::members_ids(), vec![]); - }); - } - - #[test] - fn runners_up_should_be_kept() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![3], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![2], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); - 
assert_ok!(Elections::vote(Origin::signed(5), vec![4], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - // sorted based on account id. - assert_eq!(Elections::members_ids(), vec![4, 5]); - // sorted based on merit (least -> most) - assert_eq!(Elections::runners_up_ids(), vec![3, 2]); - - // runner ups are still locked. - assert_eq!(balances(&4), (35, 5)); - assert_eq!(balances(&5), (45, 5)); - assert_eq!(balances(&3), (25, 5)); - }); - } - - #[test] - fn runners_up_should_be_next_candidates() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 15)); - - System::set_block_number(10); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members(), vec![(3, 30), (4, 40)]); - assert_eq!(Elections::runners_up(), vec![(5, 15), (2, 20)]); - }); - } - - #[test] - fn runners_up_lose_bond_once_outgoing() { - ExtBuilder::default().desired_runners_up(1).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(2), 
vec![2], 20)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); - assert_eq!(balances(&2), (15, 5)); - - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - - System::set_block_number(10); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::runners_up_ids(), vec![3]); - assert_eq!(balances(&2), (15, 2)); - }); - } - - #[test] - fn members_lose_bond_once_outgoing() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(balances(&5), (50, 0)); - - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_eq!(balances(&5), (47, 3)); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_eq!(balances(&5), (45, 5)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members_ids(), vec![5]); - - assert_ok!(Elections::remove_voter(Origin::signed(5))); - assert_eq!(balances(&5), (47, 3)); - - System::set_block_number(10); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members_ids(), vec![]); - - assert_eq!(balances(&5), (47, 0)); - }); - } - - #[test] - fn losers_will_lose_the_bond() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); - - assert_eq!(balances(&5), (47, 3)); - assert_eq!(balances(&3), (27, 3)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![5]); - - // winner - 
assert_eq!(balances(&5), (47, 3)); - // loser - assert_eq!(balances(&3), (27, 0)); - }); - } - - #[test] - fn current_members_are_always_next_candidate() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::election_rounds(), 1); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - - assert_ok!(Elections::remove_voter(Origin::signed(4))); - - // 5 will persist as candidates despite not being in the list. - assert_eq!(Elections::candidates(), vec![2, 3]); - - System::set_block_number(10); - assert_ok!(Elections::end_block(System::block_number())); - - // 4 removed; 5 and 3 are the new best. - assert_eq!(Elections::members_ids(), vec![3, 5]); - }); - } - - #[test] - fn election_state_is_uninterrupted() { - // what I mean by uninterrupted: - // given no input or stimulants the same members are re-elected. 
- ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - - let check_at_block = |b: u32| { - System::set_block_number(b.into()); - assert_ok!(Elections::end_block(System::block_number())); - // we keep re-electing the same folks. - assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); - assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); - // no new candidates but old members and runners-up are always added. - assert_eq!(Elections::candidates(), vec![]); - assert_eq!(Elections::election_rounds(), b / 5); - assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]); - }; - - // this state will always persist when no further input is given. 
- check_at_block(5); - check_at_block(10); - check_at_block(15); - check_at_block(20); - }); - } - - #[test] - fn remove_members_triggers_election() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::election_rounds(), 1); - - // a new candidate - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - - assert_ok!(Elections::remove_member(Origin::ROOT, 4)); - - assert_eq!(balances(&4), (35, 2)); // slashed - assert_eq!(Elections::election_rounds(), 2); // new election round - assert_eq!(Elections::members_ids(), vec![3, 5]); // new members - }); - } - - #[test] - fn seats_should_be_released_when_no_vote() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![3], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - assert_eq!(>::decode_len().unwrap(), 3); - - assert_eq!(Elections::election_rounds(), 0); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members_ids(), vec![3, 5]); - assert_eq!(Elections::election_rounds(), 1); - - assert_ok!(Elections::remove_voter(Origin::signed(2))); - assert_ok!(Elections::remove_voter(Origin::signed(3))); - 
assert_ok!(Elections::remove_voter(Origin::signed(4))); - assert_ok!(Elections::remove_voter(Origin::signed(5))); - - // meanwhile, no one cares to become a candidate again. - System::set_block_number(10); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members_ids(), vec![]); - assert_eq!(Elections::election_rounds(), 2); - }); - } - - #[test] - fn incoming_outgoing_are_reported() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - assert_eq!(Elections::members_ids(), vec![4, 5]); - - assert_ok!(Elections::submit_candidacy(Origin::signed(1))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - // 5 will change their vote and becomes an `outgoing` - assert_ok!(Elections::vote(Origin::signed(5), vec![4], 8)); - // 4 will stay in the set - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - // 3 will become a winner - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - // these two are losers. - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - assert_ok!(Elections::vote(Origin::signed(1), vec![1], 10)); - - System::set_block_number(10); - assert_ok!(Elections::end_block(System::block_number())); - - // 3, 4 are new members, must still be bonded, nothing slashed. - assert_eq!(Elections::members(), vec![(3, 30), (4, 48)]); - assert_eq!(balances(&3), (25, 5)); - assert_eq!(balances(&4), (35, 5)); - - // 1 is a loser, slashed by 3. - assert_eq!(balances(&1), (5, 2)); - - // 5 is an outgoing loser. will also get slashed. 
- assert_eq!(balances(&5), (45, 2)); - - assert!(System::events().iter().any(|event| { - event.event == Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])) - })); - }) - } - - #[test] - fn invalid_votes_are_moot() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![10], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq_uvec!(Elections::members_ids(), vec![3, 4]); - assert_eq!(Elections::election_rounds(), 1); - }); - } - - #[test] - fn members_are_sorted_based_on_id_runners_on_merit() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![3], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![2], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![4], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - // id: low -> high. - assert_eq!(Elections::members(), vec![(4, 50), (5, 40)]); - // merit: low -> high. 
- assert_eq!(Elections::runners_up(), vec![(3, 20), (2, 30)]); - }); - } - - #[test] - fn candidates_are_sorted() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - - assert_eq!(Elections::candidates(), vec![3, 5]); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3))); - - assert_eq!(Elections::candidates(), vec![2, 4, 5]); - }) - } - - #[test] - fn runner_up_replacement_maintains_members_order() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), vec![2], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![2, 4]); - assert_ok!(Elections::remove_member(Origin::ROOT, 2)); - assert_eq!(Elections::members_ids(), vec![4, 5]); - }); - } - - #[test] - fn runner_up_replacement_works_when_out_of_order() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(5), 
vec![2], 50)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![2, 4]); - assert_ok!(Elections::renounce_candidacy(Origin::signed(3))); - }); - } - - #[test] - fn can_renounce_candidacy_member_with_runners_bond_is_refunded() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); - - assert_ok!(Elections::renounce_candidacy(Origin::signed(4))); - assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
- - assert_eq!(Elections::members_ids(), vec![3, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); - }) - } - - #[test] - fn can_renounce_candidacy_member_without_runners_bond_is_refunded() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![]); - assert_eq!(Elections::candidates(), vec![2, 3]); - - assert_ok!(Elections::renounce_candidacy(Origin::signed(4))); - assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
- - // no replacement - assert_eq!(Elections::members_ids(), vec![5]); - assert_eq!(Elections::runners_up_ids(), vec![]); - // still candidate - assert_eq!(Elections::candidates(), vec![2, 3]); - }) - } - - #[test] - fn can_renounce_candidacy_runner() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_ok!(Elections::submit_candidacy(Origin::signed(4))); - assert_ok!(Elections::submit_candidacy(Origin::signed(3))); - assert_ok!(Elections::submit_candidacy(Origin::signed(2))); - - assert_ok!(Elections::vote(Origin::signed(5), vec![4], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); - - assert_ok!(Elections::renounce_candidacy(Origin::signed(3))); - assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. 
- - assert_eq!(Elections::members_ids(), vec![4, 5]); - assert_eq!(Elections::runners_up_ids(), vec![2]); - }) - } - - #[test] - fn can_renounce_candidacy_candidate() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5))); - assert_eq!(balances(&5), (47, 3)); - assert_eq!(Elections::candidates(), vec![5]); - - assert_ok!(Elections::renounce_candidacy(Origin::signed(5))); - assert_eq!(balances(&5), (50, 0)); - assert_eq!(Elections::candidates(), vec![]); - }) - } - - #[test] - fn wrong_renounce_candidacy_should_fail() { - ExtBuilder::default().build_and_execute(|| { - assert_noop!( - Elections::renounce_candidacy(Origin::signed(5)), - Error::::InvalidOrigin, - ); - }) - } - - #[test] - fn behavior_with_dupe_candidate() { - ExtBuilder::default().desired_runners_up(2).build_and_execute(|| { - >::put(vec![1, 1, 2, 3, 4]); - - assert_ok!(Elections::vote(Origin::signed(5), vec![1], 50)); - assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); - assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); - assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); - - System::set_block_number(5); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members_ids(), vec![1, 4]); - assert_eq!(Elections::runners_up_ids(), vec![2, 3]); - assert_eq!(Elections::candidates(), vec![]); - }) - } + pub struct ExtBuilder { + genesis_members: Vec<(u64, u64)>, + balance_factor: u64, + voter_bond: u64, + term_duration: u64, + desired_runners_up: u32, + } + + impl Default for ExtBuilder { + fn default() -> Self { + Self { + genesis_members: vec![], + balance_factor: 1, + voter_bond: 2, + desired_runners_up: 0, + term_duration: 5, + } + } + } + + impl ExtBuilder { + pub fn voter_bond(mut self, fee: u64) -> Self { + self.voter_bond = fee; + self + } + pub fn desired_runners_up(mut self, count: u32) -> Self { + self.desired_runners_up = count; + self + } + pub fn term_duration(mut self, 
duration: u64) -> Self { + self.term_duration = duration; + self + } + pub fn genesis_members(mut self, members: Vec<(u64, u64)>) -> Self { + self.genesis_members = members; + self + } + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + VOTING_BOND.with(|v| *v.borrow_mut() = self.voter_bond); + TERM_DURATION.with(|v| *v.borrow_mut() = self.term_duration); + DESIRED_RUNNERS_UP.with(|v| *v.borrow_mut() = self.desired_runners_up); + MEMBERS.with(|m| { + *m.borrow_mut() = self + .genesis_members + .iter() + .map(|(m, _)| m.clone()) + .collect::>() + }); + let mut ext: sp_io::TestExternalities = GenesisConfig { + pallet_balances: Some(pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 10 * self.balance_factor), + (2, 20 * self.balance_factor), + (3, 30 * self.balance_factor), + (4, 40 * self.balance_factor), + (5, 50 * self.balance_factor), + (6, 60 * self.balance_factor), + ], + }), + elections_phragmen: Some(elections_phragmen::GenesisConfig:: { + members: self.genesis_members, + }), + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(pre_conditions); + ext.execute_with(test); + ext.execute_with(post_conditions) + } + } + + fn all_voters() -> Vec { + Voting::::iter().map(|(v, _)| v).collect::>() + } + + fn balances(who: &u64) -> (u64, u64) { + (Balances::free_balance(who), Balances::reserved_balance(who)) + } + + fn has_lock(who: &u64) -> u64 { + let lock = Balances::locks(who)[0].clone(); + assert_eq!(lock.id, MODULE_ID); + lock.amount + } + + fn intersects(a: &[T], b: &[T]) -> bool { + a.iter().any(|e| b.contains(e)) + } + + fn ensure_members_sorted() { + let mut members = Elections::members().clone(); + members.sort(); + assert_eq!(Elections::members(), members); + } + + fn ensure_candidates_sorted() { + let mut candidates = Elections::candidates().clone(); + candidates.sort(); + assert_eq!(Elections::candidates(), candidates); + } + + fn ensure_members_has_approval_stake() { + // we filter members that have no approval state. 
This means that even we have more seats + // than candidates, we will never ever chose a member with no votes. + assert!(Elections::members() + .iter() + .chain(Elections::runners_up().iter()) + .all(|(_, s)| *s != Zero::zero())); + } + + fn ensure_member_candidates_runners_up_disjoint() { + // members, candidates and runners-up must always be disjoint sets. + assert!(!intersects( + &Elections::members_ids(), + &Elections::candidates() + )); + assert!(!intersects( + &Elections::members_ids(), + &Elections::runners_up_ids() + )); + assert!(!intersects( + &Elections::candidates(), + &Elections::runners_up_ids() + )); + } + + fn pre_conditions() { + System::set_block_number(1); + ensure_members_sorted(); + ensure_candidates_sorted(); + } + + fn post_conditions() { + ensure_members_sorted(); + ensure_candidates_sorted(); + ensure_member_candidates_runners_up_disjoint(); + ensure_members_has_approval_stake(); + } + + #[test] + fn params_should_work() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Elections::desired_members(), 2); + assert_eq!(Elections::term_duration(), 5); + assert_eq!(Elections::election_rounds(), 0); + + assert_eq!(Elections::members(), vec![]); + assert_eq!(Elections::runners_up(), vec![]); + + assert_eq!(Elections::candidates(), vec![]); + assert_eq!(>::decode_len().unwrap(), 0); + assert!(Elections::is_candidate(&1).is_err()); + + assert_eq!(all_voters(), vec![]); + assert_eq!(Elections::votes_of(&1), vec![]); + }); + } + + #[test] + fn genesis_members_should_work() { + ExtBuilder::default() + .genesis_members(vec![(1, 10), (2, 20)]) + .build_and_execute(|| { + System::set_block_number(1); + assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + + assert_eq!(Elections::voting(1), (10, vec![1])); + assert_eq!(Elections::voting(2), (20, vec![2])); + + // they will persist since they have self vote. 
+ System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![1, 2]); + }) + } + + #[test] + fn genesis_members_unsorted_should_work() { + ExtBuilder::default() + .genesis_members(vec![(2, 20), (1, 10)]) + .build_and_execute(|| { + System::set_block_number(1); + assert_eq!(Elections::members(), vec![(1, 10), (2, 20)]); + + assert_eq!(Elections::voting(1), (10, vec![1])); + assert_eq!(Elections::voting(2), (20, vec![2])); + + // they will persist since they have self vote. + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![1, 2]); + }) + } + + #[test] + #[should_panic = "Genesis member does not have enough stake"] + fn genesis_members_cannot_over_stake_0() { + // 10 cannot lock 20 as their stake and extra genesis will panic. + ExtBuilder::default() + .genesis_members(vec![(1, 20), (2, 20)]) + .build_and_execute(|| {}); + } + + #[test] + #[should_panic] + fn genesis_members_cannot_over_stake_1() { + // 10 cannot reserve 20 as voting bond and extra genesis will panic. 
+ ExtBuilder::default() + .voter_bond(20) + .genesis_members(vec![(1, 10), (2, 20)]) + .build_and_execute(|| {}); + } + + #[test] + #[should_panic = "Duplicate member in elections phragmen genesis: 2"] + fn genesis_members_cannot_be_duplicate() { + ExtBuilder::default() + .genesis_members(vec![(1, 10), (2, 10), (2, 10)]) + .build_and_execute(|| {}); + } + + #[test] + fn term_duration_zero_is_passive() { + ExtBuilder::default() + .term_duration(0) + .build_and_execute(|| { + assert_eq!(Elections::term_duration(), 0); + assert_eq!(Elections::desired_members(), 2); + assert_eq!(Elections::election_rounds(), 0); + + assert_eq!(Elections::members_ids(), vec![]); + assert_eq!(Elections::runners_up(), vec![]); + assert_eq!(Elections::candidates(), vec![]); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![]); + assert_eq!(Elections::runners_up(), vec![]); + assert_eq!(Elections::candidates(), vec![]); + }); + } + + #[test] + fn simple_candidate_submission_should_work() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert!(Elections::is_candidate(&1).is_err()); + assert!(Elections::is_candidate(&2).is_err()); + + assert_eq!(balances(&1), (10, 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(1))); + assert_eq!(balances(&1), (7, 3)); + + assert_eq!(Elections::candidates(), vec![1]); + + assert!(Elections::is_candidate(&1).is_ok()); + assert!(Elections::is_candidate(&2).is_err()); + + assert_eq!(balances(&2), (20, 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + assert_eq!(balances(&2), (17, 3)); + + assert_eq!(Elections::candidates(), vec![1, 2]); + + assert!(Elections::is_candidate(&1).is_ok()); + assert!(Elections::is_candidate(&2).is_ok()); + }); + } + + #[test] + fn simple_candidate_submission_with_no_votes_should_work() { + ExtBuilder::default().build_and_execute(|| { + 
assert_eq!(Elections::candidates(), Vec::::new()); + + assert_ok!(Elections::submit_candidacy(Origin::signed(1))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert!(Elections::is_candidate(&1).is_ok()); + assert!(Elections::is_candidate(&2).is_ok()); + assert_eq!(Elections::candidates(), vec![1, 2]); + + assert_eq!(Elections::members_ids(), vec![]); + assert_eq!(Elections::runners_up(), vec![]); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert!(Elections::is_candidate(&1).is_err()); + assert!(Elections::is_candidate(&2).is_err()); + assert_eq!(Elections::candidates(), vec![]); + + assert_eq!(Elections::members_ids(), vec![]); + assert_eq!(Elections::runners_up(), vec![]); + }); + } + + #[test] + fn dupe_candidate_submission_should_not_work() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_ok!(Elections::submit_candidacy(Origin::signed(1))); + assert_eq!(Elections::candidates(), vec![1]); + assert_noop!( + Elections::submit_candidacy(Origin::signed(1)), + Error::::DuplicatedCandidate, + ); + }); + } + + #[test] + fn member_candidacy_submission_should_not_work() { + // critically important to make sure that outgoing candidates and losers are not mixed up. 
+ ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![5]); + assert_eq!(Elections::runners_up(), vec![]); + assert_eq!(Elections::candidates(), vec![]); + + assert_noop!( + Elections::submit_candidacy(Origin::signed(5)), + Error::::MemberSubmit, + ); + }); + } + + #[test] + fn runner_candidate_submission_should_not_work() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![5, 4], 20)); + assert_ok!(Elections::vote(Origin::signed(1), vec![3], 10)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::runners_up_ids(), vec![3]); + + assert_noop!( + Elections::submit_candidacy(Origin::signed(3)), + Error::::RunnerSubmit, + ); + }); + } + + #[test] + fn poor_candidate_submission_should_not_work() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_noop!( + Elections::submit_candidacy(Origin::signed(7)), + Error::::InsufficientCandidateFunds, + ); + }); + } + + #[test] + fn simple_voting_should_work() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(balances(&2), (20, 0)); + + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + + assert_eq!(balances(&2), (18, 2)); + assert_eq!(has_lock(&2), 20); + }); + } + + #[test] + fn 
can_vote_with_custom_stake() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(balances(&2), (20, 0)); + + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 12)); + + assert_eq!(balances(&2), (18, 2)); + assert_eq!(has_lock(&2), 12); + }); + } + + #[test] + fn can_update_votes_and_stake() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(balances(&2), (20, 0)); + + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + + assert_eq!(balances(&2), (18, 2)); + assert_eq!(has_lock(&2), 20); + assert_eq!(Elections::locked_stake_of(&2), 20); + + // can update; different stake; different lock and reserve. + assert_ok!(Elections::vote(Origin::signed(2), vec![5, 4], 15)); + assert_eq!(balances(&2), (18, 2)); + assert_eq!(has_lock(&2), 15); + assert_eq!(Elections::locked_stake_of(&2), 15); + }); + } + + #[test] + fn cannot_vote_for_no_candidate() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + Elections::vote(Origin::signed(2), vec![], 20), + Error::::UnableToVote, + ); + }); + } + + #[test] + fn can_vote_for_old_members_even_when_no_new_candidates() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 20)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::candidates(), vec![]); + + assert_ok!(Elections::vote(Origin::signed(3), vec![4, 5], 10)); + }); + } + + #[test] + fn prime_works() { + ExtBuilder::default().build_and_execute(|| { + 
assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + + assert_ok!(Elections::vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(Elections::vote(Origin::signed(2), vec![4], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::candidates(), vec![]); + + assert_ok!(Elections::vote(Origin::signed(3), vec![4, 5], 10)); + assert_eq!(PRIME.with(|p| *p.borrow()), Some(4)); + }); + } + + #[test] + fn prime_votes_for_exiting_members_are_removed() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + + assert_ok!(Elections::vote(Origin::signed(1), vec![4, 3], 10)); + assert_ok!(Elections::vote(Origin::signed(2), vec![4], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(4))); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![3, 5]); + assert_eq!(Elections::candidates(), vec![]); + + assert_eq!(PRIME.with(|p| *p.borrow()), Some(5)); + }); + } + + #[test] + fn cannot_vote_for_more_than_candidates() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); 
+ + assert_noop!( + Elections::vote(Origin::signed(2), vec![10, 20, 30], 20), + Error::::TooManyVotes, + ); + }); + } + + #[test] + fn cannot_vote_for_less_than_ed() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_noop!( + Elections::vote(Origin::signed(2), vec![4], 1), + Error::::LowBalance, + ); + }) + } + + #[test] + fn can_vote_for_more_than_total_balance_but_moot() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 30)); + // you can lie but won't get away with it. + assert_eq!(Elections::locked_stake_of(&2), 20); + assert_eq!(has_lock(&2), 20); + }); + } + + #[test] + fn remove_voter_should_work() { + ExtBuilder::default().voter_bond(8).build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![5], 30)); + + assert_eq_uvec!(all_voters(), vec![2, 3]); + assert_eq!(Elections::locked_stake_of(&2), 20); + assert_eq!(Elections::locked_stake_of(&3), 30); + assert_eq!(Elections::votes_of(&2), vec![5]); + assert_eq!(Elections::votes_of(&3), vec![5]); + + assert_ok!(Elections::remove_voter(Origin::signed(2))); + + assert_eq_uvec!(all_voters(), vec![3]); + assert_eq!(Elections::votes_of(&2), vec![]); + assert_eq!(Elections::locked_stake_of(&2), 0); + + assert_eq!(balances(&2), (20, 0)); + assert_eq!(Balances::locks(&2).len(), 0); + }); + } + + #[test] + fn non_voter_remove_should_not_work() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + Elections::remove_voter(Origin::signed(3)), + Error::::MustBeVoter + ); + }); + } + + #[test] + fn dupe_remove_should_fail() { + 
ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + + assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_eq!(all_voters(), vec![]); + + assert_noop!( + Elections::remove_voter(Origin::signed(2)), + Error::::MustBeVoter + ); + }); + } + + #[test] + fn removed_voter_should_not_be_counted() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + + assert_ok!(Elections::remove_voter(Origin::signed(4))); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![3, 5]); + }); + } + + #[test] + fn reporter_must_be_voter() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + Elections::report_defunct_voter(Origin::signed(1), 2), + Error::::MustBeVoter, + ); + }); + } + + #[test] + fn can_detect_defunct_voter() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(6))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 20)); + assert_ok!(Elections::vote(Origin::signed(6), vec![6], 30)); + // will be soon a defunct voter. 
+ assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::runners_up_ids(), vec![6]); + assert_eq!(Elections::candidates(), vec![]); + + // all of them have a member or runner-up that they voted for. + assert_eq!(Elections::is_defunct_voter(&5), false); + assert_eq!(Elections::is_defunct_voter(&4), false); + assert_eq!(Elections::is_defunct_voter(&2), false); + assert_eq!(Elections::is_defunct_voter(&6), false); + + // defunct + assert_eq!(Elections::is_defunct_voter(&3), true); + + assert_ok!(Elections::submit_candidacy(Origin::signed(1))); + assert_ok!(Elections::vote(Origin::signed(1), vec![1], 10)); + + // has a candidate voted for. + assert_eq!(Elections::is_defunct_voter(&1), false); + }); + } + + #[test] + fn report_voter_should_work_and_earn_reward() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(2), vec![4, 5], 20)); + // will be soon a defunct voter. 
+ assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::candidates(), vec![]); + + assert_eq!(balances(&3), (28, 2)); + assert_eq!(balances(&5), (45, 5)); + + assert_ok!(Elections::report_defunct_voter(Origin::signed(5), 3)); + assert!(System::events().iter().any(|event| { + event.event == Event::elections_phragmen(RawEvent::VoterReported(3, 5, true)) + })); + + assert_eq!(balances(&3), (28, 0)); + assert_eq!(balances(&5), (47, 5)); + }); + } + + #[test] + fn report_voter_should_slash_when_bad_report() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::candidates(), vec![]); + + assert_eq!(balances(&4), (35, 5)); + assert_eq!(balances(&5), (45, 5)); + + assert_ok!(Elections::report_defunct_voter(Origin::signed(5), 4)); + assert!(System::events().iter().any(|event| { + event.event == Event::elections_phragmen(RawEvent::VoterReported(4, 5, false)) + })); + + assert_eq!(balances(&4), (35, 5)); + assert_eq!(balances(&5), (45, 3)); + }); + } + + #[test] + fn simple_voting_rounds_should_work() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 15)); + 
assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + + assert_eq_uvec!(all_voters(), vec![2, 3, 4]); + + assert_eq!(Elections::votes_of(&2), vec![5]); + assert_eq!(Elections::votes_of(&3), vec![3]); + assert_eq!(Elections::votes_of(&4), vec![4]); + + assert_eq!(Elections::candidates(), vec![3, 4, 5]); + assert_eq!(>::decode_len().unwrap(), 3); + + assert_eq!(Elections::election_rounds(), 0); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(3, 30), (5, 20)]); + assert_eq!(Elections::runners_up(), vec![]); + assert_eq_uvec!(all_voters(), vec![2, 3, 4]); + assert_eq!(Elections::candidates(), vec![]); + assert_eq!(>::decode_len().unwrap(), 0); + + assert_eq!(Elections::election_rounds(), 1); + }); + } + + #[test] + fn defunct_voter_will_be_counted() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + + // This guy's vote is pointless for this round. + assert_ok!(Elections::vote(Origin::signed(3), vec![4], 30)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(5, 50)]); + assert_eq!(Elections::election_rounds(), 1); + + // but now it has a valid target. + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + System::set_block_number(10); + assert_ok!(Elections::end_block(System::block_number())); + + // candidate 4 is affected by an old vote. 
+ assert_eq!(Elections::members(), vec![(4, 30), (5, 50)]); + assert_eq!(Elections::election_rounds(), 2); + assert_eq_uvec!(all_voters(), vec![3, 5]); + }); + } + + #[test] + fn only_desired_seats_are_chosen() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::election_rounds(), 1); + assert_eq!(Elections::members_ids(), vec![4, 5]); + }); + } + + #[test] + fn phragmen_should_not_self_vote() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::candidates(), vec![]); + assert_eq!(Elections::election_rounds(), 1); + assert_eq!(Elections::members_ids(), vec![]); + }); + } + + #[test] + fn runners_up_should_be_kept() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![3], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![2], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); + 
assert_ok!(Elections::vote(Origin::signed(5), vec![4], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + // sorted based on account id. + assert_eq!(Elections::members_ids(), vec![4, 5]); + // sorted based on merit (least -> most) + assert_eq!(Elections::runners_up_ids(), vec![3, 2]); + + // runner ups are still locked. + assert_eq!(balances(&4), (35, 5)); + assert_eq!(balances(&5), (45, 5)); + assert_eq!(balances(&3), (25, 5)); + }); + } + + #[test] + fn runners_up_should_be_next_candidates() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); + assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 15)); + + System::set_block_number(10); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members(), vec![(3, 30), (4, 40)]); + assert_eq!(Elections::runners_up(), vec![(5, 15), (2, 20)]); + }); + } + + #[test] + fn runners_up_lose_bond_once_outgoing() { + ExtBuilder::default() + .desired_runners_up(1) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + 
assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::runners_up_ids(), vec![2]); + assert_eq!(balances(&2), (15, 5)); + + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + + System::set_block_number(10); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::runners_up_ids(), vec![3]); + assert_eq!(balances(&2), (15, 2)); + }); + } + + #[test] + fn members_lose_bond_once_outgoing() { + ExtBuilder::default().build_and_execute(|| { + assert_eq!(balances(&5), (50, 0)); + + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_eq!(balances(&5), (47, 3)); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_eq!(balances(&5), (45, 5)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members_ids(), vec![5]); + + assert_ok!(Elections::remove_voter(Origin::signed(5))); + assert_eq!(balances(&5), (47, 3)); + + System::set_block_number(10); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members_ids(), vec![]); + + assert_eq!(balances(&5), (47, 0)); + }); + } + + #[test] + fn losers_will_lose_the_bond() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); + + assert_eq!(balances(&5), (47, 3)); + assert_eq!(balances(&3), (27, 3)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + 
assert_eq!(Elections::members_ids(), vec![5]); + + // winner + assert_eq!(balances(&5), (47, 3)); + // loser + assert_eq!(balances(&3), (27, 0)); + }); + } + + #[test] + fn current_members_are_always_next_candidate() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::election_rounds(), 1); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + + assert_ok!(Elections::remove_voter(Origin::signed(4))); + + // 5 will persist as candidates despite not being in the list. + assert_eq!(Elections::candidates(), vec![2, 3]); + + System::set_block_number(10); + assert_ok!(Elections::end_block(System::block_number())); + + // 4 removed; 5 and 3 are the new best. + assert_eq!(Elections::members_ids(), vec![3, 5]); + }); + } + + #[test] + fn election_state_is_uninterrupted() { + // what I mean by uninterrupted: + // given no input or stimulants the same members are re-elected. 
+ ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + + let check_at_block = |b: u32| { + System::set_block_number(b.into()); + assert_ok!(Elections::end_block(System::block_number())); + // we keep re-electing the same folks. + assert_eq!(Elections::members(), vec![(4, 40), (5, 50)]); + assert_eq!(Elections::runners_up(), vec![(2, 20), (3, 30)]); + // no new candidates but old members and runners-up are always added. + assert_eq!(Elections::candidates(), vec![]); + assert_eq!(Elections::election_rounds(), b / 5); + assert_eq_uvec!(all_voters(), vec![2, 3, 4, 5]); + }; + + // this state will always persist when no further input is given. 
+ check_at_block(5); + check_at_block(10); + check_at_block(15); + check_at_block(20); + }); + } + + #[test] + fn remove_members_triggers_election() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::election_rounds(), 1); + + // a new candidate + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + + assert_ok!(Elections::remove_member(Origin::ROOT, 4)); + + assert_eq!(balances(&4), (35, 2)); // slashed + assert_eq!(Elections::election_rounds(), 2); // new election round + assert_eq!(Elections::members_ids(), vec![3, 5]); // new members + }); + } + + #[test] + fn seats_should_be_released_when_no_vote() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![3], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + assert_eq!(>::decode_len().unwrap(), 3); + + assert_eq!(Elections::election_rounds(), 0); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members_ids(), vec![3, 5]); + assert_eq!(Elections::election_rounds(), 1); + + assert_ok!(Elections::remove_voter(Origin::signed(2))); + assert_ok!(Elections::remove_voter(Origin::signed(3))); + 
assert_ok!(Elections::remove_voter(Origin::signed(4))); + assert_ok!(Elections::remove_voter(Origin::signed(5))); + + // meanwhile, no one cares to become a candidate again. + System::set_block_number(10); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members_ids(), vec![]); + assert_eq!(Elections::election_rounds(), 2); + }); + } + + #[test] + fn incoming_outgoing_are_reported() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + assert_eq!(Elections::members_ids(), vec![4, 5]); + + assert_ok!(Elections::submit_candidacy(Origin::signed(1))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + // 5 will change their vote and becomes an `outgoing` + assert_ok!(Elections::vote(Origin::signed(5), vec![4], 8)); + // 4 will stay in the set + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + // 3 will become a winner + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + // these two are losers. + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + assert_ok!(Elections::vote(Origin::signed(1), vec![1], 10)); + + System::set_block_number(10); + assert_ok!(Elections::end_block(System::block_number())); + + // 3, 4 are new members, must still be bonded, nothing slashed. + assert_eq!(Elections::members(), vec![(3, 30), (4, 48)]); + assert_eq!(balances(&3), (25, 5)); + assert_eq!(balances(&4), (35, 5)); + + // 1 is a loser, slashed by 3. + assert_eq!(balances(&1), (5, 2)); + + // 5 is an outgoing loser. will also get slashed. 
+ assert_eq!(balances(&5), (45, 2)); + + assert!(System::events().iter().any(|event| { + event.event == Event::elections_phragmen(RawEvent::NewTerm(vec![(4, 40), (5, 50)])) + })); + }) + } + + #[test] + fn invalid_votes_are_moot() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![10], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq_uvec!(Elections::members_ids(), vec![3, 4]); + assert_eq!(Elections::election_rounds(), 1); + }); + } + + #[test] + fn members_are_sorted_based_on_id_runners_on_merit() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![3], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![2], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![4], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + // id: low -> high. + assert_eq!(Elections::members(), vec![(4, 50), (5, 40)]); + // merit: low -> high. 
+ assert_eq!(Elections::runners_up(), vec![(3, 20), (2, 30)]); + }); + } + + #[test] + fn candidates_are_sorted() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + + assert_eq!(Elections::candidates(), vec![3, 5]); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3))); + + assert_eq!(Elections::candidates(), vec![2, 4, 5]); + }) + } + + #[test] + fn runner_up_replacement_maintains_members_order() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(5), vec![2], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![2, 4]); + assert_ok!(Elections::remove_member(Origin::ROOT, 2)); + assert_eq!(Elections::members_ids(), vec![4, 5]); + }); + } + + #[test] + fn runner_up_replacement_works_when_out_of_order() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(2), vec![5], 20)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + 
assert_ok!(Elections::vote(Origin::signed(5), vec![2], 50)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![2, 4]); + assert_ok!(Elections::renounce_candidacy(Origin::signed(3))); + }); + } + + #[test] + fn can_renounce_candidacy_member_with_runners_bond_is_refunded() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(4))); + assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
+ + assert_eq!(Elections::members_ids(), vec![3, 5]); + assert_eq!(Elections::runners_up_ids(), vec![2]); + }) + } + + #[test] + fn can_renounce_candidacy_member_without_runners_bond_is_refunded() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![5], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::runners_up_ids(), vec![]); + assert_eq!(Elections::candidates(), vec![2, 3]); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(4))); + assert_eq!(balances(&4), (38, 2)); // 2 is voting bond. 
+ + // no replacement + assert_eq!(Elections::members_ids(), vec![5]); + assert_eq!(Elections::runners_up_ids(), vec![]); + // still candidate + assert_eq!(Elections::candidates(), vec![2, 3]); + }) + } + + #[test] + fn can_renounce_candidacy_runner() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_ok!(Elections::submit_candidacy(Origin::signed(4))); + assert_ok!(Elections::submit_candidacy(Origin::signed(3))); + assert_ok!(Elections::submit_candidacy(Origin::signed(2))); + + assert_ok!(Elections::vote(Origin::signed(5), vec![4], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![5], 40)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(3))); + assert_eq!(balances(&3), (28, 2)); // 2 is voting bond. 
+ + assert_eq!(Elections::members_ids(), vec![4, 5]); + assert_eq!(Elections::runners_up_ids(), vec![2]); + }) + } + + #[test] + fn can_renounce_candidacy_candidate() { + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5))); + assert_eq!(balances(&5), (47, 3)); + assert_eq!(Elections::candidates(), vec![5]); + + assert_ok!(Elections::renounce_candidacy(Origin::signed(5))); + assert_eq!(balances(&5), (50, 0)); + assert_eq!(Elections::candidates(), vec![]); + }) + } + + #[test] + fn wrong_renounce_candidacy_should_fail() { + ExtBuilder::default().build_and_execute(|| { + assert_noop!( + Elections::renounce_candidacy(Origin::signed(5)), + Error::::InvalidOrigin, + ); + }) + } + + #[test] + fn behavior_with_dupe_candidate() { + ExtBuilder::default() + .desired_runners_up(2) + .build_and_execute(|| { + >::put(vec![1, 1, 2, 3, 4]); + + assert_ok!(Elections::vote(Origin::signed(5), vec![1], 50)); + assert_ok!(Elections::vote(Origin::signed(4), vec![4], 40)); + assert_ok!(Elections::vote(Origin::signed(3), vec![3], 30)); + assert_ok!(Elections::vote(Origin::signed(2), vec![2], 20)); + + System::set_block_number(5); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members_ids(), vec![1, 4]); + assert_eq!(Elections::runners_up_ids(), vec![2, 3]); + assert_eq!(Elections::candidates(), vec![]); + }) + } } diff --git a/frame/elections/src/lib.rs b/frame/elections/src/lib.rs index a2398ad485..78ee4bfe67 100644 --- a/frame/elections/src/lib.rs +++ b/frame/elections/src/lib.rs @@ -21,23 +21,24 @@ //! whose voting is serially unsuccessful. 
#![cfg_attr(not(feature = "std"), no_std)] -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_std::prelude::*; -use sp_runtime::{ - RuntimeDebug, DispatchResult, print, - traits::{Zero, One, StaticLookup, Saturating}, -}; +use codec::{Decode, Encode}; use frame_support::{ - decl_storage, decl_event, ensure, decl_module, decl_error, - weights::{Weight, MINIMUM_WEIGHT, SimpleDispatchInfo}, - traits::{ - Currency, ExistenceRequirement, Get, LockableCurrency, LockIdentifier, BalanceStatus, - OnUnbalanced, ReservableCurrency, WithdrawReason, WithdrawReasons, ChangeMembers - } + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{ + BalanceStatus, ChangeMembers, Currency, ExistenceRequirement, Get, LockIdentifier, + LockableCurrency, OnUnbalanced, ReservableCurrency, WithdrawReason, WithdrawReasons, + }, + weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, }; -use codec::{Encode, Decode}; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed}; +use sp_runtime::{ + print, + traits::{One, Saturating, StaticLookup, Zero}, + DispatchResult, RuntimeDebug, +}; +use sp_std::prelude::*; mod mock; mod tests; @@ -102,28 +103,28 @@ mod tests; /// The activity status of a voter. #[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, Default, RuntimeDebug)] pub struct VoterInfo { - /// Last VoteIndex in which this voter assigned (or initialized) approvals. - last_active: VoteIndex, - /// Last VoteIndex in which one of this voter's approvals won. - /// Note that `last_win = N` indicates a last win at index `N-1`, hence `last_win = 0` means no - /// win ever. - last_win: VoteIndex, - /// The amount of stored weight as a result of not winning but changing approvals. - pot: Balance, - /// Current staked amount. A lock equal to this value always exists. - stake: Balance, + /// Last VoteIndex in which this voter assigned (or initialized) approvals. 
+ last_active: VoteIndex, + /// Last VoteIndex in which one of this voter's approvals won. + /// Note that `last_win = N` indicates a last win at index `N-1`, hence `last_win = 0` means no + /// win ever. + last_win: VoteIndex, + /// The amount of stored weight as a result of not winning but changing approvals. + pot: Balance, + /// Current staked amount. A lock equal to this value always exists. + stake: Balance, } /// Used to demonstrate the status of a particular index in the global voter list. #[derive(PartialEq, Eq, RuntimeDebug)] pub enum CellStatus { - /// Any out of bound index. Means a push a must happen to the chunk pointed by `NextVoterSet`. - /// Voting fee is applied in case a new chunk is created. - Head, - /// Already occupied by another voter. Voting fee is applied. - Occupied, - /// Empty hole which should be filled. No fee will be applied. - Hole, + /// Any out of bound index. Means a push a must happen to the chunk pointed by `NextVoterSet`. + /// Voting fee is applied in case a new chunk is created. + Head, + /// Already occupied by another voter. Voting fee is applied. + Occupied, + /// Empty hole which should be filled. No fee will be applied. + Hole, } const MODULE_ID: LockIdentifier = *b"py/elect"; @@ -133,9 +134,10 @@ pub const VOTER_SET_SIZE: usize = 64; /// NUmber of approvals grouped in one chunk. pub const APPROVAL_SET_SIZE: usize = 8; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// Index used to access chunks. type SetIndex = u32; @@ -147,565 +149,564 @@ type ApprovalFlag = u32; const APPROVAL_FLAG_LEN: usize = 32; pub trait Trait: frame_system::Trait { - type Event: From> + Into<::Event>; + type Event: From> + Into<::Event>; - /// The currency that people are electing with. 
- type Currency: - LockableCurrency - + ReservableCurrency; + /// The currency that people are electing with. + type Currency: LockableCurrency + + ReservableCurrency; - /// Handler for the unbalanced reduction when slashing a validator. - type BadPresentation: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing a validator. + type BadPresentation: OnUnbalanced>; - /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. - type BadReaper: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing an invalid reaping attempt. + type BadReaper: OnUnbalanced>; - /// Handler for the unbalanced reduction when submitting a bad `voter_index`. - type BadVoterIndex: OnUnbalanced>; + /// Handler for the unbalanced reduction when submitting a bad `voter_index`. + type BadVoterIndex: OnUnbalanced>; - /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner up) - type LoserCandidate: OnUnbalanced>; + /// Handler for the unbalanced reduction when a candidate has lost (and is not a runner up) + type LoserCandidate: OnUnbalanced>; - /// What to do when the members change. - type ChangeMembers: ChangeMembers; + /// What to do when the members change. + type ChangeMembers: ChangeMembers; - /// How much should be locked up in order to submit one's candidacy. A reasonable - /// default value is 9. - type CandidacyBond: Get>; + /// How much should be locked up in order to submit one's candidacy. A reasonable + /// default value is 9. + type CandidacyBond: Get>; - /// How much should be locked up in order to be able to submit votes. - type VotingBond: Get>; + /// How much should be locked up in order to be able to submit votes. + type VotingBond: Get>; - /// The amount of fee paid upon each vote submission, unless if they submit a - /// _hole_ index and replace it. 
- type VotingFee: Get>; + /// The amount of fee paid upon each vote submission, unless if they submit a + /// _hole_ index and replace it. + type VotingFee: Get>; - /// Minimum about that can be used as the locked value for voting. - type MinimumVotingLock: Get>; + /// Minimum about that can be used as the locked value for voting. + type MinimumVotingLock: Get>; - /// The punishment, per voter, if you provide an invalid presentation. A - /// reasonable default value is 1. - type PresentSlashPerVoter: Get>; + /// The punishment, per voter, if you provide an invalid presentation. A + /// reasonable default value is 1. + type PresentSlashPerVoter: Get>; - /// How many runners-up should have their approvals persist until the next - /// vote. A reasonable default value is 2. - type CarryCount: Get; + /// How many runners-up should have their approvals persist until the next + /// vote. A reasonable default value is 2. + type CarryCount: Get; - /// How many vote indices need to go by after a target voter's last vote before - /// they can be reaped if their approvals are moot. A reasonable default value - /// is 1. - type InactiveGracePeriod: Get; + /// How many vote indices need to go by after a target voter's last vote before + /// they can be reaped if their approvals are moot. A reasonable default value + /// is 1. + type InactiveGracePeriod: Get; - /// How often (in blocks) to check for new votes. A reasonable default value - /// is 1000. - type VotingPeriod: Get; + /// How often (in blocks) to check for new votes. A reasonable default value + /// is 1000. + type VotingPeriod: Get; - /// Decay factor of weight when being accumulated. It should typically be set to - /// __at least__ `membership_size -1` to keep the collective secure. - /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight - /// increment step `t`. 0 will result in no weight being added at all (normal - /// approval voting). A reasonable default value is 24. 
- type DecayRatio: Get; + /// Decay factor of weight when being accumulated. It should typically be set to + /// __at least__ `membership_size -1` to keep the collective secure. + /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight + /// increment step `t`. 0 will result in no weight being added at all (normal + /// approval voting). A reasonable default value is 24. + type DecayRatio: Get; } decl_storage! { - trait Store for Module as Elections { - // ---- parameters - - /// How long to give each top candidate to present themselves after the vote ends. - pub PresentationDuration get(fn presentation_duration) config(): T::BlockNumber; - /// How long each position is active for. - pub TermDuration get(fn term_duration) config(): T::BlockNumber; - /// Number of accounts that should constitute the collective. - pub DesiredSeats get(fn desired_seats) config(): u32; - - // ---- permanent state (always relevant, changes only at the finalization of voting) - - /// The current membership. When there's a vote going on, this should still be used for - /// executive matters. The block number (second element in the tuple) is the block that - /// their position is active until (calculated by the sum of the block number when the - /// member was elected and their term duration). - pub Members get(fn members) config(): Vec<(T::AccountId, T::BlockNumber)>; - /// The total number of vote rounds that have happened or are in progress. - pub VoteCount get(fn vote_index): VoteIndex; - - // ---- persistent state (always relevant, changes constantly) - - // A list of votes for each voter. The votes are stored as numeric values and parsed in a - // bit-wise manner. In order to get a human-readable representation (`Vec`), use - // [`all_approvals_of`]. Furthermore, each vector of scalars is chunked with the cap of - // `APPROVAL_SET_SIZE`. 
- pub ApprovalsOf get(fn approvals_of): - map hasher(twox_64_concat) (T::AccountId, SetIndex) => Vec; - /// The vote index and list slot that the candidate `who` was registered or `None` if they - /// are not currently registered. - pub RegisterInfoOf get(fn candidate_reg_info): - map hasher(twox_64_concat) T::AccountId => Option<(VoteIndex, u32)>; - /// Basic information about a voter. - pub VoterInfoOf get(fn voter_info): - map hasher(twox_64_concat) T::AccountId => Option>>; - /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). - pub Voters get(fn voters): map hasher(twox_64_concat) SetIndex => Vec>; - /// the next free set to store a voter in. This will keep growing. - pub NextVoterSet get(fn next_nonfull_voter_set): SetIndex = 0; - /// Current number of Voters. - pub VoterCount get(fn voter_count): SetIndex = 0; - /// The present candidate list. - pub Candidates get(fn candidates): Vec; // has holes - /// Current number of active candidates - pub CandidateCount get(fn candidate_count): u32; - - // ---- temporary state (only relevant during finalization/presentation) - - /// The accounts holding the seats that will become free on the next tally. - pub NextFinalize get(fn next_finalize): Option<(T::BlockNumber, u32, Vec)>; - /// Get the leaderboard if we're in the presentation phase. The first element is the weight - /// of each entry; It may be the direct summed approval stakes, or a weighted version of it. - /// Sorted from low to high. - pub Leaderboard get(fn leaderboard): Option, T::AccountId)> >; - - /// Who is able to vote for whom. Value is the fund-holding account, key is the - /// vote-transaction-sending account. - pub Proxy get(fn proxy): map hasher(blake2_128_concat) T::AccountId => Option; - } + trait Store for Module as Elections { + // ---- parameters + + /// How long to give each top candidate to present themselves after the vote ends. 
+ pub PresentationDuration get(fn presentation_duration) config(): T::BlockNumber; + /// How long each position is active for. + pub TermDuration get(fn term_duration) config(): T::BlockNumber; + /// Number of accounts that should constitute the collective. + pub DesiredSeats get(fn desired_seats) config(): u32; + + // ---- permanent state (always relevant, changes only at the finalization of voting) + + /// The current membership. When there's a vote going on, this should still be used for + /// executive matters. The block number (second element in the tuple) is the block that + /// their position is active until (calculated by the sum of the block number when the + /// member was elected and their term duration). + pub Members get(fn members) config(): Vec<(T::AccountId, T::BlockNumber)>; + /// The total number of vote rounds that have happened or are in progress. + pub VoteCount get(fn vote_index): VoteIndex; + + // ---- persistent state (always relevant, changes constantly) + + // A list of votes for each voter. The votes are stored as numeric values and parsed in a + // bit-wise manner. In order to get a human-readable representation (`Vec`), use + // [`all_approvals_of`]. Furthermore, each vector of scalars is chunked with the cap of + // `APPROVAL_SET_SIZE`. + pub ApprovalsOf get(fn approvals_of): + map hasher(twox_64_concat) (T::AccountId, SetIndex) => Vec; + /// The vote index and list slot that the candidate `who` was registered or `None` if they + /// are not currently registered. + pub RegisterInfoOf get(fn candidate_reg_info): + map hasher(twox_64_concat) T::AccountId => Option<(VoteIndex, u32)>; + /// Basic information about a voter. + pub VoterInfoOf get(fn voter_info): + map hasher(twox_64_concat) T::AccountId => Option>>; + /// The present voter list (chunked and capped at [`VOTER_SET_SIZE`]). + pub Voters get(fn voters): map hasher(twox_64_concat) SetIndex => Vec>; + /// the next free set to store a voter in. This will keep growing. 
+ pub NextVoterSet get(fn next_nonfull_voter_set): SetIndex = 0; + /// Current number of Voters. + pub VoterCount get(fn voter_count): SetIndex = 0; + /// The present candidate list. + pub Candidates get(fn candidates): Vec; // has holes + /// Current number of active candidates + pub CandidateCount get(fn candidate_count): u32; + + // ---- temporary state (only relevant during finalization/presentation) + + /// The accounts holding the seats that will become free on the next tally. + pub NextFinalize get(fn next_finalize): Option<(T::BlockNumber, u32, Vec)>; + /// Get the leaderboard if we're in the presentation phase. The first element is the weight + /// of each entry; It may be the direct summed approval stakes, or a weighted version of it. + /// Sorted from low to high. + pub Leaderboard get(fn leaderboard): Option, T::AccountId)> >; + + /// Who is able to vote for whom. Value is the fund-holding account, key is the + /// vote-transaction-sending account. + pub Proxy get(fn proxy): map hasher(blake2_128_concat) T::AccountId => Option; + } } decl_error! { - /// Error for the elections module. - pub enum Error for Module { - /// Reporter must be a voter. - NotVoter, - /// Target for inactivity cleanup must be active. - InactiveTarget, - /// Cannot reap during presentation period. - CannotReapPresenting, - /// Cannot reap during grace period. - ReapGrace, - /// Not a proxy. - NotProxy, - /// Invalid reporter index. - InvalidReporterIndex, - /// Invalid target index. - InvalidTargetIndex, - /// Invalid vote index. - InvalidVoteIndex, - /// Cannot retract when presenting. - CannotRetractPresenting, - /// Cannot retract non-voter. - RetractNonVoter, - /// Invalid retraction index. - InvalidRetractionIndex, - /// Duplicate candidate submission. - DuplicatedCandidate, - /// Invalid candidate slot. - InvalidCandidateSlot, - /// Candidate has not enough funds. - InsufficientCandidateFunds, - /// Presenter must have sufficient slashable funds. 
- InsufficientPresenterFunds, - /// Stake deposited to present winner and be added to leaderboard should be non-zero. - ZeroDeposit, - /// Candidate not worthy of leaderboard. - UnworthyCandidate, - /// Leaderboard must exist while present phase active. - LeaderboardMustExist, - /// Cannot present outside of presentation period. - NotPresentationPeriod, - /// Presented candidate must be current. - InvalidCandidate, - /// Duplicated presentation. - DuplicatedPresentation, - /// Incorrect total. - IncorrectTotal, - /// Invalid voter index. - InvalidVoterIndex, - /// New voter must have sufficient funds to pay the bond. - InsufficientVoterFunds, - /// Locked value must be more than limit. - InsufficientLockedValue, - /// Amount of candidate votes cannot exceed amount of candidates. - TooManyVotes, - /// Amount of candidates to receive approval votes should be non-zero. - ZeroCandidates, - /// No approval changes during presentation period. - ApprovalPresentation, - } + /// Error for the elections module. + pub enum Error for Module { + /// Reporter must be a voter. + NotVoter, + /// Target for inactivity cleanup must be active. + InactiveTarget, + /// Cannot reap during presentation period. + CannotReapPresenting, + /// Cannot reap during grace period. + ReapGrace, + /// Not a proxy. + NotProxy, + /// Invalid reporter index. + InvalidReporterIndex, + /// Invalid target index. + InvalidTargetIndex, + /// Invalid vote index. + InvalidVoteIndex, + /// Cannot retract when presenting. + CannotRetractPresenting, + /// Cannot retract non-voter. + RetractNonVoter, + /// Invalid retraction index. + InvalidRetractionIndex, + /// Duplicate candidate submission. + DuplicatedCandidate, + /// Invalid candidate slot. + InvalidCandidateSlot, + /// Candidate has not enough funds. + InsufficientCandidateFunds, + /// Presenter must have sufficient slashable funds. + InsufficientPresenterFunds, + /// Stake deposited to present winner and be added to leaderboard should be non-zero. 
+ ZeroDeposit, + /// Candidate not worthy of leaderboard. + UnworthyCandidate, + /// Leaderboard must exist while present phase active. + LeaderboardMustExist, + /// Cannot present outside of presentation period. + NotPresentationPeriod, + /// Presented candidate must be current. + InvalidCandidate, + /// Duplicated presentation. + DuplicatedPresentation, + /// Incorrect total. + IncorrectTotal, + /// Invalid voter index. + InvalidVoterIndex, + /// New voter must have sufficient funds to pay the bond. + InsufficientVoterFunds, + /// Locked value must be more than limit. + InsufficientLockedValue, + /// Amount of candidate votes cannot exceed amount of candidates. + TooManyVotes, + /// Amount of candidates to receive approval votes should be non-zero. + ZeroCandidates, + /// No approval changes during presentation period. + ApprovalPresentation, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// How much should be locked up in order to submit one's candidacy. A reasonable - /// default value is 9. - const CandidacyBond: BalanceOf = T::CandidacyBond::get(); - - /// How much should be locked up in order to be able to submit votes. - const VotingBond: BalanceOf = T::VotingBond::get(); - - /// The amount of fee paid upon each vote submission, unless if they submit a - /// _hole_ index and replace it. - const VotingFee: BalanceOf = T::VotingFee::get(); - - /// The punishment, per voter, if you provide an invalid presentation. A - /// reasonable default value is 1. - const PresentSlashPerVoter: BalanceOf = T::PresentSlashPerVoter::get(); - - /// How many runners-up should have their approvals persist until the next - /// vote. A reasonable default value is 2. - const CarryCount: u32 = T::CarryCount::get(); - - /// How many vote indices need to go by after a target voter's last vote before - /// they can be reaped if their approvals are moot. A reasonable default value - /// is 1. 
- const InactiveGracePeriod: VoteIndex = T::InactiveGracePeriod::get(); - - /// How often (in blocks) to check for new votes. A reasonable default value - /// is 1000. - const VotingPeriod: T::BlockNumber = T::VotingPeriod::get(); - - /// Minimum about that can be used as the locked value for voting. - const MinimumVotingLock: BalanceOf = T::MinimumVotingLock::get(); - - /// Decay factor of weight when being accumulated. It should typically be set to - /// __at least__ `membership_size -1` to keep the collective secure. - /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight - /// increment step `t`. 0 will result in no weight being added at all (normal - /// approval voting). A reasonable default value is 24. - const DecayRatio: u32 = T::DecayRatio::get(); - - /// The chunk size of the voter vector. - const VOTER_SET_SIZE: u32 = VOTER_SET_SIZE as u32; - /// The chunk size of the approval vector. - const APPROVAL_SET_SIZE: u32 = APPROVAL_SET_SIZE as u32; - - fn deposit_event() = default; - - /// Set candidate approvals. Approval slots stay valid as long as candidates in those slots - /// are registered. - /// - /// Locks `value` from the balance of `origin` indefinitely. Only [`retract_voter`] or - /// [`reap_inactive_voter`] can unlock the balance. - /// - /// `hint` argument is interpreted differently based on: - /// - if `origin` is setting approvals for the first time: The index will be checked for - /// being a valid _hole_ in the voter list. - /// - if the hint is correctly pointing to a hole, no fee is deducted from `origin`. - /// - Otherwise, the call will succeed but the index is ignored and simply a push to the - /// last chunk with free space happens. If the new push causes a new chunk to be - /// created, a fee indicated by [`VotingFee`] is deducted. - /// - if `origin` is already a voter: the index __must__ be valid and point to the correct - /// position of the `origin` in the current voters list. 
- /// - /// Note that any trailing `false` votes in `votes` is ignored; In approval voting, not - /// voting for a candidate and voting false, are equal. - /// - /// # - /// - O(1). - /// - Two extra DB entries, one DB change. - /// - Argument `votes` is limited in length to number of candidates. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] - fn set_approvals( - origin, - votes: Vec, - #[compact] index: VoteIndex, - hint: SetIndex, - #[compact] value: BalanceOf - ) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::do_set_approvals(who, votes, index, hint, value) - } - - /// Set candidate approvals from a proxy. Approval slots stay valid as long as candidates in - /// those slots are registered. - /// - /// # - /// - Same as `set_approvals` with one additional storage read. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] - fn proxy_set_approvals(origin, - votes: Vec, - #[compact] index: VoteIndex, - hint: SetIndex, - #[compact] value: BalanceOf - ) -> DispatchResult { - let who = Self::proxy(ensure_signed(origin)?).ok_or(Error::::NotProxy)?; - Self::do_set_approvals(who, votes, index, hint, value) - } - - /// Remove a voter. For it not to be a bond-consuming no-op, all approved candidate indices - /// must now be either unregistered or registered to a candidate that registered the slot - /// after the voter gave their last approval set. - /// - /// Both indices must be provided as explained in [`voter_at`] function. - /// - /// May be called by anyone. Returns the voter deposit to `signed`. - /// - /// # - /// - O(1). - /// - Two fewer DB entries, one DB change. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] - fn reap_inactive_voter( - origin, - #[compact] reporter_index: u32, - who: ::Source, - #[compact] who_index: u32, - #[compact] assumed_vote_index: VoteIndex - ) { - let reporter = ensure_signed(origin)?; - let who = T::Lookup::lookup(who)?; - - ensure!(!Self::presentation_active(), Error::::CannotReapPresenting); - ensure!(Self::voter_info(&reporter).is_some(), Error::::NotVoter); - - let info = Self::voter_info(&who).ok_or(Error::::InactiveTarget)?; - let last_active = info.last_active; - - ensure!(assumed_vote_index == Self::vote_index(), Error::::InvalidVoteIndex); - ensure!( - assumed_vote_index > last_active + T::InactiveGracePeriod::get(), - Error::::ReapGrace, - ); - - let reporter_index = reporter_index as usize; - let who_index = who_index as usize; - let assumed_reporter = Self::voter_at(reporter_index).ok_or(Error::::InvalidReporterIndex)?; - let assumed_who = Self::voter_at(who_index).ok_or(Error::::InvalidTargetIndex)?; - - ensure!(assumed_reporter == reporter, Error::::InvalidReporterIndex); - ensure!(assumed_who == who, Error::::InvalidTargetIndex); - - // will definitely kill one of reporter or who now. - - let valid = !Self::all_approvals_of(&who).iter() - .zip(Self::candidates().iter()) - .any(|(&appr, addr)| - appr && - *addr != T::AccountId::default() && - // defensive only: all items in candidates list are registered - Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active) - ); - - Self::remove_voter( - if valid { &who } else { &reporter }, - if valid { who_index } else { reporter_index } - ); - - T::Currency::remove_lock( - MODULE_ID, - if valid { &who } else { &reporter } - ); - - if valid { - // This only fails if `reporter` doesn't exist, which it clearly must do since its - // the origin. Still, it's no more harmful to propagate any error at this point. 
- T::Currency::repatriate_reserved(&who, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; - Self::deposit_event(RawEvent::VoterReaped(who, reporter)); - } else { - let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; - T::BadReaper::on_unbalanced(imbalance); - Self::deposit_event(RawEvent::BadReaperSlashed(reporter)); - } - } - - /// Remove a voter. All votes are cancelled and the voter deposit is returned. - /// - /// The index must be provided as explained in [`voter_at`] function. - /// - /// Also removes the lock on the balance of the voter. See [`do_set_approvals()`]. - /// - /// # - /// - O(1). - /// - Two fewer DB entries, one DB change. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(1_250_000_000)] - fn retract_voter(origin, #[compact] index: u32) { - let who = ensure_signed(origin)?; - - ensure!(!Self::presentation_active(), Error::::CannotRetractPresenting); - ensure!(>::contains_key(&who), Error::::RetractNonVoter); - let index = index as usize; - let voter = Self::voter_at(index).ok_or(Error::::InvalidRetractionIndex)?; - ensure!(voter == who, Error::::InvalidRetractionIndex); - - Self::remove_voter(&who, index); - T::Currency::unreserve(&who, T::VotingBond::get()); - T::Currency::remove_lock(MODULE_ID, &who); - } - - /// Submit oneself for candidacy. - /// - /// Account must have enough transferrable funds in it to pay the bond. - /// - /// NOTE: if `origin` has already assigned approvals via [`set_approvals`], - /// it will NOT have any usable funds to pass candidacy bond and must first retract. - /// Note that setting approvals will lock the entire balance of the voter until - /// retraction or being reported. - /// - /// # - /// - Independent of input. - /// - Three DB changes. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] - fn submit_candidacy(origin, #[compact] slot: u32) { - let who = ensure_signed(origin)?; - - ensure!(!Self::is_a_candidate(&who), Error::::DuplicatedCandidate); - let slot = slot as usize; - let count = Self::candidate_count() as usize; - let candidates = Self::candidates(); - ensure!( - (slot == count && count == candidates.len()) || - (slot < candidates.len() && candidates[slot] == T::AccountId::default()), - Error::::InvalidCandidateSlot, - ); - // NOTE: This must be last as it has side-effects. - T::Currency::reserve(&who, T::CandidacyBond::get()) - .map_err(|_| Error::::InsufficientCandidateFunds)?; - - >::insert(&who, (Self::vote_index(), slot as u32)); - let mut candidates = candidates; - if slot == candidates.len() { - candidates.push(who); - } else { - candidates[slot] = who; - } - >::put(candidates); - CandidateCount::put(count as u32 + 1); - } - - /// Claim that `candidate` is one of the top `carry_count + desired_seats` candidates. Only - /// works iff the presentation period is active. `candidate` should have at least collected - /// some non-zero `total` votes and `origin` must have enough funds to pay for a potential - /// slash. - /// - /// # - /// - O(voters) compute. - /// - One DB change. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(10_000_000_000)] - fn present_winner( - origin, - candidate: ::Source, - #[compact] total: BalanceOf, - #[compact] index: VoteIndex - ) -> DispatchResult { - let who = ensure_signed(origin)?; - ensure!( - !total.is_zero(), - Error::::ZeroDeposit, - ); - - let candidate = T::Lookup::lookup(candidate)?; - ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); - let (_, _, expiring) = Self::next_finalize() - .ok_or(Error::::NotPresentationPeriod)?; - let bad_presentation_punishment = - T::PresentSlashPerVoter::get() - * BalanceOf::::from(Self::voter_count() as u32); - ensure!( - T::Currency::can_slash(&who, bad_presentation_punishment), - Error::::InsufficientPresenterFunds, - ); - - let mut leaderboard = Self::leaderboard() - .ok_or(Error::::LeaderboardMustExist)?; - ensure!(total > leaderboard[0].0, Error::::UnworthyCandidate); - - if let Some(p) = Self::members().iter().position(|&(ref c, _)| c == &candidate) { - ensure!( - p < expiring.len(), - Error::::DuplicatedCandidate, - ); - } - - let voters = Self::all_voters(); - let (registered_since, candidate_index): (VoteIndex, u32) = - Self::candidate_reg_info(&candidate).ok_or(Error::::InvalidCandidate)?; - let actual_total = voters.iter() - .filter_map(|maybe_voter| maybe_voter.as_ref()) - .filter_map(|voter| match Self::voter_info(voter) { - Some(b) if b.last_active >= registered_since => { - let last_win = b.last_win; - let now = Self::vote_index(); - let stake = b.stake; - let offset = Self::get_offset(stake, now - last_win); - let weight = stake + offset + b.pot; - if Self::approvals_of_at(voter, candidate_index as usize) { - Some(weight) - } else { None } - }, - _ => None, - }) - .fold(Zero::zero(), |acc, n| acc + n); - let dupe = leaderboard.iter().find(|&&(_, ref c)| c == &candidate).is_some(); - if total == actual_total && !dupe { - // insert into leaderboard - leaderboard[0] = (total, candidate); - leaderboard.sort_by_key(|&(t, _)| t); - 
>::put(leaderboard); - Ok(()) - } else { - // we can rest assured it will be Ok since we checked `can_slash` earlier; still - // better safe than sorry. - let imbalance = T::Currency::slash(&who, bad_presentation_punishment).0; - T::BadPresentation::on_unbalanced(imbalance); - Err(if dupe { Error::::DuplicatedPresentation } else { Error::::IncorrectTotal })? - } - } - - /// Set the desired member count; if lower than the current count, then seats will not be up - /// election when they expire. If more, then a new vote will be started if one is not - /// already in progress. - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn set_desired_seats(origin, #[compact] count: u32) { - ensure_root(origin)?; - DesiredSeats::put(count); - } - - /// Remove a particular member from the set. This is effective immediately. - /// - /// Note: A tally should happen instantly (if not already in a presentation - /// period) to fill the seat if removal means that the desired members are not met. - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn remove_member(origin, who: ::Source) { - ensure_root(origin)?; - let who = T::Lookup::lookup(who)?; - let new_set: Vec<(T::AccountId, T::BlockNumber)> = Self::members() - .into_iter() - .filter(|i| i.0 != who) - .collect(); - >::put(&new_set); - let new_set = new_set.into_iter().map(|x| x.0).collect::>(); - T::ChangeMembers::change_members(&[], &[who], new_set); - } - - /// Set the presentation duration. If there is currently a vote being presented for, will - /// invoke `finalize_vote`. - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn set_presentation_duration(origin, #[compact] count: T::BlockNumber) { - ensure_root(origin)?; - >::put(count); - } - - /// Set the presentation duration. If there is current a vote being presented for, will - /// invoke `finalize_vote`. 
- #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn set_term_duration(origin, #[compact] count: T::BlockNumber) { - ensure_root(origin)?; - >::put(count); - } - - fn on_initialize(n: T::BlockNumber) -> Weight { - if let Err(e) = Self::end_block(n) { - print("Guru meditation"); - print(e); - } - MINIMUM_WEIGHT - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + /// How much should be locked up in order to submit one's candidacy. A reasonable + /// default value is 9. + const CandidacyBond: BalanceOf = T::CandidacyBond::get(); + + /// How much should be locked up in order to be able to submit votes. + const VotingBond: BalanceOf = T::VotingBond::get(); + + /// The amount of fee paid upon each vote submission, unless if they submit a + /// _hole_ index and replace it. + const VotingFee: BalanceOf = T::VotingFee::get(); + + /// The punishment, per voter, if you provide an invalid presentation. A + /// reasonable default value is 1. + const PresentSlashPerVoter: BalanceOf = T::PresentSlashPerVoter::get(); + + /// How many runners-up should have their approvals persist until the next + /// vote. A reasonable default value is 2. + const CarryCount: u32 = T::CarryCount::get(); + + /// How many vote indices need to go by after a target voter's last vote before + /// they can be reaped if their approvals are moot. A reasonable default value + /// is 1. + const InactiveGracePeriod: VoteIndex = T::InactiveGracePeriod::get(); + + /// How often (in blocks) to check for new votes. A reasonable default value + /// is 1000. + const VotingPeriod: T::BlockNumber = T::VotingPeriod::get(); + + /// Minimum about that can be used as the locked value for voting. + const MinimumVotingLock: BalanceOf = T::MinimumVotingLock::get(); + + /// Decay factor of weight when being accumulated. It should typically be set to + /// __at least__ `membership_size -1` to keep the collective secure. 
+ /// When set to `N`, it indicates `(1/N)^t` of staked is decayed at weight + /// increment step `t`. 0 will result in no weight being added at all (normal + /// approval voting). A reasonable default value is 24. + const DecayRatio: u32 = T::DecayRatio::get(); + + /// The chunk size of the voter vector. + const VOTER_SET_SIZE: u32 = VOTER_SET_SIZE as u32; + /// The chunk size of the approval vector. + const APPROVAL_SET_SIZE: u32 = APPROVAL_SET_SIZE as u32; + + fn deposit_event() = default; + + /// Set candidate approvals. Approval slots stay valid as long as candidates in those slots + /// are registered. + /// + /// Locks `value` from the balance of `origin` indefinitely. Only [`retract_voter`] or + /// [`reap_inactive_voter`] can unlock the balance. + /// + /// `hint` argument is interpreted differently based on: + /// - if `origin` is setting approvals for the first time: The index will be checked for + /// being a valid _hole_ in the voter list. + /// - if the hint is correctly pointing to a hole, no fee is deducted from `origin`. + /// - Otherwise, the call will succeed but the index is ignored and simply a push to the + /// last chunk with free space happens. If the new push causes a new chunk to be + /// created, a fee indicated by [`VotingFee`] is deducted. + /// - if `origin` is already a voter: the index __must__ be valid and point to the correct + /// position of the `origin` in the current voters list. + /// + /// Note that any trailing `false` votes in `votes` is ignored; In approval voting, not + /// voting for a candidate and voting false, are equal. + /// + /// # + /// - O(1). + /// - Two extra DB entries, one DB change. + /// - Argument `votes` is limited in length to number of candidates. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] + fn set_approvals( + origin, + votes: Vec, + #[compact] index: VoteIndex, + hint: SetIndex, + #[compact] value: BalanceOf + ) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::do_set_approvals(who, votes, index, hint, value) + } + + /// Set candidate approvals from a proxy. Approval slots stay valid as long as candidates in + /// those slots are registered. + /// + /// # + /// - Same as `set_approvals` with one additional storage read. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] + fn proxy_set_approvals(origin, + votes: Vec, + #[compact] index: VoteIndex, + hint: SetIndex, + #[compact] value: BalanceOf + ) -> DispatchResult { + let who = Self::proxy(ensure_signed(origin)?).ok_or(Error::::NotProxy)?; + Self::do_set_approvals(who, votes, index, hint, value) + } + + /// Remove a voter. For it not to be a bond-consuming no-op, all approved candidate indices + /// must now be either unregistered or registered to a candidate that registered the slot + /// after the voter gave their last approval set. + /// + /// Both indices must be provided as explained in [`voter_at`] function. + /// + /// May be called by anyone. Returns the voter deposit to `signed`. + /// + /// # + /// - O(1). + /// - Two fewer DB entries, one DB change. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] + fn reap_inactive_voter( + origin, + #[compact] reporter_index: u32, + who: ::Source, + #[compact] who_index: u32, + #[compact] assumed_vote_index: VoteIndex + ) { + let reporter = ensure_signed(origin)?; + let who = T::Lookup::lookup(who)?; + + ensure!(!Self::presentation_active(), Error::::CannotReapPresenting); + ensure!(Self::voter_info(&reporter).is_some(), Error::::NotVoter); + + let info = Self::voter_info(&who).ok_or(Error::::InactiveTarget)?; + let last_active = info.last_active; + + ensure!(assumed_vote_index == Self::vote_index(), Error::::InvalidVoteIndex); + ensure!( + assumed_vote_index > last_active + T::InactiveGracePeriod::get(), + Error::::ReapGrace, + ); + + let reporter_index = reporter_index as usize; + let who_index = who_index as usize; + let assumed_reporter = Self::voter_at(reporter_index).ok_or(Error::::InvalidReporterIndex)?; + let assumed_who = Self::voter_at(who_index).ok_or(Error::::InvalidTargetIndex)?; + + ensure!(assumed_reporter == reporter, Error::::InvalidReporterIndex); + ensure!(assumed_who == who, Error::::InvalidTargetIndex); + + // will definitely kill one of reporter or who now. + + let valid = !Self::all_approvals_of(&who).iter() + .zip(Self::candidates().iter()) + .any(|(&appr, addr)| + appr && + *addr != T::AccountId::default() && + // defensive only: all items in candidates list are registered + Self::candidate_reg_info(addr).map_or(false, |x| x.0 <= last_active) + ); + + Self::remove_voter( + if valid { &who } else { &reporter }, + if valid { who_index } else { reporter_index } + ); + + T::Currency::remove_lock( + MODULE_ID, + if valid { &who } else { &reporter } + ); + + if valid { + // This only fails if `reporter` doesn't exist, which it clearly must do since its + // the origin. Still, it's no more harmful to propagate any error at this point. 
+ T::Currency::repatriate_reserved(&who, &reporter, T::VotingBond::get(), BalanceStatus::Free)?; + Self::deposit_event(RawEvent::VoterReaped(who, reporter)); + } else { + let imbalance = T::Currency::slash_reserved(&reporter, T::VotingBond::get()).0; + T::BadReaper::on_unbalanced(imbalance); + Self::deposit_event(RawEvent::BadReaperSlashed(reporter)); + } + } + + /// Remove a voter. All votes are cancelled and the voter deposit is returned. + /// + /// The index must be provided as explained in [`voter_at`] function. + /// + /// Also removes the lock on the balance of the voter. See [`do_set_approvals()`]. + /// + /// # + /// - O(1). + /// - Two fewer DB entries, one DB change. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(1_250_000_000)] + fn retract_voter(origin, #[compact] index: u32) { + let who = ensure_signed(origin)?; + + ensure!(!Self::presentation_active(), Error::::CannotRetractPresenting); + ensure!(>::contains_key(&who), Error::::RetractNonVoter); + let index = index as usize; + let voter = Self::voter_at(index).ok_or(Error::::InvalidRetractionIndex)?; + ensure!(voter == who, Error::::InvalidRetractionIndex); + + Self::remove_voter(&who, index); + T::Currency::unreserve(&who, T::VotingBond::get()); + T::Currency::remove_lock(MODULE_ID, &who); + } + + /// Submit oneself for candidacy. + /// + /// Account must have enough transferrable funds in it to pay the bond. + /// + /// NOTE: if `origin` has already assigned approvals via [`set_approvals`], + /// it will NOT have any usable funds to pass candidacy bond and must first retract. + /// Note that setting approvals will lock the entire balance of the voter until + /// retraction or being reported. + /// + /// # + /// - Independent of input. + /// - Three DB changes. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(2_500_000_000)] + fn submit_candidacy(origin, #[compact] slot: u32) { + let who = ensure_signed(origin)?; + + ensure!(!Self::is_a_candidate(&who), Error::::DuplicatedCandidate); + let slot = slot as usize; + let count = Self::candidate_count() as usize; + let candidates = Self::candidates(); + ensure!( + (slot == count && count == candidates.len()) || + (slot < candidates.len() && candidates[slot] == T::AccountId::default()), + Error::::InvalidCandidateSlot, + ); + // NOTE: This must be last as it has side-effects. + T::Currency::reserve(&who, T::CandidacyBond::get()) + .map_err(|_| Error::::InsufficientCandidateFunds)?; + + >::insert(&who, (Self::vote_index(), slot as u32)); + let mut candidates = candidates; + if slot == candidates.len() { + candidates.push(who); + } else { + candidates[slot] = who; + } + >::put(candidates); + CandidateCount::put(count as u32 + 1); + } + + /// Claim that `candidate` is one of the top `carry_count + desired_seats` candidates. Only + /// works iff the presentation period is active. `candidate` should have at least collected + /// some non-zero `total` votes and `origin` must have enough funds to pay for a potential + /// slash. + /// + /// # + /// - O(voters) compute. + /// - One DB change. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(10_000_000_000)] + fn present_winner( + origin, + candidate: ::Source, + #[compact] total: BalanceOf, + #[compact] index: VoteIndex + ) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!( + !total.is_zero(), + Error::::ZeroDeposit, + ); + + let candidate = T::Lookup::lookup(candidate)?; + ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); + let (_, _, expiring) = Self::next_finalize() + .ok_or(Error::::NotPresentationPeriod)?; + let bad_presentation_punishment = + T::PresentSlashPerVoter::get() + * BalanceOf::::from(Self::voter_count() as u32); + ensure!( + T::Currency::can_slash(&who, bad_presentation_punishment), + Error::::InsufficientPresenterFunds, + ); + + let mut leaderboard = Self::leaderboard() + .ok_or(Error::::LeaderboardMustExist)?; + ensure!(total > leaderboard[0].0, Error::::UnworthyCandidate); + + if let Some(p) = Self::members().iter().position(|&(ref c, _)| c == &candidate) { + ensure!( + p < expiring.len(), + Error::::DuplicatedCandidate, + ); + } + + let voters = Self::all_voters(); + let (registered_since, candidate_index): (VoteIndex, u32) = + Self::candidate_reg_info(&candidate).ok_or(Error::::InvalidCandidate)?; + let actual_total = voters.iter() + .filter_map(|maybe_voter| maybe_voter.as_ref()) + .filter_map(|voter| match Self::voter_info(voter) { + Some(b) if b.last_active >= registered_since => { + let last_win = b.last_win; + let now = Self::vote_index(); + let stake = b.stake; + let offset = Self::get_offset(stake, now - last_win); + let weight = stake + offset + b.pot; + if Self::approvals_of_at(voter, candidate_index as usize) { + Some(weight) + } else { None } + }, + _ => None, + }) + .fold(Zero::zero(), |acc, n| acc + n); + let dupe = leaderboard.iter().find(|&&(_, ref c)| c == &candidate).is_some(); + if total == actual_total && !dupe { + // insert into leaderboard + leaderboard[0] = (total, candidate); + leaderboard.sort_by_key(|&(t, _)| t); + 
>::put(leaderboard); + Ok(()) + } else { + // we can rest assured it will be Ok since we checked `can_slash` earlier; still + // better safe than sorry. + let imbalance = T::Currency::slash(&who, bad_presentation_punishment).0; + T::BadPresentation::on_unbalanced(imbalance); + Err(if dupe { Error::::DuplicatedPresentation } else { Error::::IncorrectTotal })? + } + } + + /// Set the desired member count; if lower than the current count, then seats will not be up + /// election when they expire. If more, then a new vote will be started if one is not + /// already in progress. + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn set_desired_seats(origin, #[compact] count: u32) { + ensure_root(origin)?; + DesiredSeats::put(count); + } + + /// Remove a particular member from the set. This is effective immediately. + /// + /// Note: A tally should happen instantly (if not already in a presentation + /// period) to fill the seat if removal means that the desired members are not met. + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn remove_member(origin, who: ::Source) { + ensure_root(origin)?; + let who = T::Lookup::lookup(who)?; + let new_set: Vec<(T::AccountId, T::BlockNumber)> = Self::members() + .into_iter() + .filter(|i| i.0 != who) + .collect(); + >::put(&new_set); + let new_set = new_set.into_iter().map(|x| x.0).collect::>(); + T::ChangeMembers::change_members(&[], &[who], new_set); + } + + /// Set the presentation duration. If there is currently a vote being presented for, will + /// invoke `finalize_vote`. + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn set_presentation_duration(origin, #[compact] count: T::BlockNumber) { + ensure_root(origin)?; + >::put(count); + } + + /// Set the presentation duration. If there is current a vote being presented for, will + /// invoke `finalize_vote`. 
+ #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn set_term_duration(origin, #[compact] count: T::BlockNumber) { + ensure_root(origin)?; + >::put(count); + } + + fn on_initialize(n: T::BlockNumber) -> Weight { + if let Err(e) = Self::end_block(n) { + print("Guru meditation"); + print(e); + } + MINIMUM_WEIGHT + } + } } decl_event!( @@ -722,498 +723,526 @@ decl_event!( ); impl Module { - // exposed immutables. - - /// True if we're currently in a presentation period. - pub fn presentation_active() -> bool { - >::exists() - } - - /// If `who` a candidate at the moment? - pub fn is_a_candidate(who: &T::AccountId) -> bool { - >::contains_key(who) - } - - /// Iff the member `who` still has a seat at blocknumber `n` returns `true`. - pub fn will_still_be_member_at(who: &T::AccountId, n: T::BlockNumber) -> bool { - Self::members().iter() - .find(|&&(ref a, _)| a == who) - .map(|&(_, expires)| expires > n) - .unwrap_or(false) - } - - /// Determine the block that a vote can happen on which is no less than `n`. - pub fn next_vote_from(n: T::BlockNumber) -> T::BlockNumber { - let voting_period = T::VotingPeriod::get(); - (n + voting_period - One::one()) / voting_period * voting_period - } - - /// The block number on which the tally for the next election will happen. `None` only if the - /// desired seats of the set is zero. - pub fn next_tally() -> Option { - let desired_seats = Self::desired_seats(); - if desired_seats == 0 { - None - } else { - let c = Self::members(); - let (next_possible, count, coming) = - if let Some((tally_end, comers, leavers)) = Self::next_finalize() { - // if there's a tally in progress, then next tally can begin immediately afterwards - (tally_end, c.len() - leavers.len() + comers as usize, comers) - } else { - (>::block_number(), c.len(), 0) - }; - if count < desired_seats as usize { - Some(next_possible) - } else { - // next tally begins once enough members expire to bring members below desired. 
- if desired_seats <= coming { - // the entire amount of desired seats is less than those new members - we'll - // have to wait until they expire. - Some(next_possible + Self::term_duration()) - } else { - Some(c[c.len() - (desired_seats - coming) as usize].1) - } - }.map(Self::next_vote_from) - } - } - - // Private - /// Check there's nothing to do this block - fn end_block(block_number: T::BlockNumber) -> DispatchResult { - if (block_number % T::VotingPeriod::get()).is_zero() { - if let Some(number) = Self::next_tally() { - if block_number == number { - Self::start_tally(); - } - } - } - if let Some((number, _, _)) = Self::next_finalize() { - if block_number == number { - Self::finalize_tally()? - } - } - Ok(()) - } - - /// Remove a voter at a specified index from the system. - fn remove_voter(voter: &T::AccountId, index: usize) { - let (set_index, vec_index) = Self::split_index(index, VOTER_SET_SIZE); - let mut set = Self::voters(set_index); - set[vec_index] = None; - >::insert(set_index, set); - VoterCount::mutate(|c| *c = *c - 1); - Self::remove_all_approvals_of(voter); - >::remove(voter); - } - - /// Actually do the voting. - /// - /// The voter index must be provided as explained in [`voter_at`] function. - fn do_set_approvals( - who: T::AccountId, - votes: Vec, - index: VoteIndex, - hint: SetIndex, - value: BalanceOf, - ) -> DispatchResult { - let candidates_len = ::Candidates::decode_len().unwrap_or(0_usize); - - ensure!(!Self::presentation_active(), Error::::ApprovalPresentation); - ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); - ensure!( - !candidates_len.is_zero(), - Error::::ZeroCandidates, - ); - // Prevent a vote from voters that provide a list of votes that exceeds the candidates - // length since otherwise an attacker may be able to submit a very long list of `votes` that - // far exceeds the amount of candidates and waste more computation than a reasonable voting - // bond would cover. 
- ensure!( - candidates_len >= votes.len(), - Error::::TooManyVotes, - ); - ensure!(value >= T::MinimumVotingLock::get(), Error::::InsufficientLockedValue); - - // Amount to be locked up. - let mut locked_balance = value.min(T::Currency::total_balance(&who)); - let mut pot_to_set = Zero::zero(); - let hint = hint as usize; - - if let Some(info) = Self::voter_info(&who) { - // already a voter. Index must be valid. No fee. update pot. O(1) - let voter = Self::voter_at(hint).ok_or(Error::::InvalidVoterIndex)?; - ensure!(voter == who, Error::::InvalidVoterIndex); - - // write new accumulated offset. - let last_win = info.last_win; - let now = index; - let offset = Self::get_offset(info.stake, now - last_win); - pot_to_set = info.pot + offset; - } else { - // not yet a voter. Index _could be valid_. Fee might apply. Bond will be reserved O(1). - ensure!( - T::Currency::free_balance(&who) > T::VotingBond::get(), - Error::::InsufficientVoterFunds, - ); - - let (set_index, vec_index) = Self::split_index(hint, VOTER_SET_SIZE); - match Self::cell_status(set_index, vec_index) { - CellStatus::Hole => { - // requested cell was a valid hole. - >::mutate(set_index, |set| set[vec_index] = Some(who.clone())); - }, - CellStatus::Head | CellStatus::Occupied => { - // Either occupied or out-of-range. - let next = Self::next_nonfull_voter_set(); - let set_len = >::decode_len(next).unwrap_or(0_usize); - // Caused a new set to be created. Pay for it. - // This is the last potential error. Writes will begin afterwards. - if set_len == 0 { - let imbalance = T::Currency::withdraw( - &who, - T::VotingFee::get(), - WithdrawReason::Fee.into(), - ExistenceRequirement::KeepAlive, - )?; - T::BadVoterIndex::on_unbalanced(imbalance); - // NOTE: this is safe since the `withdraw()` will check this. 
- locked_balance -= T::VotingFee::get(); - } - if set_len + 1 == VOTER_SET_SIZE { - NextVoterSet::put(next + 1); - } - >::append_or_insert(next, &[Some(who.clone())][..]) - } - } - - T::Currency::reserve(&who, T::VotingBond::get())?; - VoterCount::mutate(|c| *c = *c + 1); - } - - T::Currency::set_lock( - MODULE_ID, - &who, - locked_balance, - WithdrawReasons::all(), - ); - - >::insert( - &who, - VoterInfo::> { - last_active: index, - last_win: index, - stake: locked_balance, - pot: pot_to_set, - } - ); - Self::set_approvals_chunked(&who, votes); - - Ok(()) - } - - /// Close the voting, record the number of seats that are actually up for grabs. - fn start_tally() { - let members = Self::members(); - let desired_seats = Self::desired_seats() as usize; - let number = >::block_number(); - let expiring = - members.iter().take_while(|i| i.1 <= number).map(|i| i.0.clone()).collect::>(); - let retaining_seats = members.len() - expiring.len(); - if retaining_seats < desired_seats { - let empty_seats = desired_seats - retaining_seats; - >::put( - (number + Self::presentation_duration(), empty_seats as u32, expiring) - ); - - // initialize leaderboard. - let leaderboard_size = empty_seats + T::CarryCount::get() as usize; - >::put(vec![(BalanceOf::::zero(), T::AccountId::default()); leaderboard_size]); - - Self::deposit_event(RawEvent::TallyStarted(empty_seats as u32)); - } - } - - /// Finalize the vote, removing each of the `removals` and inserting `seats` of the most - /// approved candidates in their place. If the total number of members is less than the desired - /// membership a new vote is started. Clears all presented candidates, returning the bond of the - /// elected ones. 
- fn finalize_tally() -> DispatchResult { - let (_, coming, expiring): (T::BlockNumber, u32, Vec) = - >::take() - .ok_or("finalize can only be called after a tally is started.")?; - let leaderboard: Vec<(BalanceOf, T::AccountId)> = >::take() - .unwrap_or_default(); - let new_expiry = >::block_number() + Self::term_duration(); - - // return bond to winners. - let candidacy_bond = T::CandidacyBond::get(); - let incoming: Vec<_> = leaderboard.iter() - .rev() - .take_while(|&&(b, _)| !b.is_zero()) - .take(coming as usize) - .map(|(_, a)| a) - .cloned() - .inspect(|a| { T::Currency::unreserve(a, candidacy_bond); }) - .collect(); - - // Update last win index for anyone voted for any of the incomings. - incoming.iter().filter_map(|i| Self::candidate_reg_info(i)).for_each(|r| { - let index = r.1 as usize; - Self::all_voters() - .iter() - .filter_map(|mv| mv.as_ref()) - .filter(|v| Self::approvals_of_at(*v, index)) - .for_each(|v| >::mutate(v, |a| { - if let Some(activity) = a { activity.last_win = Self::vote_index() + 1; } - })); - }); - let members = Self::members(); - let outgoing: Vec<_> = members.iter() - .take(expiring.len()) - .map(|a| a.0.clone()).collect(); - - // set the new membership set. - let mut new_set: Vec<_> = members - .into_iter() - .skip(expiring.len()) - .chain(incoming.iter().cloned().map(|a| (a, new_expiry))) - .collect(); - new_set.sort_by_key(|&(_, expiry)| expiry); - >::put(&new_set); - - let new_set = new_set.into_iter().map(|x| x.0).collect::>(); - T::ChangeMembers::change_members(&incoming, &outgoing, new_set); - - // clear all except runners-up from candidate list. - let candidates = Self::candidates(); - let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. 
- let runners_up = leaderboard.into_iter() - .rev() - .take_while(|&(b, _)| !b.is_zero()) - .skip(coming as usize) - .filter_map(|(_, a)| Self::candidate_reg_info(&a).map(|i| (a, i.1))); - let mut count = 0u32; - for (address, slot) in runners_up { - new_candidates[slot as usize] = address; - count += 1; - } - for (old, new) in candidates.iter().zip(new_candidates.iter()) { - // candidate is not a runner up. - if old != new { - // removed - kill it - >::remove(old); - - // and candidate is not a winner. - if incoming.iter().find(|e| *e == old).is_none() { - // slash the bond. - let (imbalance, _) = T::Currency::slash_reserved(&old, T::CandidacyBond::get()); - T::LoserCandidate::on_unbalanced(imbalance); - } - } - } - // discard any superfluous slots. - if let Some(last_index) = new_candidates - .iter() - .rposition(|c| *c != T::AccountId::default()) { - new_candidates.truncate(last_index + 1); - } - - Self::deposit_event(RawEvent::TallyFinalized(incoming, outgoing)); - - >::put(new_candidates); - CandidateCount::put(count); - VoteCount::put(Self::vote_index() + 1); - Ok(()) - } - - /// Get the set and vector index of a global voter index. - /// - /// Note that this function does not take holes into account. - /// See [`voter_at`]. - fn split_index(index: usize, scale: usize) -> (SetIndex, usize) { - let set_index = (index / scale) as u32; - let vec_index = index % scale; - (set_index, vec_index) - } - - /// Return a concatenated vector over all voter sets. - fn all_voters() -> Vec> { - let mut all = >::get(0); - let mut index = 1; - // NOTE: we could also use `Self::next_nonfull_voter_set()` here but that might change based - // on how we do chunking. This is more generic. - loop { - let next_set = >::get(index); - if next_set.is_empty() { - break; - } else { - index += 1; - all.extend(next_set); - } - } - all - } - - /// Shorthand for fetching a voter at a specific (global) index. - /// - /// NOTE: this function is used for checking indices. 
Yet, it does not take holes into account. - /// This means that any account submitting an index at any point in time should submit: - /// `VOTER_SET_SIZE * set_index + local_index`, meaning that you are ignoring all holes in the - /// first `set_index` sets. - fn voter_at(index: usize) -> Option { - let (set_index, vec_index) = Self::split_index(index, VOTER_SET_SIZE); - let set = Self::voters(set_index); - if vec_index < set.len() { - set[vec_index].clone() - } else { - None - } - } - - /// A more sophisticated version of `voter_at`. Will be kept separate as most often it is an - /// overdue compared to `voter_at`. Only used when setting approvals. - fn cell_status(set_index: SetIndex, vec_index: usize) -> CellStatus { - let set = Self::voters(set_index); - if vec_index < set.len() { - if let Some(_) = set[vec_index] { - CellStatus::Occupied - } else { - CellStatus::Hole - } - } else { - CellStatus::Head - } - } - - /// Sets the approval of a voter in a chunked manner. - fn set_approvals_chunked(who: &T::AccountId, approvals: Vec) { - let approvals_flag_vec = Self::bool_to_flag(approvals); - approvals_flag_vec - .chunks(APPROVAL_SET_SIZE) - .enumerate() - .for_each(|(index, slice)| >::insert( - (&who, index as SetIndex), slice) - ); - } - - /// shorthand for fetching a specific approval of a voter at a specific (global) index. - /// - /// Using this function to read a vote is preferred as it reads `APPROVAL_SET_SIZE` items of - /// type `ApprovalFlag` from storage at most; not all of them. - /// - /// Note that false is returned in case of no-vote or an explicit `false`. - fn approvals_of_at(who: &T::AccountId, index: usize) -> bool { - let (flag_index, bit) = Self::split_index(index, APPROVAL_FLAG_LEN); - let (set_index, vec_index) = Self::split_index(flag_index as usize, APPROVAL_SET_SIZE); - let set = Self::approvals_of((who.clone(), set_index)); - if vec_index < set.len() { - // This is because bit_at treats numbers in lsb -> msb order. 
- let reversed_index = set.len() - 1 - vec_index; - Self::bit_at(set[reversed_index], bit) - } else { - false - } - } - - /// Return true of the bit `n` of scalar `x` is set to `1` and false otherwise. - fn bit_at(x: ApprovalFlag, n: usize) -> bool { - if n < APPROVAL_FLAG_LEN { - x & ( 1 << n ) != 0 - } else { - false - } - } - - /// Convert a vec of boolean approval flags to a vec of integers, as denoted by - /// the type `ApprovalFlag`. see `bool_to_flag_should_work` test for examples. - pub fn bool_to_flag(x: Vec) -> Vec { - let mut result: Vec = Vec::with_capacity(x.len() / APPROVAL_FLAG_LEN); - if x.is_empty() { - return result; - } - result.push(0); - let mut index = 0; - let mut counter = 0; - loop { - let shl_index = counter % APPROVAL_FLAG_LEN; - result[index] += (if x[counter] { 1 } else { 0 }) << shl_index; - counter += 1; - if counter > x.len() - 1 { break; } - if counter % APPROVAL_FLAG_LEN == 0 { - result.push(0); - index += 1; - } - } - result - } - - /// Convert a vec of flags (u32) to boolean. - pub fn flag_to_bool(chunk: Vec) -> Vec { - let mut result = Vec::with_capacity(chunk.len()); - if chunk.is_empty() { return vec![] } - chunk.into_iter() - .map(|num| - (0..APPROVAL_FLAG_LEN).map(|bit| Self::bit_at(num, bit)).collect::>() - ) - .for_each(|c| { - let last_approve = match c.iter().rposition(|n| *n) { - Some(index) => index + 1, - None => 0 - }; - result.extend(c.into_iter().take(last_approve)); - }); - result - } - - /// Return a concatenated vector over all approvals of a voter as boolean. - /// The trailing zeros are removed. - fn all_approvals_of(who: &T::AccountId) -> Vec { - let mut all: Vec = vec![]; - let mut index = 0_u32; - loop { - let chunk = Self::approvals_of((who.clone(), index)); - if chunk.is_empty() { break; } - all.extend(Self::flag_to_bool(chunk)); - index += 1; - } - all - } - - /// Remove all approvals associated with one account. 
- fn remove_all_approvals_of(who: &T::AccountId) { - let mut index = 0; - loop { - let set = Self::approvals_of((who.clone(), index)); - if set.len() > 0 { - >::remove((who.clone(), index)); - index += 1; - } else { - break - } - } - } - - /// Calculates the offset value (stored pot) of a stake, based on the distance - /// to the last win_index, `t`. Regardless of the internal implementation, - /// it should always be used with the following structure: - /// - /// Given Stake of voter `V` being `x` and distance to last_win index `t`, the new weight - /// of `V` is `x + get_offset(x, t)`. - /// - /// In other words, this function returns everything extra that should be added - /// to a voter's stake value to get the correct weight. Indeed, zero is - /// returned if `t` is zero. - fn get_offset(stake: BalanceOf, t: VoteIndex) -> BalanceOf { - let decay_ratio: BalanceOf = T::DecayRatio::get().into(); - if t > 150 { return stake * decay_ratio } - let mut offset = stake; - let mut r = Zero::zero(); - let decay = decay_ratio + One::one(); - for _ in 0..t { - offset = offset.saturating_sub(offset / decay); - r += offset - } - r - } + // exposed immutables. + + /// True if we're currently in a presentation period. + pub fn presentation_active() -> bool { + >::exists() + } + + /// If `who` a candidate at the moment? + pub fn is_a_candidate(who: &T::AccountId) -> bool { + >::contains_key(who) + } + + /// Iff the member `who` still has a seat at blocknumber `n` returns `true`. + pub fn will_still_be_member_at(who: &T::AccountId, n: T::BlockNumber) -> bool { + Self::members() + .iter() + .find(|&&(ref a, _)| a == who) + .map(|&(_, expires)| expires > n) + .unwrap_or(false) + } + + /// Determine the block that a vote can happen on which is no less than `n`. 
+ pub fn next_vote_from(n: T::BlockNumber) -> T::BlockNumber { + let voting_period = T::VotingPeriod::get(); + (n + voting_period - One::one()) / voting_period * voting_period + } + + /// The block number on which the tally for the next election will happen. `None` only if the + /// desired seats of the set is zero. + pub fn next_tally() -> Option { + let desired_seats = Self::desired_seats(); + if desired_seats == 0 { + None + } else { + let c = Self::members(); + let (next_possible, count, coming) = + if let Some((tally_end, comers, leavers)) = Self::next_finalize() { + // if there's a tally in progress, then next tally can begin immediately afterwards + (tally_end, c.len() - leavers.len() + comers as usize, comers) + } else { + (>::block_number(), c.len(), 0) + }; + if count < desired_seats as usize { + Some(next_possible) + } else { + // next tally begins once enough members expire to bring members below desired. + if desired_seats <= coming { + // the entire amount of desired seats is less than those new members - we'll + // have to wait until they expire. + Some(next_possible + Self::term_duration()) + } else { + Some(c[c.len() - (desired_seats - coming) as usize].1) + } + } + .map(Self::next_vote_from) + } + } + + // Private + /// Check there's nothing to do this block + fn end_block(block_number: T::BlockNumber) -> DispatchResult { + if (block_number % T::VotingPeriod::get()).is_zero() { + if let Some(number) = Self::next_tally() { + if block_number == number { + Self::start_tally(); + } + } + } + if let Some((number, _, _)) = Self::next_finalize() { + if block_number == number { + Self::finalize_tally()? + } + } + Ok(()) + } + + /// Remove a voter at a specified index from the system. 
+ fn remove_voter(voter: &T::AccountId, index: usize) { + let (set_index, vec_index) = Self::split_index(index, VOTER_SET_SIZE); + let mut set = Self::voters(set_index); + set[vec_index] = None; + >::insert(set_index, set); + VoterCount::mutate(|c| *c = *c - 1); + Self::remove_all_approvals_of(voter); + >::remove(voter); + } + + /// Actually do the voting. + /// + /// The voter index must be provided as explained in [`voter_at`] function. + fn do_set_approvals( + who: T::AccountId, + votes: Vec, + index: VoteIndex, + hint: SetIndex, + value: BalanceOf, + ) -> DispatchResult { + let candidates_len = ::Candidates::decode_len().unwrap_or(0_usize); + + ensure!( + !Self::presentation_active(), + Error::::ApprovalPresentation + ); + ensure!(index == Self::vote_index(), Error::::InvalidVoteIndex); + ensure!(!candidates_len.is_zero(), Error::::ZeroCandidates,); + // Prevent a vote from voters that provide a list of votes that exceeds the candidates + // length since otherwise an attacker may be able to submit a very long list of `votes` that + // far exceeds the amount of candidates and waste more computation than a reasonable voting + // bond would cover. + ensure!(candidates_len >= votes.len(), Error::::TooManyVotes,); + ensure!( + value >= T::MinimumVotingLock::get(), + Error::::InsufficientLockedValue + ); + + // Amount to be locked up. + let mut locked_balance = value.min(T::Currency::total_balance(&who)); + let mut pot_to_set = Zero::zero(); + let hint = hint as usize; + + if let Some(info) = Self::voter_info(&who) { + // already a voter. Index must be valid. No fee. update pot. O(1) + let voter = Self::voter_at(hint).ok_or(Error::::InvalidVoterIndex)?; + ensure!(voter == who, Error::::InvalidVoterIndex); + + // write new accumulated offset. + let last_win = info.last_win; + let now = index; + let offset = Self::get_offset(info.stake, now - last_win); + pot_to_set = info.pot + offset; + } else { + // not yet a voter. Index _could be valid_. Fee might apply. 
Bond will be reserved O(1). + ensure!( + T::Currency::free_balance(&who) > T::VotingBond::get(), + Error::::InsufficientVoterFunds, + ); + + let (set_index, vec_index) = Self::split_index(hint, VOTER_SET_SIZE); + match Self::cell_status(set_index, vec_index) { + CellStatus::Hole => { + // requested cell was a valid hole. + >::mutate(set_index, |set| set[vec_index] = Some(who.clone())); + } + CellStatus::Head | CellStatus::Occupied => { + // Either occupied or out-of-range. + let next = Self::next_nonfull_voter_set(); + let set_len = >::decode_len(next).unwrap_or(0_usize); + // Caused a new set to be created. Pay for it. + // This is the last potential error. Writes will begin afterwards. + if set_len == 0 { + let imbalance = T::Currency::withdraw( + &who, + T::VotingFee::get(), + WithdrawReason::Fee.into(), + ExistenceRequirement::KeepAlive, + )?; + T::BadVoterIndex::on_unbalanced(imbalance); + // NOTE: this is safe since the `withdraw()` will check this. + locked_balance -= T::VotingFee::get(); + } + if set_len + 1 == VOTER_SET_SIZE { + NextVoterSet::put(next + 1); + } + >::append_or_insert(next, &[Some(who.clone())][..]) + } + } + + T::Currency::reserve(&who, T::VotingBond::get())?; + VoterCount::mutate(|c| *c = *c + 1); + } + + T::Currency::set_lock(MODULE_ID, &who, locked_balance, WithdrawReasons::all()); + + >::insert( + &who, + VoterInfo::> { + last_active: index, + last_win: index, + stake: locked_balance, + pot: pot_to_set, + }, + ); + Self::set_approvals_chunked(&who, votes); + + Ok(()) + } + + /// Close the voting, record the number of seats that are actually up for grabs. 
+ fn start_tally() { + let members = Self::members(); + let desired_seats = Self::desired_seats() as usize; + let number = >::block_number(); + let expiring = members + .iter() + .take_while(|i| i.1 <= number) + .map(|i| i.0.clone()) + .collect::>(); + let retaining_seats = members.len() - expiring.len(); + if retaining_seats < desired_seats { + let empty_seats = desired_seats - retaining_seats; + >::put(( + number + Self::presentation_duration(), + empty_seats as u32, + expiring, + )); + + // initialize leaderboard. + let leaderboard_size = empty_seats + T::CarryCount::get() as usize; + >::put(vec![ + (BalanceOf::::zero(), T::AccountId::default()); + leaderboard_size + ]); + + Self::deposit_event(RawEvent::TallyStarted(empty_seats as u32)); + } + } + + /// Finalize the vote, removing each of the `removals` and inserting `seats` of the most + /// approved candidates in their place. If the total number of members is less than the desired + /// membership a new vote is started. Clears all presented candidates, returning the bond of the + /// elected ones. + fn finalize_tally() -> DispatchResult { + let (_, coming, expiring): (T::BlockNumber, u32, Vec) = + >::take() + .ok_or("finalize can only be called after a tally is started.")?; + let leaderboard: Vec<(BalanceOf, T::AccountId)> = + >::take().unwrap_or_default(); + let new_expiry = >::block_number() + Self::term_duration(); + + // return bond to winners. + let candidacy_bond = T::CandidacyBond::get(); + let incoming: Vec<_> = leaderboard + .iter() + .rev() + .take_while(|&&(b, _)| !b.is_zero()) + .take(coming as usize) + .map(|(_, a)| a) + .cloned() + .inspect(|a| { + T::Currency::unreserve(a, candidacy_bond); + }) + .collect(); + + // Update last win index for anyone voted for any of the incomings. 
+ incoming + .iter() + .filter_map(|i| Self::candidate_reg_info(i)) + .for_each(|r| { + let index = r.1 as usize; + Self::all_voters() + .iter() + .filter_map(|mv| mv.as_ref()) + .filter(|v| Self::approvals_of_at(*v, index)) + .for_each(|v| { + >::mutate(v, |a| { + if let Some(activity) = a { + activity.last_win = Self::vote_index() + 1; + } + }) + }); + }); + let members = Self::members(); + let outgoing: Vec<_> = members + .iter() + .take(expiring.len()) + .map(|a| a.0.clone()) + .collect(); + + // set the new membership set. + let mut new_set: Vec<_> = members + .into_iter() + .skip(expiring.len()) + .chain(incoming.iter().cloned().map(|a| (a, new_expiry))) + .collect(); + new_set.sort_by_key(|&(_, expiry)| expiry); + >::put(&new_set); + + let new_set = new_set.into_iter().map(|x| x.0).collect::>(); + T::ChangeMembers::change_members(&incoming, &outgoing, new_set); + + // clear all except runners-up from candidate list. + let candidates = Self::candidates(); + let mut new_candidates = vec![T::AccountId::default(); candidates.len()]; // shrink later. + let runners_up = leaderboard + .into_iter() + .rev() + .take_while(|&(b, _)| !b.is_zero()) + .skip(coming as usize) + .filter_map(|(_, a)| Self::candidate_reg_info(&a).map(|i| (a, i.1))); + let mut count = 0u32; + for (address, slot) in runners_up { + new_candidates[slot as usize] = address; + count += 1; + } + for (old, new) in candidates.iter().zip(new_candidates.iter()) { + // candidate is not a runner up. + if old != new { + // removed - kill it + >::remove(old); + + // and candidate is not a winner. + if incoming.iter().find(|e| *e == old).is_none() { + // slash the bond. + let (imbalance, _) = T::Currency::slash_reserved(&old, T::CandidacyBond::get()); + T::LoserCandidate::on_unbalanced(imbalance); + } + } + } + // discard any superfluous slots. 
+ if let Some(last_index) = new_candidates + .iter() + .rposition(|c| *c != T::AccountId::default()) + { + new_candidates.truncate(last_index + 1); + } + + Self::deposit_event(RawEvent::TallyFinalized(incoming, outgoing)); + + >::put(new_candidates); + CandidateCount::put(count); + VoteCount::put(Self::vote_index() + 1); + Ok(()) + } + + /// Get the set and vector index of a global voter index. + /// + /// Note that this function does not take holes into account. + /// See [`voter_at`]. + fn split_index(index: usize, scale: usize) -> (SetIndex, usize) { + let set_index = (index / scale) as u32; + let vec_index = index % scale; + (set_index, vec_index) + } + + /// Return a concatenated vector over all voter sets. + fn all_voters() -> Vec> { + let mut all = >::get(0); + let mut index = 1; + // NOTE: we could also use `Self::next_nonfull_voter_set()` here but that might change based + // on how we do chunking. This is more generic. + loop { + let next_set = >::get(index); + if next_set.is_empty() { + break; + } else { + index += 1; + all.extend(next_set); + } + } + all + } + + /// Shorthand for fetching a voter at a specific (global) index. + /// + /// NOTE: this function is used for checking indices. Yet, it does not take holes into account. + /// This means that any account submitting an index at any point in time should submit: + /// `VOTER_SET_SIZE * set_index + local_index`, meaning that you are ignoring all holes in the + /// first `set_index` sets. + fn voter_at(index: usize) -> Option { + let (set_index, vec_index) = Self::split_index(index, VOTER_SET_SIZE); + let set = Self::voters(set_index); + if vec_index < set.len() { + set[vec_index].clone() + } else { + None + } + } + + /// A more sophisticated version of `voter_at`. Will be kept separate as most often it is an + /// overdue compared to `voter_at`. Only used when setting approvals. 
+ fn cell_status(set_index: SetIndex, vec_index: usize) -> CellStatus { + let set = Self::voters(set_index); + if vec_index < set.len() { + if let Some(_) = set[vec_index] { + CellStatus::Occupied + } else { + CellStatus::Hole + } + } else { + CellStatus::Head + } + } + + /// Sets the approval of a voter in a chunked manner. + fn set_approvals_chunked(who: &T::AccountId, approvals: Vec) { + let approvals_flag_vec = Self::bool_to_flag(approvals); + approvals_flag_vec + .chunks(APPROVAL_SET_SIZE) + .enumerate() + .for_each(|(index, slice)| >::insert((&who, index as SetIndex), slice)); + } + + /// shorthand for fetching a specific approval of a voter at a specific (global) index. + /// + /// Using this function to read a vote is preferred as it reads `APPROVAL_SET_SIZE` items of + /// type `ApprovalFlag` from storage at most; not all of them. + /// + /// Note that false is returned in case of no-vote or an explicit `false`. + fn approvals_of_at(who: &T::AccountId, index: usize) -> bool { + let (flag_index, bit) = Self::split_index(index, APPROVAL_FLAG_LEN); + let (set_index, vec_index) = Self::split_index(flag_index as usize, APPROVAL_SET_SIZE); + let set = Self::approvals_of((who.clone(), set_index)); + if vec_index < set.len() { + // This is because bit_at treats numbers in lsb -> msb order. + let reversed_index = set.len() - 1 - vec_index; + Self::bit_at(set[reversed_index], bit) + } else { + false + } + } + + /// Return true of the bit `n` of scalar `x` is set to `1` and false otherwise. + fn bit_at(x: ApprovalFlag, n: usize) -> bool { + if n < APPROVAL_FLAG_LEN { + x & (1 << n) != 0 + } else { + false + } + } + + /// Convert a vec of boolean approval flags to a vec of integers, as denoted by + /// the type `ApprovalFlag`. see `bool_to_flag_should_work` test for examples. 
+ pub fn bool_to_flag(x: Vec) -> Vec { + let mut result: Vec = Vec::with_capacity(x.len() / APPROVAL_FLAG_LEN); + if x.is_empty() { + return result; + } + result.push(0); + let mut index = 0; + let mut counter = 0; + loop { + let shl_index = counter % APPROVAL_FLAG_LEN; + result[index] += (if x[counter] { 1 } else { 0 }) << shl_index; + counter += 1; + if counter > x.len() - 1 { + break; + } + if counter % APPROVAL_FLAG_LEN == 0 { + result.push(0); + index += 1; + } + } + result + } + + /// Convert a vec of flags (u32) to boolean. + pub fn flag_to_bool(chunk: Vec) -> Vec { + let mut result = Vec::with_capacity(chunk.len()); + if chunk.is_empty() { + return vec![]; + } + chunk + .into_iter() + .map(|num| { + (0..APPROVAL_FLAG_LEN) + .map(|bit| Self::bit_at(num, bit)) + .collect::>() + }) + .for_each(|c| { + let last_approve = match c.iter().rposition(|n| *n) { + Some(index) => index + 1, + None => 0, + }; + result.extend(c.into_iter().take(last_approve)); + }); + result + } + + /// Return a concatenated vector over all approvals of a voter as boolean. + /// The trailing zeros are removed. + fn all_approvals_of(who: &T::AccountId) -> Vec { + let mut all: Vec = vec![]; + let mut index = 0_u32; + loop { + let chunk = Self::approvals_of((who.clone(), index)); + if chunk.is_empty() { + break; + } + all.extend(Self::flag_to_bool(chunk)); + index += 1; + } + all + } + + /// Remove all approvals associated with one account. + fn remove_all_approvals_of(who: &T::AccountId) { + let mut index = 0; + loop { + let set = Self::approvals_of((who.clone(), index)); + if set.len() > 0 { + >::remove((who.clone(), index)); + index += 1; + } else { + break; + } + } + } + + /// Calculates the offset value (stored pot) of a stake, based on the distance + /// to the last win_index, `t`. 
Regardless of the internal implementation, + /// it should always be used with the following structure: + /// + /// Given Stake of voter `V` being `x` and distance to last_win index `t`, the new weight + /// of `V` is `x + get_offset(x, t)`. + /// + /// In other words, this function returns everything extra that should be added + /// to a voter's stake value to get the correct weight. Indeed, zero is + /// returned if `t` is zero. + fn get_offset(stake: BalanceOf, t: VoteIndex) -> BalanceOf { + let decay_ratio: BalanceOf = T::DecayRatio::get().into(); + if t > 150 { + return stake * decay_ratio; + } + let mut offset = stake; + let mut r = Zero::zero(); + let decay = decay_ratio + One::one(); + for _ in 0..t { + offset = offset.saturating_sub(offset / decay); + r += offset + } + r + } } diff --git a/frame/elections/src/mock.rs b/frame/elections/src/mock.rs index a304478abb..0a87d776e2 100644 --- a/frame/elections/src/mock.rs +++ b/frame/elections/src/mock.rs @@ -18,127 +18,137 @@ #![cfg(test)] -use std::cell::RefCell; +use crate as elections; use frame_support::{ - StorageValue, StorageMap, parameter_types, assert_ok, - traits::{Get, ChangeMembers, Currency}, - weights::Weight, + assert_ok, parameter_types, + traits::{ChangeMembers, Currency, Get}, + weights::Weight, + StorageMap, StorageValue, }; use sp_core::H256; use sp_runtime::{ - Perbill, BuildStorage, testing::Header, traits::{BlakeTwo256, IdentityLookup, Block as BlockT}, + testing::Header, + traits::{BlakeTwo256, Block as BlockT, IdentityLookup}, + BuildStorage, Perbill, }; -use crate as elections; - +use std::cell::RefCell; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Call = (); - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; + pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { - type Balance = u64; - type DustRemoval = (); - type Event = Event; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type DustRemoval = (); + type Event = Event; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const CandidacyBond: u64 = 3; - pub const CarryCount: u32 = 2; - pub const InactiveGracePeriod: u32 = 1; - pub const VotingPeriod: u64 = 4; - pub const MinimumVotingLock: u64 = 5; + pub const CandidacyBond: u64 = 3; + pub const CarryCount: u32 = 2; + pub const InactiveGracePeriod: u32 = 1; + pub const VotingPeriod: u64 = 4; + pub const MinimumVotingLock: u64 = 5; } thread_local! { - static VOTER_BOND: RefCell = RefCell::new(0); - static VOTING_FEE: RefCell = RefCell::new(0); - static PRESENT_SLASH_PER_VOTER: RefCell = RefCell::new(0); - static DECAY_RATIO: RefCell = RefCell::new(0); - static MEMBERS: RefCell> = RefCell::new(vec![]); + static VOTER_BOND: RefCell = RefCell::new(0); + static VOTING_FEE: RefCell = RefCell::new(0); + static PRESENT_SLASH_PER_VOTER: RefCell = RefCell::new(0); + static DECAY_RATIO: RefCell = RefCell::new(0); + static MEMBERS: RefCell> = RefCell::new(vec![]); } pub struct VotingBond; impl Get for VotingBond { - fn get() -> u64 { VOTER_BOND.with(|v| *v.borrow()) } + fn get() -> u64 { + VOTER_BOND.with(|v| *v.borrow()) + } } pub struct VotingFee; impl Get for VotingFee { - fn get() -> u64 { VOTING_FEE.with(|v| *v.borrow()) } + fn get() -> u64 { + VOTING_FEE.with(|v| *v.borrow()) + } } pub struct PresentSlashPerVoter; impl Get for PresentSlashPerVoter { - fn get() -> u64 { PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow()) } + fn get() -> u64 { + PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow()) + } } pub struct DecayRatio; impl Get for DecayRatio { - fn get() -> u32 { DECAY_RATIO.with(|v| *v.borrow()) } + fn 
get() -> u32 { + DECAY_RATIO.with(|v| *v.borrow()) + } } pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { - fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); - old_plus_incoming.extend_from_slice(incoming); - old_plus_incoming.sort(); - let mut new_plus_outgoing = new.to_vec(); - new_plus_outgoing.extend_from_slice(outgoing); - new_plus_outgoing.sort(); - assert_eq!(old_plus_incoming, new_plus_outgoing); + fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { + let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); + old_plus_incoming.extend_from_slice(incoming); + old_plus_incoming.sort(); + let mut new_plus_outgoing = new.to_vec(); + new_plus_outgoing.extend_from_slice(outgoing); + new_plus_outgoing.sort(); + assert_eq!(old_plus_incoming, new_plus_outgoing); - MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); - } + MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); + } } impl elections::Trait for Test { - type Event = Event; - type Currency = Balances; - type BadPresentation = (); - type BadReaper = (); - type BadVoterIndex = (); - type LoserCandidate = (); - type ChangeMembers = TestChangeMembers; - type CandidacyBond = CandidacyBond; - type VotingBond = VotingBond; - type VotingFee = VotingFee; - type MinimumVotingLock = MinimumVotingLock; - type PresentSlashPerVoter = PresentSlashPerVoter; - type CarryCount = CarryCount; - type InactiveGracePeriod = InactiveGracePeriod; - type VotingPeriod = VotingPeriod; - type DecayRatio = DecayRatio; + type Event = Event; + type Currency = Balances; + type BadPresentation = (); + type BadReaper = (); + type BadVoterIndex = (); + type LoserCandidate = (); + type ChangeMembers = TestChangeMembers; + type CandidacyBond = CandidacyBond; + type VotingBond = VotingBond; + type VotingFee = VotingFee; + type MinimumVotingLock = MinimumVotingLock; + type PresentSlashPerVoter = 
PresentSlashPerVoter; + type CarryCount = CarryCount; + type InactiveGracePeriod = InactiveGracePeriod; + type VotingPeriod = VotingPeriod; + type DecayRatio = DecayRatio; } pub type Block = sp_runtime::generic::Block; @@ -158,129 +168,134 @@ frame_support::construct_runtime!( ); pub struct ExtBuilder { - balance_factor: u64, - decay_ratio: u32, - desired_seats: u32, - voting_fee: u64, - voter_bond: u64, - bad_presentation_punishment: u64, + balance_factor: u64, + decay_ratio: u32, + desired_seats: u32, + voting_fee: u64, + voter_bond: u64, + bad_presentation_punishment: u64, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - balance_factor: 1, - decay_ratio: 24, - desired_seats: 2, - voting_fee: 0, - voter_bond: 0, - bad_presentation_punishment: 1, - } - } + fn default() -> Self { + Self { + balance_factor: 1, + decay_ratio: 24, + desired_seats: 2, + voting_fee: 0, + voter_bond: 0, + bad_presentation_punishment: 1, + } + } } impl ExtBuilder { - pub fn balance_factor(mut self, factor: u64) -> Self { - self.balance_factor = factor; - self - } - pub fn decay_ratio(mut self, ratio: u32) -> Self { - self.decay_ratio = ratio; - self - } - pub fn voting_fee(mut self, fee: u64) -> Self { - self.voting_fee = fee; - self - } - pub fn bad_presentation_punishment(mut self, fee: u64) -> Self { - self.bad_presentation_punishment = fee; - self - } - pub fn voter_bond(mut self, fee: u64) -> Self { - self.voter_bond = fee; - self - } - pub fn desired_seats(mut self, seats: u32) -> Self { - self.desired_seats = seats; - self - } - pub fn build(self) -> sp_io::TestExternalities { - VOTER_BOND.with(|v| *v.borrow_mut() = self.voter_bond); - VOTING_FEE.with(|v| *v.borrow_mut() = self.voting_fee); - PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); - DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); - let mut ext: sp_io::TestExternalities = GenesisConfig { - pallet_balances: Some(pallet_balances::GenesisConfig::{ - balances: 
vec![ - (1, 10 * self.balance_factor), - (2, 20 * self.balance_factor), - (3, 30 * self.balance_factor), - (4, 40 * self.balance_factor), - (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) - ], - }), - elections: Some(elections::GenesisConfig::{ - members: vec![], - desired_seats: self.desired_seats, - presentation_duration: 2, - term_duration: 5, - }), - }.build_storage().unwrap().into(); - ext.execute_with(|| System::set_block_number(1)); - ext - } + pub fn balance_factor(mut self, factor: u64) -> Self { + self.balance_factor = factor; + self + } + pub fn decay_ratio(mut self, ratio: u32) -> Self { + self.decay_ratio = ratio; + self + } + pub fn voting_fee(mut self, fee: u64) -> Self { + self.voting_fee = fee; + self + } + pub fn bad_presentation_punishment(mut self, fee: u64) -> Self { + self.bad_presentation_punishment = fee; + self + } + pub fn voter_bond(mut self, fee: u64) -> Self { + self.voter_bond = fee; + self + } + pub fn desired_seats(mut self, seats: u32) -> Self { + self.desired_seats = seats; + self + } + pub fn build(self) -> sp_io::TestExternalities { + VOTER_BOND.with(|v| *v.borrow_mut() = self.voter_bond); + VOTING_FEE.with(|v| *v.borrow_mut() = self.voting_fee); + PRESENT_SLASH_PER_VOTER.with(|v| *v.borrow_mut() = self.bad_presentation_punishment); + DECAY_RATIO.with(|v| *v.borrow_mut() = self.decay_ratio); + let mut ext: sp_io::TestExternalities = GenesisConfig { + pallet_balances: Some(pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 10 * self.balance_factor), + (2, 20 * self.balance_factor), + (3, 30 * self.balance_factor), + (4, 40 * self.balance_factor), + (5, 50 * self.balance_factor), + (6, 60 * self.balance_factor), + ], + }), + elections: Some(elections::GenesisConfig:: { + members: vec![], + desired_seats: self.desired_seats, + presentation_duration: 2, + term_duration: 5, + }), + } + .build_storage() + .unwrap() + .into(); + ext.execute_with(|| System::set_block_number(1)); + ext + } } pub(crate) fn 
voter_ids() -> Vec { - Elections::all_voters().iter().map(|v| v.unwrap_or(0) ).collect::>() + Elections::all_voters() + .iter() + .map(|v| v.unwrap_or(0)) + .collect::>() } pub(crate) fn vote(i: u64, l: usize) { - let _ = Balances::make_free_balance_be(&i, 20); - assert_ok!( - Elections::set_approvals( - Origin::signed(i), - (0..l).map(|_| true).collect::>(), - 0, - 0, - 20, - ) - ); + let _ = Balances::make_free_balance_be(&i, 20); + assert_ok!(Elections::set_approvals( + Origin::signed(i), + (0..l).map(|_| true).collect::>(), + 0, + 0, + 20, + )); } pub(crate) fn vote_at(i: u64, l: usize, index: elections::VoteIndex) { - let _ = Balances::make_free_balance_be(&i, 20); - assert_ok!( - Elections::set_approvals( - Origin::signed(i), - (0..l).map(|_| true).collect::>(), - 0, - index, - 20, - ) - ); + let _ = Balances::make_free_balance_be(&i, 20); + assert_ok!(Elections::set_approvals( + Origin::signed(i), + (0..l).map(|_| true).collect::>(), + 0, + index, + 20, + )); } pub(crate) fn create_candidate(i: u64, index: u32) { - let _ = Balances::make_free_balance_be(&i, 20); - assert_ok!(Elections::submit_candidacy(Origin::signed(i), index)); + let _ = Balances::make_free_balance_be(&i, 20); + assert_ok!(Elections::submit_candidacy(Origin::signed(i), index)); } pub(crate) fn balances(who: &u64) -> (u64, u64) { - (Balances::free_balance(who), Balances::reserved_balance(who)) + (Balances::free_balance(who), Balances::reserved_balance(who)) } pub(crate) fn locks(who: &u64) -> Vec { - Balances::locks(who).iter().map(|l| l.amount).collect::>() + Balances::locks(who) + .iter() + .map(|l| l.amount) + .collect::>() } pub(crate) fn new_test_ext_with_candidate_holes() -> sp_io::TestExternalities { - let mut t = ExtBuilder::default().build(); - t.execute_with(|| { - >::put(vec![0, 0, 1]); - elections::CandidateCount::put(1); - >::insert(1, (0, 2)); - }); - t + let mut t = ExtBuilder::default().build(); + t.execute_with(|| { + >::put(vec![0, 0, 1]); + 
elections::CandidateCount::put(1); + >::insert(1, (0, 2)); + }); + t } diff --git a/frame/elections/src/tests.rs b/frame/elections/src/tests.rs index 64b01f12e0..e607dd14dc 100644 --- a/frame/elections/src/tests.rs +++ b/frame/elections/src/tests.rs @@ -21,1640 +21,2669 @@ use crate::mock::*; use crate::*; -use frame_support::{assert_ok, assert_err, assert_noop}; +use frame_support::{assert_err, assert_noop, assert_ok}; #[test] fn params_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::next_vote_from(1), 4); - assert_eq!(Elections::next_vote_from(4), 4); - assert_eq!(Elections::next_vote_from(5), 8); - assert_eq!(Elections::vote_index(), 0); - assert_eq!(Elections::presentation_duration(), 2); - assert_eq!(Elections::term_duration(), 5); - assert_eq!(Elections::desired_seats(), 2); - - assert_eq!(Elections::members(), vec![]); - assert_eq!(Elections::next_tally(), Some(4)); - assert_eq!(Elections::presentation_active(), false); - assert_eq!(Elections::next_finalize(), None); - - assert_eq!(Elections::candidates(), Vec::::new()); - assert_eq!(Elections::is_a_candidate(&1), false); - assert_eq!(Elections::candidate_reg_info(1), None); - - assert_eq!(Elections::voters(0), Vec::>::new()); - assert_eq!(Elections::voter_info(1), None); - assert_eq!(Elections::all_approvals_of(&1), vec![]); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::next_vote_from(1), 4); + assert_eq!(Elections::next_vote_from(4), 4); + assert_eq!(Elections::next_vote_from(5), 8); + assert_eq!(Elections::vote_index(), 0); + assert_eq!(Elections::presentation_duration(), 2); + assert_eq!(Elections::term_duration(), 5); + assert_eq!(Elections::desired_seats(), 2); + + assert_eq!(Elections::members(), vec![]); + assert_eq!(Elections::next_tally(), Some(4)); + assert_eq!(Elections::presentation_active(), false); + assert_eq!(Elections::next_finalize(), None); + + assert_eq!(Elections::candidates(), Vec::::new()); + 
assert_eq!(Elections::is_a_candidate(&1), false); + assert_eq!(Elections::candidate_reg_info(1), None); + + assert_eq!(Elections::voters(0), Vec::>::new()); + assert_eq!(Elections::voter_info(1), None); + assert_eq!(Elections::all_approvals_of(&1), vec![]); + }); } #[test] fn chunking_bool_to_flag_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::bool_to_flag(vec![]), vec![]); - assert_eq!(Elections::bool_to_flag(vec![false]), vec![0]); - assert_eq!(Elections::bool_to_flag(vec![true]), vec![1]); - assert_eq!(Elections::bool_to_flag(vec![true, true, true, true]), vec![15]); - assert_eq!(Elections::bool_to_flag(vec![true, true, true, true, true]), vec![15 + 16]); - - let set_1 = vec![ - true, false, false, false, // 0x1 - false, true, true, true, // 0xE - ]; - assert_eq!( - Elections::bool_to_flag(set_1.clone()), - vec![0x00_00_00_E1_u32] - ); - assert_eq!( - Elections::flag_to_bool(vec![0x00_00_00_E1_u32]), - set_1 - ); - - let set_2 = vec![ - false, false, false, true, // 0x8 - false, true, false, true, // 0xA - ]; - assert_eq!( - Elections::bool_to_flag(set_2.clone()), - vec![0x00_00_00_A8_u32] - ); - assert_eq!( - Elections::flag_to_bool(vec![0x00_00_00_A8_u32]), - set_2 - ); - - let mut rhs = (0..100/APPROVAL_FLAG_LEN).map(|_| 0xFFFFFFFF_u32).collect::>(); - // NOTE: this might be need change based on `APPROVAL_FLAG_LEN`. 
- rhs.extend(vec![0x00_00_00_0F]); - assert_eq!( - Elections::bool_to_flag((0..100).map(|_| true).collect()), - rhs - ) - }) + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::bool_to_flag(vec![]), vec![]); + assert_eq!(Elections::bool_to_flag(vec![false]), vec![0]); + assert_eq!(Elections::bool_to_flag(vec![true]), vec![1]); + assert_eq!( + Elections::bool_to_flag(vec![true, true, true, true]), + vec![15] + ); + assert_eq!( + Elections::bool_to_flag(vec![true, true, true, true, true]), + vec![15 + 16] + ); + + let set_1 = vec![ + true, false, false, false, // 0x1 + false, true, true, true, // 0xE + ]; + assert_eq!( + Elections::bool_to_flag(set_1.clone()), + vec![0x00_00_00_E1_u32] + ); + assert_eq!(Elections::flag_to_bool(vec![0x00_00_00_E1_u32]), set_1); + + let set_2 = vec![ + false, false, false, true, // 0x8 + false, true, false, true, // 0xA + ]; + assert_eq!( + Elections::bool_to_flag(set_2.clone()), + vec![0x00_00_00_A8_u32] + ); + assert_eq!(Elections::flag_to_bool(vec![0x00_00_00_A8_u32]), set_2); + + let mut rhs = (0..100 / APPROVAL_FLAG_LEN) + .map(|_| 0xFFFFFFFF_u32) + .collect::>(); + // NOTE: this might be need change based on `APPROVAL_FLAG_LEN`. + rhs.extend(vec![0x00_00_00_0F]); + assert_eq!( + Elections::bool_to_flag((0..100).map(|_| true).collect()), + rhs + ) + }) } #[test] fn chunking_voter_set_growth_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - - // create 65. 
64 (set0) + 1 (set1) - (1..=63).for_each(|i| vote(i, 0)); - assert_eq!(Elections::next_nonfull_voter_set(), 0); - vote(64, 0); - assert_eq!(Elections::next_nonfull_voter_set(), 1); - vote(65, 0); - - let set1 = Elections::voters(0); - let set2 = Elections::voters(1); - - assert_eq!(set1.len(), 64); - assert_eq!(set2.len(), 1); - - assert_eq!(set1[0], Some(1)); - assert_eq!(set1[10], Some(11)); - assert_eq!(set2[0], Some(65)); - }) + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + + // create 65. 64 (set0) + 1 (set1) + (1..=63).for_each(|i| vote(i, 0)); + assert_eq!(Elections::next_nonfull_voter_set(), 0); + vote(64, 0); + assert_eq!(Elections::next_nonfull_voter_set(), 1); + vote(65, 0); + + let set1 = Elections::voters(0); + let set2 = Elections::voters(1); + + assert_eq!(set1.len(), 64); + assert_eq!(set2.len(), 1); + + assert_eq!(set1[0], Some(1)); + assert_eq!(set1[10], Some(11)); + assert_eq!(set2[0], Some(65)); + }) } #[test] fn chunking_voter_set_reclaim_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - (1..=129).for_each(|i| vote(i, 0)); - assert_eq!(Elections::next_nonfull_voter_set(), 2); + (1..=129).for_each(|i| vote(i, 0)); + assert_eq!(Elections::next_nonfull_voter_set(), 2); - assert_ok!(Elections::retract_voter(Origin::signed(11), 10)); + assert_ok!(Elections::retract_voter(Origin::signed(11), 10)); - assert_ok!(Elections::retract_voter(Origin::signed(66), 65)); - assert_ok!(Elections::retract_voter(Origin::signed(67), 66)); + assert_ok!(Elections::retract_voter(Origin::signed(66), 65)); + assert_ok!(Elections::retract_voter(Origin::signed(67), 66)); - // length does not show it but holes do exist. 
- assert_eq!(Elections::voters(0).len(), 64); - assert_eq!(Elections::voters(1).len(), 64); - assert_eq!(Elections::voters(2).len(), 1); + // length does not show it but holes do exist. + assert_eq!(Elections::voters(0).len(), 64); + assert_eq!(Elections::voters(1).len(), 64); + assert_eq!(Elections::voters(2).len(), 1); - assert_eq!(Elections::voters(0)[10], None); - assert_eq!(Elections::voters(1)[1], None); - assert_eq!(Elections::voters(1)[2], None); - // Next set with capacity is 2. - assert_eq!(Elections::next_nonfull_voter_set(), 2); + assert_eq!(Elections::voters(0)[10], None); + assert_eq!(Elections::voters(1)[1], None); + assert_eq!(Elections::voters(1)[2], None); + // Next set with capacity is 2. + assert_eq!(Elections::next_nonfull_voter_set(), 2); - // But we can fill a hole. - vote_at(130, 0, 10); + // But we can fill a hole. + vote_at(130, 0, 10); - // Nothing added to set 2. A hole was filled. - assert_eq!(Elections::voters(0).len(), 64); - assert_eq!(Elections::voters(1).len(), 64); - assert_eq!(Elections::voters(2).len(), 1); + // Nothing added to set 2. A hole was filled. + assert_eq!(Elections::voters(0).len(), 64); + assert_eq!(Elections::voters(1).len(), 64); + assert_eq!(Elections::voters(2).len(), 1); - // and the next two (scheduled) to the second set. - assert_eq!(Elections::next_nonfull_voter_set(), 2); - }) + // and the next two (scheduled) to the second set. + assert_eq!(Elections::next_nonfull_voter_set(), 2); + }) } #[test] fn chunking_approvals_set_growth_should_work() { - ExtBuilder::default().build().execute_with(|| { - // create candidates and voters. - (1..=250).for_each(|i| create_candidate(i, (i-1) as u32)); - (1..=250).for_each(|i| vote(i, i as usize)); - - // all approvals of should return the exact expected vector. 
- assert_eq!( - Elections::all_approvals_of(&180), - (0..180).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&32), - (0..32).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&8), - (0..8).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&64), - (0..64).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&65), - (0..65).map(|_| true).collect::>() - ); - assert_eq!( - Elections::all_approvals_of(&63), - (0..63).map(|_| true).collect::>() - ); - - // NOTE: assuming that APPROVAL_SET_SIZE is more or less small-ish. Might fail otherwise. - let full_sets = (180 / APPROVAL_FLAG_LEN) / APPROVAL_SET_SIZE; - let left_over = (180 / APPROVAL_FLAG_LEN) / APPROVAL_SET_SIZE; - let rem = 180 % APPROVAL_FLAG_LEN; - - // grab and check the last full set, if it exists. - if full_sets > 0 { - assert_eq!( - Elections::approvals_of((180, (full_sets-1) as SetIndex )), - Elections::bool_to_flag( - (0..APPROVAL_SET_SIZE * APPROVAL_FLAG_LEN) - .map(|_| true).collect::>() - ) - ); - } - - // grab and check the last, half-empty, set. - if left_over > 0 { - assert_eq!( - Elections::approvals_of((180, full_sets as SetIndex)), - Elections::bool_to_flag( - (0..left_over * APPROVAL_FLAG_LEN + rem) - .map(|_| true).collect::>() - ) - ); - } - }) + ExtBuilder::default().build().execute_with(|| { + // create candidates and voters. + (1..=250).for_each(|i| create_candidate(i, (i - 1) as u32)); + (1..=250).for_each(|i| vote(i, i as usize)); + + // all approvals of should return the exact expected vector. 
+ assert_eq!( + Elections::all_approvals_of(&180), + (0..180).map(|_| true).collect::>() + ); + assert_eq!( + Elections::all_approvals_of(&32), + (0..32).map(|_| true).collect::>() + ); + assert_eq!( + Elections::all_approvals_of(&8), + (0..8).map(|_| true).collect::>() + ); + assert_eq!( + Elections::all_approvals_of(&64), + (0..64).map(|_| true).collect::>() + ); + assert_eq!( + Elections::all_approvals_of(&65), + (0..65).map(|_| true).collect::>() + ); + assert_eq!( + Elections::all_approvals_of(&63), + (0..63).map(|_| true).collect::>() + ); + + // NOTE: assuming that APPROVAL_SET_SIZE is more or less small-ish. Might fail otherwise. + let full_sets = (180 / APPROVAL_FLAG_LEN) / APPROVAL_SET_SIZE; + let left_over = (180 / APPROVAL_FLAG_LEN) / APPROVAL_SET_SIZE; + let rem = 180 % APPROVAL_FLAG_LEN; + + // grab and check the last full set, if it exists. + if full_sets > 0 { + assert_eq!( + Elections::approvals_of((180, (full_sets - 1) as SetIndex)), + Elections::bool_to_flag( + (0..APPROVAL_SET_SIZE * APPROVAL_FLAG_LEN) + .map(|_| true) + .collect::>() + ) + ); + } + + // grab and check the last, half-empty, set. 
+ if left_over > 0 { + assert_eq!( + Elections::approvals_of((180, full_sets as SetIndex)), + Elections::bool_to_flag( + (0..left_over * APPROVAL_FLAG_LEN + rem) + .map(|_| true) + .collect::>() + ) + ); + } + }) } #[test] fn chunking_cell_status_works() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - (1..=63).for_each(|i| vote(i, 0)); + (1..=63).for_each(|i| vote(i, 0)); - assert_ok!(Elections::retract_voter(Origin::signed(11), 10)); - assert_ok!(Elections::retract_voter(Origin::signed(21), 20)); + assert_ok!(Elections::retract_voter(Origin::signed(11), 10)); + assert_ok!(Elections::retract_voter(Origin::signed(21), 20)); - assert_eq!(Elections::cell_status(0, 10), CellStatus::Hole); - assert_eq!(Elections::cell_status(0, 0), CellStatus::Occupied); - assert_eq!(Elections::cell_status(0, 20), CellStatus::Hole); - assert_eq!(Elections::cell_status(0, 63), CellStatus::Head); - assert_eq!(Elections::cell_status(1, 0), CellStatus::Head); - assert_eq!(Elections::cell_status(1, 10), CellStatus::Head); - }) + assert_eq!(Elections::cell_status(0, 10), CellStatus::Hole); + assert_eq!(Elections::cell_status(0, 0), CellStatus::Occupied); + assert_eq!(Elections::cell_status(0, 20), CellStatus::Hole); + assert_eq!(Elections::cell_status(0, 63), CellStatus::Head); + assert_eq!(Elections::cell_status(1, 0), CellStatus::Head); + assert_eq!(Elections::cell_status(1, 10), CellStatus::Head); + }) } #[test] fn chunking_voter_index_does_not_take_holes_into_account() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - - // create 65. 64 (set0) + 1 (set1) - (1..=65).for_each(|i| vote(i, 0)); - - // account 65 has global index 65. 
- assert_eq!(Elections::voter_at(64).unwrap(), 65); - - assert_ok!(Elections::retract_voter(Origin::signed(1), 0)); - assert_ok!(Elections::retract_voter(Origin::signed(2), 1)); - - // still the same. These holes are in some other set. - assert_eq!(Elections::voter_at(64).unwrap(), 65); - // proof: can submit a new approval with the old index. - assert_noop!( - Elections::set_approvals(Origin::signed(65), vec![], 0, 64 - 2, 10), - Error::::InvalidVoterIndex, - ); - assert_ok!(Elections::set_approvals(Origin::signed(65), vec![], 0, 64, 10)); - }) + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + + // create 65. 64 (set0) + 1 (set1) + (1..=65).for_each(|i| vote(i, 0)); + + // account 65 has global index 65. + assert_eq!(Elections::voter_at(64).unwrap(), 65); + + assert_ok!(Elections::retract_voter(Origin::signed(1), 0)); + assert_ok!(Elections::retract_voter(Origin::signed(2), 1)); + + // still the same. These holes are in some other set. + assert_eq!(Elections::voter_at(64).unwrap(), 65); + // proof: can submit a new approval with the old index. + assert_noop!( + Elections::set_approvals(Origin::signed(65), vec![], 0, 64 - 2, 10), + Error::::InvalidVoterIndex, + ); + assert_ok!(Elections::set_approvals( + Origin::signed(65), + vec![], + 0, + 64, + 10 + )); + }) } #[test] fn chunking_approval_storage_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); - - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false], 0, 0, 30)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![], 0, 0, 40)); - - assert_eq!(Elections::all_approvals_of(&2), vec![true]); - // NOTE: these two are stored in mem differently though. 
- assert_eq!(Elections::all_approvals_of(&3), vec![]); - assert_eq!(Elections::all_approvals_of(&4), vec![]); - - assert_eq!(Elections::approvals_of((3, 0)), vec![0]); - assert_eq!(Elections::approvals_of((4, 0)), vec![]); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); + + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false], + 0, + 0, + 30 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![], + 0, + 0, + 40 + )); + + assert_eq!(Elections::all_approvals_of(&2), vec![true]); + // NOTE: these two are stored in mem differently though. + assert_eq!(Elections::all_approvals_of(&3), vec![]); + assert_eq!(Elections::all_approvals_of(&4), vec![]); + + assert_eq!(Elections::approvals_of((3, 0)), vec![0]); + assert_eq!(Elections::approvals_of((4, 0)), vec![]); + }); } #[test] fn voting_initial_set_approvals_ignores_voter_index() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - - // Last argument is essentially irrelevant. You might get or miss a tip. - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![], 0, 0, 30)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![], 0, 5, 40)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![], 0, 100, 50)); - - // indices are more or less ignored. all is pushed. - assert_eq!(voter_ids(), vec![3, 4, 5]); - }) + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + + // Last argument is essentially irrelevant. You might get or miss a tip. 
+ assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![], + 0, + 0, + 30 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![], + 0, + 5, + 40 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![], + 0, + 100, + 50 + )); + + // indices are more or less ignored. all is pushed. + assert_eq!(voter_ids(), vec![3, 4, 5]); + }) } #[test] fn voting_bad_approval_index_slashes_voters_and_bond_reduces_stake() { - ExtBuilder::default().voting_fee(5).voter_bond(2).build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - - (1..=63).for_each(|i| vote(i, 0)); - assert_eq!(balances(&1), (13, 2)); - assert_eq!(balances(&10), (18, 2)); - assert_eq!(balances(&60), (18, 2)); - - // still no fee - vote(64, 0); - assert_eq!(balances(&64), (18, 2)); - assert_eq!( - Elections::voter_info(&64).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 20, pot:0 } - ); - - assert_eq!(Elections::next_nonfull_voter_set(), 1); - - // now we charge the next voter. - vote(65, 0); - assert_eq!(balances(&65), (13, 2)); - assert_eq!( - Elections::voter_info(&65).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 15, pot:0 } - ); - }); + ExtBuilder::default() + .voting_fee(5) + .voter_bond(2) + .build() + .execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + + (1..=63).for_each(|i| vote(i, 0)); + assert_eq!(balances(&1), (13, 2)); + assert_eq!(balances(&10), (18, 2)); + assert_eq!(balances(&60), (18, 2)); + + // still no fee + vote(64, 0); + assert_eq!(balances(&64), (18, 2)); + assert_eq!( + Elections::voter_info(&64).unwrap(), + VoterInfo { + last_win: 0, + last_active: 0, + stake: 20, + pot: 0 + } + ); + + assert_eq!(Elections::next_nonfull_voter_set(), 1); + + // now we charge the next voter. 
+ vote(65, 0); + assert_eq!(balances(&65), (13, 2)); + assert_eq!( + Elections::voter_info(&65).unwrap(), + VoterInfo { + last_win: 0, + last_active: 0, + stake: 15, + pot: 0 + } + ); + }); } #[test] fn voting_subsequent_set_approvals_checks_voter_index() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![], 0, 0, 30)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![], 0, 5, 40)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![], 0, 100, 50)); - - // invalid index - assert_noop!( - Elections::set_approvals(Origin::signed(4), vec![true], 0, 5, 40), - Error::::InvalidVoterIndex, - ); - // wrong index - assert_noop!( - Elections::set_approvals(Origin::signed(4), vec![true], 0, 0, 40), - Error::::InvalidVoterIndex, - ); - // correct - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![true], 0, 1, 40)); - }) + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![], + 0, + 0, + 30 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![], + 0, + 5, + 40 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![], + 0, + 100, + 50 + )); + + // invalid index + assert_noop!( + Elections::set_approvals(Origin::signed(4), vec![true], 0, 5, 40), + Error::::InvalidVoterIndex, + ); + // wrong index + assert_noop!( + Elections::set_approvals(Origin::signed(4), vec![true], 0, 0, 40), + Error::::InvalidVoterIndex, + ); + // correct + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![true], + 0, + 1, + 40 + )); + }) } #[test] fn voting_cannot_lock_less_than_limit() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - - assert_noop!( - Elections::set_approvals(Origin::signed(3), 
vec![], 0, 0, 4), - Error::::InsufficientLockedValue, - ); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![], 0, 0, 5)); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + + assert_noop!( + Elections::set_approvals(Origin::signed(3), vec![], 0, 0, 4), + Error::::InsufficientLockedValue, + ); + assert_ok!(Elections::set_approvals(Origin::signed(3), vec![], 0, 0, 5)); + }); } #[test] fn voting_locking_more_than_total_balance_is_moot() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - - assert_eq!(balances(&3), (30, 0)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![], 0, 0, 35)); - - assert_eq!(balances(&3), (28, 2)); - assert_eq!( - Elections::voter_info(&3).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 30, pot:0 } - ); - }); + ExtBuilder::default() + .voter_bond(2) + .build() + .execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + + assert_eq!(balances(&3), (30, 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![], + 0, + 0, + 35 + )); + + assert_eq!(balances(&3), (28, 2)); + assert_eq!( + Elections::voter_info(&3).unwrap(), + VoterInfo { + last_win: 0, + last_active: 0, + stake: 30, + pot: 0 + } + ); + }); } #[test] fn voting_locking_stake_and_reserving_bond_works() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - - assert_eq!(balances(&2), (20, 0)); - assert_eq!(locks(&2), vec![]); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![], 0, 0, 15)); - assert_eq!(balances(&2), (18, 2)); - assert_eq!(locks(&2), vec![15]); - - // deposit a bit more. 
- let _ = Balances::make_free_balance_be(&2, 100); - - // change vote - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 70)); - assert_eq!(balances(&2), (100, 2)); - assert_eq!(locks(&2), vec![70]); - - assert_ok!(Elections::retract_voter(Origin::signed(2), 0)); - - assert_eq!(balances(&2), (102, 0)); - assert_eq!(locks(&2), vec![]); - }); + ExtBuilder::default() + .voter_bond(2) + .build() + .execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + + assert_eq!(balances(&2), (20, 0)); + assert_eq!(locks(&2), vec![]); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![], + 0, + 0, + 15 + )); + assert_eq!(balances(&2), (18, 2)); + assert_eq!(locks(&2), vec![15]); + + // deposit a bit more. + let _ = Balances::make_free_balance_be(&2, 100); + + // change vote + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 70 + )); + assert_eq!(balances(&2), (100, 2)); + assert_eq!(locks(&2), vec![70]); + + assert_ok!(Elections::retract_voter(Origin::signed(2), 0)); + + assert_eq!(balances(&2), (102, 0)); + assert_eq!(locks(&2), vec![]); + }); } #[test] fn voting_without_any_candidate_count_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::candidates().len(), 0); - - assert_noop!( - Elections::set_approvals(Origin::signed(4), vec![], 0, 0, 40), - Error::::ZeroCandidates, - ); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::candidates().len(), 0); + + assert_noop!( + Elections::set_approvals(Origin::signed(4), vec![], 0, 0, 40), + Error::::ZeroCandidates, + ); + }); } #[test] fn voting_setting_an_approval_vote_count_more_than_candidate_count_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_eq!(Elections::candidates().len(), 1); - - assert_noop!( - Elections::set_approvals(Origin::signed(4),vec![true, true], 0, 
0, 40), - Error::::TooManyVotes, - ); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_eq!(Elections::candidates().len(), 1); + + assert_noop!( + Elections::set_approvals(Origin::signed(4), vec![true, true], 0, 0, 40), + Error::::TooManyVotes, + ); + }); } #[test] fn voting_resubmitting_approvals_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![true], 0, 0, 40)); - - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - assert_eq!(Elections::candidates().len(), 3); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![true, false, true], 0, 0, 40)); - - assert_eq!(Elections::all_approvals_of(&4), vec![true, false, true]); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![true], + 0, + 0, + 40 + )); + + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + assert_eq!(Elections::candidates().len(), 3); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![true, false, true], + 0, + 0, + 40 + )); + + assert_eq!(Elections::all_approvals_of(&4), vec![true, false, true]); + }); } #[test] fn voting_retracting_voter_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_eq!(Elections::candidates().len(), 1); - - assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - 
assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 1, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![true], 0, 2, 30)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![true], 0, 3, 40)); - - assert_eq!(voter_ids(), vec![1, 2, 3, 4]); - assert_eq!(Elections::all_approvals_of(&1), vec![true]); - assert_eq!(Elections::all_approvals_of(&2), vec![true]); - assert_eq!(Elections::all_approvals_of(&3), vec![true]); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - - assert_ok!(Elections::retract_voter(Origin::signed(1), 0)); - - assert_eq!(voter_ids(), vec![0, 2, 3, 4]); - assert_eq!(Elections::all_approvals_of(&1), Vec::::new()); - assert_eq!(Elections::all_approvals_of(&2), vec![true]); - assert_eq!(Elections::all_approvals_of(&3), vec![true]); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - - assert_ok!(Elections::retract_voter(Origin::signed(2), 1)); - - assert_eq!(voter_ids(), vec![0, 0, 3, 4]); - assert_eq!(Elections::all_approvals_of(&1), Vec::::new()); - assert_eq!(Elections::all_approvals_of(&2), Vec::::new()); - assert_eq!(Elections::all_approvals_of(&3), vec![true]); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - - assert_ok!(Elections::retract_voter(Origin::signed(3), 2)); - - assert_eq!(voter_ids(), vec![0, 0, 0, 4]); - assert_eq!(Elections::all_approvals_of(&1), Vec::::new()); - assert_eq!(Elections::all_approvals_of(&2), Vec::::new()); - assert_eq!(Elections::all_approvals_of(&3), Vec::::new()); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_eq!(Elections::candidates().len(), 1); + + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![true], + 0, + 0, + 10 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 1, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + 
vec![true], + 0, + 2, + 30 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![true], + 0, + 3, + 40 + )); + + assert_eq!(voter_ids(), vec![1, 2, 3, 4]); + assert_eq!(Elections::all_approvals_of(&1), vec![true]); + assert_eq!(Elections::all_approvals_of(&2), vec![true]); + assert_eq!(Elections::all_approvals_of(&3), vec![true]); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + + assert_ok!(Elections::retract_voter(Origin::signed(1), 0)); + + assert_eq!(voter_ids(), vec![0, 2, 3, 4]); + assert_eq!(Elections::all_approvals_of(&1), Vec::::new()); + assert_eq!(Elections::all_approvals_of(&2), vec![true]); + assert_eq!(Elections::all_approvals_of(&3), vec![true]); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + + assert_ok!(Elections::retract_voter(Origin::signed(2), 1)); + + assert_eq!(voter_ids(), vec![0, 0, 3, 4]); + assert_eq!(Elections::all_approvals_of(&1), Vec::::new()); + assert_eq!(Elections::all_approvals_of(&2), Vec::::new()); + assert_eq!(Elections::all_approvals_of(&3), vec![true]); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + + assert_ok!(Elections::retract_voter(Origin::signed(3), 2)); + + assert_eq!(voter_ids(), vec![0, 0, 0, 4]); + assert_eq!(Elections::all_approvals_of(&1), Vec::::new()); + assert_eq!(Elections::all_approvals_of(&2), Vec::::new()); + assert_eq!(Elections::all_approvals_of(&3), Vec::::new()); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + }); } #[test] fn voting_invalid_retraction_index_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); - - assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_eq!(voter_ids(), vec![1, 2]); - assert_noop!(Elections::retract_voter(Origin::signed(1), 1), Error::::InvalidRetractionIndex); - }); + 
ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); + + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![true], + 0, + 0, + 10 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_eq!(voter_ids(), vec![1, 2]); + assert_noop!( + Elections::retract_voter(Origin::signed(1), 1), + Error::::InvalidRetractionIndex + ); + }); } #[test] fn voting_overflow_retraction_index_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); - - assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_noop!(Elections::retract_voter(Origin::signed(1), 1), Error::::InvalidRetractionIndex); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); + + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![true], + 0, + 0, + 10 + )); + assert_noop!( + Elections::retract_voter(Origin::signed(1), 1), + Error::::InvalidRetractionIndex + ); + }); } #[test] fn voting_non_voter_retraction_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); - - assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_noop!(Elections::retract_voter(Origin::signed(2), 0), Error::::RetractNonVoter); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); + + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![true], + 0, + 0, + 10 + )); + assert_noop!( + Elections::retract_voter(Origin::signed(2), 0), + Error::::RetractNonVoter + ); + }); } #[test] fn retracting_inactive_voter_should_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - 
assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 1, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Elections::end_block(System::block_number())); - - assert_ok!(Elections::reap_inactive_voter(Origin::signed(5), - (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - )); - - assert_eq!(voter_ids(), vec![0, 5]); - assert_eq!(Elections::all_approvals_of(&2).len(), 0); - assert_eq!(Balances::total_balance(&2), 20); - assert_eq!(Balances::total_balance(&5), 50); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![true], + 1, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 
1)); + assert_ok!(Elections::end_block(System::block_number())); + + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(5), + (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + )); + + assert_eq!(voter_ids(), vec![0, 5]); + assert_eq!(Elections::all_approvals_of(&2).len(), 0); + assert_eq!(Balances::total_balance(&2), 20); + assert_eq!(Balances::total_balance(&5), 50); + }); } #[test] fn retracting_inactive_voter_with_other_candidates_in_slots_should_work() { - ExtBuilder::default().voter_bond(2).build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 1, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(11); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - - assert_ok!(Elections::reap_inactive_voter(Origin::signed(5), - (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - )); - - assert_eq!(voter_ids(), vec![0, 5]); - assert_eq!(Elections::all_approvals_of(&2).len(), 0); - }); + ExtBuilder::default() + .voter_bond(2) + .build() + .execute_with(|| { + System::set_block_number(4); + 
assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![true], + 1, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(11); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(5), + (voter_ids().iter().position(|&i| i == 5).unwrap() as u32).into(), + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + )); + + assert_eq!(voter_ids(), vec![0, 5]); + assert_eq!(Elections::all_approvals_of(&2).len(), 0); + }); } #[test] fn retracting_inactive_voter_with_bad_reporter_index_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 1, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - 
System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Elections::end_block(System::block_number())); - - assert_noop!(Elections::reap_inactive_voter(Origin::signed(2), - 42, - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), Error::::InvalidReporterIndex); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![true], + 1, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Elections::end_block(System::block_number())); + + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(2), + 42, + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + Error::::InvalidReporterIndex + ); + }); } #[test] fn retracting_inactive_voter_with_bad_target_index_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - 
System::set_block_number(8); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 1, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Elections::end_block(System::block_number())); - - assert_noop!(Elections::reap_inactive_voter(Origin::signed(2), - (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2, 42, - 2 - ), Error::::InvalidTargetIndex); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![true], + 1, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Elections::end_block(System::block_number())); + + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(2), + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2, + 42, + 2 + ), + Error::::InvalidTargetIndex + ); + }); } #[test] fn retracting_active_voter_should_slash_reporter() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); - 
assert_ok!(Elections::submit_candidacy(Origin::signed(4), 2)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false, false, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, true, false, false], 0, 0, 30)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, true, false], 0, 0, 40)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Elections::set_desired_seats(Origin::ROOT, 3)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20 + Elections::get_offset(20, 1), 1)); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30 + Elections::get_offset(30, 1), 1)); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::vote_index(), 2); - assert_eq!(::InactiveGracePeriod::get(), 1); - assert_eq!(::VotingPeriod::get(), 4); - assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 1, last_active: 0, stake: 40, pot: 0 })); - - assert_ok!(Elections::reap_inactive_voter(Origin::signed(4), - (voter_ids().iter().position(|&i| i == 4).unwrap() as u32).into(), - 2, - (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - )); - - assert_eq!(voter_ids(), vec![2, 3, 0, 5]); - assert_eq!(Elections::all_approvals_of(&4).len(), 0); - 
assert_eq!(Balances::total_balance(&4), 40); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 2)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 3)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false, false, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, true, false, false], + 0, + 0, + 30 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, true, false], + 0, + 0, + 40 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Elections::set_desired_seats(Origin::ROOT, 3)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 2, + 20 + Elections::get_offset(20, 1), + 1 + )); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 3, + 30 + Elections::get_offset(30, 1), + 1 + )); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::vote_index(), 2); + assert_eq!(::InactiveGracePeriod::get(), 1); + assert_eq!(::VotingPeriod::get(), 4); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { + last_win: 1, + last_active: 0, + stake: 
40, + pot: 0 + }) + ); + + assert_ok!(Elections::reap_inactive_voter( + Origin::signed(4), + (voter_ids().iter().position(|&i| i == 4).unwrap() as u32).into(), + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + )); + + assert_eq!(voter_ids(), vec![2, 3, 0, 5]); + assert_eq!(Elections::all_approvals_of(&4).len(), 0); + assert_eq!(Balances::total_balance(&4), 40); + }); } #[test] fn retracting_inactive_voter_by_nonvoter_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(8); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 1, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); - assert_ok!(Elections::end_block(System::block_number())); - - assert_noop!(Elections::reap_inactive_voter(Origin::signed(4), - 0, - 2, (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), - 2 - ), Error::::NotVoter); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); 
+ assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![true], + 1, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 1)); + assert_ok!(Elections::end_block(System::block_number())); + + assert_noop!( + Elections::reap_inactive_voter( + Origin::signed(4), + 0, + 2, + (voter_ids().iter().position(|&i| i == 2).unwrap() as u32).into(), + 2 + ), + Error::::NotVoter + ); + }); } #[test] fn candidacy_simple_candidate_submission_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_eq!(Elections::candidate_reg_info(1), None); - assert_eq!(Elections::candidate_reg_info(2), None); - assert_eq!(Elections::is_a_candidate(&1), false); - assert_eq!(Elections::is_a_candidate(&2), false); - - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_eq!(Elections::candidates(), vec![1]); - assert_eq!(Elections::candidate_reg_info(1), Some((0, 0))); - assert_eq!(Elections::candidate_reg_info(2), None); - assert_eq!(Elections::is_a_candidate(&1), true); - assert_eq!(Elections::is_a_candidate(&2), false); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_eq!(Elections::candidates(), vec![1, 2]); - assert_eq!(Elections::candidate_reg_info(1), Some((0, 0))); - assert_eq!(Elections::candidate_reg_info(2), Some((0, 1))); - assert_eq!(Elections::is_a_candidate(&1), true); - assert_eq!(Elections::is_a_candidate(&2), true); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_eq!(Elections::candidate_reg_info(1), None); + assert_eq!(Elections::candidate_reg_info(2), None); + assert_eq!(Elections::is_a_candidate(&1), false); + assert_eq!(Elections::is_a_candidate(&2), false); + + 
assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_eq!(Elections::candidates(), vec![1]); + assert_eq!(Elections::candidate_reg_info(1), Some((0, 0))); + assert_eq!(Elections::candidate_reg_info(2), None); + assert_eq!(Elections::is_a_candidate(&1), true); + assert_eq!(Elections::is_a_candidate(&2), false); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_eq!(Elections::candidates(), vec![1, 2]); + assert_eq!(Elections::candidate_reg_info(1), Some((0, 0))); + assert_eq!(Elections::candidate_reg_info(2), Some((0, 1))); + assert_eq!(Elections::is_a_candidate(&1), true); + assert_eq!(Elections::is_a_candidate(&2), true); + }); } #[test] fn candidacy_submission_using_free_slot_should_work() { - let mut t = new_test_ext_with_candidate_holes(); + let mut t = new_test_ext_with_candidate_holes(); - t.execute_with(|| { - assert_eq!(Elections::candidates(), vec![0, 0, 1]); + t.execute_with(|| { + assert_eq!(Elections::candidates(), vec![0, 0, 1]); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_eq!(Elections::candidates(), vec![0, 2, 1]); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_eq!(Elections::candidates(), vec![0, 2, 1]); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); - assert_eq!(Elections::candidates(), vec![3, 2, 1]); - }); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 0)); + assert_eq!(Elections::candidates(), vec![3, 2, 1]); + }); } #[test] fn candidacy_submission_using_alternative_free_slot_should_work() { - let mut t = new_test_ext_with_candidate_holes(); + let mut t = new_test_ext_with_candidate_holes(); - t.execute_with(|| { - assert_eq!(Elections::candidates(), vec![0, 0, 1]); + t.execute_with(|| { + assert_eq!(Elections::candidates(), vec![0, 0, 1]); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_eq!(Elections::candidates(), vec![2, 0, 1]); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 
0)); + assert_eq!(Elections::candidates(), vec![2, 0, 1]); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); - assert_eq!(Elections::candidates(), vec![2, 3, 1]); - }); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 1)); + assert_eq!(Elections::candidates(), vec![2, 3, 1]); + }); } #[test] fn candidacy_submission_not_using_free_slot_should_not_work() { - let mut t = new_test_ext_with_candidate_holes(); - - t.execute_with(|| { - assert_noop!( - Elections::submit_candidacy(Origin::signed(4), 3), - Error::::InvalidCandidateSlot - ); - }); + let mut t = new_test_ext_with_candidate_holes(); + + t.execute_with(|| { + assert_noop!( + Elections::submit_candidacy(Origin::signed(4), 3), + Error::::InvalidCandidateSlot + ); + }); } #[test] fn candidacy_bad_candidate_slot_submission_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_noop!( - Elections::submit_candidacy(Origin::signed(1), 1), - Error::::InvalidCandidateSlot - ); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_noop!( + Elections::submit_candidacy(Origin::signed(1), 1), + Error::::InvalidCandidateSlot + ); + }); } #[test] fn candidacy_non_free_candidate_slot_submission_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_eq!(Elections::candidates(), vec![1]); - assert_noop!( - Elections::submit_candidacy(Origin::signed(2), 0), - Error::::InvalidCandidateSlot - ); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_eq!(Elections::candidates(), vec![1]); + assert_noop!( + Elections::submit_candidacy(Origin::signed(2), 0), + Error::::InvalidCandidateSlot + ); + }); } 
#[test] fn candidacy_dupe_candidate_submission_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_eq!(Elections::candidates(), vec![1]); - assert_noop!( - Elections::submit_candidacy(Origin::signed(1), 1), - Error::::DuplicatedCandidate, - ); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_eq!(Elections::candidates(), vec![1]); + assert_noop!( + Elections::submit_candidacy(Origin::signed(1), 1), + Error::::DuplicatedCandidate, + ); + }); } #[test] fn candidacy_poor_candidate_submission_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::candidates(), Vec::::new()); - assert_noop!( - Elections::submit_candidacy(Origin::signed(7), 0), - Error::::InsufficientCandidateFunds, - ); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::candidates(), Vec::::new()); + assert_noop!( + Elections::submit_candidacy(Origin::signed(7), 0), + Error::::InsufficientCandidateFunds, + ); + }); } #[test] fn election_voting_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - - assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 10)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![true], 0, 1, 40)); - - assert_eq!(Elections::all_approvals_of(&1), vec![true]); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - assert_eq!(voter_ids(), vec![1, 4]); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![false, true, true], 0, 2, 20)); - 
assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, true, true], 0, 3, 30)); - - assert_eq!(Elections::all_approvals_of(&1), vec![true]); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - assert_eq!(Elections::all_approvals_of(&2), vec![false, true, true]); - assert_eq!(Elections::all_approvals_of(&3), vec![false, true, true]); - - assert_eq!(voter_ids(), vec![1, 4, 2, 3]); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![true], + 0, + 0, + 10 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![true], + 0, + 1, + 40 + )); + + assert_eq!(Elections::all_approvals_of(&1), vec![true]); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + assert_eq!(voter_ids(), vec![1, 4]); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![false, true, true], + 0, + 2, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, true, true], + 0, + 3, + 30 + )); + + assert_eq!(Elections::all_approvals_of(&1), vec![true]); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + assert_eq!(Elections::all_approvals_of(&2), vec![false, true, true]); + assert_eq!(Elections::all_approvals_of(&3), vec![false, true, true]); + + assert_eq!(voter_ids(), vec![1, 4, 2, 3]); + }); } #[test] fn election_proxy_voting_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); - - >::insert(11, 1); - >::insert(12, 2); - >::insert(13, 3); - >::insert(14, 4); - assert_ok!( - Elections::proxy_set_approvals(Origin::signed(11), vec![true], 0, 0, 10) - ); - assert_ok!( - Elections::proxy_set_approvals(Origin::signed(14), vec![true], 0, 1, 40) - ); - - 
assert_eq!(Elections::all_approvals_of(&1), vec![true]); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - assert_eq!(voter_ids(), vec![1, 4]); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - - assert_ok!( - Elections::proxy_set_approvals(Origin::signed(12), vec![false, true], 0, 2, 20) - ); - assert_ok!( - Elections::proxy_set_approvals(Origin::signed(13), vec![false, true], 0, 3, 30) - ); - - assert_eq!(Elections::all_approvals_of(&1), vec![true]); - assert_eq!(Elections::all_approvals_of(&4), vec![true]); - assert_eq!(Elections::all_approvals_of(&2), vec![false, true]); - assert_eq!(Elections::all_approvals_of(&3), vec![false, true]); - - assert_eq!(voter_ids(), vec![1, 4, 2, 3]); - }); + ExtBuilder::default().build().execute_with(|| { + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 0)); + + >::insert(11, 1); + >::insert(12, 2); + >::insert(13, 3); + >::insert(14, 4); + assert_ok!(Elections::proxy_set_approvals( + Origin::signed(11), + vec![true], + 0, + 0, + 10 + )); + assert_ok!(Elections::proxy_set_approvals( + Origin::signed(14), + vec![true], + 0, + 1, + 40 + )); + + assert_eq!(Elections::all_approvals_of(&1), vec![true]); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + assert_eq!(voter_ids(), vec![1, 4]); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + + assert_ok!(Elections::proxy_set_approvals( + Origin::signed(12), + vec![false, true], + 0, + 2, + 20 + )); + assert_ok!(Elections::proxy_set_approvals( + Origin::signed(13), + vec![false, true], + 0, + 3, + 30 + )); + + assert_eq!(Elections::all_approvals_of(&1), vec![true]); + assert_eq!(Elections::all_approvals_of(&4), vec![true]); + assert_eq!(Elections::all_approvals_of(&2), vec![false, true]); + assert_eq!(Elections::all_approvals_of(&3), vec![false, true]); + + assert_eq!(voter_ids(), vec![1, 4, 
2, 3]); + }); } #[test] fn election_simple_tally_should_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true], 0, 0, 50)); - assert_eq!(voter_ids(), vec![2, 5]); - assert_eq!(Elections::all_approvals_of(&2), vec![true]); - assert_eq!(Elections::all_approvals_of(&5), vec![false, true]); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(4), 2, 20, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(4), 5, 50, 0), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert!(!Elections::presentation_active()); - assert_eq!(Elections::members(), vec![(5, 11), (2, 11)]); - - assert!(!Elections::is_a_candidate(&2)); - assert!(!Elections::is_a_candidate(&5)); - assert_eq!(Elections::vote_index(), 1); - assert_eq!( - Elections::voter_info(2), - Some(VoterInfo { last_win: 1, last_active: 0, stake: 20, pot: 0 }) - ); - assert_eq!( - Elections::voter_info(5), - Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 }) - ); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + 
vec![false, true], + 0, + 0, + 50 + )); + assert_eq!(voter_ids(), vec![2, 5]); + assert_eq!(Elections::all_approvals_of(&2), vec![true]); + assert_eq!(Elections::all_approvals_of(&5), vec![false, true]); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(4), 2, 20, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(4), 5, 50, 0), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert!(!Elections::presentation_active()); + assert_eq!(Elections::members(), vec![(5, 11), (2, 11)]); + + assert!(!Elections::is_a_candidate(&2)); + assert!(!Elections::is_a_candidate(&5)); + assert_eq!(Elections::vote_index(), 1); + assert_eq!( + Elections::voter_info(2), + Some(VoterInfo { + last_win: 1, + last_active: 0, + stake: 20, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(5), + Some(VoterInfo { + last_win: 1, + last_active: 0, + stake: 50, + pot: 0 + }) + ); + }); } #[test] fn election_seats_should_be_released() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(4), 2, 20, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(4), 5, 50, 0), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)])); - 
assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(5, 11), (2, 11)]); - let mut current = System::block_number(); - let free_block; - loop { - current += 1; - System::set_block_number(current); - assert_ok!(Elections::end_block(System::block_number())); - if Elections::members().len() == 0 { - free_block = current; - break; - } - } - // 11 + 2 which is the next voting period. - assert_eq!(free_block, 14); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(4), 2, 20, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(4), 5, 50, 0), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (0, 0), (20, 2), (50, 5)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(5, 11), (2, 11)]); + let mut current = System::block_number(); + let free_block; + loop { + current += 1; + System::set_block_number(current); + assert_ok!(Elections::end_block(System::block_number())); + if Elections::members().len() == 0 { + free_block = current; + break; + } + } + // 11 + 2 which is the next voting period. 
+ assert_eq!(free_block, 14); + }); } #[test] fn election_presentations_with_zero_staked_deposit_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_noop!( - Elections::present_winner(Origin::signed(4), 2, 0, 0), - Error::::ZeroDeposit, - ); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 0, 0), + Error::::ZeroDeposit, + ); + }); } #[test] fn election_double_presentations_should_be_punished() { - ExtBuilder::default().build().execute_with(|| { - assert!(Balances::can_slash(&4, 10)); - - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!( - Elections::present_winner(Origin::signed(4), 5, 50, 0), - Err(Error::::DuplicatedPresentation.into()), - ); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(5, 11), (2, 11)]); - 
assert_eq!(Balances::total_balance(&4), 38); - }); + ExtBuilder::default().build().execute_with(|| { + assert!(Balances::can_slash(&4, 10)); + + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); + assert_eq!( + Elections::present_winner(Origin::signed(4), 5, 50, 0), + Err(Error::::DuplicatedPresentation.into()), + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(5, 11), (2, 11)]); + assert_eq!(Balances::total_balance(&4), 38); + }); } #[test] fn election_presenting_for_double_election_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_eq!(Elections::submit_candidacy(Origin::signed(2), 0), Ok(())); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 0, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(8); - // NOTE: This is now mandatory to disable the lock - assert_ok!(Elections::retract_voter(Origin::signed(2), 0)); - assert_eq!(Elections::submit_candidacy(Origin::signed(2), 0), Ok(())); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true], 1, 0, 20)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(10); - assert_noop!( - 
Elections::present_winner(Origin::signed(4), 2, 20, 1), - Error::::DuplicatedCandidate, - ); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_eq!(Elections::submit_candidacy(Origin::signed(2), 0), Ok(())); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 0, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); + // NOTE: This is now mandatory to disable the lock + assert_ok!(Elections::retract_voter(Origin::signed(2), 0)); + assert_eq!(Elections::submit_candidacy(Origin::signed(2), 0), Ok(())); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true], + 1, + 0, + 20 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 20, 1), + Error::::DuplicatedCandidate, + ); + }); } #[test] fn election_presenting_loser_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true], 0, 0, 60)); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![false, true], 0, 0, 20)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); - assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, 
false, false, false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); - - assert_noop!(Elections::present_winner(Origin::signed(4), 2, 20, 0), Error::::UnworthyCandidate); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true], + 0, + 0, + 60 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![false, true], + 0, + 0, + 20 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0, + 0, + 30 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); + + assert_eq!( + Elections::leaderboard(), + 
Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); + + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 20, 0), + Error::::UnworthyCandidate + ); + }); } #[test] fn election_presenting_loser_first_should_not_matter() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true], 0, 0, 60)); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![false, true], 0, 0, 20)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); - assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true], + 0, + 0, + 60 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + 
assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![false, true], + 0, + 0, + 20 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0, + 0, + 30 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 2, 20, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); + + assert_eq!( + Elections::leaderboard(), + Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); + }); } #[test] fn election_present_outside_of_presentation_period_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - assert_noop!( - Elections::present_winner(Origin::signed(5), 5, 1, 0), - Error::::NotPresentationPeriod, - ); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + assert_noop!( + Elections::present_winner(Origin::signed(5), 5, 1, 0), + Error::::NotPresentationPeriod, + ); + }); } #[test] fn election_present_with_invalid_vote_index_should_not_work() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - 
assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_noop!(Elections::present_winner(Origin::signed(4), 2, 20, 1), Error::::InvalidVoteIndex); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_noop!( + Elections::present_winner(Origin::signed(4), 2, 20, 1), + Error::::InvalidVoteIndex + ); + }); } #[test] fn election_present_when_presenter_is_poor_should_not_work() { - let test_present = |p| { - ExtBuilder::default() - .voting_fee(5) - .voter_bond(2) - .bad_presentation_punishment(p) - .build() - .execute_with(|| { - System::set_block_number(4); - let _ = Balances::make_free_balance_be(&1, 15); - assert!(!Elections::presentation_active()); - - // -3 - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_eq!(Balances::free_balance(1), 12); - // -2 -5 - assert_ok!(Elections::set_approvals(Origin::signed(1), vec![true], 0, 0, 15)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::reserved_balance(1), 5); - if p > 5 { - assert_noop!(Elections::present_winner( - Origin::signed(1), 1, 10, 0), - Error::::InsufficientPresenterFunds, - ); - } else { - 
assert_ok!(Elections::present_winner(Origin::signed(1), 1, 10, 0)); - } - }); - }; - test_present(4); - test_present(6); + let test_present = |p| { + ExtBuilder::default() + .voting_fee(5) + .voter_bond(2) + .bad_presentation_punishment(p) + .build() + .execute_with(|| { + System::set_block_number(4); + let _ = Balances::make_free_balance_be(&1, 15); + assert!(!Elections::presentation_active()); + + // -3 + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_eq!(Balances::free_balance(1), 12); + // -2 -5 + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![true], + 0, + 0, + 15 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::reserved_balance(1), 5); + if p > 5 { + assert_noop!( + Elections::present_winner(Origin::signed(1), 1, 10, 0), + Error::::InsufficientPresenterFunds, + ); + } else { + assert_ok!(Elections::present_winner(Origin::signed(1), 1, 10, 0)); + } + }); + }; + test_present(4); + test_present(6); } #[test] fn election_invalid_present_tally_should_slash() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - assert_eq!(Balances::total_balance(&4), 40); - - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![true, false], 0, 0, 20)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_err!(Elections::present_winner(Origin::signed(4), 2, 80, 0), Error::::IncorrectTotal); - - assert_eq!(Balances::total_balance(&4), 38); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + 
assert_eq!(Balances::total_balance(&4), 40); + + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![true, false], + 0, + 0, + 20 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_err!( + Elections::present_winner(Origin::signed(4), 2, 80, 0), + Error::::IncorrectTotal + ); + + assert_eq!(Balances::total_balance(&4), 38); + }); } #[test] fn election_runners_up_should_be_kept() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true], 0, 0, 60)); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![false, true], 0, 0, 20)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); - assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); - - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Elections::presentation_active()); - assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); - // leaderboard length is the empty seats plus the carry count (i.e. 
5 + 2), where those - // to be carried are the lowest and stored in lowest indices - assert_eq!(Elections::leaderboard(), Some(vec![ - (0, 0), - (0, 0), - (0, 0), - (60, 1) - ])); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_eq!(Elections::leaderboard(), Some(vec![ - (30, 3), - (40, 4), - (50, 5), - (60, 1) - ])); - - assert_ok!(Elections::end_block(System::block_number())); - - assert!(!Elections::presentation_active()); - assert_eq!(Elections::members(), vec![(1, 11), (5, 11)]); - - assert!(!Elections::is_a_candidate(&1)); - assert!(!Elections::is_a_candidate(&5)); - assert!(!Elections::is_a_candidate(&2)); - assert!(Elections::is_a_candidate(&3)); - assert!(Elections::is_a_candidate(&4)); - assert_eq!(Elections::vote_index(), 1); - assert_eq!(Elections::voter_info(2), Some(VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0 })); - assert_eq!(Elections::voter_info(3), Some(VoterInfo { last_win: 0, last_active: 0, stake: 30, pot: 0 })); - assert_eq!(Elections::voter_info(4), Some(VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0 })); - assert_eq!(Elections::voter_info(5), Some(VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0 })); - assert_eq!(Elections::voter_info(6), Some(VoterInfo { last_win: 1, last_active: 0, stake: 60, pot: 0 })); - assert_eq!(Elections::candidate_reg_info(3), Some((0, 2))); - assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true], + 0, + 0, + 60 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Elections::set_approvals( + 
Origin::signed(2), + vec![false, true], + 0, + 0, + 20 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0, + 0, + 30 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); + + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Elections::presentation_active()); + assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); + // leaderboard length is the empty seats plus the carry count (i.e. 5 + 2), where those + // to be carried are the lowest and stored in lowest indices + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (0, 0), (0, 0), (60, 1)]) + ); + assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); + assert_eq!( + Elections::leaderboard(), + Some(vec![(30, 3), (40, 4), (50, 5), (60, 1)]) + ); + + assert_ok!(Elections::end_block(System::block_number())); + + assert!(!Elections::presentation_active()); + assert_eq!(Elections::members(), vec![(1, 11), (5, 11)]); + + assert!(!Elections::is_a_candidate(&1)); + assert!(!Elections::is_a_candidate(&5)); + assert!(!Elections::is_a_candidate(&2)); + assert!(Elections::is_a_candidate(&3)); + assert!(Elections::is_a_candidate(&4)); + assert_eq!(Elections::vote_index(), 1); + assert_eq!( + Elections::voter_info(2), + Some(VoterInfo { + last_win: 0, + last_active: 0, + stake: 20, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(3), + Some(VoterInfo { + last_win: 0, + 
last_active: 0, + stake: 30, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { + last_win: 0, + last_active: 0, + stake: 40, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(5), + Some(VoterInfo { + last_win: 1, + last_active: 0, + stake: 50, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(6), + Some(VoterInfo { + last_win: 1, + last_active: 0, + stake: 60, + pot: 0 + }) + ); + assert_eq!(Elections::candidate_reg_info(3), Some((0, 2))); + assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); + }); } #[test] fn election_second_tally_should_use_runners_up() { - ExtBuilder::default().build().execute_with(|| { - System::set_block_number(4); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true], 0, 0, 60)); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(2), vec![false, true], 0, 0, 20)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true], 0, 0, 30)); - assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, false, false, true], 0, 0, 40)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, false, false, false, true], 0, 0, 50)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); - assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(8); - 
assert_ok!(Elections::set_approvals(Origin::signed(6), vec![false, false, true, false], 1, 0, 60)); - assert_ok!(Elections::set_desired_seats(Origin::ROOT, 3)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(10); - assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30 + Elections::get_offset(30, 1) + 60, 1)); - assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40 + Elections::get_offset(40, 1), 1)); - assert_ok!(Elections::end_block(System::block_number())); - - assert!(!Elections::presentation_active()); - assert_eq!(Elections::members(), vec![(1, 11), (5, 11), (3, 15)]); - - assert!(!Elections::is_a_candidate(&1)); - assert!(!Elections::is_a_candidate(&2)); - assert!(!Elections::is_a_candidate(&3)); - assert!(!Elections::is_a_candidate(&5)); - assert!(Elections::is_a_candidate(&4)); - assert_eq!(Elections::vote_index(), 2); - assert_eq!(Elections::voter_info(2), Some( VoterInfo { last_win: 0, last_active: 0, stake: 20, pot: 0})); - assert_eq!(Elections::voter_info(3), Some( VoterInfo { last_win: 2, last_active: 0, stake: 30, pot: 0})); - assert_eq!(Elections::voter_info(4), Some( VoterInfo { last_win: 0, last_active: 0, stake: 40, pot: 0})); - assert_eq!(Elections::voter_info(5), Some( VoterInfo { last_win: 1, last_active: 0, stake: 50, pot: 0})); - assert_eq!( - Elections::voter_info(6), - Some(VoterInfo { last_win: 2, last_active: 1, stake: 60, pot: 0}) - ); - - assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); - }); + ExtBuilder::default().build().execute_with(|| { + System::set_block_number(4); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true], + 0, + 0, + 60 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(2), + vec![false, true], + 0, + 0, + 20 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + 
assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false, true], + 0, + 0, + 30 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, false, false, true], + 0, + 0, + 40 + )); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 4)); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, false, false, false, true], + 0, + 0, + 50 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert_ok!(Elections::present_winner(Origin::signed(4), 1, 60, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 3, 30, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 4, 40, 0)); + assert_ok!(Elections::present_winner(Origin::signed(4), 5, 50, 0)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(8); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![false, false, true, false], + 1, + 0, + 60 + )); + assert_ok!(Elections::set_desired_seats(Origin::ROOT, 3)); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(10); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 3, + 30 + Elections::get_offset(30, 1) + 60, + 1 + )); + assert_ok!(Elections::present_winner( + Origin::signed(4), + 4, + 40 + Elections::get_offset(40, 1), + 1 + )); + assert_ok!(Elections::end_block(System::block_number())); + + assert!(!Elections::presentation_active()); + assert_eq!(Elections::members(), vec![(1, 11), (5, 11), (3, 15)]); + + assert!(!Elections::is_a_candidate(&1)); + assert!(!Elections::is_a_candidate(&2)); + assert!(!Elections::is_a_candidate(&3)); + assert!(!Elections::is_a_candidate(&5)); + assert!(Elections::is_a_candidate(&4)); + assert_eq!(Elections::vote_index(), 2); + assert_eq!( + Elections::voter_info(2), + Some(VoterInfo { + last_win: 0, + last_active: 0, + stake: 20, + pot: 0 + }) 
+ ); + assert_eq!( + Elections::voter_info(3), + Some(VoterInfo { + last_win: 2, + last_active: 0, + stake: 30, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(4), + Some(VoterInfo { + last_win: 0, + last_active: 0, + stake: 40, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(5), + Some(VoterInfo { + last_win: 1, + last_active: 0, + stake: 50, + pot: 0 + }) + ); + assert_eq!( + Elections::voter_info(6), + Some(VoterInfo { + last_win: 2, + last_active: 1, + stake: 60, + pot: 0 + }) + ); + + assert_eq!(Elections::candidate_reg_info(4), Some((0, 3))); + }); } #[test] fn election_loser_candidates_bond_gets_slashed() { - ExtBuilder::default().desired_seats(1).build().execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); - - assert_eq!(balances(&2), (17, 3)); - - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![true], 0, 0, 50)); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, true, true, true], 0, 0, 10) - ); - - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(4), 4, 10, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(3), 3, 10, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(2), 2, 10, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 50, 0), Ok(())); - - - // winner + carry - assert_eq!(Elections::leaderboard(), Some(vec![(10, 3), (10, 4), (50, 1)])); - assert_ok!(Elections::end_block(System::block_number())); - assert!(!Elections::presentation_active()); - assert_eq!(Elections::members(), vec![(1, 11)]); - - // 
account 2 is not a runner up or in leaderboard. - assert_eq!(balances(&2), (17, 0)); - }); + ExtBuilder::default() + .desired_seats(1) + .build() + .execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 3)); + + assert_eq!(balances(&2), (17, 3)); + + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![true], + 0, + 0, + 50 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, true, true, true], + 0, + 0, + 10 + )); + + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(4), 4, 10, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(3), 3, 10, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(2), 2, 10, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 50, 0), + Ok(()) + ); + + // winner + carry + assert_eq!( + Elections::leaderboard(), + Some(vec![(10, 3), (10, 4), (50, 1)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + assert!(!Elections::presentation_active()); + assert_eq!(Elections::members(), vec![(1, 11)]); + + // account 2 is not a runner up or in leaderboard. 
+ assert_eq!(balances(&2), (17, 0)); + }); } #[test] fn pot_accumulating_weight_and_decaying_should_work() { - ExtBuilder::default().balance_factor(10).build().execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); - - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 0, 0, 600) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 0, 0, 500) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 0, 0, 100) - ); - - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Elections::presentation_active()); - - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100, 0), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100, 1), (500, 5), (600, 6)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); - assert_eq!( - Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 1, last_active: 0, stake: 600, pot: 0}, - ); - assert_eq!( - Elections::voter_info(5).unwrap(), - VoterInfo { last_win: 1, last_active: 0, stake: 500, pot: 0}, - ); - assert_eq!( - Elections::voter_info(1).unwrap(), - VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}, - ); - - System::set_block_number(12); - // retract needed to unlock approval funds => submit candidacy again. 
- assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); - assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 1, 0, 600) - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 1, 1, 500) - ); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(14); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 1), 1), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96, 1), (500, 5), (600, 6)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 19), (5, 19)]); - assert_eq!( - Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 2, last_active: 1, stake: 600, pot:0 } - ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 2, last_active: 1, stake: 500, pot:0 }); - assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot:0 }); - - System::set_block_number(20); - assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); - assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false], 2, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true, false], 2, 1, 500)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(22); - 
assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 2), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 2), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 2), 2), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96 + 93, 1), (500, 5), (600, 6)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 27), (5, 27)]); - assert_eq!( - Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 3, last_active: 2, stake: 600, pot: 0} - ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 3, last_active: 2, stake: 500, pot: 0}); - assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}); - - - System::set_block_number(28); - assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); - assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false], 3, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(5), vec![false, true, false], 3, 1, 500)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(30); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 3), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 3), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100 + Elections::get_offset(100, 3), 3), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100 + 96 + 93 + 90, 1), (500, 5), (600, 6)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 35), (5, 35)]); - 
assert_eq!( - Elections::voter_info(6).unwrap(), - VoterInfo { last_win: 4, last_active: 3, stake: 600, pot: 0} - ); - assert_eq!(Elections::voter_info(5).unwrap(), VoterInfo { last_win: 4, last_active: 3, stake: 500, pot: 0}); - assert_eq!(Elections::voter_info(1).unwrap(), VoterInfo { last_win: 0, last_active: 0, stake: 100, pot: 0}); - }) + ExtBuilder::default() + .balance_factor(10) + .build() + .execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + + assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); + + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 0, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 0, + 0, + 500 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, false, true], + 0, + 0, + 100 + )); + + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Elections::presentation_active()); + + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(5), 5, 500, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100, 0), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100, 1), (500, 5), (600, 6)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); + assert_eq!( + Elections::voter_info(6).unwrap(), + VoterInfo { + last_win: 1, + last_active: 0, + stake: 600, + pot: 0 + }, + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { + last_win: 1, + last_active: 0, + stake: 500, + pot: 0 + }, + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { + last_win: 0, + 
last_active: 0, + stake: 100, + pot: 0 + }, + ); + + System::set_block_number(12); + // retract needed to unlock approval funds => submit candidacy again. + assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); + assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 1, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 1, + 1, + 500 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(14); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(5), 5, 500, 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner( + Origin::signed(1), + 1, + 100 + Elections::get_offset(100, 1), + 1 + ), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100 + 96, 1), (500, 5), (600, 6)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 19), (5, 19)]); + assert_eq!( + Elections::voter_info(6).unwrap(), + VoterInfo { + last_win: 2, + last_active: 1, + stake: 600, + pot: 0 + } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { + last_win: 2, + last_active: 1, + stake: 500, + pot: 0 + } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { + last_win: 0, + last_active: 0, + stake: 100, + pot: 0 + } + ); + + System::set_block_number(20); + assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); + assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + 
assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 2, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 2, + 1, + 500 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(22); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 2), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(5), 5, 500, 2), + Ok(()) + ); + assert_eq!( + Elections::present_winner( + Origin::signed(1), + 1, + 100 + Elections::get_offset(100, 2), + 2 + ), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100 + 96 + 93, 1), (500, 5), (600, 6)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 27), (5, 27)]); + assert_eq!( + Elections::voter_info(6).unwrap(), + VoterInfo { + last_win: 3, + last_active: 2, + stake: 600, + pot: 0 + } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { + last_win: 3, + last_active: 2, + stake: 500, + pot: 0 + } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { + last_win: 0, + last_active: 0, + stake: 100, + pot: 0 + } + ); + + System::set_block_number(28); + assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); + assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 3, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 3, + 1, + 500 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(30); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 
3), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(5), 5, 500, 3), + Ok(()) + ); + assert_eq!( + Elections::present_winner( + Origin::signed(1), + 1, + 100 + Elections::get_offset(100, 3), + 3 + ), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100 + 96 + 93 + 90, 1), (500, 5), (600, 6)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 35), (5, 35)]); + assert_eq!( + Elections::voter_info(6).unwrap(), + VoterInfo { + last_win: 4, + last_active: 3, + stake: 600, + pot: 0 + } + ); + assert_eq!( + Elections::voter_info(5).unwrap(), + VoterInfo { + last_win: 4, + last_active: 3, + stake: 500, + pot: 0 + } + ); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { + last_win: 0, + last_active: 0, + stake: 100, + pot: 0 + } + ); + }) } #[test] fn pot_winning_resets_accumulated_pot() { - ExtBuilder::default().balance_factor(10).build().execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(4), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); - assert_ok!(Elections::submit_candidacy(Origin::signed(2), 3)); - - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false, false], 0, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, true, false, false], 0, 1, 400)); - assert_ok!(Elections::set_approvals(Origin::signed(3), vec![false, false, true, true], 0, 2, 300)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(4), 4, 400, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(3), 3, 300, 
0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(2), 2, 300, 0), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(300, 2), (300, 3), (400, 4), (600, 6)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 11), (4, 11)]); - - System::set_block_number(12); - assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); - assert_ok!(Elections::retract_voter(Origin::signed(4), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(4), 1)); - assert_ok!(Elections::set_approvals(Origin::signed(6), vec![true, false, false, false], 1, 0, 600)); - assert_ok!(Elections::set_approvals(Origin::signed(4), vec![false, true, false, false], 1, 1, 400)); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(14); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(4), 4, 400, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(3), 3, 300 + Elections::get_offset(300, 1), 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(2), 2, 300 + Elections::get_offset(300, 1), 1), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(400, 4), (588, 2), (588, 3), (600, 6)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 19), (3, 19)]); - - System::set_block_number(20); - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(22); - // 2 will not get re-elected with 300 + 288, instead just 300. - // because one of 3's candidates (3) won in previous round - // 4 on the other hand will get extra weight since it was unlucky. 
- assert_eq!(Elections::present_winner(Origin::signed(3), 2, 300, 2), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(4), 4, 400 + Elections::get_offset(400, 1), 2), Ok(())); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(4, 27), (2, 27)]); - }) + ExtBuilder::default() + .balance_factor(10) + .build() + .execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + + assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(3), 2)); + assert_ok!(Elections::submit_candidacy(Origin::signed(2), 3)); + + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false, false], + 0, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, true, false, false], + 0, + 1, + 400 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(3), + vec![false, false, true, true], + 0, + 2, + 300 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(4), 4, 400, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(3), 3, 300, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(2), 2, 300, 0), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(300, 2), (300, 3), (400, 4), (600, 6)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 11), (4, 11)]); + + System::set_block_number(12); + assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); + assert_ok!(Elections::retract_voter(Origin::signed(4), 1)); + 
assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(4), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false, false], + 1, + 0, + 600 + )); + assert_ok!(Elections::set_approvals( + Origin::signed(4), + vec![false, true, false, false], + 1, + 1, + 400 + )); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(14); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(4), 4, 400, 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner( + Origin::signed(3), + 3, + 300 + Elections::get_offset(300, 1), + 1 + ), + Ok(()) + ); + assert_eq!( + Elections::present_winner( + Origin::signed(2), + 2, + 300 + Elections::get_offset(300, 1), + 1 + ), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(400, 4), (588, 2), (588, 3), (600, 6)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 19), (3, 19)]); + + System::set_block_number(20); + assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(22); + // 2 will not get re-elected with 300 + 288, instead just 300. + // because one of 3's candidates (3) won in previous round + // 4 on the other hand will get extra weight since it was unlucky. 
+ assert_eq!( + Elections::present_winner(Origin::signed(3), 2, 300, 2), + Ok(()) + ); + assert_eq!( + Elections::present_winner( + Origin::signed(4), + 4, + 400 + Elections::get_offset(400, 1), + 2 + ), + Ok(()) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(4, 27), (2, 27)]); + }) } #[test] fn pot_resubmitting_approvals_stores_pot() { - ExtBuilder::default() - .voter_bond(0) - .voting_fee(0) - .balance_factor(10) - .build() - .execute_with(|| { - System::set_block_number(4); - assert!(!Elections::presentation_active()); - - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); - - assert_ok!( - Elections::set_approvals(Origin::signed(6), vec![true, false, false], 0, 0, 600), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 0, 1, 500), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 0, 2, 100), - ); - - assert_ok!(Elections::end_block(System::block_number())); - - System::set_block_number(6); - assert!(Elections::presentation_active()); - - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 0), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(1), 1, 100, 0), Ok(())); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (100, 1), (500, 5), (600, 6)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); - - System::set_block_number(12); - assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); - assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); - assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); - assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); - assert_ok!( - 
Elections::set_approvals(Origin::signed(6), vec![true, false, false], 1, 0, 600), - ); - assert_ok!( - Elections::set_approvals(Origin::signed(5), vec![false, true, false], 1, 1, 500), - ); - // give 1 some new high balance - let _ = Balances::make_free_balance_be(&1, 997); - assert_ok!( - Elections::set_approvals(Origin::signed(1), vec![false, false, true], 1, 2, 1000), - ); - assert_eq!(Elections::voter_info(1).unwrap(), - VoterInfo { - stake: 1000, // 997 + 3 which is candidacy bond. - pot: Elections::get_offset(100, 1), - last_active: 1, - last_win: 1, - } - ); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); - - System::set_block_number(14); - assert!(Elections::presentation_active()); - assert_eq!(Elections::present_winner(Origin::signed(6), 6, 600, 1), Ok(())); - assert_eq!(Elections::present_winner(Origin::signed(5), 5, 500, 1), Ok(())); - assert_eq!( - Elections::present_winner(Origin::signed(1), 1, 1000 + 96 /* pot */, 1), - Ok(()), - ); - assert_eq!(Elections::leaderboard(), Some(vec![(0, 0), (500, 5), (600, 6), (1096, 1)])); - assert_ok!(Elections::end_block(System::block_number())); - - assert_eq!(Elections::members(), vec![(1, 19), (6, 19)]); - }) + ExtBuilder::default() + .voter_bond(0) + .voting_fee(0) + .balance_factor(10) + .build() + .execute_with(|| { + System::set_block_number(4); + assert!(!Elections::presentation_active()); + + assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(1), 2)); + + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 0, + 0, + 600 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 0, + 1, + 500 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, false, true], + 0, + 2, + 100 + ),); + + 
assert_ok!(Elections::end_block(System::block_number())); + + System::set_block_number(6); + assert!(Elections::presentation_active()); + + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(5), 5, 500, 0), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 100, 0), + Ok(()) + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (100, 1), (500, 5), (600, 6)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); + + System::set_block_number(12); + assert_ok!(Elections::retract_voter(Origin::signed(6), 0)); + assert_ok!(Elections::retract_voter(Origin::signed(5), 1)); + assert_ok!(Elections::submit_candidacy(Origin::signed(6), 0)); + assert_ok!(Elections::submit_candidacy(Origin::signed(5), 1)); + assert_ok!(Elections::set_approvals( + Origin::signed(6), + vec![true, false, false], + 1, + 0, + 600 + ),); + assert_ok!(Elections::set_approvals( + Origin::signed(5), + vec![false, true, false], + 1, + 1, + 500 + ),); + // give 1 some new high balance + let _ = Balances::make_free_balance_be(&1, 997); + assert_ok!(Elections::set_approvals( + Origin::signed(1), + vec![false, false, true], + 1, + 2, + 1000 + ),); + assert_eq!( + Elections::voter_info(1).unwrap(), + VoterInfo { + stake: 1000, // 997 + 3 which is candidacy bond. 
+ pot: Elections::get_offset(100, 1), + last_active: 1, + last_win: 1, + } + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(6, 11), (5, 11)]); + + System::set_block_number(14); + assert!(Elections::presentation_active()); + assert_eq!( + Elections::present_winner(Origin::signed(6), 6, 600, 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(5), 5, 500, 1), + Ok(()) + ); + assert_eq!( + Elections::present_winner(Origin::signed(1), 1, 1000 + 96 /* pot */, 1), + Ok(()), + ); + assert_eq!( + Elections::leaderboard(), + Some(vec![(0, 0), (500, 5), (600, 6), (1096, 1)]) + ); + assert_ok!(Elections::end_block(System::block_number())); + + assert_eq!(Elections::members(), vec![(1, 19), (6, 19)]); + }) } #[test] fn pot_get_offset_should_work() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(Elections::get_offset(100, 0), 0); - assert_eq!(Elections::get_offset(100, 1), 96); - assert_eq!(Elections::get_offset(100, 2), 96 + 93); - assert_eq!(Elections::get_offset(100, 3), 96 + 93 + 90); - assert_eq!(Elections::get_offset(100, 4), 96 + 93 + 90 + 87); - // limit - assert_eq!(Elections::get_offset(100, 1000), 100 * 24); - - assert_eq!(Elections::get_offset(50_000_000_000, 0), 0); - assert_eq!(Elections::get_offset(50_000_000_000, 1), 48_000_000_000); - assert_eq!(Elections::get_offset(50_000_000_000, 2), 48_000_000_000 + 46_080_000_000); - assert_eq!(Elections::get_offset(50_000_000_000, 3), 48_000_000_000 + 46_080_000_000 + 44_236_800_000); - assert_eq!( - Elections::get_offset(50_000_000_000, 4), - 48_000_000_000 + 46_080_000_000 + 44_236_800_000 + 42_467_328_000 - ); - // limit - assert_eq!(Elections::get_offset(50_000_000_000, 1000), 50_000_000_000 * 24); - }) + ExtBuilder::default().build().execute_with(|| { + assert_eq!(Elections::get_offset(100, 0), 0); + assert_eq!(Elections::get_offset(100, 1), 96); + assert_eq!(Elections::get_offset(100, 2), 96 + 93); + 
assert_eq!(Elections::get_offset(100, 3), 96 + 93 + 90); + assert_eq!(Elections::get_offset(100, 4), 96 + 93 + 90 + 87); + // limit + assert_eq!(Elections::get_offset(100, 1000), 100 * 24); + + assert_eq!(Elections::get_offset(50_000_000_000, 0), 0); + assert_eq!(Elections::get_offset(50_000_000_000, 1), 48_000_000_000); + assert_eq!( + Elections::get_offset(50_000_000_000, 2), + 48_000_000_000 + 46_080_000_000 + ); + assert_eq!( + Elections::get_offset(50_000_000_000, 3), + 48_000_000_000 + 46_080_000_000 + 44_236_800_000 + ); + assert_eq!( + Elections::get_offset(50_000_000_000, 4), + 48_000_000_000 + 46_080_000_000 + 44_236_800_000 + 42_467_328_000 + ); + // limit + assert_eq!( + Elections::get_offset(50_000_000_000, 1000), + 50_000_000_000 * 24 + ); + }) } #[test] fn pot_get_offset_with_zero_decay() { - ExtBuilder::default().decay_ratio(0).build().execute_with(|| { - assert_eq!(Elections::get_offset(100, 0), 0); - assert_eq!(Elections::get_offset(100, 1), 0); - assert_eq!(Elections::get_offset(100, 2), 0); - assert_eq!(Elections::get_offset(100, 3), 0); - // limit - assert_eq!(Elections::get_offset(100, 1000), 0); - }) + ExtBuilder::default() + .decay_ratio(0) + .build() + .execute_with(|| { + assert_eq!(Elections::get_offset(100, 0), 0); + assert_eq!(Elections::get_offset(100, 1), 0); + assert_eq!(Elections::get_offset(100, 2), 0); + assert_eq!(Elections::get_offset(100, 3), 0); + // limit + assert_eq!(Elections::get_offset(100, 1000), 0); + }) } diff --git a/frame/evm/src/backend.rs b/frame/evm/src/backend.rs index c610f24bb1..f979c53bee 100644 --- a/frame/evm/src/backend.rs +++ b/frame/evm/src/backend.rs @@ -1,182 +1,187 @@ -use sp_std::marker::PhantomData; -use sp_std::vec::Vec; +use crate::{AccountCodes, AccountStorages, Accounts, Event, Module, Trait}; +use codec::{Decode, Encode}; +use evm::backend::{Apply, ApplyBackend, Backend as BackendT}; +use frame_support::storage::{StorageDoubleMap, StorageMap}; #[cfg(feature = "std")] -use serde::{Serialize, 
Deserialize}; -use codec::{Encode, Decode}; -use sp_core::{U256, H256, H160}; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Keccak256}; +use sp_core::{H160, H256, U256}; use sp_runtime::traits::UniqueSaturatedInto; -use frame_support::storage::{StorageMap, StorageDoubleMap}; -use sha3::{Keccak256, Digest}; -use evm::backend::{Backend as BackendT, ApplyBackend, Apply}; -use crate::{Trait, Accounts, AccountStorages, AccountCodes, Module, Event}; +use sp_std::marker::PhantomData; +use sp_std::vec::Vec; #[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] /// Ethereum account nonce, balance and code. Used by storage. pub struct Account { - /// Account nonce. - pub nonce: U256, - /// Account balance. - pub balance: U256, + /// Account nonce. + pub nonce: U256, + /// Account balance. + pub balance: U256, } #[derive(Clone, Eq, PartialEq, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] /// Ethereum log. Used for `deposit_event`. pub struct Log { - /// Source address of the log. - pub address: H160, - /// Topics of the log. - pub topics: Vec, - /// Byte array data of the log. - pub data: Vec, + /// Source address of the log. + pub address: H160, + /// Topics of the log. + pub topics: Vec, + /// Byte array data of the log. + pub data: Vec, } #[derive(Clone, Eq, PartialEq, Encode, Decode, Default)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] /// External input from the transaction. pub struct Vicinity { - /// Current transaction gas price. - pub gas_price: U256, - /// Origin of the transaction. - pub origin: H160, + /// Current transaction gas price. + pub gas_price: U256, + /// Origin of the transaction. + pub origin: H160, } /// Substrate backend for EVM. 
pub struct Backend<'vicinity, T> { - vicinity: &'vicinity Vicinity, - _marker: PhantomData, + vicinity: &'vicinity Vicinity, + _marker: PhantomData, } impl<'vicinity, T> Backend<'vicinity, T> { - /// Create a new backend with given vicinity. - pub fn new(vicinity: &'vicinity Vicinity) -> Self { - Self { vicinity, _marker: PhantomData } - } + /// Create a new backend with given vicinity. + pub fn new(vicinity: &'vicinity Vicinity) -> Self { + Self { + vicinity, + _marker: PhantomData, + } + } } impl<'vicinity, T: Trait> BackendT for Backend<'vicinity, T> { - fn gas_price(&self) -> U256 { self.vicinity.gas_price } - fn origin(&self) -> H160 { self.vicinity.origin } - - fn block_hash(&self, number: U256) -> H256 { - if number > U256::from(u32::max_value()) { - H256::default() - } else { - let number = T::BlockNumber::from(number.as_u32()); - H256::from_slice(frame_system::Module::::block_hash(number).as_ref()) - } - } - - fn block_number(&self) -> U256 { - let number: u128 = frame_system::Module::::block_number().unique_saturated_into(); - U256::from(number) - } - - fn block_coinbase(&self) -> H160 { - H160::default() - } - - fn block_timestamp(&self) -> U256 { - let now: u128 = pallet_timestamp::Module::::get().unique_saturated_into(); - U256::from(now) - } - - fn block_difficulty(&self) -> U256 { - U256::zero() - } - - fn block_gas_limit(&self) -> U256 { - U256::zero() - } - - fn chain_id(&self) -> U256 { - U256::from(sp_io::misc::chain_id()) - } - - fn exists(&self, _address: H160) -> bool { - true - } - - fn basic(&self, address: H160) -> evm::backend::Basic { - let account = Accounts::get(&address); - - evm::backend::Basic { - balance: account.balance, - nonce: account.nonce, - } - } - - fn code_size(&self, address: H160) -> usize { - AccountCodes::decode_len(&address).unwrap_or(0) - } - - fn code_hash(&self, address: H160) -> H256 { - H256::from_slice(Keccak256::digest(&AccountCodes::get(&address)).as_slice()) - } - - fn code(&self, address: H160) -> Vec { - 
AccountCodes::get(&address) - } - - fn storage(&self, address: H160, index: H256) -> H256 { - AccountStorages::get(address, index) - } + fn gas_price(&self) -> U256 { + self.vicinity.gas_price + } + fn origin(&self) -> H160 { + self.vicinity.origin + } + + fn block_hash(&self, number: U256) -> H256 { + if number > U256::from(u32::max_value()) { + H256::default() + } else { + let number = T::BlockNumber::from(number.as_u32()); + H256::from_slice(frame_system::Module::::block_hash(number).as_ref()) + } + } + + fn block_number(&self) -> U256 { + let number: u128 = frame_system::Module::::block_number().unique_saturated_into(); + U256::from(number) + } + + fn block_coinbase(&self) -> H160 { + H160::default() + } + + fn block_timestamp(&self) -> U256 { + let now: u128 = pallet_timestamp::Module::::get().unique_saturated_into(); + U256::from(now) + } + + fn block_difficulty(&self) -> U256 { + U256::zero() + } + + fn block_gas_limit(&self) -> U256 { + U256::zero() + } + + fn chain_id(&self) -> U256 { + U256::from(sp_io::misc::chain_id()) + } + + fn exists(&self, _address: H160) -> bool { + true + } + + fn basic(&self, address: H160) -> evm::backend::Basic { + let account = Accounts::get(&address); + + evm::backend::Basic { + balance: account.balance, + nonce: account.nonce, + } + } + + fn code_size(&self, address: H160) -> usize { + AccountCodes::decode_len(&address).unwrap_or(0) + } + + fn code_hash(&self, address: H160) -> H256 { + H256::from_slice(Keccak256::digest(&AccountCodes::get(&address)).as_slice()) + } + + fn code(&self, address: H160) -> Vec { + AccountCodes::get(&address) + } + + fn storage(&self, address: H160, index: H256) -> H256 { + AccountStorages::get(address, index) + } } impl<'vicinity, T: Trait> ApplyBackend for Backend<'vicinity, T> { - fn apply( - &mut self, - values: A, - logs: L, - delete_empty: bool, - ) where - A: IntoIterator>, - I: IntoIterator, - L: IntoIterator, - { - for apply in values { - match apply { - Apply::Modify { - address, basic, 
code, storage, reset_storage, - } => { - Accounts::mutate(&address, |account| { - account.balance = basic.balance; - account.nonce = basic.nonce; - }); - - if let Some(code) = code { - AccountCodes::insert(address, code); - } - - if reset_storage { - AccountStorages::remove_prefix(address); - } - - for (index, value) in storage { - if value == H256::default() { - AccountStorages::remove(address, index); - } else { - AccountStorages::insert(address, index, value); - } - } - - if delete_empty { - Module::::remove_account_if_empty(&address); - } - }, - Apply::Delete { address } => { - Module::::remove_account(&address) - }, - } - } - - for log in logs { - Module::::deposit_event(Event::::Log(Log { - address: log.address, - topics: log.topics, - data: log.data, - })); - } - } + fn apply(&mut self, values: A, logs: L, delete_empty: bool) + where + A: IntoIterator>, + I: IntoIterator, + L: IntoIterator, + { + for apply in values { + match apply { + Apply::Modify { + address, + basic, + code, + storage, + reset_storage, + } => { + Accounts::mutate(&address, |account| { + account.balance = basic.balance; + account.nonce = basic.nonce; + }); + + if let Some(code) = code { + AccountCodes::insert(address, code); + } + + if reset_storage { + AccountStorages::remove_prefix(address); + } + + for (index, value) in storage { + if value == H256::default() { + AccountStorages::remove(address, index); + } else { + AccountStorages::insert(address, index, value); + } + } + + if delete_empty { + Module::::remove_account_if_empty(&address); + } + } + Apply::Delete { address } => Module::::remove_account(&address), + } + } + + for log in logs { + Module::::deposit_event(Event::::Log(Log { + address: log.address, + topics: log.topics, + data: log.data, + })); + } + } } diff --git a/frame/evm/src/lib.rs b/frame/evm/src/lib.rs index f67ab767ed..d8b9abe979 100644 --- a/frame/evm/src/lib.rs +++ b/frame/evm/src/lib.rs @@ -21,37 +21,41 @@ mod backend; -pub use crate::backend::{Account, Log, 
Vicinity, Backend}; +pub use crate::backend::{Account, Backend, Log, Vicinity}; -use sp_std::{vec::Vec, marker::PhantomData}; -use frame_support::{ensure, decl_module, decl_storage, decl_event, decl_error}; -use frame_support::weights::{Weight, MINIMUM_WEIGHT, DispatchClass, FunctionOf}; -use frame_support::traits::{Currency, WithdrawReason, ExistenceRequirement}; +use evm::backend::ApplyBackend; +use evm::executor::StackExecutor; +use evm::{Config, ExitError, ExitReason, ExitSucceed}; +use frame_support::traits::{Currency, ExistenceRequirement, WithdrawReason}; +use frame_support::weights::SimpleDispatchInfo; +use frame_support::weights::{DispatchClass, FunctionOf, Weight, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure}; use frame_system::{self as system, ensure_signed}; +use sha3::{Digest, Keccak256}; +use sp_core::{Hasher, H160, H256, U256}; use sp_runtime::ModuleId; -use frame_support::weights::SimpleDispatchInfo; -use sp_core::{U256, H256, H160, Hasher}; use sp_runtime::{ - DispatchResult, traits::{UniqueSaturatedInto, AccountIdConversion, SaturatedConversion}, + traits::{AccountIdConversion, SaturatedConversion, UniqueSaturatedInto}, + DispatchResult, }; -use sha3::{Digest, Keccak256}; -use evm::{ExitReason, ExitSucceed, ExitError, Config}; -use evm::executor::StackExecutor; -use evm::backend::ApplyBackend; +use sp_std::{marker::PhantomData, vec::Vec}; const MODULE_ID: ModuleId = ModuleId(*b"py/ethvm"); /// Type alias for currency balance. -pub type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +pub type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// Trait that outputs the current transaction gas price. pub trait FeeCalculator { - /// Return the minimal required gas price. - fn min_gas_price() -> U256; + /// Return the minimal required gas price. 
+ fn min_gas_price() -> U256; } impl FeeCalculator for () { - fn min_gas_price() -> U256 { U256::zero() } + fn min_gas_price() -> U256 { + U256::zero() + } } /// Trait for converting account ids of `balances` module into @@ -62,370 +66,380 @@ impl FeeCalculator for () { /// with the rest of Substrate module, we require an one-to-one /// mapping of Substrate account to Ethereum address. pub trait ConvertAccountId { - /// Given a Substrate address, return the corresponding Ethereum address. - fn convert_account_id(account_id: &A) -> H160; + /// Given a Substrate address, return the corresponding Ethereum address. + fn convert_account_id(account_id: &A) -> H160; } /// Hash and then truncate the account id, taking the last 160-bit as the Ethereum address. pub struct HashTruncateConvertAccountId(PhantomData); impl Default for HashTruncateConvertAccountId { - fn default() -> Self { - Self(PhantomData) - } + fn default() -> Self { + Self(PhantomData) + } } impl> ConvertAccountId for HashTruncateConvertAccountId { - fn convert_account_id(account_id: &A) -> H160 { - let account_id = H::hash(account_id.as_ref()); - let account_id_len = account_id.as_ref().len(); - let mut value = [0u8; 20]; - let value_len = value.len(); - - if value_len > account_id_len { - value[(value_len - account_id_len)..].copy_from_slice(account_id.as_ref()); - } else { - value.copy_from_slice(&account_id.as_ref()[(account_id_len - value_len)..]); - } - - H160::from(value) - } + fn convert_account_id(account_id: &A) -> H160 { + let account_id = H::hash(account_id.as_ref()); + let account_id_len = account_id.as_ref().len(); + let mut value = [0u8; 20]; + let value_len = value.len(); + + if value_len > account_id_len { + value[(value_len - account_id_len)..].copy_from_slice(account_id.as_ref()); + } else { + value.copy_from_slice(&account_id.as_ref()[(account_id_len - value_len)..]); + } + + H160::from(value) + } } /// Custom precompiles to be used by EVM engine. 
pub trait Precompiles { - /// Try to execute the code address as precompile. If the code address is not - /// a precompile or the precompile is not yet available, return `None`. - /// Otherwise, calculate the amount of gas needed with given `input` and - /// `target_gas`. Return `Some(Ok(status, output, gas_used))` if the execution - /// is successful. Otherwise return `Some(Err(_))`. - fn execute( - address: H160, - input: &[u8], - target_gas: Option - ) -> Option, usize), ExitError>>; + /// Try to execute the code address as precompile. If the code address is not + /// a precompile or the precompile is not yet available, return `None`. + /// Otherwise, calculate the amount of gas needed with given `input` and + /// `target_gas`. Return `Some(Ok(status, output, gas_used))` if the execution + /// is successful. Otherwise return `Some(Err(_))`. + fn execute( + address: H160, + input: &[u8], + target_gas: Option, + ) -> Option, usize), ExitError>>; } impl Precompiles for () { - fn execute( - _address: H160, - _input: &[u8], - _target_gas: Option - ) -> Option, usize), ExitError>> { - None - } + fn execute( + _address: H160, + _input: &[u8], + _target_gas: Option, + ) -> Option, usize), ExitError>> { + None + } } static ISTANBUL_CONFIG: Config = Config::istanbul(); /// EVM module trait pub trait Trait: frame_system::Trait + pallet_timestamp::Trait { - /// Calculator for current gas price. - type FeeCalculator: FeeCalculator; - /// Convert account ID to H160; - type ConvertAccountId: ConvertAccountId; - /// Currency type for deposit and withdraw. - type Currency: Currency; - /// The overarching event type. - type Event: From> + Into<::Event>; - /// Precompiles associated with this EVM engine. - type Precompiles: Precompiles; - - /// EVM config used in the module. - fn config() -> &'static Config { - &ISTANBUL_CONFIG - } + /// Calculator for current gas price. 
+ type FeeCalculator: FeeCalculator; + /// Convert account ID to H160; + type ConvertAccountId: ConvertAccountId; + /// Currency type for deposit and withdraw. + type Currency: Currency; + /// The overarching event type. + type Event: From> + Into<::Event>; + /// Precompiles associated with this EVM engine. + type Precompiles: Precompiles; + + /// EVM config used in the module. + fn config() -> &'static Config { + &ISTANBUL_CONFIG + } } decl_storage! { - trait Store for Module as EVM { - Accounts get(fn accounts) config(): map hasher(blake2_128_concat) H160 => Account; - AccountCodes: map hasher(blake2_128_concat) H160 => Vec; - AccountStorages: double_map hasher(blake2_128_concat) H160, hasher(blake2_128_concat) H256 => H256; - } + trait Store for Module as EVM { + Accounts get(fn accounts) config(): map hasher(blake2_128_concat) H160 => Account; + AccountCodes: map hasher(blake2_128_concat) H160 => Vec; + AccountStorages: double_map hasher(blake2_128_concat) H160, hasher(blake2_128_concat) H256 => H256; + } } decl_event! { - /// EVM events - pub enum Event where - ::AccountId, - { - /// Ethereum events from contracts. - Log(Log), - /// A contract has been created at given address. - Created(H160), - /// A deposit has been made at a given address. - BalanceDeposit(AccountId, H160, U256), - /// A withdrawal has been made from a given address. - BalanceWithdraw(AccountId, H160, U256), - } + /// EVM events + pub enum Event where + ::AccountId, + { + /// Ethereum events from contracts. + Log(Log), + /// A contract has been created at given address. + Created(H160), + /// A deposit has been made at a given address. + BalanceDeposit(AccountId, H160, U256), + /// A withdrawal has been made from a given address. + BalanceWithdraw(AccountId, H160, U256), + } } decl_error! 
{ - pub enum Error for Module { - /// Not enough balance to perform action - BalanceLow, - /// Calculating total fee overflowed - FeeOverflow, - /// Calculating total payment overflowed - PaymentOverflow, - /// Withdraw fee failed - WithdrawFailed, - /// Gas price is too low. - GasPriceTooLow, - /// Call failed - ExitReasonFailed, - /// Call reverted - ExitReasonRevert, - /// Call returned VM fatal error - ExitReasonFatal, - /// Nonce is invalid - InvalidNonce, - } + pub enum Error for Module { + /// Not enough balance to perform action + BalanceLow, + /// Calculating total fee overflowed + FeeOverflow, + /// Calculating total payment overflowed + PaymentOverflow, + /// Withdraw fee failed + WithdrawFailed, + /// Gas price is too low. + GasPriceTooLow, + /// Call failed + ExitReasonFailed, + /// Call reverted + ExitReasonRevert, + /// Call returned VM fatal error + ExitReasonFatal, + /// Nonce is invalid + InvalidNonce, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Deposit balance from currency/balances module into EVM. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn deposit_balance(origin, value: BalanceOf) { - let sender = ensure_signed(origin)?; - - let imbalance = T::Currency::withdraw( - &sender, - value, - WithdrawReason::Reserve.into(), - ExistenceRequirement::AllowDeath, - )?; - T::Currency::resolve_creating(&Self::account_id(), imbalance); - - let bvalue = U256::from(UniqueSaturatedInto::::unique_saturated_into(value)); - let address = T::ConvertAccountId::convert_account_id(&sender); - Accounts::mutate(&address, |account| { - account.balance += bvalue; - }); - Module::::deposit_event(Event::::BalanceDeposit(sender, address, bvalue)); - } - - /// Withdraw balance from EVM into currency/balances module. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn withdraw_balance(origin, value: BalanceOf) { - let sender = ensure_signed(origin)?; - let address = T::ConvertAccountId::convert_account_id(&sender); - let bvalue = U256::from(UniqueSaturatedInto::::unique_saturated_into(value)); - - let mut account = Accounts::get(&address); - account.balance = account.balance.checked_sub(bvalue) - .ok_or(Error::::BalanceLow)?; - - let imbalance = T::Currency::withdraw( - &Self::account_id(), - value, - WithdrawReason::Reserve.into(), - ExistenceRequirement::AllowDeath - )?; - - Accounts::insert(&address, account); - - T::Currency::resolve_creating(&sender, imbalance); - Module::::deposit_event(Event::::BalanceWithdraw(sender, address, bvalue)); - } - - /// Issue an EVM call operation. This is similar to a message call transaction in Ethereum. - #[weight = FunctionOf(|(_, _, _, gas_limit, gas_price, _): (&H160, &Vec, &U256, &u32, &U256, &Option)| (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight), DispatchClass::Normal, true)] - fn call( - origin, - target: H160, - input: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - let source = T::ConvertAccountId::convert_account_id(&sender); - - Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - |executor| ((), executor.transact_call( - source, - target, - value, - input, - gas_limit as usize, - )), - ).map_err(Into::into) - } - - /// Issue an EVM create operation. This is similar to a contract creation transaction in - /// Ethereum. 
- #[weight = FunctionOf(|(_, _, gas_limit, gas_price, _): (&Vec, &U256, &u32, &U256, &Option)| (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight), DispatchClass::Normal, true)] - fn create( - origin, - init: Vec, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - let source = T::ConvertAccountId::convert_account_id(&sender); - - let create_address = Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - |executor| { - (executor.create_address( - evm::CreateScheme::Legacy { caller: source }, - ), executor.transact_create( - source, - value, - init, - gas_limit as usize, - )) - }, - )?; - - Module::::deposit_event(Event::::Created(create_address)); - Ok(()) - } - - /// Issue an EVM create2 operation. - #[weight = FunctionOf(|(_, _, _, gas_limit, gas_price, _): (&Vec, &H256, &U256, &u32, &U256, &Option)| (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight), DispatchClass::Normal, true)] - fn create2( - origin, - init: Vec, - salt: H256, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - ) -> DispatchResult { - let sender = ensure_signed(origin)?; - let source = T::ConvertAccountId::convert_account_id(&sender); - - let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice()); - let create_address = Self::execute_evm( - source, - value, - gas_limit, - gas_price, - nonce, - |executor| { - (executor.create_address( - evm::CreateScheme::Create2 { caller: source, code_hash, salt }, - ), executor.transact_create2( - source, - value, - init, - salt, - gas_limit as usize, - )) - }, - )?; - - Module::::deposit_event(Event::::Created(create_address)); - Ok(()) - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Deposit balance from currency/balances module into EVM. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn deposit_balance(origin, value: BalanceOf) { + let sender = ensure_signed(origin)?; + + let imbalance = T::Currency::withdraw( + &sender, + value, + WithdrawReason::Reserve.into(), + ExistenceRequirement::AllowDeath, + )?; + T::Currency::resolve_creating(&Self::account_id(), imbalance); + + let bvalue = U256::from(UniqueSaturatedInto::::unique_saturated_into(value)); + let address = T::ConvertAccountId::convert_account_id(&sender); + Accounts::mutate(&address, |account| { + account.balance += bvalue; + }); + Module::::deposit_event(Event::::BalanceDeposit(sender, address, bvalue)); + } + + /// Withdraw balance from EVM into currency/balances module. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn withdraw_balance(origin, value: BalanceOf) { + let sender = ensure_signed(origin)?; + let address = T::ConvertAccountId::convert_account_id(&sender); + let bvalue = U256::from(UniqueSaturatedInto::::unique_saturated_into(value)); + + let mut account = Accounts::get(&address); + account.balance = account.balance.checked_sub(bvalue) + .ok_or(Error::::BalanceLow)?; + + let imbalance = T::Currency::withdraw( + &Self::account_id(), + value, + WithdrawReason::Reserve.into(), + ExistenceRequirement::AllowDeath + )?; + + Accounts::insert(&address, account); + + T::Currency::resolve_creating(&sender, imbalance); + Module::::deposit_event(Event::::BalanceWithdraw(sender, address, bvalue)); + } + + /// Issue an EVM call operation. This is similar to a message call transaction in Ethereum. 
+ #[weight = FunctionOf(|(_, _, _, gas_limit, gas_price, _): (&H160, &Vec, &U256, &u32, &U256, &Option)| (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight), DispatchClass::Normal, true)] + fn call( + origin, + target: H160, + input: Vec, + value: U256, + gas_limit: u32, + gas_price: U256, + nonce: Option, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + let source = T::ConvertAccountId::convert_account_id(&sender); + + Self::execute_evm( + source, + value, + gas_limit, + gas_price, + nonce, + |executor| ((), executor.transact_call( + source, + target, + value, + input, + gas_limit as usize, + )), + ).map_err(Into::into) + } + + /// Issue an EVM create operation. This is similar to a contract creation transaction in + /// Ethereum. + #[weight = FunctionOf(|(_, _, gas_limit, gas_price, _): (&Vec, &U256, &u32, &U256, &Option)| (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight), DispatchClass::Normal, true)] + fn create( + origin, + init: Vec, + value: U256, + gas_limit: u32, + gas_price: U256, + nonce: Option, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + let source = T::ConvertAccountId::convert_account_id(&sender); + + let create_address = Self::execute_evm( + source, + value, + gas_limit, + gas_price, + nonce, + |executor| { + (executor.create_address( + evm::CreateScheme::Legacy { caller: source }, + ), executor.transact_create( + source, + value, + init, + gas_limit as usize, + )) + }, + )?; + + Module::::deposit_event(Event::::Created(create_address)); + Ok(()) + } + + /// Issue an EVM create2 operation. 
+ #[weight = FunctionOf(|(_, _, _, gas_limit, gas_price, _): (&Vec, &H256, &U256, &u32, &U256, &Option)| (*gas_price).saturated_into::().saturating_mul(*gas_limit as Weight), DispatchClass::Normal, true)] + fn create2( + origin, + init: Vec, + salt: H256, + value: U256, + gas_limit: u32, + gas_price: U256, + nonce: Option, + ) -> DispatchResult { + let sender = ensure_signed(origin)?; + let source = T::ConvertAccountId::convert_account_id(&sender); + + let code_hash = H256::from_slice(Keccak256::digest(&init).as_slice()); + let create_address = Self::execute_evm( + source, + value, + gas_limit, + gas_price, + nonce, + |executor| { + (executor.create_address( + evm::CreateScheme::Create2 { caller: source, code_hash, salt }, + ), executor.transact_create2( + source, + value, + init, + salt, + gas_limit as usize, + )) + }, + )?; + + Module::::deposit_event(Event::::Created(create_address)); + Ok(()) + } + } } impl Module { - /// The account ID of the EVM module. - /// - /// This actually does computation. If you need to keep using it, then make sure you cache the - /// value and only call this once. - pub fn account_id() -> T::AccountId { - MODULE_ID.into_account() - } - - /// Check whether an account is empty. - pub fn is_account_empty(address: &H160) -> bool { - let account = Accounts::get(address); - let code_len = AccountCodes::decode_len(address).unwrap_or(0); - - account.nonce == U256::zero() && - account.balance == U256::zero() && - code_len == 0 - } - - /// Remove an account if its empty. - pub fn remove_account_if_empty(address: &H160) { - if Self::is_account_empty(address) { - Self::remove_account(address) - } - } - - /// Remove an account from state. - fn remove_account(address: &H160) { - Accounts::remove(address); - AccountCodes::remove(address); - AccountStorages::remove_prefix(address); - } - - /// Execute an EVM operation. 
- fn execute_evm( - source: H160, - value: U256, - gas_limit: u32, - gas_price: U256, - nonce: Option, - f: F, - ) -> Result> where - F: FnOnce(&mut StackExecutor>) -> (R, ExitReason), - { - ensure!(gas_price >= T::FeeCalculator::min_gas_price(), Error::::GasPriceTooLow); - - let vicinity = Vicinity { - gas_price, - origin: source, - }; - - let mut backend = Backend::::new(&vicinity); - let mut executor = StackExecutor::new_with_precompile( - &backend, - gas_limit as usize, - T::config(), - T::Precompiles::execute, - ); - - let total_fee = gas_price.checked_mul(U256::from(gas_limit)) - .ok_or(Error::::FeeOverflow)?; - let total_payment = value.checked_add(total_fee).ok_or(Error::::PaymentOverflow)?; - let source_account = Accounts::get(&source); - ensure!(source_account.balance >= total_payment, Error::::BalanceLow); - executor.withdraw(source, total_fee).map_err(|_| Error::::WithdrawFailed)?; - - if let Some(nonce) = nonce { - ensure!(source_account.nonce == nonce, Error::::InvalidNonce); - } - - let (retv, reason) = f(&mut executor); - - let ret = match reason { - ExitReason::Succeed(_) => Ok(retv), - ExitReason::Error(_) => Err(Error::::ExitReasonFailed), - ExitReason::Revert(_) => Err(Error::::ExitReasonRevert), - ExitReason::Fatal(_) => Err(Error::::ExitReasonFatal), - }; - - let actual_fee = executor.fee(gas_price); - executor.deposit(source, total_fee.saturating_sub(actual_fee)); - - let (values, logs) = executor.deconstruct(); - backend.apply(values, logs, true); - - ret - } + /// The account ID of the EVM module. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + MODULE_ID.into_account() + } + + /// Check whether an account is empty. 
+ pub fn is_account_empty(address: &H160) -> bool { + let account = Accounts::get(address); + let code_len = AccountCodes::decode_len(address).unwrap_or(0); + + account.nonce == U256::zero() && account.balance == U256::zero() && code_len == 0 + } + + /// Remove an account if its empty. + pub fn remove_account_if_empty(address: &H160) { + if Self::is_account_empty(address) { + Self::remove_account(address) + } + } + + /// Remove an account from state. + fn remove_account(address: &H160) { + Accounts::remove(address); + AccountCodes::remove(address); + AccountStorages::remove_prefix(address); + } + + /// Execute an EVM operation. + fn execute_evm( + source: H160, + value: U256, + gas_limit: u32, + gas_price: U256, + nonce: Option, + f: F, + ) -> Result> + where + F: FnOnce(&mut StackExecutor>) -> (R, ExitReason), + { + ensure!( + gas_price >= T::FeeCalculator::min_gas_price(), + Error::::GasPriceTooLow + ); + + let vicinity = Vicinity { + gas_price, + origin: source, + }; + + let mut backend = Backend::::new(&vicinity); + let mut executor = StackExecutor::new_with_precompile( + &backend, + gas_limit as usize, + T::config(), + T::Precompiles::execute, + ); + + let total_fee = gas_price + .checked_mul(U256::from(gas_limit)) + .ok_or(Error::::FeeOverflow)?; + let total_payment = value + .checked_add(total_fee) + .ok_or(Error::::PaymentOverflow)?; + let source_account = Accounts::get(&source); + ensure!( + source_account.balance >= total_payment, + Error::::BalanceLow + ); + executor + .withdraw(source, total_fee) + .map_err(|_| Error::::WithdrawFailed)?; + + if let Some(nonce) = nonce { + ensure!(source_account.nonce == nonce, Error::::InvalidNonce); + } + + let (retv, reason) = f(&mut executor); + + let ret = match reason { + ExitReason::Succeed(_) => Ok(retv), + ExitReason::Error(_) => Err(Error::::ExitReasonFailed), + ExitReason::Revert(_) => Err(Error::::ExitReasonRevert), + ExitReason::Fatal(_) => Err(Error::::ExitReasonFatal), + }; + + let actual_fee = 
executor.fee(gas_price); + executor.deposit(source, total_fee.saturating_sub(actual_fee)); + + let (values, logs) = executor.deconstruct(); + backend.apply(values, logs, true); + + ret + } } diff --git a/frame/example-offchain-worker/src/lib.rs b/frame/example-offchain-worker/src/lib.rs index 29a4859c78..79dd9cbb3a 100644 --- a/frame/example-offchain-worker/src/lib.rs +++ b/frame/example-offchain-worker/src/lib.rs @@ -41,23 +41,23 @@ #![cfg_attr(not(feature = "std"), no_std)] use frame_support::{ - debug, - dispatch::DispatchResult, decl_module, decl_storage, decl_event, - traits::Get, - weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, + debug, decl_event, decl_module, decl_storage, + dispatch::DispatchResult, + traits::Get, + weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, }; -use frame_system::{self as system, ensure_signed, ensure_none, offchain}; +use frame_system::{self as system, ensure_none, ensure_signed, offchain}; +use lite_json::json::JsonValue; use sp_core::crypto::KeyTypeId; use sp_runtime::{ - offchain::{http, Duration, storage::StorageValueRef}, - traits::Zero, - transaction_validity::{ - InvalidTransaction, ValidTransaction, TransactionValidity, TransactionSource, - TransactionPriority, - }, + offchain::{http, storage::StorageValueRef, Duration}, + traits::Zero, + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, + }, }; use sp_std::vec::Vec; -use lite_json::json::JsonValue; #[cfg(test)] mod tests; @@ -75,181 +75,182 @@ pub const KEY_TYPE: KeyTypeId = KeyTypeId(*b"btc!"); /// We can use from supported crypto kinds (`sr25519`, `ed25519` and `ecdsa`) and augment /// the types with this pallet-specific identifier. 
pub mod crypto { - use super::KEY_TYPE; - use sp_runtime::app_crypto::{app_crypto, sr25519}; - app_crypto!(sr25519, KEY_TYPE); + use super::KEY_TYPE; + use sp_runtime::app_crypto::{app_crypto, sr25519}; + app_crypto!(sr25519, KEY_TYPE); } /// This pallet's configuration trait pub trait Trait: frame_system::Trait { - /// The type to sign and submit transactions. - type SubmitSignedTransaction: - offchain::SubmitSignedTransaction::Call>; - /// The type to submit unsigned transactions. - type SubmitUnsignedTransaction: - offchain::SubmitUnsignedTransaction::Call>; - - /// The overarching event type. - type Event: From> + Into<::Event>; - /// The overarching dispatch call type. - type Call: From>; - - // Configuration parameters - - /// A grace period after we send transaction. - /// - /// To avoid sending too many transactions, we only attempt to send one - /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate - /// sending between distinct runs of this offchain worker. - type GracePeriod: Get; - - /// Number of blocks of cooldown after unsigned transaction is included. - /// - /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` blocks. - type UnsignedInterval: Get; - - /// A configuration for base priority of unsigned transactions. - /// - /// This is exposed so that it can be tuned for particular runtime, when - /// multiple pallets send unsigned transactions. - type UnsignedPriority: Get; + /// The type to sign and submit transactions. + type SubmitSignedTransaction: offchain::SubmitSignedTransaction::Call>; + /// The type to submit unsigned transactions. + type SubmitUnsignedTransaction: offchain::SubmitUnsignedTransaction::Call>; + + /// The overarching event type. + type Event: From> + Into<::Event>; + /// The overarching dispatch call type. + type Call: From>; + + // Configuration parameters + + /// A grace period after we send transaction. 
+ /// + /// To avoid sending too many transactions, we only attempt to send one + /// every `GRACE_PERIOD` blocks. We use Local Storage to coordinate + /// sending between distinct runs of this offchain worker. + type GracePeriod: Get; + + /// Number of blocks of cooldown after unsigned transaction is included. + /// + /// This ensures that we only accept unsigned transactions once, every `UnsignedInterval` blocks. + type UnsignedInterval: Get; + + /// A configuration for base priority of unsigned transactions. + /// + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. + type UnsignedPriority: Get; } decl_storage! { - trait Store for Module as ExampleOffchainWorker { - /// A vector of recently submitted prices. - /// - /// This is used to calculate average price, should have bounded size. - Prices get(fn prices): Vec; - /// Defines the block when next unsigned transaction will be accepted. - /// - /// To prevent spam of unsigned (and unpayed!) transactions on the network, - /// we only allow one transaction every `T::UnsignedInterval` blocks. - /// This storage entry defines when new transaction is going to be accepted. - NextUnsignedAt get(fn next_unsigned_at): T::BlockNumber; - } + trait Store for Module as ExampleOffchainWorker { + /// A vector of recently submitted prices. + /// + /// This is used to calculate average price, should have bounded size. + Prices get(fn prices): Vec; + /// Defines the block when next unsigned transaction will be accepted. + /// + /// To prevent spam of unsigned (and unpayed!) transactions on the network, + /// we only allow one transaction every `T::UnsignedInterval` blocks. + /// This storage entry defines when new transaction is going to be accepted. + NextUnsignedAt get(fn next_unsigned_at): T::BlockNumber; + } } decl_event!( - /// Events generated by the module. 
- pub enum Event where AccountId = ::AccountId { - /// Event generated when new price is accepted to contribute to the average. - NewPrice(u32, AccountId), - } + /// Events generated by the module. + pub enum Event + where + AccountId = ::AccountId, + { + /// Event generated when new price is accepted to contribute to the average. + NewPrice(u32, AccountId), + } ); decl_module! { - /// A public part of the pallet. - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - /// Submit new price to the list. - /// - /// This method is a public function of the module and can be called from within - /// a transaction. It appends given `price` to current list of prices. - /// In our example the `offchain worker` will create, sign & submit a transaction that - /// calls this function passing the price. - /// - /// The transaction needs to be signed (see `ensure_signed`) check, so that the caller - /// pays a fee to execute it. - /// This makes sure that it's not easy (or rather cheap) to attack the chain by submitting - /// excesive transactions, but note that it doesn't ensure the price oracle is actually - /// working and receives (and provides) meaningful data. - /// This example is not focused on correctness of the oracle itself, but rather its - /// purpose is to showcase offchain worker capabilities. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn submit_price(origin, price: u32) -> DispatchResult { - // Retrieve sender of the transaction. - let who = ensure_signed(origin)?; - // Add the price to the on-chain list. - Self::add_price(who, price); - Ok(()) - } - - /// Submit new price to the list via unsigned transaction. - /// - /// Works exactly like the `submit_price` function, but since we allow sending the - /// transaction without a signature, and hence without paying any fees, - /// we need a way to make sure that only some transactions are accepted. 
- /// This function can be called only once every `T::UnsignedInterval` blocks. - /// Transactions that call that function are de-duplicated on the pool level - /// via `validate_unsigned` implementation and also are rendered invalid if - /// the function has already been called in current "session". - /// - /// It's important to specify `weight` for unsigned calls as well, because even though - /// they don't charge fees, we still don't want a single block to contain unlimited - /// number of such transactions. - /// - /// This example is not focused on correctness of the oracle itself, but rather its - /// purpose is to showcase offchain worker capabilities. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn submit_price_unsigned(origin, _block_number: T::BlockNumber, price: u32) - -> DispatchResult - { - // This ensures that the function can only be called via unsigned transaction. - ensure_none(origin)?; - // Add the price to the on-chain list, but mark it as coming from an empty address. - Self::add_price(Default::default(), price); - // now increment the block number at which we expect next unsigned transaction. - let current_block = >::block_number(); - >::put(current_block + T::UnsignedInterval::get()); - Ok(()) - } - - /// Offchain Worker entry point. - /// - /// By implementing `fn offchain_worker` within `decl_module!` you declare a new offchain - /// worker. - /// This function will be called when the node is fully synced and a new best block is - /// succesfuly imported. - /// Note that it's not guaranteed for offchain workers to run on EVERY block, there might - /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), - /// so the code should be able to handle that. - /// You can use `Local Storage` API to coordinate runs of the worker. - fn offchain_worker(block_number: T::BlockNumber) { - // It's a good idea to add logs to your offchain workers. 
- // Using the `frame_support::debug` module you have access to the same API exposed by - // the `log` crate. - // Note that having logs compiled to WASM may cause the size of the blob to increase - // significantly. You can use `RuntimeDebug` custom derive to hide details of the types - // in WASM or use `debug::native` namespace to produce logs only when the worker is - // running natively. - debug::native::info!("Hello World from offchain workers!"); - - // Since off-chain workers are just part of the runtime code, they have direct access - // to the storage and other included pallets. - // - // We can easily import `frame_system` and retrieve a block hash of the parent block. - let parent_hash = >::block_hash(block_number - 1.into()); - debug::debug!("Current block: {:?} (parent hash: {:?})", block_number, parent_hash); - - // It's a good practice to keep `fn offchain_worker()` function minimal, and move most - // of the code to separate `impl` block. - // Here we call a helper function to calculate current average price. - // This function reads storage entries of the current state. - let average: Option = Self::average_price(); - debug::debug!("Current price: {:?}", average); - - // For this example we are going to send both signed and unsigned transactions - // depending on the block number. - // Usually it's enough to choose one or the other. - let should_send = Self::choose_transaction_type(block_number); - let res = match should_send { - TransactionType::Signed => Self::fetch_price_and_send_signed(), - TransactionType::Unsigned => Self::fetch_price_and_send_unsigned(block_number), - TransactionType::None => Ok(()), - }; - if let Err(e) = res { - debug::error!("Error: {}", e); - } - } - } + /// A public part of the pallet. + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + /// Submit new price to the list. 
+ /// + /// This method is a public function of the module and can be called from within + /// a transaction. It appends given `price` to current list of prices. + /// In our example the `offchain worker` will create, sign & submit a transaction that + /// calls this function passing the price. + /// + /// The transaction needs to be signed (see `ensure_signed`) check, so that the caller + /// pays a fee to execute it. + /// This makes sure that it's not easy (or rather cheap) to attack the chain by submitting + /// excesive transactions, but note that it doesn't ensure the price oracle is actually + /// working and receives (and provides) meaningful data. + /// This example is not focused on correctness of the oracle itself, but rather its + /// purpose is to showcase offchain worker capabilities. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn submit_price(origin, price: u32) -> DispatchResult { + // Retrieve sender of the transaction. + let who = ensure_signed(origin)?; + // Add the price to the on-chain list. + Self::add_price(who, price); + Ok(()) + } + + /// Submit new price to the list via unsigned transaction. + /// + /// Works exactly like the `submit_price` function, but since we allow sending the + /// transaction without a signature, and hence without paying any fees, + /// we need a way to make sure that only some transactions are accepted. + /// This function can be called only once every `T::UnsignedInterval` blocks. + /// Transactions that call that function are de-duplicated on the pool level + /// via `validate_unsigned` implementation and also are rendered invalid if + /// the function has already been called in current "session". + /// + /// It's important to specify `weight` for unsigned calls as well, because even though + /// they don't charge fees, we still don't want a single block to contain unlimited + /// number of such transactions. 
+ /// + /// This example is not focused on correctness of the oracle itself, but rather its + /// purpose is to showcase offchain worker capabilities. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn submit_price_unsigned(origin, _block_number: T::BlockNumber, price: u32) + -> DispatchResult + { + // This ensures that the function can only be called via unsigned transaction. + ensure_none(origin)?; + // Add the price to the on-chain list, but mark it as coming from an empty address. + Self::add_price(Default::default(), price); + // now increment the block number at which we expect next unsigned transaction. + let current_block = >::block_number(); + >::put(current_block + T::UnsignedInterval::get()); + Ok(()) + } + + /// Offchain Worker entry point. + /// + /// By implementing `fn offchain_worker` within `decl_module!` you declare a new offchain + /// worker. + /// This function will be called when the node is fully synced and a new best block is + /// succesfuly imported. + /// Note that it's not guaranteed for offchain workers to run on EVERY block, there might + /// be cases where some blocks are skipped, or for some the worker runs twice (re-orgs), + /// so the code should be able to handle that. + /// You can use `Local Storage` API to coordinate runs of the worker. + fn offchain_worker(block_number: T::BlockNumber) { + // It's a good idea to add logs to your offchain workers. + // Using the `frame_support::debug` module you have access to the same API exposed by + // the `log` crate. + // Note that having logs compiled to WASM may cause the size of the blob to increase + // significantly. You can use `RuntimeDebug` custom derive to hide details of the types + // in WASM or use `debug::native` namespace to produce logs only when the worker is + // running natively. 
+ debug::native::info!("Hello World from offchain workers!"); + + // Since off-chain workers are just part of the runtime code, they have direct access + // to the storage and other included pallets. + // + // We can easily import `frame_system` and retrieve a block hash of the parent block. + let parent_hash = >::block_hash(block_number - 1.into()); + debug::debug!("Current block: {:?} (parent hash: {:?})", block_number, parent_hash); + + // It's a good practice to keep `fn offchain_worker()` function minimal, and move most + // of the code to separate `impl` block. + // Here we call a helper function to calculate current average price. + // This function reads storage entries of the current state. + let average: Option = Self::average_price(); + debug::debug!("Current price: {:?}", average); + + // For this example we are going to send both signed and unsigned transactions + // depending on the block number. + // Usually it's enough to choose one or the other. + let should_send = Self::choose_transaction_type(block_number); + let res = match should_send { + TransactionType::Signed => Self::fetch_price_and_send_signed(), + TransactionType::Unsigned => Self::fetch_price_and_send_unsigned(block_number), + TransactionType::None => Ok(()), + }; + if let Err(e) = res { + debug::error!("Error: {}", e); + } + } + } } enum TransactionType { - Signed, - Unsigned, - None, + Signed, + Unsigned, + None, } /// Most of the functions are moved outside of the `decl_module!` macro. @@ -257,322 +258,323 @@ enum TransactionType { /// This greatly helps with error messages, as the ones inside the macro /// can sometimes be hard to debug. impl Module { - /// Chooses which transaction type to send. - /// - /// This function serves mostly to showcase `StorageValue` helper - /// and local storage usage. - /// - /// Returns a type of transaction that should be produced in current run. 
- fn choose_transaction_type(block_number: T::BlockNumber) -> TransactionType { - /// A friendlier name for the error that is going to be returned in case we are in the grace - /// period. - const RECENTLY_SENT: () = (); - - // Start off by creating a reference to Local Storage value. - // Since the local storage is common for all offchain workers, it's a good practice - // to prepend your entry with the module name. - let val = StorageValueRef::persistent(b"example_ocw::last_send"); - // The Local Storage is persisted and shared between runs of the offchain workers, - // and offchain workers may run concurrently. We can use the `mutate` function, to - // write a storage entry in an atomic fashion. Under the hood it uses `compare_and_set` - // low-level method of local storage API, which means that only one worker - // will be able to "acquire a lock" and send a transaction if multiple workers - // happen to be executed concurrently. - let res = val.mutate(|last_send: Option>| { - // We match on the value decoded from the storage. The first `Option` - // indicates if the value was present in the storage at all, - // the second (inner) `Option` indicates if the value was succesfuly - // decoded to expected type (`T::BlockNumber` in our case). - match last_send { - // If we already have a value in storage and the block number is recent enough - // we avoid sending another transaction at this time. - Some(Some(block)) if block_number < block + T::GracePeriod::get() => { - Err(RECENTLY_SENT) - }, - // In every other case we attempt to acquire the lock and send a transaction. - _ => Ok(block_number) - } - }); - - // The result of `mutate` call will give us a nested `Result` type. - // The first one matches the return of the closure passed to `mutate`, i.e. - // if we return `Err` from the closure, we get an `Err` here. - // In case we return `Ok`, here we will have another (inner) `Result` that indicates - // if the value has been set to the storage correctly - i.e. 
if it wasn't - // written to in the meantime. - match res { - // The value has been set correctly, which means we can safely send a transaction now. - Ok(Ok(block_number)) => { - // Depending if the block is even or odd we will send a `Signed` or `Unsigned` - // transaction. - // Note that this logic doesn't really guarantee that the transactions will be sent - // in an alternating fashion (i.e. fairly distributed). Depending on the execution - // order and lock acquisition, we may end up for instance sending two `Signed` - // transactions in a row. If a strict order is desired, it's better to use - // the storage entry for that. (for instance store both block number and a flag - // indicating the type of next transaction to send). - let send_signed = block_number % 2.into() == Zero::zero(); - if send_signed { - TransactionType::Signed - } else { - TransactionType::Unsigned - } - }, - // We are in the grace period, we should not send a transaction this time. - Err(RECENTLY_SENT) => TransactionType::None, - // We wanted to send a transaction, but failed to write the block number (acquire a - // lock). This indicates that another offchain worker that was running concurrently - // most likely executed the same logic and succeeded at writing to storage. - // Thus we don't really want to send the transaction, knowing that the other run - // already did. - Ok(Err(_)) => TransactionType::None, - } - } - - /// A helper function to fetch the price and send signed transaction. - fn fetch_price_and_send_signed() -> Result<(), &'static str> { - use system::offchain::SubmitSignedTransaction; - // Firstly we check if there are any accounts in the local keystore that are capable of - // signing the transaction. - // If not it doesn't even make sense to make external HTTP requests, since we won't be able - // to put the results back on-chain. - if !T::SubmitSignedTransaction::can_sign() { - return Err( - "No local accounts available. 
Consider adding one via `author_insertKey` RPC." - )? - } - - // Make an external HTTP request to fetch the current price. - // Note this call will block until response is received. - let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; - - // Received price is wrapped into a call to `submit_price` public function of this pallet. - // This means that the transaction, when executed, will simply call that function passing - // `price` as an argument. - let call = Call::submit_price(price); - - // Using `SubmitSignedTransaction` associated type we create and submit a transaction - // representing the call, we've just created. - // Submit signed will return a vector of results for all accounts that were found in the - // local keystore with expected `KEY_TYPE`. - let results = T::SubmitSignedTransaction::submit_signed(call); - for (acc, res) in &results { - match res { - Ok(()) => debug::info!("[{:?}] Submitted price of {} cents", acc, price), - Err(e) => debug::error!("[{:?}] Failed to submit transaction: {:?}", acc, e), - } - } - - Ok(()) - } - - /// A helper function to fetch the price and send unsigned transaction. - fn fetch_price_and_send_unsigned(block_number: T::BlockNumber) -> Result<(), &'static str> { - use system::offchain::SubmitUnsignedTransaction; - // Make sure we don't fetch the price if unsigned transaction is going to be rejected - // anyway. - let next_unsigned_at = >::get(); - if next_unsigned_at > block_number { - return Err("Too early to send unsigned transaction") - } - - // Make an external HTTP request to fetch the current price. - // Note this call will block until response is received. - let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; - - // Received price is wrapped into a call to `submit_price_unsigned` public function of this - // pallet. This means that the transaction, when executed, will simply call that function - // passing `price` as an argument. 
- let call = Call::submit_price_unsigned(block_number, price); - - // Now let's create an unsigned transaction out of this call and submit it to the pool. - // By default unsigned transactions are disallowed, so we need to whitelist this case - // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefuly - // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam - // attack vectors. See validation logic docs for more details. - T::SubmitUnsignedTransaction::submit_unsigned(call) - .map_err(|()| "Unable to submit unsigned transaction.".into()) - - } - - /// Fetch current price and return the result in cents. - fn fetch_price() -> Result { - // We want to keep the offchain worker execution time reasonable, so we set a hard-coded - // deadline to 2s to complete the external call. - // You can also wait idefinitely for the response, however you may still get a timeout - // coming from the host machine. - let deadline = sp_io::offchain::timestamp().add(Duration::from_millis(2_000)); - // Initiate an external HTTP GET request. - // This is using high-level wrappers from `sp_runtime`, for the low-level calls that - // you can find in `sp_io`. The API is trying to be similar to `reqwest`, but - // since we are running in a custom WASM execution environment we can't simply - // import the library here. - let request = http::Request::get( - "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD" - ); - // We set the deadline for sending of the request, note that awaiting response can - // have a separate deadline. Next we send the request, before that it's also possible - // to alter request headers or stream body content in case of non-GET requests. - let pending = request - .deadline(deadline) - .send() - .map_err(|_| http::Error::IoError)?; - - // The request is already being processed by the host, we are free to do anything - // else in the worker (we can send multiple concurrent requests too). 
- // At some point however we probably want to check the response though, - // so we can block current thread and wait for it to finish. - // Note that since the request is being driven by the host, we don't have to wait - // for the request to have it complete, we will just not read the response. - let response = pending.try_wait(deadline) - .map_err(|_| http::Error::DeadlineReached)??; - // Let's check the status code before we proceed to reading the response. - if response.code != 200 { - debug::warn!("Unexpected status code: {}", response.code); - return Err(http::Error::Unknown); - } - - // Next we want to fully read the response body and collect it to a vector of bytes. - // Note that the return object allows you to read the body in chunks as well - // with a way to control the deadline. - let body = response.body().collect::>(); - - // Create a str slice from the body. - let body_str = sp_std::str::from_utf8(&body).map_err(|_| { - debug::warn!("No UTF8 body"); - http::Error::Unknown - })?; - - let price = match Self::parse_price(body_str) { - Some(price) => Ok(price), - None => { - debug::warn!("Unable to extract price from the response: {:?}", body_str); - Err(http::Error::Unknown) - } - }?; - - debug::warn!("Got price: {} cents", price); - - Ok(price) - } - - /// Parse the price from the given JSON string using `lite-json`. - /// - /// Returns `None` when parsing failed or `Some(price in cents)` when parsing is successful. 
- fn parse_price(price_str: &str) -> Option { - let val = lite_json::parse_json(price_str); - let price = val.ok().and_then(|v| match v { - JsonValue::Object(obj) => { - let mut chars = "USD".chars(); - obj.into_iter() - .find(|(k, _)| k.iter().all(|k| Some(*k) == chars.next())) - .and_then(|v| match v.1 { - JsonValue::Number(number) => Some(number), - _ => None, - }) - }, - _ => None - })?; - - let exp = price.fraction_length.checked_sub(2).unwrap_or(0); - Some(price.integer as u32 * 100 + (price.fraction / 10_u64.pow(exp)) as u32) - } - - /// Add new price to the list. - fn add_price(who: T::AccountId, price: u32) { - debug::info!("Adding to the average: {}", price); - Prices::mutate(|prices| { - const MAX_LEN: usize = 64; - - if prices.len() < MAX_LEN { - prices.push(price); - } else { - prices[price as usize % MAX_LEN] = price; - } - }); - - let average = Self::average_price() - .expect("The average is not empty, because it was just mutated; qed"); - debug::info!("Current average price is: {}", average); - // here we are raising the NewPrice event - Self::deposit_event(RawEvent::NewPrice(price, who)); - } - - /// Calculate current average price. - fn average_price() -> Option { - let prices = Prices::get(); - if prices.is_empty() { - None - } else { - Some(prices.iter().fold(0_u32, |a, b| a.saturating_add(*b)) / prices.len() as u32) - } - } + /// Chooses which transaction type to send. + /// + /// This function serves mostly to showcase `StorageValue` helper + /// and local storage usage. + /// + /// Returns a type of transaction that should be produced in current run. + fn choose_transaction_type(block_number: T::BlockNumber) -> TransactionType { + /// A friendlier name for the error that is going to be returned in case we are in the grace + /// period. + const RECENTLY_SENT: () = (); + + // Start off by creating a reference to Local Storage value. 
+ // Since the local storage is common for all offchain workers, it's a good practice + // to prepend your entry with the module name. + let val = StorageValueRef::persistent(b"example_ocw::last_send"); + // The Local Storage is persisted and shared between runs of the offchain workers, + // and offchain workers may run concurrently. We can use the `mutate` function, to + // write a storage entry in an atomic fashion. Under the hood it uses `compare_and_set` + // low-level method of local storage API, which means that only one worker + // will be able to "acquire a lock" and send a transaction if multiple workers + // happen to be executed concurrently. + let res = val.mutate(|last_send: Option>| { + // We match on the value decoded from the storage. The first `Option` + // indicates if the value was present in the storage at all, + // the second (inner) `Option` indicates if the value was succesfuly + // decoded to expected type (`T::BlockNumber` in our case). + match last_send { + // If we already have a value in storage and the block number is recent enough + // we avoid sending another transaction at this time. + Some(Some(block)) if block_number < block + T::GracePeriod::get() => { + Err(RECENTLY_SENT) + } + // In every other case we attempt to acquire the lock and send a transaction. + _ => Ok(block_number), + } + }); + + // The result of `mutate` call will give us a nested `Result` type. + // The first one matches the return of the closure passed to `mutate`, i.e. + // if we return `Err` from the closure, we get an `Err` here. + // In case we return `Ok`, here we will have another (inner) `Result` that indicates + // if the value has been set to the storage correctly - i.e. if it wasn't + // written to in the meantime. + match res { + // The value has been set correctly, which means we can safely send a transaction now. + Ok(Ok(block_number)) => { + // Depending if the block is even or odd we will send a `Signed` or `Unsigned` + // transaction. 
+ // Note that this logic doesn't really guarantee that the transactions will be sent + // in an alternating fashion (i.e. fairly distributed). Depending on the execution + // order and lock acquisition, we may end up for instance sending two `Signed` + // transactions in a row. If a strict order is desired, it's better to use + // the storage entry for that. (for instance store both block number and a flag + // indicating the type of next transaction to send). + let send_signed = block_number % 2.into() == Zero::zero(); + if send_signed { + TransactionType::Signed + } else { + TransactionType::Unsigned + } + } + // We are in the grace period, we should not send a transaction this time. + Err(RECENTLY_SENT) => TransactionType::None, + // We wanted to send a transaction, but failed to write the block number (acquire a + // lock). This indicates that another offchain worker that was running concurrently + // most likely executed the same logic and succeeded at writing to storage. + // Thus we don't really want to send the transaction, knowing that the other run + // already did. + Ok(Err(_)) => TransactionType::None, + } + } + + /// A helper function to fetch the price and send signed transaction. + fn fetch_price_and_send_signed() -> Result<(), &'static str> { + use system::offchain::SubmitSignedTransaction; + // Firstly we check if there are any accounts in the local keystore that are capable of + // signing the transaction. + // If not it doesn't even make sense to make external HTTP requests, since we won't be able + // to put the results back on-chain. + if !T::SubmitSignedTransaction::can_sign() { + return Err( + "No local accounts available. Consider adding one via `author_insertKey` RPC.", + )?; + } + + // Make an external HTTP request to fetch the current price. + // Note this call will block until response is received. 
+ let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; + + // Received price is wrapped into a call to `submit_price` public function of this pallet. + // This means that the transaction, when executed, will simply call that function passing + // `price` as an argument. + let call = Call::submit_price(price); + + // Using `SubmitSignedTransaction` associated type we create and submit a transaction + // representing the call, we've just created. + // Submit signed will return a vector of results for all accounts that were found in the + // local keystore with expected `KEY_TYPE`. + let results = T::SubmitSignedTransaction::submit_signed(call); + for (acc, res) in &results { + match res { + Ok(()) => debug::info!("[{:?}] Submitted price of {} cents", acc, price), + Err(e) => debug::error!("[{:?}] Failed to submit transaction: {:?}", acc, e), + } + } + + Ok(()) + } + + /// A helper function to fetch the price and send unsigned transaction. + fn fetch_price_and_send_unsigned(block_number: T::BlockNumber) -> Result<(), &'static str> { + use system::offchain::SubmitUnsignedTransaction; + // Make sure we don't fetch the price if unsigned transaction is going to be rejected + // anyway. + let next_unsigned_at = >::get(); + if next_unsigned_at > block_number { + return Err("Too early to send unsigned transaction"); + } + + // Make an external HTTP request to fetch the current price. + // Note this call will block until response is received. + let price = Self::fetch_price().map_err(|_| "Failed to fetch price")?; + + // Received price is wrapped into a call to `submit_price_unsigned` public function of this + // pallet. This means that the transaction, when executed, will simply call that function + // passing `price` as an argument. + let call = Call::submit_price_unsigned(block_number, price); + + // Now let's create an unsigned transaction out of this call and submit it to the pool. 
+ // By default unsigned transactions are disallowed, so we need to whitelist this case + // by writing `UnsignedValidator`. Note that it's EXTREMELY important to carefuly + // implement unsigned validation logic, as any mistakes can lead to opening DoS or spam + // attack vectors. See validation logic docs for more details. + T::SubmitUnsignedTransaction::submit_unsigned(call) + .map_err(|()| "Unable to submit unsigned transaction.".into()) + } + + /// Fetch current price and return the result in cents. + fn fetch_price() -> Result { + // We want to keep the offchain worker execution time reasonable, so we set a hard-coded + // deadline to 2s to complete the external call. + // You can also wait idefinitely for the response, however you may still get a timeout + // coming from the host machine. + let deadline = sp_io::offchain::timestamp().add(Duration::from_millis(2_000)); + // Initiate an external HTTP GET request. + // This is using high-level wrappers from `sp_runtime`, for the low-level calls that + // you can find in `sp_io`. The API is trying to be similar to `reqwest`, but + // since we are running in a custom WASM execution environment we can't simply + // import the library here. + let request = + http::Request::get("https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD"); + // We set the deadline for sending of the request, note that awaiting response can + // have a separate deadline. Next we send the request, before that it's also possible + // to alter request headers or stream body content in case of non-GET requests. + let pending = request + .deadline(deadline) + .send() + .map_err(|_| http::Error::IoError)?; + + // The request is already being processed by the host, we are free to do anything + // else in the worker (we can send multiple concurrent requests too). + // At some point however we probably want to check the response though, + // so we can block current thread and wait for it to finish. 
+ // Note that since the request is being driven by the host, we don't have to wait + // for the request to have it complete, we will just not read the response. + let response = pending + .try_wait(deadline) + .map_err(|_| http::Error::DeadlineReached)??; + // Let's check the status code before we proceed to reading the response. + if response.code != 200 { + debug::warn!("Unexpected status code: {}", response.code); + return Err(http::Error::Unknown); + } + + // Next we want to fully read the response body and collect it to a vector of bytes. + // Note that the return object allows you to read the body in chunks as well + // with a way to control the deadline. + let body = response.body().collect::>(); + + // Create a str slice from the body. + let body_str = sp_std::str::from_utf8(&body).map_err(|_| { + debug::warn!("No UTF8 body"); + http::Error::Unknown + })?; + + let price = match Self::parse_price(body_str) { + Some(price) => Ok(price), + None => { + debug::warn!("Unable to extract price from the response: {:?}", body_str); + Err(http::Error::Unknown) + } + }?; + + debug::warn!("Got price: {} cents", price); + + Ok(price) + } + + /// Parse the price from the given JSON string using `lite-json`. + /// + /// Returns `None` when parsing failed or `Some(price in cents)` when parsing is successful. + fn parse_price(price_str: &str) -> Option { + let val = lite_json::parse_json(price_str); + let price = val.ok().and_then(|v| match v { + JsonValue::Object(obj) => { + let mut chars = "USD".chars(); + obj.into_iter() + .find(|(k, _)| k.iter().all(|k| Some(*k) == chars.next())) + .and_then(|v| match v.1 { + JsonValue::Number(number) => Some(number), + _ => None, + }) + } + _ => None, + })?; + + let exp = price.fraction_length.checked_sub(2).unwrap_or(0); + Some(price.integer as u32 * 100 + (price.fraction / 10_u64.pow(exp)) as u32) + } + + /// Add new price to the list. 
+ fn add_price(who: T::AccountId, price: u32) { + debug::info!("Adding to the average: {}", price); + Prices::mutate(|prices| { + const MAX_LEN: usize = 64; + + if prices.len() < MAX_LEN { + prices.push(price); + } else { + prices[price as usize % MAX_LEN] = price; + } + }); + + let average = Self::average_price() + .expect("The average is not empty, because it was just mutated; qed"); + debug::info!("Current average price is: {}", average); + // here we are raising the NewPrice event + Self::deposit_event(RawEvent::NewPrice(price, who)); + } + + /// Calculate current average price. + fn average_price() -> Option { + let prices = Prices::get(); + if prices.is_empty() { + None + } else { + Some(prices.iter().fold(0_u32, |a, b| a.saturating_add(*b)) / prices.len() as u32) + } + } } #[allow(deprecated)] // ValidateUnsigned impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - - /// Validate unsigned call to this module. - /// - /// By default unsigned transactions are disallowed, but implementing the validator - /// here we make sure that some particular calls (the ones produced by offchain worker) - /// are being whitelisted and marked as valid. - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - // Firstly let's check that we call the right function. - if let Call::submit_price_unsigned(block_number, new_price) = call { - // Now let's check if the transaction has any chance to succeed. - let next_unsigned_at = >::get(); - if &next_unsigned_at > block_number { - return InvalidTransaction::Stale.into(); - } - // Let's make sure to reject transactions from the future. - let current_block = >::block_number(); - if ¤t_block < block_number { - return InvalidTransaction::Future.into(); - } - - // We prioritize transactions that are more far away from current average. 
- // - // Note this doesn't make much sense when building an actual oracle, but this example - // is here mostly to show off offchain workers capabilities, not about building an - // oracle. - let avg_price = Self::average_price() - .map(|price| if &price > new_price { price - new_price } else { new_price - price }) - .unwrap_or(0); - - ValidTransaction::with_tag_prefix("ExampleOffchainWorker") - // We set base priority to 2**20 to make sure it's included before any other - // transactions in the pool. Next we tweak the priority depending on how much - // it differs from the current average. (the more it differs the more priority it - // has). - .priority(T::UnsignedPriority::get().saturating_add(avg_price as _)) - // This transaction does not require anything else to go before into the pool. - // In theory we could require `previous_unsigned_at` transaction to go first, - // but it's not necessary in our case. - //.and_requires() - - // We set the `provides` tag to be the same as `next_unsigned_at`. This makes - // sure only one transaction produced after `next_unsigned_at` will ever - // get to the transaction pool and will end up in the block. - // We can still have multiple transactions compete for the same "spot", - // and the one with higher priority will replace other one in the pool. - .and_provides(next_unsigned_at) - // The transaction is only valid for next 5 blocks. After that it's - // going to be revalidated by the pool. - .longevity(5) - // It's fine to propagate that transaction to other peers, which means it can be - // created even by nodes that don't produce blocks. - // Note that sometimes it's better to keep it for yourself (if you are the block - // producer), since for instance in some schemes others may copy your solution and - // claim a reward. - .propagate(true) - .build() - } else { - InvalidTransaction::Call.into() - } - } + type Call = Call; + + /// Validate unsigned call to this module. 
+ /// + /// By default unsigned transactions are disallowed, but implementing the validator + /// here we make sure that some particular calls (the ones produced by offchain worker) + /// are being whitelisted and marked as valid. + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + // Firstly let's check that we call the right function. + if let Call::submit_price_unsigned(block_number, new_price) = call { + // Now let's check if the transaction has any chance to succeed. + let next_unsigned_at = >::get(); + if &next_unsigned_at > block_number { + return InvalidTransaction::Stale.into(); + } + // Let's make sure to reject transactions from the future. + let current_block = >::block_number(); + if ¤t_block < block_number { + return InvalidTransaction::Future.into(); + } + + // We prioritize transactions that are more far away from current average. + // + // Note this doesn't make much sense when building an actual oracle, but this example + // is here mostly to show off offchain workers capabilities, not about building an + // oracle. + let avg_price = Self::average_price() + .map(|price| { + if &price > new_price { + price - new_price + } else { + new_price - price + } + }) + .unwrap_or(0); + + ValidTransaction::with_tag_prefix("ExampleOffchainWorker") + // We set base priority to 2**20 to make sure it's included before any other + // transactions in the pool. Next we tweak the priority depending on how much + // it differs from the current average. (the more it differs the more priority it + // has). + .priority(T::UnsignedPriority::get().saturating_add(avg_price as _)) + // This transaction does not require anything else to go before into the pool. + // In theory we could require `previous_unsigned_at` transaction to go first, + // but it's not necessary in our case. + //.and_requires() + // We set the `provides` tag to be the same as `next_unsigned_at`. 
This makes + // sure only one transaction produced after `next_unsigned_at` will ever + // get to the transaction pool and will end up in the block. + // We can still have multiple transactions compete for the same "spot", + // and the one with higher priority will replace other one in the pool. + .and_provides(next_unsigned_at) + // The transaction is only valid for next 5 blocks. After that it's + // going to be revalidated by the pool. + .longevity(5) + // It's fine to propagate that transaction to other peers, which means it can be + // created even by nodes that don't produce blocks. + // Note that sometimes it's better to keep it for yourself (if you are the block + // producer), since for instance in some schemes others may copy your solution and + // claim a reward. + .propagate(true) + .build() + } else { + InvalidTransaction::Call.into() + } + } } diff --git a/frame/example-offchain-worker/src/tests.rs b/frame/example-offchain-worker/src/tests.rs index 279de7ef4a..22f179b2f6 100644 --- a/frame/example-offchain-worker/src/tests.rs +++ b/frame/example-offchain-worker/src/tests.rs @@ -17,24 +17,21 @@ use crate::*; use codec::Decode; -use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, - weights::Weight, -}; +use frame_support::{assert_ok, impl_outer_origin, parameter_types, weights::Weight}; use sp_core::{ - H256, - offchain::{OffchainExt, TransactionPoolExt, testing}, - testing::KeyStore, - traits::KeystoreExt, + offchain::{testing, OffchainExt, TransactionPoolExt}, + testing::KeyStore, + traits::KeystoreExt, + H256, }; use sp_runtime::{ - Perbill, RuntimeAppPublic, - testing::{Header, TestXt}, - traits::{BlakeTwo256, IdentityLookup, Extrinsic as ExtrinsicsT}, + testing::{Header, TestXt}, + traits::{BlakeTwo256, Extrinsic as ExtrinsicsT, IdentityLookup}, + Perbill, RuntimeAppPublic, }; impl_outer_origin! 
{ - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } // For testing the module, we construct most of a mock runtime. This means @@ -43,178 +40,190 @@ impl_outer_origin! { #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Call = (); - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = sp_core::sr25519::Public; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = sp_core::sr25519::Public; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } type Extrinsic = TestXt, ()>; -type SubmitTransaction = 
frame_system::offchain::TransactionSubmitter< - crypto::Public, - Test, - Extrinsic ->; +type SubmitTransaction = + frame_system::offchain::TransactionSubmitter; impl frame_system::offchain::CreateTransaction for Test { - type Public = sp_core::sr25519::Public; - type Signature = sp_core::sr25519::Signature; - - fn create_transaction>( - call: ::Call, - _public: Self::Public, - _account: ::AccountId, - nonce: ::Index, - ) -> Option<(::Call, ::SignaturePayload)> { - Some((call, (nonce, ()))) - } + type Public = sp_core::sr25519::Public; + type Signature = sp_core::sr25519::Signature; + + fn create_transaction>( + call: ::Call, + _public: Self::Public, + _account: ::AccountId, + nonce: ::Index, + ) -> Option<( + ::Call, + ::SignaturePayload, + )> { + Some((call, (nonce, ()))) + } } parameter_types! { - pub const GracePeriod: u64 = 5; - pub const UnsignedInterval: u64 = 128; - pub const UnsignedPriority: u64 = 1 << 20; + pub const GracePeriod: u64 = 5; + pub const UnsignedInterval: u64 = 128; + pub const UnsignedPriority: u64 = 1 << 20; } impl Trait for Test { - type Event = (); - type Call = Call; - type SubmitSignedTransaction = SubmitTransaction; - type SubmitUnsignedTransaction = SubmitTransaction; - type GracePeriod = GracePeriod; - type UnsignedInterval = UnsignedInterval; - type UnsignedPriority = UnsignedPriority; + type Event = (); + type Call = Call; + type SubmitSignedTransaction = SubmitTransaction; + type SubmitUnsignedTransaction = SubmitTransaction; + type GracePeriod = GracePeriod; + type UnsignedInterval = UnsignedInterval; + type UnsignedPriority = UnsignedPriority; } type Example = Module; #[test] fn it_aggregates_the_price() { - sp_io::TestExternalities::default().execute_with(|| { - assert_eq!(Example::average_price(), None); - - assert_ok!(Example::submit_price(Origin::signed(Default::default()), 27)); - assert_eq!(Example::average_price(), Some(27)); - - assert_ok!(Example::submit_price(Origin::signed(Default::default()), 43)); - 
assert_eq!(Example::average_price(), Some(35)); - }); + sp_io::TestExternalities::default().execute_with(|| { + assert_eq!(Example::average_price(), None); + + assert_ok!(Example::submit_price( + Origin::signed(Default::default()), + 27 + )); + assert_eq!(Example::average_price(), Some(27)); + + assert_ok!(Example::submit_price( + Origin::signed(Default::default()), + 43 + )); + assert_eq!(Example::average_price(), Some(35)); + }); } #[test] fn should_make_http_call_and_parse_result() { - let (offchain, state) = testing::TestOffchainExt::new(); - let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); - - price_oracle_response(&mut state.write()); - - t.execute_with(|| { - // when - let price = Example::fetch_price().unwrap(); - // then - assert_eq!(price, 15523); - }); + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + + price_oracle_response(&mut state.write()); + + t.execute_with(|| { + // when + let price = Example::fetch_price().unwrap(); + // then + assert_eq!(price, 15523); + }); } #[test] fn should_submit_signed_transaction_on_chain() { - const PHRASE: &str = "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; - - let (offchain, offchain_state) = testing::TestOffchainExt::new(); - let (pool, pool_state) = testing::TestTransactionPoolExt::new(); - let keystore = KeyStore::new(); - keystore.write().sr25519_generate_new( - crate::crypto::Public::ID, - Some(&format!("{}/hunter1", PHRASE)) - ).unwrap(); - - - let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); - t.register_extension(TransactionPoolExt::new(pool)); - t.register_extension(KeystoreExt(keystore)); - - price_oracle_response(&mut offchain_state.write()); - - t.execute_with(|| { - // when - Example::fetch_price_and_send_signed().unwrap(); - // then - let tx = 
pool_state.write().transactions.pop().unwrap(); - assert!(pool_state.read().transactions.is_empty()); - let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature.unwrap().0, 0); - assert_eq!(tx.call, Call::submit_price(15523)); - }); + const PHRASE: &str = + "news slush supreme milk chapter athlete soap sausage put clutch what kitten"; + + let (offchain, offchain_state) = testing::TestOffchainExt::new(); + let (pool, pool_state) = testing::TestTransactionPoolExt::new(); + let keystore = KeyStore::new(); + keystore + .write() + .sr25519_generate_new( + crate::crypto::Public::ID, + Some(&format!("{}/hunter1", PHRASE)), + ) + .unwrap(); + + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + t.register_extension(TransactionPoolExt::new(pool)); + t.register_extension(KeystoreExt(keystore)); + + price_oracle_response(&mut offchain_state.write()); + + t.execute_with(|| { + // when + Example::fetch_price_and_send_signed().unwrap(); + // then + let tx = pool_state.write().transactions.pop().unwrap(); + assert!(pool_state.read().transactions.is_empty()); + let tx = Extrinsic::decode(&mut &*tx).unwrap(); + assert_eq!(tx.signature.unwrap().0, 0); + assert_eq!(tx.call, Call::submit_price(15523)); + }); } #[test] fn should_submit_unsigned_transaction_on_chain() { - let (offchain, offchain_state) = testing::TestOffchainExt::new(); - let (pool, pool_state) = testing::TestTransactionPoolExt::new(); - let mut t = sp_io::TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); - t.register_extension(TransactionPoolExt::new(pool)); - - price_oracle_response(&mut offchain_state.write()); - - t.execute_with(|| { - // when - Example::fetch_price_and_send_unsigned(1).unwrap(); - // then - let tx = pool_state.write().transactions.pop().unwrap(); - assert!(pool_state.read().transactions.is_empty()); - let tx = Extrinsic::decode(&mut &*tx).unwrap(); - assert_eq!(tx.signature, None); - 
assert_eq!(tx.call, Call::submit_price_unsigned(1, 15523)); - }); + let (offchain, offchain_state) = testing::TestOffchainExt::new(); + let (pool, pool_state) = testing::TestTransactionPoolExt::new(); + let mut t = sp_io::TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + t.register_extension(TransactionPoolExt::new(pool)); + + price_oracle_response(&mut offchain_state.write()); + + t.execute_with(|| { + // when + Example::fetch_price_and_send_unsigned(1).unwrap(); + // then + let tx = pool_state.write().transactions.pop().unwrap(); + assert!(pool_state.read().transactions.is_empty()); + let tx = Extrinsic::decode(&mut &*tx).unwrap(); + assert_eq!(tx.signature, None); + assert_eq!(tx.call, Call::submit_price_unsigned(1, 15523)); + }); } fn price_oracle_response(state: &mut testing::OffchainState) { - state.expect_request(0, testing::PendingRequest { - method: "GET".into(), - uri: "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD".into(), - response: Some(br#"{"USD": 155.23}"#.to_vec()), - sent: true, - ..Default::default() - }); + state.expect_request( + 0, + testing::PendingRequest { + method: "GET".into(), + uri: "https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD".into(), + response: Some(br#"{"USD": 155.23}"#.to_vec()), + sent: true, + ..Default::default() + }, + ); } #[test] fn parse_price_works() { - let test_data = vec![ - ("{\"USD\":6536.92}", Some(653692)), - ("{\"USD\":65.92}", Some(6592)), - ("{\"USD\":6536.924565}", Some(653692)), - ("{\"USD\":6536}", Some(653600)), - ("{\"USD2\":6536}", None), - ("{\"USD\":\"6432\"}", None), - ]; - - for (json, expected) in test_data { - assert_eq!(expected, Example::parse_price(json)); - } + let test_data = vec![ + ("{\"USD\":6536.92}", Some(653692)), + ("{\"USD\":65.92}", Some(6592)), + ("{\"USD\":6536.924565}", Some(653692)), + ("{\"USD\":6536}", Some(653600)), + ("{\"USD2\":6536}", None), + ("{\"USD\":\"6432\"}", None), + ]; + + for (json, expected) in 
test_data { + assert_eq!(expected, Example::parse_price(json)); + } } diff --git a/frame/example/src/lib.rs b/frame/example/src/lib.rs index 97cad2856a..970957d286 100644 --- a/frame/example/src/lib.rs +++ b/frame/example/src/lib.rs @@ -253,25 +253,24 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::marker::PhantomData; +use codec::{Decode, Encode}; use frame_support::{ - dispatch::DispatchResult, decl_module, decl_storage, decl_event, - weights::{ - SimpleDispatchInfo, DispatchClass, ClassifyDispatch, WeighData, Weight, PaysFee, - MINIMUM_WEIGHT, - }, + decl_event, decl_module, decl_storage, + dispatch::DispatchResult, + weights::{ + ClassifyDispatch, DispatchClass, PaysFee, SimpleDispatchInfo, WeighData, Weight, + MINIMUM_WEIGHT, + }, }; -use sp_std::prelude::*; -use frame_system::{self as system, ensure_signed, ensure_root}; -use codec::{Encode, Decode}; +use frame_system::{self as system, ensure_root, ensure_signed}; use sp_runtime::{ - traits::{ - SignedExtension, Bounded, SaturatedConversion, DispatchInfoOf, - }, - transaction_validity::{ - ValidTransaction, TransactionValidityError, InvalidTransaction, TransactionValidity, - }, + traits::{Bounded, DispatchInfoOf, SaturatedConversion, SignedExtension}, + transaction_validity::{ + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + }, }; +use sp_std::marker::PhantomData; +use sp_std::prelude::*; // A custom weight calculator tailored for the dispatch call `set_dummy()`. This actually examines // the arguments and makes a decision based upon them. @@ -289,28 +288,27 @@ use sp_runtime::{ // - assigns a dispatch class `operational` if the argument of the call is more than 1000. 
struct WeightForSetDummy(BalanceOf); -impl WeighData<(&BalanceOf,)> for WeightForSetDummy -{ - fn weigh_data(&self, target: (&BalanceOf,)) -> Weight { - let multiplier = self.0; - (*target.0 * multiplier).saturated_into::() - } +impl WeighData<(&BalanceOf,)> for WeightForSetDummy { + fn weigh_data(&self, target: (&BalanceOf,)) -> Weight { + let multiplier = self.0; + (*target.0 * multiplier).saturated_into::() + } } impl ClassifyDispatch<(&BalanceOf,)> for WeightForSetDummy { - fn classify_dispatch(&self, target: (&BalanceOf,)) -> DispatchClass { - if *target.0 > >::from(1000u32) { - DispatchClass::Operational - } else { - DispatchClass::Normal - } - } + fn classify_dispatch(&self, target: (&BalanceOf,)) -> DispatchClass { + if *target.0 > >::from(1000u32) { + DispatchClass::Operational + } else { + DispatchClass::Normal + } + } } impl PaysFee<(&BalanceOf,)> for WeightForSetDummy { - fn pays_fee(&self, _target: (&BalanceOf,)) -> bool { - true - } + fn pays_fee(&self, _target: (&BalanceOf,)) -> bool { + true + } } /// A type alias for the balance type from this pallet's point of view. @@ -322,61 +320,64 @@ type BalanceOf = ::Balance; /// /// `frame_system::Trait` should always be included in our implied traits. pub trait Trait: pallet_balances::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; } decl_storage! { - // A macro for the Storage trait, and its implementation, for this pallet. - // This allows for type-safe usage of the Substrate storage database, so you can - // keep things around between blocks. - // - // It is important to update your storage name so that your pallet's - // storage items are isolated from other pallets. - // ---------------------------------vvvvvvv - trait Store for Module as Example { - // Any storage declarations of the form: - // `pub? Name get(fn getter_name)? 
[config()|config(myname)] [build(|_| {...})] : (= )?;` - // where `` is either: - // - `Type` (a basic value item); or - // - `map hasher(HasherKind) KeyType => ValueType` (a map item). - // - // Note that there are two optional modifiers for the storage type declaration. - // - `Foo: Option`: - // - `Foo::put(1); Foo::get()` returns `Some(1)`; - // - `Foo::kill(); Foo::get()` returns `None`. - // - `Foo: u32`: - // - `Foo::put(1); Foo::get()` returns `1`; - // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). - // e.g. Foo: u32; - // e.g. pub Bar get(fn bar): map hasher(blake2_128_concat) T::AccountId => Vec<(T::Balance, u64)>; - // - // For basic value items, you'll get a type which implements - // `frame_support::StorageValue`. For map items, you'll get a type which - // implements `frame_support::StorageMap`. - // - // If they have a getter (`get(getter_name)`), then your pallet will come - // equipped with `fn getter_name() -> Type` for basic value items or - // `fn getter_name(key: KeyType) -> ValueType` for map items. - Dummy get(fn dummy) config(): Option; - - // A map that has enumerable entries. - Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; - - // this one uses the default, we'll demonstrate the usage of 'mutate' API. - Foo get(fn foo) config(): T::Balance; - } + // A macro for the Storage trait, and its implementation, for this pallet. + // This allows for type-safe usage of the Substrate storage database, so you can + // keep things around between blocks. + // + // It is important to update your storage name so that your pallet's + // storage items are isolated from other pallets. + // ---------------------------------vvvvvvv + trait Store for Module as Example { + // Any storage declarations of the form: + // `pub? Name get(fn getter_name)? 
[config()|config(myname)] [build(|_| {...})] : (= )?;` + // where `` is either: + // - `Type` (a basic value item); or + // - `map hasher(HasherKind) KeyType => ValueType` (a map item). + // + // Note that there are two optional modifiers for the storage type declaration. + // - `Foo: Option`: + // - `Foo::put(1); Foo::get()` returns `Some(1)`; + // - `Foo::kill(); Foo::get()` returns `None`. + // - `Foo: u32`: + // - `Foo::put(1); Foo::get()` returns `1`; + // - `Foo::kill(); Foo::get()` returns `0` (u32::default()). + // e.g. Foo: u32; + // e.g. pub Bar get(fn bar): map hasher(blake2_128_concat) T::AccountId => Vec<(T::Balance, u64)>; + // + // For basic value items, you'll get a type which implements + // `frame_support::StorageValue`. For map items, you'll get a type which + // implements `frame_support::StorageMap`. + // + // If they have a getter (`get(getter_name)`), then your pallet will come + // equipped with `fn getter_name() -> Type` for basic value items or + // `fn getter_name(key: KeyType) -> ValueType` for map items. + Dummy get(fn dummy) config(): Option; + + // A map that has enumerable entries. + Bar get(fn bar) config(): map hasher(blake2_128_concat) T::AccountId => T::Balance; + + // this one uses the default, we'll demonstrate the usage of 'mutate' API. + Foo get(fn foo) config(): T::Balance; + } } decl_event!( - /// Events are a simple means of reporting specific conditions and - /// circumstances that have happened that users, Dapps and/or chain explorers would find - /// interesting and otherwise difficult to detect. - pub enum Event where B = ::Balance { - // Just a normal `enum`, here's a dummy event to ensure it compiles. - /// Dummy event, just here so there's a generic type that's used. - Dummy(B), - } + /// Events are a simple means of reporting specific conditions and + /// circumstances that have happened that users, Dapps and/or chain explorers would find + /// interesting and otherwise difficult to detect. 
+ pub enum Event + where + B = ::Balance, + { + // Just a normal `enum`, here's a dummy event to ensure it compiles. + /// Dummy event, just here so there's a generic type that's used. + Dummy(B), + } ); // The module declaration. This states the entry points that we handle. The @@ -414,135 +415,135 @@ decl_event!( // in system that do the matching for you and return a convenient result: `ensure_signed`, // `ensure_root` and `ensure_none`. decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what its working on. - pub struct Module for enum Call where origin: T::Origin { - /// Deposit one of this pallet's events by using the default implementation. - /// It is also possible to provide a custom implementation. - /// For non-generic events, the generic parameter just needs to be dropped, so that it - /// looks like: `fn deposit_event() = default;`. - fn deposit_event() = default; - /// This is your public interface. Be extremely careful. - /// This is just a simple example of how to interact with the pallet from the external - /// world. - // This just increases the value of `Dummy` by `increase_by`. - // - // Since this is a dispatched function there are two extremely important things to - // remember: - // - // - MUST NOT PANIC: Under no circumstances (save, perhaps, storage getting into an - // irreparably damaged state) must this function panic. - // - NO SIDE-EFFECTS ON ERROR: This function must either complete totally (and return - // `Ok(())` or it must have no side-effects on storage and return `Err('Some reason')`. - // - // The first is relatively easy to audit for - just ensure all panickers are removed from - // logic that executes in production (which you do anyway, right?!). To ensure the second - // is followed, you should do all tests for validity at the top of your function. This - // is stuff like checking the sender (`origin`) or that state is such that the operation - // makes sense. 
- // - // Once you've determined that it's all good, then enact the operation and change storage. - // If you can't be certain that the operation will succeed without substantial computation - // then you have a classic blockchain attack scenario. The normal way of managing this is - // to attach a bond to the operation. As the first major alteration of storage, reserve - // some value from the sender's account (`Balances` Pallet has a `reserve` function for - // exactly this scenario). This amount should be enough to cover any costs of the - // substantial execution in case it turns out that you can't proceed with the operation. - // - // If it eventually transpires that the operation is fine and, therefore, that the - // expense of the checks should be borne by the network, then you can refund the reserved - // deposit. If, however, the operation turns out to be invalid and the computation is - // wasted, then you can burn it or repatriate elsewhere. - // - // Security bonds ensure that attackers can't game it by ensuring that anyone interacting - // with the system either progresses it or pays for the trouble of faffing around with - // no progress. - // - // If you don't respect these rules, it is likely that your chain will be attackable. - // - // Each transaction can define an optional `#[weight]` attribute to convey a set of static - // information about its dispatch. FRAME System and FRAME Executive pallet then use this - // information to properly execute the transaction, whilst keeping the total load of the - // chain in a moderate rate. - // - // The _right-hand-side_ value of the `#[weight]` attribute can be any type that implements - // a set of traits, namely [`WeighData`] and [`ClassifyDispatch`]. The former conveys the - // weight (a numeric representation of pure execution time and difficulty) of the - // transaction and the latter demonstrates the [`DispatchClass`] of the call. 
A higher - // weight means a larger transaction (less of which can be placed in a single block). - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn accumulate_dummy(origin, increase_by: T::Balance) -> DispatchResult { - // This is a public call, so we ensure that the origin is some signed account. - let _sender = ensure_signed(origin)?; - - // Read the value of dummy from storage. - // let dummy = Self::dummy(); - // Will also work using the `::get` on the storage item type itself: - // let dummy = >::get(); - - // Calculate the new value. - // let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); - - // Put the new value into storage. - // >::put(new_dummy); - // Will also work with a reference: - // >::put(&new_dummy); - - // Here's the new one of read and then modify the value. - >::mutate(|dummy| { - let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); - *dummy = Some(new_dummy); - }); - - // Let's deposit an event to let the outside world know this happened. - Self::deposit_event(RawEvent::Dummy(increase_by)); - - // All good. - Ok(()) - } - - /// A privileged call; in this case it resets our dummy value to something new. - // Implementation of a privileged call. The `origin` parameter is ROOT because - // it's not (directly) from an extrinsic, but rather the system as a whole has decided - // to execute it. Different runtimes have different reasons for allow privileged - // calls to be executed - we don't need to care why. Because it's privileged, we can - // assume it's a one-off operation and substantial processing/storage/memory can be used - // without worrying about gameability or attack scenarios. - // If you do not specify `Result` explicitly as return value, it will be added automatically - // for you and `Ok(())` will be returned. - #[weight = WeightForSetDummy::(>::from(100u32))] - fn set_dummy(origin, #[compact] new_value: T::Balance) { - ensure_root(origin)?; - // Put the new value into storage. 
- >::put(new_value); - } - - // The signature could also look like: `fn on_initialize()`. - // This function could also very well have a weight annotation, similar to any other. The - // only difference being that if it is not annotated, the default is - // `SimpleDispatchInfo::zero()`, which resolves into no weight. - fn on_initialize(_n: T::BlockNumber) -> Weight { - // Anything that needs to be done at the start of the block. - // We don't do anything here. - - MINIMUM_WEIGHT - } - - // The signature could also look like: `fn on_finalize()` - fn on_finalize(_n: T::BlockNumber) { - // Anything that needs to be done at the end of the block. - // We just kill our dummy storage item. - >::kill(); - } - - // A runtime code run after every block and have access to extended set of APIs. - // - // For instance you can generate extrinsics for the upcoming produced block. - fn offchain_worker(_n: T::BlockNumber) { - // We don't do anything here. - // but we could dispatch extrinsic (transaction/unsigned/inherent) using - // sp_io::submit_extrinsic - } - } + // Simple declaration of the `Module` type. Lets the macro know what its working on. + pub struct Module for enum Call where origin: T::Origin { + /// Deposit one of this pallet's events by using the default implementation. + /// It is also possible to provide a custom implementation. + /// For non-generic events, the generic parameter just needs to be dropped, so that it + /// looks like: `fn deposit_event() = default;`. + fn deposit_event() = default; + /// This is your public interface. Be extremely careful. + /// This is just a simple example of how to interact with the pallet from the external + /// world. + // This just increases the value of `Dummy` by `increase_by`. 
+ // + // Since this is a dispatched function there are two extremely important things to + // remember: + // + // - MUST NOT PANIC: Under no circumstances (save, perhaps, storage getting into an + // irreparably damaged state) must this function panic. + // - NO SIDE-EFFECTS ON ERROR: This function must either complete totally (and return + // `Ok(())` or it must have no side-effects on storage and return `Err('Some reason')`. + // + // The first is relatively easy to audit for - just ensure all panickers are removed from + // logic that executes in production (which you do anyway, right?!). To ensure the second + // is followed, you should do all tests for validity at the top of your function. This + // is stuff like checking the sender (`origin`) or that state is such that the operation + // makes sense. + // + // Once you've determined that it's all good, then enact the operation and change storage. + // If you can't be certain that the operation will succeed without substantial computation + // then you have a classic blockchain attack scenario. The normal way of managing this is + // to attach a bond to the operation. As the first major alteration of storage, reserve + // some value from the sender's account (`Balances` Pallet has a `reserve` function for + // exactly this scenario). This amount should be enough to cover any costs of the + // substantial execution in case it turns out that you can't proceed with the operation. + // + // If it eventually transpires that the operation is fine and, therefore, that the + // expense of the checks should be borne by the network, then you can refund the reserved + // deposit. If, however, the operation turns out to be invalid and the computation is + // wasted, then you can burn it or repatriate elsewhere. + // + // Security bonds ensure that attackers can't game it by ensuring that anyone interacting + // with the system either progresses it or pays for the trouble of faffing around with + // no progress. 
+ // + // If you don't respect these rules, it is likely that your chain will be attackable. + // + // Each transaction can define an optional `#[weight]` attribute to convey a set of static + // information about its dispatch. FRAME System and FRAME Executive pallet then use this + // information to properly execute the transaction, whilst keeping the total load of the + // chain in a moderate rate. + // + // The _right-hand-side_ value of the `#[weight]` attribute can be any type that implements + // a set of traits, namely [`WeighData`] and [`ClassifyDispatch`]. The former conveys the + // weight (a numeric representation of pure execution time and difficulty) of the + // transaction and the latter demonstrates the [`DispatchClass`] of the call. A higher + // weight means a larger transaction (less of which can be placed in a single block). + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn accumulate_dummy(origin, increase_by: T::Balance) -> DispatchResult { + // This is a public call, so we ensure that the origin is some signed account. + let _sender = ensure_signed(origin)?; + + // Read the value of dummy from storage. + // let dummy = Self::dummy(); + // Will also work using the `::get` on the storage item type itself: + // let dummy = >::get(); + + // Calculate the new value. + // let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); + + // Put the new value into storage. + // >::put(new_dummy); + // Will also work with a reference: + // >::put(&new_dummy); + + // Here's the new one of read and then modify the value. + >::mutate(|dummy| { + let new_dummy = dummy.map_or(increase_by, |dummy| dummy + increase_by); + *dummy = Some(new_dummy); + }); + + // Let's deposit an event to let the outside world know this happened. + Self::deposit_event(RawEvent::Dummy(increase_by)); + + // All good. + Ok(()) + } + + /// A privileged call; in this case it resets our dummy value to something new. + // Implementation of a privileged call. 
The `origin` parameter is ROOT because + // it's not (directly) from an extrinsic, but rather the system as a whole has decided + // to execute it. Different runtimes have different reasons for allow privileged + // calls to be executed - we don't need to care why. Because it's privileged, we can + // assume it's a one-off operation and substantial processing/storage/memory can be used + // without worrying about gameability or attack scenarios. + // If you do not specify `Result` explicitly as return value, it will be added automatically + // for you and `Ok(())` will be returned. + #[weight = WeightForSetDummy::(>::from(100u32))] + fn set_dummy(origin, #[compact] new_value: T::Balance) { + ensure_root(origin)?; + // Put the new value into storage. + >::put(new_value); + } + + // The signature could also look like: `fn on_initialize()`. + // This function could also very well have a weight annotation, similar to any other. The + // only difference being that if it is not annotated, the default is + // `SimpleDispatchInfo::zero()`, which resolves into no weight. + fn on_initialize(_n: T::BlockNumber) -> Weight { + // Anything that needs to be done at the start of the block. + // We don't do anything here. + + MINIMUM_WEIGHT + } + + // The signature could also look like: `fn on_finalize()` + fn on_finalize(_n: T::BlockNumber) { + // Anything that needs to be done at the end of the block. + // We just kill our dummy storage item. + >::kill(); + } + + // A runtime code run after every block and have access to extended set of APIs. + // + // For instance you can generate extrinsics for the upcoming produced block. + fn offchain_worker(_n: T::BlockNumber) { + // We don't do anything here. + // but we could dispatch extrinsic (transaction/unsigned/inherent) using + // sp_io::submit_extrinsic + } + } } // The main implementation block for the pallet. Functions here fall into three broad @@ -551,21 +552,21 @@ decl_module! 
{ // functions that do not write to storage and operation functions that do. // - Private functions. These are your usual private utilities unavailable to other pallets. impl Module { - // Add public immutables and private mutables. - #[allow(dead_code)] - fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { - let _sender = ensure_signed(origin)?; - - let prev = >::get(); - // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an Option<> type. - let result = >::mutate(|foo| { - *foo = *foo + increase_by; - *foo - }); - assert!(prev + increase_by == result); - - Ok(()) - } + // Add public immutables and private mutables. + #[allow(dead_code)] + fn accumulate_foo(origin: T::Origin, increase_by: T::Balance) -> DispatchResult { + let _sender = ensure_signed(origin)?; + + let prev = >::get(); + // Because Foo has 'default', the type of 'foo' in closure is the raw type instead of an Option<> type. + let result = >::mutate(|foo| { + *foo = *foo + increase_by; + *foo + }); + assert!(prev + increase_by == result); + + Ok(()) + } } // Similar to other FRAME pallets, your pallet can also define a signed extension and perform some @@ -607,253 +608,263 @@ impl Module { pub struct WatchDummy(PhantomData); impl sp_std::fmt::Debug for WatchDummy { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "WatchDummy") - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "WatchDummy") + } } impl SignedExtension for WatchDummy { - const IDENTIFIER: &'static str = "WatchDummy"; - type AccountId = T::AccountId; - // Note that this could also be assigned to the top-level call enum. It is passed into the - // Balances Pallet directly and since `Trait: pallet_balances::Trait`, you could also use `T::Call`. - // In that case, you would have had access to all call variants and could match on variants from - // other pallets. 
- type Call = Call; - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - - fn validate( - &self, - _who: &Self::AccountId, - call: &Self::Call, - _info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - // if the transaction is too big, just drop it. - if len > 200 { - return InvalidTransaction::ExhaustsResources.into() - } - - // check for `set_dummy` - match call { - Call::set_dummy(..) => { - sp_runtime::print("set_dummy was received."); - - let mut valid_tx = ValidTransaction::default(); - valid_tx.priority = Bounded::max_value(); - Ok(valid_tx) - } - _ => Ok(Default::default()), - } - } + const IDENTIFIER: &'static str = "WatchDummy"; + type AccountId = T::AccountId; + // Note that this could also be assigned to the top-level call enum. It is passed into the + // Balances Pallet directly and since `Trait: pallet_balances::Trait`, you could also use `T::Call`. + // In that case, you would have had access to all call variants and could match on variants from + // other pallets. + type Call = Call; + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + + fn validate( + &self, + _who: &Self::AccountId, + call: &Self::Call, + _info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + // if the transaction is too big, just drop it. + if len > 200 { + return InvalidTransaction::ExhaustsResources.into(); + } + + // check for `set_dummy` + match call { + Call::set_dummy(..) 
=> { + sp_runtime::print("set_dummy was received."); + + let mut valid_tx = ValidTransaction::default(); + valid_tx.priority = Bounded::max_value(); + Ok(valid_tx) + } + _ => Ok(Default::default()), + } + } } #[cfg(feature = "runtime-benchmarks")] mod benchmarking { - use super::*; - use frame_benchmarking::{benchmarks, account}; - use frame_system::RawOrigin; - - benchmarks!{ - _ { - // Define a common range for `b`. - let b in 1 .. 1000 => (); - } - - // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. - accumulate_dummy { - let b in ...; - let caller = account("caller", 0, 0); - }: _ (RawOrigin::Signed(caller), b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..1000] range. - set_dummy { - let b in ...; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of `set_dummy` for b in [1..10] range. - another_set_dummy { - let b in 1 .. 10; - }: set_dummy (RawOrigin::Root, b.into()) - - // This will measure the execution time of sorting a vector. - sort_vector { - let x in 0 .. 10000; - let mut m = Vec::::new(); - for i in (0..x).rev() { - m.push(i); - } - }: { - m.sort(); - } - } - - #[cfg(test)] - mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_accumulate_dummy::()); - assert_ok!(test_benchmark_set_dummy::()); - assert_ok!(test_benchmark_another_set_dummy::()); - assert_ok!(test_benchmark_sort_vector::()); - }); - } - } + use super::*; + use frame_benchmarking::{account, benchmarks}; + use frame_system::RawOrigin; + + benchmarks! { + _ { + // Define a common range for `b`. + let b in 1 .. 1000 => (); + } + + // This will measure the execution time of `accumulate_dummy` for b in [1..1000] range. 
+ accumulate_dummy { + let b in ...; + let caller = account("caller", 0, 0); + }: _ (RawOrigin::Signed(caller), b.into()) + + // This will measure the execution time of `set_dummy` for b in [1..1000] range. + set_dummy { + let b in ...; + }: set_dummy (RawOrigin::Root, b.into()) + + // This will measure the execution time of `set_dummy` for b in [1..10] range. + another_set_dummy { + let b in 1 .. 10; + }: set_dummy (RawOrigin::Root, b.into()) + + // This will measure the execution time of sorting a vector. + sort_vector { + let x in 0 .. 10000; + let mut m = Vec::::new(); + for i in (0..x).rev() { + m.push(i); + } + }: { + m.sort(); + } + } + + #[cfg(test)] + mod tests { + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_accumulate_dummy::()); + assert_ok!(test_benchmark_set_dummy::()); + assert_ok!(test_benchmark_another_set_dummy::()); + assert_ok!(test_benchmark_sort_vector::()); + }); + } + } } #[cfg(test)] mod tests { - use super::*; - - use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, weights::{DispatchInfo, GetDispatchInfo}, - traits::{OnInitialize, OnFinalize} - }; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - impl Trait for Test { - type Event = (); - } - type System = frame_system::Module; - type Example = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - pallet_balances::GenesisConfig::::default().assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ - dummy: 42, - // we configure the map with (key, value) pairs. - bar: vec![(1, 2), (2, 3)], - foo: 24, - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - #[test] - fn it_works_for_optional_value() { - new_test_ext().execute_with(|| { - // Check that GenesisBuilder works properly. 
- assert_eq!(Example::dummy(), Some(42)); - - // Check that accumulate works when we have Some value in Dummy already. - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 27)); - assert_eq!(Example::dummy(), Some(69)); - - // Check that finalizing the block removes Dummy from storage. - >::on_finalize(1); - assert_eq!(Example::dummy(), None); - - // Check that accumulate works when we Dummy has None in it. - >::on_initialize(2); - assert_ok!(Example::accumulate_dummy(Origin::signed(1), 42)); - assert_eq!(Example::dummy(), Some(42)); - }); - } - - #[test] - fn it_works_for_default_value() { - new_test_ext().execute_with(|| { - assert_eq!(Example::foo(), 24); - assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); - assert_eq!(Example::foo(), 25); - }); - } - - #[test] - fn signed_ext_watch_dummy_works() { - new_test_ext().execute_with(|| { - let call = >::set_dummy(10); - let info = DispatchInfo::default(); - - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 150) - .unwrap() - .priority, - Bounded::max_value(), - ); - assert_eq!( - WatchDummy::(PhantomData).validate(&1, &call, &info, 250), - InvalidTransaction::ExhaustsResources.into(), - ); - }) - } - - #[test] - fn weights_work() { - // must have a defined weight. - let default_call = >::accumulate_dummy(10); - let info = default_call.get_dispatch_info(); - // aka. `let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` - assert_eq!(info.weight, 10_000_000); - - // must have a custom weight of `100 * arg = 2000` - let custom_call = >::set_dummy(20); - let info = custom_call.get_dispatch_info(); - assert_eq!(info.weight, 2000); - } + use super::*; + + use frame_support::{ + assert_ok, impl_outer_origin, parameter_types, + traits::{OnFinalize, OnInitialize}, + weights::{DispatchInfo, GetDispatchInfo}, + }; + use sp_core::H256; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. 
`u64` is used as the `AccountId` and no `Signature`s are required. + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + } + parameter_types! { + pub const ExistentialDeposit: u64 = 1; + } + impl pallet_balances::Trait for Test { + type Balance = u64; + type DustRemoval = (); + type Event = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + } + impl Trait for Test { + type Event = (); + } + type System = frame_system::Module; + type Example = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. 
+ pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + // We use default for brevity, but you can configure as desired if needed. + pallet_balances::GenesisConfig::::default() + .assimilate_storage(&mut t) + .unwrap(); + GenesisConfig:: { + dummy: 42, + // we configure the map with (key, value) pairs. + bar: vec![(1, 2), (2, 3)], + foo: 24, + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } + + #[test] + fn it_works_for_optional_value() { + new_test_ext().execute_with(|| { + // Check that GenesisBuilder works properly. + assert_eq!(Example::dummy(), Some(42)); + + // Check that accumulate works when we have Some value in Dummy already. + assert_ok!(Example::accumulate_dummy(Origin::signed(1), 27)); + assert_eq!(Example::dummy(), Some(69)); + + // Check that finalizing the block removes Dummy from storage. + >::on_finalize(1); + assert_eq!(Example::dummy(), None); + + // Check that accumulate works when we Dummy has None in it. + >::on_initialize(2); + assert_ok!(Example::accumulate_dummy(Origin::signed(1), 42)); + assert_eq!(Example::dummy(), Some(42)); + }); + } + + #[test] + fn it_works_for_default_value() { + new_test_ext().execute_with(|| { + assert_eq!(Example::foo(), 24); + assert_ok!(Example::accumulate_foo(Origin::signed(1), 1)); + assert_eq!(Example::foo(), 25); + }); + } + + #[test] + fn signed_ext_watch_dummy_works() { + new_test_ext().execute_with(|| { + let call = >::set_dummy(10); + let info = DispatchInfo::default(); + + assert_eq!( + WatchDummy::(PhantomData) + .validate(&1, &call, &info, 150) + .unwrap() + .priority, + Bounded::max_value(), + ); + assert_eq!( + WatchDummy::(PhantomData).validate(&1, &call, &info, 250), + InvalidTransaction::ExhaustsResources.into(), + ); + }) + } + + #[test] + fn weights_work() { + // must have a defined weight. + let default_call = >::accumulate_dummy(10); + let info = default_call.get_dispatch_info(); + // aka. 
`let info = as GetDispatchInfo>::get_dispatch_info(&default_call);` + assert_eq!(info.weight, 10_000_000); + + // must have a custom weight of `100 * arg = 2000` + let custom_call = >::set_dummy(20); + let info = custom_call.get_dispatch_info(); + assert_eq!(info.weight, 2000); + } } diff --git a/frame/executive/src/lib.rs b/frame/executive/src/lib.rs index 747fc85866..e51b666b80 100644 --- a/frame/executive/src/lib.rs +++ b/frame/executive/src/lib.rs @@ -76,26 +76,28 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, marker::PhantomData}; +use codec::{Codec, Encode}; use frame_support::{ - storage::StorageValue, weights::{GetDispatchInfo, DispatchInfo}, - traits::{OnInitialize, OnFinalize, OnRuntimeUpgrade, OffchainWorker}, + storage::StorageValue, + traits::{OffchainWorker, OnFinalize, OnInitialize, OnRuntimeUpgrade}, + weights::{DispatchInfo, GetDispatchInfo}, }; +use frame_system::{extrinsics_root, DigestOf}; use sp_runtime::{ - generic::Digest, ApplyExtrinsicResult, - traits::{ - self, Header, Zero, One, Checkable, Applyable, CheckEqual, ValidateUnsigned, NumberFor, - Block as BlockT, Dispatchable, Saturating, - }, - transaction_validity::{TransactionValidity, TransactionSource}, + generic::Digest, + traits::{ + self, Applyable, Block as BlockT, CheckEqual, Checkable, Dispatchable, Header, NumberFor, + One, Saturating, ValidateUnsigned, Zero, + }, + transaction_validity::{TransactionSource, TransactionValidity}, + ApplyExtrinsicResult, }; -use codec::{Codec, Encode}; -use frame_system::{extrinsics_root, DigestOf}; +use sp_std::{marker::PhantomData, prelude::*}; /// Trait that can be used to execute a block. pub trait ExecuteBlock { - /// Actually execute all transitions for `block`. - fn execute_block(block: Block); + /// Actually execute all transitions for `block`. 
+ fn execute_block(block: Block); } pub type CheckedOf = >::Checked; @@ -103,786 +105,886 @@ pub type CallOf = as Applyable>::Call; pub type OriginOf = as Dispatchable>::Origin; pub struct Executive( - PhantomData<(System, Block, Context, UnsignedValidator, AllModules)> + PhantomData<(System, Block, Context, UnsignedValidator, AllModules)>, ); impl< - System: frame_system::Trait, - Block: traits::Block, - Context: Default, - UnsignedValidator, - AllModules: - OnRuntimeUpgrade + - OnInitialize + - OnFinalize + - OffchainWorker, -> ExecuteBlock for Executive + System: frame_system::Trait, + Block: traits::Block
, + Context: Default, + UnsignedValidator, + AllModules: OnRuntimeUpgrade + + OnInitialize + + OnFinalize + + OffchainWorker, + > ExecuteBlock for Executive where - Block::Extrinsic: Checkable + Codec, - CheckedOf: - Applyable + - GetDispatchInfo, - CallOf: Dispatchable, - OriginOf: From>, - UnsignedValidator: ValidateUnsigned>, + Block::Extrinsic: Checkable + Codec, + CheckedOf: Applyable + GetDispatchInfo, + CallOf: Dispatchable, + OriginOf: From>, + UnsignedValidator: ValidateUnsigned>, { - fn execute_block(block: Block) { - Executive::::execute_block(block); - } + fn execute_block(block: Block) { + Executive::::execute_block(block); + } } impl< - System: frame_system::Trait, - Block: traits::Block, - Context: Default, - UnsignedValidator, - AllModules: - OnRuntimeUpgrade + - OnInitialize + - OnFinalize + - OffchainWorker, -> Executive + System: frame_system::Trait, + Block: traits::Block
, + Context: Default, + UnsignedValidator, + AllModules: OnRuntimeUpgrade + + OnInitialize + + OnFinalize + + OffchainWorker, + > Executive where - Block::Extrinsic: Checkable + Codec, - CheckedOf: - Applyable + - GetDispatchInfo, - CallOf: Dispatchable, - OriginOf: From>, - UnsignedValidator: ValidateUnsigned>, + Block::Extrinsic: Checkable + Codec, + CheckedOf: Applyable + GetDispatchInfo, + CallOf: Dispatchable, + OriginOf: From>, + UnsignedValidator: ValidateUnsigned>, { - /// Start the execution of a particular block. - pub fn initialize_block(header: &System::Header) { - let digests = Self::extract_pre_digest(&header); - Self::initialize_block_impl( - header.number(), - header.parent_hash(), - header.extrinsics_root(), - &digests - ); - } - - fn extract_pre_digest(header: &System::Header) -> DigestOf { - let mut digest = >::default(); - header.digest().logs() - .iter() - .for_each(|d| if d.as_pre_runtime().is_some() { - digest.push(d.clone()) - }); - digest - } - - fn initialize_block_impl( - block_number: &System::BlockNumber, - parent_hash: &System::Hash, - extrinsics_root: &System::Hash, - digest: &Digest, - ) { - if Self::runtime_upgraded() { - // System is not part of `AllModules`, so we need to call this manually. - as OnRuntimeUpgrade>::on_runtime_upgrade(); - let weight = ::on_runtime_upgrade(); - >::register_extra_weight_unchecked(weight); - } - >::initialize( - block_number, - parent_hash, - extrinsics_root, - digest, - frame_system::InitKind::Full, - ); - as OnInitialize>::on_initialize(*block_number); - let weight = >::on_initialize(*block_number); - >::register_extra_weight_unchecked(weight); - - frame_system::Module::::note_finished_initialize(); - } - - /// Returns if the runtime was upgraded since the last time this function was called. 
- fn runtime_upgraded() -> bool { - let last = frame_system::LastRuntimeUpgrade::get(); - let current = >::get(); - - if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { - frame_system::LastRuntimeUpgrade::put( - frame_system::LastRuntimeUpgradeInfo::from(current), - ); - true - } else { - false - } - } - - fn initial_checks(block: &Block) { - let header = block.header(); - - // Check that `parent_hash` is correct. - let n = header.number().clone(); - assert!( - n > System::BlockNumber::zero() - && >::block_hash(n - System::BlockNumber::one()) == *header.parent_hash(), - "Parent hash should be valid." - ); - - // Check that transaction trie root represents the transactions. - let xts_root = extrinsics_root::(&block.extrinsics()); - header.extrinsics_root().check_equal(&xts_root); - assert!(header.extrinsics_root() == &xts_root, "Transaction trie root must be valid."); - } - - /// Actually execute all transitions for `block`. - pub fn execute_block(block: Block) { - Self::initialize_block(block.header()); - - // any initial checks - Self::initial_checks(&block); - - let batching_safeguard = sp_runtime::SignatureBatching::start(); - // execute extrinsics - let (header, extrinsics) = block.deconstruct(); - Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); - if !sp_runtime::SignatureBatching::verify(batching_safeguard) { - panic!("Signature verification failed."); - } - - // any final checks - Self::final_checks(&header); - } - - /// Execute given extrinsics and take care of post-extrinsics book-keeping. - fn execute_extrinsics_with_book_keeping(extrinsics: Vec, block_number: NumberFor) { - extrinsics.into_iter().for_each(Self::apply_extrinsic_no_note); - - // post-extrinsics book-keeping - >::note_finished_extrinsics(); - as OnFinalize>::on_finalize(block_number); - >::on_finalize(block_number); - } - - /// Finalize the block - it is up the caller to ensure that all header fields are valid - /// except state-root. 
- pub fn finalize_block() -> System::Header { - >::note_finished_extrinsics(); - let block_number = >::block_number(); - as OnFinalize>::on_finalize(block_number); - >::on_finalize(block_number); - - // set up extrinsics - >::derive_extrinsics(); - >::finalize() - } - - /// Apply extrinsic outside of the block execution function. - /// - /// This doesn't attempt to validate anything regarding the block, but it builds a list of uxt - /// hashes. - pub fn apply_extrinsic(uxt: Block::Extrinsic) -> ApplyExtrinsicResult { - let encoded = uxt.encode(); - let encoded_len = encoded.len(); - Self::apply_extrinsic_with_len(uxt, encoded_len, Some(encoded)) - } - - /// Apply an extrinsic inside the block execution function. - fn apply_extrinsic_no_note(uxt: Block::Extrinsic) { - let l = uxt.encode().len(); - match Self::apply_extrinsic_with_len(uxt, l, None) { - Ok(_) => (), - Err(e) => { let err: &'static str = e.into(); panic!(err) }, - } - } - - /// Actually apply an extrinsic given its `encoded_len`; this doesn't note its hash. - fn apply_extrinsic_with_len( - uxt: Block::Extrinsic, - encoded_len: usize, - to_note: Option>, - ) -> ApplyExtrinsicResult { - // Verify that the signature is good. - let xt = uxt.check(&Default::default())?; - - // We don't need to make sure to `note_extrinsic` only after we know it's going to be - // executed to prevent it from leaking in storage since at this point, it will either - // execute or panic (and revert storage changes). - if let Some(encoded) = to_note { - >::note_extrinsic(encoded); - } - - // AUDIT: Under no circumstances may this function panic from here onwards. 
- - // Decode parameters and dispatch - let dispatch_info = xt.get_dispatch_info(); - let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; - - >::note_applied_extrinsic(&r, encoded_len as u32, dispatch_info); - - Ok(r) - } - - fn final_checks(header: &System::Header) { - // remove temporaries - let new_header = >::finalize(); - - // check digest - assert_eq!( - header.digest().logs().len(), - new_header.digest().logs().len(), - "Number of digest items must match that calculated." - ); - let items_zip = header.digest().logs().iter().zip(new_header.digest().logs().iter()); - for (header_item, computed_item) in items_zip { - header_item.check_equal(&computed_item); - assert!(header_item == computed_item, "Digest item must match that calculated."); - } - - // check storage root. - let storage_root = new_header.state_root(); - header.state_root().check_equal(&storage_root); - assert!(header.state_root() == storage_root, "Storage root must match that calculated."); - } - - /// Check a given signed transaction for validity. This doesn't execute any - /// side-effects; it merely checks whether the transaction would panic if it were included or not. - /// - /// Changes made to storage should be discarded. - pub fn validate_transaction( - source: TransactionSource, - uxt: Block::Extrinsic, - ) -> TransactionValidity { - use frame_support::tracing_span; - - tracing_span!{ "validate_transaction::using_encoded"; - let encoded_len = uxt.using_encoded(|d| d.len()); - }; - - tracing_span!{ "validate_transaction::check"; - let xt = uxt.check(&Default::default())?; - }; - - tracing_span!{ "validate_transaction::dispatch_info"; - let dispatch_info = xt.get_dispatch_info(); - }; - - tracing_span!{ "validate_transaction::validate"; - let result = xt.validate::(source, &dispatch_info, encoded_len); - }; - - result - } - - /// Start an offchain worker and generate extrinsics. 
- pub fn offchain_worker(header: &System::Header) { - // We need to keep events available for offchain workers, - // hence we initialize the block manually. - // OffchainWorker RuntimeApi should skip initialization. - let digests = Self::extract_pre_digest(header); - - >::initialize( - header.number(), - header.parent_hash(), - header.extrinsics_root(), - &digests, - frame_system::InitKind::Inspection, - ); - - // Initialize logger, so the log messages are visible - // also when running WASM. - frame_support::debug::RuntimeLogger::init(); - - >::offchain_worker( - // to maintain backward compatibility we call module offchain workers - // with parent block number. - header.number().saturating_sub(1.into()) - ) - } + /// Start the execution of a particular block. + pub fn initialize_block(header: &System::Header) { + let digests = Self::extract_pre_digest(&header); + Self::initialize_block_impl( + header.number(), + header.parent_hash(), + header.extrinsics_root(), + &digests, + ); + } + + fn extract_pre_digest(header: &System::Header) -> DigestOf { + let mut digest = >::default(); + header.digest().logs().iter().for_each(|d| { + if d.as_pre_runtime().is_some() { + digest.push(d.clone()) + } + }); + digest + } + + fn initialize_block_impl( + block_number: &System::BlockNumber, + parent_hash: &System::Hash, + extrinsics_root: &System::Hash, + digest: &Digest, + ) { + if Self::runtime_upgraded() { + // System is not part of `AllModules`, so we need to call this manually. 
+ as OnRuntimeUpgrade>::on_runtime_upgrade(); + let weight = ::on_runtime_upgrade(); + >::register_extra_weight_unchecked(weight); + } + >::initialize( + block_number, + parent_hash, + extrinsics_root, + digest, + frame_system::InitKind::Full, + ); + as OnInitialize>::on_initialize( + *block_number, + ); + let weight = + >::on_initialize(*block_number); + >::register_extra_weight_unchecked(weight); + + frame_system::Module::::note_finished_initialize(); + } + + /// Returns if the runtime was upgraded since the last time this function was called. + fn runtime_upgraded() -> bool { + let last = frame_system::LastRuntimeUpgrade::get(); + let current = >::get(); + + if last.map(|v| v.was_upgraded(¤t)).unwrap_or(true) { + frame_system::LastRuntimeUpgrade::put(frame_system::LastRuntimeUpgradeInfo::from( + current, + )); + true + } else { + false + } + } + + fn initial_checks(block: &Block) { + let header = block.header(); + + // Check that `parent_hash` is correct. + let n = header.number().clone(); + assert!( + n > System::BlockNumber::zero() + && >::block_hash(n - System::BlockNumber::one()) + == *header.parent_hash(), + "Parent hash should be valid." + ); + + // Check that transaction trie root represents the transactions. + let xts_root = extrinsics_root::(&block.extrinsics()); + header.extrinsics_root().check_equal(&xts_root); + assert!( + header.extrinsics_root() == &xts_root, + "Transaction trie root must be valid." + ); + } + + /// Actually execute all transitions for `block`. 
+ pub fn execute_block(block: Block) { + Self::initialize_block(block.header()); + + // any initial checks + Self::initial_checks(&block); + + let batching_safeguard = sp_runtime::SignatureBatching::start(); + // execute extrinsics + let (header, extrinsics) = block.deconstruct(); + Self::execute_extrinsics_with_book_keeping(extrinsics, *header.number()); + if !sp_runtime::SignatureBatching::verify(batching_safeguard) { + panic!("Signature verification failed."); + } + + // any final checks + Self::final_checks(&header); + } + + /// Execute given extrinsics and take care of post-extrinsics book-keeping. + fn execute_extrinsics_with_book_keeping( + extrinsics: Vec, + block_number: NumberFor, + ) { + extrinsics + .into_iter() + .for_each(Self::apply_extrinsic_no_note); + + // post-extrinsics book-keeping + >::note_finished_extrinsics(); + as OnFinalize>::on_finalize( + block_number, + ); + >::on_finalize(block_number); + } + + /// Finalize the block - it is up the caller to ensure that all header fields are valid + /// except state-root. + pub fn finalize_block() -> System::Header { + >::note_finished_extrinsics(); + let block_number = >::block_number(); + as OnFinalize>::on_finalize( + block_number, + ); + >::on_finalize(block_number); + + // set up extrinsics + >::derive_extrinsics(); + >::finalize() + } + + /// Apply extrinsic outside of the block execution function. + /// + /// This doesn't attempt to validate anything regarding the block, but it builds a list of uxt + /// hashes. + pub fn apply_extrinsic(uxt: Block::Extrinsic) -> ApplyExtrinsicResult { + let encoded = uxt.encode(); + let encoded_len = encoded.len(); + Self::apply_extrinsic_with_len(uxt, encoded_len, Some(encoded)) + } + + /// Apply an extrinsic inside the block execution function. 
+ fn apply_extrinsic_no_note(uxt: Block::Extrinsic) { + let l = uxt.encode().len(); + match Self::apply_extrinsic_with_len(uxt, l, None) { + Ok(_) => (), + Err(e) => { + let err: &'static str = e.into(); + panic!(err) + } + } + } + + /// Actually apply an extrinsic given its `encoded_len`; this doesn't note its hash. + fn apply_extrinsic_with_len( + uxt: Block::Extrinsic, + encoded_len: usize, + to_note: Option>, + ) -> ApplyExtrinsicResult { + // Verify that the signature is good. + let xt = uxt.check(&Default::default())?; + + // We don't need to make sure to `note_extrinsic` only after we know it's going to be + // executed to prevent it from leaking in storage since at this point, it will either + // execute or panic (and revert storage changes). + if let Some(encoded) = to_note { + >::note_extrinsic(encoded); + } + + // AUDIT: Under no circumstances may this function panic from here onwards. + + // Decode parameters and dispatch + let dispatch_info = xt.get_dispatch_info(); + let r = Applyable::apply::(xt, &dispatch_info, encoded_len)?; + + >::note_applied_extrinsic( + &r, + encoded_len as u32, + dispatch_info, + ); + + Ok(r) + } + + fn final_checks(header: &System::Header) { + // remove temporaries + let new_header = >::finalize(); + + // check digest + assert_eq!( + header.digest().logs().len(), + new_header.digest().logs().len(), + "Number of digest items must match that calculated." + ); + let items_zip = header + .digest() + .logs() + .iter() + .zip(new_header.digest().logs().iter()); + for (header_item, computed_item) in items_zip { + header_item.check_equal(&computed_item); + assert!( + header_item == computed_item, + "Digest item must match that calculated." + ); + } + + // check storage root. + let storage_root = new_header.state_root(); + header.state_root().check_equal(&storage_root); + assert!( + header.state_root() == storage_root, + "Storage root must match that calculated." + ); + } + + /// Check a given signed transaction for validity. 
This doesn't execute any + /// side-effects; it merely checks whether the transaction would panic if it were included or not. + /// + /// Changes made to storage should be discarded. + pub fn validate_transaction( + source: TransactionSource, + uxt: Block::Extrinsic, + ) -> TransactionValidity { + use frame_support::tracing_span; + + tracing_span! { "validate_transaction::using_encoded"; + let encoded_len = uxt.using_encoded(|d| d.len()); + }; + + tracing_span! { "validate_transaction::check"; + let xt = uxt.check(&Default::default())?; + }; + + tracing_span! { "validate_transaction::dispatch_info"; + let dispatch_info = xt.get_dispatch_info(); + }; + + tracing_span! { "validate_transaction::validate"; + let result = xt.validate::(source, &dispatch_info, encoded_len); + }; + + result + } + + /// Start an offchain worker and generate extrinsics. + pub fn offchain_worker(header: &System::Header) { + // We need to keep events available for offchain workers, + // hence we initialize the block manually. + // OffchainWorker RuntimeApi should skip initialization. + let digests = Self::extract_pre_digest(header); + + >::initialize( + header.number(), + header.parent_hash(), + header.extrinsics_root(), + &digests, + frame_system::InitKind::Inspection, + ); + + // Initialize logger, so the log messages are visible + // also when running WASM. + frame_support::debug::RuntimeLogger::init(); + + >::offchain_worker( + // to maintain backward compatibility we call module offchain workers + // with parent block number. 
+ header.number().saturating_sub(1.into()), + ) + } } - #[cfg(test)] mod tests { - use super::*; - use sp_core::H256; - use sp_runtime::{ - generic::Era, Perbill, DispatchError, testing::{Digest, Header, Block}, - traits::{Header as HeaderT, BlakeTwo256, IdentityLookup, ConvertInto}, - transaction_validity::{InvalidTransaction, UnknownTransaction, TransactionValidityError}, - }; - use frame_support::{ - impl_outer_event, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, - traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReasons, WithdrawReason}, - }; - use frame_system::{self as system, Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; - use pallet_balances::Call as BalancesCall; - use hex_literal::hex; - - mod custom { - use frame_support::weights::{SimpleDispatchInfo, Weight}; - - pub trait Trait: frame_system::Trait {} - - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin { - #[weight = SimpleDispatchInfo::FixedNormal(100)] - fn some_function(origin) { - // NOTE: does not make any different. - let _ = frame_system::ensure_signed(origin); - } - #[weight = SimpleDispatchInfo::FixedOperational(200)] - fn some_root_operation(origin) { - let _ = frame_system::ensure_root(origin); - } - #[weight = SimpleDispatchInfo::InsecureFreeNormal] - fn some_unsigned_message(origin) { - let _ = frame_system::ensure_none(origin); - } - - // module hooks. - // one with block number arg and one without - fn on_initialize(n: T::BlockNumber) -> Weight { - println!("on_initialize({})", n); - 175 - } - - fn on_finalize() { - println!("on_finalize(?)"); - } - } - } - } - - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Custom = custom::Module; - - use pallet_balances as balances; - - impl_outer_origin! { - pub enum Origin for Runtime { } - } - - impl_outer_event!{ - pub enum MetaEvent for Runtime { - system, - balances, - } - } - impl_outer_dispatch! 
{ - pub enum Call for Runtime where origin: Origin { - frame_system::System, - pallet_balances::Balances, - } - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Runtime; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type Call = Call; - type BlockNumber = u64; - type Hash = sp_core::H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = MetaEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = RuntimeVersion; - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Runtime { - type Balance = u64; - type Event = MetaEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - - parameter_types! 
{ - pub const TransactionBaseFee: u64 = 10; - pub const TransactionByteFee: u64 = 0; - } - impl pallet_transaction_payment::Trait for Runtime { - type Currency = Balances; - type OnTransactionPayment = (); - type TransactionBaseFee = TransactionBaseFee; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = ConvertInto; - type FeeMultiplierUpdate = (); - } - impl custom::Trait for Runtime {} - - impl ValidateUnsigned for Runtime { - type Call = Call; - - fn pre_dispatch(_call: &Self::Call) -> Result<(), TransactionValidityError> { - Ok(()) - } - - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - match call { - Call::Balances(BalancesCall::set_balance(_, _, _)) => Ok(Default::default()), - _ => UnknownTransaction::NoUnsignedValidator.into(), - } - } - } - - pub struct RuntimeVersion; - impl frame_support::traits::Get for RuntimeVersion { - fn get() -> sp_version::RuntimeVersion { - RUNTIME_VERSION.with(|v| v.borrow().clone()) - } - } - - thread_local! 
{ - pub static RUNTIME_VERSION: std::cell::RefCell = - Default::default(); - } - - type SignedExtra = ( - frame_system::CheckEra, - frame_system::CheckNonce, - frame_system::CheckWeight, - pallet_transaction_payment::ChargeTransactionPayment, - ); - type AllModules = (System, Balances, Custom); - type TestXt = sp_runtime::testing::TestXt; - type Executive = super::Executive, ChainContext, Runtime, AllModules>; - - fn extra(nonce: u64, fee: u64) -> SignedExtra { - ( - frame_system::CheckEra::from(Era::Immortal), - frame_system::CheckNonce::from(nonce), - frame_system::CheckWeight::new(), - pallet_transaction_payment::ChargeTransactionPayment::from(fee) - ) - } - - fn sign_extra(who: u64, nonce: u64, fee: u64) -> Option<(u64, SignedExtra)> { - Some((who, extra(nonce, fee))) - } - - #[test] - fn balance_transfer_dispatch_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 211)], - }.assimilate_storage(&mut t).unwrap(); - let xt = TestXt::new(Call::Balances(BalancesCall::transfer(2, 69)), sign_extra(1, 0, 0)); - let weight = xt.get_dispatch_info().weight as u64; - let mut t = sp_io::TestExternalities::new(t); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - let r = Executive::apply_extrinsic(xt); - assert!(r.is_ok()); - assert_eq!(>::total_balance(&1), 142 - 10 - weight); - assert_eq!(>::total_balance(&2), 69); - }); - } - - fn new_test_ext(balance_factor: u64) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 111 * balance_factor)], - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - #[test] - fn block_import_works() { - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), 
- number: 1, - state_root: hex!("489ae9b57a19bb4733a264dc64bbcae9b140a904657a681ed3bb5fbbe8cf412b").into(), - extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), - digest: Digest { logs: vec![], }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_state_root_fails() { - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: [0u8; 32].into(), - extrinsics_root: hex!("03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314").into(), - digest: Digest { logs: vec![], }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - #[should_panic] - fn block_import_of_bad_extrinsic_root_fails() { - new_test_ext(1).execute_with(|| { - Executive::execute_block(Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: hex!("49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48").into(), - extrinsics_root: [0u8; 32].into(), - digest: Digest { logs: vec![], }, - }, - extrinsics: vec![], - }); - }); - } - - #[test] - fn bad_extrinsic_not_inserted() { - let mut t = new_test_ext(1); - // bad nonce check! 
- let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 69)), sign_extra(1, 30, 0)); - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - assert!(Executive::apply_extrinsic(xt).is_err()); - assert_eq!(>::extrinsic_index(), Some(0)); - }); - } - - #[test] - fn block_weight_limit_enforced() { - let mut t = new_test_ext(10000); - // given: TestXt uses the encoded len as fixed Len: - let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); - let encoded = xt.encode(); - let encoded_len = encoded.len() as Weight; - let limit = AvailableBlockRatio::get() * MaximumBlockWeight::get() - 175; - let num_to_exhaust_block = limit / encoded_len; - t.execute_with(|| { - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - // Initial block weight form the custom module. - assert_eq!(>::all_extrinsics_weight(), 175); - - for nonce in 0..=num_to_exhaust_block { - let xt = TestXt::new( - Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, nonce.into(), 0), - ); - let res = Executive::apply_extrinsic(xt); - if nonce != num_to_exhaust_block { - assert!(res.is_ok()); - assert_eq!( - >::all_extrinsics_weight(), - encoded_len * (nonce + 1) + 175, - ); - assert_eq!(>::extrinsic_index(), Some(nonce as u32 + 1)); - } else { - assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); - } - } - }); - } - - #[test] - fn block_weight_and_size_is_stored_per_tx() { - let xt = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 0, 0)); - let x1 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 1, 0)); - let x2 = TestXt::new(Call::Balances(BalancesCall::transfer(33, 0)), sign_extra(1, 2, 0)); - let len = xt.clone().encode().len() as u32; - let mut t = new_test_ext(1); - t.execute_with(|| { - 
assert_eq!(>::all_extrinsics_weight(), 0); - assert_eq!(>::all_extrinsics_len(), 0); - - assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); - assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); - - // default weight for `TestXt` == encoded length. - assert_eq!(>::all_extrinsics_weight(), (3 * len) as Weight); - assert_eq!(>::all_extrinsics_len(), 3 * len); - - let _ = >::finalize(); - - assert_eq!(>::all_extrinsics_weight(), 0); - assert_eq!(>::all_extrinsics_len(), 0); - }); - } - - #[test] - fn validate_unsigned() { - let xt = TestXt::new(Call::Balances(BalancesCall::set_balance(33, 69, 69)), None); - let mut t = new_test_ext(1); - - t.execute_with(|| { - assert_eq!( - Executive::validate_transaction(TransactionSource::InBlock, xt.clone()), - Ok(Default::default()), - ); - assert_eq!(Executive::apply_extrinsic(xt), Ok(Err(DispatchError::BadOrigin))); - }); - } - - #[test] - fn can_pay_for_tx_fee_on_full_lock() { - let id: LockIdentifier = *b"0 "; - let execute_with_lock = |lock: WithdrawReasons| { - let mut t = new_test_ext(1); - t.execute_with(|| { - as LockableCurrency>::set_lock( - id, - &1, - 110, - lock, - ); - let xt = TestXt::new( - Call::System(SystemCall::remark(vec![1u8])), - sign_extra(1, 0, 0), - ); - let weight = xt.get_dispatch_info().weight as u64; - Executive::initialize_block(&Header::new( - 1, - H256::default(), - H256::default(), - [69u8; 32].into(), - Digest::default(), - )); - - if lock == WithdrawReasons::except(WithdrawReason::TransactionPayment) { - assert!(Executive::apply_extrinsic(xt).unwrap().is_ok()); - // tx fee has been deducted. 
- assert_eq!(>::total_balance(&1), 111 - 10 - weight); - } else { - assert_eq!( - Executive::apply_extrinsic(xt), - Err(InvalidTransaction::Payment.into()), - ); - assert_eq!(>::total_balance(&1), 111); - } - }); - }; - - execute_with_lock(WithdrawReasons::all()); - execute_with_lock(WithdrawReasons::except(WithdrawReason::TransactionPayment)); - } - - #[test] - fn block_hooks_weight_is_stored() { - new_test_ext(1).execute_with(|| { - - Executive::initialize_block(&Header::new_from_number(1)); - // NOTE: might need updates over time if system and balance introduce new weights. For - // now only accounts for the custom module. - assert_eq!(>::all_extrinsics_weight(), 150 + 25); - }) - } - - #[test] - fn runtime_upgraded_should_work() { - new_test_ext(1).execute_with(|| { - RUNTIME_VERSION.with(|v| *v.borrow_mut() = Default::default()); - // It should be added at genesis - assert!(frame_system::LastRuntimeUpgrade::exists()); - assert!(!Executive::runtime_upgraded()); - - RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { - spec_version: 1, - ..Default::default() - }); - assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "".into() }), - frame_system::LastRuntimeUpgrade::get(), - ); - - RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), - ..Default::default() - }); - assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::get(), - ); - - RUNTIME_VERSION.with(|v| *v.borrow_mut() = sp_version::RuntimeVersion { - spec_version: 1, - spec_name: "test".into(), - impl_version: 2, - ..Default::default() - }); - assert!(!Executive::runtime_upgraded()); - - frame_system::LastRuntimeUpgrade::take(); - assert!(Executive::runtime_upgraded()); - assert_eq!( - Some(LastRuntimeUpgradeInfo { spec_version: 
1.into(), spec_name: "test".into() }), - frame_system::LastRuntimeUpgrade::get(), - ); - }) - } - - #[test] - fn last_runtime_upgrade_was_upgraded_works() { - let test_data = vec![ - (0, "", 1, "", true), - (1, "", 1, "", false), - (1, "", 1, "test", true), - (1, "", 0, "", false), - (1, "", 0, "test", true), - ]; - - for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { - let current = sp_version::RuntimeVersion { - spec_version: c_spec_version, - spec_name: c_spec_name.into(), - ..Default::default() - }; - - let last = LastRuntimeUpgradeInfo { - spec_version: spec_version.into(), - spec_name: spec_name.into(), - }; - - assert_eq!(result, last.was_upgraded(¤t)); - } - } + use super::*; + use frame_support::{ + impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, + traits::{Currency, LockIdentifier, LockableCurrency, WithdrawReason, WithdrawReasons}, + weights::Weight, + }; + use frame_system::{self as system, Call as SystemCall, ChainContext, LastRuntimeUpgradeInfo}; + use hex_literal::hex; + use pallet_balances::Call as BalancesCall; + use sp_core::H256; + use sp_runtime::{ + generic::Era, + testing::{Block, Digest, Header}, + traits::{BlakeTwo256, ConvertInto, Header as HeaderT, IdentityLookup}, + transaction_validity::{InvalidTransaction, TransactionValidityError, UnknownTransaction}, + DispatchError, Perbill, + }; + + mod custom { + use frame_support::weights::{SimpleDispatchInfo, Weight}; + + pub trait Trait: frame_system::Trait {} + + frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin { + #[weight = SimpleDispatchInfo::FixedNormal(100)] + fn some_function(origin) { + // NOTE: does not make any different. 
+ let _ = frame_system::ensure_signed(origin); + } + #[weight = SimpleDispatchInfo::FixedOperational(200)] + fn some_root_operation(origin) { + let _ = frame_system::ensure_root(origin); + } + #[weight = SimpleDispatchInfo::InsecureFreeNormal] + fn some_unsigned_message(origin) { + let _ = frame_system::ensure_none(origin); + } + + // module hooks. + // one with block number arg and one without + fn on_initialize(n: T::BlockNumber) -> Weight { + println!("on_initialize({})", n); + 175 + } + + fn on_finalize() { + println!("on_finalize(?)"); + } + } + } + } + + type System = frame_system::Module; + type Balances = pallet_balances::Module; + type Custom = custom::Module; + + use pallet_balances as balances; + + impl_outer_origin! { + pub enum Origin for Runtime { } + } + + impl_outer_event! { + pub enum MetaEvent for Runtime { + system, + balances, + } + } + impl_outer_dispatch! { + pub enum Call for Runtime where origin: Origin { + frame_system::System, + pallet_balances::Balances, + } + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Runtime; + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Runtime { + type Origin = Origin; + type Index = u64; + type Call = Call; + type BlockNumber = u64; + type Hash = sp_core::H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = MetaEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = RuntimeVersion; + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + } + parameter_types! 
{ + pub const ExistentialDeposit: u64 = 1; + } + impl pallet_balances::Trait for Runtime { + type Balance = u64; + type Event = MetaEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + } + + parameter_types! { + pub const TransactionBaseFee: u64 = 10; + pub const TransactionByteFee: u64 = 0; + } + impl pallet_transaction_payment::Trait for Runtime { + type Currency = Balances; + type OnTransactionPayment = (); + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = ConvertInto; + type FeeMultiplierUpdate = (); + } + impl custom::Trait for Runtime {} + + impl ValidateUnsigned for Runtime { + type Call = Call; + + fn pre_dispatch(_call: &Self::Call) -> Result<(), TransactionValidityError> { + Ok(()) + } + + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + match call { + Call::Balances(BalancesCall::set_balance(_, _, _)) => Ok(Default::default()), + _ => UnknownTransaction::NoUnsignedValidator.into(), + } + } + } + + pub struct RuntimeVersion; + impl frame_support::traits::Get for RuntimeVersion { + fn get() -> sp_version::RuntimeVersion { + RUNTIME_VERSION.with(|v| v.borrow().clone()) + } + } + + thread_local! 
{ + pub static RUNTIME_VERSION: std::cell::RefCell = + Default::default(); + } + + type SignedExtra = ( + frame_system::CheckEra, + frame_system::CheckNonce, + frame_system::CheckWeight, + pallet_transaction_payment::ChargeTransactionPayment, + ); + type AllModules = (System, Balances, Custom); + type TestXt = sp_runtime::testing::TestXt; + type Executive = + super::Executive, ChainContext, Runtime, AllModules>; + + fn extra(nonce: u64, fee: u64) -> SignedExtra { + ( + frame_system::CheckEra::from(Era::Immortal), + frame_system::CheckNonce::from(nonce), + frame_system::CheckWeight::new(), + pallet_transaction_payment::ChargeTransactionPayment::from(fee), + ) + } + + fn sign_extra(who: u64, nonce: u64, fee: u64) -> Option<(u64, SignedExtra)> { + Some((who, extra(nonce, fee))) + } + + #[test] + fn balance_transfer_dispatch_works() { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 211)], + } + .assimilate_storage(&mut t) + .unwrap(); + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer(2, 69)), + sign_extra(1, 0, 0), + ); + let weight = xt.get_dispatch_info().weight as u64; + let mut t = sp_io::TestExternalities::new(t); + t.execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + let r = Executive::apply_extrinsic(xt); + assert!(r.is_ok()); + assert_eq!( + >::total_balance(&1), + 142 - 10 - weight + ); + assert_eq!(>::total_balance(&2), 69); + }); + } + + fn new_test_ext(balance_factor: u64) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 111 * balance_factor)], + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } + + #[test] + fn block_import_works() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + 
header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: hex!( + "489ae9b57a19bb4733a264dc64bbcae9b140a904657a681ed3bb5fbbe8cf412b" + ) + .into(), + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + #[should_panic] + fn block_import_of_bad_state_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: [0u8; 32].into(), + extrinsics_root: hex!( + "03170a2e7597b7b7e3d84c05391d139a62b157e78786d8c082f29dcf4c111314" + ) + .into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + #[should_panic] + fn block_import_of_bad_extrinsic_root_fails() { + new_test_ext(1).execute_with(|| { + Executive::execute_block(Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: hex!( + "49cd58a254ccf6abc4a023d9a22dcfc421e385527a250faec69f8ad0d8ed3e48" + ) + .into(), + extrinsics_root: [0u8; 32].into(), + digest: Digest { logs: vec![] }, + }, + extrinsics: vec![], + }); + }); + } + + #[test] + fn bad_extrinsic_not_inserted() { + let mut t = new_test_ext(1); + // bad nonce check! 
+ let xt = TestXt::new( + Call::Balances(BalancesCall::transfer(33, 69)), + sign_extra(1, 30, 0), + ); + t.execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + assert!(Executive::apply_extrinsic(xt).is_err()); + assert_eq!(>::extrinsic_index(), Some(0)); + }); + } + + #[test] + fn block_weight_limit_enforced() { + let mut t = new_test_ext(10000); + // given: TestXt uses the encoded len as fixed Len: + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer(33, 0)), + sign_extra(1, 0, 0), + ); + let encoded = xt.encode(); + let encoded_len = encoded.len() as Weight; + let limit = AvailableBlockRatio::get() * MaximumBlockWeight::get() - 175; + let num_to_exhaust_block = limit / encoded_len; + t.execute_with(|| { + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + // Initial block weight form the custom module. 
+ assert_eq!( + >::all_extrinsics_weight(), + 175 + ); + + for nonce in 0..=num_to_exhaust_block { + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer(33, 0)), + sign_extra(1, nonce.into(), 0), + ); + let res = Executive::apply_extrinsic(xt); + if nonce != num_to_exhaust_block { + assert!(res.is_ok()); + assert_eq!( + >::all_extrinsics_weight(), + encoded_len * (nonce + 1) + 175, + ); + assert_eq!( + >::extrinsic_index(), + Some(nonce as u32 + 1) + ); + } else { + assert_eq!(res, Err(InvalidTransaction::ExhaustsResources.into())); + } + } + }); + } + + #[test] + fn block_weight_and_size_is_stored_per_tx() { + let xt = TestXt::new( + Call::Balances(BalancesCall::transfer(33, 0)), + sign_extra(1, 0, 0), + ); + let x1 = TestXt::new( + Call::Balances(BalancesCall::transfer(33, 0)), + sign_extra(1, 1, 0), + ); + let x2 = TestXt::new( + Call::Balances(BalancesCall::transfer(33, 0)), + sign_extra(1, 2, 0), + ); + let len = xt.clone().encode().len() as u32; + let mut t = new_test_ext(1); + t.execute_with(|| { + assert_eq!(>::all_extrinsics_weight(), 0); + assert_eq!(>::all_extrinsics_len(), 0); + + assert!(Executive::apply_extrinsic(xt.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x1.clone()).unwrap().is_ok()); + assert!(Executive::apply_extrinsic(x2.clone()).unwrap().is_ok()); + + // default weight for `TestXt` == encoded length. 
+ assert_eq!( + >::all_extrinsics_weight(), + (3 * len) as Weight + ); + assert_eq!( + >::all_extrinsics_len(), + 3 * len + ); + + let _ = >::finalize(); + + assert_eq!(>::all_extrinsics_weight(), 0); + assert_eq!(>::all_extrinsics_len(), 0); + }); + } + + #[test] + fn validate_unsigned() { + let xt = TestXt::new(Call::Balances(BalancesCall::set_balance(33, 69, 69)), None); + let mut t = new_test_ext(1); + + t.execute_with(|| { + assert_eq!( + Executive::validate_transaction(TransactionSource::InBlock, xt.clone()), + Ok(Default::default()), + ); + assert_eq!( + Executive::apply_extrinsic(xt), + Ok(Err(DispatchError::BadOrigin)) + ); + }); + } + + #[test] + fn can_pay_for_tx_fee_on_full_lock() { + let id: LockIdentifier = *b"0 "; + let execute_with_lock = |lock: WithdrawReasons| { + let mut t = new_test_ext(1); + t.execute_with(|| { + as LockableCurrency>::set_lock( + id, &1, 110, lock, + ); + let xt = TestXt::new( + Call::System(SystemCall::remark(vec![1u8])), + sign_extra(1, 0, 0), + ); + let weight = xt.get_dispatch_info().weight as u64; + Executive::initialize_block(&Header::new( + 1, + H256::default(), + H256::default(), + [69u8; 32].into(), + Digest::default(), + )); + + if lock == WithdrawReasons::except(WithdrawReason::TransactionPayment) { + assert!(Executive::apply_extrinsic(xt).unwrap().is_ok()); + // tx fee has been deducted. + assert_eq!( + >::total_balance(&1), + 111 - 10 - weight + ); + } else { + assert_eq!( + Executive::apply_extrinsic(xt), + Err(InvalidTransaction::Payment.into()), + ); + assert_eq!(>::total_balance(&1), 111); + } + }); + }; + + execute_with_lock(WithdrawReasons::all()); + execute_with_lock(WithdrawReasons::except(WithdrawReason::TransactionPayment)); + } + + #[test] + fn block_hooks_weight_is_stored() { + new_test_ext(1).execute_with(|| { + Executive::initialize_block(&Header::new_from_number(1)); + // NOTE: might need updates over time if system and balance introduce new weights. For + // now only accounts for the custom module. 
+ assert_eq!( + >::all_extrinsics_weight(), + 150 + 25 + ); + }) + } + + #[test] + fn runtime_upgraded_should_work() { + new_test_ext(1).execute_with(|| { + RUNTIME_VERSION.with(|v| *v.borrow_mut() = Default::default()); + // It should be added at genesis + assert!(frame_system::LastRuntimeUpgrade::exists()); + assert!(!Executive::runtime_upgraded()); + + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + ..Default::default() + } + }); + assert!(Executive::runtime_upgraded()); + assert_eq!( + Some(LastRuntimeUpgradeInfo { + spec_version: 1.into(), + spec_name: "".into() + }), + frame_system::LastRuntimeUpgrade::get(), + ); + + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + ..Default::default() + } + }); + assert!(Executive::runtime_upgraded()); + assert_eq!( + Some(LastRuntimeUpgradeInfo { + spec_version: 1.into(), + spec_name: "test".into() + }), + frame_system::LastRuntimeUpgrade::get(), + ); + + RUNTIME_VERSION.with(|v| { + *v.borrow_mut() = sp_version::RuntimeVersion { + spec_version: 1, + spec_name: "test".into(), + impl_version: 2, + ..Default::default() + } + }); + assert!(!Executive::runtime_upgraded()); + + frame_system::LastRuntimeUpgrade::take(); + assert!(Executive::runtime_upgraded()); + assert_eq!( + Some(LastRuntimeUpgradeInfo { + spec_version: 1.into(), + spec_name: "test".into() + }), + frame_system::LastRuntimeUpgrade::get(), + ); + }) + } + + #[test] + fn last_runtime_upgrade_was_upgraded_works() { + let test_data = vec![ + (0, "", 1, "", true), + (1, "", 1, "", false), + (1, "", 1, "test", true), + (1, "", 0, "", false), + (1, "", 0, "test", true), + ]; + + for (spec_version, spec_name, c_spec_version, c_spec_name, result) in test_data { + let current = sp_version::RuntimeVersion { + spec_version: c_spec_version, + spec_name: c_spec_name.into(), + ..Default::default() + }; + + let last = LastRuntimeUpgradeInfo { + 
spec_version: spec_version.into(), + spec_name: spec_name.into(), + }; + + assert_eq!(result, last.was_upgraded(¤t)); + } + } } diff --git a/frame/finality-tracker/src/lib.rs b/frame/finality-tracker/src/lib.rs index 54506784a9..52543efd1f 100644 --- a/frame/finality-tracker/src/lib.rs +++ b/frame/finality-tracker/src/lib.rs @@ -18,329 +18,352 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_inherents::{InherentIdentifier, ProvideInherent, InherentData, MakeFatalError}; -use sp_runtime::traits::{One, Zero, SaturatedConversion}; -use sp_std::{prelude::*, result, cmp, vec}; -use frame_support::{decl_module, decl_storage, decl_error, ensure}; use frame_support::traits::Get; use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_module, decl_storage, ensure}; use frame_system::{ensure_none, Trait as SystemTrait}; -use sp_finality_tracker::{INHERENT_IDENTIFIER, FinalizedInherentData}; +use sp_finality_tracker::{FinalizedInherentData, INHERENT_IDENTIFIER}; +use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; +use sp_runtime::traits::{One, SaturatedConversion, Zero}; +use sp_std::{cmp, prelude::*, result, vec}; pub const DEFAULT_WINDOW_SIZE: u32 = 101; pub const DEFAULT_REPORT_LATENCY: u32 = 1000; pub trait Trait: SystemTrait { - /// Something which can be notified when the timestamp is set. Set this to `()` - /// if not needed. - type OnFinalizationStalled: OnFinalizationStalled; - /// The number of recent samples to keep from this chain. Default is 101. - type WindowSize: Get; - /// The delay after which point things become suspicious. Default is 1000. - type ReportLatency: Get; + /// Something which can be notified when the timestamp is set. Set this to `()` + /// if not needed. + type OnFinalizationStalled: OnFinalizationStalled; + /// The number of recent samples to keep from this chain. Default is 101. 
+ type WindowSize: Get; + /// The delay after which point things become suspicious. Default is 1000. + type ReportLatency: Get; } decl_storage! { - trait Store for Module as FinalityTracker { - /// Recent hints. - RecentHints get(fn recent_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; - /// Ordered recent hints. - OrderedHints get(fn ordered_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; - /// The median. - Median get(fn median) build(|_| T::BlockNumber::zero()): T::BlockNumber; - - /// Final hint to apply in the block. `None` means "same as parent". - Update: Option; - - // when initialized through config this is set in the beginning. - Initialized get(fn initialized) build(|_| false): bool; - } + trait Store for Module as FinalityTracker { + /// Recent hints. + RecentHints get(fn recent_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; + /// Ordered recent hints. + OrderedHints get(fn ordered_hints) build(|_| vec![T::BlockNumber::zero()]): Vec; + /// The median. + Median get(fn median) build(|_| T::BlockNumber::zero()): T::BlockNumber; + + /// Final hint to apply in the block. `None` means "same as parent". + Update: Option; + + // when initialized through config this is set in the beginning. + Initialized get(fn initialized) build(|_| false): bool; + } } decl_error! { - pub enum Error for Module { - /// Final hint must be updated only once in the block - AlreadyUpdated, - /// Finalized height above block number - BadHint, - } + pub enum Error for Module { + /// Final hint must be updated only once in the block + AlreadyUpdated, + /// Finalized height above block number + BadHint, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - /// The number of recent samples to keep from this chain. Default is 101. - const WindowSize: T::BlockNumber = T::WindowSize::get(); - - /// The delay after which point things become suspicious. Default is 1000. 
- const ReportLatency: T::BlockNumber = T::ReportLatency::get(); - - /// Hint that the author of this block thinks the best finalized - /// block is the given number. - #[weight = SimpleDispatchInfo::FixedMandatory(MINIMUM_WEIGHT)] - fn final_hint(origin, #[compact] hint: T::BlockNumber) { - ensure_none(origin)?; - ensure!(!::Update::exists(), Error::::AlreadyUpdated); - ensure!( - frame_system::Module::::block_number() >= hint, - Error::::BadHint, - ); - ::Update::put(hint); - } - - fn on_finalize() { - Self::update_hint(::Update::take()) - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + /// The number of recent samples to keep from this chain. Default is 101. + const WindowSize: T::BlockNumber = T::WindowSize::get(); + + /// The delay after which point things become suspicious. Default is 1000. + const ReportLatency: T::BlockNumber = T::ReportLatency::get(); + + /// Hint that the author of this block thinks the best finalized + /// block is the given number. 
+ #[weight = SimpleDispatchInfo::FixedMandatory(MINIMUM_WEIGHT)] + fn final_hint(origin, #[compact] hint: T::BlockNumber) { + ensure_none(origin)?; + ensure!(!::Update::exists(), Error::::AlreadyUpdated); + ensure!( + frame_system::Module::::block_number() >= hint, + Error::::BadHint, + ); + ::Update::put(hint); + } + + fn on_finalize() { + Self::update_hint(::Update::take()) + } + } } impl Module { - fn update_hint(hint: Option) { - if !Self::initialized() { - ::RecentHints::put(vec![T::BlockNumber::zero()]); - ::OrderedHints::put(vec![T::BlockNumber::zero()]); - ::Median::put(T::BlockNumber::zero()); - - ::Initialized::put(true); - } - - let mut recent = Self::recent_hints(); - let mut ordered = Self::ordered_hints(); - let window_size = cmp::max(T::BlockNumber::one(), T::WindowSize::get()); - - let hint = hint.unwrap_or_else(|| recent.last() - .expect("always at least one recent sample; qed").clone() - ); - - // prune off the front of the list -- typically 1 except for when - // the sample size has just been shrunk. - { - // take into account the item we haven't pushed yet. - let to_prune = (recent.len() + 1).saturating_sub(window_size.saturated_into::()); - - for drained in recent.drain(..to_prune) { - let idx = ordered.binary_search(&drained) - .expect("recent and ordered contain the same items; qed"); - - ordered.remove(idx); - } - } - - // find the position in the ordered list where the new item goes. - let ordered_idx = ordered.binary_search(&hint) - .unwrap_or_else(|idx| idx); - - ordered.insert(ordered_idx, hint); - recent.push(hint); - - let two = T::BlockNumber::one() + T::BlockNumber::one(); - - let median = { - let len = ordered.len(); - assert!(len > 0, "pruning dictated by window_size which is always saturated at 1; qed"); - - if len % 2 == 0 { - let a = ordered[len / 2]; - let b = ordered[(len / 2) - 1]; - - // compute average. 
- (a + b) / two - } else { - ordered[len / 2] - } - }; - - let our_window_size = recent.len() as u32; - - ::RecentHints::put(recent); - ::OrderedHints::put(ordered); - ::Median::put(median); - - if T::BlockNumber::from(our_window_size) == window_size { - let now = frame_system::Module::::block_number(); - let latency = T::ReportLatency::get(); - - // the delay is the latency plus half the window size. - let delay = latency + (window_size / two); - // median may be at most n - delay - if median + delay <= now { - T::OnFinalizationStalled::on_stalled(window_size - T::BlockNumber::one(), median); - } - } - } + fn update_hint(hint: Option) { + if !Self::initialized() { + ::RecentHints::put(vec![T::BlockNumber::zero()]); + ::OrderedHints::put(vec![T::BlockNumber::zero()]); + ::Median::put(T::BlockNumber::zero()); + + ::Initialized::put(true); + } + + let mut recent = Self::recent_hints(); + let mut ordered = Self::ordered_hints(); + let window_size = cmp::max(T::BlockNumber::one(), T::WindowSize::get()); + + let hint = hint.unwrap_or_else(|| { + recent + .last() + .expect("always at least one recent sample; qed") + .clone() + }); + + // prune off the front of the list -- typically 1 except for when + // the sample size has just been shrunk. + { + // take into account the item we haven't pushed yet. + let to_prune = (recent.len() + 1).saturating_sub(window_size.saturated_into::()); + + for drained in recent.drain(..to_prune) { + let idx = ordered + .binary_search(&drained) + .expect("recent and ordered contain the same items; qed"); + + ordered.remove(idx); + } + } + + // find the position in the ordered list where the new item goes. 
+ let ordered_idx = ordered.binary_search(&hint).unwrap_or_else(|idx| idx); + + ordered.insert(ordered_idx, hint); + recent.push(hint); + + let two = T::BlockNumber::one() + T::BlockNumber::one(); + + let median = { + let len = ordered.len(); + assert!( + len > 0, + "pruning dictated by window_size which is always saturated at 1; qed" + ); + + if len % 2 == 0 { + let a = ordered[len / 2]; + let b = ordered[(len / 2) - 1]; + + // compute average. + (a + b) / two + } else { + ordered[len / 2] + } + }; + + let our_window_size = recent.len() as u32; + + ::RecentHints::put(recent); + ::OrderedHints::put(ordered); + ::Median::put(median); + + if T::BlockNumber::from(our_window_size) == window_size { + let now = frame_system::Module::::block_number(); + let latency = T::ReportLatency::get(); + + // the delay is the latency plus half the window size. + let delay = latency + (window_size / two); + // median may be at most n - delay + if median + delay <= now { + T::OnFinalizationStalled::on_stalled(window_size - T::BlockNumber::one(), median); + } + } + } } /// Called when finalization stalled at a given number. #[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnFinalizationStalled { - /// The parameter here is how many more blocks to wait before applying - /// changes triggered by finality stalling. - fn on_stalled(further_wait: N, median: N); + /// The parameter here is how many more blocks to wait before applying + /// changes triggered by finality stalling. + fn on_stalled(further_wait: N, median: N); } impl ProvideInherent for Module { - type Call = Call; - type Error = MakeFatalError<()>; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - if let Ok(final_num) = data.finalized_number() { - // make hint only when not same as last to avoid bloat. 
- Self::recent_hints().last().and_then(|last| if last == &final_num { - None - } else { - Some(Call::final_hint(final_num)) - }) - } else { - None - } - } - - fn check_inherent(_call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { - Ok(()) - } + type Call = Call; + type Error = MakeFatalError<()>; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + if let Ok(final_num) = data.finalized_number() { + // make hint only when not same as last to avoid bloat. + Self::recent_hints().last().and_then(|last| { + if last == &final_num { + None + } else { + Some(Call::final_hint(final_num)) + } + }) + } else { + None + } + } + + fn check_inherent(_call: &Self::Call, _data: &InherentData) -> result::Result<(), Self::Error> { + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - - use sp_io::TestExternalities; - use sp_core::H256; - use sp_runtime::{ - testing::Header, Perbill, - traits::{BlakeTwo256, IdentityLookup, Header as HeaderT}, - }; - use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, weights::Weight, traits::OnFinalize - }; - use frame_system as system; - use std::cell::RefCell; - - #[derive(Clone, PartialEq, Debug)] - pub struct StallEvent { - at: u64, - further_wait: u64, - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - thread_local! { - static NOTIFICATIONS: RefCell> = Default::default(); - } - - pub struct StallTracker; - impl OnFinalizationStalled for StallTracker { - fn on_stalled(further_wait: u64, _median: u64) { - let now = System::block_number(); - NOTIFICATIONS.with(|v| v.borrow_mut().push(StallEvent { at: now, further_wait })); - } - } - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const WindowSize: u64 = 11; - pub const ReportLatency: u64 = 100; - } - impl Trait for Test { - type OnFinalizationStalled = StallTracker; - type WindowSize = WindowSize; - type ReportLatency = ReportLatency; - } - - type System = system::Module; - type FinalityTracker = Module; - - #[test] - fn median_works() { - let t = system::GenesisConfig::default().build_storage::().unwrap(); - TestExternalities::new(t).execute_with(|| { - FinalityTracker::update_hint(Some(500)); - assert_eq!(FinalityTracker::median(), 250); - assert!(NOTIFICATIONS.with(|n| n.borrow().is_empty())); - }); - } - - #[test] - fn notifies_when_stalled() { - let t = system::GenesisConfig::default().build_storage::().unwrap(); - TestExternalities::new(t).execute_with(|| { - let mut parent_hash = System::parent_hash(); - for i in 2..106 { - System::initialize( - &i, - &parent_hash, - &Default::default(), - &Default::default(), - Default::default() - ); - FinalityTracker::on_finalize(i); - let hdr = System::finalize(); - parent_hash = hdr.hash(); - } - - assert_eq!( - NOTIFICATIONS.with(|n| n.borrow().clone()), - vec![StallEvent { at: 105, further_wait: 10 }] - ) - }); - } - - 
#[test] - fn recent_notifications_prevent_stalling() { - let t = system::GenesisConfig::default().build_storage::().unwrap(); - TestExternalities::new(t).execute_with(|| { - let mut parent_hash = System::parent_hash(); - for i in 2..106 { - System::initialize( - &i, - &parent_hash, - &Default::default(), - &Default::default(), - Default::default(), - ); - assert_ok!(FinalityTracker::dispatch( - Call::final_hint(i-1), - Origin::NONE, - )); - FinalityTracker::on_finalize(i); - let hdr = System::finalize(); - parent_hash = hdr.hash(); - } - - assert!(NOTIFICATIONS.with(|n| n.borrow().is_empty())); - }); - } + use super::*; + + use frame_support::{ + assert_ok, impl_outer_origin, parameter_types, traits::OnFinalize, weights::Weight, + }; + use frame_system as system; + use sp_core::H256; + use sp_io::TestExternalities; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, Header as HeaderT, IdentityLookup}, + Perbill, + }; + use std::cell::RefCell; + + #[derive(Clone, PartialEq, Debug)] + pub struct StallEvent { + at: u64, + further_wait: u64, + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + thread_local! { + static NOTIFICATIONS: RefCell> = Default::default(); + } + + pub struct StallTracker; + impl OnFinalizationStalled for StallTracker { + fn on_stalled(further_wait: u64, _median: u64) { + let now = System::block_number(); + NOTIFICATIONS.with(|v| { + v.borrow_mut().push(StallEvent { + at: now, + further_wait, + }) + }); + } + } + + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + parameter_types! { + pub const WindowSize: u64 = 11; + pub const ReportLatency: u64 = 100; + } + impl Trait for Test { + type OnFinalizationStalled = StallTracker; + type WindowSize = WindowSize; + type ReportLatency = ReportLatency; + } + + type System = system::Module; + type FinalityTracker = Module; + + #[test] + fn median_works() { + let t = system::GenesisConfig::default() + .build_storage::() + .unwrap(); + TestExternalities::new(t).execute_with(|| { + FinalityTracker::update_hint(Some(500)); + assert_eq!(FinalityTracker::median(), 250); + assert!(NOTIFICATIONS.with(|n| n.borrow().is_empty())); + }); + } + + #[test] + fn notifies_when_stalled() { + let t = system::GenesisConfig::default() + .build_storage::() + .unwrap(); + TestExternalities::new(t).execute_with(|| { + let mut parent_hash = System::parent_hash(); + for i in 2..106 { + System::initialize( + &i, + &parent_hash, + &Default::default(), + &Default::default(), + Default::default(), + ); + FinalityTracker::on_finalize(i); + let hdr = System::finalize(); + parent_hash = hdr.hash(); + } + + assert_eq!( + NOTIFICATIONS.with(|n| n.borrow().clone()), + vec![StallEvent { + at: 105, + further_wait: 10 + }] + ) 
+ }); + } + + #[test] + fn recent_notifications_prevent_stalling() { + let t = system::GenesisConfig::default() + .build_storage::() + .unwrap(); + TestExternalities::new(t).execute_with(|| { + let mut parent_hash = System::parent_hash(); + for i in 2..106 { + System::initialize( + &i, + &parent_hash, + &Default::default(), + &Default::default(), + Default::default(), + ); + assert_ok!(FinalityTracker::dispatch( + Call::final_hint(i - 1), + Origin::NONE, + )); + FinalityTracker::on_finalize(i); + let hdr = System::finalize(); + parent_hash = hdr.hash(); + } + + assert!(NOTIFICATIONS.with(|n| n.borrow().is_empty())); + }); + } } diff --git a/frame/generic-asset/src/lib.rs b/frame/generic-asset/src/lib.rs index 720ccd85cc..ff2e271f1b 100644 --- a/frame/generic-asset/src/lib.rs +++ b/frame/generic-asset/src/lib.rs @@ -152,26 +152,26 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Decode, Encode, HasCompact, Input, Output, Error as CodecError}; +use codec::{Decode, Encode, Error as CodecError, HasCompact, Input, Output}; -use sp_runtime::{RuntimeDebug, DispatchResult, DispatchError}; use sp_runtime::traits::{ - CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Member, One, Saturating, AtLeast32Bit, - Zero, Bounded, + AtLeast32Bit, Bounded, CheckedAdd, CheckedSub, MaybeSerializeDeserialize, Member, One, + Saturating, Zero, }; +use sp_runtime::{DispatchError, DispatchResult, RuntimeDebug}; -use sp_std::prelude::*; -use sp_std::{cmp, result, fmt::Debug}; use frame_support::{ - decl_event, decl_module, decl_storage, ensure, decl_error, - weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, - traits::{ - Currency, ExistenceRequirement, Imbalance, LockIdentifier, LockableCurrency, ReservableCurrency, - SignedImbalance, WithdrawReason, WithdrawReasons, TryDrop, BalanceStatus, - }, - Parameter, StorageMap, + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{ + BalanceStatus, Currency, ExistenceRequirement, Imbalance, LockIdentifier, LockableCurrency, 
+ ReservableCurrency, SignedImbalance, TryDrop, WithdrawReason, WithdrawReasons, + }, + weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, + Parameter, StorageMap, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed}; +use sp_std::prelude::*; +use sp_std::{cmp, fmt::Debug, result}; mod mock; mod tests; @@ -179,313 +179,311 @@ mod tests; pub use self::imbalances::{NegativeImbalance, PositiveImbalance}; pub trait Trait: frame_system::Trait { - type Balance: Parameter - + Member - + AtLeast32Bit - + Default - + Copy - + MaybeSerializeDeserialize - + Debug; - type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; - type Event: From> + Into<::Event>; + type Balance: Parameter + + Member + + AtLeast32Bit + + Default + + Copy + + MaybeSerializeDeserialize + + Debug; + type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; + type Event: From> + Into<::Event>; } pub trait Subtrait: frame_system::Trait { - type Balance: Parameter - + Member - + AtLeast32Bit - + Default - + Copy - + MaybeSerializeDeserialize - + Debug; - type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; + type Balance: Parameter + + Member + + AtLeast32Bit + + Default + + Copy + + MaybeSerializeDeserialize + + Debug; + type AssetId: Parameter + Member + AtLeast32Bit + Default + Copy; } impl Subtrait for T { - type Balance = T::Balance; - type AssetId = T::AssetId; + type Balance = T::Balance; + type AssetId = T::AssetId; } /// Asset creation options. #[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] pub struct AssetOptions { - /// Initial issuance of this asset. All deposit to the creator of the asset. - #[codec(compact)] - pub initial_issuance: Balance, - /// Which accounts are allowed to possess this asset. - pub permissions: PermissionLatest, + /// Initial issuance of this asset. All deposit to the creator of the asset. 
+ #[codec(compact)] + pub initial_issuance: Balance, + /// Which accounts are allowed to possess this asset. + pub permissions: PermissionLatest, } /// Owner of an asset. #[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] pub enum Owner { - /// No owner. - None, - /// Owned by an AccountId - Address(AccountId), + /// No owner. + None, + /// Owned by an AccountId + Address(AccountId), } impl Default for Owner { - fn default() -> Self { - Owner::None - } + fn default() -> Self { + Owner::None + } } /// Asset permissions #[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] pub struct PermissionsV1 { - /// Who have permission to update asset permission - pub update: Owner, - /// Who have permission to mint new asset - pub mint: Owner, - /// Who have permission to burn asset - pub burn: Owner, + /// Who have permission to update asset permission + pub update: Owner, + /// Who have permission to mint new asset + pub mint: Owner, + /// Who have permission to burn asset + pub burn: Owner, } #[derive(Clone, Encode, Decode, PartialEq, Eq, RuntimeDebug)] #[repr(u8)] enum PermissionVersionNumber { - V1 = 0, + V1 = 0, } /// Versioned asset permission #[derive(Clone, PartialEq, Eq, RuntimeDebug)] pub enum PermissionVersions { - V1(PermissionsV1), + V1(PermissionsV1), } /// Asset permission types pub enum PermissionType { - /// Permission to burn asset permission - Burn, - /// Permission to mint new asset - Mint, - /// Permission to update asset - Update, + /// Permission to burn asset permission + Burn, + /// Permission to mint new asset + Mint, + /// Permission to update asset + Update, } /// Alias to latest asset permissions pub type PermissionLatest = PermissionsV1; impl Default for PermissionVersions { - fn default() -> Self { - PermissionVersions::V1(Default::default()) - } + fn default() -> Self { + PermissionVersions::V1(Default::default()) + } } impl Encode for PermissionVersions { - fn encode_to(&self, dest: &mut T) { - match self { - 
PermissionVersions::V1(payload) => { - dest.push(&PermissionVersionNumber::V1); - dest.push(payload); - }, - } - } + fn encode_to(&self, dest: &mut T) { + match self { + PermissionVersions::V1(payload) => { + dest.push(&PermissionVersionNumber::V1); + dest.push(payload); + } + } + } } impl codec::EncodeLike for PermissionVersions {} impl Decode for PermissionVersions { - fn decode(input: &mut I) -> core::result::Result { - let version = PermissionVersionNumber::decode(input)?; - Ok( - match version { - PermissionVersionNumber::V1 => PermissionVersions::V1(Decode::decode(input)?) - } - ) - } + fn decode(input: &mut I) -> core::result::Result { + let version = PermissionVersionNumber::decode(input)?; + Ok(match version { + PermissionVersionNumber::V1 => PermissionVersions::V1(Decode::decode(input)?), + }) + } } impl Default for PermissionsV1 { - fn default() -> Self { - PermissionsV1 { - update: Owner::None, - mint: Owner::None, - burn: Owner::None, - } - } + fn default() -> Self { + PermissionsV1 { + update: Owner::None, + mint: Owner::None, + burn: Owner::None, + } + } } impl Into> for PermissionVersions { - fn into(self) -> PermissionLatest { - match self { - PermissionVersions::V1(v1) => v1, - } - } + fn into(self) -> PermissionLatest { + match self { + PermissionVersions::V1(v1) => v1, + } + } } /// Converts the latest permission to other version. impl Into> for PermissionLatest { - fn into(self) -> PermissionVersions { - PermissionVersions::V1(self) - } + fn into(self) -> PermissionVersions { + PermissionVersions::V1(self) + } } decl_error! { - /// Error for the generic-asset module. - pub enum Error for Module { - /// No new assets id available. - NoIdAvailable, - /// Cannot transfer zero amount. - ZeroAmount, - /// The origin does not have enough permission to update permissions. - NoUpdatePermission, - /// The origin does not have permission to mint an asset. - NoMintPermission, - /// The origin does not have permission to burn an asset. 
- NoBurnPermission, - /// Total issuance got overflowed after minting. - TotalMintingOverflow, - /// Free balance got overflowed after minting. - FreeMintingOverflow, - /// Total issuance got underflowed after burning. - TotalBurningUnderflow, - /// Free balance got underflowed after burning. - FreeBurningUnderflow, - /// Asset id is already taken. - IdAlreadyTaken, - /// Asset id not available. - IdUnavailable, - /// The balance is too low to send amount. - InsufficientBalance, - /// The account liquidity restrictions prevent withdrawal. - LiquidityRestrictions, - } + /// Error for the generic-asset module. + pub enum Error for Module { + /// No new assets id available. + NoIdAvailable, + /// Cannot transfer zero amount. + ZeroAmount, + /// The origin does not have enough permission to update permissions. + NoUpdatePermission, + /// The origin does not have permission to mint an asset. + NoMintPermission, + /// The origin does not have permission to burn an asset. + NoBurnPermission, + /// Total issuance got overflowed after minting. + TotalMintingOverflow, + /// Free balance got overflowed after minting. + FreeMintingOverflow, + /// Total issuance got underflowed after burning. + TotalBurningUnderflow, + /// Free balance got underflowed after burning. + FreeBurningUnderflow, + /// Asset id is already taken. + IdAlreadyTaken, + /// Asset id not available. + IdUnavailable, + /// The balance is too low to send amount. + InsufficientBalance, + /// The account liquidity restrictions prevent withdrawal. + LiquidityRestrictions, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Create a new kind of asset. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn create(origin, options: AssetOptions) -> DispatchResult { - let origin = ensure_signed(origin)?; - Self::create_asset(None, Some(origin), options) - } - - /// Transfer some liquid free balance to another account. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn transfer(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, #[compact] amount: T::Balance) { - let origin = ensure_signed(origin)?; - ensure!(!amount.is_zero(), Error::::ZeroAmount); - Self::make_transfer_with_event(&asset_id, &origin, &to, amount)?; - } - - /// Updates permission for a given `asset_id` and an account. - /// - /// The `origin` must have `update` permission. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn update_permission( - origin, - #[compact] asset_id: T::AssetId, - new_permission: PermissionLatest - ) -> DispatchResult { - let origin = ensure_signed(origin)?; - - let permissions: PermissionVersions = new_permission.into(); - - if Self::check_permission(&asset_id, &origin, &PermissionType::Update) { - >::insert(asset_id, &permissions); - - Self::deposit_event(RawEvent::PermissionUpdated(asset_id, permissions.into())); - - Ok(()) - } else { - Err(Error::::NoUpdatePermission)? - } - } - - /// Mints an asset, increases its total issuance. - /// The origin must have `mint` permissions. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn mint(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::mint_free(&asset_id, &who, &to, &amount)?; - Self::deposit_event(RawEvent::Minted(asset_id, to, amount)); - Ok(()) - } - - /// Burns an asset, decreases its total issuance. - /// The `origin` must have `burn` permissions. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn burn(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::burn_free(&asset_id, &who, &to, &amount)?; - Self::deposit_event(RawEvent::Burned(asset_id, to, amount)); - Ok(()) - } - - /// Can be used to create reserved tokens. - /// Requires Root call. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn create_reserved( - origin, - asset_id: T::AssetId, - options: AssetOptions - ) -> DispatchResult { - ensure_root(origin)?; - Self::create_asset(Some(asset_id), None, options) - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Create a new kind of asset. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn create(origin, options: AssetOptions) -> DispatchResult { + let origin = ensure_signed(origin)?; + Self::create_asset(None, Some(origin), options) + } + + /// Transfer some liquid free balance to another account. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn transfer(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, #[compact] amount: T::Balance) { + let origin = ensure_signed(origin)?; + ensure!(!amount.is_zero(), Error::::ZeroAmount); + Self::make_transfer_with_event(&asset_id, &origin, &to, amount)?; + } + + /// Updates permission for a given `asset_id` and an account. + /// + /// The `origin` must have `update` permission. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn update_permission( + origin, + #[compact] asset_id: T::AssetId, + new_permission: PermissionLatest + ) -> DispatchResult { + let origin = ensure_signed(origin)?; + + let permissions: PermissionVersions = new_permission.into(); + + if Self::check_permission(&asset_id, &origin, &PermissionType::Update) { + >::insert(asset_id, &permissions); + + Self::deposit_event(RawEvent::PermissionUpdated(asset_id, permissions.into())); + + Ok(()) + } else { + Err(Error::::NoUpdatePermission)? + } + } + + /// Mints an asset, increases its total issuance. + /// The origin must have `mint` permissions. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn mint(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::mint_free(&asset_id, &who, &to, &amount)?; + Self::deposit_event(RawEvent::Minted(asset_id, to, amount)); + Ok(()) + } + + /// Burns an asset, decreases its total issuance. + /// The `origin` must have `burn` permissions. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn burn(origin, #[compact] asset_id: T::AssetId, to: T::AccountId, amount: T::Balance) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::burn_free(&asset_id, &who, &to, &amount)?; + Self::deposit_event(RawEvent::Burned(asset_id, to, amount)); + Ok(()) + } + + /// Can be used to create reserved tokens. + /// Requires Root call. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn create_reserved( + origin, + asset_id: T::AssetId, + options: AssetOptions + ) -> DispatchResult { + ensure_root(origin)?; + Self::create_asset(Some(asset_id), None, options) + } + } } #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct BalanceLock { - pub id: LockIdentifier, - pub amount: Balance, - pub reasons: WithdrawReasons, + pub id: LockIdentifier, + pub amount: Balance, + pub reasons: WithdrawReasons, } decl_storage! { - trait Store for Module as GenericAsset { - /// Total issuance of a given asset. - pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { - let issuance = config.initial_balance * (config.endowed_accounts.len() as u32).into(); - config.assets.iter().map(|id| (id.clone(), issuance)).collect::>() - }): map hasher(twox_64_concat) T::AssetId => T::Balance; - - /// The free balance of a given asset under an account. - pub FreeBalance: - double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; - - /// The reserved balance of a given asset under an account. - pub ReservedBalance: - double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; - - /// Next available ID for user-created asset. - pub NextAssetId get(fn next_asset_id) config(): T::AssetId; - - /// Permission options for a given asset. - pub Permissions get(fn get_permission): - map hasher(twox_64_concat) T::AssetId => PermissionVersions; - - /// Any liquidity locks on some account balances. - pub Locks get(fn locks): - map hasher(blake2_128_concat) T::AccountId => Vec>; - - /// The identity of the asset which is the one that is designated for the chain's staking system. - pub StakingAssetId get(fn staking_asset_id) config(): T::AssetId; - - /// The identity of the asset which is the one that is designated for paying the chain's transaction fee. 
- pub SpendingAssetId get(fn spending_asset_id) config(): T::AssetId; - } - add_extra_genesis { - config(assets): Vec; - config(initial_balance): T::Balance; - config(endowed_accounts): Vec; - - build(|config: &GenesisConfig| { - config.assets.iter().for_each(|asset_id| { - config.endowed_accounts.iter().for_each(|account_id| { - >::insert(asset_id, account_id, &config.initial_balance); - }); - }); - }); - } + trait Store for Module as GenericAsset { + /// Total issuance of a given asset. + pub TotalIssuance get(fn total_issuance) build(|config: &GenesisConfig| { + let issuance = config.initial_balance * (config.endowed_accounts.len() as u32).into(); + config.assets.iter().map(|id| (id.clone(), issuance)).collect::>() + }): map hasher(twox_64_concat) T::AssetId => T::Balance; + + /// The free balance of a given asset under an account. + pub FreeBalance: + double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; + + /// The reserved balance of a given asset under an account. + pub ReservedBalance: + double_map hasher(twox_64_concat) T::AssetId, hasher(blake2_128_concat) T::AccountId => T::Balance; + + /// Next available ID for user-created asset. + pub NextAssetId get(fn next_asset_id) config(): T::AssetId; + + /// Permission options for a given asset. + pub Permissions get(fn get_permission): + map hasher(twox_64_concat) T::AssetId => PermissionVersions; + + /// Any liquidity locks on some account balances. + pub Locks get(fn locks): + map hasher(blake2_128_concat) T::AccountId => Vec>; + + /// The identity of the asset which is the one that is designated for the chain's staking system. + pub StakingAssetId get(fn staking_asset_id) config(): T::AssetId; + + /// The identity of the asset which is the one that is designated for paying the chain's transaction fee. 
+ pub SpendingAssetId get(fn spending_asset_id) config(): T::AssetId; + } + add_extra_genesis { + config(assets): Vec; + config(initial_balance): T::Balance; + config(endowed_accounts): Vec; + + build(|config: &GenesisConfig| { + config.assets.iter().for_each(|asset_id| { + config.endowed_accounts.iter().for_each(|account_id| { + >::insert(asset_id, account_id, &config.initial_balance); + }); + }); + }); + } } decl_event!( @@ -509,583 +507,614 @@ decl_event!( ); impl Module { - // PUBLIC IMMUTABLES - - /// Get an account's total balance of an asset kind. - pub fn total_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { - Self::free_balance(asset_id, who) + Self::reserved_balance(asset_id, who) - } - - /// Get an account's free balance of an asset kind. - pub fn free_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { - >::get(asset_id, who) - } - - /// Get an account's reserved balance of an asset kind. - pub fn reserved_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { - >::get(asset_id, who) - } - - /// Mint to an account's free balance, without event - pub fn mint_free( - asset_id: &T::AssetId, - who: &T::AccountId, - to: &T::AccountId, - amount: &T::Balance, - ) -> DispatchResult { - if Self::check_permission(asset_id, who, &PermissionType::Mint) { - let original_free_balance = Self::free_balance(&asset_id, &to); - let current_total_issuance = >::get(asset_id); - let new_total_issuance = current_total_issuance.checked_add(&amount) - .ok_or(Error::::TotalMintingOverflow)?; - let value = original_free_balance.checked_add(&amount) - .ok_or(Error::::FreeMintingOverflow)?; - - >::insert(asset_id, new_total_issuance); - Self::set_free_balance(&asset_id, &to, value); - Ok(()) - } else { - Err(Error::::NoMintPermission)? 
- } - } - - /// Burn an account's free balance, without event - pub fn burn_free( - asset_id: &T::AssetId, - who: &T::AccountId, - to: &T::AccountId, - amount: &T::Balance, - ) -> DispatchResult { - if Self::check_permission(asset_id, who, &PermissionType::Burn) { - let original_free_balance = Self::free_balance(asset_id, to); - - let current_total_issuance = >::get(asset_id); - let new_total_issuance = current_total_issuance.checked_sub(amount) - .ok_or(Error::::TotalBurningUnderflow)?; - let value = original_free_balance.checked_sub(amount) - .ok_or(Error::::FreeBurningUnderflow)?; - - >::insert(asset_id, new_total_issuance); - Self::set_free_balance(asset_id, to, value); - Ok(()) - } else { - Err(Error::::NoBurnPermission)? - } - } - - /// Creates an asset. - /// - /// # Arguments - /// * `asset_id`: An ID of a reserved asset. - /// If not provided, a user-generated asset will be created with the next available ID. - /// * `from_account`: The initiator account of this call - /// * `asset_options`: Asset creation options. - /// - pub fn create_asset( - asset_id: Option, - from_account: Option, - options: AssetOptions, - ) -> DispatchResult { - let asset_id = if let Some(asset_id) = asset_id { - ensure!(!>::contains_key(&asset_id), Error::::IdAlreadyTaken); - ensure!(asset_id < Self::next_asset_id(), Error::::IdUnavailable); - asset_id - } else { - let asset_id = Self::next_asset_id(); - let next_id = asset_id - .checked_add(&One::one()) - .ok_or(Error::::NoIdAvailable)?; - >::put(next_id); - asset_id - }; - - let account_id = from_account.unwrap_or_default(); - let permissions: PermissionVersions = options.permissions.clone().into(); - - >::insert(asset_id, &options.initial_issuance); - >::insert(&asset_id, &account_id, &options.initial_issuance); - >::insert(&asset_id, permissions); - - Self::deposit_event(RawEvent::Created(asset_id, account_id, options)); - - Ok(()) - } - - /// Transfer some liquid free balance from one account to another. 
- /// This will not emit the `Transferred` event. - pub fn make_transfer( - asset_id: &T::AssetId, - from: &T::AccountId, - to: &T::AccountId, - amount: T::Balance - ) -> DispatchResult { - let new_balance = Self::free_balance(asset_id, from) - .checked_sub(&amount) - .ok_or(Error::::InsufficientBalance)?; - Self::ensure_can_withdraw(asset_id, from, amount, WithdrawReason::Transfer.into(), new_balance)?; - - if from != to { - >::mutate(asset_id, from, |balance| *balance -= amount); - >::mutate(asset_id, to, |balance| *balance += amount); - } - - Ok(()) - } - - /// Transfer some liquid free balance from one account to another. - /// This will emit the `Transferred` event. - pub fn make_transfer_with_event( - asset_id: &T::AssetId, - from: &T::AccountId, - to: &T::AccountId, - amount: T::Balance, - ) -> DispatchResult { - Self::make_transfer(asset_id, from, to, amount)?; - - if from != to { - Self::deposit_event(RawEvent::Transferred(*asset_id, from.clone(), to.clone(), amount)); - } - - Ok(()) - } - - /// Move `amount` from free balance to reserved balance. - /// - /// If the free balance is lower than `amount`, then no funds will be moved and an `Err` will - /// be returned. This is different behavior than `unreserve`. - pub fn reserve(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) - -> DispatchResult - { - // Do we need to consider that this is an atomic transaction? - let original_reserve_balance = Self::reserved_balance(asset_id, who); - let original_free_balance = Self::free_balance(asset_id, who); - if original_free_balance < amount { - Err(Error::::InsufficientBalance)? - } - let new_reserve_balance = original_reserve_balance + amount; - Self::set_reserved_balance(asset_id, who, new_reserve_balance); - let new_free_balance = original_free_balance - amount; - Self::set_free_balance(asset_id, who, new_free_balance); - Ok(()) - } - - /// Moves up to `amount` from reserved balance to free balance. This function cannot fail. 
- /// - /// As many assets up to `amount` will be moved as possible. If the reserve balance of `who` - /// is less than `amount`, then the remaining amount will be returned. - /// NOTE: This is different behavior than `reserve`. - pub fn unreserve(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> T::Balance { - let b = Self::reserved_balance(asset_id, who); - let actual = sp_std::cmp::min(b, amount); - let original_free_balance = Self::free_balance(asset_id, who); - let new_free_balance = original_free_balance + actual; - Self::set_free_balance(asset_id, who, new_free_balance); - Self::set_reserved_balance(asset_id, who, b - actual); - amount - actual - } - - /// Deduct up to `amount` from the combined balance of `who`, preferring to deduct from the - /// free balance. This function cannot fail. - /// - /// As much funds up to `amount` will be deducted as possible. If this is less than `amount` - /// then `Some(remaining)` will be returned. Full completion is given by `None`. - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - pub fn slash(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { - let free_balance = Self::free_balance(asset_id, who); - let free_slash = sp_std::cmp::min(free_balance, amount); - let new_free_balance = free_balance - free_slash; - Self::set_free_balance(asset_id, who, new_free_balance); - if free_slash < amount { - Self::slash_reserved(asset_id, who, amount - free_slash) - } else { - None - } - } - - /// Deducts up to `amount` from reserved balance of `who`. This function cannot fail. - /// - /// As much funds up to `amount` will be deducted as possible. If the reserve balance of `who` - /// is less than `amount`, then a non-zero second item will be returned. - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. 
- pub fn slash_reserved(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> Option { - let original_reserve_balance = Self::reserved_balance(asset_id, who); - let slash = sp_std::cmp::min(original_reserve_balance, amount); - let new_reserve_balance = original_reserve_balance - slash; - Self::set_reserved_balance(asset_id, who, new_reserve_balance); - if amount == slash { - None - } else { - Some(amount - slash) - } - } - - /// Move up to `amount` from reserved balance of account `who` to balance of account - /// `beneficiary`, either free or reserved depending on `status`. - /// - /// As much funds up to `amount` will be moved as possible. If this is less than `amount`, then - /// the `remaining` would be returned, else `Zero::zero()`. - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - pub fn repatriate_reserved( - asset_id: &T::AssetId, - who: &T::AccountId, - beneficiary: &T::AccountId, - amount: T::Balance, - status: BalanceStatus, - ) -> T::Balance { - let b = Self::reserved_balance(asset_id, who); - let slash = sp_std::cmp::min(b, amount); - - match status { - BalanceStatus::Free => { - let original_free_balance = Self::free_balance(asset_id, beneficiary); - let new_free_balance = original_free_balance + slash; - Self::set_free_balance(asset_id, beneficiary, new_free_balance); - } - BalanceStatus::Reserved => { - let original_reserved_balance = Self::reserved_balance(asset_id, beneficiary); - let new_reserved_balance = original_reserved_balance + slash; - Self::set_reserved_balance(asset_id, beneficiary, new_reserved_balance); - } - } - - let new_reserve_balance = b - slash; - Self::set_reserved_balance(asset_id, who, new_reserve_balance); - amount - slash - } - - /// Check permission to perform burn, mint or update. - /// - /// # Arguments - /// * `asset_id`: A `T::AssetId` type that contains the `asset_id`, which has the permission embedded. 
- /// * `who`: A `T::AccountId` type that contains the `account_id` for which to check permissions. - /// * `what`: The permission to check. - /// - pub fn check_permission(asset_id: &T::AssetId, who: &T::AccountId, what: &PermissionType) -> bool { - let permission_versions: PermissionVersions = Self::get_permission(asset_id); - let permission = permission_versions.into(); - - match (what, permission) { - ( - PermissionType::Burn, - PermissionLatest { - burn: Owner::Address(account), - .. - }, - ) => account == *who, - ( - PermissionType::Mint, - PermissionLatest { - mint: Owner::Address(account), - .. - }, - ) => account == *who, - ( - PermissionType::Update, - PermissionLatest { - update: Owner::Address(account), - .. - }, - ) => account == *who, - _ => false, - } - } - - /// Return `Ok` iff the account is able to make a withdrawal of the given amount - /// for the given reason. - /// - /// `Err(...)` with the reason why not otherwise. - pub fn ensure_can_withdraw( - asset_id: &T::AssetId, - who: &T::AccountId, - _amount: T::Balance, - reasons: WithdrawReasons, - new_balance: T::Balance, - ) -> DispatchResult { - if asset_id != &Self::staking_asset_id() { - return Ok(()); - } - - let locks = Self::locks(who); - if locks.is_empty() { - return Ok(()); - } - if Self::locks(who) - .into_iter().all(|l| new_balance >= l.amount || !l.reasons.intersects(reasons)) - { - Ok(()) - } else { - Err(Error::::LiquidityRestrictions)? - } - } - - // PRIVATE MUTABLES - - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. - fn set_reserved_balance(asset_id: &T::AssetId, who: &T::AccountId, balance: T::Balance) { - >::insert(asset_id, who, &balance); - } - - /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that - /// the caller will do this. 
- fn set_free_balance(asset_id: &T::AssetId, who: &T::AccountId, balance: T::Balance) { - >::insert(asset_id, who, &balance); - } - - fn set_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - let mut new_lock = Some(BalanceLock { - id, - amount, - reasons, - }); - let mut locks = >::locks(who) - .into_iter() - .filter_map(|l| { - if l.id == id { - new_lock.take() - } else { - Some(l) - } - }) - .collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - >::insert(who, locks); - } - - fn extend_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - let mut new_lock = Some(BalanceLock { - id, - amount, - reasons, - }); - let mut locks = >::locks(who) - .into_iter() - .filter_map(|l| { - if l.id == id { - new_lock.take().map(|nl| BalanceLock { - id: l.id, - amount: l.amount.max(nl.amount), - reasons: l.reasons | nl.reasons, - }) - } else { - Some(l) - } - }) - .collect::>(); - if let Some(lock) = new_lock { - locks.push(lock) - } - >::insert(who, locks); - } - - fn remove_lock(id: LockIdentifier, who: &T::AccountId) { - let mut locks = >::locks(who); - locks.retain(|l| l.id != id); - >::insert(who, locks); - } + // PUBLIC IMMUTABLES + + /// Get an account's total balance of an asset kind. + pub fn total_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { + Self::free_balance(asset_id, who) + Self::reserved_balance(asset_id, who) + } + + /// Get an account's free balance of an asset kind. + pub fn free_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { + >::get(asset_id, who) + } + + /// Get an account's reserved balance of an asset kind. 
+ pub fn reserved_balance(asset_id: &T::AssetId, who: &T::AccountId) -> T::Balance { + >::get(asset_id, who) + } + + /// Mint to an account's free balance, without event + pub fn mint_free( + asset_id: &T::AssetId, + who: &T::AccountId, + to: &T::AccountId, + amount: &T::Balance, + ) -> DispatchResult { + if Self::check_permission(asset_id, who, &PermissionType::Mint) { + let original_free_balance = Self::free_balance(&asset_id, &to); + let current_total_issuance = >::get(asset_id); + let new_total_issuance = current_total_issuance + .checked_add(&amount) + .ok_or(Error::::TotalMintingOverflow)?; + let value = original_free_balance + .checked_add(&amount) + .ok_or(Error::::FreeMintingOverflow)?; + + >::insert(asset_id, new_total_issuance); + Self::set_free_balance(&asset_id, &to, value); + Ok(()) + } else { + Err(Error::::NoMintPermission)? + } + } + + /// Burn an account's free balance, without event + pub fn burn_free( + asset_id: &T::AssetId, + who: &T::AccountId, + to: &T::AccountId, + amount: &T::Balance, + ) -> DispatchResult { + if Self::check_permission(asset_id, who, &PermissionType::Burn) { + let original_free_balance = Self::free_balance(asset_id, to); + + let current_total_issuance = >::get(asset_id); + let new_total_issuance = current_total_issuance + .checked_sub(amount) + .ok_or(Error::::TotalBurningUnderflow)?; + let value = original_free_balance + .checked_sub(amount) + .ok_or(Error::::FreeBurningUnderflow)?; + + >::insert(asset_id, new_total_issuance); + Self::set_free_balance(asset_id, to, value); + Ok(()) + } else { + Err(Error::::NoBurnPermission)? + } + } + + /// Creates an asset. + /// + /// # Arguments + /// * `asset_id`: An ID of a reserved asset. + /// If not provided, a user-generated asset will be created with the next available ID. + /// * `from_account`: The initiator account of this call + /// * `asset_options`: Asset creation options. 
+ /// + pub fn create_asset( + asset_id: Option, + from_account: Option, + options: AssetOptions, + ) -> DispatchResult { + let asset_id = if let Some(asset_id) = asset_id { + ensure!( + !>::contains_key(&asset_id), + Error::::IdAlreadyTaken + ); + ensure!(asset_id < Self::next_asset_id(), Error::::IdUnavailable); + asset_id + } else { + let asset_id = Self::next_asset_id(); + let next_id = asset_id + .checked_add(&One::one()) + .ok_or(Error::::NoIdAvailable)?; + >::put(next_id); + asset_id + }; + + let account_id = from_account.unwrap_or_default(); + let permissions: PermissionVersions = options.permissions.clone().into(); + + >::insert(asset_id, &options.initial_issuance); + >::insert(&asset_id, &account_id, &options.initial_issuance); + >::insert(&asset_id, permissions); + + Self::deposit_event(RawEvent::Created(asset_id, account_id, options)); + + Ok(()) + } + + /// Transfer some liquid free balance from one account to another. + /// This will not emit the `Transferred` event. + pub fn make_transfer( + asset_id: &T::AssetId, + from: &T::AccountId, + to: &T::AccountId, + amount: T::Balance, + ) -> DispatchResult { + let new_balance = Self::free_balance(asset_id, from) + .checked_sub(&amount) + .ok_or(Error::::InsufficientBalance)?; + Self::ensure_can_withdraw( + asset_id, + from, + amount, + WithdrawReason::Transfer.into(), + new_balance, + )?; + + if from != to { + >::mutate(asset_id, from, |balance| *balance -= amount); + >::mutate(asset_id, to, |balance| *balance += amount); + } + + Ok(()) + } + + /// Transfer some liquid free balance from one account to another. + /// This will emit the `Transferred` event. 
+ pub fn make_transfer_with_event( + asset_id: &T::AssetId, + from: &T::AccountId, + to: &T::AccountId, + amount: T::Balance, + ) -> DispatchResult { + Self::make_transfer(asset_id, from, to, amount)?; + + if from != to { + Self::deposit_event(RawEvent::Transferred( + *asset_id, + from.clone(), + to.clone(), + amount, + )); + } + + Ok(()) + } + + /// Move `amount` from free balance to reserved balance. + /// + /// If the free balance is lower than `amount`, then no funds will be moved and an `Err` will + /// be returned. This is different behavior than `unreserve`. + pub fn reserve( + asset_id: &T::AssetId, + who: &T::AccountId, + amount: T::Balance, + ) -> DispatchResult { + // Do we need to consider that this is an atomic transaction? + let original_reserve_balance = Self::reserved_balance(asset_id, who); + let original_free_balance = Self::free_balance(asset_id, who); + if original_free_balance < amount { + Err(Error::::InsufficientBalance)? + } + let new_reserve_balance = original_reserve_balance + amount; + Self::set_reserved_balance(asset_id, who, new_reserve_balance); + let new_free_balance = original_free_balance - amount; + Self::set_free_balance(asset_id, who, new_free_balance); + Ok(()) + } + + /// Moves up to `amount` from reserved balance to free balance. This function cannot fail. + /// + /// As many assets up to `amount` will be moved as possible. If the reserve balance of `who` + /// is less than `amount`, then the remaining amount will be returned. + /// NOTE: This is different behavior than `reserve`. 
+ pub fn unreserve(asset_id: &T::AssetId, who: &T::AccountId, amount: T::Balance) -> T::Balance { + let b = Self::reserved_balance(asset_id, who); + let actual = sp_std::cmp::min(b, amount); + let original_free_balance = Self::free_balance(asset_id, who); + let new_free_balance = original_free_balance + actual; + Self::set_free_balance(asset_id, who, new_free_balance); + Self::set_reserved_balance(asset_id, who, b - actual); + amount - actual + } + + /// Deduct up to `amount` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// As much funds up to `amount` will be deducted as possible. If this is less than `amount` + /// then `Some(remaining)` will be returned. Full completion is given by `None`. + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + pub fn slash( + asset_id: &T::AssetId, + who: &T::AccountId, + amount: T::Balance, + ) -> Option { + let free_balance = Self::free_balance(asset_id, who); + let free_slash = sp_std::cmp::min(free_balance, amount); + let new_free_balance = free_balance - free_slash; + Self::set_free_balance(asset_id, who, new_free_balance); + if free_slash < amount { + Self::slash_reserved(asset_id, who, amount - free_slash) + } else { + None + } + } + + /// Deducts up to `amount` from reserved balance of `who`. This function cannot fail. + /// + /// As much funds up to `amount` will be deducted as possible. If the reserve balance of `who` + /// is less than `amount`, then a non-zero second item will be returned. + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. 
+ pub fn slash_reserved( + asset_id: &T::AssetId, + who: &T::AccountId, + amount: T::Balance, + ) -> Option { + let original_reserve_balance = Self::reserved_balance(asset_id, who); + let slash = sp_std::cmp::min(original_reserve_balance, amount); + let new_reserve_balance = original_reserve_balance - slash; + Self::set_reserved_balance(asset_id, who, new_reserve_balance); + if amount == slash { + None + } else { + Some(amount - slash) + } + } + + /// Move up to `amount` from reserved balance of account `who` to balance of account + /// `beneficiary`, either free or reserved depending on `status`. + /// + /// As much funds up to `amount` will be moved as possible. If this is less than `amount`, then + /// the `remaining` would be returned, else `Zero::zero()`. + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + pub fn repatriate_reserved( + asset_id: &T::AssetId, + who: &T::AccountId, + beneficiary: &T::AccountId, + amount: T::Balance, + status: BalanceStatus, + ) -> T::Balance { + let b = Self::reserved_balance(asset_id, who); + let slash = sp_std::cmp::min(b, amount); + + match status { + BalanceStatus::Free => { + let original_free_balance = Self::free_balance(asset_id, beneficiary); + let new_free_balance = original_free_balance + slash; + Self::set_free_balance(asset_id, beneficiary, new_free_balance); + } + BalanceStatus::Reserved => { + let original_reserved_balance = Self::reserved_balance(asset_id, beneficiary); + let new_reserved_balance = original_reserved_balance + slash; + Self::set_reserved_balance(asset_id, beneficiary, new_reserved_balance); + } + } + + let new_reserve_balance = b - slash; + Self::set_reserved_balance(asset_id, who, new_reserve_balance); + amount - slash + } + + /// Check permission to perform burn, mint or update. + /// + /// # Arguments + /// * `asset_id`: A `T::AssetId` type that contains the `asset_id`, which has the permission embedded. 
+ /// * `who`: A `T::AccountId` type that contains the `account_id` for which to check permissions. + /// * `what`: The permission to check. + /// + pub fn check_permission( + asset_id: &T::AssetId, + who: &T::AccountId, + what: &PermissionType, + ) -> bool { + let permission_versions: PermissionVersions = Self::get_permission(asset_id); + let permission = permission_versions.into(); + + match (what, permission) { + ( + PermissionType::Burn, + PermissionLatest { + burn: Owner::Address(account), + .. + }, + ) => account == *who, + ( + PermissionType::Mint, + PermissionLatest { + mint: Owner::Address(account), + .. + }, + ) => account == *who, + ( + PermissionType::Update, + PermissionLatest { + update: Owner::Address(account), + .. + }, + ) => account == *who, + _ => false, + } + } + + /// Return `Ok` iff the account is able to make a withdrawal of the given amount + /// for the given reason. + /// + /// `Err(...)` with the reason why not otherwise. + pub fn ensure_can_withdraw( + asset_id: &T::AssetId, + who: &T::AccountId, + _amount: T::Balance, + reasons: WithdrawReasons, + new_balance: T::Balance, + ) -> DispatchResult { + if asset_id != &Self::staking_asset_id() { + return Ok(()); + } + + let locks = Self::locks(who); + if locks.is_empty() { + return Ok(()); + } + if Self::locks(who) + .into_iter() + .all(|l| new_balance >= l.amount || !l.reasons.intersects(reasons)) + { + Ok(()) + } else { + Err(Error::::LiquidityRestrictions)? + } + } + + // PRIVATE MUTABLES + + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. + fn set_reserved_balance(asset_id: &T::AssetId, who: &T::AccountId, balance: T::Balance) { + >::insert(asset_id, who, &balance); + } + + /// NOTE: LOW-LEVEL: This will not attempt to maintain total issuance. It is expected that + /// the caller will do this. 
+ fn set_free_balance(asset_id: &T::AssetId, who: &T::AccountId, balance: T::Balance) { + >::insert(asset_id, who, &balance); + } + + fn set_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + let mut new_lock = Some(BalanceLock { + id, + amount, + reasons, + }); + let mut locks = >::locks(who) + .into_iter() + .filter_map(|l| if l.id == id { new_lock.take() } else { Some(l) }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + >::insert(who, locks); + } + + fn extend_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + let mut new_lock = Some(BalanceLock { + id, + amount, + reasons, + }); + let mut locks = >::locks(who) + .into_iter() + .filter_map(|l| { + if l.id == id { + new_lock.take().map(|nl| BalanceLock { + id: l.id, + amount: l.amount.max(nl.amount), + reasons: l.reasons | nl.reasons, + }) + } else { + Some(l) + } + }) + .collect::>(); + if let Some(lock) = new_lock { + locks.push(lock) + } + >::insert(who, locks); + } + + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { + let mut locks = >::locks(who); + locks.retain(|l| l.id != id); + >::insert(who, locks); + } } pub trait AssetIdProvider { - type AssetId; - fn asset_id() -> Self::AssetId; + type AssetId; + fn asset_id() -> Self::AssetId; } // wrapping these imbalances in a private module is necessary to ensure absolute privacy // of the inner member. mod imbalances { - use super::{ - result, AssetIdProvider, Imbalance, Saturating, StorageMap, Subtrait, Zero, TryDrop - }; - use sp_std::mem; - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been created without any equal and opposite accounting. 
- #[must_use] - pub struct PositiveImbalance>( - T::Balance, - sp_std::marker::PhantomData, - ); - impl PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - pub fn new(amount: T::Balance) -> Self { - PositiveImbalance(amount, Default::default()) - } - } - - /// Opaque, move-only struct with private fields that serves as a token denoting that - /// funds have been destroyed without any equal and opposite accounting. - #[must_use] - pub struct NegativeImbalance>( - T::Balance, - sp_std::marker::PhantomData, - ); - impl NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - pub fn new(amount: T::Balance) -> Self { - NegativeImbalance(amount, Default::default()) - } - } - - impl TryDrop for PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - fn try_drop(self) -> result::Result<(), Self> { - self.drop_zero() - } - } - - impl Imbalance for PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - type Opposite = NegativeImbalance; - - fn zero() -> Self { - Self::new(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - - mem::forget(self); - (Self::new(first), Self::new(second)) - } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> result::Result { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a >= b { - Ok(Self::new(a - b)) - } else { - Err(NegativeImbalance::new(b - a)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } - } - - impl TryDrop for NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - fn try_drop(self) -> result::Result<(), Self> { 
- self.drop_zero() - } - } - - impl Imbalance for NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - type Opposite = PositiveImbalance; - - fn zero() -> Self { - Self::new(Zero::zero()) - } - fn drop_zero(self) -> result::Result<(), Self> { - if self.0.is_zero() { - Ok(()) - } else { - Err(self) - } - } - fn split(self, amount: T::Balance) -> (Self, Self) { - let first = self.0.min(amount); - let second = self.0 - first; - - mem::forget(self); - (Self::new(first), Self::new(second)) - } - fn merge(mut self, other: Self) -> Self { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - - self - } - fn subsume(&mut self, other: Self) { - self.0 = self.0.saturating_add(other.0); - mem::forget(other); - } - fn offset(self, other: Self::Opposite) -> result::Result { - let (a, b) = (self.0, other.0); - mem::forget((self, other)); - - if a >= b { - Ok(Self::new(a - b)) - } else { - Err(PositiveImbalance::new(b - a)) - } - } - fn peek(&self) -> T::Balance { - self.0.clone() - } - } - - impl Drop for PositiveImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - >>::mutate(&U::asset_id(), |v| *v = v.saturating_add(self.0)); - } - } - - impl Drop for NegativeImbalance - where - T: Subtrait, - U: AssetIdProvider, - { - /// Basic drop handler will just square up the total issuance. - fn drop(&mut self) { - >>::mutate(&U::asset_id(), |v| *v = v.saturating_sub(self.0)); - } - } + use super::{ + result, AssetIdProvider, Imbalance, Saturating, StorageMap, Subtrait, TryDrop, Zero, + }; + use sp_std::mem; + + /// Opaque, move-only struct with private fields that serves as a token denoting that + /// funds have been created without any equal and opposite accounting. 
+ #[must_use] + pub struct PositiveImbalance>( + T::Balance, + sp_std::marker::PhantomData, + ); + impl PositiveImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + pub fn new(amount: T::Balance) -> Self { + PositiveImbalance(amount, Default::default()) + } + } + + /// Opaque, move-only struct with private fields that serves as a token denoting that + /// funds have been destroyed without any equal and opposite accounting. + #[must_use] + pub struct NegativeImbalance>( + T::Balance, + sp_std::marker::PhantomData, + ); + impl NegativeImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + pub fn new(amount: T::Balance) -> Self { + NegativeImbalance(amount, Default::default()) + } + } + + impl TryDrop for PositiveImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + fn try_drop(self) -> result::Result<(), Self> { + self.drop_zero() + } + } + + impl Imbalance for PositiveImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + type Opposite = NegativeImbalance; + + fn zero() -> Self { + Self::new(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + + mem::forget(self); + (Self::new(first), Self::new(second)) + } + fn merge(mut self, other: Self) -> Self { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + + self + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + } + fn offset(self, other: Self::Opposite) -> result::Result { + let (a, b) = (self.0, other.0); + mem::forget((self, other)); + + if a >= b { + Ok(Self::new(a - b)) + } else { + Err(NegativeImbalance::new(b - a)) + } + } + fn peek(&self) -> T::Balance { + self.0.clone() + } + } + + impl TryDrop for NegativeImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + fn try_drop(self) -> result::Result<(), Self> { 
+ self.drop_zero() + } + } + + impl Imbalance for NegativeImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + type Opposite = PositiveImbalance; + + fn zero() -> Self { + Self::new(Zero::zero()) + } + fn drop_zero(self) -> result::Result<(), Self> { + if self.0.is_zero() { + Ok(()) + } else { + Err(self) + } + } + fn split(self, amount: T::Balance) -> (Self, Self) { + let first = self.0.min(amount); + let second = self.0 - first; + + mem::forget(self); + (Self::new(first), Self::new(second)) + } + fn merge(mut self, other: Self) -> Self { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + + self + } + fn subsume(&mut self, other: Self) { + self.0 = self.0.saturating_add(other.0); + mem::forget(other); + } + fn offset(self, other: Self::Opposite) -> result::Result { + let (a, b) = (self.0, other.0); + mem::forget((self, other)); + + if a >= b { + Ok(Self::new(a - b)) + } else { + Err(PositiveImbalance::new(b - a)) + } + } + fn peek(&self) -> T::Balance { + self.0.clone() + } + } + + impl Drop for PositiveImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + /// Basic drop handler will just square up the total issuance. + fn drop(&mut self) { + >>::mutate(&U::asset_id(), |v| { + *v = v.saturating_add(self.0) + }); + } + } + + impl Drop for NegativeImbalance + where + T: Subtrait, + U: AssetIdProvider, + { + /// Basic drop handler will just square up the total issuance. + fn drop(&mut self) { + >>::mutate(&U::asset_id(), |v| { + *v = v.saturating_sub(self.0) + }); + } + } } // TODO: #2052 @@ -1102,262 +1131,279 @@ mod imbalances { // are placed in their own pallet. 
struct ElevatedTrait(T); impl Clone for ElevatedTrait { - fn clone(&self) -> Self { - unimplemented!() - } + fn clone(&self) -> Self { + unimplemented!() + } } impl PartialEq for ElevatedTrait { - fn eq(&self, _: &Self) -> bool { - unimplemented!() - } + fn eq(&self, _: &Self) -> bool { + unimplemented!() + } } impl Eq for ElevatedTrait {} impl frame_system::Trait for ElevatedTrait { - type Origin = T::Origin; - type Call = T::Call; - type Index = T::Index; - type BlockNumber = T::BlockNumber; - type Hash = T::Hash; - type Hashing = T::Hashing; - type AccountId = T::AccountId; - type Lookup = T::Lookup; - type Header = T::Header; - type Event = (); - type BlockHashCount = T::BlockHashCount; - type MaximumBlockWeight = T::MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = T::MaximumBlockLength; - type AvailableBlockRatio = T::AvailableBlockRatio; - type Version = T::Version; - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = T::Origin; + type Call = T::Call; + type Index = T::Index; + type BlockNumber = T::BlockNumber; + type Hash = T::Hash; + type Hashing = T::Hashing; + type AccountId = T::AccountId; + type Lookup = T::Lookup; + type Header = T::Header; + type Event = (); + type BlockHashCount = T::BlockHashCount; + type MaximumBlockWeight = T::MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = T::MaximumBlockLength; + type AvailableBlockRatio = T::AvailableBlockRatio; + type Version = T::Version; + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl Trait for ElevatedTrait { - type Balance = T::Balance; - type AssetId = T::AssetId; - type Event = (); + type Balance = T::Balance; + type AssetId = T::AssetId; + type Event = (); } #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] -pub struct AssetCurrency(sp_std::marker::PhantomData, sp_std::marker::PhantomData); +pub struct 
AssetCurrency( + sp_std::marker::PhantomData, + sp_std::marker::PhantomData, +); impl Currency for AssetCurrency where - T: Trait, - U: AssetIdProvider, + T: Trait, + U: AssetIdProvider, { - type Balance = T::Balance; - type PositiveImbalance = PositiveImbalance; - type NegativeImbalance = NegativeImbalance; - - fn total_balance(who: &T::AccountId) -> Self::Balance { - Self::free_balance(&who) + Self::reserved_balance(&who) - } - - fn free_balance(who: &T::AccountId) -> Self::Balance { - >::free_balance(&U::asset_id(), &who) - } - - /// Returns the total staking asset issuance - fn total_issuance() -> Self::Balance { - >::total_issuance(U::asset_id()) - } - - fn minimum_balance() -> Self::Balance { - Zero::zero() - } - - fn transfer( - transactor: &T::AccountId, - dest: &T::AccountId, - value: Self::Balance, - _: ExistenceRequirement, // no existential deposit policy for generic asset - ) -> DispatchResult { - >::make_transfer(&U::asset_id(), transactor, dest, value) - } - - fn ensure_can_withdraw( - who: &T::AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - new_balance: Self::Balance, - ) -> DispatchResult { - >::ensure_can_withdraw(&U::asset_id(), who, amount, reasons, new_balance) - } - - fn withdraw( - who: &T::AccountId, - value: Self::Balance, - reasons: WithdrawReasons, - _: ExistenceRequirement, // no existential deposit policy for generic asset - ) -> result::Result { - let new_balance = Self::free_balance(who) - .checked_sub(&value) - .ok_or(Error::::InsufficientBalance)?; - Self::ensure_can_withdraw(who, value, reasons, new_balance)?; - >::set_free_balance(&U::asset_id(), who, new_balance); - Ok(NegativeImbalance::new(value)) - } - - fn deposit_into_existing( - who: &T::AccountId, - value: Self::Balance, - ) -> result::Result { - // No existential deposit rule and creation fee in GA. `deposit_into_existing` is same with `deposit_creating`. 
- Ok(Self::deposit_creating(who, value)) - } - - fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { - let imbalance = Self::make_free_balance_be(who, Self::free_balance(who) + value); - if let SignedImbalance::Positive(p) = imbalance { - p - } else { - // Impossible, but be defensive. - Self::PositiveImbalance::zero() - } - } - - fn make_free_balance_be( - who: &T::AccountId, - balance: Self::Balance, - ) -> SignedImbalance { - let original = >::free_balance(&U::asset_id(), who); - let imbalance = if original <= balance { - SignedImbalance::Positive(PositiveImbalance::new(balance - original)) - } else { - SignedImbalance::Negative(NegativeImbalance::new(original - balance)) - }; - >::set_free_balance(&U::asset_id(), who, balance); - imbalance - } - - fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { - >::free_balance(&U::asset_id(), &who) >= value - } - - fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { - let remaining = >::slash(&U::asset_id(), who, value); - if let Some(r) = remaining { - (NegativeImbalance::new(value - r), r) - } else { - (NegativeImbalance::new(value), Zero::zero()) - } - } - - fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { - >::mutate(&U::asset_id(), |issued| - issued.checked_sub(&amount).unwrap_or_else(|| { - amount = *issued; - Zero::zero() - }) - ); - PositiveImbalance::new(amount) - } - - fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { - >::mutate(&U::asset_id(), |issued| - *issued = issued.checked_add(&amount).unwrap_or_else(|| { - amount = Self::Balance::max_value() - *issued; - Self::Balance::max_value() - }) - ); - NegativeImbalance::new(amount) - } + type Balance = T::Balance; + type PositiveImbalance = PositiveImbalance; + type NegativeImbalance = NegativeImbalance; + + fn total_balance(who: &T::AccountId) -> Self::Balance { + Self::free_balance(&who) + Self::reserved_balance(&who) + } + + fn 
free_balance(who: &T::AccountId) -> Self::Balance { + >::free_balance(&U::asset_id(), &who) + } + + /// Returns the total staking asset issuance + fn total_issuance() -> Self::Balance { + >::total_issuance(U::asset_id()) + } + + fn minimum_balance() -> Self::Balance { + Zero::zero() + } + + fn transfer( + transactor: &T::AccountId, + dest: &T::AccountId, + value: Self::Balance, + _: ExistenceRequirement, // no existential deposit policy for generic asset + ) -> DispatchResult { + >::make_transfer(&U::asset_id(), transactor, dest, value) + } + + fn ensure_can_withdraw( + who: &T::AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + new_balance: Self::Balance, + ) -> DispatchResult { + >::ensure_can_withdraw(&U::asset_id(), who, amount, reasons, new_balance) + } + + fn withdraw( + who: &T::AccountId, + value: Self::Balance, + reasons: WithdrawReasons, + _: ExistenceRequirement, // no existential deposit policy for generic asset + ) -> result::Result { + let new_balance = Self::free_balance(who) + .checked_sub(&value) + .ok_or(Error::::InsufficientBalance)?; + Self::ensure_can_withdraw(who, value, reasons, new_balance)?; + >::set_free_balance(&U::asset_id(), who, new_balance); + Ok(NegativeImbalance::new(value)) + } + + fn deposit_into_existing( + who: &T::AccountId, + value: Self::Balance, + ) -> result::Result { + // No existential deposit rule and creation fee in GA. `deposit_into_existing` is same with `deposit_creating`. + Ok(Self::deposit_creating(who, value)) + } + + fn deposit_creating(who: &T::AccountId, value: Self::Balance) -> Self::PositiveImbalance { + let imbalance = Self::make_free_balance_be(who, Self::free_balance(who) + value); + if let SignedImbalance::Positive(p) = imbalance { + p + } else { + // Impossible, but be defensive. 
+ Self::PositiveImbalance::zero() + } + } + + fn make_free_balance_be( + who: &T::AccountId, + balance: Self::Balance, + ) -> SignedImbalance { + let original = >::free_balance(&U::asset_id(), who); + let imbalance = if original <= balance { + SignedImbalance::Positive(PositiveImbalance::new(balance - original)) + } else { + SignedImbalance::Negative(NegativeImbalance::new(original - balance)) + }; + >::set_free_balance(&U::asset_id(), who, balance); + imbalance + } + + fn can_slash(who: &T::AccountId, value: Self::Balance) -> bool { + >::free_balance(&U::asset_id(), &who) >= value + } + + fn slash(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { + let remaining = >::slash(&U::asset_id(), who, value); + if let Some(r) = remaining { + (NegativeImbalance::new(value - r), r) + } else { + (NegativeImbalance::new(value), Zero::zero()) + } + } + + fn burn(mut amount: Self::Balance) -> Self::PositiveImbalance { + >::mutate(&U::asset_id(), |issued| { + issued.checked_sub(&amount).unwrap_or_else(|| { + amount = *issued; + Zero::zero() + }) + }); + PositiveImbalance::new(amount) + } + + fn issue(mut amount: Self::Balance) -> Self::NegativeImbalance { + >::mutate(&U::asset_id(), |issued| { + *issued = issued.checked_add(&amount).unwrap_or_else(|| { + amount = Self::Balance::max_value() - *issued; + Self::Balance::max_value() + }) + }); + NegativeImbalance::new(amount) + } } impl ReservableCurrency for AssetCurrency where - T: Trait, - U: AssetIdProvider, + T: Trait, + U: AssetIdProvider, { - fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { - Self::free_balance(who) - .checked_sub(&value) - .map_or(false, |new_balance| - >::ensure_can_withdraw( - &U::asset_id(), who, value, WithdrawReason::Reserve.into(), new_balance - ).is_ok() - ) - } - - fn reserved_balance(who: &T::AccountId) -> Self::Balance { - >::reserved_balance(&U::asset_id(), &who) - } - - fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { 
- >::reserve(&U::asset_id(), who, value) - } - - fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { - >::unreserve(&U::asset_id(), who, value) - } - - fn slash_reserved(who: &T::AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance) { - let b = Self::reserved_balance(&who.clone()); - let slash = cmp::min(b, value); - - >::set_reserved_balance(&U::asset_id(), who, b - slash); - (NegativeImbalance::new(slash), value - slash) - } - - fn repatriate_reserved( - slashed: &T::AccountId, - beneficiary: &T::AccountId, - value: Self::Balance, - status: BalanceStatus, - ) -> result::Result { - Ok(>::repatriate_reserved(&U::asset_id(), slashed, beneficiary, value, status)) - } + fn can_reserve(who: &T::AccountId, value: Self::Balance) -> bool { + Self::free_balance(who) + .checked_sub(&value) + .map_or(false, |new_balance| { + >::ensure_can_withdraw( + &U::asset_id(), + who, + value, + WithdrawReason::Reserve.into(), + new_balance, + ) + .is_ok() + }) + } + + fn reserved_balance(who: &T::AccountId) -> Self::Balance { + >::reserved_balance(&U::asset_id(), &who) + } + + fn reserve(who: &T::AccountId, value: Self::Balance) -> DispatchResult { + >::reserve(&U::asset_id(), who, value) + } + + fn unreserve(who: &T::AccountId, value: Self::Balance) -> Self::Balance { + >::unreserve(&U::asset_id(), who, value) + } + + fn slash_reserved( + who: &T::AccountId, + value: Self::Balance, + ) -> (Self::NegativeImbalance, Self::Balance) { + let b = Self::reserved_balance(&who.clone()); + let slash = cmp::min(b, value); + + >::set_reserved_balance(&U::asset_id(), who, b - slash); + (NegativeImbalance::new(slash), value - slash) + } + + fn repatriate_reserved( + slashed: &T::AccountId, + beneficiary: &T::AccountId, + value: Self::Balance, + status: BalanceStatus, + ) -> result::Result { + Ok(>::repatriate_reserved( + &U::asset_id(), + slashed, + beneficiary, + value, + status, + )) + } } pub struct StakingAssetIdProvider(sp_std::marker::PhantomData); 
impl AssetIdProvider for StakingAssetIdProvider { - type AssetId = T::AssetId; - fn asset_id() -> Self::AssetId { - >::staking_asset_id() - } + type AssetId = T::AssetId; + fn asset_id() -> Self::AssetId { + >::staking_asset_id() + } } pub struct SpendingAssetIdProvider(sp_std::marker::PhantomData); impl AssetIdProvider for SpendingAssetIdProvider { - type AssetId = T::AssetId; - fn asset_id() -> Self::AssetId { - >::spending_asset_id() - } + type AssetId = T::AssetId; + fn asset_id() -> Self::AssetId { + >::spending_asset_id() + } } impl LockableCurrency for AssetCurrency> where - T: Trait, - T::Balance: MaybeSerializeDeserialize + Debug, + T: Trait, + T::Balance: MaybeSerializeDeserialize + Debug, { - type Moment = T::BlockNumber; - - fn set_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - >::set_lock(id, who, amount, reasons) - } - - fn extend_lock( - id: LockIdentifier, - who: &T::AccountId, - amount: T::Balance, - reasons: WithdrawReasons, - ) { - >::extend_lock(id, who, amount, reasons) - } - - fn remove_lock(id: LockIdentifier, who: &T::AccountId) { - >::remove_lock(id, who) - } + type Moment = T::BlockNumber; + + fn set_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + >::set_lock(id, who, amount, reasons) + } + + fn extend_lock( + id: LockIdentifier, + who: &T::AccountId, + amount: T::Balance, + reasons: WithdrawReasons, + ) { + >::extend_lock(id, who, amount, reasons) + } + + fn remove_lock(id: LockIdentifier, who: &T::AccountId) { + >::remove_lock(id, who) + } } pub type StakingAssetCurrency = AssetCurrency>; diff --git a/frame/generic-asset/src/mock.rs b/frame/generic-asset/src/mock.rs index 2cd779da03..874fb07bf1 100644 --- a/frame/generic-asset/src/mock.rs +++ b/frame/generic-asset/src/mock.rs @@ -20,18 +20,18 @@ #![cfg(test)] +use frame_support::{impl_outer_event, impl_outer_origin, parameter_types, weights::Weight}; +use 
sp_core::H256; use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -use sp_core::H256; -use frame_support::{parameter_types, impl_outer_event, impl_outer_origin, weights::Weight}; use super::*; impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } // For testing the pallet, we construct most of a mock runtime. This means @@ -40,50 +40,50 @@ impl_outer_origin! { #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type BlockHashCount = BlockHashCount; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = TestEvent; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + 
type AvailableBlockRatio = AvailableBlockRatio; + type BlockHashCount = BlockHashCount; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl Trait for Test { - type Balance = u64; - type AssetId = u32; - type Event = TestEvent; + type Balance = u64; + type AssetId = u32; + type Event = TestEvent; } mod generic_asset { - pub use crate::Event; + pub use crate::Event; } use frame_system as system; impl_outer_event! { - pub enum TestEvent for Test { - system, - generic_asset, - } + pub enum TestEvent for Test { + system, + generic_asset, + } } pub type GenericAsset = Module; @@ -91,62 +91,66 @@ pub type GenericAsset = Module; pub type System = frame_system::Module; pub struct ExtBuilder { - asset_id: u32, - next_asset_id: u32, - accounts: Vec, - initial_balance: u64, + asset_id: u32, + next_asset_id: u32, + accounts: Vec, + initial_balance: u64, } // Returns default values for genesis config impl Default for ExtBuilder { - fn default() -> Self { - Self { - asset_id: 0, - next_asset_id: 1000, - accounts: vec![0], - initial_balance: 0, - } - } + fn default() -> Self { + Self { + asset_id: 0, + next_asset_id: 1000, + accounts: vec![0], + initial_balance: 0, + } + } } impl ExtBuilder { - // Sets free balance to genesis config - pub fn free_balance(mut self, free_balance: (u32, u64, u64)) -> Self { - self.asset_id = free_balance.0; - self.accounts = vec![free_balance.1]; - self.initial_balance = free_balance.2; - self - } - - pub fn next_asset_id(mut self, asset_id: u32) -> Self { - self.next_asset_id = asset_id; - self - } - - // builds genesis config - pub fn build(self) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - - GenesisConfig:: { - assets: vec![self.asset_id], - endowed_accounts: self.accounts, - initial_balance: self.initial_balance, - next_asset_id: self.next_asset_id, - staking_asset_id: 16000, - spending_asset_id: 
16001, - }.assimilate_storage(&mut t).unwrap(); - - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } + // Sets free balance to genesis config + pub fn free_balance(mut self, free_balance: (u32, u64, u64)) -> Self { + self.asset_id = free_balance.0; + self.accounts = vec![free_balance.1]; + self.initial_balance = free_balance.2; + self + } + + pub fn next_asset_id(mut self, asset_id: u32) -> Self { + self.next_asset_id = asset_id; + self + } + + // builds genesis config + pub fn build(self) -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + + GenesisConfig:: { + assets: vec![self.asset_id], + endowed_accounts: self.accounts, + initial_balance: self.initial_balance, + next_asset_id: self.next_asset_id, + staking_asset_id: 16000, + spending_asset_id: 16001, + } + .assimilate_storage(&mut t) + .unwrap(); + + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } } // This function basically just builds a genesis storage key/value store according to // our desired mockup. 
pub fn new_test_ext() -> sp_io::TestExternalities { - frame_system::GenesisConfig::default() - .build_storage::() - .unwrap() - .into() + frame_system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() } diff --git a/frame/generic-asset/src/tests.rs b/frame/generic-asset/src/tests.rs index d5c0a877df..5fb7843c08 100644 --- a/frame/generic-asset/src/tests.rs +++ b/frame/generic-asset/src/tests.rs @@ -26,79 +26,88 @@ use frame_support::{assert_noop, assert_ok}; #[test] fn issuing_asset_units_to_issuer_should_work() { - let balance = 100; - - ExtBuilder::default().free_balance((16000, 1, 100)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - - let expected_balance = balance; - - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: balance, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&16000, &1), expected_balance); - }); + let balance = 100; + + ExtBuilder::default() + .free_balance((16000, 1, 100)) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + + let expected_balance = balance; + + assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: balance, + permissions: default_permission + } + )); + assert_eq!(GenericAsset::free_balance(&16000, &1), expected_balance); + }); } #[test] fn issuing_with_next_asset_id_overflow_should_not_work() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - NextAssetId::::put(u32::max_value()); - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_noop!( - GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 1, - permissions: 
default_permission - } - ), - Error::::NoIdAvailable - ); - assert_eq!(GenericAsset::next_asset_id(), u32::max_value()); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + NextAssetId::::put(u32::max_value()); + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + assert_noop!( + GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: 1, + permissions: default_permission + } + ), + Error::::NoIdAvailable + ); + assert_eq!(GenericAsset::next_asset_id(), u32::max_value()); + }); } #[test] fn querying_total_supply_should_work() { - let asset_id = 1000; - - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 2, 50)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); - assert_eq!(GenericAsset::free_balance(&asset_id, &2), 50); - assert_ok!(GenericAsset::transfer(Origin::signed(2), asset_id, 3, 31)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); - assert_eq!(GenericAsset::free_balance(&asset_id, &2), 19); - assert_eq!(GenericAsset::free_balance(&asset_id, &3), 31); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 1, 1)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); - }); + let asset_id = 1000; + + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + 
assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: 100, + permissions: default_permission + } + )); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); + assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 2, 50)); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); + assert_eq!(GenericAsset::free_balance(&asset_id, &2), 50); + assert_ok!(GenericAsset::transfer(Origin::signed(2), asset_id, 3, 31)); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); + assert_eq!(GenericAsset::free_balance(&asset_id, &2), 19); + assert_eq!(GenericAsset::free_balance(&asset_id, &3), 31); + assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 1, 1)); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), 50); + }); } // Given @@ -118,26 +127,29 @@ fn querying_total_supply_should_work() { // - account 2's `free_balance` = 40. #[test] fn transferring_amount_should_work() { - let asset_id = 1000; - let free_balance = 100; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: free_balance, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), free_balance); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 2, 40)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 60); - assert_eq!(GenericAsset::free_balance(&asset_id, &2), 40); - }); + let asset_id = 1000; + let free_balance = 100; + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + 
initial_issuance: free_balance, + permissions: default_permission + } + )); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), free_balance); + assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 2, 40)); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), 60); + assert_eq!(GenericAsset::free_balance(&asset_id, &2), 40); + }); } // Given @@ -157,50 +169,56 @@ fn transferring_amount_should_work() { // - account 2's `free_balance` = 40. #[test] fn transferring_amount_should_fail_when_transferring_more_than_free_balance() { - let asset_id = 1000; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_noop!( - GenericAsset::transfer(Origin::signed(1), asset_id, 2, 2000), - Error::::InsufficientBalance - ); - }); + let asset_id = 1000; + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: 100, + permissions: default_permission + } + )); + assert_noop!( + GenericAsset::transfer(Origin::signed(1), asset_id, 2, 2000), + Error::::InsufficientBalance + ); + }); } #[test] fn transferring_less_than_one_unit_should_not_work() { - let asset_id = 1000; - - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: 
default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); - assert_noop!( - GenericAsset::transfer(Origin::signed(1), asset_id, 2, 0), - Error::::ZeroAmount - ); - }); + let asset_id = 1000; + + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: 100, + permissions: default_permission + } + )); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); + assert_noop!( + GenericAsset::transfer(Origin::signed(1), asset_id, 2, 0), + Error::::ZeroAmount + ); + }); } // Given @@ -214,67 +232,76 @@ fn transferring_less_than_one_unit_should_not_work() { // - Free balance after self transfer should equal to the free balance before self transfer. #[test] fn self_transfer_should_fail() { - let asset_id = 1000; - let balance = 100; - - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: balance, - permissions: default_permission - } - )); - - let initial_free_balance = GenericAsset::free_balance(&asset_id, &1); - assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 1, 10)); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), initial_free_balance); - }); + let asset_id = 1000; + let balance = 100; + + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: 
balance, + permissions: default_permission + } + )); + + let initial_free_balance = GenericAsset::free_balance(&asset_id, &1); + assert_ok!(GenericAsset::transfer(Origin::signed(1), asset_id, 1, 10)); + assert_eq!( + GenericAsset::free_balance(&asset_id, &1), + initial_free_balance + ); + }); } #[test] fn transferring_more_units_than_total_supply_should_not_work() { - let asset_id = 1000; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); - assert_noop!( - GenericAsset::transfer(Origin::signed(1), asset_id, 2, 101), - Error::::InsufficientBalance - ); - }); + let asset_id = 1000; + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: 100, + permissions: default_permission + } + )); + assert_eq!(GenericAsset::free_balance(&asset_id, &1), 100); + assert_noop!( + GenericAsset::transfer(Origin::signed(1), asset_id, 2, 101), + Error::::InsufficientBalance + ); + }); } // Ensures it uses fake money for staking asset id. #[test] fn staking_asset_id_should_return_0() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(GenericAsset::staking_asset_id(), 16000); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(GenericAsset::staking_asset_id(), 16000); + }); } // Ensures it uses fake money for spending asset id. 
#[test] fn spending_asset_id_should_return_10() { - ExtBuilder::default().build().execute_with(|| { - assert_eq!(GenericAsset::spending_asset_id(), 16001); - }); + ExtBuilder::default().build().execute_with(|| { + assert_eq!(GenericAsset::spending_asset_id(), 16001); + }); } // Given @@ -283,9 +310,9 @@ fn spending_asset_id_should_return_10() { // -Â total_balance should return 0 #[test] fn total_balance_should_be_zero() { - new_test_ext().execute_with(|| { - assert_eq!(GenericAsset::total_balance(&0, &0), 0); - }); + new_test_ext().execute_with(|| { + assert_eq!(GenericAsset::total_balance(&0, &0), 0); + }); } // Given @@ -296,21 +323,24 @@ fn total_balance_should_be_zero() { // -Â total_balance should equals to reserved balance. #[test] fn total_balance_should_be_equal_to_account_balance() { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(1), - AssetOptions { - initial_issuance: 100, - permissions: default_permission - } - )); - assert_eq!(GenericAsset::total_balance(&1000, &1), 100); - }); + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + assert_ok!(GenericAsset::create( + Origin::signed(1), + AssetOptions { + initial_issuance: 100, + permissions: default_permission + } + )); + assert_eq!(GenericAsset::total_balance(&1000, &1), 100); + }); } // Given @@ -323,10 +353,13 @@ fn total_balance_should_be_equal_to_account_balance() { // -Â free_balance should return 50. 
#[test] fn free_balance_should_only_return_account_free_balance() { - ExtBuilder::default().free_balance((1, 0, 50)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 70); - assert_eq!(GenericAsset::free_balance(&1, &0), 50); - }); + ExtBuilder::default() + .free_balance((1, 0, 50)) + .build() + .execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 70); + assert_eq!(GenericAsset::free_balance(&1, &0), 50); + }); } // Given @@ -338,10 +371,13 @@ fn free_balance_should_only_return_account_free_balance() { // -Â total_balance should equals to account balance + free balance. #[test] fn total_balance_should_be_equal_to_sum_of_account_balance_and_free_balance() { - ExtBuilder::default().free_balance((1, 0, 50)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 70); - assert_eq!(GenericAsset::total_balance(&1, &0), 120); - }); + ExtBuilder::default() + .free_balance((1, 0, 50)) + .build() + .execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 70); + assert_eq!(GenericAsset::total_balance(&1, &0), 120); + }); } // Given @@ -353,10 +389,13 @@ fn total_balance_should_be_equal_to_sum_of_account_balance_and_free_balance() { // - reserved_balance should return 70. 
#[test] fn reserved_balance_should_only_return_account_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 50)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 70); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 70); - }); + ExtBuilder::default() + .free_balance((1, 0, 50)) + .build() + .execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 70); + assert_eq!(GenericAsset::reserved_balance(&1, &0), 70); + }); } // Given @@ -369,10 +408,10 @@ fn reserved_balance_should_only_return_account_reserved_balance() { // - reserved_balance = amount #[test] fn set_reserved_balance_should_add_balance_as_reserved() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 50); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 50); - }); + ExtBuilder::default().build().execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 50); + assert_eq!(GenericAsset::reserved_balance(&1, &0), 50); + }); } // Given @@ -385,10 +424,13 @@ fn set_reserved_balance_should_add_balance_as_reserved() { // - New free_balance should replace older free_balance. 
#[test] fn set_free_balance_should_add_amount_as_free_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_free_balance(&1, &0, 50); - assert_eq!(GenericAsset::free_balance(&1, &0), 50); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + GenericAsset::set_free_balance(&1, &0, 50); + assert_eq!(GenericAsset::free_balance(&1, &0), 50); + }); } // Given @@ -404,11 +446,14 @@ fn set_free_balance_should_add_amount_as_free_balance() { // - new reserved_balance = original free balance + reserved amount #[test] fn reserve_should_moves_amount_from_balance_to_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - assert_ok!(GenericAsset::reserve(&1, &0, 70)); - assert_eq!(GenericAsset::free_balance(&1, &0), 30); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 70); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + assert_ok!(GenericAsset::reserve(&1, &0, 70)); + assert_eq!(GenericAsset::free_balance(&1, &0), 30); + assert_eq!(GenericAsset::reserved_balance(&1, &0), 70); + }); } // Given @@ -423,11 +468,17 @@ fn reserve_should_moves_amount_from_balance_to_reserved_balance() { // - Should throw an error. 
#[test] fn reserve_should_not_moves_amount_from_balance_to_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - assert_noop!(GenericAsset::reserve(&1, &0, 120), Error::::InsufficientBalance); - assert_eq!(GenericAsset::free_balance(&1, &0), 100); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 0); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + assert_noop!( + GenericAsset::reserve(&1, &0, 120), + Error::::InsufficientBalance + ); + assert_eq!(GenericAsset::free_balance(&1, &0), 100); + assert_eq!(GenericAsset::reserved_balance(&1, &0), 0); + }); } // Given @@ -441,10 +492,13 @@ fn reserve_should_not_moves_amount_from_balance_to_reserved_balance() { // - unreserved should return 20. #[test] fn unreserve_should_return_subtracted_value_from_unreserved_amount_by_actual_account_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::unreserve(&1, &0, 120), 20); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + assert_eq!(GenericAsset::unreserve(&1, &0, 120), 20); + }); } // Given @@ -458,10 +512,13 @@ fn unreserve_should_return_subtracted_value_from_unreserved_amount_by_actual_acc // - unreserved should return None. #[test] fn unreserve_should_return_none() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::unreserve(&1, &0, 50), 0); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + assert_eq!(GenericAsset::unreserve(&1, &0, 50), 0); + }); } // Given @@ -475,11 +532,14 @@ fn unreserve_should_return_none() { // - free_balance should be 200. 
#[test] fn unreserve_should_increase_free_balance_by_reserved_balance() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - GenericAsset::unreserve(&1, &0, 120); - assert_eq!(GenericAsset::free_balance(&1, &0), 200); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + GenericAsset::unreserve(&1, &0, 120); + assert_eq!(GenericAsset::free_balance(&1, &0), 200); + }); } // Given @@ -493,11 +553,14 @@ fn unreserve_should_increase_free_balance_by_reserved_balance() { // - reserved_balance should be 0. #[test] fn unreserve_should_deduct_reserved_balance_by_reserved_amount() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_free_balance(&1, &0, 100); - GenericAsset::unreserve(&1, &0, 120); - assert_eq!(GenericAsset::reserved_balance(&1, &0), 0); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + GenericAsset::set_free_balance(&1, &0, 100); + GenericAsset::unreserve(&1, &0, 120); + assert_eq!(GenericAsset::reserved_balance(&1, &0), 0); + }); } // Given @@ -511,10 +574,13 @@ fn unreserve_should_deduct_reserved_balance_by_reserved_amount() { // - slash should return None. #[test] fn slash_should_return_slash_reserved_amount() { - ExtBuilder::default().free_balance((1, 0, 100)).build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::slash(&1, &0, 70), None); - }); + ExtBuilder::default() + .free_balance((1, 0, 100)) + .build() + .execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + assert_eq!(GenericAsset::slash(&1, &0, 70), None); + }); } // Given @@ -525,10 +591,10 @@ fn slash_should_return_slash_reserved_amount() { // - Should return slashed_reserved - reserved_balance. 
#[test] fn slash_reserved_should_deducts_up_to_amount_from_reserved_balance() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::slash_reserved(&1, &0, 150), Some(50)); - }); + ExtBuilder::default().build().execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + assert_eq!(GenericAsset::slash_reserved(&1, &0, 150), Some(50)); + }); } // Given @@ -539,10 +605,10 @@ fn slash_reserved_should_deducts_up_to_amount_from_reserved_balance() { // - Should return None. #[test] fn slash_reserved_should_return_none() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::slash_reserved(&1, &0, 100), None); - }); + ExtBuilder::default().build().execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + assert_eq!(GenericAsset::slash_reserved(&1, &0, 100), None); + }); } // Given @@ -554,10 +620,13 @@ fn slash_reserved_should_return_none() { // - Should not return None. #[test] fn repatriate_reserved_return_amount_subtracted_by_slash_amount() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::repatriate_reserved(&1, &0, &1, 130, BalanceStatus::Free), 30); - }); + ExtBuilder::default().build().execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + assert_eq!( + GenericAsset::repatriate_reserved(&1, &0, &1, 130, BalanceStatus::Free), + 30 + ); + }); } // Given @@ -569,10 +638,13 @@ fn repatriate_reserved_return_amount_subtracted_by_slash_amount() { // - Should return None. 
#[test] fn repatriate_reserved_return_none() { - ExtBuilder::default().build().execute_with(|| { - GenericAsset::set_reserved_balance(&1, &0, 100); - assert_eq!(GenericAsset::repatriate_reserved(&1, &0, &1, 90, BalanceStatus::Free), 0); - }); + ExtBuilder::default().build().execute_with(|| { + GenericAsset::set_reserved_balance(&1, &0, 100); + assert_eq!( + GenericAsset::repatriate_reserved(&1, &0, &1, 90, BalanceStatus::Free), + 0 + ); + }); } // Given @@ -583,30 +655,40 @@ fn repatriate_reserved_return_none() { // - Should create a new reserved asset. #[test] fn create_reserved_should_create_a_default_account_with_the_balance_given() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let default_permission = PermissionLatest { - update: Owner::Address(1), - mint: Owner::Address(1), - burn: Owner::Address(1), - }; - let options = AssetOptions { - initial_issuance: 500, - permissions: default_permission, - }; - - let expected_total_issuance = 500; - let created_asset_id = 9; - let created_account_id = 0; - - assert_ok!(GenericAsset::create_reserved(Origin::ROOT, created_asset_id, options)); - - // Tests for side effects. - assert_eq!(>::get(created_asset_id), expected_total_issuance); - assert_eq!( - >::get(&created_asset_id, &created_account_id), - expected_total_issuance - ); - }); + ExtBuilder::default() + .next_asset_id(10) + .build() + .execute_with(|| { + let default_permission = PermissionLatest { + update: Owner::Address(1), + mint: Owner::Address(1), + burn: Owner::Address(1), + }; + let options = AssetOptions { + initial_issuance: 500, + permissions: default_permission, + }; + + let expected_total_issuance = 500; + let created_asset_id = 9; + let created_account_id = 0; + + assert_ok!(GenericAsset::create_reserved( + Origin::ROOT, + created_asset_id, + options + )); + + // Tests for side effects. 
+ assert_eq!( + >::get(created_asset_id), + expected_total_issuance + ); + assert_eq!( + >::get(&created_asset_id, &created_account_id), + expected_total_issuance + ); + }); } // Given @@ -618,17 +700,17 @@ fn create_reserved_should_create_a_default_account_with_the_balance_given() { // - Should throw a permission error #[test] fn mint_should_throw_permission_error() { - ExtBuilder::default().build().execute_with(|| { - let origin = 1; - let asset_id = 4; - let to_account = 2; - let amount = 100; - - assert_noop!( - GenericAsset::mint(Origin::signed(origin), asset_id, to_account, amount), - Error::::NoMintPermission, - ); - }); + ExtBuilder::default().build().execute_with(|| { + let origin = 1; + let asset_id = 4; + let to_account = 2; + let amount = 100; + + assert_noop!( + GenericAsset::mint(Origin::signed(origin), asset_id, to_account, amount), + Error::::NoMintPermission, + ); + }); } // Given @@ -641,33 +723,44 @@ fn mint_should_throw_permission_error() { // - Should not change `origins` free_balance. #[test] fn mint_should_increase_asset() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let to_account = 2; - let amount = 500; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - assert_ok!(GenericAsset::mint(Origin::signed(origin), asset_id, to_account, amount)); - assert_eq!(GenericAsset::free_balance(&asset_id, &to_account), amount); - - // Origin's free_balance should not change. 
- assert_eq!(GenericAsset::free_balance(&asset_id, &origin), initial_issuance); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let origin = 1; + let asset_id = 1000; + let to_account = 2; + let amount = 500; + let initial_issuance = 100; + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + } + )); + + assert_ok!(GenericAsset::mint( + Origin::signed(origin), + asset_id, + to_account, + amount + )); + assert_eq!(GenericAsset::free_balance(&asset_id, &to_account), amount); + + // Origin's free_balance should not change. + assert_eq!( + GenericAsset::free_balance(&asset_id, &origin), + initial_issuance + ); + }); } // Given @@ -679,17 +772,20 @@ fn mint_should_increase_asset() { // - Should throw a permission error. #[test] fn burn_should_throw_permission_error() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 4; - let to_account = 2; - let amount = 10; - - assert_noop!( - GenericAsset::burn(Origin::signed(origin), asset_id, to_account, amount), - Error::::NoBurnPermission, - ); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let origin = 1; + let asset_id = 4; + let to_account = 2; + let amount = 10; + + assert_noop!( + GenericAsset::burn(Origin::signed(origin), asset_id, to_account, amount), + Error::::NoBurnPermission, + ); + }); } // Given @@ -702,38 +798,49 @@ fn burn_should_throw_permission_error() { // - Should not change `origin`'s free_balance. 
#[test] fn burn_should_burn_an_asset() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let to_account = 2; - let amount = 1000; - let initial_issuance = 100; - let burn_amount = 400; - let expected_amount = 600; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - assert_ok!(GenericAsset::mint(Origin::signed(origin), asset_id, to_account, amount)); - - assert_ok!(GenericAsset::burn( - Origin::signed(origin), - asset_id, - to_account, - burn_amount - )); - assert_eq!(GenericAsset::free_balance(&asset_id, &to_account), expected_amount); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let origin = 1; + let asset_id = 1000; + let to_account = 2; + let amount = 1000; + let initial_issuance = 100; + let burn_amount = 400; + let expected_amount = 600; + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + } + )); + assert_ok!(GenericAsset::mint( + Origin::signed(origin), + asset_id, + to_account, + amount + )); + + assert_ok!(GenericAsset::burn( + Origin::signed(origin), + asset_id, + to_account, + burn_amount + )); + assert_eq!( + GenericAsset::free_balance(&asset_id, &to_account), + expected_amount + ); + }); } // Given @@ -745,29 +852,44 @@ fn burn_should_burn_an_asset() { // - The account origin should have burn, mint and update permissions. 
#[test] fn check_permission_should_return_correct_permission() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - }, - )); - - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Burn)); - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Mint)); - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Update)); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let origin = 1; + let asset_id = 1000; + let initial_issuance = 100; + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + }, + )); + + assert!(GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Burn + )); + assert!(GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Mint + )); + assert!(GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Update + )); + }); } // Given @@ -779,29 +901,44 @@ fn check_permission_should_return_correct_permission() { // - The account origin should not have burn, mint and update permissions. 
#[test] fn check_permission_should_return_false_for_no_permission() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::None, - mint: Owner::None, - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Burn)); - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Mint)); - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Update)); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let origin = 1; + let asset_id = 1000; + let initial_issuance = 100; + + let default_permission = PermissionLatest { + update: Owner::None, + mint: Owner::None, + burn: Owner::None, + }; + + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + } + )); + + assert!(!GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Burn + )); + assert!(!GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Mint + )); + assert!(!GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Update + )); + }); } // Given @@ -813,39 +950,50 @@ fn check_permission_should_return_false_for_no_permission() { // - The account origin should have update and mint permissions. 
#[test] fn update_permission_should_change_permission() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::None, - burn: Owner::None, - }; - - let new_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - assert_ok!(GenericAsset::update_permission( - Origin::signed(origin), - asset_id, - new_permission, - )); - assert!(GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Mint)); - assert!(!GenericAsset::check_permission(&asset_id, &origin, &PermissionType::Burn)); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let origin = 1; + let asset_id = 1000; + let initial_issuance = 100; + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::None, + burn: Owner::None, + }; + + let new_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::None, + }; + + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + } + )); + + assert_ok!(GenericAsset::update_permission( + Origin::signed(origin), + asset_id, + new_permission, + )); + assert!(GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Mint + )); + assert!(!GenericAsset::check_permission( + &asset_id, + &origin, + &PermissionType::Burn + )); + }); } // Given @@ -856,36 +1004,39 @@ fn update_permission_should_change_permission() { // - Should throw an error stating "Origin does not have enough permission to update 
permissions." #[test] fn update_permission_should_throw_error_when_lack_of_permissions() { - ExtBuilder::default().free_balance((16000, 1, 100000)).build().execute_with(|| { - let origin = 1; - let asset_id = 1000; - let initial_issuance = 100; - - let default_permission = PermissionLatest { - update: Owner::None, - mint: Owner::None, - burn: Owner::None, - }; - - let new_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::None, - }; - - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - }, - )); - - assert_noop!( - GenericAsset::update_permission(Origin::signed(origin), asset_id, new_permission), - Error::::NoUpdatePermission, - ); - }); + ExtBuilder::default() + .free_balance((16000, 1, 100000)) + .build() + .execute_with(|| { + let origin = 1; + let asset_id = 1000; + let initial_issuance = 100; + + let default_permission = PermissionLatest { + update: Owner::None, + mint: Owner::None, + burn: Owner::None, + }; + + let new_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::None, + }; + + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + }, + )); + + assert_noop!( + GenericAsset::update_permission(Origin::signed(origin), asset_id, new_permission), + Error::::NoUpdatePermission, + ); + }); } // Given @@ -902,34 +1053,40 @@ fn update_permission_should_throw_error_when_lack_of_permissions() { // - Permissions must have burn, mint and updatePermission for the given asset_id. 
#[test] fn create_asset_works_with_given_asset_id_and_from_account() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = Some(1); - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - let expected_permission = PermissionVersions::V1(default_permission.clone()); - let asset_id = 9; - let initial_issuance = 100; - - assert_ok!(GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission.clone() - } - )); - - // Test for side effects. - assert_eq!(>::get(), 10); - assert_eq!(>::get(asset_id), initial_issuance); - assert_eq!(>::get(&asset_id, &origin), initial_issuance); - assert_eq!(>::get(&asset_id), expected_permission); - }); + ExtBuilder::default() + .next_asset_id(10) + .build() + .execute_with(|| { + let origin = 1; + let from_account: Option<::AccountId> = Some(1); + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + let expected_permission = PermissionVersions::V1(default_permission.clone()); + let asset_id = 9; + let initial_issuance = 100; + + assert_ok!(GenericAsset::create_asset( + Some(asset_id), + from_account, + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission.clone() + } + )); + + // Test for side effects. + assert_eq!(>::get(), 10); + assert_eq!(>::get(asset_id), initial_issuance); + assert_eq!( + >::get(&asset_id, &origin), + initial_issuance + ); + assert_eq!(>::get(&asset_id), expected_permission); + }); } // Given @@ -939,31 +1096,34 @@ fn create_asset_works_with_given_asset_id_and_from_account() { // - `create_asset` should not work. 
#[test] fn create_asset_with_non_reserved_asset_id_should_not_work() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = Some(1); - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let asset_id = 11; - let initial_issuance = 100; - - assert_noop!( - GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance, - permissions: default_permission.clone() - } - ), - Error::::IdUnavailable, - ); - }); + ExtBuilder::default() + .next_asset_id(10) + .build() + .execute_with(|| { + let origin = 1; + let from_account: Option<::AccountId> = Some(1); + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + let asset_id = 11; + let initial_issuance = 100; + + assert_noop!( + GenericAsset::create_asset( + Some(asset_id), + from_account, + AssetOptions { + initial_issuance, + permissions: default_permission.clone() + } + ), + Error::::IdUnavailable, + ); + }); } // Given @@ -973,39 +1133,42 @@ fn create_asset_with_non_reserved_asset_id_should_not_work() { // - `create_asset` should not work. 
#[test] fn create_asset_with_a_taken_asset_id_should_not_work() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = Some(1); - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let asset_id = 9; - let initial_issuance = 100; - - assert_ok!(GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance, - permissions: default_permission.clone() - } - )); - assert_noop!( - GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance, - permissions: default_permission.clone() - } - ), - Error::::IdAlreadyTaken, - ); - }); + ExtBuilder::default() + .next_asset_id(10) + .build() + .execute_with(|| { + let origin = 1; + let from_account: Option<::AccountId> = Some(1); + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + let asset_id = 9; + let initial_issuance = 100; + + assert_ok!(GenericAsset::create_asset( + Some(asset_id), + from_account, + AssetOptions { + initial_issuance, + permissions: default_permission.clone() + } + )); + assert_noop!( + GenericAsset::create_asset( + Some(asset_id), + from_account, + AssetOptions { + initial_issuance, + permissions: default_permission.clone() + } + ), + Error::::IdAlreadyTaken, + ); + }); } // Given @@ -1018,35 +1181,38 @@ fn create_asset_with_a_taken_asset_id_should_not_work() { // - Should create a reserved token. 
#[test] fn create_asset_should_create_a_reserved_asset_when_from_account_is_none() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = None; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let created_account_id = 0; - let asset_id = 9; - let initial_issuance = 100; - - assert_ok!(GenericAsset::create_asset( - Some(asset_id), - from_account, - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - // Test for a side effect. - assert_eq!( - >::get(&asset_id, &created_account_id), - initial_issuance - ); - }); + ExtBuilder::default() + .next_asset_id(10) + .build() + .execute_with(|| { + let origin = 1; + let from_account: Option<::AccountId> = None; + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + let created_account_id = 0; + let asset_id = 9; + let initial_issuance = 100; + + assert_ok!(GenericAsset::create_asset( + Some(asset_id), + from_account, + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + } + )); + + // Test for a side effect. + assert_eq!( + >::get(&asset_id, &created_account_id), + initial_issuance + ); + }); } // Given @@ -1061,155 +1227,179 @@ fn create_asset_should_create_a_reserved_asset_when_from_account_is_none() { // - Should not create a `reserved_asset`. 
#[test] fn create_asset_should_create_a_user_asset() { - ExtBuilder::default().next_asset_id(10).build().execute_with(|| { - let origin = 1; - let from_account: Option<::AccountId> = None; - - let default_permission = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - let created_account_id = 0; - let reserved_asset_id = 100000; - let initial_issuance = 100; - let created_user_asset_id = 10; - - assert_ok!(GenericAsset::create_asset( - None, - from_account, - AssetOptions { - initial_issuance: initial_issuance, - permissions: default_permission - } - )); - - // Test for side effects. - assert_eq!(>::get(&reserved_asset_id, &created_account_id), 0); - assert_eq!( - >::get(&created_user_asset_id, &created_account_id), - initial_issuance - ); - assert_eq!(>::get(created_user_asset_id), initial_issuance); - }); + ExtBuilder::default() + .next_asset_id(10) + .build() + .execute_with(|| { + let origin = 1; + let from_account: Option<::AccountId> = None; + + let default_permission = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + let created_account_id = 0; + let reserved_asset_id = 100000; + let initial_issuance = 100; + let created_user_asset_id = 10; + + assert_ok!(GenericAsset::create_asset( + None, + from_account, + AssetOptions { + initial_issuance: initial_issuance, + permissions: default_permission + } + )); + + // Test for side effects. 
+ assert_eq!( + >::get(&reserved_asset_id, &created_account_id), + 0 + ); + assert_eq!( + >::get(&created_user_asset_id, &created_account_id), + initial_issuance + ); + assert_eq!( + >::get(created_user_asset_id), + initial_issuance + ); + }); } #[test] fn update_permission_should_raise_event() { - // Arrange - let staking_asset_id = 16000; - let asset_id = 1000; - let origin = 1; - let initial_balance = 1000; - let permissions = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - - ExtBuilder::default() - .next_asset_id(asset_id) - .free_balance((staking_asset_id, origin, initial_balance)) - .build() - .execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: 0, - permissions: permissions.clone(), - } - )); - - // Act - assert_ok!(GenericAsset::update_permission( - Origin::signed(origin), - asset_id, - permissions.clone() - )); - - let expected_event = TestEvent::generic_asset( - RawEvent::PermissionUpdated(asset_id, permissions.clone()), - ); - // Assert - assert!(System::events().iter().any(|record| record.event == expected_event)); - }, - ); + // Arrange + let staking_asset_id = 16000; + let asset_id = 1000; + let origin = 1; + let initial_balance = 1000; + let permissions = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + + ExtBuilder::default() + .next_asset_id(asset_id) + .free_balance((staking_asset_id, origin, initial_balance)) + .build() + .execute_with(|| { + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: 0, + permissions: permissions.clone(), + } + )); + + // Act + assert_ok!(GenericAsset::update_permission( + Origin::signed(origin), + asset_id, + permissions.clone() + )); + + let expected_event = TestEvent::generic_asset(RawEvent::PermissionUpdated( + asset_id, + permissions.clone(), + )); + // Assert 
+ assert!(System::events() + .iter() + .any(|record| record.event == expected_event)); + }); } #[test] fn mint_should_raise_event() { - // Arrange - let staking_asset_id = 16000; - let asset_id = 1000; - let origin = 1; - let initial_balance = 1000; - let permissions = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - let to = 2; - let amount = 100; - - ExtBuilder::default() - .next_asset_id(asset_id) - .free_balance((staking_asset_id, origin, initial_balance)) - .build() - .execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: 0, - permissions: permissions.clone(), - }, - )); - - // Act - assert_ok!(GenericAsset::mint(Origin::signed(origin), asset_id, to, amount)); - - let expected_event = TestEvent::generic_asset(RawEvent::Minted(asset_id, to, amount)); - - // Assert - assert!(System::events().iter().any(|record| record.event == expected_event)); - }, - ); + // Arrange + let staking_asset_id = 16000; + let asset_id = 1000; + let origin = 1; + let initial_balance = 1000; + let permissions = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + let to = 2; + let amount = 100; + + ExtBuilder::default() + .next_asset_id(asset_id) + .free_balance((staking_asset_id, origin, initial_balance)) + .build() + .execute_with(|| { + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: 0, + permissions: permissions.clone(), + }, + )); + + // Act + assert_ok!(GenericAsset::mint( + Origin::signed(origin), + asset_id, + to, + amount + )); + + let expected_event = TestEvent::generic_asset(RawEvent::Minted(asset_id, to, amount)); + + // Assert + assert!(System::events() + .iter() + .any(|record| record.event == expected_event)); + }); } #[test] fn burn_should_raise_event() { - // Arrange - let staking_asset_id = 16000; - let asset_id = 
1000; - let origin = 1; - let initial_balance = 1000; - let permissions = PermissionLatest { - update: Owner::Address(origin), - mint: Owner::Address(origin), - burn: Owner::Address(origin), - }; - let amount = 100; - - ExtBuilder::default() - .next_asset_id(asset_id) - .free_balance((staking_asset_id, origin, initial_balance)) - .build() - .execute_with(|| { - assert_ok!(GenericAsset::create( - Origin::signed(origin), - AssetOptions { - initial_issuance: amount, - permissions: permissions.clone(), - }, - )); - - // Act - assert_ok!(GenericAsset::burn(Origin::signed(origin), asset_id, origin, amount)); - - let expected_event = TestEvent::generic_asset(RawEvent::Burned(asset_id, origin, amount)); - - // Assert - assert!(System::events().iter().any(|record| record.event == expected_event)); - }, - ); + // Arrange + let staking_asset_id = 16000; + let asset_id = 1000; + let origin = 1; + let initial_balance = 1000; + let permissions = PermissionLatest { + update: Owner::Address(origin), + mint: Owner::Address(origin), + burn: Owner::Address(origin), + }; + let amount = 100; + + ExtBuilder::default() + .next_asset_id(asset_id) + .free_balance((staking_asset_id, origin, initial_balance)) + .build() + .execute_with(|| { + assert_ok!(GenericAsset::create( + Origin::signed(origin), + AssetOptions { + initial_issuance: amount, + permissions: permissions.clone(), + }, + )); + + // Act + assert_ok!(GenericAsset::burn( + Origin::signed(origin), + asset_id, + origin, + amount + )); + + let expected_event = + TestEvent::generic_asset(RawEvent::Burned(asset_id, origin, amount)); + + // Assert + assert!(System::events() + .iter() + .any(|record| record.event == expected_event)); + }); } diff --git a/frame/grandpa/src/lib.rs b/frame/grandpa/src/lib.rs index 10cc8162db..0f2cdbd4bd 100644 --- a/frame/grandpa/src/lib.rs +++ b/frame/grandpa/src/lib.rs @@ -30,29 +30,31 @@ // re-export since this is necessary for `impl_apis` in runtime. 
pub use sp_finality_grandpa as fg_primitives; -use sp_std::prelude::*; -use codec::{self as codec, Encode, Decode}; -use frame_support::{decl_event, decl_storage, decl_module, decl_error, storage}; +use codec::{self as codec, Decode, Encode}; +pub use fg_primitives::{AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList}; +use fg_primitives::{ + ConsensusLog, RoundNumber, ScheduledChange, SetId, GRANDPA_AUTHORITIES_KEY, GRANDPA_ENGINE_ID, +}; use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, storage}; +use frame_system::{self as system, ensure_signed, DigestOf}; use sp_runtime::{ - DispatchResult, generic::{DigestItem, OpaqueDigestItemId}, traits::Zero, Perbill, + generic::{DigestItem, OpaqueDigestItemId}, + traits::Zero, + DispatchResult, Perbill, }; use sp_staking::{ - SessionIndex, - offence::{Offence, Kind}, + offence::{Kind, Offence}, + SessionIndex, }; -use fg_primitives::{ - GRANDPA_AUTHORITIES_KEY, GRANDPA_ENGINE_ID, ScheduledChange, ConsensusLog, SetId, RoundNumber, -}; -pub use fg_primitives::{AuthorityId, AuthorityList, AuthorityWeight, VersionedAuthorityList}; -use frame_system::{self as system, ensure_signed, DigestOf}; +use sp_std::prelude::*; mod mock; mod tests; pub trait Trait: frame_system::Trait { - /// The event type of this module. - type Event: From + Into<::Event>; + /// The event type of this module. + type Event: From + Into<::Event>; } /// A stored pending change, old format. @@ -60,40 +62,40 @@ pub trait Trait: frame_system::Trait { // https://github.com/paritytech/substrate/issues/1614 #[derive(Encode, Decode)] pub struct OldStoredPendingChange { - /// The block number this was scheduled at. - pub scheduled_at: N, - /// The delay in blocks until it will be applied. - pub delay: N, - /// The next authority set. - pub next_authorities: AuthorityList, + /// The block number this was scheduled at. 
+ pub scheduled_at: N, + /// The delay in blocks until it will be applied. + pub delay: N, + /// The next authority set. + pub next_authorities: AuthorityList, } /// A stored pending change. #[derive(Encode)] pub struct StoredPendingChange { - /// The block number this was scheduled at. - pub scheduled_at: N, - /// The delay in blocks until it will be applied. - pub delay: N, - /// The next authority set. - pub next_authorities: AuthorityList, - /// If defined it means the change was forced and the given block number - /// indicates the median last finalized block when the change was signaled. - pub forced: Option, + /// The block number this was scheduled at. + pub scheduled_at: N, + /// The delay in blocks until it will be applied. + pub delay: N, + /// The next authority set. + pub next_authorities: AuthorityList, + /// If defined it means the change was forced and the given block number + /// indicates the median last finalized block when the change was signaled. + pub forced: Option, } impl Decode for StoredPendingChange { - fn decode(value: &mut I) -> core::result::Result { - let old = OldStoredPendingChange::decode(value)?; - let forced = >::decode(value).unwrap_or(None); - - Ok(StoredPendingChange { - scheduled_at: old.scheduled_at, - delay: old.delay, - next_authorities: old.next_authorities, - forced, - }) - } + fn decode(value: &mut I) -> core::result::Result { + let old = OldStoredPendingChange::decode(value)?; + let forced = >::decode(value).unwrap_or(None); + + Ok(StoredPendingChange { + scheduled_at: old.scheduled_at, + delay: old.delay, + next_authorities: old.next_authorities, + forced, + }) + } } /// Current state of the GRANDPA authority set. State transitions must happen in @@ -102,407 +104,406 @@ impl Decode for StoredPendingChange { #[derive(Decode, Encode)] #[cfg_attr(test, derive(Debug, PartialEq))] pub enum StoredState { - /// The current authority set is live, and GRANDPA is enabled. 
- Live, - /// There is a pending pause event which will be enacted at the given block - /// height. - PendingPause { - /// Block at which the intention to pause was scheduled. - scheduled_at: N, - /// Number of blocks after which the change will be enacted. - delay: N - }, - /// The current GRANDPA authority set is paused. - Paused, - /// There is a pending resume event which will be enacted at the given block - /// height. - PendingResume { - /// Block at which the intention to resume was scheduled. - scheduled_at: N, - /// Number of blocks after which the change will be enacted. - delay: N, - }, + /// The current authority set is live, and GRANDPA is enabled. + Live, + /// There is a pending pause event which will be enacted at the given block + /// height. + PendingPause { + /// Block at which the intention to pause was scheduled. + scheduled_at: N, + /// Number of blocks after which the change will be enacted. + delay: N, + }, + /// The current GRANDPA authority set is paused. + Paused, + /// There is a pending resume event which will be enacted at the given block + /// height. + PendingResume { + /// Block at which the intention to resume was scheduled. + scheduled_at: N, + /// Number of blocks after which the change will be enacted. + delay: N, + }, } decl_event! { - pub enum Event { - /// New authority set has been applied. - NewAuthorities(AuthorityList), - /// Current authority set has been paused. - Paused, - /// Current authority set has been resumed. - Resumed, - } + pub enum Event { + /// New authority set has been applied. + NewAuthorities(AuthorityList), + /// Current authority set has been paused. + Paused, + /// Current authority set has been resumed. + Resumed, + } } decl_error! { - pub enum Error for Module { - /// Attempt to signal GRANDPA pause when the authority set isn't live - /// (either paused or already pending pause). 
- PauseFailed, - /// Attempt to signal GRANDPA resume when the authority set isn't paused - /// (either live or already pending resume). - ResumeFailed, - /// Attempt to signal GRANDPA change with one already pending. - ChangePending, - /// Cannot signal forced change so soon after last. - TooSoon, - } + pub enum Error for Module { + /// Attempt to signal GRANDPA pause when the authority set isn't live + /// (either paused or already pending pause). + PauseFailed, + /// Attempt to signal GRANDPA resume when the authority set isn't paused + /// (either live or already pending resume). + ResumeFailed, + /// Attempt to signal GRANDPA change with one already pending. + ChangePending, + /// Cannot signal forced change so soon after last. + TooSoon, + } } decl_storage! { - trait Store for Module as GrandpaFinality { - /// State of the current authority set. - State get(fn state): StoredState = StoredState::Live; - - /// Pending change: (signaled at, scheduled change). - PendingChange: Option>; - - /// next block number where we can force a change. - NextForced get(fn next_forced): Option; - - /// `true` if we are currently stalled. - Stalled get(fn stalled): Option<(T::BlockNumber, T::BlockNumber)>; - - /// The number of changes (both in terms of keys and underlying economic responsibilities) - /// in the "set" of Grandpa validators from genesis. - CurrentSetId get(fn current_set_id) build(|_| fg_primitives::SetId::default()): SetId; - - /// A mapping from grandpa set ID to the index of the *most recent* session for which its - /// members were responsible. - SetIdSession get(fn session_for_set): map hasher(twox_64_concat) SetId => Option; - } - add_extra_genesis { - config(authorities): AuthorityList; - build(|config| Module::::initialize_authorities(&config.authorities)) - } + trait Store for Module as GrandpaFinality { + /// State of the current authority set. 
+ State get(fn state): StoredState = StoredState::Live; + + /// Pending change: (signaled at, scheduled change). + PendingChange: Option>; + + /// next block number where we can force a change. + NextForced get(fn next_forced): Option; + + /// `true` if we are currently stalled. + Stalled get(fn stalled): Option<(T::BlockNumber, T::BlockNumber)>; + + /// The number of changes (both in terms of keys and underlying economic responsibilities) + /// in the "set" of Grandpa validators from genesis. + CurrentSetId get(fn current_set_id) build(|_| fg_primitives::SetId::default()): SetId; + + /// A mapping from grandpa set ID to the index of the *most recent* session for which its + /// members were responsible. + SetIdSession get(fn session_for_set): map hasher(twox_64_concat) SetId => Option; + } + add_extra_genesis { + config(authorities): AuthorityList; + build(|config| Module::::initialize_authorities(&config.authorities)) + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Report some misbehavior. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn report_misbehavior(origin, _report: Vec) { - ensure_signed(origin)?; - // FIXME: https://github.com/paritytech/substrate/issues/1112 - } - - fn on_finalize(block_number: T::BlockNumber) { - // check for scheduled pending authority set changes - if let Some(pending_change) = >::get() { - // emit signal if we're at the block that scheduled the change - if block_number == pending_change.scheduled_at { - if let Some(median) = pending_change.forced { - Self::deposit_log(ConsensusLog::ForcedChange( - median, - ScheduledChange { - delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), - } - )) - } else { - Self::deposit_log(ConsensusLog::ScheduledChange( - ScheduledChange{ - delay: pending_change.delay, - next_authorities: pending_change.next_authorities.clone(), - } - )); - } - } - - // enact the change if we've reached the enacting block - if block_number == pending_change.scheduled_at + pending_change.delay { - Self::set_grandpa_authorities(&pending_change.next_authorities); - Self::deposit_event( - Event::NewAuthorities(pending_change.next_authorities) - ); - >::kill(); - } - } - - // check for scheduled pending state changes - match >::get() { - StoredState::PendingPause { scheduled_at, delay } => { - // signal change to pause - if block_number == scheduled_at { - Self::deposit_log(ConsensusLog::Pause(delay)); - } - - // enact change to paused state - if block_number == scheduled_at + delay { - >::put(StoredState::Paused); - Self::deposit_event(Event::Paused); - } - }, - StoredState::PendingResume { scheduled_at, delay } => { - // signal change to resume - if block_number == scheduled_at { - Self::deposit_log(ConsensusLog::Resume(delay)); - } - - // enact change to live state - if block_number == scheduled_at + delay { - >::put(StoredState::Live); - Self::deposit_event(Event::Resumed); - } - }, - _ => {}, - } - } - } + pub struct Module for enum Call where origin: 
T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Report some misbehavior. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn report_misbehavior(origin, _report: Vec) { + ensure_signed(origin)?; + // FIXME: https://github.com/paritytech/substrate/issues/1112 + } + + fn on_finalize(block_number: T::BlockNumber) { + // check for scheduled pending authority set changes + if let Some(pending_change) = >::get() { + // emit signal if we're at the block that scheduled the change + if block_number == pending_change.scheduled_at { + if let Some(median) = pending_change.forced { + Self::deposit_log(ConsensusLog::ForcedChange( + median, + ScheduledChange { + delay: pending_change.delay, + next_authorities: pending_change.next_authorities.clone(), + } + )) + } else { + Self::deposit_log(ConsensusLog::ScheduledChange( + ScheduledChange{ + delay: pending_change.delay, + next_authorities: pending_change.next_authorities.clone(), + } + )); + } + } + + // enact the change if we've reached the enacting block + if block_number == pending_change.scheduled_at + pending_change.delay { + Self::set_grandpa_authorities(&pending_change.next_authorities); + Self::deposit_event( + Event::NewAuthorities(pending_change.next_authorities) + ); + >::kill(); + } + } + + // check for scheduled pending state changes + match >::get() { + StoredState::PendingPause { scheduled_at, delay } => { + // signal change to pause + if block_number == scheduled_at { + Self::deposit_log(ConsensusLog::Pause(delay)); + } + + // enact change to paused state + if block_number == scheduled_at + delay { + >::put(StoredState::Paused); + Self::deposit_event(Event::Paused); + } + }, + StoredState::PendingResume { scheduled_at, delay } => { + // signal change to resume + if block_number == scheduled_at { + Self::deposit_log(ConsensusLog::Resume(delay)); + } + + // enact change to live state + if block_number == scheduled_at + delay { + >::put(StoredState::Live); + 
Self::deposit_event(Event::Resumed); + } + }, + _ => {}, + } + } + } } impl Module { - /// Get the current set of authorities, along with their respective weights. - pub fn grandpa_authorities() -> AuthorityList { - storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() - } - - /// Set the current set of authorities, along with their respective weights. - fn set_grandpa_authorities(authorities: &AuthorityList) { - storage::unhashed::put( - GRANDPA_AUTHORITIES_KEY, - &VersionedAuthorityList::from(authorities), - ); - } - - /// Schedule GRANDPA to pause starting in the given number of blocks. - /// Cannot be done when already paused. - pub fn schedule_pause(in_blocks: T::BlockNumber) -> DispatchResult { - if let StoredState::Live = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingPause { - delay: in_blocks, - scheduled_at, - }); - - Ok(()) - } else { - Err(Error::::PauseFailed)? - } - } - - /// Schedule a resume of GRANDPA after pausing. - pub fn schedule_resume(in_blocks: T::BlockNumber) -> DispatchResult { - if let StoredState::Paused = >::get() { - let scheduled_at = >::block_number(); - >::put(StoredState::PendingResume { - delay: in_blocks, - scheduled_at, - }); - - Ok(()) - } else { - Err(Error::::ResumeFailed)? - } - } - - /// Schedule a change in the authorities. - /// - /// The change will be applied at the end of execution of the block - /// `in_blocks` after the current block. This value may be 0, in which - /// case the change is applied at the end of the current block. - /// - /// If the `forced` parameter is defined, this indicates that the current - /// set has been synchronously determined to be offline and that after - /// `in_blocks` the given change should be applied. The given block number - /// indicates the median last finalized block number and it should be used - /// as the canon block when starting the new grandpa voter. - /// - /// No change should be signaled while any change is pending. 
Returns - /// an error if a change is already pending. - pub fn schedule_change( - next_authorities: AuthorityList, - in_blocks: T::BlockNumber, - forced: Option, - ) -> DispatchResult { - if !>::exists() { - let scheduled_at = >::block_number(); - - if let Some(_) = forced { - if Self::next_forced().map_or(false, |next| next > scheduled_at) { - Err(Error::::TooSoon)? - } - - // only allow the next forced change when twice the window has passed since - // this one. - >::put(scheduled_at + in_blocks * 2.into()); - } - - >::put(StoredPendingChange { - delay: in_blocks, - scheduled_at, - next_authorities, - forced, - }); - - Ok(()) - } else { - Err(Error::::ChangePending)? - } - } - - /// Deposit one of this module's logs. - fn deposit_log(log: ConsensusLog) { - let log: DigestItem = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); - >::deposit_log(log.into()); - } - - fn initialize_authorities(authorities: &AuthorityList) { - if !authorities.is_empty() { - assert!( - Self::grandpa_authorities().is_empty(), - "Authorities are already initialized!" - ); - Self::set_grandpa_authorities(authorities); - } - } + /// Get the current set of authorities, along with their respective weights. + pub fn grandpa_authorities() -> AuthorityList { + storage::unhashed::get_or_default::(GRANDPA_AUTHORITIES_KEY).into() + } + + /// Set the current set of authorities, along with their respective weights. + fn set_grandpa_authorities(authorities: &AuthorityList) { + storage::unhashed::put( + GRANDPA_AUTHORITIES_KEY, + &VersionedAuthorityList::from(authorities), + ); + } + + /// Schedule GRANDPA to pause starting in the given number of blocks. + /// Cannot be done when already paused. + pub fn schedule_pause(in_blocks: T::BlockNumber) -> DispatchResult { + if let StoredState::Live = >::get() { + let scheduled_at = >::block_number(); + >::put(StoredState::PendingPause { + delay: in_blocks, + scheduled_at, + }); + + Ok(()) + } else { + Err(Error::::PauseFailed)? 
+ } + } + + /// Schedule a resume of GRANDPA after pausing. + pub fn schedule_resume(in_blocks: T::BlockNumber) -> DispatchResult { + if let StoredState::Paused = >::get() { + let scheduled_at = >::block_number(); + >::put(StoredState::PendingResume { + delay: in_blocks, + scheduled_at, + }); + + Ok(()) + } else { + Err(Error::::ResumeFailed)? + } + } + + /// Schedule a change in the authorities. + /// + /// The change will be applied at the end of execution of the block + /// `in_blocks` after the current block. This value may be 0, in which + /// case the change is applied at the end of the current block. + /// + /// If the `forced` parameter is defined, this indicates that the current + /// set has been synchronously determined to be offline and that after + /// `in_blocks` the given change should be applied. The given block number + /// indicates the median last finalized block number and it should be used + /// as the canon block when starting the new grandpa voter. + /// + /// No change should be signaled while any change is pending. Returns + /// an error if a change is already pending. + pub fn schedule_change( + next_authorities: AuthorityList, + in_blocks: T::BlockNumber, + forced: Option, + ) -> DispatchResult { + if !>::exists() { + let scheduled_at = >::block_number(); + + if let Some(_) = forced { + if Self::next_forced().map_or(false, |next| next > scheduled_at) { + Err(Error::::TooSoon)? + } + + // only allow the next forced change when twice the window has passed since + // this one. + >::put(scheduled_at + in_blocks * 2.into()); + } + + >::put(StoredPendingChange { + delay: in_blocks, + scheduled_at, + next_authorities, + forced, + }); + + Ok(()) + } else { + Err(Error::::ChangePending)? + } + } + + /// Deposit one of this module's logs. 
+ fn deposit_log(log: ConsensusLog) { + let log: DigestItem = DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()); + >::deposit_log(log.into()); + } + + fn initialize_authorities(authorities: &AuthorityList) { + if !authorities.is_empty() { + assert!( + Self::grandpa_authorities().is_empty(), + "Authorities are already initialized!" + ); + Self::set_grandpa_authorities(authorities); + } + } } impl Module { - /// Attempt to extract a GRANDPA log from a generic digest. - pub fn grandpa_log(digest: &DigestOf) -> Option> { - let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); - digest.convert_first(|l| l.try_to::>(id)) - } - - /// Attempt to extract a pending set-change signal from a digest. - pub fn pending_change(digest: &DigestOf) - -> Option> - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_change()) - } - - /// Attempt to extract a forced set-change signal from a digest. - pub fn forced_change(digest: &DigestOf) - -> Option<(T::BlockNumber, ScheduledChange)> - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_forced_change()) - } - - /// Attempt to extract a pause signal from a digest. - pub fn pending_pause(digest: &DigestOf) - -> Option - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_pause()) - } - - /// Attempt to extract a resume signal from a digest. - pub fn pending_resume(digest: &DigestOf) - -> Option - { - Self::grandpa_log(digest).and_then(|signal| signal.try_into_resume()) - } + /// Attempt to extract a GRANDPA log from a generic digest. + pub fn grandpa_log(digest: &DigestOf) -> Option> { + let id = OpaqueDigestItemId::Consensus(&GRANDPA_ENGINE_ID); + digest.convert_first(|l| l.try_to::>(id)) + } + + /// Attempt to extract a pending set-change signal from a digest. + pub fn pending_change(digest: &DigestOf) -> Option> { + Self::grandpa_log(digest).and_then(|signal| signal.try_into_change()) + } + + /// Attempt to extract a forced set-change signal from a digest. 
+ pub fn forced_change( + digest: &DigestOf, + ) -> Option<(T::BlockNumber, ScheduledChange)> { + Self::grandpa_log(digest).and_then(|signal| signal.try_into_forced_change()) + } + + /// Attempt to extract a pause signal from a digest. + pub fn pending_pause(digest: &DigestOf) -> Option { + Self::grandpa_log(digest).and_then(|signal| signal.try_into_pause()) + } + + /// Attempt to extract a resume signal from a digest. + pub fn pending_resume(digest: &DigestOf) -> Option { + Self::grandpa_log(digest).and_then(|signal| signal.try_into_resume()) + } } impl sp_runtime::BoundToRuntimeAppPublic for Module { - type Public = AuthorityId; + type Public = AuthorityId; } impl pallet_session::OneSessionHandler for Module - where T: pallet_session::Trait +where + T: pallet_session::Trait, { - type Key = AuthorityId; - - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator - { - let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - Self::initialize_authorities(&authorities); - } - - fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) - where I: Iterator - { - // Always issue a change if `session` says that the validators have changed. - // Even if their session keys are the same as before, the underlying economic - // identities have changed. - let current_set_id = if changed { - let next_authorities = validators.map(|(_, k)| (k, 1)).collect::>(); - if let Some((further_wait, median)) = >::take() { - let _ = Self::schedule_change(next_authorities, further_wait, Some(median)); - } else { - let _ = Self::schedule_change(next_authorities, Zero::zero(), None); - } - CurrentSetId::mutate(|s| { *s += 1; *s }) - } else { - // nothing's changed, neither economic conditions nor session keys. update the pointer - // of the current set. - Self::current_set_id() - }; - - // if we didn't issue a change, we update the mapping to note that the current - // set corresponds to the latest equivalent session (i.e. now). 
- let session_index = >::current_index(); - SetIdSession::insert(current_set_id, &session_index); - } - - fn on_disabled(i: usize) { - Self::deposit_log(ConsensusLog::OnDisabled(i as u64)) - } + type Key = AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + Self::initialize_authorities(&authorities); + } + + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, _queued_validators: I) + where + I: Iterator, + { + // Always issue a change if `session` says that the validators have changed. + // Even if their session keys are the same as before, the underlying economic + // identities have changed. + let current_set_id = if changed { + let next_authorities = validators.map(|(_, k)| (k, 1)).collect::>(); + if let Some((further_wait, median)) = >::take() { + let _ = Self::schedule_change(next_authorities, further_wait, Some(median)); + } else { + let _ = Self::schedule_change(next_authorities, Zero::zero(), None); + } + CurrentSetId::mutate(|s| { + *s += 1; + *s + }) + } else { + // nothing's changed, neither economic conditions nor session keys. update the pointer + // of the current set. + Self::current_set_id() + }; + + // if we didn't issue a change, we update the mapping to note that the current + // set corresponds to the latest equivalent session (i.e. now). + let session_index = >::current_index(); + SetIdSession::insert(current_set_id, &session_index); + } + + fn on_disabled(i: usize) { + Self::deposit_log(ConsensusLog::OnDisabled(i as u64)) + } } impl pallet_finality_tracker::OnFinalizationStalled for Module { - fn on_stalled(further_wait: T::BlockNumber, median: T::BlockNumber) { - // when we record old authority sets, we can use `pallet_finality_tracker::median` - // to figure out _who_ failed. until then, we can't meaningfully guard - // against `next == last` the way that normal session changes do. 
- >::put((further_wait, median)); - } + fn on_stalled(further_wait: T::BlockNumber, median: T::BlockNumber) { + // when we record old authority sets, we can use `pallet_finality_tracker::median` + // to figure out _who_ failed. until then, we can't meaningfully guard + // against `next == last` the way that normal session changes do. + >::put((further_wait, median)); + } } /// A round number and set id which point on the time of an offence. #[derive(Copy, Clone, PartialOrd, Ord, Eq, PartialEq, Encode, Decode)] struct GrandpaTimeSlot { - // The order of these matters for `derive(Ord)`. - set_id: SetId, - round: RoundNumber, + // The order of these matters for `derive(Ord)`. + set_id: SetId, + round: RoundNumber, } // TODO [slashing]: Integrate this. /// A grandpa equivocation offence report. struct GrandpaEquivocationOffence { - /// Time slot at which this incident happened. - time_slot: GrandpaTimeSlot, - /// The session index in which the incident happened. - session_index: SessionIndex, - /// The size of the validator set at the time of the offence. - validator_set_count: u32, - /// The authority which produced this equivocation. - offender: FullIdentification, + /// Time slot at which this incident happened. + time_slot: GrandpaTimeSlot, + /// The session index in which the incident happened. + session_index: SessionIndex, + /// The size of the validator set at the time of the offence. + validator_set_count: u32, + /// The authority which produced this equivocation. 
+ offender: FullIdentification, } -impl Offence for GrandpaEquivocationOffence { - const ID: Kind = *b"grandpa:equivoca"; - type TimeSlot = GrandpaTimeSlot; - - fn offenders(&self) -> Vec { - vec![self.offender.clone()] - } - - fn session_index(&self) -> SessionIndex { - self.session_index - } - - fn validator_set_count(&self) -> u32 { - self.validator_set_count - } - - fn time_slot(&self) -> Self::TimeSlot { - self.time_slot - } - - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill { - // the formula is min((3k / n)^2, 1) - let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); - // _ ^ 2 - x.square() - } +impl Offence + for GrandpaEquivocationOffence +{ + const ID: Kind = *b"grandpa:equivoca"; + type TimeSlot = GrandpaTimeSlot; + + fn offenders(&self) -> Vec { + vec![self.offender.clone()] + } + + fn session_index(&self) -> SessionIndex { + self.session_index + } + + fn validator_set_count(&self) -> u32 { + self.validator_set_count + } + + fn time_slot(&self) -> Self::TimeSlot { + self.time_slot + } + + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + // the formula is min((3k / n)^2, 1) + let x = Perbill::from_rational_approximation(3 * offenders_count, validator_set_count); + // _ ^ 2 + x.square() + } } diff --git a/frame/grandpa/src/mock.rs b/frame/grandpa/src/mock.rs index 90b7c97437..3f8637d9e2 100644 --- a/frame/grandpa/src/mock.rs +++ b/frame/grandpa/src/mock.rs @@ -18,21 +18,25 @@ #![cfg(test)] -use sp_runtime::{Perbill, DigestItem, traits::IdentityLookup, testing::{Header, UintAuthorityId}}; -use sp_io; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types, weights::Weight}; +use crate::{AuthorityId, AuthorityList, ConsensusLog, GenesisConfig, Module, Trait}; +use codec::{Decode, Encode}; +use frame_support::{impl_outer_event, impl_outer_origin, parameter_types, weights::Weight}; use sp_core::H256; -use codec::{Encode, Decode}; -use 
crate::{AuthorityId, AuthorityList, GenesisConfig, Trait, Module, ConsensusLog}; use sp_finality_grandpa::GRANDPA_ENGINE_ID; +use sp_io; +use sp_runtime::{ + testing::{Header, UintAuthorityId}, + traits::IdentityLookup, + DigestItem, Perbill, +}; use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} +impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} } pub fn grandpa_log(log: ConsensusLog) -> DigestItem { - DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()) + DigestItem::Consensus(GRANDPA_ENGINE_ID, log.encode()) } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. @@ -40,60 +44,64 @@ pub fn grandpa_log(log: ConsensusLog) -> DigestItem { pub struct Test; impl Trait for Test { - type Event = TestEvent; + type Event = TestEvent; } parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type 
Hash = H256; + type Hashing = sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = TestEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } mod grandpa { - pub use crate::Event; + pub use crate::Event; } -impl_outer_event!{ - pub enum TestEvent for Test { - system, - grandpa, - } +impl_outer_event! { + pub enum TestEvent for Test { + system, + grandpa, + } } pub fn to_authorities(vec: Vec<(u64, u64)>) -> AuthorityList { - vec.into_iter() - .map(|(id, weight)| (UintAuthorityId(id).to_public_key::(), weight)) - .collect() + vec.into_iter() + .map(|(id, weight)| (UintAuthorityId(id).to_public_key::(), weight)) + .collect() } pub fn new_test_ext(authorities: Vec<(u64, u64)>) -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig { - authorities: to_authorities(authorities), - }.assimilate_storage::(&mut t).unwrap(); - t.into() + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + GenesisConfig { + authorities: to_authorities(authorities), + } + .assimilate_storage::(&mut t) + .unwrap(); + t.into() } pub type System = frame_system::Module; diff --git a/frame/grandpa/src/tests.rs b/frame/grandpa/src/tests.rs index b583c31968..2ddc6746dd 100644 --- a/frame/grandpa/src/tests.rs +++ b/frame/grandpa/src/tests.rs @@ -18,304 +18,314 @@ #![cfg(test)] -use sp_runtime::{testing::{H256, Digest}, traits::Header}; -use frame_support::traits::OnFinalize; +use super::*; use crate::mock::*; -use frame_system::{EventRecord, Phase}; use codec::{Decode, Encode}; use fg_primitives::ScheduledChange; -use super::*; +use 
frame_support::traits::OnFinalize; +use frame_system::{EventRecord, Phase}; +use sp_runtime::{ + testing::{Digest, H256}, + traits::Header, +}; fn initialize_block(number: u64, parent_hash: H256) { - System::initialize( - &number, - &parent_hash, - &Default::default(), - &Default::default(), - Default::default(), - ); + System::initialize( + &number, + &parent_hash, + &Default::default(), + &Default::default(), + Default::default(), + ); } #[test] fn authorities_change_logged() { - new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - initialize_block(1, Default::default()); - Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 0, None).unwrap(); - - System::note_finished_extrinsics(); - Grandpa::on_finalize(1); - - let header = System::finalize(); - assert_eq!(header.digest, Digest { - logs: vec![ - grandpa_log(ConsensusLog::ScheduledChange( - ScheduledChange { delay: 0, next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) } - )), - ], - }); - - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Finalization, - event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), - topics: vec![], - }, - ]); - }); + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + initialize_block(1, Default::default()); + Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 0, None).unwrap(); + + System::note_finished_extrinsics(); + Grandpa::on_finalize(1); + + let header = System::finalize(); + assert_eq!( + header.digest, + Digest { + logs: vec![grandpa_log(ConsensusLog::ScheduledChange( + ScheduledChange { + delay: 0, + next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + } + )),], + } + ); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Finalization, + event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), + topics: vec![], + },] + ); + }); } #[test] fn authorities_change_logged_after_delay() { - 
new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - initialize_block(1, Default::default()); - Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); - Grandpa::on_finalize(1); - let header = System::finalize(); - assert_eq!(header.digest, Digest { - logs: vec![ - grandpa_log(ConsensusLog::ScheduledChange( - ScheduledChange { delay: 1, next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) } - )), - ], - }); - - // no change at this height. - assert_eq!(System::events(), vec![]); - - initialize_block(2, header.hash()); - System::note_finished_extrinsics(); - Grandpa::on_finalize(2); - - let _header = System::finalize(); - assert_eq!(System::events(), vec![ - EventRecord { - phase: Phase::Finalization, - event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), - topics: vec![], - }, - ]); - }); + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + initialize_block(1, Default::default()); + Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); + Grandpa::on_finalize(1); + let header = System::finalize(); + assert_eq!( + header.digest, + Digest { + logs: vec![grandpa_log(ConsensusLog::ScheduledChange( + ScheduledChange { + delay: 1, + next_authorities: to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + } + )),], + } + ); + + // no change at this height. 
+ assert_eq!(System::events(), vec![]); + + initialize_block(2, header.hash()); + System::note_finished_extrinsics(); + Grandpa::on_finalize(2); + + let _header = System::finalize(); + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Finalization, + event: Event::NewAuthorities(to_authorities(vec![(4, 1), (5, 1), (6, 1)])).into(), + topics: vec![], + },] + ); + }); } #[test] fn cannot_schedule_change_when_one_pending() { - new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - initialize_block(1, Default::default()); - Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); - assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + initialize_block(1, Default::default()); + Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 1, None).unwrap(); + assert!(>::exists()); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); - Grandpa::on_finalize(1); - let header = System::finalize(); + Grandpa::on_finalize(1); + let header = System::finalize(); - initialize_block(2, header.hash()); - assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + initialize_block(2, header.hash()); + assert!(>::exists()); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); - Grandpa::on_finalize(2); - let header = System::finalize(); + Grandpa::on_finalize(2); + let header = System::finalize(); - initialize_block(3, header.hash()); - assert!(!>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); + initialize_block(3, header.hash()); + assert!(!>::exists()); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); - Grandpa::on_finalize(3); - let _header = System::finalize(); - }); + Grandpa::on_finalize(3); + let 
_header = System::finalize(); + }); } #[test] fn new_decodes_from_old() { - let old = OldStoredPendingChange { - scheduled_at: 5u32, - delay: 100u32, - next_authorities: to_authorities(vec![(1, 5), (2, 10), (3, 2)]), - }; - - let encoded = old.encode(); - let new = StoredPendingChange::::decode(&mut &encoded[..]).unwrap(); - assert!(new.forced.is_none()); - assert_eq!(new.scheduled_at, old.scheduled_at); - assert_eq!(new.delay, old.delay); - assert_eq!(new.next_authorities, old.next_authorities); + let old = OldStoredPendingChange { + scheduled_at: 5u32, + delay: 100u32, + next_authorities: to_authorities(vec![(1, 5), (2, 10), (3, 2)]), + }; + + let encoded = old.encode(); + let new = StoredPendingChange::::decode(&mut &encoded[..]).unwrap(); + assert!(new.forced.is_none()); + assert_eq!(new.scheduled_at, old.scheduled_at); + assert_eq!(new.delay, old.delay); + assert_eq!(new.next_authorities, old.next_authorities); } #[test] fn dispatch_forced_change() { - new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - initialize_block(1, Default::default()); - Grandpa::schedule_change( - to_authorities(vec![(4, 1), (5, 1), (6, 1)]), - 5, - Some(0), - ).unwrap(); - - assert!(>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); - - Grandpa::on_finalize(1); - let mut header = System::finalize(); - - for i in 2..7 { - initialize_block(i, header.hash()); - assert!(>::get().unwrap().forced.is_some()); - assert_eq!(Grandpa::next_forced(), Some(11)); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); - - Grandpa::on_finalize(i); - header = System::finalize(); - } - - // change has been applied at the end of block 6. - // add a normal change. 
- { - initialize_block(7, header.hash()); - assert!(!>::exists()); - assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); - Grandpa::on_finalize(7); - header = System::finalize(); - } - - // run the normal change. - { - initialize_block(8, header.hash()); - assert!(>::exists()); - assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(4, 1), (5, 1), (6, 1)])); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); - Grandpa::on_finalize(8); - header = System::finalize(); - } - - // normal change applied. but we can't apply a new forced change for some - // time. - for i in 9..11 { - initialize_block(i, header.hash()); - assert!(!>::exists()); - assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)])); - assert_eq!(Grandpa::next_forced(), Some(11)); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)).is_err()); - Grandpa::on_finalize(i); - header = System::finalize(); - } - - { - initialize_block(11, header.hash()); - assert!(!>::exists()); - assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1), (7, 1)]), 5, Some(0)).is_ok()); - assert_eq!(Grandpa::next_forced(), Some(21)); - Grandpa::on_finalize(11); - header = System::finalize(); - } - let _ = header; - }); + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + initialize_block(1, Default::default()); + Grandpa::schedule_change(to_authorities(vec![(4, 1), (5, 1), (6, 1)]), 5, Some(0)).unwrap(); + + assert!(>::exists()); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); + + Grandpa::on_finalize(1); + let mut header = System::finalize(); + + for i in 2..7 { + initialize_block(i, header.hash()); + assert!(>::get().unwrap().forced.is_some()); + assert_eq!(Grandpa::next_forced(), Some(11)); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 
1)]), 1, None).is_err()); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, Some(0)).is_err()); + + Grandpa::on_finalize(i); + header = System::finalize(); + } + + // change has been applied at the end of block 6. + // add a normal change. + { + initialize_block(7, header.hash()); + assert!(!>::exists()); + assert_eq!( + Grandpa::grandpa_authorities(), + to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + ); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_ok()); + Grandpa::on_finalize(7); + header = System::finalize(); + } + + // run the normal change. + { + initialize_block(8, header.hash()); + assert!(>::exists()); + assert_eq!( + Grandpa::grandpa_authorities(), + to_authorities(vec![(4, 1), (5, 1), (6, 1)]) + ); + assert!(Grandpa::schedule_change(to_authorities(vec![(5, 1)]), 1, None).is_err()); + Grandpa::on_finalize(8); + header = System::finalize(); + } + + // normal change applied. but we can't apply a new forced change for some + // time. 
+ for i in 9..11 { + initialize_block(i, header.hash()); + assert!(!>::exists()); + assert_eq!(Grandpa::grandpa_authorities(), to_authorities(vec![(5, 1)])); + assert_eq!(Grandpa::next_forced(), Some(11)); + assert!( + Grandpa::schedule_change(to_authorities(vec![(5, 1), (6, 1)]), 5, Some(0)).is_err() + ); + Grandpa::on_finalize(i); + header = System::finalize(); + } + + { + initialize_block(11, header.hash()); + assert!(!>::exists()); + assert!(Grandpa::schedule_change( + to_authorities(vec![(5, 1), (6, 1), (7, 1)]), + 5, + Some(0) + ) + .is_ok()); + assert_eq!(Grandpa::next_forced(), Some(21)); + Grandpa::on_finalize(11); + header = System::finalize(); + } + let _ = header; + }); } #[test] fn schedule_pause_only_when_live() { - new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - // we schedule a pause at block 1 with delay of 1 - initialize_block(1, Default::default()); - Grandpa::schedule_pause(1).unwrap(); - - // we've switched to the pending pause state - assert_eq!( - Grandpa::state(), - StoredState::PendingPause { - scheduled_at: 1u64, - delay: 1, - }, - ); - - Grandpa::on_finalize(1); - let _ = System::finalize(); - - initialize_block(2, Default::default()); - - // signaling a pause now should fail - assert!(Grandpa::schedule_pause(1).is_err()); - - Grandpa::on_finalize(2); - let _ = System::finalize(); - - // after finalizing block 2 the set should have switched to paused state - assert_eq!( - Grandpa::state(), - StoredState::Paused, - ); - }); + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + // we schedule a pause at block 1 with delay of 1 + initialize_block(1, Default::default()); + Grandpa::schedule_pause(1).unwrap(); + + // we've switched to the pending pause state + assert_eq!( + Grandpa::state(), + StoredState::PendingPause { + scheduled_at: 1u64, + delay: 1, + }, + ); + + Grandpa::on_finalize(1); + let _ = System::finalize(); + + initialize_block(2, Default::default()); + + // signaling a pause now should fail + 
assert!(Grandpa::schedule_pause(1).is_err()); + + Grandpa::on_finalize(2); + let _ = System::finalize(); + + // after finalizing block 2 the set should have switched to paused state + assert_eq!(Grandpa::state(), StoredState::Paused,); + }); } #[test] fn schedule_resume_only_when_paused() { - new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { - initialize_block(1, Default::default()); - - // the set is currently live, resuming it is an error - assert!(Grandpa::schedule_resume(1).is_err()); - - assert_eq!( - Grandpa::state(), - StoredState::Live, - ); - - // we schedule a pause to be applied instantly - Grandpa::schedule_pause(0).unwrap(); - Grandpa::on_finalize(1); - let _ = System::finalize(); - - assert_eq!( - Grandpa::state(), - StoredState::Paused, - ); - - // we schedule the set to go back live in 2 blocks - initialize_block(2, Default::default()); - Grandpa::schedule_resume(2).unwrap(); - Grandpa::on_finalize(2); - let _ = System::finalize(); - - initialize_block(3, Default::default()); - Grandpa::on_finalize(3); - let _ = System::finalize(); - - initialize_block(4, Default::default()); - Grandpa::on_finalize(4); - let _ = System::finalize(); - - // it should be live at block 4 - assert_eq!( - Grandpa::state(), - StoredState::Live, - ); - }); + new_test_ext(vec![(1, 1), (2, 1), (3, 1)]).execute_with(|| { + initialize_block(1, Default::default()); + + // the set is currently live, resuming it is an error + assert!(Grandpa::schedule_resume(1).is_err()); + + assert_eq!(Grandpa::state(), StoredState::Live,); + + // we schedule a pause to be applied instantly + Grandpa::schedule_pause(0).unwrap(); + Grandpa::on_finalize(1); + let _ = System::finalize(); + + assert_eq!(Grandpa::state(), StoredState::Paused,); + + // we schedule the set to go back live in 2 blocks + initialize_block(2, Default::default()); + Grandpa::schedule_resume(2).unwrap(); + Grandpa::on_finalize(2); + let _ = System::finalize(); + + initialize_block(3, Default::default()); + 
Grandpa::on_finalize(3); + let _ = System::finalize(); + + initialize_block(4, Default::default()); + Grandpa::on_finalize(4); + let _ = System::finalize(); + + // it should be live at block 4 + assert_eq!(Grandpa::state(), StoredState::Live,); + }); } #[test] fn time_slot_have_sane_ord() { - // Ensure that `Ord` implementation is sane. - const FIXTURE: &[GrandpaTimeSlot] = &[ - GrandpaTimeSlot { - set_id: 0, - round: 0, - }, - GrandpaTimeSlot { - set_id: 0, - round: 1, - }, - GrandpaTimeSlot { - set_id: 1, - round: 0, - }, - GrandpaTimeSlot { - set_id: 1, - round: 1, - }, - GrandpaTimeSlot { - set_id: 1, - round: 2, - } - ]; - assert!(FIXTURE.windows(2).all(|f| f[0] < f[1])); + // Ensure that `Ord` implementation is sane. + const FIXTURE: &[GrandpaTimeSlot] = &[ + GrandpaTimeSlot { + set_id: 0, + round: 0, + }, + GrandpaTimeSlot { + set_id: 0, + round: 1, + }, + GrandpaTimeSlot { + set_id: 1, + round: 0, + }, + GrandpaTimeSlot { + set_id: 1, + round: 1, + }, + GrandpaTimeSlot { + set_id: 1, + round: 2, + }, + ]; + assert!(FIXTURE.windows(2).all(|f| f[0] < f[1])); } diff --git a/frame/identity/src/benchmarking.rs b/frame/identity/src/benchmarking.rs index fe99cd9907..4dc3645d91 100644 --- a/frame/identity/src/benchmarking.rs +++ b/frame/identity/src/benchmarking.rs @@ -20,9 +20,9 @@ use super::*; +use frame_benchmarking::benchmarks; use frame_system::RawOrigin; use sp_io::hashing::blake2_256; -use frame_benchmarking::benchmarks; use sp_runtime::traits::Bounded; use crate::Module as Identity; @@ -32,275 +32,295 @@ const MAX_REGISTRARS: u32 = 50; // Support Functions fn account(name: &'static str, index: u32) -> T::AccountId { - let entropy = (name, index).using_encoded(blake2_256); - T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() + let entropy = (name, index).using_encoded(blake2_256); + T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() } // Adds `r` registrars to the Identity Pallet. These registrars will have set fees and fields. 
fn add_registrars(r: u32) -> Result<(), &'static str> { - for i in 0..r { - let _ = T::Currency::make_free_balance_be(&account::("registrar", i), BalanceOf::::max_value()); - Identity::::add_registrar(RawOrigin::Root.into(), account::("registrar", i))?; - Identity::::set_fee(RawOrigin::Signed(account::("registrar", i)).into(), i.into(), 10.into())?; - let fields = IdentityFields( - IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot - | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter - ); - Identity::::set_fields(RawOrigin::Signed(account::("registrar", i)).into(), i.into(), fields)?; - } - - assert_eq!(Registrars::::get().len(), r as usize); - Ok(()) + for i in 0..r { + let _ = T::Currency::make_free_balance_be( + &account::("registrar", i), + BalanceOf::::max_value(), + ); + Identity::::add_registrar(RawOrigin::Root.into(), account::("registrar", i))?; + Identity::::set_fee( + RawOrigin::Signed(account::("registrar", i)).into(), + i.into(), + 10.into(), + )?; + let fields = IdentityFields( + IdentityField::Display + | IdentityField::Legal + | IdentityField::Web + | IdentityField::Riot + | IdentityField::Email + | IdentityField::PgpFingerprint + | IdentityField::Image + | IdentityField::Twitter, + ); + Identity::::set_fields( + RawOrigin::Signed(account::("registrar", i)).into(), + i.into(), + fields, + )?; + } + + assert_eq!(Registrars::::get().len(), r as usize); + Ok(()) } // Adds `s` sub-accounts to the identity of `who`. Each wil have 32 bytes of raw data added to it. // This additionally returns the vector of sub-accounts to it can be modified if needed. 
-fn add_sub_accounts(who: &T::AccountId, s: u32) -> Result, &'static str> { - let mut subs = Vec::new(); - let who_origin = RawOrigin::Signed(who.clone()); - let data = Data::Raw(vec![0; 32]); - - for i in 0..s { - let sub_account = account::("sub", i); - subs.push((sub_account, data.clone())); - } - - // Set identity so `set_subs` does not fail. - let _ = T::Currency::make_free_balance_be(&who, BalanceOf::::max_value()); - let info = create_identity_info::(1); - Identity::::set_identity(who_origin.clone().into(), info)?; - - Identity::::set_subs(who_origin.into(), subs.clone())?; - - return Ok(subs) +fn add_sub_accounts( + who: &T::AccountId, + s: u32, +) -> Result, &'static str> { + let mut subs = Vec::new(); + let who_origin = RawOrigin::Signed(who.clone()); + let data = Data::Raw(vec![0; 32]); + + for i in 0..s { + let sub_account = account::("sub", i); + subs.push((sub_account, data.clone())); + } + + // Set identity so `set_subs` does not fail. + let _ = T::Currency::make_free_balance_be(&who, BalanceOf::::max_value()); + let info = create_identity_info::(1); + Identity::::set_identity(who_origin.clone().into(), info)?; + + Identity::::set_subs(who_origin.into(), subs.clone())?; + + return Ok(subs); } // This creates an `IdentityInfo` object with `num_fields` extra fields. // All data is pre-populated with some arbitrary bytes. 
fn create_identity_info(num_fields: u32) -> IdentityInfo { - let data = Data::Raw(vec![0; 32]); - - let info = IdentityInfo { - additional: vec![(data.clone(), data.clone()); num_fields as usize], - display: data.clone(), - legal: data.clone(), - web: data.clone(), - riot: data.clone(), - email: data.clone(), - pgp_fingerprint: Some([0; 20]), - image: data.clone(), - twitter: data.clone(), - }; - - return info + let data = Data::Raw(vec![0; 32]); + + let info = IdentityInfo { + additional: vec![(data.clone(), data.clone()); num_fields as usize], + display: data.clone(), + legal: data.clone(), + web: data.clone(), + riot: data.clone(), + email: data.clone(), + pgp_fingerprint: Some([0; 20]), + image: data.clone(), + twitter: data.clone(), + }; + + return info; } benchmarks! { - // These are the common parameters along with their instancing. - _ { - let r in 1 .. MAX_REGISTRARS => add_registrars::(r)?; - let s in 1 .. T::MaxSubAccounts::get() => { - // Give them s many sub accounts - let caller = account::("caller", 0); - let _ = add_sub_accounts::(&caller, s)?; - }; - let x in 1 .. T::MaxAdditionalFields::get() => { - // Create their main identity with x additional fields - let info = create_identity_info::(x); - let caller = account::("caller", 0); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); - Identity::::set_identity(caller_origin, info)?; - }; - } - - add_registrar { - let r in ...; - }: _(RawOrigin::Root, account::("registrar", r + 1)) - - set_identity { - let r in ...; - // This X doesn't affect the caller ID up front like with the others, so we don't use the - // standard preparation. - let x in _ .. 
_ => (); - let caller = { - // The target user - let caller = account::("caller", 0); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - // Add an initial identity - let initial_info = create_identity_info::(1); - Identity::::set_identity(caller_origin.clone(), initial_info)?; - - // User requests judgement from all the registrars, and they approve - for i in 0..r { - Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; - Identity::::provide_judgement( - RawOrigin::Signed(account::("registrar", i)).into(), - i, - caller_lookup.clone(), - Judgement::Reasonable - )?; - } - caller - }; - }: _( - RawOrigin::Signed(caller), - create_identity_info::(x) - ) - - set_subs { - let caller = account::("caller", 0); - - // Give them s many sub accounts. - let s in 1 .. T::MaxSubAccounts::get() - 1 => { - let _ = add_sub_accounts::(&caller, s)?; - }; - - let mut subs = Module::::subs(&caller); - - // Create an s + 1 sub account. 
- let data = Data::Raw(vec![0; 32]); - subs.push((account::("sub", s + 1), data)); - - }: _(RawOrigin::Signed(caller), subs) - - clear_identity { - let caller = account::("caller", 0); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); - let caller_lookup = ::unlookup(caller.clone()); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - let r in ...; - let s in ...; - let x in ...; - - // User requests judgement from all the registrars, and they approve - for i in 0..r { - Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; - Identity::::provide_judgement( - RawOrigin::Signed(account::("registrar", i)).into(), - i, - caller_lookup.clone(), - Judgement::Reasonable - )?; - } - }: _(RawOrigin::Signed(caller)) - - request_judgement { - let caller = account::("caller", 0); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - let r in ...; - let x in ...; - }: _(RawOrigin::Signed(caller), r - 1, 10.into()) - - cancel_request { - let caller = account::("caller", 0); - let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - let r in ...; - let x in ...; - - Identity::::request_judgement(caller_origin, r - 1, 10.into())?; - }: _(RawOrigin::Signed(caller), r - 1) - - set_fee { - let caller = account::("caller", 0); - - let r in ...; - - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - }: _(RawOrigin::Signed(caller), r, 10.into()) - - set_account_id { - let caller = account::("caller", 0); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - let r in ...; - - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - }: _(RawOrigin::Signed(caller), r, account::("new", 0)) - - set_fields { - let caller = account::("caller", 0); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - let r in 
...; - - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - let fields = IdentityFields( - IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot - | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter - ); - }: _(RawOrigin::Signed(caller), r, fields) - - provide_judgement { - // The user - let user = account::("user", r); - let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); - let user_lookup = ::unlookup(user.clone()); - let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); - - let caller = account::("caller", 0); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - let r in ...; - // For this x, it's the user identity that gts the fields, not the caller. - let x in _ .. _ => { - let info = create_identity_info::(x); - Identity::::set_identity(user_origin.clone(), info)?; - }; - - Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; - Identity::::request_judgement(user_origin.clone(), r, 10.into())?; - }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) - - kill_identity { - let caller = account::("caller", 0); - let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); - let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); - let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); - - let r in ...; - let s in ...; - let x in ...; - - // User requests judgement from all the registrars, and they approve - for i in 0..r { - Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; - Identity::::provide_judgement( - RawOrigin::Signed(account::("registrar", i)).into(), - i, - caller_lookup.clone(), - Judgement::Reasonable - )?; - } - }: _(RawOrigin::Root, caller_lookup) + // These are the common parameters along with their instancing. + _ { + let r in 1 .. 
MAX_REGISTRARS => add_registrars::(r)?; + let s in 1 .. T::MaxSubAccounts::get() => { + // Give them s many sub accounts + let caller = account::("caller", 0); + let _ = add_sub_accounts::(&caller, s)?; + }; + let x in 1 .. T::MaxAdditionalFields::get() => { + // Create their main identity with x additional fields + let info = create_identity_info::(x); + let caller = account::("caller", 0); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller)); + Identity::::set_identity(caller_origin, info)?; + }; + } + + add_registrar { + let r in ...; + }: _(RawOrigin::Root, account::("registrar", r + 1)) + + set_identity { + let r in ...; + // This X doesn't affect the caller ID up front like with the others, so we don't use the + // standard preparation. + let x in _ .. _ => (); + let caller = { + // The target user + let caller = account::("caller", 0); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + // Add an initial identity + let initial_info = create_identity_info::(1); + Identity::::set_identity(caller_origin.clone(), initial_info)?; + + // User requests judgement from all the registrars, and they approve + for i in 0..r { + Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(account::("registrar", i)).into(), + i, + caller_lookup.clone(), + Judgement::Reasonable + )?; + } + caller + }; + }: _( + RawOrigin::Signed(caller), + create_identity_info::(x) + ) + + set_subs { + let caller = account::("caller", 0); + + // Give them s many sub accounts. + let s in 1 .. T::MaxSubAccounts::get() - 1 => { + let _ = add_sub_accounts::(&caller, s)?; + }; + + let mut subs = Module::::subs(&caller); + + // Create an s + 1 sub account. 
+ let data = Data::Raw(vec![0; 32]); + subs.push((account::("sub", s + 1), data)); + + }: _(RawOrigin::Signed(caller), subs) + + clear_identity { + let caller = account::("caller", 0); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let caller_lookup = ::unlookup(caller.clone()); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + let r in ...; + let s in ...; + let x in ...; + + // User requests judgement from all the registrars, and they approve + for i in 0..r { + Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(account::("registrar", i)).into(), + i, + caller_lookup.clone(), + Judgement::Reasonable + )?; + } + }: _(RawOrigin::Signed(caller)) + + request_judgement { + let caller = account::("caller", 0); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + let r in ...; + let x in ...; + }: _(RawOrigin::Signed(caller), r - 1, 10.into()) + + cancel_request { + let caller = account::("caller", 0); + let caller_origin = ::Origin::from(RawOrigin::Signed(caller.clone())); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + let r in ...; + let x in ...; + + Identity::::request_judgement(caller_origin, r - 1, 10.into())?; + }: _(RawOrigin::Signed(caller), r - 1) + + set_fee { + let caller = account::("caller", 0); + + let r in ...; + + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + }: _(RawOrigin::Signed(caller), r, 10.into()) + + set_account_id { + let caller = account::("caller", 0); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + let r in ...; + + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + }: _(RawOrigin::Signed(caller), r, account::("new", 0)) + + set_fields { + let caller = account::("caller", 0); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + let r in 
...; + + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + let fields = IdentityFields( + IdentityField::Display | IdentityField::Legal | IdentityField::Web | IdentityField::Riot + | IdentityField::Email | IdentityField::PgpFingerprint | IdentityField::Image | IdentityField::Twitter + ); + }: _(RawOrigin::Signed(caller), r, fields) + + provide_judgement { + // The user + let user = account::("user", r); + let user_origin = ::Origin::from(RawOrigin::Signed(user.clone())); + let user_lookup = ::unlookup(user.clone()); + let _ = T::Currency::make_free_balance_be(&user, BalanceOf::::max_value()); + + let caller = account::("caller", 0); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + let r in ...; + // For this x, it's the user identity that gts the fields, not the caller. + let x in _ .. _ => { + let info = create_identity_info::(x); + Identity::::set_identity(user_origin.clone(), info)?; + }; + + Identity::::add_registrar(RawOrigin::Root.into(), caller.clone())?; + Identity::::request_judgement(user_origin.clone(), r, 10.into())?; + }: _(RawOrigin::Signed(caller), r, user_lookup, Judgement::Reasonable) + + kill_identity { + let caller = account::("caller", 0); + let caller_origin: ::Origin = RawOrigin::Signed(caller.clone()).into(); + let caller_lookup: ::Source = T::Lookup::unlookup(caller.clone()); + let _ = T::Currency::make_free_balance_be(&caller, BalanceOf::::max_value()); + + let r in ...; + let s in ...; + let x in ...; + + // User requests judgement from all the registrars, and they approve + for i in 0..r { + Identity::::request_judgement(caller_origin.clone(), i, 10.into())?; + Identity::::provide_judgement( + RawOrigin::Signed(account::("registrar", i)).into(), + i, + caller_lookup.clone(), + Judgement::Reasonable + )?; + } + }: _(RawOrigin::Root, caller_lookup) } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn 
test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_add_registrar::()); - assert_ok!(test_benchmark_set_identity::()); - assert_ok!(test_benchmark_set_subs::()); - assert_ok!(test_benchmark_clear_identity::()); - assert_ok!(test_benchmark_request_judgement::()); - assert_ok!(test_benchmark_cancel_request::()); - assert_ok!(test_benchmark_set_fee::()); - assert_ok!(test_benchmark_set_account_id::()); - assert_ok!(test_benchmark_set_fields::()); - assert_ok!(test_benchmark_provide_judgement::()); - assert_ok!(test_benchmark_kill_identity::()); - }); - } + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_add_registrar::()); + assert_ok!(test_benchmark_set_identity::()); + assert_ok!(test_benchmark_set_subs::()); + assert_ok!(test_benchmark_clear_identity::()); + assert_ok!(test_benchmark_request_judgement::()); + assert_ok!(test_benchmark_cancel_request::()); + assert_ok!(test_benchmark_set_fee::()); + assert_ok!(test_benchmark_set_account_id::()); + assert_ok!(test_benchmark_set_fields::()); + assert_ok!(test_benchmark_provide_judgement::()); + assert_ok!(test_benchmark_kill_identity::()); + }); + } } diff --git a/frame/identity/src/lib.rs b/frame/identity/src/lib.rs index ddb9bdcce2..7c005d65e4 100644 --- a/frame/identity/src/lib.rs +++ b/frame/identity/src/lib.rs @@ -65,57 +65,59 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_std::{fmt::Debug, ops::Add, iter::once}; +use codec::{Decode, Encode}; use enumflags2::BitFlags; -use codec::{Encode, Decode}; -use sp_runtime::{DispatchResult, RuntimeDebug}; -use sp_runtime::traits::{StaticLookup, Zero, AppendZerosInput}; use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, - traits::{Currency, ReservableCurrency, OnUnbalanced, Get, BalanceStatus, EnsureOrigin}, - weights::{SimpleDispatchInfo, 
MINIMUM_WEIGHT}, + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{BalanceStatus, Currency, EnsureOrigin, Get, OnUnbalanced, ReservableCurrency}, + weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed}; +use sp_runtime::traits::{AppendZerosInput, StaticLookup, Zero}; +use sp_runtime::{DispatchResult, RuntimeDebug}; +use sp_std::prelude::*; +use sp_std::{fmt::Debug, iter::once, ops::Add}; mod benchmarking; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// The currency trait. - type Currency: ReservableCurrency; + /// The currency trait. + type Currency: ReservableCurrency; - /// The amount held on deposit for a registered identity. - type BasicDeposit: Get>; + /// The amount held on deposit for a registered identity. + type BasicDeposit: Get>; - /// The amount held on deposit per additional field for a registered identity. - type FieldDeposit: Get>; + /// The amount held on deposit per additional field for a registered identity. + type FieldDeposit: Get>; - /// The amount held on deposit for a registered subaccount. This should account for the fact - /// that one storage item's value will increase by the size of an account ID, and there will be - /// another trie item whose value is the size of an account ID plus 32 bytes. - type SubAccountDeposit: Get>; + /// The amount held on deposit for a registered subaccount. 
This should account for the fact + /// that one storage item's value will increase by the size of an account ID, and there will be + /// another trie item whose value is the size of an account ID plus 32 bytes. + type SubAccountDeposit: Get>; - /// The maximum number of sub-accounts allowed per identified account. - type MaxSubAccounts: Get; + /// The maximum number of sub-accounts allowed per identified account. + type MaxSubAccounts: Get; - /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O - /// required to access an identity, but can be pretty high. - type MaxAdditionalFields: Get; + /// Maximum number of additional fields that may be stored in an ID. Needed to bound the I/O + /// required to access an identity, but can be pretty high. + type MaxAdditionalFields: Get; - /// What to do with slashed funds. - type Slashed: OnUnbalanced>; + /// What to do with slashed funds. + type Slashed: OnUnbalanced>; - /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + /// The origin which may forcibly set or remove a name. Root can always do this. + type ForceOrigin: EnsureOrigin; - /// The origin which may add or remove registrars. Root can always do this. - type RegistrarOrigin: EnsureOrigin; + /// The origin which may add or remove registrars. Root can always do this. + type RegistrarOrigin: EnsureOrigin; } /// Either underlying data blob if it is at most 32 bytes, or a hash of it. If the data is greater @@ -124,66 +126,66 @@ pub trait Trait: frame_system::Trait { /// Can also be `None`. #[derive(Clone, Eq, PartialEq, RuntimeDebug)] pub enum Data { - /// No data here. - None, - /// The data is stored directly. - Raw(Vec), - /// Only the Blake2 hash of the data is stored. The preimage of the hash may be retrieved - /// through some hash-lookup service. - BlakeTwo256([u8; 32]), - /// Only the SHA2-256 hash of the data is stored. 
The preimage of the hash may be retrieved - /// through some hash-lookup service. - Sha256([u8; 32]), - /// Only the Keccak-256 hash of the data is stored. The preimage of the hash may be retrieved - /// through some hash-lookup service. - Keccak256([u8; 32]), - /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved - /// through some hash-lookup service. - ShaThree256([u8; 32]), + /// No data here. + None, + /// The data is stored directly. + Raw(Vec), + /// Only the Blake2 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. + BlakeTwo256([u8; 32]), + /// Only the SHA2-256 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. + Sha256([u8; 32]), + /// Only the Keccak-256 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. + Keccak256([u8; 32]), + /// Only the SHA3-256 hash of the data is stored. The preimage of the hash may be retrieved + /// through some hash-lookup service. 
+ ShaThree256([u8; 32]), } impl Decode for Data { - fn decode(input: &mut I) -> sp_std::result::Result { - let b = input.read_byte()?; - Ok(match b { - 0 => Data::None, - n @ 1 ..= 33 => { - let mut r = vec![0u8; n as usize - 1]; - input.read(&mut r[..])?; - Data::Raw(r) - } - 34 => Data::BlakeTwo256(<[u8; 32]>::decode(input)?), - 35 => Data::Sha256(<[u8; 32]>::decode(input)?), - 36 => Data::Keccak256(<[u8; 32]>::decode(input)?), - 37 => Data::ShaThree256(<[u8; 32]>::decode(input)?), - _ => return Err(codec::Error::from("invalid leading byte")), - }) - } + fn decode(input: &mut I) -> sp_std::result::Result { + let b = input.read_byte()?; + Ok(match b { + 0 => Data::None, + n @ 1..=33 => { + let mut r = vec![0u8; n as usize - 1]; + input.read(&mut r[..])?; + Data::Raw(r) + } + 34 => Data::BlakeTwo256(<[u8; 32]>::decode(input)?), + 35 => Data::Sha256(<[u8; 32]>::decode(input)?), + 36 => Data::Keccak256(<[u8; 32]>::decode(input)?), + 37 => Data::ShaThree256(<[u8; 32]>::decode(input)?), + _ => return Err(codec::Error::from("invalid leading byte")), + }) + } } impl Encode for Data { - fn encode(&self) -> Vec { - match self { - Data::None => vec![0u8; 1], - Data::Raw(ref x) => { - let l = x.len().min(32); - let mut r = vec![l as u8 + 1; l + 1]; - &mut r[1..].copy_from_slice(&x[..l as usize]); - r - } - Data::BlakeTwo256(ref h) => once(34u8).chain(h.iter().cloned()).collect(), - Data::Sha256(ref h) => once(35u8).chain(h.iter().cloned()).collect(), - Data::Keccak256(ref h) => once(36u8).chain(h.iter().cloned()).collect(), - Data::ShaThree256(ref h) => once(37u8).chain(h.iter().cloned()).collect(), - } - } + fn encode(&self) -> Vec { + match self { + Data::None => vec![0u8; 1], + Data::Raw(ref x) => { + let l = x.len().min(32); + let mut r = vec![l as u8 + 1; l + 1]; + &mut r[1..].copy_from_slice(&x[..l as usize]); + r + } + Data::BlakeTwo256(ref h) => once(34u8).chain(h.iter().cloned()).collect(), + Data::Sha256(ref h) => once(35u8).chain(h.iter().cloned()).collect(), + 
Data::Keccak256(ref h) => once(36u8).chain(h.iter().cloned()).collect(), + Data::ShaThree256(ref h) => once(37u8).chain(h.iter().cloned()).collect(), + } + } } impl codec::EncodeLike for Data {} impl Default for Data { - fn default() -> Self { - Self::None - } + fn default() -> Self { + Self::None + } } /// An identifier for a single name registrar/identity verification service. @@ -194,51 +196,47 @@ pub type RegistrarIndex = u32; /// NOTE: Registrars may pay little attention to some fields. Registrars may want to make clear /// which fields their attestation is relevant for by off-chain means. #[derive(Copy, Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] -pub enum Judgement< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq -> { - /// The default value; no opinion is held. - Unknown, - /// No judgement is yet in place, but a deposit is reserved as payment for providing one. - FeePaid(Balance), - /// The data appears to be reasonably acceptable in terms of its accuracy, however no in depth - /// checks (such as in-person meetings or formal KYC) have been conducted. - Reasonable, - /// The target is known directly by the registrar and the registrar can fully attest to the - /// the data's accuracy. - KnownGood, - /// The data was once good but is currently out of date. There is no malicious intent in the - /// inaccuracy. This judgement can be removed through updating the data. - OutOfDate, - /// The data is imprecise or of sufficiently low-quality to be problematic. It is not - /// indicative of malicious intent. This judgement can be removed through updating the data. - LowQuality, - /// The data is erroneous. This may be indicative of malicious intent. This cannot be removed - /// except by the registrar. - Erroneous, +pub enum Judgement { + /// The default value; no opinion is held. + Unknown, + /// No judgement is yet in place, but a deposit is reserved as payment for providing one. 
+ FeePaid(Balance), + /// The data appears to be reasonably acceptable in terms of its accuracy, however no in depth + /// checks (such as in-person meetings or formal KYC) have been conducted. + Reasonable, + /// The target is known directly by the registrar and the registrar can fully attest to the + /// the data's accuracy. + KnownGood, + /// The data was once good but is currently out of date. There is no malicious intent in the + /// inaccuracy. This judgement can be removed through updating the data. + OutOfDate, + /// The data is imprecise or of sufficiently low-quality to be problematic. It is not + /// indicative of malicious intent. This judgement can be removed through updating the data. + LowQuality, + /// The data is erroneous. This may be indicative of malicious intent. This cannot be removed + /// except by the registrar. + Erroneous, } -impl< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq -> Judgement { - /// Returns `true` if this judgement is indicative of a deposit being currently held. This means - /// it should not be cleared or replaced except by an operation which utilizes the deposit. - fn has_deposit(&self) -> bool { - match self { - Judgement::FeePaid(_) => true, - _ => false, - } - } - - /// Returns `true` if this judgement is one that should not be generally be replaced outside - /// of specialized handlers. Examples include "malicious" judgements and deposit-holding - /// judgements. - fn is_sticky(&self) -> bool { - match self { - Judgement::FeePaid(_) | Judgement::Erroneous => true, - _ => false, - } - } +impl Judgement { + /// Returns `true` if this judgement is indicative of a deposit being currently held. This means + /// it should not be cleared or replaced except by an operation which utilizes the deposit. 
+ fn has_deposit(&self) -> bool { + match self { + Judgement::FeePaid(_) => true, + _ => false, + } + } + + /// Returns `true` if this judgement is one that should not be generally be replaced outside + /// of specialized handlers. Examples include "malicious" judgements and deposit-holding + /// judgements. + fn is_sticky(&self) -> bool { + match self { + Judgement::FeePaid(_) | Judgement::Erroneous => true, + _ => false, + } + } } /// The fields that we use to identify the owner of an account with. Each corresponds to a field @@ -246,14 +244,14 @@ impl< #[repr(u64)] #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, BitFlags, RuntimeDebug)] pub enum IdentityField { - Display = 0b0000000000000000000000000000000000000000000000000000000000000001, - Legal = 0b0000000000000000000000000000000000000000000000000000000000000010, - Web = 0b0000000000000000000000000000000000000000000000000000000000000100, - Riot = 0b0000000000000000000000000000000000000000000000000000000000001000, - Email = 0b0000000000000000000000000000000000000000000000000000000000010000, - PgpFingerprint = 0b0000000000000000000000000000000000000000000000000000000000100000, - Image = 0b0000000000000000000000000000000000000000000000000000000001000000, - Twitter = 0b0000000000000000000000000000000000000000000000000000000010000000, + Display = 0b0000000000000000000000000000000000000000000000000000000000000001, + Legal = 0b0000000000000000000000000000000000000000000000000000000000000010, + Web = 0b0000000000000000000000000000000000000000000000000000000000000100, + Riot = 0b0000000000000000000000000000000000000000000000000000000000001000, + Email = 0b0000000000000000000000000000000000000000000000000000000000010000, + PgpFingerprint = 0b0000000000000000000000000000000000000000000000000000000000100000, + Image = 0b0000000000000000000000000000000000000000000000000000000001000000, + Twitter = 0b0000000000000000000000000000000000000000000000000000000010000000, } /// Wrapper type for `BitFlags` that implements 
`Codec`. @@ -262,15 +260,17 @@ pub struct IdentityFields(BitFlags); impl Eq for IdentityFields {} impl Encode for IdentityFields { - fn using_encoded R>(&self, f: F) -> R { - self.0.bits().using_encoded(f) - } + fn using_encoded R>(&self, f: F) -> R { + self.0.bits().using_encoded(f) + } } impl Decode for IdentityFields { - fn decode(input: &mut I) -> sp_std::result::Result { - let field = u64::decode(input)?; - Ok(Self(>::from_bits(field as u64).map_err(|_| "invalid value")?)) - } + fn decode(input: &mut I) -> sp_std::result::Result { + let field = u64::decode(input)?; + Ok(Self( + >::from_bits(field as u64).map_err(|_| "invalid value")?, + )) + } } /// Information concerning the identity of the controller of an account. @@ -280,49 +280,49 @@ impl Decode for IdentityFields { #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] #[cfg_attr(test, derive(Default))] pub struct IdentityInfo { - /// Additional fields of the identity that are not catered for with the struct's explicit - /// fields. - pub additional: Vec<(Data, Data)>, - - /// A reasonable display name for the controller of the account. This should be whatever it is - /// that it is typically known as and should not be confusable with other entities, given - /// reasonable context. - /// - /// Stored as UTF-8. - pub display: Data, - - /// The full legal name in the local jurisdiction of the entity. This might be a bit - /// long-winded. - /// - /// Stored as UTF-8. - pub legal: Data, - - /// A representative website held by the controller of the account. - /// - /// NOTE: `https://` is automatically prepended. - /// - /// Stored as UTF-8. - pub web: Data, - - /// The Riot/Matrix handle held by the controller of the account. - /// - /// Stored as UTF-8. - pub riot: Data, - - /// The email address of the controller of the account. - /// - /// Stored as UTF-8. - pub email: Data, - - /// The PGP/GPG public key of the controller of the account. 
- pub pgp_fingerprint: Option<[u8; 20]>, - - /// A graphic image representing the controller of the account. Should be a company, - /// organization or project logo or a headshot in the case of a human. - pub image: Data, - - /// The Twitter identity. The leading `@` character may be elided. - pub twitter: Data, + /// Additional fields of the identity that are not catered for with the struct's explicit + /// fields. + pub additional: Vec<(Data, Data)>, + + /// A reasonable display name for the controller of the account. This should be whatever it is + /// that it is typically known as and should not be confusable with other entities, given + /// reasonable context. + /// + /// Stored as UTF-8. + pub display: Data, + + /// The full legal name in the local jurisdiction of the entity. This might be a bit + /// long-winded. + /// + /// Stored as UTF-8. + pub legal: Data, + + /// A representative website held by the controller of the account. + /// + /// NOTE: `https://` is automatically prepended. + /// + /// Stored as UTF-8. + pub web: Data, + + /// The Riot/Matrix handle held by the controller of the account. + /// + /// Stored as UTF-8. + pub riot: Data, + + /// The email address of the controller of the account. + /// + /// Stored as UTF-8. + pub email: Data, + + /// The PGP/GPG public key of the controller of the account. + pub pgp_fingerprint: Option<[u8; 20]>, + + /// A graphic image representing the controller of the account. Should be a company, + /// organization or project logo or a headshot in the case of a human. + pub image: Data, + + /// The Twitter identity. The leading `@` character may be elided. + pub twitter: Data, } /// Information concerning the identity of the controller of an account. @@ -330,910 +330,1001 @@ pub struct IdentityInfo { /// NOTE: This is stored separately primarily to facilitate the addition of extra fields in a /// backwards compatible way through a specialized `Decode` impl. 
#[derive(Clone, Encode, Eq, PartialEq, RuntimeDebug)] -pub struct Registration< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq -> { - /// Judgements from the registrars on this identity. Stored ordered by `RegistrarIndex`. There - /// may be only a single judgement from each registrar. - pub judgements: Vec<(RegistrarIndex, Judgement)>, +pub struct Registration { + /// Judgements from the registrars on this identity. Stored ordered by `RegistrarIndex`. There + /// may be only a single judgement from each registrar. + pub judgements: Vec<(RegistrarIndex, Judgement)>, - /// Amount held on deposit for this information. - pub deposit: Balance, + /// Amount held on deposit for this information. + pub deposit: Balance, - /// Information on the identity. - pub info: IdentityInfo, + /// Information on the identity. + pub info: IdentityInfo, } -impl < - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq + Zero + Add, -> Registration { - fn total_deposit(&self) -> Balance { - self.deposit + self.judgements.iter() - .map(|(_, ref j)| if let Judgement::FeePaid(fee) = j { *fee } else { Zero::zero() }) - .fold(Zero::zero(), |a, i| a + i) - } +impl + Registration +{ + fn total_deposit(&self) -> Balance { + self.deposit + + self + .judgements + .iter() + .map(|(_, ref j)| { + if let Judgement::FeePaid(fee) = j { + *fee + } else { + Zero::zero() + } + }) + .fold(Zero::zero(), |a, i| a + i) + } } -impl< - Balance: Encode + Decode + Copy + Clone + Debug + Eq + PartialEq, -> Decode for Registration { - fn decode(input: &mut I) -> sp_std::result::Result { - let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; - Ok(Self { judgements, deposit, info }) - } +impl Decode + for Registration +{ + fn decode(input: &mut I) -> sp_std::result::Result { + let (judgements, deposit, info) = Decode::decode(&mut AppendZerosInput::new(input))?; + Ok(Self { + judgements, + deposit, + info, + }) + } } /// Information concerning a 
registrar. #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] pub struct RegistrarInfo< - Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, - AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq + Balance: Encode + Decode + Clone + Debug + Eq + PartialEq, + AccountId: Encode + Decode + Clone + Debug + Eq + PartialEq, > { - /// The account of the registrar. - pub account: AccountId, + /// The account of the registrar. + pub account: AccountId, - /// Amount required to be given to the registrar for them to provide judgement. - pub fee: Balance, + /// Amount required to be given to the registrar for them to provide judgement. + pub fee: Balance, - /// Relevant fields for this registrar. Registrar judgements are limited to attestations on - /// these fields. - pub fields: IdentityFields, + /// Relevant fields for this registrar. Registrar judgements are limited to attestations on + /// these fields. + pub fields: IdentityFields, } decl_storage! { - trait Store for Module as Identity { - /// Information that is pertinent to identify the entity behind an account. - pub IdentityOf get(fn identity): - map hasher(twox_64_concat) T::AccountId => Option>>; - - /// The super-identity of an alternative "sub" identity together with its name, within that - /// context. If the account is not some other account's sub-identity, then just `None`. - pub SuperOf get(fn super_of): - map hasher(blake2_128_concat) T::AccountId => Option<(T::AccountId, Data)>; - - /// Alternative "sub" identities of this account. - /// - /// The first item is the deposit, the second is a vector of the accounts. - pub SubsOf get(fn subs_of): - map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); - - /// The set of registrars. Not expected to get very big as can only be added through a - /// special origin (likely a council motion). - /// - /// The index into this can be cast to `RegistrarIndex` to get a valid value. 
- pub Registrars get(fn registrars): Vec, T::AccountId>>>; - } + trait Store for Module as Identity { + /// Information that is pertinent to identify the entity behind an account. + pub IdentityOf get(fn identity): + map hasher(twox_64_concat) T::AccountId => Option>>; + + /// The super-identity of an alternative "sub" identity together with its name, within that + /// context. If the account is not some other account's sub-identity, then just `None`. + pub SuperOf get(fn super_of): + map hasher(blake2_128_concat) T::AccountId => Option<(T::AccountId, Data)>; + + /// Alternative "sub" identities of this account. + /// + /// The first item is the deposit, the second is a vector of the accounts. + pub SubsOf get(fn subs_of): + map hasher(twox_64_concat) T::AccountId => (BalanceOf, Vec); + + /// The set of registrars. Not expected to get very big as can only be added through a + /// special origin (likely a council motion). + /// + /// The index into this can be cast to `RegistrarIndex` to get a valid value. + pub Registrars get(fn registrars): Vec, T::AccountId>>>; + } } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { - /// A name was set or reset (which will remove all judgements). - IdentitySet(AccountId), - /// A name was cleared, and the given balance returned. - IdentityCleared(AccountId, Balance), - /// A name was removed and the given balance slashed. - IdentityKilled(AccountId, Balance), - /// A judgement was asked from a registrar. - JudgementRequested(AccountId, RegistrarIndex), - /// A judgement request was retracted. - JudgementUnrequested(AccountId, RegistrarIndex), - /// A judgement was given by a registrar. - JudgementGiven(AccountId, RegistrarIndex), - /// A registrar was added. - RegistrarAdded(RegistrarIndex), - } + pub enum Event + where + AccountId = ::AccountId, + Balance = BalanceOf, + { + /// A name was set or reset (which will remove all judgements). 
+ IdentitySet(AccountId), + /// A name was cleared, and the given balance returned. + IdentityCleared(AccountId, Balance), + /// A name was removed and the given balance slashed. + IdentityKilled(AccountId, Balance), + /// A judgement was asked from a registrar. + JudgementRequested(AccountId, RegistrarIndex), + /// A judgement request was retracted. + JudgementUnrequested(AccountId, RegistrarIndex), + /// A judgement was given by a registrar. + JudgementGiven(AccountId, RegistrarIndex), + /// A registrar was added. + RegistrarAdded(RegistrarIndex), + } ); decl_error! { - /// Error for the identity module. - pub enum Error for Module { - /// Too many subs-accounts. - TooManySubAccounts, - /// Account isn't found. - NotFound, - /// Account isn't named. - NotNamed, - /// Empty index. - EmptyIndex, - /// Fee is changed. - FeeChanged, - /// No identity found. - NoIdentity, - /// Sticky judgement. - StickyJudgement, - /// Judgement given. - JudgementGiven, - /// Invalid judgement. - InvalidJudgement, - /// The index is invalid. - InvalidIndex, - /// The target is invalid. - InvalidTarget, - /// Too many additional fields. - TooManyFields, + /// Error for the identity module. + pub enum Error for Module { + /// Too many subs-accounts. + TooManySubAccounts, + /// Account isn't found. + NotFound, + /// Account isn't named. + NotNamed, + /// Empty index. + EmptyIndex, + /// Fee is changed. + FeeChanged, + /// No identity found. + NoIdentity, + /// Sticky judgement. + StickyJudgement, + /// Judgement given. + JudgementGiven, + /// Invalid judgement. + InvalidJudgement, + /// The index is invalid. + InvalidIndex, + /// The target is invalid. + InvalidTarget, + /// Too many additional fields. + TooManyFields, } } decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what it's working on. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Add a registrar to the system. 
- /// - /// The dispatch origin for this call must be `RegistrarOrigin` or `Root`. - /// - /// - `account`: the account of the registrar. - /// - /// Emits `RegistrarAdded` if successful. - /// - /// # - /// - `O(R)` where `R` registrar-count (governance-bounded). - /// - One storage mutation (codec `O(R)`). - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn add_registrar(origin, account: T::AccountId) { - T::RegistrarOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let i = >::mutate(|r| { - r.push(Some(RegistrarInfo { account, fee: Zero::zero(), fields: Default::default() })); - (r.len() - 1) as RegistrarIndex - }); - - Self::deposit_event(RawEvent::RegistrarAdded(i)); - } - - /// Set an account's identity information and reserve the appropriate deposit. - /// - /// If the account already has identity information, the deposit is taken as part payment - /// for the new deposit. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must have a registered - /// identity. - /// - /// - `info`: The identity information. - /// - /// Emits `IdentitySet` if successful. - /// - /// # - /// - `O(X + X' + R)` where `X` additional-field-count (deposit-bounded and code-bounded). - /// - At most two balance operations. - /// - One storage mutation (codec-read `O(X' + R)`, codec-write `O(X + R)`). - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn set_identity(origin, info: IdentityInfo) { - let sender = ensure_signed(origin)?; - let extra_fields = info.additional.len() as u32; - ensure!(extra_fields <= T::MaxAdditionalFields::get(), Error::::TooManyFields); - let fd = >::from(extra_fields) * T::FieldDeposit::get(); - - let mut id = match >::get(&sender) { - Some(mut id) => { - // Only keep non-positive judgements. 
- id.judgements.retain(|j| j.1.is_sticky()); - id.info = info; - id - } - None => Registration { info, judgements: Vec::new(), deposit: Zero::zero() }, - }; - - let old_deposit = id.deposit; - id.deposit = T::BasicDeposit::get() + fd; - if id.deposit > old_deposit { - T::Currency::reserve(&sender, id.deposit - old_deposit)?; - } - if old_deposit > id.deposit { - let _ = T::Currency::unreserve(&sender, old_deposit - id.deposit); - } - - >::insert(&sender, id); - Self::deposit_event(RawEvent::IdentitySet(sender)); - } - - /// Set the sub-accounts of the sender. - /// - /// Payment: Any aggregate balance reserved by previous `set_subs` calls will be returned - /// and an amount `SubAccountDeposit` will be reserved for each item in `subs`. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must have a registered - /// identity. - /// - /// - `subs`: The identity's sub-accounts. - /// - /// # - /// - `O(S)` where `S` subs-count (hard- and deposit-bounded). - /// - At most two balance operations. - /// - At most O(2 * S + 1) storage mutations; codec complexity `O(1 * S + S * 1)`); - /// one storage-exists. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn set_subs(origin, subs: Vec<(T::AccountId, Data)>) { - let sender = ensure_signed(origin)?; - ensure!(>::contains_key(&sender), Error::::NotFound); - ensure!(subs.len() <= T::MaxSubAccounts::get() as usize, Error::::TooManySubAccounts); - - let (old_deposit, old_ids) = >::get(&sender); - let new_deposit = T::SubAccountDeposit::get() * >::from(subs.len() as u32); - - if old_deposit < new_deposit { - T::Currency::reserve(&sender, new_deposit - old_deposit)?; - } - // do nothing if they're equal. 
- if old_deposit > new_deposit { - let _ = T::Currency::unreserve(&sender, old_deposit - new_deposit); - } - - for s in old_ids.iter() { - >::remove(s); - } - let ids = subs.into_iter().map(|(id, name)| { - >::insert(&id, (sender.clone(), name)); - id - }).collect::>(); - - if ids.is_empty() { - >::remove(&sender); - } else { - >::insert(&sender, (new_deposit, ids)); - } - } - - /// Clear an account's identity info and all sub-account and return all deposits. - /// - /// Payment: All reserved balances on the account are returned. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must have a registered - /// identity. - /// - /// Emits `IdentityCleared` if successful. - /// - /// # - /// - `O(R + S + X)`. - /// - One balance-reserve operation. - /// - `S + 2` storage deletions. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn clear_identity(origin) { - let sender = ensure_signed(origin)?; - - let (subs_deposit, sub_ids) = >::take(&sender); - let deposit = >::take(&sender).ok_or(Error::::NotNamed)?.total_deposit() - + subs_deposit; - for sub in sub_ids.iter() { - >::remove(sub); - } - - let _ = T::Currency::unreserve(&sender, deposit.clone()); - - Self::deposit_event(RawEvent::IdentityCleared(sender, deposit)); - } - - /// Request a judgement from a registrar. - /// - /// Payment: At most `max_fee` will be reserved for payment to the registrar if judgement - /// given. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must have a - /// registered identity. - /// - /// - `reg_index`: The index of the registrar whose judgement is requested. - /// - `max_fee`: The maximum fee that may be paid. This should just be auto-populated as: - /// - /// ```nocompile - /// Self::registrars(reg_index).unwrap().fee - /// ``` - /// - /// Emits `JudgementRequested` if successful. - /// - /// # - /// - `O(R + X)`. - /// - One balance-reserve operation. 
- /// - Storage: 1 read `O(R)`, 1 mutate `O(X + R)`. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn request_judgement(origin, - #[compact] reg_index: RegistrarIndex, - #[compact] max_fee: BalanceOf, - ) { - let sender = ensure_signed(origin)?; - let registrars = >::get(); - let registrar = registrars.get(reg_index as usize).and_then(Option::as_ref) - .ok_or(Error::::EmptyIndex)?; - ensure!(max_fee >= registrar.fee, Error::::FeeChanged); - let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; - - let item = (reg_index, Judgement::FeePaid(registrar.fee)); - match id.judgements.binary_search_by_key(®_index, |x| x.0) { - Ok(i) => if id.judgements[i].1.is_sticky() { - Err(Error::::StickyJudgement)? - } else { - id.judgements[i] = item - }, - Err(i) => id.judgements.insert(i, item), - } - - T::Currency::reserve(&sender, registrar.fee)?; - - >::insert(&sender, id); - - Self::deposit_event(RawEvent::JudgementRequested(sender, reg_index)); - } - - /// Cancel a previous request. - /// - /// Payment: A previously reserved deposit is returned on success. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must have a - /// registered identity. - /// - /// - `reg_index`: The index of the registrar whose judgement is no longer requested. - /// - /// Emits `JudgementUnrequested` if successful. - /// - /// # - /// - `O(R + X)`. - /// - One balance-reserve operation. - /// - One storage mutation `O(R + X)`. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn cancel_request(origin, reg_index: RegistrarIndex) { - let sender = ensure_signed(origin)?; - let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; - - let pos = id.judgements.binary_search_by_key(®_index, |x| x.0) - .map_err(|_| Error::::NotFound)?; - let fee = if let Judgement::FeePaid(fee) = id.judgements.remove(pos).1 { - fee - } else { - Err(Error::::JudgementGiven)? 
- }; - - let _ = T::Currency::unreserve(&sender, fee); - >::insert(&sender, id); - - Self::deposit_event(RawEvent::JudgementUnrequested(sender, reg_index)); - } - - /// Set the fee required for a judgement to be requested from a registrar. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must be the account - /// of the registrar whose index is `index`. - /// - /// - `index`: the index of the registrar whose fee is to be set. - /// - `fee`: the new fee. - /// - /// # - /// - `O(R)`. - /// - One storage mutation `O(R)`. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn set_fee(origin, - #[compact] index: RegistrarIndex, - #[compact] fee: BalanceOf, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - >::mutate(|rs| - rs.get_mut(index as usize) - .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.fee = fee; Some(()) } else { None }) - .ok_or_else(|| Error::::InvalidIndex.into()) - ) - } - - /// Change the account associated with a registrar. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must be the account - /// of the registrar whose index is `index`. - /// - /// - `index`: the index of the registrar whose fee is to be set. - /// - `new`: the new account ID. - /// - /// # - /// - `O(R)`. - /// - One storage mutation `O(R)`. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn set_account_id(origin, - #[compact] index: RegistrarIndex, - new: T::AccountId, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - >::mutate(|rs| - rs.get_mut(index as usize) - .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.account = new; Some(()) } else { None }) - .ok_or_else(|| Error::::InvalidIndex.into()) - ) - } - - /// Set the field information for a registrar. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must be the account - /// of the registrar whose index is `index`. 
- /// - /// - `index`: the index of the registrar whose fee is to be set. - /// - `fields`: the fields that the registrar concerns themselves with. - /// - /// # - /// - `O(R)`. - /// - One storage mutation `O(R)`. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn set_fields(origin, - #[compact] index: RegistrarIndex, - fields: IdentityFields, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - - >::mutate(|rs| - rs.get_mut(index as usize) - .and_then(|x| x.as_mut()) - .and_then(|r| if r.account == who { r.fields = fields; Some(()) } else { None }) - .ok_or_else(|| Error::::InvalidIndex.into()) - ) - } - - /// Provide a judgement for an account's identity. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must be the account - /// of the registrar whose index is `reg_index`. - /// - /// - `reg_index`: the index of the registrar whose judgement is being made. - /// - `target`: the account whose identity the judgement is upon. This must be an account - /// with a registered identity. - /// - `judgement`: the judgement of the registrar of index `reg_index` about `target`. - /// - /// Emits `JudgementGiven` if successful. - /// - /// # - /// - `O(R + X)`. - /// - One balance-transfer operation. - /// - Up to one account-lookup operation. - /// - Storage: 1 read `O(R)`, 1 mutate `O(R + X)`. - /// - One event. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn provide_judgement(origin, - #[compact] reg_index: RegistrarIndex, - target: ::Source, - judgement: Judgement>, - ) { - let sender = ensure_signed(origin)?; - let target = T::Lookup::lookup(target)?; - ensure!(!judgement.has_deposit(), Error::::InvalidJudgement); - >::get() - .get(reg_index as usize) - .and_then(Option::as_ref) - .and_then(|r| if r.account == sender { Some(r) } else { None }) - .ok_or(Error::::InvalidIndex)?; - let mut id = >::get(&target).ok_or(Error::::InvalidTarget)?; - - let item = (reg_index, judgement); - match id.judgements.binary_search_by_key(®_index, |x| x.0) { - Ok(position) => { - if let Judgement::FeePaid(fee) = id.judgements[position].1 { - let _ = T::Currency::repatriate_reserved(&target, &sender, fee, BalanceStatus::Free); - } - id.judgements[position] = item - } - Err(position) => id.judgements.insert(position, item), - } - >::insert(&target, id); - Self::deposit_event(RawEvent::JudgementGiven(target, reg_index)); - } - - /// Remove an account's identity and sub-account information and slash the deposits. - /// - /// Payment: Reserved balances from `set_subs` and `set_identity` are slashed and handled by - /// `Slash`. Verification request deposits are not returned; they should be cancelled - /// manually using `cancel_request`. - /// - /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. - /// - /// - `target`: the account whose identity the judgement is upon. This must be an account - /// with a registered identity. - /// - /// Emits `IdentityKilled` if successful. - /// - /// # - /// - `O(R + S + X)`. - /// - One balance-reserve operation. - /// - `S + 2` storage mutations. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn kill_identity(origin, target: ::Source) { - T::ForceOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - // Figure out who we're meant to be clearing. 
- let target = T::Lookup::lookup(target)?; - // Grab their deposit (and check that they have one). - let (subs_deposit, sub_ids) = >::take(&target); - let deposit = >::take(&target).ok_or(Error::::NotNamed)?.total_deposit() - + subs_deposit; - for sub in sub_ids.iter() { - >::remove(sub); - } - // Slash their deposit from them. - T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit).0); - - Self::deposit_event(RawEvent::IdentityKilled(target, deposit)); - } - } + // Simple declaration of the `Module` type. Lets the macro know what it's working on. + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Add a registrar to the system. + /// + /// The dispatch origin for this call must be `RegistrarOrigin` or `Root`. + /// + /// - `account`: the account of the registrar. + /// + /// Emits `RegistrarAdded` if successful. + /// + /// # + /// - `O(R)` where `R` registrar-count (governance-bounded). + /// - One storage mutation (codec `O(R)`). + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn add_registrar(origin, account: T::AccountId) { + T::RegistrarOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let i = >::mutate(|r| { + r.push(Some(RegistrarInfo { account, fee: Zero::zero(), fields: Default::default() })); + (r.len() - 1) as RegistrarIndex + }); + + Self::deposit_event(RawEvent::RegistrarAdded(i)); + } + + /// Set an account's identity information and reserve the appropriate deposit. + /// + /// If the account already has identity information, the deposit is taken as part payment + /// for the new deposit. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must have a registered + /// identity. + /// + /// - `info`: The identity information. + /// + /// Emits `IdentitySet` if successful. 
+ /// + /// # + /// - `O(X + X' + R)` where `X` additional-field-count (deposit-bounded and code-bounded). + /// - At most two balance operations. + /// - One storage mutation (codec-read `O(X' + R)`, codec-write `O(X + R)`). + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn set_identity(origin, info: IdentityInfo) { + let sender = ensure_signed(origin)?; + let extra_fields = info.additional.len() as u32; + ensure!(extra_fields <= T::MaxAdditionalFields::get(), Error::::TooManyFields); + let fd = >::from(extra_fields) * T::FieldDeposit::get(); + + let mut id = match >::get(&sender) { + Some(mut id) => { + // Only keep non-positive judgements. + id.judgements.retain(|j| j.1.is_sticky()); + id.info = info; + id + } + None => Registration { info, judgements: Vec::new(), deposit: Zero::zero() }, + }; + + let old_deposit = id.deposit; + id.deposit = T::BasicDeposit::get() + fd; + if id.deposit > old_deposit { + T::Currency::reserve(&sender, id.deposit - old_deposit)?; + } + if old_deposit > id.deposit { + let _ = T::Currency::unreserve(&sender, old_deposit - id.deposit); + } + + >::insert(&sender, id); + Self::deposit_event(RawEvent::IdentitySet(sender)); + } + + /// Set the sub-accounts of the sender. + /// + /// Payment: Any aggregate balance reserved by previous `set_subs` calls will be returned + /// and an amount `SubAccountDeposit` will be reserved for each item in `subs`. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must have a registered + /// identity. + /// + /// - `subs`: The identity's sub-accounts. + /// + /// # + /// - `O(S)` where `S` subs-count (hard- and deposit-bounded). + /// - At most two balance operations. + /// - At most O(2 * S + 1) storage mutations; codec complexity `O(1 * S + S * 1)`); + /// one storage-exists. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn set_subs(origin, subs: Vec<(T::AccountId, Data)>) { + let sender = ensure_signed(origin)?; + ensure!(>::contains_key(&sender), Error::::NotFound); + ensure!(subs.len() <= T::MaxSubAccounts::get() as usize, Error::::TooManySubAccounts); + + let (old_deposit, old_ids) = >::get(&sender); + let new_deposit = T::SubAccountDeposit::get() * >::from(subs.len() as u32); + + if old_deposit < new_deposit { + T::Currency::reserve(&sender, new_deposit - old_deposit)?; + } + // do nothing if they're equal. + if old_deposit > new_deposit { + let _ = T::Currency::unreserve(&sender, old_deposit - new_deposit); + } + + for s in old_ids.iter() { + >::remove(s); + } + let ids = subs.into_iter().map(|(id, name)| { + >::insert(&id, (sender.clone(), name)); + id + }).collect::>(); + + if ids.is_empty() { + >::remove(&sender); + } else { + >::insert(&sender, (new_deposit, ids)); + } + } + + /// Clear an account's identity info and all sub-account and return all deposits. + /// + /// Payment: All reserved balances on the account are returned. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must have a registered + /// identity. + /// + /// Emits `IdentityCleared` if successful. + /// + /// # + /// - `O(R + S + X)`. + /// - One balance-reserve operation. + /// - `S + 2` storage deletions. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn clear_identity(origin) { + let sender = ensure_signed(origin)?; + + let (subs_deposit, sub_ids) = >::take(&sender); + let deposit = >::take(&sender).ok_or(Error::::NotNamed)?.total_deposit() + + subs_deposit; + for sub in sub_ids.iter() { + >::remove(sub); + } + + let _ = T::Currency::unreserve(&sender, deposit.clone()); + + Self::deposit_event(RawEvent::IdentityCleared(sender, deposit)); + } + + /// Request a judgement from a registrar. 
+ /// + /// Payment: At most `max_fee` will be reserved for payment to the registrar if judgement + /// given. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must have a + /// registered identity. + /// + /// - `reg_index`: The index of the registrar whose judgement is requested. + /// - `max_fee`: The maximum fee that may be paid. This should just be auto-populated as: + /// + /// ```nocompile + /// Self::registrars(reg_index).unwrap().fee + /// ``` + /// + /// Emits `JudgementRequested` if successful. + /// + /// # + /// - `O(R + X)`. + /// - One balance-reserve operation. + /// - Storage: 1 read `O(R)`, 1 mutate `O(X + R)`. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn request_judgement(origin, + #[compact] reg_index: RegistrarIndex, + #[compact] max_fee: BalanceOf, + ) { + let sender = ensure_signed(origin)?; + let registrars = >::get(); + let registrar = registrars.get(reg_index as usize).and_then(Option::as_ref) + .ok_or(Error::::EmptyIndex)?; + ensure!(max_fee >= registrar.fee, Error::::FeeChanged); + let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; + + let item = (reg_index, Judgement::FeePaid(registrar.fee)); + match id.judgements.binary_search_by_key(®_index, |x| x.0) { + Ok(i) => if id.judgements[i].1.is_sticky() { + Err(Error::::StickyJudgement)? + } else { + id.judgements[i] = item + }, + Err(i) => id.judgements.insert(i, item), + } + + T::Currency::reserve(&sender, registrar.fee)?; + + >::insert(&sender, id); + + Self::deposit_event(RawEvent::JudgementRequested(sender, reg_index)); + } + + /// Cancel a previous request. + /// + /// Payment: A previously reserved deposit is returned on success. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must have a + /// registered identity. + /// + /// - `reg_index`: The index of the registrar whose judgement is no longer requested. + /// + /// Emits `JudgementUnrequested` if successful. 
+ /// + /// # + /// - `O(R + X)`. + /// - One balance-reserve operation. + /// - One storage mutation `O(R + X)`. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn cancel_request(origin, reg_index: RegistrarIndex) { + let sender = ensure_signed(origin)?; + let mut id = >::get(&sender).ok_or(Error::::NoIdentity)?; + + let pos = id.judgements.binary_search_by_key(®_index, |x| x.0) + .map_err(|_| Error::::NotFound)?; + let fee = if let Judgement::FeePaid(fee) = id.judgements.remove(pos).1 { + fee + } else { + Err(Error::::JudgementGiven)? + }; + + let _ = T::Currency::unreserve(&sender, fee); + >::insert(&sender, id); + + Self::deposit_event(RawEvent::JudgementUnrequested(sender, reg_index)); + } + + /// Set the fee required for a judgement to be requested from a registrar. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must be the account + /// of the registrar whose index is `index`. + /// + /// - `index`: the index of the registrar whose fee is to be set. + /// - `fee`: the new fee. + /// + /// # + /// - `O(R)`. + /// - One storage mutation `O(R)`. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn set_fee(origin, + #[compact] index: RegistrarIndex, + #[compact] fee: BalanceOf, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + >::mutate(|rs| + rs.get_mut(index as usize) + .and_then(|x| x.as_mut()) + .and_then(|r| if r.account == who { r.fee = fee; Some(()) } else { None }) + .ok_or_else(|| Error::::InvalidIndex.into()) + ) + } + + /// Change the account associated with a registrar. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must be the account + /// of the registrar whose index is `index`. + /// + /// - `index`: the index of the registrar whose fee is to be set. + /// - `new`: the new account ID. + /// + /// # + /// - `O(R)`. + /// - One storage mutation `O(R)`. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn set_account_id(origin, + #[compact] index: RegistrarIndex, + new: T::AccountId, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + >::mutate(|rs| + rs.get_mut(index as usize) + .and_then(|x| x.as_mut()) + .and_then(|r| if r.account == who { r.account = new; Some(()) } else { None }) + .ok_or_else(|| Error::::InvalidIndex.into()) + ) + } + + /// Set the field information for a registrar. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must be the account + /// of the registrar whose index is `index`. + /// + /// - `index`: the index of the registrar whose fee is to be set. + /// - `fields`: the fields that the registrar concerns themselves with. + /// + /// # + /// - `O(R)`. + /// - One storage mutation `O(R)`. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn set_fields(origin, + #[compact] index: RegistrarIndex, + fields: IdentityFields, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + + >::mutate(|rs| + rs.get_mut(index as usize) + .and_then(|x| x.as_mut()) + .and_then(|r| if r.account == who { r.fields = fields; Some(()) } else { None }) + .ok_or_else(|| Error::::InvalidIndex.into()) + ) + } + + /// Provide a judgement for an account's identity. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must be the account + /// of the registrar whose index is `reg_index`. + /// + /// - `reg_index`: the index of the registrar whose judgement is being made. + /// - `target`: the account whose identity the judgement is upon. This must be an account + /// with a registered identity. + /// - `judgement`: the judgement of the registrar of index `reg_index` about `target`. + /// + /// Emits `JudgementGiven` if successful. + /// + /// # + /// - `O(R + X)`. + /// - One balance-transfer operation. + /// - Up to one account-lookup operation. + /// - Storage: 1 read `O(R)`, 1 mutate `O(R + X)`. 
+ /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn provide_judgement(origin, + #[compact] reg_index: RegistrarIndex, + target: ::Source, + judgement: Judgement>, + ) { + let sender = ensure_signed(origin)?; + let target = T::Lookup::lookup(target)?; + ensure!(!judgement.has_deposit(), Error::::InvalidJudgement); + >::get() + .get(reg_index as usize) + .and_then(Option::as_ref) + .and_then(|r| if r.account == sender { Some(r) } else { None }) + .ok_or(Error::::InvalidIndex)?; + let mut id = >::get(&target).ok_or(Error::::InvalidTarget)?; + + let item = (reg_index, judgement); + match id.judgements.binary_search_by_key(®_index, |x| x.0) { + Ok(position) => { + if let Judgement::FeePaid(fee) = id.judgements[position].1 { + let _ = T::Currency::repatriate_reserved(&target, &sender, fee, BalanceStatus::Free); + } + id.judgements[position] = item + } + Err(position) => id.judgements.insert(position, item), + } + >::insert(&target, id); + Self::deposit_event(RawEvent::JudgementGiven(target, reg_index)); + } + + /// Remove an account's identity and sub-account information and slash the deposits. + /// + /// Payment: Reserved balances from `set_subs` and `set_identity` are slashed and handled by + /// `Slash`. Verification request deposits are not returned; they should be cancelled + /// manually using `cancel_request`. + /// + /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. + /// + /// - `target`: the account whose identity the judgement is upon. This must be an account + /// with a registered identity. + /// + /// Emits `IdentityKilled` if successful. + /// + /// # + /// - `O(R + S + X)`. + /// - One balance-reserve operation. + /// - `S + 2` storage mutations. + /// - One event. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn kill_identity(origin, target: ::Source) { + T::ForceOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + // Figure out who we're meant to be clearing. + let target = T::Lookup::lookup(target)?; + // Grab their deposit (and check that they have one). + let (subs_deposit, sub_ids) = >::take(&target); + let deposit = >::take(&target).ok_or(Error::::NotNamed)?.total_deposit() + + subs_deposit; + for sub in sub_ids.iter() { + >::remove(sub); + } + // Slash their deposit from them. + T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit).0); + + Self::deposit_event(RawEvent::IdentityKilled(target, deposit)); + } + } } impl Module { - /// Get the subs of an account. - pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { - SubsOf::::get(who).1 - .into_iter() - .filter_map(|a| SuperOf::::get(&a).map(|x| (a, x.1))) - .collect() - } + /// Get the subs of an account. + pub fn subs(who: &T::AccountId) -> Vec<(T::AccountId, Data)> { + SubsOf::::get(who) + .1 + .into_iter() + .filter_map(|a| SuperOf::::get(&a).map(|x| (a, x.1))) + .collect() + } } #[cfg(test)] mod tests { - use super::*; - - use sp_runtime::traits::BadOrigin; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types, - }; - use sp_core::H256; - use frame_system::EnsureSignedBy; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - // For testing the pallet, we construct most of a mock runtime. 
This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - parameter_types! { - pub const BasicDeposit: u64 = 10; - pub const FieldDeposit: u64 = 10; - pub const SubAccountDeposit: u64 = 10; - pub const MaxSubAccounts: u32 = 2; - pub const MaxAdditionalFields: u32 = 2; - } - ord_parameter_types! 
{ - pub const One: u64 = 1; - pub const Two: u64 = 2; - } - impl Trait for Test { - type Event = (); - type Currency = Balances; - type Slashed = (); - type BasicDeposit = BasicDeposit; - type FieldDeposit = FieldDeposit; - type SubAccountDeposit = SubAccountDeposit; - type MaxSubAccounts = MaxSubAccounts; - type MaxAdditionalFields = MaxAdditionalFields; - type RegistrarOrigin = EnsureSignedBy; - type ForceOrigin = EnsureSignedBy; - } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Identity = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10), - (2, 10), - (3, 10), - (10, 100), - (20, 100), - (30, 100), - ], - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - fn ten() -> IdentityInfo { - IdentityInfo { - display: Data::Raw(b"ten".to_vec()), - legal: Data::Raw(b"The Right Ordinal Ten, Esq.".to_vec()), - .. 
Default::default() - } - } - - #[test] - fn trailing_zeros_decodes_into_default_data() { - let encoded = Data::Raw(b"Hello".to_vec()).encode(); - assert!(<(Data, Data)>::decode(&mut &encoded[..]).is_err()); - let input = &mut &encoded[..]; - let (a, b) = <(Data, Data)>::decode(&mut AppendZerosInput::new(input)).unwrap(); - assert_eq!(a, Data::Raw(b"Hello".to_vec())); - assert_eq!(b, Data::None); - } - - #[test] - fn adding_registrar_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); - assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields)); - assert_eq!(Identity::registrars(), vec![ - Some(RegistrarInfo { account: 3, fee: 10, fields }) - ]); - }); - } - - #[test] - fn registration_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - let mut three_fields = ten(); - three_fields.additional.push(Default::default()); - three_fields.additional.push(Default::default()); - three_fields.additional.push(Default::default()); - assert_noop!( - Identity::set_identity(Origin::signed(10), three_fields), - Error::::TooManyFields - ); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_eq!(Identity::identity(10).unwrap().info, ten()); - assert_eq!(Balances::free_balance(10), 90); - assert_ok!(Identity::clear_identity(Origin::signed(10))); - assert_eq!(Balances::free_balance(10), 100); - assert_noop!(Identity::clear_identity(Origin::signed(10)), Error::::NotNamed); - }); - } - - #[test] - fn uninvited_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), - Error::::InvalidIndex - ); - - assert_ok!(Identity::add_registrar(Origin::signed(1), 
3)); - assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), - Error::::InvalidTarget - ); - - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_noop!( - Identity::provide_judgement(Origin::signed(10), 0, 10, Judgement::Reasonable), - Error::::InvalidIndex - ); - assert_noop!( - Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::FeePaid(1)), - Error::::InvalidJudgement - ); - - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); - assert_eq!(Identity::identity(10).unwrap().judgements, vec![(0, Judgement::Reasonable)]); - }); - } - - #[test] - fn clearing_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); - assert_ok!(Identity::clear_identity(Origin::signed(10))); - assert_eq!(Identity::identity(10), None); - }); - } - - #[test] - fn killing_slashing_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_noop!(Identity::kill_identity(Origin::signed(1), 10), BadOrigin); - assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); - assert_eq!(Identity::identity(10), None); - assert_eq!(Balances::free_balance(10), 90); - assert_noop!(Identity::kill_identity(Origin::signed(2), 10), Error::::NotNamed); - }); - } - - #[test] - fn setting_subaccounts_should_work() { - new_test_ext().execute_with(|| { - let mut subs = vec![(20, Data::Raw(vec![40; 1]))]; - assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::NotFound); - - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 80); - assert_eq!(Identity::subs_of(10), (10, 
vec![20])); - assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); - - // push another item and re-set it. - subs.push((30, Data::Raw(vec![50; 1]))); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 70); - assert_eq!(Identity::subs_of(10), (20, vec![20, 30])); - assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); - assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); - - // switch out one of the items and re-set. - subs[0] = (40, Data::Raw(vec![60; 1])); - assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); - assert_eq!(Balances::free_balance(10), 70); // no change in the balance - assert_eq!(Identity::subs_of(10), (20, vec![40, 30])); - assert_eq!(Identity::super_of(20), None); - assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); - assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1])))); - - // clear - assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); - assert_eq!(Balances::free_balance(10), 90); - assert_eq!(Identity::subs_of(10), (0, vec![])); - assert_eq!(Identity::super_of(30), None); - assert_eq!(Identity::super_of(40), None); - - subs.push((20, Data::Raw(vec![40; 1]))); - assert_noop!(Identity::set_subs(Origin::signed(10), subs.clone()), Error::::TooManySubAccounts); - }); - } - - #[test] - fn clearing_account_should_remove_subaccounts_and_refund() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); - assert_ok!(Identity::clear_identity(Origin::signed(10))); - assert_eq!(Balances::free_balance(10), 100); - assert!(Identity::super_of(20).is_none()); - }); - } - - #[test] - fn killing_account_should_remove_subaccounts_and_not_refund() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - 
assert_ok!(Identity::set_subs(Origin::signed(10), vec![(20, Data::Raw(vec![40; 1]))])); - assert_ok!(Identity::kill_identity(Origin::ROOT, 10)); - assert_eq!(Balances::free_balance(10), 80); - assert!(Identity::super_of(20).is_none()); - }); - } - - #[test] - fn cancelling_requested_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NoIdentity); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); - assert_ok!(Identity::cancel_request(Origin::signed(10), 0)); - assert_eq!(Balances::free_balance(10), 90); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::NotFound); - - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable)); - assert_noop!(Identity::cancel_request(Origin::signed(10), 0), Error::::JudgementGiven); - }); - } - - #[test] - fn requesting_judgement_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(Origin::signed(10), ten())); - assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 9), Error::::FeeChanged); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); - // 10 for the judgement request, 10 for the identity. - assert_eq!(Balances::free_balance(10), 80); - - // Re-requesting won't work as we already paid. - assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Erroneous)); - // Registrar got their payment now. - assert_eq!(Balances::free_balance(3), 20); - - // Re-requesting still won't work as it's erroneous. 
- assert_noop!(Identity::request_judgement(Origin::signed(10), 0, 10), Error::::StickyJudgement); - - // Requesting from a second registrar still works. - assert_ok!(Identity::add_registrar(Origin::signed(1), 4)); - assert_ok!(Identity::request_judgement(Origin::signed(10), 1, 10)); - - // Re-requesting after the judgement has been reduced works. - assert_ok!(Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::OutOfDate)); - assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); - }); - } - - #[test] - fn field_deposit_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); - assert_ok!(Identity::set_identity(Origin::signed(10), IdentityInfo { - additional: vec![ - (Data::Raw(b"number".to_vec()), Data::Raw(10u32.encode())), - (Data::Raw(b"text".to_vec()), Data::Raw(b"10".to_vec())), - ], .. Default::default() - })); - assert_eq!(Balances::free_balance(10), 70); - }); - } - - #[test] - fn setting_account_id_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); - // account 4 cannot change the first registrar's identity since it's owned by 3. - assert_noop!(Identity::set_account_id(Origin::signed(4), 0, 3), Error::::InvalidIndex); - // account 3 can, because that's the registrar's current account. - assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4)); - // account 4 can now, because that's their new ID. - assert_ok!(Identity::set_account_id(Origin::signed(4), 0, 3)); - }); - } + use super::*; + + use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, ord_parameter_types, parameter_types, + weights::Weight, + }; + use frame_system::EnsureSignedBy; + use sp_core::H256; + use sp_runtime::traits::BadOrigin; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. 
`u64` is used as the `AccountId` and no `Signature`s are required. + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + } + parameter_types! { + pub const ExistentialDeposit: u64 = 1; + } + impl pallet_balances::Trait for Test { + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + } + parameter_types! { + pub const BasicDeposit: u64 = 10; + pub const FieldDeposit: u64 = 10; + pub const SubAccountDeposit: u64 = 10; + pub const MaxSubAccounts: u32 = 2; + pub const MaxAdditionalFields: u32 = 2; + } + ord_parameter_types! 
{ + pub const One: u64 = 1; + pub const Two: u64 = 2; + } + impl Trait for Test { + type Event = (); + type Currency = Balances; + type Slashed = (); + type BasicDeposit = BasicDeposit; + type FieldDeposit = FieldDeposit; + type SubAccountDeposit = SubAccountDeposit; + type MaxSubAccounts = MaxSubAccounts; + type MaxAdditionalFields = MaxAdditionalFields; + type RegistrarOrigin = EnsureSignedBy; + type ForceOrigin = EnsureSignedBy; + } + type System = frame_system::Module; + type Balances = pallet_balances::Module; + type Identity = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. + pub fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + // We use default for brevity, but you can configure as desired if needed. + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 10), (3, 10), (10, 100), (20, 100), (30, 100)], + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } + + fn ten() -> IdentityInfo { + IdentityInfo { + display: Data::Raw(b"ten".to_vec()), + legal: Data::Raw(b"The Right Ordinal Ten, Esq.".to_vec()), + ..Default::default() + } + } + + #[test] + fn trailing_zeros_decodes_into_default_data() { + let encoded = Data::Raw(b"Hello".to_vec()).encode(); + assert!(<(Data, Data)>::decode(&mut &encoded[..]).is_err()); + let input = &mut &encoded[..]; + let (a, b) = <(Data, Data)>::decode(&mut AppendZerosInput::new(input)).unwrap(); + assert_eq!(a, Data::Raw(b"Hello".to_vec())); + assert_eq!(b, Data::None); + } + + #[test] + fn adding_registrar_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + let fields = IdentityFields(IdentityField::Display | IdentityField::Legal); + assert_ok!(Identity::set_fields(Origin::signed(3), 0, fields)); + assert_eq!( + 
Identity::registrars(), + vec![Some(RegistrarInfo { + account: 3, + fee: 10, + fields + })] + ); + }); + } + + #[test] + fn registration_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + let mut three_fields = ten(); + three_fields.additional.push(Default::default()); + three_fields.additional.push(Default::default()); + three_fields.additional.push(Default::default()); + assert_noop!( + Identity::set_identity(Origin::signed(10), three_fields), + Error::::TooManyFields + ); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_eq!(Identity::identity(10).unwrap().info, ten()); + assert_eq!(Balances::free_balance(10), 90); + assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_eq!(Balances::free_balance(10), 100); + assert_noop!( + Identity::clear_identity(Origin::signed(10)), + Error::::NotNamed + ); + }); + } + + #[test] + fn uninvited_judgement_should_work() { + new_test_ext().execute_with(|| { + assert_noop!( + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), + Error::::InvalidIndex + ); + + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_noop!( + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::Reasonable), + Error::::InvalidTarget + ); + + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_noop!( + Identity::provide_judgement(Origin::signed(10), 0, 10, Judgement::Reasonable), + Error::::InvalidIndex + ); + assert_noop!( + Identity::provide_judgement(Origin::signed(3), 0, 10, Judgement::FeePaid(1)), + Error::::InvalidJudgement + ); + + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable + )); + assert_eq!( + Identity::identity(10).unwrap().judgements, + vec![(0, Judgement::Reasonable)] + ); + }); + } + + #[test] + fn clearing_judgement_should_work() { + 
new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable + )); + assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_eq!(Identity::identity(10), None); + }); + } + + #[test] + fn killing_slashing_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_noop!(Identity::kill_identity(Origin::signed(1), 10), BadOrigin); + assert_ok!(Identity::kill_identity(Origin::signed(2), 10)); + assert_eq!(Identity::identity(10), None); + assert_eq!(Balances::free_balance(10), 90); + assert_noop!( + Identity::kill_identity(Origin::signed(2), 10), + Error::::NotNamed + ); + }); + } + + #[test] + fn setting_subaccounts_should_work() { + new_test_ext().execute_with(|| { + let mut subs = vec![(20, Data::Raw(vec![40; 1]))]; + assert_noop!( + Identity::set_subs(Origin::signed(10), subs.clone()), + Error::::NotFound + ); + + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_eq!(Balances::free_balance(10), 80); + assert_eq!(Identity::subs_of(10), (10, vec![20])); + assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); + + // push another item and re-set it. + subs.push((30, Data::Raw(vec![50; 1]))); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_eq!(Balances::free_balance(10), 70); + assert_eq!(Identity::subs_of(10), (20, vec![20, 30])); + assert_eq!(Identity::super_of(20), Some((10, Data::Raw(vec![40; 1])))); + assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); + + // switch out one of the items and re-set. 
+ subs[0] = (40, Data::Raw(vec![60; 1])); + assert_ok!(Identity::set_subs(Origin::signed(10), subs.clone())); + assert_eq!(Balances::free_balance(10), 70); // no change in the balance + assert_eq!(Identity::subs_of(10), (20, vec![40, 30])); + assert_eq!(Identity::super_of(20), None); + assert_eq!(Identity::super_of(30), Some((10, Data::Raw(vec![50; 1])))); + assert_eq!(Identity::super_of(40), Some((10, Data::Raw(vec![60; 1])))); + + // clear + assert_ok!(Identity::set_subs(Origin::signed(10), vec![])); + assert_eq!(Balances::free_balance(10), 90); + assert_eq!(Identity::subs_of(10), (0, vec![])); + assert_eq!(Identity::super_of(30), None); + assert_eq!(Identity::super_of(40), None); + + subs.push((20, Data::Raw(vec![40; 1]))); + assert_noop!( + Identity::set_subs(Origin::signed(10), subs.clone()), + Error::::TooManySubAccounts + ); + }); + } + + #[test] + fn clearing_account_should_remove_subaccounts_and_refund() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_subs( + Origin::signed(10), + vec![(20, Data::Raw(vec![40; 1]))] + )); + assert_ok!(Identity::clear_identity(Origin::signed(10))); + assert_eq!(Balances::free_balance(10), 100); + assert!(Identity::super_of(20).is_none()); + }); + } + + #[test] + fn killing_account_should_remove_subaccounts_and_not_refund() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::set_subs( + Origin::signed(10), + vec![(20, Data::Raw(vec![40; 1]))] + )); + assert_ok!(Identity::kill_identity(Origin::ROOT, 10)); + assert_eq!(Balances::free_balance(10), 80); + assert!(Identity::super_of(20).is_none()); + }); + } + + #[test] + fn cancelling_requested_judgement_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_noop!( + 
Identity::cancel_request(Origin::signed(10), 0), + Error::::NoIdentity + ); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + assert_ok!(Identity::cancel_request(Origin::signed(10), 0)); + assert_eq!(Balances::free_balance(10), 90); + assert_noop!( + Identity::cancel_request(Origin::signed(10), 0), + Error::::NotFound + ); + + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Reasonable + )); + assert_noop!( + Identity::cancel_request(Origin::signed(10), 0), + Error::::JudgementGiven + ); + }); + } + + #[test] + fn requesting_judgement_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::set_identity(Origin::signed(10), ten())); + assert_noop!( + Identity::request_judgement(Origin::signed(10), 0, 9), + Error::::FeeChanged + ); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + // 10 for the judgement request, 10 for the identity. + assert_eq!(Balances::free_balance(10), 80); + + // Re-requesting won't work as we already paid. + assert_noop!( + Identity::request_judgement(Origin::signed(10), 0, 10), + Error::::StickyJudgement + ); + assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::Erroneous + )); + // Registrar got their payment now. + assert_eq!(Balances::free_balance(3), 20); + + // Re-requesting still won't work as it's erroneous. + assert_noop!( + Identity::request_judgement(Origin::signed(10), 0, 10), + Error::::StickyJudgement + ); + + // Requesting from a second registrar still works. + assert_ok!(Identity::add_registrar(Origin::signed(1), 4)); + assert_ok!(Identity::request_judgement(Origin::signed(10), 1, 10)); + + // Re-requesting after the judgement has been reduced works. 
+ assert_ok!(Identity::provide_judgement( + Origin::signed(3), + 0, + 10, + Judgement::OutOfDate + )); + assert_ok!(Identity::request_judgement(Origin::signed(10), 0, 10)); + }); + } + + #[test] + fn field_deposit_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + assert_ok!(Identity::set_fee(Origin::signed(3), 0, 10)); + assert_ok!(Identity::set_identity( + Origin::signed(10), + IdentityInfo { + additional: vec![ + (Data::Raw(b"number".to_vec()), Data::Raw(10u32.encode())), + (Data::Raw(b"text".to_vec()), Data::Raw(b"10".to_vec())), + ], + ..Default::default() + } + )); + assert_eq!(Balances::free_balance(10), 70); + }); + } + + #[test] + fn setting_account_id_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Identity::add_registrar(Origin::signed(1), 3)); + // account 4 cannot change the first registrar's identity since it's owned by 3. + assert_noop!( + Identity::set_account_id(Origin::signed(4), 0, 3), + Error::::InvalidIndex + ); + // account 3 can, because that's the registrar's current account. + assert_ok!(Identity::set_account_id(Origin::signed(3), 0, 4)); + // account 4 can now, because that's their new ID. 
+ assert_ok!(Identity::set_account_id(Origin::signed(4), 0, 3)); + }); + } } diff --git a/frame/im-online/src/benchmarking.rs b/frame/im-online/src/benchmarking.rs index e0e74bccfa..4ca2c21cda 100644 --- a/frame/im-online/src/benchmarking.rs +++ b/frame/im-online/src/benchmarking.rs @@ -20,9 +20,9 @@ use super::*; -use frame_system::RawOrigin; use frame_benchmarking::benchmarks; -use sp_core::offchain::{OpaquePeerId, OpaqueMultiaddr}; +use frame_system::RawOrigin; +use sp_core::offchain::{OpaqueMultiaddr, OpaquePeerId}; use sp_runtime::traits::{ValidateUnsigned, Zero}; use sp_runtime::transaction_validity::TransactionSource; @@ -31,63 +31,72 @@ use crate::Module as ImOnline; const MAX_KEYS: u32 = 1000; const MAX_EXTERNAL_ADDRESSES: u32 = 100; -pub fn create_heartbeat(k: u32, e: u32) -> - Result<(crate::Heartbeat, ::Signature), &'static str> -{ - let mut keys = Vec::new(); - for _ in 0..k { - keys.push(T::AuthorityId::generate_pair(None)); - } - Keys::::put(keys.clone()); - - let network_state = OpaqueNetworkState { - peer_id: OpaquePeerId::default(), - external_addresses: vec![OpaqueMultiaddr::new(vec![0; 32]); e as usize], - }; - let input_heartbeat = Heartbeat { - block_number: T::BlockNumber::zero(), - network_state, - session_index: 0, - authority_index: k-1, - }; - - let encoded_heartbeat = input_heartbeat.encode(); - let authority_id = keys.get((k-1) as usize).ok_or("out of range")?; - let signature = authority_id.sign(&encoded_heartbeat).ok_or("couldn't make signature")?; - - Ok((input_heartbeat, signature)) +pub fn create_heartbeat( + k: u32, + e: u32, +) -> Result< + ( + crate::Heartbeat, + ::Signature, + ), + &'static str, +> { + let mut keys = Vec::new(); + for _ in 0..k { + keys.push(T::AuthorityId::generate_pair(None)); + } + Keys::::put(keys.clone()); + + let network_state = OpaqueNetworkState { + peer_id: OpaquePeerId::default(), + external_addresses: vec![OpaqueMultiaddr::new(vec![0; 32]); e as usize], + }; + let input_heartbeat = Heartbeat { + 
block_number: T::BlockNumber::zero(), + network_state, + session_index: 0, + authority_index: k - 1, + }; + + let encoded_heartbeat = input_heartbeat.encode(); + let authority_id = keys.get((k - 1) as usize).ok_or("out of range")?; + let signature = authority_id + .sign(&encoded_heartbeat) + .ok_or("couldn't make signature")?; + + Ok((input_heartbeat, signature)) } benchmarks! { - _{ } - - heartbeat { - let k in 1 .. MAX_KEYS; - let e in 1 .. MAX_EXTERNAL_ADDRESSES; - let (input_heartbeat, signature) = create_heartbeat::(k, e)?; - }: _(RawOrigin::None, input_heartbeat, signature) - - validate_unsigned { - let k in 1 .. MAX_KEYS; - let e in 1 .. MAX_EXTERNAL_ADDRESSES; - let (input_heartbeat, signature) = create_heartbeat::(k, e)?; - let call = Call::heartbeat(input_heartbeat, signature); - }: { - ImOnline::::validate_unsigned(TransactionSource::InBlock, &call)?; - } + _{ } + + heartbeat { + let k in 1 .. MAX_KEYS; + let e in 1 .. MAX_EXTERNAL_ADDRESSES; + let (input_heartbeat, signature) = create_heartbeat::(k, e)?; + }: _(RawOrigin::None, input_heartbeat, signature) + + validate_unsigned { + let k in 1 .. MAX_KEYS; + let e in 1 .. 
MAX_EXTERNAL_ADDRESSES; + let (input_heartbeat, signature) = create_heartbeat::(k, e)?; + let call = Call::heartbeat(input_heartbeat, signature); + }: { + ImOnline::::validate_unsigned(TransactionSource::InBlock, &call)?; + } } #[cfg(test)] mod tests { - use super::*; - use crate::mock::{new_test_ext, Runtime}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_heartbeat::()); - assert_ok!(test_benchmark_validate_unsigned::()); - }); - } + use super::*; + use crate::mock::{new_test_ext, Runtime}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_heartbeat::()); + assert_ok!(test_benchmark_validate_unsigned::()); + }); + } } diff --git a/frame/im-online/src/lib.rs b/frame/im-online/src/lib.rs index 1137fc2699..1f330a0098 100644 --- a/frame/im-online/src/lib.rs +++ b/frame/im-online/src/lib.rs @@ -69,71 +69,72 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] +mod benchmarking; mod mock; mod tests; -mod benchmarking; +use codec::{Decode, Encode}; +use frame_support::{ + debug, decl_error, decl_event, decl_module, decl_storage, + traits::Get, + weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, + Parameter, +}; +use frame_system::offchain::SubmitUnsignedTransaction; +use frame_system::{self as system, ensure_none}; +use pallet_session::historical::IdentificationTuple; use sp_application_crypto::RuntimeAppPublic; -use codec::{Encode, Decode}; use sp_core::offchain::OpaqueNetworkState; -use sp_std::prelude::*; -use sp_std::convert::TryInto; -use pallet_session::historical::IdentificationTuple; use sp_runtime::{ - offchain::storage::StorageValueRef, - RuntimeDebug, - traits::{Convert, Member, Saturating, AtLeast32Bit}, Perbill, - transaction_validity::{ - TransactionValidity, ValidTransaction, InvalidTransaction, TransactionSource, - TransactionPriority, - }, + offchain::storage::StorageValueRef, + traits::{AtLeast32Bit, Convert, Member, Saturating}, + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + ValidTransaction, + }, + Perbill, RuntimeDebug, }; use sp_staking::{ - SessionIndex, - offence::{ReportOffence, Offence, Kind}, + offence::{Kind, Offence, ReportOffence}, + SessionIndex, }; -use frame_support::{ - decl_module, decl_event, decl_storage, Parameter, debug, decl_error, - traits::Get, - weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, -}; -use frame_system::{self as system, ensure_none}; -use frame_system::offchain::SubmitUnsignedTransaction; +use sp_std::convert::TryInto; +use sp_std::prelude::*; pub mod sr25519 { - mod app_sr25519 { - use sp_application_crypto::{app_crypto, key_types::IM_ONLINE, sr25519}; - app_crypto!(sr25519, IM_ONLINE); - } + mod app_sr25519 { + use sp_application_crypto::{app_crypto, key_types::IM_ONLINE, sr25519}; + app_crypto!(sr25519, IM_ONLINE); + } - sp_application_crypto::with_pair! 
{ - /// An i'm online keypair using sr25519 as its crypto. - pub type AuthorityPair = app_sr25519::Pair; - } + sp_application_crypto::with_pair! { + /// An i'm online keypair using sr25519 as its crypto. + pub type AuthorityPair = app_sr25519::Pair; + } - /// An i'm online signature using sr25519 as its crypto. - pub type AuthoritySignature = app_sr25519::Signature; + /// An i'm online signature using sr25519 as its crypto. + pub type AuthoritySignature = app_sr25519::Signature; - /// An i'm online identifier using sr25519 as its crypto. - pub type AuthorityId = app_sr25519::Public; + /// An i'm online identifier using sr25519 as its crypto. + pub type AuthorityId = app_sr25519::Public; } pub mod ed25519 { - mod app_ed25519 { - use sp_application_crypto::{app_crypto, key_types::IM_ONLINE, ed25519}; - app_crypto!(ed25519, IM_ONLINE); - } + mod app_ed25519 { + use sp_application_crypto::{app_crypto, ed25519, key_types::IM_ONLINE}; + app_crypto!(ed25519, IM_ONLINE); + } - sp_application_crypto::with_pair! { - /// An i'm online keypair using ed25519 as its crypto. - pub type AuthorityPair = app_ed25519::Pair; - } + sp_application_crypto::with_pair! { + /// An i'm online keypair using ed25519 as its crypto. + pub type AuthorityPair = app_ed25519::Pair; + } - /// An i'm online signature using ed25519 as its crypto. - pub type AuthoritySignature = app_ed25519::Signature; + /// An i'm online signature using ed25519 as its crypto. + pub type AuthoritySignature = app_ed25519::Signature; - /// An i'm online identifier using ed25519 as its crypto. - pub type AuthorityId = app_ed25519::Public; + /// An i'm online identifier using ed25519 as its crypto. + pub type AuthorityId = app_ed25519::Public; } const DB_PREFIX: &[u8] = b"parity/im-online-heartbeat/"; @@ -148,60 +149,67 @@ const INCLUDE_THRESHOLD: u32 = 3; /// Note we store such status for every `authority_index` separately. 
#[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] struct HeartbeatStatus { - /// An index of the session that we are supposed to send heartbeat for. - pub session_index: SessionIndex, - /// A block number at which the heartbeat for that session has been actually sent. - /// - /// It may be 0 in case the sending failed. In such case we should just retry - /// as soon as possible (i.e. in a worker running for the next block). - pub sent_at: BlockNumber, + /// An index of the session that we are supposed to send heartbeat for. + pub session_index: SessionIndex, + /// A block number at which the heartbeat for that session has been actually sent. + /// + /// It may be 0 in case the sending failed. In such case we should just retry + /// as soon as possible (i.e. in a worker running for the next block). + pub sent_at: BlockNumber, } impl HeartbeatStatus { - /// Returns true if heartbeat has been recently sent. - /// - /// Parameters: - /// `session_index` - index of current session. - /// `now` - block at which the offchain worker is running. - /// - /// This function will return `true` iff: - /// 1. the session index is the same (we don't care if it went up or down) - /// 2. the heartbeat has been sent recently (within the threshold) - /// - /// The reasoning for 1. is that it's better to send an extra heartbeat than - /// to stall or not send one in case of a bug. - fn is_recent(&self, session_index: SessionIndex, now: BlockNumber) -> bool { - self.session_index == session_index && self.sent_at + INCLUDE_THRESHOLD.into() > now - } + /// Returns true if heartbeat has been recently sent. + /// + /// Parameters: + /// `session_index` - index of current session. + /// `now` - block at which the offchain worker is running. + /// + /// This function will return `true` iff: + /// 1. the session index is the same (we don't care if it went up or down) + /// 2. the heartbeat has been sent recently (within the threshold) + /// + /// The reasoning for 1. 
is that it's better to send an extra heartbeat than + /// to stall or not send one in case of a bug. + fn is_recent(&self, session_index: SessionIndex, now: BlockNumber) -> bool { + self.session_index == session_index && self.sent_at + INCLUDE_THRESHOLD.into() > now + } } /// Error which may occur while executing the off-chain code. #[cfg_attr(test, derive(PartialEq))] enum OffchainErr { - TooEarly(BlockNumber), - WaitingForInclusion(BlockNumber), - AlreadyOnline(u32), - FailedSigning, - FailedToAcquireLock, - NetworkState, - SubmitTransaction, + TooEarly(BlockNumber), + WaitingForInclusion(BlockNumber), + AlreadyOnline(u32), + FailedSigning, + FailedToAcquireLock, + NetworkState, + SubmitTransaction, } impl sp_std::fmt::Debug for OffchainErr { - fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - match *self { - OffchainErr::TooEarly(ref block) => - write!(fmt, "Too early to send heartbeat, next expected at {:?}", block), - OffchainErr::WaitingForInclusion(ref block) => - write!(fmt, "Heartbeat already sent at {:?}. Waiting for inclusion.", block), - OffchainErr::AlreadyOnline(auth_idx) => - write!(fmt, "Authority {} is already online", auth_idx), - OffchainErr::FailedSigning => write!(fmt, "Failed to sign heartbeat"), - OffchainErr::FailedToAcquireLock => write!(fmt, "Failed to acquire lock"), - OffchainErr::NetworkState => write!(fmt, "Failed to fetch network state"), - OffchainErr::SubmitTransaction => write!(fmt, "Failed to submit transaction"), - } - } + fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + match *self { + OffchainErr::TooEarly(ref block) => write!( + fmt, + "Too early to send heartbeat, next expected at {:?}", + block + ), + OffchainErr::WaitingForInclusion(ref block) => write!( + fmt, + "Heartbeat already sent at {:?}. 
Waiting for inclusion.", + block + ), + OffchainErr::AlreadyOnline(auth_idx) => { + write!(fmt, "Authority {} is already online", auth_idx) + } + OffchainErr::FailedSigning => write!(fmt, "Failed to sign heartbeat"), + OffchainErr::FailedToAcquireLock => write!(fmt, "Failed to acquire lock"), + OffchainErr::NetworkState => write!(fmt, "Failed to fetch network state"), + OffchainErr::SubmitTransaction => write!(fmt, "Failed to submit transaction"), + } + } } pub type AuthIndex = u32; @@ -209,52 +217,52 @@ pub type AuthIndex = u32; /// Heartbeat which is sent/received. #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Heartbeat - where BlockNumber: PartialEq + Eq + Decode + Encode, +where + BlockNumber: PartialEq + Eq + Decode + Encode, { - /// Block number at the time heartbeat is created.. - pub block_number: BlockNumber, - /// A state of local network (peer id and external addresses) - pub network_state: OpaqueNetworkState, - /// Index of the current session. - pub session_index: SessionIndex, - /// An index of the authority on the list of validators. - pub authority_index: AuthIndex, + /// Block number at the time heartbeat is created.. + pub block_number: BlockNumber, + /// A state of local network (peer id and external addresses) + pub network_state: OpaqueNetworkState, + /// Index of the current session. + pub session_index: SessionIndex, + /// An index of the authority on the list of validators. + pub authority_index: AuthIndex, } pub trait Trait: frame_system::Trait + pallet_session::historical::Trait { - /// The identifier type for an authority. - type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// A dispatchable call type. - type Call: From>; - - /// A transaction submitter. - type SubmitTransaction: SubmitUnsignedTransaction::Call>; - - /// An expected duration of the session. 
- /// - /// This parameter is used to determine the longevity of `heartbeat` transaction - /// and a rough time when we should start considering sending heartbeats, - /// since the workers avoids sending them at the very beginning of the session, assuming - /// there is a chance the authority will produce a block and they won't be necessary. - type SessionDuration: Get; - - /// A type that gives us the ability to submit unresponsiveness offence reports. - type ReportUnresponsiveness: - ReportOffence< - Self::AccountId, - IdentificationTuple, - UnresponsivenessOffence>, - >; - - /// A configuration for base priority of unsigned transactions. - /// - /// This is exposed so that it can be tuned for particular runtime, when - /// multiple pallets send unsigned transactions. - type UnsignedPriority: Get; + /// The identifier type for an authority. + type AuthorityId: Member + Parameter + RuntimeAppPublic + Default + Ord; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// A dispatchable call type. + type Call: From>; + + /// A transaction submitter. + type SubmitTransaction: SubmitUnsignedTransaction::Call>; + + /// An expected duration of the session. + /// + /// This parameter is used to determine the longevity of `heartbeat` transaction + /// and a rough time when we should start considering sending heartbeats, + /// since the workers avoids sending them at the very beginning of the session, assuming + /// there is a chance the authority will produce a block and they won't be necessary. + type SessionDuration: Get; + + /// A type that gives us the ability to submit unresponsiveness offence reports. + type ReportUnresponsiveness: ReportOffence< + Self::AccountId, + IdentificationTuple, + UnresponsivenessOffence>, + >; + + /// A configuration for base priority of unsigned transactions. + /// + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. 
+ type UnsignedPriority: Get; } decl_event!( @@ -272,458 +280,458 @@ decl_event!( ); decl_storage! { - trait Store for Module as ImOnline { - /// The block number after which it's ok to send heartbeats in current session. - /// - /// At the beginning of each session we set this to a value that should - /// fall roughly in the middle of the session duration. - /// The idea is to first wait for the validators to produce a block - /// in the current session, so that the heartbeat later on will not be necessary. - HeartbeatAfter get(fn heartbeat_after): T::BlockNumber; - - /// The current set of keys that may issue a heartbeat. - Keys get(fn keys): Vec; - - /// For each session index, we keep a mapping of `AuthIndex` to - /// `offchain::OpaqueNetworkState`. - ReceivedHeartbeats get(fn received_heartbeats): - double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) AuthIndex - => Option>; - - /// For each session index, we keep a mapping of `T::ValidatorId` to the - /// number of blocks authored by the given authority. - AuthoredBlocks get(fn authored_blocks): - double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) T::ValidatorId - => u32; - } - add_extra_genesis { - config(keys): Vec; - build(|config| Module::::initialize_keys(&config.keys)) - } + trait Store for Module as ImOnline { + /// The block number after which it's ok to send heartbeats in current session. + /// + /// At the beginning of each session we set this to a value that should + /// fall roughly in the middle of the session duration. + /// The idea is to first wait for the validators to produce a block + /// in the current session, so that the heartbeat later on will not be necessary. + HeartbeatAfter get(fn heartbeat_after): T::BlockNumber; + + /// The current set of keys that may issue a heartbeat. + Keys get(fn keys): Vec; + + /// For each session index, we keep a mapping of `AuthIndex` to + /// `offchain::OpaqueNetworkState`. 
+ ReceivedHeartbeats get(fn received_heartbeats): + double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) AuthIndex + => Option>; + + /// For each session index, we keep a mapping of `T::ValidatorId` to the + /// number of blocks authored by the given authority. + AuthoredBlocks get(fn authored_blocks): + double_map hasher(twox_64_concat) SessionIndex, hasher(twox_64_concat) T::ValidatorId + => u32; + } + add_extra_genesis { + config(keys): Vec; + build(|config| Module::::initialize_keys(&config.keys)) + } } decl_error! { - /// Error for the im-online module. - pub enum Error for Module { - /// Non existent public key. - InvalidKey, - /// Duplicated heartbeat. - DuplicatedHeartbeat, - } + /// Error for the im-online module. + pub enum Error for Module { + /// Non existent public key. + InvalidKey, + /// Duplicated heartbeat. + DuplicatedHeartbeat, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn heartbeat( - origin, - heartbeat: Heartbeat, - // since signature verification is done in `validate_unsigned` - // we can skip doing it here again. - _signature: ::Signature - ) { - ensure_none(origin)?; - - let current_session = >::current_index(); - let exists = ::contains_key( - ¤t_session, - &heartbeat.authority_index - ); - let keys = Keys::::get(); - let public = keys.get(heartbeat.authority_index as usize); - if let (false, Some(public)) = (exists, public) { - Self::deposit_event(Event::::HeartbeatReceived(public.clone())); - - let network_state = heartbeat.network_state.encode(); - ::insert( - ¤t_session, - &heartbeat.authority_index, - &network_state - ); - } else if exists { - Err(Error::::DuplicatedHeartbeat)? - } else { - Err(Error::::InvalidKey)? - } - } - - // Runs after every block. - fn offchain_worker(now: T::BlockNumber) { - // Only send messages if we are a potential validator. 
- if sp_io::offchain::is_validator() { - for res in Self::send_heartbeats(now).into_iter().flatten() { - if let Err(e) = res { - debug::debug!( - target: "imonline", - "Skipping heartbeat at {:?}: {:?}", - now, - e, - ) - } - } - } else { - debug::trace!( - target: "imonline", - "Skipping heartbeat at {:?}. Not a validator.", - now, - ) - } - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn heartbeat( + origin, + heartbeat: Heartbeat, + // since signature verification is done in `validate_unsigned` + // we can skip doing it here again. + _signature: ::Signature + ) { + ensure_none(origin)?; + + let current_session = >::current_index(); + let exists = ::contains_key( + ¤t_session, + &heartbeat.authority_index + ); + let keys = Keys::::get(); + let public = keys.get(heartbeat.authority_index as usize); + if let (false, Some(public)) = (exists, public) { + Self::deposit_event(Event::::HeartbeatReceived(public.clone())); + + let network_state = heartbeat.network_state.encode(); + ::insert( + ¤t_session, + &heartbeat.authority_index, + &network_state + ); + } else if exists { + Err(Error::::DuplicatedHeartbeat)? + } else { + Err(Error::::InvalidKey)? + } + } + + // Runs after every block. + fn offchain_worker(now: T::BlockNumber) { + // Only send messages if we are a potential validator. + if sp_io::offchain::is_validator() { + for res in Self::send_heartbeats(now).into_iter().flatten() { + if let Err(e) = res { + debug::debug!( + target: "imonline", + "Skipping heartbeat at {:?}: {:?}", + now, + e, + ) + } + } + } else { + debug::trace!( + target: "imonline", + "Skipping heartbeat at {:?}. Not a validator.", + now, + ) + } + } + } } type OffchainResult = Result::BlockNumber>>; /// Keep track of number of authored blocks per authority, uncles are counted as /// well since they're a valid proof of being online. 
-impl pallet_authorship::EventHandler for Module { - fn note_author(author: T::ValidatorId) { - Self::note_authorship(author); - } +impl + pallet_authorship::EventHandler for Module +{ + fn note_author(author: T::ValidatorId) { + Self::note_authorship(author); + } - fn note_uncle(author: T::ValidatorId, _age: T::BlockNumber) { - Self::note_authorship(author); - } + fn note_uncle(author: T::ValidatorId, _age: T::BlockNumber) { + Self::note_authorship(author); + } } impl Module { - /// Returns `true` if a heartbeat has been received for the authority at - /// `authority_index` in the authorities series or if the authority has - /// authored at least one block, during the current session. Otherwise - /// `false`. - pub fn is_online(authority_index: AuthIndex) -> bool { - let current_validators = >::validators(); - - if authority_index >= current_validators.len() as u32 { - return false; - } - - let authority = ¤t_validators[authority_index as usize]; - - Self::is_online_aux(authority_index, authority) - } - - fn is_online_aux(authority_index: AuthIndex, authority: &T::ValidatorId) -> bool { - let current_session = >::current_index(); - - ::contains_key(¤t_session, &authority_index) || - >::get( - ¤t_session, - authority, - ) != 0 - } - - /// Returns `true` if a heartbeat has been received for the authority at `authority_index` in - /// the authorities series, during the current session. Otherwise `false`. - pub fn received_heartbeat_in_current_session(authority_index: AuthIndex) -> bool { - let current_session = >::current_index(); - ::contains_key(¤t_session, &authority_index) - } - - /// Note that the given authority has authored a block in the current session. 
- fn note_authorship(author: T::ValidatorId) { - let current_session = >::current_index(); - - >::mutate( - ¤t_session, - author, - |authored| *authored += 1, - ); - } - - pub(crate) fn send_heartbeats(block_number: T::BlockNumber) - -> OffchainResult>> - { - let heartbeat_after = >::get(); - if block_number < heartbeat_after { - return Err(OffchainErr::TooEarly(heartbeat_after)) - } - - let session_index = >::current_index(); - Ok(Self::local_authority_keys() - .map(move |(authority_index, key)| - Self::send_single_heartbeat(authority_index, key, session_index, block_number) - )) - } - - - fn send_single_heartbeat( - authority_index: u32, - key: T::AuthorityId, - session_index: SessionIndex, - block_number: T::BlockNumber - ) -> OffchainResult { - // A helper function to prepare heartbeat call. - let prepare_heartbeat = || -> OffchainResult> { - let network_state = sp_io::offchain::network_state() - .map_err(|_| OffchainErr::NetworkState)?; - let heartbeat_data = Heartbeat { - block_number, - network_state, - session_index, - authority_index, - }; - let signature = key.sign(&heartbeat_data.encode()).ok_or(OffchainErr::FailedSigning)?; - Ok(Call::heartbeat(heartbeat_data, signature)) - }; - - if Self::is_online(authority_index) { - return Err(OffchainErr::AlreadyOnline(authority_index)); - } - - // acquire lock for that authority at current heartbeat to make sure we don't - // send concurrent heartbeats. 
- Self::with_heartbeat_lock( - authority_index, - session_index, - block_number, - || { - let call = prepare_heartbeat()?; - debug::info!( - target: "imonline", - "[index: {:?}] Reporting im-online at block: {:?} (session: {:?}): {:?}", - authority_index, - block_number, - session_index, - call, - ); - - T::SubmitTransaction::submit_unsigned(call) - .map_err(|_| OffchainErr::SubmitTransaction)?; - - Ok(()) - }, - ) - } - - fn local_authority_keys() -> impl Iterator { - // we run only when a local authority key is configured - let authorities = Keys::::get(); - let mut local_keys = T::AuthorityId::all(); - local_keys.sort(); - - authorities.into_iter() - .enumerate() - .filter_map(move |(index, authority)| { - local_keys.binary_search(&authority) - .ok() - .map(|location| (index as u32, local_keys[location].clone())) - }) - } - - fn with_heartbeat_lock( - authority_index: u32, - session_index: SessionIndex, - now: T::BlockNumber, - f: impl FnOnce() -> OffchainResult, - ) -> OffchainResult { - let key = { - let mut key = DB_PREFIX.to_vec(); - key.extend(authority_index.encode()); - key - }; - let storage = StorageValueRef::persistent(&key); - let res = storage.mutate(|status: Option>>| { - // Check if there is already a lock for that particular block. - // This means that the heartbeat has already been sent, and we are just waiting - // for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD - // we will re-send it. - match status { - // we are still waiting for inclusion. - Some(Some(status)) if status.is_recent(session_index, now) => { - Err(OffchainErr::WaitingForInclusion(status.sent_at)) - }, - // attempt to set new status - _ => Ok(HeartbeatStatus { - session_index, - sent_at: now, - }), - } - })?; - - let mut new_status = res.map_err(|_| OffchainErr::FailedToAcquireLock)?; - - // we got the lock, let's try to send the heartbeat. - let res = f(); - - // clear the lock in case we have failed to send transaction. 
- if res.is_err() { - new_status.sent_at = 0.into(); - storage.set(&new_status); - } - - res - } - - fn initialize_keys(keys: &[T::AuthorityId]) { - if !keys.is_empty() { - assert!(Keys::::get().is_empty(), "Keys are already initialized!"); - Keys::::put(keys); - } - } + /// Returns `true` if a heartbeat has been received for the authority at + /// `authority_index` in the authorities series or if the authority has + /// authored at least one block, during the current session. Otherwise + /// `false`. + pub fn is_online(authority_index: AuthIndex) -> bool { + let current_validators = >::validators(); + + if authority_index >= current_validators.len() as u32 { + return false; + } + + let authority = ¤t_validators[authority_index as usize]; + + Self::is_online_aux(authority_index, authority) + } + + fn is_online_aux(authority_index: AuthIndex, authority: &T::ValidatorId) -> bool { + let current_session = >::current_index(); + + ::contains_key(¤t_session, &authority_index) + || >::get(¤t_session, authority) != 0 + } + + /// Returns `true` if a heartbeat has been received for the authority at `authority_index` in + /// the authorities series, during the current session. Otherwise `false`. + pub fn received_heartbeat_in_current_session(authority_index: AuthIndex) -> bool { + let current_session = >::current_index(); + ::contains_key(¤t_session, &authority_index) + } + + /// Note that the given authority has authored a block in the current session. 
+ fn note_authorship(author: T::ValidatorId) { + let current_session = >::current_index(); + + >::mutate(¤t_session, author, |authored| *authored += 1); + } + + pub(crate) fn send_heartbeats( + block_number: T::BlockNumber, + ) -> OffchainResult>> { + let heartbeat_after = >::get(); + if block_number < heartbeat_after { + return Err(OffchainErr::TooEarly(heartbeat_after)); + } + + let session_index = >::current_index(); + Ok( + Self::local_authority_keys().map(move |(authority_index, key)| { + Self::send_single_heartbeat(authority_index, key, session_index, block_number) + }), + ) + } + + fn send_single_heartbeat( + authority_index: u32, + key: T::AuthorityId, + session_index: SessionIndex, + block_number: T::BlockNumber, + ) -> OffchainResult { + // A helper function to prepare heartbeat call. + let prepare_heartbeat = || -> OffchainResult> { + let network_state = + sp_io::offchain::network_state().map_err(|_| OffchainErr::NetworkState)?; + let heartbeat_data = Heartbeat { + block_number, + network_state, + session_index, + authority_index, + }; + let signature = key + .sign(&heartbeat_data.encode()) + .ok_or(OffchainErr::FailedSigning)?; + Ok(Call::heartbeat(heartbeat_data, signature)) + }; + + if Self::is_online(authority_index) { + return Err(OffchainErr::AlreadyOnline(authority_index)); + } + + // acquire lock for that authority at current heartbeat to make sure we don't + // send concurrent heartbeats. 
+ Self::with_heartbeat_lock(authority_index, session_index, block_number, || { + let call = prepare_heartbeat()?; + debug::info!( + target: "imonline", + "[index: {:?}] Reporting im-online at block: {:?} (session: {:?}): {:?}", + authority_index, + block_number, + session_index, + call, + ); + + T::SubmitTransaction::submit_unsigned(call) + .map_err(|_| OffchainErr::SubmitTransaction)?; + + Ok(()) + }) + } + + fn local_authority_keys() -> impl Iterator { + // we run only when a local authority key is configured + let authorities = Keys::::get(); + let mut local_keys = T::AuthorityId::all(); + local_keys.sort(); + + authorities + .into_iter() + .enumerate() + .filter_map(move |(index, authority)| { + local_keys + .binary_search(&authority) + .ok() + .map(|location| (index as u32, local_keys[location].clone())) + }) + } + + fn with_heartbeat_lock( + authority_index: u32, + session_index: SessionIndex, + now: T::BlockNumber, + f: impl FnOnce() -> OffchainResult, + ) -> OffchainResult { + let key = { + let mut key = DB_PREFIX.to_vec(); + key.extend(authority_index.encode()); + key + }; + let storage = StorageValueRef::persistent(&key); + let res = storage.mutate(|status: Option>>| { + // Check if there is already a lock for that particular block. + // This means that the heartbeat has already been sent, and we are just waiting + // for it to be included. However if it doesn't get included for INCLUDE_THRESHOLD + // we will re-send it. + match status { + // we are still waiting for inclusion. + Some(Some(status)) if status.is_recent(session_index, now) => { + Err(OffchainErr::WaitingForInclusion(status.sent_at)) + } + // attempt to set new status + _ => Ok(HeartbeatStatus { + session_index, + sent_at: now, + }), + } + })?; + + let mut new_status = res.map_err(|_| OffchainErr::FailedToAcquireLock)?; + + // we got the lock, let's try to send the heartbeat. + let res = f(); + + // clear the lock in case we have failed to send transaction. 
+ if res.is_err() { + new_status.sent_at = 0.into(); + storage.set(&new_status); + } + + res + } + + fn initialize_keys(keys: &[T::AuthorityId]) { + if !keys.is_empty() { + assert!(Keys::::get().is_empty(), "Keys are already initialized!"); + Keys::::put(keys); + } + } } impl sp_runtime::BoundToRuntimeAppPublic for Module { - type Public = T::AuthorityId; + type Public = T::AuthorityId; } impl pallet_session::OneSessionHandler for Module { - type Key = T::AuthorityId; - - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator - { - let keys = validators.map(|x| x.1).collect::>(); - Self::initialize_keys(&keys); - } - - fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, _queued_validators: I) - where I: Iterator - { - // Tell the offchain worker to start making the next session's heartbeats. - // Since we consider producing blocks as being online, - // the heartbeat is deferred a bit to prevent spamming. - let block_number = >::block_number(); - let half_session = T::SessionDuration::get() / 2.into(); - >::put(block_number + half_session); - - // Remember who the authorities are for the new session. - Keys::::put(validators.map(|x| x.1).collect::>()); - } - - fn on_before_session_ending() { - let session_index = >::current_index(); - let keys = Keys::::get(); - let current_validators = >::validators(); - - let offenders = current_validators.into_iter().enumerate() - .filter(|(index, id)| - !Self::is_online_aux(*index as u32, id) - ).filter_map(|(_, id)| - T::FullIdentificationOf::convert(id.clone()).map(|full_id| (id, full_id)) - ).collect::>>(); - - // Remove all received heartbeats and number of authored blocks from the - // current session, they have already been processed and won't be needed - // anymore. 
- ::remove_prefix(&>::current_index()); - >::remove_prefix(&>::current_index()); - - if offenders.is_empty() { - Self::deposit_event(RawEvent::AllGood); - } else { - Self::deposit_event(RawEvent::SomeOffline(offenders.clone())); - - let validator_set_count = keys.len() as u32; - let offence = UnresponsivenessOffence { session_index, validator_set_count, offenders }; - if let Err(e) = T::ReportUnresponsiveness::report_offence(vec![], offence) { - sp_runtime::print(e); - } - } - } - - fn on_disabled(_i: usize) { - // ignore - } + type Key = T::AuthorityId; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + { + let keys = validators.map(|x| x.1).collect::>(); + Self::initialize_keys(&keys); + } + + fn on_new_session<'a, I: 'a>(_changed: bool, validators: I, _queued_validators: I) + where + I: Iterator, + { + // Tell the offchain worker to start making the next session's heartbeats. + // Since we consider producing blocks as being online, + // the heartbeat is deferred a bit to prevent spamming. + let block_number = >::block_number(); + let half_session = T::SessionDuration::get() / 2.into(); + >::put(block_number + half_session); + + // Remember who the authorities are for the new session. + Keys::::put(validators.map(|x| x.1).collect::>()); + } + + fn on_before_session_ending() { + let session_index = >::current_index(); + let keys = Keys::::get(); + let current_validators = >::validators(); + + let offenders = current_validators + .into_iter() + .enumerate() + .filter(|(index, id)| !Self::is_online_aux(*index as u32, id)) + .filter_map(|(_, id)| { + T::FullIdentificationOf::convert(id.clone()).map(|full_id| (id, full_id)) + }) + .collect::>>(); + + // Remove all received heartbeats and number of authored blocks from the + // current session, they have already been processed and won't be needed + // anymore. 
+ ::remove_prefix(&>::current_index()); + >::remove_prefix(&>::current_index()); + + if offenders.is_empty() { + Self::deposit_event(RawEvent::AllGood); + } else { + Self::deposit_event(RawEvent::SomeOffline(offenders.clone())); + + let validator_set_count = keys.len() as u32; + let offence = UnresponsivenessOffence { + session_index, + validator_set_count, + offenders, + }; + if let Err(e) = T::ReportUnresponsiveness::report_offence(vec![], offence) { + sp_runtime::print(e); + } + } + } + + fn on_disabled(_i: usize) { + // ignore + } } impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - - fn validate_unsigned( - _source: TransactionSource, - call: &Self::Call, - ) -> TransactionValidity { - if let Call::heartbeat(heartbeat, signature) = call { - if >::is_online(heartbeat.authority_index) { - // we already received a heartbeat for this authority - return InvalidTransaction::Stale.into(); - } - - // check if session index from heartbeat is recent - let current_session = >::current_index(); - if heartbeat.session_index != current_session { - return InvalidTransaction::Stale.into(); - } - - // verify that the incoming (unverified) pubkey is actually an authority id - let keys = Keys::::get(); - let authority_id = match keys.get(heartbeat.authority_index as usize) { - Some(id) => id, - None => return InvalidTransaction::BadProof.into(), - }; - - // check signature (this is expensive so we do it last). 
- let signature_valid = heartbeat.using_encoded(|encoded_heartbeat| { - authority_id.verify(&encoded_heartbeat, &signature) - }); - - if !signature_valid { - return InvalidTransaction::BadProof.into(); - } - - ValidTransaction::with_tag_prefix("ImOnline") - .priority(T::UnsignedPriority::get()) - .and_provides((current_session, authority_id)) - .longevity(TryInto::::try_into( - T::SessionDuration::get() / 2.into() - ).unwrap_or(64_u64)) - .propagate(true) - .build() - } else { - InvalidTransaction::Call.into() - } - } + type Call = Call; + + fn validate_unsigned(_source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::heartbeat(heartbeat, signature) = call { + if >::is_online(heartbeat.authority_index) { + // we already received a heartbeat for this authority + return InvalidTransaction::Stale.into(); + } + + // check if session index from heartbeat is recent + let current_session = >::current_index(); + if heartbeat.session_index != current_session { + return InvalidTransaction::Stale.into(); + } + + // verify that the incoming (unverified) pubkey is actually an authority id + let keys = Keys::::get(); + let authority_id = match keys.get(heartbeat.authority_index as usize) { + Some(id) => id, + None => return InvalidTransaction::BadProof.into(), + }; + + // check signature (this is expensive so we do it last). + let signature_valid = heartbeat.using_encoded(|encoded_heartbeat| { + authority_id.verify(&encoded_heartbeat, &signature) + }); + + if !signature_valid { + return InvalidTransaction::BadProof.into(); + } + + ValidTransaction::with_tag_prefix("ImOnline") + .priority(T::UnsignedPriority::get()) + .and_provides((current_session, authority_id)) + .longevity( + TryInto::::try_into(T::SessionDuration::get() / 2.into()) + .unwrap_or(64_u64), + ) + .propagate(true) + .build() + } else { + InvalidTransaction::Call.into() + } + } } /// An offence that is filed if a validator didn't send a heartbeat message. 
#[derive(RuntimeDebug)] #[cfg_attr(feature = "std", derive(Clone, PartialEq, Eq))] pub struct UnresponsivenessOffence { - /// The current session index in which we report the unresponsive validators. - /// - /// It acts as a time measure for unresponsiveness reports and effectively will always point - /// at the end of the session. - pub session_index: SessionIndex, - /// The size of the validator set in current session/era. - pub validator_set_count: u32, - /// Authorities that were unresponsive during the current era. - pub offenders: Vec, + /// The current session index in which we report the unresponsive validators. + /// + /// It acts as a time measure for unresponsiveness reports and effectively will always point + /// at the end of the session. + pub session_index: SessionIndex, + /// The size of the validator set in current session/era. + pub validator_set_count: u32, + /// Authorities that were unresponsive during the current era. + pub offenders: Vec, } impl Offence for UnresponsivenessOffence { - const ID: Kind = *b"im-online:offlin"; - type TimeSlot = SessionIndex; - - fn offenders(&self) -> Vec { - self.offenders.clone() - } - - fn session_index(&self) -> SessionIndex { - self.session_index - } - - fn validator_set_count(&self) -> u32 { - self.validator_set_count - } - - fn time_slot(&self) -> Self::TimeSlot { - self.session_index - } - - fn slash_fraction(offenders: u32, validator_set_count: u32) -> Perbill { - // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 - // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% - // when 13/30 are offline (around 5% when 1/3 are offline). 
- if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { - let x = Perbill::from_rational_approximation(3 * threshold, validator_set_count); - x.saturating_mul(Perbill::from_percent(7)) - } else { - Perbill::default() - } - } + const ID: Kind = *b"im-online:offlin"; + type TimeSlot = SessionIndex; + + fn offenders(&self) -> Vec { + self.offenders.clone() + } + + fn session_index(&self) -> SessionIndex { + self.session_index + } + + fn validator_set_count(&self) -> u32 { + self.validator_set_count + } + + fn time_slot(&self) -> Self::TimeSlot { + self.session_index + } + + fn slash_fraction(offenders: u32, validator_set_count: u32) -> Perbill { + // the formula is min((3 * (k - (n / 10 + 1))) / n, 1) * 0.07 + // basically, 10% can be offline with no slash, but after that, it linearly climbs up to 7% + // when 13/30 are offline (around 5% when 1/3 are offline). + if let Some(threshold) = offenders.checked_sub(validator_set_count / 10 + 1) { + let x = Perbill::from_rational_approximation(3 * threshold, validator_set_count); + x.saturating_mul(Perbill::from_percent(7)) + } else { + Perbill::default() + } + } } diff --git a/frame/im-online/src/mock.rs b/frame/im-online/src/mock.rs index d620bb51b7..4f29651f38 100644 --- a/frame/im-online/src/mock.rs +++ b/frame/im-online/src/mock.rs @@ -21,49 +21,50 @@ use std::cell::RefCell; use crate::{Module, Trait}; -use sp_runtime::Perbill; -use sp_staking::{SessionIndex, offence::{ReportOffence, OffenceError}}; -use sp_runtime::testing::{Header, UintAuthorityId, TestXt}; -use sp_runtime::traits::{IdentityLookup, BlakeTwo256, ConvertInto}; +use frame_support::{impl_outer_dispatch, impl_outer_origin, parameter_types, weights::Weight}; use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types, weights::Weight}; +use sp_runtime::testing::{Header, TestXt, UintAuthorityId}; +use sp_runtime::traits::{BlakeTwo256, ConvertInto, IdentityLookup}; +use sp_runtime::Perbill; +use 
sp_staking::{ + offence::{OffenceError, ReportOffence}, + SessionIndex, +}; use frame_system as system; -impl_outer_origin!{ - pub enum Origin for Runtime {} +impl_outer_origin! { + pub enum Origin for Runtime {} } impl_outer_dispatch! { - pub enum Call for Runtime where origin: Origin { - imonline::ImOnline, - } + pub enum Call for Runtime where origin: Origin { + imonline::ImOnline, + } } thread_local! { - pub static VALIDATORS: RefCell>> = RefCell::new(Some(vec![1, 2, 3])); + pub static VALIDATORS: RefCell>> = RefCell::new(Some(vec![1, 2, 3])); } pub struct TestSessionManager; impl pallet_session::SessionManager for TestSessionManager { - fn new_session(_new_index: SessionIndex) -> Option> { - VALIDATORS.with(|l| l.borrow_mut().take()) - } - fn end_session(_: SessionIndex) {} - fn start_session(_: SessionIndex) {} + fn new_session(_new_index: SessionIndex) -> Option> { + VALIDATORS.with(|l| l.borrow_mut().take()) + } + fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} } impl pallet_session::historical::SessionManager for TestSessionManager { - fn new_session(_new_index: SessionIndex) -> Option> { - VALIDATORS.with(|l| l - .borrow_mut() - .take() - .map(|validators| { - validators.iter().map(|v| (*v, *v)).collect() - }) - ) - } - fn end_session(_: SessionIndex) {} - fn start_session(_: SessionIndex) {} + fn new_session(_new_index: SessionIndex) -> Option> { + VALIDATORS.with(|l| { + l.borrow_mut() + .take() + .map(|validators| validators.iter().map(|v| (*v, *v)).collect()) + }) + } + fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} } /// An extrinsic type used for tests. @@ -73,106 +74,108 @@ type IdentificationTuple = (u64, u64); type Offence = crate::UnresponsivenessOffence; thread_local! { - pub static OFFENCES: RefCell, Offence)>> = RefCell::new(vec![]); + pub static OFFENCES: RefCell, Offence)>> = RefCell::new(vec![]); } /// A mock offence report handler. 
pub struct OffenceHandler; impl ReportOffence for OffenceHandler { - fn report_offence(reporters: Vec, offence: Offence) -> Result<(), OffenceError> { - OFFENCES.with(|l| l.borrow_mut().push((reporters, offence))); - Ok(()) - } + fn report_offence(reporters: Vec, offence: Offence) -> Result<(), OffenceError> { + OFFENCES.with(|l| l.borrow_mut().push((reporters, offence))); + Ok(()) + } } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - t.into() + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + t.into() } - #[derive(Clone, PartialEq, Eq, Debug)] pub struct Runtime; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = Call; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type 
MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! { - pub const Period: u64 = 1; - pub const Offset: u64 = 0; + pub const Period: u64 = 1; + pub const Offset: u64 = 0; } parameter_types! { - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } impl pallet_session::Trait for Runtime { - type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type SessionHandler = (ImOnline, ); - type ValidatorId = u64; - type ValidatorIdOf = ConvertInto; - type Keys = UintAuthorityId; - type Event = (); - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type NextSessionRotation = pallet_session::PeriodicSessions; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionManager = + pallet_session::historical::NoteHistoricalRoot; + type SessionHandler = (ImOnline,); + type ValidatorId = u64; + type ValidatorIdOf = ConvertInto; + type Keys = UintAuthorityId; + type Event = (); + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = pallet_session::PeriodicSessions; } impl pallet_session::historical::Trait for Runtime { - type FullIdentification = u64; - type FullIdentificationOf = ConvertInto; + type FullIdentification = u64; + type FullIdentificationOf = ConvertInto; } parameter_types! 
{ - pub const UncleGenerations: u32 = 5; + pub const UncleGenerations: u32 = 5; } impl pallet_authorship::Trait for Runtime { - type FindAuthor = (); - type UncleGenerations = UncleGenerations; - type FilterUncle = (); - type EventHandler = ImOnline; + type FindAuthor = (); + type UncleGenerations = UncleGenerations; + type FilterUncle = (); + type EventHandler = ImOnline; } parameter_types! { - pub const UnsignedPriority: u64 = 1 << 20; + pub const UnsignedPriority: u64 = 1 << 20; } impl Trait for Runtime { - type AuthorityId = UintAuthorityId; - type Event = (); - type Call = Call; - type SubmitTransaction = SubmitTransaction; - type ReportUnresponsiveness = OffenceHandler; - type SessionDuration = Period; - type UnsignedPriority = UnsignedPriority; + type AuthorityId = UintAuthorityId; + type Event = (); + type Call = Call; + type SubmitTransaction = SubmitTransaction; + type ReportUnresponsiveness = OffenceHandler; + type SessionDuration = Period; + type UnsignedPriority = UnsignedPriority; } /// Im Online module. 
@@ -181,8 +184,8 @@ pub type System = frame_system::Module; pub type Session = pallet_session::Module; pub fn advance_session() { - let now = System::block_number().max(1); - System::set_block_number(now + 1); - Session::rotate_session(); - assert_eq!(Session::current_index(), (now / Period::get()) as u32); + let now = System::block_number().max(1); + System::set_block_number(now + 1); + Session::rotate_session(); + assert_eq!(Session::current_index(), (now / Period::get()) as u32); } diff --git a/frame/im-online/src/tests.rs b/frame/im-online/src/tests.rs index c7bf2afcca..0981972cbc 100644 --- a/frame/im-online/src/tests.rs +++ b/frame/im-online/src/tests.rs @@ -20,325 +20,339 @@ use super::*; use crate::mock::*; +use frame_support::{assert_noop, dispatch}; use sp_core::offchain::{ - OpaquePeerId, - OffchainExt, - TransactionPoolExt, - testing::{TestOffchainExt, TestTransactionPoolExt}, + testing::{TestOffchainExt, TestTransactionPoolExt}, + OffchainExt, OpaquePeerId, TransactionPoolExt, }; -use frame_support::{dispatch, assert_noop}; use sp_runtime::testing::UintAuthorityId; #[test] fn test_unresponsiveness_slash_fraction() { - // A single case of unresponsiveness is not slashed. - assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(1, 50), - Perbill::zero(), - ); - - assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(5, 50), - Perbill::zero(), // 0% - ); - - assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(7, 50), - Perbill::from_parts(4200000), // 0.42% - ); - - // One third offline should be punished around 5%. - assert_eq!( - UnresponsivenessOffence::<()>::slash_fraction(17, 50), - Perbill::from_parts(46200000), // 4.62% - ); + // A single case of unresponsiveness is not slashed. 
+ assert_eq!( + UnresponsivenessOffence::<()>::slash_fraction(1, 50), + Perbill::zero(), + ); + + assert_eq!( + UnresponsivenessOffence::<()>::slash_fraction(5, 50), + Perbill::zero(), // 0% + ); + + assert_eq!( + UnresponsivenessOffence::<()>::slash_fraction(7, 50), + Perbill::from_parts(4200000), // 0.42% + ); + + // One third offline should be punished around 5%. + assert_eq!( + UnresponsivenessOffence::<()>::slash_fraction(17, 50), + Perbill::from_parts(46200000), // 4.62% + ); } #[test] fn should_report_offline_validators() { - new_test_ext().execute_with(|| { - // given - let block = 1; - System::set_block_number(block); - // buffer new validators - Session::rotate_session(); - // enact the change and buffer another one - let validators = vec![1, 2, 3, 4, 5, 6]; - VALIDATORS.with(|l| *l.borrow_mut() = Some(validators.clone())); - Session::rotate_session(); - - // when - // we end current session and start the next one - Session::rotate_session(); - - // then - let offences = OFFENCES.with(|l| l.replace(vec![])); - assert_eq!(offences, vec![ - (vec![], UnresponsivenessOffence { - session_index: 2, - validator_set_count: 3, - offenders: vec![ - (1, 1), - (2, 2), - (3, 3), - ], - }) - ]); - - // should not report when heartbeat is sent - for (idx, v) in validators.into_iter().take(4).enumerate() { - let _ = heartbeat(block, 3, idx as u32, v.into()).unwrap(); - } - Session::rotate_session(); - - // then - let offences = OFFENCES.with(|l| l.replace(vec![])); - assert_eq!(offences, vec![ - (vec![], UnresponsivenessOffence { - session_index: 3, - validator_set_count: 6, - offenders: vec![ - (5, 5), - (6, 6), - ], - }) - ]); - }); + new_test_ext().execute_with(|| { + // given + let block = 1; + System::set_block_number(block); + // buffer new validators + Session::rotate_session(); + // enact the change and buffer another one + let validators = vec![1, 2, 3, 4, 5, 6]; + VALIDATORS.with(|l| *l.borrow_mut() = Some(validators.clone())); + Session::rotate_session(); + + 
// when + // we end current session and start the next one + Session::rotate_session(); + + // then + let offences = OFFENCES.with(|l| l.replace(vec![])); + assert_eq!( + offences, + vec![( + vec![], + UnresponsivenessOffence { + session_index: 2, + validator_set_count: 3, + offenders: vec![(1, 1), (2, 2), (3, 3),], + } + )] + ); + + // should not report when heartbeat is sent + for (idx, v) in validators.into_iter().take(4).enumerate() { + let _ = heartbeat(block, 3, idx as u32, v.into()).unwrap(); + } + Session::rotate_session(); + + // then + let offences = OFFENCES.with(|l| l.replace(vec![])); + assert_eq!( + offences, + vec![( + vec![], + UnresponsivenessOffence { + session_index: 3, + validator_set_count: 6, + offenders: vec![(5, 5), (6, 6),], + } + )] + ); + }); } fn heartbeat( - block_number: u64, - session_index: u32, - authority_index: u32, - id: UintAuthorityId, + block_number: u64, + session_index: u32, + authority_index: u32, + id: UintAuthorityId, ) -> dispatch::DispatchResult { - use frame_support::unsigned::ValidateUnsigned; - - let heartbeat = Heartbeat { - block_number, - network_state: OpaqueNetworkState { - peer_id: OpaquePeerId(vec![1]), - external_addresses: vec![], - }, - session_index, - authority_index, - }; - let signature = id.sign(&heartbeat.encode()).unwrap(); - - ImOnline::pre_dispatch(&crate::Call::heartbeat(heartbeat.clone(), signature.clone())) - .map_err(|e| <&'static str>::from(e))?; - ImOnline::heartbeat( - Origin::system(frame_system::RawOrigin::None), - heartbeat, - signature - ) + use frame_support::unsigned::ValidateUnsigned; + + let heartbeat = Heartbeat { + block_number, + network_state: OpaqueNetworkState { + peer_id: OpaquePeerId(vec![1]), + external_addresses: vec![], + }, + session_index, + authority_index, + }; + let signature = id.sign(&heartbeat.encode()).unwrap(); + + ImOnline::pre_dispatch(&crate::Call::heartbeat( + heartbeat.clone(), + signature.clone(), + )) + .map_err(|e| <&'static str>::from(e))?; + 
ImOnline::heartbeat( + Origin::system(frame_system::RawOrigin::None), + heartbeat, + signature, + ) } #[test] fn should_mark_online_validator_when_heartbeat_is_received() { - new_test_ext().execute_with(|| { - advance_session(); - // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); - assert_eq!(Session::validators(), Vec::::new()); - // enact the change and buffer another one - advance_session(); - - assert_eq!(Session::current_index(), 2); - assert_eq!(Session::validators(), vec![1, 2, 3]); - - assert!(!ImOnline::is_online(0)); - assert!(!ImOnline::is_online(1)); - assert!(!ImOnline::is_online(2)); - - // when - let _ = heartbeat(1, 2, 0, 1.into()).unwrap(); - - // then - assert!(ImOnline::is_online(0)); - assert!(!ImOnline::is_online(1)); - assert!(!ImOnline::is_online(2)); - - // and when - let _ = heartbeat(1, 2, 2, 3.into()).unwrap(); - - // then - assert!(ImOnline::is_online(0)); - assert!(!ImOnline::is_online(1)); - assert!(ImOnline::is_online(2)); - }); + new_test_ext().execute_with(|| { + advance_session(); + // given + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + assert_eq!(Session::validators(), Vec::::new()); + // enact the change and buffer another one + advance_session(); + + assert_eq!(Session::current_index(), 2); + assert_eq!(Session::validators(), vec![1, 2, 3]); + + assert!(!ImOnline::is_online(0)); + assert!(!ImOnline::is_online(1)); + assert!(!ImOnline::is_online(2)); + + // when + let _ = heartbeat(1, 2, 0, 1.into()).unwrap(); + + // then + assert!(ImOnline::is_online(0)); + assert!(!ImOnline::is_online(1)); + assert!(!ImOnline::is_online(2)); + + // and when + let _ = heartbeat(1, 2, 2, 3.into()).unwrap(); + + // then + assert!(ImOnline::is_online(0)); + assert!(!ImOnline::is_online(1)); + assert!(ImOnline::is_online(2)); + }); } #[test] fn late_heartbeat_should_fail() { - new_test_ext().execute_with(|| { - advance_session(); - // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 
2, 4, 4, 5, 6])); - assert_eq!(Session::validators(), Vec::::new()); - // enact the change and buffer another one - advance_session(); - - assert_eq!(Session::current_index(), 2); - assert_eq!(Session::validators(), vec![1, 2, 3]); - - // when - assert_noop!(heartbeat(1, 3, 0, 1.into()), "Transaction is outdated"); - assert_noop!(heartbeat(1, 1, 0, 1.into()), "Transaction is outdated"); - }); + new_test_ext().execute_with(|| { + advance_session(); + // given + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 4, 4, 5, 6])); + assert_eq!(Session::validators(), Vec::::new()); + // enact the change and buffer another one + advance_session(); + + assert_eq!(Session::current_index(), 2); + assert_eq!(Session::validators(), vec![1, 2, 3]); + + // when + assert_noop!(heartbeat(1, 3, 0, 1.into()), "Transaction is outdated"); + assert_noop!(heartbeat(1, 1, 0, 1.into()), "Transaction is outdated"); + }); } #[test] fn should_generate_heartbeats() { - use frame_support::traits::OffchainWorker; - - let mut ext = new_test_ext(); - let (offchain, _state) = TestOffchainExt::new(); - let (pool, state) = TestTransactionPoolExt::new(); - ext.register_extension(OffchainExt::new(offchain)); - ext.register_extension(TransactionPoolExt::new(pool)); - - ext.execute_with(|| { - // given - let block = 1; - System::set_block_number(block); - UintAuthorityId::set_all_keys(vec![0, 1, 2]); - // buffer new validators - Session::rotate_session(); - // enact the change and buffer another one - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); - Session::rotate_session(); - - // when - ImOnline::offchain_worker(block); - - // then - let transaction = state.write().transactions.pop().unwrap(); - // All validators have `0` as their session key, so we generate 2 transactions. - assert_eq!(state.read().transactions.len(), 2); - - // check stuff about the transaction. 
- let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { - crate::mock::Call::ImOnline(crate::Call::heartbeat(h, _)) => h, - e => panic!("Unexpected call: {:?}", e), - }; - - assert_eq!(heartbeat, Heartbeat { - block_number: block, - network_state: sp_io::offchain::network_state().unwrap(), - session_index: 2, - authority_index: 2, - }); - }); + use frame_support::traits::OffchainWorker; + + let mut ext = new_test_ext(); + let (offchain, _state) = TestOffchainExt::new(); + let (pool, state) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + // given + let block = 1; + System::set_block_number(block); + UintAuthorityId::set_all_keys(vec![0, 1, 2]); + // buffer new validators + Session::rotate_session(); + // enact the change and buffer another one + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + Session::rotate_session(); + + // when + ImOnline::offchain_worker(block); + + // then + let transaction = state.write().transactions.pop().unwrap(); + // All validators have `0` as their session key, so we generate 2 transactions. + assert_eq!(state.read().transactions.len(), 2); + + // check stuff about the transaction. 
+ let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); + let heartbeat = match ex.call { + crate::mock::Call::ImOnline(crate::Call::heartbeat(h, _)) => h, + e => panic!("Unexpected call: {:?}", e), + }; + + assert_eq!( + heartbeat, + Heartbeat { + block_number: block, + network_state: sp_io::offchain::network_state().unwrap(), + session_index: 2, + authority_index: 2, + } + ); + }); } #[test] fn should_cleanup_received_heartbeats_on_session_end() { - new_test_ext().execute_with(|| { - advance_session(); + new_test_ext().execute_with(|| { + advance_session(); - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3])); - assert_eq!(Session::validators(), Vec::::new()); + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3])); + assert_eq!(Session::validators(), Vec::::new()); - // enact the change and buffer another one - advance_session(); + // enact the change and buffer another one + advance_session(); - assert_eq!(Session::current_index(), 2); - assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(Session::current_index(), 2); + assert_eq!(Session::validators(), vec![1, 2, 3]); - // send an heartbeat from authority id 0 at session 2 - let _ = heartbeat(1, 2, 0, 1.into()).unwrap(); + // send an heartbeat from authority id 0 at session 2 + let _ = heartbeat(1, 2, 0, 1.into()).unwrap(); - // the heartbeat is stored - assert!(!ImOnline::received_heartbeats(&2, &0).is_none()); + // the heartbeat is stored + assert!(!ImOnline::received_heartbeats(&2, &0).is_none()); - advance_session(); + advance_session(); - // after the session has ended we have already processed the heartbeat - // message, so any messages received on the previous session should have - // been pruned. - assert!(ImOnline::received_heartbeats(&2, &0).is_none()); - }); + // after the session has ended we have already processed the heartbeat + // message, so any messages received on the previous session should have + // been pruned. 
+ assert!(ImOnline::received_heartbeats(&2, &0).is_none()); + }); } #[test] fn should_mark_online_validator_when_block_is_authored() { - use pallet_authorship::EventHandler; - - new_test_ext().execute_with(|| { - advance_session(); - // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); - assert_eq!(Session::validators(), Vec::::new()); - // enact the change and buffer another one - advance_session(); - - assert_eq!(Session::current_index(), 2); - assert_eq!(Session::validators(), vec![1, 2, 3]); - - for i in 0..3 { - assert!(!ImOnline::is_online(i)); - } - - // when - ImOnline::note_author(1); - ImOnline::note_uncle(2, 0); - - // then - assert!(ImOnline::is_online(0)); - assert!(ImOnline::is_online(1)); - assert!(!ImOnline::is_online(2)); - }); + use pallet_authorship::EventHandler; + + new_test_ext().execute_with(|| { + advance_session(); + // given + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + assert_eq!(Session::validators(), Vec::::new()); + // enact the change and buffer another one + advance_session(); + + assert_eq!(Session::current_index(), 2); + assert_eq!(Session::validators(), vec![1, 2, 3]); + + for i in 0..3 { + assert!(!ImOnline::is_online(i)); + } + + // when + ImOnline::note_author(1); + ImOnline::note_uncle(2, 0); + + // then + assert!(ImOnline::is_online(0)); + assert!(ImOnline::is_online(1)); + assert!(!ImOnline::is_online(2)); + }); } #[test] fn should_not_send_a_report_if_already_online() { - use pallet_authorship::EventHandler; - - let mut ext = new_test_ext(); - let (offchain, _state) = TestOffchainExt::new(); - let (pool, pool_state) = TestTransactionPoolExt::new(); - ext.register_extension(OffchainExt::new(offchain)); - ext.register_extension(TransactionPoolExt::new(pool)); - - ext.execute_with(|| { - advance_session(); - // given - VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); - assert_eq!(Session::validators(), Vec::::new()); - // enact the change and buffer 
another one - advance_session(); - assert_eq!(Session::current_index(), 2); - assert_eq!(Session::validators(), vec![1, 2, 3]); - ImOnline::note_author(2); - ImOnline::note_uncle(3, 0); - - // when - UintAuthorityId::set_all_keys(vec![0]); // all authorities use pallet_session key 0 - // we expect error, since the authority is already online. - let mut res = ImOnline::send_heartbeats(4).unwrap(); - assert_eq!(res.next().unwrap().unwrap(), ()); - assert_eq!(res.next().unwrap().unwrap_err(), OffchainErr::AlreadyOnline(1)); - assert_eq!(res.next().unwrap().unwrap_err(), OffchainErr::AlreadyOnline(2)); - assert_eq!(res.next(), None); - - // then - let transaction = pool_state.write().transactions.pop().unwrap(); - // All validators have `0` as their session key, but we should only produce 1 heartbeat. - assert_eq!(pool_state.read().transactions.len(), 0); - // check stuff about the transaction. - let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); - let heartbeat = match ex.call { - crate::mock::Call::ImOnline(crate::Call::heartbeat(h, _)) => h, - e => panic!("Unexpected call: {:?}", e), - }; - - assert_eq!(heartbeat, Heartbeat { - block_number: 4, - network_state: sp_io::offchain::network_state().unwrap(), - session_index: 2, - authority_index: 0, - }); - }); + use pallet_authorship::EventHandler; + + let mut ext = new_test_ext(); + let (offchain, _state) = TestOffchainExt::new(); + let (pool, pool_state) = TestTransactionPoolExt::new(); + ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + ext.execute_with(|| { + advance_session(); + // given + VALIDATORS.with(|l| *l.borrow_mut() = Some(vec![1, 2, 3, 4, 5, 6])); + assert_eq!(Session::validators(), Vec::::new()); + // enact the change and buffer another one + advance_session(); + assert_eq!(Session::current_index(), 2); + assert_eq!(Session::validators(), vec![1, 2, 3]); + ImOnline::note_author(2); + ImOnline::note_uncle(3, 0); + + // when + 
UintAuthorityId::set_all_keys(vec![0]); // all authorities use pallet_session key 0 + // we expect error, since the authority is already online. + let mut res = ImOnline::send_heartbeats(4).unwrap(); + assert_eq!(res.next().unwrap().unwrap(), ()); + assert_eq!( + res.next().unwrap().unwrap_err(), + OffchainErr::AlreadyOnline(1) + ); + assert_eq!( + res.next().unwrap().unwrap_err(), + OffchainErr::AlreadyOnline(2) + ); + assert_eq!(res.next(), None); + + // then + let transaction = pool_state.write().transactions.pop().unwrap(); + // All validators have `0` as their session key, but we should only produce 1 heartbeat. + assert_eq!(pool_state.read().transactions.len(), 0); + // check stuff about the transaction. + let ex: Extrinsic = Decode::decode(&mut &*transaction).unwrap(); + let heartbeat = match ex.call { + crate::mock::Call::ImOnline(crate::Call::heartbeat(h, _)) => h, + e => panic!("Unexpected call: {:?}", e), + }; + + assert_eq!( + heartbeat, + Heartbeat { + block_number: 4, + network_state: sp_io::offchain::network_state().unwrap(), + session_index: 2, + authority_index: 0, + } + ); + }); } diff --git a/frame/indices/src/address.rs b/frame/indices/src/address.rs index f4487eeb69..aa16d68bc9 100644 --- a/frame/indices/src/address.rs +++ b/frame/indices/src/address.rs @@ -16,143 +16,163 @@ //! Address type that is union of index and id for an account. +use crate::Member; +use codec::{Decode, Encode, Error, Input, Output}; +use sp_std::convert::TryInto; #[cfg(feature = "std")] use std::fmt; -use sp_std::convert::TryInto; -use crate::Member; -use codec::{Encode, Decode, Input, Output, Error}; /// An indices-aware address, which can be either a direct `AccountId` or /// an index. #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] #[cfg_attr(feature = "std", derive(Hash))] -pub enum Address where - AccountId: Member, - AccountIndex: Member, +pub enum Address +where + AccountId: Member, + AccountIndex: Member, { - /// It's an account ID (pubkey). 
- Id(AccountId), - /// It's an account index. - Index(AccountIndex), + /// It's an account ID (pubkey). + Id(AccountId), + /// It's an account index. + Index(AccountIndex), } #[cfg(feature = "std")] -impl fmt::Display for Address where - AccountId: Member, - AccountIndex: Member, +impl fmt::Display for Address +where + AccountId: Member, + AccountIndex: Member, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } } -impl From for Address where - AccountId: Member, - AccountIndex: Member, +impl From for Address +where + AccountId: Member, + AccountIndex: Member, { - fn from(a: AccountId) -> Self { - Address::Id(a) - } + fn from(a: AccountId) -> Self { + Address::Id(a) + } } fn need_more_than(a: T, b: T) -> Result { - if a < b { Ok(b) } else { Err("Invalid range".into()) } + if a < b { + Ok(b) + } else { + Err("Invalid range".into()) + } } -impl Decode for Address where - AccountId: Member + Decode, - AccountIndex: Member + Decode + PartialOrd + Ord + From + Copy, +impl Decode for Address +where + AccountId: Member + Decode, + AccountIndex: Member + Decode + PartialOrd + Ord + From + Copy, { - fn decode(input: &mut I) -> Result { - Ok(match input.read_byte()? { - x @ 0x00..=0xef => Address::Index(AccountIndex::from(x as u32)), - 0xfc => Address::Index(AccountIndex::from( - need_more_than(0xef, u16::decode(input)?)? as u32 - )), - 0xfd => Address::Index(AccountIndex::from( - need_more_than(0xffff, u32::decode(input)?)? - )), - 0xfe => Address::Index( - need_more_than(0xffffffffu32.into(), Decode::decode(input)?)? - ), - 0xff => Address::Id(Decode::decode(input)?), - _ => return Err("Invalid address variant".into()), - }) - } + fn decode(input: &mut I) -> Result { + Ok(match input.read_byte()? 
{ + x @ 0x00..=0xef => Address::Index(AccountIndex::from(x as u32)), + 0xfc => Address::Index(AccountIndex::from( + need_more_than(0xef, u16::decode(input)?)? as u32, + )), + 0xfd => Address::Index(AccountIndex::from(need_more_than( + 0xffff, + u32::decode(input)?, + )?)), + 0xfe => Address::Index(need_more_than( + 0xffffffffu32.into(), + Decode::decode(input)?, + )?), + 0xff => Address::Id(Decode::decode(input)?), + _ => return Err("Invalid address variant".into()), + }) + } } -impl Encode for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, +impl Encode for Address +where + AccountId: Member + Encode, + AccountIndex: + Member + Encode + PartialOrd + Ord + Copy + From + TryInto, { - fn encode_to(&self, dest: &mut T) { - match *self { - Address::Id(ref i) => { - dest.push_byte(255); - dest.push(i); - } - Address::Index(i) => { - let maybe_u32: Result = i.try_into(); - if let Ok(x) = maybe_u32 { - if x > 0xffff { - dest.push_byte(253); - dest.push(&x); - } - else if x >= 0xf0 { - dest.push_byte(252); - dest.push(&(x as u16)); - } - else { - dest.push_byte(x as u8); - } - - } else { - dest.push_byte(254); - dest.push(&i); - } - }, - } - } + fn encode_to(&self, dest: &mut T) { + match *self { + Address::Id(ref i) => { + dest.push_byte(255); + dest.push(i); + } + Address::Index(i) => { + let maybe_u32: Result = i.try_into(); + if let Ok(x) = maybe_u32 { + if x > 0xffff { + dest.push_byte(253); + dest.push(&x); + } else if x >= 0xf0 { + dest.push_byte(252); + dest.push(&(x as u16)); + } else { + dest.push_byte(x as u8); + } + } else { + dest.push_byte(254); + dest.push(&i); + } + } + } + } } -impl codec::EncodeLike for Address where - AccountId: Member + Encode, - AccountIndex: Member + Encode + PartialOrd + Ord + Copy + From + TryInto, -{} +impl codec::EncodeLike for Address +where + AccountId: Member + Encode, + AccountIndex: + Member + Encode + PartialOrd + Ord + Copy + From + TryInto, +{ +} -impl 
Default for Address where - AccountId: Member + Default, - AccountIndex: Member, +impl Default for Address +where + AccountId: Member + Default, + AccountIndex: Member, { - fn default() -> Self { - Address::Id(Default::default()) - } + fn default() -> Self { + Address::Id(Default::default()) + } } #[cfg(test)] mod tests { - use codec::{Encode, Decode}; - - type Address = super::Address<[u8; 8], u32>; - fn index(i: u32) -> Address { super::Address::Index(i) } - fn id(i: [u8; 8]) -> Address { super::Address::Id(i) } - - fn compare(a: Option
, d: &[u8]) { - if let Some(ref a) = a { - assert_eq!(d, &a.encode()[..]); - } - assert_eq!(Address::decode(&mut &d[..]).ok(), a); - } - - #[test] - fn it_should_work() { - compare(Some(index(2)), &[2][..]); - compare(None, &[240][..]); - compare(None, &[252, 239, 0][..]); - compare(Some(index(240)), &[252, 240, 0][..]); - compare(Some(index(304)), &[252, 48, 1][..]); - compare(None, &[253, 255, 255, 0, 0][..]); - compare(Some(index(0x10000)), &[253, 0, 0, 1, 0][..]); - compare(Some(id([42, 69, 42, 69, 42, 69, 42, 69])), &[255, 42, 69, 42, 69, 42, 69, 42, 69][..]); - } + use codec::{Decode, Encode}; + + type Address = super::Address<[u8; 8], u32>; + fn index(i: u32) -> Address { + super::Address::Index(i) + } + fn id(i: [u8; 8]) -> Address { + super::Address::Id(i) + } + + fn compare(a: Option
, d: &[u8]) { + if let Some(ref a) = a { + assert_eq!(d, &a.encode()[..]); + } + assert_eq!(Address::decode(&mut &d[..]).ok(), a); + } + + #[test] + fn it_should_work() { + compare(Some(index(2)), &[2][..]); + compare(None, &[240][..]); + compare(None, &[252, 239, 0][..]); + compare(Some(index(240)), &[252, 240, 0][..]); + compare(Some(index(304)), &[252, 48, 1][..]); + compare(None, &[253, 255, 255, 0, 0][..]); + compare(Some(index(0x10000)), &[253, 0, 0, 1, 0][..]); + compare( + Some(id([42, 69, 42, 69, 42, 69, 42, 69])), + &[255, 42, 69, 42, 69, 42, 69, 42, 69][..], + ); + } } diff --git a/frame/indices/src/lib.rs b/frame/indices/src/lib.rs index 2a66af7e7f..0a267b14b4 100644 --- a/frame/indices/src/lib.rs +++ b/frame/indices/src/lib.rs @@ -19,55 +19,56 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; +use self::address::Address as RawAddress; use codec::Codec; -use sp_runtime::traits::{ - StaticLookup, Member, LookupError, Zero, One, BlakeTwo256, Hash, Saturating, AtLeast32Bit -}; -use frame_support::{Parameter, decl_module, decl_error, decl_event, decl_storage, ensure}; -use frame_support::weights::{Weight, MINIMUM_WEIGHT, SimpleDispatchInfo}; use frame_support::dispatch::DispatchResult; -use frame_support::traits::{Currency, ReservableCurrency, Get, BalanceStatus::Reserved}; use frame_support::storage::migration::take_storage_value; -use frame_system::{ensure_signed, ensure_root}; -use self::address::Address as RawAddress; +use frame_support::traits::{BalanceStatus::Reserved, Currency, Get, ReservableCurrency}; +use frame_support::weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, Parameter}; +use frame_system::{ensure_root, ensure_signed}; +use sp_runtime::traits::{ + AtLeast32Bit, BlakeTwo256, Hash, LookupError, Member, One, Saturating, StaticLookup, Zero, +}; +use sp_std::prelude::*; -mod mock; pub mod address; +mod mock; mod tests; pub type Address = 
RawAddress<::AccountId, ::AccountIndex>; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// The module's config trait. pub trait Trait: frame_system::Trait { - /// Type used for storing an account's index; implies the maximum number of accounts the system - /// can hold. - type AccountIndex: Parameter + Member + Codec + Default + AtLeast32Bit + Copy; + /// Type used for storing an account's index; implies the maximum number of accounts the system + /// can hold. + type AccountIndex: Parameter + Member + Codec + Default + AtLeast32Bit + Copy; - /// The currency trait. - type Currency: ReservableCurrency; + /// The currency trait. + type Currency: ReservableCurrency; - /// The deposit needed for reserving an index. - type Deposit: Get>; + /// The deposit needed for reserving an index. + type Deposit: Get>; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; } decl_storage! { - trait Store for Module as Indices { - /// The lookup from index to account. - pub Accounts build(|config: &GenesisConfig| - config.indices.iter() - .cloned() - .map(|(a, b)| (a, (b, Zero::zero()))) - .collect::>() - ): map hasher(blake2_128_concat) T::AccountIndex => Option<(T::AccountId, BalanceOf)>; - } - add_extra_genesis { - config(indices): Vec<(T::AccountIndex, T::AccountId)>; - } + trait Store for Module as Indices { + /// The lookup from index to account. + pub Accounts build(|config: &GenesisConfig| + config.indices.iter() + .cloned() + .map(|(a, b)| (a, (b, Zero::zero()))) + .collect::>() + ): map hasher(blake2_128_concat) T::AccountIndex => Option<(T::AccountId, BalanceOf)>; + } + add_extra_genesis { + config(indices): Vec<(T::AccountIndex, T::AccountId)>; + } } decl_event!( @@ -83,199 +84,208 @@ decl_event!( ); decl_error! { - pub enum Error for Module { - /// The index was not already assigned. 
- NotAssigned, - /// The index is assigned to another account. - NotOwner, - /// The index was not available. - InUse, - /// The source and destination accounts are identical. - NotTransfer, - } + pub enum Error for Module { + /// The index was not already assigned. + NotAssigned, + /// The index is assigned to another account. + NotOwner, + /// The index was not available. + InUse, + /// The source and destination accounts are identical. + NotTransfer, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin, system = frame_system { - fn deposit_event() = default; - - fn on_initialize() -> Weight { - Self::migrations(); - - MINIMUM_WEIGHT - } - - /// Assign an previously unassigned index. - /// - /// Payment: `Deposit` is reserved from the sender account. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// - `index`: the index to be claimed. This must not be in use. - /// - /// Emits `IndexAssigned` if successful. - /// - /// # - /// - `O(1)`. - /// - One storage mutation (codec `O(1)`). - /// - One reserve operation. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn claim(origin, index: T::AccountIndex) { - let who = ensure_signed(origin)?; - - Accounts::::try_mutate(index, |maybe_value| { - ensure!(maybe_value.is_none(), Error::::InUse); - *maybe_value = Some((who.clone(), T::Deposit::get())); - T::Currency::reserve(&who, T::Deposit::get()) - })?; - Self::deposit_event(RawEvent::IndexAssigned(who, index)); - } - - /// Assign an index already owned by the sender to another account. The balance reservation - /// is effectively transferred to the new account. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// - `index`: the index to be re-assigned. This must be owned by the sender. - /// - `new`: the new owner of the index. This function is a no-op if it is equal to sender. - /// - /// Emits `IndexAssigned` if successful. - /// - /// # - /// - `O(1)`. 
- /// - One storage mutation (codec `O(1)`). - /// - One transfer operation. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn transfer(origin, new: T::AccountId, index: T::AccountIndex) { - let who = ensure_signed(origin)?; - ensure!(who != new, Error::::NotTransfer); - - Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { - let (account, amount) = maybe_value.take().ok_or(Error::::NotAssigned)?; - ensure!(&account == &who, Error::::NotOwner); - let lost = T::Currency::repatriate_reserved(&who, &new, amount, Reserved)?; - *maybe_value = Some((new.clone(), amount.saturating_sub(lost))); - Ok(()) - })?; - Self::deposit_event(RawEvent::IndexAssigned(new, index)); - } - - /// Free up an index owned by the sender. - /// - /// Payment: Any previous deposit placed for the index is unreserved in the sender account. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must own the index. - /// - /// - `index`: the index to be freed. This must be owned by the sender. - /// - /// Emits `IndexFreed` if successful. - /// - /// # - /// - `O(1)`. - /// - One storage mutation (codec `O(1)`). - /// - One reserve operation. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn free(origin, index: T::AccountIndex) { - let who = ensure_signed(origin)?; - - Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { - let (account, amount) = maybe_value.take().ok_or(Error::::NotAssigned)?; - ensure!(&account == &who, Error::::NotOwner); - T::Currency::unreserve(&who, amount); - Ok(()) - })?; - Self::deposit_event(RawEvent::IndexFreed(index)); - } - - /// Force an index to an account. This doesn't require a deposit. If the index is already - /// held, then any deposit is reimbursed to its current owner. - /// - /// The dispatch origin for this call must be _Root_. - /// - /// - `index`: the index to be (re-)assigned. - /// - `new`: the new owner of the index. 
This function is a no-op if it is equal to sender. - /// - /// Emits `IndexAssigned` if successful. - /// - /// # - /// - `O(1)`. - /// - One storage mutation (codec `O(1)`). - /// - Up to one reserve operation. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn force_transfer(origin, new: T::AccountId, index: T::AccountIndex) { - ensure_root(origin)?; - - Accounts::::mutate(index, |maybe_value| { - if let Some((account, amount)) = maybe_value.take() { - T::Currency::unreserve(&account, amount); - } - *maybe_value = Some((new.clone(), Zero::zero())); - }); - Self::deposit_event(RawEvent::IndexAssigned(new, index)); - } - } + pub struct Module for enum Call where origin: T::Origin, system = frame_system { + fn deposit_event() = default; + + fn on_initialize() -> Weight { + Self::migrations(); + + MINIMUM_WEIGHT + } + + /// Assign an previously unassigned index. + /// + /// Payment: `Deposit` is reserved from the sender account. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `index`: the index to be claimed. This must not be in use. + /// + /// Emits `IndexAssigned` if successful. + /// + /// # + /// - `O(1)`. + /// - One storage mutation (codec `O(1)`). + /// - One reserve operation. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn claim(origin, index: T::AccountIndex) { + let who = ensure_signed(origin)?; + + Accounts::::try_mutate(index, |maybe_value| { + ensure!(maybe_value.is_none(), Error::::InUse); + *maybe_value = Some((who.clone(), T::Deposit::get())); + T::Currency::reserve(&who, T::Deposit::get()) + })?; + Self::deposit_event(RawEvent::IndexAssigned(who, index)); + } + + /// Assign an index already owned by the sender to another account. The balance reservation + /// is effectively transferred to the new account. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `index`: the index to be re-assigned. 
This must be owned by the sender. + /// - `new`: the new owner of the index. This function is a no-op if it is equal to sender. + /// + /// Emits `IndexAssigned` if successful. + /// + /// # + /// - `O(1)`. + /// - One storage mutation (codec `O(1)`). + /// - One transfer operation. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn transfer(origin, new: T::AccountId, index: T::AccountIndex) { + let who = ensure_signed(origin)?; + ensure!(who != new, Error::::NotTransfer); + + Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { + let (account, amount) = maybe_value.take().ok_or(Error::::NotAssigned)?; + ensure!(&account == &who, Error::::NotOwner); + let lost = T::Currency::repatriate_reserved(&who, &new, amount, Reserved)?; + *maybe_value = Some((new.clone(), amount.saturating_sub(lost))); + Ok(()) + })?; + Self::deposit_event(RawEvent::IndexAssigned(new, index)); + } + + /// Free up an index owned by the sender. + /// + /// Payment: Any previous deposit placed for the index is unreserved in the sender account. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must own the index. + /// + /// - `index`: the index to be freed. This must be owned by the sender. + /// + /// Emits `IndexFreed` if successful. + /// + /// # + /// - `O(1)`. + /// - One storage mutation (codec `O(1)`). + /// - One reserve operation. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn free(origin, index: T::AccountIndex) { + let who = ensure_signed(origin)?; + + Accounts::::try_mutate(index, |maybe_value| -> DispatchResult { + let (account, amount) = maybe_value.take().ok_or(Error::::NotAssigned)?; + ensure!(&account == &who, Error::::NotOwner); + T::Currency::unreserve(&who, amount); + Ok(()) + })?; + Self::deposit_event(RawEvent::IndexFreed(index)); + } + + /// Force an index to an account. This doesn't require a deposit. 
If the index is already + /// held, then any deposit is reimbursed to its current owner. + /// + /// The dispatch origin for this call must be _Root_. + /// + /// - `index`: the index to be (re-)assigned. + /// - `new`: the new owner of the index. This function is a no-op if it is equal to sender. + /// + /// Emits `IndexAssigned` if successful. + /// + /// # + /// - `O(1)`. + /// - One storage mutation (codec `O(1)`). + /// - Up to one reserve operation. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn force_transfer(origin, new: T::AccountId, index: T::AccountIndex) { + ensure_root(origin)?; + + Accounts::::mutate(index, |maybe_value| { + if let Some((account, amount)) = maybe_value.take() { + T::Currency::unreserve(&account, amount); + } + *maybe_value = Some((new.clone(), Zero::zero())); + }); + Self::deposit_event(RawEvent::IndexAssigned(new, index)); + } + } } impl Module { - // PUBLIC IMMUTABLES - - /// Lookup an T::AccountIndex to get an Id, if there's one there. - pub fn lookup_index(index: T::AccountIndex) -> Option { - Accounts::::get(index).map(|x| x.0) - } - - /// Lookup an address to get an Id, if there's one there. - pub fn lookup_address( - a: address::Address - ) -> Option { - match a { - address::Address::Id(i) => Some(i), - address::Address::Index(i) => Self::lookup_index(i), - } - } - - /// Do any migrations. - fn migrations() { - if let Some(set_count) = take_storage_value::(b"Indices", b"NextEnumSet", b"") { - // migrations need doing. 
- let set_size: T::AccountIndex = 64.into(); - - let mut set_index: T::AccountIndex = Zero::zero(); - while set_index < set_count { - let maybe_accounts = take_storage_value::>(b"Indices", b"EnumSet", BlakeTwo256::hash_of(&set_index).as_ref()); - if let Some(accounts) = maybe_accounts { - for (item_index, target) in accounts.into_iter().enumerate() { - if target != T::AccountId::default() && !T::Currency::total_balance(&target).is_zero() { - let index = set_index * set_size + T::AccountIndex::from(item_index as u32); - Accounts::::insert(index, (target, BalanceOf::::zero())); - } - } - } else { - break; - } - set_index += One::one(); - } - } - } + // PUBLIC IMMUTABLES + + /// Lookup an T::AccountIndex to get an Id, if there's one there. + pub fn lookup_index(index: T::AccountIndex) -> Option { + Accounts::::get(index).map(|x| x.0) + } + + /// Lookup an address to get an Id, if there's one there. + pub fn lookup_address( + a: address::Address, + ) -> Option { + match a { + address::Address::Id(i) => Some(i), + address::Address::Index(i) => Self::lookup_index(i), + } + } + + /// Do any migrations. + fn migrations() { + if let Some(set_count) = + take_storage_value::(b"Indices", b"NextEnumSet", b"") + { + // migrations need doing. 
+ let set_size: T::AccountIndex = 64.into(); + + let mut set_index: T::AccountIndex = Zero::zero(); + while set_index < set_count { + let maybe_accounts = take_storage_value::>( + b"Indices", + b"EnumSet", + BlakeTwo256::hash_of(&set_index).as_ref(), + ); + if let Some(accounts) = maybe_accounts { + for (item_index, target) in accounts.into_iter().enumerate() { + if target != T::AccountId::default() + && !T::Currency::total_balance(&target).is_zero() + { + let index = + set_index * set_size + T::AccountIndex::from(item_index as u32); + Accounts::::insert(index, (target, BalanceOf::::zero())); + } + } + } else { + break; + } + set_index += One::one(); + } + } + } } impl StaticLookup for Module { - type Source = address::Address; - type Target = T::AccountId; + type Source = address::Address; + type Target = T::AccountId; - fn lookup(a: Self::Source) -> Result { - Self::lookup_address(a).ok_or(LookupError) - } + fn lookup(a: Self::Source) -> Result { + Self::lookup_address(a).ok_or(LookupError) + } - fn unlookup(a: Self::Target) -> Self::Source { - address::Address::Id(a) - } + fn unlookup(a: Self::Target) -> Self::Source { + address::Address::Id(a) + } } diff --git a/frame/indices/src/mock.rs b/frame/indices/src/mock.rs index b8786c2dc8..8f8c3354aa 100644 --- a/frame/indices/src/mock.rs +++ b/frame/indices/src/mock.rs @@ -18,23 +18,23 @@ #![cfg(test)] -use sp_runtime::testing::Header; -use sp_runtime::Perbill; -use sp_core::H256; -use frame_support::{impl_outer_origin, impl_outer_event, parameter_types, weights::Weight}; use crate::{self as indices, Module, Trait}; +use frame_support::{impl_outer_event, impl_outer_origin, parameter_types, weights::Weight}; use frame_system as system; use pallet_balances as balances; +use sp_core::H256; +use sp_runtime::testing::Header; +use sp_runtime::Perbill; -impl_outer_origin!{ - pub enum Origin for Test where system = frame_system {} +impl_outer_origin! 
{ + pub enum Origin for Test where system = frame_system {} } -impl_outer_event!{ - pub enum MetaEvent for Test { - system, - balances, - indices, - } +impl_outer_event! { + pub enum MetaEvent for Test { + system, + balances, + indices, + } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. @@ -42,64 +42,68 @@ impl_outer_event!{ pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Call = (); - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = u64; - type Lookup = Indices; - type Header = Header; - type Event = MetaEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = u64; + type Lookup = Indices; + type Header = Header; + type Event = MetaEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = 
pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! { - pub const ExistentialDeposit: u64 = 1; + pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { - type Balance = u64; - type DustRemoval = (); - type Event = MetaEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type DustRemoval = (); + type Event = MetaEvent; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const Deposit: u64 = 1; + pub const Deposit: u64 = 1; } impl Trait for Test { - type AccountIndex = u64; - type Currency = Balances; - type Deposit = Deposit; - type Event = MetaEvent; + type AccountIndex = u64; + type Currency = Balances; + type Deposit = Deposit; + type Event = MetaEvent; } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], - }.assimilate_storage(&mut t).unwrap(); - t.into() + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 20), (3, 30), (4, 40), (5, 50), (6, 60)], + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() } pub type System = frame_system::Module; diff --git a/frame/indices/src/tests.rs b/frame/indices/src/tests.rs index 9e434cfbe2..1ef2128654 100644 --- a/frame/indices/src/tests.rs +++ b/frame/indices/src/tests.rs @@ -18,86 +18,95 @@ #![cfg(test)] -use super::*; use super::mock::*; -use frame_support::{assert_ok, assert_noop}; +use super::*; +use frame_support::{assert_noop, assert_ok}; use pallet_balances::Error as BalancesError; #[test] fn claiming_should_work() { - new_test_ext().execute_with(|| { - assert_noop!(Indices::claim(Some(0).into(), 0), BalancesError::::InsufficientBalance); - 
assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_noop!(Indices::claim(Some(2).into(), 0), Error::::InUse); - assert_eq!(Balances::reserved_balance(1), 1); - }); + new_test_ext().execute_with(|| { + assert_noop!( + Indices::claim(Some(0).into(), 0), + BalancesError::::InsufficientBalance + ); + assert_ok!(Indices::claim(Some(1).into(), 0)); + assert_noop!(Indices::claim(Some(2).into(), 0), Error::::InUse); + assert_eq!(Balances::reserved_balance(1), 1); + }); } #[test] fn freeing_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_ok!(Indices::claim(Some(2).into(), 1)); - assert_noop!(Indices::free(Some(0).into(), 0), Error::::NotOwner); - assert_noop!(Indices::free(Some(1).into(), 1), Error::::NotOwner); - assert_noop!(Indices::free(Some(1).into(), 2), Error::::NotAssigned); - assert_ok!(Indices::free(Some(1).into(), 0)); - assert_eq!(Balances::reserved_balance(1), 0); - assert_noop!(Indices::free(Some(1).into(), 0), Error::::NotAssigned); - }); + new_test_ext().execute_with(|| { + assert_ok!(Indices::claim(Some(1).into(), 0)); + assert_ok!(Indices::claim(Some(2).into(), 1)); + assert_noop!(Indices::free(Some(0).into(), 0), Error::::NotOwner); + assert_noop!(Indices::free(Some(1).into(), 1), Error::::NotOwner); + assert_noop!(Indices::free(Some(1).into(), 2), Error::::NotAssigned); + assert_ok!(Indices::free(Some(1).into(), 0)); + assert_eq!(Balances::reserved_balance(1), 0); + assert_noop!(Indices::free(Some(1).into(), 0), Error::::NotAssigned); + }); } #[test] fn indexing_lookup_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_ok!(Indices::claim(Some(2).into(), 1)); - assert_eq!(Indices::lookup_index(0), Some(1)); - assert_eq!(Indices::lookup_index(1), Some(2)); - assert_eq!(Indices::lookup_index(2), None); - }); + new_test_ext().execute_with(|| { + assert_ok!(Indices::claim(Some(1).into(), 0)); + assert_ok!(Indices::claim(Some(2).into(), 
1)); + assert_eq!(Indices::lookup_index(0), Some(1)); + assert_eq!(Indices::lookup_index(1), Some(2)); + assert_eq!(Indices::lookup_index(2), None); + }); } #[test] fn reclaim_index_on_accounts_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_ok!(Indices::free(Some(1).into(), 0)); - assert_ok!(Indices::claim(Some(2).into(), 0)); - assert_eq!(Indices::lookup_index(0), Some(2)); - assert_eq!(Balances::reserved_balance(2), 1); - }); + new_test_ext().execute_with(|| { + assert_ok!(Indices::claim(Some(1).into(), 0)); + assert_ok!(Indices::free(Some(1).into(), 0)); + assert_ok!(Indices::claim(Some(2).into(), 0)); + assert_eq!(Indices::lookup_index(0), Some(2)); + assert_eq!(Balances::reserved_balance(2), 1); + }); } #[test] fn transfer_index_on_accounts_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_noop!(Indices::transfer(Some(1).into(), 2, 1), Error::::NotAssigned); - assert_noop!(Indices::transfer(Some(2).into(), 3, 0), Error::::NotOwner); - assert_ok!(Indices::transfer(Some(1).into(), 3, 0)); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::reserved_balance(3), 1); - assert_eq!(Indices::lookup_index(0), Some(3)); - }); + new_test_ext().execute_with(|| { + assert_ok!(Indices::claim(Some(1).into(), 0)); + assert_noop!( + Indices::transfer(Some(1).into(), 2, 1), + Error::::NotAssigned + ); + assert_noop!( + Indices::transfer(Some(2).into(), 3, 0), + Error::::NotOwner + ); + assert_ok!(Indices::transfer(Some(1).into(), 3, 0)); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::reserved_balance(3), 1); + assert_eq!(Indices::lookup_index(0), Some(3)); + }); } #[test] fn force_transfer_index_on_preowned_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Indices::claim(Some(1).into(), 0)); - assert_ok!(Indices::force_transfer(Origin::ROOT, 3, 0)); - assert_eq!(Balances::reserved_balance(1), 0); - 
assert_eq!(Balances::reserved_balance(3), 0); - assert_eq!(Indices::lookup_index(0), Some(3)); - }); + new_test_ext().execute_with(|| { + assert_ok!(Indices::claim(Some(1).into(), 0)); + assert_ok!(Indices::force_transfer(Origin::ROOT, 3, 0)); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::reserved_balance(3), 0); + assert_eq!(Indices::lookup_index(0), Some(3)); + }); } #[test] fn force_transfer_index_on_free_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Indices::force_transfer(Origin::ROOT, 3, 0)); - assert_eq!(Balances::reserved_balance(3), 0); - assert_eq!(Indices::lookup_index(0), Some(3)); - }); + new_test_ext().execute_with(|| { + assert_ok!(Indices::force_transfer(Origin::ROOT, 3, 0)); + assert_eq!(Balances::reserved_balance(3), 0); + assert_eq!(Indices::lookup_index(0), Some(3)); + }); } diff --git a/frame/membership/src/lib.rs b/frame/membership/src/lib.rs index e968be19a6..7eac62649b 100644 --- a/frame/membership/src/lib.rs +++ b/frame/membership/src/lib.rs @@ -22,60 +22,60 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; use frame_support::{ - decl_module, decl_storage, decl_event, decl_error, - traits::{ChangeMembers, InitializeMembers, EnsureOrigin}, - weights::SimpleDispatchInfo, + decl_error, decl_event, decl_module, decl_storage, + traits::{ChangeMembers, EnsureOrigin, InitializeMembers}, + weights::SimpleDispatchInfo, }; use frame_system::{self as system, ensure_root, ensure_signed}; +use sp_std::prelude::*; -pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +pub trait Trait: frame_system::Trait { + /// The overarching event type. + type Event: From> + Into<::Event>; - /// Required origin for adding a member (though can always be Root). - type AddOrigin: EnsureOrigin; + /// Required origin for adding a member (though can always be Root). 
+ type AddOrigin: EnsureOrigin; - /// Required origin for removing a member (though can always be Root). - type RemoveOrigin: EnsureOrigin; + /// Required origin for removing a member (though can always be Root). + type RemoveOrigin: EnsureOrigin; - /// Required origin for adding and removing a member in a single action. - type SwapOrigin: EnsureOrigin; + /// Required origin for adding and removing a member in a single action. + type SwapOrigin: EnsureOrigin; - /// Required origin for resetting membership. - type ResetOrigin: EnsureOrigin; + /// Required origin for resetting membership. + type ResetOrigin: EnsureOrigin; - /// Required origin for setting or resetting the prime member. - type PrimeOrigin: EnsureOrigin; + /// Required origin for setting or resetting the prime member. + type PrimeOrigin: EnsureOrigin; - /// The receiver of the signal for when the membership has been initialized. This happens pre- - /// genesis and will usually be the same as `MembershipChanged`. If you need to do something - /// different on initialization, then you can change this accordingly. - type MembershipInitialized: InitializeMembers; + /// The receiver of the signal for when the membership has been initialized. This happens pre- + /// genesis and will usually be the same as `MembershipChanged`. If you need to do something + /// different on initialization, then you can change this accordingly. + type MembershipInitialized: InitializeMembers; - /// The receiver of the signal for when the membership has changed. - type MembershipChanged: ChangeMembers; + /// The receiver of the signal for when the membership has changed. + type MembershipChanged: ChangeMembers; } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Membership { - /// The current membership, stored as an ordered Vec. - Members get(fn members): Vec; - - /// The current prime member, if one exists. 
- Prime get(fn prime): Option; - } - add_extra_genesis { - config(members): Vec; - config(phantom): sp_std::marker::PhantomData; - build(|config: &Self| { - let mut members = config.members.clone(); - members.sort(); - T::MembershipInitialized::initialize_members(&members); - >::put(members); - }) - } + trait Store for Module, I: Instance=DefaultInstance> as Membership { + /// The current membership, stored as an ordered Vec. + Members get(fn members): Vec; + + /// The current prime member, if one exists. + Prime get(fn prime): Option; + } + add_extra_genesis { + config(members): Vec; + config(phantom): sp_std::marker::PhantomData; + build(|config: &Self| { + let mut members = config.members.clone(); + members.sort(); + T::MembershipInitialized::initialize_members(&members); + >::put(members); + }) + } } decl_event!( @@ -99,412 +99,453 @@ decl_event!( ); decl_error! { - /// Error for the nicks module. - pub enum Error for Module, I: Instance> { - /// Already a member. - AlreadyMember, - /// Not a member. - NotMember, - } + /// Error for the nicks module. + pub enum Error for Module, I: Instance> { + /// Already a member. + AlreadyMember, + /// Not a member. + NotMember, + } } decl_module! { - pub struct Module, I: Instance=DefaultInstance> - for enum Call - where origin: T::Origin - { - fn deposit_event() = default; - - /// Add a member `who` to the set. - /// - /// May only be called from `AddOrigin` or root. - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn add_member(origin, who: T::AccountId) { - T::AddOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let mut members = >::get(); - let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; - members.insert(location, who.clone()); - >::put(&members); - - T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]); - - Self::deposit_event(RawEvent::MemberAdded); - } - - /// Remove a member `who` from the set. 
- /// - /// May only be called from `RemoveOrigin` or root. - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn remove_member(origin, who: T::AccountId) { - T::RemoveOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let mut members = >::get(); - let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; - members.remove(location); - >::put(&members); - - T::MembershipChanged::change_members_sorted(&[], &[who], &members[..]); - Self::rejig_prime(&members); - - Self::deposit_event(RawEvent::MemberRemoved); - } - - /// Swap out one member `remove` for another `add`. - /// - /// May only be called from `SwapOrigin` or root. - /// - /// Prime membership is *not* passed from `remove` to `add`, if extant. - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn swap_member(origin, remove: T::AccountId, add: T::AccountId) { - T::SwapOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - if remove == add { return Ok(()) } - - let mut members = >::get(); - let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; - let _ = members.binary_search(&add).err().ok_or(Error::::AlreadyMember)?; - members[location] = add.clone(); - members.sort(); - >::put(&members); - - T::MembershipChanged::change_members_sorted( - &[add], - &[remove], - &members[..], - ); - Self::rejig_prime(&members); - - Self::deposit_event(RawEvent::MembersSwapped); - } - - /// Change the membership to a new set, disregarding the existing membership. Be nice and - /// pass `members` pre-sorted. - /// - /// May only be called from `ResetOrigin` or root. 
- #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn reset_members(origin, members: Vec) { - T::ResetOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let mut members = members; - members.sort(); - >::mutate(|m| { - T::MembershipChanged::set_members_sorted(&members[..], m); - Self::rejig_prime(&members); - *m = members; - }); - - - Self::deposit_event(RawEvent::MembersReset); - } - - /// Swap out the sending member for some other key `new`. - /// - /// May only be called from `Signed` origin of a current member. - /// - /// Prime membership is passed from the origin account to `new`, if extant. - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn change_key(origin, new: T::AccountId) { - let remove = ensure_signed(origin)?; - - if remove != new { - let mut members = >::get(); - let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; - let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; - members[location] = new.clone(); - members.sort(); - >::put(&members); - - T::MembershipChanged::change_members_sorted( - &[new.clone()], - &[remove.clone()], - &members[..], - ); - - if Prime::::get() == Some(remove) { - Prime::::put(&new); - T::MembershipChanged::set_prime(Some(new)); - } - } - - Self::deposit_event(RawEvent::KeyChanged); - } - - /// Set the prime member. Must be a current member. - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn set_prime(origin, who: T::AccountId) { - T::PrimeOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; - Prime::::put(&who); - T::MembershipChanged::set_prime(Some(who)); - } - - /// Remove the prime member if it exists. 
- #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn clear_prime(origin) { - T::PrimeOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - Prime::::kill(); - T::MembershipChanged::set_prime(None); - } - } + pub struct Module, I: Instance=DefaultInstance> + for enum Call + where origin: T::Origin + { + fn deposit_event() = default; + + /// Add a member `who` to the set. + /// + /// May only be called from `AddOrigin` or root. + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn add_member(origin, who: T::AccountId) { + T::AddOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let mut members = >::get(); + let location = members.binary_search(&who).err().ok_or(Error::::AlreadyMember)?; + members.insert(location, who.clone()); + >::put(&members); + + T::MembershipChanged::change_members_sorted(&[who], &[], &members[..]); + + Self::deposit_event(RawEvent::MemberAdded); + } + + /// Remove a member `who` from the set. + /// + /// May only be called from `RemoveOrigin` or root. + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn remove_member(origin, who: T::AccountId) { + T::RemoveOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let mut members = >::get(); + let location = members.binary_search(&who).ok().ok_or(Error::::NotMember)?; + members.remove(location); + >::put(&members); + + T::MembershipChanged::change_members_sorted(&[], &[who], &members[..]); + Self::rejig_prime(&members); + + Self::deposit_event(RawEvent::MemberRemoved); + } + + /// Swap out one member `remove` for another `add`. + /// + /// May only be called from `SwapOrigin` or root. + /// + /// Prime membership is *not* passed from `remove` to `add`, if extant. 
+ #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn swap_member(origin, remove: T::AccountId, add: T::AccountId) { + T::SwapOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + if remove == add { return Ok(()) } + + let mut members = >::get(); + let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; + let _ = members.binary_search(&add).err().ok_or(Error::::AlreadyMember)?; + members[location] = add.clone(); + members.sort(); + >::put(&members); + + T::MembershipChanged::change_members_sorted( + &[add], + &[remove], + &members[..], + ); + Self::rejig_prime(&members); + + Self::deposit_event(RawEvent::MembersSwapped); + } + + /// Change the membership to a new set, disregarding the existing membership. Be nice and + /// pass `members` pre-sorted. + /// + /// May only be called from `ResetOrigin` or root. + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn reset_members(origin, members: Vec) { + T::ResetOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let mut members = members; + members.sort(); + >::mutate(|m| { + T::MembershipChanged::set_members_sorted(&members[..], m); + Self::rejig_prime(&members); + *m = members; + }); + + + Self::deposit_event(RawEvent::MembersReset); + } + + /// Swap out the sending member for some other key `new`. + /// + /// May only be called from `Signed` origin of a current member. + /// + /// Prime membership is passed from the origin account to `new`, if extant. 
+ #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn change_key(origin, new: T::AccountId) { + let remove = ensure_signed(origin)?; + + if remove != new { + let mut members = >::get(); + let location = members.binary_search(&remove).ok().ok_or(Error::::NotMember)?; + let _ = members.binary_search(&new).err().ok_or(Error::::AlreadyMember)?; + members[location] = new.clone(); + members.sort(); + >::put(&members); + + T::MembershipChanged::change_members_sorted( + &[new.clone()], + &[remove.clone()], + &members[..], + ); + + if Prime::::get() == Some(remove) { + Prime::::put(&new); + T::MembershipChanged::set_prime(Some(new)); + } + } + + Self::deposit_event(RawEvent::KeyChanged); + } + + /// Set the prime member. Must be a current member. + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn set_prime(origin, who: T::AccountId) { + T::PrimeOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + Self::members().binary_search(&who).ok().ok_or(Error::::NotMember)?; + Prime::::put(&who); + T::MembershipChanged::set_prime(Some(who)); + } + + /// Remove the prime member if it exists. 
+ #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn clear_prime(origin) { + T::PrimeOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + Prime::::kill(); + T::MembershipChanged::set_prime(None); + } + } } impl, I: Instance> Module { - fn rejig_prime(members: &[T::AccountId]) { - if let Some(prime) = Prime::::get() { - match members.binary_search(&prime) { - Ok(_) => T::MembershipChanged::set_prime(Some(prime)), - Err(_) => Prime::::kill(), - } - } - } + fn rejig_prime(members: &[T::AccountId]) { + if let Some(prime) = Prime::::get() { + match members.binary_search(&prime) { + Ok(_) => T::MembershipChanged::set_prime(Some(prime)), + Err(_) => Prime::::kill(), + } + } + } } #[cfg(test)] mod tests { - use super::*; - - use std::cell::RefCell; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types - }; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, testing::Header}; - use frame_system::EnsureSignedBy; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - ord_parameter_types! { - pub const One: u64 = 1; - pub const Two: u64 = 2; - pub const Three: u64 = 3; - pub const Four: u64 = 4; - pub const Five: u64 = 5; - } - - thread_local! { - static MEMBERS: RefCell> = RefCell::new(vec![]); - static PRIME: RefCell> = RefCell::new(None); - } - - pub struct TestChangeMembers; - impl ChangeMembers for TestChangeMembers { - fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); - old_plus_incoming.extend_from_slice(incoming); - old_plus_incoming.sort(); - let mut new_plus_outgoing = new.to_vec(); - new_plus_outgoing.extend_from_slice(outgoing); - new_plus_outgoing.sort(); - assert_eq!(old_plus_incoming, new_plus_outgoing); - - MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); - PRIME.with(|p| *p.borrow_mut() = None); - } - fn set_prime(who: Option) { - PRIME.with(|p| *p.borrow_mut() = who); - } - } - impl InitializeMembers for TestChangeMembers { - fn initialize_members(members: &[u64]) { - MEMBERS.with(|m| *m.borrow_mut() = members.to_vec()); - } - } - - impl Trait for Test { - type Event = (); - type AddOrigin = EnsureSignedBy; - type 
RemoveOrigin = EnsureSignedBy; - type SwapOrigin = EnsureSignedBy; - type ResetOrigin = EnsureSignedBy; - type PrimeOrigin = EnsureSignedBy; - type MembershipInitialized = TestChangeMembers; - type MembershipChanged = TestChangeMembers; - } - - type Membership = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - GenesisConfig::{ - members: vec![10, 20, 30], - .. Default::default() - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - #[test] - fn query_membership_works() { - new_test_ext().execute_with(|| { - assert_eq!(Membership::members(), vec![10, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![10, 20, 30]); - }); - } - - #[test] - fn prime_member_works() { - new_test_ext().execute_with(|| { - assert_noop!(Membership::set_prime(Origin::signed(4), 20), BadOrigin); - assert_noop!(Membership::set_prime(Origin::signed(5), 15), Error::::NotMember); - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - - assert_ok!(Membership::clear_prime(Origin::signed(5))); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - }); - } - - #[test] - fn add_member_works() { - new_test_ext().execute_with(|| { - assert_noop!(Membership::add_member(Origin::signed(5), 15), BadOrigin); - assert_noop!(Membership::add_member(Origin::signed(1), 10), Error::::AlreadyMember); - assert_ok!(Membership::add_member(Origin::signed(1), 15)); - assert_eq!(Membership::members(), vec![10, 15, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - }); - } - - #[test] - fn remove_member_works() { - 
new_test_ext().execute_with(|| { - assert_noop!(Membership::remove_member(Origin::signed(5), 20), BadOrigin); - assert_noop!(Membership::remove_member(Origin::signed(2), 15), Error::::NotMember); - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); - assert_ok!(Membership::remove_member(Origin::signed(2), 20)); - assert_eq!(Membership::members(), vec![10, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - }); - } - - #[test] - fn swap_member_works() { - new_test_ext().execute_with(|| { - assert_noop!(Membership::swap_member(Origin::signed(5), 10, 25), BadOrigin); - assert_noop!(Membership::swap_member(Origin::signed(3), 15, 25), Error::::NotMember); - assert_noop!(Membership::swap_member(Origin::signed(3), 10, 30), Error::::AlreadyMember); - - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); - assert_ok!(Membership::swap_member(Origin::signed(3), 20, 20)); - assert_eq!(Membership::members(), vec![10, 20, 30]); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - - assert_ok!(Membership::set_prime(Origin::signed(5), 10)); - assert_ok!(Membership::swap_member(Origin::signed(3), 10, 25)); - assert_eq!(Membership::members(), vec![20, 25, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - }); - } - - #[test] - fn swap_member_works_that_does_not_change_order() { - new_test_ext().execute_with(|| { - assert_ok!(Membership::swap_member(Origin::signed(3), 10, 5)); - assert_eq!(Membership::members(), vec![5, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - }); - } - - #[test] - fn change_key_works() { - new_test_ext().execute_with(|| { - assert_ok!(Membership::set_prime(Origin::signed(5), 10)); - 
assert_noop!(Membership::change_key(Origin::signed(3), 25), Error::::NotMember); - assert_noop!(Membership::change_key(Origin::signed(10), 20), Error::::AlreadyMember); - assert_ok!(Membership::change_key(Origin::signed(10), 40)); - assert_eq!(Membership::members(), vec![20, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - assert_eq!(Membership::prime(), Some(40)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - }); - } - - #[test] - fn change_key_works_that_does_not_change_order() { - new_test_ext().execute_with(|| { - assert_ok!(Membership::change_key(Origin::signed(10), 5)); - assert_eq!(Membership::members(), vec![5, 20, 30]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - }); - } - - #[test] - fn reset_members_works() { - new_test_ext().execute_with(|| { - assert_ok!(Membership::set_prime(Origin::signed(5), 20)); - assert_noop!(Membership::reset_members(Origin::signed(1), vec![20, 40, 30]), BadOrigin); - - assert_ok!(Membership::reset_members(Origin::signed(4), vec![20, 40, 30])); - assert_eq!(Membership::members(), vec![20, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - assert_eq!(Membership::prime(), Some(20)); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - - assert_ok!(Membership::reset_members(Origin::signed(4), vec![10, 40, 30])); - assert_eq!(Membership::members(), vec![10, 30, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); - assert_eq!(Membership::prime(), None); - assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); - }); - } + use super::*; + + use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, ord_parameter_types, parameter_types, + weights::Weight, + }; + use sp_core::H256; + use std::cell::RefCell; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. 
`u64` is used as the `AccountId` and no `Signature`s are required. + use frame_system::EnsureSignedBy; + use sp_runtime::{ + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, + }; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + ord_parameter_types! { + pub const One: u64 = 1; + pub const Two: u64 = 2; + pub const Three: u64 = 3; + pub const Four: u64 = 4; + pub const Five: u64 = 5; + } + + thread_local! 
{ + static MEMBERS: RefCell> = RefCell::new(vec![]); + static PRIME: RefCell> = RefCell::new(None); + } + + pub struct TestChangeMembers; + impl ChangeMembers for TestChangeMembers { + fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { + let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); + old_plus_incoming.extend_from_slice(incoming); + old_plus_incoming.sort(); + let mut new_plus_outgoing = new.to_vec(); + new_plus_outgoing.extend_from_slice(outgoing); + new_plus_outgoing.sort(); + assert_eq!(old_plus_incoming, new_plus_outgoing); + + MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); + PRIME.with(|p| *p.borrow_mut() = None); + } + fn set_prime(who: Option) { + PRIME.with(|p| *p.borrow_mut() = who); + } + } + impl InitializeMembers for TestChangeMembers { + fn initialize_members(members: &[u64]) { + MEMBERS.with(|m| *m.borrow_mut() = members.to_vec()); + } + } + + impl Trait for Test { + type Event = (); + type AddOrigin = EnsureSignedBy; + type RemoveOrigin = EnsureSignedBy; + type SwapOrigin = EnsureSignedBy; + type ResetOrigin = EnsureSignedBy; + type PrimeOrigin = EnsureSignedBy; + type MembershipInitialized = TestChangeMembers; + type MembershipChanged = TestChangeMembers; + } + + type Membership = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. + fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + // We use default for brevity, but you can configure as desired if needed. 
+ GenesisConfig:: { + members: vec![10, 20, 30], + ..Default::default() + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } + + #[test] + fn query_membership_works() { + new_test_ext().execute_with(|| { + assert_eq!(Membership::members(), vec![10, 20, 30]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![10, 20, 30]); + }); + } + + #[test] + fn prime_member_works() { + new_test_ext().execute_with(|| { + assert_noop!(Membership::set_prime(Origin::signed(4), 20), BadOrigin); + assert_noop!( + Membership::set_prime(Origin::signed(5), 15), + Error::::NotMember + ); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_eq!(Membership::prime(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + + assert_ok!(Membership::clear_prime(Origin::signed(5))); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + }); + } + + #[test] + fn add_member_works() { + new_test_ext().execute_with(|| { + assert_noop!(Membership::add_member(Origin::signed(5), 15), BadOrigin); + assert_noop!( + Membership::add_member(Origin::signed(1), 10), + Error::::AlreadyMember + ); + assert_ok!(Membership::add_member(Origin::signed(1), 15)); + assert_eq!(Membership::members(), vec![10, 15, 20, 30]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + }); + } + + #[test] + fn remove_member_works() { + new_test_ext().execute_with(|| { + assert_noop!(Membership::remove_member(Origin::signed(5), 20), BadOrigin); + assert_noop!( + Membership::remove_member(Origin::signed(2), 15), + Error::::NotMember + ); + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_ok!(Membership::remove_member(Origin::signed(2), 20)); + assert_eq!(Membership::members(), vec![10, 30]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + }); + } + + 
#[test] + fn swap_member_works() { + new_test_ext().execute_with(|| { + assert_noop!( + Membership::swap_member(Origin::signed(5), 10, 25), + BadOrigin + ); + assert_noop!( + Membership::swap_member(Origin::signed(3), 15, 25), + Error::::NotMember + ); + assert_noop!( + Membership::swap_member(Origin::signed(3), 10, 30), + Error::::AlreadyMember + ); + + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_ok!(Membership::swap_member(Origin::signed(3), 20, 20)); + assert_eq!(Membership::members(), vec![10, 20, 30]); + assert_eq!(Membership::prime(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + + assert_ok!(Membership::set_prime(Origin::signed(5), 10)); + assert_ok!(Membership::swap_member(Origin::signed(3), 10, 25)); + assert_eq!(Membership::members(), vec![20, 25, 30]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + }); + } + + #[test] + fn swap_member_works_that_does_not_change_order() { + new_test_ext().execute_with(|| { + assert_ok!(Membership::swap_member(Origin::signed(3), 10, 5)); + assert_eq!(Membership::members(), vec![5, 20, 30]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + }); + } + + #[test] + fn change_key_works() { + new_test_ext().execute_with(|| { + assert_ok!(Membership::set_prime(Origin::signed(5), 10)); + assert_noop!( + Membership::change_key(Origin::signed(3), 25), + Error::::NotMember + ); + assert_noop!( + Membership::change_key(Origin::signed(10), 20), + Error::::AlreadyMember + ); + assert_ok!(Membership::change_key(Origin::signed(10), 40)); + assert_eq!(Membership::members(), vec![20, 30, 40]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), Some(40)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + }); + } + + #[test] + fn 
change_key_works_that_does_not_change_order() { + new_test_ext().execute_with(|| { + assert_ok!(Membership::change_key(Origin::signed(10), 5)); + assert_eq!(Membership::members(), vec![5, 20, 30]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + }); + } + + #[test] + fn reset_members_works() { + new_test_ext().execute_with(|| { + assert_ok!(Membership::set_prime(Origin::signed(5), 20)); + assert_noop!( + Membership::reset_members(Origin::signed(1), vec![20, 40, 30]), + BadOrigin + ); + + assert_ok!(Membership::reset_members( + Origin::signed(4), + vec![20, 40, 30] + )); + assert_eq!(Membership::members(), vec![20, 30, 40]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), Some(20)); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + + assert_ok!(Membership::reset_members( + Origin::signed(4), + vec![10, 40, 30] + )); + assert_eq!(Membership::members(), vec![10, 30, 40]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), Membership::members()); + assert_eq!(Membership::prime(), None); + assert_eq!(PRIME.with(|m| *m.borrow()), Membership::prime()); + }); + } } diff --git a/frame/metadata/src/lib.rs b/frame/metadata/src/lib.rs index bec69999b2..b87ac2b0e2 100644 --- a/frame/metadata/src/lib.rs +++ b/frame/metadata/src/lib.rs @@ -23,12 +23,12 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde::Serialize; -#[cfg(feature = "std")] -use codec::{Decode, Input, Error}; +use codec::{Decode, Error, Input}; use codec::{Encode, Output}; -use sp_std::vec::Vec; +#[cfg(feature = "std")] +use serde::Serialize; use sp_core::RuntimeDebug; +use sp_std::vec::Vec; #[cfg(feature = "std")] type StringBuf = String; @@ -46,73 +46,94 @@ type StringBuf = &'static str; /// /// For example a `&'static [ &'static str ]` can be decoded to a `Vec`. 
#[derive(Clone)] -pub enum DecodeDifferent where B: 'static, O: 'static { - Encode(B), - Decoded(O), +pub enum DecodeDifferent +where + B: 'static, + O: 'static, +{ + Encode(B), + Decoded(O), } -impl Encode for DecodeDifferent where B: Encode + 'static, O: Encode + 'static { - fn encode_to(&self, dest: &mut W) { - match self { - DecodeDifferent::Encode(b) => b.encode_to(dest), - DecodeDifferent::Decoded(o) => o.encode_to(dest), - } - } +impl Encode for DecodeDifferent +where + B: Encode + 'static, + O: Encode + 'static, +{ + fn encode_to(&self, dest: &mut W) { + match self { + DecodeDifferent::Encode(b) => b.encode_to(dest), + DecodeDifferent::Decoded(o) => o.encode_to(dest), + } + } } -impl codec::EncodeLike for DecodeDifferent where B: Encode + 'static, O: Encode + 'static {} +impl codec::EncodeLike for DecodeDifferent +where + B: Encode + 'static, + O: Encode + 'static, +{ +} #[cfg(feature = "std")] -impl Decode for DecodeDifferent where B: 'static, O: Decode + 'static { - fn decode(input: &mut I) -> Result { - ::decode(input).map(|val| { - DecodeDifferent::Decoded(val) - }) - } +impl Decode for DecodeDifferent +where + B: 'static, + O: Decode + 'static, +{ + fn decode(input: &mut I) -> Result { + ::decode(input).map(|val| DecodeDifferent::Decoded(val)) + } } impl PartialEq for DecodeDifferent where - B: Encode + Eq + PartialEq + 'static, - O: Encode + Eq + PartialEq + 'static, + B: Encode + Eq + PartialEq + 'static, + O: Encode + Eq + PartialEq + 'static, { - fn eq(&self, other: &Self) -> bool { - self.encode() == other.encode() - } + fn eq(&self, other: &Self) -> bool { + self.encode() == other.encode() + } } impl Eq for DecodeDifferent - where B: Encode + Eq + PartialEq + 'static, O: Encode + Eq + PartialEq + 'static -{} +where + B: Encode + Eq + PartialEq + 'static, + O: Encode + Eq + PartialEq + 'static, +{ +} impl sp_std::fmt::Debug for DecodeDifferent - where - B: sp_std::fmt::Debug + Eq + 'static, - O: sp_std::fmt::Debug + Eq + 'static, +where + B: 
sp_std::fmt::Debug + Eq + 'static, + O: sp_std::fmt::Debug + Eq + 'static, { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - match self { - DecodeDifferent::Encode(b) => b.fmt(f), - DecodeDifferent::Decoded(o) => o.fmt(f), - } - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + match self { + DecodeDifferent::Encode(b) => b.fmt(f), + DecodeDifferent::Decoded(o) => o.fmt(f), + } + } } #[cfg(feature = "std")] impl serde::Serialize for DecodeDifferent - where - B: serde::Serialize + 'static, - O: serde::Serialize + 'static, +where + B: serde::Serialize + 'static, + O: serde::Serialize + 'static, { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { - match self { - DecodeDifferent::Encode(b) => b.serialize(serializer), - DecodeDifferent::Decoded(o) => o.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + match self { + DecodeDifferent::Encode(b) => b.serialize(serializer), + DecodeDifferent::Decoded(o) => o.serialize(serializer), + } + } } -pub type DecodeDifferentArray = DecodeDifferent<&'static [B], Vec>; +pub type DecodeDifferentArray = DecodeDifferent<&'static [B], Vec>; type DecodeDifferentStr = DecodeDifferent<&'static str, StringBuf>; @@ -120,113 +141,118 @@ type DecodeDifferentStr = DecodeDifferent<&'static str, StringBuf>; #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct FunctionMetadata { - pub name: DecodeDifferentStr, - pub arguments: DecodeDifferentArray, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub arguments: DecodeDifferentArray, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// All the metadata about a function argument. 
#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct FunctionArgumentMetadata { - pub name: DecodeDifferentStr, - pub ty: DecodeDifferentStr, + pub name: DecodeDifferentStr, + pub ty: DecodeDifferentStr, } /// Newtype wrapper for support encoding functions (actual the result of the function). #[derive(Clone, Eq)] -pub struct FnEncode(pub fn() -> E) where E: Encode + 'static; +pub struct FnEncode(pub fn() -> E) +where + E: Encode + 'static; impl Encode for FnEncode { - fn encode_to(&self, dest: &mut W) { - self.0().encode_to(dest); - } + fn encode_to(&self, dest: &mut W) { + self.0().encode_to(dest); + } } impl codec::EncodeLike for FnEncode {} impl PartialEq for FnEncode { - fn eq(&self, other: &Self) -> bool { - self.0().eq(&other.0()) - } + fn eq(&self, other: &Self) -> bool { + self.0().eq(&other.0()) + } } impl sp_std::fmt::Debug for FnEncode { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - self.0().fmt(f) - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + self.0().fmt(f) + } } #[cfg(feature = "std")] impl serde::Serialize for FnEncode { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { - self.0().serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0().serialize(serializer) + } } /// All the metadata about an outer event. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct OuterEventMetadata { - pub name: DecodeDifferentStr, - pub events: DecodeDifferentArray< - (&'static str, FnEncode<&'static [EventMetadata]>), - (StringBuf, Vec) - >, + pub name: DecodeDifferentStr, + pub events: DecodeDifferentArray< + (&'static str, FnEncode<&'static [EventMetadata]>), + (StringBuf, Vec), + >, } /// All the metadata about an event. 
#[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct EventMetadata { - pub name: DecodeDifferentStr, - pub arguments: DecodeDifferentArray<&'static str, StringBuf>, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub arguments: DecodeDifferentArray<&'static str, StringBuf>, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// All the metadata about one storage entry. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct StorageEntryMetadata { - pub name: DecodeDifferentStr, - pub modifier: StorageEntryModifier, - pub ty: StorageEntryType, - pub default: ByteGetter, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub modifier: StorageEntryModifier, + pub ty: StorageEntryType, + pub default: ByteGetter, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// All the metadata about one module constant. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct ModuleConstantMetadata { - pub name: DecodeDifferentStr, - pub ty: DecodeDifferentStr, - pub value: ByteGetter, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub ty: DecodeDifferentStr, + pub value: ByteGetter, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// All the metadata about a module error. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct ErrorMetadata { - pub name: DecodeDifferentStr, - pub documentation: DecodeDifferentArray<&'static str, StringBuf>, + pub name: DecodeDifferentStr, + pub documentation: DecodeDifferentArray<&'static str, StringBuf>, } /// All the metadata about errors in a module. 
pub trait ModuleErrorMetadata { - fn metadata() -> &'static [ErrorMetadata]; + fn metadata() -> &'static [ErrorMetadata]; } impl ModuleErrorMetadata for &'static str { - fn metadata() -> &'static [ErrorMetadata] { - &[] - } + fn metadata() -> &'static [ErrorMetadata] { + &[] + } } /// A technical trait to store lazy initiated vec value as static dyn pointer. pub trait DefaultByte: Send + Sync { - fn default_byte(&self) -> Vec; + fn default_byte(&self) -> Vec; } /// Wrapper over dyn pointer for accessing a cached once byte value. @@ -237,85 +263,88 @@ pub struct DefaultByteGetter(pub &'static dyn DefaultByte); pub type ByteGetter = DecodeDifferent>; impl Encode for DefaultByteGetter { - fn encode_to(&self, dest: &mut W) { - self.0.default_byte().encode_to(dest) - } + fn encode_to(&self, dest: &mut W) { + self.0.default_byte().encode_to(dest) + } } impl codec::EncodeLike for DefaultByteGetter {} impl PartialEq for DefaultByteGetter { - fn eq(&self, other: &DefaultByteGetter) -> bool { - let left = self.0.default_byte(); - let right = other.0.default_byte(); - left.eq(&right) - } + fn eq(&self, other: &DefaultByteGetter) -> bool { + let left = self.0.default_byte(); + let right = other.0.default_byte(); + left.eq(&right) + } } -impl Eq for DefaultByteGetter { } +impl Eq for DefaultByteGetter {} #[cfg(feature = "std")] impl serde::Serialize for DefaultByteGetter { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { - self.0.default_byte().serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.0.default_byte().serialize(serializer) + } } impl sp_std::fmt::Debug for DefaultByteGetter { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - self.0.default_byte().fmt(f) - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + self.0.default_byte().fmt(f) + } } /// Hasher used by storage maps #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] 
#[cfg_attr(feature = "std", derive(Decode, Serialize))] pub enum StorageHasher { - Blake2_128, - Blake2_256, - Blake2_128Concat, - Twox128, - Twox256, - Twox64Concat, - Identity, + Blake2_128, + Blake2_256, + Blake2_128Concat, + Twox128, + Twox256, + Twox64Concat, + Identity, } /// A storage entry type. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub enum StorageEntryType { - Plain(DecodeDifferentStr), - Map { - hasher: StorageHasher, - key: DecodeDifferentStr, - value: DecodeDifferentStr, - // is_linked flag previously, unused now to keep backwards compat - unused: bool, - }, - DoubleMap { - hasher: StorageHasher, - key1: DecodeDifferentStr, - key2: DecodeDifferentStr, - value: DecodeDifferentStr, - key2_hasher: StorageHasher, - }, + Plain(DecodeDifferentStr), + Map { + hasher: StorageHasher, + key: DecodeDifferentStr, + value: DecodeDifferentStr, + // is_linked flag previously, unused now to keep backwards compat + unused: bool, + }, + DoubleMap { + hasher: StorageHasher, + key1: DecodeDifferentStr, + key2: DecodeDifferentStr, + value: DecodeDifferentStr, + key2_hasher: StorageHasher, + }, } /// A storage entry modifier. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub enum StorageEntryModifier { - Optional, - Default, + Optional, + Default, } /// All metadata of the storage. #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct StorageMetadata { - /// The common prefix used by all storage entries. - pub prefix: DecodeDifferent<&'static str, StringBuf>, - pub entries: DecodeDifferent<&'static [StorageEntryMetadata], Vec>, + /// The common prefix used by all storage entries. 
+ pub prefix: DecodeDifferent<&'static str, StringBuf>, + pub entries: DecodeDifferent<&'static [StorageEntryMetadata], Vec>, } /// Metadata prefixed by a u32 for reserved usage @@ -327,10 +356,10 @@ pub struct RuntimeMetadataPrefixed(pub u32, pub RuntimeMetadata); #[derive(Eq, Encode, PartialEq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct ExtrinsicMetadata { - /// Extrinsic version. - pub version: u8, - /// The signed extensions in the order they appear in the extrinsic. - pub signed_extensions: Vec, + /// Extrinsic version. + pub version: u8, + /// The signed extensions in the order they appear in the extrinsic. + pub signed_extensions: Vec, } /// The metadata of a runtime. @@ -339,58 +368,58 @@ pub struct ExtrinsicMetadata { #[derive(Eq, Encode, PartialEq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub enum RuntimeMetadata { - /// Unused; enum filler. - V0(RuntimeMetadataDeprecated), - /// Version 1 for runtime metadata. No longer used. - V1(RuntimeMetadataDeprecated), - /// Version 2 for runtime metadata. No longer used. - V2(RuntimeMetadataDeprecated), - /// Version 3 for runtime metadata. No longer used. - V3(RuntimeMetadataDeprecated), - /// Version 4 for runtime metadata. No longer used. - V4(RuntimeMetadataDeprecated), - /// Version 5 for runtime metadata. No longer used. - V5(RuntimeMetadataDeprecated), - /// Version 6 for runtime metadata. No longer used. - V6(RuntimeMetadataDeprecated), - /// Version 7 for runtime metadata. No longer used. - V7(RuntimeMetadataDeprecated), - /// Version 8 for runtime metadata. No longer used. - V8(RuntimeMetadataDeprecated), - /// Version 9 for runtime metadata. No longer used. - V9(RuntimeMetadataDeprecated), - /// Version 10 for runtime metadata. No longer used. - V10(RuntimeMetadataDeprecated), - /// Version 11 for runtime metadata. - V11(RuntimeMetadataV11), + /// Unused; enum filler. 
+ V0(RuntimeMetadataDeprecated), + /// Version 1 for runtime metadata. No longer used. + V1(RuntimeMetadataDeprecated), + /// Version 2 for runtime metadata. No longer used. + V2(RuntimeMetadataDeprecated), + /// Version 3 for runtime metadata. No longer used. + V3(RuntimeMetadataDeprecated), + /// Version 4 for runtime metadata. No longer used. + V4(RuntimeMetadataDeprecated), + /// Version 5 for runtime metadata. No longer used. + V5(RuntimeMetadataDeprecated), + /// Version 6 for runtime metadata. No longer used. + V6(RuntimeMetadataDeprecated), + /// Version 7 for runtime metadata. No longer used. + V7(RuntimeMetadataDeprecated), + /// Version 8 for runtime metadata. No longer used. + V8(RuntimeMetadataDeprecated), + /// Version 9 for runtime metadata. No longer used. + V9(RuntimeMetadataDeprecated), + /// Version 10 for runtime metadata. No longer used. + V10(RuntimeMetadataDeprecated), + /// Version 11 for runtime metadata. + V11(RuntimeMetadataV11), } /// Enum that should fail. #[derive(Eq, PartialEq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize))] -pub enum RuntimeMetadataDeprecated { } +pub enum RuntimeMetadataDeprecated {} impl Encode for RuntimeMetadataDeprecated { - fn encode_to(&self, _dest: &mut W) {} + fn encode_to(&self, _dest: &mut W) {} } impl codec::EncodeLike for RuntimeMetadataDeprecated {} #[cfg(feature = "std")] impl Decode for RuntimeMetadataDeprecated { - fn decode(_input: &mut I) -> Result { - Err("Decoding is not supported".into()) - } + fn decode(_input: &mut I) -> Result { + Err("Decoding is not supported".into()) + } } /// The metadata of a runtime. #[derive(Eq, Encode, PartialEq, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct RuntimeMetadataV11 { - /// Metadata of all the modules. - pub modules: DecodeDifferentArray, - /// Metadata of the extrinsic. - pub extrinsic: ExtrinsicMetadata, + /// Metadata of all the modules. 
+ pub modules: DecodeDifferentArray, + /// Metadata of the extrinsic. + pub extrinsic: ExtrinsicMetadata, } /// The latest version of the metadata. @@ -400,25 +429,25 @@ pub type RuntimeMetadataLastVersion = RuntimeMetadataV11; #[derive(Clone, PartialEq, Eq, Encode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode, Serialize))] pub struct ModuleMetadata { - pub name: DecodeDifferentStr, - pub storage: Option, StorageMetadata>>, - pub calls: ODFnA, - pub event: ODFnA, - pub constants: DFnA, - pub errors: DFnA, + pub name: DecodeDifferentStr, + pub storage: Option, StorageMetadata>>, + pub calls: ODFnA, + pub event: ODFnA, + pub constants: DFnA, + pub errors: DFnA, } type ODFnA = Option>; type DFnA = DecodeDifferent, Vec>; impl Into for RuntimeMetadataPrefixed { - fn into(self) -> sp_core::OpaqueMetadata { - sp_core::OpaqueMetadata::new(self.encode()) - } + fn into(self) -> sp_core::OpaqueMetadata { + sp_core::OpaqueMetadata::new(self.encode()) + } } impl Into for RuntimeMetadataLastVersion { - fn into(self) -> RuntimeMetadataPrefixed { - RuntimeMetadataPrefixed(META_RESERVED, RuntimeMetadata::V11(self)) - } + fn into(self) -> RuntimeMetadataPrefixed { + RuntimeMetadataPrefixed(META_RESERVED, RuntimeMetadata::V11(self)) + } } diff --git a/frame/nicks/src/lib.rs b/frame/nicks/src/lib.rs index b8a2359450..e4d6f8a2bb 100644 --- a/frame/nicks/src/lib.rs +++ b/frame/nicks/src/lib.rs @@ -38,368 +38,388 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_runtime::{ - traits::{StaticLookup, Zero} -}; use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, - traits::{Currency, EnsureOrigin, ReservableCurrency, OnUnbalanced, Get}, - weights::SimpleDispatchInfo, + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{Currency, EnsureOrigin, Get, OnUnbalanced, ReservableCurrency}, + weights::SimpleDispatchInfo, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self 
as system, ensure_root, ensure_signed}; +use sp_runtime::traits::{StaticLookup, Zero}; +use sp_std::prelude::*; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// The currency trait. - type Currency: ReservableCurrency; + /// The currency trait. + type Currency: ReservableCurrency; - /// Reservation fee. - type ReservationFee: Get>; + /// Reservation fee. + type ReservationFee: Get>; - /// What to do with slashed funds. - type Slashed: OnUnbalanced>; + /// What to do with slashed funds. + type Slashed: OnUnbalanced>; - /// The origin which may forcibly set or remove a name. Root can always do this. - type ForceOrigin: EnsureOrigin; + /// The origin which may forcibly set or remove a name. Root can always do this. + type ForceOrigin: EnsureOrigin; - /// The minimum length a name may be. - type MinLength: Get; + /// The minimum length a name may be. + type MinLength: Get; - /// The maximum length a name may be. - type MaxLength: Get; + /// The maximum length a name may be. + type MaxLength: Get; } decl_storage! { - trait Store for Module as Nicks { - /// The lookup table for names. - NameOf: map hasher(twox_64_concat) T::AccountId => Option<(Vec, BalanceOf)>; - } + trait Store for Module as Nicks { + /// The lookup table for names. + NameOf: map hasher(twox_64_concat) T::AccountId => Option<(Vec, BalanceOf)>; + } } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { - /// A name was set. - NameSet(AccountId), - /// A name was forcibly set. - NameForced(AccountId), - /// A name was changed. 
- NameChanged(AccountId), - /// A name was cleared, and the given balance returned. - NameCleared(AccountId, Balance), - /// A name was removed and the given balance slashed. - NameKilled(AccountId, Balance), - } + pub enum Event + where + AccountId = ::AccountId, + Balance = BalanceOf, + { + /// A name was set. + NameSet(AccountId), + /// A name was forcibly set. + NameForced(AccountId), + /// A name was changed. + NameChanged(AccountId), + /// A name was cleared, and the given balance returned. + NameCleared(AccountId, Balance), + /// A name was removed and the given balance slashed. + NameKilled(AccountId, Balance), + } ); decl_error! { - /// Error for the nicks module. - pub enum Error for Module { - /// A name is too short. - TooShort, - /// A name is too long. - TooLong, - /// An account isn't named. - Unnamed, - } + /// Error for the nicks module. + pub enum Error for Module { + /// A name is too short. + TooShort, + /// A name is too long. + TooLong, + /// An account isn't named. + Unnamed, + } } decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what it's working on. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Reservation fee. - const ReservationFee: BalanceOf = T::ReservationFee::get(); - - /// The minimum length a name may be. - const MinLength: u32 = T::MinLength::get() as u32; - - /// The maximum length a name may be. - const MaxLength: u32 = T::MaxLength::get() as u32; - - /// Set an account's name. The name should be a UTF-8-encoded string by convention, though - /// we don't check it. - /// - /// The name may not be more than `T::MaxLength` bytes, nor less than `T::MinLength` bytes. - /// - /// If the account doesn't already have a name, then a fee of `ReservationFee` is reserved - /// in the account. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - O(1). - /// - At most one balance operation. 
- /// - One storage read/write. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn set_name(origin, name: Vec) { - let sender = ensure_signed(origin)?; - - ensure!(name.len() >= T::MinLength::get(), Error::::TooShort); - ensure!(name.len() <= T::MaxLength::get(), Error::::TooLong); - - let deposit = if let Some((_, deposit)) = >::get(&sender) { - Self::deposit_event(RawEvent::NameSet(sender.clone())); - deposit - } else { - let deposit = T::ReservationFee::get(); - T::Currency::reserve(&sender, deposit.clone())?; - Self::deposit_event(RawEvent::NameChanged(sender.clone())); - deposit - }; - - >::insert(&sender, (name, deposit)); - } - - /// Clear an account's name and return the deposit. Fails if the account was not named. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - O(1). - /// - One balance operation. - /// - One storage read/write. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(70_000_000)] - fn clear_name(origin) { - let sender = ensure_signed(origin)?; - - let deposit = >::take(&sender).ok_or(Error::::Unnamed)?.1; - - let _ = T::Currency::unreserve(&sender, deposit.clone()); - - Self::deposit_event(RawEvent::NameCleared(sender, deposit)); - } - - /// Remove an account's name and take charge of the deposit. - /// - /// Fails if `who` has not been named. The deposit is dealt with through `T::Slashed` - /// imbalance handler. - /// - /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. - /// - /// # - /// - O(1). - /// - One unbalanced handler (probably a balance transfer) - /// - One storage read/write. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(70_000_000)] - fn kill_name(origin, target: ::Source) { - T::ForceOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - // Figure out who we're meant to be clearing. 
- let target = T::Lookup::lookup(target)?; - // Grab their deposit (and check that they have one). - let deposit = >::take(&target).ok_or(Error::::Unnamed)?.1; - // Slash their deposit from them. - T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit.clone()).0); - - Self::deposit_event(RawEvent::NameKilled(target, deposit)); - } - - /// Set a third-party account's name with no deposit. - /// - /// No length checking is done on the name. - /// - /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. - /// - /// # - /// - O(1). - /// - At most one balance operation. - /// - One storage read/write. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(70_000_000)] - fn force_name(origin, target: ::Source, name: Vec) { - T::ForceOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let target = T::Lookup::lookup(target)?; - let deposit = >::get(&target).map(|x| x.1).unwrap_or_else(Zero::zero); - >::insert(&target, (name, deposit)); - - Self::deposit_event(RawEvent::NameForced(target)); - } - } + // Simple declaration of the `Module` type. Lets the macro know what it's working on. + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Reservation fee. + const ReservationFee: BalanceOf = T::ReservationFee::get(); + + /// The minimum length a name may be. + const MinLength: u32 = T::MinLength::get() as u32; + + /// The maximum length a name may be. + const MaxLength: u32 = T::MaxLength::get() as u32; + + /// Set an account's name. The name should be a UTF-8-encoded string by convention, though + /// we don't check it. + /// + /// The name may not be more than `T::MaxLength` bytes, nor less than `T::MinLength` bytes. + /// + /// If the account doesn't already have a name, then a fee of `ReservationFee` is reserved + /// in the account. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// # + /// - O(1). 
+ /// - At most one balance operation. + /// - One storage read/write. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn set_name(origin, name: Vec) { + let sender = ensure_signed(origin)?; + + ensure!(name.len() >= T::MinLength::get(), Error::::TooShort); + ensure!(name.len() <= T::MaxLength::get(), Error::::TooLong); + + let deposit = if let Some((_, deposit)) = >::get(&sender) { + Self::deposit_event(RawEvent::NameSet(sender.clone())); + deposit + } else { + let deposit = T::ReservationFee::get(); + T::Currency::reserve(&sender, deposit.clone())?; + Self::deposit_event(RawEvent::NameChanged(sender.clone())); + deposit + }; + + >::insert(&sender, (name, deposit)); + } + + /// Clear an account's name and return the deposit. Fails if the account was not named. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// # + /// - O(1). + /// - One balance operation. + /// - One storage read/write. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(70_000_000)] + fn clear_name(origin) { + let sender = ensure_signed(origin)?; + + let deposit = >::take(&sender).ok_or(Error::::Unnamed)?.1; + + let _ = T::Currency::unreserve(&sender, deposit.clone()); + + Self::deposit_event(RawEvent::NameCleared(sender, deposit)); + } + + /// Remove an account's name and take charge of the deposit. + /// + /// Fails if `who` has not been named. The deposit is dealt with through `T::Slashed` + /// imbalance handler. + /// + /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. + /// + /// # + /// - O(1). + /// - One unbalanced handler (probably a balance transfer) + /// - One storage read/write. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(70_000_000)] + fn kill_name(origin, target: ::Source) { + T::ForceOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + // Figure out who we're meant to be clearing. 
+ let target = T::Lookup::lookup(target)?; + // Grab their deposit (and check that they have one). + let deposit = >::take(&target).ok_or(Error::::Unnamed)?.1; + // Slash their deposit from them. + T::Slashed::on_unbalanced(T::Currency::slash_reserved(&target, deposit.clone()).0); + + Self::deposit_event(RawEvent::NameKilled(target, deposit)); + } + + /// Set a third-party account's name with no deposit. + /// + /// No length checking is done on the name. + /// + /// The dispatch origin for this call must be _Root_ or match `T::ForceOrigin`. + /// + /// # + /// - O(1). + /// - At most one balance operation. + /// - One storage read/write. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(70_000_000)] + fn force_name(origin, target: ::Source, name: Vec) { + T::ForceOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let target = T::Lookup::lookup(target)?; + let deposit = >::get(&target).map(|x| x.1).unwrap_or_else(Zero::zero); + >::insert(&target, (name, deposit)); + + Self::deposit_event(RawEvent::NameForced(target)); + } + } } #[cfg(test)] mod tests { - use super::*; - - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - ord_parameter_types - }; - use sp_core::H256; - use frame_system::EnsureSignedBy; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - Perbill, testing::Header, traits::{BlakeTwo256, IdentityLookup, BadOrigin}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - parameter_types! { - pub const ReservationFee: u64 = 2; - pub const MinLength: usize = 3; - pub const MaxLength: usize = 16; - } - ord_parameter_types! { - pub const One: u64 = 1; - } - impl Trait for Test { - type Event = (); - type Currency = Balances; - type ReservationFee = ReservationFee; - type Slashed = (); - type ForceOrigin = EnsureSignedBy; - type MinLength = MinLength; - type MaxLength = MaxLength; - } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Nicks = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // We use default for brevity, but you can configure as desired if needed. 
- pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10), - (2, 10), - ], - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - - #[test] - fn kill_name_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); - assert_eq!(Balances::total_balance(&2), 10); - assert_ok!(Nicks::kill_name(Origin::signed(1), 2)); - assert_eq!(Balances::total_balance(&2), 8); - assert_eq!(>::get(2), None); - }); - } - - #[test] - fn force_name_should_work() { - new_test_ext().execute_with(|| { - assert_noop!( - Nicks::set_name(Origin::signed(2), b"Dr. David Brubeck, III".to_vec()), - Error::::TooLong, - ); - - assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); - assert_eq!(Balances::reserved_balance(2), 2); - assert_ok!(Nicks::force_name(Origin::signed(1), 2, b"Dr. David Brubeck, III".to_vec())); - assert_eq!(Balances::reserved_balance(2), 2); - assert_eq!(>::get(2).unwrap(), (b"Dr. David Brubeck, III".to_vec(), 2)); - }); - } - - #[test] - fn normal_operation_should_work() { - new_test_ext().execute_with(|| { - assert_ok!(Nicks::set_name(Origin::signed(1), b"Gav".to_vec())); - assert_eq!(Balances::reserved_balance(1), 2); - assert_eq!(Balances::free_balance(1), 8); - assert_eq!(>::get(1).unwrap().0, b"Gav".to_vec()); - - assert_ok!(Nicks::set_name(Origin::signed(1), b"Gavin".to_vec())); - assert_eq!(Balances::reserved_balance(1), 2); - assert_eq!(Balances::free_balance(1), 8); - assert_eq!(>::get(1).unwrap().0, b"Gavin".to_vec()); - - assert_ok!(Nicks::clear_name(Origin::signed(1))); - assert_eq!(Balances::reserved_balance(1), 0); - assert_eq!(Balances::free_balance(1), 10); - }); - } - - #[test] - fn error_catching_should_work() { - new_test_ext().execute_with(|| { - assert_noop!(Nicks::clear_name(Origin::signed(1)), Error::::Unnamed); - - assert_noop!( - Nicks::set_name(Origin::signed(3), b"Dave".to_vec()), - pallet_balances::Error::::InsufficientBalance - ); - - 
assert_noop!(Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), Error::::TooShort); - assert_noop!( - Nicks::set_name(Origin::signed(1), b"Gavin James Wood, Esquire".to_vec()), - Error::::TooLong - ); - assert_ok!(Nicks::set_name(Origin::signed(1), b"Dave".to_vec())); - assert_noop!(Nicks::kill_name(Origin::signed(2), 1), BadOrigin); - assert_noop!(Nicks::force_name(Origin::signed(2), 1, b"Whatever".to_vec()), BadOrigin); - }); - } + use super::*; + + use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, ord_parameter_types, parameter_types, + weights::Weight, + }; + use frame_system::EnsureSignedBy; + use sp_core::H256; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. + use sp_runtime::{ + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, + }; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + } + parameter_types! { + pub const ExistentialDeposit: u64 = 1; + } + impl pallet_balances::Trait for Test { + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + } + parameter_types! { + pub const ReservationFee: u64 = 2; + pub const MinLength: usize = 3; + pub const MaxLength: usize = 16; + } + ord_parameter_types! { + pub const One: u64 = 1; + } + impl Trait for Test { + type Event = (); + type Currency = Balances; + type ReservationFee = ReservationFee; + type Slashed = (); + type ForceOrigin = EnsureSignedBy; + type MinLength = MinLength; + type MaxLength = MaxLength; + } + type System = frame_system::Module; + type Balances = pallet_balances::Module; + type Nicks = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. + fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + // We use default for brevity, but you can configure as desired if needed. 
+ pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 10)], + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } + + #[test] + fn kill_name_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); + assert_eq!(Balances::total_balance(&2), 10); + assert_ok!(Nicks::kill_name(Origin::signed(1), 2)); + assert_eq!(Balances::total_balance(&2), 8); + assert_eq!(>::get(2), None); + }); + } + + #[test] + fn force_name_should_work() { + new_test_ext().execute_with(|| { + assert_noop!( + Nicks::set_name(Origin::signed(2), b"Dr. David Brubeck, III".to_vec()), + Error::::TooLong, + ); + + assert_ok!(Nicks::set_name(Origin::signed(2), b"Dave".to_vec())); + assert_eq!(Balances::reserved_balance(2), 2); + assert_ok!(Nicks::force_name( + Origin::signed(1), + 2, + b"Dr. David Brubeck, III".to_vec() + )); + assert_eq!(Balances::reserved_balance(2), 2); + assert_eq!( + >::get(2).unwrap(), + (b"Dr. David Brubeck, III".to_vec(), 2) + ); + }); + } + + #[test] + fn normal_operation_should_work() { + new_test_ext().execute_with(|| { + assert_ok!(Nicks::set_name(Origin::signed(1), b"Gav".to_vec())); + assert_eq!(Balances::reserved_balance(1), 2); + assert_eq!(Balances::free_balance(1), 8); + assert_eq!(>::get(1).unwrap().0, b"Gav".to_vec()); + + assert_ok!(Nicks::set_name(Origin::signed(1), b"Gavin".to_vec())); + assert_eq!(Balances::reserved_balance(1), 2); + assert_eq!(Balances::free_balance(1), 8); + assert_eq!(>::get(1).unwrap().0, b"Gavin".to_vec()); + + assert_ok!(Nicks::clear_name(Origin::signed(1))); + assert_eq!(Balances::reserved_balance(1), 0); + assert_eq!(Balances::free_balance(1), 10); + }); + } + + #[test] + fn error_catching_should_work() { + new_test_ext().execute_with(|| { + assert_noop!(Nicks::clear_name(Origin::signed(1)), Error::::Unnamed); + + assert_noop!( + Nicks::set_name(Origin::signed(3), b"Dave".to_vec()), + pallet_balances::Error::::InsufficientBalance + ); + + assert_noop!( + 
Nicks::set_name(Origin::signed(1), b"Ga".to_vec()), + Error::::TooShort + ); + assert_noop!( + Nicks::set_name(Origin::signed(1), b"Gavin James Wood, Esquire".to_vec()), + Error::::TooLong + ); + assert_ok!(Nicks::set_name(Origin::signed(1), b"Dave".to_vec())); + assert_noop!(Nicks::kill_name(Origin::signed(2), 1), BadOrigin); + assert_noop!( + Nicks::force_name(Origin::signed(2), 1, b"Whatever".to_vec()), + BadOrigin + ); + }); + } } diff --git a/frame/offences/benchmarking/src/lib.rs b/frame/offences/benchmarking/src/lib.rs index a88714a89a..1600fbe59b 100644 --- a/frame/offences/benchmarking/src/lib.rs +++ b/frame/offences/benchmarking/src/lib.rs @@ -21,21 +21,24 @@ use sp_std::prelude::*; use sp_std::vec; -use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{account, benchmarks}; use frame_support::traits::{Currency, OnInitialize}; +use frame_system::RawOrigin; -use sp_runtime::{Perbill, traits::{Convert, StaticLookup}}; +use sp_runtime::{ + traits::{Convert, StaticLookup}, + Perbill, +}; use sp_staking::offence::ReportOffence; -use pallet_im_online::{Trait as ImOnlineTrait, Module as ImOnline, UnresponsivenessOffence}; -use pallet_offences::{Trait as OffencesTrait, Module as Offences}; +use pallet_im_online::{Module as ImOnline, Trait as ImOnlineTrait, UnresponsivenessOffence}; +use pallet_offences::{Module as Offences, Trait as OffencesTrait}; +use pallet_session::historical::{IdentificationTuple, Trait as HistoricalTrait}; +use pallet_session::Trait as SessionTrait; use pallet_staking::{ - Module as Staking, Trait as StakingTrait, RewardDestination, ValidatorPrefs, - Exposure, IndividualExposure, ElectionStatus + ElectionStatus, Exposure, IndividualExposure, Module as Staking, RewardDestination, + Trait as StakingTrait, ValidatorPrefs, }; -use pallet_session::Trait as SessionTrait; -use pallet_session::historical::{Trait as HistoricalTrait, IdentificationTuple}; const SEED: u32 = 0; @@ -47,129 +50,149 @@ 
const MAX_DEFERRED_OFFENCES: u32 = 100; pub struct Module(Offences); -pub trait Trait: SessionTrait + StakingTrait + OffencesTrait + ImOnlineTrait + HistoricalTrait {} +pub trait Trait: + SessionTrait + StakingTrait + OffencesTrait + ImOnlineTrait + HistoricalTrait +{ +} fn create_offender(n: u32, nominators: u32) -> Result { - let stash: T::AccountId = account("stash", n, SEED); - let controller: T::AccountId = account("controller", n, SEED); - let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); - let reward_destination = RewardDestination::Staked; - let amount = T::Currency::minimum_balance(); - - Staking::::bond( - RawOrigin::Signed(stash.clone()).into(), - controller_lookup.clone(), - amount.clone(), - reward_destination.clone(), - )?; - - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; - Staking::::validate(RawOrigin::Signed(controller.clone()).into(), validator_prefs)?; - - let mut individual_exposures = vec![]; - - // Create n nominators - for i in 0 .. 
nominators { - let nominator_stash: T::AccountId = account("nominator stash", n * MAX_NOMINATORS + i, SEED); - let nominator_controller: T::AccountId = account("nominator controller", n * MAX_NOMINATORS + i, SEED); - let nominator_controller_lookup: ::Source = T::Lookup::unlookup(nominator_controller.clone()); - - Staking::::bond( - RawOrigin::Signed(nominator_stash.clone()).into(), - nominator_controller_lookup.clone(), - amount, - reward_destination, - )?; - - let selected_validators: Vec<::Source> = vec![controller_lookup.clone()]; - Staking::::nominate(RawOrigin::Signed(nominator_controller.clone()).into(), selected_validators)?; - - individual_exposures.push(IndividualExposure { - who: nominator_controller.clone(), - value: amount.clone(), - }); - } - - let exposure = Exposure { - total: amount.clone() * n.into(), - own: amount, - others: individual_exposures, - }; - let current_era = 0u32; - Staking::::add_era_stakers(current_era.into(), stash.clone().into(), exposure); - - Ok(controller) + let stash: T::AccountId = account("stash", n, SEED); + let controller: T::AccountId = account("controller", n, SEED); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); + let reward_destination = RewardDestination::Staked; + let amount = T::Currency::minimum_balance(); + + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup.clone(), + amount.clone(), + reward_destination.clone(), + )?; + + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate( + RawOrigin::Signed(controller.clone()).into(), + validator_prefs, + )?; + + let mut individual_exposures = vec![]; + + // Create n nominators + for i in 0..nominators { + let nominator_stash: T::AccountId = + account("nominator stash", n * MAX_NOMINATORS + i, SEED); + let nominator_controller: T::AccountId = + account("nominator controller", n * MAX_NOMINATORS + i, SEED); + let nominator_controller_lookup: ::Source = + 
T::Lookup::unlookup(nominator_controller.clone()); + + Staking::::bond( + RawOrigin::Signed(nominator_stash.clone()).into(), + nominator_controller_lookup.clone(), + amount, + reward_destination, + )?; + + let selected_validators: Vec<::Source> = + vec![controller_lookup.clone()]; + Staking::::nominate( + RawOrigin::Signed(nominator_controller.clone()).into(), + selected_validators, + )?; + + individual_exposures.push(IndividualExposure { + who: nominator_controller.clone(), + value: amount.clone(), + }); + } + + let exposure = Exposure { + total: amount.clone() * n.into(), + own: amount, + others: individual_exposures, + }; + let current_era = 0u32; + Staking::::add_era_stakers(current_era.into(), stash.clone().into(), exposure); + + Ok(controller) } -fn make_offenders(num_offenders: u32, num_nominators: u32) -> Result>, &'static str> { - let mut offenders: Vec = vec![]; - - for i in 0 .. num_offenders { - let offender = create_offender::(i, num_nominators)?; - offenders.push(offender); - } - - Ok(offenders.iter() - .map(|id| - ::ValidatorIdOf::convert(id.clone()) - .expect("failed to get validator id from account id")) - .map(|validator_id| - ::FullIdentificationOf::convert(validator_id.clone()) - .map(|full_id| (validator_id, full_id)) - .expect("failed to convert validator id to full identification")) - .collect::>>()) +fn make_offenders( + num_offenders: u32, + num_nominators: u32, +) -> Result>, &'static str> { + let mut offenders: Vec = vec![]; + + for i in 0..num_offenders { + let offender = create_offender::(i, num_nominators)?; + offenders.push(offender); + } + + Ok(offenders + .iter() + .map(|id| { + ::ValidatorIdOf::convert(id.clone()) + .expect("failed to get validator id from account id") + }) + .map(|validator_id| { + ::FullIdentificationOf::convert(validator_id.clone()) + .map(|full_id| (validator_id, full_id)) + .expect("failed to convert validator id to full identification") + }) + .collect::>>()) } benchmarks! { - _ { - let u in 1 .. 
MAX_USERS => (); - let r in 1 .. MAX_REPORTERS => (); - let o in 1 .. MAX_OFFENDERS => (); - let n in 1 .. MAX_NOMINATORS => (); - let d in 1 .. MAX_DEFERRED_OFFENCES => (); - } - - report_offence { - let r in ...; - let o in ...; - let n in ...; - - let mut reporters = vec![]; - - for i in 0 .. r { - let reporter = account("reporter", i, SEED); - reporters.push(reporter); - } - - let offenders = make_offenders::(o, n).expect("failed to create offenders"); - let keys = ImOnline::::keys(); - - let offence = UnresponsivenessOffence { - session_index: 0, - validator_set_count: keys.len() as u32, - offenders, - }; - - }: { - let _ = ::ReportUnresponsiveness::report_offence(reporters, offence); - } - - on_initialize { - let d in ...; - - Staking::::put_election_status(ElectionStatus::Closed); - - let mut deferred_offences = vec![]; - - for i in 0 .. d { - deferred_offences.push((vec![], vec![], 0u32)); - } - - Offences::::set_deferred_offences(deferred_offences); - - }: { - Offences::::on_initialize(u.into()); - } + _ { + let u in 1 .. MAX_USERS => (); + let r in 1 .. MAX_REPORTERS => (); + let o in 1 .. MAX_OFFENDERS => (); + let n in 1 .. MAX_NOMINATORS => (); + let d in 1 .. MAX_DEFERRED_OFFENCES => (); + } + + report_offence { + let r in ...; + let o in ...; + let n in ...; + + let mut reporters = vec![]; + + for i in 0 .. r { + let reporter = account("reporter", i, SEED); + reporters.push(reporter); + } + + let offenders = make_offenders::(o, n).expect("failed to create offenders"); + let keys = ImOnline::::keys(); + + let offence = UnresponsivenessOffence { + session_index: 0, + validator_set_count: keys.len() as u32, + offenders, + }; + + }: { + let _ = ::ReportUnresponsiveness::report_offence(reporters, offence); + } + + on_initialize { + let d in ...; + + Staking::::put_election_status(ElectionStatus::Closed); + + let mut deferred_offences = vec![]; + + for i in 0 .. 
d { + deferred_offences.push((vec![], vec![], 0u32)); + } + + Offences::::set_deferred_offences(deferred_offences); + + }: { + Offences::::on_initialize(u.into()); + } } diff --git a/frame/offences/src/lib.rs b/frame/offences/src/lib.rs index 2b59c5e796..91b3118ccc 100644 --- a/frame/offences/src/lib.rs +++ b/frame/offences/src/lib.rs @@ -24,18 +24,19 @@ mod mock; mod tests; -use sp_std::vec::Vec; +use codec::{Decode, Encode}; use frame_support::{ - decl_module, decl_event, decl_storage, Parameter, debug, - weights::{Weight, MINIMUM_WEIGHT}, + debug, decl_event, decl_module, decl_storage, + weights::{Weight, MINIMUM_WEIGHT}, + Parameter, }; +use frame_system as system; use sp_runtime::{traits::Hash, Perbill}; use sp_staking::{ - SessionIndex, - offence::{Offence, ReportOffence, Kind, OnOffenceHandler, OffenceDetails, OffenceError}, + offence::{Kind, Offence, OffenceDetails, OffenceError, OnOffenceHandler, ReportOffence}, + SessionIndex, }; -use codec::{Encode, Decode}; -use frame_system as system; +use sp_std::vec::Vec; /// A binary blob which represents a SCALE codec-encoded `O::TimeSlot`. type OpaqueTimeSlot = Vec; @@ -45,220 +46,221 @@ type ReportIdOf = ::Hash; /// Type of data stored as a deferred offence pub type DeferredOffenceOf = ( - Vec::AccountId, ::IdentificationTuple>>, - Vec, - SessionIndex, + Vec::AccountId, ::IdentificationTuple>>, + Vec, + SessionIndex, ); /// Offences trait pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From + Into<::Event>; - /// Full identification of the validator. - type IdentificationTuple: Parameter + Ord; - /// A handler called for every offence report. - type OnOffenceHandler: OnOffenceHandler; + /// The overarching event type. + type Event: From + Into<::Event>; + /// Full identification of the validator. + type IdentificationTuple: Parameter + Ord; + /// A handler called for every offence report. + type OnOffenceHandler: OnOffenceHandler; } decl_storage! 
{ - trait Store for Module as Offences { - /// The primary structure that holds all offence records keyed by report identifiers. - Reports get(fn reports): - map hasher(twox_64_concat) ReportIdOf - => Option>; - - /// Deferred reports that have been rejected by the offence handler and need to be submitted - /// at a later time. - DeferredOffences get(deferred_offences): Vec>; - - /// A vector of reports of the same kind that happened at the same time slot. - ConcurrentReportsIndex: - double_map hasher(twox_64_concat) Kind, hasher(twox_64_concat) OpaqueTimeSlot - => Vec>; - - /// Enumerates all reports of a kind along with the time they happened. - /// - /// All reports are sorted by the time of offence. - /// - /// Note that the actual type of this mapping is `Vec`, this is because values of - /// different types are not supported at the moment so we are doing the manual serialization. - ReportsByKindIndex: map hasher(twox_64_concat) Kind => Vec; // (O::TimeSlot, ReportIdOf) - } + trait Store for Module as Offences { + /// The primary structure that holds all offence records keyed by report identifiers. + Reports get(fn reports): + map hasher(twox_64_concat) ReportIdOf + => Option>; + + /// Deferred reports that have been rejected by the offence handler and need to be submitted + /// at a later time. + DeferredOffences get(deferred_offences): Vec>; + + /// A vector of reports of the same kind that happened at the same time slot. + ConcurrentReportsIndex: + double_map hasher(twox_64_concat) Kind, hasher(twox_64_concat) OpaqueTimeSlot + => Vec>; + + /// Enumerates all reports of a kind along with the time they happened. + /// + /// All reports are sorted by the time of offence. + /// + /// Note that the actual type of this mapping is `Vec`, this is because values of + /// different types are not supported at the moment so we are doing the manual serialization. 
+ ReportsByKindIndex: map hasher(twox_64_concat) Kind => Vec; // (O::TimeSlot, ReportIdOf) + } } decl_event!( - pub enum Event { - /// There is an offence reported of the given `kind` happened at the `session_index` and - /// (kind-specific) time slot. This event is not deposited for duplicate slashes. last - /// element indicates of the offence was applied (true) or queued (false). - Offence(Kind, OpaqueTimeSlot, bool), - } + pub enum Event { + /// There is an offence reported of the given `kind` happened at the `session_index` and + /// (kind-specific) time slot. This event is not deposited for duplicate slashes. last + /// element indicates of the offence was applied (true) or queued (false). + Offence(Kind, OpaqueTimeSlot, bool), + } ); decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn deposit_event() = default; - - fn on_runtime_upgrade() -> Weight { - Reports::::remove_all(); - ConcurrentReportsIndex::::remove_all(); - ReportsByKindIndex::remove_all(); - - MINIMUM_WEIGHT - } - - fn on_initialize(now: T::BlockNumber) -> Weight { - // only decode storage if we can actually submit anything again. - if T::OnOffenceHandler::can_report() { - >::mutate(|deferred| { - // keep those that fail to be reported again. An error log is emitted here; this - // should not happen if staking's `can_report` is implemented properly. - deferred.retain(|(o, p, s)| { - T::OnOffenceHandler::on_offence(&o, &p, *s).map_err(|_| { - debug::native::error!( - target: "pallet-offences", - "re-submitting a deferred slash returned Err at {}. 
This should not happen with pallet-staking", - now, - ); - }).is_err() - }) - }) - } - - MINIMUM_WEIGHT - } - } + pub struct Module for enum Call where origin: T::Origin { + fn deposit_event() = default; + + fn on_runtime_upgrade() -> Weight { + Reports::::remove_all(); + ConcurrentReportsIndex::::remove_all(); + ReportsByKindIndex::remove_all(); + + MINIMUM_WEIGHT + } + + fn on_initialize(now: T::BlockNumber) -> Weight { + // only decode storage if we can actually submit anything again. + if T::OnOffenceHandler::can_report() { + >::mutate(|deferred| { + // keep those that fail to be reported again. An error log is emitted here; this + // should not happen if staking's `can_report` is implemented properly. + deferred.retain(|(o, p, s)| { + T::OnOffenceHandler::on_offence(&o, &p, *s).map_err(|_| { + debug::native::error!( + target: "pallet-offences", + "re-submitting a deferred slash returned Err at {}. This should not happen with pallet-staking", + now, + ); + }).is_err() + }) + }) + } + + MINIMUM_WEIGHT + } + } } impl> - ReportOffence for Module + ReportOffence for Module where - T::IdentificationTuple: Clone, + T::IdentificationTuple: Clone, { - fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { - let offenders = offence.offenders(); - let time_slot = offence.time_slot(); - let validator_set_count = offence.validator_set_count(); - - // Go through all offenders in the offence report and find all offenders that was spotted - // in unique reports. - let TriageOutcome { concurrent_offenders } = match Self::triage_offence_report::( - reporters, - &time_slot, - offenders, - ) { - Some(triage) => triage, - // The report contained only duplicates, so there is no need to slash again. 
- None => return Err(OffenceError::DuplicateReport), - }; - - let offenders_count = concurrent_offenders.len() as u32; - - // The amount new offenders are slashed - let new_fraction = O::slash_fraction(offenders_count, validator_set_count); - - let slash_perbill: Vec<_> = (0..concurrent_offenders.len()) - .map(|_| new_fraction.clone()).collect(); - - let applied = Self::report_or_store_offence( - &concurrent_offenders, - &slash_perbill, - offence.session_index(), - ); - - // Deposit the event. - Self::deposit_event(Event::Offence(O::ID, time_slot.encode(), applied)); - - Ok(()) - } + fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { + let offenders = offence.offenders(); + let time_slot = offence.time_slot(); + let validator_set_count = offence.validator_set_count(); + + // Go through all offenders in the offence report and find all offenders that was spotted + // in unique reports. + let TriageOutcome { + concurrent_offenders, + } = match Self::triage_offence_report::(reporters, &time_slot, offenders) { + Some(triage) => triage, + // The report contained only duplicates, so there is no need to slash again. + None => return Err(OffenceError::DuplicateReport), + }; + + let offenders_count = concurrent_offenders.len() as u32; + + // The amount new offenders are slashed + let new_fraction = O::slash_fraction(offenders_count, validator_set_count); + + let slash_perbill: Vec<_> = (0..concurrent_offenders.len()) + .map(|_| new_fraction.clone()) + .collect(); + + let applied = Self::report_or_store_offence( + &concurrent_offenders, + &slash_perbill, + offence.session_index(), + ); + + // Deposit the event. + Self::deposit_event(Event::Offence(O::ID, time_slot.encode(), applied)); + + Ok(()) + } } impl Module { - /// Tries (without checking) to report an offence. Stores them in [`DeferredOffences`] in case - /// it fails. Returns false in case it has to store the offence. 
- fn report_or_store_offence( - concurrent_offenders: &[OffenceDetails], - slash_perbill: &[Perbill], - session_index: SessionIndex, - ) -> bool { - match T::OnOffenceHandler::on_offence( - &concurrent_offenders, - &slash_perbill, - session_index, - ) { - Ok(_) => true, - Err(_) => { - >::mutate(|d| - d.push((concurrent_offenders.to_vec(), slash_perbill.to_vec(), session_index)) - ); - false - } - } - } - - /// Compute the ID for the given report properties. - /// - /// The report id depends on the offence kind, time slot and the id of offender. - fn report_id>( - time_slot: &O::TimeSlot, - offender: &T::IdentificationTuple, - ) -> ReportIdOf { - (O::ID, time_slot.encode(), offender).using_encoded(T::Hashing::hash) - } - - /// Triages the offence report and returns the set of offenders that was involved in unique - /// reports along with the list of the concurrent offences. - fn triage_offence_report>( - reporters: Vec, - time_slot: &O::TimeSlot, - offenders: Vec, - ) -> Option> { - let mut storage = ReportIndexStorage::::load(time_slot); - - let mut any_new = false; - for offender in offenders { - let report_id = Self::report_id::(time_slot, &offender); - - if !>::contains_key(&report_id) { - any_new = true; - >::insert( - &report_id, - OffenceDetails { - offender, - reporters: reporters.clone(), - }, - ); - - storage.insert(time_slot, report_id); - } - } - - if any_new { - // Load report details for the all reports happened at the same time. - let concurrent_offenders = storage.concurrent_reports - .iter() - .filter_map(|report_id| >::get(report_id)) - .collect::>(); - - storage.save(); - - Some(TriageOutcome { - concurrent_offenders, - }) - } else { - None - } - } - - #[cfg(feature = "runtime-benchmarks")] - pub fn set_deferred_offences(offences: Vec>) { - >::put(offences); - } + /// Tries (without checking) to report an offence. Stores them in [`DeferredOffences`] in case + /// it fails. Returns false in case it has to store the offence. 
+ fn report_or_store_offence( + concurrent_offenders: &[OffenceDetails], + slash_perbill: &[Perbill], + session_index: SessionIndex, + ) -> bool { + match T::OnOffenceHandler::on_offence(&concurrent_offenders, &slash_perbill, session_index) + { + Ok(_) => true, + Err(_) => { + >::mutate(|d| { + d.push(( + concurrent_offenders.to_vec(), + slash_perbill.to_vec(), + session_index, + )) + }); + false + } + } + } + + /// Compute the ID for the given report properties. + /// + /// The report id depends on the offence kind, time slot and the id of offender. + fn report_id>( + time_slot: &O::TimeSlot, + offender: &T::IdentificationTuple, + ) -> ReportIdOf { + (O::ID, time_slot.encode(), offender).using_encoded(T::Hashing::hash) + } + + /// Triages the offence report and returns the set of offenders that was involved in unique + /// reports along with the list of the concurrent offences. + fn triage_offence_report>( + reporters: Vec, + time_slot: &O::TimeSlot, + offenders: Vec, + ) -> Option> { + let mut storage = ReportIndexStorage::::load(time_slot); + + let mut any_new = false; + for offender in offenders { + let report_id = Self::report_id::(time_slot, &offender); + + if !>::contains_key(&report_id) { + any_new = true; + >::insert( + &report_id, + OffenceDetails { + offender, + reporters: reporters.clone(), + }, + ); + + storage.insert(time_slot, report_id); + } + } + + if any_new { + // Load report details for the all reports happened at the same time. + let concurrent_offenders = storage + .concurrent_reports + .iter() + .filter_map(|report_id| >::get(report_id)) + .collect::>(); + + storage.save(); + + Some(TriageOutcome { + concurrent_offenders, + }) + } else { + None + } + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn set_deferred_offences(offences: Vec>) { + >::put(offences); + } } struct TriageOutcome { - /// Other reports for the same report kinds. - concurrent_offenders: Vec>, + /// Other reports for the same report kinds. 
+ concurrent_offenders: Vec>, } /// An auxiliary struct for working with storage of indexes localized for a specific offence @@ -268,55 +270,55 @@ struct TriageOutcome { /// accessed directly meanwhile. #[must_use = "The changes are not saved without called `save`"] struct ReportIndexStorage> { - opaque_time_slot: OpaqueTimeSlot, - concurrent_reports: Vec>, - same_kind_reports: Vec<(O::TimeSlot, ReportIdOf)>, + opaque_time_slot: OpaqueTimeSlot, + concurrent_reports: Vec>, + same_kind_reports: Vec<(O::TimeSlot, ReportIdOf)>, } impl> ReportIndexStorage { - /// Preload indexes from the storage for the specific `time_slot` and the kind of the offence. - fn load(time_slot: &O::TimeSlot) -> Self { - let opaque_time_slot = time_slot.encode(); - - let same_kind_reports = ::get(&O::ID); - let same_kind_reports = - Vec::<(O::TimeSlot, ReportIdOf)>::decode(&mut &same_kind_reports[..]) - .unwrap_or_default(); - - let concurrent_reports = >::get(&O::ID, &opaque_time_slot); - - Self { - opaque_time_slot, - concurrent_reports, - same_kind_reports, - } - } - - /// Insert a new report to the index. - fn insert(&mut self, time_slot: &O::TimeSlot, report_id: ReportIdOf) { - // Insert the report id into the list while maintaining the ordering by the time - // slot. - let pos = match self - .same_kind_reports - .binary_search_by_key(&time_slot, |&(ref when, _)| when) - { - Ok(pos) => pos, - Err(pos) => pos, - }; - self.same_kind_reports - .insert(pos, (time_slot.clone(), report_id)); - - // Update the list of concurrent reports. - self.concurrent_reports.push(report_id); - } - - /// Dump the indexes to the storage. - fn save(self) { - ::insert(&O::ID, self.same_kind_reports.encode()); - >::insert( - &O::ID, - &self.opaque_time_slot, - &self.concurrent_reports, - ); - } + /// Preload indexes from the storage for the specific `time_slot` and the kind of the offence. 
+ fn load(time_slot: &O::TimeSlot) -> Self { + let opaque_time_slot = time_slot.encode(); + + let same_kind_reports = ::get(&O::ID); + let same_kind_reports = + Vec::<(O::TimeSlot, ReportIdOf)>::decode(&mut &same_kind_reports[..]) + .unwrap_or_default(); + + let concurrent_reports = >::get(&O::ID, &opaque_time_slot); + + Self { + opaque_time_slot, + concurrent_reports, + same_kind_reports, + } + } + + /// Insert a new report to the index. + fn insert(&mut self, time_slot: &O::TimeSlot, report_id: ReportIdOf) { + // Insert the report id into the list while maintaining the ordering by the time + // slot. + let pos = match self + .same_kind_reports + .binary_search_by_key(&time_slot, |&(ref when, _)| when) + { + Ok(pos) => pos, + Err(pos) => pos, + }; + self.same_kind_reports + .insert(pos, (time_slot.clone(), report_id)); + + // Update the list of concurrent reports. + self.concurrent_reports.push(report_id); + } + + /// Dump the indexes to the storage. + fn save(self) { + ::insert(&O::ID, self.same_kind_reports.encode()); + >::insert( + &O::ID, + &self.opaque_time_slot, + &self.concurrent_reports, + ); + } } diff --git a/frame/offences/src/mock.rs b/frame/offences/src/mock.rs index 7eda40cbbb..dd7cd814fe 100644 --- a/frame/offences/src/mock.rs +++ b/frame/offences/src/mock.rs @@ -18,120 +18,120 @@ #![cfg(test)] -use std::cell::RefCell; use crate::{Module, Trait}; use codec::Encode; -use sp_runtime::Perbill; -use sp_staking::{ - SessionIndex, - offence::{self, Kind, OffenceDetails}, -}; -use sp_runtime::testing::Header; -use sp_runtime::traits::{IdentityLookup, BlakeTwo256}; -use sp_core::H256; use frame_support::{ - impl_outer_origin, impl_outer_event, parameter_types, StorageMap, StorageDoubleMap, - weights::Weight, + impl_outer_event, impl_outer_origin, parameter_types, weights::Weight, StorageDoubleMap, + StorageMap, }; use frame_system as system; +use sp_core::H256; +use sp_runtime::testing::Header; +use sp_runtime::traits::{BlakeTwo256, IdentityLookup}; +use 
sp_runtime::Perbill; +use sp_staking::{ + offence::{self, Kind, OffenceDetails}, + SessionIndex, +}; +use std::cell::RefCell; -impl_outer_origin!{ - pub enum Origin for Runtime {} +impl_outer_origin! { + pub enum Origin for Runtime {} } pub struct OnOffenceHandler; thread_local! { - pub static ON_OFFENCE_PERBILL: RefCell> = RefCell::new(Default::default()); - pub static CAN_REPORT: RefCell = RefCell::new(true); + pub static ON_OFFENCE_PERBILL: RefCell> = RefCell::new(Default::default()); + pub static CAN_REPORT: RefCell = RefCell::new(true); } impl offence::OnOffenceHandler for OnOffenceHandler { - fn on_offence( - _offenders: &[OffenceDetails], - slash_fraction: &[Perbill], - _offence_session: SessionIndex, - ) -> Result<(), ()> { - if >::can_report() { - ON_OFFENCE_PERBILL.with(|f| { - *f.borrow_mut() = slash_fraction.to_vec(); - }); - - Ok(()) - } else { - Err(()) - } - } - - fn can_report() -> bool { - CAN_REPORT.with(|c| *c.borrow()) - } + fn on_offence( + _offenders: &[OffenceDetails], + slash_fraction: &[Perbill], + _offence_session: SessionIndex, + ) -> Result<(), ()> { + if >::can_report() { + ON_OFFENCE_PERBILL.with(|f| { + *f.borrow_mut() = slash_fraction.to_vec(); + }); + + Ok(()) + } else { + Err(()) + } + } + + fn can_report() -> bool { + CAN_REPORT.with(|c| *c.borrow()) + } } pub fn set_can_report(can_report: bool) { - CAN_REPORT.with(|c| *c.borrow_mut() = can_report); + CAN_REPORT.with(|c| *c.borrow_mut() = can_report); } pub fn with_on_offence_fractions) -> R>(f: F) -> R { - ON_OFFENCE_PERBILL.with(|fractions| { - f(&mut *fractions.borrow_mut()) - }) + ON_OFFENCE_PERBILL.with(|fractions| f(&mut *fractions.borrow_mut())) } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. #[derive(Clone, PartialEq, Eq, Debug)] pub struct Runtime; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = TestEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl Trait for Runtime { - type Event = TestEvent; - type IdentificationTuple = u64; - type OnOffenceHandler = OnOffenceHandler; + type Event = TestEvent; + type IdentificationTuple = u64; + type OnOffenceHandler = OnOffenceHandler; } mod offences { - pub use crate::Event; + pub use crate::Event; } impl_outer_event! 
{ - pub enum TestEvent for Runtime { - system, - offences, - } + pub enum TestEvent for Runtime { + system, + offences, + } } pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } /// Offences module. @@ -142,46 +142,43 @@ pub const KIND: [u8; 16] = *b"test_report_1234"; /// Returns all offence details for the specific `kind` happened at the specific time slot. pub fn offence_reports(kind: Kind, time_slot: u128) -> Vec> { - >::get(&kind, &time_slot.encode()) - .into_iter() - .map(|report_id| { - >::get(&report_id) - .expect("dangling report id is found in ConcurrentReportsIndex") - }) - .collect() + >::get(&kind, &time_slot.encode()) + .into_iter() + .map(|report_id| { + >::get(&report_id) + .expect("dangling report id is found in ConcurrentReportsIndex") + }) + .collect() } #[derive(Clone)] pub struct Offence { - pub validator_set_count: u32, - pub offenders: Vec, - pub time_slot: u128, + pub validator_set_count: u32, + pub offenders: Vec, + pub time_slot: u128, } impl offence::Offence for Offence { - const ID: offence::Kind = KIND; - type TimeSlot = u128; - - fn offenders(&self) -> Vec { - self.offenders.clone() - } - - fn validator_set_count(&self) -> u32 { - self.validator_set_count - } - - fn time_slot(&self) -> u128 { - self.time_slot - } - - fn session_index(&self) -> SessionIndex { - 1 - } - - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill { - Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) - } + const ID: offence::Kind = KIND; + type TimeSlot = u128; + + fn offenders(&self) -> Vec { + self.offenders.clone() + } + + fn 
validator_set_count(&self) -> u32 { + self.validator_set_count + } + + fn time_slot(&self) -> u128 { + self.time_slot + } + + fn session_index(&self) -> SessionIndex { + 1 + } + + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill { + Perbill::from_percent(5 + offenders_count * 100 / validator_set_count) + } } diff --git a/frame/offences/src/tests.rs b/frame/offences/src/tests.rs index 3179a07523..e7c5392e13 100644 --- a/frame/offences/src/tests.rs +++ b/frame/offences/src/tests.rs @@ -20,247 +20,258 @@ use super::*; use crate::mock::{ - Offences, System, Offence, TestEvent, KIND, new_test_ext, with_on_offence_fractions, - offence_reports, set_can_report, + new_test_ext, offence_reports, set_can_report, with_on_offence_fractions, Offence, Offences, + System, TestEvent, KIND, }; -use sp_runtime::Perbill; use frame_support::traits::OnInitialize; use frame_system::{EventRecord, Phase}; +use sp_runtime::Perbill; #[test] fn should_report_an_authority_and_trigger_on_offence() { - new_test_ext().execute_with(|| { - // given - let time_slot = 42; - assert_eq!(offence_reports(KIND, time_slot), vec![]); - - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - - // when - Offences::report_offence(vec![], offence).unwrap(); - - // then - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); - }); - }); + new_test_ext().execute_with(|| { + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let offence = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![5], + }; + + // when + Offences::report_offence(vec![], offence).unwrap(); + + // then + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + }); + }); } #[test] fn should_not_report_the_same_authority_twice_in_the_same_slot() { - new_test_ext().execute_with(|| { - // given - let time_slot = 42; - assert_eq!(offence_reports(KIND, 
time_slot), vec![]); - - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence.clone()).unwrap(); - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); - f.clear(); - }); - - // when - // report for the second time - assert_eq!(Offences::report_offence(vec![], offence), Err(OffenceError::DuplicateReport)); - - // then - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![]); - }); - }); + new_test_ext().execute_with(|| { + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let offence = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence.clone()).unwrap(); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + f.clear(); + }); + + // when + // report for the second time + assert_eq!( + Offences::report_offence(vec![], offence), + Err(OffenceError::DuplicateReport) + ); + + // then + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![]); + }); + }); } - #[test] fn should_report_in_different_time_slot() { - new_test_ext().execute_with(|| { - // given - let time_slot = 42; - assert_eq!(offence_reports(KIND, time_slot), vec![]); - - let mut offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence.clone()).unwrap(); - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); - f.clear(); - }); - - // when - // report for the second time - offence.time_slot += 1; - Offences::report_offence(vec![], offence).unwrap(); - - // then - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); - }); - }); + new_test_ext().execute_with(|| { + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let mut offence = Offence { + 
validator_set_count: 5, + time_slot, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence.clone()).unwrap(); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + f.clear(); + }); + + // when + // report for the second time + offence.time_slot += 1; + Offences::report_offence(vec![], offence).unwrap(); + + // then + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + }); + }); } #[test] fn should_deposit_event() { - new_test_ext().execute_with(|| { - // given - let time_slot = 42; - assert_eq!(offence_reports(KIND, time_slot), vec![]); - - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - - // when - Offences::report_offence(vec![], offence).unwrap(); - - // then - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), - topics: vec![], - }] - ); - }); + new_test_ext().execute_with(|| { + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let offence = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![5], + }; + + // when + Offences::report_offence(vec![], offence).unwrap(); + + // then + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), + topics: vec![], + }] + ); + }); } #[test] fn doesnt_deposit_event_for_dups() { - new_test_ext().execute_with(|| { - // given - let time_slot = 42; - assert_eq!(offence_reports(KIND, time_slot), vec![]); - - let offence = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence.clone()).unwrap(); - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); - f.clear(); - }); - - // when - // report for the second 
time - assert_eq!(Offences::report_offence(vec![], offence), Err(OffenceError::DuplicateReport)); - - // then - // there is only one event. - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), - topics: vec![], - }] - ); - }); + new_test_ext().execute_with(|| { + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let offence = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence.clone()).unwrap(); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + f.clear(); + }); + + // when + // report for the second time + assert_eq!( + Offences::report_offence(vec![], offence), + Err(OffenceError::DuplicateReport) + ); + + // then + // there is only one event. + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::offences(crate::Event::Offence(KIND, time_slot.encode(), true)), + topics: vec![], + }] + ); + }); } #[test] fn should_properly_count_offences() { - // We report two different authorities for the same issue. Ultimately, the 1st authority - // should have `count` equal 2 and the count of the 2nd one should be equal to 1. 
- new_test_ext().execute_with(|| { - // given - let time_slot = 42; - assert_eq!(offence_reports(KIND, time_slot), vec![]); - - let offence1 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![5], - }; - let offence2 = Offence { - validator_set_count: 5, - time_slot, - offenders: vec![4], - }; - Offences::report_offence(vec![], offence1).unwrap(); - with_on_offence_fractions(|f| { - assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); - f.clear(); - }); - - // when - // report for the second time - Offences::report_offence(vec![], offence2).unwrap(); - - // then - // the 1st authority should have count 2 and the 2nd one should be reported only once. - assert_eq!( - offence_reports(KIND, time_slot), - vec![ - OffenceDetails { offender: 5, reporters: vec![] }, - OffenceDetails { offender: 4, reporters: vec![] }, - ] - ); - }); + // We report two different authorities for the same issue. Ultimately, the 1st authority + // should have `count` equal 2 and the count of the 2nd one should be equal to 1. + new_test_ext().execute_with(|| { + // given + let time_slot = 42; + assert_eq!(offence_reports(KIND, time_slot), vec![]); + + let offence1 = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![5], + }; + let offence2 = Offence { + validator_set_count: 5, + time_slot, + offenders: vec![4], + }; + Offences::report_offence(vec![], offence1).unwrap(); + with_on_offence_fractions(|f| { + assert_eq!(f.clone(), vec![Perbill::from_percent(25)]); + f.clear(); + }); + + // when + // report for the second time + Offences::report_offence(vec![], offence2).unwrap(); + + // then + // the 1st authority should have count 2 and the 2nd one should be reported only once. 
+ assert_eq!( + offence_reports(KIND, time_slot), + vec![ + OffenceDetails { + offender: 5, + reporters: vec![] + }, + OffenceDetails { + offender: 4, + reporters: vec![] + }, + ] + ); + }); } #[test] fn should_queue_and_resubmit_rejected_offence() { - new_test_ext().execute_with(|| { - set_can_report(false); - - // will get deferred - let offence = Offence { - validator_set_count: 5, - time_slot: 42, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - assert_eq!(Offences::deferred_offences().len(), 1); - // event also indicates unapplied. - assert_eq!( - System::events(), - vec![EventRecord { - phase: Phase::Initialization, - event: TestEvent::offences(crate::Event::Offence(KIND, 42u128.encode(), false)), - topics: vec![], - }] - ); - - // will not dequeue - Offences::on_initialize(2); - - // again - let offence = Offence { - validator_set_count: 5, - time_slot: 62, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - assert_eq!(Offences::deferred_offences().len(), 2); - - set_can_report(true); - - // can be submitted - let offence = Offence { - validator_set_count: 5, - time_slot: 72, - offenders: vec![5], - }; - Offences::report_offence(vec![], offence).unwrap(); - assert_eq!(Offences::deferred_offences().len(), 2); - - Offences::on_initialize(3); - assert_eq!(Offences::deferred_offences().len(), 0); - }) + new_test_ext().execute_with(|| { + set_can_report(false); + + // will get deferred + let offence = Offence { + validator_set_count: 5, + time_slot: 42, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence).unwrap(); + assert_eq!(Offences::deferred_offences().len(), 1); + // event also indicates unapplied. 
+ assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: TestEvent::offences(crate::Event::Offence(KIND, 42u128.encode(), false)), + topics: vec![], + }] + ); + + // will not dequeue + Offences::on_initialize(2); + + // again + let offence = Offence { + validator_set_count: 5, + time_slot: 62, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence).unwrap(); + assert_eq!(Offences::deferred_offences().len(), 2); + + set_can_report(true); + + // can be submitted + let offence = Offence { + validator_set_count: 5, + time_slot: 72, + offenders: vec![5], + }; + Offences::report_offence(vec![], offence).unwrap(); + assert_eq!(Offences::deferred_offences().len(), 2); + + Offences::on_initialize(3); + assert_eq!(Offences::deferred_offences().len(), 0); + }) } diff --git a/frame/randomness-collective-flip/src/lib.rs b/frame/randomness-collective-flip/src/lib.rs index 194879eb65..865e588d71 100644 --- a/frame/randomness-collective-flip/src/lib.rs +++ b/frame/randomness-collective-flip/src/lib.rs @@ -53,249 +53,261 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, convert::TryInto}; -use sp_runtime::traits::Hash; +use codec::Encode; use frame_support::{ - decl_module, decl_storage, traits::Randomness, - weights::{Weight, MINIMUM_WEIGHT} + decl_module, decl_storage, + traits::Randomness, + weights::{Weight, MINIMUM_WEIGHT}, }; -use safe_mix::TripletMix; -use codec::Encode; use frame_system::Trait; +use safe_mix::TripletMix; +use sp_runtime::traits::Hash; +use sp_std::{convert::TryInto, prelude::*}; const RANDOM_MATERIAL_LEN: u32 = 81; fn block_number_to_index(block_number: T::BlockNumber) -> usize { - // on_initialize is called on the first block after genesis - let index = (block_number - 1.into()) % RANDOM_MATERIAL_LEN.into(); - index.try_into().ok().expect("Something % 81 is always smaller than usize; qed") + // on_initialize is called on the first block after genesis + let index = (block_number - 
1.into()) % RANDOM_MATERIAL_LEN.into(); + index + .try_into() + .ok() + .expect("Something % 81 is always smaller than usize; qed") } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - fn on_initialize(block_number: T::BlockNumber) -> Weight { - let parent_hash = >::parent_hash(); - - >::mutate(|ref mut values| if values.len() < RANDOM_MATERIAL_LEN as usize { - values.push(parent_hash) - } else { - let index = block_number_to_index::(block_number); - values[index] = parent_hash; - }); - - MINIMUM_WEIGHT - } - } + pub struct Module for enum Call where origin: T::Origin { + fn on_initialize(block_number: T::BlockNumber) -> Weight { + let parent_hash = >::parent_hash(); + + >::mutate(|ref mut values| if values.len() < RANDOM_MATERIAL_LEN as usize { + values.push(parent_hash) + } else { + let index = block_number_to_index::(block_number); + values[index] = parent_hash; + }); + + MINIMUM_WEIGHT + } + } } decl_storage! { - trait Store for Module as RandomnessCollectiveFlip { - /// Series of block headers from the last 81 blocks that acts as random seed material. This - /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of - /// the oldest hash. - RandomMaterial get(fn random_material): Vec; - } + trait Store for Module as RandomnessCollectiveFlip { + /// Series of block headers from the last 81 blocks that acts as random seed material. This + /// is arranged as a ring buffer with `block_number % 81` being the index into the `Vec` of + /// the oldest hash. + RandomMaterial get(fn random_material): Vec; + } } impl Randomness for Module { - /// Get a low-influence "random" value. - /// - /// Being a deterministic block chain, real randomness is difficult to come by. This gives you - /// something that approximates it. `subject` is a context identifier and allows you to get a - /// different result to other callers of this function; use it like - /// `random(&b"my context"[..])`. 
This is initially implemented through a low-influence - /// "triplet mix" convolution of previous block hash values. In the future it will be generated - /// from a secure verifiable random function (VRF). - /// - /// ### Security Notes - /// - /// This randomness uses a low-influence function, drawing upon the block hashes from the - /// previous 81 blocks. Its result for any given subject will be known far in advance by anyone - /// observing the chain. Any block producer has significant influence over their block hashes - /// bounded only by their computational resources. Our low-influence function reduces the actual - /// block producer's influence over the randomness, but increases the influence of small - /// colluding groups of recent block producers. - /// - /// Some BABE blocks have VRF outputs where the block producer has exactly one bit of influence, - /// either they make the block or they do not make the block and thus someone else makes the - /// next block. Yet, this randomness is not fresh in all BABE blocks. - /// - /// If that is an insufficient security guarantee then two things can be used to improve this - /// randomness: - /// - /// - Name, in advance, the block number whose random value will be used; ensure your module - /// retains a buffer of previous random values for its subject and then index into these in - /// order to obviate the ability of your user to look up the parent hash and choose when to - /// transact based upon it. - /// - Require your user to first commit to an additional value by first posting its hash. - /// Require them to reveal the value to determine the final result, hashing it with the - /// output of this random function. This reduces the ability of a cabal of block producers - /// from conspiring against individuals. 
- /// - /// WARNING: Hashing the result of this function will remove any low-influence properties it has - /// and mean that all bits of the resulting value are entirely manipulatable by the author of - /// the parent block, who can determine the value of `parent_hash`. - fn random(subject: &[u8]) -> T::Hash { - let block_number = >::block_number(); - let index = block_number_to_index::(block_number); - - let hash_series = >::get(); - if !hash_series.is_empty() { - // Always the case after block 1 is initialized. - hash_series.iter() - .cycle() - .skip(index) - .take(RANDOM_MATERIAL_LEN as usize) - .enumerate() - .map(|(i, h)| (i as i8, subject, h).using_encoded(T::Hashing::hash)) - .triplet_mix() - } else { - T::Hash::default() - } - } + /// Get a low-influence "random" value. + /// + /// Being a deterministic block chain, real randomness is difficult to come by. This gives you + /// something that approximates it. `subject` is a context identifier and allows you to get a + /// different result to other callers of this function; use it like + /// `random(&b"my context"[..])`. This is initially implemented through a low-influence + /// "triplet mix" convolution of previous block hash values. In the future it will be generated + /// from a secure verifiable random function (VRF). + /// + /// ### Security Notes + /// + /// This randomness uses a low-influence function, drawing upon the block hashes from the + /// previous 81 blocks. Its result for any given subject will be known far in advance by anyone + /// observing the chain. Any block producer has significant influence over their block hashes + /// bounded only by their computational resources. Our low-influence function reduces the actual + /// block producer's influence over the randomness, but increases the influence of small + /// colluding groups of recent block producers. 
+ /// + /// Some BABE blocks have VRF outputs where the block producer has exactly one bit of influence, + /// either they make the block or they do not make the block and thus someone else makes the + /// next block. Yet, this randomness is not fresh in all BABE blocks. + /// + /// If that is an insufficient security guarantee then two things can be used to improve this + /// randomness: + /// + /// - Name, in advance, the block number whose random value will be used; ensure your module + /// retains a buffer of previous random values for its subject and then index into these in + /// order to obviate the ability of your user to look up the parent hash and choose when to + /// transact based upon it. + /// - Require your user to first commit to an additional value by first posting its hash. + /// Require them to reveal the value to determine the final result, hashing it with the + /// output of this random function. This reduces the ability of a cabal of block producers + /// from conspiring against individuals. + /// + /// WARNING: Hashing the result of this function will remove any low-influence properties it has + /// and mean that all bits of the resulting value are entirely manipulatable by the author of + /// the parent block, who can determine the value of `parent_hash`. + fn random(subject: &[u8]) -> T::Hash { + let block_number = >::block_number(); + let index = block_number_to_index::(block_number); + + let hash_series = >::get(); + if !hash_series.is_empty() { + // Always the case after block 1 is initialized. 
+ hash_series + .iter() + .cycle() + .skip(index) + .take(RANDOM_MATERIAL_LEN as usize) + .enumerate() + .map(|(i, h)| (i as i8, subject, h).using_encoded(T::Hashing::hash)) + .triplet_mix() + } else { + T::Hash::default() + } + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::H256; - use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, Header as _, IdentityLookup}, - }; - use frame_support::{ - impl_outer_origin, parameter_types, weights::Weight, traits::{Randomness, OnInitialize}, - }; - - #[derive(Clone, PartialEq, Eq)] - pub struct Test; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - - type System = frame_system::Module; - type CollectiveFlip = Module; - - fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - t.into() - } - - #[test] - fn test_block_number_to_index() { - for i in 1 .. 1000 { - assert_eq!((i - 1) as usize % 81, block_number_to_index::(i)); - } - } - - fn setup_blocks(blocks: u64) { - let mut parent_hash = System::parent_hash(); - - for i in 1 .. 
(blocks + 1) { - System::initialize( - &i, - &parent_hash, - &Default::default(), - &Default::default(), - frame_system::InitKind::Full, - ); - CollectiveFlip::on_initialize(i); - - let header = System::finalize(); - parent_hash = header.hash(); - System::set_block_number(*header.number()); - } - } - - #[test] - fn test_random_material_partial() { - new_test_ext().execute_with(|| { - let genesis_hash = System::parent_hash(); - - setup_blocks(38); - - let random_material = CollectiveFlip::random_material(); - - assert_eq!(random_material.len(), 38); - assert_eq!(random_material[0], genesis_hash); - }); - } - - #[test] - fn test_random_material_filled() { - new_test_ext().execute_with(|| { - let genesis_hash = System::parent_hash(); - - setup_blocks(81); - - let random_material = CollectiveFlip::random_material(); - - assert_eq!(random_material.len(), 81); - assert_ne!(random_material[0], random_material[1]); - assert_eq!(random_material[0], genesis_hash); - }); - } - - #[test] - fn test_random_material_filled_twice() { - new_test_ext().execute_with(|| { - let genesis_hash = System::parent_hash(); - - setup_blocks(162); - - let random_material = CollectiveFlip::random_material(); - - assert_eq!(random_material.len(), 81); - assert_ne!(random_material[0], random_material[1]); - assert_ne!(random_material[0], genesis_hash); - }); - } - - #[test] - fn test_random() { - new_test_ext().execute_with(|| { - setup_blocks(162); - - assert_eq!(System::block_number(), 162); - assert_eq!(CollectiveFlip::random_seed(), CollectiveFlip::random_seed()); - assert_ne!(CollectiveFlip::random(b"random_1"), CollectiveFlip::random(b"random_2")); - - let random = CollectiveFlip::random_seed(); - - assert_ne!(random, H256::zero()); - assert!(!CollectiveFlip::random_material().contains(&random)); - }); - } + use super::*; + use frame_support::{ + impl_outer_origin, parameter_types, + traits::{OnInitialize, Randomness}, + weights::Weight, + }; + use sp_core::H256; + use sp_runtime::{ + 
testing::Header, + traits::{BlakeTwo256, Header as _, IdentityLookup}, + Perbill, + }; + + #[derive(Clone, PartialEq, Eq)] + pub struct Test; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + + type System = frame_system::Module; + type CollectiveFlip = Module; + + fn new_test_ext() -> sp_io::TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + t.into() + } + + #[test] + fn test_block_number_to_index() { + for i in 1..1000 { + assert_eq!((i - 1) as usize % 81, block_number_to_index::(i)); + } + } + + fn setup_blocks(blocks: u64) { + let mut parent_hash = System::parent_hash(); + + for i in 1..(blocks + 1) { + System::initialize( + &i, + &parent_hash, + &Default::default(), + &Default::default(), + frame_system::InitKind::Full, + ); + CollectiveFlip::on_initialize(i); + + let header = System::finalize(); + parent_hash = header.hash(); + System::set_block_number(*header.number()); + } + } + + #[test] + fn test_random_material_partial() { + new_test_ext().execute_with(|| { + let genesis_hash = System::parent_hash(); + + setup_blocks(38); + + let 
random_material = CollectiveFlip::random_material(); + + assert_eq!(random_material.len(), 38); + assert_eq!(random_material[0], genesis_hash); + }); + } + + #[test] + fn test_random_material_filled() { + new_test_ext().execute_with(|| { + let genesis_hash = System::parent_hash(); + + setup_blocks(81); + + let random_material = CollectiveFlip::random_material(); + + assert_eq!(random_material.len(), 81); + assert_ne!(random_material[0], random_material[1]); + assert_eq!(random_material[0], genesis_hash); + }); + } + + #[test] + fn test_random_material_filled_twice() { + new_test_ext().execute_with(|| { + let genesis_hash = System::parent_hash(); + + setup_blocks(162); + + let random_material = CollectiveFlip::random_material(); + + assert_eq!(random_material.len(), 81); + assert_ne!(random_material[0], random_material[1]); + assert_ne!(random_material[0], genesis_hash); + }); + } + + #[test] + fn test_random() { + new_test_ext().execute_with(|| { + setup_blocks(162); + + assert_eq!(System::block_number(), 162); + assert_eq!(CollectiveFlip::random_seed(), CollectiveFlip::random_seed()); + assert_ne!( + CollectiveFlip::random(b"random_1"), + CollectiveFlip::random(b"random_2") + ); + + let random = CollectiveFlip::random_seed(); + + assert_ne!(random, H256::zero()); + assert!(!CollectiveFlip::random_material().contains(&random)); + }); + } } diff --git a/frame/recovery/src/lib.rs b/frame/recovery/src/lib.rs index 9f30061f93..15f2d606a0 100644 --- a/frame/recovery/src/lib.rs +++ b/frame/recovery/src/lib.rs @@ -150,20 +150,22 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; +use codec::{Decode, Encode}; use sp_runtime::{ - traits::{Dispatchable, SaturatedConversion, CheckedAdd, CheckedMul}, - DispatchResult + traits::{CheckedAdd, CheckedMul, Dispatchable, SaturatedConversion}, + DispatchResult, }; -use codec::{Encode, Decode}; +use sp_std::prelude::*; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, ensure, - Parameter, RuntimeDebug, weights::{MINIMUM_WEIGHT, GetDispatchInfo, SimpleDispatchInfo, FunctionOf}, - traits::{Currency, ReservableCurrency, Get, BalanceStatus}, - dispatch::PostDispatchInfo, + decl_error, decl_event, decl_module, decl_storage, + dispatch::PostDispatchInfo, + ensure, + traits::{BalanceStatus, Currency, Get, ReservableCurrency}, + weights::{FunctionOf, GetDispatchInfo, SimpleDispatchInfo, MINIMUM_WEIGHT}, + Parameter, RuntimeDebug, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_system::{self as system, ensure_root, ensure_signed}; #[cfg(test)] mod mock; @@ -171,501 +173,503 @@ mod mock; mod tests; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; /// Configuration trait. pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The overarching call type. - type Call: Parameter + Dispatchable + GetDispatchInfo; - - /// The currency mechanism. - type Currency: ReservableCurrency; - - /// The base amount of currency needed to reserve for creating a recovery configuration. - /// - /// This is held for an additional storage item whose value size is - /// `2 + sizeof(BlockNumber, Balance)` bytes. - type ConfigDepositBase: Get>; - - /// The amount of currency needed per additional user when creating a recovery configuration. - /// - /// This is held for adding `sizeof(AccountId)` bytes more into a pre-existing storage value. 
- type FriendDepositFactor: Get>; - - /// The maximum amount of friends allowed in a recovery configuration. - type MaxFriends: Get; - - /// The base amount of currency needed to reserve for starting a recovery. - /// - /// This is primarily held for deterring malicious recovery attempts, and should - /// have a value large enough that a bad actor would choose not to place this - /// deposit. It also acts to fund additional storage item whose value size is - /// `sizeof(BlockNumber, Balance + T * AccountId)` bytes. Where T is a configurable - /// threshold. - type RecoveryDeposit: Get>; + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// The overarching call type. + type Call: Parameter + + Dispatchable + + GetDispatchInfo; + + /// The currency mechanism. + type Currency: ReservableCurrency; + + /// The base amount of currency needed to reserve for creating a recovery configuration. + /// + /// This is held for an additional storage item whose value size is + /// `2 + sizeof(BlockNumber, Balance)` bytes. + type ConfigDepositBase: Get>; + + /// The amount of currency needed per additional user when creating a recovery configuration. + /// + /// This is held for adding `sizeof(AccountId)` bytes more into a pre-existing storage value. + type FriendDepositFactor: Get>; + + /// The maximum amount of friends allowed in a recovery configuration. + type MaxFriends: Get; + + /// The base amount of currency needed to reserve for starting a recovery. + /// + /// This is primarily held for deterring malicious recovery attempts, and should + /// have a value large enough that a bad actor would choose not to place this + /// deposit. It also acts to fund additional storage item whose value size is + /// `sizeof(BlockNumber, Balance + T * AccountId)` bytes. Where T is a configurable + /// threshold. + type RecoveryDeposit: Get>; } /// An active recovery process. 
#[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] pub struct ActiveRecovery { - /// The block number when the recovery process started. - created: BlockNumber, - /// The amount held in reserve of the `depositor`, - /// To be returned once this recovery process is closed. - deposit: Balance, - /// The friends which have vouched so far. Always sorted. - friends: Vec, + /// The block number when the recovery process started. + created: BlockNumber, + /// The amount held in reserve of the `depositor`, + /// To be returned once this recovery process is closed. + deposit: Balance, + /// The friends which have vouched so far. Always sorted. + friends: Vec, } /// Configuration for recovering an account. #[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] pub struct RecoveryConfig { - /// The minimum number of blocks since the start of the recovery process before the account - /// can be recovered. - delay_period: BlockNumber, - /// The amount held in reserve of the `depositor`, - /// to be returned once this configuration is removed. - deposit: Balance, - /// The list of friends which can help recover an account. Always sorted. - friends: Vec, - /// The number of approving friends needed to recover an account. - threshold: u16, + /// The minimum number of blocks since the start of the recovery process before the account + /// can be recovered. + delay_period: BlockNumber, + /// The amount held in reserve of the `depositor`, + /// to be returned once this configuration is removed. + deposit: Balance, + /// The list of friends which can help recover an account. Always sorted. + friends: Vec, + /// The number of approving friends needed to recover an account. + threshold: u16, } decl_storage! { - trait Store for Module as Recovery { - /// The set of recoverable accounts and their recovery configuration. 
- pub Recoverable get(fn recovery_config): - map hasher(twox_64_concat) T::AccountId - => Option, T::AccountId>>; - - /// Active recovery attempts. - /// - /// First account is the account to be recovered, and the second account - /// is the user trying to recover the account. - pub ActiveRecoveries get(fn active_recovery): - double_map hasher(twox_64_concat) T::AccountId, hasher(twox_64_concat) T::AccountId => - Option, T::AccountId>>; - - /// The list of allowed proxy accounts. - /// - /// Map from the user who can access it to the recovered account. - pub Proxy get(fn proxy): - map hasher(blake2_128_concat) T::AccountId => Option; - } + trait Store for Module as Recovery { + /// The set of recoverable accounts and their recovery configuration. + pub Recoverable get(fn recovery_config): + map hasher(twox_64_concat) T::AccountId + => Option, T::AccountId>>; + + /// Active recovery attempts. + /// + /// First account is the account to be recovered, and the second account + /// is the user trying to recover the account. + pub ActiveRecoveries get(fn active_recovery): + double_map hasher(twox_64_concat) T::AccountId, hasher(twox_64_concat) T::AccountId => + Option, T::AccountId>>; + + /// The list of allowed proxy accounts. + /// + /// Map from the user who can access it to the recovered account. + pub Proxy get(fn proxy): + map hasher(blake2_128_concat) T::AccountId => Option; + } } decl_event! { - /// Events type. 
- pub enum Event where - AccountId = ::AccountId, - { - /// A recovery process has been set up for an account - RecoveryCreated(AccountId), - /// A recovery process has been initiated for account_1 by account_2 - RecoveryInitiated(AccountId, AccountId), - /// A recovery process for account_1 by account_2 has been vouched for by account_3 - RecoveryVouched(AccountId, AccountId, AccountId), - /// A recovery process for account_1 by account_2 has been closed - RecoveryClosed(AccountId, AccountId), - /// Account_1 has been successfully recovered by account_2 - AccountRecovered(AccountId, AccountId), - /// A recovery process has been removed for an account - RecoveryRemoved(AccountId), - } + /// Events type. + pub enum Event where + AccountId = ::AccountId, + { + /// A recovery process has been set up for an account + RecoveryCreated(AccountId), + /// A recovery process has been initiated for account_1 by account_2 + RecoveryInitiated(AccountId, AccountId), + /// A recovery process for account_1 by account_2 has been vouched for by account_3 + RecoveryVouched(AccountId, AccountId, AccountId), + /// A recovery process for account_1 by account_2 has been closed + RecoveryClosed(AccountId, AccountId), + /// Account_1 has been successfully recovered by account_2 + AccountRecovered(AccountId, AccountId), + /// A recovery process has been removed for an account + RecoveryRemoved(AccountId), + } } decl_error! 
{ - pub enum Error for Module { - /// User is not allowed to make a call on behalf of this account - NotAllowed, - /// Threshold must be greater than zero - ZeroThreshold, - /// Friends list must be greater than zero and threshold - NotEnoughFriends, - /// Friends list must be less than max friends - MaxFriends, - /// Friends list must be sorted and free of duplicates - NotSorted, - /// This account is not set up for recovery - NotRecoverable, - /// This account is already set up for recovery - AlreadyRecoverable, - /// A recovery process has already started for this account - AlreadyStarted, - /// A recovery process has not started for this rescuer - NotStarted, - /// This account is not a friend who can vouch - NotFriend, - /// The friend must wait until the delay period to vouch for this recovery - DelayPeriod, - /// This user has already vouched for this recovery - AlreadyVouched, - /// The threshold for recovering this account has not been met - Threshold, - /// There are still active recovery attempts that need to be closed - StillActive, - /// There was an overflow in a calculation - Overflow, - /// This account is already set up for recovery - AlreadyProxy, - } + pub enum Error for Module { + /// User is not allowed to make a call on behalf of this account + NotAllowed, + /// Threshold must be greater than zero + ZeroThreshold, + /// Friends list must be greater than zero and threshold + NotEnoughFriends, + /// Friends list must be less than max friends + MaxFriends, + /// Friends list must be sorted and free of duplicates + NotSorted, + /// This account is not set up for recovery + NotRecoverable, + /// This account is already set up for recovery + AlreadyRecoverable, + /// A recovery process has already started for this account + AlreadyStarted, + /// A recovery process has not started for this rescuer + NotStarted, + /// This account is not a friend who can vouch + NotFriend, + /// The friend must wait until the delay period to vouch for this recovery + 
DelayPeriod, + /// This user has already vouched for this recovery + AlreadyVouched, + /// The threshold for recovering this account has not been met + Threshold, + /// There are still active recovery attempts that need to be closed + StillActive, + /// There was an overflow in a calculation + Overflow, + /// This account is already set up for recovery + AlreadyProxy, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; - - /// Send a call through a recovered account. - /// - /// The dispatch origin for this call must be _Signed_ and registered to - /// be able to make calls on behalf of the recovered account. - /// - /// Parameters: - /// - `account`: The recovered account you want to make a call on-behalf-of. - /// - `call`: The call you want to make with the recovered account. - /// - /// # - /// - The weight of the `call` + 10,000. - /// - One storage lookup to check account is recovered by `who`. O(1) - /// # - #[weight = FunctionOf( - |args: (&T::AccountId, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, - |args: (&T::AccountId, &Box<::Call>)| args.1.get_dispatch_info().class, - true - )] - fn as_recovered(origin, - account: T::AccountId, - call: Box<::Call> - ) -> DispatchResult { - let who = ensure_signed(origin)?; - // Check `who` is allowed to make a call on behalf of `account` - let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; - ensure!(&target == &account, Error::::NotAllowed); - call.dispatch(frame_system::RawOrigin::Signed(account).into()) - .map(|_| ()).map_err(|e| e.error) - } - - /// Allow ROOT to bypass the recovery process and set an a rescuer account - /// for a lost account directly. - /// - /// The dispatch origin for this call must be _ROOT_. - /// - /// Parameters: - /// - `lost`: The "lost account" to be recovered. 
- /// - `rescuer`: The "rescuer account" which can call as the lost account. - /// - /// # - /// - One storage write O(1) - /// - One event - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn set_recovered(origin, lost: T::AccountId, rescuer: T::AccountId) { - ensure_root(origin)?; - // Create the recovery storage item. - >::insert(&rescuer, &lost); - Self::deposit_event(RawEvent::AccountRecovered(lost, rescuer)); - } - - /// Create a recovery configuration for your account. This makes your account recoverable. - /// - /// Payment: `ConfigDepositBase` + `FriendDepositFactor` * #_of_friends balance - /// will be reserved for storing the recovery configuration. This deposit is returned - /// in full when the user calls `remove_recovery`. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Parameters: - /// - `friends`: A list of friends you trust to vouch for recovery attempts. - /// Should be ordered and contain no duplicate values. - /// - `threshold`: The number of friends that must vouch for a recovery attempt - /// before the account can be recovered. Should be less than or equal to - /// the length of the list of friends. - /// - `delay_period`: The number of blocks after a recovery attempt is initialized - /// that needs to pass before the account can be recovered. - /// - /// # - /// - Key: F (len of friends) - /// - One storage read to check that account is not already recoverable. O(1). - /// - A check that the friends list is sorted and unique. O(F) - /// - One currency reserve operation. O(X) - /// - One storage write. O(1). Codec O(F). - /// - One event. 
- /// - /// Total Complexity: O(F + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn create_recovery(origin, - friends: Vec, - threshold: u16, - delay_period: T::BlockNumber - ) { - let who = ensure_signed(origin)?; - // Check account is not already set up for recovery - ensure!(!>::contains_key(&who), Error::::AlreadyRecoverable); - // Check user input is valid - ensure!(threshold >= 1, Error::::ZeroThreshold); - ensure!(!friends.is_empty(), Error::::NotEnoughFriends); - ensure!(threshold as usize <= friends.len(), Error::::NotEnoughFriends); - let max_friends = T::MaxFriends::get() as usize; - ensure!(friends.len() <= max_friends, Error::::MaxFriends); - ensure!(Self::is_sorted_and_unique(&friends), Error::::NotSorted); - // Total deposit is base fee + number of friends * factor fee - let friend_deposit = T::FriendDepositFactor::get() - .checked_mul(&friends.len().saturated_into()) - .ok_or(Error::::Overflow)?; - let total_deposit = T::ConfigDepositBase::get() - .checked_add(&friend_deposit) - .ok_or(Error::::Overflow)?; - // Reserve the deposit - T::Currency::reserve(&who, total_deposit)?; - // Create the recovery configuration - let recovery_config = RecoveryConfig { - delay_period, - deposit: total_deposit, - friends, - threshold, - }; - // Create the recovery configuration storage item - >::insert(&who, recovery_config); - - Self::deposit_event(RawEvent::RecoveryCreated(who)); - } - - /// Initiate the process for recovering a recoverable account. - /// - /// Payment: `RecoveryDeposit` balance will be reserved for initiating the - /// recovery process. This deposit will always be repatriated to the account - /// trying to be recovered. See `close_recovery`. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Parameters: - /// - `account`: The lost account that you want to recover. This account - /// needs to be recoverable (i.e. have a recovery configuration). 
- /// - /// # - /// - One storage read to check that account is recoverable. O(F) - /// - One storage read to check that this recovery process hasn't already started. O(1) - /// - One currency reserve operation. O(X) - /// - One storage read to get the current block number. O(1) - /// - One storage write. O(1). - /// - One event. - /// - /// Total Complexity: O(F + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn initiate_recovery(origin, account: T::AccountId) { - let who = ensure_signed(origin)?; - // Check that the account is recoverable - ensure!(>::contains_key(&account), Error::::NotRecoverable); - // Check that the recovery process has not already been started - ensure!(!>::contains_key(&account, &who), Error::::AlreadyStarted); - // Take recovery deposit - let recovery_deposit = T::RecoveryDeposit::get(); - T::Currency::reserve(&who, recovery_deposit)?; - // Create an active recovery status - let recovery_status = ActiveRecovery { - created: >::block_number(), - deposit: recovery_deposit, - friends: vec![], - }; - // Create the active recovery storage item - >::insert(&account, &who, recovery_status); - Self::deposit_event(RawEvent::RecoveryInitiated(account, who)); - } - - /// Allow a "friend" of a recoverable account to vouch for an active recovery - /// process for that account. - /// - /// The dispatch origin for this call must be _Signed_ and must be a "friend" - /// for the recoverable account. - /// - /// Parameters: - /// - `lost`: The lost account that you want to recover. - /// - `rescuer`: The account trying to rescue the lost account that you - /// want to vouch for. - /// - /// The combination of these two parameters must point to an active recovery - /// process. - /// - /// # - /// Key: F (len of friends in config), V (len of vouching friends) - /// - One storage read to get the recovery configuration. O(1), Codec O(F) - /// - One storage read to get the active recovery process. 
O(1), Codec O(V) - /// - One binary search to confirm caller is a friend. O(logF) - /// - One binary search to confirm caller has not already vouched. O(logV) - /// - One storage write. O(1), Codec O(V). - /// - One event. - /// - /// Total Complexity: O(F + logF + V + logV) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn vouch_recovery(origin, lost: T::AccountId, rescuer: T::AccountId) { - let who = ensure_signed(origin)?; - // Get the recovery configuration for the lost account. - let recovery_config = Self::recovery_config(&lost).ok_or(Error::::NotRecoverable)?; - // Get the active recovery process for the rescuer. - let mut active_recovery = Self::active_recovery(&lost, &rescuer).ok_or(Error::::NotStarted)?; - // Make sure the voter is a friend - ensure!(Self::is_friend(&recovery_config.friends, &who), Error::::NotFriend); - // Either insert the vouch, or return an error that the user already vouched. - match active_recovery.friends.binary_search(&who) { - Ok(_pos) => Err(Error::::AlreadyVouched)?, - Err(pos) => active_recovery.friends.insert(pos, who.clone()), - } - // Update storage with the latest details - >::insert(&lost, &rescuer, active_recovery); - Self::deposit_event(RawEvent::RecoveryVouched(lost, rescuer, who)); - } - - /// Allow a successful rescuer to claim their recovered account. - /// - /// The dispatch origin for this call must be _Signed_ and must be a "rescuer" - /// who has successfully completed the account recovery process: collected - /// `threshold` or more vouches, waited `delay_period` blocks since initiation. - /// - /// Parameters: - /// - `account`: The lost account that you want to claim has been successfully - /// recovered by you. - /// - /// # - /// Key: F (len of friends in config), V (len of vouching friends) - /// - One storage read to get the recovery configuration. O(1), Codec O(F) - /// - One storage read to get the active recovery process. 
O(1), Codec O(V) - /// - One storage read to get the current block number. O(1) - /// - One storage write. O(1), Codec O(V). - /// - One event. - /// - /// Total Complexity: O(F + V) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn claim_recovery(origin, account: T::AccountId) { - let who = ensure_signed(origin)?; - // Get the recovery configuration for the lost account - let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; - // Get the active recovery process for the rescuer - let active_recovery = Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; - ensure!(!Proxy::::contains_key(&who), Error::::AlreadyProxy); - // Make sure the delay period has passed - let current_block_number = >::block_number(); - let recoverable_block_number = active_recovery.created - .checked_add(&recovery_config.delay_period) - .ok_or(Error::::Overflow)?; - ensure!(recoverable_block_number <= current_block_number, Error::::DelayPeriod); - // Make sure the threshold is met - ensure!( - recovery_config.threshold as usize <= active_recovery.friends.len(), - Error::::Threshold - ); - // Create the recovery storage item - Proxy::::insert(&who, &account); - system::Module::::inc_ref(&who); - Self::deposit_event(RawEvent::AccountRecovered(account, who)); - } - - /// As the controller of a recoverable account, close an active recovery - /// process for your account. - /// - /// Payment: By calling this function, the recoverable account will receive - /// the recovery deposit `RecoveryDeposit` placed by the rescuer. - /// - /// The dispatch origin for this call must be _Signed_ and must be a - /// recoverable account with an active recovery process for it. - /// - /// Parameters: - /// - `rescuer`: The account trying to rescue this recoverable account. - /// - /// # - /// Key: V (len of vouching friends) - /// - One storage read/remove to get the active recovery process. 
O(1), Codec O(V) - /// - One balance call to repatriate reserved. O(X) - /// - One event. - /// - /// Total Complexity: O(V + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] - fn close_recovery(origin, rescuer: T::AccountId) { - let who = ensure_signed(origin)?; - // Take the active recovery process started by the rescuer for this account. - let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; - // Move the reserved funds from the rescuer to the rescued account. - // Acts like a slashing mechanism for those who try to maliciously recover accounts. - let _ = T::Currency::repatriate_reserved(&rescuer, &who, active_recovery.deposit, BalanceStatus::Free); - Self::deposit_event(RawEvent::RecoveryClosed(who, rescuer)); - } - - /// Remove the recovery process for your account. Recovered accounts are still accessible. - /// - /// NOTE: The user must make sure to call `close_recovery` on all active - /// recovery attempts before calling this function else it will fail. - /// - /// Payment: By calling this function the recoverable account will unreserve - /// their recovery configuration deposit. - /// (`ConfigDepositBase` + `FriendDepositFactor` * #_of_friends) - /// - /// The dispatch origin for this call must be _Signed_ and must be a - /// recoverable account (i.e. has a recovery configuration). - /// - /// # - /// Key: F (len of friends) - /// - One storage read to get the prefix iterator for active recoveries. O(1) - /// - One storage read/remove to get the recovery configuration. O(1), Codec O(F) - /// - One balance call to unreserved. O(X) - /// - One event. 
- /// - /// Total Complexity: O(F + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] - fn remove_recovery(origin) { - let who = ensure_signed(origin)?; - // Check there are no active recoveries - let mut active_recoveries = >::iter_prefix_values(&who); - ensure!(active_recoveries.next().is_none(), Error::::StillActive); - // Take the recovery configuration for this account. - let recovery_config = >::take(&who).ok_or(Error::::NotRecoverable)?; - - // Unreserve the initial deposit for the recovery configuration. - T::Currency::unreserve(&who, recovery_config.deposit); - Self::deposit_event(RawEvent::RecoveryRemoved(who)); - } - - /// Cancel the ability to use `as_recovered` for `account`. - /// - /// The dispatch origin for this call must be _Signed_ and registered to - /// be able to make calls on behalf of the recovered account. - /// - /// Parameters: - /// - `account`: The recovered account you are able to call on-behalf-of. - /// - /// # - /// - One storage mutation to check account is recovered by `who`. O(1) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn cancel_recovered(origin, account: T::AccountId) { - let who = ensure_signed(origin)?; - // Check `who` is allowed to make a call on behalf of `account` - ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); - Proxy::::remove(&who); - system::Module::::dec_ref(&who); - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + /// Deposit one of this module's events by using the default implementation. + fn deposit_event() = default; + + /// Send a call through a recovered account. + /// + /// The dispatch origin for this call must be _Signed_ and registered to + /// be able to make calls on behalf of the recovered account. + /// + /// Parameters: + /// - `account`: The recovered account you want to make a call on-behalf-of. + /// - `call`: The call you want to make with the recovered account. 
+ /// + /// # + /// - The weight of the `call` + 10,000. + /// - One storage lookup to check account is recovered by `who`. O(1) + /// # + #[weight = FunctionOf( + |args: (&T::AccountId, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, + |args: (&T::AccountId, &Box<::Call>)| args.1.get_dispatch_info().class, + true + )] + fn as_recovered(origin, + account: T::AccountId, + call: Box<::Call> + ) -> DispatchResult { + let who = ensure_signed(origin)?; + // Check `who` is allowed to make a call on behalf of `account` + let target = Self::proxy(&who).ok_or(Error::::NotAllowed)?; + ensure!(&target == &account, Error::::NotAllowed); + call.dispatch(frame_system::RawOrigin::Signed(account).into()) + .map(|_| ()).map_err(|e| e.error) + } + + /// Allow ROOT to bypass the recovery process and set an a rescuer account + /// for a lost account directly. + /// + /// The dispatch origin for this call must be _ROOT_. + /// + /// Parameters: + /// - `lost`: The "lost account" to be recovered. + /// - `rescuer`: The "rescuer account" which can call as the lost account. + /// + /// # + /// - One storage write O(1) + /// - One event + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn set_recovered(origin, lost: T::AccountId, rescuer: T::AccountId) { + ensure_root(origin)?; + // Create the recovery storage item. + >::insert(&rescuer, &lost); + Self::deposit_event(RawEvent::AccountRecovered(lost, rescuer)); + } + + /// Create a recovery configuration for your account. This makes your account recoverable. + /// + /// Payment: `ConfigDepositBase` + `FriendDepositFactor` * #_of_friends balance + /// will be reserved for storing the recovery configuration. This deposit is returned + /// in full when the user calls `remove_recovery`. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `friends`: A list of friends you trust to vouch for recovery attempts. + /// Should be ordered and contain no duplicate values. 
+ /// - `threshold`: The number of friends that must vouch for a recovery attempt + /// before the account can be recovered. Should be less than or equal to + /// the length of the list of friends. + /// - `delay_period`: The number of blocks after a recovery attempt is initialized + /// that needs to pass before the account can be recovered. + /// + /// # + /// - Key: F (len of friends) + /// - One storage read to check that account is not already recoverable. O(1). + /// - A check that the friends list is sorted and unique. O(F) + /// - One currency reserve operation. O(X) + /// - One storage write. O(1). Codec O(F). + /// - One event. + /// + /// Total Complexity: O(F + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn create_recovery(origin, + friends: Vec, + threshold: u16, + delay_period: T::BlockNumber + ) { + let who = ensure_signed(origin)?; + // Check account is not already set up for recovery + ensure!(!>::contains_key(&who), Error::::AlreadyRecoverable); + // Check user input is valid + ensure!(threshold >= 1, Error::::ZeroThreshold); + ensure!(!friends.is_empty(), Error::::NotEnoughFriends); + ensure!(threshold as usize <= friends.len(), Error::::NotEnoughFriends); + let max_friends = T::MaxFriends::get() as usize; + ensure!(friends.len() <= max_friends, Error::::MaxFriends); + ensure!(Self::is_sorted_and_unique(&friends), Error::::NotSorted); + // Total deposit is base fee + number of friends * factor fee + let friend_deposit = T::FriendDepositFactor::get() + .checked_mul(&friends.len().saturated_into()) + .ok_or(Error::::Overflow)?; + let total_deposit = T::ConfigDepositBase::get() + .checked_add(&friend_deposit) + .ok_or(Error::::Overflow)?; + // Reserve the deposit + T::Currency::reserve(&who, total_deposit)?; + // Create the recovery configuration + let recovery_config = RecoveryConfig { + delay_period, + deposit: total_deposit, + friends, + threshold, + }; + // Create the recovery configuration storage item + 
>::insert(&who, recovery_config); + + Self::deposit_event(RawEvent::RecoveryCreated(who)); + } + + /// Initiate the process for recovering a recoverable account. + /// + /// Payment: `RecoveryDeposit` balance will be reserved for initiating the + /// recovery process. This deposit will always be repatriated to the account + /// trying to be recovered. See `close_recovery`. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `account`: The lost account that you want to recover. This account + /// needs to be recoverable (i.e. have a recovery configuration). + /// + /// # + /// - One storage read to check that account is recoverable. O(F) + /// - One storage read to check that this recovery process hasn't already started. O(1) + /// - One currency reserve operation. O(X) + /// - One storage read to get the current block number. O(1) + /// - One storage write. O(1). + /// - One event. + /// + /// Total Complexity: O(F + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn initiate_recovery(origin, account: T::AccountId) { + let who = ensure_signed(origin)?; + // Check that the account is recoverable + ensure!(>::contains_key(&account), Error::::NotRecoverable); + // Check that the recovery process has not already been started + ensure!(!>::contains_key(&account, &who), Error::::AlreadyStarted); + // Take recovery deposit + let recovery_deposit = T::RecoveryDeposit::get(); + T::Currency::reserve(&who, recovery_deposit)?; + // Create an active recovery status + let recovery_status = ActiveRecovery { + created: >::block_number(), + deposit: recovery_deposit, + friends: vec![], + }; + // Create the active recovery storage item + >::insert(&account, &who, recovery_status); + Self::deposit_event(RawEvent::RecoveryInitiated(account, who)); + } + + /// Allow a "friend" of a recoverable account to vouch for an active recovery + /// process for that account. 
+ /// + /// The dispatch origin for this call must be _Signed_ and must be a "friend" + /// for the recoverable account. + /// + /// Parameters: + /// - `lost`: The lost account that you want to recover. + /// - `rescuer`: The account trying to rescue the lost account that you + /// want to vouch for. + /// + /// The combination of these two parameters must point to an active recovery + /// process. + /// + /// # + /// Key: F (len of friends in config), V (len of vouching friends) + /// - One storage read to get the recovery configuration. O(1), Codec O(F) + /// - One storage read to get the active recovery process. O(1), Codec O(V) + /// - One binary search to confirm caller is a friend. O(logF) + /// - One binary search to confirm caller has not already vouched. O(logV) + /// - One storage write. O(1), Codec O(V). + /// - One event. + /// + /// Total Complexity: O(F + logF + V + logV) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn vouch_recovery(origin, lost: T::AccountId, rescuer: T::AccountId) { + let who = ensure_signed(origin)?; + // Get the recovery configuration for the lost account. + let recovery_config = Self::recovery_config(&lost).ok_or(Error::::NotRecoverable)?; + // Get the active recovery process for the rescuer. + let mut active_recovery = Self::active_recovery(&lost, &rescuer).ok_or(Error::::NotStarted)?; + // Make sure the voter is a friend + ensure!(Self::is_friend(&recovery_config.friends, &who), Error::::NotFriend); + // Either insert the vouch, or return an error that the user already vouched. + match active_recovery.friends.binary_search(&who) { + Ok(_pos) => Err(Error::::AlreadyVouched)?, + Err(pos) => active_recovery.friends.insert(pos, who.clone()), + } + // Update storage with the latest details + >::insert(&lost, &rescuer, active_recovery); + Self::deposit_event(RawEvent::RecoveryVouched(lost, rescuer, who)); + } + + /// Allow a successful rescuer to claim their recovered account. 
+ /// + /// The dispatch origin for this call must be _Signed_ and must be a "rescuer" + /// who has successfully completed the account recovery process: collected + /// `threshold` or more vouches, waited `delay_period` blocks since initiation. + /// + /// Parameters: + /// - `account`: The lost account that you want to claim has been successfully + /// recovered by you. + /// + /// # + /// Key: F (len of friends in config), V (len of vouching friends) + /// - One storage read to get the recovery configuration. O(1), Codec O(F) + /// - One storage read to get the active recovery process. O(1), Codec O(V) + /// - One storage read to get the current block number. O(1) + /// - One storage write. O(1), Codec O(V). + /// - One event. + /// + /// Total Complexity: O(F + V) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn claim_recovery(origin, account: T::AccountId) { + let who = ensure_signed(origin)?; + // Get the recovery configuration for the lost account + let recovery_config = Self::recovery_config(&account).ok_or(Error::::NotRecoverable)?; + // Get the active recovery process for the rescuer + let active_recovery = Self::active_recovery(&account, &who).ok_or(Error::::NotStarted)?; + ensure!(!Proxy::::contains_key(&who), Error::::AlreadyProxy); + // Make sure the delay period has passed + let current_block_number = >::block_number(); + let recoverable_block_number = active_recovery.created + .checked_add(&recovery_config.delay_period) + .ok_or(Error::::Overflow)?; + ensure!(recoverable_block_number <= current_block_number, Error::::DelayPeriod); + // Make sure the threshold is met + ensure!( + recovery_config.threshold as usize <= active_recovery.friends.len(), + Error::::Threshold + ); + // Create the recovery storage item + Proxy::::insert(&who, &account); + system::Module::::inc_ref(&who); + Self::deposit_event(RawEvent::AccountRecovered(account, who)); + } + + /// As the controller of a recoverable account, close an active recovery + /// 
process for your account. + /// + /// Payment: By calling this function, the recoverable account will receive + /// the recovery deposit `RecoveryDeposit` placed by the rescuer. + /// + /// The dispatch origin for this call must be _Signed_ and must be a + /// recoverable account with an active recovery process for it. + /// + /// Parameters: + /// - `rescuer`: The account trying to rescue this recoverable account. + /// + /// # + /// Key: V (len of vouching friends) + /// - One storage read/remove to get the active recovery process. O(1), Codec O(V) + /// - One balance call to repatriate reserved. O(X) + /// - One event. + /// + /// Total Complexity: O(V + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] + fn close_recovery(origin, rescuer: T::AccountId) { + let who = ensure_signed(origin)?; + // Take the active recovery process started by the rescuer for this account. + let active_recovery = >::take(&who, &rescuer).ok_or(Error::::NotStarted)?; + // Move the reserved funds from the rescuer to the rescued account. + // Acts like a slashing mechanism for those who try to maliciously recover accounts. + let _ = T::Currency::repatriate_reserved(&rescuer, &who, active_recovery.deposit, BalanceStatus::Free); + Self::deposit_event(RawEvent::RecoveryClosed(who, rescuer)); + } + + /// Remove the recovery process for your account. Recovered accounts are still accessible. + /// + /// NOTE: The user must make sure to call `close_recovery` on all active + /// recovery attempts before calling this function else it will fail. + /// + /// Payment: By calling this function the recoverable account will unreserve + /// their recovery configuration deposit. + /// (`ConfigDepositBase` + `FriendDepositFactor` * #_of_friends) + /// + /// The dispatch origin for this call must be _Signed_ and must be a + /// recoverable account (i.e. has a recovery configuration). 
+ /// + /// # + /// Key: F (len of friends) + /// - One storage read to get the prefix iterator for active recoveries. O(1) + /// - One storage read/remove to get the recovery configuration. O(1), Codec O(F) + /// - One balance call to unreserved. O(X) + /// - One event. + /// + /// Total Complexity: O(F + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] + fn remove_recovery(origin) { + let who = ensure_signed(origin)?; + // Check there are no active recoveries + let mut active_recoveries = >::iter_prefix_values(&who); + ensure!(active_recoveries.next().is_none(), Error::::StillActive); + // Take the recovery configuration for this account. + let recovery_config = >::take(&who).ok_or(Error::::NotRecoverable)?; + + // Unreserve the initial deposit for the recovery configuration. + T::Currency::unreserve(&who, recovery_config.deposit); + Self::deposit_event(RawEvent::RecoveryRemoved(who)); + } + + /// Cancel the ability to use `as_recovered` for `account`. + /// + /// The dispatch origin for this call must be _Signed_ and registered to + /// be able to make calls on behalf of the recovered account. + /// + /// Parameters: + /// - `account`: The recovered account you are able to call on-behalf-of. + /// + /// # + /// - One storage mutation to check account is recovered by `who`. O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn cancel_recovered(origin, account: T::AccountId) { + let who = ensure_signed(origin)?; + // Check `who` is allowed to make a call on behalf of `account` + ensure!(Self::proxy(&who) == Some(account), Error::::NotAllowed); + Proxy::::remove(&who); + system::Module::::dec_ref(&who); + } + } } impl Module { - /// Check that friends list is sorted and has no duplicates. - fn is_sorted_and_unique(friends: &Vec) -> bool { - friends.windows(2).all(|w| w[0] < w[1]) - } - - /// Check that a user is a friend in the friends list. 
- fn is_friend(friends: &Vec, friend: &T::AccountId) -> bool { - friends.binary_search(&friend).is_ok() - } + /// Check that friends list is sorted and has no duplicates. + fn is_sorted_and_unique(friends: &Vec) -> bool { + friends.windows(2).all(|w| w[0] < w[1]) + } + + /// Check that a user is a friend in the friends list. + fn is_friend(friends: &Vec, friend: &T::AccountId) -> bool { + friends.binary_search(&friend).is_ok() + } } diff --git a/frame/recovery/src/mock.rs b/frame/recovery/src/mock.rs index ccc80730a1..7ea11c66c5 100644 --- a/frame/recovery/src/mock.rs +++ b/frame/recovery/src/mock.rs @@ -19,34 +19,36 @@ use super::*; use frame_support::{ - impl_outer_origin, impl_outer_dispatch, impl_outer_event, parameter_types, - weights::Weight, - traits::{OnInitialize, OnFinalize}, + impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, + traits::{OnFinalize, OnInitialize}, + weights::Weight, }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use crate as recovery; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -use crate as recovery; impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - recovery, - } + pub enum TestEvent for Test { + system, + pallet_balances, + recovery, + } } impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - pallet_balances::Balances, - recovery::Recovery, - } + pub enum Call for Test where origin: Origin { + pallet_balances::Balances, + recovery::Recovery, + } } // For testing the pallet, we construct most of a mock runtime. This means @@ -56,62 +58,62 @@ impl_outer_dispatch! 
{ pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Call = Call; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = TestEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; + pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { - type Balance = u128; - type DustRemoval = (); - type Event = TestEvent; - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u128; + type DustRemoval = (); + type Event = TestEvent; + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const ConfigDepositBase: u64 = 10; - pub const FriendDepositFactor: u64 = 1; - pub const MaxFriends: u16 = 3; - pub const RecoveryDeposit: u64 = 10; + pub const ConfigDepositBase: u64 = 10; + pub const FriendDepositFactor: u64 = 1; + pub const MaxFriends: u16 = 3; + pub const RecoveryDeposit: u64 = 10; } impl Trait for Test { - type Event = TestEvent; - type Call = Call; - type Currency = Balances; - type ConfigDepositBase = ConfigDepositBase; - type FriendDepositFactor = FriendDepositFactor; - type MaxFriends = MaxFriends; - type RecoveryDeposit = RecoveryDeposit; + type Event = TestEvent; + type Call = Call; + type Currency = Balances; + type ConfigDepositBase = ConfigDepositBase; + type FriendDepositFactor = FriendDepositFactor; + type MaxFriends = MaxFriends; + type RecoveryDeposit = RecoveryDeposit; } pub type Recovery = Module; @@ -122,20 +124,24 @@ pub type BalancesCall = pallet_balances::Call; pub type RecoveryCall = super::Call; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], - }.assimilate_storage(&mut t).unwrap(); - t.into() + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 100), (2, 100), (3, 100), (4, 100), (5, 100)], + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() } /// Run until a particular block. 
pub fn run_to_block(n: u64) { - while System::block_number() < n { - if System::block_number() > 1 { - System::on_finalize(System::block_number()); - } - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - } + while System::block_number() < n { + if System::block_number() > 1 { + System::on_finalize(System::block_number()); + } + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + } } diff --git a/frame/recovery/src/tests.rs b/frame/recovery/src/tests.rs index fb993043a5..c59e34e200 100644 --- a/frame/recovery/src/tests.rs +++ b/frame/recovery/src/tests.rs @@ -17,370 +17,475 @@ //! Tests for the module. use super::*; +use frame_support::{assert_noop, assert_ok, traits::Currency}; use mock::{ - Recovery, Balances, Test, Origin, Call, BalancesCall, RecoveryCall, - new_test_ext, run_to_block -}; -use sp_runtime::traits::{BadOrigin}; -use frame_support::{ - assert_noop, assert_ok, - traits::{Currency}, + new_test_ext, run_to_block, Balances, BalancesCall, Call, Origin, Recovery, RecoveryCall, Test, }; +use sp_runtime::traits::BadOrigin; #[test] fn basic_setup_works() { - new_test_ext().execute_with(|| { - // Nothing in storage to start - assert_eq!(Recovery::proxy(&2), None); - assert_eq!(Recovery::active_recovery(&1, &2), None); - assert_eq!(Recovery::recovery_config(&1), None); - // Everyone should have starting balance of 100 - assert_eq!(Balances::free_balance(1), 100); - }); + new_test_ext().execute_with(|| { + // Nothing in storage to start + assert_eq!(Recovery::proxy(&2), None); + assert_eq!(Recovery::active_recovery(&1, &2), None); + assert_eq!(Recovery::recovery_config(&1), None); + // Everyone should have starting balance of 100 + assert_eq!(Balances::free_balance(1), 100); + }); } #[test] fn set_recovered_works() { - new_test_ext().execute_with(|| { - // Not accessible by a normal user - assert_noop!(Recovery::set_recovered(Origin::signed(1), 5, 1), 
BadOrigin); - // Root can set a recovered account though - assert_ok!(Recovery::set_recovered(Origin::ROOT, 5, 1)); - // Account 1 should now be able to make a call through account 5 - let call = Box::new(Call::Balances(BalancesCall::transfer(1, 100))); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); - // Account 1 has successfully drained the funds from account 5 - assert_eq!(Balances::free_balance(1), 200); - assert_eq!(Balances::free_balance(5), 0); - }); + new_test_ext().execute_with(|| { + // Not accessible by a normal user + assert_noop!(Recovery::set_recovered(Origin::signed(1), 5, 1), BadOrigin); + // Root can set a recovered account though + assert_ok!(Recovery::set_recovered(Origin::ROOT, 5, 1)); + // Account 1 should now be able to make a call through account 5 + let call = Box::new(Call::Balances(BalancesCall::transfer(1, 100))); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + // Account 1 has successfully drained the funds from account 5 + assert_eq!(Balances::free_balance(1), 200); + assert_eq!(Balances::free_balance(5), 0); + }); } #[test] fn recovery_life_cycle_works() { - new_test_ext().execute_with(|| { - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends, threshold, delay_period)); - // Some time has passed, and the user lost their keys! - run_to_block(10); - // Using account 1, the user begins the recovery process to recover the lost account - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - // Off chain, the user contacts their friends and asks them to vouch for the recovery attempt - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); - // We met the threshold, lets try to recover the account...? 
- assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); - // We need to wait at least the delay_period number of blocks before we can recover - run_to_block(20); - assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); - // Account 1 can use account 5 to close the active recovery process, claiming the deposited - // funds used to initiate the recovery process into account 5. - let call = Box::new(Call::Recovery(RecoveryCall::close_recovery(1))); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); - // Account 1 can then use account 5 to remove the recovery configuration, claiming the - // deposited funds used to create the recovery configuration into account 5. - let call = Box::new(Call::Recovery(RecoveryCall::remove_recovery())); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); - // Account 1 should now be able to make a call through account 5 to get all of their funds - assert_eq!(Balances::free_balance(5), 110); - let call = Box::new(Call::Balances(BalancesCall::transfer(1, 110))); - assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); - // All funds have been fully recovered! - assert_eq!(Balances::free_balance(1), 200); - assert_eq!(Balances::free_balance(5), 0); - // Remove the proxy link. - assert_ok!(Recovery::cancel_recovered(Origin::signed(1), 5)); + new_test_ext().execute_with(|| { + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + // Account 5 sets up a recovery configuration on their account + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends, + threshold, + delay_period + )); + // Some time has passed, and the user lost their keys! 
+ run_to_block(10); + // Using account 1, the user begins the recovery process to recover the lost account + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + // Off chain, the user contacts their friends and asks them to vouch for the recovery attempt + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); + // We met the threshold, lets try to recover the account...? + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::DelayPeriod + ); + // We need to wait at least the delay_period number of blocks before we can recover + run_to_block(20); + assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); + // Account 1 can use account 5 to close the active recovery process, claiming the deposited + // funds used to initiate the recovery process into account 5. + let call = Box::new(Call::Recovery(RecoveryCall::close_recovery(1))); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + // Account 1 can then use account 5 to remove the recovery configuration, claiming the + // deposited funds used to create the recovery configuration into account 5. + let call = Box::new(Call::Recovery(RecoveryCall::remove_recovery())); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + // Account 1 should now be able to make a call through account 5 to get all of their funds + assert_eq!(Balances::free_balance(5), 110); + let call = Box::new(Call::Balances(BalancesCall::transfer(1, 110))); + assert_ok!(Recovery::as_recovered(Origin::signed(1), 5, call)); + // All funds have been fully recovered! + assert_eq!(Balances::free_balance(1), 200); + assert_eq!(Balances::free_balance(5), 0); + // Remove the proxy link. 
+ assert_ok!(Recovery::cancel_recovered(Origin::signed(1), 5)); - // All storage items are removed from the module - assert!(!>::contains_key(&5, &1)); - assert!(!>::contains_key(&5)); - assert!(!>::contains_key(&1)); - }); + // All storage items are removed from the module + assert!(!>::contains_key(&5, &1)); + assert!(!>::contains_key(&5)); + assert!(!>::contains_key(&1)); + }); } #[test] fn malicious_recovery_fails() { - new_test_ext().execute_with(|| { - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends, threshold, delay_period)); - // Some time has passed, and account 1 wants to try and attack this account! - run_to_block(10); - // Using account 1, the malicious user begins the recovery process on account 5 - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - // Off chain, the user **tricks** their friends and asks them to vouch for the recovery - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); // shame on you - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // shame on you - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); // shame on you - // We met the threshold, lets try to recover the account...? - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); - // Account 1 needs to wait... - run_to_block(19); - // One more block to wait! - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); - // Account 5 checks their account every `delay_period` and notices the malicious attack! - // Account 5 can close the recovery process before account 1 can claim it - assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); - // By doing so, account 5 has now claimed the deposit originally reserved by account 1 - assert_eq!(Balances::total_balance(&1), 90); - // Thanks for the free money! 
- assert_eq!(Balances::total_balance(&5), 110); - // The recovery process has been closed, so account 1 can't make the claim - run_to_block(20); - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); - // Account 5 can remove their recovery config and pick some better friends - assert_ok!(Recovery::remove_recovery(Origin::signed(5))); - assert_ok!(Recovery::create_recovery(Origin::signed(5), vec![22, 33, 44], threshold, delay_period)); - }); + new_test_ext().execute_with(|| { + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + // Account 5 sets up a recovery configuration on their account + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends, + threshold, + delay_period + )); + // Some time has passed, and account 1 wants to try and attack this account! + run_to_block(10); + // Using account 1, the malicious user begins the recovery process on account 5 + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + // Off chain, the user **tricks** their friends and asks them to vouch for the recovery + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); // shame on you + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); // shame on you + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); // shame on you + // We met the threshold, lets try to recover the account...? + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::DelayPeriod + ); + // Account 1 needs to wait... + run_to_block(19); + // One more block to wait! + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::DelayPeriod + ); + // Account 5 checks their account every `delay_period` and notices the malicious attack! 
+ // Account 5 can close the recovery process before account 1 can claim it + assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); + // By doing so, account 5 has now claimed the deposit originally reserved by account 1 + assert_eq!(Balances::total_balance(&1), 90); + // Thanks for the free money! + assert_eq!(Balances::total_balance(&5), 110); + // The recovery process has been closed, so account 1 can't make the claim + run_to_block(20); + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::NotStarted + ); + // Account 5 can remove their recovery config and pick some better friends + assert_ok!(Recovery::remove_recovery(Origin::signed(5))); + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + vec![22, 33, 44], + threshold, + delay_period + )); + }); } #[test] fn create_recovery_handles_basic_errors() { - new_test_ext().execute_with(|| { - // No friends - assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![], 1, 0), - Error::::NotEnoughFriends - ); - // Zero threshold - assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2], 0, 0), - Error::::ZeroThreshold - ); - // Threshold greater than friends length - assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 4, 0), - Error::::NotEnoughFriends - ); - // Too many friends - assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![1, 2, 3, 4], 4, 0), - Error::::MaxFriends - ); - // Unsorted friends - assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![3, 2, 4], 3, 0), - Error::::NotSorted - ); - // Duplicate friends - assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2, 2, 4], 3, 0), - Error::::NotSorted - ); - // Already configured - assert_ok!( - Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10) - ); - assert_noop!( - Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10), - Error::::AlreadyRecoverable - ); - }); + new_test_ext().execute_with(|| { + // No 
friends + assert_noop!( + Recovery::create_recovery(Origin::signed(5), vec![], 1, 0), + Error::::NotEnoughFriends + ); + // Zero threshold + assert_noop!( + Recovery::create_recovery(Origin::signed(5), vec![2], 0, 0), + Error::::ZeroThreshold + ); + // Threshold greater than friends length + assert_noop!( + Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 4, 0), + Error::::NotEnoughFriends + ); + // Too many friends + assert_noop!( + Recovery::create_recovery(Origin::signed(5), vec![1, 2, 3, 4], 4, 0), + Error::::MaxFriends + ); + // Unsorted friends + assert_noop!( + Recovery::create_recovery(Origin::signed(5), vec![3, 2, 4], 3, 0), + Error::::NotSorted + ); + // Duplicate friends + assert_noop!( + Recovery::create_recovery(Origin::signed(5), vec![2, 2, 4], 3, 0), + Error::::NotSorted + ); + // Already configured + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + vec![2, 3, 4], + 3, + 10 + )); + assert_noop!( + Recovery::create_recovery(Origin::signed(5), vec![2, 3, 4], 3, 10), + Error::::AlreadyRecoverable + ); + }); } #[test] fn create_recovery_works() { - new_test_ext().execute_with(|| { - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - // Account 5 sets up a recovery configuration on their account - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); - // Deposit is taken, and scales with the number of friends they pick - // Base 10 + 1 per friends = 13 total reserved - assert_eq!(Balances::reserved_balance(5), 13); - // Recovery configuration is correctly stored - let recovery_config = RecoveryConfig { - delay_period, - deposit: 13, - friends: friends.clone(), - threshold, - }; - assert_eq!(Recovery::recovery_config(5), Some(recovery_config)); - }); + new_test_ext().execute_with(|| { + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + // Account 5 sets up a recovery configuration on their account + 
assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + // Deposit is taken, and scales with the number of friends they pick + // Base 10 + 1 per friends = 13 total reserved + assert_eq!(Balances::reserved_balance(5), 13); + // Recovery configuration is correctly stored + let recovery_config = RecoveryConfig { + delay_period, + deposit: 13, + friends: friends.clone(), + threshold, + }; + assert_eq!(Recovery::recovery_config(5), Some(recovery_config)); + }); } #[test] fn initiate_recovery_handles_basic_errors() { - new_test_ext().execute_with(|| { - // No recovery process set up for the account - assert_noop!( - Recovery::initiate_recovery(Origin::signed(1), 5), - Error::::NotRecoverable - ); - // Create a recovery process for next test - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); - // Same user cannot recover same account twice - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - assert_noop!(Recovery::initiate_recovery(Origin::signed(1), 5), Error::::AlreadyStarted); - // No double deposit - assert_eq!(Balances::reserved_balance(1), 10); - }); + new_test_ext().execute_with(|| { + // No recovery process set up for the account + assert_noop!( + Recovery::initiate_recovery(Origin::signed(1), 5), + Error::::NotRecoverable + ); + // Create a recovery process for next test + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + // Same user cannot recover same account twice + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_noop!( + Recovery::initiate_recovery(Origin::signed(1), 5), + Error::::AlreadyStarted + ); + // No double deposit + assert_eq!(Balances::reserved_balance(1), 10); + }); } #[test] fn 
initiate_recovery_works() { - new_test_ext().execute_with(|| { - // Create a recovery process for the test - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); - // Recovery can be initiated - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - // Deposit is reserved - assert_eq!(Balances::reserved_balance(1), 10); - // Recovery status object is created correctly - let recovery_status = ActiveRecovery { - created: 0, - deposit: 10, - friends: vec![], - }; - assert_eq!(>::get(&5, &1), Some(recovery_status)); - // Multiple users can attempt to recover the same account - assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); - }); + new_test_ext().execute_with(|| { + // Create a recovery process for the test + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + // Recovery can be initiated + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + // Deposit is reserved + assert_eq!(Balances::reserved_balance(1), 10); + // Recovery status object is created correctly + let recovery_status = ActiveRecovery { + created: 0, + deposit: 10, + friends: vec![], + }; + assert_eq!(>::get(&5, &1), Some(recovery_status)); + // Multiple users can attempt to recover the same account + assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); + }); } #[test] fn vouch_recovery_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Cannot vouch for non-recoverable account - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotRecoverable); - // Create a recovery process for next tests - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, 
delay_period)); - // Cannot vouch a recovery process that has not started - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::NotStarted); - // Initiate a recovery process - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - // Cannot vouch if you are not a friend - assert_noop!(Recovery::vouch_recovery(Origin::signed(22), 5, 1), Error::::NotFriend); - // Cannot vouch twice - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_noop!(Recovery::vouch_recovery(Origin::signed(2), 5, 1), Error::::AlreadyVouched); - }); + new_test_ext().execute_with(|| { + // Cannot vouch for non-recoverable account + assert_noop!( + Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Error::::NotRecoverable + ); + // Create a recovery process for next tests + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + // Cannot vouch a recovery process that has not started + assert_noop!( + Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Error::::NotStarted + ); + // Initiate a recovery process + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + // Cannot vouch if you are not a friend + assert_noop!( + Recovery::vouch_recovery(Origin::signed(22), 5, 1), + Error::::NotFriend + ); + // Cannot vouch twice + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_noop!( + Recovery::vouch_recovery(Origin::signed(2), 5, 1), + Error::::AlreadyVouched + ); + }); } #[test] fn vouch_recovery_works() { - new_test_ext().execute_with(|| { - // Create and initiate a recovery process for the test - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - // Vouching works - 
assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - // Handles out of order vouches - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); - // Final recovery status object is updated correctly - let recovery_status = ActiveRecovery { - created: 0, - deposit: 10, - friends: vec![2, 3, 4], - }; - assert_eq!(>::get(&5, &1), Some(recovery_status)); - }); + new_test_ext().execute_with(|| { + // Create and initiate a recovery process for the test + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + // Vouching works + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + // Handles out of order vouches + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + // Final recovery status object is updated correctly + let recovery_status = ActiveRecovery { + created: 0, + deposit: 10, + friends: vec![2, 3, 4], + }; + assert_eq!(>::get(&5, &1), Some(recovery_status)); + }); } #[test] fn claim_recovery_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Cannot claim a non-recoverable account - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotRecoverable); - // Create a recovery process for the test - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); - // Cannot claim an account which has not started the recovery process - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::NotStarted); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - // Cannot claim an account which has not passed the delay period - 
assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::DelayPeriod); - run_to_block(11); - // Cannot claim an account which has not passed the threshold number of votes - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); - // Only 2/3 is not good enough - assert_noop!(Recovery::claim_recovery(Origin::signed(1), 5), Error::::Threshold); - }); + new_test_ext().execute_with(|| { + // Cannot claim a non-recoverable account + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::NotRecoverable + ); + // Create a recovery process for the test + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + // Cannot claim an account which has not started the recovery process + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::NotStarted + ); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + // Cannot claim an account which has not passed the delay period + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::DelayPeriod + ); + run_to_block(11); + // Cannot claim an account which has not passed the threshold number of votes + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + // Only 2/3 is not good enough + assert_noop!( + Recovery::claim_recovery(Origin::signed(1), 5), + Error::::Threshold + ); + }); } #[test] fn claim_recovery_works() { - new_test_ext().execute_with(|| { - // Create, initiate, and vouch recovery process for the test - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - 
assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); + new_test_ext().execute_with(|| { + // Create, initiate, and vouch recovery process for the test + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 1)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 1)); - run_to_block(11); + run_to_block(11); - // Account can be recovered. - assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); - // Recovered storage item is correctly created - assert_eq!(>::get(&1), Some(5)); - // Account could be re-recovered in the case that the recoverer account also gets lost. - assert_ok!(Recovery::initiate_recovery(Origin::signed(4), 5)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 4)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 4)); - assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 4)); + // Account can be recovered. + assert_ok!(Recovery::claim_recovery(Origin::signed(1), 5)); + // Recovered storage item is correctly created + assert_eq!(>::get(&1), Some(5)); + // Account could be re-recovered in the case that the recoverer account also gets lost. + assert_ok!(Recovery::initiate_recovery(Origin::signed(4), 5)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(2), 5, 4)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(3), 5, 4)); + assert_ok!(Recovery::vouch_recovery(Origin::signed(4), 5, 4)); - run_to_block(21); + run_to_block(21); - // Account is re-recovered. 
- assert_ok!(Recovery::claim_recovery(Origin::signed(4), 5)); - // Recovered storage item is correctly updated - assert_eq!(>::get(&4), Some(5)); - }); + // Account is re-recovered. + assert_ok!(Recovery::claim_recovery(Origin::signed(4), 5)); + // Recovered storage item is correctly updated + assert_eq!(>::get(&4), Some(5)); + }); } #[test] fn close_recovery_handles_basic_errors() { - new_test_ext().execute_with(|| { - // Cannot close a non-active recovery - assert_noop!(Recovery::close_recovery(Origin::signed(5), 1), Error::::NotStarted); - }); + new_test_ext().execute_with(|| { + // Cannot close a non-active recovery + assert_noop!( + Recovery::close_recovery(Origin::signed(5), 1), + Error::::NotStarted + ); + }); } #[test] fn remove_recovery_works() { - new_test_ext().execute_with(|| { - // Cannot remove an unrecoverable account - assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::NotRecoverable); - // Create and initiate a recovery process for the test - let friends = vec![2, 3, 4]; - let threshold = 3; - let delay_period = 10; - assert_ok!(Recovery::create_recovery(Origin::signed(5), friends.clone(), threshold, delay_period)); - assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); - assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); - // Cannot remove a recovery when there are active recoveries. - assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::StillActive); - assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); - // Still need to remove one more! 
- assert_noop!(Recovery::remove_recovery(Origin::signed(5)), Error::::StillActive); - assert_ok!(Recovery::close_recovery(Origin::signed(5), 2)); - // Finally removed - assert_ok!(Recovery::remove_recovery(Origin::signed(5))); - }); + new_test_ext().execute_with(|| { + // Cannot remove an unrecoverable account + assert_noop!( + Recovery::remove_recovery(Origin::signed(5)), + Error::::NotRecoverable + ); + // Create and initiate a recovery process for the test + let friends = vec![2, 3, 4]; + let threshold = 3; + let delay_period = 10; + assert_ok!(Recovery::create_recovery( + Origin::signed(5), + friends.clone(), + threshold, + delay_period + )); + assert_ok!(Recovery::initiate_recovery(Origin::signed(1), 5)); + assert_ok!(Recovery::initiate_recovery(Origin::signed(2), 5)); + // Cannot remove a recovery when there are active recoveries. + assert_noop!( + Recovery::remove_recovery(Origin::signed(5)), + Error::::StillActive + ); + assert_ok!(Recovery::close_recovery(Origin::signed(5), 1)); + // Still need to remove one more! + assert_noop!( + Recovery::remove_recovery(Origin::signed(5)), + Error::::StillActive + ); + assert_ok!(Recovery::close_recovery(Origin::signed(5), 2)); + // Finally removed + assert_ok!(Recovery::remove_recovery(Origin::signed(5))); + }); } diff --git a/frame/scheduler/src/lib.rs b/frame/scheduler/src/lib.rs index 3e53b7a505..969197dd94 100644 --- a/frame/scheduler/src/lib.rs +++ b/frame/scheduler/src/lib.rs @@ -44,15 +44,19 @@ // Ensure we're `no_std` when compiling for Wasm. 
#![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::{RuntimeDebug, traits::{Zero, One}}; +use codec::{Decode, Encode}; use frame_support::{ - dispatch::{Dispatchable, DispatchResult, Parameter}, decl_module, decl_storage, decl_event, - traits::{Get, schedule}, - weights::{GetDispatchInfo, Weight}, + decl_event, decl_module, decl_storage, + dispatch::{DispatchResult, Dispatchable, Parameter}, + traits::{schedule, Get}, + weights::{GetDispatchInfo, Weight}, }; use frame_system::{self as system}; +use sp_runtime::{ + traits::{One, Zero}, + RuntimeDebug, +}; +use sp_std::prelude::*; /// Our pallet's configuration trait. All our types and constants go in here. If the /// pallet is dependent on specific other pallets, then their configuration traits @@ -60,18 +64,18 @@ use frame_system::{self as system}; /// /// `system::Trait` should always be included in our implied traits. pub trait Trait: system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// The aggregated origin which the dispatch will take. - type Origin: From>; + /// The aggregated origin which the dispatch will take. + type Origin: From>; - /// The aggregated call type. - type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo; + /// The aggregated call type. + type Call: Parameter + Dispatchable::Origin> + GetDispatchInfo; - /// The maximum weight that may be scheduled per block for any dispatchables of less priority - /// than `schedule::HARD_DEADLINE`. - type MaximumWeight: Get; + /// The maximum weight that may be scheduled per block for any dispatchables of less priority + /// than `schedule::HARD_DEADLINE`. + type MaximumWeight: Get; } /// Just a simple index for naming period tasks. @@ -82,25 +86,25 @@ pub type TaskAddress = (BlockNumber, u32); /// Information regarding an item to be executed in the future. 
#[derive(Clone, RuntimeDebug, Encode, Decode)] pub struct Scheduled { - /// The unique identity for this task, if there is one. - maybe_id: Option>, - /// This task's priority. - priority: schedule::Priority, - /// The call to be dispatched. - call: Call, - /// If the call is periodic, then this points to the information concerning that. - maybe_periodic: Option>, + /// The unique identity for this task, if there is one. + maybe_id: Option>, + /// This task's priority. + priority: schedule::Priority, + /// The call to be dispatched. + call: Call, + /// If the call is periodic, then this points to the information concerning that. + maybe_periodic: Option>, } decl_storage! { - trait Store for Module as Scheduler { - /// Items to be executed, indexed by the block number that they should be executed on. - pub Agenda: map hasher(twox_64_concat) T::BlockNumber - => Vec::Call, T::BlockNumber>>>; - - /// Lookup from identity to the block number and index of the task. - Lookup: map hasher(twox_64_concat) Vec => Option>; - } + trait Store for Module as Scheduler { + /// Items to be executed, indexed by the block number that they should be executed on. + pub Agenda: map hasher(twox_64_concat) T::BlockNumber + => Vec::Call, T::BlockNumber>>>; + + /// Lookup from identity to the block number and index of the task. + Lookup: map hasher(twox_64_concat) Vec => Option>; + } } decl_event!( @@ -111,413 +115,467 @@ decl_event!( ); decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what its working on. 
- pub struct Module for enum Call where origin: ::Origin { - fn deposit_event() = default; - - fn on_initialize(now: T::BlockNumber) -> Weight { - let limit = T::MaximumWeight::get(); - let mut queued = Agenda::::take(now).into_iter() - .enumerate() - .filter_map(|(index, s)| s.map(|inner| (index as u32, inner))) - .collect::>(); - queued.sort_by_key(|(_, s)| s.priority); - let mut result = 0; - let unused_items = queued.into_iter() - .enumerate() - .scan(0, |cumulative_weight, (order, (index, s))| { - *cumulative_weight += s.call.get_dispatch_info().weight; - Some((order, index, *cumulative_weight, s)) - }) - .filter_map(|(order, index, cumulative_weight, mut s)| { - if s.priority <= schedule::HARD_DEADLINE || cumulative_weight <= limit || order == 0 { - let r = s.call.clone().dispatch(system::RawOrigin::Root.into()); - let maybe_id = s.maybe_id.clone(); - if let &Some((period, count)) = &s.maybe_periodic { - if count > 1 { - s.maybe_periodic = Some((period, count - 1)); - } else { - s.maybe_periodic = None; - } - let next = now + period; - if let Some(ref id) = s.maybe_id { - let next_index = Agenda::::decode_len(now + period).unwrap_or(0) as u32; - Lookup::::insert(id, (next, next_index)); - } - Agenda::::append_or_insert(next, &[Some(s)][..]); - } else { - if let Some(ref id) = s.maybe_id { - Lookup::::remove(id); - } - } - Self::deposit_event(RawEvent::Dispatched( - (now, index), - maybe_id, - r.map(|_| ()).map_err(|e| e.error) - )); - result = cumulative_weight; - None - } else { - Some(Some(s)) - } - }) - .collect::>(); - if !unused_items.is_empty() { - let next = now + One::one(); - Agenda::::append_or_insert(next, &unused_items[..]); - } - result - } - } + // Simple declaration of the `Module` type. Lets the macro know what its working on. 
+ pub struct Module for enum Call where origin: ::Origin { + fn deposit_event() = default; + + fn on_initialize(now: T::BlockNumber) -> Weight { + let limit = T::MaximumWeight::get(); + let mut queued = Agenda::::take(now).into_iter() + .enumerate() + .filter_map(|(index, s)| s.map(|inner| (index as u32, inner))) + .collect::>(); + queued.sort_by_key(|(_, s)| s.priority); + let mut result = 0; + let unused_items = queued.into_iter() + .enumerate() + .scan(0, |cumulative_weight, (order, (index, s))| { + *cumulative_weight += s.call.get_dispatch_info().weight; + Some((order, index, *cumulative_weight, s)) + }) + .filter_map(|(order, index, cumulative_weight, mut s)| { + if s.priority <= schedule::HARD_DEADLINE || cumulative_weight <= limit || order == 0 { + let r = s.call.clone().dispatch(system::RawOrigin::Root.into()); + let maybe_id = s.maybe_id.clone(); + if let &Some((period, count)) = &s.maybe_periodic { + if count > 1 { + s.maybe_periodic = Some((period, count - 1)); + } else { + s.maybe_periodic = None; + } + let next = now + period; + if let Some(ref id) = s.maybe_id { + let next_index = Agenda::::decode_len(now + period).unwrap_or(0) as u32; + Lookup::::insert(id, (next, next_index)); + } + Agenda::::append_or_insert(next, &[Some(s)][..]); + } else { + if let Some(ref id) = s.maybe_id { + Lookup::::remove(id); + } + } + Self::deposit_event(RawEvent::Dispatched( + (now, index), + maybe_id, + r.map(|_| ()).map_err(|e| e.error) + )); + result = cumulative_weight; + None + } else { + Some(Some(s)) + } + }) + .collect::>(); + if !unused_items.is_empty() { + let next = now + One::one(); + Agenda::::append_or_insert(next, &unused_items[..]); + } + result + } + } } impl schedule::Anon::Call> for Module { - type Address = TaskAddress; - - fn schedule( - when: T::BlockNumber, - maybe_periodic: Option>, - priority: schedule::Priority, - call: ::Call - ) -> Self::Address { - // sanitize maybe_periodic - let maybe_periodic = maybe_periodic - .filter(|p| p.1 > 1 && 
!p.0.is_zero()) - // Remove one from the number of repetitions since we will schedule one now. - .map(|(p, c)| (p, c - 1)); - let s = Some(Scheduled { maybe_id: None, priority, call, maybe_periodic }); - Agenda::::append_or_insert(when, &[s][..]); - (when, Agenda::::decode_len(when).unwrap_or(1) as u32 - 1) - } - - fn cancel((when, index): Self::Address) -> Result<(), ()> { - if let Some(s) = Agenda::::mutate(when, |agenda| agenda.get_mut(index as usize).and_then(Option::take)) { - if let Some(id) = s.maybe_id { - Lookup::::remove(id) - } - Ok(()) - } else { - Err(()) - } - } + type Address = TaskAddress; + + fn schedule( + when: T::BlockNumber, + maybe_periodic: Option>, + priority: schedule::Priority, + call: ::Call, + ) -> Self::Address { + // sanitize maybe_periodic + let maybe_periodic = maybe_periodic + .filter(|p| p.1 > 1 && !p.0.is_zero()) + // Remove one from the number of repetitions since we will schedule one now. + .map(|(p, c)| (p, c - 1)); + let s = Some(Scheduled { + maybe_id: None, + priority, + call, + maybe_periodic, + }); + Agenda::::append_or_insert(when, &[s][..]); + (when, Agenda::::decode_len(when).unwrap_or(1) as u32 - 1) + } + + fn cancel((when, index): Self::Address) -> Result<(), ()> { + if let Some(s) = Agenda::::mutate(when, |agenda| { + agenda.get_mut(index as usize).and_then(Option::take) + }) { + if let Some(id) = s.maybe_id { + Lookup::::remove(id) + } + Ok(()) + } else { + Err(()) + } + } } impl schedule::Named::Call> for Module { - type Address = TaskAddress; - - fn schedule_named( - id: impl Encode, - when: T::BlockNumber, - maybe_periodic: Option>, - priority: schedule::Priority, - call: ::Call, - ) -> Result { - // determine id and ensure it is unique - let id = id.encode(); - if Lookup::::contains_key(&id) { - return Err(()) - } - - // sanitize maybe_periodic - let maybe_periodic = maybe_periodic - .filter(|p| p.1 > 1 && !p.0.is_zero()) - // Remove one from the number of repetitions since we will schedule one now. 
- .map(|(p, c)| (p, c - 1)); - - let s = Scheduled { maybe_id: Some(id.clone()), priority, call, maybe_periodic }; - Agenda::::append_or_insert(when, &[Some(s)][..]); - let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; - let address = (when, index); - Lookup::::insert(&id, &address); - Ok(address) - } - - fn cancel_named(id: impl Encode) -> Result<(), ()> { - if let Some((when, index)) = id.using_encoded(|d| Lookup::::take(d)) { - let i = index as usize; - Agenda::::mutate(when, |agenda| if let Some(s) = agenda.get_mut(i) { *s = None }); - Ok(()) - } else { - Err(()) - } - } + type Address = TaskAddress; + + fn schedule_named( + id: impl Encode, + when: T::BlockNumber, + maybe_periodic: Option>, + priority: schedule::Priority, + call: ::Call, + ) -> Result { + // determine id and ensure it is unique + let id = id.encode(); + if Lookup::::contains_key(&id) { + return Err(()); + } + + // sanitize maybe_periodic + let maybe_periodic = maybe_periodic + .filter(|p| p.1 > 1 && !p.0.is_zero()) + // Remove one from the number of repetitions since we will schedule one now. 
+ .map(|(p, c)| (p, c - 1)); + + let s = Scheduled { + maybe_id: Some(id.clone()), + priority, + call, + maybe_periodic, + }; + Agenda::::append_or_insert(when, &[Some(s)][..]); + let index = Agenda::::decode_len(when).unwrap_or(1) as u32 - 1; + let address = (when, index); + Lookup::::insert(&id, &address); + Ok(address) + } + + fn cancel_named(id: impl Encode) -> Result<(), ()> { + if let Some((when, index)) = id.using_encoded(|d| Lookup::::take(d)) { + let i = index as usize; + Agenda::::mutate(when, |agenda| { + if let Some(s) = agenda.get_mut(i) { + *s = None + } + }); + Ok(()) + } else { + Err(()) + } + } } #[cfg(test)] mod tests { - use super::*; - - use frame_support::{ - impl_outer_event, impl_outer_origin, impl_outer_dispatch, parameter_types, assert_ok, - traits::{OnInitialize, OnFinalize, schedule::{Anon, Named}}, - weights::{DispatchClass, FunctionOf} - }; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, - }; - use crate as scheduler; - - mod logger { - use super::*; - use std::cell::RefCell; - use frame_system::ensure_root; - - thread_local! { - static LOG: RefCell> = RefCell::new(Vec::new()); - } - pub fn log() -> Vec { - LOG.with(|log| log.borrow().clone()) - } - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; - } - decl_storage! { - trait Store for Module as Logger { - } - } - decl_event! { - pub enum Event { - Logged(u32, Weight), - } - } - decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what its working on. 
- pub struct Module for enum Call where origin: ::Origin { - fn deposit_event() = default; - - #[weight = FunctionOf( - |args: (&u32, &Weight)| *args.1, - |_: (&u32, &Weight)| DispatchClass::Normal, - true - )] - fn log(origin, i: u32, weight: Weight) { - ensure_root(origin)?; - Self::deposit_event(Event::Logged(i, weight)); - LOG.with(|log| { - log.borrow_mut().push(i); - }) - } - } - } - } - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - system::System, - logger::Logger, - } - } - - impl_outer_event! { - pub enum Event for Test { - system, - logger, - scheduler, - } - } - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl system::Trait for Test { - type Origin = Origin; - type Call = (); - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - impl logger::Trait for Test { - type Event = (); - } - parameter_types! 
{ - pub const MaximumWeight: Weight = 10_000; - } - impl Trait for Test { - type Event = (); - type Origin = Origin; - type Call = Call; - type MaximumWeight = MaximumWeight; - } - type System = system::Module; - type Logger = logger::Module; - type Scheduler = Module; - - // This function basically just builds a genesis storage key/value store according to - // our desired mockup. - fn new_test_ext() -> sp_io::TestExternalities { - let t = system::GenesisConfig::default().build_storage::().unwrap(); - t.into() - } - - fn run_to_block(n: u64) { - while System::block_number() < n { - Scheduler::on_finalize(System::block_number()); - System::set_block_number(System::block_number() + 1); - Scheduler::on_initialize(System::block_number()); - } - } - - #[test] - fn basic_scheduling_works() { - new_test_ext().execute_with(|| { - Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); - run_to_block(3); - assert!(logger::log().is_empty()); - run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); - run_to_block(100); - assert_eq!(logger::log(), vec![42u32]); - }); - } - - #[test] - fn periodic_scheduling_works() { - new_test_ext().execute_with(|| { - // at #4, every 3 blocks, 3 times. - Scheduler::schedule(4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))); - run_to_block(3); - assert!(logger::log().is_empty()); - run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); - run_to_block(6); - assert_eq!(logger::log(), vec![42u32]); - run_to_block(7); - assert_eq!(logger::log(), vec![42u32, 42u32]); - run_to_block(9); - assert_eq!(logger::log(), vec![42u32, 42u32]); - run_to_block(10); - assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); - run_to_block(100); - assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); - }); - } - - #[test] - fn cancel_named_scheduling_works_with_normal_cancel() { - new_test_ext().execute_with(|| { - // at #4. 
- Scheduler::schedule_named(1u32, 4, None, 127, Call::Logger(logger::Call::log(69, 1000))).unwrap(); - let i = Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); - run_to_block(3); - assert!(logger::log().is_empty()); - assert_ok!(Scheduler::cancel_named(1u32)); - assert_ok!(Scheduler::cancel(i)); - run_to_block(100); - assert!(logger::log().is_empty()); - }); - } - - #[test] - fn cancel_named_periodic_scheduling_works() { - new_test_ext().execute_with(|| { - // at #4, every 3 blocks, 3 times. - Scheduler::schedule_named(1u32, 4, Some((3, 3)), 127, Call::Logger(logger::Call::log(42, 1000))).unwrap(); - // same id results in error. - assert!(Scheduler::schedule_named(1u32, 4, None, 127, Call::Logger(logger::Call::log(69, 1000))).is_err()); - // different id is ok. - Scheduler::schedule_named(2u32, 8, None, 127, Call::Logger(logger::Call::log(69, 1000))).unwrap(); - run_to_block(3); - assert!(logger::log().is_empty()); - run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); - run_to_block(6); - assert_ok!(Scheduler::cancel_named(1u32)); - run_to_block(100); - assert_eq!(logger::log(), vec![42u32, 69u32]); - }); - } - - #[test] - fn scheduler_respects_weight_limits() { - new_test_ext().execute_with(|| { - Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 6000))); - Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(69, 6000))); - run_to_block(4); - assert_eq!(logger::log(), vec![42u32]); - run_to_block(5); - assert_eq!(logger::log(), vec![42u32, 69u32]); - }); - } - - #[test] - fn scheduler_respects_hard_deadlines_more() { - new_test_ext().execute_with(|| { - Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(42, 6000))); - Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(69, 6000))); - run_to_block(4); - assert_eq!(logger::log(), vec![42u32, 69u32]); - }); - } - - #[test] - fn scheduler_respects_priority_ordering() { - new_test_ext().execute_with(|| { - Scheduler::schedule(4, 
None, 1, Call::Logger(logger::Call::log(42, 6000))); - Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(69, 6000))); - run_to_block(4); - assert_eq!(logger::log(), vec![69u32, 42u32]); - }); - } - - #[test] - fn scheduler_respects_priority_ordering_with_soft_deadlines() { - new_test_ext().execute_with(|| { - Scheduler::schedule(4, None, 255, Call::Logger(logger::Call::log(42, 5000))); - Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(69, 5000))); - Scheduler::schedule(4, None, 126, Call::Logger(logger::Call::log(2600, 6000))); - run_to_block(4); - assert_eq!(logger::log(), vec![2600u32]); - run_to_block(5); - assert_eq!(logger::log(), vec![2600u32, 69u32, 42u32]); - }); - } - - #[test] - fn initialize_weight_is_correct() { - new_test_ext().execute_with(|| { - Scheduler::schedule(1, None, 255, Call::Logger(logger::Call::log(3, 1000))); - Scheduler::schedule(1, None, 128, Call::Logger(logger::Call::log(42, 5000))); - Scheduler::schedule(1, None, 127, Call::Logger(logger::Call::log(69, 5000))); - Scheduler::schedule(1, None, 126, Call::Logger(logger::Call::log(2600, 6000))); - let weight = Scheduler::on_initialize(1); - assert_eq!(weight, 6000); - let weight = Scheduler::on_initialize(2); - assert_eq!(weight, 10000); - let weight = Scheduler::on_initialize(3); - assert_eq!(weight, 1000); - let weight = Scheduler::on_initialize(4); - assert_eq!(weight, 0); - }); - } + use super::*; + + use frame_support::{ + assert_ok, impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, + traits::{ + schedule::{Anon, Named}, + OnFinalize, OnInitialize, + }, + weights::{DispatchClass, FunctionOf}, + }; + use sp_core::H256; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. 
+ use crate as scheduler; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + mod logger { + use super::*; + use frame_system::ensure_root; + use std::cell::RefCell; + + thread_local! { + static LOG: RefCell> = RefCell::new(Vec::new()); + } + pub fn log() -> Vec { + LOG.with(|log| log.borrow().clone()) + } + pub trait Trait: system::Trait { + type Event: From + Into<::Event>; + } + decl_storage! { + trait Store for Module as Logger { + } + } + decl_event! { + pub enum Event { + Logged(u32, Weight), + } + } + decl_module! { + // Simple declaration of the `Module` type. Lets the macro know what its working on. + pub struct Module for enum Call where origin: ::Origin { + fn deposit_event() = default; + + #[weight = FunctionOf( + |args: (&u32, &Weight)| *args.1, + |_: (&u32, &Weight)| DispatchClass::Normal, + true + )] + fn log(origin, i: u32, weight: Weight) { + ensure_root(origin)?; + Self::deposit_event(Event::Logged(i, weight)); + LOG.with(|log| { + log.borrow_mut().push(i); + }) + } + } + } + } + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + impl_outer_dispatch! { + pub enum Call for Test where origin: Origin { + system::System, + logger::Logger, + } + } + + impl_outer_event! { + pub enum Event for Test { + system, + logger, + scheduler, + } + } + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl system::Trait for Test { + type Origin = Origin; + type Call = (); + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + impl logger::Trait for Test { + type Event = (); + } + parameter_types! { + pub const MaximumWeight: Weight = 10_000; + } + impl Trait for Test { + type Event = (); + type Origin = Origin; + type Call = Call; + type MaximumWeight = MaximumWeight; + } + type System = system::Module; + type Logger = logger::Module; + type Scheduler = Module; + + // This function basically just builds a genesis storage key/value store according to + // our desired mockup. 
+ fn new_test_ext() -> sp_io::TestExternalities { + let t = system::GenesisConfig::default() + .build_storage::() + .unwrap(); + t.into() + } + + fn run_to_block(n: u64) { + while System::block_number() < n { + Scheduler::on_finalize(System::block_number()); + System::set_block_number(System::block_number() + 1); + Scheduler::on_initialize(System::block_number()); + } + } + + #[test] + fn basic_scheduling_works() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); + run_to_block(3); + assert!(logger::log().is_empty()); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(100); + assert_eq!(logger::log(), vec![42u32]); + }); + } + + #[test] + fn periodic_scheduling_works() { + new_test_ext().execute_with(|| { + // at #4, every 3 blocks, 3 times. + Scheduler::schedule( + 4, + Some((3, 3)), + 127, + Call::Logger(logger::Call::log(42, 1000)), + ); + run_to_block(3); + assert!(logger::log().is_empty()); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(6); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(7); + assert_eq!(logger::log(), vec![42u32, 42u32]); + run_to_block(9); + assert_eq!(logger::log(), vec![42u32, 42u32]); + run_to_block(10); + assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); + run_to_block(100); + assert_eq!(logger::log(), vec![42u32, 42u32, 42u32]); + }); + } + + #[test] + fn cancel_named_scheduling_works_with_normal_cancel() { + new_test_ext().execute_with(|| { + // at #4. 
+ Scheduler::schedule_named( + 1u32, + 4, + None, + 127, + Call::Logger(logger::Call::log(69, 1000)), + ) + .unwrap(); + let i = Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 1000))); + run_to_block(3); + assert!(logger::log().is_empty()); + assert_ok!(Scheduler::cancel_named(1u32)); + assert_ok!(Scheduler::cancel(i)); + run_to_block(100); + assert!(logger::log().is_empty()); + }); + } + + #[test] + fn cancel_named_periodic_scheduling_works() { + new_test_ext().execute_with(|| { + // at #4, every 3 blocks, 3 times. + Scheduler::schedule_named( + 1u32, + 4, + Some((3, 3)), + 127, + Call::Logger(logger::Call::log(42, 1000)), + ) + .unwrap(); + // same id results in error. + assert!(Scheduler::schedule_named( + 1u32, + 4, + None, + 127, + Call::Logger(logger::Call::log(69, 1000)) + ) + .is_err()); + // different id is ok. + Scheduler::schedule_named( + 2u32, + 8, + None, + 127, + Call::Logger(logger::Call::log(69, 1000)), + ) + .unwrap(); + run_to_block(3); + assert!(logger::log().is_empty()); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(6); + assert_ok!(Scheduler::cancel_named(1u32)); + run_to_block(100); + assert_eq!(logger::log(), vec![42u32, 69u32]); + }); + } + + #[test] + fn scheduler_respects_weight_limits() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(42, 6000))); + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(69, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32]); + run_to_block(5); + assert_eq!(logger::log(), vec![42u32, 69u32]); + }); + } + + #[test] + fn scheduler_respects_hard_deadlines_more() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(42, 6000))); + Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(69, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![42u32, 69u32]); + }); + } + + #[test] + fn 
scheduler_respects_priority_ordering() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 1, Call::Logger(logger::Call::log(42, 6000))); + Scheduler::schedule(4, None, 0, Call::Logger(logger::Call::log(69, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![69u32, 42u32]); + }); + } + + #[test] + fn scheduler_respects_priority_ordering_with_soft_deadlines() { + new_test_ext().execute_with(|| { + Scheduler::schedule(4, None, 255, Call::Logger(logger::Call::log(42, 5000))); + Scheduler::schedule(4, None, 127, Call::Logger(logger::Call::log(69, 5000))); + Scheduler::schedule(4, None, 126, Call::Logger(logger::Call::log(2600, 6000))); + run_to_block(4); + assert_eq!(logger::log(), vec![2600u32]); + run_to_block(5); + assert_eq!(logger::log(), vec![2600u32, 69u32, 42u32]); + }); + } + + #[test] + fn initialize_weight_is_correct() { + new_test_ext().execute_with(|| { + Scheduler::schedule(1, None, 255, Call::Logger(logger::Call::log(3, 1000))); + Scheduler::schedule(1, None, 128, Call::Logger(logger::Call::log(42, 5000))); + Scheduler::schedule(1, None, 127, Call::Logger(logger::Call::log(69, 5000))); + Scheduler::schedule(1, None, 126, Call::Logger(logger::Call::log(2600, 6000))); + let weight = Scheduler::on_initialize(1); + assert_eq!(weight, 6000); + let weight = Scheduler::on_initialize(2); + assert_eq!(weight, 10000); + let weight = Scheduler::on_initialize(3); + assert_eq!(weight, 1000); + let weight = Scheduler::on_initialize(4); + assert_eq!(weight, 0); + }); + } } diff --git a/frame/scored-pool/src/lib.rs b/frame/scored-pool/src/lib.rs index eca877f096..6ef4c6e533 100644 --- a/frame/scored-pool/src/lib.rs +++ b/frame/scored-pool/src/lib.rs @@ -91,117 +91,121 @@ mod mock; mod tests; use codec::FullCodec; -use sp_std::{ - fmt::Debug, - prelude::*, -}; use frame_support::{ - decl_module, decl_storage, decl_event, ensure, decl_error, - traits::{EnsureOrigin, ChangeMembers, InitializeMembers, Currency, Get, ReservableCurrency}, - 
weights::{Weight, MINIMUM_WEIGHT, SimpleDispatchInfo}, + decl_error, decl_event, decl_module, decl_storage, ensure, + traits::{ChangeMembers, Currency, EnsureOrigin, Get, InitializeMembers, ReservableCurrency}, + weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, }; use frame_system::{self as system, ensure_root, ensure_signed}; -use sp_runtime::{ - traits::{AtLeast32Bit, MaybeSerializeDeserialize, Zero, StaticLookup}, -}; +use sp_runtime::traits::{AtLeast32Bit, MaybeSerializeDeserialize, StaticLookup, Zero}; +use sp_std::{fmt::Debug, prelude::*}; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; -type PoolT = Vec<(::AccountId, Option<>::Score>)>; +type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; +type PoolT = Vec<( + ::AccountId, + Option<>::Score>, +)>; /// The enum is supplied when refreshing the members set. /// Depending on the enum variant the corresponding associated /// type function will be invoked. enum ChangeReceiver { - /// Should call `T::MembershipInitialized`. - MembershipInitialized, - /// Should call `T::MembershipChanged`. - MembershipChanged, + /// Should call `T::MembershipInitialized`. + MembershipInitialized, + /// Should call `T::MembershipChanged`. + MembershipChanged, } -pub trait Trait: frame_system::Trait { - /// The currency used for deposits. - type Currency: Currency + ReservableCurrency; - - /// The score attributed to a member or candidate. - type Score: - AtLeast32Bit + Clone + Copy + Default + FullCodec + MaybeSerializeDeserialize + Debug; - - /// The overarching event type. - type Event: From> + Into<::Event>; - - // The deposit which is reserved from candidates if they want to - // start a candidacy. The deposit gets returned when the candidacy is - // withdrawn or when the candidate is kicked. - type CandidateDeposit: Get>; - - /// Every `Period` blocks the `Members` are filled with the highest scoring - /// members in the `Pool`. 
- type Period: Get; - - /// The receiver of the signal for when the membership has been initialized. - /// This happens pre-genesis and will usually be the same as `MembershipChanged`. - /// If you need to do something different on initialization, then you can change - /// this accordingly. - type MembershipInitialized: InitializeMembers; - - /// The receiver of the signal for when the members have changed. - type MembershipChanged: ChangeMembers; - - /// Allows a configurable origin type to set a score to a candidate in the pool. - type ScoreOrigin: EnsureOrigin; - - /// Required origin for removing a member (though can always be Root). - /// Configurable origin which enables removing an entity. If the entity - /// is part of the `Members` it is immediately replaced by the next - /// highest scoring candidate, if available. - type KickOrigin: EnsureOrigin; +pub trait Trait: frame_system::Trait { + /// The currency used for deposits. + type Currency: Currency + ReservableCurrency; + + /// The score attributed to a member or candidate. + type Score: AtLeast32Bit + + Clone + + Copy + + Default + + FullCodec + + MaybeSerializeDeserialize + + Debug; + + /// The overarching event type. + type Event: From> + Into<::Event>; + + // The deposit which is reserved from candidates if they want to + // start a candidacy. The deposit gets returned when the candidacy is + // withdrawn or when the candidate is kicked. + type CandidateDeposit: Get>; + + /// Every `Period` blocks the `Members` are filled with the highest scoring + /// members in the `Pool`. + type Period: Get; + + /// The receiver of the signal for when the membership has been initialized. + /// This happens pre-genesis and will usually be the same as `MembershipChanged`. + /// If you need to do something different on initialization, then you can change + /// this accordingly. + type MembershipInitialized: InitializeMembers; + + /// The receiver of the signal for when the members have changed. 
+ type MembershipChanged: ChangeMembers; + + /// Allows a configurable origin type to set a score to a candidate in the pool. + type ScoreOrigin: EnsureOrigin; + + /// Required origin for removing a member (though can always be Root). + /// Configurable origin which enables removing an entity. If the entity + /// is part of the `Members` it is immediately replaced by the next + /// highest scoring candidate, if available. + type KickOrigin: EnsureOrigin; } decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as ScoredPool { - /// The current pool of candidates, stored as an ordered Vec - /// (ordered descending by score, `None` last, highest first). - Pool get(fn pool) config(): PoolT; - - /// A Map of the candidates. The information in this Map is redundant - /// to the information in the `Pool`. But the Map enables us to easily - /// check if a candidate is already in the pool, without having to - /// iterate over the entire pool (the `Pool` is not sorted by - /// `T::AccountId`, but by `T::Score` instead). - CandidateExists get(fn candidate_exists): map hasher(twox_64_concat) T::AccountId => bool; - - /// The current membership, stored as an ordered Vec. - Members get(fn members): Vec; - - /// Size of the `Members` set. - MemberCount get(fn member_count) config(): u32; - } - add_extra_genesis { - config(members): Vec; - config(phantom): sp_std::marker::PhantomData; - build(|config| { - let mut pool = config.pool.clone(); - - // reserve balance for each candidate in the pool. - // panicking here is ok, since this just happens one time, pre-genesis. - pool - .iter() - .for_each(|(who, _)| { - T::Currency::reserve(&who, T::CandidateDeposit::get()) - .expect("balance too low to create candidacy"); - >::insert(who, true); - }); - - // Sorts the `Pool` by score in a descending order. Entities which - // have a score of `None` are sorted to the beginning of the vec. 
- pool.sort_by_key(|(_, maybe_score)| - Reverse(maybe_score.unwrap_or_default()) - ); - - >::put(&pool); - >::refresh_members(pool, ChangeReceiver::MembershipInitialized); - }) - } + trait Store for Module, I: Instance=DefaultInstance> as ScoredPool { + /// The current pool of candidates, stored as an ordered Vec + /// (ordered descending by score, `None` last, highest first). + Pool get(fn pool) config(): PoolT; + + /// A Map of the candidates. The information in this Map is redundant + /// to the information in the `Pool`. But the Map enables us to easily + /// check if a candidate is already in the pool, without having to + /// iterate over the entire pool (the `Pool` is not sorted by + /// `T::AccountId`, but by `T::Score` instead). + CandidateExists get(fn candidate_exists): map hasher(twox_64_concat) T::AccountId => bool; + + /// The current membership, stored as an ordered Vec. + Members get(fn members): Vec; + + /// Size of the `Members` set. + MemberCount get(fn member_count) config(): u32; + } + add_extra_genesis { + config(members): Vec; + config(phantom): sp_std::marker::PhantomData; + build(|config| { + let mut pool = config.pool.clone(); + + // reserve balance for each candidate in the pool. + // panicking here is ok, since this just happens one time, pre-genesis. + pool + .iter() + .for_each(|(who, _)| { + T::Currency::reserve(&who, T::CandidateDeposit::get()) + .expect("balance too low to create candidacy"); + >::insert(who, true); + }); + + // Sorts the `Pool` by score in a descending order. Entities which + // have a score of `None` are sorted to the beginning of the vec. + pool.sort_by_key(|(_, maybe_score)| + Reverse(maybe_score.unwrap_or_default()) + ); + + >::put(&pool); + >::refresh_members(pool, ChangeReceiver::MembershipInitialized); + }) + } } decl_event!( @@ -226,250 +230,241 @@ decl_event!( ); decl_error! { - /// Error for the scored-pool module. - pub enum Error for Module, I: Instance> { - /// Already a member. 
- AlreadyInPool, - /// Index out of bounds. - InvalidIndex, - /// Index does not match requested account. - WrongAccountIndex, - } + /// Error for the scored-pool module. + pub enum Error for Module, I: Instance> { + /// Already a member. + AlreadyInPool, + /// Index out of bounds. + InvalidIndex, + /// Index does not match requested account. + WrongAccountIndex, + } } decl_module! { - pub struct Module, I: Instance=DefaultInstance> - for enum Call - where origin: T::Origin - { - type Error = Error; - - fn deposit_event() = default; - - /// Every `Period` blocks the `Members` set is refreshed from the - /// highest scoring members in the pool. - fn on_initialize(n: T::BlockNumber) -> Weight { - if n % T::Period::get() == Zero::zero() { - let pool = >::get(); - >::refresh_members(pool, ChangeReceiver::MembershipChanged); - } - MINIMUM_WEIGHT - } - - /// Add `origin` to the pool of candidates. - /// - /// This results in `CandidateDeposit` being reserved from - /// the `origin` account. The deposit is returned once - /// candidacy is withdrawn by the candidate or the entity - /// is kicked by `KickOrigin`. - /// - /// The dispatch origin of this function must be signed. - /// - /// The `index` parameter of this function must be set to - /// the index of the transactor in the `Pool`. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn submit_candidacy(origin) { - let who = ensure_signed(origin)?; - ensure!(!>::contains_key(&who), Error::::AlreadyInPool); - - let deposit = T::CandidateDeposit::get(); - T::Currency::reserve(&who, deposit)?; - - // can be inserted as last element in pool, since entities with - // `None` are always sorted to the end. - if let Err(e) = >::append(&[(who.clone(), None)]) { - T::Currency::unreserve(&who, deposit); - Err(e)? - } - - >::insert(&who, true); - - Self::deposit_event(RawEvent::CandidateAdded); - } - - /// An entity withdraws candidacy and gets its deposit back. 
- /// - /// If the entity is part of the `Members`, then the highest member - /// of the `Pool` that is not currently in `Members` is immediately - /// placed in the set instead. - /// - /// The dispatch origin of this function must be signed. - /// - /// The `index` parameter of this function must be set to - /// the index of the transactor in the `Pool`. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn withdraw_candidacy( - origin, - index: u32 - ) { - let who = ensure_signed(origin)?; - - let pool = >::get(); - Self::ensure_index(&pool, &who, index)?; - - Self::remove_member(pool, who, index)?; - Self::deposit_event(RawEvent::CandidateWithdrew); - } - - /// Kick a member `who` from the set. - /// - /// May only be called from `KickOrigin` or root. - /// - /// The `index` parameter of this function must be set to - /// the index of `dest` in the `Pool`. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn kick( - origin, - dest: ::Source, - index: u32 - ) { - T::KickOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let who = T::Lookup::lookup(dest)?; - - let pool = >::get(); - Self::ensure_index(&pool, &who, index)?; - - Self::remove_member(pool, who, index)?; - Self::deposit_event(RawEvent::CandidateKicked); - } - - /// Score a member `who` with `score`. - /// - /// May only be called from `ScoreOrigin` or root. - /// - /// The `index` parameter of this function must be set to - /// the index of the `dest` in the `Pool`. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn score( - origin, - dest: ::Source, - index: u32, - score: T::Score - ) { - T::ScoreOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let who = T::Lookup::lookup(dest)?; - - let mut pool = >::get(); - Self::ensure_index(&pool, &who, index)?; - - pool.remove(index as usize); - - // we binary search the pool (which is sorted descending by score). 
- // if there is already an element with `score`, we insert - // right before that. if not, the search returns a location - // where we can insert while maintaining order. - let item = (who.clone(), Some(score.clone())); - let location = pool - .binary_search_by_key( - &Reverse(score), - |(_, maybe_score)| Reverse(maybe_score.unwrap_or_default()) - ) - .unwrap_or_else(|l| l); - pool.insert(location, item); - - >::put(&pool); - Self::deposit_event(RawEvent::CandidateScored); - } - - /// Dispatchable call to change `MemberCount`. - /// - /// This will only have an effect the next time a refresh happens - /// (this happens each `Period`). - /// - /// May only be called from root. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn change_member_count(origin, count: u32) { - ensure_root(origin)?; - >::put(&count); - } - } + pub struct Module, I: Instance=DefaultInstance> + for enum Call + where origin: T::Origin + { + type Error = Error; + + fn deposit_event() = default; + + /// Every `Period` blocks the `Members` set is refreshed from the + /// highest scoring members in the pool. + fn on_initialize(n: T::BlockNumber) -> Weight { + if n % T::Period::get() == Zero::zero() { + let pool = >::get(); + >::refresh_members(pool, ChangeReceiver::MembershipChanged); + } + MINIMUM_WEIGHT + } + + /// Add `origin` to the pool of candidates. + /// + /// This results in `CandidateDeposit` being reserved from + /// the `origin` account. The deposit is returned once + /// candidacy is withdrawn by the candidate or the entity + /// is kicked by `KickOrigin`. + /// + /// The dispatch origin of this function must be signed. + /// + /// The `index` parameter of this function must be set to + /// the index of the transactor in the `Pool`. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn submit_candidacy(origin) { + let who = ensure_signed(origin)?; + ensure!(!>::contains_key(&who), Error::::AlreadyInPool); + + let deposit = T::CandidateDeposit::get(); + T::Currency::reserve(&who, deposit)?; + + // can be inserted as last element in pool, since entities with + // `None` are always sorted to the end. + if let Err(e) = >::append(&[(who.clone(), None)]) { + T::Currency::unreserve(&who, deposit); + Err(e)? + } + + >::insert(&who, true); + + Self::deposit_event(RawEvent::CandidateAdded); + } + + /// An entity withdraws candidacy and gets its deposit back. + /// + /// If the entity is part of the `Members`, then the highest member + /// of the `Pool` that is not currently in `Members` is immediately + /// placed in the set instead. + /// + /// The dispatch origin of this function must be signed. + /// + /// The `index` parameter of this function must be set to + /// the index of the transactor in the `Pool`. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn withdraw_candidacy( + origin, + index: u32 + ) { + let who = ensure_signed(origin)?; + + let pool = >::get(); + Self::ensure_index(&pool, &who, index)?; + + Self::remove_member(pool, who, index)?; + Self::deposit_event(RawEvent::CandidateWithdrew); + } + + /// Kick a member `who` from the set. + /// + /// May only be called from `KickOrigin` or root. + /// + /// The `index` parameter of this function must be set to + /// the index of `dest` in the `Pool`. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn kick( + origin, + dest: ::Source, + index: u32 + ) { + T::KickOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let who = T::Lookup::lookup(dest)?; + + let pool = >::get(); + Self::ensure_index(&pool, &who, index)?; + + Self::remove_member(pool, who, index)?; + Self::deposit_event(RawEvent::CandidateKicked); + } + + /// Score a member `who` with `score`. 
+ /// + /// May only be called from `ScoreOrigin` or root. + /// + /// The `index` parameter of this function must be set to + /// the index of the `dest` in the `Pool`. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn score( + origin, + dest: ::Source, + index: u32, + score: T::Score + ) { + T::ScoreOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let who = T::Lookup::lookup(dest)?; + + let mut pool = >::get(); + Self::ensure_index(&pool, &who, index)?; + + pool.remove(index as usize); + + // we binary search the pool (which is sorted descending by score). + // if there is already an element with `score`, we insert + // right before that. if not, the search returns a location + // where we can insert while maintaining order. + let item = (who.clone(), Some(score.clone())); + let location = pool + .binary_search_by_key( + &Reverse(score), + |(_, maybe_score)| Reverse(maybe_score.unwrap_or_default()) + ) + .unwrap_or_else(|l| l); + pool.insert(location, item); + + >::put(&pool); + Self::deposit_event(RawEvent::CandidateScored); + } + + /// Dispatchable call to change `MemberCount`. + /// + /// This will only have an effect the next time a refresh happens + /// (this happens each `Period`). + /// + /// May only be called from root. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn change_member_count(origin, count: u32) { + ensure_root(origin)?; + >::put(&count); + } + } } impl, I: Instance> Module { - - /// Fetches the `MemberCount` highest scoring members from - /// `Pool` and puts them into `Members`. - /// - /// The `notify` parameter is used to deduct which associated - /// type function to invoke at the end of the method. 
- fn refresh_members( - pool: PoolT, - notify: ChangeReceiver - ) { - let count = >::get(); - - let mut new_members: Vec = pool - .into_iter() - .filter(|(_, score)| score.is_some()) - .take(count as usize) - .map(|(account_id, _)| account_id) - .collect(); - new_members.sort(); - - let old_members = >::get(); - >::put(&new_members); - - match notify { - ChangeReceiver::MembershipInitialized => - T::MembershipInitialized::initialize_members(&new_members), - ChangeReceiver::MembershipChanged => - T::MembershipChanged::set_members_sorted( - &new_members[..], - &old_members[..], - ), - } - } - - /// Removes an entity `remove` at `index` from the `Pool`. - /// - /// If the entity is a member it is also removed from `Members` and - /// the deposit is returned. - fn remove_member( - mut pool: PoolT, - remove: T::AccountId, - index: u32 - ) -> Result<(), Error> { - // all callers of this function in this module also check - // the index for validity before calling this function. - // nevertheless we check again here, to assert that there was - // no mistake when invoking this sensible function. - Self::ensure_index(&pool, &remove, index)?; - - pool.remove(index as usize); - >::put(&pool); - - // remove from set, if it was in there - let members = >::get(); - if members.binary_search(&remove).is_ok() { - Self::refresh_members(pool, ChangeReceiver::MembershipChanged); - } - - >::remove(&remove); - - T::Currency::unreserve(&remove, T::CandidateDeposit::get()); - - Self::deposit_event(RawEvent::MemberRemoved); - Ok(()) - } - - /// Checks if `index` is a valid number and if the element found - /// at `index` in `Pool` is equal to `who`. 
- fn ensure_index( - pool: &PoolT, - who: &T::AccountId, - index: u32 - ) -> Result<(), Error> { - ensure!(index < pool.len() as u32, Error::::InvalidIndex); - - let (index_who, _index_score) = &pool[index as usize]; - ensure!(index_who == who, Error::::WrongAccountIndex); - - Ok(()) - } + /// Fetches the `MemberCount` highest scoring members from + /// `Pool` and puts them into `Members`. + /// + /// The `notify` parameter is used to deduct which associated + /// type function to invoke at the end of the method. + fn refresh_members(pool: PoolT, notify: ChangeReceiver) { + let count = >::get(); + + let mut new_members: Vec = pool + .into_iter() + .filter(|(_, score)| score.is_some()) + .take(count as usize) + .map(|(account_id, _)| account_id) + .collect(); + new_members.sort(); + + let old_members = >::get(); + >::put(&new_members); + + match notify { + ChangeReceiver::MembershipInitialized => { + T::MembershipInitialized::initialize_members(&new_members) + } + ChangeReceiver::MembershipChanged => { + T::MembershipChanged::set_members_sorted(&new_members[..], &old_members[..]) + } + } + } + + /// Removes an entity `remove` at `index` from the `Pool`. + /// + /// If the entity is a member it is also removed from `Members` and + /// the deposit is returned. + fn remove_member( + mut pool: PoolT, + remove: T::AccountId, + index: u32, + ) -> Result<(), Error> { + // all callers of this function in this module also check + // the index for validity before calling this function. + // nevertheless we check again here, to assert that there was + // no mistake when invoking this sensible function. 
+ Self::ensure_index(&pool, &remove, index)?; + + pool.remove(index as usize); + >::put(&pool); + + // remove from set, if it was in there + let members = >::get(); + if members.binary_search(&remove).is_ok() { + Self::refresh_members(pool, ChangeReceiver::MembershipChanged); + } + + >::remove(&remove); + + T::Currency::unreserve(&remove, T::CandidateDeposit::get()); + + Self::deposit_event(RawEvent::MemberRemoved); + Ok(()) + } + + /// Checks if `index` is a valid number and if the element found + /// at `index` in `Pool` is equal to `who`. + fn ensure_index(pool: &PoolT, who: &T::AccountId, index: u32) -> Result<(), Error> { + ensure!(index < pool.len() as u32, Error::::InvalidIndex); + + let (index_who, _index_score) = &pool[index as usize]; + ensure!(index_who == who, Error::::WrongAccountIndex); + + Ok(()) + } } diff --git a/frame/scored-pool/src/mock.rs b/frame/scored-pool/src/mock.rs index 07bd8cffbf..9540f7b037 100644 --- a/frame/scored-pool/src/mock.rs +++ b/frame/scored-pool/src/mock.rs @@ -18,18 +18,20 @@ use super::*; -use std::cell::RefCell; -use frame_support::{impl_outer_origin, parameter_types, weights::Weight, ord_parameter_types}; +use frame_support::{impl_outer_origin, ord_parameter_types, parameter_types, weights::Weight}; use sp_core::H256; +use std::cell::RefCell; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use frame_system::EnsureSignedBy; use sp_runtime::{ - Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -use frame_system::EnsureSignedBy; impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } // For testing the pallet, we construct most of a mock runtime. This means @@ -38,89 +40,89 @@ impl_outer_origin! 
{ #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! { - pub const CandidateDeposit: u64 = 25; - pub const Period: u64 = 4; + pub const CandidateDeposit: u64 = 25; + pub const Period: u64 = 4; - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); - pub const ExistentialDeposit: u64 = 1; + pub const ExistentialDeposit: u64 = 1; } ord_parameter_types! { - pub const KickOrigin: u64 = 2; - pub const ScoreOrigin: u64 = 3; + pub const KickOrigin: u64 = 2; + pub const ScoreOrigin: u64 = 3; } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = 
pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } thread_local! { - pub static MEMBERS: RefCell> = RefCell::new(vec![]); + pub static MEMBERS: RefCell> = RefCell::new(vec![]); } pub struct TestChangeMembers; impl ChangeMembers for TestChangeMembers { - fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { - let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); - old_plus_incoming.extend_from_slice(incoming); - old_plus_incoming.sort(); + fn change_members_sorted(incoming: &[u64], outgoing: &[u64], new: &[u64]) { + let mut old_plus_incoming = MEMBERS.with(|m| m.borrow().to_vec()); + old_plus_incoming.extend_from_slice(incoming); + old_plus_incoming.sort(); - let mut new_plus_outgoing = new.to_vec(); - new_plus_outgoing.extend_from_slice(outgoing); - new_plus_outgoing.sort(); + let mut new_plus_outgoing = new.to_vec(); + new_plus_outgoing.extend_from_slice(outgoing); + new_plus_outgoing.sort(); - assert_eq!(old_plus_incoming, new_plus_outgoing); + assert_eq!(old_plus_incoming, new_plus_outgoing); - MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); - } + MEMBERS.with(|m| *m.borrow_mut() = new.to_vec()); + } } impl InitializeMembers for TestChangeMembers { - fn initialize_members(new_members: &[u64]) { - MEMBERS.with(|m| *m.borrow_mut() = new_members.to_vec()); - } + fn initialize_members(new_members: &[u64]) { + MEMBERS.with(|m| *m.borrow_mut() = new_members.to_vec()); + } } impl Trait for Test { - type Event = (); - type KickOrigin = EnsureSignedBy; - type MembershipInitialized = TestChangeMembers; - type MembershipChanged = TestChangeMembers; - type Currency = Balances; - 
type CandidateDeposit = CandidateDeposit; - type Period = Period; - type Score = u64; - type ScoreOrigin = EnsureSignedBy; + type Event = (); + type KickOrigin = EnsureSignedBy; + type MembershipInitialized = TestChangeMembers; + type MembershipChanged = TestChangeMembers; + type Currency = Balances; + type CandidateDeposit = CandidateDeposit; + type Period = Period; + type Score = u64; + type ScoreOrigin = EnsureSignedBy; } type System = frame_system::Module; @@ -129,44 +131,50 @@ type Balances = pallet_balances::Module; // This function basically just builds a genesis storage key/value store according to // our desired mockup. pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - // We use default for brevity, but you can configure as desired if needed. - pallet_balances::GenesisConfig:: { - balances: vec![ - (5, 500_000), - (10, 500_000), - (15, 500_000), - (20, 500_000), - (31, 500_000), - (40, 500_000), - (99, 1), - ], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ - pool: vec![ - (5, None), - (10, Some(1)), - (20, Some(2)), - (31, Some(2)), - (40, Some(3)), - ], - member_count: 2, - .. Default::default() - }.assimilate_storage(&mut t).unwrap(); - t.into() + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + // We use default for brevity, but you can configure as desired if needed. + pallet_balances::GenesisConfig:: { + balances: vec![ + (5, 500_000), + (10, 500_000), + (15, 500_000), + (20, 500_000), + (31, 500_000), + (40, 500_000), + (99, 1), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisConfig:: { + pool: vec![ + (5, None), + (10, Some(1)), + (20, Some(2)), + (31, Some(2)), + (40, Some(3)), + ], + member_count: 2, + ..Default::default() + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() } /// Fetch an entity from the pool, if existent. 
pub fn fetch_from_pool(who: u64) -> Option<(u64, Option)> { - >::pool() - .into_iter() - .find(|item| item.0 == who) + >::pool() + .into_iter() + .find(|item| item.0 == who) } /// Find an entity in the pool. /// Returns its position in the `Pool` vec, if existent. pub fn find_in_pool(who: u64) -> Option { - >::pool() - .into_iter() - .position(|item| item.0 == who) + >::pool() + .into_iter() + .position(|item| item.0 == who) } diff --git a/frame/scored-pool/src/tests.rs b/frame/scored-pool/src/tests.rs index 8d87a20f75..455e477711 100644 --- a/frame/scored-pool/src/tests.rs +++ b/frame/scored-pool/src/tests.rs @@ -19,7 +19,7 @@ use super::*; use mock::*; -use frame_support::{assert_ok, assert_noop, traits::OnInitialize}; +use frame_support::{assert_noop, assert_ok, traits::OnInitialize}; use sp_runtime::traits::BadOrigin; type ScoredPool = Module; @@ -28,252 +28,299 @@ type Balances = pallet_balances::Module; #[test] fn query_membership_works() { - new_test_ext().execute_with(|| { - assert_eq!(ScoredPool::members(), vec![20, 40]); - assert_eq!(Balances::reserved_balance(31), CandidateDeposit::get()); - assert_eq!(Balances::reserved_balance(40), CandidateDeposit::get()); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![20, 40]); - }); + new_test_ext().execute_with(|| { + assert_eq!(ScoredPool::members(), vec![20, 40]); + assert_eq!(Balances::reserved_balance(31), CandidateDeposit::get()); + assert_eq!(Balances::reserved_balance(40), CandidateDeposit::get()); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), vec![20, 40]); + }); } #[test] fn submit_candidacy_must_not_work() { - new_test_ext().execute_with(|| { - assert_noop!( - ScoredPool::submit_candidacy(Origin::signed(99)), - pallet_balances::Error::::InsufficientBalance, - ); - assert_noop!( - ScoredPool::submit_candidacy(Origin::signed(40)), - Error::::AlreadyInPool - ); - }); + new_test_ext().execute_with(|| { + assert_noop!( + ScoredPool::submit_candidacy(Origin::signed(99)), + 
pallet_balances::Error::::InsufficientBalance, + ); + assert_noop!( + ScoredPool::submit_candidacy(Origin::signed(40)), + Error::::AlreadyInPool + ); + }); } #[test] fn submit_candidacy_works() { - new_test_ext().execute_with(|| { - // given - let who = 15; + new_test_ext().execute_with(|| { + // given + let who = 15; - // when - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); - assert_eq!(fetch_from_pool(15), Some((who, None))); + // when + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_eq!(fetch_from_pool(15), Some((who, None))); - // then - assert_eq!(Balances::reserved_balance(who), CandidateDeposit::get()); - }); + // then + assert_eq!(Balances::reserved_balance(who), CandidateDeposit::get()); + }); } #[test] fn scoring_works() { - new_test_ext().execute_with(|| { - // given - let who = 15; - let score = 99; - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); - - // when - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, score)); - - // then - assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); - assert_eq!(find_in_pool(who), Some(0)); // must be first element, since highest scored - }); + new_test_ext().execute_with(|| { + // given + let who = 15; + let score = 99; + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + + // when + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::score( + Origin::signed(ScoreOrigin::get()), + who, + index, + score + )); + + // then + assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); + assert_eq!(find_in_pool(who), Some(0)); // must be first element, since highest scored + }); } #[test] fn scoring_same_element_with_same_score_works() { - new_test_ext().execute_with(|| { - // given - let who = 31; - let index = find_in_pool(who).expect("entity must be in pool") as u32; - let score = 2; - - // when - 
assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, score)); - - // then - assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); - - // must have been inserted right before the `20` element which is - // of the same score as `31`. so sort order is maintained. - assert_eq!(find_in_pool(who), Some(1)); - }); + new_test_ext().execute_with(|| { + // given + let who = 31; + let index = find_in_pool(who).expect("entity must be in pool") as u32; + let score = 2; + + // when + assert_ok!(ScoredPool::score( + Origin::signed(ScoreOrigin::get()), + who, + index, + score + )); + + // then + assert_eq!(fetch_from_pool(who), Some((who, Some(score)))); + + // must have been inserted right before the `20` element which is + // of the same score as `31`. so sort order is maintained. + assert_eq!(find_in_pool(who), Some(1)); + }); } #[test] fn kicking_works_only_for_authorized() { - new_test_ext().execute_with(|| { - let who = 40; - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_noop!(ScoredPool::kick(Origin::signed(99), who, index), BadOrigin); - }); + new_test_ext().execute_with(|| { + let who = 40; + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_noop!(ScoredPool::kick(Origin::signed(99), who, index), BadOrigin); + }); } #[test] fn kicking_works() { - new_test_ext().execute_with(|| { - // given - let who = 40; - assert_eq!(Balances::reserved_balance(who), CandidateDeposit::get()); - assert_eq!(find_in_pool(who), Some(0)); - - // when - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); - - // then - assert_eq!(find_in_pool(who), None); - assert_eq!(ScoredPool::members(), vec![20, 31]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); - assert_eq!(Balances::reserved_balance(who), 0); // deposit must have been returned - }); + new_test_ext().execute_with(|| { + // 
given + let who = 40; + assert_eq!(Balances::reserved_balance(who), CandidateDeposit::get()); + assert_eq!(find_in_pool(who), Some(0)); + + // when + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::kick( + Origin::signed(KickOrigin::get()), + who, + index + )); + + // then + assert_eq!(find_in_pool(who), None); + assert_eq!(ScoredPool::members(), vec![20, 31]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + assert_eq!(Balances::reserved_balance(who), 0); // deposit must have been returned + }); } #[test] fn unscored_entities_must_not_be_used_for_filling_members() { - new_test_ext().execute_with(|| { - // given - // we submit a candidacy, score will be `None` - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); - - // when - // we remove every scored member - ScoredPool::pool() - .into_iter() - .for_each(|(who, score)| { - if let Some(_) = score { - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index)); - } - }); - - // then - // the `None` candidates should not have been filled in - assert_eq!(ScoredPool::members(), vec![]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); - }); + new_test_ext().execute_with(|| { + // given + // we submit a candidacy, score will be `None` + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); + + // when + // we remove every scored member + ScoredPool::pool().into_iter().for_each(|(who, score)| { + if let Some(_) = score { + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::kick( + Origin::signed(KickOrigin::get()), + who, + index + )); + } + }); + + // then + // the `None` candidates should not have been filled in + assert_eq!(ScoredPool::members(), vec![]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + }); } #[test] fn refreshing_works() { 
- new_test_ext().execute_with(|| { - // given - let who = 15; - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99)); - - // when - ScoredPool::refresh_members(ScoredPool::pool(), ChangeReceiver::MembershipChanged); - - // then - assert_eq!(ScoredPool::members(), vec![15, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); - }); + new_test_ext().execute_with(|| { + // given + let who = 15; + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::score( + Origin::signed(ScoreOrigin::get()), + who, + index, + 99 + )); + + // when + ScoredPool::refresh_members(ScoredPool::pool(), ChangeReceiver::MembershipChanged); + + // then + assert_eq!(ScoredPool::members(), vec![15, 40]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + }); } #[test] fn refreshing_happens_every_period() { - new_test_ext().execute_with(|| { - // given - System::set_block_number(1); - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); - let index = find_in_pool(15).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), 15, index, 99)); - assert_eq!(ScoredPool::members(), vec![20, 40]); - - // when - System::set_block_number(4); - ScoredPool::on_initialize(4); - - // then - assert_eq!(ScoredPool::members(), vec![15, 40]); - assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); - }); + new_test_ext().execute_with(|| { + // given + System::set_block_number(1); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(15))); + let index = find_in_pool(15).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::score( + Origin::signed(ScoreOrigin::get()), + 15, + index, + 99 + )); + 
assert_eq!(ScoredPool::members(), vec![20, 40]); + + // when + System::set_block_number(4); + ScoredPool::on_initialize(4); + + // then + assert_eq!(ScoredPool::members(), vec![15, 40]); + assert_eq!(MEMBERS.with(|m| m.borrow().clone()), ScoredPool::members()); + }); } #[test] fn withdraw_candidacy_must_only_work_for_members() { - new_test_ext().execute_with(|| { - let who = 77; - let index = 0; - assert_noop!( ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::::WrongAccountIndex); - }); + new_test_ext().execute_with(|| { + let who = 77; + let index = 0; + assert_noop!( + ScoredPool::withdraw_candidacy(Origin::signed(who), index), + Error::::WrongAccountIndex + ); + }); } #[test] fn oob_index_should_abort() { - new_test_ext().execute_with(|| { - let who = 40; - let oob_index = ScoredPool::pool().len() as u32; - assert_noop!(ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index), Error::::InvalidIndex); - assert_noop!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99), Error::::InvalidIndex); - assert_noop!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index), Error::::InvalidIndex); - }); + new_test_ext().execute_with(|| { + let who = 40; + let oob_index = ScoredPool::pool().len() as u32; + assert_noop!( + ScoredPool::withdraw_candidacy(Origin::signed(who), oob_index), + Error::::InvalidIndex + ); + assert_noop!( + ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, oob_index, 99), + Error::::InvalidIndex + ); + assert_noop!( + ScoredPool::kick(Origin::signed(KickOrigin::get()), who, oob_index), + Error::::InvalidIndex + ); + }); } #[test] fn index_mismatches_should_abort() { - new_test_ext().execute_with(|| { - let who = 40; - let index = 3; - assert_noop!(ScoredPool::withdraw_candidacy(Origin::signed(who), index), Error::::WrongAccountIndex); - assert_noop!(ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99), Error::::WrongAccountIndex); - 
assert_noop!(ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index), Error::::WrongAccountIndex); - }); + new_test_ext().execute_with(|| { + let who = 40; + let index = 3; + assert_noop!( + ScoredPool::withdraw_candidacy(Origin::signed(who), index), + Error::::WrongAccountIndex + ); + assert_noop!( + ScoredPool::score(Origin::signed(ScoreOrigin::get()), who, index, 99), + Error::::WrongAccountIndex + ); + assert_noop!( + ScoredPool::kick(Origin::signed(KickOrigin::get()), who, index), + Error::::WrongAccountIndex + ); + }); } #[test] fn withdraw_unscored_candidacy_must_work() { - new_test_ext().execute_with(|| { - // given - let who = 5; + new_test_ext().execute_with(|| { + // given + let who = 5; - // when - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); + // when + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); - // then - assert_eq!(fetch_from_pool(5), None); - }); + // then + assert_eq!(fetch_from_pool(5), None); + }); } #[test] fn withdraw_scored_candidacy_must_work() { - new_test_ext().execute_with(|| { - // given - let who = 40; - assert_eq!(Balances::reserved_balance(who), CandidateDeposit::get()); - - // when - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); - - // then - assert_eq!(fetch_from_pool(who), None); - assert_eq!(ScoredPool::members(), vec![20, 31]); - assert_eq!(Balances::reserved_balance(who), 0); - }); + new_test_ext().execute_with(|| { + // given + let who = 40; + assert_eq!(Balances::reserved_balance(who), CandidateDeposit::get()); + + // when + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); + + // then + assert_eq!(fetch_from_pool(who), None); + 
assert_eq!(ScoredPool::members(), vec![20, 31]); + assert_eq!(Balances::reserved_balance(who), 0); + }); } #[test] fn candidacy_resubmitting_works() { - new_test_ext().execute_with(|| { - // given - let who = 15; - - // when - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); - assert_eq!(ScoredPool::candidate_exists(who), true); - let index = find_in_pool(who).expect("entity must be in pool") as u32; - assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); - assert_eq!(ScoredPool::candidate_exists(who), false); - assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); - - // then - assert_eq!(ScoredPool::candidate_exists(who), true); - }); + new_test_ext().execute_with(|| { + // given + let who = 15; + + // when + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + assert_eq!(ScoredPool::candidate_exists(who), true); + let index = find_in_pool(who).expect("entity must be in pool") as u32; + assert_ok!(ScoredPool::withdraw_candidacy(Origin::signed(who), index)); + assert_eq!(ScoredPool::candidate_exists(who), false); + assert_ok!(ScoredPool::submit_candidacy(Origin::signed(who))); + + // then + assert_eq!(ScoredPool::candidate_exists(who), true); + }); } diff --git a/frame/session/benchmarking/src/lib.rs b/frame/session/benchmarking/src/lib.rs index 3b91c2fdc5..be8264d7da 100644 --- a/frame/session/benchmarking/src/lib.rs +++ b/frame/session/benchmarking/src/lib.rs @@ -24,53 +24,50 @@ mod mock; use sp_std::prelude::*; use sp_std::vec; -use frame_system::RawOrigin; use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; -use pallet_session::*; use pallet_session::Module as Session; +use pallet_session::*; -use pallet_staking::{ - MAX_NOMINATIONS, - benchmarking::create_validator_with_nominators, -}; +use pallet_staking::{benchmarking::create_validator_with_nominators, MAX_NOMINATIONS}; pub struct Module(pallet_session::Module); pub trait Trait: pallet_session::Trait + pallet_staking::Trait {} benchmarks! 
{ - _ { } - - set_keys { - let n in 1 .. MAX_NOMINATIONS as u32; - let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; - let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; - let keys = T::Keys::default(); - let proof: Vec = vec![0,1,2,3]; - }: _(RawOrigin::Signed(v_controller), keys, proof) - - purge_keys { - let n in 1 .. MAX_NOMINATIONS as u32; - let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; - let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; - let keys = T::Keys::default(); - let proof: Vec = vec![0,1,2,3]; - Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; - }: _(RawOrigin::Signed(v_controller)) + _ { } + + set_keys { + let n in 1 .. MAX_NOMINATIONS as u32; + let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; + let keys = T::Keys::default(); + let proof: Vec = vec![0,1,2,3]; + }: _(RawOrigin::Signed(v_controller), keys, proof) + + purge_keys { + let n in 1 .. 
MAX_NOMINATIONS as u32; + let v_stash = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let v_controller = pallet_staking::Module::::bonded(&v_stash).ok_or("not stash")?; + let keys = T::Keys::default(); + let proof: Vec = vec![0,1,2,3]; + Session::::set_keys(RawOrigin::Signed(v_controller.clone()).into(), keys, proof)?; + }: _(RawOrigin::Signed(v_controller)) } #[cfg(test)] mod tests { - use super::*; - use crate::mock::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set_keys::()); - assert_ok!(test_benchmark_purge_keys::()); - }); - } + use super::*; + use crate::mock::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set_keys::()); + assert_ok!(test_benchmark_purge_keys::()); + }); + } } diff --git a/frame/session/benchmarking/src/mock.rs b/frame/session/benchmarking/src/mock.rs index 4c022eb8b8..29da12f06d 100644 --- a/frame/session/benchmarking/src/mock.rs +++ b/frame/session/benchmarking/src/mock.rs @@ -18,8 +18,8 @@ #![cfg(test)] -use sp_runtime::traits::{Convert, SaturatedConversion, IdentityLookup}; -use frame_support::{impl_outer_origin, impl_outer_dispatch, parameter_types}; +use frame_support::{impl_outer_dispatch, impl_outer_origin, parameter_types}; +use sp_runtime::traits::{Convert, IdentityLookup, SaturatedConversion}; type AccountId = u64; type AccountIndex = u32; @@ -32,156 +32,159 @@ type Staking = pallet_staking::Module; type Session = pallet_session::Module; impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } impl_outer_dispatch! 
{ - pub enum Call for Test where origin: Origin { - pallet_staking::Staking, - } + pub enum Call for Test where origin: Origin { + pallet_staking::Staking, + } } pub struct CurrencyToVoteHandler; impl Convert for CurrencyToVoteHandler { - fn convert(x: u64) -> u64 { - x - } + fn convert(x: u64) -> u64 { + x + } } impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> u64 { - x.saturated_into() - } + fn convert(x: u128) -> u64 { + x.saturated_into() + } } #[derive(Clone, Eq, PartialEq, Debug)] pub struct Test; impl frame_system::Trait for Test { - type Origin = Origin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; - type Call = Call; - type Hash = sp_core::H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = sp_runtime::testing::Header; - type Event = (); - type BlockHashCount = (); - type MaximumBlockWeight = (); - type DbWeight = (); - type AvailableBlockRatio = (); - type MaximumBlockLength = (); - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (Balances,); + type Origin = Origin; + type Index = AccountIndex; + type BlockNumber = BlockNumber; + type Call = Call; + type Hash = sp_core::H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = sp_runtime::testing::Header; + type Event = (); + type BlockHashCount = (); + type MaximumBlockWeight = (); + type DbWeight = (); + type AvailableBlockRatio = (); + type MaximumBlockLength = (); + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (Balances,); } parameter_types! 
{ - pub const ExistentialDeposit: Balance = 10; + pub const ExistentialDeposit: Balance = 10; } impl pallet_balances::Trait for Test { - type Balance = Balance; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = Balance; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const MinimumPeriod: u64 = 5; + pub const MinimumPeriod: u64 = 5; } impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; } impl pallet_session::historical::Trait for Test { - type FullIdentification = pallet_staking::Exposure; - type FullIdentificationOf = pallet_staking::ExposureOf; + type FullIdentification = pallet_staking::Exposure; + type FullIdentificationOf = pallet_staking::ExposureOf; } sp_runtime::impl_opaque_keys! 
{ - pub struct SessionKeys { - pub foo: sp_runtime::testing::UintAuthorityId, - } + pub struct SessionKeys { + pub foo: sp_runtime::testing::UintAuthorityId, + } } pub struct TestSessionHandler; impl pallet_session::SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[]; - fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} + fn on_genesis_session(_validators: &[(AccountId, Ks)]) {} - fn on_new_session( - _: bool, - _: &[(AccountId, Ks)], - _: &[(AccountId, Ks)], - ) {} + fn on_new_session( + _: bool, + _: &[(AccountId, Ks)], + _: &[(AccountId, Ks)], + ) { + } - fn on_disabled(_: usize) {} + fn on_disabled(_: usize) {} } impl pallet_session::Trait for Test { - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type Keys = SessionKeys; - type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; - type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; - type SessionHandler = TestSessionHandler; - type Event = (); - type ValidatorId = AccountId; - type ValidatorIdOf = pallet_staking::StashOf; - type DisabledValidatorsThreshold = (); + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions<(), ()>; + type NextSessionRotation = pallet_session::PeriodicSessions<(), ()>; + type SessionHandler = TestSessionHandler; + type Event = (); + type ValidatorId = AccountId; + type ValidatorIdOf = pallet_staking::StashOf; + type DisabledValidatorsThreshold = (); } pallet_staking_reward_curve::build! 
{ - const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); + const I_NPOS: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); } parameter_types! { - pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; - pub const MaxNominatorRewardedPerValidator: u32 = 64; - pub const UnsignedPriority: u64 = 1 << 20; + pub const RewardCurve: &'static sp_runtime::curve::PiecewiseLinear<'static> = &I_NPOS; + pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const UnsignedPriority: u64 = 1 << 20; } pub type Extrinsic = sp_runtime::testing::TestXt; type SubmitTransaction = frame_system::offchain::TransactionSubmitter< - sp_runtime::testing::UintAuthorityId, - Test, - Extrinsic, + sp_runtime::testing::UintAuthorityId, + Test, + Extrinsic, >; impl pallet_staking::Trait for Test { - type Currency = Balances; - type UnixTime = pallet_timestamp::Module; - type CurrencyToVote = CurrencyToVoteHandler; - type RewardRemainder = (); - type Event = (); - type Slash = (); - type Reward = (); - type SessionsPerEra = (); - type SlashDeferDuration = (); - type SlashCancelOrigin = frame_system::EnsureRoot; - type BondingDuration = (); - type SessionInterface = Self; - type RewardCurve = RewardCurve; - type NextNewSession = Session; - type ElectionLookahead = (); - type Call = Call; - type SubmitTransaction = SubmitTransaction; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = UnsignedPriority; + type Currency = Balances; + type UnixTime = pallet_timestamp::Module; + type CurrencyToVote = CurrencyToVoteHandler; + type RewardRemainder = (); + type Event = (); + type Slash = 
(); + type Reward = (); + type SessionsPerEra = (); + type SlashDeferDuration = (); + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = (); + type SessionInterface = Self; + type RewardCurve = RewardCurve; + type NextNewSession = Session; + type ElectionLookahead = (); + type Call = Call; + type SubmitTransaction = SubmitTransaction; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type UnsignedPriority = UnsignedPriority; } impl crate::Trait for Test {} pub fn new_test_ext() -> sp_io::TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - sp_io::TestExternalities::new(t) + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + sp_io::TestExternalities::new(t) } diff --git a/frame/session/src/historical.rs b/frame/session/src/historical.rs index f9990dd1e8..0951939459 100644 --- a/frame/session/src/historical.rs +++ b/frame/session/src/historical.rs @@ -25,87 +25,89 @@ //! These roots and proofs of inclusion can be generated at any time during the current session. //! Afterwards, the proofs can be fed to a consensus module when reporting misbehavior. 
-use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::{KeyTypeId, RuntimeDebug}; -use sp_runtime::traits::{Convert, OpaqueKeys}; +use super::{Module as SessionModule, SessionIndex}; +use codec::{Decode, Encode}; use frame_support::{decl_module, decl_storage}; -use frame_support::{Parameter, print}; -use sp_trie::{MemoryDB, Trie, TrieMut, Recorder, EMPTY_PREFIX}; -use sp_trie::trie_types::{TrieDBMut, TrieDB}; -use super::{SessionIndex, Module as SessionModule}; +use frame_support::{print, Parameter}; +use sp_runtime::traits::{Convert, OpaqueKeys}; +use sp_runtime::{KeyTypeId, RuntimeDebug}; +use sp_std::prelude::*; +use sp_trie::trie_types::{TrieDB, TrieDBMut}; +use sp_trie::{MemoryDB, Recorder, Trie, TrieMut, EMPTY_PREFIX}; type ValidatorCount = u32; /// Trait necessary for the historical module. pub trait Trait: super::Trait { - /// Full identification of the validator. - type FullIdentification: Parameter; - - /// A conversion from validator ID to full identification. - /// - /// This should contain any references to economic actors associated with the - /// validator, since they may be outdated by the time this is queried from a - /// historical trie. - /// - /// It must return the identification for the current session index. - type FullIdentificationOf: Convert>; + /// Full identification of the validator. + type FullIdentification: Parameter; + + /// A conversion from validator ID to full identification. + /// + /// This should contain any references to economic actors associated with the + /// validator, since they may be outdated by the time this is queried from a + /// historical trie. + /// + /// It must return the identification for the current session index. + type FullIdentificationOf: Convert>; } decl_storage! { - trait Store for Module as Session { - /// Mapping from historical session indices to session-data root hash and validator count. 
- HistoricalSessions get(fn historical_root): - map hasher(twox_64_concat) SessionIndex => Option<(T::Hash, ValidatorCount)>; - /// The range of historical sessions we store. [first, last) - StoredRange: Option<(SessionIndex, SessionIndex)>; - /// Deprecated. - CachedObsolete: - map hasher(twox_64_concat) SessionIndex - => Option>; - } + trait Store for Module as Session { + /// Mapping from historical session indices to session-data root hash and validator count. + HistoricalSessions get(fn historical_root): + map hasher(twox_64_concat) SessionIndex => Option<(T::Hash, ValidatorCount)>; + /// The range of historical sessions we store. [first, last) + StoredRange: Option<(SessionIndex, SessionIndex)>; + /// Deprecated. + CachedObsolete: + map hasher(twox_64_concat) SessionIndex + => Option>; + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } impl Module { - /// Prune historical stored session roots up to (but not including) - /// `up_to`. - pub fn prune_up_to(up_to: SessionIndex) { - ::StoredRange::mutate(|range| { - let (start, end) = match *range { - Some(range) => range, - None => return, // nothing to prune. - }; - - let up_to = sp_std::cmp::min(up_to, end); - - if up_to < start { - return // out of bounds. harmless. - } - - (start..up_to).for_each(::HistoricalSessions::remove); - - let new_start = up_to; - *range = if new_start == end { - None // nothing is stored. - } else { - Some((new_start, end)) - } - }) - } + /// Prune historical stored session roots up to (but not including) + /// `up_to`. + pub fn prune_up_to(up_to: SessionIndex) { + ::StoredRange::mutate(|range| { + let (start, end) = match *range { + Some(range) => range, + None => return, // nothing to prune. + }; + + let up_to = sp_std::cmp::min(up_to, end); + + if up_to < start { + return; // out of bounds. harmless. 
+ } + + (start..up_to).for_each(::HistoricalSessions::remove); + + let new_start = up_to; + *range = if new_start == end { + None // nothing is stored. + } else { + Some((new_start, end)) + } + }) + } } /// Specialization of the crate-level `SessionManager` which returns the set of full identification /// when creating a new session. -pub trait SessionManager: crate::SessionManager { - /// If there was a validator set change, its returns the set of new validators along with their - /// full identifications. - fn new_session(new_index: SessionIndex) -> Option>; - fn start_session(start_index: SessionIndex); - fn end_session(end_index: SessionIndex); +pub trait SessionManager: + crate::SessionManager +{ + /// If there was a validator set change, its returns the set of new validators along with their + /// full identifications. + fn new_session(new_index: SessionIndex) -> Option>; + fn start_session(start_index: SessionIndex); + fn end_session(end_index: SessionIndex); } /// An `SessionManager` implementation that wraps an inner `I` and also @@ -113,312 +115,321 @@ pub trait SessionManager: crate::SessionManager pub struct NoteHistoricalRoot(sp_std::marker::PhantomData<(T, I)>); impl crate::SessionManager for NoteHistoricalRoot - where I: SessionManager +where + I: SessionManager, { - fn new_session(new_index: SessionIndex) -> Option> { - StoredRange::mutate(|range| { - range.get_or_insert_with(|| (new_index, new_index)).1 = new_index + 1; - }); - - let new_validators_and_id = >::new_session(new_index); - let new_validators = new_validators_and_id.as_ref().map(|new_validators| { - new_validators.iter().map(|(v, _id)| v.clone()).collect() - }); - - if let Some(new_validators) = new_validators_and_id { - let count = new_validators.len() as u32; - match ProvingTrie::::generate_for(new_validators) { - Ok(trie) => >::insert(new_index, &(trie.root, count)), - Err(reason) => { - print("Failed to generate historical ancestry-inclusion proof."); - print(reason); - } - }; - } 
else { - let previous_index = new_index.saturating_sub(1); - if let Some(previous_session) = >::get(previous_index) { - >::insert(new_index, previous_session); - } - } - - new_validators - } - fn start_session(start_index: SessionIndex) { - >::start_session(start_index) - } - fn end_session(end_index: SessionIndex) { - >::end_session(end_index) - } + fn new_session(new_index: SessionIndex) -> Option> { + StoredRange::mutate(|range| { + range.get_or_insert_with(|| (new_index, new_index)).1 = new_index + 1; + }); + + let new_validators_and_id = >::new_session(new_index); + let new_validators = new_validators_and_id + .as_ref() + .map(|new_validators| new_validators.iter().map(|(v, _id)| v.clone()).collect()); + + if let Some(new_validators) = new_validators_and_id { + let count = new_validators.len() as u32; + match ProvingTrie::::generate_for(new_validators) { + Ok(trie) => >::insert(new_index, &(trie.root, count)), + Err(reason) => { + print("Failed to generate historical ancestry-inclusion proof."); + print(reason); + } + }; + } else { + let previous_index = new_index.saturating_sub(1); + if let Some(previous_session) = >::get(previous_index) { + >::insert(new_index, previous_session); + } + } + + new_validators + } + fn start_session(start_index: SessionIndex) { + >::start_session(start_index) + } + fn end_session(end_index: SessionIndex) { + >::end_session(end_index) + } } /// A tuple of the validator's ID and their full identification. -pub type IdentificationTuple = (::ValidatorId, ::FullIdentification); +pub type IdentificationTuple = ( + ::ValidatorId, + ::FullIdentification, +); /// a trie instance for checking and generating proofs. 
pub struct ProvingTrie { - db: MemoryDB, - root: T::Hash, + db: MemoryDB, + root: T::Hash, } impl ProvingTrie { - fn generate_for(validators: I) -> Result - where I: IntoIterator - { - let mut db = MemoryDB::default(); - let mut root = Default::default(); - - { - let mut trie = TrieDBMut::new(&mut db, &mut root); - for (i, (validator, full_id)) in validators.into_iter().enumerate() { - let i = i as u32; - let keys = match >::load_keys(&validator) { - None => continue, - Some(k) => k, - }; - - let full_id = (validator, full_id); - - // map each key to the owner index. - for key_id in T::Keys::key_ids() { - let key = keys.get_raw(*key_id); - let res = (key_id, key).using_encoded(|k| - i.using_encoded(|v| trie.insert(k, v)) - ); - - let _ = res.map_err(|_| "failed to insert into trie")?; - } - - // map each owner index to the full identification. - let _ = i.using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) - .map_err(|_| "failed to insert into trie")?; - } - } - - Ok(ProvingTrie { - db, - root, - }) - } - - fn from_nodes(root: T::Hash, nodes: &[Vec]) -> Self { - use sp_trie::HashDBT; - - let mut memory_db = MemoryDB::default(); - for node in nodes { - HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]); - } - - ProvingTrie { - db: memory_db, - root, - } - } - - /// Prove the full verification data for a given key and key ID. - pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { - let trie = TrieDB::new(&self.db, &self.root).ok()?; - let mut recorder = Recorder::new(); - let val_idx = (key_id, key_data).using_encoded(|s| { - trie.get_with(s, &mut recorder) - .ok()? - .and_then(|raw| u32::decode(&mut &*raw).ok()) - })?; - - val_idx.using_encoded(|s| { - trie.get_with(s, &mut recorder) - .ok()? - .and_then(|raw| >::decode(&mut &*raw).ok()) - })?; - - Some(recorder.drain().into_iter().map(|r| r.data).collect()) - } - - /// Access the underlying trie root. 
- pub fn root(&self) -> &T::Hash { - &self.root - } - - // Check a proof contained within the current memory-db. Returns `None` if the - // nodes within the current `MemoryDB` are insufficient to query the item. - fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { - let trie = TrieDB::new(&self.db, &self.root).ok()?; - let val_idx = (key_id, key_data).using_encoded(|s| trie.get(s)) - .ok()? - .and_then(|raw| u32::decode(&mut &*raw).ok())?; - - val_idx.using_encoded(|s| trie.get(s)) - .ok()? - .and_then(|raw| >::decode(&mut &*raw).ok()) - } - + fn generate_for(validators: I) -> Result + where + I: IntoIterator, + { + let mut db = MemoryDB::default(); + let mut root = Default::default(); + + { + let mut trie = TrieDBMut::new(&mut db, &mut root); + for (i, (validator, full_id)) in validators.into_iter().enumerate() { + let i = i as u32; + let keys = match >::load_keys(&validator) { + None => continue, + Some(k) => k, + }; + + let full_id = (validator, full_id); + + // map each key to the owner index. + for key_id in T::Keys::key_ids() { + let key = keys.get_raw(*key_id); + let res = + (key_id, key).using_encoded(|k| i.using_encoded(|v| trie.insert(k, v))); + + let _ = res.map_err(|_| "failed to insert into trie")?; + } + + // map each owner index to the full identification. + let _ = i + .using_encoded(|k| full_id.using_encoded(|v| trie.insert(k, v))) + .map_err(|_| "failed to insert into trie")?; + } + } + + Ok(ProvingTrie { db, root }) + } + + fn from_nodes(root: T::Hash, nodes: &[Vec]) -> Self { + use sp_trie::HashDBT; + + let mut memory_db = MemoryDB::default(); + for node in nodes { + HashDBT::insert(&mut memory_db, EMPTY_PREFIX, &node[..]); + } + + ProvingTrie { + db: memory_db, + root, + } + } + + /// Prove the full verification data for a given key and key ID. 
+ pub fn prove(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option>> { + let trie = TrieDB::new(&self.db, &self.root).ok()?; + let mut recorder = Recorder::new(); + let val_idx = (key_id, key_data).using_encoded(|s| { + trie.get_with(s, &mut recorder) + .ok()? + .and_then(|raw| u32::decode(&mut &*raw).ok()) + })?; + + val_idx.using_encoded(|s| { + trie.get_with(s, &mut recorder) + .ok()? + .and_then(|raw| >::decode(&mut &*raw).ok()) + })?; + + Some(recorder.drain().into_iter().map(|r| r.data).collect()) + } + + /// Access the underlying trie root. + pub fn root(&self) -> &T::Hash { + &self.root + } + + // Check a proof contained within the current memory-db. Returns `None` if the + // nodes within the current `MemoryDB` are insufficient to query the item. + fn query(&self, key_id: KeyTypeId, key_data: &[u8]) -> Option> { + let trie = TrieDB::new(&self.db, &self.root).ok()?; + let val_idx = (key_id, key_data) + .using_encoded(|s| trie.get(s)) + .ok()? + .and_then(|raw| u32::decode(&mut &*raw).ok())?; + + val_idx + .using_encoded(|s| trie.get(s)) + .ok()? + .and_then(|raw| >::decode(&mut &*raw).ok()) + } } /// Proof of ownership of a specific key. #[derive(Encode, Decode, Clone, Eq, PartialEq, RuntimeDebug)] pub struct Proof { - session: SessionIndex, - trie_nodes: Vec>, + session: SessionIndex, + trie_nodes: Vec>, } impl Proof { - /// Returns a session this proof was generated for. - pub fn session(&self) -> SessionIndex { - self.session - } + /// Returns a session this proof was generated for. 
+ pub fn session(&self) -> SessionIndex { + self.session + } } impl> frame_support::traits::KeyOwnerProofSystem<(KeyTypeId, D)> - for Module + for Module { - type Proof = Proof; - type IdentificationTuple = IdentificationTuple; - - fn prove(key: (KeyTypeId, D)) -> Option { - let session = >::current_index(); - let validators = >::validators().into_iter() - .filter_map(|validator| { - T::FullIdentificationOf::convert(validator.clone()) - .map(|full_id| (validator, full_id)) - }); - let trie = ProvingTrie::::generate_for(validators).ok()?; - - let (id, data) = key; - - trie.prove(id, data.as_ref()).map(|trie_nodes| Proof { - session, - trie_nodes, - }) - } - - fn check_proof(key: (KeyTypeId, D), proof: Proof) -> Option> { - let (id, data) = key; - - if proof.session == >::current_index() { - >::key_owner(id, data.as_ref()).and_then(|owner| - T::FullIdentificationOf::convert(owner.clone()).map(move |id| (owner, id)) - ) - } else { - let (root, _) = >::get(&proof.session)?; - let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); - - trie.query(id, data.as_ref()) - } - } + type Proof = Proof; + type IdentificationTuple = IdentificationTuple; + + fn prove(key: (KeyTypeId, D)) -> Option { + let session = >::current_index(); + let validators = >::validators() + .into_iter() + .filter_map(|validator| { + T::FullIdentificationOf::convert(validator.clone()) + .map(|full_id| (validator, full_id)) + }); + let trie = ProvingTrie::::generate_for(validators).ok()?; + + let (id, data) = key; + + trie.prove(id, data.as_ref()).map(|trie_nodes| Proof { + session, + trie_nodes, + }) + } + + fn check_proof(key: (KeyTypeId, D), proof: Proof) -> Option> { + let (id, data) = key; + + if proof.session == >::current_index() { + >::key_owner(id, data.as_ref()).and_then(|owner| { + T::FullIdentificationOf::convert(owner.clone()).map(move |id| (owner, id)) + }) + } else { + let (root, _) = >::get(&proof.session)?; + let trie = ProvingTrie::::from_nodes(root, &proof.trie_nodes); + + 
trie.query(id, data.as_ref()) + } + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::crypto::key_types::DUMMY; - use sp_runtime::testing::UintAuthorityId; - use crate::mock::{ - NEXT_VALIDATORS, force_new_session, - set_next_validators, Test, System, Session, - }; - use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; - - type Historical = Module; - - fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - crate::GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ), - }.assimilate_storage(&mut t).unwrap(); - sp_io::TestExternalities::new(t) - } - - #[test] - fn generated_proof_is_good() { - new_test_ext().execute_with(|| { - set_next_validators(vec![1, 2]); - force_new_session(); - - System::set_block_number(1); - Session::on_initialize(1); - - let encoded_key_1 = UintAuthorityId(1).encode(); - let proof = Historical::prove((DUMMY, &encoded_key_1[..])).unwrap(); - - // proof-checking in the same session is OK. - assert!(Historical::check_proof((DUMMY, &encoded_key_1[..]), proof.clone()).is_some()); - - set_next_validators(vec![1, 2, 4]); - force_new_session(); - - System::set_block_number(2); - Session::on_initialize(2); - - assert!(Historical::historical_root(proof.session).is_some()); - assert!(Session::current_index() > proof.session); - - // proof-checking in the next session is also OK. 
- assert!(Historical::check_proof((DUMMY, &encoded_key_1[..]), proof.clone()).is_some()); - - set_next_validators(vec![1, 2, 5]); - - force_new_session(); - System::set_block_number(3); - Session::on_initialize(3); - }); - } - - #[test] - fn prune_up_to_works() { - new_test_ext().execute_with(|| { - for i in 1..99u64 { - set_next_validators(vec![i]); - force_new_session(); - - System::set_block_number(i); - Session::on_initialize(i); - - } - - assert_eq!(StoredRange::get(), Some((0, 100))); - - for i in 0..100 { - assert!(Historical::historical_root(i).is_some()) - } - - Historical::prune_up_to(10); - assert_eq!(StoredRange::get(), Some((10, 100))); - - Historical::prune_up_to(9); - assert_eq!(StoredRange::get(), Some((10, 100))); - - for i in 10..100 { - assert!(Historical::historical_root(i).is_some()) - } - - Historical::prune_up_to(99); - assert_eq!(StoredRange::get(), Some((99, 100))); - - Historical::prune_up_to(100); - assert_eq!(StoredRange::get(), None); - - for i in 99..199u64 { - set_next_validators(vec![i]); - force_new_session(); - - System::set_block_number(i); - Session::on_initialize(i); - - } - - assert_eq!(StoredRange::get(), Some((100, 200))); - - for i in 100..200 { - assert!(Historical::historical_root(i).is_some()) - } - - Historical::prune_up_to(9999); - assert_eq!(StoredRange::get(), None); - - for i in 100..200 { - assert!(Historical::historical_root(i).is_none()) - } - }); - } + use super::*; + use crate::mock::{ + force_new_session, set_next_validators, Session, System, Test, NEXT_VALIDATORS, + }; + use frame_support::traits::{KeyOwnerProofSystem, OnInitialize}; + use sp_core::crypto::key_types::DUMMY; + use sp_runtime::testing::UintAuthorityId; + + type Historical = Module; + + fn new_test_ext() -> sp_io::TestExternalities { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + crate::GenesisConfig:: { + keys: NEXT_VALIDATORS.with(|l| { + l.borrow() + .iter() + .cloned() + .map(|i| (i, i, 
UintAuthorityId(i).into())) + .collect() + }), + } + .assimilate_storage(&mut t) + .unwrap(); + sp_io::TestExternalities::new(t) + } + + #[test] + fn generated_proof_is_good() { + new_test_ext().execute_with(|| { + set_next_validators(vec![1, 2]); + force_new_session(); + + System::set_block_number(1); + Session::on_initialize(1); + + let encoded_key_1 = UintAuthorityId(1).encode(); + let proof = Historical::prove((DUMMY, &encoded_key_1[..])).unwrap(); + + // proof-checking in the same session is OK. + assert!(Historical::check_proof((DUMMY, &encoded_key_1[..]), proof.clone()).is_some()); + + set_next_validators(vec![1, 2, 4]); + force_new_session(); + + System::set_block_number(2); + Session::on_initialize(2); + + assert!(Historical::historical_root(proof.session).is_some()); + assert!(Session::current_index() > proof.session); + + // proof-checking in the next session is also OK. + assert!(Historical::check_proof((DUMMY, &encoded_key_1[..]), proof.clone()).is_some()); + + set_next_validators(vec![1, 2, 5]); + + force_new_session(); + System::set_block_number(3); + Session::on_initialize(3); + }); + } + + #[test] + fn prune_up_to_works() { + new_test_ext().execute_with(|| { + for i in 1..99u64 { + set_next_validators(vec![i]); + force_new_session(); + + System::set_block_number(i); + Session::on_initialize(i); + } + + assert_eq!(StoredRange::get(), Some((0, 100))); + + for i in 0..100 { + assert!(Historical::historical_root(i).is_some()) + } + + Historical::prune_up_to(10); + assert_eq!(StoredRange::get(), Some((10, 100))); + + Historical::prune_up_to(9); + assert_eq!(StoredRange::get(), Some((10, 100))); + + for i in 10..100 { + assert!(Historical::historical_root(i).is_some()) + } + + Historical::prune_up_to(99); + assert_eq!(StoredRange::get(), Some((99, 100))); + + Historical::prune_up_to(100); + assert_eq!(StoredRange::get(), None); + + for i in 99..199u64 { + set_next_validators(vec![i]); + force_new_session(); + + System::set_block_number(i); + 
Session::on_initialize(i); + } + + assert_eq!(StoredRange::get(), Some((100, 200))); + + for i in 100..200 { + assert!(Historical::historical_root(i).is_some()) + } + + Historical::prune_up_to(9999); + assert_eq!(StoredRange::get(), None); + + for i in 100..200 { + assert!(Historical::historical_root(i).is_none()) + } + }); + } } diff --git a/frame/session/src/lib.rs b/frame/session/src/lib.rs index f539004189..9cb4b05e91 100644 --- a/frame/session/src/lib.rs +++ b/frame/session/src/lib.rs @@ -99,20 +99,26 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, marker::PhantomData, ops::{Sub, Rem}}; use codec::Decode; -use sp_runtime::{KeyTypeId, Perbill, RuntimeAppPublic, BoundToRuntimeAppPublic}; -use sp_runtime::traits::{Convert, Zero, Member, OpaqueKeys, Saturating}; -use sp_staking::SessionIndex; use frame_support::{ - ensure, decl_module, decl_event, decl_storage, decl_error, ConsensusEngineId, Parameter, - traits::{ - Get, FindAuthor, ValidatorRegistration, EstimateNextSessionRotation, EstimateNextNewSession, - }, - dispatch::{self, DispatchResult, DispatchError}, - weights::{Weight, MINIMUM_WEIGHT, SimpleDispatchInfo}, + decl_error, decl_event, decl_module, decl_storage, + dispatch::{self, DispatchError, DispatchResult}, + ensure, + traits::{ + EstimateNextNewSession, EstimateNextSessionRotation, FindAuthor, Get, ValidatorRegistration, + }, + weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, + ConsensusEngineId, Parameter, }; use frame_system::{self as system, ensure_signed}; +use sp_runtime::traits::{Convert, Member, OpaqueKeys, Saturating, Zero}; +use sp_runtime::{BoundToRuntimeAppPublic, KeyTypeId, Perbill, RuntimeAppPublic}; +use sp_staking::SessionIndex; +use sp_std::{ + marker::PhantomData, + ops::{Rem, Sub}, + prelude::*, +}; #[cfg(test)] mod mock; @@ -124,8 +130,8 @@ pub mod historical; /// Decides whether the session should be ended. pub trait ShouldEndSession { - /// Return `true` if the session should be ended. 
- fn should_end_session(now: BlockNumber) -> bool; + /// Return `true` if the session should be ended. + fn should_end_session(now: BlockNumber) -> bool; } /// Ends the session after a fixed period of blocks. @@ -133,619 +139,629 @@ pub trait ShouldEndSession { /// The first session will have length of `Offset`, and /// the following sessions will have length of `Period`. /// This may prove nonsensical if `Offset` >= `Period`. -pub struct PeriodicSessions< - Period, - Offset, ->(PhantomData<(Period, Offset)>); +pub struct PeriodicSessions(PhantomData<(Period, Offset)>); impl< - BlockNumber: Rem + Sub + Zero + PartialOrd, - Period: Get, - Offset: Get, -> ShouldEndSession for PeriodicSessions { - fn should_end_session(now: BlockNumber) -> bool { - let offset = Offset::get(); - now >= offset && ((now - offset) % Period::get()).is_zero() - } + BlockNumber: Rem + Sub + Zero + PartialOrd, + Period: Get, + Offset: Get, + > ShouldEndSession for PeriodicSessions +{ + fn should_end_session(now: BlockNumber) -> bool { + let offset = Offset::get(); + now >= offset && ((now - offset) % Period::get()).is_zero() + } } impl< - BlockNumber: Rem + Sub + Zero + PartialOrd + Saturating + Clone, - Period: Get, - Offset: Get, -> EstimateNextSessionRotation for PeriodicSessions { - fn estimate_next_session_rotation(now: BlockNumber) -> Option { - let offset = Offset::get(); - let period = Period::get(); - Some(if now > offset { - let block_after_last_session = (now.clone() - offset) % period.clone(); - if block_after_last_session > Zero::zero() { - now.saturating_add( - period.saturating_sub(block_after_last_session) - ) - } else { - Zero::zero() - } - } else { - offset - }) - } + BlockNumber: Rem + + Sub + + Zero + + PartialOrd + + Saturating + + Clone, + Period: Get, + Offset: Get, + > EstimateNextSessionRotation for PeriodicSessions +{ + fn estimate_next_session_rotation(now: BlockNumber) -> Option { + let offset = Offset::get(); + let period = Period::get(); + Some(if now > offset { 
+ let block_after_last_session = (now.clone() - offset) % period.clone(); + if block_after_last_session > Zero::zero() { + now.saturating_add(period.saturating_sub(block_after_last_session)) + } else { + Zero::zero() + } + } else { + offset + }) + } } /// A trait for managing creation of new validator set. pub trait SessionManager { - /// Plan a new session, and optionally provide the new validator set. - /// - /// Even if the validator-set is the same as before, if any underlying economic - /// conditions have changed (i.e. stake-weights), the new validator set must be returned. - /// This is necessary for consensus engines making use of the session module to - /// issue a validator-set change so misbehavior can be provably associated with the new - /// economic conditions as opposed to the old. - /// The returned validator set, if any, will not be applied until `new_index`. - /// `new_index` is strictly greater than from previous call. - /// - /// The first session start at index 0. - /// - /// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. - fn new_session(new_index: SessionIndex) -> Option>; - /// End the session. - /// - /// Because the session pallet can queue validator set the ending session can be lower than the - /// last new session index. - fn end_session(end_index: SessionIndex); - /// Start the session. - /// - /// The session start to be used for validation - fn start_session(start_index: SessionIndex); + /// Plan a new session, and optionally provide the new validator set. + /// + /// Even if the validator-set is the same as before, if any underlying economic + /// conditions have changed (i.e. stake-weights), the new validator set must be returned. + /// This is necessary for consensus engines making use of the session module to + /// issue a validator-set change so misbehavior can be provably associated with the new + /// economic conditions as opposed to the old. 
+ /// The returned validator set, if any, will not be applied until `new_index`. + /// `new_index` is strictly greater than from previous call. + /// + /// The first session start at index 0. + /// + /// `new_session(session)` is guaranteed to be called before `end_session(session-1)`. + fn new_session(new_index: SessionIndex) -> Option>; + /// End the session. + /// + /// Because the session pallet can queue validator set the ending session can be lower than the + /// last new session index. + fn end_session(end_index: SessionIndex); + /// Start the session. + /// + /// The session start to be used for validation + fn start_session(start_index: SessionIndex); } impl SessionManager for () { - fn new_session(_: SessionIndex) -> Option> { None } - fn start_session(_: SessionIndex) {} - fn end_session(_: SessionIndex) {} + fn new_session(_: SessionIndex) -> Option> { + None + } + fn start_session(_: SessionIndex) {} + fn end_session(_: SessionIndex) {} } /// Handler for session life cycle events. pub trait SessionHandler { - /// All the key type ids this session handler can process. - /// - /// The order must be the same as it expects them in - /// [`on_new_session`](Self::on_new_session) and [`on_genesis_session`](Self::on_genesis_session). - const KEY_TYPE_IDS: &'static [KeyTypeId]; - - /// The given validator set will be used for the genesis session. - /// It is guaranteed that the given validator set will also be used - /// for the second session, therefore the first call to `on_new_session` - /// should provide the same validator set. - fn on_genesis_session(validators: &[(ValidatorId, Ks)]); - - /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. - /// - /// `changed` is true whenever any of the session keys or underlying economic - /// identities or weightings behind those keys has changed. 
- fn on_new_session( - changed: bool, - validators: &[(ValidatorId, Ks)], - queued_validators: &[(ValidatorId, Ks)], - ); - - /// A notification for end of the session. - /// - /// Note it is triggered before any `SessionManager::end_session` handlers, - /// so we can still affect the validator set. - fn on_before_session_ending() {} - - /// A validator got disabled. Act accordingly until a new session begins. - fn on_disabled(validator_index: usize); + /// All the key type ids this session handler can process. + /// + /// The order must be the same as it expects them in + /// [`on_new_session`](Self::on_new_session) and [`on_genesis_session`](Self::on_genesis_session). + const KEY_TYPE_IDS: &'static [KeyTypeId]; + + /// The given validator set will be used for the genesis session. + /// It is guaranteed that the given validator set will also be used + /// for the second session, therefore the first call to `on_new_session` + /// should provide the same validator set. + fn on_genesis_session(validators: &[(ValidatorId, Ks)]); + + /// Session set has changed; act appropriately. Note that this can be called + /// before initialization of your module. + /// + /// `changed` is true whenever any of the session keys or underlying economic + /// identities or weightings behind those keys has changed. + fn on_new_session( + changed: bool, + validators: &[(ValidatorId, Ks)], + queued_validators: &[(ValidatorId, Ks)], + ); + + /// A notification for end of the session. + /// + /// Note it is triggered before any `SessionManager::end_session` handlers, + /// so we can still affect the validator set. + fn on_before_session_ending() {} + + /// A validator got disabled. Act accordingly until a new session begins. + fn on_disabled(validator_index: usize); } /// A session handler for specific key type. pub trait OneSessionHandler: BoundToRuntimeAppPublic { - /// The key type expected. 
- type Key: Decode + Default + RuntimeAppPublic; - - fn on_genesis_session<'a, I: 'a>(validators: I) - where I: Iterator, ValidatorId: 'a; - - /// Session set has changed; act appropriately. Note that this can be called - /// before initialization of your module. - /// - /// `changed` is true when at least one of the session keys - /// or the underlying economic identities/distribution behind one the - /// session keys has changed, false otherwise. - /// - /// The `validators` are the validators of the incoming session, and `queued_validators` - /// will follow. - fn on_new_session<'a, I: 'a>( - changed: bool, - validators: I, - queued_validators: I, - ) where I: Iterator, ValidatorId: 'a; - - - /// A notification for end of the session. - /// - /// Note it is triggered before any `SessionManager::end_session` handlers, - /// so we can still affect the validator set. - fn on_before_session_ending() {} - - /// A validator got disabled. Act accordingly until a new session begins. - fn on_disabled(_validator_index: usize); + /// The key type expected. + type Key: Decode + Default + RuntimeAppPublic; + + fn on_genesis_session<'a, I: 'a>(validators: I) + where + I: Iterator, + ValidatorId: 'a; + + /// Session set has changed; act appropriately. Note that this can be called + /// before initialization of your module. + /// + /// `changed` is true when at least one of the session keys + /// or the underlying economic identities/distribution behind one the + /// session keys has changed, false otherwise. + /// + /// The `validators` are the validators of the incoming session, and `queued_validators` + /// will follow. + fn on_new_session<'a, I: 'a>(changed: bool, validators: I, queued_validators: I) + where + I: Iterator, + ValidatorId: 'a; + + /// A notification for end of the session. + /// + /// Note it is triggered before any `SessionManager::end_session` handlers, + /// so we can still affect the validator set. 
+ fn on_before_session_ending() {} + + /// A validator got disabled. Act accordingly until a new session begins. + fn on_disabled(_validator_index: usize); } #[impl_trait_for_tuples::impl_for_tuples(1, 30)] #[tuple_types_no_default_trait_bound] impl SessionHandler for Tuple { - for_tuples!( where #( Tuple: OneSessionHandler )* ); + for_tuples!( where #( Tuple: OneSessionHandler )* ); - for_tuples!( + for_tuples!( const KEY_TYPE_IDS: &'static [KeyTypeId] = &[ #( ::ID ),* ]; ); - fn on_genesis_session(validators: &[(AId, Ks)]) { - for_tuples!( - #( - let our_keys: Box> = Box::new(validators.iter() - .map(|k| (&k.0, k.1.get::(::ID) - .unwrap_or_default()))); - - Tuple::on_genesis_session(our_keys); - )* - ) - } - - fn on_new_session( - changed: bool, - validators: &[(AId, Ks)], - queued_validators: &[(AId, Ks)], - ) { - for_tuples!( - #( - let our_keys: Box> = Box::new(validators.iter() - .map(|k| (&k.0, k.1.get::(::ID) - .unwrap_or_default()))); - let queued_keys: Box> = Box::new(queued_validators.iter() - .map(|k| (&k.0, k.1.get::(::ID) - .unwrap_or_default()))); - Tuple::on_new_session(changed, our_keys, queued_keys); - )* - ) - } - - fn on_before_session_ending() { - for_tuples!( #( Tuple::on_before_session_ending(); )* ) - } - - fn on_disabled(i: usize) { - for_tuples!( #( Tuple::on_disabled(i); )* ) - } + fn on_genesis_session(validators: &[(AId, Ks)]) { + for_tuples!( + #( + let our_keys: Box> = Box::new(validators.iter() + .map(|k| (&k.0, k.1.get::(::ID) + .unwrap_or_default()))); + + Tuple::on_genesis_session(our_keys); + )* + ) + } + + fn on_new_session( + changed: bool, + validators: &[(AId, Ks)], + queued_validators: &[(AId, Ks)], + ) { + for_tuples!( + #( + let our_keys: Box> = Box::new(validators.iter() + .map(|k| (&k.0, k.1.get::(::ID) + .unwrap_or_default()))); + let queued_keys: Box> = Box::new(queued_validators.iter() + .map(|k| (&k.0, k.1.get::(::ID) + .unwrap_or_default()))); + Tuple::on_new_session(changed, our_keys, queued_keys); + )* + ) + } + + 
fn on_before_session_ending() { + for_tuples!( #( Tuple::on_before_session_ending(); )* ) + } + + fn on_disabled(i: usize) { + for_tuples!( #( Tuple::on_disabled(i); )* ) + } } /// `SessionHandler` for tests that use `UintAuthorityId` as `Keys`. pub struct TestSessionHandler; impl SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [KeyTypeId] = &[sp_runtime::key_types::DUMMY]; + const KEY_TYPE_IDS: &'static [KeyTypeId] = &[sp_runtime::key_types::DUMMY]; - fn on_genesis_session(_: &[(AId, Ks)]) {} + fn on_genesis_session(_: &[(AId, Ks)]) {} - fn on_new_session(_: bool, _: &[(AId, Ks)], _: &[(AId, Ks)]) {} + fn on_new_session(_: bool, _: &[(AId, Ks)], _: &[(AId, Ks)]) {} - fn on_before_session_ending() {} + fn on_before_session_ending() {} - fn on_disabled(_: usize) {} + fn on_disabled(_: usize) {} } impl ValidatorRegistration for Module { - fn is_registered(id: &T::ValidatorId) -> bool { - Self::load_keys(id).is_some() - } + fn is_registered(id: &T::ValidatorId) -> bool { + Self::load_keys(id).is_some() + } } pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From + Into<::Event>; + /// The overarching event type. + type Event: From + Into<::Event>; - /// A stable ID for a validator. - type ValidatorId: Member + Parameter; + /// A stable ID for a validator. + type ValidatorId: Member + Parameter; - /// A conversion from account ID to validator ID. - type ValidatorIdOf: Convert>; + /// A conversion from account ID to validator ID. + type ValidatorIdOf: Convert>; - /// Indicator for when to end the session. - type ShouldEndSession: ShouldEndSession; + /// Indicator for when to end the session. + type ShouldEndSession: ShouldEndSession; - /// Something that can predict the next session rotation. This should typically come from the - /// same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort estimate. - /// It is helpful to implement [`EstimateNextNewSession`]. 
- type NextSessionRotation: EstimateNextSessionRotation; + /// Something that can predict the next session rotation. This should typically come from the + /// same logical unit that provides [`ShouldEndSession`], yet, it gives a best effort estimate. + /// It is helpful to implement [`EstimateNextNewSession`]. + type NextSessionRotation: EstimateNextSessionRotation; - /// Handler for managing new session. - type SessionManager: SessionManager; + /// Handler for managing new session. + type SessionManager: SessionManager; - /// Handler when a session has changed. - type SessionHandler: SessionHandler; + /// Handler when a session has changed. + type SessionHandler: SessionHandler; - /// The keys. - type Keys: OpaqueKeys + Member + Parameter + Default; + /// The keys. + type Keys: OpaqueKeys + Member + Parameter + Default; - /// The fraction of validators set that is safe to be disabled. - /// - /// After the threshold is reached `disabled` method starts to return true, - /// which in combination with `pallet_staking` forces a new era. - type DisabledValidatorsThreshold: Get; + /// The fraction of validators set that is safe to be disabled. + /// + /// After the threshold is reached `disabled` method starts to return true, + /// which in combination with `pallet_staking` forces a new era. + type DisabledValidatorsThreshold: Get; } decl_storage! { - trait Store for Module as Session { - /// The current set of validators. - Validators get(fn validators): Vec; - - /// Current index of the session. - CurrentIndex get(fn current_index): SessionIndex; - - /// True if the underlying economic identities or weighting behind the validators - /// has changed in the queued validator set. - QueuedChanged: bool; - - /// The queued keys for the next session. When the next session begins, these keys - /// will be used to determine the validator's session keys. - QueuedKeys get(fn queued_keys): Vec<(T::ValidatorId, T::Keys)>; - - /// Indices of disabled validators. 
- /// - /// The set is cleared when `on_session_ending` returns a new set of identities. - DisabledValidators get(fn disabled_validators): Vec; - - /// The next session keys for a validator. - NextKeys: map hasher(twox_64_concat) T::ValidatorId => Option; - - /// The owner of a key. The key is the `KeyTypeId` + the encoded key. - KeyOwner: map hasher(twox_64_concat) (KeyTypeId, Vec) => Option; - } - add_extra_genesis { - config(keys): Vec<(T::AccountId, T::ValidatorId, T::Keys)>; - build(|config: &GenesisConfig| { - if T::SessionHandler::KEY_TYPE_IDS.len() != T::Keys::key_ids().len() { - panic!("Number of keys in session handler and session keys does not match"); - } - - T::SessionHandler::KEY_TYPE_IDS.iter().zip(T::Keys::key_ids()).enumerate() - .for_each(|(i, (sk, kk))| { - if sk != kk { - panic!( - "Session handler and session key expect different key type at index: {}", - i, - ); - } - }); - - for (account, val, keys) in config.keys.iter().cloned() { - >::inner_set_keys(&val, keys) - .expect("genesis config must not contain duplicates; qed"); - system::Module::::inc_ref(&account); - } - - let initial_validators_0 = T::SessionManager::new_session(0) - .unwrap_or_else(|| { - frame_support::print("No initial validator provided by `SessionManager`, use \ - session config keys to generate initial validator set."); - config.keys.iter().map(|x| x.1.clone()).collect() - }); - assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!"); - - let initial_validators_1 = T::SessionManager::new_session(1) - .unwrap_or_else(|| initial_validators_0.clone()); - assert!(!initial_validators_1.is_empty(), "Empty validator set for session 1 in genesis block!"); - - let queued_keys: Vec<_> = initial_validators_1 - .iter() - .cloned() - .map(|v| ( - v.clone(), - >::load_keys(&v).unwrap_or_default(), - )) - .collect(); - - // Tell everyone about the genesis session keys - T::SessionHandler::on_genesis_session::(&queued_keys); - - 
>::put(initial_validators_0); - >::put(queued_keys); - - T::SessionManager::start_session(0); - }); - } + trait Store for Module as Session { + /// The current set of validators. + Validators get(fn validators): Vec; + + /// Current index of the session. + CurrentIndex get(fn current_index): SessionIndex; + + /// True if the underlying economic identities or weighting behind the validators + /// has changed in the queued validator set. + QueuedChanged: bool; + + /// The queued keys for the next session. When the next session begins, these keys + /// will be used to determine the validator's session keys. + QueuedKeys get(fn queued_keys): Vec<(T::ValidatorId, T::Keys)>; + + /// Indices of disabled validators. + /// + /// The set is cleared when `on_session_ending` returns a new set of identities. + DisabledValidators get(fn disabled_validators): Vec; + + /// The next session keys for a validator. + NextKeys: map hasher(twox_64_concat) T::ValidatorId => Option; + + /// The owner of a key. The key is the `KeyTypeId` + the encoded key. 
+ KeyOwner: map hasher(twox_64_concat) (KeyTypeId, Vec) => Option; + } + add_extra_genesis { + config(keys): Vec<(T::AccountId, T::ValidatorId, T::Keys)>; + build(|config: &GenesisConfig| { + if T::SessionHandler::KEY_TYPE_IDS.len() != T::Keys::key_ids().len() { + panic!("Number of keys in session handler and session keys does not match"); + } + + T::SessionHandler::KEY_TYPE_IDS.iter().zip(T::Keys::key_ids()).enumerate() + .for_each(|(i, (sk, kk))| { + if sk != kk { + panic!( + "Session handler and session key expect different key type at index: {}", + i, + ); + } + }); + + for (account, val, keys) in config.keys.iter().cloned() { + >::inner_set_keys(&val, keys) + .expect("genesis config must not contain duplicates; qed"); + system::Module::::inc_ref(&account); + } + + let initial_validators_0 = T::SessionManager::new_session(0) + .unwrap_or_else(|| { + frame_support::print("No initial validator provided by `SessionManager`, use \ + session config keys to generate initial validator set."); + config.keys.iter().map(|x| x.1.clone()).collect() + }); + assert!(!initial_validators_0.is_empty(), "Empty validator set for session 0 in genesis block!"); + + let initial_validators_1 = T::SessionManager::new_session(1) + .unwrap_or_else(|| initial_validators_0.clone()); + assert!(!initial_validators_1.is_empty(), "Empty validator set for session 1 in genesis block!"); + + let queued_keys: Vec<_> = initial_validators_1 + .iter() + .cloned() + .map(|v| ( + v.clone(), + >::load_keys(&v).unwrap_or_default(), + )) + .collect(); + + // Tell everyone about the genesis session keys + T::SessionHandler::on_genesis_session::(&queued_keys); + + >::put(initial_validators_0); + >::put(queued_keys); + + T::SessionManager::start_session(0); + }); + } } decl_event!( - pub enum Event { - /// New session has happened. Note that the argument is the session index, not the block - /// number as the type might suggest. 
- NewSession(SessionIndex), - } + pub enum Event { + /// New session has happened. Note that the argument is the session index, not the block + /// number as the type might suggest. + NewSession(SessionIndex), + } ); decl_error! { - /// Error for the session module. - pub enum Error for Module { - /// Invalid ownership proof. - InvalidProof, - /// No associated validator ID for account. - NoAssociatedValidatorId, - /// Registered duplicate key. - DuplicatedKey, - /// No keys are associated with this account. - NoKeys, - } + /// Error for the session module. + pub enum Error for Module { + /// Invalid ownership proof. + InvalidProof, + /// No associated validator ID for account. + NoAssociatedValidatorId, + /// Registered duplicate key. + DuplicatedKey, + /// No keys are associated with this account. + NoKeys, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Sets the session key(s) of the function caller to `keys`. - /// Allows an account to set its session key prior to becoming a validator. - /// This doesn't take effect until the next session. - /// - /// The dispatch origin of this function must be signed. - /// - /// # - /// - O(log n) in number of accounts. - /// - One extra DB entry. - /// - Increases system account refs by one on success iff there were previously no keys set. - /// In this case, purge_keys will need to be called before the account can be removed. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(150_000_000)] - pub fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { - let who = ensure_signed(origin)?; - - ensure!(keys.ownership_proof_is_valid(&proof), Error::::InvalidProof); - - Self::do_set_keys(&who, keys)?; - - Ok(()) - } - - /// Removes any session key(s) of the function caller. - /// This doesn't take effect until the next session. - /// - /// The dispatch origin of this function must be signed. 
- /// - /// # - /// - O(N) in number of key types. - /// - Removes N + 1 DB entries. - /// - Reduces system account refs by one on success. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(150_000_000)] - pub fn purge_keys(origin) { - let who = ensure_signed(origin)?; - Self::do_purge_keys(&who)?; - } - - /// Called when a block is initialized. Will rotate session if it is the last - /// block of the current session. - fn on_initialize(n: T::BlockNumber) -> Weight { - if T::ShouldEndSession::should_end_session(n) { - Self::rotate_session(); - } - - MINIMUM_WEIGHT - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Sets the session key(s) of the function caller to `keys`. + /// Allows an account to set its session key prior to becoming a validator. + /// This doesn't take effect until the next session. + /// + /// The dispatch origin of this function must be signed. + /// + /// # + /// - O(log n) in number of accounts. + /// - One extra DB entry. + /// - Increases system account refs by one on success iff there were previously no keys set. + /// In this case, purge_keys will need to be called before the account can be removed. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(150_000_000)] + pub fn set_keys(origin, keys: T::Keys, proof: Vec) -> dispatch::DispatchResult { + let who = ensure_signed(origin)?; + + ensure!(keys.ownership_proof_is_valid(&proof), Error::::InvalidProof); + + Self::do_set_keys(&who, keys)?; + + Ok(()) + } + + /// Removes any session key(s) of the function caller. + /// This doesn't take effect until the next session. + /// + /// The dispatch origin of this function must be signed. + /// + /// # + /// - O(N) in number of key types. + /// - Removes N + 1 DB entries. + /// - Reduces system account refs by one on success. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(150_000_000)] + pub fn purge_keys(origin) { + let who = ensure_signed(origin)?; + Self::do_purge_keys(&who)?; + } + + /// Called when a block is initialized. Will rotate session if it is the last + /// block of the current session. + fn on_initialize(n: T::BlockNumber) -> Weight { + if T::ShouldEndSession::should_end_session(n) { + Self::rotate_session(); + } + + MINIMUM_WEIGHT + } + } } impl Module { - /// Move on to next session. Register new validator set and session keys. Changes - /// to the validator set have a session of delay to take effect. This allows for - /// equivocation punishment after a fork. - pub fn rotate_session() { - let session_index = CurrentIndex::get(); - - let changed = QueuedChanged::get(); - - // Inform the session handlers that a session is going to end. - T::SessionHandler::on_before_session_ending(); - - T::SessionManager::end_session(session_index); - - // Get queued session keys and validators. - let session_keys = >::get(); - let validators = session_keys.iter() - .map(|(validator, _)| validator.clone()) - .collect::>(); - >::put(&validators); - - if changed { - // reset disabled validators - DisabledValidators::take(); - } - - // Increment session index. - let session_index = session_index + 1; - CurrentIndex::put(session_index); - - T::SessionManager::start_session(session_index); - - // Get next validator set. - let maybe_next_validators = T::SessionManager::new_session(session_index + 1); - let (next_validators, next_identities_changed) - = if let Some(validators) = maybe_next_validators - { - // NOTE: as per the documentation on `OnSessionEnding`, we consider - // the validator set as having changed even if the validators are the - // same as before, as underlying economic conditions may have changed. - (validators, true) - } else { - (>::get(), false) - }; - - // Queue next session keys. 
- let (queued_amalgamated, next_changed) = { - // until we are certain there has been a change, iterate the prior - // validators along with the current and check for changes - let mut changed = next_identities_changed; - - let mut now_session_keys = session_keys.iter(); - let mut check_next_changed = |keys: &T::Keys| { - if changed { return } - // since a new validator set always leads to `changed` starting - // as true, we can ensure that `now_session_keys` and `next_validators` - // have the same length. this function is called once per iteration. - if let Some(&(_, ref old_keys)) = now_session_keys.next() { - if old_keys != keys { - changed = true; - return - } - } - }; - let queued_amalgamated = next_validators.into_iter() - .map(|a| { - let k = Self::load_keys(&a).unwrap_or_default(); - check_next_changed(&k); - (a, k) - }) - .collect::>(); - - (queued_amalgamated, changed) - }; - - >::put(queued_amalgamated.clone()); - QueuedChanged::put(next_changed); - - // Record that this happened. - Self::deposit_event(Event::NewSession(session_index)); - - // Tell everyone about the new session keys. - T::SessionHandler::on_new_session::( - changed, - &session_keys, - &queued_amalgamated, - ); - } - - /// Disable the validator of index `i`. - /// - /// Returns `true` if this causes a `DisabledValidatorsThreshold` of validators - /// to be already disabled. - pub fn disable_index(i: usize) -> bool { - let (fire_event, threshold_reached) = DisabledValidators::mutate(|disabled| { - let i = i as u32; - if let Err(index) = disabled.binary_search(&i) { - let count = >::decode_len().unwrap_or(0) as u32; - let threshold = T::DisabledValidatorsThreshold::get() * count; - disabled.insert(index, i); - (true, disabled.len() as u32 > threshold) - } else { - (false, false) - } - }); - - if fire_event { - T::SessionHandler::on_disabled(i); - } - - threshold_reached - } - - /// Disable the validator identified by `c`. 
(If using with the staking module, - /// this would be their *stash* account.) - /// - /// Returns `Ok(true)` if more than `DisabledValidatorsThreshold` validators in current - /// session is already disabled. - /// If used with the staking module it allows to force a new era in such case. - pub fn disable(c: &T::ValidatorId) -> sp_std::result::Result { - Self::validators().iter().position(|i| i == c).map(Self::disable_index).ok_or(()) - } - - /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. - /// - /// This ensures that the reference counter in system is incremented appropriately and as such - /// must accept an account ID, rather than a validator ID. - fn do_set_keys(account: &T::AccountId, keys: T::Keys) -> dispatch::DispatchResult { - let who = T::ValidatorIdOf::convert(account.clone()) - .ok_or(Error::::NoAssociatedValidatorId)?; - - let old_keys = Self::inner_set_keys(&who, keys)?; - if old_keys.is_none() { - system::Module::::inc_ref(&account); - } - - Ok(()) - } - - /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. - /// - /// The old keys for this validator are returned, or `None` if there were none. - /// - /// This does not ensure that the reference counter in system is incremented appropriately, it - /// must be done by the caller or the keys will be leaked in storage. - fn inner_set_keys(who: &T::ValidatorId, keys: T::Keys) -> Result, DispatchError> { - let old_keys = Self::load_keys(who); - - for id in T::Keys::key_ids() { - let key = keys.get_raw(*id); - - // ensure keys are without duplication. 
- ensure!( - Self::key_owner(*id, key).map_or(true, |owner| &owner == who), - Error::::DuplicatedKey, - ); - - if let Some(old) = old_keys.as_ref().map(|k| k.get_raw(*id)) { - if key == old { - continue; - } - - Self::clear_key_owner(*id, old); - } - - Self::put_key_owner(*id, key, who); - } - - Self::put_keys(who, &keys); - Ok(old_keys) - } - - fn do_purge_keys(account: &T::AccountId) -> DispatchResult { - let who = T::ValidatorIdOf::convert(account.clone()) - .ok_or(Error::::NoAssociatedValidatorId)?; - - let old_keys = Self::take_keys(&who).ok_or(Error::::NoKeys)?; - for id in T::Keys::key_ids() { - let key_data = old_keys.get_raw(*id); - Self::clear_key_owner(*id, key_data); - } - system::Module::::dec_ref(&account); - - Ok(()) - } - - fn load_keys(v: &T::ValidatorId) -> Option { - >::get(v) - } - - fn take_keys(v: &T::ValidatorId) -> Option { - >::take(v) - } - - fn put_keys(v: &T::ValidatorId, keys: &T::Keys) { - >::insert(v, keys); - } - - fn key_owner(id: KeyTypeId, key_data: &[u8]) -> Option { - >::get((id, key_data)) - } - - fn put_key_owner(id: KeyTypeId, key_data: &[u8], v: &T::ValidatorId) { - >::insert((id, key_data), v) - } - - fn clear_key_owner(id: KeyTypeId, key_data: &[u8]) { - >::remove((id, key_data)); - } + /// Move on to next session. Register new validator set and session keys. Changes + /// to the validator set have a session of delay to take effect. This allows for + /// equivocation punishment after a fork. + pub fn rotate_session() { + let session_index = CurrentIndex::get(); + + let changed = QueuedChanged::get(); + + // Inform the session handlers that a session is going to end. + T::SessionHandler::on_before_session_ending(); + + T::SessionManager::end_session(session_index); + + // Get queued session keys and validators. 
+ let session_keys = >::get(); + let validators = session_keys + .iter() + .map(|(validator, _)| validator.clone()) + .collect::>(); + >::put(&validators); + + if changed { + // reset disabled validators + DisabledValidators::take(); + } + + // Increment session index. + let session_index = session_index + 1; + CurrentIndex::put(session_index); + + T::SessionManager::start_session(session_index); + + // Get next validator set. + let maybe_next_validators = T::SessionManager::new_session(session_index + 1); + let (next_validators, next_identities_changed) = + if let Some(validators) = maybe_next_validators { + // NOTE: as per the documentation on `OnSessionEnding`, we consider + // the validator set as having changed even if the validators are the + // same as before, as underlying economic conditions may have changed. + (validators, true) + } else { + (>::get(), false) + }; + + // Queue next session keys. + let (queued_amalgamated, next_changed) = { + // until we are certain there has been a change, iterate the prior + // validators along with the current and check for changes + let mut changed = next_identities_changed; + + let mut now_session_keys = session_keys.iter(); + let mut check_next_changed = |keys: &T::Keys| { + if changed { + return; + } + // since a new validator set always leads to `changed` starting + // as true, we can ensure that `now_session_keys` and `next_validators` + // have the same length. this function is called once per iteration. + if let Some(&(_, ref old_keys)) = now_session_keys.next() { + if old_keys != keys { + changed = true; + return; + } + } + }; + let queued_amalgamated = next_validators + .into_iter() + .map(|a| { + let k = Self::load_keys(&a).unwrap_or_default(); + check_next_changed(&k); + (a, k) + }) + .collect::>(); + + (queued_amalgamated, changed) + }; + + >::put(queued_amalgamated.clone()); + QueuedChanged::put(next_changed); + + // Record that this happened. 
+ Self::deposit_event(Event::NewSession(session_index)); + + // Tell everyone about the new session keys. + T::SessionHandler::on_new_session::(changed, &session_keys, &queued_amalgamated); + } + + /// Disable the validator of index `i`. + /// + /// Returns `true` if this causes a `DisabledValidatorsThreshold` of validators + /// to be already disabled. + pub fn disable_index(i: usize) -> bool { + let (fire_event, threshold_reached) = DisabledValidators::mutate(|disabled| { + let i = i as u32; + if let Err(index) = disabled.binary_search(&i) { + let count = >::decode_len().unwrap_or(0) as u32; + let threshold = T::DisabledValidatorsThreshold::get() * count; + disabled.insert(index, i); + (true, disabled.len() as u32 > threshold) + } else { + (false, false) + } + }); + + if fire_event { + T::SessionHandler::on_disabled(i); + } + + threshold_reached + } + + /// Disable the validator identified by `c`. (If using with the staking module, + /// this would be their *stash* account.) + /// + /// Returns `Ok(true)` if more than `DisabledValidatorsThreshold` validators in current + /// session is already disabled. + /// If used with the staking module it allows to force a new era in such case. + pub fn disable(c: &T::ValidatorId) -> sp_std::result::Result { + Self::validators() + .iter() + .position(|i| i == c) + .map(Self::disable_index) + .ok_or(()) + } + + /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. + /// + /// This ensures that the reference counter in system is incremented appropriately and as such + /// must accept an account ID, rather than a validator ID. 
+ fn do_set_keys(account: &T::AccountId, keys: T::Keys) -> dispatch::DispatchResult { + let who = T::ValidatorIdOf::convert(account.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + + let old_keys = Self::inner_set_keys(&who, keys)?; + if old_keys.is_none() { + system::Module::::inc_ref(&account); + } + + Ok(()) + } + + /// Perform the set_key operation, checking for duplicates. Does not set `Changed`. + /// + /// The old keys for this validator are returned, or `None` if there were none. + /// + /// This does not ensure that the reference counter in system is incremented appropriately, it + /// must be done by the caller or the keys will be leaked in storage. + fn inner_set_keys( + who: &T::ValidatorId, + keys: T::Keys, + ) -> Result, DispatchError> { + let old_keys = Self::load_keys(who); + + for id in T::Keys::key_ids() { + let key = keys.get_raw(*id); + + // ensure keys are without duplication. + ensure!( + Self::key_owner(*id, key).map_or(true, |owner| &owner == who), + Error::::DuplicatedKey, + ); + + if let Some(old) = old_keys.as_ref().map(|k| k.get_raw(*id)) { + if key == old { + continue; + } + + Self::clear_key_owner(*id, old); + } + + Self::put_key_owner(*id, key, who); + } + + Self::put_keys(who, &keys); + Ok(old_keys) + } + + fn do_purge_keys(account: &T::AccountId) -> DispatchResult { + let who = T::ValidatorIdOf::convert(account.clone()) + .ok_or(Error::::NoAssociatedValidatorId)?; + + let old_keys = Self::take_keys(&who).ok_or(Error::::NoKeys)?; + for id in T::Keys::key_ids() { + let key_data = old_keys.get_raw(*id); + Self::clear_key_owner(*id, key_data); + } + system::Module::::dec_ref(&account); + + Ok(()) + } + + fn load_keys(v: &T::ValidatorId) -> Option { + >::get(v) + } + + fn take_keys(v: &T::ValidatorId) -> Option { + >::take(v) + } + + fn put_keys(v: &T::ValidatorId, keys: &T::Keys) { + >::insert(v, keys); + } + + fn key_owner(id: KeyTypeId, key_data: &[u8]) -> Option { + >::get((id, key_data)) + } + + fn put_key_owner(id: 
KeyTypeId, key_data: &[u8], v: &T::ValidatorId) { + >::insert((id, key_data), v) + } + + fn clear_key_owner(id: KeyTypeId, key_data: &[u8]) { + >::remove((id, key_data)); + } } /// Wraps the author-scraping logic for consensus engines that can recover @@ -754,22 +770,23 @@ impl Module { pub struct FindAccountFromAuthorIndex(sp_std::marker::PhantomData<(T, Inner)>); impl> FindAuthor - for FindAccountFromAuthorIndex + for FindAccountFromAuthorIndex { - fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator - { - let i = Inner::find_author(digests)?; - - let validators = >::validators(); - validators.get(i as usize).map(|k| k.clone()) - } + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator, + { + let i = Inner::find_author(digests)?; + + let validators = >::validators(); + validators.get(i as usize).map(|k| k.clone()) + } } impl EstimateNextNewSession for Module { - /// This session module always calls new_session and next_session at the same time, hence we - /// do a simple proxy and pass the function to next rotation. - fn estimate_next_new_session(now: T::BlockNumber) -> Option { - T::NextSessionRotation::estimate_next_session_rotation(now) - } + /// This session module always calls new_session and next_session at the same time, hence we + /// do a simple proxy and pass the function to next rotation. + fn estimate_next_new_session(now: T::BlockNumber) -> Option { + T::NextSessionRotation::estimate_next_session_rotation(now) + } } diff --git a/frame/session/src/mock.rs b/frame/session/src/mock.rs index 9e8c77edf3..9eb4eafb9b 100644 --- a/frame/session/src/mock.rs +++ b/frame/session/src/mock.rs @@ -17,212 +17,225 @@ //! Mock helpers for Session. 
use super::*; -use std::cell::RefCell; use frame_support::{impl_outer_origin, parameter_types, weights::Weight}; use sp_core::{crypto::key_types::DUMMY, H256}; use sp_runtime::{ - Perbill, impl_opaque_keys, - traits::{BlakeTwo256, IdentityLookup, ConvertInto}, - testing::{Header, UintAuthorityId}, + impl_opaque_keys, + testing::{Header, UintAuthorityId}, + traits::{BlakeTwo256, ConvertInto, IdentityLookup}, + Perbill, }; use sp_staking::SessionIndex; +use std::cell::RefCell; impl_opaque_keys! { - pub struct MockSessionKeys { - pub dummy: UintAuthorityId, - } + pub struct MockSessionKeys { + pub dummy: UintAuthorityId, + } } impl From for MockSessionKeys { - fn from(dummy: UintAuthorityId) -> Self { - Self { dummy } - } + fn from(dummy: UintAuthorityId) -> Self { + Self { dummy } + } } impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } thread_local! { - pub static VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); - pub static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); - pub static AUTHORITIES: RefCell> = - RefCell::new(vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - pub static FORCE_SESSION_END: RefCell = RefCell::new(false); - pub static SESSION_LENGTH: RefCell = RefCell::new(2); - pub static SESSION_CHANGED: RefCell = RefCell::new(false); - pub static TEST_SESSION_CHANGED: RefCell = RefCell::new(false); - pub static DISABLED: RefCell = RefCell::new(false); - // Stores if `on_before_session_end` was called - pub static BEFORE_SESSION_END_CALLED: RefCell = RefCell::new(false); + pub static VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); + pub static NEXT_VALIDATORS: RefCell> = RefCell::new(vec![1, 2, 3]); + pub static AUTHORITIES: RefCell> = + RefCell::new(vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); + pub static FORCE_SESSION_END: RefCell = RefCell::new(false); + pub static SESSION_LENGTH: RefCell = RefCell::new(2); 
+ pub static SESSION_CHANGED: RefCell = RefCell::new(false); + pub static TEST_SESSION_CHANGED: RefCell = RefCell::new(false); + pub static DISABLED: RefCell = RefCell::new(false); + // Stores if `on_before_session_end` was called + pub static BEFORE_SESSION_END_CALLED: RefCell = RefCell::new(false); } pub struct TestShouldEndSession; impl ShouldEndSession for TestShouldEndSession { - fn should_end_session(now: u64) -> bool { - let l = SESSION_LENGTH.with(|l| *l.borrow()); - now % l == 0 || FORCE_SESSION_END.with(|l| { let r = *l.borrow(); *l.borrow_mut() = false; r }) - } + fn should_end_session(now: u64) -> bool { + let l = SESSION_LENGTH.with(|l| *l.borrow()); + now % l == 0 + || FORCE_SESSION_END.with(|l| { + let r = *l.borrow(); + *l.borrow_mut() = false; + r + }) + } } pub struct TestSessionHandler; impl SessionHandler for TestSessionHandler { - const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; - fn on_genesis_session(_validators: &[(u64, T)]) {} - fn on_new_session( - changed: bool, - validators: &[(u64, T)], - _queued_validators: &[(u64, T)], - ) { - SESSION_CHANGED.with(|l| *l.borrow_mut() = changed); - AUTHORITIES.with(|l| - *l.borrow_mut() = validators.iter() - .map(|(_, id)| id.get::(DUMMY).unwrap_or_default()) - .collect() - ); - } - fn on_disabled(_validator_index: usize) { - DISABLED.with(|l| *l.borrow_mut() = true) - } - fn on_before_session_ending() { - BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = true); - } + const KEY_TYPE_IDS: &'static [sp_runtime::KeyTypeId] = &[UintAuthorityId::ID]; + fn on_genesis_session(_validators: &[(u64, T)]) {} + fn on_new_session( + changed: bool, + validators: &[(u64, T)], + _queued_validators: &[(u64, T)], + ) { + SESSION_CHANGED.with(|l| *l.borrow_mut() = changed); + AUTHORITIES.with(|l| { + *l.borrow_mut() = validators + .iter() + .map(|(_, id)| id.get::(DUMMY).unwrap_or_default()) + .collect() + }); + } + fn on_disabled(_validator_index: usize) { + DISABLED.with(|l| 
*l.borrow_mut() = true) + } + fn on_before_session_ending() { + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = true); + } } pub struct TestSessionManager; impl SessionManager for TestSessionManager { - fn end_session(_: SessionIndex) {} - fn start_session(_: SessionIndex) {} - fn new_session(_: SessionIndex) -> Option> { - if !TEST_SESSION_CHANGED.with(|l| *l.borrow()) { - VALIDATORS.with(|v| { - let mut v = v.borrow_mut(); - *v = NEXT_VALIDATORS.with(|l| l.borrow().clone()); - Some(v.clone()) - }) - } else if DISABLED.with(|l| std::mem::replace(&mut *l.borrow_mut(), false)) { - // If there was a disabled validator, underlying conditions have changed - // so we return `Some`. - Some(VALIDATORS.with(|v| v.borrow().clone())) - } else { - None - } - } + fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} + fn new_session(_: SessionIndex) -> Option> { + if !TEST_SESSION_CHANGED.with(|l| *l.borrow()) { + VALIDATORS.with(|v| { + let mut v = v.borrow_mut(); + *v = NEXT_VALIDATORS.with(|l| l.borrow().clone()); + Some(v.clone()) + }) + } else if DISABLED.with(|l| std::mem::replace(&mut *l.borrow_mut(), false)) { + // If there was a disabled validator, underlying conditions have changed + // so we return `Some`. 
+ Some(VALIDATORS.with(|v| v.borrow().clone())) + } else { + None + } + } } #[cfg(feature = "historical")] impl crate::historical::SessionManager for TestSessionManager { - fn end_session(_: SessionIndex) {} - fn start_session(_: SessionIndex) {} - fn new_session(new_index: SessionIndex) - -> Option> - { - >::new_session(new_index) - .map(|vals| vals.into_iter().map(|val| (val, val)).collect()) - } + fn end_session(_: SessionIndex) {} + fn start_session(_: SessionIndex) {} + fn new_session(new_index: SessionIndex) -> Option> { + >::new_session(new_index) + .map(|vals| vals.into_iter().map(|val| (val, val)).collect()) + } } pub fn authorities() -> Vec { - AUTHORITIES.with(|l| l.borrow().to_vec()) + AUTHORITIES.with(|l| l.borrow().to_vec()) } pub fn force_new_session() { - FORCE_SESSION_END.with(|l| *l.borrow_mut() = true ) + FORCE_SESSION_END.with(|l| *l.borrow_mut() = true) } pub fn set_session_length(x: u64) { - SESSION_LENGTH.with(|l| *l.borrow_mut() = x ) + SESSION_LENGTH.with(|l| *l.borrow_mut() = x) } pub fn session_changed() -> bool { - SESSION_CHANGED.with(|l| *l.borrow()) + SESSION_CHANGED.with(|l| *l.borrow()) } pub fn set_next_validators(next: Vec) { - NEXT_VALIDATORS.with(|v| *v.borrow_mut() = next); + NEXT_VALIDATORS.with(|v| *v.borrow_mut() = next); } pub fn before_session_end_called() -> bool { - BEFORE_SESSION_END_CALLED.with(|b| *b.borrow()) + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow()) } pub fn reset_before_session_end_called() { - BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = false); + BEFORE_SESSION_END_CALLED.with(|b| *b.borrow_mut() = false); } pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - GenesisConfig:: { - keys: NEXT_VALIDATORS.with(|l| - l.borrow().iter().cloned().map(|i| (i, i, UintAuthorityId(i).into())).collect() - ), - }.assimilate_storage(&mut t).unwrap(); - sp_io::TestExternalities::new(t) + let mut t = 
frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + GenesisConfig:: { + keys: NEXT_VALIDATORS.with(|l| { + l.borrow() + .iter() + .cloned() + .map(|i| (i, i, UintAuthorityId(i).into())) + .collect() + }), + } + .assimilate_storage(&mut t) + .unwrap(); + sp_io::TestExternalities::new(t) } #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const MinimumPeriod: u64 = 5; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const MinimumPeriod: u64 = 5; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type 
OnKilledAccount = (); } impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; } parameter_types! { - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(33); } impl Trait for Test { - type ShouldEndSession = TestShouldEndSession; - #[cfg(feature = "historical")] - type SessionManager = crate::historical::NoteHistoricalRoot; - #[cfg(not(feature = "historical"))] - type SessionManager = TestSessionManager; - type SessionHandler = TestSessionHandler; - type ValidatorId = u64; - type ValidatorIdOf = ConvertInto; - type Keys = MockSessionKeys; - type Event = (); - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type NextSessionRotation = (); + type ShouldEndSession = TestShouldEndSession; + #[cfg(feature = "historical")] + type SessionManager = crate::historical::NoteHistoricalRoot; + #[cfg(not(feature = "historical"))] + type SessionManager = TestSessionManager; + type SessionHandler = TestSessionHandler; + type ValidatorId = u64; + type ValidatorIdOf = ConvertInto; + type Keys = MockSessionKeys; + type Event = (); + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = (); } #[cfg(feature = "historical")] impl crate::historical::Trait for Test { - type FullIdentification = u64; - type FullIdentificationOf = sp_runtime::traits::ConvertInto; + type FullIdentification = u64; + type FullIdentificationOf = sp_runtime::traits::ConvertInto; } pub type System = frame_system::Module; diff --git a/frame/session/src/tests.rs b/frame/session/src/tests.rs index abfd9f738b..17e947b9c2 100644 --- a/frame/session/src/tests.rs +++ b/frame/session/src/tests.rs @@ -17,293 +17,352 @@ // Tests for the Session Pallet use super::*; -use frame_support::{traits::OnInitialize, 
assert_ok}; -use sp_core::crypto::key_types::DUMMY; -use sp_runtime::testing::UintAuthorityId; +use frame_support::{assert_ok, traits::OnInitialize}; use mock::{ - SESSION_CHANGED, TEST_SESSION_CHANGED, authorities, force_new_session, - set_next_validators, set_session_length, session_changed, Origin, System, Session, - reset_before_session_end_called, before_session_end_called, new_test_ext, + authorities, before_session_end_called, force_new_session, new_test_ext, + reset_before_session_end_called, session_changed, set_next_validators, set_session_length, + Origin, Session, System, SESSION_CHANGED, TEST_SESSION_CHANGED, }; +use sp_core::crypto::key_types::DUMMY; +use sp_runtime::testing::UintAuthorityId; fn initialize_block(block: u64) { - SESSION_CHANGED.with(|l| *l.borrow_mut() = false); - System::set_block_number(block); - Session::on_initialize(block); + SESSION_CHANGED.with(|l| *l.borrow_mut() = false); + System::set_block_number(block); + Session::on_initialize(block); } #[test] fn simple_setup_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - assert_eq!(Session::validators(), vec![1, 2, 3]); - }); + new_test_ext().execute_with(|| { + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + assert_eq!(Session::validators(), vec![1, 2, 3]); + }); } #[test] fn put_get_keys() { - new_test_ext().execute_with(|| { - Session::put_keys(&10, &UintAuthorityId(10).into()); - assert_eq!(Session::load_keys(&10), Some(UintAuthorityId(10).into())); - }) + new_test_ext().execute_with(|| { + Session::put_keys(&10, &UintAuthorityId(10).into()); + assert_eq!(Session::load_keys(&10), Some(UintAuthorityId(10).into())); + }) } #[test] fn keys_cleared_on_kill() { - let mut ext = new_test_ext(); - ext.execute_with(|| { - assert_eq!(Session::validators(), vec![1, 2, 3]); - assert_eq!(Session::load_keys(&1), 
Some(UintAuthorityId(1).into())); - - let id = DUMMY; - assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), Some(1)); - - assert!(!System::allow_death(&1)); - assert_ok!(Session::purge_keys(Origin::signed(1))); - assert!(System::allow_death(&1)); - - assert_eq!(Session::load_keys(&1), None); - assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); - }) + let mut ext = new_test_ext(); + ext.execute_with(|| { + assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!(Session::load_keys(&1), Some(UintAuthorityId(1).into())); + + let id = DUMMY; + assert_eq!( + Session::key_owner(id, UintAuthorityId(1).get_raw(id)), + Some(1) + ); + + assert!(!System::allow_death(&1)); + assert_ok!(Session::purge_keys(Origin::signed(1))); + assert!(System::allow_death(&1)); + + assert_eq!(Session::load_keys(&1), None); + assert_eq!(Session::key_owner(id, UintAuthorityId(1).get_raw(id)), None); + }) } #[test] fn authorities_should_track_validators() { - reset_before_session_end_called(); - - new_test_ext().execute_with(|| { - set_next_validators(vec![1, 2]); - force_new_session(); - initialize_block(1); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2, 3]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(2); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - set_next_validators(vec![1, 2, 4]); - assert_ok!(Session::set_keys(Origin::signed(4), UintAuthorityId(4).into(), vec![])); - force_new_session(); - 
initialize_block(3); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); - assert!(before_session_end_called()); - - force_new_session(); - initialize_block(4); - assert_eq!(Session::queued_keys(), vec![ - (1, UintAuthorityId(1).into()), - (2, UintAuthorityId(2).into()), - (4, UintAuthorityId(4).into()), - ]); - assert_eq!(Session::validators(), vec![1, 2, 4]); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)]); - }); + reset_before_session_end_called(); + + new_test_ext().execute_with(|| { + set_next_validators(vec![1, 2]); + force_new_session(); + initialize_block(1); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + ] + ); + assert_eq!(Session::validators(), vec![1, 2, 3]); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(2); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + ] + ); + assert_eq!(Session::validators(), vec![1, 2]); + assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2)]); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + set_next_validators(vec![1, 2, 4]); + assert_ok!(Session::set_keys( + Origin::signed(4), + UintAuthorityId(4).into(), + vec![] + )); + force_new_session(); + initialize_block(3); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ] + ); + assert_eq!(Session::validators(), vec![1, 2]); + assert_eq!(authorities(), 
vec![UintAuthorityId(1), UintAuthorityId(2)]); + assert!(before_session_end_called()); + + force_new_session(); + initialize_block(4); + assert_eq!( + Session::queued_keys(), + vec![ + (1, UintAuthorityId(1).into()), + (2, UintAuthorityId(2).into()), + (4, UintAuthorityId(4).into()), + ] + ); + assert_eq!(Session::validators(), vec![1, 2, 4]); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(4)] + ); + }); } #[test] fn should_work_with_early_exit() { - new_test_ext().execute_with(|| { - set_session_length(10); + new_test_ext().execute_with(|| { + set_session_length(10); - initialize_block(1); - assert_eq!(Session::current_index(), 0); + initialize_block(1); + assert_eq!(Session::current_index(), 0); - initialize_block(2); - assert_eq!(Session::current_index(), 0); + initialize_block(2); + assert_eq!(Session::current_index(), 0); - force_new_session(); - initialize_block(3); - assert_eq!(Session::current_index(), 1); + force_new_session(); + initialize_block(3); + assert_eq!(Session::current_index(), 1); - initialize_block(9); - assert_eq!(Session::current_index(), 1); + initialize_block(9); + assert_eq!(Session::current_index(), 1); - initialize_block(10); - assert_eq!(Session::current_index(), 2); - }); + initialize_block(10); + assert_eq!(Session::current_index(), 2); + }); } #[test] fn session_change_should_work() { - new_test_ext().execute_with(|| { - // Block 1: No change - initialize_block(1); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 2: Session rollover, but no change. - initialize_block(2); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 3: Set new key for validator 2; no visible change. 
- initialize_block(3); - assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 4: Session rollover; no visible change. - initialize_block(4); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 5: No change. - initialize_block(5); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)]); - - // Block 6: Session rollover; authority 2 changes. - initialize_block(6); - assert_eq!(authorities(), vec![UintAuthorityId(1), UintAuthorityId(5), UintAuthorityId(3)]); - }); + new_test_ext().execute_with(|| { + // Block 1: No change + initialize_block(1); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 2: Session rollover, but no change. + initialize_block(2); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 3: Set new key for validator 2; no visible change. + initialize_block(3); + assert_ok!(Session::set_keys( + Origin::signed(2), + UintAuthorityId(5).into(), + vec![] + )); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 4: Session rollover; no visible change. + initialize_block(4); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 5: No change. + initialize_block(5); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(2), UintAuthorityId(3)] + ); + + // Block 6: Session rollover; authority 2 changes. 
+ initialize_block(6); + assert_eq!( + authorities(), + vec![UintAuthorityId(1), UintAuthorityId(5), UintAuthorityId(3)] + ); + }); } #[test] fn duplicates_are_not_allowed() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - Session::on_initialize(1); - assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_err()); - assert!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![]).is_ok()); - - // is fine now that 1 has migrated off. - assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_ok()); - }); + new_test_ext().execute_with(|| { + System::set_block_number(1); + Session::on_initialize(1); + assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_err()); + assert!(Session::set_keys(Origin::signed(1), UintAuthorityId(10).into(), vec![]).is_ok()); + + // is fine now that 1 has migrated off. + assert!(Session::set_keys(Origin::signed(4), UintAuthorityId(1).into(), vec![]).is_ok()); + }); } #[test] fn session_changed_flag_works() { - reset_before_session_end_called(); - - new_test_ext().execute_with(|| { - TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); - - force_new_session(); - initialize_block(1); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(2); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - Session::disable_index(0); - force_new_session(); - initialize_block(3); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(4); - assert!(session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - force_new_session(); - initialize_block(5); - assert!(!session_changed()); - assert!(before_session_end_called()); - 
reset_before_session_end_called(); - - assert_ok!(Session::set_keys(Origin::signed(2), UintAuthorityId(5).into(), vec![])); - force_new_session(); - initialize_block(6); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - // changing the keys of a validator leads to change. - assert_ok!(Session::set_keys(Origin::signed(69), UintAuthorityId(69).into(), vec![])); - force_new_session(); - initialize_block(7); - assert!(session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - - // while changing the keys of a non-validator does not. - force_new_session(); - initialize_block(7); - assert!(!session_changed()); - assert!(before_session_end_called()); - reset_before_session_end_called(); - }); + reset_before_session_end_called(); + + new_test_ext().execute_with(|| { + TEST_SESSION_CHANGED.with(|l| *l.borrow_mut() = true); + + force_new_session(); + initialize_block(1); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(2); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + Session::disable_index(0); + force_new_session(); + initialize_block(3); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(4); + assert!(session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + force_new_session(); + initialize_block(5); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + assert_ok!(Session::set_keys( + Origin::signed(2), + UintAuthorityId(5).into(), + vec![] + )); + force_new_session(); + initialize_block(6); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + // 
changing the keys of a validator leads to change. + assert_ok!(Session::set_keys( + Origin::signed(69), + UintAuthorityId(69).into(), + vec![] + )); + force_new_session(); + initialize_block(7); + assert!(session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + + // while changing the keys of a non-validator does not. + force_new_session(); + initialize_block(7); + assert!(!session_changed()); + assert!(before_session_end_called()); + reset_before_session_end_called(); + }); } #[test] fn periodic_session_works() { - struct Period; - struct Offset; - - impl Get for Period { - fn get() -> u64 { 10 } - } + struct Period; + struct Offset; - impl Get for Offset { - fn get() -> u64 { 3 } - } + impl Get for Period { + fn get() -> u64 { + 10 + } + } + impl Get for Offset { + fn get() -> u64 { + 3 + } + } - type P = PeriodicSessions; + type P = PeriodicSessions; - for i in 0..3 { - assert!(!P::should_end_session(i)); - } + for i in 0..3 { + assert!(!P::should_end_session(i)); + } - assert!(P::should_end_session(3)); + assert!(P::should_end_session(3)); - for i in (1..10).map(|i| 3 + i) { - assert!(!P::should_end_session(i)); - } + for i in (1..10).map(|i| 3 + i) { + assert!(!P::should_end_session(i)); + } - assert!(P::should_end_session(13)); + assert!(P::should_end_session(13)); } #[test] fn session_keys_generate_output_works_as_set_keys_input() { - new_test_ext().execute_with(|| { - let new_keys = mock::MockSessionKeys::generate(None); - assert_ok!( - Session::set_keys( - Origin::signed(2), - ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), - vec![], - ) - ); - }); + new_test_ext().execute_with(|| { + let new_keys = mock::MockSessionKeys::generate(None); + assert_ok!(Session::set_keys( + Origin::signed(2), + ::Keys::decode(&mut &new_keys[..]).expect("Decode keys"), + vec![], + )); + }); } #[test] fn return_true_if_more_than_third_is_disabled() { - new_test_ext().execute_with(|| { - set_next_validators(vec![1, 2, 3, 4, 5, 
6, 7]); - force_new_session(); - initialize_block(1); - // apply the new validator set - force_new_session(); - initialize_block(2); - - assert_eq!(Session::disable_index(0), false); - assert_eq!(Session::disable_index(1), false); - assert_eq!(Session::disable_index(2), true); - assert_eq!(Session::disable_index(3), true); - }); + new_test_ext().execute_with(|| { + set_next_validators(vec![1, 2, 3, 4, 5, 6, 7]); + force_new_session(); + initialize_block(1); + // apply the new validator set + force_new_session(); + initialize_block(2); + + assert_eq!(Session::disable_index(0), false); + assert_eq!(Session::disable_index(1), false); + assert_eq!(Session::disable_index(2), true); + assert_eq!(Session::disable_index(3), true); + }); } diff --git a/frame/society/src/lib.rs b/frame/society/src/lib.rs index f9908f5d9c..b9695053b5 100644 --- a/frame/society/src/lib.rs +++ b/frame/society/src/lib.rs @@ -250,1407 +250,1469 @@ mod mock; #[cfg(test)] mod tests; -use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_runtime::{Percent, ModuleId, RuntimeDebug, - traits::{ - StaticLookup, AccountIdConversion, Saturating, Zero, IntegerSquareRoot, Hash, - TrailingZeroInput, CheckedSub - } +use codec::{Decode, Encode}; +use frame_support::traits::{ + BalanceStatus, ChangeMembers, Currency, EnsureOrigin, ExistenceRequirement::AllowDeath, Get, + Randomness, ReservableCurrency, }; -use frame_support::{decl_error, decl_module, decl_storage, decl_event, ensure, dispatch::DispatchResult}; use frame_support::weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}; -use frame_support::traits::{ - Currency, ReservableCurrency, Randomness, Get, ChangeMembers, BalanceStatus, - ExistenceRequirement::AllowDeath, EnsureOrigin +use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, dispatch::DispatchResult, ensure, +}; +use frame_system::{self as system, ensure_root, ensure_signed}; +use rand_chacha::{ + 
rand_core::{RngCore, SeedableRng}, + ChaChaRng, }; -use frame_system::{self as system, ensure_signed, ensure_root}; +use sp_runtime::{ + traits::{ + AccountIdConversion, CheckedSub, Hash, IntegerSquareRoot, Saturating, StaticLookup, + TrailingZeroInput, Zero, + }, + ModuleId, Percent, RuntimeDebug, +}; +use sp_std::prelude::*; -type BalanceOf = <>::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <>::Currency as Currency<::AccountId>>::Balance; const MODULE_ID: ModuleId = ModuleId(*b"py/socie"); /// The module's configuration trait. -pub trait Trait: system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; +pub trait Trait: system::Trait { + /// The overarching event type. + type Event: From> + Into<::Event>; - /// The currency type used for bidding. - type Currency: ReservableCurrency; + /// The currency type used for bidding. + type Currency: ReservableCurrency; - /// Something that provides randomness in the runtime. - type Randomness: Randomness; + /// Something that provides randomness in the runtime. + type Randomness: Randomness; - /// The minimum amount of a deposit required for a bid to be made. - type CandidateDeposit: Get>; + /// The minimum amount of a deposit required for a bid to be made. + type CandidateDeposit: Get>; - /// The amount of the unpaid reward that gets deducted in the case that either a skeptic - /// doesn't vote or someone votes in the wrong way. - type WrongSideDeduction: Get>; + /// The amount of the unpaid reward that gets deducted in the case that either a skeptic + /// doesn't vote or someone votes in the wrong way. + type WrongSideDeduction: Get>; - /// The number of times a member may vote the wrong way (or not at all, when they are a skeptic) - /// before they become suspended. - type MaxStrikes: Get; + /// The number of times a member may vote the wrong way (or not at all, when they are a skeptic) + /// before they become suspended. 
+ type MaxStrikes: Get; - /// The amount of incentive paid within each period. Doesn't include VoterTip. - type PeriodSpend: Get>; + /// The amount of incentive paid within each period. Doesn't include VoterTip. + type PeriodSpend: Get>; - /// The receiver of the signal for when the members have changed. - type MembershipChanged: ChangeMembers; + /// The receiver of the signal for when the members have changed. + type MembershipChanged: ChangeMembers; - /// The number of blocks between candidate/membership rotation periods. - type RotationPeriod: Get; + /// The number of blocks between candidate/membership rotation periods. + type RotationPeriod: Get; - /// The maximum duration of the payout lock. - type MaxLockDuration: Get; + /// The maximum duration of the payout lock. + type MaxLockDuration: Get; - /// The origin that is allowed to call `found`. - type FounderSetOrigin: EnsureOrigin; + /// The origin that is allowed to call `found`. + type FounderSetOrigin: EnsureOrigin; - /// The origin that is allowed to make suspension judgements. - type SuspensionJudgementOrigin: EnsureOrigin; + /// The origin that is allowed to make suspension judgements. + type SuspensionJudgementOrigin: EnsureOrigin; - /// The number of blocks between membership challenges. - type ChallengePeriod: Get; + /// The number of blocks between membership challenges. + type ChallengePeriod: Get; } /// A vote by a member on a candidate application. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub enum Vote { - /// The member has been chosen to be skeptic and has not yet taken any action. - Skeptic, - /// The member has rejected the candidate's application. - Reject, - /// The member approves of the candidate's application. - Approve, + /// The member has been chosen to be skeptic and has not yet taken any action. + Skeptic, + /// The member has rejected the candidate's application. + Reject, + /// The member approves of the candidate's application. 
+ Approve, } /// A judgement by the suspension judgement origin on a suspended candidate. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub enum Judgement { - /// The suspension judgement origin takes no direct judgment - /// and places the candidate back into the bid pool. - Rebid, - /// The suspension judgement origin has rejected the candidate's application. - Reject, - /// The suspension judgement origin approves of the candidate's application. - Approve, + /// The suspension judgement origin takes no direct judgment + /// and places the candidate back into the bid pool. + Rebid, + /// The suspension judgement origin has rejected the candidate's application. + Reject, + /// The suspension judgement origin approves of the candidate's application. + Approve, } /// Details of a payout given as a per-block linear "trickle". #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug, Default)] pub struct Payout { - /// Total value of the payout. - value: Balance, - /// Block number at which the payout begins. - begin: BlockNumber, - /// Total number of blocks over which the payout is spread. - duration: BlockNumber, - /// Total value paid out so far. - paid: Balance, + /// Total value of the payout. + value: Balance, + /// Block number at which the payout begins. + begin: BlockNumber, + /// Total number of blocks over which the payout is spread. + duration: BlockNumber, + /// Total value paid out so far. + paid: Balance, } /// Status of a vouching member. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub enum VouchingStatus { - /// Member is currently vouching for a user. - Vouching, - /// Member is banned from vouching for other members. - Banned, + /// Member is currently vouching for a user. + Vouching, + /// Member is banned from vouching for other members. + Banned, } /// Number of strikes that a member has against them. pub type StrikeCount = u32; /// A bid for entry into society. 
-#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug,)] +#[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Bid { - /// The bidder/candidate trying to enter society - who: AccountId, - /// The kind of bid placed for this bidder/candidate. See `BidKind`. - kind: BidKind, - /// The reward that the bidder has requested for successfully joining the society. - value: Balance, + /// The bidder/candidate trying to enter society + who: AccountId, + /// The kind of bid placed for this bidder/candidate. See `BidKind`. + kind: BidKind, + /// The reward that the bidder has requested for successfully joining the society. + value: Balance, } /// A vote by a member on a candidate application. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub enum BidKind { - /// The CandidateDeposit was paid for this bid. - Deposit(Balance), - /// A member vouched for this bid. The account should be reinstated into `Members` once the - /// bid is successful (or if it is rescinded prior to launch). - Vouch(AccountId, Balance), + /// The CandidateDeposit was paid for this bid. + Deposit(Balance), + /// A member vouched for this bid. The account should be reinstated into `Members` once the + /// bid is successful (or if it is rescinded prior to launch). + Vouch(AccountId, Balance), } impl BidKind { - fn check_voucher(&self, v: &AccountId) -> DispatchResult { - if let BidKind::Vouch(ref a, _) = self { - if a == v { - Ok(()) - } else { - Err("incorrect identity")? - } - } else { - Err("not vouched")? - } - } + fn check_voucher(&self, v: &AccountId) -> DispatchResult { + if let BidKind::Vouch(ref a, _) = self { + if a == v { + Ok(()) + } else { + Err("incorrect identity")? + } + } else { + Err("not vouched")? + } + } } // This module's storage items. decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Society { - /// The first member. 
- pub Founder get(founder) build(|config: &GenesisConfig| config.members.first().cloned()): - Option; - - /// A hash of the rules of this society concerning membership. Can only be set once and - /// only by the founder. - pub Rules get(rules): Option; - - /// The current set of candidates; bidders that are attempting to become members. - pub Candidates get(candidates): Vec>>; - - /// The set of suspended candidates. - pub SuspendedCandidates get(suspended_candidate): - map hasher(twox_64_concat) T::AccountId - => Option<(BalanceOf, BidKind>)>; - - /// Amount of our account balance that is specifically for the next round's bid(s). - pub Pot get(fn pot) config(): BalanceOf; - - /// The most primary from the most recently approved members. - pub Head get(head) build(|config: &GenesisConfig| config.members.first().cloned()): - Option; - - /// The current set of members, ordered. - pub Members get(fn members) build(|config: &GenesisConfig| { - let mut m = config.members.clone(); - m.sort(); - m - }): Vec; - - /// The set of suspended members. - pub SuspendedMembers get(fn suspended_member): map hasher(twox_64_concat) T::AccountId => bool; - - /// The current bids, stored ordered by the value of the bid. - Bids: Vec>>; - - /// Members currently vouching or banned from vouching again - Vouching get(fn vouching): map hasher(twox_64_concat) T::AccountId => Option; - - /// Pending payouts; ordered by block number, with the amount that should be paid out. - Payouts: map hasher(twox_64_concat) T::AccountId => Vec<(T::BlockNumber, BalanceOf)>; - - /// The ongoing number of losing votes cast by the member. - Strikes: map hasher(twox_64_concat) T::AccountId => StrikeCount; - - /// Double map from Candidate -> Voter -> (Maybe) Vote. - Votes: double_map - hasher(twox_64_concat) T::AccountId, - hasher(twox_64_concat) T::AccountId - => Option; - - /// The defending member currently being challenged. - Defender get(fn defender): Option; - - /// Votes for the defender. 
- DefenderVotes: map hasher(twox_64_concat) T::AccountId => Option; - - /// The max number of members for the society at one time. - MaxMembers get(fn max_members) config(): u32; - } - add_extra_genesis { - config(members): Vec; - } + trait Store for Module, I: Instance=DefaultInstance> as Society { + /// The first member. + pub Founder get(founder) build(|config: &GenesisConfig| config.members.first().cloned()): + Option; + + /// A hash of the rules of this society concerning membership. Can only be set once and + /// only by the founder. + pub Rules get(rules): Option; + + /// The current set of candidates; bidders that are attempting to become members. + pub Candidates get(candidates): Vec>>; + + /// The set of suspended candidates. + pub SuspendedCandidates get(suspended_candidate): + map hasher(twox_64_concat) T::AccountId + => Option<(BalanceOf, BidKind>)>; + + /// Amount of our account balance that is specifically for the next round's bid(s). + pub Pot get(fn pot) config(): BalanceOf; + + /// The most primary from the most recently approved members. + pub Head get(head) build(|config: &GenesisConfig| config.members.first().cloned()): + Option; + + /// The current set of members, ordered. + pub Members get(fn members) build(|config: &GenesisConfig| { + let mut m = config.members.clone(); + m.sort(); + m + }): Vec; + + /// The set of suspended members. + pub SuspendedMembers get(fn suspended_member): map hasher(twox_64_concat) T::AccountId => bool; + + /// The current bids, stored ordered by the value of the bid. + Bids: Vec>>; + + /// Members currently vouching or banned from vouching again + Vouching get(fn vouching): map hasher(twox_64_concat) T::AccountId => Option; + + /// Pending payouts; ordered by block number, with the amount that should be paid out. + Payouts: map hasher(twox_64_concat) T::AccountId => Vec<(T::BlockNumber, BalanceOf)>; + + /// The ongoing number of losing votes cast by the member. 
+ Strikes: map hasher(twox_64_concat) T::AccountId => StrikeCount; + + /// Double map from Candidate -> Voter -> (Maybe) Vote. + Votes: double_map + hasher(twox_64_concat) T::AccountId, + hasher(twox_64_concat) T::AccountId + => Option; + + /// The defending member currently being challenged. + Defender get(fn defender): Option; + + /// Votes for the defender. + DefenderVotes: map hasher(twox_64_concat) T::AccountId => Option; + + /// The max number of members for the society at one time. + MaxMembers get(fn max_members) config(): u32; + } + add_extra_genesis { + config(members): Vec; + } } // The module's dispatchable functions. decl_module! { - /// The module declaration. - pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { - type Error = Error; - /// The minimum amount of a deposit required for a bid to be made. - const CandidateDeposit: BalanceOf = T::CandidateDeposit::get(); - - /// The amount of the unpaid reward that gets deducted in the case that either a skeptic - /// doesn't vote or someone votes in the wrong way. - const WrongSideDeduction: BalanceOf = T::WrongSideDeduction::get(); - - /// The number of times a member may vote the wrong way (or not at all, when they are a skeptic) - /// before they become suspended. - const MaxStrikes: u32 = T::MaxStrikes::get(); - - /// The amount of incentive paid within each period. Doesn't include VoterTip. - const PeriodSpend: BalanceOf = T::PeriodSpend::get(); - - /// The number of blocks between candidate/membership rotation periods. - const RotationPeriod: T::BlockNumber = T::RotationPeriod::get(); - - /// The number of blocks between membership challenges. - const ChallengePeriod: T::BlockNumber = T::ChallengePeriod::get(); - - // Used for handling module events. - fn deposit_event() = default; - - /// A user outside of the society can make a bid for entry. - /// - /// Payment: `CandidateDeposit` will be reserved for making a bid. 
It is returned - /// when the bid becomes a member, or if the bid calls `unbid`. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Parameters: - /// - `value`: A one time payment the bid would like to receive when joining the society. - /// - /// # - /// Key: B (len of bids), C (len of candidates), M (len of members), X (balance reserve) - /// - Storage Reads: - /// - One storage read to check for suspended candidate. O(1) - /// - One storage read to check for suspended member. O(1) - /// - One storage read to retrieve all current bids. O(B) - /// - One storage read to retrieve all current candidates. O(C) - /// - One storage read to retrieve all members. O(M) - /// - Storage Writes: - /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization w/ read) - /// - Up to one storage removal if bid.len() > MAX_BID_COUNT. O(1) - /// - Notable Computation: - /// - O(B + C + log M) search to check user is not already a part of society. - /// - O(log B) search to insert the new bid sorted. - /// - External Module Operations: - /// - One balance reserve operation. O(X) - /// - Up to one balance unreserve operation if bids.len() > MAX_BID_COUNT. - /// - Events: - /// - One event for new bid. - /// - Up to one event for AutoUnbid if bid.len() > MAX_BID_COUNT. 
- /// - /// Total Complexity: O(M + B + C + logM + logB + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - pub fn bid(origin, value: BalanceOf) -> DispatchResult { - let who = ensure_signed(origin)?; - ensure!(!>::contains_key(&who), Error::::Suspended); - ensure!(!>::contains_key(&who), Error::::Suspended); - let bids = >::get(); - ensure!(!Self::is_bid(&bids, &who), Error::::AlreadyBid); - let candidates = >::get(); - ensure!(!Self::is_candidate(&candidates, &who), Error::::AlreadyCandidate); - let members = >::get(); - ensure!(!Self::is_member(&members ,&who), Error::::AlreadyMember); - - let deposit = T::CandidateDeposit::get(); - T::Currency::reserve(&who, deposit)?; - - Self::put_bid(bids, &who, value.clone(), BidKind::Deposit(deposit)); - Self::deposit_event(RawEvent::Bid(who, value)); - Ok(()) - } - - /// A bidder can remove their bid for entry into society. - /// By doing so, they will have their candidate deposit returned or - /// they will unvouch their voucher. - /// - /// Payment: The bid deposit is unreserved if the user made a bid. - /// - /// The dispatch origin for this call must be _Signed_ and a bidder. - /// - /// Parameters: - /// - `pos`: Position in the `Bids` vector of the bid who wants to unbid. - /// - /// # - /// Key: B (len of bids), X (balance unreserve) - /// - One storage read and write to retrieve and update the bids. O(B) - /// - Either one unreserve balance action O(X) or one vouching storage removal. O(1) - /// - One event. - /// - /// Total Complexity: O(B + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] - pub fn unbid(origin, pos: u32) -> DispatchResult { - let who = ensure_signed(origin)?; - - let pos = pos as usize; - >::mutate(|b| - if pos < b.len() && b[pos].who == who { - // Either unreserve the deposit or free up the vouching member. - // In neither case can we do much if the action isn't completable, but there's - // no reason that either should fail. 
- match b.remove(pos).kind { - BidKind::Deposit(deposit) => { - let _ = T::Currency::unreserve(&who, deposit); - } - BidKind::Vouch(voucher, _) => { - >::remove(&voucher); - } - } - Self::deposit_event(RawEvent::Unbid(who)); - Ok(()) - } else { - Err(Error::::BadPosition)? - } - ) - } - - /// As a member, vouch for someone to join society by placing a bid on their behalf. - /// - /// There is no deposit required to vouch for a new bid, but a member can only vouch for - /// one bid at a time. If the bid becomes a suspended candidate and ultimately rejected by - /// the suspension judgement origin, the member will be banned from vouching again. - /// - /// As a vouching member, you can claim a tip if the candidate is accepted. This tip will - /// be paid as a portion of the reward the member will receive for joining the society. - /// - /// The dispatch origin for this call must be _Signed_ and a member. - /// - /// Parameters: - /// - `who`: The user who you would like to vouch for. - /// - `value`: The total reward to be paid between you and the candidate if they become - /// a member in the society. - /// - `tip`: Your cut of the total `value` payout when the candidate is inducted into - /// the society. Tips larger than `value` will be saturated upon payout. - /// - /// # - /// Key: B (len of bids), C (len of candidates), M (len of members) - /// - Storage Reads: - /// - One storage read to retrieve all members. O(M) - /// - One storage read to check member is not already vouching. O(1) - /// - One storage read to check for suspended candidate. O(1) - /// - One storage read to check for suspended member. O(1) - /// - One storage read to retrieve all current bids. O(B) - /// - One storage read to retrieve all current candidates. O(C) - /// - Storage Writes: - /// - One storage write to insert vouching status to the member. 
O(1) - /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization w/ read) - /// - Up to one storage removal if bid.len() > MAX_BID_COUNT. O(1) - /// - Notable Computation: - /// - O(log M) search to check sender is a member. - /// - O(B + C + log M) search to check user is not already a part of society. - /// - O(log B) search to insert the new bid sorted. - /// - External Module Operations: - /// - One balance reserve operation. O(X) - /// - Up to one balance unreserve operation if bids.len() > MAX_BID_COUNT. - /// - Events: - /// - One event for vouch. - /// - Up to one event for AutoUnbid if bid.len() > MAX_BID_COUNT. - /// - /// Total Complexity: O(M + B + C + logM + logB + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - pub fn vouch(origin, who: T::AccountId, value: BalanceOf, tip: BalanceOf) -> DispatchResult { - let voucher = ensure_signed(origin)?; - // Check user is not suspended. - ensure!(!>::contains_key(&who), Error::::Suspended); - ensure!(!>::contains_key(&who), Error::::Suspended); - // Check user is not a bid or candidate. - let bids = >::get(); - ensure!(!Self::is_bid(&bids, &who), Error::::AlreadyBid); - let candidates = >::get(); - ensure!(!Self::is_candidate(&candidates, &who), Error::::AlreadyCandidate); - // Check user is not already a member. - let members = >::get(); - ensure!(!Self::is_member(&members, &who), Error::::AlreadyMember); - // Check sender can vouch. - ensure!(Self::is_member(&members, &voucher), Error::::NotMember); - ensure!(!>::contains_key(&voucher), Error::::AlreadyVouching); - - >::insert(&voucher, VouchingStatus::Vouching); - Self::put_bid(bids, &who, value.clone(), BidKind::Vouch(voucher.clone(), tip)); - Self::deposit_event(RawEvent::Vouch(who, value, voucher)); - Ok(()) - } - - /// As a vouching member, unvouch a bid. This only works while vouched user is - /// only a bidder (and not a candidate). 
- /// - /// The dispatch origin for this call must be _Signed_ and a vouching member. - /// - /// Parameters: - /// - `pos`: Position in the `Bids` vector of the bid who should be unvouched. - /// - /// # - /// Key: B (len of bids) - /// - One storage read O(1) to check the signer is a vouching member. - /// - One storage mutate to retrieve and update the bids. O(B) - /// - One vouching storage removal. O(1) - /// - One event. - /// - /// Total Complexity: O(B) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] - pub fn unvouch(origin, pos: u32) -> DispatchResult { - let voucher = ensure_signed(origin)?; - ensure!(Self::vouching(&voucher) == Some(VouchingStatus::Vouching), Error::::NotVouching); - - let pos = pos as usize; - >::mutate(|b| - if pos < b.len() { - b[pos].kind.check_voucher(&voucher)?; - >::remove(&voucher); - let who = b.remove(pos).who; - Self::deposit_event(RawEvent::Unvouch(who)); - Ok(()) - } else { - Err(Error::::BadPosition)? - } - ) - } - - /// As a member, vote on a candidate. - /// - /// The dispatch origin for this call must be _Signed_ and a member. - /// - /// Parameters: - /// - `candidate`: The candidate that the member would like to bid on. - /// - `approve`: A boolean which says if the candidate should be - /// approved (`true`) or rejected (`false`). - /// - /// # - /// Key: C (len of candidates), M (len of members) - /// - One storage read O(M) and O(log M) search to check user is a member. - /// - One account lookup. - /// - One storage read O(C) and O(C) search to check that user is a candidate. - /// - One storage write to add vote to votes. O(1) - /// - One event. 
- /// - /// Total Complexity: O(M + logM + C) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] - pub fn vote(origin, candidate: ::Source, approve: bool) { - let voter = ensure_signed(origin)?; - let candidate = T::Lookup::lookup(candidate)?; - let candidates = >::get(); - ensure!(Self::is_candidate(&candidates, &candidate), Error::::NotCandidate); - let members = >::get(); - ensure!(Self::is_member(&members, &voter), Error::::NotMember); - - let vote = if approve { Vote::Approve } else { Vote::Reject }; - >::insert(&candidate, &voter, vote); - - Self::deposit_event(RawEvent::Vote(candidate, voter, approve)); - } - - /// As a member, vote on the defender. - /// - /// The dispatch origin for this call must be _Signed_ and a member. - /// - /// Parameters: - /// - `approve`: A boolean which says if the candidate should be - /// approved (`true`) or rejected (`false`). - /// - /// # - /// - Key: M (len of members) - /// - One storage read O(M) and O(log M) search to check user is a member. - /// - One storage write to add vote to votes. O(1) - /// - One event. - /// - /// Total Complexity: O(M + logM) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] - pub fn defender_vote(origin, approve: bool) { - let voter = ensure_signed(origin)?; - let members = >::get(); - ensure!(Self::is_member(&members, &voter), Error::::NotMember); - - let vote = if approve { Vote::Approve } else { Vote::Reject }; - >::insert(&voter, vote); - - Self::deposit_event(RawEvent::DefenderVote(voter, approve)); - } - - /// Transfer the first matured payout for the sender and remove it from the records. - /// - /// NOTE: This extrinsic needs to be called multiple times to claim multiple matured payouts. - /// - /// Payment: The member will receive a payment equal to their first matured - /// payout to their free balance. - /// - /// The dispatch origin for this call must be _Signed_ and a member with - /// payouts remaining. 
- /// - /// # - /// Key: M (len of members), P (number of payouts for a particular member) - /// - One storage read O(M) and O(log M) search to check signer is a member. - /// - One storage read O(P) to get all payouts for a member. - /// - One storage read O(1) to get the current block number. - /// - One currency transfer call. O(X) - /// - One storage write or removal to update the member's payouts. O(P) - /// - /// Total Complexity: O(M + logM + P + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] - pub fn payout(origin) { - let who = ensure_signed(origin)?; - - let members = >::get(); - ensure!(Self::is_member(&members, &who), Error::::NotMember); - - let mut payouts = >::get(&who); - if let Some((when, amount)) = payouts.first() { - if when <= &>::block_number() { - T::Currency::transfer(&Self::payouts(), &who, *amount, AllowDeath)?; - payouts.remove(0); - if payouts.is_empty() { - >::remove(&who); - } else { - >::insert(&who, payouts); - } - return Ok(()) - } - } - Err(Error::::NoPayout)? - } - - /// Found the society. - /// - /// This is done as a discrete action in order to allow for the - /// module to be included into a running chain and can only be done once. - /// - /// The dispatch origin for this call must be from the _FounderSetOrigin_. - /// - /// Parameters: - /// - `founder` - The first member and head of the newly founded society. - /// - `max_members` - The initial max number of members for the society. - /// - `rules` - The rules of this society concerning membership. - /// - /// # - /// - Two storage mutates to set `Head` and `Founder`. O(1) - /// - One storage write to add the first member to society. O(1) - /// - One event. 
- /// - /// Total Complexity: O(1) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn found(origin, founder: T::AccountId, max_members: u32, rules: Vec) { - T::FounderSetOrigin::ensure_origin(origin)?; - ensure!(!>::exists(), Error::::AlreadyFounded); - ensure!(max_members > 1, Error::::MaxMembers); - // This should never fail in the context of this function... - >::put(max_members); - Self::add_member(&founder)?; - >::put(&founder); - >::put(&founder); - Rules::::put(T::Hashing::hash(&rules)); - Self::deposit_event(RawEvent::Founded(founder)); - } - - /// Annul the founding of the society. - /// - /// The dispatch origin for this call must be Signed, and the signing account must be both - /// the `Founder` and the `Head`. This implies that it may only be done when there is one - /// member. - /// - /// # - /// - Two storage reads O(1). - /// - Four storage removals O(1). - /// - One event. - /// - /// Total Complexity: O(1) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] - fn unfound(origin) { - let founder = ensure_signed(origin)?; - ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); - ensure!(Head::::get() == Some(founder.clone()), Error::::NotHead); - - Members::::kill(); - Head::::kill(); - Founder::::kill(); - Rules::::kill(); - Candidates::::kill(); - SuspendedCandidates::::remove_all(); - Self::deposit_event(RawEvent::Unfounded(founder)); - } - - /// Allow suspension judgement origin to make judgement on a suspended member. - /// - /// If a suspended member is forgiven, we simply add them back as a member, not affecting - /// any of the existing storage items for that member. - /// - /// If a suspended member is rejected, remove all associated storage items, including - /// their payouts, and remove any vouched bids they currently have. - /// - /// The dispatch origin for this call must be from the _SuspensionJudgementOrigin_. 
- /// - /// Parameters: - /// - `who` - The suspended member to be judged. - /// - `forgive` - A boolean representing whether the suspension judgement origin - /// forgives (`true`) or rejects (`false`) a suspended member. - /// - /// # - /// Key: B (len of bids), M (len of members) - /// - One storage read to check `who` is a suspended member. O(1) - /// - Up to one storage write O(M) with O(log M) binary search to add a member back to society. - /// - Up to 3 storage removals O(1) to clean up a removed member. - /// - Up to one storage write O(B) with O(B) search to remove vouched bid from bids. - /// - Up to one additional event if unvouch takes place. - /// - One storage removal. O(1) - /// - One event for the judgement. - /// - /// Total Complexity: O(M + logM + B) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] - fn judge_suspended_member(origin, who: T::AccountId, forgive: bool) { - T::SuspensionJudgementOrigin::ensure_origin(origin)?; - ensure!(>::contains_key(&who), Error::::NotSuspended); - - if forgive { - // Try to add member back to society. Can fail with `MaxMembers` limit. - Self::add_member(&who)?; - } else { - // Cancel a suspended member's membership, remove their payouts. - >::remove(&who); - >::remove(&who); - // Remove their vouching status, potentially unbanning them in the future. - if >::take(&who) == Some(VouchingStatus::Vouching) { - // Try to remove their bid if they are vouching. - // If their vouch is already a candidate, do nothing. - >::mutate(|bids| - // Try to find the matching bid - if let Some(pos) = bids.iter().position(|b| b.kind.check_voucher(&who).is_ok()) { - // Remove the bid, and emit an event - let vouched = bids.remove(pos).who; - Self::deposit_event(RawEvent::Unvouch(vouched)); - } - ); - } - } - - >::remove(&who); - Self::deposit_event(RawEvent::SuspendedMemberJudgement(who, forgive)); - } - - /// Allow suspended judgement origin to make judgement on a suspended candidate. 
- /// - /// If the judgement is `Approve`, we add them to society as a member with the appropriate - /// payment for joining society. - /// - /// If the judgement is `Reject`, we either slash the deposit of the bid, giving it back - /// to the society treasury, or we ban the voucher from vouching again. - /// - /// If the judgement is `Rebid`, we put the candidate back in the bid pool and let them go - /// through the induction process again. - /// - /// The dispatch origin for this call must be from the _SuspensionJudgementOrigin_. - /// - /// Parameters: - /// - `who` - The suspended candidate to be judged. - /// - `judgement` - `Approve`, `Reject`, or `Rebid`. - /// - /// # - /// Key: B (len of bids), M (len of members), X (balance action) - /// - One storage read to check `who` is a suspended candidate. - /// - One storage removal of the suspended candidate. - /// - Approve Logic - /// - One storage read to get the available pot to pay users with. O(1) - /// - One storage write to update the available pot. O(1) - /// - One storage read to get the current block number. O(1) - /// - One storage read to get all members. O(M) - /// - Up to one unreserve currency action. - /// - Up to two new storage writes to payouts. - /// - Up to one storage write with O(log M) binary search to add a member to society. - /// - Reject Logic - /// - Up to one repatriate reserved currency action. O(X) - /// - Up to one storage write to ban the vouching member from vouching again. - /// - Rebid Logic - /// - Storage mutate with O(log B) binary search to place the user back into bids. - /// - Up to one additional event if unvouch takes place. - /// - One storage removal. - /// - One event for the judgement. 
- /// - /// Total Complexity: O(M + logM + B + X) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn judge_suspended_candidate(origin, who: T::AccountId, judgement: Judgement) { - T::SuspensionJudgementOrigin::ensure_origin(origin)?; - if let Some((value, kind)) = >::get(&who) { - match judgement { - Judgement::Approve => { - // Suspension Judgement origin has approved this candidate - // Make sure we can pay them - let pot = Self::pot(); - ensure!(pot >= value, Error::::InsufficientPot); - // Try to add user as a member! Can fail with `MaxMember` limit. - Self::add_member(&who)?; - // Reduce next pot by payout - >::put(pot - value); - // Add payout for new candidate - let maturity = >::block_number() - + Self::lock_duration(Self::members().len() as u32); - Self::pay_accepted_candidate(&who, value, kind, maturity); - } - Judgement::Reject => { - // Founder has rejected this candidate - match kind { - BidKind::Deposit(deposit) => { - // Slash deposit and move it to the society account - let _ = T::Currency::repatriate_reserved(&who, &Self::account_id(), deposit, BalanceStatus::Free); - } - BidKind::Vouch(voucher, _) => { - // Ban the voucher from vouching again - >::insert(&voucher, VouchingStatus::Banned); - } - } - } - Judgement::Rebid => { - // Founder has taken no judgement, and candidate is placed back into the pool. - let bids = >::get(); - Self::put_bid(bids, &who, value, kind); - } - } - - // Remove suspended candidate - >::remove(who); - } else { - Err(Error::::NotSuspended)? - } - } - - /// Allows root origin to change the maximum number of members in society. - /// Max membership count must be greater than 1. - /// - /// The dispatch origin for this call must be from _ROOT_. - /// - /// Parameters: - /// - `max` - The maximum number of members for the society. - /// - /// # - /// - One storage write to update the max. O(1) - /// - One event. 
- /// - /// Total Complexity: O(1) - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn set_max_members(origin, max: u32) { - ensure_root(origin)?; - ensure!(max > 1, Error::::MaxMembers); - MaxMembers::::put(max); - Self::deposit_event(RawEvent::NewMaxMembers(max)); - } - - fn on_initialize(n: T::BlockNumber) -> Weight { - let mut members = vec![]; - - // Run a candidate/membership rotation - if (n % T::RotationPeriod::get()).is_zero() { - members = >::get(); - Self::rotate_period(&mut members); - } - - // Run a challenge rotation - if (n % T::ChallengePeriod::get()).is_zero() { - // Only read members if not already read. - if members.is_empty() { - members = >::get(); - } - Self::rotate_challenge(&mut members); - } - - MINIMUM_WEIGHT - } - } + /// The module declaration. + pub struct Module, I: Instance=DefaultInstance> for enum Call where origin: T::Origin { + type Error = Error; + /// The minimum amount of a deposit required for a bid to be made. + const CandidateDeposit: BalanceOf = T::CandidateDeposit::get(); + + /// The amount of the unpaid reward that gets deducted in the case that either a skeptic + /// doesn't vote or someone votes in the wrong way. + const WrongSideDeduction: BalanceOf = T::WrongSideDeduction::get(); + + /// The number of times a member may vote the wrong way (or not at all, when they are a skeptic) + /// before they become suspended. + const MaxStrikes: u32 = T::MaxStrikes::get(); + + /// The amount of incentive paid within each period. Doesn't include VoterTip. + const PeriodSpend: BalanceOf = T::PeriodSpend::get(); + + /// The number of blocks between candidate/membership rotation periods. + const RotationPeriod: T::BlockNumber = T::RotationPeriod::get(); + + /// The number of blocks between membership challenges. + const ChallengePeriod: T::BlockNumber = T::ChallengePeriod::get(); + + // Used for handling module events. 
+ fn deposit_event() = default; + + /// A user outside of the society can make a bid for entry. + /// + /// Payment: `CandidateDeposit` will be reserved for making a bid. It is returned + /// when the bid becomes a member, or if the bid calls `unbid`. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// Parameters: + /// - `value`: A one time payment the bid would like to receive when joining the society. + /// + /// # + /// Key: B (len of bids), C (len of candidates), M (len of members), X (balance reserve) + /// - Storage Reads: + /// - One storage read to check for suspended candidate. O(1) + /// - One storage read to check for suspended member. O(1) + /// - One storage read to retrieve all current bids. O(B) + /// - One storage read to retrieve all current candidates. O(C) + /// - One storage read to retrieve all members. O(M) + /// - Storage Writes: + /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization w/ read) + /// - Up to one storage removal if bid.len() > MAX_BID_COUNT. O(1) + /// - Notable Computation: + /// - O(B + C + log M) search to check user is not already a part of society. + /// - O(log B) search to insert the new bid sorted. + /// - External Module Operations: + /// - One balance reserve operation. O(X) + /// - Up to one balance unreserve operation if bids.len() > MAX_BID_COUNT. + /// - Events: + /// - One event for new bid. + /// - Up to one event for AutoUnbid if bid.len() > MAX_BID_COUNT. 
+ /// + /// Total Complexity: O(M + B + C + logM + logB + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + pub fn bid(origin, value: BalanceOf) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!(!>::contains_key(&who), Error::::Suspended); + ensure!(!>::contains_key(&who), Error::::Suspended); + let bids = >::get(); + ensure!(!Self::is_bid(&bids, &who), Error::::AlreadyBid); + let candidates = >::get(); + ensure!(!Self::is_candidate(&candidates, &who), Error::::AlreadyCandidate); + let members = >::get(); + ensure!(!Self::is_member(&members ,&who), Error::::AlreadyMember); + + let deposit = T::CandidateDeposit::get(); + T::Currency::reserve(&who, deposit)?; + + Self::put_bid(bids, &who, value.clone(), BidKind::Deposit(deposit)); + Self::deposit_event(RawEvent::Bid(who, value)); + Ok(()) + } + + /// A bidder can remove their bid for entry into society. + /// By doing so, they will have their candidate deposit returned or + /// they will unvouch their voucher. + /// + /// Payment: The bid deposit is unreserved if the user made a bid. + /// + /// The dispatch origin for this call must be _Signed_ and a bidder. + /// + /// Parameters: + /// - `pos`: Position in the `Bids` vector of the bid who wants to unbid. + /// + /// # + /// Key: B (len of bids), X (balance unreserve) + /// - One storage read and write to retrieve and update the bids. O(B) + /// - Either one unreserve balance action O(X) or one vouching storage removal. O(1) + /// - One event. + /// + /// Total Complexity: O(B + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] + pub fn unbid(origin, pos: u32) -> DispatchResult { + let who = ensure_signed(origin)?; + + let pos = pos as usize; + >::mutate(|b| + if pos < b.len() && b[pos].who == who { + // Either unreserve the deposit or free up the vouching member. + // In neither case can we do much if the action isn't completable, but there's + // no reason that either should fail. 
+ match b.remove(pos).kind { + BidKind::Deposit(deposit) => { + let _ = T::Currency::unreserve(&who, deposit); + } + BidKind::Vouch(voucher, _) => { + >::remove(&voucher); + } + } + Self::deposit_event(RawEvent::Unbid(who)); + Ok(()) + } else { + Err(Error::::BadPosition)? + } + ) + } + + /// As a member, vouch for someone to join society by placing a bid on their behalf. + /// + /// There is no deposit required to vouch for a new bid, but a member can only vouch for + /// one bid at a time. If the bid becomes a suspended candidate and ultimately rejected by + /// the suspension judgement origin, the member will be banned from vouching again. + /// + /// As a vouching member, you can claim a tip if the candidate is accepted. This tip will + /// be paid as a portion of the reward the member will receive for joining the society. + /// + /// The dispatch origin for this call must be _Signed_ and a member. + /// + /// Parameters: + /// - `who`: The user who you would like to vouch for. + /// - `value`: The total reward to be paid between you and the candidate if they become + /// a member in the society. + /// - `tip`: Your cut of the total `value` payout when the candidate is inducted into + /// the society. Tips larger than `value` will be saturated upon payout. + /// + /// # + /// Key: B (len of bids), C (len of candidates), M (len of members) + /// - Storage Reads: + /// - One storage read to retrieve all members. O(M) + /// - One storage read to check member is not already vouching. O(1) + /// - One storage read to check for suspended candidate. O(1) + /// - One storage read to check for suspended member. O(1) + /// - One storage read to retrieve all current bids. O(B) + /// - One storage read to retrieve all current candidates. O(C) + /// - Storage Writes: + /// - One storage write to insert vouching status to the member. 
O(1) + /// - One storage mutate to add a new bid to the vector O(B) (TODO: possible optimization w/ read) + /// - Up to one storage removal if bid.len() > MAX_BID_COUNT. O(1) + /// - Notable Computation: + /// - O(log M) search to check sender is a member. + /// - O(B + C + log M) search to check user is not already a part of society. + /// - O(log B) search to insert the new bid sorted. + /// - External Module Operations: + /// - One balance reserve operation. O(X) + /// - Up to one balance unreserve operation if bids.len() > MAX_BID_COUNT. + /// - Events: + /// - One event for vouch. + /// - Up to one event for AutoUnbid if bid.len() > MAX_BID_COUNT. + /// + /// Total Complexity: O(M + B + C + logM + logB + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + pub fn vouch(origin, who: T::AccountId, value: BalanceOf, tip: BalanceOf) -> DispatchResult { + let voucher = ensure_signed(origin)?; + // Check user is not suspended. + ensure!(!>::contains_key(&who), Error::::Suspended); + ensure!(!>::contains_key(&who), Error::::Suspended); + // Check user is not a bid or candidate. + let bids = >::get(); + ensure!(!Self::is_bid(&bids, &who), Error::::AlreadyBid); + let candidates = >::get(); + ensure!(!Self::is_candidate(&candidates, &who), Error::::AlreadyCandidate); + // Check user is not already a member. + let members = >::get(); + ensure!(!Self::is_member(&members, &who), Error::::AlreadyMember); + // Check sender can vouch. + ensure!(Self::is_member(&members, &voucher), Error::::NotMember); + ensure!(!>::contains_key(&voucher), Error::::AlreadyVouching); + + >::insert(&voucher, VouchingStatus::Vouching); + Self::put_bid(bids, &who, value.clone(), BidKind::Vouch(voucher.clone(), tip)); + Self::deposit_event(RawEvent::Vouch(who, value, voucher)); + Ok(()) + } + + /// As a vouching member, unvouch a bid. This only works while vouched user is + /// only a bidder (and not a candidate). 
+ /// + /// The dispatch origin for this call must be _Signed_ and a vouching member. + /// + /// Parameters: + /// - `pos`: Position in the `Bids` vector of the bid who should be unvouched. + /// + /// # + /// Key: B (len of bids) + /// - One storage read O(1) to check the signer is a vouching member. + /// - One storage mutate to retrieve and update the bids. O(B) + /// - One vouching storage removal. O(1) + /// - One event. + /// + /// Total Complexity: O(B) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] + pub fn unvouch(origin, pos: u32) -> DispatchResult { + let voucher = ensure_signed(origin)?; + ensure!(Self::vouching(&voucher) == Some(VouchingStatus::Vouching), Error::::NotVouching); + + let pos = pos as usize; + >::mutate(|b| + if pos < b.len() { + b[pos].kind.check_voucher(&voucher)?; + >::remove(&voucher); + let who = b.remove(pos).who; + Self::deposit_event(RawEvent::Unvouch(who)); + Ok(()) + } else { + Err(Error::::BadPosition)? + } + ) + } + + /// As a member, vote on a candidate. + /// + /// The dispatch origin for this call must be _Signed_ and a member. + /// + /// Parameters: + /// - `candidate`: The candidate that the member would like to bid on. + /// - `approve`: A boolean which says if the candidate should be + /// approved (`true`) or rejected (`false`). + /// + /// # + /// Key: C (len of candidates), M (len of members) + /// - One storage read O(M) and O(log M) search to check user is a member. + /// - One account lookup. + /// - One storage read O(C) and O(C) search to check that user is a candidate. + /// - One storage write to add vote to votes. O(1) + /// - One event. 
+ /// + /// Total Complexity: O(M + logM + C) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] + pub fn vote(origin, candidate: ::Source, approve: bool) { + let voter = ensure_signed(origin)?; + let candidate = T::Lookup::lookup(candidate)?; + let candidates = >::get(); + ensure!(Self::is_candidate(&candidates, &candidate), Error::::NotCandidate); + let members = >::get(); + ensure!(Self::is_member(&members, &voter), Error::::NotMember); + + let vote = if approve { Vote::Approve } else { Vote::Reject }; + >::insert(&candidate, &voter, vote); + + Self::deposit_event(RawEvent::Vote(candidate, voter, approve)); + } + + /// As a member, vote on the defender. + /// + /// The dispatch origin for this call must be _Signed_ and a member. + /// + /// Parameters: + /// - `approve`: A boolean which says if the candidate should be + /// approved (`true`) or rejected (`false`). + /// + /// # + /// - Key: M (len of members) + /// - One storage read O(M) and O(log M) search to check user is a member. + /// - One storage write to add vote to votes. O(1) + /// - One event. + /// + /// Total Complexity: O(M + logM) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] + pub fn defender_vote(origin, approve: bool) { + let voter = ensure_signed(origin)?; + let members = >::get(); + ensure!(Self::is_member(&members, &voter), Error::::NotMember); + + let vote = if approve { Vote::Approve } else { Vote::Reject }; + >::insert(&voter, vote); + + Self::deposit_event(RawEvent::DefenderVote(voter, approve)); + } + + /// Transfer the first matured payout for the sender and remove it from the records. + /// + /// NOTE: This extrinsic needs to be called multiple times to claim multiple matured payouts. + /// + /// Payment: The member will receive a payment equal to their first matured + /// payout to their free balance. + /// + /// The dispatch origin for this call must be _Signed_ and a member with + /// payouts remaining. 
+ /// + /// # + /// Key: M (len of members), P (number of payouts for a particular member) + /// - One storage read O(M) and O(log M) search to check signer is a member. + /// - One storage read O(P) to get all payouts for a member. + /// - One storage read O(1) to get the current block number. + /// - One currency transfer call. O(X) + /// - One storage write or removal to update the member's payouts. O(P) + /// + /// Total Complexity: O(M + logM + P + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] + pub fn payout(origin) { + let who = ensure_signed(origin)?; + + let members = >::get(); + ensure!(Self::is_member(&members, &who), Error::::NotMember); + + let mut payouts = >::get(&who); + if let Some((when, amount)) = payouts.first() { + if when <= &>::block_number() { + T::Currency::transfer(&Self::payouts(), &who, *amount, AllowDeath)?; + payouts.remove(0); + if payouts.is_empty() { + >::remove(&who); + } else { + >::insert(&who, payouts); + } + return Ok(()) + } + } + Err(Error::::NoPayout)? + } + + /// Found the society. + /// + /// This is done as a discrete action in order to allow for the + /// module to be included into a running chain and can only be done once. + /// + /// The dispatch origin for this call must be from the _FounderSetOrigin_. + /// + /// Parameters: + /// - `founder` - The first member and head of the newly founded society. + /// - `max_members` - The initial max number of members for the society. + /// - `rules` - The rules of this society concerning membership. + /// + /// # + /// - Two storage mutates to set `Head` and `Founder`. O(1) + /// - One storage write to add the first member to society. O(1) + /// - One event. 
+ /// + /// Total Complexity: O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn found(origin, founder: T::AccountId, max_members: u32, rules: Vec) { + T::FounderSetOrigin::ensure_origin(origin)?; + ensure!(!>::exists(), Error::::AlreadyFounded); + ensure!(max_members > 1, Error::::MaxMembers); + // This should never fail in the context of this function... + >::put(max_members); + Self::add_member(&founder)?; + >::put(&founder); + >::put(&founder); + Rules::::put(T::Hashing::hash(&rules)); + Self::deposit_event(RawEvent::Founded(founder)); + } + + /// Annul the founding of the society. + /// + /// The dispatch origin for this call must be Signed, and the signing account must be both + /// the `Founder` and the `Head`. This implies that it may only be done when there is one + /// member. + /// + /// # + /// - Two storage reads O(1). + /// - Four storage removals O(1). + /// - One event. + /// + /// Total Complexity: O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(20_000_000)] + fn unfound(origin) { + let founder = ensure_signed(origin)?; + ensure!(Founder::::get() == Some(founder.clone()), Error::::NotFounder); + ensure!(Head::::get() == Some(founder.clone()), Error::::NotHead); + + Members::::kill(); + Head::::kill(); + Founder::::kill(); + Rules::::kill(); + Candidates::::kill(); + SuspendedCandidates::::remove_all(); + Self::deposit_event(RawEvent::Unfounded(founder)); + } + + /// Allow suspension judgement origin to make judgement on a suspended member. + /// + /// If a suspended member is forgiven, we simply add them back as a member, not affecting + /// any of the existing storage items for that member. + /// + /// If a suspended member is rejected, remove all associated storage items, including + /// their payouts, and remove any vouched bids they currently have. + /// + /// The dispatch origin for this call must be from the _SuspensionJudgementOrigin_. 
+ /// + /// Parameters: + /// - `who` - The suspended member to be judged. + /// - `forgive` - A boolean representing whether the suspension judgement origin + /// forgives (`true`) or rejects (`false`) a suspended member. + /// + /// # + /// Key: B (len of bids), M (len of members) + /// - One storage read to check `who` is a suspended member. O(1) + /// - Up to one storage write O(M) with O(log M) binary search to add a member back to society. + /// - Up to 3 storage removals O(1) to clean up a removed member. + /// - Up to one storage write O(B) with O(B) search to remove vouched bid from bids. + /// - Up to one additional event if unvouch takes place. + /// - One storage removal. O(1) + /// - One event for the judgement. + /// + /// Total Complexity: O(M + logM + B) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(30_000_000)] + fn judge_suspended_member(origin, who: T::AccountId, forgive: bool) { + T::SuspensionJudgementOrigin::ensure_origin(origin)?; + ensure!(>::contains_key(&who), Error::::NotSuspended); + + if forgive { + // Try to add member back to society. Can fail with `MaxMembers` limit. + Self::add_member(&who)?; + } else { + // Cancel a suspended member's membership, remove their payouts. + >::remove(&who); + >::remove(&who); + // Remove their vouching status, potentially unbanning them in the future. + if >::take(&who) == Some(VouchingStatus::Vouching) { + // Try to remove their bid if they are vouching. + // If their vouch is already a candidate, do nothing. + >::mutate(|bids| + // Try to find the matching bid + if let Some(pos) = bids.iter().position(|b| b.kind.check_voucher(&who).is_ok()) { + // Remove the bid, and emit an event + let vouched = bids.remove(pos).who; + Self::deposit_event(RawEvent::Unvouch(vouched)); + } + ); + } + } + + >::remove(&who); + Self::deposit_event(RawEvent::SuspendedMemberJudgement(who, forgive)); + } + + /// Allow suspended judgement origin to make judgement on a suspended candidate. 
+ /// + /// If the judgement is `Approve`, we add them to society as a member with the appropriate + /// payment for joining society. + /// + /// If the judgement is `Reject`, we either slash the deposit of the bid, giving it back + /// to the society treasury, or we ban the voucher from vouching again. + /// + /// If the judgement is `Rebid`, we put the candidate back in the bid pool and let them go + /// through the induction process again. + /// + /// The dispatch origin for this call must be from the _SuspensionJudgementOrigin_. + /// + /// Parameters: + /// - `who` - The suspended candidate to be judged. + /// - `judgement` - `Approve`, `Reject`, or `Rebid`. + /// + /// # + /// Key: B (len of bids), M (len of members), X (balance action) + /// - One storage read to check `who` is a suspended candidate. + /// - One storage removal of the suspended candidate. + /// - Approve Logic + /// - One storage read to get the available pot to pay users with. O(1) + /// - One storage write to update the available pot. O(1) + /// - One storage read to get the current block number. O(1) + /// - One storage read to get all members. O(M) + /// - Up to one unreserve currency action. + /// - Up to two new storage writes to payouts. + /// - Up to one storage write with O(log M) binary search to add a member to society. + /// - Reject Logic + /// - Up to one repatriate reserved currency action. O(X) + /// - Up to one storage write to ban the vouching member from vouching again. + /// - Rebid Logic + /// - Storage mutate with O(log B) binary search to place the user back into bids. + /// - Up to one additional event if unvouch takes place. + /// - One storage removal. + /// - One event for the judgement. 
+ /// + /// Total Complexity: O(M + logM + B + X) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn judge_suspended_candidate(origin, who: T::AccountId, judgement: Judgement) { + T::SuspensionJudgementOrigin::ensure_origin(origin)?; + if let Some((value, kind)) = >::get(&who) { + match judgement { + Judgement::Approve => { + // Suspension Judgement origin has approved this candidate + // Make sure we can pay them + let pot = Self::pot(); + ensure!(pot >= value, Error::::InsufficientPot); + // Try to add user as a member! Can fail with `MaxMember` limit. + Self::add_member(&who)?; + // Reduce next pot by payout + >::put(pot - value); + // Add payout for new candidate + let maturity = >::block_number() + + Self::lock_duration(Self::members().len() as u32); + Self::pay_accepted_candidate(&who, value, kind, maturity); + } + Judgement::Reject => { + // Founder has rejected this candidate + match kind { + BidKind::Deposit(deposit) => { + // Slash deposit and move it to the society account + let _ = T::Currency::repatriate_reserved(&who, &Self::account_id(), deposit, BalanceStatus::Free); + } + BidKind::Vouch(voucher, _) => { + // Ban the voucher from vouching again + >::insert(&voucher, VouchingStatus::Banned); + } + } + } + Judgement::Rebid => { + // Founder has taken no judgement, and candidate is placed back into the pool. + let bids = >::get(); + Self::put_bid(bids, &who, value, kind); + } + } + + // Remove suspended candidate + >::remove(who); + } else { + Err(Error::::NotSuspended)? + } + } + + /// Allows root origin to change the maximum number of members in society. + /// Max membership count must be greater than 1. + /// + /// The dispatch origin for this call must be from _ROOT_. + /// + /// Parameters: + /// - `max` - The maximum number of members for the society. + /// + /// # + /// - One storage write to update the max. O(1) + /// - One event. 
+ /// + /// Total Complexity: O(1) + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn set_max_members(origin, max: u32) { + ensure_root(origin)?; + ensure!(max > 1, Error::::MaxMembers); + MaxMembers::::put(max); + Self::deposit_event(RawEvent::NewMaxMembers(max)); + } + + fn on_initialize(n: T::BlockNumber) -> Weight { + let mut members = vec![]; + + // Run a candidate/membership rotation + if (n % T::RotationPeriod::get()).is_zero() { + members = >::get(); + Self::rotate_period(&mut members); + } + + // Run a challenge rotation + if (n % T::ChallengePeriod::get()).is_zero() { + // Only read members if not already read. + if members.is_empty() { + members = >::get(); + } + Self::rotate_challenge(&mut members); + } + + MINIMUM_WEIGHT + } + } } decl_error! { - /// Errors for this module. - pub enum Error for Module, I: Instance> { - /// An incorrect position was provided. - BadPosition, - /// User is not a member. - NotMember, - /// User is already a member. - AlreadyMember, - /// User is suspended. - Suspended, - /// User is not suspended. - NotSuspended, - /// Nothing to payout. - NoPayout, - /// Society already founded. - AlreadyFounded, - /// Not enough in pot to accept candidate. - InsufficientPot, - /// Member is already vouching or banned from vouching again. - AlreadyVouching, - /// Member is not vouching. - NotVouching, - /// Cannot remove the head of the chain. - Head, - /// Cannot remove the founder. - Founder, - /// User has already made a bid. - AlreadyBid, - /// User is already a candidate. - AlreadyCandidate, - /// User is not a candidate. - NotCandidate, - /// Too many members in the society. - MaxMembers, - /// The caller is not the founder. - NotFounder, - /// The caller is not the head. - NotHead, - } + /// Errors for this module. + pub enum Error for Module, I: Instance> { + /// An incorrect position was provided. + BadPosition, + /// User is not a member. + NotMember, + /// User is already a member. 
+ AlreadyMember, + /// User is suspended. + Suspended, + /// User is not suspended. + NotSuspended, + /// Nothing to payout. + NoPayout, + /// Society already founded. + AlreadyFounded, + /// Not enough in pot to accept candidate. + InsufficientPot, + /// Member is already vouching or banned from vouching again. + AlreadyVouching, + /// Member is not vouching. + NotVouching, + /// Cannot remove the head of the chain. + Head, + /// Cannot remove the founder. + Founder, + /// User has already made a bid. + AlreadyBid, + /// User is already a candidate. + AlreadyCandidate, + /// User is not a candidate. + NotCandidate, + /// Too many members in the society. + MaxMembers, + /// The caller is not the founder. + NotFounder, + /// The caller is not the head. + NotHead, + } } decl_event! { - /// Events for this module. - pub enum Event where - AccountId = ::AccountId, - Balance = BalanceOf - { - /// The society is founded by the given identity. - Founded(AccountId), - /// A membership bid just happened. The given account is the candidate's ID and their offer - /// is the second. - Bid(AccountId, Balance), - /// A membership bid just happened by vouching. The given account is the candidate's ID and - /// their offer is the second. The vouching party is the third. - Vouch(AccountId, Balance, AccountId), - /// A candidate was dropped (due to an excess of bids in the system). - AutoUnbid(AccountId), - /// A candidate was dropped (by their request). - Unbid(AccountId), - /// A candidate was dropped (by request of who vouched for them). - Unvouch(AccountId), - /// A group of candidates have been inducted. The batch's primary is the first value, the - /// batch in full is the second. 
- Inducted(AccountId, Vec), - /// A suspended member has been judged - SuspendedMemberJudgement(AccountId, bool), - /// A candidate has been suspended - CandidateSuspended(AccountId), - /// A member has been suspended - MemberSuspended(AccountId), - /// A member has been challenged - Challenged(AccountId), - /// A vote has been placed (candidate, voter, vote) - Vote(AccountId, AccountId, bool), - /// A vote has been placed for a defending member (voter, vote) - DefenderVote(AccountId, bool), - /// A new max member count has been set - NewMaxMembers(u32), - /// Society is unfounded. - Unfounded(AccountId), - } + /// Events for this module. + pub enum Event where + AccountId = ::AccountId, + Balance = BalanceOf + { + /// The society is founded by the given identity. + Founded(AccountId), + /// A membership bid just happened. The given account is the candidate's ID and their offer + /// is the second. + Bid(AccountId, Balance), + /// A membership bid just happened by vouching. The given account is the candidate's ID and + /// their offer is the second. The vouching party is the third. + Vouch(AccountId, Balance, AccountId), + /// A candidate was dropped (due to an excess of bids in the system). + AutoUnbid(AccountId), + /// A candidate was dropped (by their request). + Unbid(AccountId), + /// A candidate was dropped (by request of who vouched for them). + Unvouch(AccountId), + /// A group of candidates have been inducted. The batch's primary is the first value, the + /// batch in full is the second. 
+ Inducted(AccountId, Vec), + /// A suspended member has been judged + SuspendedMemberJudgement(AccountId, bool), + /// A candidate has been suspended + CandidateSuspended(AccountId), + /// A member has been suspended + MemberSuspended(AccountId), + /// A member has been challenged + Challenged(AccountId), + /// A vote has been placed (candidate, voter, vote) + Vote(AccountId, AccountId, bool), + /// A vote has been placed for a defending member (voter, vote) + DefenderVote(AccountId, bool), + /// A new max member count has been set + NewMaxMembers(u32), + /// Society is unfounded. + Unfounded(AccountId), + } } /// Simple ensure origin struct to filter for the founder account. pub struct EnsureFounder(sp_std::marker::PhantomData); impl EnsureOrigin for EnsureFounder { - type Success = T::AccountId; - fn try_origin(o: T::Origin) -> Result { - o.into().and_then(|o| match (o, Founder::::get()) { - (system::RawOrigin::Signed(ref who), Some(ref f)) if who == f => Ok(who.clone()), - (r, _) => Err(T::Origin::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> T::Origin { - let founder = Founder::::get().expect("society founder should exist"); - T::Origin::from(system::RawOrigin::Signed(founder)) - } + type Success = T::AccountId; + fn try_origin(o: T::Origin) -> Result { + o.into().and_then(|o| match (o, Founder::::get()) { + (system::RawOrigin::Signed(ref who), Some(ref f)) if who == f => Ok(who.clone()), + (r, _) => Err(T::Origin::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> T::Origin { + let founder = Founder::::get().expect("society founder should exist"); + T::Origin::from(system::RawOrigin::Signed(founder)) + } } /// Pick an item at pseudo-random from the slice, given the `rng`. `None` iff the slice is empty. 
fn pick_item<'a, R: RngCore, T>(rng: &mut R, items: &'a [T]) -> Option<&'a T> { - if items.is_empty() { - None - } else { - Some(&items[pick_usize(rng, items.len() - 1)]) - } + if items.is_empty() { + None + } else { + Some(&items[pick_usize(rng, items.len() - 1)]) + } } /// Pick a new PRN, in the range [0, `max`] (inclusive). fn pick_usize<'a, R: RngCore>(rng: &mut R, max: usize) -> usize { - - (rng.next_u32() % (max as u32 + 1)) as usize + (rng.next_u32() % (max as u32 + 1)) as usize } impl, I: Instance> Module { - /// Puts a bid into storage ordered by smallest to largest value. - /// Allows a maximum of 1000 bids in queue, removing largest value people first. - fn put_bid( - mut bids: Vec>>, - who: &T::AccountId, - value: BalanceOf, - bid_kind: BidKind> - ) { - const MAX_BID_COUNT: usize = 1000; - - match bids.binary_search_by(|bid| bid.value.cmp(&value)) { - // Insert new elements after the existing ones. This ensures new bids - // with the same bid value are further down the list than existing ones. - Ok(pos) => { - let different_bid = bids.iter() - // Easily extract the index we are on - .enumerate() - // Skip ahead to the suggested position - .skip(pos) - // Keep skipping ahead until the position changes - .skip_while(|(_, x)| x.value <= bids[pos].value) - // Get the element when things changed - .next(); - // If the element is not at the end of the list, insert the new element - // in the spot. - if let Some((p, _)) = different_bid { - bids.insert(p, Bid { - value, - who: who.clone(), - kind: bid_kind, - }); - // If the element is at the end of the list, push the element on the end. - } else { - bids.push(Bid { - value, - who: who.clone(), - kind: bid_kind, - }); - } - }, - Err(pos) => bids.insert(pos, Bid { - value, - who: who.clone(), - kind: bid_kind, - }), - } - // Keep it reasonably small. - if bids.len() > MAX_BID_COUNT { - let Bid { who: popped, kind, .. 
} = bids.pop().expect("b.len() > 1000; qed"); - match kind { - BidKind::Deposit(deposit) => { - let _ = T::Currency::unreserve(&popped, deposit); - } - BidKind::Vouch(voucher, _) => { - >::remove(&voucher); - } - } - Self::deposit_event(RawEvent::AutoUnbid(popped)); - } - - >::put(bids); - } - - /// Check a user is a bid. - fn is_bid(bids: &Vec>>, who: &T::AccountId) -> bool { - // Bids are ordered by `value`, so we cannot binary search for a user. - bids.iter().find(|bid| bid.who == *who).is_some() - } - - /// Check a user is a candidate. - fn is_candidate(candidates: &Vec>>, who: &T::AccountId) -> bool { - // Looking up a candidate is the same as looking up a bid - Self::is_bid(candidates, who) - } - - /// Check a user is a member. - fn is_member(members: &Vec, who: &T::AccountId) -> bool { - members.binary_search(who).is_ok() - } - - /// Add a member to the sorted members list. If the user is already a member, do nothing. - /// Can fail when `MaxMember` limit is reached, but has no side-effects. - fn add_member(who: &T::AccountId) -> DispatchResult { - let mut members = >::get(); - ensure!(members.len() < MaxMembers::::get() as usize, Error::::MaxMembers); - match members.binary_search(who) { - // Add the new member - Err(i) => { - members.insert(i, who.clone()); - T::MembershipChanged::change_members_sorted(&[who.clone()], &[], &members); - >::put(members); - Ok(()) - }, - // User is already a member, do nothing. - Ok(_) => Ok(()), - } - } - - /// Remove a member from the members list, except the Head. - /// - /// NOTE: This does not correctly clean up a member from storage. It simply - /// removes them from the Members storage item. 
- pub fn remove_member(m: &T::AccountId) -> DispatchResult { - ensure!(Self::head() != Some(m.clone()), Error::::Head); - ensure!(Self::founder() != Some(m.clone()), Error::::Founder); - - let mut members = >::get(); - match members.binary_search(&m) { - Err(_) => Err(Error::::NotMember)?, - Ok(i) => { - members.remove(i); - T::MembershipChanged::change_members_sorted(&[], &[m.clone()], &members[..]); - >::put(members); - Ok(()) - } - } - } - - /// End the current period and begin a new one. - fn rotate_period(members: &mut Vec) { - let phrase = b"society_rotation"; - - let mut pot = >::get(); - - // we'll need a random seed here. - let seed = T::Randomness::random(phrase); - // seed needs to be guaranteed to be 32 bytes. - let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) - .expect("input is padded with zeroes; qed"); - let mut rng = ChaChaRng::from_seed(seed); - - // we assume there's at least one member or this logic won't work. - if !members.is_empty() { - let candidates = >::take(); - // NOTE: This may cause member length to surpass `MaxMembers`, but results in no consensus - // critical issues or side-effects. This is auto-correcting as members fall out of society. - members.reserve(candidates.len()); - - let maturity = >::block_number() - + Self::lock_duration(members.len() as u32); - - let mut rewardees = Vec::new(); - let mut total_approvals = 0; - let mut total_slash = >::zero(); - let mut total_payouts = >::zero(); - - let accepted = candidates.into_iter().filter_map(|Bid {value, who: candidate, kind }| { - let mut approval_count = 0; - - // Creates a vector of (vote, member) for the given candidate - // and tallies total number of approve votes for that candidate. - let votes = members.iter() - .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) - .inspect(|&(v, _)| if v == Vote::Approve { approval_count += 1 }) - .collect::>(); - - // Select one of the votes at random. 
- // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. - let is_accepted = pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); - - let matching_vote = if is_accepted { Vote::Approve } else { Vote::Reject }; - - let bad_vote = |m: &T::AccountId| { - // Voter voted wrong way (or was just a lazy skeptic) then reduce their payout - // and increase their strikes. after MaxStrikes then they go into suspension. - let amount = Self::slash_payout(m, T::WrongSideDeduction::get()); - - let strikes = >::mutate(m, |s| { - *s += 1; - *s - }); - if strikes >= T::MaxStrikes::get() { - Self::suspend_member(m); - } - amount - }; - - // Collect the voters who had a matching vote. - rewardees.extend(votes.into_iter() - .filter_map(|(v, m)| - if v == matching_vote { Some(m) } else { - total_slash += bad_vote(m); - None - } - ).cloned() - ); - - if is_accepted { - total_approvals += approval_count; - total_payouts += value; - members.push(candidate.clone()); - - Self::pay_accepted_candidate(&candidate, value, kind, maturity); - - // We track here the total_approvals so that every candidate has a unique range - // of numbers from 0 to `total_approvals` with length `approval_count` so each - // candidate is proportionally represented when selecting a "primary" below. - Some((candidate, total_approvals, value)) - } else { - // Suspend Candidate - >::insert(&candidate, (value, kind)); - Self::deposit_event(RawEvent::CandidateSuspended(candidate)); - None - } - }).collect::>(); - - // Clean up all votes. - >::remove_all(); - - // Reward one of the voters who voted the right way. - if !total_slash.is_zero() { - if let Some(winner) = pick_item(&mut rng, &rewardees) { - // If we can't reward them, not much that can be done. - Self::bump_payout(winner, maturity, total_slash); - } else { - // Move the slashed amount back from payouts account to local treasury. 
- let _ = T::Currency::transfer(&Self::payouts(), &Self::account_id(), total_slash, AllowDeath); - } - } - - // Fund the total payouts from the local treasury. - if !total_payouts.is_zero() { - // remove payout from pot and shift needed funds to the payout account. - pot = pot.saturating_sub(total_payouts); - - // this should never fail since we ensure we can afford the payouts in a previous - // block, but there's not much we can do to recover if it fails anyway. - let _ = T::Currency::transfer(&Self::account_id(), &Self::payouts(), total_payouts, AllowDeath); - } - - // if at least one candidate was accepted... - if !accepted.is_empty() { - // select one as primary, randomly chosen from the accepted, weighted by approvals. - // Choose a random number between 0 and `total_approvals` - let primary_point = pick_usize(&mut rng, total_approvals - 1); - // Find the zero bid or the user who falls on that point - let primary = accepted.iter().find(|e| e.2.is_zero() || e.1 > primary_point) - .expect("e.1 of final item == total_approvals; \ - worst case find will always return that item; qed") - .0.clone(); - - let accounts = accepted.into_iter().map(|x| x.0).collect::>(); - - // Then write everything back out, signal the changed membership and leave an event. - members.sort(); - // NOTE: This may cause member length to surpass `MaxMembers`, but results in no consensus - // critical issues or side-effects. This is auto-correcting as members fall out of society. - >::put(&members[..]); - >::put(&primary); - - T::MembershipChanged::change_members_sorted(&accounts, &[], &members); - Self::deposit_event(RawEvent::Inducted(primary, accounts)); - } - - // Bump the pot by at most PeriodSpend, but less if there's not very much left in our - // account. 
- let unaccounted = T::Currency::free_balance(&Self::account_id()).saturating_sub(pot); - pot += T::PeriodSpend::get().min(unaccounted / 2u8.into()); - - >::put(&pot); - } - - // Setup the candidates for the new intake - let candidates = Self::take_selected(members.len(), pot); - >::put(&candidates); - - // Select sqrt(n) random members from the society and make them skeptics. - let pick_member = |_| pick_item(&mut rng, &members[..]).expect("exited if members empty; qed"); - for skeptic in (0..members.len().integer_sqrt()).map(pick_member) { - for Bid{ who: c, .. } in candidates.iter() { - >::insert(c, skeptic, Vote::Skeptic); - } - } - } - - /// Attempt to slash the payout of some member. Return the total amount that was deducted. - fn slash_payout(who: &T::AccountId, value: BalanceOf) -> BalanceOf { - let mut rest = value; - let mut payouts = >::get(who); - if !payouts.is_empty() { - let mut dropped = 0; - for (_, amount) in payouts.iter_mut() { - if let Some(new_rest) = rest.checked_sub(&amount) { - // not yet totally slashed after this one; drop it completely. - rest = new_rest; - dropped += 1; - } else { - // whole slash is accounted for. - *amount -= rest; - rest = Zero::zero(); - break; - } - } - >::insert(who, &payouts[dropped..]); - } - value - rest - } - - /// Bump the payout amount of `who`, to be unlocked at the given block number. - fn bump_payout(who: &T::AccountId, when: T::BlockNumber, value: BalanceOf) { - if !value.is_zero(){ - >::mutate(who, |payouts| match payouts.binary_search_by_key(&when, |x| x.0) { - Ok(index) => payouts[index].1 += value, - Err(index) => payouts.insert(index, (when, value)), - }); - } - } - - /// Suspend a user, removing them from the member list. - fn suspend_member(who: &T::AccountId) { - if Self::remove_member(&who).is_ok() { - >::insert(who, true); - >::remove(who); - Self::deposit_event(RawEvent::MemberSuspended(who.clone())); - } - } - - /// Pay an accepted candidate their bid value. 
- fn pay_accepted_candidate( - candidate: &T::AccountId, - value: BalanceOf, - kind: BidKind>, - maturity: T::BlockNumber, - ) { - let value = match kind { - BidKind::Deposit(deposit) => { - // In the case that a normal deposit bid is accepted we unreserve - // the deposit. - let _ = T::Currency::unreserve(candidate, deposit); - value - } - BidKind::Vouch(voucher, tip) => { - // Check that the voucher is still vouching, else some other logic may have removed their status. - if >::take(&voucher) == Some(VouchingStatus::Vouching) { - // In the case that a vouched-for bid is accepted we unset the - // vouching status and transfer the tip over to the voucher. - Self::bump_payout(&voucher, maturity, tip.min(value)); - value.saturating_sub(tip) - } else { - value - } - } - }; - - Self::bump_payout(candidate, maturity, value); - } - - /// End the current challenge period and start a new one. - fn rotate_challenge(members: &mut Vec) { - // Assume there are members, else don't run this logic. - if !members.is_empty() { - // End current defender rotation - if let Some(defender) = Self::defender() { - let mut approval_count = 0; - let mut rejection_count = 0; - // Tallies total number of approve and reject votes for the defender. - members.iter() - .filter_map(|m| >::take(m)) - .for_each(|v| { - match v { - Vote::Approve => approval_count += 1, - _ => rejection_count += 1, - } - }); - - if approval_count <= rejection_count { - // User has failed the challenge - Self::suspend_member(&defender); - *members = Self::members(); - } - - // Clean up all votes. - >::remove_all(); - } - - // Avoid challenging if there's only two members since we never challenge the Head or - // the Founder. - if members.len() > 2 { - // Start a new defender rotation - let phrase = b"society_challenge"; - // we'll need a random seed here. - let seed = T::Randomness::random(phrase); - // seed needs to be guaranteed to be 32 bytes. 
- let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) - .expect("input is padded with zeroes; qed"); - let mut rng = ChaChaRng::from_seed(seed); - let chosen = pick_item(&mut rng, &members[1..members.len() - 1]) - .expect("exited if members empty; qed"); - >::put(&chosen); - Self::deposit_event(RawEvent::Challenged(chosen.clone())); - } else { - >::kill(); - } - } - } - - /// The account ID of the treasury pot. - /// - /// This actually does computation. If you need to keep using it, then make sure you cache the - /// value and only call this once. - pub fn account_id() -> T::AccountId { - MODULE_ID.into_account() - } - - /// The account ID of the payouts pot. This is where payouts are made from. - /// - /// This actually does computation. If you need to keep using it, then make sure you cache the - /// value and only call this once. - pub fn payouts() -> T::AccountId { - MODULE_ID.into_sub_account(b"payouts") - } - - /// Return the duration of the lock, in blocks, with the given number of members. - /// - /// This is a rather opaque calculation based on the formula here: - /// https://www.desmos.com/calculator/9itkal1tce - fn lock_duration(x: u32) -> T::BlockNumber { - let lock_pc = 100 - 50_000 / (x + 500); - Percent::from_percent(lock_pc as u8) * T::MaxLockDuration::get() - } - - /// Get a selection of bidding accounts such that the total bids is no greater than `Pot` and - /// the number of bids would not surpass `MaxMembers` if all were accepted. - /// - /// May be empty. - pub fn take_selected( - members_len: usize, - pot: BalanceOf - ) -> Vec>> { - let max_members = MaxMembers::::get() as usize; - // No more than 10 will be returned. - let mut max_selections: usize = 10.min(max_members.saturating_sub(members_len)); - - if max_selections > 0 { - // Get the number of left-most bidders whose bids add up to less than `pot`. 
- let mut bids = >::get(); - - // The list of selected candidates - let mut selected = Vec::new(); - - if bids.len() > 0 { - // Can only select at most the length of bids - max_selections = max_selections.min(bids.len()); - // Number of selected bids so far - let mut count = 0; - // Check if we have already selected a candidate with zero bid - let mut zero_selected = false; - // A running total of the cost to onboard these bids - let mut total_cost: BalanceOf = Zero::zero(); - - bids.retain(|bid| { - if count < max_selections { - // Handle zero bids. We only want one of them. - if bid.value.is_zero() { - // Select only the first zero bid - if !zero_selected { - selected.push(bid.clone()); - zero_selected = true; - count += 1; - return false - } - } else { - total_cost += bid.value; - // Select only as many users as the pot can support. - if total_cost <= pot { - selected.push(bid.clone()); - count += 1; - return false - } - } - } - true - }); - - // No need to reset Bids if we're not taking anything. - if count > 0 { - >::put(bids); - } - } - selected - } else { - vec![] - } - } + /// Puts a bid into storage ordered by smallest to largest value. + /// Allows a maximum of 1000 bids in queue, removing largest value people first. + fn put_bid( + mut bids: Vec>>, + who: &T::AccountId, + value: BalanceOf, + bid_kind: BidKind>, + ) { + const MAX_BID_COUNT: usize = 1000; + + match bids.binary_search_by(|bid| bid.value.cmp(&value)) { + // Insert new elements after the existing ones. This ensures new bids + // with the same bid value are further down the list than existing ones. 
+ Ok(pos) => { + let different_bid = bids + .iter() + // Easily extract the index we are on + .enumerate() + // Skip ahead to the suggested position + .skip(pos) + // Keep skipping ahead until the position changes + .skip_while(|(_, x)| x.value <= bids[pos].value) + // Get the element when things changed + .next(); + // If the element is not at the end of the list, insert the new element + // in the spot. + if let Some((p, _)) = different_bid { + bids.insert( + p, + Bid { + value, + who: who.clone(), + kind: bid_kind, + }, + ); + // If the element is at the end of the list, push the element on the end. + } else { + bids.push(Bid { + value, + who: who.clone(), + kind: bid_kind, + }); + } + } + Err(pos) => bids.insert( + pos, + Bid { + value, + who: who.clone(), + kind: bid_kind, + }, + ), + } + // Keep it reasonably small. + if bids.len() > MAX_BID_COUNT { + let Bid { + who: popped, kind, .. + } = bids.pop().expect("b.len() > 1000; qed"); + match kind { + BidKind::Deposit(deposit) => { + let _ = T::Currency::unreserve(&popped, deposit); + } + BidKind::Vouch(voucher, _) => { + >::remove(&voucher); + } + } + Self::deposit_event(RawEvent::AutoUnbid(popped)); + } + + >::put(bids); + } + + /// Check a user is a bid. + fn is_bid(bids: &Vec>>, who: &T::AccountId) -> bool { + // Bids are ordered by `value`, so we cannot binary search for a user. + bids.iter().find(|bid| bid.who == *who).is_some() + } + + /// Check a user is a candidate. + fn is_candidate( + candidates: &Vec>>, + who: &T::AccountId, + ) -> bool { + // Looking up a candidate is the same as looking up a bid + Self::is_bid(candidates, who) + } + + /// Check a user is a member. + fn is_member(members: &Vec, who: &T::AccountId) -> bool { + members.binary_search(who).is_ok() + } + + /// Add a member to the sorted members list. If the user is already a member, do nothing. + /// Can fail when `MaxMember` limit is reached, but has no side-effects. 
+ fn add_member(who: &T::AccountId) -> DispatchResult { + let mut members = >::get(); + ensure!( + members.len() < MaxMembers::::get() as usize, + Error::::MaxMembers + ); + match members.binary_search(who) { + // Add the new member + Err(i) => { + members.insert(i, who.clone()); + T::MembershipChanged::change_members_sorted(&[who.clone()], &[], &members); + >::put(members); + Ok(()) + } + // User is already a member, do nothing. + Ok(_) => Ok(()), + } + } + + /// Remove a member from the members list, except the Head. + /// + /// NOTE: This does not correctly clean up a member from storage. It simply + /// removes them from the Members storage item. + pub fn remove_member(m: &T::AccountId) -> DispatchResult { + ensure!(Self::head() != Some(m.clone()), Error::::Head); + ensure!(Self::founder() != Some(m.clone()), Error::::Founder); + + let mut members = >::get(); + match members.binary_search(&m) { + Err(_) => Err(Error::::NotMember)?, + Ok(i) => { + members.remove(i); + T::MembershipChanged::change_members_sorted(&[], &[m.clone()], &members[..]); + >::put(members); + Ok(()) + } + } + } + + /// End the current period and begin a new one. + fn rotate_period(members: &mut Vec) { + let phrase = b"society_rotation"; + + let mut pot = >::get(); + + // we'll need a random seed here. + let seed = T::Randomness::random(phrase); + // seed needs to be guaranteed to be 32 bytes. + let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) + .expect("input is padded with zeroes; qed"); + let mut rng = ChaChaRng::from_seed(seed); + + // we assume there's at least one member or this logic won't work. + if !members.is_empty() { + let candidates = >::take(); + // NOTE: This may cause member length to surpass `MaxMembers`, but results in no consensus + // critical issues or side-effects. This is auto-correcting as members fall out of society. 
+ members.reserve(candidates.len()); + + let maturity = + >::block_number() + Self::lock_duration(members.len() as u32); + + let mut rewardees = Vec::new(); + let mut total_approvals = 0; + let mut total_slash = >::zero(); + let mut total_payouts = >::zero(); + + let accepted = candidates + .into_iter() + .filter_map( + |Bid { + value, + who: candidate, + kind, + }| { + let mut approval_count = 0; + + // Creates a vector of (vote, member) for the given candidate + // and tallies total number of approve votes for that candidate. + let votes = members + .iter() + .filter_map(|m| >::take(&candidate, m).map(|v| (v, m))) + .inspect(|&(v, _)| { + if v == Vote::Approve { + approval_count += 1 + } + }) + .collect::>(); + + // Select one of the votes at random. + // Note that `Vote::Skeptical` and `Vote::Reject` both reject the candidate. + let is_accepted = + pick_item(&mut rng, &votes).map(|x| x.0) == Some(Vote::Approve); + + let matching_vote = if is_accepted { + Vote::Approve + } else { + Vote::Reject + }; + + let bad_vote = |m: &T::AccountId| { + // Voter voted wrong way (or was just a lazy skeptic) then reduce their payout + // and increase their strikes. after MaxStrikes then they go into suspension. + let amount = Self::slash_payout(m, T::WrongSideDeduction::get()); + + let strikes = >::mutate(m, |s| { + *s += 1; + *s + }); + if strikes >= T::MaxStrikes::get() { + Self::suspend_member(m); + } + amount + }; + + // Collect the voters who had a matching vote. 
+ rewardees.extend( + votes + .into_iter() + .filter_map(|(v, m)| { + if v == matching_vote { + Some(m) + } else { + total_slash += bad_vote(m); + None + } + }) + .cloned(), + ); + + if is_accepted { + total_approvals += approval_count; + total_payouts += value; + members.push(candidate.clone()); + + Self::pay_accepted_candidate(&candidate, value, kind, maturity); + + // We track here the total_approvals so that every candidate has a unique range + // of numbers from 0 to `total_approvals` with length `approval_count` so each + // candidate is proportionally represented when selecting a "primary" below. + Some((candidate, total_approvals, value)) + } else { + // Suspend Candidate + >::insert(&candidate, (value, kind)); + Self::deposit_event(RawEvent::CandidateSuspended(candidate)); + None + } + }, + ) + .collect::>(); + + // Clean up all votes. + >::remove_all(); + + // Reward one of the voters who voted the right way. + if !total_slash.is_zero() { + if let Some(winner) = pick_item(&mut rng, &rewardees) { + // If we can't reward them, not much that can be done. + Self::bump_payout(winner, maturity, total_slash); + } else { + // Move the slashed amount back from payouts account to local treasury. + let _ = T::Currency::transfer( + &Self::payouts(), + &Self::account_id(), + total_slash, + AllowDeath, + ); + } + } + + // Fund the total payouts from the local treasury. + if !total_payouts.is_zero() { + // remove payout from pot and shift needed funds to the payout account. + pot = pot.saturating_sub(total_payouts); + + // this should never fail since we ensure we can afford the payouts in a previous + // block, but there's not much we can do to recover if it fails anyway. + let _ = T::Currency::transfer( + &Self::account_id(), + &Self::payouts(), + total_payouts, + AllowDeath, + ); + } + + // if at least one candidate was accepted... + if !accepted.is_empty() { + // select one as primary, randomly chosen from the accepted, weighted by approvals. 
+ // Choose a random number between 0 and `total_approvals` + let primary_point = pick_usize(&mut rng, total_approvals - 1); + // Find the zero bid or the user who falls on that point + let primary = accepted + .iter() + .find(|e| e.2.is_zero() || e.1 > primary_point) + .expect( + "e.1 of final item == total_approvals; \ + worst case find will always return that item; qed", + ) + .0 + .clone(); + + let accounts = accepted.into_iter().map(|x| x.0).collect::>(); + + // Then write everything back out, signal the changed membership and leave an event. + members.sort(); + // NOTE: This may cause member length to surpass `MaxMembers`, but results in no consensus + // critical issues or side-effects. This is auto-correcting as members fall out of society. + >::put(&members[..]); + >::put(&primary); + + T::MembershipChanged::change_members_sorted(&accounts, &[], &members); + Self::deposit_event(RawEvent::Inducted(primary, accounts)); + } + + // Bump the pot by at most PeriodSpend, but less if there's not very much left in our + // account. + let unaccounted = T::Currency::free_balance(&Self::account_id()).saturating_sub(pot); + pot += T::PeriodSpend::get().min(unaccounted / 2u8.into()); + + >::put(&pot); + } + + // Setup the candidates for the new intake + let candidates = Self::take_selected(members.len(), pot); + >::put(&candidates); + + // Select sqrt(n) random members from the society and make them skeptics. + let pick_member = + |_| pick_item(&mut rng, &members[..]).expect("exited if members empty; qed"); + for skeptic in (0..members.len().integer_sqrt()).map(pick_member) { + for Bid { who: c, .. } in candidates.iter() { + >::insert(c, skeptic, Vote::Skeptic); + } + } + } + + /// Attempt to slash the payout of some member. Return the total amount that was deducted. 
+ fn slash_payout(who: &T::AccountId, value: BalanceOf) -> BalanceOf { + let mut rest = value; + let mut payouts = >::get(who); + if !payouts.is_empty() { + let mut dropped = 0; + for (_, amount) in payouts.iter_mut() { + if let Some(new_rest) = rest.checked_sub(&amount) { + // not yet totally slashed after this one; drop it completely. + rest = new_rest; + dropped += 1; + } else { + // whole slash is accounted for. + *amount -= rest; + rest = Zero::zero(); + break; + } + } + >::insert(who, &payouts[dropped..]); + } + value - rest + } + + /// Bump the payout amount of `who`, to be unlocked at the given block number. + fn bump_payout(who: &T::AccountId, when: T::BlockNumber, value: BalanceOf) { + if !value.is_zero() { + >::mutate(who, |payouts| { + match payouts.binary_search_by_key(&when, |x| x.0) { + Ok(index) => payouts[index].1 += value, + Err(index) => payouts.insert(index, (when, value)), + } + }); + } + } + + /// Suspend a user, removing them from the member list. + fn suspend_member(who: &T::AccountId) { + if Self::remove_member(&who).is_ok() { + >::insert(who, true); + >::remove(who); + Self::deposit_event(RawEvent::MemberSuspended(who.clone())); + } + } + + /// Pay an accepted candidate their bid value. + fn pay_accepted_candidate( + candidate: &T::AccountId, + value: BalanceOf, + kind: BidKind>, + maturity: T::BlockNumber, + ) { + let value = match kind { + BidKind::Deposit(deposit) => { + // In the case that a normal deposit bid is accepted we unreserve + // the deposit. + let _ = T::Currency::unreserve(candidate, deposit); + value + } + BidKind::Vouch(voucher, tip) => { + // Check that the voucher is still vouching, else some other logic may have removed their status. + if >::take(&voucher) == Some(VouchingStatus::Vouching) { + // In the case that a vouched-for bid is accepted we unset the + // vouching status and transfer the tip over to the voucher. 
+ Self::bump_payout(&voucher, maturity, tip.min(value)); + value.saturating_sub(tip) + } else { + value + } + } + }; + + Self::bump_payout(candidate, maturity, value); + } + + /// End the current challenge period and start a new one. + fn rotate_challenge(members: &mut Vec) { + // Assume there are members, else don't run this logic. + if !members.is_empty() { + // End current defender rotation + if let Some(defender) = Self::defender() { + let mut approval_count = 0; + let mut rejection_count = 0; + // Tallies total number of approve and reject votes for the defender. + members + .iter() + .filter_map(|m| >::take(m)) + .for_each(|v| match v { + Vote::Approve => approval_count += 1, + _ => rejection_count += 1, + }); + + if approval_count <= rejection_count { + // User has failed the challenge + Self::suspend_member(&defender); + *members = Self::members(); + } + + // Clean up all votes. + >::remove_all(); + } + + // Avoid challenging if there's only two members since we never challenge the Head or + // the Founder. + if members.len() > 2 { + // Start a new defender rotation + let phrase = b"society_challenge"; + // we'll need a random seed here. + let seed = T::Randomness::random(phrase); + // seed needs to be guaranteed to be 32 bytes. + let seed = <[u8; 32]>::decode(&mut TrailingZeroInput::new(seed.as_ref())) + .expect("input is padded with zeroes; qed"); + let mut rng = ChaChaRng::from_seed(seed); + let chosen = pick_item(&mut rng, &members[1..members.len() - 1]) + .expect("exited if members empty; qed"); + >::put(&chosen); + Self::deposit_event(RawEvent::Challenged(chosen.clone())); + } else { + >::kill(); + } + } + } + + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { + MODULE_ID.into_account() + } + + /// The account ID of the payouts pot. This is where payouts are made from. 
+ /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn payouts() -> T::AccountId { + MODULE_ID.into_sub_account(b"payouts") + } + + /// Return the duration of the lock, in blocks, with the given number of members. + /// + /// This is a rather opaque calculation based on the formula here: + /// https://www.desmos.com/calculator/9itkal1tce + fn lock_duration(x: u32) -> T::BlockNumber { + let lock_pc = 100 - 50_000 / (x + 500); + Percent::from_percent(lock_pc as u8) * T::MaxLockDuration::get() + } + + /// Get a selection of bidding accounts such that the total bids is no greater than `Pot` and + /// the number of bids would not surpass `MaxMembers` if all were accepted. + /// + /// May be empty. + pub fn take_selected( + members_len: usize, + pot: BalanceOf, + ) -> Vec>> { + let max_members = MaxMembers::::get() as usize; + // No more than 10 will be returned. + let mut max_selections: usize = 10.min(max_members.saturating_sub(members_len)); + + if max_selections > 0 { + // Get the number of left-most bidders whose bids add up to less than `pot`. + let mut bids = >::get(); + + // The list of selected candidates + let mut selected = Vec::new(); + + if bids.len() > 0 { + // Can only select at most the length of bids + max_selections = max_selections.min(bids.len()); + // Number of selected bids so far + let mut count = 0; + // Check if we have already selected a candidate with zero bid + let mut zero_selected = false; + // A running total of the cost to onboard these bids + let mut total_cost: BalanceOf = Zero::zero(); + + bids.retain(|bid| { + if count < max_selections { + // Handle zero bids. We only want one of them. 
+ if bid.value.is_zero() { + // Select only the first zero bid + if !zero_selected { + selected.push(bid.clone()); + zero_selected = true; + count += 1; + return false; + } + } else { + total_cost += bid.value; + // Select only as many users as the pot can support. + if total_cost <= pot { + selected.push(bid.clone()); + count += 1; + return false; + } + } + } + true + }); + + // No need to reset Bids if we're not taking anything. + if count > 0 { + >::put(bids); + } + } + selected + } else { + vec![] + } + } } diff --git a/frame/society/src/mock.rs b/frame/society/src/mock.rs index a410fdbd04..8262001ae2 100644 --- a/frame/society/src/mock.rs +++ b/frame/society/src/mock.rs @@ -19,20 +19,21 @@ use super::*; use frame_support::{ - impl_outer_origin, parameter_types, ord_parameter_types, traits::{OnInitialize, OnFinalize} + impl_outer_origin, ord_parameter_types, parameter_types, + traits::{OnFinalize, OnInitialize}, }; use sp_core::H256; // The testing primitives are very useful for avoiding having to work with signatures // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. +use frame_system::EnsureSignedBy; use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup}, + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, }; -use frame_system::EnsureSignedBy; impl_outer_origin! { - pub enum Origin for Test {} + pub enum Origin for Test {} } // For testing the pallet, we construct most of a mock runtime. This means @@ -41,72 +42,72 @@ impl_outer_origin! { #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! 
{ - pub const CandidateDeposit: u64 = 25; - pub const WrongSideDeduction: u64 = 2; - pub const MaxStrikes: u32 = 2; - pub const RotationPeriod: u64 = 4; - pub const PeriodSpend: u64 = 1000; - pub const MaxLockDuration: u64 = 100; - pub const ChallengePeriod: u64 = 8; - - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: u32 = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - - pub const ExistentialDeposit: u64 = 1; + pub const CandidateDeposit: u64 = 25; + pub const WrongSideDeduction: u64 = 2; + pub const MaxStrikes: u32 = 2; + pub const RotationPeriod: u64 = 4; + pub const PeriodSpend: u64 = 1000; + pub const MaxLockDuration: u64 = 100; + pub const ChallengePeriod: u64 = 8; + + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: u32 = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + + pub const ExistentialDeposit: u64 = 1; } ord_parameter_types! 
{ - pub const FounderSetAccount: u128 = 1; - pub const SuspensionJudgementSetAccount: u128 = 2; + pub const FounderSetAccount: u128 = 1; + pub const SuspensionJudgementSetAccount: u128 = 2; } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u128; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type OnNewAccount = (); - type OnKilledAccount = (); - type AccountData = pallet_balances::AccountData; + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u128; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type OnNewAccount = (); + type OnKilledAccount = (); + type AccountData = pallet_balances::AccountData; } impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } impl Trait for Test { - type Event = (); - type Currency = pallet_balances::Module; - type Randomness = (); - type CandidateDeposit = CandidateDeposit; - type WrongSideDeduction = WrongSideDeduction; - type MaxStrikes = MaxStrikes; - type 
PeriodSpend = PeriodSpend; - type MembershipChanged = (); - type RotationPeriod = RotationPeriod; - type MaxLockDuration = MaxLockDuration; - type FounderSetOrigin = EnsureSignedBy; - type SuspensionJudgementOrigin = EnsureSignedBy; - type ChallengePeriod = ChallengePeriod; + type Event = (); + type Currency = pallet_balances::Module; + type Randomness = (); + type CandidateDeposit = CandidateDeposit; + type WrongSideDeduction = WrongSideDeduction; + type MaxStrikes = MaxStrikes; + type PeriodSpend = PeriodSpend; + type MembershipChanged = (); + type RotationPeriod = RotationPeriod; + type MaxLockDuration = MaxLockDuration; + type FounderSetOrigin = EnsureSignedBy; + type SuspensionJudgementOrigin = EnsureSignedBy; + type ChallengePeriod = ChallengePeriod; } pub type Society = Module; @@ -114,97 +115,99 @@ pub type System = frame_system::Module; pub type Balances = pallet_balances::Module; pub struct EnvBuilder { - members: Vec, - balance: u64, - balances: Vec<(u128, u64)>, - pot: u64, - max_members: u32, + members: Vec, + balance: u64, + balances: Vec<(u128, u64)>, + pot: u64, + max_members: u32, } impl EnvBuilder { - pub fn new() -> Self { - Self { - members: vec![10], - balance: 10_000, - balances: vec![ - (10, 50), - (20, 50), - (30, 50), - (40, 50), - (50, 50), - (60, 50), - (70, 50), - (80, 50), - (90, 50), - ], - pot: 0, - max_members: 100, - } - } - - pub fn execute R>(mut self, f: F) -> R { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - self.balances.push((Society::account_id(), self.balance.max(self.pot))); - pallet_balances::GenesisConfig:: { - balances: self.balances, - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::{ - members: self.members, - pot: self.pot, - max_members: self.max_members, - }.assimilate_storage(&mut t).unwrap(); - let mut ext: sp_io::TestExternalities = t.into(); - ext.execute_with(f) - } - #[allow(dead_code)] - pub fn with_members(mut self, m: Vec) -> Self { - self.members = m; - self - } 
- #[allow(dead_code)] - pub fn with_balances(mut self, b: Vec<(u128, u64)>) -> Self { - self.balances = b; - self - } - #[allow(dead_code)] - pub fn with_pot(mut self, p: u64) -> Self { - self.pot = p; - self - } - #[allow(dead_code)] - pub fn with_balance(mut self, b: u64) -> Self { - self.balance = b; - self - } - #[allow(dead_code)] - pub fn with_max_members(mut self, n: u32) -> Self { - self.max_members = n; - self - } + pub fn new() -> Self { + Self { + members: vec![10], + balance: 10_000, + balances: vec![ + (10, 50), + (20, 50), + (30, 50), + (40, 50), + (50, 50), + (60, 50), + (70, 50), + (80, 50), + (90, 50), + ], + pot: 0, + max_members: 100, + } + } + + pub fn execute R>(mut self, f: F) -> R { + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + self.balances + .push((Society::account_id(), self.balance.max(self.pot))); + pallet_balances::GenesisConfig:: { + balances: self.balances, + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisConfig:: { + members: self.members, + pot: self.pot, + max_members: self.max_members, + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext: sp_io::TestExternalities = t.into(); + ext.execute_with(f) + } + #[allow(dead_code)] + pub fn with_members(mut self, m: Vec) -> Self { + self.members = m; + self + } + #[allow(dead_code)] + pub fn with_balances(mut self, b: Vec<(u128, u64)>) -> Self { + self.balances = b; + self + } + #[allow(dead_code)] + pub fn with_pot(mut self, p: u64) -> Self { + self.pot = p; + self + } + #[allow(dead_code)] + pub fn with_balance(mut self, b: u64) -> Self { + self.balance = b; + self + } + #[allow(dead_code)] + pub fn with_max_members(mut self, n: u32) -> Self { + self.max_members = n; + self + } } /// Run until a particular block. 
pub fn run_to_block(n: u64) { - while System::block_number() < n { - if System::block_number() > 1 { - System::on_finalize(System::block_number()); - } - System::set_block_number(System::block_number() + 1); - System::on_initialize(System::block_number()); - Society::on_initialize(System::block_number()); - } + while System::block_number() < n { + if System::block_number() > 1 { + System::on_finalize(System::block_number()); + } + System::set_block_number(System::block_number() + 1); + System::on_initialize(System::block_number()); + Society::on_initialize(System::block_number()); + } } /// Creates a bid struct using input parameters. pub fn create_bid( - value: Balance, - who: AccountId, - kind: BidKind -) -> Bid -{ - Bid { - who, - kind, - value - } + value: Balance, + who: AccountId, + kind: BidKind, +) -> Bid { + Bid { who, kind, value } } diff --git a/frame/society/src/tests.rs b/frame/society/src/tests.rs index 8b10dc32e7..b962e95439 100644 --- a/frame/society/src/tests.rs +++ b/frame/society/src/tests.rs @@ -19,868 +19,1046 @@ use super::*; use mock::*; -use frame_support::{assert_ok, assert_noop}; -use sp_runtime::traits::BadOrigin; +use frame_support::{assert_noop, assert_ok}; use sp_core::blake2_256; +use sp_runtime::traits::BadOrigin; #[test] fn founding_works() { - EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| { - // Not set up initially. 
- assert_eq!(Society::founder(), None); - assert_eq!(Society::max_members(), 0); - assert_eq!(Society::pot(), 0); - // Account 1 is set as the founder origin - // Account 5 cannot start a society - assert_noop!(Society::found(Origin::signed(5), 20, 100, vec![]), BadOrigin); - // Account 1 can start a society, where 10 is the founding member - assert_ok!(Society::found(Origin::signed(1), 10, 100, b"be cool".to_vec())); - // Society members only include 10 - assert_eq!(Society::members(), vec![10]); - // 10 is the head of the society - assert_eq!(Society::head(), Some(10)); - // ...and also the founder - assert_eq!(Society::founder(), Some(10)); - // 100 members max - assert_eq!(Society::max_members(), 100); - // rules are correct - assert_eq!(Society::rules(), Some(blake2_256(b"be cool").into())); - // Pot grows after first rotation period - run_to_block(4); - assert_eq!(Society::pot(), 1000); - // Cannot start another society - assert_noop!( - Society::found(Origin::signed(1), 20, 100, vec![]), - Error::::AlreadyFounded - ); - }); + EnvBuilder::new() + .with_max_members(0) + .with_members(vec![]) + .execute(|| { + // Not set up initially. 
+ assert_eq!(Society::founder(), None); + assert_eq!(Society::max_members(), 0); + assert_eq!(Society::pot(), 0); + // Account 1 is set as the founder origin + // Account 5 cannot start a society + assert_noop!( + Society::found(Origin::signed(5), 20, 100, vec![]), + BadOrigin + ); + // Account 1 can start a society, where 10 is the founding member + assert_ok!(Society::found( + Origin::signed(1), + 10, + 100, + b"be cool".to_vec() + )); + // Society members only include 10 + assert_eq!(Society::members(), vec![10]); + // 10 is the head of the society + assert_eq!(Society::head(), Some(10)); + // ...and also the founder + assert_eq!(Society::founder(), Some(10)); + // 100 members max + assert_eq!(Society::max_members(), 100); + // rules are correct + assert_eq!(Society::rules(), Some(blake2_256(b"be cool").into())); + // Pot grows after first rotation period + run_to_block(4); + assert_eq!(Society::pot(), 1000); + // Cannot start another society + assert_noop!( + Society::found(Origin::signed(1), 20, 100, vec![]), + Error::::AlreadyFounded + ); + }); } #[test] fn unfounding_works() { - EnvBuilder::new().with_max_members(0).with_members(vec![]).execute(|| { - // Account 1 sets the founder... - assert_ok!(Society::found(Origin::signed(1), 10, 100, vec![])); - // Account 2 cannot unfound it as it's not the founder. - assert_noop!(Society::unfound(Origin::signed(2)), Error::::NotFounder); - // Account 10 can, though. - assert_ok!(Society::unfound(Origin::signed(10))); - - // 1 sets the founder to 20 this time - assert_ok!(Society::found(Origin::signed(1), 20, 100, vec![])); - // Bring in a new member... - assert_ok!(Society::bid(Origin::signed(10), 0)); - run_to_block(4); - assert_ok!(Society::vote(Origin::signed(20), 10, true)); - run_to_block(8); - - // Unfounding won't work now, even though it's from 20. 
- assert_noop!(Society::unfound(Origin::signed(20)), Error::::NotHead); - }); + EnvBuilder::new() + .with_max_members(0) + .with_members(vec![]) + .execute(|| { + // Account 1 sets the founder... + assert_ok!(Society::found(Origin::signed(1), 10, 100, vec![])); + // Account 2 cannot unfound it as it's not the founder. + assert_noop!( + Society::unfound(Origin::signed(2)), + Error::::NotFounder + ); + // Account 10 can, though. + assert_ok!(Society::unfound(Origin::signed(10))); + + // 1 sets the founder to 20 this time + assert_ok!(Society::found(Origin::signed(1), 20, 100, vec![])); + // Bring in a new member... + assert_ok!(Society::bid(Origin::signed(10), 0)); + run_to_block(4); + assert_ok!(Society::vote(Origin::signed(20), 10, true)); + run_to_block(8); + + // Unfounding won't work now, even though it's from 20. + assert_noop!( + Society::unfound(Origin::signed(20)), + Error::::NotHead + ); + }); } #[test] fn basic_new_member_works() { - EnvBuilder::new().execute(|| { - assert_eq!(Balances::free_balance(20), 50); - // Bid causes Candidate Deposit to be reserved. - assert_ok!(Society::bid(Origin::signed(20), 0)); - assert_eq!(Balances::free_balance(20), 25); - assert_eq!(Balances::reserved_balance(20), 25); - // Rotate period every 4 blocks - run_to_block(4); - // 20 is now a candidate - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); - // 10 (a member) can vote for the candidate - assert_ok!(Society::vote(Origin::signed(10), 20, true)); - // Rotate period every 4 blocks - run_to_block(8); - // 20 is now a member of the society - assert_eq!(Society::members(), vec![10, 20]); - // Reserved balance is returned - assert_eq!(Balances::free_balance(20), 50); - assert_eq!(Balances::reserved_balance(20), 0); - }); + EnvBuilder::new().execute(|| { + assert_eq!(Balances::free_balance(20), 50); + // Bid causes Candidate Deposit to be reserved. 
+ assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_eq!(Balances::free_balance(20), 25); + assert_eq!(Balances::reserved_balance(20), 25); + // Rotate period every 4 blocks + run_to_block(4); + // 20 is now a candidate + assert_eq!( + Society::candidates(), + vec![create_bid(0, 20, BidKind::Deposit(25))] + ); + // 10 (a member) can vote for the candidate + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + // Rotate period every 4 blocks + run_to_block(8); + // 20 is now a member of the society + assert_eq!(Society::members(), vec![10, 20]); + // Reserved balance is returned + assert_eq!(Balances::free_balance(20), 50); + assert_eq!(Balances::reserved_balance(20), 0); + }); } #[test] fn bidding_works() { - EnvBuilder::new().execute(|| { - // Users make bids of various amounts - assert_ok!(Society::bid(Origin::signed(60), 1900)); - assert_ok!(Society::bid(Origin::signed(50), 500)); - assert_ok!(Society::bid(Origin::signed(40), 400)); - assert_ok!(Society::bid(Origin::signed(30), 300)); - // Rotate period - run_to_block(4); - // Pot is 1000 after "PeriodSpend" - assert_eq!(Society::pot(), 1000); - assert_eq!(Balances::free_balance(Society::account_id()), 10_000); - // Choose smallest bidding users whose total is less than pot - assert_eq!(Society::candidates(), vec![ - create_bid(300, 30, BidKind::Deposit(25)), - create_bid(400, 40, BidKind::Deposit(25)), - ]); - // A member votes for these candidates to join the society - assert_ok!(Society::vote(Origin::signed(10), 30, true)); - assert_ok!(Society::vote(Origin::signed(10), 40, true)); - run_to_block(8); - // Candidates become members after a period rotation - assert_eq!(Society::members(), vec![10, 30, 40]); - // Pot is increased by 1000, but pays out 700 to the members - assert_eq!(Balances::free_balance(Society::account_id()), 9_300); - assert_eq!(Society::pot(), 1_300); - // Left over from the original bids is 50 who satisfies the condition of bid less than pot. 
- assert_eq!(Society::candidates(), vec![ create_bid(500, 50, BidKind::Deposit(25)) ]); - // 40, now a member, can vote for 50 - assert_ok!(Society::vote(Origin::signed(40), 50, true)); - run_to_block(12); - // 50 is now a member - assert_eq!(Society::members(), vec![10, 30, 40, 50]); - // Pot is increased by 1000, and 500 is paid out. Total payout so far is 1200. - assert_eq!(Society::pot(), 1_800); - assert_eq!(Balances::free_balance(Society::account_id()), 8_800); - // No more candidates satisfy the requirements - assert_eq!(Society::candidates(), vec![]); - assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - // Next period - run_to_block(16); - // Same members - assert_eq!(Society::members(), vec![10, 30, 40, 50]); - // Pot is increased by 1000 again - assert_eq!(Society::pot(), 2_800); - // No payouts - assert_eq!(Balances::free_balance(Society::account_id()), 8_800); - // Candidate 60 now qualifies based on the increased pot size. - assert_eq!(Society::candidates(), vec![ create_bid(1900, 60, BidKind::Deposit(25)) ]); - // Candidate 60 is voted in. 
- assert_ok!(Society::vote(Origin::signed(50), 60, true)); - run_to_block(20); - // 60 joins as a member - assert_eq!(Society::members(), vec![10, 30, 40, 50, 60]); - // Pay them - assert_eq!(Society::pot(), 1_900); - assert_eq!(Balances::free_balance(Society::account_id()), 6_900); - }); + EnvBuilder::new().execute(|| { + // Users make bids of various amounts + assert_ok!(Society::bid(Origin::signed(60), 1900)); + assert_ok!(Society::bid(Origin::signed(50), 500)); + assert_ok!(Society::bid(Origin::signed(40), 400)); + assert_ok!(Society::bid(Origin::signed(30), 300)); + // Rotate period + run_to_block(4); + // Pot is 1000 after "PeriodSpend" + assert_eq!(Society::pot(), 1000); + assert_eq!(Balances::free_balance(Society::account_id()), 10_000); + // Choose smallest bidding users whose total is less than pot + assert_eq!( + Society::candidates(), + vec![ + create_bid(300, 30, BidKind::Deposit(25)), + create_bid(400, 40, BidKind::Deposit(25)), + ] + ); + // A member votes for these candidates to join the society + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 40, true)); + run_to_block(8); + // Candidates become members after a period rotation + assert_eq!(Society::members(), vec![10, 30, 40]); + // Pot is increased by 1000, but pays out 700 to the members + assert_eq!(Balances::free_balance(Society::account_id()), 9_300); + assert_eq!(Society::pot(), 1_300); + // Left over from the original bids is 50 who satisfies the condition of bid less than pot. + assert_eq!( + Society::candidates(), + vec![create_bid(500, 50, BidKind::Deposit(25))] + ); + // 40, now a member, can vote for 50 + assert_ok!(Society::vote(Origin::signed(40), 50, true)); + run_to_block(12); + // 50 is now a member + assert_eq!(Society::members(), vec![10, 30, 40, 50]); + // Pot is increased by 1000, and 500 is paid out. Total payout so far is 1200. 
+ assert_eq!(Society::pot(), 1_800); + assert_eq!(Balances::free_balance(Society::account_id()), 8_800); + // No more candidates satisfy the requirements + assert_eq!(Society::candidates(), vec![]); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + // Next period + run_to_block(16); + // Same members + assert_eq!(Society::members(), vec![10, 30, 40, 50]); + // Pot is increased by 1000 again + assert_eq!(Society::pot(), 2_800); + // No payouts + assert_eq!(Balances::free_balance(Society::account_id()), 8_800); + // Candidate 60 now qualifies based on the increased pot size. + assert_eq!( + Society::candidates(), + vec![create_bid(1900, 60, BidKind::Deposit(25))] + ); + // Candidate 60 is voted in. + assert_ok!(Society::vote(Origin::signed(50), 60, true)); + run_to_block(20); + // 60 joins as a member + assert_eq!(Society::members(), vec![10, 30, 40, 50, 60]); + // Pay them + assert_eq!(Society::pot(), 1_900); + assert_eq!(Balances::free_balance(Society::account_id()), 6_900); + }); } #[test] fn unbidding_works() { - EnvBuilder::new().execute(|| { - // 20 and 30 make bids - assert_ok!(Society::bid(Origin::signed(20), 1000)); - assert_ok!(Society::bid(Origin::signed(30), 0)); - // Balances are reserved - assert_eq!(Balances::free_balance(30), 25); - assert_eq!(Balances::reserved_balance(30), 25); - // Must know right position to unbid + cannot unbid someone else - assert_noop!(Society::unbid(Origin::signed(30), 1), Error::::BadPosition); - // Can unbid themselves with the right position - assert_ok!(Society::unbid(Origin::signed(30), 0)); - // Balance is returned - assert_eq!(Balances::free_balance(30), 50); - assert_eq!(Balances::reserved_balance(30), 0); - // 20 wins candidacy - run_to_block(4); - assert_eq!(Society::candidates(), vec![ create_bid(1000, 20, BidKind::Deposit(25)) ]); - }); + EnvBuilder::new().execute(|| { + // 20 and 30 make bids + assert_ok!(Society::bid(Origin::signed(20), 1000)); + 
assert_ok!(Society::bid(Origin::signed(30), 0)); + // Balances are reserved + assert_eq!(Balances::free_balance(30), 25); + assert_eq!(Balances::reserved_balance(30), 25); + // Must know right position to unbid + cannot unbid someone else + assert_noop!( + Society::unbid(Origin::signed(30), 1), + Error::::BadPosition + ); + // Can unbid themselves with the right position + assert_ok!(Society::unbid(Origin::signed(30), 0)); + // Balance is returned + assert_eq!(Balances::free_balance(30), 50); + assert_eq!(Balances::reserved_balance(30), 0); + // 20 wins candidacy + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(1000, 20, BidKind::Deposit(25))] + ); + }); } #[test] fn payout_works() { - EnvBuilder::new().execute(|| { - // Original balance of 50 - assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(Origin::signed(20), 1000)); - run_to_block(4); - assert_ok!(Society::vote(Origin::signed(10), 20, true)); - run_to_block(8); - // payout not ready - assert_noop!(Society::payout(Origin::signed(20)), Error::::NoPayout); - run_to_block(9); - // payout should be here - assert_ok!(Society::payout(Origin::signed(20))); - assert_eq!(Balances::free_balance(20), 1050); - }); + EnvBuilder::new().execute(|| { + // Original balance of 50 + assert_eq!(Balances::free_balance(20), 50); + assert_ok!(Society::bid(Origin::signed(20), 1000)); + run_to_block(4); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + run_to_block(8); + // payout not ready + assert_noop!( + Society::payout(Origin::signed(20)), + Error::::NoPayout + ); + run_to_block(9); + // payout should be here + assert_ok!(Society::payout(Origin::signed(20))); + assert_eq!(Balances::free_balance(20), 1050); + }); } #[test] fn basic_new_member_skeptic_works() { - EnvBuilder::new().execute(|| { - assert_eq!(Strikes::::get(10), 0); - assert_ok!(Society::bid(Origin::signed(20), 0)); - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, 
BidKind::Deposit(25))]); - run_to_block(8); - assert_eq!(Society::members(), vec![10]); - assert_eq!(Strikes::::get(10), 1); - }); + EnvBuilder::new().execute(|| { + assert_eq!(Strikes::::get(10), 0); + assert_ok!(Society::bid(Origin::signed(20), 0)); + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(0, 20, BidKind::Deposit(25))] + ); + run_to_block(8); + assert_eq!(Society::members(), vec![10]); + assert_eq!(Strikes::::get(10), 1); + }); } #[test] fn basic_new_member_reject_works() { - EnvBuilder::new().execute(|| { - // Starting Balance - assert_eq!(Balances::free_balance(20), 50); - // 20 makes a bid - assert_ok!(Society::bid(Origin::signed(20), 0)); - assert_eq!(Balances::free_balance(20), 25); - assert_eq!(Balances::reserved_balance(20), 25); - // Rotation Period - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); - // We say no - assert_ok!(Society::vote(Origin::signed(10), 20, false)); - run_to_block(8); - // User is not added as member - assert_eq!(Society::members(), vec![10]); - // User is suspended - assert_eq!(Society::candidates(), vec![]); - assert_eq!(Society::suspended_candidate(20).is_some(), true); - }); + EnvBuilder::new().execute(|| { + // Starting Balance + assert_eq!(Balances::free_balance(20), 50); + // 20 makes a bid + assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_eq!(Balances::free_balance(20), 25); + assert_eq!(Balances::reserved_balance(20), 25); + // Rotation Period + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(0, 20, BidKind::Deposit(25))] + ); + // We say no + assert_ok!(Society::vote(Origin::signed(10), 20, false)); + run_to_block(8); + // User is not added as member + assert_eq!(Society::members(), vec![10]); + // User is suspended + assert_eq!(Society::candidates(), vec![]); + assert_eq!(Society::suspended_candidate(20).is_some(), true); + }); } #[test] fn slash_payout_works() { - EnvBuilder::new().execute(|| { - 
assert_eq!(Balances::free_balance(20), 50); - assert_ok!(Society::bid(Origin::signed(20), 1000)); - run_to_block(4); - assert_ok!(Society::vote(Origin::signed(10), 20, true)); - run_to_block(8); - // payout in queue - assert_eq!(Payouts::::get(20), vec![(9, 1000)]); - assert_noop!(Society::payout(Origin::signed(20)), Error::::NoPayout); - // slash payout - assert_eq!(Society::slash_payout(&20, 500), 500); - assert_eq!(Payouts::::get(20), vec![(9, 500)]); - run_to_block(9); - // payout should be here, but 500 less - assert_ok!(Society::payout(Origin::signed(20))); - assert_eq!(Balances::free_balance(20), 550); - }); + EnvBuilder::new().execute(|| { + assert_eq!(Balances::free_balance(20), 50); + assert_ok!(Society::bid(Origin::signed(20), 1000)); + run_to_block(4); + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + run_to_block(8); + // payout in queue + assert_eq!(Payouts::::get(20), vec![(9, 1000)]); + assert_noop!( + Society::payout(Origin::signed(20)), + Error::::NoPayout + ); + // slash payout + assert_eq!(Society::slash_payout(&20, 500), 500); + assert_eq!(Payouts::::get(20), vec![(9, 500)]); + run_to_block(9); + // payout should be here, but 500 less + assert_ok!(Society::payout(Origin::signed(20))); + assert_eq!(Balances::free_balance(20), 550); + }); } #[test] fn slash_payout_multi_works() { - EnvBuilder::new().execute(|| { - assert_eq!(Balances::free_balance(20), 50); - // create a few payouts - Society::bump_payout(&20, 5, 100); - Society::bump_payout(&20, 10, 100); - Society::bump_payout(&20, 15, 100); - Society::bump_payout(&20, 20, 100); - // payouts in queue - assert_eq!(Payouts::::get(20), vec![(5, 100), (10, 100), (15, 100), (20, 100)]); - // slash payout - assert_eq!(Society::slash_payout(&20, 250), 250); - assert_eq!(Payouts::::get(20), vec![(15, 50), (20, 100)]); - // slash again - assert_eq!(Society::slash_payout(&20, 50), 50); - assert_eq!(Payouts::::get(20), vec![(20, 100)]); - }); + EnvBuilder::new().execute(|| { + 
assert_eq!(Balances::free_balance(20), 50); + // create a few payouts + Society::bump_payout(&20, 5, 100); + Society::bump_payout(&20, 10, 100); + Society::bump_payout(&20, 15, 100); + Society::bump_payout(&20, 20, 100); + // payouts in queue + assert_eq!( + Payouts::::get(20), + vec![(5, 100), (10, 100), (15, 100), (20, 100)] + ); + // slash payout + assert_eq!(Society::slash_payout(&20, 250), 250); + assert_eq!(Payouts::::get(20), vec![(15, 50), (20, 100)]); + // slash again + assert_eq!(Society::slash_payout(&20, 50), 50); + assert_eq!(Payouts::::get(20), vec![(20, 100)]); + }); } #[test] fn suspended_member_life_cycle_works() { - EnvBuilder::new().execute(|| { - // Add 20 to members, who is not the head and can be suspended/removed. - assert_ok!(Society::add_member(&20)); - assert_eq!(>::get(), vec![10, 20]); - assert_eq!(Strikes::::get(20), 0); - assert_eq!(>::get(20), false); - - // Let's suspend account 20 by giving them 2 strikes by not voting - assert_ok!(Society::bid(Origin::signed(30), 0)); - run_to_block(8); - assert_eq!(Strikes::::get(20), 1); - assert_ok!(Society::bid(Origin::signed(40), 0)); - run_to_block(16); - - // Strike 2 is accumulated, and 20 is suspended :( - assert_eq!(>::get(20), true); - assert_eq!(>::get(), vec![10]); - - // Suspended members cannot get payout - Society::bump_payout(&20, 10, 100); - assert_noop!(Society::payout(Origin::signed(20)), Error::::NotMember); - - // Normal people cannot make judgement - assert_noop!(Society::judge_suspended_member(Origin::signed(20), 20, true), BadOrigin); - - // Suspension judgment origin can judge thee - // Suspension judgement origin forgives the suspended member - assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, true)); - assert_eq!(>::get(20), false); - assert_eq!(>::get(), vec![10, 20]); - - // Let's suspend them again, directly - Society::suspend_member(&20); - assert_eq!(>::get(20), true); - // Suspension judgement origin does not forgive the suspended member - 
assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); - // Cleaned up - assert_eq!(>::get(20), false); - assert_eq!(>::get(), vec![10]); - assert_eq!(>::get(20), vec![]); - }); + EnvBuilder::new().execute(|| { + // Add 20 to members, who is not the head and can be suspended/removed. + assert_ok!(Society::add_member(&20)); + assert_eq!(>::get(), vec![10, 20]); + assert_eq!(Strikes::::get(20), 0); + assert_eq!(>::get(20), false); + + // Let's suspend account 20 by giving them 2 strikes by not voting + assert_ok!(Society::bid(Origin::signed(30), 0)); + run_to_block(8); + assert_eq!(Strikes::::get(20), 1); + assert_ok!(Society::bid(Origin::signed(40), 0)); + run_to_block(16); + + // Strike 2 is accumulated, and 20 is suspended :( + assert_eq!(>::get(20), true); + assert_eq!(>::get(), vec![10]); + + // Suspended members cannot get payout + Society::bump_payout(&20, 10, 100); + assert_noop!( + Society::payout(Origin::signed(20)), + Error::::NotMember + ); + + // Normal people cannot make judgement + assert_noop!( + Society::judge_suspended_member(Origin::signed(20), 20, true), + BadOrigin + ); + + // Suspension judgment origin can judge thee + // Suspension judgement origin forgives the suspended member + assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, true)); + assert_eq!(>::get(20), false); + assert_eq!(>::get(), vec![10, 20]); + + // Let's suspend them again, directly + Society::suspend_member(&20); + assert_eq!(>::get(20), true); + // Suspension judgement origin does not forgive the suspended member + assert_ok!(Society::judge_suspended_member( + Origin::signed(2), + 20, + false + )); + // Cleaned up + assert_eq!(>::get(20), false); + assert_eq!(>::get(), vec![10]); + assert_eq!(>::get(20), vec![]); + }); } #[test] fn suspended_candidate_rejected_works() { - EnvBuilder::new().execute(|| { - // Starting Balance - assert_eq!(Balances::free_balance(20), 50); - assert_eq!(Balances::free_balance(Society::account_id()), 10000); - // 
20 makes a bid - assert_ok!(Society::bid(Origin::signed(20), 0)); - assert_eq!(Balances::free_balance(20), 25); - assert_eq!(Balances::reserved_balance(20), 25); - // Rotation Period - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); - // We say no - assert_ok!(Society::vote(Origin::signed(10), 20, false)); - run_to_block(8); - // User is not added as member - assert_eq!(Society::members(), vec![10]); - // User is suspended - assert_eq!(Society::candidates(), vec![]); - assert_eq!(Society::suspended_candidate(20).is_some(), true); - - // Normal user cannot make judgement on suspended candidate - assert_noop!(Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), BadOrigin); - - // Suspension judgement origin makes no direct judgement - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Rebid)); - // They are placed back in bid pool, repeat suspension process - // Rotation Period - run_to_block(12); - assert_eq!(Society::candidates(), vec![create_bid(0, 20, BidKind::Deposit(25))]); - // We say no - assert_ok!(Society::vote(Origin::signed(10), 20, false)); - run_to_block(16); - // User is not added as member - assert_eq!(Society::members(), vec![10]); - // User is suspended - assert_eq!(Society::candidates(), vec![]); - assert_eq!(Society::suspended_candidate(20).is_some(), true); - - // Suspension judgement origin rejects the candidate - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Reject)); - // User is slashed - assert_eq!(Balances::free_balance(20), 25); - assert_eq!(Balances::reserved_balance(20), 0); - // Funds are deposited to society account - assert_eq!(Balances::free_balance(Society::account_id()), 10025); - // Cleaned up - assert_eq!(Society::candidates(), vec![]); - assert_eq!(>::get(20), None); - }); + EnvBuilder::new().execute(|| { + // Starting Balance + assert_eq!(Balances::free_balance(20), 50); + 
assert_eq!(Balances::free_balance(Society::account_id()), 10000); + // 20 makes a bid + assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_eq!(Balances::free_balance(20), 25); + assert_eq!(Balances::reserved_balance(20), 25); + // Rotation Period + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(0, 20, BidKind::Deposit(25))] + ); + // We say no + assert_ok!(Society::vote(Origin::signed(10), 20, false)); + run_to_block(8); + // User is not added as member + assert_eq!(Society::members(), vec![10]); + // User is suspended + assert_eq!(Society::candidates(), vec![]); + assert_eq!(Society::suspended_candidate(20).is_some(), true); + + // Normal user cannot make judgement on suspended candidate + assert_noop!( + Society::judge_suspended_candidate(Origin::signed(20), 20, Judgement::Approve), + BadOrigin + ); + + // Suspension judgement origin makes no direct judgement + assert_ok!(Society::judge_suspended_candidate( + Origin::signed(2), + 20, + Judgement::Rebid + )); + // They are placed back in bid pool, repeat suspension process + // Rotation Period + run_to_block(12); + assert_eq!( + Society::candidates(), + vec![create_bid(0, 20, BidKind::Deposit(25))] + ); + // We say no + assert_ok!(Society::vote(Origin::signed(10), 20, false)); + run_to_block(16); + // User is not added as member + assert_eq!(Society::members(), vec![10]); + // User is suspended + assert_eq!(Society::candidates(), vec![]); + assert_eq!(Society::suspended_candidate(20).is_some(), true); + + // Suspension judgement origin rejects the candidate + assert_ok!(Society::judge_suspended_candidate( + Origin::signed(2), + 20, + Judgement::Reject + )); + // User is slashed + assert_eq!(Balances::free_balance(20), 25); + assert_eq!(Balances::reserved_balance(20), 0); + // Funds are deposited to society account + assert_eq!(Balances::free_balance(Society::account_id()), 10025); + // Cleaned up + assert_eq!(Society::candidates(), vec![]); + assert_eq!(>::get(20), None); + }); } 
#[test] fn vouch_works() { - EnvBuilder::new().execute(|| { - // 10 is the only member - assert_eq!(Society::members(), vec![10]); - // A non-member cannot vouch - assert_noop!(Society::vouch(Origin::signed(1), 20, 1000, 100), Error::::NotMember); - // A member can though - assert_ok!(Society::vouch(Origin::signed(10), 20, 1000, 100)); - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - // A member cannot vouch twice at the same time - assert_noop!(Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching); - // Vouching creates the right kind of bid - assert_eq!(>::get(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); - // Vouched user can become candidate - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(1000, 20, BidKind::Vouch(10, 100))]); - // Vote yes - assert_ok!(Society::vote(Origin::signed(10), 20, true)); - // Vouched user can win - run_to_block(8); - assert_eq!(Society::members(), vec![10, 20]); - // Voucher wins a portion of the payment - assert_eq!(>::get(10), vec![(9, 100)]); - // Vouched user wins the rest - assert_eq!(>::get(20), vec![(9, 900)]); - // 10 is no longer vouching - assert_eq!(>::get(10), None); - }); + EnvBuilder::new().execute(|| { + // 10 is the only member + assert_eq!(Society::members(), vec![10]); + // A non-member cannot vouch + assert_noop!( + Society::vouch(Origin::signed(1), 20, 1000, 100), + Error::::NotMember + ); + // A member can though + assert_ok!(Society::vouch(Origin::signed(10), 20, 1000, 100)); + assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); + // A member cannot vouch twice at the same time + assert_noop!( + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching + ); + // Vouching creates the right kind of bid + assert_eq!( + >::get(), + vec![create_bid(1000, 20, BidKind::Vouch(10, 100))] + ); + // Vouched user can become candidate + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(1000, 20, BidKind::Vouch(10, 
100))] + ); + // Vote yes + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + // Vouched user can win + run_to_block(8); + assert_eq!(Society::members(), vec![10, 20]); + // Voucher wins a portion of the payment + assert_eq!(>::get(10), vec![(9, 100)]); + // Vouched user wins the rest + assert_eq!(>::get(20), vec![(9, 900)]); + // 10 is no longer vouching + assert_eq!(>::get(10), None); + }); } #[test] fn voucher_cannot_win_more_than_bid() { - EnvBuilder::new().execute(|| { - // 10 is the only member - assert_eq!(Society::members(), vec![10]); - // 10 vouches, but asks for more than the bid - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 1000)); - // Vouching creates the right kind of bid - assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); - // Vouched user can become candidate - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 1000))]); - // Vote yes - assert_ok!(Society::vote(Origin::signed(10), 20, true)); - // Vouched user can win - run_to_block(8); - assert_eq!(Society::members(), vec![10, 20]); - // Voucher wins as much as the bid - assert_eq!(>::get(10), vec![(9, 100)]); - // Vouched user gets nothing - assert_eq!(>::get(20), vec![]); - }); + EnvBuilder::new().execute(|| { + // 10 is the only member + assert_eq!(Society::members(), vec![10]); + // 10 vouches, but asks for more than the bid + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 1000)); + // Vouching creates the right kind of bid + assert_eq!( + >::get(), + vec![create_bid(100, 20, BidKind::Vouch(10, 1000))] + ); + // Vouched user can become candidate + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(100, 20, BidKind::Vouch(10, 1000))] + ); + // Vote yes + assert_ok!(Society::vote(Origin::signed(10), 20, true)); + // Vouched user can win + run_to_block(8); + assert_eq!(Society::members(), vec![10, 20]); + // Voucher wins as much as the bid + assert_eq!(>::get(10), vec![(9, 
100)]); + // Vouched user gets nothing + assert_eq!(>::get(20), vec![]); + }); } #[test] fn unvouch_works() { - EnvBuilder::new().execute(|| { - // 10 is the only member - assert_eq!(Society::members(), vec![10]); - // 10 vouches for 20 - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); - // 20 has a bid - assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); - // 10 is vouched - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - // To unvouch, you must know the right bid position - assert_noop!(Society::unvouch(Origin::signed(10), 2), Error::::BadPosition); - // 10 can unvouch with the right position - assert_ok!(Society::unvouch(Origin::signed(10), 0)); - // 20 no longer has a bid - assert_eq!(>::get(), vec![]); - // 10 is no longer vouching - assert_eq!(>::get(10), None); - - // Cannot unvouch after they become candidate - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); - assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::BadPosition); - // 10 is still vouching until candidate is approved or rejected - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - run_to_block(8); - // In this case candidate is denied and suspended - assert!(Society::suspended_candidate(&20).is_some()); - assert_eq!(Society::members(), vec![10]); - // User is stuck vouching until judgement origin resolves suspended candidate - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - // Judge denies candidate - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), 20, Judgement::Reject)); - // 10 is banned from vouching - assert_eq!(>::get(10), Some(VouchingStatus::Banned)); - assert_eq!(Society::members(), vec![10]); - - // 10 cannot vouch again - assert_noop!(Society::vouch(Origin::signed(10), 30, 100, 0), Error::::AlreadyVouching); - // 10 cannot unvouch either, so they are banned forever. 
- assert_noop!(Society::unvouch(Origin::signed(10), 0), Error::::NotVouching); - }); + EnvBuilder::new().execute(|| { + // 10 is the only member + assert_eq!(Society::members(), vec![10]); + // 10 vouches for 20 + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); + // 20 has a bid + assert_eq!( + >::get(), + vec![create_bid(100, 20, BidKind::Vouch(10, 0))] + ); + // 10 is vouched + assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); + // To unvouch, you must know the right bid position + assert_noop!( + Society::unvouch(Origin::signed(10), 2), + Error::::BadPosition + ); + // 10 can unvouch with the right position + assert_ok!(Society::unvouch(Origin::signed(10), 0)); + // 20 no longer has a bid + assert_eq!(>::get(), vec![]); + // 10 is no longer vouching + assert_eq!(>::get(10), None); + + // Cannot unvouch after they become candidate + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(100, 20, BidKind::Vouch(10, 0))] + ); + assert_noop!( + Society::unvouch(Origin::signed(10), 0), + Error::::BadPosition + ); + // 10 is still vouching until candidate is approved or rejected + assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); + run_to_block(8); + // In this case candidate is denied and suspended + assert!(Society::suspended_candidate(&20).is_some()); + assert_eq!(Society::members(), vec![10]); + // User is stuck vouching until judgement origin resolves suspended candidate + assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); + // Judge denies candidate + assert_ok!(Society::judge_suspended_candidate( + Origin::signed(2), + 20, + Judgement::Reject + )); + // 10 is banned from vouching + assert_eq!(>::get(10), Some(VouchingStatus::Banned)); + assert_eq!(Society::members(), vec![10]); + + // 10 cannot vouch again + assert_noop!( + Society::vouch(Origin::signed(10), 30, 100, 0), + Error::::AlreadyVouching + ); + // 10 cannot unvouch either, so they are banned 
forever. + assert_noop!( + Society::unvouch(Origin::signed(10), 0), + Error::::NotVouching + ); + }); } #[test] fn unbid_vouch_works() { - EnvBuilder::new().execute(|| { - // 10 is the only member - assert_eq!(Society::members(), vec![10]); - // 10 vouches for 20 - assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); - // 20 has a bid - assert_eq!(>::get(), vec![create_bid(100, 20, BidKind::Vouch(10, 0))]); - // 10 is vouched - assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); - // 20 doesn't want to be a member and can unbid themselves. - assert_ok!(Society::unbid(Origin::signed(20), 0)); - // Everything is cleaned up - assert_eq!(>::get(10), None); - assert_eq!(>::get(), vec![]); - }); + EnvBuilder::new().execute(|| { + // 10 is the only member + assert_eq!(Society::members(), vec![10]); + // 10 vouches for 20 + assert_ok!(Society::vouch(Origin::signed(10), 20, 100, 0)); + // 20 has a bid + assert_eq!( + >::get(), + vec![create_bid(100, 20, BidKind::Vouch(10, 0))] + ); + // 10 is vouched + assert_eq!(>::get(10), Some(VouchingStatus::Vouching)); + // 20 doesn't want to be a member and can unbid themselves. + assert_ok!(Society::unbid(Origin::signed(20), 0)); + // Everything is cleaned up + assert_eq!(>::get(10), None); + assert_eq!(>::get(), vec![]); + }); } #[test] fn founder_and_head_cannot_be_removed() { - EnvBuilder::new().execute(|| { - // 10 is the only member, founder, and head - assert_eq!(Society::members(), vec![10]); - assert_eq!(Society::founder(), Some(10)); - assert_eq!(Society::head(), Some(10)); - // 10 can still accumulate strikes - assert_ok!(Society::bid(Origin::signed(20), 0)); - run_to_block(8); - assert_eq!(Strikes::::get(10), 1); - assert_ok!(Society::bid(Origin::signed(30), 0)); - run_to_block(16); - assert_eq!(Strikes::::get(10), 2); - // Awkwardly they can obtain more than MAX_STRIKES... 
- assert_ok!(Society::bid(Origin::signed(40), 0)); - run_to_block(24); - assert_eq!(Strikes::::get(10), 3); - - // Replace the head - assert_ok!(Society::bid(Origin::signed(50), 0)); - run_to_block(28); - assert_ok!(Society::vote(Origin::signed(10), 50, true)); - assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - run_to_block(32); - assert_eq!(Society::members(), vec![10, 50]); - assert_eq!(Society::head(), Some(50)); - // Founder is unchanged - assert_eq!(Society::founder(), Some(10)); - - // 50 can still accumulate strikes - assert_ok!(Society::bid(Origin::signed(60), 0)); - run_to_block(40); - assert_eq!(Strikes::::get(50), 1); - assert_ok!(Society::bid(Origin::signed(70), 0)); - run_to_block(48); - assert_eq!(Strikes::::get(50), 2); - - // Replace the head - assert_ok!(Society::bid(Origin::signed(80), 0)); - run_to_block(52); - assert_ok!(Society::vote(Origin::signed(10), 80, true)); - assert_ok!(Society::vote(Origin::signed(50), 80, true)); - assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around - run_to_block(56); - assert_eq!(Society::members(), vec![10, 50, 80]); - assert_eq!(Society::head(), Some(80)); - assert_eq!(Society::founder(), Some(10)); - - // 50 can now be suspended for strikes - assert_ok!(Society::bid(Origin::signed(90), 0)); - run_to_block(60); - // The candidate is rejected, so voting approve will give a strike - assert_ok!(Society::vote(Origin::signed(50), 90, true)); - run_to_block(64); - assert_eq!(Strikes::::get(50), 0); - assert_eq!(>::get(50), true); - assert_eq!(Society::members(), vec![10, 80]); - }); + EnvBuilder::new().execute(|| { + // 10 is the only member, founder, and head + assert_eq!(Society::members(), vec![10]); + assert_eq!(Society::founder(), Some(10)); + assert_eq!(Society::head(), Some(10)); + // 10 can still accumulate strikes + assert_ok!(Society::bid(Origin::signed(20), 0)); + run_to_block(8); + assert_eq!(Strikes::::get(10), 1); + 
assert_ok!(Society::bid(Origin::signed(30), 0)); + run_to_block(16); + assert_eq!(Strikes::::get(10), 2); + // Awkwardly they can obtain more than MAX_STRIKES... + assert_ok!(Society::bid(Origin::signed(40), 0)); + run_to_block(24); + assert_eq!(Strikes::::get(10), 3); + + // Replace the head + assert_ok!(Society::bid(Origin::signed(50), 0)); + run_to_block(28); + assert_ok!(Society::vote(Origin::signed(10), 50, true)); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + run_to_block(32); + assert_eq!(Society::members(), vec![10, 50]); + assert_eq!(Society::head(), Some(50)); + // Founder is unchanged + assert_eq!(Society::founder(), Some(10)); + + // 50 can still accumulate strikes + assert_ok!(Society::bid(Origin::signed(60), 0)); + run_to_block(40); + assert_eq!(Strikes::::get(50), 1); + assert_ok!(Society::bid(Origin::signed(70), 0)); + run_to_block(48); + assert_eq!(Strikes::::get(50), 2); + + // Replace the head + assert_ok!(Society::bid(Origin::signed(80), 0)); + run_to_block(52); + assert_ok!(Society::vote(Origin::signed(10), 80, true)); + assert_ok!(Society::vote(Origin::signed(50), 80, true)); + assert_ok!(Society::defender_vote(Origin::signed(10), true)); // Keep defender around + run_to_block(56); + assert_eq!(Society::members(), vec![10, 50, 80]); + assert_eq!(Society::head(), Some(80)); + assert_eq!(Society::founder(), Some(10)); + + // 50 can now be suspended for strikes + assert_ok!(Society::bid(Origin::signed(90), 0)); + run_to_block(60); + // The candidate is rejected, so voting approve will give a strike + assert_ok!(Society::vote(Origin::signed(50), 90, true)); + run_to_block(64); + assert_eq!(Strikes::::get(50), 0); + assert_eq!(>::get(50), true); + assert_eq!(Society::members(), vec![10, 80]); + }); } #[test] fn challenges_work() { - EnvBuilder::new().execute(|| { - // Add some members - assert_ok!(Society::add_member(&20)); - assert_ok!(Society::add_member(&30)); - assert_ok!(Society::add_member(&40)); - 
// Votes are empty - assert_eq!(>::get(10), None); - assert_eq!(>::get(20), None); - assert_eq!(>::get(30), None); - assert_eq!(>::get(40), None); - // Check starting point - assert_eq!(Society::members(), vec![10, 20, 30, 40]); - assert_eq!(Society::defender(), None); - // 20 will be challenged during the challenge rotation - run_to_block(8); - assert_eq!(Society::defender(), Some(30)); - // They can always free vote for themselves - assert_ok!(Society::defender_vote(Origin::signed(30), true)); - // If no one else votes, nothing happens - run_to_block(16); - assert_eq!(Society::members(), vec![10, 20, 30, 40]); - // New challenge period - assert_eq!(Society::defender(), Some(30)); - // Non-member cannot challenge - assert_noop!(Society::defender_vote(Origin::signed(1), true), Error::::NotMember); - // 3 people say accept, 1 reject - assert_ok!(Society::defender_vote(Origin::signed(10), true)); - assert_ok!(Society::defender_vote(Origin::signed(20), true)); - assert_ok!(Society::defender_vote(Origin::signed(30), true)); - assert_ok!(Society::defender_vote(Origin::signed(40), false)); - run_to_block(24); - // 20 survives - assert_eq!(Society::members(), vec![10, 20, 30, 40]); - // Votes are reset - assert_eq!(>::get(10), None); - assert_eq!(>::get(20), None); - assert_eq!(>::get(30), None); - assert_eq!(>::get(40), None); - // One more time - assert_eq!(Society::defender(), Some(30)); - // 2 people say accept, 2 reject - assert_ok!(Society::defender_vote(Origin::signed(10), true)); - assert_ok!(Society::defender_vote(Origin::signed(20), true)); - assert_ok!(Society::defender_vote(Origin::signed(30), false)); - assert_ok!(Society::defender_vote(Origin::signed(40), false)); - run_to_block(32); - // 20 is suspended - assert_eq!(Society::members(), vec![10, 20, 40]); - assert_eq!(Society::suspended_member(30), true); - // New defender is chosen - assert_eq!(Society::defender(), Some(20)); - // Votes are reset - assert_eq!(>::get(10), None); - assert_eq!(>::get(20), 
None); - assert_eq!(>::get(30), None); - assert_eq!(>::get(40), None); - }); + EnvBuilder::new().execute(|| { + // Add some members + assert_ok!(Society::add_member(&20)); + assert_ok!(Society::add_member(&30)); + assert_ok!(Society::add_member(&40)); + // Votes are empty + assert_eq!(>::get(10), None); + assert_eq!(>::get(20), None); + assert_eq!(>::get(30), None); + assert_eq!(>::get(40), None); + // Check starting point + assert_eq!(Society::members(), vec![10, 20, 30, 40]); + assert_eq!(Society::defender(), None); + // 20 will be challenged during the challenge rotation + run_to_block(8); + assert_eq!(Society::defender(), Some(30)); + // They can always free vote for themselves + assert_ok!(Society::defender_vote(Origin::signed(30), true)); + // If no one else votes, nothing happens + run_to_block(16); + assert_eq!(Society::members(), vec![10, 20, 30, 40]); + // New challenge period + assert_eq!(Society::defender(), Some(30)); + // Non-member cannot challenge + assert_noop!( + Society::defender_vote(Origin::signed(1), true), + Error::::NotMember + ); + // 3 people say accept, 1 reject + assert_ok!(Society::defender_vote(Origin::signed(10), true)); + assert_ok!(Society::defender_vote(Origin::signed(20), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), true)); + assert_ok!(Society::defender_vote(Origin::signed(40), false)); + run_to_block(24); + // 20 survives + assert_eq!(Society::members(), vec![10, 20, 30, 40]); + // Votes are reset + assert_eq!(>::get(10), None); + assert_eq!(>::get(20), None); + assert_eq!(>::get(30), None); + assert_eq!(>::get(40), None); + // One more time + assert_eq!(Society::defender(), Some(30)); + // 2 people say accept, 2 reject + assert_ok!(Society::defender_vote(Origin::signed(10), true)); + assert_ok!(Society::defender_vote(Origin::signed(20), true)); + assert_ok!(Society::defender_vote(Origin::signed(30), false)); + assert_ok!(Society::defender_vote(Origin::signed(40), false)); + run_to_block(32); + // 20 is 
suspended + assert_eq!(Society::members(), vec![10, 20, 40]); + assert_eq!(Society::suspended_member(30), true); + // New defender is chosen + assert_eq!(Society::defender(), Some(20)); + // Votes are reset + assert_eq!(>::get(10), None); + assert_eq!(>::get(20), None); + assert_eq!(>::get(30), None); + assert_eq!(>::get(40), None); + }); } #[test] fn bad_vote_slash_works() { - EnvBuilder::new().execute(|| { - // Add some members - assert_ok!(Society::add_member(&20)); - assert_ok!(Society::add_member(&30)); - assert_ok!(Society::add_member(&40)); - // Create some payouts - Society::bump_payout(&10, 5, 100); - Society::bump_payout(&20, 5, 100); - Society::bump_payout(&30, 5, 100); - Society::bump_payout(&40, 5, 100); - // Check starting point - assert_eq!(Society::members(), vec![10, 20, 30, 40]); - assert_eq!(>::get(10), vec![(5, 100)]); - assert_eq!(>::get(20), vec![(5, 100)]); - assert_eq!(>::get(30), vec![(5, 100)]); - assert_eq!(>::get(40), vec![(5, 100)]); - // Create a new bid - assert_ok!(Society::bid(Origin::signed(50), 1000)); - run_to_block(4); - assert_ok!(Society::vote(Origin::signed(10), 50, false)); - assert_ok!(Society::vote(Origin::signed(20), 50, true)); - assert_ok!(Society::vote(Origin::signed(30), 50, false)); - assert_ok!(Society::vote(Origin::signed(40), 50, false)); - run_to_block(8); - // Wrong voter gained a strike - assert_eq!(>::get(10), 0); - assert_eq!(>::get(20), 1); - assert_eq!(>::get(30), 0); - assert_eq!(>::get(40), 0); - // Their payout is slashed, a random person is rewarded - assert_eq!(>::get(10), vec![(5, 100), (9,2)]); - assert_eq!(>::get(20), vec![(5, 98)]); - assert_eq!(>::get(30), vec![(5, 100)]); - assert_eq!(>::get(40), vec![(5, 100)]); - }); + EnvBuilder::new().execute(|| { + // Add some members + assert_ok!(Society::add_member(&20)); + assert_ok!(Society::add_member(&30)); + assert_ok!(Society::add_member(&40)); + // Create some payouts + Society::bump_payout(&10, 5, 100); + Society::bump_payout(&20, 5, 100); + 
Society::bump_payout(&30, 5, 100); + Society::bump_payout(&40, 5, 100); + // Check starting point + assert_eq!(Society::members(), vec![10, 20, 30, 40]); + assert_eq!(>::get(10), vec![(5, 100)]); + assert_eq!(>::get(20), vec![(5, 100)]); + assert_eq!(>::get(30), vec![(5, 100)]); + assert_eq!(>::get(40), vec![(5, 100)]); + // Create a new bid + assert_ok!(Society::bid(Origin::signed(50), 1000)); + run_to_block(4); + assert_ok!(Society::vote(Origin::signed(10), 50, false)); + assert_ok!(Society::vote(Origin::signed(20), 50, true)); + assert_ok!(Society::vote(Origin::signed(30), 50, false)); + assert_ok!(Society::vote(Origin::signed(40), 50, false)); + run_to_block(8); + // Wrong voter gained a strike + assert_eq!(>::get(10), 0); + assert_eq!(>::get(20), 1); + assert_eq!(>::get(30), 0); + assert_eq!(>::get(40), 0); + // Their payout is slashed, a random person is rewarded + assert_eq!(>::get(10), vec![(5, 100), (9, 2)]); + assert_eq!(>::get(20), vec![(5, 98)]); + assert_eq!(>::get(30), vec![(5, 100)]); + assert_eq!(>::get(40), vec![(5, 100)]); + }); } #[test] fn user_cannot_bid_twice() { - EnvBuilder::new().execute(|| { - // Cannot bid twice - assert_ok!(Society::bid(Origin::signed(20), 100)); - assert_noop!(Society::bid(Origin::signed(20), 100), Error::::AlreadyBid); - // Cannot bid when vouched - assert_ok!(Society::vouch(Origin::signed(10), 30, 100, 100)); - assert_noop!(Society::bid(Origin::signed(30), 100), Error::::AlreadyBid); - // Cannot vouch when already bid - assert_ok!(Society::add_member(&50)); - assert_noop!(Society::vouch(Origin::signed(50), 20, 100, 100), Error::::AlreadyBid); - }); + EnvBuilder::new().execute(|| { + // Cannot bid twice + assert_ok!(Society::bid(Origin::signed(20), 100)); + assert_noop!( + Society::bid(Origin::signed(20), 100), + Error::::AlreadyBid + ); + // Cannot bid when vouched + assert_ok!(Society::vouch(Origin::signed(10), 30, 100, 100)); + assert_noop!( + Society::bid(Origin::signed(30), 100), + Error::::AlreadyBid + ); + // 
Cannot vouch when already bid + assert_ok!(Society::add_member(&50)); + assert_noop!( + Society::vouch(Origin::signed(50), 20, 100, 100), + Error::::AlreadyBid + ); + }); } #[test] fn vouching_handles_removed_member_with_bid() { - EnvBuilder::new().execute(|| { - // Add a member - assert_ok!(Society::add_member(&20)); - // Have that member vouch for a user - assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); - // That user is now a bid and the member is vouching - assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); - // Suspend that member - Society::suspend_member(&20); - assert_eq!(>::get(20), true); - // Nothing changes yet - assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); - // Remove member - assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); - // Bid is removed, vouching status is removed - assert_eq!(>::get(), vec![]); - assert_eq!(>::get(20), None); - }); + EnvBuilder::new().execute(|| { + // Add a member + assert_ok!(Society::add_member(&20)); + // Have that member vouch for a user + assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); + // That user is now a bid and the member is vouching + assert_eq!( + >::get(), + vec![create_bid(1000, 30, BidKind::Vouch(20, 100))] + ); + assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); + // Suspend that member + Society::suspend_member(&20); + assert_eq!(>::get(20), true); + // Nothing changes yet + assert_eq!( + >::get(), + vec![create_bid(1000, 30, BidKind::Vouch(20, 100))] + ); + assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); + // Remove member + assert_ok!(Society::judge_suspended_member( + Origin::signed(2), + 20, + false + )); + // Bid is removed, vouching status is removed + assert_eq!(>::get(), vec![]); + assert_eq!(>::get(20), None); + }); } #[test] fn 
vouching_handles_removed_member_with_candidate() { - EnvBuilder::new().execute(|| { - // Add a member - assert_ok!(Society::add_member(&20)); - // Have that member vouch for a user - assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); - // That user is now a bid and the member is vouching - assert_eq!(>::get(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); - // Make that bid a candidate - run_to_block(4); - assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - // Suspend that member - Society::suspend_member(&20); - assert_eq!(>::get(20), true); - // Nothing changes yet - assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); - // Remove member - assert_ok!(Society::judge_suspended_member(Origin::signed(2), 20, false)); - // Vouching status is removed, but candidate is still in the queue - assert_eq!(>::get(20), None); - assert_eq!(Society::candidates(), vec![create_bid(1000, 30, BidKind::Vouch(20, 100))]); - // Candidate wins - assert_ok!(Society::vote(Origin::signed(10), 30, true)); - run_to_block(8); - assert_eq!(Society::members(), vec![10, 30]); - // Payout does not go to removed member - assert_eq!(>::get(20), vec![]); - assert_eq!(>::get(30), vec![(9, 1000)]); - }); + EnvBuilder::new().execute(|| { + // Add a member + assert_ok!(Society::add_member(&20)); + // Have that member vouch for a user + assert_ok!(Society::vouch(Origin::signed(20), 30, 1000, 100)); + // That user is now a bid and the member is vouching + assert_eq!( + >::get(), + vec![create_bid(1000, 30, BidKind::Vouch(20, 100))] + ); + assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); + // Make that bid a candidate + run_to_block(4); + assert_eq!( + Society::candidates(), + vec![create_bid(1000, 30, BidKind::Vouch(20, 100))] + ); + // Suspend that member + Society::suspend_member(&20); + 
assert_eq!(>::get(20), true); + // Nothing changes yet + assert_eq!( + Society::candidates(), + vec![create_bid(1000, 30, BidKind::Vouch(20, 100))] + ); + assert_eq!(>::get(20), Some(VouchingStatus::Vouching)); + // Remove member + assert_ok!(Society::judge_suspended_member( + Origin::signed(2), + 20, + false + )); + // Vouching status is removed, but candidate is still in the queue + assert_eq!(>::get(20), None); + assert_eq!( + Society::candidates(), + vec![create_bid(1000, 30, BidKind::Vouch(20, 100))] + ); + // Candidate wins + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + run_to_block(8); + assert_eq!(Society::members(), vec![10, 30]); + // Payout does not go to removed member + assert_eq!(>::get(20), vec![]); + assert_eq!(>::get(30), vec![(9, 1000)]); + }); } #[test] fn votes_are_working() { - EnvBuilder::new().execute(|| { - // Users make bids of various amounts - assert_ok!(Society::bid(Origin::signed(50), 500)); - assert_ok!(Society::bid(Origin::signed(40), 400)); - assert_ok!(Society::bid(Origin::signed(30), 300)); - // Rotate period - run_to_block(4); - // A member votes for these candidates to join the society - assert_ok!(Society::vote(Origin::signed(10), 30, true)); - assert_ok!(Society::vote(Origin::signed(10), 40, true)); - // You cannot vote for a non-candidate - assert_noop!(Society::vote(Origin::signed(10), 50, true), Error::::NotCandidate); - // Votes are stored - assert_eq!(>::get(30, 10), Some(Vote::Approve)); - assert_eq!(>::get(40, 10), Some(Vote::Approve)); - assert_eq!(>::get(50, 10), None); - run_to_block(8); - // Candidates become members after a period rotation - assert_eq!(Society::members(), vec![10, 30, 40]); - // Votes are cleaned up - assert_eq!(>::get(30, 10), None); - assert_eq!(>::get(40, 10), None); - }); + EnvBuilder::new().execute(|| { + // Users make bids of various amounts + assert_ok!(Society::bid(Origin::signed(50), 500)); + assert_ok!(Society::bid(Origin::signed(40), 400)); + 
assert_ok!(Society::bid(Origin::signed(30), 300)); + // Rotate period + run_to_block(4); + // A member votes for these candidates to join the society + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 40, true)); + // You cannot vote for a non-candidate + assert_noop!( + Society::vote(Origin::signed(10), 50, true), + Error::::NotCandidate + ); + // Votes are stored + assert_eq!(>::get(30, 10), Some(Vote::Approve)); + assert_eq!(>::get(40, 10), Some(Vote::Approve)); + assert_eq!(>::get(50, 10), None); + run_to_block(8); + // Candidates become members after a period rotation + assert_eq!(Society::members(), vec![10, 30, 40]); + // Votes are cleaned up + assert_eq!(>::get(30, 10), None); + assert_eq!(>::get(40, 10), None); + }); } #[test] fn max_limits_work() { - EnvBuilder::new().with_pot(100000).execute(|| { - // Max bids is 1000, when extra bids come in, it pops the larger ones off the stack. - // Try to put 1010 users into the bid pool - for i in (100..1110).rev() { - // Give them some funds - let _ = Balances::make_free_balance_be(&(i as u128), 1000); - assert_ok!(Society::bid(Origin::signed(i as u128), i)); - } - let bids = >::get(); - // Length is 1000 - assert_eq!(bids.len(), 1000); - // First bid is smallest number (100) - assert_eq!(bids[0], create_bid(100, 100, BidKind::Deposit(25))); - // Last bid is smallest number + 99 (1099) - assert_eq!(bids[999], create_bid(1099, 1099, BidKind::Deposit(25))); - // Rotate period - run_to_block(4); - // Max of 10 candidates - assert_eq!(Society::candidates().len(), 10); - // Fill up membership, max 100, we will do just 95 - for i in 2000..2095 { - assert_ok!(Society::add_member(&(i as u128))); - } - // Remember there was 1 original member, so 96 total - assert_eq!(Society::members().len(), 96); - // Rotate period - run_to_block(8); - // Only of 4 candidates possible now - assert_eq!(Society::candidates().len(), 4); - // Fill up members with suspended candidates from 
the first rotation - for i in 100..104 { - assert_ok!(Society::judge_suspended_candidate(Origin::signed(2), i, Judgement::Approve)); - } - assert_eq!(Society::members().len(), 100); - // Can't add any more members - assert_noop!(Society::add_member(&98), Error::::MaxMembers); - // However, a fringe scenario allows for in-progress candidates to increase the membership - // pool, but it has no real after-effects. - for i in Society::members().iter() { - assert_ok!(Society::vote(Origin::signed(*i), 110, true)); - assert_ok!(Society::vote(Origin::signed(*i), 111, true)); - assert_ok!(Society::vote(Origin::signed(*i), 112, true)); - } - // Rotate period - run_to_block(12); - // Members length is over 100, no problem... - assert_eq!(Society::members().len(), 103); - // No candidates because full - assert_eq!(Society::candidates().len(), 0); - // Increase member limit - assert_ok!(Society::set_max_members(Origin::ROOT, 200)); - // Rotate period - run_to_block(16); - // Candidates are back! - assert_eq!(Society::candidates().len(), 10); - }); + EnvBuilder::new().with_pot(100000).execute(|| { + // Max bids is 1000, when extra bids come in, it pops the larger ones off the stack. 
+ // Try to put 1010 users into the bid pool + for i in (100..1110).rev() { + // Give them some funds + let _ = Balances::make_free_balance_be(&(i as u128), 1000); + assert_ok!(Society::bid(Origin::signed(i as u128), i)); + } + let bids = >::get(); + // Length is 1000 + assert_eq!(bids.len(), 1000); + // First bid is smallest number (100) + assert_eq!(bids[0], create_bid(100, 100, BidKind::Deposit(25))); + // Last bid is smallest number + 99 (1099) + assert_eq!(bids[999], create_bid(1099, 1099, BidKind::Deposit(25))); + // Rotate period + run_to_block(4); + // Max of 10 candidates + assert_eq!(Society::candidates().len(), 10); + // Fill up membership, max 100, we will do just 95 + for i in 2000..2095 { + assert_ok!(Society::add_member(&(i as u128))); + } + // Remember there was 1 original member, so 96 total + assert_eq!(Society::members().len(), 96); + // Rotate period + run_to_block(8); + // Only of 4 candidates possible now + assert_eq!(Society::candidates().len(), 4); + // Fill up members with suspended candidates from the first rotation + for i in 100..104 { + assert_ok!(Society::judge_suspended_candidate( + Origin::signed(2), + i, + Judgement::Approve + )); + } + assert_eq!(Society::members().len(), 100); + // Can't add any more members + assert_noop!(Society::add_member(&98), Error::::MaxMembers); + // However, a fringe scenario allows for in-progress candidates to increase the membership + // pool, but it has no real after-effects. + for i in Society::members().iter() { + assert_ok!(Society::vote(Origin::signed(*i), 110, true)); + assert_ok!(Society::vote(Origin::signed(*i), 111, true)); + assert_ok!(Society::vote(Origin::signed(*i), 112, true)); + } + // Rotate period + run_to_block(12); + // Members length is over 100, no problem... 
+ assert_eq!(Society::members().len(), 103); + // No candidates because full + assert_eq!(Society::candidates().len(), 0); + // Increase member limit + assert_ok!(Society::set_max_members(Origin::ROOT, 200)); + // Rotate period + run_to_block(16); + // Candidates are back! + assert_eq!(Society::candidates().len(), 10); + }); } #[test] fn zero_bid_works() { - // This tests: - // * Only one zero bid is selected. - // * That zero bid is placed as head when accepted. - EnvBuilder::new().execute(|| { - // Users make bids of various amounts - assert_ok!(Society::bid(Origin::signed(60), 400)); - assert_ok!(Society::bid(Origin::signed(50), 300)); - assert_ok!(Society::bid(Origin::signed(30), 0)); - assert_ok!(Society::bid(Origin::signed(20), 0)); - assert_ok!(Society::bid(Origin::signed(40), 0)); - - // Rotate period - run_to_block(4); - // Pot is 1000 after "PeriodSpend" - assert_eq!(Society::pot(), 1000); - assert_eq!(Balances::free_balance(Society::account_id()), 10_000); - // Choose smallest bidding users whose total is less than pot, with only one zero bid. - assert_eq!(Society::candidates(), vec![ - create_bid(0, 30, BidKind::Deposit(25)), - create_bid(300, 50, BidKind::Deposit(25)), - create_bid(400, 60, BidKind::Deposit(25)), - ]); - assert_eq!(>::get(), vec![ - create_bid(0, 20, BidKind::Deposit(25)), - create_bid(0, 40, BidKind::Deposit(25)), - ]); - // A member votes for these candidates to join the society - assert_ok!(Society::vote(Origin::signed(10), 30, true)); - assert_ok!(Society::vote(Origin::signed(10), 50, true)); - assert_ok!(Society::vote(Origin::signed(10), 60, true)); - run_to_block(8); - // Candidates become members after a period rotation - assert_eq!(Society::members(), vec![10, 30, 50, 60]); - // The zero bid is selected as head - assert_eq!(Society::head(), Some(30)); - }); + // This tests: + // * Only one zero bid is selected. + // * That zero bid is placed as head when accepted. 
+ EnvBuilder::new().execute(|| { + // Users make bids of various amounts + assert_ok!(Society::bid(Origin::signed(60), 400)); + assert_ok!(Society::bid(Origin::signed(50), 300)); + assert_ok!(Society::bid(Origin::signed(30), 0)); + assert_ok!(Society::bid(Origin::signed(20), 0)); + assert_ok!(Society::bid(Origin::signed(40), 0)); + + // Rotate period + run_to_block(4); + // Pot is 1000 after "PeriodSpend" + assert_eq!(Society::pot(), 1000); + assert_eq!(Balances::free_balance(Society::account_id()), 10_000); + // Choose smallest bidding users whose total is less than pot, with only one zero bid. + assert_eq!( + Society::candidates(), + vec![ + create_bid(0, 30, BidKind::Deposit(25)), + create_bid(300, 50, BidKind::Deposit(25)), + create_bid(400, 60, BidKind::Deposit(25)), + ] + ); + assert_eq!( + >::get(), + vec![ + create_bid(0, 20, BidKind::Deposit(25)), + create_bid(0, 40, BidKind::Deposit(25)), + ] + ); + // A member votes for these candidates to join the society + assert_ok!(Society::vote(Origin::signed(10), 30, true)); + assert_ok!(Society::vote(Origin::signed(10), 50, true)); + assert_ok!(Society::vote(Origin::signed(10), 60, true)); + run_to_block(8); + // Candidates become members after a period rotation + assert_eq!(Society::members(), vec![10, 30, 50, 60]); + // The zero bid is selected as head + assert_eq!(Society::head(), Some(30)); + }); } #[test] fn bids_ordered_correctly() { - // This tests that bids with the same value are placed in the list ordered - // with bidders who bid first earlier on the list. 
- EnvBuilder::new().execute(|| { - for i in 0..5 { - for j in 0..5 { - // Give them some funds - let _ = Balances::make_free_balance_be(&(100 + (i * 5 + j) as u128), 1000); - assert_ok!(Society::bid(Origin::signed(100 + (i * 5 + j) as u128), j)); - } - } - - let mut final_list = Vec::new(); - - for j in 0..5 { - for i in 0..5 { - final_list.push(create_bid(j, 100 + (i * 5 + j) as u128, BidKind::Deposit(25))); - } - } - - assert_eq!(>::get(), final_list); - }); + // This tests that bids with the same value are placed in the list ordered + // with bidders who bid first earlier on the list. + EnvBuilder::new().execute(|| { + for i in 0..5 { + for j in 0..5 { + // Give them some funds + let _ = Balances::make_free_balance_be(&(100 + (i * 5 + j) as u128), 1000); + assert_ok!(Society::bid(Origin::signed(100 + (i * 5 + j) as u128), j)); + } + } + + let mut final_list = Vec::new(); + + for j in 0..5 { + for i in 0..5 { + final_list.push(create_bid( + j, + 100 + (i * 5 + j) as u128, + BidKind::Deposit(25), + )); + } + } + + assert_eq!(>::get(), final_list); + }); } diff --git a/frame/staking/reward-curve/src/lib.rs b/frame/staking/reward-curve/src/lib.rs index d000afc49b..2f681d4924 100644 --- a/frame/staking/reward-curve/src/lib.rs +++ b/frame/staking/reward-curve/src/lib.rs @@ -20,7 +20,7 @@ mod log; use log::log2; use proc_macro::TokenStream; -use proc_macro2::{TokenStream as TokenStream2, Span}; +use proc_macro2::{Span, TokenStream as TokenStream2}; use proc_macro_crate::crate_name; use quote::{quote, ToTokens}; use std::convert::TryInto; @@ -73,321 +73,352 @@ use syn::parse::{Parse, ParseStream}; /// ``` #[proc_macro] pub fn build(input: TokenStream) -> TokenStream { - let input = syn::parse_macro_input!(input as INposInput); - - let points = compute_points(&input); - - let declaration = generate_piecewise_linear(points); - let test_module = generate_test_module(&input); - - let imports = match crate_name("sp-runtime") { - Ok(sp_runtime) => { - let ident = 
syn::Ident::new(&sp_runtime, Span::call_site()); - quote!( extern crate #ident as _sp_runtime; ) - }, - Err(e) => syn::Error::new(Span::call_site(), &e).to_compile_error(), - }; - - let const_name = input.ident; - let const_type = input.typ; - - quote!( - const #const_name: #const_type = { - #imports - #declaration - }; - #test_module - ).into() + let input = syn::parse_macro_input!(input as INposInput); + + let points = compute_points(&input); + + let declaration = generate_piecewise_linear(points); + let test_module = generate_test_module(&input); + + let imports = match crate_name("sp-runtime") { + Ok(sp_runtime) => { + let ident = syn::Ident::new(&sp_runtime, Span::call_site()); + quote!( extern crate #ident as _sp_runtime; ) + } + Err(e) => syn::Error::new(Span::call_site(), &e).to_compile_error(), + }; + + let const_name = input.ident; + let const_type = input.typ; + + quote!( + const #const_name: #const_type = { + #imports + #declaration + }; + #test_module + ) + .into() } const MILLION: u32 = 1_000_000; mod keyword { - syn::custom_keyword!(curve); - syn::custom_keyword!(min_inflation); - syn::custom_keyword!(max_inflation); - syn::custom_keyword!(ideal_stake); - syn::custom_keyword!(falloff); - syn::custom_keyword!(max_piece_count); - syn::custom_keyword!(test_precision); + syn::custom_keyword!(curve); + syn::custom_keyword!(min_inflation); + syn::custom_keyword!(max_inflation); + syn::custom_keyword!(ideal_stake); + syn::custom_keyword!(falloff); + syn::custom_keyword!(max_piece_count); + syn::custom_keyword!(test_precision); } struct INposInput { - ident: syn::Ident, - typ: syn::Type, - min_inflation: u32, - ideal_stake: u32, - max_inflation: u32, - falloff: u32, - max_piece_count: u32, - test_precision: u32, + ident: syn::Ident, + typ: syn::Type, + min_inflation: u32, + ideal_stake: u32, + max_inflation: u32, + falloff: u32, + max_piece_count: u32, + test_precision: u32, } struct Bounds { - min: u32, - min_strict: bool, - max: u32, - max_strict: bool, + 
min: u32, + min_strict: bool, + max: u32, + max_strict: bool, } impl Bounds { - fn check(&self, value: u32) -> bool { - let wrong = (self.min_strict && value <= self.min) - || (!self.min_strict && value < self.min) - || (self.max_strict && value >= self.max) - || (!self.max_strict && value > self.max); - - !wrong - } + fn check(&self, value: u32) -> bool { + let wrong = (self.min_strict && value <= self.min) + || (!self.min_strict && value < self.min) + || (self.max_strict && value >= self.max) + || (!self.max_strict && value > self.max); + + !wrong + } } impl core::fmt::Display for Bounds { - fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { - write!( - f, - "{}{:07}; {:07}{}", - if self.min_strict { "]" } else { "[" }, - self.min, - self.max, - if self.max_strict { "[" } else { "]" }, - ) - } + fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result { + write!( + f, + "{}{:07}; {:07}{}", + if self.min_strict { "]" } else { "[" }, + self.min, + self.max, + if self.max_strict { "[" } else { "]" }, + ) + } } -fn parse_field(input: ParseStream, bounds: Bounds) - -> syn::Result -{ - ::parse(&input)?; - ::parse(&input)?; - let value_lit = syn::LitInt::parse(&input)?; - let value: u32 = value_lit.base10_parse()?; - if !bounds.check(value) { - return Err(syn::Error::new(value_lit.span(), format!( - "Invalid {}: {}, must be in {}", Token::default().to_token_stream(), value, bounds, - ))); - } - - Ok(value) +fn parse_field( + input: ParseStream, + bounds: Bounds, +) -> syn::Result { + ::parse(&input)?; + ::parse(&input)?; + let value_lit = syn::LitInt::parse(&input)?; + let value: u32 = value_lit.base10_parse()?; + if !bounds.check(value) { + return Err(syn::Error::new( + value_lit.span(), + format!( + "Invalid {}: {}, must be in {}", + Token::default().to_token_stream(), + value, + bounds, + ), + )); + } + + Ok(value) } impl Parse for INposInput { - fn parse(input: ParseStream) -> syn::Result { - let args_input; - - ::parse(&input)?; - 
let ident = ::parse(&input)?; - ::parse(&input)?; - let typ = ::parse(&input)?; - ::parse(&input)?; - ::parse(&input)?; - ::parse(&input)?; - syn::parenthesized!(args_input in input); - ::parse(&input)?; - - if !input.is_empty() { - return Err(input.error("expected end of input stream, no token expected")); - } - - let min_inflation = parse_field::(&args_input, Bounds { - min: 0, - min_strict: true, - max: 1_000_000, - max_strict: false, - })?; - ::parse(&args_input)?; - let max_inflation = parse_field::(&args_input, Bounds { - min: min_inflation, - min_strict: true, - max: 1_000_000, - max_strict: false, - })?; - ::parse(&args_input)?; - let ideal_stake = parse_field::(&args_input, Bounds { - min: 0_100_000, - min_strict: false, - max: 0_900_000, - max_strict: false, - })?; - ::parse(&args_input)?; - let falloff = parse_field::(&args_input, Bounds { - min: 0_010_000, - min_strict: false, - max: 1_000_000, - max_strict: false, - })?; - ::parse(&args_input)?; - let max_piece_count = parse_field::(&args_input, Bounds { - min: 2, - min_strict: false, - max: 1_000, - max_strict: false, - })?; - ::parse(&args_input)?; - let test_precision = parse_field::(&args_input, Bounds { - min: 0, - min_strict: false, - max: 1_000_000, - max_strict: false, - })?; - >::parse(&args_input)?; - - if !args_input.is_empty() { - return Err(args_input.error("expected end of input stream, no token expected")); - } - - Ok(Self { - ident, - typ, - min_inflation, - ideal_stake, - max_inflation, - falloff, - max_piece_count, - test_precision, - }) - } + fn parse(input: ParseStream) -> syn::Result { + let args_input; + + ::parse(&input)?; + let ident = ::parse(&input)?; + ::parse(&input)?; + let typ = ::parse(&input)?; + ::parse(&input)?; + ::parse(&input)?; + ::parse(&input)?; + syn::parenthesized!(args_input in input); + ::parse(&input)?; + + if !input.is_empty() { + return Err(input.error("expected end of input stream, no token expected")); + } + + let min_inflation = parse_field::( + 
&args_input, + Bounds { + min: 0, + min_strict: true, + max: 1_000_000, + max_strict: false, + }, + )?; + ::parse(&args_input)?; + let max_inflation = parse_field::( + &args_input, + Bounds { + min: min_inflation, + min_strict: true, + max: 1_000_000, + max_strict: false, + }, + )?; + ::parse(&args_input)?; + let ideal_stake = parse_field::( + &args_input, + Bounds { + min: 0_100_000, + min_strict: false, + max: 0_900_000, + max_strict: false, + }, + )?; + ::parse(&args_input)?; + let falloff = parse_field::( + &args_input, + Bounds { + min: 0_010_000, + min_strict: false, + max: 1_000_000, + max_strict: false, + }, + )?; + ::parse(&args_input)?; + let max_piece_count = parse_field::( + &args_input, + Bounds { + min: 2, + min_strict: false, + max: 1_000, + max_strict: false, + }, + )?; + ::parse(&args_input)?; + let test_precision = parse_field::( + &args_input, + Bounds { + min: 0, + min_strict: false, + max: 1_000_000, + max_strict: false, + }, + )?; + >::parse(&args_input)?; + + if !args_input.is_empty() { + return Err(args_input.error("expected end of input stream, no token expected")); + } + + Ok(Self { + ident, + typ, + min_inflation, + ideal_stake, + max_inflation, + falloff, + max_piece_count, + test_precision, + }) + } } struct INPoS { - i_0: u32, - i_ideal_times_x_ideal: u32, - i_ideal: u32, - x_ideal: u32, - d: u32, + i_0: u32, + i_ideal_times_x_ideal: u32, + i_ideal: u32, + x_ideal: u32, + d: u32, } impl INPoS { - fn from_input(input: &INposInput) -> Self { - INPoS { - i_0: input.min_inflation, - i_ideal: (input.max_inflation as u64 * MILLION as u64 / input.ideal_stake as u64) - .try_into().unwrap(), - i_ideal_times_x_ideal: input.max_inflation, - x_ideal: input.ideal_stake, - d: input.falloff, - } - } - - fn compute_opposite_after_x_ideal(&self, y: u32) -> u32 { - if y == self.i_0 { - return u32::max_value(); - } - let log = log2(self.i_ideal_times_x_ideal - self.i_0, y - self.i_0); - - let term: u32 = ((self.d as u64 * log as u64) / 
1_000_000).try_into().unwrap(); - - self.x_ideal + term - } + fn from_input(input: &INposInput) -> Self { + INPoS { + i_0: input.min_inflation, + i_ideal: (input.max_inflation as u64 * MILLION as u64 / input.ideal_stake as u64) + .try_into() + .unwrap(), + i_ideal_times_x_ideal: input.max_inflation, + x_ideal: input.ideal_stake, + d: input.falloff, + } + } + + fn compute_opposite_after_x_ideal(&self, y: u32) -> u32 { + if y == self.i_0 { + return u32::max_value(); + } + let log = log2(self.i_ideal_times_x_ideal - self.i_0, y - self.i_0); + + let term: u32 = ((self.d as u64 * log as u64) / 1_000_000) + .try_into() + .unwrap(); + + self.x_ideal + term + } } fn compute_points(input: &INposInput) -> Vec<(u32, u32)> { - let inpos = INPoS::from_input(input); + let inpos = INPoS::from_input(input); - let mut points = vec![]; - points.push((0, inpos.i_0)); - points.push((inpos.x_ideal, inpos.i_ideal_times_x_ideal)); + let mut points = vec![]; + points.push((0, inpos.i_0)); + points.push((inpos.x_ideal, inpos.i_ideal_times_x_ideal)); - // For each point p: (next_p.0 - p.0) < segment_length && (next_p.1 - p.1) < segment_length. - // This ensures that the total number of segment doesn't overflow max_piece_count. - let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) - / (input.max_piece_count - 1); + // For each point p: (next_p.0 - p.0) < segment_length && (next_p.1 - p.1) < segment_length. + // This ensures that the total number of segment doesn't overflow max_piece_count. + let max_length = (input.max_inflation - input.min_inflation + 1_000_000 - inpos.x_ideal) + / (input.max_piece_count - 1); - let mut delta_y = max_length; - let mut y = input.max_inflation; + let mut delta_y = max_length; + let mut y = input.max_inflation; - // The algorithm divide the curve in segment with vertical len and horizontal len less - // than `max_length`. This is not very accurate in case of very consequent steep. 
- while delta_y != 0 { - let next_y = y - delta_y; + // The algorithm divide the curve in segment with vertical len and horizontal len less + // than `max_length`. This is not very accurate in case of very consequent steep. + while delta_y != 0 { + let next_y = y - delta_y; - if next_y <= input.min_inflation { - delta_y = delta_y.saturating_sub(1); - continue - } + if next_y <= input.min_inflation { + delta_y = delta_y.saturating_sub(1); + continue; + } - let next_x = inpos.compute_opposite_after_x_ideal(next_y); + let next_x = inpos.compute_opposite_after_x_ideal(next_y); - if (next_x - points.last().unwrap().0) > max_length { - delta_y = delta_y.saturating_sub(1); - continue - } + if (next_x - points.last().unwrap().0) > max_length { + delta_y = delta_y.saturating_sub(1); + continue; + } - if next_x >= 1_000_000 { - let prev = points.last().unwrap(); - // Compute the y corresponding to x=1_000_000 using the this point and the previous one. + if next_x >= 1_000_000 { + let prev = points.last().unwrap(); + // Compute the y corresponding to x=1_000_000 using the this point and the previous one. 
- let delta_y: u32 = ( - (next_x - 1_000_000) as u64 - * (prev.1 - next_y) as u64 - / (next_x - prev.0) as u64 - ).try_into().unwrap(); + let delta_y: u32 = ((next_x - 1_000_000) as u64 * (prev.1 - next_y) as u64 + / (next_x - prev.0) as u64) + .try_into() + .unwrap(); - let y = next_y + delta_y; + let y = next_y + delta_y; - points.push((1_000_000, y)); - return points; - } - points.push((next_x, next_y)); - y = next_y; - } + points.push((1_000_000, y)); + return points; + } + points.push((next_x, next_y)); + y = next_y; + } - points.push((1_000_000, inpos.i_0)); + points.push((1_000_000, inpos.i_0)); - points + points } fn generate_piecewise_linear(points: Vec<(u32, u32)>) -> TokenStream2 { - let mut points_tokens = quote!(); - - let max = points.iter() - .map(|&(_, x)| x) - .max() - .unwrap_or(0) - .checked_mul(1_000) - // clip at 1.0 for sanity only since it'll panic later if too high. - .unwrap_or(1_000_000_000); - - for (x, y) in points { - let error = || panic!(format!( - "Generated reward curve approximation doesn't fit into [0, 1] -> [0, 1] \ + let mut points_tokens = quote!(); + + let max = points + .iter() + .map(|&(_, x)| x) + .max() + .unwrap_or(0) + .checked_mul(1_000) + // clip at 1.0 for sanity only since it'll panic later if too high. 
+ .unwrap_or(1_000_000_000); + + for (x, y) in points { + let error = || { + panic!(format!( + "Generated reward curve approximation doesn't fit into [0, 1] -> [0, 1] \ because of point: x = {:07} per million y = {:07} per million", - x, y - )); - - let x_perbill = x.checked_mul(1_000).unwrap_or_else(error); - let y_perbill = y.checked_mul(1_000).unwrap_or_else(error); - - points_tokens.extend(quote!( - ( - _sp_runtime::Perbill::from_parts(#x_perbill), - _sp_runtime::Perbill::from_parts(#y_perbill), - ), - )); - } - - quote!( - _sp_runtime::curve::PiecewiseLinear::<'static> { - points: & [ #points_tokens ], - maximum: _sp_runtime::Perbill::from_parts(#max), - } - ) + x, y + )) + }; + + let x_perbill = x.checked_mul(1_000).unwrap_or_else(error); + let y_perbill = y.checked_mul(1_000).unwrap_or_else(error); + + points_tokens.extend(quote!( + ( + _sp_runtime::Perbill::from_parts(#x_perbill), + _sp_runtime::Perbill::from_parts(#y_perbill), + ), + )); + } + + quote!( + _sp_runtime::curve::PiecewiseLinear::<'static> { + points: & [ #points_tokens ], + maximum: _sp_runtime::Perbill::from_parts(#max), + } + ) } fn generate_test_module(input: &INposInput) -> TokenStream2 { - let inpos = INPoS::from_input(input); - - let ident = &input.ident; - let precision = input.test_precision; - let i_0 = inpos.i_0 as f64/ MILLION as f64; - let i_ideal_times_x_ideal = inpos.i_ideal_times_x_ideal as f64 / MILLION as f64; - let i_ideal = inpos.i_ideal as f64 / MILLION as f64; - let x_ideal = inpos.x_ideal as f64 / MILLION as f64; - let d = inpos.d as f64 / MILLION as f64; - let max_piece_count = input.max_piece_count; - - quote!( + let inpos = INPoS::from_input(input); + + let ident = &input.ident; + let precision = input.test_precision; + let i_0 = inpos.i_0 as f64 / MILLION as f64; + let i_ideal_times_x_ideal = inpos.i_ideal_times_x_ideal as f64 / MILLION as f64; + let i_ideal = inpos.i_ideal as f64 / MILLION as f64; + let x_ideal = inpos.x_ideal as f64 / MILLION as f64; + let d = 
inpos.d as f64 / MILLION as f64; + let max_piece_count = input.max_piece_count; + + quote!( #[cfg(test)] mod __pallet_staking_reward_curve_test_module { fn i_npos(x: f64) -> f64 { diff --git a/frame/staking/reward-curve/src/log.rs b/frame/staking/reward-curve/src/log.rs index e0929a9597..c9d8fcab07 100644 --- a/frame/staking/reward-curve/src/log.rs +++ b/frame/staking/reward-curve/src/log.rs @@ -2,69 +2,74 @@ use std::convert::TryInto; /// Return Per-million value. pub fn log2(p: u32, q: u32) -> u32 { - assert!(p >= q); - assert!(p <= u32::max_value()/2); + assert!(p >= q); + assert!(p <= u32::max_value() / 2); - // This restriction should not be mandatory. But function is only tested and used for this. - assert!(p <= 1_000_000); - assert!(q <= 1_000_000); + // This restriction should not be mandatory. But function is only tested and used for this. + assert!(p <= 1_000_000); + assert!(q <= 1_000_000); - if p == q { - return 0 - } + if p == q { + return 0; + } - let mut n = 0u32; - while !(p >= (1u32 << n)*q) || !(p < (1u32 << (n+1))*q) { - n += 1; - } - assert!(p < (1u32 << (n+1)) * q); + let mut n = 0u32; + while !(p >= (1u32 << n) * q) || !(p < (1u32 << (n + 1)) * q) { + n += 1; + } + assert!(p < (1u32 << (n + 1)) * q); - let y_num: u32 = (p - (1u32 << n) * q).try_into().unwrap(); - let y_den: u32 = (p + (1u32 << n) * q).try_into().unwrap(); + let y_num: u32 = (p - (1u32 << n) * q).try_into().unwrap(); + let y_den: u32 = (p + (1u32 << n) * q).try_into().unwrap(); - let _2_div_ln_2 = 2_885_390u32; + let _2_div_ln_2 = 2_885_390u32; - let taylor_term = |k: u32| -> u32 { - if k == 0 { - (_2_div_ln_2 as u128 * (y_num as u128).pow(1) / (y_den as u128).pow(1)) - .try_into().unwrap() - } else { - let mut res = _2_div_ln_2 as u128 * (y_num as u128).pow(3) / (y_den as u128).pow(3); - for _ in 1..k { - res = res * (y_num as u128).pow(2) / (y_den as u128).pow(2); - } - res /= 2 * k as u128 + 1; + let taylor_term = |k: u32| -> u32 { + if k == 0 { + (_2_div_ln_2 as u128 * 
(y_num as u128).pow(1) / (y_den as u128).pow(1)) + .try_into() + .unwrap() + } else { + let mut res = _2_div_ln_2 as u128 * (y_num as u128).pow(3) / (y_den as u128).pow(3); + for _ in 1..k { + res = res * (y_num as u128).pow(2) / (y_den as u128).pow(2); + } + res /= 2 * k as u128 + 1; - res.try_into().unwrap() - } - }; + res.try_into().unwrap() + } + }; - let mut res = n * 1_000_000u32; - let mut k = 0; - loop { - let term = taylor_term(k); - if term == 0 { - break - } + let mut res = n * 1_000_000u32; + let mut k = 0; + loop { + let term = taylor_term(k); + if term == 0 { + break; + } - res += term; - k += 1; - } + res += term; + k += 1; + } - res + res } #[test] fn test_log() { - let div = 1_000; - for p in 0..=div { - for q in 1..=p { - let p: u32 = (1_000_000 as u64 * p as u64 / div as u64).try_into().unwrap(); - let q: u32 = (1_000_000 as u64 * q as u64 / div as u64).try_into().unwrap(); + let div = 1_000; + for p in 0..=div { + for q in 1..=p { + let p: u32 = (1_000_000 as u64 * p as u64 / div as u64) + .try_into() + .unwrap(); + let q: u32 = (1_000_000 as u64 * q as u64 / div as u64) + .try_into() + .unwrap(); - let res = - (log2(p, q) as i64); - let expected = ((q as f64 / p as f64).log(2.0) * 1_000_000 as f64).round() as i64; - assert!((res - expected).abs() <= 6); - } - } + let res = -(log2(p, q) as i64); + let expected = ((q as f64 / p as f64).log(2.0) * 1_000_000 as f64).round() as i64; + assert!((res - expected).abs() <= 6); + } + } } diff --git a/frame/staking/reward-curve/tests/test.rs b/frame/staking/reward-curve/tests/test.rs index 89f8653fe1..8e8f29fdef 100644 --- a/frame/staking/reward-curve/tests/test.rs +++ b/frame/staking/reward-curve/tests/test.rs @@ -18,27 +18,27 @@ //! See tests directory. mod test_small_falloff { - pallet_staking_reward_curve::build! 
{ - const REWARD_CURVE: sp_runtime::curve::PiecewiseLinear<'static> = curve!( - min_inflation: 0_020_000, - max_inflation: 0_200_000, - ideal_stake: 0_600_000, - falloff: 0_010_000, - max_piece_count: 200, - test_precision: 0_005_000, - ); - } + pallet_staking_reward_curve::build! { + const REWARD_CURVE: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_020_000, + max_inflation: 0_200_000, + ideal_stake: 0_600_000, + falloff: 0_010_000, + max_piece_count: 200, + test_precision: 0_005_000, + ); + } } mod test_big_falloff { - pallet_staking_reward_curve::build! { - const REWARD_CURVE: sp_runtime::curve::PiecewiseLinear<'static> = curve!( - min_inflation: 0_100_000, - max_inflation: 0_400_000, - ideal_stake: 0_400_000, - falloff: 1_000_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); - } + pallet_staking_reward_curve::build! { + const REWARD_CURVE: sp_runtime::curve::PiecewiseLinear<'static> = curve!( + min_inflation: 0_100_000, + max_inflation: 0_400_000, + ideal_stake: 0_400_000, + falloff: 1_000_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); + } } diff --git a/frame/staking/src/benchmarking.rs b/frame/staking/src/benchmarking.rs index 2686623aa1..8c0fd4ee99 100644 --- a/frame/staking/src/benchmarking.rs +++ b/frame/staking/src/benchmarking.rs @@ -18,13 +18,16 @@ use super::*; -use rand_chacha::{rand_core::{RngCore, SeedableRng}, ChaChaRng}; +use rand_chacha::{ + rand_core::{RngCore, SeedableRng}, + ChaChaRng, +}; -use sp_runtime::traits::{Dispatchable, One}; use sp_io::hashing::blake2_256; +use sp_runtime::traits::{Dispatchable, One}; +use frame_benchmarking::{account, benchmarks}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; use crate::Module as Staking; use frame_system::Module as System; @@ -32,426 +35,474 @@ use frame_system::Module as System; const SEED: u32 = 0; fn create_funded_user(string: &'static str, n: u32) -> T::AccountId { - let user = account(string, n, SEED); - let 
balance = T::Currency::minimum_balance() * 100.into(); - T::Currency::make_free_balance_be(&user, balance); - user + let user = account(string, n, SEED); + let balance = T::Currency::minimum_balance() * 100.into(); + T::Currency::make_free_balance_be(&user, balance); + user } -pub fn create_stash_controller(n: u32) -> Result<(T::AccountId, T::AccountId), &'static str> { - let stash = create_funded_user::("stash", n); - let controller = create_funded_user::("controller", n); - let controller_lookup: ::Source = T::Lookup::unlookup(controller.clone()); - let reward_destination = RewardDestination::Staked; - let amount = T::Currency::minimum_balance() * 10.into(); - Staking::::bond(RawOrigin::Signed(stash.clone()).into(), controller_lookup, amount, reward_destination)?; - return Ok((stash, controller)) +pub fn create_stash_controller( + n: u32, +) -> Result<(T::AccountId, T::AccountId), &'static str> { + let stash = create_funded_user::("stash", n); + let controller = create_funded_user::("controller", n); + let controller_lookup: ::Source = + T::Lookup::unlookup(controller.clone()); + let reward_destination = RewardDestination::Staked; + let amount = T::Currency::minimum_balance() * 10.into(); + Staking::::bond( + RawOrigin::Signed(stash.clone()).into(), + controller_lookup, + amount, + reward_destination, + )?; + return Ok((stash, controller)); } -fn create_validators(max: u32) -> Result::Source>, &'static str> { - let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); - for i in 0 .. 
max { - let (stash, controller) = create_stash_controller::(i)?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; - Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(stash); - validators.push(stash_lookup); - } - Ok(validators) +fn create_validators( + max: u32, +) -> Result::Source>, &'static str> { + let mut validators: Vec<::Source> = Vec::with_capacity(max as usize); + for i in 0..max { + let (stash, controller) = create_stash_controller::(i)?; + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate(RawOrigin::Signed(controller).into(), validator_prefs)?; + let stash_lookup: ::Source = T::Lookup::unlookup(stash); + validators.push(stash_lookup); + } + Ok(validators) } // This function generates v validators and n nominators who are randomly nominating up to MAX_NOMINATIONS. -pub fn create_validators_with_nominators_for_era(v: u32, n: u32) -> Result<(), &'static str> { - let mut validators: Vec<::Source> = Vec::with_capacity(v as usize); - let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); - - // Create v validators - for i in 0 .. v { - let (v_stash, v_controller) = create_stash_controller::(i)?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; - Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); - validators.push(stash_lookup.clone()); - } - - // Create n nominators - for j in 0 .. n { - let (_n_stash, n_controller) = create_stash_controller::(u32::max_value() - j)?; - - // Have them randomly validate - let mut available_validators = validators.clone(); - let mut selected_validators: Vec<::Source> = Vec::with_capacity(MAX_NOMINATIONS); - for _ in 0 .. 
v.min(MAX_NOMINATIONS as u32) { - let selected = rng.next_u32() as usize % available_validators.len(); - let validator = available_validators.remove(selected); - selected_validators.push(validator); - } - Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), selected_validators)?; - } - - ValidatorCount::put(v); - - Ok(()) +pub fn create_validators_with_nominators_for_era( + v: u32, + n: u32, +) -> Result<(), &'static str> { + let mut validators: Vec<::Source> = Vec::with_capacity(v as usize); + let mut rng = ChaChaRng::from_seed(SEED.using_encoded(blake2_256)); + + // Create v validators + for i in 0..v { + let (v_stash, v_controller) = create_stash_controller::(i)?; + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate( + RawOrigin::Signed(v_controller.clone()).into(), + validator_prefs, + )?; + let stash_lookup: ::Source = + T::Lookup::unlookup(v_stash.clone()); + validators.push(stash_lookup.clone()); + } + + // Create n nominators + for j in 0..n { + let (_n_stash, n_controller) = create_stash_controller::(u32::max_value() - j)?; + + // Have them randomly validate + let mut available_validators = validators.clone(); + let mut selected_validators: Vec<::Source> = + Vec::with_capacity(MAX_NOMINATIONS); + for _ in 0..v.min(MAX_NOMINATIONS as u32) { + let selected = rng.next_u32() as usize % available_validators.len(); + let validator = available_validators.remove(selected); + selected_validators.push(validator); + } + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + selected_validators, + )?; + } + + ValidatorCount::put(v); + + Ok(()) } // This function generates one validator being nominated by n nominators, and returns //the validator stash account. It also starts an era and creates pending payouts. 
-pub fn create_validator_with_nominators(n: u32, upper_bound: u32) -> Result { - let mut points_total = 0; - let mut points_individual = Vec::new(); - - MinimumValidatorCount::put(0); - - let (v_stash, v_controller) = create_stash_controller::(0)?; - let validator_prefs = ValidatorPrefs { - commission: Perbill::from_percent(50), - }; - Staking::::validate(RawOrigin::Signed(v_controller.clone()).into(), validator_prefs)?; - let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); - - points_total += 10; - points_individual.push((v_stash.clone(), 10)); - - // Give the validator n nominators, but keep total users in the system the same. - for i in 0 .. upper_bound { - let (_n_stash, n_controller) = create_stash_controller::(u32::max_value() - i)?; - if i < n { - Staking::::nominate(RawOrigin::Signed(n_controller.clone()).into(), vec![stash_lookup.clone()])?; - } - } - - ValidatorCount::put(1); - - // Start a new Era - let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); - - assert!(new_validators.len() == 1); - - // Give Era Points - let reward = EraRewardPoints:: { - total: points_total, - individual: points_individual.into_iter().collect(), - }; - - let current_era = CurrentEra::get().unwrap(); - ErasRewardPoints::::insert(current_era, reward); - - // Create reward pool - let total_payout = T::Currency::minimum_balance() * 1000.into(); - >::insert(current_era, total_payout); - - Ok(v_stash) +pub fn create_validator_with_nominators( + n: u32, + upper_bound: u32, +) -> Result { + let mut points_total = 0; + let mut points_individual = Vec::new(); + + MinimumValidatorCount::put(0); + + let (v_stash, v_controller) = create_stash_controller::(0)?; + let validator_prefs = ValidatorPrefs { + commission: Perbill::from_percent(50), + }; + Staking::::validate( + RawOrigin::Signed(v_controller.clone()).into(), + validator_prefs, + )?; + let stash_lookup: ::Source = T::Lookup::unlookup(v_stash.clone()); + + points_total += 10; + 
points_individual.push((v_stash.clone(), 10)); + + // Give the validator n nominators, but keep total users in the system the same. + for i in 0..upper_bound { + let (_n_stash, n_controller) = create_stash_controller::(u32::max_value() - i)?; + if i < n { + Staking::::nominate( + RawOrigin::Signed(n_controller.clone()).into(), + vec![stash_lookup.clone()], + )?; + } + } + + ValidatorCount::put(1); + + // Start a new Era + let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + + assert!(new_validators.len() == 1); + + // Give Era Points + let reward = EraRewardPoints:: { + total: points_total, + individual: points_individual.into_iter().collect(), + }; + + let current_era = CurrentEra::get().unwrap(); + ErasRewardPoints::::insert(current_era, reward); + + // Create reward pool + let total_payout = T::Currency::minimum_balance() * 1000.into(); + >::insert(current_era, total_payout); + + Ok(v_stash) } benchmarks! { - _{ - // User account seed - let u in 0 .. 1000 => (); - } - - bond { - let u in ...; - let stash = create_funded_user::("stash",u); - let controller = create_funded_user::("controller", u); - let controller_lookup: ::Source = T::Lookup::unlookup(controller); - let reward_destination = RewardDestination::Staked; - let amount = T::Currency::minimum_balance() * 10.into(); - }: _(RawOrigin::Signed(stash), controller_lookup, amount, reward_destination) - - bond_extra { - let u in ...; - let (stash, _) = create_stash_controller::(u)?; - let max_additional = T::Currency::minimum_balance() * 10.into(); - }: _(RawOrigin::Signed(stash), max_additional) - - unbond { - let u in ...; - let (_, controller) = create_stash_controller::(u)?; - let amount = T::Currency::minimum_balance() * 10.into(); - }: _(RawOrigin::Signed(controller), amount) - - // Worst case scenario, everything is removed after the bonding duration - withdraw_unbonded { - let u in ...; - let (stash, controller) = create_stash_controller::(u)?; - let amount = 
T::Currency::minimum_balance() * 10.into(); - Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; - let current_block = System::::block_number(); - // let unbond_block = current_block + T::BondingDuration::get().into() + 10.into(); - // System::::set_block_number(unbond_block); - }: _(RawOrigin::Signed(controller)) - - validate { - let u in ...; - let (_, controller) = create_stash_controller::(u)?; - let prefs = ValidatorPrefs::default(); - }: _(RawOrigin::Signed(controller), prefs) - - // Worst case scenario, MAX_NOMINATIONS - nominate { - let n in 1 .. MAX_NOMINATIONS as u32; - let (_, controller) = create_stash_controller::(n + 1)?; - let validators = create_validators::(n)?; - }: _(RawOrigin::Signed(controller), validators) - - chill { - let u in ...; - let (_, controller) = create_stash_controller::(u)?; - }: _(RawOrigin::Signed(controller)) - - set_payee { - let u in ...; - let (_, controller) = create_stash_controller::(u)?; - }: _(RawOrigin::Signed(controller), RewardDestination::Controller) - - set_controller { - let u in ...; - let (stash, _) = create_stash_controller::(u)?; - let new_controller = create_funded_user::("new_controller", u); - let new_controller_lookup = T::Lookup::unlookup(new_controller); - }: _(RawOrigin::Signed(stash), new_controller_lookup) - - set_validator_count { - let c in 0 .. 1000; - }: _(RawOrigin::Root, c) - - force_no_eras { let i in 0 .. 1; }: _(RawOrigin::Root) - - force_new_era {let i in 0 .. 1; }: _(RawOrigin::Root) - - force_new_era_always { let i in 0 .. 1; }: _(RawOrigin::Root) - - // Worst case scenario, the list of invulnerables is very long. - set_invulnerables { - let v in 0 .. 1000; - let mut invulnerables = Vec::new(); - for i in 0 .. 
v { - invulnerables.push(account("invulnerable", i, SEED)); - } - }: _(RawOrigin::Root, invulnerables) - - force_unstake { - let u in ...; - let (stash, _) = create_stash_controller::(u)?; - }: _(RawOrigin::Root, stash) - - cancel_deferred_slash { - let s in 1 .. 1000; - let mut unapplied_slashes = Vec::new(); - let era = EraIndex::one(); - for _ in 0 .. 1000 { - unapplied_slashes.push(UnappliedSlash::>::default()); - } - UnappliedSlashes::::insert(era, &unapplied_slashes); - - let slash_indices: Vec = (0 .. s).collect(); - }: _(RawOrigin::Root, era, slash_indices) - - payout_stakers { - let n in 1 .. MAX_NOMINATIONS as u32; - let validator = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; - let current_era = CurrentEra::get().unwrap(); - let caller = account("caller", n, SEED); - }: _(RawOrigin::Signed(caller), validator, current_era) - - rebond { - let l in 1 .. 1000; - let (_, controller) = create_stash_controller::(u)?; - let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); - let unlock_chunk = UnlockChunk::> { - value: 1.into(), - era: EraIndex::zero(), - }; - for _ in 0 .. l { - staking_ledger.unlocking.push(unlock_chunk.clone()) - } - Ledger::::insert(controller.clone(), staking_ledger); - }: _(RawOrigin::Signed(controller), (l + 100).into()) - - set_history_depth { - let e in 1 .. 100; - HistoryDepth::put(e); - CurrentEra::put(e); - for i in 0 .. e { - >::insert(i, T::AccountId::default(), Exposure::>::default()); - >::insert(i, T::AccountId::default(), Exposure::>::default()); - >::insert(i, T::AccountId::default(), ValidatorPrefs::default()); - >::insert(i, BalanceOf::::one()); - >::insert(i, EraRewardPoints::::default()); - >::insert(i, BalanceOf::::one()); - ErasStartSessionIndex::insert(i, i); - } - }: _(RawOrigin::Root, EraIndex::zero()) - - reap_stash { - let u in 1 .. 
1000; - let (stash, controller) = create_stash_controller::(u)?; - T::Currency::make_free_balance_be(&stash, 0.into()); - }: _(RawOrigin::Signed(controller), stash) - - new_era { - let v in 1 .. 10; - let n in 1 .. 100; - MinimumValidatorCount::put(0); - create_validators_with_nominators_for_era::(v, n)?; - let session_index = SessionIndex::one(); - }: { - let validators = Staking::::new_era(session_index).ok_or("`new_era` failed")?; - assert!(validators.len() == v as usize); - } - - do_slash { - let l in 1 .. 1000; - let (stash, controller) = create_stash_controller::(0)?; - let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); - let unlock_chunk = UnlockChunk::> { - value: 1.into(), - era: EraIndex::zero(), - }; - for _ in 0 .. l { - staking_ledger.unlocking.push(unlock_chunk.clone()) - } - Ledger::::insert(controller.clone(), staking_ledger.clone()); - let slash_amount = T::Currency::minimum_balance() * 10.into(); - }: { - crate::slashing::do_slash::( - &stash, - slash_amount, - &mut BalanceOf::::zero(), - &mut NegativeImbalanceOf::::zero() - ); - } - - payout_all { - let v in 1 .. 10; - let n in 1 .. 
100; - MinimumValidatorCount::put(0); - create_validators_with_nominators_for_era::(v, n)?; - // Start a new Era - let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); - assert!(new_validators.len() == v as usize); - - let current_era = CurrentEra::get().unwrap(); - let mut points_total = 0; - let mut points_individual = Vec::new(); - let mut payout_calls = Vec::new(); - - for validator in new_validators.iter() { - points_total += 10; - points_individual.push((validator.clone(), 10)); - payout_calls.push(Call::::payout_stakers(validator.clone(), current_era)) - } - - // Give Era Points - let reward = EraRewardPoints:: { - total: points_total, - individual: points_individual.into_iter().collect(), - }; - - ErasRewardPoints::::insert(current_era, reward); - - // Create reward pool - let total_payout = T::Currency::minimum_balance() * 1000.into(); - >::insert(current_era, total_payout); - - let caller: T::AccountId = account("caller", 0, SEED); - }: { - for call in payout_calls { - call.dispatch(RawOrigin::Signed(caller.clone()).into())?; - } - } + _{ + // User account seed + let u in 0 .. 
1000 => (); + } + + bond { + let u in ...; + let stash = create_funded_user::("stash",u); + let controller = create_funded_user::("controller", u); + let controller_lookup: ::Source = T::Lookup::unlookup(controller); + let reward_destination = RewardDestination::Staked; + let amount = T::Currency::minimum_balance() * 10.into(); + }: _(RawOrigin::Signed(stash), controller_lookup, amount, reward_destination) + + bond_extra { + let u in ...; + let (stash, _) = create_stash_controller::(u)?; + let max_additional = T::Currency::minimum_balance() * 10.into(); + }: _(RawOrigin::Signed(stash), max_additional) + + unbond { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + let amount = T::Currency::minimum_balance() * 10.into(); + }: _(RawOrigin::Signed(controller), amount) + + // Worst case scenario, everything is removed after the bonding duration + withdraw_unbonded { + let u in ...; + let (stash, controller) = create_stash_controller::(u)?; + let amount = T::Currency::minimum_balance() * 10.into(); + Staking::::unbond(RawOrigin::Signed(controller.clone()).into(), amount)?; + let current_block = System::::block_number(); + // let unbond_block = current_block + T::BondingDuration::get().into() + 10.into(); + // System::::set_block_number(unbond_block); + }: _(RawOrigin::Signed(controller)) + + validate { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + let prefs = ValidatorPrefs::default(); + }: _(RawOrigin::Signed(controller), prefs) + + // Worst case scenario, MAX_NOMINATIONS + nominate { + let n in 1 .. 
MAX_NOMINATIONS as u32; + let (_, controller) = create_stash_controller::(n + 1)?; + let validators = create_validators::(n)?; + }: _(RawOrigin::Signed(controller), validators) + + chill { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + }: _(RawOrigin::Signed(controller)) + + set_payee { + let u in ...; + let (_, controller) = create_stash_controller::(u)?; + }: _(RawOrigin::Signed(controller), RewardDestination::Controller) + + set_controller { + let u in ...; + let (stash, _) = create_stash_controller::(u)?; + let new_controller = create_funded_user::("new_controller", u); + let new_controller_lookup = T::Lookup::unlookup(new_controller); + }: _(RawOrigin::Signed(stash), new_controller_lookup) + + set_validator_count { + let c in 0 .. 1000; + }: _(RawOrigin::Root, c) + + force_no_eras { let i in 0 .. 1; }: _(RawOrigin::Root) + + force_new_era {let i in 0 .. 1; }: _(RawOrigin::Root) + + force_new_era_always { let i in 0 .. 1; }: _(RawOrigin::Root) + + // Worst case scenario, the list of invulnerables is very long. + set_invulnerables { + let v in 0 .. 1000; + let mut invulnerables = Vec::new(); + for i in 0 .. v { + invulnerables.push(account("invulnerable", i, SEED)); + } + }: _(RawOrigin::Root, invulnerables) + + force_unstake { + let u in ...; + let (stash, _) = create_stash_controller::(u)?; + }: _(RawOrigin::Root, stash) + + cancel_deferred_slash { + let s in 1 .. 1000; + let mut unapplied_slashes = Vec::new(); + let era = EraIndex::one(); + for _ in 0 .. 1000 { + unapplied_slashes.push(UnappliedSlash::>::default()); + } + UnappliedSlashes::::insert(era, &unapplied_slashes); + + let slash_indices: Vec = (0 .. s).collect(); + }: _(RawOrigin::Root, era, slash_indices) + + payout_stakers { + let n in 1 .. 
MAX_NOMINATIONS as u32; + let validator = create_validator_with_nominators::(n, MAX_NOMINATIONS as u32)?; + let current_era = CurrentEra::get().unwrap(); + let caller = account("caller", n, SEED); + }: _(RawOrigin::Signed(caller), validator, current_era) + + rebond { + let l in 1 .. 1000; + let (_, controller) = create_stash_controller::(u)?; + let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + let unlock_chunk = UnlockChunk::> { + value: 1.into(), + era: EraIndex::zero(), + }; + for _ in 0 .. l { + staking_ledger.unlocking.push(unlock_chunk.clone()) + } + Ledger::::insert(controller.clone(), staking_ledger); + }: _(RawOrigin::Signed(controller), (l + 100).into()) + + set_history_depth { + let e in 1 .. 100; + HistoryDepth::put(e); + CurrentEra::put(e); + for i in 0 .. e { + >::insert(i, T::AccountId::default(), Exposure::>::default()); + >::insert(i, T::AccountId::default(), Exposure::>::default()); + >::insert(i, T::AccountId::default(), ValidatorPrefs::default()); + >::insert(i, BalanceOf::::one()); + >::insert(i, EraRewardPoints::::default()); + >::insert(i, BalanceOf::::one()); + ErasStartSessionIndex::insert(i, i); + } + }: _(RawOrigin::Root, EraIndex::zero()) + + reap_stash { + let u in 1 .. 1000; + let (stash, controller) = create_stash_controller::(u)?; + T::Currency::make_free_balance_be(&stash, 0.into()); + }: _(RawOrigin::Signed(controller), stash) + + new_era { + let v in 1 .. 10; + let n in 1 .. 100; + MinimumValidatorCount::put(0); + create_validators_with_nominators_for_era::(v, n)?; + let session_index = SessionIndex::one(); + }: { + let validators = Staking::::new_era(session_index).ok_or("`new_era` failed")?; + assert!(validators.len() == v as usize); + } + + do_slash { + let l in 1 .. 1000; + let (stash, controller) = create_stash_controller::(0)?; + let mut staking_ledger = Ledger::::get(controller.clone()).unwrap(); + let unlock_chunk = UnlockChunk::> { + value: 1.into(), + era: EraIndex::zero(), + }; + for _ in 0 .. 
l { + staking_ledger.unlocking.push(unlock_chunk.clone()) + } + Ledger::::insert(controller.clone(), staking_ledger.clone()); + let slash_amount = T::Currency::minimum_balance() * 10.into(); + }: { + crate::slashing::do_slash::( + &stash, + slash_amount, + &mut BalanceOf::::zero(), + &mut NegativeImbalanceOf::::zero() + ); + } + + payout_all { + let v in 1 .. 10; + let n in 1 .. 100; + MinimumValidatorCount::put(0); + create_validators_with_nominators_for_era::(v, n)?; + // Start a new Era + let new_validators = Staking::::new_era(SessionIndex::one()).unwrap(); + assert!(new_validators.len() == v as usize); + + let current_era = CurrentEra::get().unwrap(); + let mut points_total = 0; + let mut points_individual = Vec::new(); + let mut payout_calls = Vec::new(); + + for validator in new_validators.iter() { + points_total += 10; + points_individual.push((validator.clone(), 10)); + payout_calls.push(Call::::payout_stakers(validator.clone(), current_era)) + } + + // Give Era Points + let reward = EraRewardPoints:: { + total: points_total, + individual: points_individual.into_iter().collect(), + }; + + ErasRewardPoints::::insert(current_era, reward); + + // Create reward pool + let total_payout = T::Currency::minimum_balance() * 1000.into(); + >::insert(current_era, total_payout); + + let caller: T::AccountId = account("caller", 0, SEED); + }: { + for call in payout_calls { + call.dispatch(RawOrigin::Signed(caller.clone()).into())?; + } + } } #[cfg(test)] mod tests { - use super::*; - use crate::mock::{ExtBuilder, Test, Balances, Staking, Origin}; - use frame_support::assert_ok; - - #[test] - fn create_validators_with_nominators_for_era_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { - let v = 10; - let n = 100; - - create_validators_with_nominators_for_era::(v,n).unwrap(); - - let count_validators = Validators::::iter().count(); - let count_nominators = Nominators::::iter().count(); - - assert_eq!(count_validators, v as usize); - 
assert_eq!(count_nominators, n as usize); - }); - } - - #[test] - fn create_validator_with_nominators_works() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { - let n = 10; - - let validator_stash = create_validator_with_nominators::( - n, - MAX_NOMINATIONS as u32, - ).unwrap(); - - let current_era = CurrentEra::get().unwrap(); - - let original_free_balance = Balances::free_balance(&validator_stash); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), validator_stash, current_era)); - let new_free_balance = Balances::free_balance(&validator_stash); - - assert!(original_free_balance < new_free_balance); - }); - } - - #[test] - fn test_payout_all() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { - let v = 10; - let n = 100; - - let selected_benchmark = SelectedBenchmark::payout_all; - let c = vec![(frame_benchmarking::BenchmarkParameter::v, v), (frame_benchmarking::BenchmarkParameter::n, n)]; - let closure_to_benchmark = - >::instance( - &selected_benchmark, - &c - ).unwrap(); - - assert_ok!(closure_to_benchmark()); - }); - } - - #[test] - fn test_benchmarks() { - ExtBuilder::default().has_stakers(false).build().execute_with(|| { - assert_ok!(test_benchmark_bond::()); - assert_ok!(test_benchmark_bond_extra::()); - assert_ok!(test_benchmark_unbond::()); - assert_ok!(test_benchmark_withdraw_unbonded::()); - assert_ok!(test_benchmark_validate::()); - assert_ok!(test_benchmark_nominate::()); - assert_ok!(test_benchmark_chill::()); - assert_ok!(test_benchmark_set_payee::()); - assert_ok!(test_benchmark_set_controller::()); - assert_ok!(test_benchmark_set_validator_count::()); - assert_ok!(test_benchmark_force_no_eras::()); - assert_ok!(test_benchmark_force_new_era::()); - assert_ok!(test_benchmark_force_new_era_always::()); - assert_ok!(test_benchmark_set_invulnerables::()); - assert_ok!(test_benchmark_force_unstake::()); - assert_ok!(test_benchmark_cancel_deferred_slash::()); - 
assert_ok!(test_benchmark_payout_stakers::()); - assert_ok!(test_benchmark_rebond::()); - assert_ok!(test_benchmark_set_history_depth::()); - assert_ok!(test_benchmark_reap_stash::()); - assert_ok!(test_benchmark_new_era::()); - assert_ok!(test_benchmark_do_slash::()); - assert_ok!(test_benchmark_payout_all::()); - }); - } + use super::*; + use crate::mock::{Balances, ExtBuilder, Origin, Staking, Test}; + use frame_support::assert_ok; + + #[test] + fn create_validators_with_nominators_for_era_works() { + ExtBuilder::default() + .has_stakers(false) + .build() + .execute_with(|| { + let v = 10; + let n = 100; + + create_validators_with_nominators_for_era::(v, n).unwrap(); + + let count_validators = Validators::::iter().count(); + let count_nominators = Nominators::::iter().count(); + + assert_eq!(count_validators, v as usize); + assert_eq!(count_nominators, n as usize); + }); + } + + #[test] + fn create_validator_with_nominators_works() { + ExtBuilder::default() + .has_stakers(false) + .build() + .execute_with(|| { + let n = 10; + + let validator_stash = + create_validator_with_nominators::(n, MAX_NOMINATIONS as u32).unwrap(); + + let current_era = CurrentEra::get().unwrap(); + + let original_free_balance = Balances::free_balance(&validator_stash); + assert_ok!(Staking::payout_stakers( + Origin::signed(1337), + validator_stash, + current_era + )); + let new_free_balance = Balances::free_balance(&validator_stash); + + assert!(original_free_balance < new_free_balance); + }); + } + + #[test] + fn test_payout_all() { + ExtBuilder::default() + .has_stakers(false) + .build() + .execute_with(|| { + let v = 10; + let n = 100; + + let selected_benchmark = SelectedBenchmark::payout_all; + let c = vec![ + (frame_benchmarking::BenchmarkParameter::v, v), + (frame_benchmarking::BenchmarkParameter::n, n), + ]; + let closure_to_benchmark = + >::instance( + &selected_benchmark, + &c, + ) + .unwrap(); + + assert_ok!(closure_to_benchmark()); + }); + } + + #[test] + fn test_benchmarks() 
{ + ExtBuilder::default() + .has_stakers(false) + .build() + .execute_with(|| { + assert_ok!(test_benchmark_bond::()); + assert_ok!(test_benchmark_bond_extra::()); + assert_ok!(test_benchmark_unbond::()); + assert_ok!(test_benchmark_withdraw_unbonded::()); + assert_ok!(test_benchmark_validate::()); + assert_ok!(test_benchmark_nominate::()); + assert_ok!(test_benchmark_chill::()); + assert_ok!(test_benchmark_set_payee::()); + assert_ok!(test_benchmark_set_controller::()); + assert_ok!(test_benchmark_set_validator_count::()); + assert_ok!(test_benchmark_force_no_eras::()); + assert_ok!(test_benchmark_force_new_era::()); + assert_ok!(test_benchmark_force_new_era_always::()); + assert_ok!(test_benchmark_set_invulnerables::()); + assert_ok!(test_benchmark_force_unstake::()); + assert_ok!(test_benchmark_cancel_deferred_slash::()); + assert_ok!(test_benchmark_payout_stakers::()); + assert_ok!(test_benchmark_rebond::()); + assert_ok!(test_benchmark_set_history_depth::()); + assert_ok!(test_benchmark_reap_stash::()); + assert_ok!(test_benchmark_new_era::()); + assert_ok!(test_benchmark_do_slash::()); + assert_ok!(test_benchmark_payout_all::()); + }); + } } diff --git a/frame/staking/src/inflation.rs b/frame/staking/src/inflation.rs index d20741d9bc..230826c392 100644 --- a/frame/staking/src/inflation.rs +++ b/frame/staking/src/inflation.rs @@ -19,7 +19,7 @@ //! The staking rate in NPoS is the total amount of tokens staked by nominators and validators, //! divided by the total token supply. -use sp_runtime::{Perbill, traits::AtLeast32Bit, curve::PiecewiseLinear}; +use sp_runtime::{curve::PiecewiseLinear, traits::AtLeast32Bit, Perbill}; /// The total payout to all validators (and their nominators) per era. /// @@ -28,76 +28,127 @@ use sp_runtime::{Perbill, traits::AtLeast32Bit, curve::PiecewiseLinear}; /// /// `era_duration` is expressed in millisecond. 
pub fn compute_total_payout( - yearly_inflation: &PiecewiseLinear<'static>, - npos_token_staked: N, - total_tokens: N, - era_duration: u64 -) -> (N, N) where N: AtLeast32Bit + Clone { - // Milliseconds per year for the Julian year (365.25 days). - const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; + yearly_inflation: &PiecewiseLinear<'static>, + npos_token_staked: N, + total_tokens: N, + era_duration: u64, +) -> (N, N) +where + N: AtLeast32Bit + Clone, +{ + // Milliseconds per year for the Julian year (365.25 days). + const MILLISECONDS_PER_YEAR: u64 = 1000 * 3600 * 24 * 36525 / 100; - let portion = Perbill::from_rational_approximation(era_duration as u64, MILLISECONDS_PER_YEAR); - let payout = portion * yearly_inflation.calculate_for_fraction_times_denominator( - npos_token_staked, - total_tokens.clone(), - ); - let maximum = portion * (yearly_inflation.maximum * total_tokens); - (payout, maximum) + let portion = Perbill::from_rational_approximation(era_duration as u64, MILLISECONDS_PER_YEAR); + let payout = portion + * yearly_inflation + .calculate_for_fraction_times_denominator(npos_token_staked, total_tokens.clone()); + let maximum = portion * (yearly_inflation.maximum * total_tokens); + (payout, maximum) } #[cfg(test)] mod test { - use sp_runtime::curve::PiecewiseLinear; + use sp_runtime::curve::PiecewiseLinear; - pallet_staking_reward_curve::build! { - const I_NPOS: PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); - } + pallet_staking_reward_curve::build! 
{ + const I_NPOS: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); + } - #[test] - fn npos_curve_is_sensible() { - const YEAR: u64 = 365 * 24 * 60 * 60 * 1000; + #[test] + fn npos_curve_is_sensible() { + const YEAR: u64 = 365 * 24 * 60 * 60 * 1000; - // check maximum inflation. - // not 10_000 due to rounding error. - assert_eq!(super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).1, 9_993); + // check maximum inflation. + // not 10_000 due to rounding error. + assert_eq!( + super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).1, + 9_993 + ); - //super::I_NPOS.calculate_for_fraction_times_denominator(25, 100) - assert_eq!(super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).0, 2_498); - assert_eq!(super::compute_total_payout(&I_NPOS, 5_000, 100_000u64, YEAR).0, 3_248); - assert_eq!(super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, YEAR).0, 6_246); - assert_eq!(super::compute_total_payout(&I_NPOS, 40_000, 100_000u64, YEAR).0, 8_494); - assert_eq!(super::compute_total_payout(&I_NPOS, 50_000, 100_000u64, YEAR).0, 9_993); - assert_eq!(super::compute_total_payout(&I_NPOS, 60_000, 100_000u64, YEAR).0, 4_379); - assert_eq!(super::compute_total_payout(&I_NPOS, 75_000, 100_000u64, YEAR).0, 2_733); - assert_eq!(super::compute_total_payout(&I_NPOS, 95_000, 100_000u64, YEAR).0, 2_513); - assert_eq!(super::compute_total_payout(&I_NPOS, 100_000, 100_000u64, YEAR).0, 2_505); + //super::I_NPOS.calculate_for_fraction_times_denominator(25, 100) + assert_eq!( + super::compute_total_payout(&I_NPOS, 0, 100_000u64, YEAR).0, + 2_498 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 5_000, 100_000u64, YEAR).0, + 3_248 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, YEAR).0, + 6_246 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 40_000, 100_000u64, YEAR).0, + 8_494 + ); + 
assert_eq!( + super::compute_total_payout(&I_NPOS, 50_000, 100_000u64, YEAR).0, + 9_993 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 60_000, 100_000u64, YEAR).0, + 4_379 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 75_000, 100_000u64, YEAR).0, + 2_733 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 95_000, 100_000u64, YEAR).0, + 2_513 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 100_000, 100_000u64, YEAR).0, + 2_505 + ); - const DAY: u64 = 24 * 60 * 60 * 1000; - assert_eq!(super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, DAY).0, 17); - assert_eq!(super::compute_total_payout(&I_NPOS, 50_000, 100_000u64, DAY).0, 27); - assert_eq!(super::compute_total_payout(&I_NPOS, 75_000, 100_000u64, DAY).0, 7); + const DAY: u64 = 24 * 60 * 60 * 1000; + assert_eq!( + super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, DAY).0, + 17 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 50_000, 100_000u64, DAY).0, + 27 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 75_000, 100_000u64, DAY).0, + 7 + ); - const SIX_HOURS: u64 = 6 * 60 * 60 * 1000; - assert_eq!(super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, SIX_HOURS).0, 4); - assert_eq!(super::compute_total_payout(&I_NPOS, 50_000, 100_000u64, SIX_HOURS).0, 7); - assert_eq!(super::compute_total_payout(&I_NPOS, 75_000, 100_000u64, SIX_HOURS).0, 2); + const SIX_HOURS: u64 = 6 * 60 * 60 * 1000; + assert_eq!( + super::compute_total_payout(&I_NPOS, 25_000, 100_000u64, SIX_HOURS).0, + 4 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 50_000, 100_000u64, SIX_HOURS).0, + 7 + ); + assert_eq!( + super::compute_total_payout(&I_NPOS, 75_000, 100_000u64, SIX_HOURS).0, + 2 + ); - const HOUR: u64 = 60 * 60 * 1000; - assert_eq!( - super::compute_total_payout( - &I_NPOS, - 2_500_000_000_000_000_000_000_000_000u128, - 5_000_000_000_000_000_000_000_000_000u128, - HOUR - ).0, - 57_038_500_000_000_000_000_000 - ); - } + const HOUR: u64 = 60 * 60 * 1000; + 
assert_eq!( + super::compute_total_payout( + &I_NPOS, + 2_500_000_000_000_000_000_000_000_000u128, + 5_000_000_000_000_000_000_000_000_000u128, + HOUR + ) + .0, + 57_038_500_000_000_000_000_000 + ); + } } diff --git a/frame/staking/src/lib.rs b/frame/staking/src/lib.rs index b6ffa9081b..dd418a54ad 100644 --- a/frame/staking/src/lib.rs +++ b/frame/staking/src/lib.rs @@ -252,63 +252,64 @@ #![recursion_limit = "128"] #![cfg_attr(not(feature = "std"), no_std)] +#[cfg(any(feature = "runtime-benchmarks", test))] +pub mod benchmarking; #[cfg(test)] mod mock; -#[cfg(test)] -mod tests; #[cfg(feature = "testing-utils")] pub mod testing_utils; -#[cfg(any(feature = "runtime-benchmarks", test))] -pub mod benchmarking; +#[cfg(test)] +mod tests; -pub mod slashing; -pub mod offchain_election; pub mod inflation; +pub mod offchain_election; +pub mod slashing; -use sp_std::{ - result, - prelude::*, - collections::btree_map::BTreeMap, - convert::{TryInto, From}, - mem::size_of, -}; -use codec::{HasCompact, Encode, Decode}; +use codec::{Decode, Encode, HasCompact}; use frame_support::{ - decl_module, decl_event, decl_storage, ensure, decl_error, debug, - weights::{SimpleDispatchInfo, MINIMUM_WEIGHT, Weight}, - storage::IterableStorageMap, - dispatch::{IsSubType, DispatchResult}, - traits::{ - Currency, LockIdentifier, LockableCurrency, WithdrawReasons, OnUnbalanced, Imbalance, Get, - UnixTime, EstimateNextNewSession, EnsureOrigin, - } + debug, decl_error, decl_event, decl_module, decl_storage, + dispatch::{DispatchResult, IsSubType}, + ensure, + storage::IterableStorageMap, + traits::{ + Currency, EnsureOrigin, EstimateNextNewSession, Get, Imbalance, LockIdentifier, + LockableCurrency, OnUnbalanced, UnixTime, WithdrawReasons, + }, + weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}, +}; +use frame_system::{ + self as system, ensure_none, ensure_root, ensure_signed, offchain::SubmitUnsignedTransaction, }; use pallet_session::historical; -use sp_runtime::{ - Perbill, PerU16, 
PerThing, RuntimeDebug, - curve::PiecewiseLinear, - traits::{ - Convert, Zero, StaticLookup, CheckedSub, Saturating, SaturatedConversion, AtLeast32Bit, - Dispatchable, - }, - transaction_validity::{ - TransactionValidityError, TransactionValidity, ValidTransaction, InvalidTransaction, - TransactionSource, TransactionPriority, - }, +use sp_phragmen::{ + build_support_map, elect, evaluate_support, generate_compact_solution_type, is_score_better, + Assignment, ExtendedBalance, PhragmenResult, PhragmenScore, SupportMap, VoteWeight, + VotingLimit, }; -use sp_staking::{ - SessionIndex, - offence::{OnOffenceHandler, OffenceDetails, Offence, ReportOffence, OffenceError}, +use sp_runtime::{ + curve::PiecewiseLinear, + traits::{ + AtLeast32Bit, CheckedSub, Convert, Dispatchable, SaturatedConversion, Saturating, + StaticLookup, Zero, + }, + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionSource, TransactionValidity, + TransactionValidityError, ValidTransaction, + }, + PerThing, PerU16, Perbill, RuntimeDebug, }; #[cfg(feature = "std")] -use sp_runtime::{Serialize, Deserialize}; -use frame_system::{ - self as system, ensure_signed, ensure_root, ensure_none, - offchain::SubmitUnsignedTransaction, +use sp_runtime::{Deserialize, Serialize}; +use sp_staking::{ + offence::{Offence, OffenceDetails, OffenceError, OnOffenceHandler, ReportOffence}, + SessionIndex, }; -use sp_phragmen::{ - ExtendedBalance, Assignment, PhragmenScore, PhragmenResult, build_support_map, evaluate_support, - elect, generate_compact_solution_type, is_score_better, VotingLimit, SupportMap, VoteWeight, +use sp_std::{ + collections::btree_map::BTreeMap, + convert::{From, TryInto}, + mem::size_of, + prelude::*, + result, }; const DEFAULT_MINIMUM_VALIDATOR_COUNT: u32 = 4; @@ -354,13 +355,13 @@ generate_compact_solution_type!(pub GenericCompactAssignments, 16); /// Information regarding the active era (era in used in session). 
#[derive(Encode, Decode, RuntimeDebug)] pub struct ActiveEraInfo { - /// Index of era. - index: EraIndex, - /// Moment of start expresed as millisecond from `$UNIX_EPOCH`. - /// - /// Start can be none if start hasn't been set for the era yet, - /// Start is set on the first on_finalize of the era to guarantee usage of `Time`. - start: Option, + /// Index of era. + index: EraIndex, + /// Moment of start expresed as millisecond from `$UNIX_EPOCH`. + /// + /// Start can be none if start hasn't been set for the era yet, + /// Start is set on the first on_finalize of the era to guarantee usage of `Time`. + start: Option, } /// Accuracy used for on-chain phragmen. @@ -371,453 +372,457 @@ pub type OffchainAccuracy = PerU16; /// The balance type of this module. pub type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; /// The compact type for election solutions. pub type CompactAssignments = - GenericCompactAssignments; + GenericCompactAssignments; type PositiveImbalanceOf = - <::Currency as Currency<::AccountId>>::PositiveImbalance; + <::Currency as Currency<::AccountId>>::PositiveImbalance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; /// Reward points of an era. Used to split era total payout between validators. /// /// This points will be used to reward validators and their respective nominators. #[derive(PartialEq, Encode, Decode, Default, RuntimeDebug)] pub struct EraRewardPoints { - /// Total number of points. Equals the sum of reward points for each validator. - total: RewardPoint, - /// The reward points earned by a given validator. - individual: BTreeMap, + /// Total number of points. Equals the sum of reward points for each validator. + total: RewardPoint, + /// The reward points earned by a given validator. + individual: BTreeMap, } /// Indicates the initial status of the staker. 
#[derive(RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum StakerStatus { - /// Chilling. - Idle, - /// Declared desire in validating or already participating in it. - Validator, - /// Nominating for a group of other stakers. - Nominator(Vec), + /// Chilling. + Idle, + /// Declared desire in validating or already participating in it. + Validator, + /// Nominating for a group of other stakers. + Nominator(Vec), } /// A destination account for payment. #[derive(PartialEq, Eq, Copy, Clone, Encode, Decode, RuntimeDebug)] pub enum RewardDestination { - /// Pay into the stash account, increasing the amount at stake accordingly. - Staked, - /// Pay into the stash account, not increasing the amount at stake. - Stash, - /// Pay into the controller account. - Controller, + /// Pay into the stash account, increasing the amount at stake accordingly. + Staked, + /// Pay into the stash account, not increasing the amount at stake. + Stash, + /// Pay into the controller account. + Controller, } impl Default for RewardDestination { - fn default() -> Self { - RewardDestination::Staked - } + fn default() -> Self { + RewardDestination::Staked + } } /// Preference of what happens regarding validation. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub struct ValidatorPrefs { - /// Reward that validator takes up-front; only the rest is split between themselves and - /// nominators. - #[codec(compact)] - pub commission: Perbill, + /// Reward that validator takes up-front; only the rest is split between themselves and + /// nominators. + #[codec(compact)] + pub commission: Perbill, } impl Default for ValidatorPrefs { - fn default() -> Self { - ValidatorPrefs { - commission: Default::default(), - } - } + fn default() -> Self { + ValidatorPrefs { + commission: Default::default(), + } + } } /// Just a Balance/BlockNumber tuple to encode when a chunk of funds will be unlocked. 
#[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub struct UnlockChunk { - /// Amount of funds to be unlocked. - #[codec(compact)] - value: Balance, - /// Era number at which point it'll be unlocked. - #[codec(compact)] - era: EraIndex, + /// Amount of funds to be unlocked. + #[codec(compact)] + value: Balance, + /// Era number at which point it'll be unlocked. + #[codec(compact)] + era: EraIndex, } /// The ledger of a (bonded) stash. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub struct StakingLedger { - /// The stash account whose balance is actually locked and at stake. - pub stash: AccountId, - /// The total amount of the stash's balance that we are currently accounting for. - /// It's just `active` plus all the `unlocking` balances. - #[codec(compact)] - pub total: Balance, - /// The total amount of the stash's balance that will be at stake in any forthcoming - /// rounds. - #[codec(compact)] - pub active: Balance, - /// Any balance that is becoming free, which may eventually be transferred out - /// of the stash (assuming it doesn't get slashed first). - pub unlocking: Vec>, - /// List of eras for which the stakers behind a validator have claimed rewards. Only updated - /// for validators. - pub claimed_rewards: Vec, + /// The stash account whose balance is actually locked and at stake. + pub stash: AccountId, + /// The total amount of the stash's balance that we are currently accounting for. + /// It's just `active` plus all the `unlocking` balances. + #[codec(compact)] + pub total: Balance, + /// The total amount of the stash's balance that will be at stake in any forthcoming + /// rounds. + #[codec(compact)] + pub active: Balance, + /// Any balance that is becoming free, which may eventually be transferred out + /// of the stash (assuming it doesn't get slashed first). + pub unlocking: Vec>, + /// List of eras for which the stakers behind a validator have claimed rewards. Only updated + /// for validators. 
+ pub claimed_rewards: Vec, } -impl< - AccountId, - Balance: HasCompact + Copy + Saturating + AtLeast32Bit, -> StakingLedger { - /// Remove entries from `unlocking` that are sufficiently old and reduce the - /// total by the sum of their balances. - fn consolidate_unlocked(self, current_era: EraIndex) -> Self { - let mut total = self.total; - let unlocking = self.unlocking.into_iter() - .filter(|chunk| if chunk.era > current_era { - true - } else { - total = total.saturating_sub(chunk.value); - false - }) - .collect(); - - Self { - stash: self.stash, - total, - active: self.active, - unlocking, - claimed_rewards: self.claimed_rewards - } - } - - /// Re-bond funds that were scheduled for unlocking. - fn rebond(mut self, value: Balance) -> Self { - let mut unlocking_balance: Balance = Zero::zero(); - - while let Some(last) = self.unlocking.last_mut() { - if unlocking_balance + last.value <= value { - unlocking_balance += last.value; - self.active += last.value; - self.unlocking.pop(); - } else { - let diff = value - unlocking_balance; - - unlocking_balance += diff; - self.active += diff; - last.value -= diff; - } - - if unlocking_balance >= value { - break - } - } - - self - } +impl + StakingLedger +{ + /// Remove entries from `unlocking` that are sufficiently old and reduce the + /// total by the sum of their balances. + fn consolidate_unlocked(self, current_era: EraIndex) -> Self { + let mut total = self.total; + let unlocking = self + .unlocking + .into_iter() + .filter(|chunk| { + if chunk.era > current_era { + true + } else { + total = total.saturating_sub(chunk.value); + false + } + }) + .collect(); + + Self { + stash: self.stash, + total, + active: self.active, + unlocking, + claimed_rewards: self.claimed_rewards, + } + } + + /// Re-bond funds that were scheduled for unlocking. 
+ fn rebond(mut self, value: Balance) -> Self { + let mut unlocking_balance: Balance = Zero::zero(); + + while let Some(last) = self.unlocking.last_mut() { + if unlocking_balance + last.value <= value { + unlocking_balance += last.value; + self.active += last.value; + self.unlocking.pop(); + } else { + let diff = value - unlocking_balance; + + unlocking_balance += diff; + self.active += diff; + last.value -= diff; + } + + if unlocking_balance >= value { + break; + } + } + + self + } } -impl StakingLedger where - Balance: AtLeast32Bit + Saturating + Copy, +impl StakingLedger +where + Balance: AtLeast32Bit + Saturating + Copy, { - /// Slash the validator for a given amount of balance. This can grow the value - /// of the slash in the case that the validator has less than `minimum_balance` - /// active funds. Returns the amount of funds actually slashed. - /// - /// Slashes from `active` funds first, and then `unlocking`, starting with the - /// chunks that are closest to unlocking. - fn slash( - &mut self, - mut value: Balance, - minimum_balance: Balance, - ) -> Balance { - let pre_total = self.total; - let total = &mut self.total; - let active = &mut self.active; - - let slash_out_of = | - total_remaining: &mut Balance, - target: &mut Balance, - value: &mut Balance, - | { - let mut slash_from_target = (*value).min(*target); - - if !slash_from_target.is_zero() { - *target -= slash_from_target; - - // don't leave a dust balance in the staking system. - if *target <= minimum_balance { - slash_from_target += *target; - *value += sp_std::mem::replace(target, Zero::zero()); - } - - *total_remaining = total_remaining.saturating_sub(slash_from_target); - *value -= slash_from_target; - } - }; - - slash_out_of(total, active, &mut value); - - let i = self.unlocking.iter_mut() - .map(|chunk| { - slash_out_of(total, &mut chunk.value, &mut value); - chunk.value - }) - .take_while(|value| value.is_zero()) // take all fully-consumed chunks out. 
- .count(); - - // kill all drained chunks. - let _ = self.unlocking.drain(..i); - - pre_total.saturating_sub(*total) - } + /// Slash the validator for a given amount of balance. This can grow the value + /// of the slash in the case that the validator has less than `minimum_balance` + /// active funds. Returns the amount of funds actually slashed. + /// + /// Slashes from `active` funds first, and then `unlocking`, starting with the + /// chunks that are closest to unlocking. + fn slash(&mut self, mut value: Balance, minimum_balance: Balance) -> Balance { + let pre_total = self.total; + let total = &mut self.total; + let active = &mut self.active; + + let slash_out_of = + |total_remaining: &mut Balance, target: &mut Balance, value: &mut Balance| { + let mut slash_from_target = (*value).min(*target); + + if !slash_from_target.is_zero() { + *target -= slash_from_target; + + // don't leave a dust balance in the staking system. + if *target <= minimum_balance { + slash_from_target += *target; + *value += sp_std::mem::replace(target, Zero::zero()); + } + + *total_remaining = total_remaining.saturating_sub(slash_from_target); + *value -= slash_from_target; + } + }; + + slash_out_of(total, active, &mut value); + + let i = self + .unlocking + .iter_mut() + .map(|chunk| { + slash_out_of(total, &mut chunk.value, &mut value); + chunk.value + }) + .take_while(|value| value.is_zero()) // take all fully-consumed chunks out. + .count(); + + // kill all drained chunks. + let _ = self.unlocking.drain(..i); + + pre_total.saturating_sub(*total) + } } /// A record of the nominations made by a specific account. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub struct Nominations { - /// The targets of nomination. - pub targets: Vec, - /// The era the nominations were submitted. - /// - /// Except for initial nominations which are considered submitted at era 0. - pub submitted_in: EraIndex, - /// Whether the nominations have been suppressed. 
- pub suppressed: bool, + /// The targets of nomination. + pub targets: Vec, + /// The era the nominations were submitted. + /// + /// Except for initial nominations which are considered submitted at era 0. + pub submitted_in: EraIndex, + /// Whether the nominations have been suppressed. + pub suppressed: bool, } /// The amount of exposure (to slashing) than an individual nominator has. #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, RuntimeDebug)] pub struct IndividualExposure { - /// The stash account of the nominator in question. - pub who: AccountId, - /// Amount of funds exposed. - #[codec(compact)] - pub value: Balance, + /// The stash account of the nominator in question. + pub who: AccountId, + /// Amount of funds exposed. + #[codec(compact)] + pub value: Balance, } /// A snapshot of the stake backing a single validator in the system. #[derive(PartialEq, Eq, PartialOrd, Ord, Clone, Encode, Decode, Default, RuntimeDebug)] pub struct Exposure { - /// The total balance backing this validator. - #[codec(compact)] - pub total: Balance, - /// The validator's own stash that is exposed. - #[codec(compact)] - pub own: Balance, - /// The portions of nominators stashes that are exposed. - pub others: Vec>, + /// The total balance backing this validator. + #[codec(compact)] + pub total: Balance, + /// The validator's own stash that is exposed. + #[codec(compact)] + pub own: Balance, + /// The portions of nominators stashes that are exposed. + pub others: Vec>, } /// A pending slash record. The value of the slash has been computed but not applied yet, /// rather deferred for several eras. #[derive(Encode, Decode, Default, RuntimeDebug)] pub struct UnappliedSlash { - /// The stash ID of the offending validator. - validator: AccountId, - /// The validator's own slash. - own: Balance, - /// All other slashed stakers and amounts. - others: Vec<(AccountId, Balance)>, - /// Reporters of the offence; bounty payout recipients. 
- reporters: Vec, - /// The amount of payout. - payout: Balance, + /// The stash ID of the offending validator. + validator: AccountId, + /// The validator's own slash. + own: Balance, + /// All other slashed stakers and amounts. + others: Vec<(AccountId, Balance)>, + /// Reporters of the offence; bounty payout recipients. + reporters: Vec, + /// The amount of payout. + payout: Balance, } /// Indicate how an election round was computed. #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] pub enum ElectionCompute { - /// Result was forcefully computed on chain at the end of the session. - OnChain, - /// Result was submitted and accepted to the chain via a signed transaction. - Signed, - /// Result was submitted and accepted to the chain via an unsigned transaction (by an - /// authority). - Unsigned, + /// Result was forcefully computed on chain at the end of the session. + OnChain, + /// Result was submitted and accepted to the chain via a signed transaction. + Signed, + /// Result was submitted and accepted to the chain via an unsigned transaction (by an + /// authority). + Unsigned, } /// The result of an election round. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub struct ElectionResult { - /// Flat list of validators who have been elected. - elected_stashes: Vec, - /// Flat list of new exposures, to be updated in the [`Exposure`] storage. - exposures: Vec<(AccountId, Exposure)>, - /// Type of the result. This is kept on chain only to track and report the best score's - /// submission type. An optimisation could remove this. - compute: ElectionCompute, + /// Flat list of validators who have been elected. + elected_stashes: Vec, + /// Flat list of new exposures, to be updated in the [`Exposure`] storage. + exposures: Vec<(AccountId, Exposure)>, + /// Type of the result. This is kept on chain only to track and report the best score's + /// submission type. An optimisation could remove this. 
+ compute: ElectionCompute, } /// The status of the upcoming (offchain) election. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] pub enum ElectionStatus { - /// Nothing has and will happen for now. submission window is not open. - Closed, - /// The submission window has been open since the contained block number. - Open(BlockNumber), + /// Nothing has and will happen for now. submission window is not open. + Closed, + /// The submission window has been open since the contained block number. + Open(BlockNumber), } impl ElectionStatus { - fn is_open_at(&self, n: BlockNumber) -> bool { - *self == Self::Open(n) - } - - fn is_closed(&self) -> bool { - match self { - Self::Closed => true, - _ => false - } - } - - fn is_open(&self) -> bool { - !self.is_closed() - } + fn is_open_at(&self, n: BlockNumber) -> bool { + *self == Self::Open(n) + } + + fn is_closed(&self) -> bool { + match self { + Self::Closed => true, + _ => false, + } + } + + fn is_open(&self) -> bool { + !self.is_closed() + } } impl Default for ElectionStatus { - fn default() -> Self { - Self::Closed - } + fn default() -> Self { + Self::Closed + } } /// Means for interacting with a specialized version of the `session` trait. /// /// This is needed because `Staking` sets the `ValidatorIdOf` of the `pallet_session::Trait` pub trait SessionInterface: frame_system::Trait { - /// Disable a given validator by stash ID. - /// - /// Returns `true` if new era should be forced at the end of this session. - /// This allows preventing a situation where there is too many validators - /// disabled and block production stalls. - fn disable_validator(validator: &AccountId) -> Result; - /// Get the validators from session. - fn validators() -> Vec; - /// Prune historical session tries up to but not including the given index. - fn prune_historical_up_to(up_to: SessionIndex); + /// Disable a given validator by stash ID. + /// + /// Returns `true` if new era should be forced at the end of this session. 
+ /// This allows preventing a situation where there is too many validators + /// disabled and block production stalls. + fn disable_validator(validator: &AccountId) -> Result; + /// Get the validators from session. + fn validators() -> Vec; + /// Prune historical session tries up to but not including the given index. + fn prune_historical_up_to(up_to: SessionIndex); } -impl SessionInterface<::AccountId> for T where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, - FullIdentificationOf = ExposureOf, - >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, - T::ValidatorIdOf: - Convert<::AccountId, Option<::AccountId>>, +impl SessionInterface<::AccountId> for T +where + T: pallet_session::Trait::AccountId>, + T: pallet_session::historical::Trait< + FullIdentification = Exposure<::AccountId, BalanceOf>, + FullIdentificationOf = ExposureOf, + >, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::ValidatorIdOf: Convert< + ::AccountId, + Option<::AccountId>, + >, { - fn disable_validator(validator: &::AccountId) -> Result { - >::disable(validator) - } + fn disable_validator(validator: &::AccountId) -> Result { + >::disable(validator) + } - fn validators() -> Vec<::AccountId> { - >::validators() - } + fn validators() -> Vec<::AccountId> { + >::validators() + } - fn prune_historical_up_to(up_to: SessionIndex) { - >::prune_up_to(up_to); - } + fn prune_historical_up_to(up_to: SessionIndex) { + >::prune_up_to(up_to); + } } pub trait Trait: frame_system::Trait { - /// The staking balance. - type Currency: LockableCurrency; + /// The staking balance. + type Currency: LockableCurrency; - /// Time used for computing era duration. - /// - /// It is guaranteed to start being called from the first `on_finalize`. 
Thus value at genesis - /// is not used. - type UnixTime: UnixTime; + /// Time used for computing era duration. + /// + /// It is guaranteed to start being called from the first `on_finalize`. Thus value at genesis + /// is not used. + type UnixTime: UnixTime; - /// Convert a balance into a number used for election calculation. This must fit into a `u64` - /// but is allowed to be sensibly lossy. The `u64` is used to communicate with the - /// [`sp_phragmen`] crate which accepts u64 numbers and does operations in 128. Consequently, - /// the backward convert is used convert the u128s from phragmen back to a [`BalanceOf`]. - type CurrencyToVote: Convert, VoteWeight> + Convert>; + /// Convert a balance into a number used for election calculation. This must fit into a `u64` + /// but is allowed to be sensibly lossy. The `u64` is used to communicate with the + /// [`sp_phragmen`] crate which accepts u64 numbers and does operations in 128. Consequently, + /// the backward convert is used convert the u128s from phragmen back to a [`BalanceOf`]. + type CurrencyToVote: Convert, VoteWeight> + Convert>; - /// Tokens have been minted and are unused for validator-reward. - type RewardRemainder: OnUnbalanced>; + /// Tokens have been minted and are unused for validator-reward. + type RewardRemainder: OnUnbalanced>; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// Handler for the unbalanced reduction when slashing a staker. - type Slash: OnUnbalanced>; + /// Handler for the unbalanced reduction when slashing a staker. + type Slash: OnUnbalanced>; - /// Handler for the unbalanced increment when rewarding a staker. - type Reward: OnUnbalanced>; + /// Handler for the unbalanced increment when rewarding a staker. + type Reward: OnUnbalanced>; - /// Number of sessions per era. - type SessionsPerEra: Get; + /// Number of sessions per era. 
+ type SessionsPerEra: Get; - /// Number of eras that staked funds must remain bonded for. - type BondingDuration: Get; + /// Number of eras that staked funds must remain bonded for. + type BondingDuration: Get; - /// Number of eras that slashes are deferred by, after computation. This - /// should be less than the bonding duration. Set to 0 if slashes should be - /// applied immediately, without opportunity for intervention. - type SlashDeferDuration: Get; + /// Number of eras that slashes are deferred by, after computation. This + /// should be less than the bonding duration. Set to 0 if slashes should be + /// applied immediately, without opportunity for intervention. + type SlashDeferDuration: Get; - /// The origin which can cancel a deferred slash. Root can always do this. - type SlashCancelOrigin: EnsureOrigin; + /// The origin which can cancel a deferred slash. Root can always do this. + type SlashCancelOrigin: EnsureOrigin; - /// Interface for interacting with a session module. - type SessionInterface: self::SessionInterface; + /// Interface for interacting with a session module. + type SessionInterface: self::SessionInterface; - /// The NPoS reward curve to use. - type RewardCurve: Get<&'static PiecewiseLinear<'static>>; + /// The NPoS reward curve to use. + type RewardCurve: Get<&'static PiecewiseLinear<'static>>; - /// Something that can estimate the next session change, accurately or as a best effort guess. - type NextNewSession: EstimateNextNewSession; + /// Something that can estimate the next session change, accurately or as a best effort guess. + type NextNewSession: EstimateNextNewSession; - /// How many blocks ahead of the era, within the last do we try to run the phragmen offchain? - /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will - /// be used. - type ElectionLookahead: Get; + /// How many blocks ahead of the era, within the last do we try to run the phragmen offchain? 
+ /// Setting this to zero will disable the offchain compute and only on-chain seq-phragmen will + /// be used. + type ElectionLookahead: Get; - /// The overarching call type. - type Call: Dispatchable + From> + IsSubType, Self> + Clone; + /// The overarching call type. + type Call: Dispatchable + From> + IsSubType, Self> + Clone; - /// A transaction submitter. - type SubmitTransaction: SubmitUnsignedTransaction::Call>; + /// A transaction submitter. + type SubmitTransaction: SubmitUnsignedTransaction::Call>; - /// The maximum number of nominators rewarded for each validator. - /// - /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim - /// their reward. This used to limit the i/o cost for the nominator payout. - type MaxNominatorRewardedPerValidator: Get; + /// The maximum number of nominators rewarded for each validator. + /// + /// For each validator only the `$MaxNominatorRewardedPerValidator` biggest stakers can claim + /// their reward. This used to limit the i/o cost for the nominator payout. + type MaxNominatorRewardedPerValidator: Get; - /// A configuration for base priority of unsigned transactions. - /// - /// This is exposed so that it can be tuned for particular runtime, when - /// multiple pallets send unsigned transactions. - type UnsignedPriority: Get; + /// A configuration for base priority of unsigned transactions. + /// + /// This is exposed so that it can be tuned for particular runtime, when + /// multiple pallets send unsigned transactions. + type UnsignedPriority: Get; } /// Mode of era-forcing. #[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum Forcing { - /// Not forcing anything - just let whatever happen. - NotForcing, - /// Force a new era, then reset to `NotForcing` as soon as it is done. - ForceNew, - /// Avoid a new era indefinitely. - ForceNone, - /// Force a new era at the end of all sessions indefinitely. 
- ForceAlways, + /// Not forcing anything - just let whatever happen. + NotForcing, + /// Force a new era, then reset to `NotForcing` as soon as it is done. + ForceNew, + /// Avoid a new era indefinitely. + ForceNone, + /// Force a new era at the end of all sessions indefinitely. + ForceAlways, } impl Default for Forcing { - fn default() -> Self { Forcing::NotForcing } + fn default() -> Self { + Forcing::NotForcing + } } // A value placed in storage that represents the current version of the Staking storage. @@ -825,236 +830,236 @@ impl Default for Forcing { // storage migration logic. This should match directly with the semantic versions of the Rust crate. #[derive(Encode, Decode, Clone, Copy, PartialEq, Eq, RuntimeDebug)] enum Releases { - V1_0_0Ancient, - V2_0_0, - V3_0_0, + V1_0_0Ancient, + V2_0_0, + V3_0_0, } impl Default for Releases { - fn default() -> Self { - Releases::V3_0_0 - } + fn default() -> Self { + Releases::V3_0_0 + } } decl_storage! { - trait Store for Module as Staking { - /// Number of eras to keep in history. - /// - /// Information is kept for eras in `[current_era - history_depth; current_era]`. - /// - /// Must be more than the number of eras delayed by session otherwise. - /// I.e. active era must always be in history. - /// I.e. `active_era > current_era - history_depth` must be guaranteed. - HistoryDepth get(fn history_depth) config(): u32 = 84; - - /// The ideal number of staking participants. - pub ValidatorCount get(fn validator_count) config(): u32; - - /// Minimum number of staking participants before emergency conditions are imposed. - pub MinimumValidatorCount get(fn minimum_validator_count) config(): - u32 = DEFAULT_MINIMUM_VALIDATOR_COUNT; - - /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're - /// easy to initialize and the performance hit is minimal (we expect no more than four - /// invulnerables) and restricted to testnets. 
- pub Invulnerables get(fn invulnerables) config(): Vec; - - /// Map from all locked "stash" accounts to the controller account. - pub Bonded get(fn bonded): map hasher(twox_64_concat) T::AccountId => Option; - - /// Map from all (unlocked) "controller" accounts to the info regarding the staking. - pub Ledger get(fn ledger): - map hasher(blake2_128_concat) T::AccountId - => Option>>; - - /// Where the reward payment should be made. Keyed by stash. - pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination; - - /// The map from (wannabe) validator stash key to the preferences of that validator. - pub Validators get(fn validators): - map hasher(twox_64_concat) T::AccountId => ValidatorPrefs; - - /// The map from nominator stash key to the set of stash keys of all validators to nominate. - pub Nominators get(fn nominators): - map hasher(twox_64_concat) T::AccountId => Option>; - - /// The current era index. - /// - /// This is the latest planned era, depending on how the Session pallet queues the validator - /// set, it might be active or not. - pub CurrentEra get(fn current_era): Option; - - /// The active era information, it holds index and start. - /// - /// The active era is the era currently rewarded. - /// Validator set of this era must be equal to `SessionInterface::validators`. - pub ActiveEra get(fn active_era): Option; - - /// The session index at which the era start for the last `HISTORY_DEPTH` eras. - pub ErasStartSessionIndex get(fn eras_start_session_index): - map hasher(twox_64_concat) EraIndex => Option; - - /// Exposure of validator at era. - /// - /// This is keyed first by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - /// If stakers hasn't been set or has been removed then empty exposure is returned. 
- pub ErasStakers get(fn eras_stakers): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Exposure>; - - /// Clipped Exposure of validator at era. - /// - /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the - /// `T::MaxNominatorRewardedPerValidator` biggest stakers. - /// (Note: the field `total` and `own` of the exposure remains unchanged). - /// This is used to limit the i/o cost for the nominator payout. - /// - /// This is keyed fist by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - /// If stakers hasn't been set or has been removed then empty exposure is returned. - pub ErasStakersClipped get(fn eras_stakers_clipped): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Exposure>; - - /// Similar to `ErasStakers`, this holds the preferences of validators. - /// - /// This is keyed first by the era index to allow bulk deletion and then the stash account. - /// - /// Is it removed after `HISTORY_DEPTH` eras. - // If prefs hasn't been set or has been removed then 0 commission is returned. - pub ErasValidatorPrefs get(fn eras_validator_prefs): - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => ValidatorPrefs; - - /// The total validator era payout for the last `HISTORY_DEPTH` eras. - /// - /// Eras that haven't finished yet or has been removed doesn't have reward. - pub ErasValidatorReward get(fn eras_validator_reward): - map hasher(twox_64_concat) EraIndex => Option>; - - /// Rewards for the last `HISTORY_DEPTH` eras. - /// If reward hasn't been set or has been removed then 0 reward is returned. - pub ErasRewardPoints get(fn eras_reward_points): - map hasher(twox_64_concat) EraIndex => EraRewardPoints; - - /// The total amount staked for the last `HISTORY_DEPTH` eras. - /// If total hasn't been set or has been removed then 0 stake is returned. 
- pub ErasTotalStake get(fn eras_total_stake): - map hasher(twox_64_concat) EraIndex => BalanceOf; - - /// Mode of era forcing. - pub ForceEra get(fn force_era) config(): Forcing; - - /// The percentage of the slash that is distributed to reporters. - /// - /// The rest of the slashed value is handled by the `Slash`. - pub SlashRewardFraction get(fn slash_reward_fraction) config(): Perbill; - - /// The amount of currency given to reporters of a slash event which was - /// canceled by extraordinary circumstances (e.g. governance). - pub CanceledSlashPayout get(fn canceled_payout) config(): BalanceOf; - - /// All unapplied slashes that are queued for later. - pub UnappliedSlashes: - map hasher(twox_64_concat) EraIndex => Vec>>; - - /// A mapping from still-bonded eras to the first session index of that era. - /// - /// Must contains information for eras for the range: - /// `[active_era - bounding_duration; active_era]` - BondedEras: Vec<(EraIndex, SessionIndex)>; - - /// All slashing events on validators, mapped by era to the highest slash proportion - /// and slash value of the era. - ValidatorSlashInEra: - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Option<(Perbill, BalanceOf)>; - - /// All slashing events on nominators, mapped by era to the highest slash value of the era. - NominatorSlashInEra: - double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId - => Option>; - - /// Slashing spans for stash accounts. - SlashingSpans: map hasher(twox_64_concat) T::AccountId => Option; - - /// Records information about the maximum slash of a stash within a slashing span, - /// as well as how much reward has been paid out. - SpanSlash: - map hasher(twox_64_concat) (T::AccountId, slashing::SpanIndex) - => slashing::SpanRecord>; - - /// The earliest era for which we have a pending, unapplied slash. - EarliestUnappliedSlash: Option; - - /// Snapshot of validators at the beginning of the current election window. 
This should only - /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. - pub SnapshotValidators get(fn snapshot_validators): Option>; - - /// Snapshot of nominators at the beginning of the current election window. This should only - /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. - pub SnapshotNominators get(fn snapshot_nominators): Option>; - - /// The next validator set. At the end of an era, if this is available (potentially from the - /// result of an offchain worker), it is immediately used. Otherwise, the on-chain election - /// is executed. - pub QueuedElected get(fn queued_elected): Option>>; - - /// The score of the current [`QueuedElected`]. - pub QueuedScore get(fn queued_score): Option; - - /// Flag to control the execution of the offchain election. When `Open(_)`, we accept - /// solutions to be submitted. - pub EraElectionStatus get(fn era_election_status): ElectionStatus; - - /// True if the current **planned** session is final. Note that this does not take era - /// forcing into account. - pub IsCurrentSessionFinal get(fn is_current_session_final): bool = false; - - /// True if network has been upgraded to this version. - /// Storage version of the pallet. - /// - /// This is set to v3.0.0 for new networks. - StorageVersion build(|_: &GenesisConfig| Releases::V3_0_0): Releases; - - /// The era where we migrated from Lazy Payouts to Simple Payouts - MigrateEra: Option; - } - add_extra_genesis { - config(stakers): - Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>; - build(|config: &GenesisConfig| { - for &(ref stash, ref controller, balance, ref status) in &config.stakers { - assert!( - T::Currency::free_balance(&stash) >= balance, - "Stash does not have enough balance to bond." 
- ); - let _ = >::bond( - T::Origin::from(Some(stash.clone()).into()), - T::Lookup::unlookup(controller.clone()), - balance, - RewardDestination::Staked, - ); - let _ = match status { - StakerStatus::Validator => { - >::validate( - T::Origin::from(Some(controller.clone()).into()), - Default::default(), - ) - }, - StakerStatus::Nominator(votes) => { - >::nominate( - T::Origin::from(Some(controller.clone()).into()), - votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), - ) - }, _ => Ok(()) - }; - } - }); - } + trait Store for Module as Staking { + /// Number of eras to keep in history. + /// + /// Information is kept for eras in `[current_era - history_depth; current_era]`. + /// + /// Must be more than the number of eras delayed by session otherwise. + /// I.e. active era must always be in history. + /// I.e. `active_era > current_era - history_depth` must be guaranteed. + HistoryDepth get(fn history_depth) config(): u32 = 84; + + /// The ideal number of staking participants. + pub ValidatorCount get(fn validator_count) config(): u32; + + /// Minimum number of staking participants before emergency conditions are imposed. + pub MinimumValidatorCount get(fn minimum_validator_count) config(): + u32 = DEFAULT_MINIMUM_VALIDATOR_COUNT; + + /// Any validators that may never be slashed or forcibly kicked. It's a Vec since they're + /// easy to initialize and the performance hit is minimal (we expect no more than four + /// invulnerables) and restricted to testnets. + pub Invulnerables get(fn invulnerables) config(): Vec; + + /// Map from all locked "stash" accounts to the controller account. + pub Bonded get(fn bonded): map hasher(twox_64_concat) T::AccountId => Option; + + /// Map from all (unlocked) "controller" accounts to the info regarding the staking. + pub Ledger get(fn ledger): + map hasher(blake2_128_concat) T::AccountId + => Option>>; + + /// Where the reward payment should be made. Keyed by stash. 
+ pub Payee get(fn payee): map hasher(twox_64_concat) T::AccountId => RewardDestination; + + /// The map from (wannabe) validator stash key to the preferences of that validator. + pub Validators get(fn validators): + map hasher(twox_64_concat) T::AccountId => ValidatorPrefs; + + /// The map from nominator stash key to the set of stash keys of all validators to nominate. + pub Nominators get(fn nominators): + map hasher(twox_64_concat) T::AccountId => Option>; + + /// The current era index. + /// + /// This is the latest planned era, depending on how the Session pallet queues the validator + /// set, it might be active or not. + pub CurrentEra get(fn current_era): Option; + + /// The active era information, it holds index and start. + /// + /// The active era is the era currently rewarded. + /// Validator set of this era must be equal to `SessionInterface::validators`. + pub ActiveEra get(fn active_era): Option; + + /// The session index at which the era start for the last `HISTORY_DEPTH` eras. + pub ErasStartSessionIndex get(fn eras_start_session_index): + map hasher(twox_64_concat) EraIndex => Option; + + /// Exposure of validator at era. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// + /// Is it removed after `HISTORY_DEPTH` eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + pub ErasStakers get(fn eras_stakers): + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => Exposure>; + + /// Clipped Exposure of validator at era. + /// + /// This is similar to [`ErasStakers`] but number of nominators exposed is reduced to the + /// `T::MaxNominatorRewardedPerValidator` biggest stakers. + /// (Note: the field `total` and `own` of the exposure remains unchanged). + /// This is used to limit the i/o cost for the nominator payout. + /// + /// This is keyed fist by the era index to allow bulk deletion and then the stash account. 
+ /// + /// Is it removed after `HISTORY_DEPTH` eras. + /// If stakers hasn't been set or has been removed then empty exposure is returned. + pub ErasStakersClipped get(fn eras_stakers_clipped): + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => Exposure>; + + /// Similar to `ErasStakers`, this holds the preferences of validators. + /// + /// This is keyed first by the era index to allow bulk deletion and then the stash account. + /// + /// Is it removed after `HISTORY_DEPTH` eras. + // If prefs hasn't been set or has been removed then 0 commission is returned. + pub ErasValidatorPrefs get(fn eras_validator_prefs): + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => ValidatorPrefs; + + /// The total validator era payout for the last `HISTORY_DEPTH` eras. + /// + /// Eras that haven't finished yet or has been removed doesn't have reward. + pub ErasValidatorReward get(fn eras_validator_reward): + map hasher(twox_64_concat) EraIndex => Option>; + + /// Rewards for the last `HISTORY_DEPTH` eras. + /// If reward hasn't been set or has been removed then 0 reward is returned. + pub ErasRewardPoints get(fn eras_reward_points): + map hasher(twox_64_concat) EraIndex => EraRewardPoints; + + /// The total amount staked for the last `HISTORY_DEPTH` eras. + /// If total hasn't been set or has been removed then 0 stake is returned. + pub ErasTotalStake get(fn eras_total_stake): + map hasher(twox_64_concat) EraIndex => BalanceOf; + + /// Mode of era forcing. + pub ForceEra get(fn force_era) config(): Forcing; + + /// The percentage of the slash that is distributed to reporters. + /// + /// The rest of the slashed value is handled by the `Slash`. + pub SlashRewardFraction get(fn slash_reward_fraction) config(): Perbill; + + /// The amount of currency given to reporters of a slash event which was + /// canceled by extraordinary circumstances (e.g. governance). 
+ pub CanceledSlashPayout get(fn canceled_payout) config(): BalanceOf; + + /// All unapplied slashes that are queued for later. + pub UnappliedSlashes: + map hasher(twox_64_concat) EraIndex => Vec>>; + + /// A mapping from still-bonded eras to the first session index of that era. + /// + /// Must contains information for eras for the range: + /// `[active_era - bounding_duration; active_era]` + BondedEras: Vec<(EraIndex, SessionIndex)>; + + /// All slashing events on validators, mapped by era to the highest slash proportion + /// and slash value of the era. + ValidatorSlashInEra: + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => Option<(Perbill, BalanceOf)>; + + /// All slashing events on nominators, mapped by era to the highest slash value of the era. + NominatorSlashInEra: + double_map hasher(twox_64_concat) EraIndex, hasher(twox_64_concat) T::AccountId + => Option>; + + /// Slashing spans for stash accounts. + SlashingSpans: map hasher(twox_64_concat) T::AccountId => Option; + + /// Records information about the maximum slash of a stash within a slashing span, + /// as well as how much reward has been paid out. + SpanSlash: + map hasher(twox_64_concat) (T::AccountId, slashing::SpanIndex) + => slashing::SpanRecord>; + + /// The earliest era for which we have a pending, unapplied slash. + EarliestUnappliedSlash: Option; + + /// Snapshot of validators at the beginning of the current election window. This should only + /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. + pub SnapshotValidators get(fn snapshot_validators): Option>; + + /// Snapshot of nominators at the beginning of the current election window. This should only + /// have a value when [`EraElectionStatus`] == `ElectionStatus::Open(_)`. + pub SnapshotNominators get(fn snapshot_nominators): Option>; + + /// The next validator set. 
At the end of an era, if this is available (potentially from the + /// result of an offchain worker), it is immediately used. Otherwise, the on-chain election + /// is executed. + pub QueuedElected get(fn queued_elected): Option>>; + + /// The score of the current [`QueuedElected`]. + pub QueuedScore get(fn queued_score): Option; + + /// Flag to control the execution of the offchain election. When `Open(_)`, we accept + /// solutions to be submitted. + pub EraElectionStatus get(fn era_election_status): ElectionStatus; + + /// True if the current **planned** session is final. Note that this does not take era + /// forcing into account. + pub IsCurrentSessionFinal get(fn is_current_session_final): bool = false; + + /// True if network has been upgraded to this version. + /// Storage version of the pallet. + /// + /// This is set to v3.0.0 for new networks. + StorageVersion build(|_: &GenesisConfig| Releases::V3_0_0): Releases; + + /// The era where we migrated from Lazy Payouts to Simple Payouts + MigrateEra: Option; + } + add_extra_genesis { + config(stakers): + Vec<(T::AccountId, T::AccountId, BalanceOf, StakerStatus)>; + build(|config: &GenesisConfig| { + for &(ref stash, ref controller, balance, ref status) in &config.stakers { + assert!( + T::Currency::free_balance(&stash) >= balance, + "Stash does not have enough balance to bond." + ); + let _ = >::bond( + T::Origin::from(Some(stash.clone()).into()), + T::Lookup::unlookup(controller.clone()), + balance, + RewardDestination::Staked, + ); + let _ = match status { + StakerStatus::Validator => { + >::validate( + T::Origin::from(Some(controller.clone()).into()), + Default::default(), + ) + }, + StakerStatus::Nominator(votes) => { + >::nominate( + T::Origin::from(Some(controller.clone()).into()), + votes.iter().map(|l| T::Lookup::unlookup(l.clone())).collect(), + ) + }, _ => Ok(()) + }; + } + }); + } } decl_event!( @@ -1082,1836 +1087,1885 @@ decl_event!( ); decl_error! { - /// Error for the staking module. 
- pub enum Error for Module { - /// Not a controller account. - NotController, - /// Not a stash account. - NotStash, - /// Stash is already bonded. - AlreadyBonded, - /// Controller is already paired. - AlreadyPaired, - /// Targets cannot be empty. - EmptyTargets, - /// Duplicate index. - DuplicateIndex, - /// Slash record index out of bounds. - InvalidSlashIndex, - /// Can not bond with value less than minimum balance. - InsufficientValue, - /// Can not schedule more unlock chunks. - NoMoreChunks, - /// Can not rebond without unlocking chunks. - NoUnlockChunk, - /// Attempting to target a stash that still has funds. - FundedTarget, - /// Invalid era to reward. - InvalidEraToReward, - /// Invalid number of nominations. - InvalidNumberOfNominations, - /// Items are not sorted and unique. - NotSortedAndUnique, - /// Rewards for this era have already been claimed for this validator. - AlreadyClaimed, - /// The submitted result is received out of the open window. - PhragmenEarlySubmission, - /// The submitted result is not as good as the one stored on chain. - PhragmenWeakSubmission, - /// The snapshot data of the current window is missing. - SnapshotUnavailable, - /// Incorrect number of winners were presented. - PhragmenBogusWinnerCount, - /// One of the submitted winners is not an active candidate on chain (index is out of range - /// in snapshot). - PhragmenBogusWinner, - /// Error while building the assignment type from the compact. This can happen if an index - /// is invalid, or if the weights _overflow_. - PhragmenBogusCompact, - /// One of the submitted nominators is not an active nominator on chain. - PhragmenBogusNominator, - /// One of the submitted nominators has an edge to which they have not voted on chain. - PhragmenBogusNomination, - /// One of the submitted nominators has an edge which is submitted before the last non-zero - /// slash of the target. 
- PhragmenSlashedNomination, - /// A self vote must only be originated from a validator to ONLY themselves. - PhragmenBogusSelfVote, - /// The submitted result has unknown edges that are not among the presented winners. - PhragmenBogusEdge, - /// The claimed score does not match with the one computed from the data. - PhragmenBogusScore, - /// The call is not allowed at the given time due to restrictions of election period. - CallNotAllowed, - } + /// Error for the staking module. + pub enum Error for Module { + /// Not a controller account. + NotController, + /// Not a stash account. + NotStash, + /// Stash is already bonded. + AlreadyBonded, + /// Controller is already paired. + AlreadyPaired, + /// Targets cannot be empty. + EmptyTargets, + /// Duplicate index. + DuplicateIndex, + /// Slash record index out of bounds. + InvalidSlashIndex, + /// Can not bond with value less than minimum balance. + InsufficientValue, + /// Can not schedule more unlock chunks. + NoMoreChunks, + /// Can not rebond without unlocking chunks. + NoUnlockChunk, + /// Attempting to target a stash that still has funds. + FundedTarget, + /// Invalid era to reward. + InvalidEraToReward, + /// Invalid number of nominations. + InvalidNumberOfNominations, + /// Items are not sorted and unique. + NotSortedAndUnique, + /// Rewards for this era have already been claimed for this validator. + AlreadyClaimed, + /// The submitted result is received out of the open window. + PhragmenEarlySubmission, + /// The submitted result is not as good as the one stored on chain. + PhragmenWeakSubmission, + /// The snapshot data of the current window is missing. + SnapshotUnavailable, + /// Incorrect number of winners were presented. + PhragmenBogusWinnerCount, + /// One of the submitted winners is not an active candidate on chain (index is out of range + /// in snapshot). + PhragmenBogusWinner, + /// Error while building the assignment type from the compact. 
This can happen if an index + /// is invalid, or if the weights _overflow_. + PhragmenBogusCompact, + /// One of the submitted nominators is not an active nominator on chain. + PhragmenBogusNominator, + /// One of the submitted nominators has an edge to which they have not voted on chain. + PhragmenBogusNomination, + /// One of the submitted nominators has an edge which is submitted before the last non-zero + /// slash of the target. + PhragmenSlashedNomination, + /// A self vote must only be originated from a validator to ONLY themselves. + PhragmenBogusSelfVote, + /// The submitted result has unknown edges that are not among the presented winners. + PhragmenBogusEdge, + /// The claimed score does not match with the one computed from the data. + PhragmenBogusScore, + /// The call is not allowed at the given time due to restrictions of election period. + CallNotAllowed, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Number of sessions per era. - const SessionsPerEra: SessionIndex = T::SessionsPerEra::get(); - - /// Number of eras that staked funds must remain bonded for. - const BondingDuration: EraIndex = T::BondingDuration::get(); - - type Error = Error; - - fn deposit_event() = default; - - /// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the - /// election window has opened, if we are at the last session and less blocks than - /// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain - /// worker, if applicable, will execute at the end of the current block, and solutions may - /// be submitted. - fn on_initialize(now: T::BlockNumber) -> Weight { - if - // if we don't have any ongoing offchain compute. - Self::era_election_status().is_closed() && - // either current session final based on the plan, or we're forcing. 
- (Self::is_current_session_final() || Self::will_era_be_forced()) - { - if let Some(next_session_change) = T::NextNewSession::estimate_next_new_session(now){ - if let Some(remaining) = next_session_change.checked_sub(&now) { - if remaining <= T::ElectionLookahead::get() && !remaining.is_zero() { - // create snapshot. - if Self::create_stakers_snapshot() { - // Set the flag to make sure we don't waste any compute here in the same era - // after we have triggered the offline compute. - >::put( - ElectionStatus::::Open(now) - ); - log!(info, "💸 Election window is Open({:?}). Snapshot created", now); - } else { - log!(warn, "💸 Failed to create snapshot at {:?}.", now); - } - } - } - } else { - log!(warn, "💸 Estimating next session change failed."); - } - } - - // weight - 50_000 - } - - /// Check if the current block number is the one at which the election window has been set - /// to open. If so, it runs the offchain worker code. - fn offchain_worker(now: T::BlockNumber) { - use offchain_election::{set_check_offchain_execution_status, compute_offchain_election}; - - if Self::era_election_status().is_open_at(now) { - let offchain_status = set_check_offchain_execution_status::(now); - if let Err(why) = offchain_status { - log!(debug, "skipping offchain worker in open election window due to [{}]", why); - } else { - if let Err(e) = compute_offchain_election::() { - log!(warn, "💸 Error in phragmen offchain worker: {:?}", e); - } else { - log!(debug, "Executed offchain worker thread without errors."); - } - } - } - } - - fn on_finalize() { - // Set the start of the first era. 
- if let Some(mut active_era) = Self::active_era() { - if active_era.start.is_none() { - let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - active_era.start = Some(now_as_millis_u64); - ActiveEra::put(active_era); - } - } - } - - fn on_runtime_upgrade() -> Weight { - // For Kusama the type hasn't actually changed as Moment was u64 and was the number of - // millisecond since unix epoch. - StorageVersion::put(Releases::V3_0_0); - Self::migrate_last_reward_to_claimed_rewards(); - 0 - } - - /// Take the origin account as a stash and lock up `value` of its balance. `controller` will - /// be the account that controls it. - /// - /// `value` must be more than the `minimum_balance` specified by `T::Currency`. - /// - /// The dispatch origin for this call must be _Signed_ by the stash account. - /// - /// Emits `Bonded`. - /// - /// # - /// - Independent of the arguments. Moderate complexity. - /// - O(1). - /// - Three extra DB entries. - /// - /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned - /// unless the `origin` falls below _existential deposit_ and gets removed as dust. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - pub fn bond(origin, - controller: ::Source, - #[compact] value: BalanceOf, - payee: RewardDestination, - ) { - let stash = ensure_signed(origin)?; - - if >::contains_key(&stash) { - Err(Error::::AlreadyBonded)? - } - - let controller = T::Lookup::lookup(controller)?; - - if >::contains_key(&controller) { - Err(Error::::AlreadyPaired)? - } - - // reject a bond which is considered to be _dust_. - if value < T::Currency::minimum_balance() { - Err(Error::::InsufficientValue)? - } - - // You're auto-bonded forever, here. We might improve this by only bonding when - // you actually validate/nominate and remove once you unbond __everything__. 
- >::insert(&stash, &controller); - >::insert(&stash, payee); - - system::Module::::inc_ref(&stash); - - let current_era = CurrentEra::get().unwrap_or(0); - let history_depth = Self::history_depth(); - let last_reward_era = current_era.saturating_sub(history_depth); - - let stash_balance = T::Currency::free_balance(&stash); - let value = value.min(stash_balance); - Self::deposit_event(RawEvent::Bonded(stash.clone(), value)); - let item = StakingLedger { - stash, - total: value, - active: value, - unlocking: vec![], - claimed_rewards: (last_reward_era..current_era).collect(), - }; - Self::update_ledger(&controller, &item); - } - - /// Add some extra amount that have appeared in the stash `free_balance` into the balance up - /// for staking. - /// - /// Use this if there are additional funds in your stash account that you wish to bond. - /// Unlike [`bond`] or [`unbond`] this function does not impose any limitation on the amount - /// that can be added. - /// - /// The dispatch origin for this call must be _Signed_ by the stash, not the controller and - /// it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// Emits `Bonded`. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - O(1). - /// - One DB entry. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn bond_extra(origin, #[compact] max_additional: BalanceOf) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let stash = ensure_signed(origin)?; - - let controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - - let stash_balance = T::Currency::free_balance(&stash); - - if let Some(extra) = stash_balance.checked_sub(&ledger.total) { - let extra = extra.min(max_additional); - ledger.total += extra; - ledger.active += extra; - Self::deposit_event(RawEvent::Bonded(stash, extra)); - Self::update_ledger(&controller, &ledger); - } - } - - /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond - /// period ends. If this leaves an amount actively bonded less than - /// T::Currency::minimum_balance(), then it is increased to the full amount. - /// - /// Once the unlock period is done, you can call `withdraw_unbonded` to actually move - /// the funds out of management ready for transfer. - /// - /// No more than a limited number of unlocking chunks (see `MAX_UNLOCKING_CHUNKS`) - /// can co-exists at the same time. In that case, [`Call::withdraw_unbonded`] need - /// to be called first to remove some of the chunks (if possible). - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// Emits `Unbonded`. - /// - /// See also [`Call::withdraw_unbonded`]. - /// - /// # - /// - Independent of the arguments. Limited but potentially exploitable complexity. - /// - Contains a limited number of reads. - /// - Each call (requires the remainder of the bonded balance to be above `minimum_balance`) - /// will cause a new entry to be inserted into a vector (`Ledger.unlocking`) kept in storage. 
- /// The only way to clean the aforementioned storage item is also user-controlled via - /// `withdraw_unbonded`. - /// - One DB entry. - /// - #[weight = SimpleDispatchInfo::FixedNormal(400_000_000)] - fn unbond(origin, #[compact] value: BalanceOf) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - ensure!( - ledger.unlocking.len() < MAX_UNLOCKING_CHUNKS, - Error::::NoMoreChunks, - ); - - let mut value = value.min(ledger.active); - - if !value.is_zero() { - ledger.active -= value; - - // Avoid there being a dust balance left in the staking system. - if ledger.active < T::Currency::minimum_balance() { - value += ledger.active; - ledger.active = Zero::zero(); - } - - // Note: in case there is no current era it is fine to bond one era more. - let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); - ledger.unlocking.push(UnlockChunk { value, era }); - Self::update_ledger(&controller, &ledger); - Self::deposit_event(RawEvent::Unbonded(ledger.stash.clone(), value)); - } - } - - /// Remove any unlocked chunks from the `unlocking` queue from our management. - /// - /// This essentially frees up that balance to be used by the stash account to do - /// whatever it wants. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// Emits `Withdrawn`. - /// - /// See also [`Call::unbond`]. - /// - /// # - /// - Could be dependent on the `origin` argument and how much `unlocking` chunks exist. - /// It implies `consolidate_unlocked` which loops over `Ledger.unlocking`, which is - /// indirectly user-controlled. See [`unbond`] for more detail. - /// - Contains a limited number of reads, yet the size of which could be large based on `ledger`. 
- /// - Writes are limited to the `origin` account key. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(400_000_000)] - fn withdraw_unbonded(origin) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let (stash, old_total) = (ledger.stash.clone(), ledger.total); - if let Some(current_era) = Self::current_era() { - ledger = ledger.consolidate_unlocked(current_era) - } - - if ledger.unlocking.is_empty() && ledger.active.is_zero() { - // This account must have called `unbond()` with some value that caused the active - // portion to fall below existential deposit + will have no more unlocking chunks - // left. We can now safely remove all staking-related information. - Self::kill_stash(&stash)?; - // remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); - } else { - // This was the consequence of a partial unbond. just update the ledger and move on. - Self::update_ledger(&controller, &ledger); - } - - // `old_total` should never be less than the new total because - // `consolidate_unlocked` strictly subtracts balance. - if ledger.total < old_total { - // Already checked that this won't overflow by entry condition. - let value = old_total - ledger.total; - Self::deposit_event(RawEvent::Withdrawn(stash, value)); - } - } - - /// Declare the desire to validate for the origin controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains a limited number of reads. - /// - Writes are limited to the `origin` account key. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(750_000_000)] - pub fn validate(origin, prefs: ValidatorPrefs) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let stash = &ledger.stash; - >::remove(stash); - >::insert(stash, prefs); - } - - /// Declare the desire to nominate `targets` for the origin controller. - /// - /// Effects will be felt at the beginning of the next era. This can only be called when - /// [`EraElectionStatus`] is `Closed`. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - The transaction's complexity is proportional to the size of `targets`, - /// which is capped at CompactAssignments::LIMIT. - /// - Both the reads and writes follow a similar pattern. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(750_000_000)] - pub fn nominate(origin, targets: Vec<::Source>) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let stash = &ledger.stash; - ensure!(!targets.is_empty(), Error::::EmptyTargets); - let targets = targets.into_iter() - .take(::LIMIT) - .map(|t| T::Lookup::lookup(t)) - .collect::, _>>()?; - - let nominations = Nominations { - targets, - // initial nominations are considered submitted at era 0. See `Nominations` doc - submitted_in: Self::current_era().unwrap_or(0), - suppressed: false, - }; - - >::remove(stash); - >::insert(stash, &nominations); - } - - /// Declare no desire to either validate or nominate. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. 
- /// And, it can be only called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains one read. - /// - Writes are limited to the `origin` account key. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn chill(origin) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - Self::chill_stash(&ledger.stash); - } - - /// (Re-)set the payment target for a controller. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains a limited number of reads. - /// - Writes are limited to the `origin` account key. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn set_payee(origin, payee: RewardDestination) { - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - let stash = &ledger.stash; - >::insert(stash, payee); - } - - /// (Re-)set the controller of a stash. - /// - /// Effects will be felt at the beginning of the next era. - /// - /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. - /// - /// # - /// - Independent of the arguments. Insignificant complexity. - /// - Contains a limited number of reads. - /// - Writes are limited to the `origin` account key. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(750_000_000)] - fn set_controller(origin, controller: ::Source) { - let stash = ensure_signed(origin)?; - let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; - let controller = T::Lookup::lookup(controller)?; - if >::contains_key(&controller) { - Err(Error::::AlreadyPaired)? 
- } - if controller != old_controller { - >::insert(&stash, &controller); - if let Some(l) = >::take(&old_controller) { - >::insert(&controller, l); - } - } - } - - /// The ideal number of validators. - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] - fn set_validator_count(origin, #[compact] new: u32) { - ensure_root(origin)?; - ValidatorCount::put(new); - } - - /// Force there to be no new eras indefinitely. - /// - /// # - /// - No arguments. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] - fn force_no_eras(origin) { - ensure_root(origin)?; - ForceEra::put(Forcing::ForceNone); - } - - /// Force there to be a new era at the end of the next session. After this, it will be - /// reset to normal (non-forced) behaviour. - /// - /// # - /// - No arguments. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] - fn force_new_era(origin) { - ensure_root(origin)?; - ForceEra::put(Forcing::ForceNew); - } - - /// Set the validators who cannot be slashed (if any). - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] - fn set_invulnerables(origin, validators: Vec) { - ensure_root(origin)?; - >::put(validators); - } - - /// Force a current staker to become completely unstaked, immediately. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn force_unstake(origin, stash: T::AccountId) { - ensure_root(origin)?; - - // remove all staking-related information. - Self::kill_stash(&stash)?; - - // remove the lock. - T::Currency::remove_lock(STAKING_ID, &stash); - } - - /// Force there to be a new era at the end of sessions indefinitely. - /// - /// # - /// - One storage write - /// # - #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] - fn force_new_era_always(origin) { - ensure_root(origin)?; - ForceEra::put(Forcing::ForceAlways); - } - - /// Cancel enactment of a deferred slash. Can be called by either the root origin or - /// the `T::SlashCancelOrigin`. 
- /// passing the era and indices of the slashes for that era to kill. - /// - /// # - /// - One storage write. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(1_000_000_000)] - fn cancel_deferred_slash(origin, era: EraIndex, slash_indices: Vec) { - T::SlashCancelOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); - ensure!(is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); - - let mut unapplied = ::UnappliedSlashes::get(&era); - let last_item = slash_indices[slash_indices.len() - 1]; - ensure!((last_item as usize) < unapplied.len(), Error::::InvalidSlashIndex); - - for (removed, index) in slash_indices.into_iter().enumerate() { - let index = (index as usize) - removed; - unapplied.remove(index); - } - - ::UnappliedSlashes::insert(&era, &unapplied); - } - - /// **This extrinsic will be removed after `MigrationEra + HistoryDepth` has passed, giving - /// opportunity for users to claim all rewards before moving to Simple Payouts. After this - /// time, you should use `payout_stakers` instead.** - /// - /// Make one nominator's payout for one era. - /// - /// - `who` is the controller account of the nominator to pay out. - /// - `era` may not be lower than one following the most recently paid era. If it is higher, - /// then it indicates an instruction to skip the payout of all previous eras. - /// - `validators` is the list of all validators that `who` had exposure to during `era`, - /// alongside the index of `who` in the clipped exposure of the validator. - /// I.e. each element is a tuple of - /// `(validator, index of `who` in clipped exposure of validator)`. - /// If it is incomplete, then less than the full reward will be paid out. - /// It must not exceed `MAX_NOMINATIONS`. - /// - /// WARNING: once an era is payed for a validator such validator can't claim the payout of - /// previous era. 
- /// - /// WARNING: Incorrect arguments here can result in loss of payout. Be very careful. - /// - /// # - /// - Number of storage read of `O(validators)`; `validators` is the argument of the call, - /// and is bounded by `MAX_NOMINATIONS`. - /// - Each storage read is `O(N)` size and decode complexity; `N` is the maximum - /// nominations that can be given to a single validator. - /// - Computation complexity: `O(MAX_NOMINATIONS * logN)`; `MAX_NOMINATIONS` is the - /// maximum number of validators that may be nominated by a single nominator, it is - /// bounded only economically (all nominators are required to place a minimum stake). - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn payout_nominator(origin, era: EraIndex, validators: Vec<(T::AccountId, u32)>) - -> DispatchResult - { - let ctrl = ensure_signed(origin)?; - Self::do_payout_nominator(ctrl, era, validators) - } - - /// **This extrinsic will be removed after `MigrationEra + HistoryDepth` has passed, giving - /// opportunity for users to claim all rewards before moving to Simple Payouts. After this - /// time, you should use `payout_stakers` instead.** - /// - /// Make one validator's payout for one era. - /// - /// - `who` is the controller account of the validator to pay out. - /// - `era` may not be lower than one following the most recently paid era. If it is higher, - /// then it indicates an instruction to skip the payout of all previous eras. - /// - /// WARNING: once an era is payed for a validator such validator can't claim the payout of - /// previous era. - /// - /// WARNING: Incorrect arguments here can result in loss of payout. Be very careful. - /// - /// # - /// - Time complexity: O(1). - /// - Contains a limited number of reads and writes. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn payout_validator(origin, era: EraIndex) -> DispatchResult { - let ctrl = ensure_signed(origin)?; - Self::do_payout_validator(ctrl, era) - } - - /// Pay out all the stakers behind a single validator for a single era. - /// - /// - `validator_stash` is the stash account of the validator. Their nominators, up to - /// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards. - /// - `era` may be any era between `[current_era - history_depth; current_era]`. - /// - /// The origin of this call must be _Signed_. Any account can call this function, even if - /// it is not one of the stakers. - /// - /// This can only be called when [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Time complexity: at most O(MaxNominatorRewardedPerValidator). - /// - Contains a limited number of reads and writes. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - ensure_signed(origin)?; - Self::do_payout_stakers(validator_stash, era) - } - - /// Rebond a portion of the stash scheduled to be unlocked. - /// - /// The dispatch origin must be signed by the controller, and it can be only called when - /// [`EraElectionStatus`] is `Closed`. - /// - /// # - /// - Time complexity: O(1). Bounded by `MAX_UNLOCKING_CHUNKS`. - /// - Storage changes: Can't increase storage, only decrease it. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn rebond(origin, #[compact] value: BalanceOf) { - ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); - let controller = ensure_signed(origin)?; - let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; - ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); - - let ledger = ledger.rebond(value); - Self::update_ledger(&controller, &ledger); - } - - /// Set history_depth value. - /// - /// Origin must be root. - #[weight = SimpleDispatchInfo::FixedOperational(500_000_000)] - fn set_history_depth(origin, #[compact] new_history_depth: EraIndex) { - ensure_root(origin)?; - if let Some(current_era) = Self::current_era() { - HistoryDepth::mutate(|history_depth| { - let last_kept = current_era.checked_sub(*history_depth).unwrap_or(0); - let new_last_kept = current_era.checked_sub(new_history_depth).unwrap_or(0); - for era_index in last_kept..new_last_kept { - Self::clear_era_information(era_index); - } - *history_depth = new_history_depth - }) - } - } - - /// Remove all data structure concerning a staker/stash once its balance is zero. - /// This is essentially equivalent to `withdraw_unbonded` except it can be called by anyone - /// and the target `stash` must have no funds left. - /// - /// This can be called from any origin. - /// - /// - `stash`: The stash account to reap. Its balance must be zero. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn reap_stash(_origin, stash: T::AccountId) { - ensure!(T::Currency::total_balance(&stash).is_zero(), Error::::FundedTarget); - Self::kill_stash(&stash)?; - T::Currency::remove_lock(STAKING_ID, &stash); - } - - /// Submit a phragmen result to the chain. If the solution: - /// - /// 1. is valid. - /// 2. has a better score than a potentially existing solution on chain. - /// - /// then, it will be _put_ on chain. - /// - /// A solution consists of two pieces of data: - /// - /// 1. 
`winners`: a flat vector of all the winners of the round. - /// 2. `assignments`: the compact version of an assignment vector that encodes the edge - /// weights. - /// - /// Both of which may be computed using [`phragmen`], or any other algorithm. - /// - /// Additionally, the submitter must provide: - /// - /// - The `score` that they claim their solution has. - /// - /// Both validators and nominators will be represented by indices in the solution. The - /// indices should respect the corresponding types ([`ValidatorIndex`] and - /// [`NominatorIndex`]). Moreover, they should be valid when used to index into - /// [`SnapshotValidators`] and [`SnapshotNominators`]. Any invalid index will cause the - /// solution to be rejected. These two storage items are set during the election window and - /// may be used to determine the indices. - /// - /// A solution is valid if: - /// - /// 0. It is submitted when [`EraElectionStatus`] is `Open`. - /// 1. Its claimed score is equal to the score computed on-chain. - /// 2. Presents the correct number of winners. - /// 3. All indexes must be value according to the snapshot vectors. All edge values must - /// also be correct and should not overflow the granularity of the ratio type (i.e. 256 - /// or billion). - /// 4. For each edge, all targets are actually nominated by the voter. - /// 5. Has correct self-votes. - /// - /// A solutions score is consisted of 3 parameters: - /// - /// 1. `min { support.total }` for each support of a winner. This value should be maximized. - /// 2. `sum { support.total }` for each support of a winner. This value should be minimized. - /// 3. `sum { support.total^2 }` for each support of a winner. This value should be - /// minimized (to ensure less variance) - /// - /// # - /// E: number of edges. m: size of winner committee. n: number of nominators. d: edge degree - /// (16 for now) v: number of on-chain validator candidates. 
- /// - /// NOTE: given a solution which is reduced, we can enable a new check the ensure `|E| < n + - /// m`. We don't do this _yet_, but our offchain worker code executes it nonetheless. - /// - /// major steps (all done in `check_and_replace_solution`): - /// - /// - Storage: O(1) read `ElectionStatus`. - /// - Storage: O(1) read `PhragmenScore`. - /// - Storage: O(1) read `ValidatorCount`. - /// - Storage: O(1) length read from `SnapshotValidators`. - /// - /// - Storage: O(v) reads of `AccountId` to fetch `snapshot_validators`. - /// - Memory: O(m) iterations to map winner index to validator id. - /// - Storage: O(n) reads `AccountId` to fetch `snapshot_nominators`. - /// - Memory: O(n + m) reads to map index to `AccountId` for un-compact. - /// - /// - Storage: O(e) accountid reads from `Nomination` to read correct nominations. - /// - Storage: O(e) calls into `slashable_balance_of_vote_weight` to convert ratio to staked. - /// - /// - Memory: build_support_map. O(e). - /// - Memory: evaluate_support: O(E). - /// - /// - Storage: O(e) writes to `QueuedElected`. - /// - Storage: O(1) write to `QueuedScore` - /// - /// The weight of this call is 1/10th of the blocks total weight. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000_000)] - pub fn submit_election_solution( - origin, - winners: Vec, - compact_assignments: CompactAssignments, - score: PhragmenScore, - era: EraIndex, - ) { - let _who = ensure_signed(origin)?; - Self::check_and_replace_solution( - winners, - compact_assignments, - ElectionCompute::Signed, - score, - era, - )? - } - - /// Unsigned version of `submit_election_solution`. - /// - /// Note that this must pass the [`ValidateUnsigned`] check which only allows transactions - /// from the local node to be included. In other words, only the block author can include a - /// transaction in the block. 
- #[weight = SimpleDispatchInfo::FixedNormal(100_000_000_000)] - pub fn submit_election_solution_unsigned( - origin, - winners: Vec, - compact_assignments: CompactAssignments, - score: PhragmenScore, - era: EraIndex, - ) { - ensure_none(origin)?; - Self::check_and_replace_solution( - winners, - compact_assignments, - ElectionCompute::Unsigned, - score, - era, - )? - // TODO: instead of returning an error, panic. This makes the entire produced block - // invalid. - // This ensures that block authors will not ever try and submit a solution which is not - // an improvement, since they will lose their authoring points/rewards. - } - } + pub struct Module for enum Call where origin: T::Origin { + /// Number of sessions per era. + const SessionsPerEra: SessionIndex = T::SessionsPerEra::get(); + + /// Number of eras that staked funds must remain bonded for. + const BondingDuration: EraIndex = T::BondingDuration::get(); + + type Error = Error; + + fn deposit_event() = default; + + /// sets `ElectionStatus` to `Open(now)` where `now` is the block number at which the + /// election window has opened, if we are at the last session and less blocks than + /// `T::ElectionLookahead` is remaining until the next new session schedule. The offchain + /// worker, if applicable, will execute at the end of the current block, and solutions may + /// be submitted. + fn on_initialize(now: T::BlockNumber) -> Weight { + if + // if we don't have any ongoing offchain compute. + Self::era_election_status().is_closed() && + // either current session final based on the plan, or we're forcing. + (Self::is_current_session_final() || Self::will_era_be_forced()) + { + if let Some(next_session_change) = T::NextNewSession::estimate_next_new_session(now){ + if let Some(remaining) = next_session_change.checked_sub(&now) { + if remaining <= T::ElectionLookahead::get() && !remaining.is_zero() { + // create snapshot. 
+ if Self::create_stakers_snapshot() { + // Set the flag to make sure we don't waste any compute here in the same era + // after we have triggered the offline compute. + >::put( + ElectionStatus::::Open(now) + ); + log!(info, "💸 Election window is Open({:?}). Snapshot created", now); + } else { + log!(warn, "💸 Failed to create snapshot at {:?}.", now); + } + } + } + } else { + log!(warn, "💸 Estimating next session change failed."); + } + } + + // weight + 50_000 + } + + /// Check if the current block number is the one at which the election window has been set + /// to open. If so, it runs the offchain worker code. + fn offchain_worker(now: T::BlockNumber) { + use offchain_election::{set_check_offchain_execution_status, compute_offchain_election}; + + if Self::era_election_status().is_open_at(now) { + let offchain_status = set_check_offchain_execution_status::(now); + if let Err(why) = offchain_status { + log!(debug, "skipping offchain worker in open election window due to [{}]", why); + } else { + if let Err(e) = compute_offchain_election::() { + log!(warn, "💸 Error in phragmen offchain worker: {:?}", e); + } else { + log!(debug, "Executed offchain worker thread without errors."); + } + } + } + } + + fn on_finalize() { + // Set the start of the first era. + if let Some(mut active_era) = Self::active_era() { + if active_era.start.is_none() { + let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); + active_era.start = Some(now_as_millis_u64); + ActiveEra::put(active_era); + } + } + } + + fn on_runtime_upgrade() -> Weight { + // For Kusama the type hasn't actually changed as Moment was u64 and was the number of + // millisecond since unix epoch. + StorageVersion::put(Releases::V3_0_0); + Self::migrate_last_reward_to_claimed_rewards(); + 0 + } + + /// Take the origin account as a stash and lock up `value` of its balance. `controller` will + /// be the account that controls it. 
+ /// + /// `value` must be more than the `minimum_balance` specified by `T::Currency`. + /// + /// The dispatch origin for this call must be _Signed_ by the stash account. + /// + /// Emits `Bonded`. + /// + /// # + /// - Independent of the arguments. Moderate complexity. + /// - O(1). + /// - Three extra DB entries. + /// + /// NOTE: Two of the storage writes (`Self::bonded`, `Self::payee`) are _never_ cleaned + /// unless the `origin` falls below _existential deposit_ and gets removed as dust. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + pub fn bond(origin, + controller: ::Source, + #[compact] value: BalanceOf, + payee: RewardDestination, + ) { + let stash = ensure_signed(origin)?; + + if >::contains_key(&stash) { + Err(Error::::AlreadyBonded)? + } + + let controller = T::Lookup::lookup(controller)?; + + if >::contains_key(&controller) { + Err(Error::::AlreadyPaired)? + } + + // reject a bond which is considered to be _dust_. + if value < T::Currency::minimum_balance() { + Err(Error::::InsufficientValue)? + } + + // You're auto-bonded forever, here. We might improve this by only bonding when + // you actually validate/nominate and remove once you unbond __everything__. + >::insert(&stash, &controller); + >::insert(&stash, payee); + + system::Module::::inc_ref(&stash); + + let current_era = CurrentEra::get().unwrap_or(0); + let history_depth = Self::history_depth(); + let last_reward_era = current_era.saturating_sub(history_depth); + + let stash_balance = T::Currency::free_balance(&stash); + let value = value.min(stash_balance); + Self::deposit_event(RawEvent::Bonded(stash.clone(), value)); + let item = StakingLedger { + stash, + total: value, + active: value, + unlocking: vec![], + claimed_rewards: (last_reward_era..current_era).collect(), + }; + Self::update_ledger(&controller, &item); + } + + /// Add some extra amount that have appeared in the stash `free_balance` into the balance up + /// for staking. 
+ /// + /// Use this if there are additional funds in your stash account that you wish to bond. + /// Unlike [`bond`] or [`unbond`] this function does not impose any limitation on the amount + /// that can be added. + /// + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller and + /// it can be only called when [`EraElectionStatus`] is `Closed`. + /// + /// Emits `Bonded`. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - O(1). + /// - One DB entry. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn bond_extra(origin, #[compact] max_additional: BalanceOf) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let stash = ensure_signed(origin)?; + + let controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; + let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + + let stash_balance = T::Currency::free_balance(&stash); + + if let Some(extra) = stash_balance.checked_sub(&ledger.total) { + let extra = extra.min(max_additional); + ledger.total += extra; + ledger.active += extra; + Self::deposit_event(RawEvent::Bonded(stash, extra)); + Self::update_ledger(&controller, &ledger); + } + } + + /// Schedule a portion of the stash to be unlocked ready for transfer out after the bond + /// period ends. If this leaves an amount actively bonded less than + /// T::Currency::minimum_balance(), then it is increased to the full amount. + /// + /// Once the unlock period is done, you can call `withdraw_unbonded` to actually move + /// the funds out of management ready for transfer. + /// + /// No more than a limited number of unlocking chunks (see `MAX_UNLOCKING_CHUNKS`) + /// can co-exists at the same time. In that case, [`Call::withdraw_unbonded`] need + /// to be called first to remove some of the chunks (if possible). + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. 
+ /// And, it can be only called when [`EraElectionStatus`] is `Closed`. + /// + /// Emits `Unbonded`. + /// + /// See also [`Call::withdraw_unbonded`]. + /// + /// # + /// - Independent of the arguments. Limited but potentially exploitable complexity. + /// - Contains a limited number of reads. + /// - Each call (requires the remainder of the bonded balance to be above `minimum_balance`) + /// will cause a new entry to be inserted into a vector (`Ledger.unlocking`) kept in storage. + /// The only way to clean the aforementioned storage item is also user-controlled via + /// `withdraw_unbonded`. + /// - One DB entry. + /// + #[weight = SimpleDispatchInfo::FixedNormal(400_000_000)] + fn unbond(origin, #[compact] value: BalanceOf) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let controller = ensure_signed(origin)?; + let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!( + ledger.unlocking.len() < MAX_UNLOCKING_CHUNKS, + Error::::NoMoreChunks, + ); + + let mut value = value.min(ledger.active); + + if !value.is_zero() { + ledger.active -= value; + + // Avoid there being a dust balance left in the staking system. + if ledger.active < T::Currency::minimum_balance() { + value += ledger.active; + ledger.active = Zero::zero(); + } + + // Note: in case there is no current era it is fine to bond one era more. + let era = Self::current_era().unwrap_or(0) + T::BondingDuration::get(); + ledger.unlocking.push(UnlockChunk { value, era }); + Self::update_ledger(&controller, &ledger); + Self::deposit_event(RawEvent::Unbonded(ledger.stash.clone(), value)); + } + } + + /// Remove any unlocked chunks from the `unlocking` queue from our management. + /// + /// This essentially frees up that balance to be used by the stash account to do + /// whatever it wants. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. 
+ /// And, it can be only called when [`EraElectionStatus`] is `Closed`. + /// + /// Emits `Withdrawn`. + /// + /// See also [`Call::unbond`]. + /// + /// # + /// - Could be dependent on the `origin` argument and how much `unlocking` chunks exist. + /// It implies `consolidate_unlocked` which loops over `Ledger.unlocking`, which is + /// indirectly user-controlled. See [`unbond`] for more detail. + /// - Contains a limited number of reads, yet the size of which could be large based on `ledger`. + /// - Writes are limited to the `origin` account key. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(400_000_000)] + fn withdraw_unbonded(origin) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let controller = ensure_signed(origin)?; + let mut ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let (stash, old_total) = (ledger.stash.clone(), ledger.total); + if let Some(current_era) = Self::current_era() { + ledger = ledger.consolidate_unlocked(current_era) + } + + if ledger.unlocking.is_empty() && ledger.active.is_zero() { + // This account must have called `unbond()` with some value that caused the active + // portion to fall below existential deposit + will have no more unlocking chunks + // left. We can now safely remove all staking-related information. + Self::kill_stash(&stash)?; + // remove the lock. + T::Currency::remove_lock(STAKING_ID, &stash); + } else { + // This was the consequence of a partial unbond. just update the ledger and move on. + Self::update_ledger(&controller, &ledger); + } + + // `old_total` should never be less than the new total because + // `consolidate_unlocked` strictly subtracts balance. + if ledger.total < old_total { + // Already checked that this won't overflow by entry condition. + let value = old_total - ledger.total; + Self::deposit_event(RawEvent::Withdrawn(stash, value)); + } + } + + /// Declare the desire to validate for the origin controller. 
+ /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - Contains a limited number of reads. + /// - Writes are limited to the `origin` account key. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(750_000_000)] + pub fn validate(origin, prefs: ValidatorPrefs) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let stash = &ledger.stash; + >::remove(stash); + >::insert(stash, prefs); + } + + /// Declare the desire to nominate `targets` for the origin controller. + /// + /// Effects will be felt at the beginning of the next era. This can only be called when + /// [`EraElectionStatus`] is `Closed`. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. + /// + /// # + /// - The transaction's complexity is proportional to the size of `targets`, + /// which is capped at CompactAssignments::LIMIT. + /// - Both the reads and writes follow a similar pattern. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(750_000_000)] + pub fn nominate(origin, targets: Vec<::Source>) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let stash = &ledger.stash; + ensure!(!targets.is_empty(), Error::::EmptyTargets); + let targets = targets.into_iter() + .take(::LIMIT) + .map(|t| T::Lookup::lookup(t)) + .collect::, _>>()?; + + let nominations = Nominations { + targets, + // initial nominations are considered submitted at era 0. See `Nominations` doc + submitted_in: Self::current_era().unwrap_or(0), + suppressed: false, + }; + + >::remove(stash); + >::insert(stash, &nominations); + } + + /// Declare no desire to either validate or nominate. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// And, it can be only called when [`EraElectionStatus`] is `Closed`. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - Contains one read. + /// - Writes are limited to the `origin` account key. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn chill(origin) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + Self::chill_stash(&ledger.stash); + } + + /// (Re-)set the payment target for a controller. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the controller, not the stash. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - Contains a limited number of reads. + /// - Writes are limited to the `origin` account key. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn set_payee(origin, payee: RewardDestination) { + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + let stash = &ledger.stash; + >::insert(stash, payee); + } + + /// (Re-)set the controller of a stash. + /// + /// Effects will be felt at the beginning of the next era. + /// + /// The dispatch origin for this call must be _Signed_ by the stash, not the controller. + /// + /// # + /// - Independent of the arguments. Insignificant complexity. + /// - Contains a limited number of reads. + /// - Writes are limited to the `origin` account key. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(750_000_000)] + fn set_controller(origin, controller: ::Source) { + let stash = ensure_signed(origin)?; + let old_controller = Self::bonded(&stash).ok_or(Error::::NotStash)?; + let controller = T::Lookup::lookup(controller)?; + if >::contains_key(&controller) { + Err(Error::::AlreadyPaired)? + } + if controller != old_controller { + >::insert(&stash, &controller); + if let Some(l) = >::take(&old_controller) { + >::insert(&controller, l); + } + } + } + + /// The ideal number of validators. + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] + fn set_validator_count(origin, #[compact] new: u32) { + ensure_root(origin)?; + ValidatorCount::put(new); + } + + /// Force there to be no new eras indefinitely. + /// + /// # + /// - No arguments. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] + fn force_no_eras(origin) { + ensure_root(origin)?; + ForceEra::put(Forcing::ForceNone); + } + + /// Force there to be a new era at the end of the next session. After this, it will be + /// reset to normal (non-forced) behaviour. + /// + /// # + /// - No arguments. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] + fn force_new_era(origin) { + ensure_root(origin)?; + ForceEra::put(Forcing::ForceNew); + } + + /// Set the validators who cannot be slashed (if any). + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] + fn set_invulnerables(origin, validators: Vec) { + ensure_root(origin)?; + >::put(validators); + } + + /// Force a current staker to become completely unstaked, immediately. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn force_unstake(origin, stash: T::AccountId) { + ensure_root(origin)?; + + // remove all staking-related information. + Self::kill_stash(&stash)?; + + // remove the lock. + T::Currency::remove_lock(STAKING_ID, &stash); + } + + /// Force there to be a new era at the end of sessions indefinitely. + /// + /// # + /// - One storage write + /// # + #[weight = SimpleDispatchInfo::FixedNormal(5_000_000)] + fn force_new_era_always(origin) { + ensure_root(origin)?; + ForceEra::put(Forcing::ForceAlways); + } + + /// Cancel enactment of a deferred slash. Can be called by either the root origin or + /// the `T::SlashCancelOrigin`. + /// passing the era and indices of the slashes for that era to kill. + /// + /// # + /// - One storage write. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(1_000_000_000)] + fn cancel_deferred_slash(origin, era: EraIndex, slash_indices: Vec) { + T::SlashCancelOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + ensure!(!slash_indices.is_empty(), Error::::EmptyTargets); + ensure!(is_sorted_and_unique(&slash_indices), Error::::NotSortedAndUnique); + + let mut unapplied = ::UnappliedSlashes::get(&era); + let last_item = slash_indices[slash_indices.len() - 1]; + ensure!((last_item as usize) < unapplied.len(), Error::::InvalidSlashIndex); + + for (removed, index) in slash_indices.into_iter().enumerate() { + let index = (index as usize) - removed; + unapplied.remove(index); + } + + ::UnappliedSlashes::insert(&era, &unapplied); + } + + /// **This extrinsic will be removed after `MigrationEra + HistoryDepth` has passed, giving + /// opportunity for users to claim all rewards before moving to Simple Payouts. After this + /// time, you should use `payout_stakers` instead.** + /// + /// Make one nominator's payout for one era. + /// + /// - `who` is the controller account of the nominator to pay out. + /// - `era` may not be lower than one following the most recently paid era. If it is higher, + /// then it indicates an instruction to skip the payout of all previous eras. + /// - `validators` is the list of all validators that `who` had exposure to during `era`, + /// alongside the index of `who` in the clipped exposure of the validator. + /// I.e. each element is a tuple of + /// `(validator, index of `who` in clipped exposure of validator)`. + /// If it is incomplete, then less than the full reward will be paid out. + /// It must not exceed `MAX_NOMINATIONS`. + /// + /// WARNING: once an era is payed for a validator such validator can't claim the payout of + /// previous era. + /// + /// WARNING: Incorrect arguments here can result in loss of payout. Be very careful. 
+ /// + /// # + /// - Number of storage read of `O(validators)`; `validators` is the argument of the call, + /// and is bounded by `MAX_NOMINATIONS`. + /// - Each storage read is `O(N)` size and decode complexity; `N` is the maximum + /// nominations that can be given to a single validator. + /// - Computation complexity: `O(MAX_NOMINATIONS * logN)`; `MAX_NOMINATIONS` is the + /// maximum number of validators that may be nominated by a single nominator, it is + /// bounded only economically (all nominators are required to place a minimum stake). + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn payout_nominator(origin, era: EraIndex, validators: Vec<(T::AccountId, u32)>) + -> DispatchResult + { + let ctrl = ensure_signed(origin)?; + Self::do_payout_nominator(ctrl, era, validators) + } + + /// **This extrinsic will be removed after `MigrationEra + HistoryDepth` has passed, giving + /// opportunity for users to claim all rewards before moving to Simple Payouts. After this + /// time, you should use `payout_stakers` instead.** + /// + /// Make one validator's payout for one era. + /// + /// - `who` is the controller account of the validator to pay out. + /// - `era` may not be lower than one following the most recently paid era. If it is higher, + /// then it indicates an instruction to skip the payout of all previous eras. + /// + /// WARNING: once an era is payed for a validator such validator can't claim the payout of + /// previous era. + /// + /// WARNING: Incorrect arguments here can result in loss of payout. Be very careful. + /// + /// # + /// - Time complexity: O(1). + /// - Contains a limited number of reads and writes. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn payout_validator(origin, era: EraIndex) -> DispatchResult { + let ctrl = ensure_signed(origin)?; + Self::do_payout_validator(ctrl, era) + } + + /// Pay out all the stakers behind a single validator for a single era. 
+ /// + /// - `validator_stash` is the stash account of the validator. Their nominators, up to + /// `T::MaxNominatorRewardedPerValidator`, will also receive their rewards. + /// - `era` may be any era between `[current_era - history_depth; current_era]`. + /// + /// The origin of this call must be _Signed_. Any account can call this function, even if + /// it is not one of the stakers. + /// + /// This can only be called when [`EraElectionStatus`] is `Closed`. + /// + /// # + /// - Time complexity: at most O(MaxNominatorRewardedPerValidator). + /// - Contains a limited number of reads and writes. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn payout_stakers(origin, validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + ensure_signed(origin)?; + Self::do_payout_stakers(validator_stash, era) + } + + /// Rebond a portion of the stash scheduled to be unlocked. + /// + /// The dispatch origin must be signed by the controller, and it can be only called when + /// [`EraElectionStatus`] is `Closed`. + /// + /// # + /// - Time complexity: O(1). Bounded by `MAX_UNLOCKING_CHUNKS`. + /// - Storage changes: Can't increase storage, only decrease it. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn rebond(origin, #[compact] value: BalanceOf) { + ensure!(Self::era_election_status().is_closed(), Error::::CallNotAllowed); + let controller = ensure_signed(origin)?; + let ledger = Self::ledger(&controller).ok_or(Error::::NotController)?; + ensure!(!ledger.unlocking.is_empty(), Error::::NoUnlockChunk); + + let ledger = ledger.rebond(value); + Self::update_ledger(&controller, &ledger); + } + + /// Set history_depth value. + /// + /// Origin must be root. 
+ #[weight = SimpleDispatchInfo::FixedOperational(500_000_000)] + fn set_history_depth(origin, #[compact] new_history_depth: EraIndex) { + ensure_root(origin)?; + if let Some(current_era) = Self::current_era() { + HistoryDepth::mutate(|history_depth| { + let last_kept = current_era.checked_sub(*history_depth).unwrap_or(0); + let new_last_kept = current_era.checked_sub(new_history_depth).unwrap_or(0); + for era_index in last_kept..new_last_kept { + Self::clear_era_information(era_index); + } + *history_depth = new_history_depth + }) + } + } + + /// Remove all data structure concerning a staker/stash once its balance is zero. + /// This is essentially equivalent to `withdraw_unbonded` except it can be called by anyone + /// and the target `stash` must have no funds left. + /// + /// This can be called from any origin. + /// + /// - `stash`: The stash account to reap. Its balance must be zero. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn reap_stash(_origin, stash: T::AccountId) { + ensure!(T::Currency::total_balance(&stash).is_zero(), Error::::FundedTarget); + Self::kill_stash(&stash)?; + T::Currency::remove_lock(STAKING_ID, &stash); + } + + /// Submit a phragmen result to the chain. If the solution: + /// + /// 1. is valid. + /// 2. has a better score than a potentially existing solution on chain. + /// + /// then, it will be _put_ on chain. + /// + /// A solution consists of two pieces of data: + /// + /// 1. `winners`: a flat vector of all the winners of the round. + /// 2. `assignments`: the compact version of an assignment vector that encodes the edge + /// weights. + /// + /// Both of which may be computed using [`phragmen`], or any other algorithm. + /// + /// Additionally, the submitter must provide: + /// + /// - The `score` that they claim their solution has. + /// + /// Both validators and nominators will be represented by indices in the solution. 
The + /// indices should respect the corresponding types ([`ValidatorIndex`] and + /// [`NominatorIndex`]). Moreover, they should be valid when used to index into + /// [`SnapshotValidators`] and [`SnapshotNominators`]. Any invalid index will cause the + /// solution to be rejected. These two storage items are set during the election window and + /// may be used to determine the indices. + /// + /// A solution is valid if: + /// + /// 0. It is submitted when [`EraElectionStatus`] is `Open`. + /// 1. Its claimed score is equal to the score computed on-chain. + /// 2. Presents the correct number of winners. + /// 3. All indexes must be value according to the snapshot vectors. All edge values must + /// also be correct and should not overflow the granularity of the ratio type (i.e. 256 + /// or billion). + /// 4. For each edge, all targets are actually nominated by the voter. + /// 5. Has correct self-votes. + /// + /// A solutions score is consisted of 3 parameters: + /// + /// 1. `min { support.total }` for each support of a winner. This value should be maximized. + /// 2. `sum { support.total }` for each support of a winner. This value should be minimized. + /// 3. `sum { support.total^2 }` for each support of a winner. This value should be + /// minimized (to ensure less variance) + /// + /// # + /// E: number of edges. m: size of winner committee. n: number of nominators. d: edge degree + /// (16 for now) v: number of on-chain validator candidates. + /// + /// NOTE: given a solution which is reduced, we can enable a new check the ensure `|E| < n + + /// m`. We don't do this _yet_, but our offchain worker code executes it nonetheless. + /// + /// major steps (all done in `check_and_replace_solution`): + /// + /// - Storage: O(1) read `ElectionStatus`. + /// - Storage: O(1) read `PhragmenScore`. + /// - Storage: O(1) read `ValidatorCount`. + /// - Storage: O(1) length read from `SnapshotValidators`. 
+ /// + /// - Storage: O(v) reads of `AccountId` to fetch `snapshot_validators`. + /// - Memory: O(m) iterations to map winner index to validator id. + /// - Storage: O(n) reads `AccountId` to fetch `snapshot_nominators`. + /// - Memory: O(n + m) reads to map index to `AccountId` for un-compact. + /// + /// - Storage: O(e) accountid reads from `Nomination` to read correct nominations. + /// - Storage: O(e) calls into `slashable_balance_of_vote_weight` to convert ratio to staked. + /// + /// - Memory: build_support_map. O(e). + /// - Memory: evaluate_support: O(E). + /// + /// - Storage: O(e) writes to `QueuedElected`. + /// - Storage: O(1) write to `QueuedScore` + /// + /// The weight of this call is 1/10th of the blocks total weight. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000_000)] + pub fn submit_election_solution( + origin, + winners: Vec, + compact_assignments: CompactAssignments, + score: PhragmenScore, + era: EraIndex, + ) { + let _who = ensure_signed(origin)?; + Self::check_and_replace_solution( + winners, + compact_assignments, + ElectionCompute::Signed, + score, + era, + )? + } + + /// Unsigned version of `submit_election_solution`. + /// + /// Note that this must pass the [`ValidateUnsigned`] check which only allows transactions + /// from the local node to be included. In other words, only the block author can include a + /// transaction in the block. + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000_000)] + pub fn submit_election_solution_unsigned( + origin, + winners: Vec, + compact_assignments: CompactAssignments, + score: PhragmenScore, + era: EraIndex, + ) { + ensure_none(origin)?; + Self::check_and_replace_solution( + winners, + compact_assignments, + ElectionCompute::Unsigned, + score, + era, + )? + // TODO: instead of returning an error, panic. This makes the entire produced block + // invalid. 
+ // This ensures that block authors will not ever try and submit a solution which is not + // an improvement, since they will lose their authoring points/rewards. + } + } } impl Module { - /// Migrate `last_reward` to `claimed_rewards` - pub fn migrate_last_reward_to_claimed_rewards() { - use frame_support::migration::{StorageIterator, put_storage_value}; - // Migrate from `last_reward` to `claimed_rewards`. - // We will construct a vector from `current_era - history_depth` to `last_reward` - // for each validator and nominator. - // - // Old Staking Ledger - #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] - struct OldStakingLedger { - pub stash: AccountId, - #[codec(compact)] - pub total: Balance, - #[codec(compact)] - pub active: Balance, - pub unlocking: Vec>, - pub last_reward: Option, - } - // Current era and history depth - let current_era = Self::current_era().unwrap_or(0); - let history_depth = Self::history_depth(); - let last_payout_era = current_era.saturating_sub(history_depth); - // Convert all ledgers to the new format. - for (hash, old_ledger) in StorageIterator::>>::new(b"Staking", b"Ledger").drain() { - let last_reward = old_ledger.last_reward.unwrap_or(0); - let new_ledger = StakingLedger { - stash: old_ledger.stash, - total: old_ledger.total, - active: old_ledger.active, - unlocking: old_ledger.unlocking, - claimed_rewards: (last_payout_era..=last_reward).collect(), - }; - put_storage_value(b"Staking", b"Ledger", &hash, new_ledger); - } - MigrateEra::put(current_era); - } - - /// The total balance that can be slashed from a stash account as of right now. - pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { - Self::bonded(stash).and_then(Self::ledger).map(|l| l.active).unwrap_or_default() - } - - /// internal impl of [`slashable_balance_of`] that returns [`VoteWeight`]. 
- fn slashable_balance_of_vote_weight(stash: &T::AccountId) -> VoteWeight { - , VoteWeight>>::convert( - Self::slashable_balance_of(stash) - ) - } - - /// Dump the list of validators and nominators into vectors and keep them on-chain. - /// - /// This data is used to efficiently evaluate election results. returns `true` if the operation - /// is successful. - fn create_stakers_snapshot() -> bool { - let validators = >::iter().map(|(v, _)| v).collect::>(); - let mut nominators = >::iter().map(|(n, _)| n).collect::>(); - - let num_validators = validators.len(); - let num_nominators = nominators.len(); - if - num_validators > MAX_VALIDATORS || - num_nominators.saturating_add(num_validators) > MAX_NOMINATORS - { - log!( - warn, - "💸 Snapshot size too big [{} <> {}][{} <> {}].", - num_validators, - MAX_VALIDATORS, - num_nominators, - MAX_NOMINATORS, - ); - false - } else { - // all validators nominate themselves; - nominators.extend(validators.clone()); - - >::put(validators); - >::put(nominators); - true - } - } - - /// Clears both snapshots of stakers. - fn kill_stakers_snapshot() { - >::kill(); - >::kill(); - } - - fn do_payout_nominator(ctrl: T::AccountId, era: EraIndex, validators: Vec<(T::AccountId, u32)>) - -> DispatchResult - { - // validators len must not exceed `MAX_NOMINATIONS` to avoid querying more validator - // exposure than necessary. - if validators.len() > MAX_NOMINATIONS { - return Err(Error::::InvalidNumberOfNominations.into()); - } - // If migrate_era is not populated, then you should use `payout_stakers` - let migrate_era = MigrateEra::get().ok_or(Error::::InvalidEraToReward)?; - // This payout mechanism will only work for eras before the migration. - // Subsequent payouts should use `payout_stakers`. 
- ensure!(era < migrate_era, Error::::InvalidEraToReward); - let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; - ensure!(era <= current_era, Error::::InvalidEraToReward); - let history_depth = Self::history_depth(); - ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); - - // Note: if era has no reward to be claimed, era may be future. better not to update - // `nominator_ledger.last_reward` in this case. - let era_payout = >::get(&era) - .ok_or_else(|| Error::::InvalidEraToReward)?; - - let mut nominator_ledger = >::get(&ctrl).ok_or_else(|| Error::::NotController)?; - - ensure!( - Self::era_election_status().is_closed() || Self::payee(&nominator_ledger.stash) != RewardDestination::Staked, - Error::::CallNotAllowed, - ); - - nominator_ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); - match nominator_ledger.claimed_rewards.binary_search(&era) { - Ok(_) => Err(Error::::AlreadyClaimed)?, - Err(pos) => nominator_ledger.claimed_rewards.insert(pos, era), - } - - >::insert(&ctrl, &nominator_ledger); - - let mut reward = Perbill::zero(); - let era_reward_points = >::get(&era); - - for (validator, nominator_index) in validators.into_iter() { - let commission = Self::eras_validator_prefs(&era, &validator).commission; - let validator_exposure = >::get(&era, &validator); - - if let Some(nominator_exposure) = validator_exposure.others - .get(nominator_index as usize) - { - if nominator_exposure.who != nominator_ledger.stash { - continue; - } - - let nominator_exposure_part = Perbill::from_rational_approximation( - nominator_exposure.value, - validator_exposure.total, - ); - let validator_point = era_reward_points.individual.get(&validator) - .map(|points| *points) - .unwrap_or_else(|| Zero::zero()); - let validator_point_part = Perbill::from_rational_approximation( - validator_point, - era_reward_points.total, - ); - reward = reward.saturating_add( - validator_point_part - 
.saturating_mul(Perbill::one().saturating_sub(commission)) - .saturating_mul(nominator_exposure_part) - ); - } - } - - if let Some(imbalance) = Self::make_payout(&nominator_ledger.stash, reward * era_payout) { - Self::deposit_event(RawEvent::Reward(ctrl, imbalance.peek())); - } - - Ok(()) - } - - fn do_payout_validator(ctrl: T::AccountId, era: EraIndex) -> DispatchResult { - // If migrate_era is not populated, then you should use `payout_stakers` - let migrate_era = MigrateEra::get().ok_or(Error::::InvalidEraToReward)?; - // This payout mechanism will only work for eras before the migration. - // Subsequent payouts should use `payout_stakers`. - ensure!(era < migrate_era, Error::::InvalidEraToReward); - let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; - ensure!(era <= current_era, Error::::InvalidEraToReward); - let history_depth = Self::history_depth(); - ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); - - // Note: if era has no reward to be claimed, era may be future. better not to update - // `ledger.last_reward` in this case. 
- let era_payout = >::get(&era) - .ok_or_else(|| Error::::InvalidEraToReward)?; - - let mut ledger = >::get(&ctrl).ok_or_else(|| Error::::NotController)?; - - ensure!( - Self::era_election_status().is_closed() || Self::payee(&ledger.stash) != RewardDestination::Staked, - Error::::CallNotAllowed, - ); - - ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); - match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => Err(Error::::AlreadyClaimed)?, - Err(pos) => ledger.claimed_rewards.insert(pos, era), - } - - >::insert(&ctrl, &ledger); - - let era_reward_points = >::get(&era); - let commission = Self::eras_validator_prefs(&era, &ledger.stash).commission; - let exposure = >::get(&era, &ledger.stash); - - let exposure_part = Perbill::from_rational_approximation( - exposure.own, - exposure.total, - ); - let validator_point = era_reward_points.individual.get(&ledger.stash) - .map(|points| *points) - .unwrap_or_else(|| Zero::zero()); - let validator_point_part = Perbill::from_rational_approximation( - validator_point, - era_reward_points.total, - ); - let reward = validator_point_part.saturating_mul( - commission.saturating_add( - Perbill::one().saturating_sub(commission).saturating_mul(exposure_part) - ) - ); - - if let Some(imbalance) = Self::make_payout(&ledger.stash, reward * era_payout) { - Self::deposit_event(RawEvent::Reward(ctrl, imbalance.peek())); - } - - Ok(()) - } - - fn do_payout_stakers( - validator_stash: T::AccountId, - era: EraIndex, - ) -> DispatchResult { - // Validate input data - let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; - ensure!(era <= current_era, Error::::InvalidEraToReward); - let history_depth = Self::history_depth(); - ensure!(era >= current_era.saturating_sub(history_depth), Error::::InvalidEraToReward); - - // If there was no migration, then this function is always valid. 
- if let Some(migrate_era) = MigrateEra::get() { - // This payout mechanism will only work for eras on and after the migration. - // Payouts before then should use `payout_nominator`/`payout_validator`. - ensure!(migrate_era <= era, Error::::InvalidEraToReward); - } - - // Note: if era has no reward to be claimed, era may be future. better not to update - // `ledger.claimed_rewards` in this case. - let era_payout = >::get(&era) - .ok_or_else(|| Error::::InvalidEraToReward)?; - - let controller = Self::bonded(&validator_stash).ok_or(Error::::NotStash)?; - let mut ledger = >::get(&controller).ok_or_else(|| Error::::NotController)?; - - ledger.claimed_rewards.retain(|&x| x >= current_era.saturating_sub(history_depth)); - match ledger.claimed_rewards.binary_search(&era) { - Ok(_) => Err(Error::::AlreadyClaimed)?, - Err(pos) => ledger.claimed_rewards.insert(pos, era), - } - - let exposure = >::get(&era, &ledger.stash); - - /* Input data seems good, no errors allowed after this point */ - - >::insert(&controller, &ledger); - - // Get Era reward points. It has TOTAL and INDIVIDUAL - // Find the fraction of the era reward that belongs to the validator - // Take that fraction of the eras rewards to split to nominator and validator - // - // Then look at the validator, figure out the proportion of their reward - // which goes to them and each of their nominators. - - let era_reward_points = >::get(&era); - let total_reward_points = era_reward_points.total; - let validator_reward_points = era_reward_points.individual.get(&ledger.stash) - .map(|points| *points) - .unwrap_or_else(|| Zero::zero()); - - // Nothing to do if they have no reward points. - if validator_reward_points.is_zero() { return Ok(())} - - // This is the fraction of the total reward that the validator and the - // nominators will get. 
- let validator_total_reward_part = Perbill::from_rational_approximation( - validator_reward_points, - total_reward_points, - ); - - // This is how much validator + nominators are entitled to. - let validator_total_payout = validator_total_reward_part * era_payout; - - let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash); - // Validator first gets a cut off the top. - let validator_commission = validator_prefs.commission; - let validator_commission_payout = validator_commission * validator_total_payout; - - let validator_leftover_payout = validator_total_payout - validator_commission_payout; - // Now let's calculate how this is split to the validator. - let validator_exposure_part = Perbill::from_rational_approximation( - exposure.own, - exposure.total, - ); - let validator_staking_payout = validator_exposure_part * validator_leftover_payout; - - // We can now make total validator payout: - if let Some(imbalance) = Self::make_payout( - &ledger.stash, - validator_staking_payout + validator_commission_payout - ) { - Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek())); - } - - // Lets now calculate how this is split to the nominators. - // Sort nominators by highest to lowest exposure, but only keep `max_nominator_payouts` of them. - for nominator in exposure.others.iter() { - let nominator_exposure_part = Perbill::from_rational_approximation( - nominator.value, - exposure.total, - ); - - let nominator_reward: BalanceOf = nominator_exposure_part * validator_leftover_payout; - // We can now make nominator payout: - if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { - Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek())); - } - } - - Ok(()) - } - - /// Update the ledger for a controller. This will also update the stash lock. The lock will - /// will lock the entire funds except paying for further transactions. 
- fn update_ledger( - controller: &T::AccountId, - ledger: &StakingLedger> - ) { - T::Currency::set_lock( - STAKING_ID, - &ledger.stash, - ledger.total, - WithdrawReasons::all(), - ); - >::insert(controller, ledger); - } - - /// Chill a stash account. - fn chill_stash(stash: &T::AccountId) { - >::remove(stash); - >::remove(stash); - } - - /// Actually make a payment to a staker. This uses the currency's reward function - /// to pay the right payee for the given staker account. - fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { - let dest = Self::payee(stash); - match dest { - RewardDestination::Controller => Self::bonded(stash) - .and_then(|controller| - T::Currency::deposit_into_existing(&controller, amount).ok() - ), - RewardDestination::Stash => - T::Currency::deposit_into_existing(stash, amount).ok(), - RewardDestination::Staked => Self::bonded(stash) - .and_then(|c| Self::ledger(&c).map(|l| (c, l))) - .and_then(|(controller, mut l)| { - l.active += amount; - l.total += amount; - let r = T::Currency::deposit_into_existing(stash, amount).ok(); - Self::update_ledger(&controller, &l); - r - }), - } - } - - /// Plan a new session potentially trigger a new era. - fn new_session(session_index: SessionIndex) -> Option> { - if let Some(current_era) = Self::current_era() { - // Initial era has been set. - - let current_era_start_session_index = Self::eras_start_session_index(current_era) - .unwrap_or_else(|| { - frame_support::print("Error: start_session_index must be set for current_era"); - 0 - }); - - let era_length = session_index.checked_sub(current_era_start_session_index) - .unwrap_or(0); // Must never happen. - - match ForceEra::get() { - Forcing::ForceNew => ForceEra::kill(), - Forcing::ForceAlways => (), - Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), - _ => { - // not forcing, not a new era either. If final, set the flag. 
- if era_length + 1 >= T::SessionsPerEra::get() { - IsCurrentSessionFinal::put(true); - } - return None - }, - } - - // new era. - IsCurrentSessionFinal::put(false); - Self::new_era(session_index) - } else { - // Set initial era - Self::new_era(session_index) - } - } - - /// Basic and cheap checks that we perform in validate unsigned, and in the execution. - pub fn pre_dispatch_checks(score: PhragmenScore, era: EraIndex) -> Result<(), Error> { - // discard solutions that are not in-time - // check window open - ensure!( - Self::era_election_status().is_open(), - Error::::PhragmenEarlySubmission, - ); - - // check current era. - if let Some(current_era) = Self::current_era() { - ensure!( - current_era == era, - Error::::PhragmenEarlySubmission, - ) - } - - // assume the given score is valid. Is it better than what we have on-chain, if we have any? - if let Some(queued_score) = Self::queued_score() { - ensure!( - is_score_better(queued_score, score), - Error::::PhragmenWeakSubmission, - ) - } - - Ok(()) - } - - /// Checks a given solution and if correct and improved, writes it on chain as the queued result - /// of the next round. This may be called by both a signed and an unsigned transaction. - pub fn check_and_replace_solution( - winners: Vec, - compact_assignments: CompactAssignments, - compute: ElectionCompute, - claimed_score: PhragmenScore, - era: EraIndex, - ) -> Result<(), Error> { - // Do the basic checks. era, claimed score and window open. - Self::pre_dispatch_checks(claimed_score, era)?; - - // Check that the number of presented winners is sane. Most often we have more candidates - // that we need. Then it should be Self::validator_count(). Else it should be all the - // candidates. 
- let snapshot_length = >::decode_len() - .map_err(|_| Error::::SnapshotUnavailable)?; - let desired_winners = Self::validator_count().min(snapshot_length as u32); - ensure!(winners.len() as u32 == desired_winners, Error::::PhragmenBogusWinnerCount); - - // decode snapshot validators. - let snapshot_validators = Self::snapshot_validators() - .ok_or(Error::::SnapshotUnavailable)?; - - // check if all winners were legit; this is rather cheap. Replace with accountId. - let winners = winners.into_iter().map(|widx| { - // NOTE: at the moment, since staking is explicitly blocking any offence until election - // is closed, we don't check here if the account id at `snapshot_validators[widx]` is - // actually a validator. If this ever changes, this loop needs to also check this. - snapshot_validators.get(widx as usize).cloned().ok_or(Error::::PhragmenBogusWinner) - }).collect::, Error>>()?; - - // decode the rest of the snapshot. - let snapshot_nominators = >::snapshot_nominators() - .ok_or(Error::::SnapshotUnavailable)?; - - // helpers - let nominator_at = |i: NominatorIndex| -> Option { - snapshot_nominators.get(i as usize).cloned() - }; - let validator_at = |i: ValidatorIndex| -> Option { - snapshot_validators.get(i as usize).cloned() - }; - - // un-compact. - let assignments = compact_assignments.into_assignment( - nominator_at, - validator_at, - ).map_err(|e| { - // log the error since it is not propagated into the runtime error. - log!(warn, "💸 un-compacting solution failed due to {:?}", e); - Error::::PhragmenBogusCompact - })?; - - // check all nominators actually including the claimed vote. Also check correct self votes. - // Note that we assume all validators and nominators in `assignments` are properly bonded, - // because they are coming from the snapshot via a given index. 
- for Assignment { who, distribution } in assignments.iter() { - let is_validator = >::contains_key(&who); - let maybe_nomination = Self::nominators(&who); - - if !(maybe_nomination.is_some() ^ is_validator) { - // all of the indices must map to either a validator or a nominator. If this is ever - // not the case, then the locking system of staking is most likely faulty, or we - // have bigger problems. - log!(error, "💸 detected an error in the staking locking and snapshot."); - // abort. - return Err(Error::::PhragmenBogusNominator); - } - - if !is_validator { - // a normal vote - let nomination = maybe_nomination.expect( - "exactly one of `maybe_validator` and `maybe_nomination.is_some` is true. \ - is_validator is false; maybe_nomination is some; qed" - ); - - // NOTE: we don't really have to check here if the sum of all edges are the - // nominator correct. Un-compacting assures this by definition. - - for (t, _) in distribution { - // each target in the provided distribution must be actually nominated by the - // nominator after the last non-zero slash. - if nomination.targets.iter().find(|&tt| tt == t).is_none() { - return Err(Error::::PhragmenBogusNomination); - } - - if ::SlashingSpans::get(&t).map_or( - false, - |spans| nomination.submitted_in < spans.last_nonzero_slash(), - ) { - return Err(Error::::PhragmenSlashedNomination); - } - } - } else { - // a self vote - ensure!(distribution.len() == 1, Error::::PhragmenBogusSelfVote); - ensure!(distribution[0].0 == *who, Error::::PhragmenBogusSelfVote); - // defensive only. A compact assignment of length one does NOT encode the weight and - // it is always created to be 100%. - ensure!( - distribution[0].1 == OffchainAccuracy::one(), - Error::::PhragmenBogusSelfVote, - ); - } - } - - // convert into staked assignments. - let staked_assignments = sp_phragmen::assignment_ratio_to_staked( - assignments, - Self::slashable_balance_of_vote_weight, - ); - - // build the support map thereof in order to evaluate. 
- // OPTIMIZATION: loop to create the staked assignments but it would bloat the code. Okay for - // now as it does not add to the complexity order. - let (supports, num_error) = build_support_map::( - &winners, - &staked_assignments, - ); - // This technically checks that all targets in all nominators were among the winners. - ensure!(num_error == 0, Error::::PhragmenBogusEdge); - - // Check if the score is the same as the claimed one. - let submitted_score = evaluate_support(&supports); - ensure!(submitted_score == claimed_score, Error::::PhragmenBogusScore); - - // At last, alles Ok. Exposures and store the result. - let exposures = Self::collect_exposure(supports); - log!( - info, - "💸 A better solution (with compute {:?}) has been validated and stored on chain.", - compute, - ); - - // write new results. - >::put(ElectionResult { - elected_stashes: winners, - compute, - exposures, - }); - QueuedScore::put(submitted_score); - - Ok(()) - - } - - /// Start a session potentially starting an era. - fn start_session(start_session: SessionIndex) { - let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); - if let Some(next_active_era_start_session_index) = - Self::eras_start_session_index(next_active_era) - { - if next_active_era_start_session_index == start_session { - Self::start_era(start_session); - } else if next_active_era_start_session_index < start_session { - // This arm should never happen, but better handle it than to stall the - // staking pallet. - frame_support::print("Warning: A session appears to have been skipped."); - Self::start_era(start_session); - } - } - } - - /// End a session potentially ending an era. 
- fn end_session(session_index: SessionIndex) { - if let Some(active_era) = Self::active_era() { - if let Some(next_active_era_start_session_index) = - Self::eras_start_session_index(active_era.index + 1) - { - if next_active_era_start_session_index == session_index + 1 { - Self::end_era(active_era, session_index); - } - } - } - } - - /// * Increment `active_era.index`, - /// * reset `active_era.start`, - /// * update `BondedEras` and apply slashes. - fn start_era(start_session: SessionIndex) { - let active_era = ActiveEra::mutate(|active_era| { - let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); - *active_era = Some(ActiveEraInfo { - index: new_index, - // Set new active era start in next `on_finalize`. To guarantee usage of `Time` - start: None, - }); - new_index - }); - - let bonding_duration = T::BondingDuration::get(); - - BondedEras::mutate(|bonded| { - bonded.push((active_era, start_session)); - - if active_era > bonding_duration { - let first_kept = active_era - bonding_duration; - - // prune out everything that's from before the first-kept index. - let n_to_prune = bonded.iter() - .take_while(|&&(era_idx, _)| era_idx < first_kept) - .count(); - - // kill slashing metadata. - for (pruned_era, _) in bonded.drain(..n_to_prune) { - slashing::clear_era_metadata::(pruned_era); - } - - if let Some(&(_, first_session)) = bonded.first() { - T::SessionInterface::prune_historical_up_to(first_session); - } - } - }); - - Self::apply_unapplied_slashes(active_era); - } - - /// Compute payout for era. - fn end_era(active_era: ActiveEraInfo, _session_index: SessionIndex) { - // Note: active_era_start can be None if end era is called during genesis config. 
- if let Some(active_era_start) = active_era.start { - let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); - - let era_duration = now_as_millis_u64 - active_era_start; - let (total_payout, _max_payout) = inflation::compute_total_payout( - &T::RewardCurve::get(), - Self::eras_total_stake(&active_era.index), - T::Currency::total_issuance(), - // Duration of era; more than u64::MAX is rewarded as u64::MAX. - era_duration.saturated_into::(), - ); - - // Set ending era reward. - >::insert(&active_era.index, total_payout); - } - } - - /// Plan a new era. Return the potential new staking set. - fn new_era(start_session_index: SessionIndex) -> Option> { - // Increment or set current era. - let current_era = CurrentEra::mutate(|s| { - *s = Some(s.map(|s| s + 1).unwrap_or(0)); - s.unwrap() - }); - ErasStartSessionIndex::insert(¤t_era, &start_session_index); - - // Clean old era information. - if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) { - Self::clear_era_information(old_era); - } - - // Set staking information for new era. - let maybe_new_validators = Self::select_and_update_validators(current_era); - - maybe_new_validators - } - - /// Select the new validator set at the end of the era. - /// - /// Runs [`try_do_phragmen`] and updates the following storage items: - /// - [`EraElectionStatus`]: with `None`. - /// - [`ErasStakers`]: with the new staker set. - /// - [`ErasStakersClipped`]. - /// - [`ErasValidatorPrefs`]. - /// - [`ErasTotalStake`]: with the new total stake. - /// - [`SnapshotValidators`] and [`SnapshotNominators`] are both removed. - /// - /// Internally, [`QueuedElected`], snapshots and [`QueuedScore`] are also consumed. - /// - /// If the election has been successful, It passes the new set upwards. - /// - /// This should only be called at the end of an era. 
- fn select_and_update_validators(current_era: EraIndex) -> Option> { - if let Some(ElectionResult::> { - elected_stashes, - exposures, - compute, - }) = Self::try_do_phragmen() { - // We have chosen the new validator set. Submission is no longer allowed. - >::put(ElectionStatus::Closed); - - // kill the snapshots. - Self::kill_stakers_snapshot(); - - // Populate Stakers and write slot stake. - let mut total_stake: BalanceOf = Zero::zero(); - exposures.into_iter().for_each(|(stash, exposure)| { - total_stake = total_stake.saturating_add(exposure.total); - >::insert(current_era, &stash, &exposure); - - let mut exposure_clipped = exposure; - let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; - if exposure_clipped.others.len() > clipped_max_len { - exposure_clipped.others.sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse()); - exposure_clipped.others.truncate(clipped_max_len); - } - >::insert(¤t_era, &stash, exposure_clipped); - }); - - // Insert current era staking information - >::insert(¤t_era, total_stake); - - // collect the pref of all winners - for stash in &elected_stashes { - let pref = Self::validators(stash); - >::insert(¤t_era, stash, pref); - } - - // emit event - Self::deposit_event(RawEvent::StakingElection(compute)); - - log!( - info, - "💸 new validator set of size {:?} has been elected via {:?} for era {:?}", - elected_stashes.len(), - compute, - current_era, - ); - - Some(elected_stashes) - } else { - None - } - } - - /// Select a new validator set from the assembled stakers and their role preferences. It tries - /// first to peek into [`QueuedElected`]. Otherwise, it runs a new phragmen. - /// - /// If [`QueuedElected`] and [`QueuedScore`] exists, they are both removed. No further storage - /// is updated. - fn try_do_phragmen() -> Option>> { - // a phragmen result from either a stored submission or locally executed one. 
- let next_result = >::take().or_else(|| - Self::do_phragmen_with_post_processing::(ElectionCompute::OnChain) - ); - - // either way, kill this. We remove it here to make sure it always has the exact same - // lifetime as `QueuedElected`. - QueuedScore::kill(); - - next_result - } - - /// Execute phragmen and return the new results. The edge weights are processed into support - /// values. - /// - /// This is basically a wrapper around [`do_phragmen`] which translates `PhragmenResult` into - /// `ElectionResult`. - /// - /// No storage item is updated. - fn do_phragmen_with_post_processing(compute: ElectionCompute) - -> Option>> - where - Accuracy: sp_std::ops::Mul, - ExtendedBalance: From<::Inner>, - { - if let Some(phragmen_result) = Self::do_phragmen::() { - let elected_stashes = phragmen_result.winners.iter() - .map(|(s, _)| s.clone()) - .collect::>(); - let assignments = phragmen_result.assignments; - - let staked_assignments = sp_phragmen::assignment_ratio_to_staked( - assignments, - Self::slashable_balance_of_vote_weight, - ); - - let (supports, _) = build_support_map::( - &elected_stashes, - &staked_assignments, - ); - - // collect exposures - let exposures = Self::collect_exposure(supports); - - // In order to keep the property required by `on_session_ending` that we must return the - // new validator set even if it's the same as the old, as long as any underlying - // economic conditions have changed, we don't attempt to do any optimization where we - // compare against the prior set. - Some(ElectionResult::> { - elected_stashes, - exposures, - compute, - }) - } else { - // There were not enough candidates for even our minimal level of functionality. This is - // bad. We should probably disable all functionality except for block production and let - // the chain keep producing blocks until we can decide on a sufficiently substantial - // set. TODO: #2494 - None - } - } - - /// Execute phragmen and return the new results. 
No post-processing is applied and the raw edge - /// weights are returned. - /// - /// Self votes are added and nominations before the most recent slashing span are reaped. - /// - /// No storage item is updated. - fn do_phragmen() -> Option> { - let mut all_nominators: Vec<(T::AccountId, VoteWeight, Vec)> = Vec::new(); - let mut all_validators = Vec::new(); - for (validator, _) in >::iter() { - // append self vote - let self_vote = (validator.clone(), Self::slashable_balance_of_vote_weight(&validator), vec![validator.clone()]); - all_nominators.push(self_vote); - all_validators.push(validator); - } - - let nominator_votes = >::iter().map(|(nominator, nominations)| { - let Nominations { submitted_in, mut targets, suppressed: _ } = nominations; - - // Filter out nomination targets which were nominated before the most recent - // slashing span. - targets.retain(|stash| { - ::SlashingSpans::get(&stash).map_or( - true, - |spans| submitted_in >= spans.last_nonzero_slash(), - ) - }); - - (nominator, targets) - }); - all_nominators.extend(nominator_votes.map(|(n, ns)| { - let s = Self::slashable_balance_of_vote_weight(&n); - (n, s, ns) - })); - - elect::<_, Accuracy>( - Self::validator_count() as usize, - Self::minimum_validator_count().max(1) as usize, - all_validators, - all_nominators, - ) - } - - /// Consume a set of [`Supports`] from [`sp_phragmen`] and collect them into a [`Exposure`] - fn collect_exposure(supports: SupportMap) -> Vec<(T::AccountId, Exposure>)> { - let to_balance = |e: ExtendedBalance| - >>::convert(e); - - supports.into_iter().map(|(validator, support)| { - // build `struct exposure` from `support` - let mut others = Vec::new(); - let mut own: BalanceOf = Zero::zero(); - let mut total: BalanceOf = Zero::zero(); - support.voters - .into_iter() - .map(|(nominator, weight)| (nominator, to_balance(weight))) - .for_each(|(nominator, stake)| { - if nominator == validator { - own = own.saturating_add(stake); - } else { - others.push(IndividualExposure { 
who: nominator, value: stake }); - } - total = total.saturating_add(stake); - }); - - let exposure = Exposure { - own, - others, - total, - }; - - (validator, exposure) - }).collect::)>>() - } - - /// Remove all associated data of a stash account from the staking system. - /// - /// Assumes storage is upgraded before calling. - /// - /// This is called: - /// - after a `withdraw_unbond()` call that frees all of a stash's bonded balance. - /// - through `reap_stash()` if the balance has fallen to zero (through slashing). - fn kill_stash(stash: &T::AccountId) -> DispatchResult { - let controller = Bonded::::take(stash).ok_or(Error::::NotStash)?; - >::remove(&controller); - - >::remove(stash); - >::remove(stash); - >::remove(stash); - - slashing::clear_stash_metadata::(stash); - - system::Module::::dec_ref(stash); - - Ok(()) - } - - /// Clear all era information for given era. - fn clear_era_information(era_index: EraIndex) { - >::remove_prefix(era_index); - >::remove_prefix(era_index); - >::remove_prefix(era_index); - >::remove(era_index); - >::remove(era_index); - >::remove(era_index); - ErasStartSessionIndex::remove(era_index); - } - - /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. - fn apply_unapplied_slashes(active_era: EraIndex) { - let slash_defer_duration = T::SlashDeferDuration::get(); - ::EarliestUnappliedSlash::mutate(|earliest| if let Some(ref mut earliest) = earliest { - let keep_from = active_era.saturating_sub(slash_defer_duration); - for era in (*earliest)..keep_from { - let era_slashes = ::UnappliedSlashes::take(&era); - for slash in era_slashes { - slashing::apply_slash::(slash); - } - } - - *earliest = (*earliest).max(keep_from) - }) - } - - /// Add reward points to validators using their stash account ID. - /// - /// Validators are keyed by stash account ID and must be in the current elected set. 
- /// - /// For each element in the iterator the given number of points in u32 is added to the - /// validator, thus duplicates are handled. - /// - /// At the end of the era each the total payout will be distributed among validator - /// relatively to their points. - /// - /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. - /// If you need to reward lots of validator consider using `reward_by_indices`. - pub fn reward_by_ids( - validators_points: impl IntoIterator - ) { - if let Some(active_era) = Self::active_era() { - >::mutate(active_era.index, |era_rewards| { - for (validator, points) in validators_points.into_iter() { - *era_rewards.individual.entry(validator).or_default() += points; - era_rewards.total += points; - } - }); - } - } - - /// Ensures that at the end of the current session there will be a new era. - fn ensure_new_era() { - match ForceEra::get() { - Forcing::ForceAlways | Forcing::ForceNew => (), - _ => ForceEra::put(Forcing::ForceNew), - } - } - - fn will_era_be_forced() -> bool { - match ForceEra::get() { - Forcing::ForceAlways | Forcing::ForceNew => true, - Forcing::ForceNone | Forcing::NotForcing => false, - } - } - - #[cfg(feature = "runtime-benchmarks")] - pub fn add_era_stakers(current_era: EraIndex, controller: T::AccountId, exposure: Exposure>) { - >::insert(¤t_era, &controller, &exposure); - } - - #[cfg(feature = "runtime-benchmarks")] - pub fn put_election_status(status: ElectionStatus::) { - >::put(status); - } + /// Migrate `last_reward` to `claimed_rewards` + pub fn migrate_last_reward_to_claimed_rewards() { + use frame_support::migration::{put_storage_value, StorageIterator}; + // Migrate from `last_reward` to `claimed_rewards`. + // We will construct a vector from `current_era - history_depth` to `last_reward` + // for each validator and nominator. 
+ // + // Old Staking Ledger + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] + struct OldStakingLedger { + pub stash: AccountId, + #[codec(compact)] + pub total: Balance, + #[codec(compact)] + pub active: Balance, + pub unlocking: Vec>, + pub last_reward: Option, + } + // Current era and history depth + let current_era = Self::current_era().unwrap_or(0); + let history_depth = Self::history_depth(); + let last_payout_era = current_era.saturating_sub(history_depth); + // Convert all ledgers to the new format. + for (hash, old_ledger) in + StorageIterator::>>::new( + b"Staking", b"Ledger", + ) + .drain() + { + let last_reward = old_ledger.last_reward.unwrap_or(0); + let new_ledger = StakingLedger { + stash: old_ledger.stash, + total: old_ledger.total, + active: old_ledger.active, + unlocking: old_ledger.unlocking, + claimed_rewards: (last_payout_era..=last_reward).collect(), + }; + put_storage_value(b"Staking", b"Ledger", &hash, new_ledger); + } + MigrateEra::put(current_era); + } + + /// The total balance that can be slashed from a stash account as of right now. + pub fn slashable_balance_of(stash: &T::AccountId) -> BalanceOf { + Self::bonded(stash) + .and_then(Self::ledger) + .map(|l| l.active) + .unwrap_or_default() + } + + /// internal impl of [`slashable_balance_of`] that returns [`VoteWeight`]. + fn slashable_balance_of_vote_weight(stash: &T::AccountId) -> VoteWeight { + , VoteWeight>>::convert( + Self::slashable_balance_of(stash), + ) + } + + /// Dump the list of validators and nominators into vectors and keep them on-chain. + /// + /// This data is used to efficiently evaluate election results. returns `true` if the operation + /// is successful. 
+ fn create_stakers_snapshot() -> bool { + let validators = >::iter().map(|(v, _)| v).collect::>(); + let mut nominators = >::iter().map(|(n, _)| n).collect::>(); + + let num_validators = validators.len(); + let num_nominators = nominators.len(); + if num_validators > MAX_VALIDATORS + || num_nominators.saturating_add(num_validators) > MAX_NOMINATORS + { + log!( + warn, + "💸 Snapshot size too big [{} <> {}][{} <> {}].", + num_validators, + MAX_VALIDATORS, + num_nominators, + MAX_NOMINATORS, + ); + false + } else { + // all validators nominate themselves; + nominators.extend(validators.clone()); + + >::put(validators); + >::put(nominators); + true + } + } + + /// Clears both snapshots of stakers. + fn kill_stakers_snapshot() { + >::kill(); + >::kill(); + } + + fn do_payout_nominator( + ctrl: T::AccountId, + era: EraIndex, + validators: Vec<(T::AccountId, u32)>, + ) -> DispatchResult { + // validators len must not exceed `MAX_NOMINATIONS` to avoid querying more validator + // exposure than necessary. + if validators.len() > MAX_NOMINATIONS { + return Err(Error::::InvalidNumberOfNominations.into()); + } + // If migrate_era is not populated, then you should use `payout_stakers` + let migrate_era = MigrateEra::get().ok_or(Error::::InvalidEraToReward)?; + // This payout mechanism will only work for eras before the migration. + // Subsequent payouts should use `payout_stakers`. + ensure!(era < migrate_era, Error::::InvalidEraToReward); + let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; + ensure!(era <= current_era, Error::::InvalidEraToReward); + let history_depth = Self::history_depth(); + ensure!( + era >= current_era.saturating_sub(history_depth), + Error::::InvalidEraToReward + ); + + // Note: if era has no reward to be claimed, era may be future. better not to update + // `nominator_ledger.last_reward` in this case. 
+ let era_payout = + >::get(&era).ok_or_else(|| Error::::InvalidEraToReward)?; + + let mut nominator_ledger = + >::get(&ctrl).ok_or_else(|| Error::::NotController)?; + + ensure!( + Self::era_election_status().is_closed() + || Self::payee(&nominator_ledger.stash) != RewardDestination::Staked, + Error::::CallNotAllowed, + ); + + nominator_ledger + .claimed_rewards + .retain(|&x| x >= current_era.saturating_sub(history_depth)); + match nominator_ledger.claimed_rewards.binary_search(&era) { + Ok(_) => Err(Error::::AlreadyClaimed)?, + Err(pos) => nominator_ledger.claimed_rewards.insert(pos, era), + } + + >::insert(&ctrl, &nominator_ledger); + + let mut reward = Perbill::zero(); + let era_reward_points = >::get(&era); + + for (validator, nominator_index) in validators.into_iter() { + let commission = Self::eras_validator_prefs(&era, &validator).commission; + let validator_exposure = >::get(&era, &validator); + + if let Some(nominator_exposure) = + validator_exposure.others.get(nominator_index as usize) + { + if nominator_exposure.who != nominator_ledger.stash { + continue; + } + + let nominator_exposure_part = Perbill::from_rational_approximation( + nominator_exposure.value, + validator_exposure.total, + ); + let validator_point = era_reward_points + .individual + .get(&validator) + .map(|points| *points) + .unwrap_or_else(|| Zero::zero()); + let validator_point_part = + Perbill::from_rational_approximation(validator_point, era_reward_points.total); + reward = reward.saturating_add( + validator_point_part + .saturating_mul(Perbill::one().saturating_sub(commission)) + .saturating_mul(nominator_exposure_part), + ); + } + } + + if let Some(imbalance) = Self::make_payout(&nominator_ledger.stash, reward * era_payout) { + Self::deposit_event(RawEvent::Reward(ctrl, imbalance.peek())); + } + + Ok(()) + } + + fn do_payout_validator(ctrl: T::AccountId, era: EraIndex) -> DispatchResult { + // If migrate_era is not populated, then you should use `payout_stakers` + let migrate_era = 
MigrateEra::get().ok_or(Error::::InvalidEraToReward)?; + // This payout mechanism will only work for eras before the migration. + // Subsequent payouts should use `payout_stakers`. + ensure!(era < migrate_era, Error::::InvalidEraToReward); + let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; + ensure!(era <= current_era, Error::::InvalidEraToReward); + let history_depth = Self::history_depth(); + ensure!( + era >= current_era.saturating_sub(history_depth), + Error::::InvalidEraToReward + ); + + // Note: if era has no reward to be claimed, era may be future. better not to update + // `ledger.last_reward` in this case. + let era_payout = + >::get(&era).ok_or_else(|| Error::::InvalidEraToReward)?; + + let mut ledger = >::get(&ctrl).ok_or_else(|| Error::::NotController)?; + + ensure!( + Self::era_election_status().is_closed() + || Self::payee(&ledger.stash) != RewardDestination::Staked, + Error::::CallNotAllowed, + ); + + ledger + .claimed_rewards + .retain(|&x| x >= current_era.saturating_sub(history_depth)); + match ledger.claimed_rewards.binary_search(&era) { + Ok(_) => Err(Error::::AlreadyClaimed)?, + Err(pos) => ledger.claimed_rewards.insert(pos, era), + } + + >::insert(&ctrl, &ledger); + + let era_reward_points = >::get(&era); + let commission = Self::eras_validator_prefs(&era, &ledger.stash).commission; + let exposure = >::get(&era, &ledger.stash); + + let exposure_part = Perbill::from_rational_approximation(exposure.own, exposure.total); + let validator_point = era_reward_points + .individual + .get(&ledger.stash) + .map(|points| *points) + .unwrap_or_else(|| Zero::zero()); + let validator_point_part = + Perbill::from_rational_approximation(validator_point, era_reward_points.total); + let reward = validator_point_part.saturating_mul( + commission.saturating_add( + Perbill::one() + .saturating_sub(commission) + .saturating_mul(exposure_part), + ), + ); + + if let Some(imbalance) = Self::make_payout(&ledger.stash, reward * era_payout) { + 
Self::deposit_event(RawEvent::Reward(ctrl, imbalance.peek())); + } + + Ok(()) + } + + fn do_payout_stakers(validator_stash: T::AccountId, era: EraIndex) -> DispatchResult { + // Validate input data + let current_era = CurrentEra::get().ok_or(Error::::InvalidEraToReward)?; + ensure!(era <= current_era, Error::::InvalidEraToReward); + let history_depth = Self::history_depth(); + ensure!( + era >= current_era.saturating_sub(history_depth), + Error::::InvalidEraToReward + ); + + // If there was no migration, then this function is always valid. + if let Some(migrate_era) = MigrateEra::get() { + // This payout mechanism will only work for eras on and after the migration. + // Payouts before then should use `payout_nominator`/`payout_validator`. + ensure!(migrate_era <= era, Error::::InvalidEraToReward); + } + + // Note: if era has no reward to be claimed, era may be future. better not to update + // `ledger.claimed_rewards` in this case. + let era_payout = + >::get(&era).ok_or_else(|| Error::::InvalidEraToReward)?; + + let controller = Self::bonded(&validator_stash).ok_or(Error::::NotStash)?; + let mut ledger = >::get(&controller).ok_or_else(|| Error::::NotController)?; + + ledger + .claimed_rewards + .retain(|&x| x >= current_era.saturating_sub(history_depth)); + match ledger.claimed_rewards.binary_search(&era) { + Ok(_) => Err(Error::::AlreadyClaimed)?, + Err(pos) => ledger.claimed_rewards.insert(pos, era), + } + + let exposure = >::get(&era, &ledger.stash); + + /* Input data seems good, no errors allowed after this point */ + + >::insert(&controller, &ledger); + + // Get Era reward points. It has TOTAL and INDIVIDUAL + // Find the fraction of the era reward that belongs to the validator + // Take that fraction of the eras rewards to split to nominator and validator + // + // Then look at the validator, figure out the proportion of their reward + // which goes to them and each of their nominators. 
+ + let era_reward_points = >::get(&era); + let total_reward_points = era_reward_points.total; + let validator_reward_points = era_reward_points + .individual + .get(&ledger.stash) + .map(|points| *points) + .unwrap_or_else(|| Zero::zero()); + + // Nothing to do if they have no reward points. + if validator_reward_points.is_zero() { + return Ok(()); + } + + // This is the fraction of the total reward that the validator and the + // nominators will get. + let validator_total_reward_part = + Perbill::from_rational_approximation(validator_reward_points, total_reward_points); + + // This is how much validator + nominators are entitled to. + let validator_total_payout = validator_total_reward_part * era_payout; + + let validator_prefs = Self::eras_validator_prefs(&era, &validator_stash); + // Validator first gets a cut off the top. + let validator_commission = validator_prefs.commission; + let validator_commission_payout = validator_commission * validator_total_payout; + + let validator_leftover_payout = validator_total_payout - validator_commission_payout; + // Now let's calculate how this is split to the validator. + let validator_exposure_part = + Perbill::from_rational_approximation(exposure.own, exposure.total); + let validator_staking_payout = validator_exposure_part * validator_leftover_payout; + + // We can now make total validator payout: + if let Some(imbalance) = Self::make_payout( + &ledger.stash, + validator_staking_payout + validator_commission_payout, + ) { + Self::deposit_event(RawEvent::Reward(ledger.stash, imbalance.peek())); + } + + // Lets now calculate how this is split to the nominators. + // Sort nominators by highest to lowest exposure, but only keep `max_nominator_payouts` of them. 
+ for nominator in exposure.others.iter() { + let nominator_exposure_part = + Perbill::from_rational_approximation(nominator.value, exposure.total); + + let nominator_reward: BalanceOf = + nominator_exposure_part * validator_leftover_payout; + // We can now make nominator payout: + if let Some(imbalance) = Self::make_payout(&nominator.who, nominator_reward) { + Self::deposit_event(RawEvent::Reward(nominator.who.clone(), imbalance.peek())); + } + } + + Ok(()) + } + + /// Update the ledger for a controller. This will also update the stash lock. The lock will + /// will lock the entire funds except paying for further transactions. + fn update_ledger( + controller: &T::AccountId, + ledger: &StakingLedger>, + ) { + T::Currency::set_lock( + STAKING_ID, + &ledger.stash, + ledger.total, + WithdrawReasons::all(), + ); + >::insert(controller, ledger); + } + + /// Chill a stash account. + fn chill_stash(stash: &T::AccountId) { + >::remove(stash); + >::remove(stash); + } + + /// Actually make a payment to a staker. This uses the currency's reward function + /// to pay the right payee for the given staker account. + fn make_payout(stash: &T::AccountId, amount: BalanceOf) -> Option> { + let dest = Self::payee(stash); + match dest { + RewardDestination::Controller => Self::bonded(stash).and_then(|controller| { + T::Currency::deposit_into_existing(&controller, amount).ok() + }), + RewardDestination::Stash => T::Currency::deposit_into_existing(stash, amount).ok(), + RewardDestination::Staked => Self::bonded(stash) + .and_then(|c| Self::ledger(&c).map(|l| (c, l))) + .and_then(|(controller, mut l)| { + l.active += amount; + l.total += amount; + let r = T::Currency::deposit_into_existing(stash, amount).ok(); + Self::update_ledger(&controller, &l); + r + }), + } + } + + /// Plan a new session potentially trigger a new era. + fn new_session(session_index: SessionIndex) -> Option> { + if let Some(current_era) = Self::current_era() { + // Initial era has been set. 
+ + let current_era_start_session_index = Self::eras_start_session_index(current_era) + .unwrap_or_else(|| { + frame_support::print("Error: start_session_index must be set for current_era"); + 0 + }); + + let era_length = session_index + .checked_sub(current_era_start_session_index) + .unwrap_or(0); // Must never happen. + + match ForceEra::get() { + Forcing::ForceNew => ForceEra::kill(), + Forcing::ForceAlways => (), + Forcing::NotForcing if era_length >= T::SessionsPerEra::get() => (), + _ => { + // not forcing, not a new era either. If final, set the flag. + if era_length + 1 >= T::SessionsPerEra::get() { + IsCurrentSessionFinal::put(true); + } + return None; + } + } + + // new era. + IsCurrentSessionFinal::put(false); + Self::new_era(session_index) + } else { + // Set initial era + Self::new_era(session_index) + } + } + + /// Basic and cheap checks that we perform in validate unsigned, and in the execution. + pub fn pre_dispatch_checks(score: PhragmenScore, era: EraIndex) -> Result<(), Error> { + // discard solutions that are not in-time + // check window open + ensure!( + Self::era_election_status().is_open(), + Error::::PhragmenEarlySubmission, + ); + + // check current era. + if let Some(current_era) = Self::current_era() { + ensure!(current_era == era, Error::::PhragmenEarlySubmission,) + } + + // assume the given score is valid. Is it better than what we have on-chain, if we have any? + if let Some(queued_score) = Self::queued_score() { + ensure!( + is_score_better(queued_score, score), + Error::::PhragmenWeakSubmission, + ) + } + + Ok(()) + } + + /// Checks a given solution and if correct and improved, writes it on chain as the queued result + /// of the next round. This may be called by both a signed and an unsigned transaction. + pub fn check_and_replace_solution( + winners: Vec, + compact_assignments: CompactAssignments, + compute: ElectionCompute, + claimed_score: PhragmenScore, + era: EraIndex, + ) -> Result<(), Error> { + // Do the basic checks. 
era, claimed score and window open. + Self::pre_dispatch_checks(claimed_score, era)?; + + // Check that the number of presented winners is sane. Most often we have more candidates + // that we need. Then it should be Self::validator_count(). Else it should be all the + // candidates. + let snapshot_length = + >::decode_len().map_err(|_| Error::::SnapshotUnavailable)?; + let desired_winners = Self::validator_count().min(snapshot_length as u32); + ensure!( + winners.len() as u32 == desired_winners, + Error::::PhragmenBogusWinnerCount + ); + + // decode snapshot validators. + let snapshot_validators = + Self::snapshot_validators().ok_or(Error::::SnapshotUnavailable)?; + + // check if all winners were legit; this is rather cheap. Replace with accountId. + let winners = winners + .into_iter() + .map(|widx| { + // NOTE: at the moment, since staking is explicitly blocking any offence until election + // is closed, we don't check here if the account id at `snapshot_validators[widx]` is + // actually a validator. If this ever changes, this loop needs to also check this. + snapshot_validators + .get(widx as usize) + .cloned() + .ok_or(Error::::PhragmenBogusWinner) + }) + .collect::, Error>>()?; + + // decode the rest of the snapshot. + let snapshot_nominators = + >::snapshot_nominators().ok_or(Error::::SnapshotUnavailable)?; + + // helpers + let nominator_at = |i: NominatorIndex| -> Option { + snapshot_nominators.get(i as usize).cloned() + }; + let validator_at = |i: ValidatorIndex| -> Option { + snapshot_validators.get(i as usize).cloned() + }; + + // un-compact. + let assignments = compact_assignments + .into_assignment(nominator_at, validator_at) + .map_err(|e| { + // log the error since it is not propagated into the runtime error. + log!(warn, "💸 un-compacting solution failed due to {:?}", e); + Error::::PhragmenBogusCompact + })?; + + // check all nominators actually including the claimed vote. Also check correct self votes. 
+ // Note that we assume all validators and nominators in `assignments` are properly bonded, + // because they are coming from the snapshot via a given index. + for Assignment { who, distribution } in assignments.iter() { + let is_validator = >::contains_key(&who); + let maybe_nomination = Self::nominators(&who); + + if !(maybe_nomination.is_some() ^ is_validator) { + // all of the indices must map to either a validator or a nominator. If this is ever + // not the case, then the locking system of staking is most likely faulty, or we + // have bigger problems. + log!( + error, + "💸 detected an error in the staking locking and snapshot." + ); + // abort. + return Err(Error::::PhragmenBogusNominator); + } + + if !is_validator { + // a normal vote + let nomination = maybe_nomination.expect( + "exactly one of `maybe_validator` and `maybe_nomination.is_some` is true. \ + is_validator is false; maybe_nomination is some; qed", + ); + + // NOTE: we don't really have to check here if the sum of all edges are the + // nominator correct. Un-compacting assures this by definition. + + for (t, _) in distribution { + // each target in the provided distribution must be actually nominated by the + // nominator after the last non-zero slash. + if nomination.targets.iter().find(|&tt| tt == t).is_none() { + return Err(Error::::PhragmenBogusNomination); + } + + if ::SlashingSpans::get(&t).map_or(false, |spans| { + nomination.submitted_in < spans.last_nonzero_slash() + }) { + return Err(Error::::PhragmenSlashedNomination); + } + } + } else { + // a self vote + ensure!(distribution.len() == 1, Error::::PhragmenBogusSelfVote); + ensure!(distribution[0].0 == *who, Error::::PhragmenBogusSelfVote); + // defensive only. A compact assignment of length one does NOT encode the weight and + // it is always created to be 100%. + ensure!( + distribution[0].1 == OffchainAccuracy::one(), + Error::::PhragmenBogusSelfVote, + ); + } + } + + // convert into staked assignments. 
+ let staked_assignments = sp_phragmen::assignment_ratio_to_staked( + assignments, + Self::slashable_balance_of_vote_weight, + ); + + // build the support map thereof in order to evaluate. + // OPTIMIZATION: loop to create the staked assignments but it would bloat the code. Okay for + // now as it does not add to the complexity order. + let (supports, num_error) = + build_support_map::(&winners, &staked_assignments); + // This technically checks that all targets in all nominators were among the winners. + ensure!(num_error == 0, Error::::PhragmenBogusEdge); + + // Check if the score is the same as the claimed one. + let submitted_score = evaluate_support(&supports); + ensure!( + submitted_score == claimed_score, + Error::::PhragmenBogusScore + ); + + // At last, alles Ok. Exposures and store the result. + let exposures = Self::collect_exposure(supports); + log!( + info, + "💸 A better solution (with compute {:?}) has been validated and stored on chain.", + compute, + ); + + // write new results. + >::put(ElectionResult { + elected_stashes: winners, + compute, + exposures, + }); + QueuedScore::put(submitted_score); + + Ok(()) + } + + /// Start a session potentially starting an era. + fn start_session(start_session: SessionIndex) { + let next_active_era = Self::active_era().map(|e| e.index + 1).unwrap_or(0); + if let Some(next_active_era_start_session_index) = + Self::eras_start_session_index(next_active_era) + { + if next_active_era_start_session_index == start_session { + Self::start_era(start_session); + } else if next_active_era_start_session_index < start_session { + // This arm should never happen, but better handle it than to stall the + // staking pallet. + frame_support::print("Warning: A session appears to have been skipped."); + Self::start_era(start_session); + } + } + } + + /// End a session potentially ending an era. 
+ fn end_session(session_index: SessionIndex) { + if let Some(active_era) = Self::active_era() { + if let Some(next_active_era_start_session_index) = + Self::eras_start_session_index(active_era.index + 1) + { + if next_active_era_start_session_index == session_index + 1 { + Self::end_era(active_era, session_index); + } + } + } + } + + /// * Increment `active_era.index`, + /// * reset `active_era.start`, + /// * update `BondedEras` and apply slashes. + fn start_era(start_session: SessionIndex) { + let active_era = ActiveEra::mutate(|active_era| { + let new_index = active_era.as_ref().map(|info| info.index + 1).unwrap_or(0); + *active_era = Some(ActiveEraInfo { + index: new_index, + // Set new active era start in next `on_finalize`. To guarantee usage of `Time` + start: None, + }); + new_index + }); + + let bonding_duration = T::BondingDuration::get(); + + BondedEras::mutate(|bonded| { + bonded.push((active_era, start_session)); + + if active_era > bonding_duration { + let first_kept = active_era - bonding_duration; + + // prune out everything that's from before the first-kept index. + let n_to_prune = bonded + .iter() + .take_while(|&&(era_idx, _)| era_idx < first_kept) + .count(); + + // kill slashing metadata. + for (pruned_era, _) in bonded.drain(..n_to_prune) { + slashing::clear_era_metadata::(pruned_era); + } + + if let Some(&(_, first_session)) = bonded.first() { + T::SessionInterface::prune_historical_up_to(first_session); + } + } + }); + + Self::apply_unapplied_slashes(active_era); + } + + /// Compute payout for era. + fn end_era(active_era: ActiveEraInfo, _session_index: SessionIndex) { + // Note: active_era_start can be None if end era is called during genesis config. 
+ if let Some(active_era_start) = active_era.start { + let now_as_millis_u64 = T::UnixTime::now().as_millis().saturated_into::(); + + let era_duration = now_as_millis_u64 - active_era_start; + let (total_payout, _max_payout) = inflation::compute_total_payout( + &T::RewardCurve::get(), + Self::eras_total_stake(&active_era.index), + T::Currency::total_issuance(), + // Duration of era; more than u64::MAX is rewarded as u64::MAX. + era_duration.saturated_into::(), + ); + + // Set ending era reward. + >::insert(&active_era.index, total_payout); + } + } + + /// Plan a new era. Return the potential new staking set. + fn new_era(start_session_index: SessionIndex) -> Option> { + // Increment or set current era. + let current_era = CurrentEra::mutate(|s| { + *s = Some(s.map(|s| s + 1).unwrap_or(0)); + s.unwrap() + }); + ErasStartSessionIndex::insert(¤t_era, &start_session_index); + + // Clean old era information. + if let Some(old_era) = current_era.checked_sub(Self::history_depth() + 1) { + Self::clear_era_information(old_era); + } + + // Set staking information for new era. + let maybe_new_validators = Self::select_and_update_validators(current_era); + + maybe_new_validators + } + + /// Select the new validator set at the end of the era. + /// + /// Runs [`try_do_phragmen`] and updates the following storage items: + /// - [`EraElectionStatus`]: with `None`. + /// - [`ErasStakers`]: with the new staker set. + /// - [`ErasStakersClipped`]. + /// - [`ErasValidatorPrefs`]. + /// - [`ErasTotalStake`]: with the new total stake. + /// - [`SnapshotValidators`] and [`SnapshotNominators`] are both removed. + /// + /// Internally, [`QueuedElected`], snapshots and [`QueuedScore`] are also consumed. + /// + /// If the election has been successful, It passes the new set upwards. + /// + /// This should only be called at the end of an era. 
+ fn select_and_update_validators(current_era: EraIndex) -> Option> { + if let Some(ElectionResult::> { + elected_stashes, + exposures, + compute, + }) = Self::try_do_phragmen() + { + // We have chosen the new validator set. Submission is no longer allowed. + >::put(ElectionStatus::Closed); + + // kill the snapshots. + Self::kill_stakers_snapshot(); + + // Populate Stakers and write slot stake. + let mut total_stake: BalanceOf = Zero::zero(); + exposures.into_iter().for_each(|(stash, exposure)| { + total_stake = total_stake.saturating_add(exposure.total); + >::insert(current_era, &stash, &exposure); + + let mut exposure_clipped = exposure; + let clipped_max_len = T::MaxNominatorRewardedPerValidator::get() as usize; + if exposure_clipped.others.len() > clipped_max_len { + exposure_clipped + .others + .sort_unstable_by(|a, b| a.value.cmp(&b.value).reverse()); + exposure_clipped.others.truncate(clipped_max_len); + } + >::insert(¤t_era, &stash, exposure_clipped); + }); + + // Insert current era staking information + >::insert(¤t_era, total_stake); + + // collect the pref of all winners + for stash in &elected_stashes { + let pref = Self::validators(stash); + >::insert(¤t_era, stash, pref); + } + + // emit event + Self::deposit_event(RawEvent::StakingElection(compute)); + + log!( + info, + "💸 new validator set of size {:?} has been elected via {:?} for era {:?}", + elected_stashes.len(), + compute, + current_era, + ); + + Some(elected_stashes) + } else { + None + } + } + + /// Select a new validator set from the assembled stakers and their role preferences. It tries + /// first to peek into [`QueuedElected`]. Otherwise, it runs a new phragmen. + /// + /// If [`QueuedElected`] and [`QueuedScore`] exists, they are both removed. No further storage + /// is updated. + fn try_do_phragmen() -> Option>> { + // a phragmen result from either a stored submission or locally executed one. 
+ let next_result = >::take().or_else(|| { + Self::do_phragmen_with_post_processing::(ElectionCompute::OnChain) + }); + + // either way, kill this. We remove it here to make sure it always has the exact same + // lifetime as `QueuedElected`. + QueuedScore::kill(); + + next_result + } + + /// Execute phragmen and return the new results. The edge weights are processed into support + /// values. + /// + /// This is basically a wrapper around [`do_phragmen`] which translates `PhragmenResult` into + /// `ElectionResult`. + /// + /// No storage item is updated. + fn do_phragmen_with_post_processing( + compute: ElectionCompute, + ) -> Option>> + where + Accuracy: sp_std::ops::Mul, + ExtendedBalance: From<::Inner>, + { + if let Some(phragmen_result) = Self::do_phragmen::() { + let elected_stashes = phragmen_result + .winners + .iter() + .map(|(s, _)| s.clone()) + .collect::>(); + let assignments = phragmen_result.assignments; + + let staked_assignments = sp_phragmen::assignment_ratio_to_staked( + assignments, + Self::slashable_balance_of_vote_weight, + ); + + let (supports, _) = + build_support_map::(&elected_stashes, &staked_assignments); + + // collect exposures + let exposures = Self::collect_exposure(supports); + + // In order to keep the property required by `on_session_ending` that we must return the + // new validator set even if it's the same as the old, as long as any underlying + // economic conditions have changed, we don't attempt to do any optimization where we + // compare against the prior set. + Some(ElectionResult::> { + elected_stashes, + exposures, + compute, + }) + } else { + // There were not enough candidates for even our minimal level of functionality. This is + // bad. We should probably disable all functionality except for block production and let + // the chain keep producing blocks until we can decide on a sufficiently substantial + // set. TODO: #2494 + None + } + } + + /// Execute phragmen and return the new results. 
No post-processing is applied and the raw edge + /// weights are returned. + /// + /// Self votes are added and nominations before the most recent slashing span are reaped. + /// + /// No storage item is updated. + fn do_phragmen() -> Option> { + let mut all_nominators: Vec<(T::AccountId, VoteWeight, Vec)> = Vec::new(); + let mut all_validators = Vec::new(); + for (validator, _) in >::iter() { + // append self vote + let self_vote = ( + validator.clone(), + Self::slashable_balance_of_vote_weight(&validator), + vec![validator.clone()], + ); + all_nominators.push(self_vote); + all_validators.push(validator); + } + + let nominator_votes = >::iter().map(|(nominator, nominations)| { + let Nominations { + submitted_in, + mut targets, + suppressed: _, + } = nominations; + + // Filter out nomination targets which were nominated before the most recent + // slashing span. + targets.retain(|stash| { + ::SlashingSpans::get(&stash) + .map_or(true, |spans| submitted_in >= spans.last_nonzero_slash()) + }); + + (nominator, targets) + }); + all_nominators.extend(nominator_votes.map(|(n, ns)| { + let s = Self::slashable_balance_of_vote_weight(&n); + (n, s, ns) + })); + + elect::<_, Accuracy>( + Self::validator_count() as usize, + Self::minimum_validator_count().max(1) as usize, + all_validators, + all_nominators, + ) + } + + /// Consume a set of [`Supports`] from [`sp_phragmen`] and collect them into a [`Exposure`] + fn collect_exposure( + supports: SupportMap, + ) -> Vec<(T::AccountId, Exposure>)> { + let to_balance = |e: ExtendedBalance| { + >>::convert(e) + }; + + supports + .into_iter() + .map(|(validator, support)| { + // build `struct exposure` from `support` + let mut others = Vec::new(); + let mut own: BalanceOf = Zero::zero(); + let mut total: BalanceOf = Zero::zero(); + support + .voters + .into_iter() + .map(|(nominator, weight)| (nominator, to_balance(weight))) + .for_each(|(nominator, stake)| { + if nominator == validator { + own = own.saturating_add(stake); + } else { 
+ others.push(IndividualExposure { + who: nominator, + value: stake, + }); + } + total = total.saturating_add(stake); + }); + + let exposure = Exposure { own, others, total }; + + (validator, exposure) + }) + .collect::)>>() + } + + /// Remove all associated data of a stash account from the staking system. + /// + /// Assumes storage is upgraded before calling. + /// + /// This is called: + /// - after a `withdraw_unbond()` call that frees all of a stash's bonded balance. + /// - through `reap_stash()` if the balance has fallen to zero (through slashing). + fn kill_stash(stash: &T::AccountId) -> DispatchResult { + let controller = Bonded::::take(stash).ok_or(Error::::NotStash)?; + >::remove(&controller); + + >::remove(stash); + >::remove(stash); + >::remove(stash); + + slashing::clear_stash_metadata::(stash); + + system::Module::::dec_ref(stash); + + Ok(()) + } + + /// Clear all era information for given era. + fn clear_era_information(era_index: EraIndex) { + >::remove_prefix(era_index); + >::remove_prefix(era_index); + >::remove_prefix(era_index); + >::remove(era_index); + >::remove(era_index); + >::remove(era_index); + ErasStartSessionIndex::remove(era_index); + } + + /// Apply previously-unapplied slashes on the beginning of a new era, after a delay. + fn apply_unapplied_slashes(active_era: EraIndex) { + let slash_defer_duration = T::SlashDeferDuration::get(); + ::EarliestUnappliedSlash::mutate(|earliest| { + if let Some(ref mut earliest) = earliest { + let keep_from = active_era.saturating_sub(slash_defer_duration); + for era in (*earliest)..keep_from { + let era_slashes = ::UnappliedSlashes::take(&era); + for slash in era_slashes { + slashing::apply_slash::(slash); + } + } + + *earliest = (*earliest).max(keep_from) + } + }) + } + + /// Add reward points to validators using their stash account ID. + /// + /// Validators are keyed by stash account ID and must be in the current elected set. 
+ /// + /// For each element in the iterator the given number of points in u32 is added to the + /// validator, thus duplicates are handled. + /// + /// At the end of the era each the total payout will be distributed among validator + /// relatively to their points. + /// + /// COMPLEXITY: Complexity is `number_of_validator_to_reward x current_elected_len`. + /// If you need to reward lots of validator consider using `reward_by_indices`. + pub fn reward_by_ids(validators_points: impl IntoIterator) { + if let Some(active_era) = Self::active_era() { + >::mutate(active_era.index, |era_rewards| { + for (validator, points) in validators_points.into_iter() { + *era_rewards.individual.entry(validator).or_default() += points; + era_rewards.total += points; + } + }); + } + } + + /// Ensures that at the end of the current session there will be a new era. + fn ensure_new_era() { + match ForceEra::get() { + Forcing::ForceAlways | Forcing::ForceNew => (), + _ => ForceEra::put(Forcing::ForceNew), + } + } + + fn will_era_be_forced() -> bool { + match ForceEra::get() { + Forcing::ForceAlways | Forcing::ForceNew => true, + Forcing::ForceNone | Forcing::NotForcing => false, + } + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn add_era_stakers( + current_era: EraIndex, + controller: T::AccountId, + exposure: Exposure>, + ) { + >::insert(¤t_era, &controller, &exposure); + } + + #[cfg(feature = "runtime-benchmarks")] + pub fn put_election_status(status: ElectionStatus) { + >::put(status); + } } /// In this implementation `new_session(session)` must be called before `end_session(session-1)` @@ -2920,38 +2974,43 @@ impl Module { /// Once the first new_session is planned, all session must start and then end in order, though /// some session can lag in between the newest session planned and the latest session started. 
impl pallet_session::SessionManager for Module { - fn new_session(new_index: SessionIndex) -> Option> { - Self::new_session(new_index) - } - fn start_session(start_index: SessionIndex) { - Self::start_session(start_index) - } - fn end_session(end_index: SessionIndex) { - Self::end_session(end_index) - } + fn new_session(new_index: SessionIndex) -> Option> { + Self::new_session(new_index) + } + fn start_session(start_index: SessionIndex) { + Self::start_session(start_index) + } + fn end_session(end_index: SessionIndex) { + Self::end_session(end_index) + } } -impl historical::SessionManager>> for Module { - fn new_session(new_index: SessionIndex) - -> Option>)>> - { - >::new_session(new_index).map(|validators| { - let current_era = Self::current_era() - // Must be some as a new era has been created. - .unwrap_or(0); - - validators.into_iter().map(|v| { - let exposure = Self::eras_stakers(current_era, &v); - (v, exposure) - }).collect() - }) - } - fn start_session(start_index: SessionIndex) { - >::start_session(start_index) - } - fn end_session(end_index: SessionIndex) { - >::end_session(end_index) - } +impl historical::SessionManager>> + for Module +{ + fn new_session( + new_index: SessionIndex, + ) -> Option>)>> { + >::new_session(new_index).map(|validators| { + let current_era = Self::current_era() + // Must be some as a new era has been created. 
+ .unwrap_or(0); + + validators + .into_iter() + .map(|v| { + let exposure = Self::eras_stakers(current_era, &v); + (v, exposure) + }) + .collect() + }) + } + fn start_session(start_index: SessionIndex) { + >::start_session(start_index) + } + fn end_session(end_index: SessionIndex) { + >::end_session(end_index) + } } /// Add reward points to block authors: @@ -2959,18 +3018,18 @@ impl historical::SessionManager pallet_authorship::EventHandler for Module - where - T: Trait + pallet_authorship::Trait + pallet_session::Trait +where + T: Trait + pallet_authorship::Trait + pallet_session::Trait, { - fn note_author(author: T::AccountId) { - Self::reward_by_ids(vec![(author, 20)]) - } - fn note_uncle(author: T::AccountId, _age: T::BlockNumber) { - Self::reward_by_ids(vec![ - (>::author(), 2), - (author, 1) - ]) - } + fn note_author(author: T::AccountId) { + Self::reward_by_ids(vec![(author, 20)]) + } + fn note_uncle(author: T::AccountId, _age: T::BlockNumber) { + Self::reward_by_ids(vec![ + (>::author(), 2), + (author, 1), + ]) + } } /// A `Convert` implementation that finds the stash of the given controller account, @@ -2978,9 +3037,9 @@ impl pallet_authorship::EventHandler for Module pub struct StashOf(sp_std::marker::PhantomData); impl Convert> for StashOf { - fn convert(controller: T::AccountId) -> Option { - >::ledger(&controller).map(|l| l.stash) - } + fn convert(controller: T::AccountId) -> Option { + >::ledger(&controller).map(|l| l.stash) + } } /// A typed conversion from stash account ID to the active exposure of nominators @@ -2991,213 +3050,237 @@ impl Convert> for StashOf { pub struct ExposureOf(sp_std::marker::PhantomData); impl Convert>>> - for ExposureOf + for ExposureOf { - fn convert(validator: T::AccountId) -> Option>> { - if let Some(active_era) = >::active_era() { - Some(>::eras_stakers(active_era.index, &validator)) - } else { - None - } - } + fn convert(validator: T::AccountId) -> Option>> { + if let Some(active_era) = >::active_era() { + 
Some(>::eras_stakers(active_era.index, &validator)) + } else { + None + } + } } /// This is intended to be used with `FilterHistoricalOffences`. -impl OnOffenceHandler> for Module where - T: pallet_session::Trait::AccountId>, - T: pallet_session::historical::Trait< - FullIdentification = Exposure<::AccountId, BalanceOf>, - FullIdentificationOf = ExposureOf, - >, - T::SessionHandler: pallet_session::SessionHandler<::AccountId>, - T::SessionManager: pallet_session::SessionManager<::AccountId>, - T::ValidatorIdOf: Convert<::AccountId, Option<::AccountId>> +impl OnOffenceHandler> + for Module +where + T: pallet_session::Trait::AccountId>, + T: pallet_session::historical::Trait< + FullIdentification = Exposure<::AccountId, BalanceOf>, + FullIdentificationOf = ExposureOf, + >, + T::SessionHandler: pallet_session::SessionHandler<::AccountId>, + T::SessionManager: pallet_session::SessionManager<::AccountId>, + T::ValidatorIdOf: Convert< + ::AccountId, + Option<::AccountId>, + >, { - fn on_offence( - offenders: &[OffenceDetails>], - slash_fraction: &[Perbill], - slash_session: SessionIndex, - ) -> Result<(), ()> { - if !Self::can_report() { - return Err(()) - } - - let reward_proportion = SlashRewardFraction::get(); - - let active_era = { - let active_era = Self::active_era(); - if active_era.is_none() { - // this offence need not be re-submitted. - return Ok(()) - } - active_era.expect("value checked not to be `None`; qed").index - }; - let active_era_start_session_index = Self::eras_start_session_index(active_era) - .unwrap_or_else(|| { - frame_support::print("Error: start_session_index must be set for current_era"); - 0 - }); - - let window_start = active_era.saturating_sub(T::BondingDuration::get()); - - // fast path for active-era report - most likely. - // `slash_session` cannot be in a future active era. It must be in `active_era` or before. 
- let slash_era = if slash_session >= active_era_start_session_index { - active_era - } else { - let eras = BondedEras::get(); - - // reverse because it's more likely to find reports from recent eras. - match eras.iter().rev().filter(|&&(_, ref sesh)| sesh <= &slash_session).next() { - None => return Ok(()), // before bonding period. defensive - should be filtered out. - Some(&(ref slash_era, _)) => *slash_era, - } - }; - - ::EarliestUnappliedSlash::mutate(|earliest| { - if earliest.is_none() { - *earliest = Some(active_era) - } - }); - - let slash_defer_duration = T::SlashDeferDuration::get(); - - for (details, slash_fraction) in offenders.iter().zip(slash_fraction) { - let (stash, exposure) = &details.offender; - - // Skip if the validator is invulnerable. - if Self::invulnerables().contains(stash) { - continue - } - - let unapplied = slashing::compute_slash::(slashing::SlashParams { - stash, - slash: *slash_fraction, - exposure, - slash_era, - window_start, - now: active_era, - reward_proportion, - }); - - if let Some(mut unapplied) = unapplied { - unapplied.reporters = details.reporters.clone(); - if slash_defer_duration == 0 { - // apply right away. - slashing::apply_slash::(unapplied); - } else { - // defer to end of some `slash_defer_duration` from now. - ::UnappliedSlashes::mutate( - active_era, - move |for_later| for_later.push(unapplied), - ); - } - } - } - - Ok(()) - } - - fn can_report() -> bool { - Self::era_election_status().is_closed() - } + fn on_offence( + offenders: &[OffenceDetails< + T::AccountId, + pallet_session::historical::IdentificationTuple, + >], + slash_fraction: &[Perbill], + slash_session: SessionIndex, + ) -> Result<(), ()> { + if !Self::can_report() { + return Err(()); + } + + let reward_proportion = SlashRewardFraction::get(); + + let active_era = { + let active_era = Self::active_era(); + if active_era.is_none() { + // this offence need not be re-submitted. 
+ return Ok(()); + } + active_era + .expect("value checked not to be `None`; qed") + .index + }; + let active_era_start_session_index = Self::eras_start_session_index(active_era) + .unwrap_or_else(|| { + frame_support::print("Error: start_session_index must be set for current_era"); + 0 + }); + + let window_start = active_era.saturating_sub(T::BondingDuration::get()); + + // fast path for active-era report - most likely. + // `slash_session` cannot be in a future active era. It must be in `active_era` or before. + let slash_era = if slash_session >= active_era_start_session_index { + active_era + } else { + let eras = BondedEras::get(); + + // reverse because it's more likely to find reports from recent eras. + match eras + .iter() + .rev() + .filter(|&&(_, ref sesh)| sesh <= &slash_session) + .next() + { + None => return Ok(()), // before bonding period. defensive - should be filtered out. + Some(&(ref slash_era, _)) => *slash_era, + } + }; + + ::EarliestUnappliedSlash::mutate(|earliest| { + if earliest.is_none() { + *earliest = Some(active_era) + } + }); + + let slash_defer_duration = T::SlashDeferDuration::get(); + + for (details, slash_fraction) in offenders.iter().zip(slash_fraction) { + let (stash, exposure) = &details.offender; + + // Skip if the validator is invulnerable. + if Self::invulnerables().contains(stash) { + continue; + } + + let unapplied = slashing::compute_slash::(slashing::SlashParams { + stash, + slash: *slash_fraction, + exposure, + slash_era, + window_start, + now: active_era, + reward_proportion, + }); + + if let Some(mut unapplied) = unapplied { + unapplied.reporters = details.reporters.clone(); + if slash_defer_duration == 0 { + // apply right away. + slashing::apply_slash::(unapplied); + } else { + // defer to end of some `slash_defer_duration` from now. 
+ ::UnappliedSlashes::mutate(active_era, move |for_later| { + for_later.push(unapplied) + }); + } + } + } + + Ok(()) + } + + fn can_report() -> bool { + Self::era_election_status().is_closed() + } } /// Filter historical offences out and only allow those from the bonding period. pub struct FilterHistoricalOffences { - _inner: sp_std::marker::PhantomData<(T, R)>, + _inner: sp_std::marker::PhantomData<(T, R)>, } impl ReportOffence - for FilterHistoricalOffences, R> where - T: Trait, - R: ReportOffence, - O: Offence, + for FilterHistoricalOffences, R> +where + T: Trait, + R: ReportOffence, + O: Offence, { - fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { - // disallow any slashing from before the current bonding period. - let offence_session = offence.session_index(); - let bonded_eras = BondedEras::get(); - - if bonded_eras.first().filter(|(_, start)| offence_session >= *start).is_some() { - R::report_offence(reporters, offence) - } else { - >::deposit_event( - RawEvent::OldSlashingReportDiscarded(offence_session) - ); - Ok(()) - } - } + fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError> { + // disallow any slashing from before the current bonding period. 
+ let offence_session = offence.session_index(); + let bonded_eras = BondedEras::get(); + + if bonded_eras + .first() + .filter(|(_, start)| offence_session >= *start) + .is_some() + { + R::report_offence(reporters, offence) + } else { + >::deposit_event(RawEvent::OldSlashingReportDiscarded(offence_session)); + Ok(()) + } + } } impl From> for InvalidTransaction { - fn from(e: Error) -> Self { - InvalidTransaction::Custom(e.as_u8()) - } + fn from(e: Error) -> Self { + InvalidTransaction::Custom(e.as_u8()) + } } #[allow(deprecated)] impl frame_support::unsigned::ValidateUnsigned for Module { - type Call = Call; - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { - if let Call::submit_election_solution_unsigned( - _, - _, - score, - era, - ) = call { - use offchain_election::DEFAULT_LONGEVITY; - - // discard solution not coming from the local OCW. - match source { - TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } - _ => { - log!(debug, "rejecting unsigned transaction because it is not local/in-block."); - return InvalidTransaction::Call.into(); - } - } - - if let Err(e) = Self::pre_dispatch_checks(*score, *era) { - log!(debug, "validate unsigned pre dispatch checks failed due to {:?}.", e); - return InvalidTransaction::from(e).into(); - } - - log!(debug, "validateUnsigned succeeded for a solution at era {}.", era); - - ValidTransaction::with_tag_prefix("StakingOffchain") - // The higher the score[0], the better a solution is. - .priority(T::UnsignedPriority::get().saturating_add(score[0].saturated_into())) - // Defensive only. A single solution can exist in the pool per era. Each validator - // will run OCW at most once per era, hence there should never exist more than one - // transaction anyhow. - .and_provides(era) - // Note: this can be more accurate in the future. 
We do something like - // `era_end_block - current_block` but that is not needed now as we eagerly run - // offchain workers now and the above should be same as `T::ElectionLookahead` - // without the need to query more storage in the validation phase. If we randomize - // offchain worker, then we might re-consider this. - .longevity(TryInto::::try_into( - T::ElectionLookahead::get()).unwrap_or(DEFAULT_LONGEVITY) - ) - // We don't propagate this. This can never the validated at a remote node. - .propagate(false) - .build() - } else { - InvalidTransaction::Call.into() - } - } - - fn pre_dispatch(_: &Self::Call) -> Result<(), TransactionValidityError> { - // IMPORTANT NOTE: By default, a sane `pre-dispatch` should always do the same checks as - // `validate_unsigned` and overriding this should be done with care. this module has only - // one unsigned entry point, in which we call into `>::pre_dispatch_checks()` - // which is all the important checks that we do in `validate_unsigned`. Hence, we can safely - // override this to save some time. - Ok(()) - } + type Call = Call; + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity { + if let Call::submit_election_solution_unsigned(_, _, score, era) = call { + use offchain_election::DEFAULT_LONGEVITY; + + // discard solution not coming from the local OCW. + match source { + TransactionSource::Local | TransactionSource::InBlock => { /* allowed */ } + _ => { + log!( + debug, + "rejecting unsigned transaction because it is not local/in-block." 
+ ); + return InvalidTransaction::Call.into(); + } + } + + if let Err(e) = Self::pre_dispatch_checks(*score, *era) { + log!( + debug, + "validate unsigned pre dispatch checks failed due to {:?}.", + e + ); + return InvalidTransaction::from(e).into(); + } + + log!( + debug, + "validateUnsigned succeeded for a solution at era {}.", + era + ); + + ValidTransaction::with_tag_prefix("StakingOffchain") + // The higher the score[0], the better a solution is. + .priority(T::UnsignedPriority::get().saturating_add(score[0].saturated_into())) + // Defensive only. A single solution can exist in the pool per era. Each validator + // will run OCW at most once per era, hence there should never exist more than one + // transaction anyhow. + .and_provides(era) + // Note: this can be more accurate in the future. We do something like + // `era_end_block - current_block` but that is not needed now as we eagerly run + // offchain workers now and the above should be same as `T::ElectionLookahead` + // without the need to query more storage in the validation phase. If we randomize + // offchain worker, then we might re-consider this. + .longevity( + TryInto::::try_into(T::ElectionLookahead::get()) + .unwrap_or(DEFAULT_LONGEVITY), + ) + // We don't propagate this. This can never the validated at a remote node. + .propagate(false) + .build() + } else { + InvalidTransaction::Call.into() + } + } + + fn pre_dispatch(_: &Self::Call) -> Result<(), TransactionValidityError> { + // IMPORTANT NOTE: By default, a sane `pre-dispatch` should always do the same checks as + // `validate_unsigned` and overriding this should be done with care. this module has only + // one unsigned entry point, in which we call into `>::pre_dispatch_checks()` + // which is all the important checks that we do in `validate_unsigned`. Hence, we can safely + // override this to save some time. + Ok(()) + } } /// Check that list is sorted and has no duplicates. 
fn is_sorted_and_unique(list: &[u32]) -> bool { - list.windows(2).all(|w| w[0] < w[1]) + list.windows(2).all(|w| w[0] < w[1]) } diff --git a/frame/staking/src/mock.rs b/frame/staking/src/mock.rs index d522a19615..040087d22b 100644 --- a/frame/staking/src/mock.rs +++ b/frame/staking/src/mock.rs @@ -16,26 +16,32 @@ //! Test utilities -use std::{collections::{HashSet, HashMap}, cell::RefCell}; -use sp_runtime::Perbill; -use sp_runtime::curve::PiecewiseLinear; -use sp_runtime::traits::{IdentityLookup, Convert, SaturatedConversion, Zero}; -use sp_runtime::testing::{Header, UintAuthorityId, TestXt}; -use sp_staking::{SessionIndex, offence::{OffenceDetails, OnOffenceHandler}}; -use sp_core::H256; +use crate::*; use frame_support::{ - assert_ok, impl_outer_origin, parameter_types, impl_outer_dispatch, impl_outer_event, - StorageValue, StorageMap, StorageDoubleMap, IterableStorageMap, - traits::{Currency, Get, FindAuthor, OnFinalize, OnInitialize}, - weights::Weight, + assert_ok, impl_outer_dispatch, impl_outer_event, impl_outer_origin, parameter_types, + traits::{Currency, FindAuthor, Get, OnFinalize, OnInitialize}, + weights::Weight, + IterableStorageMap, StorageDoubleMap, StorageMap, StorageValue, }; use frame_system::offchain::TransactionSubmitter; +use sp_core::H256; use sp_io; use sp_phragmen::{ - build_support_map, evaluate_support, reduce, ExtendedBalance, StakedAssignment, PhragmenScore, - VoteWeight, + build_support_map, evaluate_support, reduce, ExtendedBalance, PhragmenScore, StakedAssignment, + VoteWeight, +}; +use sp_runtime::curve::PiecewiseLinear; +use sp_runtime::testing::{Header, TestXt, UintAuthorityId}; +use sp_runtime::traits::{Convert, IdentityLookup, SaturatedConversion, Zero}; +use sp_runtime::Perbill; +use sp_staking::{ + offence::{OffenceDetails, OnOffenceHandler}, + SessionIndex, +}; +use std::{ + cell::RefCell, + collections::{HashMap, HashSet}, }; -use crate::*; const INIT_TIMESTAMP: u64 = 30_000; @@ -48,137 +54,141 @@ pub(crate) type Balance = 
u128; /// Simple structure that exposes how u64 currency can be represented as... u64. pub struct CurrencyToVoteHandler; impl Convert for CurrencyToVoteHandler { - fn convert(x: Balance) -> u64 { - x.saturated_into() - } + fn convert(x: Balance) -> u64 { + x.saturated_into() + } } impl Convert for CurrencyToVoteHandler { - fn convert(x: u128) -> Balance { - x - } + fn convert(x: u128) -> Balance { + x + } } thread_local! { - static SESSION: RefCell<(Vec, HashSet)> = RefCell::new(Default::default()); - static SESSION_PER_ERA: RefCell = RefCell::new(3); - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - static SLASH_DEFER_DURATION: RefCell = RefCell::new(0); - static ELECTION_LOOKAHEAD: RefCell = RefCell::new(0); - static PERIOD: RefCell = RefCell::new(1); + static SESSION: RefCell<(Vec, HashSet)> = RefCell::new(Default::default()); + static SESSION_PER_ERA: RefCell = RefCell::new(3); + static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); + static SLASH_DEFER_DURATION: RefCell = RefCell::new(0); + static ELECTION_LOOKAHEAD: RefCell = RefCell::new(0); + static PERIOD: RefCell = RefCell::new(1); } /// Another session handler struct to test on_disabled. 
pub struct OtherSessionHandler; impl pallet_session::OneSessionHandler for OtherSessionHandler { - type Key = UintAuthorityId; - - fn on_genesis_session<'a, I: 'a>(_: I) - where I: Iterator, AccountId: 'a {} - - fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I,) - where I: Iterator, AccountId: 'a - { - SESSION.with(|x| { - *x.borrow_mut() = ( - validators.map(|x| x.0.clone()).collect(), - HashSet::new(), - ) - }); - } - - fn on_disabled(validator_index: usize) { - SESSION.with(|d| { - let mut d = d.borrow_mut(); - let value = d.0[validator_index]; - d.1.insert(value); - }) - } + type Key = UintAuthorityId; + + fn on_genesis_session<'a, I: 'a>(_: I) + where + I: Iterator, + AccountId: 'a, + { + } + + fn on_new_session<'a, I: 'a>(_: bool, validators: I, _: I) + where + I: Iterator, + AccountId: 'a, + { + SESSION.with(|x| { + *x.borrow_mut() = (validators.map(|x| x.0.clone()).collect(), HashSet::new()) + }); + } + + fn on_disabled(validator_index: usize) { + SESSION.with(|d| { + let mut d = d.borrow_mut(); + let value = d.0[validator_index]; + d.1.insert(value); + }) + } } impl sp_runtime::BoundToRuntimeAppPublic for OtherSessionHandler { - type Public = UintAuthorityId; + type Public = UintAuthorityId; } pub fn is_disabled(controller: AccountId) -> bool { - let stash = Staking::ledger(&controller).unwrap().stash; - SESSION.with(|d| d.borrow().1.contains(&stash)) + let stash = Staking::ledger(&controller).unwrap().stash; + SESSION.with(|d| d.borrow().1.contains(&stash)) } pub struct ExistentialDeposit; impl Get for ExistentialDeposit { - fn get() -> Balance { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) - } + fn get() -> Balance { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) + } } pub struct SessionsPerEra; impl Get for SessionsPerEra { - fn get() -> SessionIndex { - SESSION_PER_ERA.with(|v| *v.borrow()) - } + fn get() -> SessionIndex { + SESSION_PER_ERA.with(|v| *v.borrow()) + } } impl Get for SessionsPerEra { - fn get() -> BlockNumber { - 
SESSION_PER_ERA.with(|v| *v.borrow() as BlockNumber) - } + fn get() -> BlockNumber { + SESSION_PER_ERA.with(|v| *v.borrow() as BlockNumber) + } } pub struct ElectionLookahead; impl Get for ElectionLookahead { - fn get() -> BlockNumber { - ELECTION_LOOKAHEAD.with(|v| *v.borrow()) - } + fn get() -> BlockNumber { + ELECTION_LOOKAHEAD.with(|v| *v.borrow()) + } } pub struct Period; impl Get for Period { - fn get() -> BlockNumber { - PERIOD.with(|v| *v.borrow()) - } + fn get() -> BlockNumber { + PERIOD.with(|v| *v.borrow()) + } } pub struct SlashDeferDuration; impl Get for SlashDeferDuration { - fn get() -> EraIndex { - SLASH_DEFER_DURATION.with(|v| *v.borrow()) - } + fn get() -> EraIndex { + SLASH_DEFER_DURATION.with(|v| *v.borrow()) + } } impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - staking::Staking, - } + pub enum Call for Test where origin: Origin { + staking::Staking, + } } mod staking { - // Re-export needed for `impl_outer_event!`. - pub use super::super::*; + // Re-export needed for `impl_outer_event!`. + pub use super::super::*; } use frame_system as system; use pallet_balances as balances; use pallet_session as session; impl_outer_event! { - pub enum MetaEvent for Test { - system, - balances, - session, - staking, - } + pub enum MetaEvent for Test { + system, + balances, + session, + staking, + } } /// Author of block is always 11 pub struct Author11; impl FindAuthor for Author11 { - fn find_author<'a, I>(_digests: I) -> Option - where I: 'a + IntoIterator, - { - Some(11) - } + fn find_author<'a, I>(_digests: I) -> Option + where + I: 'a + IntoIterator, + { + Some(11) + } } // Workaround for https://github.com/rust-lang/rust/issues/26925 . Remove when sorted. @@ -186,320 +196,339 @@ impl FindAuthor for Author11 { pub struct Test; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = AccountIndex; - type BlockNumber = BlockNumber; - type Call = Call; - type Hash = H256; - type Hashing = ::sp_runtime::traits::BlakeTwo256; - type AccountId = AccountId; - type Lookup = IdentityLookup; - type Header = Header; - type Event = MetaEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = AccountIndex; + type BlockNumber = BlockNumber; + type Call = Call; + type Hash = H256; + type Hashing = ::sp_runtime::traits::BlakeTwo256; + type AccountId = AccountId; + type Lookup = IdentityLookup; + type Header = Header; + type Event = MetaEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } impl pallet_balances::Trait for Test { - type Balance = Balance; - type Event = MetaEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = Balance; + type Event = MetaEvent; + type DustRemoval 
= (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const Offset: BlockNumber = 0; - pub const UncleGenerations: u64 = 0; - pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(25); + pub const Offset: BlockNumber = 0; + pub const UncleGenerations: u64 = 0; + pub const DisabledValidatorsThreshold: Perbill = Perbill::from_percent(25); } sp_runtime::impl_opaque_keys! { - pub struct SessionKeys { - pub other: OtherSessionHandler, - } + pub struct SessionKeys { + pub other: OtherSessionHandler, + } } impl pallet_session::Trait for Test { - type SessionManager = pallet_session::historical::NoteHistoricalRoot; - type Keys = SessionKeys; - type ShouldEndSession = pallet_session::PeriodicSessions; - type SessionHandler = (OtherSessionHandler,); - type Event = MetaEvent; - type ValidatorId = AccountId; - type ValidatorIdOf = crate::StashOf; - type DisabledValidatorsThreshold = DisabledValidatorsThreshold; - type NextSessionRotation = pallet_session::PeriodicSessions; + type SessionManager = pallet_session::historical::NoteHistoricalRoot; + type Keys = SessionKeys; + type ShouldEndSession = pallet_session::PeriodicSessions; + type SessionHandler = (OtherSessionHandler,); + type Event = MetaEvent; + type ValidatorId = AccountId; + type ValidatorIdOf = crate::StashOf; + type DisabledValidatorsThreshold = DisabledValidatorsThreshold; + type NextSessionRotation = pallet_session::PeriodicSessions; } impl pallet_session::historical::Trait for Test { - type FullIdentification = crate::Exposure; - type FullIdentificationOf = crate::ExposureOf; + type FullIdentification = crate::Exposure; + type FullIdentificationOf = crate::ExposureOf; } impl pallet_authorship::Trait for Test { - type FindAuthor = Author11; - type UncleGenerations = UncleGenerations; - type FilterUncle = (); - type EventHandler = Module; + type FindAuthor = Author11; + type UncleGenerations = UncleGenerations; + type FilterUncle = (); + 
type EventHandler = Module; } parameter_types! { - pub const MinimumPeriod: u64 = 5; + pub const MinimumPeriod: u64 = 5; } impl pallet_timestamp::Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; } pallet_staking_reward_curve::build! { - const I_NPOS: PiecewiseLinear<'static> = curve!( - min_inflation: 0_025_000, - max_inflation: 0_100_000, - ideal_stake: 0_500_000, - falloff: 0_050_000, - max_piece_count: 40, - test_precision: 0_005_000, - ); + const I_NPOS: PiecewiseLinear<'static> = curve!( + min_inflation: 0_025_000, + max_inflation: 0_100_000, + ideal_stake: 0_500_000, + falloff: 0_050_000, + max_piece_count: 40, + test_precision: 0_005_000, + ); } parameter_types! { - pub const BondingDuration: EraIndex = 3; - pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; - pub const MaxNominatorRewardedPerValidator: u32 = 64; - pub const UnsignedPriority: u64 = 1 << 20; + pub const BondingDuration: EraIndex = 3; + pub const RewardCurve: &'static PiecewiseLinear<'static> = &I_NPOS; + pub const MaxNominatorRewardedPerValidator: u32 = 64; + pub const UnsignedPriority: u64 = 1 << 20; } impl Trait for Test { - type Currency = Balances; - type UnixTime = Timestamp; - type CurrencyToVote = CurrencyToVoteHandler; - type RewardRemainder = (); - type Event = MetaEvent; - type Slash = (); - type Reward = (); - type SessionsPerEra = SessionsPerEra; - type SlashDeferDuration = SlashDeferDuration; - type SlashCancelOrigin = frame_system::EnsureRoot; - type BondingDuration = BondingDuration; - type SessionInterface = Self; - type RewardCurve = RewardCurve; - type NextNewSession = Session; - type ElectionLookahead = ElectionLookahead; - type Call = Call; - type SubmitTransaction = SubmitTransaction; - type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; - type UnsignedPriority = UnsignedPriority; + type 
Currency = Balances; + type UnixTime = Timestamp; + type CurrencyToVote = CurrencyToVoteHandler; + type RewardRemainder = (); + type Event = MetaEvent; + type Slash = (); + type Reward = (); + type SessionsPerEra = SessionsPerEra; + type SlashDeferDuration = SlashDeferDuration; + type SlashCancelOrigin = frame_system::EnsureRoot; + type BondingDuration = BondingDuration; + type SessionInterface = Self; + type RewardCurve = RewardCurve; + type NextNewSession = Session; + type ElectionLookahead = ElectionLookahead; + type Call = Call; + type SubmitTransaction = SubmitTransaction; + type MaxNominatorRewardedPerValidator = MaxNominatorRewardedPerValidator; + type UnsignedPriority = UnsignedPriority; } pub type Extrinsic = TestXt; type SubmitTransaction = TransactionSubmitter<(), Test, Extrinsic>; pub struct ExtBuilder { - session_length: BlockNumber, - election_lookahead: BlockNumber, - session_per_era: SessionIndex, - existential_deposit: Balance, - validator_pool: bool, - nominate: bool, - validator_count: u32, - minimum_validator_count: u32, - slash_defer_duration: EraIndex, - fair: bool, - num_validators: Option, - invulnerables: Vec, - has_stakers: bool, + session_length: BlockNumber, + election_lookahead: BlockNumber, + session_per_era: SessionIndex, + existential_deposit: Balance, + validator_pool: bool, + nominate: bool, + validator_count: u32, + minimum_validator_count: u32, + slash_defer_duration: EraIndex, + fair: bool, + num_validators: Option, + invulnerables: Vec, + has_stakers: bool, } impl Default for ExtBuilder { - fn default() -> Self { - Self { - session_length: 1, - election_lookahead: 0, - session_per_era: 3, - existential_deposit: 1, - validator_pool: false, - nominate: true, - validator_count: 2, - minimum_validator_count: 0, - slash_defer_duration: 0, - fair: true, - num_validators: None, - invulnerables: vec![], - has_stakers: true, - } - } + fn default() -> Self { + Self { + session_length: 1, + election_lookahead: 0, + session_per_era: 3, + 
existential_deposit: 1, + validator_pool: false, + nominate: true, + validator_count: 2, + minimum_validator_count: 0, + slash_defer_duration: 0, + fair: true, + num_validators: None, + invulnerables: vec![], + has_stakers: true, + } + } } impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: Balance) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn validator_pool(mut self, validator_pool: bool) -> Self { - self.validator_pool = validator_pool; - self - } - pub fn nominate(mut self, nominate: bool) -> Self { - self.nominate = nominate; - self - } - pub fn validator_count(mut self, count: u32) -> Self { - self.validator_count = count; - self - } - pub fn minimum_validator_count(mut self, count: u32) -> Self { - self.minimum_validator_count = count; - self - } - pub fn slash_defer_duration(mut self, eras: EraIndex) -> Self { - self.slash_defer_duration = eras; - self - } - pub fn fair(mut self, is_fair: bool) -> Self { - self.fair = is_fair; - self - } - pub fn num_validators(mut self, num_validators: u32) -> Self { - self.num_validators = Some(num_validators); - self - } - pub fn invulnerables(mut self, invulnerables: Vec) -> Self { - self.invulnerables = invulnerables; - self - } - pub fn session_per_era(mut self, length: SessionIndex) -> Self { - self.session_per_era = length; - self - } - pub fn election_lookahead(mut self, look: BlockNumber) -> Self { - self.election_lookahead = look; - self - } - pub fn session_length(mut self, length: BlockNumber) -> Self { - self.session_length = length; - self - } - pub fn has_stakers(mut self, has: bool) -> Self { - self.has_stakers = has; - self - } - pub fn offchain_phragmen_ext(self) -> Self { - self.session_per_era(4) - .session_length(5) - .election_lookahead(3) - } - pub fn set_associated_constants(&self) { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = self.slash_defer_duration); - 
SESSION_PER_ERA.with(|v| *v.borrow_mut() = self.session_per_era); - ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = self.election_lookahead); - PERIOD.with(|v| *v.borrow_mut() = self.session_length); - } - pub fn build(self) -> sp_io::TestExternalities { - let _ = env_logger::try_init(); - self.set_associated_constants(); - let mut storage = frame_system::GenesisConfig::default() - .build_storage::() - .unwrap(); - let balance_factor = if self.existential_deposit > 1 { - 256 - } else { - 1 - }; - - let num_validators = self.num_validators.unwrap_or(self.validator_count); - let validators = (0..num_validators) - .map(|x| ((x + 1) * 10 + 1) as AccountId) - .collect::>(); - - let _ = pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10 * balance_factor), - (2, 20 * balance_factor), - (3, 300 * balance_factor), - (4, 400 * balance_factor), - (10, balance_factor), - (11, balance_factor * 1000), - (20, balance_factor), - (21, balance_factor * 2000), - (30, balance_factor), - (31, balance_factor * 2000), - (40, balance_factor), - (41, balance_factor * 2000), - (100, 2000 * balance_factor), - (101, 2000 * balance_factor), - // This allow us to have a total_payout different from 0. 
- (999, 1_000_000_000_000), - ], - }.assimilate_storage(&mut storage); - - let mut stakers = vec![]; - if self.has_stakers { - let stake_21 = if self.fair { 1000 } else { 2000 }; - let stake_31 = if self.validator_pool { balance_factor * 1000 } else { 1 }; - let status_41 = if self.validator_pool { - StakerStatus::::Validator - } else { - StakerStatus::::Idle - }; - let nominated = if self.nominate { vec![11, 21] } else { vec![] }; - stakers = vec![ - // (stash, controller, staked_amount, status) - (11, 10, balance_factor * 1000, StakerStatus::::Validator), - (21, 20, stake_21, StakerStatus::::Validator), - (31, 30, stake_31, StakerStatus::::Validator), - (41, 40, balance_factor * 1000, status_41), - // nominator - (101, 100, balance_factor * 500, StakerStatus::::Nominator(nominated)) - ]; - } - let _ = GenesisConfig::{ - stakers: stakers, - validator_count: self.validator_count, - minimum_validator_count: self.minimum_validator_count, - invulnerables: self.invulnerables, - slash_reward_fraction: Perbill::from_percent(10), - ..Default::default() - } - .assimilate_storage(&mut storage); - - let _ = pallet_session::GenesisConfig:: { - keys: validators.iter().map(|x| ( - *x, - *x, - SessionKeys { other: UintAuthorityId(*x as u64) } - )).collect(), - }.assimilate_storage(&mut storage); - - let mut ext = sp_io::TestExternalities::from(storage); - ext.execute_with(|| { - let validators = Session::validators(); - SESSION.with(|x| *x.borrow_mut() = (validators.clone(), HashSet::new())); - }); - - // We consider all test to start after timestamp is initialized - // This must be ensured by having `timestamp::on_initialize` called before - // `staking::on_initialize` - ext.execute_with(|| { - System::set_block_number(1); - Timestamp::set_timestamp(INIT_TIMESTAMP); - }); - - ext - } - pub fn build_and_execute(self, test: impl FnOnce() -> ()) { - let mut ext = self.build(); - ext.execute_with(test); - ext.execute_with(post_conditions); - } + pub fn existential_deposit(mut self, 
existential_deposit: Balance) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn validator_pool(mut self, validator_pool: bool) -> Self { + self.validator_pool = validator_pool; + self + } + pub fn nominate(mut self, nominate: bool) -> Self { + self.nominate = nominate; + self + } + pub fn validator_count(mut self, count: u32) -> Self { + self.validator_count = count; + self + } + pub fn minimum_validator_count(mut self, count: u32) -> Self { + self.minimum_validator_count = count; + self + } + pub fn slash_defer_duration(mut self, eras: EraIndex) -> Self { + self.slash_defer_duration = eras; + self + } + pub fn fair(mut self, is_fair: bool) -> Self { + self.fair = is_fair; + self + } + pub fn num_validators(mut self, num_validators: u32) -> Self { + self.num_validators = Some(num_validators); + self + } + pub fn invulnerables(mut self, invulnerables: Vec) -> Self { + self.invulnerables = invulnerables; + self + } + pub fn session_per_era(mut self, length: SessionIndex) -> Self { + self.session_per_era = length; + self + } + pub fn election_lookahead(mut self, look: BlockNumber) -> Self { + self.election_lookahead = look; + self + } + pub fn session_length(mut self, length: BlockNumber) -> Self { + self.session_length = length; + self + } + pub fn has_stakers(mut self, has: bool) -> Self { + self.has_stakers = has; + self + } + pub fn offchain_phragmen_ext(self) -> Self { + self.session_per_era(4) + .session_length(5) + .election_lookahead(3) + } + pub fn set_associated_constants(&self) { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + SLASH_DEFER_DURATION.with(|v| *v.borrow_mut() = self.slash_defer_duration); + SESSION_PER_ERA.with(|v| *v.borrow_mut() = self.session_per_era); + ELECTION_LOOKAHEAD.with(|v| *v.borrow_mut() = self.election_lookahead); + PERIOD.with(|v| *v.borrow_mut() = self.session_length); + } + pub fn build(self) -> sp_io::TestExternalities { + let _ = env_logger::try_init(); + 
self.set_associated_constants(); + let mut storage = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + let balance_factor = if self.existential_deposit > 1 { 256 } else { 1 }; + + let num_validators = self.num_validators.unwrap_or(self.validator_count); + let validators = (0..num_validators) + .map(|x| ((x + 1) * 10 + 1) as AccountId) + .collect::>(); + + let _ = pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 10 * balance_factor), + (2, 20 * balance_factor), + (3, 300 * balance_factor), + (4, 400 * balance_factor), + (10, balance_factor), + (11, balance_factor * 1000), + (20, balance_factor), + (21, balance_factor * 2000), + (30, balance_factor), + (31, balance_factor * 2000), + (40, balance_factor), + (41, balance_factor * 2000), + (100, 2000 * balance_factor), + (101, 2000 * balance_factor), + // This allow us to have a total_payout different from 0. + (999, 1_000_000_000_000), + ], + } + .assimilate_storage(&mut storage); + + let mut stakers = vec![]; + if self.has_stakers { + let stake_21 = if self.fair { 1000 } else { 2000 }; + let stake_31 = if self.validator_pool { + balance_factor * 1000 + } else { + 1 + }; + let status_41 = if self.validator_pool { + StakerStatus::::Validator + } else { + StakerStatus::::Idle + }; + let nominated = if self.nominate { vec![11, 21] } else { vec![] }; + stakers = vec![ + // (stash, controller, staked_amount, status) + ( + 11, + 10, + balance_factor * 1000, + StakerStatus::::Validator, + ), + (21, 20, stake_21, StakerStatus::::Validator), + (31, 30, stake_31, StakerStatus::::Validator), + (41, 40, balance_factor * 1000, status_41), + // nominator + ( + 101, + 100, + balance_factor * 500, + StakerStatus::::Nominator(nominated), + ), + ]; + } + let _ = GenesisConfig:: { + stakers: stakers, + validator_count: self.validator_count, + minimum_validator_count: self.minimum_validator_count, + invulnerables: self.invulnerables, + slash_reward_fraction: Perbill::from_percent(10), + 
..Default::default() + } + .assimilate_storage(&mut storage); + + let _ = pallet_session::GenesisConfig:: { + keys: validators + .iter() + .map(|x| { + ( + *x, + *x, + SessionKeys { + other: UintAuthorityId(*x as u64), + }, + ) + }) + .collect(), + } + .assimilate_storage(&mut storage); + + let mut ext = sp_io::TestExternalities::from(storage); + ext.execute_with(|| { + let validators = Session::validators(); + SESSION.with(|x| *x.borrow_mut() = (validators.clone(), HashSet::new())); + }); + + // We consider all test to start after timestamp is initialized + // This must be ensured by having `timestamp::on_initialize` called before + // `staking::on_initialize` + ext.execute_with(|| { + System::set_block_number(1); + Timestamp::set_timestamp(INIT_TIMESTAMP); + }); + + ext + } + pub fn build_and_execute(self, test: impl FnOnce() -> ()) { + let mut ext = self.build(); + ext.execute_with(test); + ext.execute_with(post_conditions); + } } pub type System = frame_system::Module; @@ -509,470 +538,513 @@ pub type Timestamp = pallet_timestamp::Module; pub type Staking = Module; pub(crate) fn current_era() -> EraIndex { - Staking::current_era().unwrap() + Staking::current_era().unwrap() } fn post_conditions() { - check_nominators(); - check_exposures(); - check_ledgers(); + check_nominators(); + check_exposures(); + check_ledgers(); } pub(crate) fn active_era() -> EraIndex { - Staking::active_era().unwrap().index + Staking::active_era().unwrap().index } fn check_ledgers() { - // check the ledger of all stakers. - Bonded::::iter().for_each(|(_, ctrl)| assert_ledger_consistent(ctrl)) + // check the ledger of all stakers. + Bonded::::iter().for_each(|(_, ctrl)| assert_ledger_consistent(ctrl)) } fn check_exposures() { - // a check per validator to ensure the exposure struct is always sane. 
- let era = active_era(); - ErasStakers::::iter_prefix_values(era).for_each(|expo| { - assert_eq!( - expo.total as u128, - expo.own as u128 + expo.others.iter().map(|e| e.value as u128).sum::(), - "wrong total exposure.", - ); - }) + // a check per validator to ensure the exposure struct is always sane. + let era = active_era(); + ErasStakers::::iter_prefix_values(era).for_each(|expo| { + assert_eq!( + expo.total as u128, + expo.own as u128 + expo.others.iter().map(|e| e.value as u128).sum::(), + "wrong total exposure.", + ); + }) } fn check_nominators() { - // a check per nominator to ensure their entire stake is correctly distributed. Will only kick- - // in if the nomination was submitted before the current era. - let era = active_era(); - >::iter() - .filter_map(|(nominator, nomination)| - if nomination.submitted_in > era { - Some(nominator) - } else { - None - }) - .for_each(|nominator| { - // must be bonded. - assert_is_stash(nominator); - let mut sum = 0; - Session::validators() - .iter() - .map(|v| Staking::eras_stakers(era, v)) - .for_each(|e| { - let individual = e.others.iter().filter(|e| e.who == nominator).collect::>(); - let len = individual.len(); - match len { - 0 => { /* not supporting this validator at all. */ }, - 1 => sum += individual[0].value, - _ => panic!("nominator cannot back a validator more than once."), - }; - }); - - let nominator_stake = Staking::slashable_balance_of(&nominator); - // a nominator cannot over-spend. - assert!( - nominator_stake >= sum, - "failed: Nominator({}) stake({}) >= sum divided({})", - nominator, - nominator_stake, - sum, - ); - - let diff = nominator_stake - sum; - assert!(diff < 100); - }); + // a check per nominator to ensure their entire stake is correctly distributed. Will only kick- + // in if the nomination was submitted before the current era. 
+ let era = active_era(); + >::iter() + .filter_map(|(nominator, nomination)| { + if nomination.submitted_in > era { + Some(nominator) + } else { + None + } + }) + .for_each(|nominator| { + // must be bonded. + assert_is_stash(nominator); + let mut sum = 0; + Session::validators() + .iter() + .map(|v| Staking::eras_stakers(era, v)) + .for_each(|e| { + let individual = e + .others + .iter() + .filter(|e| e.who == nominator) + .collect::>(); + let len = individual.len(); + match len { + 0 => { /* not supporting this validator at all. */ } + 1 => sum += individual[0].value, + _ => panic!("nominator cannot back a validator more than once."), + }; + }); + + let nominator_stake = Staking::slashable_balance_of(&nominator); + // a nominator cannot over-spend. + assert!( + nominator_stake >= sum, + "failed: Nominator({}) stake({}) >= sum divided({})", + nominator, + nominator_stake, + sum, + ); + + let diff = nominator_stake - sum; + assert!(diff < 100); + }); } fn assert_is_stash(acc: AccountId) { - assert!(Staking::bonded(&acc).is_some(), "Not a stash."); + assert!(Staking::bonded(&acc).is_some(), "Not a stash."); } fn assert_ledger_consistent(ctrl: AccountId) { - // ensures ledger.total == ledger.active + sum(ledger.unlocking). - let ledger = Staking::ledger(ctrl).expect("Not a controller."); - let real_total: Balance = ledger.unlocking.iter().fold(ledger.active, |a, c| a + c.value); - assert_eq!(real_total, ledger.total); + // ensures ledger.total == ledger.active + sum(ledger.unlocking). 
+ let ledger = Staking::ledger(ctrl).expect("Not a controller."); + let real_total: Balance = ledger + .unlocking + .iter() + .fold(ledger.active, |a, c| a + c.value); + assert_eq!(real_total, ledger.total); } pub(crate) fn bond_validator(stash: AccountId, ctrl: AccountId, val: Balance) { - let _ = Balances::make_free_balance_be(&stash, val); - let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond( - Origin::signed(stash), - ctrl, - val, - RewardDestination::Controller, - )); - assert_ok!(Staking::validate( - Origin::signed(ctrl), - ValidatorPrefs::default() - )); + let _ = Balances::make_free_balance_be(&stash, val); + let _ = Balances::make_free_balance_be(&ctrl, val); + assert_ok!(Staking::bond( + Origin::signed(stash), + ctrl, + val, + RewardDestination::Controller, + )); + assert_ok!(Staking::validate( + Origin::signed(ctrl), + ValidatorPrefs::default() + )); } pub(crate) fn bond_nominator( - stash: AccountId, - ctrl: AccountId, - val: Balance, - target: Vec, + stash: AccountId, + ctrl: AccountId, + val: Balance, + target: Vec, ) { - let _ = Balances::make_free_balance_be(&stash, val); - let _ = Balances::make_free_balance_be(&ctrl, val); - assert_ok!(Staking::bond( - Origin::signed(stash), - ctrl, - val, - RewardDestination::Controller, - )); - assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); + let _ = Balances::make_free_balance_be(&stash, val); + let _ = Balances::make_free_balance_be(&ctrl, val); + assert_ok!(Staking::bond( + Origin::signed(stash), + ctrl, + val, + RewardDestination::Controller, + )); + assert_ok!(Staking::nominate(Origin::signed(ctrl), target)); } pub(crate) fn run_to_block(n: BlockNumber) { - Staking::on_finalize(System::block_number()); - for b in System::block_number() + 1..=n { - System::set_block_number(b); - Session::on_initialize(b); - Staking::on_initialize(b); - if b != n { - Staking::on_finalize(System::block_number()); - } - } + Staking::on_finalize(System::block_number()); + for b in 
System::block_number() + 1..=n { + System::set_block_number(b); + Session::on_initialize(b); + Staking::on_initialize(b); + if b != n { + Staking::on_finalize(System::block_number()); + } + } } pub(crate) fn advance_session() { - let current_index = Session::current_index(); - start_session(current_index + 1); + let current_index = Session::current_index(); + start_session(current_index + 1); } pub(crate) fn start_session(session_index: SessionIndex) { - assert_eq!(>::get(), 1, "start_session can only be used with session length 1."); - for i in Session::current_index()..session_index { - Staking::on_finalize(System::block_number()); - System::set_block_number((i + 1).into()); - Timestamp::set_timestamp(System::block_number() * 1000 + INIT_TIMESTAMP); - Session::on_initialize(System::block_number()); - Staking::on_initialize(System::block_number()); - } - - assert_eq!(Session::current_index(), session_index); + assert_eq!( + >::get(), + 1, + "start_session can only be used with session length 1." + ); + for i in Session::current_index()..session_index { + Staking::on_finalize(System::block_number()); + System::set_block_number((i + 1).into()); + Timestamp::set_timestamp(System::block_number() * 1000 + INIT_TIMESTAMP); + Session::on_initialize(System::block_number()); + Staking::on_initialize(System::block_number()); + } + + assert_eq!(Session::current_index(), session_index); } // This start and activate the era given. // Because the mock use pallet-session which delays session by one, this will be one session after // the election happened, not the first session after the election has happened. 
pub(crate) fn start_era(era_index: EraIndex) { - start_session((era_index * >::get()).into()); - assert_eq!(Staking::current_era().unwrap(), era_index); - assert_eq!(Staking::active_era().unwrap().index, era_index); + start_session((era_index * >::get()).into()); + assert_eq!(Staking::current_era().unwrap(), era_index); + assert_eq!(Staking::active_era().unwrap().index, era_index); } pub(crate) fn current_total_payout_for_duration(duration: u64) -> Balance { - inflation::compute_total_payout( - ::RewardCurve::get(), - Staking::eras_total_stake(Staking::active_era().unwrap().index), - Balances::total_issuance(), - duration, - ).0 + inflation::compute_total_payout( + ::RewardCurve::get(), + Staking::eras_total_stake(Staking::active_era().unwrap().index), + Balances::total_issuance(), + duration, + ) + .0 } pub(crate) fn reward_all_elected() { - let rewards = ::SessionInterface::validators() - .into_iter() - .map(|v| (v, 1)); + let rewards = ::SessionInterface::validators() + .into_iter() + .map(|v| (v, 1)); - >::reward_by_ids(rewards) + >::reward_by_ids(rewards) } pub(crate) fn validator_controllers() -> Vec { - Session::validators() - .into_iter() - .map(|s| Staking::bonded(&s).expect("no controller for validator")) - .collect() + Session::validators() + .into_iter() + .map(|s| Staking::bonded(&s).expect("no controller for validator")) + .collect() } pub(crate) fn on_offence_in_era( - offenders: &[OffenceDetails< - AccountId, - pallet_session::historical::IdentificationTuple, - >], - slash_fraction: &[Perbill], - era: EraIndex, + offenders: &[OffenceDetails< + AccountId, + pallet_session::historical::IdentificationTuple, + >], + slash_fraction: &[Perbill], + era: EraIndex, ) { - let bonded_eras = crate::BondedEras::get(); - for &(bonded_era, start_session) in bonded_eras.iter() { - if bonded_era == era { - let _ = Staking::on_offence(offenders, slash_fraction, start_session).unwrap(); - return; - } else if bonded_era > era { - break; - } - } - - if 
Staking::active_era().unwrap().index == era { - let _ = - Staking::on_offence( - offenders, - slash_fraction, - Staking::eras_start_session_index(era).unwrap() - ).unwrap(); - } else { - panic!("cannot slash in era {}", era); - } + let bonded_eras = crate::BondedEras::get(); + for &(bonded_era, start_session) in bonded_eras.iter() { + if bonded_era == era { + let _ = Staking::on_offence(offenders, slash_fraction, start_session).unwrap(); + return; + } else if bonded_era > era { + break; + } + } + + if Staking::active_era().unwrap().index == era { + let _ = Staking::on_offence( + offenders, + slash_fraction, + Staking::eras_start_session_index(era).unwrap(), + ) + .unwrap(); + } else { + panic!("cannot slash in era {}", era); + } } pub(crate) fn on_offence_now( - offenders: &[OffenceDetails>], - slash_fraction: &[Perbill], + offenders: &[OffenceDetails< + AccountId, + pallet_session::historical::IdentificationTuple, + >], + slash_fraction: &[Perbill], ) { - let now = Staking::active_era().unwrap().index; - on_offence_in_era(offenders, slash_fraction, now) + let now = Staking::active_era().unwrap().index; + on_offence_in_era(offenders, slash_fraction, now) } // winners will be chosen by simply their unweighted total backing stake. Nominator stake is // distributed evenly. 
pub(crate) fn horrible_phragmen_with_post_processing( - do_reduce: bool, + do_reduce: bool, ) -> (CompactAssignments, Vec, PhragmenScore) { - let mut backing_stake_of: BTreeMap = BTreeMap::new(); - - // self stake - >::iter().for_each(|(who, _p)| { - *backing_stake_of.entry(who).or_insert(Zero::zero()) += Staking::slashable_balance_of(&who) - }); - - // add nominator stuff - >::iter().for_each(|(who, nomination)| { - nomination.targets.iter().for_each(|v| { - *backing_stake_of.entry(*v).or_insert(Zero::zero()) += - Staking::slashable_balance_of(&who) - }) - }); - - // elect winners - let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); - sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); - let winners: Vec = sorted - .iter() - .cloned() - .take(Staking::validator_count() as usize) - .collect(); - - // create assignments - let mut staked_assignment: Vec> = Vec::new(); - >::iter().for_each(|(who, nomination)| { - let mut dist: Vec<(AccountId, ExtendedBalance)> = Vec::new(); - nomination.targets.iter().for_each(|v| { - if winners.iter().find(|w| *w == v).is_some() { - dist.push((*v, ExtendedBalance::zero())); - } - }); - - if dist.len() == 0 { - return; - } - - // assign real stakes. just split the stake. - let stake = Staking::slashable_balance_of(&who) as ExtendedBalance; - let mut sum: ExtendedBalance = Zero::zero(); - let dist_len = dist.len(); - { - dist.iter_mut().for_each(|(_, w)| { - let partial = stake / (dist_len as ExtendedBalance); - *w = partial; - sum += partial; - }); - } - - // assign the leftover to last. - { - let leftover = stake - sum; - let last = dist.last_mut().unwrap(); - last.1 += leftover; - } - - staked_assignment.push(StakedAssignment { - who, - distribution: dist, - }); - }); - - // Ensure that this result is worse than seq-phragmen. Otherwise, it should not have been used - // for testing. 
- let score = { - let (_, _, better_score) = prepare_submission_with(true, |_| {}); - - let support = build_support_map::(&winners, &staked_assignment).0; - let score = evaluate_support(&support); - - assert!(sp_phragmen::is_score_better(score, better_score)); - - score - }; - - if do_reduce { - reduce(&mut staked_assignment); - } - - let snapshot_validators = Staking::snapshot_validators().unwrap(); - let snapshot_nominators = Staking::snapshot_nominators().unwrap(); - let nominator_index = |a: &AccountId| -> Option { - snapshot_nominators.iter().position(|x| x == a).map(|i| i as NominatorIndex) - }; - let validator_index = |a: &AccountId| -> Option { - snapshot_validators.iter().position(|x| x == a).map(|i| i as ValidatorIndex) - }; - - // convert back to ratio assignment. This takes less space. - let assignments_reduced = - sp_phragmen::assignment_staked_to_ratio::(staked_assignment); - - let compact = - CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) - .unwrap(); - - // winner ids to index - let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); - - (compact, winners, score) + let mut backing_stake_of: BTreeMap = BTreeMap::new(); + + // self stake + >::iter().for_each(|(who, _p)| { + *backing_stake_of.entry(who).or_insert(Zero::zero()) += Staking::slashable_balance_of(&who) + }); + + // add nominator stuff + >::iter().for_each(|(who, nomination)| { + nomination.targets.iter().for_each(|v| { + *backing_stake_of.entry(*v).or_insert(Zero::zero()) += + Staking::slashable_balance_of(&who) + }) + }); + + // elect winners + let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); + sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); + let winners: Vec = sorted + .iter() + .cloned() + .take(Staking::validator_count() as usize) + .collect(); + + // create assignments + let mut staked_assignment: Vec> = Vec::new(); + >::iter().for_each(|(who, nomination)| { + let mut dist: 
Vec<(AccountId, ExtendedBalance)> = Vec::new(); + nomination.targets.iter().for_each(|v| { + if winners.iter().find(|w| *w == v).is_some() { + dist.push((*v, ExtendedBalance::zero())); + } + }); + + if dist.len() == 0 { + return; + } + + // assign real stakes. just split the stake. + let stake = Staking::slashable_balance_of(&who) as ExtendedBalance; + let mut sum: ExtendedBalance = Zero::zero(); + let dist_len = dist.len(); + { + dist.iter_mut().for_each(|(_, w)| { + let partial = stake / (dist_len as ExtendedBalance); + *w = partial; + sum += partial; + }); + } + + // assign the leftover to last. + { + let leftover = stake - sum; + let last = dist.last_mut().unwrap(); + last.1 += leftover; + } + + staked_assignment.push(StakedAssignment { + who, + distribution: dist, + }); + }); + + // Ensure that this result is worse than seq-phragmen. Otherwise, it should not have been used + // for testing. + let score = { + let (_, _, better_score) = prepare_submission_with(true, |_| {}); + + let support = build_support_map::(&winners, &staked_assignment).0; + let score = evaluate_support(&support); + + assert!(sp_phragmen::is_score_better(score, better_score)); + + score + }; + + if do_reduce { + reduce(&mut staked_assignment); + } + + let snapshot_validators = Staking::snapshot_validators().unwrap(); + let snapshot_nominators = Staking::snapshot_nominators().unwrap(); + let nominator_index = |a: &AccountId| -> Option { + snapshot_nominators + .iter() + .position(|x| x == a) + .map(|i| i as NominatorIndex) + }; + let validator_index = |a: &AccountId| -> Option { + snapshot_validators + .iter() + .position(|x| x == a) + .map(|i| i as ValidatorIndex) + }; + + // convert back to ratio assignment. This takes less space. 
+ let assignments_reduced = + sp_phragmen::assignment_staked_to_ratio::(staked_assignment); + + let compact = + CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) + .unwrap(); + + // winner ids to index + let winners = winners + .into_iter() + .map(|w| validator_index(&w).unwrap()) + .collect::>(); + + (compact, winners, score) } // Note: this should always logically reproduce [`offchain_election::prepare_submission`], yet we // cannot do it since we want to have `tweak` injected into the process. pub(crate) fn prepare_submission_with( - do_reduce: bool, - tweak: impl FnOnce(&mut Vec>), + do_reduce: bool, + tweak: impl FnOnce(&mut Vec>), ) -> (CompactAssignments, Vec, PhragmenScore) { - // run phragmen on the default stuff. - let sp_phragmen::PhragmenResult { - winners, - assignments, - } = Staking::do_phragmen::().unwrap(); - let winners = winners.into_iter().map(|(w, _)| w).collect::>(); - - let stake_of = |who: &AccountId| -> VoteWeight { - >::convert( - Staking::slashable_balance_of(&who) - ) - }; - let mut staked = sp_phragmen::assignment_ratio_to_staked(assignments, stake_of); - - // apply custom tweaks. awesome for testing. - tweak(&mut staked); - - if do_reduce { - reduce(&mut staked); - } - - // convert back to ratio assignment. This takes less space. 
- let snapshot_validators = Staking::snapshot_validators().expect("snapshot not created."); - let snapshot_nominators = Staking::snapshot_nominators().expect("snapshot not created."); - let nominator_index = |a: &AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .map_or_else( - || { println!("unable to find nominator index for {:?}", a); None }, - |i| Some(i as NominatorIndex), - ) - }; - let validator_index = |a: &AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .map_or_else( - || { println!("unable to find validator index for {:?}", a); None }, - |i| Some(i as ValidatorIndex), - ) - }; - - let assignments_reduced = sp_phragmen::assignment_staked_to_ratio(staked); - - // re-compute score by converting, yet again, into staked type - let score = { - let staked = sp_phragmen::assignment_ratio_to_staked( - assignments_reduced.clone(), - Staking::slashable_balance_of_vote_weight, - ); - - let (support_map, _) = build_support_map::( - winners.as_slice(), - staked.as_slice(), - ); - evaluate_support::(&support_map) - }; - - let compact = - CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) - .map_err(|e| { println!("error in compact: {:?}", e); e }) - .expect("Failed to create compact"); - - - // winner ids to index - let winners = winners.into_iter().map(|w| validator_index(&w).unwrap()).collect::>(); - - (compact, winners, score) + // run phragmen on the default stuff. + let sp_phragmen::PhragmenResult { + winners, + assignments, + } = Staking::do_phragmen::().unwrap(); + let winners = winners + .into_iter() + .map(|(w, _)| w) + .collect::>(); + + let stake_of = |who: &AccountId| -> VoteWeight { + >::convert( + Staking::slashable_balance_of(&who), + ) + }; + let mut staked = sp_phragmen::assignment_ratio_to_staked(assignments, stake_of); + + // apply custom tweaks. awesome for testing. 
+ tweak(&mut staked); + + if do_reduce { + reduce(&mut staked); + } + + // convert back to ratio assignment. This takes less space. + let snapshot_validators = Staking::snapshot_validators().expect("snapshot not created."); + let snapshot_nominators = Staking::snapshot_nominators().expect("snapshot not created."); + let nominator_index = |a: &AccountId| -> Option { + snapshot_nominators.iter().position(|x| x == a).map_or_else( + || { + println!("unable to find nominator index for {:?}", a); + None + }, + |i| Some(i as NominatorIndex), + ) + }; + let validator_index = |a: &AccountId| -> Option { + snapshot_validators.iter().position(|x| x == a).map_or_else( + || { + println!("unable to find validator index for {:?}", a); + None + }, + |i| Some(i as ValidatorIndex), + ) + }; + + let assignments_reduced = sp_phragmen::assignment_staked_to_ratio(staked); + + // re-compute score by converting, yet again, into staked type + let score = { + let staked = sp_phragmen::assignment_ratio_to_staked( + assignments_reduced.clone(), + Staking::slashable_balance_of_vote_weight, + ); + + let (support_map, _) = + build_support_map::(winners.as_slice(), staked.as_slice()); + evaluate_support::(&support_map) + }; + + let compact = + CompactAssignments::from_assignment(assignments_reduced, nominator_index, validator_index) + .map_err(|e| { + println!("error in compact: {:?}", e); + e + }) + .expect("Failed to create compact"); + + // winner ids to index + let winners = winners + .into_iter() + .map(|w| validator_index(&w).unwrap()) + .collect::>(); + + (compact, winners, score) } /// Make all validator and nominator request their payment pub(crate) fn make_all_reward_payment_before_migration(era: EraIndex) { - let validators_with_reward = ErasRewardPoints::::get(era).individual.keys() - .cloned() - .collect::>(); - - // reward nominators - let mut nominator_controllers = HashMap::new(); - for validator in Staking::eras_reward_points(era).individual.keys() { - let validator_exposure = 
Staking::eras_stakers_clipped(era, validator); - for (nom_index, nom) in validator_exposure.others.iter().enumerate() { - if let Some(nom_ctrl) = Staking::bonded(nom.who) { - nominator_controllers.entry(nom_ctrl) - .or_insert(vec![]) - .push((validator.clone(), nom_index as u32)); - } - } - } - for (nominator_controller, validators_with_nom_index) in nominator_controllers { - assert_ok!(Staking::payout_nominator( - Origin::signed(nominator_controller), - era, - validators_with_nom_index, - )); - } - - // reward validators - for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { - assert_ok!(Staking::payout_validator(Origin::signed(validator_controller), era)); - } + let validators_with_reward = ErasRewardPoints::::get(era) + .individual + .keys() + .cloned() + .collect::>(); + + // reward nominators + let mut nominator_controllers = HashMap::new(); + for validator in Staking::eras_reward_points(era).individual.keys() { + let validator_exposure = Staking::eras_stakers_clipped(era, validator); + for (nom_index, nom) in validator_exposure.others.iter().enumerate() { + if let Some(nom_ctrl) = Staking::bonded(nom.who) { + nominator_controllers + .entry(nom_ctrl) + .or_insert(vec![]) + .push((validator.clone(), nom_index as u32)); + } + } + } + for (nominator_controller, validators_with_nom_index) in nominator_controllers { + assert_ok!(Staking::payout_nominator( + Origin::signed(nominator_controller), + era, + validators_with_nom_index, + )); + } + + // reward validators + for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { + assert_ok!(Staking::payout_validator( + Origin::signed(validator_controller), + era + )); + } } /// Make all validator and nominator request their payment pub(crate) fn make_all_reward_payment(era: EraIndex) { - let validators_with_reward = ErasRewardPoints::::get(era).individual.keys() - .cloned() - .collect::>(); - - // reward validators - for validator_controller in 
validators_with_reward.iter().filter_map(Staking::bonded) { - let ledger = >::get(&validator_controller).unwrap(); - - assert_ok!(Staking::payout_stakers(Origin::signed(1337), ledger.stash, era)); - } + let validators_with_reward = ErasRewardPoints::::get(era) + .individual + .keys() + .cloned() + .collect::>(); + + // reward validators + for validator_controller in validators_with_reward.iter().filter_map(Staking::bonded) { + let ledger = >::get(&validator_controller).unwrap(); + + assert_ok!(Staking::payout_stakers( + Origin::signed(1337), + ledger.stash, + era + )); + } } #[macro_export] macro_rules! assert_session_era { - ($session:expr, $era:expr) => { - assert_eq!( - Session::current_index(), - $session, - "wrong session {} != {}", - Session::current_index(), - $session, - ); - assert_eq!( - Staking::active_era().unwrap().index, - $era, - "wrong active era {} != {}", - Staking::active_era().unwrap().index, - $era, - ); - }; + ($session:expr, $era:expr) => { + assert_eq!( + Session::current_index(), + $session, + "wrong session {} != {}", + Session::current_index(), + $session, + ); + assert_eq!( + Staking::active_era().unwrap().index, + $era, + "wrong active era {} != {}", + Staking::active_era().unwrap().index, + $era, + ); + }; } diff --git a/frame/staking/src/offchain_election.rs b/frame/staking/src/offchain_election.rs index 4d8ccc6f25..530fe2f7f3 100644 --- a/frame/staking/src/offchain_election.rs +++ b/frame/staking/src/offchain_election.rs @@ -17,12 +17,12 @@ //! Helpers for offchain worker election. 
use crate::{ - Call, CompactAssignments, Module, NominatorIndex, OffchainAccuracy, Trait, ValidatorIndex, + Call, CompactAssignments, Module, NominatorIndex, OffchainAccuracy, Trait, ValidatorIndex, }; use frame_system::offchain::SubmitUnsignedTransaction; use sp_phragmen::{ - build_support_map, evaluate_support, reduce, Assignment, ExtendedBalance, PhragmenResult, - PhragmenScore, + build_support_map, evaluate_support, reduce, Assignment, ExtendedBalance, PhragmenResult, + PhragmenScore, }; use sp_runtime::offchain::storage::StorageValueRef; use sp_runtime::PerThing; @@ -32,23 +32,23 @@ use sp_std::{convert::TryInto, prelude::*}; /// Error types related to the offchain election machinery. #[derive(RuntimeDebug)] pub enum OffchainElectionError { - /// Phragmen election returned None. This means less candidate that minimum number of needed - /// validators were present. The chain is in trouble and not much that we can do about it. - ElectionFailed, - /// Submission to the transaction pool failed. - PoolSubmissionFailed, - /// The snapshot data is not available. - SnapshotUnavailable, - /// Error from phragmen crate. This usually relates to compact operation. - PhragmenError(sp_phragmen::Error), - /// One of the computed winners is invalid. - InvalidWinner, + /// Phragmen election returned None. This means less candidate that minimum number of needed + /// validators were present. The chain is in trouble and not much that we can do about it. + ElectionFailed, + /// Submission to the transaction pool failed. + PoolSubmissionFailed, + /// The snapshot data is not available. + SnapshotUnavailable, + /// Error from phragmen crate. This usually relates to compact operation. + PhragmenError(sp_phragmen::Error), + /// One of the computed winners is invalid. 
+ InvalidWinner, } impl From for OffchainElectionError { - fn from(e: sp_phragmen::Error) -> Self { - Self::PhragmenError(e) - } + fn from(e: sp_phragmen::Error) -> Self { + Self::PhragmenError(e) + } } /// Storage key used to store the persistent offchain worker status. @@ -66,154 +66,152 @@ pub(crate) const DEFAULT_LONGEVITY: u64 = 25; /// /// Returns `Ok(())` if offchain worker should happen, `Err(reason)` otherwise. pub(crate) fn set_check_offchain_execution_status( - now: T::BlockNumber, + now: T::BlockNumber, ) -> Result<(), &'static str> { - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - let threshold = T::BlockNumber::from(OFFCHAIN_REPEAT); - - let mutate_stat = - storage.mutate::<_, &'static str, _>(|maybe_head: Option>| { - match maybe_head { - Some(Some(head)) if now < head => Err("fork."), - Some(Some(head)) if now >= head && now <= head + threshold => { - Err("recently executed.") - } - Some(Some(head)) if now > head + threshold => { - // we can run again now. Write the new head. - Ok(now) - } - _ => { - // value doesn't exists. Probably this node just booted up. Write, and run - Ok(now) - } - } - }); - - match mutate_stat { - // all good - Ok(Ok(_)) => Ok(()), - // failed to write. - Ok(Err(_)) => Err("failed to write to offchain db."), - // fork etc. - Err(why) => Err(why), - } + let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + let threshold = T::BlockNumber::from(OFFCHAIN_REPEAT); + + let mutate_stat = + storage.mutate::<_, &'static str, _>(|maybe_head: Option>| { + match maybe_head { + Some(Some(head)) if now < head => Err("fork."), + Some(Some(head)) if now >= head && now <= head + threshold => { + Err("recently executed.") + } + Some(Some(head)) if now > head + threshold => { + // we can run again now. Write the new head. + Ok(now) + } + _ => { + // value doesn't exists. Probably this node just booted up. 
Write, and run + Ok(now) + } + } + }); + + match mutate_stat { + // all good + Ok(Ok(_)) => Ok(()), + // failed to write. + Ok(Err(_)) => Err("failed to write to offchain db."), + // fork etc. + Err(why) => Err(why), + } } /// The internal logic of the offchain worker of this module. This runs the phragmen election, /// compacts and reduces the solution, computes the score and submits it back to the chain as an /// unsigned transaction, without any signature. pub(crate) fn compute_offchain_election() -> Result<(), OffchainElectionError> { - // compute raw solution. Note that we use `OffchainAccuracy`. - let PhragmenResult { - winners, - assignments, - } = >::do_phragmen::() - .ok_or(OffchainElectionError::ElectionFailed)?; - - // process and prepare it for submission. - let (winners, compact, score) = prepare_submission::(assignments, winners, true)?; - - // defensive-only: current era can never be none except genesis. - let current_era = >::current_era().unwrap_or_default(); - - // send it. - let call: ::Call = Call::submit_election_solution_unsigned( - winners, - compact, - score, - current_era, - ).into(); - - T::SubmitTransaction::submit_unsigned(call) - .map_err(|_| OffchainElectionError::PoolSubmissionFailed) + // compute raw solution. Note that we use `OffchainAccuracy`. + let PhragmenResult { + winners, + assignments, + } = >::do_phragmen::() + .ok_or(OffchainElectionError::ElectionFailed)?; + + // process and prepare it for submission. + let (winners, compact, score) = prepare_submission::(assignments, winners, true)?; + + // defensive-only: current era can never be none except genesis. + let current_era = >::current_era().unwrap_or_default(); + + // send it. 
+ let call: ::Call = + Call::submit_election_solution_unsigned(winners, compact, score, current_era).into(); + + T::SubmitTransaction::submit_unsigned(call) + .map_err(|_| OffchainElectionError::PoolSubmissionFailed) } /// Takes a phragmen result and spits out some data that can be submitted to the chain. /// /// This does a lot of stuff; read the inline comments. pub fn prepare_submission( - assignments: Vec>, - winners: Vec<(T::AccountId, ExtendedBalance)>, - do_reduce: bool, -) -> Result<(Vec, CompactAssignments, PhragmenScore), OffchainElectionError> where - ExtendedBalance: From<::Inner>, + assignments: Vec>, + winners: Vec<(T::AccountId, ExtendedBalance)>, + do_reduce: bool, +) -> Result<(Vec, CompactAssignments, PhragmenScore), OffchainElectionError> +where + ExtendedBalance: From<::Inner>, { - // make sure that the snapshot is available. - let snapshot_validators = - >::snapshot_validators().ok_or(OffchainElectionError::SnapshotUnavailable)?; - let snapshot_nominators = - >::snapshot_nominators().ok_or(OffchainElectionError::SnapshotUnavailable)?; - - // all helper closures - let nominator_index = |a: &T::AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let validator_index = |a: &T::AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - - // Clean winners. - let winners = winners - .into_iter() - .map(|(w, _)| w) - .collect::>(); - - // convert into absolute value and to obtain the reduced version. - let mut staked = sp_phragmen::assignment_ratio_to_staked( - assignments, - >::slashable_balance_of_vote_weight, - ); - - if do_reduce { - reduce(&mut staked); - } - - // Convert back to ratio assignment. This takes less space. - let low_accuracy_assignment = sp_phragmen::assignment_staked_to_ratio(staked); - - // convert back to staked to compute the score in the receiver's accuracy. 
This can be done - // nicer, for now we do it as such since this code is not time-critical. This ensure that the - // score _predicted_ here is the same as the one computed on chain and you will not get a - // `PhragmenBogusScore` error. This is totally NOT needed if we don't do reduce. This whole - // _accuracy glitch_ happens because reduce breaks that assumption of rounding and **scale**. - // The initial phragmen results are computed in `OffchainAccuracy` and the initial `staked` - // assignment set is also all multiples of this value. After reduce, this no longer holds. Hence - // converting to ratio thereafter is not trivially reversible. - let score = { - let staked = sp_phragmen::assignment_ratio_to_staked( - low_accuracy_assignment.clone(), - >::slashable_balance_of_vote_weight, - ); - - let (support_map, _) = build_support_map::(&winners, &staked); - evaluate_support::(&support_map) - }; - - // compact encode the assignment. - let compact = CompactAssignments::from_assignment( - low_accuracy_assignment, - nominator_index, - validator_index, - ).map_err(|e| OffchainElectionError::from(e))?; - - // winners to index. Use a simple for loop for a more expressive early exit in case of error. - let mut winners_indexed: Vec = Vec::with_capacity(winners.len()); - for w in winners { - if let Some(idx) = snapshot_validators.iter().position(|v| *v == w) { - let compact_index: ValidatorIndex = idx - .try_into() - .map_err(|_| OffchainElectionError::InvalidWinner)?; - winners_indexed.push(compact_index); - } else { - return Err(OffchainElectionError::InvalidWinner); - } - } - - Ok((winners_indexed, compact, score)) + // make sure that the snapshot is available. 
+ let snapshot_validators = + >::snapshot_validators().ok_or(OffchainElectionError::SnapshotUnavailable)?; + let snapshot_nominators = + >::snapshot_nominators().ok_or(OffchainElectionError::SnapshotUnavailable)?; + + // all helper closures + let nominator_index = |a: &T::AccountId| -> Option { + snapshot_nominators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + let validator_index = |a: &T::AccountId| -> Option { + snapshot_validators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + + // Clean winners. + let winners = winners + .into_iter() + .map(|(w, _)| w) + .collect::>(); + + // convert into absolute value and to obtain the reduced version. + let mut staked = sp_phragmen::assignment_ratio_to_staked( + assignments, + >::slashable_balance_of_vote_weight, + ); + + if do_reduce { + reduce(&mut staked); + } + + // Convert back to ratio assignment. This takes less space. + let low_accuracy_assignment = sp_phragmen::assignment_staked_to_ratio(staked); + + // convert back to staked to compute the score in the receiver's accuracy. This can be done + // nicer, for now we do it as such since this code is not time-critical. This ensure that the + // score _predicted_ here is the same as the one computed on chain and you will not get a + // `PhragmenBogusScore` error. This is totally NOT needed if we don't do reduce. This whole + // _accuracy glitch_ happens because reduce breaks that assumption of rounding and **scale**. + // The initial phragmen results are computed in `OffchainAccuracy` and the initial `staked` + // assignment set is also all multiples of this value. After reduce, this no longer holds. Hence + // converting to ratio thereafter is not trivially reversible. 
+ let score = { + let staked = sp_phragmen::assignment_ratio_to_staked( + low_accuracy_assignment.clone(), + >::slashable_balance_of_vote_weight, + ); + + let (support_map, _) = build_support_map::(&winners, &staked); + evaluate_support::(&support_map) + }; + + // compact encode the assignment. + let compact = CompactAssignments::from_assignment( + low_accuracy_assignment, + nominator_index, + validator_index, + ) + .map_err(|e| OffchainElectionError::from(e))?; + + // winners to index. Use a simple for loop for a more expressive early exit in case of error. + let mut winners_indexed: Vec = Vec::with_capacity(winners.len()); + for w in winners { + if let Some(idx) = snapshot_validators.iter().position(|v| *v == w) { + let compact_index: ValidatorIndex = idx + .try_into() + .map_err(|_| OffchainElectionError::InvalidWinner)?; + winners_indexed.push(compact_index); + } else { + return Err(OffchainElectionError::InvalidWinner); + } + } + + Ok((winners_indexed, compact, score)) } diff --git a/frame/staking/src/slashing.rs b/frame/staking/src/slashing.rs index 26f0828989..4666ca840f 100644 --- a/frame/staking/src/slashing.rs +++ b/frame/staking/src/slashing.rs @@ -49,16 +49,19 @@ //! 
Based on research at https://research.web3.foundation/en/latest/polkadot/slashing/npos/ use super::{ - EraIndex, Trait, Module, Store, BalanceOf, Exposure, Perbill, SessionInterface, - NegativeImbalanceOf, UnappliedSlash, + BalanceOf, EraIndex, Exposure, Module, NegativeImbalanceOf, Perbill, SessionInterface, Store, + Trait, UnappliedSlash, }; -use sp_runtime::{traits::{Zero, Saturating}, RuntimeDebug}; +use codec::{Decode, Encode}; use frame_support::{ - StorageMap, StorageDoubleMap, - traits::{Currency, OnUnbalanced, Imbalance}, + traits::{Currency, Imbalance, OnUnbalanced}, + StorageDoubleMap, StorageMap, +}; +use sp_runtime::{ + traits::{Saturating, Zero}, + RuntimeDebug, }; use sp_std::vec::Vec; -use codec::{Encode, Decode}; /// The proportion of the slashing reward to be paid out on the first slashing detection. /// This is f_1 in the paper. @@ -71,140 +74,154 @@ pub type SpanIndex = u32; #[derive(Encode, Decode)] #[cfg_attr(test, derive(Debug, PartialEq))] pub(crate) struct SlashingSpan { - pub(crate) index: SpanIndex, - pub(crate) start: EraIndex, - pub(crate) length: Option, // the ongoing slashing span has indeterminate length. + pub(crate) index: SpanIndex, + pub(crate) start: EraIndex, + pub(crate) length: Option, // the ongoing slashing span has indeterminate length. } impl SlashingSpan { - fn contains_era(&self, era: EraIndex) -> bool { - self.start <= era && self.length.map_or(true, |l| self.start + l > era) - } + fn contains_era(&self, era: EraIndex) -> bool { + self.start <= era && self.length.map_or(true, |l| self.start + l > era) + } } /// An encoding of all of a nominator's slashing spans. #[derive(Encode, Decode, RuntimeDebug)] pub struct SlashingSpans { - // the index of the current slashing span of the nominator. different for - // every stash, resets when the account hits free balance 0. - span_index: SpanIndex, - // the start era of the most recent (ongoing) slashing span. 
- last_start: EraIndex, - // the last era at which a non-zero slash occurred. - last_nonzero_slash: EraIndex, - // all prior slashing spans' start indices, in reverse order (most recent first) - // encoded as offsets relative to the slashing span after it. - prior: Vec, + // the index of the current slashing span of the nominator. different for + // every stash, resets when the account hits free balance 0. + span_index: SpanIndex, + // the start era of the most recent (ongoing) slashing span. + last_start: EraIndex, + // the last era at which a non-zero slash occurred. + last_nonzero_slash: EraIndex, + // all prior slashing spans' start indices, in reverse order (most recent first) + // encoded as offsets relative to the slashing span after it. + prior: Vec, } impl SlashingSpans { - // creates a new record of slashing spans for a stash, starting at the beginning - // of the bonding period, relative to now. - fn new(window_start: EraIndex) -> Self { - SlashingSpans { - span_index: 0, - last_start: window_start, - // initialize to zero, as this structure is lazily created until - // the first slash is applied. setting equal to `window_start` would - // put a time limit on nominations. - last_nonzero_slash: 0, - prior: Vec::new(), - } - } - - // update the slashing spans to reflect the start of a new span at the era after `now` - // returns `true` if a new span was started, `false` otherwise. `false` indicates - // that internal state is unchanged. - fn end_span(&mut self, now: EraIndex) -> bool { - let next_start = now + 1; - if next_start <= self.last_start { return false } - - let last_length = next_start - self.last_start; - self.prior.insert(0, last_length); - self.last_start = next_start; - self.span_index += 1; - true - } - - // an iterator over all slashing spans in _reverse_ order - most recent first. 
- pub(crate) fn iter(&'_ self) -> impl Iterator + '_ { - let mut last_start = self.last_start; - let mut index = self.span_index; - let last = SlashingSpan { index, start: last_start, length: None }; - let prior = self.prior.iter().cloned().map(move |length| { - let start = last_start - length; - last_start = start; - index -= 1; - - SlashingSpan { index, start, length: Some(length) } - }); - - sp_std::iter::once(last).chain(prior) - } - - /// Yields the era index where the most recent non-zero slash occurred. - pub fn last_nonzero_slash(&self) -> EraIndex { - self.last_nonzero_slash - } - - // prune the slashing spans against a window, whose start era index is given. - // - // If this returns `Some`, then it includes a range start..end of all the span - // indices which were pruned. - fn prune(&mut self, window_start: EraIndex) -> Option<(SpanIndex, SpanIndex)> { - let old_idx = self.iter() - .skip(1) // skip ongoing span. - .position(|span| span.length.map_or(false, |len| span.start + len <= window_start)); - - let earliest_span_index = self.span_index - self.prior.len() as SpanIndex; - let pruned = match old_idx { - Some(o) => { - self.prior.truncate(o); - let new_earliest = self.span_index - self.prior.len() as SpanIndex; - Some((earliest_span_index, new_earliest)) - } - None => None, - }; - - // readjust the ongoing span, if it started before the beginning of the window. - self.last_start = sp_std::cmp::max(self.last_start, window_start); - pruned - } + // creates a new record of slashing spans for a stash, starting at the beginning + // of the bonding period, relative to now. + fn new(window_start: EraIndex) -> Self { + SlashingSpans { + span_index: 0, + last_start: window_start, + // initialize to zero, as this structure is lazily created until + // the first slash is applied. setting equal to `window_start` would + // put a time limit on nominations. 
+ last_nonzero_slash: 0, + prior: Vec::new(), + } + } + + // update the slashing spans to reflect the start of a new span at the era after `now` + // returns `true` if a new span was started, `false` otherwise. `false` indicates + // that internal state is unchanged. + fn end_span(&mut self, now: EraIndex) -> bool { + let next_start = now + 1; + if next_start <= self.last_start { + return false; + } + + let last_length = next_start - self.last_start; + self.prior.insert(0, last_length); + self.last_start = next_start; + self.span_index += 1; + true + } + + // an iterator over all slashing spans in _reverse_ order - most recent first. + pub(crate) fn iter(&'_ self) -> impl Iterator + '_ { + let mut last_start = self.last_start; + let mut index = self.span_index; + let last = SlashingSpan { + index, + start: last_start, + length: None, + }; + let prior = self.prior.iter().cloned().map(move |length| { + let start = last_start - length; + last_start = start; + index -= 1; + + SlashingSpan { + index, + start, + length: Some(length), + } + }); + + sp_std::iter::once(last).chain(prior) + } + + /// Yields the era index where the most recent non-zero slash occurred. + pub fn last_nonzero_slash(&self) -> EraIndex { + self.last_nonzero_slash + } + + // prune the slashing spans against a window, whose start era index is given. + // + // If this returns `Some`, then it includes a range start..end of all the span + // indices which were pruned. + fn prune(&mut self, window_start: EraIndex) -> Option<(SpanIndex, SpanIndex)> { + let old_idx = self + .iter() + .skip(1) // skip ongoing span. 
+ .position(|span| { + span.length + .map_or(false, |len| span.start + len <= window_start) + }); + + let earliest_span_index = self.span_index - self.prior.len() as SpanIndex; + let pruned = match old_idx { + Some(o) => { + self.prior.truncate(o); + let new_earliest = self.span_index - self.prior.len() as SpanIndex; + Some((earliest_span_index, new_earliest)) + } + None => None, + }; + + // readjust the ongoing span, if it started before the beginning of the window. + self.last_start = sp_std::cmp::max(self.last_start, window_start); + pruned + } } /// A slashing-span record for a particular stash. #[derive(Encode, Decode, Default)] pub(crate) struct SpanRecord { - slashed: Balance, - paid_out: Balance, + slashed: Balance, + paid_out: Balance, } impl SpanRecord { - /// The value of stash balance slashed in this span. - #[cfg(test)] - pub(crate) fn amount_slashed(&self) -> &Balance { - &self.slashed - } + /// The value of stash balance slashed in this span. + #[cfg(test)] + pub(crate) fn amount_slashed(&self) -> &Balance { + &self.slashed + } } /// Parameters for performing a slash. #[derive(Clone)] pub(crate) struct SlashParams<'a, T: 'a + Trait> { - /// The stash account being slashed. - pub(crate) stash: &'a T::AccountId, - /// The proportion of the slash. - pub(crate) slash: Perbill, - /// The exposure of the stash and all nominators. - pub(crate) exposure: &'a Exposure>, - /// The era where the offence occurred. - pub(crate) slash_era: EraIndex, - /// The first era in the current bonding period. - pub(crate) window_start: EraIndex, - /// The current era. - pub(crate) now: EraIndex, - /// The maximum percentage of a slash that ever gets paid out. - /// This is f_inf in the paper. - pub(crate) reward_proportion: Perbill, + /// The stash account being slashed. + pub(crate) stash: &'a T::AccountId, + /// The proportion of the slash. + pub(crate) slash: Perbill, + /// The exposure of the stash and all nominators. 
+ pub(crate) exposure: &'a Exposure>, + /// The era where the offence occurred. + pub(crate) slash_era: EraIndex, + /// The first era in the current bonding period. + pub(crate) window_start: EraIndex, + /// The current era. + pub(crate) now: EraIndex, + /// The maximum percentage of a slash that ever gets paid out. + /// This is f_inf in the paper. + pub(crate) reward_proportion: Perbill, } /// Computes a slash of a validator and nominators. It returns an unapplied @@ -213,201 +230,182 @@ pub(crate) struct SlashParams<'a, T: 'a + Trait> { /// /// The pending slash record returned does not have initialized reporters. Those have /// to be set at a higher level, if any. -pub(crate) fn compute_slash(params: SlashParams) - -> Option>> -{ - let SlashParams { - stash, - slash, - exposure, - slash_era, - window_start, - now, - reward_proportion, - } = params.clone(); - - let mut reward_payout = Zero::zero(); - let mut val_slashed = Zero::zero(); - - // is the slash amount here a maximum for the era? - let own_slash = slash * exposure.own; - if slash * exposure.total == Zero::zero() { - // kick out the validator even if they won't be slashed, - // as long as the misbehavior is from their most recent slashing span. - kick_out_if_recent::(params); - return None; - } - - let (prior_slash_p, _era_slash) = as Store>::ValidatorSlashInEra::get( - &slash_era, - stash, - ).unwrap_or((Perbill::zero(), Zero::zero())); - - // compare slash proportions rather than slash values to avoid issues due to rounding - // error. - if slash.deconstruct() > prior_slash_p.deconstruct() { - as Store>::ValidatorSlashInEra::insert( - &slash_era, - stash, - &(slash, own_slash), - ); - } else { - // we slash based on the max in era - this new event is not the max, - // so neither the validator or any nominators will need an update. - // - // this does lead to a divergence of our system from the paper, which - // pays out some reward even if the latest report is not max-in-era. 
- // we opt to avoid the nominator lookups and edits and leave more rewards - // for more drastic misbehavior. - return None; - } - - // apply slash to validator. - { - let mut spans = fetch_spans::( - stash, - window_start, - &mut reward_payout, - &mut val_slashed, - reward_proportion, - ); - - let target_span = spans.compare_and_update_span_slash( - slash_era, - own_slash, - ); - - if target_span == Some(spans.span_index()) { - // misbehavior occurred within the current slashing span - take appropriate - // actions. - - // chill the validator - it misbehaved in the current span and should - // not continue in the next election. also end the slashing span. - spans.end_span(now); - >::chill_stash(stash); - - // make sure to disable validator till the end of this session - if T::SessionInterface::disable_validator(stash).unwrap_or(false) { - // force a new era, to select a new validator set - >::ensure_new_era() - } - } - } - - let mut nominators_slashed = Vec::new(); - reward_payout += slash_nominators::(params, prior_slash_p, &mut nominators_slashed); - - Some(UnappliedSlash { - validator: stash.clone(), - own: val_slashed, - others: nominators_slashed, - reporters: Vec::new(), - payout: reward_payout, - }) +pub(crate) fn compute_slash( + params: SlashParams, +) -> Option>> { + let SlashParams { + stash, + slash, + exposure, + slash_era, + window_start, + now, + reward_proportion, + } = params.clone(); + + let mut reward_payout = Zero::zero(); + let mut val_slashed = Zero::zero(); + + // is the slash amount here a maximum for the era? + let own_slash = slash * exposure.own; + if slash * exposure.total == Zero::zero() { + // kick out the validator even if they won't be slashed, + // as long as the misbehavior is from their most recent slashing span. 
+ kick_out_if_recent::(params); + return None; + } + + let (prior_slash_p, _era_slash) = + as Store>::ValidatorSlashInEra::get(&slash_era, stash) + .unwrap_or((Perbill::zero(), Zero::zero())); + + // compare slash proportions rather than slash values to avoid issues due to rounding + // error. + if slash.deconstruct() > prior_slash_p.deconstruct() { + as Store>::ValidatorSlashInEra::insert(&slash_era, stash, &(slash, own_slash)); + } else { + // we slash based on the max in era - this new event is not the max, + // so neither the validator or any nominators will need an update. + // + // this does lead to a divergence of our system from the paper, which + // pays out some reward even if the latest report is not max-in-era. + // we opt to avoid the nominator lookups and edits and leave more rewards + // for more drastic misbehavior. + return None; + } + + // apply slash to validator. + { + let mut spans = fetch_spans::( + stash, + window_start, + &mut reward_payout, + &mut val_slashed, + reward_proportion, + ); + + let target_span = spans.compare_and_update_span_slash(slash_era, own_slash); + + if target_span == Some(spans.span_index()) { + // misbehavior occurred within the current slashing span - take appropriate + // actions. + + // chill the validator - it misbehaved in the current span and should + // not continue in the next election. also end the slashing span. 
+ spans.end_span(now); + >::chill_stash(stash); + + // make sure to disable validator till the end of this session + if T::SessionInterface::disable_validator(stash).unwrap_or(false) { + // force a new era, to select a new validator set + >::ensure_new_era() + } + } + } + + let mut nominators_slashed = Vec::new(); + reward_payout += slash_nominators::(params, prior_slash_p, &mut nominators_slashed); + + Some(UnappliedSlash { + validator: stash.clone(), + own: val_slashed, + others: nominators_slashed, + reporters: Vec::new(), + payout: reward_payout, + }) } // doesn't apply any slash, but kicks out the validator if the misbehavior is from // the most recent slashing span. -fn kick_out_if_recent( - params: SlashParams, -) { - // these are not updated by era-span or end-span. - let mut reward_payout = Zero::zero(); - let mut val_slashed = Zero::zero(); - let mut spans = fetch_spans::( - params.stash, - params.window_start, - &mut reward_payout, - &mut val_slashed, - params.reward_proportion, - ); - - if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) { - spans.end_span(params.now); - >::chill_stash(params.stash); - - // make sure to disable validator till the end of this session - if T::SessionInterface::disable_validator(params.stash).unwrap_or(false) { - // force a new era, to select a new validator set - >::ensure_new_era() - } - } +fn kick_out_if_recent(params: SlashParams) { + // these are not updated by era-span or end-span. 
+ let mut reward_payout = Zero::zero(); + let mut val_slashed = Zero::zero(); + let mut spans = fetch_spans::( + params.stash, + params.window_start, + &mut reward_payout, + &mut val_slashed, + params.reward_proportion, + ); + + if spans.era_span(params.slash_era).map(|s| s.index) == Some(spans.span_index()) { + spans.end_span(params.now); + >::chill_stash(params.stash); + + // make sure to disable validator till the end of this session + if T::SessionInterface::disable_validator(params.stash).unwrap_or(false) { + // force a new era, to select a new validator set + >::ensure_new_era() + } + } } /// Slash nominators. Accepts general parameters and the prior slash percentage of the validator. /// /// Returns the amount of reward to pay out. fn slash_nominators( - params: SlashParams, - prior_slash_p: Perbill, - nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, + params: SlashParams, + prior_slash_p: Perbill, + nominators_slashed: &mut Vec<(T::AccountId, BalanceOf)>, ) -> BalanceOf { - let SlashParams { - stash: _, - slash, - exposure, - slash_era, - window_start, - now, - reward_proportion, - } = params; - - let mut reward_payout = Zero::zero(); - - nominators_slashed.reserve(exposure.others.len()); - for nominator in &exposure.others { - let stash = &nominator.who; - let mut nom_slashed = Zero::zero(); - - // the era slash of a nominator always grows, if the validator - // had a new max slash for the era. - let era_slash = { - let own_slash_prior = prior_slash_p * nominator.value; - let own_slash_by_validator = slash * nominator.value; - let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); - - let mut era_slash = as Store>::NominatorSlashInEra::get( - &slash_era, - stash, - ).unwrap_or(Zero::zero()); - - era_slash += own_slash_difference; - - as Store>::NominatorSlashInEra::insert( - &slash_era, - stash, - &era_slash, - ); - - era_slash - }; - - // compare the era slash against other eras in the same span. 
- { - let mut spans = fetch_spans::( - stash, - window_start, - &mut reward_payout, - &mut nom_slashed, - reward_proportion, - ); - - let target_span = spans.compare_and_update_span_slash( - slash_era, - era_slash, - ); - - if target_span == Some(spans.span_index()) { - // End the span, but don't chill the nominator. its nomination - // on this validator will be ignored in the future. - spans.end_span(now); - } - } - - nominators_slashed.push((stash.clone(), nom_slashed)); - } - - reward_payout + let SlashParams { + stash: _, + slash, + exposure, + slash_era, + window_start, + now, + reward_proportion, + } = params; + + let mut reward_payout = Zero::zero(); + + nominators_slashed.reserve(exposure.others.len()); + for nominator in &exposure.others { + let stash = &nominator.who; + let mut nom_slashed = Zero::zero(); + + // the era slash of a nominator always grows, if the validator + // had a new max slash for the era. + let era_slash = { + let own_slash_prior = prior_slash_p * nominator.value; + let own_slash_by_validator = slash * nominator.value; + let own_slash_difference = own_slash_by_validator.saturating_sub(own_slash_prior); + + let mut era_slash = as Store>::NominatorSlashInEra::get(&slash_era, stash) + .unwrap_or(Zero::zero()); + + era_slash += own_slash_difference; + + as Store>::NominatorSlashInEra::insert(&slash_era, stash, &era_slash); + + era_slash + }; + + // compare the era slash against other eras in the same span. + { + let mut spans = fetch_spans::( + stash, + window_start, + &mut reward_payout, + &mut nom_slashed, + reward_proportion, + ); + + let target_span = spans.compare_and_update_span_slash(slash_era, era_slash); + + if target_span == Some(spans.span_index()) { + // End the span, but don't chill the nominator. its nomination + // on this validator will be ignored in the future. 
+ spans.end_span(now); + } + } + + nominators_slashed.push((stash.clone(), nom_slashed)); + } + + reward_payout } // helper struct for managing a set of spans we are currently inspecting. @@ -418,419 +416,524 @@ fn slash_nominators( // being 0, and the account being garbage-collected -- a dead account should get no new // metadata. struct InspectingSpans<'a, T: Trait + 'a> { - dirty: bool, - window_start: EraIndex, - stash: &'a T::AccountId, - spans: SlashingSpans, - paid_out: &'a mut BalanceOf, - slash_of: &'a mut BalanceOf, - reward_proportion: Perbill, - _marker: sp_std::marker::PhantomData, + dirty: bool, + window_start: EraIndex, + stash: &'a T::AccountId, + spans: SlashingSpans, + paid_out: &'a mut BalanceOf, + slash_of: &'a mut BalanceOf, + reward_proportion: Perbill, + _marker: sp_std::marker::PhantomData, } // fetches the slashing spans record for a stash account, initializing it if necessary. fn fetch_spans<'a, T: Trait + 'a>( - stash: &'a T::AccountId, - window_start: EraIndex, - paid_out: &'a mut BalanceOf, - slash_of: &'a mut BalanceOf, - reward_proportion: Perbill, + stash: &'a T::AccountId, + window_start: EraIndex, + paid_out: &'a mut BalanceOf, + slash_of: &'a mut BalanceOf, + reward_proportion: Perbill, ) -> InspectingSpans<'a, T> { - let spans = as Store>::SlashingSpans::get(stash).unwrap_or_else(|| { - let spans = SlashingSpans::new(window_start); - as Store>::SlashingSpans::insert(stash, &spans); - spans - }); - - InspectingSpans { - dirty: false, - window_start, - stash, - spans, - slash_of, - paid_out, - reward_proportion, - _marker: sp_std::marker::PhantomData, - } + let spans = as Store>::SlashingSpans::get(stash).unwrap_or_else(|| { + let spans = SlashingSpans::new(window_start); + as Store>::SlashingSpans::insert(stash, &spans); + spans + }); + + InspectingSpans { + dirty: false, + window_start, + stash, + spans, + slash_of, + paid_out, + reward_proportion, + _marker: sp_std::marker::PhantomData, + } } impl<'a, T: 'a + Trait> 
InspectingSpans<'a, T> { - fn span_index(&self) -> SpanIndex { - self.spans.span_index - } - - fn end_span(&mut self, now: EraIndex) { - self.dirty = self.spans.end_span(now) || self.dirty; - } - - // add some value to the slash of the staker. - // invariant: the staker is being slashed for non-zero value here - // although `amount` may be zero, as it is only a difference. - fn add_slash(&mut self, amount: BalanceOf, slash_era: EraIndex) { - *self.slash_of += amount; - self.spans.last_nonzero_slash = sp_std::cmp::max(self.spans.last_nonzero_slash, slash_era); - } - - // find the span index of the given era, if covered. - fn era_span(&self, era: EraIndex) -> Option { - self.spans.iter().find(|span| span.contains_era(era)) - } - - // compares the slash in an era to the overall current span slash. - // if it's higher, applies the difference of the slashes and then updates the span on disk. - // - // returns the span index of the era where the slash occurred, if any. - fn compare_and_update_span_slash( - &mut self, - slash_era: EraIndex, - slash: BalanceOf, - ) -> Option { - let target_span = self.era_span(slash_era)?; - let span_slash_key = (self.stash.clone(), target_span.index); - let mut span_record = as Store>::SpanSlash::get(&span_slash_key); - let mut changed = false; - - let reward = if span_record.slashed < slash { - // new maximum span slash. apply the difference. - let difference = slash - span_record.slashed; - span_record.slashed = slash; - - // compute reward. - let reward = REWARD_F1 - * (self.reward_proportion * slash).saturating_sub(span_record.paid_out); - - self.add_slash(difference, slash_era); - changed = true; - - reward - } else if span_record.slashed == slash { - // compute reward. no slash difference to apply. 
- REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out) - } else { - Zero::zero() - }; - - if !reward.is_zero() { - changed = true; - span_record.paid_out += reward; - *self.paid_out += reward; - } - - if changed { - self.dirty = true; - as Store>::SpanSlash::insert(&span_slash_key, &span_record); - } - - Some(target_span.index) - } + fn span_index(&self) -> SpanIndex { + self.spans.span_index + } + + fn end_span(&mut self, now: EraIndex) { + self.dirty = self.spans.end_span(now) || self.dirty; + } + + // add some value to the slash of the staker. + // invariant: the staker is being slashed for non-zero value here + // although `amount` may be zero, as it is only a difference. + fn add_slash(&mut self, amount: BalanceOf, slash_era: EraIndex) { + *self.slash_of += amount; + self.spans.last_nonzero_slash = sp_std::cmp::max(self.spans.last_nonzero_slash, slash_era); + } + + // find the span index of the given era, if covered. + fn era_span(&self, era: EraIndex) -> Option { + self.spans.iter().find(|span| span.contains_era(era)) + } + + // compares the slash in an era to the overall current span slash. + // if it's higher, applies the difference of the slashes and then updates the span on disk. + // + // returns the span index of the era where the slash occurred, if any. + fn compare_and_update_span_slash( + &mut self, + slash_era: EraIndex, + slash: BalanceOf, + ) -> Option { + let target_span = self.era_span(slash_era)?; + let span_slash_key = (self.stash.clone(), target_span.index); + let mut span_record = as Store>::SpanSlash::get(&span_slash_key); + let mut changed = false; + + let reward = if span_record.slashed < slash { + // new maximum span slash. apply the difference. + let difference = slash - span_record.slashed; + span_record.slashed = slash; + + // compute reward. 
+ let reward = + REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out); + + self.add_slash(difference, slash_era); + changed = true; + + reward + } else if span_record.slashed == slash { + // compute reward. no slash difference to apply. + REWARD_F1 * (self.reward_proportion * slash).saturating_sub(span_record.paid_out) + } else { + Zero::zero() + }; + + if !reward.is_zero() { + changed = true; + span_record.paid_out += reward; + *self.paid_out += reward; + } + + if changed { + self.dirty = true; + as Store>::SpanSlash::insert(&span_slash_key, &span_record); + } + + Some(target_span.index) + } } impl<'a, T: 'a + Trait> Drop for InspectingSpans<'a, T> { - fn drop(&mut self) { - // only update on disk if we slashed this account. - if !self.dirty { return } - - if let Some((start, end)) = self.spans.prune(self.window_start) { - for span_index in start..end { - as Store>::SpanSlash::remove(&(self.stash.clone(), span_index)); - } - } - - as Store>::SlashingSpans::insert(self.stash, &self.spans); - } + fn drop(&mut self) { + // only update on disk if we slashed this account. + if !self.dirty { + return; + } + + if let Some((start, end)) = self.spans.prune(self.window_start) { + for span_index in start..end { + as Store>::SpanSlash::remove(&(self.stash.clone(), span_index)); + } + } + + as Store>::SlashingSpans::insert(self.stash, &self.spans); + } } /// Clear slashing metadata for an obsolete era. pub(crate) fn clear_era_metadata(obsolete_era: EraIndex) { - as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); - as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); + as Store>::ValidatorSlashInEra::remove_prefix(&obsolete_era); + as Store>::NominatorSlashInEra::remove_prefix(&obsolete_era); } /// Clear slashing metadata for a dead account. 
pub(crate) fn clear_stash_metadata(stash: &T::AccountId) { - let spans = match as Store>::SlashingSpans::take(stash) { - None => return, - Some(s) => s, - }; - - // kill slashing-span metadata for account. - // - // this can only happen while the account is staked _if_ they are completely slashed. - // in that case, they may re-bond, but it would count again as span 0. Further ancient - // slashes would slash into this new bond, since metadata has now been cleared. - for span in spans.iter() { - as Store>::SpanSlash::remove(&(stash.clone(), span.index)); - } + let spans = match as Store>::SlashingSpans::take(stash) { + None => return, + Some(s) => s, + }; + + // kill slashing-span metadata for account. + // + // this can only happen while the account is staked _if_ they are completely slashed. + // in that case, they may re-bond, but it would count again as span 0. Further ancient + // slashes would slash into this new bond, since metadata has now been cleared. + for span in spans.iter() { + as Store>::SpanSlash::remove(&(stash.clone(), span.index)); + } } // apply the slash to a stash account, deducting any missing funds from the reward // payout, saturating at 0. this is mildly unfair but also an edge-case that // can only occur when overlapping locked funds have been slashed. pub fn do_slash( - stash: &T::AccountId, - value: BalanceOf, - reward_payout: &mut BalanceOf, - slashed_imbalance: &mut NegativeImbalanceOf, + stash: &T::AccountId, + value: BalanceOf, + reward_payout: &mut BalanceOf, + slashed_imbalance: &mut NegativeImbalanceOf, ) { - let controller = match >::bonded(stash) { - None => return, // defensive: should always exist. - Some(c) => c, - }; - - let mut ledger = match >::ledger(&controller) { - Some(ledger) => ledger, - None => return, // nothing to do. 
- }; - - let value = ledger.slash(value, T::Currency::minimum_balance()); - - if !value.is_zero() { - let (imbalance, missing) = T::Currency::slash(stash, value); - slashed_imbalance.subsume(imbalance); - - if !missing.is_zero() { - // deduct overslash from the reward payout - *reward_payout = reward_payout.saturating_sub(missing); - } - - >::update_ledger(&controller, &ledger); - - // trigger the event - >::deposit_event( - super::RawEvent::Slash(stash.clone(), value) - ); - } + let controller = match >::bonded(stash) { + None => return, // defensive: should always exist. + Some(c) => c, + }; + + let mut ledger = match >::ledger(&controller) { + Some(ledger) => ledger, + None => return, // nothing to do. + }; + + let value = ledger.slash(value, T::Currency::minimum_balance()); + + if !value.is_zero() { + let (imbalance, missing) = T::Currency::slash(stash, value); + slashed_imbalance.subsume(imbalance); + + if !missing.is_zero() { + // deduct overslash from the reward payout + *reward_payout = reward_payout.saturating_sub(missing); + } + + >::update_ledger(&controller, &ledger); + + // trigger the event + >::deposit_event(super::RawEvent::Slash(stash.clone(), value)); + } } /// Apply a previously-unapplied slash. 
pub(crate) fn apply_slash(unapplied_slash: UnappliedSlash>) { - let mut slashed_imbalance = NegativeImbalanceOf::::zero(); - let mut reward_payout = unapplied_slash.payout; - - do_slash::( - &unapplied_slash.validator, - unapplied_slash.own, - &mut reward_payout, - &mut slashed_imbalance, - ); - - for &(ref nominator, nominator_slash) in &unapplied_slash.others { - do_slash::( - &nominator, - nominator_slash, - &mut reward_payout, - &mut slashed_imbalance, - ); - } - - pay_reporters::(reward_payout, slashed_imbalance, &unapplied_slash.reporters); + let mut slashed_imbalance = NegativeImbalanceOf::::zero(); + let mut reward_payout = unapplied_slash.payout; + + do_slash::( + &unapplied_slash.validator, + unapplied_slash.own, + &mut reward_payout, + &mut slashed_imbalance, + ); + + for &(ref nominator, nominator_slash) in &unapplied_slash.others { + do_slash::( + &nominator, + nominator_slash, + &mut reward_payout, + &mut slashed_imbalance, + ); + } + + pay_reporters::(reward_payout, slashed_imbalance, &unapplied_slash.reporters); } - /// Apply a reward payout to some reporters, paying the rewards out of the slashed imbalance. fn pay_reporters( - reward_payout: BalanceOf, - slashed_imbalance: NegativeImbalanceOf, - reporters: &[T::AccountId], + reward_payout: BalanceOf, + slashed_imbalance: NegativeImbalanceOf, + reporters: &[T::AccountId], ) { - if reward_payout.is_zero() || reporters.is_empty() { - // nobody to pay out to or nothing to pay; - // just treat the whole value as slashed. - T::Slash::on_unbalanced(slashed_imbalance); - return - } - - // take rewards out of the slashed imbalance. 
- let reward_payout = reward_payout.min(slashed_imbalance.peek()); - let (mut reward_payout, mut value_slashed) = slashed_imbalance.split(reward_payout); - - let per_reporter = reward_payout.peek() / (reporters.len() as u32).into(); - for reporter in reporters { - let (reporter_reward, rest) = reward_payout.split(per_reporter); - reward_payout = rest; - - // this cancels out the reporter reward imbalance internally, leading - // to no change in total issuance. - T::Currency::resolve_creating(reporter, reporter_reward); - } - - // the rest goes to the on-slash imbalance handler (e.g. treasury) - value_slashed.subsume(reward_payout); // remainder of reward division remains. - T::Slash::on_unbalanced(value_slashed); + if reward_payout.is_zero() || reporters.is_empty() { + // nobody to pay out to or nothing to pay; + // just treat the whole value as slashed. + T::Slash::on_unbalanced(slashed_imbalance); + return; + } + + // take rewards out of the slashed imbalance. + let reward_payout = reward_payout.min(slashed_imbalance.peek()); + let (mut reward_payout, mut value_slashed) = slashed_imbalance.split(reward_payout); + + let per_reporter = reward_payout.peek() / (reporters.len() as u32).into(); + for reporter in reporters { + let (reporter_reward, rest) = reward_payout.split(per_reporter); + reward_payout = rest; + + // this cancels out the reporter reward imbalance internally, leading + // to no change in total issuance. + T::Currency::resolve_creating(reporter, reporter_reward); + } + + // the rest goes to the on-slash imbalance handler (e.g. treasury) + value_slashed.subsume(reward_payout); // remainder of reward division remains. 
+ T::Slash::on_unbalanced(value_slashed); } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn span_contains_era() { - // unbounded end - let span = SlashingSpan { index: 0, start: 1000, length: None }; - assert!(!span.contains_era(0)); - assert!(!span.contains_era(999)); - - assert!(span.contains_era(1000)); - assert!(span.contains_era(1001)); - assert!(span.contains_era(10000)); - - // bounded end - non-inclusive range. - let span = SlashingSpan { index: 0, start: 1000, length: Some(10) }; - assert!(!span.contains_era(0)); - assert!(!span.contains_era(999)); - - assert!(span.contains_era(1000)); - assert!(span.contains_era(1001)); - assert!(span.contains_era(1009)); - assert!(!span.contains_era(1010)); - assert!(!span.contains_era(1011)); - } - - #[test] - fn single_slashing_span() { - let spans = SlashingSpans { - span_index: 0, - last_start: 1000, - last_nonzero_slash: 0, - prior: Vec::new(), - }; - - assert_eq!( - spans.iter().collect::>(), - vec![SlashingSpan { index: 0, start: 1000, length: None }], - ); - } - - #[test] - fn many_prior_spans() { - let spans = SlashingSpans { - span_index: 10, - last_start: 1000, - last_nonzero_slash: 0, - prior: vec![10, 9, 8, 10], - }; - - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 1000, length: None }, - SlashingSpan { index: 9, start: 990, length: Some(10) }, - SlashingSpan { index: 8, start: 981, length: Some(9) }, - SlashingSpan { index: 7, start: 973, length: Some(8) }, - SlashingSpan { index: 6, start: 963, length: Some(10) }, - ], - ) - } - - #[test] - fn pruning_spans() { - let mut spans = SlashingSpans { - span_index: 10, - last_start: 1000, - last_nonzero_slash: 0, - prior: vec![10, 9, 8, 10], - }; - - assert_eq!(spans.prune(981), Some((6, 8))); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 1000, length: None }, - SlashingSpan { index: 9, start: 990, length: Some(10) }, - SlashingSpan { index: 8, start: 981, length: Some(9) }, 
- ], - ); - - assert_eq!(spans.prune(982), None); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 1000, length: None }, - SlashingSpan { index: 9, start: 990, length: Some(10) }, - SlashingSpan { index: 8, start: 981, length: Some(9) }, - ], - ); - - assert_eq!(spans.prune(989), None); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 1000, length: None }, - SlashingSpan { index: 9, start: 990, length: Some(10) }, - SlashingSpan { index: 8, start: 981, length: Some(9) }, - ], - ); - - assert_eq!(spans.prune(1000), Some((8, 10))); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 1000, length: None }, - ], - ); - - assert_eq!(spans.prune(2000), None); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 2000, length: None }, - ], - ); - - // now all in one shot. - let mut spans = SlashingSpans { - span_index: 10, - last_start: 1000, - last_nonzero_slash: 0, - prior: vec![10, 9, 8, 10], - }; - assert_eq!(spans.prune(2000), Some((6, 10))); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 10, start: 2000, length: None }, - ], - ); - } - - #[test] - fn ending_span() { - let mut spans = SlashingSpans { - span_index: 1, - last_start: 10, - last_nonzero_slash: 0, - prior: Vec::new(), - }; - - assert!(spans.end_span(10)); - - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 2, start: 11, length: None }, - SlashingSpan { index: 1, start: 10, length: Some(1) }, - ], - ); - - assert!(spans.end_span(15)); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 3, start: 16, length: None }, - SlashingSpan { index: 2, start: 11, length: Some(5) }, - SlashingSpan { index: 1, start: 10, length: Some(1) }, - ], - ); - - // does nothing if not a valid end. 
- assert!(!spans.end_span(15)); - assert_eq!( - spans.iter().collect::>(), - vec![ - SlashingSpan { index: 3, start: 16, length: None }, - SlashingSpan { index: 2, start: 11, length: Some(5) }, - SlashingSpan { index: 1, start: 10, length: Some(1) }, - ], - ); - } + use super::*; + + #[test] + fn span_contains_era() { + // unbounded end + let span = SlashingSpan { + index: 0, + start: 1000, + length: None, + }; + assert!(!span.contains_era(0)); + assert!(!span.contains_era(999)); + + assert!(span.contains_era(1000)); + assert!(span.contains_era(1001)); + assert!(span.contains_era(10000)); + + // bounded end - non-inclusive range. + let span = SlashingSpan { + index: 0, + start: 1000, + length: Some(10), + }; + assert!(!span.contains_era(0)); + assert!(!span.contains_era(999)); + + assert!(span.contains_era(1000)); + assert!(span.contains_era(1001)); + assert!(span.contains_era(1009)); + assert!(!span.contains_era(1010)); + assert!(!span.contains_era(1011)); + } + + #[test] + fn single_slashing_span() { + let spans = SlashingSpans { + span_index: 0, + last_start: 1000, + last_nonzero_slash: 0, + prior: Vec::new(), + }; + + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { + index: 0, + start: 1000, + length: None + }], + ); + } + + #[test] + fn many_prior_spans() { + let spans = SlashingSpans { + span_index: 10, + last_start: 1000, + last_nonzero_slash: 0, + prior: vec![10, 9, 8, 10], + }; + + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { + index: 10, + start: 1000, + length: None + }, + SlashingSpan { + index: 9, + start: 990, + length: Some(10) + }, + SlashingSpan { + index: 8, + start: 981, + length: Some(9) + }, + SlashingSpan { + index: 7, + start: 973, + length: Some(8) + }, + SlashingSpan { + index: 6, + start: 963, + length: Some(10) + }, + ], + ) + } + + #[test] + fn pruning_spans() { + let mut spans = SlashingSpans { + span_index: 10, + last_start: 1000, + last_nonzero_slash: 0, + prior: vec![10, 9, 8, 10], + }; + + 
assert_eq!(spans.prune(981), Some((6, 8))); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { + index: 10, + start: 1000, + length: None + }, + SlashingSpan { + index: 9, + start: 990, + length: Some(10) + }, + SlashingSpan { + index: 8, + start: 981, + length: Some(9) + }, + ], + ); + + assert_eq!(spans.prune(982), None); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { + index: 10, + start: 1000, + length: None + }, + SlashingSpan { + index: 9, + start: 990, + length: Some(10) + }, + SlashingSpan { + index: 8, + start: 981, + length: Some(9) + }, + ], + ); + + assert_eq!(spans.prune(989), None); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { + index: 10, + start: 1000, + length: None + }, + SlashingSpan { + index: 9, + start: 990, + length: Some(10) + }, + SlashingSpan { + index: 8, + start: 981, + length: Some(9) + }, + ], + ); + + assert_eq!(spans.prune(1000), Some((8, 10))); + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { + index: 10, + start: 1000, + length: None + },], + ); + + assert_eq!(spans.prune(2000), None); + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { + index: 10, + start: 2000, + length: None + },], + ); + + // now all in one shot. 
+ let mut spans = SlashingSpans { + span_index: 10, + last_start: 1000, + last_nonzero_slash: 0, + prior: vec![10, 9, 8, 10], + }; + assert_eq!(spans.prune(2000), Some((6, 10))); + assert_eq!( + spans.iter().collect::>(), + vec![SlashingSpan { + index: 10, + start: 2000, + length: None + },], + ); + } + + #[test] + fn ending_span() { + let mut spans = SlashingSpans { + span_index: 1, + last_start: 10, + last_nonzero_slash: 0, + prior: Vec::new(), + }; + + assert!(spans.end_span(10)); + + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { + index: 2, + start: 11, + length: None + }, + SlashingSpan { + index: 1, + start: 10, + length: Some(1) + }, + ], + ); + + assert!(spans.end_span(15)); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { + index: 3, + start: 16, + length: None + }, + SlashingSpan { + index: 2, + start: 11, + length: Some(5) + }, + SlashingSpan { + index: 1, + start: 10, + length: Some(1) + }, + ], + ); + + // does nothing if not a valid end. + assert!(!spans.end_span(15)); + assert_eq!( + spans.iter().collect::>(), + vec![ + SlashingSpan { + index: 3, + start: 16, + length: None + }, + SlashingSpan { + index: 2, + start: 11, + length: Some(5) + }, + SlashingSpan { + index: 1, + start: 10, + length: Some(1) + }, + ], + ); + } } diff --git a/frame/staking/src/testing_utils.rs b/frame/staking/src/testing_utils.rs index 4c1ee66a75..10eda7dcb4 100644 --- a/frame/staking/src/testing_utils.rs +++ b/frame/staking/src/testing_utils.rs @@ -29,7 +29,7 @@ use pallet_indices::address::Address; use rand::Rng; use sp_core::hashing::blake2_256; use sp_phragmen::{ - build_support_map, evaluate_support, reduce, Assignment, PhragmenScore, StakedAssignment, + build_support_map, evaluate_support, reduce, Assignment, PhragmenScore, StakedAssignment, }; const CTRL_PREFIX: u32 = 1000; @@ -43,298 +43,298 @@ pub type AddressOf = Address<::AccountId, u32>; /// Random number in the range `[a, b]`. 
pub fn random(a: u32, b: u32) -> u32 { - rand::thread_rng().gen_range(a, b) + rand::thread_rng().gen_range(a, b) } /// Set the desired validator count, with related storage items. pub fn set_validator_count(to_elect: u32) { - ValidatorCount::put(to_elect); - MinimumValidatorCount::put(to_elect / 2); - >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); + ValidatorCount::put(to_elect); + MinimumValidatorCount::put(to_elect / 2); + >::put(ElectionStatus::Open(T::BlockNumber::from(1u32))); } /// Build an account with the given index. pub fn account(index: u32) -> T::AccountId { - let entropy = (b"benchmark/staking", index).using_encoded(blake2_256); - T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() + let entropy = (b"benchmark/staking", index).using_encoded(blake2_256); + T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() } /// Build an address given Index pub fn address(index: u32) -> AddressOf { - pallet_indices::address::Address::Id(account::(index)) + pallet_indices::address::Address::Id(account::(index)) } /// Generate signed origin from `who`. pub fn signed(who: T::AccountId) -> T::Origin { - RawOrigin::Signed(who).into() + RawOrigin::Signed(who).into() } /// Generate signed origin from `index`. pub fn signed_account(index: u32) -> T::Origin { - signed::(account::(index)) + signed::(account::(index)) } /// Bond a validator. 
pub fn bond_validator(stash: T::AccountId, ctrl: u32, val: BalanceOf) where - T::Lookup: StaticLookup>, + T::Lookup: StaticLookup>, { - let _ = T::Currency::make_free_balance_be(&stash, val); - assert_ok!(>::bond( - signed::(stash), - address::(ctrl), - val, - RewardDestination::Controller - )); - assert_ok!(>::validate( - signed_account::(ctrl), - ValidatorPrefs::default() - )); + let _ = T::Currency::make_free_balance_be(&stash, val); + assert_ok!(>::bond( + signed::(stash), + address::(ctrl), + val, + RewardDestination::Controller + )); + assert_ok!(>::validate( + signed_account::(ctrl), + ValidatorPrefs::default() + )); } pub fn bond_nominator( - stash: T::AccountId, - ctrl: u32, - val: BalanceOf, - target: Vec>, + stash: T::AccountId, + ctrl: u32, + val: BalanceOf, + target: Vec>, ) where - T::Lookup: StaticLookup>, + T::Lookup: StaticLookup>, { - let _ = T::Currency::make_free_balance_be(&stash, val); - assert_ok!(>::bond( - signed::(stash), - address::(ctrl), - val, - RewardDestination::Controller - )); - assert_ok!(>::nominate(signed_account::(ctrl), target)); + let _ = T::Currency::make_free_balance_be(&stash, val); + assert_ok!(>::bond( + signed::(stash), + address::(ctrl), + val, + RewardDestination::Controller + )); + assert_ok!(>::nominate(signed_account::(ctrl), target)); } /// Bond `nun_validators` validators and `num_nominator` nominators with `edge_per_voter` random /// votes per nominator. 
pub fn setup_chain_stakers(num_validators: u32, num_voters: u32, edge_per_voter: u32) where - T::Lookup: StaticLookup>, + T::Lookup: StaticLookup>, { - (0..num_validators).for_each(|i| { - bond_validator::( - account::(i), - i + CTRL_PREFIX, - >::from(random(1, 1000)) * T::Currency::minimum_balance(), - ); - }); - - (0..num_voters).for_each(|i| { - let mut targets: Vec> = Vec::with_capacity(edge_per_voter as usize); - let mut all_targets = (0..num_validators) - .map(|t| address::(t)) - .collect::>(); - assert!(num_validators >= edge_per_voter); - (0..edge_per_voter).for_each(|_| { - let target = all_targets.remove(random(0, all_targets.len() as u32 - 1) as usize); - targets.push(target); - }); - bond_nominator::( - account::(i + NOMINATOR_PREFIX), - i + NOMINATOR_PREFIX + CTRL_PREFIX, - >::from(random(1, 1000)) * T::Currency::minimum_balance(), - targets, - ); - }); - - >::create_stakers_snapshot(); + (0..num_validators).for_each(|i| { + bond_validator::( + account::(i), + i + CTRL_PREFIX, + >::from(random(1, 1000)) * T::Currency::minimum_balance(), + ); + }); + + (0..num_voters).for_each(|i| { + let mut targets: Vec> = Vec::with_capacity(edge_per_voter as usize); + let mut all_targets = (0..num_validators) + .map(|t| address::(t)) + .collect::>(); + assert!(num_validators >= edge_per_voter); + (0..edge_per_voter).for_each(|_| { + let target = all_targets.remove(random(0, all_targets.len() as u32 - 1) as usize); + targets.push(target); + }); + bond_nominator::( + account::(i + NOMINATOR_PREFIX), + i + NOMINATOR_PREFIX + CTRL_PREFIX, + >::from(random(1, 1000)) * T::Currency::minimum_balance(), + targets, + ); + }); + + >::create_stakers_snapshot(); } /// Build a _really bad_ but acceptable solution for election. This should always yield a solution /// which has a less score than the seq-phragmen. 
pub fn get_weak_solution( - do_reduce: bool, + do_reduce: bool, ) -> (Vec, CompactAssignments, PhragmenScore) { - let mut backing_stake_of: BTreeMap> = BTreeMap::new(); - - // self stake - >::enumerate().for_each(|(who, _p)| { - *backing_stake_of.entry(who.clone()).or_insert(Zero::zero()) += - >::slashable_balance_of(&who) - }); - - // add nominator stuff - >::enumerate().for_each(|(who, nomination)| { - nomination.targets.into_iter().for_each(|v| { - *backing_stake_of.entry(v).or_insert(Zero::zero()) += - >::slashable_balance_of(&who) - }) - }); - - // elect winners - let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); - sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); - let winners: Vec = sorted - .iter() - .cloned() - .take(>::validator_count() as usize) - .collect(); - - let mut staked_assignments: Vec> = Vec::new(); - >::enumerate().for_each(|(who, nomination)| { - let mut dist: Vec<(T::AccountId, ExtendedBalance)> = Vec::new(); - nomination.targets.into_iter().for_each(|v| { - if winners.iter().find(|&w| *w == v).is_some() { - dist.push((v, ExtendedBalance::zero())); - } - }); - - if dist.len() == 0 { - return; - } - - // assign real stakes. just split the stake. - let stake = , u64>>::convert( - >::slashable_balance_of(&who), - ) as ExtendedBalance; - - let mut sum: ExtendedBalance = Zero::zero(); - let dist_len = dist.len() as ExtendedBalance; - - // assign main portion - // only take the first half into account. This should highly imbalance stuff, which is good. - dist.iter_mut() - .take(if dist_len > 1 { - (dist_len as usize) / 2 - } else { - 1 - }) - .for_each(|(_, w)| { - let partial = stake / dist_len; - *w = partial; - sum += partial; - }); - - // assign the leftover to last. - let leftover = stake - sum; - let last = dist.last_mut().unwrap(); - last.1 += leftover; - - staked_assignments.push(StakedAssignment { - who, - distribution: dist, - }); - }); - - // add self support to winners. 
- winners.iter().for_each(|w| { - staked_assignments.push(StakedAssignment { - who: w.clone(), - distribution: vec![( - w.clone(), - , u64>>::convert( - >::slashable_balance_of(&w), - ) as ExtendedBalance, - )], - }) - }); - - if do_reduce { - reduce(&mut staked_assignments); - } - - // helpers for building the compact - let snapshot_validators = >::snapshot_validators().unwrap(); - let snapshot_nominators = >::snapshot_nominators().unwrap(); - - let nominator_index = |a: &T::AccountId| -> Option { - snapshot_nominators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let validator_index = |a: &T::AccountId| -> Option { - snapshot_validators - .iter() - .position(|x| x == a) - .and_then(|i| >::try_into(i).ok()) - }; - let stake_of = |who: &T::AccountId| -> ExtendedBalance { - , u64>>::convert( - >::slashable_balance_of(who), - ) as ExtendedBalance - }; - - // convert back to ratio assignment. This takes less space. - let low_accuracy_assignment: Vec> = - staked_assignments - .into_iter() - .map(|sa| sa.into_assignment(true)) - .collect(); - - // re-calculate score based on what the chain will decode. - let score = { - let staked: Vec> = low_accuracy_assignment - .iter() - .map(|a| { - let stake = stake_of(&a.who); - a.clone().into_staked(stake, true) - }) - .collect(); - - let (support_map, _) = - build_support_map::(winners.as_slice(), staked.as_slice()); - evaluate_support::(&support_map) - }; - - // compact encode the assignment. - let compact = CompactAssignments::from_assignment( - low_accuracy_assignment, - nominator_index, - validator_index, - ) - .unwrap(); - - // winners to index. 
- let winners = winners - .into_iter() - .map(|w| { - snapshot_validators - .iter() - .position(|v| *v == w) - .unwrap() - .try_into() - .unwrap() - }) - .collect::>(); - - (winners, compact, score) + let mut backing_stake_of: BTreeMap> = BTreeMap::new(); + + // self stake + >::enumerate().for_each(|(who, _p)| { + *backing_stake_of.entry(who.clone()).or_insert(Zero::zero()) += + >::slashable_balance_of(&who) + }); + + // add nominator stuff + >::enumerate().for_each(|(who, nomination)| { + nomination.targets.into_iter().for_each(|v| { + *backing_stake_of.entry(v).or_insert(Zero::zero()) += + >::slashable_balance_of(&who) + }) + }); + + // elect winners + let mut sorted: Vec = backing_stake_of.keys().cloned().collect(); + sorted.sort_by_key(|x| backing_stake_of.get(x).unwrap()); + let winners: Vec = sorted + .iter() + .cloned() + .take(>::validator_count() as usize) + .collect(); + + let mut staked_assignments: Vec> = Vec::new(); + >::enumerate().for_each(|(who, nomination)| { + let mut dist: Vec<(T::AccountId, ExtendedBalance)> = Vec::new(); + nomination.targets.into_iter().for_each(|v| { + if winners.iter().find(|&w| *w == v).is_some() { + dist.push((v, ExtendedBalance::zero())); + } + }); + + if dist.len() == 0 { + return; + } + + // assign real stakes. just split the stake. + let stake = , u64>>::convert( + >::slashable_balance_of(&who), + ) as ExtendedBalance; + + let mut sum: ExtendedBalance = Zero::zero(); + let dist_len = dist.len() as ExtendedBalance; + + // assign main portion + // only take the first half into account. This should highly imbalance stuff, which is good. + dist.iter_mut() + .take(if dist_len > 1 { + (dist_len as usize) / 2 + } else { + 1 + }) + .for_each(|(_, w)| { + let partial = stake / dist_len; + *w = partial; + sum += partial; + }); + + // assign the leftover to last. 
+ let leftover = stake - sum; + let last = dist.last_mut().unwrap(); + last.1 += leftover; + + staked_assignments.push(StakedAssignment { + who, + distribution: dist, + }); + }); + + // add self support to winners. + winners.iter().for_each(|w| { + staked_assignments.push(StakedAssignment { + who: w.clone(), + distribution: vec![( + w.clone(), + , u64>>::convert( + >::slashable_balance_of(&w), + ) as ExtendedBalance, + )], + }) + }); + + if do_reduce { + reduce(&mut staked_assignments); + } + + // helpers for building the compact + let snapshot_validators = >::snapshot_validators().unwrap(); + let snapshot_nominators = >::snapshot_nominators().unwrap(); + + let nominator_index = |a: &T::AccountId| -> Option { + snapshot_nominators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + let validator_index = |a: &T::AccountId| -> Option { + snapshot_validators + .iter() + .position(|x| x == a) + .and_then(|i| >::try_into(i).ok()) + }; + let stake_of = |who: &T::AccountId| -> ExtendedBalance { + , u64>>::convert( + >::slashable_balance_of(who), + ) as ExtendedBalance + }; + + // convert back to ratio assignment. This takes less space. + let low_accuracy_assignment: Vec> = + staked_assignments + .into_iter() + .map(|sa| sa.into_assignment(true)) + .collect(); + + // re-calculate score based on what the chain will decode. + let score = { + let staked: Vec> = low_accuracy_assignment + .iter() + .map(|a| { + let stake = stake_of(&a.who); + a.clone().into_staked(stake, true) + }) + .collect(); + + let (support_map, _) = + build_support_map::(winners.as_slice(), staked.as_slice()); + evaluate_support::(&support_map) + }; + + // compact encode the assignment. + let compact = CompactAssignments::from_assignment( + low_accuracy_assignment, + nominator_index, + validator_index, + ) + .unwrap(); + + // winners to index. 
+ let winners = winners + .into_iter() + .map(|w| { + snapshot_validators + .iter() + .position(|v| *v == w) + .unwrap() + .try_into() + .unwrap() + }) + .collect::>(); + + (winners, compact, score) } /// Create a solution for seq-phragmen. This uses the same internal function as used by the offchain /// worker code. pub fn get_seq_phragmen_solution( - do_reduce: bool, + do_reduce: bool, ) -> (Vec, CompactAssignments, PhragmenScore) { - let sp_phragmen::PhragmenResult { - winners, - assignments, - } = >::do_phragmen::().unwrap(); + let sp_phragmen::PhragmenResult { + winners, + assignments, + } = >::do_phragmen::().unwrap(); - offchain_election::prepare_submission::(assignments, winners, do_reduce).unwrap() + offchain_election::prepare_submission::(assignments, winners, do_reduce).unwrap() } /// Remove all validator, nominators, votes and exposures. pub fn clean(era: EraIndex) - where - ::AccountId: codec::EncodeLike, - u32: codec::EncodeLike, +where + ::AccountId: codec::EncodeLike, + u32: codec::EncodeLike, { - >::enumerate().for_each(|(k, _)| { - let ctrl = >::bonded(&k).unwrap(); - >::remove(&k); - >::remove(&k); - >::remove(&ctrl); - >::remove(k, era); - }); - >::enumerate().for_each(|(k, _)| >::remove(k)); - >::remove_all(); - >::remove_all(); - >::kill(); - QueuedScore::kill(); + >::enumerate().for_each(|(k, _)| { + let ctrl = >::bonded(&k).unwrap(); + >::remove(&k); + >::remove(&k); + >::remove(&ctrl); + >::remove(k, era); + }); + >::enumerate().for_each(|(k, _)| >::remove(k)); + >::remove_all(); + >::remove_all(); + >::kill(); + QueuedScore::kill(); } diff --git a/frame/staking/src/tests.rs b/frame/staking/src/tests.rs index 15afda1e3a..f39220531c 100644 --- a/frame/staking/src/tests.rs +++ b/frame/staking/src/tests.rs @@ -17,4654 +17,5266 @@ //! Tests for the module. 
use super::*; -use mock::*; -use sp_runtime::{ - assert_eq_error_rate, traits::BadOrigin, -}; -use sp_staking::offence::OffenceDetails; +use crate::Store; use frame_support::{ - assert_ok, assert_noop, StorageMap, - traits::{Currency, ReservableCurrency, OnInitialize}, + assert_noop, assert_ok, + traits::{Currency, OnInitialize, ReservableCurrency}, + StorageMap, }; +use mock::*; use pallet_balances::Error as BalancesError; +use sp_runtime::{assert_eq_error_rate, traits::BadOrigin}; +use sp_staking::offence::OffenceDetails; use substrate_test_utils::assert_eq_uvec; -use crate::Store; #[test] fn force_unstake_works() { - // Verifies initial conditions of mock - ExtBuilder::default().build_and_execute(|| { - // Account 11 is stashed and locked, and account 10 is the controller - assert_eq!(Staking::bonded(&11), Some(10)); - // Cant transfer - assert_noop!( - Balances::transfer(Origin::signed(11), 1, 10), - BalancesError::::LiquidityRestrictions - ); - // Force unstake requires root. - assert_noop!(Staking::force_unstake(Origin::signed(11), 11), BadOrigin); - // We now force them to unstake - assert_ok!(Staking::force_unstake(Origin::ROOT, 11)); - // No longer bonded. - assert_eq!(Staking::bonded(&11), None); - // Transfer works. - assert_ok!(Balances::transfer(Origin::signed(11), 1, 10)); - }); + // Verifies initial conditions of mock + ExtBuilder::default().build_and_execute(|| { + // Account 11 is stashed and locked, and account 10 is the controller + assert_eq!(Staking::bonded(&11), Some(10)); + // Cant transfer + assert_noop!( + Balances::transfer(Origin::signed(11), 1, 10), + BalancesError::::LiquidityRestrictions + ); + // Force unstake requires root. + assert_noop!(Staking::force_unstake(Origin::signed(11), 11), BadOrigin); + // We now force them to unstake + assert_ok!(Staking::force_unstake(Origin::ROOT, 11)); + // No longer bonded. + assert_eq!(Staking::bonded(&11), None); + // Transfer works. 
+ assert_ok!(Balances::transfer(Origin::signed(11), 1, 10)); + }); } #[test] fn basic_setup_works() { - // Verifies initial conditions of mock - ExtBuilder::default().build_and_execute(|| { - // Account 11 is stashed and locked, and account 10 is the controller - assert_eq!(Staking::bonded(&11), Some(10)); - // Account 21 is stashed and locked, and account 20 is the controller - assert_eq!(Staking::bonded(&21), Some(20)); - // Account 1 is not a stashed - assert_eq!(Staking::bonded(&1), None); - - // Account 10 controls the stash from account 11, which is 100 * balance_factor units - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] }) - ); - // Account 20 controls the stash from account 21, which is 200 * balance_factor units - assert_eq!( - Staking::ledger(&20), - Some(StakingLedger { stash: 21, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![] }) - ); - // Account 1 does not control any stash - assert_eq!(Staking::ledger(&1), None); - - // ValidatorPrefs are default - assert_eq_uvec!(>::iter().collect::>(), vec![ - (31, ValidatorPrefs::default()), - (21, ValidatorPrefs::default()), - (11, ValidatorPrefs::default()) - ]); - - assert_eq!( - Staking::ledger(100), - Some(StakingLedger { stash: 101, total: 500, active: 500, unlocking: vec![], claimed_rewards: vec![] }) - ); - assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); - - assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - Exposure { - total: 1125, - own: 1000, - others: vec![ IndividualExposure { who: 101, value: 125 }] - }, - ); - assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 21), - Exposure { - total: 1375, - own: 1000, - others: vec![ IndividualExposure { who: 101, value: 375 }] - }, - ); - - // initial total stake = 1125 + 1375 - assert_eq!(Staking::eras_total_stake(Staking::active_era().unwrap().index), 2500); - - - // 
The number of validators required. - assert_eq!(Staking::validator_count(), 2); - - // Initial Era and session - assert_eq!(Staking::active_era().unwrap().index, 0); - - // Account 10 has `balance_factor` free balance - assert_eq!(Balances::free_balance(10), 1); - assert_eq!(Balances::free_balance(10), 1); - - // New era is not being forced - assert_eq!(Staking::force_era(), Forcing::NotForcing); - }); + // Verifies initial conditions of mock + ExtBuilder::default().build_and_execute(|| { + // Account 11 is stashed and locked, and account 10 is the controller + assert_eq!(Staking::bonded(&11), Some(10)); + // Account 21 is stashed and locked, and account 20 is the controller + assert_eq!(Staking::bonded(&21), Some(20)); + // Account 1 is not a stashed + assert_eq!(Staking::bonded(&1), None); + + // Account 10 controls the stash from account 11, which is 100 * balance_factor units + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + }) + ); + // Account 20 controls the stash from account 21, which is 200 * balance_factor units + assert_eq!( + Staking::ledger(&20), + Some(StakingLedger { + stash: 21, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![] + }) + ); + // Account 1 does not control any stash + assert_eq!(Staking::ledger(&1), None); + + // ValidatorPrefs are default + assert_eq_uvec!( + >::iter().collect::>(), + vec![ + (31, ValidatorPrefs::default()), + (21, ValidatorPrefs::default()), + (11, ValidatorPrefs::default()) + ] + ); + + assert_eq!( + Staking::ledger(100), + Some(StakingLedger { + stash: 101, + total: 500, + active: 500, + unlocking: vec![], + claimed_rewards: vec![] + }) + ); + assert_eq!(Staking::nominators(101).unwrap().targets, vec![11, 21]); + + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Exposure { + total: 1125, + own: 1000, + others: vec![IndividualExposure { + who: 101, + 
value: 125 + }] + }, + ); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + Exposure { + total: 1375, + own: 1000, + others: vec![IndividualExposure { + who: 101, + value: 375 + }] + }, + ); + + // initial total stake = 1125 + 1375 + assert_eq!( + Staking::eras_total_stake(Staking::active_era().unwrap().index), + 2500 + ); + + // The number of validators required. + assert_eq!(Staking::validator_count(), 2); + + // Initial Era and session + assert_eq!(Staking::active_era().unwrap().index, 0); + + // Account 10 has `balance_factor` free balance + assert_eq!(Balances::free_balance(10), 1); + assert_eq!(Balances::free_balance(10), 1); + + // New era is not being forced + assert_eq!(Staking::force_era(), Forcing::NotForcing); + }); } #[test] fn change_controller_works() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::bonded(&11), Some(10)); - - assert!(Session::validators().contains(&11)); - // 10 can control 11 who is initially a validator. - assert_ok!(Staking::chill(Origin::signed(10))); - assert!(Session::validators().contains(&11)); - - assert_ok!(Staking::set_controller(Origin::signed(11), 5)); - - mock::start_era(1); - - assert_noop!( - Staking::validate(Origin::signed(10), ValidatorPrefs::default()), - Error::::NotController, - ); - assert_ok!(Staking::validate(Origin::signed(5), ValidatorPrefs::default())); - }) + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Staking::bonded(&11), Some(10)); + + assert!(Session::validators().contains(&11)); + // 10 can control 11 who is initially a validator. 
+ assert_ok!(Staking::chill(Origin::signed(10))); + assert!(Session::validators().contains(&11)); + + assert_ok!(Staking::set_controller(Origin::signed(11), 5)); + + mock::start_era(1); + + assert_noop!( + Staking::validate(Origin::signed(10), ValidatorPrefs::default()), + Error::::NotController, + ); + assert_ok!(Staking::validate( + Origin::signed(5), + ValidatorPrefs::default() + )); + }) } #[test] fn rewards_should_work() { - // should check that: - // * rewards get recorded per session - // * rewards get paid per Era - // * Check that nominators are also rewarded - ExtBuilder::default().nominate(true).build_and_execute(|| { - let init_balance_10 = Balances::total_balance(&10); - let init_balance_11 = Balances::total_balance(&11); - let init_balance_20 = Balances::total_balance(&20); - let init_balance_21 = Balances::total_balance(&21); - let init_balance_100 = Balances::total_balance(&100); - let init_balance_101 = Balances::total_balance(&101); - - // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); - - >::reward_by_ids(vec![(11, 50)]); - >::reward_by_ids(vec![(11, 50)]); - // This is the second validator of the current elected set. 
- >::reward_by_ids(vec![(21, 50)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something - - start_session(1); - - assert_eq!(Balances::total_balance(&10), init_balance_10); - assert_eq!(Balances::total_balance(&11), init_balance_11); - assert_eq!(Balances::total_balance(&20), init_balance_20); - assert_eq!(Balances::total_balance(&21), init_balance_21); - assert_eq!(Balances::total_balance(&100), init_balance_100); - assert_eq!(Balances::total_balance(&101), init_balance_101); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { - total: 50*3, - individual: vec![(11, 100), (21, 50)].into_iter().collect(), - }); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); - let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); - let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); - - start_session(2); - start_session(3); - - assert_eq!(Staking::active_era().unwrap().index, 1); - mock::make_all_reward_payment(0); - - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * total_payout_0 * 2/3 - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - - 
assert_eq_uvec!(Session::validators(), vec![11, 21]); - >::reward_by_ids(vec![(11, 1)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something - - mock::start_era(2); - mock::make_all_reward_payment(1); - - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - }); + // should check that: + // * rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + ExtBuilder::default().nominate(true).build_and_execute(|| { + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. 
+ >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!( + Staking::eras_reward_points(Staking::active_era().unwrap().index), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment(0); + + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * total_payout_0 * 2 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2 / 3 + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), 
init_balance_101, 2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment(1); + + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_0 * 2 / 3 + total_payout_1), + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + }); } #[test] fn staking_should_work() { - // should test: - // * new validators can be added to the default set - // * new ones will be chosen per era - // * either one can unlock the stash and back-down from being a validator via `chill`ing. - ExtBuilder::default() - .nominate(false) - .fair(false) // to give 20 more staked value - .build() - .execute_with(|| { - // --- Block 1: - start_session(1); - - // remember + compare this along with the test. - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - // put some money in account that we'll use. - for i in 1..5 { let _ = Balances::make_free_balance_be(&i, 2000); } - - // --- Block 2: - start_session(2); - // add a new candidate for being a validator. account 3 controlled by 4. 
- assert_ok!(Staking::bond(Origin::signed(3), 4, 1500, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(4), ValidatorPrefs::default())); - - // No effects will be seen so far. - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - // --- Block 3: - start_session(3); - - // No effects will be seen so far. Era has not been yet triggered. - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - - // --- Block 4: the validators will now be queued. - start_session(4); - assert_eq!(Staking::active_era().unwrap().index, 1); - - // --- Block 5: the validators are still in queue. - start_session(5); - - // --- Block 6: the validators will now be changed. - start_session(6); - - assert_eq_uvec!(validator_controllers(), vec![20, 4]); - // --- Block 6: Unstake 4 as a validator, freeing up the balance stashed in 3 - // 4 will chill - Staking::chill(Origin::signed(4)).unwrap(); - - // --- Block 7: nothing. 4 is still there. - start_session(7); - assert_eq_uvec!(validator_controllers(), vec![20, 4]); - - // --- Block 8: - start_session(8); - - // --- Block 9: 4 will not be a validator. - start_session(9); - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - // Note: the stashed value of 4 is still lock - assert_eq!( - Staking::ledger(&4), - Some(StakingLedger { - stash: 3, - total: 1500, - active: 1500, - unlocking: vec![], - claimed_rewards: vec![0], - }) - ); - // e.g. it cannot spend more than 500 that it has free from the total 2000 - assert_noop!( - Balances::reserve(&3, 501), - BalancesError::::LiquidityRestrictions - ); - assert_ok!(Balances::reserve(&3, 409)); - }); + // should test: + // * new validators can be added to the default set + // * new ones will be chosen per era + // * either one can unlock the stash and back-down from being a validator via `chill`ing. 
+ ExtBuilder::default() + .nominate(false) + .fair(false) // to give 20 more staked value + .build() + .execute_with(|| { + // --- Block 1: + start_session(1); + + // remember + compare this along with the test. + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + + // put some money in account that we'll use. + for i in 1..5 { + let _ = Balances::make_free_balance_be(&i, 2000); + } + + // --- Block 2: + start_session(2); + // add a new candidate for being a validator. account 3 controlled by 4. + assert_ok!(Staking::bond( + Origin::signed(3), + 4, + 1500, + RewardDestination::Controller + )); + assert_ok!(Staking::validate( + Origin::signed(4), + ValidatorPrefs::default() + )); + + // No effects will be seen so far. + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + + // --- Block 3: + start_session(3); + + // No effects will be seen so far. Era has not been yet triggered. + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + + // --- Block 4: the validators will now be queued. + start_session(4); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // --- Block 5: the validators are still in queue. + start_session(5); + + // --- Block 6: the validators will now be changed. + start_session(6); + + assert_eq_uvec!(validator_controllers(), vec![20, 4]); + // --- Block 6: Unstake 4 as a validator, freeing up the balance stashed in 3 + // 4 will chill + Staking::chill(Origin::signed(4)).unwrap(); + + // --- Block 7: nothing. 4 is still there. + start_session(7); + assert_eq_uvec!(validator_controllers(), vec![20, 4]); + + // --- Block 8: + start_session(8); + + // --- Block 9: 4 will not be a validator. + start_session(9); + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + + // Note: the stashed value of 4 is still lock + assert_eq!( + Staking::ledger(&4), + Some(StakingLedger { + stash: 3, + total: 1500, + active: 1500, + unlocking: vec![], + claimed_rewards: vec![0], + }) + ); + // e.g. 
it cannot spend more than 500 that it has free from the total 2000 + assert_noop!( + Balances::reserve(&3, 501), + BalancesError::::LiquidityRestrictions + ); + assert_ok!(Balances::reserve(&3, 409)); + }); } #[test] fn less_than_needed_candidates_works() { - ExtBuilder::default() - .minimum_validator_count(1) - .validator_count(4) - .nominate(false) - .num_validators(3) - .build() - .execute_with(|| { - assert_eq!(Staking::validator_count(), 4); - assert_eq!(Staking::minimum_validator_count(), 1); - assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); - - mock::start_era(1); - - // Previous set is selected. NO election algorithm is even executed. - assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); - - // But the exposure is updated in a simple way. No external votes exists. - // This is purely self-vote. - assert!( - ErasStakers::::iter_prefix_values(Staking::active_era().unwrap().index) - .all(|exposure| exposure.others.is_empty()) - ); - }); + ExtBuilder::default() + .minimum_validator_count(1) + .validator_count(4) + .nominate(false) + .num_validators(3) + .build() + .execute_with(|| { + assert_eq!(Staking::validator_count(), 4); + assert_eq!(Staking::minimum_validator_count(), 1); + assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); + + mock::start_era(1); + + // Previous set is selected. NO election algorithm is even executed. + assert_eq_uvec!(validator_controllers(), vec![30, 20, 10]); + + // But the exposure is updated in a simple way. No external votes exists. + // This is purely self-vote. 
+ assert!( + ErasStakers::::iter_prefix_values(Staking::active_era().unwrap().index) + .all(|exposure| exposure.others.is_empty()) + ); + }); } #[test] fn no_candidate_emergency_condition() { - ExtBuilder::default() - .minimum_validator_count(1) - .validator_count(15) - .num_validators(4) - .validator_pool(true) - .nominate(false) - .build() - .execute_with(|| { - // initial validators - assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); - let prefs = ValidatorPrefs { commission: Perbill::one() }; - ::Validators::insert(11, prefs.clone()); - - // set the minimum validator count. - ::MinimumValidatorCount::put(10); - - // try to chill - let _ = Staking::chill(Origin::signed(10)); - - // trigger era - mock::start_era(1); - - // Previous ones are elected. chill is invalidates. TODO: #2494 - assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); - // Though the validator preferences has been removed. - assert!(Staking::validators(11) != prefs); - }); + ExtBuilder::default() + .minimum_validator_count(1) + .validator_count(15) + .num_validators(4) + .validator_pool(true) + .nominate(false) + .build() + .execute_with(|| { + // initial validators + assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); + let prefs = ValidatorPrefs { + commission: Perbill::one(), + }; + ::Validators::insert(11, prefs.clone()); + + // set the minimum validator count. + ::MinimumValidatorCount::put(10); + + // try to chill + let _ = Staking::chill(Origin::signed(10)); + + // trigger era + mock::start_era(1); + + // Previous ones are elected. chill is invalidates. TODO: #2494 + assert_eq_uvec!(validator_controllers(), vec![10, 20, 30, 40]); + // Though the validator preferences has been removed. 
+ assert!(Staking::validators(11) != prefs); + }); } #[test] fn nominating_and_rewards_should_work() { - // PHRAGMEN OUTPUT: running this test with the reference impl gives: - // - // Sequential Phragmén gives - // 10 is elected with stake 2200.0 and score 0.0003333333333333333 - // 20 is elected with stake 1800.0 and score 0.0005555555555555556 - - // 10 has load 0.0003333333333333333 and supported - // 10 with stake 1000.0 - // 20 has load 0.0005555555555555556 and supported - // 20 with stake 1000.0 - // 30 has load 0 and supported - // 30 with stake 0 - // 40 has load 0 and supported - // 40 with stake 0 - // 2 has load 0.0005555555555555556 and supported - // 10 with stake 600.0 20 with stake 400.0 30 with stake 0.0 - // 4 has load 0.0005555555555555556 and supported - // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 - - // Sequential Phragmén with post processing gives - // 10 is elected with stake 2000.0 and score 0.0003333333333333333 - // 20 is elected with stake 2000.0 and score 0.0005555555555555556 - - // 10 has load 0.0003333333333333333 and supported - // 10 with stake 1000.0 - // 20 has load 0.0005555555555555556 and supported - // 20 with stake 1000.0 - // 30 has load 0 and supported - // 30 with stake 0 - // 40 has load 0 and supported - // 40 with stake 0 - // 2 has load 0.0005555555555555556 and supported - // 10 with stake 400.0 20 with stake 600.0 30 with stake 0 - // 4 has load 0.0005555555555555556 and supported - // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 - ExtBuilder::default() - .nominate(false) - .validator_pool(true) - .build() - .execute_with(|| { - // initial validators -- everyone is actually even. 
- assert_eq_uvec!(validator_controllers(), vec![40, 30]); - - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(20), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(30), RewardDestination::Controller)); - assert_ok!(Staking::set_payee(Origin::signed(40), RewardDestination::Controller)); - - // give the man some money - let initial_balance = 1000; - for i in [1, 2, 3, 4, 5, 10, 11, 20, 21].iter() { - let _ = Balances::make_free_balance_be(i, initial_balance); - } - - // bond two account pairs and state interest in nomination. - // 2 will nominate for 10, 20, 30 - assert_ok!(Staking::bond(Origin::signed(1), 2, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31])); - // 4 will nominate for 10, 20, 40 - assert_ok!(Staking::bond(Origin::signed(3), 4, 1000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); - - // the total reward for era 0 - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(41, 1)]); - >::reward_by_ids(vec![(31, 1)]); - - mock::start_era(1); - - // 10 and 20 have more votes, they will be chosen by phragmen. - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - // OLD validators must have already received some rewards. - mock::make_all_reward_payment(0); - assert_eq!(Balances::total_balance(&40), 1 + total_payout_0 / 2); - assert_eq!(Balances::total_balance(&30), 1 + total_payout_0 / 2); - - // ------ check the staked value of all parties. 
- - // 30 and 40 are not chosen anymore - assert_eq!(ErasStakers::::iter_prefix_values(Staking::active_era().unwrap().index).count(), 2); - assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - Exposure { - total: 1000 + 800, - own: 1000, - others: vec![ - IndividualExposure { who: 3, value: 400 }, - IndividualExposure { who: 1, value: 400 }, - ] - }, - ); - assert_eq!( - Staking::eras_stakers(Staking::active_era().unwrap().index, 21), - Exposure { - total: 1000 + 1200, - own: 1000, - others: vec![ - IndividualExposure { who: 3, value: 600 }, - IndividualExposure { who: 1, value: 600 }, - ] - }, - ); - - // the total reward for era 1 - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(21, 2)]); - >::reward_by_ids(vec![(11, 1)]); - - mock::start_era(2); - - // nothing else will happen, era ends and rewards are paid again, - // it is expected that nominators will also be paid. See below - - mock::make_all_reward_payment(1); - let payout_for_10 = total_payout_1 / 3; - let payout_for_20 = 2 * total_payout_1 / 3; - // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 - assert_eq_error_rate!( - Balances::total_balance(&2), - initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, - ); - // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 - assert_eq_error_rate!( - Balances::total_balance(&4), - initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), - 1, - ); - - // Validator 10: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 - assert_eq_error_rate!( - Balances::total_balance(&10), - initial_balance + 5 * payout_for_10 / 9, - 1, - ); - // Validator 20: got 1200 / 2200 external stake => 12/22 =? 
6/11 => Validator's share = 5/11 - assert_eq_error_rate!( - Balances::total_balance(&20), - initial_balance + 5 * payout_for_20 / 11, - 1, - ); - }); + // PHRAGMEN OUTPUT: running this test with the reference impl gives: + // + // Sequential Phragmén gives + // 10 is elected with stake 2200.0 and score 0.0003333333333333333 + // 20 is elected with stake 1800.0 and score 0.0005555555555555556 + + // 10 has load 0.0003333333333333333 and supported + // 10 with stake 1000.0 + // 20 has load 0.0005555555555555556 and supported + // 20 with stake 1000.0 + // 30 has load 0 and supported + // 30 with stake 0 + // 40 has load 0 and supported + // 40 with stake 0 + // 2 has load 0.0005555555555555556 and supported + // 10 with stake 600.0 20 with stake 400.0 30 with stake 0.0 + // 4 has load 0.0005555555555555556 and supported + // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 + + // Sequential Phragmén with post processing gives + // 10 is elected with stake 2000.0 and score 0.0003333333333333333 + // 20 is elected with stake 2000.0 and score 0.0005555555555555556 + + // 10 has load 0.0003333333333333333 and supported + // 10 with stake 1000.0 + // 20 has load 0.0005555555555555556 and supported + // 20 with stake 1000.0 + // 30 has load 0 and supported + // 30 with stake 0 + // 40 has load 0 and supported + // 40 with stake 0 + // 2 has load 0.0005555555555555556 and supported + // 10 with stake 400.0 20 with stake 600.0 30 with stake 0 + // 4 has load 0.0005555555555555556 and supported + // 10 with stake 600.0 20 with stake 400.0 40 with stake 0.0 + ExtBuilder::default() + .nominate(false) + .validator_pool(true) + .build() + .execute_with(|| { + // initial validators -- everyone is actually even. 
+ assert_eq_uvec!(validator_controllers(), vec![40, 30]); + + // Set payee to controller + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + Origin::signed(20), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + Origin::signed(30), + RewardDestination::Controller + )); + assert_ok!(Staking::set_payee( + Origin::signed(40), + RewardDestination::Controller + )); + + // give the man some money + let initial_balance = 1000; + for i in [1, 2, 3, 4, 5, 10, 11, 20, 21].iter() { + let _ = Balances::make_free_balance_be(i, initial_balance); + } + + // bond two account pairs and state interest in nomination. + // 2 will nominate for 10, 20, 30 + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 21, 31])); + // 4 will nominate for 10, 20, 40 + assert_ok!(Staking::bond( + Origin::signed(3), + 4, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(Origin::signed(4), vec![11, 21, 41])); + + // the total reward for era 0 + let total_payout_0 = current_total_payout_for_duration(3000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(41, 1)]); + >::reward_by_ids(vec![(31, 1)]); + + mock::start_era(1); + + // 10 and 20 have more votes, they will be chosen by phragmen. + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + + // OLD validators must have already received some rewards. + mock::make_all_reward_payment(0); + assert_eq!(Balances::total_balance(&40), 1 + total_payout_0 / 2); + assert_eq!(Balances::total_balance(&30), 1 + total_payout_0 / 2); + + // ------ check the staked value of all parties. 
+ + // 30 and 40 are not chosen anymore + assert_eq!( + ErasStakers::::iter_prefix_values(Staking::active_era().unwrap().index) + .count(), + 2 + ); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Exposure { + total: 1000 + 800, + own: 1000, + others: vec![ + IndividualExposure { who: 3, value: 400 }, + IndividualExposure { who: 1, value: 400 }, + ] + }, + ); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + Exposure { + total: 1000 + 1200, + own: 1000, + others: vec![ + IndividualExposure { who: 3, value: 600 }, + IndividualExposure { who: 1, value: 600 }, + ] + }, + ); + + // the total reward for era 1 + let total_payout_1 = current_total_payout_for_duration(3000); + assert!(total_payout_1 > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(21, 2)]); + >::reward_by_ids(vec![(11, 1)]); + + mock::start_era(2); + + // nothing else will happen, era ends and rewards are paid again, + // it is expected that nominators will also be paid. See below + + mock::make_all_reward_payment(1); + let payout_for_10 = total_payout_1 / 3; + let payout_for_20 = 2 * total_payout_1 / 3; + // Nominator 2: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 + assert_eq_error_rate!( + Balances::total_balance(&2), + initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), + 1, + ); + // Nominator 4: has [400/1800 ~ 2/9 from 10] + [600/2200 ~ 3/11 from 20]'s reward. ==> 2/9 + 3/11 + assert_eq_error_rate!( + Balances::total_balance(&4), + initial_balance + (2 * payout_for_10 / 9 + 3 * payout_for_20 / 11), + 1, + ); + + // Validator 10: got 800 / 1800 external stake => 8/18 =? 4/9 => Validator's share = 5/9 + assert_eq_error_rate!( + Balances::total_balance(&10), + initial_balance + 5 * payout_for_10 / 9, + 1, + ); + // Validator 20: got 1200 / 2200 external stake => 12/22 =? 
6/11 => Validator's share = 5/11 + assert_eq_error_rate!( + Balances::total_balance(&20), + initial_balance + 5 * payout_for_20 / 11, + 1, + ); + }); } #[test] fn nominators_also_get_slashed() { - // A nominator should be slashed if the validator they nominated is slashed - // Here is the breakdown of roles: - // 10 - is the controller of 11 - // 11 - is the stash. - // 2 - is the nominator of 20, 10 - ExtBuilder::default().nominate(false).build_and_execute(|| { - assert_eq!(Staking::validator_count(), 2); - - // Set payee to controller - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - // give the man some money. - let initial_balance = 1000; - for i in [1, 2, 3, 10].iter() { - let _ = Balances::make_free_balance_be(i, initial_balance); - } - - // 2 will nominate for 10, 20 - let nominator_stake = 500; - assert_ok!(Staking::bond(Origin::signed(1), 2, nominator_stake, RewardDestination::default())); - assert_ok!(Staking::nominate(Origin::signed(2), vec![20, 10])); - - let total_payout = current_total_payout_for_duration(3000); - assert!(total_payout > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); - - // new era, pay rewards, - mock::start_era(1); - - // Nominator stash didn't collect any. 
- assert_eq!(Balances::total_balance(&2), initial_balance); - - // 10 goes offline - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - &[Perbill::from_percent(5)], - ); - let expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - let slash_value = 50; - let total_slash = expo.total.min(slash_value); - let validator_slash = expo.own.min(total_slash); - let nominator_slash = nominator_stake.min(total_slash - validator_slash); - - // initial + first era reward + slash - assert_eq!(Balances::total_balance(&11), initial_balance - validator_slash); - assert_eq!(Balances::total_balance(&2), initial_balance - nominator_slash); - - // Because slashing happened. - assert!(is_disabled(10)); - }); + // A nominator should be slashed if the validator they nominated is slashed + // Here is the breakdown of roles: + // 10 - is the controller of 11 + // 11 - is the stash. + // 2 - is the nominator of 20, 10 + ExtBuilder::default().nominate(false).build_and_execute(|| { + assert_eq!(Staking::validator_count(), 2); + + // Set payee to controller + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + // give the man some money. + let initial_balance = 1000; + for i in [1, 2, 3, 10].iter() { + let _ = Balances::make_free_balance_be(i, initial_balance); + } + + // 2 will nominate for 10, 20 + let nominator_stake = 500; + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + nominator_stake, + RewardDestination::default() + )); + assert_ok!(Staking::nominate(Origin::signed(2), vec![20, 10])); + + let total_payout = current_total_payout_for_duration(3000); + assert!(total_payout > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(11, 1)]); + + // new era, pay rewards, + mock::start_era(1); + + // Nominator stash didn't collect any. 
+ assert_eq!(Balances::total_balance(&2), initial_balance); + + // 10 goes offline + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(5)], + ); + let expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let slash_value = 50; + let total_slash = expo.total.min(slash_value); + let validator_slash = expo.own.min(total_slash); + let nominator_slash = nominator_stake.min(total_slash - validator_slash); + + // initial + first era reward + slash + assert_eq!( + Balances::total_balance(&11), + initial_balance - validator_slash + ); + assert_eq!( + Balances::total_balance(&2), + initial_balance - nominator_slash + ); + + // Because slashing happened. + assert!(is_disabled(10)); + }); } #[test] fn double_staking_should_fail() { - // should test (in the same order): - // * an account already bonded as stash cannot be be stashed again. - // * an account already bonded as stash cannot nominate. - // * an account already bonded as controller can nominate. - ExtBuilder::default().build_and_execute(|| { - let arbitrary_value = 5; - // 2 = controller, 1 stashed => ok - assert_ok!( - Staking::bond(Origin::signed(1), 2, arbitrary_value, - RewardDestination::default()) - ); - // 4 = not used so far, 1 stashed => not allowed. - assert_noop!( - Staking::bond(Origin::signed(1), 4, arbitrary_value, - RewardDestination::default()), Error::::AlreadyBonded, - ); - // 1 = stashed => attempting to nominate should fail. - assert_noop!(Staking::nominate(Origin::signed(1), vec![1]), Error::::NotController); - // 2 = controller => nominating should work. - assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); - }); + // should test (in the same order): + // * an account already bonded as stash cannot be be stashed again. + // * an account already bonded as stash cannot nominate. + // * an account already bonded as controller can nominate. 
+ ExtBuilder::default().build_and_execute(|| { + let arbitrary_value = 5; + // 2 = controller, 1 stashed => ok + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + arbitrary_value, + RewardDestination::default() + )); + // 4 = not used so far, 1 stashed => not allowed. + assert_noop!( + Staking::bond( + Origin::signed(1), + 4, + arbitrary_value, + RewardDestination::default() + ), + Error::::AlreadyBonded, + ); + // 1 = stashed => attempting to nominate should fail. + assert_noop!( + Staking::nominate(Origin::signed(1), vec![1]), + Error::::NotController + ); + // 2 = controller => nominating should work. + assert_ok!(Staking::nominate(Origin::signed(2), vec![1])); + }); } #[test] fn double_controlling_should_fail() { - // should test (in the same order): - // * an account already bonded as controller CANNOT be reused as the controller of another account. - ExtBuilder::default().build_and_execute(|| { - let arbitrary_value = 5; - // 2 = controller, 1 stashed => ok - assert_ok!(Staking::bond( - Origin::signed(1), - 2, - arbitrary_value, - RewardDestination::default(), - )); - // 2 = controller, 3 stashed (Note that 2 is reused.) => no-op - assert_noop!( - Staking::bond(Origin::signed(3), 2, arbitrary_value, RewardDestination::default()), - Error::::AlreadyPaired, - ); - }); + // should test (in the same order): + // * an account already bonded as controller CANNOT be reused as the controller of another account. + ExtBuilder::default().build_and_execute(|| { + let arbitrary_value = 5; + // 2 = controller, 1 stashed => ok + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + arbitrary_value, + RewardDestination::default(), + )); + // 2 = controller, 3 stashed (Note that 2 is reused.) 
=> no-op + assert_noop!( + Staking::bond( + Origin::signed(3), + 2, + arbitrary_value, + RewardDestination::default() + ), + Error::::AlreadyPaired, + ); + }); } #[test] fn session_and_eras_work() { - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::active_era().unwrap().index, 0); - assert_eq!(Session::current_index(), 0); - - // Session 1: No change. - start_session(1); - assert_eq!(Session::current_index(), 1); - assert_eq!(Staking::active_era().unwrap().index, 0); - - // Session 2: No change. - start_session(2); - assert_eq!(Session::current_index(), 2); - assert_eq!(Staking::active_era().unwrap().index, 0); - - // Session 3: Era increment. - start_session(3); - assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::active_era().unwrap().index, 1); - - // Session 4: No change. - start_session(4); - assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::active_era().unwrap().index, 1); - - // Session 5: No change. - start_session(5); - assert_eq!(Session::current_index(), 5); - assert_eq!(Staking::active_era().unwrap().index, 1); - - // Session 6: Era increment. - start_session(6); - assert_eq!(Session::current_index(), 6); - assert_eq!(Staking::active_era().unwrap().index, 2); - }); + ExtBuilder::default().build_and_execute(|| { + assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(Session::current_index(), 0); + + // Session 1: No change. + start_session(1); + assert_eq!(Session::current_index(), 1); + assert_eq!(Staking::active_era().unwrap().index, 0); + + // Session 2: No change. + start_session(2); + assert_eq!(Session::current_index(), 2); + assert_eq!(Staking::active_era().unwrap().index, 0); + + // Session 3: Era increment. + start_session(3); + assert_eq!(Session::current_index(), 3); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // Session 4: No change. 
+ start_session(4); + assert_eq!(Session::current_index(), 4); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // Session 5: No change. + start_session(5); + assert_eq!(Session::current_index(), 5); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // Session 6: Era increment. + start_session(6); + assert_eq!(Session::current_index(), 6); + assert_eq!(Staking::active_era().unwrap().index, 2); + }); } #[test] fn forcing_new_era_works() { - ExtBuilder::default().build_and_execute(|| { - // normal flow of session. - assert_eq!(Staking::active_era().unwrap().index, 0); - start_session(0); - assert_eq!(Staking::active_era().unwrap().index, 0); - start_session(1); - assert_eq!(Staking::active_era().unwrap().index, 0); - start_session(2); - assert_eq!(Staking::active_era().unwrap().index, 0); - start_session(3); - assert_eq!(Staking::active_era().unwrap().index, 1); - - // no era change. - ForceEra::put(Forcing::ForceNone); - start_session(4); - assert_eq!(Staking::active_era().unwrap().index, 1); - start_session(5); - assert_eq!(Staking::active_era().unwrap().index, 1); - start_session(6); - assert_eq!(Staking::active_era().unwrap().index, 1); - start_session(7); - assert_eq!(Staking::active_era().unwrap().index, 1); - - // back to normal. - // this immediately starts a new session. 
- ForceEra::put(Forcing::NotForcing); - start_session(8); - assert_eq!(Staking::active_era().unwrap().index, 1); // There is one session delay - start_session(9); - assert_eq!(Staking::active_era().unwrap().index, 2); - - // forceful change - ForceEra::put(Forcing::ForceAlways); - start_session(10); - assert_eq!(Staking::active_era().unwrap().index, 2); // There is one session delay - start_session(11); - assert_eq!(Staking::active_era().unwrap().index, 3); - start_session(12); - assert_eq!(Staking::active_era().unwrap().index, 4); - - // just one forceful change - ForceEra::put(Forcing::ForceNew); - start_session(13); - assert_eq!(Staking::active_era().unwrap().index, 5); - assert_eq!(ForceEra::get(), Forcing::NotForcing); - start_session(14); - assert_eq!(Staking::active_era().unwrap().index, 6); - start_session(15); - assert_eq!(Staking::active_era().unwrap().index, 6); - - }); + ExtBuilder::default().build_and_execute(|| { + // normal flow of session. + assert_eq!(Staking::active_era().unwrap().index, 0); + start_session(0); + assert_eq!(Staking::active_era().unwrap().index, 0); + start_session(1); + assert_eq!(Staking::active_era().unwrap().index, 0); + start_session(2); + assert_eq!(Staking::active_era().unwrap().index, 0); + start_session(3); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // no era change. + ForceEra::put(Forcing::ForceNone); + start_session(4); + assert_eq!(Staking::active_era().unwrap().index, 1); + start_session(5); + assert_eq!(Staking::active_era().unwrap().index, 1); + start_session(6); + assert_eq!(Staking::active_era().unwrap().index, 1); + start_session(7); + assert_eq!(Staking::active_era().unwrap().index, 1); + + // back to normal. + // this immediately starts a new session. 
+ ForceEra::put(Forcing::NotForcing); + start_session(8); + assert_eq!(Staking::active_era().unwrap().index, 1); // There is one session delay + start_session(9); + assert_eq!(Staking::active_era().unwrap().index, 2); + + // forceful change + ForceEra::put(Forcing::ForceAlways); + start_session(10); + assert_eq!(Staking::active_era().unwrap().index, 2); // There is one session delay + start_session(11); + assert_eq!(Staking::active_era().unwrap().index, 3); + start_session(12); + assert_eq!(Staking::active_era().unwrap().index, 4); + + // just one forceful change + ForceEra::put(Forcing::ForceNew); + start_session(13); + assert_eq!(Staking::active_era().unwrap().index, 5); + assert_eq!(ForceEra::get(), Forcing::NotForcing); + start_session(14); + assert_eq!(Staking::active_era().unwrap().index, 6); + start_session(15); + assert_eq!(Staking::active_era().unwrap().index, 6); + }); } #[test] fn cannot_transfer_staked_balance() { - // Tests that a stash account cannot transfer funds - ExtBuilder::default().nominate(false).build_and_execute(|| { - // Confirm account 11 is stashed - assert_eq!(Staking::bonded(&11), Some(10)); - // Confirm account 11 has some free balance - assert_eq!(Balances::free_balance(11), 1000); - // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); - // Confirm account 11 cannot transfer as a result - assert_noop!( - Balances::transfer(Origin::signed(11), 20, 1), - BalancesError::::LiquidityRestrictions - ); - - // Give account 11 extra free balance - let _ = Balances::make_free_balance_be(&11, 10000); - // Confirm that account 11 can now transfer some balance - assert_ok!(Balances::transfer(Origin::signed(11), 20, 1)); - }); + // Tests that a stash account cannot transfer funds + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Confirm account 11 is stashed + assert_eq!(Staking::bonded(&11), Some(10)); + // Confirm account 11 has 
some free balance + assert_eq!(Balances::free_balance(11), 1000); + // Confirm account 11 (via controller 10) is totally staked + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, + 1000 + ); + // Confirm account 11 cannot transfer as a result + assert_noop!( + Balances::transfer(Origin::signed(11), 20, 1), + BalancesError::::LiquidityRestrictions + ); + + // Give account 11 extra free balance + let _ = Balances::make_free_balance_be(&11, 10000); + // Confirm that account 11 can now transfer some balance + assert_ok!(Balances::transfer(Origin::signed(11), 20, 1)); + }); } #[test] fn cannot_transfer_staked_balance_2() { - // Tests that a stash account cannot transfer funds - // Same test as above but with 20, and more accurate. - // 21 has 2000 free balance but 1000 at stake - ExtBuilder::default().nominate(false).fair(true).build_and_execute(|| { - // Confirm account 21 is stashed - assert_eq!(Staking::bonded(&21), Some(20)); - // Confirm account 21 has some free balance - assert_eq!(Balances::free_balance(21), 2000); - // Confirm account 21 (via controller 20) is totally staked - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 1000); - // Confirm account 21 can transfer at most 1000 - assert_noop!( - Balances::transfer(Origin::signed(21), 20, 1001), - BalancesError::::LiquidityRestrictions - ); - assert_ok!(Balances::transfer(Origin::signed(21), 20, 1000)); - }); + // Tests that a stash account cannot transfer funds + // Same test as above but with 20, and more accurate. 
+ // 21 has 2000 free balance but 1000 at stake + ExtBuilder::default() + .nominate(false) + .fair(true) + .build_and_execute(|| { + // Confirm account 21 is stashed + assert_eq!(Staking::bonded(&21), Some(20)); + // Confirm account 21 has some free balance + assert_eq!(Balances::free_balance(21), 2000); + // Confirm account 21 (via controller 20) is totally staked + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, + 1000 + ); + // Confirm account 21 can transfer at most 1000 + assert_noop!( + Balances::transfer(Origin::signed(21), 20, 1001), + BalancesError::::LiquidityRestrictions + ); + assert_ok!(Balances::transfer(Origin::signed(21), 20, 1000)); + }); } #[test] fn cannot_reserve_staked_balance() { - // Checks that a bonded account cannot reserve balance from free balance - ExtBuilder::default().build_and_execute(|| { - // Confirm account 11 is stashed - assert_eq!(Staking::bonded(&11), Some(10)); - // Confirm account 11 has some free balance - assert_eq!(Balances::free_balance(11), 1000); - // Confirm account 11 (via controller 10) is totally staked - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, 1000); - // Confirm account 11 cannot transfer as a result - assert_noop!( - Balances::reserve(&11, 1), - BalancesError::::LiquidityRestrictions - ); - - // Give account 11 extra free balance - let _ = Balances::make_free_balance_be(&11, 10000); - // Confirm account 11 can now reserve balance - assert_ok!(Balances::reserve(&11, 1)); - }); + // Checks that a bonded account cannot reserve balance from free balance + ExtBuilder::default().build_and_execute(|| { + // Confirm account 11 is stashed + assert_eq!(Staking::bonded(&11), Some(10)); + // Confirm account 11 has some free balance + assert_eq!(Balances::free_balance(11), 1000); + // Confirm account 11 (via controller 10) is totally staked + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, + 1000 + ); + // Confirm 
account 11 cannot transfer as a result + assert_noop!( + Balances::reserve(&11, 1), + BalancesError::::LiquidityRestrictions + ); + + // Give account 11 extra free balance + let _ = Balances::make_free_balance_be(&11, 10000); + // Confirm account 11 can now reserve balance + assert_ok!(Balances::reserve(&11, 1)); + }); } #[test] fn reward_destination_works() { - // Rewards go to the correct destination as determined in Payee - ExtBuilder::default().nominate(false).build_and_execute(|| { - // Check that account 11 is a validator - assert!(Session::validators().contains(&11)); - // Check the balance of the validator account - assert_eq!(Balances::free_balance(10), 1); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 1000); - // Check how much is at stake - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - })); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); - - mock::start_era(1); - mock::make_all_reward_payment(0); - - // Check that RewardDestination is Staked (default) - assert_eq!(Staking::payee(&11), RewardDestination::Staked); - // Check that reward went to the stash account of validator - assert_eq!(Balances::free_balance(11), 1000 + total_payout_0); - // Check that amount at stake increased accordingly - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + total_payout_0, - active: 1000 + total_payout_0, - unlocking: vec![], - claimed_rewards: vec![0], - })); - - //Change RewardDestination to Stash - >::insert(&11, RewardDestination::Stash); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - 
assert!(total_payout_1 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); - - mock::start_era(2); - mock::make_all_reward_payment(1); - - // Check that RewardDestination is Stash - assert_eq!(Staking::payee(&11), RewardDestination::Stash); - // Check that reward went to the stash account - assert_eq!(Balances::free_balance(11), 1000 + total_payout_0 + total_payout_1); - // Record this value - let recorded_stash_balance = 1000 + total_payout_0 + total_payout_1; - // Check that amount at stake is NOT increased - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + total_payout_0, - active: 1000 + total_payout_0, - unlocking: vec![], - claimed_rewards: vec![0,1], - })); - - // Change RewardDestination to Controller - >::insert(&11, RewardDestination::Controller); - - // Check controller balance - assert_eq!(Balances::free_balance(10), 1); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_2 = current_total_payout_for_duration(3000); - assert!(total_payout_2 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); - - mock::start_era(3); - mock::make_all_reward_payment(2); - - // Check that RewardDestination is Controller - assert_eq!(Staking::payee(&11), RewardDestination::Controller); - // Check that reward went to the controller account - assert_eq!(Balances::free_balance(10), 1 + total_payout_2); - // Check that amount at stake is NOT increased - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + total_payout_0, - active: 1000 + total_payout_0, - unlocking: vec![], - claimed_rewards: vec![0,1,2], - })); - // Check that amount in staked account is NOT increased. 
- assert_eq!(Balances::free_balance(11), recorded_stash_balance); - }); + // Rewards go to the correct destination as determined in Payee + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Check that account 11 is a validator + assert!(Session::validators().contains(&11)); + // Check the balance of the validator account + assert_eq!(Balances::free_balance(10), 1); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 1000); + // Check how much is at stake + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(11, 1)]); + + mock::start_era(1); + mock::make_all_reward_payment(0); + + // Check that RewardDestination is Staked (default) + assert_eq!(Staking::payee(&11), RewardDestination::Staked); + // Check that reward went to the stash account of validator + assert_eq!(Balances::free_balance(11), 1000 + total_payout_0); + // Check that amount at stake increased accordingly + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + total_payout_0, + active: 1000 + total_payout_0, + unlocking: vec![], + claimed_rewards: vec![0], + }) + ); + + //Change RewardDestination to Stash + >::insert(&11, RewardDestination::Stash); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3000); + assert!(total_payout_1 > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(11, 1)]); + + mock::start_era(2); + mock::make_all_reward_payment(1); + + // Check that RewardDestination is Stash + assert_eq!(Staking::payee(&11), RewardDestination::Stash); + // 
Check that reward went to the stash account + assert_eq!( + Balances::free_balance(11), + 1000 + total_payout_0 + total_payout_1 + ); + // Record this value + let recorded_stash_balance = 1000 + total_payout_0 + total_payout_1; + // Check that amount at stake is NOT increased + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + total_payout_0, + active: 1000 + total_payout_0, + unlocking: vec![], + claimed_rewards: vec![0, 1], + }) + ); + + // Change RewardDestination to Controller + >::insert(&11, RewardDestination::Controller); + + // Check controller balance + assert_eq!(Balances::free_balance(10), 1); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_2 = current_total_payout_for_duration(3000); + assert!(total_payout_2 > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(11, 1)]); + + mock::start_era(3); + mock::make_all_reward_payment(2); + + // Check that RewardDestination is Controller + assert_eq!(Staking::payee(&11), RewardDestination::Controller); + // Check that reward went to the controller account + assert_eq!(Balances::free_balance(10), 1 + total_payout_2); + // Check that amount at stake is NOT increased + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + total_payout_0, + active: 1000 + total_payout_0, + unlocking: vec![], + claimed_rewards: vec![0, 1, 2], + }) + ); + // Check that amount in staked account is NOT increased. + assert_eq!(Balances::free_balance(11), recorded_stash_balance); + }); } #[test] fn validator_payment_prefs_work() { - // Test that validator preferences are correctly honored - // Note: unstake threshold is being directly tested in slashing tests. - // This test will focus on validator payment. 
- ExtBuilder::default().build_and_execute(|| { - let commission = Perbill::from_percent(40); - >::insert(&11, ValidatorPrefs { - commission: commission.clone(), - }); - - // Reward controller so staked ratio doesn't change. - >::insert(&11, RewardDestination::Controller); - >::insert(&101, RewardDestination::Controller); - - mock::start_era(1); - mock::make_all_reward_payment(0); - - let balance_era_1_10 = Balances::total_balance(&10); - let balance_era_1_100 = Balances::total_balance(&100); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something - let exposure_1 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - >::reward_by_ids(vec![(11, 1)]); - - mock::start_era(2); - mock::make_all_reward_payment(1); - - let taken_cut = commission * total_payout_1; - let shared_cut = total_payout_1 - taken_cut; - let reward_of_10 = shared_cut * exposure_1.own / exposure_1.total + taken_cut; - let reward_of_100 = shared_cut * exposure_1.others[0].value / exposure_1.total; - assert_eq_error_rate!(Balances::total_balance(&10), balance_era_1_10 + reward_of_10, 2); - assert_eq_error_rate!(Balances::total_balance(&100), balance_era_1_100 + reward_of_100, 2); - }); - + // Test that validator preferences are correctly honored + // Note: unstake threshold is being directly tested in slashing tests. + // This test will focus on validator payment. + ExtBuilder::default().build_and_execute(|| { + let commission = Perbill::from_percent(40); + >::insert( + &11, + ValidatorPrefs { + commission: commission.clone(), + }, + ); + + // Reward controller so staked ratio doesn't change. 
+ >::insert(&11, RewardDestination::Controller); + >::insert(&101, RewardDestination::Controller); + + mock::start_era(1); + mock::make_all_reward_payment(0); + + let balance_era_1_10 = Balances::total_balance(&10); + let balance_era_1_100 = Balances::total_balance(&100); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3000); + assert!(total_payout_1 > 100); // Test is meaningful if reward something + let exposure_1 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + >::reward_by_ids(vec![(11, 1)]); + + mock::start_era(2); + mock::make_all_reward_payment(1); + + let taken_cut = commission * total_payout_1; + let shared_cut = total_payout_1 - taken_cut; + let reward_of_10 = shared_cut * exposure_1.own / exposure_1.total + taken_cut; + let reward_of_100 = shared_cut * exposure_1.others[0].value / exposure_1.total; + assert_eq_error_rate!( + Balances::total_balance(&10), + balance_era_1_10 + reward_of_10, + 2 + ); + assert_eq_error_rate!( + Balances::total_balance(&100), + balance_era_1_100 + reward_of_100, + 2 + ); + }); } #[test] fn bond_extra_works() { - // Tests that extra `free_balance` in the stash can be added to stake - // NOTE: this tests only verifies `StakingLedger` for correct updates - // See `bond_extra_and_withdraw_unbonded_works` for more details and updates on `Exposure`. 
- ExtBuilder::default().build_and_execute(|| { - // Check that account 10 is a validator - assert!(>::contains_key(11)); - // Check that account 10 is bonded to account 11 - assert_eq!(Staking::bonded(&11), Some(10)); - // Check how much is at stake - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - })); - - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); - - // Call the bond_extra function from controller, add only 100 - assert_ok!(Staking::bond_extra(Origin::signed(11), 100)); - // There should be 100 more `total` and `active` in the ledger - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + 100, - active: 1000 + 100, - unlocking: vec![], - claimed_rewards: vec![], - })); - - // Call the bond_extra function with a large number, should handle it - assert_ok!(Staking::bond_extra(Origin::signed(11), Balance::max_value())); - // The full amount of the funds should now be in the total and active - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000000, - active: 1000000, - unlocking: vec![], - claimed_rewards: vec![], - })); - }); + // Tests that extra `free_balance` in the stash can be added to stake + // NOTE: this tests only verifies `StakingLedger` for correct updates + // See `bond_extra_and_withdraw_unbonded_works` for more details and updates on `Exposure`. 
+ ExtBuilder::default().build_and_execute(|| { + // Check that account 10 is a validator + assert!(>::contains_key(11)); + // Check that account 10 is bonded to account 11 + assert_eq!(Staking::bonded(&11), Some(10)); + // Check how much is at stake + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // Call the bond_extra function from controller, add only 100 + assert_ok!(Staking::bond_extra(Origin::signed(11), 100)); + // There should be 100 more `total` and `active` in the ledger + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + + // Call the bond_extra function with a large number, should handle it + assert_ok!(Staking::bond_extra( + Origin::signed(11), + Balance::max_value() + )); + // The full amount of the funds should now be in the total and active + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000000, + active: 1000000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + }); } #[test] fn bond_extra_and_withdraw_unbonded_works() { - // * Should test - // * Given an account being bonded [and chosen as a validator](not mandatory) - // * It can add extra funds to the bonded account. - // * it can unbond a portion of its funds from the stash account. - // * Once the unbonding period is done, it can actually take the funds out of the stash. - ExtBuilder::default().nominate(false).build_and_execute(|| { - // Set payee to controller. 
avoids confusion - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); - - // Initial config should be correct - assert_eq!(Staking::active_era().unwrap().index, 0); - assert_eq!(Session::current_index(), 0); - - // check the balance of a validator accounts. - assert_eq!(Balances::total_balance(&10), 1); - - // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); - - // Initial state of 10 - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - })); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000, own: 1000, others: vec![] }); - - // deposit the extra 100 units - Staking::bond_extra(Origin::signed(11), 100).unwrap(); - - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + 100, - active: 1000 + 100, - unlocking: vec![], - claimed_rewards: vec![], - })); - // Exposure is a snapshot! only updated after the next era update. - assert_ne!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); - - // trigger next era. - mock::start_era(2); - assert_eq!(Staking::active_era().unwrap().index, 2); - - // ledger should be the same. - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, - total: 1000 + 100, - active: 1000 + 100, - unlocking: vec![], - claimed_rewards: vec![], - })); - // Exposure is now updated. - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11), Exposure { total: 1000 + 100, own: 1000 + 100, others: vec![] }); - - // Unbond almost all of the funds in stash. 
- Staking::unbond(Origin::signed(10), 1000).unwrap(); - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], claimed_rewards: vec![] }) - ); - - // Attempting to free the balances now will fail. 2 eras need to pass. - Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], claimed_rewards: vec![] })); - - // trigger next era. - mock::start_era(3); - - // nothing yet - Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 1000 + 100, active: 100, unlocking: vec![UnlockChunk{ value: 1000, era: 2 + 3}], claimed_rewards: vec![] })); - - // trigger next era. - mock::start_era(5); - - Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); - // Now the value is free and the staking ledger is updated. - assert_eq!(Staking::ledger(&10), Some(StakingLedger { - stash: 11, total: 100, active: 100, unlocking: vec![], claimed_rewards: vec![] })); - }) + // * Should test + // * Given an account being bonded [and chosen as a validator](not mandatory) + // * It can add extra funds to the bonded account. + // * it can unbond a portion of its funds from the stash account. + // * Once the unbonding period is done, it can actually take the funds out of the stash. + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Set payee to controller. 
avoids confusion + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // Initial config should be correct + assert_eq!(Staking::active_era().unwrap().index, 0); + assert_eq!(Session::current_index(), 0); + + // check the balance of a validator accounts. + assert_eq!(Balances::total_balance(&10), 1); + + // confirm that 10 is a normal validator and gets paid at the end of the era. + mock::start_era(1); + + // Initial state of 10 + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Exposure { + total: 1000, + own: 1000, + others: vec![] + } + ); + + // deposit the extra 100 units + Staking::bond_extra(Origin::signed(11), 100).unwrap(); + + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + // Exposure is a snapshot! only updated after the next era update. + assert_ne!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Exposure { + total: 1000 + 100, + own: 1000 + 100, + others: vec![] + } + ); + + // trigger next era. + mock::start_era(2); + assert_eq!(Staking::active_era().unwrap().index, 2); + + // ledger should be the same. + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 1000 + 100, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + // Exposure is now updated. + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + Exposure { + total: 1000 + 100, + own: 1000 + 100, + others: vec![] + } + ); + + // Unbond almost all of the funds in stash. 
+ Staking::unbond(Origin::signed(10), 1000).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 100, + unlocking: vec![UnlockChunk { + value: 1000, + era: 2 + 3 + }], + claimed_rewards: vec![] + }) + ); + + // Attempting to free the balances now will fail. 2 eras need to pass. + Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 100, + unlocking: vec![UnlockChunk { + value: 1000, + era: 2 + 3 + }], + claimed_rewards: vec![] + }) + ); + + // trigger next era. + mock::start_era(3); + + // nothing yet + Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000 + 100, + active: 100, + unlocking: vec![UnlockChunk { + value: 1000, + era: 2 + 3 + }], + claimed_rewards: vec![] + }) + ); + + // trigger next era. + mock::start_era(5); + + Staking::withdraw_unbonded(Origin::signed(10)).unwrap(); + // Now the value is free and the staking ledger is updated. + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 100, + active: 100, + unlocking: vec![], + claimed_rewards: vec![] + }) + ); + }) } #[test] fn too_many_unbond_calls_should_not_work() { - ExtBuilder::default().build_and_execute(|| { - // locked at era 0 until 3 - for _ in 0..MAX_UNLOCKING_CHUNKS-1 { - assert_ok!(Staking::unbond(Origin::signed(10), 1)); - } - - mock::start_era(1); - - // locked at era 1 until 4 - assert_ok!(Staking::unbond(Origin::signed(10), 1)); - // can't do more. - assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); - - mock::start_era(3); - - assert_noop!(Staking::unbond(Origin::signed(10), 1), Error::::NoMoreChunks); - // free up. - assert_ok!(Staking::withdraw_unbonded(Origin::signed(10))); - - // Can add again. 
- assert_ok!(Staking::unbond(Origin::signed(10), 1)); - assert_eq!(Staking::ledger(&10).unwrap().unlocking.len(), 2); - }) + ExtBuilder::default().build_and_execute(|| { + // locked at era 0 until 3 + for _ in 0..MAX_UNLOCKING_CHUNKS - 1 { + assert_ok!(Staking::unbond(Origin::signed(10), 1)); + } + + mock::start_era(1); + + // locked at era 1 until 4 + assert_ok!(Staking::unbond(Origin::signed(10), 1)); + // can't do more. + assert_noop!( + Staking::unbond(Origin::signed(10), 1), + Error::::NoMoreChunks + ); + + mock::start_era(3); + + assert_noop!( + Staking::unbond(Origin::signed(10), 1), + Error::::NoMoreChunks + ); + // free up. + assert_ok!(Staking::withdraw_unbonded(Origin::signed(10))); + + // Can add again. + assert_ok!(Staking::unbond(Origin::signed(10), 1)); + assert_eq!(Staking::ledger(&10).unwrap().unlocking.len(), 2); + }) } #[test] fn rebond_works() { - // * Should test - // * Given an account being bonded [and chosen as a validator](not mandatory) - // * it can unbond a portion of its funds from the stash account. - // * it can re-bond a portion of the funds scheduled to unlock. - ExtBuilder::default() - .nominate(false) - .build() - .execute_with(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee( - Origin::signed(10), - RewardDestination::Controller - )); - - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); - - // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); - - // Initial state of 10 - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); - - mock::start_era(2); - assert_eq!(Staking::active_era().unwrap().index, 2); - - // Try to rebond some funds. We get an error since no fund is unbonded. 
- assert_noop!( - Staking::rebond(Origin::signed(10), 500), - Error::::NoUnlockChunk, - ); - - // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 900).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![UnlockChunk { - value: 900, - era: 2 + 3, - }], - claimed_rewards: vec![], - }) - ); - - // Re-bond all the funds unbonded. - Staking::rebond(Origin::signed(10), 900).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); - - // Unbond almost all of the funds in stash. - Staking::unbond(Origin::signed(10), 900).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![UnlockChunk { value: 900, era: 5 }], - claimed_rewards: vec![], - }) - ); - - // Re-bond part of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 600, - unlocking: vec![UnlockChunk { value: 400, era: 5 }], - claimed_rewards: vec![], - }) - ); - - // Re-bond the remainder of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); - - // Unbond parts of the funds in stash. 
- Staking::unbond(Origin::signed(10), 300).unwrap(); - Staking::unbond(Origin::signed(10), 300).unwrap(); - Staking::unbond(Origin::signed(10), 300).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![ - UnlockChunk { value: 300, era: 5 }, - UnlockChunk { value: 300, era: 5 }, - UnlockChunk { value: 300, era: 5 }, - ], - claimed_rewards: vec![], - }) - ); - - // Re-bond part of the funds unbonded. - Staking::rebond(Origin::signed(10), 500).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 600, - unlocking: vec![ - UnlockChunk { value: 300, era: 5 }, - UnlockChunk { value: 100, era: 5 }, - ], - claimed_rewards: vec![], - }) - ); - }) + // * Should test + // * Given an account being bonded [and chosen as a validator](not mandatory) + // * it can unbond a portion of its funds from the stash account. + // * it can re-bond a portion of the funds scheduled to unlock. + ExtBuilder::default() + .nominate(false) + .build() + .execute_with(|| { + // Set payee to controller. avoids confusion + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // confirm that 10 is a normal validator and gets paid at the end of the era. + mock::start_era(1); + + // Initial state of 10 + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + + mock::start_era(2); + assert_eq!(Staking::active_era().unwrap().index, 2); + + // Try to rebond some funds. We get an error since no fund is unbonded. + assert_noop!( + Staking::rebond(Origin::signed(10), 500), + Error::::NoUnlockChunk, + ); + + // Unbond almost all of the funds in stash. 
+ Staking::unbond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![UnlockChunk { + value: 900, + era: 2 + 3, + }], + claimed_rewards: vec![], + }) + ); + + // Re-bond all the funds unbonded. + Staking::rebond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + + // Unbond almost all of the funds in stash. + Staking::unbond(Origin::signed(10), 900).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![UnlockChunk { value: 900, era: 5 }], + claimed_rewards: vec![], + }) + ); + + // Re-bond part of the funds unbonded. + Staking::rebond(Origin::signed(10), 500).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 600, + unlocking: vec![UnlockChunk { value: 400, era: 5 }], + claimed_rewards: vec![], + }) + ); + + // Re-bond the remainder of the funds unbonded. + Staking::rebond(Origin::signed(10), 500).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + + // Unbond parts of the funds in stash. + Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); + Staking::unbond(Origin::signed(10), 300).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![ + UnlockChunk { value: 300, era: 5 }, + UnlockChunk { value: 300, era: 5 }, + UnlockChunk { value: 300, era: 5 }, + ], + claimed_rewards: vec![], + }) + ); + + // Re-bond part of the funds unbonded. 
+ Staking::rebond(Origin::signed(10), 500).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 600, + unlocking: vec![ + UnlockChunk { value: 300, era: 5 }, + UnlockChunk { value: 100, era: 5 }, + ], + claimed_rewards: vec![], + }) + ); + }) } #[test] fn rebond_is_fifo() { - // Rebond should proceed by reversing the most recent bond operations. - ExtBuilder::default() - .nominate(false) - .build() - .execute_with(|| { - // Set payee to controller. avoids confusion - assert_ok!(Staking::set_payee( - Origin::signed(10), - RewardDestination::Controller - )); - - // Give account 11 some large free balance greater than total - let _ = Balances::make_free_balance_be(&11, 1000000); - - // confirm that 10 is a normal validator and gets paid at the end of the era. - mock::start_era(1); - - // Initial state of 10 - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); - - mock::start_era(2); - - // Unbond some of the funds in stash. - Staking::unbond(Origin::signed(10), 400).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 600, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - ], - claimed_rewards: vec![], - }) - ); - - mock::start_era(3); - - // Unbond more of the funds in stash. - Staking::unbond(Origin::signed(10), 300).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 300, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - UnlockChunk { value: 300, era: 3 + 3 }, - ], - claimed_rewards: vec![], - }) - ); - - mock::start_era(4); - - // Unbond yet more of the funds in stash. 
- Staking::unbond(Origin::signed(10), 200).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 100, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - UnlockChunk { value: 300, era: 3 + 3 }, - UnlockChunk { value: 200, era: 4 + 3 }, - ], - claimed_rewards: vec![], - }) - ); - - // Re-bond half of the unbonding funds. - Staking::rebond(Origin::signed(10), 400).unwrap(); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 500, - unlocking: vec![ - UnlockChunk { value: 400, era: 2 + 3 }, - UnlockChunk { value: 100, era: 3 + 3 }, - ], - claimed_rewards: vec![], - }) - ); - }) + // Rebond should proceed by reversing the most recent bond operations. + ExtBuilder::default() + .nominate(false) + .build() + .execute_with(|| { + // Set payee to controller. avoids confusion + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + + // Give account 11 some large free balance greater than total + let _ = Balances::make_free_balance_be(&11, 1000000); + + // confirm that 10 is a normal validator and gets paid at the end of the era. + mock::start_era(1); + + // Initial state of 10 + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + + mock::start_era(2); + + // Unbond some of the funds in stash. + Staking::unbond(Origin::signed(10), 400).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 600, + unlocking: vec![UnlockChunk { + value: 400, + era: 2 + 3 + },], + claimed_rewards: vec![], + }) + ); + + mock::start_era(3); + + // Unbond more of the funds in stash. 
+ Staking::unbond(Origin::signed(10), 300).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 300, + unlocking: vec![ + UnlockChunk { + value: 400, + era: 2 + 3 + }, + UnlockChunk { + value: 300, + era: 3 + 3 + }, + ], + claimed_rewards: vec![], + }) + ); + + mock::start_era(4); + + // Unbond yet more of the funds in stash. + Staking::unbond(Origin::signed(10), 200).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 100, + unlocking: vec![ + UnlockChunk { + value: 400, + era: 2 + 3 + }, + UnlockChunk { + value: 300, + era: 3 + 3 + }, + UnlockChunk { + value: 200, + era: 4 + 3 + }, + ], + claimed_rewards: vec![], + }) + ); + + // Re-bond half of the unbonding funds. + Staking::rebond(Origin::signed(10), 400).unwrap(); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 500, + unlocking: vec![ + UnlockChunk { + value: 400, + era: 2 + 3 + }, + UnlockChunk { + value: 100, + era: 3 + 3 + }, + ], + claimed_rewards: vec![], + }) + ); + }) } #[test] fn reward_to_stake_works() { - ExtBuilder::default().nominate(false).fair(false).build_and_execute(|| { - // Confirm validator count is 2 - assert_eq!(Staking::validator_count(), 2); - // Confirm account 10 and 20 are validators - assert!(>::contains_key(&11) && >::contains_key(&21)); - - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 2000); - - // Give the man some money. 
- let _ = Balances::make_free_balance_be(&10, 1000); - let _ = Balances::make_free_balance_be(&20, 1000); - - // Bypass logic and change current exposure - ErasStakers::::insert(0, 21, Exposure { total: 69, own: 69, others: vec![] }); - - // Now lets lower account 20 stake - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69); - >::insert(&20, StakingLedger { stash: 21, total: 69, active: 69, unlocking: vec![], claimed_rewards: vec![] }); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - >::reward_by_ids(vec![(11, 1)]); - >::reward_by_ids(vec![(21, 1)]); - - // New era --> rewards are paid --> stakes are changed - mock::start_era(1); - mock::make_all_reward_payment(0); - - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69); - - let _11_balance = Balances::free_balance(&11); - assert_eq!(_11_balance, 1000 + total_payout_0 / 2); - - // Trigger another new era as the info are frozen before the era start. 
- mock::start_era(2); - - // -- new infos - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, 1000 + total_payout_0 / 2); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, 69 + total_payout_0 / 2); - }); + ExtBuilder::default() + .nominate(false) + .fair(false) + .build_and_execute(|| { + // Confirm validator count is 2 + assert_eq!(Staking::validator_count(), 2); + // Confirm account 10 and 20 are validators + assert!(>::contains_key(&11) && >::contains_key(&21)); + + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, + 1000 + ); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, + 2000 + ); + + // Give the man some money. + let _ = Balances::make_free_balance_be(&10, 1000); + let _ = Balances::make_free_balance_be(&20, 1000); + + // Bypass logic and change current exposure + ErasStakers::::insert( + 0, + 21, + Exposure { + total: 69, + own: 69, + others: vec![], + }, + ); + + // Now lets lower account 20 stake + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, + 69 + ); + >::insert( + &20, + StakingLedger { + stash: 21, + total: 69, + active: 69, + unlocking: vec![], + claimed_rewards: vec![], + }, + ); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + >::reward_by_ids(vec![(11, 1)]); + >::reward_by_ids(vec![(21, 1)]); + + // New era --> rewards are paid --> stakes are changed + mock::start_era(1); + mock::make_all_reward_payment(0); + + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, + 1000 + ); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, + 69 + ); + + let _11_balance = Balances::free_balance(&11); + assert_eq!(_11_balance, 1000 + 
total_payout_0 / 2); + + // Trigger another new era as the info are frozen before the era start. + mock::start_era(2); + + // -- new infos + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, + 1000 + total_payout_0 / 2 + ); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 21).total, + 69 + total_payout_0 / 2 + ); + }); } #[test] fn on_free_balance_zero_stash_removes_validator() { - // Tests that validator storage items are cleaned up when stash is empty - // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Check the balance of the validator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Set some storage items which we expect to be cleaned up - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); - - // Reap the stash - 
assert_ok!(Staking::reap_stash(Origin::NONE, 11)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); + // Tests that validator storage items are cleaned up when stash is empty + // Tests that storage items are untouched when controller is empty + ExtBuilder::default() + .existential_deposit(10) + .build_and_execute(|| { + // Check the balance of the validator account + assert_eq!(Balances::free_balance(10), 256); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Set some storage items which we expect to be cleaned up + // Set payee information + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Stash + )); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = Balances::slash(&11, Balance::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 0); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::NONE, 11)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + assert!(!>::contains_key(&11)); + 
assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); } #[test] fn on_free_balance_zero_stash_removes_nominator() { - // Tests that nominator storage items are cleaned up when stash is empty - // Tests that storage items are untouched when controller is empty - ExtBuilder::default().existential_deposit(10).build_and_execute(|| { - // Make 10 a nominator - assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); - // Check that account 10 is a nominator - assert!(>::contains_key(11)); - // Check the balance of the nominator account - assert_eq!(Balances::free_balance(10), 256); - // Check the balance of the stash account - assert_eq!(Balances::free_balance(11), 256000); - - // Set payee information - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Stash)); - - // Check storage items that should be cleaned up - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of controller to 0 - let _ = Balances::slash(&10, Balance::max_value()); - // Check total balance of account 10 - assert_eq!(Balances::total_balance(&10), 0); - - // Check the balance of the stash account has not been touched - assert_eq!(Balances::free_balance(11), 256000); - // Check these two accounts are still bonded - assert_eq!(Staking::bonded(&11), Some(10)); - - // Check storage items have not changed - assert!(>::contains_key(&10)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - assert!(>::contains_key(&11)); - - // Reduce free_balance of stash to 0 - let _ = Balances::slash(&11, Balance::max_value()); - // Check total balance of stash - assert_eq!(Balances::total_balance(&11), 0); - - // Reap the stash - assert_ok!(Staking::reap_stash(Origin::NONE, 11)); - - // Check storage items do not exist - assert!(!>::contains_key(&10)); - assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - 
assert!(!>::contains_key(&11)); - assert!(!>::contains_key(&11)); - }); + // Tests that nominator storage items are cleaned up when stash is empty + // Tests that storage items are untouched when controller is empty + ExtBuilder::default() + .existential_deposit(10) + .build_and_execute(|| { + // Make 10 a nominator + assert_ok!(Staking::nominate(Origin::signed(10), vec![20])); + // Check that account 10 is a nominator + assert!(>::contains_key(11)); + // Check the balance of the nominator account + assert_eq!(Balances::free_balance(10), 256); + // Check the balance of the stash account + assert_eq!(Balances::free_balance(11), 256000); + + // Set payee information + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Stash + )); + + // Check storage items that should be cleaned up + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of controller to 0 + let _ = Balances::slash(&10, Balance::max_value()); + // Check total balance of account 10 + assert_eq!(Balances::total_balance(&10), 0); + + // Check the balance of the stash account has not been touched + assert_eq!(Balances::free_balance(11), 256000); + // Check these two accounts are still bonded + assert_eq!(Staking::bonded(&11), Some(10)); + + // Check storage items have not changed + assert!(>::contains_key(&10)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + assert!(>::contains_key(&11)); + + // Reduce free_balance of stash to 0 + let _ = Balances::slash(&11, Balance::max_value()); + // Check total balance of stash + assert_eq!(Balances::total_balance(&11), 0); + + // Reap the stash + assert_ok!(Staking::reap_stash(Origin::NONE, 11)); + + // Check storage items do not exist + assert!(!>::contains_key(&10)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + assert!(!>::contains_key(&11)); + }); } - #[test] fn 
switching_roles() { - // Test that it should be possible to switch between roles (nominator, validator, idle) with minimal overhead. - ExtBuilder::default().nominate(false).build_and_execute(|| { - // Reset reward destination - for i in &[10, 20] { assert_ok!(Staking::set_payee(Origin::signed(*i), RewardDestination::Controller)); } - - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - - // put some money in account that we'll use. - for i in 1..7 { let _ = Balances::deposit_creating(&i, 5000); } - - // add 2 nominators - assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 5])); - - assert_ok!(Staking::bond(Origin::signed(3), 4, 500, RewardDestination::Controller)); - assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 1])); - - // add a new validator candidate - assert_ok!(Staking::bond(Origin::signed(5), 6, 1000, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(6), ValidatorPrefs::default())); - - mock::start_era(1); - - // with current nominators 10 and 5 have the most stake - assert_eq_uvec!(validator_controllers(), vec![6, 10]); - - // 2 decides to be a validator. Consequences: - assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); - // new stakes: - // 10: 1000 self vote - // 20: 1000 self vote + 250 vote - // 6 : 1000 self vote - // 2 : 2000 self vote + 250 vote. - // Winners: 20 and 2 - - mock::start_era(2); - - assert_eq_uvec!(validator_controllers(), vec![2, 20]); - }); + // Test that it should be possible to switch between roles (nominator, validator, idle) with minimal overhead. + ExtBuilder::default().nominate(false).build_and_execute(|| { + // Reset reward destination + for i in &[10, 20] { + assert_ok!(Staking::set_payee( + Origin::signed(*i), + RewardDestination::Controller + )); + } + + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + + // put some money in account that we'll use. 
+ for i in 1..7 { + let _ = Balances::deposit_creating(&i, 5000); + } + + // add 2 nominators + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + 2000, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(Origin::signed(2), vec![11, 5])); + + assert_ok!(Staking::bond( + Origin::signed(3), + 4, + 500, + RewardDestination::Controller + )); + assert_ok!(Staking::nominate(Origin::signed(4), vec![21, 1])); + + // add a new validator candidate + assert_ok!(Staking::bond( + Origin::signed(5), + 6, + 1000, + RewardDestination::Controller + )); + assert_ok!(Staking::validate( + Origin::signed(6), + ValidatorPrefs::default() + )); + + mock::start_era(1); + + // with current nominators 10 and 5 have the most stake + assert_eq_uvec!(validator_controllers(), vec![6, 10]); + + // 2 decides to be a validator. Consequences: + assert_ok!(Staking::validate( + Origin::signed(2), + ValidatorPrefs::default() + )); + // new stakes: + // 10: 1000 self vote + // 20: 1000 self vote + 250 vote + // 6 : 1000 self vote + // 2 : 2000 self vote + 250 vote. + // Winners: 20 and 2 + + mock::start_era(2); + + assert_eq_uvec!(validator_controllers(), vec![2, 20]); + }); } #[test] fn wrong_vote_is_null() { - ExtBuilder::default().nominate(false).validator_pool(true).build_and_execute(|| { - assert_eq_uvec!(validator_controllers(), vec![40, 30]); - - // put some money in account that we'll use. - for i in 1..3 { let _ = Balances::deposit_creating(&i, 5000); } - - // add 1 nominators - assert_ok!(Staking::bond(Origin::signed(1), 2, 2000, RewardDestination::default())); - assert_ok!(Staking::nominate(Origin::signed(2), vec![ - 11, 21, // good votes - 1, 2, 15, 1000, 25 // crap votes. No effect. 
- ])); - - // new block - mock::start_era(1); - - assert_eq_uvec!(validator_controllers(), vec![20, 10]); - }); + ExtBuilder::default() + .nominate(false) + .validator_pool(true) + .build_and_execute(|| { + assert_eq_uvec!(validator_controllers(), vec![40, 30]); + + // put some money in account that we'll use. + for i in 1..3 { + let _ = Balances::deposit_creating(&i, 5000); + } + + // add 1 nominators + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + 2000, + RewardDestination::default() + )); + assert_ok!(Staking::nominate( + Origin::signed(2), + vec![ + 11, 21, // good votes + 1, 2, 15, 1000, 25 // crap votes. No effect. + ] + )); + + // new block + mock::start_era(1); + + assert_eq_uvec!(validator_controllers(), vec![20, 10]); + }); } #[test] fn bond_with_no_staked_value() { - // Behavior when someone bonds with no staked value. - // Particularly when she votes and the candidate is elected. - ExtBuilder::default() - .validator_count(3) - .existential_deposit(5) - .nominate(false) - .minimum_validator_count(1) - .build() - .execute_with(|| { - // Can't bond with 1 - assert_noop!( - Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller), - Error::::InsufficientValue, - ); - // bonded with absolute minimum value possible. - assert_ok!(Staking::bond(Origin::signed(1), 2, 5, RewardDestination::Controller)); - assert_eq!(Balances::locks(&1)[0].amount, 5); - - // unbonding even 1 will cause all to be unbonded. - assert_ok!(Staking::unbond(Origin::signed(2), 1)); - assert_eq!( - Staking::ledger(2), - Some(StakingLedger { - stash: 1, - active: 0, - total: 5, - unlocking: vec![UnlockChunk {value: 5, era: 3}], - claimed_rewards: vec![], - }) - ); - - mock::start_era(1); - mock::start_era(2); - - // not yet removed. - assert_ok!(Staking::withdraw_unbonded(Origin::signed(2))); - assert!(Staking::ledger(2).is_some()); - assert_eq!(Balances::locks(&1)[0].amount, 5); - - mock::start_era(3); - - // poof. Account 1 is removed from the staking system. 
- assert_ok!(Staking::withdraw_unbonded(Origin::signed(2))); - assert!(Staking::ledger(2).is_none()); - assert_eq!(Balances::locks(&1).len(), 0); - }); + // Behavior when someone bonds with no staked value. + // Particularly when she votes and the candidate is elected. + ExtBuilder::default() + .validator_count(3) + .existential_deposit(5) + .nominate(false) + .minimum_validator_count(1) + .build() + .execute_with(|| { + // Can't bond with 1 + assert_noop!( + Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller), + Error::::InsufficientValue, + ); + // bonded with absolute minimum value possible. + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + 5, + RewardDestination::Controller + )); + assert_eq!(Balances::locks(&1)[0].amount, 5); + + // unbonding even 1 will cause all to be unbonded. + assert_ok!(Staking::unbond(Origin::signed(2), 1)); + assert_eq!( + Staking::ledger(2), + Some(StakingLedger { + stash: 1, + active: 0, + total: 5, + unlocking: vec![UnlockChunk { value: 5, era: 3 }], + claimed_rewards: vec![], + }) + ); + + mock::start_era(1); + mock::start_era(2); + + // not yet removed. + assert_ok!(Staking::withdraw_unbonded(Origin::signed(2))); + assert!(Staking::ledger(2).is_some()); + assert_eq!(Balances::locks(&1)[0].amount, 5); + + mock::start_era(3); + + // poof. Account 1 is removed from the staking system. + assert_ok!(Staking::withdraw_unbonded(Origin::signed(2))); + assert!(Staking::ledger(2).is_none()); + assert_eq!(Balances::locks(&1).len(), 0); + }); } #[test] fn bond_with_little_staked_value_bounded() { - // Behavior when someone bonds with little staked value. - // Particularly when she votes and the candidate is elected. 
- ExtBuilder::default() - .validator_count(3) - .nominate(false) - .minimum_validator_count(1) - .build() - .execute_with(|| { - // setup - assert_ok!(Staking::chill(Origin::signed(30))); - assert_ok!(Staking::set_payee(Origin::signed(10), RewardDestination::Controller)); - let init_balance_2 = Balances::free_balance(&2); - let init_balance_10 = Balances::free_balance(&10); - - // Stingy validator. - assert_ok!(Staking::bond(Origin::signed(1), 2, 1, RewardDestination::Controller)); - assert_ok!(Staking::validate(Origin::signed(2), ValidatorPrefs::default())); - - // reward era 0 - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - reward_all_elected(); - mock::start_era(1); - mock::make_all_reward_payment(0); - - // 2 is elected. - assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - // And has minimal stake - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); - - // Old ones are rewarded. - assert_eq!(Balances::free_balance(10), init_balance_10 + total_payout_0 / 3); - // no rewards paid to 2. This was initial election. - assert_eq!(Balances::free_balance(2), init_balance_2); - - // reward era 1 - let total_payout_1 = current_total_payout_for_duration(3000); - assert!(total_payout_1 > 100); // Test is meaningful if reward something - reward_all_elected(); - mock::start_era(2); - mock::make_all_reward_payment(1); - - assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, 0); - - assert_eq!(Balances::free_balance(2), init_balance_2 + total_payout_1 / 3); - assert_eq!( - Balances::free_balance(&10), - init_balance_10 + total_payout_0 / 3 + total_payout_1 / 3, - ); - }); + // Behavior when someone bonds with little staked value. + // Particularly when she votes and the candidate is elected. 
+ ExtBuilder::default() + .validator_count(3) + .nominate(false) + .minimum_validator_count(1) + .build() + .execute_with(|| { + // setup + assert_ok!(Staking::chill(Origin::signed(30))); + assert_ok!(Staking::set_payee( + Origin::signed(10), + RewardDestination::Controller + )); + let init_balance_2 = Balances::free_balance(&2); + let init_balance_10 = Balances::free_balance(&10); + + // Stingy validator. + assert_ok!(Staking::bond( + Origin::signed(1), + 2, + 1, + RewardDestination::Controller + )); + assert_ok!(Staking::validate( + Origin::signed(2), + ValidatorPrefs::default() + )); + + // reward era 0 + let total_payout_0 = current_total_payout_for_duration(3000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + reward_all_elected(); + mock::start_era(1); + mock::make_all_reward_payment(0); + + // 2 is elected. + assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); + // And has minimal stake + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, + 0 + ); + + // Old ones are rewarded. + assert_eq!( + Balances::free_balance(10), + init_balance_10 + total_payout_0 / 3 + ); + // no rewards paid to 2. This was initial election. 
+ assert_eq!(Balances::free_balance(2), init_balance_2); + + // reward era 1 + let total_payout_1 = current_total_payout_for_duration(3000); + assert!(total_payout_1 > 100); // Test is meaningful if reward something + reward_all_elected(); + mock::start_era(2); + mock::make_all_reward_payment(1); + + assert_eq_uvec!(validator_controllers(), vec![20, 10, 2]); + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 2).total, + 0 + ); + + assert_eq!( + Balances::free_balance(2), + init_balance_2 + total_payout_1 / 3 + ); + assert_eq!( + Balances::free_balance(&10), + init_balance_10 + total_payout_0 / 3 + total_payout_1 / 3, + ); + }); } #[test] fn new_era_elects_correct_number_of_validators() { - ExtBuilder::default() - .nominate(true) - .validator_pool(true) - .fair(true) - .validator_count(1) - .build() - .execute_with(|| { - assert_eq!(Staking::validator_count(), 1); - assert_eq!(validator_controllers().len(), 1); - - Session::on_initialize(System::block_number()); - - assert_eq!(validator_controllers().len(), 1); - }) + ExtBuilder::default() + .nominate(true) + .validator_pool(true) + .fair(true) + .validator_count(1) + .build() + .execute_with(|| { + assert_eq!(Staking::validator_count(), 1); + assert_eq!(validator_controllers().len(), 1); + + Session::on_initialize(System::block_number()); + + assert_eq!(validator_controllers().len(), 1); + }) } #[test] fn phragmen_should_not_overflow() { - ExtBuilder::default().nominate(false).build_and_execute(|| { - // This is the maximum value that we can have as the outcome of CurrencyToVote. - type Votes = u64; + ExtBuilder::default().nominate(false).build_and_execute(|| { + // This is the maximum value that we can have as the outcome of CurrencyToVote. 
+ type Votes = u64; - let _ = Staking::chill(Origin::signed(10)); - let _ = Staking::chill(Origin::signed(20)); + let _ = Staking::chill(Origin::signed(10)); + let _ = Staking::chill(Origin::signed(20)); - bond_validator(3, 2, Votes::max_value() as Balance); - bond_validator(5, 4, Votes::max_value() as Balance); + bond_validator(3, 2, Votes::max_value() as Balance); + bond_validator(5, 4, Votes::max_value() as Balance); - bond_nominator(7, 6, Votes::max_value() as Balance, vec![3, 5]); - bond_nominator(9, 8, Votes::max_value() as Balance, vec![3, 5]); + bond_nominator(7, 6, Votes::max_value() as Balance, vec![3, 5]); + bond_nominator(9, 8, Votes::max_value() as Balance, vec![3, 5]); - mock::start_era(1); + mock::start_era(1); - assert_eq_uvec!(validator_controllers(), vec![4, 2]); + assert_eq_uvec!(validator_controllers(), vec![4, 2]); - // We can safely convert back to values within [u64, u128]. - assert!(Staking::eras_stakers(active_era(), 3).total > Votes::max_value() as Balance); - assert!(Staking::eras_stakers(active_era(), 5).total > Votes::max_value() as Balance); - }) + // We can safely convert back to values within [u64, u128]. + assert!(Staking::eras_stakers(active_era(), 3).total > Votes::max_value() as Balance); + assert!(Staking::eras_stakers(active_era(), 5).total > Votes::max_value() as Balance); + }) } #[test] fn reward_validator_slashing_validator_does_not_overflow() { - ExtBuilder::default().build_and_execute(|| { - let stake = u64::max_value() as Balance * 2; - let reward_slash = u64::max_value() as Balance * 2; - - // Assert multiplication overflows in balance arithmetic. 
- assert!(stake.checked_mul(reward_slash).is_none()); - - // Set staker - let _ = Balances::make_free_balance_be(&11, stake); - - let exposure = Exposure:: { total: stake, own: stake, others: vec![] }; - let reward = EraRewardPoints:: { - total: 1, - individual: vec![(11, 1)].into_iter().collect(), - }; - - // Check reward - ErasRewardPoints::::insert(0, reward); - ErasStakers::::insert(0, 11, &exposure); - ErasStakersClipped::::insert(0, 11, exposure); - ErasValidatorReward::::insert(0, stake); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 0)); - assert_eq!(Balances::total_balance(&11), stake * 2); - - // Set staker - let _ = Balances::make_free_balance_be(&11, stake); - let _ = Balances::make_free_balance_be(&2, stake); - - // only slashes out of bonded stake are applied. without this line, - // it is 0. - Staking::bond(Origin::signed(2), 20000, stake - 1, RewardDestination::default()).unwrap(); - // Override exposure of 11 - ErasStakers::::insert(0, 11, Exposure { - total: stake, - own: 1, - others: vec![ IndividualExposure { who: 2, value: stake - 1 }] - }); - - // Check slashing - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(100)], - ); - - assert_eq!(Balances::total_balance(&11), stake - 1); - assert_eq!(Balances::total_balance(&2), 1); - }) + ExtBuilder::default().build_and_execute(|| { + let stake = u64::max_value() as Balance * 2; + let reward_slash = u64::max_value() as Balance * 2; + + // Assert multiplication overflows in balance arithmetic. 
+ assert!(stake.checked_mul(reward_slash).is_none()); + + // Set staker + let _ = Balances::make_free_balance_be(&11, stake); + + let exposure = Exposure:: { + total: stake, + own: stake, + others: vec![], + }; + let reward = EraRewardPoints:: { + total: 1, + individual: vec![(11, 1)].into_iter().collect(), + }; + + // Check reward + ErasRewardPoints::::insert(0, reward); + ErasStakers::::insert(0, 11, &exposure); + ErasStakersClipped::::insert(0, 11, exposure); + ErasValidatorReward::::insert(0, stake); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 0)); + assert_eq!(Balances::total_balance(&11), stake * 2); + + // Set staker + let _ = Balances::make_free_balance_be(&11, stake); + let _ = Balances::make_free_balance_be(&2, stake); + + // only slashes out of bonded stake are applied. without this line, + // it is 0. + Staking::bond( + Origin::signed(2), + 20000, + stake - 1, + RewardDestination::default(), + ) + .unwrap(); + // Override exposure of 11 + ErasStakers::::insert( + 0, + 11, + Exposure { + total: stake, + own: 1, + others: vec![IndividualExposure { + who: 2, + value: stake - 1, + }], + }, + ); + + // Check slashing + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(100)], + ); + + assert_eq!(Balances::total_balance(&11), stake - 1); + assert_eq!(Balances::total_balance(&2), 1); + }) } #[test] fn reward_from_authorship_event_handler_works() { - ExtBuilder::default().build_and_execute(|| { - use pallet_authorship::EventHandler; - - assert_eq!(>::author(), 11); - - >::note_author(11); - >::note_uncle(21, 1); - // Rewarding the same two times works. 
- >::note_uncle(11, 1); - - // Not mandatory but must be coherent with rewards - assert_eq_uvec!(Session::validators(), vec![11, 21]); - - // 21 is rewarded as an uncle producer - // 11 is rewarded as a block producer and uncle referencer and uncle producer - assert_eq!( - ErasRewardPoints::::get(Staking::active_era().unwrap().index), - EraRewardPoints { - individual: vec![(11, 20 + 2 * 2 + 1), (21, 1)].into_iter().collect(), - total: 26, - }, - ); - }) + ExtBuilder::default().build_and_execute(|| { + use pallet_authorship::EventHandler; + + assert_eq!(>::author(), 11); + + >::note_author(11); + >::note_uncle(21, 1); + // Rewarding the same two times works. + >::note_uncle(11, 1); + + // Not mandatory but must be coherent with rewards + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + // 21 is rewarded as an uncle producer + // 11 is rewarded as a block producer and uncle referencer and uncle producer + assert_eq!( + ErasRewardPoints::::get(Staking::active_era().unwrap().index), + EraRewardPoints { + individual: vec![(11, 20 + 2 * 2 + 1), (21, 1)].into_iter().collect(), + total: 26, + }, + ); + }) } #[test] fn add_reward_points_fns_works() { - ExtBuilder::default().build_and_execute(|| { - // Not mandatory but must be coherent with rewards - assert_eq!(Session::validators(), vec![21, 11]); - - >::reward_by_ids(vec![ - (21, 1), - (11, 1), - (11, 1), - ]); - - >::reward_by_ids(vec![ - (21, 1), - (11, 1), - (11, 1), - ]); - - assert_eq!( - ErasRewardPoints::::get(Staking::active_era().unwrap().index), - EraRewardPoints { - individual: vec![(11, 4), (21, 2)].into_iter().collect(), - total: 6, - }, - ); - }) + ExtBuilder::default().build_and_execute(|| { + // Not mandatory but must be coherent with rewards + assert_eq!(Session::validators(), vec![21, 11]); + + >::reward_by_ids(vec![(21, 1), (11, 1), (11, 1)]); + + >::reward_by_ids(vec![(21, 1), (11, 1), (11, 1)]); + + assert_eq!( + ErasRewardPoints::::get(Staking::active_era().unwrap().index), + EraRewardPoints 
{ + individual: vec![(11, 4), (21, 2)].into_iter().collect(), + total: 6, + }, + ); + }) } #[test] fn unbonded_balance_is_not_slashable() { - ExtBuilder::default().build_and_execute(|| { - // total amount staked is slashable. - assert_eq!(Staking::slashable_balance_of(&11), 1000); + ExtBuilder::default().build_and_execute(|| { + // total amount staked is slashable. + assert_eq!(Staking::slashable_balance_of(&11), 1000); - assert_ok!(Staking::unbond(Origin::signed(10), 800)); + assert_ok!(Staking::unbond(Origin::signed(10), 800)); - // only the active portion. - assert_eq!(Staking::slashable_balance_of(&11), 200); - }) + // only the active portion. + assert_eq!(Staking::slashable_balance_of(&11), 200); + }) } #[test] fn era_is_always_same_length() { - // This ensures that the sessions is always of the same length if there is no forcing no - // session changes. - ExtBuilder::default().build_and_execute(|| { - let session_per_era = >::get(); - - mock::start_era(1); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era); - - mock::start_era(2); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session_per_era * 2u32); - - let session = Session::current_index(); - ForceEra::put(Forcing::ForceNew); - advance_session(); - advance_session(); - assert_eq!(current_era(), 3); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2); - - mock::start_era(4); - assert_eq!(Staking::eras_start_session_index(current_era()).unwrap(), session + 2u32 + session_per_era); - }); + // This ensures that the sessions is always of the same length if there is no forcing no + // session changes. 
+ ExtBuilder::default().build_and_execute(|| { + let session_per_era = >::get(); + + mock::start_era(1); + assert_eq!( + Staking::eras_start_session_index(current_era()).unwrap(), + session_per_era + ); + + mock::start_era(2); + assert_eq!( + Staking::eras_start_session_index(current_era()).unwrap(), + session_per_era * 2u32 + ); + + let session = Session::current_index(); + ForceEra::put(Forcing::ForceNew); + advance_session(); + advance_session(); + assert_eq!(current_era(), 3); + assert_eq!( + Staking::eras_start_session_index(current_era()).unwrap(), + session + 2 + ); + + mock::start_era(4); + assert_eq!( + Staking::eras_start_session_index(current_era()).unwrap(), + session + 2u32 + session_per_era + ); + }); } #[test] fn offence_forces_new_era() { - ExtBuilder::default().build_and_execute(|| { - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - &[Perbill::from_percent(5)], - ); - - assert_eq!(Staking::force_era(), Forcing::ForceNew); - }); + ExtBuilder::default().build_and_execute(|| { + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(5)], + ); + + assert_eq!(Staking::force_era(), Forcing::ForceNew); + }); } #[test] fn offence_ensures_new_era_without_clobbering() { - ExtBuilder::default().build_and_execute(|| { - assert_ok!(Staking::force_new_era_always(Origin::ROOT)); - assert_eq!(Staking::force_era(), Forcing::ForceAlways); - - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - &[Perbill::from_percent(5)], - ); - - assert_eq!(Staking::force_era(), Forcing::ForceAlways); - }); + ExtBuilder::default().build_and_execute(|| { + assert_ok!(Staking::force_new_era_always(Origin::ROOT)); + assert_eq!(Staking::force_era(), 
Forcing::ForceAlways); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(5)], + ); + + assert_eq!(Staking::force_era(), Forcing::ForceAlways); + }); } #[test] fn offence_deselects_validator_even_when_slash_is_zero() { - ExtBuilder::default().build_and_execute(|| { - assert!(Session::validators().contains(&11)); - assert!(>::contains_key(11)); - - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); - - assert_eq!(Staking::force_era(), Forcing::ForceNew); - assert!(!>::contains_key(11)); - - mock::start_era(1); - - assert!(!Session::validators().contains(&11)); - assert!(!>::contains_key(11)); - }); + ExtBuilder::default().build_and_execute(|| { + assert!(Session::validators().contains(&11)); + assert!(>::contains_key(11)); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); + + assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert!(!>::contains_key(11)); + + mock::start_era(1); + + assert!(!Session::validators().contains(&11)); + assert!(!>::contains_key(11)); + }); } #[test] fn slashing_performed_according_exposure() { - // This test checks that slashing is performed according the exposure (or more precisely, - // historical exposure), not the current balance. - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, 1000); - - // Handle an offence with a historical exposure. 
- on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Exposure { - total: 500, - own: 500, - others: vec![], - }, - ), - reporters: vec![], - }], - &[Perbill::from_percent(50)], - ); - - // The stash account should be slashed for 250 (50% of 500). - assert_eq!(Balances::free_balance(11), 1000 - 250); - }); + // This test checks that slashing is performed according the exposure (or more precisely, + // historical exposure), not the current balance. + ExtBuilder::default().build_and_execute(|| { + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).own, + 1000 + ); + + // Handle an offence with a historical exposure. + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Exposure { + total: 500, + own: 500, + others: vec![], + }, + ), + reporters: vec![], + }], + &[Perbill::from_percent(50)], + ); + + // The stash account should be slashed for 250 (50% of 500). + assert_eq!(Balances::free_balance(11), 1000 - 250); + }); } #[test] fn slash_in_old_span_does_not_deselect() { - ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); - - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); - - assert_eq!(Staking::force_era(), Forcing::ForceNew); - assert!(!>::contains_key(11)); - - mock::start_era(2); - - Staking::validate(Origin::signed(10), Default::default()).unwrap(); - assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(!Session::validators().contains(&11)); - - mock::start_era(3); - - // this staker is in a new slashing span now, having re-registered after - // their prior slash. 
- - on_offence_in_era( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - 1, - ); - - // not forcing for zero-slash and previous span. - assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); - - on_offence_in_era( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - // NOTE: A 100% slash here would clean up the account, causing de-registration. - &[Perbill::from_percent(95)], - 1, - ); - - // or non-zero. - assert_eq!(Staking::force_era(), Forcing::NotForcing); - assert!(>::contains_key(11)); - assert!(Session::validators().contains(&11)); - }); + ExtBuilder::default().build_and_execute(|| { + mock::start_era(1); + + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); + + assert_eq!(Staking::force_era(), Forcing::ForceNew); + assert!(!>::contains_key(11)); + + mock::start_era(2); + + Staking::validate(Origin::signed(10), Default::default()).unwrap(); + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(>::contains_key(11)); + assert!(!Session::validators().contains(&11)); + + mock::start_era(3); + + // this staker is in a new slashing span now, having re-registered after + // their prior slash. + + on_offence_in_era( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + 1, + ); + + // not forcing for zero-slash and previous span. 
+ assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); + + on_offence_in_era( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + // NOTE: A 100% slash here would clean up the account, causing de-registration. + &[Perbill::from_percent(95)], + 1, + ); + + // or non-zero. + assert_eq!(Staking::force_era(), Forcing::NotForcing); + assert!(>::contains_key(11)); + assert!(Session::validators().contains(&11)); + }); } #[test] fn reporters_receive_their_slice() { - // This test verifies that the reporters of the offence receive their slice from the slashed - // amount. - ExtBuilder::default().build_and_execute(|| { - // The reporters' reward is calculated from the total exposure. - let initial_balance = 1125; - - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance); - - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![1, 2], - }], - &[Perbill::from_percent(50)], - ); - - // F1 * (reward_proportion * slash - 0) - // 50% * (10% * initial_balance / 2) - let reward = (initial_balance / 20) / 2; - let reward_each = reward / 2; // split into two pieces. - assert_eq!(Balances::free_balance(1), 10 + reward_each); - assert_eq!(Balances::free_balance(2), 20 + reward_each); - }); + // This test verifies that the reporters of the offence receive their slice from the slashed + // amount. + ExtBuilder::default().build_and_execute(|| { + // The reporters' reward is calculated from the total exposure. 
+ let initial_balance = 1125; + + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, + initial_balance + ); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![1, 2], + }], + &[Perbill::from_percent(50)], + ); + + // F1 * (reward_proportion * slash - 0) + // 50% * (10% * initial_balance / 2) + let reward = (initial_balance / 20) / 2; + let reward_each = reward / 2; // split into two pieces. + assert_eq!(Balances::free_balance(1), 10 + reward_each); + assert_eq!(Balances::free_balance(2), 20 + reward_each); + }); } #[test] fn subsequent_reports_in_same_span_pay_out_less() { - // This test verifies that the reporters of the offence receive their slice from the slashed - // amount, but less and less if they submit multiple reports in one span. - ExtBuilder::default().build_and_execute(|| { - // The reporters' reward is calculated from the total exposure. - let initial_balance = 1125; - - assert_eq!(Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, initial_balance); - - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![1], - }], - &[Perbill::from_percent(20)], - ); - - // F1 * (reward_proportion * slash - 0) - // 50% * (10% * initial_balance * 20%) - let reward = (initial_balance / 5) / 20; - assert_eq!(Balances::free_balance(1), 10 + reward); - - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![1], - }], - &[Perbill::from_percent(50)], - ); - - let prior_payout = reward; - - // F1 * (reward_proportion * slash - prior_payout) - // 50% * (10% * (initial_balance / 2) - prior_payout) - let reward = ((initial_balance / 20) - prior_payout) / 2; - assert_eq!(Balances::free_balance(1), 10 + prior_payout + reward); - }); + // This 
test verifies that the reporters of the offence receive their slice from the slashed + // amount, but less and less if they submit multiple reports in one span. + ExtBuilder::default().build_and_execute(|| { + // The reporters' reward is calculated from the total exposure. + let initial_balance = 1125; + + assert_eq!( + Staking::eras_stakers(Staking::active_era().unwrap().index, 11).total, + initial_balance + ); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![1], + }], + &[Perbill::from_percent(20)], + ); + + // F1 * (reward_proportion * slash - 0) + // 50% * (10% * initial_balance * 20%) + let reward = (initial_balance / 5) / 20; + assert_eq!(Balances::free_balance(1), 10 + reward); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![1], + }], + &[Perbill::from_percent(50)], + ); + + let prior_payout = reward; + + // F1 * (reward_proportion * slash - prior_payout) + // 50% * (10% * (initial_balance / 2) - prior_payout) + let reward = ((initial_balance / 20) - prior_payout) / 2; + assert_eq!(Balances::free_balance(1), 10 + prior_payout + reward); + }); } #[test] fn invulnerables_are_not_slashed() { - // For invulnerable validators no slashing is performed. 
- ExtBuilder::default().invulnerables(vec![11]).build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(21), 2000); - - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); - let initial_balance = Staking::slashable_balance_of(&21); - - let nominator_balances: Vec<_> = exposure.others - .iter().map(|o| Balances::free_balance(&o.who)).collect(); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(50), Perbill::from_percent(20)], - ); - - // The validator 11 hasn't been slashed, but 21 has been. - assert_eq!(Balances::free_balance(11), 1000); - // 2000 - (0.2 * initial_balance) - assert_eq!(Balances::free_balance(21), 2000 - (2 * initial_balance / 10)); - - // ensure that nominators were slashed as well. - for (initial_balance, other) in nominator_balances.into_iter().zip(exposure.others) { - assert_eq!( - Balances::free_balance(&other.who), - initial_balance - (2 * other.value / 10), - ); - } - }); + // For invulnerable validators no slashing is performed. 
+ ExtBuilder::default() + .invulnerables(vec![11]) + .build_and_execute(|| { + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(21), 2000); + + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); + let initial_balance = Staking::slashable_balance_of(&21); + + let nominator_balances: Vec<_> = exposure + .others + .iter() + .map(|o| Balances::free_balance(&o.who)) + .collect(); + + on_offence_now( + &[ + OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }, + OffenceDetails { + offender: ( + 21, + Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + ), + reporters: vec![], + }, + ], + &[Perbill::from_percent(50), Perbill::from_percent(20)], + ); + + // The validator 11 hasn't been slashed, but 21 has been. + assert_eq!(Balances::free_balance(11), 1000); + // 2000 - (0.2 * initial_balance) + assert_eq!( + Balances::free_balance(21), + 2000 - (2 * initial_balance / 10) + ); + + // ensure that nominators were slashed as well. + for (initial_balance, other) in nominator_balances.into_iter().zip(exposure.others) { + assert_eq!( + Balances::free_balance(&other.who), + initial_balance - (2 * other.value / 10), + ); + } + }); } #[test] fn dont_slash_if_fraction_is_zero() { - // Don't slash if the fraction is zero. - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 1000); - - on_offence_now( - &[OffenceDetails { - offender: ( - 11, - Staking::eras_stakers(Staking::active_era().unwrap().index, 11), - ), - reporters: vec![], - }], - &[Perbill::from_percent(0)], - ); - - // The validator hasn't been slashed. The new era is not forced. - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - }); + // Don't slash if the fraction is zero. 
+ ExtBuilder::default().build_and_execute(|| { + assert_eq!(Balances::free_balance(11), 1000); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); + + // The validator hasn't been slashed. The new era is not forced. + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Staking::force_era(), Forcing::ForceNew); + }); } #[test] fn only_slash_for_max_in_era() { - // multiple slashes within one era are only applied if it is more than any previous slash in the - // same era. - ExtBuilder::default().build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 1000); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(50)], - ); - - // The validator has been slashed and has been force-chilled. - assert_eq!(Balances::free_balance(11), 500); - assert_eq!(Staking::force_era(), Forcing::ForceNew); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(25)], - ); - - // The validator has not been slashed additionally. - assert_eq!(Balances::free_balance(11), 500); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(60)], - ); - - // The validator got slashed 10% more. - assert_eq!(Balances::free_balance(11), 400); - }) + // multiple slashes within one era are only applied if it is more than any previous slash in the + // same era. 
+ ExtBuilder::default().build_and_execute(|| { + assert_eq!(Balances::free_balance(11), 1000); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(50)], + ); + + // The validator has been slashed and has been force-chilled. + assert_eq!(Balances::free_balance(11), 500); + assert_eq!(Staking::force_era(), Forcing::ForceNew); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(25)], + ); + + // The validator has not been slashed additionally. + assert_eq!(Balances::free_balance(11), 500); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(60)], + ); + + // The validator got slashed 10% more. + assert_eq!(Balances::free_balance(11), 400); + }) } #[test] fn garbage_collection_after_slashing() { - // ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping. - ExtBuilder::default().existential_deposit(2).build_and_execute(|| { - assert_eq!(Balances::free_balance(11), 256_000); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); - - assert_eq!(Balances::free_balance(11), 256_000 - 25_600); - assert!(::SlashingSpans::get(&11).is_some()); - assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &25_600); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(100)], - ); - - // validator and nominator slash in era are garbage-collected by era change, - // so we don't test those here. 
- - assert_eq!(Balances::free_balance(11), 0); - assert_eq!(Balances::total_balance(&11), 0); - - assert_ok!(Staking::reap_stash(Origin::NONE, 11)); - - assert!(::SlashingSpans::get(&11).is_none()); - assert_eq!(::SpanSlash::get(&(11, 0)).amount_slashed(), &0); - }) + // ensures that `SlashingSpans` and `SpanSlash` of an account is removed after reaping. + ExtBuilder::default() + .existential_deposit(2) + .build_and_execute(|| { + assert_eq!(Balances::free_balance(11), 256_000); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + assert_eq!(Balances::free_balance(11), 256_000 - 25_600); + assert!(::SlashingSpans::get(&11).is_some()); + assert_eq!( + ::SpanSlash::get(&(11, 0)).amount_slashed(), + &25_600 + ); + + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(100)], + ); + + // validator and nominator slash in era are garbage-collected by era change, + // so we don't test those here. + + assert_eq!(Balances::free_balance(11), 0); + assert_eq!(Balances::total_balance(&11), 0); + + assert_ok!(Staking::reap_stash(Origin::NONE, 11)); + + assert!(::SlashingSpans::get(&11).is_none()); + assert_eq!( + ::SpanSlash::get(&(11, 0)).amount_slashed(), + &0 + ); + }) } #[test] fn garbage_collection_on_window_pruning() { - // ensures that `ValidatorSlashInEra` and `NominatorSlashInEra` are cleared after - // `BondingDuration`. 
- ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - - assert_eq!(Balances::free_balance(11), 1000); - let now = Staking::active_era().unwrap().index; - - let exposure = Staking::eras_stakers(now, 11); - assert_eq!(Balances::free_balance(101), 2000); - let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - - on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(now, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); - - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); - - assert!(::ValidatorSlashInEra::get(&now, &11).is_some()); - assert!(::NominatorSlashInEra::get(&now, &101).is_some()); - - // + 1 because we have to exit the bonding window. - for era in (0..(BondingDuration::get() + 1)).map(|offset| offset + now + 1) { - assert!(::ValidatorSlashInEra::get(&now, &11).is_some()); - assert!(::NominatorSlashInEra::get(&now, &101).is_some()); - - mock::start_era(era); - } - - assert!(::ValidatorSlashInEra::get(&now, &11).is_none()); - assert!(::NominatorSlashInEra::get(&now, &101).is_none()); - }) + // ensures that `ValidatorSlashInEra` and `NominatorSlashInEra` are cleared after + // `BondingDuration`. 
+ ExtBuilder::default().build_and_execute(|| { + mock::start_era(1); + + assert_eq!(Balances::free_balance(11), 1000); + let now = Staking::active_era().unwrap().index; + + let exposure = Staking::eras_stakers(now, 11); + assert_eq!(Balances::free_balance(101), 2000); + let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + + on_offence_now( + &[OffenceDetails { + offender: (11, Staking::eras_stakers(now, 11)), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + + assert!(::ValidatorSlashInEra::get(&now, &11).is_some()); + assert!(::NominatorSlashInEra::get(&now, &101).is_some()); + + // + 1 because we have to exit the bonding window. + for era in (0..(BondingDuration::get() + 1)).map(|offset| offset + now + 1) { + assert!(::ValidatorSlashInEra::get(&now, &11).is_some()); + assert!(::NominatorSlashInEra::get(&now, &101).is_some()); + + mock::start_era(era); + } + + assert!(::ValidatorSlashInEra::get(&now, &11).is_none()); + assert!(::NominatorSlashInEra::get(&now, &101).is_none()); + }) } #[test] fn slashing_nominators_by_span_max() { - ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - mock::start_era(2); - mock::start_era(3); - - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(21), 2000); - assert_eq!(Balances::free_balance(101), 2000); - assert_eq!(Staking::slashable_balance_of(&21), 1000); - - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); - let nominated_value_11 = exposure_11.others.iter().find(|o| o.who == 101).unwrap().value; - let nominated_value_21 = exposure_21.others.iter().find(|o| o.who == 101).unwrap().value; - - on_offence_in_era( - &[ - OffenceDetails { - offender: (11, 
Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - 2, - ); - - assert_eq!(Balances::free_balance(11), 900); - - let slash_1_amount = Perbill::from_percent(10) * nominated_value_11; - assert_eq!(Balances::free_balance(101), 2000 - slash_1_amount); - - let expected_spans = vec![ - slashing::SlashingSpan { index: 1, start: 4, length: None }, - slashing::SlashingSpan { index: 0, start: 0, length: Some(4) }, - ]; - - let get_span = |account| ::SlashingSpans::get(&account).unwrap(); - - assert_eq!( - get_span(11).iter().collect::>(), - expected_spans, - ); - - assert_eq!( - get_span(101).iter().collect::>(), - expected_spans, - ); - - // second slash: higher era, higher value, same span. - on_offence_in_era( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(30)], - 3, - ); - - // 11 was not further slashed, but 21 and 101 were. - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(21), 1700); - - let slash_2_amount = Perbill::from_percent(30) * nominated_value_21; - assert!(slash_2_amount > slash_1_amount); - - // only the maximum slash in a single span is taken. - assert_eq!(Balances::free_balance(101), 2000 - slash_2_amount); - - // third slash: in same era and on same validator as first, higher - // in-era value, but lower slash value than slash 2. - on_offence_in_era( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(20)], - 2, - ); - - // 11 was further slashed, but 21 and 101 were not. 
- assert_eq!(Balances::free_balance(11), 800); - assert_eq!(Balances::free_balance(21), 1700); - - let slash_3_amount = Perbill::from_percent(20) * nominated_value_21; - assert!(slash_3_amount < slash_2_amount); - assert!(slash_3_amount > slash_1_amount); - - // only the maximum slash in a single span is taken. - assert_eq!(Balances::free_balance(101), 2000 - slash_2_amount); - }); + ExtBuilder::default().build_and_execute(|| { + mock::start_era(1); + mock::start_era(2); + mock::start_era(3); + + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(21), 2000); + assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Staking::slashable_balance_of(&21), 1000); + + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, 21); + let nominated_value_11 = exposure_11 + .others + .iter() + .find(|o| o.who == 101) + .unwrap() + .value; + let nominated_value_21 = exposure_21 + .others + .iter() + .find(|o| o.who == 101) + .unwrap() + .value; + + on_offence_in_era( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + 2, + ); + + assert_eq!(Balances::free_balance(11), 900); + + let slash_1_amount = Perbill::from_percent(10) * nominated_value_11; + assert_eq!(Balances::free_balance(101), 2000 - slash_1_amount); + + let expected_spans = vec![ + slashing::SlashingSpan { + index: 1, + start: 4, + length: None, + }, + slashing::SlashingSpan { + index: 0, + start: 0, + length: Some(4), + }, + ]; + + let get_span = |account| ::SlashingSpans::get(&account).unwrap(); + + assert_eq!(get_span(11).iter().collect::>(), expected_spans,); + + assert_eq!(get_span(101).iter().collect::>(), expected_spans,); + + // second slash: higher era, higher value, same span. 
+ on_offence_in_era( + &[OffenceDetails { + offender: ( + 21, + Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + ), + reporters: vec![], + }], + &[Perbill::from_percent(30)], + 3, + ); + + // 11 was not further slashed, but 21 and 101 were. + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(21), 1700); + + let slash_2_amount = Perbill::from_percent(30) * nominated_value_21; + assert!(slash_2_amount > slash_1_amount); + + // only the maximum slash in a single span is taken. + assert_eq!(Balances::free_balance(101), 2000 - slash_2_amount); + + // third slash: in same era and on same validator as first, higher + // in-era value, but lower slash value than slash 2. + on_offence_in_era( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(20)], + 2, + ); + + // 11 was further slashed, but 21 and 101 were not. + assert_eq!(Balances::free_balance(11), 800); + assert_eq!(Balances::free_balance(21), 1700); + + let slash_3_amount = Perbill::from_percent(20) * nominated_value_21; + assert!(slash_3_amount < slash_2_amount); + assert!(slash_3_amount > slash_1_amount); + + // only the maximum slash in a single span is taken. 
+ assert_eq!(Balances::free_balance(101), 2000 - slash_2_amount); + }); } #[test] fn slashes_are_summed_across_spans() { - ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - mock::start_era(2); - mock::start_era(3); - - assert_eq!(Balances::free_balance(21), 2000); - assert_eq!(Staking::slashable_balance_of(&21), 1000); - - let get_span = |account| ::SlashingSpans::get(&account).unwrap(); - - on_offence_now( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); - - let expected_spans = vec![ - slashing::SlashingSpan { index: 1, start: 4, length: None }, - slashing::SlashingSpan { index: 0, start: 0, length: Some(4) }, - ]; - - assert_eq!(get_span(21).iter().collect::>(), expected_spans); - assert_eq!(Balances::free_balance(21), 1900); - - // 21 has been force-chilled. re-signal intent to validate. - Staking::validate(Origin::signed(20), Default::default()).unwrap(); - - mock::start_era(4); - - assert_eq!(Staking::slashable_balance_of(&21), 900); - - on_offence_now( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); - - let expected_spans = vec![ - slashing::SlashingSpan { index: 2, start: 5, length: None }, - slashing::SlashingSpan { index: 1, start: 4, length: Some(1) }, - slashing::SlashingSpan { index: 0, start: 0, length: Some(4) }, - ]; - - assert_eq!(get_span(21).iter().collect::>(), expected_spans); - assert_eq!(Balances::free_balance(21), 1810); - }); + ExtBuilder::default().build_and_execute(|| { + mock::start_era(1); + mock::start_era(2); + mock::start_era(3); + + assert_eq!(Balances::free_balance(21), 2000); + assert_eq!(Staking::slashable_balance_of(&21), 1000); + + let get_span = |account| ::SlashingSpans::get(&account).unwrap(); + + on_offence_now( + &[OffenceDetails { + offender: ( + 21, + 
Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + ), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + let expected_spans = vec![ + slashing::SlashingSpan { + index: 1, + start: 4, + length: None, + }, + slashing::SlashingSpan { + index: 0, + start: 0, + length: Some(4), + }, + ]; + + assert_eq!(get_span(21).iter().collect::>(), expected_spans); + assert_eq!(Balances::free_balance(21), 1900); + + // 21 has been force-chilled. re-signal intent to validate. + Staking::validate(Origin::signed(20), Default::default()).unwrap(); + + mock::start_era(4); + + assert_eq!(Staking::slashable_balance_of(&21), 900); + + on_offence_now( + &[OffenceDetails { + offender: ( + 21, + Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + ), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + let expected_spans = vec![ + slashing::SlashingSpan { + index: 2, + start: 5, + length: None, + }, + slashing::SlashingSpan { + index: 1, + start: 4, + length: Some(1), + }, + slashing::SlashingSpan { + index: 0, + start: 0, + length: Some(4), + }, + ]; + + assert_eq!(get_span(21).iter().collect::>(), expected_spans); + assert_eq!(Balances::free_balance(21), 1810); + }); } #[test] fn deferred_slashes_are_deferred() { - ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); + ExtBuilder::default() + .slash_defer_duration(2) + .build_and_execute(|| { + mock::start_era(1); - assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(11), 1000); - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - assert_eq!(Balances::free_balance(101), 2000); - let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + assert_eq!(Balances::free_balance(101), 2000); + let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - 
on_offence_now( - &[ - OffenceDetails { - offender: (11, Staking::eras_stakers(Staking::active_era().unwrap().index, 11)), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); + on_offence_now( + &[OffenceDetails { + offender: ( + 11, + Staking::eras_stakers(Staking::active_era().unwrap().index, 11), + ), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(2); + mock::start_era(2); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - mock::start_era(3); + mock::start_era(3); - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); - // at the start of era 4, slashes from era 1 are processed, - // after being deferred for at least 2 full eras. - mock::start_era(4); + // at the start of era 4, slashes from era 1 are processed, + // after being deferred for at least 2 full eras. 
+ mock::start_era(4); - assert_eq!(Balances::free_balance(11), 900); - assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); - }) + assert_eq!(Balances::free_balance(11), 900); + assert_eq!(Balances::free_balance(101), 2000 - (nominated_value / 10)); + }) } #[test] fn remove_deferred() { - ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); - - assert_eq!(Balances::free_balance(11), 1000); - - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - assert_eq!(Balances::free_balance(101), 2000); - let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; - - on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); - - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - - mock::start_era(2); - - on_offence_in_era( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], - &[Perbill::from_percent(15)], - 1, - ); - - // fails if empty - assert_noop!( - Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![]), - Error::::EmptyTargets - ); - - assert_ok!(Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![0])); - - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - - mock::start_era(3); - - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - - // at the start of era 4, slashes from era 1 are processed, - // after being deferred for at least 2 full eras. - mock::start_era(4); - - // the first slash for 10% was cancelled, so no effect. 
- assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - - mock::start_era(5); - - let slash_10 = Perbill::from_percent(10); - let slash_15 = Perbill::from_percent(15); - let initial_slash = slash_10 * nominated_value; - - let total_slash = slash_15 * nominated_value; - let actual_slash = total_slash - initial_slash; - - // 5% slash (15 - 10) processed now. - assert_eq!(Balances::free_balance(11), 950); - assert_eq!(Balances::free_balance(101), 2000 - actual_slash); - }) + ExtBuilder::default() + .slash_defer_duration(2) + .build_and_execute(|| { + mock::start_era(1); + + assert_eq!(Balances::free_balance(11), 1000); + + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + assert_eq!(Balances::free_balance(101), 2000); + let nominated_value = exposure.others.iter().find(|o| o.who == 101).unwrap().value; + + on_offence_now( + &[OffenceDetails { + offender: (11, exposure.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); + + mock::start_era(2); + + on_offence_in_era( + &[OffenceDetails { + offender: (11, exposure.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(15)], + 1, + ); + + // fails if empty + assert_noop!( + Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![]), + Error::::EmptyTargets + ); + + assert_ok!(Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![0])); + + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); + + mock::start_era(3); + + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); + + // at the start of era 4, slashes from era 1 are processed, + // after being deferred for at least 2 full eras. + mock::start_era(4); + + // the first slash for 10% was cancelled, so no effect. 
+ assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); + + mock::start_era(5); + + let slash_10 = Perbill::from_percent(10); + let slash_15 = Perbill::from_percent(15); + let initial_slash = slash_10 * nominated_value; + + let total_slash = slash_15 * nominated_value; + let actual_slash = total_slash - initial_slash; + + // 5% slash (15 - 10) processed now. + assert_eq!(Balances::free_balance(11), 950); + assert_eq!(Balances::free_balance(101), 2000 - actual_slash); + }) } #[test] fn remove_multi_deferred() { - ExtBuilder::default().slash_defer_duration(2).build_and_execute(|| { - mock::start_era(1); - - assert_eq!(Balances::free_balance(11), 1000); - - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - assert_eq!(Balances::free_balance(101), 2000); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], - &[Perbill::from_percent(10)], - ); - - on_offence_now( - &[ - OffenceDetails { - offender: (21, Staking::eras_stakers(Staking::active_era().unwrap().index, 21)), - reporters: vec![], - } - ], - &[Perbill::from_percent(10)], - ); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], - &[Perbill::from_percent(25)], - ); - - on_offence_now( - &[ - OffenceDetails { - offender: (42, exposure.clone()), - reporters: vec![], - }, - ], - &[Perbill::from_percent(25)], - ); - - on_offence_now( - &[ - OffenceDetails { - offender: (69, exposure.clone()), - reporters: vec![], - }, - ], - &[Perbill::from_percent(25)], - ); - - assert_eq!(::UnappliedSlashes::get(&1).len(), 5); - - // fails if list is not sorted - assert_noop!( - Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![2, 0, 4]), - Error::::NotSortedAndUnique - ); - // fails if list is not unique - assert_noop!( - Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![0, 2, 2]), - Error::::NotSortedAndUnique - ); - // fails if bad index 
- assert_noop!( - Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![1, 2, 3, 4, 5]), - Error::::InvalidSlashIndex - ); - - assert_ok!(Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![0, 2, 4])); - - let slashes = ::UnappliedSlashes::get(&1); - assert_eq!(slashes.len(), 2); - assert_eq!(slashes[0].validator, 21); - assert_eq!(slashes[1].validator, 42); - }) + ExtBuilder::default() + .slash_defer_duration(2) + .build_and_execute(|| { + mock::start_era(1); + + assert_eq!(Balances::free_balance(11), 1000); + + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + assert_eq!(Balances::free_balance(101), 2000); + + on_offence_now( + &[OffenceDetails { + offender: (11, exposure.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + on_offence_now( + &[OffenceDetails { + offender: ( + 21, + Staking::eras_stakers(Staking::active_era().unwrap().index, 21), + ), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + on_offence_now( + &[OffenceDetails { + offender: (11, exposure.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(25)], + ); + + on_offence_now( + &[OffenceDetails { + offender: (42, exposure.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(25)], + ); + + on_offence_now( + &[OffenceDetails { + offender: (69, exposure.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(25)], + ); + + assert_eq!(::UnappliedSlashes::get(&1).len(), 5); + + // fails if list is not sorted + assert_noop!( + Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![2, 0, 4]), + Error::::NotSortedAndUnique + ); + // fails if list is not unique + assert_noop!( + Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![0, 2, 2]), + Error::::NotSortedAndUnique + ); + // fails if bad index + assert_noop!( + Staking::cancel_deferred_slash(Origin::ROOT, 1, vec![1, 2, 3, 4, 5]), + Error::::InvalidSlashIndex + ); + + assert_ok!(Staking::cancel_deferred_slash( + Origin::ROOT, + 1, + vec![0, 2, 4] + 
)); + + let slashes = ::UnappliedSlashes::get(&1); + assert_eq!(slashes.len(), 2); + assert_eq!(slashes[0].validator, 21); + assert_eq!(slashes[1].validator, 42); + }) } mod offchain_phragmen { - use crate::*; - use frame_support::{assert_noop, assert_ok}; - use sp_runtime::transaction_validity::TransactionSource; - use mock::*; - use parking_lot::RwLock; - use sp_core::offchain::{ - testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, - OffchainExt, TransactionPoolExt, - }; - use sp_io::TestExternalities; - use sp_phragmen::StakedAssignment; - use frame_support::traits::OffchainWorker; - use std::sync::Arc; - use substrate_test_utils::assert_eq_uvec; - - fn percent(x: u16) -> OffchainAccuracy { - OffchainAccuracy::from_percent(x) - } - - /// setup a new set of validators and nominator storage items independent of the parent mock - /// file. This produces a edge graph that can be reduced. - fn build_offchain_phragmen_test_ext() { - for i in (10..=40).step_by(10) { - // Note: we respect the convention of the mock (10, 11 pairs etc.) since these accounts - // have corresponding keys in session which makes everything more ergonomic and - // realistic. 
- bond_validator(i + 1, i, 100); - } - - let mut voter = 1; - bond_nominator(voter, 1000 + voter, 100, vec![11]); - voter = 2; - bond_nominator(voter, 1000 + voter, 100, vec![11, 11]); - voter = 3; - bond_nominator(voter, 1000 + voter, 100, vec![21, 41]); - voter = 4; - bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); - voter = 5; - bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); - } - - fn offchainify(ext: &mut TestExternalities) -> Arc> { - let (offchain, _state) = TestOffchainExt::new(); - let (pool, state) = TestTransactionPoolExt::new(); - - ext.register_extension(OffchainExt::new(offchain)); - ext.register_extension(TransactionPoolExt::new(pool)); - - state - } - - #[test] - fn is_current_session_final_works() { - ExtBuilder::default() - .session_per_era(3) - .build() - .execute_with(|| { - mock::start_era(1); - assert_eq!(Session::current_index(), 3); - assert_eq!(Staking::current_era(), Some(1)); - assert_eq!(Staking::is_current_session_final(), false); - - start_session(4); - assert_eq!(Session::current_index(), 4); - assert_eq!(Staking::current_era(), Some(1)); - assert_eq!(Staking::is_current_session_final(), true); - - start_session(5); - assert_eq!(Session::current_index(), 5); - // era changed. - assert_eq!(Staking::current_era(), Some(2)); - assert_eq!(Staking::is_current_session_final(), false); - }) - } - - #[test] - fn offchain_election_flag_is_triggered() { - ExtBuilder::default() - .session_per_era(5) - .session_length(10) - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(7); - assert_session_era!(0, 0); - - run_to_block(10); - assert_session_era!(1, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - run_to_block(36); - assert_session_era!(3, 0); - - // fist era has session 0, which has 0 blocks length, so we have in total 40 blocks - // in the era. 
- run_to_block(37); - assert_session_era!(3, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - assert!(Staking::snapshot_nominators().is_some()); - assert!(Staking::snapshot_validators().is_some()); - - run_to_block(38); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - - run_to_block(39); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); - - run_to_block(40); - assert_session_era!(4, 0); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - run_to_block(86); - assert_session_era!(8, 1); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - - // second era onwards has 50 blocks per era. - run_to_block(87); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(87)); - assert!(Staking::snapshot_nominators().is_some()); - assert!(Staking::snapshot_validators().is_some()); - - run_to_block(90); - assert_session_era!(9, 1); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - assert!(Staking::snapshot_nominators().is_none()); - assert!(Staking::snapshot_validators().is_none()); - }) - } - - #[test] - fn offchain_election_flag_is_triggered_when_forcing() { - ExtBuilder::default() - .session_per_era(5) - .session_length(10) - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(7); - assert_session_era!(0, 0); - - run_to_block(12); - ForceEra::put(Forcing::ForceNew); - run_to_block(13); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - run_to_block(17); // instead of 47 - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(17)); - }) - } - - #[test] - fn election_on_chain_fallback_works() { - ExtBuilder::default().build_and_execute(|| { - start_session(1); - 
start_session(2); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - // some election must have happened by now. - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::OnChain), - ); - }) - } - - #[test] - #[ignore] // This takes a few mins - fn offchain_wont_work_if_snapshot_fails() { - ExtBuilder::default() - .offchain_phragmen_ext() - .build() - .execute_with(|| { - run_to_block(12); - assert!(Staking::snapshot_validators().is_some()); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - // validate more than the limit - let limit: NominatorIndex = ValidatorIndex::max_value() as NominatorIndex + 1; - let ctrl = 1_000_000; - for i in 0..limit { - bond_validator((1000 + i).into(), (1000 + i + ctrl).into(), 100); - } - - // window stays closed since no snapshot was taken. - run_to_block(27); - assert!(Staking::snapshot_validators().is_none()); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - }) - } - - #[test] - fn staking_is_locked_when_election_window_open() { - ExtBuilder::default() - .offchain_phragmen_ext() - .election_lookahead(3) - .build() - .execute_with(|| { - run_to_block(12); - assert!(Staking::snapshot_validators().is_some()); - // given - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - // chill et. al. are now not allowed. - assert_noop!( - Staking::chill(Origin::signed(10)), - Error::::CallNotAllowed, - ); - }) - } - - #[test] - fn signed_result_can_be_submitted() { - // should check that we have a new validator set normally, - // event says that it comes from offchain. 
- ExtBuilder::default() - .offchain_phragmen_ext() - .build() - .execute_with(|| { - run_to_block(12); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - assert!(Staking::snapshot_validators().is_some()); - - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - )); - - let queued_result = Staking::queued_elected().unwrap(); - assert_eq!(queued_result.compute, ElectionCompute::Signed); - - run_to_block(15); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::Signed), - ); - }) - } - - #[test] - fn signed_result_can_be_submitted_later() { - // same as `signed_result_can_be_submitted` but at a later block. 
- ExtBuilder::default() - .offchain_phragmen_ext() - .build() - .execute_with(|| { - run_to_block(14); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - )); - - let queued_result = Staking::queued_elected().unwrap(); - assert_eq!(queued_result.compute, ElectionCompute::Signed); - - run_to_block(15); - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - assert_eq!( - System::events() - .into_iter() - .map(|r| r.event) - .filter_map(|e| { - if let MetaEvent::staking(inner) = e { - Some(inner) - } else { - None - } - }) - .last() - .unwrap(), - RawEvent::StakingElection(ElectionCompute::Signed), - ); - }) - } - - #[test] - fn early_solution_submission_is_rejected() { - // should check that we have a new validator set normally, - // event says that it comes from offchain. - ExtBuilder::default() - .offchain_phragmen_ext() - .build() - .execute_with(|| { - run_to_block(11); - // submission is not yet allowed - assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); - - // create all the indices just to build the solution. - Staking::create_stakers_snapshot(); - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - Staking::kill_stakers_snapshot(); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenEarlySubmission, - ); - }) - } - - #[test] - fn weak_solution_is_rejected() { - // A solution which is weaker than what we currently have on-chain is rejected. 
- ExtBuilder::default() - .offchain_phragmen_ext() - .has_stakers(false) - .validator_count(4) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - // a good solution - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - )); - - // a bad solution - let (compact, winners, score) = horrible_phragmen_with_post_processing(false); - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenWeakSubmission, - ); - }) - } - - #[test] - fn better_solution_is_accepted() { - // A solution which is better than what we currently have on-chain is accepted. - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - // a meeeeh solution - let (compact, winners, score) = horrible_phragmen_with_post_processing(false); - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - )); - - // a better solution - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - )); - }) - } - - #[test] - fn offchain_worker_runs_when_window_open() { - // at the end of the first finalized block with ElectionStatus::open(_), it should execute. - let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(2) - .build(); - let state = offchainify(&mut ext); - ext.execute_with(|| { - run_to_block(12); - - // local key 11 is in the elected set. 
- assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(state.read().transactions.len(), 0); - Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - }; - - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Ok(ValidTransaction { - priority: (1 << 20) + 1125, // the proposed slot stake. - requires: vec![], - provides: vec![("StakingOffchain", current_era()).encode()], - longevity: 3, - propagate: false, - }) - ) - }) - } - - #[test] - fn mediocre_submission_from_authority_is_early_rejected() { - let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .build(); - let state = offchainify(&mut ext); - ext.execute_with(|| { - run_to_block(12); - // put a good solution on-chain - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ),); - - // now run the offchain worker in the same chain state. 
- Staking::offchain_worker(12); - assert_eq!(state.read().transactions.len(), 1); - - let encoded = state.read().transactions[0].clone(); - let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); - - let call = extrinsic.call; - let inner = match call { - mock::Call::Staking(inner) => inner, - }; - - // pass this call to ValidateUnsigned - assert_eq!( - ::validate_unsigned( - TransactionSource::Local, - &inner, - ), - TransactionValidity::Err( - InvalidTransaction::Custom(>::PhragmenWeakSubmission.as_u8()).into(), - ), - ) - }) - } - - #[test] - fn invalid_phragmen_result_correct_number_of_winners() { - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - ValidatorCount::put(3); - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - ValidatorCount::put(4); - - assert_eq!(winners.len(), 3); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_phragmen_result_correct_number_of_winners_1() { - // if we have too little validators, then the number of candidates is the bound. 
- ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(8) // we simply cannot elect 8 - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - ValidatorCount::put(3); - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - ValidatorCount::put(4); - - assert_eq!(winners.len(), 3); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusWinnerCount, - ); - }) - } - - #[test] - fn invalid_phragmen_result_correct_number_of_winners_2() { - // if we have too little validators, then the number of candidates is the bound. - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(8) // we simply cannot elect 8 - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, |_| {}); - - assert_eq!(winners.len(), 4); - - // all good. We chose 4 and it works. - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ),); - }) - } - - #[test] - fn invalid_phragmen_result_out_of_bound_nominator_index() { - // A nominator index which is simply invalid - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (mut compact, winners, score) = prepare_submission_with(true, |_| {}); - - // index 9 doesn't exist. - compact.votes1.push((9, 2)); - - // The error type sadly cannot be more specific now. 
- assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusCompact, - ); - }) - } - - #[test] - fn invalid_phragmen_result_out_of_bound_validator_index() { - // A validator index which is out of bound - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (mut compact, winners, score) = prepare_submission_with(true, |_| {}); - - // index 4 doesn't exist. - compact.votes1.push((3, 4)); - - // The error type sadly cannot be more specific now. - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusCompact, - ); - }) - } - - #[test] - fn invalid_phragmen_result_out_of_bound_winner_index() { - // A winner index which is simply invalid - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (compact, _, score) = prepare_submission_with(true, |_| {}); - - // index 4 doesn't exist. - let winners = vec![0, 1, 2, 4]; - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusWinner, - ); - }) - } - - #[test] - fn invalid_phragmen_result_non_winner_validator_index() { - // An edge that points to a correct validator index who is NOT a winner. This is very - // similar to the test that raises `PhragmenBogusNomination`. 
- ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(2) // we select only 2. - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); - assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); - let (compact, winners, score) = prepare_submission_with(true, |a| { - a.iter_mut() - .find(|x| x.who == 5) - // all 3 cannot be among the winners. Although, all of them are validator - // candidates. - .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); - }); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusEdge, - ); - }) - } - - #[test] - fn invalid_phragmen_result_wrong_self_vote() { - // A self vote for someone else. - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, |a| { - // mutate a self vote to target someone else. That someone else is still among the - // winners - a.iter_mut().find(|x| x.who == 11).map(|x| { - x.distribution - .iter_mut() - .find(|y| y.0 == 11) - .map(|y| y.0 = 21) - }); - }); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusSelfVote, - ); - }) - } - - #[test] - fn invalid_phragmen_result_wrong_self_vote_2() { - // A self validator voting for someone else next to self vote. - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(true, |a| { - // Remove the self vote. 
- a.retain(|x| x.who != 11); - // add is as a new double vote - a.push(StakedAssignment { - who: 11, - distribution: vec![(11, 50), (21, 50)], - }); - }); - - // This raises score issue. - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusSelfVote, - ); - }) - } - - #[test] - fn invalid_phragmen_result_over_stake() { - // Someone's edge ratios sums to more than 100%. - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - // Note: we don't reduce here to be able to tweak votes3. votes3 will vanish if you - // reduce. - let (mut compact, winners, score) = prepare_submission_with(false, |_| {}); - - if let Some(c) = compact.votes3.iter_mut().find(|x| x.0 == 0) { - // by default it should have been (0, [(2, 33%), (1, 33%)], 0) - // now the sum is above 100% - c.1 = [(2, percent(66)), (1, percent(66))]; - } - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusCompact, - ); - }) - } - - #[test] - fn invalid_phragmen_result_under_stake() { - // at the time of this writing, we cannot under stake someone. The compact assignment works - // in a way that some of the stakes are presented by the submitter, and the last one is read - // from chain by subtracting the rest from total. Hence, the sum is always correct. - // This test is only here as a demonstration. - } - - #[test] - fn invalid_phragmen_result_invalid_target_stealing() { - // A valid voter who voted for someone who is a candidate, and is a correct winner, but is - // actually NOT nominated by this nominator. 
- ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - let (compact, winners, score) = prepare_submission_with(false, |a| { - // 3 only voted for 20 and 40. We add a fake vote to 30. The stake sum is still - // correctly 100. - a.iter_mut() - .find(|x| x.who == 3) - .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); - }); - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusNomination, - ); - }) - } - - #[test] - fn nomination_slash_filter_is_checked() { - // If a nominator has voted for someone who has been recently slashed, that particular - // nomination should be disabled for the upcoming election. A solution must respect this - // rule. - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - - // finalize the round with fallback. This is needed since all nominator submission - // are in era zero and we want this one to pass with no problems. - run_to_block(15); - - // go to the next session to trigger mock::start_era and bump the active era - run_to_block(20); - - // slash 10. This must happen outside of the election window. - let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - on_offence_now( - &[OffenceDetails { - offender: (11, offender_expo.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(50)], - ); - - // validate 10 again for the next round. But this guy will not have the votes that - // it should have had from 1 and 2. - assert_ok!(Staking::validate( - Origin::signed(10), - Default::default() - )); - - // open the election window and create snapshots. - run_to_block(32); - - // a solution that has been prepared after the slash. 
- let (compact, winners, score) = prepare_submission_with(false, |a| { - // no one is allowed to vote for 10, except for itself. - a.into_iter() - .filter(|s| s.who != 11) - .for_each(|s| - assert!(s.distribution.iter().find(|(t, _)| *t == 11).is_none()) - ); - }); - - // can be submitted. - assert_ok!(Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - )); - - // a wrong solution. - let (compact, winners, score) = prepare_submission_with(false, |a| { - // add back the vote that has been filtered out. - a.push(StakedAssignment { - who: 1, - distribution: vec![(11, 100)] - }); - }); - - // is rejected. - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenSlashedNomination, - ); - }) - } - - #[test] - fn invalid_phragmen_result_wrong_score() { - // A valid voter who's total distributed stake is more than what they bond - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - build_offchain_phragmen_test_ext(); - run_to_block(12); - - let (compact, winners, mut score) = prepare_submission_with(true, |_| {}); - score[0] += 1; - - assert_noop!( - Staking::submit_election_solution( - Origin::signed(10), - winners, - compact, - score, - current_era(), - ), - Error::::PhragmenBogusScore, - ); - }) - } - - #[test] - fn offchain_storage_is_set() { - let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .build(); - let state = offchainify(&mut ext); - - ext.execute_with(|| { - use offchain_election::OFFCHAIN_HEAD_DB; - use sp_runtime::offchain::storage::StorageValueRef; - - run_to_block(12); - - Staking::offchain_worker(12); - // it works - assert_eq!(state.read().transactions.len(), 1); - - // and it is set - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - assert_eq!(storage.get::().unwrap().unwrap(), 12); - 
}) - } - - #[test] - fn offchain_storage_prevents_duplicate() { - let mut ext = ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .build(); - let _ = offchainify(&mut ext); - - ext.execute_with(|| { - use offchain_election::OFFCHAIN_HEAD_DB; - use sp_runtime::offchain::storage::StorageValueRef; - let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); - - run_to_block(12); - - // first run -- ok - assert_eq!( - offchain_election::set_check_offchain_execution_status::(12), - Ok(()), - ); - assert_eq!(storage.get::().unwrap().unwrap(), 12); - - // re-execute after the next. not allowed. - assert_eq!( - offchain_election::set_check_offchain_execution_status::(13), - Err("recently executed."), - ); - - // a fork like situation -- re-execute 10, 11, 12. But it won't go through. - assert_eq!( - offchain_election::set_check_offchain_execution_status::(10), - Err("fork."), - ); - assert_eq!( - offchain_election::set_check_offchain_execution_status::(11), - Err("fork."), - ); - assert_eq!( - offchain_election::set_check_offchain_execution_status::(12), - Err("recently executed."), - ); - }) - } - - #[test] - #[should_panic] - fn offence_is_blocked_when_window_open() { - ExtBuilder::default() - .offchain_phragmen_ext() - .validator_count(4) - .has_stakers(false) - .build() - .execute_with(|| { - run_to_block(12); - assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); - - let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 10); - - // panic from the impl in mock - on_offence_now( - &[OffenceDetails { - offender: (10, offender_expo.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); - }) - } + use crate::*; + use frame_support::traits::OffchainWorker; + use frame_support::{assert_noop, assert_ok}; + use mock::*; + use parking_lot::RwLock; + use sp_core::offchain::{ + testing::{PoolState, TestOffchainExt, TestTransactionPoolExt}, + OffchainExt, TransactionPoolExt, + }; + use 
sp_io::TestExternalities; + use sp_phragmen::StakedAssignment; + use sp_runtime::transaction_validity::TransactionSource; + use std::sync::Arc; + use substrate_test_utils::assert_eq_uvec; + + fn percent(x: u16) -> OffchainAccuracy { + OffchainAccuracy::from_percent(x) + } + + /// setup a new set of validators and nominator storage items independent of the parent mock + /// file. This produces a edge graph that can be reduced. + fn build_offchain_phragmen_test_ext() { + for i in (10..=40).step_by(10) { + // Note: we respect the convention of the mock (10, 11 pairs etc.) since these accounts + // have corresponding keys in session which makes everything more ergonomic and + // realistic. + bond_validator(i + 1, i, 100); + } + + let mut voter = 1; + bond_nominator(voter, 1000 + voter, 100, vec![11]); + voter = 2; + bond_nominator(voter, 1000 + voter, 100, vec![11, 11]); + voter = 3; + bond_nominator(voter, 1000 + voter, 100, vec![21, 41]); + voter = 4; + bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); + voter = 5; + bond_nominator(voter, 1000 + voter, 100, vec![21, 31, 41]); + } + + fn offchainify(ext: &mut TestExternalities) -> Arc> { + let (offchain, _state) = TestOffchainExt::new(); + let (pool, state) = TestTransactionPoolExt::new(); + + ext.register_extension(OffchainExt::new(offchain)); + ext.register_extension(TransactionPoolExt::new(pool)); + + state + } + + #[test] + fn is_current_session_final_works() { + ExtBuilder::default() + .session_per_era(3) + .build() + .execute_with(|| { + mock::start_era(1); + assert_eq!(Session::current_index(), 3); + assert_eq!(Staking::current_era(), Some(1)); + assert_eq!(Staking::is_current_session_final(), false); + + start_session(4); + assert_eq!(Session::current_index(), 4); + assert_eq!(Staking::current_era(), Some(1)); + assert_eq!(Staking::is_current_session_final(), true); + + start_session(5); + assert_eq!(Session::current_index(), 5); + // era changed. 
+ assert_eq!(Staking::current_era(), Some(2)); + assert_eq!(Staking::is_current_session_final(), false); + }) + } + + #[test] + fn offchain_election_flag_is_triggered() { + ExtBuilder::default() + .session_per_era(5) + .session_length(10) + .election_lookahead(3) + .build() + .execute_with(|| { + run_to_block(7); + assert_session_era!(0, 0); + + run_to_block(10); + assert_session_era!(1, 0); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + + run_to_block(36); + assert_session_era!(3, 0); + + // fist era has session 0, which has 0 blocks length, so we have in total 40 blocks + // in the era. + run_to_block(37); + assert_session_era!(3, 0); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); + assert!(Staking::snapshot_nominators().is_some()); + assert!(Staking::snapshot_validators().is_some()); + + run_to_block(38); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); + + run_to_block(39); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(37)); + + run_to_block(40); + assert_session_era!(4, 0); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + + run_to_block(86); + assert_session_era!(8, 1); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + + // second era onwards has 50 blocks per era. 
+ run_to_block(87); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(87)); + assert!(Staking::snapshot_nominators().is_some()); + assert!(Staking::snapshot_validators().is_some()); + + run_to_block(90); + assert_session_era!(9, 1); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + assert!(Staking::snapshot_nominators().is_none()); + assert!(Staking::snapshot_validators().is_none()); + }) + } + + #[test] + fn offchain_election_flag_is_triggered_when_forcing() { + ExtBuilder::default() + .session_per_era(5) + .session_length(10) + .election_lookahead(3) + .build() + .execute_with(|| { + run_to_block(7); + assert_session_era!(0, 0); + + run_to_block(12); + ForceEra::put(Forcing::ForceNew); + run_to_block(13); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + run_to_block(17); // instead of 47 + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(17)); + }) + } + + #[test] + fn election_on_chain_fallback_works() { + ExtBuilder::default().build_and_execute(|| { + start_session(1); + start_session(2); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + // some election must have happened by now. 
+ assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let MetaEvent::staking(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::StakingElection(ElectionCompute::OnChain), + ); + }) + } + + #[test] + #[ignore] // This takes a few mins + fn offchain_wont_work_if_snapshot_fails() { + ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(12); + assert!(Staking::snapshot_validators().is_some()); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + // validate more than the limit + let limit: NominatorIndex = ValidatorIndex::max_value() as NominatorIndex + 1; + let ctrl = 1_000_000; + for i in 0..limit { + bond_validator((1000 + i).into(), (1000 + i + ctrl).into(), 100); + } + + // window stays closed since no snapshot was taken. + run_to_block(27); + assert!(Staking::snapshot_validators().is_none()); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + }) + } + + #[test] + fn staking_is_locked_when_election_window_open() { + ExtBuilder::default() + .offchain_phragmen_ext() + .election_lookahead(3) + .build() + .execute_with(|| { + run_to_block(12); + assert!(Staking::snapshot_validators().is_some()); + // given + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + // chill et. al. are now not allowed. + assert_noop!( + Staking::chill(Origin::signed(10)), + Error::::CallNotAllowed, + ); + }) + } + + #[test] + fn signed_result_can_be_submitted() { + // should check that we have a new validator set normally, + // event says that it comes from offchain. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(12); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + assert!(Staking::snapshot_validators().is_some()); + + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + )); + + let queued_result = Staking::queued_elected().unwrap(); + assert_eq!(queued_result.compute, ElectionCompute::Signed); + + run_to_block(15); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let MetaEvent::staking(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::StakingElection(ElectionCompute::Signed), + ); + }) + } + + #[test] + fn signed_result_can_be_submitted_later() { + // same as `signed_result_can_be_submitted` but at a later block. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(14); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + )); + + let queued_result = Staking::queued_elected().unwrap(); + assert_eq!(queued_result.compute, ElectionCompute::Signed); + + run_to_block(15); + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let MetaEvent::staking(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::StakingElection(ElectionCompute::Signed), + ); + }) + } + + #[test] + fn early_solution_submission_is_rejected() { + // should check that we have a new validator set normally, + // event says that it comes from offchain. + ExtBuilder::default() + .offchain_phragmen_ext() + .build() + .execute_with(|| { + run_to_block(11); + // submission is not yet allowed + assert_eq!(Staking::era_election_status(), ElectionStatus::Closed); + + // create all the indices just to build the solution. + Staking::create_stakers_snapshot(); + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + Staking::kill_stakers_snapshot(); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenEarlySubmission, + ); + }) + } + + #[test] + fn weak_solution_is_rejected() { + // A solution which is weaker than what we currently have on-chain is rejected. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .has_stakers(false) + .validator_count(4) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + // a good solution + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + )); + + // a bad solution + let (compact, winners, score) = horrible_phragmen_with_post_processing(false); + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenWeakSubmission, + ); + }) + } + + #[test] + fn better_solution_is_accepted() { + // A solution which is better than what we currently have on-chain is accepted. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + // a meeeeh solution + let (compact, winners, score) = horrible_phragmen_with_post_processing(false); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + )); + + // a better solution + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + )); + }) + } + + #[test] + fn offchain_worker_runs_when_window_open() { + // at the end of the first finalized block with ElectionStatus::open(_), it should execute. + let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(2) + .build(); + let state = offchainify(&mut ext); + ext.execute_with(|| { + run_to_block(12); + + // local key 11 is in the elected set. 
+ assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!(state.read().transactions.len(), 0); + Staking::offchain_worker(12); + assert_eq!(state.read().transactions.len(), 1); + + let encoded = state.read().transactions[0].clone(); + let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); + + let call = extrinsic.call; + let inner = match call { + mock::Call::Staking(inner) => inner, + }; + + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &inner, + ), + TransactionValidity::Ok(ValidTransaction { + priority: (1 << 20) + 1125, // the proposed slot stake. + requires: vec![], + provides: vec![("StakingOffchain", current_era()).encode()], + longevity: 3, + propagate: false, + }) + ) + }) + } + + #[test] + fn mediocre_submission_from_authority_is_early_rejected() { + let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .build(); + let state = offchainify(&mut ext); + ext.execute_with(|| { + run_to_block(12); + // put a good solution on-chain + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ),); + + // now run the offchain worker in the same chain state. 
+ Staking::offchain_worker(12); + assert_eq!(state.read().transactions.len(), 1); + + let encoded = state.read().transactions[0].clone(); + let extrinsic: Extrinsic = Decode::decode(&mut &*encoded).unwrap(); + + let call = extrinsic.call; + let inner = match call { + mock::Call::Staking(inner) => inner, + }; + + // pass this call to ValidateUnsigned + assert_eq!( + ::validate_unsigned( + TransactionSource::Local, + &inner, + ), + TransactionValidity::Err( + InvalidTransaction::Custom(>::PhragmenWeakSubmission.as_u8()) + .into(), + ), + ) + }) + } + + #[test] + fn invalid_phragmen_result_correct_number_of_winners() { + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + ValidatorCount::put(3); + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + ValidatorCount::put(4); + + assert_eq!(winners.len(), 3); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusWinnerCount, + ); + }) + } + + #[test] + fn invalid_phragmen_result_correct_number_of_winners_1() { + // if we have too little validators, then the number of candidates is the bound. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(8) // we simply cannot elect 8 + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + ValidatorCount::put(3); + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + ValidatorCount::put(4); + + assert_eq!(winners.len(), 3); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusWinnerCount, + ); + }) + } + + #[test] + fn invalid_phragmen_result_correct_number_of_winners_2() { + // if we have too little validators, then the number of candidates is the bound. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(8) // we simply cannot elect 8 + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(true, |_| {}); + + assert_eq!(winners.len(), 4); + + // all good. We chose 4 and it works. + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ),); + }) + } + + #[test] + fn invalid_phragmen_result_out_of_bound_nominator_index() { + // A nominator index which is simply invalid + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (mut compact, winners, score) = prepare_submission_with(true, |_| {}); + + // index 9 doesn't exist. + compact.votes1.push((9, 2)); + + // The error type sadly cannot be more specific now. 
+ assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusCompact, + ); + }) + } + + #[test] + fn invalid_phragmen_result_out_of_bound_validator_index() { + // A validator index which is out of bound + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (mut compact, winners, score) = prepare_submission_with(true, |_| {}); + + // index 4 doesn't exist. + compact.votes1.push((3, 4)); + + // The error type sadly cannot be more specific now. + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusCompact, + ); + }) + } + + #[test] + fn invalid_phragmen_result_out_of_bound_winner_index() { + // A winner index which is simply invalid + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (compact, _, score) = prepare_submission_with(true, |_| {}); + + // index 4 doesn't exist. + let winners = vec![0, 1, 2, 4]; + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusWinner, + ); + }) + } + + #[test] + fn invalid_phragmen_result_non_winner_validator_index() { + // An edge that points to a correct validator index who is NOT a winner. This is very + // similar to the test that raises `PhragmenBogusNomination`. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(2) // we select only 2. + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + assert_eq!(Staking::snapshot_nominators().unwrap().len(), 5 + 4); + assert_eq!(Staking::snapshot_validators().unwrap().len(), 4); + let (compact, winners, score) = prepare_submission_with(true, |a| { + a.iter_mut() + .find(|x| x.who == 5) + // all 3 cannot be among the winners. Although, all of them are validator + // candidates. + .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); + }); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusEdge, + ); + }) + } + + #[test] + fn invalid_phragmen_result_wrong_self_vote() { + // A self vote for someone else. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(true, |a| { + // mutate a self vote to target someone else. That someone else is still among the + // winners + a.iter_mut().find(|x| x.who == 11).map(|x| { + x.distribution + .iter_mut() + .find(|y| y.0 == 11) + .map(|y| y.0 = 21) + }); + }); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusSelfVote, + ); + }) + } + + #[test] + fn invalid_phragmen_result_wrong_self_vote_2() { + // A self validator voting for someone else next to self vote. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(true, |a| { + // Remove the self vote. 
+ a.retain(|x| x.who != 11); + // add is as a new double vote + a.push(StakedAssignment { + who: 11, + distribution: vec![(11, 50), (21, 50)], + }); + }); + + // This raises score issue. + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusSelfVote, + ); + }) + } + + #[test] + fn invalid_phragmen_result_over_stake() { + // Someone's edge ratios sums to more than 100%. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + // Note: we don't reduce here to be able to tweak votes3. votes3 will vanish if you + // reduce. + let (mut compact, winners, score) = prepare_submission_with(false, |_| {}); + + if let Some(c) = compact.votes3.iter_mut().find(|x| x.0 == 0) { + // by default it should have been (0, [(2, 33%), (1, 33%)], 0) + // now the sum is above 100% + c.1 = [(2, percent(66)), (1, percent(66))]; + } + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusCompact, + ); + }) + } + + #[test] + fn invalid_phragmen_result_under_stake() { + // at the time of this writing, we cannot under stake someone. The compact assignment works + // in a way that some of the stakes are presented by the submitter, and the last one is read + // from chain by subtracting the rest from total. Hence, the sum is always correct. + // This test is only here as a demonstration. + } + + #[test] + fn invalid_phragmen_result_invalid_target_stealing() { + // A valid voter who voted for someone who is a candidate, and is a correct winner, but is + // actually NOT nominated by this nominator. 
+ ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, score) = prepare_submission_with(false, |a| { + // 3 only voted for 20 and 40. We add a fake vote to 30. The stake sum is still + // correctly 100. + a.iter_mut() + .find(|x| x.who == 3) + .map(|x| x.distribution = vec![(21, 50), (41, 30), (31, 20)]); + }); + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusNomination, + ); + }) + } + + #[test] + fn nomination_slash_filter_is_checked() { + // If a nominator has voted for someone who has been recently slashed, that particular + // nomination should be disabled for the upcoming election. A solution must respect this + // rule. + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + + // finalize the round with fallback. This is needed since all nominator submission + // are in era zero and we want this one to pass with no problems. + run_to_block(15); + + // go to the next session to trigger mock::start_era and bump the active era + run_to_block(20); + + // slash 10. This must happen outside of the election window. + let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + on_offence_now( + &[OffenceDetails { + offender: (11, offender_expo.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(50)], + ); + + // validate 10 again for the next round. But this guy will not have the votes that + // it should have had from 1 and 2. + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); + + // open the election window and create snapshots. + run_to_block(32); + + // a solution that has been prepared after the slash. 
+ let (compact, winners, score) = prepare_submission_with(false, |a| { + // no one is allowed to vote for 10, except for itself. + a.into_iter().filter(|s| s.who != 11).for_each(|s| { + assert!(s.distribution.iter().find(|(t, _)| *t == 11).is_none()) + }); + }); + + // can be submitted. + assert_ok!(Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + )); + + // a wrong solution. + let (compact, winners, score) = prepare_submission_with(false, |a| { + // add back the vote that has been filtered out. + a.push(StakedAssignment { + who: 1, + distribution: vec![(11, 100)], + }); + }); + + // is rejected. + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenSlashedNomination, + ); + }) + } + + #[test] + fn invalid_phragmen_result_wrong_score() { + // A valid voter who's total distributed stake is more than what they bond + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + build_offchain_phragmen_test_ext(); + run_to_block(12); + + let (compact, winners, mut score) = prepare_submission_with(true, |_| {}); + score[0] += 1; + + assert_noop!( + Staking::submit_election_solution( + Origin::signed(10), + winners, + compact, + score, + current_era(), + ), + Error::::PhragmenBogusScore, + ); + }) + } + + #[test] + fn offchain_storage_is_set() { + let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .build(); + let state = offchainify(&mut ext); + + ext.execute_with(|| { + use offchain_election::OFFCHAIN_HEAD_DB; + use sp_runtime::offchain::storage::StorageValueRef; + + run_to_block(12); + + Staking::offchain_worker(12); + // it works + assert_eq!(state.read().transactions.len(), 1); + + // and it is set + let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + assert_eq!(storage.get::().unwrap().unwrap(), 12); + }) 
+ } + + #[test] + fn offchain_storage_prevents_duplicate() { + let mut ext = ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .build(); + let _ = offchainify(&mut ext); + + ext.execute_with(|| { + use offchain_election::OFFCHAIN_HEAD_DB; + use sp_runtime::offchain::storage::StorageValueRef; + let storage = StorageValueRef::persistent(&OFFCHAIN_HEAD_DB); + + run_to_block(12); + + // first run -- ok + assert_eq!( + offchain_election::set_check_offchain_execution_status::(12), + Ok(()), + ); + assert_eq!(storage.get::().unwrap().unwrap(), 12); + + // re-execute after the next. not allowed. + assert_eq!( + offchain_election::set_check_offchain_execution_status::(13), + Err("recently executed."), + ); + + // a fork like situation -- re-execute 10, 11, 12. But it won't go through. + assert_eq!( + offchain_election::set_check_offchain_execution_status::(10), + Err("fork."), + ); + assert_eq!( + offchain_election::set_check_offchain_execution_status::(11), + Err("fork."), + ); + assert_eq!( + offchain_election::set_check_offchain_execution_status::(12), + Err("recently executed."), + ); + }) + } + + #[test] + #[should_panic] + fn offence_is_blocked_when_window_open() { + ExtBuilder::default() + .offchain_phragmen_ext() + .validator_count(4) + .has_stakers(false) + .build() + .execute_with(|| { + run_to_block(12); + assert_eq!(Staking::era_election_status(), ElectionStatus::Open(12)); + + let offender_expo = Staking::eras_stakers(Staking::active_era().unwrap().index, 10); + + // panic from the impl in mock + on_offence_now( + &[OffenceDetails { + offender: (10, offender_expo.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + }) + } } #[test] fn slash_kicks_validators_not_nominators_and_disables_nominator_for_kicked_validator() { - ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - - // pre-slash balance - assert_eq!(Balances::free_balance(11), 1000); - 
assert_eq!(Balances::free_balance(101), 2000); - - // 11 and 21 both have the support of 100 - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - - assert_eq!(exposure_11.total, 1000 + 125); - assert_eq!(exposure_21.total, 1000 + 375); - - on_offence_now( - &[OffenceDetails { - offender: (11, exposure_11.clone()), - reporters: vec![], - }], - &[Perbill::from_percent(10)], - ); - - // post-slash balance - let nominator_slash_amount_11 = 125 / 10; - assert_eq!(Balances::free_balance(11), 900); - assert_eq!( - Balances::free_balance(101), - 2000 - nominator_slash_amount_11 - ); - - // This is the best way to check that the validator was chilled; `get` will - // return default value. - for (stash, _) in ::Validators::iter() { - assert!(stash != 11); - } - - let nominations = ::Nominators::get(&101).unwrap(); - - // and make sure that the vote will be ignored even if the validator - // re-registers. 
- let last_slash = ::SlashingSpans::get(&11) - .unwrap() - .last_nonzero_slash(); - assert!(nominations.submitted_in < last_slash); - - // actually re-bond the slashed validator - assert_ok!(Staking::validate(Origin::signed(10), Default::default())); - - mock::start_era(2); - let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); - let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); - - // 10 is re-elected, but without the support of 100 - assert_eq!(exposure_11.total, 900); - - // 20 is re-elected, with the (almost) entire support of 100 - assert_eq!(exposure_21.total, 1000 + 500 - nominator_slash_amount_11); - }); + ExtBuilder::default().build_and_execute(|| { + mock::start_era(1); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + + // pre-slash balance + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); + + // 11 and 21 both have the support of 100 + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + + assert_eq!(exposure_11.total, 1000 + 125); + assert_eq!(exposure_21.total, 1000 + 375); + + on_offence_now( + &[OffenceDetails { + offender: (11, exposure_11.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(10)], + ); + + // post-slash balance + let nominator_slash_amount_11 = 125 / 10; + assert_eq!(Balances::free_balance(11), 900); + assert_eq!( + Balances::free_balance(101), + 2000 - nominator_slash_amount_11 + ); + + // This is the best way to check that the validator was chilled; `get` will + // return default value. + for (stash, _) in ::Validators::iter() { + assert!(stash != 11); + } + + let nominations = ::Nominators::get(&101).unwrap(); + + // and make sure that the vote will be ignored even if the validator + // re-registers. 
+ let last_slash = ::SlashingSpans::get(&11) + .unwrap() + .last_nonzero_slash(); + assert!(nominations.submitted_in < last_slash); + + // actually re-bond the slashed validator + assert_ok!(Staking::validate(Origin::signed(10), Default::default())); + + mock::start_era(2); + let exposure_11 = Staking::eras_stakers(Staking::active_era().unwrap().index, &11); + let exposure_21 = Staking::eras_stakers(Staking::active_era().unwrap().index, &21); + + // 10 is re-elected, but without the support of 100 + assert_eq!(exposure_11.total, 900); + + // 20 is re-elected, with the (almost) entire support of 100 + assert_eq!(exposure_21.total, 1000 + 500 - nominator_slash_amount_11); + }); } #[test] fn claim_reward_at_the_last_era_and_no_double_claim_and_invalid_claim() { - // should check that: - // * rewards get paid until history_depth for both validators and nominators - // * an invalid era to claim doesn't update last_reward - // * double claim of one era fails - ExtBuilder::default().nominate(true).build_and_execute(|| { - let init_balance_10 = Balances::total_balance(&10); - let init_balance_100 = Balances::total_balance(&100); - - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_100 = Perbill::from_rational_approximation::(125, 1125); - - // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); - - >::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something - - mock::start_era(1); - - >::reward_by_ids(vec![(11, 1)]); - // Change total issuance in order to modify total payout - let _ = Balances::deposit_creating(&999, 1_000_000_000); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3000); - 
assert!(total_payout_1 > 10); // Test is meaningful if reward something - assert!(total_payout_1 != total_payout_0); - - mock::start_era(2); - - >::reward_by_ids(vec![(11, 1)]); - // Change total issuance in order to modify total payout - let _ = Balances::deposit_creating(&999, 1_000_000_000); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_2 = current_total_payout_for_duration(3000); - assert!(total_payout_2 > 10); // Test is meaningful if reward something - assert!(total_payout_2 != total_payout_0); - assert!(total_payout_2 != total_payout_1); - - mock::start_era(Staking::history_depth() + 1); - - let active_era = Staking::active_era().unwrap().index; - - // This is the latest planned era in staking, not the active era - let current_era = Staking::current_era().unwrap(); - - // Last kept is 1: - assert!(current_era - Staking::history_depth() == 1); - assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 0), - // Fail: Era out of history - Error::::InvalidEraToReward - ); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); - assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, 2), - // Fail: Double claim - Error::::AlreadyClaimed - ); - assert_noop!( - Staking::payout_stakers(Origin::signed(1337), 11, active_era), - // Fail: Era not finished yet - Error::::InvalidEraToReward - ); - - // Era 0 can't be rewarded anymore and current era can't be rewarded yet - // only era 1 and 2 can be rewarded. 
- - assert_eq!( - Balances::total_balance(&10), - init_balance_10 + part_for_10 * (total_payout_1 + total_payout_2), - ); - assert_eq!( - Balances::total_balance(&100), - init_balance_100 + part_for_100 * (total_payout_1 + total_payout_2), - ); - }); + // should check that: + // * rewards get paid until history_depth for both validators and nominators + // * an invalid era to claim doesn't update last_reward + // * double claim of one era fails + ExtBuilder::default().nominate(true).build_and_execute(|| { + let init_balance_10 = Balances::total_balance(&10); + let init_balance_100 = Balances::total_balance(&100); + + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_100 = Perbill::from_rational_approximation::(125, 1125); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + mock::start_era(1); + + >::reward_by_ids(vec![(11, 1)]); + // Change total issuance in order to modify total payout + let _ = Balances::deposit_creating(&999, 1_000_000_000); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + assert!(total_payout_1 != total_payout_0); + + mock::start_era(2); + + >::reward_by_ids(vec![(11, 1)]); + // Change total issuance in order to modify total payout + let _ = Balances::deposit_creating(&999, 1_000_000_000); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_2 = current_total_payout_for_duration(3000); + assert!(total_payout_2 > 10); // Test is meaningful if reward something + 
assert!(total_payout_2 != total_payout_0); + assert!(total_payout_2 != total_payout_1); + + mock::start_era(Staking::history_depth() + 1); + + let active_era = Staking::active_era().unwrap().index; + + // This is the latest planned era in staking, not the active era + let current_era = Staking::current_era().unwrap(); + + // Last kept is 1: + assert!(current_era - Staking::history_depth() == 1); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 0), + // Fail: Era out of history + Error::::InvalidEraToReward + ); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 2)); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 2), + // Fail: Double claim + Error::::AlreadyClaimed + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, active_era), + // Fail: Era not finished yet + Error::::InvalidEraToReward + ); + + // Era 0 can't be rewarded anymore and current era can't be rewarded yet + // only era 1 and 2 can be rewarded. + + assert_eq!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_1 + total_payout_2), + ); + assert_eq!( + Balances::total_balance(&100), + init_balance_100 + part_for_100 * (total_payout_1 + total_payout_2), + ); + }); } #[test] fn zero_slash_keeps_nominators() { - ExtBuilder::default().build_and_execute(|| { - mock::start_era(1); - - assert_eq!(Balances::free_balance(11), 1000); - - let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); - assert_eq!(Balances::free_balance(101), 2000); - - on_offence_now( - &[ - OffenceDetails { - offender: (11, exposure.clone()), - reporters: vec![], - }, - ], - &[Perbill::from_percent(0)], - ); - - assert_eq!(Balances::free_balance(11), 1000); - assert_eq!(Balances::free_balance(101), 2000); - - // This is the best way to check that the validator was chilled; `get` will - // return default value. 
- for (stash, _) in ::Validators::iter() { - assert!(stash != 11); - } - - let nominations = ::Nominators::get(&101).unwrap(); - - // and make sure that the vote will not be ignored, because the slash was - // zero. - let last_slash = ::SlashingSpans::get(&11).unwrap().last_nonzero_slash(); - assert!(nominations.submitted_in >= last_slash); - }); + ExtBuilder::default().build_and_execute(|| { + mock::start_era(1); + + assert_eq!(Balances::free_balance(11), 1000); + + let exposure = Staking::eras_stakers(Staking::active_era().unwrap().index, 11); + assert_eq!(Balances::free_balance(101), 2000); + + on_offence_now( + &[OffenceDetails { + offender: (11, exposure.clone()), + reporters: vec![], + }], + &[Perbill::from_percent(0)], + ); + + assert_eq!(Balances::free_balance(11), 1000); + assert_eq!(Balances::free_balance(101), 2000); + + // This is the best way to check that the validator was chilled; `get` will + // return default value. + for (stash, _) in ::Validators::iter() { + assert!(stash != 11); + } + + let nominations = ::Nominators::get(&101).unwrap(); + + // and make sure that the vote will not be ignored, because the slash was + // zero. + let last_slash = ::SlashingSpans::get(&11) + .unwrap() + .last_nonzero_slash(); + assert!(nominations.submitted_in >= last_slash); + }); } #[test] fn six_session_delay() { - ExtBuilder::default().build_and_execute(|| { - use pallet_session::SessionManager; - - let val_set = Session::validators(); - let init_session = Session::current_index(); - let init_active_era = Staking::active_era().unwrap().index; - // pallet-session is delaying session by one, thus the next session to plan is +2. 
- assert_eq!(>::new_session(init_session + 2), None); - assert_eq!(>::new_session(init_session + 3), Some(val_set.clone())); - assert_eq!(>::new_session(init_session + 4), None); - assert_eq!(>::new_session(init_session + 5), None); - assert_eq!(>::new_session(init_session + 6), Some(val_set.clone())); - - >::end_session(init_session); - >::start_session(init_session + 1); - assert_eq!(Staking::active_era().unwrap().index, init_active_era); - >::end_session(init_session + 1); - >::start_session(init_session + 2); - assert_eq!(Staking::active_era().unwrap().index, init_active_era); - - // Reward current era - Staking::reward_by_ids(vec![(11, 1)]); - - // New active era is triggered here. - >::end_session(init_session + 2); - >::start_session(init_session + 3); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); - >::end_session(init_session + 3); - >::start_session(init_session + 4); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); - >::end_session(init_session + 4); - >::start_session(init_session + 5); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); - - // Reward current era - Staking::reward_by_ids(vec![(21, 2)]); - - // New active era is triggered here. - >::end_session(init_session + 5); - >::start_session(init_session + 6); - assert_eq!(Staking::active_era().unwrap().index, init_active_era + 2); - - // That reward are correct - assert_eq!(Staking::eras_reward_points(init_active_era).total, 1); - assert_eq!(Staking::eras_reward_points(init_active_era + 1).total, 2); - }); + ExtBuilder::default().build_and_execute(|| { + use pallet_session::SessionManager; + + let val_set = Session::validators(); + let init_session = Session::current_index(); + let init_active_era = Staking::active_era().unwrap().index; + // pallet-session is delaying session by one, thus the next session to plan is +2. 
+ assert_eq!( + >::new_session(init_session + 2), + None + ); + assert_eq!( + >::new_session(init_session + 3), + Some(val_set.clone()) + ); + assert_eq!( + >::new_session(init_session + 4), + None + ); + assert_eq!( + >::new_session(init_session + 5), + None + ); + assert_eq!( + >::new_session(init_session + 6), + Some(val_set.clone()) + ); + + >::end_session(init_session); + >::start_session(init_session + 1); + assert_eq!(Staking::active_era().unwrap().index, init_active_era); + >::end_session(init_session + 1); + >::start_session(init_session + 2); + assert_eq!(Staking::active_era().unwrap().index, init_active_era); + + // Reward current era + Staking::reward_by_ids(vec![(11, 1)]); + + // New active era is triggered here. + >::end_session(init_session + 2); + >::start_session(init_session + 3); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + >::end_session(init_session + 3); + >::start_session(init_session + 4); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + >::end_session(init_session + 4); + >::start_session(init_session + 5); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 1); + + // Reward current era + Staking::reward_by_ids(vec![(21, 2)]); + + // New active era is triggered here. 
+ >::end_session(init_session + 5); + >::start_session(init_session + 6); + assert_eq!(Staking::active_era().unwrap().index, init_active_era + 2); + + // That reward are correct + assert_eq!(Staking::eras_reward_points(init_active_era).total, 1); + assert_eq!(Staking::eras_reward_points(init_active_era + 1).total, 2); + }); } #[test] fn test_max_nominator_rewarded_per_validator_and_cant_steal_someone_else_reward() { - // Test: - // * If nominator nomination is below the $MaxNominatorRewardedPerValidator other nominator - // then the nominator can't claim its reward - // * A nominator can't claim another nominator reward - ExtBuilder::default().build_and_execute(|| { - for i in 0..=::MaxNominatorRewardedPerValidator::get() { - let stash = 10_000 + i as AccountId; - let controller = 20_000 + i as AccountId; - let balance = 10_000 + i as Balance; - Balances::make_free_balance_be(&stash, balance); - assert_ok!( - Staking::bond( - Origin::signed(stash), - controller, - balance, - RewardDestination::Stash - ) - ); - assert_ok!(Staking::nominate(Origin::signed(controller), vec![11])); - } - mock::start_era(1); - - >::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - - mock::start_era(2); - mock::make_all_reward_payment(1); - - // Assert only nominators from 1 to Max are rewarded - for i in 0..=::MaxNominatorRewardedPerValidator::get() { - let stash = 10_000 + i as AccountId; - let balance = 10_000 + i as Balance; - if stash == 10_000 { - assert!(Balances::free_balance(&stash) == balance); - } else { - assert!(Balances::free_balance(&stash) > balance); - } - } - }); + // Test: + // * If nominator nomination is below the $MaxNominatorRewardedPerValidator other nominator + // then the nominator can't claim its reward + // * A nominator can't claim another nominator reward 
+ ExtBuilder::default().build_and_execute(|| { + for i in 0..=::MaxNominatorRewardedPerValidator::get() { + let stash = 10_000 + i as AccountId; + let controller = 20_000 + i as AccountId; + let balance = 10_000 + i as Balance; + Balances::make_free_balance_be(&stash, balance); + assert_ok!(Staking::bond( + Origin::signed(stash), + controller, + balance, + RewardDestination::Stash + )); + assert_ok!(Staking::nominate(Origin::signed(controller), vec![11])); + } + mock::start_era(1); + + >::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment(1); + + // Assert only nominators from 1 to Max are rewarded + for i in 0..=::MaxNominatorRewardedPerValidator::get() { + let stash = 10_000 + i as AccountId; + let balance = 10_000 + i as Balance; + if stash == 10_000 { + assert!(Balances::free_balance(&stash) == balance); + } else { + assert!(Balances::free_balance(&stash) > balance); + } + } + }); } #[test] fn set_history_depth_works() { - ExtBuilder::default().build_and_execute(|| { - mock::start_era(10); - Staking::set_history_depth(Origin::ROOT, 20).unwrap(); - assert!(::ErasTotalStake::contains_key(10 - 4)); - assert!(::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(Origin::ROOT, 4).unwrap(); - assert!(::ErasTotalStake::contains_key(10 - 4)); - assert!(!::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(Origin::ROOT, 3).unwrap(); - assert!(!::ErasTotalStake::contains_key(10 - 4)); - assert!(!::ErasTotalStake::contains_key(10 - 5)); - Staking::set_history_depth(Origin::ROOT, 8).unwrap(); - assert!(!::ErasTotalStake::contains_key(10 - 4)); - assert!(!::ErasTotalStake::contains_key(10 - 5)); - }); + ExtBuilder::default().build_and_execute(|| { + mock::start_era(10); + 
Staking::set_history_depth(Origin::ROOT, 20).unwrap(); + assert!(::ErasTotalStake::contains_key(10 - 4)); + assert!(::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::ROOT, 4).unwrap(); + assert!(::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::ROOT, 3).unwrap(); + assert!(!::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + Staking::set_history_depth(Origin::ROOT, 8).unwrap(); + assert!(!::ErasTotalStake::contains_key(10 - 4)); + assert!(!::ErasTotalStake::contains_key(10 - 5)); + }); } #[test] fn test_payout_stakers() { - // Here we will test validator can set `max_nominators_payout` and it works. - // We also test that `payout_extra_nominators` works. - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - let balance = 1000; - // Create three validators: - bond_validator(11, 10, balance); // Default(64) - - // Create nominators, targeting stash of validators - for i in 0..100 { - bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); - } - - mock::start_era(1); - Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); - - // Top 64 nominators of validator 11 automatically paid out, including the validator - // Validator payout goes to controller. 
- assert!(Balances::free_balance(&10) > balance); - for i in 36..100 { - assert!(Balances::free_balance(&(100 + i)) > balance + i as Balance); - } - // The bottom 36 do not - for i in 0..36 { - assert_eq!(Balances::free_balance(&(100 + i)), balance + i as Balance); - } - - // We track rewards in `claimed_rewards` vec - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![1] }) - ); - - for i in 3..16 { - Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); - } - - // We track rewards in `claimed_rewards` vec - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: (1..=14).collect() }) - ); - - for i in 16..100 { - Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); - } - - // We clean it up as history passes - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 98] }) - ); - - // Out of order claims works. 
- assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 69)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 23)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 42)); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { stash: 11, total: 1000, active: 1000, unlocking: vec![], claimed_rewards: vec![15, 23, 42, 69, 98] }) - ); - }); + // Here we will test validator can set `max_nominators_payout` and it works. + // We also test that `payout_extra_nominators` works. + ExtBuilder::default() + .has_stakers(false) + .build_and_execute(|| { + let balance = 1000; + // Create three validators: + bond_validator(11, 10, balance); // Default(64) + + // Create nominators, targeting stash of validators + for i in 0..100 { + bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); + } + + mock::start_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(2); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 1)); + + // Top 64 nominators of validator 11 automatically paid out, including the validator + // Validator payout goes to controller. 
+ assert!(Balances::free_balance(&10) > balance); + for i in 36..100 { + assert!(Balances::free_balance(&(100 + i)) > balance + i as Balance); + } + // The bottom 36 do not + for i in 0..36 { + assert_eq!(Balances::free_balance(&(100 + i)), balance + i as Balance); + } + + // We track rewards in `claimed_rewards` vec + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![1] + }) + ); + + for i in 3..16 { + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(i); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, i - 1)); + } + + // We track rewards in `claimed_rewards` vec + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: (1..=14).collect() + }) + ); + + for i in 16..100 { + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(i); + } + + // We clean it up as history passes + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![15, 98] + }) + ); + + // Out of order claims works. 
+ assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 69)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 23)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 42)); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![15, 23, 42, 69, 98] + }) + ); + }); } #[test] fn payout_stakers_handles_basic_errors() { - // Here we will test payouts handle all errors. - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - // Same setup as the test above - let balance = 1000; - bond_validator(11, 10, balance); // Default(64) - - // Create nominators, targeting stash - for i in 0..100 { - bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); - } - - mock::start_era(1); - Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(2); - - // Wrong Era, too big - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 2), Error::::InvalidEraToReward); - // Wrong Staker - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 10, 1), Error::::NotStash); - - for i in 3..100 { - Staking::reward_by_ids(vec![(11, 1)]); - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 100); // Test is meaningful if reward something - mock::start_era(i); - } - // We are at era 99, with history depth of 84 - // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. 
- assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 14), Error::::InvalidEraToReward); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 99), Error::::InvalidEraToReward); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); - assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); - - // Can't claim again - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 15), Error::::AlreadyClaimed); - assert_noop!(Staking::payout_stakers(Origin::signed(1337), 11, 98), Error::::AlreadyClaimed); - }); + // Here we will test payouts handle all errors. + ExtBuilder::default() + .has_stakers(false) + .build_and_execute(|| { + // Same setup as the test above + let balance = 1000; + bond_validator(11, 10, balance); // Default(64) + + // Create nominators, targeting stash + for i in 0..100 { + bond_nominator(1000 + i, 100 + i, balance + i as Balance, vec![11]); + } + + mock::start_era(1); + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(2); + + // Wrong Era, too big + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 2), + Error::::InvalidEraToReward + ); + // Wrong Staker + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 10, 1), + Error::::NotStash + ); + + for i in 3..100 { + Staking::reward_by_ids(vec![(11, 1)]); + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 100); // Test is meaningful if reward something + mock::start_era(i); + } + // We are at era 99, with history depth of 84 + // We should be able to payout era 15 through 98 (84 total eras), but not 14 or 99. 
+ assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 14), + Error::::InvalidEraToReward + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 99), + Error::::InvalidEraToReward + ); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 15)); + assert_ok!(Staking::payout_stakers(Origin::signed(1337), 11, 98)); + + // Can't claim again + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 15), + Error::::AlreadyClaimed + ); + assert_noop!( + Staking::payout_stakers(Origin::signed(1337), 11, 98), + Error::::AlreadyClaimed + ); + }); } #[test] fn bond_during_era_correctly_populates_claimed_rewards() { - ExtBuilder::default().has_stakers(false).build_and_execute(|| { - // Era = None - bond_validator(9, 8, 1000); - assert_eq!( - Staking::ledger(&8), - Some(StakingLedger { - stash: 9, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); - mock::start_era(5); - bond_validator(11, 10, 1000); - assert_eq!( - Staking::ledger(&10), - Some(StakingLedger { - stash: 11, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: (0..5).collect(), - }) - ); - mock::start_era(99); - bond_validator(13, 12, 1000); - assert_eq!( - Staking::ledger(&12), - Some(StakingLedger { - stash: 13, - total: 1000, - active: 1000, - unlocking: vec![], - claimed_rewards: (15..99).collect(), - }) - ); - }); + ExtBuilder::default() + .has_stakers(false) + .build_and_execute(|| { + // Era = None + bond_validator(9, 8, 1000); + assert_eq!( + Staking::ledger(&8), + Some(StakingLedger { + stash: 9, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + mock::start_era(5); + bond_validator(11, 10, 1000); + assert_eq!( + Staking::ledger(&10), + Some(StakingLedger { + stash: 11, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: (0..5).collect(), + }) + ); + mock::start_era(99); + bond_validator(13, 12, 1000); + assert_eq!( + Staking::ledger(&12), + 
Some(StakingLedger { + stash: 13, + total: 1000, + active: 1000, + unlocking: vec![], + claimed_rewards: (15..99).collect(), + }) + ); + }); } /* These migration tests below can be removed once migration code is removed */ #[test] fn assert_migration_is_noop() { - let kusama_active_era = "4a0200000190e2721171010000"; - let era = ActiveEraInfo::decode(&mut &hex::decode(kusama_active_era).unwrap()[..]).unwrap(); - assert_eq!(era.index, 586); - assert_eq!(era.start, Some(1585135674000)); + let kusama_active_era = "4a0200000190e2721171010000"; + let era = ActiveEraInfo::decode(&mut &hex::decode(kusama_active_era).unwrap()[..]).unwrap(); + assert_eq!(era.index, 586); + assert_eq!(era.start, Some(1585135674000)); } #[test] fn test_last_reward_migration() { - use sp_storage::Storage; - - let mut s = Storage::default(); - - #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] - struct OldStakingLedger { - pub stash: AccountId, - #[codec(compact)] - pub total: Balance, - #[codec(compact)] - pub active: Balance, - pub unlocking: Vec>, - pub last_reward: Option, - } - - let old_staking10 = OldStakingLedger:: { - stash: 0, - total: 10, - active: 10, - unlocking: vec![UnlockChunk{ value: 1234, era: 56}], - last_reward: Some(8), - }; - - let old_staking11 = OldStakingLedger:: { - stash: 1, - total: 0, - active: 0, - unlocking: vec![], - last_reward: None, - }; - - let old_staking12 = OldStakingLedger:: { - stash: 2, - total: 100, - active: 100, - unlocking: vec![UnlockChunk{ value: 9876, era: 54}, UnlockChunk{ value: 98, era: 76}], - last_reward: Some(23), - }; - - let old_staking13 = OldStakingLedger:: { - stash: 3, - total: 100, - active: 100, - unlocking: vec![], - last_reward: Some(23), - }; - - let data = vec![ - ( - Ledger::::hashed_key_for(10), - old_staking10.encode().to_vec() - ), - ( - Ledger::::hashed_key_for(11), - old_staking11.encode().to_vec() - ), - ( - Ledger::::hashed_key_for(12), - old_staking12.encode().to_vec() - ), - ( - 
Ledger::::hashed_key_for(13), - old_staking13.encode().to_vec() - ), - ]; - - s.top = data.into_iter().collect(); - sp_io::TestExternalities::new(s).execute_with(|| { - HistoryDepth::put(84); - CurrentEra::put(99); - let nominations = Nominations:: { - targets: vec![], - submitted_in: 0, - suppressed: false - }; - Nominators::::insert(3, nominations); - Bonded::::insert(3, 13); - Staking::migrate_last_reward_to_claimed_rewards(); - // Test staker out of range - assert_eq!( - Ledger::::get(10), - Some(StakingLedger { - stash: 0, - total: 10, - active: 10, - unlocking: vec![UnlockChunk{ value: 1234, era: 56}], - claimed_rewards: vec![], - }) - ); - // Test staker none - assert_eq!( - Ledger::::get(11), - Some(StakingLedger { - stash: 1, - total: 0, - active: 0, - unlocking: vec![], - claimed_rewards: vec![], - }) - ); - // Test staker migration - assert_eq!( - Ledger::::get(12), - Some(StakingLedger { - stash: 2, - total: 100, - active: 100, - unlocking: vec![UnlockChunk{ value: 9876, era: 54}, UnlockChunk{ value: 98, era: 76}], - claimed_rewards: vec![15,16,17,18,19,20,21,22,23], - }) - ); - // Test nominator migration - assert_eq!( - Ledger::::get(13), - Some(StakingLedger { - stash: 3, - total: 100, - active: 100, - unlocking: vec![], - claimed_rewards: vec![15,16,17,18,19,20,21,22,23], - }) - ); - }); + use sp_storage::Storage; + + let mut s = Storage::default(); + + #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] + struct OldStakingLedger { + pub stash: AccountId, + #[codec(compact)] + pub total: Balance, + #[codec(compact)] + pub active: Balance, + pub unlocking: Vec>, + pub last_reward: Option, + } + + let old_staking10 = OldStakingLedger:: { + stash: 0, + total: 10, + active: 10, + unlocking: vec![UnlockChunk { + value: 1234, + era: 56, + }], + last_reward: Some(8), + }; + + let old_staking11 = OldStakingLedger:: { + stash: 1, + total: 0, + active: 0, + unlocking: vec![], + last_reward: None, + }; + + let old_staking12 = OldStakingLedger:: { + 
stash: 2, + total: 100, + active: 100, + unlocking: vec![ + UnlockChunk { + value: 9876, + era: 54, + }, + UnlockChunk { value: 98, era: 76 }, + ], + last_reward: Some(23), + }; + + let old_staking13 = OldStakingLedger:: { + stash: 3, + total: 100, + active: 100, + unlocking: vec![], + last_reward: Some(23), + }; + + let data = vec![ + ( + Ledger::::hashed_key_for(10), + old_staking10.encode().to_vec(), + ), + ( + Ledger::::hashed_key_for(11), + old_staking11.encode().to_vec(), + ), + ( + Ledger::::hashed_key_for(12), + old_staking12.encode().to_vec(), + ), + ( + Ledger::::hashed_key_for(13), + old_staking13.encode().to_vec(), + ), + ]; + + s.top = data.into_iter().collect(); + sp_io::TestExternalities::new(s).execute_with(|| { + HistoryDepth::put(84); + CurrentEra::put(99); + let nominations = Nominations:: { + targets: vec![], + submitted_in: 0, + suppressed: false, + }; + Nominators::::insert(3, nominations); + Bonded::::insert(3, 13); + Staking::migrate_last_reward_to_claimed_rewards(); + // Test staker out of range + assert_eq!( + Ledger::::get(10), + Some(StakingLedger { + stash: 0, + total: 10, + active: 10, + unlocking: vec![UnlockChunk { + value: 1234, + era: 56 + }], + claimed_rewards: vec![], + }) + ); + // Test staker none + assert_eq!( + Ledger::::get(11), + Some(StakingLedger { + stash: 1, + total: 0, + active: 0, + unlocking: vec![], + claimed_rewards: vec![], + }) + ); + // Test staker migration + assert_eq!( + Ledger::::get(12), + Some(StakingLedger { + stash: 2, + total: 100, + active: 100, + unlocking: vec![ + UnlockChunk { + value: 9876, + era: 54 + }, + UnlockChunk { value: 98, era: 76 } + ], + claimed_rewards: vec![15, 16, 17, 18, 19, 20, 21, 22, 23], + }) + ); + // Test nominator migration + assert_eq!( + Ledger::::get(13), + Some(StakingLedger { + stash: 3, + total: 100, + active: 100, + unlocking: vec![], + claimed_rewards: vec![15, 16, 17, 18, 19, 20, 21, 22, 23], + }) + ); + }); } #[test] fn rewards_should_work_before_migration() { - // 
should check that before migration: - // * rewards get recorded per session - // * rewards get paid per Era - // * Check that nominators are also rewarded - ExtBuilder::default().nominate(true).build_and_execute(|| { - MigrateEra::put(10); - let init_balance_10 = Balances::total_balance(&10); - let init_balance_11 = Balances::total_balance(&11); - let init_balance_20 = Balances::total_balance(&20); - let init_balance_21 = Balances::total_balance(&21); - let init_balance_100 = Balances::total_balance(&100); - let init_balance_101 = Balances::total_balance(&101); - - // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); - - >::reward_by_ids(vec![(11, 50)]); - >::reward_by_ids(vec![(11, 50)]); - // This is the second validator of the current elected set. - >::reward_by_ids(vec![(21, 50)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something - - start_session(1); - - assert_eq!(Balances::total_balance(&10), init_balance_10); - assert_eq!(Balances::total_balance(&11), init_balance_11); - assert_eq!(Balances::total_balance(&20), init_balance_20); - assert_eq!(Balances::total_balance(&21), init_balance_21); - assert_eq!(Balances::total_balance(&100), init_balance_100); - assert_eq!(Balances::total_balance(&101), init_balance_101); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { - total: 50*3, - individual: vec![(11, 100), (21, 50)].into_iter().collect(), - }); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); - let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 
1125); - let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); - - start_session(2); - start_session(3); - - assert_eq!(Staking::active_era().unwrap().index, 1); - mock::make_all_reward_payment_before_migration(0); - - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * total_payout_0 * 2/3 - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - - assert_eq_uvec!(Session::validators(), vec![11, 21]); - >::reward_by_ids(vec![(11, 1)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something - - mock::start_era(2); - mock::make_all_reward_payment_before_migration(1); - - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - }); + // should check that before migration: + // * 
rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + ExtBuilder::default().nominate(true).build_and_execute(|| { + MigrateEra::put(10); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. + >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!( + Staking::eras_reward_points(Staking::active_era().unwrap().index), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); + let part_for_100_from_20 = 
Perbill::from_rational_approximation::(375, 1375); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment_before_migration(0); + + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * total_payout_0 * 2 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2 / 3 + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment_before_migration(1); + + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_0 * 2 / 3 + total_payout_1), + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + }); } #[test] fn 
migrate_era_should_work() { - // should check that before and after migration: - // * rewards get recorded per session - // * rewards get paid per Era - // * Check that nominators are also rewarded - ExtBuilder::default().nominate(true).build_and_execute(|| { - MigrateEra::put(1); - let init_balance_10 = Balances::total_balance(&10); - let init_balance_11 = Balances::total_balance(&11); - let init_balance_20 = Balances::total_balance(&20); - let init_balance_21 = Balances::total_balance(&21); - let init_balance_100 = Balances::total_balance(&100); - let init_balance_101 = Balances::total_balance(&101); - - // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); - - >::reward_by_ids(vec![(11, 50)]); - >::reward_by_ids(vec![(11, 50)]); - // This is the second validator of the current elected set. - >::reward_by_ids(vec![(21, 50)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something - - start_session(1); - - assert_eq!(Balances::total_balance(&10), init_balance_10); - assert_eq!(Balances::total_balance(&11), init_balance_11); - assert_eq!(Balances::total_balance(&20), init_balance_20); - assert_eq!(Balances::total_balance(&21), init_balance_21); - assert_eq!(Balances::total_balance(&100), init_balance_100); - assert_eq!(Balances::total_balance(&101), init_balance_101); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { - total: 50*3, - individual: vec![(11, 100), (21, 50)].into_iter().collect(), - }); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); - let part_for_100_from_10 = 
Perbill::from_rational_approximation::(125, 1125); - let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); - - start_session(2); - start_session(3); - - assert_eq!(Staking::active_era().unwrap().index, 1); - mock::make_all_reward_payment_before_migration(0); - - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * total_payout_0*2/3, 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * total_payout_0 * 2/3 - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - - assert_eq_uvec!(Session::validators(), vec![11, 21]); - >::reward_by_ids(vec![(11, 1)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something - - mock::start_era(2); - mock::make_all_reward_payment(1); - - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - }); + // should check that 
before and after migration: + // * rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + ExtBuilder::default().nominate(true).build_and_execute(|| { + MigrateEra::put(1); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. + >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!( + Staking::eras_reward_points(Staking::active_era().unwrap().index), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 
1125); + let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment_before_migration(0); + + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * total_payout_0 * 2 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2 / 3 + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment(1); + + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_0 * 2 / 3 + total_payout_1), + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + }); } #[test] 
#[should_panic] fn migrate_era_should_handle_error() { - ExtBuilder::default().nominate(true).build_and_execute(|| { - MigrateEra::put(1); - let init_balance_10 = Balances::total_balance(&10); - let init_balance_11 = Balances::total_balance(&11); - let init_balance_20 = Balances::total_balance(&20); - let init_balance_21 = Balances::total_balance(&21); - let init_balance_100 = Balances::total_balance(&100); - let init_balance_101 = Balances::total_balance(&101); - - // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); - - >::reward_by_ids(vec![(11, 50)]); - >::reward_by_ids(vec![(11, 50)]); - // This is the second validator of the current elected set. - >::reward_by_ids(vec![(21, 50)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something - - start_session(1); - - assert_eq!(Balances::total_balance(&10), init_balance_10); - assert_eq!(Balances::total_balance(&11), init_balance_11); - assert_eq!(Balances::total_balance(&20), init_balance_20); - assert_eq!(Balances::total_balance(&21), init_balance_21); - assert_eq!(Balances::total_balance(&100), init_balance_100); - assert_eq!(Balances::total_balance(&101), init_balance_101); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { - total: 50*3, - individual: vec![(11, 100), (21, 50)].into_iter().collect(), - }); - - start_session(2); - start_session(3); - - assert_eq!(Staking::active_era().unwrap().index, 1); - mock::make_all_reward_payment(0); - }); + ExtBuilder::default().nominate(true).build_and_execute(|| { + MigrateEra::put(1); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = 
Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. + >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!( + Staking::eras_reward_points(Staking::active_era().unwrap().index), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment(0); + }); } #[test] #[should_panic] fn migrate_era_should_handle_errors_2() { - // should check that before and after migration: - // * rewards get recorded per session - // * rewards get paid per Era - // * Check that nominators are also rewarded - ExtBuilder::default().nominate(true).build_and_execute(|| { - MigrateEra::put(1); - let init_balance_10 = Balances::total_balance(&10); - let 
init_balance_11 = Balances::total_balance(&11); - let init_balance_20 = Balances::total_balance(&20); - let init_balance_21 = Balances::total_balance(&21); - let init_balance_100 = Balances::total_balance(&100); - let init_balance_101 = Balances::total_balance(&101); - - // Check state - Payee::::insert(11, RewardDestination::Controller); - Payee::::insert(21, RewardDestination::Controller); - Payee::::insert(101, RewardDestination::Controller); - - >::reward_by_ids(vec![(11, 50)]); - >::reward_by_ids(vec![(11, 50)]); - // This is the second validator of the current elected set. - >::reward_by_ids(vec![(21, 50)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_0 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_0 > 10); // Test is meaningful if reward something - - start_session(1); - - assert_eq!(Balances::total_balance(&10), init_balance_10); - assert_eq!(Balances::total_balance(&11), init_balance_11); - assert_eq!(Balances::total_balance(&20), init_balance_20); - assert_eq!(Balances::total_balance(&21), init_balance_21); - assert_eq!(Balances::total_balance(&100), init_balance_100); - assert_eq!(Balances::total_balance(&101), init_balance_101); - assert_eq_uvec!(Session::validators(), vec![11, 21]); - assert_eq!(Staking::eras_reward_points(Staking::active_era().unwrap().index), EraRewardPoints { - total: 50*3, - individual: vec![(11, 100), (21, 50)].into_iter().collect(), - }); - let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); - let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); - let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); - let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); - - start_session(2); - start_session(3); - - assert_eq!(Staking::active_era().unwrap().index, 1); - mock::make_all_reward_payment_before_migration(0); - - assert_eq_error_rate!(Balances::total_balance(&10), 
init_balance_10 + part_for_10 * total_payout_0*2/3, 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0*1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * total_payout_0 * 2/3 - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - - assert_eq_uvec!(Session::validators(), vec![11, 21]); - >::reward_by_ids(vec![(11, 1)]); - - // Compute total payout now for whole duration as other parameter won't change - let total_payout_1 = current_total_payout_for_duration(3 * 1000); - assert!(total_payout_1 > 10); // Test is meaningful if reward something - - mock::start_era(2); - mock::make_all_reward_payment_before_migration(1); - - assert_eq_error_rate!(Balances::total_balance(&10), init_balance_10 + part_for_10 * (total_payout_0 * 2/3 + total_payout_1), 2); - assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); - assert_eq_error_rate!(Balances::total_balance(&20), init_balance_20 + part_for_20 * total_payout_0 * 1/3, 2); - assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); - assert_eq_error_rate!( - Balances::total_balance(&100), - init_balance_100 - + part_for_100_from_10 * (total_payout_0 * 2/3 + total_payout_1) - + part_for_100_from_20 * total_payout_0 * 1/3, - 2 - ); - assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); - }); + // should check that before and after migration: + // * rewards get recorded per session + // * rewards get paid per Era + // * Check that nominators are also rewarded + ExtBuilder::default().nominate(true).build_and_execute(|| { + MigrateEra::put(1); + let init_balance_10 = Balances::total_balance(&10); + let init_balance_11 = 
Balances::total_balance(&11); + let init_balance_20 = Balances::total_balance(&20); + let init_balance_21 = Balances::total_balance(&21); + let init_balance_100 = Balances::total_balance(&100); + let init_balance_101 = Balances::total_balance(&101); + + // Check state + Payee::::insert(11, RewardDestination::Controller); + Payee::::insert(21, RewardDestination::Controller); + Payee::::insert(101, RewardDestination::Controller); + + >::reward_by_ids(vec![(11, 50)]); + >::reward_by_ids(vec![(11, 50)]); + // This is the second validator of the current elected set. + >::reward_by_ids(vec![(21, 50)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_0 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_0 > 10); // Test is meaningful if reward something + + start_session(1); + + assert_eq!(Balances::total_balance(&10), init_balance_10); + assert_eq!(Balances::total_balance(&11), init_balance_11); + assert_eq!(Balances::total_balance(&20), init_balance_20); + assert_eq!(Balances::total_balance(&21), init_balance_21); + assert_eq!(Balances::total_balance(&100), init_balance_100); + assert_eq!(Balances::total_balance(&101), init_balance_101); + assert_eq_uvec!(Session::validators(), vec![11, 21]); + assert_eq!( + Staking::eras_reward_points(Staking::active_era().unwrap().index), + EraRewardPoints { + total: 50 * 3, + individual: vec![(11, 100), (21, 50)].into_iter().collect(), + } + ); + let part_for_10 = Perbill::from_rational_approximation::(1000, 1125); + let part_for_20 = Perbill::from_rational_approximation::(1000, 1375); + let part_for_100_from_10 = Perbill::from_rational_approximation::(125, 1125); + let part_for_100_from_20 = Perbill::from_rational_approximation::(375, 1375); + + start_session(2); + start_session(3); + + assert_eq!(Staking::active_era().unwrap().index, 1); + mock::make_all_reward_payment_before_migration(0); + + assert_eq_error_rate!( + Balances::total_balance(&10), + 
init_balance_10 + part_for_10 * total_payout_0 * 2 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * total_payout_0 * 2 / 3 + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + + assert_eq_uvec!(Session::validators(), vec![11, 21]); + >::reward_by_ids(vec![(11, 1)]); + + // Compute total payout now for whole duration as other parameter won't change + let total_payout_1 = current_total_payout_for_duration(3 * 1000); + assert!(total_payout_1 > 10); // Test is meaningful if reward something + + mock::start_era(2); + mock::make_all_reward_payment_before_migration(1); + + assert_eq_error_rate!( + Balances::total_balance(&10), + init_balance_10 + part_for_10 * (total_payout_0 * 2 / 3 + total_payout_1), + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&11), init_balance_11, 2); + assert_eq_error_rate!( + Balances::total_balance(&20), + init_balance_20 + part_for_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&21), init_balance_21, 2); + assert_eq_error_rate!( + Balances::total_balance(&100), + init_balance_100 + + part_for_100_from_10 * (total_payout_0 * 2 / 3 + total_payout_1) + + part_for_100_from_20 * total_payout_0 * 1 / 3, + 2 + ); + assert_eq_error_rate!(Balances::total_balance(&101), init_balance_101, 2); + }); } diff --git a/frame/sudo/src/lib.rs b/frame/sudo/src/lib.rs index b8cf9a353f..de7a474a93 100644 --- a/frame/sudo/src/lib.rs +++ b/frame/sudo/src/lib.rs @@ -87,143 +87,144 @@ #![cfg_attr(not(feature = "std"), no_std)] +use sp_runtime::traits::{Dispatchable, StaticLookup}; use 
sp_std::prelude::*; -use sp_runtime::traits::{StaticLookup, Dispatchable}; -use frame_support::{ - Parameter, decl_module, decl_event, decl_storage, decl_error, ensure, -}; -use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT, GetDispatchInfo, FunctionOf}; +use frame_support::weights::{FunctionOf, GetDispatchInfo, SimpleDispatchInfo, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, Parameter}; use frame_system::{self as system, ensure_signed}; pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// A sudo-able call. - type Call: Parameter + Dispatchable + GetDispatchInfo; + /// A sudo-able call. + type Call: Parameter + Dispatchable + GetDispatchInfo; } decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what it's working on. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - fn deposit_event() = default; - - /// Authenticates the sudo key and dispatches a function call with `Root` origin. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB write (event). - /// - Weight of derivative `call` execution + 10,000. - /// # - #[weight = FunctionOf( - |args: (&Box<::Call>,)| args.0.get_dispatch_info().weight + 10_000, - |args: (&Box<::Call>,)| args.0.get_dispatch_info().class, - true - )] - fn sudo(origin, call: Box<::Call>) { - // This is a public call, so we ensure that the origin is some signed account. 
- let sender = ensure_signed(origin)?; - ensure!(sender == Self::key(), Error::::RequireSudo); - - let res = match call.dispatch(frame_system::RawOrigin::Root.into()) { - Ok(_) => true, - Err(e) => { - sp_runtime::print(e); - false - } - }; - - Self::deposit_event(RawEvent::Sudid(res)); - } - - /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo key. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn set_key(origin, new: ::Source) { - // This is a public call, so we ensure that the origin is some signed account. - let sender = ensure_signed(origin)?; - ensure!(sender == Self::key(), Error::::RequireSudo); - let new = T::Lookup::lookup(new)?; - - Self::deposit_event(RawEvent::KeyChanged(Self::key())); - >::put(new); - } - - /// Authenticates the sudo key and dispatches a function call with `Signed` origin from - /// a given account. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB write (event). - /// - Weight of derivative `call` execution + 10,000. - /// # - #[weight = FunctionOf( - |args: (&::Source, &Box<::Call>,)| { - args.1.get_dispatch_info().weight + 10_000 - }, - |args: (&::Source, &Box<::Call>,)| { - args.1.get_dispatch_info().class - }, - true - )] - fn sudo_as(origin, who: ::Source, call: Box<::Call>) { - // This is a public call, so we ensure that the origin is some signed account. 
- let sender = ensure_signed(origin)?; - ensure!(sender == Self::key(), Error::::RequireSudo); - - let who = T::Lookup::lookup(who)?; - - let res = match call.dispatch(frame_system::RawOrigin::Signed(who).into()) { - Ok(_) => true, - Err(e) => { - sp_runtime::print(e); - false - } - }; - - Self::deposit_event(RawEvent::SudoAsDone(res)); - } - } + // Simple declaration of the `Module` type. Lets the macro know what it's working on. + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + fn deposit_event() = default; + + /// Authenticates the sudo key and dispatches a function call with `Root` origin. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB write (event). + /// - Weight of derivative `call` execution + 10,000. + /// # + #[weight = FunctionOf( + |args: (&Box<::Call>,)| args.0.get_dispatch_info().weight + 10_000, + |args: (&Box<::Call>,)| args.0.get_dispatch_info().class, + true + )] + fn sudo(origin, call: Box<::Call>) { + // This is a public call, so we ensure that the origin is some signed account. + let sender = ensure_signed(origin)?; + ensure!(sender == Self::key(), Error::::RequireSudo); + + let res = match call.dispatch(frame_system::RawOrigin::Root.into()) { + Ok(_) => true, + Err(e) => { + sp_runtime::print(e); + false + } + }; + + Self::deposit_event(RawEvent::Sudid(res)); + } + + /// Authenticates the current sudo key and sets the given AccountId (`new`) as the new sudo key. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB change. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn set_key(origin, new: ::Source) { + // This is a public call, so we ensure that the origin is some signed account. 
+ let sender = ensure_signed(origin)?; + ensure!(sender == Self::key(), Error::::RequireSudo); + let new = T::Lookup::lookup(new)?; + + Self::deposit_event(RawEvent::KeyChanged(Self::key())); + >::put(new); + } + + /// Authenticates the sudo key and dispatches a function call with `Signed` origin from + /// a given account. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB write (event). + /// - Weight of derivative `call` execution + 10,000. + /// # + #[weight = FunctionOf( + |args: (&::Source, &Box<::Call>,)| { + args.1.get_dispatch_info().weight + 10_000 + }, + |args: (&::Source, &Box<::Call>,)| { + args.1.get_dispatch_info().class + }, + true + )] + fn sudo_as(origin, who: ::Source, call: Box<::Call>) { + // This is a public call, so we ensure that the origin is some signed account. + let sender = ensure_signed(origin)?; + ensure!(sender == Self::key(), Error::::RequireSudo); + + let who = T::Lookup::lookup(who)?; + + let res = match call.dispatch(frame_system::RawOrigin::Signed(who).into()) { + Ok(_) => true, + Err(e) => { + sp_runtime::print(e); + false + } + }; + + Self::deposit_event(RawEvent::SudoAsDone(res)); + } + } } decl_event!( - pub enum Event where AccountId = ::AccountId { - /// A sudo just took place. - Sudid(bool), - /// The sudoer just switched identity; the old key is supplied. - KeyChanged(AccountId), - /// A sudo just took place. - SudoAsDone(bool), - } + pub enum Event + where + AccountId = ::AccountId, + { + /// A sudo just took place. + Sudid(bool), + /// The sudoer just switched identity; the old key is supplied. + KeyChanged(AccountId), + /// A sudo just took place. + SudoAsDone(bool), + } ); decl_storage! { - trait Store for Module as Sudo { - /// The `AccountId` of the sudo key. - Key get(fn key) config(): T::AccountId; - } + trait Store for Module as Sudo { + /// The `AccountId` of the sudo key. 
+ Key get(fn key) config(): T::AccountId; + } } decl_error! { - /// Error for the Sudo module - pub enum Error for Module { - /// Sender must be the Sudo account - RequireSudo, - } + /// Error for the Sudo module + pub enum Error for Module { + /// Sender must be the Sudo account + RequireSudo, + } } diff --git a/frame/support/procedural/src/construct_runtime/mod.rs b/frame/support/procedural/src/construct_runtime/mod.rs index b74a27e7ba..5c6a19ee18 100644 --- a/frame/support/procedural/src/construct_runtime/mod.rs +++ b/frame/support/procedural/src/construct_runtime/mod.rs @@ -28,380 +28,380 @@ use syn::{Ident, Result, TypePath}; const SYSTEM_MODULE_NAME: &str = "System"; pub fn construct_runtime(input: TokenStream) -> TokenStream { - let definition = syn::parse_macro_input!(input as RuntimeDefinition); - construct_runtime_parsed(definition) - .unwrap_or_else(|e| e.to_compile_error()) - .into() + let definition = syn::parse_macro_input!(input as RuntimeDefinition); + construct_runtime_parsed(definition) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn construct_runtime_parsed(definition: RuntimeDefinition) -> Result { - let RuntimeDefinition { - name, - where_section: WhereSection { - block, - node_block, - unchecked_extrinsic, - .. - }, - modules: - ext::Braces { - content: ext::Punctuated { inner: modules, .. }, - token: modules_token, - }, - .. - } = definition; - - // Assert we have system module declared - let system_module = match find_system_module(modules.iter()) { - Some(sm) => sm, - None => { - return Err(syn::Error::new( - modules_token.span, - "`System` module declaration is missing. \ + let RuntimeDefinition { + name, + where_section: + WhereSection { + block, + node_block, + unchecked_extrinsic, + .. + }, + modules: + ext::Braces { + content: ext::Punctuated { inner: modules, .. }, + token: modules_token, + }, + .. 
+ } = definition; + + // Assert we have system module declared + let system_module = match find_system_module(modules.iter()) { + Some(sm) => sm, + None => { + return Err(syn::Error::new( + modules_token.span, + "`System` module declaration is missing. \ Please add this line: `System: system::{Module, Call, Storage, Config, Event},`", - )) - } - }; - let hidden_crate_name = "construct_runtime"; - let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); - let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support"); + )) + } + }; + let hidden_crate_name = "construct_runtime"; + let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); + let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support"); - let all_but_system_modules = modules.iter().filter(|module| module.name != SYSTEM_MODULE_NAME); + let all_but_system_modules = modules + .iter() + .filter(|module| module.name != SYSTEM_MODULE_NAME); - let outer_event = decl_outer_event( - &name, - modules.iter(), - &scrate, - )?; + let outer_event = decl_outer_event(&name, modules.iter(), &scrate)?; - let outer_origin = decl_outer_origin( - &name, - all_but_system_modules.clone(), - &system_module, - &scrate, - )?; - let all_modules = decl_all_modules(&name, modules.iter()); - let module_to_index = decl_module_to_index(modules.iter(), modules.len(), &scrate); + let outer_origin = decl_outer_origin( + &name, + all_but_system_modules.clone(), + &system_module, + &scrate, + )?; + let all_modules = decl_all_modules(&name, modules.iter()); + let module_to_index = decl_module_to_index(modules.iter(), modules.len(), &scrate); - let dispatch = decl_outer_dispatch(&name, modules.iter(), &scrate); - let metadata = decl_runtime_metadata(&name, modules.iter(), &scrate, &unchecked_extrinsic); - let outer_config = decl_outer_config(&name, modules.iter(), &scrate); - let inherent = decl_outer_inherent(&block, &unchecked_extrinsic, modules.iter(), &scrate); - let 
validate_unsigned = decl_validate_unsigned(&name, modules.iter(), &scrate); + let dispatch = decl_outer_dispatch(&name, modules.iter(), &scrate); + let metadata = decl_runtime_metadata(&name, modules.iter(), &scrate, &unchecked_extrinsic); + let outer_config = decl_outer_config(&name, modules.iter(), &scrate); + let inherent = decl_outer_inherent(&block, &unchecked_extrinsic, modules.iter(), &scrate); + let validate_unsigned = decl_validate_unsigned(&name, modules.iter(), &scrate); - let res = quote!( - #scrate_decl + let res = quote!( + #scrate_decl - #[derive(Clone, Copy, PartialEq, Eq)] - #[cfg_attr(feature = "std", derive(Debug))] - pub struct #name; - impl #scrate::sp_runtime::traits::GetNodeBlockType for #name { - type NodeBlock = #node_block; - } - impl #scrate::sp_runtime::traits::GetRuntimeBlockType for #name { - type RuntimeBlock = #block; - } + #[derive(Clone, Copy, PartialEq, Eq)] + #[cfg_attr(feature = "std", derive(Debug))] + pub struct #name; + impl #scrate::sp_runtime::traits::GetNodeBlockType for #name { + type NodeBlock = #node_block; + } + impl #scrate::sp_runtime::traits::GetRuntimeBlockType for #name { + type RuntimeBlock = #block; + } - #outer_event + #outer_event - #outer_origin + #outer_origin - #all_modules + #all_modules - #module_to_index + #module_to_index - #dispatch + #dispatch - #metadata + #metadata - #outer_config + #outer_config - #inherent + #inherent - #validate_unsigned - ); + #validate_unsigned + ); - Ok(res.into()) + Ok(res.into()) } fn decl_validate_unsigned<'a>( - runtime: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, + runtime: &'a Ident, + module_declarations: impl Iterator, + scrate: &'a TokenStream2, ) -> TokenStream2 { - let modules_tokens = module_declarations - .filter(|module_declaration| module_declaration.exists_part("ValidateUnsigned")) - .map(|module_declaration| &module_declaration.name); - quote!( - #scrate::impl_outer_validate_unsigned!( - impl ValidateUnsigned for #runtime { - 
#( #modules_tokens )* - } - ); - ) + let modules_tokens = module_declarations + .filter(|module_declaration| module_declaration.exists_part("ValidateUnsigned")) + .map(|module_declaration| &module_declaration.name); + quote!( + #scrate::impl_outer_validate_unsigned!( + impl ValidateUnsigned for #runtime { + #( #modules_tokens )* + } + ); + ) } fn decl_outer_inherent<'a>( - block: &'a syn::TypePath, - unchecked_extrinsic: &'a syn::TypePath, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, + block: &'a syn::TypePath, + unchecked_extrinsic: &'a syn::TypePath, + module_declarations: impl Iterator, + scrate: &'a TokenStream2, ) -> TokenStream2 { - let modules_tokens = module_declarations.filter_map(|module_declaration| { - let maybe_config_part = module_declaration.find_part("Inherent"); - maybe_config_part.map(|config_part| { - let arg = config_part - .args - .as_ref() - .and_then(|parens| parens.content.inner.iter().next()) - .unwrap_or(&module_declaration.name); - let name = &module_declaration.name; - quote!(#name : #arg,) - }) - }); - quote!( - #scrate::impl_outer_inherent!( - impl Inherents where Block = #block, UncheckedExtrinsic = #unchecked_extrinsic { - #(#modules_tokens)* - } - ); - ) + let modules_tokens = module_declarations.filter_map(|module_declaration| { + let maybe_config_part = module_declaration.find_part("Inherent"); + maybe_config_part.map(|config_part| { + let arg = config_part + .args + .as_ref() + .and_then(|parens| parens.content.inner.iter().next()) + .unwrap_or(&module_declaration.name); + let name = &module_declaration.name; + quote!(#name : #arg,) + }) + }); + quote!( + #scrate::impl_outer_inherent!( + impl Inherents where Block = #block, UncheckedExtrinsic = #unchecked_extrinsic { + #(#modules_tokens)* + } + ); + ) } fn decl_outer_config<'a>( - runtime: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, + runtime: &'a Ident, + module_declarations: impl Iterator, + scrate: &'a TokenStream2, ) -> 
TokenStream2 { - let modules_tokens = module_declarations - .filter_map(|module_declaration| { - module_declaration.find_part("Config").map(|part| { - let transformed_generics: Vec<_> = part - .generics - .params - .iter() - .map(|param| quote!(<#param>)) - .collect(); - (module_declaration, transformed_generics) - }) - }) - .map(|(module_declaration, generics)| { - let module = &module_declaration.module; - let name = Ident::new( - &format!("{}Config", module_declaration.name), - module_declaration.name.span(), - ); - let instance = module_declaration.instance.as_ref().into_iter(); - quote!( - #name => - #module #(#instance)* #(#generics)*, - ) - }); - quote!( - #scrate::sp_runtime::impl_outer_config! { - pub struct GenesisConfig for #runtime { - #(#modules_tokens)* - } - } - ) + let modules_tokens = module_declarations + .filter_map(|module_declaration| { + module_declaration.find_part("Config").map(|part| { + let transformed_generics: Vec<_> = part + .generics + .params + .iter() + .map(|param| quote!(<#param>)) + .collect(); + (module_declaration, transformed_generics) + }) + }) + .map(|(module_declaration, generics)| { + let module = &module_declaration.module; + let name = Ident::new( + &format!("{}Config", module_declaration.name), + module_declaration.name.span(), + ); + let instance = module_declaration.instance.as_ref().into_iter(); + quote!( + #name => + #module #(#instance)* #(#generics)*, + ) + }); + quote!( + #scrate::sp_runtime::impl_outer_config! 
{ + pub struct GenesisConfig for #runtime { + #(#modules_tokens)* + } + } + ) } fn decl_runtime_metadata<'a>( - runtime: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, - extrinsic: &TypePath, + runtime: &'a Ident, + module_declarations: impl Iterator, + scrate: &'a TokenStream2, + extrinsic: &TypePath, ) -> TokenStream2 { - let modules_tokens = module_declarations - .filter_map(|module_declaration| { - module_declaration.find_part("Module").map(|_| { - let filtered_names: Vec<_> = module_declaration - .module_parts() - .into_iter() - .filter(|part| part.name() != "Module") - .map(|part| part.ident()) - .collect(); - (module_declaration, filtered_names) - }) - }) - .map(|(module_declaration, filtered_names)| { - let module = &module_declaration.module; - let name = &module_declaration.name; - let instance = module_declaration - .instance - .as_ref() - .map(|name| quote!(<#name>)) - .into_iter(); - quote!(#module::Module #(#instance)* as #name with #(#filtered_names)* ,) - }); - quote!( - #scrate::impl_runtime_metadata!{ - for #runtime with modules where Extrinsic = #extrinsic - #(#modules_tokens)* - } - ) + let modules_tokens = module_declarations + .filter_map(|module_declaration| { + module_declaration.find_part("Module").map(|_| { + let filtered_names: Vec<_> = module_declaration + .module_parts() + .into_iter() + .filter(|part| part.name() != "Module") + .map(|part| part.ident()) + .collect(); + (module_declaration, filtered_names) + }) + }) + .map(|(module_declaration, filtered_names)| { + let module = &module_declaration.module; + let name = &module_declaration.name; + let instance = module_declaration + .instance + .as_ref() + .map(|name| quote!(<#name>)) + .into_iter(); + quote!(#module::Module #(#instance)* as #name with #(#filtered_names)* ,) + }); + quote!( + #scrate::impl_runtime_metadata!{ + for #runtime with modules where Extrinsic = #extrinsic + #(#modules_tokens)* + } + ) } fn decl_outer_dispatch<'a>( - runtime: &'a 
Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, + runtime: &'a Ident, + module_declarations: impl Iterator, + scrate: &'a TokenStream2, ) -> TokenStream2 { - let modules_tokens = module_declarations - .filter(|module_declaration| module_declaration.exists_part("Call")) - .map(|module_declaration| { - let module = &module_declaration.module; - let name = &module_declaration.name; - quote!(#module::#name) - }); - quote!( - #scrate::impl_outer_dispatch! { - pub enum Call for #runtime where origin: Origin { - #(#modules_tokens,)* - } - } - ) + let modules_tokens = module_declarations + .filter(|module_declaration| module_declaration.exists_part("Call")) + .map(|module_declaration| { + let module = &module_declaration.module; + let name = &module_declaration.name; + quote!(#module::#name) + }); + quote!( + #scrate::impl_outer_dispatch! { + pub enum Call for #runtime where origin: Origin { + #(#modules_tokens,)* + } + } + ) } fn decl_outer_origin<'a>( - runtime_name: &'a Ident, - module_declarations: impl Iterator, - system_name: &'a Ident, - scrate: &'a TokenStream2, + runtime_name: &'a Ident, + module_declarations: impl Iterator, + system_name: &'a Ident, + scrate: &'a TokenStream2, ) -> syn::Result { - let mut modules_tokens = TokenStream2::new(); - for module_declaration in module_declarations { - match module_declaration.find_part("Origin") { - Some(module_entry) => { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; - if instance.is_some() && generics.params.len() == 0 { - let msg = format!( - "Instantiable module with no generic `Origin` cannot \ + let mut modules_tokens = TokenStream2::new(); + for module_declaration in module_declarations { + match module_declaration.find_part("Origin") { + Some(module_entry) => { + let module = &module_declaration.module; + let instance = module_declaration.instance.as_ref(); + let generics = &module_entry.generics; 
+ if instance.is_some() && generics.params.len() == 0 { + let msg = format!( + "Instantiable module with no generic `Origin` cannot \ be constructed: module `{}` must have generic `Origin`", - module_declaration.name - ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); - } - let tokens = quote!(#module #instance #generics ,); - modules_tokens.extend(tokens); - } - None => {} - } - } - - Ok(quote!( - #scrate::impl_outer_origin! { - pub enum Origin for #runtime_name where system = #system_name { - #modules_tokens - } - } - )) + module_declaration.name + ); + return Err(syn::Error::new(module_declaration.name.span(), msg)); + } + let tokens = quote!(#module #instance #generics ,); + modules_tokens.extend(tokens); + } + None => {} + } + } + + Ok(quote!( + #scrate::impl_outer_origin! { + pub enum Origin for #runtime_name where system = #system_name { + #modules_tokens + } + } + )) } fn decl_outer_event<'a>( - runtime_name: &'a Ident, - module_declarations: impl Iterator, - scrate: &'a TokenStream2, + runtime_name: &'a Ident, + module_declarations: impl Iterator, + scrate: &'a TokenStream2, ) -> syn::Result { - let mut modules_tokens = TokenStream2::new(); - for module_declaration in module_declarations { - match module_declaration.find_part("Event") { - Some(module_entry) => { - let module = &module_declaration.module; - let instance = module_declaration.instance.as_ref(); - let generics = &module_entry.generics; - if instance.is_some() && generics.params.len() == 0 { - let msg = format!( - "Instantiable module with no generic `Event` cannot \ + let mut modules_tokens = TokenStream2::new(); + for module_declaration in module_declarations { + match module_declaration.find_part("Event") { + Some(module_entry) => { + let module = &module_declaration.module; + let instance = module_declaration.instance.as_ref(); + let generics = &module_entry.generics; + if instance.is_some() && generics.params.len() == 0 { + let msg = format!( + "Instantiable module 
with no generic `Event` cannot \ be constructed: module `{}` must have generic `Event`", - module_declaration.name, - ); - return Err(syn::Error::new(module_declaration.name.span(), msg)); - } - let tokens = quote!(#module #instance #generics ,); - modules_tokens.extend(tokens); - } - None => {} - } - } - - Ok(quote!( - #scrate::impl_outer_event! { - pub enum Event for #runtime_name { - #modules_tokens - } - } - )) + module_declaration.name, + ); + return Err(syn::Error::new(module_declaration.name.span(), msg)); + } + let tokens = quote!(#module #instance #generics ,); + modules_tokens.extend(tokens); + } + None => {} + } + } + + Ok(quote!( + #scrate::impl_outer_event! { + pub enum Event for #runtime_name { + #modules_tokens + } + } + )) } fn decl_all_modules<'a>( - runtime: &'a Ident, - module_declarations: impl Iterator, + runtime: &'a Ident, + module_declarations: impl Iterator, ) -> TokenStream2 { - let mut types = TokenStream2::new(); - let mut names = Vec::new(); - for module_declaration in module_declarations { - let type_name = &module_declaration.name; - let module = &module_declaration.module; - let mut generics = vec![quote!(#runtime)]; - generics.extend( - module_declaration - .instance - .iter() - .map(|name| quote!(#module::#name)), - ); - let type_decl = quote!( - pub type #type_name = #module::Module <#(#generics),*>; - ); - types.extend(type_decl); - names.push(&module_declaration.name); - } - // Make nested tuple structure like (((Babe, Consensus), Grandpa), ...) - // But ignore the system module. 
- let all_modules = names.iter() - .filter(|n| **n != SYSTEM_MODULE_NAME) - .fold(TokenStream2::default(), |combined, name| quote!((#name, #combined))); - - quote!( - #types - type AllModules = ( #all_modules ); - ) + let mut types = TokenStream2::new(); + let mut names = Vec::new(); + for module_declaration in module_declarations { + let type_name = &module_declaration.name; + let module = &module_declaration.module; + let mut generics = vec![quote!(#runtime)]; + generics.extend( + module_declaration + .instance + .iter() + .map(|name| quote!(#module::#name)), + ); + let type_decl = quote!( + pub type #type_name = #module::Module <#(#generics),*>; + ); + types.extend(type_decl); + names.push(&module_declaration.name); + } + // Make nested tuple structure like (((Babe, Consensus), Grandpa), ...) + // But ignore the system module. + let all_modules = names.iter().filter(|n| **n != SYSTEM_MODULE_NAME).fold( + TokenStream2::default(), + |combined, name| quote!((#name, #combined)), + ); + + quote!( + #types + type AllModules = ( #all_modules ); + ) } fn decl_module_to_index<'a>( - module_declarations: impl Iterator, - num_modules: usize, - scrate: &TokenStream2, + module_declarations: impl Iterator, + num_modules: usize, + scrate: &TokenStream2, ) -> TokenStream2 { - let names = module_declarations.map(|d| &d.name); - let indices = 0..num_modules; - - quote!( - /// Provides an implementation of `ModuleToIndex` to map a module - /// to its index in the runtime. - pub struct ModuleToIndex; - - impl #scrate::traits::ModuleToIndex for ModuleToIndex { - fn module_to_index() -> Option { - let type_id = #scrate::sp_std::any::TypeId::of::(); - #( - if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { - return Some(#indices) - } - )* - - None - } - } - ) + let names = module_declarations.map(|d| &d.name); + let indices = 0..num_modules; + + quote!( + /// Provides an implementation of `ModuleToIndex` to map a module + /// to its index in the runtime. 
+ pub struct ModuleToIndex; + + impl #scrate::traits::ModuleToIndex for ModuleToIndex { + fn module_to_index() -> Option { + let type_id = #scrate::sp_std::any::TypeId::of::(); + #( + if type_id == #scrate::sp_std::any::TypeId::of::<#names>() { + return Some(#indices) + } + )* + + None + } + } + ) } fn find_system_module<'a>( - mut module_declarations: impl Iterator, + mut module_declarations: impl Iterator, ) -> Option<&'a Ident> { - module_declarations - .find(|decl| decl.name == SYSTEM_MODULE_NAME) - .map(|decl| &decl.module) + module_declarations + .find(|decl| decl.name == SYSTEM_MODULE_NAME) + .map(|decl| &decl.module) } diff --git a/frame/support/procedural/src/construct_runtime/parse.rs b/frame/support/procedural/src/construct_runtime/parse.rs index 4a81a7efd6..335e164ca5 100644 --- a/frame/support/procedural/src/construct_runtime/parse.rs +++ b/frame/support/procedural/src/construct_runtime/parse.rs @@ -18,380 +18,380 @@ use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::Span; use std::collections::HashSet; use syn::{ - parse::{Parse, ParseStream}, - spanned::Spanned, - token, Error, Ident, Result, Token, + parse::{Parse, ParseStream}, + spanned::Spanned, + token, Error, Ident, Result, Token, }; mod keyword { - syn::custom_keyword!(Block); - syn::custom_keyword!(NodeBlock); - syn::custom_keyword!(UncheckedExtrinsic); - syn::custom_keyword!(Module); - syn::custom_keyword!(Call); - syn::custom_keyword!(Storage); - syn::custom_keyword!(Event); - syn::custom_keyword!(Config); - syn::custom_keyword!(Origin); - syn::custom_keyword!(Inherent); - syn::custom_keyword!(ValidateUnsigned); + syn::custom_keyword!(Block); + syn::custom_keyword!(NodeBlock); + syn::custom_keyword!(UncheckedExtrinsic); + syn::custom_keyword!(Module); + syn::custom_keyword!(Call); + syn::custom_keyword!(Storage); + syn::custom_keyword!(Event); + syn::custom_keyword!(Config); + syn::custom_keyword!(Origin); + syn::custom_keyword!(Inherent); + 
syn::custom_keyword!(ValidateUnsigned); } #[derive(Debug)] pub struct RuntimeDefinition { - pub visibility_token: Token![pub], - pub enum_token: Token![enum], - pub name: Ident, - pub where_section: WhereSection, - pub modules: ext::Braces>, + pub visibility_token: Token![pub], + pub enum_token: Token![enum], + pub name: Ident, + pub where_section: WhereSection, + pub modules: ext::Braces>, } impl Parse for RuntimeDefinition { - fn parse(input: ParseStream) -> Result { - Ok(Self { - visibility_token: input.parse()?, - enum_token: input.parse()?, - name: input.parse()?, - where_section: input.parse()?, - modules: input.parse()?, - }) - } + fn parse(input: ParseStream) -> Result { + Ok(Self { + visibility_token: input.parse()?, + enum_token: input.parse()?, + name: input.parse()?, + where_section: input.parse()?, + modules: input.parse()?, + }) + } } #[derive(Debug)] pub struct WhereSection { - pub block: syn::TypePath, - pub node_block: syn::TypePath, - pub unchecked_extrinsic: syn::TypePath, + pub block: syn::TypePath, + pub node_block: syn::TypePath, + pub unchecked_extrinsic: syn::TypePath, } impl Parse for WhereSection { - fn parse(input: ParseStream) -> Result { - input.parse::()?; - let mut definitions = Vec::new(); - while !input.peek(token::Brace) { - let definition: WhereDefinition = input.parse()?; - definitions.push(definition); - if !input.peek(Token![,]) { - if !input.peek(token::Brace) { - return Err(input.error("Expected `,` or `{`")); - } - break; - } - input.parse::()?; - } - let block = remove_kind(input, WhereKind::Block, &mut definitions)?.value; - let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; - let unchecked_extrinsic = - remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; - if let Some(WhereDefinition { - ref kind_span, - ref kind, - .. - }) = definitions.first() - { - let msg = format!( - "`{:?}` was declared above. 
Please use exactly one declaration for `{:?}`.", - kind, kind - ); - return Err(Error::new(*kind_span, msg)); - } - Ok(Self { - block, - node_block, - unchecked_extrinsic, - }) - } + fn parse(input: ParseStream) -> Result { + input.parse::()?; + let mut definitions = Vec::new(); + while !input.peek(token::Brace) { + let definition: WhereDefinition = input.parse()?; + definitions.push(definition); + if !input.peek(Token![,]) { + if !input.peek(token::Brace) { + return Err(input.error("Expected `,` or `{`")); + } + break; + } + input.parse::()?; + } + let block = remove_kind(input, WhereKind::Block, &mut definitions)?.value; + let node_block = remove_kind(input, WhereKind::NodeBlock, &mut definitions)?.value; + let unchecked_extrinsic = + remove_kind(input, WhereKind::UncheckedExtrinsic, &mut definitions)?.value; + if let Some(WhereDefinition { + ref kind_span, + ref kind, + .. + }) = definitions.first() + { + let msg = format!( + "`{:?}` was declared above. Please use exactly one declaration for `{:?}`.", + kind, kind + ); + return Err(Error::new(*kind_span, msg)); + } + Ok(Self { + block, + node_block, + unchecked_extrinsic, + }) + } } #[derive(Debug, PartialEq, Eq, Clone, Copy, Hash)] pub enum WhereKind { - Block, - NodeBlock, - UncheckedExtrinsic, + Block, + NodeBlock, + UncheckedExtrinsic, } #[derive(Debug)] pub struct WhereDefinition { - pub kind_span: Span, - pub kind: WhereKind, - pub value: syn::TypePath, + pub kind_span: Span, + pub kind: WhereKind, + pub value: syn::TypePath, } impl Parse for WhereDefinition { - fn parse(input: ParseStream) -> Result { - let lookahead = input.lookahead1(); - let (kind_span, kind) = if lookahead.peek(keyword::Block) { - (input.parse::()?.span(), WhereKind::Block) - } else if lookahead.peek(keyword::NodeBlock) { - ( - input.parse::()?.span(), - WhereKind::NodeBlock, - ) - } else if lookahead.peek(keyword::UncheckedExtrinsic) { - ( - input.parse::()?.span(), - WhereKind::UncheckedExtrinsic, - ) - } else { - return 
Err(lookahead.error()); - }; - - Ok(Self { - kind_span, - kind, - value: { - let _: Token![=] = input.parse()?; - input.parse()? - }, - }) - } + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + let (kind_span, kind) = if lookahead.peek(keyword::Block) { + (input.parse::()?.span(), WhereKind::Block) + } else if lookahead.peek(keyword::NodeBlock) { + ( + input.parse::()?.span(), + WhereKind::NodeBlock, + ) + } else if lookahead.peek(keyword::UncheckedExtrinsic) { + ( + input.parse::()?.span(), + WhereKind::UncheckedExtrinsic, + ) + } else { + return Err(lookahead.error()); + }; + + Ok(Self { + kind_span, + kind, + value: { + let _: Token![=] = input.parse()?; + input.parse()? + }, + }) + } } #[derive(Debug)] pub struct ModuleDeclaration { - pub name: Ident, - pub module: Ident, - pub instance: Option, - pub module_parts: Vec, + pub name: Ident, + pub module: Ident, + pub instance: Option, + pub module_parts: Vec, } impl Parse for ModuleDeclaration { - fn parse(input: ParseStream) -> Result { - let name = input.parse()?; - let _: Token![:] = input.parse()?; - let module = input.parse()?; - let instance = if input.peek(Token![::]) && input.peek3(Token![<]) { - let _: Token![::] = input.parse()?; - let _: Token![<] = input.parse()?; - let res = Some(input.parse()?); - let _: Token![>] = input.parse()?; - res - } else { - None - }; - - let _: Token![::] = input.parse()?; - let module_parts = parse_module_parts(input)?; - - let parsed = Self { - name, - module, - instance, - module_parts, - }; - - Ok(parsed) - } + fn parse(input: ParseStream) -> Result { + let name = input.parse()?; + let _: Token![:] = input.parse()?; + let module = input.parse()?; + let instance = if input.peek(Token![::]) && input.peek3(Token![<]) { + let _: Token![::] = input.parse()?; + let _: Token![<] = input.parse()?; + let res = Some(input.parse()?); + let _: Token![>] = input.parse()?; + res + } else { + None + }; + + let _: Token![::] = input.parse()?; + let 
module_parts = parse_module_parts(input)?; + + let parsed = Self { + name, + module, + instance, + module_parts, + }; + + Ok(parsed) + } } impl ModuleDeclaration { - /// Get resolved module parts - pub fn module_parts(&self) -> &[ModulePart] { - &self.module_parts - } - - pub fn find_part(&self, name: &str) -> Option<&ModulePart> { - self.module_parts.iter().find(|part| part.name() == name) - } - - pub fn exists_part(&self, name: &str) -> bool { - self.find_part(name).is_some() - } + /// Get resolved module parts + pub fn module_parts(&self) -> &[ModulePart] { + &self.module_parts + } + + pub fn find_part(&self, name: &str) -> Option<&ModulePart> { + self.module_parts.iter().find(|part| part.name() == name) + } + + pub fn exists_part(&self, name: &str) -> bool { + self.find_part(name).is_some() + } } /// Parse [`ModulePart`]'s from a braces enclosed list that is split by commas, e.g. /// /// `{ Call, Event }` fn parse_module_parts(input: ParseStream) -> Result> { - let module_parts :ext::Braces> = input.parse()?; - - let mut resolved = HashSet::new(); - for part in module_parts.content.inner.iter() { - if !resolved.insert(part.name()) { - let msg = format!( - "`{}` was already declared before. Please remove the duplicate declaration", - part.name(), - ); - return Err(Error::new(part.keyword.span(), msg)); - } - } - - Ok(module_parts.content.inner.into_iter().collect()) + let module_parts: ext::Braces> = input.parse()?; + + let mut resolved = HashSet::new(); + for part in module_parts.content.inner.iter() { + if !resolved.insert(part.name()) { + let msg = format!( + "`{}` was already declared before. 
Please remove the duplicate declaration", + part.name(), + ); + return Err(Error::new(part.keyword.span(), msg)); + } + } + + Ok(module_parts.content.inner.into_iter().collect()) } #[derive(Debug, Clone)] pub enum ModulePartKeyword { - Module(keyword::Module), - Call(keyword::Call), - Storage(keyword::Storage), - Event(keyword::Event), - Config(keyword::Config), - Origin(keyword::Origin), - Inherent(keyword::Inherent), - ValidateUnsigned(keyword::ValidateUnsigned), + Module(keyword::Module), + Call(keyword::Call), + Storage(keyword::Storage), + Event(keyword::Event), + Config(keyword::Config), + Origin(keyword::Origin), + Inherent(keyword::Inherent), + ValidateUnsigned(keyword::ValidateUnsigned), } impl Parse for ModulePartKeyword { - fn parse(input: ParseStream) -> Result { - let lookahead = input.lookahead1(); - - if lookahead.peek(keyword::Module) { - Ok(Self::Module(input.parse()?)) - } else if lookahead.peek(keyword::Call) { - Ok(Self::Call(input.parse()?)) - } else if lookahead.peek(keyword::Storage) { - Ok(Self::Storage(input.parse()?)) - } else if lookahead.peek(keyword::Event) { - Ok(Self::Event(input.parse()?)) - } else if lookahead.peek(keyword::Config) { - Ok(Self::Config(input.parse()?)) - } else if lookahead.peek(keyword::Origin) { - Ok(Self::Origin(input.parse()?)) - } else if lookahead.peek(keyword::Inherent) { - Ok(Self::Inherent(input.parse()?)) - } else if lookahead.peek(keyword::ValidateUnsigned) { - Ok(Self::ValidateUnsigned(input.parse()?)) - } else { - Err(lookahead.error()) - } - } + fn parse(input: ParseStream) -> Result { + let lookahead = input.lookahead1(); + + if lookahead.peek(keyword::Module) { + Ok(Self::Module(input.parse()?)) + } else if lookahead.peek(keyword::Call) { + Ok(Self::Call(input.parse()?)) + } else if lookahead.peek(keyword::Storage) { + Ok(Self::Storage(input.parse()?)) + } else if lookahead.peek(keyword::Event) { + Ok(Self::Event(input.parse()?)) + } else if lookahead.peek(keyword::Config) { + 
Ok(Self::Config(input.parse()?)) + } else if lookahead.peek(keyword::Origin) { + Ok(Self::Origin(input.parse()?)) + } else if lookahead.peek(keyword::Inherent) { + Ok(Self::Inherent(input.parse()?)) + } else if lookahead.peek(keyword::ValidateUnsigned) { + Ok(Self::ValidateUnsigned(input.parse()?)) + } else { + Err(lookahead.error()) + } + } } impl ModulePartKeyword { - /// Returns the name of `Self`. - fn name(&self) -> &'static str { - match self { - Self::Module(_) => "Module", - Self::Call(_) => "Call", - Self::Storage(_) => "Storage", - Self::Event(_) => "Event", - Self::Config(_) => "Config", - Self::Origin(_) => "Origin", - Self::Inherent(_) => "Inherent", - Self::ValidateUnsigned(_) => "ValidateUnsigned", - } - } - - /// Returns the name as `Ident`. - fn ident(&self) -> Ident { - Ident::new(self.name(), self.span()) - } - - /// Returns `true` if this module part allows to have an argument. - /// - /// For example `Inherent(Timestamp)`. - fn allows_arg(&self) -> bool { - Self::all_allow_arg().iter().any(|n| *n == self.name()) - } - - /// Returns the names of all module parts that allow to have an argument. - fn all_allow_arg() -> &'static [&'static str] { - &["Inherent"] - } - - /// Returns `true` if this module part is allowed to have generic arguments. - fn allows_generic(&self) -> bool { - Self::all_generic_arg().iter().any(|n| *n == self.name()) - } - - /// Returns the names of all module parts that allow to have a generic argument. - fn all_generic_arg() -> &'static [&'static str] { - &["Event", "Origin", "Config"] - } + /// Returns the name of `Self`. + fn name(&self) -> &'static str { + match self { + Self::Module(_) => "Module", + Self::Call(_) => "Call", + Self::Storage(_) => "Storage", + Self::Event(_) => "Event", + Self::Config(_) => "Config", + Self::Origin(_) => "Origin", + Self::Inherent(_) => "Inherent", + Self::ValidateUnsigned(_) => "ValidateUnsigned", + } + } + + /// Returns the name as `Ident`. 
+ fn ident(&self) -> Ident { + Ident::new(self.name(), self.span()) + } + + /// Returns `true` if this module part allows to have an argument. + /// + /// For example `Inherent(Timestamp)`. + fn allows_arg(&self) -> bool { + Self::all_allow_arg().iter().any(|n| *n == self.name()) + } + + /// Returns the names of all module parts that allow to have an argument. + fn all_allow_arg() -> &'static [&'static str] { + &["Inherent"] + } + + /// Returns `true` if this module part is allowed to have generic arguments. + fn allows_generic(&self) -> bool { + Self::all_generic_arg().iter().any(|n| *n == self.name()) + } + + /// Returns the names of all module parts that allow to have a generic argument. + fn all_generic_arg() -> &'static [&'static str] { + &["Event", "Origin", "Config"] + } } impl Spanned for ModulePartKeyword { - fn span(&self) -> Span { - match self { - Self::Module(inner) => inner.span(), - Self::Call(inner) => inner.span(), - Self::Storage(inner) => inner.span(), - Self::Event(inner) => inner.span(), - Self::Config(inner) => inner.span(), - Self::Origin(inner) => inner.span(), - Self::Inherent(inner) => inner.span(), - Self::ValidateUnsigned(inner) => inner.span(), - } - } + fn span(&self) -> Span { + match self { + Self::Module(inner) => inner.span(), + Self::Call(inner) => inner.span(), + Self::Storage(inner) => inner.span(), + Self::Event(inner) => inner.span(), + Self::Config(inner) => inner.span(), + Self::Origin(inner) => inner.span(), + Self::Inherent(inner) => inner.span(), + Self::ValidateUnsigned(inner) => inner.span(), + } + } } #[derive(Debug, Clone)] pub struct ModulePart { - pub keyword: ModulePartKeyword, - pub generics: syn::Generics, - pub args: Option>>, + pub keyword: ModulePartKeyword, + pub generics: syn::Generics, + pub args: Option>>, } impl Parse for ModulePart { - fn parse(input: ParseStream) -> Result { - let keyword: ModulePartKeyword = input.parse()?; - - let generics: syn::Generics = input.parse()?; - if 
!generics.params.is_empty() && !keyword.allows_generic() { - let valid_generics = ModulePart::format_names(ModulePartKeyword::all_generic_arg()); - let msg = format!( - "`{}` is not allowed to have generics. \ + fn parse(input: ParseStream) -> Result { + let keyword: ModulePartKeyword = input.parse()?; + + let generics: syn::Generics = input.parse()?; + if !generics.params.is_empty() && !keyword.allows_generic() { + let valid_generics = ModulePart::format_names(ModulePartKeyword::all_generic_arg()); + let msg = format!( + "`{}` is not allowed to have generics. \ Only the following modules are allowed to have generics: {}.", - keyword.name(), - valid_generics, - ); - return Err(syn::Error::new(keyword.span(), msg)); - } - let args = if input.peek(token::Paren) { - if !keyword.allows_arg() { - let syn::group::Parens { token: parens, .. } = syn::group::parse_parens(input)?; - let valid_names = ModulePart::format_names(ModulePartKeyword::all_allow_arg()); - let msg = format!( - "`{}` is not allowed to have arguments in parens. \ + keyword.name(), + valid_generics, + ); + return Err(syn::Error::new(keyword.span(), msg)); + } + let args = if input.peek(token::Paren) { + if !keyword.allows_arg() { + let syn::group::Parens { token: parens, .. } = syn::group::parse_parens(input)?; + let valid_names = ModulePart::format_names(ModulePartKeyword::all_allow_arg()); + let msg = format!( + "`{}` is not allowed to have arguments in parens. \ Only the following modules are allowed to have arguments in parens: {}.", - keyword.name(), - valid_names, - ); - return Err(syn::Error::new(parens.span, msg)); - } - Some(input.parse()?) - } else { - None - }; - - Ok(Self { - keyword, - generics, - args, - }) - } + keyword.name(), + valid_names, + ); + return Err(syn::Error::new(parens.span, msg)); + } + Some(input.parse()?) 
+ } else { + None + }; + + Ok(Self { + keyword, + generics, + args, + }) + } } impl ModulePart { - pub fn format_names(names: &[&'static str]) -> String { - let res: Vec<_> = names.into_iter().map(|s| format!("`{}`", s)).collect(); - res.join(", ") - } - - /// The name of this module part. - pub fn name(&self) -> &'static str { - self.keyword.name() - } - - /// The name of this module part as `Ident`. - pub fn ident(&self) -> Ident { - self.keyword.ident() - } + pub fn format_names(names: &[&'static str]) -> String { + let res: Vec<_> = names.into_iter().map(|s| format!("`{}`", s)).collect(); + res.join(", ") + } + + /// The name of this module part. + pub fn name(&self) -> &'static str { + self.keyword.name() + } + + /// The name of this module part as `Ident`. + pub fn ident(&self) -> Ident { + self.keyword.ident() + } } fn remove_kind( - input: ParseStream, - kind: WhereKind, - definitions: &mut Vec, + input: ParseStream, + kind: WhereKind, + definitions: &mut Vec, ) -> Result { - if let Some(pos) = definitions.iter().position(|d| d.kind == kind) { - Ok(definitions.remove(pos)) - } else { - let msg = format!( - "Missing associated type for `{:?}`. Add `{:?}` = ... to where section.", - kind, kind - ); - Err(input.error(msg)) - } + if let Some(pos) = definitions.iter().position(|d| d.kind == kind) { + Ok(definitions.remove(pos)) + } else { + let msg = format!( + "Missing associated type for `{:?}`. Add `{:?}` = ... to where section.", + kind, kind + ); + Err(input.error(msg)) + } } diff --git a/frame/support/procedural/src/lib.rs b/frame/support/procedural/src/lib.rs index a9662f530a..2fcd5b070c 100644 --- a/frame/support/procedural/src/lib.rs +++ b/frame/support/procedural/src/lib.rs @@ -18,10 +18,10 @@ //! Proc macro of Support code for the runtime. 
// end::description[] -#![recursion_limit="512"] +#![recursion_limit = "512"] -mod storage; mod construct_runtime; +mod storage; use proc_macro::TokenStream; @@ -233,7 +233,7 @@ use proc_macro::TokenStream; /// #[proc_macro] pub fn decl_storage(input: TokenStream) -> TokenStream { - storage::decl_storage_impl(input) + storage::decl_storage_impl(input) } /// Construct a runtime, with the given name and the given modules. @@ -288,5 +288,5 @@ pub fn decl_storage(input: TokenStream) -> TokenStream { /// the module depending on it. #[proc_macro] pub fn construct_runtime(input: TokenStream) -> TokenStream { - construct_runtime::construct_runtime(input) + construct_runtime::construct_runtime(input) } diff --git a/frame/support/procedural/src/storage/genesis_config/builder_def.rs b/frame/support/procedural/src/storage/genesis_config/builder_def.rs index 87255ee481..8da9ca7552 100644 --- a/frame/support/procedural/src/storage/genesis_config/builder_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/builder_def.rs @@ -16,61 +16,63 @@ //! Builder logic definition used to build genesis storage. +use super::super::{DeclStorageDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::TokenStream; -use syn::spanned::Spanned; use quote::{quote, quote_spanned}; -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; +use syn::spanned::Spanned; /// Definition of builder blocks, each block insert some value in the storage. /// They must be called inside externalities, and with `self` being the genesis config. pub struct BuilderDef { - /// Contains: - /// * build block for storage with build attribute. - /// * build block for storage with config attribute and no build attribute. - /// * build block for extra genesis build expression. - pub blocks: Vec, - /// The build blocks requires generic traits. - pub is_generic: bool, + /// Contains: + /// * build block for storage with build attribute. 
+ /// * build block for storage with config attribute and no build attribute. + /// * build block for extra genesis build expression. + pub blocks: Vec, + /// The build blocks requires generic traits. + pub is_generic: bool, } impl BuilderDef { - pub fn from_def(scrate: &TokenStream, def: &DeclStorageDefExt) -> Self { - let mut blocks = Vec::new(); - let mut is_generic = false; - - for line in def.storage_lines.iter() { - let storage_struct = &line.storage_struct; - let storage_trait = &line.storage_trait; - let value_type = &line.value_type; - - // Defines the data variable to use for insert at genesis either from build or config. - let mut data = None; - - if let Some(builder) = &line.build { - is_generic |= ext::expr_contains_ident(&builder, &def.module_runtime_generic); - is_generic |= line.is_generic; - - data = Some(match &line.storage_type { - StorageLineTypeDef::Simple(_) if line.is_option => - quote_spanned!(builder.span() => - let data = (#builder)(self); - let data = Option::as_ref(&data); - ), - _ => quote_spanned!(builder.span() => let data = &(#builder)(self); ), - }); - } else if let Some(config) = &line.config { - is_generic |= line.is_generic; - - data = Some(match &line.storage_type { - StorageLineTypeDef::Simple(_) if line.is_option => - quote!( let data = Some(&self.#config); ), - _ => quote!( let data = &self.#config; ), - }); - }; - - if let Some(data) = data { - blocks.push(match &line.storage_type { + pub fn from_def(scrate: &TokenStream, def: &DeclStorageDefExt) -> Self { + let mut blocks = Vec::new(); + let mut is_generic = false; + + for line in def.storage_lines.iter() { + let storage_struct = &line.storage_struct; + let storage_trait = &line.storage_trait; + let value_type = &line.value_type; + + // Defines the data variable to use for insert at genesis either from build or config. 
+ let mut data = None; + + if let Some(builder) = &line.build { + is_generic |= ext::expr_contains_ident(&builder, &def.module_runtime_generic); + is_generic |= line.is_generic; + + data = Some(match &line.storage_type { + StorageLineTypeDef::Simple(_) if line.is_option => { + quote_spanned!(builder.span() => + let data = (#builder)(self); + let data = Option::as_ref(&data); + ) + } + _ => quote_spanned!(builder.span() => let data = &(#builder)(self); ), + }); + } else if let Some(config) = &line.config { + is_generic |= line.is_generic; + + data = Some(match &line.storage_type { + StorageLineTypeDef::Simple(_) if line.is_option => { + quote!( let data = Some(&self.#config); ) + } + _ => quote!( let data = &self.#config; ), + }); + }; + + if let Some(data) = data { + blocks.push(match &line.storage_type { StorageLineTypeDef::Simple(_) if line.is_option => { quote!{{ #data @@ -114,22 +116,18 @@ impl BuilderDef { }} }, }); - } - } - - if let Some(builder) = def.extra_genesis_build.as_ref() { - is_generic |= ext::expr_contains_ident(&builder, &def.module_runtime_generic); + } + } - blocks.push(quote_spanned! { builder.span() => - let extra_genesis_builder: fn(&Self) = #builder; - extra_genesis_builder(self); - }); - } + if let Some(builder) = def.extra_genesis_build.as_ref() { + is_generic |= ext::expr_contains_ident(&builder, &def.module_runtime_generic); + blocks.push(quote_spanned! { builder.span() => + let extra_genesis_builder: fn(&Self) = #builder; + extra_genesis_builder(self); + }); + } - Self { - blocks, - is_generic, - } - } + Self { blocks, is_generic } + } } diff --git a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs index 9b6ddc9217..de9d2b09df 100644 --- a/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs +++ b/frame/support/procedural/src/storage/genesis_config/genesis_config_def.rs @@ -16,139 +16,145 @@ //! 
Genesis config definition. +use super::super::{DeclStorageDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::syn_ext as ext; use proc_macro2::TokenStream; -use syn::{spanned::Spanned, parse_quote}; use quote::quote; -use super::super::{DeclStorageDefExt, StorageLineTypeDef}; +use syn::{parse_quote, spanned::Spanned}; pub struct GenesisConfigFieldDef { - pub name: syn::Ident, - pub typ: syn::Type, - pub attrs: Vec, - pub default: TokenStream, + pub name: syn::Ident, + pub typ: syn::Type, + pub attrs: Vec, + pub default: TokenStream, } pub struct GenesisConfigDef { - pub is_generic: bool, - pub fields: Vec, - /// For example: `, I: Instance=DefaultInstance>`. - pub genesis_struct_decl: TokenStream, - /// For example: ``. - pub genesis_struct: TokenStream, - /// For example: `, I: Instance>`. - pub genesis_impl: TokenStream, - /// The where clause to use to constrain generics if genesis config is generic. - pub genesis_where_clause: Option, + pub is_generic: bool, + pub fields: Vec, + /// For example: `, I: Instance=DefaultInstance>`. + pub genesis_struct_decl: TokenStream, + /// For example: ``. + pub genesis_struct: TokenStream, + /// For example: `, I: Instance>`. + pub genesis_impl: TokenStream, + /// The where clause to use to constrain generics if genesis config is generic. 
+ pub genesis_where_clause: Option, } impl GenesisConfigDef { - pub fn from_def(def: &DeclStorageDefExt) -> syn::Result { - let fields = Self::get_genesis_config_field_defs(def)?; - - let is_generic = fields.iter() - .any(|field| ext::type_contains_ident(&field.typ, &def.module_runtime_generic)); - - let ( - genesis_struct_decl, - genesis_impl, - genesis_struct, - genesis_where_clause - ) = if is_generic { - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance = &def.optional_instance; - let optional_instance_bound = &def.optional_instance_bound; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; - let where_clause = &def.where_clause; - ( - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>), - quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>), - quote!(<#runtime_generic, #optional_instance>), - where_clause.clone(), - ) - } else { - (quote!(), quote!(), quote!(), None) - }; - - Ok(Self { - is_generic, - fields, - genesis_struct_decl, - genesis_struct, - genesis_impl, - genesis_where_clause, - }) - } - - fn get_genesis_config_field_defs(def: &DeclStorageDefExt) - -> syn::Result> - { - let mut config_field_defs = Vec::new(); - - for (config_field, line) in def.storage_lines.iter() - .filter_map(|line| line.config.as_ref().map(|config_field| (config_field.clone(), line))) - { - let value_type = &line.value_type; - - let typ = match &line.storage_type { - StorageLineTypeDef::Simple(_) => (*value_type).clone(), - StorageLineTypeDef::Map(map) => { - let key = &map.key; - parse_quote!( Vec<(#key, #value_type)> ) - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - - parse_quote!( Vec<(#key1, #key2, #value_type)> ) - }, - }; - - let default = line.default_value.as_ref() - .map(|d| { - if line.is_option { - quote!( #d.unwrap_or_default() ) - } else { - quote!( #d 
) - } - }) - .unwrap_or_else(|| quote!( Default::default() )); - - config_field_defs.push(GenesisConfigFieldDef { - name: config_field, - typ, - attrs: line.doc_attrs.clone(), - default, - }); - } - - for line in &def.extra_genesis_config_lines { - let attrs = line.attrs.iter() - .map(|attr| { - let meta = attr.parse_meta()?; - if meta.path().is_ident("cfg") { - return Err(syn::Error::new( - meta.span(), - "extra genesis config items do not support `cfg` attribute" - )); - } - Ok(meta) - }) - .collect::>()?; - - let default = line.default.as_ref().map(|e| quote!( #e )) - .unwrap_or_else(|| quote!( Default::default() )); - - - config_field_defs.push(GenesisConfigFieldDef { - name: line.name.clone(), - typ: line.typ.clone(), - attrs, - default, - }); - } - - Ok(config_field_defs) - } + pub fn from_def(def: &DeclStorageDefExt) -> syn::Result { + let fields = Self::get_genesis_config_field_defs(def)?; + + let is_generic = fields + .iter() + .any(|field| ext::type_contains_ident(&field.typ, &def.module_runtime_generic)); + + let (genesis_struct_decl, genesis_impl, genesis_struct, genesis_where_clause) = + if is_generic { + let runtime_generic = &def.module_runtime_generic; + let runtime_trait = &def.module_runtime_trait; + let optional_instance = &def.optional_instance; + let optional_instance_bound = &def.optional_instance_bound; + let optional_instance_bound_optional_default = + &def.optional_instance_bound_optional_default; + let where_clause = &def.where_clause; + ( + quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound_optional_default>), + quote!(<#runtime_generic: #runtime_trait, #optional_instance_bound>), + quote!(<#runtime_generic, #optional_instance>), + where_clause.clone(), + ) + } else { + (quote!(), quote!(), quote!(), None) + }; + + Ok(Self { + is_generic, + fields, + genesis_struct_decl, + genesis_struct, + genesis_impl, + genesis_where_clause, + }) + } + + fn get_genesis_config_field_defs( + def: &DeclStorageDefExt, + ) -> syn::Result> { 
+ let mut config_field_defs = Vec::new(); + + for (config_field, line) in def.storage_lines.iter().filter_map(|line| { + line.config + .as_ref() + .map(|config_field| (config_field.clone(), line)) + }) { + let value_type = &line.value_type; + + let typ = match &line.storage_type { + StorageLineTypeDef::Simple(_) => (*value_type).clone(), + StorageLineTypeDef::Map(map) => { + let key = &map.key; + parse_quote!( Vec<(#key, #value_type)> ) + } + StorageLineTypeDef::DoubleMap(map) => { + let key1 = &map.key1; + let key2 = &map.key2; + + parse_quote!( Vec<(#key1, #key2, #value_type)> ) + } + }; + + let default = line + .default_value + .as_ref() + .map(|d| { + if line.is_option { + quote!( #d.unwrap_or_default() ) + } else { + quote!( #d ) + } + }) + .unwrap_or_else(|| quote!(Default::default())); + + config_field_defs.push(GenesisConfigFieldDef { + name: config_field, + typ, + attrs: line.doc_attrs.clone(), + default, + }); + } + + for line in &def.extra_genesis_config_lines { + let attrs = line + .attrs + .iter() + .map(|attr| { + let meta = attr.parse_meta()?; + if meta.path().is_ident("cfg") { + return Err(syn::Error::new( + meta.span(), + "extra genesis config items do not support `cfg` attribute", + )); + } + Ok(meta) + }) + .collect::>()?; + + let default = line + .default + .as_ref() + .map(|e| quote!( #e )) + .unwrap_or_else(|| quote!(Default::default())); + + config_field_defs.push(GenesisConfigFieldDef { + name: line.name.clone(), + typ: line.typ.clone(), + attrs, + default, + }); + } + + Ok(config_field_defs) + } } diff --git a/frame/support/procedural/src/storage/genesis_config/mod.rs b/frame/support/procedural/src/storage/genesis_config/mod.rs index eeeca150d9..c772174f65 100644 --- a/frame/support/procedural/src/storage/genesis_config/mod.rs +++ b/frame/support/procedural/src/storage/genesis_config/mod.rs @@ -17,183 +17,183 @@ //! Declaration of genesis config structure and implementation of build storage trait and //! functions. 
-use proc_macro2::{TokenStream, Span}; -use quote::quote; -use super::{DeclStorageDefExt, instance_trait::DEFAULT_INSTANTIABLE_TRAIT_NAME}; -use genesis_config_def::GenesisConfigDef; +use super::{instance_trait::DEFAULT_INSTANTIABLE_TRAIT_NAME, DeclStorageDefExt}; use builder_def::BuilderDef; +use genesis_config_def::GenesisConfigDef; +use proc_macro2::{Span, TokenStream}; +use quote::quote; -mod genesis_config_def; mod builder_def; +mod genesis_config_def; const DEFAULT_INSTANCE_NAME: &str = "__GeneratedInstance"; fn decl_genesis_config_and_impl_default( - scrate: &TokenStream, - genesis_config: &GenesisConfigDef, + scrate: &TokenStream, + genesis_config: &GenesisConfigDef, ) -> TokenStream { - let config_fields = genesis_config.fields.iter().map(|field| { - let (name, typ, attrs) = (&field.name, &field.typ, &field.attrs); - quote!( #( #[ #attrs] )* pub #name: #typ, ) - }); - - let config_field_defaults = genesis_config.fields.iter().map(|field| { - let (name, default) = (&field.name, &field.default); - quote!( #name: #default, ) - }); - - let serde_bug_bound = if !genesis_config.fields.is_empty() { - let mut b_ser = String::new(); - let mut b_dser = String::new(); - - for typ in genesis_config.fields.iter().map(|c| &c.typ) { - let typ = quote!( #typ ); - b_ser.push_str(&format!("{} : {}::serde::Serialize, ", typ, scrate)); - b_dser.push_str(&format!("{} : {}::serde::de::DeserializeOwned, ", typ, scrate)); - } - - quote! { - #[serde(bound(serialize = #b_ser))] - #[serde(bound(deserialize = #b_dser))] - } - } else { - quote!() - }; - - let genesis_struct_decl = &genesis_config.genesis_struct_decl; - let genesis_struct = &genesis_config.genesis_struct; - let genesis_impl = &genesis_config.genesis_impl; - let genesis_where_clause = &genesis_config.genesis_where_clause; - - quote!( - /// Genesis config for the module, allow to build genesis storage. 
- #[derive(#scrate::Serialize, #scrate::Deserialize)] - #[cfg(feature = "std")] - #[serde(rename_all = "camelCase")] - #[serde(deny_unknown_fields)] - #serde_bug_bound - pub struct GenesisConfig#genesis_struct_decl #genesis_where_clause { - #( #config_fields )* - } - - #[cfg(feature = "std")] - impl#genesis_impl Default for GenesisConfig#genesis_struct #genesis_where_clause { - fn default() -> Self { - GenesisConfig { - #( #config_field_defaults )* - } - } - } - ) + let config_fields = genesis_config.fields.iter().map(|field| { + let (name, typ, attrs) = (&field.name, &field.typ, &field.attrs); + quote!( #( #[ #attrs] )* pub #name: #typ, ) + }); + + let config_field_defaults = genesis_config.fields.iter().map(|field| { + let (name, default) = (&field.name, &field.default); + quote!( #name: #default, ) + }); + + let serde_bug_bound = if !genesis_config.fields.is_empty() { + let mut b_ser = String::new(); + let mut b_dser = String::new(); + + for typ in genesis_config.fields.iter().map(|c| &c.typ) { + let typ = quote!( #typ ); + b_ser.push_str(&format!("{} : {}::serde::Serialize, ", typ, scrate)); + b_dser.push_str(&format!( + "{} : {}::serde::de::DeserializeOwned, ", + typ, scrate + )); + } + + quote! { + #[serde(bound(serialize = #b_ser))] + #[serde(bound(deserialize = #b_dser))] + } + } else { + quote!() + }; + + let genesis_struct_decl = &genesis_config.genesis_struct_decl; + let genesis_struct = &genesis_config.genesis_struct; + let genesis_impl = &genesis_config.genesis_impl; + let genesis_where_clause = &genesis_config.genesis_where_clause; + + quote!( + /// Genesis config for the module, allow to build genesis storage. 
+ #[derive(#scrate::Serialize, #scrate::Deserialize)] + #[cfg(feature = "std")] + #[serde(rename_all = "camelCase")] + #[serde(deny_unknown_fields)] + #serde_bug_bound + pub struct GenesisConfig#genesis_struct_decl #genesis_where_clause { + #( #config_fields )* + } + + #[cfg(feature = "std")] + impl#genesis_impl Default for GenesisConfig#genesis_struct #genesis_where_clause { + fn default() -> Self { + GenesisConfig { + #( #config_field_defaults )* + } + } + } + ) } fn impl_build_storage( - scrate: &TokenStream, - def: &DeclStorageDefExt, - genesis_config: &GenesisConfigDef, - builders: &BuilderDef, + scrate: &TokenStream, + def: &DeclStorageDefExt, + genesis_config: &GenesisConfigDef, + builders: &BuilderDef, ) -> TokenStream { - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance = &def.optional_instance; - let optional_instance_bound = &def.optional_instance_bound; - let where_clause = &def.where_clause; - - let inherent_instance = def.optional_instance.clone().unwrap_or_else(|| { - let name = syn::Ident::new(DEFAULT_INSTANCE_NAME, Span::call_site()); - quote!( #name ) - }); - let inherent_instance_bound = def.optional_instance_bound.clone().unwrap_or_else(|| { - let bound = syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site()); - quote!( #inherent_instance: #bound ) - }); - - let build_storage_impl = quote!( - <#runtime_generic: #runtime_trait, #inherent_instance_bound> - ); - - let genesis_struct = &genesis_config.genesis_struct; - let genesis_impl = &genesis_config.genesis_impl; - let genesis_where_clause = &genesis_config.genesis_where_clause; - - let ( - fn_generic, - fn_traitinstance, - fn_where_clause - ) = if !genesis_config.is_generic && builders.is_generic { - ( - quote!( <#runtime_generic: #runtime_trait, #optional_instance_bound> ), - quote!( #runtime_generic, #optional_instance ), - Some(&def.where_clause), - ) - } else { - (quote!(), quote!(), None) - }; - - let 
builder_blocks = &builders.blocks; - - let build_storage_impl_trait = quote!( - #scrate::sp_runtime::BuildModuleGenesisStorage<#runtime_generic, #inherent_instance> - ); - - quote!{ - #[cfg(feature = "std")] - impl#genesis_impl GenesisConfig#genesis_struct #genesis_where_clause { - /// Build the storage for this module. - pub fn build_storage #fn_generic (&self) -> std::result::Result< - #scrate::sp_runtime::Storage, - String - > #fn_where_clause { - let mut storage = Default::default(); - self.assimilate_storage::<#fn_traitinstance>(&mut storage)?; - Ok(storage) - } - - /// Assimilate the storage for this module into pre-existing overlays. - pub fn assimilate_storage #fn_generic ( - &self, - storage: &mut #scrate::sp_runtime::Storage, - ) -> std::result::Result<(), String> #fn_where_clause { - #scrate::BasicExternalities::execute_with_storage(storage, || { - #( #builder_blocks )* - Ok(()) - }) - } - } - - #[cfg(feature = "std")] - impl#build_storage_impl #build_storage_impl_trait for GenesisConfig#genesis_struct - #where_clause - { - fn build_module_genesis_storage( - &self, - storage: &mut #scrate::sp_runtime::Storage, - ) -> std::result::Result<(), String> { - self.assimilate_storage::<#fn_traitinstance> (storage) - } - } - } + let runtime_generic = &def.module_runtime_generic; + let runtime_trait = &def.module_runtime_trait; + let optional_instance = &def.optional_instance; + let optional_instance_bound = &def.optional_instance_bound; + let where_clause = &def.where_clause; + + let inherent_instance = def.optional_instance.clone().unwrap_or_else(|| { + let name = syn::Ident::new(DEFAULT_INSTANCE_NAME, Span::call_site()); + quote!( #name ) + }); + let inherent_instance_bound = def.optional_instance_bound.clone().unwrap_or_else(|| { + let bound = syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site()); + quote!( #inherent_instance: #bound ) + }); + + let build_storage_impl = quote!( + <#runtime_generic: #runtime_trait, #inherent_instance_bound> + ); + 
+ let genesis_struct = &genesis_config.genesis_struct; + let genesis_impl = &genesis_config.genesis_impl; + let genesis_where_clause = &genesis_config.genesis_where_clause; + + let (fn_generic, fn_traitinstance, fn_where_clause) = + if !genesis_config.is_generic && builders.is_generic { + ( + quote!( <#runtime_generic: #runtime_trait, #optional_instance_bound> ), + quote!( #runtime_generic, #optional_instance ), + Some(&def.where_clause), + ) + } else { + (quote!(), quote!(), None) + }; + + let builder_blocks = &builders.blocks; + + let build_storage_impl_trait = quote!( + #scrate::sp_runtime::BuildModuleGenesisStorage<#runtime_generic, #inherent_instance> + ); + + quote! { + #[cfg(feature = "std")] + impl#genesis_impl GenesisConfig#genesis_struct #genesis_where_clause { + /// Build the storage for this module. + pub fn build_storage #fn_generic (&self) -> std::result::Result< + #scrate::sp_runtime::Storage, + String + > #fn_where_clause { + let mut storage = Default::default(); + self.assimilate_storage::<#fn_traitinstance>(&mut storage)?; + Ok(storage) + } + + /// Assimilate the storage for this module into pre-existing overlays. 
+ pub fn assimilate_storage #fn_generic ( + &self, + storage: &mut #scrate::sp_runtime::Storage, + ) -> std::result::Result<(), String> #fn_where_clause { + #scrate::BasicExternalities::execute_with_storage(storage, || { + #( #builder_blocks )* + Ok(()) + }) + } + } + + #[cfg(feature = "std")] + impl#build_storage_impl #build_storage_impl_trait for GenesisConfig#genesis_struct + #where_clause + { + fn build_module_genesis_storage( + &self, + storage: &mut #scrate::sp_runtime::Storage, + ) -> std::result::Result<(), String> { + self.assimilate_storage::<#fn_traitinstance> (storage) + } + } + } } pub fn genesis_config_and_build_storage( - scrate: &TokenStream, - def: &DeclStorageDefExt, + scrate: &TokenStream, + def: &DeclStorageDefExt, ) -> TokenStream { - let builders = BuilderDef::from_def(scrate, def); - if !builders.blocks.is_empty() { - let genesis_config = match GenesisConfigDef::from_def(def) { - Ok(genesis_config) => genesis_config, - Err(err) => return err.to_compile_error(), - }; - let decl_genesis_config_and_impl_default = - decl_genesis_config_and_impl_default(scrate, &genesis_config); - let impl_build_storage = impl_build_storage(scrate, def, &genesis_config, &builders); - - quote!{ - #decl_genesis_config_and_impl_default - #impl_build_storage - } - } else { - quote!() - } + let builders = BuilderDef::from_def(scrate, def); + if !builders.blocks.is_empty() { + let genesis_config = match GenesisConfigDef::from_def(def) { + Ok(genesis_config) => genesis_config, + Err(err) => return err.to_compile_error(), + }; + let decl_genesis_config_and_impl_default = + decl_genesis_config_and_impl_default(scrate, &genesis_config); + let impl_build_storage = impl_build_storage(scrate, def, &genesis_config, &builders); + + quote! 
{ + #decl_genesis_config_and_impl_default + #impl_build_storage + } + } else { + quote!() + } } diff --git a/frame/support/procedural/src/storage/getters.rs b/frame/support/procedural/src/storage/getters.rs index ae0e646fcd..d8c8e8f58a 100644 --- a/frame/support/procedural/src/storage/getters.rs +++ b/frame/support/procedural/src/storage/getters.rs @@ -16,65 +16,67 @@ //! Implementation of getters on module structure. +use super::{DeclStorageDefExt, StorageLineTypeDef}; use proc_macro2::TokenStream; use quote::quote; -use super::{DeclStorageDefExt, StorageLineTypeDef}; pub fn impl_getters(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { - let mut getters = TokenStream::new(); + let mut getters = TokenStream::new(); - for (get_fn, line) in def.storage_lines.iter() - .filter_map(|line| line.getter.as_ref().map(|get_fn| (get_fn, line))) - { - let attrs = &line.doc_attrs; + for (get_fn, line) in def + .storage_lines + .iter() + .filter_map(|line| line.getter.as_ref().map(|get_fn| (get_fn, line))) + { + let attrs = &line.doc_attrs; - let storage_struct = &line.storage_struct; - let storage_trait = &line.storage_trait; + let storage_struct = &line.storage_struct; + let storage_trait = &line.storage_trait; - let getter = match &line.storage_type { - StorageLineTypeDef::Simple(value) => { - quote!{ - #( #[ #attrs ] )* - pub fn #get_fn() -> #value { - <#storage_struct as #scrate::#storage_trait>::get() - } - } - }, - StorageLineTypeDef::Map(map) => { - let key = &map.key; - let value = &map.value; - quote!{ - #( #[ #attrs ] )* - pub fn #get_fn>(key: K) -> #value { - <#storage_struct as #scrate::#storage_trait>::get(key) - } - } - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - let value = &map.value; - quote!{ - pub fn #get_fn(k1: KArg1, k2: KArg2) -> #value - where - KArg1: #scrate::codec::EncodeLike<#key1>, - KArg2: #scrate::codec::EncodeLike<#key2>, - { - <#storage_struct as #scrate::#storage_trait>::get(k1, 
k2) - } - } - }, - }; - getters.extend(getter); - } + let getter = match &line.storage_type { + StorageLineTypeDef::Simple(value) => { + quote! { + #( #[ #attrs ] )* + pub fn #get_fn() -> #value { + <#storage_struct as #scrate::#storage_trait>::get() + } + } + } + StorageLineTypeDef::Map(map) => { + let key = &map.key; + let value = &map.value; + quote! { + #( #[ #attrs ] )* + pub fn #get_fn>(key: K) -> #value { + <#storage_struct as #scrate::#storage_trait>::get(key) + } + } + } + StorageLineTypeDef::DoubleMap(map) => { + let key1 = &map.key1; + let key2 = &map.key2; + let value = &map.value; + quote! { + pub fn #get_fn(k1: KArg1, k2: KArg2) -> #value + where + KArg1: #scrate::codec::EncodeLike<#key1>, + KArg2: #scrate::codec::EncodeLike<#key2>, + { + <#storage_struct as #scrate::#storage_trait>::get(k1, k2) + } + } + } + }; + getters.extend(getter); + } - let module_struct = &def.module_struct; - let module_impl = &def.module_impl; - let where_clause = &def.where_clause; + let module_struct = &def.module_struct; + let module_impl = &def.module_impl; + let where_clause = &def.where_clause; - quote!( - impl#module_impl #module_struct #where_clause { - #getters - } - ) + quote!( + impl#module_impl #module_struct #where_clause { + #getters + } + ) } diff --git a/frame/support/procedural/src/storage/instance_trait.rs b/frame/support/procedural/src/storage/instance_trait.rs index b2f0ad9c06..50b38b3556 100644 --- a/frame/support/procedural/src/storage/instance_trait.rs +++ b/frame/support/procedural/src/storage/instance_trait.rs @@ -17,9 +17,9 @@ //! Implementation of the trait instance and the instance structures implementing it. //! (For not instantiable traits there is still the inherent instance implemented). 
-use proc_macro2::{TokenStream, Span}; -use quote::quote; use super::DeclStorageDefExt; +use proc_macro2::{Span, TokenStream}; +use quote::quote; const NUMBER_OF_INSTANCE: usize = 16; pub(crate) const INHERENT_INSTANCE_NAME: &str = "__InherentHiddenInstance"; @@ -27,111 +27,120 @@ pub(crate) const DEFAULT_INSTANTIABLE_TRAIT_NAME: &str = "__GeneratedInstantiabl // Used to generate an instance implementation. struct InstanceDef { - prefix: String, - instance_struct: syn::Ident, - doc: TokenStream, + prefix: String, + instance_struct: syn::Ident, + doc: TokenStream, } pub fn decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { - let mut impls = TokenStream::new(); - - impls.extend(create_instance_trait(def)); - - // Implementation of instances. - if let Some(module_instance) = &def.module_instance { - let instance_defs = (0..NUMBER_OF_INSTANCE) - .map(|i| { - let name = format!("Instance{}", i); - InstanceDef { - instance_struct: syn::Ident::new(&name, proc_macro2::Span::call_site()), - prefix: name, - doc: quote!(#[doc=r"Module instance"]), - } - }) - .chain( - module_instance.instance_default.as_ref().map(|ident| InstanceDef { - prefix: String::new(), - instance_struct: ident.clone(), - doc: quote!(#[doc=r"Default module instance"]), - }) - ); - - for instance_def in instance_defs { - impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); - } - } - - // The name of the inherently available instance. - let inherent_instance = syn::Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()); - - // Implementation of inherent instance. - if let Some(default_instance) = def.module_instance.as_ref() - .and_then(|i| i.instance_default.as_ref()) - { - impls.extend(quote! 
{ - #[doc(hidden)] - pub type #inherent_instance = #default_instance; - }); - } else { - let instance_def = InstanceDef { - prefix: String::new(), - instance_struct: inherent_instance, - doc: quote!(#[doc(hidden)]), - }; - impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); - } - - impls + let mut impls = TokenStream::new(); + + impls.extend(create_instance_trait(def)); + + // Implementation of instances. + if let Some(module_instance) = &def.module_instance { + let instance_defs = (0..NUMBER_OF_INSTANCE) + .map(|i| { + let name = format!("Instance{}", i); + InstanceDef { + instance_struct: syn::Ident::new(&name, proc_macro2::Span::call_site()), + prefix: name, + doc: quote!(#[doc=r"Module instance"]), + } + }) + .chain( + module_instance + .instance_default + .as_ref() + .map(|ident| InstanceDef { + prefix: String::new(), + instance_struct: ident.clone(), + doc: quote!(#[doc=r"Default module instance"]), + }), + ); + + for instance_def in instance_defs { + impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); + } + } + + // The name of the inherently available instance. + let inherent_instance = syn::Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()); + + // Implementation of inherent instance. + if let Some(default_instance) = def + .module_instance + .as_ref() + .and_then(|i| i.instance_default.as_ref()) + { + impls.extend(quote! 
{ + #[doc(hidden)] + pub type #inherent_instance = #default_instance; + }); + } else { + let instance_def = InstanceDef { + prefix: String::new(), + instance_struct: inherent_instance, + doc: quote!(#[doc(hidden)]), + }; + impls.extend(create_and_impl_instance_struct(scrate, &instance_def, def)); + } + + impls } -fn create_instance_trait( - def: &DeclStorageDefExt, -) -> TokenStream { - let instance_trait = def.module_instance.as_ref().map(|i| i.instance_trait.clone()) - .unwrap_or_else(|| syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site())); - - let optional_hide = if def.module_instance.is_some() { - quote!() - } else { - quote!(#[doc(hidden)]) - }; - - quote! { - /// Tag a type as an instance of a module. - /// - /// Defines storage prefixes, they must be unique. - #optional_hide - pub trait #instance_trait: 'static { - /// The prefix used by any storage entry of an instance. - const PREFIX: &'static str; - } - } +fn create_instance_trait(def: &DeclStorageDefExt) -> TokenStream { + let instance_trait = def + .module_instance + .as_ref() + .map(|i| i.instance_trait.clone()) + .unwrap_or_else(|| syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site())); + + let optional_hide = if def.module_instance.is_some() { + quote!() + } else { + quote!(#[doc(hidden)]) + }; + + quote! { + /// Tag a type as an instance of a module. + /// + /// Defines storage prefixes, they must be unique. + #optional_hide + pub trait #instance_trait: 'static { + /// The prefix used by any storage entry of an instance. 
+ const PREFIX: &'static str; + } + } } fn create_and_impl_instance_struct( - scrate: &TokenStream, - instance_def: &InstanceDef, - def: &DeclStorageDefExt, + scrate: &TokenStream, + instance_def: &InstanceDef, + def: &DeclStorageDefExt, ) -> TokenStream { - let instance_trait = def.module_instance.as_ref().map(|i| i.instance_trait.clone()) - .unwrap_or_else(|| syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site())); - - let instance_struct = &instance_def.instance_struct; - let prefix = format!("{}{}", instance_def.prefix, def.crate_name.to_string()); - let doc = &instance_def.doc; - - quote! { - // Those trait are derived because of wrong bounds for generics - #[derive( - Clone, Eq, PartialEq, - #scrate::codec::Encode, - #scrate::codec::Decode, - #scrate::RuntimeDebug, - )] - #doc - pub struct #instance_struct; - impl #instance_trait for #instance_struct { - const PREFIX: &'static str = #prefix; - } - } + let instance_trait = def + .module_instance + .as_ref() + .map(|i| i.instance_trait.clone()) + .unwrap_or_else(|| syn::Ident::new(DEFAULT_INSTANTIABLE_TRAIT_NAME, Span::call_site())); + + let instance_struct = &instance_def.instance_struct; + let prefix = format!("{}{}", instance_def.prefix, def.crate_name.to_string()); + let doc = &instance_def.doc; + + quote! { + // Those trait are derived because of wrong bounds for generics + #[derive( + Clone, Eq, PartialEq, + #scrate::codec::Encode, + #scrate::codec::Decode, + #scrate::RuntimeDebug, + )] + #doc + pub struct #instance_struct; + impl #instance_trait for #instance_struct { + const PREFIX: &'static str = #prefix; + } + } } diff --git a/frame/support/procedural/src/storage/metadata.rs b/frame/support/procedural/src/storage/metadata.rs index bb23c99d9d..b3abb3c12c 100644 --- a/frame/support/procedural/src/storage/metadata.rs +++ b/frame/support/procedural/src/storage/metadata.rs @@ -16,202 +16,209 @@ //! Implementation of `storage_metadata` on module structure, used by construct_runtime. 
+use super::{DeclStorageDefExt, StorageLineDefExt, StorageLineTypeDef}; use frame_support_procedural_tools::clean_type_string; use proc_macro2::TokenStream; use quote::quote; -use super::{DeclStorageDefExt, StorageLineDefExt, StorageLineTypeDef}; fn storage_line_metadata_type(scrate: &TokenStream, line: &StorageLineDefExt) -> TokenStream { - let value_type = &line.value_type; - let value_type = clean_type_string("e!( #value_type ).to_string()); - match &line.storage_type { - StorageLineTypeDef::Simple(_) => { - quote!{ - #scrate::metadata::StorageEntryType::Plain( - #scrate::metadata::DecodeDifferent::Encode(#value_type), - ) - } - }, - StorageLineTypeDef::Map(map) => { - let hasher = map.hasher.into_metadata(); - let key = &map.key; - let key = clean_type_string("e!(#key).to_string()); - quote!{ - #scrate::metadata::StorageEntryType::Map { - hasher: #scrate::metadata::#hasher, - key: #scrate::metadata::DecodeDifferent::Encode(#key), - value: #scrate::metadata::DecodeDifferent::Encode(#value_type), - unused: false, - } - } - }, - StorageLineTypeDef::DoubleMap(map) => { - let hasher1 = map.hasher1.into_metadata(); - let hasher2 = map.hasher2.into_metadata(); - let key1 = &map.key1; - let key1 = clean_type_string("e!(#key1).to_string()); - let key2 = &map.key2; - let key2 = clean_type_string("e!(#key2).to_string()); - quote!{ - #scrate::metadata::StorageEntryType::DoubleMap { - hasher: #scrate::metadata::#hasher1, - key1: #scrate::metadata::DecodeDifferent::Encode(#key1), - key2: #scrate::metadata::DecodeDifferent::Encode(#key2), - value: #scrate::metadata::DecodeDifferent::Encode(#value_type), - key2_hasher: #scrate::metadata::#hasher2, - } - } - }, - } + let value_type = &line.value_type; + let value_type = clean_type_string("e!( #value_type ).to_string()); + match &line.storage_type { + StorageLineTypeDef::Simple(_) => { + quote! 
{ + #scrate::metadata::StorageEntryType::Plain( + #scrate::metadata::DecodeDifferent::Encode(#value_type), + ) + } + } + StorageLineTypeDef::Map(map) => { + let hasher = map.hasher.into_metadata(); + let key = &map.key; + let key = clean_type_string("e!(#key).to_string()); + quote! { + #scrate::metadata::StorageEntryType::Map { + hasher: #scrate::metadata::#hasher, + key: #scrate::metadata::DecodeDifferent::Encode(#key), + value: #scrate::metadata::DecodeDifferent::Encode(#value_type), + unused: false, + } + } + } + StorageLineTypeDef::DoubleMap(map) => { + let hasher1 = map.hasher1.into_metadata(); + let hasher2 = map.hasher2.into_metadata(); + let key1 = &map.key1; + let key1 = clean_type_string("e!(#key1).to_string()); + let key2 = &map.key2; + let key2 = clean_type_string("e!(#key2).to_string()); + quote! { + #scrate::metadata::StorageEntryType::DoubleMap { + hasher: #scrate::metadata::#hasher1, + key1: #scrate::metadata::DecodeDifferent::Encode(#key1), + key2: #scrate::metadata::DecodeDifferent::Encode(#key2), + value: #scrate::metadata::DecodeDifferent::Encode(#value_type), + key2_hasher: #scrate::metadata::#hasher2, + } + } + } + } } fn default_byte_getter( - scrate: &TokenStream, - line: &StorageLineDefExt, - def: &DeclStorageDefExt, + scrate: &TokenStream, + line: &StorageLineDefExt, + def: &DeclStorageDefExt, ) -> (TokenStream, TokenStream) { - let default = line.default_value.as_ref().map(|d| quote!( #d )) - .unwrap_or_else(|| quote!( Default::default() )); - - let str_name = line.name.to_string(); - let struct_name = syn::Ident::new(&("__GetByteStruct".to_string() + &str_name), line.name.span()); - let cache_name = syn::Ident::new(&("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), line.name.span()); - - let runtime_generic = &def.module_runtime_generic; - let runtime_trait = &def.module_runtime_trait; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; - let optional_instance_bound = 
&def.optional_instance_bound; - let optional_instance = &def.optional_instance; - let optional_comma_instance = optional_instance.as_ref().map(|i| quote!(, #i)); - let where_clause = &def.where_clause; - - let query_type = &line.query_type; - - let struct_def = quote! { - #[doc(hidden)] - pub struct #struct_name< - #runtime_generic, #optional_instance_bound_optional_default - >(pub #scrate::sp_std::marker::PhantomData<(#runtime_generic #optional_comma_instance)>); - - #[cfg(feature = "std")] - #[allow(non_upper_case_globals)] - static #cache_name: #scrate::once_cell::sync::OnceCell<#scrate::sp_std::vec::Vec> = - #scrate::once_cell::sync::OnceCell::new(); - - #[cfg(feature = "std")] - impl<#runtime_generic: #runtime_trait, #optional_instance_bound> - #scrate::metadata::DefaultByte - for #struct_name<#runtime_generic, #optional_instance> - #where_clause - { - fn default_byte(&self) -> #scrate::sp_std::vec::Vec { - use #scrate::codec::Encode; - #cache_name.get_or_init(|| { - let def_val: #query_type = #default; - <#query_type as Encode>::encode(&def_val) - }).clone() - } - } - - unsafe impl<#runtime_generic: #runtime_trait, #optional_instance_bound> Send - for #struct_name<#runtime_generic, #optional_instance> #where_clause {} - - unsafe impl<#runtime_generic: #runtime_trait, #optional_instance_bound> Sync - for #struct_name<#runtime_generic, #optional_instance> #where_clause {} - - #[cfg(not(feature = "std"))] - impl<#runtime_generic: #runtime_trait, #optional_instance_bound> - #scrate::metadata::DefaultByte - for #struct_name<#runtime_generic, #optional_instance> - #where_clause - { - fn default_byte(&self) -> #scrate::sp_std::vec::Vec { - use #scrate::codec::Encode; - let def_val: #query_type = #default; - <#query_type as Encode>::encode(&def_val) - } - } - }; - let struct_instance = quote!( - #struct_name::<#runtime_generic, #optional_instance>(#scrate::sp_std::marker::PhantomData) - ); - - (struct_def, struct_instance) + let default = line + .default_value + 
.as_ref() + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!(Default::default())); + + let str_name = line.name.to_string(); + let struct_name = syn::Ident::new( + &("__GetByteStruct".to_string() + &str_name), + line.name.span(), + ); + let cache_name = syn::Ident::new( + &("__CACHE_GET_BYTE_STRUCT_".to_string() + &str_name), + line.name.span(), + ); + + let runtime_generic = &def.module_runtime_generic; + let runtime_trait = &def.module_runtime_trait; + let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; + let optional_instance_bound = &def.optional_instance_bound; + let optional_instance = &def.optional_instance; + let optional_comma_instance = optional_instance.as_ref().map(|i| quote!(, #i)); + let where_clause = &def.where_clause; + + let query_type = &line.query_type; + + let struct_def = quote! { + #[doc(hidden)] + pub struct #struct_name< + #runtime_generic, #optional_instance_bound_optional_default + >(pub #scrate::sp_std::marker::PhantomData<(#runtime_generic #optional_comma_instance)>); + + #[cfg(feature = "std")] + #[allow(non_upper_case_globals)] + static #cache_name: #scrate::once_cell::sync::OnceCell<#scrate::sp_std::vec::Vec> = + #scrate::once_cell::sync::OnceCell::new(); + + #[cfg(feature = "std")] + impl<#runtime_generic: #runtime_trait, #optional_instance_bound> + #scrate::metadata::DefaultByte + for #struct_name<#runtime_generic, #optional_instance> + #where_clause + { + fn default_byte(&self) -> #scrate::sp_std::vec::Vec { + use #scrate::codec::Encode; + #cache_name.get_or_init(|| { + let def_val: #query_type = #default; + <#query_type as Encode>::encode(&def_val) + }).clone() + } + } + + unsafe impl<#runtime_generic: #runtime_trait, #optional_instance_bound> Send + for #struct_name<#runtime_generic, #optional_instance> #where_clause {} + + unsafe impl<#runtime_generic: #runtime_trait, #optional_instance_bound> Sync + for #struct_name<#runtime_generic, #optional_instance> #where_clause {} + + 
#[cfg(not(feature = "std"))] + impl<#runtime_generic: #runtime_trait, #optional_instance_bound> + #scrate::metadata::DefaultByte + for #struct_name<#runtime_generic, #optional_instance> + #where_clause + { + fn default_byte(&self) -> #scrate::sp_std::vec::Vec { + use #scrate::codec::Encode; + let def_val: #query_type = #default; + <#query_type as Encode>::encode(&def_val) + } + } + }; + let struct_instance = quote!( + #struct_name::<#runtime_generic, #optional_instance>(#scrate::sp_std::marker::PhantomData) + ); + + (struct_def, struct_instance) } pub fn impl_metadata(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { - let mut entries = TokenStream::new(); - let mut default_byte_getter_struct_defs = TokenStream::new(); - - for line in def.storage_lines.iter() { - let str_name = line.name.to_string(); - - let modifier = if line.is_option { - quote!(#scrate::metadata::StorageEntryModifier::Optional) - } else { - quote!(#scrate::metadata::StorageEntryModifier::Default) - }; - - let ty = storage_line_metadata_type(scrate, line); - - let ( - default_byte_getter_struct_def, - default_byte_getter_struct_instance, - ) = default_byte_getter(scrate, line, def); - - let mut docs = TokenStream::new(); - for attr in line.attrs.iter().filter_map(|v| v.parse_meta().ok()) { - if let syn::Meta::NameValue(meta) = attr { - if meta.path.is_ident("doc") { - let lit = meta.lit; - docs.extend(quote!(#lit,)); - } - } - } - - let entry = quote! 
{ - #scrate::metadata::StorageEntryMetadata { - name: #scrate::metadata::DecodeDifferent::Encode(#str_name), - modifier: #modifier, - ty: #ty, - default: #scrate::metadata::DecodeDifferent::Encode( - #scrate::metadata::DefaultByteGetter(&#default_byte_getter_struct_instance) - ), - documentation: #scrate::metadata::DecodeDifferent::Encode(&[ #docs ]), - }, - }; - - default_byte_getter_struct_defs.extend(default_byte_getter_struct_def); - entries.extend(entry); - } - - let prefix = if let Some(instance) = &def.module_instance { - let instance_generic = &instance.instance_generic; - quote!(#instance_generic::PREFIX) - } else { - let prefix = def.crate_name.to_string(); - quote!(#prefix) - }; - - let store_metadata = quote!( - #scrate::metadata::StorageMetadata { - prefix: #scrate::metadata::DecodeDifferent::Encode(#prefix), - entries: #scrate::metadata::DecodeDifferent::Encode(&[ #entries ][..]), - } - ); - - let module_struct = &def.module_struct; - let module_impl = &def.module_impl; - let where_clause = &def.where_clause; - - quote!( - #default_byte_getter_struct_defs - - impl#module_impl #module_struct #where_clause { - #[doc(hidden)] - pub fn storage_metadata() -> #scrate::metadata::StorageMetadata { - #store_metadata - } - } - ) + let mut entries = TokenStream::new(); + let mut default_byte_getter_struct_defs = TokenStream::new(); + + for line in def.storage_lines.iter() { + let str_name = line.name.to_string(); + + let modifier = if line.is_option { + quote!(#scrate::metadata::StorageEntryModifier::Optional) + } else { + quote!(#scrate::metadata::StorageEntryModifier::Default) + }; + + let ty = storage_line_metadata_type(scrate, line); + + let (default_byte_getter_struct_def, default_byte_getter_struct_instance) = + default_byte_getter(scrate, line, def); + + let mut docs = TokenStream::new(); + for attr in line.attrs.iter().filter_map(|v| v.parse_meta().ok()) { + if let syn::Meta::NameValue(meta) = attr { + if meta.path.is_ident("doc") { + let lit = meta.lit; 
+ docs.extend(quote!(#lit,)); + } + } + } + + let entry = quote! { + #scrate::metadata::StorageEntryMetadata { + name: #scrate::metadata::DecodeDifferent::Encode(#str_name), + modifier: #modifier, + ty: #ty, + default: #scrate::metadata::DecodeDifferent::Encode( + #scrate::metadata::DefaultByteGetter(&#default_byte_getter_struct_instance) + ), + documentation: #scrate::metadata::DecodeDifferent::Encode(&[ #docs ]), + }, + }; + + default_byte_getter_struct_defs.extend(default_byte_getter_struct_def); + entries.extend(entry); + } + + let prefix = if let Some(instance) = &def.module_instance { + let instance_generic = &instance.instance_generic; + quote!(#instance_generic::PREFIX) + } else { + let prefix = def.crate_name.to_string(); + quote!(#prefix) + }; + + let store_metadata = quote!( + #scrate::metadata::StorageMetadata { + prefix: #scrate::metadata::DecodeDifferent::Encode(#prefix), + entries: #scrate::metadata::DecodeDifferent::Encode(&[ #entries ][..]), + } + ); + + let module_struct = &def.module_struct; + let module_impl = &def.module_impl; + let where_clause = &def.where_clause; + + quote!( + #default_byte_getter_struct_defs + + impl#module_impl #module_struct #where_clause { + #[doc(hidden)] + pub fn storage_metadata() -> #scrate::metadata::StorageMetadata { + #store_metadata + } + } + ) } diff --git a/frame/support/procedural/src/storage/mod.rs b/frame/support/procedural/src/storage/mod.rs index e8599c52a9..cec31bf7b9 100644 --- a/frame/support/procedural/src/storage/mod.rs +++ b/frame/support/procedural/src/storage/mod.rs @@ -16,413 +16,417 @@ //! `decl_storage` input definition and expansion. 
-mod storage_struct; -mod parse; -mod store_trait; +mod genesis_config; mod getters; -mod metadata; mod instance_trait; -mod genesis_config; +mod metadata; +mod parse; +mod storage_struct; +mod store_trait; -use quote::quote; use frame_support_procedural_tools::{ - generate_crate_access, generate_hidden_includes, syn_ext as ext + generate_crate_access, generate_hidden_includes, syn_ext as ext, }; +use quote::quote; /// All information contained in input of decl_storage pub struct DeclStorageDef { - /// Name of the module used to import hidden imports. - hidden_crate: Option, - /// Visibility of store trait. - visibility: syn::Visibility, - /// Name of store trait: usually `Store`. - store_trait: syn::Ident, - /// Module name used by construct_runtime: usually `Module`. - module_name: syn::Ident, - /// Usually `T`. - module_runtime_generic: syn::Ident, - /// Usually `Trait` - module_runtime_trait: syn::Path, - /// For instantiable module: usually `I: Instance=DefaultInstance`. - module_instance: Option, - /// Where claused used to constrain T and I even more. - where_clause: Option, - /// The extra build function used to build storage at genesis. - extra_genesis_build: Option, - /// The extra genesis config fields. - extra_genesis_config_lines: Vec, - /// Definition of storages. - storage_lines: Vec, - /// Name of the crate, used for storage prefixes. - crate_name: syn::Ident, + /// Name of the module used to import hidden imports. + hidden_crate: Option, + /// Visibility of store trait. + visibility: syn::Visibility, + /// Name of store trait: usually `Store`. + store_trait: syn::Ident, + /// Module name used by construct_runtime: usually `Module`. + module_name: syn::Ident, + /// Usually `T`. + module_runtime_generic: syn::Ident, + /// Usually `Trait` + module_runtime_trait: syn::Path, + /// For instantiable module: usually `I: Instance=DefaultInstance`. + module_instance: Option, + /// Where claused used to constrain T and I even more. 
+ where_clause: Option, + /// The extra build function used to build storage at genesis. + extra_genesis_build: Option, + /// The extra genesis config fields. + extra_genesis_config_lines: Vec, + /// Definition of storages. + storage_lines: Vec, + /// Name of the crate, used for storage prefixes. + crate_name: syn::Ident, } impl syn::parse::Parse for DeclStorageDef { - fn parse(input: syn::parse::ParseStream) -> syn::Result { - parse::parse(input) - } + fn parse(input: syn::parse::ParseStream) -> syn::Result { + parse::parse(input) + } } /// Extended version of `DeclStorageDef` with useful precomputed value. pub struct DeclStorageDefExt { - /// Name of the module used to import hidden imports. - hidden_crate: Option, - /// Visibility of store trait. - visibility: syn::Visibility, - /// Name of store trait: usually `Store`. - store_trait: syn::Ident, - /// Module name used by construct_runtime: usually `Module`. - #[allow(unused)] - module_name: syn::Ident, - /// Usually `T`. - module_runtime_generic: syn::Ident, - /// Usually `Trait`. - module_runtime_trait: syn::Path, - /// For instantiable module: usually `I: Instance=DefaultInstance`. - module_instance: Option, - /// Where claused used to constrain T and I even more. - where_clause: Option, - /// The extra build function used to build storage at genesis. - extra_genesis_build: Option, - /// The extra genesis config fields. - extra_genesis_config_lines: Vec, - /// Definition of storages. - storage_lines: Vec, - /// Name of the crate, used for storage prefixes. - crate_name: syn::Ident, - /// Full struct expansion: `Module`. - module_struct: proc_macro2::TokenStream, - /// Impl block for module: ``. - module_impl: proc_macro2::TokenStream, - /// For instantiable: `I`. - optional_instance: Option, - /// For instantiable: `I: Instance`. - optional_instance_bound: Option, - /// For instantiable: `I: Instance = DefaultInstance`. 
- optional_instance_bound_optional_default: Option, + /// Name of the module used to import hidden imports. + hidden_crate: Option, + /// Visibility of store trait. + visibility: syn::Visibility, + /// Name of store trait: usually `Store`. + store_trait: syn::Ident, + /// Module name used by construct_runtime: usually `Module`. + #[allow(unused)] + module_name: syn::Ident, + /// Usually `T`. + module_runtime_generic: syn::Ident, + /// Usually `Trait`. + module_runtime_trait: syn::Path, + /// For instantiable module: usually `I: Instance=DefaultInstance`. + module_instance: Option, + /// Where claused used to constrain T and I even more. + where_clause: Option, + /// The extra build function used to build storage at genesis. + extra_genesis_build: Option, + /// The extra genesis config fields. + extra_genesis_config_lines: Vec, + /// Definition of storages. + storage_lines: Vec, + /// Name of the crate, used for storage prefixes. + crate_name: syn::Ident, + /// Full struct expansion: `Module`. + module_struct: proc_macro2::TokenStream, + /// Impl block for module: ``. + module_impl: proc_macro2::TokenStream, + /// For instantiable: `I`. + optional_instance: Option, + /// For instantiable: `I: Instance`. + optional_instance_bound: Option, + /// For instantiable: `I: Instance = DefaultInstance`. 
+ optional_instance_bound_optional_default: Option, } impl From for DeclStorageDefExt { - fn from(mut def: DeclStorageDef) -> Self { - let storage_lines = def.storage_lines.drain(..).collect::>(); - let storage_lines = storage_lines.into_iter() - .map(|line| StorageLineDefExt::from_def(line, &def)) - .collect(); - - let ( - optional_instance, - optional_instance_bound, - optional_instance_bound_optional_default, - ) = if let Some(instance) = def.module_instance.as_ref() { - let instance_generic = &instance.instance_generic; - let instance_trait= &instance.instance_trait; - let optional_equal_instance_default = instance.instance_default.as_ref() - .map(|d| quote!( = #d )); - ( - Some(quote!(#instance_generic)), - Some(quote!(#instance_generic: #instance_trait)), - Some(quote!(#instance_generic: #instance_trait #optional_equal_instance_default)), - ) - } else { - (None, None, None) - }; - - let module_runtime_generic = &def.module_runtime_generic; - let module_runtime_trait = &def.module_runtime_trait; - let module_name = &def.module_name; - - let module_struct = quote!( - #module_name<#module_runtime_generic, #optional_instance> - ); - - let module_impl = quote!( - <#module_runtime_generic: #module_runtime_trait + 'static, #optional_instance_bound> - ); - - Self { - hidden_crate: def.hidden_crate, - visibility: def.visibility, - store_trait: def.store_trait, - module_name: def.module_name, - module_runtime_generic: def.module_runtime_generic, - module_runtime_trait: def.module_runtime_trait, - module_instance: def.module_instance, - where_clause: def.where_clause, - extra_genesis_build: def.extra_genesis_build, - extra_genesis_config_lines: def.extra_genesis_config_lines, - crate_name: def.crate_name, - storage_lines, - module_struct, - module_impl, - optional_instance, - optional_instance_bound, - optional_instance_bound_optional_default, - } - } + fn from(mut def: DeclStorageDef) -> Self { + let storage_lines = def.storage_lines.drain(..).collect::>(); + let 
storage_lines = storage_lines + .into_iter() + .map(|line| StorageLineDefExt::from_def(line, &def)) + .collect(); + + let (optional_instance, optional_instance_bound, optional_instance_bound_optional_default) = + if let Some(instance) = def.module_instance.as_ref() { + let instance_generic = &instance.instance_generic; + let instance_trait = &instance.instance_trait; + let optional_equal_instance_default = + instance.instance_default.as_ref().map(|d| quote!( = #d )); + ( + Some(quote!(#instance_generic)), + Some(quote!(#instance_generic: #instance_trait)), + Some( + quote!(#instance_generic: #instance_trait #optional_equal_instance_default), + ), + ) + } else { + (None, None, None) + }; + + let module_runtime_generic = &def.module_runtime_generic; + let module_runtime_trait = &def.module_runtime_trait; + let module_name = &def.module_name; + + let module_struct = quote!( + #module_name<#module_runtime_generic, #optional_instance> + ); + + let module_impl = quote!( + <#module_runtime_generic: #module_runtime_trait + 'static, #optional_instance_bound> + ); + + Self { + hidden_crate: def.hidden_crate, + visibility: def.visibility, + store_trait: def.store_trait, + module_name: def.module_name, + module_runtime_generic: def.module_runtime_generic, + module_runtime_trait: def.module_runtime_trait, + module_instance: def.module_instance, + where_clause: def.where_clause, + extra_genesis_build: def.extra_genesis_build, + extra_genesis_config_lines: def.extra_genesis_config_lines, + crate_name: def.crate_name, + storage_lines, + module_struct, + module_impl, + optional_instance, + optional_instance_bound, + optional_instance_bound_optional_default, + } + } } /// Usually `I: Instance=DefaultInstance`. pub struct ModuleInstanceDef { - /// Usually: `I`. - instance_generic: syn::Ident, - /// Usually: `Instance`. - instance_trait: syn::Ident, - /// Usually: `DefaultInstance`. - instance_default: Option, + /// Usually: `I`. 
+ instance_generic: syn::Ident, + /// Usually: `Instance`. + instance_trait: syn::Ident, + /// Usually: `DefaultInstance`. + instance_default: Option, } pub struct StorageLineDef { - attrs: Vec, - /// Visibility of the storage struct. - visibility: syn::Visibility, - name: syn::Ident, - /// The name of getter function to be implemented on Module struct. - getter: Option, - /// The name of the field to be used in genesis config if any. - config: Option, - /// The build function of the storage if any. - build: Option, - /// Default value of genesis config field and also for storage when no value available. - default_value: Option, - storage_type: StorageLineTypeDef, + attrs: Vec, + /// Visibility of the storage struct. + visibility: syn::Visibility, + name: syn::Ident, + /// The name of getter function to be implemented on Module struct. + getter: Option, + /// The name of the field to be used in genesis config if any. + config: Option, + /// The build function of the storage if any. + build: Option, + /// Default value of genesis config field and also for storage when no value available. + default_value: Option, + storage_type: StorageLineTypeDef, } pub struct StorageLineDefExt { - #[allow(unused)] - attrs: Vec, - /// Visibility of the storage struct. - visibility: syn::Visibility, - name: syn::Ident, - /// The name of getter function to be implemented on Module struct. - getter: Option, - /// The name of the field to be used in genesis config if any. - config: Option, - /// The build function of the storage if any. - build: Option, - /// Default value of genesis config field and also for storage when no value available. - default_value: Option, - storage_type: StorageLineTypeDef, - doc_attrs: Vec, - /// Either the type stored in storage or wrapped in an Option. - query_type: syn::Type, - /// The type stored in storage. - value_type: syn::Type, - /// Full struct, for example: `StorageName`. 
- storage_struct: proc_macro2::TokenStream, - /// If storage is generic over runtime then `T`. - optional_storage_runtime_comma: Option, - /// If storage is generic over runtime then `T: Trait`. - optional_storage_runtime_bound_comma: Option, - /// The where clause to use to constrain generics if storage is generic over runtime. - optional_storage_where_clause: Option, - /// Full trait, for example: `storage::StorageMap`. - storage_trait: proc_macro2::TokenStream, - /// Full trait, for example: `storage::generator::StorageMap`. - storage_generator_trait: proc_macro2::TokenStream, - /// Whether the storage is generic. - is_generic: bool, - /// Whether the storage value is an option. - is_option: bool, + #[allow(unused)] + attrs: Vec, + /// Visibility of the storage struct. + visibility: syn::Visibility, + name: syn::Ident, + /// The name of getter function to be implemented on Module struct. + getter: Option, + /// The name of the field to be used in genesis config if any. + config: Option, + /// The build function of the storage if any. + build: Option, + /// Default value of genesis config field and also for storage when no value available. + default_value: Option, + storage_type: StorageLineTypeDef, + doc_attrs: Vec, + /// Either the type stored in storage or wrapped in an Option. + query_type: syn::Type, + /// The type stored in storage. + value_type: syn::Type, + /// Full struct, for example: `StorageName`. + storage_struct: proc_macro2::TokenStream, + /// If storage is generic over runtime then `T`. + optional_storage_runtime_comma: Option, + /// If storage is generic over runtime then `T: Trait`. + optional_storage_runtime_bound_comma: Option, + /// The where clause to use to constrain generics if storage is generic over runtime. + optional_storage_where_clause: Option, + /// Full trait, for example: `storage::StorageMap`. + storage_trait: proc_macro2::TokenStream, + /// Full trait, for example: `storage::generator::StorageMap`. 
+ storage_generator_trait: proc_macro2::TokenStream, + /// Whether the storage is generic. + is_generic: bool, + /// Whether the storage value is an option. + is_option: bool, } impl StorageLineDefExt { - fn from_def(storage_def: StorageLineDef, def: &DeclStorageDef) -> Self { - let is_generic = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => { - ext::type_contains_ident(&value, &def.module_runtime_generic) - }, - StorageLineTypeDef::Map(map) => { - ext::type_contains_ident(&map.key, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } - StorageLineTypeDef::DoubleMap(map) => { - ext::type_contains_ident(&map.key1, &def.module_runtime_generic) - || ext::type_contains_ident(&map.key2, &def.module_runtime_generic) - || ext::type_contains_ident(&map.value, &def.module_runtime_generic) - } - }; - - let query_type = match &storage_def.storage_type { - StorageLineTypeDef::Simple(value) => value.clone(), - StorageLineTypeDef::Map(map) => map.value.clone(), - StorageLineTypeDef::DoubleMap(map) => map.value.clone(), - }; - let is_option = ext::extract_type_option(&query_type).is_some(); - let value_type = ext::extract_type_option(&query_type).unwrap_or(query_type.clone()); - - let module_runtime_generic = &def.module_runtime_generic; - let module_runtime_trait = &def.module_runtime_trait; - let optional_storage_runtime_comma = if is_generic { - Some(quote!( #module_runtime_generic, )) - } else { - None - }; - let optional_storage_runtime_bound_comma = if is_generic { - Some(quote!( #module_runtime_generic: #module_runtime_trait, )) - } else { - None - }; - - let storage_name = &storage_def.name; - let optional_instance_generic = def.module_instance.as_ref().map(|i| { - let instance_generic = &i.instance_generic; - quote!( #instance_generic ) - }); - let storage_struct = quote!( - #storage_name<#optional_storage_runtime_comma #optional_instance_generic> - ); - - let optional_storage_where_clause = if 
is_generic { - def.where_clause.as_ref().map(|w| quote!( #w )) - } else { - None - }; - - let storage_trait_truncated = match &storage_def.storage_type { - StorageLineTypeDef::Simple(_) => { - quote!( StorageValue<#value_type> ) - }, - StorageLineTypeDef::Map(map) => { - let key = &map.key; - quote!( StorageMap<#key, #value_type> ) - }, - StorageLineTypeDef::DoubleMap(map) => { - let key1 = &map.key1; - let key2 = &map.key2; - quote!( StorageDoubleMap<#key1, #key2, #value_type> ) - }, - }; - - let storage_trait = quote!( storage::#storage_trait_truncated ); - let storage_generator_trait = quote!( storage::generator::#storage_trait_truncated ); - - let doc_attrs = storage_def.attrs.iter() - .filter_map(|a| a.parse_meta().ok()) - .filter(|m| m.path().is_ident("doc")) - .collect(); - - Self { - attrs: storage_def.attrs, - visibility: storage_def.visibility, - name: storage_def.name, - getter: storage_def.getter, - config: storage_def.config, - build: storage_def.build, - default_value: storage_def.default_value, - storage_type: storage_def.storage_type, - doc_attrs, - query_type, - value_type, - storage_struct, - optional_storage_runtime_comma, - optional_storage_runtime_bound_comma, - optional_storage_where_clause, - storage_trait, - storage_generator_trait, - is_generic, - is_option, - } - } + fn from_def(storage_def: StorageLineDef, def: &DeclStorageDef) -> Self { + let is_generic = match &storage_def.storage_type { + StorageLineTypeDef::Simple(value) => { + ext::type_contains_ident(&value, &def.module_runtime_generic) + } + StorageLineTypeDef::Map(map) => { + ext::type_contains_ident(&map.key, &def.module_runtime_generic) + || ext::type_contains_ident(&map.value, &def.module_runtime_generic) + } + StorageLineTypeDef::DoubleMap(map) => { + ext::type_contains_ident(&map.key1, &def.module_runtime_generic) + || ext::type_contains_ident(&map.key2, &def.module_runtime_generic) + || ext::type_contains_ident(&map.value, &def.module_runtime_generic) + } + }; + + let 
query_type = match &storage_def.storage_type { + StorageLineTypeDef::Simple(value) => value.clone(), + StorageLineTypeDef::Map(map) => map.value.clone(), + StorageLineTypeDef::DoubleMap(map) => map.value.clone(), + }; + let is_option = ext::extract_type_option(&query_type).is_some(); + let value_type = ext::extract_type_option(&query_type).unwrap_or(query_type.clone()); + + let module_runtime_generic = &def.module_runtime_generic; + let module_runtime_trait = &def.module_runtime_trait; + let optional_storage_runtime_comma = if is_generic { + Some(quote!( #module_runtime_generic, )) + } else { + None + }; + let optional_storage_runtime_bound_comma = if is_generic { + Some(quote!( #module_runtime_generic: #module_runtime_trait, )) + } else { + None + }; + + let storage_name = &storage_def.name; + let optional_instance_generic = def.module_instance.as_ref().map(|i| { + let instance_generic = &i.instance_generic; + quote!( #instance_generic ) + }); + let storage_struct = quote!( + #storage_name<#optional_storage_runtime_comma #optional_instance_generic> + ); + + let optional_storage_where_clause = if is_generic { + def.where_clause.as_ref().map(|w| quote!( #w )) + } else { + None + }; + + let storage_trait_truncated = match &storage_def.storage_type { + StorageLineTypeDef::Simple(_) => quote!( StorageValue<#value_type> ), + StorageLineTypeDef::Map(map) => { + let key = &map.key; + quote!( StorageMap<#key, #value_type> ) + } + StorageLineTypeDef::DoubleMap(map) => { + let key1 = &map.key1; + let key2 = &map.key2; + quote!( StorageDoubleMap<#key1, #key2, #value_type> ) + } + }; + + let storage_trait = quote!( storage::#storage_trait_truncated ); + let storage_generator_trait = quote!( storage::generator::#storage_trait_truncated ); + + let doc_attrs = storage_def + .attrs + .iter() + .filter_map(|a| a.parse_meta().ok()) + .filter(|m| m.path().is_ident("doc")) + .collect(); + + Self { + attrs: storage_def.attrs, + visibility: storage_def.visibility, + name: 
storage_def.name, + getter: storage_def.getter, + config: storage_def.config, + build: storage_def.build, + default_value: storage_def.default_value, + storage_type: storage_def.storage_type, + doc_attrs, + query_type, + value_type, + storage_struct, + optional_storage_runtime_comma, + optional_storage_runtime_bound_comma, + optional_storage_where_clause, + storage_trait, + storage_generator_trait, + is_generic, + is_option, + } + } } pub enum StorageLineTypeDef { - Map(MapDef), - DoubleMap(DoubleMapDef), - Simple(syn::Type), + Map(MapDef), + DoubleMap(DoubleMapDef), + Simple(syn::Type), } pub struct MapDef { - pub hasher: HasherKind, - pub key: syn::Type, - /// This is the query value not the inner value used in storage trait implementation. - pub value: syn::Type, + pub hasher: HasherKind, + pub key: syn::Type, + /// This is the query value not the inner value used in storage trait implementation. + pub value: syn::Type, } pub struct DoubleMapDef { - pub hasher1: HasherKind, - pub hasher2: HasherKind, - pub key1: syn::Type, - pub key2: syn::Type, - /// This is the query value not the inner value used in storage trait implementation. - pub value: syn::Type, + pub hasher1: HasherKind, + pub hasher2: HasherKind, + pub key1: syn::Type, + pub key2: syn::Type, + /// This is the query value not the inner value used in storage trait implementation. 
+ pub value: syn::Type, } pub struct ExtraGenesisLineDef { - attrs: Vec, - name: syn::Ident, - typ: syn::Type, - default: Option, + attrs: Vec, + name: syn::Ident, + typ: syn::Type, + default: Option, } #[derive(Debug, Clone)] pub enum HasherKind { - Blake2_256, - Blake2_128, - Blake2_128Concat, - Twox256, - Twox128, - Twox64Concat, - Identity, + Blake2_256, + Blake2_128, + Blake2_128Concat, + Twox256, + Twox128, + Twox64Concat, + Identity, } impl HasherKind { - fn to_storage_hasher_struct(&self) -> proc_macro2::TokenStream { - match self { - HasherKind::Blake2_256 => quote!( Blake2_256 ), - HasherKind::Blake2_128 => quote!( Blake2_128 ), - HasherKind::Blake2_128Concat => quote!( Blake2_128Concat ), - HasherKind::Twox256 => quote!( Twox256 ), - HasherKind::Twox128 => quote!( Twox128 ), - HasherKind::Twox64Concat => quote!( Twox64Concat ), - HasherKind::Identity => quote!( Identity ), - } - } - - fn into_metadata(&self) -> proc_macro2::TokenStream { - match self { - HasherKind::Blake2_256 => quote!( StorageHasher::Blake2_256 ), - HasherKind::Blake2_128 => quote!( StorageHasher::Blake2_128 ), - HasherKind::Blake2_128Concat => quote!( StorageHasher::Blake2_128Concat ), - HasherKind::Twox256 => quote!( StorageHasher::Twox256 ), - HasherKind::Twox128 => quote!( StorageHasher::Twox128 ), - HasherKind::Twox64Concat => quote!( StorageHasher::Twox64Concat ), - HasherKind::Identity => quote!( StorageHasher::Identity ), - } - } + fn to_storage_hasher_struct(&self) -> proc_macro2::TokenStream { + match self { + HasherKind::Blake2_256 => quote!(Blake2_256), + HasherKind::Blake2_128 => quote!(Blake2_128), + HasherKind::Blake2_128Concat => quote!(Blake2_128Concat), + HasherKind::Twox256 => quote!(Twox256), + HasherKind::Twox128 => quote!(Twox128), + HasherKind::Twox64Concat => quote!(Twox64Concat), + HasherKind::Identity => quote!(Identity), + } + } + + fn into_metadata(&self) -> proc_macro2::TokenStream { + match self { + HasherKind::Blake2_256 => 
quote!(StorageHasher::Blake2_256), + HasherKind::Blake2_128 => quote!(StorageHasher::Blake2_128), + HasherKind::Blake2_128Concat => quote!(StorageHasher::Blake2_128Concat), + HasherKind::Twox256 => quote!(StorageHasher::Twox256), + HasherKind::Twox128 => quote!(StorageHasher::Twox128), + HasherKind::Twox64Concat => quote!(StorageHasher::Twox64Concat), + HasherKind::Identity => quote!(StorageHasher::Identity), + } + } } /// Full implementation of decl_storage. pub fn decl_storage_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let def = syn::parse_macro_input!(input as DeclStorageDef); - let def_ext = DeclStorageDefExt::from(def); - - let hidden_crate_name = def_ext.hidden_crate.as_ref().map(|i| i.to_string()) - .unwrap_or_else(|| "decl_storage".to_string()); - - let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); - let scrate_decl = generate_hidden_includes(&hidden_crate_name, "frame-support"); - - let store_trait = store_trait::decl_and_impl(&def_ext); - let getters = getters::impl_getters(&scrate, &def_ext); - let metadata = metadata::impl_metadata(&scrate, &def_ext); - let instance_trait = instance_trait::decl_and_impl(&scrate, &def_ext); - let genesis_config = genesis_config::genesis_config_and_build_storage(&scrate, &def_ext); - let storage_struct = storage_struct::decl_and_impl(&scrate, &def_ext); - - quote!( - use #scrate::{ - StorageValue as _, - StorageMap as _, - StorageDoubleMap as _, - StoragePrefixedMap as _, - }; - - #scrate_decl - #store_trait - #getters - #metadata - #instance_trait - #genesis_config - #storage_struct - ).into() + let def = syn::parse_macro_input!(input as DeclStorageDef); + let def_ext = DeclStorageDefExt::from(def); + + let hidden_crate_name = def_ext + .hidden_crate + .as_ref() + .map(|i| i.to_string()) + .unwrap_or_else(|| "decl_storage".to_string()); + + let scrate = generate_crate_access(&hidden_crate_name, "frame-support"); + let scrate_decl = 
generate_hidden_includes(&hidden_crate_name, "frame-support"); + + let store_trait = store_trait::decl_and_impl(&def_ext); + let getters = getters::impl_getters(&scrate, &def_ext); + let metadata = metadata::impl_metadata(&scrate, &def_ext); + let instance_trait = instance_trait::decl_and_impl(&scrate, &def_ext); + let genesis_config = genesis_config::genesis_config_and_build_storage(&scrate, &def_ext); + let storage_struct = storage_struct::decl_and_impl(&scrate, &def_ext); + + quote!( + use #scrate::{ + StorageValue as _, + StorageMap as _, + StorageDoubleMap as _, + StoragePrefixedMap as _, + }; + + #scrate_decl + #store_trait + #getters + #metadata + #instance_trait + #genesis_config + #storage_struct + ) + .into() } diff --git a/frame/support/procedural/src/storage/parse.rs b/frame/support/procedural/src/storage/parse.rs index af568c78cc..6b518dc18c 100644 --- a/frame/support/procedural/src/storage/parse.rs +++ b/frame/support/procedural/src/storage/parse.rs @@ -16,489 +16,486 @@ //! Parsing of decl_storage input. 
-use frame_support_procedural_tools::{ToTokens, Parse, syn_ext as ext}; -use syn::{Ident, Token, spanned::Spanned}; +use frame_support_procedural_tools::{syn_ext as ext, Parse, ToTokens}; +use syn::{spanned::Spanned, Ident, Token}; mod keyword { - syn::custom_keyword!(hiddencrate); - syn::custom_keyword!(add_extra_genesis); - syn::custom_keyword!(extra_genesis_skip_phantom_data_field); - syn::custom_keyword!(config); - syn::custom_keyword!(build); - syn::custom_keyword!(get); - syn::custom_keyword!(map); - syn::custom_keyword!(double_map); - syn::custom_keyword!(opaque_blake2_256); - syn::custom_keyword!(opaque_blake2_128); - syn::custom_keyword!(blake2_128_concat); - syn::custom_keyword!(opaque_twox_256); - syn::custom_keyword!(opaque_twox_128); - syn::custom_keyword!(twox_64_concat); - syn::custom_keyword!(identity); - syn::custom_keyword!(hasher); - syn::custom_keyword!(tainted); - syn::custom_keyword!(natural); - syn::custom_keyword!(prehashed); + syn::custom_keyword!(hiddencrate); + syn::custom_keyword!(add_extra_genesis); + syn::custom_keyword!(extra_genesis_skip_phantom_data_field); + syn::custom_keyword!(config); + syn::custom_keyword!(build); + syn::custom_keyword!(get); + syn::custom_keyword!(map); + syn::custom_keyword!(double_map); + syn::custom_keyword!(opaque_blake2_256); + syn::custom_keyword!(opaque_blake2_128); + syn::custom_keyword!(blake2_128_concat); + syn::custom_keyword!(opaque_twox_256); + syn::custom_keyword!(opaque_twox_128); + syn::custom_keyword!(twox_64_concat); + syn::custom_keyword!(identity); + syn::custom_keyword!(hasher); + syn::custom_keyword!(tainted); + syn::custom_keyword!(natural); + syn::custom_keyword!(prehashed); } /// Specific `Opt` to implement structure with optional parsing #[derive(Debug, Clone)] pub struct Opt

{ - pub inner: Option

, + pub inner: Option

, } impl syn::export::ToTokens for Opt

{ - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - if let Some(ref p) = self.inner { - p.to_tokens(tokens); - } - } + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + if let Some(ref p) = self.inner { + p.to_tokens(tokens); + } + } } macro_rules! impl_parse_for_opt { - ($struct:ident => $token:path) => { - impl syn::parse::Parse for Opt<$struct> { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - if input.peek($token) { - input.parse().map(|p| Opt { inner: Some(p) }) - } else { - Ok(Opt { inner: None }) - } - } - } - }; + ($struct:ident => $token:path) => { + impl syn::parse::Parse for Opt<$struct> { + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + if input.peek($token) { + input.parse().map(|p| Opt { inner: Some(p) }) + } else { + Ok(Opt { inner: None }) + } + } + } + }; } /// Parsing usage only #[derive(Parse, ToTokens, Debug)] struct StorageDefinition { - pub hidden_crate: Opt, - pub visibility: syn::Visibility, - pub trait_token: Token![trait], - pub ident: Ident, - pub for_token: Token![for], - pub module_ident: Ident, - pub mod_lt_token: Token![<], - pub mod_param_generic: syn::Ident, - pub mod_param_bound_token: Option, - pub mod_param_bound: syn::Path, - pub mod_instance_param_token: Option, - pub mod_instance: Option, - pub mod_instantiable_token: Option, - pub mod_instantiable: Option, - pub mod_default_instance_token: Option, - pub mod_default_instance: Option, - pub mod_gt_token: Token![>], - pub as_token: Token![as], - pub crate_ident: Ident, - pub where_clause: Option, - pub content: ext::Braces>, - pub extra_genesis: Opt, + pub hidden_crate: Opt, + pub visibility: syn::Visibility, + pub trait_token: Token![trait], + pub ident: Ident, + pub for_token: Token![for], + pub module_ident: Ident, + pub mod_lt_token: Token![<], + pub mod_param_generic: syn::Ident, + pub mod_param_bound_token: Option, + pub mod_param_bound: syn::Path, + pub mod_instance_param_token: Option, + pub 
mod_instance: Option, + pub mod_instantiable_token: Option, + pub mod_instantiable: Option, + pub mod_default_instance_token: Option, + pub mod_default_instance: Option, + pub mod_gt_token: Token![>], + pub as_token: Token![as], + pub crate_ident: Ident, + pub where_clause: Option, + pub content: ext::Braces>, + pub extra_genesis: Opt, } #[derive(Parse, ToTokens, Debug)] struct SpecificHiddenCrate { - pub keyword: keyword::hiddencrate, - pub ident: ext::Parens, + pub keyword: keyword::hiddencrate, + pub ident: ext::Parens, } impl_parse_for_opt!(SpecificHiddenCrate => keyword::hiddencrate); #[derive(Parse, ToTokens, Debug)] struct AddExtraGenesis { - pub extragenesis_keyword: keyword::add_extra_genesis, - pub content: ext::Braces, + pub extragenesis_keyword: keyword::add_extra_genesis, + pub content: ext::Braces, } impl_parse_for_opt!(AddExtraGenesis => keyword::add_extra_genesis); #[derive(Parse, ToTokens, Debug)] struct AddExtraGenesisContent { - pub lines: ext::Punctuated, + pub lines: ext::Punctuated, } #[derive(ToTokens, Debug)] enum AddExtraGenesisLineEnum { - AddExtraGenesisLine(AddExtraGenesisLine), - AddExtraGenesisBuild(DeclStorageBuild), + AddExtraGenesisLine(AddExtraGenesisLine), + AddExtraGenesisBuild(DeclStorageBuild), } impl syn::parse::Parse for AddExtraGenesisLineEnum { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - let input_fork = input.fork(); - // OuterAttributes are forbidden for build variant, - // However to have better documentation we match against the keyword after those attributes. 
- let _: ext::OuterAttributes = input_fork.parse()?; - let lookahead = input_fork.lookahead1(); - if lookahead.peek(keyword::build) { - Ok(Self::AddExtraGenesisBuild(input.parse()?)) - } else if lookahead.peek(keyword::config) { - Ok(Self::AddExtraGenesisLine(input.parse()?)) - } else { - Err(lookahead.error()) - } - } + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + let input_fork = input.fork(); + // OuterAttributes are forbidden for build variant, + // However to have better documentation we match against the keyword after those attributes. + let _: ext::OuterAttributes = input_fork.parse()?; + let lookahead = input_fork.lookahead1(); + if lookahead.peek(keyword::build) { + Ok(Self::AddExtraGenesisBuild(input.parse()?)) + } else if lookahead.peek(keyword::config) { + Ok(Self::AddExtraGenesisLine(input.parse()?)) + } else { + Err(lookahead.error()) + } + } } #[derive(Parse, ToTokens, Debug)] struct AddExtraGenesisLine { - pub attrs: ext::OuterAttributes, - pub config_keyword: keyword::config, - pub extra_field: ext::Parens, - pub coldot_token: Token![:], - pub extra_type: syn::Type, - pub default_value: Opt, + pub attrs: ext::OuterAttributes, + pub config_keyword: keyword::config, + pub extra_field: ext::Parens, + pub coldot_token: Token![:], + pub extra_type: syn::Type, + pub default_value: Opt, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageLine { - // attrs (main use case is doc) - pub attrs: ext::OuterAttributes, - // visibility (no need to make optional - pub visibility: syn::Visibility, - // name - pub name: Ident, - pub getter: Opt, - pub config: Opt, - pub build: Opt, - pub coldot_token: Token![:], - pub storage_type: DeclStorageType, - pub default_value: Opt, + // attrs (main use case is doc) + pub attrs: ext::OuterAttributes, + // visibility (no need to make optional + pub visibility: syn::Visibility, + // name + pub name: Ident, + pub getter: Opt, + pub config: Opt, + pub build: Opt, + pub coldot_token: Token![:], + pub 
storage_type: DeclStorageType, + pub default_value: Opt, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageGetterBody { - fn_keyword: Option, - ident: Ident, + fn_keyword: Option, + ident: Ident, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageGetter { - pub getter_keyword: keyword::get, - pub getfn: ext::Parens, + pub getter_keyword: keyword::get, + pub getfn: ext::Parens, } impl_parse_for_opt!(DeclStorageGetter => keyword::get); #[derive(Parse, ToTokens, Debug)] struct DeclStorageConfig { - pub config_keyword: keyword::config, - pub expr: ext::Parens>, + pub config_keyword: keyword::config, + pub expr: ext::Parens>, } impl_parse_for_opt!(DeclStorageConfig => keyword::config); #[derive(Parse, ToTokens, Debug)] struct DeclStorageBuild { - pub build_keyword: keyword::build, - pub expr: ext::Parens, + pub build_keyword: keyword::build, + pub expr: ext::Parens, } impl_parse_for_opt!(DeclStorageBuild => keyword::build); #[derive(ToTokens, Debug)] enum DeclStorageType { - Map(DeclStorageMap), - DoubleMap(DeclStorageDoubleMap), - Simple(syn::Type), + Map(DeclStorageMap), + DoubleMap(DeclStorageDoubleMap), + Simple(syn::Type), } impl syn::parse::Parse for DeclStorageType { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - if input.peek(keyword::map) { - Ok(Self::Map(input.parse()?)) - } else if input.peek(keyword::double_map) { - Ok(Self::DoubleMap(input.parse()?)) - } else { - Ok(Self::Simple(input.parse()?)) - } - } + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + if input.peek(keyword::map) { + Ok(Self::Map(input.parse()?)) + } else if input.peek(keyword::double_map) { + Ok(Self::DoubleMap(input.parse()?)) + } else { + Ok(Self::Simple(input.parse()?)) + } + } } #[derive(Parse, ToTokens, Debug)] struct DeclStorageMap { - pub map_keyword: keyword::map, - pub hasher: Opt, - pub key: syn::Type, - pub ass_keyword: Token![=>], - pub value: syn::Type, + pub map_keyword: keyword::map, + pub hasher: Opt, + pub key: syn::Type, + 
pub ass_keyword: Token![=>], + pub value: syn::Type, } #[derive(Parse, ToTokens, Debug)] struct DeclStorageDoubleMap { - pub map_keyword: keyword::double_map, - pub hasher1: Opt, - pub key1: syn::Type, - pub comma_keyword: Token![,], - pub hasher2: Opt, - pub key2: syn::Type, - pub ass_keyword: Token![=>], - pub value: syn::Type, + pub map_keyword: keyword::double_map, + pub hasher1: Opt, + pub key1: syn::Type, + pub comma_keyword: Token![,], + pub hasher2: Opt, + pub key2: syn::Type, + pub ass_keyword: Token![=>], + pub value: syn::Type, } #[derive(ToTokens, Debug)] enum Hasher { - Blake2_256(keyword::opaque_blake2_256), - Blake2_128(keyword::opaque_blake2_128), - Blake2_128Concat(keyword::blake2_128_concat), - Twox256(keyword::opaque_twox_256), - Twox128(keyword::opaque_twox_128), - Twox64Concat(keyword::twox_64_concat), - Identity(keyword::identity), + Blake2_256(keyword::opaque_blake2_256), + Blake2_128(keyword::opaque_blake2_128), + Blake2_128Concat(keyword::blake2_128_concat), + Twox256(keyword::opaque_twox_256), + Twox128(keyword::opaque_twox_128), + Twox64Concat(keyword::twox_64_concat), + Identity(keyword::identity), } impl syn::parse::Parse for Hasher { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - let lookahead = input.lookahead1(); - if lookahead.peek(keyword::opaque_blake2_256) { - Ok(Self::Blake2_256(input.parse()?)) - } else if lookahead.peek(keyword::opaque_blake2_128) { - Ok(Self::Blake2_128(input.parse()?)) - } else if lookahead.peek(keyword::blake2_128_concat) { - Ok(Self::Blake2_128Concat(input.parse()?)) - } else if lookahead.peek(keyword::opaque_twox_256) { - Ok(Self::Twox256(input.parse()?)) - } else if lookahead.peek(keyword::opaque_twox_128) { - Ok(Self::Twox128(input.parse()?)) - } else if lookahead.peek(keyword::twox_64_concat) { - Ok(Self::Twox64Concat(input.parse()?)) - } else if lookahead.peek(keyword::identity) { - Ok(Self::Identity(input.parse()?)) - } else if lookahead.peek(keyword::tainted) { - 
Ok(Self::Blake2_128Concat(input.parse()?)) - } else if lookahead.peek(keyword::natural) { - Ok(Self::Twox64Concat(input.parse()?)) - } else if lookahead.peek(keyword::prehashed) { - Ok(Self::Identity(input.parse()?)) - } else { - Err(lookahead.error()) - } - } + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + let lookahead = input.lookahead1(); + if lookahead.peek(keyword::opaque_blake2_256) { + Ok(Self::Blake2_256(input.parse()?)) + } else if lookahead.peek(keyword::opaque_blake2_128) { + Ok(Self::Blake2_128(input.parse()?)) + } else if lookahead.peek(keyword::blake2_128_concat) { + Ok(Self::Blake2_128Concat(input.parse()?)) + } else if lookahead.peek(keyword::opaque_twox_256) { + Ok(Self::Twox256(input.parse()?)) + } else if lookahead.peek(keyword::opaque_twox_128) { + Ok(Self::Twox128(input.parse()?)) + } else if lookahead.peek(keyword::twox_64_concat) { + Ok(Self::Twox64Concat(input.parse()?)) + } else if lookahead.peek(keyword::identity) { + Ok(Self::Identity(input.parse()?)) + } else if lookahead.peek(keyword::tainted) { + Ok(Self::Blake2_128Concat(input.parse()?)) + } else if lookahead.peek(keyword::natural) { + Ok(Self::Twox64Concat(input.parse()?)) + } else if lookahead.peek(keyword::prehashed) { + Ok(Self::Identity(input.parse()?)) + } else { + Err(lookahead.error()) + } + } } #[derive(Parse, ToTokens, Debug)] struct DeclStorageDefault { - pub equal_token: Token![=], - pub expr: syn::Expr, + pub equal_token: Token![=], + pub expr: syn::Expr, } impl syn::parse::Parse for Opt { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - if input.peek(Token![=]) { - input.parse().map(|p| Opt { inner: Some(p) }) - } else { - Ok(Opt { inner: None }) - } - } + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + if input.peek(Token![=]) { + input.parse().map(|p| Opt { inner: Some(p) }) + } else { + Ok(Opt { inner: None }) + } + } } #[derive(Parse, ToTokens, Debug)] struct SetHasher { - pub hasher_keyword: 
keyword::hasher, - pub inner: ext::Parens, + pub hasher_keyword: keyword::hasher, + pub inner: ext::Parens, } impl_parse_for_opt!(SetHasher => keyword::hasher); impl From for super::HasherKind { - fn from(set_hasher: SetHasher) -> Self { - set_hasher.inner.content.into() - } + fn from(set_hasher: SetHasher) -> Self { + set_hasher.inner.content.into() + } } impl From for super::HasherKind { - fn from(hasher: Hasher) -> Self { - match hasher { - Hasher::Blake2_256(_) => super::HasherKind::Blake2_256, - Hasher::Blake2_128(_) => super::HasherKind::Blake2_128, - Hasher::Blake2_128Concat(_) => super::HasherKind::Blake2_128Concat, - Hasher::Twox256(_) => super::HasherKind::Twox256, - Hasher::Twox128(_) => super::HasherKind::Twox128, - Hasher::Twox64Concat(_) => super::HasherKind::Twox64Concat, - Hasher::Identity(_) => super::HasherKind::Identity, - } - } + fn from(hasher: Hasher) -> Self { + match hasher { + Hasher::Blake2_256(_) => super::HasherKind::Blake2_256, + Hasher::Blake2_128(_) => super::HasherKind::Blake2_128, + Hasher::Blake2_128Concat(_) => super::HasherKind::Blake2_128Concat, + Hasher::Twox256(_) => super::HasherKind::Twox256, + Hasher::Twox128(_) => super::HasherKind::Twox128, + Hasher::Twox64Concat(_) => super::HasherKind::Twox64Concat, + Hasher::Identity(_) => super::HasherKind::Identity, + } + } } fn get_module_instance( - instance: Option, - instantiable: Option, - default_instance: Option, + instance: Option, + instantiable: Option, + default_instance: Option, ) -> syn::Result> { - let right_syntax = "Should be $Instance: $Instantiable = $DefaultInstance"; - - match (instance, instantiable, default_instance) { - (Some(instance), Some(instantiable), default_instance) => { - Ok(Some(super::ModuleInstanceDef { - instance_generic: instance, - instance_trait: instantiable, - instance_default: default_instance, - })) - }, - (None, None, None) => Ok(None), - (Some(instance), None, _) => Err( - syn::Error::new( - instance.span(), - format!( - "Expect 
instantiable trait bound for instance: {}. {}", - instance, - right_syntax, - ) - ) - ), - (None, Some(instantiable), _) => Err( - syn::Error::new( - instantiable.span(), - format!( - "Expect instance generic for bound instantiable: {}. {}", - instantiable, - right_syntax, - ) - ) - ), - (None, _, Some(default_instance)) => Err( - syn::Error::new( - default_instance.span(), - format!( - "Expect instance generic for default instance: {}. {}", - default_instance, - right_syntax, - ) - ) - ), - } + let right_syntax = "Should be $Instance: $Instantiable = $DefaultInstance"; + + match (instance, instantiable, default_instance) { + (Some(instance), Some(instantiable), default_instance) => { + Ok(Some(super::ModuleInstanceDef { + instance_generic: instance, + instance_trait: instantiable, + instance_default: default_instance, + })) + } + (None, None, None) => Ok(None), + (Some(instance), None, _) => Err(syn::Error::new( + instance.span(), + format!( + "Expect instantiable trait bound for instance: {}. {}", + instance, right_syntax, + ), + )), + (None, Some(instantiable), _) => Err(syn::Error::new( + instantiable.span(), + format!( + "Expect instance generic for bound instantiable: {}. {}", + instantiable, right_syntax, + ), + )), + (None, _, Some(default_instance)) => Err(syn::Error::new( + default_instance.span(), + format!( + "Expect instance generic for default instance: {}. 
{}", + default_instance, right_syntax, + ), + )), + } } pub fn parse(input: syn::parse::ParseStream) -> syn::Result { - use syn::parse::Parse; - - let def = StorageDefinition::parse(input)?; - - let module_instance = get_module_instance( - def.mod_instance, - def.mod_instantiable, - def.mod_default_instance, - )?; - - let mut extra_genesis_config_lines = vec![]; - let mut extra_genesis_build = None; - - for line in def.extra_genesis.inner.into_iter() - .flat_map(|o| o.content.content.lines.inner.into_iter()) - { - match line { - AddExtraGenesisLineEnum::AddExtraGenesisLine(def) => { - extra_genesis_config_lines.push(super::ExtraGenesisLineDef{ - attrs: def.attrs.inner, - name: def.extra_field.content, - typ: def.extra_type, - default: def.default_value.inner.map(|o| o.expr), - }); - } - AddExtraGenesisLineEnum::AddExtraGenesisBuild(def) => { - if extra_genesis_build.is_some() { - return Err(syn::Error::new( - def.span(), - "Only one build expression allowed for extra genesis" - )) - } - - extra_genesis_build = Some(def.expr.content); - } - } - } - - let storage_lines = parse_storage_line_defs(def.content.content.inner.into_iter())?; - - Ok(super::DeclStorageDef { - hidden_crate: def.hidden_crate.inner.map(|i| i.ident.content), - visibility: def.visibility, - module_name: def.module_ident, - store_trait: def.ident, - module_runtime_generic: def.mod_param_generic, - module_runtime_trait: def.mod_param_bound, - where_clause: def.where_clause, - crate_name: def.crate_ident, - module_instance, - extra_genesis_build, - extra_genesis_config_lines, - storage_lines, - }) + use syn::parse::Parse; + + let def = StorageDefinition::parse(input)?; + + let module_instance = get_module_instance( + def.mod_instance, + def.mod_instantiable, + def.mod_default_instance, + )?; + + let mut extra_genesis_config_lines = vec![]; + let mut extra_genesis_build = None; + + for line in def + .extra_genesis + .inner + .into_iter() + .flat_map(|o| o.content.content.lines.inner.into_iter()) + { + 
match line { + AddExtraGenesisLineEnum::AddExtraGenesisLine(def) => { + extra_genesis_config_lines.push(super::ExtraGenesisLineDef { + attrs: def.attrs.inner, + name: def.extra_field.content, + typ: def.extra_type, + default: def.default_value.inner.map(|o| o.expr), + }); + } + AddExtraGenesisLineEnum::AddExtraGenesisBuild(def) => { + if extra_genesis_build.is_some() { + return Err(syn::Error::new( + def.span(), + "Only one build expression allowed for extra genesis", + )); + } + + extra_genesis_build = Some(def.expr.content); + } + } + } + + let storage_lines = parse_storage_line_defs(def.content.content.inner.into_iter())?; + + Ok(super::DeclStorageDef { + hidden_crate: def.hidden_crate.inner.map(|i| i.ident.content), + visibility: def.visibility, + module_name: def.module_ident, + store_trait: def.ident, + module_runtime_generic: def.mod_param_generic, + module_runtime_trait: def.mod_param_bound, + where_clause: def.where_clause, + crate_name: def.crate_ident, + module_instance, + extra_genesis_build, + extra_genesis_config_lines, + storage_lines, + }) } /// Parse the `DeclStorageLine` into `StorageLineDef`. 
fn parse_storage_line_defs( - defs: impl Iterator, + defs: impl Iterator, ) -> syn::Result> { - let mut storage_lines = Vec::::new(); - - for line in defs { - let getter = line.getter.inner.map(|o| o.getfn.content.ident); - let config = if let Some(config) = line.config.inner { - if let Some(ident) = config.expr.content { - Some(ident) - } else if let Some(ref ident) = getter { - Some(ident.clone()) - } else { - return Err(syn::Error::new( - config.span(), - "Invalid storage definition, couldn't find config identifier: storage must \ + let mut storage_lines = Vec::::new(); + + for line in defs { + let getter = line.getter.inner.map(|o| o.getfn.content.ident); + let config = if let Some(config) = line.config.inner { + if let Some(ident) = config.expr.content { + Some(ident) + } else if let Some(ref ident) = getter { + Some(ident.clone()) + } else { + return Err(syn::Error::new( + config.span(), + "Invalid storage definition, couldn't find config identifier: storage must \ either have a get identifier `get(fn ident)` or a defined config identifier \ `config(ident)`", - )) - } - } else { - None - }; - - if let Some(ref config) = config { - storage_lines.iter().filter_map(|sl| sl.config.as_ref()).try_for_each(|other_config| { - if other_config == config { - Err(syn::Error::new( - config.span(), - "`config()`/`get()` with the same name already defined.", - )) - } else { - Ok(()) - } - })?; - } - - let span = line.storage_type.span(); - let no_hasher_error = || syn::Error::new( - span, - "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead." 
- ); - - let storage_type = match line.storage_type { - DeclStorageType::Map(map) => super::StorageLineTypeDef::Map( - super::MapDef { - hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), - key: map.key, - value: map.value, - } - ), - DeclStorageType::DoubleMap(map) => super::StorageLineTypeDef::DoubleMap( - super::DoubleMapDef { - hasher1: map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), - hasher2: map.hasher2.inner.ok_or_else(no_hasher_error)?.into(), - key1: map.key1, - key2: map.key2, - value: map.value, - } - ), - DeclStorageType::Simple(expr) => super::StorageLineTypeDef::Simple(expr), - }; - - storage_lines.push(super::StorageLineDef { - attrs: line.attrs.inner, - visibility: line.visibility, - name: line.name, - getter, - config, - build: line.build.inner.map(|o| o.expr.content), - default_value: line.default_value.inner.map(|o| o.expr), - storage_type, - }) - } - - Ok(storage_lines) + )); + } + } else { + None + }; + + if let Some(ref config) = config { + storage_lines + .iter() + .filter_map(|sl| sl.config.as_ref()) + .try_for_each(|other_config| { + if other_config == config { + Err(syn::Error::new( + config.span(), + "`config()`/`get()` with the same name already defined.", + )) + } else { + Ok(()) + } + })?; + } + + let span = line.storage_type.span(); + let no_hasher_error = || { + syn::Error::new( + span, + "Default hasher has been removed, use explicit hasher(blake2_128_concat) instead.", + ) + }; + + let storage_type = match line.storage_type { + DeclStorageType::Map(map) => super::StorageLineTypeDef::Map(super::MapDef { + hasher: map.hasher.inner.ok_or_else(no_hasher_error)?.into(), + key: map.key, + value: map.value, + }), + DeclStorageType::DoubleMap(map) => { + super::StorageLineTypeDef::DoubleMap(super::DoubleMapDef { + hasher1: map.hasher1.inner.ok_or_else(no_hasher_error)?.into(), + hasher2: map.hasher2.inner.ok_or_else(no_hasher_error)?.into(), + key1: map.key1, + key2: map.key2, + value: map.value, + }) + } + 
DeclStorageType::Simple(expr) => super::StorageLineTypeDef::Simple(expr), + }; + + storage_lines.push(super::StorageLineDef { + attrs: line.attrs.inner, + visibility: line.visibility, + name: line.name, + getter, + config, + build: line.build.inner.map(|o| o.expr.content), + default_value: line.default_value.inner.map(|o| o.expr), + storage_type, + }) + } + + Ok(storage_lines) } diff --git a/frame/support/procedural/src/storage/storage_struct.rs b/frame/support/procedural/src/storage/storage_struct.rs index cbd477354e..0e72ea1c76 100644 --- a/frame/support/procedural/src/storage/storage_struct.rs +++ b/frame/support/procedural/src/storage/storage_struct.rs @@ -16,198 +16,195 @@ //! Implementation of storage structures and implementation of storage traits on them. -use proc_macro2::{TokenStream, Ident, Span}; +use super::{instance_trait::INHERENT_INSTANCE_NAME, DeclStorageDefExt, StorageLineTypeDef}; +use proc_macro2::{Ident, Span, TokenStream}; use quote::quote; -use super::{ - DeclStorageDefExt, StorageLineTypeDef, - instance_trait::INHERENT_INSTANCE_NAME, -}; fn from_optional_value_to_query(is_option: bool, default: &Option) -> TokenStream { - let default = default.as_ref().map(|d| quote!( #d )) - .unwrap_or_else(|| quote!( Default::default() )); - - if !is_option { - // raw type case - quote!( v.unwrap_or_else(|| #default ) ) - } else { - // Option<> type case - quote!( v.or_else(|| #default ) ) - } + let default = default + .as_ref() + .map(|d| quote!( #d )) + .unwrap_or_else(|| quote!(Default::default())); + + if !is_option { + // raw type case + quote!( v.unwrap_or_else(|| #default ) ) + } else { + // Option<> type case + quote!( v.or_else(|| #default ) ) + } } fn from_query_to_optional_value(is_option: bool) -> TokenStream { - if !is_option { - // raw type case - quote!( Some(v) ) - } else { - // Option<> type case - quote!( v ) - } + if !is_option { + // raw type case + quote!(Some(v)) + } else { + // Option<> type case + quote!(v) + } } pub fn 
decl_and_impl(scrate: &TokenStream, def: &DeclStorageDefExt) -> TokenStream { - let mut impls = TokenStream::new(); - - for line in &def.storage_lines { - - // Propagate doc attributes. - let attrs = &line.doc_attrs; - - let visibility = &line.visibility; - let optional_storage_runtime_comma = &line.optional_storage_runtime_comma; - let optional_storage_runtime_bound_comma = &line.optional_storage_runtime_bound_comma; - let optional_storage_where_clause = &line.optional_storage_where_clause; - let optional_instance_bound_optional_default = &def.optional_instance_bound_optional_default; - let optional_instance_bound = &def.optional_instance_bound; - let optional_instance = &def.optional_instance; - let name = &line.name; - - let struct_decl = quote!( - #( #[ #attrs ] )* - #visibility struct #name< - #optional_storage_runtime_bound_comma #optional_instance_bound_optional_default - >( - #scrate::sp_std::marker::PhantomData< - (#optional_storage_runtime_comma #optional_instance) - > - ) #optional_storage_where_clause; - ); - - let from_query_to_optional_value = from_query_to_optional_value(line.is_option); - let from_optional_value_to_query = - from_optional_value_to_query(line.is_option, &line.default_value); - - // Contains accessor to instance, used to get prefixes - let instance_or_inherent = if let Some(instance) = def.module_instance.as_ref() { - instance.instance_generic.clone() - } else { - Ident::new(INHERENT_INSTANCE_NAME, Span::call_site()) - }; - - let storage_name_str = syn::LitStr::new(&line.name.to_string(), line.name.span()); - - let storage_generator_trait = &line.storage_generator_trait; - let storage_struct = &line.storage_struct; - let impl_trait = quote!( #optional_storage_runtime_bound_comma #optional_instance_bound ); - let value_type = &line.value_type; - let query_type = &line.query_type; - - let struct_impl = match &line.storage_type { - StorageLineTypeDef::Simple(_) => { - quote!( - impl<#impl_trait> #scrate::#storage_generator_trait for 
#storage_struct - #optional_storage_where_clause - { - type Query = #query_type; - - fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_str.as_bytes() - } - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - ) - }, - StorageLineTypeDef::Map(map) => { - let hasher = map.hasher.to_storage_hasher_struct(); - quote!( - impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> - for #storage_struct #optional_storage_where_clause - { - fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_str.as_bytes() - } - } - - impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct - #optional_storage_where_clause - { - type Query = #query_type; - type Hasher = #scrate::#hasher; - - fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_str.as_bytes() - } - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - ) - }, - StorageLineTypeDef::DoubleMap(map) => { - let hasher1 = map.hasher1.to_storage_hasher_struct(); - let hasher2 = map.hasher2.to_storage_hasher_struct(); - quote!( - impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> - for #storage_struct #optional_storage_where_clause - { - fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_str.as_bytes() - } - } - - impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct 
- #optional_storage_where_clause - { - type Query = #query_type; - - type Hasher1 = #scrate::#hasher1; - - type Hasher2 = #scrate::#hasher2; - - fn module_prefix() -> &'static [u8] { - #instance_or_inherent::PREFIX.as_bytes() - } - - fn storage_prefix() -> &'static [u8] { - #storage_name_str.as_bytes() - } - - fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { - #from_optional_value_to_query - } - - fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { - #from_query_to_optional_value - } - } - ) - } - }; - - impls.extend(quote!( - #struct_decl - #struct_impl - )) - } - - impls + let mut impls = TokenStream::new(); + + for line in &def.storage_lines { + // Propagate doc attributes. + let attrs = &line.doc_attrs; + + let visibility = &line.visibility; + let optional_storage_runtime_comma = &line.optional_storage_runtime_comma; + let optional_storage_runtime_bound_comma = &line.optional_storage_runtime_bound_comma; + let optional_storage_where_clause = &line.optional_storage_where_clause; + let optional_instance_bound_optional_default = + &def.optional_instance_bound_optional_default; + let optional_instance_bound = &def.optional_instance_bound; + let optional_instance = &def.optional_instance; + let name = &line.name; + + let struct_decl = quote!( + #( #[ #attrs ] )* + #visibility struct #name< + #optional_storage_runtime_bound_comma #optional_instance_bound_optional_default + >( + #scrate::sp_std::marker::PhantomData< + (#optional_storage_runtime_comma #optional_instance) + > + ) #optional_storage_where_clause; + ); + + let from_query_to_optional_value = from_query_to_optional_value(line.is_option); + let from_optional_value_to_query = + from_optional_value_to_query(line.is_option, &line.default_value); + + // Contains accessor to instance, used to get prefixes + let instance_or_inherent = if let Some(instance) = def.module_instance.as_ref() { + instance.instance_generic.clone() + } else { + Ident::new(INHERENT_INSTANCE_NAME, 
Span::call_site()) + }; + + let storage_name_str = syn::LitStr::new(&line.name.to_string(), line.name.span()); + + let storage_generator_trait = &line.storage_generator_trait; + let storage_struct = &line.storage_struct; + let impl_trait = quote!( #optional_storage_runtime_bound_comma #optional_instance_bound ); + let value_type = &line.value_type; + let query_type = &line.query_type; + + let struct_impl = match &line.storage_type { + StorageLineTypeDef::Simple(_) => quote!( + impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct + #optional_storage_where_clause + { + type Query = #query_type; + + fn module_prefix() -> &'static [u8] { + #instance_or_inherent::PREFIX.as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + #storage_name_str.as_bytes() + } + + fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { + #from_optional_value_to_query + } + + fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { + #from_query_to_optional_value + } + } + ), + StorageLineTypeDef::Map(map) => { + let hasher = map.hasher.to_storage_hasher_struct(); + quote!( + impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> + for #storage_struct #optional_storage_where_clause + { + fn module_prefix() -> &'static [u8] { + #instance_or_inherent::PREFIX.as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + #storage_name_str.as_bytes() + } + } + + impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct + #optional_storage_where_clause + { + type Query = #query_type; + type Hasher = #scrate::#hasher; + + fn module_prefix() -> &'static [u8] { + #instance_or_inherent::PREFIX.as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + #storage_name_str.as_bytes() + } + + fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { + #from_optional_value_to_query + } + + fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { + #from_query_to_optional_value + } + } + ) + } + 
StorageLineTypeDef::DoubleMap(map) => { + let hasher1 = map.hasher1.to_storage_hasher_struct(); + let hasher2 = map.hasher2.to_storage_hasher_struct(); + quote!( + impl<#impl_trait> #scrate::storage::StoragePrefixedMap<#value_type> + for #storage_struct #optional_storage_where_clause + { + fn module_prefix() -> &'static [u8] { + #instance_or_inherent::PREFIX.as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + #storage_name_str.as_bytes() + } + } + + impl<#impl_trait> #scrate::#storage_generator_trait for #storage_struct + #optional_storage_where_clause + { + type Query = #query_type; + + type Hasher1 = #scrate::#hasher1; + + type Hasher2 = #scrate::#hasher2; + + fn module_prefix() -> &'static [u8] { + #instance_or_inherent::PREFIX.as_bytes() + } + + fn storage_prefix() -> &'static [u8] { + #storage_name_str.as_bytes() + } + + fn from_optional_value_to_query(v: Option<#value_type>) -> Self::Query { + #from_optional_value_to_query + } + + fn from_query_to_optional_value(v: Self::Query) -> Option<#value_type> { + #from_query_to_optional_value + } + } + ) + } + }; + + impls.extend(quote!( + #struct_decl + #struct_impl + )) + } + + impls } diff --git a/frame/support/procedural/src/storage/store_trait.rs b/frame/support/procedural/src/storage/store_trait.rs index 96281e408e..df3e2c90a9 100644 --- a/frame/support/procedural/src/storage/store_trait.rs +++ b/frame/support/procedural/src/storage/store_trait.rs @@ -16,39 +16,42 @@ //! Declaration of store trait and implementation on module structure. 
+use super::DeclStorageDefExt; use proc_macro2::TokenStream; use quote::quote; -use super::DeclStorageDefExt; pub fn decl_and_impl(def: &DeclStorageDefExt) -> TokenStream { - let decl_store_items = def.storage_lines.iter() - .map(|sline| &sline.name) - .fold(TokenStream::new(), |mut items, name| { - items.extend(quote!(type #name;)); - items - }); - - let impl_store_items = def.storage_lines.iter() - .fold(TokenStream::new(), |mut items, line| { - let name = &line.name; - let storage_struct = &line.storage_struct; - - items.extend(quote!(type #name = #storage_struct;)); - items - }); - - let visibility = &def.visibility; - let store_trait = &def.store_trait; - let module_struct = &def.module_struct; - let module_impl = &def.module_impl; - let where_clause = &def.where_clause; - - quote!( - #visibility trait #store_trait { - #decl_store_items - } - impl#module_impl #store_trait for #module_struct #where_clause { - #impl_store_items - } - ) + let decl_store_items = def.storage_lines.iter().map(|sline| &sline.name).fold( + TokenStream::new(), + |mut items, name| { + items.extend(quote!(type #name;)); + items + }, + ); + + let impl_store_items = def + .storage_lines + .iter() + .fold(TokenStream::new(), |mut items, line| { + let name = &line.name; + let storage_struct = &line.storage_struct; + + items.extend(quote!(type #name = #storage_struct;)); + items + }); + + let visibility = &def.visibility; + let store_trait = &def.store_trait; + let module_struct = &def.module_struct; + let module_impl = &def.module_impl; + let where_clause = &def.where_clause; + + quote!( + #visibility trait #store_trait { + #decl_store_items + } + impl#module_impl #store_trait for #module_struct #where_clause { + #impl_store_items + } + ) } diff --git a/frame/support/procedural/tools/derive/src/lib.rs b/frame/support/procedural/tools/derive/src/lib.rs index 0c5930892b..aa705b962e 100644 --- a/frame/support/procedural/tools/derive/src/lib.rs +++ 
b/frame/support/procedural/tools/derive/src/lib.rs @@ -22,32 +22,36 @@ use proc_macro::TokenStream; use proc_macro2::Span; -use syn::parse_macro_input; use quote::quote; +use syn::parse_macro_input; pub(crate) fn fields_idents( - fields: impl Iterator, + fields: impl Iterator, ) -> impl Iterator { - fields.enumerate().map(|(ix, field)| { - field.ident.clone().map(|i| quote!{#i}).unwrap_or_else(|| { - let f_ix: syn::Ident = syn::Ident::new(&format!("f_{}", ix), Span::call_site()); - quote!( #f_ix ) - }) - }) + fields.enumerate().map(|(ix, field)| { + field.ident.clone().map(|i| quote! {#i}).unwrap_or_else(|| { + let f_ix: syn::Ident = syn::Ident::new(&format!("f_{}", ix), Span::call_site()); + quote!( #f_ix ) + }) + }) } pub(crate) fn fields_access( - fields: impl Iterator, + fields: impl Iterator, ) -> impl Iterator { - fields.enumerate().map(|(ix, field)| { - field.ident.clone().map(|i| quote!( #i )).unwrap_or_else(|| { - let f_ix: syn::Index = syn::Index { - index: ix as u32, - span: Span::call_site(), - }; - quote!( #f_ix ) - }) - }) + fields.enumerate().map(|(ix, field)| { + field + .ident + .clone() + .map(|i| quote!( #i )) + .unwrap_or_else(|| { + let f_ix: syn::Index = syn::Index { + index: ix as u32, + span: Span::call_site(), + }; + quote!( #f_ix ) + }) + }) } /// self defined parsing struct. @@ -55,42 +59,42 @@ pub(crate) fn fields_access( /// parse implementation. #[proc_macro_derive(Parse)] pub fn derive_parse(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as syn::Item); - match item { - syn::Item::Struct(input) => derive_parse_struct(input), - _ => TokenStream::new(), // ignore - } + let item = parse_macro_input!(input as syn::Item); + match item { + syn::Item::Struct(input) => derive_parse_struct(input), + _ => TokenStream::new(), // ignore + } } fn derive_parse_struct(input: syn::ItemStruct) -> TokenStream { - let syn::ItemStruct { - ident, - generics, - fields, - .. 
- } = input; - let field_names = { - let name = fields_idents(fields.iter().map(Clone::clone)); - quote!{ - #( - #name, - )* - } - }; - let field = fields_idents(fields.iter().map(Clone::clone)); - let tokens = quote! { - impl #generics syn::parse::Parse for #ident #generics { - fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { - #( - let #field = input.parse()?; - )* - Ok(Self { - #field_names - }) - } - } - }; - tokens.into() + let syn::ItemStruct { + ident, + generics, + fields, + .. + } = input; + let field_names = { + let name = fields_idents(fields.iter().map(Clone::clone)); + quote! { + #( + #name, + )* + } + }; + let field = fields_idents(fields.iter().map(Clone::clone)); + let tokens = quote! { + impl #generics syn::parse::Parse for #ident #generics { + fn parse(input: syn::parse::ParseStream) -> syn::parse::Result { + #( + let #field = input.parse()?; + )* + Ok(Self { + #field_names + }) + } + } + }; + tokens.into() } /// self defined parsing struct or enum. @@ -100,72 +104,72 @@ fn derive_parse_struct(input: syn::ItemStruct) -> TokenStream { /// it only output fields (empty field act as a None). #[proc_macro_derive(ToTokens)] pub fn derive_totokens(input: TokenStream) -> TokenStream { - let item = parse_macro_input!(input as syn::Item); - match item { - syn::Item::Enum(input) => derive_totokens_enum(input), - syn::Item::Struct(input) => derive_totokens_struct(input), - _ => TokenStream::new(), // ignore - } + let item = parse_macro_input!(input as syn::Item); + match item { + syn::Item::Enum(input) => derive_totokens_enum(input), + syn::Item::Struct(input) => derive_totokens_struct(input), + _ => TokenStream::new(), // ignore + } } fn derive_totokens_struct(input: syn::ItemStruct) -> TokenStream { - let syn::ItemStruct { - ident, - generics, - fields, - .. - } = input; - - let fields = fields_access(fields.iter().map(Clone::clone)); - let tokens = quote! 
{ - - impl #generics quote::ToTokens for #ident #generics { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - #( - self.#fields.to_tokens(tokens); - )* - } - } - - }; - tokens.into() + let syn::ItemStruct { + ident, + generics, + fields, + .. + } = input; + + let fields = fields_access(fields.iter().map(Clone::clone)); + let tokens = quote! { + + impl #generics quote::ToTokens for #ident #generics { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + #( + self.#fields.to_tokens(tokens); + )* + } + } + + }; + tokens.into() } fn derive_totokens_enum(input: syn::ItemEnum) -> TokenStream { - let syn::ItemEnum { - ident, - generics, - variants, - .. - } = input; - let variants = variants.iter().map(|v| { - let v_ident = v.ident.clone(); - let fields_build = if v.fields.iter().count() > 0 { - let fields_id = fields_idents(v.fields.iter().map(Clone::clone)); - quote!( (#(#fields_id), *) ) - } else { - quote!() - }; - let field = fields_idents(v.fields.iter().map(Clone::clone)); - quote! { - #ident::#v_ident#fields_build => { - #( - #field.to_tokens(tokens); - )* - }, - } - }); - let tokens = quote! { - impl #generics quote::ToTokens for #ident #generics { - fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { - match self { - #( - #variants - )* - } - } - } - }; - - tokens.into() + let syn::ItemEnum { + ident, + generics, + variants, + .. + } = input; + let variants = variants.iter().map(|v| { + let v_ident = v.ident.clone(); + let fields_build = if v.fields.iter().count() > 0 { + let fields_id = fields_idents(v.fields.iter().map(Clone::clone)); + quote!( (#(#fields_id), *) ) + } else { + quote!() + }; + let field = fields_idents(v.fields.iter().map(Clone::clone)); + quote! { + #ident::#v_ident#fields_build => { + #( + #field.to_tokens(tokens); + )* + }, + } + }); + let tokens = quote! 
{ + impl #generics quote::ToTokens for #ident #generics { + fn to_tokens(&self, tokens: &mut proc_macro2::TokenStream) { + match self { + #( + #variants + )* + } + } + } + }; + + tokens.into() } diff --git a/frame/support/procedural/tools/src/lib.rs b/frame/support/procedural/tools/src/lib.rs index 102fee0e18..7df971575f 100644 --- a/frame/support/procedural/tools/src/lib.rs +++ b/frame/support/procedural/tools/src/lib.rs @@ -22,70 +22,72 @@ pub use frame_support_procedural_tools_derive::*; use proc_macro_crate::crate_name; -use syn::parse::Error; use quote::quote; +use syn::parse::Error; pub mod syn_ext; // FIXME #1569, remove the following functions, which are copied from sp-api-macros -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::Ident; fn generate_hidden_includes_mod_name(unique_id: &str) -> Ident { - Ident::new(&format!("sp_api_hidden_includes_{}", unique_id), Span::call_site()) + Ident::new( + &format!("sp_api_hidden_includes_{}", unique_id), + Span::call_site(), + ) } /// Generates the access to the `frame-support` crate. pub fn generate_crate_access(unique_id: &str, def_crate: &str) -> TokenStream { - if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { - quote::quote!( frame_support ) - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - quote::quote!( self::#mod_name::hidden_include ) - } + if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { + quote::quote!(frame_support) + } else { + let mod_name = generate_hidden_includes_mod_name(unique_id); + quote::quote!( self::#mod_name::hidden_include ) + } } /// Generates the hidden includes that are required to make the macro independent from its scope. 
pub fn generate_hidden_includes(unique_id: &str, def_crate: &str) -> TokenStream { - if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { - TokenStream::new() - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - - match crate_name(def_crate) { - Ok(name) => { - let name = Ident::new(&name, Span::call_site()); - quote::quote!( - #[doc(hidden)] - mod #mod_name { - pub extern crate #name as hidden_include; - } - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - quote!( #err ) - } - } + if std::env::var("CARGO_PKG_NAME").unwrap() == def_crate { + TokenStream::new() + } else { + let mod_name = generate_hidden_includes_mod_name(unique_id); - } + match crate_name(def_crate) { + Ok(name) => { + let name = Ident::new(&name, Span::call_site()); + quote::quote!( + #[doc(hidden)] + mod #mod_name { + pub extern crate #name as hidden_include; + } + ) + } + Err(e) => { + let err = Error::new(Span::call_site(), &e).to_compile_error(); + quote!( #err ) + } + } + } } // fn to remove white spaces around string types // (basically whitespaces around tokens) pub fn clean_type_string(input: &str) -> String { - input - .replace(" ::", "::") - .replace(":: ", "::") - .replace(" ,", ",") - .replace(" ;", ";") - .replace(" [", "[") - .replace("[ ", "[") - .replace(" ]", "]") - .replace(" (", "(") - .replace("( ", "(") - .replace(" )", ")") - .replace(" <", "<") - .replace("< ", "<") - .replace(" >", ">") + input + .replace(" ::", "::") + .replace(":: ", "::") + .replace(" ,", ",") + .replace(" ;", ";") + .replace(" [", "[") + .replace("[ ", "[") + .replace(" ]", "]") + .replace(" (", "(") + .replace("( ", "(") + .replace(" )", ")") + .replace(" <", "<") + .replace("< ", "<") + .replace(" >", ">") } diff --git a/frame/support/procedural/tools/src/syn_ext.rs b/frame/support/procedural/tools/src/syn_ext.rs index 4577437232..12646b3735 100644 --- a/frame/support/procedural/tools/src/syn_ext.rs +++ 
b/frame/support/procedural/tools/src/syn_ext.rs @@ -18,53 +18,59 @@ //! Extension to syn types, mainly for parsing // end::description[] -use syn::{visit::{Visit, self}, parse::{Parse, ParseStream, Result}, Ident}; +use frame_support_procedural_tools_derive::{Parse, ToTokens}; use proc_macro2::{TokenStream, TokenTree}; use quote::ToTokens; use std::iter::once; -use frame_support_procedural_tools_derive::{ToTokens, Parse}; +use syn::{ + parse::{Parse, ParseStream, Result}, + visit::{self, Visit}, + Ident, +}; /// stop parsing here getting remaining token as content /// Warn duplicate stream (part of) #[derive(Parse, ToTokens, Debug)] pub struct StopParse { - pub inner: TokenStream, + pub inner: TokenStream, } // inner macro really dependant on syn naming convention, do not export macro_rules! groups_impl { - ($name:ident, $tok:ident, $deli:ident, $parse:ident) => { - - #[derive(Debug)] - pub struct $name

{ - pub token: syn::token::$tok, - pub content: P, - } - - impl Parse for $name

{ - fn parse(input: ParseStream) -> Result { - let syn::group::$name { token, content } = syn::group::$parse(input)?; - let content = content.parse()?; - Ok($name { token, content, }) - } - } - - impl ToTokens for $name

{ - fn to_tokens(&self, tokens: &mut TokenStream) { - let mut inner_stream = TokenStream::new(); - self.content.to_tokens(&mut inner_stream); - let token_tree: proc_macro2::TokenTree = - proc_macro2::Group::new(proc_macro2::Delimiter::$deli, inner_stream).into(); - tokens.extend(once(token_tree)); - } - } - - impl Clone for $name

{ - fn clone(&self) -> Self { - Self { token: self.token.clone(), content: self.content.clone() } - } - } - } + ($name:ident, $tok:ident, $deli:ident, $parse:ident) => { + #[derive(Debug)] + pub struct $name

{ + pub token: syn::token::$tok, + pub content: P, + } + + impl Parse for $name

{ + fn parse(input: ParseStream) -> Result { + let syn::group::$name { token, content } = syn::group::$parse(input)?; + let content = content.parse()?; + Ok($name { token, content }) + } + } + + impl ToTokens for $name

{ + fn to_tokens(&self, tokens: &mut TokenStream) { + let mut inner_stream = TokenStream::new(); + self.content.to_tokens(&mut inner_stream); + let token_tree: proc_macro2::TokenTree = + proc_macro2::Group::new(proc_macro2::Delimiter::$deli, inner_stream).into(); + tokens.extend(once(token_tree)); + } + } + + impl Clone for $name

, + client: Arc, + _marker: std::marker::PhantomData

, } impl TransactionPayment { - /// Create new `TransactionPayment` with the given reference to the client. - pub fn new(client: Arc) -> Self { - TransactionPayment { client, _marker: Default::default() } - } + /// Create new `TransactionPayment` with the given reference to the client. + pub fn new(client: Arc) -> Self { + TransactionPayment { + client, + _marker: Default::default(), + } + } } /// Error type of this RPC api. pub enum Error { - /// The transaction was not decodable. - DecodeError, - /// The call to runtime failed. - RuntimeError, + /// The transaction was not decodable. + DecodeError, + /// The call to runtime failed. + RuntimeError, } impl From for i64 { - fn from(e: Error) -> i64 { - match e { - Error::RuntimeError => 1, - Error::DecodeError => 2, - } - } + fn from(e: Error) -> i64 { + match e { + Error::RuntimeError => 1, + Error::DecodeError => 2, + } + } } -impl TransactionPaymentApi<::Hash, RuntimeDispatchInfo> - for TransactionPayment +impl + TransactionPaymentApi<::Hash, RuntimeDispatchInfo> + for TransactionPayment where - Block: BlockT, - C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, - C::Api: TransactionPaymentRuntimeApi, - Balance: Codec + MaybeDisplay + MaybeFromStr, - Extrinsic: Codec + Send + Sync + 'static, + Block: BlockT, + C: Send + Sync + 'static + ProvideRuntimeApi + HeaderBackend, + C::Api: TransactionPaymentRuntimeApi, + Balance: Codec + MaybeDisplay + MaybeFromStr, + Extrinsic: Codec + Send + Sync + 'static, { - fn query_info( - &self, - encoded_xt: Bytes, - at: Option<::Hash> - ) -> Result> { - let api = self.client.runtime_api(); - let at = BlockId::hash(at.unwrap_or_else(|| + fn query_info( + &self, + encoded_xt: Bytes, + at: Option<::Hash>, + ) -> Result> { + let api = self.client.runtime_api(); + let at = BlockId::hash(at.unwrap_or_else(|| // If the block hash is not supplied assume the best block. 
- self.client.info().best_hash - )); + self.client.info().best_hash)); - let encoded_len = encoded_xt.len() as u32; + let encoded_len = encoded_xt.len() as u32; - let uxt: Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::DecodeError.into()), - message: "Unable to query dispatch info.".into(), - data: Some(format!("{:?}", e).into()), - })?; - api.query_info(&at, uxt, encoded_len).map_err(|e| RpcError { - code: ErrorCode::ServerError(Error::RuntimeError.into()), - message: "Unable to query dispatch info.".into(), - data: Some(format!("{:?}", e).into()), - }) - } + let uxt: Extrinsic = Decode::decode(&mut &*encoded_xt).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::DecodeError.into()), + message: "Unable to query dispatch info.".into(), + data: Some(format!("{:?}", e).into()), + })?; + api.query_info(&at, uxt, encoded_len).map_err(|e| RpcError { + code: ErrorCode::ServerError(Error::RuntimeError.into()), + message: "Unable to query dispatch info.".into(), + data: Some(format!("{:?}", e).into()), + }) + } } diff --git a/frame/transaction-payment/src/lib.rs b/frame/transaction-payment/src/lib.rs index 75809e0ed6..79c25d4d73 100644 --- a/frame/transaction-payment/src/lib.rs +++ b/frame/transaction-payment/src/lib.rs @@ -31,177 +31,184 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use frame_support::{ - decl_storage, decl_module, - traits::{Currency, Get, OnUnbalanced, ExistenceRequirement, WithdrawReason, Imbalance}, - weights::{Weight, DispatchInfo, PostDispatchInfo, GetDispatchInfo}, - dispatch::DispatchResult, + decl_module, decl_storage, + dispatch::DispatchResult, + traits::{Currency, ExistenceRequirement, Get, Imbalance, OnUnbalanced, WithdrawReason}, + weights::{DispatchInfo, GetDispatchInfo, PostDispatchInfo, Weight}, }; +use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; use sp_runtime::{ - 
Fixed128, - transaction_validity::{ - TransactionPriority, ValidTransaction, InvalidTransaction, TransactionValidityError, - TransactionValidity, - }, - traits::{ - Zero, Saturating, SignedExtension, SaturatedConversion, Convert, Dispatchable, - DispatchInfoOf, PostDispatchInfoOf, - }, + traits::{ + Convert, DispatchInfoOf, Dispatchable, PostDispatchInfoOf, SaturatedConversion, Saturating, + SignedExtension, Zero, + }, + transaction_validity::{ + InvalidTransaction, TransactionPriority, TransactionValidity, TransactionValidityError, + ValidTransaction, + }, + Fixed128, }; -use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; +use sp_std::prelude::*; type Multiplier = Fixed128; type BalanceOf = - <::Currency as Currency<::AccountId>>::Balance; + <::Currency as Currency<::AccountId>>::Balance; type NegativeImbalanceOf = - <::Currency as Currency<::AccountId>>::NegativeImbalance; + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: frame_system::Trait { - /// The currency type in which fees will be paid. - type Currency: Currency + Send + Sync; + /// The currency type in which fees will be paid. + type Currency: Currency + Send + Sync; - /// Handler for the unbalanced reduction when taking transaction fees. This is either one or - /// two separate imbalances, the first is the transaction fee paid, the second is the tip paid, - /// if any. - type OnTransactionPayment: OnUnbalanced>; + /// Handler for the unbalanced reduction when taking transaction fees. This is either one or + /// two separate imbalances, the first is the transaction fee paid, the second is the tip paid, + /// if any. + type OnTransactionPayment: OnUnbalanced>; - /// The fee to be paid for making a transaction; the base. - type TransactionBaseFee: Get>; + /// The fee to be paid for making a transaction; the base. + type TransactionBaseFee: Get>; - /// The fee to be paid for making a transaction; the per-byte portion. 
- type TransactionByteFee: Get>; + /// The fee to be paid for making a transaction; the per-byte portion. + type TransactionByteFee: Get>; - /// Convert a weight value into a deductible fee based on the currency type. - type WeightToFee: Convert>; + /// Convert a weight value into a deductible fee based on the currency type. + type WeightToFee: Convert>; - /// Update the multiplier of the next block, based on the previous block's weight. - type FeeMultiplierUpdate: Convert; + /// Update the multiplier of the next block, based on the previous block's weight. + type FeeMultiplierUpdate: Convert; } decl_storage! { - trait Store for Module as TransactionPayment { - pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::from_parts(0); - } + trait Store for Module as TransactionPayment { + pub NextFeeMultiplier get(fn next_fee_multiplier): Multiplier = Multiplier::from_parts(0); + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// The fee to be paid for making a transaction; the base. - const TransactionBaseFee: BalanceOf = T::TransactionBaseFee::get(); - - /// The fee to be paid for making a transaction; the per-byte portion. - const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); - - fn on_finalize() { - NextFeeMultiplier::mutate(|fm| { - *fm = T::FeeMultiplierUpdate::convert(*fm) - }); - } - - fn on_runtime_upgrade() -> Weight { - // TODO: Remove this code after on-chain upgrade from u32 to u64 weights - use sp_runtime::Fixed64; - use frame_support::migration::take_storage_value; - if let Some(old_next_fee_multiplier) = take_storage_value::(b"TransactionPayment", b"NextFeeMultiplier", &[]) { - let raw_multiplier = old_next_fee_multiplier.into_inner() as i128; - // Fixed64 used 10^9 precision, where Fixed128 uses 10^18, so we need to add 9 zeros. 
- let new_raw_multiplier: i128 = raw_multiplier.saturating_mul(1_000_000_000); - let new_next_fee_multiplier: Fixed128 = Fixed128::from_parts(new_raw_multiplier); - NextFeeMultiplier::put(new_next_fee_multiplier); - } - 0 - } - } + pub struct Module for enum Call where origin: T::Origin { + /// The fee to be paid for making a transaction; the base. + const TransactionBaseFee: BalanceOf = T::TransactionBaseFee::get(); + + /// The fee to be paid for making a transaction; the per-byte portion. + const TransactionByteFee: BalanceOf = T::TransactionByteFee::get(); + + fn on_finalize() { + NextFeeMultiplier::mutate(|fm| { + *fm = T::FeeMultiplierUpdate::convert(*fm) + }); + } + + fn on_runtime_upgrade() -> Weight { + // TODO: Remove this code after on-chain upgrade from u32 to u64 weights + use sp_runtime::Fixed64; + use frame_support::migration::take_storage_value; + if let Some(old_next_fee_multiplier) = take_storage_value::(b"TransactionPayment", b"NextFeeMultiplier", &[]) { + let raw_multiplier = old_next_fee_multiplier.into_inner() as i128; + // Fixed64 used 10^9 precision, where Fixed128 uses 10^18, so we need to add 9 zeros. + let new_raw_multiplier: i128 = raw_multiplier.saturating_mul(1_000_000_000); + let new_next_fee_multiplier: Fixed128 = Fixed128::from_parts(new_raw_multiplier); + NextFeeMultiplier::put(new_next_fee_multiplier); + } + 0 + } + } } -impl Module where - T::Call: Dispatchable, +impl Module +where + T::Call: Dispatchable, { - /// Query the data that we know about the fee of a given `call`. - /// - /// As this module is not and cannot be aware of the internals of a signed extension, it only - /// interprets them as some encoded value and takes their length into account. - /// - /// All dispatchables must be annotated with weight and will have some fee info. This function - /// always returns. 
- pub fn query_info( - unchecked_extrinsic: Extrinsic, - len: u32, - ) -> RuntimeDispatchInfo> - where - T: Send + Sync, - BalanceOf: Send + Sync, - { - // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some - // hassle for sure. We have to make it aware of the index of `ChargeTransactionPayment` in - // `Extra`. Alternatively, we could actually execute the tx's per-dispatch and record the - // balance of the sender before and after the pipeline.. but this is way too much hassle for - // a very very little potential gain in the future. - let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); - - let partial_fee = Self::compute_fee(len, &dispatch_info, 0u32.into()); - let DispatchInfo { weight, class, .. } = dispatch_info; - - RuntimeDispatchInfo { weight, class, partial_fee } - } - - /// Compute the final fee value for a particular transaction. - /// - /// The final fee is composed of: - /// - _base_fee_: This is the minimum amount a user pays for a transaction. - /// - _len_fee_: This is the amount paid merely to pay for size of the transaction. - /// - _weight_fee_: This amount is computed based on the weight of the transaction. Unlike - /// size-fee, this is not input dependent and reflects the _complexity_ of the execution - /// and the time it consumes. - /// - _targeted_fee_adjustment_: This is a multiplier that can tune the final fee based on - /// the congestion of the network. - /// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed - /// transactions can have a tip. 
- /// - /// final_fee = base_fee + targeted_fee_adjustment(len_fee + weight_fee) + tip; - pub fn compute_fee( - len: u32, - info: &DispatchInfoOf, - tip: BalanceOf, - ) -> BalanceOf { - if info.pays_fee { - let len = >::from(len); - let per_byte = T::TransactionByteFee::get(); - let len_fee = per_byte.saturating_mul(len); - let unadjusted_weight_fee = Self::weight_to_fee(info.weight); - - // the adjustable part of the fee - let adjustable_fee = len_fee.saturating_add(unadjusted_weight_fee); - let targeted_fee_adjustment = NextFeeMultiplier::get(); - let adjusted_fee = targeted_fee_adjustment.saturated_multiply_accumulate(adjustable_fee.saturated_into()); - - let base_fee = T::TransactionBaseFee::get(); - base_fee.saturating_add(adjusted_fee.saturated_into()).saturating_add(tip) - } else { - tip - } - } - - /// Compute the fee for the specified weight. - /// - /// This fee is already adjusted by the per block fee adjustment factor and is therefore - /// the share that the weight contributes to the overall fee of a transaction. - pub fn weight_to_fee_with_adjustment(weight: Weight) -> BalanceOf where - BalanceOf: From - { - NextFeeMultiplier::get().saturated_multiply_accumulate( - Self::weight_to_fee(weight) - ) - } - - fn weight_to_fee(weight: Weight) -> BalanceOf { - // cap the weight to the maximum defined in runtime, otherwise it will be the - // `Bounded` maximum of its data type, which is not desired. - let capped_weight = weight.min(::MaximumBlockWeight::get()); - T::WeightToFee::convert(capped_weight) - } + /// Query the data that we know about the fee of a given `call`. + /// + /// As this module is not and cannot be aware of the internals of a signed extension, it only + /// interprets them as some encoded value and takes their length into account. + /// + /// All dispatchables must be annotated with weight and will have some fee info. This function + /// always returns. 
+ pub fn query_info( + unchecked_extrinsic: Extrinsic, + len: u32, + ) -> RuntimeDispatchInfo> + where + T: Send + Sync, + BalanceOf: Send + Sync, + { + // NOTE: we can actually make it understand `ChargeTransactionPayment`, but would be some + // hassle for sure. We have to make it aware of the index of `ChargeTransactionPayment` in + // `Extra`. Alternatively, we could actually execute the tx's per-dispatch and record the + // balance of the sender before and after the pipeline.. but this is way too much hassle for + // a very very little potential gain in the future. + let dispatch_info = ::get_dispatch_info(&unchecked_extrinsic); + + let partial_fee = Self::compute_fee(len, &dispatch_info, 0u32.into()); + let DispatchInfo { weight, class, .. } = dispatch_info; + + RuntimeDispatchInfo { + weight, + class, + partial_fee, + } + } + + /// Compute the final fee value for a particular transaction. + /// + /// The final fee is composed of: + /// - _base_fee_: This is the minimum amount a user pays for a transaction. + /// - _len_fee_: This is the amount paid merely to pay for size of the transaction. + /// - _weight_fee_: This amount is computed based on the weight of the transaction. Unlike + /// size-fee, this is not input dependent and reflects the _complexity_ of the execution + /// and the time it consumes. + /// - _targeted_fee_adjustment_: This is a multiplier that can tune the final fee based on + /// the congestion of the network. + /// - (optional) _tip_: if included in the transaction, it will be added on top. Only signed + /// transactions can have a tip. 
+ /// + /// final_fee = base_fee + targeted_fee_adjustment(len_fee + weight_fee) + tip; + pub fn compute_fee( + len: u32, + info: &DispatchInfoOf, + tip: BalanceOf, + ) -> BalanceOf { + if info.pays_fee { + let len = >::from(len); + let per_byte = T::TransactionByteFee::get(); + let len_fee = per_byte.saturating_mul(len); + let unadjusted_weight_fee = Self::weight_to_fee(info.weight); + + // the adjustable part of the fee + let adjustable_fee = len_fee.saturating_add(unadjusted_weight_fee); + let targeted_fee_adjustment = NextFeeMultiplier::get(); + let adjusted_fee = targeted_fee_adjustment + .saturated_multiply_accumulate(adjustable_fee.saturated_into()); + + let base_fee = T::TransactionBaseFee::get(); + base_fee + .saturating_add(adjusted_fee.saturated_into()) + .saturating_add(tip) + } else { + tip + } + } + + /// Compute the fee for the specified weight. + /// + /// This fee is already adjusted by the per block fee adjustment factor and is therefore + /// the share that the weight contributes to the overall fee of a transaction. + pub fn weight_to_fee_with_adjustment(weight: Weight) -> BalanceOf + where + BalanceOf: From, + { + NextFeeMultiplier::get().saturated_multiply_accumulate(Self::weight_to_fee(weight)) + } + + fn weight_to_fee(weight: Weight) -> BalanceOf { + // cap the weight to the maximum defined in runtime, otherwise it will be the + // `Bounded` maximum of its data type, which is not desired. 
+ let capped_weight = weight.min(::MaximumBlockWeight::get()); + T::WeightToFee::convert(capped_weight) + } } /// Require the transactor pay for themselves and maybe include a tip to gain additional priority @@ -209,672 +216,695 @@ impl Module where #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct ChargeTransactionPayment(#[codec(compact)] BalanceOf); -impl ChargeTransactionPayment where - T::Call: Dispatchable, - BalanceOf: Send + Sync, +impl ChargeTransactionPayment +where + T::Call: Dispatchable, + BalanceOf: Send + Sync, { - /// utility constructor. Used only in client/factory code. - pub fn from(fee: BalanceOf) -> Self { - Self(fee) - } - - fn withdraw_fee( - &self, - who: &T::AccountId, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(BalanceOf, Option>), TransactionValidityError> { - let tip = self.0; - let fee = Module::::compute_fee(len as u32, info, tip); - - // Only mess with balances if fee is not zero. - if fee.is_zero() { - return Ok((fee, None)); - } - - match T::Currency::withdraw( - who, - fee, - if tip.is_zero() { - WithdrawReason::TransactionPayment.into() - } else { - WithdrawReason::TransactionPayment | WithdrawReason::Tip - }, - ExistenceRequirement::KeepAlive, - ) { - Ok(imbalance) => Ok((fee, Some(imbalance))), - Err(_) => Err(InvalidTransaction::Payment.into()), - } - } + /// utility constructor. Used only in client/factory code. + pub fn from(fee: BalanceOf) -> Self { + Self(fee) + } + + fn withdraw_fee( + &self, + who: &T::AccountId, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(BalanceOf, Option>), TransactionValidityError> { + let tip = self.0; + let fee = Module::::compute_fee(len as u32, info, tip); + + // Only mess with balances if fee is not zero. 
+ if fee.is_zero() { + return Ok((fee, None)); + } + + match T::Currency::withdraw( + who, + fee, + if tip.is_zero() { + WithdrawReason::TransactionPayment.into() + } else { + WithdrawReason::TransactionPayment | WithdrawReason::Tip + }, + ExistenceRequirement::KeepAlive, + ) { + Ok(imbalance) => Ok((fee, Some(imbalance))), + Err(_) => Err(InvalidTransaction::Payment.into()), + } + } } impl sp_std::fmt::Debug for ChargeTransactionPayment { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "ChargeTransactionPayment<{:?}>", self.0) - } - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "ChargeTransactionPayment<{:?}>", self.0) + } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } -impl SignedExtension for ChargeTransactionPayment where - BalanceOf: Send + Sync + From, - T::Call: Dispatchable, +impl SignedExtension for ChargeTransactionPayment +where + BalanceOf: Send + Sync + From, + T::Call: Dispatchable, { - const IDENTIFIER: &'static str = "ChargeTransactionPayment"; - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = (); - type Pre = (BalanceOf, Self::AccountId, Option>); - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - - fn validate( - &self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - let (fee, _) = self.withdraw_fee(who, info, len)?; - - let mut r = ValidTransaction::default(); - // NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which - // will be a bit more than setting the priority to tip. For now, this is enough. 
- r.priority = fee.saturated_into::(); - Ok(r) - } - - fn pre_dispatch( - self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize - ) -> Result { - let (_, imbalance) = self.withdraw_fee(who, info, len)?; - Ok((self.0, who.clone(), imbalance)) - } - - fn post_dispatch( - pre: Self::Pre, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - _len: usize, - _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - let (tip, who, imbalance) = pre; - if let Some(payed) = imbalance { - let refund = Module::::weight_to_fee_with_adjustment(post_info.calc_unspent(info)); - let actual_payment = match T::Currency::deposit_into_existing(&who, refund) { - Ok(refund_imbalance) => { - // The refund cannot be larger than the up front payed max weight. - // `PostDispatchInfo::calc_unspent` guards against such a case. - match payed.offset(refund_imbalance) { - Ok(actual_payment) => actual_payment, - Err(_) => return Err(InvalidTransaction::Payment.into()), - } - } - // We do not recreate the account using the refund. The up front payment - // is gone in that case. 
- Err(_) => payed, - }; - let imbalances = actual_payment.split(tip); - T::OnTransactionPayment::on_unbalanceds(Some(imbalances.0).into_iter() - .chain(Some(imbalances.1))); - } - Ok(()) - } + const IDENTIFIER: &'static str = "ChargeTransactionPayment"; + type AccountId = T::AccountId; + type Call = T::Call; + type AdditionalSigned = (); + type Pre = ( + BalanceOf, + Self::AccountId, + Option>, + ); + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + + fn validate( + &self, + who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + let (fee, _) = self.withdraw_fee(who, info, len)?; + + let mut r = ValidTransaction::default(); + // NOTE: we probably want to maximize the _fee (of any type) per weight unit_ here, which + // will be a bit more than setting the priority to tip. For now, this is enough. + r.priority = fee.saturated_into::(); + Ok(r) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + let (_, imbalance) = self.withdraw_fee(who, info, len)?; + Ok((self.0, who.clone(), imbalance)) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + let (tip, who, imbalance) = pre; + if let Some(payed) = imbalance { + let refund = Module::::weight_to_fee_with_adjustment(post_info.calc_unspent(info)); + let actual_payment = match T::Currency::deposit_into_existing(&who, refund) { + Ok(refund_imbalance) => { + // The refund cannot be larger than the up front payed max weight. + // `PostDispatchInfo::calc_unspent` guards against such a case. + match payed.offset(refund_imbalance) { + Ok(actual_payment) => actual_payment, + Err(_) => return Err(InvalidTransaction::Payment.into()), + } + } + // We do not recreate the account using the refund. 
The up front payment + // is gone in that case. + Err(_) => payed, + }; + let imbalances = actual_payment.split(tip); + T::OnTransactionPayment::on_unbalanceds( + Some(imbalances.0).into_iter().chain(Some(imbalances.1)), + ); + } + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use core::num::NonZeroI128; - use codec::Encode; - use frame_support::{ - impl_outer_dispatch, impl_outer_origin, parameter_types, - weights::{DispatchClass, DispatchInfo, PostDispatchInfo, GetDispatchInfo, Weight}, - }; - use pallet_balances::Call as BalancesCall; - use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; - use sp_core::H256; - use sp_runtime::{ - testing::{Header, TestXt}, - traits::{BlakeTwo256, IdentityLookup}, - Perbill, - }; - use std::cell::RefCell; - - const CALL: &::Call = - &Call::Balances(BalancesCall::transfer(2, 69)); - - impl_outer_dispatch! { - pub enum Call for Runtime where origin: Origin { - pallet_balances::Balances, - frame_system::System, - } - } - - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct Runtime; - - use frame_system as system; - impl_outer_origin!{ - pub enum Origin for Runtime {} - } - - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - - impl frame_system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = Call; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - - parameter_types! { - pub const ExistentialDeposit: u64 = 1; - } - - impl pallet_balances::Trait for Runtime { - type Balance = u64; - type Event = (); - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - thread_local! 
{ - static TRANSACTION_BASE_FEE: RefCell = RefCell::new(0); - static TRANSACTION_BYTE_FEE: RefCell = RefCell::new(1); - static WEIGHT_TO_FEE: RefCell = RefCell::new(1); - } - - pub struct TransactionBaseFee; - impl Get for TransactionBaseFee { - fn get() -> u64 { TRANSACTION_BASE_FEE.with(|v| *v.borrow()) } - } - - pub struct TransactionByteFee; - impl Get for TransactionByteFee { - fn get() -> u64 { TRANSACTION_BYTE_FEE.with(|v| *v.borrow()) } - } - - pub struct WeightToFee(u64); - impl Convert for WeightToFee { - fn convert(t: Weight) -> u64 { - WEIGHT_TO_FEE.with(|v| *v.borrow() * (t as u64)) - } - } - - impl Trait for Runtime { - type Currency = pallet_balances::Module; - type OnTransactionPayment = (); - type TransactionBaseFee = TransactionBaseFee; - type TransactionByteFee = TransactionByteFee; - type WeightToFee = WeightToFee; - type FeeMultiplierUpdate = (); - } - - type Balances = pallet_balances::Module; - type System = frame_system::Module; - type TransactionPayment = Module; - - pub struct ExtBuilder { - balance_factor: u64, - base_fee: u64, - byte_fee: u64, - weight_to_fee: u64 - } - - impl Default for ExtBuilder { - fn default() -> Self { - Self { - balance_factor: 1, - base_fee: 0, - byte_fee: 1, - weight_to_fee: 1, - } - } - } - - impl ExtBuilder { - pub fn base_fee(mut self, base_fee: u64) -> Self { - self.base_fee = base_fee; - self - } - pub fn byte_fee(mut self, byte_fee: u64) -> Self { - self.byte_fee = byte_fee; - self - } - pub fn weight_fee(mut self, weight_to_fee: u64) -> Self { - self.weight_to_fee = weight_to_fee; - self - } - pub fn balance_factor(mut self, factor: u64) -> Self { - self.balance_factor = factor; - self - } - fn set_constants(&self) { - TRANSACTION_BASE_FEE.with(|v| *v.borrow_mut() = self.base_fee); - TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); - WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee); - } - pub fn build(self) -> sp_io::TestExternalities { - self.set_constants(); - let mut t = 
frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: if self.balance_factor > 0 { - vec![ - (1, 10 * self.balance_factor), - (2, 20 * self.balance_factor), - (3, 30 * self.balance_factor), - (4, 40 * self.balance_factor), - (5, 50 * self.balance_factor), - (6, 60 * self.balance_factor) - ] - } else { - vec![] - }, - }.assimilate_storage(&mut t).unwrap(); - t.into() - } - } - - /// create a transaction info struct from weight. Handy to avoid building the whole struct. - pub fn info_from_weight(w: Weight) -> DispatchInfo { - DispatchInfo { weight: w, pays_fee: true, ..Default::default() } - } - - fn post_info_from_weight(w: Weight) -> PostDispatchInfo { - PostDispatchInfo { actual_weight: Some(w), } - } - - fn default_post_info() -> PostDispatchInfo { - PostDispatchInfo { actual_weight: None, } - } - - #[test] - fn signed_extension_transaction_payment_work() { - ExtBuilder::default() - .balance_factor(10) - .base_fee(5) - .build() - .execute_with(|| - { - let len = 10; - let pre = ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(5), len) - .unwrap(); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(5), &default_post_info(), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); - - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); - }); - } - - #[test] - fn signed_extension_transaction_payment_multiplied_refund_works() { - ExtBuilder::default() - .balance_factor(10) - .base_fee(5) - .build() - 
.execute_with(|| - { - let len = 10; - NextFeeMultiplier::put(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); - - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - // 5 base fee, 3/2 * 10 byte fee, 3/2 * 100 weight fee, 5 tip - assert_eq!(Balances::free_balance(2), 200 - 5 - 15 - 150 - 5); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() - ); - // 75 (3/2 of the returned 50 units of weight ) is refunded - assert_eq!(Balances::free_balance(2), 200 - 5 - 15 - 75 - 5); - }); - } - - #[test] - fn signed_extension_transaction_payment_is_bounded() { - ExtBuilder::default() - .balance_factor(1000) - .byte_fee(0) - .build() - .execute_with(|| - { - // maximum weight possible - assert!( - ChargeTransactionPayment::::from(0) - .pre_dispatch(&1, CALL, &info_from_weight(Weight::max_value()), 10) - .is_ok() - ); - // fee will be proportional to what is the actual maximum weight in the runtime. - assert_eq!( - Balances::free_balance(&1), - (10000 - ::MaximumBlockWeight::get()) as u64 - ); - }); - } - - #[test] - fn signed_extension_allows_free_transactions() { - ExtBuilder::default() - .base_fee(100) - .balance_factor(0) - .build() - .execute_with(|| - { - // 1 ain't have a penny. - assert_eq!(Balances::free_balance(1), 0); - - let len = 100; - - // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
- let operational_transaction = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: false, - }; - assert!( - ChargeTransactionPayment::::from(0) - .validate(&1, CALL, &operational_transaction , len) - .is_ok() - ); - - // like a InsecureFreeNormal - let free_transaction = DispatchInfo { - weight: 0, - class: DispatchClass::Normal, - pays_fee: true, - }; - assert!( - ChargeTransactionPayment::::from(0) - .validate(&1, CALL, &free_transaction , len) - .is_err() - ); - }); - } - - #[test] - fn signed_ext_length_fee_is_also_updated_per_congestion() { - ExtBuilder::default() - .base_fee(5) - .balance_factor(10) - .build() - .execute_with(|| - { - // all fees should be x1.5 - NextFeeMultiplier::put(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); - let len = 10; - - assert!( - ChargeTransactionPayment::::from(10) // tipped - .pre_dispatch(&1, CALL, &info_from_weight(3), len) - .is_ok() - ); - assert_eq!(Balances::free_balance(1), 100 - 10 - 5 - (10 + 3) * 3 / 2); - }) - } - - #[test] - fn query_info_works() { - let call = Call::Balances(BalancesCall::transfer(2, 69)); - let origin = 111111; - let extra = (); - let xt = TestXt::new(call, Some((origin, extra))); - let info = xt.get_dispatch_info(); - let ext = xt.encode(); - let len = ext.len() as u32; - ExtBuilder::default() - .base_fee(5) - .weight_fee(2) - .build() - .execute_with(|| - { - // all fees should be x1.5 - NextFeeMultiplier::put(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); - - assert_eq!( - TransactionPayment::query_info(xt, len), - RuntimeDispatchInfo { - weight: info.weight, - class: info.class, - partial_fee: - 5 /* base */ + use super::*; + use codec::Encode; + use core::num::NonZeroI128; + use frame_support::{ + impl_outer_dispatch, impl_outer_origin, parameter_types, + weights::{DispatchClass, DispatchInfo, GetDispatchInfo, PostDispatchInfo, Weight}, + }; + use pallet_balances::Call as BalancesCall; + use 
pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; + use sp_core::H256; + use sp_runtime::{ + testing::{Header, TestXt}, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + use std::cell::RefCell; + + const CALL: &::Call = + &Call::Balances(BalancesCall::transfer(2, 69)); + + impl_outer_dispatch! { + pub enum Call for Runtime where origin: Origin { + pallet_balances::Balances, + frame_system::System, + } + } + + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Runtime; + + use frame_system as system; + impl_outer_origin! { + pub enum Origin for Runtime {} + } + + parameter_types! { + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + + impl frame_system::Trait for Runtime { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = Call; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + } + + parameter_types! { + pub const ExistentialDeposit: u64 = 1; + } + + impl pallet_balances::Trait for Runtime { + type Balance = u64; + type Event = (); + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + } + thread_local! 
{ + static TRANSACTION_BASE_FEE: RefCell = RefCell::new(0); + static TRANSACTION_BYTE_FEE: RefCell = RefCell::new(1); + static WEIGHT_TO_FEE: RefCell = RefCell::new(1); + } + + pub struct TransactionBaseFee; + impl Get for TransactionBaseFee { + fn get() -> u64 { + TRANSACTION_BASE_FEE.with(|v| *v.borrow()) + } + } + + pub struct TransactionByteFee; + impl Get for TransactionByteFee { + fn get() -> u64 { + TRANSACTION_BYTE_FEE.with(|v| *v.borrow()) + } + } + + pub struct WeightToFee(u64); + impl Convert for WeightToFee { + fn convert(t: Weight) -> u64 { + WEIGHT_TO_FEE.with(|v| *v.borrow() * (t as u64)) + } + } + + impl Trait for Runtime { + type Currency = pallet_balances::Module; + type OnTransactionPayment = (); + type TransactionBaseFee = TransactionBaseFee; + type TransactionByteFee = TransactionByteFee; + type WeightToFee = WeightToFee; + type FeeMultiplierUpdate = (); + } + + type Balances = pallet_balances::Module; + type System = frame_system::Module; + type TransactionPayment = Module; + + pub struct ExtBuilder { + balance_factor: u64, + base_fee: u64, + byte_fee: u64, + weight_to_fee: u64, + } + + impl Default for ExtBuilder { + fn default() -> Self { + Self { + balance_factor: 1, + base_fee: 0, + byte_fee: 1, + weight_to_fee: 1, + } + } + } + + impl ExtBuilder { + pub fn base_fee(mut self, base_fee: u64) -> Self { + self.base_fee = base_fee; + self + } + pub fn byte_fee(mut self, byte_fee: u64) -> Self { + self.byte_fee = byte_fee; + self + } + pub fn weight_fee(mut self, weight_to_fee: u64) -> Self { + self.weight_to_fee = weight_to_fee; + self + } + pub fn balance_factor(mut self, factor: u64) -> Self { + self.balance_factor = factor; + self + } + fn set_constants(&self) { + TRANSACTION_BASE_FEE.with(|v| *v.borrow_mut() = self.base_fee); + TRANSACTION_BYTE_FEE.with(|v| *v.borrow_mut() = self.byte_fee); + WEIGHT_TO_FEE.with(|v| *v.borrow_mut() = self.weight_to_fee); + } + pub fn build(self) -> sp_io::TestExternalities { + self.set_constants(); + let 
mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: if self.balance_factor > 0 { + vec![ + (1, 10 * self.balance_factor), + (2, 20 * self.balance_factor), + (3, 30 * self.balance_factor), + (4, 40 * self.balance_factor), + (5, 50 * self.balance_factor), + (6, 60 * self.balance_factor), + ] + } else { + vec![] + }, + } + .assimilate_storage(&mut t) + .unwrap(); + t.into() + } + } + + /// create a transaction info struct from weight. Handy to avoid building the whole struct. + pub fn info_from_weight(w: Weight) -> DispatchInfo { + DispatchInfo { + weight: w, + pays_fee: true, + ..Default::default() + } + } + + fn post_info_from_weight(w: Weight) -> PostDispatchInfo { + PostDispatchInfo { + actual_weight: Some(w), + } + } + + fn default_post_info() -> PostDispatchInfo { + PostDispatchInfo { + actual_weight: None, + } + } + + #[test] + fn signed_extension_transaction_payment_work() { + ExtBuilder::default() + .balance_factor(10) + .base_fee(5) + .build() + .execute_with(|| { + let len = 10; + let pre = ChargeTransactionPayment::::from(0) + .pre_dispatch(&1, CALL, &info_from_weight(5), len) + .unwrap(); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + + assert!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(5), + &default_post_info(), + len, + &Ok(()) + ) + .is_ok()); + assert_eq!(Balances::free_balance(1), 100 - 5 - 5 - 10); + + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + assert!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + ) + .is_ok()); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 50 - 5); + }); + } + + #[test] + fn signed_extension_transaction_payment_multiplied_refund_works() { + ExtBuilder::default() + 
.balance_factor(10) + .base_fee(5) + .build() + .execute_with(|| { + let len = 10; + NextFeeMultiplier::put(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); + + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + // 5 base fee, 3/2 * 10 byte fee, 3/2 * 100 weight fee, 5 tip + assert_eq!(Balances::free_balance(2), 200 - 5 - 15 - 150 - 5); + + assert!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + ) + .is_ok()); + // 75 (3/2 of the returned 50 units of weight ) is refunded + assert_eq!(Balances::free_balance(2), 200 - 5 - 15 - 75 - 5); + }); + } + + #[test] + fn signed_extension_transaction_payment_is_bounded() { + ExtBuilder::default() + .balance_factor(1000) + .byte_fee(0) + .build() + .execute_with(|| { + // maximum weight possible + assert!(ChargeTransactionPayment::::from(0) + .pre_dispatch(&1, CALL, &info_from_weight(Weight::max_value()), 10) + .is_ok()); + // fee will be proportional to what is the actual maximum weight in the runtime. + assert_eq!( + Balances::free_balance(&1), + (10000 - ::MaximumBlockWeight::get()) as u64 + ); + }); + } + + #[test] + fn signed_extension_allows_free_transactions() { + ExtBuilder::default() + .base_fee(100) + .balance_factor(0) + .build() + .execute_with(|| { + // 1 ain't have a penny. + assert_eq!(Balances::free_balance(1), 0); + + let len = 100; + + // This is a completely free (and thus wholly insecure/DoS-ridden) transaction. 
+ let operational_transaction = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: false, + }; + assert!(ChargeTransactionPayment::::from(0) + .validate(&1, CALL, &operational_transaction, len) + .is_ok()); + + // like a InsecureFreeNormal + let free_transaction = DispatchInfo { + weight: 0, + class: DispatchClass::Normal, + pays_fee: true, + }; + assert!(ChargeTransactionPayment::::from(0) + .validate(&1, CALL, &free_transaction, len) + .is_err()); + }); + } + + #[test] + fn signed_ext_length_fee_is_also_updated_per_congestion() { + ExtBuilder::default() + .base_fee(5) + .balance_factor(10) + .build() + .execute_with(|| { + // all fees should be x1.5 + NextFeeMultiplier::put(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); + let len = 10; + + assert!(ChargeTransactionPayment::::from(10) // tipped + .pre_dispatch(&1, CALL, &info_from_weight(3), len) + .is_ok()); + assert_eq!(Balances::free_balance(1), 100 - 10 - 5 - (10 + 3) * 3 / 2); + }) + } + + #[test] + fn query_info_works() { + let call = Call::Balances(BalancesCall::transfer(2, 69)); + let origin = 111111; + let extra = (); + let xt = TestXt::new(call, Some((origin, extra))); + let info = xt.get_dispatch_info(); + let ext = xt.encode(); + let len = ext.len() as u32; + ExtBuilder::default() + .base_fee(5) + .weight_fee(2) + .build() + .execute_with(|| { + // all fees should be x1.5 + NextFeeMultiplier::put(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); + + assert_eq!( + TransactionPayment::query_info(xt, len), + RuntimeDispatchInfo { + weight: info.weight, + class: info.class, + partial_fee: 5 /* base */ + ( len as u64 /* len * 1 */ + info.weight.min(MaximumBlockWeight::get()) as u64 * 2 /* weight * weight_to_fee */ ) * 3 / 2 - }, - ); - - }); - } - - #[test] - fn compute_fee_works_without_multiplier() { - ExtBuilder::default() - .base_fee(100) - .byte_fee(10) - .balance_factor(0) - .build() - .execute_with(|| - { - // Next fee multiplier is zero - 
assert_eq!(NextFeeMultiplier::get(), Fixed128::from_natural(0)); - - // Tip only, no fees works - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: false, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 10), 10); - // No tip, only base fee works - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: true, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); - // Tip + base fee works - assert_eq!(Module::::compute_fee(0, &dispatch_info, 69), 169); - // Len (byte fee) + base fee works - assert_eq!(Module::::compute_fee(42, &dispatch_info, 0), 520); - // Weight fee + base fee works - let dispatch_info = DispatchInfo { - weight: 1000, - class: DispatchClass::Operational, - pays_fee: true, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 1100); - }); - } - - #[test] - fn compute_fee_works_with_multiplier() { - ExtBuilder::default() - .base_fee(100) - .byte_fee(10) - .balance_factor(0) - .build() - .execute_with(|| - { - // Add a next fee multiplier - NextFeeMultiplier::put(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); // = 1/2 = .5 - // Base fee is unaffected by multiplier - let dispatch_info = DispatchInfo { - weight: 0, - class: DispatchClass::Operational, - pays_fee: true, - }; - assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); - - // Everything works together :) - let dispatch_info = DispatchInfo { - weight: 123, - class: DispatchClass::Operational, - pays_fee: true, - }; - // 123 weight, 456 length, 100 base - // adjustable fee = (123 * 1) + (456 * 10) = 4683 - // adjusted fee = (4683 * .5) + 4683 = 7024.5 -> 7024 - // final fee = 100 + 7024 + 789 tip = 7913 - assert_eq!(Module::::compute_fee(456, &dispatch_info, 789), 7913); - }); - } - - #[test] - fn compute_fee_does_not_overflow() { - ExtBuilder::default() - .base_fee(100) - .byte_fee(10) - .balance_factor(0) - .build() - .execute_with(|| - { - // Overflow 
is handled - let dispatch_info = DispatchInfo { - weight: Weight::max_value(), - class: DispatchClass::Operational, - pays_fee: true, - }; - assert_eq!( - Module::::compute_fee( - ::max_value(), - &dispatch_info, - ::max_value() - ), - ::max_value() - ); - }); - } - - #[test] - fn refund_does_not_recreate_account() { - ExtBuilder::default() - .balance_factor(10) - .base_fee(5) - .build() - .execute_with(|| - { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - // kill the account between pre and post dispatch - assert!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2)).is_ok()); - assert_eq!(Balances::free_balance(2), 0); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(50), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(2), 0); - }); - } - - #[test] - fn actual_weight_higher_than_max_refunds_nothing() { - ExtBuilder::default() - .balance_factor(10) - .base_fee(5) - .build() - .execute_with(|| - { - let len = 10; - let pre = ChargeTransactionPayment::::from(5 /* tipped */) - .pre_dispatch(&2, CALL, &info_from_weight(100), len) - .unwrap(); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - - assert!( - ChargeTransactionPayment:: - ::post_dispatch(pre, &info_from_weight(100), &post_info_from_weight(101), len, &Ok(())) - .is_ok() - ); - assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); - }); - } - - // TODO Remove after u32 to u64 weights upgrade - #[test] - fn upgrade_to_fixed128_works() { - // TODO You can remove this from dev-dependencies after removing this test - use sp_storage::Storage; - use sp_runtime::Fixed64; - use frame_support::storage::generator::StorageValue; - use frame_support::traits::OnRuntimeUpgrade; - use core::num::NonZeroI128; - - let mut s = Storage::default(); 
- - let original_multiplier = Fixed64::from_rational(1, 2); - - let data = vec![ - ( - NextFeeMultiplier::storage_value_final_key().to_vec(), - original_multiplier.encode().to_vec() - ), - ]; - - s.top = data.into_iter().collect(); - - sp_io::TestExternalities::new(s).execute_with(|| { - let old_value = NextFeeMultiplier::get(); - assert!(old_value != Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); - - // Convert Fixed64(.5) to Fixed128(.5) - TransactionPayment::on_runtime_upgrade(); - let new_value = NextFeeMultiplier::get(); - assert_eq!(new_value, Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); - }); - } + }, + ); + }); + } + + #[test] + fn compute_fee_works_without_multiplier() { + ExtBuilder::default() + .base_fee(100) + .byte_fee(10) + .balance_factor(0) + .build() + .execute_with(|| { + // Next fee multiplier is zero + assert_eq!(NextFeeMultiplier::get(), Fixed128::from_natural(0)); + + // Tip only, no fees works + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: false, + }; + assert_eq!(Module::::compute_fee(0, &dispatch_info, 10), 10); + // No tip, only base fee works + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: true, + }; + assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); + // Tip + base fee works + assert_eq!(Module::::compute_fee(0, &dispatch_info, 69), 169); + // Len (byte fee) + base fee works + assert_eq!(Module::::compute_fee(42, &dispatch_info, 0), 520); + // Weight fee + base fee works + let dispatch_info = DispatchInfo { + weight: 1000, + class: DispatchClass::Operational, + pays_fee: true, + }; + assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 1100); + }); + } + + #[test] + fn compute_fee_works_with_multiplier() { + ExtBuilder::default() + .base_fee(100) + .byte_fee(10) + .balance_factor(0) + .build() + .execute_with(|| { + // Add a next fee multiplier + NextFeeMultiplier::put(Fixed128::from_rational(1, 
NonZeroI128::new(2).unwrap())); // = 1/2 = .5 + // Base fee is unaffected by multiplier + let dispatch_info = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: true, + }; + assert_eq!(Module::::compute_fee(0, &dispatch_info, 0), 100); + + // Everything works together :) + let dispatch_info = DispatchInfo { + weight: 123, + class: DispatchClass::Operational, + pays_fee: true, + }; + // 123 weight, 456 length, 100 base + // adjustable fee = (123 * 1) + (456 * 10) = 4683 + // adjusted fee = (4683 * .5) + 4683 = 7024.5 -> 7024 + // final fee = 100 + 7024 + 789 tip = 7913 + assert_eq!( + Module::::compute_fee(456, &dispatch_info, 789), + 7913 + ); + }); + } + + #[test] + fn compute_fee_does_not_overflow() { + ExtBuilder::default() + .base_fee(100) + .byte_fee(10) + .balance_factor(0) + .build() + .execute_with(|| { + // Overflow is handled + let dispatch_info = DispatchInfo { + weight: Weight::max_value(), + class: DispatchClass::Operational, + pays_fee: true, + }; + assert_eq!( + Module::::compute_fee( + ::max_value(), + &dispatch_info, + ::max_value() + ), + ::max_value() + ); + }); + } + + #[test] + fn refund_does_not_recreate_account() { + ExtBuilder::default() + .balance_factor(10) + .base_fee(5) + .build() + .execute_with(|| { + let len = 10; + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + // kill the account between pre and post dispatch + assert!(Balances::transfer(Some(2).into(), 3, Balances::free_balance(2)).is_ok()); + assert_eq!(Balances::free_balance(2), 0); + + assert!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(50), + len, + &Ok(()) + ) + .is_ok()); + assert_eq!(Balances::free_balance(2), 0); + }); + } + + #[test] + fn actual_weight_higher_than_max_refunds_nothing() { + ExtBuilder::default() + .balance_factor(10) + 
.base_fee(5) + .build() + .execute_with(|| { + let len = 10; + let pre = ChargeTransactionPayment::::from(5 /* tipped */) + .pre_dispatch(&2, CALL, &info_from_weight(100), len) + .unwrap(); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + + assert!(ChargeTransactionPayment::::post_dispatch( + pre, + &info_from_weight(100), + &post_info_from_weight(101), + len, + &Ok(()) + ) + .is_ok()); + assert_eq!(Balances::free_balance(2), 200 - 5 - 10 - 100 - 5); + }); + } + + // TODO Remove after u32 to u64 weights upgrade + #[test] + fn upgrade_to_fixed128_works() { + // TODO You can remove this from dev-dependencies after removing this test + use core::num::NonZeroI128; + use frame_support::storage::generator::StorageValue; + use frame_support::traits::OnRuntimeUpgrade; + use sp_runtime::Fixed64; + use sp_storage::Storage; + + let mut s = Storage::default(); + + let original_multiplier = Fixed64::from_rational(1, 2); + + let data = vec![( + NextFeeMultiplier::storage_value_final_key().to_vec(), + original_multiplier.encode().to_vec(), + )]; + + s.top = data.into_iter().collect(); + + sp_io::TestExternalities::new(s).execute_with(|| { + let old_value = NextFeeMultiplier::get(); + assert!(old_value != Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())); + + // Convert Fixed64(.5) to Fixed128(.5) + TransactionPayment::on_runtime_upgrade(); + let new_value = NextFeeMultiplier::get(); + assert_eq!( + new_value, + Fixed128::from_rational(1, NonZeroI128::new(2).unwrap()) + ); + }); + } } diff --git a/frame/treasury/src/benchmarking.rs b/frame/treasury/src/benchmarking.rs index f901576c95..703d34be18 100644 --- a/frame/treasury/src/benchmarking.rs +++ b/frame/treasury/src/benchmarking.rs @@ -20,222 +20,224 @@ use super::*; -use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; +use frame_benchmarking::{account, benchmarks}; use frame_support::traits::OnInitialize; +use frame_system::RawOrigin; use crate::Module as Treasury; const SEED: 
u32 = 0; // Create the pre-requisite information needed to create a treasury `propose_spend`. -fn setup_proposal(u: u32) -> ( - T::AccountId, - BalanceOf, - ::Source, +fn setup_proposal( + u: u32, +) -> ( + T::AccountId, + BalanceOf, + ::Source, ) { - let caller = account("caller", u, SEED); - let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100.into()); - let _ = T::Currency::make_free_balance_be(&caller, value); - let beneficiary = account("beneficiary", u, SEED); - let beneficiary_lookup = T::Lookup::unlookup(beneficiary); - (caller, value, beneficiary_lookup) + let caller = account("caller", u, SEED); + let value: BalanceOf = T::ProposalBondMinimum::get().saturating_mul(100.into()); + let _ = T::Currency::make_free_balance_be(&caller, value); + let beneficiary = account("beneficiary", u, SEED); + let beneficiary_lookup = T::Lookup::unlookup(beneficiary); + (caller, value, beneficiary_lookup) } // Create the pre-requisite information needed to create a `report_awesome`. fn setup_awesome(length: u32) -> (T::AccountId, Vec, T::AccountId) { - let caller = account("caller", 0, SEED); - let value = T::TipReportDepositBase::get() - + T::TipReportDepositPerByte::get() * length.into() - + T::Currency::minimum_balance(); - let _ = T::Currency::make_free_balance_be(&caller, value); - let reason = vec![0; length as usize]; - let awesome_person = account("awesome", 0, SEED); - (caller, reason, awesome_person) + let caller = account("caller", 0, SEED); + let value = T::TipReportDepositBase::get() + + T::TipReportDepositPerByte::get() * length.into() + + T::Currency::minimum_balance(); + let _ = T::Currency::make_free_balance_be(&caller, value); + let reason = vec![0; length as usize]; + let awesome_person = account("awesome", 0, SEED); + (caller, reason, awesome_person) } // Create the pre-requisite information needed to call `tip_new`. 
-fn setup_tip(r: u32, t: u32) -> - Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> -{ - let tippers_count = T::Tippers::count(); - - for i in 0 .. t { - let member = account("member", i, SEED); - T::Tippers::add(&member); - ensure!(T::Tippers::contains(&member), "failed to add tipper"); - } - - ensure!(T::Tippers::count() == tippers_count + t as usize, "problem creating tippers"); - let caller = account("member", t - 1, SEED); - let reason = vec![0; r as usize]; - let beneficiary = account("beneficiary", t, SEED); - let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Ok((caller, reason, beneficiary, value)) +fn setup_tip( + r: u32, + t: u32, +) -> Result<(T::AccountId, Vec, T::AccountId, BalanceOf), &'static str> { + let tippers_count = T::Tippers::count(); + + for i in 0..t { + let member = account("member", i, SEED); + T::Tippers::add(&member); + ensure!(T::Tippers::contains(&member), "failed to add tipper"); + } + + ensure!( + T::Tippers::count() == tippers_count + t as usize, + "problem creating tippers" + ); + let caller = account("member", t - 1, SEED); + let reason = vec![0; r as usize]; + let beneficiary = account("beneficiary", t, SEED); + let value = T::Currency::minimum_balance().saturating_mul(100.into()); + Ok((caller, reason, beneficiary, value)) } // Create `t` new tips for the tip proposal with `hash`. // This function automatically makes the tip able to close. fn create_tips(t: u32, hash: T::Hash, value: BalanceOf) -> Result<(), &'static str> { - for i in 0 .. 
t { - let caller = account("member", i, SEED); - ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); - Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; - } - Tips::::mutate(hash, |maybe_tip| { - if let Some(open_tip) = maybe_tip { - open_tip.closes = Some(T::BlockNumber::zero()); - } - }); - Ok(()) + for i in 0..t { + let caller = account("member", i, SEED); + ensure!(T::Tippers::contains(&caller), "caller is not a tipper"); + Treasury::::tip(RawOrigin::Signed(caller).into(), hash, value)?; + } + Tips::::mutate(hash, |maybe_tip| { + if let Some(open_tip) = maybe_tip { + open_tip.closes = Some(T::BlockNumber::zero()); + } + }); + Ok(()) } // Create proposals that are approved for use in `on_initialize`. fn create_approved_proposals(n: u32) -> Result<(), &'static str> { - for i in 0 .. n { - let (caller, value, lookup) = setup_proposal::(i); - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - lookup - )?; - let proposal_id = ProposalCount::get() - 1; - Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; - } - ensure!(Approvals::get().len() == n as usize, "Not all approved"); - Ok(()) + for i in 0..n { + let (caller, value, lookup) = setup_proposal::(i); + Treasury::::propose_spend(RawOrigin::Signed(caller).into(), value, lookup)?; + let proposal_id = ProposalCount::get() - 1; + Treasury::::approve_proposal(RawOrigin::Root.into(), proposal_id)?; + } + ensure!(Approvals::get().len() == n as usize, "Not all approved"); + Ok(()) } const MAX_BYTES: u32 = 16384; const MAX_TIPPERS: u32 = 100; benchmarks! { - _ { } - - propose_spend { - let u in 0 .. 1000; - let (caller, value, beneficiary_lookup) = setup_proposal::(u); - }: _(RawOrigin::Signed(caller), value, beneficiary_lookup) - - reject_proposal { - let u in 0 .. 
1000; - let (caller, value, beneficiary_lookup) = setup_proposal::(u); - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup - )?; - let proposal_id = ProposalCount::get() - 1; - }: _(RawOrigin::Root, proposal_id) - - approve_proposal { - let u in 0 .. 1000; - let (caller, value, beneficiary_lookup) = setup_proposal::(u); - Treasury::::propose_spend( - RawOrigin::Signed(caller).into(), - value, - beneficiary_lookup - )?; - let proposal_id = ProposalCount::get() - 1; - }: _(RawOrigin::Root, proposal_id) - - report_awesome { - let r in 0 .. MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - }: _(RawOrigin::Signed(caller), reason, awesome_person) - - retract_tip { - let r in 0 .. MAX_BYTES; - let (caller, reason, awesome_person) = setup_awesome::(r); - Treasury::::report_awesome( - RawOrigin::Signed(caller.clone()).into(), - reason.clone(), - awesome_person.clone() - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); - }: _(RawOrigin::Signed(caller), hash) - - tip_new { - let r in 0 .. MAX_BYTES; - let t in 1 .. MAX_TIPPERS; - - let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; - }: _(RawOrigin::Signed(caller), reason, beneficiary, value) - - tip { - let t in 1 .. MAX_TIPPERS; - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t - 1, hash.clone(), value)?; - let caller = account("member", t - 1, SEED); - }: _(RawOrigin::Signed(caller), hash, value) - - close_tip { - let t in 1 .. 
MAX_TIPPERS; - - // Make sure pot is funded - let pot_account = Treasury::::account_id(); - let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); - let _ = T::Currency::make_free_balance_be(&pot_account, value); - - // Set up a new tip proposal - let (member, reason, beneficiary, value) = setup_tip::(0, t)?; - let value = T::Currency::minimum_balance().saturating_mul(100.into()); - Treasury::::tip_new( - RawOrigin::Signed(member).into(), - reason.clone(), - beneficiary.clone(), - value - )?; - - // Create a bunch of tips - let reason_hash = T::Hashing::hash(&reason[..]); - let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); - ensure!(Tips::::contains_key(hash), "tip does not exist"); - create_tips::(t, hash.clone(), value)?; - - let caller = account("caller", t, SEED); - }: _(RawOrigin::Signed(caller), hash) - - on_initialize { - let p in 0 .. 100; - let pot_account = Treasury::::account_id(); - let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); - let _ = T::Currency::make_free_balance_be(&pot_account, value); - create_approved_proposals::(p)?; - }: { - Treasury::::on_initialize(T::BlockNumber::zero()); - } + _ { } + + propose_spend { + let u in 0 .. 1000; + let (caller, value, beneficiary_lookup) = setup_proposal::(u); + }: _(RawOrigin::Signed(caller), value, beneficiary_lookup) + + reject_proposal { + let u in 0 .. 1000; + let (caller, value, beneficiary_lookup) = setup_proposal::(u); + Treasury::::propose_spend( + RawOrigin::Signed(caller).into(), + value, + beneficiary_lookup + )?; + let proposal_id = ProposalCount::get() - 1; + }: _(RawOrigin::Root, proposal_id) + + approve_proposal { + let u in 0 .. 1000; + let (caller, value, beneficiary_lookup) = setup_proposal::(u); + Treasury::::propose_spend( + RawOrigin::Signed(caller).into(), + value, + beneficiary_lookup + )?; + let proposal_id = ProposalCount::get() - 1; + }: _(RawOrigin::Root, proposal_id) + + report_awesome { + let r in 0 .. 
MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + }: _(RawOrigin::Signed(caller), reason, awesome_person) + + retract_tip { + let r in 0 .. MAX_BYTES; + let (caller, reason, awesome_person) = setup_awesome::(r); + Treasury::::report_awesome( + RawOrigin::Signed(caller.clone()).into(), + reason.clone(), + awesome_person.clone() + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &awesome_person)); + }: _(RawOrigin::Signed(caller), hash) + + tip_new { + let r in 0 .. MAX_BYTES; + let t in 1 .. MAX_TIPPERS; + + let (caller, reason, beneficiary, value) = setup_tip::(r, t)?; + }: _(RawOrigin::Signed(caller), reason, beneficiary, value) + + tip { + let t in 1 .. MAX_TIPPERS; + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100.into()); + Treasury::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t - 1, hash.clone(), value)?; + let caller = account("member", t - 1, SEED); + }: _(RawOrigin::Signed(caller), hash, value) + + close_tip { + let t in 1 .. 
MAX_TIPPERS; + + // Make sure pot is funded + let pot_account = Treasury::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); + + // Set up a new tip proposal + let (member, reason, beneficiary, value) = setup_tip::(0, t)?; + let value = T::Currency::minimum_balance().saturating_mul(100.into()); + Treasury::::tip_new( + RawOrigin::Signed(member).into(), + reason.clone(), + beneficiary.clone(), + value + )?; + + // Create a bunch of tips + let reason_hash = T::Hashing::hash(&reason[..]); + let hash = T::Hashing::hash_of(&(&reason_hash, &beneficiary)); + ensure!(Tips::::contains_key(hash), "tip does not exist"); + create_tips::(t, hash.clone(), value)?; + + let caller = account("caller", t, SEED); + }: _(RawOrigin::Signed(caller), hash) + + on_initialize { + let p in 0 .. 100; + let pot_account = Treasury::::account_id(); + let value = T::Currency::minimum_balance().saturating_mul(1_000_000_000.into()); + let _ = T::Currency::make_free_balance_be(&pot_account, value); + create_approved_proposals::(p)?; + }: { + Treasury::::on_initialize(T::BlockNumber::zero()); + } } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_propose_spend::()); - assert_ok!(test_benchmark_reject_proposal::()); - assert_ok!(test_benchmark_approve_proposal::()); - assert_ok!(test_benchmark_report_awesome::()); - assert_ok!(test_benchmark_retract_tip::()); - assert_ok!(test_benchmark_tip_new::()); - assert_ok!(test_benchmark_tip::()); - assert_ok!(test_benchmark_close_tip::()); - assert_ok!(test_benchmark_on_initialize::()); - }); - } + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + 
assert_ok!(test_benchmark_propose_spend::()); + assert_ok!(test_benchmark_reject_proposal::()); + assert_ok!(test_benchmark_approve_proposal::()); + assert_ok!(test_benchmark_report_awesome::()); + assert_ok!(test_benchmark_retract_tip::()); + assert_ok!(test_benchmark_tip_new::()); + assert_ok!(test_benchmark_tip::()); + assert_ok!(test_benchmark_close_tip::()); + assert_ok!(test_benchmark_on_initialize::()); + }); + } } diff --git a/frame/treasury/src/lib.rs b/frame/treasury/src/lib.rs index bf70443d22..70a962d3c2 100644 --- a/frame/treasury/src/lib.rs +++ b/frame/treasury/src/lib.rs @@ -87,78 +87,79 @@ #![cfg_attr(not(feature = "std"), no_std)] -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use sp_std::prelude::*; -use frame_support::{decl_module, decl_storage, decl_event, ensure, print, decl_error, Parameter}; +use codec::{Decode, Encode}; +use frame_support::traits::{Contains, EnsureOrigin}; use frame_support::traits::{ - Currency, Get, Imbalance, OnUnbalanced, ExistenceRequirement::KeepAlive, - ReservableCurrency, WithdrawReason + Currency, ExistenceRequirement::KeepAlive, Get, Imbalance, OnUnbalanced, ReservableCurrency, + WithdrawReason, }; -use sp_runtime::{Permill, ModuleId, Percent, RuntimeDebug, traits::{ - Zero, StaticLookup, AccountIdConversion, Saturating, Hash, BadOrigin -}}; -use frame_support::weights::{Weight, MINIMUM_WEIGHT, SimpleDispatchInfo}; -use frame_support::traits::{Contains, EnsureOrigin}; -use codec::{Encode, Decode}; -use frame_system::{self as system, ensure_signed, ensure_root}; +use frame_support::weights::{SimpleDispatchInfo, Weight, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure, print, Parameter}; +use frame_system::{self as system, ensure_root, ensure_signed}; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; +use sp_runtime::{ + traits::{AccountIdConversion, BadOrigin, Hash, Saturating, StaticLookup, Zero}, + ModuleId, Percent, Permill, 
RuntimeDebug, +}; +use sp_std::prelude::*; -mod tests; mod benchmarking; +mod tests; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; -type PositiveImbalanceOf = <::Currency as Currency<::AccountId>>::PositiveImbalance; -type NegativeImbalanceOf = <::Currency as Currency<::AccountId>>::NegativeImbalance; - -/// The treasury's module id, used for deriving its sovereign account ID. -// const MODULE_ID: ModuleId = ModuleId(*b"py/trsry"); +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; +type PositiveImbalanceOf = + <::Currency as Currency<::AccountId>>::PositiveImbalance; +type NegativeImbalanceOf = + <::Currency as Currency<::AccountId>>::NegativeImbalance; pub trait Trait: frame_system::Trait { /// The treasury's module id, used for deriving its sovereign account ID. type ModuleId: Get; - /// The staking balance. - type Currency: Currency + ReservableCurrency; + /// The staking balance. + type Currency: Currency + ReservableCurrency; - /// Origin from which approvals must come. - type ApproveOrigin: EnsureOrigin; + /// Origin from which approvals must come. + type ApproveOrigin: EnsureOrigin; - /// Origin from which rejections must come. - type RejectOrigin: EnsureOrigin; + /// Origin from which rejections must come. + type RejectOrigin: EnsureOrigin; - /// Origin from which tippers must come. - type Tippers: Contains; + /// Origin from which tippers must come. + type Tippers: Contains; - /// The period for which a tip remains open after is has achieved threshold tippers. - type TipCountdown: Get; + /// The period for which a tip remains open after is has achieved threshold tippers. + type TipCountdown: Get; - /// The percent of the final tip which goes to the original reporter of the tip. - type TipFindersFee: Get; + /// The percent of the final tip which goes to the original reporter of the tip. + type TipFindersFee: Get; - /// The amount held on deposit for placing a tip report. 
- type TipReportDepositBase: Get>; + /// The amount held on deposit for placing a tip report. + type TipReportDepositBase: Get>; - /// The amount held on deposit per byte within the tip report reason. - type TipReportDepositPerByte: Get>; + /// The amount held on deposit per byte within the tip report reason. + type TipReportDepositPerByte: Get>; - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// Handler for the unbalanced decrease when slashing for a rejected proposal. - type ProposalRejection: OnUnbalanced>; + /// Handler for the unbalanced decrease when slashing for a rejected proposal. + type ProposalRejection: OnUnbalanced>; - /// Fraction of a proposal's value that should be bonded in order to place the proposal. - /// An accepted proposal gets these back. A rejected proposal does not. - type ProposalBond: Get; + /// Fraction of a proposal's value that should be bonded in order to place the proposal. + /// An accepted proposal gets these back. A rejected proposal does not. + type ProposalBond: Get; - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - type ProposalBondMinimum: Get>; + /// Minimum amount of funds that should be placed in a deposit for making a proposal. + type ProposalBondMinimum: Get>; - /// Period between successive spends. - type SpendPeriod: Get; + /// Period between successive spends. + type SpendPeriod: Get; - /// Percentage of spare funds (if any) that are burnt per spend period. - type Burn: Get; + /// Percentage of spare funds (if any) that are burnt per spend period. + type Burn: Get; } /// An index of a proposal. Just a `u32`. @@ -168,72 +169,72 @@ pub type ProposalIndex = u32; #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Encode, Decode, Clone, PartialEq, Eq, RuntimeDebug)] pub struct Proposal { - /// The account proposing it. 
- proposer: AccountId, - /// The (total) amount that should be paid if the proposal is accepted. - value: Balance, - /// The account to whom the payment should be made if the proposal is accepted. - beneficiary: AccountId, - /// The amount held on deposit (reserved) for making this proposal. - bond: Balance, + /// The account proposing it. + proposer: AccountId, + /// The (total) amount that should be paid if the proposal is accepted. + value: Balance, + /// The account to whom the payment should be made if the proposal is accepted. + beneficiary: AccountId, + /// The amount held on deposit (reserved) for making this proposal. + bond: Balance, } /// An open tipping "motion". Retains all details of a tip including information on the finder /// and the members who have voted. #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub struct OpenTip< - AccountId: Parameter, - Balance: Parameter, - BlockNumber: Parameter, - Hash: Parameter, + AccountId: Parameter, + Balance: Parameter, + BlockNumber: Parameter, + Hash: Parameter, > { - /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be - /// sensible. - reason: Hash, - /// The account to be tipped. - who: AccountId, - /// The account who began this tip and the amount held on deposit. - finder: Option<(AccountId, Balance)>, - /// The block number at which this tip will close if `Some`. If `None`, then no closing is - /// scheduled. - closes: Option, - /// The members who have voted for this tip. Sorted by AccountId. - tips: Vec<(AccountId, Balance)>, + /// The hash of the reason for the tip. The reason should be a human-readable UTF-8 encoded string. A URL would be + /// sensible. + reason: Hash, + /// The account to be tipped. + who: AccountId, + /// The account who began this tip and the amount held on deposit. + finder: Option<(AccountId, Balance)>, + /// The block number at which this tip will close if `Some`. 
If `None`, then no closing is + /// scheduled. + closes: Option, + /// The members who have voted for this tip. Sorted by AccountId. + tips: Vec<(AccountId, Balance)>, } decl_storage! { - trait Store for Module as Treasury { - /// Number of proposals that have been made. - ProposalCount get(fn proposal_count): ProposalIndex; - - /// Proposals that have been made. - Proposals get(fn proposals): - map hasher(twox_64_concat) ProposalIndex - => Option>>; - - /// Proposal indices that have been approved but not yet awarded. - Approvals get(fn approvals): Vec; - - /// Tips that are not yet completed. Keyed by the hash of `(reason, who)` from the value. - /// This has the insecure enumerable hash function since the key itself is already - /// guaranteed to be a secure hash. - pub Tips get(fn tips): - map hasher(twox_64_concat) T::Hash - => Option, T::BlockNumber, T::Hash>>; - - /// Simple preimage lookup from the reason's hash to the original data. Again, has an - /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. - pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; - } - add_extra_genesis { - build(|_config| { - // Create Treasury account - let _ = T::Currency::make_free_balance_be( - &>::account_id(), - T::Currency::minimum_balance(), - ); - }); - } + trait Store for Module as Treasury { + /// Number of proposals that have been made. + ProposalCount get(fn proposal_count): ProposalIndex; + + /// Proposals that have been made. + Proposals get(fn proposals): + map hasher(twox_64_concat) ProposalIndex + => Option>>; + + /// Proposal indices that have been approved but not yet awarded. + Approvals get(fn approvals): Vec; + + /// Tips that are not yet completed. Keyed by the hash of `(reason, who)` from the value. + /// This has the insecure enumerable hash function since the key itself is already + /// guaranteed to be a secure hash. 
+ pub Tips get(fn tips): + map hasher(twox_64_concat) T::Hash + => Option, T::BlockNumber, T::Hash>>; + + /// Simple preimage lookup from the reason's hash to the original data. Again, has an + /// insecure enumerable hash since the key is guaranteed to be the result of a secure hash. + pub Reasons get(fn reasons): map hasher(identity) T::Hash => Option>; + } + add_extra_genesis { + build(|_config| { + // Create Treasury account + let _ = T::Currency::make_free_balance_be( + &>::account_id(), + T::Currency::minimum_balance(), + ); + }); + } } decl_event!( @@ -269,468 +270,471 @@ decl_event!( ); decl_error! { - /// Error for the treasury module. - pub enum Error for Module { - /// Proposer's balance is too low. - InsufficientProposersBalance, - /// No proposal at that index. - InvalidProposalIndex, - /// The reason given is just too big. - ReasonTooBig, - /// The tip was already found/started. - AlreadyKnown, - /// The tip hash is unknown. - UnknownTip, - /// The account attempting to retract the tip is not the finder of the tip. - NotFinder, - /// The tip cannot be claimed/closed because there are not enough tippers yet. - StillOpen, - /// The tip cannot be claimed/closed because it's still in the countdown period. - Premature, - } + /// Error for the treasury module. + pub enum Error for Module { + /// Proposer's balance is too low. + InsufficientProposersBalance, + /// No proposal at that index. + InvalidProposalIndex, + /// The reason given is just too big. + ReasonTooBig, + /// The tip was already found/started. + AlreadyKnown, + /// The tip hash is unknown. + UnknownTip, + /// The account attempting to retract the tip is not the finder of the tip. + NotFinder, + /// The tip cannot be claimed/closed because there are not enough tippers yet. + StillOpen, + /// The tip cannot be claimed/closed because it's still in the countdown period. + Premature, + } } decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - /// Fraction of a proposal's value that should be bonded in order to place the proposal. - /// An accepted proposal gets these back. A rejected proposal does not. - const ProposalBond: Permill = T::ProposalBond::get(); + pub struct Module for enum Call where origin: T::Origin { + /// Fraction of a proposal's value that should be bonded in order to place the proposal. + /// An accepted proposal gets these back. A rejected proposal does not. + const ProposalBond: Permill = T::ProposalBond::get(); - /// Minimum amount of funds that should be placed in a deposit for making a proposal. - const ProposalBondMinimum: BalanceOf = T::ProposalBondMinimum::get(); + /// Minimum amount of funds that should be placed in a deposit for making a proposal. + const ProposalBondMinimum: BalanceOf = T::ProposalBondMinimum::get(); - /// Period between successive spends. - const SpendPeriod: T::BlockNumber = T::SpendPeriod::get(); + /// Period between successive spends. + const SpendPeriod: T::BlockNumber = T::SpendPeriod::get(); - /// Percentage of spare funds (if any) that are burnt per spend period. - const Burn: Permill = T::Burn::get(); + /// Percentage of spare funds (if any) that are burnt per spend period. + const Burn: Permill = T::Burn::get(); - /// The period for which a tip remains open after is has achieved threshold tippers. - const TipCountdown: T::BlockNumber = T::TipCountdown::get(); + /// The period for which a tip remains open after is has achieved threshold tippers. + const TipCountdown: T::BlockNumber = T::TipCountdown::get(); - /// The amount of the final tip which goes to the original reporter of the tip. - const TipFindersFee: Percent = T::TipFindersFee::get(); + /// The amount of the final tip which goes to the original reporter of the tip. + const TipFindersFee: Percent = T::TipFindersFee::get(); - /// The amount held on deposit for placing a tip report. 
- const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); + /// The amount held on deposit for placing a tip report. + const TipReportDepositBase: BalanceOf = T::TipReportDepositBase::get(); - /// The amount held on deposit per byte within the tip report reason. + /// The amount held on deposit per byte within the tip report reason. const TipReportDepositPerByte: BalanceOf = T::TipReportDepositPerByte::get(); - + /// The treasury's module id, used for deriving its sovereign account ID. const MouduleId: ModuleId = T::ModuleId::get(); - type Error = Error; - - fn deposit_event() = default; - - /// Put forward a suggestion for spending. A deposit proportional to the value - /// is reserved and slashed if the proposal is rejected. It is returned once the - /// proposal is awarded. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change, one extra DB entry. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] - fn propose_spend( - origin, - #[compact] value: BalanceOf, - beneficiary: ::Source - ) { - let proposer = ensure_signed(origin)?; - let beneficiary = T::Lookup::lookup(beneficiary)?; - - let bond = Self::calculate_bond(value); - T::Currency::reserve(&proposer, bond) - .map_err(|_| Error::::InsufficientProposersBalance)?; - - let c = Self::proposal_count(); - ProposalCount::put(c + 1); - >::insert(c, Proposal { proposer, value, beneficiary, bond }); - - Self::deposit_event(RawEvent::Proposed(c)); - } - - /// Reject a proposed spend. The original deposit will be slashed. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB clear. 
- /// # - #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] - fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { - T::RejectOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - let proposal = >::take(&proposal_id).ok_or(Error::::InvalidProposalIndex)?; - let value = proposal.bond; - let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; - T::ProposalRejection::on_unbalanced(imbalance); - - Self::deposit_event(Event::::Rejected(proposal_id, value)); - } - - /// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary - /// and the original deposit will be returned. - /// - /// # - /// - O(1). - /// - Limited storage reads. - /// - One DB change. - /// # - #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] - fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { - T::ApproveOrigin::try_origin(origin) - .map(|_| ()) - .or_else(ensure_root)?; - - ensure!(>::contains_key(proposal_id), Error::::InvalidProposalIndex); - Approvals::mutate(|v| v.push(proposal_id)); - } - - /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as - /// `TipReportDepositPerByte` for each byte in `reason`. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - `O(R)` where `R` length of `reason`. - /// - One balance operation. - /// - One storage mutation (codec `O(R)`). - /// - One event. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] - fn report_awesome(origin, reason: Vec, who: T::AccountId) { - let finder = ensure_signed(origin)?; - - const MAX_SENSIBLE_REASON_LENGTH: usize = 16384; - ensure!(reason.len() <= MAX_SENSIBLE_REASON_LENGTH, Error::::ReasonTooBig); - - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); - - let deposit = T::TipReportDepositBase::get() - + T::TipReportDepositPerByte::get() * (reason.len() as u32).into(); - T::Currency::reserve(&finder, deposit)?; - - Reasons::::insert(&reason_hash, &reason); - let finder = Some((finder, deposit)); - let tip = OpenTip { reason: reason_hash, who, finder, closes: None, tips: vec![] }; - Tips::::insert(&hash, tip); - Self::deposit_event(RawEvent::NewTip(hash)); - } - - /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. - /// - /// If successful, the original deposit will be unreserved. - /// - /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` - /// must have been reported by the signing account through `report_awesome` (and not - /// through `tip_new`). - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// Emits `TipRetracted` if successful. - /// - /// # - /// - `O(T)` - /// - One balance operation. - /// - Two storage removals (one read, codec `O(T)`). - /// - One event. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn retract_tip(origin, hash: T::Hash) { - let who = ensure_signed(origin)?; - let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; - let (finder, deposit) = tip.finder.ok_or(Error::::NotFinder)?; - ensure!(finder == who, Error::::NotFinder); - - Reasons::::remove(&tip.reason); - Tips::::remove(&hash); - let _ = T::Currency::unreserve(&who, deposit); - Self::deposit_event(RawEvent::TipRetracted(hash)); - } - - /// Give a tip for something new; no finder's fee will be taken. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be - /// a UTF-8-encoded URL. - /// - `who`: The account which should be credited for the tip. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `NewTip` if successful. - /// - /// # - /// - `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. `T` is - /// naturally capped as a membership set, `R` is limited through transaction-size. - /// - Two storage insertions (codecs `O(R)`, `O(T)`), one read `O(1)`. - /// - One event. 
- /// # - #[weight = SimpleDispatchInfo::FixedNormal(150_000_000)] - fn tip_new(origin, reason: Vec, who: T::AccountId, tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - let reason_hash = T::Hashing::hash(&reason[..]); - ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); - let hash = T::Hashing::hash_of(&(&reason_hash, &who)); - - Reasons::::insert(&reason_hash, &reason); - Self::deposit_event(RawEvent::NewTip(hash.clone())); - let tips = vec![(tipper, tip_value)]; - let tip = OpenTip { reason: reason_hash, who, finder: None, closes: None, tips }; - Tips::::insert(&hash, tip); - } - - /// Declare a tip value for an already-open tip. - /// - /// The dispatch origin for this call must be _Signed_ and the signing account must be a - /// member of the `Tippers` set. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary - /// account ID. - /// - `tip_value`: The amount of tip that the sender would like to give. The median tip - /// value of active tippers will be given to the `who`. - /// - /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period - /// has started. - /// - /// # - /// - `O(T)` - /// - One storage mutation (codec `O(T)`), one storage read `O(1)`. - /// - Up to one event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn tip(origin, hash: T::Hash, tip_value: BalanceOf) { - let tipper = ensure_signed(origin)?; - ensure!(T::Tippers::contains(&tipper), BadOrigin); - - let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { - Self::deposit_event(RawEvent::TipClosing(hash.clone())); - } - Tips::::insert(&hash, tip); - } - - /// Close and payout a tip. 
- /// - /// The dispatch origin for this call must be _Signed_. - /// - /// The tip identified by `hash` must have finished its countdown period. - /// - /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed - /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. - /// - /// # - /// - `O(T)` - /// - One storage retrieval (codec `O(T)`) and two removals. - /// - Up to three balance operations. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] - fn close_tip(origin, hash: T::Hash) { - ensure_signed(origin)?; - - let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; - let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; - ensure!(system::Module::::block_number() >= *n, Error::::Premature); - // closed. - Reasons::::remove(&tip.reason); - Tips::::remove(hash); - Self::payout_tip(hash, tip); - } - - fn on_initialize(n: T::BlockNumber) -> Weight { - // Check to see if we should spend some funds! - if (n % T::SpendPeriod::get()).is_zero() { - Self::spend_funds(); - } - - MINIMUM_WEIGHT - } - } + type Error = Error; + + fn deposit_event() = default; + + /// Put forward a suggestion for spending. A deposit proportional to the value + /// is reserved and slashed if the proposal is rejected. It is returned once the + /// proposal is awarded. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB change, one extra DB entry. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(500_000_000)] + fn propose_spend( + origin, + #[compact] value: BalanceOf, + beneficiary: ::Source + ) { + let proposer = ensure_signed(origin)?; + let beneficiary = T::Lookup::lookup(beneficiary)?; + + let bond = Self::calculate_bond(value); + T::Currency::reserve(&proposer, bond) + .map_err(|_| Error::::InsufficientProposersBalance)?; + + let c = Self::proposal_count(); + ProposalCount::put(c + 1); + >::insert(c, Proposal { proposer, value, beneficiary, bond }); + + Self::deposit_event(RawEvent::Proposed(c)); + } + + /// Reject a proposed spend. The original deposit will be slashed. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB clear. + /// # + #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] + fn reject_proposal(origin, #[compact] proposal_id: ProposalIndex) { + T::RejectOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + let proposal = >::take(&proposal_id).ok_or(Error::::InvalidProposalIndex)?; + let value = proposal.bond; + let imbalance = T::Currency::slash_reserved(&proposal.proposer, value).0; + T::ProposalRejection::on_unbalanced(imbalance); + + Self::deposit_event(Event::::Rejected(proposal_id, value)); + } + + /// Approve a proposal. At a later time, the proposal will be allocated to the beneficiary + /// and the original deposit will be returned. + /// + /// # + /// - O(1). + /// - Limited storage reads. + /// - One DB change. + /// # + #[weight = SimpleDispatchInfo::FixedOperational(100_000_000)] + fn approve_proposal(origin, #[compact] proposal_id: ProposalIndex) { + T::ApproveOrigin::try_origin(origin) + .map(|_| ()) + .or_else(ensure_root)?; + + ensure!(>::contains_key(proposal_id), Error::::InvalidProposalIndex); + Approvals::mutate(|v| v.push(proposal_id)); + } + + /// Report something `reason` that deserves a tip and claim any eventual the finder's fee. + /// + /// The dispatch origin for this call must be _Signed_. 
+ /// + /// Payment: `TipReportDepositBase` will be reserved from the origin account, as well as + /// `TipReportDepositPerByte` for each byte in `reason`. + /// + /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be + /// a UTF-8-encoded URL. + /// - `who`: The account which should be credited for the tip. + /// + /// Emits `NewTip` if successful. + /// + /// # + /// - `O(R)` where `R` length of `reason`. + /// - One balance operation. + /// - One storage mutation (codec `O(R)`). + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(100_000_000)] + fn report_awesome(origin, reason: Vec, who: T::AccountId) { + let finder = ensure_signed(origin)?; + + const MAX_SENSIBLE_REASON_LENGTH: usize = 16384; + ensure!(reason.len() <= MAX_SENSIBLE_REASON_LENGTH, Error::::ReasonTooBig); + + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + ensure!(!Tips::::contains_key(&hash), Error::::AlreadyKnown); + + let deposit = T::TipReportDepositBase::get() + + T::TipReportDepositPerByte::get() * (reason.len() as u32).into(); + T::Currency::reserve(&finder, deposit)?; + + Reasons::::insert(&reason_hash, &reason); + let finder = Some((finder, deposit)); + let tip = OpenTip { reason: reason_hash, who, finder, closes: None, tips: vec![] }; + Tips::::insert(&hash, tip); + Self::deposit_event(RawEvent::NewTip(hash)); + } + + /// Retract a prior tip-report from `report_awesome`, and cancel the process of tipping. + /// + /// If successful, the original deposit will be unreserved. + /// + /// The dispatch origin for this call must be _Signed_ and the tip identified by `hash` + /// must have been reported by the signing account through `report_awesome` (and not + /// through `tip_new`). + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. 
This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// Emits `TipRetracted` if successful. + /// + /// # + /// - `O(T)` + /// - One balance operation. + /// - Two storage removals (one read, codec `O(T)`). + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn retract_tip(origin, hash: T::Hash) { + let who = ensure_signed(origin)?; + let tip = Tips::::get(&hash).ok_or(Error::::UnknownTip)?; + let (finder, deposit) = tip.finder.ok_or(Error::::NotFinder)?; + ensure!(finder == who, Error::::NotFinder); + + Reasons::::remove(&tip.reason); + Tips::::remove(&hash); + let _ = T::Currency::unreserve(&who, deposit); + Self::deposit_event(RawEvent::TipRetracted(hash)); + } + + /// Give a tip for something new; no finder's fee will be taken. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `reason`: The reason for, or the thing that deserves, the tip; generally this will be + /// a UTF-8-encoded URL. + /// - `who`: The account which should be credited for the tip. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `NewTip` if successful. + /// + /// # + /// - `O(R + T)` where `R` length of `reason`, `T` is the number of tippers. `T` is + /// naturally capped as a membership set, `R` is limited through transaction-size. + /// - Two storage insertions (codecs `O(R)`, `O(T)`), one read `O(1)`. + /// - One event. 
+ /// # + #[weight = SimpleDispatchInfo::FixedNormal(150_000_000)] + fn tip_new(origin, reason: Vec, who: T::AccountId, tip_value: BalanceOf) { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + let reason_hash = T::Hashing::hash(&reason[..]); + ensure!(!Reasons::::contains_key(&reason_hash), Error::::AlreadyKnown); + let hash = T::Hashing::hash_of(&(&reason_hash, &who)); + + Reasons::::insert(&reason_hash, &reason); + Self::deposit_event(RawEvent::NewTip(hash.clone())); + let tips = vec![(tipper, tip_value)]; + let tip = OpenTip { reason: reason_hash, who, finder: None, closes: None, tips }; + Tips::::insert(&hash, tip); + } + + /// Declare a tip value for an already-open tip. + /// + /// The dispatch origin for this call must be _Signed_ and the signing account must be a + /// member of the `Tippers` set. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the hash of the original tip `reason` and the beneficiary + /// account ID. + /// - `tip_value`: The amount of tip that the sender would like to give. The median tip + /// value of active tippers will be given to the `who`. + /// + /// Emits `TipClosing` if the threshold of tippers has been reached and the countdown period + /// has started. + /// + /// # + /// - `O(T)` + /// - One storage mutation (codec `O(T)`), one storage read `O(1)`. + /// - Up to one event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn tip(origin, hash: T::Hash, tip_value: BalanceOf) { + let tipper = ensure_signed(origin)?; + ensure!(T::Tippers::contains(&tipper), BadOrigin); + + let mut tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + if Self::insert_tip_and_check_closing(&mut tip, tipper, tip_value) { + Self::deposit_event(RawEvent::TipClosing(hash.clone())); + } + Tips::::insert(&hash, tip); + } + + /// Close and payout a tip. 
+ /// + /// The dispatch origin for this call must be _Signed_. + /// + /// The tip identified by `hash` must have finished its countdown period. + /// + /// - `hash`: The identity of the open tip for which a tip value is declared. This is formed + /// as the hash of the tuple of the original tip `reason` and the beneficiary account ID. + /// + /// # + /// - `O(T)` + /// - One storage retrieval (codec `O(T)`) and two removals. + /// - Up to three balance operations. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(50_000_000)] + fn close_tip(origin, hash: T::Hash) { + ensure_signed(origin)?; + + let tip = Tips::::get(hash).ok_or(Error::::UnknownTip)?; + let n = tip.closes.as_ref().ok_or(Error::::StillOpen)?; + ensure!(system::Module::::block_number() >= *n, Error::::Premature); + // closed. + Reasons::::remove(&tip.reason); + Tips::::remove(hash); + Self::payout_tip(hash, tip); + } + + fn on_initialize(n: T::BlockNumber) -> Weight { + // Check to see if we should spend some funds! + if (n % T::SpendPeriod::get()).is_zero() { + Self::spend_funds(); + } + + MINIMUM_WEIGHT + } + } } impl Module { - // Add public immutables and private mutables. + // Add public immutables and private mutables. - /// The account ID of the treasury pot. - /// - /// This actually does computation. If you need to keep using it, then make sure you cache the - /// value and only call this once. - pub fn account_id() -> T::AccountId { + /// The account ID of the treasury pot. + /// + /// This actually does computation. If you need to keep using it, then make sure you cache the + /// value and only call this once. + pub fn account_id() -> T::AccountId { T::ModuleId::get().into_account() - } - - /// The needed bond for a proposal whose spend is `value`. 
- fn calculate_bond(value: BalanceOf) -> BalanceOf { - T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value) - } - - /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it - /// closes, if so, then deposit the relevant event and set closing accordingly. - /// - /// `O(T)` and one storage access. - fn insert_tip_and_check_closing( - tip: &mut OpenTip, T::BlockNumber, T::Hash>, - tipper: T::AccountId, - tip_value: BalanceOf, - ) -> bool { - match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { - Ok(pos) => tip.tips[pos] = (tipper, tip_value), - Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), - } - Self::retain_active_tips(&mut tip.tips); - let threshold = (T::Tippers::count() + 1) / 2; - if tip.tips.len() >= threshold && tip.closes.is_none() { - tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); - true - } else { - false - } - } - - /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. - fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { - let members = T::Tippers::sorted_members(); - let mut members_iter = members.iter(); - let mut member = members_iter.next(); - tips.retain(|(ref a, _)| loop { - match member { - None => break false, - Some(m) if m > a => break false, - Some(m) => { - member = members_iter.next(); - if m < a { - continue - } else { - break true; - } - } - } - }); - } - - /// Execute the payout of a tip. - /// - /// Up to three balance operations. - /// Plus `O(T)` (`T` is Tippers length). - fn payout_tip(hash: T::Hash, tip: OpenTip, T::BlockNumber, T::Hash>) { - let mut tips = tip.tips; - Self::retain_active_tips(&mut tips); - tips.sort_by_key(|i| i.1); - let treasury = Self::account_id(); - let max_payout = Self::pot(); - let mut payout = tips[tips.len() / 2].1.min(max_payout); - if let Some((finder, deposit)) = tip.finder { - let _ = T::Currency::unreserve(&finder, deposit); - if finder != tip.who { - // pay out the finder's fee. 
- let finders_fee = T::TipFindersFee::get() * payout; - payout -= finders_fee; - // this should go through given we checked it's at most the free balance, but still - // we only make a best-effort. - let _ = T::Currency::transfer(&treasury, &finder, finders_fee, KeepAlive); - } - } - // same as above: best-effort only. - let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); - Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); - } - - // Spend some money! - fn spend_funds() { - let mut budget_remaining = Self::pot(); - Self::deposit_event(RawEvent::Spending(budget_remaining)); - - let mut missed_any = false; - let mut imbalance = >::zero(); - Approvals::mutate(|v| { - v.retain(|&index| { - // Should always be true, but shouldn't panic if false or we're screwed. - if let Some(p) = Self::proposals(index) { - if p.value <= budget_remaining { - budget_remaining -= p.value; - >::remove(index); - - // return their deposit. - let _ = T::Currency::unreserve(&p.proposer, p.bond); - - // provide the allocation. - imbalance.subsume(T::Currency::deposit_creating(&p.beneficiary, p.value)); - - Self::deposit_event(RawEvent::Awarded(index, p.value, p.beneficiary)); - false - } else { - missed_any = true; - true - } - } else { - false - } - }); - }); - - if !missed_any { - // burn some proportion of the remaining budget if we run a surplus. - let burn = (T::Burn::get() * budget_remaining).min(budget_remaining); - budget_remaining -= burn; - imbalance.subsume(T::Currency::burn(burn)); - Self::deposit_event(RawEvent::Burnt(burn)) - } - - // Must never be an error, but better to be safe. 
- // proof: budget_remaining is account free balance minus ED; - // Thus we can't spend more than account free balance minus ED; - // Thus account is kept alive; qed; - if let Err(problem) = T::Currency::settle( - &Self::account_id(), - imbalance, - WithdrawReason::Transfer.into(), - KeepAlive - ) { - print("Inconsistent state - couldn't settle imbalance for funds spent by treasury"); - // Nothing else to do here. - drop(problem); - } - - Self::deposit_event(RawEvent::Rollover(budget_remaining)); - } - - /// Return the amount of money in the pot. - // The existential deposit is not part of the pot so treasury account never gets deleted. - fn pot() -> BalanceOf { - T::Currency::free_balance(&Self::account_id()) - // Must never be less than 0 but better be safe. - .saturating_sub(T::Currency::minimum_balance()) - } + } + + /// The needed bond for a proposal whose spend is `value`. + fn calculate_bond(value: BalanceOf) -> BalanceOf { + T::ProposalBondMinimum::get().max(T::ProposalBond::get() * value) + } + + /// Given a mutable reference to an `OpenTip`, insert the tip into it and check whether it + /// closes, if so, then deposit the relevant event and set closing accordingly. + /// + /// `O(T)` and one storage access. + fn insert_tip_and_check_closing( + tip: &mut OpenTip, T::BlockNumber, T::Hash>, + tipper: T::AccountId, + tip_value: BalanceOf, + ) -> bool { + match tip.tips.binary_search_by_key(&&tipper, |x| &x.0) { + Ok(pos) => tip.tips[pos] = (tipper, tip_value), + Err(pos) => tip.tips.insert(pos, (tipper, tip_value)), + } + Self::retain_active_tips(&mut tip.tips); + let threshold = (T::Tippers::count() + 1) / 2; + if tip.tips.len() >= threshold && tip.closes.is_none() { + tip.closes = Some(system::Module::::block_number() + T::TipCountdown::get()); + true + } else { + false + } + } + + /// Remove any non-members of `Tippers` from a `tips` vector. `O(T)`. 
+ fn retain_active_tips(tips: &mut Vec<(T::AccountId, BalanceOf)>) { + let members = T::Tippers::sorted_members(); + let mut members_iter = members.iter(); + let mut member = members_iter.next(); + tips.retain(|(ref a, _)| loop { + match member { + None => break false, + Some(m) if m > a => break false, + Some(m) => { + member = members_iter.next(); + if m < a { + continue; + } else { + break true; + } + } + } + }); + } + + /// Execute the payout of a tip. + /// + /// Up to three balance operations. + /// Plus `O(T)` (`T` is Tippers length). + fn payout_tip( + hash: T::Hash, + tip: OpenTip, T::BlockNumber, T::Hash>, + ) { + let mut tips = tip.tips; + Self::retain_active_tips(&mut tips); + tips.sort_by_key(|i| i.1); + let treasury = Self::account_id(); + let max_payout = Self::pot(); + let mut payout = tips[tips.len() / 2].1.min(max_payout); + if let Some((finder, deposit)) = tip.finder { + let _ = T::Currency::unreserve(&finder, deposit); + if finder != tip.who { + // pay out the finder's fee. + let finders_fee = T::TipFindersFee::get() * payout; + payout -= finders_fee; + // this should go through given we checked it's at most the free balance, but still + // we only make a best-effort. + let _ = T::Currency::transfer(&treasury, &finder, finders_fee, KeepAlive); + } + } + // same as above: best-effort only. + let _ = T::Currency::transfer(&treasury, &tip.who, payout, KeepAlive); + Self::deposit_event(RawEvent::TipClosed(hash, tip.who, payout)); + } + + // Spend some money! + fn spend_funds() { + let mut budget_remaining = Self::pot(); + Self::deposit_event(RawEvent::Spending(budget_remaining)); + + let mut missed_any = false; + let mut imbalance = >::zero(); + Approvals::mutate(|v| { + v.retain(|&index| { + // Should always be true, but shouldn't panic if false or we're screwed. + if let Some(p) = Self::proposals(index) { + if p.value <= budget_remaining { + budget_remaining -= p.value; + >::remove(index); + + // return their deposit. 
+ let _ = T::Currency::unreserve(&p.proposer, p.bond); + + // provide the allocation. + imbalance.subsume(T::Currency::deposit_creating(&p.beneficiary, p.value)); + + Self::deposit_event(RawEvent::Awarded(index, p.value, p.beneficiary)); + false + } else { + missed_any = true; + true + } + } else { + false + } + }); + }); + + if !missed_any { + // burn some proportion of the remaining budget if we run a surplus. + let burn = (T::Burn::get() * budget_remaining).min(budget_remaining); + budget_remaining -= burn; + imbalance.subsume(T::Currency::burn(burn)); + Self::deposit_event(RawEvent::Burnt(burn)) + } + + // Must never be an error, but better to be safe. + // proof: budget_remaining is account free balance minus ED; + // Thus we can't spend more than account free balance minus ED; + // Thus account is kept alive; qed; + if let Err(problem) = T::Currency::settle( + &Self::account_id(), + imbalance, + WithdrawReason::Transfer.into(), + KeepAlive, + ) { + print("Inconsistent state - couldn't settle imbalance for funds spent by treasury"); + // Nothing else to do here. + drop(problem); + } + + Self::deposit_event(RawEvent::Rollover(budget_remaining)); + } + + /// Return the amount of money in the pot. + // The existential deposit is not part of the pot so treasury account never gets deleted. + fn pot() -> BalanceOf { + T::Currency::free_balance(&Self::account_id()) + // Must never be less than 0 but better be safe. + .saturating_sub(T::Currency::minimum_balance()) + } } impl OnUnbalanced> for Module { - fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { - let numeric_amount = amount.peek(); + fn on_nonzero_unbalanced(amount: NegativeImbalanceOf) { + let numeric_amount = amount.peek(); - // Must resolve into existing but better to be safe. - let _ = T::Currency::resolve_creating(&Self::account_id(), amount); + // Must resolve into existing but better to be safe. 
+ let _ = T::Currency::resolve_creating(&Self::account_id(), amount); - Self::deposit_event(RawEvent::Deposit(numeric_amount)); - } + Self::deposit_event(RawEvent::Deposit(numeric_amount)); + } } diff --git a/frame/treasury/src/tests.rs b/frame/treasury/src/tests.rs index 8752ba746b..72c1d39d14 100644 --- a/frame/treasury/src/tests.rs +++ b/frame/treasury/src/tests.rs @@ -19,515 +19,607 @@ #![cfg(test)] use super::*; -use std::cell::RefCell; use frame_support::{ - assert_noop, assert_ok, impl_outer_origin, impl_outer_event, parameter_types, weights::Weight, - traits::{Contains, OnInitialize} + assert_noop, assert_ok, impl_outer_event, impl_outer_origin, parameter_types, + traits::{Contains, OnInitialize}, + weights::Weight, }; use sp_core::H256; use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup, BadOrigin}, + testing::Header, + traits::{BadOrigin, BlakeTwo256, IdentityLookup}, + Perbill, }; +use std::cell::RefCell; impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } - mod treasury { - // Re-export needed for `impl_outer_event!`. - pub use super::super::*; + // Re-export needed for `impl_outer_event!`. + pub use super::super::*; } impl_outer_event! { - pub enum Event for Test { - system, - pallet_balances, - treasury, - } + pub enum Event for Test { + system, + pallet_balances, + treasury, + } } - #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; + pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = Event; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type Event = Event; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } thread_local! { - static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); + static TEN_TO_FOURTEEN: RefCell> = RefCell::new(vec![10,11,12,13,14]); } pub struct TenToFourteen; impl Contains for TenToFourteen { - fn sorted_members() -> Vec { - TEN_TO_FOURTEEN.with(|v| { - v.borrow().clone() - }) - } - #[cfg(feature = "runtime-benchmarks")] - fn add(new: &u64) { - TEN_TO_FOURTEEN.with(|v| { - let mut members = v.borrow_mut(); - members.push(*new); - members.sort(); - }) - } + fn sorted_members() -> Vec { + TEN_TO_FOURTEEN.with(|v| v.borrow().clone()) + } + #[cfg(feature = "runtime-benchmarks")] + fn add(new: &u64) { + TEN_TO_FOURTEEN.with(|v| { + let mut members = v.borrow_mut(); + members.push(*new); + members.sort(); + }) + } } parameter_types! 
{ - pub const ProposalBond: Permill = Permill::from_percent(5); - pub const ProposalBondMinimum: u64 = 1; - pub const SpendPeriod: u64 = 2; - pub const Burn: Permill = Permill::from_percent(50); - pub const TipCountdown: u64 = 1; - pub const TipFindersFee: Percent = Percent::from_percent(20); - pub const TipReportDepositBase: u64 = 1; - pub const TipReportDepositPerByte: u64 = 1; + pub const ProposalBond: Permill = Permill::from_percent(5); + pub const ProposalBondMinimum: u64 = 1; + pub const SpendPeriod: u64 = 2; + pub const Burn: Permill = Permill::from_percent(50); + pub const TipCountdown: u64 = 1; + pub const TipFindersFee: Percent = Percent::from_percent(20); + pub const TipReportDepositBase: u64 = 1; + pub const TipReportDepositPerByte: u64 = 1; } impl Trait for Test { - type Currency = pallet_balances::Module; - type ApproveOrigin = frame_system::EnsureRoot; - type RejectOrigin = frame_system::EnsureRoot; - type Tippers = TenToFourteen; - type TipCountdown = TipCountdown; - type TipFindersFee = TipFindersFee; - type TipReportDepositBase = TipReportDepositBase; - type TipReportDepositPerByte = TipReportDepositPerByte; - type Event = Event; - type ProposalRejection = (); - type ProposalBond = ProposalBond; - type ProposalBondMinimum = ProposalBondMinimum; - type SpendPeriod = SpendPeriod; - type Burn = Burn; + type Currency = pallet_balances::Module; + type ApproveOrigin = frame_system::EnsureRoot; + type RejectOrigin = frame_system::EnsureRoot; + type Tippers = TenToFourteen; + type TipCountdown = TipCountdown; + type TipFindersFee = TipFindersFee; + type TipReportDepositBase = TipReportDepositBase; + type TipReportDepositPerByte = TipReportDepositPerByte; + type Event = Event; + type ProposalRejection = (); + type ProposalBond = ProposalBond; + type ProposalBondMinimum = ProposalBondMinimum; + type SpendPeriod = SpendPeriod; + type Burn = Burn; } type System = frame_system::Module; type Balances = pallet_balances::Module; type Treasury = Module; pub fn 
new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - // Total issuance will be 200 with treasury account initialized at ED. - balances: vec![(0, 100), (1, 98), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig::default().assimilate_storage::(&mut t).unwrap(); - t.into() + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + // Total issuance will be 200 with treasury account initialized at ED. + balances: vec![(0, 100), (1, 98), (2, 1)], + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisConfig::default() + .assimilate_storage::(&mut t) + .unwrap(); + t.into() } #[test] fn genesis_config_works() { - new_test_ext().execute_with(|| { - assert_eq!(Treasury::pot(), 0); - assert_eq!(Treasury::proposal_count(), 0); - }); + new_test_ext().execute_with(|| { + assert_eq!(Treasury::pot(), 0); + assert_eq!(Treasury::proposal_count(), 0); + }); } fn tip_hash() -> H256 { - BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u64)) + BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 3u64)) } #[test] fn tip_new_cannot_be_used_twice() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - assert_noop!( - Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), - Error::::AlreadyKnown - ); - }); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::tip_new( + Origin::signed(10), + b"awesome.dot".to_vec(), + 3, + 10 + )); + assert_noop!( + Treasury::tip_new(Origin::signed(11), b"awesome.dot".to_vec(), 3, 10), + Error::::AlreadyKnown + ); + }); } #[test] fn report_awesome_and_tip_works() { - new_test_ext().execute_with(|| { - 
Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - - // other reports don't count. - assert_noop!( - Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), - Error::::AlreadyKnown - ); - - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 102); - assert_eq!(Balances::free_balance(3), 8); - }); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::report_awesome( + Origin::signed(0), + b"awesome.dot".to_vec(), + 3 + )); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + + // other reports don't count. 
+ assert_noop!( + Treasury::report_awesome(Origin::signed(1), b"awesome.dot".to_vec(), 3), + Error::::AlreadyKnown + ); + + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!(Treasury::tip(Origin::signed(9), h.clone(), 10), BadOrigin); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 102); + assert_eq!(Balances::free_balance(3), 8); + }); } #[test] fn report_awesome_from_beneficiary_and_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 0)); - assert_eq!(Balances::reserved_balance(0), 12); - assert_eq!(Balances::free_balance(0), 88); - let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u64)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); - assert_eq!(Balances::reserved_balance(0), 0); - assert_eq!(Balances::free_balance(0), 110); - }); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::report_awesome( + Origin::signed(0), + b"awesome.dot".to_vec(), + 0 + )); + assert_eq!(Balances::reserved_balance(0), 12); + assert_eq!(Balances::free_balance(0), 88); + let h = BlakeTwo256::hash_of(&(BlakeTwo256::hash(b"awesome.dot"), 0u64)); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + 
assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(100), h.into())); + assert_eq!(Balances::reserved_balance(0), 0); + assert_eq!(Balances::free_balance(0), 110); + }); } #[test] fn close_tip_works() { - new_test_ext().execute_with(|| { - System::set_block_number(1); - - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10)); - - let h = tip_hash(); - - assert_eq!( - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap(), - RawEvent::NewTip(h), - ); - - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::StillOpen); - - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - - assert_eq!( - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap(), - RawEvent::TipClosing(h), - ); - - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::Premature); - - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::NONE, h.into()), BadOrigin); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - - assert_eq!( - System::events().into_iter().map(|r| r.event) - .filter_map(|e| { - if let Event::treasury(inner) = e { Some(inner) } else { None } - }) - .last() - .unwrap(), - RawEvent::TipClosed(h, 3, 10), - ); - - assert_noop!(Treasury::close_tip(Origin::signed(100), h.into()), Error::::UnknownTip); - }); + new_test_ext().execute_with(|| { + System::set_block_number(1); + + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + + 
assert_ok!(Treasury::tip_new( + Origin::signed(10), + b"awesome.dot".to_vec(), + 3, + 10 + )); + + let h = tip_hash(); + + assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let Event::treasury(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::NewTip(h), + ); + + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + + assert_noop!( + Treasury::close_tip(Origin::signed(0), h.into()), + Error::::StillOpen + ); + + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + + assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let Event::treasury(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::TipClosing(h), + ); + + assert_noop!( + Treasury::close_tip(Origin::signed(0), h.into()), + Error::::Premature + ); + + System::set_block_number(2); + assert_noop!(Treasury::close_tip(Origin::NONE, h.into()), BadOrigin); + assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + + assert_eq!( + System::events() + .into_iter() + .map(|r| r.event) + .filter_map(|e| { + if let Event::treasury(inner) = e { + Some(inner) + } else { + None + } + }) + .last() + .unwrap(), + RawEvent::TipClosed(h, 3, 10), + ); + + assert_noop!( + Treasury::close_tip(Origin::signed(100), h.into()), + Error::::UnknownTip + ); + }); } #[test] fn retract_tip_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::report_awesome(Origin::signed(0), b"awesome.dot".to_vec(), 3)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); - assert_noop!(Treasury::retract_tip(Origin::signed(10), h.clone()), Error::::NotFinder); - 
assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); - System::set_block_number(2); - assert_noop!(Treasury::close_tip(Origin::signed(0), h.into()), Error::::UnknownTip); - }); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::report_awesome( + Origin::signed(0), + b"awesome.dot".to_vec(), + 3 + )); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10)); + assert_noop!( + Treasury::retract_tip(Origin::signed(10), h.clone()), + Error::::NotFinder + ); + assert_ok!(Treasury::retract_tip(Origin::signed(0), h.clone())); + System::set_block_number(2); + assert_noop!( + Treasury::close_tip(Origin::signed(0), h.into()), + Error::::UnknownTip + ); + }); } #[test] fn tip_median_calculation_works() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 0)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::tip_new( + Origin::signed(10), + b"awesome.dot".to_vec(), + 3, + 0 + )); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000000)); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); } #[test] fn tip_changing_works() { - new_test_ext().execute_with(|| { - 
Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::tip_new(Origin::signed(10), b"awesome.dot".to_vec(), 3, 10000)); - let h = tip_hash(); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000)); - assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0)); - assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000)); - assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100)); - assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); - System::set_block_number(2); - assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); - assert_eq!(Balances::free_balance(3), 10); - }); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_ok!(Treasury::tip_new( + Origin::signed(10), + b"awesome.dot".to_vec(), + 3, + 10000 + )); + let h = tip_hash(); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 10000)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 10000)); + assert_ok!(Treasury::tip(Origin::signed(13), h.clone(), 0)); + assert_ok!(Treasury::tip(Origin::signed(14), h.clone(), 0)); + assert_ok!(Treasury::tip(Origin::signed(12), h.clone(), 1000)); + assert_ok!(Treasury::tip(Origin::signed(11), h.clone(), 100)); + assert_ok!(Treasury::tip(Origin::signed(10), h.clone(), 10)); + System::set_block_number(2); + assert_ok!(Treasury::close_tip(Origin::signed(0), h.into())); + assert_eq!(Balances::free_balance(3), 10); + }); } #[test] fn minting_works() { - new_test_ext().execute_with(|| { - // Check that accumulate works when we have Some value in Dummy already. - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - }); + new_test_ext().execute_with(|| { + // Check that accumulate works when we have Some value in Dummy already. 
+ Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + }); } #[test] fn spend_proposal_takes_min_deposit() { - new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); - assert_eq!(Balances::free_balance(0), 99); - assert_eq!(Balances::reserved_balance(0), 1); - }); + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_eq!(Balances::free_balance(0), 99); + assert_eq!(Balances::reserved_balance(0), 1); + }); } #[test] fn spend_proposal_takes_proportional_deposit() { - new_test_ext().execute_with(|| { - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_eq!(Balances::free_balance(0), 95); - assert_eq!(Balances::reserved_balance(0), 5); - }); + new_test_ext().execute_with(|| { + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_eq!(Balances::free_balance(0), 95); + assert_eq!(Balances::reserved_balance(0), 5); + }); } #[test] fn spend_proposal_fails_when_proposer_poor() { - new_test_ext().execute_with(|| { - assert_noop!( - Treasury::propose_spend(Origin::signed(2), 100, 3), - Error::::InsufficientProposersBalance, - ); - }); + new_test_ext().execute_with(|| { + assert_noop!( + Treasury::propose_spend(Origin::signed(2), 100, 3), + Error::::InsufficientProposersBalance, + ); + }); } #[test] fn accepted_spend_proposal_ignored_outside_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - >::on_initialize(1); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 
100); - }); + >::on_initialize(1); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 100); + }); } #[test] fn unused_pot_should_diminish() { - new_test_ext().execute_with(|| { - let init_total_issuance = Balances::total_issuance(); - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Balances::total_issuance(), init_total_issuance + 100); + new_test_ext().execute_with(|| { + let init_total_issuance = Balances::total_issuance(); + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Balances::total_issuance(), init_total_issuance + 100); - >::on_initialize(2); - assert_eq!(Treasury::pot(), 50); - assert_eq!(Balances::total_issuance(), init_total_issuance + 50); - }); + >::on_initialize(2); + assert_eq!(Treasury::pot(), 50); + assert_eq!(Balances::total_issuance(), init_total_issuance + 50); + }); } #[test] fn rejected_spend_proposal_ignored_on_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 0); - assert_eq!(Treasury::pot(), 50); - }); + >::on_initialize(2); + assert_eq!(Balances::free_balance(3), 0); + assert_eq!(Treasury::pot(), 50); + }); } #[test] fn reject_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - 
assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + assert_noop!( + Treasury::reject_proposal(Origin::ROOT, 0), + Error::::InvalidProposalIndex + ); + }); } #[test] fn reject_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!(Treasury::reject_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); + new_test_ext().execute_with(|| { + assert_noop!( + Treasury::reject_proposal(Origin::ROOT, 0), + Error::::InvalidProposalIndex + ); + }); } #[test] fn accept_non_existent_spend_proposal_fails() { - new_test_ext().execute_with(|| { - assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); + new_test_ext().execute_with(|| { + assert_noop!( + Treasury::approve_proposal(Origin::ROOT, 0), + Error::::InvalidProposalIndex + ); + }); } #[test] fn accept_already_rejected_spend_proposal_fails() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); - assert_noop!(Treasury::approve_proposal(Origin::ROOT, 0), Error::::InvalidProposalIndex); - }); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::reject_proposal(Origin::ROOT, 0)); + assert_noop!( + Treasury::approve_proposal(Origin::ROOT, 0), + Error::::InvalidProposalIndex + ); + }); } #[test] fn accepted_spend_proposal_enacted_on_spend_period() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + 
assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 100, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - >::on_initialize(2); - assert_eq!(Balances::free_balance(3), 100); - assert_eq!(Treasury::pot(), 0); - }); + >::on_initialize(2); + assert_eq!(Balances::free_balance(3), 100); + assert_eq!(Treasury::pot(), 0); + }); } #[test] fn pot_underflow_should_not_diminish() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + assert_ok!(Treasury::propose_spend(Origin::signed(0), 150, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - >::on_initialize(2); - assert_eq!(Treasury::pot(), 100); // Pot hasn't changed + >::on_initialize(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap(); - >::on_initialize(4); - assert_eq!(Balances::free_balance(3), 150); // Fund has been spent - assert_eq!(Treasury::pot(), 25); // Pot has finally changed - }); + let _ = Balances::deposit_into_existing(&Treasury::account_id(), 100).unwrap(); + >::on_initialize(4); + assert_eq!(Balances::free_balance(3), 150); // Fund has been spent + assert_eq!(Treasury::pot(), 25); // Pot has finally changed + }); } // Treasury account doesn't get deleted if amount approved to spend is all its free balance. // i.e. pot should not include existential deposit needed for account survival. 
#[test] fn treasury_account_doesnt_get_deleted() { - new_test_ext().execute_with(|| { - Balances::make_free_balance_be(&Treasury::account_id(), 101); - assert_eq!(Treasury::pot(), 100); - let treasury_balance = Balances::free_balance(&Treasury::account_id()); - - assert_ok!(Treasury::propose_spend(Origin::signed(0), treasury_balance, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - - >::on_initialize(2); - assert_eq!(Treasury::pot(), 100); // Pot hasn't changed - - assert_ok!(Treasury::propose_spend(Origin::signed(0), Treasury::pot(), 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); - - >::on_initialize(4); - assert_eq!(Treasury::pot(), 0); // Pot is emptied - assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there - }); + new_test_ext().execute_with(|| { + Balances::make_free_balance_be(&Treasury::account_id(), 101); + assert_eq!(Treasury::pot(), 100); + let treasury_balance = Balances::free_balance(&Treasury::account_id()); + + assert_ok!(Treasury::propose_spend( + Origin::signed(0), + treasury_balance, + 3 + )); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + + >::on_initialize(2); + assert_eq!(Treasury::pot(), 100); // Pot hasn't changed + + assert_ok!(Treasury::propose_spend( + Origin::signed(0), + Treasury::pot(), + 3 + )); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); + + >::on_initialize(4); + assert_eq!(Treasury::pot(), 0); // Pot is emptied + assert_eq!(Balances::free_balance(Treasury::account_id()), 1); // but the account is still there + }); } // In case treasury account is not existing then it works fine. // This is useful for chain that will just update runtime. 
#[test] fn inexistent_account_works() { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig::{ - balances: vec![(0, 100), (1, 99), (2, 1)], - }.assimilate_storage(&mut t).unwrap(); - // Treasury genesis config is not build thus treasury account does not exist - let mut t: sp_io::TestExternalities = t.into(); - - t.execute_with(|| { - assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist - assert_eq!(Treasury::pot(), 0); // Pot is empty - - assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); - assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); - assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); - >::on_initialize(2); - assert_eq!(Treasury::pot(), 0); // Pot hasn't changed - assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed - - Balances::make_free_balance_be(&Treasury::account_id(), 100); - assert_eq!(Treasury::pot(), 99); // Pot now contains funds - assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist - - >::on_initialize(4); - - assert_eq!(Treasury::pot(), 0); // Pot has changed - assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed - }); + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(0, 100), (1, 99), (2, 1)], + } + .assimilate_storage(&mut t) + .unwrap(); + // Treasury genesis config is not build thus treasury account does not exist + let mut t: sp_io::TestExternalities = t.into(); + + t.execute_with(|| { + assert_eq!(Balances::free_balance(Treasury::account_id()), 0); // Account does not exist + assert_eq!(Treasury::pot(), 0); // Pot is empty + + assert_ok!(Treasury::propose_spend(Origin::signed(0), 99, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 0)); + 
assert_ok!(Treasury::propose_spend(Origin::signed(0), 1, 3)); + assert_ok!(Treasury::approve_proposal(Origin::ROOT, 1)); + >::on_initialize(2); + assert_eq!(Treasury::pot(), 0); // Pot hasn't changed + assert_eq!(Balances::free_balance(3), 0); // Balance of `3` hasn't changed + + Balances::make_free_balance_be(&Treasury::account_id(), 100); + assert_eq!(Treasury::pot(), 99); // Pot now contains funds + assert_eq!(Balances::free_balance(Treasury::account_id()), 100); // Account does exist + + >::on_initialize(4); + + assert_eq!(Treasury::pot(), 0); // Pot has changed + assert_eq!(Balances::free_balance(3), 99); // Balance of `3` has changed + }); } diff --git a/frame/utility/src/benchmarking.rs b/frame/utility/src/benchmarking.rs index fc8783b49a..0ec55b3b9f 100644 --- a/frame/utility/src/benchmarking.rs +++ b/frame/utility/src/benchmarking.rs @@ -19,151 +19,155 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; +use frame_benchmarking::{account, benchmarks}; use frame_system::RawOrigin; -use frame_benchmarking::{benchmarks, account}; use sp_runtime::traits::Saturating; use crate::Module as Utility; const SEED: u32 = 0; -fn setup_multi(s: u32, z: u32) -> Result<(Vec, Box<::Call>), &'static str>{ - let mut signatories: Vec = Vec::new(); - for i in 0 .. 
s { - let signatory = account("signatory", i, SEED); - // Give them some balance for a possible deposit - let deposit = T::MultisigDepositBase::get() + T::MultisigDepositFactor::get() * s.into(); - let balance = T::Currency::minimum_balance().saturating_mul(100.into()) + deposit; - T::Currency::make_free_balance_be(&signatory, balance); - signatories.push(signatory); - } - signatories.sort(); - let call: Box<::Call> = Box::new(frame_system::Call::remark(vec![0; z as usize]).into()); - return Ok((signatories, call)) +fn setup_multi( + s: u32, + z: u32, +) -> Result<(Vec, Box<::Call>), &'static str> { + let mut signatories: Vec = Vec::new(); + for i in 0..s { + let signatory = account("signatory", i, SEED); + // Give them some balance for a possible deposit + let deposit = T::MultisigDepositBase::get() + T::MultisigDepositFactor::get() * s.into(); + let balance = T::Currency::minimum_balance().saturating_mul(100.into()) + deposit; + T::Currency::make_free_balance_be(&signatory, balance); + signatories.push(signatory); + } + signatories.sort(); + let call: Box<::Call> = + Box::new(frame_system::Call::remark(vec![0; z as usize]).into()); + return Ok((signatories, call)); } benchmarks! { - _ { } - - batch { - let c in 0 .. 1000; - let mut calls: Vec<::Call> = Vec::new(); - for i in 0 .. c { - let call = frame_system::Call::remark(vec![]).into(); - calls.push(call); - } - let caller = account("caller", 0, SEED); - }: _(RawOrigin::Signed(caller), calls) - - as_sub { - let u in 0 .. 1000; - let caller = account("caller", u, SEED); - let call = Box::new(frame_system::Call::remark(vec![]).into()); - }: _(RawOrigin::Signed(caller), u as u16, call) - - as_multi_create { - // Signatories, need at least 2 total people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 
10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call) - - as_multi_approve { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let mut signatories2 = signatories.clone(); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - // before the call, get the timepoint - let timepoint = Utility::::timepoint(); - // Create the multi - Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; - let caller2 = signatories2.remove(0); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) - - as_multi_complete { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let mut signatories2 = signatories.clone(); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - // before the call, get the timepoint - let timepoint = Utility::::timepoint(); - // Create the multi - Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; - // Everyone except the first person approves - for i in 1 .. s - 1 { - let mut signatories_loop = signatories2.clone(); - let caller_loop = signatories_loop.remove(i as usize); - Utility::::as_multi(RawOrigin::Signed(caller_loop).into(), s as u16, signatories_loop, Some(timepoint), call.clone())?; - } - let caller2 = signatories2.remove(0); - }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) - - approve_as_multi_create { - // Signatories, need at least 2 people - let s in 2 .. 
T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); - // Create the multi - }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash) - - approve_as_multi_approve { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let mut signatories2 = signatories.clone(); - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); - // before the call, get the timepoint - let timepoint = Utility::::timepoint(); - // Create the multi - Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; - let caller2 = signatories2.remove(0); - }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash) - - cancel_as_multi { - // Signatories, need at least 2 people - let s in 2 .. T::MaxSignatories::get() as u32; - // Transaction Length - let z in 0 .. 10_000; - let (mut signatories, call) = setup_multi::(s, z)?; - let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; - let call_hash = call.using_encoded(blake2_256); - let timepoint = Utility::::timepoint(); - // Create the multi - Utility::::as_multi(RawOrigin::Signed(caller.clone()).into(), s as u16, signatories.clone(), None, call.clone())?; - }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) + _ { } + + batch { + let c in 0 .. 1000; + let mut calls: Vec<::Call> = Vec::new(); + for i in 0 .. 
c { + let call = frame_system::Call::remark(vec![]).into(); + calls.push(call); + } + let caller = account("caller", 0, SEED); + }: _(RawOrigin::Signed(caller), calls) + + as_sub { + let u in 0 .. 1000; + let caller = account("caller", u, SEED); + let call = Box::new(frame_system::Call::remark(vec![]).into()); + }: _(RawOrigin::Signed(caller), u as u16, call) + + as_multi_create { + // Signatories, need at least 2 total people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + }: as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call) + + as_multi_approve { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // before the call, get the timepoint + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + let caller2 = signatories2.remove(0); + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) + + as_multi_complete { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 
10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + // before the call, get the timepoint + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + // Everyone except the first person approves + for i in 1 .. s - 1 { + let mut signatories_loop = signatories2.clone(); + let caller_loop = signatories_loop.remove(i as usize); + Utility::::as_multi(RawOrigin::Signed(caller_loop).into(), s as u16, signatories_loop, Some(timepoint), call.clone())?; + } + let caller2 = signatories2.remove(0); + }: as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call) + + approve_as_multi_create { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + let call_hash = call.using_encoded(blake2_256); + // Create the multi + }: approve_as_multi(RawOrigin::Signed(caller), s as u16, signatories, None, call_hash) + + approve_as_multi_approve { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 
10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let mut signatories2 = signatories.clone(); + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + let call_hash = call.using_encoded(blake2_256); + // before the call, get the timepoint + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller).into(), s as u16, signatories, None, call.clone())?; + let caller2 = signatories2.remove(0); + }: approve_as_multi(RawOrigin::Signed(caller2), s as u16, signatories2, Some(timepoint), call_hash) + + cancel_as_multi { + // Signatories, need at least 2 people + let s in 2 .. T::MaxSignatories::get() as u32; + // Transaction Length + let z in 0 .. 10_000; + let (mut signatories, call) = setup_multi::(s, z)?; + let caller = signatories.pop().ok_or("signatories should have len 2 or more")?; + let call_hash = call.using_encoded(blake2_256); + let timepoint = Utility::::timepoint(); + // Create the multi + Utility::::as_multi(RawOrigin::Signed(caller.clone()).into(), s as u16, signatories.clone(), None, call.clone())?; + }: _(RawOrigin::Signed(caller), s as u16, signatories, timepoint, call_hash) } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_batch::()); - assert_ok!(test_benchmark_as_sub::()); - assert_ok!(test_benchmark_as_multi_create::()); - assert_ok!(test_benchmark_as_multi_approve::()); - assert_ok!(test_benchmark_as_multi_complete::()); - assert_ok!(test_benchmark_approve_as_multi_create::()); - assert_ok!(test_benchmark_approve_as_multi_approve::()); - assert_ok!(test_benchmark_cancel_as_multi::()); - }); - } + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_batch::()); + 
assert_ok!(test_benchmark_as_sub::()); + assert_ok!(test_benchmark_as_multi_create::()); + assert_ok!(test_benchmark_as_multi_approve::()); + assert_ok!(test_benchmark_as_multi_complete::()); + assert_ok!(test_benchmark_approve_as_multi_create::()); + assert_ok!(test_benchmark_approve_as_multi_approve::()); + assert_ok!(test_benchmark_cancel_as_multi::()); + }); + } } diff --git a/frame/utility/src/lib.rs b/frame/utility/src/lib.rs index 4a1c36b5ad..775b884385 100644 --- a/frame/utility/src/lib.rs +++ b/frame/utility/src/lib.rs @@ -61,47 +61,54 @@ // Ensure we're `no_std` when compiling for Wasm. #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use sp_core::TypeId; -use sp_io::hashing::blake2_256; -use frame_support::{decl_module, decl_event, decl_error, decl_storage, Parameter, ensure, RuntimeDebug}; -use frame_support::{traits::{Get, ReservableCurrency, Currency}, - weights::{Weight, GetDispatchInfo, DispatchClass, FunctionOf}, - dispatch::PostDispatchInfo, +use codec::{Decode, Encode}; +use frame_support::{ + decl_error, decl_event, decl_module, decl_storage, ensure, Parameter, RuntimeDebug, +}; +use frame_support::{ + dispatch::PostDispatchInfo, + traits::{Currency, Get, ReservableCurrency}, + weights::{DispatchClass, FunctionOf, GetDispatchInfo, Weight}, }; use frame_system::{self as system, ensure_signed}; -use sp_runtime::{DispatchError, DispatchResult, traits::Dispatchable}; +use sp_core::TypeId; +use sp_io::hashing::blake2_256; +use sp_runtime::{traits::Dispatchable, DispatchError, DispatchResult}; +use sp_std::prelude::*; -mod tests; mod benchmarking; +mod tests; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; /// Configuration trait. pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; - - /// The overarching call type. 
- type Call: Parameter + Dispatchable + GetDispatchInfo + From>; - - /// The currency mechanism. - type Currency: ReservableCurrency; - - /// The base amount of currency needed to reserve for creating a multisig execution. - /// - /// This is held for an additional storage item whose value size is - /// `4 + sizeof((BlockNumber, Balance, AccountId))` bytes. - type MultisigDepositBase: Get>; - - /// The amount of currency needed per unit threshold when creating a multisig execution. - /// - /// This is held for adding 32 bytes more into a pre-existing storage value. - type MultisigDepositFactor: Get>; - - /// The maximum amount of signatories allowed in the multisig. - type MaxSignatories: Get; + /// The overarching event type. + type Event: From> + Into<::Event>; + + /// The overarching call type. + type Call: Parameter + + Dispatchable + + GetDispatchInfo + + From>; + + /// The currency mechanism. + type Currency: ReservableCurrency; + + /// The base amount of currency needed to reserve for creating a multisig execution. + /// + /// This is held for an additional storage item whose value size is + /// `4 + sizeof((BlockNumber, Balance, AccountId))` bytes. + type MultisigDepositBase: Get>; + + /// The amount of currency needed per unit threshold when creating a multisig execution. + /// + /// This is held for adding 32 bytes more into a pre-existing storage value. + type MultisigDepositFactor: Get>; + + /// The maximum amount of signatories allowed in the multisig. + type MaxSignatories: Get; } /// A global extrinsic index, formed as the extrinsic index within a block, together with that @@ -109,88 +116,88 @@ pub trait Trait: frame_system::Trait { /// composite was created to be uniquely identified. #[derive(Copy, Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] pub struct Timepoint { - /// The height of the chain at the point in time. - height: BlockNumber, - /// The index of the extrinsic at the point in time. 
- index: u32, + /// The height of the chain at the point in time. + height: BlockNumber, + /// The index of the extrinsic at the point in time. + index: u32, } /// An open multisig operation. #[derive(Clone, Eq, PartialEq, Encode, Decode, Default, RuntimeDebug)] pub struct Multisig { - /// The extrinsic when the multisig operation was opened. - when: Timepoint, - /// The amount held in reserve of the `depositor`, to be returned once the operation ends. - deposit: Balance, - /// The account who opened it (i.e. the first to approve it). - depositor: AccountId, - /// The approvals achieved so far, including the depositor. Always sorted. - approvals: Vec, + /// The extrinsic when the multisig operation was opened. + when: Timepoint, + /// The amount held in reserve of the `depositor`, to be returned once the operation ends. + deposit: Balance, + /// The account who opened it (i.e. the first to approve it). + depositor: AccountId, + /// The approvals achieved so far, including the depositor. Always sorted. + approvals: Vec, } decl_storage! { - trait Store for Module as Utility { - /// The set of open multisig operations. - pub Multisigs: double_map - hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) [u8; 32] - => Option, T::AccountId>>; - } + trait Store for Module as Utility { + /// The set of open multisig operations. + pub Multisigs: double_map + hasher(twox_64_concat) T::AccountId, hasher(blake2_128_concat) [u8; 32] + => Option, T::AccountId>>; + } } decl_error! { - pub enum Error for Module { - /// Threshold is too low (zero). - ZeroThreshold, - /// Call is already approved by this signatory. - AlreadyApproved, - /// Call doesn't need any (more) approvals. - NoApprovalsNeeded, - /// There are too few signatories in the list. - TooFewSignatories, - /// There are too many signatories in the list. - TooManySignatories, - /// The signatories were provided out of order; they should be ordered. 
- SignatoriesOutOfOrder, - /// The sender was contained in the other signatories; it shouldn't be. - SenderInSignatories, - /// Multisig operation not found when attempting to cancel. - NotFound, - /// Only the account that originally created the multisig is able to cancel it. - NotOwner, - /// No timepoint was given, yet the multisig operation is already underway. - NoTimepoint, - /// A different timepoint was given to the multisig operation that is underway. - WrongTimepoint, - /// A timepoint was given, yet no multisig operation is underway. - UnexpectedTimepoint, - } + pub enum Error for Module { + /// Threshold is too low (zero). + ZeroThreshold, + /// Call is already approved by this signatory. + AlreadyApproved, + /// Call doesn't need any (more) approvals. + NoApprovalsNeeded, + /// There are too few signatories in the list. + TooFewSignatories, + /// There are too many signatories in the list. + TooManySignatories, + /// The signatories were provided out of order; they should be ordered. + SignatoriesOutOfOrder, + /// The sender was contained in the other signatories; it shouldn't be. + SenderInSignatories, + /// Multisig operation not found when attempting to cancel. + NotFound, + /// Only the account that originally created the multisig is able to cancel it. + NotOwner, + /// No timepoint was given, yet the multisig operation is already underway. + NoTimepoint, + /// A different timepoint was given to the multisig operation that is underway. + WrongTimepoint, + /// A timepoint was given, yet no multisig operation is underway. + UnexpectedTimepoint, + } } decl_event! { - /// Events type. - pub enum Event where - AccountId = ::AccountId, - BlockNumber = ::BlockNumber, - CallHash = [u8; 32] - { - /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as - /// well as the error. - BatchInterrupted(u32, DispatchError), - /// Batch of dispatches completed fully with no error. 
- BatchCompleted, - /// A new multisig operation has begun. First param is the account that is approving, - /// second is the multisig account, third is hash of the call. - NewMultisig(AccountId, AccountId, CallHash), - /// A multisig operation has been approved by someone. First param is the account that is - /// approving, third is the multisig account, fourth is hash of the call. - MultisigApproval(AccountId, Timepoint, AccountId, CallHash), - /// A multisig operation has been executed. First param is the account that is - /// approving, third is the multisig account, fourth is hash of the call to be executed. - MultisigExecuted(AccountId, Timepoint, AccountId, CallHash, DispatchResult), - /// A multisig operation has been cancelled. First param is the account that is - /// cancelling, third is the multisig account, fourth is hash of the call. - MultisigCancelled(AccountId, Timepoint, AccountId, CallHash), - } + /// Events type. + pub enum Event where + AccountId = ::AccountId, + BlockNumber = ::BlockNumber, + CallHash = [u8; 32] + { + /// Batch of dispatches did not complete fully. Index of first failing dispatch given, as + /// well as the error. + BatchInterrupted(u32, DispatchError), + /// Batch of dispatches completed fully with no error. + BatchCompleted, + /// A new multisig operation has begun. First param is the account that is approving, + /// second is the multisig account, third is hash of the call. + NewMultisig(AccountId, AccountId, CallHash), + /// A multisig operation has been approved by someone. First param is the account that is + /// approving, third is the multisig account, fourth is hash of the call. + MultisigApproval(AccountId, Timepoint, AccountId, CallHash), + /// A multisig operation has been executed. First param is the account that is + /// approving, third is the multisig account, fourth is hash of the call to be executed. 
+ MultisigExecuted(AccountId, Timepoint, AccountId, CallHash, DispatchResult), + /// A multisig operation has been cancelled. First param is the account that is + /// cancelling, third is the multisig account, fourth is hash of the call. + MultisigCancelled(AccountId, Timepoint, AccountId, CallHash), + } } /// A module identifier. These are per module and should be stored in a registry somewhere. @@ -198,377 +205,378 @@ decl_event! { struct IndexedUtilityModuleId(u16); impl TypeId for IndexedUtilityModuleId { - const TYPE_ID: [u8; 4] = *b"suba"; + const TYPE_ID: [u8; 4] = *b"suba"; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// Deposit one of this module's events by using the default implementation. - fn deposit_event() = default; - - /// Send a batch of dispatch calls. - /// - /// This will execute until the first one fails and then stop. - /// - /// May be called from any origin. - /// - /// - `calls`: The calls to be dispatched from the same origin. - /// - /// # - /// - The sum of the weights of the `calls`. - /// - One event. - /// # - /// - /// This will return `Ok` in all circumstances. To determine the success of the batch, an - /// event is deposited. If a call failed and the batch was interrupted, then the - /// `BatchInterrupted` event is deposited, along with the number of successful calls made - /// and the error of the failed call. If all were successful, then the `BatchCompleted` - /// event is deposited. 
- #[weight = FunctionOf( - |args: (&Vec<::Call>,)| { - args.0.iter() - .map(|call| call.get_dispatch_info().weight) - .fold(10_000, |a, n| a + n) - }, - |args: (&Vec<::Call>,)| { - let all_operational = args.0.iter() - .map(|call| call.get_dispatch_info().class) - .all(|class| class == DispatchClass::Operational); - if all_operational { - DispatchClass::Operational - } else { - DispatchClass::Normal - } - }, - true - )] - fn batch(origin, calls: Vec<::Call>) { - for (index, call) in calls.into_iter().enumerate() { - let result = call.dispatch(origin.clone()); - if let Err(e) = result { - Self::deposit_event(Event::::BatchInterrupted(index as u32, e.error)); - return Ok(()); - } - } - Self::deposit_event(Event::::BatchCompleted); - } - - /// Send a call through an indexed pseudonym of the sender. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// # - /// - The weight of the `call` + 10,000. - /// # - #[weight = FunctionOf( - |args: (&u16, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, - |args: (&u16, &Box<::Call>)| args.1.get_dispatch_info().class, - true - )] - fn as_sub(origin, index: u16, call: Box<::Call>) -> DispatchResult { - let who = ensure_signed(origin)?; - let pseudonym = Self::sub_account_id(who, index); - call.dispatch(frame_system::RawOrigin::Signed(pseudonym).into()) - .map(|_| ()).map_err(|e| e.error) - } - - /// Register approval for a dispatch to be made from a deterministic composite account if - /// approved by a total of `threshold - 1` of `other_signatories`. - /// - /// If there are enough, then dispatch the call. - /// - /// Payment: `MultisigDepositBase` will be reserved if this is the first approval, plus - /// `threshold` times `MultisigDepositFactor`. It is returned once this dispatch happens or - /// is cancelled. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// - `threshold`: The total number of approvals for this dispatch before it is executed. 
- /// - `other_signatories`: The accounts (other than the sender) who can approve this - /// dispatch. May not be empty. - /// - `maybe_timepoint`: If this is the first approval, then this must be `None`. If it is - /// not the first approval, then it must be `Some`, with the timepoint (block number and - /// transaction index) of the first approval transaction. - /// - `call`: The call to be executed. - /// - /// NOTE: Unless this is the final approval, you will generally want to use - /// `approve_as_multi` instead, since it only requires a hash of the call. - /// - /// Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise - /// on success, result is `Ok` and the result from the interior call, if it was executed, - /// may be found in the deposited `MultisigExecuted` event. - /// - /// # - /// - `O(S + Z + Call)`. - /// - Up to one balance-reserve or unreserve operation. - /// - One passthrough operation, one insert, both `O(S)` where `S` is the number of - /// signatories. `S` is capped by `MaxSignatories`, with weight being proportional. - /// - One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len. - /// - One encode & hash, both of complexity `O(S)`. - /// - Up to one binary search and insert (`O(logS + S)`). - /// - I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove. - /// - One event. - /// - The weight of the `call`. - /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a - /// deposit taken for its lifetime of - /// `MultisigDepositBase + threshold * MultisigDepositFactor`. 
- /// # - #[weight = FunctionOf( - |args: (&u16, &Vec, &Option>, &Box<::Call>)| { - args.3.get_dispatch_info().weight + 10_000 * (args.1.len() as Weight + 1) - }, - |args: (&u16, &Vec, &Option>, &Box<::Call>)| { - args.3.get_dispatch_info().class - }, - true - )] - fn as_multi(origin, - threshold: u16, - other_signatories: Vec, - maybe_timepoint: Option>, - call: Box<::Call>, - ) -> DispatchResult { - let who = ensure_signed(origin)?; - ensure!(threshold >= 1, Error::::ZeroThreshold); - let max_sigs = T::MaxSignatories::get() as usize; - ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); - ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); - let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; - - let id = Self::multi_account_id(&signatories, threshold); - let call_hash = call.using_encoded(blake2_256); - - if let Some(mut m) = >::get(&id, call_hash) { - let timepoint = maybe_timepoint.ok_or(Error::::NoTimepoint)?; - ensure!(m.when == timepoint, Error::::WrongTimepoint); - if let Err(pos) = m.approvals.binary_search(&who) { - // we know threshold is greater than zero from the above ensure. - if (m.approvals.len() as u16) < threshold - 1 { - m.approvals.insert(pos, who.clone()); - >::insert(&id, call_hash, m); - Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); - return Ok(()) - } - } else { - if (m.approvals.len() as u16) < threshold { - Err(Error::::AlreadyApproved)? 
- } - } - - let result = call.dispatch(frame_system::RawOrigin::Signed(id.clone()).into()); - let _ = T::Currency::unreserve(&m.depositor, m.deposit); - >::remove(&id, call_hash); - Self::deposit_event(RawEvent::MultisigExecuted( - who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) - )); - } else { - ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); - if threshold > 1 { - let deposit = T::MultisigDepositBase::get() - + T::MultisigDepositFactor::get() * threshold.into(); - T::Currency::reserve(&who, deposit)?; - >::insert(&id, call_hash, Multisig { - when: Self::timepoint(), - deposit, - depositor: who.clone(), - approvals: vec![who.clone()], - }); - Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); - } else { - return call.dispatch(frame_system::RawOrigin::Signed(id).into()) - .map(|_| ()).map_err(|e| e.error) - } - } - Ok(()) - } - - /// Register approval for a dispatch to be made from a deterministic composite account if - /// approved by a total of `threshold - 1` of `other_signatories`. - /// - /// Payment: `MultisigDepositBase` will be reserved if this is the first approval, plus - /// `threshold` times `MultisigDepositFactor`. It is returned once this dispatch happens or - /// is cancelled. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// - `threshold`: The total number of approvals for this dispatch before it is executed. - /// - `other_signatories`: The accounts (other than the sender) who can approve this - /// dispatch. May not be empty. - /// - `maybe_timepoint`: If this is the first approval, then this must be `None`. If it is - /// not the first approval, then it must be `Some`, with the timepoint (block number and - /// transaction index) of the first approval transaction. - /// - `call_hash`: The hash of the call to be executed. - /// - /// NOTE: If this is the final approval, you will want to use `as_multi` instead. - /// - /// # - /// - `O(S)`. 
- /// - Up to one balance-reserve or unreserve operation. - /// - One passthrough operation, one insert, both `O(S)` where `S` is the number of - /// signatories. `S` is capped by `MaxSignatories`, with weight being proportional. - /// - One encode & hash, both of complexity `O(S)`. - /// - Up to one binary search and insert (`O(logS + S)`). - /// - I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove. - /// - One event. - /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a - /// deposit taken for its lifetime of - /// `MultisigDepositBase + threshold * MultisigDepositFactor`. - /// # - #[weight = FunctionOf( - |args: (&u16, &Vec, &Option>, &[u8; 32])| { - 10_000 * (args.1.len() as Weight + 1) - }, - DispatchClass::Normal, - true - )] - fn approve_as_multi(origin, - threshold: u16, - other_signatories: Vec, - maybe_timepoint: Option>, - call_hash: [u8; 32], - ) -> DispatchResult { - let who = ensure_signed(origin)?; - ensure!(threshold >= 1, Error::::ZeroThreshold); - let max_sigs = T::MaxSignatories::get() as usize; - ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); - ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); - let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; - - let id = Self::multi_account_id(&signatories, threshold); - - if let Some(mut m) = >::get(&id, call_hash) { - let timepoint = maybe_timepoint.ok_or(Error::::NoTimepoint)?; - ensure!(m.when == timepoint, Error::::WrongTimepoint); - ensure!(m.approvals.len() < threshold as usize, Error::::NoApprovalsNeeded); - if let Err(pos) = m.approvals.binary_search(&who) { - m.approvals.insert(pos, who.clone()); - >::insert(&id, call_hash, m); - Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); - } else { - Err(Error::::AlreadyApproved)? 
- } - } else { - if threshold > 1 { - ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); - let deposit = T::MultisigDepositBase::get() - + T::MultisigDepositFactor::get() * threshold.into(); - T::Currency::reserve(&who, deposit)?; - >::insert(&id, call_hash, Multisig { - when: Self::timepoint(), - deposit, - depositor: who.clone(), - approvals: vec![who.clone()], - }); - Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); - } else { - Err(Error::::NoApprovalsNeeded)? - } - } - Ok(()) - } - - /// Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously - /// for this operation will be unreserved on success. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// - `threshold`: The total number of approvals for this dispatch before it is executed. - /// - `other_signatories`: The accounts (other than the sender) who can approve this - /// dispatch. May not be empty. - /// - `timepoint`: The timepoint (block number and transaction index) of the first approval - /// transaction for this dispatch. - /// - `call_hash`: The hash of the call to be executed. - /// - /// # - /// - `O(S)`. - /// - Up to one balance-reserve or unreserve operation. - /// - One passthrough operation, one insert, both `O(S)` where `S` is the number of - /// signatories. `S` is capped by `MaxSignatories`, with weight being proportional. - /// - One encode & hash, both of complexity `O(S)`. - /// - One event. - /// - I/O: 1 read `O(S)`, one remove. - /// - Storage: removes one item. 
- /// # - #[weight = FunctionOf( - |args: (&u16, &Vec, &Timepoint, &[u8; 32])| { - 10_000 * (args.1.len() as Weight + 1) - }, - DispatchClass::Normal, - true - )] - fn cancel_as_multi(origin, - threshold: u16, - other_signatories: Vec, - timepoint: Timepoint, - call_hash: [u8; 32], - ) -> DispatchResult { - let who = ensure_signed(origin)?; - ensure!(threshold >= 1, Error::::ZeroThreshold); - let max_sigs = T::MaxSignatories::get() as usize; - ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); - ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); - let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; - - let id = Self::multi_account_id(&signatories, threshold); - - let m = >::get(&id, call_hash) - .ok_or(Error::::NotFound)?; - ensure!(m.when == timepoint, Error::::WrongTimepoint); - ensure!(m.depositor == who, Error::::NotOwner); - - let _ = T::Currency::unreserve(&m.depositor, m.deposit); - >::remove(&id, call_hash); - - Self::deposit_event(RawEvent::MultisigCancelled(who, timepoint, id, call_hash)); - Ok(()) - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + /// Deposit one of this module's events by using the default implementation. + fn deposit_event() = default; + + /// Send a batch of dispatch calls. + /// + /// This will execute until the first one fails and then stop. + /// + /// May be called from any origin. + /// + /// - `calls`: The calls to be dispatched from the same origin. + /// + /// # + /// - The sum of the weights of the `calls`. + /// - One event. + /// # + /// + /// This will return `Ok` in all circumstances. To determine the success of the batch, an + /// event is deposited. If a call failed and the batch was interrupted, then the + /// `BatchInterrupted` event is deposited, along with the number of successful calls made + /// and the error of the failed call. 
If all were successful, then the `BatchCompleted` + /// event is deposited. + #[weight = FunctionOf( + |args: (&Vec<::Call>,)| { + args.0.iter() + .map(|call| call.get_dispatch_info().weight) + .fold(10_000, |a, n| a + n) + }, + |args: (&Vec<::Call>,)| { + let all_operational = args.0.iter() + .map(|call| call.get_dispatch_info().class) + .all(|class| class == DispatchClass::Operational); + if all_operational { + DispatchClass::Operational + } else { + DispatchClass::Normal + } + }, + true + )] + fn batch(origin, calls: Vec<::Call>) { + for (index, call) in calls.into_iter().enumerate() { + let result = call.dispatch(origin.clone()); + if let Err(e) = result { + Self::deposit_event(Event::::BatchInterrupted(index as u32, e.error)); + return Ok(()); + } + } + Self::deposit_event(Event::::BatchCompleted); + } + + /// Send a call through an indexed pseudonym of the sender. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// # + /// - The weight of the `call` + 10,000. + /// # + #[weight = FunctionOf( + |args: (&u16, &Box<::Call>)| args.1.get_dispatch_info().weight + 10_000, + |args: (&u16, &Box<::Call>)| args.1.get_dispatch_info().class, + true + )] + fn as_sub(origin, index: u16, call: Box<::Call>) -> DispatchResult { + let who = ensure_signed(origin)?; + let pseudonym = Self::sub_account_id(who, index); + call.dispatch(frame_system::RawOrigin::Signed(pseudonym).into()) + .map(|_| ()).map_err(|e| e.error) + } + + /// Register approval for a dispatch to be made from a deterministic composite account if + /// approved by a total of `threshold - 1` of `other_signatories`. + /// + /// If there are enough, then dispatch the call. + /// + /// Payment: `MultisigDepositBase` will be reserved if this is the first approval, plus + /// `threshold` times `MultisigDepositFactor`. It is returned once this dispatch happens or + /// is cancelled. + /// + /// The dispatch origin for this call must be _Signed_. 
+ /// + /// - `threshold`: The total number of approvals for this dispatch before it is executed. + /// - `other_signatories`: The accounts (other than the sender) who can approve this + /// dispatch. May not be empty. + /// - `maybe_timepoint`: If this is the first approval, then this must be `None`. If it is + /// not the first approval, then it must be `Some`, with the timepoint (block number and + /// transaction index) of the first approval transaction. + /// - `call`: The call to be executed. + /// + /// NOTE: Unless this is the final approval, you will generally want to use + /// `approve_as_multi` instead, since it only requires a hash of the call. + /// + /// Result is equivalent to the dispatched result if `threshold` is exactly `1`. Otherwise + /// on success, result is `Ok` and the result from the interior call, if it was executed, + /// may be found in the deposited `MultisigExecuted` event. + /// + /// # + /// - `O(S + Z + Call)`. + /// - Up to one balance-reserve or unreserve operation. + /// - One passthrough operation, one insert, both `O(S)` where `S` is the number of + /// signatories. `S` is capped by `MaxSignatories`, with weight being proportional. + /// - One call encode & hash, both of complexity `O(Z)` where `Z` is tx-len. + /// - One encode & hash, both of complexity `O(S)`. + /// - Up to one binary search and insert (`O(logS + S)`). + /// - I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove. + /// - One event. + /// - The weight of the `call`. + /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a + /// deposit taken for its lifetime of + /// `MultisigDepositBase + threshold * MultisigDepositFactor`. 
+ /// # + #[weight = FunctionOf( + |args: (&u16, &Vec, &Option>, &Box<::Call>)| { + args.3.get_dispatch_info().weight + 10_000 * (args.1.len() as Weight + 1) + }, + |args: (&u16, &Vec, &Option>, &Box<::Call>)| { + args.3.get_dispatch_info().class + }, + true + )] + fn as_multi(origin, + threshold: u16, + other_signatories: Vec, + maybe_timepoint: Option>, + call: Box<::Call>, + ) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!(threshold >= 1, Error::::ZeroThreshold); + let max_sigs = T::MaxSignatories::get() as usize; + ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); + ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); + let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; + + let id = Self::multi_account_id(&signatories, threshold); + let call_hash = call.using_encoded(blake2_256); + + if let Some(mut m) = >::get(&id, call_hash) { + let timepoint = maybe_timepoint.ok_or(Error::::NoTimepoint)?; + ensure!(m.when == timepoint, Error::::WrongTimepoint); + if let Err(pos) = m.approvals.binary_search(&who) { + // we know threshold is greater than zero from the above ensure. + if (m.approvals.len() as u16) < threshold - 1 { + m.approvals.insert(pos, who.clone()); + >::insert(&id, call_hash, m); + Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); + return Ok(()) + } + } else { + if (m.approvals.len() as u16) < threshold { + Err(Error::::AlreadyApproved)? 
+ } + } + + let result = call.dispatch(frame_system::RawOrigin::Signed(id.clone()).into()); + let _ = T::Currency::unreserve(&m.depositor, m.deposit); + >::remove(&id, call_hash); + Self::deposit_event(RawEvent::MultisigExecuted( + who, timepoint, id, call_hash, result.map(|_| ()).map_err(|e| e.error) + )); + } else { + ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); + if threshold > 1 { + let deposit = T::MultisigDepositBase::get() + + T::MultisigDepositFactor::get() * threshold.into(); + T::Currency::reserve(&who, deposit)?; + >::insert(&id, call_hash, Multisig { + when: Self::timepoint(), + deposit, + depositor: who.clone(), + approvals: vec![who.clone()], + }); + Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); + } else { + return call.dispatch(frame_system::RawOrigin::Signed(id).into()) + .map(|_| ()).map_err(|e| e.error) + } + } + Ok(()) + } + + /// Register approval for a dispatch to be made from a deterministic composite account if + /// approved by a total of `threshold - 1` of `other_signatories`. + /// + /// Payment: `MultisigDepositBase` will be reserved if this is the first approval, plus + /// `threshold` times `MultisigDepositFactor`. It is returned once this dispatch happens or + /// is cancelled. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `threshold`: The total number of approvals for this dispatch before it is executed. + /// - `other_signatories`: The accounts (other than the sender) who can approve this + /// dispatch. May not be empty. + /// - `maybe_timepoint`: If this is the first approval, then this must be `None`. If it is + /// not the first approval, then it must be `Some`, with the timepoint (block number and + /// transaction index) of the first approval transaction. + /// - `call_hash`: The hash of the call to be executed. + /// + /// NOTE: If this is the final approval, you will want to use `as_multi` instead. + /// + /// # + /// - `O(S)`. 
+ /// - Up to one balance-reserve or unreserve operation. + /// - One passthrough operation, one insert, both `O(S)` where `S` is the number of + /// signatories. `S` is capped by `MaxSignatories`, with weight being proportional. + /// - One encode & hash, both of complexity `O(S)`. + /// - Up to one binary search and insert (`O(logS + S)`). + /// - I/O: 1 read `O(S)`, up to 1 mutate `O(S)`. Up to one remove. + /// - One event. + /// - Storage: inserts one item, value size bounded by `MaxSignatories`, with a + /// deposit taken for its lifetime of + /// `MultisigDepositBase + threshold * MultisigDepositFactor`. + /// # + #[weight = FunctionOf( + |args: (&u16, &Vec, &Option>, &[u8; 32])| { + 10_000 * (args.1.len() as Weight + 1) + }, + DispatchClass::Normal, + true + )] + fn approve_as_multi(origin, + threshold: u16, + other_signatories: Vec, + maybe_timepoint: Option>, + call_hash: [u8; 32], + ) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!(threshold >= 1, Error::::ZeroThreshold); + let max_sigs = T::MaxSignatories::get() as usize; + ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); + ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); + let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; + + let id = Self::multi_account_id(&signatories, threshold); + + if let Some(mut m) = >::get(&id, call_hash) { + let timepoint = maybe_timepoint.ok_or(Error::::NoTimepoint)?; + ensure!(m.when == timepoint, Error::::WrongTimepoint); + ensure!(m.approvals.len() < threshold as usize, Error::::NoApprovalsNeeded); + if let Err(pos) = m.approvals.binary_search(&who) { + m.approvals.insert(pos, who.clone()); + >::insert(&id, call_hash, m); + Self::deposit_event(RawEvent::MultisigApproval(who, timepoint, id, call_hash)); + } else { + Err(Error::::AlreadyApproved)? 
+ } + } else { + if threshold > 1 { + ensure!(maybe_timepoint.is_none(), Error::::UnexpectedTimepoint); + let deposit = T::MultisigDepositBase::get() + + T::MultisigDepositFactor::get() * threshold.into(); + T::Currency::reserve(&who, deposit)?; + >::insert(&id, call_hash, Multisig { + when: Self::timepoint(), + deposit, + depositor: who.clone(), + approvals: vec![who.clone()], + }); + Self::deposit_event(RawEvent::NewMultisig(who, id, call_hash)); + } else { + Err(Error::::NoApprovalsNeeded)? + } + } + Ok(()) + } + + /// Cancel a pre-existing, on-going multisig transaction. Any deposit reserved previously + /// for this operation will be unreserved on success. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `threshold`: The total number of approvals for this dispatch before it is executed. + /// - `other_signatories`: The accounts (other than the sender) who can approve this + /// dispatch. May not be empty. + /// - `timepoint`: The timepoint (block number and transaction index) of the first approval + /// transaction for this dispatch. + /// - `call_hash`: The hash of the call to be executed. + /// + /// # + /// - `O(S)`. + /// - Up to one balance-reserve or unreserve operation. + /// - One passthrough operation, one insert, both `O(S)` where `S` is the number of + /// signatories. `S` is capped by `MaxSignatories`, with weight being proportional. + /// - One encode & hash, both of complexity `O(S)`. + /// - One event. + /// - I/O: 1 read `O(S)`, one remove. + /// - Storage: removes one item. 
+ /// # + #[weight = FunctionOf( + |args: (&u16, &Vec, &Timepoint, &[u8; 32])| { + 10_000 * (args.1.len() as Weight + 1) + }, + DispatchClass::Normal, + true + )] + fn cancel_as_multi(origin, + threshold: u16, + other_signatories: Vec, + timepoint: Timepoint, + call_hash: [u8; 32], + ) -> DispatchResult { + let who = ensure_signed(origin)?; + ensure!(threshold >= 1, Error::::ZeroThreshold); + let max_sigs = T::MaxSignatories::get() as usize; + ensure!(!other_signatories.is_empty(), Error::::TooFewSignatories); + ensure!(other_signatories.len() < max_sigs, Error::::TooManySignatories); + let signatories = Self::ensure_sorted_and_insert(other_signatories, who.clone())?; + + let id = Self::multi_account_id(&signatories, threshold); + + let m = >::get(&id, call_hash) + .ok_or(Error::::NotFound)?; + ensure!(m.when == timepoint, Error::::WrongTimepoint); + ensure!(m.depositor == who, Error::::NotOwner); + + let _ = T::Currency::unreserve(&m.depositor, m.deposit); + >::remove(&id, call_hash); + + Self::deposit_event(RawEvent::MultisigCancelled(who, timepoint, id, call_hash)); + Ok(()) + } + } } impl Module { - /// Derive a sub-account ID from the owner account and the sub-account index. - pub fn sub_account_id(who: T::AccountId, index: u16) -> T::AccountId { - let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); - T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() - } - - /// Derive a multi-account ID from the sorted list of accounts and the threshold that are - /// required. - /// - /// NOTE: `who` must be sorted. If it is not, then you'll get the wrong answer. - pub fn multi_account_id(who: &[T::AccountId], threshold: u16) -> T::AccountId { - let entropy = (b"modlpy/utilisuba", who, threshold).using_encoded(blake2_256); - T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() - } - - /// The current `Timepoint`. 
- pub fn timepoint() -> Timepoint { - Timepoint { - height: >::block_number(), - index: >::extrinsic_index().unwrap_or_default(), - } - } - - /// Check that signatories is sorted and doesn't contain sender, then insert sender. - fn ensure_sorted_and_insert(other_signatories: Vec, who: T::AccountId) - -> Result, DispatchError> - { - let mut signatories = other_signatories; - let mut maybe_last = None; - let mut index = 0; - for item in signatories.iter() { - if let Some(last) = maybe_last { - ensure!(last < item, Error::::SignatoriesOutOfOrder); - } - if item <= &who { - ensure!(item != &who, Error::::SenderInSignatories); - index += 1; - } - maybe_last = Some(item); - } - signatories.insert(index, who); - Ok(signatories) - } + /// Derive a sub-account ID from the owner account and the sub-account index. + pub fn sub_account_id(who: T::AccountId, index: u16) -> T::AccountId { + let entropy = (b"modlpy/utilisuba", who, index).using_encoded(blake2_256); + T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() + } + + /// Derive a multi-account ID from the sorted list of accounts and the threshold that are + /// required. + /// + /// NOTE: `who` must be sorted. If it is not, then you'll get the wrong answer. + pub fn multi_account_id(who: &[T::AccountId], threshold: u16) -> T::AccountId { + let entropy = (b"modlpy/utilisuba", who, threshold).using_encoded(blake2_256); + T::AccountId::decode(&mut &entropy[..]).unwrap_or_default() + } + + /// The current `Timepoint`. + pub fn timepoint() -> Timepoint { + Timepoint { + height: >::block_number(), + index: >::extrinsic_index().unwrap_or_default(), + } + } + + /// Check that signatories is sorted and doesn't contain sender, then insert sender. 
+ fn ensure_sorted_and_insert( + other_signatories: Vec, + who: T::AccountId, + ) -> Result, DispatchError> { + let mut signatories = other_signatories; + let mut maybe_last = None; + let mut index = 0; + for item in signatories.iter() { + if let Some(last) = maybe_last { + ensure!(last < item, Error::::SignatoriesOutOfOrder); + } + if item <= &who { + ensure!(item != &who, Error::::SenderInSignatories); + index += 1; + } + maybe_last = Some(item); + } + signatories.insert(index, who); + Ok(signatories) + } } diff --git a/frame/utility/src/tests.rs b/frame/utility/src/tests.rs index 1b26bb5d5b..89b6f76436 100644 --- a/frame/utility/src/tests.rs +++ b/frame/utility/src/tests.rs @@ -20,31 +20,35 @@ use super::*; +use crate as utility; use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, impl_outer_dispatch, - weights::Weight, impl_outer_event + assert_noop, assert_ok, impl_outer_dispatch, impl_outer_event, impl_outer_origin, + parameter_types, weights::Weight, }; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; -use crate as utility; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} + pub enum Origin for Test where system = frame_system {} } impl_outer_event! { - pub enum TestEvent for Test { - system, - pallet_balances, - utility, - } + pub enum TestEvent for Test { + system, + pallet_balances, + utility, + } } impl_outer_dispatch! { - pub enum Call for Test where origin: Origin { - frame_system::System, - pallet_balances::Balances, - utility::Utility, - } + pub enum Call for Test where origin: Origin { + frame_system::System, + pallet_balances::Balances, + utility::Utility, + } } // For testing the pallet, we construct most of a mock runtime. This means @@ -53,55 +57,55 @@ impl_outer_dispatch! { #[derive(Clone, Eq, PartialEq)] pub struct Test; parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); } impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = Call; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = TestEvent; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = Call; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = TestEvent; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); } parameter_types! 
{ - pub const ExistentialDeposit: u64 = 1; + pub const ExistentialDeposit: u64 = 1; } impl pallet_balances::Trait for Test { - type Balance = u64; - type Event = TestEvent; - type DustRemoval = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; + type Balance = u64; + type Event = TestEvent; + type DustRemoval = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; } parameter_types! { - pub const MultisigDepositBase: u64 = 1; - pub const MultisigDepositFactor: u64 = 1; - pub const MaxSignatories: u16 = 3; + pub const MultisigDepositBase: u64 = 1; + pub const MultisigDepositFactor: u64 = 1; + pub const MaxSignatories: u16 = 3; } impl Trait for Test { - type Event = TestEvent; - type Call = Call; - type Currency = Balances; - type MultisigDepositBase = MultisigDepositBase; - type MultisigDepositFactor = MultisigDepositFactor; - type MaxSignatories = MaxSignatories; + type Event = TestEvent; + type Call = Call; + type Currency = Balances; + type MultisigDepositBase = MultisigDepositBase; + type MultisigDepositFactor = MultisigDepositFactor; + type MaxSignatories = MaxSignatories; } type System = frame_system::Module; type Balances = pallet_balances::Module; @@ -111,333 +115,512 @@ use pallet_balances::Call as BalancesCall; use pallet_balances::Error as BalancesError; pub fn new_test_ext() -> sp_io::TestExternalities { - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 10)], - }.assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![(1, 10), (2, 10), (3, 10), (4, 10), (5, 10)], + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = 
sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext } fn last_event() -> TestEvent { - system::Module::::events().pop().map(|e| e.event).expect("Event expected") + system::Module::::events() + .pop() + .map(|e| e.event) + .expect("Event expected") } fn expect_event>(e: E) { - assert_eq!(last_event(), e.into()); + assert_eq!(last_event(), e.into()); } fn now() -> Timepoint { - Utility::timepoint() + Utility::timepoint() } #[test] fn multisig_deposit_is_taken_and_returned() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_eq!(Balances::free_balance(1), 2); - assert_eq!(Balances::reserved_balance(1), 3); - - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::reserved_balance(1), 0); - }); + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_ok!(Utility::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + call.clone() + )); + assert_eq!(Balances::free_balance(1), 2); + assert_eq!(Balances::reserved_balance(1), 3); + + assert_ok!(Utility::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + call + )); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::reserved_balance(1), 0); + }); } #[test] 
fn cancel_multisig_returns_deposit() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); - assert_eq!(Balances::free_balance(1), 6); - assert_eq!(Balances::reserved_balance(1), 4); - assert_ok!( - Utility::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::reserved_balance(1), 0); - }); + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone() + )); + assert_ok!(Utility::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone() + )); + assert_eq!(Balances::free_balance(1), 6); + assert_eq!(Balances::reserved_balance(1), 4); + assert_ok!(Utility::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::reserved_balance(1), 0); + }); } #[test] fn timepoint_checking_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - - assert_noop!( - Utility::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone()), - Error::::UnexpectedTimepoint, - ); - - 
assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); - - assert_noop!( - Utility::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone()), - Error::::NoTimepoint, - ); - let later = Timepoint { index: 1, .. now() }; - assert_noop!( - Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone()), - Error::::WrongTimepoint, - ); - }); + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + + assert_noop!( + Utility::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone()), + Error::::UnexpectedTimepoint, + ); + + assert_ok!(Utility::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash + )); + + assert_noop!( + Utility::as_multi(Origin::signed(2), 2, vec![1, 3], None, call.clone()), + Error::::NoTimepoint, + ); + let later = Timepoint { index: 1, ..now() }; + assert_noop!( + Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(later), call.clone()), + Error::::WrongTimepoint, + ); + }); } #[test] fn multisig_2_of_3_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash)); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), 
call)); - assert_eq!(Balances::free_balance(6), 15); - }); + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash + )); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Utility::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + call + )); + assert_eq!(Balances::free_balance(6), 15); + }); } #[test] fn multisig_3_of_3_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 3); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Utility::as_multi(Origin::signed(3), 3, vec![1, 2], Some(now()), call)); - assert_eq!(Balances::free_balance(6), 15); - }); + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 3); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + 
assert_ok!(Utility::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone() + )); + assert_ok!(Utility::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone() + )); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Utility::as_multi( + Origin::signed(3), + 3, + vec![1, 2], + Some(now()), + call + )); + assert_eq!(Balances::free_balance(6), 15); + }); } #[test] fn cancel_multisig_works() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 3, vec![2, 3], None, hash.clone())); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 3, vec![1, 3], Some(now()), hash.clone())); - assert_noop!( - Utility::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), - Error::::NotOwner, - ); - assert_ok!( - Utility::cancel_as_multi(Origin::signed(1), 3, vec![2, 3], now(), hash.clone()), - ); - }); + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + None, + hash.clone() + )); + assert_ok!(Utility::approve_as_multi( + Origin::signed(2), + 3, + vec![1, 3], + Some(now()), + hash.clone() + )); + assert_noop!( + Utility::cancel_as_multi(Origin::signed(2), 3, vec![1, 3], now(), hash.clone()), + Error::::NotOwner, + ); + assert_ok!(Utility::cancel_as_multi( + Origin::signed(1), + 3, + vec![2, 3], + now(), + hash.clone() + ),); + }); } #[test] fn multisig_2_of_3_as_multi_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call 
= Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_eq!(Balances::free_balance(6), 0); - - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call)); - assert_eq!(Balances::free_balance(6), 15); - }); + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_ok!(Utility::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + call.clone() + )); + assert_eq!(Balances::free_balance(6), 0); + + assert_ok!(Utility::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + call + )); + assert_eq!(Balances::free_balance(6), 15); + }); } #[test] fn multisig_2_of_3_as_multi_with_many_calls_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call1 = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); - let call2 = Box::new(Call::Balances(BalancesCall::transfer(7, 5))); - - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call1.clone())); - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], None, call2.clone())); - assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call2)); - assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call1)); - - assert_eq!(Balances::free_balance(6), 10); - assert_eq!(Balances::free_balance(7), 5); - }); + new_test_ext().execute_with(|| { + let multi = 
Utility::multi_account_id(&[1, 2, 3][..], 2); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call1 = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); + let call2 = Box::new(Call::Balances(BalancesCall::transfer(7, 5))); + + assert_ok!(Utility::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + call1.clone() + )); + assert_ok!(Utility::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + None, + call2.clone() + )); + assert_ok!(Utility::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + call2 + )); + assert_ok!(Utility::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + call1 + )); + + assert_eq!(Balances::free_balance(6), 10); + assert_eq!(Balances::free_balance(7), 5); + }); } #[test] fn multisig_2_of_3_cannot_reissue_same_call() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_ok!(Utility::as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), call.clone())); - assert_eq!(Balances::free_balance(multi), 5); - - assert_ok!(Utility::as_multi(Origin::signed(1), 2, vec![2, 3], None, call.clone())); - assert_ok!(Utility::as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), call.clone())); - - let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); - expect_event(RawEvent::MultisigExecuted(3, now(), multi, call.using_encoded(blake2_256), Err(err))); - }); + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 2); + 
assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 10))); + assert_ok!(Utility::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + call.clone() + )); + assert_ok!(Utility::as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + call.clone() + )); + assert_eq!(Balances::free_balance(multi), 5); + + assert_ok!(Utility::as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + call.clone() + )); + assert_ok!(Utility::as_multi( + Origin::signed(3), + 2, + vec![1, 2], + Some(now()), + call.clone() + )); + + let err = DispatchError::from(BalancesError::::InsufficientBalance).stripped(); + expect_event(RawEvent::MultisigExecuted( + 3, + now(), + multi, + call.using_encoded(blake2_256), + Err(err), + )); + }); } #[test] fn zero_threshold_fails() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_noop!( - Utility::as_multi(Origin::signed(1), 0, vec![2], None, call), - Error::::ZeroThreshold, - ); - }); + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_noop!( + Utility::as_multi(Origin::signed(1), 0, vec![2], None, call), + Error::::ZeroThreshold, + ); + }); } #[test] fn too_many_signatories_fails() { - new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - assert_noop!( - Utility::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone()), - Error::::TooManySignatories, - ); - }); + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + assert_noop!( + Utility::as_multi(Origin::signed(1), 2, vec![2, 3, 4], None, call.clone()), + Error::::TooManySignatories, + ); + }); } #[test] fn duplicate_approvals_are_ignored() { - 
new_test_ext().execute_with(|| { - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_ok!(Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], None, hash.clone())); - assert_noop!( - Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone()), - Error::::AlreadyApproved, - ); - assert_ok!(Utility::approve_as_multi(Origin::signed(2), 2, vec![1, 3], Some(now()), hash.clone())); - assert_noop!( - Utility::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone()), - Error::::NoApprovalsNeeded, - ); - }); + new_test_ext().execute_with(|| { + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_ok!(Utility::approve_as_multi( + Origin::signed(1), + 2, + vec![2, 3], + None, + hash.clone() + )); + assert_noop!( + Utility::approve_as_multi(Origin::signed(1), 2, vec![2, 3], Some(now()), hash.clone()), + Error::::AlreadyApproved, + ); + assert_ok!(Utility::approve_as_multi( + Origin::signed(2), + 2, + vec![1, 3], + Some(now()), + hash.clone() + )); + assert_noop!( + Utility::approve_as_multi(Origin::signed(3), 2, vec![1, 2], Some(now()), hash.clone()), + Error::::NoApprovalsNeeded, + ); + }); } #[test] fn multisig_1_of_3_works() { - new_test_ext().execute_with(|| { - let multi = Utility::multi_account_id(&[1, 2, 3][..], 1); - assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); - assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); - - let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); - let hash = call.using_encoded(blake2_256); - assert_noop!( - Utility::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone()), - Error::::NoApprovalsNeeded, - ); - assert_noop!( - Utility::as_multi(Origin::signed(4), 1, vec![2, 3], None, call.clone()), - BalancesError::::InsufficientBalance, - 
); - assert_ok!(Utility::as_multi(Origin::signed(1), 1, vec![2, 3], None, call)); - - assert_eq!(Balances::free_balance(6), 15); - }); + new_test_ext().execute_with(|| { + let multi = Utility::multi_account_id(&[1, 2, 3][..], 1); + assert_ok!(Balances::transfer(Origin::signed(1), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(2), multi, 5)); + assert_ok!(Balances::transfer(Origin::signed(3), multi, 5)); + + let call = Box::new(Call::Balances(BalancesCall::transfer(6, 15))); + let hash = call.using_encoded(blake2_256); + assert_noop!( + Utility::approve_as_multi(Origin::signed(1), 1, vec![2, 3], None, hash.clone()), + Error::::NoApprovalsNeeded, + ); + assert_noop!( + Utility::as_multi(Origin::signed(4), 1, vec![2, 3], None, call.clone()), + BalancesError::::InsufficientBalance, + ); + assert_ok!(Utility::as_multi( + Origin::signed(1), + 1, + vec![2, 3], + None, + call + )); + + assert_eq!(Balances::free_balance(6), 15); + }); } #[test] fn as_sub_works() { - new_test_ext().execute_with(|| { - let sub_1_0 = Utility::sub_account_id(1, 0); - assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); - assert_noop!(Utility::as_sub( - Origin::signed(1), - 1, - Box::new(Call::Balances(BalancesCall::transfer(6, 3))), - ), BalancesError::::InsufficientBalance); - assert_ok!(Utility::as_sub( - Origin::signed(1), - 0, - Box::new(Call::Balances(BalancesCall::transfer(2, 3))), - )); - assert_eq!(Balances::free_balance(sub_1_0), 2); - assert_eq!(Balances::free_balance(2), 13); - }); + new_test_ext().execute_with(|| { + let sub_1_0 = Utility::sub_account_id(1, 0); + assert_ok!(Balances::transfer(Origin::signed(1), sub_1_0, 5)); + assert_noop!( + Utility::as_sub( + Origin::signed(1), + 1, + Box::new(Call::Balances(BalancesCall::transfer(6, 3))), + ), + BalancesError::::InsufficientBalance + ); + assert_ok!(Utility::as_sub( + Origin::signed(1), + 0, + Box::new(Call::Balances(BalancesCall::transfer(2, 3))), + )); + assert_eq!(Balances::free_balance(sub_1_0), 2); + 
assert_eq!(Balances::free_balance(2), 13); + }); } #[test] fn batch_with_root_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - assert_ok!(Utility::batch(Origin::ROOT, vec![ - Call::Balances(BalancesCall::force_transfer(1, 2, 5)), - Call::Balances(BalancesCall::force_transfer(1, 2, 5)) - ])); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::free_balance(2), 20); - }); + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch( + Origin::ROOT, + vec![ + Call::Balances(BalancesCall::force_transfer(1, 2, 5)), + Call::Balances(BalancesCall::force_transfer(1, 2, 5)) + ] + )); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + }); } #[test] fn batch_with_signed_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::Balances(BalancesCall::transfer(2, 5)), - Call::Balances(BalancesCall::transfer(2, 5)) - ]), - ); - assert_eq!(Balances::free_balance(1), 0); - assert_eq!(Balances::free_balance(2), 20); - }); + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch( + Origin::signed(1), + vec![ + Call::Balances(BalancesCall::transfer(2, 5)), + Call::Balances(BalancesCall::transfer(2, 5)) + ] + ),); + assert_eq!(Balances::free_balance(1), 0); + assert_eq!(Balances::free_balance(2), 20); + }); } #[test] fn batch_early_exit_works() { - new_test_ext().execute_with(|| { - assert_eq!(Balances::free_balance(1), 10); - assert_eq!(Balances::free_balance(2), 10); - assert_ok!( - Utility::batch(Origin::signed(1), vec![ - Call::Balances(BalancesCall::transfer(2, 5)), - Call::Balances(BalancesCall::transfer(2, 10)), - 
Call::Balances(BalancesCall::transfer(2, 5)), - ]), - ); - assert_eq!(Balances::free_balance(1), 5); - assert_eq!(Balances::free_balance(2), 15); - }); + new_test_ext().execute_with(|| { + assert_eq!(Balances::free_balance(1), 10); + assert_eq!(Balances::free_balance(2), 10); + assert_ok!(Utility::batch( + Origin::signed(1), + vec![ + Call::Balances(BalancesCall::transfer(2, 5)), + Call::Balances(BalancesCall::transfer(2, 10)), + Call::Balances(BalancesCall::transfer(2, 5)), + ] + ),); + assert_eq!(Balances::free_balance(1), 5); + assert_eq!(Balances::free_balance(2), 15); + }); } diff --git a/frame/vesting/src/benchmarking.rs b/frame/vesting/src/benchmarking.rs index be2cb4cb2b..efd4812b63 100644 --- a/frame/vesting/src/benchmarking.rs +++ b/frame/vesting/src/benchmarking.rs @@ -20,9 +20,9 @@ use super::*; -use frame_system::{RawOrigin, Module as System}; +use frame_benchmarking::{account, benchmarks}; +use frame_system::{Module as System, RawOrigin}; use sp_io::hashing::blake2_256; -use frame_benchmarking::{benchmarks, account}; use crate::Module as Vesting; @@ -30,115 +30,117 @@ const SEED: u32 = 0; const MAX_LOCKS: u32 = 20; fn add_locks(l: u32) { - for id in 0..l { - let lock_id = <[u8; 8]>::decode(&mut &id.using_encoded(blake2_256)[..]) - .unwrap_or_default(); - let locker = account("locker", 0, SEED); - let locked = 1; - let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; - T::Currency::set_lock(lock_id, &locker, locked.into(), reasons); - } + for id in 0..l { + let lock_id = <[u8; 8]>::decode(&mut &id.using_encoded(blake2_256)[..]).unwrap_or_default(); + let locker = account("locker", 0, SEED); + let locked = 1; + let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; + T::Currency::set_lock(lock_id, &locker, locked.into(), reasons); + } } fn setup(b: u32) -> T::AccountId { - let locked = 1; - let per_block = 1; - let starting_block = 0; - - let caller = account("caller", 0, SEED); - System::::set_block_number(0.into()); - - // Add 
schedule to avoid `NotVesting` error. - let _ = Vesting::::add_vesting_schedule( - &caller, - locked.into(), - per_block.into(), - starting_block.into(), - ); - - // Set lock and block number to take different code paths. - let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; - T::Currency::set_lock(VESTING_ID, &caller, locked.into(), reasons); - System::::set_block_number(b.into()); - - caller + let locked = 1; + let per_block = 1; + let starting_block = 0; + + let caller = account("caller", 0, SEED); + System::::set_block_number(0.into()); + + // Add schedule to avoid `NotVesting` error. + let _ = Vesting::::add_vesting_schedule( + &caller, + locked.into(), + per_block.into(), + starting_block.into(), + ); + + // Set lock and block number to take different code paths. + let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; + T::Currency::set_lock(VESTING_ID, &caller, locked.into(), reasons); + System::::set_block_number(b.into()); + + caller } benchmarks! { - _ { - // Number of previous locks. - // It doesn't seems to influence the timings for lower values. - let l in 0 .. MAX_LOCKS => add_locks::(l); - } + _ { + // Number of previous locks. + // It doesn't seems to influence the timings for lower values. + let l in 0 .. 
MAX_LOCKS => add_locks::(l); + } - vest_locked { - let l in ...; + vest_locked { + let l in ...; - let caller = setup::(0u32); + let caller = setup::(0u32); - }: vest(RawOrigin::Signed(caller)) + }: vest(RawOrigin::Signed(caller)) - vest_not_locked { - let l in ...; + vest_not_locked { + let l in ...; - let caller = setup::(1u32); + let caller = setup::(1u32); - }: vest(RawOrigin::Signed(caller)) + }: vest(RawOrigin::Signed(caller)) - vest_other_locked { - let l in ...; + vest_other_locked { + let l in ...; - let other: T::AccountId = setup::(0u32); - let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); + let other: T::AccountId = setup::(0u32); + let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); - let caller = account("caller", 0, SEED); + let caller = account("caller", 0, SEED); - }: vest_other(RawOrigin::Signed(caller), other_lookup) + }: vest_other(RawOrigin::Signed(caller), other_lookup) - vest_other_not_locked { - let l in ...; + vest_other_not_locked { + let l in ...; - let other: T::AccountId = setup::(1u32); - let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); + let other: T::AccountId = setup::(1u32); + let other_lookup: ::Source = T::Lookup::unlookup(other.clone()); - let caller = account("caller", 0, SEED); + let caller = account("caller", 0, SEED); - }: vest_other(RawOrigin::Signed(caller), other_lookup) + }: vest_other(RawOrigin::Signed(caller), other_lookup) - vested_transfer { - let u in 0 .. 1000; + vested_transfer { + let u in 0 .. 
1000; - let from = account("from", u, SEED); - let to = account("to", u, SEED); - let to_lookup: ::Source = T::Lookup::unlookup(to); + let from = account("from", u, SEED); + let to = account("to", u, SEED); + let to_lookup: ::Source = T::Lookup::unlookup(to); - let transfer_amount = T::MinVestedTransfer::get(); + let transfer_amount = T::MinVestedTransfer::get(); - let vesting_schedule = VestingInfo { - locked: transfer_amount, - per_block: 1.into(), - starting_block: 0.into(), - }; + let vesting_schedule = VestingInfo { + locked: transfer_amount, + per_block: 1.into(), + starting_block: 0.into(), + }; - let _ = T::Currency::make_free_balance_be(&from, transfer_amount * 10.into()); + let _ = T::Currency::make_free_balance_be(&from, transfer_amount * 10.into()); - }: _(RawOrigin::Signed(from), to_lookup, vesting_schedule) + }: _(RawOrigin::Signed(from), to_lookup, vesting_schedule) } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{ExtBuilder, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - ExtBuilder::default().existential_deposit(256).build().execute_with(|| { - assert_ok!(test_benchmark_vest_locked::()); - assert_ok!(test_benchmark_vest_not_locked::()); - assert_ok!(test_benchmark_vest_other_locked::()); - assert_ok!(test_benchmark_vest_other_not_locked::()); - assert_ok!(test_benchmark_vested_transfer::()); - }); - } + use super::*; + use crate::tests::{ExtBuilder, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + assert_ok!(test_benchmark_vest_locked::()); + assert_ok!(test_benchmark_vest_not_locked::()); + assert_ok!(test_benchmark_vest_other_locked::()); + assert_ok!(test_benchmark_vest_other_not_locked::()); + assert_ok!(test_benchmark_vested_transfer::()); + }); + } } diff --git a/frame/vesting/src/lib.rs b/frame/vesting/src/lib.rs index 85545d92b0..8a0537319f 100644 --- a/frame/vesting/src/lib.rs +++ 
b/frame/vesting/src/lib.rs @@ -46,36 +46,38 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use sp_std::fmt::Debug; -use codec::{Encode, Decode}; -use sp_runtime::{DispatchResult, RuntimeDebug, traits::{ - StaticLookup, Zero, AtLeast32Bit, MaybeSerializeDeserialize, Convert -}}; -use frame_support::{decl_module, decl_event, decl_storage, decl_error, ensure}; +use codec::{Decode, Encode}; use frame_support::traits::{ - Currency, LockableCurrency, VestingSchedule, WithdrawReason, LockIdentifier, - ExistenceRequirement, Get + Currency, ExistenceRequirement, Get, LockIdentifier, LockableCurrency, VestingSchedule, + WithdrawReason, }; use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; +use frame_support::{decl_error, decl_event, decl_module, decl_storage, ensure}; use frame_system::{self as system, ensure_signed}; +use sp_runtime::{ + traits::{AtLeast32Bit, Convert, MaybeSerializeDeserialize, StaticLookup, Zero}, + DispatchResult, RuntimeDebug, +}; +use sp_std::fmt::Debug; +use sp_std::prelude::*; mod benchmarking; -type BalanceOf = <::Currency as Currency<::AccountId>>::Balance; +type BalanceOf = + <::Currency as Currency<::AccountId>>::Balance; pub trait Trait: frame_system::Trait { - /// The overarching event type. - type Event: From> + Into<::Event>; + /// The overarching event type. + type Event: From> + Into<::Event>; - /// The currency trait. - type Currency: LockableCurrency; + /// The currency trait. + type Currency: LockableCurrency; - /// Convert the block number into a balance. - type BlockNumberToBalance: Convert>; + /// Convert the block number into a balance. + type BlockNumberToBalance: Convert>; - /// The minimum amount transferred to call `vested_transfer`. - type MinVestedTransfer: Get>; + /// The minimum amount transferred to call `vested_transfer`. 
+ type MinVestedTransfer: Get>; } const VESTING_ID: LockIdentifier = *b"vesting "; @@ -83,624 +85,650 @@ const VESTING_ID: LockIdentifier = *b"vesting "; /// Struct to encode the vesting schedule of an individual account. #[derive(Encode, Decode, Copy, Clone, PartialEq, Eq, RuntimeDebug)] pub struct VestingInfo { - /// Locked amount at genesis. - pub locked: Balance, - /// Amount that gets unlocked every block after `starting_block`. - pub per_block: Balance, - /// Starting block for unlocking(vesting). - pub starting_block: BlockNumber, + /// Locked amount at genesis. + pub locked: Balance, + /// Amount that gets unlocked every block after `starting_block`. + pub per_block: Balance, + /// Starting block for unlocking(vesting). + pub starting_block: BlockNumber, } -impl< - Balance: AtLeast32Bit + Copy, - BlockNumber: AtLeast32Bit + Copy, -> VestingInfo { - /// Amount locked at block `n`. - pub fn locked_at< - BlockNumberToBalance: Convert - >(&self, n: BlockNumber) -> Balance { - // Number of blocks that count toward vesting - // Saturating to 0 when n < starting_block - let vested_block_count = n.saturating_sub(self.starting_block); - let vested_block_count = BlockNumberToBalance::convert(vested_block_count); - // Return amount that is still locked in vesting - let maybe_balance = vested_block_count.checked_mul(&self.per_block); - if let Some(balance) = maybe_balance { - self.locked.saturating_sub(balance) - } else { - Zero::zero() - } - } +impl + VestingInfo +{ + /// Amount locked at block `n`. 
+ pub fn locked_at>( + &self, + n: BlockNumber, + ) -> Balance { + // Number of blocks that count toward vesting + // Saturating to 0 when n < starting_block + let vested_block_count = n.saturating_sub(self.starting_block); + let vested_block_count = BlockNumberToBalance::convert(vested_block_count); + // Return amount that is still locked in vesting + let maybe_balance = vested_block_count.checked_mul(&self.per_block); + if let Some(balance) = maybe_balance { + self.locked.saturating_sub(balance) + } else { + Zero::zero() + } + } } decl_storage! { - trait Store for Module as Vesting { - /// Information regarding the vesting of a given account. - pub Vesting get(fn vesting): - map hasher(blake2_128_concat) T::AccountId - => Option, T::BlockNumber>>; - } - add_extra_genesis { - config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf)>; - build(|config: &GenesisConfig| { - use sp_runtime::traits::Saturating; - // Generate initial vesting configuration - // * who - Account which we are generating vesting configuration for - // * begin - Block when the account will start to vest - // * length - Number of blocks from `begin` until fully vested - // * liquid - Number of units which can be spent before vesting begins - for &(ref who, begin, length, liquid) in config.vesting.iter() { - let balance = T::Currency::free_balance(who); - assert!(!balance.is_zero(), "Currencies must be init'd before vesting"); - // Total genesis `balance` minus `liquid` equals funds locked for vesting - let locked = balance.saturating_sub(liquid); - let length_as_balance = T::BlockNumberToBalance::convert(length); - let per_block = locked / length_as_balance.max(sp_runtime::traits::One::one()); - - Vesting::::insert(who, VestingInfo { - locked: locked, - per_block: per_block, - starting_block: begin - }); - let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; - T::Currency::set_lock(VESTING_ID, who, locked, reasons); - } - }) - } + trait Store for Module as 
Vesting { + /// Information regarding the vesting of a given account. + pub Vesting get(fn vesting): + map hasher(blake2_128_concat) T::AccountId + => Option, T::BlockNumber>>; + } + add_extra_genesis { + config(vesting): Vec<(T::AccountId, T::BlockNumber, T::BlockNumber, BalanceOf)>; + build(|config: &GenesisConfig| { + use sp_runtime::traits::Saturating; + // Generate initial vesting configuration + // * who - Account which we are generating vesting configuration for + // * begin - Block when the account will start to vest + // * length - Number of blocks from `begin` until fully vested + // * liquid - Number of units which can be spent before vesting begins + for &(ref who, begin, length, liquid) in config.vesting.iter() { + let balance = T::Currency::free_balance(who); + assert!(!balance.is_zero(), "Currencies must be init'd before vesting"); + // Total genesis `balance` minus `liquid` equals funds locked for vesting + let locked = balance.saturating_sub(liquid); + let length_as_balance = T::BlockNumberToBalance::convert(length); + let per_block = locked / length_as_balance.max(sp_runtime::traits::One::one()); + + Vesting::::insert(who, VestingInfo { + locked: locked, + per_block: per_block, + starting_block: begin + }); + let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; + T::Currency::set_lock(VESTING_ID, who, locked, reasons); + } + }) + } } decl_event!( - pub enum Event where AccountId = ::AccountId, Balance = BalanceOf { - /// The amount vested has been updated. This could indicate more funds are available. The - /// balance given is the amount which is left unvested (and thus locked). - VestingUpdated(AccountId, Balance), - /// An account (given) has become fully vested. No further vesting can happen. - VestingCompleted(AccountId), - } + pub enum Event + where + AccountId = ::AccountId, + Balance = BalanceOf, + { + /// The amount vested has been updated. This could indicate more funds are available. 
The + /// balance given is the amount which is left unvested (and thus locked). + VestingUpdated(AccountId, Balance), + /// An account (given) has become fully vested. No further vesting can happen. + VestingCompleted(AccountId), + } ); decl_error! { - /// Error for the vesting module. - pub enum Error for Module { - /// The account given is not vesting. - NotVesting, - /// An existing vesting schedule already exists for this account that cannot be clobbered. - ExistingVestingSchedule, - /// Amount being transferred is too low to create a vesting schedule. - AmountLow, - } + /// Error for the vesting module. + pub enum Error for Module { + /// The account given is not vesting. + NotVesting, + /// An existing vesting schedule already exists for this account that cannot be clobbered. + ExistingVestingSchedule, + /// Amount being transferred is too low to create a vesting schedule. + AmountLow, + } } decl_module! { - // Simple declaration of the `Module` type. Lets the macro know what it's working on. - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// The minimum amount to be transferred to create a new vesting schedule. - const MinVestedTransfer: BalanceOf = T::MinVestedTransfer::get(); - - fn deposit_event() = default; - - /// Unlock any vested funds of the sender account. - /// - /// The dispatch origin for this call must be _Signed_ and the sender must have funds still - /// locked under this module. - /// - /// Emits either `VestingCompleted` or `VestingUpdated`. - /// - /// # - /// - `O(1)`. - /// - One balance-lock operation. - /// - One storage read (codec `O(1)`) and up to one removal. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn vest(origin) -> DispatchResult { - let who = ensure_signed(origin)?; - Self::update_lock(who) - } - - /// Unlock any vested funds of a `target` account. - /// - /// The dispatch origin for this call must be _Signed_. 
- /// - /// - `target`: The account whose vested funds should be unlocked. Must have funds still - /// locked under this module. - /// - /// Emits either `VestingCompleted` or `VestingUpdated`. - /// - /// # - /// - `O(1)`. - /// - Up to one account lookup. - /// - One balance-lock operation. - /// - One storage read (codec `O(1)`) and up to one removal. - /// - One event. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn vest_other(origin, target: ::Source) -> DispatchResult { - ensure_signed(origin)?; - Self::update_lock(T::Lookup::lookup(target)?) - } - - /// Create a vested transfer. - /// - /// The dispatch origin for this call must be _Signed_. - /// - /// - `target`: The account that should be transferred the vested funds. - /// - `amount`: The amount of funds to transfer and will be vested. - /// - `schedule`: The vesting schedule attached to the transfer. - /// - /// Emits `VestingCreated`. - /// - /// # - /// - Creates a new storage entry, but is protected by a minimum transfer - /// amount needed to succeed. - /// # - #[weight = SimpleDispatchInfo::FixedNormal(1_000_000_000)] - pub fn vested_transfer( - origin, - target: ::Source, - schedule: VestingInfo, T::BlockNumber>, - ) -> DispatchResult { - let transactor = ensure_signed(origin)?; - ensure!(schedule.locked >= T::MinVestedTransfer::get(), Error::::AmountLow); - - let who = T::Lookup::lookup(target)?; - ensure!(!Vesting::::contains_key(&who), Error::::ExistingVestingSchedule); - - T::Currency::transfer(&transactor, &who, schedule.locked, ExistenceRequirement::AllowDeath)?; - - Self::add_vesting_schedule(&who, schedule.locked, schedule.per_block, schedule.starting_block) - .expect("user does not have an existing vesting schedule; q.e.d."); - - Ok(()) - } - } + // Simple declaration of the `Module` type. Lets the macro know what it's working on. 
+ pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + /// The minimum amount to be transferred to create a new vesting schedule. + const MinVestedTransfer: BalanceOf = T::MinVestedTransfer::get(); + + fn deposit_event() = default; + + /// Unlock any vested funds of the sender account. + /// + /// The dispatch origin for this call must be _Signed_ and the sender must have funds still + /// locked under this module. + /// + /// Emits either `VestingCompleted` or `VestingUpdated`. + /// + /// # + /// - `O(1)`. + /// - One balance-lock operation. + /// - One storage read (codec `O(1)`) and up to one removal. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn vest(origin) -> DispatchResult { + let who = ensure_signed(origin)?; + Self::update_lock(who) + } + + /// Unlock any vested funds of a `target` account. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `target`: The account whose vested funds should be unlocked. Must have funds still + /// locked under this module. + /// + /// Emits either `VestingCompleted` or `VestingUpdated`. + /// + /// # + /// - `O(1)`. + /// - Up to one account lookup. + /// - One balance-lock operation. + /// - One storage read (codec `O(1)`) and up to one removal. + /// - One event. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn vest_other(origin, target: ::Source) -> DispatchResult { + ensure_signed(origin)?; + Self::update_lock(T::Lookup::lookup(target)?) + } + + /// Create a vested transfer. + /// + /// The dispatch origin for this call must be _Signed_. + /// + /// - `target`: The account that should be transferred the vested funds. + /// - `amount`: The amount of funds to transfer and will be vested. + /// - `schedule`: The vesting schedule attached to the transfer. + /// + /// Emits `VestingCreated`. 
+ /// + /// # + /// - Creates a new storage entry, but is protected by a minimum transfer + /// amount needed to succeed. + /// # + #[weight = SimpleDispatchInfo::FixedNormal(1_000_000_000)] + pub fn vested_transfer( + origin, + target: ::Source, + schedule: VestingInfo, T::BlockNumber>, + ) -> DispatchResult { + let transactor = ensure_signed(origin)?; + ensure!(schedule.locked >= T::MinVestedTransfer::get(), Error::::AmountLow); + + let who = T::Lookup::lookup(target)?; + ensure!(!Vesting::::contains_key(&who), Error::::ExistingVestingSchedule); + + T::Currency::transfer(&transactor, &who, schedule.locked, ExistenceRequirement::AllowDeath)?; + + Self::add_vesting_schedule(&who, schedule.locked, schedule.per_block, schedule.starting_block) + .expect("user does not have an existing vesting schedule; q.e.d."); + + Ok(()) + } + } } impl Module { - /// (Re)set or remove the module's currency lock on `who`'s account in accordance with their - /// current unvested amount. - fn update_lock(who: T::AccountId) -> DispatchResult { - let vesting = Self::vesting(&who).ok_or(Error::::NotVesting)?; - let now = >::block_number(); - let locked_now = vesting.locked_at::(now); - - if locked_now.is_zero() { - T::Currency::remove_lock(VESTING_ID, &who); - Vesting::::remove(&who); - Self::deposit_event(RawEvent::VestingCompleted(who)); - } else { - let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; - T::Currency::set_lock(VESTING_ID, &who, locked_now, reasons); - Self::deposit_event(RawEvent::VestingUpdated(who, locked_now)); - } - Ok(()) - } + /// (Re)set or remove the module's currency lock on `who`'s account in accordance with their + /// current unvested amount. 
+ fn update_lock(who: T::AccountId) -> DispatchResult { + let vesting = Self::vesting(&who).ok_or(Error::::NotVesting)?; + let now = >::block_number(); + let locked_now = vesting.locked_at::(now); + + if locked_now.is_zero() { + T::Currency::remove_lock(VESTING_ID, &who); + Vesting::::remove(&who); + Self::deposit_event(RawEvent::VestingCompleted(who)); + } else { + let reasons = WithdrawReason::Transfer | WithdrawReason::Reserve; + T::Currency::set_lock(VESTING_ID, &who, locked_now, reasons); + Self::deposit_event(RawEvent::VestingUpdated(who, locked_now)); + } + Ok(()) + } } -impl VestingSchedule for Module where - BalanceOf: MaybeSerializeDeserialize + Debug +impl VestingSchedule for Module +where + BalanceOf: MaybeSerializeDeserialize + Debug, { - type Moment = T::BlockNumber; - type Currency = T::Currency; - - /// Get the amount that is currently being vested and cannot be transferred out of this account. - fn vesting_balance(who: &T::AccountId) -> Option> { - if let Some(v) = Self::vesting(who) { - let now = >::block_number(); - let locked_now = v.locked_at::(now); - Some(T::Currency::free_balance(who).min(locked_now)) - } else { - None - } - } - - /// Adds a vesting schedule to a given account. - /// - /// If there already exists a vesting schedule for the given account, an `Err` is returned - /// and nothing is updated. - /// - /// On success, a linearly reducing amount of funds will be locked. In order to realise any - /// reduction of the lock over time as it diminishes, the account owner must use `vest` or - /// `vest_other`. - /// - /// Is a no-op if the amount to be vested is zero. - fn add_vesting_schedule( - who: &T::AccountId, - locked: BalanceOf, - per_block: BalanceOf, - starting_block: T::BlockNumber - ) -> DispatchResult { - if locked.is_zero() { return Ok(()) } - if Vesting::::contains_key(who) { - Err(Error::::ExistingVestingSchedule)? 
- } - let vesting_schedule = VestingInfo { - locked, - per_block, - starting_block - }; - Vesting::::insert(who, vesting_schedule); - // it can't fail, but even if somehow it did, we don't really care. - let _ = Self::update_lock(who.clone()); - Ok(()) - } - - /// Remove a vesting schedule for a given account. - fn remove_vesting_schedule(who: &T::AccountId) { - Vesting::::remove(who); - // it can't fail, but even if somehow it did, we don't really care. - let _ = Self::update_lock(who.clone()); - } + type Moment = T::BlockNumber; + type Currency = T::Currency; + + /// Get the amount that is currently being vested and cannot be transferred out of this account. + fn vesting_balance(who: &T::AccountId) -> Option> { + if let Some(v) = Self::vesting(who) { + let now = >::block_number(); + let locked_now = v.locked_at::(now); + Some(T::Currency::free_balance(who).min(locked_now)) + } else { + None + } + } + + /// Adds a vesting schedule to a given account. + /// + /// If there already exists a vesting schedule for the given account, an `Err` is returned + /// and nothing is updated. + /// + /// On success, a linearly reducing amount of funds will be locked. In order to realise any + /// reduction of the lock over time as it diminishes, the account owner must use `vest` or + /// `vest_other`. + /// + /// Is a no-op if the amount to be vested is zero. + fn add_vesting_schedule( + who: &T::AccountId, + locked: BalanceOf, + per_block: BalanceOf, + starting_block: T::BlockNumber, + ) -> DispatchResult { + if locked.is_zero() { + return Ok(()); + } + if Vesting::::contains_key(who) { + Err(Error::::ExistingVestingSchedule)? + } + let vesting_schedule = VestingInfo { + locked, + per_block, + starting_block, + }; + Vesting::::insert(who, vesting_schedule); + // it can't fail, but even if somehow it did, we don't really care. + let _ = Self::update_lock(who.clone()); + Ok(()) + } + + /// Remove a vesting schedule for a given account. 
+ fn remove_vesting_schedule(who: &T::AccountId) { + Vesting::::remove(who); + // it can't fail, but even if somehow it did, we don't really care. + let _ = Self::update_lock(who.clone()); + } } #[cfg(test)] mod tests { - use super::*; - - use std::cell::RefCell; - use frame_support::{ - assert_ok, assert_noop, impl_outer_origin, parameter_types, weights::Weight, - traits::Get - }; - use sp_core::H256; - // The testing primitives are very useful for avoiding having to work with signatures - // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. - use sp_runtime::{ - Perbill, - testing::Header, - traits::{BlakeTwo256, IdentityLookup, Identity}, - }; - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - // For testing the pallet, we construct most of a mock runtime. This means - // first constructing a configuration type (`Test`) which `impl`s each of the - // configuration traits of pallets we want to use. - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Call = (); - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = pallet_balances::AccountData; - type OnNewAccount = (); - type OnKilledAccount = (); - } - impl pallet_balances::Trait for Test { - type Balance = u64; - type DustRemoval = (); - type Event = (); - type ExistentialDeposit = ExistentialDeposit; - type AccountStore = System; - } - parameter_types! { - pub const MinVestedTransfer: u64 = 256 * 2; - } - impl Trait for Test { - type Event = (); - type Currency = Balances; - type BlockNumberToBalance = Identity; - type MinVestedTransfer = MinVestedTransfer; - } - type System = frame_system::Module; - type Balances = pallet_balances::Module; - type Vesting = Module; - - thread_local! 
{ - static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); - } - pub struct ExistentialDeposit; - impl Get for ExistentialDeposit { - fn get() -> u64 { EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) } - } - - pub struct ExtBuilder { - existential_deposit: u64, - } - impl Default for ExtBuilder { - fn default() -> Self { - Self { - existential_deposit: 1, - } - } - } - impl ExtBuilder { - pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { - self.existential_deposit = existential_deposit; - self - } - pub fn build(self) -> sp_io::TestExternalities { - EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); - let mut t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - pallet_balances::GenesisConfig:: { - balances: vec![ - (1, 10 * self.existential_deposit), - (2, 20 * self.existential_deposit), - (3, 30 * self.existential_deposit), - (4, 40 * self.existential_deposit), - (12, 10 * self.existential_deposit) - ], - }.assimilate_storage(&mut t).unwrap(); - GenesisConfig:: { - vesting: vec![ - (1, 0, 10, 5 * self.existential_deposit), - (2, 10, 20, 0), - (12, 10, 20, 5 * self.existential_deposit) - ], - }.assimilate_storage(&mut t).unwrap(); - let mut ext = sp_io::TestExternalities::new(t); - ext.execute_with(|| System::set_block_number(1)); - ext - } - } - - #[test] - fn check_vesting_status() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - let user2_free_balance = Balances::free_balance(&2); - let user12_free_balance = Balances::free_balance(&12); - assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance - assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance - assert_eq!(user12_free_balance, 256 * 10); // Account 12 has free balance - let user1_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 128, // Vesting over 10 blocks - starting_block: 0, - }; - let 
user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule - - // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 - assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); - // Account 2 has their full balance locked - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has only their illiquid funds locked - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(10); - assert_eq!(System::block_number(), 10); - - // Account 1 has fully vested by block 10 - assert_eq!(Vesting::vesting_balance(&1), Some(0)); - // Account 2 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); - // Account 12 has started vesting by block 10 - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative - assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 - assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 - - }); - } - - #[test] - fn unvested_balance_should_not_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - 
assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_noop!( - Balances::transfer(Some(1).into(), 2, 56), - pallet_balances::Error::::LiquidityRestrictions, - ); // Account 1 cannot send more than vested amount - }); - } - - #[test] - fn vested_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); - } - - #[test] - fn vested_balance_should_transfer_using_vest_other() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 100); // Account 1 has free balance - // Account 1 has only 5 units vested at block 1 (plus 50 unvested) - assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest_other(Some(2).into(), 1)); - assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); - }); - } - - #[test] - fn extra_balance_should_transfer() { - ExtBuilder::default() - .existential_deposit(10) - .build() - .execute_with(|| { - assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); - assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); - - let user1_free_balance = Balances::free_balance(&1); - assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal - - let user2_free_balance = Balances::free_balance(&2); - assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal - - // Account 1 has only 5 units vested at block 1 (plus 150 unvested) - 
assert_eq!(Vesting::vesting_balance(&1), Some(45)); - assert_ok!(Vesting::vest(Some(1).into())); - assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained - - // Account 2 has no units vested at block 1, but gained 100 - assert_eq!(Vesting::vesting_balance(&2), Some(200)); - assert_ok!(Vesting::vest(Some(2).into())); - assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained - }); - } - - #[test] - fn liquid_funds_should_transfer_with_delayed_vesting() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user12_free_balance = Balances::free_balance(&12); - - assert_eq!(user12_free_balance, 2560); // Account 12 has free balance - // Account 12 has liquid funds - assert_eq!(Vesting::vesting_balance(&12), Some(user12_free_balance - 256 * 5)); - - // Account 12 has delayed vesting - let user12_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); - - // Account 12 can still send liquid funds - assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); - }); - } - - #[test] - fn vested_transfer_works() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user3_free_balance = Balances::free_balance(&3); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user3_free_balance, 256 * 30); - assert_eq!(user4_free_balance, 256 * 40); - // Account 4 should not have any vesting yet. - assert_eq!(Vesting::vesting(&4), None); - // Make the schedule for the new transfer. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_ok!(Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule)); - // Now account 4 should have vesting. 
- assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); - // Ensure the transfer happened correctly. - let user3_free_balance_updated = Balances::free_balance(&3); - assert_eq!(user3_free_balance_updated, 256 * 25); - let user4_free_balance_updated = Balances::free_balance(&4); - assert_eq!(user4_free_balance_updated, 256 * 45); - // Account 4 has 5 * 256 locked. - assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); - - System::set_block_number(20); - assert_eq!(System::block_number(), 20); - - // Account 4 has 5 * 64 units vested by block 20. - assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); - - System::set_block_number(30); - assert_eq!(System::block_number(), 30); - - // Account 4 has fully vested. - assert_eq!(Vesting::vesting_balance(&4), Some(0)); - }); - } - - #[test] - fn vested_transfer_correctly_fails() { - ExtBuilder::default() - .existential_deposit(256) - .build() - .execute_with(|| { - let user2_free_balance = Balances::free_balance(&2); - let user4_free_balance = Balances::free_balance(&4); - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - // Account 2 should already have a vesting schedule. - let user2_vesting_schedule = VestingInfo { - locked: 256 * 20, - per_block: 256, // Vesting over 20 blocks - starting_block: 10, - }; - assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); - - // The vesting schedule we will try to create, fails due to pre-existence of schedule. - let new_vesting_schedule = VestingInfo { - locked: 256 * 5, - per_block: 64, // Vesting over 20 blocks - starting_block: 10, - }; - assert_noop!( - Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule), - Error::::ExistingVestingSchedule, - ); - - // Fails due to too low transfer amount. 
- let new_vesting_schedule_too_low = VestingInfo { - locked: 256 * 1, - per_block: 64, - starting_block: 10, - }; - assert_noop!( - Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), - Error::::AmountLow, - ); - - // Verify no currency transfer happened. - assert_eq!(user2_free_balance, 256 * 20); - assert_eq!(user4_free_balance, 256 * 40); - }); - } + use super::*; + + use frame_support::{ + assert_noop, assert_ok, impl_outer_origin, parameter_types, traits::Get, weights::Weight, + }; + use sp_core::H256; + use std::cell::RefCell; + // The testing primitives are very useful for avoiding having to work with signatures + // or public keys. `u64` is used as the `AccountId` and no `Signature`s are required. + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, Identity, IdentityLookup}, + Perbill, + }; + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + // For testing the pallet, we construct most of a mock runtime. This means + // first constructing a configuration type (`Test`) which `impl`s each of the + // configuration traits of pallets we want to use. + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Call = (); + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = pallet_balances::AccountData; + type OnNewAccount = (); + type OnKilledAccount = (); + } + impl pallet_balances::Trait for Test { + type Balance = u64; + type DustRemoval = (); + type Event = (); + type ExistentialDeposit = ExistentialDeposit; + type AccountStore = System; + } + parameter_types! { + pub const MinVestedTransfer: u64 = 256 * 2; + } + impl Trait for Test { + type Event = (); + type Currency = Balances; + type BlockNumberToBalance = Identity; + type MinVestedTransfer = MinVestedTransfer; + } + type System = frame_system::Module; + type Balances = pallet_balances::Module; + type Vesting = Module; + + thread_local! 
{ + static EXISTENTIAL_DEPOSIT: RefCell = RefCell::new(0); + } + pub struct ExistentialDeposit; + impl Get for ExistentialDeposit { + fn get() -> u64 { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow()) + } + } + + pub struct ExtBuilder { + existential_deposit: u64, + } + impl Default for ExtBuilder { + fn default() -> Self { + Self { + existential_deposit: 1, + } + } + } + impl ExtBuilder { + pub fn existential_deposit(mut self, existential_deposit: u64) -> Self { + self.existential_deposit = existential_deposit; + self + } + pub fn build(self) -> sp_io::TestExternalities { + EXISTENTIAL_DEPOSIT.with(|v| *v.borrow_mut() = self.existential_deposit); + let mut t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + pallet_balances::GenesisConfig:: { + balances: vec![ + (1, 10 * self.existential_deposit), + (2, 20 * self.existential_deposit), + (3, 30 * self.existential_deposit), + (4, 40 * self.existential_deposit), + (12, 10 * self.existential_deposit), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + GenesisConfig:: { + vesting: vec![ + (1, 0, 10, 5 * self.existential_deposit), + (2, 10, 20, 0), + (12, 10, 20, 5 * self.existential_deposit), + ], + } + .assimilate_storage(&mut t) + .unwrap(); + let mut ext = sp_io::TestExternalities::new(t); + ext.execute_with(|| System::set_block_number(1)); + ext + } + } + + #[test] + fn check_vesting_status() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + let user2_free_balance = Balances::free_balance(&2); + let user12_free_balance = Balances::free_balance(&12); + assert_eq!(user1_free_balance, 256 * 10); // Account 1 has free balance + assert_eq!(user2_free_balance, 256 * 20); // Account 2 has free balance + assert_eq!(user12_free_balance, 256 * 10); // Account 12 has free balance + let user1_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 128, // Vesting over 10 blocks + starting_block: 0, + }; 
+ let user2_vesting_schedule = VestingInfo { + locked: 256 * 20, + per_block: 256, // Vesting over 20 blocks + starting_block: 10, + }; + let user12_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&1), Some(user1_vesting_schedule)); // Account 1 has a vesting schedule + assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); // Account 2 has a vesting schedule + assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); // Account 12 has a vesting schedule + + // Account 1 has only 128 units vested from their illiquid 256 * 5 units at block 1 + assert_eq!(Vesting::vesting_balance(&1), Some(128 * 9)); + // Account 2 has their full balance locked + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has only their illiquid funds locked + assert_eq!( + Vesting::vesting_balance(&12), + Some(user12_free_balance - 256 * 5) + ); + + System::set_block_number(10); + assert_eq!(System::block_number(), 10); + + // Account 1 has fully vested by block 10 + assert_eq!(Vesting::vesting_balance(&1), Some(0)); + // Account 2 has started vesting by block 10 + assert_eq!(Vesting::vesting_balance(&2), Some(user2_free_balance)); + // Account 12 has started vesting by block 10 + assert_eq!( + Vesting::vesting_balance(&12), + Some(user12_free_balance - 256 * 5) + ); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + assert_eq!(Vesting::vesting_balance(&1), Some(0)); // Account 1 is still fully vested, and not negative + assert_eq!(Vesting::vesting_balance(&2), Some(0)); // Account 2 has fully vested by block 30 + assert_eq!(Vesting::vesting_balance(&12), Some(0)); // Account 2 has fully vested by block 30 + }); + } + + #[test] + fn unvested_balance_should_not_transfer() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + 
assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_noop!( + Balances::transfer(Some(1).into(), 2, 56), + pallet_balances::Error::::LiquidityRestrictions, + ); // Account 1 cannot send more than vested amount + }); + } + + #[test] + fn vested_balance_should_transfer() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + }); + } + + #[test] + fn vested_balance_should_transfer_using_vest_other() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 100); // Account 1 has free balance + // Account 1 has only 5 units vested at block 1 (plus 50 unvested) + assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest_other(Some(2).into(), 1)); + assert_ok!(Balances::transfer(Some(1).into(), 2, 55)); + }); + } + + #[test] + fn extra_balance_should_transfer() { + ExtBuilder::default() + .existential_deposit(10) + .build() + .execute_with(|| { + assert_ok!(Balances::transfer(Some(3).into(), 1, 100)); + assert_ok!(Balances::transfer(Some(3).into(), 2, 100)); + + let user1_free_balance = Balances::free_balance(&1); + assert_eq!(user1_free_balance, 200); // Account 1 has 100 more free balance than normal + + let user2_free_balance = Balances::free_balance(&2); + assert_eq!(user2_free_balance, 300); // Account 2 has 100 more free balance than normal + + // Account 1 has only 5 units vested at block 1 (plus 150 unvested) + 
assert_eq!(Vesting::vesting_balance(&1), Some(45)); + assert_ok!(Vesting::vest(Some(1).into())); + assert_ok!(Balances::transfer(Some(1).into(), 3, 155)); // Account 1 can send extra units gained + + // Account 2 has no units vested at block 1, but gained 100 + assert_eq!(Vesting::vesting_balance(&2), Some(200)); + assert_ok!(Vesting::vest(Some(2).into())); + assert_ok!(Balances::transfer(Some(2).into(), 3, 100)); // Account 2 can send extra units gained + }); + } + + #[test] + fn liquid_funds_should_transfer_with_delayed_vesting() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user12_free_balance = Balances::free_balance(&12); + + assert_eq!(user12_free_balance, 2560); // Account 12 has free balance + // Account 12 has liquid funds + assert_eq!( + Vesting::vesting_balance(&12), + Some(user12_free_balance - 256 * 5) + ); + + // Account 12 has delayed vesting + let user12_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&12), Some(user12_vesting_schedule)); + + // Account 12 can still send liquid funds + assert_ok!(Balances::transfer(Some(12).into(), 3, 256 * 5)); + }); + } + + #[test] + fn vested_transfer_works() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user3_free_balance = Balances::free_balance(&3); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user3_free_balance, 256 * 30); + assert_eq!(user4_free_balance, 256 * 40); + // Account 4 should not have any vesting yet. + assert_eq!(Vesting::vesting(&4), None); + // Make the schedule for the new transfer. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_ok!(Vesting::vested_transfer( + Some(3).into(), + 4, + new_vesting_schedule + )); + // Now account 4 should have vesting. 
+ assert_eq!(Vesting::vesting(&4), Some(new_vesting_schedule)); + // Ensure the transfer happened correctly. + let user3_free_balance_updated = Balances::free_balance(&3); + assert_eq!(user3_free_balance_updated, 256 * 25); + let user4_free_balance_updated = Balances::free_balance(&4); + assert_eq!(user4_free_balance_updated, 256 * 45); + // Account 4 has 5 * 256 locked. + assert_eq!(Vesting::vesting_balance(&4), Some(256 * 5)); + + System::set_block_number(20); + assert_eq!(System::block_number(), 20); + + // Account 4 has 5 * 64 units vested by block 20. + assert_eq!(Vesting::vesting_balance(&4), Some(10 * 64)); + + System::set_block_number(30); + assert_eq!(System::block_number(), 30); + + // Account 4 has fully vested. + assert_eq!(Vesting::vesting_balance(&4), Some(0)); + }); + } + + #[test] + fn vested_transfer_correctly_fails() { + ExtBuilder::default() + .existential_deposit(256) + .build() + .execute_with(|| { + let user2_free_balance = Balances::free_balance(&2); + let user4_free_balance = Balances::free_balance(&4); + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + // Account 2 should already have a vesting schedule. + let user2_vesting_schedule = VestingInfo { + locked: 256 * 20, + per_block: 256, // Vesting over 20 blocks + starting_block: 10, + }; + assert_eq!(Vesting::vesting(&2), Some(user2_vesting_schedule)); + + // The vesting schedule we will try to create, fails due to pre-existence of schedule. + let new_vesting_schedule = VestingInfo { + locked: 256 * 5, + per_block: 64, // Vesting over 20 blocks + starting_block: 10, + }; + assert_noop!( + Vesting::vested_transfer(Some(4).into(), 2, new_vesting_schedule), + Error::::ExistingVestingSchedule, + ); + + // Fails due to too low transfer amount. 
+ let new_vesting_schedule_too_low = VestingInfo { + locked: 256 * 1, + per_block: 64, + starting_block: 10, + }; + assert_noop!( + Vesting::vested_transfer(Some(3).into(), 4, new_vesting_schedule_too_low), + Error::::AmountLow, + ); + + // Verify no currency transfer happened. + assert_eq!(user2_free_balance, 256 * 20); + assert_eq!(user4_free_balance, 256 * 40); + }); + } } diff --git a/primitives/allocator/src/error.rs b/primitives/allocator/src/error.rs index 9357bc4560..c5bbe62013 100644 --- a/primitives/allocator/src/error.rs +++ b/primitives/allocator/src/error.rs @@ -18,21 +18,24 @@ #[derive(sp_core::RuntimeDebug)] #[cfg_attr(feature = "std", derive(derive_more::Display))] pub enum Error { - /// Someone tried to allocate more memory than the allowed maximum per allocation. - #[cfg_attr(feature = "std", display(fmt="Requested allocation size is too large"))] - RequestedAllocationTooLarge, - /// Allocator run out of space. - #[cfg_attr(feature = "std", display(fmt="Allocator ran out of space"))] - AllocatorOutOfSpace, - /// Some other error occurred. - Other(&'static str) + /// Someone tried to allocate more memory than the allowed maximum per allocation. + #[cfg_attr( + feature = "std", + display(fmt = "Requested allocation size is too large") + )] + RequestedAllocationTooLarge, + /// Allocator run out of space. + #[cfg_attr(feature = "std", display(fmt = "Allocator ran out of space"))] + AllocatorOutOfSpace, + /// Some other error occurred. 
+ Other(&'static str), } #[cfg(feature = "std")] impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - _ => None, - } - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + _ => None, + } + } } diff --git a/primitives/allocator/src/freeing_bump.rs b/primitives/allocator/src/freeing_bump.rs index 0d15ed11f7..073aea886f 100644 --- a/primitives/allocator/src/freeing_bump.rs +++ b/primitives/allocator/src/freeing_bump.rs @@ -45,7 +45,10 @@ //! allocation to the linked list for the respective order. use crate::Error; -use sp_std::{convert::{TryFrom, TryInto}, ops::{Range, Index, IndexMut}}; +use sp_std::{ + convert::{TryFrom, TryInto}, + ops::{Index, IndexMut, Range}, +}; use sp_wasm_interface::{Pointer, WordSize}; /// The minimal alignment guaranteed by this allocator. The alignment of 8 is chosen because it is @@ -67,7 +70,7 @@ const HEADER_SIZE: u32 = 8; /// Create an allocator error. fn error(msg: &'static str) -> Error { - Error::Other(msg) + Error::Other(msg) } /// A custom "trace" implementation that is only activated when `feature = std`. @@ -96,54 +99,54 @@ macro_rules! trace { struct Order(u32); impl Order { - /// Create `Order` object from a raw order. - /// - /// Returns `Err` if it is greater than the maximum supported order. - fn from_raw(order: u32) -> Result { - if order < N as u32 { - Ok(Self(order)) - } else { - Err(error("invalid order")) - } - } - - /// Compute the order by the given size - /// - /// The size is clamped, so that the following holds: - /// - /// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION` - fn from_size(size: u32) -> Result { - let clamped_size = if size > MAX_POSSIBLE_ALLOCATION { - return Err(Error::RequestedAllocationTooLarge); - } else if size < MIN_POSSIBLE_ALLOCATION { - MIN_POSSIBLE_ALLOCATION - } else { - size - }; - - // Round the clamped size to the next power of two. 
- // - // It returns the unchanged value if the value is already a power of two. - let power_of_two_size = clamped_size.next_power_of_two(); - - // Compute the number of trailing zeroes to get the order. We adjust it by the number of - // trailing zeroes in the minimum possible allocation. - let order = power_of_two_size.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros(); - - Ok(Self(order)) - } - - /// Returns the corresponding size for this order. - /// - /// Note that it is always a power of two. - fn size(&self) -> u32 { - MIN_POSSIBLE_ALLOCATION << self.0 - } - - /// Extract the order as `u32`. - fn into_raw(self) -> u32 { - self.0 - } + /// Create `Order` object from a raw order. + /// + /// Returns `Err` if it is greater than the maximum supported order. + fn from_raw(order: u32) -> Result { + if order < N as u32 { + Ok(Self(order)) + } else { + Err(error("invalid order")) + } + } + + /// Compute the order by the given size + /// + /// The size is clamped, so that the following holds: + /// + /// `MIN_POSSIBLE_ALLOCATION <= size <= MAX_POSSIBLE_ALLOCATION` + fn from_size(size: u32) -> Result { + let clamped_size = if size > MAX_POSSIBLE_ALLOCATION { + return Err(Error::RequestedAllocationTooLarge); + } else if size < MIN_POSSIBLE_ALLOCATION { + MIN_POSSIBLE_ALLOCATION + } else { + size + }; + + // Round the clamped size to the next power of two. + // + // It returns the unchanged value if the value is already a power of two. + let power_of_two_size = clamped_size.next_power_of_two(); + + // Compute the number of trailing zeroes to get the order. We adjust it by the number of + // trailing zeroes in the minimum possible allocation. + let order = power_of_two_size.trailing_zeros() - MIN_POSSIBLE_ALLOCATION.trailing_zeros(); + + Ok(Self(order)) + } + + /// Returns the corresponding size for this order. + /// + /// Note that it is always a power of two. + fn size(&self) -> u32 { + MIN_POSSIBLE_ALLOCATION << self.0 + } + + /// Extract the order as `u32`. 
+ fn into_raw(self) -> u32 { + self.0 + } } /// A marker for denoting the end of the linked list. @@ -152,29 +155,29 @@ const EMPTY_MARKER: u32 = u32::max_value(); /// A link between headers in the free list. #[derive(Clone, Copy, Debug, PartialEq, Eq)] enum Link { - /// Null, denotes that there is no next element. - Null, - /// Link to the next element represented as a pointer to the a header. - Ptr(u32), + /// Null, denotes that there is no next element. + Null, + /// Link to the next element represented as a pointer to the a header. + Ptr(u32), } impl Link { - /// Creates a link from raw value. - fn from_raw(raw: u32) -> Self { - if raw != EMPTY_MARKER { - Self::Ptr(raw) - } else { - Self::Null - } - } - - /// Converts this link into a raw u32. - fn into_raw(self) -> u32 { - match self { - Self::Null => EMPTY_MARKER, - Self::Ptr(ptr) => ptr, - } - } + /// Creates a link from raw value. + fn from_raw(raw: u32) -> Self { + if raw != EMPTY_MARKER { + Self::Ptr(raw) + } else { + Self::Null + } + } + + /// Converts this link into a raw u32. + fn into_raw(self) -> u32 { + match self { + Self::Null => EMPTY_MARKER, + Self::Ptr(ptr) => ptr, + } + } } /// A header of an allocation. @@ -200,592 +203,616 @@ impl Link { /// ``` #[derive(Clone, Debug, PartialEq, Eq)] enum Header { - /// A free header contains a link to the next element to form a free linked list. - Free(Link), - /// An occupied header has attached order to know in which free list we should put the - /// allocation upon deallocation. - Occupied(Order), + /// A free header contains a link to the next element to form a free linked list. + Free(Link), + /// An occupied header has attached order to know in which free list we should put the + /// allocation upon deallocation. 
+ Occupied(Order), } impl Header { - fn read_from(memory: &M, header_ptr: u32) -> Result { - let raw_header = memory.read_le_u64(header_ptr)?; - - // Check if the header represents an occupied or free allocation and extract the header data - // by trimming (and discarding) the high bits. - let occupied = raw_header & 0x00000001_00000000 != 0; - let header_data = raw_header as u32; - - Ok(if occupied { - Self::Occupied(Order::from_raw(header_data)?) - } else { - Self::Free(Link::from_raw(header_data)) - }) - } - - /// Write out this header to memory. - fn write_into(&self, memory: &mut M, header_ptr: u32) -> Result<(), Error> { - let (header_data, occupied_mask) = match *self { - Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000), - Self::Free(link) => (link.into_raw(), 0x00000000_00000000), - }; - let raw_header = header_data as u64 | occupied_mask; - memory.write_le_u64(header_ptr, raw_header)?; - Ok(()) - } - - /// Returns the order of the allocation if this is an occupied header. - fn into_occupied(self) -> Option { - match self { - Self::Occupied(order) => Some(order), - _ => None, - } - } - - /// Returns the link to the next element in the free list if this is a free header. - fn into_free(self) -> Option { - match self { - Self::Free(link) => Some(link), - _ => None, - } - } + fn read_from(memory: &M, header_ptr: u32) -> Result { + let raw_header = memory.read_le_u64(header_ptr)?; + + // Check if the header represents an occupied or free allocation and extract the header data + // by trimming (and discarding) the high bits. + let occupied = raw_header & 0x00000001_00000000 != 0; + let header_data = raw_header as u32; + + Ok(if occupied { + Self::Occupied(Order::from_raw(header_data)?) + } else { + Self::Free(Link::from_raw(header_data)) + }) + } + + /// Write out this header to memory. 
+ fn write_into(&self, memory: &mut M, header_ptr: u32) -> Result<(), Error> { + let (header_data, occupied_mask) = match *self { + Self::Occupied(order) => (order.into_raw(), 0x00000001_00000000), + Self::Free(link) => (link.into_raw(), 0x00000000_00000000), + }; + let raw_header = header_data as u64 | occupied_mask; + memory.write_le_u64(header_ptr, raw_header)?; + Ok(()) + } + + /// Returns the order of the allocation if this is an occupied header. + fn into_occupied(self) -> Option { + match self { + Self::Occupied(order) => Some(order), + _ => None, + } + } + + /// Returns the link to the next element in the free list if this is a free header. + fn into_free(self) -> Option { + match self { + Self::Free(link) => Some(link), + _ => None, + } + } } /// This struct represents a collection of intrusive linked lists for each order. struct FreeLists { - heads: [Link; N], + heads: [Link; N], } impl FreeLists { - /// Creates the free empty lists. - fn new() -> Self { - Self { - heads: [Link::Null; N] - } - } - - /// Replaces a given link for the specified order and returns the old one. - fn replace(&mut self, order: Order, new: Link) -> Link { - let prev = self[order]; - self[order] = new; - prev - } + /// Creates the free empty lists. + fn new() -> Self { + Self { + heads: [Link::Null; N], + } + } + + /// Replaces a given link for the specified order and returns the old one. 
+ fn replace(&mut self, order: Order, new: Link) -> Link { + let prev = self[order]; + self[order] = new; + prev + } } impl Index for FreeLists { - type Output = Link; - fn index(&self, index: Order) -> &Link { - &self.heads[index.0 as usize] - } + type Output = Link; + fn index(&self, index: Order) -> &Link { + &self.heads[index.0 as usize] + } } impl IndexMut for FreeLists { - fn index_mut(&mut self, index: Order) -> &mut Link { - &mut self.heads[index.0 as usize] - } + fn index_mut(&mut self, index: Order) -> &mut Link { + &mut self.heads[index.0 as usize] + } } /// An implementation of freeing bump allocator. /// /// Refer to the module-level documentation for further details. pub struct FreeingBumpHeapAllocator { - bumper: u32, - free_lists: FreeLists, - total_size: u32, + bumper: u32, + free_lists: FreeLists, + total_size: u32, } impl FreeingBumpHeapAllocator { - /// Creates a new allocation heap which follows a freeing-bump strategy. - /// The maximum size which can be allocated at once is 16 MiB. - /// - /// # Arguments - /// - /// - `heap_base` - the offset from the beginning of the linear memory where the heap starts. - pub fn new(heap_base: u32) -> Self { - let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT; - - FreeingBumpHeapAllocator { - bumper: aligned_heap_base, - free_lists: FreeLists::new(), - total_size: 0, - } - } - - /// Gets requested number of bytes to allocate and returns a pointer. - /// The maximum size which can be allocated at once is 16 MiB. - /// There is no minimum size, but whatever size is passed into - /// this function is rounded to the next power of two. If the requested - /// size is below 8 bytes it will be rounded up to 8 bytes. - /// - /// # Arguments - /// - /// - `mem` - a slice representing the linear memory on which this allocator operates. 
- /// - `size` - size in bytes of the allocation request - pub fn allocate( - &mut self, - mem: &mut M, - size: WordSize, - ) -> Result, Error> { - let order = Order::from_size(size)?; - - let header_ptr: u32 = match self.free_lists[order] { - Link::Ptr(header_ptr) => { - assert!( - header_ptr + order.size() + HEADER_SIZE <= mem.size(), - "Pointer is looked up in list of free entries, into which + /// Creates a new allocation heap which follows a freeing-bump strategy. + /// The maximum size which can be allocated at once is 16 MiB. + /// + /// # Arguments + /// + /// - `heap_base` - the offset from the beginning of the linear memory where the heap starts. + pub fn new(heap_base: u32) -> Self { + let aligned_heap_base = (heap_base + ALIGNMENT - 1) / ALIGNMENT * ALIGNMENT; + + FreeingBumpHeapAllocator { + bumper: aligned_heap_base, + free_lists: FreeLists::new(), + total_size: 0, + } + } + + /// Gets requested number of bytes to allocate and returns a pointer. + /// The maximum size which can be allocated at once is 16 MiB. + /// There is no minimum size, but whatever size is passed into + /// this function is rounded to the next power of two. If the requested + /// size is below 8 bytes it will be rounded up to 8 bytes. + /// + /// # Arguments + /// + /// - `mem` - a slice representing the linear memory on which this allocator operates. + /// - `size` - size in bytes of the allocation request + pub fn allocate( + &mut self, + mem: &mut M, + size: WordSize, + ) -> Result, Error> { + let order = Order::from_size(size)?; + + let header_ptr: u32 = match self.free_lists[order] { + Link::Ptr(header_ptr) => { + assert!( + header_ptr + order.size() + HEADER_SIZE <= mem.size(), + "Pointer is looked up in list of free entries, into which only valid values are inserted; qed" - ); - - // Remove this header from the free list. - let next_free = Header::read_from(mem, header_ptr)? 
- .into_free() - .ok_or_else(|| error("free list points to a occupied header"))?; - self.free_lists[order] = next_free; - - header_ptr - } - Link::Null => { - // Corresponding free list is empty. Allocate a new item. - self.bump(order.size() + HEADER_SIZE, mem.size())? - } - }; - - // Write the order in the occupied header. - Header::Occupied(order).write_into(mem, header_ptr)?; - - self.total_size += order.size() + HEADER_SIZE; - trace!("Heap size is {} bytes after allocation", self.total_size); - - Ok(Pointer::new(header_ptr + HEADER_SIZE)) - } - - /// Deallocates the space which was allocated for a pointer. - /// - /// # Arguments - /// - /// - `mem` - a slice representing the linear memory on which this allocator operates. - /// - `ptr` - pointer to the allocated chunk - pub fn deallocate(&mut self, mem: &mut M, ptr: Pointer) -> Result<(), Error> { - let header_ptr = u32::from(ptr) - .checked_sub(HEADER_SIZE) - .ok_or_else(|| error("Invalid pointer for deallocation"))?; - - let order = Header::read_from(mem, header_ptr)? - .into_occupied() - .ok_or_else(|| error("the allocation points to an empty header"))?; - - // Update the just freed header and knit it back to the free list. - let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr)); - Header::Free(prev_head).write_into(mem, header_ptr)?; - - // Do the total_size book keeping. - self.total_size = self - .total_size - .checked_sub(order.size() + HEADER_SIZE) - .ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?; - trace!("Heap size is {} bytes after deallocation", self.total_size); - - Ok(()) - } - - /// Increases the `bumper` by `size`. - /// - /// Returns the `bumper` from before the increase. - /// Returns an `Error::AllocatorOutOfSpace` if the operation - /// would exhaust the heap. 
- fn bump(&mut self, size: u32, heap_end: u32) -> Result { - if self.bumper + size > heap_end { - return Err(Error::AllocatorOutOfSpace); - } - - let res = self.bumper; - self.bumper += size; - Ok(res) - } + ); + + // Remove this header from the free list. + let next_free = Header::read_from(mem, header_ptr)? + .into_free() + .ok_or_else(|| error("free list points to a occupied header"))?; + self.free_lists[order] = next_free; + + header_ptr + } + Link::Null => { + // Corresponding free list is empty. Allocate a new item. + self.bump(order.size() + HEADER_SIZE, mem.size())? + } + }; + + // Write the order in the occupied header. + Header::Occupied(order).write_into(mem, header_ptr)?; + + self.total_size += order.size() + HEADER_SIZE; + trace!("Heap size is {} bytes after allocation", self.total_size); + + Ok(Pointer::new(header_ptr + HEADER_SIZE)) + } + + /// Deallocates the space which was allocated for a pointer. + /// + /// # Arguments + /// + /// - `mem` - a slice representing the linear memory on which this allocator operates. + /// - `ptr` - pointer to the allocated chunk + pub fn deallocate( + &mut self, + mem: &mut M, + ptr: Pointer, + ) -> Result<(), Error> { + let header_ptr = u32::from(ptr) + .checked_sub(HEADER_SIZE) + .ok_or_else(|| error("Invalid pointer for deallocation"))?; + + let order = Header::read_from(mem, header_ptr)? + .into_occupied() + .ok_or_else(|| error("the allocation points to an empty header"))?; + + // Update the just freed header and knit it back to the free list. + let prev_head = self.free_lists.replace(order, Link::Ptr(header_ptr)); + Header::Free(prev_head).write_into(mem, header_ptr)?; + + // Do the total_size book keeping. + self.total_size = self + .total_size + .checked_sub(order.size() + HEADER_SIZE) + .ok_or_else(|| error("Unable to subtract from total heap size without overflow"))?; + trace!("Heap size is {} bytes after deallocation", self.total_size); + + Ok(()) + } + + /// Increases the `bumper` by `size`. 
+ /// + /// Returns the `bumper` from before the increase. + /// Returns an `Error::AllocatorOutOfSpace` if the operation + /// would exhaust the heap. + fn bump(&mut self, size: u32, heap_end: u32) -> Result { + if self.bumper + size > heap_end { + return Err(Error::AllocatorOutOfSpace); + } + + let res = self.bumper; + self.bumper += size; + Ok(res) + } } /// A trait for abstraction of accesses to linear memory. pub trait Memory { - /// Read a u64 from the heap in LE form. Used to read heap allocation prefixes. - fn read_le_u64(&self, ptr: u32) -> Result; - /// Write a u64 to the heap in LE form. Used to write heap allocation prefixes. - fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error>; - /// Returns the full size of the memory. - fn size(&self) -> u32; + /// Read a u64 from the heap in LE form. Used to read heap allocation prefixes. + fn read_le_u64(&self, ptr: u32) -> Result; + /// Write a u64 to the heap in LE form. Used to write heap allocation prefixes. + fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error>; + /// Returns the full size of the memory. 
+ fn size(&self) -> u32; } impl Memory for [u8] { - fn read_le_u64(&self, ptr: u32) -> Result { - let range = - heap_range(ptr, 8, self.len()).ok_or_else(|| error("read out of heap bounds"))?; - let bytes = self[range] - .try_into() - .expect("[u8] slice of length 8 must be convertible to [u8; 8]"); - Ok(u64::from_le_bytes(bytes)) - } - fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> { - let range = - heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?; - let bytes = val.to_le_bytes(); - &mut self[range].copy_from_slice(&bytes[..]); - Ok(()) - } - fn size(&self) -> u32 { - u32::try_from(self.len()).expect("size of Wasm linear memory is <2^32; qed") - } + fn read_le_u64(&self, ptr: u32) -> Result { + let range = + heap_range(ptr, 8, self.len()).ok_or_else(|| error("read out of heap bounds"))?; + let bytes = self[range] + .try_into() + .expect("[u8] slice of length 8 must be convertible to [u8; 8]"); + Ok(u64::from_le_bytes(bytes)) + } + fn write_le_u64(&mut self, ptr: u32, val: u64) -> Result<(), Error> { + let range = + heap_range(ptr, 8, self.len()).ok_or_else(|| error("write out of heap bounds"))?; + let bytes = val.to_le_bytes(); + &mut self[range].copy_from_slice(&bytes[..]); + Ok(()) + } + fn size(&self) -> u32 { + u32::try_from(self.len()).expect("size of Wasm linear memory is <2^32; qed") + } } fn heap_range(offset: u32, length: u32, heap_len: usize) -> Option> { - let start = offset as usize; - let end = offset.checked_add(length)? as usize; - if end <= heap_len { - Some(start..end) - } else { - None - } + let start = offset as usize; + let end = offset.checked_add(length)? as usize; + if end <= heap_len { + Some(start..end) + } else { + None + } } #[cfg(test)] mod tests { - use super::*; - - const PAGE_SIZE: u32 = 65536; - - /// Makes a pointer out of the given address. 
- fn to_pointer(address: u32) -> Pointer { - Pointer::new(address) - } - - #[test] - fn should_allocate_properly() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - - // when - let ptr = heap.allocate(&mut mem[..], 1).unwrap(); - - // then - // returned pointer must start right after `HEADER_SIZE` - assert_eq!(ptr, to_pointer(HEADER_SIZE)); - } - - #[test] - fn should_always_align_pointers_to_multiples_of_8() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(13); - - // when - let ptr = heap.allocate(&mut mem[..], 1).unwrap(); - - // then - // the pointer must start at the next multiple of 8 from 13 - // + the prefix of 8 bytes. - assert_eq!(ptr, to_pointer(24)); - } - - #[test] - fn should_increment_pointers_properly() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - - // when - let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); - let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); - let ptr3 = heap.allocate(&mut mem[..], 1).unwrap(); - - // then - // a prefix of 8 bytes is prepended to each pointer - assert_eq!(ptr1, to_pointer(HEADER_SIZE)); - - // the prefix of 8 bytes + the content of ptr1 padded to the lowest possible - // item size of 8 bytes + the prefix of ptr1 - assert_eq!(ptr2, to_pointer(24)); - - // ptr2 + its content of 16 bytes + the prefix of 8 bytes - assert_eq!(ptr3, to_pointer(24 + 16 + HEADER_SIZE)); - } - - #[test] - fn should_free_properly() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); - // the prefix of 8 bytes is prepended to the pointer - assert_eq!(ptr1, to_pointer(HEADER_SIZE)); - - let ptr2 = heap.allocate(&mut mem[..], 1).unwrap(); - // the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer - assert_eq!(ptr2, to_pointer(24)); - - // 
when - heap.deallocate(&mut mem[..], ptr2).unwrap(); - - // then - // then the heads table should contain a pointer to the - // prefix of ptr2 in the leftmost entry - assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE)); - } - - #[test] - fn should_deallocate_and_reallocate_properly() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let padded_offset = 16; - let mut heap = FreeingBumpHeapAllocator::new(13); - - let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); - // the prefix of 8 bytes is prepended to the pointer - assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE)); - - let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); - // the padded_offset + the previously allocated ptr (8 bytes prefix + - // 8 bytes content) + the prefix of 8 bytes which is prepended to the - // current pointer - assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE)); - - // when - heap.deallocate(&mut mem[..], ptr2).unwrap(); - let ptr3 = heap.allocate(&mut mem[..], 9).unwrap(); - - // then - // should have re-allocated - assert_eq!(ptr3, to_pointer(padded_offset + 16 + HEADER_SIZE)); - assert_eq!(heap.free_lists.heads, [Link::Null; N]); - } - - #[test] - fn should_build_linked_list_of_free_areas_properly() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - - let ptr1 = heap.allocate(&mut mem[..], 8).unwrap(); - let ptr2 = heap.allocate(&mut mem[..], 8).unwrap(); - let ptr3 = heap.allocate(&mut mem[..], 8).unwrap(); - - // when - heap.deallocate(&mut mem[..], ptr1).unwrap(); - heap.deallocate(&mut mem[..], ptr2).unwrap(); - heap.deallocate(&mut mem[..], ptr3).unwrap(); - - // then - assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr3) - HEADER_SIZE)); - - let ptr4 = heap.allocate(&mut mem[..], 8).unwrap(); - assert_eq!(ptr4, ptr3); - - assert_eq!(heap.free_lists.heads[0], Link::Ptr(u32::from(ptr2) - HEADER_SIZE)); - } - - #[test] - fn should_not_allocate_if_too_large() { - 
// given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(13); - - // when - let ptr = heap.allocate(&mut mem[..], PAGE_SIZE - 13); - - // then - match ptr.unwrap_err() { - Error::AllocatorOutOfSpace => {}, - e => panic!("Expected allocator out of space error, got: {:?}", e), - } - } - - #[test] - fn should_not_allocate_if_full() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - let ptr1 = heap.allocate(&mut mem[..], (PAGE_SIZE / 2) - HEADER_SIZE).unwrap(); - assert_eq!(ptr1, to_pointer(HEADER_SIZE)); - - // when - let ptr2 = heap.allocate(&mut mem[..], PAGE_SIZE / 2); - - // then - // there is no room for another half page incl. its 8 byte prefix - match ptr2.unwrap_err() { - Error::AllocatorOutOfSpace => {}, - e => panic!("Expected allocator out of space error, got: {:?}", e), - } - } - - #[test] - fn should_allocate_max_possible_allocation_size() { - // given - let mut mem = vec![0u8; (MAX_POSSIBLE_ALLOCATION + PAGE_SIZE) as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - - // when - let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION).unwrap(); - - // then - assert_eq!(ptr, to_pointer(HEADER_SIZE)); - } - - #[test] - fn should_not_allocate_if_requested_size_too_large() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - - // when - let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION + 1); - - // then - match ptr.unwrap_err() { - Error::RequestedAllocationTooLarge => {}, - e => panic!("Expected allocation size too large error, got: {:?}", e), - } - } - - #[test] - fn should_return_error_when_bumper_greater_than_heap_size() { - // given - let mut mem = [0u8; 64]; - let mut heap = FreeingBumpHeapAllocator::new(0); - - let ptr1 = heap.allocate(&mut mem[..], 32).unwrap(); - assert_eq!(ptr1, to_pointer(HEADER_SIZE)); - heap.deallocate(&mut mem[..], ptr1).expect("failed freeing 
ptr1"); - assert_eq!(heap.total_size, 0); - assert_eq!(heap.bumper, 40); - - let ptr2 = heap.allocate(&mut mem[..], 16).unwrap(); - assert_eq!(ptr2, to_pointer(48)); - heap.deallocate(&mut mem[..], ptr2).expect("failed freeing ptr2"); - assert_eq!(heap.total_size, 0); - assert_eq!(heap.bumper, 64); - - // when - // the `bumper` value is equal to `size` here and any - // further allocation which would increment the bumper must fail. - // we try to allocate 8 bytes here, which will increment the - // bumper since no 8 byte item has been allocated+freed before. - let ptr = heap.allocate(&mut mem[..], 8); - - // then - match ptr.unwrap_err() { - Error::AllocatorOutOfSpace => {}, - e => panic!("Expected allocator out of space error, got: {:?}", e), - } - } - - #[test] - fn should_include_prefixes_in_total_heap_size() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(1); - - // when - // an item size of 16 must be used then - heap.allocate(&mut mem[..], 9).unwrap(); - - // then - assert_eq!(heap.total_size, HEADER_SIZE + 16); - } - - #[test] - fn should_calculate_total_heap_size_to_zero() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(13); - - // when - let ptr = heap.allocate(&mut mem[..], 42).unwrap(); - assert_eq!(ptr, to_pointer(16 + HEADER_SIZE)); - heap.deallocate(&mut mem[..], ptr).unwrap(); - - // then - assert_eq!(heap.total_size, 0); - } - - #[test] - fn should_calculate_total_size_of_zero() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - let mut heap = FreeingBumpHeapAllocator::new(19); - - // when - for _ in 1..10 { - let ptr = heap.allocate(&mut mem[..], 42).unwrap(); - heap.deallocate(&mut mem[..], ptr).unwrap(); - } - - // then - assert_eq!(heap.total_size, 0); - } - - #[test] - fn should_read_and_write_u64_correctly() { - // given - let mut mem = [0u8; PAGE_SIZE as usize]; - - // when - Memory::write_le_u64(mem.as_mut(), 40, 
4480113).unwrap(); - - // then - let value = Memory::read_le_u64(mem.as_mut(), 40).unwrap(); - assert_eq!(value, 4480113); - } - - #[test] - fn should_get_item_size_from_order() { - // given - let raw_order = 0; - - // when - let item_size = Order::from_raw(raw_order).unwrap().size(); - - // then - assert_eq!(item_size, 8); - } - - #[test] - fn should_get_max_item_size_from_index() { - // given - let raw_order = 21; - - // when - let item_size = Order::from_raw(raw_order).unwrap().size(); - - // then - assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION); - } - - #[test] - fn deallocate_needs_to_maintain_linked_list() { - let mut mem = [0u8; 8 * 2 * 4 + ALIGNMENT as usize]; - let mut heap = FreeingBumpHeapAllocator::new(0); - - // Allocate and free some pointers - let ptrs = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::>(); - ptrs.into_iter().for_each(|ptr| heap.deallocate(&mut mem[..], ptr).unwrap()); - - // Second time we should be able to allocate all of them again. - let _ = (0..4).map(|_| heap.allocate(&mut mem[..], 8).unwrap()).collect::>(); - } - - #[test] - fn header_read_write() { - let roundtrip = |header: Header| { - let mut memory = [0u8; 32]; - header.write_into(memory.as_mut(), 0).unwrap(); - - let read_header = Header::read_from(memory.as_mut(), 0).unwrap(); - assert_eq!(header, read_header); - }; - - roundtrip(Header::Occupied(Order(0))); - roundtrip(Header::Occupied(Order(1))); - roundtrip(Header::Free(Link::Null)); - roundtrip(Header::Free(Link::Ptr(0))); - roundtrip(Header::Free(Link::Ptr(4))); - } + use super::*; + + const PAGE_SIZE: u32 = 65536; + + /// Makes a pointer out of the given address. 
+ fn to_pointer(address: u32) -> Pointer { + Pointer::new(address) + } + + #[test] + fn should_allocate_properly() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + // when + let ptr = heap.allocate(&mut mem[..], 1).unwrap(); + + // then + // returned pointer must start right after `HEADER_SIZE` + assert_eq!(ptr, to_pointer(HEADER_SIZE)); + } + + #[test] + fn should_always_align_pointers_to_multiples_of_8() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(13); + + // when + let ptr = heap.allocate(&mut mem[..], 1).unwrap(); + + // then + // the pointer must start at the next multiple of 8 from 13 + // + the prefix of 8 bytes. + assert_eq!(ptr, to_pointer(24)); + } + + #[test] + fn should_increment_pointers_properly() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + // when + let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); + let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); + let ptr3 = heap.allocate(&mut mem[..], 1).unwrap(); + + // then + // a prefix of 8 bytes is prepended to each pointer + assert_eq!(ptr1, to_pointer(HEADER_SIZE)); + + // the prefix of 8 bytes + the content of ptr1 padded to the lowest possible + // item size of 8 bytes + the prefix of ptr1 + assert_eq!(ptr2, to_pointer(24)); + + // ptr2 + its content of 16 bytes + the prefix of 8 bytes + assert_eq!(ptr3, to_pointer(24 + 16 + HEADER_SIZE)); + } + + #[test] + fn should_free_properly() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); + // the prefix of 8 bytes is prepended to the pointer + assert_eq!(ptr1, to_pointer(HEADER_SIZE)); + + let ptr2 = heap.allocate(&mut mem[..], 1).unwrap(); + // the prefix of 8 bytes + the content of ptr 1 is prepended to the pointer + assert_eq!(ptr2, to_pointer(24)); + + // 
when + heap.deallocate(&mut mem[..], ptr2).unwrap(); + + // then + // then the heads table should contain a pointer to the + // prefix of ptr2 in the leftmost entry + assert_eq!( + heap.free_lists.heads[0], + Link::Ptr(u32::from(ptr2) - HEADER_SIZE) + ); + } + + #[test] + fn should_deallocate_and_reallocate_properly() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let padded_offset = 16; + let mut heap = FreeingBumpHeapAllocator::new(13); + + let ptr1 = heap.allocate(&mut mem[..], 1).unwrap(); + // the prefix of 8 bytes is prepended to the pointer + assert_eq!(ptr1, to_pointer(padded_offset + HEADER_SIZE)); + + let ptr2 = heap.allocate(&mut mem[..], 9).unwrap(); + // the padded_offset + the previously allocated ptr (8 bytes prefix + + // 8 bytes content) + the prefix of 8 bytes which is prepended to the + // current pointer + assert_eq!(ptr2, to_pointer(padded_offset + 16 + HEADER_SIZE)); + + // when + heap.deallocate(&mut mem[..], ptr2).unwrap(); + let ptr3 = heap.allocate(&mut mem[..], 9).unwrap(); + + // then + // should have re-allocated + assert_eq!(ptr3, to_pointer(padded_offset + 16 + HEADER_SIZE)); + assert_eq!(heap.free_lists.heads, [Link::Null; N]); + } + + #[test] + fn should_build_linked_list_of_free_areas_properly() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + let ptr1 = heap.allocate(&mut mem[..], 8).unwrap(); + let ptr2 = heap.allocate(&mut mem[..], 8).unwrap(); + let ptr3 = heap.allocate(&mut mem[..], 8).unwrap(); + + // when + heap.deallocate(&mut mem[..], ptr1).unwrap(); + heap.deallocate(&mut mem[..], ptr2).unwrap(); + heap.deallocate(&mut mem[..], ptr3).unwrap(); + + // then + assert_eq!( + heap.free_lists.heads[0], + Link::Ptr(u32::from(ptr3) - HEADER_SIZE) + ); + + let ptr4 = heap.allocate(&mut mem[..], 8).unwrap(); + assert_eq!(ptr4, ptr3); + + assert_eq!( + heap.free_lists.heads[0], + Link::Ptr(u32::from(ptr2) - HEADER_SIZE) + ); + } + + #[test] + fn 
should_not_allocate_if_too_large() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(13); + + // when + let ptr = heap.allocate(&mut mem[..], PAGE_SIZE - 13); + + // then + match ptr.unwrap_err() { + Error::AllocatorOutOfSpace => {} + e => panic!("Expected allocator out of space error, got: {:?}", e), + } + } + + #[test] + fn should_not_allocate_if_full() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + let ptr1 = heap + .allocate(&mut mem[..], (PAGE_SIZE / 2) - HEADER_SIZE) + .unwrap(); + assert_eq!(ptr1, to_pointer(HEADER_SIZE)); + + // when + let ptr2 = heap.allocate(&mut mem[..], PAGE_SIZE / 2); + + // then + // there is no room for another half page incl. its 8 byte prefix + match ptr2.unwrap_err() { + Error::AllocatorOutOfSpace => {} + e => panic!("Expected allocator out of space error, got: {:?}", e), + } + } + + #[test] + fn should_allocate_max_possible_allocation_size() { + // given + let mut mem = vec![0u8; (MAX_POSSIBLE_ALLOCATION + PAGE_SIZE) as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + // when + let ptr = heap + .allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION) + .unwrap(); + + // then + assert_eq!(ptr, to_pointer(HEADER_SIZE)); + } + + #[test] + fn should_not_allocate_if_requested_size_too_large() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + // when + let ptr = heap.allocate(&mut mem[..], MAX_POSSIBLE_ALLOCATION + 1); + + // then + match ptr.unwrap_err() { + Error::RequestedAllocationTooLarge => {} + e => panic!("Expected allocation size too large error, got: {:?}", e), + } + } + + #[test] + fn should_return_error_when_bumper_greater_than_heap_size() { + // given + let mut mem = [0u8; 64]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + let ptr1 = heap.allocate(&mut mem[..], 32).unwrap(); + assert_eq!(ptr1, to_pointer(HEADER_SIZE)); + 
heap.deallocate(&mut mem[..], ptr1) + .expect("failed freeing ptr1"); + assert_eq!(heap.total_size, 0); + assert_eq!(heap.bumper, 40); + + let ptr2 = heap.allocate(&mut mem[..], 16).unwrap(); + assert_eq!(ptr2, to_pointer(48)); + heap.deallocate(&mut mem[..], ptr2) + .expect("failed freeing ptr2"); + assert_eq!(heap.total_size, 0); + assert_eq!(heap.bumper, 64); + + // when + // the `bumper` value is equal to `size` here and any + // further allocation which would increment the bumper must fail. + // we try to allocate 8 bytes here, which will increment the + // bumper since no 8 byte item has been allocated+freed before. + let ptr = heap.allocate(&mut mem[..], 8); + + // then + match ptr.unwrap_err() { + Error::AllocatorOutOfSpace => {} + e => panic!("Expected allocator out of space error, got: {:?}", e), + } + } + + #[test] + fn should_include_prefixes_in_total_heap_size() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(1); + + // when + // an item size of 16 must be used then + heap.allocate(&mut mem[..], 9).unwrap(); + + // then + assert_eq!(heap.total_size, HEADER_SIZE + 16); + } + + #[test] + fn should_calculate_total_heap_size_to_zero() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(13); + + // when + let ptr = heap.allocate(&mut mem[..], 42).unwrap(); + assert_eq!(ptr, to_pointer(16 + HEADER_SIZE)); + heap.deallocate(&mut mem[..], ptr).unwrap(); + + // then + assert_eq!(heap.total_size, 0); + } + + #[test] + fn should_calculate_total_size_of_zero() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + let mut heap = FreeingBumpHeapAllocator::new(19); + + // when + for _ in 1..10 { + let ptr = heap.allocate(&mut mem[..], 42).unwrap(); + heap.deallocate(&mut mem[..], ptr).unwrap(); + } + + // then + assert_eq!(heap.total_size, 0); + } + + #[test] + fn should_read_and_write_u64_correctly() { + // given + let mut mem = [0u8; PAGE_SIZE as usize]; + 
+ // when + Memory::write_le_u64(mem.as_mut(), 40, 4480113).unwrap(); + + // then + let value = Memory::read_le_u64(mem.as_mut(), 40).unwrap(); + assert_eq!(value, 4480113); + } + + #[test] + fn should_get_item_size_from_order() { + // given + let raw_order = 0; + + // when + let item_size = Order::from_raw(raw_order).unwrap().size(); + + // then + assert_eq!(item_size, 8); + } + + #[test] + fn should_get_max_item_size_from_index() { + // given + let raw_order = 21; + + // when + let item_size = Order::from_raw(raw_order).unwrap().size(); + + // then + assert_eq!(item_size as u32, MAX_POSSIBLE_ALLOCATION); + } + + #[test] + fn deallocate_needs_to_maintain_linked_list() { + let mut mem = [0u8; 8 * 2 * 4 + ALIGNMENT as usize]; + let mut heap = FreeingBumpHeapAllocator::new(0); + + // Allocate and free some pointers + let ptrs = (0..4) + .map(|_| heap.allocate(&mut mem[..], 8).unwrap()) + .collect::>(); + ptrs.into_iter() + .for_each(|ptr| heap.deallocate(&mut mem[..], ptr).unwrap()); + + // Second time we should be able to allocate all of them again. 
+ let _ = (0..4) + .map(|_| heap.allocate(&mut mem[..], 8).unwrap()) + .collect::>(); + } + + #[test] + fn header_read_write() { + let roundtrip = |header: Header| { + let mut memory = [0u8; 32]; + header.write_into(memory.as_mut(), 0).unwrap(); + + let read_header = Header::read_from(memory.as_mut(), 0).unwrap(); + assert_eq!(header, read_header); + }; + + roundtrip(Header::Occupied(Order(0))); + roundtrip(Header::Occupied(Order(1))); + roundtrip(Header::Free(Link::Null)); + roundtrip(Header::Free(Link::Ptr(0))); + roundtrip(Header::Free(Link::Ptr(4))); + } } diff --git a/primitives/allocator/src/lib.rs b/primitives/allocator/src/lib.rs index 0efadbc7f6..102abd471b 100644 --- a/primitives/allocator/src/lib.rs +++ b/primitives/allocator/src/lib.rs @@ -25,5 +25,5 @@ mod error; mod freeing_bump; -pub use freeing_bump::FreeingBumpHeapAllocator; pub use error::Error; +pub use freeing_bump::FreeingBumpHeapAllocator; diff --git a/primitives/api/proc-macro/src/decl_runtime_apis.rs b/primitives/api/proc-macro/src/decl_runtime_apis.rs index ef50bd840a..159047246b 100644 --- a/primitives/api/proc-macro/src/decl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/decl_runtime_apis.rs @@ -15,21 +15,25 @@ // along with Substrate. If not, see . 
use crate::utils::{ - generate_crate_access, generate_hidden_includes, generate_runtime_mod_name_for_trait, - fold_fn_decl_for_client_side, extract_parameter_names_types_and_borrows, - generate_native_call_generator_fn_name, return_type_extract_type, - generate_method_runtime_api_impl_name, generate_call_api_at_fn_name, prefix_function_with_trait, - replace_wild_card_parameter_names, AllowSelfRefInParameters, + extract_parameter_names_types_and_borrows, fold_fn_decl_for_client_side, + generate_call_api_at_fn_name, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, generate_native_call_generator_fn_name, + generate_runtime_mod_name_for_trait, prefix_function_with_trait, + replace_wild_card_parameter_names, return_type_extract_type, AllowSelfRefInParameters, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, parse::{Parse, ParseStream, Result, Error}, ReturnType, - fold::{self, Fold}, parse_quote, ItemTrait, Generics, GenericParam, Attribute, FnArg, Type, - visit::{Visit, self}, TraitBound, Meta, NestedMeta, Lit, TraitItem, Ident, TraitItemMethod, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + visit::{self, Visit}, + Attribute, FnArg, GenericParam, Generics, Ident, ItemTrait, Lit, Meta, NestedMeta, ReturnType, + TraitBound, TraitItem, TraitItemMethod, Type, }; use std::collections::HashMap; @@ -69,231 +73,243 @@ const SKIP_INITIALIZE_BLOCK_ATTRIBUTE: &str = "skip_initialize_block"; const INITIALIZE_BLOCK_ATTRIBUTE: &str = "initialize_block"; /// All attributes that we support in the declaration of a runtime api trait. 
const SUPPORTED_ATTRIBUTE_NAMES: &[&str] = &[ - CORE_TRAIT_ATTRIBUTE, API_VERSION_ATTRIBUTE, CHANGED_IN_ATTRIBUTE, - RENAMED_ATTRIBUTE, SKIP_INITIALIZE_BLOCK_ATTRIBUTE, - INITIALIZE_BLOCK_ATTRIBUTE, + CORE_TRAIT_ATTRIBUTE, + API_VERSION_ATTRIBUTE, + CHANGED_IN_ATTRIBUTE, + RENAMED_ATTRIBUTE, + SKIP_INITIALIZE_BLOCK_ATTRIBUTE, + INITIALIZE_BLOCK_ATTRIBUTE, ]; /// The structure used for parsing the runtime api declarations. struct RuntimeApiDecls { - decls: Vec, + decls: Vec, } impl Parse for RuntimeApiDecls { - fn parse(input: ParseStream) -> Result { - let mut decls = Vec::new(); + fn parse(input: ParseStream) -> Result { + let mut decls = Vec::new(); - while !input.is_empty() { - decls.push(ItemTrait::parse(input)?); - } + while !input.is_empty() { + decls.push(ItemTrait::parse(input)?); + } - Ok(Self { decls }) - } + Ok(Self { decls }) + } } /// Extend the given generics with `Block: BlockT` as first generic parameter. fn extend_generics_with_block(generics: &mut Generics) { - let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let c = generate_crate_access(HIDDEN_INCLUDES_ID); - generics.lt_token = Some(Default::default()); - generics.params.insert(0, parse_quote!( Block: #c::BlockT )); - generics.gt_token = Some(Default::default()); + generics.lt_token = Some(Default::default()); + generics.params.insert(0, parse_quote!( Block: #c::BlockT )); + generics.gt_token = Some(Default::default()); } /// Remove all attributes from the vector that are supported by us in the declaration of a runtime /// api trait. The returned hashmap contains all found attribute names as keys and the rest of the /// attribute body as `TokenStream`. 
fn remove_supported_attributes(attrs: &mut Vec) -> HashMap<&'static str, Attribute> { - let mut result = HashMap::new(); - attrs.retain(|v| { - match SUPPORTED_ATTRIBUTE_NAMES.iter().find(|a| v.path.is_ident(a)) { - Some(attribute) => { - result.insert(*attribute, v.clone()); - false - }, - None => true, - } - }); - - result + let mut result = HashMap::new(); + attrs.retain(|v| { + match SUPPORTED_ATTRIBUTE_NAMES + .iter() + .find(|a| v.path.is_ident(a)) + { + Some(attribute) => { + result.insert(*attribute, v.clone()); + false + } + None => true, + } + }); + + result } /// Visits the ast and checks if `Block` ident is used somewhere. struct IsUsingBlock { - result: bool, + result: bool, } impl<'ast> Visit<'ast> for IsUsingBlock { - fn visit_ident(&mut self, i: &'ast Ident) { - if i == BLOCK_GENERIC_IDENT { - self.result = true; - } - } + fn visit_ident(&mut self, i: &'ast Ident) { + if i == BLOCK_GENERIC_IDENT { + self.result = true; + } + } } /// Visits the ast and checks if `Block` ident is used somewhere. fn type_is_using_block(ty: &Type) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_type(ty); - visitor.result + let mut visitor = IsUsingBlock { result: false }; + visitor.visit_type(ty); + visitor.result } /// Visits the ast and checks if `Block` ident is used somewhere. 
fn return_type_is_using_block(ty: &ReturnType) -> bool { - let mut visitor = IsUsingBlock { result: false }; - visitor.visit_return_type(ty); - visitor.result + let mut visitor = IsUsingBlock { result: false }; + visitor.visit_return_type(ty); + visitor.result } /// Replace all occurrences of `Block` with `NodeBlock` struct ReplaceBlockWithNodeBlock {} impl Fold for ReplaceBlockWithNodeBlock { - fn fold_ident(&mut self, input: Ident) -> Ident { - if input == BLOCK_GENERIC_IDENT { - Ident::new("NodeBlock", Span::call_site()) - } else { - input - } - } + fn fold_ident(&mut self, input: Ident) -> Ident { + if input == BLOCK_GENERIC_IDENT { + Ident::new("NodeBlock", Span::call_site()) + } else { + input + } + } } /// Replace all occurrences of `Block` with `NodeBlock` fn fn_arg_replace_block_with_node_block(fn_arg: FnArg) -> FnArg { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_fn_arg(&mut replace, fn_arg) + let mut replace = ReplaceBlockWithNodeBlock {}; + fold::fold_fn_arg(&mut replace, fn_arg) } /// Replace all occurrences of `Block` with `NodeBlock` fn return_type_replace_block_with_node_block(return_type: ReturnType) -> ReturnType { - let mut replace = ReplaceBlockWithNodeBlock {}; - fold::fold_return_type(&mut replace, return_type) + let mut replace = ReplaceBlockWithNodeBlock {}; + fold::fold_return_type(&mut replace, return_type) } /// Generate the functions that generate the native call closure for each trait method. fn generate_native_call_generators(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some(&m.sig), - _ => None, - }); - - let mut result = Vec::new(); - let trait_ = &decl.ident; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Auxiliary function that is used to convert between types that use different block types. - // The function expects that both are convertible by encoding the one and decoding the other. 
- result.push(quote!( - #[cfg(any(feature = "std", test))] - fn convert_between_block_types - ( - input: &I, error_desc: &'static str, - ) -> std::result::Result - { - ::decode( - &mut &#crate_::Encode::encode(input)[..], - ).map_err(|e| format!("{} {}", error_desc, e.what())) - } - )); - - // Generate a native call generator for each function of the given trait. - for fn_ in fns { - let params = extract_parameter_names_types_and_borrows(&fn_, AllowSelfRefInParameters::No)?; - let trait_fn_name = &fn_.ident; - let fn_name = generate_native_call_generator_fn_name(&fn_.ident); - let output = return_type_replace_block_with_node_block(fn_.output.clone()); - let output_ty = return_type_extract_type(&output); - let output = quote!( std::result::Result<#output_ty, String> ); - - // Every type that is using the `Block` generic parameter, we need to encode/decode, - // to make it compatible between the runtime/node. - let conversions = params.iter().filter(|v| type_is_using_block(&v.1)).map(|(n, t, _)| { - let name_str = format!( - "Could not convert parameter `{}` between node and runtime:", quote!(#n) - ); - quote!( - let #n: #t = convert_between_block_types(&#n, #name_str)?; - ) - }); - // Same as for the input types, we need to check if we also need to convert the output, - // before returning it. - let output_conversion = if return_type_is_using_block(&fn_.output) { - quote!( - convert_between_block_types( - &res, - "Could not convert return value from runtime to node!" - ) - ) - } else { - quote!( Ok(res) ) - }; - - let input_names = params.iter().map(|v| &v.0); - // If the type is using the block generic type, we will encode/decode it to make it - // compatible. To ensure that we forward it by ref/value, we use the value given by the - // the user. Otherwise if it is not using the block, we don't need to add anything. 
- let input_borrows = params - .iter() - .map(|v| if type_is_using_block(&v.1) { v.2.clone() } else { None }); - - // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect - // all the function inputs. - let fn_inputs = fn_ - .inputs - .iter() - .map(|v| fn_arg_replace_block_with_node_block(v.clone())) - .map(|v| match v { - FnArg::Typed(ref arg) => { - let mut arg = arg.clone(); - if let Type::Reference(ref mut r) = *arg.ty { - r.lifetime = Some(parse_quote!( 'a )); - } - FnArg::Typed(arg) - }, - r => r.clone(), - }); - - let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); - // We need to parse them again, to get an easy access to the actual parameters. - let impl_generics: Generics = parse_quote!( #impl_generics ); - let impl_generics_params = impl_generics.params.iter().map(|p| { - match p { - GenericParam::Type(ref ty) => { - let mut ty = ty.clone(); - ty.bounds.push(parse_quote!( 'a )); - GenericParam::Type(ty) - }, - // We should not see anything different than type params here. - r => r.clone(), - } - }); - - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - pub fn #fn_name< - 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::BlockT - #(, #impl_generics_params)* - >( - #( #fn_inputs ),* - ) -> impl FnOnce() -> #output + 'a #where_clause { - move || { - #( #conversions )* - let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); - #output_conversion - } - } - )); - } - - Ok(quote!( #( #result )* )) + let fns = decl.items.iter().filter_map(|i| match i { + TraitItem::Method(ref m) => Some(&m.sig), + _ => None, + }); + + let mut result = Vec::new(); + let trait_ = &decl.ident; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Auxiliary function that is used to convert between types that use different block types. + // The function expects that both are convertible by encoding the one and decoding the other. 
+ result.push(quote!( + #[cfg(any(feature = "std", test))] + fn convert_between_block_types + ( + input: &I, error_desc: &'static str, + ) -> std::result::Result + { + ::decode( + &mut &#crate_::Encode::encode(input)[..], + ).map_err(|e| format!("{} {}", error_desc, e.what())) + } + )); + + // Generate a native call generator for each function of the given trait. + for fn_ in fns { + let params = extract_parameter_names_types_and_borrows(&fn_, AllowSelfRefInParameters::No)?; + let trait_fn_name = &fn_.ident; + let fn_name = generate_native_call_generator_fn_name(&fn_.ident); + let output = return_type_replace_block_with_node_block(fn_.output.clone()); + let output_ty = return_type_extract_type(&output); + let output = quote!( std::result::Result<#output_ty, String> ); + + // Every type that is using the `Block` generic parameter, we need to encode/decode, + // to make it compatible between the runtime/node. + let conversions = params + .iter() + .filter(|v| type_is_using_block(&v.1)) + .map(|(n, t, _)| { + let name_str = format!( + "Could not convert parameter `{}` between node and runtime:", + quote!(#n) + ); + quote!( + let #n: #t = convert_between_block_types(&#n, #name_str)?; + ) + }); + // Same as for the input types, we need to check if we also need to convert the output, + // before returning it. + let output_conversion = if return_type_is_using_block(&fn_.output) { + quote!(convert_between_block_types( + &res, + "Could not convert return value from runtime to node!" + )) + } else { + quote!(Ok(res)) + }; + + let input_names = params.iter().map(|v| &v.0); + // If the type is using the block generic type, we will encode/decode it to make it + // compatible. To ensure that we forward it by ref/value, we use the value given by the + // the user. Otherwise if it is not using the block, we don't need to add anything. 
+ let input_borrows = params.iter().map(|v| { + if type_is_using_block(&v.1) { + v.2.clone() + } else { + None + } + }); + + // Replace all `Block` with `NodeBlock`, add `'a` lifetime to references and collect + // all the function inputs. + let fn_inputs = fn_ + .inputs + .iter() + .map(|v| fn_arg_replace_block_with_node_block(v.clone())) + .map(|v| match v { + FnArg::Typed(ref arg) => { + let mut arg = arg.clone(); + if let Type::Reference(ref mut r) = *arg.ty { + r.lifetime = Some(parse_quote!( 'a )); + } + FnArg::Typed(arg) + } + r => r.clone(), + }); + + let (impl_generics, ty_generics, where_clause) = decl.generics.split_for_impl(); + // We need to parse them again, to get an easy access to the actual parameters. + let impl_generics: Generics = parse_quote!( #impl_generics ); + let impl_generics_params = impl_generics.params.iter().map(|p| { + match p { + GenericParam::Type(ref ty) => { + let mut ty = ty.clone(); + ty.bounds.push(parse_quote!( 'a )); + GenericParam::Type(ty) + } + // We should not see anything different than type params here. + r => r.clone(), + } + }); + + // Generate the generator function + result.push(quote!( + #[cfg(any(feature = "std", test))] + pub fn #fn_name< + 'a, ApiImpl: #trait_ #ty_generics, NodeBlock: #crate_::BlockT + #(, #impl_generics_params)* + >( + #( #fn_inputs ),* + ) -> impl FnOnce() -> #output + 'a #where_clause { + move || { + #( #conversions )* + let res = ApiImpl::#trait_fn_name(#( #input_borrows #input_names ),*); + #output_conversion + } + } + )); + } + + Ok(quote!( #( #result )* )) } /// Try to parse the given `Attribute` as `renamed` attribute. fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { - let meta = renamed.parse_meta()?; + let meta = renamed.parse_meta()?; - let err = Err(Error::new( + let err = Err(Error::new( meta.span(), &format!( "Unexpected `{renamed}` attribute. 
The supported format is `{renamed}(\"old_name\", version_it_was_renamed)`", @@ -302,708 +318,734 @@ fn parse_renamed_attribute(renamed: &Attribute) -> Result<(String, u32)> { ) ); - match meta { - Meta::List(list) => { - if list.nested.len() > 2 && list.nested.is_empty() { - err - } else { - let mut itr = list.nested.iter(); - let old_name = match itr.next() { - Some(NestedMeta::Lit(Lit::Str(i))) => { - i.value() - }, - _ => return err, - }; - - let version = match itr.next() { - Some(NestedMeta::Lit(Lit::Int(i))) => { - i.base10_parse()? - }, - _ => return err, - }; - - Ok((old_name, version)) - } - }, - _ => err, - } + match meta { + Meta::List(list) => { + if list.nested.len() > 2 && list.nested.is_empty() { + err + } else { + let mut itr = list.nested.iter(); + let old_name = match itr.next() { + Some(NestedMeta::Lit(Lit::Str(i))) => i.value(), + _ => return err, + }; + + let version = match itr.next() { + Some(NestedMeta::Lit(Lit::Int(i))) => i.base10_parse()?, + _ => return err, + }; + + Ok((old_name, version)) + } + } + _ => err, + } } /// Generate the functions that call the api at a given block for a given trait method. fn generate_call_api_at_calls(decl: &ItemTrait) -> Result { - let fns = decl.items.iter().filter_map(|i| match i { - TraitItem::Method(ref m) => Some((&m.attrs, &m.sig)), - _ => None, - }); - - let mut result = Vec::new(); - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Generate a native call generator for each function of the given trait. 
- for (attrs, fn_) in fns { - let trait_name = &decl.ident; - let trait_fn_name = prefix_function_with_trait(&trait_name, &fn_.ident); - let fn_name = generate_call_api_at_fn_name(&fn_.ident); - - let attrs = remove_supported_attributes(&mut attrs.clone()); - - if attrs.contains_key(RENAMED_ATTRIBUTE) && attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - return Err(Error::new( - fn_.span(), - format!( - "`{}` and `{}` are not supported at once.", - RENAMED_ATTRIBUTE, - CHANGED_IN_ATTRIBUTE - ) - )); - } - - // We do not need to generate this function for a method that signature was changed. - if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { - continue; - } - - let skip_initialize_block = attrs.contains_key(SKIP_INITIALIZE_BLOCK_ATTRIBUTE); - let update_initialized_block = if attrs.contains_key(INITIALIZE_BLOCK_ATTRIBUTE) { - quote!( - || *initialized_block.borrow_mut() = Some(*at) - ) - } else { - quote!(|| ()) - }; - - // Parse the renamed attributes. - let mut renames = Vec::new(); - if let Some((_, a)) = attrs - .iter() - .find(|a| a.0 == &RENAMED_ATTRIBUTE) - { - let (old_name, version) = parse_renamed_attribute(a)?; - renames.push((version, prefix_function_with_trait(&trait_name, &old_name))); - } - - renames.sort_unstable_by(|l, r| r.cmp(l)); - let (versions, old_names) = renames.into_iter().fold( - (Vec::new(), Vec::new()), - |(mut versions, mut old_names), (version, old_name)| { - versions.push(version); - old_names.push(old_name); - (versions, old_names) - } - ); - - // Generate the generator function - result.push(quote!( - #[cfg(any(feature = "std", test))] - pub fn #fn_name< - R: #crate_::Encode + #crate_::Decode + PartialEq, - NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, - Block: #crate_::BlockT, - T: #crate_::CallApiAt, - C: #crate_::Core, - >( - call_runtime_at: &T, - core_api: &C, - at: &#crate_::BlockId, - args: Vec, - changes: &std::cell::RefCell<#crate_::OverlayedChanges>, - storage_transaction_cache: &std::cell::RefCell< - 
#crate_::StorageTransactionCache - >, - initialized_block: &std::cell::RefCell>>, - native_call: Option, - context: #crate_::ExecutionContext, - recorder: &Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { - let version = call_runtime_at.runtime_version_at(at)?; - use #crate_::InitializeBlock; - let initialize_block = if #skip_initialize_block { - InitializeBlock::Skip - } else { - InitializeBlock::Do(&initialized_block) - }; - let update_initialized_block = #update_initialized_block; - - #( - // Check if we need to call the function by an old name. - if version.apis.iter().any(|(s, v)| { - s == &ID && *v < #versions - }) { - let params = #crate_::CallApiAtParams::<_, _, fn() -> _, _> { - core_api, - at, - function: #old_names, - native_call: None, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - initialize_block, - context, - recorder, - }; - - let ret = call_runtime_at.call_api_at(params)?; - - update_initialized_block(); - return Ok(ret) - } - )* - - let params = #crate_::CallApiAtParams { - core_api, - at, - function: #trait_fn_name, - native_call, - arguments: args, - overlayed_changes: changes, - storage_transaction_cache, - initialize_block, - context, - recorder, - }; - - let ret = call_runtime_at.call_api_at(params)?; - - update_initialized_block(); - Ok(ret) - } - )); - } - - Ok(quote!( #( #result )* )) + let fns = decl.items.iter().filter_map(|i| match i { + TraitItem::Method(ref m) => Some((&m.attrs, &m.sig)), + _ => None, + }); + + let mut result = Vec::new(); + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Generate a native call generator for each function of the given trait. 
+ for (attrs, fn_) in fns { + let trait_name = &decl.ident; + let trait_fn_name = prefix_function_with_trait(&trait_name, &fn_.ident); + let fn_name = generate_call_api_at_fn_name(&fn_.ident); + + let attrs = remove_supported_attributes(&mut attrs.clone()); + + if attrs.contains_key(RENAMED_ATTRIBUTE) && attrs.contains_key(CHANGED_IN_ATTRIBUTE) { + return Err(Error::new( + fn_.span(), + format!( + "`{}` and `{}` are not supported at once.", + RENAMED_ATTRIBUTE, CHANGED_IN_ATTRIBUTE + ), + )); + } + + // We do not need to generate this function for a method that signature was changed. + if attrs.contains_key(CHANGED_IN_ATTRIBUTE) { + continue; + } + + let skip_initialize_block = attrs.contains_key(SKIP_INITIALIZE_BLOCK_ATTRIBUTE); + let update_initialized_block = if attrs.contains_key(INITIALIZE_BLOCK_ATTRIBUTE) { + quote!(|| *initialized_block.borrow_mut() = Some(*at)) + } else { + quote!(|| ()) + }; + + // Parse the renamed attributes. + let mut renames = Vec::new(); + if let Some((_, a)) = attrs.iter().find(|a| a.0 == &RENAMED_ATTRIBUTE) { + let (old_name, version) = parse_renamed_attribute(a)?; + renames.push((version, prefix_function_with_trait(&trait_name, &old_name))); + } + + renames.sort_unstable_by(|l, r| r.cmp(l)); + let (versions, old_names) = renames.into_iter().fold( + (Vec::new(), Vec::new()), + |(mut versions, mut old_names), (version, old_name)| { + versions.push(version); + old_names.push(old_name); + (versions, old_names) + }, + ); + + // Generate the generator function + result.push(quote!( + #[cfg(any(feature = "std", test))] + pub fn #fn_name< + R: #crate_::Encode + #crate_::Decode + PartialEq, + NC: FnOnce() -> std::result::Result + std::panic::UnwindSafe, + Block: #crate_::BlockT, + T: #crate_::CallApiAt, + C: #crate_::Core, + >( + call_runtime_at: &T, + core_api: &C, + at: &#crate_::BlockId, + args: Vec, + changes: &std::cell::RefCell<#crate_::OverlayedChanges>, + storage_transaction_cache: &std::cell::RefCell< + 
#crate_::StorageTransactionCache + >, + initialized_block: &std::cell::RefCell>>, + native_call: Option, + context: #crate_::ExecutionContext, + recorder: &Option<#crate_::ProofRecorder>, + ) -> std::result::Result<#crate_::NativeOrEncoded, T::Error> { + let version = call_runtime_at.runtime_version_at(at)?; + use #crate_::InitializeBlock; + let initialize_block = if #skip_initialize_block { + InitializeBlock::Skip + } else { + InitializeBlock::Do(&initialized_block) + }; + let update_initialized_block = #update_initialized_block; + + #( + // Check if we need to call the function by an old name. + if version.apis.iter().any(|(s, v)| { + s == &ID && *v < #versions + }) { + let params = #crate_::CallApiAtParams::<_, _, fn() -> _, _> { + core_api, + at, + function: #old_names, + native_call: None, + arguments: args, + overlayed_changes: changes, + storage_transaction_cache, + initialize_block, + context, + recorder, + }; + + let ret = call_runtime_at.call_api_at(params)?; + + update_initialized_block(); + return Ok(ret) + } + )* + + let params = #crate_::CallApiAtParams { + core_api, + at, + function: #trait_fn_name, + native_call, + arguments: args, + overlayed_changes: changes, + storage_transaction_cache, + initialize_block, + context, + recorder, + }; + + let ret = call_runtime_at.call_api_at(params)?; + + update_initialized_block(); + Ok(ret) + } + )); + } + + Ok(quote!( #( #result )* )) } /// Generate the declaration of the trait for the runtime. 
fn generate_runtime_decls(decls: &[ItemTrait]) -> Result { - let mut result = Vec::new(); - - for decl in decls { - let mut decl = decl.clone(); - extend_generics_with_block(&mut decl.generics); - let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); - let found_attributes = remove_supported_attributes(&mut decl.attrs); - let api_version = get_api_version(&found_attributes).map(|v| { - generate_runtime_api_version(v as u32) - })?; - let id = generate_runtime_api_id(&decl.ident.to_string()); - - let call_api_at_calls = generate_call_api_at_calls(&decl)?; - - // Remove methods that have the `changed_in` attribute as they are not required for the - // runtime anymore. - decl.items = decl.items.iter_mut().filter_map(|i| match i { - TraitItem::Method(ref mut method) => { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - None - } else { - // Make sure we replace all the wild card parameter names. - replace_wild_card_parameter_names(&mut method.sig); - Some(TraitItem::Method(method.clone())) - } - } - r => Some(r.clone()), - }).collect(); - - let native_call_generators = generate_native_call_generators(&decl)?; - - result.push(quote!( - #[doc(hidden)] - #[allow(dead_code)] - #[allow(deprecated)] - pub mod #mod_name { - use super::*; - - #decl - - pub #api_version - - pub #id - - #native_call_generators - - #call_api_at_calls - } - )); - } - - Ok(quote!( #( #result )* )) + let mut result = Vec::new(); + + for decl in decls { + let mut decl = decl.clone(); + extend_generics_with_block(&mut decl.generics); + let mod_name = generate_runtime_mod_name_for_trait(&decl.ident); + let found_attributes = remove_supported_attributes(&mut decl.attrs); + let api_version = + get_api_version(&found_attributes).map(|v| generate_runtime_api_version(v as u32))?; + let id = generate_runtime_api_id(&decl.ident.to_string()); + + let call_api_at_calls = generate_call_api_at_calls(&decl)?; + + // Remove methods that have the `changed_in` 
attribute as they are not required for the + // runtime anymore. + decl.items = decl + .items + .iter_mut() + .filter_map(|i| match i { + TraitItem::Method(ref mut method) => { + if remove_supported_attributes(&mut method.attrs) + .contains_key(CHANGED_IN_ATTRIBUTE) + { + None + } else { + // Make sure we replace all the wild card parameter names. + replace_wild_card_parameter_names(&mut method.sig); + Some(TraitItem::Method(method.clone())) + } + } + r => Some(r.clone()), + }) + .collect(); + + let native_call_generators = generate_native_call_generators(&decl)?; + + result.push(quote!( + #[doc(hidden)] + #[allow(dead_code)] + #[allow(deprecated)] + pub mod #mod_name { + use super::*; + + #decl + + pub #api_version + + pub #id + + #native_call_generators + + #call_api_at_calls + } + )); + } + + Ok(quote!( #( #result )* )) } /// Modify the given runtime api declaration to be usable on the client side. struct ToClientSideDecl<'a> { - block_id: &'a TokenStream, - crate_: &'a TokenStream, - found_attributes: &'a mut HashMap<&'static str, Attribute>, - /// Any error that we found while converting this declaration. - errors: &'a mut Vec, - trait_: &'a Ident, + block_id: &'a TokenStream, + crate_: &'a TokenStream, + found_attributes: &'a mut HashMap<&'static str, Attribute>, + /// Any error that we found while converting this declaration. 
+ errors: &'a mut Vec, + trait_: &'a Ident, } impl<'a> ToClientSideDecl<'a> { - fn fold_item_trait_items(&mut self, items: Vec) -> Vec { - let mut result = Vec::new(); - - items.into_iter().for_each(|i| match i { - TraitItem::Method(method) => { - let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); - result.push(fn_decl.into()); - result.push(fn_decl_ctx.into()); - - if let Some(fn_impl) = fn_impl { - result.push(fn_impl.into()); - } - }, - r => result.push(r), - }); - - result - } - - fn fold_trait_item_method(&mut self, method: TraitItemMethod) - -> (TraitItemMethod, Option, TraitItemMethod) { - let crate_ = self.crate_; - let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); - let fn_impl = self.create_method_runtime_api_impl(method.clone()); - let fn_decl = self.create_method_decl(method.clone(), context); - let fn_decl_ctx = self.create_method_decl_with_context(method); - - (fn_decl, fn_impl, fn_decl_ctx) - } - - fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { - let crate_ = self.crate_; - let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); - let mut fn_decl_ctx = self.create_method_decl(method, quote!( context )); - fn_decl_ctx.sig.ident = Ident::new(&format!("{}_with_context", &fn_decl_ctx.sig.ident), Span::call_site()); - fn_decl_ctx.sig.inputs.insert(2, context_arg); - - fn_decl_ctx - } - - /// Takes the given method and creates a `method_runtime_api_impl` method that will be - /// implemented in the runtime for the client side. - fn create_method_runtime_api_impl(&mut self, mut method: TraitItemMethod) -> Option { - if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { - return None; - } - - let fn_sig = &method.sig; - let ret_type = return_type_extract_type(&fn_sig.output); - - // Get types and if the value is borrowed from all parameters. - // If there is an error, we push it as the block to the user. 
- let param_types = match extract_parameter_names_types_and_borrows( - fn_sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => res.into_iter().map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }).collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - } - }; - let name = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); - let block_id = self.block_id; - let crate_ = self.crate_; - - Some( - parse_quote!{ - #[doc(hidden)] - fn #name( - &self, - at: &#block_id, - context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error>; - } - ) - } - - /// Takes the method declared by the user and creates the declaration we require for the runtime - /// api client side. This method will call by default the `method_runtime_api_impl` for doing - /// the actual call into the runtime. - fn create_method_decl( - &mut self, - mut method: TraitItemMethod, - context: TokenStream, - ) -> TraitItemMethod { - let params = match extract_parameter_names_types_and_borrows( - &method.sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => res.into_iter().map(|v| v.0).collect::>(), - Err(e) => { - self.errors.push(e.to_compile_error()); - Vec::new() - } - }; - let params2 = params.clone(); - let ret_type = return_type_extract_type(&method.sig.output); - - fold_fn_decl_for_client_side(&mut method.sig, &self.block_id); - - let name_impl = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); - let crate_ = self.crate_; - - let found_attributes = remove_supported_attributes(&mut method.attrs); - // If the method has a `changed_in` attribute, we need to alter the method name to - // `method_before_version_VERSION`. 
- let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { - Ok(Some(version)) => { - // Make sure that the `changed_in` version is at least the current `api_version`. - if get_api_version(&self.found_attributes).ok() < Some(version) { - self.errors.push( - Error::new( - method.span(), - "`changed_in` version can not be greater than the `api_version`", - ).to_compile_error() - ); - } - - let ident = Ident::new( - &format!("{}_before_version_{}", method.sig.ident, version), - method.sig.ident.span(), - ); - method.sig.ident = ident; - method.attrs.push(parse_quote!( #[deprecated] )); - - let panic = format!("Calling `{}` should not return a native value!", method.sig.ident); - (quote!( panic!(#panic) ), quote!( None )) - }, - Ok(None) => (quote!( Ok(n) ), quote!( Some(( #( #params2 ),* )) )), - Err(e) => { - self.errors.push(e.to_compile_error()); - (quote!( unimplemented!() ), quote!( None )) - } - }; - - let function_name = method.sig.ident.to_string(); - - // Generate the default implementation that calls the `method_runtime_api_impl` method. - method.default = Some( - parse_quote! 
{ - { - let runtime_api_impl_params_encoded = - #crate_::Encode::encode(&( #( &#params ),* )); - - self.#name_impl( - __runtime_api_at_param__, - #context, - #param_tuple, - runtime_api_impl_params_encoded, - ).and_then(|r| - match r { - #crate_::NativeOrEncoded::Native(n) => { - #native_handling - }, - #crate_::NativeOrEncoded::Encoded(r) => { - <#ret_type as #crate_::Decode>::decode(&mut &r[..]) - .map_err(|err| - format!( - "Failed to decode result of `{}`: {}", - #function_name, - err.what(), - ).into() - ) - } - } - ) - } - } - ); - - method - } + fn fold_item_trait_items(&mut self, items: Vec) -> Vec { + let mut result = Vec::new(); + + items.into_iter().for_each(|i| match i { + TraitItem::Method(method) => { + let (fn_decl, fn_impl, fn_decl_ctx) = self.fold_trait_item_method(method); + result.push(fn_decl.into()); + result.push(fn_decl_ctx.into()); + + if let Some(fn_impl) = fn_impl { + result.push(fn_impl.into()); + } + } + r => result.push(r), + }); + + result + } + + fn fold_trait_item_method( + &mut self, + method: TraitItemMethod, + ) -> (TraitItemMethod, Option, TraitItemMethod) { + let crate_ = self.crate_; + let context = quote!( #crate_::ExecutionContext::OffchainCall(None) ); + let fn_impl = self.create_method_runtime_api_impl(method.clone()); + let fn_decl = self.create_method_decl(method.clone(), context); + let fn_decl_ctx = self.create_method_decl_with_context(method); + + (fn_decl, fn_impl, fn_decl_ctx) + } + + fn create_method_decl_with_context(&mut self, method: TraitItemMethod) -> TraitItemMethod { + let crate_ = self.crate_; + let context_arg: syn::FnArg = parse_quote!( context: #crate_::ExecutionContext ); + let mut fn_decl_ctx = self.create_method_decl(method, quote!(context)); + fn_decl_ctx.sig.ident = Ident::new( + &format!("{}_with_context", &fn_decl_ctx.sig.ident), + Span::call_site(), + ); + fn_decl_ctx.sig.inputs.insert(2, context_arg); + + fn_decl_ctx + } + + /// Takes the given method and creates a `method_runtime_api_impl` 
method that will be + /// implemented in the runtime for the client side. + fn create_method_runtime_api_impl( + &mut self, + mut method: TraitItemMethod, + ) -> Option { + if remove_supported_attributes(&mut method.attrs).contains_key(CHANGED_IN_ATTRIBUTE) { + return None; + } + + let fn_sig = &method.sig; + let ret_type = return_type_extract_type(&fn_sig.output); + + // Get types and if the value is borrowed from all parameters. + // If there is an error, we push it as the block to the user. + let param_types = + match extract_parameter_names_types_and_borrows(fn_sig, AllowSelfRefInParameters::No) { + Ok(res) => res + .into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + Err(e) => { + self.errors.push(e.to_compile_error()); + Vec::new() + } + }; + let name = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); + let block_id = self.block_id; + let crate_ = self.crate_; + + Some(parse_quote! { + #[doc(hidden)] + fn #name( + &self, + at: &#block_id, + context: #crate_::ExecutionContext, + params: Option<( #( #param_types ),* )>, + params_encoded: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error>; + }) + } + + /// Takes the method declared by the user and creates the declaration we require for the runtime + /// api client side. This method will call by default the `method_runtime_api_impl` for doing + /// the actual call into the runtime. 
+ fn create_method_decl( + &mut self, + mut method: TraitItemMethod, + context: TokenStream, + ) -> TraitItemMethod { + let params = match extract_parameter_names_types_and_borrows( + &method.sig, + AllowSelfRefInParameters::No, + ) { + Ok(res) => res.into_iter().map(|v| v.0).collect::>(), + Err(e) => { + self.errors.push(e.to_compile_error()); + Vec::new() + } + }; + let params2 = params.clone(); + let ret_type = return_type_extract_type(&method.sig.output); + + fold_fn_decl_for_client_side(&mut method.sig, &self.block_id); + + let name_impl = generate_method_runtime_api_impl_name(&self.trait_, &method.sig.ident); + let crate_ = self.crate_; + + let found_attributes = remove_supported_attributes(&mut method.attrs); + // If the method has a `changed_in` attribute, we need to alter the method name to + // `method_before_version_VERSION`. + let (native_handling, param_tuple) = match get_changed_in(&found_attributes) { + Ok(Some(version)) => { + // Make sure that the `changed_in` version is at least the current `api_version`. + if get_api_version(&self.found_attributes).ok() < Some(version) { + self.errors.push( + Error::new( + method.span(), + "`changed_in` version can not be greater than the `api_version`", + ) + .to_compile_error(), + ); + } + + let ident = Ident::new( + &format!("{}_before_version_{}", method.sig.ident, version), + method.sig.ident.span(), + ); + method.sig.ident = ident; + method.attrs.push(parse_quote!( #[deprecated] )); + + let panic = format!( + "Calling `{}` should not return a native value!", + method.sig.ident + ); + (quote!(panic!(#panic)), quote!(None)) + } + Ok(None) => (quote!(Ok(n)), quote!( Some(( #( #params2 ),* )) )), + Err(e) => { + self.errors.push(e.to_compile_error()); + (quote!(unimplemented!()), quote!(None)) + } + }; + + let function_name = method.sig.ident.to_string(); + + // Generate the default implementation that calls the `method_runtime_api_impl` method. + method.default = Some(parse_quote! 
{ + { + let runtime_api_impl_params_encoded = + #crate_::Encode::encode(&( #( &#params ),* )); + + self.#name_impl( + __runtime_api_at_param__, + #context, + #param_tuple, + runtime_api_impl_params_encoded, + ).and_then(|r| + match r { + #crate_::NativeOrEncoded::Native(n) => { + #native_handling + }, + #crate_::NativeOrEncoded::Encoded(r) => { + <#ret_type as #crate_::Decode>::decode(&mut &r[..]) + .map_err(|err| + format!( + "Failed to decode result of `{}`: {}", + #function_name, + err.what(), + ).into() + ) + } + } + ) + } + }); + + method + } } impl<'a> Fold for ToClientSideDecl<'a> { - fn fold_item_trait(&mut self, mut input: ItemTrait) -> ItemTrait { - extend_generics_with_block(&mut input.generics); - - *self.found_attributes = remove_supported_attributes(&mut input.attrs); - // Check if this is the `Core` runtime api trait. - let is_core_trait = self.found_attributes.contains_key(CORE_TRAIT_ATTRIBUTE); - let block_ident = Ident::new(BLOCK_GENERIC_IDENT, Span::call_site()); - - if is_core_trait { - // Add all the supertraits we want to have for `Core`. - let crate_ = &self.crate_; - input.supertraits = parse_quote!( - 'static - + Send - + Sync - + #crate_::ApiErrorExt - ); - } else { - // Add the `Core` runtime api as super trait. - let crate_ = &self.crate_; - input.supertraits.push(parse_quote!( #crate_::Core<#block_ident> )); - } - - // The client side trait is only required when compiling with the feature `std` or `test`. - input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - input.items = self.fold_item_trait_items(input.items); - - fold::fold_item_trait(self, input) - } + fn fold_item_trait(&mut self, mut input: ItemTrait) -> ItemTrait { + extend_generics_with_block(&mut input.generics); + + *self.found_attributes = remove_supported_attributes(&mut input.attrs); + // Check if this is the `Core` runtime api trait. 
+ let is_core_trait = self.found_attributes.contains_key(CORE_TRAIT_ATTRIBUTE); + let block_ident = Ident::new(BLOCK_GENERIC_IDENT, Span::call_site()); + + if is_core_trait { + // Add all the supertraits we want to have for `Core`. + let crate_ = &self.crate_; + input.supertraits = parse_quote!( + 'static + + Send + + Sync + + #crate_::ApiErrorExt + ); + } else { + // Add the `Core` runtime api as super trait. + let crate_ = &self.crate_; + input + .supertraits + .push(parse_quote!( #crate_::Core<#block_ident> )); + } + + // The client side trait is only required when compiling with the feature `std` or `test`. + input + .attrs + .push(parse_quote!( #[cfg(any(feature = "std", test))] )); + input.items = self.fold_item_trait_items(input.items); + + fold::fold_item_trait(self, input) + } } /// Parse the given attribute as `API_VERSION_ATTRIBUTE`. fn parse_runtime_api_version(version: &Attribute) -> Result { - let meta = version.parse_meta()?; - - let err = Err(Error::new( - meta.span(), - &format!( - "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", - api_version = API_VERSION_ATTRIBUTE - ) - ) - ); - - match meta { - Meta::List(list) => { - if list.nested.len() != 1 { - err - } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { - i.base10_parse() - } else { - err - } - }, - _ => err, - } + let meta = version.parse_meta()?; + + let err = Err(Error::new( + meta.span(), + &format!( + "Unexpected `{api_version}` attribute. The supported format is `{api_version}(1)`", + api_version = API_VERSION_ATTRIBUTE + ), + )); + + match meta { + Meta::List(list) => { + if list.nested.len() != 1 { + err + } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { + i.base10_parse() + } else { + err + } + } + _ => err, + } } /// Generates the identifier as const variable for the given `trait_name` /// by hashing the `trait_name`. 
fn generate_runtime_api_id(trait_name: &str) -> TokenStream { - let mut res = [0; 8]; - res.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], trait_name.as_bytes()).as_bytes()); + let mut res = [0; 8]; + res.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], trait_name.as_bytes()).as_bytes()); - quote!( const ID: [u8; 8] = [ #( #res ),* ]; ) + quote!( const ID: [u8; 8] = [ #( #res ),* ]; ) } /// Generates the const variable that holds the runtime api version. fn generate_runtime_api_version(version: u32) -> TokenStream { - quote!( const VERSION: u32 = #version; ) + quote!( const VERSION: u32 = #version; ) } /// Generates the implementation of `RuntimeApiInfo` for the given trait. fn generate_runtime_info_impl(trait_: &ItemTrait, version: u64) -> TokenStream { - let trait_name = &trait_.ident; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let id = generate_runtime_api_id(&trait_name.to_string()); - let version = generate_runtime_api_version(version as u32); - - let impl_generics = trait_.generics.type_params().map(|t| { - let ident = &t.ident; - let colon_token = &t.colon_token; - let bounds = &t.bounds; - - quote! { #ident #colon_token #bounds } - }).chain(std::iter::once(quote! { __Sr_Api_Error__ })); - - let ty_generics = trait_.generics.type_params().map(|t| { - let ident = &t.ident; - quote! { #ident } - }).chain(std::iter::once(quote! { Error = __Sr_Api_Error__ })); - - quote!( - #[cfg(any(feature = "std", test))] - impl < #( #impl_generics, )* > #crate_::RuntimeApiInfo - for #trait_name < #( #ty_generics, )* > - { - #id - #version - } - ) + let trait_name = &trait_.ident; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + let id = generate_runtime_api_id(&trait_name.to_string()); + let version = generate_runtime_api_version(version as u32); + + let impl_generics = trait_ + .generics + .type_params() + .map(|t| { + let ident = &t.ident; + let colon_token = &t.colon_token; + let bounds = &t.bounds; + + quote! 
{ #ident #colon_token #bounds } + }) + .chain(std::iter::once(quote! { __Sr_Api_Error__ })); + + let ty_generics = trait_ + .generics + .type_params() + .map(|t| { + let ident = &t.ident; + quote! { #ident } + }) + .chain(std::iter::once(quote! { Error = __Sr_Api_Error__ })); + + quote!( + #[cfg(any(feature = "std", test))] + impl < #( #impl_generics, )* > #crate_::RuntimeApiInfo + for #trait_name < #( #ty_generics, )* > + { + #id + #version + } + ) } /// Get changed in version from the user given attribute or `Ok(None)`, if no attribute was given. fn get_changed_in(found_attributes: &HashMap<&'static str, Attribute>) -> Result> { - found_attributes.get(&CHANGED_IN_ATTRIBUTE) - .map(|v| parse_runtime_api_version(v).map(Some)) - .unwrap_or(Ok(None)) + found_attributes + .get(&CHANGED_IN_ATTRIBUTE) + .map(|v| parse_runtime_api_version(v).map(Some)) + .unwrap_or(Ok(None)) } /// Get the api version from the user given attribute or `Ok(1)`, if no attribute was given. fn get_api_version(found_attributes: &HashMap<&'static str, Attribute>) -> Result { - found_attributes.get(&API_VERSION_ATTRIBUTE).map(parse_runtime_api_version).unwrap_or(Ok(1)) + found_attributes + .get(&API_VERSION_ATTRIBUTE) + .map(parse_runtime_api_version) + .unwrap_or(Ok(1)) } /// Generate the declaration of the trait for the client side. 
fn generate_client_side_decls(decls: &[ItemTrait]) -> Result { - let mut result = Vec::new(); + let mut result = Vec::new(); - for decl in decls { - let decl = decl.clone(); + for decl in decls { + let decl = decl.clone(); - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - let block_id = quote!( #crate_::BlockId ); - let mut found_attributes = HashMap::new(); - let mut errors = Vec::new(); - let trait_ = decl.ident.clone(); + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + let block_id = quote!( #crate_::BlockId ); + let mut found_attributes = HashMap::new(); + let mut errors = Vec::new(); + let trait_ = decl.ident.clone(); - let decl = { - let mut to_client_side = ToClientSideDecl { - crate_: &crate_, - block_id: &block_id, - found_attributes: &mut found_attributes, - errors: &mut errors, - trait_: &trait_, - }; - to_client_side.fold_item_trait(decl) - }; + let decl = { + let mut to_client_side = ToClientSideDecl { + crate_: &crate_, + block_id: &block_id, + found_attributes: &mut found_attributes, + errors: &mut errors, + trait_: &trait_, + }; + to_client_side.fold_item_trait(decl) + }; - let api_version = get_api_version(&found_attributes); + let api_version = get_api_version(&found_attributes); - let runtime_info = api_version.map(|v| generate_runtime_info_impl(&decl, v))?; + let runtime_info = api_version.map(|v| generate_runtime_info_impl(&decl, v))?; - result.push(quote!( #decl #runtime_info #( #errors )* )); - } + result.push(quote!( #decl #runtime_info #( #errors )* )); + } - Ok(quote!( #( #result )* )) + Ok(quote!( #( #result )* )) } /// Checks that a trait declaration is in the format we expect. struct CheckTraitDecl { - errors: Vec, + errors: Vec, } impl CheckTraitDecl { - /// Check the given trait. - /// - /// All errors will be collected in `self.errors`. 
- fn check(&mut self, trait_: &ItemTrait) { - self.check_method_declarations(trait_.items.iter().filter_map(|i| match i { - TraitItem::Method(method) => Some(method), - _ => None, - })); - - visit::visit_item_trait(self, trait_); - } - - /// Check that the given method declarations are correct. - /// - /// Any error is stored in `self.errors`. - fn check_method_declarations<'a>(&mut self, methods: impl Iterator) { - let mut method_to_signature_changed = HashMap::>>::new(); - - methods.into_iter().for_each(|method| { - let attributes = remove_supported_attributes(&mut method.attrs.clone()); - - let changed_in = match get_changed_in(&attributes) { - Ok(r) => r, - Err(e) => { self.errors.push(e); return; }, - }; - - method_to_signature_changed - .entry(method.sig.ident.clone()) - .or_default() - .push(changed_in); - }); - - method_to_signature_changed.into_iter().for_each(|(f, changed)| { - // If `changed_in` is `None`, it means it is the current "default" method that calls - // into the latest implementation. - if changed.iter().filter(|c| c.is_none()).count() == 0 { - self.errors.push(Error::new( + /// Check the given trait. + /// + /// All errors will be collected in `self.errors`. + fn check(&mut self, trait_: &ItemTrait) { + self.check_method_declarations(trait_.items.iter().filter_map(|i| match i { + TraitItem::Method(method) => Some(method), + _ => None, + })); + + visit::visit_item_trait(self, trait_); + } + + /// Check that the given method declarations are correct. + /// + /// Any error is stored in `self.errors`. 
+ fn check_method_declarations<'a>( + &mut self, + methods: impl Iterator, + ) { + let mut method_to_signature_changed = HashMap::>>::new(); + + methods.into_iter().for_each(|method| { + let attributes = remove_supported_attributes(&mut method.attrs.clone()); + + let changed_in = match get_changed_in(&attributes) { + Ok(r) => r, + Err(e) => { + self.errors.push(e); + return; + } + }; + + method_to_signature_changed + .entry(method.sig.ident.clone()) + .or_default() + .push(changed_in); + }); + + method_to_signature_changed + .into_iter() + .for_each(|(f, changed)| { + // If `changed_in` is `None`, it means it is the current "default" method that calls + // into the latest implementation. + if changed.iter().filter(|c| c.is_none()).count() == 0 { + self.errors.push(Error::new( f.span(), "There is no 'default' method with this name (without `changed_in` attribute).\n\ The 'default' method is used to call into the latest implementation.", )); - } - }); - } + } + }); + } } impl<'ast> Visit<'ast> for CheckTraitDecl { - fn visit_fn_arg(&mut self, input: &'ast FnArg) { - if let FnArg::Receiver(_) = input { - self.errors.push(Error::new(input.span(), "`self` as argument not supported.")) - } - - visit::visit_fn_arg(self, input); - } - - fn visit_generic_param(&mut self, input: &'ast GenericParam) { - match input { - GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ - `decl_runtime_apis!` macro!" 
- ) - ) - }, - _ => {} - } - - visit::visit_generic_param(self, input); - } - - fn visit_trait_bound(&mut self, input: &'ast TraitBound) { - if let Some(last_ident) = input.path.segments.last().map(|v| &v.ident) { - if last_ident == "BlockT" || last_ident == BLOCK_GENERIC_IDENT { - self.errors.push( - Error::new( - input.span(), - "`Block: BlockT` generic parameter will be added automatically by the \ + fn visit_fn_arg(&mut self, input: &'ast FnArg) { + if let FnArg::Receiver(_) = input { + self.errors.push(Error::new( + input.span(), + "`self` as argument not supported.", + )) + } + + visit::visit_fn_arg(self, input); + } + + fn visit_generic_param(&mut self, input: &'ast GenericParam) { + match input { + GenericParam::Type(ty) if ty.ident == BLOCK_GENERIC_IDENT => { + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ + `decl_runtime_apis!` macro!", + )) + } + _ => {} + } + + visit::visit_generic_param(self, input); + } + + fn visit_trait_bound(&mut self, input: &'ast TraitBound) { + if let Some(last_ident) = input.path.segments.last().map(|v| &v.ident) { + if last_ident == "BlockT" || last_ident == BLOCK_GENERIC_IDENT { + self.errors.push(Error::new( + input.span(), + "`Block: BlockT` generic parameter will be added automatically by the \ `decl_runtime_apis!` macro! If you try to use a different trait than the \ - substrate `Block` trait, please rename it locally." - ) - ) - } - } - - visit::visit_trait_bound(self, input) - } + substrate `Block` trait, please rename it locally.", + )) + } + } + + visit::visit_trait_bound(self, input) + } } /// Check that the trait declarations are in the format we expect. 
fn check_trait_decls(decls: &[ItemTrait]) -> Result<()> { - let mut checker = CheckTraitDecl { errors: Vec::new() }; - decls.iter().for_each(|decl| checker.check(decl)); - - if let Some(err) = checker.errors.pop() { - Err(checker.errors.into_iter().fold(err, |mut err, other| { - err.combine(other); - err - })) - } else { - Ok(()) - } + let mut checker = CheckTraitDecl { errors: Vec::new() }; + decls.iter().for_each(|decl| checker.check(decl)); + + if let Some(err) = checker.errors.pop() { + Err(checker.errors.into_iter().fold(err, |mut err, other| { + err.combine(other); + err + })) + } else { + Ok(()) + } } /// The implementation of the `decl_runtime_apis!` macro. pub fn decl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - // Parse all trait declarations - let RuntimeApiDecls { decls: api_decls } = parse_macro_input!(input as RuntimeApiDecls); + // Parse all trait declarations + let RuntimeApiDecls { decls: api_decls } = parse_macro_input!(input as RuntimeApiDecls); - decl_runtime_apis_impl_inner(&api_decls).unwrap_or_else(|e| e.to_compile_error()).into() + decl_runtime_apis_impl_inner(&api_decls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn decl_runtime_apis_impl_inner(api_decls: &[ItemTrait]) -> Result { - check_trait_decls(&api_decls)?; + check_trait_decls(&api_decls)?; - let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); - let runtime_decls = generate_runtime_decls(api_decls)?; - let client_side_decls = generate_client_side_decls(api_decls)?; + let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); + let runtime_decls = generate_runtime_decls(api_decls)?; + let client_side_decls = generate_client_side_decls(api_decls)?; - Ok( - quote!( - #hidden_includes + Ok(quote!( + #hidden_includes - #runtime_decls + #runtime_decls - #client_side_decls - ) - ) + #client_side_decls + )) } diff --git a/primitives/api/proc-macro/src/impl_runtime_apis.rs 
b/primitives/api/proc-macro/src/impl_runtime_apis.rs index 7def6aa0fb..5afdea9ac1 100644 --- a/primitives/api/proc-macro/src/impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/impl_runtime_apis.rs @@ -15,12 +15,12 @@ // along with Substrate. If not, see . use crate::utils::{ - generate_crate_access, generate_hidden_includes, - generate_runtime_mod_name_for_trait, generate_method_runtime_api_impl_name, - extract_parameter_names_types_and_borrows, generate_native_call_generator_fn_name, - return_type_extract_type, generate_call_api_at_fn_name, prefix_function_with_trait, - extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, - AllowSelfRefInParameters, RequireQualifiedTraitPath, + extract_all_signature_types, extract_block_type_from_trait_path, extract_impl_trait, + extract_parameter_names_types_and_borrows, generate_call_api_at_fn_name, generate_crate_access, + generate_hidden_includes, generate_method_runtime_api_impl_name, + generate_native_call_generator_fn_name, generate_runtime_mod_name_for_trait, + prefix_function_with_trait, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -28,9 +28,12 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, Path, Signature, Attribute, - ImplItem, parse::{Parse, ParseStream, Result, Error}, PathArguments, GenericArgument, TypePath, - fold::{self, Fold}, parse_quote, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + Attribute, GenericArgument, Ident, ImplItem, ItemImpl, Path, PathArguments, Signature, Type, + TypePath, }; use std::{collections::HashSet, iter}; @@ -40,721 +43,723 @@ const HIDDEN_INCLUDES_ID: &str = "IMPL_RUNTIME_APIS"; /// The structure used for parsing the runtime api implementations. 
struct RuntimeApiImpls { - impls: Vec, + impls: Vec, } impl Parse for RuntimeApiImpls { - fn parse(input: ParseStream) -> Result { - let mut impls = Vec::new(); - - while !input.is_empty() { - impls.push(ItemImpl::parse(input)?); - } - - if impls.is_empty() { - Err(Error::new(Span::call_site(), "No api implementation given!")) - } else { - Ok(Self { impls }) - } - } + fn parse(input: ParseStream) -> Result { + let mut impls = Vec::new(); + + while !input.is_empty() { + impls.push(ItemImpl::parse(input)?); + } + + if impls.is_empty() { + Err(Error::new( + Span::call_site(), + "No api implementation given!", + )) + } else { + Ok(Self { impls }) + } + } } /// Generates the call to the implementation of the requested function. /// The generated code includes decoding of the input arguments and encoding of the output. fn generate_impl_call( - signature: &Signature, - runtime: &Type, - input: &Ident, - impl_trait: &Path + signature: &Signature, + runtime: &Type, + input: &Ident, + impl_trait: &Path, ) -> Result { - let params = extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; - - let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let c_iter = iter::repeat(&c); - let fn_name = &signature.ident; - let fn_name_str = iter::repeat(fn_name.to_string()); - let input = iter::repeat(input); - let pnames = params.iter().map(|v| &v.0); - let pnames2 = params.iter().map(|v| &v.0); - let ptypes = params.iter().map(|v| &v.1); - let pborrow = params.iter().map(|v| &v.2); - - Ok( - quote!( - #( - let #pnames : #ptypes = match #c_iter::Decode::decode(&mut #input) { - Ok(input) => input, - Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e.what()), - }; - )* - - #[allow(deprecated)] - <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*) - ) - ) + let params = + extract_parameter_names_types_and_borrows(signature, AllowSelfRefInParameters::No)?; + + let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let c_iter = 
iter::repeat(&c); + let fn_name = &signature.ident; + let fn_name_str = iter::repeat(fn_name.to_string()); + let input = iter::repeat(input); + let pnames = params.iter().map(|v| &v.0); + let pnames2 = params.iter().map(|v| &v.0); + let ptypes = params.iter().map(|v| &v.1); + let pborrow = params.iter().map(|v| &v.2); + + Ok(quote!( + #( + let #pnames : #ptypes = match #c_iter::Decode::decode(&mut #input) { + Ok(input) => input, + Err(e) => panic!("Bad input data provided to {}: {}", #fn_name_str, e.what()), + }; + )* + + #[allow(deprecated)] + <#runtime as #impl_trait>::#fn_name(#( #pborrow #pnames2 ),*) + )) } /// Generate all the implementation calls for the given functions. fn generate_impl_calls( - impls: &[ItemImpl], - input: &Ident + impls: &[ItemImpl], + input: &Ident, ) -> Result)>> { - let mut impl_calls = Vec::new(); - - for impl_ in impls { - let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; - let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); - let impl_trait_ident = &impl_trait_path - .segments - .last() - .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? - .ident; - - for item in &impl_.items { - if let ImplItem::Method(method) = item { - let impl_call = generate_impl_call( - &method.sig, - &impl_.self_ty, - input, - &impl_trait - )?; - - impl_calls.push(( - impl_trait_ident.clone(), - method.sig.ident.clone(), - impl_call, - filter_cfg_attrs(&impl_.attrs), - )); - } - } - } - - Ok(impl_calls) + let mut impl_calls = Vec::new(); + + for impl_ in impls { + let impl_trait_path = extract_impl_trait(impl_, RequireQualifiedTraitPath::Yes)?; + let impl_trait = extend_with_runtime_decl_path(impl_trait_path.clone()); + let impl_trait_ident = &impl_trait_path + .segments + .last() + .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
+ .ident; + + for item in &impl_.items { + if let ImplItem::Method(method) = item { + let impl_call = + generate_impl_call(&method.sig, &impl_.self_ty, input, &impl_trait)?; + + impl_calls.push(( + impl_trait_ident.clone(), + method.sig.ident.clone(), + impl_call, + filter_cfg_attrs(&impl_.attrs), + )); + } + } + } + + Ok(impl_calls) } /// Generate the dispatch function that is used in native to call into the runtime. fn generate_dispatch_function(impls: &[ItemImpl]) -> Result { - let data = Ident::new("data", Span::call_site()); - let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let impl_calls = generate_impl_calls(impls, &data)? - .into_iter() - .map(|(trait_, fn_name, impl_, attrs)| { - let name = prefix_function_with_trait(&trait_, &fn_name); - quote!( - #( #attrs )* - #name => Some(#c::Encode::encode(&{ #impl_ })), - ) - }); - - Ok(quote!( - #[cfg(feature = "std")] - pub fn dispatch(method: &str, mut #data: &[u8]) -> Option> { - match method { - #( #impl_calls )* - _ => None, - } - } - )) + let data = Ident::new("data", Span::call_site()); + let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let impl_calls = + generate_impl_calls(impls, &data)? + .into_iter() + .map(|(trait_, fn_name, impl_, attrs)| { + let name = prefix_function_with_trait(&trait_, &fn_name); + quote!( + #( #attrs )* + #name => Some(#c::Encode::encode(&{ #impl_ })), + ) + }); + + Ok(quote!( + #[cfg(feature = "std")] + pub fn dispatch(method: &str, mut #data: &[u8]) -> Option> { + match method { + #( #impl_calls )* + _ => None, + } + } + )) } /// Generate the interface functions that are used to call into the runtime in wasm. fn generate_wasm_interface(impls: &[ItemImpl]) -> Result { - let input = Ident::new("input", Span::call_site()); - let c = generate_crate_access(HIDDEN_INCLUDES_ID); - let impl_calls = generate_impl_calls(impls, &input)? 
- .into_iter() - .map(|(trait_, fn_name, impl_, attrs)| { - let fn_name = Ident::new( - &prefix_function_with_trait(&trait_, &fn_name), - Span::call_site() - ); - - quote!( - #( #attrs )* - #[cfg(not(feature = "std"))] - #[no_mangle] - pub fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { - let mut #input = if input_len == 0 { - &[0u8; 0] - } else { - unsafe { - #c::slice::from_raw_parts(input_data, input_len) - } - }; - - let output = { #impl_ }; - #c::to_substrate_wasm_fn_return_value(&output) - } - ) - }); - - Ok(quote!( #( #impl_calls )* )) + let input = Ident::new("input", Span::call_site()); + let c = generate_crate_access(HIDDEN_INCLUDES_ID); + let impl_calls = + generate_impl_calls(impls, &input)? + .into_iter() + .map(|(trait_, fn_name, impl_, attrs)| { + let fn_name = Ident::new( + &prefix_function_with_trait(&trait_, &fn_name), + Span::call_site(), + ); + + quote!( + #( #attrs )* + #[cfg(not(feature = "std"))] + #[no_mangle] + pub fn #fn_name(input_data: *mut u8, input_len: usize) -> u64 { + let mut #input = if input_len == 0 { + &[0u8; 0] + } else { + unsafe { + #c::slice::from_raw_parts(input_data, input_len) + } + }; + + let output = { #impl_ }; + #c::to_substrate_wasm_fn_return_value(&output) + } + ) + }); + + Ok(quote!( #( #impl_calls )* )) } fn generate_runtime_api_base_structures() -> Result { - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - Ok(quote!( - pub struct RuntimeApi {} - /// Implements all runtime apis for the client side. 
- #[cfg(any(feature = "std", test))] - pub struct RuntimeApiImpl + 'static> - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { - call: &'static C, - commit_on_success: std::cell::RefCell, - initialized_block: std::cell::RefCell>>, - changes: std::cell::RefCell<#crate_::OverlayedChanges>, - storage_transaction_cache: std::cell::RefCell< - #crate_::StorageTransactionCache - >, - recorder: Option<#crate_::ProofRecorder>, - } - - // `RuntimeApi` itself is not threadsafe. However, an instance is only available in a - // `ApiRef` object and `ApiRef` also has an associated lifetime. This lifetimes makes it - // impossible to move `RuntimeApi` into another thread. - #[cfg(any(feature = "std", test))] - unsafe impl> Send - for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - {} - - #[cfg(any(feature = "std", test))] - unsafe impl> Sync - for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - {} - - #[cfg(any(feature = "std", test))] - impl> #crate_::ApiErrorExt - for RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { - type Error = C::Error; - } - - #[cfg(any(feature = "std", test))] - impl> #crate_::ApiExt for - RuntimeApiImpl - where - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { - type StateBackend = C::StateBackend; - - fn map_api_result std::result::Result, R, E>( - &self, - map_call: F, - ) -> std::result::Result where Self: Sized { - *self.commit_on_success.borrow_mut() = false; - let res = map_call(self); - *self.commit_on_success.borrow_mut() = true; - - self.commit_on_ok(&res); - - res - } - - fn has_api( - &self, - 
at: &#crate_::BlockId, - ) -> std::result::Result where Self: Sized { - self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) - } - - fn has_api_with bool>( - &self, - at: &#crate_::BlockId, - pred: P, - ) -> std::result::Result where Self: Sized { - self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) - } - - fn record_proof(&mut self) { - self.recorder = Some(Default::default()); - } - - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { - self.recorder - .take() - .map(|recorder| { - let trie_nodes = recorder.read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - #crate_::StorageProof::new(trie_nodes) - }) - } - - fn into_storage_changes( - &self, - backend: &Self::StateBackend, - changes_trie_state: Option<&#crate_::ChangesTrieState< - #crate_::HashFor, - #crate_::NumberFor, - >>, - parent_hash: Block::Hash, - ) -> std::result::Result< - #crate_::StorageChanges, - String - > where Self: Sized { - self.initialized_block.borrow_mut().take(); - self.changes.replace(Default::default()).into_storage_changes( - backend, - changes_trie_state, - parent_hash, - self.storage_transaction_cache.replace(Default::default()), - ) - } - } - - #[cfg(any(feature = "std", test))] - impl #crate_::ConstructRuntimeApi - for RuntimeApi - where - C: #crate_::CallApiAt + 'static, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { - type RuntimeApi = RuntimeApiImpl; - - fn construct_runtime_api<'a>( - call: &'a C, - ) -> #crate_::ApiRef<'a, Self::RuntimeApi> { - RuntimeApiImpl { - call: unsafe { std::mem::transmute(call) }, - commit_on_success: true.into(), - initialized_block: None.into(), - changes: Default::default(), - recorder: Default::default(), - storage_transaction_cache: Default::default(), - }.into() - } - } - - #[cfg(any(feature = "std", test))] - impl> RuntimeApiImpl - where - // Rust bug: 
https://github.com/rust-lang/rust/issues/24159 - C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, - { - fn call_api_at< - R: #crate_::Encode + #crate_::Decode + PartialEq, - F: FnOnce( - &C, - &Self, - &std::cell::RefCell<#crate_::OverlayedChanges>, - &std::cell::RefCell<#crate_::StorageTransactionCache>, - &std::cell::RefCell>>, - &Option<#crate_::ProofRecorder>, - ) -> std::result::Result<#crate_::NativeOrEncoded, E>, - E, - >( - &self, - call_api_at: F, - ) -> std::result::Result<#crate_::NativeOrEncoded, E> { - let res = call_api_at( - &self.call, - self, - &self.changes, - &self.storage_transaction_cache, - &self.initialized_block, - &self.recorder, - ); - - self.commit_on_ok(&res); - res - } - - fn commit_on_ok(&self, res: &std::result::Result) { - if *self.commit_on_success.borrow() { - if res.is_err() { - self.changes.borrow_mut().discard_prospective(); - } else { - self.changes.borrow_mut().commit_prospective(); - } - } - } - } - )) + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + Ok(quote!( + pub struct RuntimeApi {} + /// Implements all runtime apis for the client side. + #[cfg(any(feature = "std", test))] + pub struct RuntimeApiImpl + 'static> + where + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, + { + call: &'static C, + commit_on_success: std::cell::RefCell, + initialized_block: std::cell::RefCell>>, + changes: std::cell::RefCell<#crate_::OverlayedChanges>, + storage_transaction_cache: std::cell::RefCell< + #crate_::StorageTransactionCache + >, + recorder: Option<#crate_::ProofRecorder>, + } + + // `RuntimeApi` itself is not threadsafe. However, an instance is only available in a + // `ApiRef` object and `ApiRef` also has an associated lifetime. This lifetimes makes it + // impossible to move `RuntimeApi` into another thread. 
+ #[cfg(any(feature = "std", test))] + unsafe impl> Send + for RuntimeApiImpl + where + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, + {} + + #[cfg(any(feature = "std", test))] + unsafe impl> Sync + for RuntimeApiImpl + where + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, + {} + + #[cfg(any(feature = "std", test))] + impl> #crate_::ApiErrorExt + for RuntimeApiImpl + where + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, + { + type Error = C::Error; + } + + #[cfg(any(feature = "std", test))] + impl> #crate_::ApiExt for + RuntimeApiImpl + where + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, + { + type StateBackend = C::StateBackend; + + fn map_api_result std::result::Result, R, E>( + &self, + map_call: F, + ) -> std::result::Result where Self: Sized { + *self.commit_on_success.borrow_mut() = false; + let res = map_call(self); + *self.commit_on_success.borrow_mut() = true; + + self.commit_on_ok(&res); + + res + } + + fn has_api( + &self, + at: &#crate_::BlockId, + ) -> std::result::Result where Self: Sized { + self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, |v| v == A::VERSION)) + } + + fn has_api_with bool>( + &self, + at: &#crate_::BlockId, + pred: P, + ) -> std::result::Result where Self: Sized { + self.call.runtime_version_at(at).map(|v| v.has_api_with(&A::ID, pred)) + } + + fn record_proof(&mut self) { + self.recorder = Some(Default::default()); + } + + fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + self.recorder + .take() + .map(|recorder| { + let trie_nodes = recorder.read() + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + #crate_::StorageProof::new(trie_nodes) + }) + } + + fn into_storage_changes( + 
&self, + backend: &Self::StateBackend, + changes_trie_state: Option<&#crate_::ChangesTrieState< + #crate_::HashFor, + #crate_::NumberFor, + >>, + parent_hash: Block::Hash, + ) -> std::result::Result< + #crate_::StorageChanges, + String + > where Self: Sized { + self.initialized_block.borrow_mut().take(); + self.changes.replace(Default::default()).into_storage_changes( + backend, + changes_trie_state, + parent_hash, + self.storage_transaction_cache.replace(Default::default()), + ) + } + } + + #[cfg(any(feature = "std", test))] + impl #crate_::ConstructRuntimeApi + for RuntimeApi + where + C: #crate_::CallApiAt + 'static, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, + { + type RuntimeApi = RuntimeApiImpl; + + fn construct_runtime_api<'a>( + call: &'a C, + ) -> #crate_::ApiRef<'a, Self::RuntimeApi> { + RuntimeApiImpl { + call: unsafe { std::mem::transmute(call) }, + commit_on_success: true.into(), + initialized_block: None.into(), + changes: Default::default(), + recorder: Default::default(), + storage_transaction_cache: Default::default(), + }.into() + } + } + + #[cfg(any(feature = "std", test))] + impl> RuntimeApiImpl + where + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + C::StateBackend: #crate_::StateBackend<#crate_::HashFor>, + { + fn call_api_at< + R: #crate_::Encode + #crate_::Decode + PartialEq, + F: FnOnce( + &C, + &Self, + &std::cell::RefCell<#crate_::OverlayedChanges>, + &std::cell::RefCell<#crate_::StorageTransactionCache>, + &std::cell::RefCell>>, + &Option<#crate_::ProofRecorder>, + ) -> std::result::Result<#crate_::NativeOrEncoded, E>, + E, + >( + &self, + call_api_at: F, + ) -> std::result::Result<#crate_::NativeOrEncoded, E> { + let res = call_api_at( + &self.call, + self, + &self.changes, + &self.storage_transaction_cache, + &self.initialized_block, + &self.recorder, + ); + + self.commit_on_ok(&res); + res + } + + fn commit_on_ok(&self, res: 
&std::result::Result) { + if *self.commit_on_success.borrow() { + if res.is_err() { + self.changes.borrow_mut().discard_prospective(); + } else { + self.changes.borrow_mut().commit_prospective(); + } + } + } + } + )) } /// Extend the given trait path with module that contains the declaration of the trait for the /// runtime. fn extend_with_runtime_decl_path(mut trait_: Path) -> Path { - let runtime = { - let trait_name = &trait_ - .segments - .last() - .as_ref() - .expect("Trait path should always contain at least one item; qed") - .ident; - - generate_runtime_mod_name_for_trait(trait_name) - }; - - let pos = trait_.segments.len() - 1; - trait_.segments.insert(pos, runtime.clone().into()); - trait_ + let runtime = { + let trait_name = &trait_ + .segments + .last() + .as_ref() + .expect("Trait path should always contain at least one item; qed") + .ident; + + generate_runtime_mod_name_for_trait(trait_name) + }; + + let pos = trait_.segments.len() - 1; + trait_.segments.insert(pos, runtime.clone().into()); + trait_ } /// Generates the implementations of the apis for the runtime. fn generate_api_impl_for_runtime(impls: &[ItemImpl]) -> Result { - let mut impls_prepared = Vec::new(); + let mut impls_prepared = Vec::new(); - // We put `runtime` before each trait to get the trait that is intended for the runtime and - // we put the `RuntimeBlock` as first argument for the trait generics. - for impl_ in impls.iter() { - let mut impl_ = impl_.clone(); - let trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); - let trait_ = extend_with_runtime_decl_path(trait_); + // We put `runtime` before each trait to get the trait that is intended for the runtime and + // we put the `RuntimeBlock` as first argument for the trait generics. 
+ for impl_ in impls.iter() { + let mut impl_ = impl_.clone(); + let trait_ = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(); + let trait_ = extend_with_runtime_decl_path(trait_); - impl_.trait_.as_mut().unwrap().1 = trait_; - impl_.attrs = filter_cfg_attrs(&impl_.attrs); - impls_prepared.push(impl_); - } + impl_.trait_.as_mut().unwrap().1 = trait_; + impl_.attrs = filter_cfg_attrs(&impl_.attrs); + impls_prepared.push(impl_); + } - Ok(quote!( #( #impls_prepared )* )) + Ok(quote!( #( #impls_prepared )* )) } - /// Auxiliary data structure that is used to convert `impl Api for Runtime` to /// `impl Api for RuntimeApi`. /// This requires us to replace the runtime `Block` with the node `Block`, /// `impl Api for Runtime` with `impl Api for RuntimeApi` and replace the method implementations /// with code that calls into the runtime. struct ApiRuntimeImplToApiRuntimeApiImpl<'a> { - runtime_block: &'a TypePath, - runtime_mod_path: &'a Path, - runtime_type: &'a Type, - trait_generic_arguments: &'a [GenericArgument], - impl_trait: &'a Ident, + runtime_block: &'a TypePath, + runtime_mod_path: &'a Path, + runtime_type: &'a Type, + trait_generic_arguments: &'a [GenericArgument], + impl_trait: &'a Ident, } impl<'a> Fold for ApiRuntimeImplToApiRuntimeApiImpl<'a> { - fn fold_type_path(&mut self, input: TypePath) -> TypePath { - let new_ty_path = if input == *self.runtime_block { - parse_quote!( __SR_API_BLOCK__ ) - } else { - input - }; - - fold::fold_type_path(self, new_ty_path) - } - - fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { - let block = { - let runtime_mod_path = self.runtime_mod_path; - let runtime = self.runtime_type; - let native_call_generator_ident = - generate_native_call_generator_fn_name(&input.sig.ident); - let call_api_at_call = generate_call_api_at_fn_name(&input.sig.ident); - let trait_generic_arguments = self.trait_generic_arguments; - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); 
- - // Generate the access to the native parameters - let param_tuple_access = if input.sig.inputs.len() == 1 { - vec![ quote!( p ) ] - } else { - input.sig.inputs.iter().enumerate().map(|(i, _)| { - let i = syn::Index::from(i); - quote!( p.#i ) - }).collect::>() - }; - - let (param_types, error) = match extract_parameter_names_types_and_borrows( - &input.sig, - AllowSelfRefInParameters::No, - ) { - Ok(res) => ( - res.into_iter().map(|v| { - let ty = v.1; - let borrow = v.2; - quote!( #borrow #ty ) - }).collect::>(), - None - ), - Err(e) => (Vec::new(), Some(e.to_compile_error())), - }; - - // Rewrite the input parameters. - input.sig.inputs = parse_quote! { - &self, - at: &#crate_::BlockId<__SR_API_BLOCK__>, - context: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - params_encoded: Vec, - }; - - input.sig.ident = generate_method_runtime_api_impl_name( - &self.impl_trait, - &input.sig.ident, - ); - let ret_type = return_type_extract_type(&input.sig.output); - - // Generate the correct return type. - input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, RuntimeApiImplCall::Error> - ); - - // Generate the new method implementation that calls into the runtime. - parse_quote!( - { - // Get the error to the user (if we have one). 
- #error - - self.call_api_at( - | - call_runtime_at, - core_api, - changes, - storage_transaction_cache, - initialized_block, - recorder - | { - #runtime_mod_path #call_api_at_call( - call_runtime_at, - core_api, - at, - params_encoded, - changes, - storage_transaction_cache, - initialized_block, - params.map(|p| { - #runtime_mod_path #native_call_generator_ident :: - <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( - #( #param_tuple_access ),* - ) - }), - context, - recorder, - ) - } - ) - } - ) - }; - - let mut input = fold::fold_impl_item_method(self, input); - // We need to set the block, after we modified the rest of the ast, otherwise we would - // modify our generated block as well. - input.block = block; - input - } - - fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl { - // All this `UnwindSafe` magic below here is required for this rust bug: - // https://github.com/rust-lang/rust/issues/24159 - // Before we directly had the final block type and rust could determine that it is unwind - // safe, but now we just have a generic parameter `Block`. - - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - // Implement the trait for the `RuntimeApiImpl` - input.self_ty = Box::new( - parse_quote!( RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall> ) - ); - - input.generics.params.push( - parse_quote!( - __SR_API_BLOCK__: #crate_::BlockT + std::panic::UnwindSafe + - std::panic::RefUnwindSafe - ) - ); - input.generics.params.push( - parse_quote!( RuntimeApiImplCall: #crate_::CallApiAt<__SR_API_BLOCK__> + 'static ) - ); - - let where_clause = input.generics.make_where_clause(); - - where_clause.predicates.push( - parse_quote! { - RuntimeApiImplCall::StateBackend: - #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> - } - ); - - // Require that all types used in the function signatures are unwind safe. - extract_all_signature_types(&input.items).iter().for_each(|i| { - where_clause.predicates.push( - parse_quote! 
{ - #i: std::panic::UnwindSafe + std::panic::RefUnwindSafe - } - ); - }); - - where_clause.predicates.push( - parse_quote! { - __SR_API_BLOCK__::Header: std::panic::UnwindSafe + std::panic::RefUnwindSafe - } - ); - - input.attrs = filter_cfg_attrs(&input.attrs); - - // The implementation for the `RuntimeApiImpl` is only required when compiling with - // the feature `std` or `test`. - input.attrs.push(parse_quote!( #[cfg(any(feature = "std", test))] )); - - fold::fold_item_impl(self, input) - } + fn fold_type_path(&mut self, input: TypePath) -> TypePath { + let new_ty_path = if input == *self.runtime_block { + parse_quote!(__SR_API_BLOCK__) + } else { + input + }; + + fold::fold_type_path(self, new_ty_path) + } + + fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { + let block = { + let runtime_mod_path = self.runtime_mod_path; + let runtime = self.runtime_type; + let native_call_generator_ident = + generate_native_call_generator_fn_name(&input.sig.ident); + let call_api_at_call = generate_call_api_at_fn_name(&input.sig.ident); + let trait_generic_arguments = self.trait_generic_arguments; + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Generate the access to the native parameters + let param_tuple_access = if input.sig.inputs.len() == 1 { + vec![quote!(p)] + } else { + input + .sig + .inputs + .iter() + .enumerate() + .map(|(i, _)| { + let i = syn::Index::from(i); + quote!( p.#i ) + }) + .collect::>() + }; + + let (param_types, error) = match extract_parameter_names_types_and_borrows( + &input.sig, + AllowSelfRefInParameters::No, + ) { + Ok(res) => ( + res.into_iter() + .map(|v| { + let ty = v.1; + let borrow = v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + None, + ), + Err(e) => (Vec::new(), Some(e.to_compile_error())), + }; + + // Rewrite the input parameters. + input.sig.inputs = parse_quote! 
{ + &self, + at: &#crate_::BlockId<__SR_API_BLOCK__>, + context: #crate_::ExecutionContext, + params: Option<( #( #param_types ),* )>, + params_encoded: Vec, + }; + + input.sig.ident = + generate_method_runtime_api_impl_name(&self.impl_trait, &input.sig.ident); + let ret_type = return_type_extract_type(&input.sig.output); + + // Generate the correct return type. + input.sig.output = parse_quote!( + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, RuntimeApiImplCall::Error> + ); + + // Generate the new method implementation that calls into the runtime. + parse_quote!( + { + // Get the error to the user (if we have one). + #error + + self.call_api_at( + | + call_runtime_at, + core_api, + changes, + storage_transaction_cache, + initialized_block, + recorder + | { + #runtime_mod_path #call_api_at_call( + call_runtime_at, + core_api, + at, + params_encoded, + changes, + storage_transaction_cache, + initialized_block, + params.map(|p| { + #runtime_mod_path #native_call_generator_ident :: + <#runtime, __SR_API_BLOCK__ #(, #trait_generic_arguments )*> ( + #( #param_tuple_access ),* + ) + }), + context, + recorder, + ) + } + ) + } + ) + }; + + let mut input = fold::fold_impl_item_method(self, input); + // We need to set the block, after we modified the rest of the ast, otherwise we would + // modify our generated block as well. + input.block = block; + input + } + + fn fold_item_impl(&mut self, mut input: ItemImpl) -> ItemImpl { + // All this `UnwindSafe` magic below here is required for this rust bug: + // https://github.com/rust-lang/rust/issues/24159 + // Before we directly had the final block type and rust could determine that it is unwind + // safe, but now we just have a generic parameter `Block`. 
+ + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + // Implement the trait for the `RuntimeApiImpl` + input.self_ty = + Box::new(parse_quote!( RuntimeApiImpl<__SR_API_BLOCK__, RuntimeApiImplCall> )); + + input.generics.params.push(parse_quote!( + __SR_API_BLOCK__: #crate_::BlockT + std::panic::UnwindSafe + + std::panic::RefUnwindSafe + )); + input.generics.params.push( + parse_quote!( RuntimeApiImplCall: #crate_::CallApiAt<__SR_API_BLOCK__> + 'static ), + ); + + let where_clause = input.generics.make_where_clause(); + + where_clause.predicates.push(parse_quote! { + RuntimeApiImplCall::StateBackend: + #crate_::StateBackend<#crate_::HashFor<__SR_API_BLOCK__>> + }); + + // Require that all types used in the function signatures are unwind safe. + extract_all_signature_types(&input.items) + .iter() + .for_each(|i| { + where_clause.predicates.push(parse_quote! { + #i: std::panic::UnwindSafe + std::panic::RefUnwindSafe + }); + }); + + where_clause.predicates.push(parse_quote! { + __SR_API_BLOCK__::Header: std::panic::UnwindSafe + std::panic::RefUnwindSafe + }); + + input.attrs = filter_cfg_attrs(&input.attrs); + + // The implementation for the `RuntimeApiImpl` is only required when compiling with + // the feature `std` or `test`. + input + .attrs + .push(parse_quote!( #[cfg(any(feature = "std", test))] )); + + fold::fold_item_impl(self, input) + } } /// Generate the implementations of the runtime apis for the `RuntimeApi` type. fn generate_api_impl_for_runtime_api(impls: &[ItemImpl]) -> Result { - let mut result = Vec::with_capacity(impls.len()); - - for impl_ in impls { - let impl_trait_path = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?; - let impl_trait = &impl_trait_path - .segments - .last() - .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
- .clone(); - let runtime_block = extract_block_type_from_trait_path(impl_trait_path)?; - let runtime_type = &impl_.self_ty; - let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone()); - // remove the trait to get just the module path - runtime_mod_path.segments.pop(); - - let trait_generic_arguments = match impl_trait.arguments { - PathArguments::Parenthesized(_) | PathArguments::None => vec![], - PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(), - }; - - let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl { - runtime_block, - runtime_mod_path: &runtime_mod_path, - runtime_type: &*runtime_type, - trait_generic_arguments: &trait_generic_arguments, - impl_trait: &impl_trait.ident, - }; - - result.push(visitor.fold_item_impl(impl_.clone())); - } - Ok(quote!( #( #result )* )) + let mut result = Vec::with_capacity(impls.len()); + + for impl_ in impls { + let impl_trait_path = extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?; + let impl_trait = &impl_trait_path + .segments + .last() + .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
+ .clone(); + let runtime_block = extract_block_type_from_trait_path(impl_trait_path)?; + let runtime_type = &impl_.self_ty; + let mut runtime_mod_path = extend_with_runtime_decl_path(impl_trait_path.clone()); + // remove the trait to get just the module path + runtime_mod_path.segments.pop(); + + let trait_generic_arguments = match impl_trait.arguments { + PathArguments::Parenthesized(_) | PathArguments::None => vec![], + PathArguments::AngleBracketed(ref b) => b.args.iter().cloned().collect(), + }; + + let mut visitor = ApiRuntimeImplToApiRuntimeApiImpl { + runtime_block, + runtime_mod_path: &runtime_mod_path, + runtime_type: &*runtime_type, + trait_generic_arguments: &trait_generic_arguments, + impl_trait: &impl_trait.ident, + }; + + result.push(visitor.fold_item_impl(impl_.clone())); + } + Ok(quote!( #( #result )* )) } /// Generates `RUNTIME_API_VERSIONS` that holds all version information about the implemented /// runtime apis. fn generate_runtime_api_versions(impls: &[ItemImpl]) -> Result { - let mut result = Vec::with_capacity(impls.len()); - let mut processed_traits = HashSet::new(); - - for impl_ in impls { - let mut path = extend_with_runtime_decl_path( - extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(), - ); - // Remove the trait - let trait_ = path - .segments - .pop() - .expect("extract_impl_trait already checks that this is valid; qed") - .into_value() - .ident; - - let span = trait_.span(); - if !processed_traits.insert(trait_) { - return Err( - Error::new( - span, - "Two traits with the same name detected! 
\ + let mut result = Vec::with_capacity(impls.len()); + let mut processed_traits = HashSet::new(); + + for impl_ in impls { + let mut path = extend_with_runtime_decl_path( + extract_impl_trait(&impl_, RequireQualifiedTraitPath::Yes)?.clone(), + ); + // Remove the trait + let trait_ = path + .segments + .pop() + .expect("extract_impl_trait already checks that this is valid; qed") + .into_value() + .ident; + + let span = trait_.span(); + if !processed_traits.insert(trait_) { + return Err(Error::new( + span, + "Two traits with the same name detected! \ The trait name is used to generate its ID. \ - Please rename one trait at the declaration!" - ) - ) - } - - let id: Path = parse_quote!( #path ID ); - let version: Path = parse_quote!( #path VERSION ); - let attrs = filter_cfg_attrs(&impl_.attrs); - - result.push(quote!( - #( #attrs )* - (#id, #version) - )); - } - - let c = generate_crate_access(HIDDEN_INCLUDES_ID); - - Ok(quote!( - const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); - )) + Please rename one trait at the declaration!", + )); + } + + let id: Path = parse_quote!( #path ID ); + let version: Path = parse_quote!( #path VERSION ); + let attrs = filter_cfg_attrs(&impl_.attrs); + + result.push(quote!( + #( #attrs )* + (#id, #version) + )); + } + + let c = generate_crate_access(HIDDEN_INCLUDES_ID); + + Ok(quote!( + const RUNTIME_API_VERSIONS: #c::ApisVec = #c::create_apis_vec!([ #( #result ),* ]); + )) } /// The implementation of the `impl_runtime_apis!` macro. 
pub fn impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - // Parse all impl blocks - let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); + // Parse all impl blocks + let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); - impl_runtime_apis_impl_inner(&api_impls).unwrap_or_else(|e| e.to_compile_error()).into() + impl_runtime_apis_impl_inner(&api_impls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { - let dispatch_impl = generate_dispatch_function(api_impls)?; - let api_impls_for_runtime = generate_api_impl_for_runtime(api_impls)?; - let base_runtime_api = generate_runtime_api_base_structures()?; - let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); - let runtime_api_versions = generate_runtime_api_versions(api_impls)?; - let wasm_interface = generate_wasm_interface(api_impls)?; - let api_impls_for_runtime_api = generate_api_impl_for_runtime_api(api_impls)?; + let dispatch_impl = generate_dispatch_function(api_impls)?; + let api_impls_for_runtime = generate_api_impl_for_runtime(api_impls)?; + let base_runtime_api = generate_runtime_api_base_structures()?; + let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); + let runtime_api_versions = generate_runtime_api_versions(api_impls)?; + let wasm_interface = generate_wasm_interface(api_impls)?; + let api_impls_for_runtime_api = generate_api_impl_for_runtime_api(api_impls)?; - Ok( - quote!( - #hidden_includes + Ok(quote!( + #hidden_includes - #base_runtime_api + #base_runtime_api - #api_impls_for_runtime + #api_impls_for_runtime - #api_impls_for_runtime_api + #api_impls_for_runtime_api - #runtime_api_versions + #runtime_api_versions - pub mod api { - use super::*; + pub mod api { + use super::*; - #dispatch_impl + #dispatch_impl - #wasm_interface - } - ) - ) + #wasm_interface + } + )) } // Filters all attributes 
except the cfg ones. fn filter_cfg_attrs(attrs: &[Attribute]) -> Vec { - attrs.into_iter().filter(|a| a.path.is_ident("cfg")).cloned().collect() + attrs + .into_iter() + .filter(|a| a.path.is_ident("cfg")) + .cloned() + .collect() } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn filter_non_cfg_attributes() { - let cfg_std: Attribute = parse_quote!(#[cfg(feature = "std")]); - let cfg_benchmarks: Attribute = parse_quote!(#[cfg(feature = "runtime-benchmarks")]); - - let attrs = vec![ - cfg_std.clone(), - parse_quote!(#[derive(Debug)]), - parse_quote!(#[test]), - cfg_benchmarks.clone(), - parse_quote!(#[allow(non_camel_case_types)]), - ]; - - let filtered = filter_cfg_attrs(&attrs); - assert_eq!(filtered.len(), 2); - assert_eq!(cfg_std, filtered[0]); - assert_eq!(cfg_benchmarks, filtered[1]); - } + use super::*; + + #[test] + fn filter_non_cfg_attributes() { + let cfg_std: Attribute = parse_quote!(#[cfg(feature = "std")]); + let cfg_benchmarks: Attribute = parse_quote!(#[cfg(feature = "runtime-benchmarks")]); + + let attrs = vec![ + cfg_std.clone(), + parse_quote!(#[derive(Debug)]), + parse_quote!(#[test]), + cfg_benchmarks.clone(), + parse_quote!(#[allow(non_camel_case_types)]), + ]; + + let filtered = filter_cfg_attrs(&attrs); + assert_eq!(filtered.len(), 2); + assert_eq!(cfg_std, filtered[0]); + assert_eq!(cfg_benchmarks, filtered[1]); + } } diff --git a/primitives/api/proc-macro/src/lib.rs b/primitives/api/proc-macro/src/lib.rs index 12f435bd16..0dc9a9044c 100644 --- a/primitives/api/proc-macro/src/lib.rs +++ b/primitives/api/proc-macro/src/lib.rs @@ -20,22 +20,22 @@ use proc_macro::TokenStream; +mod decl_runtime_apis; mod impl_runtime_apis; mod mock_impl_runtime_apis; -mod decl_runtime_apis; mod utils; #[proc_macro] pub fn impl_runtime_apis(input: TokenStream) -> TokenStream { - impl_runtime_apis::impl_runtime_apis_impl(input) + impl_runtime_apis::impl_runtime_apis_impl(input) } #[proc_macro] pub fn mock_impl_runtime_apis(input: TokenStream) -> 
TokenStream { - mock_impl_runtime_apis::mock_impl_runtime_apis_impl(input) + mock_impl_runtime_apis::mock_impl_runtime_apis_impl(input) } #[proc_macro] pub fn decl_runtime_apis(input: TokenStream) -> TokenStream { - decl_runtime_apis::decl_runtime_apis_impl(input) + decl_runtime_apis::decl_runtime_apis_impl(input) } diff --git a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs index 0767c804a6..f3a0917507 100644 --- a/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs +++ b/primitives/api/proc-macro/src/mock_impl_runtime_apis.rs @@ -15,10 +15,10 @@ // along with Substrate. If not, see . use crate::utils::{ - generate_crate_access, generate_hidden_includes, - generate_method_runtime_api_impl_name, extract_parameter_names_types_and_borrows, - return_type_extract_type, extract_block_type_from_trait_path, extract_impl_trait, - AllowSelfRefInParameters, RequireQualifiedTraitPath, + extract_block_type_from_trait_path, extract_impl_trait, + extract_parameter_names_types_and_borrows, generate_crate_access, generate_hidden_includes, + generate_method_runtime_api_impl_name, return_type_extract_type, AllowSelfRefInParameters, + RequireQualifiedTraitPath, }; use proc_macro2::{Span, TokenStream}; @@ -26,8 +26,11 @@ use proc_macro2::{Span, TokenStream}; use quote::quote; use syn::{ - spanned::Spanned, parse_macro_input, Ident, Type, ItemImpl, ImplItem, TypePath, parse_quote, - parse::{Parse, ParseStream, Result, Error}, fold::{self, Fold}, + fold::{self, Fold}, + parse::{Error, Parse, ParseStream, Result}, + parse_macro_input, parse_quote, + spanned::Spanned, + Ident, ImplItem, ItemImpl, Type, TypePath, }; /// Unique identifier used to make the hidden includes unique for this macro. @@ -35,245 +38,250 @@ const HIDDEN_INCLUDES_ID: &str = "MOCK_IMPL_RUNTIME_APIS"; /// The structure used for parsing the runtime api implementations. 
struct RuntimeApiImpls { - impls: Vec, + impls: Vec, } impl Parse for RuntimeApiImpls { - fn parse(input: ParseStream) -> Result { - let mut impls = Vec::new(); - - while !input.is_empty() { - impls.push(ItemImpl::parse(input)?); - } - - if impls.is_empty() { - Err(Error::new(Span::call_site(), "No api implementation given!")) - } else { - Ok(Self { impls }) - } - } + fn parse(input: ParseStream) -> Result { + let mut impls = Vec::new(); + + while !input.is_empty() { + impls.push(ItemImpl::parse(input)?); + } + + if impls.is_empty() { + Err(Error::new( + Span::call_site(), + "No api implementation given!", + )) + } else { + Ok(Self { impls }) + } + } } /// Implement the `ApiExt` trait, `ApiErrorExt` trait and the `Core` runtime api. fn implement_common_api_traits( - error_type: Option, - block_type: TypePath, - self_ty: Type, + error_type: Option, + block_type: TypePath, + self_ty: Type, ) -> Result { - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - let error_type = error_type.map(|e| quote!(#e)).unwrap_or_else(|| quote!(String)); - - Ok(quote!( - impl #crate_::ApiErrorExt for #self_ty { - type Error = #error_type; - } - - impl #crate_::ApiExt<#block_type> for #self_ty { - type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; - - fn map_api_result std::result::Result, R, E>( - &self, - map_call: F, - ) -> std::result::Result where Self: Sized { - map_call(self) - } - - fn has_api( - &self, - _: &#crate_::BlockId<#block_type>, - ) -> std::result::Result where Self: Sized { - Ok(true) - } - - fn has_api_with bool>( - &self, - at: &#crate_::BlockId<#block_type>, - pred: P, - ) -> std::result::Result where Self: Sized { - Ok(pred(A::VERSION)) - } - - fn record_proof(&mut self) { - unimplemented!("`record_proof` not implemented for runtime api mocks") - } - - fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { - unimplemented!("`extract_proof` not implemented for runtime api mocks") - } - - fn into_storage_changes( - &self, 
- _: &Self::StateBackend, - _: Option<&#crate_::ChangesTrieState< - #crate_::HashFor<#block_type>, - #crate_::NumberFor<#block_type>, - >>, - _: <#block_type as #crate_::BlockT>::Hash, - ) -> std::result::Result< - #crate_::StorageChanges, - String - > where Self: Sized { - unimplemented!("`into_storage_changes` not implemented for runtime api mocks") - } - } - - impl #crate_::Core<#block_type> for #self_ty { - fn Core_version_runtime_api_impl( - &self, - _: &#crate_::BlockId<#block_type>, - _: #crate_::ExecutionContext, - _: Option<()>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #error_type> { - unimplemented!("Not required for testing!") - } - - fn Core_execute_block_runtime_api_impl( - &self, - _: &#crate_::BlockId<#block_type>, - _: #crate_::ExecutionContext, - _: Option<#block_type>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { - unimplemented!("Not required for testing!") - } - - fn Core_initialize_block_runtime_api_impl( - &self, - _: &#crate_::BlockId<#block_type>, - _: #crate_::ExecutionContext, - _: Option<&<#block_type as #crate_::BlockT>::Header>, - _: Vec, - ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { - unimplemented!("Not required for testing!") - } - } - )) + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + let error_type = error_type + .map(|e| quote!(#e)) + .unwrap_or_else(|| quote!(String)); + + Ok(quote!( + impl #crate_::ApiErrorExt for #self_ty { + type Error = #error_type; + } + + impl #crate_::ApiExt<#block_type> for #self_ty { + type StateBackend = #crate_::InMemoryBackend<#crate_::HashFor<#block_type>>; + + fn map_api_result std::result::Result, R, E>( + &self, + map_call: F, + ) -> std::result::Result where Self: Sized { + map_call(self) + } + + fn has_api( + &self, + _: &#crate_::BlockId<#block_type>, + ) -> std::result::Result where Self: Sized { + Ok(true) + } + + fn has_api_with bool>( + &self, + at: 
&#crate_::BlockId<#block_type>, + pred: P, + ) -> std::result::Result where Self: Sized { + Ok(pred(A::VERSION)) + } + + fn record_proof(&mut self) { + unimplemented!("`record_proof` not implemented for runtime api mocks") + } + + fn extract_proof(&mut self) -> Option<#crate_::StorageProof> { + unimplemented!("`extract_proof` not implemented for runtime api mocks") + } + + fn into_storage_changes( + &self, + _: &Self::StateBackend, + _: Option<&#crate_::ChangesTrieState< + #crate_::HashFor<#block_type>, + #crate_::NumberFor<#block_type>, + >>, + _: <#block_type as #crate_::BlockT>::Hash, + ) -> std::result::Result< + #crate_::StorageChanges, + String + > where Self: Sized { + unimplemented!("`into_storage_changes` not implemented for runtime api mocks") + } + } + + impl #crate_::Core<#block_type> for #self_ty { + fn Core_version_runtime_api_impl( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: Option<()>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<#crate_::RuntimeVersion>, #error_type> { + unimplemented!("Not required for testing!") + } + + fn Core_execute_block_runtime_api_impl( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: Option<#block_type>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + unimplemented!("Not required for testing!") + } + + fn Core_initialize_block_runtime_api_impl( + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + _: Option<&<#block_type as #crate_::BlockT>::Header>, + _: Vec, + ) -> std::result::Result<#crate_::NativeOrEncoded<()>, #error_type> { + unimplemented!("Not required for testing!") + } + } + )) } /// Auxialiry structure to fold a runtime api trait implementation into the expected format. /// /// This renames the methods, changes the method parameters and extracts the error type. struct FoldRuntimeApiImpl<'a> { - /// The block type that is being used. 
- block_type: &'a TypePath, - /// The identifier of the trait being implemented. - impl_trait: &'a Ident, - /// Stores the error type that is being found in the trait implementation as associated type - /// with the name `Error`. - error_type: &'a mut Option, + /// The block type that is being used. + block_type: &'a TypePath, + /// The identifier of the trait being implemented. + impl_trait: &'a Ident, + /// Stores the error type that is being found in the trait implementation as associated type + /// with the name `Error`. + error_type: &'a mut Option, } impl<'a> Fold for FoldRuntimeApiImpl<'a> { - fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { - let block = { - let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); - - let (param_names, param_types, error) = match extract_parameter_names_types_and_borrows( - &input.sig, - AllowSelfRefInParameters::YesButIgnore, - ) { - Ok(res) => ( - res.iter().map(|v| v.0.clone()).collect::>(), - res.iter().map(|v| { - let ty = &v.1; - let borrow = &v.2; - quote!( #borrow #ty ) - }).collect::>(), - None - ), - Err(e) => (Vec::new(), Vec::new(), Some(e.to_compile_error())), - }; - - let block_type = &self.block_type; - - // Rewrite the input parameters. - input.sig.inputs = parse_quote! { - &self, - _: &#crate_::BlockId<#block_type>, - _: #crate_::ExecutionContext, - params: Option<( #( #param_types ),* )>, - _: Vec, - }; - - input.sig.ident = generate_method_runtime_api_impl_name( - &self.impl_trait, - &input.sig.ident, - ); - let ret_type = return_type_extract_type(&input.sig.output); - - // Generate the correct return type. - input.sig.output = parse_quote!( - -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error> - ); - - let orig_block = input.block.clone(); - - // Generate the new method implementation that calls into the runtime. - parse_quote!( - { - // Get the error to the user (if we have one). 
- #error - - let (#( #param_names ),*) = params - .expect("Mocked runtime apis don't support calling deprecated api versions"); - - let __fn_implementation__ = move || #orig_block; - - Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) - } - ) - }; - - let mut input = fold::fold_impl_item_method(self, input); - // We need to set the block, after we modified the rest of the ast, otherwise we would - // modify our generated block as well. - input.block = block; - input - } - - fn fold_impl_item(&mut self, input: ImplItem) -> ImplItem { - match input { - ImplItem::Type(ty) => { - if ty.ident == "Error" { - if let Some(error_type) = self.error_type { - if *error_type != ty.ty { - let error = Error::new( - ty.span(), - "Error type can not change between runtime apis", - ); - ImplItem::Verbatim(error.to_compile_error()) - } else { - ImplItem::Verbatim(Default::default()) - } - } else { - *self.error_type = Some(ty.ty); - ImplItem::Verbatim(Default::default()) - } - } else { - let error = Error::new( - ty.span(), - "Only associated type with name `Error` is allowed", - ); - ImplItem::Verbatim(error.to_compile_error()) - } - }, - o => fold::fold_impl_item(self, o), - } - } + fn fold_impl_item_method(&mut self, mut input: syn::ImplItemMethod) -> syn::ImplItemMethod { + let block = { + let crate_ = generate_crate_access(HIDDEN_INCLUDES_ID); + + let (param_names, param_types, error) = match extract_parameter_names_types_and_borrows( + &input.sig, + AllowSelfRefInParameters::YesButIgnore, + ) { + Ok(res) => ( + res.iter().map(|v| v.0.clone()).collect::>(), + res.iter() + .map(|v| { + let ty = &v.1; + let borrow = &v.2; + quote!( #borrow #ty ) + }) + .collect::>(), + None, + ), + Err(e) => (Vec::new(), Vec::new(), Some(e.to_compile_error())), + }; + + let block_type = &self.block_type; + + // Rewrite the input parameters. + input.sig.inputs = parse_quote! 
{ + &self, + _: &#crate_::BlockId<#block_type>, + _: #crate_::ExecutionContext, + params: Option<( #( #param_types ),* )>, + _: Vec, + }; + + input.sig.ident = + generate_method_runtime_api_impl_name(&self.impl_trait, &input.sig.ident); + let ret_type = return_type_extract_type(&input.sig.output); + + // Generate the correct return type. + input.sig.output = parse_quote!( + -> std::result::Result<#crate_::NativeOrEncoded<#ret_type>, Self::Error> + ); + + let orig_block = input.block.clone(); + + // Generate the new method implementation that calls into the runtime. + parse_quote!( + { + // Get the error to the user (if we have one). + #error + + let (#( #param_names ),*) = params + .expect("Mocked runtime apis don't support calling deprecated api versions"); + + let __fn_implementation__ = move || #orig_block; + + Ok(#crate_::NativeOrEncoded::Native(__fn_implementation__())) + } + ) + }; + + let mut input = fold::fold_impl_item_method(self, input); + // We need to set the block, after we modified the rest of the ast, otherwise we would + // modify our generated block as well. + input.block = block; + input + } + + fn fold_impl_item(&mut self, input: ImplItem) -> ImplItem { + match input { + ImplItem::Type(ty) => { + if ty.ident == "Error" { + if let Some(error_type) = self.error_type { + if *error_type != ty.ty { + let error = Error::new( + ty.span(), + "Error type can not change between runtime apis", + ); + ImplItem::Verbatim(error.to_compile_error()) + } else { + ImplItem::Verbatim(Default::default()) + } + } else { + *self.error_type = Some(ty.ty); + ImplItem::Verbatim(Default::default()) + } + } else { + let error = Error::new( + ty.span(), + "Only associated type with name `Error` is allowed", + ); + ImplItem::Verbatim(error.to_compile_error()) + } + } + o => fold::fold_impl_item(self, o), + } + } } /// Result of [`generate_runtime_api_impls`]. struct GeneratedRuntimeApiImpls { - /// All the runtime api implementations. 
- impls: TokenStream, - /// The error type that should be used by the runtime apis. - error_type: Option, - /// The block type that is being used by the runtime apis. - block_type: TypePath, - /// The type the traits are implemented for. - self_ty: Type, + /// All the runtime api implementations. + impls: TokenStream, + /// The error type that should be used by the runtime apis. + error_type: Option, + /// The block type that is being used by the runtime apis. + block_type: TypePath, + /// The type the traits are implemented for. + self_ty: Type, } /// Generate the runtime api implementations from the given trait implementations. @@ -281,98 +289,101 @@ struct GeneratedRuntimeApiImpls { /// This folds the method names, changes the method parameters, method return type, /// extracts the error type, self type and the block type. fn generate_runtime_api_impls(impls: &[ItemImpl]) -> Result { - let mut result = Vec::with_capacity(impls.len()); - let mut error_type = None; - let mut global_block_type: Option = None; - let mut self_ty: Option> = None; - - for impl_ in impls { - let impl_trait_path = extract_impl_trait(&impl_, RequireQualifiedTraitPath::No)?; - let impl_trait = &impl_trait_path - .segments - .last() - .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
- .clone(); - let block_type = extract_block_type_from_trait_path(impl_trait_path)?; - - self_ty = match self_ty.take() { - Some(self_ty) => { - if self_ty == impl_.self_ty { - Some(self_ty) - } else { - let mut error =Error::new( - impl_.self_ty.span(), - "Self type should not change between runtime apis", - ); - - error.combine(Error::new( - self_ty.span(), - "First self type found here", - )); - - return Err(error) - } - }, - None => Some(impl_.self_ty.clone()), - }; - - global_block_type = match global_block_type.take() { - Some(global_block_type) => { - if global_block_type == *block_type { - Some(global_block_type) - } else { - let mut error = Error::new( - block_type.span(), - "Block type should be the same between all runtime apis.", - ); - - error.combine(Error::new( - global_block_type.span(), - "First block type found here", - )); - - return Err(error) - } - }, - None => Some(block_type.clone()), - }; - - let mut visitor = FoldRuntimeApiImpl { - block_type, - impl_trait: &impl_trait.ident, - error_type: &mut error_type, - }; - - result.push(visitor.fold_item_impl(impl_.clone())); - } - - Ok(GeneratedRuntimeApiImpls { - impls: quote!( #( #result )* ), - error_type, - block_type: global_block_type.expect("There is a least one runtime api; qed"), - self_ty: *self_ty.expect("There is at least one runtime api; qed"), - }) + let mut result = Vec::with_capacity(impls.len()); + let mut error_type = None; + let mut global_block_type: Option = None; + let mut self_ty: Option> = None; + + for impl_ in impls { + let impl_trait_path = extract_impl_trait(&impl_, RequireQualifiedTraitPath::No)?; + let impl_trait = &impl_trait_path + .segments + .last() + .ok_or_else(|| Error::new(impl_trait_path.span(), "Empty trait path not possible!"))? 
+ .clone(); + let block_type = extract_block_type_from_trait_path(impl_trait_path)?; + + self_ty = match self_ty.take() { + Some(self_ty) => { + if self_ty == impl_.self_ty { + Some(self_ty) + } else { + let mut error = Error::new( + impl_.self_ty.span(), + "Self type should not change between runtime apis", + ); + + error.combine(Error::new(self_ty.span(), "First self type found here")); + + return Err(error); + } + } + None => Some(impl_.self_ty.clone()), + }; + + global_block_type = match global_block_type.take() { + Some(global_block_type) => { + if global_block_type == *block_type { + Some(global_block_type) + } else { + let mut error = Error::new( + block_type.span(), + "Block type should be the same between all runtime apis.", + ); + + error.combine(Error::new( + global_block_type.span(), + "First block type found here", + )); + + return Err(error); + } + } + None => Some(block_type.clone()), + }; + + let mut visitor = FoldRuntimeApiImpl { + block_type, + impl_trait: &impl_trait.ident, + error_type: &mut error_type, + }; + + result.push(visitor.fold_item_impl(impl_.clone())); + } + + Ok(GeneratedRuntimeApiImpls { + impls: quote!( #( #result )* ), + error_type, + block_type: global_block_type.expect("There is a least one runtime api; qed"), + self_ty: *self_ty.expect("There is at least one runtime api; qed"), + }) } /// The implementation of the `mock_impl_runtime_apis!` macro. 
pub fn mock_impl_runtime_apis_impl(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - // Parse all impl blocks - let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); + // Parse all impl blocks + let RuntimeApiImpls { impls: api_impls } = parse_macro_input!(input as RuntimeApiImpls); - mock_impl_runtime_apis_impl_inner(&api_impls).unwrap_or_else(|e| e.to_compile_error()).into() + mock_impl_runtime_apis_impl_inner(&api_impls) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } fn mock_impl_runtime_apis_impl_inner(api_impls: &[ItemImpl]) -> Result { - let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); - let GeneratedRuntimeApiImpls { impls, error_type, block_type, self_ty } = - generate_runtime_api_impls(api_impls)?; - let api_traits = implement_common_api_traits(error_type, block_type, self_ty)?; - - Ok(quote!( - #hidden_includes - - #impls - - #api_traits - )) + let hidden_includes = generate_hidden_includes(HIDDEN_INCLUDES_ID); + let GeneratedRuntimeApiImpls { + impls, + error_type, + block_type, + self_ty, + } = generate_runtime_api_impls(api_impls)?; + let api_traits = implement_common_api_traits(error_type, block_type, self_ty)?; + + Ok(quote!( + #hidden_includes + + #impls + + #api_traits + )) } diff --git a/primitives/api/proc-macro/src/utils.rs b/primitives/api/proc-macro/src/utils.rs index 1a79cf6c1e..8a97250898 100644 --- a/primitives/api/proc-macro/src/utils.rs +++ b/primitives/api/proc-macro/src/utils.rs @@ -14,11 +14,11 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::{ - Result, Ident, Signature, parse_quote, Type, Pat, spanned::Spanned, FnArg, Error, token::And, - ImplItem, ReturnType, PathArguments, Path, GenericArgument, TypePath, ItemImpl, + parse_quote, spanned::Spanned, token::And, Error, FnArg, GenericArgument, Ident, ImplItem, + ItemImpl, Pat, Path, PathArguments, Result, ReturnType, Signature, Type, TypePath, }; use quote::quote; @@ -28,246 +28,269 @@ use std::env; use proc_macro_crate::crate_name; fn generate_hidden_includes_mod_name(unique_id: &'static str) -> Ident { - Ident::new(&format!("sp_api_hidden_includes_{}", unique_id), Span::call_site()) + Ident::new( + &format!("sp_api_hidden_includes_{}", unique_id), + Span::call_site(), + ) } /// Generates the hidden includes that are required to make the macro independent from its scope. pub fn generate_hidden_includes(unique_id: &'static str) -> TokenStream { - if env::var("CARGO_PKG_NAME").unwrap() == "sp-api" { - TokenStream::new() - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - match crate_name("sp-api") { - Ok(client_name) => { - let client_name = Ident::new(&client_name, Span::call_site()); - quote!( - #[doc(hidden)] - mod #mod_name { - pub extern crate #client_name as sp_api; - } - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - quote!( #err ) - } - } - - }.into() + if env::var("CARGO_PKG_NAME").unwrap() == "sp-api" { + TokenStream::new() + } else { + let mod_name = generate_hidden_includes_mod_name(unique_id); + match crate_name("sp-api") { + Ok(client_name) => { + let client_name = Ident::new(&client_name, Span::call_site()); + quote!( + #[doc(hidden)] + mod #mod_name { + pub extern crate #client_name as sp_api; + } + ) + } + Err(e) => { + let err = Error::new(Span::call_site(), &e).to_compile_error(); + quote!( #err ) + } + } + } + .into() } /// Generates the access to the `sc_client` crate. 
pub fn generate_crate_access(unique_id: &'static str) -> TokenStream { - if env::var("CARGO_PKG_NAME").unwrap() == "sp-api" { - quote!( sp_api ) - } else { - let mod_name = generate_hidden_includes_mod_name(unique_id); - quote!( self::#mod_name::sp_api ) - }.into() + if env::var("CARGO_PKG_NAME").unwrap() == "sp-api" { + quote!(sp_api) + } else { + let mod_name = generate_hidden_includes_mod_name(unique_id); + quote!( self::#mod_name::sp_api ) + } + .into() } /// Generates the name of the module that contains the trait declaration for the runtime. pub fn generate_runtime_mod_name_for_trait(trait_: &Ident) -> Ident { - Ident::new(&format!("runtime_decl_for_{}", trait_.to_string()), Span::call_site()) + Ident::new( + &format!("runtime_decl_for_{}", trait_.to_string()), + Span::call_site(), + ) } /// Generates a name for a method that needs to be implemented in the runtime for the client side. pub fn generate_method_runtime_api_impl_name(trait_: &Ident, method: &Ident) -> Ident { - Ident::new(&format!("{}_{}_runtime_api_impl", trait_, method), Span::call_site()) + Ident::new( + &format!("{}_{}_runtime_api_impl", trait_, method), + Span::call_site(), + ) } /// Get the type of a `syn::ReturnType`. pub fn return_type_extract_type(rt: &ReturnType) -> Type { - match rt { - ReturnType::Default => parse_quote!( () ), - ReturnType::Type(_, ref ty) => *ty.clone(), - } + match rt { + ReturnType::Default => parse_quote!(()), + ReturnType::Type(_, ref ty) => *ty.clone(), + } } /// Replace the `_` (wild card) parameter names in the given signature with unique identifiers. 
pub fn replace_wild_card_parameter_names(input: &mut Signature) { - let mut generated_pattern_counter = 0; - input.inputs.iter_mut().for_each(|arg| if let FnArg::Typed(arg) = arg { - arg.pat = Box::new( - generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter), - ); - }); + let mut generated_pattern_counter = 0; + input.inputs.iter_mut().for_each(|arg| { + if let FnArg::Typed(arg) = arg { + arg.pat = Box::new(generate_unique_pattern( + (*arg.pat).clone(), + &mut generated_pattern_counter, + )); + } + }); } /// Fold the given `Signature` to make it usable on the client side. -pub fn fold_fn_decl_for_client_side( - input: &mut Signature, - block_id: &TokenStream, -) { - replace_wild_card_parameter_names(input); - - // Add `&self, at:& BlockId` as parameters to each function at the beginning. - input.inputs.insert(0, parse_quote!( __runtime_api_at_param__: &#block_id )); - input.inputs.insert(0, parse_quote!( &self )); - - // Wrap the output in a `Result` - input.output = { - let ty = return_type_extract_type(&input.output); - parse_quote!( -> std::result::Result<#ty, Self::Error> ) - }; +pub fn fold_fn_decl_for_client_side(input: &mut Signature, block_id: &TokenStream) { + replace_wild_card_parameter_names(input); + + // Add `&self, at:& BlockId` as parameters to each function at the beginning. + input + .inputs + .insert(0, parse_quote!( __runtime_api_at_param__: &#block_id )); + input.inputs.insert(0, parse_quote!(&self)); + + // Wrap the output in a `Result` + input.output = { + let ty = return_type_extract_type(&input.output); + parse_quote!( -> std::result::Result<#ty, Self::Error> ) + }; } /// Generate an unique pattern based on the given counter, if the given pattern is a `_`. 
pub fn generate_unique_pattern(pat: Pat, counter: &mut u32) -> Pat { - match pat { - Pat::Wild(_) => { - let generated_name = Ident::new( - &format!("__runtime_api_generated_name_{}__", counter), - pat.span(), - ); - *counter += 1; - - parse_quote!( #generated_name ) - }, - _ => pat, - } + match pat { + Pat::Wild(_) => { + let generated_name = Ident::new( + &format!("__runtime_api_generated_name_{}__", counter), + pat.span(), + ); + *counter += 1; + + parse_quote!( #generated_name ) + } + _ => pat, + } } /// Allow `&self` in parameters of a method. pub enum AllowSelfRefInParameters { - /// Allows `&self` in parameters, but doesn't return it as part of the parameters. - YesButIgnore, - No, + /// Allows `&self` in parameters, but doesn't return it as part of the parameters. + YesButIgnore, + No, } /// Extracts the name, the type and `&` or ``(if it is a reference or not) /// for each parameter in the given function signature. pub fn extract_parameter_names_types_and_borrows( - sig: &Signature, - allow_self: AllowSelfRefInParameters, + sig: &Signature, + allow_self: AllowSelfRefInParameters, ) -> Result)>> { - let mut result = Vec::new(); - let mut generated_pattern_counter = 0; - for input in sig.inputs.iter() { - match input { - FnArg::Typed(arg) => { - let (ty, borrow) = match &*arg.ty { - Type::Reference(t) => { - ((*t.elem).clone(), Some(t.and_token)) - }, - t => { (t.clone(), None) }, - }; - - let name = generate_unique_pattern( - (*arg.pat).clone(), - &mut generated_pattern_counter, - ); - result.push((name, ty, borrow)); - }, - FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => { - return Err(Error::new(input.span(), "`self` parameter not supported!")) - }, - FnArg::Receiver(recv) => { - if recv.mutability.is_some() || recv.reference.is_none() { - return Err(Error::new(recv.span(), "Only `&self` is supported!")) - } - }, - } - } - - Ok(result) + let mut result = Vec::new(); + let mut generated_pattern_counter = 0; + for input in 
sig.inputs.iter() { + match input { + FnArg::Typed(arg) => { + let (ty, borrow) = match &*arg.ty { + Type::Reference(t) => ((*t.elem).clone(), Some(t.and_token)), + t => (t.clone(), None), + }; + + let name = + generate_unique_pattern((*arg.pat).clone(), &mut generated_pattern_counter); + result.push((name, ty, borrow)); + } + FnArg::Receiver(_) if matches!(allow_self, AllowSelfRefInParameters::No) => { + return Err(Error::new(input.span(), "`self` parameter not supported!")) + } + FnArg::Receiver(recv) => { + if recv.mutability.is_some() || recv.reference.is_none() { + return Err(Error::new(recv.span(), "Only `&self` is supported!")); + } + } + } + } + + Ok(result) } /// Generates the name for the native call generator function. pub fn generate_native_call_generator_fn_name(fn_name: &Ident) -> Ident { - Ident::new(&format!("{}_native_call_generator", fn_name.to_string()), Span::call_site()) + Ident::new( + &format!("{}_native_call_generator", fn_name.to_string()), + Span::call_site(), + ) } /// Generates the name for the call api at function. pub fn generate_call_api_at_fn_name(fn_name: &Ident) -> Ident { - Ident::new(&format!("{}_call_api_at", fn_name.to_string()), Span::call_site()) + Ident::new( + &format!("{}_call_api_at", fn_name.to_string()), + Span::call_site(), + ) } /// Prefix the given function with the trait name. pub fn prefix_function_with_trait(trait_: &Ident, function: &F) -> String { - format!("{}_{}", trait_.to_string(), function.to_string()) + format!("{}_{}", trait_.to_string(), function.to_string()) } /// Extract all types that appear in signatures in the given `ImplItem`'s. /// /// If a type is a reference, the inner type is extracted (without the reference). 
pub fn extract_all_signature_types(items: &[ImplItem]) -> Vec { - items.iter() - .filter_map(|i| match i { - ImplItem::Method(method) => Some(&method.sig), - _ => None, - }) - .map(|sig| { - let ret_ty = match &sig.output { - ReturnType::Default => None, - ReturnType::Type(_, ty) => Some((**ty).clone()), - }; - - sig.inputs.iter().filter_map(|i| match i { - FnArg::Typed(arg) => Some(&arg.ty), - _ => None, - }).map(|ty| match &**ty { - Type::Reference(t) => (*t.elem).clone(), - _ => (**ty).clone(), - }).chain(ret_ty) - }) - .flatten() - .collect() + items + .iter() + .filter_map(|i| match i { + ImplItem::Method(method) => Some(&method.sig), + _ => None, + }) + .map(|sig| { + let ret_ty = match &sig.output { + ReturnType::Default => None, + ReturnType::Type(_, ty) => Some((**ty).clone()), + }; + + sig.inputs + .iter() + .filter_map(|i| match i { + FnArg::Typed(arg) => Some(&arg.ty), + _ => None, + }) + .map(|ty| match &**ty { + Type::Reference(t) => (*t.elem).clone(), + _ => (**ty).clone(), + }) + .chain(ret_ty) + }) + .flatten() + .collect() } /// Extracts the block type from a trait path. /// /// It is expected that the block type is the first type in the generic arguments. 
pub fn extract_block_type_from_trait_path(trait_: &Path) -> Result<&TypePath> { - let span = trait_.span(); - let generics = trait_ - .segments - .last() - .ok_or_else(|| Error::new(span, "Empty path not supported"))?; - - match &generics.arguments { - PathArguments::AngleBracketed(ref args) => { - args.args.first().and_then(|v| match v { - GenericArgument::Type(Type::Path(ref block)) => Some(block), - _ => None - }).ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")) - }, - PathArguments::None => { - let span = trait_.segments.last().as_ref().unwrap().span(); - Err(Error::new(span, "Missing `Block` generic parameter.")) - }, - PathArguments::Parenthesized(_) => { - Err(Error::new(generics.arguments.span(), "Unexpected parentheses in path!")) - }, - } + let span = trait_.span(); + let generics = trait_ + .segments + .last() + .ok_or_else(|| Error::new(span, "Empty path not supported"))?; + + match &generics.arguments { + PathArguments::AngleBracketed(ref args) => args + .args + .first() + .and_then(|v| match v { + GenericArgument::Type(Type::Path(ref block)) => Some(block), + _ => None, + }) + .ok_or_else(|| Error::new(args.span(), "Missing `Block` generic parameter.")), + PathArguments::None => { + let span = trait_.segments.last().as_ref().unwrap().span(); + Err(Error::new(span, "Missing `Block` generic parameter.")) + } + PathArguments::Parenthesized(_) => Err(Error::new( + generics.arguments.span(), + "Unexpected parentheses in path!", + )), + } } /// Should a qualified trait path be required? /// /// e.g. `path::Trait` is qualified and `Trait` is not. pub enum RequireQualifiedTraitPath { - Yes, - No, + Yes, + No, } /// Extract the trait that is implemented by the given `ItemImpl`. 
pub fn extract_impl_trait<'a>( - impl_: &'a ItemImpl, - require: RequireQualifiedTraitPath, + impl_: &'a ItemImpl, + require: RequireQualifiedTraitPath, ) -> Result<&'a Path> { - impl_.trait_.as_ref().map(|v| &v.1).ok_or_else( - || Error::new(impl_.span(), "Only implementation of traits are supported!") - ).and_then(|p| { - if p.segments.len() > 1 || matches!(require, RequireQualifiedTraitPath::No) { - Ok(p) - } else { - Err( - Error::new( - p.span(), - "The implemented trait has to be referenced with a path, \ - e.g. `impl client::Core for Runtime`." - ) - ) - } - }) + impl_ + .trait_ + .as_ref() + .map(|v| &v.1) + .ok_or_else(|| Error::new(impl_.span(), "Only implementation of traits are supported!")) + .and_then(|p| { + if p.segments.len() > 1 || matches!(require, RequireQualifiedTraitPath::No) { + Ok(p) + } else { + Err(Error::new( + p.span(), + "The implemented trait has to be referenced with a path, \ + e.g. `impl client::Core for Runtime`.", + )) + } + }) } diff --git a/primitives/api/src/lib.rs b/primitives/api/src/lib.rs index a3fc15ba7e..01aeeb3f5f 100644 --- a/primitives/api/src/lib.rs +++ b/primitives/api/src/lib.rs @@ -34,13 +34,7 @@ extern crate self as sp_api; #[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_state_machine::{ - OverlayedChanges, StorageProof, Backend as StateBackend, ChangesTrieState, InMemoryBackend, -}; -#[doc(hidden)] -#[cfg(feature = "std")] -pub use sp_core::NativeOrEncoded; +pub use codec::{Decode, Encode}; #[doc(hidden)] #[cfg(feature = "std")] pub use hash_db::Hasher; @@ -48,26 +42,34 @@ pub use hash_db::Hasher; #[cfg(not(feature = "std"))] pub use sp_core::to_substrate_wasm_fn_return_value; #[doc(hidden)] -pub use sp_runtime::{ - traits::{ - Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, HashFor, NumberFor, - Header as HeaderT, Hash as HashT, - }, - generic::BlockId, transaction_validity::TransactionValidity, RuntimeString, -}; +#[cfg(feature = "std")] +pub use sp_core::NativeOrEncoded; +use 
sp_core::OpaqueMetadata; #[doc(hidden)] pub use sp_core::{offchain, ExecutionContext}; #[doc(hidden)] -pub use sp_version::{ApiId, RuntimeVersion, ApisVec, create_apis_vec}; +pub use sp_runtime::{ + generic::BlockId, + traits::{ + Block as BlockT, GetNodeBlockType, GetRuntimeBlockType, Hash as HashT, HashFor, + Header as HeaderT, NumberFor, + }, + transaction_validity::TransactionValidity, + RuntimeString, +}; #[doc(hidden)] -pub use sp_std::{slice, mem}; +#[cfg(feature = "std")] +pub use sp_state_machine::{ + Backend as StateBackend, ChangesTrieState, InMemoryBackend, OverlayedChanges, StorageProof, +}; #[cfg(feature = "std")] use sp_std::result; #[doc(hidden)] -pub use codec::{Encode, Decode}; -use sp_core::OpaqueMetadata; +pub use sp_std::{mem, slice}; +#[doc(hidden)] +pub use sp_version::{create_apis_vec, ApiId, ApisVec, RuntimeVersion}; #[cfg(feature = "std")] -use std::{panic::UnwindSafe, cell::RefCell}; +use std::{cell::RefCell, panic::UnwindSafe}; /// Declares given traits as runtime apis. /// @@ -301,96 +303,101 @@ pub type ProofRecorder = sp_state_machine::ProofRecorder>; /// A type that is used as cache for the storage transactions. #[cfg(feature = "std")] -pub type StorageTransactionCache = - sp_state_machine::StorageTransactionCache< - >>::Transaction, HashFor, NumberFor - >; +pub type StorageTransactionCache = sp_state_machine::StorageTransactionCache< + >>::Transaction, + HashFor, + NumberFor, +>; #[cfg(feature = "std")] -pub type StorageChanges = - sp_state_machine::StorageChanges< - >>::Transaction, - HashFor, - NumberFor - >; +pub type StorageChanges = sp_state_machine::StorageChanges< + >>::Transaction, + HashFor, + NumberFor, +>; /// Extract the state backend type for a type that implements `ProvideRuntimeApi`. #[cfg(feature = "std")] pub type StateBackendFor = - <

>::Api as ApiExt>::StateBackend; + <

>::Api as ApiExt>::StateBackend; /// Extract the state backend transaction type for a type that implements `ProvideRuntimeApi`. #[cfg(feature = "std")] pub type TransactionFor = - as StateBackend>>::Transaction; + as StateBackend>>::Transaction; /// Something that can be constructed to a runtime api. #[cfg(feature = "std")] pub trait ConstructRuntimeApi> { - /// The actual runtime api that will be constructed. - type RuntimeApi: ApiExt; + /// The actual runtime api that will be constructed. + type RuntimeApi: ApiExt; - /// Construct an instance of the runtime api. - fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; + /// Construct an instance of the runtime api. + fn construct_runtime_api<'a>(call: &'a C) -> ApiRef<'a, Self::RuntimeApi>; } /// Extends the runtime api traits with an associated error type. This trait is given as super /// trait to every runtime api trait. #[cfg(feature = "std")] pub trait ApiErrorExt { - /// Error type used by the runtime apis. - type Error: std::fmt::Debug + From; + /// Error type used by the runtime apis. + type Error: std::fmt::Debug + From; } /// Extends the runtime api implementation with some common functionality. #[cfg(feature = "std")] pub trait ApiExt: ApiErrorExt { - /// The state backend that is used to store the block states. - type StateBackend: StateBackend>; - - /// The given closure will be called with api instance. Inside the closure any api call is - /// allowed. After doing the api call, the closure is allowed to map the `Result` to a - /// different `Result` type. This can be important, as the internal data structure that keeps - /// track of modifications to the storage, discards changes when the `Result` is an `Err`. - /// On `Ok`, the structure commits the changes to an internal buffer. - fn map_api_result result::Result, R, E>( - &self, - map_call: F, - ) -> result::Result where Self: Sized; - - /// Checks if the given api is implemented and versions match. 
- fn has_api( - &self, - at: &BlockId, - ) -> Result where Self: Sized; - - /// Check if the given api is implemented and the version passes a predicate. - fn has_api_with bool>( - &self, - at: &BlockId, - pred: P, - ) -> Result where Self: Sized; - - /// Start recording all accessed trie nodes for generating proofs. - fn record_proof(&mut self); - - /// Extract the recorded proof. - /// - /// This stops the proof recording. - /// - /// If `record_proof` was not called before, this will return `None`. - fn extract_proof(&mut self) -> Option; - - /// Convert the api object into the storage changes that were done while executing runtime - /// api functions. - /// - /// After executing this function, all collected changes are reset. - fn into_storage_changes( - &self, - backend: &Self::StateBackend, - changes_trie_state: Option<&ChangesTrieState, NumberFor>>, - parent_hash: Block::Hash, - ) -> Result, String> where Self: Sized; + /// The state backend that is used to store the block states. + type StateBackend: StateBackend>; + + /// The given closure will be called with api instance. Inside the closure any api call is + /// allowed. After doing the api call, the closure is allowed to map the `Result` to a + /// different `Result` type. This can be important, as the internal data structure that keeps + /// track of modifications to the storage, discards changes when the `Result` is an `Err`. + /// On `Ok`, the structure commits the changes to an internal buffer. + fn map_api_result result::Result, R, E>( + &self, + map_call: F, + ) -> result::Result + where + Self: Sized; + + /// Checks if the given api is implemented and versions match. + fn has_api(&self, at: &BlockId) -> Result + where + Self: Sized; + + /// Check if the given api is implemented and the version passes a predicate. + fn has_api_with bool>( + &self, + at: &BlockId, + pred: P, + ) -> Result + where + Self: Sized; + + /// Start recording all accessed trie nodes for generating proofs. 
+ fn record_proof(&mut self); + + /// Extract the recorded proof. + /// + /// This stops the proof recording. + /// + /// If `record_proof` was not called before, this will return `None`. + fn extract_proof(&mut self) -> Option; + + /// Convert the api object into the storage changes that were done while executing runtime + /// api functions. + /// + /// After executing this function, all collected changes are reset. + fn into_storage_changes( + &self, + backend: &Self::StateBackend, + changes_trie_state: Option<&ChangesTrieState, NumberFor>>, + parent_hash: Block::Hash, + ) -> Result, String> + where + Self: Sized; } /// Before calling any runtime api function, the runtime need to be initialized @@ -403,68 +410,68 @@ pub trait ApiExt: ApiErrorExt { #[cfg(feature = "std")] #[derive(Clone, Copy)] pub enum InitializeBlock<'a, Block: BlockT> { - /// Skip initializing the runtime for a given block. - /// - /// This is used by functions who do the initialization by themselves or don't require it. - Skip, - /// Initialize the runtime for a given block. - /// - /// If the stored `BlockId` is `Some(_)`, the runtime is currently initialized at this block. - Do(&'a RefCell>>), + /// Skip initializing the runtime for a given block. + /// + /// This is used by functions who do the initialization by themselves or don't require it. + Skip, + /// Initialize the runtime for a given block. + /// + /// If the stored `BlockId` is `Some(_)`, the runtime is currently initialized at this block. + Do(&'a RefCell>>), } /// Parameters for [`CallApiAt::call_api_at`]. #[cfg(feature = "std")] pub struct CallApiAtParams<'a, Block: BlockT, C, NC, Backend: StateBackend>> { - /// A reference to something that implements the [`Core`] api. - pub core_api: &'a C, - /// The block id that determines the state that should be setup when calling the function. - pub at: &'a BlockId, - /// The name of the function that should be called. 
- pub function: &'static str, - /// An optional native call that calls the `function`. This is an optimization to call into a - /// native runtime without requiring to encode/decode the parameters. The native runtime can - /// still be called when this value is `None`, we then just fallback to encoding/decoding the - /// parameters. - pub native_call: Option, - /// The encoded arguments of the function. - pub arguments: Vec, - /// The overlayed changes that are on top of the state. - pub overlayed_changes: &'a RefCell, - /// The cache for storage transactions. - pub storage_transaction_cache: &'a RefCell>, - /// Determines if the function requires that `initialize_block` should be called before calling - /// the actual function. - pub initialize_block: InitializeBlock<'a, Block>, - /// The context this function is executed in. - pub context: ExecutionContext, - /// The optional proof recorder for recording storage accesses. - pub recorder: &'a Option>, + /// A reference to something that implements the [`Core`] api. + pub core_api: &'a C, + /// The block id that determines the state that should be setup when calling the function. + pub at: &'a BlockId, + /// The name of the function that should be called. + pub function: &'static str, + /// An optional native call that calls the `function`. This is an optimization to call into a + /// native runtime without requiring to encode/decode the parameters. The native runtime can + /// still be called when this value is `None`, we then just fallback to encoding/decoding the + /// parameters. + pub native_call: Option, + /// The encoded arguments of the function. + pub arguments: Vec, + /// The overlayed changes that are on top of the state. + pub overlayed_changes: &'a RefCell, + /// The cache for storage transactions. + pub storage_transaction_cache: &'a RefCell>, + /// Determines if the function requires that `initialize_block` should be called before calling + /// the actual function. 
+ pub initialize_block: InitializeBlock<'a, Block>, + /// The context this function is executed in. + pub context: ExecutionContext, + /// The optional proof recorder for recording storage accesses. + pub recorder: &'a Option>, } /// Something that can call into the an api at a given block. #[cfg(feature = "std")] pub trait CallApiAt { - /// Error type used by the implementation. - type Error: std::fmt::Debug + From; - - /// The state backend that is used to store the block states. - type StateBackend: StateBackend>; - - /// Calls the given api function with the given encoded arguments at the given block and returns - /// the encoded result. - fn call_api_at< - 'a, - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - C: Core, - >( - &self, - params: CallApiAtParams<'a, Block, C, NC, Self::StateBackend>, - ) -> Result, Self::Error>; - - /// Returns the runtime version at the given block. - fn runtime_version_at(&self, at: &BlockId) -> Result; + /// Error type used by the implementation. + type Error: std::fmt::Debug + From; + + /// The state backend that is used to store the block states. + type StateBackend: StateBackend>; + + /// Calls the given api function with the given encoded arguments at the given block and returns + /// the encoded result. + fn call_api_at< + 'a, + R: Encode + Decode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + C: Core, + >( + &self, + params: CallApiAtParams<'a, Block, C, NC, Self::StateBackend>, + ) -> Result, Self::Error>; + + /// Returns the runtime version at the given block. + fn runtime_version_at(&self, at: &BlockId) -> Result; } /// Auxiliary wrapper that holds an api instance and binds it to the given lifetime. 
@@ -473,48 +480,48 @@ pub struct ApiRef<'a, T>(T, std::marker::PhantomData<&'a ()>); #[cfg(feature = "std")] impl<'a, T> From for ApiRef<'a, T> { - fn from(api: T) -> Self { - ApiRef(api, Default::default()) - } + fn from(api: T) -> Self { + ApiRef(api, Default::default()) + } } #[cfg(feature = "std")] impl<'a, T> std::ops::Deref for ApiRef<'a, T> { - type Target = T; + type Target = T; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } #[cfg(feature = "std")] impl<'a, T> std::ops::DerefMut for ApiRef<'a, T> { - fn deref_mut(&mut self) -> &mut T { - &mut self.0 - } + fn deref_mut(&mut self) -> &mut T { + &mut self.0 + } } /// Something that provides a runtime api. #[cfg(feature = "std")] pub trait ProvideRuntimeApi { - /// The concrete type that provides the api. - type Api: ApiExt; - - /// Returns the runtime api. - /// The returned instance will keep track of modifications to the storage. Any successful - /// call to an api function, will `commit` its changes to an internal buffer. Otherwise, - /// the modifications will be `discarded`. The modifications will not be applied to the - /// storage, even on a `commit`. - fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api>; + /// The concrete type that provides the api. + type Api: ApiExt; + + /// Returns the runtime api. + /// The returned instance will keep track of modifications to the storage. Any successful + /// call to an api function, will `commit` its changes to an internal buffer. Otherwise, + /// the modifications will be `discarded`. The modifications will not be applied to the + /// storage, even on a `commit`. + fn runtime_api<'a>(&'a self) -> ApiRef<'a, Self::Api>; } /// Something that provides information about a runtime api. #[cfg(feature = "std")] pub trait RuntimeApiInfo { - /// The identifier of the runtime api. - const ID: [u8; 8]; - /// The version of the runtime api. - const VERSION: u32; + /// The identifier of the runtime api. 
+ const ID: [u8; 8]; + /// The version of the runtime api. + const VERSION: u32; } /// Extracts the `Api::Error` for a type that provides a runtime api. @@ -523,64 +530,64 @@ pub type ApiErrorFor = <>::Api as ApiErr #[derive(codec::Encode, codec::Decode)] pub struct OldRuntimeVersion { - pub spec_name: RuntimeString, - pub impl_name: RuntimeString, - pub authoring_version: u32, - pub spec_version: u32, - pub impl_version: u32, - pub apis: ApisVec, + pub spec_name: RuntimeString, + pub impl_name: RuntimeString, + pub authoring_version: u32, + pub spec_version: u32, + pub impl_version: u32, + pub apis: ApisVec, } impl From for RuntimeVersion { - fn from(x: OldRuntimeVersion) -> Self { - Self { - spec_name: x.spec_name, - impl_name: x.impl_name, - authoring_version: x.authoring_version, - spec_version: x.spec_version, - impl_version: x.impl_version, - apis: x.apis, - transaction_version: 1, - } - } + fn from(x: OldRuntimeVersion) -> Self { + Self { + spec_name: x.spec_name, + impl_name: x.impl_name, + authoring_version: x.authoring_version, + spec_version: x.spec_version, + impl_version: x.impl_version, + apis: x.apis, + transaction_version: 1, + } + } } impl From for OldRuntimeVersion { - fn from(x: RuntimeVersion) -> Self { - Self { - spec_name: x.spec_name, - impl_name: x.impl_name, - authoring_version: x.authoring_version, - spec_version: x.spec_version, - impl_version: x.impl_version, - apis: x.apis, - } - } + fn from(x: RuntimeVersion) -> Self { + Self { + spec_name: x.spec_name, + impl_name: x.impl_name, + authoring_version: x.authoring_version, + spec_version: x.spec_version, + impl_version: x.impl_version, + apis: x.apis, + } + } } decl_runtime_apis! { - /// The `Core` runtime api that every Substrate runtime needs to implement. - #[core_trait] - #[api_version(3)] - pub trait Core { - /// Returns the version of the runtime. - fn version() -> RuntimeVersion; - /// Returns the version of the runtime. 
- #[changed_in(3)] - fn version() -> OldRuntimeVersion; - /// Execute the given block. - #[skip_initialize_block] - fn execute_block(block: Block); - /// Initialize a block with the given header. - #[renamed("initialise_block", 2)] - #[skip_initialize_block] - #[initialize_block] - fn initialize_block(header: &::Header); - } - - /// The `Metadata` api trait that returns metadata for the runtime. - pub trait Metadata { - /// Returns the metadata of a runtime. - fn metadata() -> OpaqueMetadata; - } + /// The `Core` runtime api that every Substrate runtime needs to implement. + #[core_trait] + #[api_version(3)] + pub trait Core { + /// Returns the version of the runtime. + fn version() -> RuntimeVersion; + /// Returns the version of the runtime. + #[changed_in(3)] + fn version() -> OldRuntimeVersion; + /// Execute the given block. + #[skip_initialize_block] + fn execute_block(block: Block); + /// Initialize a block with the given header. + #[renamed("initialise_block", 2)] + #[skip_initialize_block] + #[initialize_block] + fn initialize_block(header: &::Header); + } + + /// The `Metadata` api trait that returns metadata for the runtime. + pub trait Metadata { + /// Returns the metadata of a runtime. + fn metadata() -> OpaqueMetadata; + } } diff --git a/primitives/api/test/benches/bench.rs b/primitives/api/test/benches/bench.rs index 9a90ca6e38..72dfa5e690 100644 --- a/primitives/api/test/benches/bench.rs +++ b/primitives/api/test/benches/bench.rs @@ -14,59 +14,76 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use criterion::{Criterion, criterion_group, criterion_main}; -use substrate_test_runtime_client::{ - DefaultTestClientBuilderExt, TestClientBuilder, - TestClientBuilderExt, runtime::TestAPI, -}; +use criterion::{criterion_group, criterion_main, Criterion}; +use sp_api::ProvideRuntimeApi; use sp_runtime::generic::BlockId; use sp_state_machine::ExecutionStrategy; -use sp_api::ProvideRuntimeApi; +use substrate_test_runtime_client::{ + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, +}; fn sp_api_benchmark(c: &mut Criterion) { - c.bench_function("add one with same runtime api", |b| { - let client = substrate_test_runtime_client::new(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + c.bench_function("add one with same runtime api", |b| { + let client = substrate_test_runtime_client::new(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); - b.iter(|| runtime_api.benchmark_add_one(&block_id, &1)) - }); + b.iter(|| runtime_api.benchmark_add_one(&block_id, &1)) + }); - c.bench_function("add one with recreating runtime api", |b| { - let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Number(client.chain_info().best_number); + c.bench_function("add one with recreating runtime api", |b| { + let client = substrate_test_runtime_client::new(); + let block_id = BlockId::Number(client.chain_info().best_number); - b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1)) - }); + b.iter(|| client.runtime_api().benchmark_add_one(&block_id, &1)) + }); - c.bench_function("vector add one with same runtime api", |b| { - let client = substrate_test_runtime_client::new(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - let data = vec![0; 1000]; + c.bench_function("vector add one with same runtime api", |b| { + let client = 
substrate_test_runtime_client::new(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + let data = vec![0; 1000]; - b.iter_with_large_drop(|| runtime_api.benchmark_vector_add_one(&block_id, &data)) - }); + b.iter_with_large_drop(|| runtime_api.benchmark_vector_add_one(&block_id, &data)) + }); - c.bench_function("vector add one with recreating runtime api", |b| { - let client = substrate_test_runtime_client::new(); - let block_id = BlockId::Number(client.chain_info().best_number); - let data = vec![0; 1000]; + c.bench_function("vector add one with recreating runtime api", |b| { + let client = substrate_test_runtime_client::new(); + let block_id = BlockId::Number(client.chain_info().best_number); + let data = vec![0; 1000]; - b.iter_with_large_drop(|| client.runtime_api().benchmark_vector_add_one(&block_id, &data)) - }); + b.iter_with_large_drop(|| { + client + .runtime_api() + .benchmark_vector_add_one(&block_id, &data) + }) + }); - c.bench_function("calling function by function pointer in wasm", |b| { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); - let block_id = BlockId::Number(client.chain_info().best_number); - b.iter(|| client.runtime_api().benchmark_indirect_call(&block_id).unwrap()) - }); + c.bench_function("calling function by function pointer in wasm", |b| { + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); + let block_id = BlockId::Number(client.chain_info().best_number); + b.iter(|| { + client + .runtime_api() + .benchmark_indirect_call(&block_id) + .unwrap() + }) + }); - c.bench_function("calling function in wasm", |b| { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); - let block_id = BlockId::Number(client.chain_info().best_number); - b.iter(|| client.runtime_api().benchmark_direct_call(&block_id).unwrap()) - }); + 
c.bench_function("calling function in wasm", |b| { + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); + let block_id = BlockId::Number(client.chain_info().best_number); + b.iter(|| { + client + .runtime_api() + .benchmark_direct_call(&block_id) + .unwrap() + }) + }); } criterion_group!(benches, sp_api_benchmark); diff --git a/primitives/api/test/tests/decl_and_impl.rs b/primitives/api/test/tests/decl_and_impl.rs index a09bd0412c..3a70e7b393 100644 --- a/primitives/api/test/tests/decl_and_impl.rs +++ b/primitives/api/test/tests/decl_and_impl.rs @@ -15,167 +15,179 @@ // along with Substrate. If not, see . use sp_api::{ - RuntimeApiInfo, decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, - ApiExt, + decl_runtime_apis, impl_runtime_apis, mock_impl_runtime_apis, ApiExt, RuntimeApiInfo, }; -use sp_runtime::{traits::{GetNodeBlockType, Block as BlockT}, generic::BlockId}; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, GetNodeBlockType}, +}; -use substrate_test_runtime_client::runtime::Block; use sp_blockchain::Result; +use substrate_test_runtime_client::runtime::Block; /// The declaration of the `Runtime` type and the implementation of the `GetNodeBlockType` /// trait are done by the `construct_runtime!` macro in a real runtime. pub struct Runtime {} impl GetNodeBlockType for Runtime { - type NodeBlock = Block; + type NodeBlock = Block; } decl_runtime_apis! 
{ - pub trait Api { - fn test(data: u64); - fn something_with_block(block: Block) -> Block; - fn function_with_two_args(data: u64, block: Block); - fn same_name(); - fn wild_card(_: u32); - } - - #[api_version(2)] - pub trait ApiWithCustomVersion { - fn same_name(); - #[changed_in(2)] - fn same_name() -> String; - } + pub trait Api { + fn test(data: u64); + fn something_with_block(block: Block) -> Block; + fn function_with_two_args(data: u64, block: Block); + fn same_name(); + fn wild_card(_: u32); + } + + #[api_version(2)] + pub trait ApiWithCustomVersion { + fn same_name(); + #[changed_in(2)] + fn same_name() -> String; + } } impl_runtime_apis! { - impl self::Api for Runtime { - fn test(_: u64) { - unimplemented!() - } - - fn something_with_block(_: Block) -> Block { - unimplemented!() - } - - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } - - fn same_name() {} - - fn wild_card(_: u32) {} - } - - impl self::ApiWithCustomVersion for Runtime { - fn same_name() {} - } - - impl sp_api::Core for Runtime { - fn version() -> sp_version::RuntimeVersion { - unimplemented!() - } - fn execute_block(_: Block) { - unimplemented!() - } - fn initialize_block(_: &::Header) { - unimplemented!() - } - } + impl self::Api for Runtime { + fn test(_: u64) { + unimplemented!() + } + + fn something_with_block(_: Block) -> Block { + unimplemented!() + } + + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } + + fn same_name() {} + + fn wild_card(_: u32) {} + } + + impl self::ApiWithCustomVersion for Runtime { + fn same_name() {} + } + + impl sp_api::Core for Runtime { + fn version() -> sp_version::RuntimeVersion { + unimplemented!() + } + fn execute_block(_: Block) { + unimplemented!() + } + fn initialize_block(_: &::Header) { + unimplemented!() + } + } } struct MockApi { - block: Option, + block: Option, } mock_impl_runtime_apis! 
{ - impl Api for MockApi { - fn test(_: u64) { - unimplemented!() - } + impl Api for MockApi { + fn test(_: u64) { + unimplemented!() + } - fn something_with_block(&self, _: Block) -> Block { - self.block.clone().unwrap() - } + fn something_with_block(&self, _: Block) -> Block { + self.block.clone().unwrap() + } - fn function_with_two_args(_: u64, _: Block) { - unimplemented!() - } + fn function_with_two_args(_: u64, _: Block) { + unimplemented!() + } - fn same_name() {} + fn same_name() {} - fn wild_card(_: u32) {} - } + fn wild_card(_: u32) {} + } - impl ApiWithCustomVersion for MockApi { - fn same_name() {} - } + impl ApiWithCustomVersion for MockApi { + fn same_name() {} + } } type TestClient = substrate_test_runtime_client::sc_client::Client< - substrate_test_runtime_client::Backend, - substrate_test_runtime_client::Executor, - Block, - RuntimeApi, + substrate_test_runtime_client::Backend, + substrate_test_runtime_client::Executor, + Block, + RuntimeApi, >; #[test] fn test_client_side_function_signature() { - let _test: fn(&RuntimeApiImpl, &BlockId, u64) -> Result<()> = - RuntimeApiImpl::::test; - let _something_with_block: - fn(&RuntimeApiImpl, &BlockId, Block) -> Result = - RuntimeApiImpl::::something_with_block; - - #[allow(deprecated)] - let _same_name_before_version_2: - fn(&RuntimeApiImpl, &BlockId) -> Result = - RuntimeApiImpl::::same_name_before_version_2; + let _test: fn(&RuntimeApiImpl, &BlockId, u64) -> Result<()> = + RuntimeApiImpl::::test; + let _something_with_block: fn( + &RuntimeApiImpl, + &BlockId, + Block, + ) -> Result = RuntimeApiImpl::::something_with_block; + + #[allow(deprecated)] + let _same_name_before_version_2: fn( + &RuntimeApiImpl, + &BlockId, + ) -> Result = RuntimeApiImpl::::same_name_before_version_2; } #[test] fn check_runtime_api_info() { - assert_eq!(&Api::::ID, &runtime_decl_for_Api::ID); - assert_eq!(Api::::VERSION, runtime_decl_for_Api::VERSION); - assert_eq!(Api::::VERSION, 1); - - assert_eq!( - 
ApiWithCustomVersion::::VERSION, - runtime_decl_for_ApiWithCustomVersion::VERSION, - ); - assert_eq!( - &ApiWithCustomVersion::::ID, - &runtime_decl_for_ApiWithCustomVersion::ID, - ); - assert_eq!(ApiWithCustomVersion::::VERSION, 2); + assert_eq!(&Api::::ID, &runtime_decl_for_Api::ID); + assert_eq!( + Api::::VERSION, + runtime_decl_for_Api::VERSION + ); + assert_eq!(Api::::VERSION, 1); + + assert_eq!( + ApiWithCustomVersion::::VERSION, + runtime_decl_for_ApiWithCustomVersion::VERSION, + ); + assert_eq!( + &ApiWithCustomVersion::::ID, + &runtime_decl_for_ApiWithCustomVersion::ID, + ); + assert_eq!(ApiWithCustomVersion::::VERSION, 2); } fn check_runtime_api_versions_contains() { - assert!(RUNTIME_API_VERSIONS.iter().any(|v| v == &(T::ID, T::VERSION))); + assert!(RUNTIME_API_VERSIONS + .iter() + .any(|v| v == &(T::ID, T::VERSION))); } #[test] fn check_runtime_api_versions() { - check_runtime_api_versions_contains::>(); - check_runtime_api_versions_contains::>(); - check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); + check_runtime_api_versions_contains::>(); } #[test] fn mock_runtime_api_has_api() { - let mock = MockApi { block: None }; - - assert!( - mock.has_api::>(&BlockId::Number(0)).unwrap(), - ); - assert!(mock.has_api::>(&BlockId::Number(0)).unwrap()); + let mock = MockApi { block: None }; + + assert!(mock + .has_api::>(&BlockId::Number(0)) + .unwrap(),); + assert!(mock + .has_api::>(&BlockId::Number(0)) + .unwrap()); } #[test] #[should_panic(expected = "Mocked runtime apis don't support calling deprecated api versions")] fn mock_runtime_api_panics_on_calling_old_version() { - let mock = MockApi { block: None }; + let mock = MockApi { block: None }; - #[allow(deprecated)] - let _ = mock.same_name_before_version_2(&BlockId::Number(0)); + #[allow(deprecated)] + let _ = mock.same_name_before_version_2(&BlockId::Number(0)); } diff --git a/primitives/api/test/tests/runtime_calls.rs 
b/primitives/api/test/tests/runtime_calls.rs index a907ac8095..3052550366 100644 --- a/primitives/api/test/tests/runtime_calls.rs +++ b/primitives/api/test/tests/runtime_calls.rs @@ -15,194 +15,228 @@ // along with Substrate. If not, see . use sp_api::ProvideRuntimeApi; -use substrate_test_runtime_client::{ - prelude::*, - DefaultTestClientBuilderExt, TestClientBuilder, - runtime::{TestAPI, DecodeFails, Transfer, Block}, +use sp_runtime::{ + generic::BlockId, + traits::{HashFor, Header as HeaderT}, }; -use sp_runtime::{generic::BlockId, traits::{Header as HeaderT, HashFor}}; use sp_state_machine::{ - ExecutionStrategy, create_proof_check_backend, - execution_proof_check_on_trie_backend, + create_proof_check_backend, execution_proof_check_on_trie_backend, ExecutionStrategy, +}; +use substrate_test_runtime_client::{ + prelude::*, + runtime::{Block, DecodeFails, TestAPI, Transfer}, + DefaultTestClientBuilderExt, TestClientBuilder, }; -use sp_consensus::SelectChain; use codec::Encode; use sc_block_builder::BlockBuilderProvider; +use sp_consensus::SelectChain; fn calling_function_with_strat(strat: ExecutionStrategy) { - let client = TestClientBuilder::new().set_execution_strategy(strat).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let client = TestClientBuilder::new() + .set_execution_strategy(strat) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.benchmark_add_one(&block_id, &1).unwrap(), 2); + assert_eq!(runtime_api.benchmark_add_one(&block_id, &1).unwrap(), 2); } #[test] fn calling_native_runtime_function() { - calling_function_with_strat(ExecutionStrategy::NativeWhenPossible); + calling_function_with_strat(ExecutionStrategy::NativeWhenPossible); } #[test] fn calling_wasm_runtime_function() { - calling_function_with_strat(ExecutionStrategy::AlwaysWasm); + 
calling_function_with_strat(ExecutionStrategy::AlwaysWasm); } #[test] #[should_panic( - expected = - "Could not convert parameter `param` between node and runtime: DecodeFails always fails" + expected = "Could not convert parameter `param` between node and runtime: DecodeFails always fails" )] fn calling_native_runtime_function_with_non_decodable_parameter() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.fail_convert_parameter(&block_id, DecodeFails::new()).unwrap(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + runtime_api + .fail_convert_parameter(&block_id, DecodeFails::new()) + .unwrap(); } #[test] #[should_panic(expected = "Could not convert return value from runtime to node!")] fn calling_native_runtime_function_with_non_decodable_return_value() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - runtime_api.fail_convert_return_value(&block_id).unwrap(); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + runtime_api.fail_convert_return_value(&block_id).unwrap(); } #[test] fn calling_native_runtime_signature_changed_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeWhenPossible).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let 
client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeWhenPossible) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.function_signature_changed(&block_id).unwrap(), 1); + assert_eq!( + runtime_api.function_signature_changed(&block_id).unwrap(), + 1 + ); } #[test] fn calling_wasm_runtime_signature_changed_old_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); - #[allow(deprecated)] - let res = runtime_api.function_signature_changed_before_version_2(&block_id).unwrap(); - assert_eq!(&res, &[1, 2]); + #[allow(deprecated)] + let res = runtime_api + .function_signature_changed_before_version_2(&block_id) + .unwrap(); + assert_eq!(&res, &[1, 2]); } #[test] fn calling_with_both_strategy_and_fail_on_wasm_should_return_error() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert!(runtime_api.fail_on_wasm(&block_id).is_err()); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::Both) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert!(runtime_api.fail_on_wasm(&block_id).is_err()); } #[test] fn calling_with_both_strategy_and_fail_on_native_should_work() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - 
let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::Both) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); } - #[test] fn calling_with_native_else_wasm_and_fail_on_wasm_should_work() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeElseWasm).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.fail_on_wasm(&block_id).unwrap(), 1); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeElseWasm) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert_eq!(runtime_api.fail_on_wasm(&block_id).unwrap(), 1); } #[test] fn calling_with_native_else_wasm_and_fail_on_native_should_work() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::NativeElseWasm).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::NativeElseWasm) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert_eq!(runtime_api.fail_on_native(&block_id).unwrap(), 1); } #[test] fn use_trie_function() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::AlwaysWasm).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.use_trie(&block_id).unwrap(), 
2); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert_eq!(runtime_api.use_trie(&block_id).unwrap(), 2); } #[test] fn initialize_block_works() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.get_block_number(&block_id).unwrap(), 1); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::Both) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert_eq!(runtime_api.get_block_number(&block_id).unwrap(), 1); } #[test] fn initialize_block_is_called_only_once() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), Some(1)); - assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), None); + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::Both) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), Some(1)); + assert_eq!(runtime_api.take_block_number(&block_id).unwrap(), None); } #[test] fn initialize_block_is_skipped() { - let client = TestClientBuilder::new().set_execution_strategy(ExecutionStrategy::Both).build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - assert!(runtime_api.without_initialize_block(&block_id).unwrap()); + let client = TestClientBuilder::new() + 
.set_execution_strategy(ExecutionStrategy::Both) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + assert!(runtime_api.without_initialize_block(&block_id).unwrap()); } #[test] fn record_proof_works() { - let (client, longest_chain) = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::Both) - .build_with_longest_chain(); - - let block_id = BlockId::Number(client.chain_info().best_number); - let storage_root = longest_chain.best_chain().unwrap().state_root().clone(); - - let runtime_code = sp_core::traits::RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(client.code_at(&block_id).unwrap().into()), - hash: vec![1], - heap_pages: None, - }; - - let transaction = Transfer { - amount: 1000, - nonce: 0, - from: AccountKeyring::Alice.into(), - to: Default::default(), - }.into_signed_tx(); - - // Build the block and record proof - let mut builder = client - .new_block_at(&block_id, Default::default(), true) - .expect("Creates block builder"); - builder.push(transaction.clone()).unwrap(); - let (block, _, proof) = builder.build().expect("Bake block").into_inner(); - - let backend = create_proof_check_backend::>( - storage_root, - proof.expect("Proof was generated"), - ).expect("Creates proof backend."); - - // Use the proof backend to execute `execute_block`. 
- let mut overlay = Default::default(); - let executor = NativeExecutor::::new( - WasmExecutionMethod::Interpreted, - None, - 8, - ); - execution_proof_check_on_trie_backend::<_, u64, _>( - &backend, - &mut overlay, - &executor, - sp_core::tasks::executor(), - "Core_execute_block", - &block.encode(), - &runtime_code, - ).expect("Executes block while using the proof backend"); + let (client, longest_chain) = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::Both) + .build_with_longest_chain(); + + let block_id = BlockId::Number(client.chain_info().best_number); + let storage_root = longest_chain.best_chain().unwrap().state_root().clone(); + + let runtime_code = sp_core::traits::RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode( + client.code_at(&block_id).unwrap().into(), + ), + hash: vec![1], + heap_pages: None, + }; + + let transaction = Transfer { + amount: 1000, + nonce: 0, + from: AccountKeyring::Alice.into(), + to: Default::default(), + } + .into_signed_tx(); + + // Build the block and record proof + let mut builder = client + .new_block_at(&block_id, Default::default(), true) + .expect("Creates block builder"); + builder.push(transaction.clone()).unwrap(); + let (block, _, proof) = builder.build().expect("Bake block").into_inner(); + + let backend = create_proof_check_backend::>( + storage_root, + proof.expect("Proof was generated"), + ) + .expect("Creates proof backend."); + + // Use the proof backend to execute `execute_block`. 
+ let mut overlay = Default::default(); + let executor = NativeExecutor::::new(WasmExecutionMethod::Interpreted, None, 8); + execution_proof_check_on_trie_backend::<_, u64, _>( + &backend, + &mut overlay, + &executor, + sp_core::tasks::executor(), + "Core_execute_block", + &block.encode(), + &runtime_code, + ) + .expect("Executes block while using the proof backend"); } diff --git a/primitives/api/test/tests/trybuild.rs b/primitives/api/test/tests/trybuild.rs index 910771f938..22aef0964b 100644 --- a/primitives/api/test/tests/trybuild.rs +++ b/primitives/api/test/tests/trybuild.rs @@ -19,9 +19,9 @@ use std::env; #[rustversion::attr(not(stable), ignore)] #[test] fn ui() { - // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); - let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/*.rs"); } diff --git a/primitives/application-crypto/src/ed25519.rs b/primitives/application-crypto/src/ed25519.rs index 5be79ff4f7..989b2580d7 100644 --- a/primitives/application-crypto/src/ed25519.rs +++ b/primitives/application-crypto/src/ed25519.rs @@ -16,60 +16,60 @@ //! Ed25519 crypto types. 
-use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; pub use sp_core::ed25519::*; mod app { - use sp_core::crypto::{CryptoTypePublicPair, Public as TraitPublic}; - use sp_core::testing::ED25519; - use sp_core::ed25519::CRYPTO_ID; - - crate::app_crypto!(super, ED25519); - - impl crate::traits::BoundToRuntimeAppPublic for Public { - type Public = Self; - } - - impl From for CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } - } - - impl From<&Public> for CryptoTypePublicPair { - fn from(key: &Public) -> Self { - CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) - } - } + use sp_core::crypto::{CryptoTypePublicPair, Public as TraitPublic}; + use sp_core::ed25519::CRYPTO_ID; + use sp_core::testing::ED25519; + + crate::app_crypto!(super, ED25519); + + impl crate::traits::BoundToRuntimeAppPublic for Public { + type Public = Self; + } + + impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } + } + + impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) + } + } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { - type Signature = Signature; + type Signature = Signature; - fn all(key_type: KeyTypeId) -> crate::Vec { - sp_io::crypto::ed25519_public_keys(key_type) - } + fn all(key_type: KeyTypeId) -> crate::Vec { + sp_io::crypto::ed25519_public_keys(key_type) + } - fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { - sp_io::crypto::ed25519_generate(key_type, seed) - } + fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { + sp_io::crypto::ed25519_generate(key_type, seed) + } - fn sign>(&self, key_type: KeyTypeId, msg: &M) -> Option { - sp_io::crypto::ed25519_sign(key_type, self, msg.as_ref()) - } + fn 
sign>(&self, key_type: KeyTypeId, msg: &M) -> Option { + sp_io::crypto::ed25519_sign(key_type, self, msg.as_ref()) + } - fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { - sp_io::crypto::ed25519_verify(&signature, msg.as_ref(), self) - } + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { + sp_io::crypto::ed25519_verify(&signature, msg.as_ref(), self) + } - fn to_raw_vec(&self) -> Vec { - sp_core::crypto::Public::to_raw_vec(self) - } + fn to_raw_vec(&self) -> Vec { + sp_core::crypto::Public::to_raw_vec(self) + } } diff --git a/primitives/application-crypto/src/lib.rs b/primitives/application-crypto/src/lib.rs index 79572eb49d..0e8fee3a33 100644 --- a/primitives/application-crypto/src/lib.rs +++ b/primitives/application-crypto/src/lib.rs @@ -17,15 +17,18 @@ //! Traits and macros for constructing application specific strongly typed crypto wrappers. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] -#[doc(hidden)] -pub use sp_core::{self, crypto::{CryptoType, CryptoTypePublicPair, Public, Derive, IsWrappedBy, Wraps}, RuntimeDebug}; +pub use sp_core::crypto::{key_types, CryptoTypeId, KeyTypeId}; #[doc(hidden)] #[cfg(feature = "full_crypto")] -pub use sp_core::crypto::{SecretStringError, DeriveJunction, Ss58Codec, Pair}; -pub use sp_core::crypto::{CryptoTypeId, KeyTypeId, key_types}; +pub use sp_core::crypto::{DeriveJunction, Pair, SecretStringError, Ss58Codec}; +#[doc(hidden)] +pub use sp_core::{ + self, + crypto::{CryptoType, CryptoTypePublicPair, Derive, IsWrappedBy, Public, Wraps}, + RuntimeDebug, +}; #[doc(hidden)] pub use codec; @@ -53,13 +56,13 @@ pub use traits::*; #[cfg(feature = "full_crypto")] #[macro_export] macro_rules! 
app_crypto { - ($module:ident, $key_type:expr) => { - $crate::app_crypto_public_full_crypto!($module::Public, $key_type); - $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type); - $crate::app_crypto_signature_full_crypto!($module::Signature, $key_type); - $crate::app_crypto_signature_common!($module::Signature, $key_type); - $crate::app_crypto_pair!($module::Pair, $key_type); - }; + ($module:ident, $key_type:expr) => { + $crate::app_crypto_public_full_crypto!($module::Public, $key_type); + $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type); + $crate::app_crypto_signature_full_crypto!($module::Signature, $key_type); + $crate::app_crypto_signature_common!($module::Signature, $key_type); + $crate::app_crypto_pair!($module::Pair, $key_type); + }; } /// Declares Public, Pair, Signature types which are functionally equivalent to `$pair`, but are new @@ -74,79 +77,87 @@ macro_rules! app_crypto { #[cfg(not(feature = "full_crypto"))] #[macro_export] macro_rules! app_crypto { - ($module:ident, $key_type:expr) => { - $crate::app_crypto_public_not_full_crypto!($module::Public, $key_type); - $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type); - $crate::app_crypto_signature_not_full_crypto!($module::Signature, $key_type); - $crate::app_crypto_signature_common!($module::Signature, $key_type); - }; + ($module:ident, $key_type:expr) => { + $crate::app_crypto_public_not_full_crypto!($module::Public, $key_type); + $crate::app_crypto_public_common!($module::Public, $module::Signature, $key_type); + $crate::app_crypto_signature_not_full_crypto!($module::Signature, $key_type); + $crate::app_crypto_signature_common!($module::Signature, $key_type); + }; } /// Declares Pair type which is functionally equivalent to `$pair`, but is new /// Application-specific type whose identifier is `$key_type`. #[macro_export] macro_rules! 
app_crypto_pair { - ($pair:ty, $key_type:expr) => { - $crate::wrap!{ - /// A generic `AppPublic` wrapper type over $pair crypto; this has no specific App. - #[derive(Clone)] - pub struct Pair($pair); - } - - impl $crate::CryptoType for Pair { - type Pair = Pair; - } - - impl $crate::Pair for Pair { - type Public = Public; - type Seed = <$pair as $crate::Pair>::Seed; - type Signature = Signature; - type DeriveError = <$pair as $crate::Pair>::DeriveError; - - $crate::app_crypto_pair_functions_if_std!($pair); - - fn derive< - Iter: Iterator - >(&self, path: Iter, seed: Option) -> Result<(Self, Option), Self::DeriveError> { - self.0.derive(path, seed).map(|x| (Self(x.0), x.1)) - } - fn from_seed(seed: &Self::Seed) -> Self { Self(<$pair>::from_seed(seed)) } - fn from_seed_slice(seed: &[u8]) -> Result { - <$pair>::from_seed_slice(seed).map(Self) - } - fn sign(&self, msg: &[u8]) -> Self::Signature { - Signature(self.0.sign(msg)) - } - fn verify>( - sig: &Self::Signature, - message: M, - pubkey: &Self::Public, - ) -> bool { - <$pair>::verify(&sig.0, message, pubkey.as_ref()) - } - fn verify_weak, M: AsRef<[u8]>>( - sig: &[u8], - message: M, - pubkey: P, - ) -> bool { - <$pair>::verify_weak(sig, message, pubkey) - } - fn public(&self) -> Self::Public { Public(self.0.public()) } - fn to_raw_vec(&self) -> $crate::Vec { self.0.to_raw_vec() } - } - - impl $crate::AppKey for Pair { - type UntypedGeneric = $pair; - type Public = Public; - type Pair = Pair; - type Signature = Signature; - const ID: $crate::KeyTypeId = $key_type; - } - - impl $crate::AppPair for Pair { - type Generic = $pair; - } - }; + ($pair:ty, $key_type:expr) => { + $crate::wrap! { + /// A generic `AppPublic` wrapper type over $pair crypto; this has no specific App. 
+ #[derive(Clone)] + pub struct Pair($pair); + } + + impl $crate::CryptoType for Pair { + type Pair = Pair; + } + + impl $crate::Pair for Pair { + type Public = Public; + type Seed = <$pair as $crate::Pair>::Seed; + type Signature = Signature; + type DeriveError = <$pair as $crate::Pair>::DeriveError; + + $crate::app_crypto_pair_functions_if_std!($pair); + + fn derive>( + &self, + path: Iter, + seed: Option, + ) -> Result<(Self, Option), Self::DeriveError> { + self.0.derive(path, seed).map(|x| (Self(x.0), x.1)) + } + fn from_seed(seed: &Self::Seed) -> Self { + Self(<$pair>::from_seed(seed)) + } + fn from_seed_slice(seed: &[u8]) -> Result { + <$pair>::from_seed_slice(seed).map(Self) + } + fn sign(&self, msg: &[u8]) -> Self::Signature { + Signature(self.0.sign(msg)) + } + fn verify>( + sig: &Self::Signature, + message: M, + pubkey: &Self::Public, + ) -> bool { + <$pair>::verify(&sig.0, message, pubkey.as_ref()) + } + fn verify_weak, M: AsRef<[u8]>>( + sig: &[u8], + message: M, + pubkey: P, + ) -> bool { + <$pair>::verify_weak(sig, message, pubkey) + } + fn public(&self) -> Self::Public { + Public(self.0.public()) + } + fn to_raw_vec(&self) -> $crate::Vec { + self.0.to_raw_vec() + } + } + + impl $crate::AppKey for Pair { + type UntypedGeneric = $pair; + type Public = Public; + type Pair = Pair; + type Signature = Signature; + const ID: $crate::KeyTypeId = $key_type; + } + + impl $crate::AppPair for Pair { + type Generic = $pair; + } + }; } /// Implements functions for the `Pair` trait when `feature = "std"` is enabled. @@ -154,28 +165,28 @@ macro_rules! app_crypto_pair { #[cfg(feature = "std")] #[macro_export] macro_rules! 
app_crypto_pair_functions_if_std { - ($pair:ty) => { - fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { - let r = <$pair>::generate_with_phrase(password); - (Self(r.0), r.1, r.2) - } - - fn from_phrase(phrase: &str, password: Option<&str>) - -> Result<(Self, Self::Seed), $crate::SecretStringError> - { - <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) - } - } + ($pair:ty) => { + fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed) { + let r = <$pair>::generate_with_phrase(password); + (Self(r.0), r.1, r.2) + } + + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), $crate::SecretStringError> { + <$pair>::from_phrase(phrase, password).map(|r| (Self(r.0), r.1)) + } + }; } #[doc(hidden)] #[cfg(not(feature = "std"))] #[macro_export] macro_rules! app_crypto_pair_functions_if_std { - ($pair:ty) => {} + ($pair:ty) => {}; } - /// Declares Public type which is functionally equivalent to `$public`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature @@ -183,31 +194,31 @@ macro_rules! app_crypto_pair_functions_if_std { #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_full_crypto { - ($public:ty, $key_type:expr) => { - $crate::wrap!{ - /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. - #[derive( - Clone, Default, Eq, PartialEq, Ord, PartialOrd, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::RuntimeDebug, - )] - #[derive(Hash)] - pub struct Public($public); - } - - impl $crate::CryptoType for Public { - type Pair = Pair; - } - - impl $crate::AppKey for Public { - type UntypedGeneric = $public; - type Public = Public; - type Pair = Pair; - type Signature = Signature; - const ID: $crate::KeyTypeId = $key_type; - } - } + ($public:ty, $key_type:expr) => { + $crate::wrap! 
{ + /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. + #[derive( + Clone, Default, Eq, PartialEq, Ord, PartialOrd, + $crate::codec::Encode, + $crate::codec::Decode, + $crate::RuntimeDebug, + )] + #[derive(Hash)] + pub struct Public($public); + } + + impl $crate::CryptoType for Public { + type Pair = Pair; + } + + impl $crate::AppKey for Public { + type UntypedGeneric = $public; + type Public = Public; + type Pair = Pair; + type Signature = Signature; + const ID: $crate::KeyTypeId = $key_type; + } + }; } /// Declares Public type which is functionally equivalent to `$public`, but is new @@ -217,27 +228,27 @@ macro_rules! app_crypto_public_full_crypto { #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_not_full_crypto { - ($public:ty, $key_type:expr) => { - $crate::wrap!{ - /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. - #[derive( - Clone, Default, Eq, PartialEq, Ord, PartialOrd, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::RuntimeDebug, - )] - pub struct Public($public); - } - - impl $crate::CryptoType for Public {} - - impl $crate::AppKey for Public { - type UntypedGeneric = $public; - type Public = Public; - type Signature = Signature; - const ID: $crate::KeyTypeId = $key_type; - } - } + ($public:ty, $key_type:expr) => { + $crate::wrap! { + /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. + #[derive( + Clone, Default, Eq, PartialEq, Ord, PartialOrd, + $crate::codec::Encode, + $crate::codec::Decode, + $crate::RuntimeDebug, + )] + pub struct Public($public); + } + + impl $crate::CryptoType for Public {} + + impl $crate::AppKey for Public { + type UntypedGeneric = $public; + type Public = Public; + type Signature = Signature; + const ID: $crate::KeyTypeId = $key_type; + } + }; } /// Declares Public type which is functionally equivalent to `$public`, but is new @@ -246,54 +257,65 @@ macro_rules! 
app_crypto_public_not_full_crypto { #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_common { - ($public:ty, $sig:ty, $key_type:expr) => { - $crate::app_crypto_public_common_if_std!(); - - impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { self.0.as_ref() } - } - - impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { self.0.as_mut() } - } - - impl $crate::Public for Public { - fn from_slice(x: &[u8]) -> Self { Self(<$public>::from_slice(x)) } - } - - impl $crate::AppPublic for Public { - type Generic = $public; - } - - impl $crate::RuntimeAppPublic for Public where $public: $crate::RuntimePublic { - const ID: $crate::KeyTypeId = $key_type; - type Signature = Signature; - - fn all() -> $crate::Vec { - <$public as $crate::RuntimePublic>::all($key_type).into_iter().map(Self).collect() - } - - fn generate_pair(seed: Option<$crate::Vec>) -> Self { - Self(<$public as $crate::RuntimePublic>::generate_pair($key_type, seed)) - } - - fn sign>(&self, msg: &M) -> Option { - <$public as $crate::RuntimePublic>::sign( - self.as_ref(), - $key_type, - msg, - ).map(Signature) - } - - fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { - <$public as $crate::RuntimePublic>::verify(self.as_ref(), msg, &signature.as_ref()) - } - - fn to_raw_vec(&self) -> $crate::Vec { - <$public as $crate::RuntimePublic>::to_raw_vec(&self.0) - } - } - } + ($public:ty, $sig:ty, $key_type:expr) => { + $crate::app_crypto_public_common_if_std!(); + + impl AsRef<[u8]> for Public { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } + } + + impl AsMut<[u8]> for Public { + fn as_mut(&mut self) -> &mut [u8] { + self.0.as_mut() + } + } + + impl $crate::Public for Public { + fn from_slice(x: &[u8]) -> Self { + Self(<$public>::from_slice(x)) + } + } + + impl $crate::AppPublic for Public { + type Generic = $public; + } + + impl $crate::RuntimeAppPublic for Public + where + $public: $crate::RuntimePublic, + { + const ID: $crate::KeyTypeId = $key_type; + type 
Signature = Signature; + + fn all() -> $crate::Vec { + <$public as $crate::RuntimePublic>::all($key_type) + .into_iter() + .map(Self) + .collect() + } + + fn generate_pair(seed: Option<$crate::Vec>) -> Self { + Self(<$public as $crate::RuntimePublic>::generate_pair( + $key_type, seed, + )) + } + + fn sign>(&self, msg: &M) -> Option { + <$public as $crate::RuntimePublic>::sign(self.as_ref(), $key_type, msg) + .map(Signature) + } + + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { + <$public as $crate::RuntimePublic>::verify(self.as_ref(), msg, &signature.as_ref()) + } + + fn to_raw_vec(&self) -> $crate::Vec { + <$public as $crate::RuntimePublic>::to_raw_vec(&self.0) + } + } + }; } /// Implements traits for the public key type if `feature = "std"` is enabled. @@ -301,53 +323,55 @@ macro_rules! app_crypto_public_common { #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_common_if_std { - () => { - impl $crate::Derive for Public { - fn derive>(&self, - path: Iter - ) -> Option { - self.0.derive(path).map(Self) - } - } - - impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - use $crate::Ss58Codec; - write!(f, "{}", self.0.to_ss58check()) - } - } - - impl $crate::serde::Serialize for Public { - fn serialize(&self, serializer: S) -> std::result::Result where - S: $crate::serde::Serializer - { - use $crate::Ss58Codec; - serializer.serialize_str(&self.to_ss58check()) - } - } - - impl<'de> $crate::serde::Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> std::result::Result where - D: $crate::serde::Deserializer<'de> - { - use $crate::Ss58Codec; - Public::from_ss58check(&String::deserialize(deserializer)?) 
- .map_err(|e| $crate::serde::de::Error::custom(format!("{:?}", e))) - } - } - } + () => { + impl $crate::Derive for Public { + fn derive>( + &self, + path: Iter, + ) -> Option { + self.0.derive(path).map(Self) + } + } + + impl std::fmt::Display for Public { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + use $crate::Ss58Codec; + write!(f, "{}", self.0.to_ss58check()) + } + } + + impl $crate::serde::Serialize for Public { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: $crate::serde::Serializer, + { + use $crate::Ss58Codec; + serializer.serialize_str(&self.to_ss58check()) + } + } + + impl<'de> $crate::serde::Deserialize<'de> for Public { + fn deserialize(deserializer: D) -> std::result::Result + where + D: $crate::serde::Deserializer<'de>, + { + use $crate::Ss58Codec; + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| $crate::serde::de::Error::custom(format!("{:?}", e))) + } + } + }; } #[cfg(not(feature = "std"))] #[doc(hidden)] #[macro_export] macro_rules! app_crypto_public_common_if_std { - () => { - impl $crate::Derive for Public {} - } + () => { + impl $crate::Derive for Public {} + }; } - /// Declares Signature type which is functionally equivalent to `$sig`, but is new /// Application-specific type whose identifier is `$key_type`. /// can only be used together with `full_crypto` feature @@ -355,30 +379,30 @@ macro_rules! app_crypto_public_common_if_std { #[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_full_crypto { - ($sig:ty, $key_type:expr) => { - $crate::wrap! { - /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. 
- #[derive(Clone, Default, Eq, PartialEq, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::RuntimeDebug, - )] - #[derive(Hash)] - pub struct Signature($sig); - } - - impl $crate::CryptoType for Signature { - type Pair = Pair; - } - - impl $crate::AppKey for Signature { - type UntypedGeneric = $sig; - type Public = Public; - type Pair = Pair; - type Signature = Signature; - const ID: $crate::KeyTypeId = $key_type; - } - } + ($sig:ty, $key_type:expr) => { + $crate::wrap! { + /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. + #[derive(Clone, Default, Eq, PartialEq, + $crate::codec::Encode, + $crate::codec::Decode, + $crate::RuntimeDebug, + )] + #[derive(Hash)] + pub struct Signature($sig); + } + + impl $crate::CryptoType for Signature { + type Pair = Pair; + } + + impl $crate::AppKey for Signature { + type UntypedGeneric = $sig; + type Public = Public; + type Pair = Pair; + type Signature = Signature; + const ID: $crate::KeyTypeId = $key_type; + } + }; } /// Declares Signature type which is functionally equivalent to `$sig`, but is new @@ -388,26 +412,26 @@ macro_rules! app_crypto_signature_full_crypto { #[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_not_full_crypto { - ($sig:ty, $key_type:expr) => { - $crate::wrap! { - /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. - #[derive(Clone, Default, Eq, PartialEq, - $crate::codec::Encode, - $crate::codec::Decode, - $crate::RuntimeDebug, - )] - pub struct Signature($sig); - } - - impl $crate::CryptoType for Signature {} - - impl $crate::AppKey for Signature { - type UntypedGeneric = $sig; - type Public = Public; - type Signature = Signature; - const ID: $crate::KeyTypeId = $key_type; - } - } + ($sig:ty, $key_type:expr) => { + $crate::wrap! { + /// A generic `AppPublic` wrapper type over $public crypto; this has no specific App. 
+ #[derive(Clone, Default, Eq, PartialEq, + $crate::codec::Encode, + $crate::codec::Decode, + $crate::RuntimeDebug, + )] + pub struct Signature($sig); + } + + impl $crate::CryptoType for Signature {} + + impl $crate::AppKey for Signature { + type UntypedGeneric = $sig; + type Public = Public; + type Signature = Signature; + const ID: $crate::KeyTypeId = $key_type; + } + }; } /// Declares Signature type which is functionally equivalent to `$sig`, but is new @@ -416,21 +440,25 @@ macro_rules! app_crypto_signature_not_full_crypto { #[doc(hidden)] #[macro_export] macro_rules! app_crypto_signature_common { - ($sig:ty, $key_type:expr) => { - impl $crate::Deref for Signature { - type Target = [u8]; - - fn deref(&self) -> &Self::Target { self.0.as_ref() } - } - - impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { self.0.as_ref() } - } - - impl $crate::AppSignature for Signature { - type Generic = $sig; - } - } + ($sig:ty, $key_type:expr) => { + impl $crate::Deref for Signature { + type Target = [u8]; + + fn deref(&self) -> &Self::Target { + self.0.as_ref() + } + } + + impl AsRef<[u8]> for Signature { + fn as_ref(&self) -> &[u8] { + self.0.as_ref() + } + } + + impl $crate::AppSignature for Signature { + type Generic = $sig; + } + }; } /// Implement bidirectional `From` and on-way `AsRef`/`AsMut` for two types, `$inner` and `$outer`. @@ -498,10 +526,9 @@ macro_rules! with_pair { } } - #[doc(hidden)] #[macro_export] #[cfg(all(not(feature = "std"), not(feature = "full_crypto")))] macro_rules! with_pair { - ( $( $def:tt )* ) => {} + ( $( $def:tt )* ) => {}; } diff --git a/primitives/application-crypto/src/sr25519.rs b/primitives/application-crypto/src/sr25519.rs index a0f2cef1c4..1484bb64a4 100644 --- a/primitives/application-crypto/src/sr25519.rs +++ b/primitives/application-crypto/src/sr25519.rs @@ -16,60 +16,60 @@ //! Sr25519 crypto types. 
-use crate::{RuntimePublic, KeyTypeId}; +use crate::{KeyTypeId, RuntimePublic}; use sp_std::vec::Vec; pub use sp_core::sr25519::*; mod app { - use sp_core::crypto::{CryptoTypePublicPair, Public as TraitPublic}; - use sp_core::testing::SR25519; - use sp_core::sr25519::CRYPTO_ID; - - crate::app_crypto!(super, SR25519); - - impl crate::traits::BoundToRuntimeAppPublic for Public { - type Public = Self; - } - - impl From for CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } - } - - impl From<&Public> for CryptoTypePublicPair { - fn from(key: &Public) -> Self { - CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) - } - } + use sp_core::crypto::{CryptoTypePublicPair, Public as TraitPublic}; + use sp_core::sr25519::CRYPTO_ID; + use sp_core::testing::SR25519; + + crate::app_crypto!(super, SR25519); + + impl crate::traits::BoundToRuntimeAppPublic for Public { + type Public = Self; + } + + impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } + } + + impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(CRYPTO_ID, key.to_raw_vec()) + } + } } -pub use app::{Public as AppPublic, Signature as AppSignature}; #[cfg(feature = "full_crypto")] pub use app::Pair as AppPair; +pub use app::{Public as AppPublic, Signature as AppSignature}; impl RuntimePublic for Public { - type Signature = Signature; + type Signature = Signature; - fn all(key_type: KeyTypeId) -> crate::Vec { - sp_io::crypto::sr25519_public_keys(key_type) - } + fn all(key_type: KeyTypeId) -> crate::Vec { + sp_io::crypto::sr25519_public_keys(key_type) + } - fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { - sp_io::crypto::sr25519_generate(key_type, seed) - } + fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self { + sp_io::crypto::sr25519_generate(key_type, seed) + } - fn sign>(&self, key_type: KeyTypeId, msg: &M) -> Option { - sp_io::crypto::sr25519_sign(key_type, self, msg.as_ref()) - } + fn 
sign>(&self, key_type: KeyTypeId, msg: &M) -> Option { + sp_io::crypto::sr25519_sign(key_type, self, msg.as_ref()) + } - fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { - sp_io::crypto::sr25519_verify(&signature, msg.as_ref(), self) - } + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { + sp_io::crypto::sr25519_verify(&signature, msg.as_ref(), self) + } - fn to_raw_vec(&self) -> Vec { - sp_core::crypto::Public::to_raw_vec(self) - } + fn to_raw_vec(&self) -> Vec { + sp_core::crypto::Public::to_raw_vec(self) + } } diff --git a/primitives/application-crypto/src/traits.rs b/primitives/application-crypto/src/traits.rs index 2af039a88d..352a8501ed 100644 --- a/primitives/application-crypto/src/traits.rs +++ b/primitives/application-crypto/src/traits.rs @@ -18,26 +18,26 @@ use sp_core::crypto::Pair; use codec::Codec; -use sp_core::crypto::{KeyTypeId, CryptoType, IsWrappedBy, Public}; +use sp_core::crypto::{CryptoType, IsWrappedBy, KeyTypeId, Public}; use sp_std::{fmt::Debug, vec::Vec}; /// An application-specific key. pub trait AppKey: 'static + Send + Sync + Sized + CryptoType + Clone { - /// The corresponding type as a generic crypto type. - type UntypedGeneric: IsWrappedBy; + /// The corresponding type as a generic crypto type. + type UntypedGeneric: IsWrappedBy; - /// The corresponding public key type in this application scheme. - type Public: AppPublic; + /// The corresponding public key type in this application scheme. + type Public: AppPublic; - /// The corresponding key pair type in this application scheme. - #[cfg(feature = "full_crypto")] - type Pair: AppPair; + /// The corresponding key pair type in this application scheme. + #[cfg(feature = "full_crypto")] + type Pair: AppPair; - /// The corresponding signature type in this application scheme. - type Signature: AppSignature; + /// The corresponding signature type in this application scheme. 
+ type Signature: AppSignature; - /// An identifier for this application-specific key type. - const ID: KeyTypeId; + /// An identifier for this application-specific key type. + const ID: KeyTypeId; } /// Type which implements Hash in std, not when no-std (std variant). @@ -54,98 +54,106 @@ impl MaybeHash for T {} /// Type which implements Debug and Hash in std, not when no-std (no-std variant with crypto). #[cfg(all(not(feature = "std"), feature = "full_crypto"))] -pub trait MaybeDebugHash: sp_std::hash::Hash {} +pub trait MaybeDebugHash: sp_std::hash::Hash {} #[cfg(all(not(feature = "std"), feature = "full_crypto"))] impl MaybeDebugHash for T {} /// A application's public key. pub trait AppPublic: - AppKey + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec + AppKey + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec { - /// The wrapped type which is just a plain instance of `Public`. - type Generic: - IsWrappedBy + Public + Ord + PartialOrd + Eq + PartialEq + Debug + MaybeHash + codec::Codec; + /// The wrapped type which is just a plain instance of `Public`. + type Generic: IsWrappedBy + + Public + + Ord + + PartialOrd + + Eq + + PartialEq + + Debug + + MaybeHash + + codec::Codec; } /// A application's key pair. #[cfg(feature = "full_crypto")] -pub trait AppPair: AppKey + Pair::Public> { - /// The wrapped type which is just a plain instance of `Pair`. - type Generic: IsWrappedBy + Pair::Public as AppPublic>::Generic>; +pub trait AppPair: AppKey + Pair::Public> { + /// The wrapped type which is just a plain instance of `Pair`. + type Generic: IsWrappedBy + + Pair::Public as AppPublic>::Generic>; } /// A application's signature. pub trait AppSignature: AppKey + Eq + PartialEq + Debug + MaybeHash { - /// The wrapped type which is just a plain instance of `Signature`. - type Generic: IsWrappedBy + Eq + PartialEq + Debug + MaybeHash; + /// The wrapped type which is just a plain instance of `Signature`. 
+ type Generic: IsWrappedBy + Eq + PartialEq + Debug + MaybeHash; } /// A runtime interface for a public key. pub trait RuntimePublic: Sized { - /// The signature that will be generated when signing with the corresponding private key. - type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone; - - /// Returns all public keys for the given key type in the keystore. - fn all(key_type: KeyTypeId) -> crate::Vec; - - /// Generate a public/private pair for the given key type with an optional `seed` and - /// store it in the keystore. - /// - /// The `seed` needs to be valid utf8. - /// - /// Returns the generated public key. - fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self; - - /// Sign the given message with the corresponding private key of this public key. - /// - /// The private key will be requested from the keystore using the given key type. - /// - /// Returns the signature or `None` if the private key could not be found or some other error - /// occurred. - fn sign>(&self, key_type: KeyTypeId, msg: &M) -> Option; - - /// Verify that the given signature matches the given message using this public key. - fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool; - - /// Returns `Self` as raw vec. - fn to_raw_vec(&self) -> Vec; + /// The signature that will be generated when signing with the corresponding private key. + type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone; + + /// Returns all public keys for the given key type in the keystore. + fn all(key_type: KeyTypeId) -> crate::Vec; + + /// Generate a public/private pair for the given key type with an optional `seed` and + /// store it in the keystore. + /// + /// The `seed` needs to be valid utf8. + /// + /// Returns the generated public key. + fn generate_pair(key_type: KeyTypeId, seed: Option>) -> Self; + + /// Sign the given message with the corresponding private key of this public key. 
+ /// + /// The private key will be requested from the keystore using the given key type. + /// + /// Returns the signature or `None` if the private key could not be found or some other error + /// occurred. + fn sign>(&self, key_type: KeyTypeId, msg: &M) -> Option; + + /// Verify that the given signature matches the given message using this public key. + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool; + + /// Returns `Self` as raw vec. + fn to_raw_vec(&self) -> Vec; } /// A runtime interface for an application's public key. pub trait RuntimeAppPublic: Sized { - /// An identifier for this application-specific key type. - const ID: KeyTypeId; - - /// The signature that will be generated when signing with the corresponding private key. - type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone; - - /// Returns all public keys for this application in the keystore. - fn all() -> crate::Vec; - - /// Generate a public/private pair with an optional `seed` and store it in the keystore. - /// - /// The `seed` needs to be valid utf8. - /// - /// Returns the generated public key. - fn generate_pair(seed: Option>) -> Self; - - /// Sign the given message with the corresponding private key of this public key. - /// - /// The private key will be requested from the keystore. - /// - /// Returns the signature or `None` if the private key could not be found or some other error - /// occurred. - fn sign>(&self, msg: &M) -> Option; - - /// Verify that the given signature matches the given message using this public key. - fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool; - - /// Returns `Self` as raw vec. - fn to_raw_vec(&self) -> Vec; + /// An identifier for this application-specific key type. + const ID: KeyTypeId; + + /// The signature that will be generated when signing with the corresponding private key. 
+ type Signature: Codec + Debug + MaybeHash + Eq + PartialEq + Clone; + + /// Returns all public keys for this application in the keystore. + fn all() -> crate::Vec; + + /// Generate a public/private pair with an optional `seed` and store it in the keystore. + /// + /// The `seed` needs to be valid utf8. + /// + /// Returns the generated public key. + fn generate_pair(seed: Option>) -> Self; + + /// Sign the given message with the corresponding private key of this public key. + /// + /// The private key will be requested from the keystore. + /// + /// Returns the signature or `None` if the private key could not be found or some other error + /// occurred. + fn sign>(&self, msg: &M) -> Option; + + /// Verify that the given signature matches the given message using this public key. + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool; + + /// Returns `Self` as raw vec. + fn to_raw_vec(&self) -> Vec; } /// Something that bound to a fixed `RuntimeAppPublic`. pub trait BoundToRuntimeAppPublic { - /// The `RuntimeAppPublic` this type is bound to. - type Public: RuntimeAppPublic; + /// The `RuntimeAppPublic` this type is bound to. + type Public: RuntimeAppPublic; } diff --git a/primitives/application-crypto/test/src/ed25519.rs b/primitives/application-crypto/test/src/ed25519.rs index 1d72962829..bb9cff2d1d 100644 --- a/primitives/application-crypto/test/src/ed25519.rs +++ b/primitives/application-crypto/test/src/ed25519.rs @@ -16,27 +16,33 @@ //! 
Integration tests for ed25519 -use sp_runtime::generic::BlockId; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::ed25519::{AppPair, AppPublic}; use sp_core::{ - crypto::Pair, - testing::{KeyStore, ED25519}, + crypto::Pair, + testing::{KeyStore, ED25519}, }; +use sp_runtime::generic::BlockId; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::ed25519::{AppPair, AppPublic}; #[test] fn ed25519_works_in_runtime() { - let keystore = KeyStore::new(); - let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() - .test_ed25519_crypto(&BlockId::Number(0)) - .expect("Tests `ed25519` crypto."); + let keystore = KeyStore::new(); + let test_client = TestClientBuilder::new() + .set_keystore(keystore.clone()) + .build(); + let (signature, public) = test_client + .runtime_api() + .test_ed25519_crypto(&BlockId::Number(0)) + .expect("Tests `ed25519` crypto."); - let supported_keys = keystore.read().keys(ED25519).unwrap(); - assert!(supported_keys.contains(&public.clone().into())); - assert!(AppPair::verify(&signature, "ed25519", &AppPublic::from(public))); + let supported_keys = keystore.read().keys(ED25519).unwrap(); + assert!(supported_keys.contains(&public.clone().into())); + assert!(AppPair::verify( + &signature, + "ed25519", + &AppPublic::from(public) + )); } diff --git a/primitives/application-crypto/test/src/lib.rs b/primitives/application-crypto/test/src/lib.rs index cb045e81a7..7bdb5f9a77 100644 --- a/primitives/application-crypto/test/src/lib.rs +++ b/primitives/application-crypto/test/src/lib.rs @@ -19,4 +19,4 @@ #[cfg(test)] mod ed25519; #[cfg(test)] -mod sr25519; \ No newline at end of file +mod sr25519; diff --git 
a/primitives/application-crypto/test/src/sr25519.rs b/primitives/application-crypto/test/src/sr25519.rs index f2c7c48b2b..8ce22771ab 100644 --- a/primitives/application-crypto/test/src/sr25519.rs +++ b/primitives/application-crypto/test/src/sr25519.rs @@ -16,28 +16,33 @@ //! Integration tests for sr25519 - -use sp_runtime::generic::BlockId; +use sp_api::ProvideRuntimeApi; +use sp_application_crypto::sr25519::{AppPair, AppPublic}; use sp_core::{ - crypto::Pair, - testing::{KeyStore, SR25519}, + crypto::Pair, + testing::{KeyStore, SR25519}, }; +use sp_runtime::generic::BlockId; use substrate_test_runtime_client::{ - TestClientBuilder, DefaultTestClientBuilderExt, TestClientBuilderExt, - runtime::TestAPI, + runtime::TestAPI, DefaultTestClientBuilderExt, TestClientBuilder, TestClientBuilderExt, }; -use sp_api::ProvideRuntimeApi; -use sp_application_crypto::sr25519::{AppPair, AppPublic}; #[test] fn sr25519_works_in_runtime() { - let keystore = KeyStore::new(); - let test_client = TestClientBuilder::new().set_keystore(keystore.clone()).build(); - let (signature, public) = test_client.runtime_api() - .test_sr25519_crypto(&BlockId::Number(0)) - .expect("Tests `sr25519` crypto."); - - let supported_keys = keystore.read().keys(SR25519).unwrap(); - assert!(supported_keys.contains(&public.clone().into())); - assert!(AppPair::verify(&signature, "sr25519", &AppPublic::from(public))); + let keystore = KeyStore::new(); + let test_client = TestClientBuilder::new() + .set_keystore(keystore.clone()) + .build(); + let (signature, public) = test_client + .runtime_api() + .test_sr25519_crypto(&BlockId::Number(0)) + .expect("Tests `sr25519` crypto."); + + let supported_keys = keystore.read().keys(SR25519).unwrap(); + assert!(supported_keys.contains(&public.clone().into())); + assert!(AppPair::verify( + &signature, + "sr25519", + &AppPublic::from(public) + )); } diff --git a/primitives/arithmetic/benches/bench.rs b/primitives/arithmetic/benches/bench.rs index 1dbcf260af..885a1be8fa 100644 
--- a/primitives/arithmetic/benches/bench.rs +++ b/primitives/arithmetic/benches/bench.rs @@ -14,67 +14,69 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use criterion::{Criterion, Throughput, BenchmarkId, criterion_group, criterion_main}; -use sp_arithmetic::biguint::{BigUint, Single}; +use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput}; use rand::Rng; +use sp_arithmetic::biguint::{BigUint, Single}; fn random_big_uint(size: usize) -> BigUint { - let mut rng = rand::thread_rng(); - let digits: Vec<_> = (0..size).map(|_| rng.gen_range(0, Single::max_value())).collect(); - BigUint::from_limbs(&digits) + let mut rng = rand::thread_rng(); + let digits: Vec<_> = (0..size) + .map(|_| rng.gen_range(0, Single::max_value())) + .collect(); + BigUint::from_limbs(&digits) } fn bench_op(c: &mut Criterion, name: &str, op: F) { - let mut group = c.benchmark_group(name); + let mut group = c.benchmark_group(name); - for size in [2, 4, 6, 8, 10].iter() { - group.throughput(Throughput::Elements(*size)); - group.bench_with_input(BenchmarkId::from_parameter(size), size, |bencher, &size| { - let a = random_big_uint(size as usize); - let b = random_big_uint(size as usize); + for size in [2, 4, 6, 8, 10].iter() { + group.throughput(Throughput::Elements(*size)); + group.bench_with_input(BenchmarkId::from_parameter(size), size, |bencher, &size| { + let a = random_big_uint(size as usize); + let b = random_big_uint(size as usize); - bencher.iter(|| op(&a, &b)); - }); - } + bencher.iter(|| op(&a, &b)); + }); + } } fn bench_addition(c: &mut Criterion) { - bench_op(c, "addition", |a, b| { - let _ = a.clone().add(&b); - }); + bench_op(c, "addition", |a, b| { + let _ = a.clone().add(&b); + }); } fn bench_subtraction(c: &mut Criterion) { - bench_op(c, "subtraction", |a, b| { - let _ = a.clone().sub(&b); - }); + bench_op(c, "subtraction", |a, b| { + let _ = a.clone().sub(&b); + }); } fn 
bench_multiplication(c: &mut Criterion) { - bench_op(c, "multiplication", |a, b| { - let _ = a.clone().mul(&b); - }); + bench_op(c, "multiplication", |a, b| { + let _ = a.clone().mul(&b); + }); } fn bench_division(c: &mut Criterion) { - let mut group = c.benchmark_group("division"); + let mut group = c.benchmark_group("division"); - for size in [4, 6, 8, 10].iter() { - group.throughput(Throughput::Elements(*size)); - group.bench_with_input(BenchmarkId::from_parameter(size), size, |bencher, &size| { - let a = random_big_uint(size as usize); - let b = random_big_uint(rand::thread_rng().gen_range(2, size as usize)); + for size in [4, 6, 8, 10].iter() { + group.throughput(Throughput::Elements(*size)); + group.bench_with_input(BenchmarkId::from_parameter(size), size, |bencher, &size| { + let a = random_big_uint(size as usize); + let b = random_big_uint(rand::thread_rng().gen_range(2, size as usize)); - bencher.iter(|| { - let _ = a.clone().div(&b, true); - }); - }); - } + bencher.iter(|| { + let _ = a.clone().div(&b, true); + }); + }); + } } -criterion_group!{ - name = benches; - config = Criterion::default(); - targets = bench_addition, bench_subtraction, bench_multiplication, bench_division +criterion_group! 
{ + name = benches; + config = Criterion::default(); + targets = bench_addition, bench_subtraction, bench_multiplication, bench_division } criterion_main!(benches); diff --git a/primitives/arithmetic/fuzzer/src/biguint.rs b/primitives/arithmetic/fuzzer/src/biguint.rs index f217b080d2..d746066479 100644 --- a/primitives/arithmetic/fuzzer/src/biguint.rs +++ b/primitives/arithmetic/fuzzer/src/biguint.rs @@ -31,151 +31,181 @@ use sp_arithmetic::biguint::{BigUint, Single}; use std::convert::TryFrom; fn main() { - loop { - fuzz!(|data: (Vec, Vec, bool)| { - let (mut digits_u, mut digits_v, return_remainder) = data; - - let mut u = BigUint::from_limbs(&digits_u); - let mut v = BigUint::from_limbs(&digits_v); - - u.lstrip(); - v.lstrip(); - - let ue = u128::try_from(u.clone()); - let ve = u128::try_from(v.clone()); - - digits_u.reverse(); - digits_v.reverse(); - - let num_u = num_bigint::BigUint::new(digits_u.clone()); - let num_v = num_bigint::BigUint::new(digits_v.clone()); - - if check_digit_lengths(&u, &v, 4) { - assert_eq!(u.cmp(&v), ue.cmp(&ve)); - assert_eq!(u.eq(&v), ue.eq(&ve)); - } - - if check_digit_lengths(&u, &v, 3) { - let expected = ue.unwrap() + ve.unwrap(); - let t = u.clone().add(&v); - assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} + {:?} ===> {:?} != {:?}", u, v, t, expected, - ); - } - - if check_digit_lengths(&u, &v, 4) { - let expected = ue.unwrap().checked_sub(ve.unwrap()); - let t = u.clone().sub(&v); - if expected.is_none() { - assert!(t.is_err()) - } else { - let t = t.unwrap(); - let expected = expected.unwrap(); - assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} - {:?} ===> {:?} != {:?}", u, v, t, expected, - ); - } - } - - if check_digit_lengths(&u, &v, 2) { - let expected = ue.unwrap() * ve.unwrap(); - let t = u.clone().mul(&v); - assert_eq!( - u128::try_from(t.clone()).unwrap(), expected, - "{:?} * {:?} ===> {:?} != {:?}", u, v, t, expected, - ); - } - - if check_digit_lengths(&u, &v, 4) { - let (ue, 
ve) = (ue.unwrap(), ve.unwrap()); - if ve == 0 { - return; - } - let (q, r) = (ue / ve, ue % ve); - if let Some((qq, rr)) = u.clone().div(&v, true) { - assert_eq!( - u128::try_from(qq.clone()).unwrap(), q, - "{:?} / {:?} ===> {:?} != {:?}", u, v, qq, q, - ); - assert_eq!( - u128::try_from(rr.clone()).unwrap(), r, - "{:?} % {:?} ===> {:?} != {:?}", u, v, rr, r, - ); - } else if v.len() == 1 { - let qq = u.clone().div_unit(ve as Single); - assert_eq!( - u128::try_from(qq.clone()).unwrap(), q, - "[single] {:?} / {:?} ===> {:?} != {:?}", u, v, qq, q, - ); - } else if v.msb() != 0 && u.msb() != 0 && u.len() > v.len() { - panic!("div returned none for an unexpected reason"); - } - } - - // Test against num_bigint - - // Equality - - assert_eq!(u.cmp(&v), num_u.cmp(&num_v)); - - // Addition - - let w = u.clone().add(&v); - let num_w = num_u.clone() + &num_v; - - assert_biguints_eq(&w, &num_w); - - // Subtraction - - if let Ok(w) = u.clone().sub(&v) { - let num_w = num_u.clone() - &num_v; - - assert_biguints_eq(&w, &num_w); - } - - // Multiplication - - let w = u.clone().mul(&v); - let num_w = num_u.clone() * &num_v; - - assert_biguints_eq(&w, &num_w); - - // Division - - if v.len() == 1 && v.get(0) != 0 { - let w = u.clone().div_unit(v.get(0)); - let num_w = num_u.clone() / &num_v; - assert_biguints_eq(&w, &num_w); - } else if u.len() > v.len() && v.len() > 0 { - let num_remainder = num_u.clone() % num_v.clone(); - - let (w, remainder) = u.clone().div(&v, return_remainder).unwrap(); - let num_w = num_u.clone() / &num_v; - - assert_biguints_eq(&w, &num_w); - - if return_remainder { - assert_biguints_eq(&remainder, &num_remainder); - } - } - }); - } + loop { + fuzz!(|data: (Vec, Vec, bool)| { + let (mut digits_u, mut digits_v, return_remainder) = data; + + let mut u = BigUint::from_limbs(&digits_u); + let mut v = BigUint::from_limbs(&digits_v); + + u.lstrip(); + v.lstrip(); + + let ue = u128::try_from(u.clone()); + let ve = u128::try_from(v.clone()); + + digits_u.reverse(); 
+ digits_v.reverse(); + + let num_u = num_bigint::BigUint::new(digits_u.clone()); + let num_v = num_bigint::BigUint::new(digits_v.clone()); + + if check_digit_lengths(&u, &v, 4) { + assert_eq!(u.cmp(&v), ue.cmp(&ve)); + assert_eq!(u.eq(&v), ue.eq(&ve)); + } + + if check_digit_lengths(&u, &v, 3) { + let expected = ue.unwrap() + ve.unwrap(); + let t = u.clone().add(&v); + assert_eq!( + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} + {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, + ); + } + + if check_digit_lengths(&u, &v, 4) { + let expected = ue.unwrap().checked_sub(ve.unwrap()); + let t = u.clone().sub(&v); + if expected.is_none() { + assert!(t.is_err()) + } else { + let t = t.unwrap(); + let expected = expected.unwrap(); + assert_eq!( + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} - {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, + ); + } + } + + if check_digit_lengths(&u, &v, 2) { + let expected = ue.unwrap() * ve.unwrap(); + let t = u.clone().mul(&v); + assert_eq!( + u128::try_from(t.clone()).unwrap(), + expected, + "{:?} * {:?} ===> {:?} != {:?}", + u, + v, + t, + expected, + ); + } + + if check_digit_lengths(&u, &v, 4) { + let (ue, ve) = (ue.unwrap(), ve.unwrap()); + if ve == 0 { + return; + } + let (q, r) = (ue / ve, ue % ve); + if let Some((qq, rr)) = u.clone().div(&v, true) { + assert_eq!( + u128::try_from(qq.clone()).unwrap(), + q, + "{:?} / {:?} ===> {:?} != {:?}", + u, + v, + qq, + q, + ); + assert_eq!( + u128::try_from(rr.clone()).unwrap(), + r, + "{:?} % {:?} ===> {:?} != {:?}", + u, + v, + rr, + r, + ); + } else if v.len() == 1 { + let qq = u.clone().div_unit(ve as Single); + assert_eq!( + u128::try_from(qq.clone()).unwrap(), + q, + "[single] {:?} / {:?} ===> {:?} != {:?}", + u, + v, + qq, + q, + ); + } else if v.msb() != 0 && u.msb() != 0 && u.len() > v.len() { + panic!("div returned none for an unexpected reason"); + } + } + + // Test against num_bigint + + // Equality + + assert_eq!(u.cmp(&v), num_u.cmp(&num_v)); + + 
// Addition + + let w = u.clone().add(&v); + let num_w = num_u.clone() + &num_v; + + assert_biguints_eq(&w, &num_w); + + // Subtraction + + if let Ok(w) = u.clone().sub(&v) { + let num_w = num_u.clone() - &num_v; + + assert_biguints_eq(&w, &num_w); + } + + // Multiplication + + let w = u.clone().mul(&v); + let num_w = num_u.clone() * &num_v; + + assert_biguints_eq(&w, &num_w); + + // Division + + if v.len() == 1 && v.get(0) != 0 { + let w = u.clone().div_unit(v.get(0)); + let num_w = num_u.clone() / &num_v; + assert_biguints_eq(&w, &num_w); + } else if u.len() > v.len() && v.len() > 0 { + let num_remainder = num_u.clone() % num_v.clone(); + + let (w, remainder) = u.clone().div(&v, return_remainder).unwrap(); + let num_w = num_u.clone() / &num_v; + + assert_biguints_eq(&w, &num_w); + + if return_remainder { + assert_biguints_eq(&remainder, &num_remainder); + } + } + }); + } } fn check_digit_lengths(u: &BigUint, v: &BigUint, max_limbs: usize) -> bool { - 1 <= u.len() && u.len() <= max_limbs && 1 <= v.len() && v.len() <= max_limbs + 1 <= u.len() && u.len() <= max_limbs && 1 <= v.len() && v.len() <= max_limbs } fn assert_biguints_eq(a: &BigUint, b: &num_bigint::BigUint) { - let mut a = a.clone(); - a.lstrip(); + let mut a = a.clone(); + a.lstrip(); - // `num_bigint::BigUint` doesn't expose it's internals, so we need to convert into that to - // compare. - let limbs = (0 .. a.len()).map(|i| a.get(i)).collect(); - let num_a = num_bigint::BigUint::new(limbs); + // `num_bigint::BigUint` doesn't expose it's internals, so we need to convert into that to + // compare. 
+ let limbs = (0..a.len()).map(|i| a.get(i)).collect(); + let num_a = num_bigint::BigUint::new(limbs); - assert!(&num_a == b, "\narithmetic: {:?}\nnum-bigint: {:?}", a, b); + assert!(&num_a == b, "\narithmetic: {:?}\nnum-bigint: {:?}", a, b); } diff --git a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs index c2dda3de22..fb143a9a84 100644 --- a/primitives/arithmetic/fuzzer/src/per_thing_rational.rs +++ b/primitives/arithmetic/fuzzer/src/per_thing_rational.rs @@ -23,100 +23,94 @@ //! `cargo hfuzz run-debug per_thing_rational hfuzz_workspace/per_thing_rational/*.fuzz`. use honggfuzz::fuzz; -use sp_arithmetic::{ - PerThing, PerU16, Percent, Perbill, Perquintill, traits::SaturatedConversion, -}; +use sp_arithmetic::{traits::SaturatedConversion, PerThing, PerU16, Perbill, Percent, Perquintill}; fn main() { - loop { - fuzz!(| - data: ((u16, u16), (u32, u32), (u64, u64)) - | { - - let (u16_pair, u32_pair, u64_pair) = data; - - // peru16 - let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), - 1, - ); - let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), - 1, - ); - let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = PerU16::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), - 1, - ); - - // percent - let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); - 
assert_per_thing_equal_error( - ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), - 1, - ); - - let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), - 1, - ); - - let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Percent::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), - 1, - ); - - // perbill - let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); - let ratio = Perbill::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), - 100, - ); - - let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Perbill::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), - 100, - ); - - // perquintillion - let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); - let ratio = Perquintill::from_rational_approximation(smaller, bigger); - assert_per_thing_equal_error( - ratio, - Perquintill::from_fraction(smaller as f64 / bigger.max(1) as f64), - 1000, - ); - - }) - } + loop { + fuzz!(|data: ((u16, u16), (u32, u32), (u64, u64))| { + let (u16_pair, u32_pair, u64_pair) = data; + + // peru16 + let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); + let ratio = PerU16::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), 
u32_pair.0.max(u32_pair.1)); + let ratio = PerU16::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = PerU16::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + PerU16::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + // percent + let (smaller, bigger) = (u16_pair.0.min(u16_pair.1), u16_pair.0.max(u16_pair.1)); + let ratio = Percent::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); + let ratio = Percent::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = Percent::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Percent::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1, + ); + + // perbill + let (smaller, bigger) = (u32_pair.0.min(u32_pair.1), u32_pair.0.max(u32_pair.1)); + let ratio = Perbill::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + 100, + ); + + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = Perbill::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Perbill::from_fraction(smaller as f64 / bigger.max(1) as f64), + 100, + ); + + // perquintillion + let (smaller, bigger) = (u64_pair.0.min(u64_pair.1), u64_pair.0.max(u64_pair.1)); + let ratio = 
Perquintill::from_rational_approximation(smaller, bigger); + assert_per_thing_equal_error( + ratio, + Perquintill::from_fraction(smaller as f64 / bigger.max(1) as f64), + 1000, + ); + }) + } } fn assert_per_thing_equal_error(a: T, b: T, err: u128) { - let a_abs = a.deconstruct().saturated_into::(); - let b_abs = b.deconstruct().saturated_into::(); - let diff = a_abs.max(b_abs) - a_abs.min(b_abs); - dbg!(&diff); - assert!(diff <= err, "{:?} !~ {:?}", a, b); + let a_abs = a.deconstruct().saturated_into::(); + let b_abs = b.deconstruct().saturated_into::(); + let diff = a_abs.max(b_abs) - a_abs.min(b_abs); + dbg!(&diff); + assert!(diff <= err, "{:?} !~ {:?}", a, b); } diff --git a/primitives/arithmetic/fuzzer/src/rational128.rs b/primitives/arithmetic/fuzzer/src/rational128.rs index 586a165272..4e7818d695 100644 --- a/primitives/arithmetic/fuzzer/src/rational128.rs +++ b/primitives/arithmetic/fuzzer/src/rational128.rs @@ -30,48 +30,48 @@ use honggfuzz::fuzz; use sp_arithmetic::{helpers_128bit::multiply_by_rational, traits::Zero}; fn main() { - loop { - fuzz!(|data: ([u8; 16], [u8; 16], [u8; 16])| { - let (a_bytes, b_bytes, c_bytes) = data; - let (a, b, c) = ( - u128::from_be_bytes(a_bytes), - u128::from_be_bytes(b_bytes), - u128::from_be_bytes(c_bytes), - ); + loop { + fuzz!(|data: ([u8; 16], [u8; 16], [u8; 16])| { + let (a_bytes, b_bytes, c_bytes) = data; + let (a, b, c) = ( + u128::from_be_bytes(a_bytes), + u128::from_be_bytes(b_bytes), + u128::from_be_bytes(c_bytes), + ); - println!("++ Equation: {} * {} / {}", a, b, c); + println!("++ Equation: {} * {} / {}", a, b, c); - // The point of this fuzzing is to make sure that `multiply_by_rational` is 100% - // accurate as long as the value fits in a u128. - if let Ok(result) = multiply_by_rational(a, b, c) { - let truth = mul_div(a, b, c); + // The point of this fuzzing is to make sure that `multiply_by_rational` is 100% + // accurate as long as the value fits in a u128. 
+ if let Ok(result) = multiply_by_rational(a, b, c) { + let truth = mul_div(a, b, c); - if result != truth && result != truth + 1 { - println!("++ Expected {}", truth); - println!("+++++++ Got {}", result); - panic!(); - } - } - }) - } + if result != truth && result != truth + 1 { + println!("++ Expected {}", truth); + println!("+++++++ Got {}", result); + panic!(); + } + } + }) + } } fn mul_div(a: u128, b: u128, c: u128) -> u128 { - use primitive_types::U256; - if a.is_zero() { - return Zero::zero(); - } - let c = c.max(1); + use primitive_types::U256; + if a.is_zero() { + return Zero::zero(); + } + let c = c.max(1); - // e for extended - let ae: U256 = a.into(); - let be: U256 = b.into(); - let ce: U256 = c.into(); + // e for extended + let ae: U256 = a.into(); + let be: U256 = b.into(); + let ce: U256 = c.into(); - let r = ae * be / ce; - if r > u128::max_value().into() { - a - } else { - r.as_u128() - } + let r = ae * be / ce; + if r > u128::max_value().into() { + a + } else { + r.as_u128() + } } diff --git a/primitives/arithmetic/src/biguint.rs b/primitives/arithmetic/src/biguint.rs index 6c3ca58a52..533bb9d636 100644 --- a/primitives/arithmetic/src/biguint.rs +++ b/primitives/arithmetic/src/biguint.rs @@ -17,7 +17,7 @@ //! Infinite precision unsigned integer for substrate runtime. use num_traits::Zero; -use sp_std::{cmp::Ordering, ops, prelude::*, cell::RefCell, convert::TryFrom}; +use sp_std::{cell::RefCell, cmp::Ordering, convert::TryFrom, ops, prelude::*}; // A sensible value for this would be half of the dword size of the host machine. Since the // runtime is compiled to 32bit webassembly, using 32 and 64 for single and double respectively @@ -33,18 +33,18 @@ const B: Double = Single::max_value() as Double + 1; /// Splits a [`Double`] limb number into a tuple of two [`Single`] limb numbers. 
pub fn split(a: Double) -> (Single, Single) { - let al = a as Single; - let ah = (a >> SHIFT) as Single; - (ah, al) + let al = a as Single; + let ah = (a >> SHIFT) as Single; + (ah, al) } /// Assumed as a given primitive. /// /// Multiplication of two singles, which at most yields 1 double. pub fn mul_single(a: Single, b: Single) -> Double { - let a: Double = a.into(); - let b: Double = b.into(); - a * b + let a: Double = a.into(); + let b: Double = b.into(); + a * b } /// Assumed as a given primitive. @@ -52,11 +52,11 @@ pub fn mul_single(a: Single, b: Single) -> Double { /// Addition of two singles, which at most takes a single limb of result and a carry, /// returned as a tuple respectively. pub fn add_single(a: Single, b: Single) -> (Single, Single) { - let a: Double = a.into(); - let b: Double = b.into(); - let q = a + b; - let (carry, r) = split(q); - (r, carry) + let a: Double = a.into(); + let b: Double = b.into(); + let q = a + b; + let (carry, r) = split(q); + (r, carry) } /// Assumed as a given primitive. @@ -64,454 +64,470 @@ pub fn add_single(a: Single, b: Single) -> (Single, Single) { /// Division of double by a single limb. Always returns a double limb of quotient and a single /// limb of remainder. fn div_single(a: Double, b: Single) -> (Double, Single) { - let b: Double = b.into(); - let q = a / b; - let r = a % b; - // both conversions are trivially safe. - (q, r as Single) + let b: Double = b.into(); + let q = a / b; + let r = a % b; + // both conversions are trivially safe. + (q, r as Single) } /// Simple wrapper around an infinitely large integer, represented as limbs of [`Single`]. #[derive(Clone, Default)] pub struct BigUint { - /// digits (limbs) of this number (sorted as msb -> lsd). - pub(crate) digits: Vec, + /// digits (limbs) of this number (sorted as msb -> lsd). + pub(crate) digits: Vec, } impl BigUint { - /// Create a new instance with `size` limbs. This prevents any number with zero limbs to be - /// created. 
- /// - /// The behavior of the type is undefined with zero limbs. - pub fn with_capacity(size: usize) -> Self { - Self { digits: vec![0; size.max(1)] } - } - - /// Raw constructor from custom limbs. If `limbs` is empty, `Zero::zero()` implementation is - /// used. - pub fn from_limbs(limbs: &[Single]) -> Self { - if !limbs.is_empty() { - Self { digits: limbs.to_vec() } - } else { - Zero::zero() - } - } - - /// Number of limbs. - pub fn len(&self) -> usize { self.digits.len() } - - /// A naive getter for limb at `index`. Note that the order is lsb -> msb. - /// - /// #### Panics - /// - /// This panics if index is out of range. - pub fn get(&self, index: usize) -> Single { - self.digits[self.len() - 1 - index] - } - - /// A naive getter for limb at `index`. Note that the order is lsb -> msb. - pub fn checked_get(&self, index: usize) -> Option { - let i = self.len().checked_sub(1)?; - let j = i.checked_sub(index)?; - self.digits.get(j).cloned() - } - - /// A naive setter for limb at `index`. Note that the order is lsb -> msb. - /// - /// #### Panics - /// - /// This panics if index is out of range. - pub fn set(&mut self, index: usize, value: Single) { - let len = self.digits.len(); - self.digits[len - 1 - index] = value; - } - - /// returns the least significant limb of the number. - /// - /// #### Panics - /// - /// While the constructor of the type prevents this, this can panic if `self` has no digits. - pub fn lsb(&self) -> Single { - self.digits[self.len() - 1] - } - - /// returns the most significant limb of the number. - /// - /// #### Panics - /// - /// While the constructor of the type prevents this, this can panic if `self` has no digits. - pub fn msb(&self) -> Single { - self.digits[0] - } - - /// Strips zeros from the left side (the most significant limbs) of `self`, if any. - pub fn lstrip(&mut self) { - // by definition, a big-int number should never have leading zero limbs. This function - // has the ability to cause this. 
There is nothing to do if the number already has 1 - // limb only. call it a day and return. - if self.len().is_zero() { return; } - let index = self.digits.iter().position(|&elem| elem != 0).unwrap_or(0); - - if index > 0 { - self.digits = self.digits[index..].to_vec() - } - } - - /// Zero-pad `self` from left to reach `size` limbs. Will not make any difference if `self` - /// is already bigger than `size` limbs. - pub fn lpad(&mut self, size: usize) { - let n = self.len(); - if n >= size { return; } - let pad = size - n; - let mut new_digits = (0..pad).map(|_| 0).collect::>(); - new_digits.extend(self.digits.iter()); - self.digits = new_digits; - } - - /// Adds `self` with `other`. self and other do not have to have any particular size. Given - /// that the `n = max{size(self), size(other)}`, it will produce a number with `n + 1` - /// limbs. - /// - /// This function does not strip the output and returns the original allocated `n + 1` - /// limbs. The caller may strip the output if desired. - /// - /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. - pub fn add(self, other: &Self) -> Self { - let n = self.len().max(other.len()); - let mut k: Double = 0; - let mut w = Self::with_capacity(n + 1); - - for j in 0..n { - let u = Double::from(self.checked_get(j).unwrap_or(0)); - let v = Double::from(other.checked_get(j).unwrap_or(0)); - let s = u + v + k; - w.set(j, (s % B) as Single); - k = s / B; - } - // k is always 0 or 1. - w.set(n, k as Single); - w - } - - /// Subtracts `other` from `self`. self and other do not have to have any particular size. - /// Given that the `n = max{size(self), size(other)}`, it will produce a number of size `n`. - /// - /// If `other` is bigger than `self`, `Err(B - borrow)` is returned. - /// - /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. 
- pub fn sub(self, other: &Self) -> Result { - let n = self.len().max(other.len()); - let mut k = 0; - let mut w = Self::with_capacity(n); - for j in 0..n { - let s = { - let u = Double::from(self.checked_get(j).unwrap_or(0)); - let v = Double::from(other.checked_get(j).unwrap_or(0)); - let mut needs_borrow = false; - let mut t = 0; - - if let Some(v) = u.checked_sub(v) { - if let Some(v2) = v.checked_sub(k) { - t = v2 % B; - k = 0; - } else { - needs_borrow = true; - } - } else { - needs_borrow = true; - } - if needs_borrow { - t = u + B - v - k; - k = 1; - } - t - }; - // PROOF: t either comes from `v2 % B`, or from `u + B - v - k`. The former is - // trivial. The latter will not overflow this branch will only happen if the sum of - // `u - v - k` part has been negative, hence `u + B - v - k < b`. - w.set(j, s as Single); - } - - if k.is_zero() { - Ok(w) - } else { - Err(w) - } - } - - /// Multiplies n-limb number `self` with m-limb number `other`. - /// - /// The resulting number will always have `n + m` limbs. - /// - /// This function does not strip the output and returns the original allocated `n + m` - /// limbs. The caller may strip the output if desired. - /// - /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. - pub fn mul(self, other: &Self) -> Self { - let n = self.len(); - let m = other.len(); - let mut w = Self::with_capacity(m + n); - - for j in 0..n { - if self.get(j) == 0 { - // Note: `with_capacity` allocates with 0. Explicitly set j + m to zero if - // otherwise. - continue; - } - - let mut k = 0; - for i in 0..m { - // PROOF: (B−1) × (B−1) + (B−1) + (B−1) = B^2 −1 < B^2. addition is safe. - let t = - mul_single(self.get(j), other.get(i)) - + Double::from(w.get(i + j)) - + Double::from(k); - w.set(i + j, (t % B) as Single); - // PROOF: (B^2 - 1) / B < B. conversion is safe. - k = (t / B) as Single; - } - w.set(j + m, k); - } - w - } - - /// Divides `self` by a single limb `other`. 
This can be used in cases where the original - /// division cannot work due to the divisor (`other`) being just one limb. - /// - /// Invariant: `other` cannot be zero. - pub fn div_unit(self, mut other: Single) -> Self { - other = other.max(1); - let n = self.len(); - let mut out = Self::with_capacity(n); - let mut r: Single = 0; - // PROOF: (B-1) * B + (B-1) still fits in double - let with_r = |x: Double, r: Single| { Double::from(r) * B + x }; - for d in (0..n).rev() { - let (q, rr) = div_single(with_r(self.get(d).into(), r), other) ; - out.set(d, q as Single); - r = rr; - } - out - } - - /// Divides an `n + m` limb self by a `n` limb `other`. The result is a `m + 1` limb - /// quotient and a `n` limb remainder, if enabled by passing `true` in `rem` argument, both - /// in the form of an option's `Ok`. - /// - /// - requires `other` to be stripped and have no leading zeros. - /// - requires `self` to be stripped and have no leading zeros. - /// - requires `other` to have at least two limbs. - /// - requires `self` to have a greater length compared to `other`. - /// - /// All arguments are examined without being stripped for the above conditions. If any of - /// the above fails, `None` is returned.` - /// - /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. - pub fn div(self, other: &Self, rem: bool) -> Option<(Self, Self)> { - if other.len() <= 1 - || other.msb() == 0 - || self.msb() == 0 - || self.len() <= other.len() - { - return None - } - let n = other.len(); - let m = self.len() - n; - - let mut q = Self::with_capacity(m + 1); - let mut r = Self::with_capacity(n); - - // PROOF: 0 <= normalizer_bits < SHIFT 0 <= normalizer < B. all conversions are - // safe. - let normalizer_bits = other.msb().leading_zeros() as Single; - let normalizer = (2 as Single).pow(normalizer_bits as u32) as Single; - - // step D1. 
- let mut self_norm = self.mul(&Self::from(normalizer)); - let mut other_norm = other.clone().mul(&Self::from(normalizer)); - - // defensive only; the mul implementation should always create this. - self_norm.lpad(n + m + 1); - other_norm.lstrip(); - - // step D2. - for j in (0..=m).rev() { - // step D3.0 Find an estimate of q[j], named qhat. - let (qhat, rhat) = { - // PROOF: this always fits into `Double`. In the context of Single = u8, and - // Double = u16, think of 255 * 256 + 255 which is just u16::max_value(). - let dividend = - Double::from(self_norm.get(j + n)) - * B - + Double::from(self_norm.get(j + n - 1)); - let divisor = other_norm.get(n - 1); - div_single(dividend, divisor) - }; - - // D3.1 test qhat - // replace qhat and rhat with RefCells. This helps share state with the closure - let qhat = RefCell::new(qhat); - let rhat = RefCell::new(Double::from(rhat)); - - let test = || { - // decrease qhat if it is bigger than the base (B) - let qhat_local = *qhat.borrow(); - let rhat_local = *rhat.borrow(); - let predicate_1 = qhat_local >= B; - let predicate_2 = { - let lhs = qhat_local * Double::from(other_norm.get(n - 2)); - let rhs = B * rhat_local + Double::from(self_norm.get(j + n - 2)); - lhs > rhs - }; - if predicate_1 || predicate_2 { - *qhat.borrow_mut() -= 1; - *rhat.borrow_mut() += Double::from(other_norm.get(n - 1)); - true - } else { - false - } - }; - - test(); - while (*rhat.borrow() as Double) < B { - if !test() { break; } - } - - let qhat = qhat.into_inner(); - // we don't need rhat anymore. just let it go out of scope when it does. 
- - // step D4 - let lhs = Self { digits: (j..=j+n).rev().map(|d| self_norm.get(d)).collect() }; - let rhs = other_norm.clone().mul(&Self::from(qhat)); - - let maybe_sub = lhs.sub(&rhs); - let mut negative = false; - let sub = match maybe_sub { - Ok(t) => t, - Err(t) => { negative = true; t } - }; - (j..=j+n).for_each(|d| { self_norm.set(d, sub.get(d - j)); }); - - // step D5 - // PROOF: the `test()` specifically decreases qhat until it is below `B`. conversion - // is safe. - q.set(j, qhat as Single); - - // step D6: add back if negative happened. - if negative { - q.set(j, q.get(j) - 1); - let u = Self { digits: (j..=j+n).rev().map(|d| self_norm.get(d)).collect() }; - let r = other_norm.clone().add(&u); - (j..=j+n).rev().for_each(|d| { self_norm.set(d, r.get(d - j)); }) - } - } - - // if requested, calculate remainder. - if rem { - // undo the normalization. - if normalizer_bits > 0 { - let s = SHIFT as u32; - let nb = normalizer_bits; - for d in 0..n-1 { - let v = self_norm.get(d) >> nb - | self_norm.get(d + 1).overflowing_shl(s - nb).0; - r.set(d, v); - } - r.set(n - 1, self_norm.get(n - 1) >> normalizer_bits); - } else { - r = self_norm; - } - } - - Some((q, r)) - } + /// Create a new instance with `size` limbs. This prevents any number with zero limbs to be + /// created. + /// + /// The behavior of the type is undefined with zero limbs. + pub fn with_capacity(size: usize) -> Self { + Self { + digits: vec![0; size.max(1)], + } + } + + /// Raw constructor from custom limbs. If `limbs` is empty, `Zero::zero()` implementation is + /// used. + pub fn from_limbs(limbs: &[Single]) -> Self { + if !limbs.is_empty() { + Self { + digits: limbs.to_vec(), + } + } else { + Zero::zero() + } + } + + /// Number of limbs. + pub fn len(&self) -> usize { + self.digits.len() + } + + /// A naive getter for limb at `index`. Note that the order is lsb -> msb. + /// + /// #### Panics + /// + /// This panics if index is out of range. 
+ pub fn get(&self, index: usize) -> Single { + self.digits[self.len() - 1 - index] + } + + /// A naive getter for limb at `index`. Note that the order is lsb -> msb. + pub fn checked_get(&self, index: usize) -> Option { + let i = self.len().checked_sub(1)?; + let j = i.checked_sub(index)?; + self.digits.get(j).cloned() + } + + /// A naive setter for limb at `index`. Note that the order is lsb -> msb. + /// + /// #### Panics + /// + /// This panics if index is out of range. + pub fn set(&mut self, index: usize, value: Single) { + let len = self.digits.len(); + self.digits[len - 1 - index] = value; + } + + /// returns the least significant limb of the number. + /// + /// #### Panics + /// + /// While the constructor of the type prevents this, this can panic if `self` has no digits. + pub fn lsb(&self) -> Single { + self.digits[self.len() - 1] + } + + /// returns the most significant limb of the number. + /// + /// #### Panics + /// + /// While the constructor of the type prevents this, this can panic if `self` has no digits. + pub fn msb(&self) -> Single { + self.digits[0] + } + + /// Strips zeros from the left side (the most significant limbs) of `self`, if any. + pub fn lstrip(&mut self) { + // by definition, a big-int number should never have leading zero limbs. This function + // has the ability to cause this. There is nothing to do if the number already has 1 + // limb only. call it a day and return. + if self.len().is_zero() { + return; + } + let index = self.digits.iter().position(|&elem| elem != 0).unwrap_or(0); + + if index > 0 { + self.digits = self.digits[index..].to_vec() + } + } + + /// Zero-pad `self` from left to reach `size` limbs. Will not make any difference if `self` + /// is already bigger than `size` limbs. 
+ pub fn lpad(&mut self, size: usize) { + let n = self.len(); + if n >= size { + return; + } + let pad = size - n; + let mut new_digits = (0..pad).map(|_| 0).collect::>(); + new_digits.extend(self.digits.iter()); + self.digits = new_digits; + } + + /// Adds `self` with `other`. self and other do not have to have any particular size. Given + /// that the `n = max{size(self), size(other)}`, it will produce a number with `n + 1` + /// limbs. + /// + /// This function does not strip the output and returns the original allocated `n + 1` + /// limbs. The caller may strip the output if desired. + /// + /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. + pub fn add(self, other: &Self) -> Self { + let n = self.len().max(other.len()); + let mut k: Double = 0; + let mut w = Self::with_capacity(n + 1); + + for j in 0..n { + let u = Double::from(self.checked_get(j).unwrap_or(0)); + let v = Double::from(other.checked_get(j).unwrap_or(0)); + let s = u + v + k; + w.set(j, (s % B) as Single); + k = s / B; + } + // k is always 0 or 1. + w.set(n, k as Single); + w + } + + /// Subtracts `other` from `self`. self and other do not have to have any particular size. + /// Given that the `n = max{size(self), size(other)}`, it will produce a number of size `n`. + /// + /// If `other` is bigger than `self`, `Err(B - borrow)` is returned. + /// + /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. 
+ pub fn sub(self, other: &Self) -> Result { + let n = self.len().max(other.len()); + let mut k = 0; + let mut w = Self::with_capacity(n); + for j in 0..n { + let s = { + let u = Double::from(self.checked_get(j).unwrap_or(0)); + let v = Double::from(other.checked_get(j).unwrap_or(0)); + let mut needs_borrow = false; + let mut t = 0; + + if let Some(v) = u.checked_sub(v) { + if let Some(v2) = v.checked_sub(k) { + t = v2 % B; + k = 0; + } else { + needs_borrow = true; + } + } else { + needs_borrow = true; + } + if needs_borrow { + t = u + B - v - k; + k = 1; + } + t + }; + // PROOF: t either comes from `v2 % B`, or from `u + B - v - k`. The former is + // trivial. The latter will not overflow this branch will only happen if the sum of + // `u - v - k` part has been negative, hence `u + B - v - k < b`. + w.set(j, s as Single); + } + + if k.is_zero() { + Ok(w) + } else { + Err(w) + } + } + + /// Multiplies n-limb number `self` with m-limb number `other`. + /// + /// The resulting number will always have `n + m` limbs. + /// + /// This function does not strip the output and returns the original allocated `n + m` + /// limbs. The caller may strip the output if desired. + /// + /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. + pub fn mul(self, other: &Self) -> Self { + let n = self.len(); + let m = other.len(); + let mut w = Self::with_capacity(m + n); + + for j in 0..n { + if self.get(j) == 0 { + // Note: `with_capacity` allocates with 0. Explicitly set j + m to zero if + // otherwise. + continue; + } + + let mut k = 0; + for i in 0..m { + // PROOF: (B−1) × (B−1) + (B−1) + (B−1) = B^2 −1 < B^2. addition is safe. + let t = mul_single(self.get(j), other.get(i)) + + Double::from(w.get(i + j)) + + Double::from(k); + w.set(i + j, (t % B) as Single); + // PROOF: (B^2 - 1) / B < B. conversion is safe. + k = (t / B) as Single; + } + w.set(j + m, k); + } + w + } + + /// Divides `self` by a single limb `other`. 
This can be used in cases where the original + /// division cannot work due to the divisor (`other`) being just one limb. + /// + /// Invariant: `other` cannot be zero. + pub fn div_unit(self, mut other: Single) -> Self { + other = other.max(1); + let n = self.len(); + let mut out = Self::with_capacity(n); + let mut r: Single = 0; + // PROOF: (B-1) * B + (B-1) still fits in double + let with_r = |x: Double, r: Single| Double::from(r) * B + x; + for d in (0..n).rev() { + let (q, rr) = div_single(with_r(self.get(d).into(), r), other); + out.set(d, q as Single); + r = rr; + } + out + } + + /// Divides an `n + m` limb self by a `n` limb `other`. The result is a `m + 1` limb + /// quotient and a `n` limb remainder, if enabled by passing `true` in `rem` argument, both + /// in the form of an option's `Ok`. + /// + /// - requires `other` to be stripped and have no leading zeros. + /// - requires `self` to be stripped and have no leading zeros. + /// - requires `other` to have at least two limbs. + /// - requires `self` to have a greater length compared to `other`. + /// + /// All arguments are examined without being stripped for the above conditions. If any of + /// the above fails, `None` is returned.` + /// + /// Taken from "The Art of Computer Programming" by D.E. Knuth, vol 2, chapter 4. + pub fn div(self, other: &Self, rem: bool) -> Option<(Self, Self)> { + if other.len() <= 1 || other.msb() == 0 || self.msb() == 0 || self.len() <= other.len() { + return None; + } + let n = other.len(); + let m = self.len() - n; + + let mut q = Self::with_capacity(m + 1); + let mut r = Self::with_capacity(n); + + // PROOF: 0 <= normalizer_bits < SHIFT 0 <= normalizer < B. all conversions are + // safe. + let normalizer_bits = other.msb().leading_zeros() as Single; + let normalizer = (2 as Single).pow(normalizer_bits as u32) as Single; + + // step D1. 
+ let mut self_norm = self.mul(&Self::from(normalizer)); + let mut other_norm = other.clone().mul(&Self::from(normalizer)); + + // defensive only; the mul implementation should always create this. + self_norm.lpad(n + m + 1); + other_norm.lstrip(); + + // step D2. + for j in (0..=m).rev() { + // step D3.0 Find an estimate of q[j], named qhat. + let (qhat, rhat) = { + // PROOF: this always fits into `Double`. In the context of Single = u8, and + // Double = u16, think of 255 * 256 + 255 which is just u16::max_value(). + let dividend = + Double::from(self_norm.get(j + n)) * B + Double::from(self_norm.get(j + n - 1)); + let divisor = other_norm.get(n - 1); + div_single(dividend, divisor) + }; + + // D3.1 test qhat + // replace qhat and rhat with RefCells. This helps share state with the closure + let qhat = RefCell::new(qhat); + let rhat = RefCell::new(Double::from(rhat)); + + let test = || { + // decrease qhat if it is bigger than the base (B) + let qhat_local = *qhat.borrow(); + let rhat_local = *rhat.borrow(); + let predicate_1 = qhat_local >= B; + let predicate_2 = { + let lhs = qhat_local * Double::from(other_norm.get(n - 2)); + let rhs = B * rhat_local + Double::from(self_norm.get(j + n - 2)); + lhs > rhs + }; + if predicate_1 || predicate_2 { + *qhat.borrow_mut() -= 1; + *rhat.borrow_mut() += Double::from(other_norm.get(n - 1)); + true + } else { + false + } + }; + + test(); + while (*rhat.borrow() as Double) < B { + if !test() { + break; + } + } + + let qhat = qhat.into_inner(); + // we don't need rhat anymore. just let it go out of scope when it does. 
+ + // step D4 + let lhs = Self { + digits: (j..=j + n).rev().map(|d| self_norm.get(d)).collect(), + }; + let rhs = other_norm.clone().mul(&Self::from(qhat)); + + let maybe_sub = lhs.sub(&rhs); + let mut negative = false; + let sub = match maybe_sub { + Ok(t) => t, + Err(t) => { + negative = true; + t + } + }; + (j..=j + n).for_each(|d| { + self_norm.set(d, sub.get(d - j)); + }); + + // step D5 + // PROOF: the `test()` specifically decreases qhat until it is below `B`. conversion + // is safe. + q.set(j, qhat as Single); + + // step D6: add back if negative happened. + if negative { + q.set(j, q.get(j) - 1); + let u = Self { + digits: (j..=j + n).rev().map(|d| self_norm.get(d)).collect(), + }; + let r = other_norm.clone().add(&u); + (j..=j + n).rev().for_each(|d| { + self_norm.set(d, r.get(d - j)); + }) + } + } + + // if requested, calculate remainder. + if rem { + // undo the normalization. + if normalizer_bits > 0 { + let s = SHIFT as u32; + let nb = normalizer_bits; + for d in 0..n - 1 { + let v = self_norm.get(d) >> nb | self_norm.get(d + 1).overflowing_shl(s - nb).0; + r.set(d, v); + } + r.set(n - 1, self_norm.get(n - 1) >> normalizer_bits); + } else { + r = self_norm; + } + } + + Some((q, r)) + } } impl sp_std::fmt::Debug for BigUint { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - write!( - f, - "BigUint {{ {:?} ({:?})}}", - self.digits, - u128::try_from(self.clone()).unwrap_or(0), - ) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { - Ok(()) - } - + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + write!( + f, + "BigUint {{ {:?} ({:?})}}", + self.digits, + u128::try_from(self.clone()).unwrap_or(0), + ) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter<'_>) -> sp_std::fmt::Result { + Ok(()) + } } impl PartialEq for BigUint { - fn eq(&self, other: 
&Self) -> bool { - self.cmp(other) == Ordering::Equal - } + fn eq(&self, other: &Self) -> bool { + self.cmp(other) == Ordering::Equal + } } impl Eq for BigUint {} impl Ord for BigUint { - fn cmp(&self, other: &Self) -> Ordering { - let lhs_first = self.digits.iter().position(|&e| e != 0); - let rhs_first = other.digits.iter().position(|&e| e != 0); - - match (lhs_first, rhs_first) { - // edge cases that should not happen. This basically means that one or both were - // zero. - (None, None) => Ordering::Equal, - (Some(_), None) => Ordering::Greater, - (None, Some(_)) => Ordering::Less, - (Some(lhs_idx), Some(rhs_idx)) => { - let lhs = &self.digits[lhs_idx..]; - let rhs = &other.digits[rhs_idx..]; - let len_cmp = lhs.len().cmp(&rhs.len()); - match len_cmp { - Ordering::Equal => lhs.cmp(rhs), - _ => len_cmp, - } - } - } - } + fn cmp(&self, other: &Self) -> Ordering { + let lhs_first = self.digits.iter().position(|&e| e != 0); + let rhs_first = other.digits.iter().position(|&e| e != 0); + + match (lhs_first, rhs_first) { + // edge cases that should not happen. This basically means that one or both were + // zero. 
+ (None, None) => Ordering::Equal, + (Some(_), None) => Ordering::Greater, + (None, Some(_)) => Ordering::Less, + (Some(lhs_idx), Some(rhs_idx)) => { + let lhs = &self.digits[lhs_idx..]; + let rhs = &other.digits[rhs_idx..]; + let len_cmp = lhs.len().cmp(&rhs.len()); + match len_cmp { + Ordering::Equal => lhs.cmp(rhs), + _ => len_cmp, + } + } + } + } } impl PartialOrd for BigUint { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl ops::Add for BigUint { - type Output = Self; - fn add(self, rhs: Self) -> Self::Output { - self.add(&rhs) - } + type Output = Self; + fn add(self, rhs: Self) -> Self::Output { + self.add(&rhs) + } } impl ops::Sub for BigUint { - type Output = Self; - fn sub(self, rhs: Self) -> Self::Output { - self.sub(&rhs).unwrap_or_else(|e| e) - } + type Output = Self; + fn sub(self, rhs: Self) -> Self::Output { + self.sub(&rhs).unwrap_or_else(|e| e) + } } impl ops::Mul for BigUint { - type Output = Self; - fn mul(self, rhs: Self) -> Self::Output { - self.mul(&rhs) - } + type Output = Self; + fn mul(self, rhs: Self) -> Self::Output { + self.mul(&rhs) + } } impl Zero for BigUint { - fn zero() -> Self { - Self { digits: vec![Zero::zero()] } - } - - fn is_zero(&self) -> bool { - self.digits.iter().all(|d| d.is_zero()) - } + fn zero() -> Self { + Self { + digits: vec![Zero::zero()], + } + } + + fn is_zero(&self) -> bool { + self.digits.iter().all(|d| d.is_zero()) + } } macro_rules! impl_try_from_number_for { @@ -552,184 +568,266 @@ macro_rules! 
impl_from_for_smaller_than_word { impl_from_for_smaller_than_word!(u8, u16, Single); impl From for BigUint { - fn from(a: Double) -> Self { - let (ah, al) = split(a); - Self { digits: vec![ah, al] } - } + fn from(a: Double) -> Self { + let (ah, al) = split(a); + Self { + digits: vec![ah, al], + } + } } #[cfg(test)] pub mod tests { - use super::*; - - fn with_limbs(n: usize) -> BigUint { - BigUint { digits: vec![1; n] } - } - - #[test] - fn split_works() { - let a = SHIFT / 2; - let b = SHIFT * 3 / 2; - let num: Double = 1 << a | 1 << b; - // example when `Single = u8` - // assert_eq!(num, 0b_0001_0000_0001_0000) - assert_eq!(split(num), (1 << a, 1 << a)); - } - - #[test] - fn strip_works() { - let mut a = BigUint::from_limbs(&[0, 1, 0]); - a.lstrip(); - assert_eq!(a, BigUint { digits: vec![1, 0] }); - - let mut a = BigUint::from_limbs(&[0, 0, 1]); - a.lstrip(); - assert_eq!(a, BigUint { digits: vec![1] }); - - let mut a = BigUint::from_limbs(&[0, 0]); - a.lstrip(); - assert_eq!(a, BigUint { digits: vec![0] }); - - let mut a = BigUint::from_limbs(&[0, 0, 0]); - a.lstrip(); - assert_eq!(a, BigUint { digits: vec![0] }); - } - - #[test] - fn lpad_works() { - let mut a = BigUint::from_limbs(&[0, 1, 0]); - a.lpad(2); - assert_eq!(a.digits, vec![0, 1, 0]); - - let mut a = BigUint::from_limbs(&[0, 1, 0]); - a.lpad(3); - assert_eq!(a.digits, vec![0, 1, 0]); - - let mut a = BigUint::from_limbs(&[0, 1, 0]); - a.lpad(4); - assert_eq!(a.digits, vec![0, 0, 1, 0]); - } - - #[test] - fn equality_works() { - assert_eq!( - BigUint { digits: vec![1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - true, - ); - assert_eq!( - BigUint { digits: vec![3, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - false, - ); - assert_eq!( - BigUint { digits: vec![0, 1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }, - true, - ); - } - - #[test] - fn ordering_works() { - assert!(BigUint { digits: vec![0] } < BigUint { digits: vec![1] }); - assert!(BigUint { digits: vec![0] } == BigUint { digits: vec![0] 
}); - assert!(BigUint { digits: vec![] } == BigUint { digits: vec![0] }); - assert!(BigUint { digits: vec![] } == BigUint { digits: vec![] }); - assert!(BigUint { digits: vec![] } < BigUint { digits: vec![1] }); - - assert!(BigUint { digits: vec![1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }); - assert!(BigUint { digits: vec![0, 1, 2, 3] } == BigUint { digits: vec![1, 2, 3] }); - - assert!(BigUint { digits: vec![1, 2, 4] } > BigUint { digits: vec![1, 2, 3] }); - assert!(BigUint { digits: vec![0, 1, 2, 4] } > BigUint { digits: vec![1, 2, 3] }); - assert!(BigUint { digits: vec![1, 2, 1, 0] } > BigUint { digits: vec![1, 2, 3] }); - - assert!(BigUint { digits: vec![0, 1, 2, 1] } < BigUint { digits: vec![1, 2, 3] }); - } - - #[test] - fn can_try_build_numbers_from_types() { - use sp_std::convert::TryFrom; - assert_eq!(u64::try_from(with_limbs(1)).unwrap(), 1); - assert_eq!(u64::try_from(with_limbs(2)).unwrap(), u32::max_value() as u64 + 2); - assert_eq!( - u64::try_from(with_limbs(3)).unwrap_err(), - "cannot fit a number into u64", - ); - assert_eq!( - u128::try_from(with_limbs(3)).unwrap(), - u32::max_value() as u128 + u64::max_value() as u128 + 3 - ); - } - - #[test] - fn zero_works() { - assert_eq!(BigUint::zero(), BigUint { digits: vec![0] }); - assert_eq!(BigUint { digits: vec![0, 1, 0] }.is_zero(), false); - assert_eq!(BigUint { digits: vec![0, 0, 0] }.is_zero(), true); - - let a = BigUint::zero(); - let b = BigUint::zero(); - let c = a * b; - assert_eq!(c.digits, vec![0, 0]); - } - - #[test] - fn sub_negative_works() { - assert_eq!( - BigUint::from(10 as Single).sub(&BigUint::from(5 as Single)).unwrap(), - BigUint::from(5 as Single) - ); - assert_eq!( - BigUint::from(10 as Single).sub(&BigUint::from(10 as Single)).unwrap(), - BigUint::from(0 as Single) - ); - assert_eq!( - BigUint::from(10 as Single).sub(&BigUint::from(13 as Single)).unwrap_err(), - BigUint::from((B - 3) as Single), - ); - } - - #[test] - fn mul_always_appends_one_digit() { - let a = 
BigUint::from(10 as Single); - let b = BigUint::from(4 as Single); - assert_eq!(a.len(), 1); - assert_eq!(b.len(), 1); - - let n = a.mul(&b); - - assert_eq!(n.len(), 2); - assert_eq!(n.digits, vec![0, 40]); - } - - #[test] - fn div_conditions_work() { - let a = BigUint { digits: vec![2] }; - let b = BigUint { digits: vec![1, 2] }; - let c = BigUint { digits: vec![1, 1, 2] }; - let d = BigUint { digits: vec![0, 2] }; - let e = BigUint { digits: vec![0, 1, 1, 2] }; - - assert!(a.clone().div(&b, true).is_none()); - assert!(c.clone().div(&a, true).is_none()); - assert!(c.clone().div(&d, true).is_none()); - assert!(e.clone().div(&a, true).is_none()); - - assert!(c.clone().div(&b, true).is_some()); - } - - #[test] - fn div_unit_works() { - let a = BigUint { digits: vec![100] }; - let b = BigUint { digits: vec![1, 100] }; - - assert_eq!(a.clone().div_unit(1), a); - assert_eq!(a.clone().div_unit(0), a); - assert_eq!(a.clone().div_unit(2), BigUint::from(50 as Single)); - assert_eq!(a.clone().div_unit(7), BigUint::from(14 as Single)); - - assert_eq!(b.clone().div_unit(1), b); - assert_eq!(b.clone().div_unit(0), b); - assert_eq!(b.clone().div_unit(2), BigUint::from(((B + 100) / 2) as Single)); - assert_eq!(b.clone().div_unit(7), BigUint::from(((B + 100) / 7) as Single)); - - } + use super::*; + + fn with_limbs(n: usize) -> BigUint { + BigUint { digits: vec![1; n] } + } + + #[test] + fn split_works() { + let a = SHIFT / 2; + let b = SHIFT * 3 / 2; + let num: Double = 1 << a | 1 << b; + // example when `Single = u8` + // assert_eq!(num, 0b_0001_0000_0001_0000) + assert_eq!(split(num), (1 << a, 1 << a)); + } + + #[test] + fn strip_works() { + let mut a = BigUint::from_limbs(&[0, 1, 0]); + a.lstrip(); + assert_eq!(a, BigUint { digits: vec![1, 0] }); + + let mut a = BigUint::from_limbs(&[0, 0, 1]); + a.lstrip(); + assert_eq!(a, BigUint { digits: vec![1] }); + + let mut a = BigUint::from_limbs(&[0, 0]); + a.lstrip(); + assert_eq!(a, BigUint { digits: vec![0] }); + + let mut a = 
BigUint::from_limbs(&[0, 0, 0]); + a.lstrip(); + assert_eq!(a, BigUint { digits: vec![0] }); + } + + #[test] + fn lpad_works() { + let mut a = BigUint::from_limbs(&[0, 1, 0]); + a.lpad(2); + assert_eq!(a.digits, vec![0, 1, 0]); + + let mut a = BigUint::from_limbs(&[0, 1, 0]); + a.lpad(3); + assert_eq!(a.digits, vec![0, 1, 0]); + + let mut a = BigUint::from_limbs(&[0, 1, 0]); + a.lpad(4); + assert_eq!(a.digits, vec![0, 0, 1, 0]); + } + + #[test] + fn equality_works() { + assert_eq!( + BigUint { + digits: vec![1, 2, 3] + } == BigUint { + digits: vec![1, 2, 3] + }, + true, + ); + assert_eq!( + BigUint { + digits: vec![3, 2, 3] + } == BigUint { + digits: vec![1, 2, 3] + }, + false, + ); + assert_eq!( + BigUint { + digits: vec![0, 1, 2, 3] + } == BigUint { + digits: vec![1, 2, 3] + }, + true, + ); + } + + #[test] + fn ordering_works() { + assert!(BigUint { digits: vec![0] } < BigUint { digits: vec![1] }); + assert!(BigUint { digits: vec![0] } == BigUint { digits: vec![0] }); + assert!(BigUint { digits: vec![] } == BigUint { digits: vec![0] }); + assert!(BigUint { digits: vec![] } == BigUint { digits: vec![] }); + assert!(BigUint { digits: vec![] } < BigUint { digits: vec![1] }); + + assert!( + BigUint { + digits: vec![1, 2, 3] + } == BigUint { + digits: vec![1, 2, 3] + } + ); + assert!( + BigUint { + digits: vec![0, 1, 2, 3] + } == BigUint { + digits: vec![1, 2, 3] + } + ); + + assert!( + BigUint { + digits: vec![1, 2, 4] + } > BigUint { + digits: vec![1, 2, 3] + } + ); + assert!( + BigUint { + digits: vec![0, 1, 2, 4] + } > BigUint { + digits: vec![1, 2, 3] + } + ); + assert!( + BigUint { + digits: vec![1, 2, 1, 0] + } > BigUint { + digits: vec![1, 2, 3] + } + ); + + assert!( + BigUint { + digits: vec![0, 1, 2, 1] + } < BigUint { + digits: vec![1, 2, 3] + } + ); + } + + #[test] + fn can_try_build_numbers_from_types() { + use sp_std::convert::TryFrom; + assert_eq!(u64::try_from(with_limbs(1)).unwrap(), 1); + assert_eq!( + u64::try_from(with_limbs(2)).unwrap(), + 
u32::max_value() as u64 + 2 + ); + assert_eq!( + u64::try_from(with_limbs(3)).unwrap_err(), + "cannot fit a number into u64", + ); + assert_eq!( + u128::try_from(with_limbs(3)).unwrap(), + u32::max_value() as u128 + u64::max_value() as u128 + 3 + ); + } + + #[test] + fn zero_works() { + assert_eq!(BigUint::zero(), BigUint { digits: vec![0] }); + assert_eq!( + BigUint { + digits: vec![0, 1, 0] + } + .is_zero(), + false + ); + assert_eq!( + BigUint { + digits: vec![0, 0, 0] + } + .is_zero(), + true + ); + + let a = BigUint::zero(); + let b = BigUint::zero(); + let c = a * b; + assert_eq!(c.digits, vec![0, 0]); + } + + #[test] + fn sub_negative_works() { + assert_eq!( + BigUint::from(10 as Single) + .sub(&BigUint::from(5 as Single)) + .unwrap(), + BigUint::from(5 as Single) + ); + assert_eq!( + BigUint::from(10 as Single) + .sub(&BigUint::from(10 as Single)) + .unwrap(), + BigUint::from(0 as Single) + ); + assert_eq!( + BigUint::from(10 as Single) + .sub(&BigUint::from(13 as Single)) + .unwrap_err(), + BigUint::from((B - 3) as Single), + ); + } + + #[test] + fn mul_always_appends_one_digit() { + let a = BigUint::from(10 as Single); + let b = BigUint::from(4 as Single); + assert_eq!(a.len(), 1); + assert_eq!(b.len(), 1); + + let n = a.mul(&b); + + assert_eq!(n.len(), 2); + assert_eq!(n.digits, vec![0, 40]); + } + + #[test] + fn div_conditions_work() { + let a = BigUint { digits: vec![2] }; + let b = BigUint { digits: vec![1, 2] }; + let c = BigUint { + digits: vec![1, 1, 2], + }; + let d = BigUint { digits: vec![0, 2] }; + let e = BigUint { + digits: vec![0, 1, 1, 2], + }; + + assert!(a.clone().div(&b, true).is_none()); + assert!(c.clone().div(&a, true).is_none()); + assert!(c.clone().div(&d, true).is_none()); + assert!(e.clone().div(&a, true).is_none()); + + assert!(c.clone().div(&b, true).is_some()); + } + + #[test] + fn div_unit_works() { + let a = BigUint { digits: vec![100] }; + let b = BigUint { + digits: vec![1, 100], + }; + + assert_eq!(a.clone().div_unit(1), 
a); + assert_eq!(a.clone().div_unit(0), a); + assert_eq!(a.clone().div_unit(2), BigUint::from(50 as Single)); + assert_eq!(a.clone().div_unit(7), BigUint::from(14 as Single)); + + assert_eq!(b.clone().div_unit(1), b); + assert_eq!(b.clone().div_unit(0), b); + assert_eq!( + b.clone().div_unit(2), + BigUint::from(((B + 100) / 2) as Single) + ); + assert_eq!( + b.clone().div_unit(7), + BigUint::from(((B + 100) / 7) as Single) + ); + } } diff --git a/primitives/arithmetic/src/fixed128.rs b/primitives/arithmetic/src/fixed128.rs index a0fafe5ee3..8934a076f7 100644 --- a/primitives/arithmetic/src/fixed128.rs +++ b/primitives/arithmetic/src/fixed128.rs @@ -14,16 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use codec::{Decode, Encode}; -use primitive_types::U256; use crate::{ - traits::{Bounded, Saturating, UniqueSaturatedInto, SaturatedConversion}, - PerThing, Perquintill, + traits::{Bounded, SaturatedConversion, Saturating, UniqueSaturatedInto}, + PerThing, Perquintill, }; +use codec::{Decode, Encode}; +use primitive_types::U256; use sp_std::{ - convert::{Into, TryFrom, TryInto}, - fmt, ops, - num::NonZeroI128, + convert::{Into, TryFrom, TryInto}, + fmt, + num::NonZeroI128, + ops, }; #[cfg(feature = "std")] @@ -38,694 +39,794 @@ pub struct Fixed128(i128); const DIV: i128 = 1_000_000_000_000_000_000; impl Fixed128 { - /// Create self from a natural number. - /// - /// Note that this might be lossy. - pub fn from_natural(int: i128) -> Self { - Self(int.saturating_mul(DIV)) - } - - /// Accuracy of `Fixed128`. - pub const fn accuracy() -> i128 { - DIV - } - - /// Raw constructor. Equal to `parts / DIV`. - pub const fn from_parts(parts: i128) -> Self { - Self(parts) - } - - /// Creates self from a rational number. Equal to `n/d`. - /// - /// Note that this might be lossy. Only use this if you are sure that `n * DIV` can fit into an - /// i128. 
- pub fn from_rational>(n: N, d: NonZeroI128) -> Self { - let n = n.unique_saturated_into(); - Self(n.saturating_mul(DIV.into()) / d.get()) - } - - /// Consume self and return the inner raw `i128` value. - /// - /// Note this is a low level function, as the returned value is represented with accuracy. - pub fn deconstruct(self) -> i128 { - self.0 - } - - /// Takes the reciprocal(inverse) of Fixed128, 1/x - pub fn recip(&self) -> Option { - Self::from_natural(1i128).checked_div(self) - } - - /// Checked add. Same semantic to `num_traits::CheckedAdd`. - pub fn checked_add(&self, rhs: &Self) -> Option { - self.0.checked_add(rhs.0).map(Self) - } - - /// Checked sub. Same semantic to `num_traits::CheckedSub`. - pub fn checked_sub(&self, rhs: &Self) -> Option { - self.0.checked_sub(rhs.0).map(Self) - } - - /// Checked mul. Same semantic to `num_traits::CheckedMul`. - pub fn checked_mul(&self, rhs: &Self) -> Option { - let signum = self.0.signum() * rhs.0.signum(); - let mut lhs = self.0; - if lhs.is_negative() { - lhs = lhs.saturating_mul(-1); - } - let mut rhs: i128 = rhs.0.saturated_into(); - if rhs.is_negative() { - rhs = rhs.saturating_mul(-1); - } - - U256::from(lhs) - .checked_mul(U256::from(rhs)) - .and_then(|n| n.checked_div(U256::from(DIV))) - .and_then(|n| TryInto::::try_into(n).ok()) - .map(|n| Self(n * signum)) - } - - /// Checked div. Same semantic to `num_traits::CheckedDiv`. 
- pub fn checked_div(&self, rhs: &Self) -> Option { - if rhs.0.signum() == 0 { - return None; - } - if self.0 == 0 { - return Some(*self); - } - - let signum = self.0.signum() / rhs.0.signum(); - let mut lhs: i128 = self.0; - if lhs.is_negative() { - lhs = lhs.saturating_mul(-1); - } - let mut rhs: i128 = rhs.0.saturated_into(); - if rhs.is_negative() { - rhs = rhs.saturating_mul(-1); - } - - U256::from(lhs) - .checked_mul(U256::from(DIV)) - .and_then(|n| n.checked_div(U256::from(rhs))) - .and_then(|n| TryInto::::try_into(n).ok()) - .map(|n| Self(n * signum)) - } - - /// Checked mul for int type `N`. - pub fn checked_mul_int(&self, other: &N) -> Option - where - N: Copy + TryFrom + TryInto, - { - N::try_into(*other).ok().and_then(|rhs| { - let mut lhs = self.0; - if lhs.is_negative() { - lhs = lhs.saturating_mul(-1); - } - let mut rhs: i128 = rhs.saturated_into(); - let signum = self.0.signum() * rhs.signum(); - if rhs.is_negative() { - rhs = rhs.saturating_mul(-1); - } - - U256::from(lhs) - .checked_mul(U256::from(rhs)) - .and_then(|n| n.checked_div(U256::from(DIV))) - .and_then(|n| TryInto::::try_into(n).ok()) - .and_then(|n| TryInto::::try_into(n * signum).ok()) - }) - } - - /// Checked mul for int type `N`. - pub fn saturating_mul_int(&self, other: &N) -> N - where - N: Copy + TryFrom + TryInto + Bounded, - { - self.checked_mul_int(other).unwrap_or_else(|| { - N::try_into(*other) - .map(|n| n.signum()) - .map(|n| n * self.0.signum()) - .map(|signum| { - if signum.is_negative() { - Bounded::min_value() - } else { - Bounded::max_value() - } - }) - .unwrap_or(Bounded::max_value()) - }) - } - - /// Checked div for int type `N`. 
- pub fn checked_div_int(&self, other: &N) -> Option - where - N: Copy + TryFrom + TryInto, - { - N::try_into(*other) - .ok() - .and_then(|n| self.0.checked_div(n)) - .and_then(|n| n.checked_div(DIV)) - .and_then(|n| TryInto::::try_into(n).ok()) - } - - pub fn zero() -> Self { - Self(0) - } - - pub fn is_zero(&self) -> bool { - self.0 == 0 - } - - /// Saturating absolute value. Returning MAX if `parts` == i128::MIN instead of overflowing. - pub fn saturating_abs(&self) -> Self { - if self.0 == i128::min_value() { - return Fixed128::max_value(); - } - - if self.0.is_negative() { - Fixed128::from_parts(self.0 * -1) - } else { - *self - } - } - - pub fn is_positive(&self) -> bool { - self.0.is_positive() - } - - pub fn is_negative(&self) -> bool { - self.0.is_negative() - } - - /// Performs a saturated multiply and accumulate by unsigned number. - /// - /// Returns a saturated `int + (self * int)`. - pub fn saturated_multiply_accumulate(self, int: N) -> N - where - N: TryFrom + From + UniqueSaturatedInto + Bounded + Clone + Saturating + - ops::Rem + ops::Div + ops::Mul + - ops::Add, - { - let div = DIV as u128; - let positive = self.0 > 0; - // safe to convert as absolute value. - let parts = self.0.checked_abs().map(|v| v as u128).unwrap_or(i128::max_value() as u128 + 1); - - - // will always fit. - let natural_parts = parts / div; - // might saturate. - let natural_parts: N = natural_parts.saturated_into(); - // fractional parts can always fit into u64. - let perquintill_parts = (parts % div) as u64; - - let n = int.clone().saturating_mul(natural_parts); - let p = Perquintill::from_parts(perquintill_parts) * int.clone(); - - // everything that needs to be either added or subtracted from the original weight. - let excess = n.saturating_add(p); - - if positive { - int.saturating_add(excess) - } else { - int.saturating_sub(excess) - } - } + /// Create self from a natural number. + /// + /// Note that this might be lossy. 
+ pub fn from_natural(int: i128) -> Self { + Self(int.saturating_mul(DIV)) + } + + /// Accuracy of `Fixed128`. + pub const fn accuracy() -> i128 { + DIV + } + + /// Raw constructor. Equal to `parts / DIV`. + pub const fn from_parts(parts: i128) -> Self { + Self(parts) + } + + /// Creates self from a rational number. Equal to `n/d`. + /// + /// Note that this might be lossy. Only use this if you are sure that `n * DIV` can fit into an + /// i128. + pub fn from_rational>(n: N, d: NonZeroI128) -> Self { + let n = n.unique_saturated_into(); + Self(n.saturating_mul(DIV.into()) / d.get()) + } + + /// Consume self and return the inner raw `i128` value. + /// + /// Note this is a low level function, as the returned value is represented with accuracy. + pub fn deconstruct(self) -> i128 { + self.0 + } + + /// Takes the reciprocal(inverse) of Fixed128, 1/x + pub fn recip(&self) -> Option { + Self::from_natural(1i128).checked_div(self) + } + + /// Checked add. Same semantic to `num_traits::CheckedAdd`. + pub fn checked_add(&self, rhs: &Self) -> Option { + self.0.checked_add(rhs.0).map(Self) + } + + /// Checked sub. Same semantic to `num_traits::CheckedSub`. + pub fn checked_sub(&self, rhs: &Self) -> Option { + self.0.checked_sub(rhs.0).map(Self) + } + + /// Checked mul. Same semantic to `num_traits::CheckedMul`. + pub fn checked_mul(&self, rhs: &Self) -> Option { + let signum = self.0.signum() * rhs.0.signum(); + let mut lhs = self.0; + if lhs.is_negative() { + lhs = lhs.saturating_mul(-1); + } + let mut rhs: i128 = rhs.0.saturated_into(); + if rhs.is_negative() { + rhs = rhs.saturating_mul(-1); + } + + U256::from(lhs) + .checked_mul(U256::from(rhs)) + .and_then(|n| n.checked_div(U256::from(DIV))) + .and_then(|n| TryInto::::try_into(n).ok()) + .map(|n| Self(n * signum)) + } + + /// Checked div. Same semantic to `num_traits::CheckedDiv`. 
+ pub fn checked_div(&self, rhs: &Self) -> Option { + if rhs.0.signum() == 0 { + return None; + } + if self.0 == 0 { + return Some(*self); + } + + let signum = self.0.signum() / rhs.0.signum(); + let mut lhs: i128 = self.0; + if lhs.is_negative() { + lhs = lhs.saturating_mul(-1); + } + let mut rhs: i128 = rhs.0.saturated_into(); + if rhs.is_negative() { + rhs = rhs.saturating_mul(-1); + } + + U256::from(lhs) + .checked_mul(U256::from(DIV)) + .and_then(|n| n.checked_div(U256::from(rhs))) + .and_then(|n| TryInto::::try_into(n).ok()) + .map(|n| Self(n * signum)) + } + + /// Checked mul for int type `N`. + pub fn checked_mul_int(&self, other: &N) -> Option + where + N: Copy + TryFrom + TryInto, + { + N::try_into(*other).ok().and_then(|rhs| { + let mut lhs = self.0; + if lhs.is_negative() { + lhs = lhs.saturating_mul(-1); + } + let mut rhs: i128 = rhs.saturated_into(); + let signum = self.0.signum() * rhs.signum(); + if rhs.is_negative() { + rhs = rhs.saturating_mul(-1); + } + + U256::from(lhs) + .checked_mul(U256::from(rhs)) + .and_then(|n| n.checked_div(U256::from(DIV))) + .and_then(|n| TryInto::::try_into(n).ok()) + .and_then(|n| TryInto::::try_into(n * signum).ok()) + }) + } + + /// Checked mul for int type `N`. + pub fn saturating_mul_int(&self, other: &N) -> N + where + N: Copy + TryFrom + TryInto + Bounded, + { + self.checked_mul_int(other).unwrap_or_else(|| { + N::try_into(*other) + .map(|n| n.signum()) + .map(|n| n * self.0.signum()) + .map(|signum| { + if signum.is_negative() { + Bounded::min_value() + } else { + Bounded::max_value() + } + }) + .unwrap_or(Bounded::max_value()) + }) + } + + /// Checked div for int type `N`. 
+ pub fn checked_div_int(&self, other: &N) -> Option + where + N: Copy + TryFrom + TryInto, + { + N::try_into(*other) + .ok() + .and_then(|n| self.0.checked_div(n)) + .and_then(|n| n.checked_div(DIV)) + .and_then(|n| TryInto::::try_into(n).ok()) + } + + pub fn zero() -> Self { + Self(0) + } + + pub fn is_zero(&self) -> bool { + self.0 == 0 + } + + /// Saturating absolute value. Returning MAX if `parts` == i128::MIN instead of overflowing. + pub fn saturating_abs(&self) -> Self { + if self.0 == i128::min_value() { + return Fixed128::max_value(); + } + + if self.0.is_negative() { + Fixed128::from_parts(self.0 * -1) + } else { + *self + } + } + + pub fn is_positive(&self) -> bool { + self.0.is_positive() + } + + pub fn is_negative(&self) -> bool { + self.0.is_negative() + } + + /// Performs a saturated multiply and accumulate by unsigned number. + /// + /// Returns a saturated `int + (self * int)`. + pub fn saturated_multiply_accumulate(self, int: N) -> N + where + N: TryFrom + + From + + UniqueSaturatedInto + + Bounded + + Clone + + Saturating + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add, + { + let div = DIV as u128; + let positive = self.0 > 0; + // safe to convert as absolute value. + let parts = self + .0 + .checked_abs() + .map(|v| v as u128) + .unwrap_or(i128::max_value() as u128 + 1); + + // will always fit. + let natural_parts = parts / div; + // might saturate. + let natural_parts: N = natural_parts.saturated_into(); + // fractional parts can always fit into u64. + let perquintill_parts = (parts % div) as u64; + + let n = int.clone().saturating_mul(natural_parts); + let p = Perquintill::from_parts(perquintill_parts) * int.clone(); + + // everything that needs to be either added or subtracted from the original weight. + let excess = n.saturating_add(p); + + if positive { + int.saturating_add(excess) + } else { + int.saturating_sub(excess) + } + } } /// Note that this is a standard, _potentially-panicking_, implementation. 
Use `Saturating` trait /// for safe addition. impl ops::Add for Fixed128 { - type Output = Self; + type Output = Self; - fn add(self, rhs: Self) -> Self::Output { - Self(self.0 + rhs.0) - } + fn add(self, rhs: Self) -> Self::Output { + Self(self.0 + rhs.0) + } } /// Note that this is a standard, _potentially-panicking_, implementation. Use `Saturating` trait /// for safe subtraction. impl ops::Sub for Fixed128 { - type Output = Self; + type Output = Self; - fn sub(self, rhs: Self) -> Self::Output { - Self(self.0 - rhs.0) - } + fn sub(self, rhs: Self) -> Self::Output { + Self(self.0 - rhs.0) + } } impl Saturating for Fixed128 { - fn saturating_add(self, rhs: Self) -> Self { - Self(self.0.saturating_add(rhs.0)) - } - - fn saturating_sub(self, rhs: Self) -> Self { - Self(self.0.saturating_sub(rhs.0)) - } - - fn saturating_mul(self, rhs: Self) -> Self { - self.checked_mul(&rhs).unwrap_or_else(|| { - if (self.0.signum() * rhs.0.signum()).is_negative() { - Bounded::min_value() - } else { - Bounded::max_value() - } - }) - } - - fn saturating_pow(self, exp: usize) -> Self { - if exp == 0 { - return Self::from_natural(1); - } - - let exp = exp as u64; - let msb_pos = 64 - exp.leading_zeros(); - - let mut result = Self::from_natural(1); - let mut pow_val = self; - for i in 0..msb_pos { - if ((1 << i) & exp) > 0 { - result = result.saturating_mul(pow_val); - } - pow_val = pow_val.saturating_mul(pow_val); - } - result - } + fn saturating_add(self, rhs: Self) -> Self { + Self(self.0.saturating_add(rhs.0)) + } + + fn saturating_sub(self, rhs: Self) -> Self { + Self(self.0.saturating_sub(rhs.0)) + } + + fn saturating_mul(self, rhs: Self) -> Self { + self.checked_mul(&rhs).unwrap_or_else(|| { + if (self.0.signum() * rhs.0.signum()).is_negative() { + Bounded::min_value() + } else { + Bounded::max_value() + } + }) + } + + fn saturating_pow(self, exp: usize) -> Self { + if exp == 0 { + return Self::from_natural(1); + } + + let exp = exp as u64; + let msb_pos = 64 - 
exp.leading_zeros(); + + let mut result = Self::from_natural(1); + let mut pow_val = self; + for i in 0..msb_pos { + if ((1 << i) & exp) > 0 { + result = result.saturating_mul(pow_val); + } + pow_val = pow_val.saturating_mul(pow_val); + } + result + } } impl Bounded for Fixed128 { - fn min_value() -> Self { - Self(Bounded::min_value()) - } + fn min_value() -> Self { + Self(Bounded::min_value()) + } - fn max_value() -> Self { - Self(Bounded::max_value()) - } + fn max_value() -> Self { + Self(Bounded::max_value()) + } } impl fmt::Debug for Fixed128 { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let integral = { - let int = self.0 / DIV; - let signum_for_zero = if int == 0 && self.is_negative() { "-" } else { "" }; - format!("{}{}", signum_for_zero, int) - }; - let fractional = format!("{:0>18}", (self.0 % DIV).abs()); - write!(f, "Fixed128({}.{})", integral, fractional) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let integral = { + let int = self.0 / DIV; + let signum_for_zero = if int == 0 && self.is_negative() { + "-" + } else { + "" + }; + format!("{}{}", signum_for_zero, int) + }; + let fractional = format!("{:0>18}", (self.0 % DIV).abs()); + write!(f, "Fixed128({}.{})", integral, fractional) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + Ok(()) + } } impl From

for Fixed128 { - fn from(val: P) -> Self { - let accuracy = P::ACCURACY.saturated_into().max(1) as i128; - let value = val.deconstruct().saturated_into() as i128; - Fixed128::from_rational(value, NonZeroI128::new(accuracy).unwrap()) - } + fn from(val: P) -> Self { + let accuracy = P::ACCURACY.saturated_into().max(1) as i128; + let value = val.deconstruct().saturated_into() as i128; + Fixed128::from_rational(value, NonZeroI128::new(accuracy).unwrap()) + } } #[cfg(feature = "std")] impl Fixed128 { - fn i128_str(&self) -> String { - format!("{}", &self.0) - } - - fn try_from_i128_str(s: &str) -> Result { - let parts: i128 = s.parse().map_err(|_| "invalid string input")?; - Ok(Self::from_parts(parts)) - } + fn i128_str(&self) -> String { + format!("{}", &self.0) + } + + fn try_from_i128_str(s: &str) -> Result { + let parts: i128 = s.parse().map_err(|_| "invalid string input")?; + Ok(Self::from_parts(parts)) + } } // Manual impl `Serialize` as serde_json does not support i128. // TODO: remove impl if issue https://github.com/serde-rs/json/issues/548 fixed. #[cfg(feature = "std")] impl Serialize for Fixed128 { - fn serialize(&self, serializer: S) -> Result - where - S: Serializer, - { - serializer.serialize_str(&self.i128_str()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.i128_str()) + } } // Manual impl `Serialize` as serde_json does not support i128. // TODO: remove impl if issue https://github.com/serde-rs/json/issues/548 fixed. 
#[cfg(feature = "std")] impl<'de> Deserialize<'de> for Fixed128 { - fn deserialize(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let s = String::deserialize(deserializer)?; - Fixed128::try_from_i128_str(&s).map_err(|err_str| de::Error::custom(err_str)) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let s = String::deserialize(deserializer)?; + Fixed128::try_from_i128_str(&s).map_err(|err_str| de::Error::custom(err_str)) + } } #[cfg(test)] mod tests { - use super::*; - use crate::{Perbill, Percent, Permill, Perquintill}; - - fn max() -> Fixed128 { - Fixed128::max_value() - } - - fn min() -> Fixed128 { - Fixed128::min_value() - } - - #[test] - fn fixed128_semantics() { - let a = Fixed128::from_rational(5, NonZeroI128::new(2).unwrap()); - let b = Fixed128::from_rational(10, NonZeroI128::new(4).unwrap()); - assert_eq!(a.0, 5 * DIV / 2); - assert_eq!(a, b); - - let a = Fixed128::from_rational(-5, NonZeroI128::new(1).unwrap()); - assert_eq!(a, Fixed128::from_natural(-5)); - - let a = Fixed128::from_rational(5, NonZeroI128::new(-1).unwrap()); - assert_eq!(a, Fixed128::from_natural(-5)); - - // biggest value that can be created. - assert_ne!(max(), Fixed128::from_natural(170_141_183_460_469_231_731)); - assert_eq!(max(), Fixed128::from_natural(170_141_183_460_469_231_732)); - - // the smallest value that can be created. 
- assert_ne!(min(), Fixed128::from_natural(-170_141_183_460_469_231_731)); - assert_eq!(min(), Fixed128::from_natural(-170_141_183_460_469_231_732)); - } - - #[test] - fn fixed128_operation() { - let a = Fixed128::from_natural(2); - let b = Fixed128::from_natural(1); - assert_eq!(a.checked_add(&b), Some(Fixed128::from_natural(1 + 2))); - assert_eq!(a.checked_sub(&b), Some(Fixed128::from_natural(2 - 1))); - assert_eq!(a.checked_mul(&b), Some(Fixed128::from_natural(1 * 2))); - assert_eq!( - a.checked_div(&b), - Some(Fixed128::from_rational(2, NonZeroI128::new(1).unwrap())) - ); - - let a = Fixed128::from_rational(5, NonZeroI128::new(2).unwrap()); - let b = Fixed128::from_rational(3, NonZeroI128::new(2).unwrap()); - assert_eq!( - a.checked_add(&b), - Some(Fixed128::from_rational(8, NonZeroI128::new(2).unwrap())) - ); - assert_eq!( - a.checked_sub(&b), - Some(Fixed128::from_rational(2, NonZeroI128::new(2).unwrap())) - ); - assert_eq!( - a.checked_mul(&b), - Some(Fixed128::from_rational(15, NonZeroI128::new(4).unwrap())) - ); - assert_eq!( - a.checked_div(&b), - Some(Fixed128::from_rational(10, NonZeroI128::new(6).unwrap())) - ); - - let a = Fixed128::from_natural(120); - assert_eq!(a.checked_div_int(&2i32), Some(60)); - - let a = Fixed128::from_rational(20, NonZeroI128::new(1).unwrap()); - assert_eq!(a.checked_div_int(&2i32), Some(10)); - - let a = Fixed128::from_natural(120); - assert_eq!(a.checked_mul_int(&2i32), Some(240)); - - let a = Fixed128::from_rational(1, NonZeroI128::new(2).unwrap()); - assert_eq!(a.checked_mul_int(&20i32), Some(10)); - - let a = Fixed128::from_rational(-1, NonZeroI128::new(2).unwrap()); - assert_eq!(a.checked_mul_int(&20i32), Some(-10)); - } - - #[test] - fn saturating_mul_should_work() { - let a = Fixed128::from_natural(-1); - assert_eq!(min().saturating_mul(a), max()); - - assert_eq!(Fixed128::from_natural(125).saturating_mul(a).deconstruct(), -125 * DIV); - - let a = Fixed128::from_rational(1, NonZeroI128::new(5).unwrap()); - 
assert_eq!(Fixed128::from_natural(125).saturating_mul(a).deconstruct(), 25 * DIV); - } - - #[test] - fn saturating_mul_int_works() { - let a = Fixed128::from_rational(10, NonZeroI128::new(1).unwrap()); - assert_eq!(a.saturating_mul_int(&i32::max_value()), i32::max_value()); - - let a = Fixed128::from_rational(-10, NonZeroI128::new(1).unwrap()); - assert_eq!(a.saturating_mul_int(&i32::max_value()), i32::min_value()); - - let a = Fixed128::from_rational(3, NonZeroI128::new(1).unwrap()); - assert_eq!(a.saturating_mul_int(&100i8), i8::max_value()); - - let a = Fixed128::from_rational(10, NonZeroI128::new(1).unwrap()); - assert_eq!(a.saturating_mul_int(&123i128), 1230); - - let a = Fixed128::from_rational(-10, NonZeroI128::new(1).unwrap()); - assert_eq!(a.saturating_mul_int(&123i128), -1230); - - assert_eq!(max().saturating_mul_int(&2i128), 340_282_366_920_938_463_463); - - assert_eq!(max().saturating_mul_int(&i128::min_value()), i128::min_value()); - - assert_eq!(min().saturating_mul_int(&i128::max_value()), i128::min_value()); - - assert_eq!(min().saturating_mul_int(&i128::min_value()), i128::max_value()); - } - - #[test] - fn zero_works() { - assert_eq!(Fixed128::zero(), Fixed128::from_natural(0)); - } - - #[test] - fn is_zero_works() { - assert!(Fixed128::zero().is_zero()); - assert!(!Fixed128::from_natural(1).is_zero()); - } - - #[test] - fn checked_div_with_zero_should_be_none() { - let a = Fixed128::from_natural(1); - let b = Fixed128::from_natural(0); - assert_eq!(a.checked_div(&b), None); - assert_eq!(b.checked_div(&a), Some(b)); - } - - #[test] - fn checked_div_int_with_zero_should_be_none() { - let a = Fixed128::from_natural(1); - assert_eq!(a.checked_div_int(&0i32), None); - let a = Fixed128::from_natural(0); - assert_eq!(a.checked_div_int(&1i32), Some(0)); - } - - #[test] - fn checked_div_with_zero_dividend_should_be_zero() { - let a = Fixed128::zero(); - let b = Fixed128::from_parts(1); - - assert_eq!(a.checked_div(&b), Some(Fixed128::zero())); - } - - 
#[test] - fn under_flow_should_be_none() { - let b = Fixed128::from_natural(1); - assert_eq!(min().checked_sub(&b), None); - } - - #[test] - fn over_flow_should_be_none() { - let a = Fixed128::from_parts(i128::max_value() - 1); - let b = Fixed128::from_parts(2); - assert_eq!(a.checked_add(&b), None); - - let a = Fixed128::max_value(); - let b = Fixed128::from_rational(2, NonZeroI128::new(1).unwrap()); - assert_eq!(a.checked_mul(&b), None); - - let a = Fixed128::from_natural(255); - let b = 2u8; - assert_eq!(a.checked_mul_int(&b), None); - - let a = Fixed128::from_natural(256); - let b = 1u8; - assert_eq!(a.checked_div_int(&b), None); - - let a = Fixed128::from_natural(256); - let b = -1i8; - assert_eq!(a.checked_div_int(&b), None); - } - - #[test] - fn checked_div_int_should_work() { - // 256 / 10 = 25 (25.6 as int = 25) - let a = Fixed128::from_natural(256); - let result = a.checked_div_int(&10i128).unwrap(); - assert_eq!(result, 25); - - // 256 / 100 = 2 (2.56 as int = 2) - let a = Fixed128::from_natural(256); - let result = a.checked_div_int(&100i128).unwrap(); - assert_eq!(result, 2); - - // 256 / 1000 = 0 (0.256 as int = 0) - let a = Fixed128::from_natural(256); - let result = a.checked_div_int(&1000i128).unwrap(); - assert_eq!(result, 0); - - // 256 / -1 = -256 - let a = Fixed128::from_natural(256); - let result = a.checked_div_int(&-1i128).unwrap(); - assert_eq!(result, -256); - - // -256 / -1 = 256 - let a = Fixed128::from_natural(-256); - let result = a.checked_div_int(&-1i128).unwrap(); - assert_eq!(result, 256); - - // 10 / -5 = -2 - let a = Fixed128::from_rational(20, NonZeroI128::new(2).unwrap()); - let result = a.checked_div_int(&-5i128).unwrap(); - assert_eq!(result, -2); - - // -170_141_183_460_469_231_731 / -2 = 85_070_591_730_234_615_865 - let result = min().checked_div_int(&-2i128).unwrap(); - assert_eq!(result, 85_070_591_730_234_615_865); - - // 85_070_591_730_234_615_865 * -2 = -170_141_183_460_469_231_730 - let result = 
Fixed128::from_natural(result).checked_mul_int(&-2i128).unwrap(); - assert_eq!(result, -170_141_183_460_469_231_730); - } - - #[test] - fn perthing_into_fixed_i128() { - let ten_percent_percent: Fixed128 = Percent::from_percent(10).into(); - assert_eq!(ten_percent_percent.deconstruct(), DIV / 10); - - let ten_percent_permill: Fixed128 = Permill::from_percent(10).into(); - assert_eq!(ten_percent_permill.deconstruct(), DIV / 10); - - let ten_percent_perbill: Fixed128 = Perbill::from_percent(10).into(); - assert_eq!(ten_percent_perbill.deconstruct(), DIV / 10); - - let ten_percent_perquintill: Fixed128 = Perquintill::from_percent(10).into(); - assert_eq!(ten_percent_perquintill.deconstruct(), DIV / 10); - } - - #[test] - fn recip_should_work() { - let a = Fixed128::from_natural(2); - assert_eq!( - a.recip(), - Some(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())) - ); - - let a = Fixed128::from_natural(2); - assert_eq!(a.recip().unwrap().checked_mul_int(&4i32), Some(2i32)); - - let a = Fixed128::from_rational(100, NonZeroI128::new(121).unwrap()); - assert_eq!( - a.recip(), - Some(Fixed128::from_rational(121, NonZeroI128::new(100).unwrap())) - ); - - let a = Fixed128::from_rational(1, NonZeroI128::new(2).unwrap()); - assert_eq!(a.recip().unwrap().checked_mul(&a), Some(Fixed128::from_natural(1))); - - let a = Fixed128::from_natural(0); - assert_eq!(a.recip(), None); - - let a = Fixed128::from_rational(-1, NonZeroI128::new(2).unwrap()); - assert_eq!(a.recip(), Some(Fixed128::from_natural(-2))); - } - - #[test] - fn serialize_deserialize_should_work() { - let two_point_five = Fixed128::from_rational(5, NonZeroI128::new(2).unwrap()); - let serialized = serde_json::to_string(&two_point_five).unwrap(); - assert_eq!(serialized, "\"2500000000000000000\""); - let deserialized: Fixed128 = serde_json::from_str(&serialized).unwrap(); - assert_eq!(deserialized, two_point_five); - - let minus_two_point_five = Fixed128::from_rational(-5, NonZeroI128::new(2).unwrap()); - let 
serialized = serde_json::to_string(&minus_two_point_five).unwrap(); - assert_eq!(serialized, "\"-2500000000000000000\""); - let deserialized: Fixed128 = serde_json::from_str(&serialized).unwrap(); - assert_eq!(deserialized, minus_two_point_five); - } - - #[test] - fn saturating_abs_should_work() { - // normal - assert_eq!(Fixed128::from_parts(1).saturating_abs(), Fixed128::from_parts(1)); - assert_eq!(Fixed128::from_parts(-1).saturating_abs(), Fixed128::from_parts(1)); - - // saturating - assert_eq!(Fixed128::min_value().saturating_abs(), Fixed128::max_value()); - } - - #[test] - fn is_positive_negative_should_work() { - let positive = Fixed128::from_parts(1); - assert!(positive.is_positive()); - assert!(!positive.is_negative()); - - let negative = Fixed128::from_parts(-1); - assert!(!negative.is_positive()); - assert!(negative.is_negative()); - - let zero = Fixed128::zero(); - assert!(!zero.is_positive()); - assert!(!zero.is_negative()); - } - - #[test] - fn fmt_should_work() { - let positive = Fixed128::from_parts(1000000000000000001); - assert_eq!(format!("{:?}", positive), "Fixed128(1.000000000000000001)"); - let negative = Fixed128::from_parts(-1000000000000000001); - assert_eq!(format!("{:?}", negative), "Fixed128(-1.000000000000000001)"); - - let positive_fractional = Fixed128::from_parts(1); - assert_eq!(format!("{:?}", positive_fractional), "Fixed128(0.000000000000000001)"); - let negative_fractional = Fixed128::from_parts(-1); - assert_eq!(format!("{:?}", negative_fractional), "Fixed128(-0.000000000000000001)"); - - let zero = Fixed128::zero(); - assert_eq!(format!("{:?}", zero), "Fixed128(0.000000000000000000)"); - } - - #[test] - fn saturating_pow_should_work() { - assert_eq!(Fixed128::from_natural(2).saturating_pow(0), Fixed128::from_natural(1)); - assert_eq!(Fixed128::from_natural(2).saturating_pow(1), Fixed128::from_natural(2)); - assert_eq!(Fixed128::from_natural(2).saturating_pow(2), Fixed128::from_natural(4)); - 
assert_eq!(Fixed128::from_natural(2).saturating_pow(3), Fixed128::from_natural(8)); - assert_eq!(Fixed128::from_natural(2).saturating_pow(50), Fixed128::from_natural(1125899906842624)); - - assert_eq!(Fixed128::from_natural(1).saturating_pow(1000), Fixed128::from_natural(1)); - assert_eq!(Fixed128::from_natural(-1).saturating_pow(1000), Fixed128::from_natural(1)); - assert_eq!(Fixed128::from_natural(-1).saturating_pow(1001), Fixed128::from_natural(-1)); - assert_eq!(Fixed128::from_natural(1).saturating_pow(usize::max_value()), Fixed128::from_natural(1)); - assert_eq!(Fixed128::from_natural(-1).saturating_pow(usize::max_value()), Fixed128::from_natural(-1)); - assert_eq!(Fixed128::from_natural(-1).saturating_pow(usize::max_value() - 1), Fixed128::from_natural(1)); - - assert_eq!(Fixed128::from_natural(114209).saturating_pow(4), Fixed128::from_natural(170137997018538053761)); - assert_eq!(Fixed128::from_natural(114209).saturating_pow(5), Fixed128::max_value()); - - assert_eq!(Fixed128::from_natural(1).saturating_pow(usize::max_value()), Fixed128::from_natural(1)); - assert_eq!(Fixed128::from_natural(0).saturating_pow(usize::max_value()), Fixed128::from_natural(0)); - assert_eq!(Fixed128::from_natural(2).saturating_pow(usize::max_value()), Fixed128::max_value()); - } + use super::*; + use crate::{Perbill, Percent, Permill, Perquintill}; + + fn max() -> Fixed128 { + Fixed128::max_value() + } + + fn min() -> Fixed128 { + Fixed128::min_value() + } + + #[test] + fn fixed128_semantics() { + let a = Fixed128::from_rational(5, NonZeroI128::new(2).unwrap()); + let b = Fixed128::from_rational(10, NonZeroI128::new(4).unwrap()); + assert_eq!(a.0, 5 * DIV / 2); + assert_eq!(a, b); + + let a = Fixed128::from_rational(-5, NonZeroI128::new(1).unwrap()); + assert_eq!(a, Fixed128::from_natural(-5)); + + let a = Fixed128::from_rational(5, NonZeroI128::new(-1).unwrap()); + assert_eq!(a, Fixed128::from_natural(-5)); + + // biggest value that can be created. 
+ assert_ne!(max(), Fixed128::from_natural(170_141_183_460_469_231_731)); + assert_eq!(max(), Fixed128::from_natural(170_141_183_460_469_231_732)); + + // the smallest value that can be created. + assert_ne!(min(), Fixed128::from_natural(-170_141_183_460_469_231_731)); + assert_eq!(min(), Fixed128::from_natural(-170_141_183_460_469_231_732)); + } + + #[test] + fn fixed128_operation() { + let a = Fixed128::from_natural(2); + let b = Fixed128::from_natural(1); + assert_eq!(a.checked_add(&b), Some(Fixed128::from_natural(1 + 2))); + assert_eq!(a.checked_sub(&b), Some(Fixed128::from_natural(2 - 1))); + assert_eq!(a.checked_mul(&b), Some(Fixed128::from_natural(1 * 2))); + assert_eq!( + a.checked_div(&b), + Some(Fixed128::from_rational(2, NonZeroI128::new(1).unwrap())) + ); + + let a = Fixed128::from_rational(5, NonZeroI128::new(2).unwrap()); + let b = Fixed128::from_rational(3, NonZeroI128::new(2).unwrap()); + assert_eq!( + a.checked_add(&b), + Some(Fixed128::from_rational(8, NonZeroI128::new(2).unwrap())) + ); + assert_eq!( + a.checked_sub(&b), + Some(Fixed128::from_rational(2, NonZeroI128::new(2).unwrap())) + ); + assert_eq!( + a.checked_mul(&b), + Some(Fixed128::from_rational(15, NonZeroI128::new(4).unwrap())) + ); + assert_eq!( + a.checked_div(&b), + Some(Fixed128::from_rational(10, NonZeroI128::new(6).unwrap())) + ); + + let a = Fixed128::from_natural(120); + assert_eq!(a.checked_div_int(&2i32), Some(60)); + + let a = Fixed128::from_rational(20, NonZeroI128::new(1).unwrap()); + assert_eq!(a.checked_div_int(&2i32), Some(10)); + + let a = Fixed128::from_natural(120); + assert_eq!(a.checked_mul_int(&2i32), Some(240)); + + let a = Fixed128::from_rational(1, NonZeroI128::new(2).unwrap()); + assert_eq!(a.checked_mul_int(&20i32), Some(10)); + + let a = Fixed128::from_rational(-1, NonZeroI128::new(2).unwrap()); + assert_eq!(a.checked_mul_int(&20i32), Some(-10)); + } + + #[test] + fn saturating_mul_should_work() { + let a = Fixed128::from_natural(-1); + 
assert_eq!(min().saturating_mul(a), max()); + + assert_eq!( + Fixed128::from_natural(125).saturating_mul(a).deconstruct(), + -125 * DIV + ); + + let a = Fixed128::from_rational(1, NonZeroI128::new(5).unwrap()); + assert_eq!( + Fixed128::from_natural(125).saturating_mul(a).deconstruct(), + 25 * DIV + ); + } + + #[test] + fn saturating_mul_int_works() { + let a = Fixed128::from_rational(10, NonZeroI128::new(1).unwrap()); + assert_eq!(a.saturating_mul_int(&i32::max_value()), i32::max_value()); + + let a = Fixed128::from_rational(-10, NonZeroI128::new(1).unwrap()); + assert_eq!(a.saturating_mul_int(&i32::max_value()), i32::min_value()); + + let a = Fixed128::from_rational(3, NonZeroI128::new(1).unwrap()); + assert_eq!(a.saturating_mul_int(&100i8), i8::max_value()); + + let a = Fixed128::from_rational(10, NonZeroI128::new(1).unwrap()); + assert_eq!(a.saturating_mul_int(&123i128), 1230); + + let a = Fixed128::from_rational(-10, NonZeroI128::new(1).unwrap()); + assert_eq!(a.saturating_mul_int(&123i128), -1230); + + assert_eq!( + max().saturating_mul_int(&2i128), + 340_282_366_920_938_463_463 + ); + + assert_eq!( + max().saturating_mul_int(&i128::min_value()), + i128::min_value() + ); + + assert_eq!( + min().saturating_mul_int(&i128::max_value()), + i128::min_value() + ); + + assert_eq!( + min().saturating_mul_int(&i128::min_value()), + i128::max_value() + ); + } + + #[test] + fn zero_works() { + assert_eq!(Fixed128::zero(), Fixed128::from_natural(0)); + } + + #[test] + fn is_zero_works() { + assert!(Fixed128::zero().is_zero()); + assert!(!Fixed128::from_natural(1).is_zero()); + } + + #[test] + fn checked_div_with_zero_should_be_none() { + let a = Fixed128::from_natural(1); + let b = Fixed128::from_natural(0); + assert_eq!(a.checked_div(&b), None); + assert_eq!(b.checked_div(&a), Some(b)); + } + + #[test] + fn checked_div_int_with_zero_should_be_none() { + let a = Fixed128::from_natural(1); + assert_eq!(a.checked_div_int(&0i32), None); + let a = Fixed128::from_natural(0); 
+ assert_eq!(a.checked_div_int(&1i32), Some(0)); + } + + #[test] + fn checked_div_with_zero_dividend_should_be_zero() { + let a = Fixed128::zero(); + let b = Fixed128::from_parts(1); + + assert_eq!(a.checked_div(&b), Some(Fixed128::zero())); + } + + #[test] + fn under_flow_should_be_none() { + let b = Fixed128::from_natural(1); + assert_eq!(min().checked_sub(&b), None); + } + + #[test] + fn over_flow_should_be_none() { + let a = Fixed128::from_parts(i128::max_value() - 1); + let b = Fixed128::from_parts(2); + assert_eq!(a.checked_add(&b), None); + + let a = Fixed128::max_value(); + let b = Fixed128::from_rational(2, NonZeroI128::new(1).unwrap()); + assert_eq!(a.checked_mul(&b), None); + + let a = Fixed128::from_natural(255); + let b = 2u8; + assert_eq!(a.checked_mul_int(&b), None); + + let a = Fixed128::from_natural(256); + let b = 1u8; + assert_eq!(a.checked_div_int(&b), None); + + let a = Fixed128::from_natural(256); + let b = -1i8; + assert_eq!(a.checked_div_int(&b), None); + } + + #[test] + fn checked_div_int_should_work() { + // 256 / 10 = 25 (25.6 as int = 25) + let a = Fixed128::from_natural(256); + let result = a.checked_div_int(&10i128).unwrap(); + assert_eq!(result, 25); + + // 256 / 100 = 2 (2.56 as int = 2) + let a = Fixed128::from_natural(256); + let result = a.checked_div_int(&100i128).unwrap(); + assert_eq!(result, 2); + + // 256 / 1000 = 0 (0.256 as int = 0) + let a = Fixed128::from_natural(256); + let result = a.checked_div_int(&1000i128).unwrap(); + assert_eq!(result, 0); + + // 256 / -1 = -256 + let a = Fixed128::from_natural(256); + let result = a.checked_div_int(&-1i128).unwrap(); + assert_eq!(result, -256); + + // -256 / -1 = 256 + let a = Fixed128::from_natural(-256); + let result = a.checked_div_int(&-1i128).unwrap(); + assert_eq!(result, 256); + + // 10 / -5 = -2 + let a = Fixed128::from_rational(20, NonZeroI128::new(2).unwrap()); + let result = a.checked_div_int(&-5i128).unwrap(); + assert_eq!(result, -2); + + // 
-170_141_183_460_469_231_731 / -2 = 85_070_591_730_234_615_865 + let result = min().checked_div_int(&-2i128).unwrap(); + assert_eq!(result, 85_070_591_730_234_615_865); + + // 85_070_591_730_234_615_865 * -2 = -170_141_183_460_469_231_730 + let result = Fixed128::from_natural(result) + .checked_mul_int(&-2i128) + .unwrap(); + assert_eq!(result, -170_141_183_460_469_231_730); + } + + #[test] + fn perthing_into_fixed_i128() { + let ten_percent_percent: Fixed128 = Percent::from_percent(10).into(); + assert_eq!(ten_percent_percent.deconstruct(), DIV / 10); + + let ten_percent_permill: Fixed128 = Permill::from_percent(10).into(); + assert_eq!(ten_percent_permill.deconstruct(), DIV / 10); + + let ten_percent_perbill: Fixed128 = Perbill::from_percent(10).into(); + assert_eq!(ten_percent_perbill.deconstruct(), DIV / 10); + + let ten_percent_perquintill: Fixed128 = Perquintill::from_percent(10).into(); + assert_eq!(ten_percent_perquintill.deconstruct(), DIV / 10); + } + + #[test] + fn recip_should_work() { + let a = Fixed128::from_natural(2); + assert_eq!( + a.recip(), + Some(Fixed128::from_rational(1, NonZeroI128::new(2).unwrap())) + ); + + let a = Fixed128::from_natural(2); + assert_eq!(a.recip().unwrap().checked_mul_int(&4i32), Some(2i32)); + + let a = Fixed128::from_rational(100, NonZeroI128::new(121).unwrap()); + assert_eq!( + a.recip(), + Some(Fixed128::from_rational(121, NonZeroI128::new(100).unwrap())) + ); + + let a = Fixed128::from_rational(1, NonZeroI128::new(2).unwrap()); + assert_eq!( + a.recip().unwrap().checked_mul(&a), + Some(Fixed128::from_natural(1)) + ); + + let a = Fixed128::from_natural(0); + assert_eq!(a.recip(), None); + + let a = Fixed128::from_rational(-1, NonZeroI128::new(2).unwrap()); + assert_eq!(a.recip(), Some(Fixed128::from_natural(-2))); + } + + #[test] + fn serialize_deserialize_should_work() { + let two_point_five = Fixed128::from_rational(5, NonZeroI128::new(2).unwrap()); + let serialized = serde_json::to_string(&two_point_five).unwrap(); 
+ assert_eq!(serialized, "\"2500000000000000000\""); + let deserialized: Fixed128 = serde_json::from_str(&serialized).unwrap(); + assert_eq!(deserialized, two_point_five); + + let minus_two_point_five = Fixed128::from_rational(-5, NonZeroI128::new(2).unwrap()); + let serialized = serde_json::to_string(&minus_two_point_five).unwrap(); + assert_eq!(serialized, "\"-2500000000000000000\""); + let deserialized: Fixed128 = serde_json::from_str(&serialized).unwrap(); + assert_eq!(deserialized, minus_two_point_five); + } + + #[test] + fn saturating_abs_should_work() { + // normal + assert_eq!( + Fixed128::from_parts(1).saturating_abs(), + Fixed128::from_parts(1) + ); + assert_eq!( + Fixed128::from_parts(-1).saturating_abs(), + Fixed128::from_parts(1) + ); + + // saturating + assert_eq!( + Fixed128::min_value().saturating_abs(), + Fixed128::max_value() + ); + } + + #[test] + fn is_positive_negative_should_work() { + let positive = Fixed128::from_parts(1); + assert!(positive.is_positive()); + assert!(!positive.is_negative()); + + let negative = Fixed128::from_parts(-1); + assert!(!negative.is_positive()); + assert!(negative.is_negative()); + + let zero = Fixed128::zero(); + assert!(!zero.is_positive()); + assert!(!zero.is_negative()); + } + + #[test] + fn fmt_should_work() { + let positive = Fixed128::from_parts(1000000000000000001); + assert_eq!(format!("{:?}", positive), "Fixed128(1.000000000000000001)"); + let negative = Fixed128::from_parts(-1000000000000000001); + assert_eq!(format!("{:?}", negative), "Fixed128(-1.000000000000000001)"); + + let positive_fractional = Fixed128::from_parts(1); + assert_eq!( + format!("{:?}", positive_fractional), + "Fixed128(0.000000000000000001)" + ); + let negative_fractional = Fixed128::from_parts(-1); + assert_eq!( + format!("{:?}", negative_fractional), + "Fixed128(-0.000000000000000001)" + ); + + let zero = Fixed128::zero(); + assert_eq!(format!("{:?}", zero), "Fixed128(0.000000000000000000)"); + } + + #[test] + fn 
saturating_pow_should_work() { + assert_eq!( + Fixed128::from_natural(2).saturating_pow(0), + Fixed128::from_natural(1) + ); + assert_eq!( + Fixed128::from_natural(2).saturating_pow(1), + Fixed128::from_natural(2) + ); + assert_eq!( + Fixed128::from_natural(2).saturating_pow(2), + Fixed128::from_natural(4) + ); + assert_eq!( + Fixed128::from_natural(2).saturating_pow(3), + Fixed128::from_natural(8) + ); + assert_eq!( + Fixed128::from_natural(2).saturating_pow(50), + Fixed128::from_natural(1125899906842624) + ); + + assert_eq!( + Fixed128::from_natural(1).saturating_pow(1000), + Fixed128::from_natural(1) + ); + assert_eq!( + Fixed128::from_natural(-1).saturating_pow(1000), + Fixed128::from_natural(1) + ); + assert_eq!( + Fixed128::from_natural(-1).saturating_pow(1001), + Fixed128::from_natural(-1) + ); + assert_eq!( + Fixed128::from_natural(1).saturating_pow(usize::max_value()), + Fixed128::from_natural(1) + ); + assert_eq!( + Fixed128::from_natural(-1).saturating_pow(usize::max_value()), + Fixed128::from_natural(-1) + ); + assert_eq!( + Fixed128::from_natural(-1).saturating_pow(usize::max_value() - 1), + Fixed128::from_natural(1) + ); + + assert_eq!( + Fixed128::from_natural(114209).saturating_pow(4), + Fixed128::from_natural(170137997018538053761) + ); + assert_eq!( + Fixed128::from_natural(114209).saturating_pow(5), + Fixed128::max_value() + ); + + assert_eq!( + Fixed128::from_natural(1).saturating_pow(usize::max_value()), + Fixed128::from_natural(1) + ); + assert_eq!( + Fixed128::from_natural(0).saturating_pow(usize::max_value()), + Fixed128::from_natural(0) + ); + assert_eq!( + Fixed128::from_natural(2).saturating_pow(usize::max_value()), + Fixed128::max_value() + ); + } } diff --git a/primitives/arithmetic/src/fixed64.rs b/primitives/arithmetic/src/fixed64.rs index af4dbf34e2..8fa656174f 100644 --- a/primitives/arithmetic/src/fixed64.rs +++ b/primitives/arithmetic/src/fixed64.rs @@ -14,16 +14,18 @@ // You should have received a copy of the GNU General Public 
License // along with Substrate. If not, see . -use sp_std::{ - ops, prelude::*, - convert::{TryFrom, TryInto}, -}; -use codec::{Encode, Decode}; use crate::{ - Perbill, - traits::{ - SaturatedConversion, CheckedSub, CheckedAdd, CheckedDiv, Bounded, UniqueSaturatedInto, Saturating - } + traits::{ + Bounded, CheckedAdd, CheckedDiv, CheckedSub, SaturatedConversion, Saturating, + UniqueSaturatedInto, + }, + Perbill, +}; +use codec::{Decode, Encode}; +use sp_std::{ + convert::{TryFrom, TryInto}, + ops, + prelude::*, }; /// An unsigned fixed point number. Can hold any value in the range [-9_223_372_036, 9_223_372_036] @@ -35,302 +37,321 @@ pub struct Fixed64(i64); const DIV: i64 = 1_000_000_000; impl Fixed64 { - /// creates self from a natural number. - /// - /// Note that this might be lossy. - pub fn from_natural(int: i64) -> Self { - Self(int.saturating_mul(DIV)) - } - - /// Return the accuracy of the type. Given that this function returns the value `X`, it means - /// that an instance composed of `X` parts (`Fixed64::from_parts(X)`) is equal to `1`. - pub fn accuracy() -> i64 { - DIV - } - - /// Consume self and return the inner value. - pub fn into_inner(self) -> i64 { self.0 } - - /// Raw constructor. Equal to `parts / 1_000_000_000`. - pub fn from_parts(parts: i64) -> Self { - Self(parts) - } - - /// creates self from a rational number. Equal to `n/d`. - /// - /// Note that this might be lossy. - pub fn from_rational(n: i64, d: u64) -> Self { - Self( - (i128::from(n).saturating_mul(i128::from(DIV)) / i128::from(d).max(1)) - .try_into() - .unwrap_or_else(|_| Bounded::max_value()) - ) - } - - /// Performs a saturated multiply and accumulate by unsigned number. - /// - /// Returns a saturated `int + (self * int)`. 
- pub fn saturated_multiply_accumulate(self, int: N) -> N - where - N: TryFrom + From + UniqueSaturatedInto + Bounded + Clone + Saturating + - ops::Rem + ops::Div + ops::Mul + - ops::Add, - { - let div = DIV as u64; - let positive = self.0 > 0; - // safe to convert as absolute value. - let parts = self.0.checked_abs().map(|v| v as u64).unwrap_or(i64::max_value() as u64 + 1); - - - // will always fit. - let natural_parts = parts / div; - // might saturate. - let natural_parts: N = natural_parts.saturated_into(); - // fractional parts can always fit into u32. - let perbill_parts = (parts % div) as u32; - - let n = int.clone().saturating_mul(natural_parts); - let p = Perbill::from_parts(perbill_parts) * int.clone(); - - // everything that needs to be either added or subtracted from the original weight. - let excess = n.saturating_add(p); - - if positive { - int.saturating_add(excess) - } else { - int.saturating_sub(excess) - } - } - - pub fn is_negative(&self) -> bool { - self.0.is_negative() - } + /// creates self from a natural number. + /// + /// Note that this might be lossy. + pub fn from_natural(int: i64) -> Self { + Self(int.saturating_mul(DIV)) + } + + /// Return the accuracy of the type. Given that this function returns the value `X`, it means + /// that an instance composed of `X` parts (`Fixed64::from_parts(X)`) is equal to `1`. + pub fn accuracy() -> i64 { + DIV + } + + /// Consume self and return the inner value. + pub fn into_inner(self) -> i64 { + self.0 + } + + /// Raw constructor. Equal to `parts / 1_000_000_000`. + pub fn from_parts(parts: i64) -> Self { + Self(parts) + } + + /// creates self from a rational number. Equal to `n/d`. + /// + /// Note that this might be lossy. + pub fn from_rational(n: i64, d: u64) -> Self { + Self( + (i128::from(n).saturating_mul(i128::from(DIV)) / i128::from(d).max(1)) + .try_into() + .unwrap_or_else(|_| Bounded::max_value()), + ) + } + + /// Performs a saturated multiply and accumulate by unsigned number. 
+ /// + /// Returns a saturated `int + (self * int)`. + pub fn saturated_multiply_accumulate(self, int: N) -> N + where + N: TryFrom + + From + + UniqueSaturatedInto + + Bounded + + Clone + + Saturating + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add, + { + let div = DIV as u64; + let positive = self.0 > 0; + // safe to convert as absolute value. + let parts = self + .0 + .checked_abs() + .map(|v| v as u64) + .unwrap_or(i64::max_value() as u64 + 1); + + // will always fit. + let natural_parts = parts / div; + // might saturate. + let natural_parts: N = natural_parts.saturated_into(); + // fractional parts can always fit into u32. + let perbill_parts = (parts % div) as u32; + + let n = int.clone().saturating_mul(natural_parts); + let p = Perbill::from_parts(perbill_parts) * int.clone(); + + // everything that needs to be either added or subtracted from the original weight. + let excess = n.saturating_add(p); + + if positive { + int.saturating_add(excess) + } else { + int.saturating_sub(excess) + } + } + + pub fn is_negative(&self) -> bool { + self.0.is_negative() + } } impl Saturating for Fixed64 { - fn saturating_add(self, rhs: Self) -> Self { - Self(self.0.saturating_add(rhs.0)) - } + fn saturating_add(self, rhs: Self) -> Self { + Self(self.0.saturating_add(rhs.0)) + } - fn saturating_mul(self, rhs: Self) -> Self { - Self(self.0.saturating_mul(rhs.0) / DIV) - } + fn saturating_mul(self, rhs: Self) -> Self { + Self(self.0.saturating_mul(rhs.0) / DIV) + } - fn saturating_sub(self, rhs: Self) -> Self { - Self(self.0.saturating_sub(rhs.0)) - } + fn saturating_sub(self, rhs: Self) -> Self { + Self(self.0.saturating_sub(rhs.0)) + } - fn saturating_pow(self, exp: usize) -> Self { - Self(self.0.saturating_pow(exp as u32)) - } + fn saturating_pow(self, exp: usize) -> Self { + Self(self.0.saturating_pow(exp as u32)) + } } /// Use `Saturating` trait for safe addition. 
impl ops::Add for Fixed64 { - type Output = Self; + type Output = Self; - fn add(self, rhs: Self) -> Self::Output { - Self(self.0 + rhs.0) - } + fn add(self, rhs: Self) -> Self::Output { + Self(self.0 + rhs.0) + } } /// Use `Saturating` trait for safe subtraction. impl ops::Sub for Fixed64 { - type Output = Self; + type Output = Self; - fn sub(self, rhs: Self) -> Self::Output { - Self(self.0 - rhs.0) - } + fn sub(self, rhs: Self) -> Self::Output { + Self(self.0 - rhs.0) + } } /// Use `CheckedDiv` trait for safe division. impl ops::Div for Fixed64 { - type Output = Self; - - fn div(self, rhs: Self) -> Self::Output { - if rhs.0 == 0 { - let zero = 0; - return Fixed64::from_parts( self.0 / zero); - } - let (n, d) = if rhs.0 < 0 { - (-self.0, rhs.0.abs() as u64) - } else { - (self.0, rhs.0 as u64) - }; - Fixed64::from_rational(n, d) - } + type Output = Self; + + fn div(self, rhs: Self) -> Self::Output { + if rhs.0 == 0 { + let zero = 0; + return Fixed64::from_parts(self.0 / zero); + } + let (n, d) = if rhs.0 < 0 { + (-self.0, rhs.0.abs() as u64) + } else { + (self.0, rhs.0 as u64) + }; + Fixed64::from_rational(n, d) + } } impl CheckedSub for Fixed64 { - fn checked_sub(&self, rhs: &Self) -> Option { - self.0.checked_sub(rhs.0).map(Self) - } + fn checked_sub(&self, rhs: &Self) -> Option { + self.0.checked_sub(rhs.0).map(Self) + } } impl CheckedAdd for Fixed64 { - fn checked_add(&self, rhs: &Self) -> Option { - self.0.checked_add(rhs.0).map(Self) - } + fn checked_add(&self, rhs: &Self) -> Option { + self.0.checked_add(rhs.0).map(Self) + } } impl CheckedDiv for Fixed64 { - fn checked_div(&self, rhs: &Self) -> Option { - if rhs.0 == 0 { - None - } else { - Some(*self / *rhs) - } - } + fn checked_div(&self, rhs: &Self) -> Option { + if rhs.0 == 0 { + None + } else { + Some(*self / *rhs) + } + } } impl sp_std::fmt::Debug for Fixed64 { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let integral = { - let int = self.0 / DIV; 
- let signum_for_zero = if int == 0 && self.is_negative() { "-" } else { "" }; - format!("{}{}", signum_for_zero, int) - }; - let fractional = format!("{:0>9}", (self.0 % DIV).abs()); - write!(f, "Fixed64({}.{})", integral, fractional) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let integral = { + let int = self.0 / DIV; + let signum_for_zero = if int == 0 && self.is_negative() { + "-" + } else { + "" + }; + format!("{}{}", signum_for_zero, int) + }; + let fractional = format!("{:0>9}", (self.0 % DIV).abs()); + write!(f, "Fixed64({}.{})", integral, fractional) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - - fn max() -> Fixed64 { - Fixed64::from_parts(i64::max_value()) - } - - #[test] - fn fixed64_semantics() { - assert_eq!(Fixed64::from_rational(5, 2).0, 5 * 1_000_000_000 / 2); - assert_eq!(Fixed64::from_rational(5, 2), Fixed64::from_rational(10, 4)); - assert_eq!(Fixed64::from_rational(5, 0), Fixed64::from_rational(5, 1)); - - // biggest value that can be created. 
- assert_ne!(max(), Fixed64::from_natural(9_223_372_036)); - assert_eq!(max(), Fixed64::from_natural(9_223_372_037)); - } - - #[test] - fn fixed_64_growth_decrease_curve() { - let test_set = vec![0u32, 1, 10, 1000, 1_000_000_000]; - - // negative (1/2) - let mut fm = Fixed64::from_rational(-1, 2); - test_set.clone().into_iter().for_each(|i| { - assert_eq!(fm.saturated_multiply_accumulate(i) as i32, i as i32 - i as i32 / 2); - }); - - // unit (1) multiplier - fm = Fixed64::from_parts(0); - test_set.clone().into_iter().for_each(|i| { - assert_eq!(fm.saturated_multiply_accumulate(i), i); - }); - - // i.5 multiplier - fm = Fixed64::from_rational(1, 2); - test_set.clone().into_iter().for_each(|i| { - assert_eq!(fm.saturated_multiply_accumulate(i), i * 3 / 2); - }); - - // dual multiplier - fm = Fixed64::from_rational(1, 1); - test_set.clone().into_iter().for_each(|i| { - assert_eq!(fm.saturated_multiply_accumulate(i), i * 2); - }); - } - - macro_rules! saturating_mul_acc_test { - ($num_type:tt) => { - assert_eq!( - Fixed64::from_rational(100, 1).saturated_multiply_accumulate(10 as $num_type), - 1010, - ); - assert_eq!( - Fixed64::from_rational(100, 2).saturated_multiply_accumulate(10 as $num_type), - 510, - ); - assert_eq!( - Fixed64::from_rational(100, 3).saturated_multiply_accumulate(0 as $num_type), - 0, - ); - assert_eq!( - Fixed64::from_rational(5, 1).saturated_multiply_accumulate($num_type::max_value()), - $num_type::max_value() - ); - assert_eq!( - max().saturated_multiply_accumulate($num_type::max_value()), - $num_type::max_value() - ); - } - } - - #[test] - fn fixed64_multiply_accumulate_works() { - saturating_mul_acc_test!(u32); - saturating_mul_acc_test!(u64); - saturating_mul_acc_test!(u128); - } - - #[test] - fn div_works() { - let a = Fixed64::from_rational(12, 10); - let b = Fixed64::from_rational(10, 1); - assert_eq!(a / b, Fixed64::from_rational(12, 100)); - - let a = Fixed64::from_rational(12, 10); - let b = Fixed64::from_rational(1, 100); - 
assert_eq!(a / b, Fixed64::from_rational(120, 1)); - - let a = Fixed64::from_rational(12, 100); - let b = Fixed64::from_rational(10, 1); - assert_eq!(a / b, Fixed64::from_rational(12, 1000)); - - let a = Fixed64::from_rational(12, 100); - let b = Fixed64::from_rational(1, 100); - assert_eq!(a / b, Fixed64::from_rational(12, 1)); - - let a = Fixed64::from_rational(-12, 10); - let b = Fixed64::from_rational(10, 1); - assert_eq!(a / b, Fixed64::from_rational(-12, 100)); - - let a = Fixed64::from_rational(12, 10); - let b = Fixed64::from_rational(-10, 1); - assert_eq!(a / b, Fixed64::from_rational(-12, 100)); - - let a = Fixed64::from_rational(-12, 10); - let b = Fixed64::from_rational(-10, 1); - assert_eq!(a / b, Fixed64::from_rational(12, 100)); - } - - #[test] - #[should_panic(expected = "attempt to divide by zero")] - fn div_zero() { - let a = Fixed64::from_rational(12, 10); - let b = Fixed64::from_natural(0); - let _ = a / b; - } - - #[test] - fn checked_div_zero() { - let a = Fixed64::from_rational(12, 10); - let b = Fixed64::from_natural(0); - assert_eq!(a.checked_div(&b), None); - } - - #[test] - fn checked_div_non_zero() { - let a = Fixed64::from_rational(12, 10); - let b = Fixed64::from_rational(1, 100); - assert_eq!(a.checked_div(&b), Some(Fixed64::from_rational(120, 1))); - } + use super::*; + + fn max() -> Fixed64 { + Fixed64::from_parts(i64::max_value()) + } + + #[test] + fn fixed64_semantics() { + assert_eq!(Fixed64::from_rational(5, 2).0, 5 * 1_000_000_000 / 2); + assert_eq!(Fixed64::from_rational(5, 2), Fixed64::from_rational(10, 4)); + assert_eq!(Fixed64::from_rational(5, 0), Fixed64::from_rational(5, 1)); + + // biggest value that can be created. 
+ assert_ne!(max(), Fixed64::from_natural(9_223_372_036)); + assert_eq!(max(), Fixed64::from_natural(9_223_372_037)); + } + + #[test] + fn fixed_64_growth_decrease_curve() { + let test_set = vec![0u32, 1, 10, 1000, 1_000_000_000]; + + // negative (1/2) + let mut fm = Fixed64::from_rational(-1, 2); + test_set.clone().into_iter().for_each(|i| { + assert_eq!( + fm.saturated_multiply_accumulate(i) as i32, + i as i32 - i as i32 / 2 + ); + }); + + // unit (1) multiplier + fm = Fixed64::from_parts(0); + test_set.clone().into_iter().for_each(|i| { + assert_eq!(fm.saturated_multiply_accumulate(i), i); + }); + + // i.5 multiplier + fm = Fixed64::from_rational(1, 2); + test_set.clone().into_iter().for_each(|i| { + assert_eq!(fm.saturated_multiply_accumulate(i), i * 3 / 2); + }); + + // dual multiplier + fm = Fixed64::from_rational(1, 1); + test_set.clone().into_iter().for_each(|i| { + assert_eq!(fm.saturated_multiply_accumulate(i), i * 2); + }); + } + + macro_rules! saturating_mul_acc_test { + ($num_type:tt) => { + assert_eq!( + Fixed64::from_rational(100, 1).saturated_multiply_accumulate(10 as $num_type), + 1010, + ); + assert_eq!( + Fixed64::from_rational(100, 2).saturated_multiply_accumulate(10 as $num_type), + 510, + ); + assert_eq!( + Fixed64::from_rational(100, 3).saturated_multiply_accumulate(0 as $num_type), + 0, + ); + assert_eq!( + Fixed64::from_rational(5, 1).saturated_multiply_accumulate($num_type::max_value()), + $num_type::max_value() + ); + assert_eq!( + max().saturated_multiply_accumulate($num_type::max_value()), + $num_type::max_value() + ); + }; + } + + #[test] + fn fixed64_multiply_accumulate_works() { + saturating_mul_acc_test!(u32); + saturating_mul_acc_test!(u64); + saturating_mul_acc_test!(u128); + } + + #[test] + fn div_works() { + let a = Fixed64::from_rational(12, 10); + let b = Fixed64::from_rational(10, 1); + assert_eq!(a / b, Fixed64::from_rational(12, 100)); + + let a = Fixed64::from_rational(12, 10); + let b = Fixed64::from_rational(1, 100); + 
assert_eq!(a / b, Fixed64::from_rational(120, 1)); + + let a = Fixed64::from_rational(12, 100); + let b = Fixed64::from_rational(10, 1); + assert_eq!(a / b, Fixed64::from_rational(12, 1000)); + + let a = Fixed64::from_rational(12, 100); + let b = Fixed64::from_rational(1, 100); + assert_eq!(a / b, Fixed64::from_rational(12, 1)); + + let a = Fixed64::from_rational(-12, 10); + let b = Fixed64::from_rational(10, 1); + assert_eq!(a / b, Fixed64::from_rational(-12, 100)); + + let a = Fixed64::from_rational(12, 10); + let b = Fixed64::from_rational(-10, 1); + assert_eq!(a / b, Fixed64::from_rational(-12, 100)); + + let a = Fixed64::from_rational(-12, 10); + let b = Fixed64::from_rational(-10, 1); + assert_eq!(a / b, Fixed64::from_rational(12, 100)); + } + + #[test] + #[should_panic(expected = "attempt to divide by zero")] + fn div_zero() { + let a = Fixed64::from_rational(12, 10); + let b = Fixed64::from_natural(0); + let _ = a / b; + } + + #[test] + fn checked_div_zero() { + let a = Fixed64::from_rational(12, 10); + let b = Fixed64::from_natural(0); + assert_eq!(a.checked_div(&b), None); + } + + #[test] + fn checked_div_non_zero() { + let a = Fixed64::from_rational(12, 10); + let b = Fixed64::from_rational(1, 100); + assert_eq!(a.checked_div(&b), Some(Fixed64::from_rational(120, 1))); + } } diff --git a/primitives/arithmetic/src/helpers_128bit.rs b/primitives/arithmetic/src/helpers_128bit.rs index bf8315b9b6..f56d413ca7 100644 --- a/primitives/arithmetic/src/helpers_128bit.rs +++ b/primitives/arithmetic/src/helpers_128bit.rs @@ -21,38 +21,42 @@ use crate::biguint; use num_traits::Zero; -use sp_std::{cmp::{min, max}, convert::TryInto, mem}; +use sp_std::{ + cmp::{max, min}, + convert::TryInto, + mem, +}; /// Helper gcd function used in Rational128 implementation. 
pub fn gcd(a: u128, b: u128) -> u128 { - match ((a, b), (a & 1, b & 1)) { - ((x, y), _) if x == y => y, - ((0, x), _) | ((x, 0), _) => x, - ((x, y), (0, 1)) | ((y, x), (1, 0)) => gcd(x >> 1, y), - ((x, y), (0, 0)) => gcd(x >> 1, y >> 1) << 1, - ((x, y), (1, 1)) => { - let (x, y) = (min(x, y), max(x, y)); - gcd((y - x) >> 1, x) - }, - _ => unreachable!(), - } + match ((a, b), (a & 1, b & 1)) { + ((x, y), _) if x == y => y, + ((0, x), _) | ((x, 0), _) => x, + ((x, y), (0, 1)) | ((y, x), (1, 0)) => gcd(x >> 1, y), + ((x, y), (0, 0)) => gcd(x >> 1, y >> 1) << 1, + ((x, y), (1, 1)) => { + let (x, y) = (min(x, y), max(x, y)); + gcd((y - x) >> 1, x) + } + _ => unreachable!(), + } } /// split a u128 into two u64 limbs pub fn split(a: u128) -> (u64, u64) { - let al = a as u64; - let ah = (a >> 64) as u64; - (ah, al) + let al = a as u64; + let ah = (a >> 64) as u64; + (ah, al) } /// Convert a u128 to a u32 based biguint. pub fn to_big_uint(x: u128) -> biguint::BigUint { - let (xh, xl) = split(x); - let (xhh, xhl) = biguint::split(xh); - let (xlh, xll) = biguint::split(xl); - let mut n = biguint::BigUint::from_limbs(&[xhh, xhl, xlh, xll]); - n.lstrip(); - n + let (xh, xl) = split(x); + let (xhh, xhl) = biguint::split(xh); + let (xlh, xll) = biguint::split(xl); + let mut n = biguint::BigUint::from_limbs(&[xhh, xhl, xlh, xll]); + n.lstrip(); + n } /// Safely and accurately compute `a * b / c`. The approach is: @@ -62,51 +66,56 @@ pub fn to_big_uint(x: u128) -> biguint::BigUint { /// /// Invariant: c must be greater than or equal to 1. pub fn multiply_by_rational(mut a: u128, mut b: u128, mut c: u128) -> Result { - if a.is_zero() || b.is_zero() { return Ok(Zero::zero()); } - c = c.max(1); + if a.is_zero() || b.is_zero() { + return Ok(Zero::zero()); + } + c = c.max(1); - // a and b are interchangeable by definition in this function. It always helps to assume the - // bigger of which is being multiplied by a `0 < b/c < 1`. Hence, a should be the bigger and - // b the smaller one. 
- if b > a { - mem::swap(&mut a, &mut b); - } + // a and b are interchangeable by definition in this function. It always helps to assume the + // bigger of which is being multiplied by a `0 < b/c < 1`. Hence, a should be the bigger and + // b the smaller one. + if b > a { + mem::swap(&mut a, &mut b); + } - // Attempt to perform the division first - if a % c == 0 { - a /= c; - c = 1; - } else if b % c == 0 { - b /= c; - c = 1; - } + // Attempt to perform the division first + if a % c == 0 { + a /= c; + c = 1; + } else if b % c == 0 { + b /= c; + c = 1; + } - if let Some(x) = a.checked_mul(b) { - // This is the safest way to go. Try it. - Ok(x / c) - } else { - let a_num = to_big_uint(a); - let b_num = to_big_uint(b); - let c_num = to_big_uint(c); + if let Some(x) = a.checked_mul(b) { + // This is the safest way to go. Try it. + Ok(x / c) + } else { + let a_num = to_big_uint(a); + let b_num = to_big_uint(b); + let c_num = to_big_uint(c); - let mut ab = a_num * b_num; - ab.lstrip(); - let mut q = if c_num.len() == 1 { - // PROOF: if `c_num.len() == 1` then `c` fits in one limb. - ab.div_unit(c as biguint::Single) - } else { - // PROOF: both `ab` and `c` cannot have leading zero limbs; if length of `c` is 1, - // the previous branch would handle. Also, if ab for sure has a bigger size than - // c, because `a.checked_mul(b)` has failed, hence ab must be at least one limb - // bigger than c. In this case, returning zero is defensive-only and div should - // always return Some. - let (mut q, r) = ab.div(&c_num, true).unwrap_or((Zero::zero(), Zero::zero())); - let r: u128 = r.try_into() - .expect("reminder of div by c is always less than c; qed"); - if r > (c / 2) { q = q.add(&to_big_uint(1)); } - q - }; - q.lstrip(); - q.try_into().map_err(|_| "result cannot fit in u128") - } + let mut ab = a_num * b_num; + ab.lstrip(); + let mut q = if c_num.len() == 1 { + // PROOF: if `c_num.len() == 1` then `c` fits in one limb. 
+ ab.div_unit(c as biguint::Single) + } else { + // PROOF: both `ab` and `c` cannot have leading zero limbs; if length of `c` is 1, + // the previous branch would handle. Also, if ab for sure has a bigger size than + // c, because `a.checked_mul(b)` has failed, hence ab must be at least one limb + // bigger than c. In this case, returning zero is defensive-only and div should + // always return Some. + let (mut q, r) = ab.div(&c_num, true).unwrap_or((Zero::zero(), Zero::zero())); + let r: u128 = r + .try_into() + .expect("reminder of div by c is always less than c; qed"); + if r > (c / 2) { + q = q.add(&to_big_uint(1)); + } + q + }; + q.lstrip(); + q.try_into().map_err(|_| "result cannot fit in u128") + } } diff --git a/primitives/arithmetic/src/lib.rs b/primitives/arithmetic/src/lib.rs index fb70b13a15..a34418335b 100644 --- a/primitives/arithmetic/src/lib.rs +++ b/primitives/arithmetic/src/lib.rs @@ -21,38 +21,38 @@ /// Copied from `sp-runtime` and documented there. #[macro_export] macro_rules! assert_eq_error_rate { - ($x:expr, $y:expr, $error:expr $(,)?) => { - assert!( - ($x) >= (($y) - ($error)) && ($x) <= (($y) + ($error)), - "{:?} != {:?} (with error rate {:?})", - $x, - $y, - $error, - ); - }; + ($x:expr, $y:expr, $error:expr $(,)?) 
=> { + assert!( + ($x) >= (($y) - ($error)) && ($x) <= (($y) + ($error)), + "{:?} != {:?} (with error rate {:?})", + $x, + $y, + $error, + ); + }; } pub mod biguint; +mod fixed128; +mod fixed64; pub mod helpers_128bit; -pub mod traits; mod per_things; -mod fixed64; -mod fixed128; mod rational128; +pub mod traits; -pub use fixed64::Fixed64; pub use fixed128::Fixed128; -pub use per_things::{PerThing, Percent, PerU16, Permill, Perbill, Perquintill}; +pub use fixed64::Fixed64; +pub use per_things::{PerThing, PerU16, Perbill, Percent, Permill, Perquintill}; pub use rational128::Rational128; #[cfg(test)] mod tests { - use super::*; - - #[test] - fn peru16_rational_does_not_overflow() { - // A historical example that will panic only for per_thing type that are created with - // maximum capacity of their type, e.g. PerU16. - let _ = PerU16::from_rational_approximation(17424870u32, 17424870); - } + use super::*; + + #[test] + fn peru16_rational_does_not_overflow() { + // A historical example that will panic only for per_thing type that are created with + // maximum capacity of their type, e.g. PerU16. + let _ = PerU16::from_rational_approximation(17424870u32, 17424870); + } } diff --git a/primitives/arithmetic/src/per_things.rs b/primitives/arithmetic/src/per_things.rs index 56fc562cd1..a0b83d8f54 100644 --- a/primitives/arithmetic/src/per_things.rs +++ b/primitives/arithmetic/src/per_things.rs @@ -15,188 +15,236 @@ // along with Substrate. If not, see . 
#[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use sp_std::{ops, fmt, prelude::*, convert::TryInto}; -use codec::{Encode, CompactAs}; use crate::traits::{ - SaturatedConversion, UniqueSaturatedInto, Saturating, BaseArithmetic, Bounded, Zero, + BaseArithmetic, Bounded, SaturatedConversion, Saturating, UniqueSaturatedInto, Zero, }; +use codec::{CompactAs, Encode}; use sp_debug_derive::RuntimeDebug; +use sp_std::{convert::TryInto, fmt, ops, prelude::*}; /// Something that implements a fixed point ration with an arbitrary granularity `X`, as _parts per /// `X`_. pub trait PerThing: - Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug + Sized + Saturating + Copy + Default + Eq + PartialEq + Ord + PartialOrd + Bounded + fmt::Debug { - /// The data type used to build this per-thingy. - type Inner: BaseArithmetic + Copy + fmt::Debug; - - /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. - /// It must be able to compute `ACCURACY^2`. - type Upper: BaseArithmetic + Copy + From + TryInto + fmt::Debug; - - /// The accuracy of this type. - const ACCURACY: Self::Inner; - - /// Equivalent to `Self::from_parts(0)`. - fn zero() -> Self { Self::from_parts(Self::Inner::zero()) } - - /// Return `true` if this is nothing. - fn is_zero(&self) -> bool { self.deconstruct() == Self::Inner::zero() } - - /// Equivalent to `Self::from_parts(Self::ACCURACY)`. - fn one() -> Self { Self::from_parts(Self::ACCURACY) } - - /// Return `true` if this is one. - fn is_one(&self) -> bool { self.deconstruct() == Self::ACCURACY } - - /// Build this type from a percent. Equivalent to `Self::from_parts(x * Self::ACCURACY / 100)` - /// but more accurate. 
- fn from_percent(x: Self::Inner) -> Self { - let a = x.min(100.into()); - let b = Self::ACCURACY; - // if Self::ACCURACY % 100 > 0 then we need the correction for accuracy - let c = rational_mul_correction::(b, a, 100.into(), Rounding::Nearest); - Self::from_parts(a / 100.into() * b + c) - } - - /// Return the product of multiplication of this value by itself. - fn square(self) -> Self { - let p = Self::Upper::from(self.deconstruct()); - let q = Self::Upper::from(Self::ACCURACY); - Self::from_rational_approximation(p * p, q * q) - } - - /// Multiplication that always rounds down to a whole number. The standard `Mul` rounds to the - /// nearest whole number. - /// - /// ```rust - /// # use sp_arithmetic::{Percent, PerThing}; - /// # fn main () { - /// // round to nearest - /// assert_eq!(Percent::from_percent(34) * 10u64, 3); - /// assert_eq!(Percent::from_percent(36) * 10u64, 4); - /// - /// // round down - /// assert_eq!(Percent::from_percent(34).mul_floor(10u64), 3); - /// assert_eq!(Percent::from_percent(36).mul_floor(10u64), 3); - /// # } - /// ``` - fn mul_floor(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add - { - overflow_prune_mul::(b, self.deconstruct(), Rounding::Down) - } - - /// Multiplication that always rounds the result up to a whole number. The standard `Mul` - /// rounds to the nearest whole number. 
- /// - /// ```rust - /// # use sp_arithmetic::{Percent, PerThing}; - /// # fn main () { - /// // round to nearest - /// assert_eq!(Percent::from_percent(34) * 10u64, 3); - /// assert_eq!(Percent::from_percent(36) * 10u64, 4); - /// - /// // round up - /// assert_eq!(Percent::from_percent(34).mul_ceil(10u64), 4); - /// assert_eq!(Percent::from_percent(36).mul_ceil(10u64), 4); - /// # } - /// ``` - fn mul_ceil(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add - { - overflow_prune_mul::(b, self.deconstruct(), Rounding::Up) - } - - /// Saturating multiplication by the reciprocal of `self`. The result is rounded to the - /// nearest whole number and saturates at the numeric bounds instead of overflowing. - /// - /// ```rust - /// # use sp_arithmetic::{Percent, PerThing}; - /// # fn main () { - /// assert_eq!(Percent::from_percent(50).saturating_reciprocal_mul(10u64), 20); - /// # } - /// ``` - fn saturating_reciprocal_mul(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating - { - saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Nearest) - } - - /// Saturating multiplication by the reciprocal of `self`. The result is rounded down to the - /// nearest whole number and saturates at the numeric bounds instead of overflowing. 
- /// - /// ```rust - /// # use sp_arithmetic::{Percent, PerThing}; - /// # fn main () { - /// // round to nearest - /// assert_eq!(Percent::from_percent(60).saturating_reciprocal_mul(10u64), 17); - /// // round down - /// assert_eq!(Percent::from_percent(60).saturating_reciprocal_mul_floor(10u64), 16); - /// # } - /// ``` - fn saturating_reciprocal_mul_floor(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating - { - saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Down) - } - - /// Saturating multiplication by the reciprocal of `self`. The result is rounded up to the - /// nearest whole number and saturates at the numeric bounds instead of overflowing. - /// - /// ```rust - /// # use sp_arithmetic::{Percent, PerThing}; - /// # fn main () { - /// // round to nearest - /// assert_eq!(Percent::from_percent(61).saturating_reciprocal_mul(10u64), 16); - /// // round up - /// assert_eq!(Percent::from_percent(61).saturating_reciprocal_mul_ceil(10u64), 17); - /// # } - /// ``` - fn saturating_reciprocal_mul_ceil(self, b: N) -> N - where N: Clone + From + UniqueSaturatedInto + ops::Rem + - ops::Div + ops::Mul + ops::Add + Saturating - { - saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Up) - } - - /// Consume self and return the number of parts per thing. - fn deconstruct(self) -> Self::Inner; - - /// Build this type from a number of parts per thing. - fn from_parts(parts: Self::Inner) -> Self; - - /// Converts a fraction into `Self`. - #[cfg(feature = "std")] - fn from_fraction(x: f64) -> Self; - - /// Approximate the fraction `p/q` into a per-thing fraction. This will never overflow. - /// - /// The computation of this approximation is performed in the generic type `N`. Given - /// `M` as the data type that can hold the maximum value of this per-thing (e.g. u32 for - /// perbill), this can only work if `N == M` or `N: From + TryInto`. 
- /// - /// Note that this always rounds _down_, i.e. - /// - /// ```rust - /// # use sp_arithmetic::{Percent, PerThing}; - /// # fn main () { - /// // 989/100 is technically closer to 99%. - /// assert_eq!( - /// Percent::from_rational_approximation(989, 1000), - /// Percent::from_parts(98), - /// ); - /// # } - /// ``` - fn from_rational_approximation(p: N, q: N) -> Self - where N: Clone + Ord + From + TryInto + TryInto + - ops::Div + ops::Rem + ops::Add; + /// The data type used to build this per-thingy. + type Inner: BaseArithmetic + Copy + fmt::Debug; + + /// A data type larger than `Self::Inner`, used to avoid overflow in some computations. + /// It must be able to compute `ACCURACY^2`. + type Upper: BaseArithmetic + Copy + From + TryInto + fmt::Debug; + + /// The accuracy of this type. + const ACCURACY: Self::Inner; + + /// Equivalent to `Self::from_parts(0)`. + fn zero() -> Self { + Self::from_parts(Self::Inner::zero()) + } + + /// Return `true` if this is nothing. + fn is_zero(&self) -> bool { + self.deconstruct() == Self::Inner::zero() + } + + /// Equivalent to `Self::from_parts(Self::ACCURACY)`. + fn one() -> Self { + Self::from_parts(Self::ACCURACY) + } + + /// Return `true` if this is one. + fn is_one(&self) -> bool { + self.deconstruct() == Self::ACCURACY + } + + /// Build this type from a percent. Equivalent to `Self::from_parts(x * Self::ACCURACY / 100)` + /// but more accurate. + fn from_percent(x: Self::Inner) -> Self { + let a = x.min(100.into()); + let b = Self::ACCURACY; + // if Self::ACCURACY % 100 > 0 then we need the correction for accuracy + let c = rational_mul_correction::(b, a, 100.into(), Rounding::Nearest); + Self::from_parts(a / 100.into() * b + c) + } + + /// Return the product of multiplication of this value by itself. 
+ fn square(self) -> Self { + let p = Self::Upper::from(self.deconstruct()); + let q = Self::Upper::from(Self::ACCURACY); + Self::from_rational_approximation(p * p, q * q) + } + + /// Multiplication that always rounds down to a whole number. The standard `Mul` rounds to the + /// nearest whole number. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(34) * 10u64, 3); + /// assert_eq!(Percent::from_percent(36) * 10u64, 4); + /// + /// // round down + /// assert_eq!(Percent::from_percent(34).mul_floor(10u64), 3); + /// assert_eq!(Percent::from_percent(36).mul_floor(10u64), 3); + /// # } + /// ``` + fn mul_floor(self, b: N) -> N + where + N: Clone + + From + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add, + { + overflow_prune_mul::(b, self.deconstruct(), Rounding::Down) + } + + /// Multiplication that always rounds the result up to a whole number. The standard `Mul` + /// rounds to the nearest whole number. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(34) * 10u64, 3); + /// assert_eq!(Percent::from_percent(36) * 10u64, 4); + /// + /// // round up + /// assert_eq!(Percent::from_percent(34).mul_ceil(10u64), 4); + /// assert_eq!(Percent::from_percent(36).mul_ceil(10u64), 4); + /// # } + /// ``` + fn mul_ceil(self, b: N) -> N + where + N: Clone + + From + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add, + { + overflow_prune_mul::(b, self.deconstruct(), Rounding::Up) + } + + /// Saturating multiplication by the reciprocal of `self`. The result is rounded to the + /// nearest whole number and saturates at the numeric bounds instead of overflowing. 
+ /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// assert_eq!(Percent::from_percent(50).saturating_reciprocal_mul(10u64), 20); + /// # } + /// ``` + fn saturating_reciprocal_mul(self, b: N) -> N + where + N: Clone + + From + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating, + { + saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Nearest) + } + + /// Saturating multiplication by the reciprocal of `self`. The result is rounded down to the + /// nearest whole number and saturates at the numeric bounds instead of overflowing. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(60).saturating_reciprocal_mul(10u64), 17); + /// // round down + /// assert_eq!(Percent::from_percent(60).saturating_reciprocal_mul_floor(10u64), 16); + /// # } + /// ``` + fn saturating_reciprocal_mul_floor(self, b: N) -> N + where + N: Clone + + From + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating, + { + saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Down) + } + + /// Saturating multiplication by the reciprocal of `self`. The result is rounded up to the + /// nearest whole number and saturates at the numeric bounds instead of overflowing. 
+ /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // round to nearest + /// assert_eq!(Percent::from_percent(61).saturating_reciprocal_mul(10u64), 16); + /// // round up + /// assert_eq!(Percent::from_percent(61).saturating_reciprocal_mul_ceil(10u64), 17); + /// # } + /// ``` + fn saturating_reciprocal_mul_ceil(self, b: N) -> N + where + N: Clone + + From + + UniqueSaturatedInto + + ops::Rem + + ops::Div + + ops::Mul + + ops::Add + + Saturating, + { + saturating_reciprocal_mul::(b, self.deconstruct(), Rounding::Up) + } + + /// Consume self and return the number of parts per thing. + fn deconstruct(self) -> Self::Inner; + + /// Build this type from a number of parts per thing. + fn from_parts(parts: Self::Inner) -> Self; + + /// Converts a fraction into `Self`. + #[cfg(feature = "std")] + fn from_fraction(x: f64) -> Self; + + /// Approximate the fraction `p/q` into a per-thing fraction. This will never overflow. + /// + /// The computation of this approximation is performed in the generic type `N`. Given + /// `M` as the data type that can hold the maximum value of this per-thing (e.g. u32 for + /// perbill), this can only work if `N == M` or `N: From + TryInto`. + /// + /// Note that this always rounds _down_, i.e. + /// + /// ```rust + /// # use sp_arithmetic::{Percent, PerThing}; + /// # fn main () { + /// // 989/100 is technically closer to 99%. + /// assert_eq!( + /// Percent::from_rational_approximation(989, 1000), + /// Percent::from_parts(98), + /// ); + /// # } + /// ``` + fn from_rational_approximation(p: N, q: N) -> Self + where + N: Clone + + Ord + + From + + TryInto + + TryInto + + ops::Div + + ops::Rem + + ops::Add; } /// The rounding method to use. @@ -204,97 +252,93 @@ pub trait PerThing: /// `PerThing`s are unsigned so `Up` means towards infinity and `Down` means towards zero. /// `Nearest` will round an exact half down. 
enum Rounding { - Up, - Down, - Nearest, + Up, + Down, + Nearest, } /// Saturating reciprocal multiplication. Compute `x / self`, saturating at the numeric /// bounds instead of overflowing. -fn saturating_reciprocal_mul( - x: N, - part: P::Inner, - rounding: Rounding, -) -> N +fn saturating_reciprocal_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem + Saturating, - P: PerThing, + N: Clone + + From + + UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem + + Saturating, + P: PerThing, { - let maximum: N = P::ACCURACY.into(); - let c = rational_mul_correction::( - x.clone(), - P::ACCURACY, - part, - rounding, - ); - (x / part.into()).saturating_mul(maximum).saturating_add(c) + let maximum: N = P::ACCURACY.into(); + let c = rational_mul_correction::(x.clone(), P::ACCURACY, part, rounding); + (x / part.into()).saturating_mul(maximum).saturating_add(c) } /// Overflow-prune multiplication. Accurately multiply a value by `self` without overflowing. -fn overflow_prune_mul( - x: N, - part: P::Inner, - rounding: Rounding, -) -> N +fn overflow_prune_mul(x: N, part: P::Inner, rounding: Rounding) -> N where - N: Clone + From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem, - P: PerThing, + N: Clone + + From + + UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem, + P: PerThing, { - let maximum: N = P::ACCURACY.into(); - let part_n: N = part.into(); - let c = rational_mul_correction::( - x.clone(), - part, - P::ACCURACY, - rounding, - ); - (x / maximum) * part_n + c + let maximum: N = P::ACCURACY.into(); + let part_n: N = part.into(); + let c = rational_mul_correction::(x.clone(), part, P::ACCURACY, rounding); + (x / maximum) * part_n + c } /// Compute the error due to integer division in the expression `x / denom * numer`. /// /// Take the remainder of `x / denom` and multiply by `numer / denom`. 
The result can be added /// to `x / denom * numer` for an accurate result. -fn rational_mul_correction( - x: N, - numer: P::Inner, - denom: P::Inner, - rounding: Rounding, -) -> N +fn rational_mul_correction(x: N, numer: P::Inner, denom: P::Inner, rounding: Rounding) -> N where - N: From + UniqueSaturatedInto + ops::Div + ops::Mul + ops::Add + ops::Rem, - P: PerThing, + N: From + + UniqueSaturatedInto + + ops::Div + + ops::Mul + + ops::Add + + ops::Rem, + P: PerThing, { - let numer_upper = P::Upper::from(numer); - let denom_n = N::from(denom); - let denom_upper = P::Upper::from(denom); - let rem = x.rem(denom_n); - // `rem` is less than `denom`, which fits in `P::Inner`. - let rem_inner = rem.saturated_into::(); - // `P::Upper` always fits `P::Inner::max_value().pow(2)`, thus it fits `rem * numer`. - let rem_mul_upper = P::Upper::from(rem_inner) * numer_upper; - // `rem` is less than `denom`, so `rem * numer / denom` is less than `numer`, which fits in - // `P::Inner`. - let mut rem_mul_div_inner = (rem_mul_upper / denom_upper).saturated_into::(); - match rounding { - // Already rounded down - Rounding::Down => {}, - // Round up if the fractional part of the result is non-zero. - Rounding::Up => if rem_mul_upper % denom_upper > 0.into() { - // `rem * numer / denom` is less than `numer`, so this will not overflow. - rem_mul_div_inner = rem_mul_div_inner + 1.into(); - }, - // Round up if the fractional part of the result is greater than a half. An exact half is - // rounded down. - Rounding::Nearest => if rem_mul_upper % denom_upper > denom_upper / 2.into() { - // `rem * numer / denom` is less than `numer`, so this will not overflow. - rem_mul_div_inner = rem_mul_div_inner + 1.into(); - }, - } - rem_mul_div_inner.into() + let numer_upper = P::Upper::from(numer); + let denom_n = N::from(denom); + let denom_upper = P::Upper::from(denom); + let rem = x.rem(denom_n); + // `rem` is less than `denom`, which fits in `P::Inner`. 
+ let rem_inner = rem.saturated_into::(); + // `P::Upper` always fits `P::Inner::max_value().pow(2)`, thus it fits `rem * numer`. + let rem_mul_upper = P::Upper::from(rem_inner) * numer_upper; + // `rem` is less than `denom`, so `rem * numer / denom` is less than `numer`, which fits in + // `P::Inner`. + let mut rem_mul_div_inner = (rem_mul_upper / denom_upper).saturated_into::(); + match rounding { + // Already rounded down + Rounding::Down => {} + // Round up if the fractional part of the result is non-zero. + Rounding::Up => { + if rem_mul_upper % denom_upper > 0.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner = rem_mul_div_inner + 1.into(); + } + } + // Round up if the fractional part of the result is greater than a half. An exact half is + // rounded down. + Rounding::Nearest => { + if rem_mul_upper % denom_upper > denom_upper / 2.into() { + // `rem * numer / denom` is less than `numer`, so this will not overflow. + rem_mul_div_inner = rem_mul_div_inner + 1.into(); + } + } + } + rem_mul_div_inner.into() } macro_rules! implement_per_thing { @@ -1134,47 +1178,47 @@ macro_rules! 
implement_per_thing { } implement_per_thing!( - Percent, - test_per_cent, - [u32, u64, u128], - 100u8, - u8, - u16, - "_Percent_", + Percent, + test_per_cent, + [u32, u64, u128], + 100u8, + u8, + u16, + "_Percent_", ); implement_per_thing!( - PerU16, - test_peru16, - [u32, u64, u128], - 65535_u16, - u16, - u32, - "_Parts per 65535_", + PerU16, + test_peru16, + [u32, u64, u128], + 65535_u16, + u16, + u32, + "_Parts per 65535_", ); implement_per_thing!( - Permill, - test_permill, - [u32, u64, u128], - 1_000_000u32, - u32, - u64, - "_Parts per Million_", + Permill, + test_permill, + [u32, u64, u128], + 1_000_000u32, + u32, + u64, + "_Parts per Million_", ); implement_per_thing!( - Perbill, - test_perbill, - [u32, u64, u128], - 1_000_000_000u32, - u32, - u64, - "_Parts per Billion_", + Perbill, + test_perbill, + [u32, u64, u128], + 1_000_000_000u32, + u32, + u64, + "_Parts per Billion_", ); implement_per_thing!( - Perquintill, - test_perquintill, - [u64, u128], - 1_000_000_000_000_000_000u64, - u64, - u128, - "_Parts per Quintillion_", + Perquintill, + test_perquintill, + [u64, u128], + 1_000_000_000_000_000_000u64, + u64, + u128, + "_Parts per Quintillion_", ); diff --git a/primitives/arithmetic/src/rational128.rs b/primitives/arithmetic/src/rational128.rs index 248df70794..9e8a430fbb 100644 --- a/primitives/arithmetic/src/rational128.rs +++ b/primitives/arithmetic/src/rational128.rs @@ -14,371 +14,388 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use sp_std::{cmp::Ordering, prelude::*}; use crate::helpers_128bit; use num_traits::Zero; use sp_debug_derive::RuntimeDebug; +use sp_std::{cmp::Ordering, prelude::*}; /// A wrapper for any rational number with a 128 bit numerator and denominator. #[derive(Clone, Copy, Default, Eq, RuntimeDebug)] pub struct Rational128(u128, u128); impl Rational128 { - /// Nothing. 
- pub fn zero() -> Self { - Self(0, 1) - } - - /// If it is zero or not - pub fn is_zero(&self) -> bool { - self.0.is_zero() - } - - /// Build from a raw `n/d`. - pub fn from(n: u128, d: u128) -> Self { - Self(n, d.max(1)) - } - - /// Build from a raw `n/d`. This could lead to / 0 if not properly handled. - pub fn from_unchecked(n: u128, d: u128) -> Self { - Self(n, d) - } - - /// Return the numerator. - pub fn n(&self) -> u128 { - self.0 - } - - /// Return the denominator. - pub fn d(&self) -> u128 { - self.1 - } - - /// Convert `self` to a similar rational number where denominator is the given `den`. - // - /// This only returns if the result is accurate. `Err` is returned if the result cannot be - /// accurately calculated. - pub fn to_den(self, den: u128) -> Result { - if den == self.1 { - Ok(self) - } else { - helpers_128bit::multiply_by_rational(self.0, den, self.1).map(|n| Self(n, den)) - } - } - - /// Get the least common divisor of `self` and `other`. - /// - /// This only returns if the result is accurate. `Err` is returned if the result cannot be - /// accurately calculated. - pub fn lcm(&self, other: &Self) -> Result { - // this should be tested better: two large numbers that are almost the same. - if self.1 == other.1 { return Ok(self.1) } - let g = helpers_128bit::gcd(self.1, other.1); - helpers_128bit::multiply_by_rational(self.1 , other.1, g) - } - - /// A saturating add that assumes `self` and `other` have the same denominator. - pub fn lazy_saturating_add(self, other: Self) -> Self { - if other.is_zero() { - self - } else { - Self(self.0.saturating_add(other.0) ,self.1) - } - } - - /// A saturating subtraction that assumes `self` and `other` have the same denominator. - pub fn lazy_saturating_sub(self, other: Self) -> Self { - if other.is_zero() { - self - } else { - Self(self.0.saturating_sub(other.0) ,self.1) - } - } - - /// Addition. Simply tries to unify the denominators and add the numerators. 
- /// - /// Overflow might happen during any of the steps. Error is returned in such cases. - pub fn checked_add(self, other: Self) -> Result { - let lcm = self.lcm(&other).map_err(|_| "failed to scale to denominator")?; - let self_scaled = self.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - let other_scaled = other.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - let n = self_scaled.0.checked_add(other_scaled.0) - .ok_or("overflow while adding numerators")?; - Ok(Self(n, self_scaled.1)) - } - - /// Subtraction. Simply tries to unify the denominators and subtract the numerators. - /// - /// Overflow might happen during any of the steps. None is returned in such cases. - pub fn checked_sub(self, other: Self) -> Result { - let lcm = self.lcm(&other).map_err(|_| "failed to scale to denominator")?; - let self_scaled = self.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - let other_scaled = other.to_den(lcm).map_err(|_| "failed to scale to denominator")?; - - let n = self_scaled.0.checked_sub(other_scaled.0) - .ok_or("overflow while subtracting numerators")?; - Ok(Self(n, self_scaled.1)) - } + /// Nothing. + pub fn zero() -> Self { + Self(0, 1) + } + + /// If it is zero or not + pub fn is_zero(&self) -> bool { + self.0.is_zero() + } + + /// Build from a raw `n/d`. + pub fn from(n: u128, d: u128) -> Self { + Self(n, d.max(1)) + } + + /// Build from a raw `n/d`. This could lead to / 0 if not properly handled. + pub fn from_unchecked(n: u128, d: u128) -> Self { + Self(n, d) + } + + /// Return the numerator. + pub fn n(&self) -> u128 { + self.0 + } + + /// Return the denominator. + pub fn d(&self) -> u128 { + self.1 + } + + /// Convert `self` to a similar rational number where denominator is the given `den`. + // + /// This only returns if the result is accurate. `Err` is returned if the result cannot be + /// accurately calculated. 
+ pub fn to_den(self, den: u128) -> Result { + if den == self.1 { + Ok(self) + } else { + helpers_128bit::multiply_by_rational(self.0, den, self.1).map(|n| Self(n, den)) + } + } + + /// Get the least common divisor of `self` and `other`. + /// + /// This only returns if the result is accurate. `Err` is returned if the result cannot be + /// accurately calculated. + pub fn lcm(&self, other: &Self) -> Result { + // this should be tested better: two large numbers that are almost the same. + if self.1 == other.1 { + return Ok(self.1); + } + let g = helpers_128bit::gcd(self.1, other.1); + helpers_128bit::multiply_by_rational(self.1, other.1, g) + } + + /// A saturating add that assumes `self` and `other` have the same denominator. + pub fn lazy_saturating_add(self, other: Self) -> Self { + if other.is_zero() { + self + } else { + Self(self.0.saturating_add(other.0), self.1) + } + } + + /// A saturating subtraction that assumes `self` and `other` have the same denominator. + pub fn lazy_saturating_sub(self, other: Self) -> Self { + if other.is_zero() { + self + } else { + Self(self.0.saturating_sub(other.0), self.1) + } + } + + /// Addition. Simply tries to unify the denominators and add the numerators. + /// + /// Overflow might happen during any of the steps. Error is returned in such cases. + pub fn checked_add(self, other: Self) -> Result { + let lcm = self + .lcm(&other) + .map_err(|_| "failed to scale to denominator")?; + let self_scaled = self + .to_den(lcm) + .map_err(|_| "failed to scale to denominator")?; + let other_scaled = other + .to_den(lcm) + .map_err(|_| "failed to scale to denominator")?; + let n = self_scaled + .0 + .checked_add(other_scaled.0) + .ok_or("overflow while adding numerators")?; + Ok(Self(n, self_scaled.1)) + } + + /// Subtraction. Simply tries to unify the denominators and subtract the numerators. + /// + /// Overflow might happen during any of the steps. None is returned in such cases. 
+ pub fn checked_sub(self, other: Self) -> Result { + let lcm = self + .lcm(&other) + .map_err(|_| "failed to scale to denominator")?; + let self_scaled = self + .to_den(lcm) + .map_err(|_| "failed to scale to denominator")?; + let other_scaled = other + .to_den(lcm) + .map_err(|_| "failed to scale to denominator")?; + + let n = self_scaled + .0 + .checked_sub(other_scaled.0) + .ok_or("overflow while subtracting numerators")?; + Ok(Self(n, self_scaled.1)) + } } impl PartialOrd for Rational128 { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl Ord for Rational128 { - fn cmp(&self, other: &Self) -> Ordering { - // handle some edge cases. - if self.1 == other.1 { - self.0.cmp(&other.0) - } else if self.1.is_zero() { - Ordering::Greater - } else if other.1.is_zero() { - Ordering::Less - } else { - // Don't even compute gcd. - let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); - let other_n = helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); - self_n.cmp(&other_n) - } - } + fn cmp(&self, other: &Self) -> Ordering { + // handle some edge cases. + if self.1 == other.1 { + self.0.cmp(&other.0) + } else if self.1.is_zero() { + Ordering::Greater + } else if other.1.is_zero() { + Ordering::Less + } else { + // Don't even compute gcd. + let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); + let other_n = + helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); + self_n.cmp(&other_n) + } + } } impl PartialEq for Rational128 { - fn eq(&self, other: &Self) -> bool { - // handle some edge cases. 
- if self.1 == other.1 { - self.0.eq(&other.0) - } else { - let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); - let other_n = helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); - self_n.eq(&other_n) - } - } + fn eq(&self, other: &Self) -> bool { + // handle some edge cases. + if self.1 == other.1 { + self.0.eq(&other.0) + } else { + let self_n = helpers_128bit::to_big_uint(self.0) * helpers_128bit::to_big_uint(other.1); + let other_n = + helpers_128bit::to_big_uint(other.0) * helpers_128bit::to_big_uint(self.1); + self_n.eq(&other_n) + } + } } #[cfg(test)] mod tests { - use super::*; - use super::helpers_128bit::*; - - const MAX128: u128 = u128::max_value(); - const MAX64: u128 = u64::max_value() as u128; - const MAX64_2: u128 = 2 * u64::max_value() as u128; - - fn r(p: u128, q: u128) -> Rational128 { - Rational128(p, q) - } - - fn mul_div(a: u128, b: u128, c: u128) -> u128 { - use primitive_types::U256; - if a.is_zero() { return Zero::zero(); } - let c = c.max(1); - - // e for extended - let ae: U256 = a.into(); - let be: U256 = b.into(); - let ce: U256 = c.into(); - - let r = ae * be / ce; - if r > u128::max_value().into() { - a - } else { - r.as_u128() - } - } - - #[test] - fn truth_value_function_works() { - assert_eq!( - mul_div(2u128.pow(100), 8, 4), - 2u128.pow(101) - ); - assert_eq!( - mul_div(2u128.pow(100), 4, 8), - 2u128.pow(99) - ); - - // and it returns a if result cannot fit - assert_eq!(mul_div(MAX128 - 10, 2, 1), MAX128 - 10); - } - - #[test] - fn to_denom_works() { - // simple up and down - assert_eq!(r(1, 5).to_den(10), Ok(r(2, 10))); - assert_eq!(r(4, 10).to_den(5), Ok(r(2, 5))); - - // up and down with large numbers - assert_eq!(r(MAX128 - 10, MAX128).to_den(10), Ok(r(10, 10))); - assert_eq!(r(MAX128 / 2, MAX128).to_den(10), Ok(r(5, 10))); - - // large to perbill. This is very well needed for phragmen. 
- assert_eq!( - r(MAX128 / 2, MAX128).to_den(1000_000_000), - Ok(r(500_000_000, 1000_000_000)) - ); - - // large to large - assert_eq!(r(MAX128 / 2, MAX128).to_den(MAX128/2), Ok(r(MAX128/4, MAX128/2))); - } - - #[test] - fn gdc_works() { - assert_eq!(gcd(10, 5), 5); - assert_eq!(gcd(7, 22), 1); - } - - #[test] - fn lcm_works() { - // simple stuff - assert_eq!(r(3, 10).lcm(&r(4, 15)).unwrap(), 30); - assert_eq!(r(5, 30).lcm(&r(1, 7)).unwrap(), 210); - assert_eq!(r(5, 30).lcm(&r(1, 10)).unwrap(), 30); - - // large numbers - assert_eq!( - r(1_000_000_000, MAX128).lcm(&r(7_000_000_000, MAX128-1)), - Err("result cannot fit in u128"), - ); - assert_eq!( - r(1_000_000_000, MAX64).lcm(&r(7_000_000_000, MAX64-1)), - Ok(340282366920938463408034375210639556610), - ); - assert!(340282366920938463408034375210639556610 < MAX128); - assert!(340282366920938463408034375210639556610 == MAX64 * (MAX64 - 1)); - } - - #[test] - fn add_works() { - // works - assert_eq!(r(3, 10).checked_add(r(1, 10)).unwrap(), r(2, 5)); - assert_eq!(r(3, 10).checked_add(r(3, 7)).unwrap(), r(51, 70)); - - // errors - assert_eq!( - r(1, MAX128).checked_add(r(1, MAX128-1)), - Err("failed to scale to denominator"), - ); - assert_eq!( - r(7, MAX128).checked_add(r(MAX128, MAX128)), - Err("overflow while adding numerators"), - ); - assert_eq!( - r(MAX128, MAX128).checked_add(r(MAX128, MAX128)), - Err("overflow while adding numerators"), - ); - } - - #[test] - fn sub_works() { - // works - assert_eq!(r(3, 10).checked_sub(r(1, 10)).unwrap(), r(1, 5)); - assert_eq!(r(6, 10).checked_sub(r(3, 7)).unwrap(), r(12, 70)); - - // errors - assert_eq!( - r(2, MAX128).checked_sub(r(1, MAX128-1)), - Err("failed to scale to denominator"), - ); - assert_eq!( - r(7, MAX128).checked_sub(r(MAX128, MAX128)), - Err("overflow while subtracting numerators"), - ); - assert_eq!( - r(1, 10).checked_sub(r(2,10)), - Err("overflow while subtracting numerators"), - ); - } - - #[test] - fn ordering_and_eq_works() { - assert!(r(1, 2) > r(1, 
3)); - assert!(r(1, 2) > r(2, 6)); - - assert!(r(1, 2) < r(6, 6)); - assert!(r(2, 1) > r(2, 6)); - - assert!(r(5, 10) == r(1, 2)); - assert!(r(1, 2) == r(1, 2)); - - assert!(r(1, 1490000000000200000) > r(1, 1490000000000200001)); - } - - #[test] - fn multiply_by_rational_works() { - assert_eq!(multiply_by_rational(7, 2, 3).unwrap(), 7 * 2 / 3); - assert_eq!(multiply_by_rational(7, 20, 30).unwrap(), 7 * 2 / 3); - assert_eq!(multiply_by_rational(20, 7, 30).unwrap(), 7 * 2 / 3); - - assert_eq!( - // MAX128 % 3 == 0 - multiply_by_rational(MAX128, 2, 3).unwrap(), - MAX128 / 3 * 2, - ); - assert_eq!( - // MAX128 % 7 == 3 - multiply_by_rational(MAX128, 5, 7).unwrap(), - (MAX128 / 7 * 5) + (3 * 5 / 7), - ); - assert_eq!( - // MAX128 % 7 == 3 - multiply_by_rational(MAX128, 11 , 13).unwrap(), - (MAX128 / 13 * 11) + (8 * 11 / 13), - ); - assert_eq!( - // MAX128 % 1000 == 455 - multiply_by_rational(MAX128, 555, 1000).unwrap(), - (MAX128 / 1000 * 555) + (455 * 555 / 1000), - ); - - assert_eq!( - multiply_by_rational(2 * MAX64 - 1, MAX64, MAX64).unwrap(), - 2 * MAX64 - 1, - ); - assert_eq!( - multiply_by_rational(2 * MAX64 - 1, MAX64 - 1, MAX64).unwrap(), - 2 * MAX64 - 3, - ); - - assert_eq!( - multiply_by_rational(MAX64 + 100, MAX64_2, MAX64_2 / 2).unwrap(), - (MAX64 + 100) * 2, - ); - assert_eq!( - multiply_by_rational(MAX64 + 100, MAX64_2 / 100, MAX64_2 / 200).unwrap(), - (MAX64 + 100) * 2, - ); - - assert_eq!( - multiply_by_rational(2u128.pow(66) - 1, 2u128.pow(65) - 1, 2u128.pow(65)).unwrap(), - 73786976294838206461, - ); - assert_eq!( - multiply_by_rational(1_000_000_000, MAX128 / 8, MAX128 / 2).unwrap(), - 250000000, - ); - } - - #[test] - fn multiply_by_rational_a_b_are_interchangeable() { - assert_eq!( - multiply_by_rational(10, MAX128, MAX128 / 2), - Ok(20), - ); - assert_eq!( - multiply_by_rational(MAX128, 10, MAX128 / 2), - Ok(20), - ); - } - - #[test] - #[ignore] - fn multiply_by_rational_fuzzed_equation() { - assert_eq!( - 
multiply_by_rational(154742576605164960401588224, 9223376310179529214, 549756068598), - Ok(2596149632101417846585204209223679) - ); - } + use super::helpers_128bit::*; + use super::*; + + const MAX128: u128 = u128::max_value(); + const MAX64: u128 = u64::max_value() as u128; + const MAX64_2: u128 = 2 * u64::max_value() as u128; + + fn r(p: u128, q: u128) -> Rational128 { + Rational128(p, q) + } + + fn mul_div(a: u128, b: u128, c: u128) -> u128 { + use primitive_types::U256; + if a.is_zero() { + return Zero::zero(); + } + let c = c.max(1); + + // e for extended + let ae: U256 = a.into(); + let be: U256 = b.into(); + let ce: U256 = c.into(); + + let r = ae * be / ce; + if r > u128::max_value().into() { + a + } else { + r.as_u128() + } + } + + #[test] + fn truth_value_function_works() { + assert_eq!(mul_div(2u128.pow(100), 8, 4), 2u128.pow(101)); + assert_eq!(mul_div(2u128.pow(100), 4, 8), 2u128.pow(99)); + + // and it returns a if result cannot fit + assert_eq!(mul_div(MAX128 - 10, 2, 1), MAX128 - 10); + } + + #[test] + fn to_denom_works() { + // simple up and down + assert_eq!(r(1, 5).to_den(10), Ok(r(2, 10))); + assert_eq!(r(4, 10).to_den(5), Ok(r(2, 5))); + + // up and down with large numbers + assert_eq!(r(MAX128 - 10, MAX128).to_den(10), Ok(r(10, 10))); + assert_eq!(r(MAX128 / 2, MAX128).to_den(10), Ok(r(5, 10))); + + // large to perbill. This is very well needed for phragmen. 
+ assert_eq!( + r(MAX128 / 2, MAX128).to_den(1000_000_000), + Ok(r(500_000_000, 1000_000_000)) + ); + + // large to large + assert_eq!( + r(MAX128 / 2, MAX128).to_den(MAX128 / 2), + Ok(r(MAX128 / 4, MAX128 / 2)) + ); + } + + #[test] + fn gdc_works() { + assert_eq!(gcd(10, 5), 5); + assert_eq!(gcd(7, 22), 1); + } + + #[test] + fn lcm_works() { + // simple stuff + assert_eq!(r(3, 10).lcm(&r(4, 15)).unwrap(), 30); + assert_eq!(r(5, 30).lcm(&r(1, 7)).unwrap(), 210); + assert_eq!(r(5, 30).lcm(&r(1, 10)).unwrap(), 30); + + // large numbers + assert_eq!( + r(1_000_000_000, MAX128).lcm(&r(7_000_000_000, MAX128 - 1)), + Err("result cannot fit in u128"), + ); + assert_eq!( + r(1_000_000_000, MAX64).lcm(&r(7_000_000_000, MAX64 - 1)), + Ok(340282366920938463408034375210639556610), + ); + assert!(340282366920938463408034375210639556610 < MAX128); + assert!(340282366920938463408034375210639556610 == MAX64 * (MAX64 - 1)); + } + + #[test] + fn add_works() { + // works + assert_eq!(r(3, 10).checked_add(r(1, 10)).unwrap(), r(2, 5)); + assert_eq!(r(3, 10).checked_add(r(3, 7)).unwrap(), r(51, 70)); + + // errors + assert_eq!( + r(1, MAX128).checked_add(r(1, MAX128 - 1)), + Err("failed to scale to denominator"), + ); + assert_eq!( + r(7, MAX128).checked_add(r(MAX128, MAX128)), + Err("overflow while adding numerators"), + ); + assert_eq!( + r(MAX128, MAX128).checked_add(r(MAX128, MAX128)), + Err("overflow while adding numerators"), + ); + } + + #[test] + fn sub_works() { + // works + assert_eq!(r(3, 10).checked_sub(r(1, 10)).unwrap(), r(1, 5)); + assert_eq!(r(6, 10).checked_sub(r(3, 7)).unwrap(), r(12, 70)); + + // errors + assert_eq!( + r(2, MAX128).checked_sub(r(1, MAX128 - 1)), + Err("failed to scale to denominator"), + ); + assert_eq!( + r(7, MAX128).checked_sub(r(MAX128, MAX128)), + Err("overflow while subtracting numerators"), + ); + assert_eq!( + r(1, 10).checked_sub(r(2, 10)), + Err("overflow while subtracting numerators"), + ); + } + + #[test] + fn ordering_and_eq_works() { + 
assert!(r(1, 2) > r(1, 3)); + assert!(r(1, 2) > r(2, 6)); + + assert!(r(1, 2) < r(6, 6)); + assert!(r(2, 1) > r(2, 6)); + + assert!(r(5, 10) == r(1, 2)); + assert!(r(1, 2) == r(1, 2)); + + assert!(r(1, 1490000000000200000) > r(1, 1490000000000200001)); + } + + #[test] + fn multiply_by_rational_works() { + assert_eq!(multiply_by_rational(7, 2, 3).unwrap(), 7 * 2 / 3); + assert_eq!(multiply_by_rational(7, 20, 30).unwrap(), 7 * 2 / 3); + assert_eq!(multiply_by_rational(20, 7, 30).unwrap(), 7 * 2 / 3); + + assert_eq!( + // MAX128 % 3 == 0 + multiply_by_rational(MAX128, 2, 3).unwrap(), + MAX128 / 3 * 2, + ); + assert_eq!( + // MAX128 % 7 == 3 + multiply_by_rational(MAX128, 5, 7).unwrap(), + (MAX128 / 7 * 5) + (3 * 5 / 7), + ); + assert_eq!( + // MAX128 % 7 == 3 + multiply_by_rational(MAX128, 11, 13).unwrap(), + (MAX128 / 13 * 11) + (8 * 11 / 13), + ); + assert_eq!( + // MAX128 % 1000 == 455 + multiply_by_rational(MAX128, 555, 1000).unwrap(), + (MAX128 / 1000 * 555) + (455 * 555 / 1000), + ); + + assert_eq!( + multiply_by_rational(2 * MAX64 - 1, MAX64, MAX64).unwrap(), + 2 * MAX64 - 1, + ); + assert_eq!( + multiply_by_rational(2 * MAX64 - 1, MAX64 - 1, MAX64).unwrap(), + 2 * MAX64 - 3, + ); + + assert_eq!( + multiply_by_rational(MAX64 + 100, MAX64_2, MAX64_2 / 2).unwrap(), + (MAX64 + 100) * 2, + ); + assert_eq!( + multiply_by_rational(MAX64 + 100, MAX64_2 / 100, MAX64_2 / 200).unwrap(), + (MAX64 + 100) * 2, + ); + + assert_eq!( + multiply_by_rational(2u128.pow(66) - 1, 2u128.pow(65) - 1, 2u128.pow(65)).unwrap(), + 73786976294838206461, + ); + assert_eq!( + multiply_by_rational(1_000_000_000, MAX128 / 8, MAX128 / 2).unwrap(), + 250000000, + ); + } + + #[test] + fn multiply_by_rational_a_b_are_interchangeable() { + assert_eq!(multiply_by_rational(10, MAX128, MAX128 / 2), Ok(20),); + assert_eq!(multiply_by_rational(MAX128, 10, MAX128 / 2), Ok(20),); + } + + #[test] + #[ignore] + fn multiply_by_rational_fuzzed_equation() { + assert_eq!( + multiply_by_rational( + 
154742576605164960401588224, + 9223376310179529214, + 549756068598 + ), + Ok(2596149632101417846585204209223679) + ); + } } diff --git a/primitives/arithmetic/src/traits.rs b/primitives/arithmetic/src/traits.rs index 6b5e324464..809c5d8814 100644 --- a/primitives/arithmetic/src/traits.rs +++ b/primitives/arithmetic/src/traits.rs @@ -16,58 +16,129 @@ //! Primitive traits for the runtime arithmetic. -use sp_std::{self, convert::{TryFrom, TryInto}}; use codec::HasCompact; pub use integer_sqrt::IntegerSquareRoot; pub use num_traits::{ - Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, - CheckedShl, CheckedShr, checked_pow + checked_pow, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedShl, CheckedShr, CheckedSub, + One, Zero, }; use sp_std::ops::{ - Add, Sub, Mul, Div, Rem, AddAssign, SubAssign, MulAssign, DivAssign, - RemAssign, Shl, Shr + Add, AddAssign, Div, DivAssign, Mul, MulAssign, Rem, RemAssign, Shl, Shr, Sub, SubAssign, +}; +use sp_std::{ + self, + convert::{TryFrom, TryInto}, }; /// A meta trait for arithmetic type operations, regardless of any limitation on size. 
pub trait BaseArithmetic: - From + - Zero + One + IntegerSquareRoot + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + CheckedShr + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Saturating + - PartialOrd + Ord + Bounded + HasCompact + Sized + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto -{} - -impl + - Zero + One + IntegerSquareRoot + - Add + AddAssign + - Sub + SubAssign + - Mul + MulAssign + - Div + DivAssign + - Rem + RemAssign + - Shl + Shr + - CheckedShl + CheckedShr + CheckedAdd + CheckedSub + CheckedMul + CheckedDiv + Saturating + - PartialOrd + Ord + Bounded + HasCompact + Sized + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - TryFrom + TryInto + TryFrom + TryInto + TryFrom + TryInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto + - UniqueSaturatedFrom + UniqueSaturatedInto -> BaseArithmetic for T {} + From + + Zero + + One + + IntegerSquareRoot + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact + + Sized + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + 
UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto +{ +} + +impl< + T: From + + Zero + + One + + IntegerSquareRoot + + Add + + AddAssign + + Sub + + SubAssign + + Mul + + MulAssign + + Div + + DivAssign + + Rem + + RemAssign + + Shl + + Shr + + CheckedShl + + CheckedShr + + CheckedAdd + + CheckedSub + + CheckedMul + + CheckedDiv + + Saturating + + PartialOrd + + Ord + + Bounded + + HasCompact + + Sized + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + TryFrom + + TryInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto + + UniqueSaturatedFrom + + UniqueSaturatedInto, + > BaseArithmetic for T +{ +} /// A meta trait for arithmetic. /// @@ -81,86 +152,92 @@ impl + From> AtLeast32Bit for T {} /// Just like `From` except that if the source value is too big to fit into the destination type /// then it'll saturate the destination. pub trait UniqueSaturatedFrom: Sized { - /// Convert from a value of `T` into an equivalent instance of `Self`. - fn unique_saturated_from(t: T) -> Self; + /// Convert from a value of `T` into an equivalent instance of `Self`. + fn unique_saturated_from(t: T) -> Self; } /// Just like `Into` except that if the source value is too big to fit into the destination type /// then it'll saturate the destination. pub trait UniqueSaturatedInto: Sized { - /// Consume self to return an equivalent value of `T`. - fn unique_saturated_into(self) -> T; + /// Consume self to return an equivalent value of `T`. 
+ fn unique_saturated_into(self) -> T; } impl + Bounded + Sized> UniqueSaturatedFrom for S { - fn unique_saturated_from(t: T) -> Self { - S::try_from(t).unwrap_or_else(|_| Bounded::max_value()) - } + fn unique_saturated_from(t: T) -> Self { + S::try_from(t).unwrap_or_else(|_| Bounded::max_value()) + } } impl + Sized> UniqueSaturatedInto for S { - fn unique_saturated_into(self) -> T { - self.try_into().unwrap_or_else(|_| Bounded::max_value()) - } + fn unique_saturated_into(self) -> T { + self.try_into().unwrap_or_else(|_| Bounded::max_value()) + } } /// Saturating arithmetic operations, returning maximum or minimum values instead of overflowing. pub trait Saturating { - /// Saturating addition. Compute `self + rhs`, saturating at the numeric bounds instead of - /// overflowing. - fn saturating_add(self, rhs: Self) -> Self; + /// Saturating addition. Compute `self + rhs`, saturating at the numeric bounds instead of + /// overflowing. + fn saturating_add(self, rhs: Self) -> Self; - /// Saturating subtraction. Compute `self - rhs`, saturating at the numeric bounds instead of - /// overflowing. - fn saturating_sub(self, rhs: Self) -> Self; + /// Saturating subtraction. Compute `self - rhs`, saturating at the numeric bounds instead of + /// overflowing. + fn saturating_sub(self, rhs: Self) -> Self; - /// Saturating multiply. Compute `self * rhs`, saturating at the numeric bounds instead of - /// overflowing. - fn saturating_mul(self, rhs: Self) -> Self; + /// Saturating multiply. Compute `self * rhs`, saturating at the numeric bounds instead of + /// overflowing. + fn saturating_mul(self, rhs: Self) -> Self; - /// Saturating exponentiation. Compute `self.pow(exp)`, saturating at the numeric bounds - /// instead of overflowing. - fn saturating_pow(self, exp: usize) -> Self; + /// Saturating exponentiation. Compute `self.pow(exp)`, saturating at the numeric bounds + /// instead of overflowing. 
+ fn saturating_pow(self, exp: usize) -> Self; } impl Saturating for T { - fn saturating_add(self, o: Self) -> Self { - ::saturating_add(self, o) - } + fn saturating_add(self, o: Self) -> Self { + ::saturating_add(self, o) + } - fn saturating_sub(self, o: Self) -> Self { - ::saturating_sub(self, o) - } + fn saturating_sub(self, o: Self) -> Self { + ::saturating_sub(self, o) + } - fn saturating_mul(self, o: Self) -> Self { - self.checked_mul(&o).unwrap_or_else(Bounded::max_value) - } + fn saturating_mul(self, o: Self) -> Self { + self.checked_mul(&o).unwrap_or_else(Bounded::max_value) + } - fn saturating_pow(self, exp: usize) -> Self { - checked_pow(self, exp).unwrap_or_else(Bounded::max_value) - } + fn saturating_pow(self, exp: usize) -> Self { + checked_pow(self, exp).unwrap_or_else(Bounded::max_value) + } } /// Convenience type to work around the highly unergonomic syntax needed /// to invoke the functions of overloaded generic traits, in this case /// `SaturatedFrom` and `SaturatedInto`. pub trait SaturatedConversion { - /// Convert from a value of `T` into an equivalent instance of `Self`. - /// - /// This just uses `UniqueSaturatedFrom` internally but with this - /// variant you can provide the destination type using turbofish syntax - /// in case Rust happens not to assume the correct type. - fn saturated_from(t: T) -> Self where Self: UniqueSaturatedFrom { - >::unique_saturated_from(t) - } - - /// Consume self to return an equivalent value of `T`. - /// - /// This just uses `UniqueSaturatedInto` internally but with this - /// variant you can provide the destination type using turbofish syntax - /// in case Rust happens not to assume the correct type. - fn saturated_into(self) -> T where Self: UniqueSaturatedInto { - >::unique_saturated_into(self) - } + /// Convert from a value of `T` into an equivalent instance of `Self`. 
+ /// + /// This just uses `UniqueSaturatedFrom` internally but with this + /// variant you can provide the destination type using turbofish syntax + /// in case Rust happens not to assume the correct type. + fn saturated_from(t: T) -> Self + where + Self: UniqueSaturatedFrom, + { + >::unique_saturated_from(t) + } + + /// Consume self to return an equivalent value of `T`. + /// + /// This just uses `UniqueSaturatedInto` internally but with this + /// variant you can provide the destination type using turbofish syntax + /// in case Rust happens not to assume the correct type. + fn saturated_into(self) -> T + where + Self: UniqueSaturatedInto, + { + >::unique_saturated_into(self) + } } impl SaturatedConversion for T {} diff --git a/primitives/authority-discovery/src/lib.rs b/primitives/authority-discovery/src/lib.rs index 68680ad759..f34ed9b554 100644 --- a/primitives/authority-discovery/src/lib.rs +++ b/primitives/authority-discovery/src/lib.rs @@ -21,30 +21,27 @@ use sp_std::vec::Vec; mod app { - use sp_application_crypto::{ - CryptoTypePublicPair, - key_types::AUTHORITY_DISCOVERY, - Public as _, - app_crypto, - sr25519}; - app_crypto!(sr25519, AUTHORITY_DISCOVERY); - - impl From for CryptoTypePublicPair { - fn from(key: Public) -> Self { - (&key).into() - } - } - - impl From<&Public> for CryptoTypePublicPair { - fn from(key: &Public) -> Self { - CryptoTypePublicPair(sr25519::CRYPTO_ID, key.to_raw_vec()) - } - } + use sp_application_crypto::{ + app_crypto, key_types::AUTHORITY_DISCOVERY, sr25519, CryptoTypePublicPair, Public as _, + }; + app_crypto!(sr25519, AUTHORITY_DISCOVERY); + + impl From for CryptoTypePublicPair { + fn from(key: Public) -> Self { + (&key).into() + } + } + + impl From<&Public> for CryptoTypePublicPair { + fn from(key: &Public) -> Self { + CryptoTypePublicPair(sr25519::CRYPTO_ID, key.to_raw_vec()) + } + } } sp_application_crypto::with_pair! { - /// An authority discovery authority keypair. 
- pub type AuthorityPair = app::Pair; + /// An authority discovery authority keypair. + pub type AuthorityPair = app::Pair; } /// An authority discovery authority identifier. @@ -54,12 +51,12 @@ pub type AuthorityId = app::Public; pub type AuthoritySignature = app::Signature; sp_api::decl_runtime_apis! { - /// The authority discovery api. - /// - /// This api is used by the `client/authority-discovery` module to retrieve identifiers - /// of the current authority set. - pub trait AuthorityDiscoveryApi { - /// Retrieve authority identifiers of the current authority set. - fn authorities() -> Vec; - } + /// The authority discovery api. + /// + /// This api is used by the `client/authority-discovery` module to retrieve identifiers + /// of the current authority set. + pub trait AuthorityDiscoveryApi { + /// Retrieve authority identifiers of the current authority set. + fn authorities() -> Vec; + } } diff --git a/primitives/authorship/src/lib.rs b/primitives/authorship/src/lib.rs index 53dac56dc4..6208954aee 100644 --- a/primitives/authorship/src/lib.rs +++ b/primitives/authorship/src/lib.rs @@ -18,10 +18,10 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{result::Result, prelude::*}; +use sp_std::{prelude::*, result::Result}; -use codec::{Encode, Decode}; -use sp_inherents::{Error, InherentIdentifier, InherentData, IsFatalError}; +use codec::{Decode, Encode}; +use sp_inherents::{Error, InherentData, InherentIdentifier, IsFatalError}; use sp_runtime::RuntimeString; /// The identifier for the `uncles` inherent. 
@@ -31,61 +31,66 @@ pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"uncles00"; #[derive(Encode, sp_runtime::RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode))] pub enum InherentError { - Uncles(RuntimeString), + Uncles(RuntimeString), } impl IsFatalError for InherentError { - fn is_fatal_error(&self) -> bool { - match self { - InherentError::Uncles(_) => true, - } - } + fn is_fatal_error(&self) -> bool { + match self { + InherentError::Uncles(_) => true, + } + } } /// Auxiliary trait to extract uncles inherent data. pub trait UnclesInherentData { - /// Get uncles. - fn uncles(&self) -> Result, Error>; + /// Get uncles. + fn uncles(&self) -> Result, Error>; } impl UnclesInherentData for InherentData { - fn uncles(&self) -> Result, Error> { - Ok(self.get_data(&INHERENT_IDENTIFIER)?.unwrap_or_default()) - } + fn uncles(&self) -> Result, Error> { + Ok(self.get_data(&INHERENT_IDENTIFIER)?.unwrap_or_default()) + } } /// Provider for inherent data. #[cfg(feature = "std")] pub struct InherentDataProvider { - inner: F, - _marker: std::marker::PhantomData, + inner: F, + _marker: std::marker::PhantomData, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(uncles_oracle: F) -> Self { - InherentDataProvider { inner: uncles_oracle, _marker: Default::default() } - } + pub fn new(uncles_oracle: F) -> Self { + InherentDataProvider { + inner: uncles_oracle, + _marker: Default::default(), + } + } } #[cfg(feature = "std")] -impl sp_inherents::ProvideInherentData for InherentDataProvider -where F: Fn() -> Vec +impl sp_inherents::ProvideInherentData + for InherentDataProvider +where + F: Fn() -> Vec, { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER - } - - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - let uncles = (self.inner)(); - if !uncles.is_empty() { - inherent_data.put_data(INHERENT_IDENTIFIER, &uncles) - } else { - Ok(()) - } - } - - fn error_to_string(&self, 
_error: &[u8]) -> Option { - Some(format!("no further information")) - } + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } + + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + let uncles = (self.inner)(); + if !uncles.is_empty() { + inherent_data.put_data(INHERENT_IDENTIFIER, &uncles) + } else { + Ok(()) + } + } + + fn error_to_string(&self, _error: &[u8]) -> Option { + Some(format!("no further information")) + } } diff --git a/primitives/block-builder/src/lib.rs b/primitives/block-builder/src/lib.rs index 732c937c1a..a717b55e63 100644 --- a/primitives/block-builder/src/lib.rs +++ b/primitives/block-builder/src/lib.rs @@ -20,27 +20,27 @@ use sp_runtime::{traits::Block as BlockT, ApplyExtrinsicResult}; -use sp_inherents::{InherentData, CheckInherentsResult}; +use sp_inherents::{CheckInherentsResult, InherentData}; sp_api::decl_runtime_apis! { - /// The `BlockBuilder` api trait that provides the required functionality for building a block. - #[api_version(4)] - pub trait BlockBuilder { - /// Apply the given extrinsic. - /// - /// Returns an inclusion outcome which specifies if this extrinsic is included in - /// this block or not. - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult; - /// Finish the current block. - #[renamed("finalise_block", 3)] - fn finalize_block() -> ::Header; - /// Generate inherent extrinsics. The inherent data will vary from chain to chain. - fn inherent_extrinsics( - inherent: InherentData, - ) -> sp_std::vec::Vec<::Extrinsic>; - /// Check that the inherents are valid. The inherent data will vary from chain to chain. - fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult; - /// Generate a random seed. - fn random_seed() -> ::Hash; - } + /// The `BlockBuilder` api trait that provides the required functionality for building a block. + #[api_version(4)] + pub trait BlockBuilder { + /// Apply the given extrinsic. 
+ /// + /// Returns an inclusion outcome which specifies if this extrinsic is included in + /// this block or not. + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult; + /// Finish the current block. + #[renamed("finalise_block", 3)] + fn finalize_block() -> ::Header; + /// Generate inherent extrinsics. The inherent data will vary from chain to chain. + fn inherent_extrinsics( + inherent: InherentData, + ) -> sp_std::vec::Vec<::Extrinsic>; + /// Check that the inherents are valid. The inherent data will vary from chain to chain. + fn check_inherents(block: Block, data: InherentData) -> CheckInherentsResult; + /// Generate a random seed. + fn random_seed() -> ::Hash; + } } diff --git a/primitives/blockchain/src/backend.rs b/primitives/blockchain/src/backend.rs index 45d627a1c2..769593bdd3 100644 --- a/primitives/blockchain/src/backend.rs +++ b/primitives/blockchain/src/backend.rs @@ -18,11 +18,11 @@ use std::sync::Arc; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; -use sp_runtime::generic::BlockId; -use sp_runtime::Justification; use log::warn; use parking_lot::RwLock; +use sp_runtime::generic::BlockId; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; +use sp_runtime::Justification; use crate::header_metadata::HeaderMetadata; @@ -30,254 +30,271 @@ use crate::error::{Error, Result}; /// Blockchain database header backend. Does not perform any validation. pub trait HeaderBackend: Send + Sync { - /// Get block header. Returns `None` if block is not found. - fn header(&self, id: BlockId) -> Result>; - /// Get blockchain info. - fn info(&self) -> Info; - /// Get block status. - fn status(&self, id: BlockId) -> Result; - /// Get block number by hash. Returns `None` if the header is not in the chain. - fn number(&self, hash: Block::Hash) -> Result::Header as HeaderT>::Number>>; - /// Get block hash by number. Returns `None` if the header is not in the chain. 
- fn hash(&self, number: NumberFor) -> Result>; - - /// Convert an arbitrary block ID into a block hash. - fn block_hash_from_id(&self, id: &BlockId) -> Result> { - match *id { - BlockId::Hash(h) => Ok(Some(h)), - BlockId::Number(n) => self.hash(n), - } - } - - /// Convert an arbitrary block ID into a block hash. - fn block_number_from_id(&self, id: &BlockId) -> Result>> { - match *id { - BlockId::Hash(_) => Ok(self.header(*id)?.map(|h| h.number().clone())), - BlockId::Number(n) => Ok(Some(n)), - } - } - - /// Get block header. Returns `UnknownBlock` error if block is not found. - fn expect_header(&self, id: BlockId) -> Result { - self.header(id)?.ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) - } - - /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is not found. - fn expect_block_number_from_id(&self, id: &BlockId) -> Result> { - self.block_number_from_id(id) - .and_then(|n| n.ok_or_else(|| - Error::UnknownBlock(format!("Expect block number from id: {}", id)) - )) - } - - /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is not found. - fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { - self.block_hash_from_id(id) - .and_then(|n| n.ok_or_else(|| - Error::UnknownBlock(format!("Expect block hash from id: {}", id)) - )) - } + /// Get block header. Returns `None` if block is not found. + fn header(&self, id: BlockId) -> Result>; + /// Get blockchain info. + fn info(&self) -> Info; + /// Get block status. + fn status(&self, id: BlockId) -> Result; + /// Get block number by hash. Returns `None` if the header is not in the chain. + fn number( + &self, + hash: Block::Hash, + ) -> Result::Header as HeaderT>::Number>>; + /// Get block hash by number. Returns `None` if the header is not in the chain. + fn hash(&self, number: NumberFor) -> Result>; + + /// Convert an arbitrary block ID into a block hash. 
+ fn block_hash_from_id(&self, id: &BlockId) -> Result> { + match *id { + BlockId::Hash(h) => Ok(Some(h)), + BlockId::Number(n) => self.hash(n), + } + } + + /// Convert an arbitrary block ID into a block hash. + fn block_number_from_id(&self, id: &BlockId) -> Result>> { + match *id { + BlockId::Hash(_) => Ok(self.header(*id)?.map(|h| h.number().clone())), + BlockId::Number(n) => Ok(Some(n)), + } + } + + /// Get block header. Returns `UnknownBlock` error if block is not found. + fn expect_header(&self, id: BlockId) -> Result { + self.header(id)? + .ok_or_else(|| Error::UnknownBlock(format!("Expect header: {}", id))) + } + + /// Convert an arbitrary block ID into a block number. Returns `UnknownBlock` error if block is not found. + fn expect_block_number_from_id(&self, id: &BlockId) -> Result> { + self.block_number_from_id(id).and_then(|n| { + n.ok_or_else(|| Error::UnknownBlock(format!("Expect block number from id: {}", id))) + }) + } + + /// Convert an arbitrary block ID into a block hash. Returns `UnknownBlock` error if block is not found. + fn expect_block_hash_from_id(&self, id: &BlockId) -> Result { + self.block_hash_from_id(id).and_then(|n| { + n.ok_or_else(|| Error::UnknownBlock(format!("Expect block hash from id: {}", id))) + }) + } } /// Blockchain database backend. Does not perform any validation. -pub trait Backend: HeaderBackend + HeaderMetadata { - /// Get block body. Returns `None` if block is not found. - fn body(&self, id: BlockId) -> Result::Extrinsic>>>; - /// Get block justification. Returns `None` if justification does not exist. - fn justification(&self, id: BlockId) -> Result>; - /// Get last finalized block hash. - fn last_finalized(&self) -> Result; - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; - - /// Returns hashes of all blocks that are leaves of the block tree. - /// in other words, that have no children, are chain heads. 
- /// Results must be ordered best (longest, highest) chain first. - fn leaves(&self) -> Result>; - - /// Return hashes of all blocks that are children of the block with `parent_hash`. - fn children(&self, parent_hash: Block::Hash) -> Result>; - - /// Get the most recent block hash of the best (longest) chains - /// that contain block with the given `target_hash`. - /// - /// The search space is always limited to blocks which are in the finalized - /// chain or descendents of it. - /// - /// If `maybe_max_block_number` is `Some(max_block_number)` - /// the search is limited to block `numbers <= max_block_number`. - /// in other words as if there were no blocks greater `max_block_number`. - /// Returns `Ok(None)` if `target_hash` is not found in search space. - /// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) - fn best_containing( - &self, - target_hash: Block::Hash, - maybe_max_number: Option>, - import_lock: &RwLock<()>, - ) -> Result> { - let target_header = { - match self.header(BlockId::Hash(target_hash))? { - Some(x) => x, - // target not in blockchain - None => { return Ok(None); }, - } - }; - - if let Some(max_number) = maybe_max_number { - // target outside search range - if target_header.number() > &max_number { - return Ok(None); - } - } - - let leaves = { - // ensure no blocks are imported during this code block. - // an import could trigger a reorg which could change the canonical chain. - // we depend on the canonical chain staying the same during this code block. - let _import_guard = import_lock.read(); - - let info = self.info(); - - // this can be `None` if the best chain is shorter than the target header. 
- let maybe_canon_hash = self.hash(*target_header.number())?; - - if maybe_canon_hash.as_ref() == Some(&target_hash) { - // if a `max_number` is given we try to fetch the block at the - // given depth, if it doesn't exist or `max_number` is not - // provided, we continue to search from all leaves below. - if let Some(max_number) = maybe_max_number { - if let Some(header) = self.hash(max_number)? { - return Ok(Some(header)); - } - } - } else if info.finalized_number >= *target_header.number() { - // header is on a dead fork. - return Ok(None); - } - - self.leaves()? - }; - - // for each chain. longest chain first. shortest last - for leaf_hash in leaves { - // start at the leaf - let mut current_hash = leaf_hash; - - // if search is not restricted then the leaf is the best - let mut best_hash = leaf_hash; - - // go backwards entering the search space - // waiting until we are <= max_number - if let Some(max_number) = maybe_max_number { - loop { - let current_header = self.header(BlockId::Hash(current_hash.clone()))? - .ok_or_else(|| Error::from(format!("failed to get header for hash {}", current_hash)))?; - - if current_header.number() <= &max_number { - best_hash = current_header.hash(); - break; - } - - current_hash = *current_header.parent_hash(); - } - } - - // go backwards through the chain (via parent links) - loop { - // until we find target - if current_hash == target_hash { - return Ok(Some(best_hash)); - } - - let current_header = self.header(BlockId::Hash(current_hash.clone()))? - .ok_or_else(|| Error::from(format!("failed to get header for hash {}", current_hash)))?; - - // stop search in this chain once we go below the target's block number - if current_header.number() < target_header.number() { - break; - } - - current_hash = *current_header.parent_hash(); - } - } - - // header may be on a dead fork -- the only leaves that are considered are - // those which can still be finalized. 
- // - // FIXME #1558 only issue this warning when not on a dead fork - warn!( - "Block {:?} exists in chain but not found when following all \ +pub trait Backend: + HeaderBackend + HeaderMetadata +{ + /// Get block body. Returns `None` if block is not found. + fn body(&self, id: BlockId) -> Result::Extrinsic>>>; + /// Get block justification. Returns `None` if justification does not exist. + fn justification(&self, id: BlockId) -> Result>; + /// Get last finalized block hash. + fn last_finalized(&self) -> Result; + /// Returns data cache reference, if it is enabled on this backend. + fn cache(&self) -> Option>>; + + /// Returns hashes of all blocks that are leaves of the block tree. + /// in other words, that have no children, are chain heads. + /// Results must be ordered best (longest, highest) chain first. + fn leaves(&self) -> Result>; + + /// Return hashes of all blocks that are children of the block with `parent_hash`. + fn children(&self, parent_hash: Block::Hash) -> Result>; + + /// Get the most recent block hash of the best (longest) chains + /// that contain block with the given `target_hash`. + /// + /// The search space is always limited to blocks which are in the finalized + /// chain or descendents of it. + /// + /// If `maybe_max_block_number` is `Some(max_block_number)` + /// the search is limited to block `numbers <= max_block_number`. + /// in other words as if there were no blocks greater `max_block_number`. + /// Returns `Ok(None)` if `target_hash` is not found in search space. + /// TODO: document time complexity of this, see [#1444](https://github.com/paritytech/substrate/issues/1444) + fn best_containing( + &self, + target_hash: Block::Hash, + maybe_max_number: Option>, + import_lock: &RwLock<()>, + ) -> Result> { + let target_header = { + match self.header(BlockId::Hash(target_hash))? 
{ + Some(x) => x, + // target not in blockchain + None => { + return Ok(None); + } + } + }; + + if let Some(max_number) = maybe_max_number { + // target outside search range + if target_header.number() > &max_number { + return Ok(None); + } + } + + let leaves = { + // ensure no blocks are imported during this code block. + // an import could trigger a reorg which could change the canonical chain. + // we depend on the canonical chain staying the same during this code block. + let _import_guard = import_lock.read(); + + let info = self.info(); + + // this can be `None` if the best chain is shorter than the target header. + let maybe_canon_hash = self.hash(*target_header.number())?; + + if maybe_canon_hash.as_ref() == Some(&target_hash) { + // if a `max_number` is given we try to fetch the block at the + // given depth, if it doesn't exist or `max_number` is not + // provided, we continue to search from all leaves below. + if let Some(max_number) = maybe_max_number { + if let Some(header) = self.hash(max_number)? { + return Ok(Some(header)); + } + } + } else if info.finalized_number >= *target_header.number() { + // header is on a dead fork. + return Ok(None); + } + + self.leaves()? + }; + + // for each chain. longest chain first. shortest last + for leaf_hash in leaves { + // start at the leaf + let mut current_hash = leaf_hash; + + // if search is not restricted then the leaf is the best + let mut best_hash = leaf_hash; + + // go backwards entering the search space + // waiting until we are <= max_number + if let Some(max_number) = maybe_max_number { + loop { + let current_header = self + .header(BlockId::Hash(current_hash.clone()))? 
+ .ok_or_else(|| { + Error::from(format!("failed to get header for hash {}", current_hash)) + })?; + + if current_header.number() <= &max_number { + best_hash = current_header.hash(); + break; + } + + current_hash = *current_header.parent_hash(); + } + } + + // go backwards through the chain (via parent links) + loop { + // until we find target + if current_hash == target_hash { + return Ok(Some(best_hash)); + } + + let current_header = self + .header(BlockId::Hash(current_hash.clone()))? + .ok_or_else(|| { + Error::from(format!("failed to get header for hash {}", current_hash)) + })?; + + // stop search in this chain once we go below the target's block number + if current_header.number() < target_header.number() { + break; + } + + current_hash = *current_header.parent_hash(); + } + } + + // header may be on a dead fork -- the only leaves that are considered are + // those which can still be finalized. + // + // FIXME #1558 only issue this warning when not on a dead fork + warn!( + "Block {:?} exists in chain but not found when following all \ leaves backwards. Number limit = {:?}", - target_hash, - maybe_max_number, - ); + target_hash, maybe_max_number, + ); - Ok(None) - } + Ok(None) + } } /// Provides access to the optional cache. pub trait ProvideCache { - /// Returns data cache reference, if it is enabled on this backend. - fn cache(&self) -> Option>>; + /// Returns data cache reference, if it is enabled on this backend. + fn cache(&self) -> Option>>; } /// Blockchain optional data cache. pub trait Cache: Send + Sync { - /// Initialize genesis value for the given cache. - /// - /// The operation should be performed once before anything else is inserted in the cache. - /// Otherwise cache may end up in inconsistent state. - fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; - /// Returns cached value by the given key. - /// - /// Returned tuple is the range where value has been active and the value itself. 
- /// Fails if read from cache storage fails or if the value for block is discarded - /// (i.e. if block is earlier that best finalized, but it is not in canonical chain). - fn get_at( - &self, - key: &well_known_cache_keys::Id, - block: &BlockId, - ) -> Result, Block::Hash), Option<(NumberFor, Block::Hash)>, Vec)>>; + /// Initialize genesis value for the given cache. + /// + /// The operation should be performed once before anything else is inserted in the cache. + /// Otherwise cache may end up in inconsistent state. + fn initialize(&self, key: &well_known_cache_keys::Id, value_at_genesis: Vec) -> Result<()>; + /// Returns cached value by the given key. + /// + /// Returned tuple is the range where value has been active and the value itself. + /// Fails if read from cache storage fails or if the value for block is discarded + /// (i.e. if block is earlier that best finalized, but it is not in canonical chain). + fn get_at( + &self, + key: &well_known_cache_keys::Id, + block: &BlockId, + ) -> Result< + Option<( + (NumberFor, Block::Hash), + Option<(NumberFor, Block::Hash)>, + Vec, + )>, + >; } /// Blockchain info #[derive(Debug, Eq, PartialEq)] pub struct Info { - /// Best block hash. - pub best_hash: Block::Hash, - /// Best block number. - pub best_number: <::Header as HeaderT>::Number, - /// Genesis block hash. - pub genesis_hash: Block::Hash, - /// The head of the finalized chain. - pub finalized_hash: Block::Hash, - /// Last finalized block number. - pub finalized_number: <::Header as HeaderT>::Number, - /// Number of concurrent leave forks. - pub number_leaves: usize + /// Best block hash. + pub best_hash: Block::Hash, + /// Best block number. + pub best_number: <::Header as HeaderT>::Number, + /// Genesis block hash. + pub genesis_hash: Block::Hash, + /// The head of the finalized chain. + pub finalized_hash: Block::Hash, + /// Last finalized block number. + pub finalized_number: <::Header as HeaderT>::Number, + /// Number of concurrent leave forks. 
+ pub number_leaves: usize, } /// Block status. #[derive(Debug, PartialEq, Eq)] pub enum BlockStatus { - /// Already in the blockchain. - InChain, - /// Not in the queue or the blockchain. - Unknown, + /// Already in the blockchain. + InChain, + /// Not in the queue or the blockchain. + Unknown, } /// A list of all well known keys in the blockchain cache. pub mod well_known_cache_keys { - /// The type representing cache keys. - pub type Id = sp_consensus::import_queue::CacheKeyId; + /// The type representing cache keys. + pub type Id = sp_consensus::import_queue::CacheKeyId; - /// A list of authorities. - pub const AUTHORITIES: Id = *b"auth"; + /// A list of authorities. + pub const AUTHORITIES: Id = *b"auth"; - /// Current Epoch data. - pub const EPOCH: Id = *b"epch"; + /// Current Epoch data. + pub const EPOCH: Id = *b"epch"; - /// Changes trie configuration. - pub const CHANGES_TRIE_CONFIG: Id = *b"chtr"; + /// Changes trie configuration. + pub const CHANGES_TRIE_CONFIG: Id = *b"chtr"; } diff --git a/primitives/blockchain/src/error.rs b/primitives/blockchain/src/error.rs index e479b8abe9..03a02931cf 100644 --- a/primitives/blockchain/src/error.rs +++ b/primitives/blockchain/src/error.rs @@ -16,12 +16,12 @@ //! Substrate client possible errors. -use std::{self, error, result}; -use sp_state_machine; -use sp_runtime::transaction_validity::TransactionValidityError; -use sp_consensus; -use derive_more::{Display, From}; use codec::Error as CodecError; +use derive_more::{Display, From}; +use sp_consensus; +use sp_runtime::transaction_validity::TransactionValidityError; +use sp_state_machine; +use std::{self, error, result}; /// Client Result type alias pub type Result = result::Result; @@ -29,135 +29,135 @@ pub type Result = result::Result; /// Error when the runtime failed to apply an extrinsic. #[derive(Debug, Display)] pub enum ApplyExtrinsicFailed { - /// The transaction cannot be included into the current block. 
- /// - /// This doesn't necessary mean that the transaction itself is invalid, but it might be just - /// unappliable onto the current block. - #[display(fmt = "Extrinsic is not valid: {:?}", _0)] - Validity(TransactionValidityError), - /// This is used for miscellaneous errors that can be represented by string and not handleable. - /// - /// This will become obsolete with complete migration to v4 APIs. - #[display(fmt = "Extrinsic failed: {:?}", _0)] - Msg(String), + /// The transaction cannot be included into the current block. + /// + /// This doesn't necessary mean that the transaction itself is invalid, but it might be just + /// unappliable onto the current block. + #[display(fmt = "Extrinsic is not valid: {:?}", _0)] + Validity(TransactionValidityError), + /// This is used for miscellaneous errors that can be represented by string and not handleable. + /// + /// This will become obsolete with complete migration to v4 APIs. + #[display(fmt = "Extrinsic failed: {:?}", _0)] + Msg(String), } /// Substrate Client error #[derive(Debug, Display, From)] pub enum Error { - /// Consensus Error - #[display(fmt = "Consensus: {}", _0)] - Consensus(sp_consensus::Error), - /// Backend error. - #[display(fmt = "Backend error: {}", _0)] - #[from(ignore)] - Backend(String), - /// Unknown block. - #[display(fmt = "UnknownBlock: {}", _0)] - #[from(ignore)] - UnknownBlock(String), - /// The `apply_extrinsic` is not valid due to the given `TransactionValidityError`. - #[display(fmt = "{:?}", _0)] - ApplyExtrinsicFailed(ApplyExtrinsicFailed), - /// Execution error. - #[display(fmt = "Execution: {}", _0)] - Execution(Box), - /// Blockchain error. - #[display(fmt = "Blockchain: {}", _0)] - Blockchain(Box), - /// Invalid authorities set received from the runtime. - #[display(fmt = "Current state of blockchain has invalid authorities set")] - InvalidAuthoritiesSet, - /// Could not get runtime version. 
- #[display(fmt = "Failed to get runtime version: {}", _0)] - #[from(ignore)] - VersionInvalid(String), - /// Genesis config is invalid. - #[display(fmt = "Genesis config provided is invalid")] - GenesisInvalid, - /// Error decoding header justification. - #[display(fmt = "error decoding justification for header")] - JustificationDecode, - /// Justification for header is correctly encoded, but invalid. - #[display(fmt = "bad justification for header: {}", _0)] - #[from(ignore)] - BadJustification(String), - /// Not available on light client. - #[display(fmt = "This method is not currently available when running in light client mode")] - NotAvailableOnLightClient, - /// Invalid remote CHT-based proof. - #[display(fmt = "Remote node has responded with invalid header proof")] - InvalidCHTProof, - /// Remote fetch has been cancelled. - #[display(fmt = "Remote data fetch has been cancelled")] - RemoteFetchCancelled, - /// Remote fetch has been failed. - #[display(fmt = "Remote data fetch has been failed")] - RemoteFetchFailed, - /// Error decoding call result. - #[display(fmt = "Error decoding call result of {}: {}", _0, _1)] - CallResultDecode(&'static str, CodecError), - /// Error converting a parameter between runtime and node. - #[display(fmt = "Error converting `{}` between runtime and node", _0)] - #[from(ignore)] - RuntimeParamConversion(String), - /// Changes tries are not supported. - #[display(fmt = "Changes tries are not supported by the runtime")] - ChangesTriesNotSupported, - /// Error reading changes tries configuration. - #[display(fmt = "Error reading changes tries configuration")] - ErrorReadingChangesTriesConfig, - /// Key changes query has failed. - #[display(fmt = "Failed to check changes proof: {}", _0)] - #[from(ignore)] - ChangesTrieAccessFailed(String), - /// Last finalized block not parent of current. 
- #[display(fmt = "Did not finalize blocks in sequential order.")] - #[from(ignore)] - NonSequentialFinalization(String), - /// Safety violation: new best block not descendent of last finalized. - #[display(fmt = "Potential long-range attack: block not in finalized chain.")] - NotInFinalizedChain, - /// Hash that is required for building CHT is missing. - #[display(fmt = "Failed to get hash of block for building CHT")] - MissingHashRequiredForCHT, - /// Invalid calculated state root on block import. - #[display(fmt = "Calculated state root does not match.")] - InvalidStateRoot, - /// Incomplete block import pipeline. - #[display(fmt = "Incomplete block import pipeline.")] - IncompletePipeline, - #[display(fmt = "Transaction pool not ready for block production.")] - TransactionPoolNotReady, - /// A convenience variant for String - #[display(fmt = "{}", _0)] - Msg(String), + /// Consensus Error + #[display(fmt = "Consensus: {}", _0)] + Consensus(sp_consensus::Error), + /// Backend error. + #[display(fmt = "Backend error: {}", _0)] + #[from(ignore)] + Backend(String), + /// Unknown block. + #[display(fmt = "UnknownBlock: {}", _0)] + #[from(ignore)] + UnknownBlock(String), + /// The `apply_extrinsic` is not valid due to the given `TransactionValidityError`. + #[display(fmt = "{:?}", _0)] + ApplyExtrinsicFailed(ApplyExtrinsicFailed), + /// Execution error. + #[display(fmt = "Execution: {}", _0)] + Execution(Box), + /// Blockchain error. + #[display(fmt = "Blockchain: {}", _0)] + Blockchain(Box), + /// Invalid authorities set received from the runtime. + #[display(fmt = "Current state of blockchain has invalid authorities set")] + InvalidAuthoritiesSet, + /// Could not get runtime version. + #[display(fmt = "Failed to get runtime version: {}", _0)] + #[from(ignore)] + VersionInvalid(String), + /// Genesis config is invalid. + #[display(fmt = "Genesis config provided is invalid")] + GenesisInvalid, + /// Error decoding header justification. 
+ #[display(fmt = "error decoding justification for header")] + JustificationDecode, + /// Justification for header is correctly encoded, but invalid. + #[display(fmt = "bad justification for header: {}", _0)] + #[from(ignore)] + BadJustification(String), + /// Not available on light client. + #[display(fmt = "This method is not currently available when running in light client mode")] + NotAvailableOnLightClient, + /// Invalid remote CHT-based proof. + #[display(fmt = "Remote node has responded with invalid header proof")] + InvalidCHTProof, + /// Remote fetch has been cancelled. + #[display(fmt = "Remote data fetch has been cancelled")] + RemoteFetchCancelled, + /// Remote fetch has been failed. + #[display(fmt = "Remote data fetch has been failed")] + RemoteFetchFailed, + /// Error decoding call result. + #[display(fmt = "Error decoding call result of {}: {}", _0, _1)] + CallResultDecode(&'static str, CodecError), + /// Error converting a parameter between runtime and node. + #[display(fmt = "Error converting `{}` between runtime and node", _0)] + #[from(ignore)] + RuntimeParamConversion(String), + /// Changes tries are not supported. + #[display(fmt = "Changes tries are not supported by the runtime")] + ChangesTriesNotSupported, + /// Error reading changes tries configuration. + #[display(fmt = "Error reading changes tries configuration")] + ErrorReadingChangesTriesConfig, + /// Key changes query has failed. + #[display(fmt = "Failed to check changes proof: {}", _0)] + #[from(ignore)] + ChangesTrieAccessFailed(String), + /// Last finalized block not parent of current. + #[display(fmt = "Did not finalize blocks in sequential order.")] + #[from(ignore)] + NonSequentialFinalization(String), + /// Safety violation: new best block not descendent of last finalized. + #[display(fmt = "Potential long-range attack: block not in finalized chain.")] + NotInFinalizedChain, + /// Hash that is required for building CHT is missing. 
+ #[display(fmt = "Failed to get hash of block for building CHT")] + MissingHashRequiredForCHT, + /// Invalid calculated state root on block import. + #[display(fmt = "Calculated state root does not match.")] + InvalidStateRoot, + /// Incomplete block import pipeline. + #[display(fmt = "Incomplete block import pipeline.")] + IncompletePipeline, + #[display(fmt = "Transaction pool not ready for block production.")] + TransactionPoolNotReady, + /// A convenience variant for String + #[display(fmt = "{}", _0)] + Msg(String), } impl error::Error for Error { - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - Error::Consensus(e) => Some(e), - Error::Blockchain(e) => Some(e), - _ => None, - } - } + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self { + Error::Consensus(e) => Some(e), + Error::Blockchain(e) => Some(e), + _ => None, + } + } } impl<'a> From<&'a str> for Error { - fn from(s: &'a str) -> Self { - Error::Msg(s.into()) - } + fn from(s: &'a str) -> Self { + Error::Msg(s.into()) + } } impl Error { - /// Chain a blockchain error. - pub fn from_blockchain(e: Box) -> Self { - Error::Blockchain(e) - } + /// Chain a blockchain error. + pub fn from_blockchain(e: Box) -> Self { + Error::Blockchain(e) + } - /// Chain a state error. - pub fn from_state(e: Box) -> Self { - Error::Execution(e) - } + /// Chain a state error. + pub fn from_state(e: Box) -> Self { + Error::Execution(e) + } } diff --git a/primitives/blockchain/src/header_metadata.rs b/primitives/blockchain/src/header_metadata.rs index 85a94624c9..21ee2bfb26 100644 --- a/primitives/blockchain/src/header_metadata.rs +++ b/primitives/blockchain/src/header_metadata.rs @@ -17,9 +17,9 @@ //! Implements tree backend, cached header metadata and algorithms //! to compute routes efficiently over the tree of headers. 
-use sp_runtime::traits::{Block as BlockT, NumberFor, Header}; -use parking_lot::RwLock; use lru::LruCache; +use parking_lot::RwLock; +use sp_runtime::traits::{Block as BlockT, Header, NumberFor}; /// Set to the expected max difference between `best` and `finalized` blocks at sync. const LRU_CACHE_SIZE: usize = 5_000; @@ -31,132 +31,132 @@ const LRU_CACHE_SIZE: usize = 5_000; /// lca(best, final), lca(best + 1, final), lca(best + 2, final), etc. /// The first call is O(h) but the others are O(1). pub fn lowest_common_ancestor + ?Sized>( - backend: &T, - id_one: Block::Hash, - id_two: Block::Hash, + backend: &T, + id_one: Block::Hash, + id_two: Block::Hash, ) -> Result, T::Error> { - let mut header_one = backend.header_metadata(id_one)?; - let mut header_two = backend.header_metadata(id_two)?; - - let mut orig_header_one = header_one.clone(); - let mut orig_header_two = header_two.clone(); - - // We move through ancestor links as much as possible, since ancestor >= parent. - - while header_one.number > header_two.number { - let ancestor_one = backend.header_metadata(header_one.ancestor)?; - - if ancestor_one.number >= header_two.number { - header_one = ancestor_one; - } else { - break - } - } - - while header_one.number < header_two.number { - let ancestor_two = backend.header_metadata(header_two.ancestor)?; - - if ancestor_two.number >= header_one.number { - header_two = ancestor_two; - } else { - break - } - } - - // Then we move the remaining path using parent links. - - while header_one.hash != header_two.hash { - if header_one.number > header_two.number { - header_one = backend.header_metadata(header_one.parent)?; - } else { - header_two = backend.header_metadata(header_two.parent)?; - } - } - - // Update cached ancestor links. 
- - if orig_header_one.number > header_one.number { - orig_header_one.ancestor = header_one.hash; - backend.insert_header_metadata(orig_header_one.hash, orig_header_one); - } - - if orig_header_two.number > header_one.number { - orig_header_two.ancestor = header_one.hash; - backend.insert_header_metadata(orig_header_two.hash, orig_header_two); - } - - Ok(HashAndNumber { - hash: header_one.hash, - number: header_one.number, - }) + let mut header_one = backend.header_metadata(id_one)?; + let mut header_two = backend.header_metadata(id_two)?; + + let mut orig_header_one = header_one.clone(); + let mut orig_header_two = header_two.clone(); + + // We move through ancestor links as much as possible, since ancestor >= parent. + + while header_one.number > header_two.number { + let ancestor_one = backend.header_metadata(header_one.ancestor)?; + + if ancestor_one.number >= header_two.number { + header_one = ancestor_one; + } else { + break; + } + } + + while header_one.number < header_two.number { + let ancestor_two = backend.header_metadata(header_two.ancestor)?; + + if ancestor_two.number >= header_one.number { + header_two = ancestor_two; + } else { + break; + } + } + + // Then we move the remaining path using parent links. + + while header_one.hash != header_two.hash { + if header_one.number > header_two.number { + header_one = backend.header_metadata(header_one.parent)?; + } else { + header_two = backend.header_metadata(header_two.parent)?; + } + } + + // Update cached ancestor links. + + if orig_header_one.number > header_one.number { + orig_header_one.ancestor = header_one.hash; + backend.insert_header_metadata(orig_header_one.hash, orig_header_one); + } + + if orig_header_two.number > header_one.number { + orig_header_two.ancestor = header_one.hash; + backend.insert_header_metadata(orig_header_two.hash, orig_header_two); + } + + Ok(HashAndNumber { + hash: header_one.hash, + number: header_one.number, + }) } /// Compute a tree-route between two blocks. 
See tree-route docs for more details. pub fn tree_route>( - backend: &T, - from: Block::Hash, - to: Block::Hash, + backend: &T, + from: Block::Hash, + to: Block::Hash, ) -> Result, T::Error> { - let mut from = backend.header_metadata(from)?; - let mut to = backend.header_metadata(to)?; - - let mut from_branch = Vec::new(); - let mut to_branch = Vec::new(); - - while to.number > from.number { - to_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); - - to = backend.header_metadata(to.parent)?; - } - - while from.number > to.number { - from_branch.push(HashAndNumber { - number: from.number, - hash: from.hash, - }); - from = backend.header_metadata(from.parent)?; - } - - // numbers are equal now. walk backwards until the block is the same - - while to.hash != from.hash { - to_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); - to = backend.header_metadata(to.parent)?; - - from_branch.push(HashAndNumber { - number: from.number, - hash: from.hash, - }); - from = backend.header_metadata(from.parent)?; - } - - // add the pivot block. and append the reversed to-branch (note that it's reverse order originals) - let pivot = from_branch.len(); - from_branch.push(HashAndNumber { - number: to.number, - hash: to.hash, - }); - from_branch.extend(to_branch.into_iter().rev()); - - Ok(TreeRoute { - route: from_branch, - pivot, - }) + let mut from = backend.header_metadata(from)?; + let mut to = backend.header_metadata(to)?; + + let mut from_branch = Vec::new(); + let mut to_branch = Vec::new(); + + while to.number > from.number { + to_branch.push(HashAndNumber { + number: to.number, + hash: to.hash, + }); + + to = backend.header_metadata(to.parent)?; + } + + while from.number > to.number { + from_branch.push(HashAndNumber { + number: from.number, + hash: from.hash, + }); + from = backend.header_metadata(from.parent)?; + } + + // numbers are equal now. 
walk backwards until the block is the same + + while to.hash != from.hash { + to_branch.push(HashAndNumber { + number: to.number, + hash: to.hash, + }); + to = backend.header_metadata(to.parent)?; + + from_branch.push(HashAndNumber { + number: from.number, + hash: from.hash, + }); + from = backend.header_metadata(from.parent)?; + } + + // add the pivot block. and append the reversed to-branch (note that it's reverse order originals) + let pivot = from_branch.len(); + from_branch.push(HashAndNumber { + number: to.number, + hash: to.hash, + }); + from_branch.extend(to_branch.into_iter().rev()); + + Ok(TreeRoute { + route: from_branch, + pivot, + }) } /// Hash and number of a block. #[derive(Debug, Clone)] pub struct HashAndNumber { - /// The number of the block. - pub number: NumberFor, - /// The hash of the block. - pub hash: Block::Hash, + /// The number of the block. + pub number: NumberFor, + /// The hash of the block. + pub hash: Block::Hash, } /// A tree-route from one block to another in the chain. @@ -183,99 +183,114 @@ pub struct HashAndNumber { /// ``` #[derive(Debug)] pub struct TreeRoute { - route: Vec>, - pivot: usize, + route: Vec>, + pivot: usize, } impl TreeRoute { - /// Get a slice of all retracted blocks in reverse order (towards common ancestor) - pub fn retracted(&self) -> &[HashAndNumber] { - &self.route[..self.pivot] - } - - /// Get the common ancestor block. This might be one of the two blocks of the - /// route. - pub fn common_block(&self) -> &HashAndNumber { - self.route.get(self.pivot).expect("tree-routes are computed between blocks; \ + /// Get a slice of all retracted blocks in reverse order (towards common ancestor) + pub fn retracted(&self) -> &[HashAndNumber] { + &self.route[..self.pivot] + } + + /// Get the common ancestor block. This might be one of the two blocks of the + /// route. 
+ pub fn common_block(&self) -> &HashAndNumber { + self.route.get(self.pivot).expect( + "tree-routes are computed between blocks; \ which are included in the route; \ - thus it is never empty; qed") - } - - /// Get a slice of enacted blocks (descendents of the common ancestor) - pub fn enacted(&self) -> &[HashAndNumber] { - &self.route[self.pivot + 1 ..] - } + thus it is never empty; qed", + ) + } + + /// Get a slice of enacted blocks (descendents of the common ancestor) + pub fn enacted(&self) -> &[HashAndNumber] { + &self.route[self.pivot + 1..] + } } /// Handles header metadata: hash, number, parent hash, etc. pub trait HeaderMetadata { - /// Error used in case the header metadata is not found. - type Error; - - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error>; - fn insert_header_metadata(&self, hash: Block::Hash, header_metadata: CachedHeaderMetadata); - fn remove_header_metadata(&self, hash: Block::Hash); + /// Error used in case the header metadata is not found. + type Error; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error>; + fn insert_header_metadata( + &self, + hash: Block::Hash, + header_metadata: CachedHeaderMetadata, + ); + fn remove_header_metadata(&self, hash: Block::Hash); } /// Caches header metadata in an in-memory LRU cache. pub struct HeaderMetadataCache { - cache: RwLock>>, + cache: RwLock>>, } impl HeaderMetadataCache { - /// Creates a new LRU header metadata cache with `capacity`. - pub fn new(capacity: usize) -> Self { - HeaderMetadataCache { - cache: RwLock::new(LruCache::new(capacity)), - } - } + /// Creates a new LRU header metadata cache with `capacity`. 
+ pub fn new(capacity: usize) -> Self { + HeaderMetadataCache { + cache: RwLock::new(LruCache::new(capacity)), + } + } } impl Default for HeaderMetadataCache { - fn default() -> Self { - HeaderMetadataCache { - cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)), - } - } + fn default() -> Self { + HeaderMetadataCache { + cache: RwLock::new(LruCache::new(LRU_CACHE_SIZE)), + } + } } impl HeaderMetadata for HeaderMetadataCache { - type Error = String; - - fn header_metadata(&self, hash: Block::Hash) -> Result, Self::Error> { - self.cache.write().get(&hash).cloned() - .ok_or("header metadata not found in cache".to_owned()) - } - - fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { - self.cache.write().put(hash, metadata); - } - - fn remove_header_metadata(&self, hash: Block::Hash) { - self.cache.write().pop(&hash); - } + type Error = String; + + fn header_metadata( + &self, + hash: Block::Hash, + ) -> Result, Self::Error> { + self.cache + .write() + .get(&hash) + .cloned() + .ok_or("header metadata not found in cache".to_owned()) + } + + fn insert_header_metadata(&self, hash: Block::Hash, metadata: CachedHeaderMetadata) { + self.cache.write().put(hash, metadata); + } + + fn remove_header_metadata(&self, hash: Block::Hash) { + self.cache.write().pop(&hash); + } } /// Cached header metadata. Used to efficiently traverse the tree. #[derive(Debug, Clone)] pub struct CachedHeaderMetadata { - /// Hash of the header. - pub hash: Block::Hash, - /// Block number. - pub number: NumberFor, - /// Hash of parent header. - pub parent: Block::Hash, - /// Hash of an ancestor header. Used to jump through the tree. - ancestor: Block::Hash, + /// Hash of the header. + pub hash: Block::Hash, + /// Block number. + pub number: NumberFor, + /// Hash of parent header. + pub parent: Block::Hash, + /// Hash of an ancestor header. Used to jump through the tree. 
+ ancestor: Block::Hash, } impl From<&Block::Header> for CachedHeaderMetadata { - fn from(header: &Block::Header) -> Self { - CachedHeaderMetadata { - hash: header.hash().clone(), - number: header.number().clone(), - parent: header.parent_hash().clone(), - ancestor: header.parent_hash().clone(), - } - } + fn from(header: &Block::Header) -> Self { + CachedHeaderMetadata { + hash: header.hash().clone(), + number: header.number().clone(), + parent: header.parent_hash().clone(), + ancestor: header.parent_hash().clone(), + } + } } diff --git a/primitives/blockchain/src/lib.rs b/primitives/blockchain/src/lib.rs index 8f83c7aec5..123fc68304 100644 --- a/primitives/blockchain/src/lib.rs +++ b/primitives/blockchain/src/lib.rs @@ -17,9 +17,9 @@ //! Substrate blockchain traits and primitives. mod backend; -mod header_metadata; mod error; +mod header_metadata; -pub use error::*; pub use backend::*; +pub use error::*; pub use header_metadata::*; diff --git a/primitives/chain-spec/src/lib.rs b/primitives/chain-spec/src/lib.rs index 13ebc09b6c..1c37bd35ea 100644 --- a/primitives/chain-spec/src/lib.rs +++ b/primitives/chain-spec/src/lib.rs @@ -22,20 +22,20 @@ /// additional information or enabling additional features. #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Clone)] pub enum ChainType { - /// A development chain that runs mainly on one node. - Development, - /// A local chain that runs locally on multiple nodes for testing purposes. - Local, - /// A live chain. - Live, - /// Some custom chain type. - Custom(String), + /// A development chain that runs mainly on one node. + Development, + /// A local chain that runs locally on multiple nodes for testing purposes. + Local, + /// A live chain. + Live, + /// Some custom chain type. 
+ Custom(String), } impl Default for ChainType { - fn default() -> Self { - Self::Live - } + fn default() -> Self { + Self::Live + } } /// Arbitrary properties defined in chain spec as a JSON object diff --git a/primitives/consensus/aura/src/inherents.rs b/primitives/consensus/aura/src/inherents.rs index 77ec03c6f4..1b59a0463e 100644 --- a/primitives/consensus/aura/src/inherents.rs +++ b/primitives/consensus/aura/src/inherents.rs @@ -15,8 +15,7 @@ // along with Substrate. If not, see . /// Contains the inherents for the AURA module - -use sp_inherents::{InherentIdentifier, InherentData, Error}; +use sp_inherents::{Error, InherentData, InherentIdentifier}; #[cfg(feature = "std")] use sp_inherents::{InherentDataProviders, ProvideInherentData}; @@ -29,70 +28,64 @@ pub type InherentType = u64; /// Auxiliary trait to extract Aura inherent data. pub trait AuraInherentData { - /// Get aura inherent data. - fn aura_inherent_data(&self) ->Result; - /// Replace aura inherent data. - fn aura_replace_inherent_data(&mut self, new: InherentType); + /// Get aura inherent data. + fn aura_inherent_data(&self) -> Result; + /// Replace aura inherent data. + fn aura_replace_inherent_data(&mut self, new: InherentType); } impl AuraInherentData for InherentData { - fn aura_inherent_data(&self) ->Result { - self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Aura inherent data not found".into())) - } - - fn aura_replace_inherent_data(&mut self, new: InherentType) { - self.replace_data(INHERENT_IDENTIFIER, &new); - } + fn aura_inherent_data(&self) -> Result { + self.get_data(&INHERENT_IDENTIFIER) + .and_then(|r| r.ok_or_else(|| "Aura inherent data not found".into())) + } + + fn aura_replace_inherent_data(&mut self, new: InherentType) { + self.replace_data(INHERENT_IDENTIFIER, &new); + } } /// Provides the slot duration inherent data for `Aura`. 
#[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: u64, + slot_duration: u64, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(slot_duration: u64) -> Self { - Self { - slot_duration - } - } + pub fn new(slot_duration: u64) -> Self { + Self { slot_duration } + } } #[cfg(feature = "std")] impl ProvideInherentData for InherentDataProvider { - fn on_register( - &self, - providers: &InherentDataProviders, - ) ->Result<(), Error> { - if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - // Add the timestamp inherent data provider, as we require it. - providers.register_provider(sp_timestamp::InherentDataProvider) - } else { - Ok(()) - } - } - - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER - } - - fn provide_inherent_data( - &self, - inherent_data: &mut InherentData, - ) ->Result<(), Error> { - use sp_timestamp::TimestampInherentData; - - let timestamp = inherent_data.timestamp_inherent_data()?; - let slot_num = timestamp / self.slot_duration; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot_num) - } - - fn error_to_string(&self, error: &[u8]) -> Option { - use codec::Decode; - - sp_inherents::Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() - } + fn on_register(&self, providers: &InherentDataProviders) -> Result<(), Error> { + if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { + // Add the timestamp inherent data provider, as we require it. 
+ providers.register_provider(sp_timestamp::InherentDataProvider) + } else { + Ok(()) + } + } + + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } + + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + use sp_timestamp::TimestampInherentData; + + let timestamp = inherent_data.timestamp_inherent_data()?; + let slot_num = timestamp / self.slot_duration; + inherent_data.put_data(INHERENT_IDENTIFIER, &slot_num) + } + + fn error_to_string(&self, error: &[u8]) -> Option { + use codec::Decode; + + sp_inherents::Error::decode(&mut &error[..]) + .map(|e| e.into_string()) + .ok() + } } diff --git a/primitives/consensus/aura/src/lib.rs b/primitives/consensus/aura/src/lib.rs index 2dda5b28bf..87eff4d0f2 100644 --- a/primitives/consensus/aura/src/lib.rs +++ b/primitives/consensus/aura/src/lib.rs @@ -18,46 +18,46 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode, Codec}; -use sp_std::vec::Vec; +use codec::{Codec, Decode, Encode}; use sp_runtime::ConsensusEngineId; +use sp_std::vec::Vec; pub mod inherents; pub mod sr25519 { - mod app_sr25519 { - use sp_application_crypto::{app_crypto, key_types::AURA, sr25519}; - app_crypto!(sr25519, AURA); - } + mod app_sr25519 { + use sp_application_crypto::{app_crypto, key_types::AURA, sr25519}; + app_crypto!(sr25519, AURA); + } - sp_application_crypto::with_pair! { - /// An Aura authority keypair using S/R 25519 as its crypto. - pub type AuthorityPair = app_sr25519::Pair; - } + sp_application_crypto::with_pair! { + /// An Aura authority keypair using S/R 25519 as its crypto. + pub type AuthorityPair = app_sr25519::Pair; + } - /// An Aura authority signature using S/R 25519 as its crypto. - pub type AuthoritySignature = app_sr25519::Signature; + /// An Aura authority signature using S/R 25519 as its crypto. + pub type AuthoritySignature = app_sr25519::Signature; - /// An Aura authority identifier using S/R 25519 as its crypto. 
- pub type AuthorityId = app_sr25519::Public; + /// An Aura authority identifier using S/R 25519 as its crypto. + pub type AuthorityId = app_sr25519::Public; } pub mod ed25519 { - mod app_ed25519 { - use sp_application_crypto::{app_crypto, key_types::AURA, ed25519}; - app_crypto!(ed25519, AURA); - } + mod app_ed25519 { + use sp_application_crypto::{app_crypto, ed25519, key_types::AURA}; + app_crypto!(ed25519, AURA); + } - sp_application_crypto::with_pair! { - /// An Aura authority keypair using Ed25519 as its crypto. - pub type AuthorityPair = app_ed25519::Pair; - } + sp_application_crypto::with_pair! { + /// An Aura authority keypair using Ed25519 as its crypto. + pub type AuthorityPair = app_ed25519::Pair; + } - /// An Aura authority signature using Ed25519 as its crypto. - pub type AuthoritySignature = app_ed25519::Signature; + /// An Aura authority signature using Ed25519 as its crypto. + pub type AuthoritySignature = app_ed25519::Signature; - /// An Aura authority identifier using Ed25519 as its crypto. - pub type AuthorityId = app_ed25519::Public; + /// An Aura authority identifier using Ed25519 as its crypto. + pub type AuthorityId = app_ed25519::Public; } /// The `ConsensusEngineId` of AuRa. @@ -69,25 +69,25 @@ pub type AuthorityIndex = u32; /// An consensus log item for Aura. #[derive(Decode, Encode)] pub enum ConsensusLog { - /// The authorities have changed. - #[codec(index = "1")] - AuthoritiesChange(Vec), - /// Disable the authority with given index. - #[codec(index = "2")] - OnDisabled(AuthorityIndex), + /// The authorities have changed. + #[codec(index = "1")] + AuthoritiesChange(Vec), + /// Disable the authority with given index. + #[codec(index = "2")] + OnDisabled(AuthorityIndex), } sp_api::decl_runtime_apis! { - /// API necessary for block authorship with aura. - pub trait AuraApi { - /// Return the slot duration in seconds for Aura. - /// Currently, only the value provided by this type at genesis - /// will be used. 
- /// - /// Dynamic slot duration may be supported in the future. - fn slot_duration() -> u64; - - // Return the current set of authorities. - fn authorities() -> Vec; - } + /// API necessary for block authorship with aura. + pub trait AuraApi { + /// Return the slot duration in seconds for Aura. + /// Currently, only the value provided by this type at genesis + /// will be used. + /// + /// Dynamic slot duration may be supported in the future. + fn slot_duration() -> u64; + + // Return the current set of authorities. + fn authorities() -> Vec; + } } diff --git a/primitives/consensus/babe/src/digests.rs b/primitives/consensus/babe/src/digests.rs index 6079aa88c8..8279638da5 100644 --- a/primitives/consensus/babe/src/digests.rs +++ b/primitives/consensus/babe/src/digests.rs @@ -16,33 +16,39 @@ //! Private implementation details of BABE digests. +use super::{AuthorityId, AuthorityIndex, BabeAuthorityWeight, SlotNumber}; #[cfg(feature = "std")] -use super::{BABE_ENGINE_ID, AuthoritySignature}; -use super::{AuthorityId, AuthorityIndex, SlotNumber, BabeAuthorityWeight}; +use super::{AuthoritySignature, BABE_ENGINE_ID}; #[cfg(feature = "std")] -use sp_runtime::{DigestItem, generic::OpaqueDigestItemId}; -#[cfg(feature = "std")] -use std::{fmt::Debug, convert::{TryFrom, TryInto}}; +use codec::Codec; use codec::{Decode, Encode}; #[cfg(feature = "std")] -use codec::Codec; -use sp_std::vec::Vec; -use sp_runtime::RuntimeDebug; +use sp_consensus_vrf::schnorrkel::SignatureError; use sp_consensus_vrf::schnorrkel::{self, Randomness}; +use sp_runtime::RuntimeDebug; #[cfg(feature = "std")] -use sp_consensus_vrf::schnorrkel::SignatureError; +use sp_runtime::{generic::OpaqueDigestItemId, DigestItem}; +use sp_std::vec::Vec; +#[cfg(feature = "std")] +use std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, +}; /// Raw BABE primary slot assignment pre-digest. 
#[derive(Clone, RuntimeDebug, Encode, Decode)] -pub struct RawPrimaryPreDigest { - /// Authority index - pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, - /// VRF output - pub vrf_output: VRFOutput, - /// VRF proof - pub vrf_proof: VRFProof, +pub struct RawPrimaryPreDigest< + VRFOutput = schnorrkel::RawVRFOutput, + VRFProof = schnorrkel::RawVRFProof, +> { + /// Authority index + pub authority_index: super::AuthorityIndex, + /// Slot number + pub slot_number: SlotNumber, + /// VRF output + pub vrf_output: VRFOutput, + /// VRF proof + pub vrf_proof: VRFProof, } #[cfg(feature = "std")] @@ -51,43 +57,43 @@ pub type PrimaryPreDigest = RawPrimaryPreDigest for PrimaryPreDigest { - type Error = SignatureError; - - fn try_from(raw: RawPrimaryPreDigest) -> Result { - Ok(PrimaryPreDigest { - authority_index: raw.authority_index, - slot_number: raw.slot_number, - vrf_output: raw.vrf_output.try_into()?, - vrf_proof: raw.vrf_proof.try_into()?, - }) - } + type Error = SignatureError; + + fn try_from(raw: RawPrimaryPreDigest) -> Result { + Ok(PrimaryPreDigest { + authority_index: raw.authority_index, + slot_number: raw.slot_number, + vrf_output: raw.vrf_output.try_into()?, + vrf_proof: raw.vrf_proof.try_into()?, + }) + } } /// BABE secondary slot assignment pre-digest. #[derive(Clone, RuntimeDebug, Encode, Decode)] pub struct SecondaryPreDigest { - /// Authority index - /// - /// This is not strictly-speaking necessary, since the secondary slots - /// are assigned based on slot number and epoch randomness. But including - /// it makes things easier for higher-level users of the chain data to - /// be aware of the author of a secondary-slot block. - pub authority_index: super::AuthorityIndex, - /// Slot number - pub slot_number: SlotNumber, + /// Authority index + /// + /// This is not strictly-speaking necessary, since the secondary slots + /// are assigned based on slot number and epoch randomness. 
But including + /// it makes things easier for higher-level users of the chain data to + /// be aware of the author of a secondary-slot block. + pub authority_index: super::AuthorityIndex, + /// Slot number + pub slot_number: SlotNumber, } /// A BABE pre-runtime digest. This contains all data required to validate a /// block and for the BABE runtime module. Slots can be assigned to a primary /// (VRF based) and to a secondary (slot number based). #[derive(Clone, RuntimeDebug, Encode, Decode)] -pub enum RawPreDigest { - /// A primary VRF-based slot assignment. - #[codec(index = "1")] - Primary(RawPrimaryPreDigest), - /// A secondary deterministic slot assignment. - #[codec(index = "2")] - Secondary(SecondaryPreDigest), +pub enum RawPreDigest { + /// A primary VRF-based slot assignment. + #[codec(index = "1")] + Primary(RawPrimaryPreDigest), + /// A secondary deterministic slot assignment. + #[codec(index = "2")] + Secondary(SecondaryPreDigest), } #[cfg(feature = "std")] @@ -95,99 +101,100 @@ pub enum RawPreDigest; impl RawPreDigest { - /// Returns the slot number of the pre digest. - pub fn authority_index(&self) -> AuthorityIndex { - match self { - RawPreDigest::Primary(primary) => primary.authority_index, - RawPreDigest::Secondary(secondary) => secondary.authority_index, - } - } - - /// Returns the slot number of the pre digest. - pub fn slot_number(&self) -> SlotNumber { - match self { - RawPreDigest::Primary(primary) => primary.slot_number, - RawPreDigest::Secondary(secondary) => secondary.slot_number, - } - } - - /// Returns the weight _added_ by this digest, not the cumulative weight - /// of the chain. - pub fn added_weight(&self) -> crate::BabeBlockWeight { - match self { - RawPreDigest::Primary(_) => 1, - RawPreDigest::Secondary(_) => 0, - } - } + /// Returns the slot number of the pre digest. 
+ pub fn authority_index(&self) -> AuthorityIndex { + match self { + RawPreDigest::Primary(primary) => primary.authority_index, + RawPreDigest::Secondary(secondary) => secondary.authority_index, + } + } + + /// Returns the slot number of the pre digest. + pub fn slot_number(&self) -> SlotNumber { + match self { + RawPreDigest::Primary(primary) => primary.slot_number, + RawPreDigest::Secondary(secondary) => secondary.slot_number, + } + } + + /// Returns the weight _added_ by this digest, not the cumulative weight + /// of the chain. + pub fn added_weight(&self) -> crate::BabeBlockWeight { + match self { + RawPreDigest::Primary(_) => 1, + RawPreDigest::Secondary(_) => 0, + } + } } #[cfg(feature = "std")] impl TryFrom for PreDigest { - type Error = SignatureError; - - fn try_from(raw: RawPreDigest) -> Result { - Ok(match raw { - RawPreDigest::Primary(primary) => PreDigest::Primary(primary.try_into()?), - RawPreDigest::Secondary(secondary) => PreDigest::Secondary(secondary), - }) - } + type Error = SignatureError; + + fn try_from(raw: RawPreDigest) -> Result { + Ok(match raw { + RawPreDigest::Primary(primary) => PreDigest::Primary(primary.try_into()?), + RawPreDigest::Secondary(secondary) => PreDigest::Secondary(secondary), + }) + } } /// Information about the next epoch. This is broadcast in the first block /// of the epoch. #[derive(Decode, Encode, Default, PartialEq, Eq, Clone, RuntimeDebug)] pub struct NextEpochDescriptor { - /// The authorities. - pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + /// The authorities. + pub authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - /// The value of randomness to use for the slot-assignment. - pub randomness: Randomness, + /// The value of randomness to use for the slot-assignment. + pub randomness: Randomness, } /// A digest item which is usable with BABE consensus. #[cfg(feature = "std")] pub trait CompatibleDigestItem: Sized { - /// Construct a digest item which contains a BABE pre-digest. 
- fn babe_pre_digest(seal: PreDigest) -> Self; + /// Construct a digest item which contains a BABE pre-digest. + fn babe_pre_digest(seal: PreDigest) -> Self; - /// If this item is an BABE pre-digest, return it. - fn as_babe_pre_digest(&self) -> Option; + /// If this item is an BABE pre-digest, return it. + fn as_babe_pre_digest(&self) -> Option; - /// Construct a digest item which contains a BABE seal. - fn babe_seal(signature: AuthoritySignature) -> Self; + /// Construct a digest item which contains a BABE seal. + fn babe_seal(signature: AuthoritySignature) -> Self; - /// If this item is a BABE signature, return the signature. - fn as_babe_seal(&self) -> Option; + /// If this item is a BABE signature, return the signature. + fn as_babe_seal(&self) -> Option; - /// If this item is a BABE epoch, return it. - fn as_next_epoch_descriptor(&self) -> Option; + /// If this item is a BABE epoch, return it. + fn as_next_epoch_descriptor(&self) -> Option; } #[cfg(feature = "std")] -impl CompatibleDigestItem for DigestItem where - Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static +impl CompatibleDigestItem for DigestItem +where + Hash: Debug + Send + Sync + Eq + Clone + Codec + 'static, { - fn babe_pre_digest(digest: PreDigest) -> Self { - DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) - } - - fn as_babe_pre_digest(&self) -> Option { - self.try_to(OpaqueDigestItemId::PreRuntime(&BABE_ENGINE_ID)) - } - - fn babe_seal(signature: AuthoritySignature) -> Self { - DigestItem::Seal(BABE_ENGINE_ID, signature.encode()) - } - - fn as_babe_seal(&self) -> Option { - self.try_to(OpaqueDigestItemId::Seal(&BABE_ENGINE_ID)) - } - - fn as_next_epoch_descriptor(&self) -> Option { - self.try_to(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)) - .and_then(|x: super::ConsensusLog| match x { - super::ConsensusLog::NextEpochData(n) => Some(n), - _ => None, - }) - } + fn babe_pre_digest(digest: PreDigest) -> Self { + DigestItem::PreRuntime(BABE_ENGINE_ID, digest.encode()) + } + + fn 
as_babe_pre_digest(&self) -> Option { + self.try_to(OpaqueDigestItemId::PreRuntime(&BABE_ENGINE_ID)) + } + + fn babe_seal(signature: AuthoritySignature) -> Self { + DigestItem::Seal(BABE_ENGINE_ID, signature.encode()) + } + + fn as_babe_seal(&self) -> Option { + self.try_to(OpaqueDigestItemId::Seal(&BABE_ENGINE_ID)) + } + + fn as_next_epoch_descriptor(&self) -> Option { + self.try_to(OpaqueDigestItemId::Consensus(&BABE_ENGINE_ID)) + .and_then(|x: super::ConsensusLog| match x { + super::ConsensusLog::NextEpochData(n) => Some(n), + _ => None, + }) + } } diff --git a/primitives/consensus/babe/src/inherents.rs b/primitives/consensus/babe/src/inherents.rs index 7c0744ac6e..4a460ec6f7 100644 --- a/primitives/consensus/babe/src/inherents.rs +++ b/primitives/consensus/babe/src/inherents.rs @@ -32,59 +32,59 @@ pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"babeslot"; pub type InherentType = u64; /// Auxiliary trait to extract BABE inherent data. pub trait BabeInherentData { - /// Get BABE inherent data. - fn babe_inherent_data(&self) -> Result; - /// Replace BABE inherent data. - fn babe_replace_inherent_data(&mut self, new: InherentType); + /// Get BABE inherent data. + fn babe_inherent_data(&self) -> Result; + /// Replace BABE inherent data. + fn babe_replace_inherent_data(&mut self, new: InherentType); } impl BabeInherentData for InherentData { - fn babe_inherent_data(&self) -> Result { - self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "BABE inherent data not found".into())) - } + fn babe_inherent_data(&self) -> Result { + self.get_data(&INHERENT_IDENTIFIER) + .and_then(|r| r.ok_or_else(|| "BABE inherent data not found".into())) + } - fn babe_replace_inherent_data(&mut self, new: InherentType) { - self.replace_data(INHERENT_IDENTIFIER, &new); - } + fn babe_replace_inherent_data(&mut self, new: InherentType) { + self.replace_data(INHERENT_IDENTIFIER, &new); + } } /// Provides the slot duration inherent data for BABE. 
#[cfg(feature = "std")] pub struct InherentDataProvider { - slot_duration: u64, + slot_duration: u64, } #[cfg(feature = "std")] impl InherentDataProvider { - /// Constructs `Self` - pub fn new(slot_duration: u64) -> Self { - Self { slot_duration } - } + /// Constructs `Self` + pub fn new(slot_duration: u64) -> Self { + Self { slot_duration } + } } #[cfg(feature = "std")] impl ProvideInherentData for InherentDataProvider { - fn on_register(&self, providers: &InherentDataProviders) -> Result<(), Error> { - if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { - // Add the timestamp inherent data provider, as we require it. - providers.register_provider(sp_timestamp::InherentDataProvider) - } else { - Ok(()) - } - } + fn on_register(&self, providers: &InherentDataProviders) -> Result<(), Error> { + if !providers.has_provider(&sp_timestamp::INHERENT_IDENTIFIER) { + // Add the timestamp inherent data provider, as we require it. + providers.register_provider(sp_timestamp::InherentDataProvider) + } else { + Ok(()) + } + } - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER - } + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { - let timestamp = inherent_data.timestamp_inherent_data()?; - let slot_number = timestamp / self.slot_duration; - inherent_data.put_data(INHERENT_IDENTIFIER, &slot_number) - } + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + let timestamp = inherent_data.timestamp_inherent_data()?; + let slot_number = timestamp / self.slot_duration; + inherent_data.put_data(INHERENT_IDENTIFIER, &slot_number) + } - fn error_to_string(&self, error: &[u8]) -> Option { - Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() - } + fn error_to_string(&self, error: &[u8]) -> Option { + Error::decode(&mut &error[..]).map(|e| e.into_string()).ok() + 
} } diff --git a/primitives/consensus/babe/src/lib.rs b/primitives/consensus/babe/src/lib.rs index 33701860d1..076733ff42 100644 --- a/primitives/consensus/babe/src/lib.rs +++ b/primitives/consensus/babe/src/lib.rs @@ -23,17 +23,17 @@ pub mod digests; pub mod inherents; pub use sp_consensus_vrf::schnorrkel::{ - Randomness, VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH, RANDOMNESS_LENGTH + Randomness, RANDOMNESS_LENGTH, VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH, }; -use codec::{Encode, Decode}; -use sp_std::vec::Vec; -use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use crate::digests::NextEpochDescriptor; +use codec::{Decode, Encode}; +use sp_runtime::{ConsensusEngineId, RuntimeDebug}; +use sp_std::vec::Vec; mod app { - use sp_application_crypto::{app_crypto, key_types::BABE, sr25519}; - app_crypto!(sr25519, BABE); + use sp_application_crypto::{app_crypto, key_types::BABE, sr25519}; + app_crypto!(sr25519, BABE); } /// The prefix used by BABE for its VRF keys. @@ -79,66 +79,66 @@ pub type BabeBlockWeight = u32; /// An consensus log item for BABE. #[derive(Decode, Encode, Clone, PartialEq, Eq)] pub enum ConsensusLog { - /// The epoch has changed. This provides information about the _next_ - /// epoch - information about the _current_ epoch (i.e. the one we've just - /// entered) should already be available earlier in the chain. - #[codec(index = "1")] - NextEpochData(NextEpochDescriptor), - /// Disable the authority with given index. - #[codec(index = "2")] - OnDisabled(AuthorityIndex), + /// The epoch has changed. This provides information about the _next_ + /// epoch - information about the _current_ epoch (i.e. the one we've just + /// entered) should already be available earlier in the chain. + #[codec(index = "1")] + NextEpochData(NextEpochDescriptor), + /// Disable the authority with given index. + #[codec(index = "2")] + OnDisabled(AuthorityIndex), } /// Configuration data used by the BABE consensus engine. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct BabeConfiguration { - /// The slot duration in milliseconds for BABE. Currently, only - /// the value provided by this type at genesis will be used. - /// - /// Dynamic slot duration may be supported in the future. - pub slot_duration: u64, - - /// The duration of epochs in slots. - pub epoch_length: SlotNumber, - - /// A constant value that is used in the threshold calculation formula. - /// Expressed as a rational where the first member of the tuple is the - /// numerator and the second is the denominator. The rational should - /// represent a value between 0 and 1. - /// In the threshold formula calculation, `1 - c` represents the probability - /// of a slot being empty. - pub c: (u64, u64), - - /// The authorities for the genesis epoch. - pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, - - /// The randomness for the genesis epoch. - pub randomness: Randomness, - - /// Whether this chain should run with secondary slots, which are assigned - /// in round-robin manner. - pub secondary_slots: bool, + /// The slot duration in milliseconds for BABE. Currently, only + /// the value provided by this type at genesis will be used. + /// + /// Dynamic slot duration may be supported in the future. + pub slot_duration: u64, + + /// The duration of epochs in slots. + pub epoch_length: SlotNumber, + + /// A constant value that is used in the threshold calculation formula. + /// Expressed as a rational where the first member of the tuple is the + /// numerator and the second is the denominator. The rational should + /// represent a value between 0 and 1. + /// In the threshold formula calculation, `1 - c` represents the probability + /// of a slot being empty. + pub c: (u64, u64), + + /// The authorities for the genesis epoch. + pub genesis_authorities: Vec<(AuthorityId, BabeAuthorityWeight)>, + + /// The randomness for the genesis epoch. 
+ pub randomness: Randomness, + + /// Whether this chain should run with secondary slots, which are assigned + /// in round-robin manner. + pub secondary_slots: bool, } #[cfg(feature = "std")] impl sp_consensus::SlotData for BabeConfiguration { - fn slot_duration(&self) -> u64 { - self.slot_duration - } + fn slot_duration(&self) -> u64 { + self.slot_duration + } - const SLOT_KEY: &'static [u8] = b"babe_configuration"; + const SLOT_KEY: &'static [u8] = b"babe_configuration"; } sp_api::decl_runtime_apis! { - /// API necessary for block authorship with BABE. - pub trait BabeApi { - /// Return the configuration for BABE. Currently, - /// only the value provided by this type at genesis will be used. - /// - /// Dynamic configuration may be supported in the future. - fn configuration() -> BabeConfiguration; - - /// Returns the slot number that started the current epoch. - fn current_epoch_start() -> SlotNumber; - } + /// API necessary for block authorship with BABE. + pub trait BabeApi { + /// Return the configuration for BABE. Currently, + /// only the value provided by this type at genesis will be used. + /// + /// Dynamic configuration may be supported in the future. + fn configuration() -> BabeConfiguration; + + /// Returns the slot number that started the current epoch. + fn current_epoch_start() -> SlotNumber; + } } diff --git a/primitives/consensus/common/src/block_import.rs b/primitives/consensus/common/src/block_import.rs index eb90ac9f1d..d84384c5e2 100644 --- a/primitives/consensus/common/src/block_import.rs +++ b/primitives/consensus/common/src/block_import.rs @@ -16,349 +16,349 @@ //! Block import helpers. 
-use sp_runtime::traits::{Block as BlockT, DigestItemFor, Header as HeaderT, NumberFor, HashFor}; +use serde::{Deserialize, Serialize}; +use sp_runtime::traits::{Block as BlockT, DigestItemFor, HashFor, Header as HeaderT, NumberFor}; use sp_runtime::Justification; -use serde::{Serialize, Deserialize}; +use std::any::Any; use std::borrow::Cow; use std::collections::HashMap; use std::sync::Arc; -use std::any::Any; +use crate::import_queue::{CacheKeyId, Verifier}; use crate::Error; -use crate::import_queue::{Verifier, CacheKeyId}; /// Block import result. #[derive(Debug, PartialEq, Eq)] pub enum ImportResult { - /// Block imported. - Imported(ImportedAux), - /// Already in the blockchain. - AlreadyInChain, - /// Block or parent is known to be bad. - KnownBad, - /// Block parent is not in the chain. - UnknownParent, - /// Parent state is missing. - MissingState, + /// Block imported. + Imported(ImportedAux), + /// Already in the blockchain. + AlreadyInChain, + /// Block or parent is known to be bad. + KnownBad, + /// Block parent is not in the chain. + UnknownParent, + /// Parent state is missing. + MissingState, } /// Auxiliary data associated with an imported block result. #[derive(Debug, Default, PartialEq, Eq, Serialize, Deserialize)] pub struct ImportedAux { - /// Only the header has been imported. Block body verification was skipped. - pub header_only: bool, - /// Clear all pending justification requests. - pub clear_justification_requests: bool, - /// Request a justification for the given block. - pub needs_justification: bool, - /// Received a bad justification. - pub bad_justification: bool, - /// Request a finality proof for the given block. - pub needs_finality_proof: bool, - /// Whether the block that was imported is the new best block. - pub is_new_best: bool, + /// Only the header has been imported. Block body verification was skipped. + pub header_only: bool, + /// Clear all pending justification requests. 
+ pub clear_justification_requests: bool, + /// Request a justification for the given block. + pub needs_justification: bool, + /// Received a bad justification. + pub bad_justification: bool, + /// Request a finality proof for the given block. + pub needs_finality_proof: bool, + /// Whether the block that was imported is the new best block. + pub is_new_best: bool, } impl ImportResult { - /// Returns default value for `ImportResult::Imported` with - /// `clear_justification_requests`, `needs_justification`, - /// `bad_justification` and `needs_finality_proof` set to false. - pub fn imported(is_new_best: bool) -> ImportResult { - let mut aux = ImportedAux::default(); - aux.is_new_best = is_new_best; - - ImportResult::Imported(aux) - } + /// Returns default value for `ImportResult::Imported` with + /// `clear_justification_requests`, `needs_justification`, + /// `bad_justification` and `needs_finality_proof` set to false. + pub fn imported(is_new_best: bool) -> ImportResult { + let mut aux = ImportedAux::default(); + aux.is_new_best = is_new_best; + + ImportResult::Imported(aux) + } } /// Block data origin. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum BlockOrigin { - /// Genesis block built into the client. - Genesis, - /// Block is part of the initial sync with the network. - NetworkInitialSync, - /// Block was broadcasted on the network. - NetworkBroadcast, - /// Block that was received from the network and validated in the consensus process. - ConsensusBroadcast, - /// Block that was collated by this node. - Own, - /// Block was imported from a file. - File, + /// Genesis block built into the client. + Genesis, + /// Block is part of the initial sync with the network. + NetworkInitialSync, + /// Block was broadcasted on the network. + NetworkBroadcast, + /// Block that was received from the network and validated in the consensus process. + ConsensusBroadcast, + /// Block that was collated by this node. + Own, + /// Block was imported from a file. 
+ File, } /// Fork choice strategy. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub enum ForkChoiceStrategy { - /// Longest chain fork choice. - LongestChain, - /// Custom fork choice rule, where true indicates the new block should be the best block. - Custom(bool), + /// Longest chain fork choice. + LongestChain, + /// Custom fork choice rule, where true indicates the new block should be the best block. + Custom(bool), } /// Data required to check validity of a Block. #[derive(Debug, PartialEq, Eq, Clone)] pub struct BlockCheckParams { - /// Hash of the block that we verify. - pub hash: Block::Hash, - /// Block number of the block that we verify. - pub number: NumberFor, - /// Parent hash of the block that we verify. - pub parent_hash: Block::Hash, - /// Allow importing the block skipping state verification if parent state is missing. - pub allow_missing_state: bool, - /// Re-validate existing block. - pub import_existing: bool, + /// Hash of the block that we verify. + pub hash: Block::Hash, + /// Block number of the block that we verify. + pub number: NumberFor, + /// Parent hash of the block that we verify. + pub parent_hash: Block::Hash, + /// Allow importing the block skipping state verification if parent state is missing. + pub allow_missing_state: bool, + /// Re-validate existing block. + pub import_existing: bool, } /// Data required to import a Block. #[non_exhaustive] pub struct BlockImportParams { - /// Origin of the Block - pub origin: BlockOrigin, - /// The header, without consensus post-digests applied. This should be in the same - /// state as it comes out of the runtime. - /// - /// Consensus engines which alter the header (by adding post-runtime digests) - /// should strip those off in the initial verification process and pass them - /// via the `post_digests` field. During block authorship, they should - /// not be pushed to the header directly. 
- /// - /// The reason for this distinction is so the header can be directly - /// re-executed in a runtime that checks digest equivalence -- the - /// post-runtime digests are pushed back on after. - pub header: Block::Header, - /// Justification provided for this block from the outside. - pub justification: Option, - /// Digest items that have been added after the runtime for external - /// work, like a consensus signature. - pub post_digests: Vec>, - /// The body of the block. - pub body: Option>, - /// The changes to the storage to create the state for the block. If this is `Some(_)`, - /// the block import will not need to re-execute the block for importing it. - pub storage_changes: Option< - sp_state_machine::StorageChanges, NumberFor> - >, - /// Is this block finalized already? - /// `true` implies instant finality. - pub finalized: bool, - /// Intermediate values that are interpreted by block importers. Each block importer, - /// upon handling a value, removes it from the intermediate list. The final block importer - /// rejects block import if there are still intermediate values that remain unhandled. - pub intermediates: HashMap, Box>, - /// Auxiliary consensus data produced by the block. - /// Contains a list of key-value pairs. If values are `None`, the keys - /// will be deleted. - pub auxiliary: Vec<(Vec, Option>)>, - /// Fork choice strategy of this import. This should only be set by a - /// synchronous import, otherwise it may race against other imports. - /// `None` indicates that the current verifier or importer cannot yet - /// determine the fork choice value, and it expects subsequent importer - /// to modify it. If `None` is passed all the way down to bottom block - /// importer, the import fails with an `IncompletePipeline` error. - pub fork_choice: Option, - /// Allow importing the block skipping state verification if parent state is missing. - pub allow_missing_state: bool, - /// Re-validate existing block. 
- pub import_existing: bool, - /// Cached full header hash (with post-digests applied). - pub post_hash: Option, + /// Origin of the Block + pub origin: BlockOrigin, + /// The header, without consensus post-digests applied. This should be in the same + /// state as it comes out of the runtime. + /// + /// Consensus engines which alter the header (by adding post-runtime digests) + /// should strip those off in the initial verification process and pass them + /// via the `post_digests` field. During block authorship, they should + /// not be pushed to the header directly. + /// + /// The reason for this distinction is so the header can be directly + /// re-executed in a runtime that checks digest equivalence -- the + /// post-runtime digests are pushed back on after. + pub header: Block::Header, + /// Justification provided for this block from the outside. + pub justification: Option, + /// Digest items that have been added after the runtime for external + /// work, like a consensus signature. + pub post_digests: Vec>, + /// The body of the block. + pub body: Option>, + /// The changes to the storage to create the state for the block. If this is `Some(_)`, + /// the block import will not need to re-execute the block for importing it. + pub storage_changes: + Option, NumberFor>>, + /// Is this block finalized already? + /// `true` implies instant finality. + pub finalized: bool, + /// Intermediate values that are interpreted by block importers. Each block importer, + /// upon handling a value, removes it from the intermediate list. The final block importer + /// rejects block import if there are still intermediate values that remain unhandled. + pub intermediates: HashMap, Box>, + /// Auxiliary consensus data produced by the block. + /// Contains a list of key-value pairs. If values are `None`, the keys + /// will be deleted. + pub auxiliary: Vec<(Vec, Option>)>, + /// Fork choice strategy of this import. 
This should only be set by a + /// synchronous import, otherwise it may race against other imports. + /// `None` indicates that the current verifier or importer cannot yet + /// determine the fork choice value, and it expects subsequent importer + /// to modify it. If `None` is passed all the way down to bottom block + /// importer, the import fails with an `IncompletePipeline` error. + pub fork_choice: Option, + /// Allow importing the block skipping state verification if parent state is missing. + pub allow_missing_state: bool, + /// Re-validate existing block. + pub import_existing: bool, + /// Cached full header hash (with post-digests applied). + pub post_hash: Option, } impl BlockImportParams { - /// Create a new block import params. - pub fn new( - origin: BlockOrigin, - header: Block::Header, - ) -> Self { - Self { - origin, header, - justification: None, - post_digests: Vec::new(), - body: None, - storage_changes: None, - finalized: false, - intermediates: HashMap::new(), - auxiliary: Vec::new(), - fork_choice: None, - allow_missing_state: false, - import_existing: false, - post_hash: None, - } - } - - /// Get the full header hash (with post-digests applied). - pub fn post_hash(&self) -> Block::Hash { - if let Some(hash) = self.post_hash { - hash - } else { - if self.post_digests.is_empty() { - self.header.hash() - } else { - let mut hdr = self.header.clone(); - for digest_item in &self.post_digests { - hdr.digest_mut().push(digest_item.clone()); - } - - hdr.hash() - } - } - } - - /// Auxiliary function for "converting" the transaction type. - /// - /// Actually this just sets `storage_changes` to `None` and makes rustc think that `Self` now - /// uses a different transaction type. 
- pub fn convert_transaction(self) -> BlockImportParams { - BlockImportParams { - origin: self.origin, - header: self.header, - justification: self.justification, - post_digests: self.post_digests, - body: self.body, - storage_changes: None, - finalized: self.finalized, - auxiliary: self.auxiliary, - intermediates: self.intermediates, - allow_missing_state: self.allow_missing_state, - fork_choice: self.fork_choice, - import_existing: self.import_existing, - post_hash: self.post_hash, - } - } - - /// Take intermediate by given key, and remove it from the processing list. - pub fn take_intermediate(&mut self, key: &[u8]) -> Result, Error> { - let (k, v) = self.intermediates.remove_entry(key).ok_or(Error::NoIntermediate)?; - - match v.downcast::() { - Ok(v) => Ok(v), - Err(v) => { - self.intermediates.insert(k, v); - Err(Error::InvalidIntermediate) - }, - } - } - - /// Get a reference to a given intermediate. - pub fn intermediate(&self, key: &[u8]) -> Result<&T, Error> { - self.intermediates.get(key) - .ok_or(Error::NoIntermediate)? - .downcast_ref::() - .ok_or(Error::InvalidIntermediate) - } - - /// Get a mutable reference to a given intermediate. - pub fn intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { - self.intermediates.get_mut(key) - .ok_or(Error::NoIntermediate)? - .downcast_mut::() - .ok_or(Error::InvalidIntermediate) - } + /// Create a new block import params. + pub fn new(origin: BlockOrigin, header: Block::Header) -> Self { + Self { + origin, + header, + justification: None, + post_digests: Vec::new(), + body: None, + storage_changes: None, + finalized: false, + intermediates: HashMap::new(), + auxiliary: Vec::new(), + fork_choice: None, + allow_missing_state: false, + import_existing: false, + post_hash: None, + } + } + + /// Get the full header hash (with post-digests applied). 
+ pub fn post_hash(&self) -> Block::Hash { + if let Some(hash) = self.post_hash { + hash + } else { + if self.post_digests.is_empty() { + self.header.hash() + } else { + let mut hdr = self.header.clone(); + for digest_item in &self.post_digests { + hdr.digest_mut().push(digest_item.clone()); + } + + hdr.hash() + } + } + } + + /// Auxiliary function for "converting" the transaction type. + /// + /// Actually this just sets `storage_changes` to `None` and makes rustc think that `Self` now + /// uses a different transaction type. + pub fn convert_transaction(self) -> BlockImportParams { + BlockImportParams { + origin: self.origin, + header: self.header, + justification: self.justification, + post_digests: self.post_digests, + body: self.body, + storage_changes: None, + finalized: self.finalized, + auxiliary: self.auxiliary, + intermediates: self.intermediates, + allow_missing_state: self.allow_missing_state, + fork_choice: self.fork_choice, + import_existing: self.import_existing, + post_hash: self.post_hash, + } + } + + /// Take intermediate by given key, and remove it from the processing list. + pub fn take_intermediate(&mut self, key: &[u8]) -> Result, Error> { + let (k, v) = self + .intermediates + .remove_entry(key) + .ok_or(Error::NoIntermediate)?; + + match v.downcast::() { + Ok(v) => Ok(v), + Err(v) => { + self.intermediates.insert(k, v); + Err(Error::InvalidIntermediate) + } + } + } + + /// Get a reference to a given intermediate. + pub fn intermediate(&self, key: &[u8]) -> Result<&T, Error> { + self.intermediates + .get(key) + .ok_or(Error::NoIntermediate)? + .downcast_ref::() + .ok_or(Error::InvalidIntermediate) + } + + /// Get a mutable reference to a given intermediate. + pub fn intermediate_mut(&mut self, key: &[u8]) -> Result<&mut T, Error> { + self.intermediates + .get_mut(key) + .ok_or(Error::NoIntermediate)? + .downcast_mut::() + .ok_or(Error::InvalidIntermediate) + } } /// Block import trait. pub trait BlockImport { - /// The error type. 
- type Error: std::error::Error + Send + 'static; - /// The transaction type used by the backend. - type Transaction; - - /// Check block preconditions. - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result; - - /// Import a block. - /// - /// Cached data can be accessed through the blockchain cache. - fn import_block( - &mut self, - block: BlockImportParams, - cache: HashMap>, - ) -> Result; + /// The error type. + type Error: std::error::Error + Send + 'static; + /// The transaction type used by the backend. + type Transaction; + + /// Check block preconditions. + fn check_block(&mut self, block: BlockCheckParams) -> Result; + + /// Import a block. + /// + /// Cached data can be accessed through the blockchain cache. + fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result; } -impl BlockImport for crate::import_queue::BoxBlockImport { - type Error = crate::error::Error; - type Transaction = Transaction; - - /// Check block preconditions. - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - (**self).check_block(block) - } - - /// Import a block. - /// - /// Cached data can be accessed through the blockchain cache. - fn import_block( - &mut self, - block: BlockImportParams, - cache: HashMap>, - ) -> Result { - (**self).import_block(block, cache) - } +impl BlockImport + for crate::import_queue::BoxBlockImport +{ + type Error = crate::error::Error; + type Transaction = Transaction; + + /// Check block preconditions. + fn check_block(&mut self, block: BlockCheckParams) -> Result { + (**self).check_block(block) + } + + /// Import a block. + /// + /// Cached data can be accessed through the blockchain cache. 
+ fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + (**self).import_block(block, cache) + } } impl BlockImport for Arc - where for<'r> &'r T: BlockImport +where + for<'r> &'r T: BlockImport, { - type Error = E; - type Transaction = Transaction; - - fn check_block( - &mut self, - block: BlockCheckParams, - ) -> Result { - (&**self).check_block(block) - } - - fn import_block( - &mut self, - block: BlockImportParams, - cache: HashMap>, - ) -> Result { - (&**self).import_block(block, cache) - } + type Error = E; + type Transaction = Transaction; + + fn check_block(&mut self, block: BlockCheckParams) -> Result { + (&**self).check_block(block) + } + + fn import_block( + &mut self, + block: BlockImportParams, + cache: HashMap>, + ) -> Result { + (&**self).import_block(block, cache) + } } /// Justification import trait pub trait JustificationImport { - type Error: std::error::Error + Send + 'static; - - /// Called by the import queue when it is started. Returns a list of justifications to request - /// from the network. - fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { Vec::new() } - - /// Import a Block justification and finalize the given block. - fn import_justification( - &mut self, - hash: B::Hash, - number: NumberFor, - justification: Justification, - ) -> Result<(), Self::Error>; + type Error: std::error::Error + Send + 'static; + + /// Called by the import queue when it is started. Returns a list of justifications to request + /// from the network. + fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { + Vec::new() + } + + /// Import a Block justification and finalize the given block. + fn import_justification( + &mut self, + hash: B::Hash, + number: NumberFor, + justification: Justification, + ) -> Result<(), Self::Error>; } /// Finality proof import trait. pub trait FinalityProofImport { - type Error: std::error::Error + Send + 'static; - - /// Called by the import queue when it is started. 
Returns a list of finality proofs to request - /// from the network. - fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { Vec::new() } - - /// Import a Block justification and finalize the given block. Returns finalized block or error. - fn import_finality_proof( - &mut self, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - verifier: &mut dyn Verifier, - ) -> Result<(B::Hash, NumberFor), Self::Error>; + type Error: std::error::Error + Send + 'static; + + /// Called by the import queue when it is started. Returns a list of finality proofs to request + /// from the network. + fn on_start(&mut self) -> Vec<(B::Hash, NumberFor)> { + Vec::new() + } + + /// Import a Block justification and finalize the given block. Returns finalized block or error. + fn import_finality_proof( + &mut self, + hash: B::Hash, + number: NumberFor, + finality_proof: Vec, + verifier: &mut dyn Verifier, + ) -> Result<(B::Hash, NumberFor), Self::Error>; } diff --git a/primitives/consensus/common/src/block_validation.rs b/primitives/consensus/common/src/block_validation.rs index e8054f3ae4..b0235b58e7 100644 --- a/primitives/consensus/common/src/block_validation.rs +++ b/primitives/consensus/common/src/block_validation.rs @@ -22,45 +22,49 @@ use std::{error::Error, sync::Arc}; /// A type which provides access to chain information. pub trait Chain { - /// Retrieve the status of the block denoted by the given [`BlockId`]. - fn block_status(&self, id: &BlockId) -> Result>; + /// Retrieve the status of the block denoted by the given [`BlockId`]. + fn block_status(&self, id: &BlockId) -> Result>; } impl, B: Block> Chain for Arc { - fn block_status(&self, id: &BlockId) -> Result> { - (&**self).block_status(id) - } + fn block_status(&self, id: &BlockId) -> Result> { + (&**self).block_status(id) + } } /// Result of `BlockAnnounceValidator::validate`. #[derive(Debug, PartialEq, Eq)] pub enum Validation { - /// Valid block announcement. - Success, - /// Invalid block announcement. 
- Failure, + /// Valid block announcement. + Success, + /// Invalid block announcement. + Failure, } /// Type which checks incoming block announcements. pub trait BlockAnnounceValidator { - /// Validate the announced header and its associated data. - fn validate(&mut self, header: &B::Header, data: &[u8]) -> Result>; + /// Validate the announced header and its associated data. + fn validate( + &mut self, + header: &B::Header, + data: &[u8], + ) -> Result>; } /// Default implementation of `BlockAnnounceValidator`. #[derive(Debug)] pub struct DefaultBlockAnnounceValidator { - chain: C + chain: C, } impl DefaultBlockAnnounceValidator { - pub fn new(chain: C) -> Self { - Self { chain } - } + pub fn new(chain: C) -> Self { + Self { chain } + } } impl> BlockAnnounceValidator for DefaultBlockAnnounceValidator { - fn validate(&mut self, _h: &B::Header, _d: &[u8]) -> Result> { - Ok(Validation::Success) - } + fn validate(&mut self, _h: &B::Header, _d: &[u8]) -> Result> { + Ok(Validation::Success) + } } diff --git a/primitives/consensus/common/src/error.rs b/primitives/consensus/common/src/error.rs index d7e396223a..9876aeab05 100644 --- a/primitives/consensus/common/src/error.rs +++ b/primitives/consensus/common/src/error.rs @@ -15,8 +15,8 @@ // along with Substrate. If not, see . //! Error types in Consensus -use sp_version::RuntimeVersion; use sp_core::ed25519::{Public, Signature}; +use sp_version::RuntimeVersion; use std::error; /// Result type alias. @@ -25,68 +25,75 @@ pub type Result = std::result::Result; /// Error type. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Missing state at block with given descriptor. - #[display(fmt="State unavailable at block {}", _0)] - StateUnavailable(String), - /// I/O terminated unexpectedly - #[display(fmt="I/O terminated unexpectedly.")] - IoTerminated, - /// Intermediate missing. - #[display(fmt="Missing intermediate.")] - NoIntermediate, - /// Intermediate is of wrong type. 
- #[display(fmt="Invalid intermediate.")] - InvalidIntermediate, - /// Unable to schedule wake-up. - #[display(fmt="Timer error: {}", _0)] - FaultyTimer(std::io::Error), - /// Error while working with inherent data. - #[display(fmt="InherentData error: {}", _0)] - InherentData(sp_inherents::Error), - /// Unable to propose a block. - #[display(fmt="Unable to create block proposal.")] - CannotPropose, - /// Error checking signature - #[display(fmt="Message signature {:?} by {:?} is invalid.", _0, _1)] - InvalidSignature(Signature, Public), - /// Invalid authorities set received from the runtime. - #[display(fmt="Current state of blockchain has invalid authorities set")] - InvalidAuthoritiesSet, - /// Account is not an authority. - #[display(fmt="Message sender {:?} is not a valid authority.", _0)] - InvalidAuthority(Public), - /// Authoring interface does not match the runtime. - #[display(fmt="Authoring for current \ - runtime is not supported. Native ({}) cannot author for on-chain ({}).", native, on_chain)] - IncompatibleAuthoringRuntime { native: RuntimeVersion, on_chain: RuntimeVersion }, - /// Authoring interface does not match the runtime. - #[display(fmt="Authoring for current runtime is not supported since it has no version.")] - RuntimeVersionMissing, - /// Authoring interface does not match the runtime. - #[display(fmt="Authoring in current build is not supported since it has no runtime.")] - NativeRuntimeMissing, - /// Justification requirements not met. - #[display(fmt="Invalid justification.")] - InvalidJustification, - /// Some other error. - #[display(fmt="Other error: {}", _0)] - Other(Box), - /// Error from the client while importing - #[display(fmt="Import failed: {}", _0)] - #[from(ignore)] - ClientImport(String), - /// Error from the client while importing - #[display(fmt="Chain lookup failed: {}", _0)] - #[from(ignore)] - ChainLookup(String), + /// Missing state at block with given descriptor. 
+ #[display(fmt = "State unavailable at block {}", _0)] + StateUnavailable(String), + /// I/O terminated unexpectedly + #[display(fmt = "I/O terminated unexpectedly.")] + IoTerminated, + /// Intermediate missing. + #[display(fmt = "Missing intermediate.")] + NoIntermediate, + /// Intermediate is of wrong type. + #[display(fmt = "Invalid intermediate.")] + InvalidIntermediate, + /// Unable to schedule wake-up. + #[display(fmt = "Timer error: {}", _0)] + FaultyTimer(std::io::Error), + /// Error while working with inherent data. + #[display(fmt = "InherentData error: {}", _0)] + InherentData(sp_inherents::Error), + /// Unable to propose a block. + #[display(fmt = "Unable to create block proposal.")] + CannotPropose, + /// Error checking signature + #[display(fmt = "Message signature {:?} by {:?} is invalid.", _0, _1)] + InvalidSignature(Signature, Public), + /// Invalid authorities set received from the runtime. + #[display(fmt = "Current state of blockchain has invalid authorities set")] + InvalidAuthoritiesSet, + /// Account is not an authority. + #[display(fmt = "Message sender {:?} is not a valid authority.", _0)] + InvalidAuthority(Public), + /// Authoring interface does not match the runtime. + #[display( + fmt = "Authoring for current \ + runtime is not supported. Native ({}) cannot author for on-chain ({}).", + native, + on_chain + )] + IncompatibleAuthoringRuntime { + native: RuntimeVersion, + on_chain: RuntimeVersion, + }, + /// Authoring interface does not match the runtime. + #[display(fmt = "Authoring for current runtime is not supported since it has no version.")] + RuntimeVersionMissing, + /// Authoring interface does not match the runtime. + #[display(fmt = "Authoring in current build is not supported since it has no runtime.")] + NativeRuntimeMissing, + /// Justification requirements not met. + #[display(fmt = "Invalid justification.")] + InvalidJustification, + /// Some other error. 
+ #[display(fmt = "Other error: {}", _0)] + Other(Box), + /// Error from the client while importing + #[display(fmt = "Import failed: {}", _0)] + #[from(ignore)] + ClientImport(String), + /// Error from the client while importing + #[display(fmt = "Chain lookup failed: {}", _0)] + #[from(ignore)] + ChainLookup(String), } impl error::Error for Error { - fn source(&self) -> Option<&(dyn error::Error + 'static)> { - match self { - Error::FaultyTimer(ref err) => Some(err), - Error::Other(ref err) => Some(&**err), - _ => None, - } - } + fn source(&self) -> Option<&(dyn error::Error + 'static)> { + match self { + Error::FaultyTimer(ref err) => Some(err), + Error::Other(ref err) => Some(&**err), + _ => None, + } + } } diff --git a/primitives/consensus/common/src/evaluation.rs b/primitives/consensus/common/src/evaluation.rs index 5542042fed..8e08fac61f 100644 --- a/primitives/consensus/common/src/evaluation.rs +++ b/primitives/consensus/common/src/evaluation.rs @@ -19,7 +19,7 @@ use super::MAX_BLOCK_SIZE; use codec::Encode; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, One, CheckedConversion}; +use sp_runtime::traits::{Block as BlockT, CheckedConversion, Header as HeaderT, One}; // This is just a best effort to encode the number. None indicated that it's too big to encode // in a u128. @@ -31,21 +31,33 @@ pub type Result = std::result::Result; /// Error type. #[derive(Debug, derive_more::Display)] pub enum Error { - /// Proposal provided not a block. - #[display(fmt="Proposal provided not a block: decoding error: {}", _0)] - BadProposalFormat(codec::Error), - /// Proposal had wrong parent hash. - #[display(fmt="Proposal had wrong parent hash. Expected {:?}, got {:?}", expected, got)] - WrongParentHash { expected: String, got: String }, - /// Proposal had wrong number. - #[display(fmt="Proposal had wrong number. Expected {:?}, got {:?}", expected, got)] - WrongNumber { expected: BlockNumber, got: BlockNumber }, - /// Proposal exceeded the maximum size. 
- #[display( - fmt="Proposal exceeded the maximum size of {} by {} bytes.", - "MAX_BLOCK_SIZE", "_0.saturating_sub(MAX_BLOCK_SIZE)" - )] - ProposalTooLarge(usize), + /// Proposal provided not a block. + #[display(fmt = "Proposal provided not a block: decoding error: {}", _0)] + BadProposalFormat(codec::Error), + /// Proposal had wrong parent hash. + #[display( + fmt = "Proposal had wrong parent hash. Expected {:?}, got {:?}", + expected, + got + )] + WrongParentHash { expected: String, got: String }, + /// Proposal had wrong number. + #[display( + fmt = "Proposal had wrong number. Expected {:?}, got {:?}", + expected, + got + )] + WrongNumber { + expected: BlockNumber, + got: BlockNumber, + }, + /// Proposal exceeded the maximum size. + #[display( + fmt = "Proposal exceeded the maximum size of {} by {} bytes.", + "MAX_BLOCK_SIZE", + "_0.saturating_sub(MAX_BLOCK_SIZE)" + )] + ProposalTooLarge(usize), } impl std::error::Error for Error {} @@ -53,32 +65,30 @@ impl std::error::Error for Error {} /// Attempt to evaluate a substrate block as a node block, returning error /// upon any initial validity checks failing. 
pub fn evaluate_initial( - proposal: &Block, - parent_hash: &::Hash, - parent_number: <::Header as HeaderT>::Number, + proposal: &Block, + parent_hash: &::Hash, + parent_number: <::Header as HeaderT>::Number, ) -> Result<()> { - - let encoded = Encode::encode(proposal); - let proposal = Block::decode(&mut &encoded[..]) - .map_err(|e| Error::BadProposalFormat(e))?; - - if encoded.len() > MAX_BLOCK_SIZE { - return Err(Error::ProposalTooLarge(encoded.len())) - } - - if *parent_hash != *proposal.header().parent_hash() { - return Err(Error::WrongParentHash { - expected: format!("{:?}", *parent_hash), - got: format!("{:?}", proposal.header().parent_hash()) - }); - } - - if parent_number + One::one() != *proposal.header().number() { - return Err(Error::WrongNumber { - expected: parent_number.checked_into::().map(|x| x + 1), - got: (*proposal.header().number()).checked_into::(), - }); - } - - Ok(()) + let encoded = Encode::encode(proposal); + let proposal = Block::decode(&mut &encoded[..]).map_err(|e| Error::BadProposalFormat(e))?; + + if encoded.len() > MAX_BLOCK_SIZE { + return Err(Error::ProposalTooLarge(encoded.len())); + } + + if *parent_hash != *proposal.header().parent_hash() { + return Err(Error::WrongParentHash { + expected: format!("{:?}", *parent_hash), + got: format!("{:?}", proposal.header().parent_hash()), + }); + } + + if parent_number + One::one() != *proposal.header().number() { + return Err(Error::WrongNumber { + expected: parent_number.checked_into::().map(|x| x + 1), + got: (*proposal.header().number()).checked_into::(), + }); + } + + Ok(()) } diff --git a/primitives/consensus/common/src/import_queue.rs b/primitives/consensus/common/src/import_queue.rs index 2da0bcac0c..5c29cfcdd5 100644 --- a/primitives/consensus/common/src/import_queue.rs +++ b/primitives/consensus/common/src/import_queue.rs @@ -25,13 +25,16 @@ //! instantiated. The `BasicQueue` and `BasicVerifier` traits allow serial //! queues to be instantiated simply. 
-use std::collections::HashMap; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as _, NumberFor}}; -use crate::error::Error as ConsensusError; use crate::block_import::{ - BlockImport, BlockOrigin, BlockImportParams, ImportedAux, JustificationImport, ImportResult, - BlockCheckParams, FinalityProofImport, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, FinalityProofImport, + ImportResult, ImportedAux, JustificationImport, }; +use crate::error::Error as ConsensusError; +use sp_runtime::{ + traits::{Block as BlockT, Header as _, NumberFor}, + Justification, +}; +use std::collections::HashMap; pub use basic_queue::BasicQueue; @@ -39,17 +42,16 @@ mod basic_queue; pub mod buffered_link; /// Shared block import struct used by the queue. -pub type BoxBlockImport = Box< - dyn BlockImport + Send + Sync ->; +pub type BoxBlockImport = + Box + Send + Sync>; /// Shared justification import struct used by the queue. -pub type BoxJustificationImport = Box + Send + Sync>; +pub type BoxJustificationImport = + Box + Send + Sync>; /// Shared finality proof import struct used by the queue. -pub type BoxFinalityProofImport = Box< - dyn FinalityProofImport + Send + Sync ->; +pub type BoxFinalityProofImport = + Box + Send + Sync>; /// Maps to the Origin used by the network. pub type Origin = libp2p::PeerId; @@ -57,20 +59,20 @@ pub type Origin = libp2p::PeerId; /// Block data used by the queue. #[derive(Debug, PartialEq, Eq, Clone)] pub struct IncomingBlock { - /// Block header hash. - pub hash: ::Hash, - /// Block header if requested. - pub header: Option<::Header>, - /// Block body if requested. - pub body: Option::Extrinsic>>, - /// Justification if requested. - pub justification: Option, - /// The peer, we received this from - pub origin: Option, - /// Allow importing the block skipping state verification if parent state is missing. - pub allow_missing_state: bool, - /// Re-validate existing block. - pub import_existing: bool, + /// Block header hash. 
+ pub hash: ::Hash, + /// Block header if requested. + pub header: Option<::Header>, + /// Block body if requested. + pub body: Option::Extrinsic>>, + /// Justification if requested. + pub justification: Option, + /// The peer, we received this from + pub origin: Option, + /// Allow importing the block skipping state verification if parent state is missing. + pub allow_missing_state: bool, + /// Re-validate existing block. + pub import_existing: bool, } /// Type of keys in the blockchain cache that consensus module could use for its needs. @@ -78,16 +80,16 @@ pub type CacheKeyId = [u8; 4]; /// Verify a justification of a block pub trait Verifier: Send + Sync { - /// Verify the given data and return the BlockImportParams and an optional - /// new set of validators to import. If not, err with an Error-Message - /// presented to the User in the logs. - fn verify( - &mut self, - origin: BlockOrigin, - header: B::Header, - justification: Option, - body: Option>, - ) -> Result<(BlockImportParams, Option)>>), String>; + /// Verify the given data and return the BlockImportParams and an optional + /// new set of validators to import. If not, err with an Error-Message + /// presented to the User in the logs. + fn verify( + &mut self, + origin: BlockOrigin, + header: B::Header, + justification: Option, + body: Option>, + ) -> Result<(BlockImportParams, Option)>>), String>; } /// Blocks import queue API. @@ -95,154 +97,168 @@ pub trait Verifier: Send + Sync { /// The `import_*` methods can be called in order to send elements for the import queue to verify. /// Afterwards, call `poll_actions` to determine how to respond to these elements. pub trait ImportQueue: Send { - /// Import bunch of blocks. - fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); - /// Import a block justification. - fn import_justification( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - justification: Justification - ); - /// Import block finality proof. 
- fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ); + /// Import bunch of blocks. + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>); + /// Import a block justification. + fn import_justification( + &mut self, + who: Origin, + hash: B::Hash, + number: NumberFor, + justification: Justification, + ); + /// Import block finality proof. + fn import_finality_proof( + &mut self, + who: Origin, + hash: B::Hash, + number: NumberFor, + finality_proof: Vec, + ); - /// Polls for actions to perform on the network. - /// - /// This method should behave in a way similar to `Future::poll`. It can register the current - /// task and notify later when more actions are ready to be polled. To continue the comparison, - /// it is as if this method always returned `Poll::Pending`. - fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link); + /// Polls for actions to perform on the network. + /// + /// This method should behave in a way similar to `Future::poll`. It can register the current + /// task and notify later when more actions are ready to be polled. To continue the comparison, + /// it is as if this method always returned `Poll::Pending`. + fn poll_actions(&mut self, cx: &mut futures::task::Context, link: &mut dyn Link); } /// Hooks that the verification queue can use to influence the synchronization /// algorithm. pub trait Link: Send { - /// Batch of blocks imported, with or without error. - fn blocks_processed( - &mut self, - _imported: usize, - _count: usize, - _results: Vec<(Result>, BlockImportError>, B::Hash)> - ) {} - /// Justification import result. - fn justification_imported(&mut self, _who: Origin, _hash: &B::Hash, _number: NumberFor, _success: bool) {} - /// Request a justification for the given block. - fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} - /// Finality proof import result. 
- /// - /// Even though we have asked for finality proof of block A, provider could return proof of - /// some earlier block B, if the proof for A was too large. The sync module should continue - /// asking for proof of A in this case. - fn finality_proof_imported( - &mut self, - _who: Origin, - _request_block: (B::Hash, NumberFor), - _finalization_result: Result<(B::Hash, NumberFor), ()>, - ) {} - /// Request a finality proof for the given block. - fn request_finality_proof(&mut self, _hash: &B::Hash, _number: NumberFor) {} + /// Batch of blocks imported, with or without error. + fn blocks_processed( + &mut self, + _imported: usize, + _count: usize, + _results: Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + ) { + } + /// Justification import result. + fn justification_imported( + &mut self, + _who: Origin, + _hash: &B::Hash, + _number: NumberFor, + _success: bool, + ) { + } + /// Request a justification for the given block. + fn request_justification(&mut self, _hash: &B::Hash, _number: NumberFor) {} + /// Finality proof import result. + /// + /// Even though we have asked for finality proof of block A, provider could return proof of + /// some earlier block B, if the proof for A was too large. The sync module should continue + /// asking for proof of A in this case. + fn finality_proof_imported( + &mut self, + _who: Origin, + _request_block: (B::Hash, NumberFor), + _finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + } + /// Request a finality proof for the given block. + fn request_finality_proof(&mut self, _hash: &B::Hash, _number: NumberFor) {} } /// Block import successful result. #[derive(Debug, PartialEq)] pub enum BlockImportResult { - /// Imported known block. - ImportedKnown(N), - /// Imported unknown block. - ImportedUnknown(N, ImportedAux, Option), + /// Imported known block. + ImportedKnown(N), + /// Imported unknown block. + ImportedUnknown(N, ImportedAux, Option), } /// Block import error. 
#[derive(Debug)] pub enum BlockImportError { - /// Block missed header, can't be imported - IncompleteHeader(Option), - /// Block verification failed, can't be imported - VerificationFailed(Option, String), - /// Block is known to be Bad - BadBlock(Option), - /// Parent state is missing. - MissingState, - /// Block has an unknown parent - UnknownParent, - /// Block import has been cancelled. This can happen if the parent block fails to be imported. - Cancelled, - /// Other error. - Other(ConsensusError), + /// Block missed header, can't be imported + IncompleteHeader(Option), + /// Block verification failed, can't be imported + VerificationFailed(Option, String), + /// Block is known to be Bad + BadBlock(Option), + /// Parent state is missing. + MissingState, + /// Block has an unknown parent + UnknownParent, + /// Block import has been cancelled. This can happen if the parent block fails to be imported. + Cancelled, + /// Other error. + Other(ConsensusError), } /// Single block import function. 
pub fn import_single_block, Transaction>( - import_handle: &mut dyn BlockImport, - block_origin: BlockOrigin, - block: IncomingBlock, - verifier: &mut V, + import_handle: &mut dyn BlockImport, + block_origin: BlockOrigin, + block: IncomingBlock, + verifier: &mut V, ) -> Result>, BlockImportError> { - let peer = block.origin; + let peer = block.origin; - let (header, justification) = match (block.header, block.justification) { - (Some(header), justification) => (header, justification), - (None, _) => { - if let Some(ref peer) = peer { - debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); - } else { - debug!(target: "sync", "Header {} was not provided ", block.hash); - } - return Err(BlockImportError::IncompleteHeader(peer)) - }, - }; + let (header, justification) = match (block.header, block.justification) { + (Some(header), justification) => (header, justification), + (None, _) => { + if let Some(ref peer) = peer { + debug!(target: "sync", "Header {} was not provided by {} ", block.hash, peer); + } else { + debug!(target: "sync", "Header {} was not provided ", block.hash); + } + return Err(BlockImportError::IncompleteHeader(peer)); + } + }; - trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); + trace!(target: "sync", "Header {} has {:?} logs", block.hash, header.digest().logs().len()); - let number = header.number().clone(); - let hash = header.hash(); - let parent_hash = header.parent_hash().clone(); + let number = header.number().clone(); + let hash = header.hash(); + let parent_hash = header.parent_hash().clone(); - let import_error = |e| { - match e { - Ok(ImportResult::AlreadyInChain) => { - trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); - Ok(BlockImportResult::ImportedKnown(number)) - }, - Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown(number, aux, peer.clone())), - Ok(ImportResult::MissingState) => { - debug!(target: "sync", "Parent state 
is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::MissingState) - }, - Ok(ImportResult::UnknownParent) => { - debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash); - Err(BlockImportError::UnknownParent) - }, - Ok(ImportResult::KnownBad) => { - debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); - Err(BlockImportError::BadBlock(peer.clone())) - }, - Err(e) => { - debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); - Err(BlockImportError::Other(e)) - } - } - }; - match import_error(import_handle.check_block(BlockCheckParams { - hash, - number, - parent_hash, - allow_missing_state: block.allow_missing_state, - import_existing: block.import_existing, - }))? { - BlockImportResult::ImportedUnknown { .. } => (), - r => return Ok(r), // Any other successful result means that the block is already imported. - } + let import_error = |e| match e { + Ok(ImportResult::AlreadyInChain) => { + trace!(target: "sync", "Block already in chain {}: {:?}", number, hash); + Ok(BlockImportResult::ImportedKnown(number)) + } + Ok(ImportResult::Imported(aux)) => Ok(BlockImportResult::ImportedUnknown( + number, + aux, + peer.clone(), + )), + Ok(ImportResult::MissingState) => { + debug!(target: "sync", "Parent state is missing for {}: {:?}, parent: {:?}", number, hash, parent_hash); + Err(BlockImportError::MissingState) + } + Ok(ImportResult::UnknownParent) => { + debug!(target: "sync", "Block with unknown parent {}: {:?}, parent: {:?}", number, hash, parent_hash); + Err(BlockImportError::UnknownParent) + } + Ok(ImportResult::KnownBad) => { + debug!(target: "sync", "Peer gave us a bad block {}: {:?}", number, hash); + Err(BlockImportError::BadBlock(peer.clone())) + } + Err(e) => { + debug!(target: "sync", "Error importing block {}: {:?}: {:?}", number, hash, e); + Err(BlockImportError::Other(e)) + } + }; + match 
import_error(import_handle.check_block(BlockCheckParams { + hash, + number, + parent_hash, + allow_missing_state: block.allow_missing_state, + import_existing: block.import_existing, + }))? { + BlockImportResult::ImportedUnknown { .. } => (), + r => return Ok(r), // Any other successful result means that the block is already imported. + } - let (mut import_block, maybe_keys) = verifier.verify(block_origin, header, justification, block.body) + let (mut import_block, maybe_keys) = verifier.verify(block_origin, header, justification, block.body) .map_err(|msg| { if let Some(ref peer) = peer { trace!(target: "sync", "Verifying {}({}) from {} failed: {}", number, hash, peer, msg); @@ -252,11 +268,11 @@ pub fn import_single_block, Transaction>( BlockImportError::VerificationFailed(peer.clone(), msg) })?; - let mut cache = HashMap::new(); - if let Some(keys) = maybe_keys { - cache.extend(keys.into_iter()); - } - import_block.allow_missing_state = block.allow_missing_state; + let mut cache = HashMap::new(); + if let Some(keys) = maybe_keys { + cache.extend(keys.into_iter()); + } + import_block.allow_missing_state = block.allow_missing_state; - import_error(import_handle.import_block(import_block.convert_transaction(), cache)) + import_error(import_handle.import_block(import_block.convert_transaction(), cache)) } diff --git a/primitives/consensus/common/src/import_queue/basic_queue.rs b/primitives/consensus/common/src/import_queue/basic_queue.rs index 024e473849..694cf8d413 100644 --- a/primitives/consensus/common/src/import_queue/basic_queue.rs +++ b/primitives/consensus/common/src/import_queue/basic_queue.rs @@ -14,328 +14,358 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use std::{mem, pin::Pin, time::Duration, marker::PhantomData, sync::Arc}; use futures::{prelude::*, task::Context, task::Poll}; use futures_timer::Delay; -use parking_lot::{Mutex, Condvar}; -use sp_runtime::{Justification, traits::{Block as BlockT, Header as HeaderT, NumberFor}}; -use sp_utils::mpsc::{TracingUnboundedSender, tracing_unbounded}; +use parking_lot::{Condvar, Mutex}; +use sp_runtime::{ + traits::{Block as BlockT, Header as HeaderT, NumberFor}, + Justification, +}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedSender}; +use std::{marker::PhantomData, mem, pin::Pin, sync::Arc, time::Duration}; use crate::block_import::BlockOrigin; use crate::import_queue::{ - BlockImportResult, BlockImportError, Verifier, BoxBlockImport, BoxFinalityProofImport, - BoxJustificationImport, ImportQueue, Link, Origin, - IncomingBlock, import_single_block, - buffered_link::{self, BufferedLinkSender, BufferedLinkReceiver} + buffered_link::{self, BufferedLinkReceiver, BufferedLinkSender}, + import_single_block, BlockImportError, BlockImportResult, BoxBlockImport, + BoxFinalityProofImport, BoxJustificationImport, ImportQueue, IncomingBlock, Link, Origin, + Verifier, }; /// Interface to a basic block import queue that is importing blocks sequentially in a separate /// task, with plugable verification. pub struct BasicQueue { - /// Channel to send messages to the background task. - sender: TracingUnboundedSender>, - /// Results coming from the worker task. - result_port: BufferedLinkReceiver, - /// If it isn't possible to spawn the future in `future_to_spawn` (which is notably the case in - /// "no std" environment), we instead put it in `manual_poll`. It is then polled manually from - /// `poll_actions`. - manual_poll: Option + Send>>>, - /// A thread pool where the background worker is being run. - pool: Option, - pool_guard: Arc<(Mutex, Condvar)>, - _phantom: PhantomData, + /// Channel to send messages to the background task. 
+ sender: TracingUnboundedSender>, + /// Results coming from the worker task. + result_port: BufferedLinkReceiver, + /// If it isn't possible to spawn the future in `future_to_spawn` (which is notably the case in + /// "no std" environment), we instead put it in `manual_poll`. It is then polled manually from + /// `poll_actions`. + manual_poll: Option + Send>>>, + /// A thread pool where the background worker is being run. + pool: Option, + pool_guard: Arc<(Mutex, Condvar)>, + _phantom: PhantomData, } impl Drop for BasicQueue { - fn drop(&mut self) { - self.pool = None; - // Flush the queue and close the receiver to terminate the future. - self.sender.close_channel(); - self.result_port.close(); - - // Make sure all pool threads terminate. - // https://github.com/rust-lang/futures-rs/issues/1470 - // https://github.com/rust-lang/futures-rs/issues/1349 - let (ref mutex, ref condvar) = *self.pool_guard; - let mut lock = mutex.lock(); - while *lock != 0 { - condvar.wait(&mut lock); - } - } + fn drop(&mut self) { + self.pool = None; + // Flush the queue and close the receiver to terminate the future. + self.sender.close_channel(); + self.result_port.close(); + + // Make sure all pool threads terminate. + // https://github.com/rust-lang/futures-rs/issues/1470 + // https://github.com/rust-lang/futures-rs/issues/1349 + let (ref mutex, ref condvar) = *self.pool_guard; + let mut lock = mutex.lock(); + while *lock != 0 { + condvar.wait(&mut lock); + } + } } impl BasicQueue { - /// Instantiate a new basic queue, with given verifier. - /// - /// This creates a background task, and calls `on_start` on the justification importer and - /// finality proof importer. 
- pub fn new>( - verifier: V, - block_import: BoxBlockImport, - justification_import: Option>, - finality_proof_import: Option>, - ) -> Self { - let (result_sender, result_port) = buffered_link::buffered_link(); - let (future, worker_sender) = BlockImportWorker::new( - result_sender, - verifier, - block_import, - justification_import, - finality_proof_import, - ); - - let guard = Arc::new((Mutex::new(0usize), Condvar::new())); - let guard_start = guard.clone(); - let guard_end = guard.clone(); - - let mut pool = futures::executor::ThreadPool::builder() - .name_prefix("import-queue-worker-") - .pool_size(1) - .after_start(move |_| *guard_start.0.lock() += 1) - .before_stop(move |_| { - let (ref mutex, ref condvar) = *guard_end; - let mut lock = mutex.lock(); - *lock -= 1; - if *lock == 0 { - condvar.notify_one(); - } - }) - .create() - .ok(); - - let manual_poll; - if let Some(pool) = &mut pool { - pool.spawn_ok(futures_diagnose::diagnose("import-queue", future)); - manual_poll = None; - } else { - manual_poll = Some(Box::pin(future) as Pin>); - } - - Self { - sender: worker_sender, - result_port, - manual_poll, - pool, - pool_guard: guard, - _phantom: PhantomData, - } - } + /// Instantiate a new basic queue, with given verifier. + /// + /// This creates a background task, and calls `on_start` on the justification importer and + /// finality proof importer. 
+ pub fn new>( + verifier: V, + block_import: BoxBlockImport, + justification_import: Option>, + finality_proof_import: Option>, + ) -> Self { + let (result_sender, result_port) = buffered_link::buffered_link(); + let (future, worker_sender) = BlockImportWorker::new( + result_sender, + verifier, + block_import, + justification_import, + finality_proof_import, + ); + + let guard = Arc::new((Mutex::new(0usize), Condvar::new())); + let guard_start = guard.clone(); + let guard_end = guard.clone(); + + let mut pool = futures::executor::ThreadPool::builder() + .name_prefix("import-queue-worker-") + .pool_size(1) + .after_start(move |_| *guard_start.0.lock() += 1) + .before_stop(move |_| { + let (ref mutex, ref condvar) = *guard_end; + let mut lock = mutex.lock(); + *lock -= 1; + if *lock == 0 { + condvar.notify_one(); + } + }) + .create() + .ok(); + + let manual_poll; + if let Some(pool) = &mut pool { + pool.spawn_ok(futures_diagnose::diagnose("import-queue", future)); + manual_poll = None; + } else { + manual_poll = Some(Box::pin(future) as Pin>); + } + + Self { + sender: worker_sender, + result_port, + manual_poll, + pool, + pool_guard: guard, + _phantom: PhantomData, + } + } } impl ImportQueue for BasicQueue { - fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { - if blocks.is_empty() { - return; - } - - trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); - let _ = self.sender.unbounded_send(ToWorkerMsg::ImportBlocks(origin, blocks)); - } - - fn import_justification( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - justification: Justification - ) { - let _ = self.sender - .unbounded_send( - ToWorkerMsg::ImportJustification(who.clone(), hash, number, justification) - ); - } - - fn import_finality_proof( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec, - ) { - trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash); - let _ = self.sender - 
.unbounded_send( - ToWorkerMsg::ImportFinalityProof(who, hash, number, finality_proof) - ); - } - - fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { - // As a backup mechanism, if we failed to spawn the `future_to_spawn`, we instead poll - // manually here. - if let Some(manual_poll) = self.manual_poll.as_mut() { - match Future::poll(Pin::new(manual_poll), cx) { - Poll::Pending => {} - _ => self.manual_poll = None, - } - } - - self.result_port.poll_actions(cx, link); - } + fn import_blocks(&mut self, origin: BlockOrigin, blocks: Vec>) { + if blocks.is_empty() { + return; + } + + trace!(target: "sync", "Scheduling {} blocks for import", blocks.len()); + let _ = self + .sender + .unbounded_send(ToWorkerMsg::ImportBlocks(origin, blocks)); + } + + fn import_justification( + &mut self, + who: Origin, + hash: B::Hash, + number: NumberFor, + justification: Justification, + ) { + let _ = self.sender.unbounded_send(ToWorkerMsg::ImportJustification( + who.clone(), + hash, + number, + justification, + )); + } + + fn import_finality_proof( + &mut self, + who: Origin, + hash: B::Hash, + number: NumberFor, + finality_proof: Vec, + ) { + trace!(target: "sync", "Scheduling finality proof of {}/{} for import", number, hash); + let _ = self.sender.unbounded_send(ToWorkerMsg::ImportFinalityProof( + who, + hash, + number, + finality_proof, + )); + } + + fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { + // As a backup mechanism, if we failed to spawn the `future_to_spawn`, we instead poll + // manually here. + if let Some(manual_poll) = self.manual_poll.as_mut() { + match Future::poll(Pin::new(manual_poll), cx) { + Poll::Pending => {} + _ => self.manual_poll = None, + } + } + + self.result_port.poll_actions(cx, link); + } } /// Message destinated to the background worker. 
#[derive(Debug)] enum ToWorkerMsg { - ImportBlocks(BlockOrigin, Vec>), - ImportJustification(Origin, B::Hash, NumberFor, Justification), - ImportFinalityProof(Origin, B::Hash, NumberFor, Vec), + ImportBlocks(BlockOrigin, Vec>), + ImportJustification(Origin, B::Hash, NumberFor, Justification), + ImportFinalityProof(Origin, B::Hash, NumberFor, Vec), } struct BlockImportWorker { - result_sender: BufferedLinkSender, - justification_import: Option>, - finality_proof_import: Option>, - delay_between_blocks: Duration, - _phantom: PhantomData, + result_sender: BufferedLinkSender, + justification_import: Option>, + finality_proof_import: Option>, + delay_between_blocks: Duration, + _phantom: PhantomData, } impl BlockImportWorker { - fn new>( - result_sender: BufferedLinkSender, - verifier: V, - block_import: BoxBlockImport, - justification_import: Option>, - finality_proof_import: Option>, - ) -> (impl Future + Send, TracingUnboundedSender>) { - let (sender, mut port) = tracing_unbounded("mpsc_block_import_worker"); - - let mut worker = BlockImportWorker { - result_sender, - justification_import, - finality_proof_import, - delay_between_blocks: Duration::new(0, 0), - _phantom: PhantomData, - }; - - // Let's initialize `justification_import` and `finality_proof_import`. - if let Some(justification_import) = worker.justification_import.as_mut() { - for (hash, number) in justification_import.on_start() { - worker.result_sender.request_justification(&hash, number); - } - } - if let Some(finality_proof_import) = worker.finality_proof_import.as_mut() { - for (hash, number) in finality_proof_import.on_start() { - worker.result_sender.request_finality_proof(&hash, number); - } - } - - // The future below has two possible states: - // - // - Currently importing many blocks, in which case `importing` is `Some` and contains a - // `Future`, and `block_import` is `None`. - // - Something else, in which case `block_import` is `Some` and `importing` is None. 
- // - let mut block_import_verifier = Some((block_import, verifier)); - let mut importing = None; - - let future = futures::future::poll_fn(move |cx| { - loop { - // If the results sender is closed, that means that the import queue is shutting - // down and we should end this future. - if worker.result_sender.is_closed() { - return Poll::Ready(()) - } - - // If we are in the process of importing a bunch of block, let's resume this - // process before doing anything more. - if let Some(imp_fut) = importing.as_mut() { - match Future::poll(Pin::new(imp_fut), cx) { - Poll::Pending => return Poll::Pending, - Poll::Ready((bi, verif)) => { - block_import_verifier = Some((bi, verif)); - importing = None; - }, - } - } - - debug_assert!(importing.is_none()); - debug_assert!(block_import_verifier.is_some()); - - // Grab the next action request sent to the import queue. - let msg = match Stream::poll_next(Pin::new(&mut port), cx) { - Poll::Ready(Some(msg)) => msg, - Poll::Ready(None) => return Poll::Ready(()), - Poll::Pending => return Poll::Pending, - }; - - match msg { - ToWorkerMsg::ImportBlocks(origin, blocks) => { - // On blocks import request, we merely *start* the process and store - // a `Future` into `importing`. - let (bi, verif) = block_import_verifier.take() - .expect("block_import_verifier is always Some; qed"); - importing = Some(worker.import_a_batch_of_blocks(bi, verif, origin, blocks)); - }, - ToWorkerMsg::ImportFinalityProof(who, hash, number, proof) => { - let (_, verif) = block_import_verifier.as_mut() - .expect("block_import_verifier is always Some; qed"); - worker.import_finality_proof(verif, who, hash, number, proof); - }, - ToWorkerMsg::ImportJustification(who, hash, number, justification) => { - worker.import_justification(who, hash, number, justification); - } - } - } - }); - - (future, sender) - } - - /// Returns a `Future` that imports the given blocks and sends the results on - /// `self.result_sender`. 
- /// - /// For lifetime reasons, the `BlockImport` implementation must be passed by value, and is - /// yielded back in the output once the import is finished. - fn import_a_batch_of_blocks>( - &mut self, - block_import: BoxBlockImport, - verifier: V, - origin: BlockOrigin, - blocks: Vec> - ) -> impl Future, V)> { - let mut result_sender = self.result_sender.clone(); - - import_many_blocks(block_import, origin, blocks, verifier, self.delay_between_blocks) - .then(move |(imported, count, results, block_import, verifier)| { - result_sender.blocks_processed(imported, count, results); - future::ready((block_import, verifier)) - }) - } - - fn import_finality_proof>( - &mut self, - verifier: &mut V, - who: Origin, - hash: B::Hash, - number: NumberFor, - finality_proof: Vec - ) { - let result = self.finality_proof_import.as_mut().map(|finality_proof_import| { - finality_proof_import.import_finality_proof(hash, number, finality_proof, verifier) - .map_err(|e| { - debug!( + fn new>( + result_sender: BufferedLinkSender, + verifier: V, + block_import: BoxBlockImport, + justification_import: Option>, + finality_proof_import: Option>, + ) -> ( + impl Future + Send, + TracingUnboundedSender>, + ) { + let (sender, mut port) = tracing_unbounded("mpsc_block_import_worker"); + + let mut worker = BlockImportWorker { + result_sender, + justification_import, + finality_proof_import, + delay_between_blocks: Duration::new(0, 0), + _phantom: PhantomData, + }; + + // Let's initialize `justification_import` and `finality_proof_import`. 
+ if let Some(justification_import) = worker.justification_import.as_mut() { + for (hash, number) in justification_import.on_start() { + worker.result_sender.request_justification(&hash, number); + } + } + if let Some(finality_proof_import) = worker.finality_proof_import.as_mut() { + for (hash, number) in finality_proof_import.on_start() { + worker.result_sender.request_finality_proof(&hash, number); + } + } + + // The future below has two possible states: + // + // - Currently importing many blocks, in which case `importing` is `Some` and contains a + // `Future`, and `block_import` is `None`. + // - Something else, in which case `block_import` is `Some` and `importing` is None. + // + let mut block_import_verifier = Some((block_import, verifier)); + let mut importing = None; + + let future = futures::future::poll_fn(move |cx| { + loop { + // If the results sender is closed, that means that the import queue is shutting + // down and we should end this future. + if worker.result_sender.is_closed() { + return Poll::Ready(()); + } + + // If we are in the process of importing a bunch of block, let's resume this + // process before doing anything more. + if let Some(imp_fut) = importing.as_mut() { + match Future::poll(Pin::new(imp_fut), cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready((bi, verif)) => { + block_import_verifier = Some((bi, verif)); + importing = None; + } + } + } + + debug_assert!(importing.is_none()); + debug_assert!(block_import_verifier.is_some()); + + // Grab the next action request sent to the import queue. + let msg = match Stream::poll_next(Pin::new(&mut port), cx) { + Poll::Ready(Some(msg)) => msg, + Poll::Ready(None) => return Poll::Ready(()), + Poll::Pending => return Poll::Pending, + }; + + match msg { + ToWorkerMsg::ImportBlocks(origin, blocks) => { + // On blocks import request, we merely *start* the process and store + // a `Future` into `importing`. 
+ let (bi, verif) = block_import_verifier + .take() + .expect("block_import_verifier is always Some; qed"); + importing = + Some(worker.import_a_batch_of_blocks(bi, verif, origin, blocks)); + } + ToWorkerMsg::ImportFinalityProof(who, hash, number, proof) => { + let (_, verif) = block_import_verifier + .as_mut() + .expect("block_import_verifier is always Some; qed"); + worker.import_finality_proof(verif, who, hash, number, proof); + } + ToWorkerMsg::ImportJustification(who, hash, number, justification) => { + worker.import_justification(who, hash, number, justification); + } + } + } + }); + + (future, sender) + } + + /// Returns a `Future` that imports the given blocks and sends the results on + /// `self.result_sender`. + /// + /// For lifetime reasons, the `BlockImport` implementation must be passed by value, and is + /// yielded back in the output once the import is finished. + fn import_a_batch_of_blocks>( + &mut self, + block_import: BoxBlockImport, + verifier: V, + origin: BlockOrigin, + blocks: Vec>, + ) -> impl Future, V)> { + let mut result_sender = self.result_sender.clone(); + + import_many_blocks( + block_import, + origin, + blocks, + verifier, + self.delay_between_blocks, + ) + .then(move |(imported, count, results, block_import, verifier)| { + result_sender.blocks_processed(imported, count, results); + future::ready((block_import, verifier)) + }) + } + + fn import_finality_proof>( + &mut self, + verifier: &mut V, + who: Origin, + hash: B::Hash, + number: NumberFor, + finality_proof: Vec, + ) { + let result = self + .finality_proof_import + .as_mut() + .map(|finality_proof_import| { + finality_proof_import + .import_finality_proof(hash, number, finality_proof, verifier) + .map_err(|e| { + debug!( "Finality proof import failed with {:?} for hash: {:?} number: {:?} coming from node: {:?}", e, hash, number, who, ); - }) - }).unwrap_or(Err(())); - - trace!(target: "sync", "Imported finality proof for {}/{}", number, hash); - 
self.result_sender.finality_proof_imported(who, (hash, number), result); - } - - fn import_justification( - &mut self, - who: Origin, - hash: B::Hash, - number: NumberFor, - justification: Justification - ) { - let success = self.justification_import.as_mut().map(|justification_import| { - justification_import.import_justification(hash, number, justification) + }) + }) + .unwrap_or(Err(())); + + trace!(target: "sync", "Imported finality proof for {}/{}", number, hash); + self.result_sender + .finality_proof_imported(who, (hash, number), result); + } + + fn import_justification( + &mut self, + who: Origin, + hash: B::Hash, + number: NumberFor, + justification: Justification, + ) { + let success = self + .justification_import + .as_mut() + .map(|justification_import| { + justification_import.import_justification(hash, number, justification) .map_err(|e| { debug!( target: "sync", @@ -347,10 +377,12 @@ impl BlockImportWorker { ); e }).is_ok() - }).unwrap_or(false); + }) + .unwrap_or(false); - self.result_sender.justification_imported(who, &hash, number, success); - } + self.result_sender + .justification_imported(who, &hash, number, success); + } } /// Import several blocks at once, returning import result for each block. @@ -361,105 +393,110 @@ impl BlockImportWorker { /// The returned `Future` yields at every imported block, which makes the execution more /// fine-grained and making it possible to interrupt the process. 
fn import_many_blocks, Transaction>( - import_handle: BoxBlockImport, - blocks_origin: BlockOrigin, - blocks: Vec>, - verifier: V, - delay_between_blocks: Duration, + import_handle: BoxBlockImport, + blocks_origin: BlockOrigin, + blocks: Vec>, + verifier: V, + delay_between_blocks: Duration, ) -> impl Future< - Output = ( - usize, - usize, - Vec<(Result>, BlockImportError>, B::Hash,)>, - BoxBlockImport, - V - ) -> -{ - let count = blocks.len(); - - let blocks_range = match ( - blocks.first().and_then(|b| b.header.as_ref().map(|h| h.number())), - blocks.last().and_then(|b| b.header.as_ref().map(|h| h.number())), - ) { - (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), - (Some(first), Some(_)) => format!(" ({})", first), - _ => Default::default(), - }; - - trace!(target: "sync", "Starting import of {} blocks {}", count, blocks_range); - - let mut imported = 0; - let mut results = vec![]; - let mut has_error = false; - let mut blocks = blocks.into_iter(); - let mut import_handle = Some(import_handle); - let mut waiting = None; - let mut verifier = Some(verifier); - - // Blocks in the response/drain should be in ascending order. - - future::poll_fn(move |cx| { - // Handle the optional timer that makes us wait before the next import. - if let Some(waiting) = &mut waiting { - match Future::poll(Pin::new(waiting), cx) { - Poll::Ready(_) => {}, - Poll::Pending => return Poll::Pending, - } - } - waiting = None; - - // Is there any block left to import? - let block = match blocks.next() { - Some(b) => b, - None => { - // No block left to import, success! 
- let import_handle = import_handle.take() - .expect("Future polled again after it has finished"); - let verifier = verifier.take() - .expect("Future polled again after it has finished"); - let results = mem::replace(&mut results, Vec::new()); - return Poll::Ready((imported, count, results, import_handle, verifier)); - }, - }; - - // We extract the content of `import_handle` and `verifier` only when the future ends, - // therefore `import_handle` and `verifier` are always `Some` here. It is illegal to poll - // a `Future` again after it has ended. - let import_handle = import_handle.as_mut() - .expect("Future polled again after it has finished"); - let verifier = verifier.as_mut() - .expect("Future polled again after it has finished"); - - let block_number = block.header.as_ref().map(|h| h.number().clone()); - let block_hash = block.hash; - let import_result = if has_error { - Err(BlockImportError::Cancelled) - } else { - // The actual import. - import_single_block( - &mut **import_handle, - blocks_origin.clone(), - block, - verifier, - ) - }; - - if import_result.is_ok() { - trace!(target: "sync", "Block imported successfully {:?} ({})", block_number, block_hash); - imported += 1; - } else { - has_error = true; - } - - results.push((import_result, block_hash)); - - // Notifies the current task again so that we re-execute this closure again for the next - // block. 
- if delay_between_blocks != Duration::new(0, 0) { - waiting = Some(Delay::new(delay_between_blocks)); - } - cx.waker().wake_by_ref(); - Poll::Pending - }) + Output = ( + usize, + usize, + Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + BoxBlockImport, + V, + ), +> { + let count = blocks.len(); + + let blocks_range = match ( + blocks + .first() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + blocks + .last() + .and_then(|b| b.header.as_ref().map(|h| h.number())), + ) { + (Some(first), Some(last)) if first != last => format!(" ({}..{})", first, last), + (Some(first), Some(_)) => format!(" ({})", first), + _ => Default::default(), + }; + + trace!(target: "sync", "Starting import of {} blocks {}", count, blocks_range); + + let mut imported = 0; + let mut results = vec![]; + let mut has_error = false; + let mut blocks = blocks.into_iter(); + let mut import_handle = Some(import_handle); + let mut waiting = None; + let mut verifier = Some(verifier); + + // Blocks in the response/drain should be in ascending order. + + future::poll_fn(move |cx| { + // Handle the optional timer that makes us wait before the next import. + if let Some(waiting) = &mut waiting { + match Future::poll(Pin::new(waiting), cx) { + Poll::Ready(_) => {} + Poll::Pending => return Poll::Pending, + } + } + waiting = None; + + // Is there any block left to import? + let block = match blocks.next() { + Some(b) => b, + None => { + // No block left to import, success! + let import_handle = import_handle + .take() + .expect("Future polled again after it has finished"); + let verifier = verifier + .take() + .expect("Future polled again after it has finished"); + let results = mem::replace(&mut results, Vec::new()); + return Poll::Ready((imported, count, results, import_handle, verifier)); + } + }; + + // We extract the content of `import_handle` and `verifier` only when the future ends, + // therefore `import_handle` and `verifier` are always `Some` here. 
It is illegal to poll + // a `Future` again after it has ended. + let import_handle = import_handle + .as_mut() + .expect("Future polled again after it has finished"); + let verifier = verifier + .as_mut() + .expect("Future polled again after it has finished"); + + let block_number = block.header.as_ref().map(|h| h.number().clone()); + let block_hash = block.hash; + let import_result = if has_error { + Err(BlockImportError::Cancelled) + } else { + // The actual import. + import_single_block(&mut **import_handle, blocks_origin.clone(), block, verifier) + }; + + if import_result.is_ok() { + trace!(target: "sync", "Block imported successfully {:?} ({})", block_number, block_hash); + imported += 1; + } else { + has_error = true; + } + + results.push((import_result, block_hash)); + + // Notifies the current task again so that we re-execute this closure again for the next + // block. + if delay_between_blocks != Duration::new(0, 0) { + waiting = Some(Delay::new(delay_between_blocks)); + } + cx.waker().wake_by_ref(); + Poll::Pending + }) } diff --git a/primitives/consensus/common/src/import_queue/buffered_link.rs b/primitives/consensus/common/src/import_queue/buffered_link.rs index ea77fc97f0..fddfbac3c9 100644 --- a/primitives/consensus/common/src/import_queue/buffered_link.rs +++ b/primitives/consensus/common/src/import_queue/buffered_link.rs @@ -37,143 +37,178 @@ //! ``` //! +use crate::import_queue::{BlockImportError, BlockImportResult, Link, Origin}; use futures::prelude::*; use sp_runtime::traits::{Block as BlockT, NumberFor}; -use sp_utils::mpsc::{TracingUnboundedSender, TracingUnboundedReceiver, tracing_unbounded}; +use sp_utils::mpsc::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; use std::{pin::Pin, task::Context, task::Poll}; -use crate::import_queue::{Origin, Link, BlockImportResult, BlockImportError}; /// Wraps around an unbounded channel from the `futures` crate. 
The sender implements `Link` and /// can be used to buffer commands, and the receiver can be used to poll said commands and transfer /// them to another link. pub fn buffered_link() -> (BufferedLinkSender, BufferedLinkReceiver) { - let (tx, rx) = tracing_unbounded("mpsc_buffered_link"); - let tx = BufferedLinkSender { tx }; - let rx = BufferedLinkReceiver { rx }; - (tx, rx) + let (tx, rx) = tracing_unbounded("mpsc_buffered_link"); + let tx = BufferedLinkSender { tx }; + let rx = BufferedLinkReceiver { rx }; + (tx, rx) } /// See [`buffered_link`]. pub struct BufferedLinkSender { - tx: TracingUnboundedSender>, + tx: TracingUnboundedSender>, } impl BufferedLinkSender { - /// Returns true if the sender points to nowhere. - /// - /// Once `true` is returned, it is pointless to use the sender anymore. - pub fn is_closed(&self) -> bool { - self.tx.is_closed() - } + /// Returns true if the sender points to nowhere. + /// + /// Once `true` is returned, it is pointless to use the sender anymore. + pub fn is_closed(&self) -> bool { + self.tx.is_closed() + } } impl Clone for BufferedLinkSender { - fn clone(&self) -> Self { - BufferedLinkSender { - tx: self.tx.clone(), - } - } + fn clone(&self) -> Self { + BufferedLinkSender { + tx: self.tx.clone(), + } + } } /// Internal buffered message. 
enum BlockImportWorkerMsg { - BlocksProcessed(usize, usize, Vec<(Result>, BlockImportError>, B::Hash)>), - JustificationImported(Origin, B::Hash, NumberFor, bool), - RequestJustification(B::Hash, NumberFor), - FinalityProofImported(Origin, (B::Hash, NumberFor), Result<(B::Hash, NumberFor), ()>), - RequestFinalityProof(B::Hash, NumberFor), + BlocksProcessed( + usize, + usize, + Vec<( + Result>, BlockImportError>, + B::Hash, + )>, + ), + JustificationImported(Origin, B::Hash, NumberFor, bool), + RequestJustification(B::Hash, NumberFor), + FinalityProofImported( + Origin, + (B::Hash, NumberFor), + Result<(B::Hash, NumberFor), ()>, + ), + RequestFinalityProof(B::Hash, NumberFor), } impl Link for BufferedLinkSender { - fn blocks_processed( - &mut self, - imported: usize, - count: usize, - results: Vec<(Result>, BlockImportError>, B::Hash)> - ) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::BlocksProcessed(imported, count, results)); - } - - fn justification_imported( - &mut self, - who: Origin, - hash: &B::Hash, - number: NumberFor, - success: bool - ) { - let msg = BlockImportWorkerMsg::JustificationImported(who, hash.clone(), number, success); - let _ = self.tx.unbounded_send(msg); - } - - fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestJustification(hash.clone(), number)); - } - - fn finality_proof_imported( - &mut self, - who: Origin, - request_block: (B::Hash, NumberFor), - finalization_result: Result<(B::Hash, NumberFor), ()>, - ) { - let msg = BlockImportWorkerMsg::FinalityProofImported(who, request_block, finalization_result); - let _ = self.tx.unbounded_send(msg); - } - - fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { - let _ = self.tx.unbounded_send(BlockImportWorkerMsg::RequestFinalityProof(hash.clone(), number)); - } + fn blocks_processed( + &mut self, + imported: usize, + count: usize, + results: Vec<( + Result>, BlockImportError>, + 
B::Hash, + )>, + ) { + let _ = self + .tx + .unbounded_send(BlockImportWorkerMsg::BlocksProcessed( + imported, count, results, + )); + } + + fn justification_imported( + &mut self, + who: Origin, + hash: &B::Hash, + number: NumberFor, + success: bool, + ) { + let msg = BlockImportWorkerMsg::JustificationImported(who, hash.clone(), number, success); + let _ = self.tx.unbounded_send(msg); + } + + fn request_justification(&mut self, hash: &B::Hash, number: NumberFor) { + let _ = self + .tx + .unbounded_send(BlockImportWorkerMsg::RequestJustification( + hash.clone(), + number, + )); + } + + fn finality_proof_imported( + &mut self, + who: Origin, + request_block: (B::Hash, NumberFor), + finalization_result: Result<(B::Hash, NumberFor), ()>, + ) { + let msg = + BlockImportWorkerMsg::FinalityProofImported(who, request_block, finalization_result); + let _ = self.tx.unbounded_send(msg); + } + + fn request_finality_proof(&mut self, hash: &B::Hash, number: NumberFor) { + let _ = self + .tx + .unbounded_send(BlockImportWorkerMsg::RequestFinalityProof( + hash.clone(), + number, + )); + } } /// See [`buffered_link`]. pub struct BufferedLinkReceiver { - rx: TracingUnboundedReceiver>, + rx: TracingUnboundedReceiver>, } impl BufferedLinkReceiver { - /// Polls for the buffered link actions. Any enqueued action will be propagated to the link - /// passed as parameter. - /// - /// This method should behave in a way similar to `Future::poll`. It can register the current - /// task and notify later when more actions are ready to be polled. To continue the comparison, - /// it is as if this method always returned `Poll::Pending`. 
- pub fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { - loop { - let msg = if let Poll::Ready(Some(msg)) = Stream::poll_next(Pin::new(&mut self.rx), cx) { - msg - } else { - break - }; - - match msg { - BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => - link.blocks_processed(imported, count, results), - BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => - link.justification_imported(who, &hash, number, success), - BlockImportWorkerMsg::RequestJustification(hash, number) => - link.request_justification(&hash, number), - BlockImportWorkerMsg::FinalityProofImported(who, block, result) => - link.finality_proof_imported(who, block, result), - BlockImportWorkerMsg::RequestFinalityProof(hash, number) => - link.request_finality_proof(&hash, number), - } - } - } - - /// Close the channel. - pub fn close(&mut self) { - self.rx.close() - } + /// Polls for the buffered link actions. Any enqueued action will be propagated to the link + /// passed as parameter. + /// + /// This method should behave in a way similar to `Future::poll`. It can register the current + /// task and notify later when more actions are ready to be polled. To continue the comparison, + /// it is as if this method always returned `Poll::Pending`. 
+ pub fn poll_actions(&mut self, cx: &mut Context, link: &mut dyn Link) { + loop { + let msg = if let Poll::Ready(Some(msg)) = Stream::poll_next(Pin::new(&mut self.rx), cx) + { + msg + } else { + break; + }; + + match msg { + BlockImportWorkerMsg::BlocksProcessed(imported, count, results) => { + link.blocks_processed(imported, count, results) + } + BlockImportWorkerMsg::JustificationImported(who, hash, number, success) => { + link.justification_imported(who, &hash, number, success) + } + BlockImportWorkerMsg::RequestJustification(hash, number) => { + link.request_justification(&hash, number) + } + BlockImportWorkerMsg::FinalityProofImported(who, block, result) => { + link.finality_proof_imported(who, block, result) + } + BlockImportWorkerMsg::RequestFinalityProof(hash, number) => { + link.request_finality_proof(&hash, number) + } + } + } + } + + /// Close the channel. + pub fn close(&mut self) { + self.rx.close() + } } #[cfg(test)] mod tests { - use sp_test_primitives::Block; - - #[test] - fn is_closed() { - let (tx, rx) = super::buffered_link::(); - assert!(!tx.is_closed()); - drop(rx); - assert!(tx.is_closed()); - } + use sp_test_primitives::Block; + + #[test] + fn is_closed() { + let (tx, rx) = super::buffered_link::(); + assert!(!tx.is_closed()); + drop(rx); + assert!(tx.is_closed()); + } } diff --git a/primitives/consensus/common/src/lib.rs b/primitives/consensus/common/src/lib.rs index 09dc031dc9..4476a09129 100644 --- a/primitives/consensus/common/src/lib.rs +++ b/primitives/consensus/common/src/lib.rs @@ -22,36 +22,37 @@ // This provides "unused" building blocks to other crates #![allow(dead_code)] - // our error-chain could potentially blow up otherwise -#![recursion_limit="128"] +#![recursion_limit = "128"] -#[macro_use] extern crate log; +#[macro_use] +extern crate log; use std::sync::Arc; use std::time::Duration; -use sp_runtime::{ - generic::BlockId, traits::{Block as BlockT, DigestFor, NumberFor, HashFor}, -}; use futures::prelude::*; pub use 
sp_inherents::InherentData; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, DigestFor, HashFor, NumberFor}, +}; +pub mod block_import; pub mod block_validation; -pub mod offline_tracker; pub mod error; -pub mod block_import; -mod select_chain; -pub mod import_queue; pub mod evaluation; +pub mod import_queue; +pub mod offline_tracker; +mod select_chain; // block size limit. const MAX_BLOCK_SIZE: usize = 4 * 1024 * 1024 + 512; pub use self::error::Error; pub use block_import::{ - BlockImport, BlockOrigin, ForkChoiceStrategy, ImportedAux, BlockImportParams, BlockCheckParams, - ImportResult, JustificationImport, FinalityProofImport, + BlockCheckParams, BlockImport, BlockImportParams, BlockOrigin, FinalityProofImport, + ForkChoiceStrategy, ImportResult, ImportedAux, JustificationImport, }; pub use select_chain::SelectChain; pub use sp_state_machine::Backend as StateBackend; @@ -59,41 +60,44 @@ pub use sp_state_machine::Backend as StateBackend; /// Block status. #[derive(Debug, PartialEq, Eq)] pub enum BlockStatus { - /// Added to the import queue. - Queued, - /// Already in the blockchain and the state is available. - InChainWithState, - /// In the blockchain, but the state is not available. - InChainPruned, - /// Block or parent is known to be bad. - KnownBad, - /// Not in the queue or the blockchain. - Unknown, + /// Added to the import queue. + Queued, + /// Already in the blockchain and the state is available. + InChainWithState, + /// In the blockchain, but the state is not available. + InChainPruned, + /// Block or parent is known to be bad. + KnownBad, + /// Not in the queue or the blockchain. + Unknown, } /// Environment producer for a Consensus instance. Creates proposer instance and communication streams. pub trait Environment { - /// The proposer type this creates. - type Proposer: Proposer + Send + 'static; - /// A future that resolves to the proposer. 
- type CreateProposer: Future> - + Send + Unpin + 'static; - /// Error which can occur upon creation. - type Error: From + std::fmt::Debug + 'static; - - /// Initialize the proposal logic on top of a specific header. Provide - /// the authorities at that header. - fn init(&mut self, parent_header: &B::Header) -> Self::CreateProposer; + /// The proposer type this creates. + type Proposer: Proposer + Send + 'static; + /// A future that resolves to the proposer. + type CreateProposer: Future> + + Send + + Unpin + + 'static; + /// Error which can occur upon creation. + type Error: From + std::fmt::Debug + 'static; + + /// Initialize the proposal logic on top of a specific header. Provide + /// the authorities at that header. + fn init(&mut self, parent_header: &B::Header) -> Self::CreateProposer; } /// A proposal that is created by a [`Proposer`]. pub struct Proposal { - /// The block that was build. - pub block: Block, - /// Optional proof that was recorded while building the block. - pub proof: Option, - /// The storage changes while building this block. - pub storage_changes: sp_state_machine::StorageChanges, NumberFor>, + /// The block that was build. + pub block: Block, + /// Optional proof that was recorded while building the block. + pub proof: Option, + /// The storage changes while building this block. + pub storage_changes: + sp_state_machine::StorageChanges, NumberFor>, } /// Used as parameter to [`Proposer`] to tell the requirement on recording a proof. @@ -103,30 +107,30 @@ pub struct Proposal { /// full storage. #[derive(Copy, Clone, PartialEq)] pub enum RecordProof { - /// `Yes`, record a proof. - Yes, - /// `No`, don't record any proof. - No, + /// `Yes`, record a proof. + Yes, + /// `No`, don't record any proof. + No, } impl RecordProof { - /// Returns if `Self` == `Yes`. - pub fn yes(&self) -> bool { - match self { - Self::Yes => true, - Self::No => false, - } - } + /// Returns if `Self` == `Yes`. 
+ pub fn yes(&self) -> bool { + match self { + Self::Yes => true, + Self::No => false, + } + } } impl From for RecordProof { - fn from(val: bool) -> Self { - if val { - Self::Yes - } else { - Self::No - } - } + fn from(val: bool) -> Self { + if val { + Self::Yes + } else { + Self::No + } + } } /// Logic for a proposer. @@ -136,30 +140,32 @@ impl From for RecordProof { /// /// Proposers are generic over bits of "consensus data" which are engine-specific. pub trait Proposer { - /// Error type which can occur when proposing or evaluating. - type Error: From + std::fmt::Debug + 'static; - /// The transaction type used by the backend. - type Transaction: Default + Send + 'static; - /// Future that resolves to a committed proposal with an optional proof. - type Proposal: Future, Self::Error>> + - Send + Unpin + 'static; - - /// Create a proposal. - /// - /// Gets the `inherent_data` and `inherent_digests` as input for the proposal. Additionally - /// a maximum duration for building this proposal is given. If building the proposal takes - /// longer than this maximum, the proposal will be very likely discarded. - /// - /// # Return - /// - /// Returns a future that resolves to a [`Proposal`] or to [`Self::Error`]. - fn propose( - &mut self, - inherent_data: InherentData, - inherent_digests: DigestFor, - max_duration: Duration, - record_proof: RecordProof, - ) -> Self::Proposal; + /// Error type which can occur when proposing or evaluating. + type Error: From + std::fmt::Debug + 'static; + /// The transaction type used by the backend. + type Transaction: Default + Send + 'static; + /// Future that resolves to a committed proposal with an optional proof. + type Proposal: Future, Self::Error>> + + Send + + Unpin + + 'static; + + /// Create a proposal. + /// + /// Gets the `inherent_data` and `inherent_digests` as input for the proposal. Additionally + /// a maximum duration for building this proposal is given. 
If building the proposal takes + /// longer than this maximum, the proposal will be very likely discarded. + /// + /// # Return + /// + /// Returns a future that resolves to a [`Proposal`] or to [`Self::Error`]. + fn propose( + &mut self, + inherent_data: InherentData, + inherent_digests: DigestFor, + max_duration: Duration, + record_proof: RecordProof, + ) -> Self::Proposal; } /// An oracle for when major synchronization work is being undertaken. @@ -167,12 +173,12 @@ pub trait Proposer { /// Generally, consensus authoring work isn't undertaken while well behind /// the head of the chain. pub trait SyncOracle { - /// Whether the synchronization service is undergoing major sync. - /// Returns true if so. - fn is_major_syncing(&mut self) -> bool; - /// Whether the synchronization service is offline. - /// Returns true if so. - fn is_offline(&mut self) -> bool; + /// Whether the synchronization service is undergoing major sync. + /// Returns true if so. + fn is_major_syncing(&mut self) -> bool; + /// Whether the synchronization service is offline. + /// Returns true if so. + fn is_offline(&mut self) -> bool; } /// A synchronization oracle for when there is no network. 
@@ -180,30 +186,38 @@ pub trait SyncOracle { pub struct NoNetwork; impl SyncOracle for NoNetwork { - fn is_major_syncing(&mut self) -> bool { false } - fn is_offline(&mut self) -> bool { false } + fn is_major_syncing(&mut self) -> bool { + false + } + fn is_offline(&mut self) -> bool { + false + } } -impl SyncOracle for Arc where T: ?Sized, for<'r> &'r T: SyncOracle { - fn is_major_syncing(&mut self) -> bool { - <&T>::is_major_syncing(&mut &**self) - } +impl SyncOracle for Arc +where + T: ?Sized, + for<'r> &'r T: SyncOracle, +{ + fn is_major_syncing(&mut self) -> bool { + <&T>::is_major_syncing(&mut &**self) + } - fn is_offline(&mut self) -> bool { - <&T>::is_offline(&mut &**self) - } + fn is_offline(&mut self) -> bool { + <&T>::is_offline(&mut &**self) + } } /// Checks if the current active native block authoring implementation can author with the runtime /// at the given block. pub trait CanAuthorWith { - /// See trait docs for more information. - /// - /// # Return - /// - /// - Returns `Ok(())` when authoring is supported. - /// - Returns `Err(_)` when authoring is not supported. - fn can_author_with(&self, at: &BlockId) -> Result<(), String>; + /// See trait docs for more information. + /// + /// # Return + /// + /// - Returns `Ok(())` when authoring is supported. + /// - Returns `Err(_)` when authoring is not supported. + fn can_author_with(&self, at: &BlockId) -> Result<(), String>; } /// Checks if the node can author blocks by using @@ -211,51 +225,48 @@ pub trait CanAuthorWith { pub struct CanAuthorWithNativeVersion(T); impl CanAuthorWithNativeVersion { - /// Creates a new instance of `Self`. - pub fn new(inner: T) -> Self { - Self(inner) - } + /// Creates a new instance of `Self`. 
+ pub fn new(inner: T) -> Self { + Self(inner) + } } impl, Block: BlockT> CanAuthorWith - for CanAuthorWithNativeVersion + for CanAuthorWithNativeVersion { - fn can_author_with(&self, at: &BlockId) -> Result<(), String> { - match self.0.runtime_version(at) { - Ok(version) => self.0.native_version().can_author_with(&version), - Err(e) => { - Err(format!( - "Failed to get runtime version at `{}` and will disable authoring. Error: {}", - at, - e, - )) - } - } - } + fn can_author_with(&self, at: &BlockId) -> Result<(), String> { + match self.0.runtime_version(at) { + Ok(version) => self.0.native_version().can_author_with(&version), + Err(e) => Err(format!( + "Failed to get runtime version at `{}` and will disable authoring. Error: {}", + at, e, + )), + } + } } /// Returns always `true` for `can_author_with`. This is useful for tests. pub struct AlwaysCanAuthor; impl CanAuthorWith for AlwaysCanAuthor { - fn can_author_with(&self, _: &BlockId) -> Result<(), String> { - Ok(()) - } + fn can_author_with(&self, _: &BlockId) -> Result<(), String> { + Ok(()) + } } /// A type from which a slot duration can be obtained. pub trait SlotData { - /// Gets the slot duration. - fn slot_duration(&self) -> u64; + /// Gets the slot duration. + fn slot_duration(&self) -> u64; - /// The static slot key - const SLOT_KEY: &'static [u8]; + /// The static slot key + const SLOT_KEY: &'static [u8]; } impl SlotData for u64 { - fn slot_duration(&self) -> u64 { - *self - } + fn slot_duration(&self) -> u64 { + *self + } - const SLOT_KEY: &'static [u8] = b"aura_slot_duration"; + const SLOT_KEY: &'static [u8] = b"aura_slot_duration"; } diff --git a/primitives/consensus/common/src/offline_tracker.rs b/primitives/consensus/common/src/offline_tracker.rs index b4959503b1..89a19c8cf7 100644 --- a/primitives/consensus/common/src/offline_tracker.rs +++ b/primitives/consensus/common/src/offline_tracker.rs @@ -17,119 +17,130 @@ //! Tracks offline validators. 
use std::collections::HashMap; -use std::time::{Instant, Duration}; +use std::time::{Duration, Instant}; // time before we report a validator. const REPORT_TIME: Duration = Duration::from_secs(60 * 5); struct Observed { - last_round_end: Instant, - offline_since: Instant, + last_round_end: Instant, + offline_since: Instant, } impl Observed { - fn new() -> Observed { - let now = Instant::now(); - Observed { - last_round_end: now, - offline_since: now, - } - } - - fn note_round_end(&mut self, was_online: bool) { - let now = Instant::now(); - - self.last_round_end = now; - if was_online { - self.offline_since = now; - } - } - - fn is_active(&self) -> bool { - // can happen if clocks are not monotonic - if self.offline_since > self.last_round_end { return true } - self.last_round_end.duration_since(self.offline_since) < REPORT_TIME - } + fn new() -> Observed { + let now = Instant::now(); + Observed { + last_round_end: now, + offline_since: now, + } + } + + fn note_round_end(&mut self, was_online: bool) { + let now = Instant::now(); + + self.last_round_end = now; + if was_online { + self.offline_since = now; + } + } + + fn is_active(&self) -> bool { + // can happen if clocks are not monotonic + if self.offline_since > self.last_round_end { + return true; + } + self.last_round_end.duration_since(self.offline_since) < REPORT_TIME + } } /// Tracks offline validators and can issue a report for those offline. pub struct OfflineTracker { - observed: HashMap, + observed: HashMap, } impl OfflineTracker { - /// Create a new tracker. - pub fn new() -> Self { - OfflineTracker { observed: HashMap::new() } - } - - /// Note new consensus is starting with the given set of validators. - pub fn note_new_block(&mut self, validators: &[AuthorityId]) { - use std::collections::HashSet; - - let set: HashSet<_> = validators.iter().cloned().collect(); - self.observed.retain(|k, _| set.contains(k)); - } - - /// Note that a round has ended. 
- pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) { - self.observed.entry(validator) - .or_insert_with(Observed::new) - .note_round_end(was_online); - } - - /// Generate a vector of indices for offline account IDs. - pub fn reports(&self, validators: &[AuthorityId]) -> Vec { - validators.iter() - .enumerate() - .filter_map(|(i, v)| if self.is_online(v) { - None - } else { - Some(i as u32) - }) - .collect() - } - - /// Whether reports on a validator set are consistent with our view of things. - pub fn check_consistency(&self, validators: &[AuthorityId], reports: &[u32]) -> bool { - reports.iter().cloned().all(|r| { - let v = match validators.get(r as usize) { - Some(v) => v, - None => return false, - }; - - // we must think all validators reported externally are offline. - let thinks_online = self.is_online(v); - !thinks_online - }) - } - - fn is_online(&self, v: &AuthorityId) -> bool { - self.observed.get(v).map(Observed::is_active).unwrap_or(true) - } + /// Create a new tracker. + pub fn new() -> Self { + OfflineTracker { + observed: HashMap::new(), + } + } + + /// Note new consensus is starting with the given set of validators. + pub fn note_new_block(&mut self, validators: &[AuthorityId]) { + use std::collections::HashSet; + + let set: HashSet<_> = validators.iter().cloned().collect(); + self.observed.retain(|k, _| set.contains(k)); + } + + /// Note that a round has ended. + pub fn note_round_end(&mut self, validator: AuthorityId, was_online: bool) { + self.observed + .entry(validator) + .or_insert_with(Observed::new) + .note_round_end(was_online); + } + + /// Generate a vector of indices for offline account IDs. + pub fn reports(&self, validators: &[AuthorityId]) -> Vec { + validators + .iter() + .enumerate() + .filter_map(|(i, v)| { + if self.is_online(v) { + None + } else { + Some(i as u32) + } + }) + .collect() + } + + /// Whether reports on a validator set are consistent with our view of things. 
+ pub fn check_consistency(&self, validators: &[AuthorityId], reports: &[u32]) -> bool { + reports.iter().cloned().all(|r| { + let v = match validators.get(r as usize) { + Some(v) => v, + None => return false, + }; + + // we must think all validators reported externally are offline. + let thinks_online = self.is_online(v); + !thinks_online + }) + } + + fn is_online(&self, v: &AuthorityId) -> bool { + self.observed + .get(v) + .map(Observed::is_active) + .unwrap_or(true) + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn validator_offline() { - let mut tracker = OfflineTracker::::new(); - let v1 = 1; - let v2 = 2; - let v3 = 3; - tracker.note_round_end(v1, true); - tracker.note_round_end(v2, true); - tracker.note_round_end(v3, true); - - let slash_time = REPORT_TIME + Duration::from_secs(5); - tracker.observed.get_mut(&v1).unwrap().offline_since -= slash_time; - tracker.observed.get_mut(&v2).unwrap().offline_since -= slash_time; - - assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0, 1]); - - tracker.note_new_block(&[v1, v3]); - assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0]); - } + use super::*; + + #[test] + fn validator_offline() { + let mut tracker = OfflineTracker::::new(); + let v1 = 1; + let v2 = 2; + let v3 = 3; + tracker.note_round_end(v1, true); + tracker.note_round_end(v2, true); + tracker.note_round_end(v3, true); + + let slash_time = REPORT_TIME + Duration::from_secs(5); + tracker.observed.get_mut(&v1).unwrap().offline_since -= slash_time; + tracker.observed.get_mut(&v2).unwrap().offline_since -= slash_time; + + assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0, 1]); + + tracker.note_new_block(&[v1, v3]); + assert_eq!(tracker.reports(&[v1, v2, v3]), vec![0]); + } } diff --git a/primitives/consensus/common/src/select_chain.rs b/primitives/consensus/common/src/select_chain.rs index fe0d397204..6c89a26b77 100644 --- a/primitives/consensus/common/src/select_chain.rs +++ b/primitives/consensus/common/src/select_chain.rs @@ -17,7 +17,6 @@ use 
crate::error::Error; use sp_runtime::traits::{Block as BlockT, NumberFor}; - /// The SelectChain trait defines the strategy upon which the head is chosen /// if multiple forks are present for an opaque definition of "best" in the /// specific chain build. @@ -33,23 +32,22 @@ use sp_runtime::traits::{Block as BlockT, NumberFor}; /// /// Non-deterministically finalizing chains may only use the `_authoring` functions. pub trait SelectChain: Sync + Send + Clone { - - /// Get all leaves of the chain: block hashes that have no children currently. - /// Leaves that can never be finalized will not be returned. - fn leaves(&self) -> Result::Hash>, Error>; - - /// Among those `leaves` deterministically pick one chain as the generally - /// best chain to author new blocks upon and probably finalize. - fn best_chain(&self) -> Result<::Header, Error>; - - /// Get the best descendent of `target_hash` that we should attempt to - /// finalize next, if any. It is valid to return the given `target_hash` - /// itself if no better descendent exists. - fn finality_target( - &self, - target_hash: ::Hash, - _maybe_max_number: Option> - ) -> Result::Hash>, Error> { - Ok(Some(target_hash)) - } + /// Get all leaves of the chain: block hashes that have no children currently. + /// Leaves that can never be finalized will not be returned. + fn leaves(&self) -> Result::Hash>, Error>; + + /// Among those `leaves` deterministically pick one chain as the generally + /// best chain to author new blocks upon and probably finalize. + fn best_chain(&self) -> Result<::Header, Error>; + + /// Get the best descendent of `target_hash` that we should attempt to + /// finalize next, if any. It is valid to return the given `target_hash` + /// itself if no better descendent exists. 
+ fn finality_target( + &self, + target_hash: ::Hash, + _maybe_max_number: Option>, + ) -> Result::Hash>, Error> { + Ok(Some(target_hash)) + } } diff --git a/primitives/consensus/pow/src/lib.rs b/primitives/consensus/pow/src/lib.rs index fa8f75d1be..97a4659dd9 100644 --- a/primitives/consensus/pow/src/lib.rs +++ b/primitives/consensus/pow/src/lib.rs @@ -18,9 +18,9 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::vec::Vec; -use sp_runtime::ConsensusEngineId; use codec::Decode; +use sp_runtime::ConsensusEngineId; +use sp_std::vec::Vec; /// The `ConsensusEngineId` of PoW. pub const POW_ENGINE_ID: ConsensusEngineId = [b'p', b'o', b'w', b'_']; @@ -30,35 +30,35 @@ pub type Seal = Vec; /// Define methods that total difficulty should implement. pub trait TotalDifficulty { - fn increment(&mut self, other: Self); + fn increment(&mut self, other: Self); } impl TotalDifficulty for sp_core::U256 { - fn increment(&mut self, other: Self) { - let ret = self.saturating_add(other); - *self = ret; - } + fn increment(&mut self, other: Self) { + let ret = self.saturating_add(other); + *self = ret; + } } impl TotalDifficulty for u128 { - fn increment(&mut self, other: Self) { - let ret = self.saturating_add(other); - *self = ret; - } + fn increment(&mut self, other: Self) { + let ret = self.saturating_add(other); + *self = ret; + } } sp_api::decl_runtime_apis! { - /// API necessary for timestamp-based difficulty adjustment algorithms. - pub trait TimestampApi { - /// Return the timestamp in the current block. - fn timestamp() -> Moment; - } + /// API necessary for timestamp-based difficulty adjustment algorithms. + pub trait TimestampApi { + /// Return the timestamp in the current block. + fn timestamp() -> Moment; + } - /// API for those chains that put their difficulty adjustment algorithm directly - /// onto runtime. Note that while putting difficulty adjustment algorithm to - /// runtime is safe, putting the PoW algorithm on runtime is not. 
- pub trait DifficultyApi { - /// Return the target difficulty of the next block. - fn difficulty() -> Difficulty; - } + /// API for those chains that put their difficulty adjustment algorithm directly + /// onto runtime. Note that while putting difficulty adjustment algorithm to + /// runtime is safe, putting the PoW algorithm on runtime is not. + pub trait DifficultyApi { + /// Return the target difficulty of the next block. + fn difficulty() -> Difficulty; + } } diff --git a/primitives/consensus/vrf/src/schnorrkel.rs b/primitives/consensus/vrf/src/schnorrkel.rs index 265572dbda..d573387405 100644 --- a/primitives/consensus/vrf/src/schnorrkel.rs +++ b/primitives/consensus/vrf/src/schnorrkel.rs @@ -16,20 +16,23 @@ //! Schnorrkel-based VRF. -use codec::{Encode, Decode}; -use sp_runtime::RuntimeDebug; -use sp_std::ops::{Deref, DerefMut}; -#[cfg(feature = "std")] -use std::convert::TryFrom; #[cfg(feature = "std")] use codec::EncodeLike; +use codec::{Decode, Encode}; #[cfg(feature = "std")] use schnorrkel::errors::MultiSignatureStage; #[cfg(feature = "std")] use sp_core::U512; +use sp_runtime::RuntimeDebug; +use sp_std::ops::{Deref, DerefMut}; +#[cfg(feature = "std")] +use std::convert::TryFrom; #[cfg(feature = "std")] -pub use schnorrkel::{SignatureError, vrf::{VRF_PROOF_LENGTH, VRF_OUTPUT_LENGTH}}; +pub use schnorrkel::{ + vrf::{VRF_OUTPUT_LENGTH, VRF_PROOF_LENGTH}, + SignatureError, +}; /// The length of the VRF proof. 
#[cfg(not(feature = "std"))] @@ -47,12 +50,16 @@ pub const RANDOMNESS_LENGTH: usize = VRF_OUTPUT_LENGTH; pub struct RawVRFOutput(pub [u8; VRF_OUTPUT_LENGTH]); impl Deref for RawVRFOutput { - type Target = [u8; VRF_OUTPUT_LENGTH]; - fn deref(&self) -> &Self::Target { &self.0 } + type Target = [u8; VRF_OUTPUT_LENGTH]; + fn deref(&self) -> &Self::Target { + &self.0 + } } impl DerefMut for RawVRFOutput { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } /// VRF output type available for `std` environment, suitable for schnorrkel operations. @@ -62,56 +69,62 @@ pub struct VRFOutput(pub schnorrkel::vrf::VRFOutput); #[cfg(feature = "std")] impl Deref for VRFOutput { - type Target = schnorrkel::vrf::VRFOutput; - fn deref(&self) -> &Self::Target { &self.0 } + type Target = schnorrkel::vrf::VRFOutput; + fn deref(&self) -> &Self::Target { + &self.0 + } } #[cfg(feature = "std")] impl DerefMut for VRFOutput { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } #[cfg(feature = "std")] impl Encode for VRFOutput { - fn encode(&self) -> Vec { - self.0.as_bytes().encode() - } + fn encode(&self) -> Vec { + self.0.as_bytes().encode() + } } #[cfg(feature = "std")] -impl EncodeLike for VRFOutput { } +impl EncodeLike for VRFOutput {} #[cfg(feature = "std")] impl Decode for VRFOutput { - fn decode(i: &mut R) -> Result { - let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; - Ok(Self(schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?)) - } + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_OUTPUT_LENGTH]>::decode(i)?; + Ok(Self( + schnorrkel::vrf::VRFOutput::from_bytes(&decoded).map_err(convert_error)?, + )) + } } #[cfg(feature = "std")] impl TryFrom<[u8; VRF_OUTPUT_LENGTH]> for VRFOutput { - type Error = SignatureError; + type Error = SignatureError; - fn try_from(raw: [u8; VRF_OUTPUT_LENGTH]) -> 
Result { - schnorrkel::vrf::VRFOutput::from_bytes(&raw).map(VRFOutput) - } + fn try_from(raw: [u8; VRF_OUTPUT_LENGTH]) -> Result { + schnorrkel::vrf::VRFOutput::from_bytes(&raw).map(VRFOutput) + } } #[cfg(feature = "std")] impl TryFrom for VRFOutput { - type Error = SignatureError; + type Error = SignatureError; - fn try_from(raw: RawVRFOutput) -> Result { - schnorrkel::vrf::VRFOutput::from_bytes(&raw.0).map(VRFOutput) - } + fn try_from(raw: RawVRFOutput) -> Result { + schnorrkel::vrf::VRFOutput::from_bytes(&raw.0).map(VRFOutput) + } } #[cfg(feature = "std")] impl From for RawVRFOutput { - fn from(output: VRFOutput) -> RawVRFOutput { - RawVRFOutput(output.to_bytes()) - } + fn from(output: VRFOutput) -> RawVRFOutput { + RawVRFOutput(output.to_bytes()) + } } /// Raw VRF proof. @@ -119,28 +132,32 @@ impl From for RawVRFOutput { pub struct RawVRFProof(pub [u8; VRF_PROOF_LENGTH]); impl Deref for RawVRFProof { - type Target = [u8; VRF_PROOF_LENGTH]; - fn deref(&self) -> &Self::Target { &self.0 } + type Target = [u8; VRF_PROOF_LENGTH]; + fn deref(&self) -> &Self::Target { + &self.0 + } } impl DerefMut for RawVRFProof { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } #[cfg(feature = "std")] impl std::fmt::Debug for RawVRFProof { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{:?}", &self) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{:?}", &self) + } } impl core::cmp::PartialEq for RawVRFProof { - fn eq(&self, other: &Self) -> bool { - self == other - } + fn eq(&self, other: &Self) -> bool { + self == other + } } -impl core::cmp::Eq for RawVRFProof { } +impl core::cmp::Eq for RawVRFProof {} /// VRF proof type available for `std` environment, suitable for schnorrkel operations. 
#[cfg(feature = "std")] @@ -149,101 +166,122 @@ pub struct VRFProof(pub schnorrkel::vrf::VRFProof); #[cfg(feature = "std")] impl PartialOrd for VRFProof { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } #[cfg(feature = "std")] impl Ord for VRFProof { - fn cmp(&self, other: &Self) -> core::cmp::Ordering { - U512::from(self.0.to_bytes()).cmp(&U512::from(other.0.to_bytes())) - } + fn cmp(&self, other: &Self) -> core::cmp::Ordering { + U512::from(self.0.to_bytes()).cmp(&U512::from(other.0.to_bytes())) + } } #[cfg(feature = "std")] impl Deref for VRFProof { - type Target = schnorrkel::vrf::VRFProof; - fn deref(&self) -> &Self::Target { &self.0 } + type Target = schnorrkel::vrf::VRFProof; + fn deref(&self) -> &Self::Target { + &self.0 + } } #[cfg(feature = "std")] impl DerefMut for VRFProof { - fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } #[cfg(feature = "std")] impl Encode for VRFProof { - fn encode(&self) -> Vec { - self.0.to_bytes().encode() - } + fn encode(&self) -> Vec { + self.0.to_bytes().encode() + } } #[cfg(feature = "std")] -impl EncodeLike for VRFProof { } +impl EncodeLike for VRFProof {} #[cfg(feature = "std")] impl Decode for VRFProof { - fn decode(i: &mut R) -> Result { - let decoded = <[u8; VRF_PROOF_LENGTH]>::decode(i)?; - Ok(Self(schnorrkel::vrf::VRFProof::from_bytes(&decoded).map_err(convert_error)?)) - } + fn decode(i: &mut R) -> Result { + let decoded = <[u8; VRF_PROOF_LENGTH]>::decode(i)?; + Ok(Self( + schnorrkel::vrf::VRFProof::from_bytes(&decoded).map_err(convert_error)?, + )) + } } #[cfg(feature = "std")] impl TryFrom<[u8; VRF_PROOF_LENGTH]> for VRFProof { - type Error = SignatureError; + type Error = SignatureError; - fn try_from(raw: [u8; VRF_PROOF_LENGTH]) -> Result { - schnorrkel::vrf::VRFProof::from_bytes(&raw).map(VRFProof) - } + fn try_from(raw: 
[u8; VRF_PROOF_LENGTH]) -> Result { + schnorrkel::vrf::VRFProof::from_bytes(&raw).map(VRFProof) + } } #[cfg(feature = "std")] impl TryFrom for VRFProof { - type Error = SignatureError; + type Error = SignatureError; - fn try_from(raw: RawVRFProof) -> Result { - schnorrkel::vrf::VRFProof::from_bytes(&raw.0).map(VRFProof) - } + fn try_from(raw: RawVRFProof) -> Result { + schnorrkel::vrf::VRFProof::from_bytes(&raw.0).map(VRFProof) + } } #[cfg(feature = "std")] impl From for RawVRFProof { - fn from(output: VRFProof) -> RawVRFProof { - RawVRFProof(output.to_bytes()) - } + fn from(output: VRFProof) -> RawVRFProof { + RawVRFProof(output.to_bytes()) + } } #[cfg(feature = "std")] fn convert_error(e: SignatureError) -> codec::Error { - use SignatureError::*; - use MultiSignatureStage::*; - match e { - EquationFalse => "Signature error: `EquationFalse`".into(), - PointDecompressionError => "Signature error: `PointDecompressionError`".into(), - ScalarFormatError => "Signature error: `ScalarFormatError`".into(), - NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), - BytesLengthError { .. 
} => "Signature error: `BytesLengthError`".into(), - MuSigAbsent { musig_stage: Commitment } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigAbsent { musig_stage: Reveal } => - "Signature error: `MuSigAbsent` at stage `Reveal`".into(), - MuSigAbsent { musig_stage: Cosignature } => - "Signature error: `MuSigAbsent` at stage `Commitment`".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), - MuSigInconsistent { musig_stage: Commitment, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), - MuSigInconsistent { musig_stage: Reveal, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: true } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), - MuSigInconsistent { musig_stage: Cosignature, duplicate: false } => - "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), - } + use MultiSignatureStage::*; + use SignatureError::*; + match e { + EquationFalse => "Signature error: `EquationFalse`".into(), + PointDecompressionError => "Signature error: `PointDecompressionError`".into(), + ScalarFormatError => "Signature error: `ScalarFormatError`".into(), + NotMarkedSchnorrkel => "Signature error: `NotMarkedSchnorrkel`".into(), + BytesLengthError { .. 
} => "Signature error: `BytesLengthError`".into(), + MuSigAbsent { + musig_stage: Commitment, + } => "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigAbsent { + musig_stage: Reveal, + } => "Signature error: `MuSigAbsent` at stage `Reveal`".into(), + MuSigAbsent { + musig_stage: Cosignature, + } => "Signature error: `MuSigAbsent` at stage `Commitment`".into(), + MuSigInconsistent { + musig_stage: Commitment, + duplicate: true, + } => "Signature error: `MuSigInconsistent` at stage `Commitment` on duplicate".into(), + MuSigInconsistent { + musig_stage: Commitment, + duplicate: false, + } => "Signature error: `MuSigInconsistent` at stage `Commitment` on not duplicate".into(), + MuSigInconsistent { + musig_stage: Reveal, + duplicate: true, + } => "Signature error: `MuSigInconsistent` at stage `Reveal` on duplicate".into(), + MuSigInconsistent { + musig_stage: Reveal, + duplicate: false, + } => "Signature error: `MuSigInconsistent` at stage `Reveal` on not duplicate".into(), + MuSigInconsistent { + musig_stage: Cosignature, + duplicate: true, + } => "Signature error: `MuSigInconsistent` at stage `Cosignature` on duplicate".into(), + MuSigInconsistent { + musig_stage: Cosignature, + duplicate: false, + } => "Signature error: `MuSigInconsistent` at stage `Cosignature` on not duplicate".into(), + } } /// Schnorrkel randomness value. Same size as `VRFOutput`. diff --git a/primitives/core/benches/bench.rs b/primitives/core/benches/bench.rs index 7db9d72e6b..9576dbdffb 100644 --- a/primitives/core/benches/bench.rs +++ b/primitives/core/benches/bench.rs @@ -12,103 +12,124 @@ // See the License for the specific language governing permissions and // limitations under the License. 
- #[macro_use] extern crate criterion; -use criterion::{Criterion, black_box, Bencher, Fun}; -use std::time::Duration; +use criterion::{black_box, Bencher, Criterion, Fun}; use sp_core::crypto::Pair as _; -use sp_core::hashing::{twox_128, blake2_128}; +use sp_core::hashing::{blake2_128, twox_128}; +use std::time::Duration; const MAX_KEY_SIZE: u32 = 32; fn get_key(key_size: u32) -> Vec { - use rand::SeedableRng; - use rand::Rng; + use rand::Rng; + use rand::SeedableRng; - let rnd: [u8; 32] = rand::rngs::StdRng::seed_from_u64(12).gen(); - let mut rnd = rnd.iter().cycle(); + let rnd: [u8; 32] = rand::rngs::StdRng::seed_from_u64(12).gen(); + let mut rnd = rnd.iter().cycle(); - (0..key_size) - .map(|_| rnd.next().unwrap().clone()) - .collect() + (0..key_size).map(|_| rnd.next().unwrap().clone()).collect() } fn bench_blake2_128(b: &mut Bencher, key: &Vec) { - b.iter(|| { - let _a = blake2_128(black_box(key)); - }); + b.iter(|| { + let _a = blake2_128(black_box(key)); + }); } fn bench_twox_128(b: &mut Bencher, key: &Vec) { - b.iter(|| { - let _a = twox_128(black_box(key)); - }); + b.iter(|| { + let _a = twox_128(black_box(key)); + }); } fn bench_hash_128_fix_size(c: &mut Criterion) { - let key = get_key(MAX_KEY_SIZE); - let blake_fn = Fun::new("blake2_128", bench_blake2_128); - let twox_fn = Fun::new("twox_128", bench_twox_128); - let fns = vec![blake_fn, twox_fn]; + let key = get_key(MAX_KEY_SIZE); + let blake_fn = Fun::new("blake2_128", bench_blake2_128); + let twox_fn = Fun::new("twox_128", bench_twox_128); + let fns = vec![blake_fn, twox_fn]; - c.bench_functions("fixed size hashing", fns, key); + c.bench_functions("fixed size hashing", fns, key); } fn bench_hash_128_dyn_size(c: &mut Criterion) { - let mut keys = Vec::new(); - for i in (2..MAX_KEY_SIZE).step_by(4) { - keys.push(get_key(i).clone()) - } - - c.bench_function_over_inputs("dyn size hashing - blake2", |b, key| bench_blake2_128(b, &key), keys.clone()); - c.bench_function_over_inputs("dyn size hashing - twox", 
|b, key| bench_twox_128(b, &key), keys); + let mut keys = Vec::new(); + for i in (2..MAX_KEY_SIZE).step_by(4) { + keys.push(get_key(i).clone()) + } + + c.bench_function_over_inputs( + "dyn size hashing - blake2", + |b, key| bench_blake2_128(b, &key), + keys.clone(), + ); + c.bench_function_over_inputs( + "dyn size hashing - twox", + |b, key| bench_twox_128(b, &key), + keys, + ); } fn bench_ed25519(c: &mut Criterion) { - c.bench_function_over_inputs("signing - ed25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); - let key = sp_core::ed25519::Pair::generate().0; - b.iter(|| key.sign(&msg)) - }, vec![32, 1024, 1024 * 1024]); - - c.bench_function_over_inputs("verifying - ed25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); - let key = sp_core::ed25519::Pair::generate().0; - let sig = key.sign(&msg); - let public = key.public(); - b.iter(|| sp_core::ed25519::Pair::verify(&sig, &msg, &public)) - }, vec![32, 1024, 1024 * 1024]); + c.bench_function_over_inputs( + "signing - ed25519", + |b, &msg_size| { + let msg = (0..msg_size) + .map(|_| rand::random::()) + .collect::>(); + let key = sp_core::ed25519::Pair::generate().0; + b.iter(|| key.sign(&msg)) + }, + vec![32, 1024, 1024 * 1024], + ); + + c.bench_function_over_inputs( + "verifying - ed25519", + |b, &msg_size| { + let msg = (0..msg_size) + .map(|_| rand::random::()) + .collect::>(); + let key = sp_core::ed25519::Pair::generate().0; + let sig = key.sign(&msg); + let public = key.public(); + b.iter(|| sp_core::ed25519::Pair::verify(&sig, &msg, &public)) + }, + vec![32, 1024, 1024 * 1024], + ); } fn bench_sr25519(c: &mut Criterion) { - c.bench_function_over_inputs("signing - sr25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); - let key = sp_core::sr25519::Pair::generate().0; - b.iter(|| key.sign(&msg)) - }, vec![32, 1024, 1024 * 1024]); - - c.bench_function_over_inputs("verifying - 
sr25519", |b, &msg_size| { - let msg = (0..msg_size) - .map(|_| rand::random::()) - .collect::>(); - let key = sp_core::sr25519::Pair::generate().0; - let sig = key.sign(&msg); - let public = key.public(); - b.iter(|| sp_core::sr25519::Pair::verify(&sig, &msg, &public)) - }, vec![32, 1024, 1024 * 1024]); + c.bench_function_over_inputs( + "signing - sr25519", + |b, &msg_size| { + let msg = (0..msg_size) + .map(|_| rand::random::()) + .collect::>(); + let key = sp_core::sr25519::Pair::generate().0; + b.iter(|| key.sign(&msg)) + }, + vec![32, 1024, 1024 * 1024], + ); + + c.bench_function_over_inputs( + "verifying - sr25519", + |b, &msg_size| { + let msg = (0..msg_size) + .map(|_| rand::random::()) + .collect::>(); + let key = sp_core::sr25519::Pair::generate().0; + let sig = key.sign(&msg); + let public = key.public(); + b.iter(|| sp_core::sr25519::Pair::verify(&sig, &msg, &public)) + }, + vec![32, 1024, 1024 * 1024], + ); } -criterion_group!{ - name = benches; - config = Criterion::default().warm_up_time(Duration::from_millis(500)).without_plots(); - targets = bench_hash_128_fix_size, bench_hash_128_dyn_size, bench_ed25519, bench_sr25519 +criterion_group! { + name = benches; + config = Criterion::default().warm_up_time(Duration::from_millis(500)).without_plots(); + targets = bench_hash_128_fix_size, bench_hash_128_dyn_size, bench_ed25519, bench_sr25519 } criterion_main!(benches); diff --git a/primitives/core/src/changes_trie.rs b/primitives/core/src/changes_trie.rs index cb21ffe13d..59f1fe79ab 100644 --- a/primitives/core/src/changes_trie.rs +++ b/primitives/core/src/changes_trie.rs @@ -16,290 +16,355 @@ //! Substrate changes trie configuration. -#[cfg(any(feature = "std", test))] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use num_traits::Zero; +#[cfg(any(feature = "std", test))] +use serde::{Deserialize, Serialize}; /// Substrate changes trie configuration. 
-#[cfg_attr(any(feature = "std", test), derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] +#[cfg_attr( + any(feature = "std", test), + derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf) +)] #[derive(Debug, Clone, PartialEq, Eq, Default, Encode, Decode)] pub struct ChangesTrieConfiguration { - /// Interval (in blocks) at which level1-digests are created. Digests are not - /// created when this is less or equal to 1. - pub digest_interval: u32, - /// Maximal number of digest levels in hierarchy. 0 means that digests are not - /// created at all (even level1 digests). 1 means only level1-digests are created. - /// 2 means that every digest_interval^2 there will be a level2-digest, and so on. - /// Please ensure that maximum digest interval (i.e. digest_interval^digest_levels) - /// is within `u32` limits. Otherwise you'll never see digests covering such intervals - /// && maximal digests interval will be truncated to the last interval that fits - /// `u32` limits. - pub digest_levels: u32, + /// Interval (in blocks) at which level1-digests are created. Digests are not + /// created when this is less or equal to 1. + pub digest_interval: u32, + /// Maximal number of digest levels in hierarchy. 0 means that digests are not + /// created at all (even level1 digests). 1 means only level1-digests are created. + /// 2 means that every digest_interval^2 there will be a level2-digest, and so on. + /// Please ensure that maximum digest interval (i.e. digest_interval^digest_levels) + /// is within `u32` limits. Otherwise you'll never see digests covering such intervals + /// && maximal digests interval will be truncated to the last interval that fits + /// `u32` limits. + pub digest_levels: u32, } /// Substrate changes trie configuration range. #[derive(Debug, Clone, PartialEq, Eq)] pub struct ChangesTrieConfigurationRange { - /// Zero block of configuration. 
- pub zero: (Number, Hash), - /// Last block of configuration (if configuration has been deactivated at some point). - pub end: Option<(Number, Hash)>, - /// The configuration itself. None if changes tries were disabled in this range. - pub config: Option, + /// Zero block of configuration. + pub zero: (Number, Hash), + /// Last block of configuration (if configuration has been deactivated at some point). + pub end: Option<(Number, Hash)>, + /// The configuration itself. None if changes tries were disabled in this range. + pub config: Option, } impl ChangesTrieConfiguration { - /// Create new configuration given digest interval and levels. - pub fn new(digest_interval: u32, digest_levels: u32) -> Self { - Self { digest_interval, digest_levels } - } - - /// Is digest build enabled? - pub fn is_digest_build_enabled(&self) -> bool { - self.digest_interval > 1 && self.digest_levels > 0 - } - - /// Do we need to build digest at given block? - pub fn is_digest_build_required_at_block( - &self, - zero: Number, - block: Number, - ) -> bool - where - Number: From + PartialEq + - ::sp_std::ops::Rem + ::sp_std::ops::Sub + - ::sp_std::cmp::PartialOrd + Zero, - { - block > zero - && self.is_digest_build_enabled() - && ((block - zero) % self.digest_interval.into()).is_zero() - } - - /// Returns max digest interval. One if digests are not created at all. - pub fn max_digest_interval(&self) -> u32 { - if !self.is_digest_build_enabled() { - return 1; - } - - // we'll get >1 loop iteration only when bad configuration parameters are selected - let mut current_level = self.digest_levels; - loop { - if let Some(max_digest_interval) = self.digest_interval.checked_pow(current_level) { - return max_digest_interval; - } - - current_level = current_level - 1; - } - } - - /// Returns max level digest block number that has been created at block <= passed block number. - /// - /// Returns None if digests are not created at all. 
- pub fn prev_max_level_digest_block( - &self, - zero: Number, - block: Number, - ) -> Option - where - Number: Clone + From + PartialOrd + PartialEq + - ::sp_std::ops::Add + ::sp_std::ops::Sub + - ::sp_std::ops::Div + ::sp_std::ops::Mul + Zero, - { - if block <= zero { - return None; - } - - let (next_begin, next_end) = self.next_max_level_digest_range(zero.clone(), block.clone())?; - - // if 'next' digest includes our block, then it is a also a previous digest - if next_end == block { - return Some(block); - } - - // if previous digest ends at zero block, then there are no previous digest - let prev_end = next_begin - 1.into(); - if prev_end == zero { - None - } else { - Some(prev_end) - } - } - - /// Returns max level digest blocks range (inclusive) which includes passed block. - /// - /// Returns None if digests are not created at all. - /// It will return the first max-level digest if block is <= zero. - pub fn next_max_level_digest_range( - &self, - zero: Number, - mut block: Number, - ) -> Option<(Number, Number)> - where - Number: Clone + From + PartialOrd + PartialEq + - ::sp_std::ops::Add + ::sp_std::ops::Sub + - ::sp_std::ops::Div + ::sp_std::ops::Mul, - { - if !self.is_digest_build_enabled() { - return None; - } - - if block <= zero { - block = zero.clone() + 1.into(); - } - - let max_digest_interval: Number = self.max_digest_interval().into(); - let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); - if max_digests_since_zero == 0.into() { - return Some((zero.clone() + 1.into(), zero + max_digest_interval)); - } - let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); - Some(if block == last_max_digest_block { - (block.clone() - max_digest_interval + 1.into(), block) - } else { - (last_max_digest_block.clone() + 1.into(), last_max_digest_block + max_digest_interval) - }) - } - - /// Returns Some if digest must be built at given block number. 
- /// The tuple is: - /// ( - /// digest level - /// digest interval (in blocks) - /// step between blocks we're interested in when digest is built - /// ) - pub fn digest_level_at_block(&self, zero: Number, block: Number) -> Option<(u32, u32, u32)> - where - Number: Clone + From + PartialEq + - ::sp_std::ops::Rem + ::sp_std::ops::Sub + - ::sp_std::cmp::PartialOrd + Zero, - { - if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { - return None; - } - - let relative_block = block - zero; - let mut digest_interval = self.digest_interval; - let mut current_level = 1u32; - let mut digest_step = 1u32; - while current_level < self.digest_levels { - let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { - Some(new_digest_interval) if (relative_block.clone() % new_digest_interval.into()).is_zero() - => new_digest_interval, - _ => break, - }; - - digest_step = digest_interval; - digest_interval = new_digest_interval; - current_level = current_level + 1; - } - - Some(( - current_level, - digest_interval, - digest_step, - )) - } + /// Create new configuration given digest interval and levels. + pub fn new(digest_interval: u32, digest_levels: u32) -> Self { + Self { + digest_interval, + digest_levels, + } + } + + /// Is digest build enabled? + pub fn is_digest_build_enabled(&self) -> bool { + self.digest_interval > 1 && self.digest_levels > 0 + } + + /// Do we need to build digest at given block? + pub fn is_digest_build_required_at_block(&self, zero: Number, block: Number) -> bool + where + Number: From + + PartialEq + + ::sp_std::ops::Rem + + ::sp_std::ops::Sub + + ::sp_std::cmp::PartialOrd + + Zero, + { + block > zero + && self.is_digest_build_enabled() + && ((block - zero) % self.digest_interval.into()).is_zero() + } + + /// Returns max digest interval. One if digests are not created at all. 
+ pub fn max_digest_interval(&self) -> u32 { + if !self.is_digest_build_enabled() { + return 1; + } + + // we'll get >1 loop iteration only when bad configuration parameters are selected + let mut current_level = self.digest_levels; + loop { + if let Some(max_digest_interval) = self.digest_interval.checked_pow(current_level) { + return max_digest_interval; + } + + current_level = current_level - 1; + } + } + + /// Returns max level digest block number that has been created at block <= passed block number. + /// + /// Returns None if digests are not created at all. + pub fn prev_max_level_digest_block(&self, zero: Number, block: Number) -> Option + where + Number: Clone + + From + + PartialOrd + + PartialEq + + ::sp_std::ops::Add + + ::sp_std::ops::Sub + + ::sp_std::ops::Div + + ::sp_std::ops::Mul + + Zero, + { + if block <= zero { + return None; + } + + let (next_begin, next_end) = + self.next_max_level_digest_range(zero.clone(), block.clone())?; + + // if 'next' digest includes our block, then it is a also a previous digest + if next_end == block { + return Some(block); + } + + // if previous digest ends at zero block, then there are no previous digest + let prev_end = next_begin - 1.into(); + if prev_end == zero { + None + } else { + Some(prev_end) + } + } + + /// Returns max level digest blocks range (inclusive) which includes passed block. + /// + /// Returns None if digests are not created at all. + /// It will return the first max-level digest if block is <= zero. 
+ pub fn next_max_level_digest_range( + &self, + zero: Number, + mut block: Number, + ) -> Option<(Number, Number)> + where + Number: Clone + + From + + PartialOrd + + PartialEq + + ::sp_std::ops::Add + + ::sp_std::ops::Sub + + ::sp_std::ops::Div + + ::sp_std::ops::Mul, + { + if !self.is_digest_build_enabled() { + return None; + } + + if block <= zero { + block = zero.clone() + 1.into(); + } + + let max_digest_interval: Number = self.max_digest_interval().into(); + let max_digests_since_zero = (block.clone() - zero.clone()) / max_digest_interval.clone(); + if max_digests_since_zero == 0.into() { + return Some((zero.clone() + 1.into(), zero + max_digest_interval)); + } + let last_max_digest_block = zero + max_digests_since_zero * max_digest_interval.clone(); + Some(if block == last_max_digest_block { + (block.clone() - max_digest_interval + 1.into(), block) + } else { + ( + last_max_digest_block.clone() + 1.into(), + last_max_digest_block + max_digest_interval, + ) + }) + } + + /// Returns Some if digest must be built at given block number. 
+ /// The tuple is: + /// ( + /// digest level + /// digest interval (in blocks) + /// step between blocks we're interested in when digest is built + /// ) + pub fn digest_level_at_block( + &self, + zero: Number, + block: Number, + ) -> Option<(u32, u32, u32)> + where + Number: Clone + + From + + PartialEq + + ::sp_std::ops::Rem + + ::sp_std::ops::Sub + + ::sp_std::cmp::PartialOrd + + Zero, + { + if !self.is_digest_build_required_at_block(zero.clone(), block.clone()) { + return None; + } + + let relative_block = block - zero; + let mut digest_interval = self.digest_interval; + let mut current_level = 1u32; + let mut digest_step = 1u32; + while current_level < self.digest_levels { + let new_digest_interval = match digest_interval.checked_mul(self.digest_interval) { + Some(new_digest_interval) + if (relative_block.clone() % new_digest_interval.into()).is_zero() => + { + new_digest_interval + } + _ => break, + }; + + digest_step = digest_interval; + digest_interval = new_digest_interval; + current_level = current_level + 1; + } + + Some((current_level, digest_interval, digest_step)) + } } #[cfg(test)] mod tests { - use super::ChangesTrieConfiguration; - - fn config(interval: u32, levels: u32) -> ChangesTrieConfiguration { - ChangesTrieConfiguration { - digest_interval: interval, - digest_levels: levels, - } - } - - #[test] - fn is_digest_build_enabled_works() { - assert!(!config(0, 100).is_digest_build_enabled()); - assert!(!config(1, 100).is_digest_build_enabled()); - assert!(config(2, 100).is_digest_build_enabled()); - assert!(!config(100, 0).is_digest_build_enabled()); - assert!(config(100, 1).is_digest_build_enabled()); - } - - #[test] - fn is_digest_build_required_at_block_works() { - fn test_with_zero(zero: u64) { - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 0u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 1u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 2u64)); - 
assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 8u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 9u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 512u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4096u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4103u64)); - assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4104u64)); - assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4108u64)); - } - - test_with_zero(0); - test_with_zero(8); - test_with_zero(17); - } - - #[test] - fn digest_level_at_block_works() { - fn test_with_zero(zero: u64) { - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 0u64), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 7u64), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 63u64), None); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 8u64), Some((1, 8, 1))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 64u64), Some((2, 64, 8))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 512u64), Some((3, 512, 64))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4096u64), Some((4, 4096, 512))); - assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 4112u64), Some((1, 8, 1))); - } - - test_with_zero(0); - test_with_zero(8); - test_with_zero(17); - } - - #[test] - fn max_digest_interval_works() { - assert_eq!(config(0, 0).max_digest_interval(), 1); - assert_eq!(config(2, 2).max_digest_interval(), 4); - assert_eq!(config(8, 4).max_digest_interval(), 4096); - assert_eq!(config(::std::u32::MAX, 1024).max_digest_interval(), ::std::u32::MAX); - } 
- - #[test] - fn next_max_level_digest_range_works() { - assert_eq!(config(0, 0).next_max_level_digest_range(0u64, 16), None); - assert_eq!(config(1, 1).next_max_level_digest_range(0u64, 16), None); - assert_eq!(config(2, 1).next_max_level_digest_range(0u64, 16), Some((15, 16))); - assert_eq!(config(4, 1).next_max_level_digest_range(0u64, 16), Some((13, 16))); - assert_eq!(config(32, 1).next_max_level_digest_range(0u64, 16), Some((1, 32))); - assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 10), Some((9, 16))); - assert_eq!(config(2, 3).next_max_level_digest_range(0u64, 8), Some((1, 8))); - assert_eq!(config(2, 1).next_max_level_digest_range(1u64, 1), Some((2, 3))); - assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 9), Some((8, 11))); - - assert_eq!(config(2, 2).next_max_level_digest_range(7u64, 5), Some((8, 11))); - } - - #[test] - fn prev_max_level_digest_block_works() { - assert_eq!(config(0, 0).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(1, 1).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(2, 1).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 1).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 16), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 17), Some(16)); - assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 33), Some(32)); - assert_eq!(config(32, 1).prev_max_level_digest_block(0u64, 16), None); - assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 10), Some(8)); - assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 8), Some(8)); - assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 8), None); - - assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 5), None); - } + use super::ChangesTrieConfiguration; + + fn config(interval: u32, levels: u32) -> ChangesTrieConfiguration { + ChangesTrieConfiguration { + digest_interval: interval, + digest_levels: levels, + } + 
} + + #[test] + fn is_digest_build_enabled_works() { + assert!(!config(0, 100).is_digest_build_enabled()); + assert!(!config(1, 100).is_digest_build_enabled()); + assert!(config(2, 100).is_digest_build_enabled()); + assert!(!config(100, 0).is_digest_build_enabled()); + assert!(config(100, 1).is_digest_build_enabled()); + } + + #[test] + fn is_digest_build_required_at_block_works() { + fn test_with_zero(zero: u64) { + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 0u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 1u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 2u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 8u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 9u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 64u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 512u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4096u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4103u64)); + assert!(config(8, 4).is_digest_build_required_at_block(zero, zero + 4104u64)); + assert!(!config(8, 4).is_digest_build_required_at_block(zero, zero + 4108u64)); + } + + test_with_zero(0); + test_with_zero(8); + test_with_zero(17); + } + + #[test] + fn digest_level_at_block_works() { + fn test_with_zero(zero: u64) { + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 0u64), None); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 7u64), None); + assert_eq!(config(8, 4).digest_level_at_block(zero, zero + 63u64), None); + assert_eq!( + config(8, 4).digest_level_at_block(zero, zero + 8u64), + Some((1, 8, 1)) + ); + assert_eq!( + config(8, 4).digest_level_at_block(zero, zero + 
64u64), + Some((2, 64, 8)) + ); + assert_eq!( + config(8, 4).digest_level_at_block(zero, zero + 512u64), + Some((3, 512, 64)) + ); + assert_eq!( + config(8, 4).digest_level_at_block(zero, zero + 4096u64), + Some((4, 4096, 512)) + ); + assert_eq!( + config(8, 4).digest_level_at_block(zero, zero + 4112u64), + Some((1, 8, 1)) + ); + } + + test_with_zero(0); + test_with_zero(8); + test_with_zero(17); + } + + #[test] + fn max_digest_interval_works() { + assert_eq!(config(0, 0).max_digest_interval(), 1); + assert_eq!(config(2, 2).max_digest_interval(), 4); + assert_eq!(config(8, 4).max_digest_interval(), 4096); + assert_eq!( + config(::std::u32::MAX, 1024).max_digest_interval(), + ::std::u32::MAX + ); + } + + #[test] + fn next_max_level_digest_range_works() { + assert_eq!(config(0, 0).next_max_level_digest_range(0u64, 16), None); + assert_eq!(config(1, 1).next_max_level_digest_range(0u64, 16), None); + assert_eq!( + config(2, 1).next_max_level_digest_range(0u64, 16), + Some((15, 16)) + ); + assert_eq!( + config(4, 1).next_max_level_digest_range(0u64, 16), + Some((13, 16)) + ); + assert_eq!( + config(32, 1).next_max_level_digest_range(0u64, 16), + Some((1, 32)) + ); + assert_eq!( + config(2, 3).next_max_level_digest_range(0u64, 10), + Some((9, 16)) + ); + assert_eq!( + config(2, 3).next_max_level_digest_range(0u64, 8), + Some((1, 8)) + ); + assert_eq!( + config(2, 1).next_max_level_digest_range(1u64, 1), + Some((2, 3)) + ); + assert_eq!( + config(2, 2).next_max_level_digest_range(7u64, 9), + Some((8, 11)) + ); + + assert_eq!( + config(2, 2).next_max_level_digest_range(7u64, 5), + Some((8, 11)) + ); + } + + #[test] + fn prev_max_level_digest_block_works() { + assert_eq!(config(0, 0).prev_max_level_digest_block(0u64, 16), None); + assert_eq!(config(1, 1).prev_max_level_digest_block(0u64, 16), None); + assert_eq!(config(2, 1).prev_max_level_digest_block(0u64, 16), Some(16)); + assert_eq!(config(4, 1).prev_max_level_digest_block(0u64, 16), Some(16)); + assert_eq!(config(4, 
2).prev_max_level_digest_block(0u64, 16), Some(16)); + assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 17), Some(16)); + assert_eq!(config(4, 2).prev_max_level_digest_block(0u64, 33), Some(32)); + assert_eq!(config(32, 1).prev_max_level_digest_block(0u64, 16), None); + assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 10), Some(8)); + assert_eq!(config(2, 3).prev_max_level_digest_block(0u64, 8), Some(8)); + assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 8), None); + + assert_eq!(config(2, 2).prev_max_level_digest_block(7u64, 5), None); + } } diff --git a/primitives/core/src/crypto.rs b/primitives/core/src/crypto.rs index 79a36b2ad2..a67f381b8e 100644 --- a/primitives/core/src/crypto.rs +++ b/primitives/core/src/crypto.rs @@ -18,31 +18,32 @@ //! Cryptographic utilities. // end::description[] -use crate::{sr25519, ed25519}; -use sp_std::hash::Hash; -use sp_std::vec::Vec; -use sp_std::str; #[cfg(feature = "std")] -use sp_std::convert::TryInto; -use sp_std::convert::TryFrom; +use crate::hexdisplay::HexDisplay; +use crate::{ed25519, sr25519}; +#[cfg(feature = "std")] +use base58::{FromBase58, ToBase58}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] use parking_lot::Mutex; #[cfg(feature = "std")] -use rand::{RngCore, rngs::OsRng}; -use codec::{Encode, Decode}; +use rand::{rngs::OsRng, RngCore}; #[cfg(feature = "std")] use regex::Regex; +use sp_runtime_interface::pass_by::PassByInner; +use sp_std::convert::TryFrom; #[cfg(feature = "std")] -use base58::{FromBase58, ToBase58}; -#[cfg(feature = "std")] -use crate::hexdisplay::HexDisplay; -use zeroize::Zeroize; +use sp_std::convert::TryInto; +use sp_std::hash::Hash; #[doc(hidden)] pub use sp_std::ops::Deref; -use sp_runtime_interface::pass_by::PassByInner; +use sp_std::str; +use sp_std::vec::Vec; +use zeroize::Zeroize; /// The root phrase for our publicly known keys. 
-pub const DEV_PHRASE: &str = "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; +pub const DEV_PHRASE: &str = + "bottom drive obey lake curtain smoke basket hold race lonely fit walk"; /// The address of the associated root phrase for our publicly known keys. pub const DEV_ADDRESS: &str = "5DfhGyQdFobKM8NsWvEeAKk5EQQgYe9AydgJ7rMB6E1EqRzV"; @@ -60,22 +61,22 @@ pub const JUNCTION_ID_LEN: usize = 32; /// that data passed in makes sense. Basically, you're not guaranteed to get anything /// sensible out. pub trait UncheckedFrom { - /// Convert from an instance of `T` to Self. This is not guaranteed to be - /// whatever counts as a valid instance of `T` and it's up to the caller to - /// ensure that it makes sense. - fn unchecked_from(t: T) -> Self; + /// Convert from an instance of `T` to Self. This is not guaranteed to be + /// whatever counts as a valid instance of `T` and it's up to the caller to + /// ensure that it makes sense. + fn unchecked_from(t: T) -> Self; } /// The counterpart to `UncheckedFrom`. pub trait UncheckedInto { - /// The counterpart to `unchecked_from`. - fn unchecked_into(self) -> T; + /// The counterpart to `unchecked_from`. + fn unchecked_into(self) -> T; } impl> UncheckedInto for S { - fn unchecked_into(self) -> T { - T::unchecked_from(self) - } + fn unchecked_into(self) -> T { + T::unchecked_from(self) + } } /// A store for sensitive data. 
@@ -85,60 +86,60 @@ impl> UncheckedInto for S { pub struct Protected(T); impl AsRef for Protected { - fn as_ref(&self) -> &T { - &self.0 - } + fn as_ref(&self) -> &T { + &self.0 + } } impl sp_std::ops::Deref for Protected { - type Target = T; + type Target = T; - fn deref(&self) -> &T { - &self.0 - } + fn deref(&self) -> &T { + &self.0 + } } #[cfg(feature = "std")] impl std::fmt::Debug for Protected { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(fmt, "") - } + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(fmt, "") + } } impl From for Protected { - fn from(t: T) -> Self { - Protected(t) - } + fn from(t: T) -> Self { + Protected(t) + } } impl Zeroize for Protected { - fn zeroize(&mut self) { - self.0.zeroize() - } + fn zeroize(&mut self) { + self.0.zeroize() + } } impl Drop for Protected { - fn drop(&mut self) { - self.zeroize() - } + fn drop(&mut self) { + self.zeroize() + } } /// An error with the interpretation of a secret. #[derive(Debug, Clone, PartialEq, Eq)] #[cfg(feature = "full_crypto")] pub enum SecretStringError { - /// The overall format was invalid (e.g. the seed phrase contained symbols). - InvalidFormat, - /// The seed phrase provided is not a valid BIP39 phrase. - InvalidPhrase, - /// The supplied password was invalid. - InvalidPassword, - /// The seed is invalid (bad content). - InvalidSeed, - /// The seed has an invalid length. - InvalidSeedLength, - /// The derivation path was invalid (e.g. contains soft junctions when they are not supported). - InvalidPath, + /// The overall format was invalid (e.g. the seed phrase contained symbols). + InvalidFormat, + /// The seed phrase provided is not a valid BIP39 phrase. + InvalidPhrase, + /// The supplied password was invalid. + InvalidPassword, + /// The seed is invalid (bad content). + InvalidSeed, + /// The seed has an invalid length. + InvalidSeedLength, + /// The derivation path was invalid (e.g. 
contains soft junctions when they are not supported). + InvalidPath, } /// A since derivation junction description. It is the single parameter used when creating @@ -147,191 +148,199 @@ pub enum SecretStringError { #[derive(Copy, Clone, Eq, PartialEq, Hash, Debug, Encode, Decode)] #[cfg(feature = "full_crypto")] pub enum DeriveJunction { - /// Soft (vanilla) derivation. Public keys have a correspondent derivation. - Soft([u8; JUNCTION_ID_LEN]), - /// Hard ("hardened") derivation. Public keys do not have a correspondent derivation. - Hard([u8; JUNCTION_ID_LEN]), + /// Soft (vanilla) derivation. Public keys have a correspondent derivation. + Soft([u8; JUNCTION_ID_LEN]), + /// Hard ("hardened") derivation. Public keys do not have a correspondent derivation. + Hard([u8; JUNCTION_ID_LEN]), } #[cfg(feature = "full_crypto")] impl DeriveJunction { - /// Consume self to return a soft derive junction with the same chain code. - pub fn soften(self) -> Self { DeriveJunction::Soft(self.unwrap_inner()) } - - /// Consume self to return a hard derive junction with the same chain code. - pub fn harden(self) -> Self { DeriveJunction::Hard(self.unwrap_inner()) } - - /// Create a new soft (vanilla) DeriveJunction from a given, encodable, value. - /// - /// If you need a hard junction, use `hard()`. - pub fn soft(index: T) -> Self { - let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); - index.using_encoded(|data| if data.len() > JUNCTION_ID_LEN { - let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); - let hash = hash_result.as_bytes(); - cc.copy_from_slice(hash); - } else { - cc[0..data.len()].copy_from_slice(data); - }); - DeriveJunction::Soft(cc) - } - - /// Create a new hard (hardened) DeriveJunction from a given, encodable, value. - /// - /// If you need a soft junction, use `soft()`. - pub fn hard(index: T) -> Self { - Self::soft(index).harden() - } - - /// Consume self to return the chain code. 
- pub fn unwrap_inner(self) -> [u8; JUNCTION_ID_LEN] { - match self { - DeriveJunction::Hard(c) | DeriveJunction::Soft(c) => c, - } - } - - /// Get a reference to the inner junction id. - pub fn inner(&self) -> &[u8; JUNCTION_ID_LEN] { - match self { - DeriveJunction::Hard(ref c) | DeriveJunction::Soft(ref c) => c, - } - } - - /// Return `true` if the junction is soft. - pub fn is_soft(&self) -> bool { - match *self { - DeriveJunction::Soft(_) => true, - _ => false, - } - } - - /// Return `true` if the junction is hard. - pub fn is_hard(&self) -> bool { - match *self { - DeriveJunction::Hard(_) => true, - _ => false, - } - } + /// Consume self to return a soft derive junction with the same chain code. + pub fn soften(self) -> Self { + DeriveJunction::Soft(self.unwrap_inner()) + } + + /// Consume self to return a hard derive junction with the same chain code. + pub fn harden(self) -> Self { + DeriveJunction::Hard(self.unwrap_inner()) + } + + /// Create a new soft (vanilla) DeriveJunction from a given, encodable, value. + /// + /// If you need a hard junction, use `hard()`. + pub fn soft(index: T) -> Self { + let mut cc: [u8; JUNCTION_ID_LEN] = Default::default(); + index.using_encoded(|data| { + if data.len() > JUNCTION_ID_LEN { + let hash_result = blake2_rfc::blake2b::blake2b(JUNCTION_ID_LEN, &[], data); + let hash = hash_result.as_bytes(); + cc.copy_from_slice(hash); + } else { + cc[0..data.len()].copy_from_slice(data); + } + }); + DeriveJunction::Soft(cc) + } + + /// Create a new hard (hardened) DeriveJunction from a given, encodable, value. + /// + /// If you need a soft junction, use `soft()`. + pub fn hard(index: T) -> Self { + Self::soft(index).harden() + } + + /// Consume self to return the chain code. + pub fn unwrap_inner(self) -> [u8; JUNCTION_ID_LEN] { + match self { + DeriveJunction::Hard(c) | DeriveJunction::Soft(c) => c, + } + } + + /// Get a reference to the inner junction id. 
+ pub fn inner(&self) -> &[u8; JUNCTION_ID_LEN] { + match self { + DeriveJunction::Hard(ref c) | DeriveJunction::Soft(ref c) => c, + } + } + + /// Return `true` if the junction is soft. + pub fn is_soft(&self) -> bool { + match *self { + DeriveJunction::Soft(_) => true, + _ => false, + } + } + + /// Return `true` if the junction is hard. + pub fn is_hard(&self) -> bool { + match *self { + DeriveJunction::Hard(_) => true, + _ => false, + } + } } #[cfg(feature = "full_crypto")] impl> From for DeriveJunction { - fn from(j: T) -> DeriveJunction { - let j = j.as_ref(); - let (code, hard) = if j.starts_with("/") { - (&j[1..], true) - } else { - (j, false) - }; - - let res = if let Ok(n) = str::parse::(code) { - // number - DeriveJunction::soft(n) - } else { - // something else - DeriveJunction::soft(code) - }; - - if hard { - res.harden() - } else { - res - } - } + fn from(j: T) -> DeriveJunction { + let j = j.as_ref(); + let (code, hard) = if j.starts_with("/") { + (&j[1..], true) + } else { + (j, false) + }; + + let res = if let Ok(n) = str::parse::(code) { + // number + DeriveJunction::soft(n) + } else { + // something else + DeriveJunction::soft(code) + }; + + if hard { + res.harden() + } else { + res + } + } } /// An error type for SS58 decoding. #[cfg(feature = "full_crypto")] #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub enum PublicError { - /// Bad alphabet. - BadBase58, - /// Bad length. - BadLength, - /// Unknown version. - UnknownVersion, - /// Invalid checksum. - InvalidChecksum, - /// Invalid format. - InvalidFormat, - /// Invalid derivation path. - InvalidPath, + /// Bad alphabet. + BadBase58, + /// Bad length. + BadLength, + /// Unknown version. + UnknownVersion, + /// Invalid checksum. + InvalidChecksum, + /// Invalid format. + InvalidFormat, + /// Invalid derivation path. + InvalidPath, } /// Key that can be encoded to/from SS58. 
#[cfg(feature = "full_crypto")] pub trait Ss58Codec: Sized + AsMut<[u8]> + AsRef<[u8]> + Default { - /// Some if the string is a properly encoded SS58Check address. - #[cfg(feature = "std")] - fn from_ss58check(s: &str) -> Result { - Self::from_ss58check_with_version(s) - .and_then(|(r, v)| match v { - v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), - _ => Err(PublicError::UnknownVersion), - }) - } - /// Some if the string is a properly encoded SS58Check address. - #[cfg(feature = "std")] - fn from_ss58check_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let mut res = Self::default(); - let len = res.as_mut().len(); - let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. - if d.len() != len + 3 { - // Invalid length. - return Err(PublicError::BadLength); - } - let ver = d[0].try_into().map_err(|_: ()| PublicError::UnknownVersion)?; - - if d[len + 1..len + 3] != ss58hash(&d[0..len + 1]).as_bytes()[0..2] { - // Invalid checksum. - return Err(PublicError::InvalidChecksum); - } - res.as_mut().copy_from_slice(&d[1..len + 1]); - Ok((res, ver)) - } - /// Some if the string is a properly encoded SS58Check address, optionally with - /// a derivation path following. - #[cfg(feature = "std")] - fn from_string(s: &str) -> Result { - Self::from_string_with_version(s) - .and_then(|(r, v)| match v { - v if !v.is_custom() => Ok(r), - v if v == *DEFAULT_VERSION.lock() => Ok(r), - _ => Err(PublicError::UnknownVersion), - }) - } - - /// Return the ss58-check string for this key. - - #[cfg(feature = "std")] - fn to_ss58check_with_version(&self, version: Ss58AddressFormat) -> String { - let mut v = vec![version.into()]; - v.extend(self.as_ref()); - let r = ss58hash(&v); - v.extend(&r.as_bytes()[0..2]); - v.to_base58() - } - /// Return the ss58-check string for this key. 
- #[cfg(feature = "std")] - fn to_ss58check(&self) -> String { self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) } - /// Some if the string is a properly encoded SS58Check address, optionally with - /// a derivation path following. - #[cfg(feature = "std")] - fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - Self::from_ss58check_with_version(s) - } + /// Some if the string is a properly encoded SS58Check address. + #[cfg(feature = "std")] + fn from_ss58check(s: &str) -> Result { + Self::from_ss58check_with_version(s).and_then(|(r, v)| match v { + v if !v.is_custom() => Ok(r), + v if v == *DEFAULT_VERSION.lock() => Ok(r), + _ => Err(PublicError::UnknownVersion), + }) + } + /// Some if the string is a properly encoded SS58Check address. + #[cfg(feature = "std")] + fn from_ss58check_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { + let mut res = Self::default(); + let len = res.as_mut().len(); + let d = s.from_base58().map_err(|_| PublicError::BadBase58)?; // failure here would be invalid encoding. + if d.len() != len + 3 { + // Invalid length. + return Err(PublicError::BadLength); + } + let ver = d[0] + .try_into() + .map_err(|_: ()| PublicError::UnknownVersion)?; + + if d[len + 1..len + 3] != ss58hash(&d[0..len + 1]).as_bytes()[0..2] { + // Invalid checksum. + return Err(PublicError::InvalidChecksum); + } + res.as_mut().copy_from_slice(&d[1..len + 1]); + Ok((res, ver)) + } + /// Some if the string is a properly encoded SS58Check address, optionally with + /// a derivation path following. + #[cfg(feature = "std")] + fn from_string(s: &str) -> Result { + Self::from_string_with_version(s).and_then(|(r, v)| match v { + v if !v.is_custom() => Ok(r), + v if v == *DEFAULT_VERSION.lock() => Ok(r), + _ => Err(PublicError::UnknownVersion), + }) + } + + /// Return the ss58-check string for this key. 
+ + #[cfg(feature = "std")] + fn to_ss58check_with_version(&self, version: Ss58AddressFormat) -> String { + let mut v = vec![version.into()]; + v.extend(self.as_ref()); + let r = ss58hash(&v); + v.extend(&r.as_bytes()[0..2]); + v.to_base58() + } + /// Return the ss58-check string for this key. + #[cfg(feature = "std")] + fn to_ss58check(&self) -> String { + self.to_ss58check_with_version(*DEFAULT_VERSION.lock()) + } + /// Some if the string is a properly encoded SS58Check address, optionally with + /// a derivation path following. + #[cfg(feature = "std")] + fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { + Self::from_ss58check_with_version(s) + } } /// Derivable key trait. pub trait Derive: Sized { - /// Derive a child key from a series of given junctions. - /// - /// Will be `None` for public keys if there are any hard junctions in there. - #[cfg(feature = "std")] - fn derive>(&self, _path: Iter) -> Option { - None - } + /// Derive a child key from a series of given junctions. + /// + /// Will be `None` for public keys if there are any hard junctions in there. + #[cfg(feature = "std")] + fn derive>(&self, _path: Iter) -> Option { + None + } } #[cfg(feature = "std")] @@ -339,16 +348,16 @@ const PREFIX: &[u8] = b"SS58PRE"; #[cfg(feature = "std")] fn ss58hash(data: &[u8]) -> blake2_rfc::blake2b::Blake2bResult { - let mut context = blake2_rfc::blake2b::Blake2b::new(64); - context.update(PREFIX); - context.update(data); - context.finalize() + let mut context = blake2_rfc::blake2b::Blake2b::new(64); + context.update(PREFIX); + context.update(data); + context.finalize() } #[cfg(feature = "std")] lazy_static::lazy_static! 
{ - static ref DEFAULT_VERSION: Mutex - = Mutex::new(Ss58AddressFormat::SubstrateAccount); + static ref DEFAULT_VERSION: Mutex + = Mutex::new(Ss58AddressFormat::SubstrateAccount); } #[cfg(feature = "full_crypto")] @@ -483,80 +492,81 @@ ss58_address_format!( /// See `ss58_address_format!` for all current known "versions". #[cfg(feature = "std")] pub fn set_default_ss58_version(version: Ss58AddressFormat) { - *DEFAULT_VERSION.lock() = version + *DEFAULT_VERSION.lock() = version } #[cfg(feature = "std")] impl + AsRef<[u8]> + Default + Derive> Ss58Codec for T { - fn from_string(s: &str) -> Result { - let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let s = cap.name("ss58") - .map(|r| r.as_str()) - .unwrap_or(DEV_ADDRESS); - let addr = if s.starts_with("0x") { - let d = hex::decode(&s[2..]).map_err(|_| PublicError::InvalidFormat)?; - let mut r = Self::default(); - if d.len() == r.as_ref().len() { - r.as_mut().copy_from_slice(&d); - r - } else { - Err(PublicError::BadLength)? - } - } else { - Self::from_ss58check(s)? 
- }; - if cap["path"].is_empty() { - Ok(addr) - } else { - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - addr.derive(path) - .ok_or(PublicError::InvalidPath) - } - } - - fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { - let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let (addr, v) = Self::from_ss58check_with_version( - cap.name("ss58") - .map(|r| r.as_str()) - .unwrap_or(DEV_ADDRESS) - )?; - if cap["path"].is_empty() { - Ok((addr, v)) - } else { - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - addr.derive(path) - .ok_or(PublicError::InvalidPath) - .map(|a| (a, v)) - } - } + fn from_string(s: &str) -> Result { + let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") + .expect("constructed from known-good static value; qed"); + let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; + let re_junction = + Regex::new(r"/(/?[^/]+)").expect("constructed from known-good static value; qed"); + let s = cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS); + let addr = if s.starts_with("0x") { + let d = hex::decode(&s[2..]).map_err(|_| PublicError::InvalidFormat)?; + let mut r = Self::default(); + if d.len() == r.as_ref().len() { + r.as_mut().copy_from_slice(&d); + r + } else { + Err(PublicError::BadLength)? + } + } else { + Self::from_ss58check(s)? 
+ }; + if cap["path"].is_empty() { + Ok(addr) + } else { + let path = re_junction + .captures_iter(&cap["path"]) + .map(|f| DeriveJunction::from(&f[1])); + addr.derive(path).ok_or(PublicError::InvalidPath) + } + } + + fn from_string_with_version(s: &str) -> Result<(Self, Ss58AddressFormat), PublicError> { + let re = Regex::new(r"^(?P[\w\d ]+)?(?P(//?[^/]+)*)$") + .expect("constructed from known-good static value; qed"); + let cap = re.captures(s).ok_or(PublicError::InvalidFormat)?; + let re_junction = + Regex::new(r"/(/?[^/]+)").expect("constructed from known-good static value; qed"); + let (addr, v) = Self::from_ss58check_with_version( + cap.name("ss58").map(|r| r.as_str()).unwrap_or(DEV_ADDRESS), + )?; + if cap["path"].is_empty() { + Ok((addr, v)) + } else { + let path = re_junction + .captures_iter(&cap["path"]) + .map(|f| DeriveJunction::from(&f[1])); + addr.derive(path) + .ok_or(PublicError::InvalidPath) + .map(|a| (a, v)) + } + } } /// Trait suitable for typical cryptographic PKI key public type. pub trait Public: - AsRef<[u8]> + AsMut<[u8]> + Default + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync + AsRef<[u8]> + AsMut<[u8]> + Default + Derive + CryptoType + PartialEq + Eq + Clone + Send + Sync { - /// A new instance from the given slice. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - fn from_slice(data: &[u8]) -> Self; - - /// Return a `Vec` filled with raw data. - fn to_raw_vec(&self) -> Vec { self.as_slice().to_vec() } - - /// Return a slice filled with raw data. - fn as_slice(&self) -> &[u8] { self.as_ref() } + /// A new instance from the given slice. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + fn from_slice(data: &[u8]) -> Self; + + /// Return a `Vec` filled with raw data. 
+ fn to_raw_vec(&self) -> Vec { + self.as_slice().to_vec() + } + + /// Return a slice filled with raw data. + fn as_slice(&self) -> &[u8] { + self.as_ref() + } } /// An opaque 32-byte cryptographic identifier. @@ -565,108 +575,119 @@ pub trait Public: pub struct AccountId32([u8; 32]); impl UncheckedFrom for AccountId32 { - fn unchecked_from(h: crate::hash::H256) -> Self { - AccountId32(h.into()) - } + fn unchecked_from(h: crate::hash::H256) -> Self { + AccountId32(h.into()) + } } #[cfg(feature = "std")] impl Ss58Codec for AccountId32 {} impl AsRef<[u8]> for AccountId32 { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for AccountId32 { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } } impl AsRef<[u8; 32]> for AccountId32 { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } } impl AsMut<[u8; 32]> for AccountId32 { - fn as_mut(&mut self) -> &mut [u8; 32] { - &mut self.0 - } + fn as_mut(&mut self) -> &mut [u8; 32] { + &mut self.0 + } } impl From<[u8; 32]> for AccountId32 { - fn from(x: [u8; 32]) -> AccountId32 { - AccountId32(x) - } + fn from(x: [u8; 32]) -> AccountId32 { + AccountId32(x) + } } impl<'a> sp_std::convert::TryFrom<&'a [u8]> for AccountId32 { - type Error = (); - fn try_from(x: &'a [u8]) -> Result { - if x.len() == 32 { - let mut r = AccountId32::default(); - r.0.copy_from_slice(x); - Ok(r) - } else { - Err(()) - } - } + type Error = (); + fn try_from(x: &'a [u8]) -> Result { + if x.len() == 32 { + let mut r = AccountId32::default(); + r.0.copy_from_slice(x); + Ok(r) + } else { + Err(()) + } + } } impl From for [u8; 32] { - fn from(x: AccountId32) -> [u8; 32] { - x.0 - } + fn from(x: AccountId32) -> [u8; 32] { + x.0 + } } impl From for AccountId32 { - fn from(k: sr25519::Public) -> Self { - k.0.into() - } + fn from(k: sr25519::Public) -> Self { + k.0.into() + } } impl From 
for AccountId32 { - fn from(k: ed25519::Public) -> Self { - k.0.into() - } + fn from(k: ed25519::Public) -> Self { + k.0.into() + } } #[cfg(feature = "std")] impl std::fmt::Display for AccountId32 { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } } impl sp_std::fmt::Debug for AccountId32 { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let s = self.to_ss58check(); + write!( + f, + "{} ({}...)", + crate::hexdisplay::HexDisplay::from(&self.0), + &s[0..8] + ) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } #[cfg(feature = "std")] impl serde::Serialize for AccountId32 { - fn serialize(&self, serializer: S) -> Result where S: serde::Serializer { - serializer.serialize_str(&self.to_ss58check()) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } } #[cfg(feature = "std")] impl<'de> serde::Deserialize<'de> for AccountId32 { - fn deserialize(deserializer: D) -> Result where D: serde::Deserializer<'de> { - Ss58Codec::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| serde::de::Error::custom(format!("{:?}", e))) - } + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + Ss58Codec::from_ss58check(&String::deserialize(deserializer)?) 
+ .map_err(|e| serde::de::Error::custom(format!("{:?}", e))) + } } #[cfg(feature = "std")] @@ -674,62 +695,88 @@ pub use self::dummy::*; #[cfg(feature = "std")] mod dummy { - use super::*; - - /// Dummy cryptography. Doesn't do anything. - #[derive(Clone, Hash, Default, Eq, PartialEq)] - pub struct Dummy; - - impl AsRef<[u8]> for Dummy { - fn as_ref(&self) -> &[u8] { &b""[..] } - } - - impl AsMut<[u8]> for Dummy { - fn as_mut(&mut self) -> &mut[u8] { - unsafe { - #[allow(mutable_transmutes)] - sp_std::mem::transmute::<_, &'static mut [u8]>(&b""[..]) - } - } - } - - impl CryptoType for Dummy { - type Pair = Dummy; - } - - impl Derive for Dummy {} - - impl Public for Dummy { - fn from_slice(_: &[u8]) -> Self { Self } - #[cfg(feature = "std")] - fn to_raw_vec(&self) -> Vec { vec![] } - fn as_slice(&self) -> &[u8] { b"" } - } - - impl Pair for Dummy { - type Public = Dummy; - type Seed = Dummy; - type Signature = Dummy; - type DeriveError = (); - #[cfg(feature = "std")] - fn generate_with_phrase(_: Option<&str>) -> (Self, String, Self::Seed) { Default::default() } - #[cfg(feature = "std")] - fn from_phrase(_: &str, _: Option<&str>) - -> Result<(Self, Self::Seed), SecretStringError> - { - Ok(Default::default()) - } - fn derive< - Iter: Iterator, - >(&self, _: Iter, _: Option) -> Result<(Self, Option), Self::DeriveError> { Ok((Self, None)) } - fn from_seed(_: &Self::Seed) -> Self { Self } - fn from_seed_slice(_: &[u8]) -> Result { Ok(Self) } - fn sign(&self, _: &[u8]) -> Self::Signature { Self } - fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } - fn verify_weak, M: AsRef<[u8]>>(_: &[u8], _: M, _: P) -> bool { true } - fn public(&self) -> Self::Public { Self } - fn to_raw_vec(&self) -> Vec { vec![] } - } + use super::*; + + /// Dummy cryptography. Doesn't do anything. + #[derive(Clone, Hash, Default, Eq, PartialEq)] + pub struct Dummy; + + impl AsRef<[u8]> for Dummy { + fn as_ref(&self) -> &[u8] { + &b""[..] 
+ } + } + + impl AsMut<[u8]> for Dummy { + fn as_mut(&mut self) -> &mut [u8] { + unsafe { + #[allow(mutable_transmutes)] + sp_std::mem::transmute::<_, &'static mut [u8]>(&b""[..]) + } + } + } + + impl CryptoType for Dummy { + type Pair = Dummy; + } + + impl Derive for Dummy {} + + impl Public for Dummy { + fn from_slice(_: &[u8]) -> Self { + Self + } + #[cfg(feature = "std")] + fn to_raw_vec(&self) -> Vec { + vec![] + } + fn as_slice(&self) -> &[u8] { + b"" + } + } + + impl Pair for Dummy { + type Public = Dummy; + type Seed = Dummy; + type Signature = Dummy; + type DeriveError = (); + #[cfg(feature = "std")] + fn generate_with_phrase(_: Option<&str>) -> (Self, String, Self::Seed) { + Default::default() + } + #[cfg(feature = "std")] + fn from_phrase(_: &str, _: Option<&str>) -> Result<(Self, Self::Seed), SecretStringError> { + Ok(Default::default()) + } + fn derive>( + &self, + _: Iter, + _: Option, + ) -> Result<(Self, Option), Self::DeriveError> { + Ok((Self, None)) + } + fn from_seed(_: &Self::Seed) -> Self { + Self + } + fn from_seed_slice(_: &[u8]) -> Result { + Ok(Self) + } + fn sign(&self, _: &[u8]) -> Self::Signature { + Self + } + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { + true + } + fn verify_weak, M: AsRef<[u8]>>(_: &[u8], _: M, _: P) -> bool { + true + } + fn public(&self) -> Self::Public { + Self + } + fn to_raw_vec(&self) -> Vec { + vec![] + } + } } /// Trait suitable for typical cryptographic PKI key pair type. @@ -737,190 +784,203 @@ mod dummy { /// For now it just specifies how to create a key from a phrase and derivation path. #[cfg(feature = "full_crypto")] pub trait Pair: CryptoType + Sized + Clone + Send + Sync + 'static { - /// The type which is used to encode a public key. - type Public: Public + Hash; - - /// The type used to (minimally) encode the data required to securely create - /// a new key pair. - type Seed: Default + AsRef<[u8]> + AsMut<[u8]> + Clone; - - /// The type used to represent a signature. 
Can be created from a key pair and a message - /// and verified with the message and a public key. - type Signature: AsRef<[u8]>; - - /// Error returned from the `derive` function. - type DeriveError; - - /// Generate new secure (random) key pair. - /// - /// This is only for ephemeral keys really, since you won't have access to the secret key - /// for storage. If you want a persistent key pair, use `generate_with_phrase` instead. - #[cfg(feature = "std")] - fn generate() -> (Self, Self::Seed) { - let mut seed = Self::Seed::default(); - OsRng.fill_bytes(seed.as_mut()); - (Self::from_seed(&seed), seed) - } - - /// Generate new secure (random) key pair and provide the recovery phrase. - /// - /// You can recover the same key later with `from_phrase`. - /// - /// This is generally slower than `generate()`, so prefer that unless you need to persist - /// the key from the current session. - #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed); - - /// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. - #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Self, Self::Seed), SecretStringError>; - - /// Derive a child key from a series of given junctions. - fn derive>(&self, - path: Iter, - seed: Option, - ) -> Result<(Self, Option), Self::DeriveError>; - - /// Generate new key pair from the provided `seed`. - /// - /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed - /// by an attacker then they can also derive your key. - fn from_seed(seed: &Self::Seed) -> Self; - - /// Make a new key pair from secret seed material. The slice must be the correct size or - /// it will return `None`. - /// - /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed - /// by an attacker then they can also derive your key. - fn from_seed_slice(seed: &[u8]) -> Result; - - /// Sign a message. 
- fn sign(&self, message: &[u8]) -> Self::Signature; - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool; - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool; - - /// Get the public key. - fn public(&self) -> Self::Public; - - /// Interprets the string `s` in order to generate a key Pair. Returns both the pair and an optional seed, in the - /// case that the pair can be expressed as a direct derivation from a seed (some cases, such as Sr25519 derivations - /// with path components, cannot). - /// - /// This takes a helper function to do the key generation from a phrase, password and - /// junction iterator. - /// - /// - If `s` is a possibly `0x` prefixed 64-digit hex string, then it will be interpreted - /// directly as a `MiniSecretKey` (aka "seed" in `subkey`). - /// - If `s` is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will - /// be derived from it. In this case: - /// - the phrase may be followed by one or more items delimited by `/` characters. - /// - the path may be followed by `///`, in which case everything after the `///` is treated - /// as a password. - /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` and - /// interpreted as above. - /// - /// In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as - /// integers, non-numeric items as strings. Junctions prefixed with `/` are interpreted as soft - /// junctions, and with `//` as hard junctions. - /// - /// There is no correspondence mapping between SURI strings and the keys they represent. - /// Two different non-identical strings can actually lead to the same secret being derived. - /// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. 
- /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will generally - /// be equivalent to no password at all. - /// - /// `None` is returned if no matches are found. - #[cfg(feature = "std")] - fn from_string_with_seed(s: &str, password_override: Option<&str>) - -> Result<(Self, Option), SecretStringError> - { - let re = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") - .expect("constructed from known-good static value; qed"); - let cap = re.captures(s).ok_or(SecretStringError::InvalidFormat)?; - - let re_junction = Regex::new(r"/(/?[^/]+)") - .expect("constructed from known-good static value; qed"); - let path = re_junction.captures_iter(&cap["path"]) - .map(|f| DeriveJunction::from(&f[1])); - - let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); - let password = password_override.or_else(|| cap.name("password").map(|m| m.as_str())); - - let (root, seed) = if phrase.starts_with("0x") { - hex::decode(&phrase[2..]).ok() - .and_then(|seed_vec| { - let mut seed = Self::Seed::default(); - if seed.as_ref().len() == seed_vec.len() { - seed.as_mut().copy_from_slice(&seed_vec); - Some((Self::from_seed(&seed), seed)) - } else { - None - } - }) - .ok_or(SecretStringError::InvalidSeed)? - } else { - Self::from_phrase(phrase, password) - .map_err(|_| SecretStringError::InvalidPhrase)? - }; - root.derive(path, Some(seed)).map_err(|_| SecretStringError::InvalidPath) - } - - /// Interprets the string `s` in order to generate a key pair. - /// - /// See [`from_string_with_seed`](Self::from_string_with_seed) for more extensive documentation. - #[cfg(feature = "std")] - fn from_string(s: &str, password_override: Option<&str>) -> Result { - Self::from_string_with_seed(s, password_override).map(|x| x.0) - } - - /// Return a vec filled with raw data. - fn to_raw_vec(&self) -> Vec; + /// The type which is used to encode a public key. 
+ type Public: Public + Hash; + + /// The type used to (minimally) encode the data required to securely create + /// a new key pair. + type Seed: Default + AsRef<[u8]> + AsMut<[u8]> + Clone; + + /// The type used to represent a signature. Can be created from a key pair and a message + /// and verified with the message and a public key. + type Signature: AsRef<[u8]>; + + /// Error returned from the `derive` function. + type DeriveError; + + /// Generate new secure (random) key pair. + /// + /// This is only for ephemeral keys really, since you won't have access to the secret key + /// for storage. If you want a persistent key pair, use `generate_with_phrase` instead. + #[cfg(feature = "std")] + fn generate() -> (Self, Self::Seed) { + let mut seed = Self::Seed::default(); + OsRng.fill_bytes(seed.as_mut()); + (Self::from_seed(&seed), seed) + } + + /// Generate new secure (random) key pair and provide the recovery phrase. + /// + /// You can recover the same key later with `from_phrase`. + /// + /// This is generally slower than `generate()`, so prefer that unless you need to persist + /// the key from the current session. + #[cfg(feature = "std")] + fn generate_with_phrase(password: Option<&str>) -> (Self, String, Self::Seed); + + /// Returns the KeyPair from the English BIP39 seed `phrase`, or `None` if it's invalid. + #[cfg(feature = "std")] + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, Self::Seed), SecretStringError>; + + /// Derive a child key from a series of given junctions. + fn derive>( + &self, + path: Iter, + seed: Option, + ) -> Result<(Self, Option), Self::DeriveError>; + + /// Generate new key pair from the provided `seed`. + /// + /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed + /// by an attacker then they can also derive your key. + fn from_seed(seed: &Self::Seed) -> Self; + + /// Make a new key pair from secret seed material. 
The slice must be the correct size or + /// it will return `None`. + /// + /// @WARNING: THIS WILL ONLY BE SECURE IF THE `seed` IS SECURE. If it can be guessed + /// by an attacker then they can also derive your key. + fn from_seed_slice(seed: &[u8]) -> Result; + + /// Sign a message. + fn sign(&self, message: &[u8]) -> Self::Signature; + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool; + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool; + + /// Get the public key. + fn public(&self) -> Self::Public; + + /// Interprets the string `s` in order to generate a key Pair. Returns both the pair and an optional seed, in the + /// case that the pair can be expressed as a direct derivation from a seed (some cases, such as Sr25519 derivations + /// with path components, cannot). + /// + /// This takes a helper function to do the key generation from a phrase, password and + /// junction iterator. + /// + /// - If `s` is a possibly `0x` prefixed 64-digit hex string, then it will be interpreted + /// directly as a `MiniSecretKey` (aka "seed" in `subkey`). + /// - If `s` is a valid BIP-39 key phrase of 12, 15, 18, 21 or 24 words, then the key will + /// be derived from it. In this case: + /// - the phrase may be followed by one or more items delimited by `/` characters. + /// - the path may be followed by `///`, in which case everything after the `///` is treated + /// as a password. + /// - If `s` begins with a `/` character it is prefixed with the Substrate public `DEV_PHRASE` and + /// interpreted as above. + /// + /// In this case they are interpreted as HDKD junctions; purely numeric items are interpreted as + /// integers, non-numeric items as strings. Junctions prefixed with `/` are interpreted as soft + /// junctions, and with `//` as hard junctions. 
+ /// + /// There is no correspondence mapping between SURI strings and the keys they represent. + /// Two different non-identical strings can actually lead to the same secret being derived. + /// Notably, integer junction indices may be legally prefixed with arbitrary number of zeros. + /// Similarly an empty password (ending the SURI with `///`) is perfectly valid and will generally + /// be equivalent to no password at all. + /// + /// `None` is returned if no matches are found. + #[cfg(feature = "std")] + fn from_string_with_seed( + s: &str, + password_override: Option<&str>, + ) -> Result<(Self, Option), SecretStringError> { + let re = Regex::new(r"^(?P[\d\w ]+)?(?P(//?[^/]+)*)(///(?P.*))?$") + .expect("constructed from known-good static value; qed"); + let cap = re.captures(s).ok_or(SecretStringError::InvalidFormat)?; + + let re_junction = + Regex::new(r"/(/?[^/]+)").expect("constructed from known-good static value; qed"); + let path = re_junction + .captures_iter(&cap["path"]) + .map(|f| DeriveJunction::from(&f[1])); + + let phrase = cap.name("phrase").map(|r| r.as_str()).unwrap_or(DEV_PHRASE); + let password = password_override.or_else(|| cap.name("password").map(|m| m.as_str())); + + let (root, seed) = if phrase.starts_with("0x") { + hex::decode(&phrase[2..]) + .ok() + .and_then(|seed_vec| { + let mut seed = Self::Seed::default(); + if seed.as_ref().len() == seed_vec.len() { + seed.as_mut().copy_from_slice(&seed_vec); + Some((Self::from_seed(&seed), seed)) + } else { + None + } + }) + .ok_or(SecretStringError::InvalidSeed)? + } else { + Self::from_phrase(phrase, password).map_err(|_| SecretStringError::InvalidPhrase)? + }; + root.derive(path, Some(seed)) + .map_err(|_| SecretStringError::InvalidPath) + } + + /// Interprets the string `s` in order to generate a key pair. + /// + /// See [`from_string_with_seed`](Self::from_string_with_seed) for more extensive documentation. 
+ #[cfg(feature = "std")] + fn from_string(s: &str, password_override: Option<&str>) -> Result { + Self::from_string_with_seed(s, password_override).map(|x| x.0) + } + + /// Return a vec filled with raw data. + fn to_raw_vec(&self) -> Vec; } /// One type is wrapped by another. pub trait IsWrappedBy: From + Into { - /// Get a reference to the inner from the outer. - fn from_ref(outer: &Outer) -> &Self; - /// Get a mutable reference to the inner from the outer. - fn from_mut(outer: &mut Outer) -> &mut Self; + /// Get a reference to the inner from the outer. + fn from_ref(outer: &Outer) -> &Self; + /// Get a mutable reference to the inner from the outer. + fn from_mut(outer: &mut Outer) -> &mut Self; } /// Opposite of `IsWrappedBy` - denotes a type which is a simple wrapper around another type. pub trait Wraps: Sized { - /// The inner type it is wrapping. - type Inner: IsWrappedBy; + /// The inner type it is wrapping. + type Inner: IsWrappedBy; } -impl IsWrappedBy for T where - Outer: AsRef + AsMut + From, - T: From, +impl IsWrappedBy for T +where + Outer: AsRef + AsMut + From, + T: From, { - /// Get a reference to the inner from the outer. - fn from_ref(outer: &Outer) -> &Self { outer.as_ref() } - - /// Get a mutable reference to the inner from the outer. - fn from_mut(outer: &mut Outer) -> &mut Self { outer.as_mut() } + /// Get a reference to the inner from the outer. + fn from_ref(outer: &Outer) -> &Self { + outer.as_ref() + } + + /// Get a mutable reference to the inner from the outer. 
+ fn from_mut(outer: &mut Outer) -> &mut Self { + outer.as_mut() + } } -impl UncheckedFrom for Outer where - Outer: Wraps, - Inner: IsWrappedBy + UncheckedFrom, +impl UncheckedFrom for Outer +where + Outer: Wraps, + Inner: IsWrappedBy + UncheckedFrom, { - fn unchecked_from(t: T) -> Self { - let inner: Inner = t.unchecked_into(); - inner.into() - } + fn unchecked_from(t: T) -> Self { + let inner: Inner = t.unchecked_into(); + inner.into() + } } /// Type which has a particular kind of crypto associated with it. pub trait CryptoType { - /// The pair key type of this crypto. - #[cfg(feature = "full_crypto")] - type Pair: Pair; + /// The pair key type of this crypto. + #[cfg(feature = "full_crypto")] + type Pair: Pair; } /// An identifier for a type of cryptographic key. @@ -931,34 +991,44 @@ pub trait CryptoType { /// Values whose first character is `_` are reserved for private use and won't conflict with any /// public modules. #[derive( - Copy, Clone, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Encode, Decode, PassByInner, - crate::RuntimeDebug + Copy, + Clone, + Default, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Encode, + Decode, + PassByInner, + crate::RuntimeDebug, )] pub struct KeyTypeId(pub [u8; 4]); impl From for KeyTypeId { - fn from(x: u32) -> Self { - Self(x.to_le_bytes()) - } + fn from(x: u32) -> Self { + Self(x.to_le_bytes()) + } } impl From for u32 { - fn from(x: KeyTypeId) -> Self { - u32::from_le_bytes(x.0) - } + fn from(x: KeyTypeId) -> Self { + u32::from_le_bytes(x.0) + } } impl<'a> TryFrom<&'a str> for KeyTypeId { - type Error = (); - fn try_from(x: &'a str) -> Result { - let b = x.as_bytes(); - if b.len() != 4 { - return Err(()); - } - let mut res = KeyTypeId::default(); - res.0.copy_from_slice(&b[0..4]); - Ok(res) - } + type Error = (); + fn try_from(x: &'a str) -> Result { + let b = x.as_bytes(); + if b.len() != 4 { + return Err(()); + } + let mut res = KeyTypeId::default(); + res.0.copy_from_slice(&b[0..4]); + Ok(res) + } } /// An 
identifier for a specific cryptographic algorithm used by a key pair @@ -971,15 +1041,13 @@ pub struct CryptoTypePublicPair(pub CryptoTypeId, pub Vec); #[cfg(feature = "std")] impl sp_std::fmt::Display for CryptoTypePublicPair { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let id = match str::from_utf8(&(self.0).0[..]) { - Ok(id) => id.to_string(), - Err(_) => { - format!("{:#?}", self.0) - } - }; - write!(f, "{}-{}", id, HexDisplay::from(&self.1)) - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let id = match str::from_utf8(&(self.0).0[..]) { + Ok(id) => id.to_string(), + Err(_) => format!("{:#?}", self.0), + }; + write!(f, "{}-{}", id, HexDisplay::from(&self.1)) + } } /// Known key types; this also functions as a global registry of key types for projects wishing to @@ -988,185 +1056,267 @@ impl sp_std::fmt::Display for CryptoTypePublicPair { /// It's not universal in the sense that *all* key types need to be mentioned here, it's just a /// handy place to put common key types. pub mod key_types { - use super::KeyTypeId; - - /// Key type for Babe module, build-in. - pub const BABE: KeyTypeId = KeyTypeId(*b"babe"); - /// Key type for Grandpa module, build-in. - pub const GRANDPA: KeyTypeId = KeyTypeId(*b"gran"); - /// Key type for controlling an account in a Substrate runtime, built-in. - pub const ACCOUNT: KeyTypeId = KeyTypeId(*b"acco"); - /// Key type for Aura module, built-in. - pub const AURA: KeyTypeId = KeyTypeId(*b"aura"); - /// Key type for ImOnline module, built-in. - pub const IM_ONLINE: KeyTypeId = KeyTypeId(*b"imon"); - /// Key type for AuthorityDiscovery module, built-in. - pub const AUTHORITY_DISCOVERY: KeyTypeId = KeyTypeId(*b"audi"); - /// Key type for staking, built-in. - pub const STAKING: KeyTypeId = KeyTypeId(*b"stak"); - /// A key type ID useful for tests. - pub const DUMMY: KeyTypeId = KeyTypeId(*b"dumy"); + use super::KeyTypeId; + + /// Key type for Babe module, build-in. 
+ pub const BABE: KeyTypeId = KeyTypeId(*b"babe"); + /// Key type for Grandpa module, build-in. + pub const GRANDPA: KeyTypeId = KeyTypeId(*b"gran"); + /// Key type for controlling an account in a Substrate runtime, built-in. + pub const ACCOUNT: KeyTypeId = KeyTypeId(*b"acco"); + /// Key type for Aura module, built-in. + pub const AURA: KeyTypeId = KeyTypeId(*b"aura"); + /// Key type for ImOnline module, built-in. + pub const IM_ONLINE: KeyTypeId = KeyTypeId(*b"imon"); + /// Key type for AuthorityDiscovery module, built-in. + pub const AUTHORITY_DISCOVERY: KeyTypeId = KeyTypeId(*b"audi"); + /// Key type for staking, built-in. + pub const STAKING: KeyTypeId = KeyTypeId(*b"stak"); + /// A key type ID useful for tests. + pub const DUMMY: KeyTypeId = KeyTypeId(*b"dumy"); } #[cfg(test)] mod tests { - use crate::DeriveJunction; - use hex_literal::hex; - use super::*; - - #[derive(Clone, Eq, PartialEq, Debug)] - enum TestPair { - Generated, - GeneratedWithPhrase, - GeneratedFromPhrase{phrase: String, password: Option}, - Standard{phrase: String, password: Option, path: Vec}, - Seed(Vec), - } - impl Default for TestPair { - fn default() -> Self { - TestPair::Generated - } - } - impl CryptoType for TestPair { - type Pair = Self; - } - - #[derive(Clone, PartialEq, Eq, Hash, Default)] - struct TestPublic; - impl AsRef<[u8]> for TestPublic { - fn as_ref(&self) -> &[u8] { - &[] - } - } - impl AsMut<[u8]> for TestPublic { - fn as_mut(&mut self) -> &mut [u8] { - &mut [] - } - } - impl CryptoType for TestPublic { - type Pair = TestPair; - } - impl Derive for TestPublic {} - impl Public for TestPublic { - fn from_slice(_bytes: &[u8]) -> Self { - Self - } - fn as_slice(&self) -> &[u8] { - &[] - } - fn to_raw_vec(&self) -> Vec { - vec![] - } - } - impl Pair for TestPair { - type Public = TestPublic; - type Seed = [u8; 8]; - type Signature = [u8; 0]; - type DeriveError = (); - - fn generate() -> (Self, ::Seed) { (TestPair::Generated, [0u8; 8]) } - fn generate_with_phrase(_password: 
Option<&str>) -> (Self, String, ::Seed) { - (TestPair::GeneratedWithPhrase, "".into(), [0u8; 8]) - } - fn from_phrase(phrase: &str, password: Option<&str>) - -> Result<(Self, ::Seed), SecretStringError> - { - Ok((TestPair::GeneratedFromPhrase { - phrase: phrase.to_owned(), - password: password.map(Into::into) - }, [0u8; 8])) - } - fn derive>(&self, path_iter: Iter, _: Option<[u8; 8]>) - -> Result<(Self, Option<[u8; 8]>), Self::DeriveError> - { - Ok((match self.clone() { - TestPair::Standard {phrase, password, path} => - TestPair::Standard { phrase, password, path: path.into_iter().chain(path_iter).collect() }, - TestPair::GeneratedFromPhrase {phrase, password} => - TestPair::Standard { phrase, password, path: path_iter.collect() }, - x => if path_iter.count() == 0 { x } else { return Err(()) }, - }, None)) - } - fn from_seed(_seed: &::Seed) -> Self { TestPair::Seed(_seed.as_ref().to_owned()) } - fn sign(&self, _message: &[u8]) -> Self::Signature { [] } - fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { true } - fn verify_weak, M: AsRef<[u8]>>( - _sig: &[u8], - _message: M, - _pubkey: P - ) -> bool { true } - fn public(&self) -> Self::Public { TestPublic } - fn from_seed_slice(seed: &[u8]) - -> Result - { - Ok(TestPair::Seed(seed.to_owned())) - } - fn to_raw_vec(&self) -> Vec { - vec![] - } - } - - #[test] - fn interpret_std_seed_should_work() { - assert_eq!( - TestPair::from_string("0x0123456789abcdef", None), - Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) - ); - } - - #[test] - fn password_override_should_work() { - assert_eq!( - TestPair::from_string("hello world///password", None), - TestPair::from_string("hello world", Some("password")), - ); - assert_eq!( - TestPair::from_string("hello world///password", None), - TestPair::from_string("hello world///other password", Some("password")), - ); - } - - #[test] - fn interpret_std_secret_string_should_work() { - assert_eq!( - TestPair::from_string("hello world", None), - 
Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![]}) - ); - assert_eq!( - TestPair::from_string("hello world/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft(1)]}) - ); - assert_eq!( - TestPair::from_string("hello world/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::soft("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world//1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1)]}) - ); - assert_eq!( - TestPair::from_string("hello world//DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world//1/DOT", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world//DOT/1", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: None, path: vec![DeriveJunction::hard("DOT"), DeriveJunction::soft(1)]}) - ); - assert_eq!( - TestPair::from_string("hello world///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![]}) - ); - assert_eq!( - TestPair::from_string("hello world//1/DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")]}) - ); - assert_eq!( - TestPair::from_string("hello world/1//DOT///password", None), - Ok(TestPair::Standard{phrase: "hello world".to_owned(), password: Some("password".to_owned()), path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")]}) - ); - } + use super::*; + use crate::DeriveJunction; + use 
hex_literal::hex; + + #[derive(Clone, Eq, PartialEq, Debug)] + enum TestPair { + Generated, + GeneratedWithPhrase, + GeneratedFromPhrase { + phrase: String, + password: Option, + }, + Standard { + phrase: String, + password: Option, + path: Vec, + }, + Seed(Vec), + } + impl Default for TestPair { + fn default() -> Self { + TestPair::Generated + } + } + impl CryptoType for TestPair { + type Pair = Self; + } + + #[derive(Clone, PartialEq, Eq, Hash, Default)] + struct TestPublic; + impl AsRef<[u8]> for TestPublic { + fn as_ref(&self) -> &[u8] { + &[] + } + } + impl AsMut<[u8]> for TestPublic { + fn as_mut(&mut self) -> &mut [u8] { + &mut [] + } + } + impl CryptoType for TestPublic { + type Pair = TestPair; + } + impl Derive for TestPublic {} + impl Public for TestPublic { + fn from_slice(_bytes: &[u8]) -> Self { + Self + } + fn as_slice(&self) -> &[u8] { + &[] + } + fn to_raw_vec(&self) -> Vec { + vec![] + } + } + impl Pair for TestPair { + type Public = TestPublic; + type Seed = [u8; 8]; + type Signature = [u8; 0]; + type DeriveError = (); + + fn generate() -> (Self, ::Seed) { + (TestPair::Generated, [0u8; 8]) + } + fn generate_with_phrase(_password: Option<&str>) -> (Self, String, ::Seed) { + (TestPair::GeneratedWithPhrase, "".into(), [0u8; 8]) + } + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Self, ::Seed), SecretStringError> { + Ok(( + TestPair::GeneratedFromPhrase { + phrase: phrase.to_owned(), + password: password.map(Into::into), + }, + [0u8; 8], + )) + } + fn derive>( + &self, + path_iter: Iter, + _: Option<[u8; 8]>, + ) -> Result<(Self, Option<[u8; 8]>), Self::DeriveError> { + Ok(( + match self.clone() { + TestPair::Standard { + phrase, + password, + path, + } => TestPair::Standard { + phrase, + password, + path: path.into_iter().chain(path_iter).collect(), + }, + TestPair::GeneratedFromPhrase { phrase, password } => TestPair::Standard { + phrase, + password, + path: path_iter.collect(), + }, + x => { + if path_iter.count() == 0 { 
+ x + } else { + return Err(()); + } + } + }, + None, + )) + } + fn from_seed(_seed: &::Seed) -> Self { + TestPair::Seed(_seed.as_ref().to_owned()) + } + fn sign(&self, _message: &[u8]) -> Self::Signature { + [] + } + fn verify>(_: &Self::Signature, _: M, _: &Self::Public) -> bool { + true + } + fn verify_weak, M: AsRef<[u8]>>( + _sig: &[u8], + _message: M, + _pubkey: P, + ) -> bool { + true + } + fn public(&self) -> Self::Public { + TestPublic + } + fn from_seed_slice(seed: &[u8]) -> Result { + Ok(TestPair::Seed(seed.to_owned())) + } + fn to_raw_vec(&self) -> Vec { + vec![] + } + } + + #[test] + fn interpret_std_seed_should_work() { + assert_eq!( + TestPair::from_string("0x0123456789abcdef", None), + Ok(TestPair::Seed(hex!["0123456789abcdef"][..].to_owned())) + ); + } + + #[test] + fn password_override_should_work() { + assert_eq!( + TestPair::from_string("hello world///password", None), + TestPair::from_string("hello world", Some("password")), + ); + assert_eq!( + TestPair::from_string("hello world///password", None), + TestPair::from_string("hello world///other password", Some("password")), + ); + } + + #[test] + fn interpret_std_secret_string_should_work() { + assert_eq!( + TestPair::from_string("hello world", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![] + }) + ); + assert_eq!( + TestPair::from_string("hello world/1", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft(1)] + }) + ); + assert_eq!( + TestPair::from_string("hello world/DOT", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::soft("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world//1", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1)] + }) + ); + assert_eq!( + TestPair::from_string("hello world//DOT", None), + 
Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world//1/DOT", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world//DOT/1", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: None, + path: vec![DeriveJunction::hard("DOT"), DeriveJunction::soft(1)] + }) + ); + assert_eq!( + TestPair::from_string("hello world///password", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![] + }) + ); + assert_eq!( + TestPair::from_string("hello world//1/DOT///password", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::hard(1), DeriveJunction::soft("DOT")] + }) + ); + assert_eq!( + TestPair::from_string("hello world/1//DOT///password", None), + Ok(TestPair::Standard { + phrase: "hello world".to_owned(), + password: Some("password".to_owned()), + path: vec![DeriveJunction::soft(1), DeriveJunction::hard("DOT")] + }) + ); + } } diff --git a/primitives/core/src/ecdsa.rs b/primitives/core/src/ecdsa.rs index 8a45157844..1683ddab91 100644 --- a/primitives/core/src/ecdsa.rs +++ b/primitives/core/src/ecdsa.rs @@ -21,24 +21,27 @@ #[cfg(feature = "full_crypto")] use sp_std::vec::Vec; +use codec::{Decode, Encode}; use sp_std::cmp::Ordering; -use codec::{Encode, Decode}; -#[cfg(feature = "full_crypto")] -use core::convert::{TryFrom, TryInto}; -#[cfg(feature = "std")] -use substrate_bip39::seed_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "full_crypto")] -use crate::{hashing::blake2_256, crypto::{Pair as TraitPair, DeriveJunction, SecretStringError}}; #[cfg(feature = 
"std")] use crate::crypto::Ss58Codec; +use crate::crypto::{CryptoType, CryptoTypeId, Derive, Public as TraitPublic, UncheckedFrom}; +#[cfg(feature = "full_crypto")] +use crate::{ + crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}, + hashing::blake2_256, +}; #[cfg(feature = "std")] -use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::crypto::{Public as TraitPublic, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; +use bip39::{Language, Mnemonic, MnemonicType}; +#[cfg(feature = "full_crypto")] +use core::convert::{TryFrom, TryInto}; #[cfg(feature = "full_crypto")] use secp256k1::{PublicKey, SecretKey}; +#[cfg(feature = "std")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; +#[cfg(feature = "std")] +use substrate_bip39::seed_from_entropy; /// An identifier used to match public keys against ecdsa keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ecds"); @@ -54,21 +57,21 @@ type Seed = [u8; 32]; pub struct Public([u8; 33]); impl PartialOrd for Public { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } } impl Ord for Public { - fn cmp(&self, other: &Self) -> Ordering { - self.as_ref().cmp(&other.as_ref()) - } + fn cmp(&self, other: &Self) -> Ordering { + self.as_ref().cmp(&other.as_ref()) + } } impl PartialEq for Public { - fn eq(&self, other: &Self) -> bool { - self.as_ref() == other.as_ref() - } + fn eq(&self, other: &Self) -> bool { + self.as_ref() == other.as_ref() + } } impl Eq for Public {} @@ -77,130 +80,140 @@ impl Eq for Public {} #[cfg(feature = "std")] #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub enum PublicError { - /// Bad alphabet. - BadBase58, - /// Bad length. - BadLength, - /// Unknown version. - UnknownVersion, - /// Invalid checksum. - InvalidChecksum, + /// Bad alphabet. + BadBase58, + /// Bad length. + BadLength, + /// Unknown version. 
+ UnknownVersion, + /// Invalid checksum. + InvalidChecksum, } impl Public { - /// A new instance from the given 33-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 33]) -> Self { - Self(data) - } - - /// Create a new instance from the given full public key. - /// - /// This will convert the full public key into the compressed format. - #[cfg(feature = "std")] - pub fn from_full(full: &[u8]) -> Result { - secp256k1::PublicKey::parse_slice(full, None) - .map(|k| k.serialize_compressed()) - .map(Self) - .map_err(|_| ()) - } + /// A new instance from the given 33-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_raw(data: [u8; 33]) -> Self { + Self(data) + } + + /// Create a new instance from the given full public key. + /// + /// This will convert the full public key into the compressed format. + #[cfg(feature = "std")] + pub fn from_full(full: &[u8]) -> Result { + secp256k1::PublicKey::parse_slice(full, None) + .map(|k| k.serialize_compressed()) + .map(Self) + .map_err(|_| ()) + } } impl TraitPublic for Public { - /// A new instance from the given slice that should be 33 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 33]; - r.copy_from_slice(data); - Self(r) - } + /// A new instance from the given slice that should be 33 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! 
+ fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 33]; + r.copy_from_slice(data); + Self(r) + } } impl Derive for Public {} impl Default for Public { - fn default() -> Self { - Public([0u8; 33]) - } + fn default() -> Self { + Public([0u8; 33]) + } } impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } } impl sp_std::convert::TryFrom<&[u8]> for Public { - type Error = (); + type Error = (); - fn try_from(data: &[u8]) -> Result { - if data.len() == 33 { - Ok(Self::from_slice(data)) - } else { - - Err(()) - } - } + fn try_from(data: &[u8]) -> Result { + if data.len() == 33 { + Ok(Self::from_slice(data)) + } else { + Err(()) + } + } } #[cfg(feature = "full_crypto")] impl From for Public { - fn from(x: Pair) -> Self { - x.public() - } + fn from(x: Pair) -> Self { + x.public() + } } impl UncheckedFrom<[u8; 33]> for Public { - fn unchecked_from(x: [u8; 33]) -> Self { - Public(x) - } + fn unchecked_from(x: [u8; 33]) -> Self { + Public(x) + } } #[cfg(feature = "std")] impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } } #[cfg(feature = "std")] impl std::fmt::Debug for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.as_ref()), &s[0..8]) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + let s = self.to_ss58check(); + write!( + f, + "{} ({}...)", + crate::hexdisplay::HexDisplay::from(&self.as_ref()), + &s[0..8] + ) + } } #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, 
serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&self.to_ss58check()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } } #[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Public { - fn hash(&self, state: &mut H) { - self.as_ref().hash(state); - } + fn hash(&self, state: &mut H) { + self.as_ref().hash(state); + } } /// A signature (a 512-bit value, plus 8 bits for recovery ID). @@ -208,466 +221,492 @@ impl sp_std::hash::Hash for Public { pub struct Signature([u8; 65]); impl sp_std::convert::TryFrom<&[u8]> for Signature { - type Error = (); + type Error = (); - fn try_from(data: &[u8]) -> Result { - if data.len() == 65 { - let mut inner = [0u8; 65]; - inner.copy_from_slice(data); - Ok(Signature(inner)) - } else { - Err(()) - } - } + fn try_from(data: &[u8]) -> Result { + if data.len() == 65 { + let mut inner = [0u8; 65]; + inner.copy_from_slice(data); + Ok(Signature(inner)) + } else { + Err(()) + } + } } #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&hex::encode(self)) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&hex::encode(self)) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - let signature_hex = 
hex::decode(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Ok(Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let signature_hex = hex::decode(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?; + Ok(Signature::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?) + } } impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; 65]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } + fn clone(&self) -> Self { + let mut r = [0u8; 65]; + r.copy_from_slice(&self.0[..]); + Signature(r) + } } impl Default for Signature { - fn default() -> Self { - Signature([0u8; 65]) - } + fn default() -> Self { + Signature([0u8; 65]) + } } impl PartialEq for Signature { - fn eq(&self, b: &Self) -> bool { - self.0[..] == b.0[..] - } + fn eq(&self, b: &Self) -> bool { + self.0[..] == b.0[..] + } } impl Eq for Signature {} impl From for [u8; 65] { - fn from(v: Signature) -> [u8; 65] { - v.0 - } + fn from(v: Signature) -> [u8; 65] { + v.0 + } } impl AsRef<[u8; 65]> for Signature { - fn as_ref(&self) -> &[u8; 65] { - &self.0 - } + fn as_ref(&self) -> &[u8; 65] { + &self.0 + } } impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] 
+ } } #[cfg(feature = "std")] impl std::fmt::Debug for Signature { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } } #[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - sp_std::hash::Hash::hash(&self.0[..], state); - } + fn hash(&self, state: &mut H) { + sp_std::hash::Hash::hash(&self.0[..], state); + } } impl Signature { - /// A new instance from the given 65-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_raw(data: [u8; 65]) -> Signature { - Signature(data) - } - - /// A new instance from the given slice that should be 65 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 65]; - r.copy_from_slice(data); - Signature(r) - } - - /// Recover the public key from this signature and a message. - #[cfg(feature = "full_crypto")] - pub fn recover>(&self, message: M) -> Option { - let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); - let sig: (_, _) = self.try_into().ok()?; - secp256k1::recover(&message, &sig.0, &sig.1) - .ok() - .map(|recovered| Public(recovered.serialize_compressed())) - } + /// A new instance from the given 65-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_raw(data: [u8; 65]) -> Signature { + Signature(data) + } + + /// A new instance from the given slice that should be 65 bytes long. 
+ /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 65]; + r.copy_from_slice(data); + Signature(r) + } + + /// Recover the public key from this signature and a message. + #[cfg(feature = "full_crypto")] + pub fn recover>(&self, message: M) -> Option { + let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); + let sig: (_, _) = self.try_into().ok()?; + secp256k1::recover(&message, &sig.0, &sig.1) + .ok() + .map(|recovered| Public(recovered.serialize_compressed())) + } } #[cfg(feature = "full_crypto")] impl From<(secp256k1::Signature, secp256k1::RecoveryId)> for Signature { - fn from(x: (secp256k1::Signature, secp256k1::RecoveryId)) -> Signature { - let mut r = Self::default(); - r.0[0..64].copy_from_slice(&x.0.serialize()[..]); - r.0[64] = x.1.serialize(); - r - } + fn from(x: (secp256k1::Signature, secp256k1::RecoveryId)) -> Signature { + let mut r = Self::default(); + r.0[0..64].copy_from_slice(&x.0.serialize()[..]); + r.0[64] = x.1.serialize(); + r + } } #[cfg(feature = "full_crypto")] impl<'a> TryFrom<&'a Signature> for (secp256k1::Signature, secp256k1::RecoveryId) { - type Error = (); - fn try_from(x: &'a Signature) -> Result<(secp256k1::Signature, secp256k1::RecoveryId), Self::Error> { - Ok(( - secp256k1::Signature::parse_slice(&x.0[0..64]).expect("hardcoded to 64 bytes; qed"), - secp256k1::RecoveryId::parse(x.0[64]).map_err(|_| ())?, - )) - } + type Error = (); + fn try_from( + x: &'a Signature, + ) -> Result<(secp256k1::Signature, secp256k1::RecoveryId), Self::Error> { + Ok(( + secp256k1::Signature::parse_slice(&x.0[0..64]).expect("hardcoded to 64 bytes; qed"), + secp256k1::RecoveryId::parse(x.0[64]).map_err(|_| ())?, + )) + } } /// Derive a single hard junction. 
#[cfg(feature = "full_crypto")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { - ("Secp256k1HDKD", secret_seed, cc).using_encoded(|data| { - let mut res = [0u8; 32]; - res.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); - res - }) + ("Secp256k1HDKD", secret_seed, cc).using_encoded(|data| { + let mut res = [0u8; 32]; + res.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); + res + }) } /// An error when deriving a key. #[cfg(feature = "full_crypto")] pub enum DeriveError { - /// A soft key was found in the path (and is unsupported). - SoftKeyInPath, + /// A soft key was found in the path (and is unsupported). + SoftKeyInPath, } /// A key pair. #[cfg(feature = "full_crypto")] #[derive(Clone)] pub struct Pair { - public: PublicKey, - secret: SecretKey, + public: PublicKey, + secret: SecretKey, } #[cfg(feature = "full_crypto")] impl TraitPair for Pair { - type Public = Public; - type Seed = Seed; - type Signature = Signature; - type DeriveError = DeriveError; - - /// Generate new secure (random) key pair and provide the recovery phrase. - /// - /// You can recover the same key later with `from_phrase`. - #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) - .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) - } - - /// Generate key pair from given recovery phrase and password. 
- #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { - let big_seed = seed_from_entropy( - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)?.entropy(), - password.unwrap_or(""), - ).map_err(|_| SecretStringError::InvalidSeed)?; - let mut seed = Seed::default(); - seed.copy_from_slice(&big_seed[0..32]); - Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) - } - - /// Make a new key pair from secret seed material. - /// - /// You should never need to use this; generate(), generate_with_phrase - fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]).expect("seed has valid length; qed") - } - - /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it - /// will return `None`. - /// - /// You should never need to use this; generate(), generate_with_phrase - fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = SecretKey::parse_slice(seed_slice) - .map_err(|_| SecretStringError::InvalidSeedLength)?; - let public = PublicKey::from_secret_key(&secret); - Ok(Pair{ secret, public }) - } - - /// Derive a child key from a series of given junctions. - fn derive>(&self, - path: Iter, - _seed: Option - ) -> Result<(Pair, Option), DeriveError> { - let mut acc = self.secret.serialize(); - for j in path { - match j { - DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), - DeriveJunction::Hard(cc) => acc = derive_hard_junction(&acc, &cc), - } - } - Ok((Self::from_seed(&acc), Some(acc))) - } - - /// Get the public key. - fn public(&self) -> Public { - Public(self.public.serialize_compressed()) - } - - /// Sign a message. - fn sign(&self, message: &[u8]) -> Signature { - let message = secp256k1::Message::parse(&blake2_256(message)); - secp256k1::sign(&message, &self.secret).into() - } - - /// Verify a signature on a message. Returns true if the signature is good. 
- fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); - let sig: (_, _) = match sig.try_into() { Ok(x) => x, _ => return false }; - match secp256k1::recover(&message, &sig.0, &sig.1) { - Ok(actual) => &pubkey.0[..] == &actual.serialize_compressed()[..], - _ => false, - } - } - - /// Verify a signature on a message. Returns true if the signature is good. - /// - /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct - /// size. Use it only if you're coming from byte buffers and need the speed. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); - if sig.len() != 65 { return false } - let ri = match secp256k1::RecoveryId::parse(sig[64]) { Ok(x) => x, _ => return false }; - let sig = match secp256k1::Signature::parse_slice(&sig[0..64]) { Ok(x) => x, _ => return false }; - match secp256k1::recover(&message, &sig, &ri) { - Ok(actual) => pubkey.as_ref() == &actual.serialize()[1..], - _ => false, - } - } - - /// Return a vec filled with raw data. - fn to_raw_vec(&self) -> Vec { - self.seed().to_vec() - } + type Public = Public; + type Seed = Seed; + type Signature = Signature; + type DeriveError = DeriveError; + + /// Generate new secure (random) key pair and provide the recovery phrase. + /// + /// You can recover the same key later with `from_phrase`. + #[cfg(feature = "std")] + fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + let phrase = mnemonic.phrase(); + let (pair, seed) = Self::from_phrase(phrase, password) + .expect("All phrases generated by Mnemonic are valid; qed"); + (pair, phrase.to_owned(), seed) + } + + /// Generate key pair from given recovery phrase and password. 
+ #[cfg(feature = "std")] + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { + let big_seed = seed_from_entropy( + Mnemonic::from_phrase(phrase, Language::English) + .map_err(|_| SecretStringError::InvalidPhrase)? + .entropy(), + password.unwrap_or(""), + ) + .map_err(|_| SecretStringError::InvalidSeed)?; + let mut seed = Seed::default(); + seed.copy_from_slice(&big_seed[0..32]); + Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) + } + + /// Make a new key pair from secret seed material. + /// + /// You should never need to use this; generate(), generate_with_phrase + fn from_seed(seed: &Seed) -> Pair { + Self::from_seed_slice(&seed[..]).expect("seed has valid length; qed") + } + + /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it + /// will return `None`. + /// + /// You should never need to use this; generate(), generate_with_phrase + fn from_seed_slice(seed_slice: &[u8]) -> Result { + let secret = + SecretKey::parse_slice(seed_slice).map_err(|_| SecretStringError::InvalidSeedLength)?; + let public = PublicKey::from_secret_key(&secret); + Ok(Pair { secret, public }) + } + + /// Derive a child key from a series of given junctions. + fn derive>( + &self, + path: Iter, + _seed: Option, + ) -> Result<(Pair, Option), DeriveError> { + let mut acc = self.secret.serialize(); + for j in path { + match j { + DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), + DeriveJunction::Hard(cc) => acc = derive_hard_junction(&acc, &cc), + } + } + Ok((Self::from_seed(&acc), Some(acc))) + } + + /// Get the public key. + fn public(&self) -> Public { + Public(self.public.serialize_compressed()) + } + + /// Sign a message. + fn sign(&self, message: &[u8]) -> Signature { + let message = secp256k1::Message::parse(&blake2_256(message)); + secp256k1::sign(&message, &self.secret).into() + } + + /// Verify a signature on a message. Returns true if the signature is good. 
+ fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { + let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); + let sig: (_, _) = match sig.try_into() { + Ok(x) => x, + _ => return false, + }; + match secp256k1::recover(&message, &sig.0, &sig.1) { + Ok(actual) => &pubkey.0[..] == &actual.serialize_compressed()[..], + _ => false, + } + } + + /// Verify a signature on a message. Returns true if the signature is good. + /// + /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct + /// size. Use it only if you're coming from byte buffers and need the speed. + fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { + let message = secp256k1::Message::parse(&blake2_256(message.as_ref())); + if sig.len() != 65 { + return false; + } + let ri = match secp256k1::RecoveryId::parse(sig[64]) { + Ok(x) => x, + _ => return false, + }; + let sig = match secp256k1::Signature::parse_slice(&sig[0..64]) { + Ok(x) => x, + _ => return false, + }; + match secp256k1::recover(&message, &sig, &ri) { + Ok(actual) => pubkey.as_ref() == &actual.serialize()[1..], + _ => false, + } + } + + /// Return a vec filled with raw data. + fn to_raw_vec(&self) -> Vec { + self.seed().to_vec() + } } #[cfg(feature = "full_crypto")] impl Pair { - /// Get the seed for this key. - pub fn seed(&self) -> Seed { - self.secret.serialize() - } - - /// Exactly as `from_string` except that if no matches are found then, the the first 32 - /// characters are taken (padded with spaces as necessary) and used as the MiniSecretKey. - #[cfg(feature = "std")] - pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { - Self::from_string(s, password_override).unwrap_or_else(|_| { - let mut padded_seed: Seed = [' ' as u8; 32]; - let len = s.len().min(32); - padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); - Self::from_seed(&padded_seed) - }) - } + /// Get the seed for this key. 
+ pub fn seed(&self) -> Seed { + self.secret.serialize() + } + + /// Exactly as `from_string` except that if no matches are found then, the the first 32 + /// characters are taken (padded with spaces as necessary) and used as the MiniSecretKey. + #[cfg(feature = "std")] + pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { + Self::from_string(s, password_override).unwrap_or_else(|_| { + let mut padded_seed: Seed = [' ' as u8; 32]; + let len = s.len().min(32); + padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); + Self::from_seed(&padded_seed) + }) + } } impl CryptoType for Public { - #[cfg(feature="full_crypto")] - type Pair = Pair; + #[cfg(feature = "full_crypto")] + type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature="full_crypto")] - type Pair = Pair; + #[cfg(feature = "full_crypto")] + type Pair = Pair; } -#[cfg(feature="full_crypto")] +#[cfg(feature = "full_crypto")] impl CryptoType for Pair { - type Pair = Pair; + type Pair = Pair; } #[cfg(test)] mod test { - use super::*; - use hex_literal::hex; - use crate::crypto::DEV_PHRASE; - use serde_json; - - #[test] - fn default_phrase_should_be_used() { - assert_eq!( - Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), - ); - } - - #[test] - fn seed_and_derive_should_work() { - let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); - let pair = Pair::from_seed(&seed); - assert_eq!(pair.seed(), seed); - let path = vec![DeriveJunction::Hard([0u8; 32])]; - let derived = pair.derive(path.into_iter(), None).ok().unwrap(); - assert_eq!( - derived.0.seed(), - hex!("b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61") - ); - } - - #[test] - fn test_vector_should_work() { - let pair = Pair::from_seed( - &hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60") - ); - let public = pair.public(); - assert_eq!( + use 
super::*; + use crate::crypto::DEV_PHRASE; + use hex_literal::hex; + use serde_json; + + #[test] + fn default_phrase_should_be_used() { + assert_eq!( + Pair::from_string("//Alice///password", None) + .unwrap() + .public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), + ); + } + + #[test] + fn seed_and_derive_should_work() { + let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); + let pair = Pair::from_seed(&seed); + assert_eq!(pair.seed(), seed); + let path = vec![DeriveJunction::Hard([0u8; 32])]; + let derived = pair.derive(path.into_iter(), None).ok().unwrap(); + assert_eq!( + derived.0.seed(), + hex!("b8eefc4937200a8382d00050e050ced2d4ab72cc2ef1b061477afb51564fdd61") + ); + } + + #[test] + fn test_vector_should_work() { + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let public = pair.public(); + assert_eq!( public, Public::from_full( &hex!("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4")[..], ).unwrap(), ); - let message = b""; - let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); - let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn test_vector_by_string_should_work() { - let pair = Pair::from_string( - "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - None - ).unwrap(); - let public = pair.public(); - assert_eq!( + let message = b""; + let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); + let signature = Signature::from_raw(signature); + assert!(&pair.sign(&message[..]) == 
&signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn test_vector_by_string_should_work() { + let pair = Pair::from_string( + "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + None, + ) + .unwrap(); + let public = pair.public(); + assert_eq!( public, Public::from_full( &hex!("8db55b05db86c0b1786ca49f095d76344c9e6056b2f02701a7e7f3c20aabfd913ebbe148dd17c56551a52952371071a6c604b3f3abe8f2c8fa742158ea6dd7d4")[..], ).unwrap(), ); - let message = b""; - let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); - let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn generated_pair_should_work() { - let (pair, _) = Pair::generate(); - let public = pair.public(); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - assert!(Pair::verify(&signature, &message[..], &public)); - assert!(!Pair::verify(&signature, b"Something else", &public)); - } - - #[test] - fn seeded_pair_should_work() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let public = pair.public(); - assert_eq!( + let message = b""; + let signature = hex!("3dde91174bd9359027be59a428b8146513df80a2a3c7eda2194f64de04a69ab97b753169e94db6ffd50921a2668a48b94ca11e3d32c1ff19cfe88890aa7e8f3c00"); + let signature = Signature::from_raw(signature); + assert!(&pair.sign(&message[..]) == &signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn generated_pair_should_work() { + let (pair, _) = Pair::generate(); + let public = pair.public(); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, b"Something else", &public)); + } + + #[test] 
+ fn seeded_pair_should_work() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + assert_eq!( public, Public::from_full( &hex!("5676109c54b9a16d271abeb4954316a40a32bcce023ac14c8e26e958aa68fba995840f3de562156558efbfdac3f16af0065e5f66795f4dd8262a228ef8c6d813")[..], ).unwrap(), ); - let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); - let signature = pair.sign(&message[..]); - println!("Correct signature: {:?}", signature); - assert!(Pair::verify(&signature, &message[..], &public)); - assert!(!Pair::verify(&signature, "Other message", &public)); - } - - #[test] - fn generate_with_phrase_recovery_possible() { - let (pair1, phrase, _) = Pair::generate_with_phrase(None); - let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); - - assert_eq!(pair1.public(), pair2.public()); - } - - #[test] - fn generate_with_password_phrase_recovery_possible() { - let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); - let (pair2, _) = Pair::from_phrase(&phrase, Some("password")).unwrap(); - - assert_eq!(pair1.public(), pair2.public()); - } - - #[test] - fn password_does_something() { - let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); - let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); - - assert_ne!(pair1.public(), pair2.public()); - } - - #[test] - fn ss58check_roundtrip_works() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let public = pair.public(); - let s = public.to_ss58check(); - println!("Correct: {}", s); - let cmp = Public::from_ss58check(&s).unwrap(); - assert_eq!(cmp, public); - } - - #[test] - fn signature_serialization_works() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - let serialized_signature = 
serde_json::to_string(&signature).unwrap(); - // Signature is 65 bytes, so 130 chars + 2 quote chars - assert_eq!(serialized_signature.len(), 132); - let signature = serde_json::from_str(&serialized_signature).unwrap(); - assert!(Pair::verify(&signature, &message[..], &pair.public())); - } - - #[test] - fn signature_serialization_doesnt_panic() { - fn deserialize_signature(text: &str) -> Result { - Ok(serde_json::from_str(text)?) - } - assert!(deserialize_signature("Not valid json.").is_err()); - assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); - // Poorly-sized - assert!(deserialize_signature("\"abc123\"").is_err()); - } + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + println!("Correct signature: {:?}", signature); + assert!(Pair::verify(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, "Other message", &public)); + } + + #[test] + fn generate_with_phrase_recovery_possible() { + let (pair1, phrase, _) = Pair::generate_with_phrase(None); + let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); + + assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn generate_with_password_phrase_recovery_possible() { + let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); + let (pair2, _) = Pair::from_phrase(&phrase, Some("password")).unwrap(); + + assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn password_does_something() { + let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); + let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); + + assert_ne!(pair1.public(), pair2.public()); + } + + #[test] + fn ss58check_roundtrip_works() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let s = public.to_ss58check(); + println!("Correct: 
{}", s); + let cmp = Public::from_ss58check(&s).unwrap(); + assert_eq!(cmp, public); + } + + #[test] + fn signature_serialization_works() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + let serialized_signature = serde_json::to_string(&signature).unwrap(); + // Signature is 65 bytes, so 130 chars + 2 quote chars + assert_eq!(serialized_signature.len(), 132); + let signature = serde_json::from_str(&serialized_signature).unwrap(); + assert!(Pair::verify(&signature, &message[..], &pair.public())); + } + + #[test] + fn signature_serialization_doesnt_panic() { + fn deserialize_signature(text: &str) -> Result { + Ok(serde_json::from_str(text)?) + } + assert!(deserialize_signature("Not valid json.").is_err()); + assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); + // Poorly-sized + assert!(deserialize_signature("\"abc123\"").is_err()); + } } diff --git a/primitives/core/src/ed25519.rs b/primitives/core/src/ed25519.rs index abeac05388..6b3523ea45 100644 --- a/primitives/core/src/ed25519.rs +++ b/primitives/core/src/ed25519.rs @@ -22,25 +22,27 @@ use sp_std::vec::Vec; use crate::{hash::H256, hash::H512}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; +use crate::crypto::{ + CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, UncheckedFrom, +}; +#[cfg(feature = "full_crypto")] +use crate::crypto::{DeriveJunction, Pair as TraitPair, SecretStringError}; +#[cfg(feature = "std")] +use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] use blake2_rfc; #[cfg(feature = "full_crypto")] use core::convert::TryFrom; #[cfg(feature = "std")] -use substrate_bip39::seed_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "full_crypto")] -use crate::crypto::{Pair as TraitPair, DeriveJunction, 
SecretStringError}; -#[cfg(feature = "std")] -use crate::crypto::Ss58Codec; -#[cfg(feature = "std")] -use serde::{de, Serializer, Serialize, Deserializer, Deserialize}; -use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; use sp_std::ops::Deref; +#[cfg(feature = "std")] +use substrate_bip39::seed_from_entropy; /// An identifier used to match public keys against ed25519 keys pub const CRYPTO_ID: CryptoTypeId = CryptoTypeId(*b"ed25"); @@ -62,128 +64,139 @@ pub struct Pair(ed25519_dalek::Keypair); #[cfg(feature = "full_crypto")] impl Clone for Pair { - fn clone(&self) -> Self { - Pair(ed25519_dalek::Keypair { - public: self.0.public.clone(), - secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) - .expect("key is always the correct size; qed") - }) - } + fn clone(&self) -> Self { + Pair(ed25519_dalek::Keypair { + public: self.0.public.clone(), + secret: ed25519_dalek::SecretKey::from_bytes(self.0.secret.as_bytes()) + .expect("key is always the correct size; qed"), + }) + } } impl AsRef<[u8; 32]> for Public { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } } impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] 
+ } } impl Deref for Public { - type Target = [u8]; + type Target = [u8]; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl sp_std::convert::TryFrom<&[u8]> for Public { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() == 32 { - let mut inner = [0u8; 32]; - inner.copy_from_slice(data); - Ok(Public(inner)) - } else { - Err(()) - } - } + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() == 32 { + let mut inner = [0u8; 32]; + inner.copy_from_slice(data); + Ok(Public(inner)) + } else { + Err(()) + } + } } impl From for [u8; 32] { - fn from(x: Public) -> Self { - x.0 - } + fn from(x: Public) -> Self { + x.0 + } } #[cfg(feature = "full_crypto")] impl From for Public { - fn from(x: Pair) -> Self { - x.public() - } + fn from(x: Pair) -> Self { + x.public() + } } impl From for H256 { - fn from(x: Public) -> Self { - x.0.into() - } + fn from(x: Public) -> Self { + x.0.into() + } } #[cfg(feature = "std")] impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; + type Err = crate::crypto::PublicError; - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } + fn from_str(s: &str) -> Result { + Self::from_ss58check(s) + } } impl UncheckedFrom<[u8; 32]> for Public { - fn unchecked_from(x: [u8; 32]) -> Self { - Public::from_raw(x) - } + fn unchecked_from(x: [u8; 32]) -> Self { + Public::from_raw(x) + } } impl UncheckedFrom for Public { - fn unchecked_from(x: H256) -> Self { - Public::from_h256(x) - } + fn unchecked_from(x: H256) -> Self { + Public::from_h256(x) + } } #[cfg(feature = "std")] impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } } impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut 
sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let s = self.to_ss58check(); + write!( + f, + "{} ({}...)", + crate::hexdisplay::HexDisplay::from(&self.0), + &s[0..8] + ) + } - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&self.to_ss58check()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Public::from_ss58check(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e))) + } } /// A signature (a 512-bit value). 
@@ -191,192 +204,198 @@ impl<'de> Deserialize<'de> for Public { pub struct Signature(pub [u8; 64]); impl sp_std::convert::TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() == 64 { - let mut inner = [0u8; 64]; - inner.copy_from_slice(data); - Ok(Signature(inner)) - } else { - Err(()) - } - } + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() == 64 { + let mut inner = [0u8; 64]; + inner.copy_from_slice(data); + Ok(Signature(inner)) + } else { + Err(()) + } + } } #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&hex::encode(self)) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&hex::encode(self)) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - let signature_hex = hex::decode(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Ok(Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let signature_hex = hex::decode(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?; + Ok(Signature::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?) + } } impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } + fn clone(&self) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(&self.0[..]); + Signature(r) + } } impl Default for Signature { - fn default() -> Self { - Signature([0u8; 64]) - } + fn default() -> Self { + Signature([0u8; 64]) + } } impl PartialEq for Signature { - fn eq(&self, b: &Self) -> bool { - self.0[..] 
== b.0[..] - } + fn eq(&self, b: &Self) -> bool { + self.0[..] == b.0[..] + } } impl Eq for Signature {} impl From for H512 { - fn from(v: Signature) -> H512 { - H512::from(v.0) - } + fn from(v: Signature) -> H512 { + H512::from(v.0) + } } impl From for [u8; 64] { - fn from(v: Signature) -> [u8; 64] { - v.0 - } + fn from(v: Signature) -> [u8; 64] { + v.0 + } } impl AsRef<[u8; 64]> for Signature { - fn as_ref(&self) -> &[u8; 64] { - &self.0 - } + fn as_ref(&self) -> &[u8; 64] { + &self.0 + } } impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } } impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } #[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - sp_std::hash::Hash::hash(&self.0[..], state); - } + fn hash(&self, state: &mut H) { + sp_std::hash::Hash::hash(&self.0[..], state); + } } impl Signature { - /// A new instance from the given 64-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! 
- pub fn from_raw(data: [u8; 64]) -> Signature { - Signature(data) - } - - /// A new instance from the given slice that should be 64 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(data); - Signature(r) - } - - /// A new instance from an H512. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_h512(v: H512) -> Signature { - Signature(v.into()) - } + /// A new instance from the given 64-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_raw(data: [u8; 64]) -> Signature { + Signature(data) + } + + /// A new instance from the given slice that should be 64 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(data); + Signature(r) + } + + /// A new instance from an H512. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_h512(v: H512) -> Signature { + Signature(v.into()) + } } /// A localized signature also contains sender information. #[cfg(feature = "std")] #[derive(PartialEq, Eq, Clone, Debug, Encode, Decode)] pub struct LocalizedSignature { - /// The signer of the signature. - pub signer: Public, - /// The signature itself. - pub signature: Signature, + /// The signer of the signature. + pub signer: Public, + /// The signature itself. 
+ pub signature: Signature, } /// An error type for SS58 decoding. #[cfg(feature = "std")] #[derive(Clone, Copy, Eq, PartialEq, Debug)] pub enum PublicError { - /// Bad alphabet. - BadBase58, - /// Bad length. - BadLength, - /// Unknown version. - UnknownVersion, - /// Invalid checksum. - InvalidChecksum, + /// Bad alphabet. + BadBase58, + /// Bad length. + BadLength, + /// Unknown version. + UnknownVersion, + /// Invalid checksum. + InvalidChecksum, } impl Public { - /// A new instance from the given 32-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 32]) -> Self { - Public(data) - } - - /// A new instance from an H256. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_h256(x: H256) -> Self { - Public(x.into()) - } - - /// Return a slice filled with raw data. - pub fn as_array_ref(&self) -> &[u8; 32] { - self.as_ref() - } + /// A new instance from the given 32-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_raw(data: [u8; 32]) -> Self { + Public(data) + } + + /// A new instance from an H256. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_h256(x: H256) -> Self { + Public(x.into()) + } + + /// Return a slice filled with raw data. + pub fn as_array_ref(&self) -> &[u8; 32] { + self.as_ref() + } } impl TraitPublic for Public { - /// A new instance from the given slice that should be 32 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! 
- fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 32]; - r.copy_from_slice(data); - Public(r) - } + /// A new instance from the given slice that should be 32 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 32]; + r.copy_from_slice(data); + Public(r) + } } impl Derive for Public {} @@ -396,309 +415,325 @@ impl From<&Public> for CryptoTypePublicPair { /// Derive a single hard junction. #[cfg(feature = "full_crypto")] fn derive_hard_junction(secret_seed: &Seed, cc: &[u8; 32]) -> Seed { - ("Ed25519HDKD", secret_seed, cc).using_encoded(|data| { - let mut res = [0u8; 32]; - res.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); - res - }) + ("Ed25519HDKD", secret_seed, cc).using_encoded(|data| { + let mut res = [0u8; 32]; + res.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); + res + }) } /// An error when deriving a key. #[cfg(feature = "full_crypto")] pub enum DeriveError { - /// A soft key was found in the path (and is unsupported). - SoftKeyInPath, + /// A soft key was found in the path (and is unsupported). + SoftKeyInPath, } #[cfg(feature = "full_crypto")] impl TraitPair for Pair { - type Public = Public; - type Seed = Seed; - type Signature = Signature; - type DeriveError = DeriveError; - - /// Generate new secure (random) key pair and provide the recovery phrase. - /// - /// You can recover the same key later with `from_phrase`. 
- #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) - .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) - } - - /// Generate key pair from given recovery phrase and password. - #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { - let big_seed = seed_from_entropy( - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase)?.entropy(), - password.unwrap_or(""), - ).map_err(|_| SecretStringError::InvalidSeed)?; - let mut seed = Seed::default(); - seed.copy_from_slice(&big_seed[0..32]); - Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) - } - - /// Make a new key pair from secret seed material. - /// - /// You should never need to use this; generate(), generate_with_phrase - fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]).expect("seed has valid length; qed") - } - - /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it - /// will return `None`. - /// - /// You should never need to use this; generate(), generate_with_phrase - fn from_seed_slice(seed_slice: &[u8]) -> Result { - let secret = ed25519_dalek::SecretKey::from_bytes(seed_slice) - .map_err(|_| SecretStringError::InvalidSeedLength)?; - let public = ed25519_dalek::PublicKey::from(&secret); - Ok(Pair(ed25519_dalek::Keypair { secret, public })) - } - - /// Derive a child key from a series of given junctions. 
- fn derive>(&self, - path: Iter, - _seed: Option, - ) -> Result<(Pair, Option), DeriveError> { - let mut acc = self.0.secret.to_bytes(); - for j in path { - match j { - DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), - DeriveJunction::Hard(cc) => acc = derive_hard_junction(&acc, &cc), - } - } - Ok((Self::from_seed(&acc), Some(acc))) - } - - /// Get the public key. - fn public(&self) -> Public { - let mut r = [0u8; 32]; - let pk = self.0.public.as_bytes(); - r.copy_from_slice(pk); - Public(r) - } - - /// Sign a message. - fn sign(&self, message: &[u8]) -> Signature { - let r = self.0.sign(message).to_bytes(); - Signature::from_raw(r) - } - - /// Verify a signature on a message. Returns true if the signature is good. - fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - Self::verify_weak(&sig.0[..], message.as_ref(), pubkey) - } - - /// Verify a signature on a message. Returns true if the signature is good. - /// - /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct - /// size. Use it only if you're coming from byte buffers and need the speed. - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let public_key = match ed25519_dalek::PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pk) => pk, - Err(_) => return false, - }; - - let sig = match ed25519_dalek::Signature::from_bytes(sig) { - Ok(s) => s, - Err(_) => return false - }; - - match public_key.verify(message.as_ref(), &sig) { - Ok(_) => true, - _ => false, - } - } - - /// Return a vec filled with raw data. - fn to_raw_vec(&self) -> Vec { - self.seed().to_vec() - } + type Public = Public; + type Seed = Seed; + type Signature = Signature; + type DeriveError = DeriveError; + + /// Generate new secure (random) key pair and provide the recovery phrase. + /// + /// You can recover the same key later with `from_phrase`. 
+ #[cfg(feature = "std")] + fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + let phrase = mnemonic.phrase(); + let (pair, seed) = Self::from_phrase(phrase, password) + .expect("All phrases generated by Mnemonic are valid; qed"); + (pair, phrase.to_owned(), seed) + } + + /// Generate key pair from given recovery phrase and password. + #[cfg(feature = "std")] + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { + let big_seed = seed_from_entropy( + Mnemonic::from_phrase(phrase, Language::English) + .map_err(|_| SecretStringError::InvalidPhrase)? + .entropy(), + password.unwrap_or(""), + ) + .map_err(|_| SecretStringError::InvalidSeed)?; + let mut seed = Seed::default(); + seed.copy_from_slice(&big_seed[0..32]); + Self::from_seed_slice(&big_seed[0..32]).map(|x| (x, seed)) + } + + /// Make a new key pair from secret seed material. + /// + /// You should never need to use this; generate(), generate_with_phrase + fn from_seed(seed: &Seed) -> Pair { + Self::from_seed_slice(&seed[..]).expect("seed has valid length; qed") + } + + /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it + /// will return `None`. + /// + /// You should never need to use this; generate(), generate_with_phrase + fn from_seed_slice(seed_slice: &[u8]) -> Result { + let secret = ed25519_dalek::SecretKey::from_bytes(seed_slice) + .map_err(|_| SecretStringError::InvalidSeedLength)?; + let public = ed25519_dalek::PublicKey::from(&secret); + Ok(Pair(ed25519_dalek::Keypair { secret, public })) + } + + /// Derive a child key from a series of given junctions. 
+ fn derive>( + &self, + path: Iter, + _seed: Option, + ) -> Result<(Pair, Option), DeriveError> { + let mut acc = self.0.secret.to_bytes(); + for j in path { + match j { + DeriveJunction::Soft(_cc) => return Err(DeriveError::SoftKeyInPath), + DeriveJunction::Hard(cc) => acc = derive_hard_junction(&acc, &cc), + } + } + Ok((Self::from_seed(&acc), Some(acc))) + } + + /// Get the public key. + fn public(&self) -> Public { + let mut r = [0u8; 32]; + let pk = self.0.public.as_bytes(); + r.copy_from_slice(pk); + Public(r) + } + + /// Sign a message. + fn sign(&self, message: &[u8]) -> Signature { + let r = self.0.sign(message).to_bytes(); + Signature::from_raw(r) + } + + /// Verify a signature on a message. Returns true if the signature is good. + fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { + Self::verify_weak(&sig.0[..], message.as_ref(), pubkey) + } + + /// Verify a signature on a message. Returns true if the signature is good. + /// + /// This doesn't use the type system to ensure that `sig` and `pubkey` are the correct + /// size. Use it only if you're coming from byte buffers and need the speed. + fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { + let public_key = match ed25519_dalek::PublicKey::from_bytes(pubkey.as_ref()) { + Ok(pk) => pk, + Err(_) => return false, + }; + + let sig = match ed25519_dalek::Signature::from_bytes(sig) { + Ok(s) => s, + Err(_) => return false, + }; + + match public_key.verify(message.as_ref(), &sig) { + Ok(_) => true, + _ => false, + } + } + + /// Return a vec filled with raw data. + fn to_raw_vec(&self) -> Vec { + self.seed().to_vec() + } } #[cfg(feature = "full_crypto")] impl Pair { - /// Get the seed for this key. - pub fn seed(&self) -> &Seed { - self.0.secret.as_bytes() - } - - /// Exactly as `from_string` except that if no matches are found then, the the first 32 - /// characters are taken (padded with spaces as necessary) and used as the MiniSecretKey. 
- #[cfg(feature = "std")] - pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { - Self::from_string(s, password_override).unwrap_or_else(|_| { - let mut padded_seed: Seed = [' ' as u8; 32]; - let len = s.len().min(32); - padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); - Self::from_seed(&padded_seed) - }) - } + /// Get the seed for this key. + pub fn seed(&self) -> &Seed { + self.0.secret.as_bytes() + } + + /// Exactly as `from_string` except that if no matches are found then, the the first 32 + /// characters are taken (padded with spaces as necessary) and used as the MiniSecretKey. + #[cfg(feature = "std")] + pub fn from_legacy_string(s: &str, password_override: Option<&str>) -> Pair { + Self::from_string(s, password_override).unwrap_or_else(|_| { + let mut padded_seed: Seed = [' ' as u8; 32]; + let len = s.len().min(32); + padded_seed[..len].copy_from_slice(&s.as_bytes()[..len]); + Self::from_seed(&padded_seed) + }) + } } impl CryptoType for Public { - #[cfg(feature = "full_crypto")] - type Pair = Pair; + #[cfg(feature = "full_crypto")] + type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature = "full_crypto")] - type Pair = Pair; + #[cfg(feature = "full_crypto")] + type Pair = Pair; } #[cfg(feature = "full_crypto")] impl CryptoType for Pair { - type Pair = Pair; + type Pair = Pair; } #[cfg(test)] mod test { - use super::*; - use hex_literal::hex; - use crate::crypto::DEV_PHRASE; - use serde_json; - - #[test] - fn default_phrase_should_be_used() { - assert_eq!( - Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), - ); - } - - #[test] - fn seed_and_derive_should_work() { - let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); - let pair = Pair::from_seed(&seed); - assert_eq!(pair.seed(), &seed); - let path = vec![DeriveJunction::Hard([0u8; 32])]; - let derived = 
pair.derive(path.into_iter(), None).ok().unwrap().0; - assert_eq!( - derived.seed(), - &hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") - ); - } - - #[test] - fn test_vector_should_work() { - let pair = Pair::from_seed( - &hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60") - ); - let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a") - )); - let message = b""; - let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); - let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn test_vector_by_string_should_work() { - let pair = Pair::from_string( - "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", - None - ).unwrap(); - let public = pair.public(); - assert_eq!(public, Public::from_raw( - hex!("d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a") - )); - let message = b""; - let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); - let signature = Signature::from_raw(signature); - assert!(&pair.sign(&message[..]) == &signature); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn generated_pair_should_work() { - let (pair, _) = Pair::generate(); - let public = pair.public(); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - assert!(Pair::verify(&signature, &message[..], &public)); - assert!(!Pair::verify(&signature, b"Something else", &public)); - } - - #[test] - fn seeded_pair_should_work() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let public = pair.public(); - assert_eq!(public, Public::from_raw( - 
hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee") - )); - let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); - let signature = pair.sign(&message[..]); - println!("Correct signature: {:?}", signature); - assert!(Pair::verify(&signature, &message[..], &public)); - assert!(!Pair::verify(&signature, "Other message", &public)); - } - - #[test] - fn generate_with_phrase_recovery_possible() { - let (pair1, phrase, _) = Pair::generate_with_phrase(None); - let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); - - assert_eq!(pair1.public(), pair2.public()); - } - - #[test] - fn generate_with_password_phrase_recovery_possible() { - let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); - let (pair2, _) = Pair::from_phrase(&phrase, Some("password")).unwrap(); - - assert_eq!(pair1.public(), pair2.public()); - } - - #[test] - fn password_does_something() { - let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); - let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); - - assert_ne!(pair1.public(), pair2.public()); - } - - #[test] - fn ss58check_roundtrip_works() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let public = pair.public(); - let s = public.to_ss58check(); - println!("Correct: {}", s); - let cmp = Public::from_ss58check(&s).unwrap(); - assert_eq!(cmp, public); - } - - #[test] - fn signature_serialization_works() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - let serialized_signature = serde_json::to_string(&signature).unwrap(); - // Signature is 64 bytes, so 128 chars + 2 quote chars - assert_eq!(serialized_signature.len(), 130); - let signature = serde_json::from_str(&serialized_signature).unwrap(); - assert!(Pair::verify(&signature, 
&message[..], &pair.public())); - } - - #[test] - fn signature_serialization_doesnt_panic() { - fn deserialize_signature(text: &str) -> Result { - Ok(serde_json::from_str(text)?) - } - assert!(deserialize_signature("Not valid json.").is_err()); - assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); - // Poorly-sized - assert!(deserialize_signature("\"abc123\"").is_err()); - } + use super::*; + use crate::crypto::DEV_PHRASE; + use hex_literal::hex; + use serde_json; + + #[test] + fn default_phrase_should_be_used() { + assert_eq!( + Pair::from_string("//Alice///password", None) + .unwrap() + .public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), + ); + } + + #[test] + fn seed_and_derive_should_work() { + let seed = hex!("9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60"); + let pair = Pair::from_seed(&seed); + assert_eq!(pair.seed(), &seed); + let path = vec![DeriveJunction::Hard([0u8; 32])]; + let derived = pair.derive(path.into_iter(), None).ok().unwrap().0; + assert_eq!( + derived.seed(), + &hex!("ede3354e133f9c8e337ddd6ee5415ed4b4ffe5fc7d21e933f4930a3730e5b21c") + ); + } + + #[test] + fn test_vector_should_work() { + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); + let message = b""; + let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); + let signature = Signature::from_raw(signature); + assert!(&pair.sign(&message[..]) == &signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn test_vector_by_string_should_work() { + let pair = Pair::from_string( + "0x9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60", + 
None, + ) + .unwrap(); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a" + )) + ); + let message = b""; + let signature = hex!("e5564300c360ac729086e2cc806e828a84877f1eb8e5d974d873e065224901555fb8821590a33bacc61e39701cf9b46bd25bf5f0595bbe24655141438e7a100b"); + let signature = Signature::from_raw(signature); + assert!(&pair.sign(&message[..]) == &signature); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn generated_pair_should_work() { + let (pair, _) = Pair::generate(); + let public = pair.public(); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, b"Something else", &public)); + } + + #[test] + fn seeded_pair_should_work() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee" + )) + ); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + println!("Correct signature: {:?}", signature); + assert!(Pair::verify(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, "Other message", &public)); + } + + #[test] + fn generate_with_phrase_recovery_possible() { + let (pair1, phrase, _) = Pair::generate_with_phrase(None); + let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); + + assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn generate_with_password_phrase_recovery_possible() { + let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); + let (pair2, _) = Pair::from_phrase(&phrase, Some("password")).unwrap(); + + 
assert_eq!(pair1.public(), pair2.public()); + } + + #[test] + fn password_does_something() { + let (pair1, phrase, _) = Pair::generate_with_phrase(Some("password")); + let (pair2, _) = Pair::from_phrase(&phrase, None).unwrap(); + + assert_ne!(pair1.public(), pair2.public()); + } + + #[test] + fn ss58check_roundtrip_works() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + let s = public.to_ss58check(); + println!("Correct: {}", s); + let cmp = Public::from_ss58check(&s).unwrap(); + assert_eq!(cmp, public); + } + + #[test] + fn signature_serialization_works() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + let serialized_signature = serde_json::to_string(&signature).unwrap(); + // Signature is 64 bytes, so 128 chars + 2 quote chars + assert_eq!(serialized_signature.len(), 130); + let signature = serde_json::from_str(&serialized_signature).unwrap(); + assert!(Pair::verify(&signature, &message[..], &pair.public())); + } + + #[test] + fn signature_serialization_doesnt_panic() { + fn deserialize_signature(text: &str) -> Result { + Ok(serde_json::from_str(text)?) + } + assert!(deserialize_signature("Not valid json.").is_err()); + assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); + // Poorly-sized + assert!(deserialize_signature("\"abc123\"").is_err()); + } } diff --git a/primitives/core/src/hash.rs b/primitives/core/src/hash.rs index 424fefbe6a..8562785941 100644 --- a/primitives/core/src/hash.rs +++ b/primitives/core/src/hash.rs @@ -22,60 +22,114 @@ pub use primitive_types::{H160, H256, H512}; /// implemented by the same hash type. /// Panics if used to convert between different hash types. 
pub fn convert_hash, H2: AsRef<[u8]>>(src: &H2) -> H1 { - let mut dest = H1::default(); - assert_eq!(dest.as_mut().len(), src.as_ref().len()); - dest.as_mut().copy_from_slice(src.as_ref()); - dest + let mut dest = H1::default(); + assert_eq!(dest.as_mut().len(), src.as_ref().len()); + dest.as_mut().copy_from_slice(src.as_ref()); + dest } #[cfg(test)] mod tests { - use super::*; - use sp_serializer as ser; + use super::*; + use sp_serializer as ser; - #[test] - fn test_h160() { - let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000"), - (H160::from_low_u64_be(2), "0x0000000000000000000000000000000000000002"), - (H160::from_low_u64_be(15), "0x000000000000000000000000000000000000000f"), - (H160::from_low_u64_be(16), "0x0000000000000000000000000000000000000010"), - (H160::from_low_u64_be(1_000), "0x00000000000000000000000000000000000003e8"), - (H160::from_low_u64_be(100_000), "0x00000000000000000000000000000000000186a0"), - (H160::from_low_u64_be(u64::max_value()), "0x000000000000000000000000ffffffffffffffff"), - ]; + #[test] + fn test_h160() { + let tests = vec![ + ( + Default::default(), + "0x0000000000000000000000000000000000000000", + ), + ( + H160::from_low_u64_be(2), + "0x0000000000000000000000000000000000000002", + ), + ( + H160::from_low_u64_be(15), + "0x000000000000000000000000000000000000000f", + ), + ( + H160::from_low_u64_be(16), + "0x0000000000000000000000000000000000000010", + ), + ( + H160::from_low_u64_be(1_000), + "0x00000000000000000000000000000000000003e8", + ), + ( + H160::from_low_u64_be(100_000), + "0x00000000000000000000000000000000000186a0", + ), + ( + H160::from_low_u64_be(u64::max_value()), + "0x000000000000000000000000ffffffffffffffff", + ), + ]; - for (number, expected) in tests { - assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); - assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); - } - } + for (number, expected) in tests { + assert_eq!(format!("{:?}", expected), 
ser::to_string_pretty(&number)); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + } - #[test] - fn test_h256() { - let tests = vec![ - (Default::default(), "0x0000000000000000000000000000000000000000000000000000000000000000"), - (H256::from_low_u64_be(2), "0x0000000000000000000000000000000000000000000000000000000000000002"), - (H256::from_low_u64_be(15), "0x000000000000000000000000000000000000000000000000000000000000000f"), - (H256::from_low_u64_be(16), "0x0000000000000000000000000000000000000000000000000000000000000010"), - (H256::from_low_u64_be(1_000), "0x00000000000000000000000000000000000000000000000000000000000003e8"), - (H256::from_low_u64_be(100_000), "0x00000000000000000000000000000000000000000000000000000000000186a0"), - (H256::from_low_u64_be(u64::max_value()), "0x000000000000000000000000000000000000000000000000ffffffffffffffff"), - ]; + #[test] + fn test_h256() { + let tests = vec![ + ( + Default::default(), + "0x0000000000000000000000000000000000000000000000000000000000000000", + ), + ( + H256::from_low_u64_be(2), + "0x0000000000000000000000000000000000000000000000000000000000000002", + ), + ( + H256::from_low_u64_be(15), + "0x000000000000000000000000000000000000000000000000000000000000000f", + ), + ( + H256::from_low_u64_be(16), + "0x0000000000000000000000000000000000000000000000000000000000000010", + ), + ( + H256::from_low_u64_be(1_000), + "0x00000000000000000000000000000000000000000000000000000000000003e8", + ), + ( + H256::from_low_u64_be(100_000), + "0x00000000000000000000000000000000000000000000000000000000000186a0", + ), + ( + H256::from_low_u64_be(u64::max_value()), + "0x000000000000000000000000000000000000000000000000ffffffffffffffff", + ), + ]; - for (number, expected) in tests { - assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); - assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); - } - } + for (number, expected) in tests { + assert_eq!(format!("{:?}", expected), 
ser::to_string_pretty(&number)); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } + } - #[test] - fn test_invalid() { - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x000000000000000000000000000000000000000000000000000000000000000g\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0x00000000000000000000000000000000000000000000000000000000000000000\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); - assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); - } + #[test] + fn test_invalid() { + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x000000000000000000000000000000000000000000000000000000000000000g\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::( + "\"0x00000000000000000000000000000000000000000000000000000000000000000\"" + ) + .unwrap_err() + .is_data()); + assert!(ser::from_str::("\"\"").unwrap_err().is_data()); + assert!(ser::from_str::("\"0\"").unwrap_err().is_data()); + assert!(ser::from_str::("\"10\"").unwrap_err().is_data()); + } } diff --git a/primitives/core/src/hasher.rs b/primitives/core/src/hasher.rs index 28da432da7..f91e6391d1 100644 --- a/primitives/core/src/hasher.rs +++ b/primitives/core/src/hasher.rs @@ -17,21 +17,21 @@ //! 
Substrate Blake2b Hasher implementation pub mod blake2 { - use hash_db::Hasher; - use hash256_std_hasher::Hash256StdHasher; - use crate::hash::H256; - - /// Concrete implementation of Hasher using Blake2b 256-bit hashes - #[derive(Debug)] - pub struct Blake2Hasher; - - impl Hasher for Blake2Hasher { - type Out = H256; - type StdHasher = Hash256StdHasher; - const LENGTH: usize = 32; - - fn hash(x: &[u8]) -> Self::Out { - crate::hashing::blake2_256(x).into() - } - } + use crate::hash::H256; + use hash256_std_hasher::Hash256StdHasher; + use hash_db::Hasher; + + /// Concrete implementation of Hasher using Blake2b 256-bit hashes + #[derive(Debug)] + pub struct Blake2Hasher; + + impl Hasher for Blake2Hasher { + type Out = H256; + type StdHasher = Hash256StdHasher; + const LENGTH: usize = 32; + + fn hash(x: &[u8]) -> Self::Out { + crate::hashing::blake2_256(x).into() + } + } } diff --git a/primitives/core/src/hashing.rs b/primitives/core/src/hashing.rs index d958da6c32..83702e0f06 100644 --- a/primitives/core/src/hashing.rs +++ b/primitives/core/src/hashing.rs @@ -23,133 +23,133 @@ use twox_hash; /// Do a Blake2 512-bit hash and place result in `dest`. pub fn blake2_512_into(data: &[u8], dest: &mut [u8; 64]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(64, &[], data).as_bytes()); + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(64, &[], data).as_bytes()); } /// Do a Blake2 512-bit hash and return result. pub fn blake2_512(data: &[u8]) -> [u8; 64] { - let mut r = [0; 64]; - blake2_512_into(data, &mut r); - r + let mut r = [0; 64]; + blake2_512_into(data, &mut r); + r } /// Do a Blake2 256-bit hash and place result in `dest`. pub fn blake2_256_into(data: &[u8], dest: &mut [u8; 32]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(32, &[], data).as_bytes()); } /// Do a Blake2 256-bit hash and return result. 
pub fn blake2_256(data: &[u8]) -> [u8; 32] { - let mut r = [0; 32]; - blake2_256_into(data, &mut r); - r + let mut r = [0; 32]; + blake2_256_into(data, &mut r); + r } /// Do a Blake2 128-bit hash and place result in `dest`. pub fn blake2_128_into(data: &[u8], dest: &mut [u8; 16]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(16, &[], data).as_bytes()); + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(16, &[], data).as_bytes()); } /// Do a Blake2 128-bit hash and return result. pub fn blake2_128(data: &[u8]) -> [u8; 16] { - let mut r = [0; 16]; - blake2_128_into(data, &mut r); - r + let mut r = [0; 16]; + blake2_128_into(data, &mut r); + r } /// Do a Blake2 64-bit hash and place result in `dest`. pub fn blake2_64_into(data: &[u8], dest: &mut [u8; 8]) { - dest.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], data).as_bytes()); + dest.copy_from_slice(blake2_rfc::blake2b::blake2b(8, &[], data).as_bytes()); } /// Do a Blake2 64-bit hash and return result. pub fn blake2_64(data: &[u8]) -> [u8; 8] { - let mut r = [0; 8]; - blake2_64_into(data, &mut r); - r + let mut r = [0; 8]; + blake2_64_into(data, &mut r); + r } /// Do a XX 64-bit hash and place result in `dest`. pub fn twox_64_into(data: &[u8], dest: &mut [u8; 8]) { - use ::core::hash::Hasher; - let mut h0 = twox_hash::XxHash::with_seed(0); - h0.write(data); - let r0 = h0.finish(); - use byteorder::{ByteOrder, LittleEndian}; - LittleEndian::write_u64(&mut dest[0..8], r0); + use ::core::hash::Hasher; + let mut h0 = twox_hash::XxHash::with_seed(0); + h0.write(data); + let r0 = h0.finish(); + use byteorder::{ByteOrder, LittleEndian}; + LittleEndian::write_u64(&mut dest[0..8], r0); } /// Do a XX 64-bit hash and return result. pub fn twox_64(data: &[u8]) -> [u8; 8] { - let mut r: [u8; 8] = [0; 8]; - twox_64_into(data, &mut r); - r + let mut r: [u8; 8] = [0; 8]; + twox_64_into(data, &mut r); + r } /// Do a XX 128-bit hash and place result in `dest`. 
pub fn twox_128_into(data: &[u8], dest: &mut [u8; 16]) { - use ::core::hash::Hasher; - let mut h0 = twox_hash::XxHash::with_seed(0); - let mut h1 = twox_hash::XxHash::with_seed(1); - h0.write(data); - h1.write(data); - let r0 = h0.finish(); - let r1 = h1.finish(); - use byteorder::{ByteOrder, LittleEndian}; - LittleEndian::write_u64(&mut dest[0..8], r0); - LittleEndian::write_u64(&mut dest[8..16], r1); + use ::core::hash::Hasher; + let mut h0 = twox_hash::XxHash::with_seed(0); + let mut h1 = twox_hash::XxHash::with_seed(1); + h0.write(data); + h1.write(data); + let r0 = h0.finish(); + let r1 = h1.finish(); + use byteorder::{ByteOrder, LittleEndian}; + LittleEndian::write_u64(&mut dest[0..8], r0); + LittleEndian::write_u64(&mut dest[8..16], r1); } /// Do a XX 128-bit hash and return result. pub fn twox_128(data: &[u8]) -> [u8; 16] { - let mut r: [u8; 16] = [0; 16]; - twox_128_into(data, &mut r); - r + let mut r: [u8; 16] = [0; 16]; + twox_128_into(data, &mut r); + r } /// Do a XX 256-bit hash and place result in `dest`. 
pub fn twox_256_into(data: &[u8], dest: &mut [u8; 32]) { - use ::core::hash::Hasher; - use byteorder::{ByteOrder, LittleEndian}; - let mut h0 = twox_hash::XxHash::with_seed(0); - let mut h1 = twox_hash::XxHash::with_seed(1); - let mut h2 = twox_hash::XxHash::with_seed(2); - let mut h3 = twox_hash::XxHash::with_seed(3); - h0.write(data); - h1.write(data); - h2.write(data); - h3.write(data); - let r0 = h0.finish(); - let r1 = h1.finish(); - let r2 = h2.finish(); - let r3 = h3.finish(); - LittleEndian::write_u64(&mut dest[0..8], r0); - LittleEndian::write_u64(&mut dest[8..16], r1); - LittleEndian::write_u64(&mut dest[16..24], r2); - LittleEndian::write_u64(&mut dest[24..32], r3); + use ::core::hash::Hasher; + use byteorder::{ByteOrder, LittleEndian}; + let mut h0 = twox_hash::XxHash::with_seed(0); + let mut h1 = twox_hash::XxHash::with_seed(1); + let mut h2 = twox_hash::XxHash::with_seed(2); + let mut h3 = twox_hash::XxHash::with_seed(3); + h0.write(data); + h1.write(data); + h2.write(data); + h3.write(data); + let r0 = h0.finish(); + let r1 = h1.finish(); + let r2 = h2.finish(); + let r3 = h3.finish(); + LittleEndian::write_u64(&mut dest[0..8], r0); + LittleEndian::write_u64(&mut dest[8..16], r1); + LittleEndian::write_u64(&mut dest[16..24], r2); + LittleEndian::write_u64(&mut dest[24..32], r3); } /// Do a XX 256-bit hash and return result. pub fn twox_256(data: &[u8]) -> [u8; 32] { - let mut r: [u8; 32] = [0; 32]; - twox_256_into(data, &mut r); - r + let mut r: [u8; 32] = [0; 32]; + twox_256_into(data, &mut r); + r } /// Do a keccak 256-bit hash and return result. pub fn keccak_256(data: &[u8]) -> [u8; 32] { - let mut keccak = Keccak::v256(); - keccak.update(data); - let mut output = [0u8; 32]; - keccak.finalize(&mut output); - output + let mut keccak = Keccak::v256(); + keccak.update(data); + let mut output = [0u8; 32]; + keccak.finalize(&mut output); + output } /// Do a sha2 256-bit hash and return result. 
pub fn sha2_256(data: &[u8]) -> [u8; 32] { - let mut hasher = Sha256::new(); - hasher.input(data); - let mut output = [0u8; 32]; - output.copy_from_slice(&hasher.result()); - output + let mut hasher = Sha256::new(); + hasher.input(data); + let mut output = [0u8; 32]; + output.copy_from_slice(&hasher.result()); + output } diff --git a/primitives/core/src/hexdisplay.rs b/primitives/core/src/hexdisplay.rs index 14fedc205c..952f84303d 100644 --- a/primitives/core/src/hexdisplay.rs +++ b/primitives/core/src/hexdisplay.rs @@ -20,54 +20,62 @@ pub struct HexDisplay<'a>(&'a [u8]); impl<'a> HexDisplay<'a> { - /// Create new instance that will display `d` as a hex string when displayed. - pub fn from(d: &'a R) -> Self { HexDisplay(d.as_bytes_ref()) } + /// Create new instance that will display `d` as a hex string when displayed. + pub fn from(d: &'a R) -> Self { + HexDisplay(d.as_bytes_ref()) + } } impl<'a> sp_std::fmt::Display for HexDisplay<'a> { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> Result<(), sp_std::fmt::Error> { - if self.0.len() < 1027 { - for byte in self.0 { - f.write_fmt(format_args!("{:02x}", byte))?; - } - } else { - for byte in &self.0[0..512] { - f.write_fmt(format_args!("{:02x}", byte))?; - } - f.write_str("...")?; - for byte in &self.0[self.0.len() - 512..] { - f.write_fmt(format_args!("{:02x}", byte))?; - } - } - Ok(()) - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> Result<(), sp_std::fmt::Error> { + if self.0.len() < 1027 { + for byte in self.0 { + f.write_fmt(format_args!("{:02x}", byte))?; + } + } else { + for byte in &self.0[0..512] { + f.write_fmt(format_args!("{:02x}", byte))?; + } + f.write_str("...")?; + for byte in &self.0[self.0.len() - 512..] 
{ + f.write_fmt(format_args!("{:02x}", byte))?; + } + } + Ok(()) + } } impl<'a> sp_std::fmt::Debug for HexDisplay<'a> { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> Result<(), sp_std::fmt::Error> { - for byte in self.0 { - f.write_fmt(format_args!("{:02x}", byte))?; - } - Ok(()) - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> Result<(), sp_std::fmt::Error> { + for byte in self.0 { + f.write_fmt(format_args!("{:02x}", byte))?; + } + Ok(()) + } } /// Simple trait to transform various types to `&[u8]` pub trait AsBytesRef { - /// Transform `self` into `&[u8]`. - fn as_bytes_ref(&self) -> &[u8]; + /// Transform `self` into `&[u8]`. + fn as_bytes_ref(&self) -> &[u8]; } impl AsBytesRef for &[u8] { - fn as_bytes_ref(&self) -> &[u8] { self } + fn as_bytes_ref(&self) -> &[u8] { + self + } } impl AsBytesRef for [u8] { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } impl AsBytesRef for Vec { - fn as_bytes_ref(&self) -> &[u8] { &self } + fn as_bytes_ref(&self) -> &[u8] { + &self + } } macro_rules! impl_non_endians { @@ -78,25 +86,27 @@ macro_rules! impl_non_endians { )* } } -impl_non_endians!([u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], - [u8; 10], [u8; 12], [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], - [u8; 48], [u8; 56], [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128]); +impl_non_endians!( + [u8; 1], [u8; 2], [u8; 3], [u8; 4], [u8; 5], [u8; 6], [u8; 7], [u8; 8], [u8; 10], [u8; 12], + [u8; 14], [u8; 16], [u8; 20], [u8; 24], [u8; 28], [u8; 32], [u8; 40], [u8; 48], [u8; 56], + [u8; 64], [u8; 65], [u8; 80], [u8; 96], [u8; 112], [u8; 128] +); /// Format into ASCII + # + hex, suitable for storage key preimages. 
pub fn ascii_format(asciish: &[u8]) -> String { - let mut r = String::new(); - let mut latch = false; - for c in asciish { - match (latch, *c) { - (false, 32..=127) => r.push(*c as char), - _ => { - if !latch { - r.push('#'); - latch = true; - } - r.push_str(&format!("{:02x}", *c)); - } - } - } - r + let mut r = String::new(); + let mut latch = false; + for c in asciish { + match (latch, *c) { + (false, 32..=127) => r.push(*c as char), + _ => { + if !latch { + r.push('#'); + latch = true; + } + r.push_str(&format!("{:02x}", *c)); + } + } + } + r } diff --git a/primitives/core/src/lib.rs b/primitives/core/src/lib.rs index 8d5ad7daae..c8e70e176f 100644 --- a/primitives/core/src/lib.rs +++ b/primitives/core/src/lib.rs @@ -17,7 +17,6 @@ //! Shareable Substrate types. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] /// Initialize a key-value collection from array. @@ -31,16 +30,16 @@ macro_rules! map { ); } -use sp_std::prelude::*; -use sp_std::ops::Deref; +#[doc(hidden)] +pub use codec::{Decode, Encode}; #[cfg(feature = "std")] -use std::borrow::Cow; +pub use serde; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; +use sp_std::ops::Deref; +use sp_std::prelude::*; #[cfg(feature = "std")] -pub use serde; -#[doc(hidden)] -pub use codec::{Encode, Decode}; +use std::borrow::Cow; pub use sp_debug_derive::RuntimeDebug; @@ -50,38 +49,38 @@ pub use impl_serde::serialize as bytes; #[cfg(feature = "full_crypto")] pub mod hashing; #[cfg(feature = "full_crypto")] -pub use hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256, keccak_256}; +pub use hashing::{blake2_128, blake2_256, keccak_256, twox_128, twox_256, twox_64}; +pub mod crypto; #[cfg(feature = "std")] pub mod hexdisplay; -pub mod crypto; pub mod u32_trait; -pub mod ed25519; -pub mod sr25519; +mod changes_trie; pub mod ecdsa; +pub mod ed25519; pub mod hash; #[cfg(feature = "std")] mod hasher; pub mod offchain; pub mod sandbox; -pub mod uint; 
-mod changes_trie; +pub mod sr25519; #[cfg(feature = "std")] -pub mod traits; +pub mod tasks; pub mod testing; #[cfg(feature = "std")] -pub mod tasks; +pub mod traits; +pub mod uint; -pub use self::hash::{H160, H256, H512, convert_hash}; +pub use self::hash::{convert_hash, H160, H256, H512}; pub use self::uint::{U256, U512}; pub use changes_trie::{ChangesTrieConfiguration, ChangesTrieConfigurationRange}; #[cfg(feature = "full_crypto")] pub use crypto::{DeriveJunction, Pair, Public}; -pub use hash_db::Hasher; #[cfg(feature = "std")] pub use self::hasher::blake2::Blake2Hasher; +pub use hash_db::Hasher; pub use sp_storage as storage; @@ -90,58 +89,63 @@ pub use sp_std; /// Context for executing a call into the runtime. pub enum ExecutionContext { - /// Context for general importing (including own blocks). - Importing, - /// Context used when syncing the blockchain. - Syncing, - /// Context used for block construction. - BlockConstruction, - /// Context used for offchain calls. - /// - /// This allows passing offchain extension and customizing available capabilities. - OffchainCall(Option<(Box, offchain::Capabilities)>), + /// Context for general importing (including own blocks). + Importing, + /// Context used when syncing the blockchain. + Syncing, + /// Context used for block construction. + BlockConstruction, + /// Context used for offchain calls. + /// + /// This allows passing offchain extension and customizing available capabilities. + OffchainCall(Option<(Box, offchain::Capabilities)>), } impl ExecutionContext { - /// Returns the capabilities of particular context. - pub fn capabilities(&self) -> offchain::Capabilities { - use ExecutionContext::*; - - match self { - Importing | Syncing | BlockConstruction => - offchain::Capabilities::none(), - // Enable keystore by default for offchain calls. 
CC @bkchr - OffchainCall(None) => [offchain::Capability::Keystore][..].into(), - OffchainCall(Some((_, capabilities))) => *capabilities, - } - } + /// Returns the capabilities of particular context. + pub fn capabilities(&self) -> offchain::Capabilities { + use ExecutionContext::*; + + match self { + Importing | Syncing | BlockConstruction => offchain::Capabilities::none(), + // Enable keystore by default for offchain calls. CC @bkchr + OffchainCall(None) => [offchain::Capability::Keystore][..].into(), + OffchainCall(Some((_, capabilities))) => *capabilities, + } + } } /// Hex-serialized shim for `Vec`. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord))] -pub struct Bytes(#[cfg_attr(feature = "std", serde(with="bytes"))] pub Vec); +pub struct Bytes(#[cfg_attr(feature = "std", serde(with = "bytes"))] pub Vec); impl From> for Bytes { - fn from(s: Vec) -> Self { Bytes(s) } + fn from(s: Vec) -> Self { + Bytes(s) + } } impl From for Bytes { - fn from(s: OpaqueMetadata) -> Self { Bytes(s.0) } + fn from(s: OpaqueMetadata) -> Self { + Bytes(s.0) + } } impl Deref for Bytes { - type Target = [u8]; - fn deref(&self) -> &[u8] { &self.0[..] } + type Target = [u8]; + fn deref(&self) -> &[u8] { + &self.0[..] + } } #[cfg(feature = "std")] impl sp_std::str::FromStr for Bytes { - type Err = bytes::FromHexError; + type Err = bytes::FromHexError; - fn from_str(s: &str) -> Result { - bytes::from_hex(s).map(Bytes) - } + fn from_str(s: &str) -> Result { + bytes::from_hex(s).map(Bytes) + } } /// Stores the encoded `RuntimeMetadata` for the native side as opaque type. @@ -149,66 +153,67 @@ impl sp_std::str::FromStr for Bytes { pub struct OpaqueMetadata(Vec); impl OpaqueMetadata { - /// Creates a new instance with the given metadata blob. - pub fn new(metadata: Vec) -> Self { - OpaqueMetadata(metadata) - } + /// Creates a new instance with the given metadata blob. 
+ pub fn new(metadata: Vec) -> Self { + OpaqueMetadata(metadata) + } } impl sp_std::ops::Deref for OpaqueMetadata { - type Target = Vec; + type Target = Vec; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } /// Something that is either a native or an encoded value. #[cfg(feature = "std")] pub enum NativeOrEncoded { - /// The native representation. - Native(R), - /// The encoded representation. - Encoded(Vec) + /// The native representation. + Native(R), + /// The encoded representation. + Encoded(Vec), } #[cfg(feature = "std")] impl sp_std::fmt::Debug for NativeOrEncoded { - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - hexdisplay::HexDisplay::from(&self.as_encoded().as_ref()).fmt(f) - } + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + hexdisplay::HexDisplay::from(&self.as_encoded().as_ref()).fmt(f) + } } #[cfg(feature = "std")] impl NativeOrEncoded { - /// Return the value as the encoded format. - pub fn as_encoded(&self) -> Cow<'_, [u8]> { - match self { - NativeOrEncoded::Encoded(e) => Cow::Borrowed(e.as_slice()), - NativeOrEncoded::Native(n) => Cow::Owned(n.encode()), - } - } - - /// Return the value as the encoded format. - pub fn into_encoded(self) -> Vec { - match self { - NativeOrEncoded::Encoded(e) => e, - NativeOrEncoded::Native(n) => n.encode(), - } - } + /// Return the value as the encoded format. + pub fn as_encoded(&self) -> Cow<'_, [u8]> { + match self { + NativeOrEncoded::Encoded(e) => Cow::Borrowed(e.as_slice()), + NativeOrEncoded::Native(n) => Cow::Owned(n.encode()), + } + } + + /// Return the value as the encoded format. 
+ pub fn into_encoded(self) -> Vec { + match self { + NativeOrEncoded::Encoded(e) => e, + NativeOrEncoded::Native(n) => n.encode(), + } + } } #[cfg(feature = "std")] impl PartialEq for NativeOrEncoded { - fn eq(&self, other: &Self) -> bool { - match (self, other) { - (NativeOrEncoded::Native(l), NativeOrEncoded::Native(r)) => l == r, - (NativeOrEncoded::Native(n), NativeOrEncoded::Encoded(e)) | - (NativeOrEncoded::Encoded(e), NativeOrEncoded::Native(n)) => - Some(n) == codec::Decode::decode(&mut &e[..]).ok().as_ref(), - (NativeOrEncoded::Encoded(l), NativeOrEncoded::Encoded(r)) => l == r, - } - } + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (NativeOrEncoded::Native(l), NativeOrEncoded::Native(r)) => l == r, + (NativeOrEncoded::Native(n), NativeOrEncoded::Encoded(e)) + | (NativeOrEncoded::Encoded(e), NativeOrEncoded::Native(n)) => { + Some(n) == codec::Decode::decode(&mut &e[..]).ok().as_ref() + } + (NativeOrEncoded::Encoded(l), NativeOrEncoded::Encoded(r)) => l == r, + } + } } /// A value that is never in a native representation. @@ -219,10 +224,10 @@ pub enum NeverNativeValue {} #[cfg(feature = "std")] impl codec::Encode for NeverNativeValue { - fn encode(&self) -> Vec { - // The enum is not constructable, so this function should never be callable! - unreachable!() - } + fn encode(&self) -> Vec { + // The enum is not constructable, so this function should never be callable! + unreachable!() + } } #[cfg(feature = "std")] @@ -230,15 +235,15 @@ impl codec::EncodeLike for NeverNativeValue {} #[cfg(feature = "std")] impl codec::Decode for NeverNativeValue { - fn decode(_: &mut I) -> Result { - Err("`NeverNativeValue` should never be decoded".into()) - } + fn decode(_: &mut I) -> Result { + Err("`NeverNativeValue` should never be decoded".into()) + } } /// Provide a simple 4 byte identifier for a type. pub trait TypeId { - /// Simple 4 byte identifier. - const TYPE_ID: [u8; 4]; + /// Simple 4 byte identifier. 
+ const TYPE_ID: [u8; 4]; } /// A log level matching the one from `log` crate. @@ -246,54 +251,54 @@ pub trait TypeId { /// Used internally by `sp_io::log` method. #[derive(Encode, Decode, sp_runtime_interface::pass_by::PassByEnum, Copy, Clone)] pub enum LogLevel { - /// `Error` log level. - Error = 1, - /// `Warn` log level. - Warn = 2, - /// `Info` log level. - Info = 3, - /// `Debug` log level. - Debug = 4, - /// `Trace` log level. - Trace = 5, + /// `Error` log level. + Error = 1, + /// `Warn` log level. + Warn = 2, + /// `Info` log level. + Info = 3, + /// `Debug` log level. + Debug = 4, + /// `Trace` log level. + Trace = 5, } impl From for LogLevel { - fn from(val: u32) -> Self { - match val { - x if x == LogLevel::Warn as u32 => LogLevel::Warn, - x if x == LogLevel::Info as u32 => LogLevel::Info, - x if x == LogLevel::Debug as u32 => LogLevel::Debug, - x if x == LogLevel::Trace as u32 => LogLevel::Trace, - _ => LogLevel::Error, - } - } + fn from(val: u32) -> Self { + match val { + x if x == LogLevel::Warn as u32 => LogLevel::Warn, + x if x == LogLevel::Info as u32 => LogLevel::Info, + x if x == LogLevel::Debug as u32 => LogLevel::Debug, + x if x == LogLevel::Trace as u32 => LogLevel::Trace, + _ => LogLevel::Error, + } + } } impl From for LogLevel { - fn from(l: log::Level) -> Self { - use log::Level::*; - match l { - Error => Self::Error, - Warn => Self::Warn, - Info => Self::Info, - Debug => Self::Debug, - Trace => Self::Trace, - } - } + fn from(l: log::Level) -> Self { + use log::Level::*; + match l { + Error => Self::Error, + Warn => Self::Warn, + Info => Self::Info, + Debug => Self::Debug, + Trace => Self::Trace, + } + } } impl From for log::Level { - fn from(l: LogLevel) -> Self { - use self::LogLevel::*; - match l { - Error => Self::Error, - Warn => Self::Warn, - Info => Self::Info, - Debug => Self::Debug, - Trace => Self::Trace, - } - } + fn from(l: LogLevel) -> Self { + use self::LogLevel::*; + match l { + Error => Self::Error, + Warn => Self::Warn, 
+ Info => Self::Info, + Debug => Self::Debug, + Trace => Self::Trace, + } + } } /// Encodes the given value into a buffer and returns the pointer and the length as a single `u64`. @@ -304,18 +309,18 @@ impl From for log::Level { /// The low `32bits` are reserved for the pointer, followed by `32bit` for the length. #[cfg(not(feature = "std"))] pub fn to_substrate_wasm_fn_return_value(value: &impl Encode) -> u64 { - let encoded = value.encode(); + let encoded = value.encode(); - let ptr = encoded.as_ptr() as u64; - let length = encoded.len() as u64; - let res = ptr | (length << 32); + let ptr = encoded.as_ptr() as u64; + let length = encoded.len() as u64; + let res = ptr | (length << 32); - // Leak the output vector to avoid it being freed. - // This is fine in a WASM context since the heap - // will be discarded after the call. - sp_std::mem::forget(encoded); + // Leak the output vector to avoid it being freed. + // This is fine in a WASM context since the heap + // will be discarded after the call. + sp_std::mem::forget(encoded); - res + res } /// Macro for creating `Maybe*` marker traits. diff --git a/primitives/core/src/offchain/mod.rs b/primitives/core/src/offchain/mod.rs index c393b0f9f8..32ec2da409 100644 --- a/primitives/core/src/offchain/mod.rs +++ b/primitives/core/src/offchain/mod.rs @@ -16,10 +16,13 @@ //! Offchain workers types -use codec::{Encode, Decode}; -use sp_std::{prelude::{Vec, Box}, convert::TryFrom}; use crate::RuntimeDebug; -use sp_runtime_interface::pass_by::{PassByCodec, PassByInner, PassByEnum}; +use codec::{Decode, Encode}; +use sp_runtime_interface::pass_by::{PassByCodec, PassByEnum, PassByInner}; +use sp_std::{ + convert::TryFrom, + prelude::{Box, Vec}, +}; pub use crate::crypto::KeyTypeId; @@ -30,22 +33,22 @@ pub mod testing; /// Offchain workers local storage. pub trait OffchainStorage: Clone + Send + Sync { - /// Persist a value in storage under given key and prefix. 
- fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]); - - /// Retrieve a value from storage under given key and prefix. - fn get(&self, prefix: &[u8], key: &[u8]) -> Option>; - - /// Replace the value in storage if given old_value matches the current one. - /// - /// Returns `true` if the value has been set and false otherwise. - fn compare_and_set( - &mut self, - prefix: &[u8], - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool; + /// Persist a value in storage under given key and prefix. + fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]); + + /// Retrieve a value from storage under given key and prefix. + fn get(&self, prefix: &[u8], key: &[u8]) -> Option>; + + /// Replace the value in storage if given old_value matches the current one. + /// + /// Returns `true` if the value has been set and false otherwise. + fn compare_and_set( + &mut self, + prefix: &[u8], + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool; } /// A type of supported crypto. @@ -53,36 +56,36 @@ pub trait OffchainStorage: Clone + Send + Sync { #[cfg_attr(feature = "std", derive(serde::Serialize, serde::Deserialize))] #[repr(C)] pub enum StorageKind { - /// Persistent storage is non-revertible and not fork-aware. It means that any value - /// set by the offchain worker triggered at block `N(hash1)` is persisted even - /// if that block is reverted as non-canonical and is available for the worker - /// that is re-run at block `N(hash2)`. - /// This storage can be used by offchain workers to handle forks - /// and coordinate offchain workers running on different forks. - PERSISTENT = 1, - /// Local storage is revertible and fork-aware. It means that any value - /// set by the offchain worker triggered at block `N(hash1)` is reverted - /// if that block is reverted as non-canonical and is NOT available for the worker - /// that is re-run at block `N(hash2)`. - LOCAL = 2, + /// Persistent storage is non-revertible and not fork-aware. 
It means that any value + /// set by the offchain worker triggered at block `N(hash1)` is persisted even + /// if that block is reverted as non-canonical and is available for the worker + /// that is re-run at block `N(hash2)`. + /// This storage can be used by offchain workers to handle forks + /// and coordinate offchain workers running on different forks. + PERSISTENT = 1, + /// Local storage is revertible and fork-aware. It means that any value + /// set by the offchain worker triggered at block `N(hash1)` is reverted + /// if that block is reverted as non-canonical and is NOT available for the worker + /// that is re-run at block `N(hash2)`. + LOCAL = 2, } impl TryFrom for StorageKind { - type Error = (); - - fn try_from(kind: u32) -> Result { - match kind { - e if e == u32::from(StorageKind::PERSISTENT as u8) => Ok(StorageKind::PERSISTENT), - e if e == u32::from(StorageKind::LOCAL as u8) => Ok(StorageKind::LOCAL), - _ => Err(()), - } - } + type Error = (); + + fn try_from(kind: u32) -> Result { + match kind { + e if e == u32::from(StorageKind::PERSISTENT as u8) => Ok(StorageKind::PERSISTENT), + e if e == u32::from(StorageKind::LOCAL as u8) => Ok(StorageKind::LOCAL), + _ => Err(()), + } + } } impl From for u32 { - fn from(c: StorageKind) -> Self { - c as u8 as u32 - } + fn from(c: StorageKind) -> Self { + c as u8 as u32 + } } /// Opaque type for offchain http requests. @@ -91,85 +94,87 @@ impl From for u32 { pub struct HttpRequestId(pub u16); impl From for u32 { - fn from(c: HttpRequestId) -> Self { - c.0 as u32 - } + fn from(c: HttpRequestId) -> Self { + c.0 as u32 + } } /// An error enum returned by some http methods. #[derive(Clone, Copy, PartialEq, Eq, RuntimeDebug, Encode, Decode, PassByEnum)] #[repr(C)] pub enum HttpError { - /// The requested action couldn't been completed within a deadline. - DeadlineReached = 1, - /// There was an IO Error while processing the request. - IoError = 2, - /// The ID of the request is invalid in this context. 
- Invalid = 3, + /// The requested action couldn't been completed within a deadline. + DeadlineReached = 1, + /// There was an IO Error while processing the request. + IoError = 2, + /// The ID of the request is invalid in this context. + Invalid = 3, } impl TryFrom for HttpError { - type Error = (); - - fn try_from(error: u32) -> Result { - match error { - e if e == HttpError::DeadlineReached as u8 as u32 => Ok(HttpError::DeadlineReached), - e if e == HttpError::IoError as u8 as u32 => Ok(HttpError::IoError), - e if e == HttpError::Invalid as u8 as u32 => Ok(HttpError::Invalid), - _ => Err(()) - } - } + type Error = (); + + fn try_from(error: u32) -> Result { + match error { + e if e == HttpError::DeadlineReached as u8 as u32 => Ok(HttpError::DeadlineReached), + e if e == HttpError::IoError as u8 as u32 => Ok(HttpError::IoError), + e if e == HttpError::Invalid as u8 as u32 => Ok(HttpError::Invalid), + _ => Err(()), + } + } } impl From for u32 { - fn from(c: HttpError) -> Self { - c as u8 as u32 - } + fn from(c: HttpError) -> Self { + c as u8 as u32 + } } /// Status of the HTTP request #[derive(Clone, Copy, PartialEq, Eq, RuntimeDebug, Encode, Decode, PassByCodec)] pub enum HttpRequestStatus { - /// Deadline was reached while we waited for this request to finish. - /// - /// Note the deadline is controlled by the calling part, it not necessarily - /// means that the request has timed out. - DeadlineReached, - /// An error has occurred during the request, for example a timeout or the - /// remote has closed our socket. - /// - /// The request is now considered destroyed. To retry the request you need - /// to construct it again. - IoError, - /// The passed ID is invalid in this context. - Invalid, - /// The request has finished with given status code. - Finished(u16), + /// Deadline was reached while we waited for this request to finish. + /// + /// Note the deadline is controlled by the calling part, it not necessarily + /// means that the request has timed out. 
+ DeadlineReached, + /// An error has occurred during the request, for example a timeout or the + /// remote has closed our socket. + /// + /// The request is now considered destroyed. To retry the request you need + /// to construct it again. + IoError, + /// The passed ID is invalid in this context. + Invalid, + /// The request has finished with given status code. + Finished(u16), } impl From for u32 { - fn from(status: HttpRequestStatus) -> Self { - match status { - HttpRequestStatus::Invalid => 0, - HttpRequestStatus::DeadlineReached => 10, - HttpRequestStatus::IoError => 20, - HttpRequestStatus::Finished(code) => u32::from(code), - } - } + fn from(status: HttpRequestStatus) -> Self { + match status { + HttpRequestStatus::Invalid => 0, + HttpRequestStatus::DeadlineReached => 10, + HttpRequestStatus::IoError => 20, + HttpRequestStatus::Finished(code) => u32::from(code), + } + } } impl TryFrom for HttpRequestStatus { - type Error = (); - - fn try_from(status: u32) -> Result { - match status { - 0 => Ok(HttpRequestStatus::Invalid), - 10 => Ok(HttpRequestStatus::DeadlineReached), - 20 => Ok(HttpRequestStatus::IoError), - 100..=999 => u16::try_from(status).map(HttpRequestStatus::Finished).map_err(|_| ()), - _ => Err(()), - } - } + type Error = (); + + fn try_from(status: u32) -> Result { + match status { + 0 => Ok(HttpRequestStatus::Invalid), + 10 => Ok(HttpRequestStatus::DeadlineReached), + 20 => Ok(HttpRequestStatus::IoError), + 100..=999 => u16::try_from(status) + .map(HttpRequestStatus::Finished) + .map_err(|_| ()), + _ => Err(()), + } + } } /// A blob to hold information about the local node's network state @@ -177,10 +182,10 @@ impl TryFrom for HttpRequestStatus { #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug, PassByCodec)] #[cfg_attr(feature = "std", derive(Default))] pub struct OpaqueNetworkState { - /// PeerId of the local node. - pub peer_id: OpaquePeerId, - /// List of addresses the node knows it can be reached as. 
- pub external_addresses: Vec, + /// PeerId of the local node. + pub peer_id: OpaquePeerId, + /// List of addresses the node knows it can be reached as. + pub external_addresses: Vec, } /// Simple blob to hold a `PeerId` without committing to its format. @@ -188,10 +193,10 @@ pub struct OpaqueNetworkState { pub struct OpaquePeerId(pub Vec); impl OpaquePeerId { - /// Create new `OpaquePeerId` - pub fn new(vec: Vec) -> Self { - OpaquePeerId(vec) - } + /// Create new `OpaquePeerId` + pub fn new(vec: Vec) -> Self { + OpaquePeerId(vec) + } } /// Simple blob to hold a `Multiaddr` without committing to its format. @@ -199,77 +204,81 @@ impl OpaquePeerId { pub struct OpaqueMultiaddr(pub Vec); impl OpaqueMultiaddr { - /// Create new `OpaqueMultiaddr` - pub fn new(vec: Vec) -> Self { - OpaqueMultiaddr(vec) - } + /// Create new `OpaqueMultiaddr` + pub fn new(vec: Vec) -> Self { + OpaqueMultiaddr(vec) + } } /// Opaque timestamp type -#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode)] +#[derive( + Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode, +)] pub struct Timestamp(u64); /// Duration type -#[derive(Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode)] +#[derive( + Clone, Copy, PartialEq, Eq, Ord, PartialOrd, Default, RuntimeDebug, PassByInner, Encode, Decode, +)] pub struct Duration(u64); impl Duration { - /// Create new duration representing given number of milliseconds. - pub fn from_millis(millis: u64) -> Self { - Duration(millis) - } - - /// Returns number of milliseconds this Duration represents. - pub fn millis(&self) -> u64 { - self.0 - } + /// Create new duration representing given number of milliseconds. + pub fn from_millis(millis: u64) -> Self { + Duration(millis) + } + + /// Returns number of milliseconds this Duration represents. 
+ pub fn millis(&self) -> u64 { + self.0 + } } impl Timestamp { - /// Creates new `Timestamp` given unix timestamp in milliseconds. - pub fn from_unix_millis(millis: u64) -> Self { - Timestamp(millis) - } - - /// Increase the timestamp by given `Duration`. - pub fn add(&self, duration: Duration) -> Timestamp { - Timestamp(self.0.saturating_add(duration.0)) - } - - /// Decrease the timestamp by given `Duration` - pub fn sub(&self, duration: Duration) -> Timestamp { - Timestamp(self.0.saturating_sub(duration.0)) - } - - /// Returns a saturated difference (Duration) between two Timestamps. - pub fn diff(&self, other: &Self) -> Duration { - Duration(self.0.saturating_sub(other.0)) - } - - /// Return number of milliseconds since UNIX epoch. - pub fn unix_millis(&self) -> u64 { - self.0 - } + /// Creates new `Timestamp` given unix timestamp in milliseconds. + pub fn from_unix_millis(millis: u64) -> Self { + Timestamp(millis) + } + + /// Increase the timestamp by given `Duration`. + pub fn add(&self, duration: Duration) -> Timestamp { + Timestamp(self.0.saturating_add(duration.0)) + } + + /// Decrease the timestamp by given `Duration` + pub fn sub(&self, duration: Duration) -> Timestamp { + Timestamp(self.0.saturating_sub(duration.0)) + } + + /// Returns a saturated difference (Duration) between two Timestamps. + pub fn diff(&self, other: &Self) -> Duration { + Duration(self.0.saturating_sub(other.0)) + } + + /// Return number of milliseconds since UNIX epoch. + pub fn unix_millis(&self) -> u64 { + self.0 + } } /// Execution context extra capabilities. #[derive(Debug, PartialEq, Eq, Clone, Copy)] #[repr(u8)] pub enum Capability { - /// Access to transaction pool. - TransactionPool = 1, - /// External http calls. - Http = 2, - /// Keystore access. - Keystore = 4, - /// Randomness source. - Randomness = 8, - /// Access to opaque network state. - NetworkState = 16, - /// Access to offchain worker DB (read only). 
- OffchainWorkerDbRead = 32, - /// Access to offchain worker DB (writes). - OffchainWorkerDbWrite = 64, + /// Access to transaction pool. + TransactionPool = 1, + /// External http calls. + Http = 2, + /// Keystore access. + Keystore = 4, + /// Randomness source. + Randomness = 8, + /// Access to opaque network state. + NetworkState = 16, + /// Access to offchain worker DB (read only). + OffchainWorkerDbRead = 32, + /// Access to offchain worker DB (writes). + OffchainWorkerDbWrite = 64, } /// A set of capabilities @@ -277,411 +286,446 @@ pub enum Capability { pub struct Capabilities(u8); impl Capabilities { - /// Return an object representing an empty set of capabilities. - pub fn none() -> Self { - Self(0) - } - - /// Return an object representing all capabilities enabled. - pub fn all() -> Self { - Self(u8::max_value()) - } - - /// Return capabilities for rich offchain calls. - /// - /// Those calls should be allowed to sign and submit transactions - /// and access offchain workers database (but read only!). - pub fn rich_offchain_call() -> Self { - [ - Capability::TransactionPool, - Capability::Keystore, - Capability::OffchainWorkerDbRead, - ][..].into() - } - - /// Check if particular capability is enabled. - pub fn has(&self, capability: Capability) -> bool { - self.0 & capability as u8 != 0 - } - - /// Check if this capability object represents all capabilities. - pub fn has_all(&self) -> bool { - self == &Capabilities::all() - } + /// Return an object representing an empty set of capabilities. + pub fn none() -> Self { + Self(0) + } + + /// Return an object representing all capabilities enabled. + pub fn all() -> Self { + Self(u8::max_value()) + } + + /// Return capabilities for rich offchain calls. + /// + /// Those calls should be allowed to sign and submit transactions + /// and access offchain workers database (but read only!). 
+ pub fn rich_offchain_call() -> Self { + [ + Capability::TransactionPool, + Capability::Keystore, + Capability::OffchainWorkerDbRead, + ][..] + .into() + } + + /// Check if particular capability is enabled. + pub fn has(&self, capability: Capability) -> bool { + self.0 & capability as u8 != 0 + } + + /// Check if this capability object represents all capabilities. + pub fn has_all(&self) -> bool { + self == &Capabilities::all() + } } impl<'a> From<&'a [Capability]> for Capabilities { - fn from(list: &'a [Capability]) -> Self { - Capabilities(list.iter().fold(0_u8, |a, b| a | *b as u8)) - } + fn from(list: &'a [Capability]) -> Self { + Capabilities(list.iter().fold(0_u8, |a, b| a | *b as u8)) + } } /// An extended externalities for offchain workers. pub trait Externalities: Send { - /// Returns if the local node is a potential validator. - /// - /// Even if this function returns `true`, it does not mean that any keys are configured - /// and that the validator is registered in the chain. - fn is_validator(&self) -> bool; - - /// Returns information about the local node's network state. - fn network_state(&self) -> Result; - - /// Returns current UNIX timestamp (in millis) - fn timestamp(&mut self) -> Timestamp; - - /// Pause the execution until `deadline` is reached. - fn sleep_until(&mut self, deadline: Timestamp); - - /// Returns a random seed. - /// - /// This is a truly random non deterministic seed generated by host environment. - /// Obviously fine in the off-chain worker context. - fn random_seed(&mut self) -> [u8; 32]; - - /// Sets a value in the local storage. - /// - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It IS persisted between runs. - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]); - - /// Sets a value in the local storage if it matches current value. 
- /// - /// Since multiple offchain workers may be running concurrently, to prevent - /// data races use CAS to coordinate between them. - /// - /// Returns `true` if the value has been set, `false` otherwise. - /// - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It IS persisted between runs. - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool; - - /// Gets a value from the local storage. - /// - /// If the value does not exist in the storage `None` will be returned. - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It IS persisted between runs. - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option>; - - /// Initiates a http request given HTTP verb and the URL. - /// - /// Meta is a future-reserved field containing additional, parity-scale-codec encoded parameters. - /// Returns the id of newly started request. - /// - /// Returns an error if: - /// - No new request identifier could be allocated. - /// - The method or URI contain invalid characters. - /// - fn http_request_start( - &mut self, - method: &str, - uri: &str, - meta: &[u8] - ) -> Result; - - /// Append header to the request. - /// - /// Calling this function multiple times with the same header name continues appending new - /// headers. In other words, headers are never replaced. - /// - /// Returns an error if: - /// - The request identifier is invalid. - /// - You have called `http_request_write_body` on that request. - /// - The name or value contain invalid characters. - /// - /// An error doesn't poison the request, and you can continue as if the call had never been - /// made. 
- /// - fn http_request_add_header( - &mut self, - request_id: HttpRequestId, - name: &str, - value: &str - ) -> Result<(), ()>; - - /// Write a chunk of request body. - /// - /// Calling this function with a non-empty slice may or may not start the - /// HTTP request. Calling this function with an empty chunks finalizes the - /// request and always starts it. It is no longer valid to write more data - /// afterwards. - /// Passing `None` as deadline blocks forever. - /// - /// Returns an error if: - /// - The request identifier is invalid. - /// - `http_response_wait` has already been called on this request. - /// - The deadline is reached. - /// - An I/O error has happened, for example the remote has closed our - /// request. The request is then considered invalid. - /// - fn http_request_write_body( - &mut self, - request_id: HttpRequestId, - chunk: &[u8], - deadline: Option - ) -> Result<(), HttpError>; - - /// Block and wait for the responses for given requests. - /// - /// Returns a vector of request statuses (the len is the same as ids). - /// Note that if deadline is not provided the method will block indefinitely, - /// otherwise unready responses will produce `DeadlineReached` status. - /// - /// If a response returns an `IoError`, it is then considered destroyed. - /// Its id is then invalid. - /// - /// Passing `None` as deadline blocks forever. - fn http_response_wait( - &mut self, - ids: &[HttpRequestId], - deadline: Option - ) -> Vec; - - /// Read all response headers. - /// - /// Returns a vector of pairs `(HeaderKey, HeaderValue)`. - /// - /// Dispatches the request if it hasn't been done yet. It is no longer - /// valid to modify the headers or write data to the request. - /// - /// Returns an empty list if the identifier is unknown/invalid, hasn't - /// received a response, or has finished. - fn http_response_headers( - &mut self, - request_id: HttpRequestId - ) -> Vec<(Vec, Vec)>; - - /// Read a chunk of body response to given buffer. 
- /// - /// Dispatches the request if it hasn't been done yet. It is no longer - /// valid to modify the headers or write data to the request. - /// - /// Returns the number of bytes written or an error in case a deadline - /// is reached or server closed the connection. - /// Passing `None` as a deadline blocks forever. - /// - /// If `Ok(0)` or `Err(IoError)` is returned, the request is considered - /// destroyed. Doing another read or getting the response's headers, for - /// example, is then invalid. - /// - /// Returns an error if: - /// - The request identifier is invalid. - /// - The deadline is reached. - /// - An I/O error has happened, for example the remote has closed our - /// request. The request is then considered invalid. - /// - fn http_response_read_body( - &mut self, - request_id: HttpRequestId, - buffer: &mut [u8], - deadline: Option - ) -> Result; - + /// Returns if the local node is a potential validator. + /// + /// Even if this function returns `true`, it does not mean that any keys are configured + /// and that the validator is registered in the chain. + fn is_validator(&self) -> bool; + + /// Returns information about the local node's network state. + fn network_state(&self) -> Result; + + /// Returns current UNIX timestamp (in millis) + fn timestamp(&mut self) -> Timestamp; + + /// Pause the execution until `deadline` is reached. + fn sleep_until(&mut self, deadline: Timestamp); + + /// Returns a random seed. + /// + /// This is a truly random non deterministic seed generated by host environment. + /// Obviously fine in the off-chain worker context. + fn random_seed(&mut self) -> [u8; 32]; + + /// Sets a value in the local storage. + /// + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It IS persisted between runs. 
+ fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]); + + /// Sets a value in the local storage if it matches current value. + /// + /// Since multiple offchain workers may be running concurrently, to prevent + /// data races use CAS to coordinate between them. + /// + /// Returns `true` if the value has been set, `false` otherwise. + /// + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It IS persisted between runs. + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool; + + /// Gets a value from the local storage. + /// + /// If the value does not exist in the storage `None` will be returned. + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It IS persisted between runs. + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option>; + + /// Initiates a http request given HTTP verb and the URL. + /// + /// Meta is a future-reserved field containing additional, parity-scale-codec encoded parameters. + /// Returns the id of newly started request. + /// + /// Returns an error if: + /// - No new request identifier could be allocated. + /// - The method or URI contain invalid characters. + /// + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result; + + /// Append header to the request. + /// + /// Calling this function multiple times with the same header name continues appending new + /// headers. In other words, headers are never replaced. + /// + /// Returns an error if: + /// - The request identifier is invalid. + /// - You have called `http_request_write_body` on that request. + /// - The name or value contain invalid characters. 
+ /// + /// An error doesn't poison the request, and you can continue as if the call had never been + /// made. + /// + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()>; + + /// Write a chunk of request body. + /// + /// Calling this function with a non-empty slice may or may not start the + /// HTTP request. Calling this function with an empty chunks finalizes the + /// request and always starts it. It is no longer valid to write more data + /// afterwards. + /// Passing `None` as deadline blocks forever. + /// + /// Returns an error if: + /// - The request identifier is invalid. + /// - `http_response_wait` has already been called on this request. + /// - The deadline is reached. + /// - An I/O error has happened, for example the remote has closed our + /// request. The request is then considered invalid. + /// + fn http_request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option, + ) -> Result<(), HttpError>; + + /// Block and wait for the responses for given requests. + /// + /// Returns a vector of request statuses (the len is the same as ids). + /// Note that if deadline is not provided the method will block indefinitely, + /// otherwise unready responses will produce `DeadlineReached` status. + /// + /// If a response returns an `IoError`, it is then considered destroyed. + /// Its id is then invalid. + /// + /// Passing `None` as deadline blocks forever. + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec; + + /// Read all response headers. + /// + /// Returns a vector of pairs `(HeaderKey, HeaderValue)`. + /// + /// Dispatches the request if it hasn't been done yet. It is no longer + /// valid to modify the headers or write data to the request. + /// + /// Returns an empty list if the identifier is unknown/invalid, hasn't + /// received a response, or has finished. 
+ fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)>; + + /// Read a chunk of body response to given buffer. + /// + /// Dispatches the request if it hasn't been done yet. It is no longer + /// valid to modify the headers or write data to the request. + /// + /// Returns the number of bytes written or an error in case a deadline + /// is reached or server closed the connection. + /// Passing `None` as a deadline blocks forever. + /// + /// If `Ok(0)` or `Err(IoError)` is returned, the request is considered + /// destroyed. Doing another read or getting the response's headers, for + /// example, is then invalid. + /// + /// Returns an error if: + /// - The request identifier is invalid. + /// - The deadline is reached. + /// - An I/O error has happened, for example the remote has closed our + /// request. The request is then considered invalid. + /// + fn http_response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option, + ) -> Result; } impl Externalities for Box { - fn is_validator(&self) -> bool { - (& **self).is_validator() - } - - fn network_state(&self) -> Result { - (& **self).network_state() - } - - fn timestamp(&mut self) -> Timestamp { - (&mut **self).timestamp() - } - - fn sleep_until(&mut self, deadline: Timestamp) { - (&mut **self).sleep_until(deadline) - } - - fn random_seed(&mut self) -> [u8; 32] { - (&mut **self).random_seed() - } - - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - (&mut **self).local_storage_set(kind, key, value) - } - - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - (&mut **self).local_storage_compare_and_set(kind, key, old_value, new_value) - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - (&mut **self).local_storage_get(kind, key) - } - - fn http_request_start(&mut self, method: &str, uri: 
&str, meta: &[u8]) -> Result { - (&mut **self).http_request_start(method, uri, meta) - } - - fn http_request_add_header(&mut self, request_id: HttpRequestId, name: &str, value: &str) -> Result<(), ()> { - (&mut **self).http_request_add_header(request_id, name, value) - } - - fn http_request_write_body( - &mut self, - request_id: HttpRequestId, - chunk: &[u8], - deadline: Option - ) -> Result<(), HttpError> { - (&mut **self).http_request_write_body(request_id, chunk, deadline) - } - - fn http_response_wait(&mut self, ids: &[HttpRequestId], deadline: Option) -> Vec { - (&mut **self).http_response_wait(ids, deadline) - } - - fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { - (&mut **self).http_response_headers(request_id) - } - - fn http_response_read_body( - &mut self, - request_id: HttpRequestId, - buffer: &mut [u8], - deadline: Option - ) -> Result { - (&mut **self).http_response_read_body(request_id, buffer, deadline) - } + fn is_validator(&self) -> bool { + (&**self).is_validator() + } + + fn network_state(&self) -> Result { + (&**self).network_state() + } + + fn timestamp(&mut self) -> Timestamp { + (&mut **self).timestamp() + } + + fn sleep_until(&mut self, deadline: Timestamp) { + (&mut **self).sleep_until(deadline) + } + + fn random_seed(&mut self) -> [u8; 32] { + (&mut **self).random_seed() + } + + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + (&mut **self).local_storage_set(kind, key, value) + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + (&mut **self).local_storage_compare_and_set(kind, key, old_value, new_value) + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + (&mut **self).local_storage_get(kind, key) + } + + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { + (&mut **self).http_request_start(method, 
uri, meta) + } + + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { + (&mut **self).http_request_add_header(request_id, name, value) + } + + fn http_request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option, + ) -> Result<(), HttpError> { + (&mut **self).http_request_write_body(request_id, chunk, deadline) + } + + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { + (&mut **self).http_response_wait(ids, deadline) + } + + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { + (&mut **self).http_response_headers(request_id) + } + + fn http_response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option, + ) -> Result { + (&mut **self).http_response_read_body(request_id, buffer, deadline) + } } /// An `OffchainExternalities` implementation with limited capabilities. pub struct LimitedExternalities { - capabilities: Capabilities, - externalities: T, + capabilities: Capabilities, + externalities: T, } impl LimitedExternalities { - /// Create new externalities limited to given `capabilities`. - pub fn new(capabilities: Capabilities, externalities: T) -> Self { - Self { - capabilities, - externalities, - } - } - - /// Check if given capability is allowed. - /// - /// Panics in case it is not. - fn check(&self, capability: Capability, name: &'static str) { - if !self.capabilities.has(capability) { - panic!("Accessing a forbidden API: {}. No: {:?} capability.", name, capability); - } - } + /// Create new externalities limited to given `capabilities`. + pub fn new(capabilities: Capabilities, externalities: T) -> Self { + Self { + capabilities, + externalities, + } + } + + /// Check if given capability is allowed. + /// + /// Panics in case it is not. 
+ fn check(&self, capability: Capability, name: &'static str) { + if !self.capabilities.has(capability) { + panic!( + "Accessing a forbidden API: {}. No: {:?} capability.", + name, capability + ); + } + } } impl Externalities for LimitedExternalities { - fn is_validator(&self) -> bool { - self.check(Capability::Keystore, "is_validator"); - self.externalities.is_validator() - } - - fn network_state(&self) -> Result { - self.check(Capability::NetworkState, "network_state"); - self.externalities.network_state() - } - - fn timestamp(&mut self) -> Timestamp { - self.check(Capability::Http, "timestamp"); - self.externalities.timestamp() - } - - fn sleep_until(&mut self, deadline: Timestamp) { - self.check(Capability::Http, "sleep_until"); - self.externalities.sleep_until(deadline) - } - - fn random_seed(&mut self) -> [u8; 32] { - self.check(Capability::Randomness, "random_seed"); - self.externalities.random_seed() - } - - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - self.check(Capability::OffchainWorkerDbWrite, "local_storage_set"); - self.externalities.local_storage_set(kind, key, value) - } - - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - self.check(Capability::OffchainWorkerDbWrite, "local_storage_compare_and_set"); - self.externalities.local_storage_compare_and_set(kind, key, old_value, new_value) - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - self.check(Capability::OffchainWorkerDbRead, "local_storage_get"); - self.externalities.local_storage_get(kind, key) - } - - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { - self.check(Capability::Http, "http_request_start"); - self.externalities.http_request_start(method, uri, meta) - } - - fn http_request_add_header(&mut self, request_id: HttpRequestId, name: &str, value: &str) -> Result<(), ()> { - 
self.check(Capability::Http, "http_request_add_header"); - self.externalities.http_request_add_header(request_id, name, value) - } - - fn http_request_write_body( - &mut self, - request_id: HttpRequestId, - chunk: &[u8], - deadline: Option - ) -> Result<(), HttpError> { - self.check(Capability::Http, "http_request_write_body"); - self.externalities.http_request_write_body(request_id, chunk, deadline) - } - - fn http_response_wait(&mut self, ids: &[HttpRequestId], deadline: Option) -> Vec { - self.check(Capability::Http, "http_response_wait"); - self.externalities.http_response_wait(ids, deadline) - } - - fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { - self.check(Capability::Http, "http_response_headers"); - self.externalities.http_response_headers(request_id) - } - - fn http_response_read_body( - &mut self, - request_id: HttpRequestId, - buffer: &mut [u8], - deadline: Option - ) -> Result { - self.check(Capability::Http, "http_response_read_body"); - self.externalities.http_response_read_body(request_id, buffer, deadline) - } + fn is_validator(&self) -> bool { + self.check(Capability::Keystore, "is_validator"); + self.externalities.is_validator() + } + + fn network_state(&self) -> Result { + self.check(Capability::NetworkState, "network_state"); + self.externalities.network_state() + } + + fn timestamp(&mut self) -> Timestamp { + self.check(Capability::Http, "timestamp"); + self.externalities.timestamp() + } + + fn sleep_until(&mut self, deadline: Timestamp) { + self.check(Capability::Http, "sleep_until"); + self.externalities.sleep_until(deadline) + } + + fn random_seed(&mut self) -> [u8; 32] { + self.check(Capability::Randomness, "random_seed"); + self.externalities.random_seed() + } + + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + self.check(Capability::OffchainWorkerDbWrite, "local_storage_set"); + self.externalities.local_storage_set(kind, key, value) + } + + fn 
local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + self.check( + Capability::OffchainWorkerDbWrite, + "local_storage_compare_and_set", + ); + self.externalities + .local_storage_compare_and_set(kind, key, old_value, new_value) + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + self.check(Capability::OffchainWorkerDbRead, "local_storage_get"); + self.externalities.local_storage_get(kind, key) + } + + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { + self.check(Capability::Http, "http_request_start"); + self.externalities.http_request_start(method, uri, meta) + } + + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { + self.check(Capability::Http, "http_request_add_header"); + self.externalities + .http_request_add_header(request_id, name, value) + } + + fn http_request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option, + ) -> Result<(), HttpError> { + self.check(Capability::Http, "http_request_write_body"); + self.externalities + .http_request_write_body(request_id, chunk, deadline) + } + + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { + self.check(Capability::Http, "http_response_wait"); + self.externalities.http_response_wait(ids, deadline) + } + + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { + self.check(Capability::Http, "http_response_headers"); + self.externalities.http_response_headers(request_id) + } + + fn http_response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option, + ) -> Result { + self.check(Capability::Http, "http_response_read_body"); + self.externalities + .http_response_read_body(request_id, buffer, deadline) + } } #[cfg(feature = "std")] 
sp_externalities::decl_extension! { - /// The offchain extension that will be registered at the Substrate externalities. - pub struct OffchainExt(Box); + /// The offchain extension that will be registered at the Substrate externalities. + pub struct OffchainExt(Box); } #[cfg(feature = "std")] impl OffchainExt { - /// Create a new instance of `Self`. - pub fn new(offchain: O) -> Self { - Self(Box::new(offchain)) - } + /// Create a new instance of `Self`. + pub fn new(offchain: O) -> Self { + Self(Box::new(offchain)) + } } /// Abstraction over transaction pool. @@ -691,50 +735,49 @@ impl OffchainExt { /// tight coupling with any pool implementation. #[cfg(feature = "std")] pub trait TransactionPool { - /// Submit transaction. - /// - /// The transaction will end up in the pool and be propagated to others. - fn submit_transaction(&mut self, extrinsic: Vec) -> Result<(), ()>; + /// Submit transaction. + /// + /// The transaction will end up in the pool and be propagated to others. + fn submit_transaction(&mut self, extrinsic: Vec) -> Result<(), ()>; } #[cfg(feature = "std")] sp_externalities::decl_extension! { - /// An externalities extension to submit transactions to the pool. - pub struct TransactionPoolExt(Box); + /// An externalities extension to submit transactions to the pool. + pub struct TransactionPoolExt(Box); } #[cfg(feature = "std")] impl TransactionPoolExt { - /// Create a new instance of `TransactionPoolExt`. - pub fn new(pool: O) -> Self { - Self(Box::new(pool)) - } + /// Create a new instance of `TransactionPoolExt`. 
+ pub fn new(pool: O) -> Self { + Self(Box::new(pool)) + } } - #[cfg(test)] mod tests { - use super::*; - - #[test] - fn timestamp_ops() { - let t = Timestamp(5); - assert_eq!(t.add(Duration::from_millis(10)), Timestamp(15)); - assert_eq!(t.sub(Duration::from_millis(10)), Timestamp(0)); - assert_eq!(t.diff(&Timestamp(3)), Duration(2)); - } - - #[test] - fn capabilities() { - let none = Capabilities::none(); - let all = Capabilities::all(); - let some = Capabilities::from(&[Capability::Keystore, Capability::Randomness][..]); - - assert!(!none.has(Capability::Keystore)); - assert!(all.has(Capability::Keystore)); - assert!(some.has(Capability::Keystore)); - assert!(!none.has(Capability::TransactionPool)); - assert!(all.has(Capability::TransactionPool)); - assert!(!some.has(Capability::TransactionPool)); - } + use super::*; + + #[test] + fn timestamp_ops() { + let t = Timestamp(5); + assert_eq!(t.add(Duration::from_millis(10)), Timestamp(15)); + assert_eq!(t.sub(Duration::from_millis(10)), Timestamp(0)); + assert_eq!(t.diff(&Timestamp(3)), Duration(2)); + } + + #[test] + fn capabilities() { + let none = Capabilities::none(); + let all = Capabilities::all(); + let some = Capabilities::from(&[Capability::Keystore, Capability::Randomness][..]); + + assert!(!none.has(Capability::Keystore)); + assert!(all.has(Capability::Keystore)); + assert!(some.has(Capability::Keystore)); + assert!(!none.has(Capability::TransactionPool)); + assert!(all.has(Capability::TransactionPool)); + assert!(!some.has(Capability::TransactionPool)); + } } diff --git a/primitives/core/src/offchain/storage.rs b/primitives/core/src/offchain/storage.rs index 31b6423e5d..595352dfbb 100644 --- a/primitives/core/src/offchain/storage.rs +++ b/primitives/core/src/offchain/storage.rs @@ -16,45 +16,49 @@ //! In-memory implementation of offchain workers database. 
-use std::collections::hash_map::{HashMap, Entry}; use crate::offchain::OffchainStorage; +use std::collections::hash_map::{Entry, HashMap}; /// In-memory storage for offchain workers. #[derive(Debug, Clone, Default)] pub struct InMemOffchainStorage { - storage: HashMap, Vec>, + storage: HashMap, Vec>, } impl OffchainStorage for InMemOffchainStorage { - fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { - let key = prefix.iter().chain(key).cloned().collect(); - self.storage.insert(key, value.to_vec()); - } - - fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { - let key: Vec = prefix.iter().chain(key).cloned().collect(); - self.storage.get(&key).cloned() - } - - fn compare_and_set( - &mut self, - prefix: &[u8], - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8], - ) -> bool { - let key = prefix.iter().chain(key).cloned().collect(); - - match self.storage.entry(key) { - Entry::Vacant(entry) => if old_value.is_none() { - entry.insert(new_value.to_vec()); - true - } else { false }, - Entry::Occupied(ref mut entry) if Some(entry.get().as_slice()) == old_value => { - entry.insert(new_value.to_vec()); - true - }, - _ => false, - } - } + fn set(&mut self, prefix: &[u8], key: &[u8], value: &[u8]) { + let key = prefix.iter().chain(key).cloned().collect(); + self.storage.insert(key, value.to_vec()); + } + + fn get(&self, prefix: &[u8], key: &[u8]) -> Option> { + let key: Vec = prefix.iter().chain(key).cloned().collect(); + self.storage.get(&key).cloned() + } + + fn compare_and_set( + &mut self, + prefix: &[u8], + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + let key = prefix.iter().chain(key).cloned().collect(); + + match self.storage.entry(key) { + Entry::Vacant(entry) => { + if old_value.is_none() { + entry.insert(new_value.to_vec()); + true + } else { + false + } + } + Entry::Occupied(ref mut entry) if Some(entry.get().as_slice()) == old_value => { + entry.insert(new_value.to_vec()); + true + } + _ => false, + } + } } 
diff --git a/primitives/core/src/offchain/testing.rs b/primitives/core/src/offchain/testing.rs index f4faee6b02..64aa14d7c0 100644 --- a/primitives/core/src/offchain/testing.rs +++ b/primitives/core/src/offchain/testing.rs @@ -19,45 +19,35 @@ //! Namely all ExecutionExtensions that allow mocking //! the extra APIs. -use std::{ - collections::BTreeMap, - sync::Arc, -}; use crate::offchain::{ - self, - storage::InMemOffchainStorage, - HttpError, - HttpRequestId as RequestId, - HttpRequestStatus as RequestStatus, - Timestamp, - StorageKind, - OpaqueNetworkState, - TransactionPool, - OffchainStorage, + self, storage::InMemOffchainStorage, HttpError, HttpRequestId as RequestId, + HttpRequestStatus as RequestStatus, OffchainStorage, OpaqueNetworkState, StorageKind, + Timestamp, TransactionPool, }; use parking_lot::RwLock; +use std::{collections::BTreeMap, sync::Arc}; /// Pending request. #[derive(Debug, Default, PartialEq, Eq)] pub struct PendingRequest { - /// HTTP method - pub method: String, - /// URI - pub uri: String, - /// Encoded Metadata - pub meta: Vec, - /// Request headers - pub headers: Vec<(String, String)>, - /// Request body - pub body: Vec, - /// Has the request been sent already. - pub sent: bool, - /// Response body - pub response: Option>, - /// Number of bytes already read from the response body. - pub read: usize, - /// Response headers - pub response_headers: Vec<(String, String)>, + /// HTTP method + pub method: String, + /// URI + pub uri: String, + /// Encoded Metadata + pub meta: Vec, + /// Request headers + pub headers: Vec<(String, String)>, + /// Request body + pub body: Vec, + /// Has the request been sent already. + pub sent: bool, + /// Response body + pub response: Option>, + /// Number of bytes already read from the response body. + pub read: usize, + /// Response headers + pub response_headers: Vec<(String, String)>, } /// Internal state of the externalities. 
@@ -65,70 +55,73 @@ pub struct PendingRequest { /// This can be used in tests to respond or assert stuff about interactions. #[derive(Debug, Default)] pub struct OffchainState { - /// A list of pending requests. - pub requests: BTreeMap, - expected_requests: BTreeMap, - /// Persistent local storage - pub persistent_storage: InMemOffchainStorage, - /// Local storage - pub local_storage: InMemOffchainStorage, - /// Current timestamp (unix millis) - pub timestamp: u64, + /// A list of pending requests. + pub requests: BTreeMap, + expected_requests: BTreeMap, + /// Persistent local storage + pub persistent_storage: InMemOffchainStorage, + /// Local storage + pub local_storage: InMemOffchainStorage, + /// Current timestamp (unix millis) + pub timestamp: u64, } impl OffchainState { - /// Asserts that pending request has been submitted and fills it's response. - pub fn fulfill_pending_request( - &mut self, - id: u16, - expected: PendingRequest, - response: impl Into>, - response_headers: impl IntoIterator, - ) { - match self.requests.get_mut(&RequestId(id)) { - None => { - panic!("Missing pending request: {:?}.\n\nAll: {:?}", id, self.requests); - } - Some(req) => { - assert_eq!( - *req, - expected, - ); - req.response = Some(response.into()); - req.response_headers = response_headers.into_iter().collect(); - } - } - } - - fn fulfill_expected(&mut self, id: u16) { - if let Some(mut req) = self.expected_requests.remove(&RequestId(id)) { - let response = req.response.take().expect("Response checked while added."); - let headers = std::mem::replace(&mut req.response_headers, vec![]); - self.fulfill_pending_request(id, req, response, headers); - } - } - - /// Add expected HTTP request. - /// - /// This method can be used to initialize expected HTTP requests and their responses - /// before running the actual code that utilizes them (for instance before calling into runtime). 
- /// Expected request has to be fulfilled before this struct is dropped, - /// the `response` and `response_headers` fields will be used to return results to the callers. - pub fn expect_request(&mut self, id: u16, expected: PendingRequest) { - if expected.response.is_none() { - panic!("Expected request needs to have a response."); - } - self.expected_requests.insert(RequestId(id), expected); - } + /// Asserts that pending request has been submitted and fills it's response. + pub fn fulfill_pending_request( + &mut self, + id: u16, + expected: PendingRequest, + response: impl Into>, + response_headers: impl IntoIterator, + ) { + match self.requests.get_mut(&RequestId(id)) { + None => { + panic!( + "Missing pending request: {:?}.\n\nAll: {:?}", + id, self.requests + ); + } + Some(req) => { + assert_eq!(*req, expected,); + req.response = Some(response.into()); + req.response_headers = response_headers.into_iter().collect(); + } + } + } + + fn fulfill_expected(&mut self, id: u16) { + if let Some(mut req) = self.expected_requests.remove(&RequestId(id)) { + let response = req.response.take().expect("Response checked while added."); + let headers = std::mem::replace(&mut req.response_headers, vec![]); + self.fulfill_pending_request(id, req, response, headers); + } + } + + /// Add expected HTTP request. + /// + /// This method can be used to initialize expected HTTP requests and their responses + /// before running the actual code that utilizes them (for instance before calling into runtime). + /// Expected request has to be fulfilled before this struct is dropped, + /// the `response` and `response_headers` fields will be used to return results to the callers. + pub fn expect_request(&mut self, id: u16, expected: PendingRequest) { + if expected.response.is_none() { + panic!("Expected request needs to have a response."); + } + self.expected_requests.insert(RequestId(id), expected); + } } impl Drop for OffchainState { - fn drop(&mut self) { - // If we panic! 
while we are already in a panic, the test dies with an illegal instruction. - if !self.expected_requests.is_empty() && !std::thread::panicking() { - panic!("Unfulfilled expected requests: {:?}", self.expected_requests); - } - } + fn drop(&mut self) { + // If we panic! while we are already in a panic, the test dies with an illegal instruction. + if !self.expected_requests.is_empty() && !std::thread::panicking() { + panic!( + "Unfulfilled expected requests: {:?}", + self.expected_requests + ); + } + } } /// Implementation of offchain externalities used for tests. @@ -136,180 +129,198 @@ impl Drop for OffchainState { pub struct TestOffchainExt(pub Arc>); impl TestOffchainExt { - /// Create new `TestOffchainExt` and a reference to the internal state. - pub fn new() -> (Self, Arc>) { - let ext = Self::default(); - let state = ext.0.clone(); - (ext, state) - } + /// Create new `TestOffchainExt` and a reference to the internal state. + pub fn new() -> (Self, Arc>) { + let ext = Self::default(); + let state = ext.0.clone(); + (ext, state) + } } impl offchain::Externalities for TestOffchainExt { - fn is_validator(&self) -> bool { - true - } - - fn network_state(&self) -> Result { - Ok(OpaqueNetworkState { - peer_id: Default::default(), - external_addresses: vec![], - }) - } - - fn timestamp(&mut self) -> Timestamp { - Timestamp::from_unix_millis(self.0.read().timestamp) - } - - fn sleep_until(&mut self, _deadline: Timestamp) { - unimplemented!("not needed in tests so far") - } - - fn random_seed(&mut self) -> [u8; 32] { - unimplemented!("not needed in tests so far") - } - - fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - let mut state = self.0.write(); - match kind { - StorageKind::LOCAL => &mut state.local_storage, - StorageKind::PERSISTENT => &mut state.persistent_storage, - }.set(b"", key, value); - } - - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option<&[u8]>, - new_value: &[u8] - ) -> 
bool { - let mut state = self.0.write(); - match kind { - StorageKind::LOCAL => &mut state.local_storage, - StorageKind::PERSISTENT => &mut state.persistent_storage, - }.compare_and_set(b"", key, old_value, new_value) - } - - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - let state = self.0.read(); - match kind { - StorageKind::LOCAL => &state.local_storage, - StorageKind::PERSISTENT => &state.persistent_storage, - }.get(b"", key) - } - - fn http_request_start(&mut self, method: &str, uri: &str, meta: &[u8]) -> Result { - let mut state = self.0.write(); - let id = RequestId(state.requests.len() as u16); - state.requests.insert(id.clone(), PendingRequest { - method: method.into(), - uri: uri.into(), - meta: meta.into(), - ..Default::default() - }); - Ok(id) - } - - fn http_request_add_header( - &mut self, - request_id: RequestId, - name: &str, - value: &str, - ) -> Result<(), ()> { - let mut state = self.0.write(); - if let Some(req) = state.requests.get_mut(&request_id) { - req.headers.push((name.into(), value.into())); - Ok(()) - } else { - Err(()) - } - } - - fn http_request_write_body( - &mut self, - request_id: RequestId, - chunk: &[u8], - _deadline: Option - ) -> Result<(), HttpError> { - let mut state = self.0.write(); - - let sent = { - let req = state.requests.get_mut(&request_id).ok_or(HttpError::IoError)?; - req.body.extend(chunk); - if chunk.is_empty() { - req.sent = true; - } - req.sent - }; - - if sent { - state.fulfill_expected(request_id.0); - } - - Ok(()) - } - - fn http_response_wait( - &mut self, - ids: &[RequestId], - _deadline: Option, - ) -> Vec { - let state = self.0.read(); - - ids.iter().map(|id| match state.requests.get(id) { - Some(req) if req.response.is_none() => - panic!("No `response` provided for request with id: {:?}", id), - None => RequestStatus::Invalid, - _ => RequestStatus::Finished(200), - }).collect() - } - - fn http_response_headers(&mut self, request_id: RequestId) -> Vec<(Vec, Vec)> { - let 
state = self.0.read(); - if let Some(req) = state.requests.get(&request_id) { - req.response_headers - .clone() - .into_iter() - .map(|(k, v)| (k.into_bytes(), v.into_bytes())) - .collect() - } else { - Default::default() - } - } - - fn http_response_read_body( - &mut self, - request_id: RequestId, - buffer: &mut [u8], - _deadline: Option - ) -> Result { - let mut state = self.0.write(); - if let Some(req) = state.requests.get_mut(&request_id) { - let response = req.response - .as_mut() - .expect(&format!("No response provided for request: {:?}", request_id)); - - if req.read >= response.len() { - // Remove the pending request as per spec. - state.requests.remove(&request_id); - Ok(0) - } else { - let read = std::cmp::min(buffer.len(), response[req.read..].len()); - buffer[0..read].copy_from_slice(&response[req.read..read]); - req.read += read; - Ok(read) - } - } else { - Err(HttpError::IoError) - } - } + fn is_validator(&self) -> bool { + true + } + + fn network_state(&self) -> Result { + Ok(OpaqueNetworkState { + peer_id: Default::default(), + external_addresses: vec![], + }) + } + + fn timestamp(&mut self) -> Timestamp { + Timestamp::from_unix_millis(self.0.read().timestamp) + } + + fn sleep_until(&mut self, _deadline: Timestamp) { + unimplemented!("not needed in tests so far") + } + + fn random_seed(&mut self) -> [u8; 32] { + unimplemented!("not needed in tests so far") + } + + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + let mut state = self.0.write(); + match kind { + StorageKind::LOCAL => &mut state.local_storage, + StorageKind::PERSISTENT => &mut state.persistent_storage, + } + .set(b"", key, value); + } + + fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option<&[u8]>, + new_value: &[u8], + ) -> bool { + let mut state = self.0.write(); + match kind { + StorageKind::LOCAL => &mut state.local_storage, + StorageKind::PERSISTENT => &mut state.persistent_storage, + } + 
.compare_and_set(b"", key, old_value, new_value) + } + + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + let state = self.0.read(); + match kind { + StorageKind::LOCAL => &state.local_storage, + StorageKind::PERSISTENT => &state.persistent_storage, + } + .get(b"", key) + } + + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { + let mut state = self.0.write(); + let id = RequestId(state.requests.len() as u16); + state.requests.insert( + id.clone(), + PendingRequest { + method: method.into(), + uri: uri.into(), + meta: meta.into(), + ..Default::default() + }, + ); + Ok(id) + } + + fn http_request_add_header( + &mut self, + request_id: RequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { + let mut state = self.0.write(); + if let Some(req) = state.requests.get_mut(&request_id) { + req.headers.push((name.into(), value.into())); + Ok(()) + } else { + Err(()) + } + } + + fn http_request_write_body( + &mut self, + request_id: RequestId, + chunk: &[u8], + _deadline: Option, + ) -> Result<(), HttpError> { + let mut state = self.0.write(); + + let sent = { + let req = state + .requests + .get_mut(&request_id) + .ok_or(HttpError::IoError)?; + req.body.extend(chunk); + if chunk.is_empty() { + req.sent = true; + } + req.sent + }; + + if sent { + state.fulfill_expected(request_id.0); + } + + Ok(()) + } + + fn http_response_wait( + &mut self, + ids: &[RequestId], + _deadline: Option, + ) -> Vec { + let state = self.0.read(); + + ids.iter() + .map(|id| match state.requests.get(id) { + Some(req) if req.response.is_none() => { + panic!("No `response` provided for request with id: {:?}", id) + } + None => RequestStatus::Invalid, + _ => RequestStatus::Finished(200), + }) + .collect() + } + + fn http_response_headers(&mut self, request_id: RequestId) -> Vec<(Vec, Vec)> { + let state = self.0.read(); + if let Some(req) = state.requests.get(&request_id) { + req.response_headers + .clone() + 
.into_iter() + .map(|(k, v)| (k.into_bytes(), v.into_bytes())) + .collect() + } else { + Default::default() + } + } + + fn http_response_read_body( + &mut self, + request_id: RequestId, + buffer: &mut [u8], + _deadline: Option, + ) -> Result { + let mut state = self.0.write(); + if let Some(req) = state.requests.get_mut(&request_id) { + let response = req.response.as_mut().expect(&format!( + "No response provided for request: {:?}", + request_id + )); + + if req.read >= response.len() { + // Remove the pending request as per spec. + state.requests.remove(&request_id); + Ok(0) + } else { + let read = std::cmp::min(buffer.len(), response[req.read..].len()); + buffer[0..read].copy_from_slice(&response[req.read..read]); + req.read += read; + Ok(read) + } + } else { + Err(HttpError::IoError) + } + } } /// The internal state of the fake transaction pool. #[derive(Default)] pub struct PoolState { - /// A vector of transactions submitted from the runtime. - pub transactions: Vec>, + /// A vector of transactions submitted from the runtime. + pub transactions: Vec>, } /// Implementation of transaction pool used for test. @@ -325,17 +336,17 @@ pub struct PoolState { pub struct TestTransactionPoolExt(Arc>); impl TestTransactionPoolExt { - /// Create new `TestTransactionPoolExt` and a reference to the internal state. - pub fn new() -> (Self, Arc>) { - let ext = Self::default(); - let state = ext.0.clone(); - (ext, state) - } + /// Create new `TestTransactionPoolExt` and a reference to the internal state. 
+ pub fn new() -> (Self, Arc>) { + let ext = Self::default(); + let state = ext.0.clone(); + (ext, state) + } } impl TransactionPool for TestTransactionPoolExt { - fn submit_transaction(&mut self, extrinsic: Vec) -> Result<(), ()> { - self.0.write().transactions.push(extrinsic); - Ok(()) - } + fn submit_transaction(&mut self, extrinsic: Vec) -> Result<(), ()> { + self.0.write().transactions.push(extrinsic); + Ok(()) + } } diff --git a/primitives/core/src/sandbox.rs b/primitives/core/src/sandbox.rs index 73fbcfb572..b2c11dcc4d 100644 --- a/primitives/core/src/sandbox.rs +++ b/primitives/core/src/sandbox.rs @@ -16,50 +16,46 @@ //! Definition of a sandbox environment. -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::vec::Vec; /// Error error that can be returned from host function. -#[derive(Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Encode, Decode, crate::RuntimeDebug)] pub struct HostError; /// Describes an entity to define or import into the environment. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub enum ExternEntity { - /// Function that is specified by an index in a default table of - /// a module that creates the sandbox. - #[codec(index = "1")] - Function(u32), - - /// Linear memory that is specified by some identifier returned by sandbox - /// module upon creation new sandboxed memory. - #[codec(index = "2")] - Memory(u32), + /// Function that is specified by an index in a default table of + /// a module that creates the sandbox. + #[codec(index = "1")] + Function(u32), + + /// Linear memory that is specified by some identifier returned by sandbox + /// module upon creation new sandboxed memory. + #[codec(index = "2")] + Memory(u32), } /// An entry in a environment definition table. /// /// Each entry has a two-level name and description of an entity /// being defined. 
-#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub struct Entry { - /// Module name of which corresponding entity being defined. - pub module_name: Vec, - /// Field name in which corresponding entity being defined. - pub field_name: Vec, - /// External entity being defined. - pub entity: ExternEntity, + /// Module name of which corresponding entity being defined. + pub module_name: Vec, + /// Field name in which corresponding entity being defined. + pub field_name: Vec, + /// External entity being defined. + pub entity: ExternEntity, } /// Definition of runtime that could be used by sandboxed code. -#[derive(Clone, PartialEq, Eq, Encode, Decode)] -#[derive(crate::RuntimeDebug)] +#[derive(Clone, PartialEq, Eq, Encode, Decode, crate::RuntimeDebug)] pub struct EnvironmentDefinition { - /// Vector of all entries in the environment definition. - pub entries: Vec, + /// Vector of all entries in the environment definition. 
+ pub entries: Vec, } /// Constant for specifying no limit when creating a sandboxed @@ -89,39 +85,33 @@ pub const ERR_EXECUTION: u32 = -3i32 as u32; #[cfg(test)] mod tests { - use super::*; - use std::fmt; - use codec::Codec; - - fn roundtrip(s: S) { - let encoded = s.encode(); - assert_eq!(S::decode(&mut &encoded[..]).unwrap(), s); - } - - #[test] - fn env_def_roundtrip() { - roundtrip(EnvironmentDefinition { - entries: vec![], - }); - - roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"kernel"[..].into(), - field_name: b"memory"[..].into(), - entity: ExternEntity::Memory(1337), - }, - ], - }); - - roundtrip(EnvironmentDefinition { - entries: vec![ - Entry { - module_name: b"env"[..].into(), - field_name: b"abort"[..].into(), - entity: ExternEntity::Function(228), - }, - ], - }); - } + use super::*; + use codec::Codec; + use std::fmt; + + fn roundtrip(s: S) { + let encoded = s.encode(); + assert_eq!(S::decode(&mut &encoded[..]).unwrap(), s); + } + + #[test] + fn env_def_roundtrip() { + roundtrip(EnvironmentDefinition { entries: vec![] }); + + roundtrip(EnvironmentDefinition { + entries: vec![Entry { + module_name: b"kernel"[..].into(), + field_name: b"memory"[..].into(), + entity: ExternEntity::Memory(1337), + }], + }); + + roundtrip(EnvironmentDefinition { + entries: vec![Entry { + module_name: b"env"[..].into(), + field_name: b"abort"[..].into(), + entity: ExternEntity::Function(228), + }], + }); + } } diff --git a/primitives/core/src/sr25519.rs b/primitives/core/src/sr25519.rs index cadfb25776..11be50bc22 100644 --- a/primitives/core/src/sr25519.rs +++ b/primitives/core/src/sr25519.rs @@ -20,34 +20,35 @@ //! Note: `CHAIN_CODE_LENGTH` must be equal to `crate::crypto::JUNCTION_ID_LEN` //! for this to work. 
// end::description[] +#[cfg(feature = "std")] +use crate::crypto::Ss58Codec; #[cfg(feature = "full_crypto")] -use sp_std::vec::Vec; +use crate::crypto::{DeriveJunction, Infallible, Pair as TraitPair, SecretStringError}; +#[cfg(feature = "std")] +use bip39::{Language, Mnemonic, MnemonicType}; #[cfg(feature = "full_crypto")] -use schnorrkel::{signing_context, ExpansionMode, Keypair, SecretKey, MiniSecretKey, PublicKey, - derive::{Derivation, ChainCode, CHAIN_CODE_LENGTH} +use core::convert::TryFrom; +#[cfg(feature = "full_crypto")] +use schnorrkel::{ + derive::{ChainCode, Derivation, CHAIN_CODE_LENGTH}, + signing_context, ExpansionMode, Keypair, MiniSecretKey, PublicKey, SecretKey, }; #[cfg(feature = "full_crypto")] -use core::convert::TryFrom; +use sp_std::vec::Vec; #[cfg(feature = "std")] use substrate_bip39::mini_secret_from_entropy; -#[cfg(feature = "std")] -use bip39::{Mnemonic, Language, MnemonicType}; -#[cfg(feature = "full_crypto")] + use crate::crypto::{ - Pair as TraitPair, DeriveJunction, Infallible, SecretStringError + CryptoType, CryptoTypeId, CryptoTypePublicPair, Derive, Public as TraitPublic, UncheckedFrom, }; -#[cfg(feature = "std")] -use crate::crypto::Ss58Codec; - -use crate::crypto::{Public as TraitPublic, CryptoTypePublicPair, UncheckedFrom, CryptoType, Derive, CryptoTypeId}; use crate::hash::{H256, H512}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::ops::Deref; -#[cfg(feature = "std")] -use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; #[cfg(feature = "full_crypto")] use schnorrkel::keys::{MINI_SECRET_KEY_LENGTH, SECRET_KEY_LENGTH}; +#[cfg(feature = "std")] +use serde::{de, Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime_interface::pass_by::PassByInner; // signing context @@ -68,121 +69,132 @@ pub struct Pair(Keypair); #[cfg(feature = "full_crypto")] impl Clone for Pair { - fn clone(&self) -> Self { - Pair(schnorrkel::Keypair { - public: self.0.public, - secret: 
schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) - .expect("key is always the correct size; qed") - }) - } + fn clone(&self) -> Self { + Pair(schnorrkel::Keypair { + public: self.0.public, + secret: schnorrkel::SecretKey::from_bytes(&self.0.secret.to_bytes()[..]) + .expect("key is always the correct size; qed"), + }) + } } impl AsRef<[u8; 32]> for Public { - fn as_ref(&self) -> &[u8; 32] { - &self.0 - } + fn as_ref(&self) -> &[u8; 32] { + &self.0 + } } impl AsRef<[u8]> for Public { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Public { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] + } } impl Deref for Public { - type Target = [u8]; + type Target = [u8]; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } impl From for [u8; 32] { - fn from(x: Public) -> [u8; 32] { - x.0 - } + fn from(x: Public) -> [u8; 32] { + x.0 + } } impl From for H256 { - fn from(x: Public) -> H256 { - x.0.into() - } + fn from(x: Public) -> H256 { + x.0.into() + } } #[cfg(feature = "std")] impl std::str::FromStr for Public { - type Err = crate::crypto::PublicError; + type Err = crate::crypto::PublicError; - fn from_str(s: &str) -> Result { - Self::from_ss58check(s) - } + fn from_str(s: &str) -> Result { + Self::from_ss58check(s) + } } impl sp_std::convert::TryFrom<&[u8]> for Public { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() == 32 { - let mut inner = [0u8; 32]; - inner.copy_from_slice(data); - Ok(Public(inner)) - } else { - Err(()) - } - } + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() == 32 { + let mut inner = [0u8; 32]; + inner.copy_from_slice(data); + Ok(Public(inner)) + } else { + Err(()) + } + } } impl UncheckedFrom<[u8; 32]> for Public { - fn unchecked_from(x: [u8; 32]) -> Self { - Public::from_raw(x) - } + fn 
unchecked_from(x: [u8; 32]) -> Self { + Public::from_raw(x) + } } impl UncheckedFrom for Public { - fn unchecked_from(x: H256) -> Self { - Public::from_h256(x) - } + fn unchecked_from(x: H256) -> Self { + Public::from_h256(x) + } } #[cfg(feature = "std")] impl std::fmt::Display for Public { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "{}", self.to_ss58check()) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "{}", self.to_ss58check()) + } } impl sp_std::fmt::Debug for Public { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - let s = self.to_ss58check(); - write!(f, "{} ({}...)", crate::hexdisplay::HexDisplay::from(&self.0), &s[0..8]) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + let s = self.to_ss58check(); + write!( + f, + "{} ({}...)", + crate::hexdisplay::HexDisplay::from(&self.0), + &s[0..8] + ) + } - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } #[cfg(feature = "std")] impl Serialize for Public { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&self.to_ss58check()) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&self.to_ss58check()) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Public { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - Public::from_ss58check(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e))) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + Public::from_ss58check(&String::deserialize(deserializer)?) 
+ .map_err(|e| de::Error::custom(format!("{:?}", e))) + } } /// An Schnorrkel/Ristretto x25519 ("sr25519") signature. @@ -192,112 +204,118 @@ impl<'de> Deserialize<'de> for Public { pub struct Signature(pub [u8; 64]); impl sp_std::convert::TryFrom<&[u8]> for Signature { - type Error = (); - - fn try_from(data: &[u8]) -> Result { - if data.len() == 64 { - let mut inner = [0u8; 64]; - inner.copy_from_slice(data); - Ok(Signature(inner)) - } else { - Err(()) - } - } + type Error = (); + + fn try_from(data: &[u8]) -> Result { + if data.len() == 64 { + let mut inner = [0u8; 64]; + inner.copy_from_slice(data); + Ok(Signature(inner)) + } else { + Err(()) + } + } } #[cfg(feature = "std")] impl Serialize for Signature { - fn serialize(&self, serializer: S) -> Result where S: Serializer { - serializer.serialize_str(&hex::encode(self)) - } + fn serialize(&self, serializer: S) -> Result + where + S: Serializer, + { + serializer.serialize_str(&hex::encode(self)) + } } #[cfg(feature = "std")] impl<'de> Deserialize<'de> for Signature { - fn deserialize(deserializer: D) -> Result where D: Deserializer<'de> { - let signature_hex = hex::decode(&String::deserialize(deserializer)?) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?; - Ok(Signature::try_from(signature_hex.as_ref()) - .map_err(|e| de::Error::custom(format!("{:?}", e)))?) - } + fn deserialize(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let signature_hex = hex::decode(&String::deserialize(deserializer)?) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?; + Ok(Signature::try_from(signature_hex.as_ref()) + .map_err(|e| de::Error::custom(format!("{:?}", e)))?) 
+ } } impl Clone for Signature { - fn clone(&self) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(&self.0[..]); - Signature(r) - } + fn clone(&self) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(&self.0[..]); + Signature(r) + } } impl Default for Signature { - fn default() -> Self { - Signature([0u8; 64]) - } + fn default() -> Self { + Signature([0u8; 64]) + } } impl PartialEq for Signature { - fn eq(&self, b: &Self) -> bool { - self.0[..] == b.0[..] - } + fn eq(&self, b: &Self) -> bool { + self.0[..] == b.0[..] + } } impl Eq for Signature {} impl From for [u8; 64] { - fn from(v: Signature) -> [u8; 64] { - v.0 - } + fn from(v: Signature) -> [u8; 64] { + v.0 + } } impl From for H512 { - fn from(v: Signature) -> H512 { - H512::from(v.0) - } + fn from(v: Signature) -> H512 { + H512::from(v.0) + } } impl AsRef<[u8; 64]> for Signature { - fn as_ref(&self) -> &[u8; 64] { - &self.0 - } + fn as_ref(&self) -> &[u8; 64] { + &self.0 + } } impl AsRef<[u8]> for Signature { - fn as_ref(&self) -> &[u8] { - &self.0[..] - } + fn as_ref(&self) -> &[u8] { + &self.0[..] + } } impl AsMut<[u8]> for Signature { - fn as_mut(&mut self) -> &mut [u8] { - &mut self.0[..] - } + fn as_mut(&mut self) -> &mut [u8] { + &mut self.0[..] 
+ } } #[cfg(feature = "full_crypto")] impl From for Signature { - fn from(s: schnorrkel::Signature) -> Signature { - Signature(s.to_bytes()) - } + fn from(s: schnorrkel::Signature) -> Signature { + Signature(s.to_bytes()) + } } impl sp_std::fmt::Debug for Signature { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "{}", crate::hexdisplay::HexDisplay::from(&self.0)) + } - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } #[cfg(feature = "full_crypto")] impl sp_std::hash::Hash for Signature { - fn hash(&self, state: &mut H) { - sp_std::hash::Hash::hash(&self.0[..], state); - } + fn hash(&self, state: &mut H) { + sp_std::hash::Hash::hash(&self.0[..], state); + } } /// A localized signature also contains sender information. @@ -305,92 +323,92 @@ impl sp_std::hash::Hash for Signature { #[cfg(feature = "std")] #[derive(PartialEq, Eq, Clone, Debug)] pub struct LocalizedSignature { - /// The signer of the signature. - pub signer: Public, - /// The signature itself. - pub signature: Signature, + /// The signer of the signature. + pub signer: Public, + /// The signature itself. + pub signature: Signature, } impl Signature { - /// A new instance from the given 64-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use - /// it if you are certain that the array actually is a signature, or if you - /// immediately verify the signature. All functions that verify signatures - /// will fail if the `Signature` is not actually a valid signature. 
- pub fn from_raw(data: [u8; 64]) -> Signature { - Signature(data) - } - - /// A new instance from the given slice that should be 64 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 64]; - r.copy_from_slice(data); - Signature(r) - } - - /// A new instance from an H512. - /// - /// NOTE: No checking goes on to ensure this is a real signature. Only use it if - /// you are certain that the array actually is a signature. GIGO! - pub fn from_h512(v: H512) -> Signature { - Signature(v.into()) - } + /// A new instance from the given 64-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use + /// it if you are certain that the array actually is a signature, or if you + /// immediately verify the signature. All functions that verify signatures + /// will fail if the `Signature` is not actually a valid signature. + pub fn from_raw(data: [u8; 64]) -> Signature { + Signature(data) + } + + /// A new instance from the given slice that should be 64 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 64]; + r.copy_from_slice(data); + Signature(r) + } + + /// A new instance from an H512. + /// + /// NOTE: No checking goes on to ensure this is a real signature. Only use it if + /// you are certain that the array actually is a signature. GIGO! + pub fn from_h512(v: H512) -> Signature { + Signature(v.into()) + } } impl Derive for Public { - /// Derive a child key from a series of given junctions. - /// - /// `None` if there are any hard junctions in there. 
- #[cfg(feature = "std")] - fn derive>(&self, path: Iter) -> Option { - let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; - for j in path { - match j { - DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, - DeriveJunction::Hard(_cc) => return None, - } - } - Some(Self(acc.to_bytes())) - } + /// Derive a child key from a series of given junctions. + /// + /// `None` if there are any hard junctions in there. + #[cfg(feature = "std")] + fn derive>(&self, path: Iter) -> Option { + let mut acc = PublicKey::from_bytes(self.as_ref()).ok()?; + for j in path { + match j { + DeriveJunction::Soft(cc) => acc = acc.derived_key_simple(ChainCode(cc), &[]).0, + DeriveJunction::Hard(_cc) => return None, + } + } + Some(Self(acc.to_bytes())) + } } impl Public { - /// A new instance from the given 32-byte `data`. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_raw(data: [u8; 32]) -> Self { - Public(data) - } - - /// A new instance from an H256. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - pub fn from_h256(x: H256) -> Self { - Public(x.into()) - } - - /// Return a slice filled with raw data. - pub fn as_array_ref(&self) -> &[u8; 32] { - self.as_ref() - } + /// A new instance from the given 32-byte `data`. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_raw(data: [u8; 32]) -> Self { + Public(data) + } + + /// A new instance from an H256. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + pub fn from_h256(x: H256) -> Self { + Public(x.into()) + } + + /// Return a slice filled with raw data. 
+ pub fn as_array_ref(&self) -> &[u8; 32] { + self.as_ref() + } } impl TraitPublic for Public { - /// A new instance from the given slice that should be 32 bytes long. - /// - /// NOTE: No checking goes on to ensure this is a real public key. Only use it if - /// you are certain that the array actually is a pubkey. GIGO! - fn from_slice(data: &[u8]) -> Self { - let mut r = [0u8; 32]; - r.copy_from_slice(data); - Public(r) - } + /// A new instance from the given slice that should be 32 bytes long. + /// + /// NOTE: No checking goes on to ensure this is a real public key. Only use it if + /// you are certain that the array actually is a pubkey. GIGO! + fn from_slice(data: &[u8]) -> Self { + let mut r = [0u8; 32]; + r.copy_from_slice(data); + Public(r) + } } impl From for CryptoTypePublicPair { @@ -407,43 +425,45 @@ impl From<&Public> for CryptoTypePublicPair { #[cfg(feature = "std")] impl From for Pair { - fn from(sec: MiniSecretKey) -> Pair { - Pair(sec.expand_to_keypair(ExpansionMode::Ed25519)) - } + fn from(sec: MiniSecretKey) -> Pair { + Pair(sec.expand_to_keypair(ExpansionMode::Ed25519)) + } } #[cfg(feature = "std")] impl From for Pair { - fn from(sec: SecretKey) -> Pair { - Pair(Keypair::from(sec)) - } + fn from(sec: SecretKey) -> Pair { + Pair(Keypair::from(sec)) + } } #[cfg(feature = "full_crypto")] impl From for Pair { - fn from(p: schnorrkel::Keypair) -> Pair { - Pair(p) - } + fn from(p: schnorrkel::Keypair) -> Pair { + Pair(p) + } } #[cfg(feature = "full_crypto")] impl From for schnorrkel::Keypair { - fn from(p: Pair) -> schnorrkel::Keypair { - p.0 - } + fn from(p: Pair) -> schnorrkel::Keypair { + p.0 + } } #[cfg(feature = "full_crypto")] impl AsRef for Pair { - fn as_ref(&self) -> &schnorrkel::Keypair { - &self.0 - } + fn as_ref(&self) -> &schnorrkel::Keypair { + &self.0 + } } /// Derive a single hard junction. 
#[cfg(feature = "full_crypto")] fn derive_hard_junction(secret: &SecretKey, cc: &[u8; CHAIN_CODE_LENGTH]) -> MiniSecretKey { - secret.hard_derive_mini_secret_key(Some(ChainCode(cc.clone())), b"").0 + secret + .hard_derive_mini_secret_key(Some(ChainCode(cc.clone())), b"") + .0 } /// The raw secret seed, which can be used to recreate the `Pair`. @@ -452,163 +472,171 @@ type Seed = [u8; MINI_SECRET_KEY_LENGTH]; #[cfg(feature = "full_crypto")] impl TraitPair for Pair { - type Public = Public; - type Seed = Seed; - type Signature = Signature; - type DeriveError = Infallible; - - /// Make a new key pair from raw secret seed material. - /// - /// This is generated using schnorrkel's Mini-Secret-Keys. - /// - /// A MiniSecretKey is literally what Ed25519 calls a SecretKey, which is just 32 random bytes. - fn from_seed(seed: &Seed) -> Pair { - Self::from_seed_slice(&seed[..]) - .expect("32 bytes can always build a key; qed") - } - - /// Get the public key. - fn public(&self) -> Public { - let mut pk = [0u8; 32]; - pk.copy_from_slice(&self.0.public.to_bytes()); - Public(pk) - } - - /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it - /// will return `None`. - /// - /// You should never need to use this; generate(), generate_with_phrase(), from_phrase() - fn from_seed_slice(seed: &[u8]) -> Result { - match seed.len() { - MINI_SECRET_KEY_LENGTH => { - Ok(Pair( - MiniSecretKey::from_bytes(seed) - .map_err(|_| SecretStringError::InvalidSeed)? - .expand_to_keypair(ExpansionMode::Ed25519) - )) - } - SECRET_KEY_LENGTH => { - Ok(Pair( - SecretKey::from_bytes(seed) - .map_err(|_| SecretStringError::InvalidSeed)? 
- .to_keypair() - )) - } - _ => Err(SecretStringError::InvalidSeedLength) - } - } - #[cfg(feature = "std")] - fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { - let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); - let phrase = mnemonic.phrase(); - let (pair, seed) = Self::from_phrase(phrase, password) - .expect("All phrases generated by Mnemonic are valid; qed"); - ( - pair, - phrase.to_owned(), - seed, - ) - } - #[cfg(feature = "std")] - fn from_phrase(phrase: &str, password: Option<&str>) -> Result<(Pair, Seed), SecretStringError> { - Mnemonic::from_phrase(phrase, Language::English) - .map_err(|_| SecretStringError::InvalidPhrase) - .map(|m| Self::from_entropy(m.entropy(), password)) - } - - fn derive>(&self, - path: Iter, - seed: Option, - ) -> Result<(Pair, Option), Self::DeriveError> { - let seed = if let Some(s) = seed { - if let Ok(msk) = MiniSecretKey::from_bytes(&s) { - if msk.expand(ExpansionMode::Ed25519) == self.0.secret { - Some(msk) - } else { None } - } else { None } - } else { None }; - let init = self.0.secret.clone(); - let (result, seed) = path.fold((init, seed), |(acc, acc_seed), j| match (j, acc_seed) { - (DeriveJunction::Soft(cc), _) => - (acc.derived_key_simple(ChainCode(cc), &[]).0, None), - (DeriveJunction::Hard(cc), maybe_seed) => { - let seed = derive_hard_junction(&acc, &cc); - (seed.expand(ExpansionMode::Ed25519), maybe_seed.map(|_| seed)) - } - }); - Ok((Self(result.into()), seed.map(|s| MiniSecretKey::to_bytes(&s)))) - } - - fn sign(&self, message: &[u8]) -> Signature { - let context = signing_context(SIGNING_CTX); - self.0.sign(context.bytes(message)).into() - } - - fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { - Self::verify_weak(&sig.0[..], message, pubkey) - } - - fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { - let signature = match schnorrkel::Signature::from_bytes(sig) { - Ok(signature) => signature, - Err(_) => return 
false, - }; - - let pub_key = match PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pub_key) => pub_key, - Err(_) => return false, - }; - - pub_key.verify_simple(SIGNING_CTX, message.as_ref(), &signature).is_ok() - } - - fn to_raw_vec(&self) -> Vec { - self.0.secret.to_bytes().to_vec() - } + type Public = Public; + type Seed = Seed; + type Signature = Signature; + type DeriveError = Infallible; + + /// Make a new key pair from raw secret seed material. + /// + /// This is generated using schnorrkel's Mini-Secret-Keys. + /// + /// A MiniSecretKey is literally what Ed25519 calls a SecretKey, which is just 32 random bytes. + fn from_seed(seed: &Seed) -> Pair { + Self::from_seed_slice(&seed[..]).expect("32 bytes can always build a key; qed") + } + + /// Get the public key. + fn public(&self) -> Public { + let mut pk = [0u8; 32]; + pk.copy_from_slice(&self.0.public.to_bytes()); + Public(pk) + } + + /// Make a new key pair from secret seed material. The slice must be 32 bytes long or it + /// will return `None`. + /// + /// You should never need to use this; generate(), generate_with_phrase(), from_phrase() + fn from_seed_slice(seed: &[u8]) -> Result { + match seed.len() { + MINI_SECRET_KEY_LENGTH => Ok(Pair( + MiniSecretKey::from_bytes(seed) + .map_err(|_| SecretStringError::InvalidSeed)? + .expand_to_keypair(ExpansionMode::Ed25519), + )), + SECRET_KEY_LENGTH => Ok(Pair( + SecretKey::from_bytes(seed) + .map_err(|_| SecretStringError::InvalidSeed)? 
+ .to_keypair(), + )), + _ => Err(SecretStringError::InvalidSeedLength), + } + } + #[cfg(feature = "std")] + fn generate_with_phrase(password: Option<&str>) -> (Pair, String, Seed) { + let mnemonic = Mnemonic::new(MnemonicType::Words12, Language::English); + let phrase = mnemonic.phrase(); + let (pair, seed) = Self::from_phrase(phrase, password) + .expect("All phrases generated by Mnemonic are valid; qed"); + (pair, phrase.to_owned(), seed) + } + #[cfg(feature = "std")] + fn from_phrase( + phrase: &str, + password: Option<&str>, + ) -> Result<(Pair, Seed), SecretStringError> { + Mnemonic::from_phrase(phrase, Language::English) + .map_err(|_| SecretStringError::InvalidPhrase) + .map(|m| Self::from_entropy(m.entropy(), password)) + } + + fn derive>( + &self, + path: Iter, + seed: Option, + ) -> Result<(Pair, Option), Self::DeriveError> { + let seed = if let Some(s) = seed { + if let Ok(msk) = MiniSecretKey::from_bytes(&s) { + if msk.expand(ExpansionMode::Ed25519) == self.0.secret { + Some(msk) + } else { + None + } + } else { + None + } + } else { + None + }; + let init = self.0.secret.clone(); + let (result, seed) = path.fold((init, seed), |(acc, acc_seed), j| match (j, acc_seed) { + (DeriveJunction::Soft(cc), _) => (acc.derived_key_simple(ChainCode(cc), &[]).0, None), + (DeriveJunction::Hard(cc), maybe_seed) => { + let seed = derive_hard_junction(&acc, &cc); + ( + seed.expand(ExpansionMode::Ed25519), + maybe_seed.map(|_| seed), + ) + } + }); + Ok(( + Self(result.into()), + seed.map(|s| MiniSecretKey::to_bytes(&s)), + )) + } + + fn sign(&self, message: &[u8]) -> Signature { + let context = signing_context(SIGNING_CTX); + self.0.sign(context.bytes(message)).into() + } + + fn verify>(sig: &Self::Signature, message: M, pubkey: &Self::Public) -> bool { + Self::verify_weak(&sig.0[..], message, pubkey) + } + + fn verify_weak, M: AsRef<[u8]>>(sig: &[u8], message: M, pubkey: P) -> bool { + let signature = match schnorrkel::Signature::from_bytes(sig) { + Ok(signature) => 
signature, + Err(_) => return false, + }; + + let pub_key = match PublicKey::from_bytes(pubkey.as_ref()) { + Ok(pub_key) => pub_key, + Err(_) => return false, + }; + + pub_key + .verify_simple(SIGNING_CTX, message.as_ref(), &signature) + .is_ok() + } + + fn to_raw_vec(&self) -> Vec { + self.0.secret.to_bytes().to_vec() + } } #[cfg(feature = "std")] impl Pair { - /// Make a new key pair from binary data derived from a valid seed phrase. - /// - /// This uses a key derivation function to convert the entropy into a seed, then returns - /// the pair generated from it. - pub fn from_entropy(entropy: &[u8], password: Option<&str>) -> (Pair, Seed) { - let mini_key: MiniSecretKey = mini_secret_from_entropy(entropy, password.unwrap_or("")) - .expect("32 bytes can always build a key; qed"); - - let kp = mini_key.expand_to_keypair(ExpansionMode::Ed25519); - (Pair(kp), mini_key.to_bytes()) - } - - /// Verify a signature on a message. Returns `true` if the signature is good. - /// Supports old 0.1.1 deprecated signatures and should be used only for backward - /// compatibility. - pub fn verify_deprecated>(sig: &Signature, message: M, pubkey: &Public) -> bool { - // Match both schnorrkel 0.1.1 and 0.8.0+ signatures, supporting both wallets - // that have not been upgraded and those that have. - match PublicKey::from_bytes(pubkey.as_ref()) { - Ok(pk) => pk.verify_simple_preaudit_deprecated( - SIGNING_CTX, message.as_ref(), &sig.0[..], - ).is_ok(), - Err(_) => false, - } - } + /// Make a new key pair from binary data derived from a valid seed phrase. + /// + /// This uses a key derivation function to convert the entropy into a seed, then returns + /// the pair generated from it. 
+ pub fn from_entropy(entropy: &[u8], password: Option<&str>) -> (Pair, Seed) { + let mini_key: MiniSecretKey = mini_secret_from_entropy(entropy, password.unwrap_or("")) + .expect("32 bytes can always build a key; qed"); + + let kp = mini_key.expand_to_keypair(ExpansionMode::Ed25519); + (Pair(kp), mini_key.to_bytes()) + } + + /// Verify a signature on a message. Returns `true` if the signature is good. + /// Supports old 0.1.1 deprecated signatures and should be used only for backward + /// compatibility. + pub fn verify_deprecated>(sig: &Signature, message: M, pubkey: &Public) -> bool { + // Match both schnorrkel 0.1.1 and 0.8.0+ signatures, supporting both wallets + // that have not been upgraded and those that have. + match PublicKey::from_bytes(pubkey.as_ref()) { + Ok(pk) => pk + .verify_simple_preaudit_deprecated(SIGNING_CTX, message.as_ref(), &sig.0[..]) + .is_ok(), + Err(_) => false, + } + } } impl CryptoType for Public { - #[cfg(feature = "full_crypto")] - type Pair = Pair; + #[cfg(feature = "full_crypto")] + type Pair = Pair; } impl CryptoType for Signature { - #[cfg(feature = "full_crypto")] - type Pair = Pair; + #[cfg(feature = "full_crypto")] + type Pair = Pair; } #[cfg(feature = "full_crypto")] impl CryptoType for Pair { - type Pair = Pair; + type Pair = Pair; } /// Batch verification. @@ -618,267 +646,299 @@ impl CryptoType for Pair { /// Returns `true` if all signatures are correct, `false` otherwise. 
#[cfg(feature = "std")] pub fn verify_batch( - messages: Vec<&[u8]>, - signatures: Vec<&Signature>, - pub_keys: Vec<&Public>, + messages: Vec<&[u8]>, + signatures: Vec<&Signature>, + pub_keys: Vec<&Public>, ) -> bool { - let mut sr_pub_keys = Vec::with_capacity(pub_keys.len()); - for pub_key in pub_keys { - match schnorrkel::PublicKey::from_bytes(pub_key.as_ref()) { - Ok(pk) => sr_pub_keys.push(pk), - Err(_) => return false, - }; - } - - let mut sr_signatures = Vec::with_capacity(signatures.len()); - for signature in signatures { - match schnorrkel::Signature::from_bytes(signature.as_ref()) { - Ok(s) => sr_signatures.push(s), - Err(_) => return false - }; - } - - let mut messages: Vec = messages.into_iter().map( - |msg| signing_context(SIGNING_CTX).bytes(msg) - ).collect(); - - schnorrkel::verify_batch( - &mut messages, - &sr_signatures, - &sr_pub_keys, - true, - ).is_ok() + let mut sr_pub_keys = Vec::with_capacity(pub_keys.len()); + for pub_key in pub_keys { + match schnorrkel::PublicKey::from_bytes(pub_key.as_ref()) { + Ok(pk) => sr_pub_keys.push(pk), + Err(_) => return false, + }; + } + + let mut sr_signatures = Vec::with_capacity(signatures.len()); + for signature in signatures { + match schnorrkel::Signature::from_bytes(signature.as_ref()) { + Ok(s) => sr_signatures.push(s), + Err(_) => return false, + }; + } + + let mut messages: Vec = messages + .into_iter() + .map(|msg| signing_context(SIGNING_CTX).bytes(msg)) + .collect(); + + schnorrkel::verify_batch(&mut messages, &sr_signatures, &sr_pub_keys, true).is_ok() } #[cfg(test)] mod compatibility_test { - use super::*; - use crate::crypto::DEV_PHRASE; - use hex_literal::hex; - - // NOTE: tests to ensure addresses that are created with the `0.1.x` version (pre-audit) are - // still functional. 
- - #[test] - fn derive_soft_known_pair_should_work() { - let pair = Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).unwrap(); - // known address of DEV_PHRASE with 1.1 - let known = hex!("d6c71059dbbe9ad2b0ed3f289738b800836eb425544ce694825285b958ca755e"); - assert_eq!(pair.public().to_raw_vec(), known); - } - - #[test] - fn derive_hard_known_pair_should_work() { - let pair = Pair::from_string(&format!("{}//Alice", DEV_PHRASE), None).unwrap(); - // known address of DEV_PHRASE with 1.1 - let known = hex!("d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"); - assert_eq!(pair.public().to_raw_vec(), known); - } - - #[test] - fn verify_known_old_message_should_work() { - let public = Public::from_raw(hex!("b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918")); - // signature generated by the 1.1 version with the same ^^ public key. - let signature = Signature::from_raw(hex!( + use super::*; + use crate::crypto::DEV_PHRASE; + use hex_literal::hex; + + // NOTE: tests to ensure addresses that are created with the `0.1.x` version (pre-audit) are + // still functional. 
+ + #[test] + fn derive_soft_known_pair_should_work() { + let pair = Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).unwrap(); + // known address of DEV_PHRASE with 1.1 + let known = hex!("d6c71059dbbe9ad2b0ed3f289738b800836eb425544ce694825285b958ca755e"); + assert_eq!(pair.public().to_raw_vec(), known); + } + + #[test] + fn derive_hard_known_pair_should_work() { + let pair = Pair::from_string(&format!("{}//Alice", DEV_PHRASE), None).unwrap(); + // known address of DEV_PHRASE with 1.1 + let known = hex!("d43593c715fdd31c61141abd04a99fd6822c8558854ccde39a5684e7a56da27d"); + assert_eq!(pair.public().to_raw_vec(), known); + } + + #[test] + fn verify_known_old_message_should_work() { + let public = Public::from_raw(hex!( + "b4bfa1f7a5166695eb75299fd1c4c03ea212871c342f2c5dfea0902b2c246918" + )); + // signature generated by the 1.1 version with the same ^^ public key. + let signature = Signature::from_raw(hex!( "5a9755f069939f45d96aaf125cf5ce7ba1db998686f87f2fb3cbdea922078741a73891ba265f70c31436e18a9acd14d189d73c12317ab6c313285cd938453202" )); - let message = b"Verifying that I am the owner of 5G9hQLdsKQswNPgB499DeA5PkFBbgkLPJWkkS6FAM6xGQ8xD. Hash: 221455a3\n"; - assert!(Pair::verify_deprecated(&signature, &message[..], &public)); - assert!(!Pair::verify(&signature, &message[..], &public)); - } + let message = b"Verifying that I am the owner of 5G9hQLdsKQswNPgB499DeA5PkFBbgkLPJWkkS6FAM6xGQ8xD. 
Hash: 221455a3\n"; + assert!(Pair::verify_deprecated(&signature, &message[..], &public)); + assert!(!Pair::verify(&signature, &message[..], &public)); + } } #[cfg(test)] mod test { - use super::*; - use crate::crypto::{Ss58Codec, DEV_PHRASE, DEV_ADDRESS}; - use hex_literal::hex; - use serde_json; - - #[test] - fn default_phrase_should_be_used() { - assert_eq!( - Pair::from_string("//Alice///password", None).unwrap().public(), - Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")).unwrap().public(), - ); - assert_eq!( - Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).as_ref().map(Pair::public), - Pair::from_string("/Alice", None).as_ref().map(Pair::public) - ); - } - - #[test] - fn default_address_should_be_used() { - assert_eq!( - Public::from_string(&format!("{}/Alice", DEV_ADDRESS)), - Public::from_string("/Alice") - ); - } - - #[test] - fn default_phrase_should_correspond_to_default_address() { - assert_eq!( - Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None).unwrap().public(), - Public::from_string(&format!("{}/Alice", DEV_ADDRESS)).unwrap(), - ); - assert_eq!( - Pair::from_string("/Alice", None).unwrap().public(), - Public::from_string("/Alice").unwrap() - ); - } - - #[test] - fn derive_soft_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let derive_1 = pair.derive(Some(DeriveJunction::soft(1)).into_iter(), None).unwrap().0; - let derive_1b = pair.derive(Some(DeriveJunction::soft(1)).into_iter(), None).unwrap().0; - let derive_2 = pair.derive(Some(DeriveJunction::soft(2)).into_iter(), None).unwrap().0; - assert_eq!(derive_1.public(), derive_1b.public()); - assert_ne!(derive_1.public(), derive_2.public()); - } - - #[test] - fn derive_hard_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let derive_1 = pair.derive(Some(DeriveJunction::hard(1)).into_iter(), 
None).unwrap().0; - let derive_1b = pair.derive(Some(DeriveJunction::hard(1)).into_iter(), None).unwrap().0; - let derive_2 = pair.derive(Some(DeriveJunction::hard(2)).into_iter(), None).unwrap().0; - assert_eq!(derive_1.public(), derive_1b.public()); - assert_ne!(derive_1.public(), derive_2.public()); - } - - #[test] - fn derive_soft_public_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let path = Some(DeriveJunction::soft(1)); - let pair_1 = pair.derive(path.clone().into_iter(), None).unwrap().0; - let public_1 = pair.public().derive(path.into_iter()).unwrap(); - assert_eq!(pair_1.public(), public_1); - } - - #[test] - fn derive_hard_public_should_fail() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let path = Some(DeriveJunction::hard(1)); - assert!(pair.public().derive(path.into_iter()).is_none()); - } - - #[test] - fn sr_test_vector_should_work() { - let pair = Pair::from_seed(&hex!( - "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" - )); - let public = pair.public(); - assert_eq!( - public, - Public::from_raw(hex!( - "44a996beb1eef7bdcab976ab6d2ca26104834164ecf28fb375600576fcc6eb0f" - )) - ); - let message = b""; - let signature = pair.sign(message); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn generated_pair_should_work() { - let (pair, _) = Pair::generate(); - let public = pair.public(); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn messed_signature_should_not_work() { - let (pair, _) = Pair::generate(); - let public = pair.public(); - let message = b"Signed payload"; - let Signature(mut bytes) = pair.sign(&message[..]); - bytes[0] = !bytes[0]; - bytes[2] = !bytes[2]; - let signature = Signature(bytes); - assert!(!Pair::verify(&signature, 
&message[..], &public)); - } - - #[test] - fn messed_message_should_not_work() { - let (pair, _) = Pair::generate(); - let public = pair.public(); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - assert!(!Pair::verify(&signature, &b"Something unimportant", &public)); - } - - #[test] - fn seeded_pair_should_work() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let public = pair.public(); - assert_eq!( - public, - Public::from_raw(hex!( - "741c08a06f41c596608f6774259bd9043304adfa5d3eea62760bd9be97634d63" - )) - ); - let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); - let signature = pair.sign(&message[..]); - assert!(Pair::verify(&signature, &message[..], &public)); - } - - #[test] - fn ss58check_roundtrip_works() { - let (pair, _) = Pair::generate(); - let public = pair.public(); - let s = public.to_ss58check(); - println!("Correct: {}", s); - let cmp = Public::from_ss58check(&s).unwrap(); - assert_eq!(cmp, public); - } - - #[test] - fn verify_from_old_wasm_works() { - // The values in this test case are compared to the output of `node-test.js` in schnorrkel-js. - // - // This is to make sure that the wasm library is compatible. 
- let pk = Pair::from_seed( - &hex!("0000000000000000000000000000000000000000000000000000000000000000") - ); - let public = pk.public(); - let js_signature = Signature::from_raw(hex!( + use super::*; + use crate::crypto::{Ss58Codec, DEV_ADDRESS, DEV_PHRASE}; + use hex_literal::hex; + use serde_json; + + #[test] + fn default_phrase_should_be_used() { + assert_eq!( + Pair::from_string("//Alice///password", None) + .unwrap() + .public(), + Pair::from_string(&format!("{}//Alice", DEV_PHRASE), Some("password")) + .unwrap() + .public(), + ); + assert_eq!( + Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None) + .as_ref() + .map(Pair::public), + Pair::from_string("/Alice", None).as_ref().map(Pair::public) + ); + } + + #[test] + fn default_address_should_be_used() { + assert_eq!( + Public::from_string(&format!("{}/Alice", DEV_ADDRESS)), + Public::from_string("/Alice") + ); + } + + #[test] + fn default_phrase_should_correspond_to_default_address() { + assert_eq!( + Pair::from_string(&format!("{}/Alice", DEV_PHRASE), None) + .unwrap() + .public(), + Public::from_string(&format!("{}/Alice", DEV_ADDRESS)).unwrap(), + ); + assert_eq!( + Pair::from_string("/Alice", None).unwrap().public(), + Public::from_string("/Alice").unwrap() + ); + } + + #[test] + fn derive_soft_should_work() { + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let derive_1 = pair + .derive(Some(DeriveJunction::soft(1)).into_iter(), None) + .unwrap() + .0; + let derive_1b = pair + .derive(Some(DeriveJunction::soft(1)).into_iter(), None) + .unwrap() + .0; + let derive_2 = pair + .derive(Some(DeriveJunction::soft(2)).into_iter(), None) + .unwrap() + .0; + assert_eq!(derive_1.public(), derive_1b.public()); + assert_ne!(derive_1.public(), derive_2.public()); + } + + #[test] + fn derive_hard_should_work() { + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let derive_1 = pair + 
.derive(Some(DeriveJunction::hard(1)).into_iter(), None) + .unwrap() + .0; + let derive_1b = pair + .derive(Some(DeriveJunction::hard(1)).into_iter(), None) + .unwrap() + .0; + let derive_2 = pair + .derive(Some(DeriveJunction::hard(2)).into_iter(), None) + .unwrap() + .0; + assert_eq!(derive_1.public(), derive_1b.public()); + assert_ne!(derive_1.public(), derive_2.public()); + } + + #[test] + fn derive_soft_public_should_work() { + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let path = Some(DeriveJunction::soft(1)); + let pair_1 = pair.derive(path.clone().into_iter(), None).unwrap().0; + let public_1 = pair.public().derive(path.into_iter()).unwrap(); + assert_eq!(pair_1.public(), public_1); + } + + #[test] + fn derive_hard_public_should_fail() { + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let path = Some(DeriveJunction::hard(1)); + assert!(pair.public().derive(path.into_iter()).is_none()); + } + + #[test] + fn sr_test_vector_should_work() { + let pair = Pair::from_seed(&hex!( + "9d61b19deffd5a60ba844af492ec2cc44449c5697b326919703bac031cae7f60" + )); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "44a996beb1eef7bdcab976ab6d2ca26104834164ecf28fb375600576fcc6eb0f" + )) + ); + let message = b""; + let signature = pair.sign(message); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn generated_pair_should_work() { + let (pair, _) = Pair::generate(); + let public = pair.public(); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn messed_signature_should_not_work() { + let (pair, _) = Pair::generate(); + let public = pair.public(); + let message = b"Signed payload"; + let Signature(mut bytes) = pair.sign(&message[..]); + bytes[0] = !bytes[0]; + bytes[2] = 
!bytes[2]; + let signature = Signature(bytes); + assert!(!Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn messed_message_should_not_work() { + let (pair, _) = Pair::generate(); + let public = pair.public(); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + assert!(!Pair::verify( + &signature, + &b"Something unimportant", + &public + )); + } + + #[test] + fn seeded_pair_should_work() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let public = pair.public(); + assert_eq!( + public, + Public::from_raw(hex!( + "741c08a06f41c596608f6774259bd9043304adfa5d3eea62760bd9be97634d63" + )) + ); + let message = hex!("2f8c6129d816cf51c374bc7f08c3e63ed156cf78aefb4a6550d97b87997977ee00000000000000000200d75a980182b10ab7d54bfed3c964073a0ee172f3daa62325af021a68f707511a4500000000000000"); + let signature = pair.sign(&message[..]); + assert!(Pair::verify(&signature, &message[..], &public)); + } + + #[test] + fn ss58check_roundtrip_works() { + let (pair, _) = Pair::generate(); + let public = pair.public(); + let s = public.to_ss58check(); + println!("Correct: {}", s); + let cmp = Public::from_ss58check(&s).unwrap(); + assert_eq!(cmp, public); + } + + #[test] + fn verify_from_old_wasm_works() { + // The values in this test case are compared to the output of `node-test.js` in schnorrkel-js. + // + // This is to make sure that the wasm library is compatible. 
+ let pk = Pair::from_seed(&hex!( + "0000000000000000000000000000000000000000000000000000000000000000" + )); + let public = pk.public(); + let js_signature = Signature::from_raw(hex!( "28a854d54903e056f89581c691c1f7d2ff39f8f896c9e9c22475e60902cc2b3547199e0e91fa32902028f2ca2355e8cdd16cfe19ba5e8b658c94aa80f3b81a00" )); - assert!(Pair::verify_deprecated(&js_signature, b"SUBSTRATE", &public)); - assert!(!Pair::verify(&js_signature, b"SUBSTRATE", &public)); - } - - #[test] - fn signature_serialization_works() { - let pair = Pair::from_seed(b"12345678901234567890123456789012"); - let message = b"Something important"; - let signature = pair.sign(&message[..]); - let serialized_signature = serde_json::to_string(&signature).unwrap(); - // Signature is 64 bytes, so 128 chars + 2 quote chars - assert_eq!(serialized_signature.len(), 130); - let signature = serde_json::from_str(&serialized_signature).unwrap(); - assert!(Pair::verify(&signature, &message[..], &pair.public())); - } - - #[test] - fn signature_serialization_doesnt_panic() { - fn deserialize_signature(text: &str) -> Result { - Ok(serde_json::from_str(text)?) 
- } - assert!(deserialize_signature("Not valid json.").is_err()); - assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); - // Poorly-sized - assert!(deserialize_signature("\"abc123\"").is_err()); - } + assert!(Pair::verify_deprecated( + &js_signature, + b"SUBSTRATE", + &public + )); + assert!(!Pair::verify(&js_signature, b"SUBSTRATE", &public)); + } + + #[test] + fn signature_serialization_works() { + let pair = Pair::from_seed(b"12345678901234567890123456789012"); + let message = b"Something important"; + let signature = pair.sign(&message[..]); + let serialized_signature = serde_json::to_string(&signature).unwrap(); + // Signature is 64 bytes, so 128 chars + 2 quote chars + assert_eq!(serialized_signature.len(), 130); + let signature = serde_json::from_str(&serialized_signature).unwrap(); + assert!(Pair::verify(&signature, &message[..], &pair.public())); + } + + #[test] + fn signature_serialization_doesnt_panic() { + fn deserialize_signature(text: &str) -> Result { + Ok(serde_json::from_str(text)?) + } + assert!(deserialize_signature("Not valid json.").is_err()); + assert!(deserialize_signature("\"Not an actual signature.\"").is_err()); + // Poorly-sized + assert!(deserialize_signature("\"abc123\"").is_err()); + } } diff --git a/primitives/core/src/tasks.rs b/primitives/core/src/tasks.rs index 199a185e53..913bceaae3 100644 --- a/primitives/core/src/tasks.rs +++ b/primitives/core/src/tasks.rs @@ -25,32 +25,33 @@ use futures::{executor, task}; /// runtime host (implements `CloneableSpawn`). 
#[derive(Debug, Clone)] pub struct Executor { - pool: executor::ThreadPool, + pool: executor::ThreadPool, } impl Executor { - fn new() -> Self { - Self { - pool: executor::ThreadPool::builder().pool_size(1).create() - .expect("Failed to create task executor") - } - } + fn new() -> Self { + Self { + pool: executor::ThreadPool::builder() + .pool_size(1) + .create() + .expect("Failed to create task executor"), + } + } } impl task::Spawn for Executor { - fn spawn_obj(&self, future: task::FutureObj<'static, ()>) - -> Result<(), task::SpawnError> { - self.pool.spawn_obj(future) - } + fn spawn_obj(&self, future: task::FutureObj<'static, ()>) -> Result<(), task::SpawnError> { + self.pool.spawn_obj(future) + } } impl CloneableSpawn for Executor { - fn clone(&self) -> Box { - Box::new(Clone::clone(self)) - } + fn clone(&self) -> Box { + Box::new(Clone::clone(self)) + } } /// Create tasks executor. pub fn executor() -> Box { - Box::new(Executor::new()) -} \ No newline at end of file + Box::new(Executor::new()) +} diff --git a/primitives/core/src/testing.rs b/primitives/core/src/testing.rs index b5e6f4c7af..29d69f56e8 100644 --- a/primitives/core/src/testing.rs +++ b/primitives/core/src/testing.rs @@ -16,16 +16,16 @@ //! Types that should only be used for testing! -use crate::crypto::{KeyTypeId, CryptoTypePublicPair}; +use crate::crypto::{CryptoTypePublicPair, KeyTypeId}; #[cfg(feature = "std")] use crate::{ - crypto::{Pair, Public}, - ed25519, sr25519, - traits::BareCryptoStoreError + crypto::{Pair, Public}, + ed25519, sr25519, + traits::BareCryptoStoreError, }; +use codec::Encode; #[cfg(feature = "std")] use std::collections::HashSet; -use codec::Encode; /// Key type for generic Ed25519 key. pub const ED25519: KeyTypeId = KeyTypeId(*b"ed25"); /// Key type for generic Sr 25519 key. 
@@ -35,159 +35,182 @@ pub const SR25519: KeyTypeId = KeyTypeId(*b"sr25"); #[cfg(feature = "std")] #[derive(Default)] pub struct KeyStore { - /// `KeyTypeId` maps to public keys and public keys map to private keys. - keys: std::collections::HashMap, String>>, + /// `KeyTypeId` maps to public keys and public keys map to private keys. + keys: std::collections::HashMap, String>>, } #[cfg(feature = "std")] impl KeyStore { - /// Creates a new instance of `Self`. - pub fn new() -> crate::traits::BareCryptoStorePtr { - std::sync::Arc::new(parking_lot::RwLock::new(Self::default())) - } - - fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { - self.keys.get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) - ) - } - - fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { - self.keys.get(&id) - .and_then(|inner| - inner.get(pub_key.as_slice()) - .map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) - ) - } - + /// Creates a new instance of `Self`. 
+ pub fn new() -> crate::traits::BareCryptoStorePtr { + std::sync::Arc::new(parking_lot::RwLock::new(Self::default())) + } + + fn sr25519_key_pair(&self, id: KeyTypeId, pub_key: &sr25519::Public) -> Option { + self.keys.get(&id).and_then(|inner| { + inner.get(pub_key.as_slice()).map(|s| { + sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") + }) + }) + } + + fn ed25519_key_pair(&self, id: KeyTypeId, pub_key: &ed25519::Public) -> Option { + self.keys.get(&id).and_then(|inner| { + inner.get(pub_key.as_slice()).map(|s| { + ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") + }) + }) + } } #[cfg(feature = "std")] impl crate::traits::BareCryptoStore for KeyStore { - fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError> { - self.keys - .get(&id) - .map(|map| { - Ok(map.keys() - .fold(Vec::new(), |mut v, k| { - v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); - v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); - v - })) - }) - .unwrap_or(Ok(vec![])) - } - - fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.get(&id) - .map(|keys| - keys.values() - .map(|s| sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid")) - .map(|p| p.public()) - .collect() - ) - .unwrap_or_default() - } - - fn sr25519_generate_new( - &mut self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result { - match seed { - Some(seed) => { - let pair = sr25519::Pair::from_string(seed, None) - .map_err(|_| BareCryptoStoreError::ValidationError("Generates an `sr25519` pair.".to_owned()))?; - self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); - Ok(pair.public()) - }, - None => { - let (pair, phrase, _) = sr25519::Pair::generate_with_phrase(None); - self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); - Ok(pair.public()) - } - } - } - - fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { - self.keys.get(&id) - .map(|keys| - keys.values() - 
.map(|s| ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid")) - .map(|p| p.public()) - .collect() - ) - .unwrap_or_default() - } - - fn ed25519_generate_new( - &mut self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result { - match seed { - Some(seed) => { - let pair = ed25519::Pair::from_string(seed, None) - .map_err(|_| BareCryptoStoreError::ValidationError("Generates an `ed25519` pair.".to_owned()))?; - self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), seed.into()); - Ok(pair.public()) - }, - None => { - let (pair, phrase, _) = ed25519::Pair::generate_with_phrase(None); - self.keys.entry(id).or_default().insert(pair.public().to_raw_vec(), phrase); - Ok(pair.public()) - } - } - } - - fn insert_unknown(&mut self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { - self.keys.entry(id).or_default().insert(public.to_owned(), suri.to_string()); - Ok(()) - } - - fn password(&self) -> Option<&str> { - None - } - - fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { - public_keys.iter().all(|(k, t)| self.keys.get(&t).and_then(|s| s.get(k)).is_some()) - } - - fn supported_keys( - &self, - id: KeyTypeId, - keys: Vec, - ) -> std::result::Result, BareCryptoStoreError> { - let provided_keys = keys.into_iter().collect::>(); - let all_keys = self.keys(id)?.into_iter().collect::>(); - - Ok(provided_keys.intersection(&all_keys).cloned().collect()) - } - - fn sign_with( - &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, - msg: &[u8], - ) -> Result, BareCryptoStoreError> { - match key.0 { - ed25519::CRYPTO_ID => { - let key_pair: ed25519::Pair = self - .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())) - .ok_or(BareCryptoStoreError::PairNotFound("ed25519".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); - } - sr25519::CRYPTO_ID => { - let key_pair: sr25519::Pair = self - .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())) - 
.ok_or(BareCryptoStoreError::PairNotFound("sr25519".to_owned()))?; - return Ok(key_pair.sign(msg).encode()); - } - _ => Err(BareCryptoStoreError::KeyNotSupported(id)) - } - } + fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError> { + self.keys + .get(&id) + .map(|map| { + Ok(map.keys().fold(Vec::new(), |mut v, k| { + v.push(CryptoTypePublicPair(sr25519::CRYPTO_ID, k.clone())); + v.push(CryptoTypePublicPair(ed25519::CRYPTO_ID, k.clone())); + v + })) + }) + .unwrap_or(Ok(vec![])) + } + + fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec { + self.keys + .get(&id) + .map(|keys| { + keys.values() + .map(|s| { + sr25519::Pair::from_string(s, None).expect("`sr25519` seed slice is valid") + }) + .map(|p| p.public()) + .collect() + }) + .unwrap_or_default() + } + + fn sr25519_generate_new( + &mut self, + id: KeyTypeId, + seed: Option<&str>, + ) -> Result { + match seed { + Some(seed) => { + let pair = sr25519::Pair::from_string(seed, None).map_err(|_| { + BareCryptoStoreError::ValidationError("Generates an `sr25519` pair.".to_owned()) + })?; + self.keys + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), seed.into()); + Ok(pair.public()) + } + None => { + let (pair, phrase, _) = sr25519::Pair::generate_with_phrase(None); + self.keys + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); + Ok(pair.public()) + } + } + } + + fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec { + self.keys + .get(&id) + .map(|keys| { + keys.values() + .map(|s| { + ed25519::Pair::from_string(s, None).expect("`ed25519` seed slice is valid") + }) + .map(|p| p.public()) + .collect() + }) + .unwrap_or_default() + } + + fn ed25519_generate_new( + &mut self, + id: KeyTypeId, + seed: Option<&str>, + ) -> Result { + match seed { + Some(seed) => { + let pair = ed25519::Pair::from_string(seed, None).map_err(|_| { + BareCryptoStoreError::ValidationError("Generates an `ed25519` pair.".to_owned()) + })?; + self.keys + .entry(id) + .or_default() + 
.insert(pair.public().to_raw_vec(), seed.into()); + Ok(pair.public()) + } + None => { + let (pair, phrase, _) = ed25519::Pair::generate_with_phrase(None); + self.keys + .entry(id) + .or_default() + .insert(pair.public().to_raw_vec(), phrase); + Ok(pair.public()) + } + } + } + + fn insert_unknown(&mut self, id: KeyTypeId, suri: &str, public: &[u8]) -> Result<(), ()> { + self.keys + .entry(id) + .or_default() + .insert(public.to_owned(), suri.to_string()); + Ok(()) + } + + fn password(&self) -> Option<&str> { + None + } + + fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool { + public_keys + .iter() + .all(|(k, t)| self.keys.get(&t).and_then(|s| s.get(k)).is_some()) + } + + fn supported_keys( + &self, + id: KeyTypeId, + keys: Vec, + ) -> std::result::Result, BareCryptoStoreError> { + let provided_keys = keys.into_iter().collect::>(); + let all_keys = self.keys(id)?.into_iter().collect::>(); + + Ok(provided_keys.intersection(&all_keys).cloned().collect()) + } + + fn sign_with( + &self, + id: KeyTypeId, + key: &CryptoTypePublicPair, + msg: &[u8], + ) -> Result, BareCryptoStoreError> { + match key.0 { + ed25519::CRYPTO_ID => { + let key_pair: ed25519::Pair = self + .ed25519_key_pair(id, &ed25519::Public::from_slice(key.1.as_slice())) + .ok_or(BareCryptoStoreError::PairNotFound("ed25519".to_owned()))?; + return Ok(key_pair.sign(msg).encode()); + } + sr25519::CRYPTO_ID => { + let key_pair: sr25519::Pair = self + .sr25519_key_pair(id, &sr25519::Public::from_slice(key.1.as_slice())) + .ok_or(BareCryptoStoreError::PairNotFound("sr25519".to_owned()))?; + return Ok(key_pair.sign(msg).encode()); + } + _ => Err(BareCryptoStoreError::KeyNotSupported(id)), + } + } } /// Macro for exporting functions from wasm in with the expected signature for using it with the @@ -292,38 +315,38 @@ macro_rules! 
wasm_export_functions { #[cfg(test)] mod tests { - use super::*; - use crate::sr25519; - use crate::testing::{ED25519, SR25519}; + use super::*; + use crate::sr25519; + use crate::testing::{ED25519, SR25519}; - #[test] - fn store_key_and_extract() { - let store = KeyStore::new(); + #[test] + fn store_key_and_extract() { + let store = KeyStore::new(); - let public = store.write() - .ed25519_generate_new(ED25519, None) - .expect("Generates key"); + let public = store + .write() + .ed25519_generate_new(ED25519, None) + .expect("Generates key"); - let public_keys = store.read().keys(ED25519).unwrap(); + let public_keys = store.read().keys(ED25519).unwrap(); - assert!(public_keys.contains(&public.into())); - } + assert!(public_keys.contains(&public.into())); + } - #[test] - fn store_unknown_and_extract_it() { - let store = KeyStore::new(); + #[test] + fn store_unknown_and_extract_it() { + let store = KeyStore::new(); - let secret_uri = "//Alice"; - let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); + let secret_uri = "//Alice"; + let key_pair = sr25519::Pair::from_string(secret_uri, None).expect("Generates key pair"); - store.write().insert_unknown( - SR25519, - secret_uri, - key_pair.public().as_ref(), - ).expect("Inserts unknown key"); + store + .write() + .insert_unknown(SR25519, secret_uri, key_pair.public().as_ref()) + .expect("Inserts unknown key"); - let public_keys = store.read().keys(SR25519).unwrap(); + let public_keys = store.read().keys(SR25519).unwrap(); - assert!(public_keys.contains(&key_pair.public().into())); - } + assert!(public_keys.contains(&key_pair.public().into())); + } } diff --git a/primitives/core/src/traits.rs b/primitives/core/src/traits.rs index 14839fb585..f058506659 100644 --- a/primitives/core/src/traits.rs +++ b/primitives/core/src/traits.rs @@ -17,15 +17,15 @@ //! Shareable Substrate traits. 
use crate::{ - crypto::{KeyTypeId, CryptoTypePublicPair}, - ed25519, sr25519, + crypto::{CryptoTypePublicPair, KeyTypeId}, + ed25519, sr25519, }; use std::{ - borrow::Cow, - fmt::{Debug, Display}, - panic::UnwindSafe, - sync::Arc, + borrow::Cow, + fmt::{Debug, Display}, + panic::UnwindSafe, + sync::Arc, }; pub use sp_externalities::{Externalities, ExternalitiesExt}; @@ -33,223 +33,227 @@ pub use sp_externalities::{Externalities, ExternalitiesExt}; /// BareCryptoStore error #[derive(Debug)] pub enum BareCryptoStoreError { - /// Public key type is not supported - KeyNotSupported(KeyTypeId), - /// Pair not found for public key and KeyTypeId - PairNotFound(String), - /// Validation error - ValidationError(String), - /// Keystore unavailable - Unavailable, - /// Programming errors - Other(String) + /// Public key type is not supported + KeyNotSupported(KeyTypeId), + /// Pair not found for public key and KeyTypeId + PairNotFound(String), + /// Validation error + ValidationError(String), + /// Keystore unavailable + Unavailable, + /// Programming errors + Other(String), } /// Something that generates, stores and provides access to keys. pub trait BareCryptoStore: Send + Sync { - /// Returns all sr25519 public keys for the given key type. - fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec; - /// Generate a new sr25519 key pair for the given key type and an optional seed. - /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. - /// - /// Returns the public key of the generated key pair. - fn sr25519_generate_new( - &mut self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result; - /// Returns all ed25519 public keys for the given key type. - fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec; - /// Generate a new ed25519 key pair for the given key type and an optional seed. - /// - /// If the given seed is `Some(_)`, the key pair will only be stored in memory. - /// - /// Returns the public key of the generated key pair. 
- fn ed25519_generate_new( - &mut self, - id: KeyTypeId, - seed: Option<&str>, - ) -> Result; - - /// Insert a new key. This doesn't require any known of the crypto; but a public key must be - /// manually provided. - /// - /// Places it into the file system store. - /// - /// `Err` if there's some sort of weird filesystem error, but should generally be `Ok`. - fn insert_unknown(&mut self, _key_type: KeyTypeId, _suri: &str, _public: &[u8]) -> Result<(), ()>; - - /// Get the password for this store. - fn password(&self) -> Option<&str>; - /// Find intersection between provided keys and supported keys - /// - /// Provided a list of (CryptoTypeId,[u8]) pairs, this would return - /// a filtered set of public keys which are supported by the keystore. - fn supported_keys( - &self, - id: KeyTypeId, - keys: Vec - ) -> Result, BareCryptoStoreError>; - /// List all supported keys - /// - /// Returns a set of public keys the signer supports. - fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError>; - - /// Checks if the private keys for the given public key and key type combinations exist. - /// - /// Returns `true` iff all private keys could be found. - fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool; - - /// Sign with key - /// - /// Signs a message with the private key that matches - /// the public key passed. - /// - /// Returns the SCALE encoded signature if key is found & supported, - /// an error otherwise. - fn sign_with( - &self, - id: KeyTypeId, - key: &CryptoTypePublicPair, - msg: &[u8], - ) -> Result, BareCryptoStoreError>; - - /// Sign with any key - /// - /// Given a list of public keys, find the first supported key and - /// sign the provided message with that key. 
- /// - /// Returns a tuple of the used key and the signature - fn sign_with_any( - &self, - id: KeyTypeId, - keys: Vec, - msg: &[u8] - ) -> Result<(CryptoTypePublicPair, Vec), BareCryptoStoreError> { - if keys.len() == 1 { - return self.sign_with(id, &keys[0], msg).map(|s| (keys[0].clone(), s)); - } else { - for k in self.supported_keys(id, keys)? { - if let Ok(sign) = self.sign_with(id, &k, msg) { - return Ok((k, sign)); - } - } - } - Err(BareCryptoStoreError::KeyNotSupported(id)) - } - - /// Sign with all keys - /// - /// Provided a list of public keys, sign a message with - /// each key given that the key is supported. - /// - /// Returns a list of `Result`s each representing the signature of each key or - /// a BareCryptoStoreError for non-supported keys. - fn sign_with_all( - &self, - id: KeyTypeId, - keys: Vec, - msg: &[u8], - ) -> Result, BareCryptoStoreError>>, ()>{ - Ok(keys.iter().map(|k| self.sign_with(id, k, msg)).collect()) - } + /// Returns all sr25519 public keys for the given key type. + fn sr25519_public_keys(&self, id: KeyTypeId) -> Vec; + /// Generate a new sr25519 key pair for the given key type and an optional seed. + /// + /// If the given seed is `Some(_)`, the key pair will only be stored in memory. + /// + /// Returns the public key of the generated key pair. + fn sr25519_generate_new( + &mut self, + id: KeyTypeId, + seed: Option<&str>, + ) -> Result; + /// Returns all ed25519 public keys for the given key type. + fn ed25519_public_keys(&self, id: KeyTypeId) -> Vec; + /// Generate a new ed25519 key pair for the given key type and an optional seed. + /// + /// If the given seed is `Some(_)`, the key pair will only be stored in memory. + /// + /// Returns the public key of the generated key pair. + fn ed25519_generate_new( + &mut self, + id: KeyTypeId, + seed: Option<&str>, + ) -> Result; + + /// Insert a new key. This doesn't require any known of the crypto; but a public key must be + /// manually provided. 
+ /// + /// Places it into the file system store. + /// + /// `Err` if there's some sort of weird filesystem error, but should generally be `Ok`. + fn insert_unknown( + &mut self, + _key_type: KeyTypeId, + _suri: &str, + _public: &[u8], + ) -> Result<(), ()>; + + /// Get the password for this store. + fn password(&self) -> Option<&str>; + /// Find intersection between provided keys and supported keys + /// + /// Provided a list of (CryptoTypeId,[u8]) pairs, this would return + /// a filtered set of public keys which are supported by the keystore. + fn supported_keys( + &self, + id: KeyTypeId, + keys: Vec, + ) -> Result, BareCryptoStoreError>; + /// List all supported keys + /// + /// Returns a set of public keys the signer supports. + fn keys(&self, id: KeyTypeId) -> Result, BareCryptoStoreError>; + + /// Checks if the private keys for the given public key and key type combinations exist. + /// + /// Returns `true` iff all private keys could be found. + fn has_keys(&self, public_keys: &[(Vec, KeyTypeId)]) -> bool; + + /// Sign with key + /// + /// Signs a message with the private key that matches + /// the public key passed. + /// + /// Returns the SCALE encoded signature if key is found & supported, + /// an error otherwise. + fn sign_with( + &self, + id: KeyTypeId, + key: &CryptoTypePublicPair, + msg: &[u8], + ) -> Result, BareCryptoStoreError>; + + /// Sign with any key + /// + /// Given a list of public keys, find the first supported key and + /// sign the provided message with that key. + /// + /// Returns a tuple of the used key and the signature + fn sign_with_any( + &self, + id: KeyTypeId, + keys: Vec, + msg: &[u8], + ) -> Result<(CryptoTypePublicPair, Vec), BareCryptoStoreError> { + if keys.len() == 1 { + return self + .sign_with(id, &keys[0], msg) + .map(|s| (keys[0].clone(), s)); + } else { + for k in self.supported_keys(id, keys)? 
{ + if let Ok(sign) = self.sign_with(id, &k, msg) { + return Ok((k, sign)); + } + } + } + Err(BareCryptoStoreError::KeyNotSupported(id)) + } + + /// Sign with all keys + /// + /// Provided a list of public keys, sign a message with + /// each key given that the key is supported. + /// + /// Returns a list of `Result`s each representing the signature of each key or + /// a BareCryptoStoreError for non-supported keys. + fn sign_with_all( + &self, + id: KeyTypeId, + keys: Vec, + msg: &[u8], + ) -> Result, BareCryptoStoreError>>, ()> { + Ok(keys.iter().map(|k| self.sign_with(id, k, msg)).collect()) + } } /// A pointer to the key store. pub type BareCryptoStorePtr = Arc>; sp_externalities::decl_extension! { - /// The keystore extension to register/retrieve from the externalities. - pub struct KeystoreExt(BareCryptoStorePtr); + /// The keystore extension to register/retrieve from the externalities. + pub struct KeystoreExt(BareCryptoStorePtr); } /// Code execution engine. pub trait CodeExecutor: Sized + Send + Sync + CallInWasm + Clone + 'static { - /// Externalities error type. - type Error: Display + Debug + Send + 'static; - - /// Call a given method in the runtime. Returns a tuple of the result (either the output data - /// or an execution error) together with a `bool`, which is true if native execution was used. - fn call< - R: codec::Codec + PartialEq, - NC: FnOnce() -> Result + UnwindSafe, - >( - &self, - ext: &mut dyn Externalities, - runtime_code: &RuntimeCode, - method: &str, - data: &[u8], - use_native: bool, - native_call: Option, - ) -> (Result, Self::Error>, bool); + /// Externalities error type. + type Error: Display + Debug + Send + 'static; + + /// Call a given method in the runtime. Returns a tuple of the result (either the output data + /// or an execution error) together with a `bool`, which is true if native execution was used. 
+ fn call Result + UnwindSafe>( + &self, + ext: &mut dyn Externalities, + runtime_code: &RuntimeCode, + method: &str, + data: &[u8], + use_native: bool, + native_call: Option, + ) -> (Result, Self::Error>, bool); } /// Something that can fetch the runtime `:code`. pub trait FetchRuntimeCode { - /// Fetch the runtime `:code`. - /// - /// If the `:code` could not be found/not available, `None` should be returned. - fn fetch_runtime_code<'a>(&'a self) -> Option>; + /// Fetch the runtime `:code`. + /// + /// If the `:code` could not be found/not available, `None` should be returned. + fn fetch_runtime_code<'a>(&'a self) -> Option>; } /// Wrapper to use a `u8` slice or `Vec` as [`FetchRuntimeCode`]. pub struct WrappedRuntimeCode<'a>(pub std::borrow::Cow<'a, [u8]>); impl<'a> FetchRuntimeCode for WrappedRuntimeCode<'a> { - fn fetch_runtime_code<'b>(&'b self) -> Option> { - Some(self.0.as_ref().into()) - } + fn fetch_runtime_code<'b>(&'b self) -> Option> { + Some(self.0.as_ref().into()) + } } /// Type that implements [`FetchRuntimeCode`] and always returns `None`. pub struct NoneFetchRuntimeCode; impl FetchRuntimeCode for NoneFetchRuntimeCode { - fn fetch_runtime_code<'a>(&'a self) -> Option> { - None - } + fn fetch_runtime_code<'a>(&'a self) -> Option> { + None + } } /// The Wasm code of a Substrate runtime. #[derive(Clone)] pub struct RuntimeCode<'a> { - /// The code fetcher that can be used to lazily fetch the code. - pub code_fetcher: &'a dyn FetchRuntimeCode, - /// The optional heap pages this `code` should be executed with. - /// - /// If `None` are given, the default value of the executor will be used. - pub heap_pages: Option, - /// The SCALE encoded hash of `code`. - /// - /// The hashing algorithm isn't that important, as long as all runtime - /// code instances use the same. - pub hash: Vec, + /// The code fetcher that can be used to lazily fetch the code. 
+ pub code_fetcher: &'a dyn FetchRuntimeCode, + /// The optional heap pages this `code` should be executed with. + /// + /// If `None` are given, the default value of the executor will be used. + pub heap_pages: Option, + /// The SCALE encoded hash of `code`. + /// + /// The hashing algorithm isn't that important, as long as all runtime + /// code instances use the same. + pub hash: Vec, } impl<'a> PartialEq for RuntimeCode<'a> { - fn eq(&self, other: &Self) -> bool { - self.hash == other.hash - } + fn eq(&self, other: &Self) -> bool { + self.hash == other.hash + } } impl<'a> RuntimeCode<'a> { - /// Create an empty instance. - /// - /// This is only useful for tests that don't want to execute any code. - pub fn empty() -> Self { - Self { - code_fetcher: &NoneFetchRuntimeCode, - hash: Vec::new(), - heap_pages: None, - } - } + /// Create an empty instance. + /// + /// This is only useful for tests that don't want to execute any code. + pub fn empty() -> Self { + Self { + code_fetcher: &NoneFetchRuntimeCode, + hash: Vec::new(), + heap_pages: None, + } + } } impl<'a> FetchRuntimeCode for RuntimeCode<'a> { - fn fetch_runtime_code<'b>(&'b self) -> Option> { - self.code_fetcher.fetch_runtime_code() - } + fn fetch_runtime_code<'b>(&'b self) -> Option> { + self.code_fetcher.fetch_runtime_code() + } } /// Could not find the `:code` in the externalities while initializing the [`RuntimeCode`]. @@ -257,58 +261,58 @@ impl<'a> FetchRuntimeCode for RuntimeCode<'a> { pub struct CodeNotFound; impl std::fmt::Display for CodeNotFound { - fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { - write!(f, "the storage entry `:code` doesn't have any code") - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> Result<(), std::fmt::Error> { + write!(f, "the storage entry `:code` doesn't have any code") + } } /// Something that can call a method in a WASM blob. 
pub trait CallInWasm: Send + Sync { - /// Call the given `method` in the given `wasm_blob` using `call_data` (SCALE encoded arguments) - /// to decode the arguments for the method. - /// - /// Returns the SCALE encoded return value of the method. - /// - /// # Note - /// - /// If `code_hash` is `Some(_)` the `wasm_code` module and instance will be cached internally, - /// otherwise it is thrown away after the call. - fn call_in_wasm( - &self, - wasm_code: &[u8], - code_hash: Option>, - method: &str, - call_data: &[u8], - ext: &mut dyn Externalities, - ) -> Result, String>; + /// Call the given `method` in the given `wasm_blob` using `call_data` (SCALE encoded arguments) + /// to decode the arguments for the method. + /// + /// Returns the SCALE encoded return value of the method. + /// + /// # Note + /// + /// If `code_hash` is `Some(_)` the `wasm_code` module and instance will be cached internally, + /// otherwise it is thrown away after the call. + fn call_in_wasm( + &self, + wasm_code: &[u8], + code_hash: Option>, + method: &str, + call_data: &[u8], + ext: &mut dyn Externalities, + ) -> Result, String>; } sp_externalities::decl_extension! { - /// The call-in-wasm extension to register/retrieve from the externalities. - pub struct CallInWasmExt(Box); + /// The call-in-wasm extension to register/retrieve from the externalities. + pub struct CallInWasmExt(Box); } impl CallInWasmExt { - /// Creates a new instance of `Self`. - pub fn new(inner: T) -> Self { - Self(Box::new(inner)) - } + /// Creates a new instance of `Self`. + pub fn new(inner: T) -> Self { + Self(Box::new(inner)) + } } /// Something that can spawn tasks and also can be cloned. pub trait CloneableSpawn: futures::task::Spawn + Send + Sync { - /// Clone as heap-allocated handle. - fn clone(&self) -> Box; + /// Clone as heap-allocated handle. + fn clone(&self) -> Box; } sp_externalities::decl_extension! { - /// Task executor extension. - pub struct TaskExecutorExt(Box); + /// Task executor extension. 
+ pub struct TaskExecutorExt(Box); } impl TaskExecutorExt { - /// New instance of task executor extension. - pub fn new(spawn_handle: Box) -> Self { - Self(spawn_handle) - } + /// New instance of task executor extension. + pub fn new(spawn_handle: Box) -> Self { + Self(spawn_handle) + } } diff --git a/primitives/core/src/u32_trait.rs b/primitives/core/src/u32_trait.rs index 975b4aa909..4924a3005d 100644 --- a/primitives/core/src/u32_trait.rs +++ b/primitives/core/src/u32_trait.rs @@ -18,226 +18,552 @@ /// A u32 value, wrapped in a trait because we don't yet have const generics. pub trait Value { - /// The actual value represented by the impl'ing type. - const VALUE: u32; + /// The actual value represented by the impl'ing type. + const VALUE: u32; } /// Type representing the value 0 for the `Value` trait. -pub struct _0; impl Value for _0 { const VALUE: u32 = 0; } +pub struct _0; +impl Value for _0 { + const VALUE: u32 = 0; +} /// Type representing the value 1 for the `Value` trait. -pub struct _1; impl Value for _1 { const VALUE: u32 = 1; } +pub struct _1; +impl Value for _1 { + const VALUE: u32 = 1; +} /// Type representing the value 2 for the `Value` trait. -pub struct _2; impl Value for _2 { const VALUE: u32 = 2; } +pub struct _2; +impl Value for _2 { + const VALUE: u32 = 2; +} /// Type representing the value 3 for the `Value` trait. -pub struct _3; impl Value for _3 { const VALUE: u32 = 3; } +pub struct _3; +impl Value for _3 { + const VALUE: u32 = 3; +} /// Type representing the value 4 for the `Value` trait. -pub struct _4; impl Value for _4 { const VALUE: u32 = 4; } +pub struct _4; +impl Value for _4 { + const VALUE: u32 = 4; +} /// Type representing the value 5 for the `Value` trait. -pub struct _5; impl Value for _5 { const VALUE: u32 = 5; } +pub struct _5; +impl Value for _5 { + const VALUE: u32 = 5; +} /// Type representing the value 6 for the `Value` trait. 
-pub struct _6; impl Value for _6 { const VALUE: u32 = 6; } +pub struct _6; +impl Value for _6 { + const VALUE: u32 = 6; +} /// Type representing the value 7 for the `Value` trait. -pub struct _7; impl Value for _7 { const VALUE: u32 = 7; } +pub struct _7; +impl Value for _7 { + const VALUE: u32 = 7; +} /// Type representing the value 8 for the `Value` trait. -pub struct _8; impl Value for _8 { const VALUE: u32 = 8; } +pub struct _8; +impl Value for _8 { + const VALUE: u32 = 8; +} /// Type representing the value 9 for the `Value` trait. -pub struct _9; impl Value for _9 { const VALUE: u32 = 9; } +pub struct _9; +impl Value for _9 { + const VALUE: u32 = 9; +} /// Type representing the value 10 for the `Value` trait. -pub struct _10; impl Value for _10 { const VALUE: u32 = 10; } +pub struct _10; +impl Value for _10 { + const VALUE: u32 = 10; +} /// Type representing the value 11 for the `Value` trait. -pub struct _11; impl Value for _11 { const VALUE: u32 = 11; } +pub struct _11; +impl Value for _11 { + const VALUE: u32 = 11; +} /// Type representing the value 12 for the `Value` trait. -pub struct _12; impl Value for _12 { const VALUE: u32 = 12; } +pub struct _12; +impl Value for _12 { + const VALUE: u32 = 12; +} /// Type representing the value 13 for the `Value` trait. -pub struct _13; impl Value for _13 { const VALUE: u32 = 13; } +pub struct _13; +impl Value for _13 { + const VALUE: u32 = 13; +} /// Type representing the value 14 for the `Value` trait. -pub struct _14; impl Value for _14 { const VALUE: u32 = 14; } +pub struct _14; +impl Value for _14 { + const VALUE: u32 = 14; +} /// Type representing the value 15 for the `Value` trait. -pub struct _15; impl Value for _15 { const VALUE: u32 = 15; } +pub struct _15; +impl Value for _15 { + const VALUE: u32 = 15; +} /// Type representing the value 16 for the `Value` trait. 
-pub struct _16; impl Value for _16 { const VALUE: u32 = 16; } +pub struct _16; +impl Value for _16 { + const VALUE: u32 = 16; +} /// Type representing the value 17 for the `Value` trait. -pub struct _17; impl Value for _17 { const VALUE: u32 = 17; } +pub struct _17; +impl Value for _17 { + const VALUE: u32 = 17; +} /// Type representing the value 18 for the `Value` trait. -pub struct _18; impl Value for _18 { const VALUE: u32 = 18; } +pub struct _18; +impl Value for _18 { + const VALUE: u32 = 18; +} /// Type representing the value 19 for the `Value` trait. -pub struct _19; impl Value for _19 { const VALUE: u32 = 19; } +pub struct _19; +impl Value for _19 { + const VALUE: u32 = 19; +} /// Type representing the value 20 for the `Value` trait. -pub struct _20; impl Value for _20 { const VALUE: u32 = 20; } +pub struct _20; +impl Value for _20 { + const VALUE: u32 = 20; +} /// Type representing the value 21 for the `Value` trait. -pub struct _21; impl Value for _21 { const VALUE: u32 = 21; } +pub struct _21; +impl Value for _21 { + const VALUE: u32 = 21; +} /// Type representing the value 22 for the `Value` trait. -pub struct _22; impl Value for _22 { const VALUE: u32 = 22; } +pub struct _22; +impl Value for _22 { + const VALUE: u32 = 22; +} /// Type representing the value 23 for the `Value` trait. -pub struct _23; impl Value for _23 { const VALUE: u32 = 23; } +pub struct _23; +impl Value for _23 { + const VALUE: u32 = 23; +} /// Type representing the value 24 for the `Value` trait. -pub struct _24; impl Value for _24 { const VALUE: u32 = 24; } +pub struct _24; +impl Value for _24 { + const VALUE: u32 = 24; +} /// Type representing the value 25 for the `Value` trait. -pub struct _25; impl Value for _25 { const VALUE: u32 = 25; } +pub struct _25; +impl Value for _25 { + const VALUE: u32 = 25; +} /// Type representing the value 26 for the `Value` trait. 
-pub struct _26; impl Value for _26 { const VALUE: u32 = 26; } +pub struct _26; +impl Value for _26 { + const VALUE: u32 = 26; +} /// Type representing the value 27 for the `Value` trait. -pub struct _27; impl Value for _27 { const VALUE: u32 = 27; } +pub struct _27; +impl Value for _27 { + const VALUE: u32 = 27; +} /// Type representing the value 28 for the `Value` trait. -pub struct _28; impl Value for _28 { const VALUE: u32 = 28; } +pub struct _28; +impl Value for _28 { + const VALUE: u32 = 28; +} /// Type representing the value 29 for the `Value` trait. -pub struct _29; impl Value for _29 { const VALUE: u32 = 29; } +pub struct _29; +impl Value for _29 { + const VALUE: u32 = 29; +} /// Type representing the value 30 for the `Value` trait. -pub struct _30; impl Value for _30 { const VALUE: u32 = 30; } +pub struct _30; +impl Value for _30 { + const VALUE: u32 = 30; +} /// Type representing the value 31 for the `Value` trait. -pub struct _31; impl Value for _31 { const VALUE: u32 = 31; } +pub struct _31; +impl Value for _31 { + const VALUE: u32 = 31; +} /// Type representing the value 32 for the `Value` trait. -pub struct _32; impl Value for _32 { const VALUE: u32 = 32; } +pub struct _32; +impl Value for _32 { + const VALUE: u32 = 32; +} /// Type representing the value 33 for the `Value` trait. -pub struct _33; impl Value for _33 { const VALUE: u32 = 33; } +pub struct _33; +impl Value for _33 { + const VALUE: u32 = 33; +} /// Type representing the value 34 for the `Value` trait. -pub struct _34; impl Value for _34 { const VALUE: u32 = 34; } +pub struct _34; +impl Value for _34 { + const VALUE: u32 = 34; +} /// Type representing the value 35 for the `Value` trait. -pub struct _35; impl Value for _35 { const VALUE: u32 = 35; } +pub struct _35; +impl Value for _35 { + const VALUE: u32 = 35; +} /// Type representing the value 36 for the `Value` trait. 
-pub struct _36; impl Value for _36 { const VALUE: u32 = 36; } +pub struct _36; +impl Value for _36 { + const VALUE: u32 = 36; +} /// Type representing the value 37 for the `Value` trait. -pub struct _37; impl Value for _37 { const VALUE: u32 = 37; } +pub struct _37; +impl Value for _37 { + const VALUE: u32 = 37; +} /// Type representing the value 38 for the `Value` trait. -pub struct _38; impl Value for _38 { const VALUE: u32 = 38; } +pub struct _38; +impl Value for _38 { + const VALUE: u32 = 38; +} /// Type representing the value 39 for the `Value` trait. -pub struct _39; impl Value for _39 { const VALUE: u32 = 39; } +pub struct _39; +impl Value for _39 { + const VALUE: u32 = 39; +} /// Type representing the value 40 for the `Value` trait. -pub struct _40; impl Value for _40 { const VALUE: u32 = 40; } +pub struct _40; +impl Value for _40 { + const VALUE: u32 = 40; +} /// Type representing the value 41 for the `Value` trait. -pub struct _41; impl Value for _41 { const VALUE: u32 = 41; } +pub struct _41; +impl Value for _41 { + const VALUE: u32 = 41; +} /// Type representing the value 42 for the `Value` trait. -pub struct _42; impl Value for _42 { const VALUE: u32 = 42; } +pub struct _42; +impl Value for _42 { + const VALUE: u32 = 42; +} /// Type representing the value 43 for the `Value` trait. -pub struct _43; impl Value for _43 { const VALUE: u32 = 43; } +pub struct _43; +impl Value for _43 { + const VALUE: u32 = 43; +} /// Type representing the value 44 for the `Value` trait. -pub struct _44; impl Value for _44 { const VALUE: u32 = 44; } +pub struct _44; +impl Value for _44 { + const VALUE: u32 = 44; +} /// Type representing the value 45 for the `Value` trait. -pub struct _45; impl Value for _45 { const VALUE: u32 = 45; } +pub struct _45; +impl Value for _45 { + const VALUE: u32 = 45; +} /// Type representing the value 46 for the `Value` trait. 
-pub struct _46; impl Value for _46 { const VALUE: u32 = 46; } +pub struct _46; +impl Value for _46 { + const VALUE: u32 = 46; +} /// Type representing the value 47 for the `Value` trait. -pub struct _47; impl Value for _47 { const VALUE: u32 = 47; } +pub struct _47; +impl Value for _47 { + const VALUE: u32 = 47; +} /// Type representing the value 48 for the `Value` trait. -pub struct _48; impl Value for _48 { const VALUE: u32 = 48; } +pub struct _48; +impl Value for _48 { + const VALUE: u32 = 48; +} /// Type representing the value 49 for the `Value` trait. -pub struct _49; impl Value for _49 { const VALUE: u32 = 49; } +pub struct _49; +impl Value for _49 { + const VALUE: u32 = 49; +} /// Type representing the value 50 for the `Value` trait. -pub struct _50; impl Value for _50 { const VALUE: u32 = 50; } +pub struct _50; +impl Value for _50 { + const VALUE: u32 = 50; +} /// Type representing the value 51 for the `Value` trait. -pub struct _51; impl Value for _51 { const VALUE: u32 = 51; } +pub struct _51; +impl Value for _51 { + const VALUE: u32 = 51; +} /// Type representing the value 52 for the `Value` trait. -pub struct _52; impl Value for _52 { const VALUE: u32 = 52; } +pub struct _52; +impl Value for _52 { + const VALUE: u32 = 52; +} /// Type representing the value 53 for the `Value` trait. -pub struct _53; impl Value for _53 { const VALUE: u32 = 53; } +pub struct _53; +impl Value for _53 { + const VALUE: u32 = 53; +} /// Type representing the value 54 for the `Value` trait. -pub struct _54; impl Value for _54 { const VALUE: u32 = 54; } +pub struct _54; +impl Value for _54 { + const VALUE: u32 = 54; +} /// Type representing the value 55 for the `Value` trait. -pub struct _55; impl Value for _55 { const VALUE: u32 = 55; } +pub struct _55; +impl Value for _55 { + const VALUE: u32 = 55; +} /// Type representing the value 56 for the `Value` trait. 
-pub struct _56; impl Value for _56 { const VALUE: u32 = 56; } +pub struct _56; +impl Value for _56 { + const VALUE: u32 = 56; +} /// Type representing the value 57 for the `Value` trait. -pub struct _57; impl Value for _57 { const VALUE: u32 = 57; } +pub struct _57; +impl Value for _57 { + const VALUE: u32 = 57; +} /// Type representing the value 58 for the `Value` trait. -pub struct _58; impl Value for _58 { const VALUE: u32 = 58; } +pub struct _58; +impl Value for _58 { + const VALUE: u32 = 58; +} /// Type representing the value 59 for the `Value` trait. -pub struct _59; impl Value for _59 { const VALUE: u32 = 59; } +pub struct _59; +impl Value for _59 { + const VALUE: u32 = 59; +} /// Type representing the value 60 for the `Value` trait. -pub struct _60; impl Value for _60 { const VALUE: u32 = 60; } +pub struct _60; +impl Value for _60 { + const VALUE: u32 = 60; +} /// Type representing the value 61 for the `Value` trait. -pub struct _61; impl Value for _61 { const VALUE: u32 = 61; } +pub struct _61; +impl Value for _61 { + const VALUE: u32 = 61; +} /// Type representing the value 62 for the `Value` trait. -pub struct _62; impl Value for _62 { const VALUE: u32 = 62; } +pub struct _62; +impl Value for _62 { + const VALUE: u32 = 62; +} /// Type representing the value 63 for the `Value` trait. -pub struct _63; impl Value for _63 { const VALUE: u32 = 63; } +pub struct _63; +impl Value for _63 { + const VALUE: u32 = 63; +} /// Type representing the value 64 for the `Value` trait. -pub struct _64; impl Value for _64 { const VALUE: u32 = 64; } +pub struct _64; +impl Value for _64 { + const VALUE: u32 = 64; +} /// Type representing the value 65 for the `Value` trait. -pub struct _65; impl Value for _65 { const VALUE: u32 = 65; } +pub struct _65; +impl Value for _65 { + const VALUE: u32 = 65; +} /// Type representing the value 66 for the `Value` trait. 
-pub struct _66; impl Value for _66 { const VALUE: u32 = 66; } +pub struct _66; +impl Value for _66 { + const VALUE: u32 = 66; +} /// Type representing the value 67 for the `Value` trait. -pub struct _67; impl Value for _67 { const VALUE: u32 = 67; } +pub struct _67; +impl Value for _67 { + const VALUE: u32 = 67; +} /// Type representing the value 68 for the `Value` trait. -pub struct _68; impl Value for _68 { const VALUE: u32 = 68; } +pub struct _68; +impl Value for _68 { + const VALUE: u32 = 68; +} /// Type representing the value 69 for the `Value` trait. -pub struct _69; impl Value for _69 { const VALUE: u32 = 69; } +pub struct _69; +impl Value for _69 { + const VALUE: u32 = 69; +} /// Type representing the value 70 for the `Value` trait. -pub struct _70; impl Value for _70 { const VALUE: u32 = 70; } +pub struct _70; +impl Value for _70 { + const VALUE: u32 = 70; +} /// Type representing the value 71 for the `Value` trait. -pub struct _71; impl Value for _71 { const VALUE: u32 = 71; } +pub struct _71; +impl Value for _71 { + const VALUE: u32 = 71; +} /// Type representing the value 72 for the `Value` trait. -pub struct _72; impl Value for _72 { const VALUE: u32 = 72; } +pub struct _72; +impl Value for _72 { + const VALUE: u32 = 72; +} /// Type representing the value 73 for the `Value` trait. -pub struct _73; impl Value for _73 { const VALUE: u32 = 73; } +pub struct _73; +impl Value for _73 { + const VALUE: u32 = 73; +} /// Type representing the value 74 for the `Value` trait. -pub struct _74; impl Value for _74 { const VALUE: u32 = 74; } +pub struct _74; +impl Value for _74 { + const VALUE: u32 = 74; +} /// Type representing the value 75 for the `Value` trait. -pub struct _75; impl Value for _75 { const VALUE: u32 = 75; } +pub struct _75; +impl Value for _75 { + const VALUE: u32 = 75; +} /// Type representing the value 76 for the `Value` trait. 
-pub struct _76; impl Value for _76 { const VALUE: u32 = 76; } +pub struct _76; +impl Value for _76 { + const VALUE: u32 = 76; +} /// Type representing the value 77 for the `Value` trait. -pub struct _77; impl Value for _77 { const VALUE: u32 = 77; } +pub struct _77; +impl Value for _77 { + const VALUE: u32 = 77; +} /// Type representing the value 78 for the `Value` trait. -pub struct _78; impl Value for _78 { const VALUE: u32 = 78; } +pub struct _78; +impl Value for _78 { + const VALUE: u32 = 78; +} /// Type representing the value 79 for the `Value` trait. -pub struct _79; impl Value for _79 { const VALUE: u32 = 79; } +pub struct _79; +impl Value for _79 { + const VALUE: u32 = 79; +} /// Type representing the value 80 for the `Value` trait. -pub struct _80; impl Value for _80 { const VALUE: u32 = 80; } +pub struct _80; +impl Value for _80 { + const VALUE: u32 = 80; +} /// Type representing the value 81 for the `Value` trait. -pub struct _81; impl Value for _81 { const VALUE: u32 = 81; } +pub struct _81; +impl Value for _81 { + const VALUE: u32 = 81; +} /// Type representing the value 82 for the `Value` trait. -pub struct _82; impl Value for _82 { const VALUE: u32 = 82; } +pub struct _82; +impl Value for _82 { + const VALUE: u32 = 82; +} /// Type representing the value 83 for the `Value` trait. -pub struct _83; impl Value for _83 { const VALUE: u32 = 83; } +pub struct _83; +impl Value for _83 { + const VALUE: u32 = 83; +} /// Type representing the value 84 for the `Value` trait. -pub struct _84; impl Value for _84 { const VALUE: u32 = 84; } +pub struct _84; +impl Value for _84 { + const VALUE: u32 = 84; +} /// Type representing the value 85 for the `Value` trait. -pub struct _85; impl Value for _85 { const VALUE: u32 = 85; } +pub struct _85; +impl Value for _85 { + const VALUE: u32 = 85; +} /// Type representing the value 86 for the `Value` trait. 
-pub struct _86; impl Value for _86 { const VALUE: u32 = 86; } +pub struct _86; +impl Value for _86 { + const VALUE: u32 = 86; +} /// Type representing the value 87 for the `Value` trait. -pub struct _87; impl Value for _87 { const VALUE: u32 = 87; } +pub struct _87; +impl Value for _87 { + const VALUE: u32 = 87; +} /// Type representing the value 88 for the `Value` trait. -pub struct _88; impl Value for _88 { const VALUE: u32 = 88; } +pub struct _88; +impl Value for _88 { + const VALUE: u32 = 88; +} /// Type representing the value 89 for the `Value` trait. -pub struct _89; impl Value for _89 { const VALUE: u32 = 89; } +pub struct _89; +impl Value for _89 { + const VALUE: u32 = 89; +} /// Type representing the value 90 for the `Value` trait. -pub struct _90; impl Value for _90 { const VALUE: u32 = 90; } +pub struct _90; +impl Value for _90 { + const VALUE: u32 = 90; +} /// Type representing the value 91 for the `Value` trait. -pub struct _91; impl Value for _91 { const VALUE: u32 = 91; } +pub struct _91; +impl Value for _91 { + const VALUE: u32 = 91; +} /// Type representing the value 92 for the `Value` trait. -pub struct _92; impl Value for _92 { const VALUE: u32 = 92; } +pub struct _92; +impl Value for _92 { + const VALUE: u32 = 92; +} /// Type representing the value 93 for the `Value` trait. -pub struct _93; impl Value for _93 { const VALUE: u32 = 93; } +pub struct _93; +impl Value for _93 { + const VALUE: u32 = 93; +} /// Type representing the value 94 for the `Value` trait. -pub struct _94; impl Value for _94 { const VALUE: u32 = 94; } +pub struct _94; +impl Value for _94 { + const VALUE: u32 = 94; +} /// Type representing the value 95 for the `Value` trait. -pub struct _95; impl Value for _95 { const VALUE: u32 = 95; } +pub struct _95; +impl Value for _95 { + const VALUE: u32 = 95; +} /// Type representing the value 96 for the `Value` trait. 
-pub struct _96; impl Value for _96 { const VALUE: u32 = 96; } +pub struct _96; +impl Value for _96 { + const VALUE: u32 = 96; +} /// Type representing the value 97 for the `Value` trait. -pub struct _97; impl Value for _97 { const VALUE: u32 = 97; } +pub struct _97; +impl Value for _97 { + const VALUE: u32 = 97; +} /// Type representing the value 98 for the `Value` trait. -pub struct _98; impl Value for _98 { const VALUE: u32 = 98; } +pub struct _98; +impl Value for _98 { + const VALUE: u32 = 98; +} /// Type representing the value 99 for the `Value` trait. -pub struct _99; impl Value for _99 { const VALUE: u32 = 99; } +pub struct _99; +impl Value for _99 { + const VALUE: u32 = 99; +} /// Type representing the value 100 for the `Value` trait. -pub struct _100; impl Value for _100 { const VALUE: u32 = 100; } +pub struct _100; +impl Value for _100 { + const VALUE: u32 = 100; +} /// Type representing the value 112 for the `Value` trait. -pub struct _112; impl Value for _112 { const VALUE: u32 = 112; } +pub struct _112; +impl Value for _112 { + const VALUE: u32 = 112; +} /// Type representing the value 128 for the `Value` trait. -pub struct _128; impl Value for _128 { const VALUE: u32 = 128; } +pub struct _128; +impl Value for _128 { + const VALUE: u32 = 128; +} /// Type representing the value 160 for the `Value` trait. -pub struct _160; impl Value for _160 { const VALUE: u32 = 160; } +pub struct _160; +impl Value for _160 { + const VALUE: u32 = 160; +} /// Type representing the value 192 for the `Value` trait. -pub struct _192; impl Value for _192 { const VALUE: u32 = 192; } +pub struct _192; +impl Value for _192 { + const VALUE: u32 = 192; +} /// Type representing the value 224 for the `Value` trait. -pub struct _224; impl Value for _224 { const VALUE: u32 = 224; } +pub struct _224; +impl Value for _224 { + const VALUE: u32 = 224; +} /// Type representing the value 256 for the `Value` trait. 
-pub struct _256; impl Value for _256 { const VALUE: u32 = 256; } +pub struct _256; +impl Value for _256 { + const VALUE: u32 = 256; +} /// Type representing the value 384 for the `Value` trait. -pub struct _384; impl Value for _384 { const VALUE: u32 = 384; } +pub struct _384; +impl Value for _384 { + const VALUE: u32 = 384; +} /// Type representing the value 512 for the `Value` trait. -pub struct _512; impl Value for _512 { const VALUE: u32 = 512; } - +pub struct _512; +impl Value for _512 { + const VALUE: u32 = 512; +} diff --git a/primitives/core/src/uint.rs b/primitives/core/src/uint.rs index e666137c08..39170972da 100644 --- a/primitives/core/src/uint.rs +++ b/primitives/core/src/uint.rs @@ -20,80 +20,76 @@ pub use primitive_types::{U256, U512}; #[cfg(test)] mod tests { - use super::*; - use codec::{Encode, Decode}; - use sp_serializer as ser; + use super::*; + use codec::{Decode, Encode}; + use sp_serializer as ser; - macro_rules! test { - ($name: ident, $test_name: ident) => { - #[test] - fn $test_name() { - let tests = vec![ - ($name::from(0), "0x0"), - ($name::from(1), "0x1"), - ($name::from(2), "0x2"), - ($name::from(10), "0xa"), - ($name::from(15), "0xf"), - ($name::from(15), "0xf"), - ($name::from(16), "0x10"), - ($name::from(1_000), "0x3e8"), - ($name::from(100_000), "0x186a0"), - ($name::from(u64::max_value()), "0xffffffffffffffff"), - ($name::from(u64::max_value()) + $name::from(1), "0x10000000000000000"), - ]; + macro_rules! 
test { + ($name: ident, $test_name: ident) => { + #[test] + fn $test_name() { + let tests = vec![ + ($name::from(0), "0x0"), + ($name::from(1), "0x1"), + ($name::from(2), "0x2"), + ($name::from(10), "0xa"), + ($name::from(15), "0xf"), + ($name::from(15), "0xf"), + ($name::from(16), "0x10"), + ($name::from(1_000), "0x3e8"), + ($name::from(100_000), "0x186a0"), + ($name::from(u64::max_value()), "0xffffffffffffffff"), + ( + $name::from(u64::max_value()) + $name::from(1), + "0x10000000000000000", + ), + ]; - for (number, expected) in tests { - assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); - assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); - } + for (number, expected) in tests { + assert_eq!(format!("{:?}", expected), ser::to_string_pretty(&number)); + assert_eq!(number, ser::from_str(&format!("{:?}", expected)).unwrap()); + } - // Invalid examples - assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); - assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); - } - } - } + // Invalid examples + assert!(ser::from_str::<$name>("\"0x\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"0xg\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"10\"").unwrap_err().is_data()); + assert!(ser::from_str::<$name>("\"0\"").unwrap_err().is_data()); + } + }; + } - test!(U256, test_u256); + test!(U256, test_u256); - #[test] - fn test_u256_codec() { - let res1 = vec![120, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0]; - let res2 = vec![0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, - 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff]; + #[test] + fn test_u256_codec() { + let res1 = vec![ + 120, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, + ]; + let res2 = vec![ + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, + ]; - assert_eq!( - U256::from(120).encode(), - res1); - assert_eq!( - U256::max_value().encode(), - res2); - assert_eq!( - U256::decode(&mut &res1[..]), - Ok(U256::from(120))); - assert_eq!( - U256::decode(&mut &res2[..]), - Ok(U256::max_value())); - } + assert_eq!(U256::from(120).encode(), res1); + assert_eq!(U256::max_value().encode(), res2); + assert_eq!(U256::decode(&mut &res1[..]), Ok(U256::from(120))); + assert_eq!(U256::decode(&mut &res2[..]), Ok(U256::max_value())); + } - #[test] - fn test_large_values() { - assert_eq!( - ser::to_string_pretty(&!U256::zero()), - "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" - ); - assert!( - ser::from_str::("\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"") - .unwrap_err() - .is_data() - ); - } + #[test] + fn test_large_values() { + assert_eq!( + ser::to_string_pretty(&!U256::zero()), + "\"0xffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ); + assert!(ser::from_str::( + "\"0x1ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff\"" + ) + .unwrap_err() + .is_data()); + } } diff --git a/primitives/database/src/kvdb.rs b/primitives/database/src/kvdb.rs index 85a324b5c1..7e8482eec1 100644 --- a/primitives/database/src/kvdb.rs +++ b/primitives/database/src/kvdb.rs @@ -15,45 +15,44 @@ // along with Substrate. If not, see . 
/// A wrapper around `kvdb::Database` that implements `sp_database::Database` trait - use ::kvdb::{DBTransaction, KeyValueDB}; -use crate::{Database, Change, Transaction, ColumnId}; +use crate::{Change, ColumnId, Database, Transaction}; struct DbAdapter(D); fn handle_err(result: std::io::Result) -> T { - match result { - Ok(r) => r, - Err(e) => { - panic!("Critical database eror: {:?}", e); - } - } + match result { + Ok(r) => r, + Err(e) => { + panic!("Critical database eror: {:?}", e); + } + } } /// Wrap RocksDb database into a trait object that implements `sp_database::Database` pub fn as_database(db: D) -> std::sync::Arc> { - std::sync::Arc::new(DbAdapter(db)) + std::sync::Arc::new(DbAdapter(db)) } impl Database for DbAdapter { - fn commit(&self, transaction: Transaction) { - let mut tx = DBTransaction::new(); - for change in transaction.0.into_iter() { - match change { - Change::Set(col, key, value) => tx.put_vec(col, &key, value), - Change::Remove(col, key) => tx.delete(col, &key), - _ => unimplemented!(), - } - } - handle_err(self.0.write(tx)); - } - - fn get(&self, col: ColumnId, key: &[u8]) -> Option> { - handle_err(self.0.get(col, key)) - } - - fn lookup(&self, _hash: &H) -> Option> { - unimplemented!(); - } + fn commit(&self, transaction: Transaction) { + let mut tx = DBTransaction::new(); + for change in transaction.0.into_iter() { + match change { + Change::Set(col, key, value) => tx.put_vec(col, &key, value), + Change::Remove(col, key) => tx.delete(col, &key), + _ => unimplemented!(), + } + } + handle_err(self.0.write(tx)); + } + + fn get(&self, col: ColumnId, key: &[u8]) -> Option> { + handle_err(self.0.get(col, key)) + } + + fn lookup(&self, _hash: &H) -> Option> { + unimplemented!(); + } } diff --git a/primitives/database/src/lib.rs b/primitives/database/src/lib.rs index bd9bd2eb54..ae014bfad7 100644 --- a/primitives/database/src/lib.rs +++ b/primitives/database/src/lib.rs @@ -16,11 +16,11 @@ //! 
The main database trait, allowing Substrate to store data persistently. -mod mem; mod kvdb; +mod mem; -pub use mem::MemDb; pub use crate::kvdb::as_database; +pub use mem::MemDb; /// An identifier for a column. pub type ColumnId = u32; @@ -28,18 +28,18 @@ pub type ColumnId = u32; /// An alteration to the database. #[derive(Clone)] pub enum Change { - Set(ColumnId, Vec, Vec), - Remove(ColumnId, Vec), - Store(H, Vec), - Release(H), + Set(ColumnId, Vec, Vec), + Remove(ColumnId, Vec), + Store(H, Vec), + Release(H), } /// An alteration to the database that references the data. pub enum ChangeRef<'a, H> { - Set(ColumnId, &'a [u8], &'a [u8]), - Remove(ColumnId, &'a [u8]), - Store(H, &'a [u8]), - Release(H), + Set(ColumnId, &'a [u8], &'a [u8]), + Remove(ColumnId, &'a [u8]), + Store(H, &'a [u8]), + Release(H), } /// A series of changes to the database that can be committed atomically. They do not take effect @@ -48,140 +48,153 @@ pub enum ChangeRef<'a, H> { pub struct Transaction(pub Vec>); impl Transaction { - /// Create a new transaction to be prepared and committed atomically. - pub fn new() -> Self { - Transaction(Vec::new()) - } - /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. - pub fn set(&mut self, col: ColumnId, key: &[u8], value: &[u8]) { - self.0.push(Change::Set(col, key.to_vec(), value.to_vec())) - } - /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. - pub fn set_from_vec(&mut self, col: ColumnId, key: &[u8], value: Vec) { - self.0.push(Change::Set(col, key.to_vec(), value)) - } - /// Remove the value of `key` in `col`. - pub fn remove(&mut self, col: ColumnId, key: &[u8]) { - self.0.push(Change::Remove(col, key.to_vec())) - } - /// Store the `preimage` of `hash` into the database, so that it may be looked up later with - /// `Database::lookup`. 
This may be called multiple times, but `Database::lookup` but subsequent - /// calls will ignore `preimage` and simply increase the number of references on `hash`. - pub fn store(&mut self, hash: H, preimage: &[u8]) { - self.0.push(Change::Store(hash, preimage.to_vec())) - } - /// Release the preimage of `hash` from the database. An equal number of these to the number of - /// corresponding `store`s must have been given before it is legal for `Database::lookup` to - /// be unable to provide the preimage. - pub fn release(&mut self, hash: H) { - self.0.push(Change::Release(hash)) - } + /// Create a new transaction to be prepared and committed atomically. + pub fn new() -> Self { + Transaction(Vec::new()) + } + /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. + pub fn set(&mut self, col: ColumnId, key: &[u8], value: &[u8]) { + self.0.push(Change::Set(col, key.to_vec(), value.to_vec())) + } + /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. + pub fn set_from_vec(&mut self, col: ColumnId, key: &[u8], value: Vec) { + self.0.push(Change::Set(col, key.to_vec(), value)) + } + /// Remove the value of `key` in `col`. + pub fn remove(&mut self, col: ColumnId, key: &[u8]) { + self.0.push(Change::Remove(col, key.to_vec())) + } + /// Store the `preimage` of `hash` into the database, so that it may be looked up later with + /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent + /// calls will ignore `preimage` and simply increase the number of references on `hash`. + pub fn store(&mut self, hash: H, preimage: &[u8]) { + self.0.push(Change::Store(hash, preimage.to_vec())) + } + /// Release the preimage of `hash` from the database. An equal number of these to the number of + /// corresponding `store`s must have been given before it is legal for `Database::lookup` to + /// be unable to provide the preimage. 
+ pub fn release(&mut self, hash: H) { + self.0.push(Change::Release(hash)) + } } pub trait Database: Send + Sync { - /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` - /// will reflect the new state. - fn commit(&self, transaction: Transaction) { - for change in transaction.0.into_iter() { - match change { - Change::Set(col, key, value) => self.set(col, &key, &value), - Change::Remove(col, key) => self.remove(col, &key), - Change::Store(hash, preimage) => self.store(&hash, &preimage), - Change::Release(hash) => self.release(&hash), - } - } - } - - /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` - /// will reflect the new state. - fn commit_ref<'a>(&self, transaction: &mut dyn Iterator>) { - let mut tx = Transaction::new(); - for change in transaction { - match change { - ChangeRef::Set(col, key, value) => tx.set(col, key, value), - ChangeRef::Remove(col, key) => tx.remove(col, key), - ChangeRef::Store(hash, preimage) => tx.store(hash, preimage), - ChangeRef::Release(hash) => tx.release(hash), - } - } - self.commit(tx); - } - - /// Retrieve the value previously stored against `key` or `None` if - /// `key` is not currently in the database. - fn get(&self, col: ColumnId, key: &[u8]) -> Option>; - - /// Call `f` with the value previously stored against `key`. - /// - /// This may be faster than `get` since it doesn't allocate. - /// Use `with_get` helper function if you need `f` to return a value from `f` - fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { - self.get(col, key).map(|v| f(&v)); - } - - /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. - fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) { - let mut t = Transaction::new(); - t.set(col, key, value); - self.commit(t); - } - /// Remove the value of `key` in `col`. 
- fn remove(&self, col: ColumnId, key: &[u8]) { - let mut t = Transaction::new(); - t.remove(col, key); - self.commit(t); - } - - /// Retrieve the first preimage previously `store`d for `hash` or `None` if no preimage is - /// currently stored. - fn lookup(&self, hash: &H) -> Option>; - - /// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage - /// is currently stored. - /// - /// This may be faster than `lookup` since it doesn't allocate. - /// Use `with_lookup` helper function if you need `f` to return a value from `f` - fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { - self.lookup(hash).map(|v| f(&v)); - } - - /// Store the `preimage` of `hash` into the database, so that it may be looked up later with - /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent - /// calls will ignore `preimage` and simply increase the number of references on `hash`. - fn store(&self, hash: &H, preimage: &[u8]) { - let mut t = Transaction::new(); - t.store(hash.clone(), preimage); - self.commit(t); - } - - /// Release the preimage of `hash` from the database. An equal number of these to the number of - /// corresponding `store`s must have been given before it is legal for `Database::lookup` to - /// be unable to provide the preimage. - fn release(&self, hash: &H) { - let mut t = Transaction::new(); - t.release(hash.clone()); - self.commit(t); - } + /// Commit the `transaction` to the database atomically. Any further calls to `get` or `lookup` + /// will reflect the new state. + fn commit(&self, transaction: Transaction) { + for change in transaction.0.into_iter() { + match change { + Change::Set(col, key, value) => self.set(col, &key, &value), + Change::Remove(col, key) => self.remove(col, &key), + Change::Store(hash, preimage) => self.store(&hash, &preimage), + Change::Release(hash) => self.release(&hash), + } + } + } + + /// Commit the `transaction` to the database atomically. 
Any further calls to `get` or `lookup` + /// will reflect the new state. + fn commit_ref<'a>(&self, transaction: &mut dyn Iterator>) { + let mut tx = Transaction::new(); + for change in transaction { + match change { + ChangeRef::Set(col, key, value) => tx.set(col, key, value), + ChangeRef::Remove(col, key) => tx.remove(col, key), + ChangeRef::Store(hash, preimage) => tx.store(hash, preimage), + ChangeRef::Release(hash) => tx.release(hash), + } + } + self.commit(tx); + } + + /// Retrieve the value previously stored against `key` or `None` if + /// `key` is not currently in the database. + fn get(&self, col: ColumnId, key: &[u8]) -> Option>; + + /// Call `f` with the value previously stored against `key`. + /// + /// This may be faster than `get` since it doesn't allocate. + /// Use `with_get` helper function if you need `f` to return a value from `f` + fn with_get(&self, col: ColumnId, key: &[u8], f: &mut dyn FnMut(&[u8])) { + self.get(col, key).map(|v| f(&v)); + } + + /// Set the value of `key` in `col` to `value`, replacing anything that is there currently. + fn set(&self, col: ColumnId, key: &[u8], value: &[u8]) { + let mut t = Transaction::new(); + t.set(col, key, value); + self.commit(t); + } + /// Remove the value of `key` in `col`. + fn remove(&self, col: ColumnId, key: &[u8]) { + let mut t = Transaction::new(); + t.remove(col, key); + self.commit(t); + } + + /// Retrieve the first preimage previously `store`d for `hash` or `None` if no preimage is + /// currently stored. + fn lookup(&self, hash: &H) -> Option>; + + /// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage + /// is currently stored. + /// + /// This may be faster than `lookup` since it doesn't allocate. 
+ /// Use `with_lookup` helper function if you need `f` to return a value from `f` + fn with_lookup(&self, hash: &H, f: &mut dyn FnMut(&[u8])) { + self.lookup(hash).map(|v| f(&v)); + } + + /// Store the `preimage` of `hash` into the database, so that it may be looked up later with + /// `Database::lookup`. This may be called multiple times, but `Database::lookup` but subsequent + /// calls will ignore `preimage` and simply increase the number of references on `hash`. + fn store(&self, hash: &H, preimage: &[u8]) { + let mut t = Transaction::new(); + t.store(hash.clone(), preimage); + self.commit(t); + } + + /// Release the preimage of `hash` from the database. An equal number of these to the number of + /// corresponding `store`s must have been given before it is legal for `Database::lookup` to + /// be unable to provide the preimage. + fn release(&self, hash: &H) { + let mut t = Transaction::new(); + t.release(hash.clone()); + self.commit(t); + } } /// Call `f` with the value previously stored against `key` and return the result, or `None` if /// `key` is not currently in the database. /// /// This may be faster than `get` since it doesn't allocate. -pub fn with_get(db: &dyn Database, col: ColumnId, key: &[u8], mut f: impl FnMut(&[u8]) -> R) -> Option { - let mut result: Option = None; - let mut adapter = |k: &_| { result = Some(f(k)); }; - db.with_get(col, key, &mut adapter); - result +pub fn with_get( + db: &dyn Database, + col: ColumnId, + key: &[u8], + mut f: impl FnMut(&[u8]) -> R, +) -> Option { + let mut result: Option = None; + let mut adapter = |k: &_| { + result = Some(f(k)); + }; + db.with_get(col, key, &mut adapter); + result } /// Call `f` with the preimage stored for `hash` and return the result, or `None` if no preimage /// is currently stored. /// /// This may be faster than `lookup` since it doesn't allocate. 
-pub fn with_lookup(db: &dyn Database, hash: &H, mut f: impl FnMut(&[u8]) -> R) -> Option { - let mut result: Option = None; - let mut adapter = |k: &_| { result = Some(f(k)); }; - db.with_lookup(hash, &mut adapter); - result +pub fn with_lookup( + db: &dyn Database, + hash: &H, + mut f: impl FnMut(&[u8]) -> R, +) -> Option { + let mut result: Option = None; + let mut adapter = |k: &_| { + result = Some(f(k)); + }; + db.with_lookup(hash, &mut adapter); + result } diff --git a/primitives/database/src/mem.rs b/primitives/database/src/mem.rs index 09d6149bed..fe2fd02c4c 100644 --- a/primitives/database/src/mem.rs +++ b/primitives/database/src/mem.rs @@ -16,53 +16,66 @@ //! In-memory implementation of `Database` -use std::collections::HashMap; -use crate::{Database, Transaction, ColumnId, Change}; +use crate::{Change, ColumnId, Database, Transaction}; use parking_lot::RwLock; +use std::collections::HashMap; #[derive(Default)] /// This implements `Database` as an in-memory hash map. `commit` is not atomic. 
-pub struct MemDb - (RwLock<(HashMap, Vec>>, HashMap>)>); +pub struct MemDb( + RwLock<( + HashMap, Vec>>, + HashMap>, + )>, +); impl Database for MemDb - where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash +where + H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash, { - fn commit(&self, transaction: Transaction) { - let mut s = self.0.write(); - for change in transaction.0.into_iter() { - match change { - Change::Set(col, key, value) => { s.0.entry(col).or_default().insert(key, value); }, - Change::Remove(col, key) => { s.0.entry(col).or_default().remove(&key); }, - Change::Store(hash, preimage) => { s.1.insert(hash, preimage); }, - Change::Release(hash) => { s.1.remove(&hash); }, - } - } - } + fn commit(&self, transaction: Transaction) { + let mut s = self.0.write(); + for change in transaction.0.into_iter() { + match change { + Change::Set(col, key, value) => { + s.0.entry(col).or_default().insert(key, value); + } + Change::Remove(col, key) => { + s.0.entry(col).or_default().remove(&key); + } + Change::Store(hash, preimage) => { + s.1.insert(hash, preimage); + } + Change::Release(hash) => { + s.1.remove(&hash); + } + } + } + } - fn get(&self, col: ColumnId, key: &[u8]) -> Option> { - let s = self.0.read(); - s.0.get(&col).and_then(|c| c.get(key).cloned()) - } + fn get(&self, col: ColumnId, key: &[u8]) -> Option> { + let s = self.0.read(); + s.0.get(&col).and_then(|c| c.get(key).cloned()) + } - fn lookup(&self, hash: &H) -> Option> { - let s = self.0.read(); - s.1.get(hash).cloned() - } + fn lookup(&self, hash: &H) -> Option> { + let s = self.0.read(); + s.1.get(hash).cloned() + } } impl MemDb - where H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash +where + H: Clone + Send + Sync + Eq + PartialEq + Default + std::hash::Hash, { - /// Create a new instance - pub fn new() -> Self { - MemDb::default() - } + /// Create a new instance + pub fn new() -> Self { + MemDb::default() + } - /// Count number of values 
in a column - pub fn count(&self, col: ColumnId) -> usize { - let s = self.0.read(); - s.0.get(&col).map(|c| c.len()).unwrap_or(0) - } + /// Count number of values in a column + pub fn count(&self, col: ColumnId) -> usize { + let s = self.0.read(); + s.0.get(&col).map(|c| c.len()).unwrap_or(0) + } } - diff --git a/primitives/debug-derive/src/impls.rs b/primitives/debug-derive/src/impls.rs index b0e6dfa3ee..057e2362e5 100644 --- a/primitives/debug-derive/src/impls.rs +++ b/primitives/debug-derive/src/impls.rs @@ -14,204 +14,194 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use quote::quote; use proc_macro2::TokenStream; -use syn::{Data, DeriveInput, parse_quote}; +use quote::quote; +use syn::{parse_quote, Data, DeriveInput}; pub fn debug_derive(ast: DeriveInput) -> proc_macro::TokenStream { - let name_str = ast.ident.to_string(); - let implementation = implementation::derive(&name_str, &ast.data); - let name = &ast.ident; - let mut generics = ast.generics.clone(); - let (impl_generics, ty_generics, where_clause) = { - let wh = generics.make_where_clause(); - for t in ast.generics.type_params() { - let name = &t.ident; - wh.predicates.push(parse_quote!{ #name : core::fmt::Debug }); - } - generics.split_for_impl() - }; - let gen = quote!{ - impl #impl_generics core::fmt::Debug for #name #ty_generics #where_clause { - fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { - #implementation - } - } - }; - - gen.into() + let name_str = ast.ident.to_string(); + let implementation = implementation::derive(&name_str, &ast.data); + let name = &ast.ident; + let mut generics = ast.generics.clone(); + let (impl_generics, ty_generics, where_clause) = { + let wh = generics.make_where_clause(); + for t in ast.generics.type_params() { + let name = &t.ident; + wh.predicates + .push(parse_quote! { #name : core::fmt::Debug }); + } + generics.split_for_impl() + }; + let gen = quote! 
{ + impl #impl_generics core::fmt::Debug for #name #ty_generics #where_clause { + fn fmt(&self, fmt: &mut core::fmt::Formatter) -> core::fmt::Result { + #implementation + } + } + }; + + gen.into() } #[cfg(not(feature = "std"))] mod implementation { - use super::*; - - /// Derive the inner implementation of `Debug::fmt` function. - /// - /// Non-std environment. We do nothing to prevent bloating the size of runtime. - /// Implement `Printable` if you need to print the details. - pub fn derive(_name_str: &str, _data: &Data) -> TokenStream { - quote! { - fmt.write_str("") - } - } + use super::*; + + /// Derive the inner implementation of `Debug::fmt` function. + /// + /// Non-std environment. We do nothing to prevent bloating the size of runtime. + /// Implement `Printable` if you need to print the details. + pub fn derive(_name_str: &str, _data: &Data) -> TokenStream { + quote! { + fmt.write_str("") + } + } } #[cfg(feature = "std")] mod implementation { - use super::*; - use proc_macro2::Span; - use syn::{Ident, Index, token::SelfValue}; - - /// Derive the inner implementation of `Debug::fmt` function. 
- pub fn derive(name_str: &str, data: &Data) -> TokenStream { - match *data { - Data::Struct(ref s) => derive_struct(&name_str, &s.fields), - Data::Union(ref u) => derive_fields(&name_str, Fields::new(u.fields.named.iter(), None)), - Data::Enum(ref e) => derive_enum(&name_str, &e), - } - } - - enum Fields { - Indexed { - indices: Vec, - }, - Unnamed { - vars: Vec, - }, - Named { - names: Vec, - this: Option, - }, - } - - impl Fields { - fn new<'a>(fields: impl Iterator, this: Option) -> Self { - let mut indices = vec![]; - let mut names = vec![]; - - for (i, f) in fields.enumerate() { - if let Some(ident) = f.ident.clone() { - names.push(ident); - } else { - indices.push(Index::from(i)); - } - } - - if names.is_empty() { - Self::Indexed { - indices, - } - } else { - Self::Named { - names, - this, - } - } - } - } - - fn derive_fields<'a>( - name_str: &str, - fields: Fields, - ) -> TokenStream { - match fields { - Fields::Named { names, this } => { - let names_str: Vec<_> = names.iter() - .map(|x| x.to_string()) - .collect(); - - let fields = match this { - None => quote! { #( .field(#names_str, #names) )* }, - Some(this) => quote! { #( .field(#names_str, &#this.#names) )* }, - }; - - quote! { - fmt.debug_struct(#name_str) - #fields - .finish() - } - - }, - Fields::Indexed { indices } => { - quote! { - fmt.debug_tuple(#name_str) - #( .field(&self.#indices) )* - .finish() - } - }, - Fields::Unnamed { vars } => { - quote! 
{ - fmt.debug_tuple(#name_str) - #( .field(#vars) )* - .finish() - } - }, - } - } - - fn derive_enum( - name: &str, - e: &syn::DataEnum, - ) -> TokenStream { - let v = e.variants - .iter() - .map(|v| { - let name = format!("{}::{}", name, v.ident); - let ident = &v.ident; - match v.fields { - syn::Fields::Named(ref f) => { - let names: Vec<_> = f.named.iter().flat_map(|f| f.ident.clone()).collect(); - let fields_impl = derive_fields(&name, Fields::Named { - names: names.clone(), - this: None, - }); - (ident, (quote!{ { #( ref #names ),* } }, fields_impl)) - }, - syn::Fields::Unnamed(ref f) => { - let names = f.unnamed.iter() - .enumerate() - .map(|(id, _)| Ident::new(&format!("a{}", id), Span::call_site())) - .collect::>(); - let fields_impl = derive_fields(&name, Fields::Unnamed { vars: names.clone() }); - (ident, (quote! { ( #( ref #names ),* ) }, fields_impl)) - }, - syn::Fields::Unit => { - let fields_impl = derive_fields(&name, Fields::Indexed { indices: vec![] }); - (ident, (quote! { }, fields_impl)) - }, - } - }); - - type Vecs = (Vec, Vec); - let (variants, others): Vecs<_, _> = v.unzip(); - let (match_fields, variants_impl): Vecs<_, _> = others.into_iter().unzip(); - - quote! { - match self { - #( Self::#variants #match_fields => #variants_impl, )* - _ => Ok(()), - } - } - } - - fn derive_struct( - name_str: &str, - fields: &syn::Fields, - ) -> TokenStream { - match *fields { - syn::Fields::Named(ref f) => derive_fields( - name_str, - Fields::new(f.named.iter(), Some(syn::Token!(self)(Span::call_site()))), - ), - syn::Fields::Unnamed(ref f) => derive_fields( - name_str, - Fields::new(f.unnamed.iter(), None), - ), - syn::Fields::Unit => derive_fields( - name_str, - Fields::Indexed { indices: vec![] }, - ), - } - } + use super::*; + use proc_macro2::Span; + use syn::{token::SelfValue, Ident, Index}; + + /// Derive the inner implementation of `Debug::fmt` function. 
+ pub fn derive(name_str: &str, data: &Data) -> TokenStream { + match *data { + Data::Struct(ref s) => derive_struct(&name_str, &s.fields), + Data::Union(ref u) => { + derive_fields(&name_str, Fields::new(u.fields.named.iter(), None)) + } + Data::Enum(ref e) => derive_enum(&name_str, &e), + } + } + + enum Fields { + Indexed { + indices: Vec, + }, + Unnamed { + vars: Vec, + }, + Named { + names: Vec, + this: Option, + }, + } + + impl Fields { + fn new<'a>(fields: impl Iterator, this: Option) -> Self { + let mut indices = vec![]; + let mut names = vec![]; + + for (i, f) in fields.enumerate() { + if let Some(ident) = f.ident.clone() { + names.push(ident); + } else { + indices.push(Index::from(i)); + } + } + + if names.is_empty() { + Self::Indexed { indices } + } else { + Self::Named { names, this } + } + } + } + + fn derive_fields<'a>(name_str: &str, fields: Fields) -> TokenStream { + match fields { + Fields::Named { names, this } => { + let names_str: Vec<_> = names.iter().map(|x| x.to_string()).collect(); + + let fields = match this { + None => quote! { #( .field(#names_str, #names) )* }, + Some(this) => quote! { #( .field(#names_str, &#this.#names) )* }, + }; + + quote! { + fmt.debug_struct(#name_str) + #fields + .finish() + } + } + Fields::Indexed { indices } => { + quote! { + fmt.debug_tuple(#name_str) + #( .field(&self.#indices) )* + .finish() + } + } + Fields::Unnamed { vars } => { + quote! { + fmt.debug_tuple(#name_str) + #( .field(#vars) )* + .finish() + } + } + } + } + + fn derive_enum(name: &str, e: &syn::DataEnum) -> TokenStream { + let v = e.variants.iter().map(|v| { + let name = format!("{}::{}", name, v.ident); + let ident = &v.ident; + match v.fields { + syn::Fields::Named(ref f) => { + let names: Vec<_> = f.named.iter().flat_map(|f| f.ident.clone()).collect(); + let fields_impl = derive_fields( + &name, + Fields::Named { + names: names.clone(), + this: None, + }, + ); + (ident, (quote! 
{ { #( ref #names ),* } }, fields_impl)) + } + syn::Fields::Unnamed(ref f) => { + let names = f + .unnamed + .iter() + .enumerate() + .map(|(id, _)| Ident::new(&format!("a{}", id), Span::call_site())) + .collect::>(); + let fields_impl = derive_fields( + &name, + Fields::Unnamed { + vars: names.clone(), + }, + ); + (ident, (quote! { ( #( ref #names ),* ) }, fields_impl)) + } + syn::Fields::Unit => { + let fields_impl = derive_fields(&name, Fields::Indexed { indices: vec![] }); + (ident, (quote! {}, fields_impl)) + } + } + }); + + type Vecs = (Vec, Vec); + let (variants, others): Vecs<_, _> = v.unzip(); + let (match_fields, variants_impl): Vecs<_, _> = others.into_iter().unzip(); + + quote! { + match self { + #( Self::#variants #match_fields => #variants_impl, )* + _ => Ok(()), + } + } + } + + fn derive_struct(name_str: &str, fields: &syn::Fields) -> TokenStream { + match *fields { + syn::Fields::Named(ref f) => derive_fields( + name_str, + Fields::new(f.named.iter(), Some(syn::Token!(self)(Span::call_site()))), + ), + syn::Fields::Unnamed(ref f) => { + derive_fields(name_str, Fields::new(f.unnamed.iter(), None)) + } + syn::Fields::Unit => derive_fields(name_str, Fields::Indexed { indices: vec![] }), + } + } } diff --git a/primitives/debug-derive/src/lib.rs b/primitives/debug-derive/src/lib.rs index 68bbb94e1b..7e50472774 100644 --- a/primitives/debug-derive/src/lib.rs +++ b/primitives/debug-derive/src/lib.rs @@ -37,6 +37,5 @@ use proc_macro::TokenStream; #[proc_macro_derive(RuntimeDebug)] pub fn debug_derive(input: TokenStream) -> TokenStream { - impls::debug_derive(syn::parse_macro_input!(input)) + impls::debug_derive(syn::parse_macro_input!(input)) } - diff --git a/primitives/debug-derive/tests/tests.rs b/primitives/debug-derive/tests/tests.rs index 77b3d53a2d..6164de9b3f 100644 --- a/primitives/debug-derive/tests/tests.rs +++ b/primitives/debug-derive/tests/tests.rs @@ -21,43 +21,48 @@ struct Unnamed(u64, String); #[derive(RuntimeDebug)] struct Named { - a: u64, 
- b: String, + a: u64, + b: String, } #[derive(RuntimeDebug)] enum EnumLongName { - A, - B(A, String), - VariantLongName { - a: A, - b: String, - }, + A, + B(A, String), + VariantLongName { a: A, b: String }, } - #[test] fn should_display_proper_debug() { - use self::EnumLongName as Enum; - - assert_eq!( - format!("{:?}", Unnamed(1, "abc".into())), - "Unnamed(1, \"abc\")" - ); - assert_eq!( - format!("{:?}", Named { a: 1, b: "abc".into() }), - "Named { a: 1, b: \"abc\" }" - ); - assert_eq!( - format!("{:?}", Enum::::A), - "EnumLongName::A" - ); - assert_eq!( - format!("{:?}", Enum::B(1, "abc".into())), - "EnumLongName::B(1, \"abc\")" - ); - assert_eq!( - format!("{:?}", Enum::VariantLongName { a: 1, b: "abc".into() }), - "EnumLongName::VariantLongName { a: 1, b: \"abc\" }" - ); + use self::EnumLongName as Enum; + + assert_eq!( + format!("{:?}", Unnamed(1, "abc".into())), + "Unnamed(1, \"abc\")" + ); + assert_eq!( + format!( + "{:?}", + Named { + a: 1, + b: "abc".into() + } + ), + "Named { a: 1, b: \"abc\" }" + ); + assert_eq!(format!("{:?}", Enum::::A), "EnumLongName::A"); + assert_eq!( + format!("{:?}", Enum::B(1, "abc".into())), + "EnumLongName::B(1, \"abc\")" + ); + assert_eq!( + format!( + "{:?}", + Enum::VariantLongName { + a: 1, + b: "abc".into() + } + ), + "EnumLongName::VariantLongName { a: 1, b: \"abc\" }" + ); } diff --git a/primitives/externalities/src/extensions.rs b/primitives/externalities/src/extensions.rs index f38f256bb9..5d44c8d71a 100644 --- a/primitives/externalities/src/extensions.rs +++ b/primitives/externalities/src/extensions.rs @@ -21,18 +21,23 @@ //! //! It is required that each extension implements the [`Extension`] trait. 
-use std::{collections::HashMap, collections::hash_map::Entry, any::{Any, TypeId}, ops::DerefMut}; use crate::Error; +use std::{ + any::{Any, TypeId}, + collections::hash_map::Entry, + collections::HashMap, + ops::DerefMut, +}; /// Marker trait for types that should be registered as [`Externalities`](crate::Externalities) extension. /// /// As extensions are stored as `Box`, this trait should give more confidence that the correct /// type is registered and requested. pub trait Extension: Send + Any { - /// Return the extension as `&mut dyn Any`. - /// - /// This is a trick to make the trait type castable into an `Any`. - fn as_mut_any(&mut self) -> &mut dyn Any; + /// Return the extension as `&mut dyn Any`. + /// + /// This is a trick to make the trait type castable into an `Any`. + fn as_mut_any(&mut self) -> &mut dyn Any; } /// Macro for declaring an extension that usable with [`Extensions`]. @@ -83,27 +88,31 @@ macro_rules! decl_extension { /// /// This is a super trait of the [`Externalities`](crate::Externalities). pub trait ExtensionStore { - /// Tries to find a registered extension by the given `type_id` and returns it as a `&mut dyn Any`. - /// - /// It is advised to use [`ExternalitiesExt::extension`](crate::ExternalitiesExt::extension) - /// instead of this function to get type system support and automatic type downcasting. - fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any>; - - /// Register extension `extension` with speciifed `type_id`. - /// - /// It should return error if extension is already registered. - fn register_extension_with_type_id(&mut self, type_id: TypeId, extension: Box) -> Result<(), Error>; - - /// Deregister extension with speicifed 'type_id' and drop it. - /// - /// It should return error if extension is not registered. - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), Error>; + /// Tries to find a registered extension by the given `type_id` and returns it as a `&mut dyn Any`. 
+ /// + /// It is advised to use [`ExternalitiesExt::extension`](crate::ExternalitiesExt::extension) + /// instead of this function to get type system support and automatic type downcasting. + fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any>; + + /// Register extension `extension` with specified `type_id`. + /// + /// It should return error if extension is already registered. + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), Error>; + + /// Deregister extension with specified 'type_id' and drop it. + /// + /// It should return error if extension is not registered. + fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), Error>; } /// Stores extensions that should be made available through the externalities. #[derive(Default)] pub struct Extensions { - extensions: HashMap>, + extensions: HashMap>, } impl std::fmt::Debug for Extensions { @@ -113,55 +122,67 @@ impl std::fmt::Debug for Extensions { } impl Extensions { - /// Create new instance of `Self`. - pub fn new() -> Self { - Self::default() - } + /// Create new instance of `Self`. + pub fn new() -> Self { + Self::default() + } - /// Register the given extension. - pub fn register(&mut self, ext: E) { - self.extensions.insert(ext.type_id(), Box::new(ext)); - } + /// Register the given extension. + pub fn register(&mut self, ext: E) { + self.extensions.insert(ext.type_id(), Box::new(ext)); + } - /// Register extension `ext`. - pub fn register_with_type_id(&mut self, type_id: TypeId, extension: Box) -> Result<(), Error> { - match self.extensions.entry(type_id) { - Entry::Vacant(vacant) => { vacant.insert(extension); Ok(()) }, - Entry::Occupied(_) => Err(Error::ExtensionAlreadyRegistered), - } - } + /// Register extension `ext`. 
+ pub fn register_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), Error> { + match self.extensions.entry(type_id) { + Entry::Vacant(vacant) => { + vacant.insert(extension); + Ok(()) + } + Entry::Occupied(_) => Err(Error::ExtensionAlreadyRegistered), + } + } - /// Return a mutable reference to the requested extension. - pub fn get_mut(&mut self, ext_type_id: TypeId) -> Option<&mut dyn Any> { - self.extensions.get_mut(&ext_type_id).map(DerefMut::deref_mut).map(Extension::as_mut_any) - } + /// Return a mutable reference to the requested extension. + pub fn get_mut(&mut self, ext_type_id: TypeId) -> Option<&mut dyn Any> { + self.extensions + .get_mut(&ext_type_id) + .map(DerefMut::deref_mut) + .map(Extension::as_mut_any) + } - /// Deregister extension of type `E`. - pub fn deregister(&mut self, type_id: TypeId) -> Option> { - self.extensions.remove(&type_id) - } + /// Deregister extension of type `E`. + pub fn deregister(&mut self, type_id: TypeId) -> Option> { + self.extensions.remove(&type_id) + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - decl_extension! { - struct DummyExt(u32); - } - decl_extension! { - struct DummyExt2(u32); - } + decl_extension! { + struct DummyExt(u32); + } + decl_extension! 
{ + struct DummyExt2(u32); + } - #[test] - fn register_and_retrieve_extension() { - let mut exts = Extensions::new(); - exts.register(DummyExt(1)); - exts.register(DummyExt2(2)); + #[test] + fn register_and_retrieve_extension() { + let mut exts = Extensions::new(); + exts.register(DummyExt(1)); + exts.register(DummyExt2(2)); - let ext = exts.get_mut(TypeId::of::()).expect("Extension is registered"); - let ext_ty = ext.downcast_mut::().expect("Downcasting works"); + let ext = exts + .get_mut(TypeId::of::()) + .expect("Extension is registered"); + let ext_ty = ext.downcast_mut::().expect("Downcasting works"); - assert_eq!(ext_ty.0, 1); - } + assert_eq!(ext_ty.0, 1); + } } diff --git a/primitives/externalities/src/lib.rs b/primitives/externalities/src/lib.rs index 2c0f50cd74..5f06412d36 100644 --- a/primitives/externalities/src/lib.rs +++ b/primitives/externalities/src/lib.rs @@ -24,10 +24,10 @@ use std::any::{Any, TypeId}; -use sp_storage::{ChildStorageKey, ChildInfo}; +use sp_storage::{ChildInfo, ChildStorageKey}; +pub use extensions::{Extension, ExtensionStore, Extensions}; pub use scope_limited::{set_and_run_with_externalities, with_externalities}; -pub use extensions::{Extension, Extensions, ExtensionStore}; mod extensions; mod scope_limited; @@ -35,203 +35,201 @@ mod scope_limited; /// Externalities error. #[derive(Debug)] pub enum Error { - /// Same extension cannot be registered twice. - ExtensionAlreadyRegistered, - /// Extensions are not supported. - ExtensionsAreNotSupported, - /// Extension `TypeId` is not registered. - ExtensionIsNotRegistered(TypeId), + /// Same extension cannot be registered twice. + ExtensionAlreadyRegistered, + /// Extensions are not supported. + ExtensionsAreNotSupported, + /// Extension `TypeId` is not registered. + ExtensionIsNotRegistered(TypeId), } /// The Substrate externalities. /// /// Provides access to the storage and to other registered extensions. pub trait Externalities: ExtensionStore { - /// Read runtime storage. 
- fn storage(&self, key: &[u8]) -> Option>; - - /// Get storage value hash. - /// - /// This may be optimized for large values. - fn storage_hash(&self, key: &[u8]) -> Option>; - - /// Get child storage value hash. - /// - /// This may be optimized for large values. - /// - /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage_hash( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> Option>; - - /// Read child runtime storage. - /// - /// Returns an `Option` that holds the SCALE encoded hash. - fn child_storage( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> Option>; - - /// Set storage entry `key` of current contract being called (effective immediately). - fn set_storage(&mut self, key: Vec, value: Vec) { - self.place_storage(key, Some(value)); - } - - /// Set child storage entry `key` of current contract being called (effective immediately). - fn set_child_storage( - &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: Vec, - value: Vec, - ) { - self.place_child_storage(storage_key, child_info, key, Some(value)) - } - - /// Clear a storage entry (`key`) of current contract being called (effective immediately). - fn clear_storage(&mut self, key: &[u8]) { - self.place_storage(key.to_vec(), None); - } - - /// Clear a child storage entry (`key`) of current contract being called (effective immediately). - fn clear_child_storage( - &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) { - self.place_child_storage(storage_key, child_info, key.to_vec(), None) - } - - /// Whether a storage entry exists. - fn exists_storage(&self, key: &[u8]) -> bool { - self.storage(key).is_some() - } - - /// Whether a child storage entry exists. 
- fn exists_child_storage( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> bool { - self.child_storage(storage_key, child_info, key).is_some() - } - - /// Returns the key immediately following the given key, if it exists. - fn next_storage_key(&self, key: &[u8]) -> Option>; - - /// Returns the key immediately following the given key, if it exists, in child storage. - fn next_child_storage_key( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> Option>; - - /// Clear an entire child storage. - fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); - - /// Clear storage entries which keys are start with the given prefix. - fn clear_prefix(&mut self, prefix: &[u8]); - - /// Clear child storage entries which keys are start with the given prefix. - fn clear_child_prefix( - &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - prefix: &[u8], - ); - - /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). - fn place_storage(&mut self, key: Vec, value: Option>); - - /// Set or clear a child storage entry. - fn place_child_storage( - &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: Vec, - value: Option>, - ); - - /// Get the identity of the chain. - fn chain_id(&self) -> u64; - - /// Get the trie root of the current storage map. - /// - /// This will also update all child storage keys in the top-level storage map. - /// - /// The returned hash is defined by the `Block` and is SCALE encoded. - fn storage_root(&mut self) -> Vec; - - /// Get the trie root of a child storage map. - /// - /// This will also update the value of the child storage keys in the top-level storage map. - /// - /// If the storage root equals the default hash as defined by the trie, the key in the top-level - /// storage map will be removed. 
- fn child_storage_root( - &mut self, - storage_key: ChildStorageKey, - ) -> Vec; - - /// Get the changes trie root of the current storage overlay at a block with given `parent`. - /// - /// `parent` expects a SCALE encoded hash. - /// - /// The returned hash is defined by the `Block` and is SCALE encoded. - fn storage_changes_root(&mut self, parent: &[u8]) -> Result>, ()>; - - /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - /// Benchmarking related functionality and shouldn't be used anywhere else! - /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - /// - /// Wipes all changes from caches and the database. - /// - /// The state will be reset to genesis. - fn wipe(&mut self); - - /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - /// Benchmarking related functionality and shouldn't be used anywhere else! - /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! - /// - /// Commits all changes to the database and clears all caches. - fn commit(&mut self); + /// Read runtime storage. + fn storage(&self, key: &[u8]) -> Option>; + + /// Get storage value hash. + /// + /// This may be optimized for large values. + fn storage_hash(&self, key: &[u8]) -> Option>; + + /// Get child storage value hash. + /// + /// This may be optimized for large values. + /// + /// Returns an `Option` that holds the SCALE encoded hash. + fn child_storage_hash( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option>; + + /// Read child runtime storage. + /// + /// Returns an `Option` that holds the SCALE encoded hash. + fn child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option>; + + /// Set storage entry `key` of current contract being called (effective immediately). 
+ fn set_storage(&mut self, key: Vec, value: Vec) { + self.place_storage(key, Some(value)); + } + + /// Set child storage entry `key` of current contract being called (effective immediately). + fn set_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: Vec, + value: Vec, + ) { + self.place_child_storage(storage_key, child_info, key, Some(value)) + } + + /// Clear a storage entry (`key`) of current contract being called (effective immediately). + fn clear_storage(&mut self, key: &[u8]) { + self.place_storage(key.to_vec(), None); + } + + /// Clear a child storage entry (`key`) of current contract being called (effective immediately). + fn clear_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) { + self.place_child_storage(storage_key, child_info, key.to_vec(), None) + } + + /// Whether a storage entry exists. + fn exists_storage(&self, key: &[u8]) -> bool { + self.storage(key).is_some() + } + + /// Whether a child storage entry exists. + fn exists_child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> bool { + self.child_storage(storage_key, child_info, key).is_some() + } + + /// Returns the key immediately following the given key, if it exists. + fn next_storage_key(&self, key: &[u8]) -> Option>; + + /// Returns the key immediately following the given key, if it exists, in child storage. + fn next_child_storage_key( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option>; + + /// Clear an entire child storage. + fn kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo); + + /// Clear storage entries which keys are start with the given prefix. + fn clear_prefix(&mut self, prefix: &[u8]); + + /// Clear child storage entries which keys are start with the given prefix. 
+ fn clear_child_prefix( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + prefix: &[u8], + ); + + /// Set or clear a storage entry (`key`) of current contract being called (effective immediately). + fn place_storage(&mut self, key: Vec, value: Option>); + + /// Set or clear a child storage entry. + fn place_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: Vec, + value: Option>, + ); + + /// Get the identity of the chain. + fn chain_id(&self) -> u64; + + /// Get the trie root of the current storage map. + /// + /// This will also update all child storage keys in the top-level storage map. + /// + /// The returned hash is defined by the `Block` and is SCALE encoded. + fn storage_root(&mut self) -> Vec; + + /// Get the trie root of a child storage map. + /// + /// This will also update the value of the child storage keys in the top-level storage map. + /// + /// If the storage root equals the default hash as defined by the trie, the key in the top-level + /// storage map will be removed. + fn child_storage_root(&mut self, storage_key: ChildStorageKey) -> Vec; + + /// Get the changes trie root of the current storage overlay at a block with given `parent`. + /// + /// `parent` expects a SCALE encoded hash. + /// + /// The returned hash is defined by the `Block` and is SCALE encoded. + fn storage_changes_root(&mut self, parent: &[u8]) -> Result>, ()>; + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Wipes all changes from caches and the database. + /// + /// The state will be reset to genesis. + fn wipe(&mut self); + + /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// Benchmarking related functionality and shouldn't be used anywhere else! 
+ /// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!! + /// + /// Commits all changes to the database and clears all caches. + fn commit(&mut self); } /// Extension for the [`Externalities`] trait. pub trait ExternalitiesExt { - /// Tries to find a registered extension and returns a mutable reference. - fn extension(&mut self) -> Option<&mut T>; - - /// Register extension `ext`. - /// - /// Should return error if extension is already registered or extensions are not supported. - fn register_extension(&mut self, ext: T) -> Result<(), Error>; - - /// Deregister and drop extension of `T` type. - /// - /// Should return error if extension of type `T` is not registered or - /// extensions are not supported. - fn deregister_extension(&mut self) -> Result<(), Error>; + /// Tries to find a registered extension and returns a mutable reference. + fn extension(&mut self) -> Option<&mut T>; + + /// Register extension `ext`. + /// + /// Should return error if extension is already registered or extensions are not supported. + fn register_extension(&mut self, ext: T) -> Result<(), Error>; + + /// Deregister and drop extension of `T` type. + /// + /// Should return error if extension of type `T` is not registered or + /// extensions are not supported. 
+ fn deregister_extension(&mut self) -> Result<(), Error>; } impl ExternalitiesExt for &mut dyn Externalities { - fn extension(&mut self) -> Option<&mut T> { - self.extension_by_type_id(TypeId::of::()).and_then(Any::downcast_mut) - } - - fn register_extension(&mut self, ext: T) -> Result<(), Error> { - self.register_extension_with_type_id(TypeId::of::(), Box::new(ext)) - } - - fn deregister_extension(&mut self) -> Result<(), Error> { - self.deregister_extension_by_type_id(TypeId::of::()) - } + fn extension(&mut self) -> Option<&mut T> { + self.extension_by_type_id(TypeId::of::()) + .and_then(Any::downcast_mut) + } + + fn register_extension(&mut self, ext: T) -> Result<(), Error> { + self.register_extension_with_type_id(TypeId::of::(), Box::new(ext)) + } + + fn deregister_extension(&mut self) -> Result<(), Error> { + self.deregister_extension_by_type_id(TypeId::of::()) + } } diff --git a/primitives/externalities/src/scope_limited.rs b/primitives/externalities/src/scope_limited.rs index 263858aa5f..736d5b3027 100644 --- a/primitives/externalities/src/scope_limited.rs +++ b/primitives/externalities/src/scope_limited.rs @@ -24,14 +24,15 @@ environmental::environmental!(ext: trait Externalities); /// while executing the given closure [`with_externalities`] grants access to them. The externalities /// are only set for the same thread this function was called from. pub fn set_and_run_with_externalities(ext: &mut dyn Externalities, f: F) -> R - where F: FnOnce() -> R +where + F: FnOnce() -> R, { - ext::using(ext, f) + ext::using(ext, f) } /// Execute the given closure with the currently set externalities. /// /// Returns `None` if no externalities are set or `Some(_)` with the result of the closure. 
pub fn with_externalities R, R>(f: F) -> Option { - ext::with(f) + ext::with(f) } diff --git a/primitives/finality-grandpa/src/lib.rs b/primitives/finality-grandpa/src/lib.rs index 9dcb1c2363..3f19a6b5f9 100644 --- a/primitives/finality-grandpa/src/lib.rs +++ b/primitives/finality-grandpa/src/lib.rs @@ -21,21 +21,21 @@ #[cfg(not(feature = "std"))] extern crate alloc; +use codec::{Codec, Decode, Encode, Input}; #[cfg(feature = "std")] use serde::Serialize; -use codec::{Encode, Decode, Input, Codec}; use sp_runtime::{ConsensusEngineId, RuntimeDebug}; use sp_std::borrow::Cow; use sp_std::vec::Vec; mod app { - use sp_application_crypto::{app_crypto, key_types::GRANDPA, ed25519}; - app_crypto!(ed25519, GRANDPA); + use sp_application_crypto::{app_crypto, ed25519, key_types::GRANDPA}; + app_crypto!(ed25519, GRANDPA); } sp_application_crypto::with_pair! { - /// The grandpa crypto scheme defined via the keypair type. - pub type AuthorityPair = app::Pair; + /// The grandpa crypto scheme defined via the keypair type. + pub type AuthorityPair = app::Pair; } /// Identity of a Grandpa authority. @@ -70,91 +70,91 @@ pub type AuthorityList = Vec<(AuthorityId, AuthorityWeight)>; #[cfg_attr(feature = "std", derive(Serialize))] #[derive(Clone, Eq, PartialEq, Encode, Decode, RuntimeDebug)] pub struct ScheduledChange { - /// The new authorities after the change, along with their respective weights. - pub next_authorities: AuthorityList, - /// The number of blocks to delay. - pub delay: N, + /// The new authorities after the change, along with their respective weights. + pub next_authorities: AuthorityList, + /// The number of blocks to delay. + pub delay: N, } /// An consensus log item for GRANDPA. #[cfg_attr(feature = "std", derive(Serialize))] #[derive(Decode, Encode, PartialEq, Eq, Clone, RuntimeDebug)] pub enum ConsensusLog { - /// Schedule an authority set change. 
- /// - /// The earliest digest of this type in a single block will be respected, - /// provided that there is no `ForcedChange` digest. If there is, then the - /// `ForcedChange` will take precedence. - /// - /// No change should be scheduled if one is already and the delay has not - /// passed completely. - /// - /// This should be a pure function: i.e. as long as the runtime can interpret - /// the digest type it should return the same result regardless of the current - /// state. - #[codec(index = "1")] - ScheduledChange(ScheduledChange), - /// Force an authority set change. - /// - /// Forced changes are applied after a delay of _imported_ blocks, - /// while pending changes are applied after a delay of _finalized_ blocks. - /// - /// The earliest digest of this type in a single block will be respected, - /// with others ignored. - /// - /// No change should be scheduled if one is already and the delay has not - /// passed completely. - /// - /// This should be a pure function: i.e. as long as the runtime can interpret - /// the digest type it should return the same result regardless of the current - /// state. - #[codec(index = "2")] - ForcedChange(N, ScheduledChange), - /// Note that the authority with given index is disabled until the next change. - #[codec(index = "3")] - OnDisabled(AuthorityIndex), - /// A signal to pause the current authority set after the given delay. - /// After finalizing the block at _delay_ the authorities should stop voting. - #[codec(index = "4")] - Pause(N), - /// A signal to resume the current authority set after the given delay. - /// After authoring the block at _delay_ the authorities should resume voting. - #[codec(index = "5")] - Resume(N), + /// Schedule an authority set change. + /// + /// The earliest digest of this type in a single block will be respected, + /// provided that there is no `ForcedChange` digest. If there is, then the + /// `ForcedChange` will take precedence. 
+ /// + /// No change should be scheduled if one is already and the delay has not + /// passed completely. + /// + /// This should be a pure function: i.e. as long as the runtime can interpret + /// the digest type it should return the same result regardless of the current + /// state. + #[codec(index = "1")] + ScheduledChange(ScheduledChange), + /// Force an authority set change. + /// + /// Forced changes are applied after a delay of _imported_ blocks, + /// while pending changes are applied after a delay of _finalized_ blocks. + /// + /// The earliest digest of this type in a single block will be respected, + /// with others ignored. + /// + /// No change should be scheduled if one is already and the delay has not + /// passed completely. + /// + /// This should be a pure function: i.e. as long as the runtime can interpret + /// the digest type it should return the same result regardless of the current + /// state. + #[codec(index = "2")] + ForcedChange(N, ScheduledChange), + /// Note that the authority with given index is disabled until the next change. + #[codec(index = "3")] + OnDisabled(AuthorityIndex), + /// A signal to pause the current authority set after the given delay. + /// After finalizing the block at _delay_ the authorities should stop voting. + #[codec(index = "4")] + Pause(N), + /// A signal to resume the current authority set after the given delay. + /// After authoring the block at _delay_ the authorities should resume voting. + #[codec(index = "5")] + Resume(N), } impl ConsensusLog { - /// Try to cast the log entry as a contained signal. - pub fn try_into_change(self) -> Option> { - match self { - ConsensusLog::ScheduledChange(change) => Some(change), - _ => None, - } - } - - /// Try to cast the log entry as a contained forced signal. 
- pub fn try_into_forced_change(self) -> Option<(N, ScheduledChange)> { - match self { - ConsensusLog::ForcedChange(median, change) => Some((median, change)), - _ => None, - } - } - - /// Try to cast the log entry as a contained pause signal. - pub fn try_into_pause(self) -> Option { - match self { - ConsensusLog::Pause(delay) => Some(delay), - _ => None, - } - } - - /// Try to cast the log entry as a contained resume signal. - pub fn try_into_resume(self) -> Option { - match self { - ConsensusLog::Resume(delay) => Some(delay), - _ => None, - } - } + /// Try to cast the log entry as a contained signal. + pub fn try_into_change(self) -> Option> { + match self { + ConsensusLog::ScheduledChange(change) => Some(change), + _ => None, + } + } + + /// Try to cast the log entry as a contained forced signal. + pub fn try_into_forced_change(self) -> Option<(N, ScheduledChange)> { + match self { + ConsensusLog::ForcedChange(median, change) => Some((median, change)), + _ => None, + } + } + + /// Try to cast the log entry as a contained pause signal. + pub fn try_into_pause(self) -> Option { + match self { + ConsensusLog::Pause(delay) => Some(delay), + _ => None, + } + } + + /// Try to cast the log entry as a contained resume signal. + pub fn try_into_resume(self) -> Option { + match self { + ConsensusLog::Resume(delay) => Some(delay), + _ => None, + } + } } /// WASM function call to check for pending changes. 
@@ -174,61 +174,61 @@ const AUTHORITIES_VERSION: u8 = 1; pub struct VersionedAuthorityList<'a>(Cow<'a, AuthorityList>); impl<'a> From for VersionedAuthorityList<'a> { - fn from(authorities: AuthorityList) -> Self { - VersionedAuthorityList(Cow::Owned(authorities)) - } + fn from(authorities: AuthorityList) -> Self { + VersionedAuthorityList(Cow::Owned(authorities)) + } } impl<'a> From<&'a AuthorityList> for VersionedAuthorityList<'a> { - fn from(authorities: &'a AuthorityList) -> Self { - VersionedAuthorityList(Cow::Borrowed(authorities)) - } + fn from(authorities: &'a AuthorityList) -> Self { + VersionedAuthorityList(Cow::Borrowed(authorities)) + } } impl<'a> Into for VersionedAuthorityList<'a> { - fn into(self) -> AuthorityList { - self.0.into_owned() - } + fn into(self) -> AuthorityList { + self.0.into_owned() + } } impl<'a> Encode for VersionedAuthorityList<'a> { - fn size_hint(&self) -> usize { - (AUTHORITIES_VERSION, self.0.as_ref()).size_hint() - } + fn size_hint(&self) -> usize { + (AUTHORITIES_VERSION, self.0.as_ref()).size_hint() + } - fn using_encoded R>(&self, f: F) -> R { - (AUTHORITIES_VERSION, self.0.as_ref()).using_encoded(f) - } + fn using_encoded R>(&self, f: F) -> R { + (AUTHORITIES_VERSION, self.0.as_ref()).using_encoded(f) + } } impl<'a> Decode for VersionedAuthorityList<'a> { - fn decode(value: &mut I) -> Result { - let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; - if version != AUTHORITIES_VERSION { - return Err("unknown Grandpa authorities version".into()); - } - Ok(authorities.into()) - } + fn decode(value: &mut I) -> Result { + let (version, authorities): (u8, AuthorityList) = Decode::decode(value)?; + if version != AUTHORITIES_VERSION { + return Err("unknown Grandpa authorities version".into()); + } + Ok(authorities.into()) + } } sp_api::decl_runtime_apis! { - /// APIs for integrating the GRANDPA finality gadget into runtimes. - /// This should be implemented on the runtime side. 
- /// - /// This is primarily used for negotiating authority-set changes for the - /// gadget. GRANDPA uses a signaling model of changing authority sets: - /// changes should be signaled with a delay of N blocks, and then automatically - /// applied in the runtime after those N blocks have passed. - /// - /// The consensus protocol will coordinate the handoff externally. - #[api_version(2)] - pub trait GrandpaApi { - /// Get the current GRANDPA authorities and weights. This should not change except - /// for when changes are scheduled and the corresponding delay has passed. - /// - /// When called at block B, it will return the set of authorities that should be - /// used to finalize descendants of this block (B+1, B+2, ...). The block B itself - /// is finalized by the authorities from block B-1. - fn grandpa_authorities() -> AuthorityList; - } + /// APIs for integrating the GRANDPA finality gadget into runtimes. + /// This should be implemented on the runtime side. + /// + /// This is primarily used for negotiating authority-set changes for the + /// gadget. GRANDPA uses a signaling model of changing authority sets: + /// changes should be signaled with a delay of N blocks, and then automatically + /// applied in the runtime after those N blocks have passed. + /// + /// The consensus protocol will coordinate the handoff externally. + #[api_version(2)] + pub trait GrandpaApi { + /// Get the current GRANDPA authorities and weights. This should not change except + /// for when changes are scheduled and the corresponding delay has passed. + /// + /// When called at block B, it will return the set of authorities that should be + /// used to finalize descendants of this block (B+1, B+2, ...). The block B itself + /// is finalized by the authorities from block B-1. 
+ fn grandpa_authorities() -> AuthorityList; + } } diff --git a/primitives/finality-tracker/src/lib.rs b/primitives/finality-tracker/src/lib.rs index a7157139dc..d94e9442aa 100644 --- a/primitives/finality-tracker/src/lib.rs +++ b/primitives/finality-tracker/src/lib.rs @@ -18,8 +18,8 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_inherents::{InherentIdentifier, InherentData, Error}; use codec::Decode; +use sp_inherents::{Error, InherentData, InherentIdentifier}; #[cfg(feature = "std")] use codec::Encode; @@ -29,48 +29,48 @@ pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"finalnum"; /// Auxiliary trait to extract finalized inherent data. pub trait FinalizedInherentData { - /// Get finalized inherent data. - fn finalized_number(&self) -> Result; + /// Get finalized inherent data. + fn finalized_number(&self) -> Result; } impl FinalizedInherentData for InherentData { - fn finalized_number(&self) -> Result { - self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Finalized number inherent data not found".into())) - } + fn finalized_number(&self) -> Result { + self.get_data(&INHERENT_IDENTIFIER) + .and_then(|r| r.ok_or_else(|| "Finalized number inherent data not found".into())) + } } /// Provider for inherent data. 
#[cfg(feature = "std")] pub struct InherentDataProvider { - inner: F, - _marker: std::marker::PhantomData, + inner: F, + _marker: std::marker::PhantomData, } #[cfg(feature = "std")] impl InherentDataProvider { - pub fn new(final_oracle: F) -> Self { - InherentDataProvider { inner: final_oracle, _marker: Default::default() } - } + pub fn new(final_oracle: F) -> Self { + InherentDataProvider { + inner: final_oracle, + _marker: Default::default(), + } + } } #[cfg(feature = "std")] impl sp_inherents::ProvideInherentData for InherentDataProvider - where F: Fn() -> Result +where + F: Fn() -> Result, { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER - } + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } - fn provide_inherent_data( - &self, - inherent_data: &mut InherentData, - ) -> Result<(), Error> { - (self.inner)() - .and_then(|n| inherent_data.put_data(INHERENT_IDENTIFIER, &n)) - } + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error> { + (self.inner)().and_then(|n| inherent_data.put_data(INHERENT_IDENTIFIER, &n)) + } - fn error_to_string(&self, _error: &[u8]) -> Option { - Some(format!("no further information")) - } + fn error_to_string(&self, _error: &[u8]) -> Option { + Some(format!("no further information")) + } } diff --git a/primitives/inherents/src/lib.rs b/primitives/inherents/src/lib.rs index e8df2c49e5..45b7a8dc1c 100644 --- a/primitives/inherents/src/lib.rs +++ b/primitives/inherents/src/lib.rs @@ -33,15 +33,18 @@ #![cfg_attr(not(feature = "std"), no_std)] #![warn(missing_docs)] -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; -use sp_std::{collections::btree_map::{BTreeMap, IntoIter, Entry}, vec::Vec}; +use sp_std::{ + collections::btree_map::{BTreeMap, Entry, IntoIter}, + vec::Vec, +}; #[cfg(feature = "std")] use parking_lot::RwLock; #[cfg(feature = "std")] -use std::{sync::Arc, format}; +use std::{format, sync::Arc}; /// An error 
that can occur within the inherent data system. #[cfg(feature = "std")] @@ -50,17 +53,17 @@ pub struct Error(String); #[cfg(feature = "std")] impl> From for Error { - fn from(data: T) -> Error { - Self(data.into()) - } + fn from(data: T) -> Error { + Self(data.into()) + } } #[cfg(feature = "std")] impl Error { - /// Convert this error into a `String`. - pub fn into_string(self) -> String { - self.0 - } + /// Convert this error into a `String`. + pub fn into_string(self) -> String { + self.0 + } } /// An error that can occur within the inherent data system. @@ -70,9 +73,9 @@ pub struct Error(&'static str); #[cfg(not(feature = "std"))] impl From<&'static str> for Error { - fn from(data: &'static str) -> Error { - Self(data) - } + fn from(data: &'static str) -> Error { + Self(data) + } } /// An identifier for an inherent. @@ -81,77 +84,68 @@ pub type InherentIdentifier = [u8; 8]; /// Inherent data to include in a block. #[derive(Clone, Default, Encode, Decode)] pub struct InherentData { - /// All inherent data encoded with parity-scale-codec and an identifier. - data: BTreeMap> + /// All inherent data encoded with parity-scale-codec and an identifier. + data: BTreeMap>, } impl InherentData { - /// Create a new instance. - pub fn new() -> Self { - Self::default() - } - - /// Put data for an inherent into the internal storage. - /// - /// # Return - /// - /// Returns `Ok(())` if the data could be inserted and no data for an inherent with the same - /// identifier existed, otherwise an error is returned. - /// - /// Inherent identifiers need to be unique, otherwise decoding of these values will not work! - pub fn put_data( - &mut self, - identifier: InherentIdentifier, - inherent: &I, - ) -> Result<(), Error> { - match self.data.entry(identifier) { - Entry::Vacant(entry) => { - entry.insert(inherent.encode()); - Ok(()) - }, - Entry::Occupied(_) => { - Err("Inherent with same identifier already exists!".into()) - } - } - } - - /// Replace the data for an inherent. 
- /// - /// If it does not exist, the data is just inserted. - pub fn replace_data( - &mut self, - identifier: InherentIdentifier, - inherent: &I, - ) { - self.data.insert(identifier, inherent.encode()); - } - - /// Returns the data for the requested inherent. - /// - /// # Return - /// - /// - `Ok(Some(I))` if the data could be found and deserialized. - /// - `Ok(None)` if the data could not be found. - /// - `Err(_)` if the data could be found, but deserialization did not work. - pub fn get_data( - &self, - identifier: &InherentIdentifier, - ) -> Result, Error> { - match self.data.get(identifier) { - Some(inherent) => - I::decode(&mut &inherent[..]) - .map_err(|_| { - "Could not decode requested inherent type!".into() - }) - .map(Some), - None => Ok(None) - } - } - - /// Get the number of inherents in this instance - pub fn len(&self) -> usize { - self.data.len() - } + /// Create a new instance. + pub fn new() -> Self { + Self::default() + } + + /// Put data for an inherent into the internal storage. + /// + /// # Return + /// + /// Returns `Ok(())` if the data could be inserted and no data for an inherent with the same + /// identifier existed, otherwise an error is returned. + /// + /// Inherent identifiers need to be unique, otherwise decoding of these values will not work! + pub fn put_data( + &mut self, + identifier: InherentIdentifier, + inherent: &I, + ) -> Result<(), Error> { + match self.data.entry(identifier) { + Entry::Vacant(entry) => { + entry.insert(inherent.encode()); + Ok(()) + } + Entry::Occupied(_) => Err("Inherent with same identifier already exists!".into()), + } + } + + /// Replace the data for an inherent. + /// + /// If it does not exist, the data is just inserted. + pub fn replace_data(&mut self, identifier: InherentIdentifier, inherent: &I) { + self.data.insert(identifier, inherent.encode()); + } + + /// Returns the data for the requested inherent. 
+ /// + /// # Return + /// + /// - `Ok(Some(I))` if the data could be found and deserialized. + /// - `Ok(None)` if the data could not be found. + /// - `Err(_)` if the data could be found, but deserialization did not work. + pub fn get_data( + &self, + identifier: &InherentIdentifier, + ) -> Result, Error> { + match self.data.get(identifier) { + Some(inherent) => I::decode(&mut &inherent[..]) + .map_err(|_| "Could not decode requested inherent type!".into()) + .map(Some), + None => Ok(None), + } + } + + /// Get the number of inherents in this instance + pub fn len(&self) -> usize { + self.data.len() + } } /// The result of checking inherents. @@ -162,209 +156,216 @@ impl InherentData { /// abort checking inherents. #[derive(Encode, Decode, Clone)] pub struct CheckInherentsResult { - /// Did the check succeed? - okay: bool, - /// Did we encounter a fatal error? - fatal_error: bool, - /// We use the `InherentData` to store our errors. - errors: InherentData, + /// Did the check succeed? + okay: bool, + /// Did we encounter a fatal error? + fatal_error: bool, + /// We use the `InherentData` to store our errors. + errors: InherentData, } impl Default for CheckInherentsResult { - fn default() -> Self { - Self { - okay: true, - errors: InherentData::new(), - fatal_error: false, - } - } + fn default() -> Self { + Self { + okay: true, + errors: InherentData::new(), + fatal_error: false, + } + } } impl CheckInherentsResult { - /// Create a new instance. - pub fn new() -> Self { - Self::default() - } - - /// Put an error into the result. - /// - /// This makes this result resolve to `ok() == false`. - /// - /// # Parameters - /// - /// - identifier - The identifier of the inherent that generated the error. - /// - error - The error that will be encoded. 
- pub fn put_error( - &mut self, - identifier: InherentIdentifier, - error: &E, - ) -> Result<(), Error> { - // Don't accept any other error - if self.fatal_error { - return Err("No other errors are accepted after an hard error!".into()) - } - - if error.is_fatal_error() { - // remove the other errors. - self.errors.data.clear(); - } - - self.errors.put_data(identifier, error)?; - - self.okay = false; - self.fatal_error = error.is_fatal_error(); - Ok(()) - } - - /// Get an error out of the result. - /// - /// # Return - /// - /// - `Ok(Some(I))` if the error could be found and deserialized. - /// - `Ok(None)` if the error could not be found. - /// - `Err(_)` if the error could be found, but deserialization did not work. - pub fn get_error( - &self, - identifier: &InherentIdentifier, - ) -> Result, Error> { - self.errors.get_data(identifier) - } - - /// Convert into an iterator over all contained errors. - pub fn into_errors(self) -> IntoIter> { - self.errors.data.into_iter() - } - - /// Is this result ok? - pub fn ok(&self) -> bool { - self.okay - } - - /// Is this a fatal error? - pub fn fatal_error(&self) -> bool { - self.fatal_error - } + /// Create a new instance. + pub fn new() -> Self { + Self::default() + } + + /// Put an error into the result. + /// + /// This makes this result resolve to `ok() == false`. + /// + /// # Parameters + /// + /// - identifier - The identifier of the inherent that generated the error. + /// - error - The error that will be encoded. + pub fn put_error( + &mut self, + identifier: InherentIdentifier, + error: &E, + ) -> Result<(), Error> { + // Don't accept any other error + if self.fatal_error { + return Err("No other errors are accepted after an hard error!".into()); + } + + if error.is_fatal_error() { + // remove the other errors. + self.errors.data.clear(); + } + + self.errors.put_data(identifier, error)?; + + self.okay = false; + self.fatal_error = error.is_fatal_error(); + Ok(()) + } + + /// Get an error out of the result. 
+ /// + /// # Return + /// + /// - `Ok(Some(I))` if the error could be found and deserialized. + /// - `Ok(None)` if the error could not be found. + /// - `Err(_)` if the error could be found, but deserialization did not work. + pub fn get_error( + &self, + identifier: &InherentIdentifier, + ) -> Result, Error> { + self.errors.get_data(identifier) + } + + /// Convert into an iterator over all contained errors. + pub fn into_errors(self) -> IntoIter> { + self.errors.data.into_iter() + } + + /// Is this result ok? + pub fn ok(&self) -> bool { + self.okay + } + + /// Is this a fatal error? + pub fn fatal_error(&self) -> bool { + self.fatal_error + } } #[cfg(feature = "std")] impl PartialEq for CheckInherentsResult { - fn eq(&self, other: &Self) -> bool { - self.fatal_error == other.fatal_error && - self.okay == other.okay && - self.errors.data == other.errors.data - } + fn eq(&self, other: &Self) -> bool { + self.fatal_error == other.fatal_error + && self.okay == other.okay + && self.errors.data == other.errors.data + } } /// All `InherentData` providers. #[cfg(feature = "std")] #[derive(Clone, Default)] pub struct InherentDataProviders { - providers: Arc>>>, + providers: Arc>>>, } #[cfg(feature = "std")] impl InherentDataProviders { - /// Create a new instance. - pub fn new() -> Self { - Self::default() - } - - /// Register an `InherentData` provider. - /// - /// The registration order is preserved and this order will also be used when creating the - /// inherent data. - /// - /// # Result - /// - /// Will return an error, if a provider with the same identifier already exists. 
- pub fn register_provider( - &self, - provider: P, - ) -> Result<(), Error> { - if self.has_provider(&provider.inherent_identifier()) { - Err( - format!( - "Inherent data provider with identifier {:?} already exists!", - &provider.inherent_identifier() - ).into() - ) - } else { - provider.on_register(self)?; - self.providers.write().push(Box::new(provider)); - Ok(()) - } - } - - /// Returns if a provider for the given identifier exists. - pub fn has_provider(&self, identifier: &InherentIdentifier) -> bool { - self.providers.read().iter().any(|p| p.inherent_identifier() == identifier) - } - - /// Create inherent data. - pub fn create_inherent_data(&self) -> Result { - let mut data = InherentData::new(); - self.providers.read().iter().try_for_each(|p| { - p.provide_inherent_data(&mut data) - .map_err(|e| format!("Error for `{:?}`: {:?}", p.inherent_identifier(), e)) - })?; - Ok(data) - } - - /// Converts a given encoded error into a `String`. - /// - /// Useful if the implementation encounters an error for an identifier it does not know. - pub fn error_to_string(&self, identifier: &InherentIdentifier, error: &[u8]) -> String { - let res = self.providers.read().iter().filter_map(|p| - if p.inherent_identifier() == identifier { - Some( - p.error_to_string(error) - .unwrap_or_else(|| error_to_string_fallback(identifier)) - ) - } else { - None - } - ).next(); - - match res { - Some(res) => res, - None => format!( - "Error while checking inherent of type \"{}\", but this inherent type is unknown.", - String::from_utf8_lossy(identifier) - ) - } - } + /// Create a new instance. + pub fn new() -> Self { + Self::default() + } + + /// Register an `InherentData` provider. + /// + /// The registration order is preserved and this order will also be used when creating the + /// inherent data. + /// + /// # Result + /// + /// Will return an error, if a provider with the same identifier already exists. 
+ pub fn register_provider( + &self, + provider: P, + ) -> Result<(), Error> { + if self.has_provider(&provider.inherent_identifier()) { + Err(format!( + "Inherent data provider with identifier {:?} already exists!", + &provider.inherent_identifier() + ) + .into()) + } else { + provider.on_register(self)?; + self.providers.write().push(Box::new(provider)); + Ok(()) + } + } + + /// Returns if a provider for the given identifier exists. + pub fn has_provider(&self, identifier: &InherentIdentifier) -> bool { + self.providers + .read() + .iter() + .any(|p| p.inherent_identifier() == identifier) + } + + /// Create inherent data. + pub fn create_inherent_data(&self) -> Result { + let mut data = InherentData::new(); + self.providers.read().iter().try_for_each(|p| { + p.provide_inherent_data(&mut data) + .map_err(|e| format!("Error for `{:?}`: {:?}", p.inherent_identifier(), e)) + })?; + Ok(data) + } + + /// Converts a given encoded error into a `String`. + /// + /// Useful if the implementation encounters an error for an identifier it does not know. + pub fn error_to_string(&self, identifier: &InherentIdentifier, error: &[u8]) -> String { + let res = self + .providers + .read() + .iter() + .filter_map(|p| { + if p.inherent_identifier() == identifier { + Some( + p.error_to_string(error) + .unwrap_or_else(|| error_to_string_fallback(identifier)), + ) + } else { + None + } + }) + .next(); + + match res { + Some(res) => res, + None => format!( + "Error while checking inherent of type \"{}\", but this inherent type is unknown.", + String::from_utf8_lossy(identifier) + ), + } + } } /// Something that provides inherent data. #[cfg(feature = "std")] pub trait ProvideInherentData { - /// Is called when this inherent data provider is registered at the given - /// `InherentDataProviders`. - fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { - Ok(()) - } - - /// The identifier of the inherent for that data will be provided. 
- fn inherent_identifier(&self) -> &'static InherentIdentifier; - - /// Provide inherent data that should be included in a block. - /// - /// The data should be stored in the given `InherentData` structure. - fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error>; - - /// Convert the given encoded error to a string. - /// - /// If the given error could not be decoded, `None` should be returned. - fn error_to_string(&self, error: &[u8]) -> Option; + /// Is called when this inherent data provider is registered at the given + /// `InherentDataProviders`. + fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { + Ok(()) + } + + /// The identifier of the inherent for that data will be provided. + fn inherent_identifier(&self) -> &'static InherentIdentifier; + + /// Provide inherent data that should be included in a block. + /// + /// The data should be stored in the given `InherentData` structure. + fn provide_inherent_data(&self, inherent_data: &mut InherentData) -> Result<(), Error>; + + /// Convert the given encoded error to a string. + /// + /// If the given error could not be decoded, `None` should be returned. + fn error_to_string(&self, error: &[u8]) -> Option; } /// A fallback function, if the decoding of an error fails. #[cfg(feature = "std")] fn error_to_string_fallback(identifier: &InherentIdentifier) -> String { - format!( - "Error while checking inherent of type \"{}\", but error could not be decoded.", - String::from_utf8_lossy(identifier) - ) + format!( + "Error while checking inherent of type \"{}\", but error could not be decoded.", + String::from_utf8_lossy(identifier) + ) } /// Did we encounter a fatal error while checking an inherent? @@ -376,8 +377,8 @@ fn error_to_string_fallback(identifier: &InherentIdentifier) -> String { /// correct, but it is required to verify the block at a later time again and then the inherent /// check will succeed. pub trait IsFatalError { - /// Is this a fatal error? 
- fn is_fatal_error(&self) -> bool; + /// Is this a fatal error? + fn is_fatal_error(&self) -> bool; } /// Auxiliary to make any given error resolve to `is_fatal_error() == true`. @@ -385,203 +386,225 @@ pub trait IsFatalError { pub struct MakeFatalError(E); impl From for MakeFatalError { - fn from(err: E) -> Self { - MakeFatalError(err) - } + fn from(err: E) -> Self { + MakeFatalError(err) + } } impl IsFatalError for MakeFatalError { - fn is_fatal_error(&self) -> bool { - true - } + fn is_fatal_error(&self) -> bool { + true + } } /// A module that provides an inherent and may also verifies it. pub trait ProvideInherent { - /// The call type of the module. - type Call; - /// The error returned by `check_inherent`. - type Error: codec::Encode + IsFatalError; - /// The inherent identifier used by this inherent. - const INHERENT_IDENTIFIER: self::InherentIdentifier; - - /// Create an inherent out of the given `InherentData`. - fn create_inherent(data: &InherentData) -> Option; - - /// Check the given inherent if it is valid. - /// Checking the inherent is optional and can be omitted. - fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { - Ok(()) - } + /// The call type of the module. + type Call; + /// The error returned by `check_inherent`. + type Error: codec::Encode + IsFatalError; + /// The inherent identifier used by this inherent. + const INHERENT_IDENTIFIER: self::InherentIdentifier; + + /// Create an inherent out of the given `InherentData`. + fn create_inherent(data: &InherentData) -> Option; + + /// Check the given inherent if it is valid. + /// Checking the inherent is optional and can be omitted. 
+ fn check_inherent(_: &Self::Call, _: &InherentData) -> Result<(), Self::Error> { + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use codec::{Encode, Decode}; - - const TEST_INHERENT_0: InherentIdentifier = *b"testinh0"; - const TEST_INHERENT_1: InherentIdentifier = *b"testinh1"; - - #[derive(Encode)] - struct NoFatalError(E); - impl IsFatalError for NoFatalError { - fn is_fatal_error(&self) -> bool { - false - } - } - - #[test] - fn inherent_data_encodes_and_decodes() { - let inherent_0 = vec![1, 2, 3]; - let inherent_1: u32 = 7; - - let mut data = InherentData::new(); - data.put_data(TEST_INHERENT_0, &inherent_0).unwrap(); - data.put_data(TEST_INHERENT_1, &inherent_1).unwrap(); - - let encoded = data.encode(); - - let decoded = InherentData::decode(&mut &encoded[..]).unwrap(); - - assert_eq!(decoded.get_data::>(&TEST_INHERENT_0).unwrap().unwrap(), inherent_0); - assert_eq!(decoded.get_data::(&TEST_INHERENT_1).unwrap().unwrap(), inherent_1); - } - - #[test] - fn adding_same_inherent_returns_an_error() { - let mut data = InherentData::new(); - data.put_data(TEST_INHERENT_0, &8).unwrap(); - assert!(data.put_data(TEST_INHERENT_0, &10).is_err()); - } - - #[derive(Clone)] - struct TestInherentDataProvider { - registered: Arc>, - } - - impl TestInherentDataProvider { - fn new() -> Self { - let inst = Self { - registered: Default::default(), - }; - - // just make sure - assert!(!inst.is_registered()); - - inst - } - - fn is_registered(&self) -> bool { - *self.registered.read() - } - } - - const ERROR_TO_STRING: &str = "Found error!"; - - impl ProvideInherentData for TestInherentDataProvider { - fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { - *self.registered.write() = true; - Ok(()) - } - - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &TEST_INHERENT_0 - } - - fn provide_inherent_data(&self, data: &mut InherentData) -> Result<(), Error> { - data.put_data(TEST_INHERENT_0, &42) - } - - fn error_to_string(&self, _: 
&[u8]) -> Option { - Some(ERROR_TO_STRING.into()) - } - } - - #[test] - fn registering_inherent_provider() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - assert!(providers.has_provider(provider.inherent_identifier())); - - // Second time should fail - assert!(providers.register_provider(provider.clone()).is_err()); - } - - #[test] - fn create_inherent_data_from_all_providers() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - - let inherent_data = providers.create_inherent_data().unwrap(); - - assert_eq!( - inherent_data.get_data::(provider.inherent_identifier()).unwrap().unwrap(), - 42u32 - ); - } - - #[test] - fn encoded_error_to_string() { - let provider = TestInherentDataProvider::new(); - let providers = InherentDataProviders::new(); - - providers.register_provider(provider.clone()).unwrap(); - assert!(provider.is_registered()); - - assert_eq!( - &providers.error_to_string(&TEST_INHERENT_0, &[1, 2]), ERROR_TO_STRING - ); - - assert!( - providers - .error_to_string(&TEST_INHERENT_1, &[1, 2]) - .contains("inherent type is unknown") - ); - } - - #[test] - fn check_inherents_result_encodes_and_decodes() { - let mut result = CheckInherentsResult::new(); - assert!(result.ok()); - - result.put_error(TEST_INHERENT_0, &NoFatalError(2u32)).unwrap(); - assert!(!result.ok()); - assert!(!result.fatal_error()); - - let encoded = result.encode(); - - let decoded = CheckInherentsResult::decode(&mut &encoded[..]).unwrap(); - - assert_eq!(decoded.get_error::(&TEST_INHERENT_0).unwrap().unwrap(), 2); - assert!(!decoded.ok()); - assert!(!decoded.fatal_error()); - } - - #[test] - fn check_inherents_result_removes_other_errors_on_fatal_error() { - let mut result = 
CheckInherentsResult::new(); - assert!(result.ok()); - - result.put_error(TEST_INHERENT_0, &NoFatalError(2u32)).unwrap(); - assert!(!result.ok()); - assert!(!result.fatal_error()); - - result.put_error(TEST_INHERENT_1, &MakeFatalError(4u32)).unwrap(); - assert!(!result.ok()); - assert!(result.fatal_error()); - - assert!(result.put_error(TEST_INHERENT_0, &NoFatalError(5u32)).is_err()); - - result.into_errors().for_each(|(i, e)| match i { - TEST_INHERENT_1 => assert_eq!(u32::decode(&mut &e[..]).unwrap(), 4), - _ => panic!("There should be no other error!"), - }); - } + use super::*; + use codec::{Decode, Encode}; + + const TEST_INHERENT_0: InherentIdentifier = *b"testinh0"; + const TEST_INHERENT_1: InherentIdentifier = *b"testinh1"; + + #[derive(Encode)] + struct NoFatalError(E); + impl IsFatalError for NoFatalError { + fn is_fatal_error(&self) -> bool { + false + } + } + + #[test] + fn inherent_data_encodes_and_decodes() { + let inherent_0 = vec![1, 2, 3]; + let inherent_1: u32 = 7; + + let mut data = InherentData::new(); + data.put_data(TEST_INHERENT_0, &inherent_0).unwrap(); + data.put_data(TEST_INHERENT_1, &inherent_1).unwrap(); + + let encoded = data.encode(); + + let decoded = InherentData::decode(&mut &encoded[..]).unwrap(); + + assert_eq!( + decoded + .get_data::>(&TEST_INHERENT_0) + .unwrap() + .unwrap(), + inherent_0 + ); + assert_eq!( + decoded.get_data::(&TEST_INHERENT_1).unwrap().unwrap(), + inherent_1 + ); + } + + #[test] + fn adding_same_inherent_returns_an_error() { + let mut data = InherentData::new(); + data.put_data(TEST_INHERENT_0, &8).unwrap(); + assert!(data.put_data(TEST_INHERENT_0, &10).is_err()); + } + + #[derive(Clone)] + struct TestInherentDataProvider { + registered: Arc>, + } + + impl TestInherentDataProvider { + fn new() -> Self { + let inst = Self { + registered: Default::default(), + }; + + // just make sure + assert!(!inst.is_registered()); + + inst + } + + fn is_registered(&self) -> bool { + *self.registered.read() + } + } + + const 
ERROR_TO_STRING: &str = "Found error!"; + + impl ProvideInherentData for TestInherentDataProvider { + fn on_register(&self, _: &InherentDataProviders) -> Result<(), Error> { + *self.registered.write() = true; + Ok(()) + } + + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &TEST_INHERENT_0 + } + + fn provide_inherent_data(&self, data: &mut InherentData) -> Result<(), Error> { + data.put_data(TEST_INHERENT_0, &42) + } + + fn error_to_string(&self, _: &[u8]) -> Option { + Some(ERROR_TO_STRING.into()) + } + } + + #[test] + fn registering_inherent_provider() { + let provider = TestInherentDataProvider::new(); + let providers = InherentDataProviders::new(); + + providers.register_provider(provider.clone()).unwrap(); + assert!(provider.is_registered()); + assert!(providers.has_provider(provider.inherent_identifier())); + + // Second time should fail + assert!(providers.register_provider(provider.clone()).is_err()); + } + + #[test] + fn create_inherent_data_from_all_providers() { + let provider = TestInherentDataProvider::new(); + let providers = InherentDataProviders::new(); + + providers.register_provider(provider.clone()).unwrap(); + assert!(provider.is_registered()); + + let inherent_data = providers.create_inherent_data().unwrap(); + + assert_eq!( + inherent_data + .get_data::(provider.inherent_identifier()) + .unwrap() + .unwrap(), + 42u32 + ); + } + + #[test] + fn encoded_error_to_string() { + let provider = TestInherentDataProvider::new(); + let providers = InherentDataProviders::new(); + + providers.register_provider(provider.clone()).unwrap(); + assert!(provider.is_registered()); + + assert_eq!( + &providers.error_to_string(&TEST_INHERENT_0, &[1, 2]), + ERROR_TO_STRING + ); + + assert!(providers + .error_to_string(&TEST_INHERENT_1, &[1, 2]) + .contains("inherent type is unknown")); + } + + #[test] + fn check_inherents_result_encodes_and_decodes() { + let mut result = CheckInherentsResult::new(); + assert!(result.ok()); + + result + 
.put_error(TEST_INHERENT_0, &NoFatalError(2u32)) + .unwrap(); + assert!(!result.ok()); + assert!(!result.fatal_error()); + + let encoded = result.encode(); + + let decoded = CheckInherentsResult::decode(&mut &encoded[..]).unwrap(); + + assert_eq!( + decoded.get_error::(&TEST_INHERENT_0).unwrap().unwrap(), + 2 + ); + assert!(!decoded.ok()); + assert!(!decoded.fatal_error()); + } + + #[test] + fn check_inherents_result_removes_other_errors_on_fatal_error() { + let mut result = CheckInherentsResult::new(); + assert!(result.ok()); + + result + .put_error(TEST_INHERENT_0, &NoFatalError(2u32)) + .unwrap(); + assert!(!result.ok()); + assert!(!result.fatal_error()); + + result + .put_error(TEST_INHERENT_1, &MakeFatalError(4u32)) + .unwrap(); + assert!(!result.ok()); + assert!(result.fatal_error()); + + assert!(result + .put_error(TEST_INHERENT_0, &NoFatalError(5u32)) + .is_err()); + + result.into_errors().for_each(|(i, e)| match i { + TEST_INHERENT_1 => assert_eq!(u32::decode(&mut &e[..]).unwrap(), 4), + _ => panic!("There should be no other error!"), + }); + } } diff --git a/primitives/io/src/batch_verifier.rs b/primitives/io/src/batch_verifier.rs index a23b8fcbc2..ebd79495eb 100644 --- a/primitives/io/src/batch_verifier.rs +++ b/primitives/io/src/batch_verifier.rs @@ -16,15 +16,18 @@ //! Batch/parallel verification. -use sp_core::{ed25519, sr25519, crypto::Pair, traits::CloneableSpawn}; -use std::sync::{Arc, atomic::{AtomicBool, Ordering as AtomicOrdering}}; -use futures::{future::FutureExt, task::FutureObj, channel::oneshot}; +use futures::{channel::oneshot, future::FutureExt, task::FutureObj}; +use sp_core::{crypto::Pair, ed25519, sr25519, traits::CloneableSpawn}; +use std::sync::{ + atomic::{AtomicBool, Ordering as AtomicOrdering}, + Arc, +}; #[derive(Debug, Clone)] struct Sr25519BatchItem { - signature: sr25519::Signature, - pub_key: sr25519::Public, - message: Vec, + signature: sr25519::Signature, + pub_key: sr25519::Public, + message: Vec, } /// Batch verifier. 
@@ -34,130 +37,172 @@ struct Sr25519BatchItem { /// call `verify_and_clear to get a result. After that, batch verifier is ready for the /// next batching job. pub struct BatchVerifier { - scheduler: Box, - sr25519_items: Vec, - invalid: Arc, - pending_tasks: Vec>, + scheduler: Box, + sr25519_items: Vec, + invalid: Arc, + pending_tasks: Vec>, } impl BatchVerifier { - pub fn new(scheduler: Box) -> Self { - BatchVerifier { - scheduler, - sr25519_items: Default::default(), - invalid: Arc::new(false.into()), - pending_tasks: vec![], - } - } - - fn spawn_verification_task( - &mut self, f: impl FnOnce() -> bool + Send + 'static, - ) -> Result<(), ()> { - // there is already invalid transaction encountered - if self.invalid.load(AtomicOrdering::Relaxed) { return Err(()); } - - let invalid_clone = self.invalid.clone(); - let (sender, receiver) = oneshot::channel(); - self.pending_tasks.push(receiver); - - self.scheduler.spawn_obj(FutureObj::new(async move { - if !f() { - invalid_clone.store(true, AtomicOrdering::Relaxed); - } - if sender.send(()).is_err() { - // sanity - log::warn!("Verification halted while result was pending"); - invalid_clone.store(true, AtomicOrdering::Relaxed); - } - }.boxed())).map_err(drop) - } - - /// Push ed25519 signature to verify. - /// - /// Returns false if some of the pushed signatures before already failed the check - /// (in this case it won't verify anything else) - pub fn push_ed25519( - &mut self, - signature: ed25519::Signature, - pub_key: ed25519::Public, - message: Vec, - ) -> bool { - if self.invalid.load(AtomicOrdering::Relaxed) { return false; } - - if self.spawn_verification_task(move || ed25519::Pair::verify(&signature, &message, &pub_key)).is_err() { - log::debug!( - target: "runtime", - "Batch-verification returns false because failed to spawn background task.", - ); - - return false; - } - true - } - - /// Push sr25519 signature to verify. 
- /// - /// Returns false if some of the pushed signatures before already failed the check. - /// (in this case it won't verify anything else) - pub fn push_sr25519( - &mut self, - signature: sr25519::Signature, - pub_key: sr25519::Public, - message: Vec, - ) -> bool { - if self.invalid.load(AtomicOrdering::Relaxed) { return false; } - self.sr25519_items.push(Sr25519BatchItem { signature, pub_key, message }); - true - } - - /// Verify all previously pushed signatures since last call and return - /// aggregated result. - #[must_use] - pub fn verify_and_clear(&mut self) -> bool { - use std::sync::{Mutex, Condvar}; - - let pending = std::mem::replace(&mut self.pending_tasks, vec![]); - - log::trace!( - target: "runtime", - "Batch-verification: {} pending tasks, {} sr25519 signatures", - pending.len(), - self.sr25519_items.len(), - ); - - let messages = self.sr25519_items.iter().map(|item| &item.message[..]).collect(); - let signatures = self.sr25519_items.iter().map(|item| &item.signature).collect(); - let pub_keys = self.sr25519_items.iter().map(|item| &item.pub_key).collect(); - - if !sr25519::verify_batch(messages, signatures, pub_keys) { - self.sr25519_items.clear(); - - return false; - } - - self.sr25519_items.clear(); - - if pending.len() > 0 { - let pair = Arc::new((Mutex::new(()), Condvar::new())); - let pair_clone = pair.clone(); - - if self.scheduler.spawn_obj(FutureObj::new(async move { - futures::future::join_all(pending).await; - pair_clone.1.notify_all(); - }.boxed())).is_err() { - log::debug!( - target: "runtime", - "Batch-verification returns false because failed to spawn background task.", - ); - - return false; - } - - let (mtx, cond_var) = &*pair; - let mtx = mtx.lock().expect("Locking can only fail when the mutex is poisoned; qed"); - let _ = cond_var.wait(mtx).expect("Waiting can only fail when the mutex waited on is poisoned; qed"); - } - - !self.invalid.swap(false, AtomicOrdering::Relaxed) - } + pub fn new(scheduler: Box) -> Self { + 
BatchVerifier { + scheduler, + sr25519_items: Default::default(), + invalid: Arc::new(false.into()), + pending_tasks: vec![], + } + } + + fn spawn_verification_task( + &mut self, + f: impl FnOnce() -> bool + Send + 'static, + ) -> Result<(), ()> { + // there is already invalid transaction encountered + if self.invalid.load(AtomicOrdering::Relaxed) { + return Err(()); + } + + let invalid_clone = self.invalid.clone(); + let (sender, receiver) = oneshot::channel(); + self.pending_tasks.push(receiver); + + self.scheduler + .spawn_obj(FutureObj::new( + async move { + if !f() { + invalid_clone.store(true, AtomicOrdering::Relaxed); + } + if sender.send(()).is_err() { + // sanity + log::warn!("Verification halted while result was pending"); + invalid_clone.store(true, AtomicOrdering::Relaxed); + } + } + .boxed(), + )) + .map_err(drop) + } + + /// Push ed25519 signature to verify. + /// + /// Returns false if some of the pushed signatures before already failed the check + /// (in this case it won't verify anything else) + pub fn push_ed25519( + &mut self, + signature: ed25519::Signature, + pub_key: ed25519::Public, + message: Vec, + ) -> bool { + if self.invalid.load(AtomicOrdering::Relaxed) { + return false; + } + + if self + .spawn_verification_task(move || ed25519::Pair::verify(&signature, &message, &pub_key)) + .is_err() + { + log::debug!( + target: "runtime", + "Batch-verification returns false because failed to spawn background task.", + ); + + return false; + } + true + } + + /// Push sr25519 signature to verify. + /// + /// Returns false if some of the pushed signatures before already failed the check. 
+ /// (in this case it won't verify anything else) + pub fn push_sr25519( + &mut self, + signature: sr25519::Signature, + pub_key: sr25519::Public, + message: Vec, + ) -> bool { + if self.invalid.load(AtomicOrdering::Relaxed) { + return false; + } + self.sr25519_items.push(Sr25519BatchItem { + signature, + pub_key, + message, + }); + true + } + + /// Verify all previously pushed signatures since last call and return + /// aggregated result. + #[must_use] + pub fn verify_and_clear(&mut self) -> bool { + use std::sync::{Condvar, Mutex}; + + let pending = std::mem::replace(&mut self.pending_tasks, vec![]); + + log::trace!( + target: "runtime", + "Batch-verification: {} pending tasks, {} sr25519 signatures", + pending.len(), + self.sr25519_items.len(), + ); + + let messages = self + .sr25519_items + .iter() + .map(|item| &item.message[..]) + .collect(); + let signatures = self + .sr25519_items + .iter() + .map(|item| &item.signature) + .collect(); + let pub_keys = self + .sr25519_items + .iter() + .map(|item| &item.pub_key) + .collect(); + + if !sr25519::verify_batch(messages, signatures, pub_keys) { + self.sr25519_items.clear(); + + return false; + } + + self.sr25519_items.clear(); + + if pending.len() > 0 { + let pair = Arc::new((Mutex::new(()), Condvar::new())); + let pair_clone = pair.clone(); + + if self + .scheduler + .spawn_obj(FutureObj::new( + async move { + futures::future::join_all(pending).await; + pair_clone.1.notify_all(); + } + .boxed(), + )) + .is_err() + { + log::debug!( + target: "runtime", + "Batch-verification returns false because failed to spawn background task.", + ); + + return false; + } + + let (mtx, cond_var) = &*pair; + let mtx = mtx + .lock() + .expect("Locking can only fail when the mutex is poisoned; qed"); + let _ = cond_var + .wait(mtx) + .expect("Waiting can only fail when the mutex waited on is poisoned; qed"); + } + + !self.invalid.swap(false, AtomicOrdering::Relaxed) + } } diff --git a/primitives/io/src/lib.rs 
b/primitives/io/src/lib.rs index ee146dbc29..e7df231fdf 100644 --- a/primitives/io/src/lib.rs +++ b/primitives/io/src/lib.rs @@ -17,14 +17,16 @@ //! I/O host interface for substrate runtime. #![warn(missing_docs)] - #![cfg_attr(not(feature = "std"), no_std)] #![cfg_attr(not(feature = "std"), feature(alloc_error_handler))] - -#![cfg_attr(feature = "std", - doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), - doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." +)] use sp_std::vec::Vec; @@ -33,29 +35,31 @@ use sp_std::ops::Deref; #[cfg(feature = "std")] use sp_core::{ - crypto::Pair, - traits::{KeystoreExt, CallInWasmExt, TaskExecutorExt}, - offchain::{OffchainExt, TransactionPoolExt}, - hexdisplay::HexDisplay, - storage::{ChildStorageKey, ChildInfo}, + crypto::Pair, + hexdisplay::HexDisplay, + offchain::{OffchainExt, TransactionPoolExt}, + storage::{ChildInfo, ChildStorageKey}, + traits::{CallInWasmExt, KeystoreExt, TaskExecutorExt}, }; use sp_core::{ - crypto::KeyTypeId, ed25519, sr25519, H256, LogLevel, - offchain::{ - Timestamp, HttpRequestId, HttpRequestStatus, HttpError, StorageKind, OpaqueNetworkState, - }, + crypto::KeyTypeId, + ed25519, + offchain::{ + HttpError, HttpRequestId, HttpRequestStatus, OpaqueNetworkState, StorageKind, Timestamp, + }, + sr25519, LogLevel, H256, }; #[cfg(feature = "std")] -use sp_trie::{TrieConfiguration, trie_types::Layout}; +use sp_trie::{trie_types::Layout, TrieConfiguration}; use sp_runtime_interface::{runtime_interface, Pointer}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use 
sp_externalities::{ExternalitiesExt, Externalities}; +use sp_externalities::{Externalities, ExternalitiesExt}; #[cfg(feature = "std")] mod batch_verifier; @@ -66,12 +70,12 @@ use batch_verifier::BatchVerifier; /// Error verifying ECDSA signature #[derive(Encode, Decode)] pub enum EcdsaVerifyError { - /// Incorrect value of R or S - BadRS, - /// Incorrect value of V - BadV, - /// Invalid signature - BadSignature, + /// Incorrect value of R or S + BadRS, + /// Incorrect value of V + BadV, + /// Invalid signature + BadSignature, } /// Returns a `ChildStorageKey` if the given `storage_key` slice is a valid storage @@ -81,886 +85,894 @@ pub enum EcdsaVerifyError { /// in the case of an invalid child storage key. #[cfg(feature = "std")] fn child_storage_key_or_panic(storage_key: &[u8]) -> ChildStorageKey { - match ChildStorageKey::from_slice(storage_key) { - Some(storage_key) => storage_key, - None => panic!("child storage key is invalid"), - } + match ChildStorageKey::from_slice(storage_key) { + Some(storage_key) => storage_key, + None => panic!("child storage key is invalid"), + } } /// Interface for accessing the storage from within the runtime. #[runtime_interface] pub trait Storage { - /// Returns the data for `key` in the storage or `None` if the key can not be found. - fn get(&self, key: &[u8]) -> Option> { - self.storage(key).map(|s| s.to_vec()) - } - - /// All Child api uses : - /// - A `child_storage_key` to define the anchor point for the child proof - /// (commonly the location where the child root is stored in its parent trie). - /// - A `child_storage_types` to identify the kind of the child type and how its - /// `child definition` parameter is encoded. - /// - A `child_definition_parameter` which is the additional information required - /// to use the child trie. For instance defaults child tries requires this to - /// contain a collision free unique id. 
- /// - /// This function specifically returns the data for `key` in the child storage or `None` - /// if the key can not be found. - fn child_get( - &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key).map(|s| s.to_vec()) - } - - /// Get `key` from storage, placing the value into `value_out` and return the number of - /// bytes that the entry in storage has beyond the offset or `None` if the storage entry - /// doesn't exist at all. - /// If `value_out` length is smaller than the returned length, only `value_out` length bytes - /// are copied into `value_out`. - fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { - self.storage(key).map(|value| { - let value_offset = value_offset as usize; - let data = &value[value_offset.min(value.len())..]; - let written = std::cmp::min(data.len(), value_out.len()); - value_out[..written].copy_from_slice(&data[..written]); - value.len() as u32 - }) - } - - /// Get `key` from child storage, placing the value into `value_out` and return the number - /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry - /// doesn't exist at all. - /// If `value_out` length is smaller than the returned length, only `value_out` length bytes - /// are copied into `value_out`. - /// - /// See `child_get` for common child api parameters. 
- fn child_read( - &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - value_out: &mut [u8], - value_offset: u32, - ) -> Option { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.child_storage(storage_key, child_info, key) - .map(|value| { - let value_offset = value_offset as usize; - let data = &value[value_offset.min(value.len())..]; - let written = std::cmp::min(data.len(), value_out.len()); - value_out[..written].copy_from_slice(&data[..written]); - value.len() as u32 - }) - } - - /// Set `key` to `value` in the storage. - fn set(&mut self, key: &[u8], value: &[u8]) { - self.set_storage(key.to_vec(), value.to_vec()); - } - - /// Set `key` to `value` in the child storage denoted by `child_storage_key`. - /// - /// See `child_get` for common child api parameters. - fn child_set( - &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - value: &[u8], - ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); - } - - /// Clear the storage of the given `key` and its value. - fn clear(&mut self, key: &[u8]) { - self.clear_storage(key) - } - - /// Clear the given child storage of the given `key` and its value. - /// - /// See `child_get` for common child api parameters. 
- fn child_clear( - &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_storage(storage_key, child_info, key); - } - - /// Clear an entire child storage. - /// - /// See `child_get` for common child api parameters. - fn child_storage_kill( - &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.kill_child_storage(storage_key, child_info); - } - - /// Check whether the given `key` exists in storage. - fn exists(&self, key: &[u8]) -> bool { - self.exists_storage(key) - } - - /// Check whether the given `key` exists in storage. - /// - /// See `child_get` for common child api parameters. - fn child_exists( - &self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> bool { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.exists_child_storage(storage_key, child_info, key) - } - - /// Clear the storage of each key-value pair where the key starts with the given `prefix`. - fn clear_prefix(&mut self, prefix: &[u8]) { - Externalities::clear_prefix(*self, prefix) - } - - /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. - /// - /// See `child_get` for common child api parameters. 
- fn child_clear_prefix( - &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - prefix: &[u8], - ) { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.clear_child_prefix(storage_key, child_info, prefix); - } - - /// "Commit" all existing operations and compute the resulting storage root. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns the SCALE encoded hash. - fn root(&mut self) -> Vec { - self.storage_root() - } - - /// "Commit" all existing operations and compute the resulting child storage root. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns the SCALE encoded hash. - /// - /// See `child_get` for common child api parameters. - fn child_root( - &mut self, - child_storage_key: &[u8], - ) -> Vec { - let storage_key = child_storage_key_or_panic(child_storage_key); - self.child_storage_root(storage_key) - } - - /// "Commit" all existing operations and get the resulting storage change root. - /// `parent_hash` is a SCALE encoded hash. - /// - /// The hashing algorithm is defined by the `Block`. - /// - /// Returns an `Some(_)` which holds the SCALE encoded hash or `None` when - /// changes trie is disabled. - fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { - self.storage_changes_root(parent_hash) - .expect("Invalid `parent_hash` given to `changes_root`.") - } - - /// Get the next key in storage after the given one in lexicographic order. - fn next_key(&mut self, key: &[u8]) -> Option> { - self.next_storage_key(&key) - } - - /// Get the next key in storage after the given one in lexicographic order in child storage. 
- fn child_next_key( - &mut self, - child_storage_key: &[u8], - child_definition: &[u8], - child_type: u32, - key: &[u8], - ) -> Option> { - let storage_key = child_storage_key_or_panic(child_storage_key); - let child_info = ChildInfo::resolve_child_info(child_type, child_definition) - .expect("Invalid child definition"); - self.next_child_storage_key(storage_key, child_info, key) - } + /// Returns the data for `key` in the storage or `None` if the key can not be found. + fn get(&self, key: &[u8]) -> Option> { + self.storage(key).map(|s| s.to_vec()) + } + + /// All Child api uses : + /// - A `child_storage_key` to define the anchor point for the child proof + /// (commonly the location where the child root is stored in its parent trie). + /// - A `child_storage_types` to identify the kind of the child type and how its + /// `child definition` parameter is encoded. + /// - A `child_definition_parameter` which is the additional information required + /// to use the child trie. For instance defaults child tries requires this to + /// contain a collision free unique id. + /// + /// This function specifically returns the data for `key` in the child storage or `None` + /// if the key can not be found. + fn child_get( + &self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, child_info, key) + .map(|s| s.to_vec()) + } + + /// Get `key` from storage, placing the value into `value_out` and return the number of + /// bytes that the entry in storage has beyond the offset or `None` if the storage entry + /// doesn't exist at all. + /// If `value_out` length is smaller than the returned length, only `value_out` length bytes + /// are copied into `value_out`. 
+ fn read(&self, key: &[u8], value_out: &mut [u8], value_offset: u32) -> Option { + self.storage(key).map(|value| { + let value_offset = value_offset as usize; + let data = &value[value_offset.min(value.len())..]; + let written = std::cmp::min(data.len(), value_out.len()); + value_out[..written].copy_from_slice(&data[..written]); + value.len() as u32 + }) + } + + /// Get `key` from child storage, placing the value into `value_out` and return the number + /// of bytes that the entry in storage has beyond the offset or `None` if the storage entry + /// doesn't exist at all. + /// If `value_out` length is smaller than the returned length, only `value_out` length bytes + /// are copied into `value_out`. + /// + /// See `child_get` for common child api parameters. + fn child_read( + &self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + value_out: &mut [u8], + value_offset: u32, + ) -> Option { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.child_storage(storage_key, child_info, key) + .map(|value| { + let value_offset = value_offset as usize; + let data = &value[value_offset.min(value.len())..]; + let written = std::cmp::min(data.len(), value_out.len()); + value_out[..written].copy_from_slice(&data[..written]); + value.len() as u32 + }) + } + + /// Set `key` to `value` in the storage. + fn set(&mut self, key: &[u8], value: &[u8]) { + self.set_storage(key.to_vec(), value.to_vec()); + } + + /// Set `key` to `value` in the child storage denoted by `child_storage_key`. + /// + /// See `child_get` for common child api parameters. 
+ fn child_set( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + value: &[u8], + ) { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.set_child_storage(storage_key, child_info, key.to_vec(), value.to_vec()); + } + + /// Clear the storage of the given `key` and its value. + fn clear(&mut self, key: &[u8]) { + self.clear_storage(key) + } + + /// Clear the given child storage of the given `key` and its value. + /// + /// See `child_get` for common child api parameters. + fn child_clear( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_storage(storage_key, child_info, key); + } + + /// Clear an entire child storage. + /// + /// See `child_get` for common child api parameters. + fn child_storage_kill( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + ) { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.kill_child_storage(storage_key, child_info); + } + + /// Check whether the given `key` exists in storage. + fn exists(&self, key: &[u8]) -> bool { + self.exists_storage(key) + } + + /// Check whether the given `key` exists in storage. + /// + /// See `child_get` for common child api parameters. 
+ fn child_exists( + &self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> bool { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.exists_child_storage(storage_key, child_info, key) + } + + /// Clear the storage of each key-value pair where the key starts with the given `prefix`. + fn clear_prefix(&mut self, prefix: &[u8]) { + Externalities::clear_prefix(*self, prefix) + } + + /// Clear the child storage of each key-value pair where the key starts with the given `prefix`. + /// + /// See `child_get` for common child api parameters. + fn child_clear_prefix( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + prefix: &[u8], + ) { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.clear_child_prefix(storage_key, child_info, prefix); + } + + /// "Commit" all existing operations and compute the resulting storage root. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns the SCALE encoded hash. + fn root(&mut self) -> Vec { + self.storage_root() + } + + /// "Commit" all existing operations and compute the resulting child storage root. + /// + /// The hashing algorithm is defined by the `Block`. + /// + /// Returns the SCALE encoded hash. + /// + /// See `child_get` for common child api parameters. + fn child_root(&mut self, child_storage_key: &[u8]) -> Vec { + let storage_key = child_storage_key_or_panic(child_storage_key); + self.child_storage_root(storage_key) + } + + /// "Commit" all existing operations and get the resulting storage change root. + /// `parent_hash` is a SCALE encoded hash. + /// + /// The hashing algorithm is defined by the `Block`. 
+ /// + /// Returns an `Some(_)` which holds the SCALE encoded hash or `None` when + /// changes trie is disabled. + fn changes_root(&mut self, parent_hash: &[u8]) -> Option> { + self.storage_changes_root(parent_hash) + .expect("Invalid `parent_hash` given to `changes_root`.") + } + + /// Get the next key in storage after the given one in lexicographic order. + fn next_key(&mut self, key: &[u8]) -> Option> { + self.next_storage_key(&key) + } + + /// Get the next key in storage after the given one in lexicographic order in child storage. + fn child_next_key( + &mut self, + child_storage_key: &[u8], + child_definition: &[u8], + child_type: u32, + key: &[u8], + ) -> Option> { + let storage_key = child_storage_key_or_panic(child_storage_key); + let child_info = ChildInfo::resolve_child_info(child_type, child_definition) + .expect("Invalid child definition"); + self.next_child_storage_key(storage_key, child_info, key) + } } /// Interface that provides trie related functionality. #[runtime_interface] pub trait Trie { - /// A trie root formed from the iterated items. - fn blake2_256_root(input: Vec<(Vec, Vec)>) -> H256 { - Layout::::trie_root(input) - } - - /// A trie root formed from the enumerated items. - fn blake2_256_ordered_root(input: Vec>) -> H256 { - Layout::::ordered_trie_root(input) - } + /// A trie root formed from the iterated items. + fn blake2_256_root(input: Vec<(Vec, Vec)>) -> H256 { + Layout::::trie_root(input) + } + + /// A trie root formed from the enumerated items. + fn blake2_256_ordered_root(input: Vec>) -> H256 { + Layout::::ordered_trie_root(input) + } } /// Interface that provides miscellaneous functions for communicating between the runtime and the node. #[runtime_interface] pub trait Misc { - /// The current relay chain identifier. - fn chain_id(&self) -> u64 { - sp_externalities::Externalities::chain_id(*self) - } - - /// Print a number. 
- fn print_num(val: u64) { - log::debug!(target: "runtime", "{}", val); - } - - /// Print any valid `utf8` buffer. - fn print_utf8(utf8: &[u8]) { - if let Ok(data) = std::str::from_utf8(utf8) { - log::debug!(target: "runtime", "{}", data) - } - } - - /// Print any `u8` slice as hex. - fn print_hex(data: &[u8]) { - log::debug!(target: "runtime", "{}", HexDisplay::from(&data)); - } - - /// Extract the runtime version of the given wasm blob by calling `Core_version`. - /// - /// Returns the SCALE encoded runtime version and `None` if the call failed. - /// - /// # Performance - /// - /// Calling this function is very expensive and should only be done very occasionally. - /// For getting the runtime version, it requires instantiating the wasm blob and calling a - /// function in this blob. - fn runtime_version(&mut self, wasm: &[u8]) -> Option> { - // Create some dummy externalities, `Core_version` should not write data anyway. - let mut ext = sp_state_machine::BasicExternalities::default(); - - self.extension::() - .expect("No `CallInWasmExt` associated for the current context!") - .call_in_wasm(wasm, None, "Core_version", &[], &mut ext) - .ok() - } + /// The current relay chain identifier. + fn chain_id(&self) -> u64 { + sp_externalities::Externalities::chain_id(*self) + } + + /// Print a number. + fn print_num(val: u64) { + log::debug!(target: "runtime", "{}", val); + } + + /// Print any valid `utf8` buffer. + fn print_utf8(utf8: &[u8]) { + if let Ok(data) = std::str::from_utf8(utf8) { + log::debug!(target: "runtime", "{}", data) + } + } + + /// Print any `u8` slice as hex. + fn print_hex(data: &[u8]) { + log::debug!(target: "runtime", "{}", HexDisplay::from(&data)); + } + + /// Extract the runtime version of the given wasm blob by calling `Core_version`. + /// + /// Returns the SCALE encoded runtime version and `None` if the call failed. + /// + /// # Performance + /// + /// Calling this function is very expensive and should only be done very occasionally. 
+ /// For getting the runtime version, it requires instantiating the wasm blob and calling a + /// function in this blob. + fn runtime_version(&mut self, wasm: &[u8]) -> Option> { + // Create some dummy externalities, `Core_version` should not write data anyway. + let mut ext = sp_state_machine::BasicExternalities::default(); + + self.extension::() + .expect("No `CallInWasmExt` associated for the current context!") + .call_in_wasm(wasm, None, "Core_version", &[], &mut ext) + .ok() + } } /// Interfaces for working with crypto related types from within the runtime. #[runtime_interface] pub trait Crypto { - /// Returns all `ed25519` public keys for the given key id from the keystore. - fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - self.extension::() - .expect("No `keystore` associated for the current context!") - .read() - .ed25519_public_keys(id) - } - - /// Generate an `ed22519` key for the given key type using an optional `seed` and - /// store it in the keystore. - /// - /// The `seed` needs to be a valid utf8. - /// - /// Returns the public key. - fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> ed25519::Public { - let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - self.extension::() - .expect("No `keystore` associated for the current context!") - .write() - .ed25519_generate_new(id, seed) - .expect("`ed25519_generate` failed") - } - - /// Sign the given `msg` with the `ed25519` key that corresponds to the given public key and - /// key type in the keystore. - /// - /// Returns the signature. - fn ed25519_sign( - &mut self, - id: KeyTypeId, - pub_key: &ed25519::Public, - msg: &[u8], - ) -> Option { - self.extension::() - .expect("No `keystore` associated for the current context!") - .read() - .sign_with(id, &pub_key.into(), msg) - .map(|sig| ed25519::Signature::from_slice(sig.as_slice())) - .ok() - } - - /// Verify `ed25519` signature. 
- /// - /// Returns `true` when the verification is either successful or batched. - /// If no batching verification extension registered, this will return the result - /// of verification immediately. If batching verification extension is registered - /// caller should call `crypto::finish_batch_verify` to actualy check all submitted - /// signatures. - fn ed25519_verify( - sig: &ed25519::Signature, - msg: &[u8], - pub_key: &ed25519::Public, - ) -> bool { - // TODO: see #5554, this is used outside of externalities context/runtime, thus this manual - // `with_externalities`. - // - // This `with_externalities(..)` block returns Some(Some(result)) if signature verification was successfully - // batched, everything else (Some(None)/None) means it was not batched and needs to be verified. - let evaluated = sp_externalities::with_externalities(|mut instance| - instance.extension::().map( - |extension| extension.push_ed25519( - sig.clone(), - pub_key.clone(), - msg.to_vec(), - ) - ) - ); - - match evaluated { - Some(Some(val)) => val, - _ => ed25519::Pair::verify(sig, msg, pub_key), - } - } - - /// Verify `sr25519` signature. - /// - /// Returns `true` when the verification is either successful or batched. - /// If no batching verification extension registered, this will return the result - /// of verification immediately. If batching verification extension is registered, - /// caller should call `crypto::finish_batch_verify` to actualy check all submitted - #[version(2)] - fn sr25519_verify( - sig: &sr25519::Signature, - msg: &[u8], - pub_key: &sr25519::Public, - ) -> bool { - // TODO: see #5554, this is used outside of externalities context/runtime, thus this manual - // `with_externalities`. - // - // This `with_externalities(..)` block returns Some(Some(result)) if signature verification was successfully - // batched, everything else (Some(None)/None) means it was not batched and needs to be verified. 
- let evaluated = sp_externalities::with_externalities(|mut instance| - instance.extension::().map( - |extension| extension.push_sr25519( - sig.clone(), - pub_key.clone(), - msg.to_vec(), - ) - ) - ); - - match evaluated { - Some(Some(val)) => val, - _ => sr25519::Pair::verify(sig, msg, pub_key), - } - } - - /// Start verification extension. - fn start_batch_verify(&mut self) { - let scheduler = self.extension::() - .expect("No task executor associated with the current context!") - .0 - .clone(); - - self.register_extension(VerificationExt(BatchVerifier::new(scheduler))) - .expect("Failed to register required extension: `VerificationExt`"); - } - - /// Finish batch-verification of signatures. - /// - /// Verify or wait for verification to finish for all signatures which were previously - /// deferred by `sr25519_verify`/`ed25519_verify`. - /// - /// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called). - fn finish_batch_verify(&mut self) -> bool { - let result = self.extension::() - .expect("`finish_batch_verify` should only be called after `start_batch_verify`") - .verify_and_clear(); - - self.deregister_extension::() - .expect("No verification extension in current context!"); - - result - } - - /// Returns all `sr25519` public keys for the given key id from the keystore. - fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec { - self.extension::() - .expect("No `keystore` associated for the current context!") - .read() - .sr25519_public_keys(id) - } - - /// Generate an `sr22519` key for the given key type using an optional seed and - /// store it in the keystore. - /// - /// The `seed` needs to be a valid utf8. - /// - /// Returns the public key. 
- fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> sr25519::Public { - let seed = seed.as_ref().map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); - self.extension::() - .expect("No `keystore` associated for the current context!") - .write() - .sr25519_generate_new(id, seed) - .expect("`sr25519_generate` failed") - } - - /// Sign the given `msg` with the `sr25519` key that corresponds to the given public key and - /// key type in the keystore. - /// - /// Returns the signature. - fn sr25519_sign( - &mut self, - id: KeyTypeId, - pub_key: &sr25519::Public, - msg: &[u8], - ) -> Option { - self.extension::() - .expect("No `keystore` associated for the current context!") - .read() - .sign_with(id, &pub_key.into(), msg) - .map(|sig| sr25519::Signature::from_slice(sig.as_slice())) - .ok() - } - - /// Verify an `sr25519` signature. - /// - /// Returns `true` when the verification in successful regardless of - /// signature version. - fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pubkey: &sr25519::Public) -> bool { - sr25519::Pair::verify_deprecated(sig, msg, pubkey) - } - - /// Verify and recover a SECP256k1 ECDSA signature. - /// - /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. - /// - `msg` is the blake2-256 hash of the message. - /// - /// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey - /// (doesn't include the 0x04 prefix). 
- fn secp256k1_ecdsa_recover( - sig: &[u8; 65], - msg: &[u8; 32], - ) -> Result<[u8; 64], EcdsaVerifyError> { - let rs = secp256k1::Signature::parse_slice(&sig[0..64]) - .map_err(|_| EcdsaVerifyError::BadRS)?; - let v = secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) - .map_err(|_| EcdsaVerifyError::BadV)?; - let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) - .map_err(|_| EcdsaVerifyError::BadSignature)?; - let mut res = [0u8; 64]; - res.copy_from_slice(&pubkey.serialize()[1..65]); - Ok(res) - } - - /// Verify and recover a SECP256k1 ECDSA signature. - /// - /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. - /// - `msg` is the blake2-256 hash of the message. - /// - /// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey. - fn secp256k1_ecdsa_recover_compressed( - sig: &[u8; 65], - msg: &[u8; 32], - ) -> Result<[u8; 33], EcdsaVerifyError> { - let rs = secp256k1::Signature::parse_slice(&sig[0..64]) - .map_err(|_| EcdsaVerifyError::BadRS)?; - let v = secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) - .map_err(|_| EcdsaVerifyError::BadV)?; - let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) - .map_err(|_| EcdsaVerifyError::BadSignature)?; - Ok(pubkey.serialize_compressed()) - } + /// Returns all `ed25519` public keys for the given key id from the keystore. + fn ed25519_public_keys(&mut self, id: KeyTypeId) -> Vec { + self.extension::() + .expect("No `keystore` associated for the current context!") + .read() + .ed25519_public_keys(id) + } + + /// Generate an `ed22519` key for the given key type using an optional `seed` and + /// store it in the keystore. + /// + /// The `seed` needs to be a valid utf8. + /// + /// Returns the public key. 
+ fn ed25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> ed25519::Public { + let seed = seed + .as_ref() + .map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); + self.extension::() + .expect("No `keystore` associated for the current context!") + .write() + .ed25519_generate_new(id, seed) + .expect("`ed25519_generate` failed") + } + + /// Sign the given `msg` with the `ed25519` key that corresponds to the given public key and + /// key type in the keystore. + /// + /// Returns the signature. + fn ed25519_sign( + &mut self, + id: KeyTypeId, + pub_key: &ed25519::Public, + msg: &[u8], + ) -> Option { + self.extension::() + .expect("No `keystore` associated for the current context!") + .read() + .sign_with(id, &pub_key.into(), msg) + .map(|sig| ed25519::Signature::from_slice(sig.as_slice())) + .ok() + } + + /// Verify `ed25519` signature. + /// + /// Returns `true` when the verification is either successful or batched. + /// If no batching verification extension registered, this will return the result + /// of verification immediately. If batching verification extension is registered + /// caller should call `crypto::finish_batch_verify` to actualy check all submitted + /// signatures. + fn ed25519_verify(sig: &ed25519::Signature, msg: &[u8], pub_key: &ed25519::Public) -> bool { + // TODO: see #5554, this is used outside of externalities context/runtime, thus this manual + // `with_externalities`. + // + // This `with_externalities(..)` block returns Some(Some(result)) if signature verification was successfully + // batched, everything else (Some(None)/None) means it was not batched and needs to be verified. + let evaluated = sp_externalities::with_externalities(|mut instance| { + instance + .extension::() + .map(|extension| extension.push_ed25519(sig.clone(), pub_key.clone(), msg.to_vec())) + }); + + match evaluated { + Some(Some(val)) => val, + _ => ed25519::Pair::verify(sig, msg, pub_key), + } + } + + /// Verify `sr25519` signature. 
+ /// + /// Returns `true` when the verification is either successful or batched. + /// If no batching verification extension registered, this will return the result + /// of verification immediately. If batching verification extension is registered, + /// caller should call `crypto::finish_batch_verify` to actualy check all submitted + #[version(2)] + fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pub_key: &sr25519::Public) -> bool { + // TODO: see #5554, this is used outside of externalities context/runtime, thus this manual + // `with_externalities`. + // + // This `with_externalities(..)` block returns Some(Some(result)) if signature verification was successfully + // batched, everything else (Some(None)/None) means it was not batched and needs to be verified. + let evaluated = sp_externalities::with_externalities(|mut instance| { + instance + .extension::() + .map(|extension| extension.push_sr25519(sig.clone(), pub_key.clone(), msg.to_vec())) + }); + + match evaluated { + Some(Some(val)) => val, + _ => sr25519::Pair::verify(sig, msg, pub_key), + } + } + + /// Start verification extension. + fn start_batch_verify(&mut self) { + let scheduler = self + .extension::() + .expect("No task executor associated with the current context!") + .0 + .clone(); + + self.register_extension(VerificationExt(BatchVerifier::new(scheduler))) + .expect("Failed to register required extension: `VerificationExt`"); + } + + /// Finish batch-verification of signatures. + /// + /// Verify or wait for verification to finish for all signatures which were previously + /// deferred by `sr25519_verify`/`ed25519_verify`. + /// + /// Will panic if no `VerificationExt` is registered (`start_batch_verify` was not called). 
+ fn finish_batch_verify(&mut self) -> bool { + let result = self + .extension::() + .expect("`finish_batch_verify` should only be called after `start_batch_verify`") + .verify_and_clear(); + + self.deregister_extension::() + .expect("No verification extension in current context!"); + + result + } + + /// Returns all `sr25519` public keys for the given key id from the keystore. + fn sr25519_public_keys(&mut self, id: KeyTypeId) -> Vec { + self.extension::() + .expect("No `keystore` associated for the current context!") + .read() + .sr25519_public_keys(id) + } + + /// Generate an `sr22519` key for the given key type using an optional seed and + /// store it in the keystore. + /// + /// The `seed` needs to be a valid utf8. + /// + /// Returns the public key. + fn sr25519_generate(&mut self, id: KeyTypeId, seed: Option>) -> sr25519::Public { + let seed = seed + .as_ref() + .map(|s| std::str::from_utf8(&s).expect("Seed is valid utf8!")); + self.extension::() + .expect("No `keystore` associated for the current context!") + .write() + .sr25519_generate_new(id, seed) + .expect("`sr25519_generate` failed") + } + + /// Sign the given `msg` with the `sr25519` key that corresponds to the given public key and + /// key type in the keystore. + /// + /// Returns the signature. + fn sr25519_sign( + &mut self, + id: KeyTypeId, + pub_key: &sr25519::Public, + msg: &[u8], + ) -> Option { + self.extension::() + .expect("No `keystore` associated for the current context!") + .read() + .sign_with(id, &pub_key.into(), msg) + .map(|sig| sr25519::Signature::from_slice(sig.as_slice())) + .ok() + } + + /// Verify an `sr25519` signature. + /// + /// Returns `true` when the verification in successful regardless of + /// signature version. + fn sr25519_verify(sig: &sr25519::Signature, msg: &[u8], pubkey: &sr25519::Public) -> bool { + sr25519::Pair::verify_deprecated(sig, msg, pubkey) + } + + /// Verify and recover a SECP256k1 ECDSA signature. + /// + /// - `sig` is passed in RSV format. 
V should be either `0/1` or `27/28`. + /// - `msg` is the blake2-256 hash of the message. + /// + /// Returns `Err` if the signature is bad, otherwise the 64-byte pubkey + /// (doesn't include the 0x04 prefix). + fn secp256k1_ecdsa_recover( + sig: &[u8; 65], + msg: &[u8; 32], + ) -> Result<[u8; 64], EcdsaVerifyError> { + let rs = + secp256k1::Signature::parse_slice(&sig[0..64]).map_err(|_| EcdsaVerifyError::BadRS)?; + let v = + secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) + .map_err(|_| EcdsaVerifyError::BadV)?; + let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) + .map_err(|_| EcdsaVerifyError::BadSignature)?; + let mut res = [0u8; 64]; + res.copy_from_slice(&pubkey.serialize()[1..65]); + Ok(res) + } + + /// Verify and recover a SECP256k1 ECDSA signature. + /// + /// - `sig` is passed in RSV format. V should be either `0/1` or `27/28`. + /// - `msg` is the blake2-256 hash of the message. + /// + /// Returns `Err` if the signature is bad, otherwise the 33-byte compressed pubkey. + fn secp256k1_ecdsa_recover_compressed( + sig: &[u8; 65], + msg: &[u8; 32], + ) -> Result<[u8; 33], EcdsaVerifyError> { + let rs = + secp256k1::Signature::parse_slice(&sig[0..64]).map_err(|_| EcdsaVerifyError::BadRS)?; + let v = + secp256k1::RecoveryId::parse(if sig[64] > 26 { sig[64] - 27 } else { sig[64] } as u8) + .map_err(|_| EcdsaVerifyError::BadV)?; + let pubkey = secp256k1::recover(&secp256k1::Message::parse(msg), &rs, &v) + .map_err(|_| EcdsaVerifyError::BadSignature)?; + Ok(pubkey.serialize_compressed()) + } } /// Interface that provides functions for hashing with different algorithms. #[runtime_interface] pub trait Hashing { - /// Conduct a 256-bit Keccak hash. - fn keccak_256(data: &[u8]) -> [u8; 32] { - sp_core::hashing::keccak_256(data) - } - - /// Conduct a 256-bit Sha2 hash. - fn sha2_256(data: &[u8]) -> [u8; 32] { - sp_core::hashing::sha2_256(data) - } - - /// Conduct a 128-bit Blake2 hash. 
- fn blake2_128(data: &[u8]) -> [u8; 16] { - sp_core::hashing::blake2_128(data) - } - - /// Conduct a 256-bit Blake2 hash. - fn blake2_256(data: &[u8]) -> [u8; 32] { - sp_core::hashing::blake2_256(data) - } - - /// Conduct four XX hashes to give a 256-bit result. - fn twox_256(data: &[u8]) -> [u8; 32] { - sp_core::hashing::twox_256(data) - } - - /// Conduct two XX hashes to give a 128-bit result. - fn twox_128(data: &[u8]) -> [u8; 16] { - sp_core::hashing::twox_128(data) - } - - /// Conduct two XX hashes to give a 64-bit result. - fn twox_64(data: &[u8]) -> [u8; 8] { - sp_core::hashing::twox_64(data) - } + /// Conduct a 256-bit Keccak hash. + fn keccak_256(data: &[u8]) -> [u8; 32] { + sp_core::hashing::keccak_256(data) + } + + /// Conduct a 256-bit Sha2 hash. + fn sha2_256(data: &[u8]) -> [u8; 32] { + sp_core::hashing::sha2_256(data) + } + + /// Conduct a 128-bit Blake2 hash. + fn blake2_128(data: &[u8]) -> [u8; 16] { + sp_core::hashing::blake2_128(data) + } + + /// Conduct a 256-bit Blake2 hash. + fn blake2_256(data: &[u8]) -> [u8; 32] { + sp_core::hashing::blake2_256(data) + } + + /// Conduct four XX hashes to give a 256-bit result. + fn twox_256(data: &[u8]) -> [u8; 32] { + sp_core::hashing::twox_256(data) + } + + /// Conduct two XX hashes to give a 128-bit result. + fn twox_128(data: &[u8]) -> [u8; 16] { + sp_core::hashing::twox_128(data) + } + + /// Conduct two XX hashes to give a 64-bit result. + fn twox_64(data: &[u8]) -> [u8; 8] { + sp_core::hashing::twox_64(data) + } } #[cfg(feature = "std")] sp_externalities::decl_extension! { - /// The keystore extension to register/retrieve from the externalities. - pub struct VerificationExt(BatchVerifier); + /// The keystore extension to register/retrieve from the externalities. + pub struct VerificationExt(BatchVerifier); } /// Interface that provides functions to access the offchain functionality. #[runtime_interface] pub trait Offchain { - /// Returns if the local node is a potential validator. 
- /// - /// Even if this function returns `true`, it does not mean that any keys are configured - /// and that the validator is registered in the chain. - fn is_validator(&mut self) -> bool { - self.extension::() - .expect("is_validator can be called only in the offchain worker context") - .is_validator() - } - - /// Submit an encoded transaction to the pool. - /// - /// The transaction will end up in the pool. - fn submit_transaction(&mut self, data: Vec) -> Result<(), ()> { - self.extension::() - .expect("submit_transaction can be called only in the offchain call context with - TransactionPool capabilities enabled") - .submit_transaction(data) - } - - /// Returns information about the local node's network state. - fn network_state(&mut self) -> Result { - self.extension::() - .expect("network_state can be called only in the offchain worker context") - .network_state() - } - - /// Returns current UNIX timestamp (in millis) - fn timestamp(&mut self) -> Timestamp { - self.extension::() - .expect("timestamp can be called only in the offchain worker context") - .timestamp() - } - - /// Pause the execution until `deadline` is reached. - fn sleep_until(&mut self, deadline: Timestamp) { - self.extension::() - .expect("sleep_until can be called only in the offchain worker context") - .sleep_until(deadline) - } - - /// Returns a random seed. - /// - /// This is a truly random, non-deterministic seed generated by host environment. - /// Obviously fine in the off-chain worker context. - fn random_seed(&mut self) -> [u8; 32] { - self.extension::() - .expect("random_seed can be called only in the offchain worker context") - .random_seed() - } - - /// Sets a value in the local storage. - /// - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It IS persisted between runs. 
- fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { - self.extension::() - .expect("local_storage_set can be called only in the offchain worker context") - .local_storage_set(kind, key, value) - } - - /// Sets a value in the local storage if it matches current value. - /// - /// Since multiple offchain workers may be running concurrently, to prevent - /// data races use CAS to coordinate between them. - /// - /// Returns `true` if the value has been set, `false` otherwise. - /// - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It IS persisted between runs. - fn local_storage_compare_and_set( - &mut self, - kind: StorageKind, - key: &[u8], - old_value: Option>, - new_value: &[u8], - ) -> bool { - self.extension::() - .expect("local_storage_compare_and_set can be called only in the offchain worker context") - .local_storage_compare_and_set(kind, key, old_value.as_ref().map(|v| v.deref()), new_value) - } - - /// Gets a value from the local storage. - /// - /// If the value does not exist in the storage `None` will be returned. - /// Note this storage is not part of the consensus, it's only accessible by - /// offchain worker tasks running on the same machine. It IS persisted between runs. - fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { - self.extension::() - .expect("local_storage_get can be called only in the offchain worker context") - .local_storage_get(kind, key) - } - - /// Initiates a http request given HTTP verb and the URL. - /// - /// Meta is a future-reserved field containing additional, parity-scale-codec encoded parameters. - /// Returns the id of newly started request. 
- fn http_request_start( - &mut self, - method: &str, - uri: &str, - meta: &[u8], - ) -> Result { - self.extension::() - .expect("http_request_start can be called only in the offchain worker context") - .http_request_start(method, uri, meta) - } - - /// Append header to the request. - fn http_request_add_header( - &mut self, - request_id: HttpRequestId, - name: &str, - value: &str, - ) -> Result<(), ()> { - self.extension::() - .expect("http_request_add_header can be called only in the offchain worker context") - .http_request_add_header(request_id, name, value) - } - - /// Write a chunk of request body. - /// - /// Writing an empty chunks finalizes the request. - /// Passing `None` as deadline blocks forever. - /// - /// Returns an error in case deadline is reached or the chunk couldn't be written. - fn http_request_write_body( - &mut self, - request_id: HttpRequestId, - chunk: &[u8], - deadline: Option, - ) -> Result<(), HttpError> { - self.extension::() - .expect("http_request_write_body can be called only in the offchain worker context") - .http_request_write_body(request_id, chunk, deadline) - } - - /// Block and wait for the responses for given requests. - /// - /// Returns a vector of request statuses (the len is the same as ids). - /// Note that if deadline is not provided the method will block indefinitely, - /// otherwise unready responses will produce `DeadlineReached` status. - /// - /// Passing `None` as deadline blocks forever. - fn http_response_wait( - &mut self, - ids: &[HttpRequestId], - deadline: Option, - ) -> Vec { - self.extension::() - .expect("http_response_wait can be called only in the offchain worker context") - .http_response_wait(ids, deadline) - } - - /// Read all response headers. - /// - /// Returns a vector of pairs `(HeaderKey, HeaderValue)`. - /// NOTE response headers have to be read before response body. 
- fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { - self.extension::() - .expect("http_response_headers can be called only in the offchain worker context") - .http_response_headers(request_id) - } - - /// Read a chunk of body response to given buffer. - /// - /// Returns the number of bytes written or an error in case a deadline - /// is reached or server closed the connection. - /// If `0` is returned it means that the response has been fully consumed - /// and the `request_id` is now invalid. - /// NOTE this implies that response headers must be read before draining the body. - /// Passing `None` as a deadline blocks forever. - fn http_response_read_body( - &mut self, - request_id: HttpRequestId, - buffer: &mut [u8], - deadline: Option, - ) -> Result { - self.extension::() - .expect("http_response_read_body can be called only in the offchain worker context") - .http_response_read_body(request_id, buffer, deadline) - .map(|r| r as u32) - } + /// Returns if the local node is a potential validator. + /// + /// Even if this function returns `true`, it does not mean that any keys are configured + /// and that the validator is registered in the chain. + fn is_validator(&mut self) -> bool { + self.extension::() + .expect("is_validator can be called only in the offchain worker context") + .is_validator() + } + + /// Submit an encoded transaction to the pool. + /// + /// The transaction will end up in the pool. + fn submit_transaction(&mut self, data: Vec) -> Result<(), ()> { + self.extension::() + .expect( + "submit_transaction can be called only in the offchain call context with + TransactionPool capabilities enabled", + ) + .submit_transaction(data) + } + + /// Returns information about the local node's network state. 
+ fn network_state(&mut self) -> Result { + self.extension::() + .expect("network_state can be called only in the offchain worker context") + .network_state() + } + + /// Returns current UNIX timestamp (in millis) + fn timestamp(&mut self) -> Timestamp { + self.extension::() + .expect("timestamp can be called only in the offchain worker context") + .timestamp() + } + + /// Pause the execution until `deadline` is reached. + fn sleep_until(&mut self, deadline: Timestamp) { + self.extension::() + .expect("sleep_until can be called only in the offchain worker context") + .sleep_until(deadline) + } + + /// Returns a random seed. + /// + /// This is a truly random, non-deterministic seed generated by host environment. + /// Obviously fine in the off-chain worker context. + fn random_seed(&mut self) -> [u8; 32] { + self.extension::() + .expect("random_seed can be called only in the offchain worker context") + .random_seed() + } + + /// Sets a value in the local storage. + /// + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It IS persisted between runs. + fn local_storage_set(&mut self, kind: StorageKind, key: &[u8], value: &[u8]) { + self.extension::() + .expect("local_storage_set can be called only in the offchain worker context") + .local_storage_set(kind, key, value) + } + + /// Sets a value in the local storage if it matches current value. + /// + /// Since multiple offchain workers may be running concurrently, to prevent + /// data races use CAS to coordinate between them. + /// + /// Returns `true` if the value has been set, `false` otherwise. + /// + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It IS persisted between runs. 
+ fn local_storage_compare_and_set( + &mut self, + kind: StorageKind, + key: &[u8], + old_value: Option>, + new_value: &[u8], + ) -> bool { + self.extension::() + .expect( + "local_storage_compare_and_set can be called only in the offchain worker context", + ) + .local_storage_compare_and_set( + kind, + key, + old_value.as_ref().map(|v| v.deref()), + new_value, + ) + } + + /// Gets a value from the local storage. + /// + /// If the value does not exist in the storage `None` will be returned. + /// Note this storage is not part of the consensus, it's only accessible by + /// offchain worker tasks running on the same machine. It IS persisted between runs. + fn local_storage_get(&mut self, kind: StorageKind, key: &[u8]) -> Option> { + self.extension::() + .expect("local_storage_get can be called only in the offchain worker context") + .local_storage_get(kind, key) + } + + /// Initiates a http request given HTTP verb and the URL. + /// + /// Meta is a future-reserved field containing additional, parity-scale-codec encoded parameters. + /// Returns the id of newly started request. + fn http_request_start( + &mut self, + method: &str, + uri: &str, + meta: &[u8], + ) -> Result { + self.extension::() + .expect("http_request_start can be called only in the offchain worker context") + .http_request_start(method, uri, meta) + } + + /// Append header to the request. + fn http_request_add_header( + &mut self, + request_id: HttpRequestId, + name: &str, + value: &str, + ) -> Result<(), ()> { + self.extension::() + .expect("http_request_add_header can be called only in the offchain worker context") + .http_request_add_header(request_id, name, value) + } + + /// Write a chunk of request body. + /// + /// Writing an empty chunks finalizes the request. + /// Passing `None` as deadline blocks forever. + /// + /// Returns an error in case deadline is reached or the chunk couldn't be written. 
+ fn http_request_write_body( + &mut self, + request_id: HttpRequestId, + chunk: &[u8], + deadline: Option, + ) -> Result<(), HttpError> { + self.extension::() + .expect("http_request_write_body can be called only in the offchain worker context") + .http_request_write_body(request_id, chunk, deadline) + } + + /// Block and wait for the responses for given requests. + /// + /// Returns a vector of request statuses (the len is the same as ids). + /// Note that if deadline is not provided the method will block indefinitely, + /// otherwise unready responses will produce `DeadlineReached` status. + /// + /// Passing `None` as deadline blocks forever. + fn http_response_wait( + &mut self, + ids: &[HttpRequestId], + deadline: Option, + ) -> Vec { + self.extension::() + .expect("http_response_wait can be called only in the offchain worker context") + .http_response_wait(ids, deadline) + } + + /// Read all response headers. + /// + /// Returns a vector of pairs `(HeaderKey, HeaderValue)`. + /// NOTE response headers have to be read before response body. + fn http_response_headers(&mut self, request_id: HttpRequestId) -> Vec<(Vec, Vec)> { + self.extension::() + .expect("http_response_headers can be called only in the offchain worker context") + .http_response_headers(request_id) + } + + /// Read a chunk of body response to given buffer. + /// + /// Returns the number of bytes written or an error in case a deadline + /// is reached or server closed the connection. + /// If `0` is returned it means that the response has been fully consumed + /// and the `request_id` is now invalid. + /// NOTE this implies that response headers must be read before draining the body. + /// Passing `None` as a deadline blocks forever. 
+ fn http_response_read_body( + &mut self, + request_id: HttpRequestId, + buffer: &mut [u8], + deadline: Option, + ) -> Result { + self.extension::() + .expect("http_response_read_body can be called only in the offchain worker context") + .http_response_read_body(request_id, buffer, deadline) + .map(|r| r as u32) + } } /// Wasm only interface that provides functions for calling into the allocator. #[runtime_interface(wasm_only)] trait Allocator { - /// Malloc the given number of bytes and return the pointer to the allocated memory location. - fn malloc(&mut self, size: u32) -> Pointer { - self.allocate_memory(size).expect("Failed to allocate memory") - } - - /// Free the given pointer. - fn free(&mut self, ptr: Pointer) { - self.deallocate_memory(ptr).expect("Failed to deallocate memory") - } + /// Malloc the given number of bytes and return the pointer to the allocated memory location. + fn malloc(&mut self, size: u32) -> Pointer { + self.allocate_memory(size) + .expect("Failed to allocate memory") + } + + /// Free the given pointer. + fn free(&mut self, ptr: Pointer) { + self.deallocate_memory(ptr) + .expect("Failed to deallocate memory") + } } /// Interface that provides functions for logging from within the runtime. #[runtime_interface] pub trait Logging { - /// Request to print a log message on the host. - /// - /// Note that this will be only displayed if the host is enabled to display log messages with - /// given level and target. - /// - /// Instead of using directly, prefer setting up `RuntimeLogger` and using `log` macros. - fn log(level: LogLevel, target: &str, message: &[u8]) { - if let Ok(message) = std::str::from_utf8(message) { - log::log!( - target: target, - log::Level::from(level), - "{}", - message, - ) - } - } + /// Request to print a log message on the host. + /// + /// Note that this will be only displayed if the host is enabled to display log messages with + /// given level and target. 
+ /// + /// Instead of using directly, prefer setting up `RuntimeLogger` and using `log` macros. + fn log(level: LogLevel, target: &str, message: &[u8]) { + if let Ok(message) = std::str::from_utf8(message) { + log::log!(target: target, log::Level::from(level), "{}", message,) + } + } } /// Wasm-only interface that provides functions for interacting with the sandbox. #[runtime_interface(wasm_only)] pub trait Sandbox { - /// Instantiate a new sandbox instance with the given `wasm_code`. - fn instantiate( - &mut self, - dispatch_thunk: u32, - wasm_code: &[u8], - env_def: &[u8], - state_ptr: Pointer, - ) -> u32 { - self.sandbox() - .instance_new(dispatch_thunk, wasm_code, env_def, state_ptr.into()) - .expect("Failed to instantiate a new sandbox") - } - - /// Invoke `function` in the sandbox with `sandbox_idx`. - fn invoke( - &mut self, - instance_idx: u32, - function: &str, - args: &[u8], - return_val_ptr: Pointer, - return_val_len: u32, - state_ptr: Pointer, - ) -> u32 { - self.sandbox().invoke( - instance_idx, - &function, - &args, - return_val_ptr, - return_val_len, - state_ptr.into(), - ).expect("Failed to invoke function with sandbox") - } - - /// Create a new memory instance with the given `initial` and `maximum` size. - fn memory_new(&mut self, initial: u32, maximum: u32) -> u32 { - self.sandbox() - .memory_new(initial, maximum) - .expect("Failed to create new memory with sandbox") - } - - /// Get the memory starting at `offset` from the instance with `memory_idx` into the buffer. - fn memory_get( - &mut self, - memory_idx: u32, - offset: u32, - buf_ptr: Pointer, - buf_len: u32, - ) -> u32 { - self.sandbox() - .memory_get(memory_idx, offset, buf_ptr, buf_len) - .expect("Failed to get memory with sandbox") - } - - /// Set the memory in the given `memory_idx` to the given value at `offset`. 
- fn memory_set( - &mut self, - memory_idx: u32, - offset: u32, - val_ptr: Pointer, - val_len: u32, - ) -> u32 { - self.sandbox() - .memory_set(memory_idx, offset, val_ptr, val_len) - .expect("Failed to set memory with sandbox") - } - - /// Teardown the memory instance with the given `memory_idx`. - fn memory_teardown(&mut self, memory_idx: u32) { - self.sandbox().memory_teardown(memory_idx).expect("Failed to teardown memory with sandbox") - } - - /// Teardown the sandbox instance with the given `instance_idx`. - fn instance_teardown(&mut self, instance_idx: u32) { - self.sandbox().instance_teardown(instance_idx).expect("Failed to teardown sandbox instance") - } - - /// Get the value from a global with the given `name`. The sandbox is determined by the given - /// `instance_idx`. - /// - /// Returns `Some(_)` when the requested global variable could be found. - fn get_global_val(&mut self, instance_idx: u32, name: &str) -> Option { - self.sandbox().get_global_val(instance_idx, name).expect("Failed to get global from sandbox") - } + /// Instantiate a new sandbox instance with the given `wasm_code`. + fn instantiate( + &mut self, + dispatch_thunk: u32, + wasm_code: &[u8], + env_def: &[u8], + state_ptr: Pointer, + ) -> u32 { + self.sandbox() + .instance_new(dispatch_thunk, wasm_code, env_def, state_ptr.into()) + .expect("Failed to instantiate a new sandbox") + } + + /// Invoke `function` in the sandbox with `sandbox_idx`. + fn invoke( + &mut self, + instance_idx: u32, + function: &str, + args: &[u8], + return_val_ptr: Pointer, + return_val_len: u32, + state_ptr: Pointer, + ) -> u32 { + self.sandbox() + .invoke( + instance_idx, + &function, + &args, + return_val_ptr, + return_val_len, + state_ptr.into(), + ) + .expect("Failed to invoke function with sandbox") + } + + /// Create a new memory instance with the given `initial` and `maximum` size. 
+ fn memory_new(&mut self, initial: u32, maximum: u32) -> u32 { + self.sandbox() + .memory_new(initial, maximum) + .expect("Failed to create new memory with sandbox") + } + + /// Get the memory starting at `offset` from the instance with `memory_idx` into the buffer. + fn memory_get( + &mut self, + memory_idx: u32, + offset: u32, + buf_ptr: Pointer, + buf_len: u32, + ) -> u32 { + self.sandbox() + .memory_get(memory_idx, offset, buf_ptr, buf_len) + .expect("Failed to get memory with sandbox") + } + + /// Set the memory in the given `memory_idx` to the given value at `offset`. + fn memory_set( + &mut self, + memory_idx: u32, + offset: u32, + val_ptr: Pointer, + val_len: u32, + ) -> u32 { + self.sandbox() + .memory_set(memory_idx, offset, val_ptr, val_len) + .expect("Failed to set memory with sandbox") + } + + /// Teardown the memory instance with the given `memory_idx`. + fn memory_teardown(&mut self, memory_idx: u32) { + self.sandbox() + .memory_teardown(memory_idx) + .expect("Failed to teardown memory with sandbox") + } + + /// Teardown the sandbox instance with the given `instance_idx`. + fn instance_teardown(&mut self, instance_idx: u32) { + self.sandbox() + .instance_teardown(instance_idx) + .expect("Failed to teardown sandbox instance") + } + + /// Get the value from a global with the given `name`. The sandbox is determined by the given + /// `instance_idx`. + /// + /// Returns `Some(_)` when the requested global variable could be found. + fn get_global_val( + &mut self, + instance_idx: u32, + name: &str, + ) -> Option { + self.sandbox() + .get_global_val(instance_idx, name) + .expect("Failed to get global from sandbox") + } } /// Allocator used by Substrate when executing the Wasm runtime. 
@@ -973,18 +985,18 @@ static ALLOCATOR: WasmAllocator = WasmAllocator; #[cfg(not(feature = "std"))] mod allocator_impl { - use super::*; - use core::alloc::{GlobalAlloc, Layout}; - - unsafe impl GlobalAlloc for WasmAllocator { - unsafe fn alloc(&self, layout: Layout) -> *mut u8 { - allocator::malloc(layout.size() as u32) - } - - unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) { - allocator::free(ptr) - } - } + use super::*; + use core::alloc::{GlobalAlloc, Layout}; + + unsafe impl GlobalAlloc for WasmAllocator { + unsafe fn alloc(&self, layout: Layout) -> *mut u8 { + allocator::malloc(layout.size() as u32) + } + + unsafe fn dealloc(&self, ptr: *mut u8, _: Layout) { + allocator::free(ptr) + } + } } /// A default panic handler for WASM environment. @@ -992,21 +1004,25 @@ mod allocator_impl { #[panic_handler] #[no_mangle] pub fn panic(info: &core::panic::PanicInfo) -> ! { - unsafe { - let message = sp_std::alloc::format!("{}", info); - logging::log(LogLevel::Error, "runtime", message.as_bytes()); - core::arch::wasm32::unreachable(); - } + unsafe { + let message = sp_std::alloc::format!("{}", info); + logging::log(LogLevel::Error, "runtime", message.as_bytes()); + core::arch::wasm32::unreachable(); + } } /// A default OOM handler for WASM environment. #[cfg(all(not(feature = "disable_oom"), not(feature = "std")))] #[alloc_error_handler] pub fn oom(_: core::alloc::Layout) -> ! { - unsafe { - logging::log(LogLevel::Error, "runtime", b"Runtime memory exhausted. Aborting"); - core::arch::wasm32::unreachable(); - } + unsafe { + logging::log( + LogLevel::Error, + "runtime", + b"Runtime memory exhausted. Aborting", + ); + core::arch::wasm32::unreachable(); + } } /// Type alias for Externalities implementation used in tests. 
@@ -1018,211 +1034,201 @@ pub type TestExternalities = sp_state_machine::TestExternalities b"bar".to_vec()], - children: map![], - }); - - t.execute_with(|| { - assert_eq!(storage::get(b"hello"), None); - assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec())); - }); - } - - #[test] - fn read_storage_works() { - let mut t = BasicExternalities::new(Storage { - top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], - children: map![], - }); - - t.execute_with(|| { - let mut v = [0u8; 4]; - assert!(storage::read(b":test", &mut v[..], 0).unwrap() >= 4); - assert_eq!(v, [11u8, 0, 0, 0]); - let mut w = [0u8; 11]; - assert!(storage::read(b":test", &mut w[..], 4).unwrap() >= 11); - assert_eq!(&w, b"Hello world"); - }); - } - - #[test] - fn clear_prefix_works() { - let mut t = BasicExternalities::new(Storage { - top: map![ - b":a".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), - b":abcd".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), - b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), - b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() - ], - children: map![], - }); - - t.execute_with(|| { - storage::clear_prefix(b":abc"); - - assert!(storage::get(b":a").is_some()); - assert!(storage::get(b":abdd").is_some()); - assert!(storage::get(b":abcd").is_none()); - assert!(storage::get(b":abc").is_none()); - }); - } - - #[test] - fn dynamic_extensions_work() { - let mut ext = BasicExternalities::with_tasks_executor(); - ext.execute_with(|| { - crypto::start_batch_verify(); - }); - - assert!(ext.extensions().get_mut(TypeId::of::()).is_some()); - - ext.execute_with(|| { - crypto::finish_batch_verify(); - }); - - assert!(ext.extensions().get_mut(TypeId::of::()).is_none()); - } - - #[test] - fn long_sr25519_batching() { - let mut ext = BasicExternalities::with_tasks_executor(); - ext.execute_with(|| { - let pair = sr25519::Pair::generate_with_phrase(None).0; - crypto::start_batch_verify(); - for it in 0..70 { - let msg = format!("Schnorrkel {}!", it); - let 
signature = pair.sign(msg.as_bytes()); - crypto::sr25519_verify(&signature, msg.as_bytes(), &pair.public()); - } - - // push invlaid - crypto::sr25519_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); - assert!(!crypto::finish_batch_verify()); - - crypto::start_batch_verify(); - for it in 0..70 { - let msg = format!("Schnorrkel {}!", it); - let signature = pair.sign(msg.as_bytes()); - crypto::sr25519_verify(&signature, msg.as_bytes(), &pair.public()); - } - assert!(crypto::finish_batch_verify()); - }); - } - - #[test] - fn batching_works() { - let mut ext = BasicExternalities::with_tasks_executor(); - ext.execute_with(|| { - // invalid ed25519 signature - crypto::start_batch_verify(); - crypto::ed25519_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); - assert!(!crypto::finish_batch_verify()); - - // 2 valid ed25519 signatures - crypto::start_batch_verify(); - - let pair = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Important message"; - let signature = pair.sign(msg); - crypto::ed25519_verify(&signature, msg, &pair.public()); - - let pair = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Even more important message"; - let signature = pair.sign(msg); - crypto::ed25519_verify(&signature, msg, &pair.public()); - - assert!(crypto::finish_batch_verify()); - - // 1 valid, 1 invalid ed25519 signature - crypto::start_batch_verify(); - - let pair = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Important message"; - let signature = pair.sign(msg); - crypto::ed25519_verify(&signature, msg, &pair.public()); - - crypto::ed25519_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); - - assert!(!crypto::finish_batch_verify()); - - // 1 valid ed25519, 2 valid sr25519 - crypto::start_batch_verify(); - - let pair = ed25519::Pair::generate_with_phrase(None).0; - let msg = b"Ed25519 batching"; - let signature = pair.sign(msg); - crypto::ed25519_verify(&signature, msg, 
&pair.public()); - - let pair = sr25519::Pair::generate_with_phrase(None).0; - let msg = b"Schnorrkel rules"; - let signature = pair.sign(msg); - crypto::sr25519_verify(&signature, msg, &pair.public()); - - let pair = sr25519::Pair::generate_with_phrase(None).0; - let msg = b"Schnorrkel batches!"; - let signature = pair.sign(msg); - crypto::sr25519_verify(&signature, msg, &pair.public()); - - assert!(crypto::finish_batch_verify()); - - // 1 valid sr25519, 1 invalid sr25519 - crypto::start_batch_verify(); - - let pair = sr25519::Pair::generate_with_phrase(None).0; - let msg = b"Schnorrkcel!"; - let signature = pair.sign(msg); - crypto::sr25519_verify(&signature, msg, &pair.public()); - - crypto::sr25519_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); - - assert!(!crypto::finish_batch_verify()); - }); - } + use super::*; + use sp_core::map; + use sp_core::storage::Storage; + use sp_state_machine::BasicExternalities; + use std::any::TypeId; + + #[test] + fn storage_works() { + let mut t = BasicExternalities::default(); + t.execute_with(|| { + assert_eq!(storage::get(b"hello"), None); + storage::set(b"hello", b"world"); + assert_eq!(storage::get(b"hello"), Some(b"world".to_vec())); + assert_eq!(storage::get(b"foo"), None); + storage::set(b"foo", &[1, 2, 3][..]); + }); + + t = BasicExternalities::new(Storage { + top: map![b"foo".to_vec() => b"bar".to_vec()], + children: map![], + }); + + t.execute_with(|| { + assert_eq!(storage::get(b"hello"), None); + assert_eq!(storage::get(b"foo"), Some(b"bar".to_vec())); + }); + } + + #[test] + fn read_storage_works() { + let mut t = BasicExternalities::new(Storage { + top: map![b":test".to_vec() => b"\x0b\0\0\0Hello world".to_vec()], + children: map![], + }); + + t.execute_with(|| { + let mut v = [0u8; 4]; + assert!(storage::read(b":test", &mut v[..], 0).unwrap() >= 4); + assert_eq!(v, [11u8, 0, 0, 0]); + let mut w = [0u8; 11]; + assert!(storage::read(b":test", &mut w[..], 4).unwrap() >= 11); + 
assert_eq!(&w, b"Hello world"); + }); + } + + #[test] + fn clear_prefix_works() { + let mut t = BasicExternalities::new(Storage { + top: map![ + b":a".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), + b":abcd".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), + b":abc".to_vec() => b"\x0b\0\0\0Hello world".to_vec(), + b":abdd".to_vec() => b"\x0b\0\0\0Hello world".to_vec() + ], + children: map![], + }); + + t.execute_with(|| { + storage::clear_prefix(b":abc"); + + assert!(storage::get(b":a").is_some()); + assert!(storage::get(b":abdd").is_some()); + assert!(storage::get(b":abcd").is_none()); + assert!(storage::get(b":abc").is_none()); + }); + } + + #[test] + fn dynamic_extensions_work() { + let mut ext = BasicExternalities::with_tasks_executor(); + ext.execute_with(|| { + crypto::start_batch_verify(); + }); + + assert!(ext + .extensions() + .get_mut(TypeId::of::()) + .is_some()); + + ext.execute_with(|| { + crypto::finish_batch_verify(); + }); + + assert!(ext + .extensions() + .get_mut(TypeId::of::()) + .is_none()); + } + + #[test] + fn long_sr25519_batching() { + let mut ext = BasicExternalities::with_tasks_executor(); + ext.execute_with(|| { + let pair = sr25519::Pair::generate_with_phrase(None).0; + crypto::start_batch_verify(); + for it in 0..70 { + let msg = format!("Schnorrkel {}!", it); + let signature = pair.sign(msg.as_bytes()); + crypto::sr25519_verify(&signature, msg.as_bytes(), &pair.public()); + } + + // push invlaid + crypto::sr25519_verify(&Default::default(), &Vec::new(), &Default::default()); + assert!(!crypto::finish_batch_verify()); + + crypto::start_batch_verify(); + for it in 0..70 { + let msg = format!("Schnorrkel {}!", it); + let signature = pair.sign(msg.as_bytes()); + crypto::sr25519_verify(&signature, msg.as_bytes(), &pair.public()); + } + assert!(crypto::finish_batch_verify()); + }); + } + + #[test] + fn batching_works() { + let mut ext = BasicExternalities::with_tasks_executor(); + ext.execute_with(|| { + // invalid ed25519 signature + 
crypto::start_batch_verify(); + crypto::ed25519_verify(&Default::default(), &Vec::new(), &Default::default()); + assert!(!crypto::finish_batch_verify()); + + // 2 valid ed25519 signatures + crypto::start_batch_verify(); + + let pair = ed25519::Pair::generate_with_phrase(None).0; + let msg = b"Important message"; + let signature = pair.sign(msg); + crypto::ed25519_verify(&signature, msg, &pair.public()); + + let pair = ed25519::Pair::generate_with_phrase(None).0; + let msg = b"Even more important message"; + let signature = pair.sign(msg); + crypto::ed25519_verify(&signature, msg, &pair.public()); + + assert!(crypto::finish_batch_verify()); + + // 1 valid, 1 invalid ed25519 signature + crypto::start_batch_verify(); + + let pair = ed25519::Pair::generate_with_phrase(None).0; + let msg = b"Important message"; + let signature = pair.sign(msg); + crypto::ed25519_verify(&signature, msg, &pair.public()); + + crypto::ed25519_verify(&Default::default(), &Vec::new(), &Default::default()); + + assert!(!crypto::finish_batch_verify()); + + // 1 valid ed25519, 2 valid sr25519 + crypto::start_batch_verify(); + + let pair = ed25519::Pair::generate_with_phrase(None).0; + let msg = b"Ed25519 batching"; + let signature = pair.sign(msg); + crypto::ed25519_verify(&signature, msg, &pair.public()); + + let pair = sr25519::Pair::generate_with_phrase(None).0; + let msg = b"Schnorrkel rules"; + let signature = pair.sign(msg); + crypto::sr25519_verify(&signature, msg, &pair.public()); + + let pair = sr25519::Pair::generate_with_phrase(None).0; + let msg = b"Schnorrkel batches!"; + let signature = pair.sign(msg); + crypto::sr25519_verify(&signature, msg, &pair.public()); + + assert!(crypto::finish_batch_verify()); + + // 1 valid sr25519, 1 invalid sr25519 + crypto::start_batch_verify(); + + let pair = sr25519::Pair::generate_with_phrase(None).0; + let msg = b"Schnorrkcel!"; + let signature = pair.sign(msg); + crypto::sr25519_verify(&signature, msg, &pair.public()); + + 
crypto::sr25519_verify(&Default::default(), &Vec::new(), &Default::default()); + + assert!(!crypto::finish_batch_verify()); + }); + } } diff --git a/primitives/keyring/src/ed25519.rs b/primitives/keyring/src/ed25519.rs index 197b9ded87..298c8f37cc 100644 --- a/primitives/keyring/src/ed25519.rs +++ b/primitives/keyring/src/ed25519.rs @@ -16,194 +16,192 @@ //! Support code for the runtime. A set of test accounts. -use std::{collections::HashMap, ops::Deref}; use lazy_static::lazy_static; -use sp_core::{ed25519::{Pair, Public, Signature}, Pair as PairT, Public as PublicT, H256}; pub use sp_core::ed25519; +use sp_core::{ + ed25519::{Pair, Public, Signature}, + Pair as PairT, Public as PublicT, H256, +}; use sp_runtime::AccountId32; +use std::{collections::HashMap, ops::Deref}; /// Set of test accounts. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] pub enum Keyring { - Alice, - Bob, - Charlie, - Dave, - Eve, - Ferdie, - One, - Two, + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, } impl Keyring { - pub fn from_public(who: &Public) -> Option { - Self::iter().find(|&k| &Public::from(k) == who) - } - - pub fn from_account_id(who: &AccountId32) -> Option { - Self::iter().find(|&k| &k.to_account_id() == who) - } - - pub fn from_raw_public(who: [u8; 32]) -> Option { - Self::from_public(&Public::from_raw(who)) - } - - pub fn to_raw_public(self) -> [u8; 32] { - *Public::from(self).as_array_ref() - } - - pub fn from_h256_public(who: H256) -> Option { - Self::from_public(&Public::from_raw(who.into())) - } - - pub fn to_h256_public(self) -> H256 { - Public::from(self).as_array_ref().into() - } - - pub fn to_raw_public_vec(self) -> Vec { - Public::from(self).to_raw_vec() - } - - pub fn to_account_id(self) -> AccountId32 { - self.to_raw_public().into() - } - - pub fn sign(self, msg: &[u8]) -> Signature { - Pair::from(self).sign(msg) - } - - pub fn pair(self) -> Pair { - Pair::from_string(&format!("//{}", <&'static 
str>::from(self)), None) - .expect("static values are known good; qed") - } - - /// Returns an iterator over all test accounts. - pub fn iter() -> impl Iterator { - ::iter() - } - - pub fn public(self) -> Public { - self.pair().public() - } - - pub fn to_seed(self) -> String { - format!("//{}", self) - } + pub fn from_public(who: &Public) -> Option { + Self::iter().find(|&k| &Public::from(k) == who) + } + + pub fn from_account_id(who: &AccountId32) -> Option { + Self::iter().find(|&k| &k.to_account_id() == who) + } + + pub fn from_raw_public(who: [u8; 32]) -> Option { + Self::from_public(&Public::from_raw(who)) + } + + pub fn to_raw_public(self) -> [u8; 32] { + *Public::from(self).as_array_ref() + } + + pub fn from_h256_public(who: H256) -> Option { + Self::from_public(&Public::from_raw(who.into())) + } + + pub fn to_h256_public(self) -> H256 { + Public::from(self).as_array_ref().into() + } + + pub fn to_raw_public_vec(self) -> Vec { + Public::from(self).to_raw_vec() + } + + pub fn to_account_id(self) -> AccountId32 { + self.to_raw_public().into() + } + + pub fn sign(self, msg: &[u8]) -> Signature { + Pair::from(self).sign(msg) + } + + pub fn pair(self) -> Pair { + Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) + .expect("static values are known good; qed") + } + + /// Returns an iterator over all test accounts. 
+ pub fn iter() -> impl Iterator { + ::iter() + } + + pub fn public(self) -> Public { + self.pair().public() + } + + pub fn to_seed(self) -> String { + format!("//{}", self) + } } impl From for &'static str { - fn from(k: Keyring) -> Self { - match k { - Keyring::Alice => "Alice", - Keyring::Bob => "Bob", - Keyring::Charlie => "Charlie", - Keyring::Dave => "Dave", - Keyring::Eve => "Eve", - Keyring::Ferdie => "Ferdie", - Keyring::One => "One", - Keyring::Two => "Two", - } - } + fn from(k: Keyring) -> Self { + match k { + Keyring::Alice => "Alice", + Keyring::Bob => "Bob", + Keyring::Charlie => "Charlie", + Keyring::Dave => "Dave", + Keyring::Eve => "Eve", + Keyring::Ferdie => "Ferdie", + Keyring::One => "One", + Keyring::Two => "Two", + } + } } impl From for sp_runtime::MultiSigner { - fn from(x: Keyring) -> Self { - sp_runtime::MultiSigner::Ed25519(x.into()) - } + fn from(x: Keyring) -> Self { + sp_runtime::MultiSigner::Ed25519(x.into()) + } } lazy_static! { - static ref PRIVATE_KEYS: HashMap = { - Keyring::iter().map(|i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = + { Keyring::iter().map(|i| (i, i.pair())).collect() }; + static ref PUBLIC_KEYS: HashMap = { + PRIVATE_KEYS + .iter() + .map(|(&name, pair)| (name, pair.public())) + .collect() + }; } impl From for Public { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().clone() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().clone() + } } impl From for AccountId32 { - fn from(k: Keyring) -> Self { - k.to_account_id() - } + fn from(k: Keyring) -> Self { + k.to_account_id() + } } impl From for Pair { - fn from(k: Keyring) -> Self { - k.pair() - } + fn from(k: Keyring) -> Self { + k.pair() + } } impl From for [u8; 32] { - fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: Keyring) -> Self { + 
*(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl From for H256 { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() + } } impl From for &'static [u8; 32] { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl AsRef<[u8; 32]> for Keyring { - fn as_ref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + fn as_ref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } impl AsRef for Keyring { - fn as_ref(&self) -> &Public { - (*PUBLIC_KEYS).get(self).unwrap() - } + fn as_ref(&self) -> &Public { + (*PUBLIC_KEYS).get(self).unwrap() + } } impl Deref for Keyring { - type Target = [u8; 32]; - fn deref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + type Target = [u8; 32]; + fn deref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::{ed25519::Pair, Pair as PairT}; - - #[test] - fn should_work() { - assert!( - Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Bob!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Bob.public(), - ) - ); - } + use super::*; + use sp_core::{ed25519::Pair, Pair as PairT}; + + #[test] + fn should_work() { + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am 
Alice!", + &Keyring::Bob.public(), + )); + } } diff --git a/primitives/keyring/src/lib.rs b/primitives/keyring/src/lib.rs index 18f8cdf2c4..3492fba54e 100644 --- a/primitives/keyring/src/lib.rs +++ b/primitives/keyring/src/lib.rs @@ -31,6 +31,6 @@ pub use ed25519::Keyring as Ed25519Keyring; pub use sr25519::Keyring as Sr25519Keyring; pub mod test { - /// The keyring for use with accounts when using the test runtime. - pub use super::ed25519::Keyring as AccountKeyring; + /// The keyring for use with accounts when using the test runtime. + pub use super::ed25519::Keyring as AccountKeyring; } diff --git a/primitives/keyring/src/sr25519.rs b/primitives/keyring/src/sr25519.rs index 476997f2db..e1d474884b 100644 --- a/primitives/keyring/src/sr25519.rs +++ b/primitives/keyring/src/sr25519.rs @@ -16,221 +16,219 @@ //! Support code for the runtime. A set of test accounts. -use std::collections::HashMap; -use std::ops::Deref; use lazy_static::lazy_static; -use sp_core::{sr25519::{Pair, Public, Signature}, Pair as PairT, Public as PublicT, H256}; pub use sp_core::sr25519; +use sp_core::{ + sr25519::{Pair, Public, Signature}, + Pair as PairT, Public as PublicT, H256, +}; use sp_runtime::AccountId32; +use std::collections::HashMap; +use std::ops::Deref; /// Set of test accounts. 
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, strum::Display, strum::EnumIter)] pub enum Keyring { - Alice, - Bob, - Charlie, - Dave, - Eve, - Ferdie, - One, - Two, + Alice, + Bob, + Charlie, + Dave, + Eve, + Ferdie, + One, + Two, } impl Keyring { - pub fn from_public(who: &Public) -> Option { - Self::iter().find(|&k| &Public::from(k) == who) - } + pub fn from_public(who: &Public) -> Option { + Self::iter().find(|&k| &Public::from(k) == who) + } - pub fn from_account_id(who: &AccountId32) -> Option { - Self::iter().find(|&k| &k.to_account_id() == who) - } + pub fn from_account_id(who: &AccountId32) -> Option { + Self::iter().find(|&k| &k.to_account_id() == who) + } - pub fn from_raw_public(who: [u8; 32]) -> Option { - Self::from_public(&Public::from_raw(who)) - } + pub fn from_raw_public(who: [u8; 32]) -> Option { + Self::from_public(&Public::from_raw(who)) + } - pub fn to_raw_public(self) -> [u8; 32] { - *Public::from(self).as_array_ref() - } + pub fn to_raw_public(self) -> [u8; 32] { + *Public::from(self).as_array_ref() + } - pub fn from_h256_public(who: H256) -> Option { - Self::from_public(&Public::from_raw(who.into())) - } + pub fn from_h256_public(who: H256) -> Option { + Self::from_public(&Public::from_raw(who.into())) + } - pub fn to_h256_public(self) -> H256 { - Public::from(self).as_array_ref().into() - } + pub fn to_h256_public(self) -> H256 { + Public::from(self).as_array_ref().into() + } - pub fn to_raw_public_vec(self) -> Vec { - Public::from(self).to_raw_vec() - } + pub fn to_raw_public_vec(self) -> Vec { + Public::from(self).to_raw_vec() + } - pub fn to_account_id(self) -> AccountId32 { - self.to_raw_public().into() - } + pub fn to_account_id(self) -> AccountId32 { + self.to_raw_public().into() + } - pub fn sign(self, msg: &[u8]) -> Signature { - Pair::from(self).sign(msg) - } + pub fn sign(self, msg: &[u8]) -> Signature { + Pair::from(self).sign(msg) + } - pub fn pair(self) -> Pair { - Pair::from_string(&format!("//{}", <&'static 
str>::from(self)), None) - .expect("static values are known good; qed") - } + pub fn pair(self) -> Pair { + Pair::from_string(&format!("//{}", <&'static str>::from(self)), None) + .expect("static values are known good; qed") + } - /// Returns an iterator over all test accounts. - pub fn iter() -> impl Iterator { - ::iter() - } + /// Returns an iterator over all test accounts. + pub fn iter() -> impl Iterator { + ::iter() + } - pub fn public(self) -> Public { - self.pair().public() - } - pub fn to_seed(self) -> String { - format!("//{}", self) - } + pub fn public(self) -> Public { + self.pair().public() + } + pub fn to_seed(self) -> String { + format!("//{}", self) + } } impl From for &'static str { - fn from(k: Keyring) -> Self { - match k { - Keyring::Alice => "Alice", - Keyring::Bob => "Bob", - Keyring::Charlie => "Charlie", - Keyring::Dave => "Dave", - Keyring::Eve => "Eve", - Keyring::Ferdie => "Ferdie", - Keyring::One => "One", - Keyring::Two => "Two", - } - } + fn from(k: Keyring) -> Self { + match k { + Keyring::Alice => "Alice", + Keyring::Bob => "Bob", + Keyring::Charlie => "Charlie", + Keyring::Dave => "Dave", + Keyring::Eve => "Eve", + Keyring::Ferdie => "Ferdie", + Keyring::One => "One", + Keyring::Two => "Two", + } + } } impl From for sp_runtime::MultiSigner { - fn from(x: Keyring) -> Self { - sp_runtime::MultiSigner::Sr25519(x.into()) - } + fn from(x: Keyring) -> Self { + sp_runtime::MultiSigner::Sr25519(x.into()) + } } #[derive(Debug)] pub struct ParseKeyringError; impl std::fmt::Display for ParseKeyringError { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "ParseKeyringError") - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "ParseKeyringError") + } } impl std::str::FromStr for Keyring { - type Err = ParseKeyringError; - - fn from_str(s: &str) -> Result::Err> { - match s { - "alice" => Ok(Keyring::Alice), - "bob" => Ok(Keyring::Bob), - "charlie" => Ok(Keyring::Charlie), - 
"dave" => Ok(Keyring::Dave), - "eve" => Ok(Keyring::Eve), - "ferdie" => Ok(Keyring::Ferdie), - "one" => Ok(Keyring::One), - "two" => Ok(Keyring::Two), - _ => Err(ParseKeyringError) - } - } + type Err = ParseKeyringError; + + fn from_str(s: &str) -> Result::Err> { + match s { + "alice" => Ok(Keyring::Alice), + "bob" => Ok(Keyring::Bob), + "charlie" => Ok(Keyring::Charlie), + "dave" => Ok(Keyring::Dave), + "eve" => Ok(Keyring::Eve), + "ferdie" => Ok(Keyring::Ferdie), + "one" => Ok(Keyring::One), + "two" => Ok(Keyring::Two), + _ => Err(ParseKeyringError), + } + } } lazy_static! { - static ref PRIVATE_KEYS: HashMap = { - Keyring::iter().map(|i| (i, i.pair())).collect() - }; - - static ref PUBLIC_KEYS: HashMap = { - PRIVATE_KEYS.iter().map(|(&name, pair)| (name, pair.public())).collect() - }; + static ref PRIVATE_KEYS: HashMap = + { Keyring::iter().map(|i| (i, i.pair())).collect() }; + static ref PUBLIC_KEYS: HashMap = { + PRIVATE_KEYS + .iter() + .map(|(&name, pair)| (name, pair.public())) + .collect() + }; } impl From for AccountId32 { - fn from(k: Keyring) -> Self { - k.to_account_id() - } + fn from(k: Keyring) -> Self { + k.to_account_id() + } } impl From for Public { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().clone() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().clone() + } } impl From for Pair { - fn from(k: Keyring) -> Self { - k.pair() - } + fn from(k: Keyring) -> Self { + k.pair() + } } impl From for [u8; 32] { - fn from(k: Keyring) -> Self { - *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: Keyring) -> Self { + *(*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl From for H256 { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() - } + fn from(k: Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref().into() + } } impl From for &'static [u8; 32] { - fn from(k: Keyring) -> Self { - (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() - } + fn from(k: 
Keyring) -> Self { + (*PUBLIC_KEYS).get(&k).unwrap().as_array_ref() + } } impl AsRef<[u8; 32]> for Keyring { - fn as_ref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + fn as_ref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } impl AsRef for Keyring { - fn as_ref(&self) -> &Public { - (*PUBLIC_KEYS).get(self).unwrap() - } + fn as_ref(&self) -> &Public { + (*PUBLIC_KEYS).get(self).unwrap() + } } impl Deref for Keyring { - type Target = [u8; 32]; - fn deref(&self) -> &[u8; 32] { - (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() - } + type Target = [u8; 32]; + fn deref(&self) -> &[u8; 32] { + (*PUBLIC_KEYS).get(self).unwrap().as_array_ref() + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::{sr25519::Pair, Pair as PairT}; - - #[test] - fn should_work() { - assert!( - Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Bob!", - &Keyring::Alice.public(), - ) - ); - assert!( - !Pair::verify( - &Keyring::Alice.sign(b"I am Alice!"), - b"I am Alice!", - &Keyring::Bob.public(), - ) - ); - } + use super::*; + use sp_core::{sr25519::Pair, Pair as PairT}; + + #[test] + fn should_work() { + assert!(Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Bob!", + &Keyring::Alice.public(), + )); + assert!(!Pair::verify( + &Keyring::Alice.sign(b"I am Alice!"), + b"I am Alice!", + &Keyring::Bob.public(), + )); + } } diff --git a/primitives/offchain/src/lib.rs b/primitives/offchain/src/lib.rs index ae02fed496..1dd7c273de 100644 --- a/primitives/offchain/src/lib.rs +++ b/primitives/offchain/src/lib.rs @@ -23,16 +23,16 @@ pub const STORAGE_PREFIX: &[u8] = b"storage"; sp_api::decl_runtime_apis! { - /// The offchain worker api. 
- #[api_version(2)] - pub trait OffchainWorkerApi { - /// Starts the off-chain task for given block number. - #[skip_initialize_block] - #[changed_in(2)] - fn offchain_worker(number: sp_runtime::traits::NumberFor); - - /// Starts the off-chain task for given block header. - #[skip_initialize_block] - fn offchain_worker(header: &Block::Header); - } + /// The offchain worker api. + #[api_version(2)] + pub trait OffchainWorkerApi { + /// Starts the off-chain task for given block number. + #[skip_initialize_block] + #[changed_in(2)] + fn offchain_worker(number: sp_runtime::traits::NumberFor); + + /// Starts the off-chain task for given block header. + #[skip_initialize_block] + fn offchain_worker(header: &Block::Header); + } } diff --git a/primitives/panic-handler/src/lib.rs b/primitives/panic-handler/src/lib.rs index c0f70d9d14..8730489d99 100644 --- a/primitives/panic-handler/src/lib.rs +++ b/primitives/panic-handler/src/lib.rs @@ -24,25 +24,25 @@ //! temporarily be disabled by using an [`AbortGuard`]. use backtrace::Backtrace; +use std::cell::Cell; use std::io::{self, Write}; use std::marker::PhantomData; use std::panic::{self, PanicInfo}; -use std::cell::Cell; use std::thread; thread_local! { - static ON_PANIC: Cell = Cell::new(OnPanic::Abort); + static ON_PANIC: Cell = Cell::new(OnPanic::Abort); } /// Panic action. #[derive(Debug, Clone, Copy, PartialEq)] enum OnPanic { - /// Abort when panic occurs. - Abort, - /// Unwind when panic occurs. - Unwind, - /// Always unwind even if someone changes strategy to Abort afterwards. - NeverAbort, + /// Abort when panic occurs. + Abort, + /// Unwind when panic occurs. + Unwind, + /// Always unwind even if someone changes strategy to Abort afterwards. + NeverAbort, } /// Set the panic hook. @@ -52,31 +52,32 @@ enum OnPanic { /// The `bug_url` parameter is an invitation for users to visit that URL to submit a bug report /// in the case where a panic happens. 
pub fn set(bug_url: &'static str, version: &str) { - panic::set_hook(Box::new({ - let version = version.to_string(); - move |c| { - panic_hook(c, bug_url, &version) - } - })); + panic::set_hook(Box::new({ + let version = version.to_string(); + move |c| panic_hook(c, bug_url, &version) + })); } macro_rules! ABOUT_PANIC { - () => (" + () => { + " This is a bug. Please report it at: {} -")} +" + }; +} /// Set aborting flag. Returns previous value of the flag. fn set_abort(on_panic: OnPanic) -> OnPanic { - ON_PANIC.with(|val| { - let prev = val.get(); - match prev { - OnPanic::Abort | OnPanic::Unwind => val.set(on_panic), - OnPanic::NeverAbort => (), - } - prev - }) + ON_PANIC.with(|val| { + let prev = val.get(); + match prev { + OnPanic::Abort | OnPanic::Unwind => val.set(on_panic), + OnPanic::NeverAbort => (), + } + prev + }) } /// RAII guard for whether panics in the current thread should unwind or abort. @@ -87,105 +88,105 @@ fn set_abort(on_panic: OnPanic) -> OnPanic { /// > **Note**: Because we restore the previous value when dropped, you are encouraged to leave /// > the `AbortGuard` on the stack and let it destroy itself naturally. pub struct AbortGuard { - /// Value that was in `ABORT` before we created this guard. - previous_val: OnPanic, - /// Marker so that `AbortGuard` doesn't implement `Send`. - _not_send: PhantomData> + /// Value that was in `ABORT` before we created this guard. + previous_val: OnPanic, + /// Marker so that `AbortGuard` doesn't implement `Send`. + _not_send: PhantomData>, } impl AbortGuard { - /// Create a new guard. While the guard is alive, panics that happen in the current thread will - /// unwind the stack (unless another guard is created afterwards). - pub fn force_unwind() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::Unwind), - _not_send: PhantomData - } - } - - /// Create a new guard. 
While the guard is alive, panics that happen in the current thread will - /// abort the process (unless another guard is created afterwards). - pub fn force_abort() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::Abort), - _not_send: PhantomData - } - } - - /// Create a new guard. While the guard is alive, panics that happen in the current thread will - /// **never** abort the process (even if `AbortGuard::force_abort()` guard will be created afterwards). - pub fn never_abort() -> AbortGuard { - AbortGuard { - previous_val: set_abort(OnPanic::NeverAbort), - _not_send: PhantomData - } - } + /// Create a new guard. While the guard is alive, panics that happen in the current thread will + /// unwind the stack (unless another guard is created afterwards). + pub fn force_unwind() -> AbortGuard { + AbortGuard { + previous_val: set_abort(OnPanic::Unwind), + _not_send: PhantomData, + } + } + + /// Create a new guard. While the guard is alive, panics that happen in the current thread will + /// abort the process (unless another guard is created afterwards). + pub fn force_abort() -> AbortGuard { + AbortGuard { + previous_val: set_abort(OnPanic::Abort), + _not_send: PhantomData, + } + } + + /// Create a new guard. While the guard is alive, panics that happen in the current thread will + /// **never** abort the process (even if `AbortGuard::force_abort()` guard will be created afterwards). + pub fn never_abort() -> AbortGuard { + AbortGuard { + previous_val: set_abort(OnPanic::NeverAbort), + _not_send: PhantomData, + } + } } impl Drop for AbortGuard { - fn drop(&mut self) { - set_abort(self.previous_val); - } + fn drop(&mut self) { + set_abort(self.previous_val); + } } /// Function being called when a panic happens. 
fn panic_hook(info: &PanicInfo, report_url: &'static str, version: &str) { - let location = info.location(); - let file = location.as_ref().map(|l| l.file()).unwrap_or(""); - let line = location.as_ref().map(|l| l.line()).unwrap_or(0); - - let msg = match info.payload().downcast_ref::<&'static str>() { - Some(s) => *s, - None => match info.payload().downcast_ref::() { - Some(s) => &s[..], - None => "Box", - } - }; - - let thread = thread::current(); - let name = thread.name().unwrap_or(""); - - let backtrace = Backtrace::new(); - - let mut stderr = io::stderr(); - - let _ = writeln!(stderr, ""); - let _ = writeln!(stderr, "===================="); - let _ = writeln!(stderr, ""); - let _ = writeln!(stderr, "Version: {}", version); - let _ = writeln!(stderr, ""); - let _ = writeln!(stderr, "{:?}", backtrace); - let _ = writeln!(stderr, ""); - let _ = writeln!( - stderr, - "Thread '{}' panicked at '{}', {}:{}", - name, msg, file, line - ); - - let _ = writeln!(stderr, ABOUT_PANIC!(), report_url); - ON_PANIC.with(|val| { - if val.get() == OnPanic::Abort { - ::std::process::exit(1); - } - }) + let location = info.location(); + let file = location.as_ref().map(|l| l.file()).unwrap_or(""); + let line = location.as_ref().map(|l| l.line()).unwrap_or(0); + + let msg = match info.payload().downcast_ref::<&'static str>() { + Some(s) => *s, + None => match info.payload().downcast_ref::() { + Some(s) => &s[..], + None => "Box", + }, + }; + + let thread = thread::current(); + let name = thread.name().unwrap_or(""); + + let backtrace = Backtrace::new(); + + let mut stderr = io::stderr(); + + let _ = writeln!(stderr, ""); + let _ = writeln!(stderr, "===================="); + let _ = writeln!(stderr, ""); + let _ = writeln!(stderr, "Version: {}", version); + let _ = writeln!(stderr, ""); + let _ = writeln!(stderr, "{:?}", backtrace); + let _ = writeln!(stderr, ""); + let _ = writeln!( + stderr, + "Thread '{}' panicked at '{}', {}:{}", + name, msg, file, line + ); + + let _ = 
writeln!(stderr, ABOUT_PANIC!(), report_url); + ON_PANIC.with(|val| { + if val.get() == OnPanic::Abort { + ::std::process::exit(1); + } + }) } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn does_not_abort() { - set("test", "1.2.3"); - let _guard = AbortGuard::force_unwind(); - ::std::panic::catch_unwind(|| panic!()).ok(); - } - - #[test] - fn does_not_abort_after_never_abort() { - set("test", "1.2.3"); - let _guard = AbortGuard::never_abort(); - let _guard = AbortGuard::force_abort(); - std::panic::catch_unwind(|| panic!()).ok(); - } + use super::*; + + #[test] + fn does_not_abort() { + set("test", "1.2.3"); + let _guard = AbortGuard::force_unwind(); + ::std::panic::catch_unwind(|| panic!()).ok(); + } + + #[test] + fn does_not_abort_after_never_abort() { + set("test", "1.2.3"); + let _guard = AbortGuard::never_abort(); + let _guard = AbortGuard::force_abort(); + std::panic::catch_unwind(|| panic!()).ok(); + } } diff --git a/primitives/phragmen/benches/phragmen.rs b/primitives/phragmen/benches/phragmen.rs index e274586f60..1bb469cace 100644 --- a/primitives/phragmen/benches/phragmen.rs +++ b/primitives/phragmen/benches/phragmen.rs @@ -26,8 +26,8 @@ use test::Bencher; use rand::{self, Rng}; use sp_phragmen::{PhragmenResult, VoteWeight}; +use sp_runtime::{traits::Zero, Perbill}; use std::collections::BTreeMap; -use sp_runtime::{Perbill, traits::Zero}; // default params. Each will be scaled by the benchmarks individually. 
const VALIDATORS: u64 = 100; @@ -41,125 +41,138 @@ const PREFIX: AccountId = 1000_000; type AccountId = u64; mod bench_closure_and_slice { - use sp_phragmen::{ - VoteWeight, ExtendedBalance, Assignment, StakedAssignment, IdentifierT, - assignment_ratio_to_staked, - }; - use sp_runtime::{Perbill, PerThing}; - use rand::{self, Rng, RngCore}; - use test::Bencher; - - fn random_assignment() -> Assignment { - let mut rng = rand::thread_rng(); - let who = rng.next_u32(); - let distribution = (0..5) - .map(|x| (x + rng.next_u32(), Perbill::from_percent(rng.next_u32() % 100))) - .collect::>(); - Assignment { who, distribution } - } - - /// Converts a vector of ratio assignments into ones with absolute budget value. - pub fn assignment_ratio_to_staked_slice( - ratio: Vec>, - stakes: &[VoteWeight], - ) -> Vec> - where - T: sp_std::ops::Mul, - ExtendedBalance: From<::Inner>, - { - ratio - .into_iter() - .zip(stakes.into_iter().map(|x| *x as ExtendedBalance)) - .map(|(a, stake)| { - a.into_staked(stake.into(), true) - }) - .collect() - } - - #[bench] - fn closure(b: &mut Bencher) { - let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); - let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; - - // each have one clone of assignments - b.iter(|| assignment_ratio_to_staked(assignments.clone(), stake_of)); - } - - #[bench] - fn slice(b: &mut Bencher) { - let assignments = (0..1000).map(|_| random_assignment()).collect::>>(); - let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; - - b.iter(|| { - let local = assignments.clone(); - let stakes = local.iter().map(|x| stake_of(&x.who)).collect::>(); - assignment_ratio_to_staked_slice(local, stakes.as_ref()); - }); - } + use rand::{self, Rng, RngCore}; + use sp_phragmen::{ + assignment_ratio_to_staked, Assignment, ExtendedBalance, IdentifierT, StakedAssignment, + VoteWeight, + }; + use sp_runtime::{PerThing, Perbill}; + use test::Bencher; + + fn random_assignment() -> Assignment { + let mut rng 
= rand::thread_rng(); + let who = rng.next_u32(); + let distribution = (0..5) + .map(|x| { + ( + x + rng.next_u32(), + Perbill::from_percent(rng.next_u32() % 100), + ) + }) + .collect::>(); + Assignment { who, distribution } + } + + /// Converts a vector of ratio assignments into ones with absolute budget value. + pub fn assignment_ratio_to_staked_slice( + ratio: Vec>, + stakes: &[VoteWeight], + ) -> Vec> + where + T: sp_std::ops::Mul, + ExtendedBalance: From<::Inner>, + { + ratio + .into_iter() + .zip(stakes.into_iter().map(|x| *x as ExtendedBalance)) + .map(|(a, stake)| a.into_staked(stake.into(), true)) + .collect() + } + + #[bench] + fn closure(b: &mut Bencher) { + let assignments = (0..1000) + .map(|_| random_assignment()) + .collect::>>(); + let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; + + // each have one clone of assignments + b.iter(|| assignment_ratio_to_staked(assignments.clone(), stake_of)); + } + + #[bench] + fn slice(b: &mut Bencher) { + let assignments = (0..1000) + .map(|_| random_assignment()) + .collect::>>(); + let stake_of = |x: &u32| -> VoteWeight { (x * 2 + 100).into() }; + + b.iter(|| { + let local = assignments.clone(); + let stakes = local.iter().map(|x| stake_of(&x.who)).collect::>(); + assignment_ratio_to_staked_slice(local, stakes.as_ref()); + }); + } } fn do_phragmen( - b: &mut Bencher, - num_validators: u64, - num_nominators: u64, - to_elect: usize, - edge_per_voter: u64, - eq_iters: usize, - eq_tolerance: u128, + b: &mut Bencher, + num_validators: u64, + num_nominators: u64, + to_elect: usize, + edge_per_voter: u64, + eq_iters: usize, + eq_tolerance: u128, ) { - assert!(num_validators > edge_per_voter); - let rr = |a, b| rand::thread_rng().gen_range(a as usize, b as usize) as VoteWeight; - - let mut candidates = Vec::with_capacity(num_validators as usize); - let mut stake_of_tree: BTreeMap = BTreeMap::new(); - - (1 ..= num_validators).for_each(|acc| { - candidates.push(acc); - stake_of_tree.insert(acc, STAKE + 
rr(10, 1000)); - }); - - let mut voters = Vec::with_capacity(num_nominators as usize); - (PREFIX ..= (PREFIX + num_nominators)).for_each(|acc| { - // all possible targets - let mut all_targets = candidates.clone(); - // we remove and pop into `targets` `edge_per_voter` times. - let targets = (0 .. edge_per_voter).map(|_| { - all_targets.remove(rr(0, all_targets.len()) as usize) - }) - .collect::>(); - - let stake = STAKE + rr(10, 1000); - stake_of_tree.insert(acc, stake); - voters.push((acc, stake, targets)); - }); - - b.iter(|| { - let PhragmenResult { winners, assignments } = sp_phragmen::elect::( - to_elect, - Zero::zero(), - candidates.clone(), - voters.clone(), - ).unwrap(); - - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; - - // Do the benchmarking with equalize. - if eq_iters > 0 { - use sp_phragmen::{equalize, assignment_ratio_to_staked, build_support_map, to_without_backing}; - let staked = assignment_ratio_to_staked(assignments, &stake_of); - let winners = to_without_backing(winners); - let mut support = build_support_map(winners.as_ref(), staked.as_ref()).0; - - equalize( - staked.into_iter().map(|a| (a.clone(), stake_of(&a.who))).collect(), - &mut support, - eq_tolerance, - eq_iters, - ); - } - }) + assert!(num_validators > edge_per_voter); + let rr = |a, b| rand::thread_rng().gen_range(a as usize, b as usize) as VoteWeight; + + let mut candidates = Vec::with_capacity(num_validators as usize); + let mut stake_of_tree: BTreeMap = BTreeMap::new(); + + (1..=num_validators).for_each(|acc| { + candidates.push(acc); + stake_of_tree.insert(acc, STAKE + rr(10, 1000)); + }); + + let mut voters = Vec::with_capacity(num_nominators as usize); + (PREFIX..=(PREFIX + num_nominators)).for_each(|acc| { + // all possible targets + let mut all_targets = candidates.clone(); + // we remove and pop into `targets` `edge_per_voter` times. 
+ let targets = (0..edge_per_voter) + .map(|_| all_targets.remove(rr(0, all_targets.len()) as usize)) + .collect::>(); + + let stake = STAKE + rr(10, 1000); + stake_of_tree.insert(acc, stake); + voters.push((acc, stake, targets)); + }); + + b.iter(|| { + let PhragmenResult { + winners, + assignments, + } = sp_phragmen::elect::( + to_elect, + Zero::zero(), + candidates.clone(), + voters.clone(), + ) + .unwrap(); + + let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; + + // Do the benchmarking with equalize. + if eq_iters > 0 { + use sp_phragmen::{ + assignment_ratio_to_staked, build_support_map, equalize, to_without_backing, + }; + let staked = assignment_ratio_to_staked(assignments, &stake_of); + let winners = to_without_backing(winners); + let mut support = build_support_map(winners.as_ref(), staked.as_ref()).0; + + equalize( + staked + .into_iter() + .map(|a| (a.clone(), stake_of(&a.who))) + .collect(), + &mut support, + eq_tolerance, + eq_iters, + ); + } + }) } macro_rules! phragmen_benches { @@ -181,39 +194,39 @@ macro_rules! phragmen_benches { } phragmen_benches! 
{ - bench_1_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_2: (VALIDATORS * 2, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_3: (VALIDATORS * 4, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_4: (VALIDATORS * 8, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_1_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_1_2_eq: (VALIDATORS * 2, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_1_3_eq: (VALIDATORS * 4, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_1_4_eq: (VALIDATORS * 8, NOMINATORS, TO_ELECT, EDGES, 2, 0), - - bench_0_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_0_2: (VALIDATORS, NOMINATORS, TO_ELECT * 4, EDGES, 0, 0), - bench_0_3: (VALIDATORS, NOMINATORS, TO_ELECT * 8, EDGES, 0, 0), - bench_0_4: (VALIDATORS, NOMINATORS, TO_ELECT * 16, EDGES , 0, 0), - bench_0_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_0_2_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 4, EDGES, 2, 0), - bench_0_3_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 8, EDGES, 2, 0), - bench_0_4_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 16, EDGES , 2, 0), - - bench_2_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), - bench_2_2: (VALIDATORS, NOMINATORS * 2, TO_ELECT, EDGES, 0, 0), - bench_2_3: (VALIDATORS, NOMINATORS * 4, TO_ELECT, EDGES, 0, 0), - bench_2_4: (VALIDATORS, NOMINATORS * 8, TO_ELECT, EDGES, 0, 0), - bench_2_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_2_2_eq: (VALIDATORS, NOMINATORS * 2, TO_ELECT, EDGES, 2, 0), - bench_2_3_eq: (VALIDATORS, NOMINATORS * 4, TO_ELECT, EDGES, 2, 0), - bench_2_4_eq: (VALIDATORS, NOMINATORS * 8, TO_ELECT, EDGES, 2, 0), - - bench_3_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0 ), - bench_3_2: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 2, 0, 0), - bench_3_3: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 4, 0, 0), - bench_3_4: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 8, 0, 0), - bench_3_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), - bench_3_2_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 2, 2, 
0), - bench_3_3_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 4, 2, 0), - bench_3_4_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 8, 2, 0), + bench_1_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), + bench_1_2: (VALIDATORS * 2, NOMINATORS, TO_ELECT, EDGES, 0, 0), + bench_1_3: (VALIDATORS * 4, NOMINATORS, TO_ELECT, EDGES, 0, 0), + bench_1_4: (VALIDATORS * 8, NOMINATORS, TO_ELECT, EDGES, 0, 0), + bench_1_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), + bench_1_2_eq: (VALIDATORS * 2, NOMINATORS, TO_ELECT, EDGES, 2, 0), + bench_1_3_eq: (VALIDATORS * 4, NOMINATORS, TO_ELECT, EDGES, 2, 0), + bench_1_4_eq: (VALIDATORS * 8, NOMINATORS, TO_ELECT, EDGES, 2, 0), + + bench_0_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), + bench_0_2: (VALIDATORS, NOMINATORS, TO_ELECT * 4, EDGES, 0, 0), + bench_0_3: (VALIDATORS, NOMINATORS, TO_ELECT * 8, EDGES, 0, 0), + bench_0_4: (VALIDATORS, NOMINATORS, TO_ELECT * 16, EDGES , 0, 0), + bench_0_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), + bench_0_2_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 4, EDGES, 2, 0), + bench_0_3_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 8, EDGES, 2, 0), + bench_0_4_eq: (VALIDATORS, NOMINATORS, TO_ELECT * 16, EDGES , 2, 0), + + bench_2_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0), + bench_2_2: (VALIDATORS, NOMINATORS * 2, TO_ELECT, EDGES, 0, 0), + bench_2_3: (VALIDATORS, NOMINATORS * 4, TO_ELECT, EDGES, 0, 0), + bench_2_4: (VALIDATORS, NOMINATORS * 8, TO_ELECT, EDGES, 0, 0), + bench_2_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), + bench_2_2_eq: (VALIDATORS, NOMINATORS * 2, TO_ELECT, EDGES, 2, 0), + bench_2_3_eq: (VALIDATORS, NOMINATORS * 4, TO_ELECT, EDGES, 2, 0), + bench_2_4_eq: (VALIDATORS, NOMINATORS * 8, TO_ELECT, EDGES, 2, 0), + + bench_3_1: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 0, 0 ), + bench_3_2: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 2, 0, 0), + bench_3_3: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 4, 0, 0), + bench_3_4: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 
8, 0, 0), + bench_3_1_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES, 2, 0), + bench_3_2_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 2, 2, 0), + bench_3_3_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 4, 2, 0), + bench_3_4_eq: (VALIDATORS, NOMINATORS, TO_ELECT, EDGES * 8, 2, 0), } diff --git a/primitives/phragmen/compact/src/assignment.rs b/primitives/phragmen/compact/src/assignment.rs index 587e482ccb..8555bd6fff 100644 --- a/primitives/phragmen/compact/src/assignment.rs +++ b/primitives/phragmen/compact/src/assignment.rs @@ -17,24 +17,24 @@ //! Code generation for the ratio assignment type. use crate::field_name_for; -use proc_macro2::{TokenStream as TokenStream2}; -use syn::{GenericArgument}; +use proc_macro2::TokenStream as TokenStream2; use quote::quote; +use syn::GenericArgument; fn from_impl(count: usize) -> TokenStream2 { - let from_impl_single = { - let name = field_name_for(1); - quote!(1 => compact.#name.push( + let from_impl_single = { + let name = field_name_for(1); + quote!(1 => compact.#name.push( ( index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, ) ),) - }; + }; - let from_impl_double = { - let name = field_name_for(2); - quote!(2 => compact.#name.push( + let from_impl_double = { + let name = field_name_for(2); + quote!(2 => compact.#name.push( ( index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, ( @@ -44,9 +44,9 @@ fn from_impl(count: usize) -> TokenStream2 { index_of_target(&distribution[1].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, ) ),) - }; + }; - let from_impl_rest = (3..=count).map(|c| { + let from_impl_rest = (3..=count).map(|c| { let inner = (0..c-1).map(|i| quote!((index_of_target(&distribution[#i].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, distribution[#i].1),) ).collect::(); @@ -60,54 +60,54 @@ fn from_impl(count: usize) -> TokenStream2 { ) }).collect::(); - quote!( - #from_impl_single - #from_impl_double 
- #from_impl_rest - ) + quote!( + #from_impl_single + #from_impl_double + #from_impl_rest + ) } fn into_impl(count: usize) -> TokenStream2 { - let into_impl_single = { - let name = field_name_for(1); - quote!( - for (voter_index, target_index) in self.#name { - assignments.push(_phragmen::Assignment { - who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, - distribution: vec![ - (target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, Accuracy::one()) - ], - }) - } - ) - }; - - let into_impl_double = { - let name = field_name_for(2); - quote!( - for (voter_index, (t1_idx, p1), t2_idx) in self.#name { - if p1 >= Accuracy::one() { - return Err(_phragmen::Error::CompactStakeOverflow); - } - - // defensive only. Since Percent doesn't have `Sub`. - let p2 = _phragmen::sp_runtime::traits::Saturating::saturating_sub( - Accuracy::one(), - p1, - ); - - assignments.push( _phragmen::Assignment { - who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, - distribution: vec![ - (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p1), - (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p2), - ] - }); - } - ) - }; - - let into_impl_rest = (3..=count).map(|c| { + let into_impl_single = { + let name = field_name_for(1); + quote!( + for (voter_index, target_index) in self.#name { + assignments.push(_phragmen::Assignment { + who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + distribution: vec![ + (target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, Accuracy::one()) + ], + }) + } + ) + }; + + let into_impl_double = { + let name = field_name_for(2); + quote!( + for (voter_index, (t1_idx, p1), t2_idx) in self.#name { + if p1 >= Accuracy::one() { + return Err(_phragmen::Error::CompactStakeOverflow); + } + + // defensive only. Since Percent doesn't have `Sub`. 
+ let p2 = _phragmen::sp_runtime::traits::Saturating::saturating_sub( + Accuracy::one(), + p1, + ); + + assignments.push( _phragmen::Assignment { + who: voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, + distribution: vec![ + (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p1), + (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, p2), + ] + }); + } + ) + }; + + let into_impl_rest = (3..=count).map(|c| { let name = field_name_for(c); quote!( for (voter_index, inners, t_last_idx) in self.#name { @@ -141,70 +141,69 @@ fn into_impl(count: usize) -> TokenStream2 { ) }).collect::(); - quote!( - #into_impl_single - #into_impl_double - #into_impl_rest - ) + quote!( + #into_impl_single + #into_impl_double + #into_impl_rest + ) } pub(crate) fn assignment( - ident: syn::Ident, - voter_type: GenericArgument, - target_type: GenericArgument, - count: usize, + ident: syn::Ident, + voter_type: GenericArgument, + target_type: GenericArgument, + count: usize, ) -> TokenStream2 { - - let from_impl = from_impl(count); - let into_impl = into_impl(count); - - quote!( - impl< - #voter_type: _phragmen::codec::Codec + Default + Copy, - #target_type: _phragmen::codec::Codec + Default + Copy, - Accuracy: - _phragmen::codec::Codec + Default + Clone + _phragmen::sp_runtime::PerThing + - PartialOrd, - > - #ident<#voter_type, #target_type, Accuracy> - { - pub fn from_assignment( - assignments: Vec<_phragmen::Assignment>, - index_of_voter: FV, - index_of_target: FT, - ) -> Result - where - for<'r> FV: Fn(&'r A) -> Option<#voter_type>, - for<'r> FT: Fn(&'r A) -> Option<#target_type>, - A: _phragmen::IdentifierT, - { - let mut compact: #ident< - #voter_type, - #target_type, - Accuracy, - > = Default::default(); - - for _phragmen::Assignment { who, distribution } in assignments { - match distribution.len() { - 0 => continue, - #from_impl - _ => { - return Err(_phragmen::Error::CompactTargetOverflow); - } - } - }; - Ok(compact) - } - - pub fn 
into_assignment( - self, - voter_at: impl Fn(#voter_type) -> Option, - target_at: impl Fn(#target_type) -> Option, - ) -> Result>, _phragmen::Error> { - let mut assignments: Vec<_phragmen::Assignment> = Default::default(); - #into_impl - Ok(assignments) - } - } - ) + let from_impl = from_impl(count); + let into_impl = into_impl(count); + + quote!( + impl< + #voter_type: _phragmen::codec::Codec + Default + Copy, + #target_type: _phragmen::codec::Codec + Default + Copy, + Accuracy: + _phragmen::codec::Codec + Default + Clone + _phragmen::sp_runtime::PerThing + + PartialOrd, + > + #ident<#voter_type, #target_type, Accuracy> + { + pub fn from_assignment( + assignments: Vec<_phragmen::Assignment>, + index_of_voter: FV, + index_of_target: FT, + ) -> Result + where + for<'r> FV: Fn(&'r A) -> Option<#voter_type>, + for<'r> FT: Fn(&'r A) -> Option<#target_type>, + A: _phragmen::IdentifierT, + { + let mut compact: #ident< + #voter_type, + #target_type, + Accuracy, + > = Default::default(); + + for _phragmen::Assignment { who, distribution } in assignments { + match distribution.len() { + 0 => continue, + #from_impl + _ => { + return Err(_phragmen::Error::CompactTargetOverflow); + } + } + }; + Ok(compact) + } + + pub fn into_assignment( + self, + voter_at: impl Fn(#voter_type) -> Option, + target_at: impl Fn(#target_type) -> Option, + ) -> Result>, _phragmen::Error> { + let mut assignments: Vec<_phragmen::Assignment> = Default::default(); + #into_impl + Ok(assignments) + } + } + ) } diff --git a/primitives/phragmen/compact/src/lib.rs b/primitives/phragmen/compact/src/lib.rs index 114aeaeb32..1180e82edb 100644 --- a/primitives/phragmen/compact/src/lib.rs +++ b/primitives/phragmen/compact/src/lib.rs @@ -17,10 +17,13 @@ //! Proc macro for phragmen compact assignment. 
use proc_macro::TokenStream; -use proc_macro2::{TokenStream as TokenStream2, Span, Ident}; +use proc_macro2::{Ident, Span, TokenStream as TokenStream2}; use proc_macro_crate::crate_name; use quote::quote; -use syn::{GenericArgument, Type, parse::{Parse, ParseStream, Result}}; +use syn::{ + parse::{Parse, ParseStream, Result}, + GenericArgument, Type, +}; mod assignment; mod staked; @@ -77,143 +80,139 @@ const PREFIX: &'static str = "votes"; /// #[proc_macro] pub fn generate_compact_solution_type(item: TokenStream) -> TokenStream { - let CompactSolutionDef { - vis, - ident, - count, - } = syn::parse_macro_input!(item as CompactSolutionDef); - - let voter_type = GenericArgument::Type(Type::Verbatim(quote!(V))); - let target_type = GenericArgument::Type(Type::Verbatim(quote!(T))); - let weight_type = GenericArgument::Type(Type::Verbatim(quote!(W))); - - let imports = imports().unwrap_or_else(|e| e.to_compile_error()); - - let compact_def = struct_def( - vis, - ident.clone(), - count, - voter_type.clone(), - target_type.clone(), - weight_type, - ).unwrap_or_else(|e| e.to_compile_error()); - - let assignment_impls = assignment::assignment( - ident.clone(), - voter_type.clone(), - target_type.clone(), - count, - ); - - let staked_impls = staked::staked( - ident, - voter_type, - target_type, - count, - ); - - quote!( - #imports - #compact_def - #assignment_impls - #staked_impls - ).into() + let CompactSolutionDef { vis, ident, count } = + syn::parse_macro_input!(item as CompactSolutionDef); + + let voter_type = GenericArgument::Type(Type::Verbatim(quote!(V))); + let target_type = GenericArgument::Type(Type::Verbatim(quote!(T))); + let weight_type = GenericArgument::Type(Type::Verbatim(quote!(W))); + + let imports = imports().unwrap_or_else(|e| e.to_compile_error()); + + let compact_def = struct_def( + vis, + ident.clone(), + count, + voter_type.clone(), + target_type.clone(), + weight_type, + ) + .unwrap_or_else(|e| e.to_compile_error()); + + let assignment_impls = 
assignment::assignment( + ident.clone(), + voter_type.clone(), + target_type.clone(), + count, + ); + + let staked_impls = staked::staked(ident, voter_type, target_type, count); + + quote!( + #imports + #compact_def + #assignment_impls + #staked_impls + ) + .into() } fn struct_def( - vis: syn::Visibility, - ident: syn::Ident, - count: usize, - voter_type: GenericArgument, - target_type: GenericArgument, - weight_type: GenericArgument, + vis: syn::Visibility, + ident: syn::Ident, + count: usize, + voter_type: GenericArgument, + target_type: GenericArgument, + weight_type: GenericArgument, ) -> Result { - if count <= 2 { - Err(syn::Error::new( - Span::call_site(), - "cannot build compact solution struct with capacity less than 2." - ))? - } - - let singles = { - let name = field_name_for(1); - quote!(#name: Vec<(#voter_type, #target_type)>,) - }; - - let doubles = { - let name = field_name_for(2); - quote!(#name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>,) - }; - - let rest = (3..=count).map(|c| { - let field_name = field_name_for(c); - let array_len = c - 1; - quote!( - #field_name: Vec<( - #voter_type, - [(#target_type, #weight_type); #array_len], - #target_type - )>, - ) - }).collect::(); - - Ok(quote! ( - /// A struct to encode a Phragmen assignment in a compact way. - #[derive( - Default, - PartialEq, - Eq, - Clone, - _phragmen::sp_runtime::RuntimeDebug, - _phragmen::codec::Encode, - _phragmen::codec::Decode, - )] - #vis struct #ident<#voter_type, #target_type, #weight_type> { - // _marker: sp_std::marker::PhantomData, - #singles - #doubles - #rest - } - - impl<#voter_type, #target_type, #weight_type> _phragmen::VotingLimit - for #ident<#voter_type, #target_type, #weight_type> - { - const LIMIT: usize = #count; - } - )) + if count <= 2 { + Err(syn::Error::new( + Span::call_site(), + "cannot build compact solution struct with capacity less than 2.", + ))? 
+ } + + let singles = { + let name = field_name_for(1); + quote!(#name: Vec<(#voter_type, #target_type)>,) + }; + + let doubles = { + let name = field_name_for(2); + quote!(#name: Vec<(#voter_type, (#target_type, #weight_type), #target_type)>,) + }; + + let rest = (3..=count) + .map(|c| { + let field_name = field_name_for(c); + let array_len = c - 1; + quote!( + #field_name: Vec<( + #voter_type, + [(#target_type, #weight_type); #array_len], + #target_type + )>, + ) + }) + .collect::(); + + Ok(quote! ( + /// A struct to encode a Phragmen assignment in a compact way. + #[derive( + Default, + PartialEq, + Eq, + Clone, + _phragmen::sp_runtime::RuntimeDebug, + _phragmen::codec::Encode, + _phragmen::codec::Decode, + )] + #vis struct #ident<#voter_type, #target_type, #weight_type> { + // _marker: sp_std::marker::PhantomData, + #singles + #doubles + #rest + } + + impl<#voter_type, #target_type, #weight_type> _phragmen::VotingLimit + for #ident<#voter_type, #target_type, #weight_type> + { + const LIMIT: usize = #count; + } + )) } fn imports() -> Result { - let sp_phragmen_imports = match crate_name("sp-phragmen") { - Ok(sp_phragmen) => { - let ident = syn::Ident::new(&sp_phragmen, Span::call_site()); - quote!( extern crate #ident as _phragmen; ) - } - Err(e) => return Err(syn::Error::new(Span::call_site(), &e)), - }; - - Ok(quote!( - #sp_phragmen_imports - )) + let sp_phragmen_imports = match crate_name("sp-phragmen") { + Ok(sp_phragmen) => { + let ident = syn::Ident::new(&sp_phragmen, Span::call_site()); + quote!( extern crate #ident as _phragmen; ) + } + Err(e) => return Err(syn::Error::new(Span::call_site(), &e)), + }; + + Ok(quote!( + #sp_phragmen_imports + )) } struct CompactSolutionDef { - vis: syn::Visibility, - ident: syn::Ident, - count: usize, + vis: syn::Visibility, + ident: syn::Ident, + count: usize, } impl Parse for CompactSolutionDef { - fn parse(input: ParseStream) -> syn::Result { - let vis: syn::Visibility = input.parse()?; - let ident: syn::Ident = 
input.parse()?; - let _ = ::parse(input)?; - let count_literal: syn::LitInt = input.parse()?; - let count = count_literal.base10_parse::()?; - Ok(Self { vis, ident, count } ) - } + fn parse(input: ParseStream) -> syn::Result { + let vis: syn::Visibility = input.parse()?; + let ident: syn::Ident = input.parse()?; + let _ = ::parse(input)?; + let count_literal: syn::LitInt = input.parse()?; + let count = count_literal.base10_parse::()?; + Ok(Self { vis, ident, count }) + } } fn field_name_for(n: usize) -> Ident { - Ident::new(&format!("{}{}", PREFIX, n), Span::call_site()) + Ident::new(&format!("{}{}", PREFIX, n), Span::call_site()) } diff --git a/primitives/phragmen/compact/src/staked.rs b/primitives/phragmen/compact/src/staked.rs index 81ccb5c559..bce71ee718 100644 --- a/primitives/phragmen/compact/src/staked.rs +++ b/primitives/phragmen/compact/src/staked.rs @@ -17,24 +17,24 @@ //! Code generation for the staked assignment type. use crate::field_name_for; -use proc_macro2::{TokenStream as TokenStream2}; -use syn::{GenericArgument}; +use proc_macro2::TokenStream as TokenStream2; use quote::quote; +use syn::GenericArgument; fn from_impl(count: usize) -> TokenStream2 { - let from_impl_single = { - let name = field_name_for(1); - quote!(1 => compact.#name.push( + let from_impl_single = { + let name = field_name_for(1); + quote!(1 => compact.#name.push( ( index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, index_of_target(&distribution[0].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, ) ),) - }; + }; - let from_impl_double = { - let name = field_name_for(2); - quote!(2 => compact.#name.push( + let from_impl_double = { + let name = field_name_for(2); + quote!(2 => compact.#name.push( ( index_of_voter(&who).ok_or(_phragmen::Error::CompactInvalidIndex)?, ( @@ -44,9 +44,9 @@ fn from_impl(count: usize) -> TokenStream2 { index_of_target(&distribution[1].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, ) ),) - }; + }; - let from_impl_rest = 
(3..=count).map(|c| { + let from_impl_rest = (3..=count).map(|c| { let inner = (0..c-1).map(|i| quote!((index_of_target(&distribution[#i].0).ok_or(_phragmen::Error::CompactInvalidIndex)?, distribution[#i].1),) ).collect::(); @@ -60,53 +60,53 @@ fn from_impl(count: usize) -> TokenStream2 { ) }).collect::(); - quote!( - #from_impl_single - #from_impl_double - #from_impl_rest - ) + quote!( + #from_impl_single + #from_impl_double + #from_impl_rest + ) } fn into_impl(count: usize) -> TokenStream2 { - let into_impl_single = { - let name = field_name_for(1); - quote!( - for (voter_index, target_index) in self.#name { - let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; - let all_stake: u128 = max_of(&who).into(); - assignments.push(_phragmen::StakedAssignment { - who, - distribution: vec![(target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, all_stake)], - }) - } - ) - }; - - let into_impl_double = { - let name = field_name_for(2); - quote!( - for (voter_index, (t1_idx, w1), t2_idx) in self.#name { - let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; - let all_stake: u128 = max_of(&who).into(); - - if w1 >= all_stake { - return Err(_phragmen::Error::CompactStakeOverflow); - } - - // w2 is ensured to be positive. 
- let w2 = all_stake - w1; - assignments.push( _phragmen::StakedAssignment { - who, - distribution: vec![ - (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w1), - (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w2), - ] - }); - } - ) - }; - - let into_impl_rest = (3..=count).map(|c| { + let into_impl_single = { + let name = field_name_for(1); + quote!( + for (voter_index, target_index) in self.#name { + let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; + let all_stake: u128 = max_of(&who).into(); + assignments.push(_phragmen::StakedAssignment { + who, + distribution: vec![(target_at(target_index).ok_or(_phragmen::Error::CompactInvalidIndex)?, all_stake)], + }) + } + ) + }; + + let into_impl_double = { + let name = field_name_for(2); + quote!( + for (voter_index, (t1_idx, w1), t2_idx) in self.#name { + let who = voter_at(voter_index).ok_or(_phragmen::Error::CompactInvalidIndex)?; + let all_stake: u128 = max_of(&who).into(); + + if w1 >= all_stake { + return Err(_phragmen::Error::CompactStakeOverflow); + } + + // w2 is ensured to be positive. 
+ let w2 = all_stake - w1; + assignments.push( _phragmen::StakedAssignment { + who, + distribution: vec![ + (target_at(t1_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w1), + (target_at(t2_idx).ok_or(_phragmen::Error::CompactInvalidIndex)?, w2), + ] + }); + } + ) + }; + + let into_impl_rest = (3..=count).map(|c| { let name = field_name_for(c); quote!( for (voter_index, inners, t_last_idx) in self.#name { @@ -138,72 +138,71 @@ fn into_impl(count: usize) -> TokenStream2 { ) }).collect::(); - quote!( - #into_impl_single - #into_impl_double - #into_impl_rest - ) + quote!( + #into_impl_single + #into_impl_double + #into_impl_rest + ) } pub(crate) fn staked( - ident: syn::Ident, - voter_type: GenericArgument, - target_type: GenericArgument, - count: usize, + ident: syn::Ident, + voter_type: GenericArgument, + target_type: GenericArgument, + count: usize, ) -> TokenStream2 { - - let from_impl = from_impl(count); - let into_impl = into_impl(count); - - quote!( - impl< - #voter_type: _phragmen::codec::Codec + Default + Copy, - #target_type: _phragmen::codec::Codec + Default + Copy, - > - #ident<#voter_type, #target_type, u128> - { - /// Generate self from a vector of `StakedAssignment`. - pub fn from_staked( - assignments: Vec<_phragmen::StakedAssignment>, - index_of_voter: FV, - index_of_target: FT, - ) -> Result - where - for<'r> FV: Fn(&'r A) -> Option<#voter_type>, - for<'r> FT: Fn(&'r A) -> Option<#target_type>, - A: _phragmen::IdentifierT - { - let mut compact: #ident<#voter_type, #target_type, u128> = Default::default(); - for _phragmen::StakedAssignment { who, distribution } in assignments { - match distribution.len() { - 0 => continue, - #from_impl - _ => { - return Err(_phragmen::Error::CompactTargetOverflow); - } - } - }; - Ok(compact) - } - - /// Convert self into `StakedAssignment`. The given function should return the total - /// weight of a voter. It is used to subtract the sum of all the encoded weights to - /// infer the last one. 
- pub fn into_staked( - self, - max_of: FM, - voter_at: impl Fn(#voter_type) -> Option, - target_at: impl Fn(#target_type) -> Option, - ) - -> Result>, _phragmen::Error> - where - for<'r> FM: Fn(&'r A) -> u64, - A: _phragmen::IdentifierT, - { - let mut assignments: Vec<_phragmen::StakedAssignment> = Default::default(); - #into_impl - Ok(assignments) - } - } - ) + let from_impl = from_impl(count); + let into_impl = into_impl(count); + + quote!( + impl< + #voter_type: _phragmen::codec::Codec + Default + Copy, + #target_type: _phragmen::codec::Codec + Default + Copy, + > + #ident<#voter_type, #target_type, u128> + { + /// Generate self from a vector of `StakedAssignment`. + pub fn from_staked( + assignments: Vec<_phragmen::StakedAssignment>, + index_of_voter: FV, + index_of_target: FT, + ) -> Result + where + for<'r> FV: Fn(&'r A) -> Option<#voter_type>, + for<'r> FT: Fn(&'r A) -> Option<#target_type>, + A: _phragmen::IdentifierT + { + let mut compact: #ident<#voter_type, #target_type, u128> = Default::default(); + for _phragmen::StakedAssignment { who, distribution } in assignments { + match distribution.len() { + 0 => continue, + #from_impl + _ => { + return Err(_phragmen::Error::CompactTargetOverflow); + } + } + }; + Ok(compact) + } + + /// Convert self into `StakedAssignment`. The given function should return the total + /// weight of a voter. It is used to subtract the sum of all the encoded weights to + /// infer the last one. 
+ pub fn into_staked( + self, + max_of: FM, + voter_at: impl Fn(#voter_type) -> Option, + target_at: impl Fn(#target_type) -> Option, + ) + -> Result>, _phragmen::Error> + where + for<'r> FM: Fn(&'r A) -> u64, + A: _phragmen::IdentifierT, + { + let mut assignments: Vec<_phragmen::StakedAssignment> = Default::default(); + #into_impl + Ok(assignments) + } + } + ) } diff --git a/primitives/phragmen/fuzzer/src/common.rs b/primitives/phragmen/fuzzer/src/common.rs index 3429dcb20a..90d3c813ed 100644 --- a/primitives/phragmen/fuzzer/src/common.rs +++ b/primitives/phragmen/fuzzer/src/common.rs @@ -18,12 +18,12 @@ /// converts x into the range [a, b] in a pseudo-fair way. pub fn to_range(x: usize, a: usize, b: usize) -> usize { - // does not work correctly if b < 2*a - assert!(b > 2 * a); - let collapsed = x % b; - if collapsed >= a { - collapsed - } else { - collapsed + a - } + // does not work correctly if b < 2*a + assert!(b > 2 * a); + let collapsed = x % b; + if collapsed >= a { + collapsed + } else { + collapsed + a + } } diff --git a/primitives/phragmen/fuzzer/src/equalize.rs b/primitives/phragmen/fuzzer/src/equalize.rs index cb4f98c4eb..5eee383bd9 100644 --- a/primitives/phragmen/fuzzer/src/equalize.rs +++ b/primitives/phragmen/fuzzer/src/equalize.rs @@ -22,125 +22,130 @@ mod common; use common::to_range; use honggfuzz::fuzz; +use rand::{self, Rng, RngCore, SeedableRng}; use sp_phragmen::{ - equalize, assignment_ratio_to_staked, build_support_map, to_without_backing, elect, - PhragmenResult, VoteWeight, evaluate_support, is_score_better, + assignment_ratio_to_staked, build_support_map, elect, equalize, evaluate_support, + is_score_better, to_without_backing, PhragmenResult, VoteWeight, }; -use sp_std::collections::btree_map::BTreeMap; use sp_runtime::Perbill; -use rand::{self, Rng, SeedableRng, RngCore}; +use sp_std::collections::btree_map::BTreeMap; type AccountId = u64; fn generate_random_phragmen_result( - voter_count: u64, - target_count: u64, - to_elect: usize, 
- edge_per_voter: u64, - mut rng: impl RngCore, -) -> (PhragmenResult, BTreeMap) { - let prefix = 100_000; - // Note, it is important that stakes are always bigger than ed and - let base_stake: u64 = 1_000_000_000; - let ed: u64 = base_stake; - - let mut candidates = Vec::with_capacity(target_count as usize); - let mut stake_of_tree: BTreeMap = BTreeMap::new(); - - (1..=target_count).for_each(|acc| { - candidates.push(acc); - let stake_var = rng.gen_range(ed, 100 * ed); - stake_of_tree.insert(acc, base_stake + stake_var); - }); - - let mut voters = Vec::with_capacity(voter_count as usize); - (prefix ..= (prefix + voter_count)).for_each(|acc| { - // all possible targets - let mut all_targets = candidates.clone(); - // we remove and pop into `targets` `edge_per_voter` times. - let targets = (0..edge_per_voter).map(|_| { - let upper = all_targets.len() - 1; - let idx = rng.gen_range(0, upper); - all_targets.remove(idx) - }) - .collect::>(); - - let stake_var = rng.gen_range(ed, 100 * ed) ; - let stake = base_stake + stake_var; - stake_of_tree.insert(acc, stake); - voters.push((acc, stake, targets)); - }); - - ( - elect::( - to_elect, - 0, - candidates, - voters, - ).unwrap(), - stake_of_tree, - ) + voter_count: u64, + target_count: u64, + to_elect: usize, + edge_per_voter: u64, + mut rng: impl RngCore, +) -> ( + PhragmenResult, + BTreeMap, +) { + let prefix = 100_000; + // Note, it is important that stakes are always bigger than ed and + let base_stake: u64 = 1_000_000_000; + let ed: u64 = base_stake; + + let mut candidates = Vec::with_capacity(target_count as usize); + let mut stake_of_tree: BTreeMap = BTreeMap::new(); + + (1..=target_count).for_each(|acc| { + candidates.push(acc); + let stake_var = rng.gen_range(ed, 100 * ed); + stake_of_tree.insert(acc, base_stake + stake_var); + }); + + let mut voters = Vec::with_capacity(voter_count as usize); + (prefix..=(prefix + voter_count)).for_each(|acc| { + // all possible targets + let mut all_targets = 
candidates.clone(); + // we remove and pop into `targets` `edge_per_voter` times. + let targets = (0..edge_per_voter) + .map(|_| { + let upper = all_targets.len() - 1; + let idx = rng.gen_range(0, upper); + all_targets.remove(idx) + }) + .collect::>(); + + let stake_var = rng.gen_range(ed, 100 * ed); + let stake = base_stake + stake_var; + stake_of_tree.insert(acc, stake); + voters.push((acc, stake, targets)); + }); + + ( + elect::(to_elect, 0, candidates, voters).unwrap(), + stake_of_tree, + ) } fn main() { - loop { - fuzz!(|data: (usize, usize, usize, usize, usize, u64)| { - let (mut target_count, mut voter_count, mut iterations, mut edge_per_voter, mut to_elect, seed) = data; - let rng = rand::rngs::SmallRng::seed_from_u64(seed); - target_count = to_range(target_count, 50, 2000); - voter_count = to_range(voter_count, 50, 1000); - iterations = to_range(iterations, 1, 20); - to_elect = to_range(to_elect, 25, target_count); - edge_per_voter = to_range(edge_per_voter, 1, target_count); - - println!("++ [{} / {} / {} / {}]", voter_count, target_count, to_elect, iterations); - let (PhragmenResult { winners, assignments }, stake_of_tree) = generate_random_phragmen_result( - voter_count as u64, - target_count as u64, - to_elect, - edge_per_voter as u64, - rng, - ); - - let stake_of = |who: &AccountId| -> VoteWeight { - *stake_of_tree.get(who).unwrap() - }; - - let mut staked = assignment_ratio_to_staked(assignments.clone(), &stake_of); - let winners = to_without_backing(winners); - let mut support = build_support_map(winners.as_ref(), staked.as_ref()).0; - - let initial_score = evaluate_support(&support); - if initial_score[0] == 0 { - // such cases cannot be improved by reduce. 
- return; - } - - let i = equalize( - &mut staked, - &mut support, - 10, - iterations, - ); - - let final_score = evaluate_support(&support); - if final_score[0] == initial_score[0] { - // such solutions can only be improved by such a tiny fiction that it is most often - // wrong due to rounding errors. - return; - } - - let enhance = is_score_better(initial_score, final_score); - - println!( - "iter = {} // {:?} -> {:?} [{}]", - i, - initial_score, - final_score, - enhance, - ); - // if more than one iteration has been done, or they must be equal. - assert!(enhance || initial_score == final_score || i == 0) - }); - } + loop { + fuzz!(|data: (usize, usize, usize, usize, usize, u64)| { + let ( + mut target_count, + mut voter_count, + mut iterations, + mut edge_per_voter, + mut to_elect, + seed, + ) = data; + let rng = rand::rngs::SmallRng::seed_from_u64(seed); + target_count = to_range(target_count, 50, 2000); + voter_count = to_range(voter_count, 50, 1000); + iterations = to_range(iterations, 1, 20); + to_elect = to_range(to_elect, 25, target_count); + edge_per_voter = to_range(edge_per_voter, 1, target_count); + + println!( + "++ [{} / {} / {} / {}]", + voter_count, target_count, to_elect, iterations + ); + let ( + PhragmenResult { + winners, + assignments, + }, + stake_of_tree, + ) = generate_random_phragmen_result( + voter_count as u64, + target_count as u64, + to_elect, + edge_per_voter as u64, + rng, + ); + + let stake_of = |who: &AccountId| -> VoteWeight { *stake_of_tree.get(who).unwrap() }; + + let mut staked = assignment_ratio_to_staked(assignments.clone(), &stake_of); + let winners = to_without_backing(winners); + let mut support = build_support_map(winners.as_ref(), staked.as_ref()).0; + + let initial_score = evaluate_support(&support); + if initial_score[0] == 0 { + // such cases cannot be improved by reduce. 
+ return; + } + + let i = equalize(&mut staked, &mut support, 10, iterations); + + let final_score = evaluate_support(&support); + if final_score[0] == initial_score[0] { + // such solutions can only be improved by such a tiny fiction that it is most often + // wrong due to rounding errors. + return; + } + + let enhance = is_score_better(initial_score, final_score); + + println!( + "iter = {} // {:?} -> {:?} [{}]", + i, initial_score, final_score, enhance, + ); + // if more than one iteration has been done, or they must be equal. + assert!(enhance || initial_score == final_score || i == 0) + }); + } } diff --git a/primitives/phragmen/fuzzer/src/reduce.rs b/primitives/phragmen/fuzzer/src/reduce.rs index f0a1646663..65306f577b 100644 --- a/primitives/phragmen/fuzzer/src/reduce.rs +++ b/primitives/phragmen/fuzzer/src/reduce.rs @@ -33,8 +33,8 @@ use honggfuzz::fuzz; mod common; use common::to_range; -use sp_phragmen::{StakedAssignment, ExtendedBalance, build_support_map, reduce}; -use rand::{self, Rng, SeedableRng, RngCore}; +use rand::{self, Rng, RngCore, SeedableRng}; +use sp_phragmen::{build_support_map, reduce, ExtendedBalance, StakedAssignment}; type Balance = u128; type AccountId = u64; @@ -43,110 +43,106 @@ type AccountId = u64; const KSM: Balance = 1_000_000_000_000; fn main() { - loop { - fuzz!(|data: (usize, usize, u64)| { - let (mut voter_count, mut target_count, seed) = data; - let rng = rand::rngs::SmallRng::seed_from_u64(seed); - target_count = to_range(target_count, 100, 1000); - voter_count = to_range(voter_count, 100, 2000); - let (assignments, winners) = generate_random_phragmen_assignment( - voter_count, - target_count, - 8, - 8, - rng - ); - reduce_and_compare(&assignments, &winners); - }); - } + loop { + fuzz!(|data: (usize, usize, u64)| { + let (mut voter_count, mut target_count, seed) = data; + let rng = rand::rngs::SmallRng::seed_from_u64(seed); + target_count = to_range(target_count, 100, 1000); + voter_count = to_range(voter_count, 100, 2000); 
+ let (assignments, winners) = + generate_random_phragmen_assignment(voter_count, target_count, 8, 8, rng); + reduce_and_compare(&assignments, &winners); + }); + } } fn generate_random_phragmen_assignment( - voter_count: usize, - target_count: usize, - avg_edge_per_voter: usize, - edge_per_voter_var: usize, - mut rng: impl RngCore, + voter_count: usize, + target_count: usize, + avg_edge_per_voter: usize, + edge_per_voter_var: usize, + mut rng: impl RngCore, ) -> (Vec>, Vec) { - // prefix to distinguish the voter and target account ranges. - let target_prefix = 1_000_000; - assert!(voter_count < target_prefix); - - let mut assignments = Vec::with_capacity(voter_count as usize); - let mut winners: Vec = Vec::new(); - - let all_targets = (target_prefix..(target_prefix + target_count)) - .map(|a| a as AccountId) - .collect::>(); - - (1..=voter_count).for_each(|acc| { - let mut targets_to_chose_from = all_targets.clone(); - let targets_to_chose = if edge_per_voter_var > 0 { rng.gen_range( - avg_edge_per_voter - edge_per_voter_var, - avg_edge_per_voter + edge_per_voter_var, - ) } else { avg_edge_per_voter }; - - let distribution = (0..targets_to_chose).map(|_| { - let target = targets_to_chose_from.remove(rng.gen_range(0, targets_to_chose_from.len())); - if winners.iter().find(|w| **w == target).is_none() { - winners.push(target.clone()); - } - (target, rng.gen_range(1 * KSM, 100 * KSM)) - }).collect::>(); - - assignments.push(StakedAssignment { - who: (acc as AccountId), - distribution, - }); - }); - - (assignments, winners) + // prefix to distinguish the voter and target account ranges. 
+ let target_prefix = 1_000_000; + assert!(voter_count < target_prefix); + + let mut assignments = Vec::with_capacity(voter_count as usize); + let mut winners: Vec = Vec::new(); + + let all_targets = (target_prefix..(target_prefix + target_count)) + .map(|a| a as AccountId) + .collect::>(); + + (1..=voter_count).for_each(|acc| { + let mut targets_to_chose_from = all_targets.clone(); + let targets_to_chose = if edge_per_voter_var > 0 { + rng.gen_range( + avg_edge_per_voter - edge_per_voter_var, + avg_edge_per_voter + edge_per_voter_var, + ) + } else { + avg_edge_per_voter + }; + + let distribution = (0..targets_to_chose) + .map(|_| { + let target = + targets_to_chose_from.remove(rng.gen_range(0, targets_to_chose_from.len())); + if winners.iter().find(|w| **w == target).is_none() { + winners.push(target.clone()); + } + (target, rng.gen_range(1 * KSM, 100 * KSM)) + }) + .collect::>(); + + assignments.push(StakedAssignment { + who: (acc as AccountId), + distribution, + }); + }); + + (assignments, winners) } fn assert_assignments_equal( - winners: &Vec, - ass1: &Vec>, - ass2: &Vec>, + winners: &Vec, + ass1: &Vec>, + ass2: &Vec>, ) { + let (support_1, _) = build_support_map::(winners, ass1); + let (support_2, _) = build_support_map::(winners, ass2); - let (support_1, _) = build_support_map::(winners, ass1); - let (support_2, _) = build_support_map::(winners, ass2); - - for (who, support) in support_1.iter() { - assert_eq!(support.total, support_2.get(who).unwrap().total); - } + for (who, support) in support_1.iter() { + assert_eq!(support.total, support_2.get(who).unwrap().total); + } } -fn reduce_and_compare( - assignment: &Vec>, - winners: &Vec, -) { - let mut altered_assignment = assignment.clone(); - let n = assignment.len() as u32; - let m = winners.len() as u32; - - let edges_before = assignment_len(&assignment); - let num_changed = reduce(&mut altered_assignment); - let edges_after = edges_before - num_changed; - - assert!( - edges_after <= m + n, - "reduce bound 
not satisfied. n = {}, m = {}, edges after reduce = {} (removed {})", - n, - m, - edges_after, - num_changed, - ); - - assert_assignments_equal( - winners, - &assignment, - &altered_assignment, - ); +fn reduce_and_compare(assignment: &Vec>, winners: &Vec) { + let mut altered_assignment = assignment.clone(); + let n = assignment.len() as u32; + let m = winners.len() as u32; + + let edges_before = assignment_len(&assignment); + let num_changed = reduce(&mut altered_assignment); + let edges_after = edges_before - num_changed; + + assert!( + edges_after <= m + n, + "reduce bound not satisfied. n = {}, m = {}, edges after reduce = {} (removed {})", + n, + m, + edges_after, + num_changed, + ); + + assert_assignments_equal(winners, &assignment, &altered_assignment); } fn assignment_len(assignments: &[StakedAssignment]) -> u32 { - let mut counter = 0; - assignments.iter().for_each(|x| x.distribution.iter().for_each(|_| counter += 1)); - counter + let mut counter = 0; + assignments + .iter() + .for_each(|x| x.distribution.iter().for_each(|_| counter += 1)); + counter } diff --git a/primitives/phragmen/src/helpers.rs b/primitives/phragmen/src/helpers.rs index 216de9243e..370caf19f5 100644 --- a/primitives/phragmen/src/helpers.rs +++ b/primitives/phragmen/src/helpers.rs @@ -16,83 +16,88 @@ //! Helper methods for phragmen. -use crate::{Assignment, ExtendedBalance, VoteWeight, IdentifierT, StakedAssignment, WithApprovalOf}; +use crate::{ + Assignment, ExtendedBalance, IdentifierT, StakedAssignment, VoteWeight, WithApprovalOf, +}; use sp_runtime::PerThing; use sp_std::prelude::*; /// Converts a vector of ratio assignments into ones with absolute budget value. 
pub fn assignment_ratio_to_staked( - ratio: Vec>, - stake_of: FS, + ratio: Vec>, + stake_of: FS, ) -> Vec> where - for<'r> FS: Fn(&'r A) -> VoteWeight, - T: sp_std::ops::Mul, - ExtendedBalance: From<::Inner>, + for<'r> FS: Fn(&'r A) -> VoteWeight, + T: sp_std::ops::Mul, + ExtendedBalance: From<::Inner>, { - ratio - .into_iter() - .map(|a| { - let stake = stake_of(&a.who); - a.into_staked(stake.into(), true) - }) - .collect() + ratio + .into_iter() + .map(|a| { + let stake = stake_of(&a.who); + a.into_staked(stake.into(), true) + }) + .collect() } /// Converts a vector of staked assignments into ones with ratio values. pub fn assignment_staked_to_ratio( - staked: Vec>, + staked: Vec>, ) -> Vec> where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From<::Inner>, { - staked.into_iter().map(|a| a.into_assignment(true)).collect() + staked + .into_iter() + .map(|a| a.into_assignment(true)) + .collect() } /// consumes a vector of winners with backing stake to just winners. pub fn to_without_backing(winners: Vec>) -> Vec { - winners.into_iter().map(|(who, _)| who).collect::>() + winners.into_iter().map(|(who, _)| who).collect::>() } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::Perbill; + use super::*; + use sp_runtime::Perbill; - #[test] - fn into_staked_works() { - let assignments = vec![ - Assignment { - who: 1u32, - distribution: vec![ - (10u32, Perbill::from_fraction(0.5)), - (20, Perbill::from_fraction(0.5)), - ], - }, - Assignment { - who: 2u32, - distribution: vec![ - (10, Perbill::from_fraction(0.33)), - (20, Perbill::from_fraction(0.67)), - ], - }, - ]; + #[test] + fn into_staked_works() { + let assignments = vec![ + Assignment { + who: 1u32, + distribution: vec![ + (10u32, Perbill::from_fraction(0.5)), + (20, Perbill::from_fraction(0.5)), + ], + }, + Assignment { + who: 2u32, + distribution: vec![ + (10, Perbill::from_fraction(0.33)), + (20, Perbill::from_fraction(0.67)), + ], + }, + ]; - let stake_of = |_: &u32| -> VoteWeight { 100 }; - let 
staked = assignment_ratio_to_staked(assignments, stake_of); + let stake_of = |_: &u32| -> VoteWeight { 100 }; + let staked = assignment_ratio_to_staked(assignments, stake_of); - assert_eq!( - staked, - vec![ - StakedAssignment { - who: 1u32, - distribution: vec![(10u32, 50), (20, 50),] - }, - StakedAssignment { - who: 2u32, - distribution: vec![(10u32, 33), (20, 67),] - } - ] - ); - } + assert_eq!( + staked, + vec![ + StakedAssignment { + who: 1u32, + distribution: vec![(10u32, 50), (20, 50),] + }, + StakedAssignment { + who: 2u32, + distribution: vec![(10u32, 33), (20, 67),] + } + ] + ); + } } diff --git a/primitives/phragmen/src/lib.rs b/primitives/phragmen/src/lib.rs index 01d034a95e..3af47ead53 100644 --- a/primitives/phragmen/src/lib.rs +++ b/primitives/phragmen/src/lib.rs @@ -33,22 +33,26 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{prelude::*, collections::btree_map::BTreeMap, fmt::Debug, cmp::Ordering, convert::TryFrom}; -use sp_runtime::{helpers_128bit::multiply_by_rational, PerThing, Rational128, RuntimeDebug, SaturatedConversion}; -use sp_runtime::traits::{Zero, Member, Saturating, Bounded}; +use sp_runtime::traits::{Bounded, Member, Saturating, Zero}; +use sp_runtime::{ + helpers_128bit::multiply_by_rational, PerThing, Rational128, RuntimeDebug, SaturatedConversion, +}; +use sp_std::{ + cmp::Ordering, collections::btree_map::BTreeMap, convert::TryFrom, fmt::Debug, prelude::*, +}; #[cfg(test)] mod mock; #[cfg(test)] mod tests; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use codec::{Encode, Decode}; +use serde::{Deserialize, Serialize}; +mod helpers; mod node; mod reduce; -mod helpers; // re-export reduce stuff. pub use reduce::reduce; @@ -67,7 +71,7 @@ pub use sp_phragmen_compact::generate_compact_solution_type; /// A trait to limit the number of votes per voter. The generated compact type will implement this. 
pub trait VotingLimit { - const LIMIT: usize; + const LIMIT: usize; } /// an aggregator trait for a generic type of a voter/target identifier. This usually maps to @@ -79,13 +83,13 @@ impl IdentifierT for T {} /// The errors that might occur in the this crate and compact. #[derive(Debug, Eq, PartialEq)] pub enum Error { - /// While going from compact to staked, the stake of all the edges has gone above the - /// total and the last stake cannot be assigned. - CompactStakeOverflow, - /// The compact type has a voter who's number of targets is out of bound. - CompactTargetOverflow, - /// One of the index functions returned none. - CompactInvalidIndex, + /// While going from compact to staked, the stake of all the edges has gone above the + /// total and the last stake cannot be assigned. + CompactStakeOverflow, + /// The compact type has a voter who's number of targets is out of bound. + CompactTargetOverflow, + /// One of the index functions returned none. + CompactInvalidIndex, } /// A type which is used in the API of this crate as a numeric weight of a vote, most often the @@ -109,116 +113,116 @@ const DEN: u128 = u128::max_value(); /// A candidate entity for phragmen election. #[derive(Clone, Default, RuntimeDebug)] struct Candidate { - /// Identifier. - who: AccountId, - /// Intermediary value used to sort candidates. - score: Rational128, - /// Sum of the stake of this candidate based on received votes. - approval_stake: ExtendedBalance, - /// Flag for being elected. - elected: bool, + /// Identifier. + who: AccountId, + /// Intermediary value used to sort candidates. + score: Rational128, + /// Sum of the stake of this candidate based on received votes. + approval_stake: ExtendedBalance, + /// Flag for being elected. + elected: bool, } /// A voter entity. #[derive(Clone, Default, RuntimeDebug)] struct Voter { - /// Identifier. - who: AccountId, - /// List of candidates proposed by this voter. - edges: Vec>, - /// The stake of this voter. 
- budget: ExtendedBalance, - /// Incremented each time a candidate that this voter voted for has been elected. - load: Rational128, + /// Identifier. + who: AccountId, + /// List of candidates proposed by this voter. + edges: Vec>, + /// The stake of this voter. + budget: ExtendedBalance, + /// Incremented each time a candidate that this voter voted for has been elected. + load: Rational128, } /// A candidate being backed by a voter. #[derive(Clone, Default, RuntimeDebug)] struct Edge { - /// Identifier. - who: AccountId, - /// Load of this vote. - load: Rational128, - /// Index of the candidate stored in the 'candidates' vector. - candidate_index: usize, + /// Identifier. + who: AccountId, + /// Load of this vote. + load: Rational128, + /// Index of the candidate stored in the 'candidates' vector. + candidate_index: usize, } /// Final result of the phragmen election. #[derive(RuntimeDebug)] pub struct PhragmenResult { - /// Just winners zipped with their approval stake. Note that the approval stake is merely the - /// sub of their received stake and could be used for very basic sorting and approval voting. - pub winners: Vec>, - /// Individual assignments. for each tuple, the first elements is a voter and the second - /// is the list of candidates that it supports. - pub assignments: Vec>, + /// Just winners zipped with their approval stake. Note that the approval stake is merely the + /// sub of their received stake and could be used for very basic sorting and approval voting. + pub winners: Vec>, + /// Individual assignments. for each tuple, the first elements is a voter and the second + /// is the list of candidates that it supports. + pub assignments: Vec>, } /// A voter's stake assignment among a set of targets, represented as ratios. #[derive(RuntimeDebug, Clone, Default)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] pub struct Assignment { - /// Voter's identifier - pub who: AccountId, - /// The distribution of the voter's stake. 
- pub distribution: Vec<(AccountId, T)>, + /// Voter's identifier + pub who: AccountId, + /// The distribution of the voter's stake. + pub distribution: Vec<(AccountId, T)>, } impl Assignment where - ExtendedBalance: From<::Inner>, + ExtendedBalance: From<::Inner>, { - /// Convert from a ratio assignment into one with absolute values aka. [`StakedAssignment`]. - /// - /// It needs `stake` which is the total budget of the voter. If `fill` is set to true, - /// it _tries_ to ensure that all the potential rounding errors are compensated and the - /// distribution's sum is exactly equal to the total budget, by adding or subtracting the - /// remainder from the last distribution. - /// - /// If an edge ratio is [`Bounded::max_value()`], it is dropped. This edge can never mean - /// anything useful. - pub fn into_staked(self, stake: ExtendedBalance, fill: bool) -> StakedAssignment - where - T: sp_std::ops::Mul, - { - let mut sum: ExtendedBalance = Bounded::min_value(); - let mut distribution = self - .distribution - .into_iter() - .filter_map(|(target, p)| { - // if this ratio is zero, then skip it. - if p == Bounded::min_value() { - None - } else { - // NOTE: this mul impl will always round to the nearest number, so we might both - // overflow and underflow. - let distribution_stake = p * stake; - // defensive only. We assume that balance cannot exceed extended balance. - sum = sum.saturating_add(distribution_stake); - Some((target, distribution_stake)) - } - }) - .collect::>(); - - if fill { - // NOTE: we can do this better. 
- // https://revs.runtime-revolution.com/getting-100-with-rounded-percentages-273ffa70252b - if let Some(leftover) = stake.checked_sub(sum) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_add(leftover); - } - } else if let Some(excess) = sum.checked_sub(stake) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_sub(excess); - } - } - } - - StakedAssignment { - who: self.who, - distribution, - } - } + /// Convert from a ratio assignment into one with absolute values aka. [`StakedAssignment`]. + /// + /// It needs `stake` which is the total budget of the voter. If `fill` is set to true, + /// it _tries_ to ensure that all the potential rounding errors are compensated and the + /// distribution's sum is exactly equal to the total budget, by adding or subtracting the + /// remainder from the last distribution. + /// + /// If an edge ratio is [`Bounded::max_value()`], it is dropped. This edge can never mean + /// anything useful. + pub fn into_staked(self, stake: ExtendedBalance, fill: bool) -> StakedAssignment + where + T: sp_std::ops::Mul, + { + let mut sum: ExtendedBalance = Bounded::min_value(); + let mut distribution = self + .distribution + .into_iter() + .filter_map(|(target, p)| { + // if this ratio is zero, then skip it. + if p == Bounded::min_value() { + None + } else { + // NOTE: this mul impl will always round to the nearest number, so we might both + // overflow and underflow. + let distribution_stake = p * stake; + // defensive only. We assume that balance cannot exceed extended balance. + sum = sum.saturating_add(distribution_stake); + Some((target, distribution_stake)) + } + }) + .collect::>(); + + if fill { + // NOTE: we can do this better. 
+ // https://revs.runtime-revolution.com/getting-100-with-rounded-percentages-273ffa70252b + if let Some(leftover) = stake.checked_sub(sum) { + if let Some(last) = distribution.last_mut() { + last.1 = last.1.saturating_add(leftover); + } + } else if let Some(excess) = sum.checked_sub(stake) { + if let Some(last) = distribution.last_mut() { + last.1 = last.1.saturating_sub(excess); + } + } + } + + StakedAssignment { + who: self.who, + distribution, + } + } } /// A voter's stake assignment among a set of targets, represented as absolute values in the scale @@ -226,72 +230,74 @@ where #[derive(RuntimeDebug, Clone, Default)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Encode, Decode))] pub struct StakedAssignment { - /// Voter's identifier - pub who: AccountId, - /// The distribution of the voter's stake. - pub distribution: Vec<(AccountId, ExtendedBalance)>, + /// Voter's identifier + pub who: AccountId, + /// The distribution of the voter's stake. + pub distribution: Vec<(AccountId, ExtendedBalance)>, } impl StakedAssignment { - /// Converts self into the normal [`Assignment`] type. - /// - /// If `fill` is set to true, it _tries_ to ensure that all the potential rounding errors are - /// compensated and the distribution's sum is exactly equal to 100%, by adding or subtracting - /// the remainder from the last distribution. - /// - /// NOTE: it is quite critical that this attempt always works. The data type returned here will - /// potentially get used to create a compact type; a compact type requires sum of ratios to be - /// less than 100% upon un-compacting. - /// - /// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge - /// can never be re-created and does not mean anything useful anymore. 
- pub fn into_assignment(self, fill: bool) -> Assignment - where - ExtendedBalance: From<::Inner>, - { - let accuracy: u128 = T::ACCURACY.saturated_into(); - let mut sum: u128 = Zero::zero(); - let stake = self.distribution.iter().map(|x| x.1).sum(); - let mut distribution = self - .distribution - .into_iter() - .filter_map(|(target, w)| { - let per_thing = T::from_rational_approximation(w, stake); - if per_thing == Bounded::min_value() { - None - } else { - sum += per_thing.clone().deconstruct().saturated_into(); - Some((target, per_thing)) - } - }) - .collect::>(); - - if fill { - if let Some(leftover) = accuracy.checked_sub(sum) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_add( - T::from_parts(leftover.saturated_into()) - ); - } - } else if let Some(excess) = sum.checked_sub(accuracy) { - if let Some(last) = distribution.last_mut() { - last.1 = last.1.saturating_sub( - T::from_parts(excess.saturated_into()) - ); - } - } - } - - Assignment { - who: self.who, - distribution, - } - } - - /// Get the total stake of this assignment (aka voter budget). - pub fn total(&self) -> ExtendedBalance { - self.distribution.iter().fold(Zero::zero(), |a, b| a.saturating_add(b.1)) - } + /// Converts self into the normal [`Assignment`] type. + /// + /// If `fill` is set to true, it _tries_ to ensure that all the potential rounding errors are + /// compensated and the distribution's sum is exactly equal to 100%, by adding or subtracting + /// the remainder from the last distribution. + /// + /// NOTE: it is quite critical that this attempt always works. The data type returned here will + /// potentially get used to create a compact type; a compact type requires sum of ratios to be + /// less than 100% upon un-compacting. + /// + /// If an edge stake is so small that it cannot be represented in `T`, it is ignored. This edge + /// can never be re-created and does not mean anything useful anymore. 
+ pub fn into_assignment(self, fill: bool) -> Assignment + where + ExtendedBalance: From<::Inner>, + { + let accuracy: u128 = T::ACCURACY.saturated_into(); + let mut sum: u128 = Zero::zero(); + let stake = self.distribution.iter().map(|x| x.1).sum(); + let mut distribution = self + .distribution + .into_iter() + .filter_map(|(target, w)| { + let per_thing = T::from_rational_approximation(w, stake); + if per_thing == Bounded::min_value() { + None + } else { + sum += per_thing.clone().deconstruct().saturated_into(); + Some((target, per_thing)) + } + }) + .collect::>(); + + if fill { + if let Some(leftover) = accuracy.checked_sub(sum) { + if let Some(last) = distribution.last_mut() { + last.1 = last + .1 + .saturating_add(T::from_parts(leftover.saturated_into())); + } + } else if let Some(excess) = sum.checked_sub(accuracy) { + if let Some(last) = distribution.last_mut() { + last.1 = last + .1 + .saturating_sub(T::from_parts(excess.saturated_into())); + } + } + } + + Assignment { + who: self.who, + distribution, + } + } + + /// Get the total stake of this assignment (aka voter budget). + pub fn total(&self) -> ExtendedBalance { + self.distribution + .iter() + .fold(Zero::zero(), |a, b| a.saturating_add(b.1)) + } } /// A structure to demonstrate the phragmen result from the perspective of the candidate, i.e. how @@ -304,10 +310,10 @@ impl StakedAssignment { #[derive(Default, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize, Eq, PartialEq))] pub struct Support { - /// Total support. - pub total: ExtendedBalance, - /// Support from voters. - pub voters: Vec<(AccountId, ExtendedBalance)>, + /// Total support. + pub total: ExtendedBalance, + /// Support from voters. + pub voters: Vec<(AccountId, ExtendedBalance)>, } /// A linkage from a candidate and its [`Support`]. @@ -329,201 +335,209 @@ pub type SupportMap = BTreeMap>; /// value are passed in. 
From the perspective of this function, a candidate can easily be among the /// winner with no backing stake. pub fn elect( - candidate_count: usize, - minimum_candidate_count: usize, - initial_candidates: Vec, - initial_voters: Vec<(AccountId, VoteWeight, Vec)>, -) -> Option> where - AccountId: Default + Ord + Member, - R: PerThing, + candidate_count: usize, + minimum_candidate_count: usize, + initial_candidates: Vec, + initial_voters: Vec<(AccountId, VoteWeight, Vec)>, +) -> Option> +where + AccountId: Default + Ord + Member, + R: PerThing, { - // return structures - let mut elected_candidates: Vec<(AccountId, ExtendedBalance)>; - let mut assigned: Vec>; - - // used to cache and access candidates index. - let mut c_idx_cache = BTreeMap::::new(); - - // voters list. - let num_voters = initial_candidates.len() + initial_voters.len(); - let mut voters: Vec> = Vec::with_capacity(num_voters); - - // Iterate once to create a cache of candidates indexes. This could be optimized by being - // provided by the call site. - let mut candidates = initial_candidates - .into_iter() - .enumerate() - .map(|(idx, who)| { - c_idx_cache.insert(who.clone(), idx); - Candidate { who, ..Default::default() } - }) - .collect::>>(); - - // early return if we don't have enough candidates - if candidates.len() < minimum_candidate_count { return None; } - - // collect voters. use `c_idx_cache` for fast access and aggregate `approval_stake` of - // candidates. - voters.extend(initial_voters.into_iter().map(|(who, voter_stake, votes)| { - let mut edges: Vec> = Vec::with_capacity(votes.len()); - for v in votes { - if let Some(idx) = c_idx_cache.get(&v) { - // This candidate is valid + already cached. - candidates[*idx].approval_stake = candidates[*idx].approval_stake - .saturating_add(voter_stake.into()); - edges.push(Edge { who: v.clone(), candidate_index: *idx, ..Default::default() }); - } // else {} would be wrong votes. We don't really care about it. 
- } - Voter { - who, - edges: edges, - budget: voter_stake.into(), - load: Rational128::zero(), - } - })); - - - // we have already checked that we have more candidates than minimum_candidate_count. - // run phragmen. - let to_elect = candidate_count.min(candidates.len()); - elected_candidates = Vec::with_capacity(candidate_count); - assigned = Vec::with_capacity(candidate_count); - - // main election loop - for _round in 0..to_elect { - // loop 1: initialize score - for c in &mut candidates { - if !c.elected { - // 1 / approval_stake == (DEN / approval_stake) / DEN. If approval_stake is zero, - // then the ratio should be as large as possible, essentially `infinity`. - if c.approval_stake.is_zero() { - c.score = Rational128::from_unchecked(DEN, 0); - } else { - c.score = Rational128::from(DEN / c.approval_stake, DEN); - } - } - } - - // loop 2: increment score - for n in &voters { - for e in &n.edges { - let c = &mut candidates[e.candidate_index]; - if !c.elected && !c.approval_stake.is_zero() { - let temp_n = multiply_by_rational( - n.load.n(), - n.budget, - c.approval_stake, - ).unwrap_or(Bounded::max_value()); - let temp_d = n.load.d(); - let temp = Rational128::from(temp_n, temp_d); - c.score = c.score.lazy_saturating_add(temp); - } - } - } - - // loop 3: find the best - if let Some(winner) = candidates - .iter_mut() - .filter(|c| !c.elected) - .min_by_key(|c| c.score) - { - // loop 3: update voter and edge load - winner.elected = true; - for n in &mut voters { - for e in &mut n.edges { - if e.who == winner.who { - e.load = winner.score.lazy_saturating_sub(n.load); - n.load = winner.score; - } - } - } - - elected_candidates.push((winner.who.clone(), winner.approval_stake)); - } else { - break - } - } // end of all rounds - - // update backing stake of candidates and voters - for n in &mut voters { - let mut assignment = Assignment { - who: n.who.clone(), - ..Default::default() - }; - for e in &mut n.edges { - if elected_candidates.iter().position(|(ref c, _)| 
*c == e.who).is_some() { - let per_bill_parts: R::Inner = - { - if n.load == e.load { - // Full support. No need to calculate. - R::ACCURACY - } else { - if e.load.d() == n.load.d() { - // return e.load / n.load. - let desired_scale: u128 = R::ACCURACY.saturated_into(); - let parts = multiply_by_rational( - desired_scale, - e.load.n(), - n.load.n(), - ) - // If result cannot fit in u128. Not much we can do about it. - .unwrap_or(Bounded::max_value()); - - TryFrom::try_from(parts) - // If the result cannot fit into R::Inner. Defensive only. This can - // never happen. `desired_scale * e / n`, where `e / n < 1` always - // yields a value smaller than `desired_scale`, which will fit into - // R::Inner. - .unwrap_or(Bounded::max_value()) - } else { - // defensive only. Both edge and voter loads are built from - // scores, hence MUST have the same denominator. - Zero::zero() - } - } - }; - let per_thing = R::from_parts(per_bill_parts); - assignment.distribution.push((e.who.clone(), per_thing)); - } - } - - let len = assignment.distribution.len(); - if len > 0 { - // To ensure an assertion indicating: no stake from the voter going to waste, - // we add a minimal post-processing to equally assign all of the leftover stake ratios. - let vote_count: R::Inner = len.saturated_into(); - let accuracy = R::ACCURACY; - let mut sum: R::Inner = Zero::zero(); - assignment.distribution.iter().for_each(|a| sum = sum.saturating_add(a.1.deconstruct())); - - let diff = accuracy.saturating_sub(sum); - let diff_per_vote = (diff / vote_count).min(accuracy); - - if !diff_per_vote.is_zero() { - for i in 0..len { - let current_ratio = assignment.distribution[i % len].1; - let next_ratio = current_ratio - .saturating_add(R::from_parts(diff_per_vote)); - assignment.distribution[i % len].1 = next_ratio; - } - } - - // `remainder` is set to be less than maximum votes of a voter (currently 16). - // safe to cast it to usize. 
- let remainder = diff - diff_per_vote * vote_count; - for i in 0..remainder.saturated_into::() { - let current_ratio = assignment.distribution[i % len].1; - let next_ratio = current_ratio.saturating_add(R::from_parts(1u8.into())); - assignment.distribution[i % len].1 = next_ratio; - } - assigned.push(assignment); - } - } - - Some(PhragmenResult { - winners: elected_candidates, - assignments: assigned, - }) + // return structures + let mut elected_candidates: Vec<(AccountId, ExtendedBalance)>; + let mut assigned: Vec>; + + // used to cache and access candidates index. + let mut c_idx_cache = BTreeMap::::new(); + + // voters list. + let num_voters = initial_candidates.len() + initial_voters.len(); + let mut voters: Vec> = Vec::with_capacity(num_voters); + + // Iterate once to create a cache of candidates indexes. This could be optimized by being + // provided by the call site. + let mut candidates = initial_candidates + .into_iter() + .enumerate() + .map(|(idx, who)| { + c_idx_cache.insert(who.clone(), idx); + Candidate { + who, + ..Default::default() + } + }) + .collect::>>(); + + // early return if we don't have enough candidates + if candidates.len() < minimum_candidate_count { + return None; + } + + // collect voters. use `c_idx_cache` for fast access and aggregate `approval_stake` of + // candidates. + voters.extend(initial_voters.into_iter().map(|(who, voter_stake, votes)| { + let mut edges: Vec> = Vec::with_capacity(votes.len()); + for v in votes { + if let Some(idx) = c_idx_cache.get(&v) { + // This candidate is valid + already cached. + candidates[*idx].approval_stake = candidates[*idx] + .approval_stake + .saturating_add(voter_stake.into()); + edges.push(Edge { + who: v.clone(), + candidate_index: *idx, + ..Default::default() + }); + } // else {} would be wrong votes. We don't really care about it. 
+ } + Voter { + who, + edges: edges, + budget: voter_stake.into(), + load: Rational128::zero(), + } + })); + + // we have already checked that we have more candidates than minimum_candidate_count. + // run phragmen. + let to_elect = candidate_count.min(candidates.len()); + elected_candidates = Vec::with_capacity(candidate_count); + assigned = Vec::with_capacity(candidate_count); + + // main election loop + for _round in 0..to_elect { + // loop 1: initialize score + for c in &mut candidates { + if !c.elected { + // 1 / approval_stake == (DEN / approval_stake) / DEN. If approval_stake is zero, + // then the ratio should be as large as possible, essentially `infinity`. + if c.approval_stake.is_zero() { + c.score = Rational128::from_unchecked(DEN, 0); + } else { + c.score = Rational128::from(DEN / c.approval_stake, DEN); + } + } + } + + // loop 2: increment score + for n in &voters { + for e in &n.edges { + let c = &mut candidates[e.candidate_index]; + if !c.elected && !c.approval_stake.is_zero() { + let temp_n = multiply_by_rational(n.load.n(), n.budget, c.approval_stake) + .unwrap_or(Bounded::max_value()); + let temp_d = n.load.d(); + let temp = Rational128::from(temp_n, temp_d); + c.score = c.score.lazy_saturating_add(temp); + } + } + } + + // loop 3: find the best + if let Some(winner) = candidates + .iter_mut() + .filter(|c| !c.elected) + .min_by_key(|c| c.score) + { + // loop 3: update voter and edge load + winner.elected = true; + for n in &mut voters { + for e in &mut n.edges { + if e.who == winner.who { + e.load = winner.score.lazy_saturating_sub(n.load); + n.load = winner.score; + } + } + } + + elected_candidates.push((winner.who.clone(), winner.approval_stake)); + } else { + break; + } + } // end of all rounds + + // update backing stake of candidates and voters + for n in &mut voters { + let mut assignment = Assignment { + who: n.who.clone(), + ..Default::default() + }; + for e in &mut n.edges { + if elected_candidates + .iter() + .position(|(ref c, _)| *c 
== e.who) + .is_some() + { + let per_bill_parts: R::Inner = { + if n.load == e.load { + // Full support. No need to calculate. + R::ACCURACY + } else { + if e.load.d() == n.load.d() { + // return e.load / n.load. + let desired_scale: u128 = R::ACCURACY.saturated_into(); + let parts = multiply_by_rational(desired_scale, e.load.n(), n.load.n()) + // If result cannot fit in u128. Not much we can do about it. + .unwrap_or(Bounded::max_value()); + + TryFrom::try_from(parts) + // If the result cannot fit into R::Inner. Defensive only. This can + // never happen. `desired_scale * e / n`, where `e / n < 1` always + // yields a value smaller than `desired_scale`, which will fit into + // R::Inner. + .unwrap_or(Bounded::max_value()) + } else { + // defensive only. Both edge and voter loads are built from + // scores, hence MUST have the same denominator. + Zero::zero() + } + } + }; + let per_thing = R::from_parts(per_bill_parts); + assignment.distribution.push((e.who.clone(), per_thing)); + } + } + + let len = assignment.distribution.len(); + if len > 0 { + // To ensure an assertion indicating: no stake from the voter going to waste, + // we add a minimal post-processing to equally assign all of the leftover stake ratios. + let vote_count: R::Inner = len.saturated_into(); + let accuracy = R::ACCURACY; + let mut sum: R::Inner = Zero::zero(); + assignment + .distribution + .iter() + .for_each(|a| sum = sum.saturating_add(a.1.deconstruct())); + + let diff = accuracy.saturating_sub(sum); + let diff_per_vote = (diff / vote_count).min(accuracy); + + if !diff_per_vote.is_zero() { + for i in 0..len { + let current_ratio = assignment.distribution[i % len].1; + let next_ratio = current_ratio.saturating_add(R::from_parts(diff_per_vote)); + assignment.distribution[i % len].1 = next_ratio; + } + } + + // `remainder` is set to be less than maximum votes of a voter (currently 16). + // safe to cast it to usize. 
+ let remainder = diff - diff_per_vote * vote_count; + for i in 0..remainder.saturated_into::() { + let current_ratio = assignment.distribution[i % len].1; + let next_ratio = current_ratio.saturating_add(R::from_parts(1u8.into())); + assignment.distribution[i % len].1 = next_ratio; + } + assigned.push(assignment); + } + } + + Some(PhragmenResult { + winners: elected_candidates, + assignments: assigned, + }) } /// Build the support map from the given phragmen result. It maps a flat structure like @@ -558,30 +572,31 @@ pub fn elect( /// /// `O(E)` where `E` is the total number of edges. pub fn build_support_map( - winners: &[AccountId], - assignments: &[StakedAssignment], -) -> (SupportMap, u32) where - AccountId: Default + Ord + Member, + winners: &[AccountId], + assignments: &[StakedAssignment], +) -> (SupportMap, u32) +where + AccountId: Default + Ord + Member, { - let mut errors = 0; - // Initialize the support of each candidate. - let mut supports = >::new(); - winners - .iter() - .for_each(|e| { supports.insert(e.clone(), Default::default()); }); - - // build support struct. - for StakedAssignment { who, distribution } in assignments.iter() { - for (c, weight_extended) in distribution.iter() { - if let Some(support) = supports.get_mut(c) { - support.total = support.total.saturating_add(*weight_extended); - support.voters.push((who.clone(), *weight_extended)); - } else { - errors = errors.saturating_add(1); - } - } - } - (supports, errors) + let mut errors = 0; + // Initialize the support of each candidate. + let mut supports = >::new(); + winners.iter().for_each(|e| { + supports.insert(e.clone(), Default::default()); + }); + + // build support struct. 
+ for StakedAssignment { who, distribution } in assignments.iter() { + for (c, weight_extended) in distribution.iter() { + if let Some(support) = supports.get_mut(c) { + support.total = support.total.saturating_add(*weight_extended); + support.voters.push((who.clone(), *weight_extended)); + } else { + errors = errors.saturating_add(1); + } + } + } + (supports, errors) } /// Evaluate a phragmen result, given the support map. The returned tuple contains: @@ -591,23 +606,21 @@ pub fn build_support_map( /// - Sum of all supports squared. This value must be **minimized**. /// /// `O(E)` where `E` is the total number of edges. -pub fn evaluate_support( - support: &SupportMap, -) -> PhragmenScore { - let mut min_support = ExtendedBalance::max_value(); - let mut sum: ExtendedBalance = Zero::zero(); - // NOTE: this will probably saturate but using big num makes it even slower. We'll have to see. - // This must run on chain.. - let mut sum_squared: ExtendedBalance = Zero::zero(); - for (_, support) in support.iter() { - sum += support.total; - let squared = support.total.saturating_mul(support.total); - sum_squared = sum_squared.saturating_add(squared); - if support.total < min_support { - min_support = support.total; - } - } - [min_support, sum, sum_squared] +pub fn evaluate_support(support: &SupportMap) -> PhragmenScore { + let mut min_support = ExtendedBalance::max_value(); + let mut sum: ExtendedBalance = Zero::zero(); + // NOTE: this will probably saturate but using big num makes it even slower. We'll have to see. + // This must run on chain.. 
+ let mut sum_squared: ExtendedBalance = Zero::zero(); + for (_, support) in support.iter() { + sum += support.total; + let squared = support.total.saturating_mul(support.total); + sum_squared = sum_squared.saturating_add(squared); + if support.total < min_support { + min_support = support.total; + } + } + [min_support, sum, sum_squared] } /// Compares two sets of phragmen scores based on desirability and returns true if `that` is @@ -617,18 +630,18 @@ pub fn evaluate_support( /// /// Note that the third component should be minimized. pub fn is_score_better(this: PhragmenScore, that: PhragmenScore) -> bool { - match that - .iter() - .enumerate() - .map(|(i, e)| e.cmp(&this[i])) - .collect::>() - .as_slice() - { - [Ordering::Greater, _, _] => true, - [Ordering::Equal, Ordering::Greater, _] => true, - [Ordering::Equal, Ordering::Equal, Ordering::Less] => true, - _ => false, - } + match that + .iter() + .enumerate() + .map(|(i, e)| e.cmp(&this[i])) + .collect::>() + .as_slice() + { + [Ordering::Greater, _, _] => true, + [Ordering::Equal, Ordering::Greater, _] => true, + [Ordering::Equal, Ordering::Equal, Ordering::Less] => true, + _ => false, + } } /// Performs equalize post-processing to the output of the election algorithm. This happens in @@ -642,128 +655,138 @@ pub fn is_score_better(this: PhragmenScore, that: PhragmenScore) -> bool { /// - `tolerance`: maximum difference that can occur before an early quite happens. /// - `iterations`: maximum number of iterations that will be processed. 
pub fn equalize( - assignments: &mut Vec>, - supports: &mut SupportMap, - tolerance: ExtendedBalance, - iterations: usize, -) -> usize where AccountId: Ord + Clone { - if iterations == 0 { return 0; } - - let mut i = 0 ; - loop { - let mut max_diff = 0; - for assignment in assignments.iter_mut() { - let voter_budget = assignment.total(); - let StakedAssignment { who, distribution } = assignment; - let diff = do_equalize( - who, - voter_budget, - distribution, - supports, - tolerance, - ); - if diff > max_diff { max_diff = diff; } - } - - i += 1; - if max_diff <= tolerance || i >= iterations { - break i; - } - } + assignments: &mut Vec>, + supports: &mut SupportMap, + tolerance: ExtendedBalance, + iterations: usize, +) -> usize +where + AccountId: Ord + Clone, +{ + if iterations == 0 { + return 0; + } + + let mut i = 0; + loop { + let mut max_diff = 0; + for assignment in assignments.iter_mut() { + let voter_budget = assignment.total(); + let StakedAssignment { who, distribution } = assignment; + let diff = do_equalize(who, voter_budget, distribution, supports, tolerance); + if diff > max_diff { + max_diff = diff; + } + } + + i += 1; + if max_diff <= tolerance || i >= iterations { + break i; + } + } } /// actually perform equalize. same interface is `equalize`. Just called in loops with a check for /// maximum difference. fn do_equalize( - voter: &AccountId, - budget: ExtendedBalance, - elected_edges: &mut Vec<(AccountId, ExtendedBalance)>, - support_map: &mut SupportMap, - tolerance: ExtendedBalance -) -> ExtendedBalance where AccountId: Ord + Clone { - // Nothing to do. This voter had nothing useful. - // Defensive only. Assignment list should always be populated. 1 might happen for self vote. 
- if elected_edges.is_empty() || elected_edges.len() == 1 { return 0; } - - let stake_used = elected_edges - .iter() - .fold(0 as ExtendedBalance, |s, e| s.saturating_add(e.1)); - - let backed_stakes_iter = elected_edges - .iter() - .filter_map(|e| support_map.get(&e.0)) - .map(|e| e.total); - - let backing_backed_stake = elected_edges - .iter() - .filter(|e| e.1 > 0) - .filter_map(|e| support_map.get(&e.0)) - .map(|e| e.total) - .collect::>(); - - let mut difference; - if backing_backed_stake.len() > 0 { - let max_stake = backing_backed_stake - .iter() - .max() - .expect("vector with positive length will have a max; qed"); - let min_stake = backed_stakes_iter - .min() - .expect("iterator with positive length will have a min; qed"); - - difference = max_stake.saturating_sub(min_stake); - difference = difference.saturating_add(budget.saturating_sub(stake_used)); - if difference < tolerance { - return difference; - } - } else { - difference = budget; - } - - // Undo updates to support - elected_edges.iter_mut().for_each(|e| { - if let Some(support) = support_map.get_mut(&e.0) { - support.total = support.total.saturating_sub(e.1); - support.voters.retain(|i_support| i_support.0 != *voter); - } - e.1 = 0; - }); - - elected_edges.sort_unstable_by_key(|e| - if let Some(e) = support_map.get(&e.0) { e.total } else { Zero::zero() } - ); - - let mut cumulative_stake: ExtendedBalance = 0; - let mut last_index = elected_edges.len() - 1; - let mut idx = 0usize; - for e in &mut elected_edges[..] 
{ - if let Some(support) = support_map.get_mut(&e.0) { - let stake = support.total; - let stake_mul = stake.saturating_mul(idx as ExtendedBalance); - let stake_sub = stake_mul.saturating_sub(cumulative_stake); - if stake_sub > budget { - last_index = idx.checked_sub(1).unwrap_or(0); - break; - } - cumulative_stake = cumulative_stake.saturating_add(stake); - } - idx += 1; - } - - let last_stake = elected_edges[last_index].1; - let split_ways = last_index + 1; - let excess = budget - .saturating_add(cumulative_stake) - .saturating_sub(last_stake.saturating_mul(split_ways as ExtendedBalance)); - elected_edges.iter_mut().take(split_ways).for_each(|e| { - if let Some(support) = support_map.get_mut(&e.0) { - e.1 = (excess / split_ways as ExtendedBalance) - .saturating_add(last_stake) - .saturating_sub(support.total); - support.total = support.total.saturating_add(e.1); - support.voters.push((voter.clone(), e.1)); - } - }); - - difference + voter: &AccountId, + budget: ExtendedBalance, + elected_edges: &mut Vec<(AccountId, ExtendedBalance)>, + support_map: &mut SupportMap, + tolerance: ExtendedBalance, +) -> ExtendedBalance +where + AccountId: Ord + Clone, +{ + // Nothing to do. This voter had nothing useful. + // Defensive only. Assignment list should always be populated. 1 might happen for self vote. 
+ if elected_edges.is_empty() || elected_edges.len() == 1 { + return 0; + } + + let stake_used = elected_edges + .iter() + .fold(0 as ExtendedBalance, |s, e| s.saturating_add(e.1)); + + let backed_stakes_iter = elected_edges + .iter() + .filter_map(|e| support_map.get(&e.0)) + .map(|e| e.total); + + let backing_backed_stake = elected_edges + .iter() + .filter(|e| e.1 > 0) + .filter_map(|e| support_map.get(&e.0)) + .map(|e| e.total) + .collect::>(); + + let mut difference; + if backing_backed_stake.len() > 0 { + let max_stake = backing_backed_stake + .iter() + .max() + .expect("vector with positive length will have a max; qed"); + let min_stake = backed_stakes_iter + .min() + .expect("iterator with positive length will have a min; qed"); + + difference = max_stake.saturating_sub(min_stake); + difference = difference.saturating_add(budget.saturating_sub(stake_used)); + if difference < tolerance { + return difference; + } + } else { + difference = budget; + } + + // Undo updates to support + elected_edges.iter_mut().for_each(|e| { + if let Some(support) = support_map.get_mut(&e.0) { + support.total = support.total.saturating_sub(e.1); + support.voters.retain(|i_support| i_support.0 != *voter); + } + e.1 = 0; + }); + + elected_edges.sort_unstable_by_key(|e| { + if let Some(e) = support_map.get(&e.0) { + e.total + } else { + Zero::zero() + } + }); + + let mut cumulative_stake: ExtendedBalance = 0; + let mut last_index = elected_edges.len() - 1; + let mut idx = 0usize; + for e in &mut elected_edges[..] 
{ + if let Some(support) = support_map.get_mut(&e.0) { + let stake = support.total; + let stake_mul = stake.saturating_mul(idx as ExtendedBalance); + let stake_sub = stake_mul.saturating_sub(cumulative_stake); + if stake_sub > budget { + last_index = idx.checked_sub(1).unwrap_or(0); + break; + } + cumulative_stake = cumulative_stake.saturating_add(stake); + } + idx += 1; + } + + let last_stake = elected_edges[last_index].1; + let split_ways = last_index + 1; + let excess = budget + .saturating_add(cumulative_stake) + .saturating_sub(last_stake.saturating_mul(split_ways as ExtendedBalance)); + elected_edges.iter_mut().take(split_ways).for_each(|e| { + if let Some(support) = support_map.get_mut(&e.0) { + e.1 = (excess / split_ways as ExtendedBalance) + .saturating_add(last_stake) + .saturating_sub(support.total); + support.total = support.total.saturating_add(e.1); + support.voters.push((voter.clone(), e.1)); + } + }); + + difference } diff --git a/primitives/phragmen/src/mock.rs b/primitives/phragmen/src/mock.rs index 5aab5ff2f7..511e0df799 100644 --- a/primitives/phragmen/src/mock.rs +++ b/primitives/phragmen/src/mock.rs @@ -18,41 +18,42 @@ #![cfg(test)] -use crate::{elect, PhragmenResult, Assignment, VoteWeight, ExtendedBalance}; +use crate::{elect, Assignment, ExtendedBalance, PhragmenResult, VoteWeight}; use sp_runtime::{ - assert_eq_error_rate, PerThing, - traits::{Member, SaturatedConversion, Zero, One} + assert_eq_error_rate, + traits::{Member, One, SaturatedConversion, Zero}, + PerThing, }; use sp_std::collections::btree_map::BTreeMap; #[derive(Default, Debug)] pub(crate) struct _Candidate { - who: A, - score: f64, - approval_stake: f64, - elected: bool, + who: A, + score: f64, + approval_stake: f64, + elected: bool, } #[derive(Default, Debug)] pub(crate) struct _Voter { - who: A, - edges: Vec<_Edge>, - budget: f64, - load: f64, + who: A, + edges: Vec<_Edge>, + budget: f64, + load: f64, } #[derive(Default, Debug)] pub(crate) struct _Edge { - who: A, - load: 
f64, - candidate_index: usize, + who: A, + load: f64, + candidate_index: usize, } #[derive(Default, Debug, PartialEq)] pub(crate) struct _Support { - pub own: f64, - pub total: f64, - pub others: Vec<_PhragmenAssignment>, + pub own: f64, + pub total: f64, + pub others: Vec<_PhragmenAssignment>, } pub(crate) type _PhragmenAssignment = (A, f64); @@ -62,335 +63,369 @@ pub(crate) type AccountId = u64; #[derive(Debug, Clone)] pub(crate) struct _PhragmenResult { - pub winners: Vec<(A, ExtendedBalance)>, - pub assignments: Vec<(A, Vec<_PhragmenAssignment>)> + pub winners: Vec<(A, ExtendedBalance)>, + pub assignments: Vec<(A, Vec<_PhragmenAssignment>)>, } pub(crate) fn auto_generate_self_voters(candidates: &[A]) -> Vec<(A, Vec)> { - candidates.iter().map(|c| (c.clone(), vec![c.clone()])).collect() + candidates + .iter() + .map(|c| (c.clone(), vec![c.clone()])) + .collect() } pub(crate) fn elect_float( - candidate_count: usize, - minimum_candidate_count: usize, - initial_candidates: Vec, - initial_voters: Vec<(A, Vec)>, - stake_of: FS, -) -> Option<_PhragmenResult> where - A: Default + Ord + Member + Copy, - for<'r> FS: Fn(&'r A) -> VoteWeight, + candidate_count: usize, + minimum_candidate_count: usize, + initial_candidates: Vec, + initial_voters: Vec<(A, Vec)>, + stake_of: FS, +) -> Option<_PhragmenResult> +where + A: Default + Ord + Member + Copy, + for<'r> FS: Fn(&'r A) -> VoteWeight, { - let mut elected_candidates: Vec<(A, ExtendedBalance)>; - let mut assigned: Vec<(A, Vec<_PhragmenAssignment>)>; - let mut c_idx_cache = BTreeMap::::new(); - let num_voters = initial_candidates.len() + initial_voters.len(); - let mut voters: Vec<_Voter> = Vec::with_capacity(num_voters); - - let mut candidates = initial_candidates - .into_iter() - .enumerate() - .map(|(idx, who)| { - c_idx_cache.insert(who.clone(), idx); - _Candidate { who, ..Default::default() } - }) - .collect::>>(); - - if candidates.len() < minimum_candidate_count { - return None; - } - - 
voters.extend(initial_voters.into_iter().map(|(who, votes)| { - let voter_stake = stake_of(&who) as f64; - let mut edges: Vec<_Edge> = Vec::with_capacity(votes.len()); - for v in votes { - if let Some(idx) = c_idx_cache.get(&v) { - candidates[*idx].approval_stake = candidates[*idx].approval_stake + voter_stake; - edges.push( - _Edge { who: v.clone(), candidate_index: *idx, ..Default::default() } - ); - } - } - _Voter { - who, - edges: edges, - budget: voter_stake, - load: 0f64, - } - })); - - let to_elect = candidate_count.min(candidates.len()); - elected_candidates = Vec::with_capacity(candidate_count); - assigned = Vec::with_capacity(candidate_count); - - for _round in 0..to_elect { - for c in &mut candidates { - if !c.elected { - c.score = 1.0 / c.approval_stake; - } - } - for n in &voters { - for e in &n.edges { - let c = &mut candidates[e.candidate_index]; - if !c.elected && !(c.approval_stake == 0f64) { - c.score += n.budget * n.load / c.approval_stake; - } - } - } - - if let Some(winner) = candidates - .iter_mut() - .filter(|c| !c.elected) - .min_by(|x, y| x.score.partial_cmp(&y.score).unwrap_or(sp_std::cmp::Ordering::Equal)) - { - winner.elected = true; - for n in &mut voters { - for e in &mut n.edges { - if e.who == winner.who { - e.load = winner.score - n.load; - n.load = winner.score; - } - } - } - - elected_candidates.push((winner.who.clone(), winner.approval_stake as ExtendedBalance)); - } else { - break - } - } - - for n in &mut voters { - let mut assignment = (n.who.clone(), vec![]); - for e in &mut n.edges { - if let Some(c) = elected_candidates.iter().cloned().map(|(c, _)| c).find(|c| *c == e.who) { - if c != n.who { - let ratio = e.load / n.load; - assignment.1.push((e.who.clone(), ratio)); - } - } - } - if assignment.1.len() > 0 { - assigned.push(assignment); - } - } - - Some(_PhragmenResult { - winners: elected_candidates, - assignments: assigned, - }) + let mut elected_candidates: Vec<(A, ExtendedBalance)>; + let mut assigned: Vec<(A, 
Vec<_PhragmenAssignment>)>; + let mut c_idx_cache = BTreeMap::::new(); + let num_voters = initial_candidates.len() + initial_voters.len(); + let mut voters: Vec<_Voter> = Vec::with_capacity(num_voters); + + let mut candidates = initial_candidates + .into_iter() + .enumerate() + .map(|(idx, who)| { + c_idx_cache.insert(who.clone(), idx); + _Candidate { + who, + ..Default::default() + } + }) + .collect::>>(); + + if candidates.len() < minimum_candidate_count { + return None; + } + + voters.extend(initial_voters.into_iter().map(|(who, votes)| { + let voter_stake = stake_of(&who) as f64; + let mut edges: Vec<_Edge> = Vec::with_capacity(votes.len()); + for v in votes { + if let Some(idx) = c_idx_cache.get(&v) { + candidates[*idx].approval_stake = candidates[*idx].approval_stake + voter_stake; + edges.push(_Edge { + who: v.clone(), + candidate_index: *idx, + ..Default::default() + }); + } + } + _Voter { + who, + edges: edges, + budget: voter_stake, + load: 0f64, + } + })); + + let to_elect = candidate_count.min(candidates.len()); + elected_candidates = Vec::with_capacity(candidate_count); + assigned = Vec::with_capacity(candidate_count); + + for _round in 0..to_elect { + for c in &mut candidates { + if !c.elected { + c.score = 1.0 / c.approval_stake; + } + } + for n in &voters { + for e in &n.edges { + let c = &mut candidates[e.candidate_index]; + if !c.elected && !(c.approval_stake == 0f64) { + c.score += n.budget * n.load / c.approval_stake; + } + } + } + + if let Some(winner) = candidates.iter_mut().filter(|c| !c.elected).min_by(|x, y| { + x.score + .partial_cmp(&y.score) + .unwrap_or(sp_std::cmp::Ordering::Equal) + }) { + winner.elected = true; + for n in &mut voters { + for e in &mut n.edges { + if e.who == winner.who { + e.load = winner.score - n.load; + n.load = winner.score; + } + } + } + + elected_candidates.push((winner.who.clone(), winner.approval_stake as ExtendedBalance)); + } else { + break; + } + } + + for n in &mut voters { + let mut assignment = 
(n.who.clone(), vec![]); + for e in &mut n.edges { + if let Some(c) = elected_candidates + .iter() + .cloned() + .map(|(c, _)| c) + .find(|c| *c == e.who) + { + if c != n.who { + let ratio = e.load / n.load; + assignment.1.push((e.who.clone(), ratio)); + } + } + } + if assignment.1.len() > 0 { + assigned.push(assignment); + } + } + + Some(_PhragmenResult { + winners: elected_candidates, + assignments: assigned, + }) } pub(crate) fn equalize_float( - mut assignments: Vec<(A, Vec<_PhragmenAssignment>)>, - supports: &mut _SupportMap, - tolerance: f64, - iterations: usize, - stake_of: FS, + mut assignments: Vec<(A, Vec<_PhragmenAssignment>)>, + supports: &mut _SupportMap, + tolerance: f64, + iterations: usize, + stake_of: FS, ) where - for<'r> FS: Fn(&'r A) -> VoteWeight, - A: Ord + Clone + std::fmt::Debug, + for<'r> FS: Fn(&'r A) -> VoteWeight, + A: Ord + Clone + std::fmt::Debug, { - for _i in 0..iterations { - let mut max_diff = 0.0; - for (voter, assignment) in assignments.iter_mut() { - let voter_budget = stake_of(&voter); - let diff = do_equalize_float( - voter, - voter_budget, - assignment, - supports, - tolerance, - ); - if diff > max_diff { max_diff = diff; } - } - - if max_diff < tolerance { - break; - } - } + for _i in 0..iterations { + let mut max_diff = 0.0; + for (voter, assignment) in assignments.iter_mut() { + let voter_budget = stake_of(&voter); + let diff = do_equalize_float(voter, voter_budget, assignment, supports, tolerance); + if diff > max_diff { + max_diff = diff; + } + } + + if max_diff < tolerance { + break; + } + } } pub(crate) fn do_equalize_float( - voter: &A, - budget_balance: VoteWeight, - elected_edges: &mut Vec<_PhragmenAssignment>, - support_map: &mut _SupportMap, - tolerance: f64 -) -> f64 where - A: Ord + Clone, + voter: &A, + budget_balance: VoteWeight, + elected_edges: &mut Vec<_PhragmenAssignment>, + support_map: &mut _SupportMap, + tolerance: f64, +) -> f64 +where + A: Ord + Clone, { - let budget = budget_balance as f64; - if 
elected_edges.is_empty() { return 0.0; } - - let stake_used = elected_edges - .iter() - .fold(0.0, |s, e| s + e.1); - - let backed_stakes_iter = elected_edges - .iter() - .filter_map(|e| support_map.get(&e.0)) - .map(|e| e.total); - - let backing_backed_stake = elected_edges - .iter() - .filter(|e| e.1 > 0.0) - .filter_map(|e| support_map.get(&e.0)) - .map(|e| e.total) - .collect::>(); - - let mut difference; - if backing_backed_stake.len() > 0 { - let max_stake = backing_backed_stake - .iter() - .max_by(|x, y| x.partial_cmp(&y).unwrap_or(sp_std::cmp::Ordering::Equal)) - .expect("vector with positive length will have a max; qed"); - let min_stake = backed_stakes_iter - .min_by(|x, y| x.partial_cmp(&y).unwrap_or(sp_std::cmp::Ordering::Equal)) - .expect("iterator with positive length will have a min; qed"); - - difference = max_stake - min_stake; - difference = difference + budget - stake_used; - if difference < tolerance { - return difference; - } - } else { - difference = budget; - } - - // Undo updates to support - elected_edges.iter_mut().for_each(|e| { - if let Some(support) = support_map.get_mut(&e.0) { - support.total = support.total - e.1; - support.others.retain(|i_support| i_support.0 != *voter); - } - e.1 = 0.0; - }); - - elected_edges.sort_unstable_by(|x, y| - support_map.get(&x.0) - .and_then(|x| support_map.get(&y.0).and_then(|y| x.total.partial_cmp(&y.total))) - .unwrap_or(sp_std::cmp::Ordering::Equal) - ); - - let mut cumulative_stake = 0.0; - let mut last_index = elected_edges.len() - 1; - elected_edges.iter_mut().enumerate().for_each(|(idx, e)| { - if let Some(support) = support_map.get_mut(&e.0) { - let stake = support.total; - let stake_mul = stake * (idx as f64); - let stake_sub = stake_mul - cumulative_stake; - if stake_sub > budget { - last_index = idx.checked_sub(1).unwrap_or(0); - return - } - cumulative_stake = cumulative_stake + stake; - } - }); - - let last_stake = elected_edges[last_index].1; - let split_ways = last_index + 1; - let 
excess = budget + cumulative_stake - last_stake * (split_ways as f64); - elected_edges.iter_mut().take(split_ways).for_each(|e| { - if let Some(support) = support_map.get_mut(&e.0) { - e.1 = excess / (split_ways as f64) + last_stake - support.total; - support.total = support.total + e.1; - support.others.push((voter.clone(), e.1)); - } - }); - - difference + let budget = budget_balance as f64; + if elected_edges.is_empty() { + return 0.0; + } + + let stake_used = elected_edges.iter().fold(0.0, |s, e| s + e.1); + + let backed_stakes_iter = elected_edges + .iter() + .filter_map(|e| support_map.get(&e.0)) + .map(|e| e.total); + + let backing_backed_stake = elected_edges + .iter() + .filter(|e| e.1 > 0.0) + .filter_map(|e| support_map.get(&e.0)) + .map(|e| e.total) + .collect::>(); + + let mut difference; + if backing_backed_stake.len() > 0 { + let max_stake = backing_backed_stake + .iter() + .max_by(|x, y| x.partial_cmp(&y).unwrap_or(sp_std::cmp::Ordering::Equal)) + .expect("vector with positive length will have a max; qed"); + let min_stake = backed_stakes_iter + .min_by(|x, y| x.partial_cmp(&y).unwrap_or(sp_std::cmp::Ordering::Equal)) + .expect("iterator with positive length will have a min; qed"); + + difference = max_stake - min_stake; + difference = difference + budget - stake_used; + if difference < tolerance { + return difference; + } + } else { + difference = budget; + } + + // Undo updates to support + elected_edges.iter_mut().for_each(|e| { + if let Some(support) = support_map.get_mut(&e.0) { + support.total = support.total - e.1; + support.others.retain(|i_support| i_support.0 != *voter); + } + e.1 = 0.0; + }); + + elected_edges.sort_unstable_by(|x, y| { + support_map + .get(&x.0) + .and_then(|x| { + support_map + .get(&y.0) + .and_then(|y| x.total.partial_cmp(&y.total)) + }) + .unwrap_or(sp_std::cmp::Ordering::Equal) + }); + + let mut cumulative_stake = 0.0; + let mut last_index = elected_edges.len() - 1; + 
elected_edges.iter_mut().enumerate().for_each(|(idx, e)| { + if let Some(support) = support_map.get_mut(&e.0) { + let stake = support.total; + let stake_mul = stake * (idx as f64); + let stake_sub = stake_mul - cumulative_stake; + if stake_sub > budget { + last_index = idx.checked_sub(1).unwrap_or(0); + return; + } + cumulative_stake = cumulative_stake + stake; + } + }); + + let last_stake = elected_edges[last_index].1; + let split_ways = last_index + 1; + let excess = budget + cumulative_stake - last_stake * (split_ways as f64); + elected_edges.iter_mut().take(split_ways).for_each(|e| { + if let Some(support) = support_map.get_mut(&e.0) { + e.1 = excess / (split_ways as f64) + last_stake - support.total; + support.total = support.total + e.1; + support.others.push((voter.clone(), e.1)); + } + }); + + difference } - -pub(crate) fn create_stake_of(stakes: &[(AccountId, VoteWeight)]) - -> Box VoteWeight> -{ - let mut storage = BTreeMap::::new(); - stakes.iter().for_each(|s| { storage.insert(s.0, s.1); }); - let stake_of = move |who: &AccountId| -> VoteWeight { storage.get(who).unwrap().to_owned() }; - Box::new(stake_of) +pub(crate) fn create_stake_of( + stakes: &[(AccountId, VoteWeight)], +) -> Box VoteWeight> { + let mut storage = BTreeMap::::new(); + stakes.iter().for_each(|s| { + storage.insert(s.0, s.1); + }); + let stake_of = move |who: &AccountId| -> VoteWeight { storage.get(who).unwrap().to_owned() }; + Box::new(stake_of) } - pub fn check_assignments_sum(assignments: Vec>) { - for Assignment { distribution, .. } in assignments { - let mut sum: u128 = Zero::zero(); - distribution.iter().for_each(|(_, p)| sum += p.deconstruct().saturated_into()); - assert_eq_error_rate!(sum, T::ACCURACY.saturated_into(), 1); - } + for Assignment { distribution, .. 
} in assignments { + let mut sum: u128 = Zero::zero(); + distribution + .iter() + .for_each(|(_, p)| sum += p.deconstruct().saturated_into()); + assert_eq_error_rate!(sum, T::ACCURACY.saturated_into(), 1); + } } pub(crate) fn run_and_compare( - candidates: Vec, - voters: Vec<(AccountId, Vec)>, - stake_of: &Box VoteWeight>, - to_elect: usize, - min_to_elect: usize, + candidates: Vec, + voters: Vec<(AccountId, Vec)>, + stake_of: &Box VoteWeight>, + to_elect: usize, + min_to_elect: usize, ) { - // run fixed point code. - let PhragmenResult { winners, assignments } = elect::<_, Output>( - to_elect, - min_to_elect, - candidates.clone(), - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - // run float poc code. - let truth_value = elect_float( - to_elect, - min_to_elect, - candidates, - voters, - &stake_of, - ).unwrap(); - - assert_eq!(winners.iter().map(|(x, _)| x).collect::>(), truth_value.winners.iter().map(|(x, _)| x).collect::>()); - - for Assignment { who, distribution } in assignments.clone() { - if let Some(float_assignments) = truth_value.assignments.iter().find(|x| x.0 == who) { - for (candidate, per_thingy) in distribution { - if let Some(float_assignment) = float_assignments.1.iter().find(|x| x.0 == candidate ) { - assert_eq_error_rate!( - Output::from_fraction(float_assignment.1).deconstruct(), - per_thingy.deconstruct(), - Output::Inner::one(), - ); - } else { - panic!("candidate mismatch. This should never happen.") - } - } - } else { - panic!("nominator mismatch. This should never happen.") - } - } - - check_assignments_sum(assignments); + // run fixed point code. + let PhragmenResult { + winners, + assignments, + } = elect::<_, Output>( + to_elect, + min_to_elect, + candidates.clone(), + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + // run float poc code. 
+ let truth_value = elect_float(to_elect, min_to_elect, candidates, voters, &stake_of).unwrap(); + + assert_eq!( + winners.iter().map(|(x, _)| x).collect::>(), + truth_value + .winners + .iter() + .map(|(x, _)| x) + .collect::>() + ); + + for Assignment { who, distribution } in assignments.clone() { + if let Some(float_assignments) = truth_value.assignments.iter().find(|x| x.0 == who) { + for (candidate, per_thingy) in distribution { + if let Some(float_assignment) = + float_assignments.1.iter().find(|x| x.0 == candidate) + { + assert_eq_error_rate!( + Output::from_fraction(float_assignment.1).deconstruct(), + per_thingy.deconstruct(), + Output::Inner::one(), + ); + } else { + panic!("candidate mismatch. This should never happen.") + } + } + } else { + panic!("nominator mismatch. This should never happen.") + } + } + + check_assignments_sum(assignments); } pub(crate) fn build_support_map_float( - result: &mut _PhragmenResult, - stake_of: FS, + result: &mut _PhragmenResult, + stake_of: FS, ) -> _SupportMap - where for<'r> FS: Fn(&'r AccountId) -> VoteWeight +where + for<'r> FS: Fn(&'r AccountId) -> VoteWeight, { - let mut supports = <_SupportMap>::new(); - result.winners - .iter() - .map(|(e, _)| (e, stake_of(e) as f64)) - .for_each(|(e, s)| { - let item = _Support { own: s, total: s, ..Default::default() }; - supports.insert(e.clone(), item); - }); - - for (n, assignment) in result.assignments.iter_mut() { - for (c, r) in assignment.iter_mut() { - let nominator_stake = stake_of(n) as f64; - let other_stake = nominator_stake * *r; - if let Some(support) = supports.get_mut(c) { - support.total = support.total + other_stake; - support.others.push((n.clone(), other_stake)); - } - *r = other_stake; - } - } - supports + let mut supports = <_SupportMap>::new(); + result + .winners + .iter() + .map(|(e, _)| (e, stake_of(e) as f64)) + .for_each(|(e, s)| { + let item = _Support { + own: s, + total: s, + ..Default::default() + }; + supports.insert(e.clone(), item); + }); + + 
for (n, assignment) in result.assignments.iter_mut() { + for (c, r) in assignment.iter_mut() { + let nominator_stake = stake_of(n) as f64; + let other_stake = nominator_stake * *r; + if let Some(support) = supports.get_mut(c) { + support.total = support.total + other_stake; + support.others.push((n.clone(), other_stake)); + } + *r = other_stake; + } + } + supports } diff --git a/primitives/phragmen/src/node.rs b/primitives/phragmen/src/node.rs index 92ef325a34..908155f55d 100644 --- a/primitives/phragmen/src/node.rs +++ b/primitives/phragmen/src/node.rs @@ -22,10 +22,10 @@ use sp_std::{cell::RefCell, fmt, prelude::*, rc::Rc}; /// The role that a node can accept. #[derive(PartialEq, Eq, Ord, PartialOrd, Clone, RuntimeDebug)] pub(crate) enum NodeRole { - /// A voter. This is synonym to a nominator in a staking context. - Voter, - /// A target. This is synonym to a candidate/validator in a staking context. - Target, + /// A voter. This is synonym to a nominator in a staking context. + Voter, + /// A target. This is synonym to a candidate/validator in a staking context. + Target, } pub(crate) type RefCellOf = Rc>; @@ -35,253 +35,253 @@ pub(crate) type NodeRef = RefCellOf>; /// Otherwise, self votes wouldn't have been indistinguishable. #[derive(PartialOrd, Ord, Clone, PartialEq, Eq)] pub(crate) struct NodeId { - /// An account-like identifier representing the node. - pub who: A, - /// The role of the node. - pub role: NodeRole, + /// An account-like identifier representing the node. + pub who: A, + /// The role of the node. + pub role: NodeRole, } impl NodeId { - /// Create a new [`NodeId`]. - pub fn from(who: A, role: NodeRole) -> Self { - Self { who, role } - } + /// Create a new [`NodeId`]. 
+ pub fn from(who: A, role: NodeRole) -> Self { + Self { who, role } + } } #[cfg(feature = "std")] impl sp_std::fmt::Debug for NodeId { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> sp_std::fmt::Result { - write!( - f, - "Node({:?}, {:?})", - self.who, - if self.role == NodeRole::Voter { - "V" - } else { - "T" - } - ) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> sp_std::fmt::Result { + write!( + f, + "Node({:?}, {:?})", + self.who, + if self.role == NodeRole::Voter { + "V" + } else { + "T" + } + ) + } } /// A one-way graph note. This can only store a pointer to its parent. #[derive(Clone)] pub(crate) struct Node { - /// The identifier of the note. - pub(crate) id: NodeId, - /// The parent pointer. - pub(crate) parent: Option>, + /// The identifier of the note. + pub(crate) id: NodeId, + /// The parent pointer. + pub(crate) parent: Option>, } impl PartialEq for Node { - fn eq(&self, other: &Node) -> bool { - self.id == other.id - } + fn eq(&self, other: &Node) -> bool { + self.id == other.id + } } impl Eq for Node {} #[cfg(feature = "std")] impl fmt::Debug for Node { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!( - f, - "({:?} --> {:?})", - self.id, - self.parent.as_ref().map(|p| p.borrow().id.clone()) - ) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!( + f, + "({:?} --> {:?})", + self.id, + self.parent.as_ref().map(|p| p.borrow().id.clone()) + ) + } } impl Node { - /// Create a new [`Node`] - pub fn new(id: NodeId) -> Node { - Self { id, parent: None } - } - - /// Returns true if `other` is the parent of `who`. - pub fn is_parent_of(who: &NodeRef, other: &NodeRef) -> bool { - if who.borrow().parent.is_none() { - return false; - } - who.borrow().parent.as_ref() == Some(other) - } - - /// Removes the parent of `who`. - pub fn remove_parent(who: &NodeRef) { - who.borrow_mut().parent = None; - } - - /// Sets `who`'s parent to be `parent`. 
- pub fn set_parent_of(who: &NodeRef, parent: &NodeRef) { - who.borrow_mut().parent = Some(parent.clone()); - } - - /// Finds the root of `start`. It return a tuple of `(root, root_vec)` where `root_vec` is the - /// vector of Nodes leading to the root. Hence the first element is the start itself and the - /// last one is the root. As convenient, the root itself is also returned as the first element - /// of the tuple. - /// - /// This function detects cycles and breaks as soon a duplicate node is visited, returning the - /// cycle up to but not including the duplicate node. - /// - /// If you are certain that no cycles exist, you can use [`root_unchecked`]. - pub fn root(start: &NodeRef) -> (NodeRef, Vec>) { - let mut parent_path: Vec> = Vec::new(); - let mut visited: Vec> = Vec::new(); - - parent_path.push(start.clone()); - visited.push(start.clone()); - let mut current = start.clone(); - - while let Some(ref next_parent) = current.clone().borrow().parent { - if visited.contains(next_parent) { - break; - } - parent_path.push(next_parent.clone()); - current = next_parent.clone(); - visited.push(current.clone()); - } - - (current, parent_path) - } - - /// Consumes self and wraps it in a `Rc>`. This type can be used as the pointer type - /// to a parent node. - pub fn into_ref(self) -> NodeRef { - Rc::from(RefCell::from(self)) - } + /// Create a new [`Node`] + pub fn new(id: NodeId) -> Node { + Self { id, parent: None } + } + + /// Returns true if `other` is the parent of `who`. + pub fn is_parent_of(who: &NodeRef, other: &NodeRef) -> bool { + if who.borrow().parent.is_none() { + return false; + } + who.borrow().parent.as_ref() == Some(other) + } + + /// Removes the parent of `who`. + pub fn remove_parent(who: &NodeRef) { + who.borrow_mut().parent = None; + } + + /// Sets `who`'s parent to be `parent`. + pub fn set_parent_of(who: &NodeRef, parent: &NodeRef) { + who.borrow_mut().parent = Some(parent.clone()); + } + + /// Finds the root of `start`. 
It return a tuple of `(root, root_vec)` where `root_vec` is the + /// vector of Nodes leading to the root. Hence the first element is the start itself and the + /// last one is the root. As convenient, the root itself is also returned as the first element + /// of the tuple. + /// + /// This function detects cycles and breaks as soon a duplicate node is visited, returning the + /// cycle up to but not including the duplicate node. + /// + /// If you are certain that no cycles exist, you can use [`root_unchecked`]. + pub fn root(start: &NodeRef) -> (NodeRef, Vec>) { + let mut parent_path: Vec> = Vec::new(); + let mut visited: Vec> = Vec::new(); + + parent_path.push(start.clone()); + visited.push(start.clone()); + let mut current = start.clone(); + + while let Some(ref next_parent) = current.clone().borrow().parent { + if visited.contains(next_parent) { + break; + } + parent_path.push(next_parent.clone()); + current = next_parent.clone(); + visited.push(current.clone()); + } + + (current, parent_path) + } + + /// Consumes self and wraps it in a `Rc>`. This type can be used as the pointer type + /// to a parent node. 
+ pub fn into_ref(self) -> NodeRef { + Rc::from(RefCell::from(self)) + } } #[cfg(test)] mod tests { - use super::*; - - fn id(i: u32) -> NodeId { - NodeId::from(i, NodeRole::Target) - } - - #[test] - fn basic_create_works() { - let node = Node::new(id(10)); - assert_eq!( - node, - Node { - id: NodeId { - who: 10, - role: NodeRole::Target - }, - parent: None - } - ); - } - - #[test] - fn set_parent_works() { - let a = Node::new(id(10)).into_ref(); - let b = Node::new(id(20)).into_ref(); - - assert_eq!(a.borrow().parent, None); - Node::set_parent_of(&a, &b); - assert_eq!(*a.borrow().parent.as_ref().unwrap(), b); - } - - #[test] - fn get_root_singular() { - let a = Node::new(id(1)).into_ref(); - assert_eq!(Node::root(&a), (a.clone(), vec![a.clone()])); - } - - #[test] - fn get_root_works() { - // D <-- A <-- B <-- C - // \ - // <-- E - let a = Node::new(id(1)).into_ref(); - let b = Node::new(id(2)).into_ref(); - let c = Node::new(id(3)).into_ref(); - let d = Node::new(id(4)).into_ref(); - let e = Node::new(id(5)).into_ref(); - let f = Node::new(id(6)).into_ref(); - - Node::set_parent_of(&c, &b); - Node::set_parent_of(&b, &a); - Node::set_parent_of(&e, &a); - Node::set_parent_of(&a, &d); - - assert_eq!( - Node::root(&e), - (d.clone(), vec![e.clone(), a.clone(), d.clone()]), - ); - - assert_eq!(Node::root(&a), (d.clone(), vec![a.clone(), d.clone()]),); - - assert_eq!( - Node::root(&c), - (d.clone(), vec![c.clone(), b.clone(), a.clone(), d.clone()]), - ); - - // D A <-- B <-- C - // F <-- / \ - // <-- E - Node::set_parent_of(&a, &f); - - assert_eq!(Node::root(&a), (f.clone(), vec![a.clone(), f.clone()]),); - - assert_eq!( - Node::root(&c), - (f.clone(), vec![c.clone(), b.clone(), a.clone(), f.clone()]), - ); - } - - #[test] - fn get_root_on_cycle() { - // A ---> B - // | | - // <---- C - let a = Node::new(id(1)).into_ref(); - let b = Node::new(id(2)).into_ref(); - let c = Node::new(id(3)).into_ref(); - - Node::set_parent_of(&a, &b); - Node::set_parent_of(&b, &c); - 
Node::set_parent_of(&c, &a); - - let (root, path) = Node::root(&a); - assert_eq!(root, c); - assert_eq!(path.clone(), vec![a.clone(), b.clone(), c.clone()]); - } - - #[test] - fn get_root_on_cycle_2() { - // A ---> B - // | | | - // - C - let a = Node::new(id(1)).into_ref(); - let b = Node::new(id(2)).into_ref(); - let c = Node::new(id(3)).into_ref(); - - Node::set_parent_of(&a, &b); - Node::set_parent_of(&b, &c); - Node::set_parent_of(&c, &b); - - let (root, path) = Node::root(&a); - assert_eq!(root, c); - assert_eq!(path.clone(), vec![a.clone(), b.clone(), c.clone()]); - } - - #[test] - fn node_cmp_stack_overflows_on_non_unique_elements() { - // To make sure we don't stack overflow on duplicate who. This needs manual impl of - // PartialEq. - let a = Node::new(id(1)).into_ref(); - let b = Node::new(id(2)).into_ref(); - let c = Node::new(id(3)).into_ref(); - - Node::set_parent_of(&a, &b); - Node::set_parent_of(&b, &c); - Node::set_parent_of(&c, &a); - - Node::root(&a); - } + use super::*; + + fn id(i: u32) -> NodeId { + NodeId::from(i, NodeRole::Target) + } + + #[test] + fn basic_create_works() { + let node = Node::new(id(10)); + assert_eq!( + node, + Node { + id: NodeId { + who: 10, + role: NodeRole::Target + }, + parent: None + } + ); + } + + #[test] + fn set_parent_works() { + let a = Node::new(id(10)).into_ref(); + let b = Node::new(id(20)).into_ref(); + + assert_eq!(a.borrow().parent, None); + Node::set_parent_of(&a, &b); + assert_eq!(*a.borrow().parent.as_ref().unwrap(), b); + } + + #[test] + fn get_root_singular() { + let a = Node::new(id(1)).into_ref(); + assert_eq!(Node::root(&a), (a.clone(), vec![a.clone()])); + } + + #[test] + fn get_root_works() { + // D <-- A <-- B <-- C + // \ + // <-- E + let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + let d = Node::new(id(4)).into_ref(); + let e = Node::new(id(5)).into_ref(); + let f = Node::new(id(6)).into_ref(); + + Node::set_parent_of(&c, &b); 
+ Node::set_parent_of(&b, &a); + Node::set_parent_of(&e, &a); + Node::set_parent_of(&a, &d); + + assert_eq!( + Node::root(&e), + (d.clone(), vec![e.clone(), a.clone(), d.clone()]), + ); + + assert_eq!(Node::root(&a), (d.clone(), vec![a.clone(), d.clone()]),); + + assert_eq!( + Node::root(&c), + (d.clone(), vec![c.clone(), b.clone(), a.clone(), d.clone()]), + ); + + // D A <-- B <-- C + // F <-- / \ + // <-- E + Node::set_parent_of(&a, &f); + + assert_eq!(Node::root(&a), (f.clone(), vec![a.clone(), f.clone()]),); + + assert_eq!( + Node::root(&c), + (f.clone(), vec![c.clone(), b.clone(), a.clone(), f.clone()]), + ); + } + + #[test] + fn get_root_on_cycle() { + // A ---> B + // | | + // <---- C + let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + + Node::set_parent_of(&a, &b); + Node::set_parent_of(&b, &c); + Node::set_parent_of(&c, &a); + + let (root, path) = Node::root(&a); + assert_eq!(root, c); + assert_eq!(path.clone(), vec![a.clone(), b.clone(), c.clone()]); + } + + #[test] + fn get_root_on_cycle_2() { + // A ---> B + // | | | + // - C + let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + + Node::set_parent_of(&a, &b); + Node::set_parent_of(&b, &c); + Node::set_parent_of(&c, &b); + + let (root, path) = Node::root(&a); + assert_eq!(root, c); + assert_eq!(path.clone(), vec![a.clone(), b.clone(), c.clone()]); + } + + #[test] + fn node_cmp_stack_overflows_on_non_unique_elements() { + // To make sure we don't stack overflow on duplicate who. This needs manual impl of + // PartialEq. 
+ let a = Node::new(id(1)).into_ref(); + let b = Node::new(id(2)).into_ref(); + let c = Node::new(id(3)).into_ref(); + + Node::set_parent_of(&a, &b); + Node::set_parent_of(&b, &c); + Node::set_parent_of(&c, &a); + + Node::root(&a); + } } diff --git a/primitives/phragmen/src/reduce.rs b/primitives/phragmen/src/reduce.rs index 1f6f6c3b99..8587357ae2 100644 --- a/primitives/phragmen/src/reduce.rs +++ b/primitives/phragmen/src/reduce.rs @@ -50,8 +50,8 @@ use crate::node::{Node, NodeId, NodeRef, NodeRole}; use crate::{ExtendedBalance, IdentifierT, StakedAssignment}; use sp_runtime::traits::{Bounded, Zero}; use sp_std::{ - collections::btree_map::{BTreeMap, Entry::*}, - prelude::*, + collections::btree_map::{BTreeMap, Entry::*}, + prelude::*, }; /// Map type used for reduce_4. Can be easily swapped with HashMap. @@ -59,40 +59,44 @@ type Map = BTreeMap<(A, A), A>; /// Returns all combinations of size two in the collection `input` with no repetition. fn combinations_2(input: &[T]) -> Vec<(T, T)> { - let n = input.len(); - if n < 2 { - return Default::default(); - } - - let mut comb = Vec::with_capacity(n * (n - 1) / 2); - for i in 0..n { - for j in i + 1..n { - comb.push((input[i].clone(), input[j].clone())) - } - } - comb + let n = input.len(); + if n < 2 { + return Default::default(); + } + + let mut comb = Vec::with_capacity(n * (n - 1) / 2); + for i in 0..n { + for j in i + 1..n { + comb.push((input[i].clone(), input[j].clone())) + } + } + comb } /// Returns the count of trailing common elements in two slices. pub(crate) fn trailing_common(t1: &[T], t2: &[T]) -> usize { - t1.iter().rev().zip(t2.iter().rev()).take_while(|e| e.0 == e.1).count() + t1.iter() + .rev() + .zip(t2.iter().rev()) + .take_while(|e| e.0 == e.1) + .count() } /// Merges two parent roots as described by the reduce algorithm. 
fn merge(voter_root_path: Vec>, target_root_path: Vec>) { - let (shorter_path, longer_path) = if voter_root_path.len() <= target_root_path.len() { - (voter_root_path, target_root_path) - } else { - (target_root_path, voter_root_path) - }; - - // iterate from last to beginning, skipping the first one. This asserts that - // indexing is always correct. - shorter_path - .iter() - .zip(shorter_path.iter().skip(1)) - .for_each(|(voter, next)| Node::set_parent_of(&next, &voter)); - Node::set_parent_of(&shorter_path[0], &longer_path[0]); + let (shorter_path, longer_path) = if voter_root_path.len() <= target_root_path.len() { + (voter_root_path, target_root_path) + } else { + (target_root_path, voter_root_path) + }; + + // iterate from last to beginning, skipping the first one. This asserts that + // indexing is always correct. + shorter_path + .iter() + .zip(shorter_path.iter().skip(1)) + .for_each(|(voter, next)| Node::set_parent_of(&next, &voter)); + Node::set_parent_of(&shorter_path[0], &longer_path[0]); } /// Reduce only redundant edges with cycle length of 4. @@ -104,217 +108,218 @@ fn merge(voter_root_path: Vec>, target_root_path: Vec /// /// O(|E_w| ⋅ k). fn reduce_4(assignments: &mut Vec>) -> u32 { - let mut combination_map: Map = Map::new(); - let mut num_changed: u32 = Zero::zero(); - - // we have to use the old fashioned loops here with manual indexing. Borrowing assignments will - // not work since then there is NO way to mutate it inside. 
- for assignment_index in 0..assignments.len() { - let who = assignments[assignment_index].who.clone(); - - // all combinations for this particular voter - let distribution_ids = &assignments[assignment_index] - .distribution - .iter() - .map(|(t, _p)| t.clone()) - .collect::>(); - let candidate_combinations = combinations_2(distribution_ids); - - for (v1, v2) in candidate_combinations { - match combination_map.entry((v1.clone(), v2.clone())) { - Vacant(entry) => { - entry.insert(who.clone()); - } - Occupied(mut entry) => { - let other_who = entry.get_mut(); - - // double check if who is still voting for this pair. If not, it means that this - // pair is no longer valid and must have been removed in previous rounds. The - // reason for this is subtle; candidate_combinations is created once while the - // inner loop might remove some edges. Note that if count() > 2, the we have - // duplicates. - if assignments[assignment_index] - .distribution - .iter() - .filter(|(t, _)| *t == v1 || *t == v2) - .count() != 2 - { - continue; - } - - // check if other_who voted for the same pair v1, v2. - let maybe_other_assignments = assignments.iter().find(|a| a.who == *other_who); - if maybe_other_assignments.is_none() { - continue; - } - let other_assignment = - maybe_other_assignments.expect("value is checked to be 'Some'"); - - // Collect potential cycle votes - let mut other_cycle_votes = other_assignment - .distribution - .iter() - .filter_map(|(t, w)| { - if *t == v1 || *t == v2 { - Some((t.clone(), *w)) - } else { - None - } - }) - .collect::>(); - - let other_votes_count = other_cycle_votes.len(); - - // If the length is more than 2, then we have identified duplicates. For now, we - // just skip. Later on we can early exit and stop processing this data since it - // is corrupt anyhow. - debug_assert!(other_votes_count <= 2); - - if other_votes_count < 2 { - // This is not a cycle. Replace and continue. 
- *other_who = who.clone(); - continue; - } else if other_votes_count == 2 { - // This is a cycle. - let mut who_cycle_votes: Vec<(A, ExtendedBalance)> = Vec::with_capacity(2); - assignments[assignment_index] - .distribution - .iter() - .for_each(|(t, w)| { - if *t == v1 || *t == v2 { - who_cycle_votes.push((t.clone(), *w)); - } - }); - - if who_cycle_votes.len() != 2 { - continue; - } - - // Align the targets similarly. This helps with the circulation below. - if other_cycle_votes[0].0 != who_cycle_votes[0].0 { - other_cycle_votes.swap(0, 1); - } - - // Find min - let mut min_value: ExtendedBalance = Bounded::max_value(); - let mut min_index: usize = 0; - let cycle = who_cycle_votes - .iter() - .chain(other_cycle_votes.iter()) - .enumerate() - .map(|(index, (t, w))| { - if *w <= min_value { - min_value = *w; - min_index = index; - } - (t.clone(), *w) - }) - .collect::>(); - - // min was in the first part of the chained iters - let mut increase_indices: Vec = Vec::new(); - let mut decrease_indices: Vec = Vec::new(); - decrease_indices.push(min_index); - if min_index < 2 { - // min_index == 0 => sibling_index <- 1 - // min_index == 1 => sibling_index <- 0 - let sibling_index = 1 - min_index; - increase_indices.push(sibling_index); - // valid because the two chained sections of `cycle` are aligned; - // index [0, 2] are both voting for v1 or both v2. Same goes for [1, 3]. - decrease_indices.push(sibling_index + 2); - increase_indices.push(min_index + 2); - } else { - // min_index == 2 => sibling_index <- 3 - // min_index == 3 => sibling_index <- 2 - let sibling_index = 3 - min_index % 2; - increase_indices.push(sibling_index); - // valid because the two chained sections of `cycle` are aligned; - // index [0, 2] are both voting for v1 or both v2. Same goes for [1, 3]. 
- decrease_indices.push(sibling_index - 2); - increase_indices.push(min_index - 2); - } - - // apply changes - let mut remove_indices: Vec = Vec::with_capacity(1); - increase_indices.into_iter().for_each(|i| { - let voter = if i < 2 { - who.clone() - } else { - other_who.clone() - }; - // Note: so this is pretty ambiguous. We should only look for one - // assignment that meets this criteria and if we find multiple then that - // is a corrupt input. Same goes for the next block. - assignments - .iter_mut() - .filter(|a| a.who == voter) - .for_each(|ass| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == cycle[i].0) - .map(|idx| { - let next_value = - ass.distribution[idx].1.saturating_add(min_value); - ass.distribution[idx].1 = next_value; - }); - }); - }); - decrease_indices.into_iter().for_each(|i| { - let voter = if i < 2 { - who.clone() - } else { - other_who.clone() - }; - assignments - .iter_mut() - .filter(|a| a.who == voter) - .for_each(|ass| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == cycle[i].0) - .map(|idx| { - let next_value = - ass.distribution[idx].1.saturating_sub(min_value); - if next_value.is_zero() { - ass.distribution.remove(idx); - remove_indices.push(i); - num_changed += 1; - } else { - ass.distribution[idx].1 = next_value; - } - }); - }); - }); - - // remove either one of them. - let who_removed = remove_indices.iter().find(|i| **i < 2usize).is_some(); - let other_removed = - remove_indices.into_iter().find(|i| *i >= 2usize).is_some(); - - match (who_removed, other_removed) { - (false, true) => { - *other_who = who.clone(); - } - (true, false) => { - // nothing, other_who can stay there. - } - (true, true) => { - // remove and don't replace - entry.remove(); - } - (false, false) => { - // Neither of the edges was removed? impossible. 
- panic!("Duplicate voter (or other corrupt input)."); - } - } - } - } - } - } - } - - num_changed + let mut combination_map: Map = Map::new(); + let mut num_changed: u32 = Zero::zero(); + + // we have to use the old fashioned loops here with manual indexing. Borrowing assignments will + // not work since then there is NO way to mutate it inside. + for assignment_index in 0..assignments.len() { + let who = assignments[assignment_index].who.clone(); + + // all combinations for this particular voter + let distribution_ids = &assignments[assignment_index] + .distribution + .iter() + .map(|(t, _p)| t.clone()) + .collect::>(); + let candidate_combinations = combinations_2(distribution_ids); + + for (v1, v2) in candidate_combinations { + match combination_map.entry((v1.clone(), v2.clone())) { + Vacant(entry) => { + entry.insert(who.clone()); + } + Occupied(mut entry) => { + let other_who = entry.get_mut(); + + // double check if who is still voting for this pair. If not, it means that this + // pair is no longer valid and must have been removed in previous rounds. The + // reason for this is subtle; candidate_combinations is created once while the + // inner loop might remove some edges. Note that if count() > 2, the we have + // duplicates. + if assignments[assignment_index] + .distribution + .iter() + .filter(|(t, _)| *t == v1 || *t == v2) + .count() + != 2 + { + continue; + } + + // check if other_who voted for the same pair v1, v2. 
+ let maybe_other_assignments = assignments.iter().find(|a| a.who == *other_who); + if maybe_other_assignments.is_none() { + continue; + } + let other_assignment = + maybe_other_assignments.expect("value is checked to be 'Some'"); + + // Collect potential cycle votes + let mut other_cycle_votes = other_assignment + .distribution + .iter() + .filter_map(|(t, w)| { + if *t == v1 || *t == v2 { + Some((t.clone(), *w)) + } else { + None + } + }) + .collect::>(); + + let other_votes_count = other_cycle_votes.len(); + + // If the length is more than 2, then we have identified duplicates. For now, we + // just skip. Later on we can early exit and stop processing this data since it + // is corrupt anyhow. + debug_assert!(other_votes_count <= 2); + + if other_votes_count < 2 { + // This is not a cycle. Replace and continue. + *other_who = who.clone(); + continue; + } else if other_votes_count == 2 { + // This is a cycle. + let mut who_cycle_votes: Vec<(A, ExtendedBalance)> = Vec::with_capacity(2); + assignments[assignment_index] + .distribution + .iter() + .for_each(|(t, w)| { + if *t == v1 || *t == v2 { + who_cycle_votes.push((t.clone(), *w)); + } + }); + + if who_cycle_votes.len() != 2 { + continue; + } + + // Align the targets similarly. This helps with the circulation below. 
+ if other_cycle_votes[0].0 != who_cycle_votes[0].0 { + other_cycle_votes.swap(0, 1); + } + + // Find min + let mut min_value: ExtendedBalance = Bounded::max_value(); + let mut min_index: usize = 0; + let cycle = who_cycle_votes + .iter() + .chain(other_cycle_votes.iter()) + .enumerate() + .map(|(index, (t, w))| { + if *w <= min_value { + min_value = *w; + min_index = index; + } + (t.clone(), *w) + }) + .collect::>(); + + // min was in the first part of the chained iters + let mut increase_indices: Vec = Vec::new(); + let mut decrease_indices: Vec = Vec::new(); + decrease_indices.push(min_index); + if min_index < 2 { + // min_index == 0 => sibling_index <- 1 + // min_index == 1 => sibling_index <- 0 + let sibling_index = 1 - min_index; + increase_indices.push(sibling_index); + // valid because the two chained sections of `cycle` are aligned; + // index [0, 2] are both voting for v1 or both v2. Same goes for [1, 3]. + decrease_indices.push(sibling_index + 2); + increase_indices.push(min_index + 2); + } else { + // min_index == 2 => sibling_index <- 3 + // min_index == 3 => sibling_index <- 2 + let sibling_index = 3 - min_index % 2; + increase_indices.push(sibling_index); + // valid because the two chained sections of `cycle` are aligned; + // index [0, 2] are both voting for v1 or both v2. Same goes for [1, 3]. + decrease_indices.push(sibling_index - 2); + increase_indices.push(min_index - 2); + } + + // apply changes + let mut remove_indices: Vec = Vec::with_capacity(1); + increase_indices.into_iter().for_each(|i| { + let voter = if i < 2 { + who.clone() + } else { + other_who.clone() + }; + // Note: so this is pretty ambiguous. We should only look for one + // assignment that meets this criteria and if we find multiple then that + // is a corrupt input. Same goes for the next block. 
+ assignments + .iter_mut() + .filter(|a| a.who == voter) + .for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_add(min_value); + ass.distribution[idx].1 = next_value; + }); + }); + }); + decrease_indices.into_iter().for_each(|i| { + let voter = if i < 2 { + who.clone() + } else { + other_who.clone() + }; + assignments + .iter_mut() + .filter(|a| a.who == voter) + .for_each(|ass| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == cycle[i].0) + .map(|idx| { + let next_value = + ass.distribution[idx].1.saturating_sub(min_value); + if next_value.is_zero() { + ass.distribution.remove(idx); + remove_indices.push(i); + num_changed += 1; + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); + }); + + // remove either one of them. + let who_removed = remove_indices.iter().find(|i| **i < 2usize).is_some(); + let other_removed = + remove_indices.into_iter().find(|i| *i >= 2usize).is_some(); + + match (who_removed, other_removed) { + (false, true) => { + *other_who = who.clone(); + } + (true, false) => { + // nothing, other_who can stay there. + } + (true, true) => { + // remove and don't replace + entry.remove(); + } + (false, false) => { + // Neither of the edges was removed? impossible. + panic!("Duplicate voter (or other corrupt input)."); + } + } + } + } + } + } + } + + num_changed } /// Reduce redundant edges from the edge weight graph, with all possible length. @@ -328,315 +333,315 @@ fn reduce_4(assignments: &mut Vec>) -> u32 { /// /// O(|Ew| ⋅ m) fn reduce_all(assignments: &mut Vec>) -> u32 { - let mut num_changed: u32 = Zero::zero(); - let mut tree: BTreeMap, NodeRef> = BTreeMap::new(); - - // NOTE: This code can heavily use an index cache. Looking up a pair of (voter, target) in the - // assignments happens numerous times and and we can save time. 
For now it is written as such - // because abstracting some of this code into a function/closure is super hard due to borrow - // checks (and most likely needs unsafe code at the end). For now I will keep it as it and - // refactor later. - - // a flat iterator of (voter, target) over all pairs of votes. Similar to reduce_4, we loop - // without borrowing. - for assignment_index in 0..assignments.len() { - let voter = assignments[assignment_index].who.clone(); - - let mut dist_index = 0; - loop { - // A distribution could have been removed. We don't know for sure. Hence, we check. - let maybe_dist = assignments[assignment_index].distribution.get(dist_index); - if maybe_dist.is_none() { - // The rest of this loop is moot. - break; - } - let (target, _) = maybe_dist.expect("Value checked to be some").clone(); - - // store if they existed already. - let voter_id = NodeId::from(voter.clone(), NodeRole::Voter); - let target_id = NodeId::from(target.clone(), NodeRole::Target); - let voter_exists = tree.contains_key(&voter_id); - let target_exists = tree.contains_key(&target_id); - - // create both. - let voter_node = tree - .entry(voter_id.clone()) - .or_insert(Node::new(voter_id).into_ref()) - .clone(); - let target_node = tree - .entry(target_id.clone()) - .or_insert(Node::new(target_id).into_ref()) - .clone(); - - // If one exists but the other one doesn't, or if both does not, then set the existing - // one as the parent of the non-existing one and move on. Else, continue with the rest - // of the code. 
- match (voter_exists, target_exists) { - (false, false) => { - Node::set_parent_of(&target_node, &voter_node); - dist_index += 1; - continue; - } - (false, true) => { - Node::set_parent_of(&voter_node, &target_node); - dist_index += 1; - continue; - } - (true, false) => { - Node::set_parent_of(&target_node, &voter_node); - dist_index += 1; - continue; - } - (true, true) => { /* don't continue and execute the rest */ } - }; - - let (voter_root, voter_root_path) = Node::root(&voter_node); - let (target_root, target_root_path) = Node::root(&target_node); - - if voter_root != target_root { - // swap - merge(voter_root_path, target_root_path); - dist_index += 1; - } else { - // find common and cycle. - let common_count = trailing_common(&voter_root_path, &target_root_path); - - // because roots are the same. - #[cfg(feature = "std")] - debug_assert_eq!( - target_root_path.last().unwrap(), - voter_root_path.last().unwrap() - ); - debug_assert!(common_count > 0); - - // cycle part of each path will be `path[path.len() - common_count - 1 : 0]` - // NOTE: the order of chaining is important! it is always build from [target, ..., - // voter] - let cycle = target_root_path - .iter() - .take(target_root_path.len() - common_count + 1) - .cloned() - .chain( - voter_root_path - .iter() - .take(voter_root_path.len() - common_count) - .rev() - .cloned(), - ) - .collect::>>(); - - // a cycle's length shall always be multiple of two. - #[cfg(feature = "std")] - debug_assert_eq!(cycle.len() % 2, 0); - - // find minimum of cycle. - let mut min_value: ExtendedBalance = Bounded::max_value(); - // The voter and the target pair that create the min edge. - let mut min_target: A = Default::default(); - let mut min_voter: A = Default::default(); - // The index of the min in opaque cycle list. 
- let mut min_index = 0usize; - // 1 -> next // 0 -> prev - let mut min_direction = 0u32; - // helpers - let next_index = |i| { - if i < (cycle.len() - 1) { - i + 1 - } else { - 0 - } - }; - let prev_index = |i| { - if i > 0 { - i - 1 - } else { - cycle.len() - 1 - } - }; - for i in 0..cycle.len() { - if cycle[i].borrow().id.role == NodeRole::Voter { - // NOTE: sadly way too many clones since I don't want to make A: Copy - let current = cycle[i].borrow().id.who.clone(); - let next = cycle[next_index(i)].borrow().id.who.clone(); - let prev = cycle[prev_index(i)].borrow().id.who.clone(); - assignments.iter().find(|a| a.who == current).map(|ass| { - ass.distribution.iter().find(|d| d.0 == next).map(|(_, w)| { - if *w < min_value { - min_value = *w; - min_target = next.clone(); - min_voter = current.clone(); - min_index = i; - min_direction = 1; - } - }) - }); - assignments.iter().find(|a| a.who == current).map(|ass| { - ass.distribution.iter().find(|d| d.0 == prev).map(|(_, w)| { - if *w < min_value { - min_value = *w; - min_target = prev.clone(); - min_voter = current.clone(); - min_index = i; - min_direction = 0; - } - }) - }); - } - } - - // if the min edge is in the voter's sub-chain. - // [target, ..., X, Y, ... 
voter] - let target_chunk = target_root_path.len() - common_count; - let min_chain_in_voter = (min_index + min_direction as usize) > target_chunk; - - // walk over the cycle and update the weights - let mut should_inc_counter = true; - let start_operation_add = ((min_index % 2) + min_direction as usize) % 2 == 1; - let mut additional_removed = Vec::new(); - for i in 0..cycle.len() { - let current = cycle[i].borrow(); - if current.id.role == NodeRole::Voter { - let prev = cycle[prev_index(i)].borrow(); - assignments - .iter_mut() - .enumerate() - .filter(|(_, a)| a.who == current.id.who) - .for_each(|(target_ass_index, ass)| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == prev.id.who) - .map(|idx| { - let next_value = if i % 2 == 0 { - if start_operation_add { - ass.distribution[idx].1.saturating_add(min_value) - } else { - ass.distribution[idx].1.saturating_sub(min_value) - } - } else { - if start_operation_add { - ass.distribution[idx].1.saturating_sub(min_value) - } else { - ass.distribution[idx].1.saturating_add(min_value) - } - }; - - if next_value.is_zero() { - // if the removed edge is from the current assignment, dis_index - // should NOT be increased. - if target_ass_index == assignment_index { - should_inc_counter = false - } - ass.distribution.remove(idx); - num_changed += 1; - // only add if this is not the min itself. 
- if !(i == min_index && min_direction == 0) { - additional_removed.push(( - cycle[i].clone(), - cycle[prev_index(i)].clone(), - )); - } - } else { - ass.distribution[idx].1 = next_value; - } - }); - }); - - let next = cycle[next_index(i)].borrow(); - assignments - .iter_mut() - .enumerate() - .filter(|(_, a)| a.who == current.id.who) - .for_each(|(target_ass_index, ass)| { - ass.distribution - .iter_mut() - .position(|(t, _)| *t == next.id.who) - .map(|idx| { - let next_value = if i % 2 == 0 { - if start_operation_add { - ass.distribution[idx].1.saturating_sub(min_value) - } else { - ass.distribution[idx].1.saturating_add(min_value) - } - } else { - if start_operation_add { - ass.distribution[idx].1.saturating_add(min_value) - } else { - ass.distribution[idx].1.saturating_sub(min_value) - } - }; - - if next_value.is_zero() { - // if the removed edge is from the current assignment, dis_index - // should NOT be increased. - if target_ass_index == assignment_index { - should_inc_counter = false - } - ass.distribution.remove(idx); - num_changed += 1; - if !(i == min_index && min_direction == 1) { - additional_removed.push(( - cycle[i].clone(), - cycle[next_index(i)].clone(), - )); - } - } else { - ass.distribution[idx].1 = next_value; - } - }); - }); - } - } - - // don't do anything if the edge removed itself. This is always the first and last - // element - let should_reorg = !(min_index == (cycle.len() - 1) && min_direction == 1); - - // re-org. - if should_reorg { - let min_edge = vec![min_voter, min_target]; - if min_chain_in_voter { - // NOTE: safe; voter_root_path is always bigger than 1 element. 
- for i in 0..voter_root_path.len() - 1 { - let current = voter_root_path[i].clone().borrow().id.who.clone(); - let next = voter_root_path[i + 1].clone().borrow().id.who.clone(); - if min_edge.contains(¤t) && min_edge.contains(&next) { - break; - } - Node::set_parent_of(&voter_root_path[i + 1], &voter_root_path[i]); - } - Node::set_parent_of(&voter_node, &target_node); - } else { - // NOTE: safe; target_root_path is always bigger than 1 element. - for i in 0..target_root_path.len() - 1 { - let current = target_root_path[i].clone().borrow().id.who.clone(); - let next = target_root_path[i + 1].clone().borrow().id.who.clone(); - if min_edge.contains(¤t) && min_edge.contains(&next) { - break; - } - Node::set_parent_of(&target_root_path[i + 1], &target_root_path[i]); - } - Node::set_parent_of(&target_node, &voter_node); - } - } - - // remove every other node which has collapsed to zero - for (r1, r2) in additional_removed { - if Node::is_parent_of(&r1, &r2) { - Node::remove_parent(&r1); - } else if Node::is_parent_of(&r2, &r1) { - Node::remove_parent(&r2); - } - } - - // increment the counter if needed. - if should_inc_counter { - dist_index += 1; - } - } - } - } - - num_changed + let mut num_changed: u32 = Zero::zero(); + let mut tree: BTreeMap, NodeRef> = BTreeMap::new(); + + // NOTE: This code can heavily use an index cache. Looking up a pair of (voter, target) in the + // assignments happens numerous times and and we can save time. For now it is written as such + // because abstracting some of this code into a function/closure is super hard due to borrow + // checks (and most likely needs unsafe code at the end). For now I will keep it as it and + // refactor later. + + // a flat iterator of (voter, target) over all pairs of votes. Similar to reduce_4, we loop + // without borrowing. 
+ for assignment_index in 0..assignments.len() { + let voter = assignments[assignment_index].who.clone(); + + let mut dist_index = 0; + loop { + // A distribution could have been removed. We don't know for sure. Hence, we check. + let maybe_dist = assignments[assignment_index].distribution.get(dist_index); + if maybe_dist.is_none() { + // The rest of this loop is moot. + break; + } + let (target, _) = maybe_dist.expect("Value checked to be some").clone(); + + // store if they existed already. + let voter_id = NodeId::from(voter.clone(), NodeRole::Voter); + let target_id = NodeId::from(target.clone(), NodeRole::Target); + let voter_exists = tree.contains_key(&voter_id); + let target_exists = tree.contains_key(&target_id); + + // create both. + let voter_node = tree + .entry(voter_id.clone()) + .or_insert(Node::new(voter_id).into_ref()) + .clone(); + let target_node = tree + .entry(target_id.clone()) + .or_insert(Node::new(target_id).into_ref()) + .clone(); + + // If one exists but the other one doesn't, or if both does not, then set the existing + // one as the parent of the non-existing one and move on. Else, continue with the rest + // of the code. + match (voter_exists, target_exists) { + (false, false) => { + Node::set_parent_of(&target_node, &voter_node); + dist_index += 1; + continue; + } + (false, true) => { + Node::set_parent_of(&voter_node, &target_node); + dist_index += 1; + continue; + } + (true, false) => { + Node::set_parent_of(&target_node, &voter_node); + dist_index += 1; + continue; + } + (true, true) => { /* don't continue and execute the rest */ } + }; + + let (voter_root, voter_root_path) = Node::root(&voter_node); + let (target_root, target_root_path) = Node::root(&target_node); + + if voter_root != target_root { + // swap + merge(voter_root_path, target_root_path); + dist_index += 1; + } else { + // find common and cycle. + let common_count = trailing_common(&voter_root_path, &target_root_path); + + // because roots are the same. 
+ #[cfg(feature = "std")] + debug_assert_eq!( + target_root_path.last().unwrap(), + voter_root_path.last().unwrap() + ); + debug_assert!(common_count > 0); + + // cycle part of each path will be `path[path.len() - common_count - 1 : 0]` + // NOTE: the order of chaining is important! it is always build from [target, ..., + // voter] + let cycle = target_root_path + .iter() + .take(target_root_path.len() - common_count + 1) + .cloned() + .chain( + voter_root_path + .iter() + .take(voter_root_path.len() - common_count) + .rev() + .cloned(), + ) + .collect::>>(); + + // a cycle's length shall always be multiple of two. + #[cfg(feature = "std")] + debug_assert_eq!(cycle.len() % 2, 0); + + // find minimum of cycle. + let mut min_value: ExtendedBalance = Bounded::max_value(); + // The voter and the target pair that create the min edge. + let mut min_target: A = Default::default(); + let mut min_voter: A = Default::default(); + // The index of the min in opaque cycle list. + let mut min_index = 0usize; + // 1 -> next // 0 -> prev + let mut min_direction = 0u32; + // helpers + let next_index = |i| { + if i < (cycle.len() - 1) { + i + 1 + } else { + 0 + } + }; + let prev_index = |i| { + if i > 0 { + i - 1 + } else { + cycle.len() - 1 + } + }; + for i in 0..cycle.len() { + if cycle[i].borrow().id.role == NodeRole::Voter { + // NOTE: sadly way too many clones since I don't want to make A: Copy + let current = cycle[i].borrow().id.who.clone(); + let next = cycle[next_index(i)].borrow().id.who.clone(); + let prev = cycle[prev_index(i)].borrow().id.who.clone(); + assignments.iter().find(|a| a.who == current).map(|ass| { + ass.distribution.iter().find(|d| d.0 == next).map(|(_, w)| { + if *w < min_value { + min_value = *w; + min_target = next.clone(); + min_voter = current.clone(); + min_index = i; + min_direction = 1; + } + }) + }); + assignments.iter().find(|a| a.who == current).map(|ass| { + ass.distribution.iter().find(|d| d.0 == prev).map(|(_, w)| { + if *w < min_value { + 
min_value = *w; + min_target = prev.clone(); + min_voter = current.clone(); + min_index = i; + min_direction = 0; + } + }) + }); + } + } + + // if the min edge is in the voter's sub-chain. + // [target, ..., X, Y, ... voter] + let target_chunk = target_root_path.len() - common_count; + let min_chain_in_voter = (min_index + min_direction as usize) > target_chunk; + + // walk over the cycle and update the weights + let mut should_inc_counter = true; + let start_operation_add = ((min_index % 2) + min_direction as usize) % 2 == 1; + let mut additional_removed = Vec::new(); + for i in 0..cycle.len() { + let current = cycle[i].borrow(); + if current.id.role == NodeRole::Voter { + let prev = cycle[prev_index(i)].borrow(); + assignments + .iter_mut() + .enumerate() + .filter(|(_, a)| a.who == current.id.who) + .for_each(|(target_ass_index, ass)| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == prev.id.who) + .map(|idx| { + let next_value = if i % 2 == 0 { + if start_operation_add { + ass.distribution[idx].1.saturating_add(min_value) + } else { + ass.distribution[idx].1.saturating_sub(min_value) + } + } else { + if start_operation_add { + ass.distribution[idx].1.saturating_sub(min_value) + } else { + ass.distribution[idx].1.saturating_add(min_value) + } + }; + + if next_value.is_zero() { + // if the removed edge is from the current assignment, dis_index + // should NOT be increased. + if target_ass_index == assignment_index { + should_inc_counter = false + } + ass.distribution.remove(idx); + num_changed += 1; + // only add if this is not the min itself. 
+ if !(i == min_index && min_direction == 0) { + additional_removed.push(( + cycle[i].clone(), + cycle[prev_index(i)].clone(), + )); + } + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); + + let next = cycle[next_index(i)].borrow(); + assignments + .iter_mut() + .enumerate() + .filter(|(_, a)| a.who == current.id.who) + .for_each(|(target_ass_index, ass)| { + ass.distribution + .iter_mut() + .position(|(t, _)| *t == next.id.who) + .map(|idx| { + let next_value = if i % 2 == 0 { + if start_operation_add { + ass.distribution[idx].1.saturating_sub(min_value) + } else { + ass.distribution[idx].1.saturating_add(min_value) + } + } else { + if start_operation_add { + ass.distribution[idx].1.saturating_add(min_value) + } else { + ass.distribution[idx].1.saturating_sub(min_value) + } + }; + + if next_value.is_zero() { + // if the removed edge is from the current assignment, dis_index + // should NOT be increased. + if target_ass_index == assignment_index { + should_inc_counter = false + } + ass.distribution.remove(idx); + num_changed += 1; + if !(i == min_index && min_direction == 1) { + additional_removed.push(( + cycle[i].clone(), + cycle[next_index(i)].clone(), + )); + } + } else { + ass.distribution[idx].1 = next_value; + } + }); + }); + } + } + + // don't do anything if the edge removed itself. This is always the first and last + // element + let should_reorg = !(min_index == (cycle.len() - 1) && min_direction == 1); + + // re-org. + if should_reorg { + let min_edge = vec![min_voter, min_target]; + if min_chain_in_voter { + // NOTE: safe; voter_root_path is always bigger than 1 element. 
+ for i in 0..voter_root_path.len() - 1 { + let current = voter_root_path[i].clone().borrow().id.who.clone(); + let next = voter_root_path[i + 1].clone().borrow().id.who.clone(); + if min_edge.contains(¤t) && min_edge.contains(&next) { + break; + } + Node::set_parent_of(&voter_root_path[i + 1], &voter_root_path[i]); + } + Node::set_parent_of(&voter_node, &target_node); + } else { + // NOTE: safe; target_root_path is always bigger than 1 element. + for i in 0..target_root_path.len() - 1 { + let current = target_root_path[i].clone().borrow().id.who.clone(); + let next = target_root_path[i + 1].clone().borrow().id.who.clone(); + if min_edge.contains(¤t) && min_edge.contains(&next) { + break; + } + Node::set_parent_of(&target_root_path[i + 1], &target_root_path[i]); + } + Node::set_parent_of(&target_node, &voter_node); + } + } + + // remove every other node which has collapsed to zero + for (r1, r2) in additional_removed { + if Node::is_parent_of(&r1, &r2) { + Node::remove_parent(&r1); + } else if Node::is_parent_of(&r2, &r1) { + Node::remove_parent(&r2); + } + } + + // increment the counter if needed. + if should_inc_counter { + dist_index += 1; + } + } + } + } + + num_changed } /// Reduce the given [`PhragmenResult`]. 
This removes redundant edges from without changing the @@ -650,427 +655,401 @@ fn reduce_all(assignments: &mut Vec>) -> u32 /// /// O(min{ |Ew| ⋅ k + m3 , |Ew| ⋅ m }) pub fn reduce(assignments: &mut Vec>) -> u32 where { - let mut num_changed = reduce_4(assignments); - num_changed += reduce_all(assignments); - num_changed + let mut num_changed = reduce_4(assignments); + num_changed += reduce_all(assignments); + num_changed } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn merging_works() { - // D <-- A <-- B <-- C - // - // F <-- E - let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); - let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); - let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); - let c = Node::new(NodeId::from(4, NodeRole::Target)).into_ref(); - let e = Node::new(NodeId::from(5, NodeRole::Target)).into_ref(); - let f = Node::new(NodeId::from(6, NodeRole::Target)).into_ref(); - - Node::set_parent_of(&c, &b); - Node::set_parent_of(&b, &a); - Node::set_parent_of(&a, &d); - Node::set_parent_of(&e, &f); - - let path1 = vec![c.clone(), b.clone(), a.clone(), d.clone()]; - let path2 = vec![e.clone(), f.clone()]; - - merge(path1, path2); - // D <-- A <-- B <-- C - // | - // F --> E --> --> - assert_eq!(e.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c - } - - #[test] - fn merge_with_len_one() { - // D <-- A <-- B <-- C - // - // F <-- E - let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); - let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); - let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); - let c = Node::new(NodeId::from(4, NodeRole::Target)).into_ref(); - let f = Node::new(NodeId::from(6, NodeRole::Target)).into_ref(); - - Node::set_parent_of(&c, &b); - Node::set_parent_of(&b, &a); - Node::set_parent_of(&a, &d); - - let path1 = vec![c.clone(), b.clone(), a.clone(), d.clone()]; - let path2 = vec![f.clone()]; - - merge(path1, path2); - // D <-- A <-- B <-- 
C - // | - // F --> --> - assert_eq!(f.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c - } - - #[test] - fn basic_reduce_4_cycle_works() { - use super::*; - - let assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 25), (20, 75)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 50), (20, 50)], - }, - ]; - - let mut new_assignments = assignments.clone(); - let num_reduced = reduce_4(&mut new_assignments); - - assert_eq!(num_reduced, 1); - assert_eq!( - new_assignments, - vec![ - StakedAssignment { - who: 1, - distribution: vec![(20, 100),], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 75), (20, 25),], - }, - ], - ); - } - - #[test] - fn basic_reduce_all_cycles_works() { - let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, - ]; - - assert_eq!(3, reduce_all(&mut assignments)); - - assert_eq!( - assignments, - vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, - ], - ) - } - - #[test] - fn basic_reduce_works() { - let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: 
vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, - ]; - - assert_eq!(3, reduce(&mut assignments)); - - assert_eq!( - assignments, - vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, - ], - ) - } - - #[test] - fn should_deal_with_self_vote() { - let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 15), (40, 15)], - }, - StakedAssignment { - who: 4, - distribution: vec![(20, 10), (30, 10), (40, 20)], - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 20), (30, 10), (40, 20)], - }, - // self vote from 10 and 20 to itself. - StakedAssignment { - who: 10, - distribution: vec![(10, 100)], - }, - StakedAssignment { - who: 20, - distribution: vec![(20, 200)], - }, - ]; - - assert_eq!(3, reduce(&mut assignments)); - - assert_eq!( - assignments, - vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10),] - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 5),], - }, - StakedAssignment { - who: 3, - distribution: vec![(20, 30),], - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 40),] - }, - StakedAssignment { - who: 5, - distribution: vec![(20, 15), (30, 20), (40, 15),], - }, - // should stay untouched. 
- StakedAssignment { - who: 10, - distribution: vec![(10, 100)] - }, - StakedAssignment { - who: 20, - distribution: vec![(20, 200)] - }, - ], - ) - } - - #[test] - fn reduce_3_common_votes_same_weight() { - let mut assignments = vec![ - StakedAssignment { - who: 4, - distribution: vec![ - ( - 1000000, - 100, - ), - ( - 1000002, - 100, - ), - ( - 1000004, - 100, - ), - ], - }, - StakedAssignment { - who: 5, - distribution: vec![ - ( - 1000000, - 100, - ), - ( - 1000002, - 100, - ), - ( - 1000004, - 100, - ), - ], - }, - ]; - - reduce_4(&mut assignments); - - assert_eq!( - assignments, - vec![ - StakedAssignment { - who: 4, - distribution: vec![(1000000, 200,), (1000004, 100,),], - }, - StakedAssignment { - who: 5, - distribution: vec![(1000002, 200,), (1000004, 100,),], - }, - ], - ) - } - - #[test] - #[should_panic] - fn reduce_panics_on_duplicate_voter() { - let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 10), (20, 10)], - }, - StakedAssignment { - who: 1, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 2, - distribution: vec![(10, 15), (20, 15)], - }, - ]; - - reduce(&mut assignments); - } - - #[test] - fn should_deal_with_duplicates_target() { - let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 15), (20, 5)], - }, - StakedAssignment { - who: 2, - distribution: vec![ - (10, 15), - (20, 15), - // duplicate - (10, 1), - // duplicate - (20, 1), - ], - }, - ]; - - reduce(&mut assignments); - - assert_eq!( - assignments, - vec![ - StakedAssignment { - who: 1, - distribution: vec![(10, 20),], - }, - StakedAssignment { - who: 2, - distribution: vec![ - (10, 10), - (20, 20), - // duplicate votes are silently ignored. 
- (10, 1), - (20, 1), - ], - }, - ], - ) - } - - #[test] - fn bound_should_be_kept() { - let mut assignments = vec![ - StakedAssignment { - who: 1, - distribution: vec![(103, 72), (101, 53), (100, 83), (102, 38)], - }, - StakedAssignment { - who: 2, - distribution: vec![(103, 18), (101, 36), (102, 54), (100, 94)], - }, - StakedAssignment { - who: 3, - distribution: vec![(100, 96), (101, 35), (102, 52), (103, 69)], - }, - StakedAssignment { - who: 4, - distribution: vec![(102, 34), (100, 47), (103, 91), (101, 73)], - }, - ]; - - let winners = vec![103, 101, 100, 102]; - - let n = 4; - let m = winners.len() as u32; - let num_reduced = reduce_all(&mut assignments); - assert!(16 - num_reduced <= n + m); - } + use super::*; + + #[test] + fn merging_works() { + // D <-- A <-- B <-- C + // + // F <-- E + let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); + let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); + let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); + let c = Node::new(NodeId::from(4, NodeRole::Target)).into_ref(); + let e = Node::new(NodeId::from(5, NodeRole::Target)).into_ref(); + let f = Node::new(NodeId::from(6, NodeRole::Target)).into_ref(); + + Node::set_parent_of(&c, &b); + Node::set_parent_of(&b, &a); + Node::set_parent_of(&a, &d); + Node::set_parent_of(&e, &f); + + let path1 = vec![c.clone(), b.clone(), a.clone(), d.clone()]; + let path2 = vec![e.clone(), f.clone()]; + + merge(path1, path2); + // D <-- A <-- B <-- C + // | + // F --> E --> --> + assert_eq!(e.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c + } + + #[test] + fn merge_with_len_one() { + // D <-- A <-- B <-- C + // + // F <-- E + let d = Node::new(NodeId::from(1, NodeRole::Target)).into_ref(); + let a = Node::new(NodeId::from(2, NodeRole::Target)).into_ref(); + let b = Node::new(NodeId::from(3, NodeRole::Target)).into_ref(); + let c = Node::new(NodeId::from(4, NodeRole::Target)).into_ref(); + let f = Node::new(NodeId::from(6, 
NodeRole::Target)).into_ref(); + + Node::set_parent_of(&c, &b); + Node::set_parent_of(&b, &a); + Node::set_parent_of(&a, &d); + + let path1 = vec![c.clone(), b.clone(), a.clone(), d.clone()]; + let path2 = vec![f.clone()]; + + merge(path1, path2); + // D <-- A <-- B <-- C + // | + // F --> --> + assert_eq!(f.borrow().clone().parent.unwrap().borrow().id.who, 4u32); // c + } + + #[test] + fn basic_reduce_4_cycle_works() { + use super::*; + + let assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 25), (20, 75)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 50), (20, 50)], + }, + ]; + + let mut new_assignments = assignments.clone(); + let num_reduced = reduce_4(&mut new_assignments); + + assert_eq!(num_reduced, 1); + assert_eq!( + new_assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(20, 100),], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 75), (20, 25),], + }, + ], + ); + } + + #[test] + fn basic_reduce_all_cycles_works() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 15), (40, 15)], + }, + StakedAssignment { + who: 4, + distribution: vec![(20, 10), (30, 10), (40, 20)], + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 20), (30, 10), (40, 20)], + }, + ]; + + assert_eq!(3, reduce_all(&mut assignments)); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10),] + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5),], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 30),], + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 40),] + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 15), (30, 20), (40, 15),], + }, + ], + ) + } + + #[test] + fn basic_reduce_works() { + let mut assignments = vec![ + 
StakedAssignment { + who: 1, + distribution: vec![(10, 10)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 15), (40, 15)], + }, + StakedAssignment { + who: 4, + distribution: vec![(20, 10), (30, 10), (40, 20)], + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 20), (30, 10), (40, 20)], + }, + ]; + + assert_eq!(3, reduce(&mut assignments)); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10),] + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5),], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 30),], + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 40),] + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 15), (30, 20), (40, 15),], + }, + ], + ) + } + + #[test] + fn should_deal_with_self_vote() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 15), (40, 15)], + }, + StakedAssignment { + who: 4, + distribution: vec![(20, 10), (30, 10), (40, 20)], + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 20), (30, 10), (40, 20)], + }, + // self vote from 10 and 20 to itself. 
+ StakedAssignment { + who: 10, + distribution: vec![(10, 100)], + }, + StakedAssignment { + who: 20, + distribution: vec![(20, 200)], + }, + ]; + + assert_eq!(3, reduce(&mut assignments)); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10),] + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 5),], + }, + StakedAssignment { + who: 3, + distribution: vec![(20, 30),], + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 40),] + }, + StakedAssignment { + who: 5, + distribution: vec![(20, 15), (30, 20), (40, 15),], + }, + // should stay untouched. + StakedAssignment { + who: 10, + distribution: vec![(10, 100)] + }, + StakedAssignment { + who: 20, + distribution: vec![(20, 200)] + }, + ], + ) + } + + #[test] + fn reduce_3_common_votes_same_weight() { + let mut assignments = vec![ + StakedAssignment { + who: 4, + distribution: vec![(1000000, 100), (1000002, 100), (1000004, 100)], + }, + StakedAssignment { + who: 5, + distribution: vec![(1000000, 100), (1000002, 100), (1000004, 100)], + }, + ]; + + reduce_4(&mut assignments); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 4, + distribution: vec![(1000000, 200,), (1000004, 100,),], + }, + StakedAssignment { + who: 5, + distribution: vec![(1000002, 200,), (1000004, 100,),], + }, + ], + ) + } + + #[test] + #[should_panic] + fn reduce_panics_on_duplicate_voter() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 10), (20, 10)], + }, + StakedAssignment { + who: 1, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 2, + distribution: vec![(10, 15), (20, 15)], + }, + ]; + + reduce(&mut assignments); + } + + #[test] + fn should_deal_with_duplicates_target() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 15), (20, 5)], + }, + StakedAssignment { + who: 2, + distribution: vec![ + (10, 15), + (20, 15), + // duplicate + (10, 1), + // 
duplicate + (20, 1), + ], + }, + ]; + + reduce(&mut assignments); + + assert_eq!( + assignments, + vec![ + StakedAssignment { + who: 1, + distribution: vec![(10, 20),], + }, + StakedAssignment { + who: 2, + distribution: vec![ + (10, 10), + (20, 20), + // duplicate votes are silently ignored. + (10, 1), + (20, 1), + ], + }, + ], + ) + } + + #[test] + fn bound_should_be_kept() { + let mut assignments = vec![ + StakedAssignment { + who: 1, + distribution: vec![(103, 72), (101, 53), (100, 83), (102, 38)], + }, + StakedAssignment { + who: 2, + distribution: vec![(103, 18), (101, 36), (102, 54), (100, 94)], + }, + StakedAssignment { + who: 3, + distribution: vec![(100, 96), (101, 35), (102, 52), (103, 69)], + }, + StakedAssignment { + who: 4, + distribution: vec![(102, 34), (100, 47), (103, 91), (101, 73)], + }, + ]; + + let winners = vec![103, 101, 100, 102]; + + let n = 4; + let m = winners.len() as u32; + let num_reduced = reduce_all(&mut assignments); + assert!(16 - num_reduced <= n + m); + } } diff --git a/primitives/phragmen/src/tests.rs b/primitives/phragmen/src/tests.rs index 9d16d67495..170b1de93d 100644 --- a/primitives/phragmen/src/tests.rs +++ b/primitives/phragmen/src/tests.rs @@ -20,1027 +20,1101 @@ use crate::mock::*; use crate::{ - elect, equalize, build_support_map, is_score_better, helpers::*, - Support, StakedAssignment, Assignment, PhragmenResult, ExtendedBalance, + build_support_map, elect, equalize, helpers::*, is_score_better, Assignment, ExtendedBalance, + PhragmenResult, StakedAssignment, Support, }; +use sp_runtime::{PerU16, Perbill, Percent, Permill}; use substrate_test_utils::assert_eq_uvec; -use sp_runtime::{Perbill, Permill, Percent, PerU16}; #[test] fn float_phragmen_poc_works() { - let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; - let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30), (1, 0), (2, 0), (3, 0)]); - let mut phragmen_result = elect_float(2, 2, 
candidates, voters, &stake_of).unwrap(); - let winners = phragmen_result.clone().winners; - let assignments = phragmen_result.clone().assignments; - - assert_eq_uvec!(winners, vec![(2, 40), (3, 50)]); - assert_eq_uvec!( - assignments, - vec![ - (10, vec![(2, 1.0)]), - (20, vec![(3, 1.0)]), - (30, vec![(2, 0.5), (3, 0.5)]), - ] - ); - - let mut support_map = build_support_map_float(&mut phragmen_result, &stake_of); - - assert_eq!( - support_map.get(&2).unwrap(), - &_Support { own: 0.0, total: 25.0, others: vec![(10u64, 10.0), (30u64, 15.0)]} - ); - assert_eq!( - support_map.get(&3).unwrap(), - &_Support { own: 0.0, total: 35.0, others: vec![(20u64, 20.0), (30u64, 15.0)]} - ); - - equalize_float(phragmen_result.assignments, &mut support_map, 0.0, 2, stake_of); - - assert_eq!( - support_map.get(&2).unwrap(), - &_Support { own: 0.0, total: 30.0, others: vec![(10u64, 10.0), (30u64, 20.0)]} - ); - assert_eq!( - support_map.get(&3).unwrap(), - &_Support { own: 0.0, total: 30.0, others: vec![(20u64, 20.0), (30u64, 10.0)]} - ); + let candidates = vec![1, 2, 3]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; + let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30), (1, 0), (2, 0), (3, 0)]); + let mut phragmen_result = elect_float(2, 2, candidates, voters, &stake_of).unwrap(); + let winners = phragmen_result.clone().winners; + let assignments = phragmen_result.clone().assignments; + + assert_eq_uvec!(winners, vec![(2, 40), (3, 50)]); + assert_eq_uvec!( + assignments, + vec![ + (10, vec![(2, 1.0)]), + (20, vec![(3, 1.0)]), + (30, vec![(2, 0.5), (3, 0.5)]), + ] + ); + + let mut support_map = build_support_map_float(&mut phragmen_result, &stake_of); + + assert_eq!( + support_map.get(&2).unwrap(), + &_Support { + own: 0.0, + total: 25.0, + others: vec![(10u64, 10.0), (30u64, 15.0)] + } + ); + assert_eq!( + support_map.get(&3).unwrap(), + &_Support { + own: 0.0, + total: 35.0, + others: vec![(20u64, 20.0), (30u64, 15.0)] + } + ); + + 
equalize_float( + phragmen_result.assignments, + &mut support_map, + 0.0, + 2, + stake_of, + ); + + assert_eq!( + support_map.get(&2).unwrap(), + &_Support { + own: 0.0, + total: 30.0, + others: vec![(10u64, 10.0), (30u64, 20.0)] + } + ); + assert_eq!( + support_map.get(&3).unwrap(), + &_Support { + own: 0.0, + total: 30.0, + others: vec![(20u64, 20.0), (30u64, 10.0)] + } + ); } #[test] fn phragmen_poc_works() { - let candidates = vec![1, 2, 3]; - let voters = vec![ - (10, vec![1, 2]), - (20, vec![1, 3]), - (30, vec![2, 3]), - ]; - - let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); - let PhragmenResult { winners, assignments } = elect::<_, Perbill>( - 2, - 2, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq_uvec!(winners, vec![(2, 40), (3, 50)]); - assert_eq_uvec!( - assignments, - vec![ - Assignment { - who: 10u64, - distribution: vec![(2, Perbill::from_percent(100))], - }, - Assignment { - who: 20, - distribution: vec![(3, Perbill::from_percent(100))], - }, - Assignment { - who: 30, - distribution: vec![ - (2, Perbill::from_percent(100/2)), - (3, Perbill::from_percent(100/2)), - ], - }, - ] - ); - - let mut staked = assignment_ratio_to_staked(assignments, &stake_of); - let winners = to_without_backing(winners); - let mut support_map = build_support_map::(&winners, &staked).0; - - assert_eq_uvec!( - staked, - vec![ - StakedAssignment { - who: 10u64, - distribution: vec![(2, 10)], - }, - StakedAssignment { - who: 20, - distribution: vec![(3, 20)], - }, - StakedAssignment { - who: 30, - distribution: vec![ - (2, 15), - (3, 15), - ], - }, - ] - ); - - assert_eq!( - *support_map.get(&2).unwrap(), - Support:: { total: 25, voters: vec![(10, 10), (30, 15)] }, - ); - assert_eq!( - *support_map.get(&3).unwrap(), - Support:: { total: 35, voters: vec![(20, 20), (30, 15)] }, - ); - - equalize( - &mut staked, - &mut support_map, - 0, - 2, - ); - - assert_eq_uvec!( - staked, - 
vec![ - StakedAssignment { - who: 10u64, - distribution: vec![(2, 10)], - }, - StakedAssignment { - who: 20, - distribution: vec![(3, 20)], - }, - StakedAssignment { - who: 30, - distribution: vec![ - (2, 20), - (3, 10), - ], - }, - ] - ); - - assert_eq!( - *support_map.get(&2).unwrap(), - Support:: { total: 30, voters: vec![(10, 10), (30, 20)] }, - ); - assert_eq!( - *support_map.get(&3).unwrap(), - Support:: { total: 30, voters: vec![(20, 20), (30, 10)] }, - ); + let candidates = vec![1, 2, 3]; + let voters = vec![(10, vec![1, 2]), (20, vec![1, 3]), (30, vec![2, 3])]; + + let stake_of = create_stake_of(&[(10, 10), (20, 20), (30, 30)]); + let PhragmenResult { + winners, + assignments, + } = elect::<_, Perbill>( + 2, + 2, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + assert_eq_uvec!(winners, vec![(2, 40), (3, 50)]); + assert_eq_uvec!( + assignments, + vec![ + Assignment { + who: 10u64, + distribution: vec![(2, Perbill::from_percent(100))], + }, + Assignment { + who: 20, + distribution: vec![(3, Perbill::from_percent(100))], + }, + Assignment { + who: 30, + distribution: vec![ + (2, Perbill::from_percent(100 / 2)), + (3, Perbill::from_percent(100 / 2)), + ], + }, + ] + ); + + let mut staked = assignment_ratio_to_staked(assignments, &stake_of); + let winners = to_without_backing(winners); + let mut support_map = build_support_map::(&winners, &staked).0; + + assert_eq_uvec!( + staked, + vec![ + StakedAssignment { + who: 10u64, + distribution: vec![(2, 10)], + }, + StakedAssignment { + who: 20, + distribution: vec![(3, 20)], + }, + StakedAssignment { + who: 30, + distribution: vec![(2, 15), (3, 15),], + }, + ] + ); + + assert_eq!( + *support_map.get(&2).unwrap(), + Support:: { + total: 25, + voters: vec![(10, 10), (30, 15)] + }, + ); + assert_eq!( + *support_map.get(&3).unwrap(), + Support:: { + total: 35, + voters: vec![(20, 20), (30, 15)] + }, + ); + + equalize(&mut staked, &mut 
support_map, 0, 2); + + assert_eq_uvec!( + staked, + vec![ + StakedAssignment { + who: 10u64, + distribution: vec![(2, 10)], + }, + StakedAssignment { + who: 20, + distribution: vec![(3, 20)], + }, + StakedAssignment { + who: 30, + distribution: vec![(2, 20), (3, 10),], + }, + ] + ); + + assert_eq!( + *support_map.get(&2).unwrap(), + Support:: { + total: 30, + voters: vec![(10, 10), (30, 20)] + }, + ); + assert_eq!( + *support_map.get(&3).unwrap(), + Support:: { + total: 30, + voters: vec![(20, 20), (30, 10)] + }, + ); } #[test] fn phragmen_poc_2_works() { - let candidates = vec![10, 20, 30]; - let voters = vec![ - (2, vec![10, 20, 30]), - (4, vec![10, 20, 40]), - ]; - let stake_of = create_stake_of(&[ - (10, 1000), - (20, 1000), - (30, 1000), - (40, 1000), - (2, 500), - (4, 500), - ]); - - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); - run_and_compare::(candidates, voters, &stake_of, 2, 2); + let candidates = vec![10, 20, 30]; + let voters = vec![(2, vec![10, 20, 30]), (4, vec![10, 20, 40])]; + let stake_of = create_stake_of(&[ + (10, 1000), + (20, 1000), + (30, 1000), + (40, 1000), + (2, 500), + (4, 500), + ]); + + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates, voters, &stake_of, 2, 2); } #[test] fn phragmen_poc_3_works() { - let candidates = vec![10, 20, 30]; - let voters = vec![ - (2, vec![10, 20, 30]), - (4, vec![10, 20, 40]), - ]; - let stake_of = create_stake_of(&[ - (10, 1000), - (20, 1000), - (30, 1000), - (2, 50), - (4, 1000), - ]); - - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); - run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); - 
run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); - run_and_compare::(candidates, voters, &stake_of, 2, 2); + let candidates = vec![10, 20, 30]; + let voters = vec![(2, vec![10, 20, 30]), (4, vec![10, 20, 40])]; + let stake_of = create_stake_of(&[(10, 1000), (20, 1000), (30, 1000), (2, 50), (4, 1000)]); + + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates.clone(), voters.clone(), &stake_of, 2, 2); + run_and_compare::(candidates, voters, &stake_of, 2, 2); } #[test] fn phragmen_accuracy_on_large_scale_only_validators() { - // because of this particular situation we had per_u128 and now rational128. In practice, a - // candidate can have the maximum amount of tokens, and also supported by the maximum. - let candidates = vec![1, 2, 3, 4, 5]; - let stake_of = create_stake_of(&[ - (1, (u64::max_value() - 1).into()), - (2, (u64::max_value() - 4).into()), - (3, (u64::max_value() - 5).into()), - (4, (u64::max_value() - 3).into()), - (5, (u64::max_value() - 2).into()), - ]); - - let PhragmenResult { winners, assignments } = elect::<_, Perbill>( - 2, - 2, - candidates.clone(), - auto_generate_self_voters(&candidates).iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq_uvec!(winners, vec![(1, 18446744073709551614u128), (5, 18446744073709551613u128)]); - assert_eq!(assignments.len(), 2); - check_assignments_sum(assignments); + // because of this particular situation we had per_u128 and now rational128. In practice, a + // candidate can have the maximum amount of tokens, and also supported by the maximum. 
+ let candidates = vec![1, 2, 3, 4, 5]; + let stake_of = create_stake_of(&[ + (1, (u64::max_value() - 1).into()), + (2, (u64::max_value() - 4).into()), + (3, (u64::max_value() - 5).into()), + (4, (u64::max_value() - 3).into()), + (5, (u64::max_value() - 2).into()), + ]); + + let PhragmenResult { + winners, + assignments, + } = elect::<_, Perbill>( + 2, + 2, + candidates.clone(), + auto_generate_self_voters(&candidates) + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + assert_eq_uvec!( + winners, + vec![(1, 18446744073709551614u128), (5, 18446744073709551613u128)] + ); + assert_eq!(assignments.len(), 2); + check_assignments_sum(assignments); } #[test] fn phragmen_accuracy_on_large_scale_validators_and_nominators() { - let candidates = vec![1, 2, 3, 4, 5]; - let mut voters = vec![ - (13, vec![1, 3, 5]), - (14, vec![2, 4]), - ]; - voters.extend(auto_generate_self_voters(&candidates)); - let stake_of = create_stake_of(&[ - (1, (u64::max_value() - 1).into()), - (2, (u64::max_value() - 4).into()), - (3, (u64::max_value() - 5).into()), - (4, (u64::max_value() - 3).into()), - (5, (u64::max_value() - 2).into()), - (13, (u64::max_value() - 10).into()), - (14, u64::max_value().into()), - ]); - - let PhragmenResult { winners, assignments } = elect::<_, Perbill>( - 2, - 2, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq_uvec!(winners, vec![(2, 36893488147419103226u128), (1, 36893488147419103219u128)]); - assert_eq!( - assignments, - vec![ - Assignment { - who: 13u64, - distribution: vec![(1, Perbill::one())], - }, - Assignment { - who: 14, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 1, - distribution: vec![(1, Perbill::one())], - }, - Assignment { - who: 2, - distribution: vec![(2, Perbill::one())], - }, - ] - ); - check_assignments_sum(assignments); + let candidates = vec![1, 2, 3, 4, 5]; + let mut voters = 
vec![(13, vec![1, 3, 5]), (14, vec![2, 4])]; + voters.extend(auto_generate_self_voters(&candidates)); + let stake_of = create_stake_of(&[ + (1, (u64::max_value() - 1).into()), + (2, (u64::max_value() - 4).into()), + (3, (u64::max_value() - 5).into()), + (4, (u64::max_value() - 3).into()), + (5, (u64::max_value() - 2).into()), + (13, (u64::max_value() - 10).into()), + (14, u64::max_value().into()), + ]); + + let PhragmenResult { + winners, + assignments, + } = elect::<_, Perbill>( + 2, + 2, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + assert_eq_uvec!( + winners, + vec![(2, 36893488147419103226u128), (1, 36893488147419103219u128)] + ); + assert_eq!( + assignments, + vec![ + Assignment { + who: 13u64, + distribution: vec![(1, Perbill::one())], + }, + Assignment { + who: 14, + distribution: vec![(2, Perbill::one())], + }, + Assignment { + who: 1, + distribution: vec![(1, Perbill::one())], + }, + Assignment { + who: 2, + distribution: vec![(2, Perbill::one())], + }, + ] + ); + check_assignments_sum(assignments); } #[test] fn phragmen_accuracy_on_small_scale_self_vote() { - let candidates = vec![40, 10, 20, 30]; - let voters = auto_generate_self_voters(&candidates); - let stake_of = create_stake_of(&[ - (40, 0), - (10, 1), - (20, 2), - (30, 1), - ]); - - let PhragmenResult { winners, assignments: _ } = elect::<_, Perbill>( - 3, - 3, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); + let candidates = vec![40, 10, 20, 30]; + let voters = auto_generate_self_voters(&candidates); + let stake_of = create_stake_of(&[(40, 0), (10, 1), (20, 2), (30, 1)]); + + let PhragmenResult { + winners, + assignments: _, + } = elect::<_, Perbill>( + 3, + 3, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + 
.unwrap(); + + assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); } #[test] fn phragmen_accuracy_on_small_scale_no_self_vote() { - let candidates = vec![40, 10, 20, 30]; - let voters = vec![ - (1, vec![10]), - (2, vec![20]), - (3, vec![30]), - (4, vec![40]), - ]; - let stake_of = create_stake_of(&[ - (40, 1000), // don't care - (10, 1000), // don't care - (20, 1000), // don't care - (30, 1000), // don't care - (4, 0), - (1, 1), - (2, 2), - (3, 1), - ]); - - let PhragmenResult { winners, assignments: _ } = elect::<_, Perbill>( - 3, - 3, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); + let candidates = vec![40, 10, 20, 30]; + let voters = vec![(1, vec![10]), (2, vec![20]), (3, vec![30]), (4, vec![40])]; + let stake_of = create_stake_of(&[ + (40, 1000), // don't care + (10, 1000), // don't care + (20, 1000), // don't care + (30, 1000), // don't care + (4, 0), + (1, 1), + (2, 2), + (3, 1), + ]); + + let PhragmenResult { + winners, + assignments: _, + } = elect::<_, Perbill>( + 3, + 3, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + assert_eq_uvec!(winners, vec![(20, 2), (10, 1), (30, 1)]); } #[test] fn phragmen_large_scale_test() { - let candidates = vec![2, 4, 6, 8, 10, 12, 14, 16 ,18, 20, 22, 24]; - let mut voters = vec![ - (50, vec![2, 4, 6, 8, 10, 12, 14, 16 ,18, 20, 22, 24]), - ]; - voters.extend(auto_generate_self_voters(&candidates)); - let stake_of = create_stake_of(&[ - (2, 1), - (4, 100), - (6, 1000000), - (8, 100000000001000), - (10, 100000000002000), - (12, 100000000003000), - (14, 400000000000000), - (16, 400000000001000), - (18, 18000000000000000), - (20, 20000000000000000), - (22, 500000000000100000), - (24, 500000000000200000), - (50, 990000000000000000), - ]); - - let PhragmenResult { winners, assignments } = elect::<_, 
Perbill>( - 2, - 2, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq_uvec!(winners, vec![(24, 1490000000000200000u128), (22, 1490000000000100000u128)]); - check_assignments_sum(assignments); + let candidates = vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24]; + let mut voters = vec![(50, vec![2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24])]; + voters.extend(auto_generate_self_voters(&candidates)); + let stake_of = create_stake_of(&[ + (2, 1), + (4, 100), + (6, 1000000), + (8, 100000000001000), + (10, 100000000002000), + (12, 100000000003000), + (14, 400000000000000), + (16, 400000000001000), + (18, 18000000000000000), + (20, 20000000000000000), + (22, 500000000000100000), + (24, 500000000000200000), + (50, 990000000000000000), + ]); + + let PhragmenResult { + winners, + assignments, + } = elect::<_, Perbill>( + 2, + 2, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + assert_eq_uvec!( + winners, + vec![(24, 1490000000000200000u128), (22, 1490000000000100000u128)] + ); + check_assignments_sum(assignments); } #[test] fn phragmen_large_scale_test_2() { - let nom_budget: u64 = 1_000_000_000_000_000_000; - let c_budget: u64 = 4_000_000; - - let candidates = vec![2, 4]; - let mut voters = vec![(50, vec![2, 4])]; - voters.extend(auto_generate_self_voters(&candidates)); - - let stake_of = create_stake_of(&[ - (2, c_budget.into()), - (4, c_budget.into()), - (50, nom_budget.into()), - ]); - - let PhragmenResult { winners, assignments } = elect::<_, Perbill>( - 2, - 2, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq_uvec!(winners, vec![(2, 1000000000004000000u128), (4, 1000000000004000000u128)]); - assert_eq!( - assignments, - vec![ - Assignment { - who: 50u64, - distribution: vec![ - (2, Perbill::from_parts(500000001)), - (4, 
Perbill::from_parts(499999999)) - ], - }, - Assignment { - who: 2, - distribution: vec![(2, Perbill::one())], - }, - Assignment { - who: 4, - distribution: vec![(4, Perbill::one())], - }, - ], - ); - check_assignments_sum(assignments); + let nom_budget: u64 = 1_000_000_000_000_000_000; + let c_budget: u64 = 4_000_000; + + let candidates = vec![2, 4]; + let mut voters = vec![(50, vec![2, 4])]; + voters.extend(auto_generate_self_voters(&candidates)); + + let stake_of = create_stake_of(&[ + (2, c_budget.into()), + (4, c_budget.into()), + (50, nom_budget.into()), + ]); + + let PhragmenResult { + winners, + assignments, + } = elect::<_, Perbill>( + 2, + 2, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + assert_eq_uvec!( + winners, + vec![(2, 1000000000004000000u128), (4, 1000000000004000000u128)] + ); + assert_eq!( + assignments, + vec![ + Assignment { + who: 50u64, + distribution: vec![ + (2, Perbill::from_parts(500000001)), + (4, Perbill::from_parts(499999999)) + ], + }, + Assignment { + who: 2, + distribution: vec![(2, Perbill::one())], + }, + Assignment { + who: 4, + distribution: vec![(4, Perbill::one())], + }, + ], + ); + check_assignments_sum(assignments); } #[test] fn phragmen_linear_equalize() { - let candidates = vec![11, 21, 31, 41, 51, 61, 71]; - let voters = vec![ - (2, vec![11]), - (4, vec![11, 21]), - (6, vec![21, 31]), - (8, vec![31, 41]), - (110, vec![41, 51]), - (120, vec![51, 61]), - (130, vec![61, 71]), - ]; - let stake_of = create_stake_of(&[ - (11, 1000), - (21, 1000), - (31, 1000), - (41, 1000), - (51, 1000), - (61, 1000), - (71, 1000), - - (2, 2000), - (4, 1000), - (6, 1000), - (8, 1000), - (110, 1000), - (120, 1000), - (130, 1000), - ]); - - run_and_compare::(candidates, voters, &stake_of, 2, 2); + let candidates = vec![11, 21, 31, 41, 51, 61, 71]; + let voters = vec![ + (2, vec![11]), + (4, vec![11, 21]), + (6, vec![21, 31]), + (8, vec![31, 41]), + (110, 
vec![41, 51]), + (120, vec![51, 61]), + (130, vec![61, 71]), + ]; + let stake_of = create_stake_of(&[ + (11, 1000), + (21, 1000), + (31, 1000), + (41, 1000), + (51, 1000), + (61, 1000), + (71, 1000), + (2, 2000), + (4, 1000), + (6, 1000), + (8, 1000), + (110, 1000), + (120, 1000), + (130, 1000), + ]); + + run_and_compare::(candidates, voters, &stake_of, 2, 2); } #[test] fn elect_has_no_entry_barrier() { - let candidates = vec![10, 20, 30]; - let voters = vec![ - (1, vec![10]), - (2, vec![20]), - ]; - let stake_of = create_stake_of(&[ - (1, 10), - (2, 10), - ]); - - let PhragmenResult { winners, assignments: _ } = elect::<_, Perbill>( - 3, - 3, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - // 30 is elected with stake 0. The caller is responsible for stripping this. - assert_eq_uvec!(winners, vec![ - (10, 10), - (20, 10), - (30, 0), - ]); + let candidates = vec![10, 20, 30]; + let voters = vec![(1, vec![10]), (2, vec![20])]; + let stake_of = create_stake_of(&[(1, 10), (2, 10)]); + + let PhragmenResult { + winners, + assignments: _, + } = elect::<_, Perbill>( + 3, + 3, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + // 30 is elected with stake 0. The caller is responsible for stripping this. 
+ assert_eq_uvec!(winners, vec![(10, 10), (20, 10), (30, 0),]); } #[test] fn minimum_to_elect_is_respected() { - let candidates = vec![10, 20, 30]; - let voters = vec![ - (1, vec![10]), - (2, vec![20]), - ]; - let stake_of = create_stake_of(&[ - (1, 10), - (2, 10), - ]); - - let maybe_result = elect::<_, Perbill>( - 10, - 10, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ); - - assert!(maybe_result.is_none()); + let candidates = vec![10, 20, 30]; + let voters = vec![(1, vec![10]), (2, vec![20])]; + let stake_of = create_stake_of(&[(1, 10), (2, 10)]); + + let maybe_result = elect::<_, Perbill>( + 10, + 10, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ); + + assert!(maybe_result.is_none()); } #[test] fn self_votes_should_be_kept() { - let candidates = vec![5, 10, 20, 30]; - let voters = vec![ - (5, vec![5]), - (10, vec![10]), - (20, vec![20]), - (1, vec![10, 20]) - ]; - let stake_of = create_stake_of(&[ - (5, 5), - (10, 10), - (20, 20), - (1, 8), - ]); - - let result = elect::<_, Perbill>( - 2, - 2, - candidates, - voters.iter().map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())).collect::>(), - ).unwrap(); - - assert_eq!(result.winners, vec![(20, 28), (10, 18)]); - assert_eq!( - result.assignments, - vec![ - Assignment { who: 10, distribution: vec![(10, Perbill::from_percent(100))] }, - Assignment { who: 20, distribution: vec![(20, Perbill::from_percent(100))] }, - Assignment { who: 1, distribution: vec![ - (10, Perbill::from_percent(50)), - (20, Perbill::from_percent(50)) - ] - }, - ], - ); - - let mut staked_assignments = assignment_ratio_to_staked(result.assignments, &stake_of); - let winners = to_without_backing(result.winners); - - let (mut supports, _) = build_support_map::( - &winners, - &staked_assignments, - ); - - assert_eq!(supports.get(&5u64), None); - assert_eq!( - supports.get(&10u64).unwrap(), - &Support { total: 
14u128, voters: vec![(10u64, 10u128), (1u64, 4u128)] }, - ); - assert_eq!( - supports.get(&20u64).unwrap(), - &Support { total: 24u128, voters: vec![(20u64, 20u128), (1u64, 4u128)] }, - ); - - equalize( - &mut staked_assignments, - &mut supports, - 0, - 2usize, - ); - - assert_eq!( - supports.get(&10u64).unwrap(), - &Support { total: 18u128, voters: vec![(10u64, 10u128), (1u64, 8u128)] }, - ); - assert_eq!( - supports.get(&20u64).unwrap(), - &Support { total: 20u128, voters: vec![(20u64, 20u128)] }, - ); + let candidates = vec![5, 10, 20, 30]; + let voters = vec![ + (5, vec![5]), + (10, vec![10]), + (20, vec![20]), + (1, vec![10, 20]), + ]; + let stake_of = create_stake_of(&[(5, 5), (10, 10), (20, 20), (1, 8)]); + + let result = elect::<_, Perbill>( + 2, + 2, + candidates, + voters + .iter() + .map(|(ref v, ref vs)| (v.clone(), stake_of(v), vs.clone())) + .collect::>(), + ) + .unwrap(); + + assert_eq!(result.winners, vec![(20, 28), (10, 18)]); + assert_eq!( + result.assignments, + vec![ + Assignment { + who: 10, + distribution: vec![(10, Perbill::from_percent(100))] + }, + Assignment { + who: 20, + distribution: vec![(20, Perbill::from_percent(100))] + }, + Assignment { + who: 1, + distribution: vec![ + (10, Perbill::from_percent(50)), + (20, Perbill::from_percent(50)) + ] + }, + ], + ); + + let mut staked_assignments = assignment_ratio_to_staked(result.assignments, &stake_of); + let winners = to_without_backing(result.winners); + + let (mut supports, _) = build_support_map::(&winners, &staked_assignments); + + assert_eq!(supports.get(&5u64), None); + assert_eq!( + supports.get(&10u64).unwrap(), + &Support { + total: 14u128, + voters: vec![(10u64, 10u128), (1u64, 4u128)] + }, + ); + assert_eq!( + supports.get(&20u64).unwrap(), + &Support { + total: 24u128, + voters: vec![(20u64, 20u128), (1u64, 4u128)] + }, + ); + + equalize(&mut staked_assignments, &mut supports, 0, 2usize); + + assert_eq!( + supports.get(&10u64).unwrap(), + &Support { + total: 18u128, + voters: 
vec![(10u64, 10u128), (1u64, 8u128)] + }, + ); + assert_eq!( + supports.get(&20u64).unwrap(), + &Support { + total: 20u128, + voters: vec![(20u64, 20u128)] + }, + ); } #[test] fn assignment_convert_works() { - let staked = StakedAssignment { - who: 1 as AccountId, - distribution: vec![ - (20, 100 as ExtendedBalance), - (30, 25), - ], - }; - - let assignment = staked.clone().into_assignment(true); - assert_eq!( - assignment, - Assignment { - who: 1, - distribution: vec![ - (20, Perbill::from_percent(80)), - (30, Perbill::from_percent(20)), - ] - } - ); - - assert_eq!( - assignment.into_staked(125, true), - staked, - ); + let staked = StakedAssignment { + who: 1 as AccountId, + distribution: vec![(20, 100 as ExtendedBalance), (30, 25)], + }; + + let assignment = staked.clone().into_assignment(true); + assert_eq!( + assignment, + Assignment { + who: 1, + distribution: vec![ + (20, Perbill::from_percent(80)), + (30, Perbill::from_percent(20)), + ] + } + ); + + assert_eq!(assignment.into_staked(125, true), staked,); } #[test] fn score_comparison_is_lexicographical() { - // only better in the fist parameter, worse in the other two ✅ - assert_eq!( - is_score_better([10, 20, 30], [12, 10, 35]), - true, - ); - - // worse in the first, better in the other two ❌ - assert_eq!( - is_score_better([10, 20, 30], [9, 30, 10]), - false, - ); - - // equal in the first, the second one dictates. - assert_eq!( - is_score_better([10, 20, 30], [10, 25, 40]), - true, - ); - - // equal in the first two, the last one dictates. - assert_eq!( - is_score_better([10, 20, 30], [10, 20, 40]), - false, - ); + // only better in the fist parameter, worse in the other two ✅ + assert_eq!(is_score_better([10, 20, 30], [12, 10, 35]), true,); + + // worse in the first, better in the other two ❌ + assert_eq!(is_score_better([10, 20, 30], [9, 30, 10]), false,); + + // equal in the first, the second one dictates. 
+ assert_eq!(is_score_better([10, 20, 30], [10, 25, 40]), true,); + + // equal in the first two, the last one dictates. + assert_eq!(is_score_better([10, 20, 30], [10, 20, 40]), false,); } mod compact { - use codec::{Decode, Encode}; - use crate::{generate_compact_solution_type, VoteWeight}; - use super::{AccountId}; - // these need to come from the same dev-dependency `sp-phragmen`, not from the crate. - use sp_phragmen::{Assignment, StakedAssignment, Error as PhragmenError, ExtendedBalance}; - use sp_std::{convert::{TryInto, TryFrom}, fmt::Debug}; - use sp_runtime::Percent; - - type Accuracy = Percent; - - generate_compact_solution_type!(TestCompact, 16); - - #[test] - fn compact_struct_is_codec() { - let compact = TestCompact::<_, _, _> { - votes1: vec![(2u64, 20), (4, 40)], - votes2: vec![ - (1, (10, Accuracy::from_percent(80)), 11), - (5, (50, Accuracy::from_percent(85)), 51), - ], - ..Default::default() - }; - - let encoded = compact.encode(); - - assert_eq!( - compact, - Decode::decode(&mut &encoded[..]).unwrap(), - ); - } - - fn basic_ratio_test_with() where - V: codec::Codec + Copy + Default + PartialEq + Eq + TryInto + TryFrom + From + Debug, - T: codec::Codec + Copy + Default + PartialEq + Eq + TryInto + TryFrom + From + Debug, - >::Error: std::fmt::Debug, - >::Error: std::fmt::Debug, - >::Error: std::fmt::Debug, - >::Error: std::fmt::Debug, - { - let voters = vec![ - 2 as AccountId, - 4, - 1, - 5, - 3, - ]; - let targets = vec![ - 10 as AccountId, - 11, - 20, // 2 - 30, - 31, // 4 - 32, - 40, // 6 - 50, - 51, // 8 - ]; - - let assignments = vec![ - Assignment { - who: 2 as AccountId, - distribution: vec![(20u64, Accuracy::from_percent(100))] - }, - Assignment { - who: 4, - distribution: vec![(40, Accuracy::from_percent(100))], - }, - Assignment { - who: 1, - distribution: vec![ - (10, Accuracy::from_percent(80)), - (11, Accuracy::from_percent(20)) - ], - }, - Assignment { - who: 5, - distribution: vec![ - (50, Accuracy::from_percent(85)), - (51, 
Accuracy::from_percent(15)), - ] - }, - Assignment { - who: 3, - distribution: vec![ - (30, Accuracy::from_percent(50)), - (31, Accuracy::from_percent(25)), - (32, Accuracy::from_percent(25)), - ], - }, - ]; - - let voter_index = |a: &AccountId| -> Option { - voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() - }; - let target_index = |a: &AccountId| -> Option { - targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() - }; - - let compacted = >::from_assignment( - assignments.clone(), - voter_index, - target_index, - ).unwrap(); - - assert_eq!( - compacted, - TestCompact { - votes1: vec![(V::from(0u8), T::from(2u8)), (V::from(1u8), T::from(6u8))], - votes2: vec![ - (V::from(2u8), (T::from(0u8), Accuracy::from_percent(80)), T::from(1u8)), - (V::from(3u8), (T::from(7u8), Accuracy::from_percent(85)), T::from(8u8)), - ], - votes3: vec![ - ( - V::from(4), - [(T::from(3u8), Accuracy::from_percent(50)), (T::from(4u8), Accuracy::from_percent(25))], - T::from(5u8), - ), - ], - ..Default::default() - } - ); - - let voter_at = |a: V| -> Option { voters.get(>::try_into(a).unwrap()).cloned() }; - let target_at = |a: T| -> Option { targets.get(>::try_into(a).unwrap()).cloned() }; - - assert_eq!( - compacted.into_assignment(voter_at, target_at).unwrap(), - assignments, - ); - } - - #[test] - fn basic_from_and_into_compact_works_assignments() { - basic_ratio_test_with::(); - basic_ratio_test_with::(); - basic_ratio_test_with::(); - } - - #[test] - fn basic_from_and_into_compact_works_staked_assignments() { - let voters = vec![ - 2 as AccountId, - 4, - 1, - 5, - 3, - ]; - let targets = vec![ - 10 as AccountId, 11, - 20, - 30, 31, 32, - 40, - 50, 51, - ]; - - let assignments = vec![ - StakedAssignment { - who: 2 as AccountId, - distribution: vec![(20, 100 as ExtendedBalance)] - }, - StakedAssignment { - who: 4, - distribution: vec![(40, 100)], - }, - StakedAssignment { - who: 1, - distribution: vec![ - (10, 80), - (11, 20) - ], - }, - 
StakedAssignment { - who: 5, distribution: - vec![ - (50, 85), - (51, 15), - ] - }, - StakedAssignment { - who: 3, - distribution: vec![ - (30, 50), - (31, 25), - (32, 25), - ], - }, - ]; - - let voter_index = |a: &AccountId| -> Option { - voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() - }; - let target_index = |a: &AccountId| -> Option { - targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() - }; - - let compacted = >::from_staked( - assignments.clone(), - voter_index, - target_index, - ).unwrap(); - - assert_eq!( - compacted, - TestCompact { - votes1: vec![(0, 2), (1, 6)], - votes2: vec![ - (2, (0, 80), 1), - (3, (7, 85), 8), - ], - votes3: vec![ - (4, [(3, 50), (4, 25)], 5), - ], - ..Default::default() - } - ); - - let max_of_fn = |_: &AccountId| -> VoteWeight { 100 }; - let voter_at = |a: u16| -> Option { voters.get(a as usize).cloned() }; - let target_at = |a: u16| -> Option { targets.get(a as usize).cloned() }; - - assert_eq!( - compacted.into_staked( - max_of_fn, - voter_at, - target_at, - ).unwrap(), - assignments, - ); - } - - #[test] - fn compact_into_stake_must_report_overflow() { - // The last edge which is computed from the rest should ALWAYS be positive. 
- // in votes2 - let compact = TestCompact:: { - votes1: Default::default(), - votes2: vec![(0, (1, 10), 2)], - ..Default::default() - }; - - let entity_at = |a: u16| -> Option { Some(a as AccountId) }; - let max_of = |_: &AccountId| -> VoteWeight { 5 }; - - assert_eq!( - compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - - // in votes3 onwards - let compact = TestCompact:: { - votes1: Default::default(), - votes2: Default::default(), - votes3: vec![(0, [(1, 7), (2, 8)], 3)], - ..Default::default() - }; - - assert_eq!( - compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - - // Also if equal - let compact = TestCompact:: { - votes1: Default::default(), - votes2: Default::default(), - // 5 is total, we cannot leave none for 30 here. - votes3: vec![(0, [(1, 3), (2, 2)], 3)], - ..Default::default() - }; - - assert_eq!( - compact.into_staked(&max_of, &entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - } - - #[test] - fn compact_into_assignment_must_report_overflow() { - // in votes2 - let compact = TestCompact:: { - votes1: Default::default(), - votes2: vec![(0, (1, Accuracy::from_percent(100)), 2)], - ..Default::default() - }; - - let entity_at = |a: u16| -> Option { Some(a as AccountId) }; - - assert_eq!( - compact.into_assignment(&entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - - // in votes3 onwards - let compact = TestCompact:: { - votes1: Default::default(), - votes2: Default::default(), - votes3: vec![(0, [(1, Accuracy::from_percent(70)), (2, Accuracy::from_percent(80))], 3)], - ..Default::default() - }; - - assert_eq!( - compact.into_assignment(&entity_at, &entity_at).unwrap_err(), - PhragmenError::CompactStakeOverflow, - ); - } - - #[test] - fn target_count_overflow_is_detected() { - let assignments = vec![ - StakedAssignment { - who: 1 as AccountId, - distribution: 
(10..26).map(|i| (i as AccountId, i as ExtendedBalance)).collect::>(), - }, - ]; - - let entity_index = |a: &AccountId| -> Option { Some(*a as u16) }; - - let compacted = >::from_staked( - assignments.clone(), - entity_index, - entity_index, - ); - - assert!(compacted.is_ok()); - - let assignments = vec![ - StakedAssignment { - who: 1 as AccountId, - distribution: (10..27).map(|i| (i as AccountId, i as ExtendedBalance)).collect::>(), - }, - ]; - - let compacted = >::from_staked( - assignments.clone(), - entity_index, - entity_index, - ); - - assert_eq!( - compacted.unwrap_err(), - PhragmenError::CompactTargetOverflow, - ); - - let assignments = vec![ - Assignment { - who: 1 as AccountId, - distribution: (10..27).map(|i| (i as AccountId, Percent::from_parts(i as u8))).collect::>(), - }, - ]; - - let compacted = >::from_assignment( - assignments.clone(), - entity_index, - entity_index, - ); - - assert_eq!( - compacted.unwrap_err(), - PhragmenError::CompactTargetOverflow, - ); - } - - #[test] - fn zero_target_count_is_ignored() { - let voters = vec![1 as AccountId, 2]; - let targets = vec![10 as AccountId, 11]; - - let assignments = vec![ - StakedAssignment { - who: 1 as AccountId, - distribution: vec![(10, 100 as ExtendedBalance), (11, 100)] - }, - StakedAssignment { - who: 2, - distribution: vec![], - }, - ]; - - let voter_index = |a: &AccountId| -> Option { - voters.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() - }; - let target_index = |a: &AccountId| -> Option { - targets.iter().position(|x| x == a).map(TryInto::try_into).unwrap().ok() - }; - - let compacted = >::from_staked( - assignments.clone(), - voter_index, - target_index, - ).unwrap(); - - assert_eq!( - compacted, - TestCompact { - votes1: Default::default(), - votes2: vec![(0, (0, 100), 1)], - ..Default::default() - } - ); - } + use super::AccountId; + use crate::{generate_compact_solution_type, VoteWeight}; + use codec::{Decode, Encode}; + // these need to come from the same 
dev-dependency `sp-phragmen`, not from the crate. + use sp_phragmen::{Assignment, Error as PhragmenError, ExtendedBalance, StakedAssignment}; + use sp_runtime::Percent; + use sp_std::{ + convert::{TryFrom, TryInto}, + fmt::Debug, + }; + + type Accuracy = Percent; + + generate_compact_solution_type!(TestCompact, 16); + + #[test] + fn compact_struct_is_codec() { + let compact = TestCompact::<_, _, _> { + votes1: vec![(2u64, 20), (4, 40)], + votes2: vec![ + (1, (10, Accuracy::from_percent(80)), 11), + (5, (50, Accuracy::from_percent(85)), 51), + ], + ..Default::default() + }; + + let encoded = compact.encode(); + + assert_eq!(compact, Decode::decode(&mut &encoded[..]).unwrap(),); + } + + fn basic_ratio_test_with() + where + V: codec::Codec + + Copy + + Default + + PartialEq + + Eq + + TryInto + + TryFrom + + From + + Debug, + T: codec::Codec + + Copy + + Default + + PartialEq + + Eq + + TryInto + + TryFrom + + From + + Debug, + >::Error: std::fmt::Debug, + >::Error: std::fmt::Debug, + >::Error: std::fmt::Debug, + >::Error: std::fmt::Debug, + { + let voters = vec![2 as AccountId, 4, 1, 5, 3]; + let targets = vec![ + 10 as AccountId, + 11, + 20, // 2 + 30, + 31, // 4 + 32, + 40, // 6 + 50, + 51, // 8 + ]; + + let assignments = vec![ + Assignment { + who: 2 as AccountId, + distribution: vec![(20u64, Accuracy::from_percent(100))], + }, + Assignment { + who: 4, + distribution: vec![(40, Accuracy::from_percent(100))], + }, + Assignment { + who: 1, + distribution: vec![ + (10, Accuracy::from_percent(80)), + (11, Accuracy::from_percent(20)), + ], + }, + Assignment { + who: 5, + distribution: vec![ + (50, Accuracy::from_percent(85)), + (51, Accuracy::from_percent(15)), + ], + }, + Assignment { + who: 3, + distribution: vec![ + (30, Accuracy::from_percent(50)), + (31, Accuracy::from_percent(25)), + (32, Accuracy::from_percent(25)), + ], + }, + ]; + + let voter_index = |a: &AccountId| -> Option { + voters + .iter() + .position(|x| x == a) + .map(TryInto::try_into) + .unwrap() + 
.ok() + }; + let target_index = |a: &AccountId| -> Option { + targets + .iter() + .position(|x| x == a) + .map(TryInto::try_into) + .unwrap() + .ok() + }; + + let compacted = >::from_assignment( + assignments.clone(), + voter_index, + target_index, + ) + .unwrap(); + + assert_eq!( + compacted, + TestCompact { + votes1: vec![(V::from(0u8), T::from(2u8)), (V::from(1u8), T::from(6u8))], + votes2: vec![ + ( + V::from(2u8), + (T::from(0u8), Accuracy::from_percent(80)), + T::from(1u8) + ), + ( + V::from(3u8), + (T::from(7u8), Accuracy::from_percent(85)), + T::from(8u8) + ), + ], + votes3: vec![( + V::from(4), + [ + (T::from(3u8), Accuracy::from_percent(50)), + (T::from(4u8), Accuracy::from_percent(25)) + ], + T::from(5u8), + ),], + ..Default::default() + } + ); + + let voter_at = |a: V| -> Option { + voters + .get(>::try_into(a).unwrap()) + .cloned() + }; + let target_at = |a: T| -> Option { + targets + .get(>::try_into(a).unwrap()) + .cloned() + }; + + assert_eq!( + compacted.into_assignment(voter_at, target_at).unwrap(), + assignments, + ); + } + + #[test] + fn basic_from_and_into_compact_works_assignments() { + basic_ratio_test_with::(); + basic_ratio_test_with::(); + basic_ratio_test_with::(); + } + + #[test] + fn basic_from_and_into_compact_works_staked_assignments() { + let voters = vec![2 as AccountId, 4, 1, 5, 3]; + let targets = vec![10 as AccountId, 11, 20, 30, 31, 32, 40, 50, 51]; + + let assignments = vec![ + StakedAssignment { + who: 2 as AccountId, + distribution: vec![(20, 100 as ExtendedBalance)], + }, + StakedAssignment { + who: 4, + distribution: vec![(40, 100)], + }, + StakedAssignment { + who: 1, + distribution: vec![(10, 80), (11, 20)], + }, + StakedAssignment { + who: 5, + distribution: vec![(50, 85), (51, 15)], + }, + StakedAssignment { + who: 3, + distribution: vec![(30, 50), (31, 25), (32, 25)], + }, + ]; + + let voter_index = |a: &AccountId| -> Option { + voters + .iter() + .position(|x| x == a) + .map(TryInto::try_into) + .unwrap() + .ok() + }; 
+ let target_index = |a: &AccountId| -> Option { + targets + .iter() + .position(|x| x == a) + .map(TryInto::try_into) + .unwrap() + .ok() + }; + + let compacted = >::from_staked( + assignments.clone(), + voter_index, + target_index, + ) + .unwrap(); + + assert_eq!( + compacted, + TestCompact { + votes1: vec![(0, 2), (1, 6)], + votes2: vec![(2, (0, 80), 1), (3, (7, 85), 8),], + votes3: vec![(4, [(3, 50), (4, 25)], 5),], + ..Default::default() + } + ); + + let max_of_fn = |_: &AccountId| -> VoteWeight { 100 }; + let voter_at = |a: u16| -> Option { voters.get(a as usize).cloned() }; + let target_at = |a: u16| -> Option { targets.get(a as usize).cloned() }; + + assert_eq!( + compacted + .into_staked(max_of_fn, voter_at, target_at,) + .unwrap(), + assignments, + ); + } + + #[test] + fn compact_into_stake_must_report_overflow() { + // The last edge which is computed from the rest should ALWAYS be positive. + // in votes2 + let compact = TestCompact:: { + votes1: Default::default(), + votes2: vec![(0, (1, 10), 2)], + ..Default::default() + }; + + let entity_at = |a: u16| -> Option { Some(a as AccountId) }; + let max_of = |_: &AccountId| -> VoteWeight { 5 }; + + assert_eq!( + compact + .into_staked(&max_of, &entity_at, &entity_at) + .unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + + // in votes3 onwards + let compact = TestCompact:: { + votes1: Default::default(), + votes2: Default::default(), + votes3: vec![(0, [(1, 7), (2, 8)], 3)], + ..Default::default() + }; + + assert_eq!( + compact + .into_staked(&max_of, &entity_at, &entity_at) + .unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + + // Also if equal + let compact = TestCompact:: { + votes1: Default::default(), + votes2: Default::default(), + // 5 is total, we cannot leave none for 30 here. 
+ votes3: vec![(0, [(1, 3), (2, 2)], 3)], + ..Default::default() + }; + + assert_eq!( + compact + .into_staked(&max_of, &entity_at, &entity_at) + .unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + } + + #[test] + fn compact_into_assignment_must_report_overflow() { + // in votes2 + let compact = TestCompact:: { + votes1: Default::default(), + votes2: vec![(0, (1, Accuracy::from_percent(100)), 2)], + ..Default::default() + }; + + let entity_at = |a: u16| -> Option { Some(a as AccountId) }; + + assert_eq!( + compact.into_assignment(&entity_at, &entity_at).unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + + // in votes3 onwards + let compact = TestCompact:: { + votes1: Default::default(), + votes2: Default::default(), + votes3: vec![( + 0, + [ + (1, Accuracy::from_percent(70)), + (2, Accuracy::from_percent(80)), + ], + 3, + )], + ..Default::default() + }; + + assert_eq!( + compact.into_assignment(&entity_at, &entity_at).unwrap_err(), + PhragmenError::CompactStakeOverflow, + ); + } + + #[test] + fn target_count_overflow_is_detected() { + let assignments = vec![StakedAssignment { + who: 1 as AccountId, + distribution: (10..26) + .map(|i| (i as AccountId, i as ExtendedBalance)) + .collect::>(), + }]; + + let entity_index = |a: &AccountId| -> Option { Some(*a as u16) }; + + let compacted = >::from_staked( + assignments.clone(), + entity_index, + entity_index, + ); + + assert!(compacted.is_ok()); + + let assignments = vec![StakedAssignment { + who: 1 as AccountId, + distribution: (10..27) + .map(|i| (i as AccountId, i as ExtendedBalance)) + .collect::>(), + }]; + + let compacted = >::from_staked( + assignments.clone(), + entity_index, + entity_index, + ); + + assert_eq!(compacted.unwrap_err(), PhragmenError::CompactTargetOverflow,); + + let assignments = vec![Assignment { + who: 1 as AccountId, + distribution: (10..27) + .map(|i| (i as AccountId, Percent::from_parts(i as u8))) + .collect::>(), + }]; + + let compacted = >::from_assignment( + 
assignments.clone(), + entity_index, + entity_index, + ); + + assert_eq!(compacted.unwrap_err(), PhragmenError::CompactTargetOverflow,); + } + + #[test] + fn zero_target_count_is_ignored() { + let voters = vec![1 as AccountId, 2]; + let targets = vec![10 as AccountId, 11]; + + let assignments = vec![ + StakedAssignment { + who: 1 as AccountId, + distribution: vec![(10, 100 as ExtendedBalance), (11, 100)], + }, + StakedAssignment { + who: 2, + distribution: vec![], + }, + ]; + + let voter_index = |a: &AccountId| -> Option { + voters + .iter() + .position(|x| x == a) + .map(TryInto::try_into) + .unwrap() + .ok() + }; + let target_index = |a: &AccountId| -> Option { + targets + .iter() + .position(|x| x == a) + .map(TryInto::try_into) + .unwrap() + .ok() + }; + + let compacted = >::from_staked( + assignments.clone(), + voter_index, + target_index, + ) + .unwrap(); + + assert_eq!( + compacted, + TestCompact { + votes1: Default::default(), + votes2: vec![(0, (0, 100), 1)], + ..Default::default() + } + ); + } } diff --git a/primitives/rpc/src/lib.rs b/primitives/rpc/src/lib.rs index 7c22daf5cd..c326e52834 100644 --- a/primitives/rpc/src/lib.rs +++ b/primitives/rpc/src/lib.rs @@ -18,21 +18,15 @@ #![warn(missing_docs)] -pub mod number; pub mod list; +pub mod number; /// A util function to assert the result of serialization and deserialization is the same. 
#[cfg(test)] -pub(crate) fn assert_deser(s: &str, expected: T) where - T: std::fmt::Debug + serde::ser::Serialize + serde::de::DeserializeOwned + PartialEq +pub(crate) fn assert_deser(s: &str, expected: T) +where + T: std::fmt::Debug + serde::ser::Serialize + serde::de::DeserializeOwned + PartialEq, { - assert_eq!( - serde_json::from_str::(s).unwrap(), - expected - ); - assert_eq!( - serde_json::to_string(&expected).unwrap(), - s - ); + assert_eq!(serde_json::from_str::(s).unwrap(), expected); + assert_eq!(serde_json::to_string(&expected).unwrap(), s); } - diff --git a/primitives/rpc/src/list.rs b/primitives/rpc/src/list.rs index 469eae3d14..3a416fca36 100644 --- a/primitives/rpc/src/list.rs +++ b/primitives/rpc/src/list.rs @@ -16,7 +16,7 @@ //! RPC a lenient list or value type. -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; /// RPC list or value wrapper. /// @@ -31,45 +31,45 @@ use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, Debug, PartialEq)] #[serde(untagged)] pub enum ListOrValue { - /// A list of values of given type. - List(Vec), - /// A single value of given type. - Value(T), + /// A list of values of given type. + List(Vec), + /// A single value of given type. + Value(T), } impl ListOrValue { - /// Map every contained value using function `F`. - /// - /// This allows to easily convert all values in any of the variants. - pub fn map X, X>(self, f: F) -> ListOrValue { - match self { - ListOrValue::List(v) => ListOrValue::List(v.into_iter().map(f).collect()), - ListOrValue::Value(v) => ListOrValue::Value(f(v)), - } - } + /// Map every contained value using function `F`. + /// + /// This allows to easily convert all values in any of the variants. 
+ pub fn map X, X>(self, f: F) -> ListOrValue { + match self { + ListOrValue::List(v) => ListOrValue::List(v.into_iter().map(f).collect()), + ListOrValue::Value(v) => ListOrValue::Value(f(v)), + } + } } impl From for ListOrValue { - fn from(n: T) -> Self { - ListOrValue::Value(n) - } + fn from(n: T) -> Self { + ListOrValue::Value(n) + } } impl From> for ListOrValue { - fn from(n: Vec) -> Self { - ListOrValue::List(n) - } + fn from(n: Vec) -> Self { + ListOrValue::List(n) + } } #[cfg(test)] mod tests { - use super::*; - use crate::assert_deser; + use super::*; + use crate::assert_deser; - #[test] - fn should_serialize_and_deserialize() { - assert_deser(r#"5"#, ListOrValue::Value(5_u64)); - assert_deser(r#""str""#, ListOrValue::Value("str".to_string())); - assert_deser(r#"[1,2,3]"#, ListOrValue::List(vec![1_u64, 2_u64, 3_u64])); - } + #[test] + fn should_serialize_and_deserialize() { + assert_deser(r#"5"#, ListOrValue::Value(5_u64)); + assert_deser(r#""str""#, ListOrValue::Value("str".to_string())); + assert_deser(r#"[1,2,3]"#, ListOrValue::List(vec![1_u64, 2_u64, 3_u64])); + } } diff --git a/primitives/rpc/src/number.rs b/primitives/rpc/src/number.rs index 1d41dd234f..3524f97146 100644 --- a/primitives/rpc/src/number.rs +++ b/primitives/rpc/src/number.rs @@ -16,9 +16,9 @@ //! Chain RPC Block number type. -use serde::{Serialize, Deserialize}; -use std::{convert::TryFrom, fmt::Debug}; +use serde::{Deserialize, Serialize}; use sp_core::U256; +use std::{convert::TryFrom, fmt::Debug}; /// RPC Block number type /// @@ -30,60 +30,66 @@ use sp_core::U256; #[derive(Serialize, Deserialize, Debug, PartialEq)] #[serde(untagged)] pub enum NumberOrHex { - /// The original header number type of block. - Number(Number), - /// Hex representation of the block number. - Hex(U256), + /// The original header number type of block. + Number(Number), + /// Hex representation of the block number. 
+ Hex(U256), } impl + From + Debug + PartialOrd> NumberOrHex { - /// Attempts to convert into concrete block number. - /// - /// Fails in case hex number is too big. - pub fn to_number(self) -> Result { - let num = match self { - NumberOrHex::Number(n) => n, - NumberOrHex::Hex(h) => { - let l = h.low_u64(); - if U256::from(l) != h { - return Err(format!("`{}` does not fit into u64 type; unsupported for now.", h)) - } else { - Number::try_from(l) - .map_err(|_| format!("`{}` does not fit into block number type.", h))? - } - }, - }; - // FIXME <2329>: Database seems to limit the block number to u32 for no reason - if num > Number::from(u32::max_value()) { - return Err(format!("`{:?}` > u32::max_value(), the max block number is u32.", num)) - } - Ok(num) - } + /// Attempts to convert into concrete block number. + /// + /// Fails in case hex number is too big. + pub fn to_number(self) -> Result { + let num = match self { + NumberOrHex::Number(n) => n, + NumberOrHex::Hex(h) => { + let l = h.low_u64(); + if U256::from(l) != h { + return Err(format!( + "`{}` does not fit into u64 type; unsupported for now.", + h + )); + } else { + Number::try_from(l) + .map_err(|_| format!("`{}` does not fit into block number type.", h))? 
+ } + } + }; + // FIXME <2329>: Database seems to limit the block number to u32 for no reason + if num > Number::from(u32::max_value()) { + return Err(format!( + "`{:?}` > u32::max_value(), the max block number is u32.", + num + )); + } + Ok(num) + } } impl From for NumberOrHex { - fn from(n: u64) -> Self { - NumberOrHex::Number(n) - } + fn from(n: u64) -> Self { + NumberOrHex::Number(n) + } } impl From for NumberOrHex { - fn from(n: U256) -> Self { - NumberOrHex::Hex(n) - } + fn from(n: U256) -> Self { + NumberOrHex::Hex(n) + } } #[cfg(test)] mod tests { - use super::*; - use crate::assert_deser; + use super::*; + use crate::assert_deser; - #[test] - fn should_serialize_and_deserialize() { - assert_deser(r#""0x1234""#, NumberOrHex::::Hex(0x1234.into())); - assert_deser(r#""0x0""#, NumberOrHex::::Hex(0.into())); - assert_deser(r#"5"#, NumberOrHex::Number(5_u64)); - assert_deser(r#"10000"#, NumberOrHex::Number(10000_u32)); - assert_deser(r#"0"#, NumberOrHex::Number(0_u16)); - } + #[test] + fn should_serialize_and_deserialize() { + assert_deser(r#""0x1234""#, NumberOrHex::::Hex(0x1234.into())); + assert_deser(r#""0x0""#, NumberOrHex::::Hex(0.into())); + assert_deser(r#"5"#, NumberOrHex::Number(5_u64)); + assert_deser(r#"10000"#, NumberOrHex::Number(10000_u32)); + assert_deser(r#"0"#, NumberOrHex::Number(0_u16)); + } } diff --git a/primitives/runtime-interface/proc-macro/src/lib.rs b/primitives/runtime-interface/proc-macro/src/lib.rs index 2ed8b1a228..fd95786e9f 100644 --- a/primitives/runtime-interface/proc-macro/src/lib.rs +++ b/primitives/runtime-interface/proc-macro/src/lib.rs @@ -25,7 +25,7 @@ //! 3. The [`PassByEnum`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Enum`. //! 4. The [`PassByInner`](derive.PassByInner.html) derive macro for implementing `PassBy` with `Inner`. 
-use syn::{parse_macro_input, ItemTrait, DeriveInput}; +use syn::{parse_macro_input, DeriveInput, ItemTrait}; mod pass_by; mod runtime_interface; @@ -33,31 +33,37 @@ mod utils; #[proc_macro_attribute] pub fn runtime_interface( - attrs: proc_macro::TokenStream, - input: proc_macro::TokenStream, + attrs: proc_macro::TokenStream, + input: proc_macro::TokenStream, ) -> proc_macro::TokenStream { - let trait_def = parse_macro_input!(input as ItemTrait); - let wasm_only = parse_macro_input!(attrs as Option); + let trait_def = parse_macro_input!(input as ItemTrait); + let wasm_only = parse_macro_input!(attrs as Option); - runtime_interface::runtime_interface_impl(trait_def, wasm_only.is_some()) - .unwrap_or_else(|e| e.to_compile_error()) - .into() + runtime_interface::runtime_interface_impl(trait_def, wasm_only.is_some()) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(PassByCodec)] pub fn pass_by_codec(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let input = parse_macro_input!(input as DeriveInput); - pass_by::codec_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() + let input = parse_macro_input!(input as DeriveInput); + pass_by::codec_derive_impl(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(PassByInner)] pub fn pass_by_inner(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let input = parse_macro_input!(input as DeriveInput); - pass_by::inner_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() + let input = parse_macro_input!(input as DeriveInput); + pass_by::inner_derive_impl(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } #[proc_macro_derive(PassByEnum)] pub fn pass_by_enum(input: proc_macro::TokenStream) -> proc_macro::TokenStream { - let input = parse_macro_input!(input as DeriveInput); - pass_by::enum_derive_impl(input).unwrap_or_else(|e| e.to_compile_error()).into() + let input = parse_macro_input!(input as DeriveInput); 
+ pass_by::enum_derive_impl(input) + .unwrap_or_else(|e| e.to_compile_error()) + .into() } diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs b/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs index 5e30870a82..1c0eb045c7 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/codec.rs @@ -21,7 +21,7 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Generics, parse_quote}; +use syn::{parse_quote, DeriveInput, Generics, Result}; use quote::quote; @@ -29,30 +29,30 @@ use proc_macro2::TokenStream; /// The derive implementation for `PassBy` with `Codec`. pub fn derive_impl(mut input: DeriveInput) -> Result { - add_trait_bounds(&mut input.generics); - let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); - let crate_include = generate_runtime_interface_include(); - let crate_ = generate_crate_access(); - let ident = input.ident; - - let res = quote! { - const _: () = { - #crate_include - - impl #impl_generics #crate_::pass_by::PassBy for #ident #ty_generics #where_clause { - type PassBy = #crate_::pass_by::Codec<#ident>; - } - }; - }; - - Ok(res) + add_trait_bounds(&mut input.generics); + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let crate_include = generate_runtime_interface_include(); + let crate_ = generate_crate_access(); + let ident = input.ident; + + let res = quote! { + const _: () = { + #crate_include + + impl #impl_generics #crate_::pass_by::PassBy for #ident #ty_generics #where_clause { + type PassBy = #crate_::pass_by::Codec<#ident>; + } + }; + }; + + Ok(res) } /// Add the `codec::Codec` trait bound to every type parameter. 
fn add_trait_bounds(generics: &mut Generics) { - let crate_ = generate_crate_access(); + let crate_ = generate_crate_access(); - generics.type_params_mut() - .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::codec::Codec))); + generics + .type_params_mut() + .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::codec::Codec))); } - diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs index 5d5b3ae43b..89c9041936 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/enum_.rs @@ -20,58 +20,58 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Data, Fields, Error, Ident}; +use syn::{Data, DeriveInput, Error, Fields, Ident, Result}; use quote::quote; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; /// The derive implementation for `PassBy` with `Enum`. pub fn derive_impl(input: DeriveInput) -> Result { - let crate_include = generate_runtime_interface_include(); - let crate_ = generate_crate_access(); - let ident = input.ident; - let enum_fields = get_enum_field_idents(&input.data)? - .enumerate() - .map(|(i, v)| { - let i = i as u8; - - v.map(|v| (quote!(#i => Ok(#ident::#v)), quote!(#ident::#v => #i))) - }) - .collect::>>()?; - let try_from_variants = enum_fields.iter().map(|i| &i.0); - let into_variants = enum_fields.iter().map(|i| &i.1); - - let res = quote! 
{ - const _: () = { - #crate_include - - impl #crate_::pass_by::PassBy for #ident { - type PassBy = #crate_::pass_by::Enum<#ident>; - } - - impl #crate_::sp_std::convert::TryFrom for #ident { - type Error = (); - - fn try_from(inner: u8) -> #crate_::sp_std::result::Result { - match inner { - #( #try_from_variants, )* - _ => Err(()), - } - } - } - - impl From<#ident> for u8 { - fn from(var: #ident) -> u8 { - match var { - #( #into_variants ),* - } - } - } - }; - }; - - Ok(res) + let crate_include = generate_runtime_interface_include(); + let crate_ = generate_crate_access(); + let ident = input.ident; + let enum_fields = get_enum_field_idents(&input.data)? + .enumerate() + .map(|(i, v)| { + let i = i as u8; + + v.map(|v| (quote!(#i => Ok(#ident::#v)), quote!(#ident::#v => #i))) + }) + .collect::>>()?; + let try_from_variants = enum_fields.iter().map(|i| &i.0); + let into_variants = enum_fields.iter().map(|i| &i.1); + + let res = quote! { + const _: () = { + #crate_include + + impl #crate_::pass_by::PassBy for #ident { + type PassBy = #crate_::pass_by::Enum<#ident>; + } + + impl #crate_::sp_std::convert::TryFrom for #ident { + type Error = (); + + fn try_from(inner: u8) -> #crate_::sp_std::result::Result { + match inner { + #( #try_from_variants, )* + _ => Err(()), + } + } + } + + impl From<#ident> for u8 { + fn from(var: #ident) -> u8 { + match var { + #( #into_variants ),* + } + } + } + }; + }; + + Ok(res) } /// Get the enum fields idents of the given `data` object as iterator. @@ -79,23 +79,29 @@ pub fn derive_impl(input: DeriveInput) -> Result { /// Returns an error if the number of variants is greater than `256`, the given `data` is not an /// enum or a variant is not an unit. 
fn get_enum_field_idents<'a>(data: &'a Data) -> Result>> { - match data { - Data::Enum(d) => { - if d.variants.len() <= 256 { - Ok( - d.variants.iter().map(|v| if let Fields::Unit = v.fields { - Ok(&v.ident) - } else { - Err(Error::new( - Span::call_site(), - "`PassByEnum` only supports unit variants.", - )) - }) - ) - } else { - Err(Error::new(Span::call_site(), "`PassByEnum` only supports `256` variants.")) - } - }, - _ => Err(Error::new(Span::call_site(), "`PassByEnum` only supports enums as input type.")) - } + match data { + Data::Enum(d) => { + if d.variants.len() <= 256 { + Ok(d.variants.iter().map(|v| { + if let Fields::Unit = v.fields { + Ok(&v.ident) + } else { + Err(Error::new( + Span::call_site(), + "`PassByEnum` only supports unit variants.", + )) + } + })) + } else { + Err(Error::new( + Span::call_site(), + "`PassByEnum` only supports `256` variants.", + )) + } + } + _ => Err(Error::new( + Span::call_site(), + "`PassByEnum` only supports enums as input type.", + )), + } } diff --git a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs index 2e1caaa96c..00c4a8d0c2 100644 --- a/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs +++ b/primitives/runtime-interface/proc-macro/src/pass_by/inner.rs @@ -21,90 +21,89 @@ use crate::utils::{generate_crate_access, generate_runtime_interface_include}; -use syn::{DeriveInput, Result, Generics, parse_quote, Type, Data, Error, Fields, Ident}; +use syn::{parse_quote, Data, DeriveInput, Error, Fields, Generics, Ident, Result, Type}; use quote::quote; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; /// The derive implementation for `PassBy` with `Inner` and `PassByInner`. 
pub fn derive_impl(mut input: DeriveInput) -> Result { - add_trait_bounds(&mut input.generics); - let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); - let crate_include = generate_runtime_interface_include(); - let crate_ = generate_crate_access(); - let ident = input.ident; - let (inner_ty, inner_name) = extract_inner_ty_and_name(&input.data)?; - - let access_inner = match inner_name { - Some(ref name) => quote!(self.#name), - None => quote!(self.0), - }; - - let from_inner = match inner_name { - Some(name) => quote!(Self { #name: inner }), - None => quote!(Self(inner)), - }; - - let res = quote! { - const _: () = { - #crate_include - - impl #impl_generics #crate_::pass_by::PassBy for #ident #ty_generics #where_clause { - type PassBy = #crate_::pass_by::Inner<#ident, #inner_ty>; - } - - impl #impl_generics #crate_::pass_by::PassByInner for #ident #ty_generics #where_clause { - type Inner = #inner_ty; - - fn into_inner(self) -> Self::Inner { - #access_inner - } - - fn inner(&self) -> &Self::Inner { - &#access_inner - } - - fn from_inner(inner: Self::Inner) -> Self { - #from_inner - } - } - }; - }; - - Ok(res) + add_trait_bounds(&mut input.generics); + let (impl_generics, ty_generics, where_clause) = input.generics.split_for_impl(); + let crate_include = generate_runtime_interface_include(); + let crate_ = generate_crate_access(); + let ident = input.ident; + let (inner_ty, inner_name) = extract_inner_ty_and_name(&input.data)?; + + let access_inner = match inner_name { + Some(ref name) => quote!(self.#name), + None => quote!(self.0), + }; + + let from_inner = match inner_name { + Some(name) => quote!(Self { #name: inner }), + None => quote!(Self(inner)), + }; + + let res = quote! 
{ + const _: () = { + #crate_include + + impl #impl_generics #crate_::pass_by::PassBy for #ident #ty_generics #where_clause { + type PassBy = #crate_::pass_by::Inner<#ident, #inner_ty>; + } + + impl #impl_generics #crate_::pass_by::PassByInner for #ident #ty_generics #where_clause { + type Inner = #inner_ty; + + fn into_inner(self) -> Self::Inner { + #access_inner + } + + fn inner(&self) -> &Self::Inner { + &#access_inner + } + + fn from_inner(inner: Self::Inner) -> Self { + #from_inner + } + } + }; + }; + + Ok(res) } /// Add the `RIType` trait bound to every type parameter. fn add_trait_bounds(generics: &mut Generics) { - let crate_ = generate_crate_access(); + let crate_ = generate_crate_access(); - generics.type_params_mut() - .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::RIType))); + generics + .type_params_mut() + .for_each(|type_param| type_param.bounds.push(parse_quote!(#crate_::RIType))); } /// Extract the inner type and optional name from given input data. /// /// It also checks that the input data is a newtype struct. 
fn extract_inner_ty_and_name(data: &Data) -> Result<(Type, Option)> { - if let Data::Struct(ref struct_data) = data { - match struct_data.fields { - Fields::Named(ref named) if named.named.len() == 1 => { - let field = &named.named[0]; - return Ok((field.ty.clone(), field.ident.clone())) - }, - Fields::Unnamed(ref unnamed) if unnamed.unnamed.len() == 1 => { - let field = &unnamed.unnamed[0]; - return Ok((field.ty.clone(), field.ident.clone())) - } - _ => {}, - } - } - - Err( - Error::new( - Span::call_site(), - "Only newtype/one field structs are supported by `PassByInner`!", - ) - ) + if let Data::Struct(ref struct_data) = data { + match struct_data.fields { + Fields::Named(ref named) if named.named.len() == 1 => { + let field = &named.named[0]; + return Ok((field.ty.clone(), field.ident.clone())); + } + Fields::Unnamed(ref unnamed) if unnamed.unnamed.len() == 1 => { + let field = &unnamed.unnamed[0]; + return Ok((field.ty.clone(), field.ident.clone())); + } + _ => {} + } + } + + Err(Error::new( + Span::call_site(), + "Only newtype/one field structs are supported by `PassByInner`!", + )) } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs index e7c34fbf99..f0bc596573 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/bare_function_interface.rs @@ -29,15 +29,16 @@ //! are feature-gated, so that one is compiled for the native and the other for the wasm side. 
use crate::utils::{ - generate_crate_access, create_exchangeable_host_function_ident, get_function_arguments, - get_function_argument_names, get_runtime_interface, create_function_ident_with_version, + create_exchangeable_host_function_ident, create_function_ident_with_version, + generate_crate_access, get_function_argument_names, get_function_arguments, + get_runtime_interface, }; use syn::{ - Ident, ItemTrait, TraitItemMethod, FnArg, Signature, Result, spanned::Spanned, parse_quote, + parse_quote, spanned::Spanned, FnArg, Ident, ItemTrait, Result, Signature, TraitItemMethod, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::{quote, quote_spanned}; @@ -46,189 +47,186 @@ use std::iter; /// Generate one bare function per trait method. The name of the bare function is equal to the name /// of the trait method. pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { - let trait_name = &trait_def.ident; - let runtime_interface = get_runtime_interface(trait_def)?; - - // latest version dispatch - let token_stream: Result = runtime_interface.latest_versions() - .try_fold( - TokenStream::new(), - |mut t, (latest_version, method)| { - t.extend(function_for_method(method, latest_version, is_wasm_only)?); - Ok(t) - } - ); - - // earlier versions compatibility dispatch (only std variant) - let result: Result = runtime_interface.all_versions().try_fold(token_stream?, |mut t, (version, method)| - { - t.extend(function_std_impl(trait_name, method, version, is_wasm_only)?); - Ok(t) - }); - - result + let trait_name = &trait_def.ident; + let runtime_interface = get_runtime_interface(trait_def)?; + + // latest version dispatch + let token_stream: Result = runtime_interface.latest_versions().try_fold( + TokenStream::new(), + |mut t, (latest_version, method)| { + t.extend(function_for_method(method, latest_version, is_wasm_only)?); + Ok(t) + }, + ); + + // earlier versions compatibility dispatch (only std variant) + let result: 
Result = + runtime_interface + .all_versions() + .try_fold(token_stream?, |mut t, (version, method)| { + t.extend(function_std_impl( + trait_name, + method, + version, + is_wasm_only, + )?); + Ok(t) + }); + + result } /// Generates the bare function implementation for the given method for the host and wasm side. fn function_for_method( - method: &TraitItemMethod, - latest_version: u32, - is_wasm_only: bool, + method: &TraitItemMethod, + latest_version: u32, + is_wasm_only: bool, ) -> Result { - let std_impl = if !is_wasm_only { - function_std_latest_impl(method, latest_version)? - } else { - quote!() - }; + let std_impl = if !is_wasm_only { + function_std_latest_impl(method, latest_version)? + } else { + quote!() + }; - let no_std_impl = function_no_std_impl(method)?; + let no_std_impl = function_no_std_impl(method)?; - Ok( - quote! { - #std_impl + Ok(quote! { + #std_impl - #no_std_impl - } - ) + #no_std_impl + }) } /// Generates the bare function implementation for `cfg(not(feature = "std"))`. fn function_no_std_impl(method: &TraitItemMethod) -> Result { - let function_name = &method.sig.ident; - let host_function_name = create_exchangeable_host_function_ident(&method.sig.ident); - let args = get_function_arguments(&method.sig); - let arg_names = get_function_argument_names(&method.sig); - let return_value = &method.sig.output; - let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); - - Ok( - quote! 
{ - #[cfg(not(feature = "std"))] - #( #attrs )* - pub fn #function_name( #( #args, )* ) #return_value { - // Call the host function - #host_function_name.get()( #( #arg_names, )* ) - } - } - ) + let function_name = &method.sig.ident; + let host_function_name = create_exchangeable_host_function_ident(&method.sig.ident); + let args = get_function_arguments(&method.sig); + let arg_names = get_function_argument_names(&method.sig); + let return_value = &method.sig.output; + let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); + + Ok(quote! { + #[cfg(not(feature = "std"))] + #( #attrs )* + pub fn #function_name( #( #args, )* ) #return_value { + // Call the host function + #host_function_name.get()( #( #arg_names, )* ) + } + }) } /// Generate call to latest function version for `cfg((feature = "std")` /// /// This should generate simple `fn func(..) { func_version_(..) }`. -fn function_std_latest_impl( - method: &TraitItemMethod, - latest_version: u32, -) -> Result { - let function_name = &method.sig.ident; - let args = get_function_arguments(&method.sig).map(FnArg::Typed); - let arg_names = get_function_argument_names(&method.sig).collect::>(); - let return_value = &method.sig.output; - let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); - let latest_function_name = create_function_ident_with_version(&method.sig.ident, latest_version); - - Ok(quote_spanned! 
{ method.span() => - #[cfg(feature = "std")] - #( #attrs )* - pub fn #function_name( #( #args, )* ) #return_value { - #latest_function_name( - #( #arg_names, )* - ) - } - }) +fn function_std_latest_impl(method: &TraitItemMethod, latest_version: u32) -> Result { + let function_name = &method.sig.ident; + let args = get_function_arguments(&method.sig).map(FnArg::Typed); + let arg_names = get_function_argument_names(&method.sig).collect::>(); + let return_value = &method.sig.output; + let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); + let latest_function_name = + create_function_ident_with_version(&method.sig.ident, latest_version); + + Ok(quote_spanned! { method.span() => + #[cfg(feature = "std")] + #( #attrs )* + pub fn #function_name( #( #args, )* ) #return_value { + #latest_function_name( + #( #arg_names, )* + ) + } + }) } /// Generates the bare function implementation for `cfg(feature = "std")`. fn function_std_impl( - trait_name: &Ident, - method: &TraitItemMethod, - version: u32, - is_wasm_only: bool, + trait_name: &Ident, + method: &TraitItemMethod, + version: u32, + is_wasm_only: bool, ) -> Result { - let function_name = create_function_ident_with_version(&method.sig.ident, version); - - let crate_ = generate_crate_access(); - let args = get_function_arguments(&method.sig).map(FnArg::Typed).chain( - // Add the function context as last parameter when this is a wasm only interface. - iter::from_fn(|| - if is_wasm_only { - Some( - parse_quote!( - mut __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext - ) - ) - } else { - None - } - ).take(1), - ); - let return_value = &method.sig.output; - let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); - // Don't make the function public accessible when this is a wasm only interface. - let call_to_trait = generate_call_to_trait(trait_name, method, version, is_wasm_only); - - Ok( - quote_spanned! 
{ method.span() => - #[cfg(feature = "std")] - #( #attrs )* - fn #function_name( #( #args, )* ) #return_value { - #call_to_trait - } - } - ) + let function_name = create_function_ident_with_version(&method.sig.ident, version); + + let crate_ = generate_crate_access(); + let args = get_function_arguments(&method.sig).map(FnArg::Typed).chain( + // Add the function context as last parameter when this is a wasm only interface. + iter::from_fn(|| { + if is_wasm_only { + Some(parse_quote!( + mut __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext + )) + } else { + None + } + }) + .take(1), + ); + let return_value = &method.sig.output; + let attrs = method.attrs.iter().filter(|a| !a.path.is_ident("version")); + // Don't make the function public accessible when this is a wasm only interface. + let call_to_trait = generate_call_to_trait(trait_name, method, version, is_wasm_only); + + Ok(quote_spanned! { method.span() => + #[cfg(feature = "std")] + #( #attrs )* + fn #function_name( #( #args, )* ) #return_value { + #call_to_trait + } + }) } /// Generate the call to the interface trait. fn generate_call_to_trait( - trait_name: &Ident, - method: &TraitItemMethod, - version: u32, - is_wasm_only: bool, + trait_name: &Ident, + method: &TraitItemMethod, + version: u32, + is_wasm_only: bool, ) -> TokenStream { - let crate_ = generate_crate_access(); - let method_name = create_function_ident_with_version(&method.sig.ident, version); - let expect_msg = format!( - "`{}` called outside of an Externalities-provided environment.", - method_name, - ); - let arg_names = get_function_argument_names(&method.sig); - - if takes_self_argument(&method.sig) { - let instance = if is_wasm_only { - Ident::new("__function_context__", Span::call_site()) - } else { - Ident::new("__externalities__", Span::call_site()) - }; - - let impl_ = quote!( #trait_name::#method_name(&mut #instance, #( #arg_names, )*) ); - - if is_wasm_only { - quote_spanned! 
{ method.span() => #impl_ } - } else { - quote_spanned! { method.span() => - #crate_::with_externalities(|mut #instance| #impl_).expect(#expect_msg) - } - } - } else { - // The name of the trait the interface trait is implemented for - let impl_trait_name = if is_wasm_only { - quote!( #crate_::sp_wasm_interface::FunctionContext ) - } else { - quote!( #crate_::Externalities ) - }; - - quote_spanned! { method.span() => - <&mut dyn #impl_trait_name as #trait_name>::#method_name( - #( #arg_names, )* - ) - } - } + let crate_ = generate_crate_access(); + let method_name = create_function_ident_with_version(&method.sig.ident, version); + let expect_msg = format!( + "`{}` called outside of an Externalities-provided environment.", + method_name, + ); + let arg_names = get_function_argument_names(&method.sig); + + if takes_self_argument(&method.sig) { + let instance = if is_wasm_only { + Ident::new("__function_context__", Span::call_site()) + } else { + Ident::new("__externalities__", Span::call_site()) + }; + + let impl_ = quote!( #trait_name::#method_name(&mut #instance, #( #arg_names, )*) ); + + if is_wasm_only { + quote_spanned! { method.span() => #impl_ } + } else { + quote_spanned! { method.span() => + #crate_::with_externalities(|mut #instance| #impl_).expect(#expect_msg) + } + } + } else { + // The name of the trait the interface trait is implemented for + let impl_trait_name = if is_wasm_only { + quote!( #crate_::sp_wasm_interface::FunctionContext ) + } else { + quote!( #crate_::Externalities ) + }; + + quote_spanned! { method.span() => + <&mut dyn #impl_trait_name as #trait_name>::#method_name( + #( #arg_names, )* + ) + } + } } /// Returns if the given `Signature` takes a `self` argument. 
fn takes_self_argument(sig: &Signature) -> bool { - match sig.inputs.first() { - Some(FnArg::Receiver(_)) => true, - _ => false, - } + match sig.inputs.first() { + Some(FnArg::Receiver(_)) => true, + _ => false, + } } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs index 205ee87105..174432f26c 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/host_function_interface.rs @@ -21,166 +21,168 @@ //! executor. These implementations call the bare function interface. use crate::utils::{ - generate_crate_access, create_host_function_ident, get_function_argument_names, - get_function_argument_types_without_ref, get_function_argument_types_ref_and_mut, - get_function_argument_names_and_types_without_ref, get_function_arguments, - get_function_argument_types, create_exchangeable_host_function_ident, get_runtime_interface, - create_function_ident_with_version, + create_exchangeable_host_function_ident, create_function_ident_with_version, + create_host_function_ident, generate_crate_access, get_function_argument_names, + get_function_argument_names_and_types_without_ref, get_function_argument_types, + get_function_argument_types_ref_and_mut, get_function_argument_types_without_ref, + get_function_arguments, get_runtime_interface, }; use syn::{ - ItemTrait, TraitItemMethod, Result, ReturnType, Ident, Pat, Error, Signature, spanned::Spanned, + spanned::Spanned, Error, Ident, ItemTrait, Pat, Result, ReturnType, Signature, TraitItemMethod, }; -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use quote::{quote, ToTokens}; use inflector::Inflector; -use std::iter::{Iterator, self}; +use std::iter::{self, Iterator}; /// Generate the extern host functions for wasm and the `HostFunctions` struct that 
provides the /// implementations for the host functions on the host. pub fn generate(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { - let trait_name = &trait_def.ident; - let extern_host_function_impls = get_runtime_interface(trait_def)? - .latest_versions() - .try_fold(TokenStream::new(), |mut t, (version, method)| { - t.extend(generate_extern_host_function(method, version, trait_name)?); - Ok::<_, Error>(t) - })?; - let exchangeable_host_functions = get_runtime_interface(trait_def)? - .latest_versions() - .try_fold(TokenStream::new(), |mut t, (_, m)| { - t.extend(generate_exchangeable_host_function(m)?); - Ok::<_, Error>(t) - })?; - let host_functions_struct = generate_host_functions_struct(trait_def, is_wasm_only)?; - - Ok( - quote! { - /// The implementations of the extern host functions. This special implementation module - /// is required to change the extern host functions signature to - /// `unsafe fn name(args) -> ret` to make the function implementations exchangeable. - #[cfg(not(feature = "std"))] - mod extern_host_function_impls { - use super::*; - - #extern_host_function_impls - } - - #exchangeable_host_functions - - #host_functions_struct - } - ) + let trait_name = &trait_def.ident; + let extern_host_function_impls = get_runtime_interface(trait_def)? + .latest_versions() + .try_fold(TokenStream::new(), |mut t, (version, method)| { + t.extend(generate_extern_host_function(method, version, trait_name)?); + Ok::<_, Error>(t) + })?; + let exchangeable_host_functions = get_runtime_interface(trait_def)? + .latest_versions() + .try_fold(TokenStream::new(), |mut t, (_, m)| { + t.extend(generate_exchangeable_host_function(m)?); + Ok::<_, Error>(t) + })?; + let host_functions_struct = generate_host_functions_struct(trait_def, is_wasm_only)?; + + Ok(quote! { + /// The implementations of the extern host functions. 
This special implementation module + /// is required to change the extern host functions signature to + /// `unsafe fn name(args) -> ret` to make the function implementations exchangeable. + #[cfg(not(feature = "std"))] + mod extern_host_function_impls { + use super::*; + + #extern_host_function_impls + } + + #exchangeable_host_functions + + #host_functions_struct + }) } /// Generate the extern host function for the given method. -fn generate_extern_host_function(method: &TraitItemMethod, version: u32, trait_name: &Ident) -> Result { - let crate_ = generate_crate_access(); - let args = get_function_arguments(&method.sig); - let arg_types = get_function_argument_types_without_ref(&method.sig); - let arg_types2 = get_function_argument_types_without_ref(&method.sig); - let arg_names = get_function_argument_names(&method.sig); - let arg_names2 = get_function_argument_names(&method.sig); - let arg_names3 = get_function_argument_names(&method.sig); - let function = &method.sig.ident; - let ext_function = create_host_function_ident(&method.sig.ident, version, trait_name); - let doc_string = format!( - " Default extern host function implementation for [`super::{}`].", - method.sig.ident, - ); - let return_value = &method.sig.output; - - let ffi_return_value = match method.sig.output { - ReturnType::Default => quote!(), - ReturnType::Type(_, ref ty) => quote! { - -> <#ty as #crate_::RIType>::FFIType - }, - }; - - let convert_return_value = match return_value { - ReturnType::Default => quote!(), - ReturnType::Type(_, ref ty) => quote! { - <#ty as #crate_::wasm::FromFFIValue>::from_ffi_value(result) - } - }; - - Ok( - quote! { - #[doc = #doc_string] - pub fn #function ( #( #args ),* ) #return_value { - extern "C" { - /// The extern function. - pub fn #ext_function ( - #( #arg_names: <#arg_types as #crate_::RIType>::FFIType ),* - ) #ffi_return_value; - } - - // Generate all wrapped ffi values. 
- #( - let #arg_names2 = <#arg_types2 as #crate_::wasm::IntoFFIValue>::into_ffi_value( - &#arg_names2, - ); - )* - - let result = unsafe { #ext_function( #( #arg_names3.get() ),* ) }; - - #convert_return_value - } - } - ) +fn generate_extern_host_function( + method: &TraitItemMethod, + version: u32, + trait_name: &Ident, +) -> Result { + let crate_ = generate_crate_access(); + let args = get_function_arguments(&method.sig); + let arg_types = get_function_argument_types_without_ref(&method.sig); + let arg_types2 = get_function_argument_types_without_ref(&method.sig); + let arg_names = get_function_argument_names(&method.sig); + let arg_names2 = get_function_argument_names(&method.sig); + let arg_names3 = get_function_argument_names(&method.sig); + let function = &method.sig.ident; + let ext_function = create_host_function_ident(&method.sig.ident, version, trait_name); + let doc_string = format!( + " Default extern host function implementation for [`super::{}`].", + method.sig.ident, + ); + let return_value = &method.sig.output; + + let ffi_return_value = match method.sig.output { + ReturnType::Default => quote!(), + ReturnType::Type(_, ref ty) => quote! { + -> <#ty as #crate_::RIType>::FFIType + }, + }; + + let convert_return_value = match return_value { + ReturnType::Default => quote!(), + ReturnType::Type(_, ref ty) => quote! { + <#ty as #crate_::wasm::FromFFIValue>::from_ffi_value(result) + }, + }; + + Ok(quote! { + #[doc = #doc_string] + pub fn #function ( #( #args ),* ) #return_value { + extern "C" { + /// The extern function. + pub fn #ext_function ( + #( #arg_names: <#arg_types as #crate_::RIType>::FFIType ),* + ) #ffi_return_value; + } + + // Generate all wrapped ffi values. + #( + let #arg_names2 = <#arg_types2 as #crate_::wasm::IntoFFIValue>::into_ffi_value( + &#arg_names2, + ); + )* + + let result = unsafe { #ext_function( #( #arg_names3.get() ),* ) }; + + #convert_return_value + } + }) } /// Generate the host exchangeable function for the given method. 
fn generate_exchangeable_host_function(method: &TraitItemMethod) -> Result { - let crate_ = generate_crate_access(); - let arg_types = get_function_argument_types(&method.sig); - let function = &method.sig.ident; - let exchangeable_function = create_exchangeable_host_function_ident(&method.sig.ident); - let doc_string = format!(" Exchangeable host function used by [`{}`].", method.sig.ident); - let output = &method.sig.output; - - Ok( - quote! { - #[cfg(not(feature = "std"))] - #[allow(non_upper_case_globals)] - #[doc = #doc_string] - pub static #exchangeable_function : #crate_::wasm::ExchangeableFunction< - fn ( #( #arg_types ),* ) #output - > = #crate_::wasm::ExchangeableFunction::new(extern_host_function_impls::#function); - } - ) + let crate_ = generate_crate_access(); + let arg_types = get_function_argument_types(&method.sig); + let function = &method.sig.ident; + let exchangeable_function = create_exchangeable_host_function_ident(&method.sig.ident); + let doc_string = format!( + " Exchangeable host function used by [`{}`].", + method.sig.ident + ); + let output = &method.sig.output; + + Ok(quote! { + #[cfg(not(feature = "std"))] + #[allow(non_upper_case_globals)] + #[doc = #doc_string] + pub static #exchangeable_function : #crate_::wasm::ExchangeableFunction< + fn ( #( #arg_types ),* ) #output + > = #crate_::wasm::ExchangeableFunction::new(extern_host_function_impls::#function); + }) } /// Generate the `HostFunctions` struct that implements `wasm-interface::HostFunctions` to provide /// implementations for the extern host functions. -fn generate_host_functions_struct(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { - let crate_ = generate_crate_access(); - - let host_functions = get_runtime_interface(trait_def)? - .all_versions() - .map(|(version, method)| - generate_host_function_implementation(&trait_def.ident, method, version, is_wasm_only) - ) - .collect::>>()?; - - Ok( - quote! { - /// Provides implementations for the extern host functions. 
- #[cfg(feature = "std")] - pub struct HostFunctions; - - #[cfg(feature = "std")] - impl #crate_::sp_wasm_interface::HostFunctions for HostFunctions { - fn host_functions() -> Vec<&'static dyn #crate_::sp_wasm_interface::Function> { - vec![ #( #host_functions ),* ] - } - } - } - ) +fn generate_host_functions_struct( + trait_def: &ItemTrait, + is_wasm_only: bool, +) -> Result { + let crate_ = generate_crate_access(); + + let host_functions = get_runtime_interface(trait_def)? + .all_versions() + .map(|(version, method)| { + generate_host_function_implementation(&trait_def.ident, method, version, is_wasm_only) + }) + .collect::>>()?; + + Ok(quote! { + /// Provides implementations for the extern host functions. + #[cfg(feature = "std")] + pub struct HostFunctions; + + #[cfg(feature = "std")] + impl #crate_::sp_wasm_interface::HostFunctions for HostFunctions { + fn host_functions() -> Vec<&'static dyn #crate_::sp_wasm_interface::Function> { + vec![ #( #host_functions ),* ] + } + } + }) } /// Generates the host function struct that implements `wasm_interface::Function` and returns a static @@ -189,181 +191,179 @@ fn generate_host_functions_struct(trait_def: &ItemTrait, is_wasm_only: bool) -> /// When calling from wasm into the host, we will call the `execute` function that calls the native /// implementation of the function. 
fn generate_host_function_implementation( - trait_name: &Ident, - method: &TraitItemMethod, - version: u32, - is_wasm_only: bool, + trait_name: &Ident, + method: &TraitItemMethod, + version: u32, + is_wasm_only: bool, ) -> Result { - let name = create_host_function_ident(&method.sig.ident, version, trait_name).to_string(); - let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); - let crate_ = generate_crate_access(); - let signature = generate_wasm_interface_signature_for_host_function(&method.sig)?; - let wasm_to_ffi_values = generate_wasm_to_ffi_values( - &method.sig, - trait_name, - ).collect::>>()?; - let ffi_to_host_values = generate_ffi_to_host_value(&method.sig).collect::>>()?; - let host_function_call = generate_host_function_call(&method.sig, version, is_wasm_only); - let into_preallocated_ffi_value = generate_into_preallocated_ffi_value(&method.sig)?; - let convert_return_value = generate_return_value_into_wasm_value(&method.sig); - - Ok( - quote! { - { - struct #struct_name; - - impl #crate_::sp_wasm_interface::Function for #struct_name { - fn name(&self) -> &str { - #name - } - - fn signature(&self) -> #crate_::sp_wasm_interface::Signature { - #signature - } - - fn execute( - &self, - __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext, - args: &mut dyn Iterator, - ) -> std::result::Result, String> { - #( #wasm_to_ffi_values )* - #( #ffi_to_host_values )* - #host_function_call - #into_preallocated_ffi_value - #convert_return_value - } - } - - &#struct_name as &dyn #crate_::sp_wasm_interface::Function - } - } - ) + let name = create_host_function_ident(&method.sig.ident, version, trait_name).to_string(); + let struct_name = Ident::new(&name.to_pascal_case(), Span::call_site()); + let crate_ = generate_crate_access(); + let signature = generate_wasm_interface_signature_for_host_function(&method.sig)?; + let wasm_to_ffi_values = + generate_wasm_to_ffi_values(&method.sig, trait_name).collect::>>()?; + let 
ffi_to_host_values = generate_ffi_to_host_value(&method.sig).collect::>>()?; + let host_function_call = generate_host_function_call(&method.sig, version, is_wasm_only); + let into_preallocated_ffi_value = generate_into_preallocated_ffi_value(&method.sig)?; + let convert_return_value = generate_return_value_into_wasm_value(&method.sig); + + Ok(quote! { + { + struct #struct_name; + + impl #crate_::sp_wasm_interface::Function for #struct_name { + fn name(&self) -> &str { + #name + } + + fn signature(&self) -> #crate_::sp_wasm_interface::Signature { + #signature + } + + fn execute( + &self, + __function_context__: &mut dyn #crate_::sp_wasm_interface::FunctionContext, + args: &mut dyn Iterator, + ) -> std::result::Result, String> { + #( #wasm_to_ffi_values )* + #( #ffi_to_host_values )* + #host_function_call + #into_preallocated_ffi_value + #convert_return_value + } + } + + &#struct_name as &dyn #crate_::sp_wasm_interface::Function + } + }) } /// Generate the `wasm_interface::Signature` for the given host function `sig`. fn generate_wasm_interface_signature_for_host_function(sig: &Signature) -> Result { - let crate_ = generate_crate_access(); - let return_value = match &sig.output { - ReturnType::Type(_, ty) => - quote! { - Some( <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE ) - }, - ReturnType::Default => quote!( None ), - }; - let arg_types = get_function_argument_types_without_ref(sig) - .map(|ty| quote! { - <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE - }); - - Ok( - quote! { - #crate_::sp_wasm_interface::Signature { - args: std::borrow::Cow::Borrowed(&[ #( #arg_types ),* ][..]), - return_value: #return_value, - } - } - ) + let crate_ = generate_crate_access(); + let return_value = match &sig.output { + ReturnType::Type(_, ty) => quote! 
{ + Some( <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE ) + }, + ReturnType::Default => quote!(None), + }; + let arg_types = get_function_argument_types_without_ref(sig).map(|ty| { + quote! { + <<#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::IntoValue>::VALUE_TYPE + } + }); + + Ok(quote! { + #crate_::sp_wasm_interface::Signature { + args: std::borrow::Cow::Borrowed(&[ #( #arg_types ),* ][..]), + return_value: #return_value, + } + }) } /// Generate the code that converts the wasm values given to `HostFunctions::execute` into the FFI /// values. fn generate_wasm_to_ffi_values<'a>( - sig: &'a Signature, - trait_name: &'a Ident, + sig: &'a Signature, + trait_name: &'a Ident, ) -> impl Iterator> + 'a { - let crate_ = generate_crate_access(); - let function_name = &sig.ident; - let error_message = format!( - "Number of arguments given to `{}` does not match the expected number of arguments!", - function_name, - ); - - get_function_argument_names_and_types_without_ref(sig) - .map(move |(name, ty)| { - let try_from_error = format!( - "Could not instantiate `{}` from wasm value while executing `{}` from interface `{}`!", - name.to_token_stream(), - function_name, - trait_name, - ); - - let var_name = generate_ffi_value_var_name(&name)?; - - Ok(quote! 
{ - let val = args.next().ok_or_else(|| #error_message)?; - let #var_name = < - <#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::TryFromValue - >::try_from_value(val).ok_or_else(|| #try_from_error)?; - }) - }) + let crate_ = generate_crate_access(); + let function_name = &sig.ident; + let error_message = format!( + "Number of arguments given to `{}` does not match the expected number of arguments!", + function_name, + ); + + get_function_argument_names_and_types_without_ref(sig).map(move |(name, ty)| { + let try_from_error = format!( + "Could not instantiate `{}` from wasm value while executing `{}` from interface `{}`!", + name.to_token_stream(), + function_name, + trait_name, + ); + + let var_name = generate_ffi_value_var_name(&name)?; + + Ok(quote! { + let val = args.next().ok_or_else(|| #error_message)?; + let #var_name = < + <#ty as #crate_::RIType>::FFIType as #crate_::sp_wasm_interface::TryFromValue + >::try_from_value(val).ok_or_else(|| #try_from_error)?; + }) + }) } /// Generate the code to convert the ffi values on the host to the host values using `FromFFIValue`. fn generate_ffi_to_host_value<'a>( - sig: &'a Signature, + sig: &'a Signature, ) -> impl Iterator> + 'a { - let mut_access = get_function_argument_types_ref_and_mut(sig); - let crate_ = generate_crate_access(); - - get_function_argument_names_and_types_without_ref(sig) - .zip(mut_access.map(|v| v.and_then(|m| m.1))) - .map(move |((name, ty), mut_access)| { - let ffi_value_var_name = generate_ffi_value_var_name(&name)?; - - Ok( - quote! 
{ - let #mut_access #name = <#ty as #crate_::host::FromFFIValue>::from_ffi_value( - __function_context__, - #ffi_value_var_name, - )?; - } - ) - }) + let mut_access = get_function_argument_types_ref_and_mut(sig); + let crate_ = generate_crate_access(); + + get_function_argument_names_and_types_without_ref(sig) + .zip(mut_access.map(|v| v.and_then(|m| m.1))) + .map(move |((name, ty), mut_access)| { + let ffi_value_var_name = generate_ffi_value_var_name(&name)?; + + Ok(quote! { + let #mut_access #name = <#ty as #crate_::host::FromFFIValue>::from_ffi_value( + __function_context__, + #ffi_value_var_name, + )?; + }) + }) } /// Generate the code to call the host function and the ident that stores the result. fn generate_host_function_call(sig: &Signature, version: u32, is_wasm_only: bool) -> TokenStream { - let host_function_name = create_function_ident_with_version(&sig.ident, version); - let result_var_name = generate_host_function_result_var_name(&sig.ident); - let ref_and_mut = get_function_argument_types_ref_and_mut(sig).map(|ram| - ram.map(|(vr, vm)| quote!(#vr #vm)) - ); - let names = get_function_argument_names(sig); - - let var_access = names.zip(ref_and_mut) - .map(|(n, ref_and_mut)| { - quote!( #ref_and_mut #n ) - }) - // If this is a wasm only interface, we add the function context as last parameter. - .chain( - iter::from_fn(|| if is_wasm_only { Some(quote!(__function_context__)) } else { None }) - .take(1) - ); - - quote! 
{ - let #result_var_name = #host_function_name ( #( #var_access ),* ); - } + let host_function_name = create_function_ident_with_version(&sig.ident, version); + let result_var_name = generate_host_function_result_var_name(&sig.ident); + let ref_and_mut = + get_function_argument_types_ref_and_mut(sig).map(|ram| ram.map(|(vr, vm)| quote!(#vr #vm))); + let names = get_function_argument_names(sig); + + let var_access = names + .zip(ref_and_mut) + .map(|(n, ref_and_mut)| quote!( #ref_and_mut #n )) + // If this is a wasm only interface, we add the function context as last parameter. + .chain( + iter::from_fn(|| { + if is_wasm_only { + Some(quote!(__function_context__)) + } else { + None + } + }) + .take(1), + ); + + quote! { + let #result_var_name = #host_function_name ( #( #var_access ),* ); + } } /// Generate the variable name that stores the result of the host function. fn generate_host_function_result_var_name(name: &Ident) -> Ident { - Ident::new(&format!("{}_result", name), Span::call_site()) + Ident::new(&format!("{}_result", name), Span::call_site()) } /// Generate the variable name that stores the FFI value. 
fn generate_ffi_value_var_name(pat: &Pat) -> Result { - match pat { - Pat::Ident(pat_ident) => { - if let Some(by_ref) = pat_ident.by_ref { - Err(Error::new(by_ref.span(), "`ref` not supported!")) - } else if let Some(sub_pattern) = &pat_ident.subpat { - Err(Error::new(sub_pattern.0.span(), "Not supported!")) - } else { - Ok(Ident::new(&format!("{}_ffi_value", pat_ident.ident), Span::call_site())) - } - } - _ => Err(Error::new(pat.span(), "Not supported as variable name!")) - } + match pat { + Pat::Ident(pat_ident) => { + if let Some(by_ref) = pat_ident.by_ref { + Err(Error::new(by_ref.span(), "`ref` not supported!")) + } else if let Some(sub_pattern) = &pat_ident.subpat { + Err(Error::new(sub_pattern.0.span(), "Not supported!")) + } else { + Ok(Ident::new( + &format!("{}_ffi_value", pat_ident.ident), + Span::call_site(), + )) + } + } + _ => Err(Error::new(pat.span(), "Not supported as variable name!")), + } } /// Generate code that copies data from the host back to preallocated wasm memory. @@ -371,45 +371,43 @@ fn generate_ffi_value_var_name(pat: &Pat) -> Result { /// Any argument that is given as `&mut` is interpreted as preallocated memory and it is expected /// that the type implements `IntoPreAllocatedFFIValue`. fn generate_into_preallocated_ffi_value(sig: &Signature) -> Result { - let crate_ = generate_crate_access(); - let ref_and_mut = get_function_argument_types_ref_and_mut(sig).map(|ram| - ram.and_then(|(vr, vm)| vm.map(|v| (vr, v))) - ); - let names_and_types = get_function_argument_names_and_types_without_ref(sig); - - ref_and_mut.zip(names_and_types) - .filter_map(|(ram, (name, ty))| ram.map(|_| (name, ty))) - .map(|(name, ty)| { - let ffi_var_name = generate_ffi_value_var_name(&name)?; - - Ok( - quote! 
{ - <#ty as #crate_::host::IntoPreallocatedFFIValue>::into_preallocated_ffi_value( - #name, - __function_context__, - #ffi_var_name, - )?; - } - ) - }) - .collect() + let crate_ = generate_crate_access(); + let ref_and_mut = get_function_argument_types_ref_and_mut(sig) + .map(|ram| ram.and_then(|(vr, vm)| vm.map(|v| (vr, v)))); + let names_and_types = get_function_argument_names_and_types_without_ref(sig); + + ref_and_mut + .zip(names_and_types) + .filter_map(|(ram, (name, ty))| ram.map(|_| (name, ty))) + .map(|(name, ty)| { + let ffi_var_name = generate_ffi_value_var_name(&name)?; + + Ok(quote! { + <#ty as #crate_::host::IntoPreallocatedFFIValue>::into_preallocated_ffi_value( + #name, + __function_context__, + #ffi_var_name, + )?; + }) + }) + .collect() } /// Generate the code that converts the return value into the appropriate wasm value. fn generate_return_value_into_wasm_value(sig: &Signature) -> TokenStream { - let crate_ = generate_crate_access(); - - match &sig.output { - ReturnType::Default => quote!( Ok(None) ), - ReturnType::Type(_, ty) => { - let result_var_name = generate_host_function_result_var_name(&sig.ident); - - quote! { - <#ty as #crate_::host::IntoFFIValue>::into_ffi_value( - #result_var_name, - __function_context__, - ).map(#crate_::sp_wasm_interface::IntoValue::into_value).map(Some) - } - } - } + let crate_ = generate_crate_access(); + + match &sig.output { + ReturnType::Default => quote!(Ok(None)), + ReturnType::Type(_, ty) => { + let result_var_name = generate_host_function_result_var_name(&sig.ident); + + quote! 
{ + <#ty as #crate_::host::IntoFFIValue>::into_ffi_value( + #result_var_name, + __function_context__, + ).map(#crate_::sp_wasm_interface::IntoValue::into_value).map(Some) + } + } + } } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs index 1c88198d6e..4c2a53b016 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/mod.rs @@ -30,8 +30,8 @@ mod trait_decl_impl; /// Custom keywords supported by the `runtime_interface` attribute. pub mod keywords { - // Custom keyword `wasm_only` that can be given as attribute to [`runtime_interface`]. - syn::custom_keyword!(wasm_only); + // Custom keyword `wasm_only` that can be given as attribute to [`runtime_interface`]. + syn::custom_keyword!(wasm_only); } /// Implementation of the `runtime_interface` attribute. @@ -39,27 +39,30 @@ pub mod keywords { /// It expects the trait definition the attribute was put above and if this should be an wasm only /// interface. pub fn runtime_interface_impl(trait_def: ItemTrait, is_wasm_only: bool) -> Result { - let bare_functions = bare_function_interface::generate(&trait_def, is_wasm_only)?; - let crate_include = generate_runtime_interface_include(); - let mod_name = Ident::new(&trait_def.ident.to_string().to_snake_case(), Span::call_site()); - let trait_decl_impl = trait_decl_impl::process(&trait_def, is_wasm_only)?; - let host_functions = host_function_interface::generate(&trait_def, is_wasm_only)?; - let vis = trait_def.vis; - let attrs = &trait_def.attrs; - - let res = quote! 
{ - #( #attrs )* - #vis mod #mod_name { - use super::*; - #crate_include - - #bare_functions - - #trait_decl_impl - - #host_functions - } - }; - - Ok(res) + let bare_functions = bare_function_interface::generate(&trait_def, is_wasm_only)?; + let crate_include = generate_runtime_interface_include(); + let mod_name = Ident::new( + &trait_def.ident.to_string().to_snake_case(), + Span::call_site(), + ); + let trait_decl_impl = trait_decl_impl::process(&trait_def, is_wasm_only)?; + let host_functions = host_function_interface::generate(&trait_def, is_wasm_only)?; + let vis = trait_def.vis; + let attrs = &trait_def.attrs; + + let res = quote! { + #( #attrs )* + #vis mod #mod_name { + use super::*; + #crate_include + + #bare_functions + + #trait_decl_impl + + #host_functions + } + }; + + Ok(res) } diff --git a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs index 542c4ca4b8..92cc6a2dc7 100644 --- a/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs +++ b/primitives/runtime-interface/proc-macro/src/runtime_interface/trait_decl_impl.rs @@ -18,15 +18,14 @@ //! default implementations and implements the trait for `&mut dyn Externalities`. 
use crate::utils::{ - generate_crate_access, - get_function_argument_types_without_ref, - get_runtime_interface, - create_function_ident_with_version, + create_function_ident_with_version, generate_crate_access, + get_function_argument_types_without_ref, get_runtime_interface, }; use syn::{ - ItemTrait, TraitItemMethod, Result, Error, fold::{self, Fold}, spanned::Spanned, - Visibility, Receiver, Type, Generics, + fold::{self, Fold}, + spanned::Spanned, + Error, Generics, ItemTrait, Receiver, Result, TraitItemMethod, Type, Visibility, }; use proc_macro2::TokenStream; @@ -36,147 +35,145 @@ use quote::quote; /// Process the given trait definition, by checking that the definition is valid, fold it to the /// essential definition and implement this essential definition for `dyn Externalities`. pub fn process(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { - let impl_trait = impl_trait_for_externalities(trait_def, is_wasm_only)?; - let essential_trait_def = declare_essential_trait(trait_def)?; + let impl_trait = impl_trait_for_externalities(trait_def, is_wasm_only)?; + let essential_trait_def = declare_essential_trait(trait_def)?; - Ok( - quote! { - #impl_trait + Ok(quote! { + #impl_trait - #essential_trait_def - } - ) + #essential_trait_def + }) } /// Converts the given trait definition into the essential trait definition without method /// default implementations and visibility set to inherited. struct ToEssentialTraitDef { - /// All errors found while doing the conversion. - errors: Vec, - methods: Vec, + /// All errors found while doing the conversion. 
+ errors: Vec, + methods: Vec, } impl ToEssentialTraitDef { - fn new() -> Self { - ToEssentialTraitDef { errors: vec![], methods: vec![] } - } - - fn into_methods(self) -> Result> { - let mut errors = self.errors; - let methods = self.methods; - if let Some(first_error) = errors.pop() { - Err( - errors.into_iter().fold(first_error, |mut o, n| { - o.combine(n); - o - }) - ) - } else { - Ok(methods) - } - } - - fn process(&mut self, method: &TraitItemMethod, version: u32) { - let mut folded = self.fold_trait_item_method(method.clone()); - folded.sig.ident = create_function_ident_with_version(&folded.sig.ident, version); - self.methods.push(folded); - } - - fn push_error(&mut self, span: &S, msg: &str) { - self.errors.push(Error::new(span.span(), msg)); - } - - fn error_on_generic_parameters(&mut self, generics: &Generics) { - if let Some(param) = generics.params.first() { - self.push_error(param, "Generic parameters not supported."); - } - } + fn new() -> Self { + ToEssentialTraitDef { + errors: vec![], + methods: vec![], + } + } + + fn into_methods(self) -> Result> { + let mut errors = self.errors; + let methods = self.methods; + if let Some(first_error) = errors.pop() { + Err(errors.into_iter().fold(first_error, |mut o, n| { + o.combine(n); + o + })) + } else { + Ok(methods) + } + } + + fn process(&mut self, method: &TraitItemMethod, version: u32) { + let mut folded = self.fold_trait_item_method(method.clone()); + folded.sig.ident = create_function_ident_with_version(&folded.sig.ident, version); + self.methods.push(folded); + } + + fn push_error(&mut self, span: &S, msg: &str) { + self.errors.push(Error::new(span.span(), msg)); + } + + fn error_on_generic_parameters(&mut self, generics: &Generics) { + if let Some(param) = generics.params.first() { + self.push_error(param, "Generic parameters not supported."); + } + } } impl Fold for ToEssentialTraitDef { - fn fold_trait_item_method(&mut self, mut method: TraitItemMethod) -> TraitItemMethod { - if 
method.default.take().is_none() { - self.push_error(&method, "Methods need to have an implementation."); - } + fn fold_trait_item_method(&mut self, mut method: TraitItemMethod) -> TraitItemMethod { + if method.default.take().is_none() { + self.push_error(&method, "Methods need to have an implementation."); + } - let arg_types = get_function_argument_types_without_ref(&method.sig); - arg_types.filter_map(|ty| - match *ty { - Type::ImplTrait(impl_trait) => Some(impl_trait), - _ => None - } - ).for_each(|invalid| self.push_error(&invalid, "`impl Trait` syntax not supported.")); + let arg_types = get_function_argument_types_without_ref(&method.sig); + arg_types + .filter_map(|ty| match *ty { + Type::ImplTrait(impl_trait) => Some(impl_trait), + _ => None, + }) + .for_each(|invalid| self.push_error(&invalid, "`impl Trait` syntax not supported.")); - self.error_on_generic_parameters(&method.sig.generics); + self.error_on_generic_parameters(&method.sig.generics); - method.attrs.retain(|a| !a.path.is_ident("version")); + method.attrs.retain(|a| !a.path.is_ident("version")); - fold::fold_trait_item_method(self, method) - } + fold::fold_trait_item_method(self, method) + } - fn fold_item_trait(&mut self, mut trait_def: ItemTrait) -> ItemTrait { - self.error_on_generic_parameters(&trait_def.generics); + fn fold_item_trait(&mut self, mut trait_def: ItemTrait) -> ItemTrait { + self.error_on_generic_parameters(&trait_def.generics); - trait_def.vis = Visibility::Inherited; - fold::fold_item_trait(self, trait_def) - } + trait_def.vis = Visibility::Inherited; + fold::fold_item_trait(self, trait_def) + } - fn fold_receiver(&mut self, receiver: Receiver) -> Receiver { - if receiver.reference.is_none() { - self.push_error(&receiver, "Taking `Self` by value is not allowed."); - } + fn fold_receiver(&mut self, receiver: Receiver) -> Receiver { + if receiver.reference.is_none() { + self.push_error(&receiver, "Taking `Self` by value is not allowed."); + } - fold::fold_receiver(self, 
receiver) - } + fold::fold_receiver(self, receiver) + } } fn declare_essential_trait(trait_def: &ItemTrait) -> Result { - let trait_ = &trait_def.ident; - - if let Some(param) = trait_def.generics.params.first() { - return Err(Error::new(param.span(), "Generic parameters not supported.")) - } - - let interface = get_runtime_interface(trait_def)?; - let mut folder = ToEssentialTraitDef::new(); - for (version, interface_method) in interface.all_versions() { - folder.process(interface_method, version); - } - let methods = folder.into_methods()?; - - Ok( - quote! { - trait #trait_ { - #( #methods )* - } - } - ) + let trait_ = &trait_def.ident; + + if let Some(param) = trait_def.generics.params.first() { + return Err(Error::new( + param.span(), + "Generic parameters not supported.", + )); + } + + let interface = get_runtime_interface(trait_def)?; + let mut folder = ToEssentialTraitDef::new(); + for (version, interface_method) in interface.all_versions() { + folder.process(interface_method, version); + } + let methods = folder.into_methods()?; + + Ok(quote! { + trait #trait_ { + #( #methods )* + } + }) } /// Implements the given trait definition for `dyn Externalities`. fn impl_trait_for_externalities(trait_def: &ItemTrait, is_wasm_only: bool) -> Result { - let trait_ = &trait_def.ident; - let crate_ = generate_crate_access(); - let interface = get_runtime_interface(trait_def)?; - let methods = interface.all_versions().map(|(version, method)| { - let mut cloned = method.clone(); - cloned.attrs.retain(|a| !a.path.is_ident("version")); - cloned.sig.ident = create_function_ident_with_version(&cloned.sig.ident, version); - cloned - }); - - let impl_type = if is_wasm_only { - quote!( &mut dyn #crate_::sp_wasm_interface::FunctionContext ) - } else { - quote!( &mut dyn #crate_::Externalities ) - }; - - Ok( - quote! 
{ - #[cfg(feature = "std")] - impl #trait_ for #impl_type { - #( #methods )* - } - } - ) + let trait_ = &trait_def.ident; + let crate_ = generate_crate_access(); + let interface = get_runtime_interface(trait_def)?; + let methods = interface.all_versions().map(|(version, method)| { + let mut cloned = method.clone(); + cloned.attrs.retain(|a| !a.path.is_ident("version")); + cloned.sig.ident = create_function_ident_with_version(&cloned.sig.ident, version); + cloned + }); + + let impl_type = if is_wasm_only { + quote!( &mut dyn #crate_::sp_wasm_interface::FunctionContext ) + } else { + quote!( &mut dyn #crate_::Externalities ) + }; + + Ok(quote! { + #[cfg(feature = "std")] + impl #trait_ for #impl_type { + #( #methods )* + } + }) } diff --git a/primitives/runtime-interface/proc-macro/src/utils.rs b/primitives/runtime-interface/proc-macro/src/utils.rs index 45f66e3bf6..31ee08e291 100644 --- a/primitives/runtime-interface/proc-macro/src/utils.rs +++ b/primitives/runtime-interface/proc-macro/src/utils.rs @@ -16,17 +16,17 @@ //! Util function used by this crate. -use proc_macro2::{TokenStream, Span}; +use proc_macro2::{Span, TokenStream}; use syn::{ - Ident, Error, Signature, Pat, PatType, FnArg, Type, token, TraitItemMethod, ItemTrait, - TraitItem, parse_quote, spanned::Spanned, Result, Meta, NestedMeta, Lit, Attribute, + parse_quote, spanned::Spanned, token, Attribute, Error, FnArg, Ident, ItemTrait, Lit, Meta, + NestedMeta, Pat, PatType, Result, Signature, TraitItem, TraitItemMethod, Type, }; use proc_macro_crate::crate_name; +use std::collections::{btree_map::Entry, BTreeMap}; use std::env; -use std::collections::{BTreeMap, btree_map::Entry}; use quote::quote; @@ -34,267 +34,263 @@ use inflector::Inflector; /// Runtime interface function with all associated versions of this function. 
pub struct RuntimeInterfaceFunction<'a> { - latest_version: u32, - versions: BTreeMap, + latest_version: u32, + versions: BTreeMap, } impl<'a> RuntimeInterfaceFunction<'a> { - fn new(version: u32, trait_item: &'a TraitItemMethod) -> Self { - Self { - latest_version: version, - versions: { - let mut res = BTreeMap::new(); - res.insert(version, trait_item); - res - }, - } - } - - pub fn latest_version(&self) -> (u32, &TraitItemMethod) { - ( - self.latest_version, - self.versions.get(&self.latest_version) - .expect("If latest_version has a value, the key with this value is in the versions; qed") - ) - } + fn new(version: u32, trait_item: &'a TraitItemMethod) -> Self { + Self { + latest_version: version, + versions: { + let mut res = BTreeMap::new(); + res.insert(version, trait_item); + res + }, + } + } + + pub fn latest_version(&self) -> (u32, &TraitItemMethod) { + ( + self.latest_version, + self.versions.get(&self.latest_version).expect( + "If latest_version has a value, the key with this value is in the versions; qed", + ), + ) + } } /// All functions of a runtime interface grouped by the function names. pub struct RuntimeInterface<'a> { - items: BTreeMap>, + items: BTreeMap>, } impl<'a> RuntimeInterface<'a> { - pub fn latest_versions(&self) -> impl Iterator { - self.items.iter().map(|(_, item)| item.latest_version()) - } - - pub fn all_versions(&self) -> impl Iterator { - self.items.iter().flat_map(|(_, item)| item.versions.iter()).map(|(v, i)| (*v, *i)) - } - } + pub fn latest_versions(&self) -> impl Iterator { + self.items.iter().map(|(_, item)| item.latest_version()) + } + + pub fn all_versions(&self) -> impl Iterator { + self.items + .iter() + .flat_map(|(_, item)| item.versions.iter()) + .map(|(v, i)| (*v, *i)) + } +} /// Generates the include for the runtime-interface crate. 
pub fn generate_runtime_interface_include() -> TokenStream { - if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { - TokenStream::new() - } else { - match crate_name("sp-runtime-interface") { - Ok(crate_name) => { - let crate_name = Ident::new(&crate_name, Span::call_site()); - quote!( - #[doc(hidden)] - extern crate #crate_name as proc_macro_runtime_interface; - ) - }, - Err(e) => { - let err = Error::new(Span::call_site(), &e).to_compile_error(); - quote!( #err ) - } - } - } + if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { + TokenStream::new() + } else { + match crate_name("sp-runtime-interface") { + Ok(crate_name) => { + let crate_name = Ident::new(&crate_name, Span::call_site()); + quote!( + #[doc(hidden)] + extern crate #crate_name as proc_macro_runtime_interface; + ) + } + Err(e) => { + let err = Error::new(Span::call_site(), &e).to_compile_error(); + quote!( #err ) + } + } + } } /// Generates the access to the `sp-runtime-interface` crate. pub fn generate_crate_access() -> TokenStream { - if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { - quote!( sp_runtime_interface ) - } else { - quote!( proc_macro_runtime_interface ) - } + if env::var("CARGO_PKG_NAME").unwrap() == "sp-runtime-interface" { + quote!(sp_runtime_interface) + } else { + quote!(proc_macro_runtime_interface) + } } /// Create the exchangeable host function identifier for the given function name. pub fn create_exchangeable_host_function_ident(name: &Ident) -> Ident { - Ident::new(&format!("host_{}", name), Span::call_site()) + Ident::new(&format!("host_{}", name), Span::call_site()) } /// Create the host function identifier for the given function name. 
pub fn create_host_function_ident(name: &Ident, version: u32, trait_name: &Ident) -> Ident { - Ident::new( - &format!( - "ext_{}_{}_version_{}", - trait_name.to_string().to_snake_case(), - name, - version, - ), - Span::call_site(), - ) + Ident::new( + &format!( + "ext_{}_{}_version_{}", + trait_name.to_string().to_snake_case(), + name, + version, + ), + Span::call_site(), + ) } /// Create the host function identifier for the given function name. pub fn create_function_ident_with_version(name: &Ident, version: u32) -> Ident { - Ident::new( - &format!( - "{}_version_{}", - name, - version, - ), - Span::call_site(), - ) + Ident::new(&format!("{}_version_{}", name, version,), Span::call_site()) } /// Returns the function arguments of the given `Signature`, minus any `self` arguments. pub fn get_function_arguments<'a>(sig: &'a Signature) -> impl Iterator + 'a { - sig.inputs - .iter() - .filter_map(|a| match a { - FnArg::Receiver(_) => None, - FnArg::Typed(pat_type) => Some(pat_type), - }) - .enumerate() - .map(|(i, arg)| { - let mut res = arg.clone(); - if let Pat::Wild(wild) = &*arg.pat { - let ident = Ident::new( - &format!("__runtime_interface_generated_{}_", i), - wild.span(), - ); - - res.pat = Box::new(parse_quote!( #ident )) - } - - res - }) + sig.inputs + .iter() + .filter_map(|a| match a { + FnArg::Receiver(_) => None, + FnArg::Typed(pat_type) => Some(pat_type), + }) + .enumerate() + .map(|(i, arg)| { + let mut res = arg.clone(); + if let Pat::Wild(wild) = &*arg.pat { + let ident = Ident::new( + &format!("__runtime_interface_generated_{}_", i), + wild.span(), + ); + + res.pat = Box::new(parse_quote!( #ident )) + } + + res + }) } /// Returns the function argument names of the given `Signature`, minus any `self`. 
pub fn get_function_argument_names<'a>(sig: &'a Signature) -> impl Iterator> + 'a { - get_function_arguments(sig).map(|pt| pt.pat) + get_function_arguments(sig).map(|pt| pt.pat) } /// Returns the function argument types of the given `Signature`, minus any `Self` type. pub fn get_function_argument_types<'a>(sig: &'a Signature) -> impl Iterator> + 'a { - get_function_arguments(sig).map(|pt| pt.ty) + get_function_arguments(sig).map(|pt| pt.ty) } /// Returns the function argument types, minus any `Self` type. If any of the arguments /// is a reference, the underlying type without the ref is returned. pub fn get_function_argument_types_without_ref<'a>( - sig: &'a Signature, + sig: &'a Signature, ) -> impl Iterator> + 'a { - get_function_arguments(sig) - .map(|pt| pt.ty) - .map(|ty| match *ty { - Type::Reference(type_ref) => type_ref.elem, - _ => ty, - }) + get_function_arguments(sig) + .map(|pt| pt.ty) + .map(|ty| match *ty { + Type::Reference(type_ref) => type_ref.elem, + _ => ty, + }) } /// Returns the function argument names and types, minus any `self`. If any of the arguments /// is a reference, the underlying type without the ref is returned. pub fn get_function_argument_names_and_types_without_ref<'a>( - sig: &'a Signature, + sig: &'a Signature, ) -> impl Iterator, Box)> + 'a { - get_function_arguments(sig) - .map(|pt| match *pt.ty { - Type::Reference(type_ref) => (pt.pat, type_ref.elem), - _ => (pt.pat, pt.ty), - }) + get_function_arguments(sig).map(|pt| match *pt.ty { + Type::Reference(type_ref) => (pt.pat, type_ref.elem), + _ => (pt.pat, pt.ty), + }) } /// Returns the `&`/`&mut` for all function argument types, minus the `self` arg. If a function /// argument is not a reference, `None` is returned. 
pub fn get_function_argument_types_ref_and_mut<'a>( - sig: &'a Signature, + sig: &'a Signature, ) -> impl Iterator)>> + 'a { - get_function_arguments(sig) - .map(|pt| pt.ty) - .map(|ty| match *ty { - Type::Reference(type_ref) => Some((type_ref.and_token, type_ref.mutability)), - _ => None, - }) + get_function_arguments(sig) + .map(|pt| pt.ty) + .map(|ty| match *ty { + Type::Reference(type_ref) => Some((type_ref.and_token, type_ref.mutability)), + _ => None, + }) } /// Returns an iterator over all trait methods for the given trait definition. fn get_trait_methods<'a>(trait_def: &'a ItemTrait) -> impl Iterator { - trait_def - .items - .iter() - .filter_map(|i| match i { - TraitItem::Method(ref method) => Some(method), - _ => None, - }) + trait_def.items.iter().filter_map(|i| match i { + TraitItem::Method(ref method) => Some(method), + _ => None, + }) } /// Parse version attribute. /// /// Returns error if it is in incorrent format. Correct format is only `#[version(X)]`. fn parse_version_attribute(version: &Attribute) -> Result { - let meta = version.parse_meta()?; - - let err = Err(Error::new( - meta.span(), - "Unexpected `version` attribute. The supported format is `#[version(1)]`", - ) - ); - - match meta { - Meta::List(list) => { - if list.nested.len() != 1 { - err - } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { - i.base10_parse() - } else { - err - } - }, - _ => err, - } + let meta = version.parse_meta()?; + + let err = Err(Error::new( + meta.span(), + "Unexpected `version` attribute. The supported format is `#[version(1)]`", + )); + + match meta { + Meta::List(list) => { + if list.nested.len() != 1 { + err + } else if let Some(NestedMeta::Lit(Lit::Int(i))) = list.nested.first() { + i.base10_parse() + } else { + err + } + } + _ => err, + } } /// Return item version (`#[version(X)]`) attribute, if present. 
fn get_item_version(item: &TraitItemMethod) -> Result> { - item.attrs.iter().find(|attr| attr.path.is_ident("version")) - .map(|attr| parse_version_attribute(attr)) - .transpose() + item.attrs + .iter() + .find(|attr| attr.path.is_ident("version")) + .map(|attr| parse_version_attribute(attr)) + .transpose() } /// Returns all runtime interface members, with versions. -pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) - -> Result> -{ - let mut functions: BTreeMap> = BTreeMap::new(); - - for item in get_trait_methods(trait_def) { - let name = item.sig.ident.clone(); - let version = get_item_version(item)?.unwrap_or(1); - - match functions.entry(name.clone()) { - Entry::Vacant(entry) => { entry.insert(RuntimeInterfaceFunction::new(version, item)); }, - Entry::Occupied(mut entry) => { - if let Some(existing_item) = entry.get().versions.get(&version) { - let mut err = Error::new( - item.span(), - "Duplicated version attribute", - ); - err.combine(Error::new( - existing_item.span(), - "Previous version with the same number defined here", - )); - - return Err(err); - } - - let interface_item = entry.get_mut(); - if interface_item.latest_version < version { interface_item.latest_version = version; } - interface_item.versions.insert(version, item); - } - } - } - - for function in functions.values() { - let mut next_expected = 1; - for (version, item) in function.versions.iter() { - if next_expected != *version { - return Err(Error::new( - item.span(), - format!("Unexpected version attribute: missing version '{}' for this function", next_expected), - )); - } - next_expected += 1; - } - } - - Ok(RuntimeInterface { items: functions }) -} \ No newline at end of file +pub fn get_runtime_interface<'a>(trait_def: &'a ItemTrait) -> Result> { + let mut functions: BTreeMap> = BTreeMap::new(); + + for item in get_trait_methods(trait_def) { + let name = item.sig.ident.clone(); + let version = get_item_version(item)?.unwrap_or(1); + + match functions.entry(name.clone()) { + 
Entry::Vacant(entry) => { + entry.insert(RuntimeInterfaceFunction::new(version, item)); + } + Entry::Occupied(mut entry) => { + if let Some(existing_item) = entry.get().versions.get(&version) { + let mut err = Error::new(item.span(), "Duplicated version attribute"); + err.combine(Error::new( + existing_item.span(), + "Previous version with the same number defined here", + )); + + return Err(err); + } + + let interface_item = entry.get_mut(); + if interface_item.latest_version < version { + interface_item.latest_version = version; + } + interface_item.versions.insert(version, item); + } + } + } + + for function in functions.values() { + let mut next_expected = 1; + for (version, item) in function.versions.iter() { + if next_expected != *version { + return Err(Error::new( + item.span(), + format!( + "Unexpected version attribute: missing version '{}' for this function", + next_expected + ), + )); + } + next_expected += 1; + } + } + + Ok(RuntimeInterface { items: functions }) +} diff --git a/primitives/runtime-interface/src/host.rs b/primitives/runtime-interface/src/host.rs index cf03e6623a..8c462c4992 100644 --- a/primitives/runtime-interface/src/host.rs +++ b/primitives/runtime-interface/src/host.rs @@ -22,8 +22,8 @@ use sp_wasm_interface::{FunctionContext, Result}; /// Something that can be converted into a ffi value. pub trait IntoFFIValue: RIType { - /// Convert `self` into a ffi value. - fn into_ffi_value(self, context: &mut dyn FunctionContext) -> Result; + /// Convert `self` into a ffi value. + fn into_ffi_value(self, context: &mut dyn FunctionContext) -> Result; } /// Something that can be converted into a preallocated ffi value. @@ -36,16 +36,16 @@ pub trait IntoFFIValue: RIType { /// do not work with this interface, as we can not call into wasm to reallocate memory. So, this /// trait should be implemented carefully. pub trait IntoPreallocatedFFIValue: RIType { - /// As `Self` can be an unsized type, it needs to be represented by a sized type at the host. 
- /// This `SelfInstance` is the sized type. - type SelfInstance; + /// As `Self` can be an unsized type, it needs to be represented by a sized type at the host. + /// This `SelfInstance` is the sized type. + type SelfInstance; - /// Convert `self_instance` into the given preallocated ffi value. - fn into_preallocated_ffi_value( - self_instance: Self::SelfInstance, - context: &mut dyn FunctionContext, - allocated: Self::FFIType, - ) -> Result<()>; + /// Convert `self_instance` into the given preallocated ffi value. + fn into_preallocated_ffi_value( + self_instance: Self::SelfInstance, + context: &mut dyn FunctionContext, + allocated: Self::FFIType, + ) -> Result<()>; } /// Something that can be created from a ffi value. @@ -53,13 +53,13 @@ pub trait IntoPreallocatedFFIValue: RIType { /// is only generated by the corresponding [`wasm::IntoFFIValue`](crate::wasm::IntoFFIValue) /// implementation. pub trait FromFFIValue: RIType { - /// As `Self` can be an unsized type, it needs to be represented by a sized type at the host. - /// This `SelfInstance` is the sized type. - type SelfInstance; + /// As `Self` can be an unsized type, it needs to be represented by a sized type at the host. + /// This `SelfInstance` is the sized type. + type SelfInstance; - /// Create `SelfInstance` from the given - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result; + /// Create `SelfInstance` from the given + fn from_ffi_value( + context: &mut dyn FunctionContext, + arg: Self::FFIType, + ) -> Result; } diff --git a/primitives/runtime-interface/src/impls.rs b/primitives/runtime-interface/src/impls.rs index 084b5e11eb..2424ef9ea9 100644 --- a/primitives/runtime-interface/src/impls.rs +++ b/primitives/runtime-interface/src/impls.rs @@ -16,22 +16,26 @@ //! Provides implementations for the runtime interface traits. 
-use crate::{ - RIType, Pointer, pass_by::{PassBy, Codec, Inner, PassByInner, Enum}, - util::{unpack_ptr_and_len, pack_ptr_and_len}, -}; #[cfg(feature = "std")] use crate::host::*; #[cfg(not(feature = "std"))] use crate::wasm::*; +use crate::{ + pass_by::{Codec, Enum, Inner, PassBy, PassByInner}, + util::{pack_ptr_and_len, unpack_ptr_and_len}, + Pointer, RIType, +}; -#[cfg(all(not(feature = "std"), not(feature = "disable_target_static_assertions")))] +#[cfg(all( + not(feature = "std"), + not(feature = "disable_target_static_assertions") +))] use static_assertions::assert_eq_size; #[cfg(feature = "std")] use sp_wasm_interface::{FunctionContext, Result}; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_std::{any::TypeId, mem, vec::Vec}; @@ -39,9 +43,15 @@ use sp_std::{any::TypeId, mem, vec::Vec}; use sp_std::borrow::Cow; // Make sure that our assumptions for storing a pointer + its size in `u64` is valid. -#[cfg(all(not(feature = "std"), not(feature = "disable_target_static_assertions")))] +#[cfg(all( + not(feature = "std"), + not(feature = "disable_target_static_assertions") +))] assert_eq_size!(usize, u32); -#[cfg(all(not(feature = "std"), not(feature = "disable_target_static_assertions")))] +#[cfg(all( + not(feature = "std"), + not(feature = "disable_target_static_assertions") +))] assert_eq_size!(*const u8, u32); /// Implement the traits for the given primitive traits. @@ -93,14 +103,14 @@ macro_rules! impl_traits_for_primitives { } impl_traits_for_primitives! { - u8, u8, - u16, u16, - u32, u32, - u64, u64, - i8, i8, - i16, i16, - i32, i32, - i64, i64, + u8, u8, + u16, u16, + u32, u32, + u64, u64, + i8, i8, + i16, i16, + i32, i32, + i64, i64, } /// `bool` is passed as `u8`. @@ -108,39 +118,39 @@ impl_traits_for_primitives! 
{ /// - `1`: true /// - `0`: false impl RIType for bool { - type FFIType = u8; + type FFIType = u8; } #[cfg(not(feature = "std"))] impl IntoFFIValue for bool { - type Owned = (); + type Owned = (); - fn into_ffi_value(&self) -> WrappedFFIValue { - if *self { 1 } else { 0 }.into() - } + fn into_ffi_value(&self) -> WrappedFFIValue { + if *self { 1 } else { 0 }.into() + } } #[cfg(not(feature = "std"))] impl FromFFIValue for bool { - fn from_ffi_value(arg: u8) -> bool { - arg == 1 - } + fn from_ffi_value(arg: u8) -> bool { + arg == 1 + } } #[cfg(feature = "std")] impl FromFFIValue for bool { - type SelfInstance = bool; + type SelfInstance = bool; - fn from_ffi_value(_: &mut dyn FunctionContext, arg: u8) -> Result { - Ok(arg == 1) - } + fn from_ffi_value(_: &mut dyn FunctionContext, arg: u8) -> Result { + Ok(arg == 1) + } } #[cfg(feature = "std")] impl IntoFFIValue for bool { - fn into_ffi_value(self, _: &mut dyn FunctionContext) -> Result { - Ok(if self { 1 } else { 0 }) - } + fn into_ffi_value(self, _: &mut dyn FunctionContext) -> Result { + Ok(if self { 1 } else { 0 }) + } } /// The type is passed as `u64`. @@ -150,61 +160,61 @@ impl IntoFFIValue for bool { /// If `T == u8` the length and the pointer are taken directly from `Self`. /// Otherwise `Self` is encoded and the length and the pointer are taken from the encoded vector. 
impl RIType for Vec { - type FFIType = u64; + type FFIType = u64; } #[cfg(feature = "std")] impl IntoFFIValue for Vec { - fn into_ffi_value(self, context: &mut dyn FunctionContext) -> Result { - let vec: Cow<'_, [u8]> = if TypeId::of::() == TypeId::of::() { - unsafe { Cow::Borrowed(mem::transmute(&self[..])) } - } else { - Cow::Owned(self.encode()) - }; + fn into_ffi_value(self, context: &mut dyn FunctionContext) -> Result { + let vec: Cow<'_, [u8]> = if TypeId::of::() == TypeId::of::() { + unsafe { Cow::Borrowed(mem::transmute(&self[..])) } + } else { + Cow::Owned(self.encode()) + }; - let ptr = context.allocate_memory(vec.as_ref().len() as u32)?; - context.write_memory(ptr, &vec)?; + let ptr = context.allocate_memory(vec.as_ref().len() as u32)?; + context.write_memory(ptr, &vec)?; - Ok(pack_ptr_and_len(ptr.into(), vec.len() as u32)) - } + Ok(pack_ptr_and_len(ptr.into(), vec.len() as u32)) + } } #[cfg(feature = "std")] impl FromFFIValue for Vec { - type SelfInstance = Vec; + type SelfInstance = Vec; - fn from_ffi_value(context: &mut dyn FunctionContext, arg: u64) -> Result> { - <[T] as FromFFIValue>::from_ffi_value(context, arg) - } + fn from_ffi_value(context: &mut dyn FunctionContext, arg: u64) -> Result> { + <[T] as FromFFIValue>::from_ffi_value(context, arg) + } } #[cfg(not(feature = "std"))] impl IntoFFIValue for Vec { - type Owned = Vec; + type Owned = Vec; - fn into_ffi_value(&self) -> WrappedFFIValue> { - self[..].into_ffi_value() - } + fn into_ffi_value(&self) -> WrappedFFIValue> { + self[..].into_ffi_value() + } } #[cfg(not(feature = "std"))] impl FromFFIValue for Vec { - fn from_ffi_value(arg: u64) -> Vec { - let (ptr, len) = unpack_ptr_and_len(arg); - let len = len as usize; + fn from_ffi_value(arg: u64) -> Vec { + let (ptr, len) = unpack_ptr_and_len(arg); + let len = len as usize; - if len == 0 { - return Vec::new(); - } + if len == 0 { + return Vec::new(); + } - let data = unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) }; + let data = unsafe 
{ Vec::from_raw_parts(ptr as *mut u8, len, len) }; - if TypeId::of::() == TypeId::of::() { - unsafe { mem::transmute(data) } - } else { - Self::decode(&mut &data[..]).expect("Host to wasm values are encoded correctly; qed") - } - } + if TypeId::of::() == TypeId::of::() { + unsafe { mem::transmute(data) } + } else { + Self::decode(&mut &data[..]).expect("Host to wasm values are encoded correctly; qed") + } + } } /// The type is passed as `u64`. @@ -214,65 +224,64 @@ impl FromFFIValue for Vec { /// If `T == u8` the length and the pointer are taken directly from `Self`. /// Otherwise `Self` is encoded and the length and the pointer are taken from the encoded vector. impl RIType for [T] { - type FFIType = u64; + type FFIType = u64; } #[cfg(feature = "std")] impl FromFFIValue for [T] { - type SelfInstance = Vec; + type SelfInstance = Vec; - fn from_ffi_value(context: &mut dyn FunctionContext, arg: u64) -> Result> { - let (ptr, len) = unpack_ptr_and_len(arg); + fn from_ffi_value(context: &mut dyn FunctionContext, arg: u64) -> Result> { + let (ptr, len) = unpack_ptr_and_len(arg); - let vec = context.read_memory(Pointer::new(ptr), len)?; + let vec = context.read_memory(Pointer::new(ptr), len)?; - if TypeId::of::() == TypeId::of::() { - Ok(unsafe { mem::transmute(vec) }) - } else { - Ok(Vec::::decode(&mut &vec[..]).expect("Wasm to host values are encoded correctly; qed")) - } - } + if TypeId::of::() == TypeId::of::() { + Ok(unsafe { mem::transmute(vec) }) + } else { + Ok(Vec::::decode(&mut &vec[..]) + .expect("Wasm to host values are encoded correctly; qed")) + } + } } #[cfg(feature = "std")] impl IntoPreallocatedFFIValue for [u8] { - type SelfInstance = Vec; - - fn into_preallocated_ffi_value( - self_instance: Self::SelfInstance, - context: &mut dyn FunctionContext, - allocated: u64, - ) -> Result<()> { - let (ptr, len) = unpack_ptr_and_len(allocated); - - if (len as usize) < self_instance.len() { - Err( - format!( - "Preallocated buffer is not big enough (given {} vs 
needed {})!", - len, - self_instance.len() - ) - ) - } else { - context.write_memory(Pointer::new(ptr), &self_instance) - } - } + type SelfInstance = Vec; + + fn into_preallocated_ffi_value( + self_instance: Self::SelfInstance, + context: &mut dyn FunctionContext, + allocated: u64, + ) -> Result<()> { + let (ptr, len) = unpack_ptr_and_len(allocated); + + if (len as usize) < self_instance.len() { + Err(format!( + "Preallocated buffer is not big enough (given {} vs needed {})!", + len, + self_instance.len() + )) + } else { + context.write_memory(Pointer::new(ptr), &self_instance) + } + } } #[cfg(not(feature = "std"))] impl IntoFFIValue for [T] { - type Owned = Vec; - - fn into_ffi_value(&self) -> WrappedFFIValue> { - if TypeId::of::() == TypeId::of::() { - let slice = unsafe { mem::transmute::<&[T], &[u8]>(self) }; - pack_ptr_and_len(slice.as_ptr() as u32, slice.len() as u32).into() - } else { - let data = self.encode(); - let ffi_value = pack_ptr_and_len(data.as_ptr() as u32, data.len() as u32); - (ffi_value, data).into() - } - } + type Owned = Vec; + + fn into_ffi_value(&self) -> WrappedFFIValue> { + if TypeId::of::() == TypeId::of::() { + let slice = unsafe { mem::transmute::<&[T], &[u8]>(self) }; + pack_ptr_and_len(slice.as_ptr() as u32, slice.len() as u32).into() + } else { + let data = self.encode(); + let ffi_value = pack_ptr_and_len(data.as_ptr() as u32, data.len() as u32); + (ffi_value, data).into() + } + } } /// Implement the traits for the `[u8; N]` arrays, where `N` is the input to this macro. @@ -350,18 +359,18 @@ macro_rules! impl_traits_for_arrays { } impl_traits_for_arrays! 
{ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, - 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, - 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, - 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, + 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, + 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, + 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, } impl PassBy for sp_std::result::Result { - type PassBy = Codec; + type PassBy = Codec; } impl PassBy for Option { - type PassBy = Codec; + type PassBy = Codec; } /// Implement `PassBy` with `Inner` for the given fixed sized hash types. @@ -392,9 +401,9 @@ macro_rules! for_primitive_types { } for_primitive_types! { - H160 20, - H256 32, - H512 64, + H160 20, + H256 32, + H512 64, } /// The type is passed as `u64`. @@ -403,132 +412,133 @@ for_primitive_types! { /// /// The length and the pointer are taken directly from `Self`. impl RIType for str { - type FFIType = u64; + type FFIType = u64; } #[cfg(feature = "std")] impl FromFFIValue for str { - type SelfInstance = String; + type SelfInstance = String; - fn from_ffi_value(context: &mut dyn FunctionContext, arg: u64) -> Result { - let (ptr, len) = unpack_ptr_and_len(arg); + fn from_ffi_value(context: &mut dyn FunctionContext, arg: u64) -> Result { + let (ptr, len) = unpack_ptr_and_len(arg); - let vec = context.read_memory(Pointer::new(ptr), len)?; + let vec = context.read_memory(Pointer::new(ptr), len)?; - // The data is valid utf8, as it is stored as `&str` in wasm. 
- String::from_utf8(vec).map_err(|_| "Invalid utf8 data provided".into()) - } + // The data is valid utf8, as it is stored as `&str` in wasm. + String::from_utf8(vec).map_err(|_| "Invalid utf8 data provided".into()) + } } #[cfg(not(feature = "std"))] impl IntoFFIValue for str { - type Owned = (); + type Owned = (); - fn into_ffi_value(&self) -> WrappedFFIValue { - let bytes = self.as_bytes(); - pack_ptr_and_len(bytes.as_ptr() as u32, bytes.len() as u32).into() - } + fn into_ffi_value(&self) -> WrappedFFIValue { + let bytes = self.as_bytes(); + pack_ptr_and_len(bytes.as_ptr() as u32, bytes.len() as u32).into() + } } #[cfg(feature = "std")] impl RIType for Pointer { - type FFIType = u32; + type FFIType = u32; } /// The type is passed as `u32`. #[cfg(not(feature = "std"))] impl RIType for Pointer { - type FFIType = u32; + type FFIType = u32; } #[cfg(not(feature = "std"))] impl IntoFFIValue for Pointer { - type Owned = (); + type Owned = (); - fn into_ffi_value(&self) -> WrappedFFIValue { - (*self as u32).into() - } + fn into_ffi_value(&self) -> WrappedFFIValue { + (*self as u32).into() + } } #[cfg(not(feature = "std"))] impl FromFFIValue for Pointer { - fn from_ffi_value(arg: u32) -> Self { - arg as _ - } + fn from_ffi_value(arg: u32) -> Self { + arg as _ + } } #[cfg(feature = "std")] impl FromFFIValue for Pointer { - type SelfInstance = Self; + type SelfInstance = Self; - fn from_ffi_value(_: &mut dyn FunctionContext, arg: u32) -> Result { - Ok(Pointer::new(arg)) - } + fn from_ffi_value(_: &mut dyn FunctionContext, arg: u32) -> Result { + Ok(Pointer::new(arg)) + } } #[cfg(feature = "std")] impl IntoFFIValue for Pointer { - fn into_ffi_value(self, _: &mut dyn FunctionContext) -> Result { - Ok(self.into()) - } + fn into_ffi_value(self, _: &mut dyn FunctionContext) -> Result { + Ok(self.into()) + } } /// Implement the traits for `u128`/`i128` macro_rules! for_u128_i128 { - ($type:ty) => { - /// `u128`/`i128` is passed as `u32`. 
- /// - /// The `u32` is a pointer to an `[u8; 16]` array. - impl RIType for $type { - type FFIType = u32; - } - - #[cfg(not(feature = "std"))] - impl IntoFFIValue for $type { - type Owned = (); - - fn into_ffi_value(&self) -> WrappedFFIValue { - unsafe { (mem::transmute::<&Self, *const u8>(self) as u32).into() } - } - } - - #[cfg(not(feature = "std"))] - impl FromFFIValue for $type { - fn from_ffi_value(arg: u32) -> $type { - <$type>::from_le_bytes(<[u8; mem::size_of::<$type>()]>::from_ffi_value(arg)) - } - } - - #[cfg(feature = "std")] - impl FromFFIValue for $type { - type SelfInstance = $type; - - fn from_ffi_value(context: &mut dyn FunctionContext, arg: u32) -> Result<$type> { - let data = context.read_memory(Pointer::new(arg), mem::size_of::<$type>() as u32)?; - let mut res = [0u8; mem::size_of::<$type>()]; - res.copy_from_slice(&data); - Ok(<$type>::from_le_bytes(res)) - } - } - - #[cfg(feature = "std")] - impl IntoFFIValue for $type { - fn into_ffi_value(self, context: &mut dyn FunctionContext) -> Result { - let addr = context.allocate_memory(mem::size_of::<$type>() as u32)?; - context.write_memory(addr, &self.to_le_bytes())?; - Ok(addr.into()) - } - } - } + ($type:ty) => { + /// `u128`/`i128` is passed as `u32`. + /// + /// The `u32` is a pointer to an `[u8; 16]` array. 
+ impl RIType for $type { + type FFIType = u32; + } + + #[cfg(not(feature = "std"))] + impl IntoFFIValue for $type { + type Owned = (); + + fn into_ffi_value(&self) -> WrappedFFIValue { + unsafe { (mem::transmute::<&Self, *const u8>(self) as u32).into() } + } + } + + #[cfg(not(feature = "std"))] + impl FromFFIValue for $type { + fn from_ffi_value(arg: u32) -> $type { + <$type>::from_le_bytes(<[u8; mem::size_of::<$type>()]>::from_ffi_value(arg)) + } + } + + #[cfg(feature = "std")] + impl FromFFIValue for $type { + type SelfInstance = $type; + + fn from_ffi_value(context: &mut dyn FunctionContext, arg: u32) -> Result<$type> { + let data = + context.read_memory(Pointer::new(arg), mem::size_of::<$type>() as u32)?; + let mut res = [0u8; mem::size_of::<$type>()]; + res.copy_from_slice(&data); + Ok(<$type>::from_le_bytes(res)) + } + } + + #[cfg(feature = "std")] + impl IntoFFIValue for $type { + fn into_ffi_value(self, context: &mut dyn FunctionContext) -> Result { + let addr = context.allocate_memory(mem::size_of::<$type>() as u32)?; + context.write_memory(addr, &self.to_le_bytes())?; + Ok(addr.into()) + } + } + }; } for_u128_i128!(u128); for_u128_i128!(i128); impl PassBy for sp_wasm_interface::ValueType { - type PassBy = Enum; + type PassBy = Enum; } impl PassBy for sp_wasm_interface::Value { - type PassBy = Codec; + type PassBy = Codec; } diff --git a/primitives/runtime-interface/src/lib.rs b/primitives/runtime-interface/src/lib.rs index fd158d4b8a..b94cde74f9 100644 --- a/primitives/runtime-interface/src/lib.rs +++ b/primitives/runtime-interface/src/lib.rs @@ -285,18 +285,19 @@ pub use sp_runtime_interface_proc_macro::runtime_interface; #[doc(hidden)] #[cfg(feature = "std")] pub use sp_externalities::{ - set_and_run_with_externalities, with_externalities, Externalities, ExternalitiesExt, ExtensionStore, + set_and_run_with_externalities, with_externalities, ExtensionStore, Externalities, + ExternalitiesExt, }; #[doc(hidden)] pub use codec; -pub(crate) mod impls; 
#[cfg(feature = "std")] pub mod host; +pub(crate) mod impls; +pub mod pass_by; #[cfg(any(not(feature = "std"), doc))] pub mod wasm; -pub mod pass_by; mod util; @@ -308,11 +309,11 @@ pub use util::unpack_ptr_and_len; /// Every type that should be used in a runtime interface function signature needs to implement /// this trait. pub trait RIType { - /// The ffi type that is used to represent `Self`. - #[cfg(feature = "std")] - type FFIType: sp_wasm_interface::IntoValue + sp_wasm_interface::TryFromValue; - #[cfg(not(feature = "std"))] - type FFIType; + /// The ffi type that is used to represent `Self`. + #[cfg(feature = "std")] + type FFIType: sp_wasm_interface::IntoValue + sp_wasm_interface::TryFromValue; + #[cfg(not(feature = "std"))] + type FFIType; } /// A pointer that can be used in a runtime interface function signature. diff --git a/primitives/runtime-interface/src/pass_by.rs b/primitives/runtime-interface/src/pass_by.rs index d6767b5ebb..a145301823 100644 --- a/primitives/runtime-interface/src/pass_by.rs +++ b/primitives/runtime-interface/src/pass_by.rs @@ -19,7 +19,10 @@ //! //! [`Codec`], [`Inner`] and [`Enum`] are the provided strategy implementations. -use crate::{RIType, util::{unpack_ptr_and_len, pack_ptr_and_len}}; +use crate::{ + util::{pack_ptr_and_len, unpack_ptr_and_len}, + RIType, +}; #[cfg(feature = "std")] use crate::host::*; @@ -29,7 +32,7 @@ use crate::wasm::*; #[cfg(feature = "std")] use sp_wasm_interface::{FunctionContext, Pointer, Result}; -use sp_std::{marker::PhantomData, convert::TryFrom}; +use sp_std::{convert::TryFrom, marker::PhantomData}; #[cfg(not(feature = "std"))] use sp_std::vec::Vec; @@ -103,8 +106,8 @@ pub use sp_runtime_interface_proc_macro::PassByEnum; /// /// See [`Codec`], [`Inner`] or [`Enum`] for more information about the provided strategies. pub trait PassBy: Sized { - /// The strategy that should be used to pass the type. - type PassBy: PassByImpl; + /// The strategy that should be used to pass the type. 
+ type PassBy: PassByImpl; } /// Something that provides a strategy for passing a type between wasm and the host. @@ -115,21 +118,15 @@ pub trait PassBy: Sized { /// This trait is used for the host implementation. #[cfg(feature = "std")] pub trait PassByImpl: RIType { - /// Convert the given instance to the ffi value. - /// - /// For more information see: [`crate::host::IntoFFIValue::into_ffi_value`] - fn into_ffi_value( - instance: T, - context: &mut dyn FunctionContext, - ) -> Result; - - /// Create `T` from the given ffi value. - /// - /// For more information see: [`crate::host::FromFFIValue::from_ffi_value`] - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result; + /// Convert the given instance to the ffi value. + /// + /// For more information see: [`crate::host::IntoFFIValue::into_ffi_value`] + fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result; + + /// Create `T` from the given ffi value. + /// + /// For more information see: [`crate::host::FromFFIValue::from_ffi_value`] + fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result; } /// Something that provides a strategy for passing a type between wasm and the host. @@ -140,60 +137,60 @@ pub trait PassByImpl: RIType { /// This trait is used for the wasm implementation. #[cfg(not(feature = "std"))] pub trait PassByImpl: RIType { - /// The owned rust type that is stored with the ffi value in [`crate::wasm::WrappedFFIValue`]. - type Owned; - - /// Convert the given `instance` into [`crate::wasm::WrappedFFIValue`]. - /// - /// For more information see: [`crate::wasm::IntoFFIValue::into_ffi_value`] - fn into_ffi_value(instance: &T) -> WrappedFFIValue; - - /// Create `T` from the given ffi value. - /// - /// For more information see: [`crate::wasm::FromFFIValue::from_ffi_value`] - fn from_ffi_value(arg: Self::FFIType) -> T; + /// The owned rust type that is stored with the ffi value in [`crate::wasm::WrappedFFIValue`]. 
+ type Owned; + + /// Convert the given `instance` into [`crate::wasm::WrappedFFIValue`]. + /// + /// For more information see: [`crate::wasm::IntoFFIValue::into_ffi_value`] + fn into_ffi_value(instance: &T) -> WrappedFFIValue; + + /// Create `T` from the given ffi value. + /// + /// For more information see: [`crate::wasm::FromFFIValue::from_ffi_value`] + fn from_ffi_value(arg: Self::FFIType) -> T; } impl RIType for T { - type FFIType = ::FFIType; + type FFIType = ::FFIType; } #[cfg(feature = "std")] impl IntoFFIValue for T { - fn into_ffi_value( - self, - context: &mut dyn FunctionContext, - ) -> Result<::FFIType> { - T::PassBy::into_ffi_value(self, context) - } + fn into_ffi_value( + self, + context: &mut dyn FunctionContext, + ) -> Result<::FFIType> { + T::PassBy::into_ffi_value(self, context) + } } #[cfg(feature = "std")] impl FromFFIValue for T { - type SelfInstance = Self; - - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: ::FFIType, - ) -> Result { - T::PassBy::from_ffi_value(context, arg) - } + type SelfInstance = Self; + + fn from_ffi_value( + context: &mut dyn FunctionContext, + arg: ::FFIType, + ) -> Result { + T::PassBy::from_ffi_value(context, arg) + } } #[cfg(not(feature = "std"))] impl IntoFFIValue for T { - type Owned = >::Owned; + type Owned = >::Owned; - fn into_ffi_value(&self) -> WrappedFFIValue<::FFIType, Self::Owned> { - T::PassBy::into_ffi_value(self) - } + fn into_ffi_value(&self) -> WrappedFFIValue<::FFIType, Self::Owned> { + T::PassBy::into_ffi_value(self) + } } #[cfg(not(feature = "std"))] impl FromFFIValue for T { - fn from_ffi_value(arg: ::FFIType) -> Self { - T::PassBy::from_ffi_value(arg) - } + fn from_ffi_value(arg: ::FFIType) -> Self { + T::PassBy::from_ffi_value(arg) + } } /// The implementation of the pass by codec strategy. 
This strategy uses a SCALE encoded @@ -219,50 +216,44 @@ pub struct Codec(PhantomData); #[cfg(feature = "std")] impl PassByImpl for Codec { - fn into_ffi_value( - instance: T, - context: &mut dyn FunctionContext, - ) -> Result { - let vec = instance.encode(); - let ptr = context.allocate_memory(vec.len() as u32)?; - context.write_memory(ptr, &vec)?; - - Ok(pack_ptr_and_len(ptr.into(), vec.len() as u32)) - } - - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result { - let (ptr, len) = unpack_ptr_and_len(arg); - let vec = context.read_memory(Pointer::new(ptr), len)?; - T::decode(&mut &vec[..]) - .map_err(|e| format!("Could not decode value from wasm: {}", e.what())) - } + fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result { + let vec = instance.encode(); + let ptr = context.allocate_memory(vec.len() as u32)?; + context.write_memory(ptr, &vec)?; + + Ok(pack_ptr_and_len(ptr.into(), vec.len() as u32)) + } + + fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result { + let (ptr, len) = unpack_ptr_and_len(arg); + let vec = context.read_memory(Pointer::new(ptr), len)?; + T::decode(&mut &vec[..]) + .map_err(|e| format!("Could not decode value from wasm: {}", e.what())) + } } #[cfg(not(feature = "std"))] impl PassByImpl for Codec { - type Owned = Vec; - - fn into_ffi_value(instance: &T) -> WrappedFFIValue { - let data = instance.encode(); - let ffi_value = pack_ptr_and_len(data.as_ptr() as u32, data.len() as u32); - (ffi_value, data).into() - } - - fn from_ffi_value(arg: Self::FFIType) -> T { - let (ptr, len) = unpack_ptr_and_len(arg); - let len = len as usize; - - let encoded = if len == 0 { - Vec::new() - } else { - unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) } - }; - - T::decode(&mut &encoded[..]).expect("Host to wasm values are encoded correctly; qed") - } + type Owned = Vec; + + fn into_ffi_value(instance: &T) -> WrappedFFIValue { + let data = instance.encode(); + let 
ffi_value = pack_ptr_and_len(data.as_ptr() as u32, data.len() as u32); + (ffi_value, data).into() + } + + fn from_ffi_value(arg: Self::FFIType) -> T { + let (ptr, len) = unpack_ptr_and_len(arg); + let len = len as usize; + + let encoded = if len == 0 { + Vec::new() + } else { + unsafe { Vec::from_raw_parts(ptr as *mut u8, len, len) } + }; + + T::decode(&mut &encoded[..]).expect("Host to wasm values are encoded correctly; qed") + } } /// The type is passed as `u64`. @@ -271,23 +262,23 @@ impl PassByImpl for Codec { /// /// `Self` is encoded and the length and the pointer are taken from the encoded vector. impl RIType for Codec { - type FFIType = u64; + type FFIType = u64; } /// Trait that needs to be implemented by a type that should be passed between wasm and the host, /// by using the inner type. See [`Inner`] for more information. pub trait PassByInner: Sized { - /// The inner type that is wrapped by `Self`. - type Inner: RIType; + /// The inner type that is wrapped by `Self`. + type Inner: RIType; - /// Consumes `self` and returns the inner type. - fn into_inner(self) -> Self::Inner; + /// Consumes `self` and returns the inner type. + fn into_inner(self) -> Self::Inner; - /// Returns the reference to the inner type. - fn inner(&self) -> &Self::Inner; + /// Returns the reference to the inner type. + fn inner(&self) -> &Self::Inner; - /// Construct `Self` from the given `inner`. - fn from_inner(inner: Self::Inner) -> Self; + /// Construct `Self` from the given `inner`. + fn from_inner(inner: Self::Inner) -> Self; } /// The implementation of the pass by inner type strategy. 
The type that uses this strategy will be @@ -329,41 +320,37 @@ pub struct Inner, I: RIType>(PhantomData<(T, I)>); #[cfg(feature = "std")] impl, I: RIType> PassByImpl for Inner - where I: IntoFFIValue + FromFFIValue +where + I: IntoFFIValue + FromFFIValue, { - fn into_ffi_value( - instance: T, - context: &mut dyn FunctionContext, - ) -> Result { - instance.into_inner().into_ffi_value(context) - } - - fn from_ffi_value( - context: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result { - I::from_ffi_value(context, arg).map(T::from_inner) - } + fn into_ffi_value(instance: T, context: &mut dyn FunctionContext) -> Result { + instance.into_inner().into_ffi_value(context) + } + + fn from_ffi_value(context: &mut dyn FunctionContext, arg: Self::FFIType) -> Result { + I::from_ffi_value(context, arg).map(T::from_inner) + } } #[cfg(not(feature = "std"))] impl, I: RIType> PassByImpl for Inner - where I: IntoFFIValue + FromFFIValue +where + I: IntoFFIValue + FromFFIValue, { - type Owned = I::Owned; + type Owned = I::Owned; - fn into_ffi_value(instance: &T) -> WrappedFFIValue { - instance.inner().into_ffi_value() - } + fn into_ffi_value(instance: &T) -> WrappedFFIValue { + instance.inner().into_ffi_value() + } - fn from_ffi_value(arg: Self::FFIType) -> T { - T::from_inner(I::from_ffi_value(arg)) - } + fn from_ffi_value(arg: Self::FFIType) -> T { + T::from_inner(I::from_ffi_value(arg)) + } } /// The type is passed as the inner type. impl, I: RIType> RIType for Inner { - type FFIType = I::FFIType; + type FFIType = I::FFIType; } /// The implementation of the pass by enum strategy. 
This strategy uses an `u8` internally to pass @@ -414,38 +401,32 @@ pub struct Enum + TryFrom>(PhantomData); #[cfg(feature = "std")] impl + TryFrom> PassByImpl for Enum { - fn into_ffi_value( - instance: T, - _: &mut dyn FunctionContext, - ) -> Result { - Ok(instance.into()) - } - - fn from_ffi_value( - _: &mut dyn FunctionContext, - arg: Self::FFIType, - ) -> Result { - T::try_from(arg).map_err(|_| format!("Invalid enum discriminant: {}", arg)) - } + fn into_ffi_value(instance: T, _: &mut dyn FunctionContext) -> Result { + Ok(instance.into()) + } + + fn from_ffi_value(_: &mut dyn FunctionContext, arg: Self::FFIType) -> Result { + T::try_from(arg).map_err(|_| format!("Invalid enum discriminant: {}", arg)) + } } #[cfg(not(feature = "std"))] impl + TryFrom> PassByImpl for Enum { - type Owned = (); + type Owned = (); - fn into_ffi_value(instance: &T) -> WrappedFFIValue { - let value: u8 = (*instance).into(); - value.into() - } + fn into_ffi_value(instance: &T) -> WrappedFFIValue { + let value: u8 = (*instance).into(); + value.into() + } - fn from_ffi_value(arg: Self::FFIType) -> T { - T::try_from(arg).expect("Host to wasm provides a valid enum discriminant; qed") - } + fn from_ffi_value(arg: Self::FFIType) -> T { + T::try_from(arg).expect("Host to wasm provides a valid enum discriminant; qed") + } } /// The type is passed as `u8`. /// /// The value is corresponds to the discriminant of the variant. impl + TryFrom> RIType for Enum { - type FFIType = u8; + type FFIType = u8; } diff --git a/primitives/runtime-interface/src/util.rs b/primitives/runtime-interface/src/util.rs index fa7016a2b0..5dbf10bbd1 100644 --- a/primitives/runtime-interface/src/util.rs +++ b/primitives/runtime-interface/src/util.rs @@ -18,11 +18,11 @@ /// Pack a pointer and length into an `u64`. pub fn pack_ptr_and_len(ptr: u32, len: u32) -> u64 { - // The static assertions from above are changed into a runtime check. 
- #[cfg(all(not(feature = "std"), feature = "disable_target_static_assertions"))] - assert_eq!(4, sp_std::mem::size_of::()); + // The static assertions from above are changed into a runtime check. + #[cfg(all(not(feature = "std"), feature = "disable_target_static_assertions"))] + assert_eq!(4, sp_std::mem::size_of::()); - (u64::from(len) << 32) | u64::from(ptr) + (u64::from(len) << 32) | u64::from(ptr) } /// Unpacks an `u64` into the pointer and length. @@ -31,29 +31,29 @@ pub fn pack_ptr_and_len(ptr: u32, len: u32) -> u64 { /// 32-bits and a length in the most-significant 32 bits. This interprets the returned value as a pointer, /// length tuple. pub fn unpack_ptr_and_len(val: u64) -> (u32, u32) { - // The static assertions from above are changed into a runtime check. - #[cfg(all(not(feature = "std"), feature = "disable_target_static_assertions"))] - assert_eq!(4, sp_std::mem::size_of::()); + // The static assertions from above are changed into a runtime check. + #[cfg(all(not(feature = "std"), feature = "disable_target_static_assertions"))] + assert_eq!(4, sp_std::mem::size_of::()); - let ptr = (val & (!0u32 as u64)) as u32; - let len = (val >> 32) as u32; + let ptr = (val & (!0u32 as u64)) as u32; + let len = (val >> 32) as u32; - (ptr, len) + (ptr, len) } #[cfg(test)] mod tests { - use super::{pack_ptr_and_len, unpack_ptr_and_len}; + use super::{pack_ptr_and_len, unpack_ptr_and_len}; - #[test] - fn ptr_len_packing_unpacking() { - const PTR: u32 = 0x1337; - const LEN: u32 = 0x7f000000; + #[test] + fn ptr_len_packing_unpacking() { + const PTR: u32 = 0x1337; + const LEN: u32 = 0x7f000000; - let packed = pack_ptr_and_len(PTR, LEN); - let (ptr, len) = unpack_ptr_and_len(packed); + let packed = pack_ptr_and_len(PTR, LEN); + let (ptr, len) = unpack_ptr_and_len(packed); - assert_eq!(PTR, ptr); - assert_eq!(LEN, len); - } + assert_eq!(PTR, ptr); + assert_eq!(LEN, len); + } } diff --git a/primitives/runtime-interface/src/wasm.rs b/primitives/runtime-interface/src/wasm.rs 
index a0801c2bfb..1aeeec7803 100644 --- a/primitives/runtime-interface/src/wasm.rs +++ b/primitives/runtime-interface/src/wasm.rs @@ -29,19 +29,19 @@ use sp_std::cell::Cell; /// is only generated by the corresponding [`host::IntoFFIValue`](crate::host::IntoFFIValue) /// implementation. pub trait FromFFIValue: Sized + RIType { - /// Create `Self` from the given ffi value. - fn from_ffi_value(arg: Self::FFIType) -> Self; + /// Create `Self` from the given ffi value. + fn from_ffi_value(arg: Self::FFIType) -> Self; } /// Something that can be converted into a ffi value. pub trait IntoFFIValue: RIType { - /// The owned rust type that is stored with the ffi value in [`WrappedFFIValue`]. - /// - /// If no owned value is required, `()` can be used as a type. - type Owned; + /// The owned rust type that is stored with the ffi value in [`WrappedFFIValue`]. + /// + /// If no owned value is required, `()` can be used as a type. + type Owned; - /// Convert `self` into a [`WrappedFFIValue`]. - fn into_ffi_value(&self) -> WrappedFFIValue; + /// Convert `self` into a [`WrappedFFIValue`]. + fn into_ffi_value(&self) -> WrappedFFIValue; } /// Represents a wrapped ffi value. @@ -51,38 +51,38 @@ pub trait IntoFFIValue: RIType { /// optimizations can be applied. For example using the pointer to a `Vec`, while using the /// pointer to a SCALE encoded `Vec` that is stored in this wrapper for any other `Vec`. pub enum WrappedFFIValue { - Wrapped(T), - WrappedAndOwned(T, O), + Wrapped(T), + WrappedAndOwned(T, O), } impl WrappedFFIValue { - /// Returns the wrapped ffi value. - pub fn get(&self) -> T { - match self { - Self::Wrapped(data) | Self::WrappedAndOwned(data, _) => *data, - } - } + /// Returns the wrapped ffi value. 
+ pub fn get(&self) -> T { + match self { + Self::Wrapped(data) | Self::WrappedAndOwned(data, _) => *data, + } + } } impl From for WrappedFFIValue { - fn from(val: T) -> Self { - WrappedFFIValue::Wrapped(val) - } + fn from(val: T) -> Self { + WrappedFFIValue::Wrapped(val) + } } impl From<(T, O)> for WrappedFFIValue { - fn from(val: (T, O)) -> Self { - WrappedFFIValue::WrappedAndOwned(val.0, val.1) - } + fn from(val: (T, O)) -> Self { + WrappedFFIValue::WrappedAndOwned(val.0, val.1) + } } /// The state of an exchangeable function. #[derive(Clone, Copy)] enum ExchangeableFunctionState { - /// Original function is present - Original, - /// The function has been replaced. - Replaced, + /// Original function is present + Original, + /// The function has been replaced. + Replaced, } /// A function which implementation can be exchanged. @@ -91,41 +91,43 @@ enum ExchangeableFunctionState { pub struct ExchangeableFunction(Cell<(T, ExchangeableFunctionState)>); impl ExchangeableFunction { - /// Create a new instance of `ExchangeableFunction`. - pub const fn new(impl_: T) -> Self { - Self(Cell::new((impl_, ExchangeableFunctionState::Original))) - } + /// Create a new instance of `ExchangeableFunction`. + pub const fn new(impl_: T) -> Self { + Self(Cell::new((impl_, ExchangeableFunctionState::Original))) + } } impl ExchangeableFunction { - /// Replace the implementation with `new_impl`. - /// - /// # Panics - /// - /// Panics when trying to replace an already replaced implementation. - /// - /// # Returns - /// - /// Returns the original implementation wrapped in [`RestoreImplementation`]. - pub fn replace_implementation(&'static self, new_impl: T) -> RestoreImplementation { - if let ExchangeableFunctionState::Replaced = self.0.get().1 { - panic!("Trying to replace an already replaced implementation!") - } - - let old = self.0.replace((new_impl, ExchangeableFunctionState::Replaced)); - - RestoreImplementation(self, Some(old.0)) - } - - /// Restore the original implementation. 
- fn restore_orig_implementation(&self, orig: T) { - self.0.set((orig, ExchangeableFunctionState::Original)); - } - - /// Returns the internal function pointer. - pub fn get(&self) -> T { - self.0.get().0 - } + /// Replace the implementation with `new_impl`. + /// + /// # Panics + /// + /// Panics when trying to replace an already replaced implementation. + /// + /// # Returns + /// + /// Returns the original implementation wrapped in [`RestoreImplementation`]. + pub fn replace_implementation(&'static self, new_impl: T) -> RestoreImplementation { + if let ExchangeableFunctionState::Replaced = self.0.get().1 { + panic!("Trying to replace an already replaced implementation!") + } + + let old = self + .0 + .replace((new_impl, ExchangeableFunctionState::Replaced)); + + RestoreImplementation(self, Some(old.0)) + } + + /// Restore the original implementation. + fn restore_orig_implementation(&self, orig: T) { + self.0.set((orig, ExchangeableFunctionState::Original)); + } + + /// Returns the internal function pointer. + pub fn get(&self) -> T { + self.0.get().0 + } } // Wasm does not support threads, so this is safe; qed. 
@@ -137,7 +139,8 @@ unsafe impl Sync for ExchangeableFunction {} pub struct RestoreImplementation(&'static ExchangeableFunction, Option); impl Drop for RestoreImplementation { - fn drop(&mut self) { - self.0.restore_orig_implementation(self.1.take().expect("Value is only taken on drop; qed")); - } + fn drop(&mut self) { + self.0 + .restore_orig_implementation(self.1.take().expect("Value is only taken on drop; qed")); + } } diff --git a/primitives/runtime-interface/test-wasm-deprecated/build.rs b/primitives/runtime-interface/test-wasm-deprecated/build.rs index 647b476814..c54e86d8db 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/build.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/build.rs @@ -17,10 +17,10 @@ use wasm_builder_runner::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") - .export_heap_base() - .import_memory() - .build() + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") + .export_heap_base() + .import_memory() + .build() } diff --git a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs index 29d28c75fa..4991f3b7ca 100644 --- a/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs +++ b/primitives/runtime-interface/test-wasm-deprecated/src/lib.rs @@ -29,24 +29,24 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// `sp-io` is required for its panic and oom handler. #[no_mangle] pub fn import_sp_io() { - sp_io::misc::print_utf8(&[]); + sp_io::misc::print_utf8(&[]); } #[runtime_interface] pub trait TestApi { - fn test_versionning(&self, _data: u32) -> bool { - // should not be called - unimplemented!() - } + fn test_versionning(&self, _data: u32) -> bool { + // should not be called + unimplemented!() + } } wasm_export_functions! 
{ - fn test_versionning_works() { - // old api allows only 42 and 50 - assert!(test_api::test_versionning(42)); - assert!(test_api::test_versionning(50)); - - assert!(!test_api::test_versionning(142)); - assert!(!test_api::test_versionning(0)); - } -} \ No newline at end of file + fn test_versionning_works() { + // old api allows only 42 and 50 + assert!(test_api::test_versionning(42)); + assert!(test_api::test_versionning(50)); + + assert!(!test_api::test_versionning(142)); + assert!(!test_api::test_versionning(0)); + } +} diff --git a/primitives/runtime-interface/test-wasm/build.rs b/primitives/runtime-interface/test-wasm/build.rs index 647b476814..c54e86d8db 100644 --- a/primitives/runtime-interface/test-wasm/build.rs +++ b/primitives/runtime-interface/test-wasm/build.rs @@ -17,10 +17,10 @@ use wasm_builder_runner::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") - .export_heap_base() - .import_memory() - .build() + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates_or_path("1.0.9", "../../../utils/wasm-builder") + .export_heap_base() + .import_memory() + .build() } diff --git a/primitives/runtime-interface/test-wasm/src/lib.rs b/primitives/runtime-interface/test-wasm/src/lib.rs index 700c77854a..4762464f0d 100644 --- a/primitives/runtime-interface/test-wasm/src/lib.rs +++ b/primitives/runtime-interface/test-wasm/src/lib.rs @@ -21,7 +21,7 @@ use sp_runtime_interface::runtime_interface; #[cfg(not(feature = "std"))] -use sp_std::{prelude::*, mem, convert::TryFrom}; +use sp_std::{convert::TryFrom, mem, prelude::*}; use sp_core::{sr25519::Public, wasm_export_functions}; @@ -34,220 +34,220 @@ const TEST_ARRAY: [u8; 16] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, #[runtime_interface] pub trait TestApi { - /// Returns the input data as result. - fn return_input(data: Vec) -> Vec { - data - } - - /// Returns 16kb data. 
- /// - /// # Note - /// - /// We return a `Vec` because this will use the code path that uses SCALE - /// to pass the data between native/wasm. (Vec is passed without encoding the - /// data) - fn return_16kb() -> Vec { - vec![0; 4 * 1024] - } - - /// Set the storage at key with value. - fn set_storage(&mut self, key: &[u8], data: &[u8]) { - self.place_storage(key.to_vec(), Some(data.to_vec())); - } - - /// Copy `hello` into the given mutable reference - fn return_value_into_mutable_reference(&self, data: &mut [u8]) { - let res = "hello"; - data[..res.as_bytes().len()].copy_from_slice(res.as_bytes()); - } - - /// Returns the input data wrapped in an `Option` as result. - fn return_option_input(data: Vec) -> Option> { - Some(data) - } - - /// Get an array as input and returns a subset of this array. - fn get_and_return_array(data: [u8; 34]) -> [u8; 16] { - let mut res = [0u8; 16]; - res.copy_from_slice(&data[..16]); - res - } - - /// Take and fill mutable array. - fn array_as_mutable_reference(data: &mut [u8; 16]) { - data.copy_from_slice(&TEST_ARRAY); - } - - /// Returns the given public key as result. - fn return_input_public_key(key: Public) -> Public { - key - } - - /// A function that is called with invalid utf8 data from the runtime. - /// - /// This also checks that we accept `_` (wild card) argument names. - fn invalid_utf8_data(_: &str) {} - - /// Overwrite the native implementation in wasm. The native implementation always returns - /// `false` and the replacement function will return always `true`. 
- fn overwrite_native_function_implementation() -> bool { - false - } - - /// Gets an `u128` and returns this value - fn get_and_return_u128(val: u128) -> u128 { - val - } - - /// Gets an `i128` and returns this value - fn get_and_return_i128(val: i128) -> i128 { - val - } - - fn test_versionning(&self, data: u32) -> bool { - data == 42 || data == 50 - } - - #[version(2)] - fn test_versionning(&self, data: u32) -> bool { - data == 42 - } + /// Returns the input data as result. + fn return_input(data: Vec) -> Vec { + data + } + + /// Returns 16kb data. + /// + /// # Note + /// + /// We return a `Vec` because this will use the code path that uses SCALE + /// to pass the data between native/wasm. (Vec is passed without encoding the + /// data) + fn return_16kb() -> Vec { + vec![0; 4 * 1024] + } + + /// Set the storage at key with value. + fn set_storage(&mut self, key: &[u8], data: &[u8]) { + self.place_storage(key.to_vec(), Some(data.to_vec())); + } + + /// Copy `hello` into the given mutable reference + fn return_value_into_mutable_reference(&self, data: &mut [u8]) { + let res = "hello"; + data[..res.as_bytes().len()].copy_from_slice(res.as_bytes()); + } + + /// Returns the input data wrapped in an `Option` as result. + fn return_option_input(data: Vec) -> Option> { + Some(data) + } + + /// Get an array as input and returns a subset of this array. + fn get_and_return_array(data: [u8; 34]) -> [u8; 16] { + let mut res = [0u8; 16]; + res.copy_from_slice(&data[..16]); + res + } + + /// Take and fill mutable array. + fn array_as_mutable_reference(data: &mut [u8; 16]) { + data.copy_from_slice(&TEST_ARRAY); + } + + /// Returns the given public key as result. + fn return_input_public_key(key: Public) -> Public { + key + } + + /// A function that is called with invalid utf8 data from the runtime. + /// + /// This also checks that we accept `_` (wild card) argument names. + fn invalid_utf8_data(_: &str) {} + + /// Overwrite the native implementation in wasm. 
The native implementation always returns + /// `false` and the replacement function will return always `true`. + fn overwrite_native_function_implementation() -> bool { + false + } + + /// Gets an `u128` and returns this value + fn get_and_return_u128(val: u128) -> u128 { + val + } + + /// Gets an `i128` and returns this value + fn get_and_return_i128(val: i128) -> i128 { + val + } + + fn test_versionning(&self, data: u32) -> bool { + data == 42 || data == 50 + } + + #[version(2)] + fn test_versionning(&self, data: u32) -> bool { + data == 42 + } } /// This function is not used, but we require it for the compiler to include `sp-io`. /// `sp-io` is required for its panic and oom handler. #[no_mangle] pub fn import_sp_io() { - sp_io::misc::print_utf8(&[]); + sp_io::misc::print_utf8(&[]); } wasm_export_functions! { - fn test_return_data() { - let input = vec![1, 2, 3, 4, 5, 6]; - let res = test_api::return_input(input.clone()); - - assert_eq!(input, res); - } - - fn test_return_option_data() { - let input = vec![1, 2, 3, 4, 5, 6]; - let res = test_api::return_option_input(input.clone()); - - assert_eq!(Some(input), res); - } - - fn test_set_storage() { - let key = "hello"; - let value = "world"; - - test_api::set_storage(key.as_bytes(), value.as_bytes()); - } - - fn test_return_value_into_mutable_reference() { - let mut data = vec![1, 2, 3, 4, 5, 6]; - - test_api::return_value_into_mutable_reference(&mut data); - - let expected = "hello"; - assert_eq!(expected.as_bytes(), &data[..expected.len()]); - } - - fn test_get_and_return_array() { - let mut input = unsafe { mem::MaybeUninit::<[u8; 34]>::zeroed().assume_init() }; - input.copy_from_slice(&[ - 24, 3, 23, 20, 2, 16, 32, 1, 12, 26, 27, 8, 29, 31, 6, 5, 4, 19, 10, 28, 34, 21, 18, 33, 9, - 13, 22, 25, 15, 11, 30, 7, 14, 17, - ]); - - let res = test_api::get_and_return_array(input); - - assert_eq!(&res, &input[..16]); - } - - fn test_array_as_mutable_reference() { - let mut array = [0u8; 16]; - 
test_api::array_as_mutable_reference(&mut array); - - assert_eq!(array, TEST_ARRAY); - } - - fn test_return_input_public_key() { - let key = Public::try_from( - &[ - 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, - 25, 26, 27, 28, 29, 30, 31, 32, - ][..], - ).unwrap(); - let ret_key = test_api::return_input_public_key(key.clone()); - - let key_data: &[u8] = key.as_ref(); - let ret_key_data: &[u8] = ret_key.as_ref(); - assert_eq!(key_data, ret_key_data); - } - - fn test_invalid_utf8_data_should_return_an_error() { - let data = vec![0, 159, 146, 150]; - // I'm an evil hacker, trying to hack! - let data_str = unsafe { sp_std::str::from_utf8_unchecked(&data) }; - - test_api::invalid_utf8_data(data_str); - } - - fn test_overwrite_native_function_implementation() { - fn new_implementation() -> bool { - true - } - - // Check native implementation - assert!(!test_api::overwrite_native_function_implementation()); - - let _guard = test_api::host_overwrite_native_function_implementation - .replace_implementation(new_implementation); - - assert!(test_api::overwrite_native_function_implementation()); - } - - fn test_u128_i128_as_parameter_and_return_value() { - for val in &[u128::max_value(), 1u128, 5000u128, u64::max_value() as u128] { - assert_eq!(*val, test_api::get_and_return_u128(*val)); - } - - for val in &[i128::max_value(), i128::min_value(), 1i128, 5000i128, u64::max_value() as i128] { - assert_eq!(*val, test_api::get_and_return_i128(*val)); - } - } - - fn test_vec_return_value_memory_is_freed() { - let mut len = 0; - for _ in 0..1024 { - len += test_api::return_16kb().len(); - } - assert_eq!(1024 * 1024 * 4, len); - } - - fn test_encoded_return_value_memory_is_freed() { - let mut len = 0; - for _ in 0..1024 { - len += test_api::return_option_input(vec![0; 16 * 1024]).map(|v| v.len()).unwrap(); - } - assert_eq!(1024 * 1024 * 16, len); - } - - fn test_array_return_value_memory_is_freed() { - let mut len = 0; - for _ in 0..1024 * 
1024 { - len += test_api::get_and_return_array([0; 34])[1]; - } - assert_eq!(0, len); - } - - fn test_versionning_works() { - // we fix new api to accept only 42 as a proper input - // as opposed to sp-runtime-interface-test-wasm-deprecated::test_api::verify_input - // which accepted 42 and 50. - assert!(test_api::test_versionning(42)); - - assert!(!test_api::test_versionning(50)); - assert!(!test_api::test_versionning(102)); - } + fn test_return_data() { + let input = vec![1, 2, 3, 4, 5, 6]; + let res = test_api::return_input(input.clone()); + + assert_eq!(input, res); + } + + fn test_return_option_data() { + let input = vec![1, 2, 3, 4, 5, 6]; + let res = test_api::return_option_input(input.clone()); + + assert_eq!(Some(input), res); + } + + fn test_set_storage() { + let key = "hello"; + let value = "world"; + + test_api::set_storage(key.as_bytes(), value.as_bytes()); + } + + fn test_return_value_into_mutable_reference() { + let mut data = vec![1, 2, 3, 4, 5, 6]; + + test_api::return_value_into_mutable_reference(&mut data); + + let expected = "hello"; + assert_eq!(expected.as_bytes(), &data[..expected.len()]); + } + + fn test_get_and_return_array() { + let mut input = unsafe { mem::MaybeUninit::<[u8; 34]>::zeroed().assume_init() }; + input.copy_from_slice(&[ + 24, 3, 23, 20, 2, 16, 32, 1, 12, 26, 27, 8, 29, 31, 6, 5, 4, 19, 10, 28, 34, 21, 18, 33, 9, + 13, 22, 25, 15, 11, 30, 7, 14, 17, + ]); + + let res = test_api::get_and_return_array(input); + + assert_eq!(&res, &input[..16]); + } + + fn test_array_as_mutable_reference() { + let mut array = [0u8; 16]; + test_api::array_as_mutable_reference(&mut array); + + assert_eq!(array, TEST_ARRAY); + } + + fn test_return_input_public_key() { + let key = Public::try_from( + &[ + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, + 25, 26, 27, 28, 29, 30, 31, 32, + ][..], + ).unwrap(); + let ret_key = test_api::return_input_public_key(key.clone()); + + let key_data: &[u8] = key.as_ref(); 
+ let ret_key_data: &[u8] = ret_key.as_ref(); + assert_eq!(key_data, ret_key_data); + } + + fn test_invalid_utf8_data_should_return_an_error() { + let data = vec![0, 159, 146, 150]; + // I'm an evil hacker, trying to hack! + let data_str = unsafe { sp_std::str::from_utf8_unchecked(&data) }; + + test_api::invalid_utf8_data(data_str); + } + + fn test_overwrite_native_function_implementation() { + fn new_implementation() -> bool { + true + } + + // Check native implementation + assert!(!test_api::overwrite_native_function_implementation()); + + let _guard = test_api::host_overwrite_native_function_implementation + .replace_implementation(new_implementation); + + assert!(test_api::overwrite_native_function_implementation()); + } + + fn test_u128_i128_as_parameter_and_return_value() { + for val in &[u128::max_value(), 1u128, 5000u128, u64::max_value() as u128] { + assert_eq!(*val, test_api::get_and_return_u128(*val)); + } + + for val in &[i128::max_value(), i128::min_value(), 1i128, 5000i128, u64::max_value() as i128] { + assert_eq!(*val, test_api::get_and_return_i128(*val)); + } + } + + fn test_vec_return_value_memory_is_freed() { + let mut len = 0; + for _ in 0..1024 { + len += test_api::return_16kb().len(); + } + assert_eq!(1024 * 1024 * 4, len); + } + + fn test_encoded_return_value_memory_is_freed() { + let mut len = 0; + for _ in 0..1024 { + len += test_api::return_option_input(vec![0; 16 * 1024]).map(|v| v.len()).unwrap(); + } + assert_eq!(1024 * 1024 * 16, len); + } + + fn test_array_return_value_memory_is_freed() { + let mut len = 0; + for _ in 0..1024 * 1024 { + len += test_api::get_and_return_array([0; 34])[1]; + } + assert_eq!(0, len); + } + + fn test_versionning_works() { + // we fix new api to accept only 42 as a proper input + // as opposed to sp-runtime-interface-test-wasm-deprecated::test_api::verify_input + // which accepted 42 and 50. 
+ assert!(test_api::test_versionning(42)); + + assert!(!test_api::test_versionning(50)); + assert!(!test_api::test_versionning(102)); + } } diff --git a/primitives/runtime-interface/test/src/lib.rs b/primitives/runtime-interface/test/src/lib.rs index 110eda980f..cffae8de68 100644 --- a/primitives/runtime-interface/test/src/lib.rs +++ b/primitives/runtime-interface/test/src/lib.rs @@ -16,137 +16,138 @@ //! Integration tests for runtime interface primitives #![cfg(test)] - #![cfg(test)] use sp_runtime_interface::*; -use sp_runtime_interface_test_wasm::{WASM_BINARY, test_api::HostFunctions}; +use sp_runtime_interface_test_wasm::{test_api::HostFunctions, WASM_BINARY}; use sp_runtime_interface_test_wasm_deprecated::WASM_BINARY as WASM_BINARY_DEPRECATED; -use sp_wasm_interface::HostFunctions as HostFunctionsT; use sc_executor::CallInWasm; +use sp_wasm_interface::HostFunctions as HostFunctionsT; type TestExternalities = sp_state_machine::TestExternalities; fn call_wasm_method(binary: &[u8], method: &str) -> TestExternalities { - let mut ext = TestExternalities::default(); - let mut ext_ext = ext.ext(); - let mut host_functions = HF::host_functions(); - host_functions.extend(sp_io::SubstrateHostFunctions::host_functions()); - - let executor = sc_executor::WasmExecutor::new( - sc_executor::WasmExecutionMethod::Interpreted, - Some(8), - host_functions, - false, - 8, - ); - executor.call_in_wasm( - binary, - None, - method, - &[], - &mut ext_ext, - ).expect(&format!("Executes `{}`", method)); - - ext + let mut ext = TestExternalities::default(); + let mut ext_ext = ext.ext(); + let mut host_functions = HF::host_functions(); + host_functions.extend(sp_io::SubstrateHostFunctions::host_functions()); + + let executor = sc_executor::WasmExecutor::new( + sc_executor::WasmExecutionMethod::Interpreted, + Some(8), + host_functions, + false, + 8, + ); + executor + .call_in_wasm(binary, None, method, &[], &mut ext_ext) + .expect(&format!("Executes `{}`", method)); + + ext } #[test] fn 
test_return_data() { - call_wasm_method::(&WASM_BINARY[..], "test_return_data"); + call_wasm_method::(&WASM_BINARY[..], "test_return_data"); } #[test] fn test_return_option_data() { - call_wasm_method::(&WASM_BINARY[..], "test_return_option_data"); + call_wasm_method::(&WASM_BINARY[..], "test_return_option_data"); } #[test] fn test_set_storage() { - let mut ext = call_wasm_method::(&WASM_BINARY[..], "test_set_storage"); + let mut ext = call_wasm_method::(&WASM_BINARY[..], "test_set_storage"); - let expected = "world"; - assert_eq!(expected.as_bytes(), &ext.ext().storage("hello".as_bytes()).unwrap()[..]); + let expected = "world"; + assert_eq!( + expected.as_bytes(), + &ext.ext().storage("hello".as_bytes()).unwrap()[..] + ); } #[test] fn test_return_value_into_mutable_reference() { - call_wasm_method::(&WASM_BINARY[..], "test_return_value_into_mutable_reference"); + call_wasm_method::(&WASM_BINARY[..], "test_return_value_into_mutable_reference"); } #[test] fn test_get_and_return_array() { - call_wasm_method::(&WASM_BINARY[..], "test_get_and_return_array"); + call_wasm_method::(&WASM_BINARY[..], "test_get_and_return_array"); } #[test] fn test_array_as_mutable_reference() { - call_wasm_method::(&WASM_BINARY[..], "test_array_as_mutable_reference"); + call_wasm_method::(&WASM_BINARY[..], "test_array_as_mutable_reference"); } #[test] fn test_return_input_public_key() { - call_wasm_method::(&WASM_BINARY[..], "test_return_input_public_key"); + call_wasm_method::(&WASM_BINARY[..], "test_return_input_public_key"); } #[test] -#[should_panic( - expected = "Instantiation: Export ext_test_api_return_input_version_1 not found" -)] +#[should_panic(expected = "Instantiation: Export ext_test_api_return_input_version_1 not found")] fn host_function_not_found() { - call_wasm_method::<()>(&WASM_BINARY[..], "test_return_data"); + call_wasm_method::<()>(&WASM_BINARY[..], "test_return_data"); } #[test] #[should_panic( - expected = - "Executes 
`test_invalid_utf8_data_should_return_an_error`: \ + expected = "Executes `test_invalid_utf8_data_should_return_an_error`: \ \"Trap: Trap { kind: Host(FunctionExecution(\\\"ext_test_api_invalid_utf8_data_version_1\\\", \ \\\"Invalid utf8 data provided\\\")) }\"" )] fn test_invalid_utf8_data_should_return_an_error() { - call_wasm_method::(&WASM_BINARY[..], "test_invalid_utf8_data_should_return_an_error"); + call_wasm_method::( + &WASM_BINARY[..], + "test_invalid_utf8_data_should_return_an_error", + ); } #[test] fn test_overwrite_native_function_implementation() { - call_wasm_method::(&WASM_BINARY[..], "test_overwrite_native_function_implementation"); + call_wasm_method::( + &WASM_BINARY[..], + "test_overwrite_native_function_implementation", + ); } #[test] fn test_u128_i128_as_parameter_and_return_value() { - call_wasm_method::(&WASM_BINARY[..], "test_u128_i128_as_parameter_and_return_value"); + call_wasm_method::( + &WASM_BINARY[..], + "test_u128_i128_as_parameter_and_return_value", + ); } #[test] fn test_vec_return_value_memory_is_freed() { - call_wasm_method::(&WASM_BINARY[..], "test_vec_return_value_memory_is_freed"); + call_wasm_method::(&WASM_BINARY[..], "test_vec_return_value_memory_is_freed"); } #[test] fn test_encoded_return_value_memory_is_freed() { - call_wasm_method::(&WASM_BINARY[..], "test_encoded_return_value_memory_is_freed"); + call_wasm_method::( + &WASM_BINARY[..], + "test_encoded_return_value_memory_is_freed", + ); } #[test] fn test_array_return_value_memory_is_freed() { - call_wasm_method::(&WASM_BINARY[..], "test_array_return_value_memory_is_freed"); + call_wasm_method::(&WASM_BINARY[..], "test_array_return_value_memory_is_freed"); } #[test] fn test_versionining_with_new_host_works() { - // We call to the new wasm binary with new host function. 
- call_wasm_method::( - &WASM_BINARY[..], - "test_versionning_works", - ); - - // we call to the old wasm binary with a new host functions - // old versions of host functions should be called and test should be ok! - call_wasm_method::( - &WASM_BINARY_DEPRECATED[..], - "test_versionning_works", - ); + // We call to the new wasm binary with new host function. + call_wasm_method::(&WASM_BINARY[..], "test_versionning_works"); + + // we call to the old wasm binary with a new host functions + // old versions of host functions should be called and test should be ok! + call_wasm_method::(&WASM_BINARY_DEPRECATED[..], "test_versionning_works"); } diff --git a/primitives/runtime-interface/tests/ui.rs b/primitives/runtime-interface/tests/ui.rs index 910771f938..22aef0964b 100644 --- a/primitives/runtime-interface/tests/ui.rs +++ b/primitives/runtime-interface/tests/ui.rs @@ -19,9 +19,9 @@ use std::env; #[rustversion::attr(not(stable), ignore)] #[test] fn ui() { - // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); - let t = trybuild::TestCases::new(); - t.compile_fail("tests/ui/*.rs"); + let t = trybuild::TestCases::new(); + t.compile_fail("tests/ui/*.rs"); } diff --git a/primitives/runtime/src/curve.rs b/primitives/runtime/src/curve.rs index b00cbed652..41c18e2fcb 100644 --- a/primitives/runtime/src/curve.rs +++ b/primitives/runtime/src/curve.rs @@ -16,151 +16,183 @@ //! Provides some utilities to define a piecewise linear function. -use crate::{Perbill, traits::{AtLeast32Bit, SaturatedConversion}}; +use crate::{ + traits::{AtLeast32Bit, SaturatedConversion}, + Perbill, +}; use core::ops::Sub; /// Piecewise Linear function in [0, 1] -> [0, 1]. #[derive(PartialEq, Eq, sp_core::RuntimeDebug)] pub struct PiecewiseLinear<'a> { - /// Array of points. 
Must be in order from the lowest abscissas to the highest. - pub points: &'a [(Perbill, Perbill)], - /// The maximum value that can be returned. - pub maximum: Perbill, + /// Array of points. Must be in order from the lowest abscissas to the highest. + pub points: &'a [(Perbill, Perbill)], + /// The maximum value that can be returned. + pub maximum: Perbill, } -fn abs_sub + Clone>(a: N, b: N) -> N where { - a.clone().max(b.clone()) - a.min(b) +fn abs_sub + Clone>(a: N, b: N) -> N where { + a.clone().max(b.clone()) - a.min(b) } impl<'a> PiecewiseLinear<'a> { - /// Compute `f(n/d)*d` with `n <= d`. This is useful to avoid loss of precision. - pub fn calculate_for_fraction_times_denominator(&self, n: N, d: N) -> N where - N: AtLeast32Bit + Clone - { - let n = n.min(d.clone()); - - if self.points.len() == 0 { - return N::zero() - } - - let next_point_index = self.points.iter() - .position(|p| n < p.0 * d.clone()); - - let (prev, next) = if let Some(next_point_index) = next_point_index { - if let Some(previous_point_index) = next_point_index.checked_sub(1) { - (self.points[previous_point_index], self.points[next_point_index]) - } else { - // There is no previous points, take first point ordinate - return self.points.first().map(|p| p.1).unwrap_or_else(Perbill::zero) * d - } - } else { - // There is no next points, take last point ordinate - return self.points.last().map(|p| p.1).unwrap_or_else(Perbill::zero) * d - }; - - let delta_y = multiply_by_rational_saturating( - abs_sub(n.clone(), prev.0 * d.clone()), - abs_sub(next.1.deconstruct(), prev.1.deconstruct()), - // Must not saturate as prev abscissa > next abscissa - next.0.deconstruct().saturating_sub(prev.0.deconstruct()), - ); - - // If both subtractions are same sign then result is positive - if (n > prev.0 * d.clone()) == (next.1.deconstruct() > prev.1.deconstruct()) { - (prev.1 * d).saturating_add(delta_y) - // Otherwise result is negative - } else { - (prev.1 * d).saturating_sub(delta_y) - } - } + /// Compute 
`f(n/d)*d` with `n <= d`. This is useful to avoid loss of precision. + pub fn calculate_for_fraction_times_denominator(&self, n: N, d: N) -> N + where + N: AtLeast32Bit + Clone, + { + let n = n.min(d.clone()); + + if self.points.len() == 0 { + return N::zero(); + } + + let next_point_index = self.points.iter().position(|p| n < p.0 * d.clone()); + + let (prev, next) = if let Some(next_point_index) = next_point_index { + if let Some(previous_point_index) = next_point_index.checked_sub(1) { + ( + self.points[previous_point_index], + self.points[next_point_index], + ) + } else { + // There is no previous points, take first point ordinate + return self + .points + .first() + .map(|p| p.1) + .unwrap_or_else(Perbill::zero) + * d; + } + } else { + // There is no next points, take last point ordinate + return self + .points + .last() + .map(|p| p.1) + .unwrap_or_else(Perbill::zero) + * d; + }; + + let delta_y = multiply_by_rational_saturating( + abs_sub(n.clone(), prev.0 * d.clone()), + abs_sub(next.1.deconstruct(), prev.1.deconstruct()), + // Must not saturate as prev abscissa > next abscissa + next.0.deconstruct().saturating_sub(prev.0.deconstruct()), + ); + + // If both subtractions are same sign then result is positive + if (n > prev.0 * d.clone()) == (next.1.deconstruct() > prev.1.deconstruct()) { + (prev.1 * d).saturating_add(delta_y) + // Otherwise result is negative + } else { + (prev.1 * d).saturating_sub(delta_y) + } + } } // Compute value * p / q. // This is guaranteed not to overflow on whatever values nor lose precision. // `q` must be superior to zero. 
fn multiply_by_rational_saturating(value: N, p: u32, q: u32) -> N - where N: AtLeast32Bit + Clone +where + N: AtLeast32Bit + Clone, { - let q = q.max(1); + let q = q.max(1); - // Mul can saturate if p > q - let result_divisor_part = (value.clone() / q.into()).saturating_mul(p.into()); + // Mul can saturate if p > q + let result_divisor_part = (value.clone() / q.into()).saturating_mul(p.into()); - let result_remainder_part = { - let rem = value % q.into(); + let result_remainder_part = { + let rem = value % q.into(); - // Fits into u32 because q is u32 and remainder < q - let rem_u32 = rem.saturated_into::(); + // Fits into u32 because q is u32 and remainder < q + let rem_u32 = rem.saturated_into::(); - // Multiplication fits into u64 as both term are u32 - let rem_part = rem_u32 as u64 * p as u64 / q as u64; + // Multiplication fits into u64 as both term are u32 + let rem_part = rem_u32 as u64 * p as u64 / q as u64; - // Can saturate if p > q - rem_part.saturated_into::() - }; + // Can saturate if p > q + rem_part.saturated_into::() + }; - // Can saturate if p > q - result_divisor_part.saturating_add(result_remainder_part) + // Can saturate if p > q + result_divisor_part.saturating_add(result_remainder_part) } #[test] fn test_multiply_by_rational_saturating() { - use std::convert::TryInto; - - let div = 100u32; - for value in 0..=div { - for p in 0..=div { - for q in 1..=div { - let value: u64 = (value as u128 * u64::max_value() as u128 / div as u128) - .try_into().unwrap(); - let p = (p as u64 * u32::max_value() as u64 / div as u64) - .try_into().unwrap(); - let q = (q as u64 * u32::max_value() as u64 / div as u64) - .try_into().unwrap(); - - assert_eq!( - multiply_by_rational_saturating(value, p, q), - (value as u128 * p as u128 / q as u128) - .try_into().unwrap_or(u64::max_value()) - ); - } - } - } + use std::convert::TryInto; + + let div = 100u32; + for value in 0..=div { + for p in 0..=div { + for q in 1..=div { + let value: u64 = (value as u128 * 
u64::max_value() as u128 / div as u128) + .try_into() + .unwrap(); + let p = (p as u64 * u32::max_value() as u64 / div as u64) + .try_into() + .unwrap(); + let q = (q as u64 * u32::max_value() as u64 / div as u64) + .try_into() + .unwrap(); + + assert_eq!( + multiply_by_rational_saturating(value, p, q), + (value as u128 * p as u128 / q as u128) + .try_into() + .unwrap_or(u64::max_value()) + ); + } + } + } } #[test] fn test_calculate_for_fraction_times_denominator() { - use std::convert::TryInto; - - let curve = PiecewiseLinear { - points: &[ - (Perbill::from_parts(0_000_000_000), Perbill::from_parts(0_500_000_000)), - (Perbill::from_parts(0_500_000_000), Perbill::from_parts(1_000_000_000)), - (Perbill::from_parts(1_000_000_000), Perbill::from_parts(0_000_000_000)), - ], - maximum: Perbill::from_parts(1_000_000_000), - }; - - pub fn formal_calculate_for_fraction_times_denominator(n: u64, d: u64) -> u64 { - if n <= Perbill::from_parts(0_500_000_000) * d.clone() { - n + d / 2 - } else { - (d as u128 * 2 - n as u128 * 2).try_into().unwrap() - } - } - - let div = 100u32; - for d in 0..=div { - for n in 0..=d { - let d: u64 = (d as u128 * u64::max_value() as u128 / div as u128) - .try_into().unwrap(); - let n: u64 = (n as u128 * u64::max_value() as u128 / div as u128) - .try_into().unwrap(); - - let res = curve.calculate_for_fraction_times_denominator(n, d); - let expected = formal_calculate_for_fraction_times_denominator(n, d); - - assert!(abs_sub(res, expected) <= 1); - } - } + use std::convert::TryInto; + + let curve = PiecewiseLinear { + points: &[ + ( + Perbill::from_parts(0_000_000_000), + Perbill::from_parts(0_500_000_000), + ), + ( + Perbill::from_parts(0_500_000_000), + Perbill::from_parts(1_000_000_000), + ), + ( + Perbill::from_parts(1_000_000_000), + Perbill::from_parts(0_000_000_000), + ), + ], + maximum: Perbill::from_parts(1_000_000_000), + }; + + pub fn formal_calculate_for_fraction_times_denominator(n: u64, d: u64) -> u64 { + if n <= 
Perbill::from_parts(0_500_000_000) * d.clone() { + n + d / 2 + } else { + (d as u128 * 2 - n as u128 * 2).try_into().unwrap() + } + } + + let div = 100u32; + for d in 0..=div { + for n in 0..=d { + let d: u64 = (d as u128 * u64::max_value() as u128 / div as u128) + .try_into() + .unwrap(); + let n: u64 = (n as u128 * u64::max_value() as u128 / div as u128) + .try_into() + .unwrap(); + + let res = curve.calculate_for_fraction_times_denominator(n, d); + let expected = formal_calculate_for_fraction_times_denominator(n, d); + + assert!(abs_sub(res, expected) <= 1); + } + } } diff --git a/primitives/runtime/src/generic/block.rs b/primitives/runtime/src/generic/block.rs index fb07d6c215..234cf2ec47 100644 --- a/primitives/runtime/src/generic/block.rs +++ b/primitives/runtime/src/generic/block.rs @@ -22,14 +22,13 @@ use std::fmt; #[cfg(feature = "std")] use serde::{Deserialize, Serialize}; -use sp_std::prelude::*; -use sp_core::RuntimeDebug; -use crate::codec::{Codec, Encode, Decode}; +use crate::codec::{Codec, Decode, Encode}; use crate::traits::{ - self, Member, Block as BlockT, Header as HeaderT, MaybeSerialize, MaybeMallocSizeOf, - NumberFor, + self, Block as BlockT, Header as HeaderT, MaybeMallocSizeOf, MaybeSerialize, Member, NumberFor, }; use crate::Justification; +use sp_core::RuntimeDebug; +use sp_std::prelude::*; /// Something to identify a block. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] @@ -37,69 +36,72 @@ use crate::Justification; #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub enum BlockId { - /// Identify by block header hash. - Hash(Block::Hash), - /// Identify by block number. - Number(NumberFor), + /// Identify by block header hash. + Hash(Block::Hash), + /// Identify by block number. + Number(NumberFor), } impl BlockId { - /// Create a block ID from a hash. - pub fn hash(hash: Block::Hash) -> Self { - BlockId::Hash(hash) - } - - /// Create a block ID from a number. 
- pub fn number(number: NumberFor) -> Self { - BlockId::Number(number) - } + /// Create a block ID from a hash. + pub fn hash(hash: Block::Hash) -> Self { + BlockId::Hash(hash) + } + + /// Create a block ID from a number. + pub fn number(number: NumberFor) -> Self { + BlockId::Number(number) + } } impl Copy for BlockId {} #[cfg(feature = "std")] impl fmt::Display for BlockId { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{:?}", self) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "{:?}", self) + } } /// Abstraction over a substrate block. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf) +)] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub struct Block { - /// The block header. - pub header: Header, - /// The accompanying extrinsics. - pub extrinsics: Vec, + /// The block header. + pub header: Header, + /// The accompanying extrinsics. + pub extrinsics: Vec, } impl traits::Block for Block where - Header: HeaderT, - Extrinsic: Member + Codec + traits::Extrinsic + MaybeMallocSizeOf, + Header: HeaderT, + Extrinsic: Member + Codec + traits::Extrinsic + MaybeMallocSizeOf, { - type Extrinsic = Extrinsic; - type Header = Header; - type Hash = ::Hash; - - fn header(&self) -> &Self::Header { - &self.header - } - fn extrinsics(&self) -> &[Self::Extrinsic] { - &self.extrinsics[..] 
- } - fn deconstruct(self) -> (Self::Header, Vec) { - (self.header, self.extrinsics) - } - fn new(header: Self::Header, extrinsics: Vec) -> Self { - Block { header, extrinsics } - } - fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec { - (header, extrinsics).encode() - } + type Extrinsic = Extrinsic; + type Header = Header; + type Hash = ::Hash; + + fn header(&self) -> &Self::Header { + &self.header + } + fn extrinsics(&self) -> &[Self::Extrinsic] { + &self.extrinsics[..] + } + fn deconstruct(self) -> (Self::Header, Vec) { + (self.header, self.extrinsics) + } + fn new(header: Self::Header, extrinsics: Vec) -> Self { + Block { header, extrinsics } + } + fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec { + (header, extrinsics).encode() + } } /// Abstraction over a substrate block and justification. @@ -108,8 +110,8 @@ where #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub struct SignedBlock { - /// Full block. - pub block: Block, - /// Block justification. - pub justification: Option, + /// Full block. + pub block: Block, + /// Block justification. + pub justification: Option, } diff --git a/primitives/runtime/src/generic/checked_extrinsic.rs b/primitives/runtime/src/generic/checked_extrinsic.rs index a329f334c0..8775156cc0 100644 --- a/primitives/runtime/src/generic/checked_extrinsic.rs +++ b/primitives/runtime/src/generic/checked_extrinsic.rs @@ -18,70 +18,69 @@ //! stage. 
use crate::traits::{ - self, Member, MaybeDisplay, SignedExtension, Dispatchable, DispatchInfoOf, ValidateUnsigned, + self, DispatchInfoOf, Dispatchable, MaybeDisplay, Member, SignedExtension, ValidateUnsigned, }; -use crate::transaction_validity::{TransactionValidity, TransactionSource}; +use crate::transaction_validity::{TransactionSource, TransactionValidity}; /// Definition of something that the external world might want to say; its /// existence implies that it has been checked and is good, particularly with /// regards to the signature. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] pub struct CheckedExtrinsic { - /// Who this purports to be from and the number of extrinsics have come before - /// from the same signer, if anyone (note this is not a signature). - pub signed: Option<(AccountId, Extra)>, + /// Who this purports to be from and the number of extrinsics have come before + /// from the same signer, if anyone (note this is not a signature). + pub signed: Option<(AccountId, Extra)>, - /// The function that should be called. - pub function: Call, + /// The function that should be called. + pub function: Call, } -impl traits::Applyable for - CheckedExtrinsic +impl traits::Applyable for CheckedExtrinsic where - AccountId: Member + MaybeDisplay, - Call: Member + Dispatchable, - Extra: SignedExtension, - Origin: From>, + AccountId: Member + MaybeDisplay, + Call: Member + Dispatchable, + Extra: SignedExtension, + Origin: From>, { - type Call = Call; + type Call = Call; - fn validate>( - &self, - // TODO [#5006;ToDr] should source be passed to `SignedExtension`s? - // Perhaps a change for 2.0 to avoid breaking too much APIs? 
- source: TransactionSource, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - if let Some((ref id, ref extra)) = self.signed { - Extra::validate(extra, id, &self.function, info, len) - } else { - let valid = Extra::validate_unsigned(&self.function, info, len)?; - let unsigned_validation = U::validate_unsigned(source, &self.function)?; - Ok(valid.combine_with(unsigned_validation)) - } - } + fn validate>( + &self, + // TODO [#5006;ToDr] should source be passed to `SignedExtension`s? + // Perhaps a change for 2.0 to avoid breaking too much APIs? + source: TransactionSource, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + if let Some((ref id, ref extra)) = self.signed { + Extra::validate(extra, id, &self.function, info, len) + } else { + let valid = Extra::validate_unsigned(&self.function, info, len)?; + let unsigned_validation = U::validate_unsigned(source, &self.function)?; + Ok(valid.combine_with(unsigned_validation)) + } + } - fn apply>( - self, - info: &DispatchInfoOf, - len: usize, - ) -> crate::ApplyExtrinsicResult { - let (maybe_who, pre) = if let Some((id, extra)) = self.signed { - let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?; - (Some(id), pre) - } else { - let pre = Extra::pre_dispatch_unsigned(&self.function, info, len)?; - U::pre_dispatch(&self.function)?; - (None, pre) - }; - let res = self.function.dispatch(Origin::from(maybe_who)); - let post_info = match res { - Ok(info) => info, - Err(err) => err.post_info, - }; - let res = res.map(|_| ()).map_err(|e| e.error); - Extra::post_dispatch(pre, info, &post_info, len, &res)?; - Ok(res) - } + fn apply>( + self, + info: &DispatchInfoOf, + len: usize, + ) -> crate::ApplyExtrinsicResult { + let (maybe_who, pre) = if let Some((id, extra)) = self.signed { + let pre = Extra::pre_dispatch(extra, &id, &self.function, info, len)?; + (Some(id), pre) + } else { + let pre = Extra::pre_dispatch_unsigned(&self.function, info, len)?; + 
U::pre_dispatch(&self.function)?; + (None, pre) + }; + let res = self.function.dispatch(Origin::from(maybe_who)); + let post_info = match res { + Ok(info) => info, + Err(err) => err.post_info, + }; + let res = res.map(|_| ()).map_err(|e| e.error); + Extra::post_dispatch(pre, info, &post_info, len, &res)?; + Ok(res) + } } diff --git a/primitives/runtime/src/generic/digest.rs b/primitives/runtime/src/generic/digest.rs index dad3e1fc26..b2c51ed23f 100644 --- a/primitives/runtime/src/generic/digest.rs +++ b/primitives/runtime/src/generic/digest.rs @@ -21,160 +21,166 @@ use serde::{Deserialize, Serialize}; use sp_std::prelude::*; +use crate::codec::{Decode, Encode, Error, Input}; use crate::ConsensusEngineId; -use crate::codec::{Decode, Encode, Input, Error}; use sp_core::{ChangesTrieConfiguration, RuntimeDebug}; /// Generic header digest. #[derive(PartialEq, Eq, Clone, Encode, Decode, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf))] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, parity_util_mem::MallocSizeOf) +)] pub struct Digest { - /// A list of logs in the digest. - pub logs: Vec>, + /// A list of logs in the digest. + pub logs: Vec>, } impl Default for Digest { - fn default() -> Self { - Digest { logs: Vec::new(), } - } + fn default() -> Self { + Digest { logs: Vec::new() } + } } impl Digest { - /// Get reference to all digest items. - pub fn logs(&self) -> &[DigestItem] { - &self.logs - } - - /// Push new digest item. - pub fn push(&mut self, item: DigestItem) { - self.logs.push(item); - } - - /// Pop a digest item. - pub fn pop(&mut self) -> Option> { - self.logs.pop() - } - - /// Get reference to the first digest item that matches the passed predicate. - pub fn log) -> Option<&T>>(&self, predicate: F) -> Option<&T> { - self.logs().iter() - .filter_map(predicate) - .next() - } - - /// Get a conversion of the first digest item that successfully converts using the function. 
- pub fn convert_first) -> Option>(&self, predicate: F) -> Option { - self.logs().iter() - .filter_map(predicate) - .next() - } + /// Get reference to all digest items. + pub fn logs(&self) -> &[DigestItem] { + &self.logs + } + + /// Push new digest item. + pub fn push(&mut self, item: DigestItem) { + self.logs.push(item); + } + + /// Pop a digest item. + pub fn pop(&mut self) -> Option> { + self.logs.pop() + } + + /// Get reference to the first digest item that matches the passed predicate. + pub fn log) -> Option<&T>>( + &self, + predicate: F, + ) -> Option<&T> { + self.logs().iter().filter_map(predicate).next() + } + + /// Get a conversion of the first digest item that successfully converts using the function. + pub fn convert_first) -> Option>( + &self, + predicate: F, + ) -> Option { + self.logs().iter().filter_map(predicate).next() + } } - /// Digest item that is able to encode/decode 'system' digest items and /// provide opaque access to other items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] #[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] pub enum DigestItem { - /// System digest item that contains the root of changes trie at given - /// block. It is created for every block iff runtime supports changes - /// trie creation. - ChangesTrieRoot(Hash), - - /// A pre-runtime digest. - /// - /// These are messages from the consensus engine to the runtime, although - /// the consensus engine can (and should) read them itself to avoid - /// code and state duplication. It is erroneous for a runtime to produce - /// these, but this is not (yet) checked. - /// - /// NOTE: the runtime is not allowed to panic or fail in an `on_initialize` - /// call if an expected `PreRuntime` digest is not present. It is the - /// responsibility of a external block verifier to check this. Runtime API calls - /// will initialize the block without pre-runtime digests, so initialization - /// cannot fail when they are missing. 
- PreRuntime(ConsensusEngineId, Vec), - - /// A message from the runtime to the consensus engine. This should *never* - /// be generated by the native code of any consensus engine, but this is not - /// checked (yet). - Consensus(ConsensusEngineId, Vec), - - /// Put a Seal on it. This is only used by native code, and is never seen - /// by runtimes. - Seal(ConsensusEngineId, Vec), - - /// Digest item that contains signal from changes tries manager to the - /// native code. - ChangesTrieSignal(ChangesTrieSignal), - - /// Some other thing. Unsupported and experimental. - Other(Vec), + /// System digest item that contains the root of changes trie at given + /// block. It is created for every block iff runtime supports changes + /// trie creation. + ChangesTrieRoot(Hash), + + /// A pre-runtime digest. + /// + /// These are messages from the consensus engine to the runtime, although + /// the consensus engine can (and should) read them itself to avoid + /// code and state duplication. It is erroneous for a runtime to produce + /// these, but this is not (yet) checked. + /// + /// NOTE: the runtime is not allowed to panic or fail in an `on_initialize` + /// call if an expected `PreRuntime` digest is not present. It is the + /// responsibility of a external block verifier to check this. Runtime API calls + /// will initialize the block without pre-runtime digests, so initialization + /// cannot fail when they are missing. + PreRuntime(ConsensusEngineId, Vec), + + /// A message from the runtime to the consensus engine. This should *never* + /// be generated by the native code of any consensus engine, but this is not + /// checked (yet). + Consensus(ConsensusEngineId, Vec), + + /// Put a Seal on it. This is only used by native code, and is never seen + /// by runtimes. + Seal(ConsensusEngineId, Vec), + + /// Digest item that contains signal from changes tries manager to the + /// native code. + ChangesTrieSignal(ChangesTrieSignal), + + /// Some other thing. 
Unsupported and experimental. + Other(Vec), } /// Available changes trie signals. #[derive(PartialEq, Eq, Clone, Encode, Decode)] #[cfg_attr(feature = "std", derive(Debug, parity_util_mem::MallocSizeOf))] pub enum ChangesTrieSignal { - /// New changes trie configuration is enacted, starting from **next block**. - /// - /// The block that emits this signal will contain changes trie (CT) that covers - /// blocks range [BEGIN; current block], where BEGIN is (order matters): - /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created - /// using current configuration AND the last top level digest CT has been created - /// at block LAST_TOP_LEVEL_DIGEST_BLOCK; - /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change - /// before and the last configuration change happened at block - /// LAST_CONFIGURATION_CHANGE_BLOCK; - /// - 1 otherwise. - NewConfiguration(Option), + /// New changes trie configuration is enacted, starting from **next block**. + /// + /// The block that emits this signal will contain changes trie (CT) that covers + /// blocks range [BEGIN; current block], where BEGIN is (order matters): + /// - LAST_TOP_LEVEL_DIGEST_BLOCK+1 if top level digest CT has ever been created + /// using current configuration AND the last top level digest CT has been created + /// at block LAST_TOP_LEVEL_DIGEST_BLOCK; + /// - LAST_CONFIGURATION_CHANGE_BLOCK+1 if there has been CT configuration change + /// before and the last configuration change happened at block + /// LAST_CONFIGURATION_CHANGE_BLOCK; + /// - 1 otherwise. 
+ NewConfiguration(Option), } #[cfg(feature = "std")] impl serde::Serialize for DigestItem { - fn serialize(&self, seq: S) -> Result where S: serde::Serializer { - self.using_encoded(|bytes| { - sp_core::bytes::serialize(bytes, seq) - }) - } + fn serialize(&self, seq: S) -> Result + where + S: serde::Serializer, + { + self.using_encoded(|bytes| sp_core::bytes::serialize(bytes, seq)) + } } #[cfg(feature = "std")] impl<'a, Hash: Decode> serde::Deserialize<'a> for DigestItem { - fn deserialize(de: D) -> Result where - D: serde::Deserializer<'a>, - { - let r = sp_core::bytes::deserialize(de)?; - Decode::decode(&mut &r[..]) - .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) - } + fn deserialize(de: D) -> Result + where + D: serde::Deserializer<'a>, + { + let r = sp_core::bytes::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) + } } /// A 'referencing view' for digest item. Does not own its contents. Used by /// final runtime implementations for encoding/decoding its log items. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] pub enum DigestItemRef<'a, Hash: 'a> { - /// Reference to `DigestItem::ChangesTrieRoot`. - ChangesTrieRoot(&'a Hash), - /// A pre-runtime digest. - /// - /// These are messages from the consensus engine to the runtime, although - /// the consensus engine can (and should) read them itself to avoid - /// code and state duplication. It is erroneous for a runtime to produce - /// these, but this is not (yet) checked. - PreRuntime(&'a ConsensusEngineId, &'a Vec), - /// A message from the runtime to the consensus engine. This should *never* - /// be generated by the native code of any consensus engine, but this is not - /// checked (yet). - Consensus(&'a ConsensusEngineId, &'a Vec), - /// Put a Seal on it. This is only used by native code, and is never seen - /// by runtimes. 
- Seal(&'a ConsensusEngineId, &'a Vec), - /// Digest item that contains signal from changes tries manager to the - /// native code. - ChangesTrieSignal(&'a ChangesTrieSignal), - /// Any 'non-system' digest item, opaque to the native code. - Other(&'a Vec), + /// Reference to `DigestItem::ChangesTrieRoot`. + ChangesTrieRoot(&'a Hash), + /// A pre-runtime digest. + /// + /// These are messages from the consensus engine to the runtime, although + /// the consensus engine can (and should) read them itself to avoid + /// code and state duplication. It is erroneous for a runtime to produce + /// these, but this is not (yet) checked. + PreRuntime(&'a ConsensusEngineId, &'a Vec), + /// A message from the runtime to the consensus engine. This should *never* + /// be generated by the native code of any consensus engine, but this is not + /// checked (yet). + Consensus(&'a ConsensusEngineId, &'a Vec), + /// Put a Seal on it. This is only used by native code, and is never seen + /// by runtimes. + Seal(&'a ConsensusEngineId, &'a Vec), + /// Digest item that contains signal from changes tries manager to the + /// native code. + ChangesTrieSignal(&'a ChangesTrieSignal), + /// Any 'non-system' digest item, opaque to the native code. + Other(&'a Vec), } /// Type of the digest item. Used to gain explicit control over `DigestItem` encoding @@ -184,256 +190,264 @@ pub enum DigestItemRef<'a, Hash: 'a> { #[repr(u32)] #[derive(Encode, Decode)] pub enum DigestItemType { - Other = 0, - ChangesTrieRoot = 2, - Consensus = 4, - Seal = 5, - PreRuntime = 6, - ChangesTrieSignal = 7, + Other = 0, + ChangesTrieRoot = 2, + Consensus = 4, + Seal = 5, + PreRuntime = 6, + ChangesTrieSignal = 7, } /// Type of a digest item that contains raw data; this also names the consensus engine ID where /// applicable. Used to identify one or more digest items of interest. #[derive(Copy, Clone, Eq, PartialEq, Ord, PartialOrd)] pub enum OpaqueDigestItemId<'a> { - /// Type corresponding to DigestItem::PreRuntime. 
- PreRuntime(&'a ConsensusEngineId), - /// Type corresponding to DigestItem::Consensus. - Consensus(&'a ConsensusEngineId), - /// Type corresponding to DigestItem::Seal. - Seal(&'a ConsensusEngineId), - /// Some other (non-prescribed) type. - Other, + /// Type corresponding to DigestItem::PreRuntime. + PreRuntime(&'a ConsensusEngineId), + /// Type corresponding to DigestItem::Consensus. + Consensus(&'a ConsensusEngineId), + /// Type corresponding to DigestItem::Seal. + Seal(&'a ConsensusEngineId), + /// Some other (non-prescribed) type. + Other, } impl DigestItem { - /// Returns a 'referencing view' for this digest item. - pub fn dref<'a>(&'a self) -> DigestItemRef<'a, Hash> { - match *self { - DigestItem::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), - DigestItem::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), - DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), - DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), - DigestItem::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), - DigestItem::Other(ref v) => DigestItemRef::Other(v), - } - } - - /// Returns `Some` if the entry is the `ChangesTrieRoot` entry. - pub fn as_changes_trie_root(&self) -> Option<&Hash> { - self.dref().as_changes_trie_root() - } - - /// Returns `Some` if this entry is the `PreRuntime` entry. - pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &[u8])> { - self.dref().as_pre_runtime() - } - - /// Returns `Some` if this entry is the `Consensus` entry. - pub fn as_consensus(&self) -> Option<(ConsensusEngineId, &[u8])> { - self.dref().as_consensus() - } - - /// Returns `Some` if this entry is the `Seal` entry. - pub fn as_seal(&self) -> Option<(ConsensusEngineId, &[u8])> { - self.dref().as_seal() - } - - /// Returns `Some` if the entry is the `ChangesTrieSignal` entry. 
- pub fn as_changes_trie_signal(&self) -> Option<&ChangesTrieSignal> { - self.dref().as_changes_trie_signal() - } - - /// Returns Some if `self` is a `DigestItem::Other`. - pub fn as_other(&self) -> Option<&[u8]> { - match *self { - DigestItem::Other(ref v) => Some(&v[..]), - _ => None, - } - } - - /// Returns the opaque data contained in the item if `Some` if this entry has the id given. - pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&[u8]> { - self.dref().try_as_raw(id) - } - - /// Returns the data contained in the item if `Some` if this entry has the id given, decoded - /// to the type provided `T`. - pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { - self.dref().try_to::(id) - } + /// Returns a 'referencing view' for this digest item. + pub fn dref<'a>(&'a self) -> DigestItemRef<'a, Hash> { + match *self { + DigestItem::ChangesTrieRoot(ref v) => DigestItemRef::ChangesTrieRoot(v), + DigestItem::PreRuntime(ref v, ref s) => DigestItemRef::PreRuntime(v, s), + DigestItem::Consensus(ref v, ref s) => DigestItemRef::Consensus(v, s), + DigestItem::Seal(ref v, ref s) => DigestItemRef::Seal(v, s), + DigestItem::ChangesTrieSignal(ref s) => DigestItemRef::ChangesTrieSignal(s), + DigestItem::Other(ref v) => DigestItemRef::Other(v), + } + } + + /// Returns `Some` if the entry is the `ChangesTrieRoot` entry. + pub fn as_changes_trie_root(&self) -> Option<&Hash> { + self.dref().as_changes_trie_root() + } + + /// Returns `Some` if this entry is the `PreRuntime` entry. + pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &[u8])> { + self.dref().as_pre_runtime() + } + + /// Returns `Some` if this entry is the `Consensus` entry. + pub fn as_consensus(&self) -> Option<(ConsensusEngineId, &[u8])> { + self.dref().as_consensus() + } + + /// Returns `Some` if this entry is the `Seal` entry. + pub fn as_seal(&self) -> Option<(ConsensusEngineId, &[u8])> { + self.dref().as_seal() + } + + /// Returns `Some` if the entry is the `ChangesTrieSignal` entry. 
+ pub fn as_changes_trie_signal(&self) -> Option<&ChangesTrieSignal> { + self.dref().as_changes_trie_signal() + } + + /// Returns Some if `self` is a `DigestItem::Other`. + pub fn as_other(&self) -> Option<&[u8]> { + match *self { + DigestItem::Other(ref v) => Some(&v[..]), + _ => None, + } + } + + /// Returns the opaque data contained in the item if `Some` if this entry has the id given. + pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&[u8]> { + self.dref().try_as_raw(id) + } + + /// Returns the data contained in the item if `Some` if this entry has the id given, decoded + /// to the type provided `T`. + pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { + self.dref().try_to::(id) + } } impl Encode for DigestItem { - fn encode(&self) -> Vec { - self.dref().encode() - } + fn encode(&self) -> Vec { + self.dref().encode() + } } impl codec::EncodeLike for DigestItem {} impl Decode for DigestItem { - #[allow(deprecated)] - fn decode(input: &mut I) -> Result { - let item_type: DigestItemType = Decode::decode(input)?; - match item_type { - DigestItemType::ChangesTrieRoot => Ok(DigestItem::ChangesTrieRoot( - Decode::decode(input)?, - )), - DigestItemType::PreRuntime => { - let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::PreRuntime(vals.0, vals.1)) - }, - DigestItemType::Consensus => { - let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::Consensus(vals.0, vals.1)) - } - DigestItemType::Seal => { - let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; - Ok(DigestItem::Seal(vals.0, vals.1)) - }, - DigestItemType::ChangesTrieSignal => Ok(DigestItem::ChangesTrieSignal( - Decode::decode(input)?, - )), - DigestItemType::Other => Ok(DigestItem::Other( - Decode::decode(input)?, - )), - } - } + #[allow(deprecated)] + fn decode(input: &mut I) -> Result { + let item_type: DigestItemType = Decode::decode(input)?; + match item_type { + DigestItemType::ChangesTrieRoot => { + 
Ok(DigestItem::ChangesTrieRoot(Decode::decode(input)?)) + } + DigestItemType::PreRuntime => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(DigestItem::PreRuntime(vals.0, vals.1)) + } + DigestItemType::Consensus => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(DigestItem::Consensus(vals.0, vals.1)) + } + DigestItemType::Seal => { + let vals: (ConsensusEngineId, Vec) = Decode::decode(input)?; + Ok(DigestItem::Seal(vals.0, vals.1)) + } + DigestItemType::ChangesTrieSignal => { + Ok(DigestItem::ChangesTrieSignal(Decode::decode(input)?)) + } + DigestItemType::Other => Ok(DigestItem::Other(Decode::decode(input)?)), + } + } } impl<'a, Hash> DigestItemRef<'a, Hash> { - /// Cast this digest item into `ChangesTrieRoot`. - pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { - match *self { - DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), - _ => None, - } - } - - /// Cast this digest item into `PreRuntime` - pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &'a [u8])> { - match *self { - DigestItemRef::PreRuntime(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), - _ => None, - } - } - - /// Cast this digest item into `Consensus` - pub fn as_consensus(&self) -> Option<(ConsensusEngineId, &'a [u8])> { - match *self { - DigestItemRef::Consensus(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), - _ => None, - } - } - - /// Cast this digest item into `Seal` - pub fn as_seal(&self) -> Option<(ConsensusEngineId, &'a [u8])> { - match *self { - DigestItemRef::Seal(consensus_engine_id, ref data) => Some((*consensus_engine_id, data)), - _ => None, - } - } - - /// Cast this digest item into `ChangesTrieSignal`. 
- pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { - match *self { - DigestItemRef::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), - _ => None, - } - } - - /// Cast this digest item into `PreRuntime` - pub fn as_other(&self) -> Option<&'a [u8]> { - match *self { - DigestItemRef::Other(ref data) => Some(data), - _ => None, - } - } - - /// Try to match this digest item to the given opaque item identifier; if it matches, then - /// return the opaque data it contains. - pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&'a [u8]> { - match (id, self) { - (OpaqueDigestItemId::Consensus(w), &DigestItemRef::Consensus(v, s)) | - (OpaqueDigestItemId::Seal(w), &DigestItemRef::Seal(v, s)) | - (OpaqueDigestItemId::PreRuntime(w), &DigestItemRef::PreRuntime(v, s)) - if v == w => Some(&s[..]), - (OpaqueDigestItemId::Other, &DigestItemRef::Other(s)) => Some(&s[..]), - _ => None, - } - } - - /// Try to match this digest item to the given opaque item identifier; if it matches, then - /// try to cast to the given data type; if that works, return it. - pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { - self.try_as_raw(id).and_then(|mut x| Decode::decode(&mut x).ok()) - } + /// Cast this digest item into `ChangesTrieRoot`. 
+ pub fn as_changes_trie_root(&self) -> Option<&'a Hash> { + match *self { + DigestItemRef::ChangesTrieRoot(ref changes_trie_root) => Some(changes_trie_root), + _ => None, + } + } + + /// Cast this digest item into `PreRuntime` + pub fn as_pre_runtime(&self) -> Option<(ConsensusEngineId, &'a [u8])> { + match *self { + DigestItemRef::PreRuntime(consensus_engine_id, ref data) => { + Some((*consensus_engine_id, data)) + } + _ => None, + } + } + + /// Cast this digest item into `Consensus` + pub fn as_consensus(&self) -> Option<(ConsensusEngineId, &'a [u8])> { + match *self { + DigestItemRef::Consensus(consensus_engine_id, ref data) => { + Some((*consensus_engine_id, data)) + } + _ => None, + } + } + + /// Cast this digest item into `Seal` + pub fn as_seal(&self) -> Option<(ConsensusEngineId, &'a [u8])> { + match *self { + DigestItemRef::Seal(consensus_engine_id, ref data) => { + Some((*consensus_engine_id, data)) + } + _ => None, + } + } + + /// Cast this digest item into `ChangesTrieSignal`. + pub fn as_changes_trie_signal(&self) -> Option<&'a ChangesTrieSignal> { + match *self { + DigestItemRef::ChangesTrieSignal(ref changes_trie_signal) => Some(changes_trie_signal), + _ => None, + } + } + + /// Cast this digest item into `PreRuntime` + pub fn as_other(&self) -> Option<&'a [u8]> { + match *self { + DigestItemRef::Other(ref data) => Some(data), + _ => None, + } + } + + /// Try to match this digest item to the given opaque item identifier; if it matches, then + /// return the opaque data it contains. 
+ pub fn try_as_raw(&self, id: OpaqueDigestItemId) -> Option<&'a [u8]> { + match (id, self) { + (OpaqueDigestItemId::Consensus(w), &DigestItemRef::Consensus(v, s)) + | (OpaqueDigestItemId::Seal(w), &DigestItemRef::Seal(v, s)) + | (OpaqueDigestItemId::PreRuntime(w), &DigestItemRef::PreRuntime(v, s)) + if v == w => + { + Some(&s[..]) + } + (OpaqueDigestItemId::Other, &DigestItemRef::Other(s)) => Some(&s[..]), + _ => None, + } + } + + /// Try to match this digest item to the given opaque item identifier; if it matches, then + /// try to cast to the given data type; if that works, return it. + pub fn try_to(&self, id: OpaqueDigestItemId) -> Option { + self.try_as_raw(id) + .and_then(|mut x| Decode::decode(&mut x).ok()) + } } impl<'a, Hash: Encode> Encode for DigestItemRef<'a, Hash> { - fn encode(&self) -> Vec { - let mut v = Vec::new(); - - match *self { - DigestItemRef::ChangesTrieRoot(changes_trie_root) => { - DigestItemType::ChangesTrieRoot.encode_to(&mut v); - changes_trie_root.encode_to(&mut v); - }, - DigestItemRef::Consensus(val, data) => { - DigestItemType::Consensus.encode_to(&mut v); - (val, data).encode_to(&mut v); - }, - DigestItemRef::Seal(val, sig) => { - DigestItemType::Seal.encode_to(&mut v); - (val, sig).encode_to(&mut v); - }, - DigestItemRef::PreRuntime(val, data) => { - DigestItemType::PreRuntime.encode_to(&mut v); - (val, data).encode_to(&mut v); - }, - DigestItemRef::ChangesTrieSignal(changes_trie_signal) => { - DigestItemType::ChangesTrieSignal.encode_to(&mut v); - changes_trie_signal.encode_to(&mut v); - }, - DigestItemRef::Other(val) => { - DigestItemType::Other.encode_to(&mut v); - val.encode_to(&mut v); - }, - } - - v - } + fn encode(&self) -> Vec { + let mut v = Vec::new(); + + match *self { + DigestItemRef::ChangesTrieRoot(changes_trie_root) => { + DigestItemType::ChangesTrieRoot.encode_to(&mut v); + changes_trie_root.encode_to(&mut v); + } + DigestItemRef::Consensus(val, data) => { + DigestItemType::Consensus.encode_to(&mut v); + (val, 
data).encode_to(&mut v); + } + DigestItemRef::Seal(val, sig) => { + DigestItemType::Seal.encode_to(&mut v); + (val, sig).encode_to(&mut v); + } + DigestItemRef::PreRuntime(val, data) => { + DigestItemType::PreRuntime.encode_to(&mut v); + (val, data).encode_to(&mut v); + } + DigestItemRef::ChangesTrieSignal(changes_trie_signal) => { + DigestItemType::ChangesTrieSignal.encode_to(&mut v); + changes_trie_signal.encode_to(&mut v); + } + DigestItemRef::Other(val) => { + DigestItemType::Other.encode_to(&mut v); + val.encode_to(&mut v); + } + } + + v + } } impl ChangesTrieSignal { - /// Try to cast this signal to NewConfiguration. - pub fn as_new_configuration(&self) -> Option<&Option> { - match self { - ChangesTrieSignal::NewConfiguration(config) => Some(config), - } - } + /// Try to cast this signal to NewConfiguration. + pub fn as_new_configuration(&self) -> Option<&Option> { + match self { + ChangesTrieSignal::NewConfiguration(config) => Some(config), + } + } } impl<'a, Hash: Encode> codec::EncodeLike for DigestItemRef<'a, Hash> {} #[cfg(test)] mod tests { - use super::*; - - #[test] - fn should_serialize_digest() { - let digest = Digest { - logs: vec![ - DigestItem::ChangesTrieRoot(4), - DigestItem::Other(vec![1, 2, 3]), - DigestItem::Seal(*b"test", vec![1, 2, 3]) - ], - }; - - assert_eq!( - ::serde_json::to_string(&digest).unwrap(), - r#"{"logs":["0x0204000000","0x000c010203","0x05746573740c010203"]}"# - ); - } + use super::*; + + #[test] + fn should_serialize_digest() { + let digest = Digest { + logs: vec![ + DigestItem::ChangesTrieRoot(4), + DigestItem::Other(vec![1, 2, 3]), + DigestItem::Seal(*b"test", vec![1, 2, 3]), + ], + }; + + assert_eq!( + ::serde_json::to_string(&digest).unwrap(), + r#"{"logs":["0x0204000000","0x000c010203","0x05746573740c010203"]}"# + ); + } } diff --git a/primitives/runtime/src/generic/era.rs b/primitives/runtime/src/generic/era.rs index 37b4b495fe..f8a97fac62 100644 --- a/primitives/runtime/src/generic/era.rs +++ 
b/primitives/runtime/src/generic/era.rs @@ -17,9 +17,9 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; -use crate::codec::{Decode, Encode, Input, Output, Error}; +use crate::codec::{Decode, Encode, Error, Input, Output}; /// Era period pub type Period = u64; @@ -31,19 +31,19 @@ pub type Phase = u64; #[derive(PartialEq, Eq, Clone, Copy, sp_core::RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum Era { - /// The transaction is valid forever. The genesis hash must be present in the signed content. - Immortal, - - /// Period and phase are encoded: - /// - The period of validity from the block hash found in the signing material. - /// - The phase in the period that this transaction's lifetime begins (and, importantly, - /// implies which block hash is included in the signature material). If the `period` is - /// greater than 1 << 12, then it will be a factor of the times greater than 1<<12 that - /// `period` is. - /// - /// When used on `FRAME`-based runtimes, `period` cannot exceed `BlockHashCount` parameter - /// of `system` module. - Mortal(Period, Phase), + /// The transaction is valid forever. The genesis hash must be present in the signed content. + Immortal, + + /// Period and phase are encoded: + /// - The period of validity from the block hash found in the signing material. + /// - The phase in the period that this transaction's lifetime begins (and, importantly, + /// implies which block hash is included in the signature material). If the `period` is + /// greater than 1 << 12, then it will be a factor of the times greater than 1<<12 that + /// `period` is. + /// + /// When used on `FRAME`-based runtimes, `period` cannot exceed `BlockHashCount` parameter + /// of `system` module. 
+ Mortal(Period, Phase), } /* @@ -56,158 +56,163 @@ pub enum Era { * n = Q(current - phase, period) + phase */ impl Era { - /// Create a new era based on a period (which should be a power of two between 4 and 65536 inclusive) - /// and a block number on which it should start (or, for long periods, be shortly after the start). - /// - /// If using `Era` in the context of `FRAME` runtime, make sure that `period` - /// does not exceed `BlockHashCount` parameter passed to `system` module, since that - /// prunes old blocks and renders transactions immediately invalid. - pub fn mortal(period: u64, current: u64) -> Self { - let period = period.checked_next_power_of_two() - .unwrap_or(1 << 16) - .max(4) - .min(1 << 16); - let phase = current % period; - let quantize_factor = (period >> 12).max(1); - let quantized_phase = phase / quantize_factor * quantize_factor; - - Era::Mortal(period, quantized_phase) - } - - /// Create an "immortal" transaction. - pub fn immortal() -> Self { - Era::Immortal - } - - /// `true` if this is an immortal transaction. - pub fn is_immortal(&self) -> bool { - match self { - Era::Immortal => true, - _ => false, - } - } - - /// Get the block number of the start of the era whose properties this object - /// describes that `current` belongs to. - pub fn birth(self, current: u64) -> u64 { - match self { - Era::Immortal => 0, - Era::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, - } - } - - /// Get the block number of the first block at which the era has ended. - pub fn death(self, current: u64) -> u64 { - match self { - Era::Immortal => u64::max_value(), - Era::Mortal(period, _) => self.birth(current) + period, - } - } + /// Create a new era based on a period (which should be a power of two between 4 and 65536 inclusive) + /// and a block number on which it should start (or, for long periods, be shortly after the start). 
+ /// + /// If using `Era` in the context of `FRAME` runtime, make sure that `period` + /// does not exceed `BlockHashCount` parameter passed to `system` module, since that + /// prunes old blocks and renders transactions immediately invalid. + pub fn mortal(period: u64, current: u64) -> Self { + let period = period + .checked_next_power_of_two() + .unwrap_or(1 << 16) + .max(4) + .min(1 << 16); + let phase = current % period; + let quantize_factor = (period >> 12).max(1); + let quantized_phase = phase / quantize_factor * quantize_factor; + + Era::Mortal(period, quantized_phase) + } + + /// Create an "immortal" transaction. + pub fn immortal() -> Self { + Era::Immortal + } + + /// `true` if this is an immortal transaction. + pub fn is_immortal(&self) -> bool { + match self { + Era::Immortal => true, + _ => false, + } + } + + /// Get the block number of the start of the era whose properties this object + /// describes that `current` belongs to. + pub fn birth(self, current: u64) -> u64 { + match self { + Era::Immortal => 0, + Era::Mortal(period, phase) => (current.max(phase) - phase) / period * period + phase, + } + } + + /// Get the block number of the first block at which the era has ended. 
+ pub fn death(self, current: u64) -> u64 { + match self { + Era::Immortal => u64::max_value(), + Era::Mortal(period, _) => self.birth(current) + period, + } + } } impl Encode for Era { - fn encode_to(&self, output: &mut T) { - match self { - Era::Immortal => output.push_byte(0), - Era::Mortal(period, phase) => { - let quantize_factor = (*period as u64 >> 12).max(1); - let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 | ((phase / quantize_factor) << 4) as u16; - output.push(&encoded); - } - } - } + fn encode_to(&self, output: &mut T) { + match self { + Era::Immortal => output.push_byte(0), + Era::Mortal(period, phase) => { + let quantize_factor = (*period as u64 >> 12).max(1); + let encoded = (period.trailing_zeros() - 1).max(1).min(15) as u16 + | ((phase / quantize_factor) << 4) as u16; + output.push(&encoded); + } + } + } } impl codec::EncodeLike for Era {} impl Decode for Era { - fn decode(input: &mut I) -> Result { - let first = input.read_byte()?; - if first == 0 { - Ok(Era::Immortal) - } else { - let encoded = first as u64 + ((input.read_byte()? as u64) << 8); - let period = 2 << (encoded % (1 << 4)); - let quantize_factor = (period >> 12).max(1); - let phase = (encoded >> 4) * quantize_factor; - if period >= 4 && phase < period { - Ok(Era::Mortal(period, phase)) - } else { - Err("Invalid period and phase".into()) - } - } - } + fn decode(input: &mut I) -> Result { + let first = input.read_byte()?; + if first == 0 { + Ok(Era::Immortal) + } else { + let encoded = first as u64 + ((input.read_byte()? 
as u64) << 8); + let period = 2 << (encoded % (1 << 4)); + let quantize_factor = (period >> 12).max(1); + let phase = (encoded >> 4) * quantize_factor; + if period >= 4 && phase < period { + Ok(Era::Mortal(period, phase)) + } else { + Err("Invalid period and phase".into()) + } + } + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn immortal_works() { - let e = Era::immortal(); - assert_eq!(e.birth(0), 0); - assert_eq!(e.death(0), u64::max_value()); - assert_eq!(e.birth(1), 0); - assert_eq!(e.death(1), u64::max_value()); - assert_eq!(e.birth(u64::max_value()), 0); - assert_eq!(e.death(u64::max_value()), u64::max_value()); - assert!(e.is_immortal()); - - assert_eq!(e.encode(), vec![0u8]); - assert_eq!(e, Era::decode(&mut&[0u8][..]).unwrap()); - } - - #[test] - fn mortal_codec_works() { - let e = Era::mortal(64, 42); - assert!(!e.is_immortal()); - - let expected = vec![5 + 42 % 16 * 16, 42 / 16]; - assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); - } - - #[test] - fn long_period_mortal_codec_works() { - let e = Era::mortal(32768, 20000); - - let expected = vec![(14 + 2500 % 16 * 16) as u8, (2500 / 16) as u8]; - assert_eq!(e.encode(), expected); - assert_eq!(e, Era::decode(&mut&expected[..]).unwrap()); - } - - #[test] - fn era_initialization_works() { - assert_eq!(Era::mortal(64, 42), Era::Mortal(64, 42)); - assert_eq!(Era::mortal(32768, 20000), Era::Mortal(32768, 20000)); - assert_eq!(Era::mortal(200, 513), Era::Mortal(256, 1)); - assert_eq!(Era::mortal(2, 1), Era::Mortal(4, 1)); - assert_eq!(Era::mortal(4, 5), Era::Mortal(4, 1)); - } - - #[test] - fn quantized_clamped_era_initialization_works() { - // clamp 1000000 to 65536, quantize 1000001 % 65536 to the nearest 4 - assert_eq!(Era::mortal(1000000, 1000001), Era::Mortal(65536, 1000001 % 65536 / 4 * 4)); - } - - #[test] - fn mortal_birth_death_works() { - let e = Era::mortal(4, 6); - for i in 6..10 { - assert_eq!(e.birth(i), 6); - assert_eq!(e.death(i), 10); - } 
- - // wrong because it's outside of the (current...current + period) range - assert_ne!(e.birth(10), 6); - assert_ne!(e.birth(5), 6); - } - - #[test] - fn current_less_than_phase() { - // should not panic - Era::mortal(4, 3).birth(1); - } + use super::*; + + #[test] + fn immortal_works() { + let e = Era::immortal(); + assert_eq!(e.birth(0), 0); + assert_eq!(e.death(0), u64::max_value()); + assert_eq!(e.birth(1), 0); + assert_eq!(e.death(1), u64::max_value()); + assert_eq!(e.birth(u64::max_value()), 0); + assert_eq!(e.death(u64::max_value()), u64::max_value()); + assert!(e.is_immortal()); + + assert_eq!(e.encode(), vec![0u8]); + assert_eq!(e, Era::decode(&mut &[0u8][..]).unwrap()); + } + + #[test] + fn mortal_codec_works() { + let e = Era::mortal(64, 42); + assert!(!e.is_immortal()); + + let expected = vec![5 + 42 % 16 * 16, 42 / 16]; + assert_eq!(e.encode(), expected); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); + } + + #[test] + fn long_period_mortal_codec_works() { + let e = Era::mortal(32768, 20000); + + let expected = vec![(14 + 2500 % 16 * 16) as u8, (2500 / 16) as u8]; + assert_eq!(e.encode(), expected); + assert_eq!(e, Era::decode(&mut &expected[..]).unwrap()); + } + + #[test] + fn era_initialization_works() { + assert_eq!(Era::mortal(64, 42), Era::Mortal(64, 42)); + assert_eq!(Era::mortal(32768, 20000), Era::Mortal(32768, 20000)); + assert_eq!(Era::mortal(200, 513), Era::Mortal(256, 1)); + assert_eq!(Era::mortal(2, 1), Era::Mortal(4, 1)); + assert_eq!(Era::mortal(4, 5), Era::Mortal(4, 1)); + } + + #[test] + fn quantized_clamped_era_initialization_works() { + // clamp 1000000 to 65536, quantize 1000001 % 65536 to the nearest 4 + assert_eq!( + Era::mortal(1000000, 1000001), + Era::Mortal(65536, 1000001 % 65536 / 4 * 4) + ); + } + + #[test] + fn mortal_birth_death_works() { + let e = Era::mortal(4, 6); + for i in 6..10 { + assert_eq!(e.birth(i), 6); + assert_eq!(e.death(i), 10); + } + + // wrong because it's outside of the (current...current + 
period) range + assert_ne!(e.birth(10), 6); + assert_ne!(e.birth(5), 6); + } + + #[test] + fn current_less_than_phase() { + // should not panic + Era::mortal(4, 3).birth(1); + } } diff --git a/primitives/runtime/src/generic/header.rs b/primitives/runtime/src/generic/header.rs index 5efb36603d..f0ac169256 100644 --- a/primitives/runtime/src/generic/header.rs +++ b/primitives/runtime/src/generic/header.rs @@ -16,20 +16,16 @@ //! Generic implementation of a block header. -#[cfg(feature = "std")] -use serde::{Deserialize, Serialize}; -use crate::codec::{Decode, Encode, Codec, Input, Output, HasCompact, EncodeAsRef, Error}; +use crate::codec::{Codec, Decode, Encode, EncodeAsRef, Error, HasCompact, Input, Output}; +use crate::generic::Digest; use crate::traits::{ - self, Member, AtLeast32Bit, SimpleBitOps, Hash as HashT, - MaybeSerializeDeserialize, MaybeSerialize, MaybeDisplay, - MaybeMallocSizeOf, + self, AtLeast32Bit, Hash as HashT, MaybeDisplay, MaybeMallocSizeOf, MaybeSerialize, + MaybeSerializeDeserialize, Member, SimpleBitOps, }; -use crate::generic::Digest; +#[cfg(feature = "std")] +use serde::{Deserialize, Serialize}; use sp_core::U256; -use sp_std::{ - convert::TryFrom, - fmt::Debug, -}; +use sp_std::{convert::TryFrom, fmt::Debug}; /// Abstraction over a block header for a substrate chain. #[derive(PartialEq, Eq, Clone, sp_core::RuntimeDebug)] @@ -37,181 +33,252 @@ use sp_std::{ #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[cfg_attr(feature = "std", serde(deny_unknown_fields))] pub struct Header + TryFrom, Hash: HashT> { - /// The parent hash. - pub parent_hash: Hash::Output, - /// The block number. - #[cfg_attr(feature = "std", serde( - serialize_with = "serialize_number", - deserialize_with = "deserialize_number"))] - pub number: Number, - /// The state trie merkle root - pub state_root: Hash::Output, - /// The merkle root of the extrinsics. 
- pub extrinsics_root: Hash::Output, - /// A chain-specific digest of data useful for light clients or referencing auxiliary data. - pub digest: Digest, + /// The parent hash. + pub parent_hash: Hash::Output, + /// The block number. + #[cfg_attr( + feature = "std", + serde( + serialize_with = "serialize_number", + deserialize_with = "deserialize_number" + ) + )] + pub number: Number, + /// The state trie merkle root + pub state_root: Hash::Output, + /// The merkle root of the extrinsics. + pub extrinsics_root: Hash::Output, + /// A chain-specific digest of data useful for light clients or referencing auxiliary data. + pub digest: Digest, } #[cfg(feature = "std")] impl parity_util_mem::MallocSizeOf for Header where - Number: Copy + Into + TryFrom + parity_util_mem::MallocSizeOf, - Hash: HashT, - Hash::Output: parity_util_mem::MallocSizeOf, + Number: Copy + Into + TryFrom + parity_util_mem::MallocSizeOf, + Hash: HashT, + Hash::Output: parity_util_mem::MallocSizeOf, { - fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - self.parent_hash.size_of(ops) + - self.number.size_of(ops) + - self.state_root.size_of(ops) + - self.extrinsics_root.size_of(ops) + - self.digest.size_of(ops) - } + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + self.parent_hash.size_of(ops) + + self.number.size_of(ops) + + self.state_root.size_of(ops) + + self.extrinsics_root.size_of(ops) + + self.digest.size_of(ops) + } } #[cfg(feature = "std")] pub fn serialize_number + TryFrom>( - val: &T, s: S, -) -> Result where S: serde::Serializer { - let u256: U256 = (*val).into(); - serde::Serialize::serialize(&u256, s) + val: &T, + s: S, +) -> Result +where + S: serde::Serializer, +{ + let u256: U256 = (*val).into(); + serde::Serialize::serialize(&u256, s) } #[cfg(feature = "std")] -pub fn deserialize_number<'a, D, T: Copy + Into + TryFrom>( - d: D, -) -> Result where D: serde::Deserializer<'a> { - let u256: U256 = serde::Deserialize::deserialize(d)?; - 
TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) +pub fn deserialize_number<'a, D, T: Copy + Into + TryFrom>(d: D) -> Result +where + D: serde::Deserializer<'a>, +{ + let u256: U256 = serde::Deserialize::deserialize(d)?; + TryFrom::try_from(u256).map_err(|_| serde::de::Error::custom("Try from failed")) } -impl Decode for Header where - Number: HasCompact + Copy + Into + TryFrom, - Hash: HashT, - Hash::Output: Decode, +impl Decode for Header +where + Number: HasCompact + Copy + Into + TryFrom, + Hash: HashT, + Hash::Output: Decode, { - fn decode(input: &mut I) -> Result { - Ok(Header { - parent_hash: Decode::decode(input)?, - number: <::Type>::decode(input)?.into(), - state_root: Decode::decode(input)?, - extrinsics_root: Decode::decode(input)?, - digest: Decode::decode(input)?, - }) - } + fn decode(input: &mut I) -> Result { + Ok(Header { + parent_hash: Decode::decode(input)?, + number: <::Type>::decode(input)?.into(), + state_root: Decode::decode(input)?, + extrinsics_root: Decode::decode(input)?, + digest: Decode::decode(input)?, + }) + } } -impl Encode for Header where - Number: HasCompact + Copy + Into + TryFrom, - Hash: HashT, - Hash::Output: Encode, +impl Encode for Header +where + Number: HasCompact + Copy + Into + TryFrom, + Hash: HashT, + Hash::Output: Encode, { - fn encode_to(&self, dest: &mut T) { - dest.push(&self.parent_hash); - dest.push(&<<::Type as EncodeAsRef<_>>::RefType>::from(&self.number)); - dest.push(&self.state_root); - dest.push(&self.extrinsics_root); - dest.push(&self.digest); - } + fn encode_to(&self, dest: &mut T) { + dest.push(&self.parent_hash); + dest.push(&<<::Type as EncodeAsRef<_>>::RefType>::from(&self.number)); + dest.push(&self.state_root); + dest.push(&self.extrinsics_root); + dest.push(&self.digest); + } } -impl codec::EncodeLike for Header where - Number: HasCompact + Copy + Into + TryFrom, - Hash: HashT, - Hash::Output: Encode, -{} - -impl traits::Header for Header where - Number: Member + 
MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + MaybeDisplay + - AtLeast32Bit + Codec + Copy + Into + TryFrom + sp_std::str::FromStr + - MaybeMallocSizeOf, - Hash: HashT, - Hash::Output: Default + sp_std::hash::Hash + Copy + Member + Ord + - MaybeSerialize + Debug + MaybeDisplay + SimpleBitOps + Codec + MaybeMallocSizeOf, +impl codec::EncodeLike for Header +where + Number: HasCompact + Copy + Into + TryFrom, + Hash: HashT, + Hash::Output: Encode, { - type Number = Number; - type Hash = ::Output; - type Hashing = Hash; - - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } - - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } - - fn state_root(&self) -> &Self::Hash { &self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } - - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } - - fn digest(&self) -> &Digest { &self.digest } - - fn digest_mut(&mut self) -> &mut Digest { - #[cfg(feature = "std")] - log::debug!(target: "header", "Retrieving mutable reference to digest"); - &mut self.digest - } - - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Digest, - ) -> Self { - Header { - number, - extrinsics_root, - state_root, - parent_hash, - digest, - } - } } -impl Header where - Number: Member + sp_std::hash::Hash + Copy + MaybeDisplay + AtLeast32Bit + Codec + Into + TryFrom, - Hash: HashT, - Hash::Output: Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, - { - /// Convenience helper for computing the hash of the header without having - /// to import the trait. 
- pub fn hash(&self) -> Hash::Output { - Hash::hash_of(self) - } +impl traits::Header for Header +where + Number: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + MaybeDisplay + + AtLeast32Bit + + Codec + + Copy + + Into + + TryFrom + + sp_std::str::FromStr + + MaybeMallocSizeOf, + Hash: HashT, + Hash::Output: Default + + sp_std::hash::Hash + + Copy + + Member + + Ord + + MaybeSerialize + + Debug + + MaybeDisplay + + SimpleBitOps + + Codec + + MaybeMallocSizeOf, +{ + type Number = Number; + type Hash = ::Output; + type Hashing = Hash; + + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } + + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } + + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } + + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } + + fn digest(&self) -> &Digest { + &self.digest + } + + fn digest_mut(&mut self) -> &mut Digest { + #[cfg(feature = "std")] + log::debug!(target: "header", "Retrieving mutable reference to digest"); + &mut self.digest + } + + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Digest, + ) -> Self { + Header { + number, + extrinsics_root, + state_root, + parent_hash, + digest, + } + } +} + +impl Header +where + Number: Member + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32Bit + + Codec + + Into + + TryFrom, + Hash: HashT, + Hash::Output: + Default + sp_std::hash::Hash + Copy + Member + MaybeDisplay + SimpleBitOps + Codec, +{ + /// Convenience helper for computing the hash of the header without having + /// to import the trait. 
+ pub fn hash(&self) -> Hash::Output { + Hash::hash_of(self) + } } #[cfg(all(test, feature = "std"))] mod tests { - use super::*; - - #[test] - fn should_serialize_numbers() { - fn serialize(num: u128) -> String { - let mut v = vec![]; - { - let mut ser = serde_json::Serializer::new(std::io::Cursor::new(&mut v)); - serialize_number(&num, &mut ser).unwrap(); - } - String::from_utf8(v).unwrap() - } - - assert_eq!(serialize(0), "\"0x0\"".to_owned()); - assert_eq!(serialize(1), "\"0x1\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128), "\"0xffffffffffffffff\"".to_owned()); - assert_eq!(serialize(u64::max_value() as u128 + 1), "\"0x10000000000000000\"".to_owned()); - } - - #[test] - fn should_deserialize_number() { - fn deserialize(num: &str) -> u128 { - let mut der = serde_json::Deserializer::new(serde_json::de::StrRead::new(num)); - deserialize_number(&mut der).unwrap() - } - - assert_eq!(deserialize("\"0x0\""), 0); - assert_eq!(deserialize("\"0x1\""), 1); - assert_eq!(deserialize("\"0xffffffffffffffff\""), u64::max_value() as u128); - assert_eq!(deserialize("\"0x10000000000000000\""), u64::max_value() as u128 + 1); - } + use super::*; + + #[test] + fn should_serialize_numbers() { + fn serialize(num: u128) -> String { + let mut v = vec![]; + { + let mut ser = serde_json::Serializer::new(std::io::Cursor::new(&mut v)); + serialize_number(&num, &mut ser).unwrap(); + } + String::from_utf8(v).unwrap() + } + + assert_eq!(serialize(0), "\"0x0\"".to_owned()); + assert_eq!(serialize(1), "\"0x1\"".to_owned()); + assert_eq!( + serialize(u64::max_value() as u128), + "\"0xffffffffffffffff\"".to_owned() + ); + assert_eq!( + serialize(u64::max_value() as u128 + 1), + "\"0x10000000000000000\"".to_owned() + ); + } + + #[test] + fn should_deserialize_number() { + fn deserialize(num: &str) -> u128 { + let mut der = serde_json::Deserializer::new(serde_json::de::StrRead::new(num)); + deserialize_number(&mut der).unwrap() + } + + assert_eq!(deserialize("\"0x0\""), 0); + 
assert_eq!(deserialize("\"0x1\""), 1); + assert_eq!( + deserialize("\"0xffffffffffffffff\""), + u64::max_value() as u128 + ); + assert_eq!( + deserialize("\"0x10000000000000000\""), + u64::max_value() as u128 + 1 + ); + } } diff --git a/primitives/runtime/src/generic/mod.rs b/primitives/runtime/src/generic/mod.rs index 5e9928ba19..2c71539c62 100644 --- a/primitives/runtime/src/generic/mod.rs +++ b/primitives/runtime/src/generic/mod.rs @@ -18,45 +18,43 @@ //! Generic implementations of Extrinsic/Header/Block. // end::description[] -mod unchecked_extrinsic; -mod era; -mod checked_extrinsic; -mod header; mod block; +mod checked_extrinsic; mod digest; +mod era; +mod header; #[cfg(test)] mod tests; +mod unchecked_extrinsic; -pub use self::unchecked_extrinsic::{UncheckedExtrinsic, SignedPayload}; -pub use self::era::{Era, Phase}; +pub use self::block::{Block, BlockId, SignedBlock}; pub use self::checked_extrinsic::CheckedExtrinsic; +pub use self::digest::{ChangesTrieSignal, Digest, DigestItem, DigestItemRef, OpaqueDigestItemId}; +pub use self::era::{Era, Phase}; pub use self::header::Header; -pub use self::block::{Block, SignedBlock, BlockId}; -pub use self::digest::{ - Digest, DigestItem, DigestItemRef, OpaqueDigestItemId, ChangesTrieSignal, -}; +pub use self::unchecked_extrinsic::{SignedPayload, UncheckedExtrinsic}; use crate::codec::Encode; use sp_std::prelude::*; fn encode_with_vec_prefix)>(encoder: F) -> Vec { - let size = ::sp_std::mem::size_of::(); - let reserve = match size { - 0..=0b00111111 => 1, - 0..=0b00111111_11111111 => 2, - _ => 4, - }; - let mut v = Vec::with_capacity(reserve + size); - v.resize(reserve, 0); - encoder(&mut v); - - // need to prefix with the total length to ensure it's binary compatible with - // Vec. 
- let mut length: Vec<()> = Vec::new(); - length.resize(v.len() - reserve, ()); - length.using_encoded(|s| { - v.splice(0..reserve, s.iter().cloned()); - }); - - v + let size = ::sp_std::mem::size_of::(); + let reserve = match size { + 0..=0b00111111 => 1, + 0..=0b00111111_11111111 => 2, + _ => 4, + }; + let mut v = Vec::with_capacity(reserve + size); + v.resize(reserve, 0); + encoder(&mut v); + + // need to prefix with the total length to ensure it's binary compatible with + // Vec. + let mut length: Vec<()> = Vec::new(); + length.resize(v.len() - reserve, ()); + length.using_encoded(|s| { + v.splice(0..reserve, s.iter().cloned()); + }); + + v } diff --git a/primitives/runtime/src/generic/tests.rs b/primitives/runtime/src/generic/tests.rs index de2f4a19d9..017b58ff1a 100644 --- a/primitives/runtime/src/generic/tests.rs +++ b/primitives/runtime/src/generic/tests.rs @@ -16,45 +16,42 @@ //! Tests for the generic implementations of Extrinsic/Header/Block. +use super::DigestItem; use crate::codec::{Decode, Encode}; use sp_core::H256; -use super::DigestItem; #[test] fn system_digest_item_encoding() { - let item = DigestItem::ChangesTrieRoot::(H256::default()); - let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::ChangesTrieRoot - 2, - // trie root - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - 0, 0, 0, 0, - ]); - - let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); - assert_eq!(item, decoded); + let item = DigestItem::ChangesTrieRoot::(H256::default()); + let encoded = item.encode(); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::ChangesTrieRoot + 2, // trie root + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, + ] + ); + + let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); + assert_eq!(item, decoded); } #[test] fn non_system_digest_item_encoding() { - let item = 
DigestItem::Other::(vec![10, 20, 30]); - let encoded = item.encode(); - assert_eq!(encoded, vec![ - // type = DigestItemType::Other - 0, - // length of other data - 12, - // authorities - 10, 20, 30, - ]); - - let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); - assert_eq!(item, decoded); + let item = DigestItem::Other::(vec![10, 20, 30]); + let encoded = item.encode(); + assert_eq!( + encoded, + vec![ + // type = DigestItemType::Other + 0, // length of other data + 12, // authorities + 10, 20, 30, + ] + ); + + let decoded: DigestItem = Decode::decode(&mut &encoded[..]).unwrap(); + assert_eq!(item, decoded); } diff --git a/primitives/runtime/src/generic/unchecked_extrinsic.rs b/primitives/runtime/src/generic/unchecked_extrinsic.rs index 4aae575b2c..02f7c13fb5 100644 --- a/primitives/runtime/src/generic/unchecked_extrinsic.rs +++ b/primitives/runtime/src/generic/unchecked_extrinsic.rs @@ -16,17 +16,17 @@ //! Generic implementation of an unchecked (pre-verification) extrinsic. 
-use sp_std::{fmt, prelude::*}; -use sp_io::hashing::blake2_256; -use codec::{Decode, Encode, EncodeLike, Input, Error}; use crate::{ - traits::{ - self, Member, MaybeDisplay, SignedExtension, Checkable, Extrinsic, ExtrinsicMetadata, - IdentifyAccount, - }, - generic::CheckedExtrinsic, - transaction_validity::{TransactionValidityError, InvalidTransaction}, + generic::CheckedExtrinsic, + traits::{ + self, Checkable, Extrinsic, ExtrinsicMetadata, IdentifyAccount, MaybeDisplay, Member, + SignedExtension, + }, + transaction_validity::{InvalidTransaction, TransactionValidityError}, }; +use codec::{Decode, Encode, EncodeLike, Error, Input}; +use sp_io::hashing::blake2_256; +use sp_std::{fmt, prelude::*}; const TRANSACTION_VERSION: u8 = 4; @@ -35,122 +35,111 @@ const TRANSACTION_VERSION: u8 = 4; #[derive(PartialEq, Eq, Clone)] pub struct UncheckedExtrinsic where - Extra: SignedExtension + Extra: SignedExtension, { - /// The signature, address, number of extrinsics have come before from - /// the same signer and an era describing the longevity of this transaction, - /// if this is a signed extrinsic. - pub signature: Option<(Address, Signature, Extra)>, - /// The function that should be called. - pub function: Call, + /// The signature, address, number of extrinsics have come before from + /// the same signer and an era describing the longevity of this transaction, + /// if this is a signed extrinsic. + pub signature: Option<(Address, Signature, Extra)>, + /// The function that should be called. + pub function: Call, } #[cfg(feature = "std")] impl parity_util_mem::MallocSizeOf - for UncheckedExtrinsic + for UncheckedExtrinsic where - Extra: SignedExtension + Extra: SignedExtension, { - fn size_of(&self, _ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - // Instantiated only in runtime. - 0 - } + fn size_of(&self, _ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + // Instantiated only in runtime. 
+ 0 + } } impl - UncheckedExtrinsic + UncheckedExtrinsic { - /// New instance of a signed extrinsic aka "transaction". - pub fn new_signed( - function: Call, - signed: Address, - signature: Signature, - extra: Extra - ) -> Self { - UncheckedExtrinsic { - signature: Some((signed, signature, extra)), - function, - } - } - - /// New instance of an unsigned extrinsic aka "inherent". - pub fn new_unsigned(function: Call) -> Self { - UncheckedExtrinsic { - signature: None, - function, - } - } + /// New instance of a signed extrinsic aka "transaction". + pub fn new_signed(function: Call, signed: Address, signature: Signature, extra: Extra) -> Self { + UncheckedExtrinsic { + signature: Some((signed, signature, extra)), + function, + } + } + + /// New instance of an unsigned extrinsic aka "inherent". + pub fn new_unsigned(function: Call) -> Self { + UncheckedExtrinsic { + signature: None, + function, + } + } } impl Extrinsic - for UncheckedExtrinsic + for UncheckedExtrinsic { - type Call = Call; - - type SignaturePayload = ( - Address, - Signature, - Extra, - ); - - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } - - fn new(function: Call, signed_data: Option) -> Option { - Some(if let Some((address, signature, extra)) = signed_data { - UncheckedExtrinsic::new_signed(function, address, signature, extra) - } else { - UncheckedExtrinsic::new_unsigned(function) - }) - } + type Call = Call; + + type SignaturePayload = (Address, Signature, Extra); + + fn is_signed(&self) -> Option { + Some(self.signature.is_some()) + } + + fn new(function: Call, signed_data: Option) -> Option { + Some(if let Some((address, signature, extra)) = signed_data { + UncheckedExtrinsic::new_signed(function, address, signature, extra) + } else { + UncheckedExtrinsic::new_unsigned(function) + }) + } } -impl - Checkable -for - UncheckedExtrinsic +impl Checkable + for UncheckedExtrinsic where - Address: Member + MaybeDisplay, - Call: Encode + Member, - Signature: Member + 
traits::Verify, - ::Signer: IdentifyAccount, - Extra: SignedExtension, - AccountId: Member + MaybeDisplay, - Lookup: traits::Lookup, + Address: Member + MaybeDisplay, + Call: Encode + Member, + Signature: Member + traits::Verify, + ::Signer: IdentifyAccount, + Extra: SignedExtension, + AccountId: Member + MaybeDisplay, + Lookup: traits::Lookup, { - type Checked = CheckedExtrinsic; - - fn check(self, lookup: &Lookup) -> Result { - Ok(match self.signature { - Some((signed, signature, extra)) => { - let signed = lookup.lookup(signed)?; - let raw_payload = SignedPayload::new(self.function, extra)?; - if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { - return Err(InvalidTransaction::BadProof.into()) - } - - let (function, extra, _) = raw_payload.deconstruct(); - CheckedExtrinsic { - signed: Some((signed, extra)), - function, - } - } - None => CheckedExtrinsic { - signed: None, - function: self.function, - }, - }) - } + type Checked = CheckedExtrinsic; + + fn check(self, lookup: &Lookup) -> Result { + Ok(match self.signature { + Some((signed, signature, extra)) => { + let signed = lookup.lookup(signed)?; + let raw_payload = SignedPayload::new(self.function, extra)?; + if !raw_payload.using_encoded(|payload| signature.verify(payload, &signed)) { + return Err(InvalidTransaction::BadProof.into()); + } + + let (function, extra, _) = raw_payload.deconstruct(); + CheckedExtrinsic { + signed: Some((signed, extra)), + function, + } + } + None => CheckedExtrinsic { + signed: None, + function: self.function, + }, + }) + } } impl ExtrinsicMetadata - for UncheckedExtrinsic - where - Extra: SignedExtension, + for UncheckedExtrinsic +where + Extra: SignedExtension, { - const VERSION: u8 = TRANSACTION_VERSION; - type SignedExtensions = Extra; + const VERSION: u8 = TRANSACTION_VERSION; + type SignedExtensions = Extra; } /// A payload that has been signed for an unchecked extrinsics. 
@@ -158,288 +147,311 @@ impl ExtrinsicMetadata /// Note that the payload that we sign to produce unchecked extrinsic signature /// is going to be different than the `SignaturePayload` - so the thing the extrinsic /// actually contains. -pub struct SignedPayload(( - Call, - Extra, - Extra::AdditionalSigned, -)); - -impl SignedPayload where - Call: Encode, - Extra: SignedExtension, +pub struct SignedPayload((Call, Extra, Extra::AdditionalSigned)); + +impl SignedPayload +where + Call: Encode, + Extra: SignedExtension, { - /// Create new `SignedPayload`. - /// - /// This function may fail if `additional_signed` of `Extra` is not available. - pub fn new(call: Call, extra: Extra) -> Result { - let additional_signed = extra.additional_signed()?; - let raw_payload = (call, extra, additional_signed); - Ok(Self(raw_payload)) - } - - /// Create new `SignedPayload` from raw components. - pub fn from_raw(call: Call, extra: Extra, additional_signed: Extra::AdditionalSigned) -> Self { - Self((call, extra, additional_signed)) - } - - /// Deconstruct the payload into it's components. - pub fn deconstruct(self) -> (Call, Extra, Extra::AdditionalSigned) { - self.0 - } + /// Create new `SignedPayload`. + /// + /// This function may fail if `additional_signed` of `Extra` is not available. + pub fn new(call: Call, extra: Extra) -> Result { + let additional_signed = extra.additional_signed()?; + let raw_payload = (call, extra, additional_signed); + Ok(Self(raw_payload)) + } + + /// Create new `SignedPayload` from raw components. + pub fn from_raw(call: Call, extra: Extra, additional_signed: Extra::AdditionalSigned) -> Self { + Self((call, extra, additional_signed)) + } + + /// Deconstruct the payload into it's components. 
+ pub fn deconstruct(self) -> (Call, Extra, Extra::AdditionalSigned) { + self.0 + } } -impl Encode for SignedPayload where - Call: Encode, - Extra: SignedExtension, +impl Encode for SignedPayload +where + Call: Encode, + Extra: SignedExtension, { - /// Get an encoded version of this payload. - /// - /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. - fn using_encoded R>(&self, f: F) -> R { - self.0.using_encoded(|payload| { - if payload.len() > 256 { - f(&blake2_256(payload)[..]) - } else { - f(payload) - } - }) - } + /// Get an encoded version of this payload. + /// + /// Payloads longer than 256 bytes are going to be `blake2_256`-hashed. + fn using_encoded R>(&self, f: F) -> R { + self.0.using_encoded(|payload| { + if payload.len() > 256 { + f(&blake2_256(payload)[..]) + } else { + f(payload) + } + }) + } } impl EncodeLike for SignedPayload where - Call: Encode, - Extra: SignedExtension, -{} + Call: Encode, + Extra: SignedExtension, +{ +} -impl Decode - for UncheckedExtrinsic +impl Decode for UncheckedExtrinsic where - Address: Decode, - Signature: Decode, - Call: Decode, - Extra: SignedExtension, + Address: Decode, + Signature: Decode, + Call: Decode, + Extra: SignedExtension, { - fn decode(input: &mut I) -> Result { - // This is a little more complicated than usual since the binary format must be compatible - // with substrate's generic `Vec` type. Basically this just means accepting that there - // will be a prefix of vector length (we don't need - // to use this). - let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; - - let version = input.read_byte()?; - - let is_signed = version & 0b1000_0000 != 0; - let version = version & 0b0111_1111; - if version != TRANSACTION_VERSION { - return Err("Invalid transaction version".into()); - } - - Ok(UncheckedExtrinsic { - signature: if is_signed { Some(Decode::decode(input)?) 
} else { None }, - function: Decode::decode(input)?, - }) - } + fn decode(input: &mut I) -> Result { + // This is a little more complicated than usual since the binary format must be compatible + // with substrate's generic `Vec` type. Basically this just means accepting that there + // will be a prefix of vector length (we don't need + // to use this). + let _length_do_not_remove_me_see_above: Vec<()> = Decode::decode(input)?; + + let version = input.read_byte()?; + + let is_signed = version & 0b1000_0000 != 0; + let version = version & 0b0111_1111; + if version != TRANSACTION_VERSION { + return Err("Invalid transaction version".into()); + } + + Ok(UncheckedExtrinsic { + signature: if is_signed { + Some(Decode::decode(input)?) + } else { + None + }, + function: Decode::decode(input)?, + }) + } } -impl Encode - for UncheckedExtrinsic +impl Encode for UncheckedExtrinsic where - Address: Encode, - Signature: Encode, - Call: Encode, - Extra: SignedExtension, + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: SignedExtension, { - fn encode(&self) -> Vec { - super::encode_with_vec_prefix::(|v| { - // 1 byte version id. - match self.signature.as_ref() { - Some(s) => { - v.push(TRANSACTION_VERSION | 0b1000_0000); - s.encode_to(v); - } - None => { - v.push(TRANSACTION_VERSION & 0b0111_1111); - } - } - self.function.encode_to(v); - }) - } + fn encode(&self) -> Vec { + super::encode_with_vec_prefix::(|v| { + // 1 byte version id. 
+ match self.signature.as_ref() { + Some(s) => { + v.push(TRANSACTION_VERSION | 0b1000_0000); + s.encode_to(v); + } + None => { + v.push(TRANSACTION_VERSION & 0b0111_1111); + } + } + self.function.encode_to(v); + }) + } } impl EncodeLike - for UncheckedExtrinsic + for UncheckedExtrinsic where - Address: Encode, - Signature: Encode, - Call: Encode, - Extra: SignedExtension, -{} + Address: Encode, + Signature: Encode, + Call: Encode, + Extra: SignedExtension, +{ +} #[cfg(feature = "std")] impl serde::Serialize - for UncheckedExtrinsic + for UncheckedExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } #[cfg(feature = "std")] -impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> serde::Deserialize<'a> - for UncheckedExtrinsic +impl<'a, Address: Decode, Signature: Decode, Call: Decode, Extra: SignedExtension> + serde::Deserialize<'a> for UncheckedExtrinsic { - fn deserialize(de: D) -> Result where - D: serde::Deserializer<'a>, - { - let r = sp_core::bytes::deserialize(de)?; - Decode::decode(&mut &r[..]) - .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) - } + fn deserialize(de: D) -> Result + where + D: serde::Deserializer<'a>, + { + let r = sp_core::bytes::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| serde::de::Error::custom(format!("Decode error: {}", e))) + } } impl fmt::Debug - for UncheckedExtrinsic + for UncheckedExtrinsic where - Address: fmt::Debug, - Call: fmt::Debug, - Extra: SignedExtension, + Address: fmt::Debug, + Call: fmt::Debug, + Extra: SignedExtension, { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!( - f, - "UncheckedExtrinsic({:?}, {:?})", - self.signature.as_ref().map(|x| (&x.0, &x.2)), - self.function, - ) - } + fn fmt(&self, f: 
&mut fmt::Formatter) -> fmt::Result { + write!( + f, + "UncheckedExtrinsic({:?}, {:?})", + self.signature.as_ref().map(|x| (&x.0, &x.2)), + self.function, + ) + } } #[cfg(test)] mod tests { - use super::*; - use sp_io::hashing::blake2_256; - use crate::codec::{Encode, Decode}; - use crate::traits::{SignedExtension, IdentifyAccount, IdentityLookup}; - use serde::{Serialize, Deserialize}; - - type TestContext = IdentityLookup; - - #[derive(Eq, PartialEq, Clone, Copy, Debug, Serialize, Deserialize, Encode, Decode)] - pub struct TestSigner(pub u64); - impl From for TestSigner { fn from(x: u64) -> Self { Self(x) } } - impl From for u64 { fn from(x: TestSigner) -> Self { x.0 } } - impl IdentifyAccount for TestSigner { - type AccountId = u64; - fn into_account(self) -> u64 { self.into() } - } - - #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize, Encode, Decode)] - struct TestSig(u64, Vec); - impl traits::Verify for TestSig { - type Signer = TestSigner; - fn verify>(&self, mut msg: L, signer: &u64) -> bool { - signer == &self.0 && msg.get() == &self.1[..] - } - } - - type TestAccountId = u64; - type TestCall = Vec; - - const TEST_ACCOUNT: TestAccountId = 0; - - // NOTE: this is demonstration. One can simply use `()` for testing. 
- #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd)] - struct TestExtra; - impl SignedExtension for TestExtra { - const IDENTIFIER: &'static str = "TestExtra"; - type AccountId = u64; - type Call = (); - type AdditionalSigned = (); - type Pre = (); - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - } - - type Ex = UncheckedExtrinsic; - type CEx = CheckedExtrinsic; - - #[test] - fn unsigned_codec_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); - } - - #[test] - fn signed_codec_should_work() { - let ux = Ex::new_signed( - vec![0u8; 0], - TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra - ); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); - } - - #[test] - fn large_signed_codec_should_work() { - let ux = Ex::new_signed( - vec![0u8; 0], - TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 257], TestExtra) - .using_encoded(blake2_256)[..].to_owned()), - TestExtra - ); - let encoded = ux.encode(); - assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); - } - - #[test] - fn unsigned_check_should_work() { - let ux = Ex::new_unsigned(vec![0u8; 0]); - assert!(!ux.is_signed().unwrap_or(false)); - assert!(>::check(ux, &Default::default()).is_ok()); - } - - #[test] - fn badly_signed_check_should_fail() { - let ux = Ex::new_signed( - vec![0u8; 0], - TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, vec![0u8; 0]), - TestExtra, - ); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!( - >::check(ux, &Default::default()), - Err(InvalidTransaction::BadProof.into()), - ); - } - - #[test] - fn signed_check_should_work() { - let ux = Ex::new_signed( - vec![0u8; 0], - TEST_ACCOUNT, - TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), - TestExtra, - ); - assert!(ux.is_signed().unwrap_or(false)); - assert_eq!( - >::check(ux, &Default::default()), - Ok(CEx { 
signed: Some((TEST_ACCOUNT, TestExtra)), function: vec![0u8; 0] }), - ); - } - - #[test] - fn encoding_matches_vec() { - let ex = Ex::new_unsigned(vec![0u8; 0]); - let encoded = ex.encode(); - let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(decoded, ex); - let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); - assert_eq!(as_vec.encode(), encoded); - } + use super::*; + use crate::codec::{Decode, Encode}; + use crate::traits::{IdentifyAccount, IdentityLookup, SignedExtension}; + use serde::{Deserialize, Serialize}; + use sp_io::hashing::blake2_256; + + type TestContext = IdentityLookup; + + #[derive(Eq, PartialEq, Clone, Copy, Debug, Serialize, Deserialize, Encode, Decode)] + pub struct TestSigner(pub u64); + impl From for TestSigner { + fn from(x: u64) -> Self { + Self(x) + } + } + impl From for u64 { + fn from(x: TestSigner) -> Self { + x.0 + } + } + impl IdentifyAccount for TestSigner { + type AccountId = u64; + fn into_account(self) -> u64 { + self.into() + } + } + + #[derive(Eq, PartialEq, Clone, Debug, Serialize, Deserialize, Encode, Decode)] + struct TestSig(u64, Vec); + impl traits::Verify for TestSig { + type Signer = TestSigner; + fn verify>(&self, mut msg: L, signer: &u64) -> bool { + signer == &self.0 && msg.get() == &self.1[..] + } + } + + type TestAccountId = u64; + type TestCall = Vec; + + const TEST_ACCOUNT: TestAccountId = 0; + + // NOTE: this is demonstration. One can simply use `()` for testing. 
+ #[derive(Debug, Encode, Decode, Clone, Eq, PartialEq, Ord, PartialOrd)] + struct TestExtra; + impl SignedExtension for TestExtra { + const IDENTIFIER: &'static str = "TestExtra"; + type AccountId = u64; + type Call = (); + type AdditionalSigned = (); + type Pre = (); + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + } + + type Ex = UncheckedExtrinsic; + type CEx = CheckedExtrinsic; + + #[test] + fn unsigned_codec_should_work() { + let ux = Ex::new_unsigned(vec![0u8; 0]); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + } + + #[test] + fn signed_codec_should_work() { + let ux = Ex::new_signed( + vec![0u8; 0], + TEST_ACCOUNT, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), + TestExtra, + ); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + } + + #[test] + fn large_signed_codec_should_work() { + let ux = Ex::new_signed( + vec![0u8; 0], + TEST_ACCOUNT, + TestSig( + TEST_ACCOUNT, + (vec![0u8; 257], TestExtra).using_encoded(blake2_256)[..].to_owned(), + ), + TestExtra, + ); + let encoded = ux.encode(); + assert_eq!(Ex::decode(&mut &encoded[..]), Ok(ux)); + } + + #[test] + fn unsigned_check_should_work() { + let ux = Ex::new_unsigned(vec![0u8; 0]); + assert!(!ux.is_signed().unwrap_or(false)); + assert!(>::check(ux, &Default::default()).is_ok()); + } + + #[test] + fn badly_signed_check_should_fail() { + let ux = Ex::new_signed( + vec![0u8; 0], + TEST_ACCOUNT, + TestSig(TEST_ACCOUNT, vec![0u8; 0]), + TestExtra, + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &Default::default()), + Err(InvalidTransaction::BadProof.into()), + ); + } + + #[test] + fn signed_check_should_work() { + let ux = Ex::new_signed( + vec![0u8; 0], + TEST_ACCOUNT, + TestSig(TEST_ACCOUNT, (vec![0u8; 0], TestExtra).encode()), + TestExtra, + ); + assert!(ux.is_signed().unwrap_or(false)); + assert_eq!( + >::check(ux, &Default::default()), + 
Ok(CEx { + signed: Some((TEST_ACCOUNT, TestExtra)), + function: vec![0u8; 0] + }), + ); + } + + #[test] + fn encoding_matches_vec() { + let ex = Ex::new_unsigned(vec![0u8; 0]); + let encoded = ex.encode(); + let decoded = Ex::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(decoded, ex); + let as_vec: Vec = Decode::decode(&mut encoded.as_slice()).unwrap(); + assert_eq!(as_vec.encode(), encoded); + } } diff --git a/primitives/runtime/src/lib.rs b/primitives/runtime/src/lib.rs index e69f892626..9258ef3d31 100644 --- a/primitives/runtime/src/lib.rs +++ b/primitives/runtime/src/lib.rs @@ -18,10 +18,10 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] - // to allow benchmarking #![cfg_attr(feature = "bench", feature(test))] -#[cfg(feature = "bench")] extern crate test; +#[cfg(feature = "bench")] +extern crate test; #[doc(hidden)] pub use codec; @@ -40,43 +40,51 @@ pub use sp_application_crypto as app_crypto; #[cfg(feature = "std")] pub use sp_core::storage::{Storage, StorageChild}; -use sp_std::prelude::*; +use sp_core::{ + crypto::{self, Public}, + ecdsa, ed25519, + hash::{H256, H512}, + sr25519, +}; use sp_std::convert::TryFrom; -use sp_core::{crypto::{self, Public}, ed25519, sr25519, ecdsa, hash::{H256, H512}}; +use sp_std::prelude::*; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; pub mod curve; pub mod generic; pub mod offchain; +pub mod random_number_generator; +mod runtime_string; #[cfg(feature = "std")] pub mod testing; pub mod traits; pub mod transaction_validity; -pub mod random_number_generator; -mod runtime_string; pub use crate::runtime_string::*; /// Re-export these since they're only "kind of" generic. -pub use generic::{DigestItem, Digest}; +pub use generic::{Digest, DigestItem}; +pub use sp_application_crypto::{BoundToRuntimeAppPublic, RuntimeAppPublic}; /// Re-export this since it's part of the API of this crate. 
-pub use sp_core::{TypeId, crypto::{key_types, KeyTypeId, CryptoType, AccountId32}}; -pub use sp_application_crypto::{RuntimeAppPublic, BoundToRuntimeAppPublic}; +pub use sp_core::{ + crypto::{key_types, AccountId32, CryptoType, KeyTypeId}, + TypeId, +}; /// Re-export `RuntimeDebug`, to avoid dependency clutter. pub use sp_core::RuntimeDebug; +/// Re-export big_uint stuff. +pub use sp_arithmetic::biguint; +/// Re-export 128 bit helpers. +pub use sp_arithmetic::helpers_128bit; /// Re-export top-level arithmetic stuff. pub use sp_arithmetic::{ - Perquintill, Perbill, Permill, Percent, PerU16, Rational128, Fixed64, Fixed128, - PerThing, traits::SaturatedConversion, + traits::SaturatedConversion, Fixed128, Fixed64, PerThing, PerU16, Perbill, Percent, Permill, + Perquintill, Rational128, }; -/// Re-export 128 bit helpers. -pub use sp_arithmetic::helpers_128bit; -/// Re-export big_uint stuff. -pub use sp_arithmetic::biguint; pub use random_number_generator::RandomNumberGenerator; @@ -89,76 +97,70 @@ pub use random_number_generator::RandomNumberGenerator; /// bypasses this problem. pub type Justification = Vec; -use traits::{Verify, Lazy}; +use traits::{Lazy, Verify}; /// A module identifier. These are per module and should be stored in a registry somewhere. #[derive(Clone, Copy, Eq, PartialEq, Encode, Decode)] pub struct ModuleId(pub [u8; 8]); impl TypeId for ModuleId { - const TYPE_ID: [u8; 4] = *b"modl"; + const TYPE_ID: [u8; 4] = *b"modl"; } -#[cfg(feature = "std")] -pub use serde::{Serialize, Deserialize, de::DeserializeOwned}; use crate::traits::IdentifyAccount; +#[cfg(feature = "std")] +pub use serde::{de::DeserializeOwned, Deserialize, Serialize}; /// Complex storage builder stuff. #[cfg(feature = "std")] pub trait BuildStorage { - /// Build the storage out of this builder. 
- fn build_storage(&self) -> Result { - let mut storage = Default::default(); - self.assimilate_storage(&mut storage)?; - Ok(storage) - } - /// Assimilate the storage for this module into pre-existing overlays. - fn assimilate_storage( - &self, - storage: &mut sp_core::storage::Storage, - ) -> Result<(), String>; + /// Build the storage out of this builder. + fn build_storage(&self) -> Result { + let mut storage = Default::default(); + self.assimilate_storage(&mut storage)?; + Ok(storage) + } + /// Assimilate the storage for this module into pre-existing overlays. + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String>; } /// Something that can build the genesis storage of a module. #[cfg(feature = "std")] pub trait BuildModuleGenesisStorage: Sized { - /// Create the module genesis storage into the given `storage` and `child_storage`. - fn build_module_genesis_storage( - &self, - storage: &mut sp_core::storage::Storage, - ) -> Result<(), String>; + /// Create the module genesis storage into the given `storage` and `child_storage`. 
+ fn build_module_genesis_storage( + &self, + storage: &mut sp_core::storage::Storage, + ) -> Result<(), String>; } #[cfg(feature = "std")] impl BuildStorage for sp_core::storage::Storage { - fn assimilate_storage( - &self, - storage: &mut sp_core::storage::Storage, - )-> Result<(), String> { - storage.top.extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); - for (k, other_map) in self.children.iter() { - let k = k.clone(); - if let Some(map) = storage.children.get_mut(&k) { - map.data.extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); - if !map.child_info.try_update(other_map.child_info.as_ref()) { - return Err("Incompatible child info update".to_string()); - } - } else { - storage.children.insert(k, other_map.clone()); - } - } - Ok(()) - } + fn assimilate_storage(&self, storage: &mut sp_core::storage::Storage) -> Result<(), String> { + storage + .top + .extend(self.top.iter().map(|(k, v)| (k.clone(), v.clone()))); + for (k, other_map) in self.children.iter() { + let k = k.clone(); + if let Some(map) = storage.children.get_mut(&k) { + map.data + .extend(other_map.data.iter().map(|(k, v)| (k.clone(), v.clone()))); + if !map.child_info.try_update(other_map.child_info.as_ref()) { + return Err("Incompatible child info update".to_string()); + } + } else { + storage.children.insert(k, other_map.clone()); + } + } + Ok(()) + } } #[cfg(feature = "std")] impl BuildStorage for () { - fn assimilate_storage( - &self, - _: &mut sp_core::storage::Storage, - )-> Result<(), String> { - Err("`assimilate_storage` not implemented for `()`".into()) - } + fn assimilate_storage(&self, _: &mut sp_core::storage::Storage) -> Result<(), String> { + Err("`assimilate_storage` not implemented for `()`".into()) + } } /// Consensus engine unique ID. 
@@ -168,152 +170,169 @@ pub type ConsensusEngineId = [u8; 4]; #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[derive(Eq, PartialEq, Clone, Encode, Decode, RuntimeDebug)] pub enum MultiSignature { - /// An Ed25519 signature. - Ed25519(ed25519::Signature), - /// An Sr25519 signature. - Sr25519(sr25519::Signature), - /// An ECDSA/SECP256k1 signature. - Ecdsa(ecdsa::Signature), + /// An Ed25519 signature. + Ed25519(ed25519::Signature), + /// An Sr25519 signature. + Sr25519(sr25519::Signature), + /// An ECDSA/SECP256k1 signature. + Ecdsa(ecdsa::Signature), } impl From for MultiSignature { - fn from(x: ed25519::Signature) -> Self { - MultiSignature::Ed25519(x) - } + fn from(x: ed25519::Signature) -> Self { + MultiSignature::Ed25519(x) + } } impl From for MultiSignature { - fn from(x: sr25519::Signature) -> Self { - MultiSignature::Sr25519(x) - } + fn from(x: sr25519::Signature) -> Self { + MultiSignature::Sr25519(x) + } } impl From for MultiSignature { - fn from(x: ecdsa::Signature) -> Self { - MultiSignature::Ecdsa(x) - } + fn from(x: ecdsa::Signature) -> Self { + MultiSignature::Ecdsa(x) + } } impl Default for MultiSignature { - fn default() -> Self { - MultiSignature::Ed25519(Default::default()) - } + fn default() -> Self { + MultiSignature::Ed25519(Default::default()) + } } /// Public key for any known crypto algorithm. #[derive(Eq, PartialEq, Ord, PartialOrd, Clone, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] pub enum MultiSigner { - /// An Ed25519 identity. - Ed25519(ed25519::Public), - /// An Sr25519 identity. - Sr25519(sr25519::Public), - /// An SECP256k1/ECDSA identity (actually, the Blake2 hash of the compressed pub key). - Ecdsa(ecdsa::Public), + /// An Ed25519 identity. + Ed25519(ed25519::Public), + /// An Sr25519 identity. + Sr25519(sr25519::Public), + /// An SECP256k1/ECDSA identity (actually, the Blake2 hash of the compressed pub key). 
+ Ecdsa(ecdsa::Public), } impl Default for MultiSigner { - fn default() -> Self { - MultiSigner::Ed25519(Default::default()) - } + fn default() -> Self { + MultiSigner::Ed25519(Default::default()) + } } /// NOTE: This implementations is required by `SimpleAddressDeterminer`, /// we convert the hash into some AccountId, it's fine to use any scheme. impl> crypto::UncheckedFrom for MultiSigner { - fn unchecked_from(x: T) -> Self { - ed25519::Public::unchecked_from(x.into()).into() - } + fn unchecked_from(x: T) -> Self { + ed25519::Public::unchecked_from(x.into()).into() + } } impl AsRef<[u8]> for MultiSigner { - fn as_ref(&self) -> &[u8] { - match *self { - MultiSigner::Ed25519(ref who) => who.as_ref(), - MultiSigner::Sr25519(ref who) => who.as_ref(), - MultiSigner::Ecdsa(ref who) => who.as_ref(), - } - } + fn as_ref(&self) -> &[u8] { + match *self { + MultiSigner::Ed25519(ref who) => who.as_ref(), + MultiSigner::Sr25519(ref who) => who.as_ref(), + MultiSigner::Ecdsa(ref who) => who.as_ref(), + } + } } impl traits::IdentifyAccount for MultiSigner { - type AccountId = AccountId32; - fn into_account(self) -> AccountId32 { - match self { - MultiSigner::Ed25519(who) => <[u8; 32]>::from(who).into(), - MultiSigner::Sr25519(who) => <[u8; 32]>::from(who).into(), - MultiSigner::Ecdsa(who) => sp_io::hashing::blake2_256(&who.as_ref()[..]).into(), - } - } + type AccountId = AccountId32; + fn into_account(self) -> AccountId32 { + match self { + MultiSigner::Ed25519(who) => <[u8; 32]>::from(who).into(), + MultiSigner::Sr25519(who) => <[u8; 32]>::from(who).into(), + MultiSigner::Ecdsa(who) => sp_io::hashing::blake2_256(&who.as_ref()[..]).into(), + } + } } impl From for MultiSigner { - fn from(x: ed25519::Public) -> Self { - MultiSigner::Ed25519(x) - } + fn from(x: ed25519::Public) -> Self { + MultiSigner::Ed25519(x) + } } impl TryFrom for ed25519::Public { - type Error = (); - fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Ed25519(x) = m { Ok(x) } else { Err(()) } - } 
+ type Error = (); + fn try_from(m: MultiSigner) -> Result { + if let MultiSigner::Ed25519(x) = m { + Ok(x) + } else { + Err(()) + } + } } impl From for MultiSigner { - fn from(x: sr25519::Public) -> Self { - MultiSigner::Sr25519(x) - } + fn from(x: sr25519::Public) -> Self { + MultiSigner::Sr25519(x) + } } impl TryFrom for sr25519::Public { - type Error = (); - fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Sr25519(x) = m { Ok(x) } else { Err(()) } - } + type Error = (); + fn try_from(m: MultiSigner) -> Result { + if let MultiSigner::Sr25519(x) = m { + Ok(x) + } else { + Err(()) + } + } } impl From for MultiSigner { - fn from(x: ecdsa::Public) -> Self { - MultiSigner::Ecdsa(x) - } + fn from(x: ecdsa::Public) -> Self { + MultiSigner::Ecdsa(x) + } } impl TryFrom for ecdsa::Public { - type Error = (); - fn try_from(m: MultiSigner) -> Result { - if let MultiSigner::Ecdsa(x) = m { Ok(x) } else { Err(()) } - } + type Error = (); + fn try_from(m: MultiSigner) -> Result { + if let MultiSigner::Ecdsa(x) = m { + Ok(x) + } else { + Err(()) + } + } } #[cfg(feature = "std")] impl std::fmt::Display for MultiSigner { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - match *self { - MultiSigner::Ed25519(ref who) => write!(fmt, "ed25519: {}", who), - MultiSigner::Sr25519(ref who) => write!(fmt, "sr25519: {}", who), - MultiSigner::Ecdsa(ref who) => write!(fmt, "ecdsa: {}", who), - } - } + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + match *self { + MultiSigner::Ed25519(ref who) => write!(fmt, "ed25519: {}", who), + MultiSigner::Sr25519(ref who) => write!(fmt, "sr25519: {}", who), + MultiSigner::Ecdsa(ref who) => write!(fmt, "ecdsa: {}", who), + } + } } impl Verify for MultiSignature { - type Signer = MultiSigner; - fn verify>(&self, mut msg: L, signer: &AccountId32) -> bool { - match (self, signer) { - (MultiSignature::Ed25519(ref sig), who) => sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())), - 
(MultiSignature::Sr25519(ref sig), who) => sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())), - (MultiSignature::Ecdsa(ref sig), who) => { - let m = sp_io::hashing::blake2_256(msg.get()); - match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { - Ok(pubkey) => - &sp_io::hashing::blake2_256(pubkey.as_ref()) - == >::as_ref(who), - _ => false, - } - } - } - } + type Signer = MultiSigner; + fn verify>(&self, mut msg: L, signer: &AccountId32) -> bool { + match (self, signer) { + (MultiSignature::Ed25519(ref sig), who) => { + sig.verify(msg, &ed25519::Public::from_slice(who.as_ref())) + } + (MultiSignature::Sr25519(ref sig), who) => { + sig.verify(msg, &sr25519::Public::from_slice(who.as_ref())) + } + (MultiSignature::Ecdsa(ref sig), who) => { + let m = sp_io::hashing::blake2_256(msg.get()); + match sp_io::crypto::secp256k1_ecdsa_recover_compressed(sig.as_ref(), &m) { + Ok(pubkey) => { + &sp_io::hashing::blake2_256(pubkey.as_ref()) + == >::as_ref(who) + } + _ => false, + } + } + } + } } /// Signature verify that can work with any known signature types.. 
@@ -322,34 +341,34 @@ impl Verify for MultiSignature { pub struct AnySignature(H512); impl Verify for AnySignature { - type Signer = sr25519::Public; - fn verify>(&self, mut msg: L, signer: &sr25519::Public) -> bool { - let msg = msg.get(); - sr25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) - .map(|s| s.verify(msg, signer)) - .unwrap_or(false) - || ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) - .map(|s| s.verify(msg, &ed25519::Public::from_slice(signer.as_ref()))) - .unwrap_or(false) - } + type Signer = sr25519::Public; + fn verify>(&self, mut msg: L, signer: &sr25519::Public) -> bool { + let msg = msg.get(); + sr25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) + .map(|s| s.verify(msg, signer)) + .unwrap_or(false) + || ed25519::Signature::try_from(self.0.as_fixed_bytes().as_ref()) + .map(|s| s.verify(msg, &ed25519::Public::from_slice(signer.as_ref()))) + .unwrap_or(false) + } } impl From for AnySignature { - fn from(s: sr25519::Signature) -> Self { - AnySignature(s.into()) - } + fn from(s: sr25519::Signature) -> Self { + AnySignature(s.into()) + } } impl From for AnySignature { - fn from(s: ed25519::Signature) -> Self { - AnySignature(s.into()) - } + fn from(s: ed25519::Signature) -> Self { + AnySignature(s.into()) + } } impl From for DispatchOutcome { - fn from(err: DispatchError) -> Self { - Err(err) - } + fn from(err: DispatchError) -> Self { + Err(err) + } } /// This is the legacy return type of `Dispatchable`. It is still exposed for compatibilty @@ -365,122 +384,137 @@ pub type DispatchResultWithInfo = sp_std::result::Result, - }, + /// Some error occurred. + Other(#[codec(skip)] &'static str), + /// Failed to lookup some data. + CannotLookup, + /// A bad origin. + BadOrigin, + /// A custom error in a module + Module { + /// Module index, matching the metadata module index + index: u8, + /// Module specific error value + error: u8, + /// Optional error message. 
+ #[codec(skip)] + message: Option<&'static str>, + }, } /// Result of a `Dispatchable` which contains the `DispatchResult` and additional information /// about the `Dispatchable` that is only known post dispatch. #[derive(Eq, PartialEq, Clone, Copy, Encode, Decode, RuntimeDebug)] -pub struct DispatchErrorWithPostInfo where - Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable +pub struct DispatchErrorWithPostInfo +where + Info: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { - /// Addditional information about the `Dispatchable` which is only known post dispatch. - pub post_info: Info, - /// The actual `DispatchResult` indicating whether the dispatch was succesfull. - pub error: DispatchError, + /// Addditional information about the `Dispatchable` which is only known post dispatch. + pub post_info: Info, + /// The actual `DispatchResult` indicating whether the dispatch was succesfull. + pub error: DispatchError, } impl DispatchError { - /// Return the same error but without the attached message. - pub fn stripped(self) -> Self { - match self { - DispatchError::Module { index, error, message: Some(_) } - => DispatchError::Module { index, error, message: None }, - m => m, - } - } -} - -impl From for DispatchErrorWithPostInfo where - T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable + Default, - E: Into + /// Return the same error but without the attached message. 
+ pub fn stripped(self) -> Self { + match self { + DispatchError::Module { + index, + error, + message: Some(_), + } => DispatchError::Module { + index, + error, + message: None, + }, + m => m, + } + } +} + +impl From for DispatchErrorWithPostInfo +where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable + Default, + E: Into, { - fn from(error: E) -> Self { - Self { - post_info: Default::default(), - error: error.into(), - } - } + fn from(error: E) -> Self { + Self { + post_info: Default::default(), + error: error.into(), + } + } } impl From for DispatchError { - fn from(_: crate::traits::LookupError) -> Self { - Self::CannotLookup - } + fn from(_: crate::traits::LookupError) -> Self { + Self::CannotLookup + } } impl From for DispatchError { - fn from(_: crate::traits::BadOrigin) -> Self { - Self::BadOrigin - } + fn from(_: crate::traits::BadOrigin) -> Self { + Self::BadOrigin + } } impl From<&'static str> for DispatchError { - fn from(err: &'static str) -> DispatchError { - DispatchError::Other(err) - } + fn from(err: &'static str) -> DispatchError { + DispatchError::Other(err) + } } impl From for &'static str { - fn from(err: DispatchError) -> &'static str { - match err { - DispatchError::Other(msg) => msg, - DispatchError::CannotLookup => "Can not lookup", - DispatchError::BadOrigin => "Bad origin", - DispatchError::Module { message, .. } => message.unwrap_or("Unknown module error"), - } - } -} - -impl From> for &'static str where - T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable + fn from(err: DispatchError) -> &'static str { + match err { + DispatchError::Other(msg) => msg, + DispatchError::CannotLookup => "Can not lookup", + DispatchError::BadOrigin => "Bad origin", + DispatchError::Module { message, .. 
} => message.unwrap_or("Unknown module error"), + } + } +} + +impl From> for &'static str +where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { - fn from(err: DispatchErrorWithPostInfo) -> &'static str { - err.error.into() - } + fn from(err: DispatchErrorWithPostInfo) -> &'static str { + err.error.into() + } } impl traits::Printable for DispatchError { - fn print(&self) { - "DispatchError".print(); - match self { - Self::Other(err) => err.print(), - Self::CannotLookup => "Can not lookup".print(), - Self::BadOrigin => "Bad origin".print(), - Self::Module { index, error, message } => { - index.print(); - error.print(); - if let Some(msg) = message { - msg.print(); - } - } - } - } -} - -impl traits::Printable for DispatchErrorWithPostInfo where - T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable + fn print(&self) { + "DispatchError".print(); + match self { + Self::Other(err) => err.print(), + Self::CannotLookup => "Can not lookup".print(), + Self::BadOrigin => "Bad origin".print(), + Self::Module { + index, + error, + message, + } => { + index.print(); + error.print(); + if let Some(msg) = message { + msg.print(); + } + } + } + } +} + +impl traits::Printable for DispatchErrorWithPostInfo +where + T: Eq + PartialEq + Clone + Copy + Encode + Decode + traits::Printable, { - fn print(&self) { - self.error.print(); - "PostInfo: ".print(); - self.post_info.print(); - } + fn print(&self) { + self.error.print(); + "PostInfo: ".print(); + self.post_info.print(); + } } /// This type specifies the outcome of dispatching a call to a module. @@ -512,34 +546,38 @@ pub type DispatchOutcome = Result<(), DispatchError>; /// - The sender doesn't have enough funds to pay the transaction inclusion fee. Including such /// a transaction in the block doesn't make sense. /// - The extrinsic supplied a bad signature. This transaction won't become valid ever. 
-pub type ApplyExtrinsicResult = Result; +pub type ApplyExtrinsicResult = + Result; /// Verify a signature on an encoded value in a lazy manner. This can be /// an optimization if the signature scheme has an "unsigned" escape hash. pub fn verify_encoded_lazy( - sig: &V, - item: &T, - signer: &::AccountId + sig: &V, + item: &T, + signer: &::AccountId, ) -> bool { - // The `Lazy` trait expresses something like `X: FnMut &'a T>`. - // unfortunately this is a lifetime relationship that can't - // be expressed without generic associated types, better unification of HRTBs in type position, - // and some kind of integration into the Fn* traits. - struct LazyEncode { - inner: F, - encoded: Option>, - } - - impl Vec> traits::Lazy<[u8]> for LazyEncode { - fn get(&mut self) -> &[u8] { - self.encoded.get_or_insert_with(&self.inner).as_slice() - } - } - - sig.verify( - LazyEncode { inner: || item.encode(), encoded: None }, - signer, - ) + // The `Lazy` trait expresses something like `X: FnMut &'a T>`. + // unfortunately this is a lifetime relationship that can't + // be expressed without generic associated types, better unification of HRTBs in type position, + // and some kind of integration into the Fn* traits. + struct LazyEncode { + inner: F, + encoded: Option>, + } + + impl Vec> traits::Lazy<[u8]> for LazyEncode { + fn get(&mut self) -> &[u8] { + self.encoded.get_or_insert_with(&self.inner).as_slice() + } + } + + sig.verify( + LazyEncode { + inner: || item.encode(), + encoded: None, + }, + signer, + ) } /// Helper macro for `impl_outer_config` @@ -672,15 +710,15 @@ macro_rules! impl_outer_config { #[macro_export] #[cfg(feature = "std")] macro_rules! assert_eq_error_rate { - ($x:expr, $y:expr, $error:expr $(,)?) => { - assert!( - ($x) >= (($y) - ($error)) && ($x) <= (($y) + ($error)), - "{:?} != {:?} (with error rate {:?})", - $x, - $y, - $error, - ); - }; + ($x:expr, $y:expr, $error:expr $(,)?) 
=> { + assert!( + ($x) >= (($y) - ($error)) && ($x) <= (($y) + ($error)), + "{:?} != {:?} (with error rate {:?})", + $x, + $y, + $error, + ); + }; } /// Simple blob to hold an extrinsic without committing to its format and ensure it is serialized @@ -690,51 +728,55 @@ pub struct OpaqueExtrinsic(pub Vec); #[cfg(feature = "std")] impl parity_util_mem::MallocSizeOf for OpaqueExtrinsic { - fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { - self.0.size_of(ops) - } + fn size_of(&self, ops: &mut parity_util_mem::MallocSizeOfOps) -> usize { + self.0.size_of(ops) + } } impl sp_std::fmt::Debug for OpaqueExtrinsic { - #[cfg(feature = "std")] - fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(fmt, "{}", sp_core::hexdisplay::HexDisplay::from(&self.0)) - } + #[cfg(feature = "std")] + fn fmt(&self, fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(fmt, "{}", sp_core::hexdisplay::HexDisplay::from(&self.0)) + } - #[cfg(not(feature = "std"))] - fn fmt(&self, _fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(not(feature = "std"))] + fn fmt(&self, _fmt: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } - #[cfg(feature = "std")] impl ::serde::Serialize for OpaqueExtrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - codec::Encode::using_encoded(&self.0, |bytes| ::sp_core::bytes::serialize(bytes, seq)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + codec::Encode::using_encoded(&self.0, |bytes| ::sp_core::bytes::serialize(bytes, seq)) + } } #[cfg(feature = "std")] impl<'a> ::serde::Deserialize<'a> for OpaqueExtrinsic { - fn deserialize(de: D) -> Result where D: ::serde::Deserializer<'a> { - let r = ::sp_core::bytes::deserialize(de)?; - Decode::decode(&mut &r[..]) - .map_err(|e| ::serde::de::Error::custom(format!("Decode error: {}", e))) - } + fn deserialize(de: D) -> Result + where + D: 
::serde::Deserializer<'a>, + { + let r = ::sp_core::bytes::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| ::serde::de::Error::custom(format!("Decode error: {}", e))) + } } impl traits::Extrinsic for OpaqueExtrinsic { - type Call = (); - type SignaturePayload = (); + type Call = (); + type SignaturePayload = (); } /// Print something that implements `Printable` from the runtime. pub fn print(print: impl traits::Printable) { - print.print(); + print.print(); } - /// Batching session. /// /// To be used in runtime only. Outside of runtime, just construct @@ -743,90 +785,87 @@ pub fn print(print: impl traits::Printable) { pub struct SignatureBatching(bool); impl SignatureBatching { - /// Start new batching session. - pub fn start() -> Self { - sp_io::crypto::start_batch_verify(); - SignatureBatching(false) - } + /// Start new batching session. + pub fn start() -> Self { + sp_io::crypto::start_batch_verify(); + SignatureBatching(false) + } - /// Verify all signatures submitted during the batching session. - #[must_use] - pub fn verify(mut self) -> bool { - self.0 = true; - sp_io::crypto::finish_batch_verify() - } + /// Verify all signatures submitted during the batching session. + #[must_use] + pub fn verify(mut self) -> bool { + self.0 = true; + sp_io::crypto::finish_batch_verify() + } } impl Drop for SignatureBatching { - fn drop(&mut self) { - // Sanity check. If user forgets to actually call `verify()`. - if !self.0 { - panic!("Signature verification has not been called before `SignatureBatching::drop`") - } - } + fn drop(&mut self) { + // Sanity check. If user forgets to actually call `verify()`. 
+ if !self.0 { + panic!("Signature verification has not been called before `SignatureBatching::drop`") + } + } } - #[cfg(test)] mod tests { - use super::*; - use codec::{Encode, Decode}; - use sp_core::crypto::Pair; - - #[test] - fn opaque_extrinsic_serialization() { - let ex = super::OpaqueExtrinsic(vec![1, 2, 3, 4]); - assert_eq!(serde_json::to_string(&ex).unwrap(), "\"0x1001020304\"".to_owned()); - } - - #[test] - fn dispatch_error_encoding() { - let error = DispatchError::Module { - index: 1, - error: 2, - message: Some("error message"), - }; - let encoded = error.encode(); - let decoded = DispatchError::decode(&mut &encoded[..]).unwrap(); - assert_eq!(encoded, vec![3, 1, 2]); - assert_eq!( - decoded, - DispatchError::Module { - index: 1, - error: 2, - message: None, - }, - ); - } - - #[test] - fn multi_signature_ecdsa_verify_works() { - let msg = &b"test-message"[..]; - let (pair, _) = ecdsa::Pair::generate(); - - let signature = pair.sign(&msg); - assert!(ecdsa::Pair::verify(&signature, msg, &pair.public())); - - let multi_sig = MultiSignature::from(signature); - let multi_signer = MultiSigner::from(pair.public()); - assert!(multi_sig.verify(msg, &multi_signer.into_account())); - - let multi_signer = MultiSigner::from(pair.public()); - assert!(multi_sig.verify(msg, &multi_signer.into_account())); - } - - - #[test] - #[should_panic(expected = "Signature verification has not been called")] - fn batching_still_finishes_when_not_called_directly() { - let mut ext = sp_state_machine::BasicExternalities::with_tasks_executor(); - ext.execute_with(|| { - let _batching = SignatureBatching::start(); - sp_io::crypto::sr25519_verify( - &Default::default(), - &Vec::new(), - &Default::default(), - ); - }); - } + use super::*; + use codec::{Decode, Encode}; + use sp_core::crypto::Pair; + + #[test] + fn opaque_extrinsic_serialization() { + let ex = super::OpaqueExtrinsic(vec![1, 2, 3, 4]); + assert_eq!( + serde_json::to_string(&ex).unwrap(), + "\"0x1001020304\"".to_owned() + 
); + } + + #[test] + fn dispatch_error_encoding() { + let error = DispatchError::Module { + index: 1, + error: 2, + message: Some("error message"), + }; + let encoded = error.encode(); + let decoded = DispatchError::decode(&mut &encoded[..]).unwrap(); + assert_eq!(encoded, vec![3, 1, 2]); + assert_eq!( + decoded, + DispatchError::Module { + index: 1, + error: 2, + message: None, + }, + ); + } + + #[test] + fn multi_signature_ecdsa_verify_works() { + let msg = &b"test-message"[..]; + let (pair, _) = ecdsa::Pair::generate(); + + let signature = pair.sign(&msg); + assert!(ecdsa::Pair::verify(&signature, msg, &pair.public())); + + let multi_sig = MultiSignature::from(signature); + let multi_signer = MultiSigner::from(pair.public()); + assert!(multi_sig.verify(msg, &multi_signer.into_account())); + + let multi_signer = MultiSigner::from(pair.public()); + assert!(multi_sig.verify(msg, &multi_signer.into_account())); + } + + #[test] + #[should_panic(expected = "Signature verification has not been called")] + fn batching_still_finishes_when_not_called_directly() { + let mut ext = sp_state_machine::BasicExternalities::with_tasks_executor(); + ext.execute_with(|| { + let _batching = SignatureBatching::start(); + sp_io::crypto::sr25519_verify(&Default::default(), &Vec::new(), &Default::default()); + }); + } } diff --git a/primitives/runtime/src/offchain/http.rs b/primitives/runtime/src/offchain/http.rs index bbc929526b..a1684e75df 100644 --- a/primitives/runtime/src/offchain/http.rs +++ b/primitives/runtime/src/offchain/http.rs @@ -47,315 +47,309 @@ //! assert_eq!(body.error(), &None); //! 
``` -use sp_std::str; -use sp_std::prelude::Vec; -#[cfg(not(feature = "std"))] -use sp_std::prelude::vec; -use sp_core::RuntimeDebug; use sp_core::offchain::{ - Timestamp, - HttpRequestId as RequestId, - HttpRequestStatus as RequestStatus, - HttpError, + HttpError, HttpRequestId as RequestId, HttpRequestStatus as RequestStatus, Timestamp, }; +use sp_core::RuntimeDebug; +#[cfg(not(feature = "std"))] +use sp_std::prelude::vec; +use sp_std::prelude::Vec; +use sp_std::str; /// Request method (HTTP verb) #[derive(Clone, PartialEq, Eq, RuntimeDebug)] pub enum Method { - /// GET request - Get, - /// POST request - Post, - /// PUT request - Put, - /// PATCH request - Patch, - /// DELETE request - Delete, - /// Custom verb - Other(&'static str), + /// GET request + Get, + /// POST request + Post, + /// PUT request + Put, + /// PATCH request + Patch, + /// DELETE request + Delete, + /// Custom verb + Other(&'static str), } impl AsRef for Method { - fn as_ref(&self) -> &str { - match *self { - Method::Get => "GET", - Method::Post => "POST", - Method::Put => "PUT", - Method::Patch => "PATCH", - Method::Delete => "DELETE", - Method::Other(m) => m, - } - } + fn as_ref(&self) -> &str { + match *self { + Method::Get => "GET", + Method::Post => "POST", + Method::Put => "PUT", + Method::Patch => "PATCH", + Method::Delete => "DELETE", + Method::Other(m) => m, + } + } } mod header { - use super::*; - - /// A header type. - #[derive(Clone, PartialEq, Eq, RuntimeDebug)] - pub struct Header { - name: Vec, - value: Vec, - } - - impl Header { - /// Creates new header given it's name and value. - pub fn new(name: &str, value: &str) -> Self { - Header { - name: name.as_bytes().to_vec(), - value: value.as_bytes().to_vec(), - } - } - - /// Returns the name of this header. - pub fn name(&self) -> &str { - // Header keys are always produced from `&str` so this is safe. - // we don't store them as `Strings` to avoid bringing `alloc::String` to sp-std - // or here. 
- unsafe { str::from_utf8_unchecked(&self.name) } - } - - /// Returns the value of this header. - pub fn value(&self) -> &str { - // Header values are always produced from `&str` so this is safe. - // we don't store them as `Strings` to avoid bringing `alloc::String` to sp-std - // or here. - unsafe { str::from_utf8_unchecked(&self.value) } - } - } + use super::*; + + /// A header type. + #[derive(Clone, PartialEq, Eq, RuntimeDebug)] + pub struct Header { + name: Vec, + value: Vec, + } + + impl Header { + /// Creates new header given it's name and value. + pub fn new(name: &str, value: &str) -> Self { + Header { + name: name.as_bytes().to_vec(), + value: value.as_bytes().to_vec(), + } + } + + /// Returns the name of this header. + pub fn name(&self) -> &str { + // Header keys are always produced from `&str` so this is safe. + // we don't store them as `Strings` to avoid bringing `alloc::String` to sp-std + // or here. + unsafe { str::from_utf8_unchecked(&self.name) } + } + + /// Returns the value of this header. + pub fn value(&self) -> &str { + // Header values are always produced from `&str` so this is safe. + // we don't store them as `Strings` to avoid bringing `alloc::String` to sp-std + // or here. + unsafe { str::from_utf8_unchecked(&self.value) } + } + } } /// An HTTP request builder. #[derive(Clone, PartialEq, Eq, RuntimeDebug)] pub struct Request<'a, T = Vec<&'static [u8]>> { - /// Request method - pub method: Method, - /// Request URL - pub url: &'a str, - /// Body of the request - pub body: T, - /// Deadline to finish sending the request - pub deadline: Option, - /// Request list of headers. - headers: Vec, + /// Request method + pub method: Method, + /// Request URL + pub url: &'a str, + /// Body of the request + pub body: T, + /// Deadline to finish sending the request + pub deadline: Option, + /// Request list of headers. 
+ headers: Vec, } impl Default for Request<'static, T> { - fn default() -> Self { - Request { - method: Method::Get, - url: "http://localhost", - headers: Vec::new(), - body: Default::default(), - deadline: None, - } - } + fn default() -> Self { + Request { + method: Method::Get, + url: "http://localhost", + headers: Vec::new(), + body: Default::default(), + deadline: None, + } + } } impl<'a> Request<'a> { - /// Start a simple GET request - pub fn get(url: &'a str) -> Self { - Self::new(url) - } + /// Start a simple GET request + pub fn get(url: &'a str) -> Self { + Self::new(url) + } } impl<'a, T> Request<'a, T> { - /// Create new POST request with given body. - pub fn post(url: &'a str, body: T) -> Self { - let req: Request = Request::default(); - - Request { - url, - body, - method: Method::Post, - headers: req.headers, - deadline: req.deadline, - } - } + /// Create new POST request with given body. + pub fn post(url: &'a str, body: T) -> Self { + let req: Request = Request::default(); + + Request { + url, + body, + method: Method::Post, + headers: req.headers, + deadline: req.deadline, + } + } } impl<'a, T: Default> Request<'a, T> { - /// Create a new Request builder with the given URL. - pub fn new(url: &'a str) -> Self { - Request::default().url(url) - } - - /// Change the method of the request - pub fn method(mut self, method: Method) -> Self { - self.method = method; - self - } - - /// Change the URL of the request. - pub fn url(mut self, url: &'a str) -> Self { - self.url = url; - self - } - - /// Set the body of the request. - pub fn body(mut self, body: T) -> Self { - self.body = body; - self - } - - /// Add a header. - pub fn add_header(mut self, name: &str, value: &str) -> Self { - self.headers.push(header::Header::new(name, value)); - self - } - - /// Set the deadline of the request. - pub fn deadline(mut self, deadline: Timestamp) -> Self { - self.deadline = Some(deadline); - self - } + /// Create a new Request builder with the given URL. 
+ pub fn new(url: &'a str) -> Self { + Request::default().url(url) + } + + /// Change the method of the request + pub fn method(mut self, method: Method) -> Self { + self.method = method; + self + } + + /// Change the URL of the request. + pub fn url(mut self, url: &'a str) -> Self { + self.url = url; + self + } + + /// Set the body of the request. + pub fn body(mut self, body: T) -> Self { + self.body = body; + self + } + + /// Add a header. + pub fn add_header(mut self, name: &str, value: &str) -> Self { + self.headers.push(header::Header::new(name, value)); + self + } + + /// Set the deadline of the request. + pub fn deadline(mut self, deadline: Timestamp) -> Self { + self.deadline = Some(deadline); + self + } } -impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { - /// Send the request and return a handle. - /// - /// Err is returned in case the deadline is reached - /// or the request timeouts. - pub fn send(self) -> Result { - let meta = &[]; - - // start an http request. - let id = sp_io::offchain::http_request_start( - self.method.as_ref(), - self.url, - meta, - ).map_err(|_| HttpError::IoError)?; - - // add custom headers - for header in &self.headers { - sp_io::offchain::http_request_add_header( - id, - header.name(), - header.value(), - ).map_err(|_| HttpError::IoError)? - } - - // write body - for chunk in self.body { - sp_io::offchain::http_request_write_body(id, chunk.as_ref(), self.deadline)?; - } - - // finalize the request - sp_io::offchain::http_request_write_body(id, &[], self.deadline)?; - - Ok(PendingRequest { - id, - }) - } +impl<'a, I: AsRef<[u8]>, T: IntoIterator> Request<'a, T> { + /// Send the request and return a handle. + /// + /// Err is returned in case the deadline is reached + /// or the request timeouts. + pub fn send(self) -> Result { + let meta = &[]; + + // start an http request. 
+ let id = sp_io::offchain::http_request_start(self.method.as_ref(), self.url, meta) + .map_err(|_| HttpError::IoError)?; + + // add custom headers + for header in &self.headers { + sp_io::offchain::http_request_add_header(id, header.name(), header.value()) + .map_err(|_| HttpError::IoError)? + } + + // write body + for chunk in self.body { + sp_io::offchain::http_request_write_body(id, chunk.as_ref(), self.deadline)?; + } + + // finalize the request + sp_io::offchain::http_request_write_body(id, &[], self.deadline)?; + + Ok(PendingRequest { id }) + } } /// A request error #[derive(Clone, PartialEq, Eq, RuntimeDebug)] pub enum Error { - /// Deadline has been reached. - DeadlineReached, - /// Request had timed out. - IoError, - /// Unknown error has been encountered. - Unknown, + /// Deadline has been reached. + DeadlineReached, + /// Request had timed out. + IoError, + /// Unknown error has been encountered. + Unknown, } /// A struct representing an uncompleted http request. #[derive(PartialEq, Eq, RuntimeDebug)] pub struct PendingRequest { - /// Request ID - pub id: RequestId, + /// Request ID + pub id: RequestId, } /// A result of waiting for a pending request. pub type HttpResult = Result; impl PendingRequest { - /// Wait for the request to complete. - /// - /// NOTE this waits for the request indefinitely. - pub fn wait(self) -> HttpResult { - match self.try_wait(None) { - Ok(res) => res, - Err(_) => panic!("Since `None` is passed we will never get a deadline error; qed"), - } - } - - /// Attempts to wait for the request to finish, - /// but will return `Err` in case the deadline is reached. - pub fn try_wait(self, deadline: impl Into>) -> Result { - Self::try_wait_all(vec![self], deadline).pop().expect("One request passed, one status received; qed") - } - - /// Wait for all provided requests. 
- pub fn wait_all(requests: Vec) -> Vec { - Self::try_wait_all(requests, None) - .into_iter() - .map(|r| match r { - Ok(r) => r, - Err(_) => panic!("Since `None` is passed we will never get a deadline error; qed"), - }) - .collect() - } - - /// Attempt to wait for all provided requests, but up to given deadline. - /// - /// Requests that are complete will resolve to an `Ok` others will return a `DeadlineReached` error. - pub fn try_wait_all( - requests: Vec, - deadline: impl Into> - ) -> Vec> { - let ids = requests.iter().map(|r| r.id).collect::>(); - let statuses = sp_io::offchain::http_response_wait(&ids, deadline.into()); - - statuses - .into_iter() - .zip(requests.into_iter()) - .map(|(status, req)| match status { - RequestStatus::DeadlineReached => Err(req), - RequestStatus::IoError => Ok(Err(Error::IoError)), - RequestStatus::Invalid => Ok(Err(Error::Unknown)), - RequestStatus::Finished(code) => Ok(Ok(Response::new(req.id, code))), - }) - .collect() - } + /// Wait for the request to complete. + /// + /// NOTE this waits for the request indefinitely. + pub fn wait(self) -> HttpResult { + match self.try_wait(None) { + Ok(res) => res, + Err(_) => panic!("Since `None` is passed we will never get a deadline error; qed"), + } + } + + /// Attempts to wait for the request to finish, + /// but will return `Err` in case the deadline is reached. + pub fn try_wait( + self, + deadline: impl Into>, + ) -> Result { + Self::try_wait_all(vec![self], deadline) + .pop() + .expect("One request passed, one status received; qed") + } + + /// Wait for all provided requests. + pub fn wait_all(requests: Vec) -> Vec { + Self::try_wait_all(requests, None) + .into_iter() + .map(|r| match r { + Ok(r) => r, + Err(_) => panic!("Since `None` is passed we will never get a deadline error; qed"), + }) + .collect() + } + + /// Attempt to wait for all provided requests, but up to given deadline. 
+ /// + /// Requests that are complete will resolve to an `Ok` others will return a `DeadlineReached` error. + pub fn try_wait_all( + requests: Vec, + deadline: impl Into>, + ) -> Vec> { + let ids = requests.iter().map(|r| r.id).collect::>(); + let statuses = sp_io::offchain::http_response_wait(&ids, deadline.into()); + + statuses + .into_iter() + .zip(requests.into_iter()) + .map(|(status, req)| match status { + RequestStatus::DeadlineReached => Err(req), + RequestStatus::IoError => Ok(Err(Error::IoError)), + RequestStatus::Invalid => Ok(Err(Error::Unknown)), + RequestStatus::Finished(code) => Ok(Ok(Response::new(req.id, code))), + }) + .collect() + } } /// A HTTP response. #[derive(RuntimeDebug)] pub struct Response { - /// Request id - pub id: RequestId, - /// Response status code - pub code: u16, - /// A collection of headers. - headers: Option, + /// Request id + pub id: RequestId, + /// Response status code + pub code: u16, + /// A collection of headers. + headers: Option, } impl Response { - fn new(id: RequestId, code: u16) -> Self { - Self { - id, - code, - headers: None, - } - } - - /// Retrieve the headers for this response. - pub fn headers(&mut self) -> &Headers { - if self.headers.is_none() { - self.headers = Some( - Headers { raw: sp_io::offchain::http_response_headers(self.id) }, - ); - } - self.headers.as_ref().expect("Headers were just set; qed") - } - - /// Retrieve the body of this response. - pub fn body(&self) -> ResponseBody { - ResponseBody::new(self.id) - } + fn new(id: RequestId, code: u16) -> Self { + Self { + id, + code, + headers: None, + } + } + + /// Retrieve the headers for this response. + pub fn headers(&mut self) -> &Headers { + if self.headers.is_none() { + self.headers = Some(Headers { + raw: sp_io::offchain::http_response_headers(self.id), + }); + } + self.headers.as_ref().expect("Headers were just set; qed") + } + + /// Retrieve the body of this response. 
+ pub fn body(&self) -> ResponseBody { + ResponseBody::new(self.id) + } } /// A buffered byte iterator over response body. @@ -368,239 +362,238 @@ impl Response { /// 3. The body has been returned. The reader will keep returning `None`. #[derive(Clone)] pub struct ResponseBody { - id: RequestId, - error: Option, - buffer: [u8; 4096], - filled_up_to: Option, - position: usize, - deadline: Option, + id: RequestId, + error: Option, + buffer: [u8; 4096], + filled_up_to: Option, + position: usize, + deadline: Option, } #[cfg(feature = "std")] impl std::fmt::Debug for ResponseBody { - fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { - fmt.debug_struct("ResponseBody") - .field("id", &self.id) - .field("error", &self.error) - .field("buffer", &self.buffer.len()) - .field("filled_up_to", &self.filled_up_to) - .field("position", &self.position) - .field("deadline", &self.deadline) - .finish() - } + fn fmt(&self, fmt: &mut std::fmt::Formatter) -> std::fmt::Result { + fmt.debug_struct("ResponseBody") + .field("id", &self.id) + .field("error", &self.error) + .field("buffer", &self.buffer.len()) + .field("filled_up_to", &self.filled_up_to) + .field("position", &self.position) + .field("deadline", &self.deadline) + .finish() + } } impl ResponseBody { - fn new(id: RequestId) -> Self { - ResponseBody { - id, - error: None, - buffer: [0_u8; 4096], - filled_up_to: None, - position: 0, - deadline: None, - } - } - - /// Set the deadline for reading the body. - pub fn deadline(&mut self, deadline: impl Into>) { - self.deadline = deadline.into(); - self.error = None; - } - - /// Return an error that caused the iterator to return `None`. - /// - /// If the error is `DeadlineReached` you can resume the iterator by setting - /// a new deadline. 
- pub fn error(&self) -> &Option { - &self.error - } + fn new(id: RequestId) -> Self { + ResponseBody { + id, + error: None, + buffer: [0_u8; 4096], + filled_up_to: None, + position: 0, + deadline: None, + } + } + + /// Set the deadline for reading the body. + pub fn deadline(&mut self, deadline: impl Into>) { + self.deadline = deadline.into(); + self.error = None; + } + + /// Return an error that caused the iterator to return `None`. + /// + /// If the error is `DeadlineReached` you can resume the iterator by setting + /// a new deadline. + pub fn error(&self) -> &Option { + &self.error + } } impl Iterator for ResponseBody { - type Item = u8; - - fn next(&mut self) -> Option { - if self.error.is_some() { - return None; - } - - if self.filled_up_to.is_none() { - let result = sp_io::offchain::http_response_read_body( - self.id, - &mut self.buffer, - self.deadline); - match result { - Err(e) => { - self.error = Some(e); - return None; - } - Ok(0) => { - return None; - } - Ok(size) => { - self.position = 0; - self.filled_up_to = Some(size as usize); - } - } - } - - if Some(self.position) == self.filled_up_to { - self.filled_up_to = None; - return self.next(); - } - - let result = self.buffer[self.position]; - self.position += 1; - Some(result) - } + type Item = u8; + + fn next(&mut self) -> Option { + if self.error.is_some() { + return None; + } + + if self.filled_up_to.is_none() { + let result = + sp_io::offchain::http_response_read_body(self.id, &mut self.buffer, self.deadline); + match result { + Err(e) => { + self.error = Some(e); + return None; + } + Ok(0) => { + return None; + } + Ok(size) => { + self.position = 0; + self.filled_up_to = Some(size as usize); + } + } + } + + if Some(self.position) == self.filled_up_to { + self.filled_up_to = None; + return self.next(); + } + + let result = self.buffer[self.position]; + self.position += 1; + Some(result) + } } /// A collection of Headers in the response. 
#[derive(Clone, PartialEq, Eq, RuntimeDebug)] pub struct Headers { - /// Raw headers - pub raw: Vec<(Vec, Vec)>, + /// Raw headers + pub raw: Vec<(Vec, Vec)>, } impl Headers { - /// Retrieve a single header from the list of headers. - /// - /// Note this method is linearly looking from all the headers - /// comparing them with the needle byte-by-byte. - /// If you want to consume multiple headers it's better to iterate - /// and collect them on your own. - pub fn find(&self, name: &str) -> Option<&str> { - let raw = name.as_bytes(); - for &(ref key, ref val) in &self.raw { - if &**key == raw { - return str::from_utf8(&val).ok() - } - } - None - } - - /// Convert this headers into an iterator. - pub fn into_iter(&self) -> HeadersIterator { - HeadersIterator { collection: &self.raw, index: None } - } + /// Retrieve a single header from the list of headers. + /// + /// Note this method is linearly looking from all the headers + /// comparing them with the needle byte-by-byte. + /// If you want to consume multiple headers it's better to iterate + /// and collect them on your own. + pub fn find(&self, name: &str) -> Option<&str> { + let raw = name.as_bytes(); + for &(ref key, ref val) in &self.raw { + if &**key == raw { + return str::from_utf8(&val).ok(); + } + } + None + } + + /// Convert this headers into an iterator. + pub fn into_iter(&self) -> HeadersIterator { + HeadersIterator { + collection: &self.raw, + index: None, + } + } } /// A custom iterator traversing all the headers. #[derive(Clone, RuntimeDebug)] pub struct HeadersIterator<'a> { - collection: &'a [(Vec, Vec)], - index: Option, + collection: &'a [(Vec, Vec)], + index: Option, } impl<'a> HeadersIterator<'a> { - /// Move the iterator to the next position. - /// - /// Returns `true` is `current` has been set by this call. 
- pub fn next(&mut self) -> bool { - let index = self.index.map(|x| x + 1).unwrap_or(0); - self.index = Some(index); - index < self.collection.len() - } - - /// Returns current element (if any). - /// - /// Note that you have to call `next` prior to calling this - pub fn current(&self) -> Option<(&str, &str)> { - self.collection.get(self.index?) - .map(|val| (str::from_utf8(&val.0).unwrap_or(""), str::from_utf8(&val.1).unwrap_or(""))) - } + /// Move the iterator to the next position. + /// + /// Returns `true` is `current` has been set by this call. + pub fn next(&mut self) -> bool { + let index = self.index.map(|x| x + 1).unwrap_or(0); + self.index = Some(index); + index < self.collection.len() + } + + /// Returns current element (if any). + /// + /// Note that you have to call `next` prior to calling this + pub fn current(&self) -> Option<(&str, &str)> { + self.collection.get(self.index?).map(|val| { + ( + str::from_utf8(&val.0).unwrap_or(""), + str::from_utf8(&val.1).unwrap_or(""), + ) + }) + } } #[cfg(test)] mod tests { - use super::*; - use sp_io::TestExternalities; - use sp_core::offchain::{ - OffchainExt, - testing, - }; - - #[test] - fn should_send_a_basic_request_and_get_response() { - let (offchain, state) = testing::TestOffchainExt::new(); - let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); - - t.execute_with(|| { - let request: Request = Request::get("http://localhost:1234"); - let pending = request - .add_header("X-Auth", "hunter2") - .send() - .unwrap(); - // make sure it's sent correctly - state.write().fulfill_pending_request( - 0, - testing::PendingRequest { - method: "GET".into(), - uri: "http://localhost:1234".into(), - headers: vec![("X-Auth".into(), "hunter2".into())], - sent: true, - ..Default::default() - }, - b"1234".to_vec(), - None, - ); - - // wait - let mut response = pending.wait().unwrap(); - - // then check the response - let mut headers = response.headers().into_iter(); - 
assert_eq!(headers.current(), None); - assert_eq!(headers.next(), false); - assert_eq!(headers.current(), None); - - let body = response.body(); - assert_eq!(body.clone().collect::>(), b"1234".to_vec()); - assert_eq!(body.error(), &None); - }) - } - - #[test] - fn should_send_a_post_request() { - let (offchain, state) = testing::TestOffchainExt::new(); - let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); - - t.execute_with(|| { - let pending = Request::default() - .method(Method::Post) - .url("http://localhost:1234") - .body(vec![b"1234"]) - .send() - .unwrap(); - // make sure it's sent correctly - state.write().fulfill_pending_request( - 0, - testing::PendingRequest { - method: "POST".into(), - uri: "http://localhost:1234".into(), - body: b"1234".to_vec(), - sent: true, - ..Default::default() - }, - b"1234".to_vec(), - Some(("Test".to_owned(), "Header".to_owned())), - ); - - // wait - let mut response = pending.wait().unwrap(); - - // then check the response - let mut headers = response.headers().into_iter(); - assert_eq!(headers.current(), None); - assert_eq!(headers.next(), true); - assert_eq!(headers.current(), Some(("Test", "Header"))); - - let body = response.body(); - assert_eq!(body.clone().collect::>(), b"1234".to_vec()); - assert_eq!(body.error(), &None); - }) - } + use super::*; + use sp_core::offchain::{testing, OffchainExt}; + use sp_io::TestExternalities; + + #[test] + fn should_send_a_basic_request_and_get_response() { + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + + t.execute_with(|| { + let request: Request = Request::get("http://localhost:1234"); + let pending = request.add_header("X-Auth", "hunter2").send().unwrap(); + // make sure it's sent correctly + state.write().fulfill_pending_request( + 0, + testing::PendingRequest { + method: "GET".into(), + uri: "http://localhost:1234".into(), + headers: 
vec![("X-Auth".into(), "hunter2".into())], + sent: true, + ..Default::default() + }, + b"1234".to_vec(), + None, + ); + + // wait + let mut response = pending.wait().unwrap(); + + // then check the response + let mut headers = response.headers().into_iter(); + assert_eq!(headers.current(), None); + assert_eq!(headers.next(), false); + assert_eq!(headers.current(), None); + + let body = response.body(); + assert_eq!(body.clone().collect::>(), b"1234".to_vec()); + assert_eq!(body.error(), &None); + }) + } + + #[test] + fn should_send_a_post_request() { + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + + t.execute_with(|| { + let pending = Request::default() + .method(Method::Post) + .url("http://localhost:1234") + .body(vec![b"1234"]) + .send() + .unwrap(); + // make sure it's sent correctly + state.write().fulfill_pending_request( + 0, + testing::PendingRequest { + method: "POST".into(), + uri: "http://localhost:1234".into(), + body: b"1234".to_vec(), + sent: true, + ..Default::default() + }, + b"1234".to_vec(), + Some(("Test".to_owned(), "Header".to_owned())), + ); + + // wait + let mut response = pending.wait().unwrap(); + + // then check the response + let mut headers = response.headers().into_iter(); + assert_eq!(headers.current(), None); + assert_eq!(headers.next(), true); + assert_eq!(headers.current(), Some(("Test", "Header"))); + + let body = response.body(); + assert_eq!(body.clone().collect::>(), b"1234".to_vec()); + assert_eq!(body.error(), &None); + }) + } } diff --git a/primitives/runtime/src/offchain/storage.rs b/primitives/runtime/src/offchain/storage.rs index 681bc14451..5e2e708890 100644 --- a/primitives/runtime/src/offchain/storage.rs +++ b/primitives/runtime/src/offchain/storage.rs @@ -23,134 +23,130 @@ pub type StorageValue = StorageValueRef<'static>; /// An abstraction over local storage value. 
pub struct StorageValueRef<'a> { - key: &'a [u8], - kind: StorageKind, + key: &'a [u8], + kind: StorageKind, } impl<'a> StorageValueRef<'a> { - /// Create a new reference to a value in the persistent local storage. - pub fn persistent(key: &'a [u8]) -> Self { - Self { key, kind: StorageKind::PERSISTENT } - } - - /// Create a new reference to a value in the fork-aware local storage. - pub fn local(key: &'a [u8]) -> Self { - Self { key, kind: StorageKind::LOCAL } - } - - /// Set the value of the storage to encoding of given parameter. - /// - /// Note that the storage may be accessed by workers running concurrently, - /// if you happen to write a `get-check-set` pattern you should most likely - /// be using `mutate` instead. - pub fn set(&self, value: &impl codec::Encode) { - value.using_encoded(|val| { - sp_io::offchain::local_storage_set(self.kind, self.key, val) - }) - } - - /// Retrieve & decode the value from storage. - /// - /// Note that if you want to do some checks based on the value - /// and write changes after that you should rather be using `mutate`. - /// - /// The function returns `None` if the value was not found in storage, - /// otherwise a decoding of the value to requested type. - pub fn get(&self) -> Option> { - sp_io::offchain::local_storage_get(self.kind, self.key) - .map(|val| T::decode(&mut &*val).ok()) - } - - /// Retrieve & decode the value and set it to a new one atomically. - /// - /// Function `f` should return a new value that we should attempt to write to storage. - /// This function returns: - /// 1. `Ok(Ok(T))` in case the value has been successfully set. - /// 2. `Ok(Err(T))` in case the value was returned, but it couldn't have been set. - /// 3. `Err(_)` in case `f` returns an error. 
- pub fn mutate(&self, f: F) -> Result, E> where - T: codec::Codec, - F: FnOnce(Option>) -> Result - { - let value = sp_io::offchain::local_storage_get(self.kind, self.key); - let decoded = value.as_deref().map(|mut v| T::decode(&mut v).ok()); - let val = f(decoded)?; - let set = val.using_encoded(|new_val| { - sp_io::offchain::local_storage_compare_and_set( - self.kind, - self.key, - value, - new_val, - ) - }); - - if set { - Ok(Ok(val)) - } else { - Ok(Err(val)) - } - } + /// Create a new reference to a value in the persistent local storage. + pub fn persistent(key: &'a [u8]) -> Self { + Self { + key, + kind: StorageKind::PERSISTENT, + } + } + + /// Create a new reference to a value in the fork-aware local storage. + pub fn local(key: &'a [u8]) -> Self { + Self { + key, + kind: StorageKind::LOCAL, + } + } + + /// Set the value of the storage to encoding of given parameter. + /// + /// Note that the storage may be accessed by workers running concurrently, + /// if you happen to write a `get-check-set` pattern you should most likely + /// be using `mutate` instead. + pub fn set(&self, value: &impl codec::Encode) { + value.using_encoded(|val| sp_io::offchain::local_storage_set(self.kind, self.key, val)) + } + + /// Retrieve & decode the value from storage. + /// + /// Note that if you want to do some checks based on the value + /// and write changes after that you should rather be using `mutate`. + /// + /// The function returns `None` if the value was not found in storage, + /// otherwise a decoding of the value to requested type. + pub fn get(&self) -> Option> { + sp_io::offchain::local_storage_get(self.kind, self.key) + .map(|val| T::decode(&mut &*val).ok()) + } + + /// Retrieve & decode the value and set it to a new one atomically. + /// + /// Function `f` should return a new value that we should attempt to write to storage. + /// This function returns: + /// 1. `Ok(Ok(T))` in case the value has been successfully set. + /// 2. 
`Ok(Err(T))` in case the value was returned, but it couldn't have been set. + /// 3. `Err(_)` in case `f` returns an error. + pub fn mutate(&self, f: F) -> Result, E> + where + T: codec::Codec, + F: FnOnce(Option>) -> Result, + { + let value = sp_io::offchain::local_storage_get(self.kind, self.key); + let decoded = value.as_deref().map(|mut v| T::decode(&mut v).ok()); + let val = f(decoded)?; + let set = val.using_encoded(|new_val| { + sp_io::offchain::local_storage_compare_and_set(self.kind, self.key, value, new_val) + }); + + if set { + Ok(Ok(val)) + } else { + Ok(Err(val)) + } + } } #[cfg(test)] mod tests { - use super::*; - use sp_io::TestExternalities; - use sp_core::offchain::{ - OffchainExt, - OffchainStorage, - testing, - }; - - #[test] - fn should_set_and_get() { - let (offchain, state) = testing::TestOffchainExt::new(); - let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); - - t.execute_with(|| { - let val = StorageValue::persistent(b"testval"); - - assert_eq!(val.get::(), None); - - val.set(&15_u32); - - assert_eq!(val.get::(), Some(Some(15_u32))); - assert_eq!(val.get::>(), Some(None)); - assert_eq!( - state.read().persistent_storage.get(b"", b"testval"), - Some(vec![15_u8, 0, 0, 0]) - ); - }) - } - - #[test] - fn should_mutate() { - let (offchain, state) = testing::TestOffchainExt::new(); - let mut t = TestExternalities::default(); - t.register_extension(OffchainExt::new(offchain)); - - t.execute_with(|| { - let val = StorageValue::persistent(b"testval"); - - let result = val.mutate::(|val| { - assert_eq!(val, None); - - Ok(16_u32) - }); - assert_eq!(result, Ok(Ok(16_u32))); - assert_eq!(val.get::(), Some(Some(16_u32))); - assert_eq!( - state.read().persistent_storage.get(b"", b"testval"), - Some(vec![16_u8, 0, 0, 0]) - ); - - // mutate again, but this time early-exit. 
- let res = val.mutate::(|val| { - assert_eq!(val, Some(Some(16_u32))); - Err(()) - }); - assert_eq!(res, Err(())); - }) - } + use super::*; + use sp_core::offchain::{testing, OffchainExt, OffchainStorage}; + use sp_io::TestExternalities; + + #[test] + fn should_set_and_get() { + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + + t.execute_with(|| { + let val = StorageValue::persistent(b"testval"); + + assert_eq!(val.get::(), None); + + val.set(&15_u32); + + assert_eq!(val.get::(), Some(Some(15_u32))); + assert_eq!(val.get::>(), Some(None)); + assert_eq!( + state.read().persistent_storage.get(b"", b"testval"), + Some(vec![15_u8, 0, 0, 0]) + ); + }) + } + + #[test] + fn should_mutate() { + let (offchain, state) = testing::TestOffchainExt::new(); + let mut t = TestExternalities::default(); + t.register_extension(OffchainExt::new(offchain)); + + t.execute_with(|| { + let val = StorageValue::persistent(b"testval"); + + let result = val.mutate::(|val| { + assert_eq!(val, None); + + Ok(16_u32) + }); + assert_eq!(result, Ok(Ok(16_u32))); + assert_eq!(val.get::(), Some(Some(16_u32))); + assert_eq!( + state.read().persistent_storage.get(b"", b"testval"), + Some(vec![16_u8, 0, 0, 0]) + ); + + // mutate again, but this time early-exit. + let res = val.mutate::(|val| { + assert_eq!(val, Some(Some(16_u32))); + Err(()) + }); + assert_eq!(res, Err(())); + }) + } } diff --git a/primitives/runtime/src/random_number_generator.rs b/primitives/runtime/src/random_number_generator.rs index c3cd3dfb90..eb205f8a4c 100644 --- a/primitives/runtime/src/random_number_generator.rs +++ b/primitives/runtime/src/random_number_generator.rs @@ -17,8 +17,8 @@ //! A simple pseudo random number generator that allows a stream of random numbers to be efficiently //! created from a single initial seed hash. 
-use codec::{Encode, Decode}; use crate::traits::{Hash, TrailingZeroInput}; +use codec::{Decode, Encode}; /// Pseudo-random number streamer. This retains the state of the random number stream. It's as /// secure as the combination of the seed with which it is constructed and the hash function it uses @@ -44,60 +44,62 @@ use crate::traits::{Hash, TrailingZeroInput}; /// megabytes of data from it. #[derive(Encode, Decode)] pub struct RandomNumberGenerator { - current: Hashing::Output, - offset: u32, + current: Hashing::Output, + offset: u32, } impl RandomNumberGenerator { - /// A new source of random data. - pub fn new(seed: Hashing::Output) -> Self { - Self { - current: seed, - offset: 0, - } - } + /// A new source of random data. + pub fn new(seed: Hashing::Output) -> Self { + Self { + current: seed, + offset: 0, + } + } - fn offset(&self) -> usize { self.offset as usize } + fn offset(&self) -> usize { + self.offset as usize + } - /// Returns a number at least zero, at most `max`. - pub fn pick_u32(&mut self, max: u32) -> u32 { - let needed = (4 - max.leading_zeros() / 8) as usize; - let top = ((1 << (needed as u64 * 8)) / ((max + 1) as u64) * ((max + 1) as u64) - 1) as u32; - loop { - if self.offset() + needed > self.current.as_ref().len() { - // rehash - self.current = ::hash(self.current.as_ref()); - self.offset = 0; - } - let data = &self.current.as_ref()[self.offset()..self.offset() + needed]; - self.offset += needed as u32; - let raw = u32::decode(&mut TrailingZeroInput::new(data)).unwrap_or(0); - if raw <= top { - break if max < u32::max_value() { - raw % (max + 1) - } else { - raw - } - } - } - } + /// Returns a number at least zero, at most `max`. 
+ pub fn pick_u32(&mut self, max: u32) -> u32 { + let needed = (4 - max.leading_zeros() / 8) as usize; + let top = ((1 << (needed as u64 * 8)) / ((max + 1) as u64) * ((max + 1) as u64) - 1) as u32; + loop { + if self.offset() + needed > self.current.as_ref().len() { + // rehash + self.current = ::hash(self.current.as_ref()); + self.offset = 0; + } + let data = &self.current.as_ref()[self.offset()..self.offset() + needed]; + self.offset += needed as u32; + let raw = u32::decode(&mut TrailingZeroInput::new(data)).unwrap_or(0); + if raw <= top { + break if max < u32::max_value() { + raw % (max + 1) + } else { + raw + }; + } + } + } - /// Returns a number at least zero, at most `max`. - /// - /// This returns a `usize`, but internally it only uses `u32` so avoid consensus problems. - pub fn pick_usize(&mut self, max: usize) -> usize { - self.pick_u32(max as u32) as usize - } + /// Returns a number at least zero, at most `max`. + /// + /// This returns a `usize`, but internally it only uses `u32` so avoid consensus problems. + pub fn pick_usize(&mut self, max: usize) -> usize { + self.pick_u32(max as u32) as usize + } - /// Pick a random element from an array of `items`. - /// - /// This is guaranteed to return `Some` except in the case that the given array `items` is - /// empty. - pub fn pick_item<'a, T>(&mut self, items: &'a [T]) -> Option<&'a T> { - if items.is_empty() { - None - } else { - Some(&items[self.pick_usize(items.len() - 1)]) - } - } + /// Pick a random element from an array of `items`. + /// + /// This is guaranteed to return `Some` except in the case that the given array `items` is + /// empty. 
+ pub fn pick_item<'a, T>(&mut self, items: &'a [T]) -> Option<&'a T> { + if items.is_empty() { + None + } else { + Some(&items[self.pick_usize(items.len() - 1)]) + } + } } diff --git a/primitives/runtime/src/runtime_string.rs b/primitives/runtime/src/runtime_string.rs index e7ee927e08..f958069a06 100644 --- a/primitives/runtime/src/runtime_string.rs +++ b/primitives/runtime/src/runtime_string.rs @@ -14,104 +14,106 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_core::RuntimeDebug; use sp_std::vec::Vec; /// A string that wraps a `&'static str` in the runtime and `String`/`Vec` on decode. #[derive(Eq, RuntimeDebug, Clone)] pub enum RuntimeString { - /// The borrowed mode that wraps a `&'static str`. - Borrowed(&'static str), - /// The owned mode that wraps a `String`. - #[cfg(feature = "std")] - Owned(String), - /// The owned mode that wraps a `Vec`. - #[cfg(not(feature = "std"))] - Owned(Vec), + /// The borrowed mode that wraps a `&'static str`. + Borrowed(&'static str), + /// The owned mode that wraps a `String`. + #[cfg(feature = "std")] + Owned(String), + /// The owned mode that wraps a `Vec`. 
+ #[cfg(not(feature = "std"))] + Owned(Vec), } impl From<&'static str> for RuntimeString { - fn from(data: &'static str) -> Self { - Self::Borrowed(data) - } + fn from(data: &'static str) -> Self { + Self::Borrowed(data) + } } #[cfg(feature = "std")] impl From for String { - fn from(string: RuntimeString) -> Self { - match string { - RuntimeString::Borrowed(data) => data.to_owned(), - RuntimeString::Owned(data) => data, - } - } + fn from(string: RuntimeString) -> Self { + match string { + RuntimeString::Borrowed(data) => data.to_owned(), + RuntimeString::Owned(data) => data, + } + } } impl Default for RuntimeString { - fn default() -> Self { - Self::Borrowed(Default::default()) - } + fn default() -> Self { + Self::Borrowed(Default::default()) + } } impl PartialEq for RuntimeString { - fn eq(&self, other: &Self) -> bool { - self.as_ref() == other.as_ref() - } + fn eq(&self, other: &Self) -> bool { + self.as_ref() == other.as_ref() + } } impl AsRef<[u8]> for RuntimeString { - fn as_ref(&self) -> &[u8] { - match self { - Self::Borrowed(val) => val.as_ref(), - Self::Owned(val) => val.as_ref(), - } - } + fn as_ref(&self) -> &[u8] { + match self { + Self::Borrowed(val) => val.as_ref(), + Self::Owned(val) => val.as_ref(), + } + } } impl Encode for RuntimeString { - fn encode(&self) -> Vec { - match self { - Self::Borrowed(val) => val.encode(), - Self::Owned(val) => val.encode(), - } - } + fn encode(&self) -> Vec { + match self { + Self::Borrowed(val) => val.encode(), + Self::Owned(val) => val.encode(), + } + } } impl Decode for RuntimeString { - fn decode(value: &mut I) -> Result { - Decode::decode(value).map(Self::Owned) - } + fn decode(value: &mut I) -> Result { + Decode::decode(value).map(Self::Owned) + } } #[cfg(feature = "std")] impl std::fmt::Display for RuntimeString { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - match self { - Self::Borrowed(val) => write!(f, "{}", val), - Self::Owned(val) => write!(f, "{}", val), - } - } + fn fmt(&self, f: 
&mut std::fmt::Formatter) -> std::fmt::Result { + match self { + Self::Borrowed(val) => write!(f, "{}", val), + Self::Owned(val) => write!(f, "{}", val), + } + } } #[cfg(feature = "std")] impl serde::Serialize for RuntimeString { - fn serialize(&self, serializer: S) -> Result { - match self { - Self::Borrowed(val) => val.serialize(serializer), - Self::Owned(val) => val.serialize(serializer), - } - } + fn serialize(&self, serializer: S) -> Result { + match self { + Self::Borrowed(val) => val.serialize(serializer), + Self::Owned(val) => val.serialize(serializer), + } + } } #[cfg(feature = "std")] impl<'de> serde::Deserialize<'de> for RuntimeString { - fn deserialize>(de: D) -> Result { - String::deserialize(de).map(Self::Owned) - } + fn deserialize>(de: D) -> Result { + String::deserialize(de).map(Self::Owned) + } } /// Create a const [`RuntimeString`]. #[macro_export] macro_rules! create_runtime_str { - ( $y:expr ) => {{ $crate::RuntimeString::Borrowed($y) }} + ( $y:expr ) => {{ + $crate::RuntimeString::Borrowed($y) + }}; } diff --git a/primitives/runtime/src/testing.rs b/primitives/runtime/src/testing.rs index 1414a5f4f0..7982397021 100644 --- a/primitives/runtime/src/testing.rs +++ b/primitives/runtime/src/testing.rs @@ -16,128 +16,155 @@ //! Testing utilities. 
-use serde::{Serialize, Serializer, Deserialize, de::Error as DeError, Deserializer}; -use std::{fmt::Debug, ops::Deref, fmt, cell::RefCell}; -use crate::codec::{Codec, Encode, Decode}; +use crate::codec::{Codec, Decode, Encode}; +use crate::traits::ValidateUnsigned; use crate::traits::{ - self, Checkable, Applyable, BlakeTwo256, OpaqueKeys, - SignedExtension, Dispatchable, DispatchInfoOf, + self, Applyable, BlakeTwo256, Checkable, DispatchInfoOf, Dispatchable, OpaqueKeys, + SignedExtension, }; -use crate::traits::ValidateUnsigned; -use crate::{generic, KeyTypeId, ApplyExtrinsicResult}; -pub use sp_core::{H256, sr25519}; -use sp_core::{crypto::{CryptoType, Dummy, key_types, Public}, U256}; -use crate::transaction_validity::{TransactionValidity, TransactionValidityError, TransactionSource}; +use crate::transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, +}; +use crate::{generic, ApplyExtrinsicResult, KeyTypeId}; +use serde::{de::Error as DeError, Deserialize, Deserializer, Serialize, Serializer}; +use sp_core::{ + crypto::{key_types, CryptoType, Dummy, Public}, + U256, +}; +pub use sp_core::{sr25519, H256}; +use std::{cell::RefCell, fmt, fmt::Debug, ops::Deref}; /// Authority Id -#[derive(Default, PartialEq, Eq, Clone, Encode, Decode, Debug, Hash, Serialize, Deserialize, PartialOrd, Ord)] +#[derive( + Default, + PartialEq, + Eq, + Clone, + Encode, + Decode, + Debug, + Hash, + Serialize, + Deserialize, + PartialOrd, + Ord, +)] pub struct UintAuthorityId(pub u64); impl From for UintAuthorityId { - fn from(id: u64) -> Self { - UintAuthorityId(id) - } + fn from(id: u64) -> Self { + UintAuthorityId(id) + } } impl From for u64 { - fn from(id: UintAuthorityId) -> u64 { - id.0 - } + fn from(id: UintAuthorityId) -> u64 { + id.0 + } } impl UintAuthorityId { - /// Convert this authority id into a public key. 
- pub fn to_public_key(&self) -> T { - let bytes: [u8; 32] = U256::from(self.0).into(); - T::from_slice(&bytes) - } + /// Convert this authority id into a public key. + pub fn to_public_key(&self) -> T { + let bytes: [u8; 32] = U256::from(self.0).into(); + T::from_slice(&bytes) + } } impl CryptoType for UintAuthorityId { - type Pair = Dummy; + type Pair = Dummy; } impl AsRef<[u8]> for UintAuthorityId { - fn as_ref(&self) -> &[u8] { - // Unsafe, i know, but it's test code and it's just there because it's really convenient to - // keep `UintAuthorityId` as a u64 under the hood. - unsafe { - std::slice::from_raw_parts(&self.0 as *const u64 as *const _, std::mem::size_of::()) - } - } + fn as_ref(&self) -> &[u8] { + // Unsafe, i know, but it's test code and it's just there because it's really convenient to + // keep `UintAuthorityId` as a u64 under the hood. + unsafe { + std::slice::from_raw_parts( + &self.0 as *const u64 as *const _, + std::mem::size_of::(), + ) + } + } } thread_local! { - /// A list of all UintAuthorityId keys returned to the runtime. - static ALL_KEYS: RefCell> = RefCell::new(vec![]); + /// A list of all UintAuthorityId keys returned to the runtime. + static ALL_KEYS: RefCell> = RefCell::new(vec![]); } impl UintAuthorityId { - /// Set the list of keys returned by the runtime call for all keys of that type. - pub fn set_all_keys>(keys: impl IntoIterator) { - ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect()) - } + /// Set the list of keys returned by the runtime call for all keys of that type. 
+ pub fn set_all_keys>(keys: impl IntoIterator) { + ALL_KEYS.with(|l| *l.borrow_mut() = keys.into_iter().map(Into::into).collect()) + } } impl sp_application_crypto::RuntimeAppPublic for UintAuthorityId { - const ID: KeyTypeId = key_types::DUMMY; - - type Signature = u64; - - fn all() -> Vec { - ALL_KEYS.with(|l| l.borrow().clone()) - } - - fn generate_pair(_: Option>) -> Self { - use rand::RngCore; - UintAuthorityId(rand::thread_rng().next_u64()) - } - - fn sign>(&self, msg: &M) -> Option { - let mut signature = [0u8; 8]; - msg.as_ref().iter() - .chain(std::iter::repeat(&42u8)) - .take(8) - .enumerate() - .for_each(|(i, v)| { signature[i] = *v; }); - - Some(u64::from_le_bytes(signature)) - } - - fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { - let mut msg_signature = [0u8; 8]; - msg.as_ref().iter() - .chain(std::iter::repeat(&42)) - .take(8) - .enumerate() - .for_each(|(i, v)| { msg_signature[i] = *v; }); - - u64::from_le_bytes(msg_signature) == *signature - } - - fn to_raw_vec(&self) -> Vec { - AsRef::<[u8]>::as_ref(self).to_vec() - } + const ID: KeyTypeId = key_types::DUMMY; + + type Signature = u64; + + fn all() -> Vec { + ALL_KEYS.with(|l| l.borrow().clone()) + } + + fn generate_pair(_: Option>) -> Self { + use rand::RngCore; + UintAuthorityId(rand::thread_rng().next_u64()) + } + + fn sign>(&self, msg: &M) -> Option { + let mut signature = [0u8; 8]; + msg.as_ref() + .iter() + .chain(std::iter::repeat(&42u8)) + .take(8) + .enumerate() + .for_each(|(i, v)| { + signature[i] = *v; + }); + + Some(u64::from_le_bytes(signature)) + } + + fn verify>(&self, msg: &M, signature: &Self::Signature) -> bool { + let mut msg_signature = [0u8; 8]; + msg.as_ref() + .iter() + .chain(std::iter::repeat(&42)) + .take(8) + .enumerate() + .for_each(|(i, v)| { + msg_signature[i] = *v; + }); + + u64::from_le_bytes(msg_signature) == *signature + } + + fn to_raw_vec(&self) -> Vec { + AsRef::<[u8]>::as_ref(self).to_vec() + } } impl OpaqueKeys for UintAuthorityId { - type 
KeyTypeIdProviders = (); + type KeyTypeIdProviders = (); - fn key_ids() -> &'static [KeyTypeId] { - &[key_types::DUMMY] - } + fn key_ids() -> &'static [KeyTypeId] { + &[key_types::DUMMY] + } - fn get_raw(&self, _: KeyTypeId) -> &[u8] { - self.as_ref() - } + fn get_raw(&self, _: KeyTypeId) -> &[u8] { + self.as_ref() + } - fn get(&self, _: KeyTypeId) -> Option { - self.using_encoded(|mut x| T::decode(&mut x)).ok() - } + fn get(&self, _: KeyTypeId) -> Option { + self.using_encoded(|mut x| T::decode(&mut x)).ok() + } } impl crate::BoundToRuntimeAppPublic for UintAuthorityId { - type Public = Self; + type Public = Self; } /// Digest item @@ -147,75 +174,97 @@ pub type DigestItem = generic::DigestItem; pub type Digest = generic::Digest; /// Block Header -#[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, Default, parity_util_mem::MallocSizeOf)] +#[derive( + PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, Default, parity_util_mem::MallocSizeOf, +)] #[serde(rename_all = "camelCase")] #[serde(deny_unknown_fields)] pub struct Header { - /// Parent hash - pub parent_hash: H256, - /// Block Number - pub number: u64, - /// Post-execution state trie root - pub state_root: H256, - /// Merkle root of block's extrinsics - pub extrinsics_root: H256, - /// Digest items - pub digest: Digest, + /// Parent hash + pub parent_hash: H256, + /// Block Number + pub number: u64, + /// Post-execution state trie root + pub state_root: H256, + /// Merkle root of block's extrinsics + pub extrinsics_root: H256, + /// Digest items + pub digest: Digest, } impl traits::Header for Header { - type Number = u64; - type Hashing = BlakeTwo256; - type Hash = H256; - - fn number(&self) -> &Self::Number { &self.number } - fn set_number(&mut self, num: Self::Number) { self.number = num } - - fn extrinsics_root(&self) -> &Self::Hash { &self.extrinsics_root } - fn set_extrinsics_root(&mut self, root: Self::Hash) { self.extrinsics_root = root } - - fn state_root(&self) -> &Self::Hash { 
&self.state_root } - fn set_state_root(&mut self, root: Self::Hash) { self.state_root = root } - - fn parent_hash(&self) -> &Self::Hash { &self.parent_hash } - fn set_parent_hash(&mut self, hash: Self::Hash) { self.parent_hash = hash } - - fn digest(&self) -> &Digest { &self.digest } - fn digest_mut(&mut self) -> &mut Digest { &mut self.digest } - - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Digest, - ) -> Self { - Header { - number, - extrinsics_root, - state_root, - parent_hash, - digest, - } - } + type Number = u64; + type Hashing = BlakeTwo256; + type Hash = H256; + + fn number(&self) -> &Self::Number { + &self.number + } + fn set_number(&mut self, num: Self::Number) { + self.number = num + } + + fn extrinsics_root(&self) -> &Self::Hash { + &self.extrinsics_root + } + fn set_extrinsics_root(&mut self, root: Self::Hash) { + self.extrinsics_root = root + } + + fn state_root(&self) -> &Self::Hash { + &self.state_root + } + fn set_state_root(&mut self, root: Self::Hash) { + self.state_root = root + } + + fn parent_hash(&self) -> &Self::Hash { + &self.parent_hash + } + fn set_parent_hash(&mut self, hash: Self::Hash) { + self.parent_hash = hash + } + + fn digest(&self) -> &Digest { + &self.digest + } + fn digest_mut(&mut self) -> &mut Digest { + &mut self.digest + } + + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Digest, + ) -> Self { + Header { + number, + extrinsics_root, + state_root, + parent_hash, + digest, + } + } } impl Header { - /// A new header with the given number and default hash for all other fields. - pub fn new_from_number(number: ::Number) -> Self { - Self { - number, - ..Default::default() - } - } + /// A new header with the given number and default hash for all other fields. 
+ pub fn new_from_number(number: ::Number) -> Self { + Self { + number, + ..Default::default() + } + } } impl<'a> Deserialize<'a> for Header { - fn deserialize>(de: D) -> Result { - let r = >::deserialize(de)?; - Decode::decode(&mut &r[..]) - .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e.what()))) - } + fn deserialize>(de: D) -> Result { + let r = >::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e.what()))) + } } /// An opaque extrinsic wrapper type. @@ -223,75 +272,83 @@ impl<'a> Deserialize<'a> for Header { pub struct ExtrinsicWrapper(Xt); impl traits::Extrinsic for ExtrinsicWrapper -where Xt: parity_util_mem::MallocSizeOf +where + Xt: parity_util_mem::MallocSizeOf, { - type Call = (); - type SignaturePayload = (); + type Call = (); + type SignaturePayload = (); - fn is_signed(&self) -> Option { - None - } + fn is_signed(&self) -> Option { + None + } } impl serde::Serialize for ExtrinsicWrapper { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } impl From for ExtrinsicWrapper { - fn from(xt: Xt) -> Self { - ExtrinsicWrapper(xt) - } + fn from(xt: Xt) -> Self { + ExtrinsicWrapper(xt) + } } impl Deref for ExtrinsicWrapper { - type Target = Xt; + type Target = Xt; - fn deref(&self) -> &Self::Target { - &self.0 - } + fn deref(&self) -> &Self::Target { + &self.0 + } } /// Testing block #[derive(PartialEq, Eq, Clone, Serialize, Debug, Encode, Decode, parity_util_mem::MallocSizeOf)] pub struct Block { - /// Block header - pub header: Header, - /// List of extrinsics - pub extrinsics: Vec, + /// Block header + pub header: Header, + /// List of extrinsics + pub extrinsics: Vec, } -impl traits::Block - for Block +impl< + Xt: 'static + Codec + 
Sized + Send + Sync + Serialize + Clone + Eq + Debug + traits::Extrinsic, + > traits::Block for Block { - type Extrinsic = Xt; - type Header = Header; - type Hash =

::Hash; - - fn header(&self) -> &Self::Header { - &self.header - } - fn extrinsics(&self) -> &[Self::Extrinsic] { - &self.extrinsics[..] - } - fn deconstruct(self) -> (Self::Header, Vec) { - (self.header, self.extrinsics) - } - fn new(header: Self::Header, extrinsics: Vec) -> Self { - Block { header, extrinsics } - } - fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec { - (header, extrinsics).encode() - } + type Extrinsic = Xt; + type Header = Header; + type Hash =
::Hash; + + fn header(&self) -> &Self::Header { + &self.header + } + fn extrinsics(&self) -> &[Self::Extrinsic] { + &self.extrinsics[..] + } + fn deconstruct(self) -> (Self::Header, Vec) { + (self.header, self.extrinsics) + } + fn new(header: Self::Header, extrinsics: Vec) -> Self { + Block { header, extrinsics } + } + fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec { + (header, extrinsics).encode() + } } -impl<'a, Xt> Deserialize<'a> for Block where Block: Decode { - fn deserialize>(de: D) -> Result { - let r = >::deserialize(de)?; - Decode::decode(&mut &r[..]) - .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e.what()))) - } +impl<'a, Xt> Deserialize<'a> for Block +where + Block: Decode, +{ + fn deserialize>(de: D) -> Result { + let r = >::deserialize(de)?; + Decode::decode(&mut &r[..]) + .map_err(|e| DeError::custom(format!("Invalid value passed into decode: {}", e.what()))) + } } /// Test transaction, tuple of (sender, call, signed_extra) @@ -300,83 +357,104 @@ impl<'a, Xt> Deserialize<'a> for Block where Block: Decode { /// If sender is some then the transaction is signed otherwise it is unsigned. #[derive(PartialEq, Eq, Clone, Encode, Decode)] pub struct TestXt { - /// Signature of the extrinsic. - pub signature: Option<(u64, Extra)>, - /// Call of the extrinsic. - pub call: Call, + /// Signature of the extrinsic. + pub signature: Option<(u64, Extra)>, + /// Call of the extrinsic. + pub call: Call, } impl TestXt { - /// Create a new `TextXt`. - pub fn new(call: Call, signature: Option<(u64, Extra)>) -> Self { - Self { call, signature } - } + /// Create a new `TextXt`. + pub fn new(call: Call, signature: Option<(u64, Extra)>) -> Self { + Self { call, signature } + } } // Non-opaque extrinsics always 0. 
parity_util_mem::malloc_size_of_is_0!(any: TestXt); -impl Serialize for TestXt where TestXt: Encode { - fn serialize(&self, seq: S) -> Result where S: Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } +impl Serialize for TestXt +where + TestXt: Encode, +{ + fn serialize(&self, seq: S) -> Result + where + S: Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } impl Debug for TestXt { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "TestXt({:?}, ...)", self.signature.as_ref().map(|x| &x.0)) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "TestXt({:?}, ...)", + self.signature.as_ref().map(|x| &x.0) + ) + } } impl Checkable for TestXt { - type Checked = Self; - fn check(self, _: &Context) -> Result { Ok(self) } + type Checked = Self; + fn check(self, _: &Context) -> Result { + Ok(self) + } } impl traits::Extrinsic for TestXt { - type Call = Call; - type SignaturePayload = (u64, Extra); - - fn is_signed(&self) -> Option { - Some(self.signature.is_some()) - } - - fn new(c: Call, sig: Option) -> Option { - Some(TestXt { signature: sig, call: c }) - } + type Call = Call; + type SignaturePayload = (u64, Extra); + + fn is_signed(&self) -> Option { + Some(self.signature.is_some()) + } + + fn new(c: Call, sig: Option) -> Option { + Some(TestXt { + signature: sig, + call: c, + }) + } } -impl Applyable for TestXt where - Call: 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, - Extra: SignedExtension, - Origin: From>, +impl Applyable for TestXt +where + Call: + 'static + Sized + Send + Sync + Clone + Eq + Codec + Debug + Dispatchable, + Extra: SignedExtension, + Origin: From>, { - type Call = Call; - - /// Checks to see if this is a valid *transaction*. It returns information on it if so. 
- fn validate>( - &self, - _source: TransactionSource, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - Ok(Default::default()) - } - - /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, - /// index and sender. - fn apply>( - self, - info: &DispatchInfoOf, - len: usize, - ) -> ApplyExtrinsicResult { - let maybe_who = if let Some((who, extra)) = self.signature { - Extra::pre_dispatch(extra, &who, &self.call, info, len)?; - Some(who) - } else { - Extra::pre_dispatch_unsigned(&self.call, info, len)?; - None - }; - - Ok(self.call.dispatch(maybe_who.into()).map(|_| ()).map_err(|e| e.error)) - } + type Call = Call; + + /// Checks to see if this is a valid *transaction*. It returns information on it if so. + fn validate>( + &self, + _source: TransactionSource, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + Ok(Default::default()) + } + + /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, + /// index and sender. + fn apply>( + self, + info: &DispatchInfoOf, + len: usize, + ) -> ApplyExtrinsicResult { + let maybe_who = if let Some((who, extra)) = self.signature { + Extra::pre_dispatch(extra, &who, &self.call, info, len)?; + Some(who) + } else { + Extra::pre_dispatch_unsigned(&self.call, info, len)?; + None + }; + + Ok(self + .call + .dispatch(maybe_who.into()) + .map(|_| ()) + .map_err(|e| e.error)) + } } diff --git a/primitives/runtime/src/traits.rs b/primitives/runtime/src/traits.rs index d843bdc478..9851de2bd0 100644 --- a/primitives/runtime/src/traits.rs +++ b/primitives/runtime/src/traits.rs @@ -16,130 +16,155 @@ //! Primitives for the runtime modules. 
-use sp_std::prelude::*; -use sp_std::{self, marker::PhantomData, convert::{TryFrom, TryInto}, fmt::Debug}; +use crate::codec::{Codec, Decode, Encode}; +use crate::generic::{Digest, DigestItem}; +use crate::transaction_validity::{ + TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, + ValidTransaction, +}; +use crate::DispatchResult; +use impl_trait_for_tuples::impl_for_tuples; +#[cfg(feature = "std")] +use serde::{de::DeserializeOwned, Deserialize, Serialize}; +use sp_application_crypto::AppKey; +pub use sp_arithmetic::traits::{ + AtLeast32Bit, Bounded, CheckedAdd, CheckedDiv, CheckedMul, CheckedShl, CheckedShr, CheckedSub, + IntegerSquareRoot, One, SaturatedConversion, Saturating, UniqueSaturatedFrom, + UniqueSaturatedInto, Zero, +}; +use sp_core::{self, Hasher, RuntimeDebug, TypeId}; use sp_io; +use sp_std::prelude::*; +use sp_std::{ + self, + convert::{TryFrom, TryInto}, + fmt::Debug, + marker::PhantomData, +}; #[cfg(feature = "std")] use std::fmt::Display; #[cfg(feature = "std")] use std::str::FromStr; -#[cfg(feature = "std")] -use serde::{Serialize, Deserialize, de::DeserializeOwned}; -use sp_core::{self, Hasher, TypeId, RuntimeDebug}; -use crate::codec::{Codec, Encode, Decode}; -use crate::transaction_validity::{ - ValidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, - UnknownTransaction, -}; -use crate::generic::{Digest, DigestItem}; -pub use sp_arithmetic::traits::{ - AtLeast32Bit, UniqueSaturatedInto, UniqueSaturatedFrom, Saturating, SaturatedConversion, - Zero, One, Bounded, CheckedAdd, CheckedSub, CheckedMul, CheckedDiv, - CheckedShl, CheckedShr, IntegerSquareRoot -}; -use sp_application_crypto::AppKey; -use impl_trait_for_tuples::impl_for_tuples; -use crate::DispatchResult; /// A lazy value. pub trait Lazy { - /// Get a reference to the underlying value. - /// - /// This will compute the value if the function is invoked for the first time. 
- fn get(&mut self) -> &T; + /// Get a reference to the underlying value. + /// + /// This will compute the value if the function is invoked for the first time. + fn get(&mut self) -> &T; } impl<'a> Lazy<[u8]> for &'a [u8] { - fn get(&mut self) -> &[u8] { &**self } + fn get(&mut self) -> &[u8] { + &**self + } } /// Some type that is able to be collapsed into an account ID. It is not possible to recreate the /// original value from the account ID. pub trait IdentifyAccount { - /// The account ID that this can be transformed into. - type AccountId; - /// Transform into an account. - fn into_account(self) -> Self::AccountId; + /// The account ID that this can be transformed into. + type AccountId; + /// Transform into an account. + fn into_account(self) -> Self::AccountId; } impl IdentifyAccount for sp_core::ed25519::Public { - type AccountId = Self; - fn into_account(self) -> Self { self } + type AccountId = Self; + fn into_account(self) -> Self { + self + } } impl IdentifyAccount for sp_core::sr25519::Public { - type AccountId = Self; - fn into_account(self) -> Self { self } + type AccountId = Self; + fn into_account(self) -> Self { + self + } } impl IdentifyAccount for sp_core::ecdsa::Public { - type AccountId = Self; - fn into_account(self) -> Self { self } + type AccountId = Self; + fn into_account(self) -> Self { + self + } } /// Means of signature verification. pub trait Verify { - /// Type of the signer. - type Signer: IdentifyAccount; - /// Verify a signature. - /// - /// Return `true` if signature is valid for the value. - fn verify>(&self, msg: L, signer: &::AccountId) -> bool; + /// Type of the signer. + type Signer: IdentifyAccount; + /// Verify a signature. + /// + /// Return `true` if signature is valid for the value. 
+ fn verify>( + &self, + msg: L, + signer: &::AccountId, + ) -> bool; } impl Verify for sp_core::ed25519::Signature { - type Signer = sp_core::ed25519::Public; + type Signer = sp_core::ed25519::Public; - fn verify>(&self, mut msg: L, signer: &sp_core::ed25519::Public) -> bool { - sp_io::crypto::ed25519_verify(self, msg.get(), signer) - } + fn verify>(&self, mut msg: L, signer: &sp_core::ed25519::Public) -> bool { + sp_io::crypto::ed25519_verify(self, msg.get(), signer) + } } impl Verify for sp_core::sr25519::Signature { - type Signer = sp_core::sr25519::Public; + type Signer = sp_core::sr25519::Public; - fn verify>(&self, mut msg: L, signer: &sp_core::sr25519::Public) -> bool { - sp_io::crypto::sr25519_verify(self, msg.get(), signer) - } + fn verify>(&self, mut msg: L, signer: &sp_core::sr25519::Public) -> bool { + sp_io::crypto::sr25519_verify(self, msg.get(), signer) + } } impl Verify for sp_core::ecdsa::Signature { - type Signer = sp_core::ecdsa::Public; - fn verify>(&self, mut msg: L, signer: &sp_core::ecdsa::Public) -> bool { - match sp_io::crypto::secp256k1_ecdsa_recover_compressed( - self.as_ref(), - &sp_io::hashing::blake2_256(msg.get()), - ) { - Ok(pubkey) => &signer.as_ref()[..] == &pubkey[..], - _ => false, - } - } + type Signer = sp_core::ecdsa::Public; + fn verify>(&self, mut msg: L, signer: &sp_core::ecdsa::Public) -> bool { + match sp_io::crypto::secp256k1_ecdsa_recover_compressed( + self.as_ref(), + &sp_io::hashing::blake2_256(msg.get()), + ) { + Ok(pubkey) => &signer.as_ref()[..] == &pubkey[..], + _ => false, + } + } } /// Means of signature verification of an application key. pub trait AppVerify { - /// Type of the signer. - type AccountId; - /// Verify a signature. Return `true` if signature is valid for the value. - fn verify>(&self, msg: L, signer: &Self::AccountId) -> bool; + /// Type of the signer. + type AccountId; + /// Verify a signature. Return `true` if signature is valid for the value. 
+ fn verify>(&self, msg: L, signer: &Self::AccountId) -> bool; } impl< - S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + From, - T: sp_application_crypto::Wraps + sp_application_crypto::AppKey + sp_application_crypto::AppSignature + - AsRef + AsMut + From, -> AppVerify for T where - ::Signer: IdentifyAccount::Signer>, - <::Public as sp_application_crypto::AppPublic>::Generic: - IdentifyAccount::Public as sp_application_crypto::AppPublic>::Generic>, + S: Verify::Public as sp_application_crypto::AppPublic>::Generic> + + From, + T: sp_application_crypto::Wraps + + sp_application_crypto::AppKey + + sp_application_crypto::AppSignature + + AsRef + + AsMut + + From, + > AppVerify for T +where + ::Signer: IdentifyAccount::Signer>, + <::Public as sp_application_crypto::AppPublic>::Generic: IdentifyAccount< + AccountId = <::Public as sp_application_crypto::AppPublic>::Generic, + >, { - type AccountId = ::Public; - fn verify>(&self, msg: L, signer: &::Public) -> bool { - use sp_application_crypto::IsWrappedBy; - let inner: &S = self.as_ref(); - let inner_pubkey = <::Public as sp_application_crypto::AppPublic>::Generic::from_ref(&signer); - Verify::verify(inner, msg, inner_pubkey) - } + type AccountId = ::Public; + fn verify>(&self, msg: L, signer: &::Public) -> bool { + use sp_application_crypto::IsWrappedBy; + let inner: &S = self.as_ref(); + let inner_pubkey = + <::Public as sp_application_crypto::AppPublic>::Generic::from_ref(&signer); + Verify::verify(inner, msg, inner_pubkey) + } } /// An error type that indicates that the origin is invalid. @@ -147,9 +172,9 @@ impl< pub struct BadOrigin; impl From for &'static str { - fn from(_: BadOrigin) -> &'static str { - "Bad origin" - } + fn from(_: BadOrigin) -> &'static str { + "Bad origin" + } } /// An error that indicates that a lookup failed. 
@@ -157,126 +182,150 @@ impl From for &'static str { pub struct LookupError; impl From for &'static str { - fn from(_: LookupError) -> &'static str { - "Can not lookup" - } + fn from(_: LookupError) -> &'static str { + "Can not lookup" + } } impl From for TransactionValidityError { - fn from(_: LookupError) -> Self { - UnknownTransaction::CannotLookup.into() - } + fn from(_: LookupError) -> Self { + UnknownTransaction::CannotLookup.into() + } } /// Means of changing one type into another in a manner dependent on the source type. pub trait Lookup { - /// Type to lookup from. - type Source; - /// Type to lookup into. - type Target; - /// Attempt a lookup. - fn lookup(&self, s: Self::Source) -> Result; + /// Type to lookup from. + type Source; + /// Type to lookup into. + type Target; + /// Attempt a lookup. + fn lookup(&self, s: Self::Source) -> Result; } /// Means of changing one type into another in a manner dependent on the source type. /// This variant is different to `Lookup` in that it doesn't (can cannot) require any /// context. pub trait StaticLookup { - /// Type to lookup from. - type Source: Codec + Clone + PartialEq + Debug; - /// Type to lookup into. - type Target; - /// Attempt a lookup. - fn lookup(s: Self::Source) -> Result; - /// Convert from Target back to Source. - fn unlookup(t: Self::Target) -> Self::Source; + /// Type to lookup from. + type Source: Codec + Clone + PartialEq + Debug; + /// Type to lookup into. + type Target; + /// Attempt a lookup. + fn lookup(s: Self::Source) -> Result; + /// Convert from Target back to Source. + fn unlookup(t: Self::Target) -> Self::Source; } /// A lookup implementation returning the input value. 
#[derive(Default)] pub struct IdentityLookup(PhantomData); impl StaticLookup for IdentityLookup { - type Source = T; - type Target = T; - fn lookup(x: T) -> Result { Ok(x) } - fn unlookup(x: T) -> T { x } + type Source = T; + type Target = T; + fn lookup(x: T) -> Result { + Ok(x) + } + fn unlookup(x: T) -> T { + x + } } impl Lookup for IdentityLookup { - type Source = T; - type Target = T; - fn lookup(&self, x: T) -> Result { Ok(x) } + type Source = T; + type Target = T; + fn lookup(&self, x: T) -> Result { + Ok(x) + } } /// Extensible conversion trait. Generic over both source and destination types. pub trait Convert { - /// Make conversion. - fn convert(a: A) -> B; + /// Make conversion. + fn convert(a: A) -> B; } impl Convert for () { - fn convert(_: A) -> B { Default::default() } + fn convert(_: A) -> B { + Default::default() + } } /// A structure that performs identity conversion. pub struct Identity; impl Convert for Identity { - fn convert(a: T) -> T { a } + fn convert(a: T) -> T { + a + } } /// A structure that performs standard conversion using the standard Rust conversion traits. pub struct ConvertInto; impl> Convert for ConvertInto { - fn convert(a: A) -> B { a.into() } + fn convert(a: A) -> B { + a.into() + } } /// Convenience type to work around the highly unergonomic syntax needed /// to invoke the functions of overloaded generic traits, in this case /// `TryFrom` and `TryInto`. pub trait CheckedConversion { - /// Convert from a value of `T` into an equivalent instance of `Option`. - /// - /// This just uses `TryFrom` internally but with this - /// variant you can provide the destination type using turbofish syntax - /// in case Rust happens not to assume the correct type. - fn checked_from(t: T) -> Option where Self: TryFrom { - >::try_from(t).ok() - } - /// Consume self to return `Some` equivalent value of `Option`. 
- /// - /// This just uses `TryInto` internally but with this - /// variant you can provide the destination type using turbofish syntax - /// in case Rust happens not to assume the correct type. - fn checked_into(self) -> Option where Self: TryInto { - >::try_into(self).ok() - } + /// Convert from a value of `T` into an equivalent instance of `Option`. + /// + /// This just uses `TryFrom` internally but with this + /// variant you can provide the destination type using turbofish syntax + /// in case Rust happens not to assume the correct type. + fn checked_from(t: T) -> Option + where + Self: TryFrom, + { + >::try_from(t).ok() + } + /// Consume self to return `Some` equivalent value of `Option`. + /// + /// This just uses `TryInto` internally but with this + /// variant you can provide the destination type using turbofish syntax + /// in case Rust happens not to assume the correct type. + fn checked_into(self) -> Option + where + Self: TryInto, + { + >::try_into(self).ok() + } } impl CheckedConversion for T {} /// Multiply and divide by a number that isn't necessarily the same type. Basically just the same /// as `Mul` and `Div` except it can be used for all basic numeric types. pub trait Scale { - /// The output type of the product of `self` and `Other`. - type Output; + /// The output type of the product of `self` and `Other`. + type Output; - /// @return the product of `self` and `other`. - fn mul(self, other: Other) -> Self::Output; + /// @return the product of `self` and `other`. + fn mul(self, other: Other) -> Self::Output; - /// @return the integer division of `self` and `other`. - fn div(self, other: Other) -> Self::Output; + /// @return the integer division of `self` and `other`. + fn div(self, other: Other) -> Self::Output; - /// @return the modulo remainder of `self` and `other`. - fn rem(self, other: Other) -> Self::Output; + /// @return the modulo remainder of `self` and `other`. + fn rem(self, other: Other) -> Self::Output; } macro_rules! 
impl_scale { - ($self:ty, $other:ty) => { - impl Scale<$other> for $self { - type Output = Self; - fn mul(self, other: $other) -> Self::Output { self * (other as Self) } - fn div(self, other: $other) -> Self::Output { self / (other as Self) } - fn rem(self, other: $other) -> Self::Output { self % (other as Self) } - } - } + ($self:ty, $other:ty) => { + impl Scale<$other> for $self { + type Output = Self; + fn mul(self, other: $other) -> Self::Output { + self * (other as Self) + } + fn div(self, other: $other) -> Self::Output { + self / (other as Self) + } + fn rem(self, other: $other) -> Self::Output { + self % (other as Self) + } + } + }; } impl_scale!(u128, u128); impl_scale!(u128, u64); @@ -297,55 +346,80 @@ impl_scale!(u8, u8); /// Trait for things that can be clear (have no bits set). For numeric types, essentially the same /// as `Zero`. pub trait Clear { - /// True iff no bits are set. - fn is_clear(&self) -> bool; + /// True iff no bits are set. + fn is_clear(&self) -> bool; - /// Return the value of Self that is clear. - fn clear() -> Self; + /// Return the value of Self that is clear. + fn clear() -> Self; } impl Clear for T { - fn is_clear(&self) -> bool { *self == Self::clear() } - fn clear() -> Self { Default::default() } + fn is_clear(&self) -> bool { + *self == Self::clear() + } + fn clear() -> Self { + Default::default() + } } /// A meta trait for all bit ops. pub trait SimpleBitOps: - Sized + Clear + - sp_std::ops::BitOr + - sp_std::ops::BitXor + - sp_std::ops::BitAnd -{} -impl + - sp_std::ops::BitXor + - sp_std::ops::BitAnd -> SimpleBitOps for T {} + Sized + + Clear + + sp_std::ops::BitOr + + sp_std::ops::BitXor + + sp_std::ops::BitAnd +{ +} +impl< + T: Sized + + Clear + + sp_std::ops::BitOr + + sp_std::ops::BitXor + + sp_std::ops::BitAnd, + > SimpleBitOps for T +{ +} /// Abstraction around hashing // Stupid bug in the Rust compiler believes derived // traits must be fulfilled by all type parameters. 
-pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + PartialEq + Hasher::Output> { - /// The hash type produced. - type Output: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash - + AsRef<[u8]> + AsMut<[u8]> + Copy + Default + Encode + Decode; - - /// Produce the hash of some byte-slice. - fn hash(s: &[u8]) -> Self::Output { - ::hash(s) - } - - /// Produce the hash of some codec-encodable value. - fn hash_of(s: &S) -> Self::Output { - Encode::using_encoded(s, ::hash) - } - - /// The ordered Patricia tree root of the given `input`. - fn ordered_trie_root(input: Vec>) -> Self::Output; - - /// The Patricia tree root of the given mapping. - fn trie_root(input: Vec<(Vec, Vec)>) -> Self::Output; +pub trait Hash: + 'static + + MaybeSerializeDeserialize + + Debug + + Clone + + Eq + + PartialEq + + Hasher::Output> +{ + /// The hash type produced. + type Output: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + Copy + + Default + + Encode + + Decode; + + /// Produce the hash of some byte-slice. + fn hash(s: &[u8]) -> Self::Output { + ::hash(s) + } + + /// Produce the hash of some codec-encodable value. + fn hash_of(s: &S) -> Self::Output { + Encode::using_encoded(s, ::hash) + } + + /// The ordered Patricia tree root of the given `input`. + fn ordered_trie_root(input: Vec>) -> Self::Output; + + /// The Patricia tree root of the given mapping. + fn trie_root(input: Vec<(Vec, Vec)>) -> Self::Output; } /// Blake2-256 Hash implementation. 
@@ -354,72 +428,75 @@ pub trait Hash: 'static + MaybeSerializeDeserialize + Debug + Clone + Eq + Parti pub struct BlakeTwo256; impl Hasher for BlakeTwo256 { - type Out = sp_core::H256; - type StdHasher = hash256_std_hasher::Hash256StdHasher; - const LENGTH: usize = 32; + type Out = sp_core::H256; + type StdHasher = hash256_std_hasher::Hash256StdHasher; + const LENGTH: usize = 32; - fn hash(s: &[u8]) -> Self::Out { - sp_io::hashing::blake2_256(s).into() - } + fn hash(s: &[u8]) -> Self::Out { + sp_io::hashing::blake2_256(s).into() + } } impl Hash for BlakeTwo256 { - type Output = sp_core::H256; + type Output = sp_core::H256; - fn trie_root(input: Vec<(Vec, Vec)>) -> Self::Output { - sp_io::trie::blake2_256_root(input) - } + fn trie_root(input: Vec<(Vec, Vec)>) -> Self::Output { + sp_io::trie::blake2_256_root(input) + } - fn ordered_trie_root(input: Vec>) -> Self::Output { - sp_io::trie::blake2_256_ordered_root(input) - } + fn ordered_trie_root(input: Vec>) -> Self::Output { + sp_io::trie::blake2_256_ordered_root(input) + } } /// Something that can be checked for equality and printed out to a debug channel if bad. pub trait CheckEqual { - /// Perform the equality check. - fn check_equal(&self, other: &Self); + /// Perform the equality check. 
+ fn check_equal(&self, other: &Self); } impl CheckEqual for sp_core::H256 { - #[cfg(feature = "std")] - fn check_equal(&self, other: &Self) { - use sp_core::hexdisplay::HexDisplay; - if self != other { - println!( - "Hash: given={}, expected={}", - HexDisplay::from(self.as_fixed_bytes()), - HexDisplay::from(other.as_fixed_bytes()), - ); - } - } - - #[cfg(not(feature = "std"))] - fn check_equal(&self, other: &Self) { - if self != other { - "Hash not equal".print(); - self.as_bytes().print(); - other.as_bytes().print(); - } - } -} - -impl CheckEqual for super::generic::DigestItem where H: Encode { - #[cfg(feature = "std")] - fn check_equal(&self, other: &Self) { - if self != other { - println!("DigestItem: given={:?}, expected={:?}", self, other); - } - } - - #[cfg(not(feature = "std"))] - fn check_equal(&self, other: &Self) { - if self != other { - "DigestItem not equal".print(); - (&Encode::encode(self)[..]).print(); - (&Encode::encode(other)[..]).print(); - } - } + #[cfg(feature = "std")] + fn check_equal(&self, other: &Self) { + use sp_core::hexdisplay::HexDisplay; + if self != other { + println!( + "Hash: given={}, expected={}", + HexDisplay::from(self.as_fixed_bytes()), + HexDisplay::from(other.as_fixed_bytes()), + ); + } + } + + #[cfg(not(feature = "std"))] + fn check_equal(&self, other: &Self) { + if self != other { + "Hash not equal".print(); + self.as_bytes().print(); + other.as_bytes().print(); + } + } +} + +impl CheckEqual for super::generic::DigestItem +where + H: Encode, +{ + #[cfg(feature = "std")] + fn check_equal(&self, other: &Self) { + if self != other { + println!("DigestItem: given={:?}, expected={:?}", self, other); + } + } + + #[cfg(not(feature = "std"))] + fn check_equal(&self, other: &Self) { + if self != other { + "DigestItem not equal".print(); + (&Encode::encode(self)[..]).print(); + (&Encode::encode(other)[..]).print(); + } + } } sp_core::impl_maybe_marker!( @@ -448,8 +525,8 @@ impl Member f /// Determine if a `MemberId` is a valid 
member. pub trait IsMember { - /// Is the given `MemberId` a valid member? - fn is_member(member_id: &MemberId) -> bool; + /// Is the given `MemberId` a valid member? + fn is_member(member_id: &MemberId) -> bool; } /// Something which fulfills the abstract idea of a Substrate header. It has types for a `Number`, @@ -458,124 +535,155 @@ pub trait IsMember { /// /// You can also create a `new` one from those fields. pub trait Header: - Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + - MaybeMallocSizeOf + 'static + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static { - /// Header number. - type Number: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash - + Copy + MaybeDisplay + AtLeast32Bit + Codec + sp_std::str::FromStr - + MaybeMallocSizeOf; - /// Header hash type - type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord - + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> - + AsMut<[u8]> + MaybeMallocSizeOf; - /// Hashing algorithm - type Hashing: Hash; - - /// Creates new header. - fn new( - number: Self::Number, - extrinsics_root: Self::Hash, - state_root: Self::Hash, - parent_hash: Self::Hash, - digest: Digest, - ) -> Self; - - /// Returns a reference to the header number. - fn number(&self) -> &Self::Number; - /// Sets the header number. - fn set_number(&mut self, number: Self::Number); - - /// Returns a reference to the extrinsics root. - fn extrinsics_root(&self) -> &Self::Hash; - /// Sets the extrinsic root. - fn set_extrinsics_root(&mut self, root: Self::Hash); - - /// Returns a reference to the state root. - fn state_root(&self) -> &Self::Hash; - /// Sets the state root. - fn set_state_root(&mut self, root: Self::Hash); - - /// Returns a reference to the parent hash. - fn parent_hash(&self) -> &Self::Hash; - /// Sets the parent hash. - fn set_parent_hash(&mut self, hash: Self::Hash); - - /// Returns a reference to the digest. 
- fn digest(&self) -> &Digest; - /// Get a mutable reference to the digest. - fn digest_mut(&mut self) -> &mut Digest; - - /// Returns the hash of the header. - fn hash(&self) -> Self::Hash { - ::hash_of(self) - } + /// Header number. + type Number: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Copy + + MaybeDisplay + + AtLeast32Bit + + Codec + + sp_std::str::FromStr + + MaybeMallocSizeOf; + /// Header hash type + type Hash: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Ord + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Codec + + AsRef<[u8]> + + AsMut<[u8]> + + MaybeMallocSizeOf; + /// Hashing algorithm + type Hashing: Hash; + + /// Creates new header. + fn new( + number: Self::Number, + extrinsics_root: Self::Hash, + state_root: Self::Hash, + parent_hash: Self::Hash, + digest: Digest, + ) -> Self; + + /// Returns a reference to the header number. + fn number(&self) -> &Self::Number; + /// Sets the header number. + fn set_number(&mut self, number: Self::Number); + + /// Returns a reference to the extrinsics root. + fn extrinsics_root(&self) -> &Self::Hash; + /// Sets the extrinsic root. + fn set_extrinsics_root(&mut self, root: Self::Hash); + + /// Returns a reference to the state root. + fn state_root(&self) -> &Self::Hash; + /// Sets the state root. + fn set_state_root(&mut self, root: Self::Hash); + + /// Returns a reference to the parent hash. + fn parent_hash(&self) -> &Self::Hash; + /// Sets the parent hash. + fn set_parent_hash(&mut self, hash: Self::Hash); + + /// Returns a reference to the digest. + fn digest(&self) -> &Digest; + /// Get a mutable reference to the digest. + fn digest_mut(&mut self) -> &mut Digest; + + /// Returns the hash of the header. + fn hash(&self) -> Self::Hash { + ::hash_of(self) + } } /// Something which fulfills the abstract idea of a Substrate block. It has types for /// `Extrinsic` pieces of information as well as a `Header`. 
/// /// You can get an iterator over each of the `extrinsics` and retrieve the `header`. -pub trait Block: Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static { - /// Type for extrinsics. - type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize + MaybeMallocSizeOf; - /// Header type. - type Header: Header + MaybeMallocSizeOf; - /// Block hash type. - type Hash: Member + MaybeSerializeDeserialize + Debug + sp_std::hash::Hash + Ord - + Copy + MaybeDisplay + Default + SimpleBitOps + Codec + AsRef<[u8]> + AsMut<[u8]> - + MaybeMallocSizeOf; - - /// Returns a reference to the header. - fn header(&self) -> &Self::Header; - /// Returns a reference to the list of extrinsics. - fn extrinsics(&self) -> &[Self::Extrinsic]; - /// Split the block into header and list of extrinsics. - fn deconstruct(self) -> (Self::Header, Vec); - /// Creates new block from header and extrinsics. - fn new(header: Self::Header, extrinsics: Vec) -> Self; - /// Returns the hash of the block. - fn hash(&self) -> Self::Hash { - <::Hashing as Hash>::hash_of(self.header()) - } - /// Creates an encoded block from the given `header` and `extrinsics` without requiring the - /// creation of an instance. - fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec; +pub trait Block: + Clone + Send + Sync + Codec + Eq + MaybeSerialize + Debug + MaybeMallocSizeOf + 'static +{ + /// Type for extrinsics. + type Extrinsic: Member + Codec + Extrinsic + MaybeSerialize + MaybeMallocSizeOf; + /// Header type. + type Header: Header + MaybeMallocSizeOf; + /// Block hash type. + type Hash: Member + + MaybeSerializeDeserialize + + Debug + + sp_std::hash::Hash + + Ord + + Copy + + MaybeDisplay + + Default + + SimpleBitOps + + Codec + + AsRef<[u8]> + + AsMut<[u8]> + + MaybeMallocSizeOf; + + /// Returns a reference to the header. + fn header(&self) -> &Self::Header; + /// Returns a reference to the list of extrinsics. 
+ fn extrinsics(&self) -> &[Self::Extrinsic]; + /// Split the block into header and list of extrinsics. + fn deconstruct(self) -> (Self::Header, Vec); + /// Creates new block from header and extrinsics. + fn new(header: Self::Header, extrinsics: Vec) -> Self; + /// Returns the hash of the block. + fn hash(&self) -> Self::Hash { + <::Hashing as Hash>::hash_of(self.header()) + } + /// Creates an encoded block from the given `header` and `extrinsics` without requiring the + /// creation of an instance. + fn encode_from(header: &Self::Header, extrinsics: &[Self::Extrinsic]) -> Vec; } - /// Something that acts like an `Extrinsic`. pub trait Extrinsic: Sized + MaybeMallocSizeOf { - /// The function call. - type Call; - - /// The payload we carry for signed extrinsics. - /// - /// Usually it will contain a `Signature` and - /// may include some additional data that are specific to signed - /// extrinsics. - type SignaturePayload; - - /// Is this `Extrinsic` signed? - /// If no information are available about signed/unsigned, `None` should be returned. - fn is_signed(&self) -> Option { None } - - /// Create new instance of the extrinsic. - /// - /// Extrinsics can be split into: - /// 1. Inherents (no signature; created by validators during block production) - /// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of calls) - /// 3. Signed Transactions (with signature; a regular transactions with known origin) - fn new(_call: Self::Call, _signed_data: Option) -> Option { None } + /// The function call. + type Call; + + /// The payload we carry for signed extrinsics. + /// + /// Usually it will contain a `Signature` and + /// may include some additional data that are specific to signed + /// extrinsics. + type SignaturePayload; + + /// Is this `Extrinsic` signed? + /// If no information are available about signed/unsigned, `None` should be returned. 
+ fn is_signed(&self) -> Option { + None + } + + /// Create new instance of the extrinsic. + /// + /// Extrinsics can be split into: + /// 1. Inherents (no signature; created by validators during block production) + /// 2. Unsigned Transactions (no signature; represent "system calls" or other special kinds of calls) + /// 3. Signed Transactions (with signature; a regular transactions with known origin) + fn new(_call: Self::Call, _signed_data: Option) -> Option { + None + } } /// Implementor is an [`Extrinsic`] and provides metadata about this extrinsic. pub trait ExtrinsicMetadata { - /// The version of the `Extrinsic`. - const VERSION: u8; + /// The version of the `Extrinsic`. + const VERSION: u8; - /// Signed extensions attached to this `Extrinsic`. - type SignedExtensions: SignedExtension; + /// Signed extensions attached to this `Extrinsic`. + type SignedExtensions: SignedExtension; } /// Extract the hashing type for a block. @@ -592,11 +700,11 @@ pub type DigestItemFor = DigestItem<<::Header as Header>::Hash>; /// Implement for pieces of information that require some additional context `Context` in order to be /// checked. pub trait Checkable: Sized { - /// Returned if `check` succeeds. - type Checked; + /// Returned if `check` succeeds. + type Checked; - /// Check self, given an instance of Context. - fn check(self, c: &Context) -> Result; + /// Check self, given an instance of Context. + fn check(self, c: &Context) -> Result; } /// A "checkable" piece of information, used by the standard Substrate Executive in order to @@ -604,40 +712,40 @@ pub trait Checkable: Sized { /// Implement for pieces of information that don't require additional context in order to be /// checked. pub trait BlindCheckable: Sized { - /// Returned if `check` succeeds. - type Checked; + /// Returned if `check` succeeds. + type Checked; - /// Check self. - fn check(self) -> Result; + /// Check self. 
+ fn check(self) -> Result; } // Every `BlindCheckable` is also a `StaticCheckable` for arbitrary `Context`. impl Checkable for T { - type Checked = ::Checked; + type Checked = ::Checked; - fn check(self, _c: &Context) -> Result { - BlindCheckable::check(self) - } + fn check(self, _c: &Context) -> Result { + BlindCheckable::check(self) + } } /// A lazy call (module function and argument values) that can be executed via its `dispatch` /// method. pub trait Dispatchable { - /// Every function call from your runtime has an origin, which specifies where the extrinsic was - /// generated from. In the case of a signed extrinsic (transaction), the origin contains an - /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. - type Origin; - /// ... - type Trait; - /// An opaque set of information attached to the transaction. This could be constructed anywhere - /// down the line in a runtime. The current Substrate runtime uses a struct with the same name - /// to represent the dispatch class and weight. - type Info; - /// Additional information that is returned by `dispatch`. Can be used to supply the caller - /// with information about a `Dispatchable` that is ownly known post dispatch. - type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; - /// Actually dispatch this call and return the result of it. - fn dispatch(self, origin: Self::Origin) -> crate::DispatchResultWithInfo; + /// Every function call from your runtime has an origin, which specifies where the extrinsic was + /// generated from. In the case of a signed extrinsic (transaction), the origin contains an + /// identifier for the caller. The origin can be empty in the case of an inherent extrinsic. + type Origin; + /// ... + type Trait; + /// An opaque set of information attached to the transaction. This could be constructed anywhere + /// down the line in a runtime. 
The current Substrate runtime uses a struct with the same name + /// to represent the dispatch class and weight. + type Info; + /// Additional information that is returned by `dispatch`. Can be used to supply the caller + /// with information about a `Dispatchable` that is ownly known post dispatch. + type PostInfo: Eq + PartialEq + Clone + Copy + Encode + Decode + Printable; + /// Actually dispatch this call and return the result of it. + fn dispatch(self, origin: Self::Origin) -> crate::DispatchResultWithInfo; } /// Shortcut to reference the `Info` type of a `Dispatchable`. @@ -646,225 +754,231 @@ pub type DispatchInfoOf = ::Info; pub type PostDispatchInfoOf = ::PostInfo; impl Dispatchable for () { - type Origin = (); - type Trait = (); - type Info = (); - type PostInfo = (); - fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo { - panic!("This implemention should not be used for actual dispatch."); - } + type Origin = (); + type Trait = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::Origin) -> crate::DispatchResultWithInfo { + panic!("This implemention should not be used for actual dispatch."); + } } /// Means by which a transaction may be extended. This type embodies both the data and the logic /// that should be additionally associated with the transaction. It should be plain old data. pub trait SignedExtension: Codec + Debug + Sync + Send + Clone + Eq + PartialEq { - /// Unique identifier of this signed extension. - /// - /// This will be exposed in the metadata to identify the signed extension used - /// in an extrinsic. - const IDENTIFIER: &'static str; - - /// The type which encodes the sender identity. - type AccountId; - - /// The type which encodes the call to be dispatched. - type Call: Dispatchable; - - /// Any additional data that will go into the signed payload. This may be created dynamically - /// from the transaction using the `additional_signed` function. 
- type AdditionalSigned: Encode; - - /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. - type Pre: Default; - - /// Construct any additional data that should be in the signed payload of the transaction. Can - /// also perform any pre-signature-verification checks and return an error if needed. - fn additional_signed(&self) -> Result; - - /// Validate a signed transaction for the transaction queue. - /// - /// This function can be called frequently by the transaction queue, - /// to obtain transaction validity against current state. - /// It should perform all checks that determine a valid transaction, - /// that can pay for its execution and quickly eliminate ones - /// that are stale or incorrect. - /// - /// Make sure to perform the same checks in `pre_dispatch` function. - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - Ok(ValidTransaction::default()) - } - - /// Do any pre-flight stuff for a signed transaction. - /// - /// Note this function by default delegates to `validate`, so that - /// all checks performed for the transaction queue are also performed during - /// the dispatch phase (applying the extrinsic). - /// - /// If you ever override this function, you need to make sure to always - /// perform the same validation as in `validate`. - fn pre_dispatch( - self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - self.validate(who, call, info.clone(), len) - .map(|_| Self::Pre::default()) - .map_err(Into::into) - } - - /// Validate an unsigned transaction for the transaction queue. - /// - /// This function can be called frequently by the transaction queue - /// to obtain transaction validity against current state. - /// It should perform all checks that determine a valid unsigned transaction, - /// and quickly eliminate ones that are stale or incorrect. 
- /// - /// Make sure to perform the same checks in `pre_dispatch_unsigned` function. - fn validate_unsigned( - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - Ok(ValidTransaction::default()) - } - - /// Do any pre-flight stuff for a unsigned transaction. - /// - /// Note this function by default delegates to `validate_unsigned`, so that - /// all checks performed for the transaction queue are also performed during - /// the dispatch phase (applying the extrinsic). - /// - /// If you ever override this function, you need to make sure to always - /// perform the same validation as in `validate_unsigned`. - fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - Self::validate_unsigned(call, info.clone(), len) - .map(|_| Self::Pre::default()) - .map_err(Into::into) - } - - /// Do any post-flight stuff for an extrinsic. - /// - /// This gets given the `DispatchResult` `_result` from the extrinsic and can, if desired, - /// introduce a `TransactionValidityError`, causing the block to become invalid for including - /// it. - /// - /// WARNING: It is dangerous to return an error here. To do so will fundamentally invalidate the - /// transaction and any block that it is included in, causing the block author to not be - /// compensated for their work in validating the transaction or producing the block so far. - /// - /// It can only be used safely when you *know* that the extrinsic is one that can only be - /// introduced by the current block author; generally this implies that it is an inherent and - /// will come from either an offchain-worker or via `InherentData`. - fn post_dispatch( - _pre: Self::Pre, - _info: &DispatchInfoOf, - _post_info: &PostDispatchInfoOf, - _len: usize, - _result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - Ok(()) - } - - /// Returns the list of unique identifier for this signed extension. 
- /// - /// As a [`SignedExtension`] can be a tuple of [`SignedExtension`]`s we need to return a `Vec` - /// that holds all the unique identifiers. Each individual `SignedExtension` must return - /// *exactly* one identifier. - /// - /// This method provides a default implementation that returns `vec![SELF::IDENTIFIER]`. - fn identifier() -> Vec<&'static str> { - sp_std::vec![Self::IDENTIFIER] - } + /// Unique identifier of this signed extension. + /// + /// This will be exposed in the metadata to identify the signed extension used + /// in an extrinsic. + const IDENTIFIER: &'static str; + + /// The type which encodes the sender identity. + type AccountId; + + /// The type which encodes the call to be dispatched. + type Call: Dispatchable; + + /// Any additional data that will go into the signed payload. This may be created dynamically + /// from the transaction using the `additional_signed` function. + type AdditionalSigned: Encode; + + /// The type that encodes information that can be passed from pre_dispatch to post-dispatch. + type Pre: Default; + + /// Construct any additional data that should be in the signed payload of the transaction. Can + /// also perform any pre-signature-verification checks and return an error if needed. + fn additional_signed(&self) -> Result; + + /// Validate a signed transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue, + /// to obtain transaction validity against current state. + /// It should perform all checks that determine a valid transaction, + /// that can pay for its execution and quickly eliminate ones + /// that are stale or incorrect. + /// + /// Make sure to perform the same checks in `pre_dispatch` function. + fn validate( + &self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + Ok(ValidTransaction::default()) + } + + /// Do any pre-flight stuff for a signed transaction. 
+ /// + /// Note this function by default delegates to `validate`, so that + /// all checks performed for the transaction queue are also performed during + /// the dispatch phase (applying the extrinsic). + /// + /// If you ever override this function, you need to make sure to always + /// perform the same validation as in `validate`. + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + self.validate(who, call, info.clone(), len) + .map(|_| Self::Pre::default()) + .map_err(Into::into) + } + + /// Validate an unsigned transaction for the transaction queue. + /// + /// This function can be called frequently by the transaction queue + /// to obtain transaction validity against current state. + /// It should perform all checks that determine a valid unsigned transaction, + /// and quickly eliminate ones that are stale or incorrect. + /// + /// Make sure to perform the same checks in `pre_dispatch_unsigned` function. + fn validate_unsigned( + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + Ok(ValidTransaction::default()) + } + + /// Do any pre-flight stuff for a unsigned transaction. + /// + /// Note this function by default delegates to `validate_unsigned`, so that + /// all checks performed for the transaction queue are also performed during + /// the dispatch phase (applying the extrinsic). + /// + /// If you ever override this function, you need to make sure to always + /// perform the same validation as in `validate_unsigned`. + fn pre_dispatch_unsigned( + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + Self::validate_unsigned(call, info.clone(), len) + .map(|_| Self::Pre::default()) + .map_err(Into::into) + } + + /// Do any post-flight stuff for an extrinsic. 
+ /// + /// This gets given the `DispatchResult` `_result` from the extrinsic and can, if desired, + /// introduce a `TransactionValidityError`, causing the block to become invalid for including + /// it. + /// + /// WARNING: It is dangerous to return an error here. To do so will fundamentally invalidate the + /// transaction and any block that it is included in, causing the block author to not be + /// compensated for their work in validating the transaction or producing the block so far. + /// + /// It can only be used safely when you *know* that the extrinsic is one that can only be + /// introduced by the current block author; generally this implies that it is an inherent and + /// will come from either an offchain-worker or via `InherentData`. + fn post_dispatch( + _pre: Self::Pre, + _info: &DispatchInfoOf, + _post_info: &PostDispatchInfoOf, + _len: usize, + _result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + Ok(()) + } + + /// Returns the list of unique identifier for this signed extension. + /// + /// As a [`SignedExtension`] can be a tuple of [`SignedExtension`]`s we need to return a `Vec` + /// that holds all the unique identifiers. Each individual `SignedExtension` must return + /// *exactly* one identifier. + /// + /// This method provides a default implementation that returns `vec![SELF::IDENTIFIER]`. + fn identifier() -> Vec<&'static str> { + sp_std::vec![Self::IDENTIFIER] + } } #[impl_for_tuples(1, 12)] impl SignedExtension for Tuple { - for_tuples!( where #( Tuple: SignedExtension )* ); - type AccountId = AccountId; - type Call = Call; - const IDENTIFIER: &'static str = "You should call `identifier()`!"; - for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); - for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); - - fn additional_signed(&self) -> Result { - Ok(for_tuples!( ( #( Tuple.additional_signed()? 
),* ) )) - } - - fn validate( - &self, - who: &Self::AccountId, - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info, len)?); )* ); - Ok(valid) - } - - fn pre_dispatch(self, who: &Self::AccountId, call: &Self::Call, info: &DispatchInfoOf, len: usize) - -> Result - { - Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? ),* ) )) - } - - fn validate_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - let valid = ValidTransaction::default(); - for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info, len)?); )* ); - Ok(valid) - } - - fn pre_dispatch_unsigned( - call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result { - Ok(for_tuples!( ( #( Tuple::pre_dispatch_unsigned(call, info, len)? ),* ) )) - } - - fn post_dispatch( - pre: Self::Pre, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info, post_info, len, result)?; )* ); - Ok(()) - } - - fn identifier() -> Vec<&'static str> { - let mut ids = Vec::new(); - for_tuples!( #( ids.extend(Tuple::identifier()); )* ); - ids - } + for_tuples!( where #( Tuple: SignedExtension )* ); + type AccountId = AccountId; + type Call = Call; + const IDENTIFIER: &'static str = "You should call `identifier()`!"; + for_tuples!( type AdditionalSigned = ( #( Tuple::AdditionalSigned ),* ); ); + for_tuples!( type Pre = ( #( Tuple::Pre ),* ); ); + + fn additional_signed(&self) -> Result { + Ok(for_tuples!( ( #( Tuple.additional_signed()? 
),* ) )) + } + + fn validate( + &self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + let valid = ValidTransaction::default(); + for_tuples!( #( let valid = valid.combine_with(Tuple.validate(who, call, info, len)?); )* ); + Ok(valid) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + Ok(for_tuples!( ( #( Tuple.pre_dispatch(who, call, info, len)? ),* ) )) + } + + fn validate_unsigned( + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + let valid = ValidTransaction::default(); + for_tuples!( #( let valid = valid.combine_with(Tuple::validate_unsigned(call, info, len)?); )* ); + Ok(valid) + } + + fn pre_dispatch_unsigned( + call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result { + Ok(for_tuples!( ( #( Tuple::pre_dispatch_unsigned(call, info, len)? ),* ) )) + } + + fn post_dispatch( + pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + for_tuples!( #( Tuple::post_dispatch(pre.Tuple, info, post_info, len, result)?; )* ); + Ok(()) + } + + fn identifier() -> Vec<&'static str> { + let mut ids = Vec::new(); + for_tuples!( #( ids.extend(Tuple::identifier()); )* ); + ids + } } /// Only for bare bone testing when you don't care about signed extensions at all. 
#[cfg(feature = "std")] impl SignedExtension for () { - type AccountId = u64; - type AdditionalSigned = (); - type Call = (); - type Pre = (); - const IDENTIFIER: &'static str = "UnitSignedExtension"; - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } + type AccountId = u64; + type AdditionalSigned = (); + type Call = (); + type Pre = (); + const IDENTIFIER: &'static str = "UnitSignedExtension"; + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } } /// An "executable" piece of information, used by the standard Substrate Executive in order to @@ -874,36 +988,36 @@ impl SignedExtension for () { /// Also provides information on to whom this information is attributable and an index that allows /// each piece of attributable information to be disambiguated. pub trait Applyable: Sized + Send + Sync { - /// Type by which we can dispatch. Restricts the `UnsignedValidator` type. - type Call: Dispatchable; - - /// Checks to see if this is a valid *transaction*. It returns information on it if so. - fn validate>( - &self, - source: TransactionSource, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity; - - /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, - /// index and sender. - fn apply>( - self, - info: &DispatchInfoOf, - len: usize, - ) -> crate::ApplyExtrinsicResult; + /// Type by which we can dispatch. Restricts the `UnsignedValidator` type. + type Call: Dispatchable; + + /// Checks to see if this is a valid *transaction*. It returns information on it if so. + fn validate>( + &self, + source: TransactionSource, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity; + + /// Executes all necessary logic needed prior to dispatch and deconstructs into function call, + /// index and sender. 
+ fn apply>( + self, + info: &DispatchInfoOf, + len: usize, + ) -> crate::ApplyExtrinsicResult; } /// A marker trait for something that knows the type of the runtime block. pub trait GetRuntimeBlockType { - /// The `RuntimeBlock` type. - type RuntimeBlock: self::Block; + /// The `RuntimeBlock` type. + type RuntimeBlock: self::Block; } /// A marker trait for something that knows the type of the node block. pub trait GetNodeBlockType { - /// The `NodeBlock` type. - type NodeBlock: self::Block; + /// The `NodeBlock` type. + type NodeBlock: self::Block; } /// Something that can validate unsigned extrinsics for the transaction pool. @@ -913,50 +1027,52 @@ pub trait GetNodeBlockType { /// During block execution phase one need to perform the same checks anyway, /// since this function is not being called. pub trait ValidateUnsigned { - /// The call to validate - type Call; - - /// Validate the call right before dispatch. - /// - /// This method should be used to prevent transactions already in the pool - /// (i.e. passing `validate_unsigned`) from being included in blocks - /// in case we know they now became invalid. - /// - /// By default it's a good idea to call `validate_unsigned` from within - /// this function again to make sure we never include an invalid transaction. - /// - /// Changes made to storage WILL be persisted if the call returns `Ok`. - fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { - Self::validate_unsigned(TransactionSource::InBlock, call) - .map(|_| ()) - .map_err(Into::into) - } - - /// Return the validity of the call - /// - /// This doesn't execute any side-effects; it merely checks - /// whether the transaction would panic if it were included or not. - /// - /// Changes made to storage should be discarded by caller. - fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity; + /// The call to validate + type Call; + + /// Validate the call right before dispatch. 
+ /// + /// This method should be used to prevent transactions already in the pool + /// (i.e. passing `validate_unsigned`) from being included in blocks + /// in case we know they now became invalid. + /// + /// By default it's a good idea to call `validate_unsigned` from within + /// this function again to make sure we never include an invalid transaction. + /// + /// Changes made to storage WILL be persisted if the call returns `Ok`. + fn pre_dispatch(call: &Self::Call) -> Result<(), TransactionValidityError> { + Self::validate_unsigned(TransactionSource::InBlock, call) + .map(|_| ()) + .map_err(Into::into) + } + + /// Return the validity of the call + /// + /// This doesn't execute any side-effects; it merely checks + /// whether the transaction would panic if it were included or not. + /// + /// Changes made to storage should be discarded by caller. + fn validate_unsigned(source: TransactionSource, call: &Self::Call) -> TransactionValidity; } /// Opaque data type that may be destructured into a series of raw byte slices (which represent /// individual keys). pub trait OpaqueKeys: Clone { - /// Types bound to this opaque keys that provide the key type ids returned. - type KeyTypeIdProviders; - - /// Return the key-type IDs supported by this set. - fn key_ids() -> &'static [crate::KeyTypeId]; - /// Get the raw bytes of key with key-type ID `i`. - fn get_raw(&self, i: super::KeyTypeId) -> &[u8]; - /// Get the decoded key with key-type ID `i`. - fn get(&self, i: super::KeyTypeId) -> Option { - T::decode(&mut self.get_raw(i)).ok() - } - /// Verify a proof of ownership for the keys. - fn ownership_proof_is_valid(&self, _proof: &[u8]) -> bool { true } + /// Types bound to this opaque keys that provide the key type ids returned. + type KeyTypeIdProviders; + + /// Return the key-type IDs supported by this set. + fn key_ids() -> &'static [crate::KeyTypeId]; + /// Get the raw bytes of key with key-type ID `i`. 
+ fn get_raw(&self, i: super::KeyTypeId) -> &[u8]; + /// Get the decoded key with key-type ID `i`. + fn get(&self, i: super::KeyTypeId) -> Option { + T::decode(&mut self.get_raw(i)).ok() + } + /// Verify a proof of ownership for the keys. + fn ownership_proof_is_valid(&self, _proof: &[u8]) -> bool { + true + } } /// Input that adds infinite number of zero after wrapped input. @@ -966,118 +1082,122 @@ pub trait OpaqueKeys: Clone { pub struct AppendZerosInput<'a, T>(&'a mut T); impl<'a, T> AppendZerosInput<'a, T> { - /// Create a new instance from the given byte array. - pub fn new(input: &'a mut T) -> Self { - Self(input) - } + /// Create a new instance from the given byte array. + pub fn new(input: &'a mut T) -> Self { + Self(input) + } } impl<'a, T: codec::Input> codec::Input for AppendZerosInput<'a, T> { - fn remaining_len(&mut self) -> Result, codec::Error> { - Ok(None) - } - - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { - let remaining = self.0.remaining_len()?; - let completed = if let Some(n) = remaining { - let readable = into.len().min(n); - // this should never fail if `remaining_len` API is implemented correctly. - self.0.read(&mut into[..readable])?; - readable - } else { - // Fill it byte-by-byte. - let mut i = 0; - while i < into.len() { - if let Ok(b) = self.0.read_byte() { - into[i] = b; - i += 1; - } else { - break; - } - } - i - }; - // Fill the rest with zeros. - for i in &mut into[completed..] { - *i = 0; - } - Ok(()) - } + fn remaining_len(&mut self) -> Result, codec::Error> { + Ok(None) + } + + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + let remaining = self.0.remaining_len()?; + let completed = if let Some(n) = remaining { + let readable = into.len().min(n); + // this should never fail if `remaining_len` API is implemented correctly. + self.0.read(&mut into[..readable])?; + readable + } else { + // Fill it byte-by-byte. 
+ let mut i = 0; + while i < into.len() { + if let Ok(b) = self.0.read_byte() { + into[i] = b; + i += 1; + } else { + break; + } + } + i + }; + // Fill the rest with zeros. + for i in &mut into[completed..] { + *i = 0; + } + Ok(()) + } } /// Input that adds infinite number of zero after wrapped input. pub struct TrailingZeroInput<'a>(&'a [u8]); impl<'a> TrailingZeroInput<'a> { - /// Create a new instance from the given byte array. - pub fn new(data: &'a [u8]) -> Self { - Self(data) - } + /// Create a new instance from the given byte array. + pub fn new(data: &'a [u8]) -> Self { + Self(data) + } } impl<'a> codec::Input for TrailingZeroInput<'a> { - fn remaining_len(&mut self) -> Result, codec::Error> { - Ok(None) - } - - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { - let len_from_inner = into.len().min(self.0.len()); - into[..len_from_inner].copy_from_slice(&self.0[..len_from_inner]); - for i in &mut into[len_from_inner..] { - *i = 0; - } - self.0 = &self.0[len_from_inner..]; + fn remaining_len(&mut self) -> Result, codec::Error> { + Ok(None) + } + + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + let len_from_inner = into.len().min(self.0.len()); + into[..len_from_inner].copy_from_slice(&self.0[..len_from_inner]); + for i in &mut into[len_from_inner..] { + *i = 0; + } + self.0 = &self.0[len_from_inner..]; - Ok(()) - } + Ok(()) + } } /// This type can be converted into and possibly from an AccountId (which itself is generic). pub trait AccountIdConversion: Sized { - /// Convert into an account ID. This is infallible. - fn into_account(&self) -> AccountId { self.into_sub_account(&()) } - - /// Try to convert an account ID into this type. Might not succeed. - fn try_from_account(a: &AccountId) -> Option { - Self::try_from_sub_account::<()>(a).map(|x| x.0) - } - - /// Convert this value amalgamated with the a secondary "sub" value into an account ID. This is - /// infallible. 
- /// - /// NOTE: The account IDs from this and from `into_account` are *not* guaranteed to be distinct - /// for any given value of `self`, nor are different invocations to this with different types - /// `T`. For example, the following will all encode to the same account ID value: - /// - `self.into_sub_account(0u32)` - /// - `self.into_sub_account(vec![0u8; 0])` - /// - `self.into_account()` - fn into_sub_account(&self, sub: S) -> AccountId; - - /// Try to convert an account ID into this type. Might not succeed. - fn try_from_sub_account(x: &AccountId) -> Option<(Self, S)>; + /// Convert into an account ID. This is infallible. + fn into_account(&self) -> AccountId { + self.into_sub_account(&()) + } + + /// Try to convert an account ID into this type. Might not succeed. + fn try_from_account(a: &AccountId) -> Option { + Self::try_from_sub_account::<()>(a).map(|x| x.0) + } + + /// Convert this value amalgamated with the a secondary "sub" value into an account ID. This is + /// infallible. + /// + /// NOTE: The account IDs from this and from `into_account` are *not* guaranteed to be distinct + /// for any given value of `self`, nor are different invocations to this with different types + /// `T`. For example, the following will all encode to the same account ID value: + /// - `self.into_sub_account(0u32)` + /// - `self.into_sub_account(vec![0u8; 0])` + /// - `self.into_account()` + fn into_sub_account(&self, sub: S) -> AccountId; + + /// Try to convert an account ID into this type. Might not succeed. + fn try_from_sub_account(x: &AccountId) -> Option<(Self, S)>; } /// Format is TYPE_ID ++ encode(parachain ID) ++ 00.... where 00... is indefinite trailing zeroes to /// fill AccountId. 
impl AccountIdConversion for Id { - fn into_sub_account(&self, sub: S) -> T { - (Id::TYPE_ID, self, sub).using_encoded(|b| - T::decode(&mut TrailingZeroInput(b)) - ).unwrap_or_default() - } - - fn try_from_sub_account(x: &T) -> Option<(Self, S)> { - x.using_encoded(|d| { - if &d[0..4] != Id::TYPE_ID { return None } - let mut cursor = &d[4..]; - let result = Decode::decode(&mut cursor).ok()?; - if cursor.iter().all(|x| *x == 0) { - Some(result) - } else { - None - } - }) - } + fn into_sub_account(&self, sub: S) -> T { + (Id::TYPE_ID, self, sub) + .using_encoded(|b| T::decode(&mut TrailingZeroInput(b))) + .unwrap_or_default() + } + + fn try_from_sub_account(x: &T) -> Option<(Self, S)> { + x.using_encoded(|d| { + if &d[0..4] != Id::TYPE_ID { + return None; + } + let mut cursor = &d[4..]; + let result = Decode::decode(&mut cursor).ok()?; + if cursor.iter().all(|x| *x == 0) { + Some(result) + } else { + None + } + }) + } } /// Calls a given macro a number of times with a set of fixed params and an incrementing numeral. @@ -1232,188 +1352,188 @@ macro_rules! impl_opaque_keys { /// Trait for things which can be printed from the runtime. pub trait Printable { - /// Print the object. - fn print(&self); + /// Print the object. 
+ fn print(&self); } impl Printable for &T { - fn print(&self) { - (*self).print() - } + fn print(&self) { + (*self).print() + } } impl Printable for u8 { - fn print(&self) { - (*self as u64).print() - } + fn print(&self) { + (*self as u64).print() + } } impl Printable for u32 { - fn print(&self) { - (*self as u64).print() - } + fn print(&self) { + (*self as u64).print() + } } impl Printable for usize { - fn print(&self) { - (*self as u64).print() - } + fn print(&self) { + (*self as u64).print() + } } impl Printable for u64 { - fn print(&self) { - sp_io::misc::print_num(*self); - } + fn print(&self) { + sp_io::misc::print_num(*self); + } } impl Printable for &[u8] { - fn print(&self) { - sp_io::misc::print_hex(self); - } + fn print(&self) { + sp_io::misc::print_hex(self); + } } impl Printable for &str { - fn print(&self) { - sp_io::misc::print_utf8(self.as_bytes()); - } + fn print(&self) { + sp_io::misc::print_utf8(self.as_bytes()); + } } impl Printable for bool { - fn print(&self) { - if *self { - "true".print() - } else { - "false".print() - } - } + fn print(&self) { + if *self { + "true".print() + } else { + "false".print() + } + } } impl Printable for () { - fn print(&self) { - "()".print() - } + fn print(&self) { + "()".print() + } } #[impl_for_tuples(1, 12)] impl Printable for Tuple { - fn print(&self) { - for_tuples!( #( Tuple.print(); )* ) - } + fn print(&self) { + for_tuples!( #( Tuple.print(); )* ) + } } /// Something that can convert a [`BlockId`] to a number or a hash. #[cfg(feature = "std")] pub trait BlockIdTo { - /// The error type that will be returned by the functions. - type Error: std::fmt::Debug; + /// The error type that will be returned by the functions. + type Error: std::fmt::Debug; - /// Convert the given `block_id` to the corresponding block hash. - fn to_hash( - &self, - block_id: &crate::generic::BlockId, - ) -> Result, Self::Error>; + /// Convert the given `block_id` to the corresponding block hash. 
+ fn to_hash( + &self, + block_id: &crate::generic::BlockId, + ) -> Result, Self::Error>; - /// Convert the given `block_id` to the corresponding block number. - fn to_number( - &self, - block_id: &crate::generic::BlockId, - ) -> Result>, Self::Error>; + /// Convert the given `block_id` to the corresponding block number. + fn to_number( + &self, + block_id: &crate::generic::BlockId, + ) -> Result>, Self::Error>; } #[cfg(test)] mod tests { - use super::*; - use crate::codec::{Encode, Decode, Input}; - use sp_core::{crypto::Pair, ecdsa}; - - mod t { - use sp_core::crypto::KeyTypeId; - use sp_application_crypto::{app_crypto, sr25519}; - app_crypto!(sr25519, KeyTypeId(*b"test")); - } - - #[test] - fn app_verify_works() { - use t::*; - use super::AppVerify; - - let s = Signature::default(); - let _ = s.verify(&[0u8; 100][..], &Public::default()); - } - - #[derive(Encode, Decode, Default, PartialEq, Debug)] - struct U32Value(u32); - impl super::TypeId for U32Value { - const TYPE_ID: [u8; 4] = [0x0d, 0xf0, 0xfe, 0xca]; - } - // cafef00d - - #[derive(Encode, Decode, Default, PartialEq, Debug)] - struct U16Value(u16); - impl super::TypeId for U16Value { - const TYPE_ID: [u8; 4] = [0xfe, 0xca, 0x0d, 0xf0]; - } - // f00dcafe - - type AccountId = u64; - - #[test] - fn into_account_should_work() { - let r: AccountId = U32Value::into_account(&U32Value(0xdeadbeef)); - assert_eq!(r, 0x_deadbeef_cafef00d); - } - - #[test] - fn try_from_account_should_work() { - let r = U32Value::try_from_account(&0x_deadbeef_cafef00d_u64); - assert_eq!(r.unwrap(), U32Value(0xdeadbeef)); - } - - #[test] - fn into_account_with_fill_should_work() { - let r: AccountId = U16Value::into_account(&U16Value(0xc0da)); - assert_eq!(r, 0x_0000_c0da_f00dcafe); - } - - #[test] - fn try_from_account_with_fill_should_work() { - let r = U16Value::try_from_account(&0x0000_c0da_f00dcafe_u64); - assert_eq!(r.unwrap(), U16Value(0xc0da)); - } - - #[test] - fn bad_try_from_account_should_fail() { - let r = 
U16Value::try_from_account(&0x0000_c0de_baadcafe_u64); - assert!(r.is_none()); - let r = U16Value::try_from_account(&0x0100_c0da_f00dcafe_u64); - assert!(r.is_none()); - } - - #[test] - fn trailing_zero_should_work() { - let mut t = super::TrailingZeroInput(&[1, 2, 3]); - assert_eq!(t.remaining_len(), Ok(None)); - let mut buffer = [0u8; 2]; - assert_eq!(t.read(&mut buffer), Ok(())); - assert_eq!(t.remaining_len(), Ok(None)); - assert_eq!(buffer, [1, 2]); - assert_eq!(t.read(&mut buffer), Ok(())); - assert_eq!(t.remaining_len(), Ok(None)); - assert_eq!(buffer, [3, 0]); - assert_eq!(t.read(&mut buffer), Ok(())); - assert_eq!(t.remaining_len(), Ok(None)); - assert_eq!(buffer, [0, 0]); - } - - #[test] - fn ecdsa_verify_works() { - let msg = &b"test-message"[..]; - let (pair, _) = ecdsa::Pair::generate(); - - let signature = pair.sign(&msg); - assert!(ecdsa::Pair::verify(&signature, msg, &pair.public())); - - assert!(signature.verify(msg, &pair.public())); - assert!(signature.verify(msg, &pair.public())); - } + use super::*; + use crate::codec::{Decode, Encode, Input}; + use sp_core::{crypto::Pair, ecdsa}; + + mod t { + use sp_application_crypto::{app_crypto, sr25519}; + use sp_core::crypto::KeyTypeId; + app_crypto!(sr25519, KeyTypeId(*b"test")); + } + + #[test] + fn app_verify_works() { + use super::AppVerify; + use t::*; + + let s = Signature::default(); + let _ = s.verify(&[0u8; 100][..], &Public::default()); + } + + #[derive(Encode, Decode, Default, PartialEq, Debug)] + struct U32Value(u32); + impl super::TypeId for U32Value { + const TYPE_ID: [u8; 4] = [0x0d, 0xf0, 0xfe, 0xca]; + } + // cafef00d + + #[derive(Encode, Decode, Default, PartialEq, Debug)] + struct U16Value(u16); + impl super::TypeId for U16Value { + const TYPE_ID: [u8; 4] = [0xfe, 0xca, 0x0d, 0xf0]; + } + // f00dcafe + + type AccountId = u64; + + #[test] + fn into_account_should_work() { + let r: AccountId = U32Value::into_account(&U32Value(0xdeadbeef)); + assert_eq!(r, 0x_deadbeef_cafef00d); + } + + 
#[test] + fn try_from_account_should_work() { + let r = U32Value::try_from_account(&0x_deadbeef_cafef00d_u64); + assert_eq!(r.unwrap(), U32Value(0xdeadbeef)); + } + + #[test] + fn into_account_with_fill_should_work() { + let r: AccountId = U16Value::into_account(&U16Value(0xc0da)); + assert_eq!(r, 0x_0000_c0da_f00dcafe); + } + + #[test] + fn try_from_account_with_fill_should_work() { + let r = U16Value::try_from_account(&0x0000_c0da_f00dcafe_u64); + assert_eq!(r.unwrap(), U16Value(0xc0da)); + } + + #[test] + fn bad_try_from_account_should_fail() { + let r = U16Value::try_from_account(&0x0000_c0de_baadcafe_u64); + assert!(r.is_none()); + let r = U16Value::try_from_account(&0x0100_c0da_f00dcafe_u64); + assert!(r.is_none()); + } + + #[test] + fn trailing_zero_should_work() { + let mut t = super::TrailingZeroInput(&[1, 2, 3]); + assert_eq!(t.remaining_len(), Ok(None)); + let mut buffer = [0u8; 2]; + assert_eq!(t.read(&mut buffer), Ok(())); + assert_eq!(t.remaining_len(), Ok(None)); + assert_eq!(buffer, [1, 2]); + assert_eq!(t.read(&mut buffer), Ok(())); + assert_eq!(t.remaining_len(), Ok(None)); + assert_eq!(buffer, [3, 0]); + assert_eq!(t.read(&mut buffer), Ok(())); + assert_eq!(t.remaining_len(), Ok(None)); + assert_eq!(buffer, [0, 0]); + } + + #[test] + fn ecdsa_verify_works() { + let msg = &b"test-message"[..]; + let (pair, _) = ecdsa::Pair::generate(); + + let signature = pair.sign(&msg); + assert!(ecdsa::Pair::verify(&signature, msg, &pair.public())); + + assert!(signature.verify(msg, &pair.public())); + assert!(signature.verify(msg, &pair.public())); + } } diff --git a/primitives/runtime/src/transaction_validity.rs b/primitives/runtime/src/transaction_validity.rs index 95903b4876..cce2662cd4 100644 --- a/primitives/runtime/src/transaction_validity.rs +++ b/primitives/runtime/src/transaction_validity.rs @@ -16,9 +16,9 @@ //! Transaction validity interface. 
-use sp_std::prelude::*; -use crate::codec::{Encode, Decode}; +use crate::codec::{Decode, Encode}; use crate::RuntimeDebug; +use sp_std::prelude::*; /// Priority for a transaction. Additive. Higher is better. pub type TransactionPriority = u64; @@ -34,159 +34,163 @@ pub type TransactionTag = Vec; #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug)] #[cfg_attr(feature = "std", derive(serde::Serialize))] pub enum InvalidTransaction { - /// The call of the transaction is not expected. - Call, - /// General error to do with the inability to pay some fees (e.g. account balance too low). - Payment, - /// General error to do with the transaction not yet being valid (e.g. nonce too high). - Future, - /// General error to do with the transaction being outdated (e.g. nonce too low). - Stale, - /// General error to do with the transaction's proofs (e.g. signature). - BadProof, - /// The transaction birth block is ancient. - AncientBirthBlock, - /// The transaction would exhaust the resources of current block. - /// - /// The transaction might be valid, but there are not enough resources left in the current block. - ExhaustsResources, - /// Any other custom invalid validity that is not covered by this enum. - Custom(u8), - /// An extrinsic with a Mandatory dispatch resulted in Error. This is indicative of either a - /// malicious validator or a buggy `provide_inherent`. In any case, it can result in dangerously - /// overweight blocks and therefore if found, invalidates the block. - BadMandatory, - /// A transaction with a mandatory dispatch. This is invalid; only inherent extrinsics are - /// allowed to have mandatory dispatches. - MandatoryDispatch, + /// The call of the transaction is not expected. + Call, + /// General error to do with the inability to pay some fees (e.g. account balance too low). + Payment, + /// General error to do with the transaction not yet being valid (e.g. nonce too high). 
+ Future, + /// General error to do with the transaction being outdated (e.g. nonce too low). + Stale, + /// General error to do with the transaction's proofs (e.g. signature). + BadProof, + /// The transaction birth block is ancient. + AncientBirthBlock, + /// The transaction would exhaust the resources of current block. + /// + /// The transaction might be valid, but there are not enough resources left in the current block. + ExhaustsResources, + /// Any other custom invalid validity that is not covered by this enum. + Custom(u8), + /// An extrinsic with a Mandatory dispatch resulted in Error. This is indicative of either a + /// malicious validator or a buggy `provide_inherent`. In any case, it can result in dangerously + /// overweight blocks and therefore if found, invalidates the block. + BadMandatory, + /// A transaction with a mandatory dispatch. This is invalid; only inherent extrinsics are + /// allowed to have mandatory dispatches. + MandatoryDispatch, } impl InvalidTransaction { - /// Returns if the reason for the invalidity was block resource exhaustion. - pub fn exhausted_resources(&self) -> bool { - match self { - Self::ExhaustsResources => true, - _ => false, - } - } - - /// Returns if the reason for the invalidity was a mandatory call failing. - pub fn was_mandatory(&self) -> bool { - match self { - Self::BadMandatory => true, - _ => false, - } - } + /// Returns if the reason for the invalidity was block resource exhaustion. + pub fn exhausted_resources(&self) -> bool { + match self { + Self::ExhaustsResources => true, + _ => false, + } + } + + /// Returns if the reason for the invalidity was a mandatory call failing. 
+ pub fn was_mandatory(&self) -> bool { + match self { + Self::BadMandatory => true, + _ => false, + } + } } impl From for &'static str { - fn from(invalid: InvalidTransaction) -> &'static str { - match invalid { - InvalidTransaction::Call => "Transaction call is not expected", - InvalidTransaction::Future => "Transaction will be valid in the future", - InvalidTransaction::Stale => "Transaction is outdated", - InvalidTransaction::BadProof => "Transaction has a bad signature", - InvalidTransaction::AncientBirthBlock => "Transaction has an ancient birth block", - InvalidTransaction::ExhaustsResources => - "Transaction would exhausts the block limits", - InvalidTransaction::Payment => - "Inability to pay some fees (e.g. account balance too low)", - InvalidTransaction::BadMandatory => - "A call was labelled as mandatory, but resulted in an Error.", - InvalidTransaction::MandatoryDispatch => - "Tranaction dispatch is mandatory; transactions may not have mandatory dispatches.", - InvalidTransaction::Custom(_) => "InvalidTransaction custom error", - } - } + fn from(invalid: InvalidTransaction) -> &'static str { + match invalid { + InvalidTransaction::Call => "Transaction call is not expected", + InvalidTransaction::Future => "Transaction will be valid in the future", + InvalidTransaction::Stale => "Transaction is outdated", + InvalidTransaction::BadProof => "Transaction has a bad signature", + InvalidTransaction::AncientBirthBlock => "Transaction has an ancient birth block", + InvalidTransaction::ExhaustsResources => "Transaction would exhausts the block limits", + InvalidTransaction::Payment => { + "Inability to pay some fees (e.g. account balance too low)" + } + InvalidTransaction::BadMandatory => { + "A call was labelled as mandatory, but resulted in an Error." + } + InvalidTransaction::MandatoryDispatch => { + "Tranaction dispatch is mandatory; transactions may not have mandatory dispatches." 
+ } + InvalidTransaction::Custom(_) => "InvalidTransaction custom error", + } + } } /// An unknown transaction validity. #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug)] #[cfg_attr(feature = "std", derive(serde::Serialize))] pub enum UnknownTransaction { - /// Could not lookup some information that is required to validate the transaction. - CannotLookup, - /// No validator found for the given unsigned transaction. - NoUnsignedValidator, - /// Any other custom unknown validity that is not covered by this enum. - Custom(u8), + /// Could not lookup some information that is required to validate the transaction. + CannotLookup, + /// No validator found for the given unsigned transaction. + NoUnsignedValidator, + /// Any other custom unknown validity that is not covered by this enum. + Custom(u8), } impl From for &'static str { - fn from(unknown: UnknownTransaction) -> &'static str { - match unknown { - UnknownTransaction::CannotLookup => - "Could not lookup information required to validate the transaction", - UnknownTransaction::NoUnsignedValidator => - "Could not find an unsigned validator for the unsigned transaction", - UnknownTransaction::Custom(_) => "UnknownTransaction custom error", - } - } + fn from(unknown: UnknownTransaction) -> &'static str { + match unknown { + UnknownTransaction::CannotLookup => { + "Could not lookup information required to validate the transaction" + } + UnknownTransaction::NoUnsignedValidator => { + "Could not find an unsigned validator for the unsigned transaction" + } + UnknownTransaction::Custom(_) => "UnknownTransaction custom error", + } + } } /// Errors that can occur while checking the validity of a transaction. #[derive(Clone, PartialEq, Eq, Encode, Decode, Copy, RuntimeDebug)] #[cfg_attr(feature = "std", derive(serde::Serialize))] pub enum TransactionValidityError { - /// The transaction is invalid. - Invalid(InvalidTransaction), - /// Transaction validity can't be determined. 
- Unknown(UnknownTransaction), + /// The transaction is invalid. + Invalid(InvalidTransaction), + /// Transaction validity can't be determined. + Unknown(UnknownTransaction), } impl TransactionValidityError { - /// Returns `true` if the reason for the error was block resource exhaustion. - pub fn exhausted_resources(&self) -> bool { - match self { - Self::Invalid(e) => e.exhausted_resources(), - Self::Unknown(_) => false, - } - } - - /// Returns `true` if the reason for the error was it being a mandatory dispatch that could not - /// be completed successfully. - pub fn was_mandatory(&self) -> bool { - match self { - Self::Invalid(e) => e.was_mandatory(), - Self::Unknown(_) => false, - } - } + /// Returns `true` if the reason for the error was block resource exhaustion. + pub fn exhausted_resources(&self) -> bool { + match self { + Self::Invalid(e) => e.exhausted_resources(), + Self::Unknown(_) => false, + } + } + + /// Returns `true` if the reason for the error was it being a mandatory dispatch that could not + /// be completed successfully. 
+ pub fn was_mandatory(&self) -> bool { + match self { + Self::Invalid(e) => e.was_mandatory(), + Self::Unknown(_) => false, + } + } } impl From for &'static str { - fn from(err: TransactionValidityError) -> &'static str { - match err { - TransactionValidityError::Invalid(invalid) => invalid.into(), - TransactionValidityError::Unknown(unknown) => unknown.into(), - } - } + fn from(err: TransactionValidityError) -> &'static str { + match err { + TransactionValidityError::Invalid(invalid) => invalid.into(), + TransactionValidityError::Unknown(unknown) => unknown.into(), + } + } } impl From for TransactionValidityError { - fn from(err: InvalidTransaction) -> Self { - TransactionValidityError::Invalid(err) - } + fn from(err: InvalidTransaction) -> Self { + TransactionValidityError::Invalid(err) + } } impl From for TransactionValidityError { - fn from(err: UnknownTransaction) -> Self { - TransactionValidityError::Unknown(err) - } + fn from(err: UnknownTransaction) -> Self { + TransactionValidityError::Unknown(err) + } } /// Information on a transaction's validity and, if valid, on how it relates to other transactions. pub type TransactionValidity = Result; impl Into for InvalidTransaction { - fn into(self) -> TransactionValidity { - Err(self.into()) - } + fn into(self) -> TransactionValidity { + Err(self.into()) + } } impl Into for UnknownTransaction { - fn into(self) -> TransactionValidity { - Err(self.into()) - } + fn into(self) -> TransactionValidity { + Err(self.into()) + } } /// The source of the transaction. @@ -194,99 +198,107 @@ impl Into for UnknownTransaction { /// Depending on the source we might apply different validation schemes. /// For instance we can disallow specific kinds of transactions if they were not produced /// by our local node (for instance off-chain workers). 
-#[derive(Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf)] +#[derive( + Copy, Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug, parity_util_mem::MallocSizeOf, +)] pub enum TransactionSource { - /// Transaction is already included in block. - /// - /// This means that we can't really tell where the transaction is coming from, - /// since it's already in the received block. Note that the custom validation logic - /// using either `Local` or `External` should most likely just allow `InBlock` - /// transactions as well. - InBlock, - - /// Transaction is coming from a local source. - /// - /// This means that the transaction was produced internally by the node - /// (for instance an Off-Chain Worker, or an Off-Chain Call), as opposed - /// to being received over the network. - Local, - - /// Transaction has been received externally. - /// - /// This means the transaction has been received from (usually) "untrusted" source, - /// for instance received over the network or RPC. - External, + /// Transaction is already included in block. + /// + /// This means that we can't really tell where the transaction is coming from, + /// since it's already in the received block. Note that the custom validation logic + /// using either `Local` or `External` should most likely just allow `InBlock` + /// transactions as well. + InBlock, + + /// Transaction is coming from a local source. + /// + /// This means that the transaction was produced internally by the node + /// (for instance an Off-Chain Worker, or an Off-Chain Call), as opposed + /// to being received over the network. + Local, + + /// Transaction has been received externally. + /// + /// This means the transaction has been received from (usually) "untrusted" source, + /// for instance received over the network or RPC. + External, } /// Information concerning a valid transaction. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct ValidTransaction { - /// Priority of the transaction. - /// - /// Priority determines the ordering of two transactions that have all - /// their dependencies (required tags) satisfied. - pub priority: TransactionPriority, - /// Transaction dependencies - /// - /// A non-empty list signifies that some other transactions which provide - /// given tags are required to be included before that one. - pub requires: Vec, - /// Provided tags - /// - /// A list of tags this transaction provides. Successfully importing the transaction - /// will enable other transactions that depend on (require) those tags to be included as well. - /// Provided and required tags allow Substrate to build a dependency graph of transactions - /// and import them in the right (linear) order. - pub provides: Vec, - /// Transaction longevity - /// - /// Longevity describes minimum number of blocks the validity is correct. - /// After this period transaction should be removed from the pool or revalidated. - pub longevity: TransactionLongevity, - /// A flag indicating if the transaction should be propagated to other peers. - /// - /// By setting `false` here the transaction will still be considered for - /// including in blocks that are authored on the current node, but will - /// never be sent to other peers. - pub propagate: bool, + /// Priority of the transaction. + /// + /// Priority determines the ordering of two transactions that have all + /// their dependencies (required tags) satisfied. + pub priority: TransactionPriority, + /// Transaction dependencies + /// + /// A non-empty list signifies that some other transactions which provide + /// given tags are required to be included before that one. + pub requires: Vec, + /// Provided tags + /// + /// A list of tags this transaction provides. 
Successfully importing the transaction + /// will enable other transactions that depend on (require) those tags to be included as well. + /// Provided and required tags allow Substrate to build a dependency graph of transactions + /// and import them in the right (linear) order. + pub provides: Vec, + /// Transaction longevity + /// + /// Longevity describes minimum number of blocks the validity is correct. + /// After this period transaction should be removed from the pool or revalidated. + pub longevity: TransactionLongevity, + /// A flag indicating if the transaction should be propagated to other peers. + /// + /// By setting `false` here the transaction will still be considered for + /// including in blocks that are authored on the current node, but will + /// never be sent to other peers. + pub propagate: bool, } impl Default for ValidTransaction { - fn default() -> Self { - ValidTransaction { - priority: 0, - requires: vec![], - provides: vec![], - longevity: TransactionLongevity::max_value(), - propagate: true, - } - } + fn default() -> Self { + ValidTransaction { + priority: 0, + requires: vec![], + provides: vec![], + longevity: TransactionLongevity::max_value(), + propagate: true, + } + } } impl ValidTransaction { - /// Initiate `ValidTransaction` builder object with a particular prefix for tags. - /// - /// To avoid conflicts between different parts in runtime it's recommended to build `requires` - /// and `provides` tags with a unique prefix. - pub fn with_tag_prefix(prefix: &'static str) -> ValidTransactionBuilder { - ValidTransactionBuilder { - prefix: Some(prefix), - validity: Default::default(), - } - } - - /// Combine two instances into one, as a best effort. This will take the superset of each of the - /// `provides` and `requires` tags, it will sum the priorities, take the minimum longevity and - /// the logic *And* of the propagate flags. 
- pub fn combine_with(mut self, mut other: ValidTransaction) -> Self { - ValidTransaction { - priority: self.priority.saturating_add(other.priority), - requires: { self.requires.append(&mut other.requires); self.requires }, - provides: { self.provides.append(&mut other.provides); self.provides }, - longevity: self.longevity.min(other.longevity), - propagate: self.propagate && other.propagate, - } - } + /// Initiate `ValidTransaction` builder object with a particular prefix for tags. + /// + /// To avoid conflicts between different parts in runtime it's recommended to build `requires` + /// and `provides` tags with a unique prefix. + pub fn with_tag_prefix(prefix: &'static str) -> ValidTransactionBuilder { + ValidTransactionBuilder { + prefix: Some(prefix), + validity: Default::default(), + } + } + + /// Combine two instances into one, as a best effort. This will take the superset of each of the + /// `provides` and `requires` tags, it will sum the priorities, take the minimum longevity and + /// the logic *And* of the propagate flags. + pub fn combine_with(mut self, mut other: ValidTransaction) -> Self { + ValidTransaction { + priority: self.priority.saturating_add(other.priority), + requires: { + self.requires.append(&mut other.requires); + self.requires + }, + provides: { + self.provides.append(&mut other.provides); + self.provides + }, + longevity: self.longevity.min(other.longevity), + propagate: self.propagate && other.propagate, + } + } } /// `ValidTransaction` builder. @@ -296,140 +308,145 @@ impl ValidTransaction { /// prefixing `requires` and `provides` tags to avoid conflicts. #[derive(Default, Clone, RuntimeDebug)] pub struct ValidTransactionBuilder { - prefix: Option<&'static str>, - validity: ValidTransaction, + prefix: Option<&'static str>, + validity: ValidTransaction, } impl ValidTransactionBuilder { - /// Set the priority of a transaction. - /// - /// Note that the final priority for `FRAME` is combined from all `SignedExtension`s. 
- /// Most likely for unsigned transactions you want the priority to be higher - /// than for regular transactions. We recommend exposing a base priority for unsigned - /// transactions as a runtime module parameter, so that the runtime can tune inter-module - /// priorities. - pub fn priority(mut self, priority: TransactionPriority) -> Self { - self.validity.priority = priority; - self - } - - /// Set the longevity of a transaction. - /// - /// By default the transaction will be considered valid forever and will not be revalidated - /// by the transaction pool. It's recommended though to set the longevity to a finite value - /// though. If unsure, it's also reasonable to expose this parameter via module configuration - /// and let the runtime decide. - pub fn longevity(mut self, longevity: TransactionLongevity) -> Self { - self.validity.longevity = longevity; - self - } - - /// Set the propagate flag. - /// - /// Set to `false` if the transaction is not meant to be gossiped to peers. Combined with - /// `TransactionSource::Local` validation it can be used to have special kind of - /// transactions that are only produced and included by the validator nodes. - pub fn propagate(mut self, propagate: bool) -> Self { - self.validity.propagate = propagate; - self - } - - /// Add a `TransactionTag` to the set of required tags. - /// - /// The tag will be encoded and prefixed with module prefix (if any). - /// If you'd rather add a raw `require` tag, consider using `#combine_with` method. - pub fn and_requires(mut self, tag: impl Encode) -> Self { - self.validity.requires.push(match self.prefix.as_ref() { - Some(prefix) => (prefix, tag).encode(), - None => tag.encode(), - }); - self - } - - /// Add a `TransactionTag` to the set of provided tags. - /// - /// The tag will be encoded and prefixed with module prefix (if any). - /// If you'd rather add a raw `require` tag, consider using `#combine_with` method. 
- pub fn and_provides(mut self, tag: impl Encode) -> Self { - self.validity.provides.push(match self.prefix.as_ref() { - Some(prefix) => (prefix, tag).encode(), - None => tag.encode(), - }); - self - } - - /// Augment the builder with existing `ValidTransaction`. - /// - /// This method does add the prefix to `require` or `provides` tags. - pub fn combine_with(mut self, validity: ValidTransaction) -> Self { - self.validity = core::mem::take(&mut self.validity).combine_with(validity); - self - } - - /// Finalize the builder and produce `TransactionValidity`. - /// - /// Note the result will always be `Ok`. Use `Into` to produce `ValidTransaction`. - pub fn build(self) -> TransactionValidity { - self.into() - } + /// Set the priority of a transaction. + /// + /// Note that the final priority for `FRAME` is combined from all `SignedExtension`s. + /// Most likely for unsigned transactions you want the priority to be higher + /// than for regular transactions. We recommend exposing a base priority for unsigned + /// transactions as a runtime module parameter, so that the runtime can tune inter-module + /// priorities. + pub fn priority(mut self, priority: TransactionPriority) -> Self { + self.validity.priority = priority; + self + } + + /// Set the longevity of a transaction. + /// + /// By default the transaction will be considered valid forever and will not be revalidated + /// by the transaction pool. It's recommended though to set the longevity to a finite value + /// though. If unsure, it's also reasonable to expose this parameter via module configuration + /// and let the runtime decide. + pub fn longevity(mut self, longevity: TransactionLongevity) -> Self { + self.validity.longevity = longevity; + self + } + + /// Set the propagate flag. + /// + /// Set to `false` if the transaction is not meant to be gossiped to peers. 
Combined with + /// `TransactionSource::Local` validation it can be used to have special kind of + /// transactions that are only produced and included by the validator nodes. + pub fn propagate(mut self, propagate: bool) -> Self { + self.validity.propagate = propagate; + self + } + + /// Add a `TransactionTag` to the set of required tags. + /// + /// The tag will be encoded and prefixed with module prefix (if any). + /// If you'd rather add a raw `require` tag, consider using `#combine_with` method. + pub fn and_requires(mut self, tag: impl Encode) -> Self { + self.validity.requires.push(match self.prefix.as_ref() { + Some(prefix) => (prefix, tag).encode(), + None => tag.encode(), + }); + self + } + + /// Add a `TransactionTag` to the set of provided tags. + /// + /// The tag will be encoded and prefixed with module prefix (if any). + /// If you'd rather add a raw `require` tag, consider using `#combine_with` method. + pub fn and_provides(mut self, tag: impl Encode) -> Self { + self.validity.provides.push(match self.prefix.as_ref() { + Some(prefix) => (prefix, tag).encode(), + None => tag.encode(), + }); + self + } + + /// Augment the builder with existing `ValidTransaction`. + /// + /// This method does add the prefix to `require` or `provides` tags. + pub fn combine_with(mut self, validity: ValidTransaction) -> Self { + self.validity = core::mem::take(&mut self.validity).combine_with(validity); + self + } + + /// Finalize the builder and produce `TransactionValidity`. + /// + /// Note the result will always be `Ok`. Use `Into` to produce `ValidTransaction`. 
+ pub fn build(self) -> TransactionValidity { + self.into() + } } impl From for TransactionValidity { - fn from(builder: ValidTransactionBuilder) -> Self { - Ok(builder.into()) - } + fn from(builder: ValidTransactionBuilder) -> Self { + Ok(builder.into()) + } } impl From for ValidTransaction { - fn from(builder: ValidTransactionBuilder) -> Self { - builder.validity - } + fn from(builder: ValidTransactionBuilder) -> Self { + builder.validity + } } - #[cfg(test)] mod tests { - use super::*; - - #[test] - fn should_encode_and_decode() { - let v: TransactionValidity = Ok(ValidTransaction { - priority: 5, - requires: vec![vec![1, 2, 3, 4]], - provides: vec![vec![4, 5, 6]], - longevity: 42, - propagate: false, - }); - - let encoded = v.encode(); - assert_eq!( - encoded, - vec![0, 5, 0, 0, 0, 0, 0, 0, 0, 4, 16, 1, 2, 3, 4, 4, 12, 4, 5, 6, 42, 0, 0, 0, 0, 0, 0, 0, 0] - ); - - // decode back - assert_eq!(TransactionValidity::decode(&mut &*encoded), Ok(v)); - } - - #[test] - fn builder_should_prefix_the_tags() { - const PREFIX: &str = "test"; - let a: ValidTransaction = ValidTransaction::with_tag_prefix(PREFIX) - .and_requires(1) - .and_requires(2) - .and_provides(3) - .and_provides(4) - .propagate(false) - .longevity(5) - .priority(3) - .priority(6) - .into(); - assert_eq!(a, ValidTransaction { - propagate: false, - longevity: 5, - priority: 6, - requires: vec![(PREFIX, 1).encode(), (PREFIX, 2).encode()], - provides: vec![(PREFIX, 3).encode(), (PREFIX, 4).encode()], - }); - } + use super::*; + + #[test] + fn should_encode_and_decode() { + let v: TransactionValidity = Ok(ValidTransaction { + priority: 5, + requires: vec![vec![1, 2, 3, 4]], + provides: vec![vec![4, 5, 6]], + longevity: 42, + propagate: false, + }); + + let encoded = v.encode(); + assert_eq!( + encoded, + vec![ + 0, 5, 0, 0, 0, 0, 0, 0, 0, 4, 16, 1, 2, 3, 4, 4, 12, 4, 5, 6, 42, 0, 0, 0, 0, 0, 0, + 0, 0 + ] + ); + + // decode back + assert_eq!(TransactionValidity::decode(&mut &*encoded), Ok(v)); + } + + #[test] 
+ fn builder_should_prefix_the_tags() { + const PREFIX: &str = "test"; + let a: ValidTransaction = ValidTransaction::with_tag_prefix(PREFIX) + .and_requires(1) + .and_requires(2) + .and_provides(3) + .and_provides(4) + .propagate(false) + .longevity(5) + .priority(3) + .priority(6) + .into(); + assert_eq!( + a, + ValidTransaction { + propagate: false, + longevity: 5, + priority: 6, + requires: vec![(PREFIX, 1).encode(), (PREFIX, 2).encode()], + provides: vec![(PREFIX, 3).encode(), (PREFIX, 4).encode()], + } + ); + } } diff --git a/primitives/sandbox/src/lib.rs b/primitives/sandbox/src/lib.rs index 1ef30ca5db..04f4adb770 100755 --- a/primitives/sandbox/src/lib.rs +++ b/primitives/sandbox/src/lib.rs @@ -40,35 +40,35 @@ use sp_std::prelude::*; pub use sp_core::sandbox::HostError; -pub use sp_wasm_interface::{Value, ReturnValue}; +pub use sp_wasm_interface::{ReturnValue, Value}; mod imp { - #[cfg(feature = "std")] - include!("../with_std.rs"); + #[cfg(feature = "std")] + include!("../with_std.rs"); - #[cfg(not(feature = "std"))] - include!("../without_std.rs"); + #[cfg(not(feature = "std"))] + include!("../without_std.rs"); } /// Error that can occur while using this crate. #[derive(sp_core::RuntimeDebug)] pub enum Error { - /// Module is not valid, couldn't be instantiated. - Module, + /// Module is not valid, couldn't be instantiated. + Module, - /// Access to a memory or table was made with an address or an index which is out of bounds. - /// - /// Note that if wasm module makes an out-of-bounds access then trap will occur. - OutOfBounds, + /// Access to a memory or table was made with an address or an index which is out of bounds. + /// + /// Note that if wasm module makes an out-of-bounds access then trap will occur. + OutOfBounds, - /// Failed to invoke the start function or an exported function for some reason. - Execution, + /// Failed to invoke the start function or an exported function for some reason. 
+ Execution, } impl From for HostError { - fn from(_e: Error) -> HostError { - HostError - } + fn from(_e: Error) -> HostError { + HostError + } } /// Function pointer for specifying functions by the @@ -84,39 +84,39 @@ pub type HostFuncType = fn(&mut T, &[Value]) -> Result32 = 4GiB = 65536 * 64KiB). - /// - /// It is possible to limit maximum number of pages this memory instance can have by specifying - /// `maximum`. If not specified, this memory instance would be able to allocate up to 4GiB. - /// - /// Allocated memory is always zeroed. - pub fn new(initial: u32, maximum: Option) -> Result { - Ok(Memory { - inner: imp::Memory::new(initial, maximum)?, - }) - } - - /// Read a memory area at the address `ptr` with the size of the provided slice `buf`. - /// - /// Returns `Err` if the range is out-of-bounds. - pub fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error> { - self.inner.get(ptr, buf) - } - - /// Write a memory area at the address `ptr` with contents of the provided slice `buf`. - /// - /// Returns `Err` if the range is out-of-bounds. - pub fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error> { - self.inner.set(ptr, value) - } + /// Construct a new linear memory instance. + /// + /// The memory allocated with initial number of pages specified by `initial`. + /// Minimal possible value for `initial` is 0 and maximum possible is `65536`. + /// (Since maximum addressable memory is 232 = 4GiB = 65536 * 64KiB). + /// + /// It is possible to limit maximum number of pages this memory instance can have by specifying + /// `maximum`. If not specified, this memory instance would be able to allocate up to 4GiB. + /// + /// Allocated memory is always zeroed. + pub fn new(initial: u32, maximum: Option) -> Result { + Ok(Memory { + inner: imp::Memory::new(initial, maximum)?, + }) + } + + /// Read a memory area at the address `ptr` with the size of the provided slice `buf`. + /// + /// Returns `Err` if the range is out-of-bounds. 
+ pub fn get(&self, ptr: u32, buf: &mut [u8]) -> Result<(), Error> { + self.inner.get(ptr, buf) + } + + /// Write a memory area at the address `ptr` with contents of the provided slice `buf`. + /// + /// Returns `Err` if the range is out-of-bounds. + pub fn set(&self, ptr: u32, value: &[u8]) -> Result<(), Error> { + self.inner.set(ptr, value) + } } /// Struct that can be used for defining an environment for a sandboxed module. @@ -124,89 +124,91 @@ impl Memory { /// The sandboxed module can access only the entities which were defined and passed /// to the module at the instantiation time. pub struct EnvironmentDefinitionBuilder { - inner: imp::EnvironmentDefinitionBuilder, + inner: imp::EnvironmentDefinitionBuilder, } impl EnvironmentDefinitionBuilder { - /// Construct a new `EnvironmentDefinitionBuilder`. - pub fn new() -> EnvironmentDefinitionBuilder { - EnvironmentDefinitionBuilder { - inner: imp::EnvironmentDefinitionBuilder::new(), - } - } - - /// Register a host function in this environment definition. - /// - /// NOTE that there is no constraints on type of this function. An instance - /// can import function passed here with any signature it wants. It can even import - /// the same function (i.e. with same `module` and `field`) several times. It's up to - /// the user code to check or constrain the types of signatures. - pub fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) - where - N1: Into>, - N2: Into>, - { - self.inner.add_host_func(module, field, f); - } - - /// Register a memory in this environment definition. - pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) - where - N1: Into>, - N2: Into>, - { - self.inner.add_memory(module, field, mem.inner); - } + /// Construct a new `EnvironmentDefinitionBuilder`. + pub fn new() -> EnvironmentDefinitionBuilder { + EnvironmentDefinitionBuilder { + inner: imp::EnvironmentDefinitionBuilder::new(), + } + } + + /// Register a host function in this environment definition. 
+ /// + /// NOTE that there is no constraints on type of this function. An instance + /// can import function passed here with any signature it wants. It can even import + /// the same function (i.e. with same `module` and `field`) several times. It's up to + /// the user code to check or constrain the types of signatures. + pub fn add_host_func(&mut self, module: N1, field: N2, f: HostFuncType) + where + N1: Into>, + N2: Into>, + { + self.inner.add_host_func(module, field, f); + } + + /// Register a memory in this environment definition. + pub fn add_memory(&mut self, module: N1, field: N2, mem: Memory) + where + N1: Into>, + N2: Into>, + { + self.inner.add_memory(module, field, mem.inner); + } } /// Sandboxed instance of a wasm module. /// /// This instance can be used for invoking exported functions. pub struct Instance { - inner: imp::Instance, + inner: imp::Instance, } impl Instance { - /// Instantiate a module with the given [`EnvironmentDefinitionBuilder`]. It will - /// run the `start` function (if it is present in the module) with the given `state`. - /// - /// Returns `Err(Error::Module)` if this module can't be instantiated with the given - /// environment. If execution of `start` function generated a trap, then `Err(Error::Execution)` will - /// be returned. - /// - /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html - pub fn new(code: &[u8], env_def_builder: &EnvironmentDefinitionBuilder, state: &mut T) - -> Result, Error> - { - Ok(Instance { - inner: imp::Instance::new(code, &env_def_builder.inner, state)?, - }) - } - - /// Invoke an exported function with the given name. 
- /// - /// # Errors - /// - /// Returns `Err(Error::Execution)` if: - /// - /// - An export function name isn't a proper utf8 byte sequence, - /// - This module doesn't have an exported function with the given name, - /// - If types of the arguments passed to the function doesn't match function signature - /// then trap occurs (as if the exported function was called via call_indirect), - /// - Trap occurred at the execution time. - pub fn invoke( - &mut self, - name: &str, - args: &[Value], - state: &mut T, - ) -> Result { - self.inner.invoke(name, args, state) - } - - /// Get the value from a global with the given `name`. - /// - /// Returns `Some(_)` if the global could be found. - pub fn get_global_val(&self, name: &str) -> Option { - self.inner.get_global_val(name) - } + /// Instantiate a module with the given [`EnvironmentDefinitionBuilder`]. It will + /// run the `start` function (if it is present in the module) with the given `state`. + /// + /// Returns `Err(Error::Module)` if this module can't be instantiated with the given + /// environment. If execution of `start` function generated a trap, then `Err(Error::Execution)` will + /// be returned. + /// + /// [`EnvironmentDefinitionBuilder`]: struct.EnvironmentDefinitionBuilder.html + pub fn new( + code: &[u8], + env_def_builder: &EnvironmentDefinitionBuilder, + state: &mut T, + ) -> Result, Error> { + Ok(Instance { + inner: imp::Instance::new(code, &env_def_builder.inner, state)?, + }) + } + + /// Invoke an exported function with the given name. + /// + /// # Errors + /// + /// Returns `Err(Error::Execution)` if: + /// + /// - An export function name isn't a proper utf8 byte sequence, + /// - This module doesn't have an exported function with the given name, + /// - If types of the arguments passed to the function doesn't match function signature + /// then trap occurs (as if the exported function was called via call_indirect), + /// - Trap occurred at the execution time. 
+ pub fn invoke( + &mut self, + name: &str, + args: &[Value], + state: &mut T, + ) -> Result { + self.inner.invoke(name, args, state) + } + + /// Get the value from a global with the given `name`. + /// + /// Returns `Some(_)` if the global could be found. + pub fn get_global_val(&self, name: &str) -> Option { + self.inner.get_global_val(name) + } } diff --git a/primitives/serializer/src/lib.rs b/primitives/serializer/src/lib.rs index 3138c3e63b..b3caca94cf 100644 --- a/primitives/serializer/src/lib.rs +++ b/primitives/serializer/src/lib.rs @@ -21,21 +21,24 @@ #![warn(missing_docs)] -pub use serde_json::{from_str, from_slice, from_reader, Result, Error}; +pub use serde_json::{from_reader, from_slice, from_str, Error, Result}; const PROOF: &str = "Serializers are infallible; qed"; /// Serialize the given data structure as a pretty-printed String of JSON. pub fn to_string_pretty(value: &T) -> String { - serde_json::to_string_pretty(value).expect(PROOF) + serde_json::to_string_pretty(value).expect(PROOF) } /// Serialize the given data structure as a JSON byte vector. pub fn encode(value: &T) -> Vec { - serde_json::to_vec(value).expect(PROOF) + serde_json::to_vec(value).expect(PROOF) } /// Serialize the given data structure as JSON into the IO stream. -pub fn to_writer(writer: W, value: &T) -> Result<()> { - serde_json::to_writer(writer, value) +pub fn to_writer( + writer: W, + value: &T, +) -> Result<()> { + serde_json::to_writer(writer, value) } diff --git a/primitives/session/src/lib.rs b/primitives/session/src/lib.rs index 8e2a68d050..cef461dabc 100644 --- a/primitives/session/src/lib.rs +++ b/primitives/session/src/lib.rs @@ -20,50 +20,50 @@ use sp_std::vec::Vec; -#[cfg(feature = "std")] -use sp_runtime::{generic::BlockId, traits::Block as BlockT}; #[cfg(feature = "std")] use sp_api::ProvideRuntimeApi; +#[cfg(feature = "std")] +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; use sp_core::crypto::KeyTypeId; sp_api::decl_runtime_apis! 
{ - /// Session keys runtime api. - pub trait SessionKeys { - /// Generate a set of session keys with optionally using the given seed. - /// The keys should be stored within the keystore exposed via runtime - /// externalities. - /// - /// The seed needs to be a valid `utf8` string. - /// - /// Returns the concatenated SCALE encoded public keys. - fn generate_session_keys(seed: Option>) -> Vec; + /// Session keys runtime api. + pub trait SessionKeys { + /// Generate a set of session keys with optionally using the given seed. + /// The keys should be stored within the keystore exposed via runtime + /// externalities. + /// + /// The seed needs to be a valid `utf8` string. + /// + /// Returns the concatenated SCALE encoded public keys. + fn generate_session_keys(seed: Option>) -> Vec; - /// Decode the given public session keys. - /// - /// Returns the list of public raw public keys + key type. - fn decode_session_keys(encoded: Vec) -> Option, KeyTypeId)>>; - } + /// Decode the given public session keys. + /// + /// Returns the list of public raw public keys + key type. + fn decode_session_keys(encoded: Vec) -> Option, KeyTypeId)>>; + } } /// Generate the initial session keys with the given seeds, at the given block and store them in /// the client's keystore. 
#[cfg(feature = "std")] pub fn generate_initial_session_keys( - client: std::sync::Arc, - at: &BlockId, - seeds: Vec, + client: std::sync::Arc, + at: &BlockId, + seeds: Vec, ) -> Result<(), sp_api::ApiErrorFor> where - Block: BlockT, - T: ProvideRuntimeApi, - T::Api: SessionKeys, + Block: BlockT, + T: ProvideRuntimeApi, + T::Api: SessionKeys, { - let runtime_api = client.runtime_api(); + let runtime_api = client.runtime_api(); - for seed in seeds { - runtime_api.generate_session_keys(at, Some(seed.as_bytes().to_vec()))?; - } + for seed in seeds { + runtime_api.generate_session_keys(at, Some(seed.as_bytes().to_vec()))?; + } - Ok(()) + Ok(()) } diff --git a/primitives/staking/src/lib.rs b/primitives/staking/src/lib.rs index 3f6c1873ff..0268a36cba 100644 --- a/primitives/staking/src/lib.rs +++ b/primitives/staking/src/lib.rs @@ -1,4 +1,3 @@ - // Copyright 2019-2020 Parity Technologies (UK) Ltd. // This file is part of Substrate. diff --git a/primitives/staking/src/offence.rs b/primitives/staking/src/offence.rs index 584f3a75ea..9fddef8d7e 100644 --- a/primitives/staking/src/offence.rs +++ b/primitives/staking/src/offence.rs @@ -19,7 +19,7 @@ use sp_std::vec::Vec; -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; use sp_runtime::Perbill; use crate::SessionIndex; @@ -44,84 +44,83 @@ pub type OffenceCount = u32; /// /// Examples of offences include: a BABE equivocation or a GRANDPA unjustified vote. pub trait Offence { - /// Identifier which is unique for this kind of an offence. - const ID: Kind; - - /// A type that represents a point in time on an abstract timescale. - /// - /// See `Offence::time_slot` for details. The only requirement is that such timescale could be - /// represented by a single `u128` value. - type TimeSlot: Clone + codec::Codec + Ord; - - /// The list of all offenders involved in this incident. - /// - /// The list has no duplicates, so it is rather a set. 
- fn offenders(&self) -> Vec; - - /// The session index that is used for querying the validator set for the `slash_fraction` - /// function. - /// - /// This is used for filtering historical sessions. - fn session_index(&self) -> SessionIndex; - - /// Return a validator set count at the time when the offence took place. - fn validator_set_count(&self) -> u32; - - /// A point in time when this offence happened. - /// - /// This is used for looking up offences that happened at the "same time". - /// - /// The timescale is abstract and doesn't have to be the same across different implementations - /// of this trait. The value doesn't represent absolute timescale though since it is interpreted - /// along with the `session_index`. Two offences are considered to happen at the same time iff - /// both `session_index` and `time_slot` are equal. - /// - /// As an example, for GRANDPA timescale could be a round number and for BABE it could be a slot - /// number. Note that for GRANDPA the round number is reset each epoch. - fn time_slot(&self) -> Self::TimeSlot; - - /// A slash fraction of the total exposure that should be slashed for this - /// particular offence kind for the given parameters that happened at a singular `TimeSlot`. - /// - /// `offenders_count` - the count of unique offending authorities. It is >0. - /// `validator_set_count` - the cardinality of the validator set at the time of offence. - fn slash_fraction( - offenders_count: u32, - validator_set_count: u32, - ) -> Perbill; + /// Identifier which is unique for this kind of an offence. + const ID: Kind; + + /// A type that represents a point in time on an abstract timescale. + /// + /// See `Offence::time_slot` for details. The only requirement is that such timescale could be + /// represented by a single `u128` value. + type TimeSlot: Clone + codec::Codec + Ord; + + /// The list of all offenders involved in this incident. + /// + /// The list has no duplicates, so it is rather a set. 
+ fn offenders(&self) -> Vec; + + /// The session index that is used for querying the validator set for the `slash_fraction` + /// function. + /// + /// This is used for filtering historical sessions. + fn session_index(&self) -> SessionIndex; + + /// Return a validator set count at the time when the offence took place. + fn validator_set_count(&self) -> u32; + + /// A point in time when this offence happened. + /// + /// This is used for looking up offences that happened at the "same time". + /// + /// The timescale is abstract and doesn't have to be the same across different implementations + /// of this trait. The value doesn't represent absolute timescale though since it is interpreted + /// along with the `session_index`. Two offences are considered to happen at the same time iff + /// both `session_index` and `time_slot` are equal. + /// + /// As an example, for GRANDPA timescale could be a round number and for BABE it could be a slot + /// number. Note that for GRANDPA the round number is reset each epoch. + fn time_slot(&self) -> Self::TimeSlot; + + /// A slash fraction of the total exposure that should be slashed for this + /// particular offence kind for the given parameters that happened at a singular `TimeSlot`. + /// + /// `offenders_count` - the count of unique offending authorities. It is >0. + /// `validator_set_count` - the cardinality of the validator set at the time of offence. + fn slash_fraction(offenders_count: u32, validator_set_count: u32) -> Perbill; } /// Errors that may happen on offence reports. #[derive(PartialEq, sp_runtime::RuntimeDebug)] pub enum OffenceError { - /// The report has already been sumbmitted. - DuplicateReport, + /// The report has already been sumbmitted. + DuplicateReport, - /// Other error has happened. - Other(u8), + /// Other error has happened. 
+ Other(u8), } impl sp_runtime::traits::Printable for OffenceError { - fn print(&self) { - "OffenceError".print(); - match self { - Self::DuplicateReport => "DuplicateReport".print(), - Self::Other(e) => { - "Other".print(); - e.print(); - } - } - } + fn print(&self) { + "OffenceError".print(); + match self { + Self::DuplicateReport => "DuplicateReport".print(), + Self::Other(e) => { + "Other".print(); + e.print(); + } + } + } } /// A trait for decoupling offence reporters from the actual handling of offence reports. pub trait ReportOffence> { - /// Report an `offence` and reward given `reporters`. - fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError>; + /// Report an `offence` and reward given `reporters`. + fn report_offence(reporters: Vec, offence: O) -> Result<(), OffenceError>; } impl> ReportOffence for () { - fn report_offence(_reporters: Vec, _offence: O) -> Result<(), OffenceError> { Ok(()) } + fn report_offence(_reporters: Vec, _offence: O) -> Result<(), OffenceError> { + Ok(()) + } } /// A trait to take action on an offence. @@ -129,51 +128,55 @@ impl> ReportOffence { - /// A handler for an offence of a particular kind. - /// - /// Note that this contains a list of all previous offenders - /// as well. The implementer should cater for a case, where - /// the same authorities were reported for the same offence - /// in the past (see `OffenceCount`). - /// - /// The vector of `slash_fraction` contains `Perbill`s - /// the authorities should be slashed and is computed - /// according to the `OffenceCount` already. This is of the same length as `offenders.` - /// Zero is a valid value for a fraction. - /// - /// The `session` parameter is the session index of the offence. - /// - /// The receiver might decide to not accept this offence. In this case, the call site is - /// responsible for queuing the report and re-submitting again. 
- fn on_offence( - offenders: &[OffenceDetails], - slash_fraction: &[Perbill], - session: SessionIndex, - ) -> Result<(), ()>; - - /// Can an offence be reported now or not. This is an method to short-circuit a call into - /// `on_offence`. Ideally, a correct implementation should return `false` if `on_offence` will - /// return `Err`. Nonetheless, this is up to the implementation and this trait cannot guarantee - /// it. - fn can_report() -> bool; + /// A handler for an offence of a particular kind. + /// + /// Note that this contains a list of all previous offenders + /// as well. The implementer should cater for a case, where + /// the same authorities were reported for the same offence + /// in the past (see `OffenceCount`). + /// + /// The vector of `slash_fraction` contains `Perbill`s + /// the authorities should be slashed and is computed + /// according to the `OffenceCount` already. This is of the same length as `offenders.` + /// Zero is a valid value for a fraction. + /// + /// The `session` parameter is the session index of the offence. + /// + /// The receiver might decide to not accept this offence. In this case, the call site is + /// responsible for queuing the report and re-submitting again. + fn on_offence( + offenders: &[OffenceDetails], + slash_fraction: &[Perbill], + session: SessionIndex, + ) -> Result<(), ()>; + + /// Can an offence be reported now or not. This is an method to short-circuit a call into + /// `on_offence`. Ideally, a correct implementation should return `false` if `on_offence` will + /// return `Err`. Nonetheless, this is up to the implementation and this trait cannot guarantee + /// it. 
+ fn can_report() -> bool; } impl OnOffenceHandler for () { - fn on_offence( - _offenders: &[OffenceDetails], - _slash_fraction: &[Perbill], - _session: SessionIndex, - ) -> Result<(), ()> { Ok(()) } - - fn can_report() -> bool { true } + fn on_offence( + _offenders: &[OffenceDetails], + _slash_fraction: &[Perbill], + _session: SessionIndex, + ) -> Result<(), ()> { + Ok(()) + } + + fn can_report() -> bool { + true + } } /// A details about an offending authority for a particular kind of offence. #[derive(Clone, PartialEq, Eq, Encode, Decode, sp_runtime::RuntimeDebug)] pub struct OffenceDetails { - /// The offending authority id - pub offender: Offender, - /// A list of reporters of offences of this authority ID. Possibly empty where there are no - /// particular reporters. - pub reporters: Vec, + /// The offending authority id + pub offender: Offender, + /// A list of reporters of offences of this authority ID. Possibly empty where there are no + /// particular reporters. + pub reporters: Vec, } diff --git a/primitives/state-machine/src/backend.rs b/primitives/state-machine/src/backend.rs index 94144fdb90..0317e12203 100644 --- a/primitives/state-machine/src/backend.rs +++ b/primitives/state-machine/src/backend.rs @@ -16,17 +16,19 @@ //! State machine backends. These manage the code and storage of contracts. 
-use log::warn; -use hash_db::Hasher; use codec::{Decode, Encode}; +use hash_db::Hasher; +use log::warn; -use sp_core::{traits::RuntimeCode, storage::{ChildInfo, OwnedChildInfo, well_known_keys}}; -use sp_trie::{TrieMut, MemoryDB, trie_types::TrieDBMut}; +use sp_core::{ + storage::{well_known_keys, ChildInfo, OwnedChildInfo}, + traits::RuntimeCode, +}; +use sp_trie::{trie_types::TrieDBMut, MemoryDB, TrieMut}; use crate::{ - trie_backend::TrieBackend, - trie_backend_essence::TrieBackendStorage, - UsageInfo, StorageKey, StorageValue, StorageCollection, + trie_backend::TrieBackend, trie_backend_essence::TrieBackendStorage, StorageCollection, + StorageKey, StorageValue, UsageInfo, }; /// A state backend is used to read state data and can have changes committed @@ -34,372 +36,387 @@ use crate::{ /// /// The clone operation (if implemented) should be cheap. pub trait Backend: std::fmt::Debug { - /// An error type when fetching data is not possible. - type Error: super::Error; - - /// Storage changes to be applied if committing - type Transaction: Consolidate + Default + Send; - - /// Type of trie backend storage. - type TrieBackendStorage: TrieBackendStorage; - - /// Get keyed storage or None if there is nothing associated. - fn storage(&self, key: &[u8]) -> Result, Self::Error>; - - /// Get keyed storage value hash or None if there is nothing associated. - fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { - self.storage(key).map(|v| v.map(|v| H::hash(&v))) - } - - /// Get keyed child storage or None if there is nothing associated. - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error>; - - /// Get child keyed storage value hash or None if there is nothing associated. 
- fn child_storage_hash( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.child_storage(storage_key, child_info, key).map(|v| v.map(|v| H::hash(&v))) - } - - /// true if a key exists in storage. - fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.storage(key)?.is_some()) - } - - /// true if a key exists in child storage. - fn exists_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result { - Ok(self.child_storage(storage_key, child_info, key)?.is_some()) - } - - /// Return the next key in storage in lexicographic order or `None` if there is no value. - fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error>; - - /// Return the next key in child storage in lexicographic order or `None` if there is no value. - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8] - ) -> Result, Self::Error>; - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ); - - /// Retrieve all entries keys which start with the given prefix and - /// call `f` for each of those keys. - fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.for_key_values_with_prefix(prefix, |k, _v| f(k)) - } - - /// Retrieve all entries keys and values of which start with the given prefix and - /// call `f` for each of those keys. - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F); - - - /// Retrieve all child entries keys which start with the given prefix and - /// call `f` for each of those keys. - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ); - - /// Calculate the storage root, with given delta over what is already stored in - /// the backend, and produce a "transaction" that can be used to commit. 
- /// Does not include child storage updates. - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator)>, - H::Out: Ord; - - /// Calculate the child storage root, with given delta over what is already stored in - /// the backend, and produce a "transaction" that can be used to commit. The second argument - /// is true if child storage root equals default storage root. - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (H::Out, bool, Self::Transaction) - where - I: IntoIterator)>, - H::Out: Ord; - - /// Get all key/value pairs into a Vec. - fn pairs(&self) -> Vec<(StorageKey, StorageValue)>; - - /// Get all keys with given prefix - fn keys(&self, prefix: &[u8]) -> Vec { - let mut all = Vec::new(); - self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec())); - all - } - - /// Get all keys of child storage with given prefix - fn child_keys( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) -> Vec { - let mut all = Vec::new(); - self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec())); - all - } - - /// Try convert into trie backend. - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { - None - } - - /// Calculate the storage root, with given delta over what is already stored - /// in the backend, and produce a "transaction" that can be used to commit. - /// Does include child storage updates. 
- fn full_storage_root( - &self, - delta: I1, - child_deltas: I2) - -> (H::Out, Self::Transaction) - where - I1: IntoIterator)>, - I2i: IntoIterator)>, - I2: IntoIterator, - H::Out: Ord + Encode, - { - let mut txs: Self::Transaction = Default::default(); - let mut child_roots: Vec<_> = Default::default(); - // child first - for (storage_key, child_delta, child_info) in child_deltas { - let (child_root, empty, child_txs) = - self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); - txs.consolidate(child_txs); - if empty { - child_roots.push((storage_key, None)); - } else { - child_roots.push((storage_key, Some(child_root.encode()))); - } - } - let (root, parent_txs) = self.storage_root( - delta.into_iter().chain(child_roots.into_iter()) - ); - txs.consolidate(parent_txs); - (root, txs) - } - - /// Register stats from overlay of state machine. - /// - /// By default nothing is registered. - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats); - - /// Query backend usage statistics (i/o, memory) - /// - /// Not all implementations are expected to be able to do this. In the - /// case when they don't, empty statistics is returned. - fn usage_info(&self) -> UsageInfo; - - /// Wipe the state database. - fn wipe(&self) -> Result<(), Self::Error> { - unimplemented!() - } - - /// Commit given transaction to storage. - fn commit(&self, _storage_root: H::Out, _transaction: Self::Transaction) -> Result<(), Self::Error> { - unimplemented!() - } + /// An error type when fetching data is not possible. + type Error: super::Error; + + /// Storage changes to be applied if committing + type Transaction: Consolidate + Default + Send; + + /// Type of trie backend storage. + type TrieBackendStorage: TrieBackendStorage; + + /// Get keyed storage or None if there is nothing associated. + fn storage(&self, key: &[u8]) -> Result, Self::Error>; + + /// Get keyed storage value hash or None if there is nothing associated. 
+ fn storage_hash(&self, key: &[u8]) -> Result, Self::Error> { + self.storage(key).map(|v| v.map(|v| H::hash(&v))) + } + + /// Get keyed child storage or None if there is nothing associated. + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error>; + + /// Get child keyed storage value hash or None if there is nothing associated. + fn child_storage_hash( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.child_storage(storage_key, child_info, key) + .map(|v| v.map(|v| H::hash(&v))) + } + + /// true if a key exists in storage. + fn exists_storage(&self, key: &[u8]) -> Result { + Ok(self.storage(key)?.is_some()) + } + + /// true if a key exists in child storage. + fn exists_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result { + Ok(self.child_storage(storage_key, child_info, key)?.is_some()) + } + + /// Return the next key in storage in lexicographic order or `None` if there is no value. + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error>; + + /// Return the next key in child storage in lexicographic order or `None` if there is no value. + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error>; + + /// Retrieve all entries keys of child storage and call `f` for each of those keys. + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ); + + /// Retrieve all entries keys which start with the given prefix and + /// call `f` for each of those keys. + fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { + self.for_key_values_with_prefix(prefix, |k, _v| f(k)) + } + + /// Retrieve all entries keys and values of which start with the given prefix and + /// call `f` for each of those keys. 
+ fn for_key_values_with_prefix(&self, prefix: &[u8], f: F); + + /// Retrieve all child entries keys which start with the given prefix and + /// call `f` for each of those keys. + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ); + + /// Calculate the storage root, with given delta over what is already stored in + /// the backend, and produce a "transaction" that can be used to commit. + /// Does not include child storage updates. + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator)>, + H::Out: Ord; + + /// Calculate the child storage root, with given delta over what is already stored in + /// the backend, and produce a "transaction" that can be used to commit. The second argument + /// is true if child storage root equals default storage root. + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) + where + I: IntoIterator)>, + H::Out: Ord; + + /// Get all key/value pairs into a Vec. + fn pairs(&self) -> Vec<(StorageKey, StorageValue)>; + + /// Get all keys with given prefix + fn keys(&self, prefix: &[u8]) -> Vec { + let mut all = Vec::new(); + self.for_keys_with_prefix(prefix, |k| all.push(k.to_vec())); + all + } + + /// Get all keys of child storage with given prefix + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec { + let mut all = Vec::new(); + self.for_child_keys_with_prefix(storage_key, child_info, prefix, |k| all.push(k.to_vec())); + all + } + + /// Try convert into trie backend. + fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + None + } + + /// Calculate the storage root, with given delta over what is already stored + /// in the backend, and produce a "transaction" that can be used to commit. + /// Does include child storage updates. 
+ fn full_storage_root( + &self, + delta: I1, + child_deltas: I2, + ) -> (H::Out, Self::Transaction) + where + I1: IntoIterator)>, + I2i: IntoIterator)>, + I2: IntoIterator, + H::Out: Ord + Encode, + { + let mut txs: Self::Transaction = Default::default(); + let mut child_roots: Vec<_> = Default::default(); + // child first + for (storage_key, child_delta, child_info) in child_deltas { + let (child_root, empty, child_txs) = + self.child_storage_root(&storage_key[..], child_info.as_ref(), child_delta); + txs.consolidate(child_txs); + if empty { + child_roots.push((storage_key, None)); + } else { + child_roots.push((storage_key, Some(child_root.encode()))); + } + } + let (root, parent_txs) = + self.storage_root(delta.into_iter().chain(child_roots.into_iter())); + txs.consolidate(parent_txs); + (root, txs) + } + + /// Register stats from overlay of state machine. + /// + /// By default nothing is registered. + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats); + + /// Query backend usage statistics (i/o, memory) + /// + /// Not all implementations are expected to be able to do this. In the + /// case when they don't, empty statistics is returned. + fn usage_info(&self) -> UsageInfo; + + /// Wipe the state database. + fn wipe(&self) -> Result<(), Self::Error> { + unimplemented!() + } + + /// Commit given transaction to storage. 
+ fn commit( + &self, + _storage_root: H::Out, + _transaction: Self::Transaction, + ) -> Result<(), Self::Error> { + unimplemented!() + } } impl<'a, T: Backend, H: Hasher> Backend for &'a T { - type Error = T::Error; - type Transaction = T::Transaction; - type TrieBackendStorage = T::TrieBackendStorage; - - fn storage(&self, key: &[u8]) -> Result, Self::Error> { - (*self).storage(key) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - (*self).child_storage(storage_key, child_info, key) - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ) { - (*self).for_keys_in_child_storage(storage_key, child_info, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - (*self).next_storage_key(key) - } - - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - (*self).next_child_storage_key(storage_key, child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - (*self).for_keys_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - (*self).for_child_keys_with_prefix(storage_key, child_info, prefix, f) - } - - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator)>, - H::Out: Ord, - { - (*self).storage_root(delta) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (H::Out, bool, Self::Transaction) - where - I: IntoIterator)>, - H::Out: Ord, - { - (*self).child_storage_root(storage_key, child_info, delta) - } - - fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - (*self).pairs() - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - (*self).for_key_values_with_prefix(prefix, f); - } - - fn register_overlay_stats(&mut self, _stats: 
&crate::stats::StateMachineStats) { } - - fn usage_info(&self) -> UsageInfo { - (*self).usage_info() - } + type Error = T::Error; + type Transaction = T::Transaction; + type TrieBackendStorage = T::TrieBackendStorage; + + fn storage(&self, key: &[u8]) -> Result, Self::Error> { + (*self).storage(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + (*self).child_storage(storage_key, child_info, key) + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + (*self).for_keys_in_child_storage(storage_key, child_info, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { + (*self).next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + (*self).next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + (*self).for_keys_with_prefix(prefix, f) + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + (*self).for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator)>, + H::Out: Ord, + { + (*self).storage_root(delta) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) + where + I: IntoIterator)>, + H::Out: Ord, + { + (*self).child_storage_root(storage_key, child_info, delta) + } + + fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { + (*self).pairs() + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + (*self).for_key_values_with_prefix(prefix, f); + } + + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) {} + + fn usage_info(&self) -> UsageInfo { + 
(*self).usage_info() + } } /// Trait that allows consolidate two transactions together. pub trait Consolidate { - /// Consolidate two transactions into one. - fn consolidate(&mut self, other: Self); + /// Consolidate two transactions into one. + fn consolidate(&mut self, other: Self); } impl Consolidate for () { - fn consolidate(&mut self, _: Self) { - () - } + fn consolidate(&mut self, _: Self) { + () + } } -impl Consolidate for Vec<( - Option<(StorageKey, OwnedChildInfo)>, - StorageCollection, - )> { - fn consolidate(&mut self, mut other: Self) { - self.append(&mut other); - } +impl Consolidate for Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)> { + fn consolidate(&mut self, mut other: Self) { + self.append(&mut other); + } } impl> Consolidate for sp_trie::GenericMemoryDB { - fn consolidate(&mut self, other: Self) { - sp_trie::GenericMemoryDB::consolidate(self, other) - } + fn consolidate(&mut self, other: Self) { + sp_trie::GenericMemoryDB::consolidate(self, other) + } } /// Insert input pairs into memory db. pub(crate) fn insert_into_memory_db(mdb: &mut MemoryDB, input: I) -> Option - where - H: Hasher, - I: IntoIterator, +where + H: Hasher, + I: IntoIterator, { - let mut root = ::Out::default(); - { - let mut trie = TrieDBMut::::new(mdb, &mut root); - for (key, value) in input { - if let Err(e) = trie.insert(&key, &value) { - warn!(target: "trie", "Failed to write to trie: {}", e); - return None; - } - } - } - - Some(root) + let mut root = ::Out::default(); + { + let mut trie = TrieDBMut::::new(mdb, &mut root); + for (key, value) in input { + if let Err(e) = trie.insert(&key, &value) { + warn!(target: "trie", "Failed to write to trie: {}", e); + return None; + } + } + } + + Some(root) } /// Wrapper to create a [`RuntimeCode`] from a type that implements [`Backend`]. 
pub struct BackendRuntimeCode<'a, B, H> { - backend: &'a B, - _marker: std::marker::PhantomData, + backend: &'a B, + _marker: std::marker::PhantomData, } -impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode for - BackendRuntimeCode<'a, B, H> +impl<'a, B: Backend, H: Hasher> sp_core::traits::FetchRuntimeCode + for BackendRuntimeCode<'a, B, H> { - fn fetch_runtime_code<'b>(&'b self) -> Option> { - self.backend.storage(well_known_keys::CODE).ok().flatten().map(Into::into) - } + fn fetch_runtime_code<'b>(&'b self) -> Option> { + self.backend + .storage(well_known_keys::CODE) + .ok() + .flatten() + .map(Into::into) + } } -impl<'a, B: Backend, H: Hasher> BackendRuntimeCode<'a, B, H> where H::Out: Encode { - /// Create a new instance. - pub fn new(backend: &'a B) -> Self { - Self { - backend, - _marker: std::marker::PhantomData, - } - } - - /// Return the [`RuntimeCode`] build from the wrapped `backend`. - pub fn runtime_code(&self) -> Result { - let hash = self.backend.storage_hash(well_known_keys::CODE) - .ok() - .flatten() - .ok_or("`:code` hash not found")? - .encode(); - let heap_pages = self.backend.storage(well_known_keys::HEAP_PAGES) - .ok() - .flatten() - .and_then(|d| Decode::decode(&mut &d[..]).ok()); - - Ok(RuntimeCode { code_fetcher: self, hash, heap_pages }) - } +impl<'a, B: Backend, H: Hasher> BackendRuntimeCode<'a, B, H> +where + H::Out: Encode, +{ + /// Create a new instance. + pub fn new(backend: &'a B) -> Self { + Self { + backend, + _marker: std::marker::PhantomData, + } + } + + /// Return the [`RuntimeCode`] build from the wrapped `backend`. + pub fn runtime_code(&self) -> Result { + let hash = self + .backend + .storage_hash(well_known_keys::CODE) + .ok() + .flatten() + .ok_or("`:code` hash not found")? 
+ .encode(); + let heap_pages = self + .backend + .storage(well_known_keys::HEAP_PAGES) + .ok() + .flatten() + .and_then(|d| Decode::decode(&mut &d[..]).ok()); + + Ok(RuntimeCode { + code_fetcher: self, + hash, + heap_pages, + }) + } } diff --git a/primitives/state-machine/src/basic.rs b/primitives/state-machine/src/basic.rs index b49913418a..47ac65fa76 100644 --- a/primitives/state-machine/src/basic.rs +++ b/primitives/state-machine/src/basic.rs @@ -16,382 +16,429 @@ //! Basic implementation for Externalities. -use std::{ - collections::BTreeMap, any::{TypeId, Any}, iter::FromIterator, ops::Bound -}; use crate::{Backend, InMemoryBackend, StorageKey, StorageValue}; +use codec::Encode; use hash_db::Hasher; -use sp_trie::{TrieConfiguration, default_child_trie_root}; -use sp_trie::trie_types::Layout; +use log::warn; use sp_core::{ - storage::{ - well_known_keys::is_child_storage_key, ChildStorageKey, Storage, - ChildInfo, StorageChild, - }, - traits::Externalities, Blake2Hasher, + storage::{ + well_known_keys::is_child_storage_key, ChildInfo, ChildStorageKey, Storage, StorageChild, + }, + traits::Externalities, + Blake2Hasher, }; -use log::warn; -use codec::Encode; use sp_externalities::Extensions; +use sp_trie::trie_types::Layout; +use sp_trie::{default_child_trie_root, TrieConfiguration}; +use std::{ + any::{Any, TypeId}, + collections::BTreeMap, + iter::FromIterator, + ops::Bound, +}; /// Simple Map-based Externalities impl. #[derive(Debug)] pub struct BasicExternalities { - inner: Storage, - extensions: Extensions, + inner: Storage, + extensions: Extensions, } impl BasicExternalities { - /// Create a new instance of `BasicExternalities` - pub fn new(inner: Storage) -> Self { - BasicExternalities { inner, extensions: Default::default() } - } - - /// New basic externalities with empty storage. - pub fn new_empty() -> Self { - Self::new(Storage::default()) - } - - /// New basic extternalities with tasks executor. 
- pub fn with_tasks_executor() -> Self { - let mut extensions = Extensions::default(); - extensions.register(sp_core::traits::TaskExecutorExt(sp_core::tasks::executor())); - - Self { - inner: Storage::default(), - extensions, - } - } - - /// Insert key/value - pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { - self.inner.top.insert(k, v) - } - - /// Consume self and returns inner storages - pub fn into_storages(self) -> Storage { - self.inner - } - - /// Execute the given closure `f` with the externalities set and initialized with `storage`. - /// - /// Returns the result of the closure and updates `storage` with all changes. - pub fn execute_with_storage( - storage: &mut sp_core::storage::Storage, - f: impl FnOnce() -> R, - ) -> R { - let mut ext = Self { - inner: Storage { - top: std::mem::replace(&mut storage.top, Default::default()), - children: std::mem::replace(&mut storage.children, Default::default()), - }, - extensions: Default::default(), - }; - - let r = ext.execute_with(f); - - *storage = ext.into_storages(); - - r - } - - /// Execute the given closure while `self` is set as externalities. - /// - /// Returns the result of the given closure. - pub fn execute_with(&mut self, f: impl FnOnce() -> R) -> R { - sp_externalities::set_and_run_with_externalities(self, f) - } - - /// List of active extensions. - pub fn extensions(&mut self) -> &mut Extensions { - &mut self.extensions - } + /// Create a new instance of `BasicExternalities` + pub fn new(inner: Storage) -> Self { + BasicExternalities { + inner, + extensions: Default::default(), + } + } + + /// New basic externalities with empty storage. + pub fn new_empty() -> Self { + Self::new(Storage::default()) + } + + /// New basic extternalities with tasks executor. 
+ pub fn with_tasks_executor() -> Self { + let mut extensions = Extensions::default(); + extensions.register(sp_core::traits::TaskExecutorExt(sp_core::tasks::executor())); + + Self { + inner: Storage::default(), + extensions, + } + } + + /// Insert key/value + pub fn insert(&mut self, k: StorageKey, v: StorageValue) -> Option { + self.inner.top.insert(k, v) + } + + /// Consume self and returns inner storages + pub fn into_storages(self) -> Storage { + self.inner + } + + /// Execute the given closure `f` with the externalities set and initialized with `storage`. + /// + /// Returns the result of the closure and updates `storage` with all changes. + pub fn execute_with_storage( + storage: &mut sp_core::storage::Storage, + f: impl FnOnce() -> R, + ) -> R { + let mut ext = Self { + inner: Storage { + top: std::mem::replace(&mut storage.top, Default::default()), + children: std::mem::replace(&mut storage.children, Default::default()), + }, + extensions: Default::default(), + }; + + let r = ext.execute_with(f); + + *storage = ext.into_storages(); + + r + } + + /// Execute the given closure while `self` is set as externalities. + /// + /// Returns the result of the given closure. + pub fn execute_with(&mut self, f: impl FnOnce() -> R) -> R { + sp_externalities::set_and_run_with_externalities(self, f) + } + + /// List of active extensions. 
+ pub fn extensions(&mut self) -> &mut Extensions { + &mut self.extensions + } } impl PartialEq for BasicExternalities { - fn eq(&self, other: &BasicExternalities) -> bool { - self.inner.top.eq(&other.inner.top) - && self.inner.children.eq(&other.inner.children) - } + fn eq(&self, other: &BasicExternalities) -> bool { + self.inner.top.eq(&other.inner.top) && self.inner.children.eq(&other.inner.children) + } } impl FromIterator<(StorageKey, StorageValue)> for BasicExternalities { - fn from_iter>(iter: I) -> Self { - let mut t = Self::default(); - t.inner.top.extend(iter); - t - } + fn from_iter>(iter: I) -> Self { + let mut t = Self::default(); + t.inner.top.extend(iter); + t + } } impl Default for BasicExternalities { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + Self::new(Default::default()) + } } impl From> for BasicExternalities { - fn from(hashmap: BTreeMap) -> Self { - BasicExternalities { - inner: Storage { - top: hashmap, - children: Default::default(), - }, - extensions: Default::default(), - } - } + fn from(hashmap: BTreeMap) -> Self { + BasicExternalities { + inner: Storage { + top: hashmap, + children: Default::default(), + }, + extensions: Default::default(), + } + } } impl Externalities for BasicExternalities { - fn storage(&self, key: &[u8]) -> Option { - self.inner.top.get(key).cloned() - } - - fn storage_hash(&self, key: &[u8]) -> Option> { - self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) - } - - fn child_storage( - &self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, - key: &[u8], - ) -> Option { - self.inner.children.get(storage_key.as_ref()).and_then(|child| child.data.get(key)).cloned() - } - - fn child_storage_hash( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> Option> { - self.child_storage(storage_key, child_info, key).map(|v| Blake2Hasher::hash(&v).encode()) - } - - fn next_storage_key(&self, key: &[u8]) -> Option { - let range = 
(Bound::Excluded(key), Bound::Unbounded); - self.inner.top.range::<[u8], _>(range).next().map(|(k, _)| k).cloned() - } - - fn next_child_storage_key( - &self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, - key: &[u8], - ) -> Option { - let range = (Bound::Excluded(key), Bound::Unbounded); - self.inner.children.get(storage_key.as_ref()) - .and_then(|child| child.data.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()) - } - - fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { - if is_child_storage_key(&key) { - warn!(target: "trie", "Refuse to set child storage key via main storage"); - return; - } - - match maybe_value { - Some(value) => { self.inner.top.insert(key, value); } - None => { self.inner.top.remove(&key); } - } - } - - fn place_child_storage( - &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: StorageKey, - value: Option, - ) { - let child_map = self.inner.children.entry(storage_key.into_owned()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.to_owned(), - }); - if let Some(value) = value { - child_map.data.insert(key, value); - } else { - child_map.data.remove(&key); - } - } - - fn kill_child_storage( - &mut self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, - ) { - self.inner.children.remove(storage_key.as_ref()); - } - - fn clear_prefix(&mut self, prefix: &[u8]) { - if is_child_storage_key(prefix) { - warn!( - target: "trie", - "Refuse to clear prefix that is part of child storage key via main storage" - ); - return; - } - - let to_remove = self.inner.top.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - for key in to_remove { - self.inner.top.remove(&key); - } - } - - fn clear_child_prefix( - &mut self, - storage_key: ChildStorageKey, - _child_info: ChildInfo, - prefix: &[u8], - ) { - if let Some(child) = 
self.inner.children.get_mut(storage_key.as_ref()) { - let to_remove = child.data.range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) - .map(|(k, _)| k) - .take_while(|k| k.starts_with(prefix)) - .cloned() - .collect::>(); - - for key in to_remove { - child.data.remove(&key); - } - } - } - - fn chain_id(&self) -> u64 { 42 } - - fn storage_root(&mut self) -> Vec { - let mut top = self.inner.top.clone(); - let keys: Vec<_> = self.inner.children.keys().map(|k| k.to_vec()).collect(); - // Single child trie implementation currently allows using the same child - // empty root for all child trie. Using null storage key until multiple - // type of child trie support. - let empty_hash = default_child_trie_root::>(&[]); - for storage_key in keys { - let child_root = self.child_storage_root( - ChildStorageKey::from_slice(storage_key.as_slice()) - .expect("Map only feed by valid keys; qed"), - ); - if &empty_hash[..] == &child_root[..] { - top.remove(storage_key.as_slice()); - } else { - top.insert(storage_key, child_root); - } - } - - Layout::::trie_root(self.inner.top.clone()).as_ref().into() - } - - fn child_storage_root( - &mut self, - storage_key: ChildStorageKey, - ) -> Vec { - if let Some(child) = self.inner.children.get(storage_key.as_ref()) { - let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); - - InMemoryBackend::::default() - .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta).0 - } else { - default_child_trie_root::>(storage_key.as_ref()) - }.encode() - } - - fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { - Ok(None) - } - - fn wipe(&mut self) {} - - fn commit(&mut self) {} + fn storage(&self, key: &[u8]) -> Option { + self.inner.top.get(key).cloned() + } + + fn storage_hash(&self, key: &[u8]) -> Option> { + self.storage(key).map(|v| Blake2Hasher::hash(&v).encode()) + } + + fn child_storage( + &self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + key: &[u8], + ) -> Option { + 
self.inner + .children + .get(storage_key.as_ref()) + .and_then(|child| child.data.get(key)) + .cloned() + } + + fn child_storage_hash( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option> { + self.child_storage(storage_key, child_info, key) + .map(|v| Blake2Hasher::hash(&v).encode()) + } + + fn next_storage_key(&self, key: &[u8]) -> Option { + let range = (Bound::Excluded(key), Bound::Unbounded); + self.inner + .top + .range::<[u8], _>(range) + .next() + .map(|(k, _)| k) + .cloned() + } + + fn next_child_storage_key( + &self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + key: &[u8], + ) -> Option { + let range = (Bound::Excluded(key), Bound::Unbounded); + self.inner + .children + .get(storage_key.as_ref()) + .and_then(|child| { + child + .data + .range::<[u8], _>(range) + .next() + .map(|(k, _)| k) + .cloned() + }) + } + + fn place_storage(&mut self, key: StorageKey, maybe_value: Option) { + if is_child_storage_key(&key) { + warn!(target: "trie", "Refuse to set child storage key via main storage"); + return; + } + + match maybe_value { + Some(value) => { + self.inner.top.insert(key, value); + } + None => { + self.inner.top.remove(&key); + } + } + } + + fn place_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: StorageKey, + value: Option, + ) { + let child_map = self + .inner + .children + .entry(storage_key.into_owned()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: child_info.to_owned(), + }); + if let Some(value) = value { + child_map.data.insert(key, value); + } else { + child_map.data.remove(&key); + } + } + + fn kill_child_storage(&mut self, storage_key: ChildStorageKey, _child_info: ChildInfo) { + self.inner.children.remove(storage_key.as_ref()); + } + + fn clear_prefix(&mut self, prefix: &[u8]) { + if is_child_storage_key(prefix) { + warn!( + target: "trie", + "Refuse to clear prefix that is part of child storage key via main 
storage" + ); + return; + } + + let to_remove = self + .inner + .top + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + .map(|(k, _)| k) + .take_while(|k| k.starts_with(prefix)) + .cloned() + .collect::>(); + + for key in to_remove { + self.inner.top.remove(&key); + } + } + + fn clear_child_prefix( + &mut self, + storage_key: ChildStorageKey, + _child_info: ChildInfo, + prefix: &[u8], + ) { + if let Some(child) = self.inner.children.get_mut(storage_key.as_ref()) { + let to_remove = child + .data + .range::<[u8], _>((Bound::Included(prefix), Bound::Unbounded)) + .map(|(k, _)| k) + .take_while(|k| k.starts_with(prefix)) + .cloned() + .collect::>(); + + for key in to_remove { + child.data.remove(&key); + } + } + } + + fn chain_id(&self) -> u64 { + 42 + } + + fn storage_root(&mut self) -> Vec { + let mut top = self.inner.top.clone(); + let keys: Vec<_> = self.inner.children.keys().map(|k| k.to_vec()).collect(); + // Single child trie implementation currently allows using the same child + // empty root for all child trie. Using null storage key until multiple + // type of child trie support. + let empty_hash = default_child_trie_root::>(&[]); + for storage_key in keys { + let child_root = self.child_storage_root( + ChildStorageKey::from_slice(storage_key.as_slice()) + .expect("Map only feed by valid keys; qed"), + ); + if &empty_hash[..] == &child_root[..] 
{ + top.remove(storage_key.as_slice()); + } else { + top.insert(storage_key, child_root); + } + } + + Layout::::trie_root(self.inner.top.clone()) + .as_ref() + .into() + } + + fn child_storage_root(&mut self, storage_key: ChildStorageKey) -> Vec { + if let Some(child) = self.inner.children.get(storage_key.as_ref()) { + let delta = child.data.clone().into_iter().map(|(k, v)| (k, Some(v))); + + InMemoryBackend::::default() + .child_storage_root(storage_key.as_ref(), child.child_info.as_ref(), delta) + .0 + } else { + default_child_trie_root::>(storage_key.as_ref()) + } + .encode() + } + + fn storage_changes_root(&mut self, _parent: &[u8]) -> Result>, ()> { + Ok(None) + } + + fn wipe(&mut self) {} + + fn commit(&mut self) {} } impl sp_externalities::ExtensionStore for BasicExternalities { - fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { - self.extensions.get_mut(type_id) - } - - fn register_extension_with_type_id( - &mut self, - type_id: TypeId, - extension: Box, - ) -> Result<(), sp_externalities::Error> { - self.extensions.register_with_type_id(type_id, extension) - } - - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { - self.extensions - .deregister(type_id) - .ok_or(sp_externalities::Error::ExtensionIsNotRegistered(type_id)) - .map(drop) - } + fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { + self.extensions.get_mut(type_id) + } + + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), sp_externalities::Error> { + self.extensions.register_with_type_id(type_id, extension) + } + + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { + self.extensions + .deregister(type_id) + .ok_or(sp_externalities::Error::ExtensionIsNotRegistered(type_id)) + .map(drop) + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::map; - use 
sp_core::storage::{Storage, StorageChild}; - use sp_core::storage::well_known_keys::CODE; - use hex_literal::hex; - - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - - #[test] - fn commit_should_work() { - let mut ext = BasicExternalities::default(); - ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); - ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); - ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - - assert_eq!(&ext.storage_root()[..], &ROOT); - } - - #[test] - fn set_and_retrieve_code() { - let mut ext = BasicExternalities::default(); - - let code = vec![1, 2, 3]; - ext.set_storage(CODE.to_vec(), code.clone()); - - assert_eq!(&ext.storage(CODE).unwrap(), &code); - } - - #[test] - fn children_works() { - let child_storage = b":child_storage:default:test".to_vec(); - - let mut ext = BasicExternalities::new(Storage { - top: Default::default(), - children: map![ - child_storage.clone() => StorageChild { - data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], - child_info: CHILD_INFO_1.to_owned(), - } - ] - }); - - let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); - - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), Some(b"reindeer".to_vec())); - - ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), Some(b"puppy".to_vec())); - - ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); - - ext.kill_child_storage(child(), CHILD_INFO_1); - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); - } - - #[test] - fn basic_externalities_is_empty() { - // Make sure no values are set by default in `BasicExternalities`. 
- let storage = BasicExternalities::new_empty().into_storages(); - assert!(storage.top.is_empty()); - assert!(storage.children.is_empty()); - } + use super::*; + use hex_literal::hex; + use sp_core::map; + use sp_core::storage::well_known_keys::CODE; + use sp_core::storage::{Storage, StorageChild}; + + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + + #[test] + fn commit_should_work() { + let mut ext = BasicExternalities::default(); + ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); + ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); + ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + + assert_eq!(&ext.storage_root()[..], &ROOT); + } + + #[test] + fn set_and_retrieve_code() { + let mut ext = BasicExternalities::default(); + + let code = vec![1, 2, 3]; + ext.set_storage(CODE.to_vec(), code.clone()); + + assert_eq!(&ext.storage(CODE).unwrap(), &code); + } + + #[test] + fn children_works() { + let child_storage = b":child_storage:default:test".to_vec(); + + let mut ext = BasicExternalities::new(Storage { + top: Default::default(), + children: map![ + child_storage.clone() => StorageChild { + data: map![ b"doe".to_vec() => b"reindeer".to_vec() ], + child_info: CHILD_INFO_1.to_owned(), + } + ], + }); + + let child = || ChildStorageKey::from_vec(child_storage.clone()).unwrap(); + + assert_eq!( + ext.child_storage(child(), CHILD_INFO_1, b"doe"), + Some(b"reindeer".to_vec()) + ); + + ext.set_child_storage(child(), CHILD_INFO_1, b"dog".to_vec(), b"puppy".to_vec()); + assert_eq!( + ext.child_storage(child(), CHILD_INFO_1, b"dog"), + Some(b"puppy".to_vec()) + ); + + ext.clear_child_storage(child(), CHILD_INFO_1, b"dog"); + assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"dog"), None); + + ext.kill_child_storage(child(), CHILD_INFO_1); + assert_eq!(ext.child_storage(child(), CHILD_INFO_1, b"doe"), None); + } + + 
#[test] + fn basic_externalities_is_empty() { + // Make sure no values are set by default in `BasicExternalities`. + let storage = BasicExternalities::new_empty().into_storages(); + assert!(storage.top.is_empty()); + assert!(storage.children.is_empty()); + } } diff --git a/primitives/state-machine/src/changes_trie/build.rs b/primitives/state-machine/src/changes_trie/build.rs index 39ad81ed59..4160ec5d9e 100644 --- a/primitives/state-machine/src/changes_trie/build.rs +++ b/primitives/state-machine/src/changes_trie/build.rs @@ -16,761 +16,1544 @@ //! Structures and functions required to build changes trie for given block. -use std::collections::{BTreeMap, BTreeSet}; -use std::collections::btree_map::Entry; +use crate::{ + backend::Backend, + changes_trie::{ + build_iterator::digest_build_iterator, + input::{ChildIndex, DigestIndex, ExtrinsicIndex, InputKey, InputPair}, + AnchorBlockId, BlockNumber, ConfigurationRange, Storage, + }, + overlayed_changes::OverlayedChanges, + trie_backend_essence::TrieBackendEssence, + StorageKey, +}; use codec::{Decode, Encode}; use hash_db::Hasher; use num_traits::One; -use crate::{ - StorageKey, - backend::Backend, - overlayed_changes::OverlayedChanges, - trie_backend_essence::TrieBackendEssence, - changes_trie::{ - AnchorBlockId, ConfigurationRange, Storage, BlockNumber, - build_iterator::digest_build_iterator, - input::{InputKey, InputPair, DigestIndex, ExtrinsicIndex, ChildIndex}, - }, -}; +use std::collections::btree_map::Entry; +use std::collections::{BTreeMap, BTreeSet}; /// Prepare input pairs for building a changes trie of given block. /// /// Returns Err if storage error has occurred OR if storage haven't returned /// required data. 
pub(crate) fn prepare_input<'a, B, H, Number>( - backend: &'a B, - storage: &'a dyn Storage, - config: ConfigurationRange<'a, Number>, - changes: &'a OverlayedChanges, - parent: &'a AnchorBlockId, -) -> Result<( - impl Iterator> + 'a, - Vec<(ChildIndex, impl Iterator> + 'a)>, - Vec, - ), String> - where - B: Backend, - H: Hasher + 'a, - H::Out: Encode, - Number: BlockNumber, + backend: &'a B, + storage: &'a dyn Storage, + config: ConfigurationRange<'a, Number>, + changes: &'a OverlayedChanges, + parent: &'a AnchorBlockId, +) -> Result< + ( + impl Iterator> + 'a, + Vec<( + ChildIndex, + impl Iterator> + 'a, + )>, + Vec, + ), + String, +> +where + B: Backend, + H: Hasher + 'a, + H::Out: Encode, + Number: BlockNumber, { - let number = parent.number.clone() + One::one(); - let (extrinsics_input, children_extrinsics_input) = prepare_extrinsics_input( - backend, - &number, - changes, - )?; - let (digest_input, mut children_digest_input, digest_input_blocks) = prepare_digest_input::( - parent, - config, - number, - storage, - )?; - - let mut children_digest = Vec::with_capacity(children_extrinsics_input.len()); - for (child_index, ext_iter) in children_extrinsics_input.into_iter() { - let dig_iter = children_digest_input.remove(&child_index); - children_digest.push(( - child_index, - Some(ext_iter).into_iter().flatten() - .chain(dig_iter.into_iter().flatten()), - )); - } - for (child_index, dig_iter) in children_digest_input.into_iter() { - children_digest.push(( - child_index, - None.into_iter().flatten() - .chain(Some(dig_iter).into_iter().flatten()), - )); - } - - Ok(( - extrinsics_input.chain(digest_input), - children_digest, - digest_input_blocks, - )) + let number = parent.number.clone() + One::one(); + let (extrinsics_input, children_extrinsics_input) = + prepare_extrinsics_input(backend, &number, changes)?; + let (digest_input, mut children_digest_input, digest_input_blocks) = + prepare_digest_input::(parent, config, number, storage)?; + + let mut children_digest 
= Vec::with_capacity(children_extrinsics_input.len()); + for (child_index, ext_iter) in children_extrinsics_input.into_iter() { + let dig_iter = children_digest_input.remove(&child_index); + children_digest.push(( + child_index, + Some(ext_iter) + .into_iter() + .flatten() + .chain(dig_iter.into_iter().flatten()), + )); + } + for (child_index, dig_iter) in children_digest_input.into_iter() { + children_digest.push(( + child_index, + None.into_iter() + .flatten() + .chain(Some(dig_iter).into_iter().flatten()), + )); + } + + Ok(( + extrinsics_input.chain(digest_input), + children_digest, + digest_input_blocks, + )) } /// Prepare ExtrinsicIndex input pairs. fn prepare_extrinsics_input<'a, B, H, Number>( - backend: &'a B, - block: &Number, - changes: &'a OverlayedChanges, -) -> Result<( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - ), String> - where - B: Backend, - H: Hasher + 'a, - Number: BlockNumber, + backend: &'a B, + block: &Number, + changes: &'a OverlayedChanges, +) -> Result< + ( + impl Iterator> + 'a, + BTreeMap, impl Iterator> + 'a>, + ), + String, +> +where + B: Backend, + H: Hasher + 'a, + Number: BlockNumber, { - - let mut children_keys = BTreeSet::::new(); - let mut children_result = BTreeMap::new(); - for (storage_key, _) in changes.prospective.children.iter() - .chain(changes.committed.children.iter()) { - children_keys.insert(storage_key.clone()); - } - for storage_key in children_keys { - let child_index = ChildIndex:: { - block: block.clone(), - storage_key: storage_key.clone(), - }; - - let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(storage_key))?; - children_result.insert(child_index, iter); - } - - let top = prepare_extrinsics_input_inner(backend, block, changes, None)?; - - Ok((top, children_result)) + let mut children_keys = BTreeSet::::new(); + let mut children_result = BTreeMap::new(); + for (storage_key, _) in changes + .prospective + .children + .iter() + .chain(changes.committed.children.iter()) + { + 
children_keys.insert(storage_key.clone()); + } + for storage_key in children_keys { + let child_index = ChildIndex:: { + block: block.clone(), + storage_key: storage_key.clone(), + }; + + let iter = prepare_extrinsics_input_inner(backend, block, changes, Some(storage_key))?; + children_result.insert(child_index, iter); + } + + let top = prepare_extrinsics_input_inner(backend, block, changes, None)?; + + Ok((top, children_result)) } fn prepare_extrinsics_input_inner<'a, B, H, Number>( - backend: &'a B, - block: &Number, - changes: &'a OverlayedChanges, - storage_key: Option, -) -> Result> + 'a, String> - where - B: Backend, - H: Hasher, - Number: BlockNumber, + backend: &'a B, + block: &Number, + changes: &'a OverlayedChanges, + storage_key: Option, +) -> Result> + 'a, String> +where + B: Backend, + H: Hasher, + Number: BlockNumber, { - let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { - let child_info = changes.child_info(sk).cloned(); - ( - changes.committed.children.get(sk).map(|c| &c.0), - changes.prospective.children.get(sk).map(|c| &c.0), - child_info, - ) - } else { - (Some(&changes.committed.top), Some(&changes.prospective.top), None) - }; - committed.iter().flat_map(|c| c.iter()) - .chain(prospective.iter().flat_map(|c| c.iter())) - .filter(|( _, v)| v.extrinsics.is_some()) - .try_fold(BTreeMap::new(), |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, v)| { - match map.entry(k) { - Entry::Vacant(entry) => { - // ignore temporary values (values that have null value at the end of operation - // AND are not in storage at the beginning of operation - if let Some(sk) = storage_key.as_ref() { - if !changes.child_storage(sk, k).map(|v| v.is_some()).unwrap_or_default() { - if let Some(child_info) = child_info.as_ref() { - if !backend.exists_child_storage(sk, child_info.as_ref(), k) - .map_err(|e| format!("{}", e))? 
{ - return Ok(map); - } - } - } - } else { - if !changes.storage(k).map(|v| v.is_some()).unwrap_or_default() { - if !backend.exists_storage(k).map_err(|e| format!("{}", e))? { - return Ok(map); - } - } - }; - - let extrinsics = v.extrinsics.as_ref() - .expect("filtered by filter() call above; qed") - .iter().cloned().collect(); - entry.insert((ExtrinsicIndex { - block: block.clone(), - key: k.to_vec(), - }, extrinsics)); - }, - Entry::Occupied(mut entry) => { - // we do not need to check for temporary values here, because entry is Occupied - // AND we are checking it before insertion - let extrinsics = &mut entry.get_mut().1; - extrinsics.extend( - v.extrinsics.as_ref() - .expect("filtered by filter() call above; qed") - .iter() - .cloned() - ); - extrinsics.sort_unstable(); - }, - } - - Ok(map) - }) - .map(|pairs| pairs.into_iter().map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v))) + let (committed, prospective, child_info) = if let Some(sk) = storage_key.as_ref() { + let child_info = changes.child_info(sk).cloned(); + ( + changes.committed.children.get(sk).map(|c| &c.0), + changes.prospective.children.get(sk).map(|c| &c.0), + child_info, + ) + } else { + ( + Some(&changes.committed.top), + Some(&changes.prospective.top), + None, + ) + }; + committed + .iter() + .flat_map(|c| c.iter()) + .chain(prospective.iter().flat_map(|c| c.iter())) + .filter(|(_, v)| v.extrinsics.is_some()) + .try_fold( + BTreeMap::new(), + |mut map: BTreeMap<&[u8], (ExtrinsicIndex, Vec)>, (k, v)| { + match map.entry(k) { + Entry::Vacant(entry) => { + // ignore temporary values (values that have null value at the end of operation + // AND are not in storage at the beginning of operation + if let Some(sk) = storage_key.as_ref() { + if !changes + .child_storage(sk, k) + .map(|v| v.is_some()) + .unwrap_or_default() + { + if let Some(child_info) = child_info.as_ref() { + if !backend + .exists_child_storage(sk, child_info.as_ref(), k) + .map_err(|e| format!("{}", e))? 
+ { + return Ok(map); + } + } + } + } else { + if !changes.storage(k).map(|v| v.is_some()).unwrap_or_default() { + if !backend.exists_storage(k).map_err(|e| format!("{}", e))? { + return Ok(map); + } + } + }; + + let extrinsics = v + .extrinsics + .as_ref() + .expect("filtered by filter() call above; qed") + .iter() + .cloned() + .collect(); + entry.insert(( + ExtrinsicIndex { + block: block.clone(), + key: k.to_vec(), + }, + extrinsics, + )); + } + Entry::Occupied(mut entry) => { + // we do not need to check for temporary values here, because entry is Occupied + // AND we are checking it before insertion + let extrinsics = &mut entry.get_mut().1; + extrinsics.extend( + v.extrinsics + .as_ref() + .expect("filtered by filter() call above; qed") + .iter() + .cloned(), + ); + extrinsics.sort_unstable(); + } + } + + Ok(map) + }, + ) + .map(|pairs| { + pairs + .into_iter() + .map(|(_, (k, v))| InputPair::ExtrinsicIndex(k, v)) + }) } - /// Prepare DigestIndex input pairs. fn prepare_digest_input<'a, H, Number>( - parent: &'a AnchorBlockId, - config: ConfigurationRange, - block: Number, - storage: &'a dyn Storage, -) -> Result<( - impl Iterator> + 'a, - BTreeMap, impl Iterator> + 'a>, - Vec, - ), String> - where - H: Hasher, - H::Out: 'a + Encode, - Number: BlockNumber, + parent: &'a AnchorBlockId, + config: ConfigurationRange, + block: Number, + storage: &'a dyn Storage, +) -> Result< + ( + impl Iterator> + 'a, + BTreeMap, impl Iterator> + 'a>, + Vec, + ), + String, +> +where + H: Hasher, + H::Out: 'a + Encode, + Number: BlockNumber, { - let build_skewed_digest = config.end.as_ref() == Some(&block); - let block_for_digest = if build_skewed_digest { - config.config.next_max_level_digest_range(config.zero.clone(), block.clone()) - .map(|(_, end)| end) - .unwrap_or_else(|| block.clone()) - } else { - block.clone() - }; - - let digest_input_blocks = digest_build_iterator(config, block_for_digest).collect::>(); - digest_input_blocks.clone().into_iter() - .try_fold( - 
(BTreeMap::new(), BTreeMap::new()), move |(mut map, mut child_map), digest_build_block| { - let extrinsic_prefix = ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); - let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); - let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); - let trie_root = storage.root(parent, digest_build_block.clone())?; - let trie_root = trie_root.ok_or_else(|| format!("No changes trie root for block {}", digest_build_block.clone()))?; - - let insert_to_map = |map: &mut BTreeMap<_,_>, key: StorageKey| { - match map.entry(key.clone()) { - Entry::Vacant(entry) => { - entry.insert((DigestIndex { - block: block.clone(), - key, - }, vec![digest_build_block.clone()])); - }, - Entry::Occupied(mut entry) => { - // DigestIndexValue must be sorted. Here we are relying on the fact that digest_build_iterator() - // returns blocks in ascending order => we only need to check for duplicates - // - // is_dup_block could be true when key has been changed in both digest block - // AND other blocks that it covers - let is_dup_block = entry.get().1.last() == Some(&digest_build_block); - if !is_dup_block { - entry.get_mut().1.push(digest_build_block.clone()); - } - }, - } - }; - - // try to get all updated keys from cache - let populated_from_cache = storage.with_cached_changed_keys( - &trie_root, - &mut |changed_keys| { - for (storage_key, changed_keys) in changed_keys { - let map = match storage_key { - Some(storage_key) => child_map - .entry(ChildIndex:: { - block: block.clone(), - storage_key: storage_key.clone(), - }) - .or_default(), - None => &mut map, - }; - for changed_key in changed_keys.iter().cloned() { - insert_to_map(map, changed_key); - } - } - } - ); - if populated_from_cache { - return Ok((map, child_map)); - } - - let mut children_roots = BTreeMap::::new(); - { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - 
trie_root, - ); - - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| - if let Ok(InputKey::ChildIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.insert(trie_key.storage_key, trie_root); - } - }); - - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); - }); - - trie_storage.for_keys_with_prefix(&digest_prefix, |key| - if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); - }); - } - - for (storage_key, trie_root) in children_roots.into_iter() { - let child_index = ChildIndex:: { - block: block.clone(), - storage_key, - }; - - let mut map = child_map.entry(child_index).or_default(); - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - trie_root, - ); - trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| - if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); - }); - - trie_storage.for_keys_with_prefix(&digest_prefix, |key| - if let Ok(InputKey::DigestIndex::(trie_key)) = Decode::decode(&mut &key[..]) { - insert_to_map(&mut map, trie_key.key); - }); - } - Ok((map, child_map)) - }) - .map(|(pairs, child_pairs)| ( - pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)), - child_pairs.into_iter().map(|(sk, pairs)| - (sk, pairs.into_iter().map(|(_, (k, v))| InputPair::DigestIndex(k, v)))).collect(), - digest_input_blocks, - )) + let build_skewed_digest = config.end.as_ref() == Some(&block); + let block_for_digest = if build_skewed_digest { + config + .config + .next_max_level_digest_range(config.zero.clone(), block.clone()) + .map(|(_, end)| end) + 
.unwrap_or_else(|| block.clone()) + } else { + block.clone() + }; + + let digest_input_blocks = digest_build_iterator(config, block_for_digest).collect::>(); + digest_input_blocks + .clone() + .into_iter() + .try_fold( + (BTreeMap::new(), BTreeMap::new()), + move |(mut map, mut child_map), digest_build_block| { + let extrinsic_prefix = + ExtrinsicIndex::key_neutral_prefix(digest_build_block.clone()); + let digest_prefix = DigestIndex::key_neutral_prefix(digest_build_block.clone()); + let child_prefix = ChildIndex::key_neutral_prefix(digest_build_block.clone()); + let trie_root = storage.root(parent, digest_build_block.clone())?; + let trie_root = trie_root.ok_or_else(|| { + format!( + "No changes trie root for block {}", + digest_build_block.clone() + ) + })?; + + let insert_to_map = |map: &mut BTreeMap<_, _>, key: StorageKey| { + match map.entry(key.clone()) { + Entry::Vacant(entry) => { + entry.insert(( + DigestIndex { + block: block.clone(), + key, + }, + vec![digest_build_block.clone()], + )); + } + Entry::Occupied(mut entry) => { + // DigestIndexValue must be sorted. 
Here we are relying on the fact that digest_build_iterator() + // returns blocks in ascending order => we only need to check for duplicates + // + // is_dup_block could be true when key has been changed in both digest block + // AND other blocks that it covers + let is_dup_block = entry.get().1.last() == Some(&digest_build_block); + if !is_dup_block { + entry.get_mut().1.push(digest_build_block.clone()); + } + } + } + }; + + // try to get all updated keys from cache + let populated_from_cache = + storage.with_cached_changed_keys(&trie_root, &mut |changed_keys| { + for (storage_key, changed_keys) in changed_keys { + let map = match storage_key { + Some(storage_key) => child_map + .entry(ChildIndex:: { + block: block.clone(), + storage_key: storage_key.clone(), + }) + .or_default(), + None => &mut map, + }; + for changed_key in changed_keys.iter().cloned() { + insert_to_map(map, changed_key); + } + } + }); + if populated_from_cache { + return Ok((map, child_map)); + } + + let mut children_roots = BTreeMap::::new(); + { + let trie_storage = TrieBackendEssence::<_, H>::new( + crate::changes_trie::TrieBackendStorageAdapter(storage), + trie_root, + ); + + trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { + if let Ok(InputKey::ChildIndex::(trie_key)) = + Decode::decode(&mut &key[..]) + { + if let Ok(value) = >::decode(&mut &value[..]) { + let mut trie_root = ::Out::default(); + trie_root.as_mut().copy_from_slice(&value[..]); + children_roots.insert(trie_key.storage_key, trie_root); + } + } + }); + + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| { + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = + Decode::decode(&mut &key[..]) + { + insert_to_map(&mut map, trie_key.key); + } + }); + + trie_storage.for_keys_with_prefix(&digest_prefix, |key| { + if let Ok(InputKey::DigestIndex::(trie_key)) = + Decode::decode(&mut &key[..]) + { + insert_to_map(&mut map, trie_key.key); + } + }); + } + + for (storage_key, trie_root) in 
children_roots.into_iter() { + let child_index = ChildIndex:: { + block: block.clone(), + storage_key, + }; + + let mut map = child_map.entry(child_index).or_default(); + let trie_storage = TrieBackendEssence::<_, H>::new( + crate::changes_trie::TrieBackendStorageAdapter(storage), + trie_root, + ); + trie_storage.for_keys_with_prefix(&extrinsic_prefix, |key| { + if let Ok(InputKey::ExtrinsicIndex::(trie_key)) = + Decode::decode(&mut &key[..]) + { + insert_to_map(&mut map, trie_key.key); + } + }); + + trie_storage.for_keys_with_prefix(&digest_prefix, |key| { + if let Ok(InputKey::DigestIndex::(trie_key)) = + Decode::decode(&mut &key[..]) + { + insert_to_map(&mut map, trie_key.key); + } + }); + } + Ok((map, child_map)) + }, + ) + .map(|(pairs, child_pairs)| { + ( + pairs + .into_iter() + .map(|(_, (k, v))| InputPair::DigestIndex(k, v)), + child_pairs + .into_iter() + .map(|(sk, pairs)| { + ( + sk, + pairs + .into_iter() + .map(|(_, (k, v))| InputPair::DigestIndex(k, v)), + ) + }) + .collect(), + digest_input_blocks, + ) + }) } #[cfg(test)] mod test { - use codec::Encode; - use sp_core::Blake2Hasher; - use sp_core::storage::well_known_keys::EXTRINSIC_INDEX; - use sp_core::storage::ChildInfo; - use crate::InMemoryBackend; - use crate::changes_trie::{RootsStorage, Configuration, storage::InMemoryStorage}; - use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; - use crate::overlayed_changes::{OverlayedValue, OverlayedChangeSet}; - use super::*; - - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); - - fn prepare_for_build(zero: u64) -> ( - InMemoryBackend, - InMemoryStorage, - OverlayedChanges, - Configuration, - ) { - let backend: InMemoryBackend<_> = vec![ - (vec![100], vec![255]), - (vec![101], vec![255]), - (vec![102], vec![255]), - (vec![103], vec![255]), - (vec![104], vec![255]), - (vec![105], vec![255]), - 
].into_iter().collect::>().into(); - let child_trie_key1 = b"1".to_vec(); - let child_trie_key2 = b"2".to_vec(); - let storage = InMemoryStorage::with_inputs(vec![ - (zero + 1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![105] }, vec![0, 2, 4]), - ]), - (zero + 2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0]), - ]), - (zero + 3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![100] }, vec![0]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 3, key: vec![105] }, vec![1]), - ]), - (zero + 4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]), - (zero + 5, Vec::new()), - (zero + 6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 6, key: vec![105] }, vec![2]), - ]), - (zero + 7, Vec::new()), - (zero + 8, vec![ - InputPair::DigestIndex(DigestIndex { block: zero + 8, key: vec![105] }, vec![zero + 6]), - ]), - (zero + 9, Vec::new()), (zero + 10, Vec::new()), (zero + 11, Vec::new()), (zero + 12, Vec::new()), - (zero + 13, Vec::new()), (zero + 14, Vec::new()), (zero + 15, Vec::new()), - ], vec![(child_trie_key1.clone(), vec![ - (zero + 1, 
vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![100] }, vec![1, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![101] }, vec![0, 2]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 1, key: vec![105] }, vec![0, 2, 4]), - ]), - (zero + 2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0]), - ]), - (zero + 4, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 2, key: vec![102] }, vec![0, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - ]), - ]), - ]); - let changes = OverlayedChanges { - prospective: OverlayedChangeSet { top: vec![ - (vec![100], OverlayedValue { - value: Some(vec![200]), - extrinsics: Some(vec![0, 2].into_iter().collect()) - }), - (vec![103], OverlayedValue { - value: None, - extrinsics: Some(vec![0, 1].into_iter().collect()) - }), - ].into_iter().collect(), - children: vec![ - (child_trie_key1.clone(), (vec![ - (vec![100], OverlayedValue { - value: Some(vec![200]), - extrinsics: Some(vec![0, 2].into_iter().collect()) - }) - ].into_iter().collect(), CHILD_INFO_1.to_owned())), - (child_trie_key2, (vec![ - (vec![100], OverlayedValue { - value: Some(vec![200]), - extrinsics: Some(vec![0, 2].into_iter().collect()) - }) - ].into_iter().collect(), CHILD_INFO_2.to_owned())), - ].into_iter().collect() - }, - committed: OverlayedChangeSet { top: vec![ - (EXTRINSIC_INDEX.to_vec(), OverlayedValue { - value: Some(3u32.encode()), - extrinsics: None, - }), - (vec![100], OverlayedValue { - value: Some(vec![202]), - extrinsics: Some(vec![3].into_iter().collect()) - }), - (vec![101], OverlayedValue { - value: Some(vec![203]), - extrinsics: Some(vec![1].into_iter().collect()) - }), - ].into_iter().collect(), - children: vec![ - (child_trie_key1, (vec![ - (vec![100], OverlayedValue { - value: Some(vec![202]), - extrinsics: Some(vec![3].into_iter().collect()) - }) - ].into_iter().collect(), 
CHILD_INFO_1.to_owned())), - ].into_iter().collect(), - }, - collect_extrinsics: true, - stats: Default::default(), - }; - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - - (backend, storage, changes, config) - } - - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } - } - - #[test] - fn build_changes_trie_nodes_on_non_digest_block() { - fn test_with_zero(zero: u64) { - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 4 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![103] }, vec![0, 1]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 5u64, storage_key: b"1".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5u64, key: vec![100] }, vec![0, 2, 3]), - ]), - (ChildIndex { block: zero + 5, storage_key: b"2".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 5, key: vec![100] }, vec![0, 2]), - ]), - ]); - - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l1() { - fn test_with_zero(zero: u64) { - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - 
configuration_range(&config, zero), - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), - ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), - ]), - ]); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_digest_block_l2() { - fn test_with_zero(zero: u64) { - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 15 }; - let changes_trie_nodes = prepare_input( - &backend, - 
&storage, - configuration_range(&config, zero), - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![100] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![101] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![103] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![105] }, vec![zero + 4, zero + 8]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 16u64, storage_key: b"1".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 16, key: vec![102] }, vec![zero + 4]), - ]), - (ChildIndex { block: zero + 16, storage_key: b"2".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 16, key: vec![100] }, vec![0, 2]), - ]), - ]); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_on_skewed_digest_block() { - fn test_with_zero(zero: u64) { - let (backend, storage, changes, config) = prepare_for_build(zero); - let parent = AnchorBlockId { hash: Default::default(), number: zero + 10 }; - - let mut configuration_range = configuration_range(&config, zero); - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range.clone(), - &changes, - &parent, - ).unwrap(); - 
assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), - ]); - - configuration_range.end = Some(zero + 11); - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range, - &changes, - &parent, - ).unwrap(); - assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 11, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![100] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![101] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![102] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![103] }, vec![zero + 4]), - InputPair::DigestIndex(DigestIndex { block: zero + 11, key: vec![105] }, vec![zero + 4, zero + 8]), - ]); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn build_changes_trie_nodes_ignores_temporary_storage_values() { - fn test_with_zero(zero: u64) { - let (backend, storage, mut changes, config) = prepare_for_build(zero); - - // 110: missing from backend, set to None in overlay - changes.prospective.top.insert(vec![110], OverlayedValue { - value: None, - extrinsics: Some(vec![1].into_iter().collect()) - }); - - let parent = AnchorBlockId { hash: Default::default(), number: zero + 3 }; - let changes_trie_nodes = prepare_input( - &backend, - &storage, - configuration_range(&config, zero), - &changes, - &parent, - ).unwrap(); - 
assert_eq!(changes_trie_nodes.0.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1, zero + 3]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1, zero + 3]), - ]); - assert_eq!(changes_trie_nodes.1.into_iter() - .map(|(k,v)| (k, v.collect::>())).collect::>(), vec![ - (ChildIndex { block: zero + 4u64, storage_key: b"1".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![100] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![101] }, vec![zero + 1]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![102] }, vec![zero + 2]), - InputPair::DigestIndex(DigestIndex { block: zero + 4, key: vec![105] }, vec![zero + 1]), - ]), - (ChildIndex { block: zero + 4, storage_key: b"2".to_vec() }, - vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: zero + 4, key: vec![100] }, vec![0, 2]), - ]), - ]); - - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn cache_is_used_when_changes_trie_is_built() { - let (backend, mut storage, changes, config) = prepare_for_build(0); - let parent = AnchorBlockId { hash: Default::default(), number: 15 }; - - // override some actual values from storage with values from the cache - // - // top-level storage: - // (keys 100, 101, 103, 105 are now missing from block#4 => they 
do not appear - // in l2 digest at block 16) - // - // "1" child storage: - // key 102 is now missing from block#4 => it doesn't appear in l2 digest at block 16 - // (keys 103, 104) are now added to block#4 => they appear in l2 digest at block 16 - // - // "2" child storage: - // (keys 105, 106) are now added to block#4 => they appear in l2 digest at block 16 - let trie_root4 = storage.root(&parent, 4).unwrap().unwrap(); - let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) - .set_digest_input_blocks(vec![1, 2, 3]) - .insert(None, vec![vec![100], vec![102]].into_iter().collect()) - .insert(Some(b"1".to_vec()), vec![vec![103], vec![104]].into_iter().collect()) - .insert(Some(b"2".to_vec()), vec![vec![105], vec![106]].into_iter().collect()) - .complete(4, &trie_root4); - storage.cache_mut().perform(cached_data4); - - let (root_changes_trie_nodes, child_changes_tries_nodes, _) = prepare_input( - &backend, - &storage, - configuration_range(&config, 0), - &changes, - &parent, - ).unwrap(); - assert_eq!(root_changes_trie_nodes.collect::>>(), vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![100] }, vec![0, 2, 3]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![101] }, vec![1]), - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![103] }, vec![0, 1]), - - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![100] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![102] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![105] }, vec![8]), - ]); - - let child_changes_tries_nodes = child_changes_tries_nodes - .into_iter() - .map(|(k, i)| (k, i.collect::>())) - .collect::>(); - assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"1".to_vec() }).unwrap(), - &vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2, 3]), - - InputPair::DigestIndex(DigestIndex { block: 
16u64, key: vec![103] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![104] }, vec![4]), - ], - ); - assert_eq!( - child_changes_tries_nodes.get(&ChildIndex { block: 16u64, storage_key: b"2".to_vec() }).unwrap(), - &vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16u64, key: vec![100] }, vec![0, 2]), - - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![105] }, vec![4]), - InputPair::DigestIndex(DigestIndex { block: 16u64, key: vec![106] }, vec![4]), - ], - ); - } + use super::*; + use crate::changes_trie::build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}; + use crate::changes_trie::{storage::InMemoryStorage, Configuration, RootsStorage}; + use crate::overlayed_changes::{OverlayedChangeSet, OverlayedValue}; + use crate::InMemoryBackend; + use codec::Encode; + use sp_core::storage::well_known_keys::EXTRINSIC_INDEX; + use sp_core::storage::ChildInfo; + use sp_core::Blake2Hasher; + + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + + fn prepare_for_build( + zero: u64, + ) -> ( + InMemoryBackend, + InMemoryStorage, + OverlayedChanges, + Configuration, + ) { + let backend: InMemoryBackend<_> = vec![ + (vec![100], vec![255]), + (vec![101], vec![255]), + (vec![102], vec![255]), + (vec![103], vec![255]), + (vec![104], vec![255]), + (vec![105], vec![255]), + ] + .into_iter() + .collect::>() + .into(); + let child_trie_key1 = b"1".to_vec(); + let child_trie_key2 = b"2".to_vec(); + let storage = InMemoryStorage::with_inputs( + vec![ + ( + zero + 1, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 1, + key: vec![100], + }, + vec![1, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 1, + key: vec![101], + }, + vec![0, 2], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 1, + key: vec![105], + }, + vec![0, 2, 4], + ), + ], + ), + ( + zero 
+ 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 2, + key: vec![102], + }, + vec![0], + )], + ), + ( + zero + 3, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 3, + key: vec![100], + }, + vec![0], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 3, + key: vec![105], + }, + vec![1], + ), + ], + ), + ( + zero + 4, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![100], + }, + vec![0, 2, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![101], + }, + vec![1], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![103], + }, + vec![0, 1], + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![100], + }, + vec![zero + 1, zero + 3], + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![101], + }, + vec![zero + 1], + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![102], + }, + vec![zero + 2], + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![105], + }, + vec![zero + 1, zero + 3], + ), + ], + ), + (zero + 5, Vec::new()), + ( + zero + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 6, + key: vec![105], + }, + vec![2], + )], + ), + (zero + 7, Vec::new()), + ( + zero + 8, + vec![InputPair::DigestIndex( + DigestIndex { + block: zero + 8, + key: vec![105], + }, + vec![zero + 6], + )], + ), + (zero + 9, Vec::new()), + (zero + 10, Vec::new()), + (zero + 11, Vec::new()), + (zero + 12, Vec::new()), + (zero + 13, Vec::new()), + (zero + 14, Vec::new()), + (zero + 15, Vec::new()), + ], + vec![( + child_trie_key1.clone(), + vec![ + ( + zero + 1, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 1, + key: vec![100], + }, + vec![1, 3], + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 1, + key: vec![101], + }, + vec![0, 2], + ), + 
InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 1, + key: vec![105], + }, + vec![0, 2, 4], + ), + ], + ), + ( + zero + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 2, + key: vec![102], + }, + vec![0], + )], + ), + ( + zero + 4, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 2, + key: vec![102], + }, + vec![0, 3], + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![102], + }, + vec![zero + 2], + ), + ], + ), + ], + )], + ); + let changes = OverlayedChanges { + prospective: OverlayedChangeSet { + top: vec![ + ( + vec![100], + OverlayedValue { + value: Some(vec![200]), + extrinsics: Some(vec![0, 2].into_iter().collect()), + }, + ), + ( + vec![103], + OverlayedValue { + value: None, + extrinsics: Some(vec![0, 1].into_iter().collect()), + }, + ), + ] + .into_iter() + .collect(), + children: vec![ + ( + child_trie_key1.clone(), + ( + vec![( + vec![100], + OverlayedValue { + value: Some(vec![200]), + extrinsics: Some(vec![0, 2].into_iter().collect()), + }, + )] + .into_iter() + .collect(), + CHILD_INFO_1.to_owned(), + ), + ), + ( + child_trie_key2, + ( + vec![( + vec![100], + OverlayedValue { + value: Some(vec![200]), + extrinsics: Some(vec![0, 2].into_iter().collect()), + }, + )] + .into_iter() + .collect(), + CHILD_INFO_2.to_owned(), + ), + ), + ] + .into_iter() + .collect(), + }, + committed: OverlayedChangeSet { + top: vec![ + ( + EXTRINSIC_INDEX.to_vec(), + OverlayedValue { + value: Some(3u32.encode()), + extrinsics: None, + }, + ), + ( + vec![100], + OverlayedValue { + value: Some(vec![202]), + extrinsics: Some(vec![3].into_iter().collect()), + }, + ), + ( + vec![101], + OverlayedValue { + value: Some(vec![203]), + extrinsics: Some(vec![1].into_iter().collect()), + }, + ), + ] + .into_iter() + .collect(), + children: vec![( + child_trie_key1, + ( + vec![( + vec![100], + OverlayedValue { + value: Some(vec![202]), + extrinsics: Some(vec![3].into_iter().collect()), + }, + )] + 
.into_iter() + .collect(), + CHILD_INFO_1.to_owned(), + ), + )] + .into_iter() + .collect(), + }, + collect_extrinsics: true, + stats: Default::default(), + }; + let config = Configuration { + digest_interval: 4, + digest_levels: 2, + }; + + (backend, storage, changes, config) + } + + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { + config, + zero, + end: None, + } + } + + #[test] + fn build_changes_trie_nodes_on_non_digest_block() { + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + let parent = AnchorBlockId { + hash: Default::default(), + number: zero + 4, + }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 5, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 5, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 5, + key: vec![103] + }, + vec![0, 1] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { + block: zero + 5u64, + storage_key: b"1".to_vec() + }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 5u64, + key: vec![100] + }, + vec![0, 2, 3] + ),] + ), + ( + ChildIndex { + block: zero + 5, + storage_key: b"2".to_vec() + }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 5, + key: vec![100] + }, + vec![0, 2] + ),] + ), + ] + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn build_changes_trie_nodes_on_digest_block_l1() { + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + 
let parent = AnchorBlockId { + hash: Default::default(), + number: zero + 3, + }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![100] + }, + vec![zero + 1, zero + 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![101] + }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![102] + }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![105] + }, + vec![zero + 1, zero + 3] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { + block: zero + 4u64, + storage_key: b"1".to_vec() + }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4u64, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![100] + }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![101] + }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![102] + }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![105] + }, + vec![zero + 1] + ), + ] + ), + ( + ChildIndex { + block: zero + 4, + storage_key: b"2".to_vec() + }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![100] + }, + vec![0, 2] + ),] + ), + ] + ); + 
} + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn build_changes_trie_nodes_on_digest_block_l2() { + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + let parent = AnchorBlockId { + hash: Default::default(), + number: zero + 15, + }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 16, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 16, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 16, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 16, + key: vec![100] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 16, + key: vec![101] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 16, + key: vec![102] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 16, + key: vec![103] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 16, + key: vec![105] + }, + vec![zero + 4, zero + 8] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { + block: zero + 16u64, + storage_key: b"1".to_vec() + }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 16u64, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 16, + key: vec![102] + }, + vec![zero + 4] + ), + ] + ), + ( + ChildIndex { + block: zero + 16, + storage_key: b"2".to_vec() + }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 16, + key: vec![100] + }, 
+ vec![0, 2] + ),] + ), + ] + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn build_changes_trie_nodes_on_skewed_digest_block() { + fn test_with_zero(zero: u64) { + let (backend, storage, changes, config) = prepare_for_build(zero); + let parent = AnchorBlockId { + hash: Default::default(), + number: zero + 10, + }; + + let mut configuration_range = configuration_range(&config, zero); + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range.clone(), + &changes, + &parent, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 11, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 11, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 11, + key: vec![103] + }, + vec![0, 1] + ), + ] + ); + + configuration_range.end = Some(zero + 11); + let changes_trie_nodes = + prepare_input(&backend, &storage, configuration_range, &changes, &parent).unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 11, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 11, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 11, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 11, + key: vec![100] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 11, + key: vec![101] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 11, + key: vec![102] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 11, + key: vec![103] + }, + vec![zero + 4] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 11, + 
key: vec![105] + }, + vec![zero + 4, zero + 8] + ), + ] + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn build_changes_trie_nodes_ignores_temporary_storage_values() { + fn test_with_zero(zero: u64) { + let (backend, storage, mut changes, config) = prepare_for_build(zero); + + // 110: missing from backend, set to None in overlay + changes.prospective.top.insert( + vec![110], + OverlayedValue { + value: None, + extrinsics: Some(vec![1].into_iter().collect()), + }, + ); + + let parent = AnchorBlockId { + hash: Default::default(), + number: zero + 3, + }; + let changes_trie_nodes = prepare_input( + &backend, + &storage, + configuration_range(&config, zero), + &changes, + &parent, + ) + .unwrap(); + assert_eq!( + changes_trie_nodes.0.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![100] + }, + vec![zero + 1, zero + 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![101] + }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![102] + }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![105] + }, + vec![zero + 1, zero + 3] + ), + ] + ); + assert_eq!( + changes_trie_nodes + .1 + .into_iter() + .map(|(k, v)| (k, v.collect::>())) + .collect::>(), + vec![ + ( + ChildIndex { + block: zero + 4u64, + storage_key: b"1".to_vec() + }, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4u64, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![100] + }, + vec![zero + 1] + ), + 
InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![101] + }, + vec![zero + 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![102] + }, + vec![zero + 2] + ), + InputPair::DigestIndex( + DigestIndex { + block: zero + 4, + key: vec![105] + }, + vec![zero + 1] + ), + ] + ), + ( + ChildIndex { + block: zero + 4, + storage_key: b"2".to_vec() + }, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: zero + 4, + key: vec![100] + }, + vec![0, 2] + ),] + ), + ] + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn cache_is_used_when_changes_trie_is_built() { + let (backend, mut storage, changes, config) = prepare_for_build(0); + let parent = AnchorBlockId { + hash: Default::default(), + number: 15, + }; + + // override some actual values from storage with values from the cache + // + // top-level storage: + // (keys 100, 101, 103, 105 are now missing from block#4 => they do not appear + // in l2 digest at block 16) + // + // "1" child storage: + // key 102 is now missing from block#4 => it doesn't appear in l2 digest at block 16 + // (keys 103, 104) are now added to block#4 => they appear in l2 digest at block 16 + // + // "2" child storage: + // (keys 105, 106) are now added to block#4 => they appear in l2 digest at block 16 + let trie_root4 = storage.root(&parent, 4).unwrap().unwrap(); + let cached_data4 = IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()) + .set_digest_input_blocks(vec![1, 2, 3]) + .insert(None, vec![vec![100], vec![102]].into_iter().collect()) + .insert( + Some(b"1".to_vec()), + vec![vec![103], vec![104]].into_iter().collect(), + ) + .insert( + Some(b"2".to_vec()), + vec![vec![105], vec![106]].into_iter().collect(), + ) + .complete(4, &trie_root4); + storage.cache_mut().perform(cached_data4); + + let (root_changes_trie_nodes, child_changes_tries_nodes, _) = prepare_input( + &backend, + &storage, + configuration_range(&config, 0), + 
&changes, + &parent, + ) + .unwrap(); + assert_eq!( + root_changes_trie_nodes.collect::>>(), + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16, + key: vec![101] + }, + vec![1] + ), + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16, + key: vec![103] + }, + vec![0, 1] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![100] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![102] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![105] + }, + vec![8] + ), + ] + ); + + let child_changes_tries_nodes = child_changes_tries_nodes + .into_iter() + .map(|(k, i)| (k, i.collect::>())) + .collect::>(); + assert_eq!( + child_changes_tries_nodes + .get(&ChildIndex { + block: 16u64, + storage_key: b"1".to_vec() + }) + .unwrap(), + &vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16u64, + key: vec![100] + }, + vec![0, 2, 3] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16u64, + key: vec![103] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16u64, + key: vec![104] + }, + vec![4] + ), + ], + ); + assert_eq!( + child_changes_tries_nodes + .get(&ChildIndex { + block: 16u64, + storage_key: b"2".to_vec() + }) + .unwrap(), + &vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16u64, + key: vec![100] + }, + vec![0, 2] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16u64, + key: vec![105] + }, + vec![4] + ), + InputPair::DigestIndex( + DigestIndex { + block: 16u64, + key: vec![106] + }, + vec![4] + ), + ], + ); + } } diff --git a/primitives/state-machine/src/changes_trie/build_cache.rs b/primitives/state-machine/src/changes_trie/build_cache.rs index 9d0dbb4c1f..98db3774cb 100644 --- a/primitives/state-machine/src/changes_trie/build_cache.rs +++ b/primitives/state-machine/src/changes_trie/build_cache.rs 
@@ -32,233 +32,253 @@ use crate::StorageKey; /// is inserted (because digest block will includes all keys from this entry). /// When there's a fork, entries are pruned when first changes trie is inserted. pub struct BuildCache { - /// Map of block (implies changes true) number => changes trie root. - roots_by_number: HashMap, - /// Map of changes trie root => set of storage keys that are in this trie. - /// The `Option>` in inner `HashMap` stands for the child storage key. - /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. - /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. - changed_keys: HashMap, HashSet>>, + /// Map of block (implies changes true) number => changes trie root. + roots_by_number: HashMap, + /// Map of changes trie root => set of storage keys that are in this trie. + /// The `Option>` in inner `HashMap` stands for the child storage key. + /// If it is `None`, then the `HashSet` contains keys changed in top-level storage. + /// If it is `Some`, then the `HashSet` contains keys changed in child storage, identified by the key. + changed_keys: HashMap, HashSet>>, } /// The action to perform when block-with-changes-trie is imported. #[derive(Debug, PartialEq)] pub enum CacheAction { - /// Cache data that has been collected when CT has been built. - CacheBuildData(CachedBuildData), - /// Clear cache from all existing entries. - Clear, + /// Cache data that has been collected when CT has been built. + CacheBuildData(CachedBuildData), + /// Clear cache from all existing entries. + Clear, } /// The data that has been cached during changes trie building. #[derive(Debug, PartialEq)] pub struct CachedBuildData { - block: N, - trie_root: H, - digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + block: N, + trie_root: H, + digest_input_blocks: Vec, + changed_keys: HashMap, HashSet>, } /// The action to perform when block-with-changes-trie is imported. 
#[derive(Debug, PartialEq)] pub(crate) enum IncompleteCacheAction { - /// Cache data that has been collected when CT has been built. - CacheBuildData(IncompleteCachedBuildData), - /// Clear cache from all existing entries. - Clear, + /// Cache data that has been collected when CT has been built. + CacheBuildData(IncompleteCachedBuildData), + /// Clear cache from all existing entries. + Clear, } /// The data (without changes trie root) that has been cached during changes trie building. #[derive(Debug, PartialEq)] pub(crate) struct IncompleteCachedBuildData { - digest_input_blocks: Vec, - changed_keys: HashMap, HashSet>, + digest_input_blocks: Vec, + changed_keys: HashMap, HashSet>, } impl BuildCache - where - N: Eq + ::std::hash::Hash, - H: Eq + ::std::hash::Hash + Clone, +where + N: Eq + ::std::hash::Hash, + H: Eq + ::std::hash::Hash + Clone, { - /// Create new changes trie build cache. - pub fn new() -> Self { - BuildCache { - roots_by_number: HashMap::new(), - changed_keys: HashMap::new(), - } - } - - /// Get cached changed keys for changes trie with given root. - pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { - self.changed_keys.get(&root) - } - - /// Execute given functor with cached entry for given block. - /// Returns true if the functor has been called and false otherwise. - pub fn with_changed_keys( - &self, - root: &H, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool { - match self.changed_keys.get(&root) { - Some(changed_keys) => { - functor(changed_keys); - true - }, - None => false, - } - } - - /// Insert data into cache. 
- pub fn perform(&mut self, action: CacheAction) { - match action { - CacheAction::CacheBuildData(data) => { - self.roots_by_number.insert(data.block, data.trie_root.clone()); - self.changed_keys.insert(data.trie_root, data.changed_keys); - - for digest_input_block in data.digest_input_blocks { - let digest_input_block_hash = self.roots_by_number.remove(&digest_input_block); - if let Some(digest_input_block_hash) = digest_input_block_hash { - self.changed_keys.remove(&digest_input_block_hash); - } - } - }, - CacheAction::Clear => { - self.roots_by_number.clear(); - self.changed_keys.clear(); - }, - } - } + /// Create new changes trie build cache. + pub fn new() -> Self { + BuildCache { + roots_by_number: HashMap::new(), + changed_keys: HashMap::new(), + } + } + + /// Get cached changed keys for changes trie with given root. + pub fn get(&self, root: &H) -> Option<&HashMap, HashSet>> { + self.changed_keys.get(&root) + } + + /// Execute given functor with cached entry for given block. + /// Returns true if the functor has been called and false otherwise. + pub fn with_changed_keys( + &self, + root: &H, + functor: &mut dyn FnMut(&HashMap, HashSet>), + ) -> bool { + match self.changed_keys.get(&root) { + Some(changed_keys) => { + functor(changed_keys); + true + } + None => false, + } + } + + /// Insert data into cache. 
+ pub fn perform(&mut self, action: CacheAction) { + match action { + CacheAction::CacheBuildData(data) => { + self.roots_by_number + .insert(data.block, data.trie_root.clone()); + self.changed_keys.insert(data.trie_root, data.changed_keys); + + for digest_input_block in data.digest_input_blocks { + let digest_input_block_hash = self.roots_by_number.remove(&digest_input_block); + if let Some(digest_input_block_hash) = digest_input_block_hash { + self.changed_keys.remove(&digest_input_block_hash); + } + } + } + CacheAction::Clear => { + self.roots_by_number.clear(); + self.changed_keys.clear(); + } + } + } } impl IncompleteCacheAction { - /// Returns true if we need to collect changed keys for this action. - pub fn collects_changed_keys(&self) -> bool { - match *self { - IncompleteCacheAction::CacheBuildData(_) => true, - IncompleteCacheAction::Clear => false, - } - } - - /// Complete cache action with computed changes trie root. - pub(crate) fn complete(self, block: N, trie_root: &H) -> CacheAction { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - CacheAction::CacheBuildData(build_data.complete(block, trie_root.clone())), - IncompleteCacheAction::Clear => CacheAction::Clear, - } - } - - /// Set numbers of blocks that are superseded by this new entry. - /// - /// If/when this build data is committed to the cache, entries for these blocks - /// will be removed from the cache. - pub(crate) fn set_digest_input_blocks(self, digest_input_blocks: Vec) -> Self { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData(build_data.set_digest_input_blocks(digest_input_blocks)), - IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, - } - } - - /// Insert changed keys of given storage into cached data. 
- pub(crate) fn insert( - self, - storage_key: Option, - changed_keys: HashSet, - ) -> Self { - match self { - IncompleteCacheAction::CacheBuildData(build_data) => - IncompleteCacheAction::CacheBuildData(build_data.insert(storage_key, changed_keys)), - IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, - } - } + /// Returns true if we need to collect changed keys for this action. + pub fn collects_changed_keys(&self) -> bool { + match *self { + IncompleteCacheAction::CacheBuildData(_) => true, + IncompleteCacheAction::Clear => false, + } + } + + /// Complete cache action with computed changes trie root. + pub(crate) fn complete(self, block: N, trie_root: &H) -> CacheAction { + match self { + IncompleteCacheAction::CacheBuildData(build_data) => { + CacheAction::CacheBuildData(build_data.complete(block, trie_root.clone())) + } + IncompleteCacheAction::Clear => CacheAction::Clear, + } + } + + /// Set numbers of blocks that are superseded by this new entry. + /// + /// If/when this build data is committed to the cache, entries for these blocks + /// will be removed from the cache. + pub(crate) fn set_digest_input_blocks(self, digest_input_blocks: Vec) -> Self { + match self { + IncompleteCacheAction::CacheBuildData(build_data) => { + IncompleteCacheAction::CacheBuildData( + build_data.set_digest_input_blocks(digest_input_blocks), + ) + } + IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, + } + } + + /// Insert changed keys of given storage into cached data. + pub(crate) fn insert( + self, + storage_key: Option, + changed_keys: HashSet, + ) -> Self { + match self { + IncompleteCacheAction::CacheBuildData(build_data) => { + IncompleteCacheAction::CacheBuildData(build_data.insert(storage_key, changed_keys)) + } + IncompleteCacheAction::Clear => IncompleteCacheAction::Clear, + } + } } impl IncompleteCachedBuildData { - /// Create new cached data. 
- pub(crate) fn new() -> Self { - IncompleteCachedBuildData { - digest_input_blocks: Vec::new(), - changed_keys: HashMap::new(), - } - } - - fn complete(self, block: N, trie_root: H) -> CachedBuildData { - CachedBuildData { - block, - trie_root, - digest_input_blocks: self.digest_input_blocks, - changed_keys: self.changed_keys, - } - } - - fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { - self.digest_input_blocks = digest_input_blocks; - self - } - - fn insert( - mut self, - storage_key: Option, - changed_keys: HashSet, - ) -> Self { - self.changed_keys.insert(storage_key, changed_keys); - self - } + /// Create new cached data. + pub(crate) fn new() -> Self { + IncompleteCachedBuildData { + digest_input_blocks: Vec::new(), + changed_keys: HashMap::new(), + } + } + + fn complete(self, block: N, trie_root: H) -> CachedBuildData { + CachedBuildData { + block, + trie_root, + digest_input_blocks: self.digest_input_blocks, + changed_keys: self.changed_keys, + } + } + + fn set_digest_input_blocks(mut self, digest_input_blocks: Vec) -> Self { + self.digest_input_blocks = digest_input_blocks; + self + } + + fn insert( + mut self, + storage_key: Option, + changed_keys: HashSet, + ) -> Self { + self.changed_keys.insert(storage_key, changed_keys); + self + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn updated_keys_are_stored_when_non_top_level_digest_is_built() { - let mut data = IncompleteCachedBuildData::::new(); - data = data.insert(None, vec![vec![1]].into_iter().collect()); - assert_eq!(data.changed_keys.len(), 1); - - let mut cache = BuildCache::new(); - cache.perform(CacheAction::CacheBuildData(data.complete(1, 1))); - assert_eq!(cache.changed_keys.len(), 1); - assert_eq!( - cache.get(&1).unwrap().clone(), - vec![(None, vec![vec![1]].into_iter().collect())].into_iter().collect(), - ); - } - - #[test] - fn obsolete_entries_are_purged_when_new_ct_is_built() { - let mut cache = BuildCache::::new(); - 
cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![1]].into_iter().collect()) - .complete(1, 1))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![2]].into_iter().collect()) - .complete(2, 2))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![3]].into_iter().collect()) - .complete(3, 3))); - - assert_eq!(cache.changed_keys.len(), 3); - - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .set_digest_input_blocks(vec![1, 2, 3]) - .complete(4, 4))); - - assert_eq!(cache.changed_keys.len(), 1); - - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![8]].into_iter().collect()) - .complete(8, 8))); - cache.perform(CacheAction::CacheBuildData(IncompleteCachedBuildData::new() - .insert(None, vec![vec![12]].into_iter().collect()) - .complete(12, 12))); - - assert_eq!(cache.changed_keys.len(), 3); - - cache.perform(CacheAction::Clear); - - assert_eq!(cache.changed_keys.len(), 0); - } + use super::*; + + #[test] + fn updated_keys_are_stored_when_non_top_level_digest_is_built() { + let mut data = IncompleteCachedBuildData::::new(); + data = data.insert(None, vec![vec![1]].into_iter().collect()); + assert_eq!(data.changed_keys.len(), 1); + + let mut cache = BuildCache::new(); + cache.perform(CacheAction::CacheBuildData(data.complete(1, 1))); + assert_eq!(cache.changed_keys.len(), 1); + assert_eq!( + cache.get(&1).unwrap().clone(), + vec![(None, vec![vec![1]].into_iter().collect())] + .into_iter() + .collect(), + ); + } + + #[test] + fn obsolete_entries_are_purged_when_new_ct_is_built() { + let mut cache = BuildCache::::new(); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![1]].into_iter().collect()) + .complete(1, 1), + )); + cache.perform(CacheAction::CacheBuildData( + 
IncompleteCachedBuildData::new() + .insert(None, vec![vec![2]].into_iter().collect()) + .complete(2, 2), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![3]].into_iter().collect()) + .complete(3, 3), + )); + + assert_eq!(cache.changed_keys.len(), 3); + + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .set_digest_input_blocks(vec![1, 2, 3]) + .complete(4, 4), + )); + + assert_eq!(cache.changed_keys.len(), 1); + + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![8]].into_iter().collect()) + .complete(8, 8), + )); + cache.perform(CacheAction::CacheBuildData( + IncompleteCachedBuildData::new() + .insert(None, vec![vec![12]].into_iter().collect()) + .complete(12, 12), + )); + + assert_eq!(cache.changed_keys.len(), 3); + + cache.perform(CacheAction::Clear); + + assert_eq!(cache.changed_keys.len(), 0); + } } diff --git a/primitives/state-machine/src/changes_trie/build_iterator.rs b/primitives/state-machine/src/changes_trie/build_iterator.rs index bb93ce98a8..6662510d04 100644 --- a/primitives/state-machine/src/changes_trie/build_iterator.rs +++ b/primitives/state-machine/src/changes_trie/build_iterator.rs @@ -17,8 +17,8 @@ //! Structures and functions to return blocks whose changes are to be included //! in given block's changes trie. +use crate::changes_trie::{BlockNumber, ConfigurationRange}; use num_traits::Zero; -use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// Returns iterator of OTHER blocks that are required for inclusion into /// changes trie of given block. Blocks are guaranteed to be returned in @@ -26,406 +26,473 @@ use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// /// Skewed digest is built IF block >= config.end. 
pub fn digest_build_iterator<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - block: Number, + config: ConfigurationRange<'a, Number>, + block: Number, ) -> DigestBuildIterator { - // prepare digest build parameters - let (_, _, digest_step) = match config.config.digest_level_at_block(config.zero, block.clone()) { - Some((current_level, digest_interval, digest_step)) => - (current_level, digest_interval, digest_step), - None => return DigestBuildIterator::empty(), - }; - - DigestBuildIterator::new(block.clone(), config.end.unwrap_or(block), config.config.digest_interval, digest_step) + // prepare digest build parameters + let (_, _, digest_step) = match config + .config + .digest_level_at_block(config.zero, block.clone()) + { + Some((current_level, digest_interval, digest_step)) => { + (current_level, digest_interval, digest_step) + } + None => return DigestBuildIterator::empty(), + }; + + DigestBuildIterator::new( + block.clone(), + config.end.unwrap_or(block), + config.config.digest_interval, + digest_step, + ) } /// Changes trie build iterator that returns numbers of OTHER blocks that are /// required for inclusion into changes trie of given block. #[derive(Debug)] pub struct DigestBuildIterator { - /// Block we're building changes trie for. It could (logically) be a post-end block if we are creating - /// skewed digest. - block: Number, - /// Block that is a last block where current configuration is active. We have never yet created anything - /// after this block => digest that we're creating can't reference any blocks that are >= end. - end: Number, - /// Interval of L1 digest blocks. - digest_interval: u32, - /// Max step that could be used when digest is created. - max_step: u32, - - // Mutable data below: - - /// Step of current blocks range. - current_step: u32, - /// Reverse step of current blocks range. - current_step_reverse: u32, - /// Current blocks range. - current_range: Option>, - /// Last block that we have returned. 
- last_block: Option, + /// Block we're building changes trie for. It could (logically) be a post-end block if we are creating + /// skewed digest. + block: Number, + /// Block that is a last block where current configuration is active. We have never yet created anything + /// after this block => digest that we're creating can't reference any blocks that are >= end. + end: Number, + /// Interval of L1 digest blocks. + digest_interval: u32, + /// Max step that could be used when digest is created. + max_step: u32, + + // Mutable data below: + /// Step of current blocks range. + current_step: u32, + /// Reverse step of current blocks range. + current_step_reverse: u32, + /// Current blocks range. + current_range: Option>, + /// Last block that we have returned. + last_block: Option, } impl DigestBuildIterator { - /// Create new digest build iterator. - pub fn new(block: Number, end: Number, digest_interval: u32, max_step: u32) -> Self { - DigestBuildIterator { - block, - end, - digest_interval, - max_step, - current_step: max_step, - current_step_reverse: 0, - current_range: None, - last_block: None, - } - } - - /// Create empty digest build iterator. - pub fn empty() -> Self { - Self::new(Zero::zero(), Zero::zero(), 0, 0) - } + /// Create new digest build iterator. + pub fn new(block: Number, end: Number, digest_interval: u32, max_step: u32) -> Self { + DigestBuildIterator { + block, + end, + digest_interval, + max_step, + current_step: max_step, + current_step_reverse: 0, + current_range: None, + last_block: None, + } + } + + /// Create empty digest build iterator. 
+ pub fn empty() -> Self { + Self::new(Zero::zero(), Zero::zero(), 0, 0) + } } impl Iterator for DigestBuildIterator { - type Item = Number; - - fn next(&mut self) -> Option { - // when we're building skewed digest, we might want to skip some blocks if - // they're not covered by current configuration - loop { - if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { - if next < self.end { - self.last_block = Some(next.clone()); - return Some(next); - } - } - - // we are safe to use non-checking mul/sub versions here because: - // DigestBuildIterator is created only by internal function that is checking - // that all multiplications/subtractions are safe within max_step limit - - let next_step_reverse = if self.current_step_reverse == 0 { - 1 - } else { - self.current_step_reverse * self.digest_interval - }; - if next_step_reverse > self.max_step { - return None; - } - - self.current_step_reverse = next_step_reverse; - self.current_range = Some(BlocksRange::new( - match self.last_block.clone() { - Some(last_block) => last_block + self.current_step.into(), - None => self.block.clone() - (self.current_step * self.digest_interval - self.current_step).into(), - }, - self.block.clone(), - self.current_step.into(), - )); - - self.current_step = self.current_step / self.digest_interval; - if self.current_step == 0 { - self.current_step = 1; - } - } - } + type Item = Number; + + fn next(&mut self) -> Option { + // when we're building skewed digest, we might want to skip some blocks if + // they're not covered by current configuration + loop { + if let Some(next) = self.current_range.as_mut().and_then(|iter| iter.next()) { + if next < self.end { + self.last_block = Some(next.clone()); + return Some(next); + } + } + + // we are safe to use non-checking mul/sub versions here because: + // DigestBuildIterator is created only by internal function that is checking + // that all multiplications/subtractions are safe within max_step limit + + let 
next_step_reverse = if self.current_step_reverse == 0 { + 1 + } else { + self.current_step_reverse * self.digest_interval + }; + if next_step_reverse > self.max_step { + return None; + } + + self.current_step_reverse = next_step_reverse; + self.current_range = Some(BlocksRange::new( + match self.last_block.clone() { + Some(last_block) => last_block + self.current_step.into(), + None => { + self.block.clone() + - (self.current_step * self.digest_interval - self.current_step).into() + } + }, + self.block.clone(), + self.current_step.into(), + )); + + self.current_step = self.current_step / self.digest_interval; + if self.current_step == 0 { + self.current_step = 1; + } + } + } } /// Blocks range iterator with builtin step_by support. #[derive(Debug)] struct BlocksRange { - current: Number, - end: Number, - step: Number, + current: Number, + end: Number, + step: Number, } impl BlocksRange { - pub fn new(begin: Number, end: Number, step: Number) -> Self { - BlocksRange { - current: begin, - end, - step, - } - } + pub fn new(begin: Number, end: Number, step: Number) -> Self { + BlocksRange { + current: begin, + end, + step, + } + } } impl Iterator for BlocksRange { - type Item = Number; + type Item = Number; - fn next(&mut self) -> Option { - if self.current >= self.end { - return None; - } + fn next(&mut self) -> Option { + if self.current >= self.end { + return None; + } - let current = Some(self.current.clone()); - self.current += self.step.clone(); - current - } + let current = Some(self.current.clone()); + self.current += self.step.clone(); + current + } } #[cfg(test)] mod tests { - use crate::changes_trie::Configuration; - use super::*; - - fn digest_build_iterator( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - end: Option, - ) -> DigestBuildIterator { - super::digest_build_iterator( - ConfigurationRange { - config: &Configuration { - digest_interval, - digest_levels, - }, - zero, - end, - }, - block, - ) - } - - fn 
digest_build_iterator_basic( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - ) -> (u64, u32, u32) { - let iter = digest_build_iterator(digest_interval, digest_levels, zero, block, None); - (iter.block, iter.digest_interval, iter.max_step) - } - - fn digest_build_iterator_blocks( - digest_interval: u32, - digest_levels: u32, - zero: u64, - block: u64, - end: Option, - ) -> Vec { - digest_build_iterator(digest_interval, digest_levels, zero, block, end).collect() - } - - #[test] - fn suggest_digest_inclusion_returns_empty_iterator() { - fn test_with_zero(zero: u64) { - let empty = (0, 0, 0); - assert_eq!(digest_build_iterator_basic(4, 16, zero, zero + 0), empty, "block is 0"); - assert_eq!(digest_build_iterator_basic(0, 16, zero, zero + 64), empty, "digest_interval is 0"); - assert_eq!(digest_build_iterator_basic(1, 16, zero, zero + 64), empty, "digest_interval is 1"); - assert_eq!(digest_build_iterator_basic(4, 0, zero, zero + 64), empty, "digest_levels is 0"); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 1), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 2), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 15), - empty, - "digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(4, 16, zero, zero + 17), - empty, - "digest is not required for this block", - ); - assert_eq!(digest_build_iterator_basic( - ::std::u32::MAX / 2 + 1, - 16, - zero, - ::std::u64::MAX, - ), empty, "digest_interval * 2 is greater than u64::MAX"); - } - - test_with_zero(0); - test_with_zero(1); - test_with_zero(2); - test_with_zero(4); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level1_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 1, zero, zero + 16), - (zero + 16, 16, 1), - "!(block % interval) 
&& first digest level == block", - ); - assert_eq!( - digest_build_iterator_basic(16, 1, zero, zero + 256), - (zero + 256, 16, 1), - "!(block % interval^2), but there's only 1 digest level", - ); - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 32), - (zero + 32, 16, 1), - "second level digest is not required for this block", - ); - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 4080), - (zero + 4080, 16, 1), - "second && third level digest are not required for this block", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level2_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 256), - (zero + 256, 16, 16), - "second level digest", - ); - assert_eq!( - digest_build_iterator_basic(16, 2, zero, zero + 4096), - (zero + 4096, 16, 16), - "!(block % interval^3), but there's only 2 digest levels", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn suggest_digest_inclusion_returns_level3_iterator() { - fn test_with_zero(zero: u64) { - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 4096), - (zero + 4096, 16, 256), - "third level digest: beginning", - ); - assert_eq!( - digest_build_iterator_basic(16, 3, zero, zero + 8192), - (zero + 8192, 16, 256), - "third level digest: next", - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 16, None), - [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 1, zero, zero + 256, None), - [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] - .iter().map(|item| zero + item).collect::>()); - 
assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 32, None), - [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] - .iter().map(|item| zero + item).collect::>()); - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), - [4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, 4078, 4079] - .iter().map(|item| zero + item).collect::>()); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 256, None), - [ - // level2 points to previous 16-1 level1 digests: - 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, - // level2 is a level1 digest of 16-1 previous blocks: - 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, - ].iter().map(|item| zero + item).collect::>(), - ); - assert_eq!(digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), - [ - // level2 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - // level2 is a level1 digest of 16-1 previous blocks: - 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - ].iter().map(|item| zero + item).collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), - [ - // level3 points to previous 16-1 level2 digests: - 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, 3840, - // level3 points to previous 16-1 level1 digests: - 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, 4064, 4080, - // level3 is a level1 digest of 16-1 previous blocks: 
- 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, 4094, 4095, - ].iter().map(|item| zero + item).collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_skewed_digest_blocks() { - fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), - [ - // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: - 256, 512, 768, 1024, 1280, - // level3 MUST point to previous 16-1 level1 digests, BUT there are only 3: - 1296, 1312, 1328, - // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 9: - 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, - ].iter().map(|item| zero + item).collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } - - #[test] - fn digest_iterator_returns_skewed_digest_blocks_skipping_level() { - fn test_with_zero(zero: u64) { - assert_eq!(digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), - [ - // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: - 256, 512, 768, 1024, 1280, - // level3 MUST point to previous 16-1 level1 digests, BUT there are NO ANY L1-digests: - // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 3: - 1281, 1282, 1283, - ].iter().map(|item| zero + item).collect::>(), - ); - } - - test_with_zero(0); - test_with_zero(16); - test_with_zero(17); - } + use super::*; + use crate::changes_trie::Configuration; + + fn digest_build_iterator( + digest_interval: u32, + digest_levels: u32, + zero: u64, + block: u64, + end: Option, + ) -> DigestBuildIterator { + super::digest_build_iterator( + ConfigurationRange { + config: &Configuration { + digest_interval, + digest_levels, + }, + zero, + end, + }, + block, + ) + } + + fn digest_build_iterator_basic( + digest_interval: u32, + digest_levels: u32, + zero: u64, + block: u64, + ) -> 
(u64, u32, u32) { + let iter = digest_build_iterator(digest_interval, digest_levels, zero, block, None); + (iter.block, iter.digest_interval, iter.max_step) + } + + fn digest_build_iterator_blocks( + digest_interval: u32, + digest_levels: u32, + zero: u64, + block: u64, + end: Option, + ) -> Vec { + digest_build_iterator(digest_interval, digest_levels, zero, block, end).collect() + } + + #[test] + fn suggest_digest_inclusion_returns_empty_iterator() { + fn test_with_zero(zero: u64) { + let empty = (0, 0, 0); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 0), + empty, + "block is 0" + ); + assert_eq!( + digest_build_iterator_basic(0, 16, zero, zero + 64), + empty, + "digest_interval is 0" + ); + assert_eq!( + digest_build_iterator_basic(1, 16, zero, zero + 64), + empty, + "digest_interval is 1" + ); + assert_eq!( + digest_build_iterator_basic(4, 0, zero, zero + 64), + empty, + "digest_levels is 0" + ); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 1), + empty, + "digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 2), + empty, + "digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 15), + empty, + "digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(4, 16, zero, zero + 17), + empty, + "digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(::std::u32::MAX / 2 + 1, 16, zero, ::std::u64::MAX,), + empty, + "digest_interval * 2 is greater than u64::MAX" + ); + } + + test_with_zero(0); + test_with_zero(1); + test_with_zero(2); + test_with_zero(4); + test_with_zero(17); + } + + #[test] + fn suggest_digest_inclusion_returns_level1_iterator() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_basic(16, 1, zero, zero + 16), + (zero + 16, 16, 1), + "!(block % interval) && first digest level == block", + ); + assert_eq!( + 
digest_build_iterator_basic(16, 1, zero, zero + 256), + (zero + 256, 16, 1), + "!(block % interval^2), but there's only 1 digest level", + ); + assert_eq!( + digest_build_iterator_basic(16, 2, zero, zero + 32), + (zero + 32, 16, 1), + "second level digest is not required for this block", + ); + assert_eq!( + digest_build_iterator_basic(16, 3, zero, zero + 4080), + (zero + 4080, 16, 1), + "second && third level digest are not required for this block", + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn suggest_digest_inclusion_returns_level2_iterator() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_basic(16, 2, zero, zero + 256), + (zero + 256, 16, 16), + "second level digest", + ); + assert_eq!( + digest_build_iterator_basic(16, 2, zero, zero + 4096), + (zero + 4096, 16, 16), + "!(block % interval^3), but there's only 2 digest levels", + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn suggest_digest_inclusion_returns_level3_iterator() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_basic(16, 3, zero, zero + 4096), + (zero + 4096, 16, 256), + "third level digest: beginning", + ); + assert_eq!( + digest_build_iterator_basic(16, 3, zero, zero + 8192), + (zero + 8192, 16, 256), + "third level digest: next", + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn digest_iterator_returns_level1_blocks() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_blocks(16, 1, zero, zero + 16, None), + [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15] + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 1, zero, zero + 256, None), + [241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255] + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, 
zero + 32, None), + [17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31] + .iter() + .map(|item| zero + item) + .collect::>() + ); + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4080, None), + [ + 4065, 4066, 4067, 4068, 4069, 4070, 4071, 4072, 4073, 4074, 4075, 4076, 4077, + 4078, 4079 + ] + .iter() + .map(|item| zero + item) + .collect::>() + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn digest_iterator_returns_level1_and_level2_blocks() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 256, None), + [ + // level2 points to previous 16-1 level1 digests: + 16, 32, 48, 64, 80, 96, 112, 128, 144, 160, 176, 192, 208, 224, 240, + // level2 is a level1 digest of 16-1 previous blocks: + 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, + ] + .iter() + .map(|item| zero + item) + .collect::>(), + ); + assert_eq!( + digest_build_iterator_blocks(16, 2, zero, zero + 4096, None), + [ + // level2 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, + 4064, 4080, // level2 is a level1 digest of 16-1 previous blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, + 4094, 4095, + ] + .iter() + .map(|item| zero + item) + .collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn digest_iterator_returns_level1_and_level2_and_level3_blocks() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, None), + [ + // level3 points to previous 16-1 level2 digests: + 256, 512, 768, 1024, 1280, 1536, 1792, 2048, 2304, 2560, 2816, 3072, 3328, 3584, + 3840, // level3 points to previous 16-1 level1 digests: + 3856, 3872, 3888, 3904, 3920, 3936, 3952, 3968, 3984, 4000, 4016, 4032, 4048, + 4064, 4080, // level3 is a level1 digest of 16-1 previous 
blocks: + 4081, 4082, 4083, 4084, 4085, 4086, 4087, 4088, 4089, 4090, 4091, 4092, 4093, + 4094, 4095, + ] + .iter() + .map(|item| zero + item) + .collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn digest_iterator_returns_skewed_digest_blocks() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1338)), + [ + // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: + 256, 512, 768, 1024, 1280, + // level3 MUST point to previous 16-1 level1 digests, BUT there are only 3: + 1296, 1312, 1328, + // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 9: + 1329, 1330, 1331, 1332, 1333, 1334, 1335, 1336, 1337, + ] + .iter() + .map(|item| zero + item) + .collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } + + #[test] + fn digest_iterator_returns_skewed_digest_blocks_skipping_level() { + fn test_with_zero(zero: u64) { + assert_eq!( + digest_build_iterator_blocks(16, 3, zero, zero + 4096, Some(zero + 1284)), + [ + // level3 MUST point to previous 16-1 level2 digests, BUT there are only 5: + 256, 512, 768, 1024, 1280, + // level3 MUST point to previous 16-1 level1 digests, BUT there are NO ANY L1-digests: + // level3 MUST be a level1 digest of 16-1 previous blocks, BUT there are only 3: + 1281, 1282, 1283, + ] + .iter() + .map(|item| zero + item) + .collect::>(), + ); + } + + test_with_zero(0); + test_with_zero(16); + test_with_zero(17); + } } diff --git a/primitives/state-machine/src/changes_trie/changes_iterator.rs b/primitives/state-machine/src/changes_trie/changes_iterator.rs index 685786218c..b8c9fa332e 100644 --- a/primitives/state-machine/src/changes_trie/changes_iterator.rs +++ b/primitives/state-machine/src/changes_trie/changes_iterator.rs @@ -17,619 +17,851 @@ //! Functions + iterator that traverses changes tries and returns all //! 
(block, extrinsic) pairs where given key has been changed. -use std::cell::RefCell; -use std::collections::VecDeque; -use codec::{Decode, Encode, Codec}; -use hash_db::Hasher; -use num_traits::Zero; -use sp_trie::Recorder; -use crate::changes_trie::{AnchorBlockId, ConfigurationRange, RootsStorage, Storage, BlockNumber}; -use crate::changes_trie::input::{DigestIndex, ExtrinsicIndex, DigestIndexValue, ExtrinsicIndexValue}; -use crate::changes_trie::storage::{TrieBackendAdapter, InMemoryStorage}; use crate::changes_trie::input::ChildIndex; +use crate::changes_trie::input::{ + DigestIndex, DigestIndexValue, ExtrinsicIndex, ExtrinsicIndexValue, +}; +use crate::changes_trie::storage::{InMemoryStorage, TrieBackendAdapter}; use crate::changes_trie::surface_iterator::{surface_iterator, SurfaceIterator}; +use crate::changes_trie::{AnchorBlockId, BlockNumber, ConfigurationRange, RootsStorage, Storage}; use crate::proving_backend::ProvingBackendRecorder; -use crate::trie_backend_essence::{TrieBackendEssence}; +use crate::trie_backend_essence::TrieBackendEssence; +use codec::{Codec, Decode, Encode}; +use hash_db::Hasher; +use num_traits::Zero; +use sp_trie::Recorder; +use std::cell::RefCell; +use std::collections::VecDeque; /// Return changes of given key at given blocks range. /// `max` is the number of best known block. /// Changes are returned in descending order (i.e. last block comes first). 
pub fn key_changes<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - storage: &'a dyn Storage, - begin: Number, - end: &'a AnchorBlockId, - max: Number, - storage_key: Option<&'a [u8]>, - key: &'a [u8], + config: ConfigurationRange<'a, Number>, + storage: &'a dyn Storage, + begin: Number, + end: &'a AnchorBlockId, + max: Number, + storage_key: Option<&'a [u8]>, + key: &'a [u8], ) -> Result, String> { - // we can't query any roots before root - let max = ::std::cmp::min(max.clone(), end.number.clone()); - - Ok(DrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage: storage.as_roots_storage(), - storage, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - }) + // we can't query any roots before root + let max = ::std::cmp::min(max.clone(), end.number.clone()); + + Ok(DrilldownIterator { + essence: DrilldownIteratorEssence { + storage_key, + key, + roots_storage: storage.as_roots_storage(), + storage, + begin: begin.clone(), + end, + config: config.clone(), + surface: surface_iterator(config, max, begin, end.number.clone())?, + + extrinsics: Default::default(), + blocks: Default::default(), + + _hasher: ::std::marker::PhantomData::::default(), + }, + }) } - /// Returns proof of changes of given key at given blocks range. /// `max` is the number of best known block. 
pub fn key_changes_proof<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - storage: &dyn Storage, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&[u8]>, - key: &[u8], -) -> Result>, String> where H::Out: Codec { - // we can't query any roots before root - let max = ::std::cmp::min(max.clone(), end.number.clone()); - - let mut iter = ProvingDrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage: storage.as_roots_storage(), - storage, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - proof_recorder: Default::default(), - }; - - // iterate to collect proof - while let Some(item) = iter.next() { - item?; - } - - Ok(iter.extract_proof()) + config: ConfigurationRange<'a, Number>, + storage: &dyn Storage, + begin: Number, + end: &AnchorBlockId, + max: Number, + storage_key: Option<&[u8]>, + key: &[u8], +) -> Result>, String> +where + H::Out: Codec, +{ + // we can't query any roots before root + let max = ::std::cmp::min(max.clone(), end.number.clone()); + + let mut iter = ProvingDrilldownIterator { + essence: DrilldownIteratorEssence { + storage_key, + key, + roots_storage: storage.as_roots_storage(), + storage, + begin: begin.clone(), + end, + config: config.clone(), + surface: surface_iterator(config, max, begin, end.number.clone())?, + + extrinsics: Default::default(), + blocks: Default::default(), + + _hasher: ::std::marker::PhantomData::::default(), + }, + proof_recorder: Default::default(), + }; + + // iterate to collect proof + while let Some(item) = iter.next() { + item?; + } + + Ok(iter.extract_proof()) } /// Check key changes proof and return changes of the key at given blocks range. /// `max` is the number of best known block. 
/// Changes are returned in descending order (i.e. last block comes first). pub fn key_changes_proof_check<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - roots_storage: &dyn RootsStorage, - proof: Vec>, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&[u8]>, - key: &[u8] -) -> Result, String> where H::Out: Encode { - key_changes_proof_check_with_db( - config, - roots_storage, - &InMemoryStorage::with_proof(proof), - begin, - end, - max, - storage_key, - key, - ) + config: ConfigurationRange<'a, Number>, + roots_storage: &dyn RootsStorage, + proof: Vec>, + begin: Number, + end: &AnchorBlockId, + max: Number, + storage_key: Option<&[u8]>, + key: &[u8], +) -> Result, String> +where + H::Out: Encode, +{ + key_changes_proof_check_with_db( + config, + roots_storage, + &InMemoryStorage::with_proof(proof), + begin, + end, + max, + storage_key, + key, + ) } /// Similar to the `key_changes_proof_check` function, but works with prepared proof storage. 
pub fn key_changes_proof_check_with_db<'a, H: Hasher, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - roots_storage: &dyn RootsStorage, - proof_db: &InMemoryStorage, - begin: Number, - end: &AnchorBlockId, - max: Number, - storage_key: Option<&[u8]>, - key: &[u8] -) -> Result, String> where H::Out: Encode { - // we can't query any roots before root - let max = ::std::cmp::min(max.clone(), end.number.clone()); - - DrilldownIterator { - essence: DrilldownIteratorEssence { - storage_key, - key, - roots_storage, - storage: proof_db, - begin: begin.clone(), - end, - config: config.clone(), - surface: surface_iterator( - config, - max, - begin, - end.number.clone(), - )?, - - extrinsics: Default::default(), - blocks: Default::default(), - - _hasher: ::std::marker::PhantomData::::default(), - }, - }.collect() + config: ConfigurationRange<'a, Number>, + roots_storage: &dyn RootsStorage, + proof_db: &InMemoryStorage, + begin: Number, + end: &AnchorBlockId, + max: Number, + storage_key: Option<&[u8]>, + key: &[u8], +) -> Result, String> +where + H::Out: Encode, +{ + // we can't query any roots before root + let max = ::std::cmp::min(max.clone(), end.number.clone()); + + DrilldownIterator { + essence: DrilldownIteratorEssence { + storage_key, + key, + roots_storage, + storage: proof_db, + begin: begin.clone(), + end, + config: config.clone(), + surface: surface_iterator(config, max, begin, end.number.clone())?, + + extrinsics: Default::default(), + blocks: Default::default(), + + _hasher: ::std::marker::PhantomData::::default(), + }, + } + .collect() } /// Drilldown iterator - receives 'digest points' from surface iterator and explores /// every point until extrinsic is found. 
pub struct DrilldownIteratorEssence<'a, H, Number> - where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, +where + H: Hasher, + Number: BlockNumber, + H::Out: 'a, { - storage_key: Option<&'a [u8]>, - key: &'a [u8], - roots_storage: &'a dyn RootsStorage, - storage: &'a dyn Storage, - begin: Number, - end: &'a AnchorBlockId, - config: ConfigurationRange<'a, Number>, - surface: SurfaceIterator<'a, Number>, - - extrinsics: VecDeque<(Number, u32)>, - blocks: VecDeque<(Number, Option)>, - - _hasher: ::std::marker::PhantomData, + storage_key: Option<&'a [u8]>, + key: &'a [u8], + roots_storage: &'a dyn RootsStorage, + storage: &'a dyn Storage, + begin: Number, + end: &'a AnchorBlockId, + config: ConfigurationRange<'a, Number>, + surface: SurfaceIterator<'a, Number>, + + extrinsics: VecDeque<(Number, u32)>, + blocks: VecDeque<(Number, Option)>, + + _hasher: ::std::marker::PhantomData, } impl<'a, H, Number> DrilldownIteratorEssence<'a, H, Number> - where - H: Hasher, - Number: BlockNumber, - H::Out: 'a, +where + H: Hasher, + Number: BlockNumber, + H::Out: 'a, { - pub fn next(&mut self, trie_reader: F) -> Option> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, - { - match self.do_next(trie_reader) { - Ok(Some(res)) => Some(Ok(res)), - Ok(None) => None, - Err(err) => Some(Err(err)), - } - } - - fn do_next(&mut self, mut trie_reader: F) -> Result, String> - where - F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, - { - loop { - if let Some((block, extrinsic)) = self.extrinsics.pop_front() { - return Ok(Some((block, extrinsic))); - } - - if let Some((block, level)) = self.blocks.pop_front() { - // not having a changes trie root is an error because: - // we never query roots for future blocks - // AND trie roots for old blocks are known (both on full + light node) - let trie_root = self.roots_storage.root(&self.end, block.clone())? 
- .ok_or_else(|| format!("Changes trie root for block {} is not found", block.clone()))?; - let trie_root = if let Some(storage_key) = self.storage_key { - let child_key = ChildIndex { - block: block.clone(), - storage_key: storage_key.to_vec(), - }.encode(); - if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? - .and_then(|v| >::decode(&mut &v[..]).ok()) - .map(|v| { - let mut hash = H::Out::default(); - hash.as_mut().copy_from_slice(&v[..]); - hash - }) { - trie_root - } else { - continue; - } - } else { - trie_root - }; - - // only return extrinsics for blocks before self.max - // most of blocks will be filtered out before pushing to `self.blocks` - // here we just throwing away changes at digest blocks we're processing - debug_assert!(block >= self.begin, "We shall not touch digests earlier than a range' begin"); - if block <= self.end.number { - let extrinsics_key = ExtrinsicIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let extrinsics = trie_reader(self.storage, trie_root, &extrinsics_key); - if let Some(extrinsics) = extrinsics? { - if let Ok(extrinsics) = ExtrinsicIndexValue::decode(&mut &extrinsics[..]) { - self.extrinsics.extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); - } - } - } - - let blocks_key = DigestIndex { block: block.clone(), key: self.key.to_vec() }.encode(); - let blocks = trie_reader(self.storage, trie_root, &blocks_key); - if let Some(blocks) = blocks? 
{ - if let Ok(blocks) = >::decode(&mut &blocks[..]) { - // filter level0 blocks here because we tend to use digest blocks, - // AND digest block changes could also include changes for out-of-range blocks - let begin = self.begin.clone(); - let end = self.end.number.clone(); - let config = self.config.clone(); - self.blocks.extend(blocks.into_iter() - .rev() - .filter(|b| level.map(|level| level > 1).unwrap_or(true) || (*b >= begin && *b <= end)) - .map(|b| { - let prev_level = level - .map(|level| Some(level - 1)) - .unwrap_or_else(|| - Some(config.config.digest_level_at_block(config.zero.clone(), b.clone()) - .map(|(level, _, _)| level) - .unwrap_or_else(|| Zero::zero()))); - (b, prev_level) - }) - ); - } - } - - continue; - } - - match self.surface.next() { - Some(Ok(block)) => self.blocks.push_back(block), - Some(Err(err)) => return Err(err), - None => return Ok(None), - } - } - } + pub fn next(&mut self, trie_reader: F) -> Option> + where + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, + { + match self.do_next(trie_reader) { + Ok(Some(res)) => Some(Ok(res)), + Ok(None) => None, + Err(err) => Some(Err(err)), + } + } + + fn do_next(&mut self, mut trie_reader: F) -> Result, String> + where + F: FnMut(&dyn Storage, H::Out, &[u8]) -> Result>, String>, + { + loop { + if let Some((block, extrinsic)) = self.extrinsics.pop_front() { + return Ok(Some((block, extrinsic))); + } + + if let Some((block, level)) = self.blocks.pop_front() { + // not having a changes trie root is an error because: + // we never query roots for future blocks + // AND trie roots for old blocks are known (both on full + light node) + let trie_root = self + .roots_storage + .root(&self.end, block.clone())? 
+ .ok_or_else(|| { + format!("Changes trie root for block {} is not found", block.clone()) + })?; + let trie_root = if let Some(storage_key) = self.storage_key { + let child_key = ChildIndex { + block: block.clone(), + storage_key: storage_key.to_vec(), + } + .encode(); + if let Some(trie_root) = trie_reader(self.storage, trie_root, &child_key)? + .and_then(|v| >::decode(&mut &v[..]).ok()) + .map(|v| { + let mut hash = H::Out::default(); + hash.as_mut().copy_from_slice(&v[..]); + hash + }) + { + trie_root + } else { + continue; + } + } else { + trie_root + }; + + // only return extrinsics for blocks before self.max + // most of blocks will be filtered out before pushing to `self.blocks` + // here we just throwing away changes at digest blocks we're processing + debug_assert!( + block >= self.begin, + "We shall not touch digests earlier than a range' begin" + ); + if block <= self.end.number { + let extrinsics_key = ExtrinsicIndex { + block: block.clone(), + key: self.key.to_vec(), + } + .encode(); + let extrinsics = trie_reader(self.storage, trie_root, &extrinsics_key); + if let Some(extrinsics) = extrinsics? { + if let Ok(extrinsics) = ExtrinsicIndexValue::decode(&mut &extrinsics[..]) { + self.extrinsics + .extend(extrinsics.into_iter().rev().map(|e| (block.clone(), e))); + } + } + } + + let blocks_key = DigestIndex { + block: block.clone(), + key: self.key.to_vec(), + } + .encode(); + let blocks = trie_reader(self.storage, trie_root, &blocks_key); + if let Some(blocks) = blocks? 
{ + if let Ok(blocks) = >::decode(&mut &blocks[..]) { + // filter level0 blocks here because we tend to use digest blocks, + // AND digest block changes could also include changes for out-of-range blocks + let begin = self.begin.clone(); + let end = self.end.number.clone(); + let config = self.config.clone(); + self.blocks.extend( + blocks + .into_iter() + .rev() + .filter(|b| { + level.map(|level| level > 1).unwrap_or(true) + || (*b >= begin && *b <= end) + }) + .map(|b| { + let prev_level = + level.map(|level| Some(level - 1)).unwrap_or_else(|| { + Some( + config + .config + .digest_level_at_block( + config.zero.clone(), + b.clone(), + ) + .map(|(level, _, _)| level) + .unwrap_or_else(|| Zero::zero()), + ) + }); + (b, prev_level) + }), + ); + } + } + + continue; + } + + match self.surface.next() { + Some(Ok(block)) => self.blocks.push_back(block), + Some(Err(err)) => return Err(err), + None => return Ok(None), + } + } + } } /// Exploring drilldown operator. pub struct DrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { - essence: DrilldownIteratorEssence<'a, H, Number>, + essence: DrilldownIteratorEssence<'a, H, Number>, } impl<'a, H: Hasher, Number: BlockNumber> Iterator for DrilldownIterator<'a, H, Number> - where H::Out: Encode +where + H::Out: Encode, { - type Item = Result<(Number, u32), String>; + type Item = Result<(Number, u32), String>; - fn next(&mut self) -> Option { - self.essence.next(|storage, root, key| - TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key)) - } + fn next(&mut self) -> Option { + self.essence.next(|storage, root, key| { + TrieBackendEssence::<_, H>::new(TrieBackendAdapter::new(storage), root).storage(key) + }) + } } /// Proving drilldown iterator. 
struct ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { - essence: DrilldownIteratorEssence<'a, H, Number>, - proof_recorder: RefCell>, + essence: DrilldownIteratorEssence<'a, H, Number>, + proof_recorder: RefCell>, } impl<'a, H, Number> ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a, { - /// Consume the iterator, extracting the gathered proof in lexicographical order - /// by value. - pub fn extract_proof(self) -> Vec> { - self.proof_recorder.into_inner().drain() - .into_iter() - .map(|n| n.data.to_vec()) - .collect() - } + /// Consume the iterator, extracting the gathered proof in lexicographical order + /// by value. + pub fn extract_proof(self) -> Vec> { + self.proof_recorder + .into_inner() + .drain() + .into_iter() + .map(|n| n.data.to_vec()) + .collect() + } } impl<'a, H, Number> Iterator for ProvingDrilldownIterator<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, - H::Out: 'a + Codec, +where + Number: BlockNumber, + H: Hasher, + H::Out: 'a + Codec, { - type Item = Result<(Number, u32), String>; - - fn next(&mut self) -> Option { - let proof_recorder = &mut *self.proof_recorder.try_borrow_mut() - .expect("only fails when already borrowed; storage() is non-reentrant; qed"); - self.essence.next(|storage, root, key| - ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder, - }.storage(key)) - } + type Item = Result<(Number, u32), String>; + + fn next(&mut self) -> Option { + let proof_recorder = &mut *self + .proof_recorder + .try_borrow_mut() + .expect("only fails when already borrowed; storage() is non-reentrant; qed"); + self.essence.next(|storage, root, key| { + ProvingBackendRecorder::<_, H> { + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), 
root), + proof_recorder, + } + .storage(key) + }) + } } #[cfg(test)] mod tests { - use std::iter::FromIterator; - use crate::changes_trie::Configuration; - use crate::changes_trie::input::InputPair; - use crate::changes_trie::storage::InMemoryStorage; - use sp_runtime::traits::BlakeTwo256; - use super::*; - - fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let backend = InMemoryStorage::with_inputs(vec![ - // digest: 1..4 => [(3, 0)] - (1, vec![ - ]), - (2, vec![ - ]), - (3, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 3, key: vec![42] }, vec![0]), - ]), - (4, vec![ - InputPair::DigestIndex(DigestIndex { block: 4, key: vec![42] }, vec![3]), - ]), - // digest: 5..8 => [(6, 3), (8, 1+2)] - (5, vec![]), - (6, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 6, key: vec![42] }, vec![3]), - ]), - (7, vec![]), - (8, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 8, key: vec![42] }, vec![1, 2]), - InputPair::DigestIndex(DigestIndex { block: 8, key: vec![42] }, vec![6]), - ]), - // digest: 9..12 => [] - (9, vec![]), - (10, vec![]), - (11, vec![]), - (12, vec![]), - // digest: 0..16 => [4, 8] - (13, vec![]), - (14, vec![]), - (15, vec![]), - (16, vec![ - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![4, 8]), - ]), - ], vec![(b"1".to_vec(), vec![ - (1, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 1, key: vec![42] }, vec![0]), - ]), - (2, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 2, key: vec![42] }, vec![3]), - ]), - (16, vec![ - InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 16, key: vec![42] }, vec![5]), - - InputPair::DigestIndex(DigestIndex { block: 16, key: vec![42] }, vec![2]), - ]), - ]), - ]); - - (config, backend) - } - - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } - } - - #[test] - 
fn drilldown_iterator_works() { - let (config, storage) = prepare_for_drilldown(); - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, - 16, - None, - &[42], - ).and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 2 }, - 4, - None, - &[42], - ).and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 3 }, - 4, - None, - &[42], - ).and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(3, 0)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 7 }, - 7, - None, - &[42], - ).and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 7, - &AnchorBlockId { hash: Default::default(), number: 8 }, - 8, - None, - &[42], - ).and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); - - let drilldown_result = key_changes::( - configuration_range(&config, 0), - &storage, - 5, - &AnchorBlockId { hash: Default::default(), number: 7 }, - 8, - None, - &[42], - ).and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(6, 3)])); - } - - #[test] - fn drilldown_iterator_fails_when_storage_fails() { - let (config, storage) = prepare_for_drilldown(); - storage.clear_storage(); - - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 100 }, - 1000, - None, - &[42], - ).and_then(|i| i.collect::, 
_>>()).is_err()); - - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 100 }, - 1000, - Some(&b"1"[..]), - &[42], - ).and_then(|i| i.collect::, _>>()).is_err()); - } - - #[test] - fn drilldown_iterator_fails_when_range_is_invalid() { - let (config, storage) = prepare_for_drilldown(); - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 100 }, - 50, - None, - &[42], - ).is_err()); - assert!(key_changes::( - configuration_range(&config, 0), - &storage, - 20, - &AnchorBlockId { hash: Default::default(), number: 10 }, - 100, - None, - &[42], - ).is_err()); - } - - - #[test] - fn proving_drilldown_iterator_works() { - // happens on remote full node: - - // create drilldown iterator that records all trie nodes during drilldown - let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof = key_changes_proof::( - configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, None, &[42]).unwrap(); - - let (remote_config, remote_storage) = prepare_for_drilldown(); - let remote_proof_child = key_changes_proof::( - configuration_range(&remote_config, 0), &remote_storage, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]).unwrap(); - - // happens on local light node: - - // create drilldown iterator that works the same, but only depends on trie - let (local_config, local_storage) = prepare_for_drilldown(); - local_storage.clear_storage(); - let local_result = key_changes_proof_check::( - configuration_range(&local_config, 0), &local_storage, remote_proof, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, None, &[42]); - - let (local_config, local_storage) = prepare_for_drilldown(); - local_storage.clear_storage(); - let local_result_child = key_changes_proof_check::( - 
configuration_range(&local_config, 0), &local_storage, remote_proof_child, 1, - &AnchorBlockId { hash: Default::default(), number: 16 }, 16, Some(&b"1"[..]), &[42]); - - // check that drilldown result is the same as if it was happening at the full node - assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); - assert_eq!(local_result_child, Ok(vec![(16, 5), (2, 3)])); - } - - #[test] - fn drilldown_iterator_works_with_skewed_digest() { - let config = Configuration { digest_interval: 4, digest_levels: 3 }; - let mut config_range = configuration_range(&config, 0); - config_range.end = Some(91); - - // when 4^3 deactivates at block 91: - // last L3 digest has been created at block#64 - // skewed digest covers: - // L2 digests at blocks: 80 - // L1 digests at blocks: 84, 88 - // regular blocks: 89, 90, 91 - let mut input = (1u64..92u64).map(|b| (b, vec![])).collect::>(); - // changed at block#63 and covered by L3 digest at block#64 - input[63 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 63, key: vec![42] }, vec![0])); - input[64 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 64, key: vec![42] }, vec![63])); - // changed at block#79 and covered by L2 digest at block#80 + skewed digest at block#91 - input[79 - 1].1.push(InputPair::ExtrinsicIndex(ExtrinsicIndex { block: 79, key: vec![42] }, vec![1])); - input[80 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 80, key: vec![42] }, vec![79])); - input[91 - 1].1.push(InputPair::DigestIndex(DigestIndex { block: 91, key: vec![42] }, vec![80])); - let storage = InMemoryStorage::with_inputs(input, vec![]); - - let drilldown_result = key_changes::( - config_range, - &storage, - 1, - &AnchorBlockId { hash: Default::default(), number: 91 }, - 100_000u64, - None, - &[42], - ).and_then(Result::from_iter); - assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)])); - } + use super::*; + use crate::changes_trie::input::InputPair; + use crate::changes_trie::storage::InMemoryStorage; + use 
crate::changes_trie::Configuration; + use sp_runtime::traits::BlakeTwo256; + use std::iter::FromIterator; + + fn prepare_for_drilldown() -> (Configuration, InMemoryStorage) { + let config = Configuration { + digest_interval: 4, + digest_levels: 2, + }; + let backend = InMemoryStorage::with_inputs( + vec![ + // digest: 1..4 => [(3, 0)] + (1, vec![]), + (2, vec![]), + ( + 3, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 3, + key: vec![42], + }, + vec![0], + )], + ), + ( + 4, + vec![InputPair::DigestIndex( + DigestIndex { + block: 4, + key: vec![42], + }, + vec![3], + )], + ), + // digest: 5..8 => [(6, 3), (8, 1+2)] + (5, vec![]), + ( + 6, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 6, + key: vec![42], + }, + vec![3], + )], + ), + (7, vec![]), + ( + 8, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 8, + key: vec![42], + }, + vec![1, 2], + ), + InputPair::DigestIndex( + DigestIndex { + block: 8, + key: vec![42], + }, + vec![6], + ), + ], + ), + // digest: 9..12 => [] + (9, vec![]), + (10, vec![]), + (11, vec![]), + (12, vec![]), + // digest: 0..16 => [4, 8] + (13, vec![]), + (14, vec![]), + (15, vec![]), + ( + 16, + vec![InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![42], + }, + vec![4, 8], + )], + ), + ], + vec![( + b"1".to_vec(), + vec![ + ( + 1, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 1, + key: vec![42], + }, + vec![0], + )], + ), + ( + 2, + vec![InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 2, + key: vec![42], + }, + vec![3], + )], + ), + ( + 16, + vec![ + InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 16, + key: vec![42], + }, + vec![5], + ), + InputPair::DigestIndex( + DigestIndex { + block: 16, + key: vec![42], + }, + vec![2], + ), + ], + ), + ], + )], + ); + + (config, backend) + } + + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { + config, + zero, + end: None, + } + } + 
+ #[test] + fn drilldown_iterator_works() { + let (config, storage) = prepare_for_drilldown(); + let drilldown_result = key_changes::( + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 16, + None, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); + + let drilldown_result = key_changes::( + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 2, + }, + 4, + None, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![])); + + let drilldown_result = key_changes::( + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 3, + }, + 4, + None, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(3, 0)])); + + let drilldown_result = key_changes::( + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 7, + }, + 7, + None, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(6, 3), (3, 0)])); + + let drilldown_result = key_changes::( + configuration_range(&config, 0), + &storage, + 7, + &AnchorBlockId { + hash: Default::default(), + number: 8, + }, + 8, + None, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(8, 2), (8, 1)])); + + let drilldown_result = key_changes::( + configuration_range(&config, 0), + &storage, + 5, + &AnchorBlockId { + hash: Default::default(), + number: 7, + }, + 8, + None, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(6, 3)])); + } + + #[test] + fn drilldown_iterator_fails_when_storage_fails() { + let (config, storage) = prepare_for_drilldown(); + storage.clear_storage(); + + assert!(key_changes::( + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { + hash: 
Default::default(), + number: 100 + }, + 1000, + None, + &[42], + ) + .and_then(|i| i.collect::, _>>()) + .is_err()); + + assert!(key_changes::( + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 100 + }, + 1000, + Some(&b"1"[..]), + &[42], + ) + .and_then(|i| i.collect::, _>>()) + .is_err()); + } + + #[test] + fn drilldown_iterator_fails_when_range_is_invalid() { + let (config, storage) = prepare_for_drilldown(); + assert!(key_changes::( + configuration_range(&config, 0), + &storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 100 + }, + 50, + None, + &[42], + ) + .is_err()); + assert!(key_changes::( + configuration_range(&config, 0), + &storage, + 20, + &AnchorBlockId { + hash: Default::default(), + number: 10 + }, + 100, + None, + &[42], + ) + .is_err()); + } + + #[test] + fn proving_drilldown_iterator_works() { + // happens on remote full node: + + // create drilldown iterator that records all trie nodes during drilldown + let (remote_config, remote_storage) = prepare_for_drilldown(); + let remote_proof = key_changes_proof::( + configuration_range(&remote_config, 0), + &remote_storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 16, + None, + &[42], + ) + .unwrap(); + + let (remote_config, remote_storage) = prepare_for_drilldown(); + let remote_proof_child = key_changes_proof::( + configuration_range(&remote_config, 0), + &remote_storage, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 16, + Some(&b"1"[..]), + &[42], + ) + .unwrap(); + + // happens on local light node: + + // create drilldown iterator that works the same, but only depends on trie + let (local_config, local_storage) = prepare_for_drilldown(); + local_storage.clear_storage(); + let local_result = key_changes_proof_check::( + configuration_range(&local_config, 0), + &local_storage, + remote_proof, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 
16, + None, + &[42], + ); + + let (local_config, local_storage) = prepare_for_drilldown(); + local_storage.clear_storage(); + let local_result_child = key_changes_proof_check::( + configuration_range(&local_config, 0), + &local_storage, + remote_proof_child, + 1, + &AnchorBlockId { + hash: Default::default(), + number: 16, + }, + 16, + Some(&b"1"[..]), + &[42], + ); + + // check that drilldown result is the same as if it was happening at the full node + assert_eq!(local_result, Ok(vec![(8, 2), (8, 1), (6, 3), (3, 0)])); + assert_eq!(local_result_child, Ok(vec![(16, 5), (2, 3)])); + } + + #[test] + fn drilldown_iterator_works_with_skewed_digest() { + let config = Configuration { + digest_interval: 4, + digest_levels: 3, + }; + let mut config_range = configuration_range(&config, 0); + config_range.end = Some(91); + + // when 4^3 deactivates at block 91: + // last L3 digest has been created at block#64 + // skewed digest covers: + // L2 digests at blocks: 80 + // L1 digests at blocks: 84, 88 + // regular blocks: 89, 90, 91 + let mut input = (1u64..92u64).map(|b| (b, vec![])).collect::>(); + // changed at block#63 and covered by L3 digest at block#64 + input[63 - 1].1.push(InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 63, + key: vec![42], + }, + vec![0], + )); + input[64 - 1].1.push(InputPair::DigestIndex( + DigestIndex { + block: 64, + key: vec![42], + }, + vec![63], + )); + // changed at block#79 and covered by L2 digest at block#80 + skewed digest at block#91 + input[79 - 1].1.push(InputPair::ExtrinsicIndex( + ExtrinsicIndex { + block: 79, + key: vec![42], + }, + vec![1], + )); + input[80 - 1].1.push(InputPair::DigestIndex( + DigestIndex { + block: 80, + key: vec![42], + }, + vec![79], + )); + input[91 - 1].1.push(InputPair::DigestIndex( + DigestIndex { + block: 91, + key: vec![42], + }, + vec![80], + )); + let storage = InMemoryStorage::with_inputs(input, vec![]); + + let drilldown_result = key_changes::( + config_range, + &storage, + 1, + &AnchorBlockId { 
+ hash: Default::default(), + number: 91, + }, + 100_000u64, + None, + &[42], + ) + .and_then(Result::from_iter); + assert_eq!(drilldown_result, Ok(vec![(79, 1), (63, 0)])); + } } diff --git a/primitives/state-machine/src/changes_trie/input.rs b/primitives/state-machine/src/changes_trie/input.rs index 4a1420f848..0a3f74fb38 100644 --- a/primitives/state-machine/src/changes_trie/input.rs +++ b/primitives/state-machine/src/changes_trie/input.rs @@ -16,19 +16,16 @@ //! Different types of changes trie input pairs. -use codec::{Decode, Encode, Input, Output, Error}; -use crate::{ - StorageKey, StorageValue, - changes_trie::BlockNumber -}; +use crate::{changes_trie::BlockNumber, StorageKey, StorageValue}; +use codec::{Decode, Encode, Error, Input, Output}; /// Key of { changed key => set of extrinsic indices } mapping. #[derive(Clone, Debug, PartialEq, Eq)] pub struct ExtrinsicIndex { - /// Block at which this key has been inserted in the trie. - pub block: Number, - /// Storage key this node is responsible for. - pub key: StorageKey, + /// Block at which this key has been inserted in the trie. + pub block: Number, + /// Storage key this node is responsible for. + pub key: StorageKey, } /// Value of { changed key => set of extrinsic indices } mapping. @@ -37,19 +34,19 @@ pub type ExtrinsicIndexValue = Vec; /// Key of { changed key => block/digest block numbers } mapping. #[derive(Clone, Debug, PartialEq, Eq)] pub struct DigestIndex { - /// Block at which this key has been inserted in the trie. - pub block: Number, - /// Storage key this node is responsible for. - pub key: StorageKey, + /// Block at which this key has been inserted in the trie. + pub block: Number, + /// Storage key this node is responsible for. + pub key: StorageKey, } /// Key of { childtrie key => Childchange trie } mapping. #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] pub struct ChildIndex { - /// Block at which this key has been inserted in the trie. 
- pub block: Number, - /// Storage key this node is responsible for. - pub storage_key: StorageKey, + /// Block at which this key has been inserted in the trie. + pub block: Number, + /// Storage key this node is responsible for. + pub storage_key: StorageKey, } /// Value of { changed key => block/digest block numbers } mapping. @@ -62,146 +59,151 @@ pub type ChildIndexValue = Vec; /// Single input pair of changes trie. #[derive(Clone, Debug, PartialEq, Eq)] pub enum InputPair { - /// Element of { key => set of extrinsics where key has been changed } element mapping. - ExtrinsicIndex(ExtrinsicIndex, ExtrinsicIndexValue), - /// Element of { key => set of blocks/digest blocks where key has been changed } element mapping. - DigestIndex(DigestIndex, DigestIndexValue), - /// Element of { childtrie key => Childchange trie } where key has been changed } element mapping. - ChildIndex(ChildIndex, ChildIndexValue), + /// Element of { key => set of extrinsics where key has been changed } element mapping. + ExtrinsicIndex(ExtrinsicIndex, ExtrinsicIndexValue), + /// Element of { key => set of blocks/digest blocks where key has been changed } element mapping. + DigestIndex(DigestIndex, DigestIndexValue), + /// Element of { childtrie key => Childchange trie } where key has been changed } element mapping. + ChildIndex(ChildIndex, ChildIndexValue), } /// Single input key of changes trie. #[derive(Clone, Debug, PartialEq, Eq)] pub enum InputKey { - /// Key of { key => set of extrinsics where key has been changed } element mapping. - ExtrinsicIndex(ExtrinsicIndex), - /// Key of { key => set of blocks/digest blocks where key has been changed } element mapping. - DigestIndex(DigestIndex), - /// Key of { childtrie key => Childchange trie } where key has been changed } element mapping. - ChildIndex(ChildIndex), + /// Key of { key => set of extrinsics where key has been changed } element mapping. 
+ ExtrinsicIndex(ExtrinsicIndex), + /// Key of { key => set of blocks/digest blocks where key has been changed } element mapping. + DigestIndex(DigestIndex), + /// Key of { childtrie key => Childchange trie } where key has been changed } element mapping. + ChildIndex(ChildIndex), } impl InputPair { - /// Extract storage key that this pair corresponds to. - pub fn key(&self) -> Option<&[u8]> { - match *self { - InputPair::ExtrinsicIndex(ref key, _) => Some(&key.key), - InputPair::DigestIndex(ref key, _) => Some(&key.key), - InputPair::ChildIndex(_, _) => None, - } - } + /// Extract storage key that this pair corresponds to. + pub fn key(&self) -> Option<&[u8]> { + match *self { + InputPair::ExtrinsicIndex(ref key, _) => Some(&key.key), + InputPair::DigestIndex(ref key, _) => Some(&key.key), + InputPair::ChildIndex(_, _) => None, + } + } } impl Into<(StorageKey, StorageValue)> for InputPair { - fn into(self) -> (StorageKey, StorageValue) { - match self { - InputPair::ExtrinsicIndex(key, value) => (key.encode(), value.encode()), - InputPair::DigestIndex(key, value) => (key.encode(), value.encode()), - InputPair::ChildIndex(key, value) => (key.encode(), value.encode()), - } - } + fn into(self) -> (StorageKey, StorageValue) { + match self { + InputPair::ExtrinsicIndex(key, value) => (key.encode(), value.encode()), + InputPair::DigestIndex(key, value) => (key.encode(), value.encode()), + InputPair::ChildIndex(key, value) => (key.encode(), value.encode()), + } + } } impl Into> for InputPair { - fn into(self) -> InputKey { - match self { - InputPair::ExtrinsicIndex(key, _) => InputKey::ExtrinsicIndex(key), - InputPair::DigestIndex(key, _) => InputKey::DigestIndex(key), - InputPair::ChildIndex(key, _) => InputKey::ChildIndex(key), - } - } + fn into(self) -> InputKey { + match self { + InputPair::ExtrinsicIndex(key, _) => InputKey::ExtrinsicIndex(key), + InputPair::DigestIndex(key, _) => InputKey::DigestIndex(key), + InputPair::ChildIndex(key, _) => 
InputKey::ChildIndex(key), + } + } } impl ExtrinsicIndex { - pub fn key_neutral_prefix(block: Number) -> Vec { - let mut prefix = vec![1]; - prefix.extend(block.encode()); - prefix - } + pub fn key_neutral_prefix(block: Number) -> Vec { + let mut prefix = vec![1]; + prefix.extend(block.encode()); + prefix + } } impl Encode for ExtrinsicIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(1); - self.block.encode_to(dest); - self.key.encode_to(dest); - } + fn encode_to(&self, dest: &mut W) { + dest.push_byte(1); + self.block.encode_to(dest); + self.key.encode_to(dest); + } } impl codec::EncodeLike for ExtrinsicIndex {} impl DigestIndex { - pub fn key_neutral_prefix(block: Number) -> Vec { - let mut prefix = vec![2]; - prefix.extend(block.encode()); - prefix - } + pub fn key_neutral_prefix(block: Number) -> Vec { + let mut prefix = vec![2]; + prefix.extend(block.encode()); + prefix + } } - impl Encode for DigestIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(2); - self.block.encode_to(dest); - self.key.encode_to(dest); - } + fn encode_to(&self, dest: &mut W) { + dest.push_byte(2); + self.block.encode_to(dest); + self.key.encode_to(dest); + } } impl ChildIndex { - pub fn key_neutral_prefix(block: Number) -> Vec { - let mut prefix = vec![3]; - prefix.extend(block.encode()); - prefix - } + pub fn key_neutral_prefix(block: Number) -> Vec { + let mut prefix = vec![3]; + prefix.extend(block.encode()); + prefix + } } impl Encode for ChildIndex { - fn encode_to(&self, dest: &mut W) { - dest.push_byte(3); - self.block.encode_to(dest); - self.storage_key.encode_to(dest); - } + fn encode_to(&self, dest: &mut W) { + dest.push_byte(3); + self.block.encode_to(dest); + self.storage_key.encode_to(dest); + } } impl codec::EncodeLike for DigestIndex {} impl Decode for InputKey { - fn decode(input: &mut I) -> Result { - match input.read_byte()? 
{ - 1 => Ok(InputKey::ExtrinsicIndex(ExtrinsicIndex { - block: Decode::decode(input)?, - key: Decode::decode(input)?, - })), - 2 => Ok(InputKey::DigestIndex(DigestIndex { - block: Decode::decode(input)?, - key: Decode::decode(input)?, - })), - 3 => Ok(InputKey::ChildIndex(ChildIndex { - block: Decode::decode(input)?, - storage_key: Decode::decode(input)?, - })), - _ => Err("Invalid input key variant".into()), - } - } + fn decode(input: &mut I) -> Result { + match input.read_byte()? { + 1 => Ok(InputKey::ExtrinsicIndex(ExtrinsicIndex { + block: Decode::decode(input)?, + key: Decode::decode(input)?, + })), + 2 => Ok(InputKey::DigestIndex(DigestIndex { + block: Decode::decode(input)?, + key: Decode::decode(input)?, + })), + 3 => Ok(InputKey::ChildIndex(ChildIndex { + block: Decode::decode(input)?, + storage_key: Decode::decode(input)?, + })), + _ => Err("Invalid input key variant".into()), + } + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn extrinsic_index_serialized_and_deserialized() { - let original = ExtrinsicIndex { block: 777u64, key: vec![42] }; - let serialized = original.encode(); - let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); - assert_eq!(InputKey::ExtrinsicIndex(original), deserialized); - } - - #[test] - fn digest_index_serialized_and_deserialized() { - let original = DigestIndex { block: 777u64, key: vec![42] }; - let serialized = original.encode(); - let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); - assert_eq!(InputKey::DigestIndex(original), deserialized); - } + use super::*; + + #[test] + fn extrinsic_index_serialized_and_deserialized() { + let original = ExtrinsicIndex { + block: 777u64, + key: vec![42], + }; + let serialized = original.encode(); + let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); + assert_eq!(InputKey::ExtrinsicIndex(original), deserialized); + } + + #[test] + fn digest_index_serialized_and_deserialized() { + let original = 
DigestIndex { + block: 777u64, + key: vec![42], + }; + let serialized = original.encode(); + let deserialized: InputKey = Decode::decode(&mut &serialized[..]).unwrap(); + assert_eq!(InputKey::DigestIndex(original), deserialized); + } } diff --git a/primitives/state-machine/src/changes_trie/mod.rs b/primitives/state-machine/src/changes_trie/mod.rs index d614992df3..79aa093b20 100644 --- a/primitives/state-machine/src/changes_trie/mod.rs +++ b/primitives/state-machine/src/changes_trie/mod.rs @@ -57,120 +57,149 @@ mod prune; mod storage; mod surface_iterator; -pub use self::build_cache::{BuildCache, CachedBuildData, CacheAction}; -pub use self::storage::InMemoryStorage; +pub use self::build_cache::{BuildCache, CacheAction, CachedBuildData}; pub use self::changes_iterator::{ - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, + key_changes, key_changes_proof, key_changes_proof_check, key_changes_proof_check_with_db, }; pub use self::prune::prune; +pub use self::storage::InMemoryStorage; -use std::collections::{HashMap, HashSet}; -use std::convert::TryInto; +use crate::{ + backend::Backend, + changes_trie::{ + build::prepare_input, + build_cache::{IncompleteCacheAction, IncompleteCachedBuildData}, + }, + overlayed_changes::OverlayedChanges, + StorageKey, +}; +use codec::{Decode, Encode}; use hash_db::{Hasher, Prefix}; use num_traits::{One, Zero}; -use codec::{Decode, Encode}; use sp_core; -use sp_trie::{MemoryDB, DBValue, TrieMut}; use sp_trie::trie_types::TrieDBMut; -use crate::{ - StorageKey, - backend::Backend, - overlayed_changes::OverlayedChanges, - changes_trie::{ - build::prepare_input, - build_cache::{IncompleteCachedBuildData, IncompleteCacheAction}, - }, -}; +use sp_trie::{DBValue, MemoryDB, TrieMut}; +use std::collections::{HashMap, HashSet}; +use std::convert::TryInto; /// Changes that are made outside of extrinsics are marked with this index; pub const NO_EXTRINSIC_INDEX: u32 = 0xffffffff; /// Requirements for block 
number that can be used with changes tries. pub trait BlockNumber: - Send + Sync + 'static + - std::fmt::Display + - Clone + - From + TryInto + One + Zero + - PartialEq + Ord + - std::hash::Hash + - std::ops::Add + ::std::ops::Sub + - std::ops::Mul + ::std::ops::Div + - std::ops::Rem + - std::ops::AddAssign + - num_traits::CheckedMul + num_traits::CheckedSub + - Decode + Encode -{} - -impl BlockNumber for T where T: - Send + Sync + 'static + - std::fmt::Display + - Clone + - From + TryInto + One + Zero + - PartialEq + Ord + - std::hash::Hash + - std::ops::Add + ::std::ops::Sub + - std::ops::Mul + ::std::ops::Div + - std::ops::Rem + - std::ops::AddAssign + - num_traits::CheckedMul + num_traits::CheckedSub + - Decode + Encode, -{} + Send + + Sync + + 'static + + std::fmt::Display + + Clone + + From + + TryInto + + One + + Zero + + PartialEq + + Ord + + std::hash::Hash + + std::ops::Add + + ::std::ops::Sub + + std::ops::Mul + + ::std::ops::Div + + std::ops::Rem + + std::ops::AddAssign + + num_traits::CheckedMul + + num_traits::CheckedSub + + Decode + + Encode +{ +} + +impl BlockNumber for T where + T: Send + + Sync + + 'static + + std::fmt::Display + + Clone + + From + + TryInto + + One + + Zero + + PartialEq + + Ord + + std::hash::Hash + + std::ops::Add + + ::std::ops::Sub + + std::ops::Mul + + ::std::ops::Div + + std::ops::Rem + + std::ops::AddAssign + + num_traits::CheckedMul + + num_traits::CheckedSub + + Decode + + Encode +{ +} /// Block identifier that could be used to determine fork of this block. #[derive(Debug)] pub struct AnchorBlockId { - /// Hash of this block. - pub hash: Hash, - /// Number of this block. - pub number: Number, + /// Hash of this block. + pub hash: Hash, + /// Number of this block. + pub number: Number, } /// Changes tries state at some block. pub struct State<'a, H, Number> { - /// Configuration that is active at given block. - pub config: Configuration, - /// Configuration activation block number. 
Zero if it is the first configuration on the chain, - /// or number of the block that have emit NewConfiguration signal (thus activating configuration - /// starting from the **next** block). - pub zero: Number, - /// Underlying changes tries storage reference. - pub storage: &'a dyn Storage, + /// Configuration that is active at given block. + pub config: Configuration, + /// Configuration activation block number. Zero if it is the first configuration on the chain, + /// or number of the block that have emit NewConfiguration signal (thus activating configuration + /// starting from the **next** block). + pub zero: Number, + /// Underlying changes tries storage reference. + pub storage: &'a dyn Storage, } /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait RootsStorage: Send + Sync { - /// Resolve hash of the block into anchor. - fn build_anchor(&self, hash: H::Out) -> Result, String>; - /// Get changes trie root for the block with given number which is an ancestor (or the block - /// itself) of the anchor_block (i.e. anchor_block.number >= block). - fn root(&self, anchor: &AnchorBlockId, block: Number) -> Result, String>; + /// Resolve hash of the block into anchor. + fn build_anchor(&self, hash: H::Out) -> Result, String>; + /// Get changes trie root for the block with given number which is an ancestor (or the block + /// itself) of the anchor_block (i.e. anchor_block.number >= block). + fn root( + &self, + anchor: &AnchorBlockId, + block: Number, + ) -> Result, String>; } /// Changes trie storage. Provides access to trie roots and trie nodes. pub trait Storage: RootsStorage { - /// Casts from self reference to RootsStorage reference. - fn as_roots_storage(&self) -> &dyn RootsStorage; - /// Execute given functor with cached entry for given trie root. - /// Returns true if the functor has been called (cache entry exists) and false otherwise. 
- fn with_cached_changed_keys( - &self, - root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool; - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + /// Casts from self reference to RootsStorage reference. + fn as_roots_storage(&self) -> &dyn RootsStorage; + /// Execute given functor with cached entry for given trie root. + /// Returns true if the functor has been called (cache entry exists) and false otherwise. + fn with_cached_changed_keys( + &self, + root: &H::Out, + functor: &mut dyn FnMut(&HashMap, HashSet>), + ) -> bool; + /// Get a trie node. + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } /// Changes trie storage -> trie backend essence adapter. -pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>(pub &'a dyn Storage); +pub struct TrieBackendStorageAdapter<'a, H: Hasher, Number: BlockNumber>( + pub &'a dyn Storage, +); -impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage for TrieBackendStorageAdapter<'a, H, N> { - type Overlay = sp_trie::MemoryDB; +impl<'a, H: Hasher, N: BlockNumber> crate::TrieBackendStorage + for TrieBackendStorageAdapter<'a, H, N> +{ + type Overlay = sp_trie::MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.0.get(key, prefix) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + self.0.get(key, prefix) + } } /// Changes trie configuration. @@ -179,42 +208,38 @@ pub type Configuration = sp_core::ChangesTrieConfiguration; /// Blocks range where configuration has been constant. #[derive(Clone)] pub struct ConfigurationRange<'a, N> { - /// Active configuration. - pub config: &'a Configuration, - /// Zero block of this configuration. The configuration is active starting from the next block. - pub zero: N, - /// End block of this configuration. It is the last block where configuration has been active. - pub end: Option, + /// Active configuration. 
+ pub config: &'a Configuration, + /// Zero block of this configuration. The configuration is active starting from the next block. + pub zero: N, + /// End block of this configuration. It is the last block where configuration has been active. + pub end: Option, } impl<'a, H, Number> State<'a, H, Number> { - /// Create state with given config and storage. - pub fn new( - config: Configuration, - zero: Number, - storage: &'a dyn Storage, - ) -> Self { - Self { - config, - zero, - storage, - } - } + /// Create state with given config and storage. + pub fn new(config: Configuration, zero: Number, storage: &'a dyn Storage) -> Self { + Self { + config, + zero, + storage, + } + } } impl<'a, H, Number: Clone> Clone for State<'a, H, Number> { - fn clone(&self) -> Self { - State { - config: self.config.clone(), - zero: self.zero.clone(), - storage: self.storage, - } - } + fn clone(&self) -> Self { + State { + config: self.config.clone(), + zero: self.zero.clone(), + storage: self.storage, + } + } } /// Create state where changes tries are disabled. pub fn disabled_state<'a, H, Number>() -> Option> { - None + None } /// Compute the changes trie root and transaction for given block. @@ -222,184 +247,233 @@ pub fn disabled_state<'a, H, Number>() -> Option> { /// Returns Ok(None) if there's no data to perform computation. /// Panics if background storage returns an error OR if insert to MemoryDB fails. pub fn build_changes_trie<'a, B: Backend, H: Hasher, Number: BlockNumber>( - backend: &B, - state: Option<&'a State<'a, H, Number>>, - changes: &OverlayedChanges, - parent_hash: H::Out, - panic_on_storage_error: bool, + backend: &B, + state: Option<&'a State<'a, H, Number>>, + changes: &OverlayedChanges, + parent_hash: H::Out, + panic_on_storage_error: bool, ) -> Result, H::Out, CacheAction)>, ()> - where - H::Out: Ord + 'static + Encode, +where + H::Out: Ord + 'static + Encode, { - /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. 
- fn maybe_panic( - res: std::result::Result, - panic: bool, - ) -> std::result::Result { - res.map(Ok) - .unwrap_or_else(|e| if panic { - panic!("changes trie: storage access is not allowed to fail within runtime: {:?}", e) - } else { - Err(()) - }) - } - - // when storage isn't provided, changes tries aren't created - let state = match state { - Some(state) => state, - None => return Ok(None), - }; - - // build_anchor error should not be considered fatal - let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; - let block = parent.number.clone() + One::one(); - - // prepare configuration range - we already know zero block. Current block may be the end block if configuration - // has been changed in this block - let is_config_changed = match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { - Some(Some(new_config)) => new_config != &state.config.encode()[..], - Some(None) => true, - None => false, - }; - let config_range = ConfigurationRange { - config: &state.config, - zero: state.zero.clone(), - end: if is_config_changed { Some(block.clone()) } else { None }, - }; - - // storage errors are considered fatal (similar to situations when runtime fetches values from storage) - let (input_pairs, child_input_pairs, digest_input_blocks) = maybe_panic( - prepare_input::( - backend, - state.storage, - config_range.clone(), - changes, - &parent, - ), - panic_on_storage_error, - )?; - - // prepare cached data - let mut cache_action = prepare_cached_build_data(config_range, block.clone()); - let needs_changed_keys = cache_action.collects_changed_keys(); - cache_action = cache_action.set_digest_input_blocks(digest_input_blocks); - - let mut mdb = MemoryDB::default(); - let mut child_roots = Vec::with_capacity(child_input_pairs.len()); - for (child_index, input_pairs) in child_input_pairs { - let mut not_empty = false; - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - let mut 
storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - not_empty = true; - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = cache_action.insert( - Some(child_index.storage_key.clone()), - storage_changed_keys, - ); - } - if not_empty { - child_roots.push(input::InputPair::ChildIndex(child_index, root.as_ref().to_vec())); - } - } - let mut root = Default::default(); - { - let mut trie = TrieDBMut::::new(&mut mdb, &mut root); - for (key, value) in child_roots.into_iter().map(Into::into) { - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - let mut storage_changed_keys = HashSet::new(); - for input_pair in input_pairs { - if needs_changed_keys { - if let Some(key) = input_pair.key() { - storage_changed_keys.insert(key.to_vec()); - } - } - - let (key, value) = input_pair.into(); - maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; - } - - cache_action = cache_action.insert( - None, - storage_changed_keys, - ); - } - - let cache_action = cache_action.complete(block, &root); - Ok(Some((mdb, root, cache_action))) + /// Panics when `res.is_err() && panic`, otherwise it returns `Err(())` on an error. 
+ fn maybe_panic( + res: std::result::Result, + panic: bool, + ) -> std::result::Result { + res.map(Ok).unwrap_or_else(|e| { + if panic { + panic!( + "changes trie: storage access is not allowed to fail within runtime: {:?}", + e + ) + } else { + Err(()) + } + }) + } + + // when storage isn't provided, changes tries aren't created + let state = match state { + Some(state) => state, + None => return Ok(None), + }; + + // build_anchor error should not be considered fatal + let parent = state.storage.build_anchor(parent_hash).map_err(|_| ())?; + let block = parent.number.clone() + One::one(); + + // prepare configuration range - we already know zero block. Current block may be the end block if configuration + // has been changed in this block + let is_config_changed = + match changes.storage(sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG) { + Some(Some(new_config)) => new_config != &state.config.encode()[..], + Some(None) => true, + None => false, + }; + let config_range = ConfigurationRange { + config: &state.config, + zero: state.zero.clone(), + end: if is_config_changed { + Some(block.clone()) + } else { + None + }, + }; + + // storage errors are considered fatal (similar to situations when runtime fetches values from storage) + let (input_pairs, child_input_pairs, digest_input_blocks) = maybe_panic( + prepare_input::( + backend, + state.storage, + config_range.clone(), + changes, + &parent, + ), + panic_on_storage_error, + )?; + + // prepare cached data + let mut cache_action = prepare_cached_build_data(config_range, block.clone()); + let needs_changed_keys = cache_action.collects_changed_keys(); + cache_action = cache_action.set_digest_input_blocks(digest_input_blocks); + + let mut mdb = MemoryDB::default(); + let mut child_roots = Vec::with_capacity(child_input_pairs.len()); + for (child_index, input_pairs) in child_input_pairs { + let mut not_empty = false; + let mut root = Default::default(); + { + let mut trie = TrieDBMut::::new(&mut mdb, &mut root); 
+ let mut storage_changed_keys = HashSet::new(); + for input_pair in input_pairs { + if needs_changed_keys { + if let Some(key) = input_pair.key() { + storage_changed_keys.insert(key.to_vec()); + } + } + + let (key, value) = input_pair.into(); + not_empty = true; + maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; + } + + cache_action = + cache_action.insert(Some(child_index.storage_key.clone()), storage_changed_keys); + } + if not_empty { + child_roots.push(input::InputPair::ChildIndex( + child_index, + root.as_ref().to_vec(), + )); + } + } + let mut root = Default::default(); + { + let mut trie = TrieDBMut::::new(&mut mdb, &mut root); + for (key, value) in child_roots.into_iter().map(Into::into) { + maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; + } + + let mut storage_changed_keys = HashSet::new(); + for input_pair in input_pairs { + if needs_changed_keys { + if let Some(key) = input_pair.key() { + storage_changed_keys.insert(key.to_vec()); + } + } + + let (key, value) = input_pair.into(); + maybe_panic(trie.insert(&key, &value), panic_on_storage_error)?; + } + + cache_action = cache_action.insert(None, storage_changed_keys); + } + + let cache_action = cache_action.complete(block, &root); + Ok(Some((mdb, root, cache_action))) } /// Prepare empty cached build data for given block. 
fn prepare_cached_build_data( - config: ConfigurationRange, - block: Number, + config: ConfigurationRange, + block: Number, ) -> IncompleteCacheAction { - // when digests are not enabled in configuration, we do not need to cache anything - // because it'll never be used again for building other tries - // => let's clear the cache - if !config.config.is_digest_build_enabled() { - return IncompleteCacheAction::Clear; - } - - // when this is the last block where current configuration is active - // => let's clear the cache - if config.end.as_ref() == Some(&block) { - return IncompleteCacheAction::Clear; - } - - // we do not need to cache anything when top-level digest trie is created, because - // it'll never be used again for building other tries - // => let's clear the cache - match config.config.digest_level_at_block(config.zero.clone(), block) { - Some((digest_level, _, _)) if digest_level == config.config.digest_levels => IncompleteCacheAction::Clear, - _ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()), - } + // when digests are not enabled in configuration, we do not need to cache anything + // because it'll never be used again for building other tries + // => let's clear the cache + if !config.config.is_digest_build_enabled() { + return IncompleteCacheAction::Clear; + } + + // when this is the last block where current configuration is active + // => let's clear the cache + if config.end.as_ref() == Some(&block) { + return IncompleteCacheAction::Clear; + } + + // we do not need to cache anything when top-level digest trie is created, because + // it'll never be used again for building other tries + // => let's clear the cache + match config + .config + .digest_level_at_block(config.zero.clone(), block) + { + Some((digest_level, _, _)) if digest_level == config.config.digest_levels => { + IncompleteCacheAction::Clear + } + _ => IncompleteCacheAction::CacheBuildData(IncompleteCachedBuildData::new()), + } } #[cfg(test)] mod tests { - use 
super::*; - - #[test] - fn cache_is_cleared_when_digests_are_disabled() { - let config = Configuration { digest_interval: 0, digest_levels: 0 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 8u32), IncompleteCacheAction::Clear); - } - - #[test] - fn build_data_is_cached_when_digests_are_enabled() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert!(prepare_cached_build_data(config_range.clone(), 4u32).collects_changed_keys()); - assert!(prepare_cached_build_data(config_range.clone(), 7u32).collects_changed_keys()); - assert!(prepare_cached_build_data(config_range, 8u32).collects_changed_keys()); - } - - #[test] - fn cache_is_cleared_when_digests_are_enabled_and_top_level_digest_is_built() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: None, config: &config }; - assert_eq!(prepare_cached_build_data(config_range, 64u32), IncompleteCacheAction::Clear); - } - - #[test] - fn cache_is_cleared_when_end_block_of_configuration_is_built() { - let config = Configuration { digest_interval: 8, digest_levels: 2 }; - let config_range = ConfigurationRange { zero: 0, end: Some(4u32), config: &config }; - assert_eq!(prepare_cached_build_data(config_range.clone(), 4u32), IncompleteCacheAction::Clear); - } + use super::*; + + #[test] + fn cache_is_cleared_when_digests_are_disabled() { + let config = Configuration { + digest_interval: 0, + digest_levels: 0, + }; + let config_range = ConfigurationRange { + zero: 0, + end: None, + config: &config, + }; + assert_eq!( + prepare_cached_build_data(config_range, 8u32), + IncompleteCacheAction::Clear + ); + } + + #[test] + fn build_data_is_cached_when_digests_are_enabled() { + let config = Configuration { + digest_interval: 8, + digest_levels: 2, + }; + let 
config_range = ConfigurationRange { + zero: 0, + end: None, + config: &config, + }; + assert!(prepare_cached_build_data(config_range.clone(), 4u32).collects_changed_keys()); + assert!(prepare_cached_build_data(config_range.clone(), 7u32).collects_changed_keys()); + assert!(prepare_cached_build_data(config_range, 8u32).collects_changed_keys()); + } + + #[test] + fn cache_is_cleared_when_digests_are_enabled_and_top_level_digest_is_built() { + let config = Configuration { + digest_interval: 8, + digest_levels: 2, + }; + let config_range = ConfigurationRange { + zero: 0, + end: None, + config: &config, + }; + assert_eq!( + prepare_cached_build_data(config_range, 64u32), + IncompleteCacheAction::Clear + ); + } + + #[test] + fn cache_is_cleared_when_end_block_of_configuration_is_built() { + let config = Configuration { + digest_interval: 8, + digest_levels: 2, + }; + let config_range = ConfigurationRange { + zero: 0, + end: Some(4u32), + config: &config, + }; + assert_eq!( + prepare_cached_build_data(config_range.clone(), 4u32), + IncompleteCacheAction::Clear + ); + } } diff --git a/primitives/state-machine/src/changes_trie/prune.rs b/primitives/state-machine/src/changes_trie/prune.rs index 87923dc2f5..1b7a1667de 100644 --- a/primitives/state-machine/src/changes_trie/prune.rs +++ b/primitives/state-machine/src/changes_trie/prune.rs @@ -16,172 +16,190 @@ //! Changes trie pruning-related functions. 
+use crate::changes_trie::input::{ChildIndex, InputKey}; +use crate::changes_trie::storage::TrieBackendAdapter; +use crate::changes_trie::{AnchorBlockId, BlockNumber, Storage}; +use crate::proving_backend::ProvingBackendRecorder; +use crate::trie_backend_essence::TrieBackendEssence; +use codec::{Codec, Decode}; use hash_db::Hasher; -use sp_trie::Recorder; use log::warn; use num_traits::One; -use crate::proving_backend::ProvingBackendRecorder; -use crate::trie_backend_essence::TrieBackendEssence; -use crate::changes_trie::{AnchorBlockId, Storage, BlockNumber}; -use crate::changes_trie::storage::TrieBackendAdapter; -use crate::changes_trie::input::{ChildIndex, InputKey}; -use codec::{Decode, Codec}; +use sp_trie::Recorder; /// Prune obsolete changes tries. Pruning happens at the same block, where highest /// level digest is created. Pruning guarantees to save changes tries for last /// `min_blocks_to_keep` blocks. We only prune changes tries at `max_digest_interval` /// ranges. pub fn prune( - storage: &dyn Storage, - first: Number, - last: Number, - current_block: &AnchorBlockId, - mut remove_trie_node: F, -) where H::Out: Codec { - // delete changes trie for every block in range - let mut block = first; - loop { - if block >= last.clone() + One::one() { - break; - } - - let prev_block = block.clone(); - block += One::one(); - - let block = prev_block; - let root = match storage.root(current_block, block.clone()) { - Ok(Some(root)) => root, - Ok(None) => continue, - Err(error) => { - // try to delete other tries - warn!(target: "trie", "Failed to read changes trie root from DB: {}", error); - continue; - }, - }; - let children_roots = { - let trie_storage = TrieBackendEssence::<_, H>::new( - crate::changes_trie::TrieBackendStorageAdapter(storage), - root, - ); - let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); - let mut children_roots = Vec::new(); - trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { - if let 
Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) { - if let Ok(value) = >::decode(&mut &value[..]) { - let mut trie_root = ::Out::default(); - trie_root.as_mut().copy_from_slice(&value[..]); - children_roots.push(trie_root); - } - } - }); - - children_roots - }; - for root in children_roots.into_iter() { - prune_trie(storage, root, &mut remove_trie_node); - } - - prune_trie(storage, root, &mut remove_trie_node); - } + storage: &dyn Storage, + first: Number, + last: Number, + current_block: &AnchorBlockId, + mut remove_trie_node: F, +) where + H::Out: Codec, +{ + // delete changes trie for every block in range + let mut block = first; + loop { + if block >= last.clone() + One::one() { + break; + } + + let prev_block = block.clone(); + block += One::one(); + + let block = prev_block; + let root = match storage.root(current_block, block.clone()) { + Ok(Some(root)) => root, + Ok(None) => continue, + Err(error) => { + // try to delete other tries + warn!(target: "trie", "Failed to read changes trie root from DB: {}", error); + continue; + } + }; + let children_roots = { + let trie_storage = TrieBackendEssence::<_, H>::new( + crate::changes_trie::TrieBackendStorageAdapter(storage), + root, + ); + let child_prefix = ChildIndex::key_neutral_prefix(block.clone()); + let mut children_roots = Vec::new(); + trie_storage.for_key_values_with_prefix(&child_prefix, |key, value| { + if let Ok(InputKey::ChildIndex::(_trie_key)) = Decode::decode(&mut &key[..]) + { + if let Ok(value) = >::decode(&mut &value[..]) { + let mut trie_root = ::Out::default(); + trie_root.as_mut().copy_from_slice(&value[..]); + children_roots.push(trie_root); + } + } + }); + + children_roots + }; + for root in children_roots.into_iter() { + prune_trie(storage, root, &mut remove_trie_node); + } + + prune_trie(storage, root, &mut remove_trie_node); + } } // Prune a trie. 
fn prune_trie( - storage: &dyn Storage, - root: H::Out, - remove_trie_node: &mut F, -) where H::Out: Codec { - - // enumerate all changes trie' keys, recording all nodes that have been 'touched' - // (effectively - all changes trie nodes) - let mut proof_recorder: Recorder = Default::default(); - { - let mut trie = ProvingBackendRecorder::<_, H> { - backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), - proof_recorder: &mut proof_recorder, - }; - trie.record_all_keys(); - } - - // all nodes of this changes trie should be pruned - remove_trie_node(root); - for node in proof_recorder.drain().into_iter().map(|n| n.hash) { - remove_trie_node(node); - } + storage: &dyn Storage, + root: H::Out, + remove_trie_node: &mut F, +) where + H::Out: Codec, +{ + // enumerate all changes trie' keys, recording all nodes that have been 'touched' + // (effectively - all changes trie nodes) + let mut proof_recorder: Recorder = Default::default(); + { + let mut trie = ProvingBackendRecorder::<_, H> { + backend: &TrieBackendEssence::new(TrieBackendAdapter::new(storage), root), + proof_recorder: &mut proof_recorder, + }; + trie.record_all_keys(); + } + + // all nodes of this changes trie should be pruned + remove_trie_node(root); + for node in proof_recorder.drain().into_iter().map(|n| n.hash) { + remove_trie_node(node); + } } #[cfg(test)] mod tests { - use std::collections::HashSet; - use sp_trie::MemoryDB; - use sp_core::H256; - use crate::backend::insert_into_memory_db; - use crate::changes_trie::storage::InMemoryStorage; - use codec::Encode; - use sp_runtime::traits::BlakeTwo256; - use super::*; - - fn prune_by_collect( - storage: &dyn Storage, - first: u64, - last: u64, - current_block: u64, - ) -> HashSet { - let mut pruned_trie_nodes = HashSet::new(); - let anchor = AnchorBlockId { hash: Default::default(), number: current_block }; - prune(storage, first, last, &anchor, - |node| { pruned_trie_nodes.insert(node); }); - pruned_trie_nodes - } - - #[test] - fn 
prune_works() { - fn prepare_storage() -> InMemoryStorage { - let child_key = ChildIndex { block: 67u64, storage_key: b"1".to_vec() }.encode(); - let mut mdb1 = MemoryDB::::default(); - let root1 = insert_into_memory_db::( - &mut mdb1, vec![(vec![10], vec![20])]).unwrap(); - let mut mdb2 = MemoryDB::::default(); - let root2 = insert_into_memory_db::( - &mut mdb2, - vec![(vec![11], vec![21]), (vec![12], vec![22])], - ).unwrap(); - let mut mdb3 = MemoryDB::::default(); - let ch_root3 = insert_into_memory_db::( - &mut mdb3, vec![(vec![110], vec![120])]).unwrap(); - let root3 = insert_into_memory_db::(&mut mdb3, vec![ - (vec![13], vec![23]), - (vec![14], vec![24]), - (child_key, ch_root3.as_ref().encode()), - ]).unwrap(); - let mut mdb4 = MemoryDB::::default(); - let root4 = insert_into_memory_db::( - &mut mdb4, - vec![(vec![15], vec![25])], - ).unwrap(); - let storage = InMemoryStorage::new(); - storage.insert(65, root1, mdb1); - storage.insert(66, root2, mdb2); - storage.insert(67, root3, mdb3); - storage.insert(68, root4, mdb4); - - storage - } - - let storage = prepare_storage(); - assert!(prune_by_collect(&storage, 20, 30, 90).is_empty()); - assert!(!storage.into_mdb().drain().is_empty()); - - let storage = prepare_storage(); - let prune60_65 = prune_by_collect(&storage, 60, 65, 90); - assert!(!prune60_65.is_empty()); - storage.remove_from_storage(&prune60_65); - assert!(!storage.into_mdb().drain().is_empty()); - - let storage = prepare_storage(); - let prune60_70 = prune_by_collect(&storage, 60, 70, 90); - assert!(!prune60_70.is_empty()); - storage.remove_from_storage(&prune60_70); - assert!(storage.into_mdb().drain().is_empty()); - } + use super::*; + use crate::backend::insert_into_memory_db; + use crate::changes_trie::storage::InMemoryStorage; + use codec::Encode; + use sp_core::H256; + use sp_runtime::traits::BlakeTwo256; + use sp_trie::MemoryDB; + use std::collections::HashSet; + + fn prune_by_collect( + storage: &dyn Storage, + first: u64, + last: u64, + 
current_block: u64, + ) -> HashSet { + let mut pruned_trie_nodes = HashSet::new(); + let anchor = AnchorBlockId { + hash: Default::default(), + number: current_block, + }; + prune(storage, first, last, &anchor, |node| { + pruned_trie_nodes.insert(node); + }); + pruned_trie_nodes + } + + #[test] + fn prune_works() { + fn prepare_storage() -> InMemoryStorage { + let child_key = ChildIndex { + block: 67u64, + storage_key: b"1".to_vec(), + } + .encode(); + let mut mdb1 = MemoryDB::::default(); + let root1 = + insert_into_memory_db::(&mut mdb1, vec![(vec![10], vec![20])]) + .unwrap(); + let mut mdb2 = MemoryDB::::default(); + let root2 = insert_into_memory_db::( + &mut mdb2, + vec![(vec![11], vec![21]), (vec![12], vec![22])], + ) + .unwrap(); + let mut mdb3 = MemoryDB::::default(); + let ch_root3 = + insert_into_memory_db::(&mut mdb3, vec![(vec![110], vec![120])]) + .unwrap(); + let root3 = insert_into_memory_db::( + &mut mdb3, + vec![ + (vec![13], vec![23]), + (vec![14], vec![24]), + (child_key, ch_root3.as_ref().encode()), + ], + ) + .unwrap(); + let mut mdb4 = MemoryDB::::default(); + let root4 = + insert_into_memory_db::(&mut mdb4, vec![(vec![15], vec![25])]) + .unwrap(); + let storage = InMemoryStorage::new(); + storage.insert(65, root1, mdb1); + storage.insert(66, root2, mdb2); + storage.insert(67, root3, mdb3); + storage.insert(68, root4, mdb4); + + storage + } + + let storage = prepare_storage(); + assert!(prune_by_collect(&storage, 20, 30, 90).is_empty()); + assert!(!storage.into_mdb().drain().is_empty()); + + let storage = prepare_storage(); + let prune60_65 = prune_by_collect(&storage, 60, 65, 90); + assert!(!prune60_65.is_empty()); + storage.remove_from_storage(&prune60_65); + assert!(!storage.into_mdb().drain().is_empty()); + + let storage = prepare_storage(); + let prune60_70 = prune_by_collect(&storage, 60, 70, 90); + assert!(!prune60_70.is_empty()); + storage.remove_from_storage(&prune60_70); + assert!(storage.into_mdb().drain().is_empty()); + } } diff 
--git a/primitives/state-machine/src/changes_trie/storage.rs b/primitives/state-machine/src/changes_trie/storage.rs index 7fb4186728..c88014a69a 100644 --- a/primitives/state-machine/src/changes_trie/storage.rs +++ b/primitives/state-machine/src/changes_trie/storage.rs @@ -16,196 +16,210 @@ //! Changes trie storage utilities. -use std::collections::{BTreeMap, HashSet, HashMap}; +use crate::{ + changes_trie::{AnchorBlockId, BlockNumber, BuildCache, RootsStorage, Storage}, + trie_backend_essence::TrieBackendStorage, + StorageKey, +}; use hash_db::{Hasher, Prefix, EMPTY_PREFIX}; +use parking_lot::RwLock; use sp_trie::DBValue; use sp_trie::MemoryDB; -use parking_lot::RwLock; -use crate::{ - StorageKey, - trie_backend_essence::TrieBackendStorage, - changes_trie::{BuildCache, RootsStorage, Storage, AnchorBlockId, BlockNumber}, -}; +use std::collections::{BTreeMap, HashMap, HashSet}; #[cfg(test)] use crate::backend::insert_into_memory_db; #[cfg(test)] -use crate::changes_trie::input::{InputPair, ChildIndex}; +use crate::changes_trie::input::{ChildIndex, InputPair}; /// In-memory implementation of changes trie storage. pub struct InMemoryStorage { - data: RwLock>, - cache: BuildCache, + data: RwLock>, + cache: BuildCache, } /// Adapter for using changes trie storage as a TrieBackendEssence' storage. pub struct TrieBackendAdapter<'a, H: Hasher, Number: BlockNumber> { - storage: &'a dyn Storage, - _hasher: std::marker::PhantomData<(H, Number)>, + storage: &'a dyn Storage, + _hasher: std::marker::PhantomData<(H, Number)>, } struct InMemoryStorageData { - roots: BTreeMap, - mdb: MemoryDB, + roots: BTreeMap, + mdb: MemoryDB, } impl InMemoryStorage { - /// Creates storage from given in-memory database. - pub fn with_db(mdb: MemoryDB) -> Self { - Self { - data: RwLock::new(InMemoryStorageData { - roots: BTreeMap::new(), - mdb, - }), - cache: BuildCache::new(), - } - } - - /// Creates storage with empty database. 
- pub fn new() -> Self { - Self::with_db(Default::default()) - } - - /// Creates storage with given proof. - pub fn with_proof(proof: Vec>) -> Self { - use hash_db::HashDB; - - let mut proof_db = MemoryDB::::default(); - for item in proof { - proof_db.insert(EMPTY_PREFIX, &item); - } - Self::with_db(proof_db) - } - - /// Get mutable cache reference. - pub fn cache_mut(&mut self) -> &mut BuildCache { - &mut self.cache - } - - /// Create the storage with given blocks. - pub fn with_blocks(blocks: Vec<(Number, H::Out)>) -> Self { - Self { - data: RwLock::new(InMemoryStorageData { - roots: blocks.into_iter().collect(), - mdb: MemoryDB::default(), - }), - cache: BuildCache::new(), - } - } - - #[cfg(test)] - pub fn with_inputs( - mut top_inputs: Vec<(Number, Vec>)>, - children_inputs: Vec<(StorageKey, Vec<(Number, Vec>)>)>, - ) -> Self { - let mut mdb = MemoryDB::default(); - let mut roots = BTreeMap::new(); - for (storage_key, child_input) in children_inputs { - for (block, pairs) in child_input { - let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); - - if let Some(root) = root { - let ix = if let Some(ix) = top_inputs.iter().position(|v| v.0 == block) { - ix - } else { - top_inputs.push((block.clone(), Default::default())); - top_inputs.len() - 1 - }; - top_inputs[ix].1.push(InputPair::ChildIndex( - ChildIndex { block: block.clone(), storage_key: storage_key.clone() }, - root.as_ref().to_vec(), - )); - } - } - } - - for (block, pairs) in top_inputs { - let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); - if let Some(root) = root { - roots.insert(block, root); - } - } - - InMemoryStorage { - data: RwLock::new(InMemoryStorageData { - roots, - mdb, - }), - cache: BuildCache::new(), - } - } - - #[cfg(test)] - pub fn clear_storage(&self) { - self.data.write().mdb = MemoryDB::default(); // use new to be more correct - } - - #[cfg(test)] - pub fn remove_from_storage(&self, keys: &HashSet) { - let mut data = 
self.data.write(); - for key in keys { - data.mdb.remove_and_purge(key, hash_db::EMPTY_PREFIX); - } - } - - #[cfg(test)] - pub fn into_mdb(self) -> MemoryDB { - self.data.into_inner().mdb - } - - /// Insert changes trie for given block. - pub fn insert(&self, block: Number, changes_trie_root: H::Out, trie: MemoryDB) { - let mut data = self.data.write(); - data.roots.insert(block, changes_trie_root); - data.mdb.consolidate(trie); - } + /// Creates storage from given in-memory database. + pub fn with_db(mdb: MemoryDB) -> Self { + Self { + data: RwLock::new(InMemoryStorageData { + roots: BTreeMap::new(), + mdb, + }), + cache: BuildCache::new(), + } + } + + /// Creates storage with empty database. + pub fn new() -> Self { + Self::with_db(Default::default()) + } + + /// Creates storage with given proof. + pub fn with_proof(proof: Vec>) -> Self { + use hash_db::HashDB; + + let mut proof_db = MemoryDB::::default(); + for item in proof { + proof_db.insert(EMPTY_PREFIX, &item); + } + Self::with_db(proof_db) + } + + /// Get mutable cache reference. + pub fn cache_mut(&mut self) -> &mut BuildCache { + &mut self.cache + } + + /// Create the storage with given blocks. 
+ pub fn with_blocks(blocks: Vec<(Number, H::Out)>) -> Self { + Self { + data: RwLock::new(InMemoryStorageData { + roots: blocks.into_iter().collect(), + mdb: MemoryDB::default(), + }), + cache: BuildCache::new(), + } + } + + #[cfg(test)] + pub fn with_inputs( + mut top_inputs: Vec<(Number, Vec>)>, + children_inputs: Vec<(StorageKey, Vec<(Number, Vec>)>)>, + ) -> Self { + let mut mdb = MemoryDB::default(); + let mut roots = BTreeMap::new(); + for (storage_key, child_input) in children_inputs { + for (block, pairs) in child_input { + let root = + insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); + + if let Some(root) = root { + let ix = if let Some(ix) = top_inputs.iter().position(|v| v.0 == block) { + ix + } else { + top_inputs.push((block.clone(), Default::default())); + top_inputs.len() - 1 + }; + top_inputs[ix].1.push(InputPair::ChildIndex( + ChildIndex { + block: block.clone(), + storage_key: storage_key.clone(), + }, + root.as_ref().to_vec(), + )); + } + } + } + + for (block, pairs) in top_inputs { + let root = insert_into_memory_db::(&mut mdb, pairs.into_iter().map(Into::into)); + if let Some(root) = root { + roots.insert(block, root); + } + } + + InMemoryStorage { + data: RwLock::new(InMemoryStorageData { roots, mdb }), + cache: BuildCache::new(), + } + } + + #[cfg(test)] + pub fn clear_storage(&self) { + self.data.write().mdb = MemoryDB::default(); // use new to be more correct + } + + #[cfg(test)] + pub fn remove_from_storage(&self, keys: &HashSet) { + let mut data = self.data.write(); + for key in keys { + data.mdb.remove_and_purge(key, hash_db::EMPTY_PREFIX); + } + } + + #[cfg(test)] + pub fn into_mdb(self) -> MemoryDB { + self.data.into_inner().mdb + } + + /// Insert changes trie for given block. 
+ pub fn insert(&self, block: Number, changes_trie_root: H::Out, trie: MemoryDB) { + let mut data = self.data.write(); + data.roots.insert(block, changes_trie_root); + data.mdb.consolidate(trie); + } } impl RootsStorage for InMemoryStorage { - fn build_anchor(&self, parent_hash: H::Out) -> Result, String> { - self.data.read().roots.iter() - .find(|(_, v)| **v == parent_hash) - .map(|(k, _)| AnchorBlockId { hash: parent_hash, number: k.clone() }) - .ok_or_else(|| format!("Can't find associated number for block {:?}", parent_hash)) - } - - fn root(&self, _anchor_block: &AnchorBlockId, block: Number) -> Result, String> { - Ok(self.data.read().roots.get(&block).cloned()) - } + fn build_anchor(&self, parent_hash: H::Out) -> Result, String> { + self.data + .read() + .roots + .iter() + .find(|(_, v)| **v == parent_hash) + .map(|(k, _)| AnchorBlockId { + hash: parent_hash, + number: k.clone(), + }) + .ok_or_else(|| format!("Can't find associated number for block {:?}", parent_hash)) + } + + fn root( + &self, + _anchor_block: &AnchorBlockId, + block: Number, + ) -> Result, String> { + Ok(self.data.read().roots.get(&block).cloned()) + } } impl Storage for InMemoryStorage { - fn as_roots_storage(&self) -> &dyn RootsStorage { - self - } - - fn with_cached_changed_keys( - &self, - root: &H::Out, - functor: &mut dyn FnMut(&HashMap, HashSet>), - ) -> bool { - self.cache.with_changed_keys(root, functor) - } - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - MemoryDB::::get(&self.data.read().mdb, key, prefix) - } + fn as_roots_storage(&self) -> &dyn RootsStorage { + self + } + + fn with_cached_changed_keys( + &self, + root: &H::Out, + functor: &mut dyn FnMut(&HashMap, HashSet>), + ) -> bool { + self.cache.with_changed_keys(root, functor) + } + + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + MemoryDB::::get(&self.data.read().mdb, key, prefix) + } } impl<'a, H: Hasher, Number: BlockNumber> TrieBackendAdapter<'a, H, Number> { - pub fn 
new(storage: &'a dyn Storage) -> Self { - Self { storage, _hasher: Default::default() } - } + pub fn new(storage: &'a dyn Storage) -> Self { + Self { + storage, + _hasher: Default::default(), + } + } } impl<'a, H, Number> TrieBackendStorage for TrieBackendAdapter<'a, H, Number> - where - Number: BlockNumber, - H: Hasher, +where + Number: BlockNumber, + H: Hasher, { - type Overlay = MemoryDB; + type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - self.storage.get(key, prefix) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + self.storage.get(key, prefix) + } } diff --git a/primitives/state-machine/src/changes_trie/surface_iterator.rs b/primitives/state-machine/src/changes_trie/surface_iterator.rs index 02a7c277d9..c7379d16dd 100644 --- a/primitives/state-machine/src/changes_trie/surface_iterator.rs +++ b/primitives/state-machine/src/changes_trie/surface_iterator.rs @@ -20,34 +20,30 @@ //! of points at the terrain (mountains and valleys) inside this range that have to be drilled down to //! search for gems. +use crate::changes_trie::{BlockNumber, ConfigurationRange}; use num_traits::One; -use crate::changes_trie::{ConfigurationRange, BlockNumber}; /// Returns surface iterator for given range of blocks. /// /// `max` is the number of best block, known to caller. We can't access any changes tries /// that are built after this block, even though we may have them built already. 
pub fn surface_iterator<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, + config: ConfigurationRange<'a, Number>, + max: Number, + begin: Number, + end: Number, ) -> Result, String> { - let (current, current_begin, digest_step, digest_level) = lower_bound_max_digest( - config.clone(), - max.clone(), - begin.clone(), - end, - )?; - Ok(SurfaceIterator { - config, - begin, - max, - current: Some(current), - current_begin, - digest_step, - digest_level, - }) + let (current, current_begin, digest_step, digest_level) = + lower_bound_max_digest(config.clone(), max.clone(), begin.clone(), end)?; + Ok(SurfaceIterator { + config, + begin, + max, + current: Some(current), + current_begin, + digest_step, + digest_level, + }) } /// Surface iterator - only traverses top-level digests from given range and tries to find @@ -57,229 +53,310 @@ pub fn surface_iterator<'a, Number: BlockNumber>( /// Digest level is Some(0) when it is regular block, is Some(non-zero) when it is digest block and None /// if it is skewed digest block. 
pub struct SurfaceIterator<'a, Number: BlockNumber> { - config: ConfigurationRange<'a, Number>, - begin: Number, - max: Number, - current: Option, - current_begin: Number, - digest_step: u32, - digest_level: Option, + config: ConfigurationRange<'a, Number>, + begin: Number, + max: Number, + current: Option, + current_begin: Number, + digest_step: u32, + digest_level: Option, } impl<'a, Number: BlockNumber> Iterator for SurfaceIterator<'a, Number> { - type Item = Result<(Number, Option), String>; + type Item = Result<(Number, Option), String>; - fn next(&mut self) -> Option { - let current = self.current.clone()?; - let digest_level = self.digest_level; + fn next(&mut self) -> Option { + let current = self.current.clone()?; + let digest_level = self.digest_level; - if current < self.digest_step.into() { - self.current = None; - } else { - let next = current.clone() - self.digest_step.into(); - if next.is_zero() || next < self.begin { - self.current = None; - } else if next > self.current_begin { - self.current = Some(next); - } else { - let max_digest_interval = lower_bound_max_digest( - self.config.clone(), - self.max.clone(), - self.begin.clone(), - next, - ); - let (current, current_begin, digest_step, digest_level) = match max_digest_interval { - Err(err) => return Some(Err(err)), - Ok(range) => range, - }; + if current < self.digest_step.into() { + self.current = None; + } else { + let next = current.clone() - self.digest_step.into(); + if next.is_zero() || next < self.begin { + self.current = None; + } else if next > self.current_begin { + self.current = Some(next); + } else { + let max_digest_interval = lower_bound_max_digest( + self.config.clone(), + self.max.clone(), + self.begin.clone(), + next, + ); + let (current, current_begin, digest_step, digest_level) = match max_digest_interval + { + Err(err) => return Some(Err(err)), + Ok(range) => range, + }; - self.current = Some(current); - self.current_begin = current_begin; - self.digest_step = digest_step; - 
self.digest_level = digest_level; - } - } + self.current = Some(current); + self.current_begin = current_begin; + self.digest_step = digest_step; + self.digest_level = digest_level; + } + } - Some(Ok((current, digest_level))) - } + Some(Ok((current, digest_level))) + } } /// Returns parameters of highest level digest block that includes the end of given range /// and tends to include the whole range. fn lower_bound_max_digest<'a, Number: BlockNumber>( - config: ConfigurationRange<'a, Number>, - max: Number, - begin: Number, - end: Number, + config: ConfigurationRange<'a, Number>, + max: Number, + begin: Number, + end: Number, ) -> Result<(Number, Number, u32, Option), String> { - if end > max || begin > end { - return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)); - } - if begin <= config.zero || config.end.as_ref().map(|config_end| end > *config_end).unwrap_or(false) { - return Err(format!("changes trie range is not covered by configuration: {}..{}/{}..{}", - begin, end, config.zero, match config.end.as_ref() { - Some(config_end) => format!("{}", config_end), - None => "None".into(), - })); - } + if end > max || begin > end { + return Err(format!("invalid changes range: {}..{}/{}", begin, end, max)); + } + if begin <= config.zero + || config + .end + .as_ref() + .map(|config_end| end > *config_end) + .unwrap_or(false) + { + return Err(format!( + "changes trie range is not covered by configuration: {}..{}/{}..{}", + begin, + end, + config.zero, + match config.end.as_ref() { + Some(config_end) => format!("{}", config_end), + None => "None".into(), + } + )); + } - let mut digest_level = 0u32; - let mut digest_step = 1u32; - let mut digest_interval = 0u32; - let mut current = end.clone(); - let mut current_begin = begin.clone(); - if current_begin != current { - while digest_level != config.config.digest_levels { - // try to use next level digest - let new_digest_level = digest_level + 1; - let new_digest_step = digest_step * 
config.config.digest_interval; - let new_digest_interval = config.config.digest_interval * { - if digest_interval == 0 { 1 } else { digest_interval } - }; - let new_digest_begin = config.zero.clone() + ((current.clone() - One::one() - config.zero.clone()) - / new_digest_interval.into()) * new_digest_interval.into(); - let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); - let new_current = new_digest_begin.clone() + new_digest_interval.into(); + let mut digest_level = 0u32; + let mut digest_step = 1u32; + let mut digest_interval = 0u32; + let mut current = end.clone(); + let mut current_begin = begin.clone(); + if current_begin != current { + while digest_level != config.config.digest_levels { + // try to use next level digest + let new_digest_level = digest_level + 1; + let new_digest_step = digest_step * config.config.digest_interval; + let new_digest_interval = config.config.digest_interval * { + if digest_interval == 0 { + 1 + } else { + digest_interval + } + }; + let new_digest_begin = config.zero.clone() + + ((current.clone() - One::one() - config.zero.clone()) + / new_digest_interval.into()) + * new_digest_interval.into(); + let new_digest_end = new_digest_begin.clone() + new_digest_interval.into(); + let new_current = new_digest_begin.clone() + new_digest_interval.into(); - // check if we met skewed digest - if let Some(skewed_digest_end) = config.end.as_ref() { - if new_digest_end > *skewed_digest_end { - let skewed_digest_start = config.config.prev_max_level_digest_block( - config.zero.clone(), - skewed_digest_end.clone(), - ); - if let Some(skewed_digest_start) = skewed_digest_start { - let skewed_digest_range = (skewed_digest_end.clone() - skewed_digest_start.clone()) - .try_into().ok() - .expect("skewed digest range is always <= max level digest range;\ - max level digest range always fits u32; qed"); - return Ok(( - skewed_digest_end.clone(), - skewed_digest_start, - skewed_digest_range, - None, - )); - } - } - } + // check if 
we met skewed digest + if let Some(skewed_digest_end) = config.end.as_ref() { + if new_digest_end > *skewed_digest_end { + let skewed_digest_start = config.config.prev_max_level_digest_block( + config.zero.clone(), + skewed_digest_end.clone(), + ); + if let Some(skewed_digest_start) = skewed_digest_start { + let skewed_digest_range = (skewed_digest_end.clone() + - skewed_digest_start.clone()) + .try_into() + .ok() + .expect( + "skewed digest range is always <= max level digest range;\ + max level digest range always fits u32; qed", + ); + return Ok(( + skewed_digest_end.clone(), + skewed_digest_start, + skewed_digest_range, + None, + )); + } + } + } - // we can't use next level digest if it touches any unknown (> max) blocks - if new_digest_end > max { - if begin < new_digest_begin { - current_begin = new_digest_begin; - } - break; - } + // we can't use next level digest if it touches any unknown (> max) blocks + if new_digest_end > max { + if begin < new_digest_begin { + current_begin = new_digest_begin; + } + break; + } - // we can (and will) use this digest - digest_level = new_digest_level; - digest_step = new_digest_step; - digest_interval = new_digest_interval; - current = new_current; - current_begin = new_digest_begin; + // we can (and will) use this digest + digest_level = new_digest_level; + digest_step = new_digest_step; + digest_interval = new_digest_interval; + current = new_current; + current_begin = new_digest_begin; - // if current digest covers the whole range => no need to use next level digest - if current_begin <= begin && new_digest_end >= end { - break; - } - } - } + // if current digest covers the whole range => no need to use next level digest + if current_begin <= begin && new_digest_end >= end { + break; + } + } + } - Ok(( - current, - current_begin, - digest_step, - Some(digest_level), - )) + Ok((current, current_begin, digest_step, Some(digest_level))) } #[cfg(test)] mod tests { - use crate::changes_trie::{Configuration}; - use super::*; 
+ use super::*; + use crate::changes_trie::Configuration; - fn configuration_range<'a>(config: &'a Configuration, zero: u64) -> ConfigurationRange<'a, u64> { - ConfigurationRange { - config, - zero, - end: None, - } - } + fn configuration_range<'a>( + config: &'a Configuration, + zero: u64, + ) -> ConfigurationRange<'a, u64> { + ConfigurationRange { + config, + zero, + end: None, + } + } - #[test] - fn lower_bound_max_digest_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; + #[test] + fn lower_bound_max_digest_works() { + let config = Configuration { + digest_interval: 4, + digest_levels: 2, + }; - // when config activates at 0 - assert_eq!( - lower_bound_max_digest(configuration_range(&config, 0u64), 100_000u64, 20u64, 180u64).unwrap(), - (192, 176, 16, Some(2)), - ); + // when config activates at 0 + assert_eq!( + lower_bound_max_digest( + configuration_range(&config, 0u64), + 100_000u64, + 20u64, + 180u64 + ) + .unwrap(), + (192, 176, 16, Some(2)), + ); - // when config activates at 30 - assert_eq!( - lower_bound_max_digest(configuration_range(&config, 30u64), 100_000u64, 50u64, 210u64).unwrap(), - (222, 206, 16, Some(2)), - ); - } + // when config activates at 30 + assert_eq!( + lower_bound_max_digest( + configuration_range(&config, 30u64), + 100_000u64, + 50u64, + 210u64 + ) + .unwrap(), + (222, 206, 16, Some(2)), + ); + } - #[test] - fn surface_iterator_works() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; + #[test] + fn surface_iterator_works() { + let config = Configuration { + digest_interval: 4, + digest_levels: 2, + }; - // when config activates at 0 - assert_eq!( - surface_iterator( - configuration_range(&config, 0u64), - 100_000u64, - 40u64, - 180u64, - ).unwrap().collect::>(), - vec![ - Ok((192, Some(2))), Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), - Ok((128, Some(2))), Ok((112, Some(2))), Ok((96, Some(2))), Ok((80, Some(2))), - Ok((64, Some(2))), Ok((48, Some(2))), - ], - ); 
+ // when config activates at 0 + assert_eq!( + surface_iterator( + configuration_range(&config, 0u64), + 100_000u64, + 40u64, + 180u64, + ) + .unwrap() + .collect::>(), + vec![ + Ok((192, Some(2))), + Ok((176, Some(2))), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), + ], + ); - // when config activates at 30 - assert_eq!( - surface_iterator( - configuration_range(&config, 30u64), - 100_000u64, - 40u64, - 180u64, - ).unwrap().collect::>(), - vec![ - Ok((190, Some(2))), Ok((174, Some(2))), Ok((158, Some(2))), Ok((142, Some(2))), Ok((126, Some(2))), - Ok((110, Some(2))), Ok((94, Some(2))), Ok((78, Some(2))), Ok((62, Some(2))), Ok((46, Some(2))), - ], - ); + // when config activates at 30 + assert_eq!( + surface_iterator( + configuration_range(&config, 30u64), + 100_000u64, + 40u64, + 180u64, + ) + .unwrap() + .collect::>(), + vec![ + Ok((190, Some(2))), + Ok((174, Some(2))), + Ok((158, Some(2))), + Ok((142, Some(2))), + Ok((126, Some(2))), + Ok((110, Some(2))), + Ok((94, Some(2))), + Ok((78, Some(2))), + Ok((62, Some(2))), + Ok((46, Some(2))), + ], + ); - // when config activates at 0 AND max block is before next digest - assert_eq!( - surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64).unwrap().collect::>(), - vec![ - Ok((183, Some(0))), Ok((182, Some(0))), Ok((181, Some(0))), Ok((180, Some(1))), - Ok((176, Some(2))), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), - Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), - ], - ); - } + // when config activates at 0 AND max block is before next digest + assert_eq!( + surface_iterator(configuration_range(&config, 0u64), 183u64, 40u64, 183u64) + .unwrap() + .collect::>(), + vec![ + Ok((183, Some(0))), + Ok((182, Some(0))), + Ok((181, Some(0))), + Ok((180, Some(1))), + Ok((176, Some(2))), + Ok((160, Some(2))), + 
Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), + ], + ); + } - #[test] - fn surface_iterator_works_with_skewed_digest() { - let config = Configuration { digest_interval: 4, digest_levels: 2 }; - let mut config_range = configuration_range(&config, 0u64); + #[test] + fn surface_iterator_works_with_skewed_digest() { + let config = Configuration { + digest_interval: 4, + digest_levels: 2, + }; + let mut config_range = configuration_range(&config, 0u64); - // when config activates at 0 AND ends at 170 - config_range.end = Some(170); - assert_eq!( - surface_iterator(config_range, 100_000u64, 40u64, 170u64).unwrap().collect::>(), - vec![ - Ok((170, None)), Ok((160, Some(2))), Ok((144, Some(2))), Ok((128, Some(2))), Ok((112, Some(2))), - Ok((96, Some(2))), Ok((80, Some(2))), Ok((64, Some(2))), Ok((48, Some(2))), - ], - ); - } + // when config activates at 0 AND ends at 170 + config_range.end = Some(170); + assert_eq!( + surface_iterator(config_range, 100_000u64, 40u64, 170u64) + .unwrap() + .collect::>(), + vec![ + Ok((170, None)), + Ok((160, Some(2))), + Ok((144, Some(2))), + Ok((128, Some(2))), + Ok((112, Some(2))), + Ok((96, Some(2))), + Ok((80, Some(2))), + Ok((64, Some(2))), + Ok((48, Some(2))), + ], + ); + } } diff --git a/primitives/state-machine/src/error.rs b/primitives/state-machine/src/error.rs index 464403c2f8..934337a4d7 100644 --- a/primitives/state-machine/src/error.rs +++ b/primitives/state-machine/src/error.rs @@ -15,7 +15,6 @@ // along with Substrate. If not, see . /// State Machine Errors - use std::fmt; /// State Machine Error bound. @@ -32,16 +31,18 @@ impl Error for T {} /// and as a transition away from the pre-existing framework. #[derive(Debug, Eq, PartialEq)] pub enum ExecutionError { - /// Backend error. - Backend(String), - /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. 
- CodeEntryDoesNotExist, - /// Backend is incompatible with execution proof generation process. - UnableToGenerateProof, - /// Invalid execution proof. - InvalidProof, + /// Backend error. + Backend(String), + /// The entry `:code` doesn't exist in storage so there's no way we can execute anything. + CodeEntryDoesNotExist, + /// Backend is incompatible with execution proof generation process. + UnableToGenerateProof, + /// Invalid execution proof. + InvalidProof, } impl fmt::Display for ExecutionError { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "Externalities Error") } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!(f, "Externalities Error") + } } diff --git a/primitives/state-machine/src/ext.rs b/primitives/state-machine/src/ext.rs index 3a6b544290..d366dd3247 100644 --- a/primitives/state-machine/src/ext.rs +++ b/primitives/state-machine/src/ext.rs @@ -17,783 +17,919 @@ //! Concrete externalities implementation. use crate::{ - StorageKey, StorageValue, OverlayedChanges, StorageTransactionCache, - backend::Backend, - changes_trie::State as ChangesTrieState, + backend::Backend, changes_trie::State as ChangesTrieState, OverlayedChanges, StorageKey, + StorageTransactionCache, StorageValue, }; +use codec::{Decode, Encode}; use hash_db::Hasher; use sp_core::{ - storage::{ChildStorageKey, well_known_keys::is_child_storage_key, ChildInfo}, - traits::Externalities, hexdisplay::HexDisplay, + hexdisplay::HexDisplay, + storage::{well_known_keys::is_child_storage_key, ChildInfo, ChildStorageKey}, + traits::Externalities, }; -use sp_trie::{trie_types::Layout, default_child_trie_root}; -use sp_externalities::{Extensions, Extension}; -use codec::{Decode, Encode}; +use sp_externalities::{Extension, Extensions}; +use sp_trie::{default_child_trie_root, trie_types::Layout}; -use std::{error, fmt, any::{Any, TypeId}}; -use log::{warn, trace}; +use log::{trace, warn}; +use std::{ + any::{Any, TypeId}, + error, fmt, +}; const 
EXT_NOT_ALLOWED_TO_FAIL: &str = "Externalities not allowed to fail within runtime"; /// Errors that can occur when interacting with the externalities. #[derive(Debug, Copy, Clone)] pub enum Error { - /// Failure to load state data from the backend. - #[allow(unused)] - Backend(B), - /// Failure to execute a function. - #[allow(unused)] - Executor(E), + /// Failure to load state data from the backend. + #[allow(unused)] + Backend(B), + /// Failure to execute a function. + #[allow(unused)] + Executor(E), } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match *self { - Error::Backend(ref e) => write!(f, "Storage backend error: {}", e), - Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match *self { + Error::Backend(ref e) => write!(f, "Storage backend error: {}", e), + Error::Executor(ref e) => write!(f, "Sub-call execution error: {}", e), + } + } } impl error::Error for Error { - fn description(&self) -> &str { - match *self { - Error::Backend(..) => "backend error", - Error::Executor(..) => "executor error", - } - } + fn description(&self) -> &str { + match *self { + Error::Backend(..) => "backend error", + Error::Executor(..) => "executor error", + } + } } /// Wraps a read-only backend, call executor, and current overlayed changes. pub struct Ext<'a, H, N, B> - where - H: Hasher, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, +where + H: Hasher, + B: 'a + Backend, + N: crate::changes_trie::BlockNumber, { - /// The overlayed changes to write to. - overlay: &'a mut OverlayedChanges, - /// The storage backend to read from. - backend: &'a B, - /// The cache for the storage transactions. - storage_transaction_cache: &'a mut StorageTransactionCache, - /// Changes trie state to read from. - changes_trie_state: Option>, - /// Pseudo-unique id used for tracing. - pub id: u16, - /// Dummy usage of N arg. 
- _phantom: std::marker::PhantomData, - /// Extensions registered with this instance. - extensions: Option<&'a mut Extensions>, + /// The overlayed changes to write to. + overlay: &'a mut OverlayedChanges, + /// The storage backend to read from. + backend: &'a B, + /// The cache for the storage transactions. + storage_transaction_cache: &'a mut StorageTransactionCache, + /// Changes trie state to read from. + changes_trie_state: Option>, + /// Pseudo-unique id used for tracing. + pub id: u16, + /// Dummy usage of N arg. + _phantom: std::marker::PhantomData, + /// Extensions registered with this instance. + extensions: Option<&'a mut Extensions>, } impl<'a, H, N, B> Ext<'a, H, N, B> where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + B: 'a + Backend, + N: crate::changes_trie::BlockNumber, { - - /// Create a new `Ext` from overlayed changes and read-only backend - pub fn new( - overlay: &'a mut OverlayedChanges, - storage_transaction_cache: &'a mut StorageTransactionCache, - backend: &'a B, - changes_trie_state: Option>, - extensions: Option<&'a mut Extensions>, - ) -> Self { - Ext { - overlay, - backend, - changes_trie_state, - storage_transaction_cache, - id: rand::random(), - _phantom: Default::default(), - extensions, - } - } - - /// Invalidates the currently cached storage root and the db transaction. - /// - /// Called when there are changes that likely will invalidate the storage root. 
- fn mark_dirty(&mut self) { - self.storage_transaction_cache.reset(); - } + /// Create a new `Ext` from overlayed changes and read-only backend + pub fn new( + overlay: &'a mut OverlayedChanges, + storage_transaction_cache: &'a mut StorageTransactionCache, + backend: &'a B, + changes_trie_state: Option>, + extensions: Option<&'a mut Extensions>, + ) -> Self { + Ext { + overlay, + backend, + changes_trie_state, + storage_transaction_cache, + id: rand::random(), + _phantom: Default::default(), + extensions, + } + } + + /// Invalidates the currently cached storage root and the db transaction. + /// + /// Called when there are changes that likely will invalidate the storage root. + fn mark_dirty(&mut self) { + self.storage_transaction_cache.reset(); + } } #[cfg(test)] impl<'a, H, N, B> Ext<'a, H, N, B> where - H: Hasher, - H::Out: Ord + 'static, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, + H: Hasher, + H::Out: Ord + 'static, + B: 'a + Backend, + N: crate::changes_trie::BlockNumber, { - pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { - use std::collections::HashMap; - - self.backend.pairs().iter() - .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) - .chain(self.overlay.committed.top.clone().into_iter().map(|(k, v)| (k, v.value))) - .chain(self.overlay.prospective.top.clone().into_iter().map(|(k, v)| (k, v.value))) - .collect::>() - .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) - .collect() - } + pub fn storage_pairs(&self) -> Vec<(StorageKey, StorageValue)> { + use std::collections::HashMap; + + self.backend + .pairs() + .iter() + .map(|&(ref k, ref v)| (k.to_vec(), Some(v.to_vec()))) + .chain( + self.overlay + .committed + .top + .clone() + .into_iter() + .map(|(k, v)| (k, v.value)), + ) + .chain( + self.overlay + .prospective + .top + .clone() + .into_iter() + .map(|(k, v)| (k, v.value)), + ) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) + .collect() + } 
} impl<'a, H, B, N> Externalities for Ext<'a, H, N, B> where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + B: 'a + Backend, + N: crate::changes_trie::BlockNumber, { - fn storage(&self, key: &[u8]) -> Option { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = self.overlay.storage(key).map(|x| x.map(|x| x.to_vec())).unwrap_or_else(|| - self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); - trace!(target: "state-trace", "{:04x}: Get {}={:?}", - self.id, - HexDisplay::from(&key), - result.as_ref().map(HexDisplay::from) - ); - result - } - - fn storage_hash(&self, key: &[u8]) -> Option> { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = self.overlay - .storage(key) - .map(|x| x.map(|x| H::hash(x))) - .unwrap_or_else(|| self.backend.storage_hash(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); - - trace!(target: "state-trace", "{:04x}: Hash {}={:?}", - self.id, - HexDisplay::from(&key), - result, - ); - result.map(|r| r.encode()) - } - - fn child_storage( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> Option { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = self.overlay - .child_storage(storage_key.as_ref(), key) - .map(|x| x.map(|x| x.to_vec())) - .unwrap_or_else(|| - self.backend.child_storage(storage_key.as_ref(), child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL) - ); - - trace!(target: "state-trace", "{:04x}: GetChild({}) {}={:?}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&key), - result.as_ref().map(HexDisplay::from) - ); - - result - } - - fn child_storage_hash( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> Option> { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = self.overlay - .child_storage(storage_key.as_ref(), key) - .map(|x| 
x.map(|x| H::hash(x))) - .unwrap_or_else(|| - self.backend.child_storage_hash(storage_key.as_ref(), child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL) - ); - - trace!(target: "state-trace", "{:04x}: ChildHash({}) {}={:?}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&key), - result, - ); - - result.map(|r| r.encode()) - } - - fn exists_storage(&self, key: &[u8]) -> bool { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - let result = match self.overlay.storage(key) { - Some(x) => x.is_some(), - _ => self.backend.exists_storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL), - }; - - trace!(target: "state-trace", "{:04x}: Exists {}={:?}", - self.id, - HexDisplay::from(&key), - result, - ); - - result - } - - fn exists_child_storage( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> bool { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - - let result = match self.overlay.child_storage(storage_key.as_ref(), key) { - Some(x) => x.is_some(), - _ => self.backend - .exists_child_storage(storage_key.as_ref(), child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL), - }; - - trace!(target: "state-trace", "{:04x}: ChildExists({}) {}={:?}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&key), - result, - ); - result - } - - fn next_storage_key(&self, key: &[u8]) -> Option { - let next_backend_key = self.backend.next_storage_key(key).expect(EXT_NOT_ALLOWED_TO_FAIL); - let next_overlay_key_change = self.overlay.next_storage_key_change(key); - - match (next_backend_key, next_overlay_key_change) { - (Some(backend_key), Some(overlay_key)) if &backend_key[..] 
< overlay_key.0 => Some(backend_key), - (backend_key, None) => backend_key, - (_, Some(overlay_key)) => if overlay_key.1.value.is_some() { - Some(overlay_key.0.to_vec()) - } else { - self.next_storage_key(&overlay_key.0[..]) - }, - } - } - - fn next_child_storage_key( - &self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: &[u8], - ) -> Option { - let next_backend_key = self.backend - .next_child_storage_key(storage_key.as_ref(), child_info, key) - .expect(EXT_NOT_ALLOWED_TO_FAIL); - let next_overlay_key_change = self.overlay.next_child_storage_key_change( - storage_key.as_ref(), - key - ); - - match (next_backend_key, next_overlay_key_change) { - (Some(backend_key), Some(overlay_key)) if &backend_key[..] < overlay_key.0 => Some(backend_key), - (backend_key, None) => backend_key, - (_, Some(overlay_key)) => if overlay_key.1.value.is_some() { - Some(overlay_key.0.to_vec()) - } else { - self.next_child_storage_key( - storage_key, - child_info, - &overlay_key.0[..], - ) - }, - } - } - - fn place_storage(&mut self, key: StorageKey, value: Option) { - trace!(target: "state-trace", "{:04x}: Put {}={:?}", - self.id, - HexDisplay::from(&key), - value.as_ref().map(HexDisplay::from) - ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); - if is_child_storage_key(&key) { - warn!(target: "trie", "Refuse to directly set child storage key"); - return; - } - - self.mark_dirty(); - self.overlay.set_storage(key, value); - } - - fn place_child_storage( - &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - key: StorageKey, - value: Option, - ) { - trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&key), - value.as_ref().map(HexDisplay::from) - ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); - - self.mark_dirty(); - self.overlay.set_child_storage(storage_key.into_owned(), child_info, key, value); - } - - fn kill_child_storage( - &mut self, - 
storage_key: ChildStorageKey, - child_info: ChildInfo, - ) { - trace!(target: "state-trace", "{:04x}: KillChild({})", - self.id, - HexDisplay::from(&storage_key.as_ref()), - ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); - - self.mark_dirty(); - self.overlay.clear_child_storage(storage_key.as_ref(), child_info); - self.backend.for_keys_in_child_storage(storage_key.as_ref(), child_info, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); - }); - } - - fn clear_prefix(&mut self, prefix: &[u8]) { - trace!(target: "state-trace", "{:04x}: ClearPrefix {}", - self.id, - HexDisplay::from(&prefix), - ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); - if is_child_storage_key(prefix) { - warn!(target: "trie", "Refuse to directly clear prefix that is part of child storage key"); - return; - } - - self.mark_dirty(); - self.overlay.clear_prefix(prefix); - self.backend.for_keys_with_prefix(prefix, |key| { - self.overlay.set_storage(key.to_vec(), None); - }); - } - - fn clear_child_prefix( - &mut self, - storage_key: ChildStorageKey, - child_info: ChildInfo, - prefix: &[u8], - ) { - trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&prefix), - ); - let _guard = sp_panic_handler::AbortGuard::force_abort(); - - self.mark_dirty(); - self.overlay.clear_child_prefix(storage_key.as_ref(), child_info, prefix); - self.backend.for_child_keys_with_prefix(storage_key.as_ref(), child_info, prefix, |key| { - self.overlay.set_child_storage(storage_key.as_ref().to_vec(), child_info, key.to_vec(), None); - }); - } - - fn chain_id(&self) -> u64 { - 42 - } - - fn storage_root(&mut self) -> Vec { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { - trace!(target: "state-trace", "{:04x}: Root (cached) {}", - self.id, - 
HexDisplay::from(&root.as_ref()), - ); - return root.encode(); - } - - let root = self.overlay.storage_root(self.backend, self.storage_transaction_cache); - trace!(target: "state-trace", "{:04x}: Root {}", self.id, HexDisplay::from(&root.as_ref())); - root.encode() - } - - fn child_storage_root( - &mut self, - storage_key: ChildStorageKey, - ) -> Vec { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - if self.storage_transaction_cache.transaction_storage_root.is_some() { - let root = self - .storage(storage_key.as_ref()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) - ); - trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&root.as_ref()), - ); - root.encode() - } else { - let storage_key = storage_key.as_ref(); - - if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { - let (root, is_empty, _) = { - let delta = self.overlay.committed.children.get(storage_key) - .into_iter() - .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) - .chain( - self.overlay.prospective.children.get(storage_key) - .into_iter() - .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) - ); - - self.backend.child_storage_root(storage_key, child_info.as_ref(), delta) - }; - - let root = root.encode(); - // We store update in the overlay in order to be able to use 'self.storage_transaction' - // cache. This is brittle as it rely on Ext only querying the trie backend for - // storage root. - // A better design would be to manage 'child_storage_transaction' in a - // similar way as 'storage_transaction' but for each child trie. 
- if is_empty { - self.overlay.set_storage(storage_key.into(), None); - } else { - self.overlay.set_storage(storage_key.into(), Some(root.clone())); - } - - trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&root.as_ref()), - ); - root - } else { - // empty overlay - let root = self - .storage(storage_key.as_ref()) - .and_then(|k| Decode::decode(&mut &k[..]).ok()) - .unwrap_or( - default_child_trie_root::>(storage_key.as_ref()) - ); - trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", - self.id, - HexDisplay::from(&storage_key.as_ref()), - HexDisplay::from(&root.as_ref()), - ); - root.encode() - } - } - } - - fn storage_changes_root(&mut self, parent_hash: &[u8]) -> Result>, ()> { - let _guard = sp_panic_handler::AbortGuard::force_abort(); - let root = self.overlay.changes_trie_root( - self.backend, - self.changes_trie_state.as_ref(), - Decode::decode(&mut &parent_hash[..]).map_err(|e| - trace!( - target: "state-trace", - "Failed to decode changes root parent hash: {}", - e, - ) - )?, - true, - self.storage_transaction_cache, - ); - - trace!(target: "state-trace", "{:04x}: ChangesRoot({}) {:?}", - self.id, - HexDisplay::from(&parent_hash), - root, - ); - - root.map(|r| r.map(|o| o.encode())) - } - - fn wipe(&mut self) { - self.overlay.discard_prospective(); - self.overlay.drain_storage_changes(&self.backend, None, Default::default(), self.storage_transaction_cache) - .expect(EXT_NOT_ALLOWED_TO_FAIL); - self.storage_transaction_cache.reset(); - self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL) - } - - fn commit(&mut self) { - self.overlay.commit_prospective(); - let changes = self.overlay.drain_storage_changes(&self.backend, None, Default::default(), self.storage_transaction_cache) - .expect(EXT_NOT_ALLOWED_TO_FAIL); - self.backend.commit( - changes.transaction_storage_root, - changes.transaction, - ).expect(EXT_NOT_ALLOWED_TO_FAIL); - 
self.storage_transaction_cache.reset(); - } + fn storage(&self, key: &[u8]) -> Option { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + let result = self + .overlay + .storage(key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| self.backend.storage(key).expect(EXT_NOT_ALLOWED_TO_FAIL)); + trace!(target: "state-trace", "{:04x}: Get {}={:?}", + self.id, + HexDisplay::from(&key), + result.as_ref().map(HexDisplay::from) + ); + result + } + + fn storage_hash(&self, key: &[u8]) -> Option> { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + let result = self + .overlay + .storage(key) + .map(|x| x.map(|x| H::hash(x))) + .unwrap_or_else(|| { + self.backend + .storage_hash(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!(target: "state-trace", "{:04x}: Hash {}={:?}", + self.id, + HexDisplay::from(&key), + result, + ); + result.map(|r| r.encode()) + } + + fn child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + let result = self + .overlay + .child_storage(storage_key.as_ref(), key) + .map(|x| x.map(|x| x.to_vec())) + .unwrap_or_else(|| { + self.backend + .child_storage(storage_key.as_ref(), child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!(target: "state-trace", "{:04x}: GetChild({}) {}={:?}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&key), + result.as_ref().map(HexDisplay::from) + ); + + result + } + + fn child_storage_hash( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option> { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + let result = self + .overlay + .child_storage(storage_key.as_ref(), key) + .map(|x| x.map(|x| H::hash(x))) + .unwrap_or_else(|| { + self.backend + .child_storage_hash(storage_key.as_ref(), child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL) + }); + + trace!(target: "state-trace", "{:04x}: 
ChildHash({}) {}={:?}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&key), + result, + ); + + result.map(|r| r.encode()) + } + + fn exists_storage(&self, key: &[u8]) -> bool { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + let result = match self.overlay.storage(key) { + Some(x) => x.is_some(), + _ => self + .backend + .exists_storage(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL), + }; + + trace!(target: "state-trace", "{:04x}: Exists {}={:?}", + self.id, + HexDisplay::from(&key), + result, + ); + + result + } + + fn exists_child_storage( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> bool { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + + let result = match self.overlay.child_storage(storage_key.as_ref(), key) { + Some(x) => x.is_some(), + _ => self + .backend + .exists_child_storage(storage_key.as_ref(), child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL), + }; + + trace!(target: "state-trace", "{:04x}: ChildExists({}) {}={:?}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&key), + result, + ); + result + } + + fn next_storage_key(&self, key: &[u8]) -> Option { + let next_backend_key = self + .backend + .next_storage_key(key) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + let next_overlay_key_change = self.overlay.next_storage_key_change(key); + + match (next_backend_key, next_overlay_key_change) { + (Some(backend_key), Some(overlay_key)) if &backend_key[..] 
< overlay_key.0 => { + Some(backend_key) + } + (backend_key, None) => backend_key, + (_, Some(overlay_key)) => { + if overlay_key.1.value.is_some() { + Some(overlay_key.0.to_vec()) + } else { + self.next_storage_key(&overlay_key.0[..]) + } + } + } + } + + fn next_child_storage_key( + &self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: &[u8], + ) -> Option { + let next_backend_key = self + .backend + .next_child_storage_key(storage_key.as_ref(), child_info, key) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + let next_overlay_key_change = self + .overlay + .next_child_storage_key_change(storage_key.as_ref(), key); + + match (next_backend_key, next_overlay_key_change) { + (Some(backend_key), Some(overlay_key)) if &backend_key[..] < overlay_key.0 => { + Some(backend_key) + } + (backend_key, None) => backend_key, + (_, Some(overlay_key)) => { + if overlay_key.1.value.is_some() { + Some(overlay_key.0.to_vec()) + } else { + self.next_child_storage_key(storage_key, child_info, &overlay_key.0[..]) + } + } + } + } + + fn place_storage(&mut self, key: StorageKey, value: Option) { + trace!(target: "state-trace", "{:04x}: Put {}={:?}", + self.id, + HexDisplay::from(&key), + value.as_ref().map(HexDisplay::from) + ); + let _guard = sp_panic_handler::AbortGuard::force_abort(); + if is_child_storage_key(&key) { + warn!(target: "trie", "Refuse to directly set child storage key"); + return; + } + + self.mark_dirty(); + self.overlay.set_storage(key, value); + } + + fn place_child_storage( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + key: StorageKey, + value: Option, + ) { + trace!(target: "state-trace", "{:04x}: PutChild({}) {}={:?}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&key), + value.as_ref().map(HexDisplay::from) + ); + let _guard = sp_panic_handler::AbortGuard::force_abort(); + + self.mark_dirty(); + self.overlay + .set_child_storage(storage_key.into_owned(), child_info, key, value); + } + + fn 
kill_child_storage(&mut self, storage_key: ChildStorageKey, child_info: ChildInfo) { + trace!(target: "state-trace", "{:04x}: KillChild({})", + self.id, + HexDisplay::from(&storage_key.as_ref()), + ); + let _guard = sp_panic_handler::AbortGuard::force_abort(); + + self.mark_dirty(); + self.overlay + .clear_child_storage(storage_key.as_ref(), child_info); + self.backend + .for_keys_in_child_storage(storage_key.as_ref(), child_info, |key| { + self.overlay.set_child_storage( + storage_key.as_ref().to_vec(), + child_info, + key.to_vec(), + None, + ); + }); + } + + fn clear_prefix(&mut self, prefix: &[u8]) { + trace!(target: "state-trace", "{:04x}: ClearPrefix {}", + self.id, + HexDisplay::from(&prefix), + ); + let _guard = sp_panic_handler::AbortGuard::force_abort(); + if is_child_storage_key(prefix) { + warn!(target: "trie", "Refuse to directly clear prefix that is part of child storage key"); + return; + } + + self.mark_dirty(); + self.overlay.clear_prefix(prefix); + self.backend.for_keys_with_prefix(prefix, |key| { + self.overlay.set_storage(key.to_vec(), None); + }); + } + + fn clear_child_prefix( + &mut self, + storage_key: ChildStorageKey, + child_info: ChildInfo, + prefix: &[u8], + ) { + trace!(target: "state-trace", "{:04x}: ClearChildPrefix({}) {}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&prefix), + ); + let _guard = sp_panic_handler::AbortGuard::force_abort(); + + self.mark_dirty(); + self.overlay + .clear_child_prefix(storage_key.as_ref(), child_info, prefix); + self.backend + .for_child_keys_with_prefix(storage_key.as_ref(), child_info, prefix, |key| { + self.overlay.set_child_storage( + storage_key.as_ref().to_vec(), + child_info, + key.to_vec(), + None, + ); + }); + } + + fn chain_id(&self) -> u64 { + 42 + } + + fn storage_root(&mut self) -> Vec { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + if let Some(ref root) = self.storage_transaction_cache.transaction_storage_root { + trace!(target: "state-trace", 
"{:04x}: Root (cached) {}", + self.id, + HexDisplay::from(&root.as_ref()), + ); + return root.encode(); + } + + let root = self + .overlay + .storage_root(self.backend, self.storage_transaction_cache); + trace!(target: "state-trace", "{:04x}: Root {}", self.id, HexDisplay::from(&root.as_ref())); + root.encode() + } + + fn child_storage_root(&mut self, storage_key: ChildStorageKey) -> Vec { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + if self + .storage_transaction_cache + .transaction_storage_root + .is_some() + { + let root = self + .storage(storage_key.as_ref()) + .and_then(|k| Decode::decode(&mut &k[..]).ok()) + .unwrap_or(default_child_trie_root::>(storage_key.as_ref())); + trace!(target: "state-trace", "{:04x}: ChildRoot({}) (cached) {}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&root.as_ref()), + ); + root.encode() + } else { + let storage_key = storage_key.as_ref(); + + if let Some(child_info) = self.overlay.child_info(storage_key).cloned() { + let (root, is_empty, _) = { + let delta = self + .overlay + .committed + .children + .get(storage_key) + .into_iter() + .flat_map(|(map, _)| map.clone().into_iter().map(|(k, v)| (k, v.value))) + .chain( + self.overlay + .prospective + .children + .get(storage_key) + .into_iter() + .flat_map(|(map, _)| { + map.clone().into_iter().map(|(k, v)| (k, v.value)) + }), + ); + + self.backend + .child_storage_root(storage_key, child_info.as_ref(), delta) + }; + + let root = root.encode(); + // We store update in the overlay in order to be able to use 'self.storage_transaction' + // cache. This is brittle as it rely on Ext only querying the trie backend for + // storage root. + // A better design would be to manage 'child_storage_transaction' in a + // similar way as 'storage_transaction' but for each child trie. 
+ if is_empty { + self.overlay.set_storage(storage_key.into(), None); + } else { + self.overlay + .set_storage(storage_key.into(), Some(root.clone())); + } + + trace!(target: "state-trace", "{:04x}: ChildRoot({}) {}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&root.as_ref()), + ); + root + } else { + // empty overlay + let root = self + .storage(storage_key.as_ref()) + .and_then(|k| Decode::decode(&mut &k[..]).ok()) + .unwrap_or(default_child_trie_root::>(storage_key.as_ref())); + trace!(target: "state-trace", "{:04x}: ChildRoot({}) (no change) {}", + self.id, + HexDisplay::from(&storage_key.as_ref()), + HexDisplay::from(&root.as_ref()), + ); + root.encode() + } + } + } + + fn storage_changes_root(&mut self, parent_hash: &[u8]) -> Result>, ()> { + let _guard = sp_panic_handler::AbortGuard::force_abort(); + let root = self.overlay.changes_trie_root( + self.backend, + self.changes_trie_state.as_ref(), + Decode::decode(&mut &parent_hash[..]).map_err(|e| { + trace!( + target: "state-trace", + "Failed to decode changes root parent hash: {}", + e, + ) + })?, + true, + self.storage_transaction_cache, + ); + + trace!(target: "state-trace", "{:04x}: ChangesRoot({}) {:?}", + self.id, + HexDisplay::from(&parent_hash), + root, + ); + + root.map(|r| r.map(|o| o.encode())) + } + + fn wipe(&mut self) { + self.overlay.discard_prospective(); + self.overlay + .drain_storage_changes( + &self.backend, + None, + Default::default(), + self.storage_transaction_cache, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.storage_transaction_cache.reset(); + self.backend.wipe().expect(EXT_NOT_ALLOWED_TO_FAIL) + } + + fn commit(&mut self) { + self.overlay.commit_prospective(); + let changes = self + .overlay + .drain_storage_changes( + &self.backend, + None, + Default::default(), + self.storage_transaction_cache, + ) + .expect(EXT_NOT_ALLOWED_TO_FAIL); + self.backend + .commit(changes.transaction_storage_root, changes.transaction) + .expect(EXT_NOT_ALLOWED_TO_FAIL); 
+ self.storage_transaction_cache.reset(); + } } impl<'a, H, B, N> sp_externalities::ExtensionStore for Ext<'a, H, N, B> where - H: Hasher, - B: 'a + Backend, - N: crate::changes_trie::BlockNumber, + H: Hasher, + B: 'a + Backend, + N: crate::changes_trie::BlockNumber, { - fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { - self.extensions.as_mut().and_then(|exts| exts.get_mut(type_id)) - } - - fn register_extension_with_type_id( - &mut self, - type_id: TypeId, - extension: Box, - ) -> Result<(), sp_externalities::Error> { - if let Some(ref mut extensions) = self.extensions { - extensions.register_with_type_id(type_id, extension) - } else { - Err(sp_externalities::Error::ExtensionsAreNotSupported) - } - } - - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { - if let Some(ref mut extensions) = self.extensions { - match extensions.deregister(type_id) { - Some(_) => Ok(()), - None => Err(sp_externalities::Error::ExtensionIsNotRegistered(type_id)) - } - } else { - Err(sp_externalities::Error::ExtensionsAreNotSupported) - } - } + fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { + self.extensions + .as_mut() + .and_then(|exts| exts.get_mut(type_id)) + } + + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), sp_externalities::Error> { + if let Some(ref mut extensions) = self.extensions { + extensions.register_with_type_id(type_id, extension) + } else { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } + } + + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { + if let Some(ref mut extensions) = self.extensions { + match extensions.deregister(type_id) { + Some(_) => Ok(()), + None => Err(sp_externalities::Error::ExtensionIsNotRegistered(type_id)), + } + } else { + Err(sp_externalities::Error::ExtensionsAreNotSupported) + } + } } 
#[cfg(test)] mod tests { - use super::*; - use hex_literal::hex; - use num_traits::Zero; - use codec::Encode; - use sp_core::{H256, Blake2Hasher, storage::well_known_keys::EXTRINSIC_INDEX, map}; - use crate::{ - changes_trie::{ - Configuration as ChangesTrieConfiguration, - InMemoryStorage as TestChangesTrieStorage, - }, InMemoryBackend, overlayed_changes::OverlayedValue, - }; - use sp_core::storage::{Storage, StorageChild}; - - type TestBackend = InMemoryBackend; - type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; - - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - - fn prepare_overlay_with_changes() -> OverlayedChanges { - OverlayedChanges { - prospective: vec![ - (EXTRINSIC_INDEX.to_vec(), OverlayedValue { - value: Some(3u32.encode()), - extrinsics: Some(vec![1].into_iter().collect()) - }), - (vec![1], OverlayedValue { - value: Some(vec![100].into_iter().collect()), - extrinsics: Some(vec![1].into_iter().collect()) - }), - ].into_iter().collect(), - committed: Default::default(), - collect_extrinsics: true, - stats: Default::default(), - } - } - - fn changes_trie_config() -> ChangesTrieConfiguration { - ChangesTrieConfiguration { - digest_interval: 0, - digest_levels: 0, - } - } - - #[test] - fn storage_changes_root_is_none_when_storage_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); - } - - #[test] - fn storage_changes_root_is_none_when_state_is_not_provided() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let backend = TestBackend::default(); - let mut ext = 
TestExt::new(&mut overlay, &mut cache, &backend, None, None); - assert_eq!(ext.storage_changes_root(&H256::default().encode()).unwrap(), None); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); - assert_eq!( - ext.storage_changes_root(&H256::default().encode()).unwrap(), - Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()), - ); - } - - #[test] - fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { - let mut overlay = prepare_overlay_with_changes(); - let mut cache = StorageTransactionCache::default(); - overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; - let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); - let state = Some(ChangesTrieState::new(changes_trie_config(), Zero::zero(), &storage)); - let backend = TestBackend::default(); - let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); - assert_eq!( - ext.storage_changes_root(&H256::default().encode()).unwrap(), - Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()), - ); - } - - #[test] - fn next_storage_key_works() { - let mut cache = StorageTransactionCache::default(); - let mut overlay = OverlayedChanges::default(); - overlay.set_storage(vec![20], None); - overlay.set_storage(vec![30], Some(vec![31])); - let backend = Storage { - top: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![40] => vec![40] - ], - children: map![] - }.into(); - - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - 
- // next_backend < next_overlay - assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); - - // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_storage_key(&[10]), Some(vec![30])); - - // next_overlay < next_backend - assert_eq!(ext.next_storage_key(&[20]), Some(vec![30])); - - // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_storage_key(&[30]), Some(vec![40])); - - drop(ext); - overlay.set_storage(vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - - // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); - } - - #[test] - fn next_child_storage_key_works() { - const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - - let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); - let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); - let backend = Storage { - top: map![], - children: map![ - child().as_ref().to_vec() => StorageChild { - data: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![40] => vec![40] - ], - child_info: CHILD_INFO_1.to_owned(), - } - ], - }.into(); - - - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - - // next_backend < next_overlay - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), Some(vec![10])); - - // next_backend == next_overlay but next_overlay is a delete - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), Some(vec![30])); - - // next_overlay < next_backend - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), 
Some(vec![30])); - - // next_backend exist but next_overlay doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), Some(vec![40])); - - drop(ext); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![50], Some(vec![50])); - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - - // next_overlay exist but next_backend doesn't exist - assert_eq!(ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), Some(vec![50])); - } - - #[test] - fn child_storage_works() { - let mut cache = StorageTransactionCache::default(); - let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); - let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); - overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![30], Some(vec![31])); - let backend = Storage { - top: map![], - children: map![ - child().as_ref().to_vec() => StorageChild { - data: map![ - vec![10] => vec![10], - vec![20] => vec![20], - vec![30] => vec![40] - ], - child_info: CHILD_INFO_1.to_owned(), - } - ], - }.into(); - - let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); - - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[10]), Some(vec![10])); - assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[10]), - Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), - ); - - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None); - assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[20]), - None, - ); - - assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[30]), Some(vec![31])); - assert_eq!( - ext.child_storage_hash(child(), CHILD_INFO_1, &[30]), - Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), - ); - - } + use super::*; + use crate::{ + changes_trie::{ + Configuration as ChangesTrieConfiguration, InMemoryStorage as TestChangesTrieStorage, + }, + overlayed_changes::OverlayedValue, + InMemoryBackend, + 
}; + use codec::Encode; + use hex_literal::hex; + use num_traits::Zero; + use sp_core::storage::{Storage, StorageChild}; + use sp_core::{map, storage::well_known_keys::EXTRINSIC_INDEX, Blake2Hasher, H256}; + + type TestBackend = InMemoryBackend; + type TestExt<'a> = Ext<'a, Blake2Hasher, u64, TestBackend>; + + const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; + + const CHILD_UUID_1: &[u8] = b"unique_id_1"; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + + fn prepare_overlay_with_changes() -> OverlayedChanges { + OverlayedChanges { + prospective: vec![ + ( + EXTRINSIC_INDEX.to_vec(), + OverlayedValue { + value: Some(3u32.encode()), + extrinsics: Some(vec![1].into_iter().collect()), + }, + ), + ( + vec![1], + OverlayedValue { + value: Some(vec![100].into_iter().collect()), + extrinsics: Some(vec![1].into_iter().collect()), + }, + ), + ] + .into_iter() + .collect(), + committed: Default::default(), + collect_extrinsics: true, + stats: Default::default(), + } + } + + fn changes_trie_config() -> ChangesTrieConfiguration { + ChangesTrieConfiguration { + digest_interval: 0, + digest_levels: 0, + } + } + + #[test] + fn storage_changes_root_is_none_when_storage_is_not_provided() { + let mut overlay = prepare_overlay_with_changes(); + let mut cache = StorageTransactionCache::default(); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + assert_eq!( + ext.storage_changes_root(&H256::default().encode()).unwrap(), + None + ); + } + + #[test] + fn storage_changes_root_is_none_when_state_is_not_provided() { + let mut overlay = prepare_overlay_with_changes(); + let mut cache = StorageTransactionCache::default(); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + assert_eq!( + ext.storage_changes_root(&H256::default().encode()).unwrap(), + None + ); + } + + #[test] + fn 
storage_changes_root_is_some_when_extrinsic_changes_are_non_empty() { + let mut overlay = prepare_overlay_with_changes(); + let mut cache = StorageTransactionCache::default(); + let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); + let state = Some(ChangesTrieState::new( + changes_trie_config(), + Zero::zero(), + &storage, + )); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); + assert_eq!( + ext.storage_changes_root(&H256::default().encode()).unwrap(), + Some(hex!("bb0c2ef6e1d36d5490f9766cfcc7dfe2a6ca804504c3bb206053890d6dd02376").to_vec()), + ); + } + + #[test] + fn storage_changes_root_is_some_when_extrinsic_changes_are_empty() { + let mut overlay = prepare_overlay_with_changes(); + let mut cache = StorageTransactionCache::default(); + overlay.prospective.top.get_mut(&vec![1]).unwrap().value = None; + let storage = TestChangesTrieStorage::with_blocks(vec![(99, Default::default())]); + let state = Some(ChangesTrieState::new( + changes_trie_config(), + Zero::zero(), + &storage, + )); + let backend = TestBackend::default(); + let mut ext = TestExt::new(&mut overlay, &mut cache, &backend, state, None); + assert_eq!( + ext.storage_changes_root(&H256::default().encode()).unwrap(), + Some(hex!("96f5aae4690e7302737b6f9b7f8567d5bbb9eac1c315f80101235a92d9ec27f4").to_vec()), + ); + } + + #[test] + fn next_storage_key_works() { + let mut cache = StorageTransactionCache::default(); + let mut overlay = OverlayedChanges::default(); + overlay.set_storage(vec![20], None); + overlay.set_storage(vec![30], Some(vec![31])); + let backend = Storage { + top: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + children: map![], + } + .into(); + + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + // next_backend < next_overlay + assert_eq!(ext.next_storage_key(&[5]), Some(vec![10])); + + // next_backend == next_overlay but 
next_overlay is a delete + assert_eq!(ext.next_storage_key(&[10]), Some(vec![30])); + + // next_overlay < next_backend + assert_eq!(ext.next_storage_key(&[20]), Some(vec![30])); + + // next_backend exist but next_overlay doesn't exist + assert_eq!(ext.next_storage_key(&[30]), Some(vec![40])); + + drop(ext); + overlay.set_storage(vec![50], Some(vec![50])); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + // next_overlay exist but next_backend doesn't exist + assert_eq!(ext.next_storage_key(&[40]), Some(vec![50])); + } + + #[test] + fn next_child_storage_key_works() { + const CHILD_KEY_1: &[u8] = b":child_storage:default:Child1"; + + const CHILD_UUID_1: &[u8] = b"unique_id_1"; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + + let mut cache = StorageTransactionCache::default(); + let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); + let mut overlay = OverlayedChanges::default(); + overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); + overlay.set_child_storage( + child().as_ref().to_vec(), + CHILD_INFO_1, + vec![30], + Some(vec![31]), + ); + let backend = Storage { + top: map![], + children: map![ + child().as_ref().to_vec() => StorageChild { + data: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![40] => vec![40] + ], + child_info: CHILD_INFO_1.to_owned(), + } + ], + } + .into(); + + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + // next_backend < next_overlay + assert_eq!( + ext.next_child_storage_key(child(), CHILD_INFO_1, &[5]), + Some(vec![10]) + ); + + // next_backend == next_overlay but next_overlay is a delete + assert_eq!( + ext.next_child_storage_key(child(), CHILD_INFO_1, &[10]), + Some(vec![30]) + ); + + // next_overlay < next_backend + assert_eq!( + ext.next_child_storage_key(child(), CHILD_INFO_1, &[20]), + Some(vec![30]) + ); + + // next_backend exist but next_overlay doesn't exist + assert_eq!( + 
ext.next_child_storage_key(child(), CHILD_INFO_1, &[30]), + Some(vec![40]) + ); + + drop(ext); + overlay.set_child_storage( + child().as_ref().to_vec(), + CHILD_INFO_1, + vec![50], + Some(vec![50]), + ); + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + // next_overlay exist but next_backend doesn't exist + assert_eq!( + ext.next_child_storage_key(child(), CHILD_INFO_1, &[40]), + Some(vec![50]) + ); + } + + #[test] + fn child_storage_works() { + let mut cache = StorageTransactionCache::default(); + let child = || ChildStorageKey::from_slice(CHILD_KEY_1).unwrap(); + let mut overlay = OverlayedChanges::default(); + overlay.set_child_storage(child().as_ref().to_vec(), CHILD_INFO_1, vec![20], None); + overlay.set_child_storage( + child().as_ref().to_vec(), + CHILD_INFO_1, + vec![30], + Some(vec![31]), + ); + let backend = Storage { + top: map![], + children: map![ + child().as_ref().to_vec() => StorageChild { + data: map![ + vec![10] => vec![10], + vec![20] => vec![20], + vec![30] => vec![40] + ], + child_info: CHILD_INFO_1.to_owned(), + } + ], + } + .into(); + + let ext = TestExt::new(&mut overlay, &mut cache, &backend, None, None); + + assert_eq!( + ext.child_storage(child(), CHILD_INFO_1, &[10]), + Some(vec![10]) + ); + assert_eq!( + ext.child_storage_hash(child(), CHILD_INFO_1, &[10]), + Some(Blake2Hasher::hash(&[10]).as_ref().to_vec()), + ); + + assert_eq!(ext.child_storage(child(), CHILD_INFO_1, &[20]), None); + assert_eq!(ext.child_storage_hash(child(), CHILD_INFO_1, &[20]), None,); + + assert_eq!( + ext.child_storage(child(), CHILD_INFO_1, &[30]), + Some(vec![31]) + ); + assert_eq!( + ext.child_storage_hash(child(), CHILD_INFO_1, &[30]), + Some(Blake2Hasher::hash(&[31]).as_ref().to_vec()), + ); + } } diff --git a/primitives/state-machine/src/in_memory_backend.rs b/primitives/state-machine/src/in_memory_backend.rs index ecd4532cf2..856adfcfef 100644 --- a/primitives/state-machine/src/in_memory_backend.rs +++ 
b/primitives/state-machine/src/in_memory_backend.rs @@ -17,18 +17,23 @@ //! State machine in memory backend. use crate::{ - StorageKey, StorageValue, StorageCollection, - trie_backend::TrieBackend, - backend::{Backend, insert_into_memory_db}, - stats::UsageInfo, + backend::{insert_into_memory_db, Backend}, + stats::UsageInfo, + trie_backend::TrieBackend, + StorageCollection, StorageKey, StorageValue, }; -use std::{error, fmt, collections::{BTreeMap, HashMap}, marker::PhantomData, ops}; +use codec::Codec; use hash_db::Hasher; +use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; use sp_trie::{ - MemoryDB, child_trie_root, default_child_trie_root, TrieConfiguration, trie_types::Layout, + child_trie_root, default_child_trie_root, trie_types::Layout, MemoryDB, TrieConfiguration, +}; +use std::{ + collections::{BTreeMap, HashMap}, + error, fmt, + marker::PhantomData, + ops, }; -use codec::Codec; -use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; /// Error impossible. // FIXME: use `!` type when stabilized. https://github.com/rust-lang/rust/issues/35121 @@ -36,359 +41,411 @@ use sp_core::storage::{ChildInfo, OwnedChildInfo, Storage}; pub enum Void {} impl fmt::Display for Void { - fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { - match *self {} - } + fn fmt(&self, _: &mut fmt::Formatter) -> fmt::Result { + match *self {} + } } impl error::Error for Void { - fn description(&self) -> &str { "unreachable error" } + fn description(&self) -> &str { + "unreachable error" + } } /// In-memory backend. Fully recomputes tries each time `as_trie_backend` is called but useful for /// tests and proof checking. pub struct InMemory { - inner: HashMap, BTreeMap>, - // This field is only needed for returning reference in `as_trie_backend`. - trie: Option, H>>, - _hasher: PhantomData, + inner: HashMap, BTreeMap>, + // This field is only needed for returning reference in `as_trie_backend`. 
+ trie: Option, H>>, + _hasher: PhantomData, } impl std::fmt::Debug for InMemory { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - write!(f, "InMemory ({} values)", self.inner.len()) - } + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + write!(f, "InMemory ({} values)", self.inner.len()) + } } impl Default for InMemory { - fn default() -> Self { - InMemory { - inner: Default::default(), - trie: None, - _hasher: PhantomData, - } - } + fn default() -> Self { + InMemory { + inner: Default::default(), + trie: None, + _hasher: PhantomData, + } + } } impl Clone for InMemory { - fn clone(&self) -> Self { - InMemory { - inner: self.inner.clone(), - trie: None, - _hasher: PhantomData, - } - } + fn clone(&self) -> Self { + InMemory { + inner: self.inner.clone(), + trie: None, + _hasher: PhantomData, + } + } } impl PartialEq for InMemory { - fn eq(&self, other: &Self) -> bool { - self.inner.eq(&other.inner) - } + fn eq(&self, other: &Self) -> bool { + self.inner.eq(&other.inner) + } } impl InMemory { - /// Copy the state, with applied updates - pub fn update< - T: IntoIterator, StorageCollection)> - >( - &self, - changes: T, - ) -> Self { - let mut inner = self.inner.clone(); - for (child_info, key_values) in changes.into_iter() { - let entry = inner.entry(child_info).or_default(); - for (key, val) in key_values { - match val { - Some(v) => { entry.insert(key, v); }, - None => { entry.remove(&key); }, - } - } - } - inner.into() - } + /// Copy the state, with applied updates + pub fn update< + T: IntoIterator, StorageCollection)>, + >( + &self, + changes: T, + ) -> Self { + let mut inner = self.inner.clone(); + for (child_info, key_values) in changes.into_iter() { + let entry = inner.entry(child_info).or_default(); + for (key, val) in key_values { + match val { + Some(v) => { + entry.insert(key, v); + } + None => { + entry.remove(&key); + } + } + } + } + inner.into() + } } -impl From, BTreeMap>> - for InMemory +impl + From, BTreeMap>> + for InMemory 
{ - fn from(inner: HashMap, BTreeMap>) -> Self { - InMemory { - inner, - trie: None, - _hasher: PhantomData, - } - } + fn from( + inner: HashMap, BTreeMap>, + ) -> Self { + InMemory { + inner, + trie: None, + _hasher: PhantomData, + } + } } impl From for InMemory { - fn from(inners: Storage) -> Self { - let mut inner: HashMap, BTreeMap> - = inners.children.into_iter().map(|(k, c)| (Some((k, c.child_info)), c.data)).collect(); - inner.insert(None, inners.top); - InMemory { - inner, - trie: None, - _hasher: PhantomData, - } - } + fn from(inners: Storage) -> Self { + let mut inner: HashMap< + Option<(StorageKey, OwnedChildInfo)>, + BTreeMap, + > = inners + .children + .into_iter() + .map(|(k, c)| (Some((k, c.child_info)), c.data)) + .collect(); + inner.insert(None, inners.top); + InMemory { + inner, + trie: None, + _hasher: PhantomData, + } + } } impl From> for InMemory { - fn from(inner: BTreeMap) -> Self { - let mut expanded = HashMap::new(); - expanded.insert(None, inner); - InMemory { - inner: expanded, - trie: None, - _hasher: PhantomData, - } - } + fn from(inner: BTreeMap) -> Self { + let mut expanded = HashMap::new(); + expanded.insert(None, inner); + InMemory { + inner: expanded, + trie: None, + _hasher: PhantomData, + } + } } impl From, StorageCollection)>> - for InMemory { - fn from( - inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>, - ) -> Self { - let mut expanded: HashMap, BTreeMap> - = HashMap::new(); - for (child_info, key_values) in inner { - let entry = expanded.entry(child_info).or_default(); - for (key, value) in key_values { - if let Some(value) = value { - entry.insert(key, value); - } - } - } - expanded.into() - } + for InMemory +{ + fn from(inner: Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>) -> Self { + let mut expanded: HashMap< + Option<(StorageKey, OwnedChildInfo)>, + BTreeMap, + > = HashMap::new(); + for (child_info, key_values) in inner { + let entry = expanded.entry(child_info).or_default(); + for 
(key, value) in key_values { + if let Some(value) = value { + entry.insert(key, value); + } + } + } + expanded.into() + } } impl InMemory { - /// child storage key iterator - pub fn child_storage_keys(&self) -> impl Iterator { - self.inner.iter().filter_map(|item| - item.0.as_ref().map(|v|(&v.0[..], v.1.as_ref())) - ) - } + /// child storage key iterator + pub fn child_storage_keys(&self) -> impl Iterator { + self.inner + .iter() + .filter_map(|item| item.0.as_ref().map(|v| (&v.0[..], v.1.as_ref()))) + } } -impl Backend for InMemory where H::Out: Codec { - type Error = Void; - type Transaction = Vec<( - Option<(StorageKey, OwnedChildInfo)>, - StorageCollection, - )>; - type TrieBackendStorage = MemoryDB; - - fn storage(&self, key: &[u8]) -> Result, Self::Error> { - Ok(self.inner.get(&None).and_then(|map| map.get(key).map(Clone::clone))) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - Ok(self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) - .and_then(|map| map.get(key).map(Clone::clone))) - } - - fn exists_storage(&self, key: &[u8]) -> Result { - Ok(self.inner.get(&None).map(|map| map.get(key).is_some()).unwrap_or(false)) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.inner.get(&None) - .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], mut f: F) { - self.inner.get(&None).map(|map| map.iter().filter(|(key, _val)| key.starts_with(prefix)) - .for_each(|(k, v)| f(k, v))); - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - mut f: F, - ) { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) - .map(|map| map.keys().for_each(|k| f(&k))); - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - 
self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) - .map(|map| map.keys().filter(|key| key.starts_with(prefix)).map(|k| &**k).for_each(f)); - } - - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where - I: IntoIterator, Option>)>, - ::Out: Ord, - { - let existing_pairs = self.inner.get(&None) - .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); - - let transaction: Vec<_> = delta.into_iter().collect(); - let root = Layout::::trie_root(existing_pairs.chain(transaction.iter().cloned()) - .collect::>() - .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) - ); - - let full_transaction = transaction.into_iter().collect(); - - (root, vec![(None, full_transaction)]) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (H::Out, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord - { - let storage_key = storage_key.to_vec(); - let child_info = Some((storage_key.clone(), child_info.to_owned())); - - let existing_pairs = self.inner.get(&child_info) - .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); - - let transaction: Vec<_> = delta.into_iter().collect(); - let root = child_trie_root::, _, _, _>( - &storage_key, - existing_pairs.chain(transaction.iter().cloned()) - .collect::>() - .into_iter() - .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))) - ); - - let full_transaction = transaction.into_iter().collect(); - - let is_default = root == default_child_trie_root::>(&storage_key); - - (root, is_default, vec![(child_info, full_transaction)]) - } - - fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_key = self.inner.get(&None) - .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); - - Ok(next_key) - } - - fn next_child_storage_key( - &self, - 
storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - let next_key = self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) - .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); - - Ok(next_key) - } - - fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { - self.inner.get(&None) - .into_iter() - .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) - .collect() - } - - fn keys(&self, prefix: &[u8]) -> Vec { - self.inner.get(&None) - .into_iter() - .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) - .collect() - } - - fn child_keys( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) -> Vec { - self.inner.get(&Some((storage_key.to_vec(), child_info.to_owned()))) - .into_iter() - .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) - .collect() - } - - fn as_trie_backend(&mut self)-> Option<&TrieBackend> { - let mut mdb = MemoryDB::default(); - let mut new_child_roots = Vec::new(); - let mut root_map = None; - for (child_info, map) in &self.inner { - if let Some((storage_key, _child_info)) = child_info.as_ref() { - // no need to use child_info at this point because we use a MemoryDB for - // proof (with PrefixedMemoryDB it would be needed). 
- let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; - new_child_roots.push((storage_key.clone(), ch.as_ref().into())); - } else { - root_map = Some(map); - } - } - let root = match root_map { - Some(map) => insert_into_memory_db::( - &mut mdb, - map.clone().into_iter().chain(new_child_roots.into_iter()), - )?, - None => insert_into_memory_db::( - &mut mdb, - new_child_roots.into_iter(), - )?, - }; - self.trie = Some(TrieBackend::new(mdb, root)); - self.trie.as_ref() - } - - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } - - fn usage_info(&self) -> UsageInfo { - UsageInfo::empty() - } - - fn wipe(&self) -> Result<(), Self::Error> { - Ok(()) - } +impl Backend for InMemory +where + H::Out: Codec, +{ + type Error = Void; + type Transaction = Vec<(Option<(StorageKey, OwnedChildInfo)>, StorageCollection)>; + type TrieBackendStorage = MemoryDB; + + fn storage(&self, key: &[u8]) -> Result, Self::Error> { + Ok(self + .inner + .get(&None) + .and_then(|map| map.get(key).map(Clone::clone))) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + Ok(self + .inner + .get(&Some((storage_key.to_vec(), child_info.to_owned()))) + .and_then(|map| map.get(key).map(Clone::clone))) + } + + fn exists_storage(&self, key: &[u8]) -> Result { + Ok(self + .inner + .get(&None) + .map(|map| map.get(key).is_some()) + .unwrap_or(false)) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.inner.get(&None).map(|map| { + map.keys() + .filter(|key| key.starts_with(prefix)) + .map(|k| &**k) + .for_each(f) + }); + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], mut f: F) { + self.inner.get(&None).map(|map| { + map.iter() + .filter(|(key, _val)| key.starts_with(prefix)) + .for_each(|(k, v)| f(k, v)) + }); + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + mut f: F, + ) { + self.inner + 
.get(&Some((storage_key.to_vec(), child_info.to_owned()))) + .map(|map| map.keys().for_each(|k| f(&k))); + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.inner + .get(&Some((storage_key.to_vec(), child_info.to_owned()))) + .map(|map| { + map.keys() + .filter(|key| key.starts_with(prefix)) + .map(|k| &**k) + .for_each(f) + }); + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + ::Out: Ord, + { + let existing_pairs = self + .inner + .get(&None) + .into_iter() + .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + + let transaction: Vec<_> = delta.into_iter().collect(); + let root = Layout::::trie_root( + existing_pairs + .chain(transaction.iter().cloned()) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))), + ); + + let full_transaction = transaction.into_iter().collect(); + + (root, vec![(None, full_transaction)]) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord, + { + let storage_key = storage_key.to_vec(); + let child_info = Some((storage_key.clone(), child_info.to_owned())); + + let existing_pairs = self + .inner + .get(&child_info) + .into_iter() + .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), Some(v.clone())))); + + let transaction: Vec<_> = delta.into_iter().collect(); + let root = child_trie_root::, _, _, _>( + &storage_key, + existing_pairs + .chain(transaction.iter().cloned()) + .collect::>() + .into_iter() + .filter_map(|(k, maybe_val)| maybe_val.map(|val| (k, val))), + ); + + let full_transaction = transaction.into_iter().collect(); + + let is_default = root == default_child_trie_root::>(&storage_key); + + (root, is_default, vec![(child_info, full_transaction)]) + } + + fn next_storage_key(&self, key: 
&[u8]) -> Result, Self::Error> { + let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); + let next_key = self + .inner + .get(&None) + .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); + + Ok(next_key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); + let next_key = self + .inner + .get(&Some((storage_key.to_vec(), child_info.to_owned()))) + .and_then(|map| map.range::<[u8], _>(range).next().map(|(k, _)| k).cloned()); + + Ok(next_key) + } + + fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { + self.inner + .get(&None) + .into_iter() + .flat_map(|map| map.iter().map(|(k, v)| (k.clone(), v.clone()))) + .collect() + } + + fn keys(&self, prefix: &[u8]) -> Vec { + self.inner + .get(&None) + .into_iter() + .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) + .collect() + } + + fn child_keys( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) -> Vec { + self.inner + .get(&Some((storage_key.to_vec(), child_info.to_owned()))) + .into_iter() + .flat_map(|map| map.keys().filter(|k| k.starts_with(prefix)).cloned()) + .collect() + } + + fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + let mut mdb = MemoryDB::default(); + let mut new_child_roots = Vec::new(); + let mut root_map = None; + for (child_info, map) in &self.inner { + if let Some((storage_key, _child_info)) = child_info.as_ref() { + // no need to use child_info at this point because we use a MemoryDB for + // proof (with PrefixedMemoryDB it would be needed). 
+ let ch = insert_into_memory_db::(&mut mdb, map.clone().into_iter())?; + new_child_roots.push((storage_key.clone(), ch.as_ref().into())); + } else { + root_map = Some(map); + } + } + let root = match root_map { + Some(map) => insert_into_memory_db::( + &mut mdb, + map.clone().into_iter().chain(new_child_roots.into_iter()), + )?, + None => insert_into_memory_db::(&mut mdb, new_child_roots.into_iter())?, + }; + self.trie = Some(TrieBackend::new(mdb, root)); + self.trie.as_ref() + } + + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) {} + + fn usage_info(&self) -> UsageInfo { + UsageInfo::empty() + } + + fn wipe(&self) -> Result<(), Self::Error> { + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use sp_runtime::traits::BlakeTwo256; - - /// Assert in memory backend with only child trie keys works as trie backend. - #[test] - fn in_memory_with_child_trie_only() { - let storage = InMemory::::default(); - let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); - let mut storage = storage.update( - vec![( - Some((b"1".to_vec(), child_info.clone())), - vec![(b"2".to_vec(), Some(b"3".to_vec()))] - )] - ); - let trie_backend = storage.as_trie_backend().unwrap(); - assert_eq!(trie_backend.child_storage(b"1", child_info.as_ref(), b"2").unwrap(), - Some(b"3".to_vec())); - assert!(trie_backend.storage(b"1").unwrap().is_some()); - } + use super::*; + use sp_runtime::traits::BlakeTwo256; + + /// Assert in memory backend with only child trie keys works as trie backend. 
+ #[test] + fn in_memory_with_child_trie_only() { + let storage = InMemory::::default(); + let child_info = OwnedChildInfo::new_default(b"unique_id_1".to_vec()); + let mut storage = storage.update(vec![( + Some((b"1".to_vec(), child_info.clone())), + vec![(b"2".to_vec(), Some(b"3".to_vec()))], + )]); + let trie_backend = storage.as_trie_backend().unwrap(); + assert_eq!( + trie_backend + .child_storage(b"1", child_info.as_ref(), b"2") + .unwrap(), + Some(b"3".to_vec()) + ); + assert!(trie_backend.storage(b"1").unwrap().is_some()); + } } diff --git a/primitives/state-machine/src/lib.rs b/primitives/state-machine/src/lib.rs index 9a2dc52cca..ddbe27e52d 100644 --- a/primitives/state-machine/src/lib.rs +++ b/primitives/state-machine/src/lib.rs @@ -18,63 +18,62 @@ #![warn(missing_docs)] -use std::{fmt, result, collections::HashMap, panic::UnwindSafe}; -use log::{warn, trace}; +use codec::{Codec, Decode, Encode}; use hash_db::Hasher; -use codec::{Decode, Encode, Codec}; +use log::{trace, warn}; +use overlayed_changes::OverlayedChangeSet; use sp_core::{ - storage::ChildInfo, NativeOrEncoded, NeverNativeValue, hexdisplay::HexDisplay, - traits::{CodeExecutor, CallInWasmExt, RuntimeCode}, + hexdisplay::HexDisplay, + storage::ChildInfo, + traits::{CallInWasmExt, CodeExecutor, RuntimeCode}, + NativeOrEncoded, NeverNativeValue, }; -use overlayed_changes::OverlayedChangeSet; use sp_externalities::Extensions; +use std::{collections::HashMap, fmt, panic::UnwindSafe, result}; pub mod backend; -mod in_memory_backend; +mod basic; mod changes_trie; mod error; mod ext; -mod testing; -mod basic; +mod in_memory_backend; mod overlayed_changes; mod proving_backend; +mod stats; +mod testing; mod trie_backend; mod trie_backend_essence; -mod stats; -pub use sp_trie::{trie_types::{Layout, TrieDBMut}, StorageProof, TrieMut, DBValue, MemoryDB}; -pub use testing::TestExternalities; -pub use basic::BasicExternalities; -pub use ext::Ext; pub use backend::Backend; +pub use basic::BasicExternalities; 
pub use changes_trie::{ - AnchorBlockId as ChangesTrieAnchorBlockId, - State as ChangesTrieState, - Storage as ChangesTrieStorage, - RootsStorage as ChangesTrieRootsStorage, - InMemoryStorage as InMemoryChangesTrieStorage, - BuildCache as ChangesTrieBuildCache, - CacheAction as ChangesTrieCacheAction, - ConfigurationRange as ChangesTrieConfigurationRange, - key_changes, key_changes_proof, - key_changes_proof_check, key_changes_proof_check_with_db, - prune as prune_changes_tries, - disabled_state as disabled_changes_trie_state, - BlockNumber as ChangesTrieBlockNumber, + disabled_state as disabled_changes_trie_state, key_changes, key_changes_proof, + key_changes_proof_check, key_changes_proof_check_with_db, prune as prune_changes_tries, + AnchorBlockId as ChangesTrieAnchorBlockId, BlockNumber as ChangesTrieBlockNumber, + BuildCache as ChangesTrieBuildCache, CacheAction as ChangesTrieCacheAction, + ConfigurationRange as ChangesTrieConfigurationRange, + InMemoryStorage as InMemoryChangesTrieStorage, RootsStorage as ChangesTrieRootsStorage, + State as ChangesTrieState, Storage as ChangesTrieStorage, }; +pub use error::{Error, ExecutionError}; +pub use ext::Ext; +pub use in_memory_backend::InMemory as InMemoryBackend; pub use overlayed_changes::{ - OverlayedChanges, StorageChanges, StorageTransactionCache, StorageKey, StorageValue, - StorageCollection, ChildStorageCollection, + ChildStorageCollection, OverlayedChanges, StorageChanges, StorageCollection, StorageKey, + StorageTransactionCache, StorageValue, }; pub use proving_backend::{ - create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, + create_proof_check_backend, ProofRecorder, ProvingBackend, ProvingBackendRecorder, }; -pub use trie_backend_essence::{TrieBackendStorage, Storage}; -pub use trie_backend::TrieBackend; -pub use error::{Error, ExecutionError}; -pub use in_memory_backend::InMemory as InMemoryBackend; -pub use stats::{UsageInfo, UsageUnit, StateMachineStats}; pub use 
sp_core::traits::CloneableSpawn; +pub use sp_trie::{ + trie_types::{Layout, TrieDBMut}, + DBValue, MemoryDB, StorageProof, TrieMut, +}; +pub use stats::{StateMachineStats, UsageInfo, UsageUnit}; +pub use testing::TestExternalities; +pub use trie_backend::TrieBackend; +pub use trie_backend_essence::{Storage, TrieBackendStorage}; type CallResult = Result, E>; @@ -82,397 +81,388 @@ type CallResult = Result, E>; pub type DefaultHandler = fn(CallResult, CallResult) -> CallResult; /// Type of changes trie transaction. -pub type ChangesTrieTransaction = ( - MemoryDB, - ChangesTrieCacheAction<::Out, N>, -); +pub type ChangesTrieTransaction = + (MemoryDB, ChangesTrieCacheAction<::Out, N>); /// Strategy for executing a call into the runtime. #[derive(Copy, Clone, Eq, PartialEq, Debug)] pub enum ExecutionStrategy { - /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. - AlwaysWasm, - /// Run with both the wasm and the native variant (if compatible). Report any discrepancy as an error. - Both, - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, + /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. + NativeWhenPossible, + /// Use the given wasm module. + AlwaysWasm, + /// Run with both the wasm and the native variant (if compatible). Report any discrepancy as an error. + Both, + /// First native, then if that fails or is not possible, wasm. + NativeElseWasm, } /// Storage backend trust level. #[derive(Debug, Clone)] pub enum BackendTrustLevel { - /// Panics from trusted backends are considered justified, and never caught. - Trusted, - /// Panics from untrusted backend are caught and interpreted as runtime error. - /// Untrusted backend may be missing some parts of the trie, so panics are not considered - /// fatal. 
- Untrusted, + /// Panics from trusted backends are considered justified, and never caught. + Trusted, + /// Panics from untrusted backend are caught and interpreted as runtime error. + /// Untrusted backend may be missing some parts of the trie, so panics are not considered + /// fatal. + Untrusted, } /// Like `ExecutionStrategy` only it also stores a handler in case of consensus failure. #[derive(Clone)] pub enum ExecutionManager { - /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. - NativeWhenPossible, - /// Use the given wasm module. The backend on which code is executed code could be - /// trusted to provide all storage or not (i.e. the light client cannot be trusted to provide - /// for all storage queries since the storage entries it has come from an external node). - AlwaysWasm(BackendTrustLevel), - /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of any discrepancy. - Both(F), - /// First native, then if that fails or is not possible, wasm. - NativeElseWasm, + /// Execute with the native equivalent if it is compatible with the given wasm module; otherwise fall back to the wasm. + NativeWhenPossible, + /// Use the given wasm module. The backend on which code is executed code could be + /// trusted to provide all storage or not (i.e. the light client cannot be trusted to provide + /// for all storage queries since the storage entries it has come from an external node). + AlwaysWasm(BackendTrustLevel), + /// Run with both the wasm and the native variant (if compatible). Call `F` in the case of any discrepancy. + Both(F), + /// First native, then if that fails or is not possible, wasm. 
+ NativeElseWasm, } impl<'a, F> From<&'a ExecutionManager> for ExecutionStrategy { - fn from(s: &'a ExecutionManager) -> Self { - match *s { - ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible, - ExecutionManager::AlwaysWasm(_) => ExecutionStrategy::AlwaysWasm, - ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm, - ExecutionManager::Both(_) => ExecutionStrategy::Both, - } - } + fn from(s: &'a ExecutionManager) -> Self { + match *s { + ExecutionManager::NativeWhenPossible => ExecutionStrategy::NativeWhenPossible, + ExecutionManager::AlwaysWasm(_) => ExecutionStrategy::AlwaysWasm, + ExecutionManager::NativeElseWasm => ExecutionStrategy::NativeElseWasm, + ExecutionManager::Both(_) => ExecutionStrategy::Both, + } + } } impl ExecutionStrategy { - /// Gets the corresponding manager for the execution strategy. - pub fn get_manager( - self, - ) -> ExecutionManager> { - match self { - ExecutionStrategy::AlwaysWasm => ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted), - ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, - ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, - ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { - warn!( - "Consensus error between wasm {:?} and native {:?}. Using wasm.", - wasm_result, - native_result, - ); - warn!(" Native result {:?}", native_result); - warn!(" Wasm result {:?}", wasm_result); - wasm_result - }), - } - } + /// Gets the corresponding manager for the execution strategy. 
+ pub fn get_manager( + self, + ) -> ExecutionManager> { + match self { + ExecutionStrategy::AlwaysWasm => { + ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) + } + ExecutionStrategy::NativeWhenPossible => ExecutionManager::NativeWhenPossible, + ExecutionStrategy::NativeElseWasm => ExecutionManager::NativeElseWasm, + ExecutionStrategy::Both => ExecutionManager::Both(|wasm_result, native_result| { + warn!( + "Consensus error between wasm {:?} and native {:?}. Using wasm.", + wasm_result, native_result, + ); + warn!(" Native result {:?}", native_result); + warn!(" Wasm result {:?}", wasm_result); + wasm_result + }), + } + } } /// Evaluate to ExecutionManager::NativeElseWasm, without having to figure out the type. pub fn native_else_wasm() -> ExecutionManager> { - ExecutionManager::NativeElseWasm + ExecutionManager::NativeElseWasm } /// Evaluate to ExecutionManager::AlwaysWasm with trusted backend, without having to figure out the type. fn always_wasm() -> ExecutionManager> { - ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) + ExecutionManager::AlwaysWasm(BackendTrustLevel::Trusted) } /// Evaluate ExecutionManager::AlwaysWasm with untrusted backend, without having to figure out the type. fn always_untrusted_wasm() -> ExecutionManager> { - ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) + ExecutionManager::AlwaysWasm(BackendTrustLevel::Untrusted) } /// The substrate state machine. 
pub struct StateMachine<'a, B, H, N, Exec> - where - H: Hasher, - B: Backend, - N: ChangesTrieBlockNumber, +where + H: Hasher, + B: Backend, + N: ChangesTrieBlockNumber, { - backend: &'a B, - exec: &'a Exec, - method: &'a str, - call_data: &'a [u8], - overlay: &'a mut OverlayedChanges, - extensions: Extensions, - changes_trie_state: Option>, - storage_transaction_cache: Option<&'a mut StorageTransactionCache>, - runtime_code: &'a RuntimeCode<'a>, - stats: StateMachineStats, + backend: &'a B, + exec: &'a Exec, + method: &'a str, + call_data: &'a [u8], + overlay: &'a mut OverlayedChanges, + extensions: Extensions, + changes_trie_state: Option>, + storage_transaction_cache: Option<&'a mut StorageTransactionCache>, + runtime_code: &'a RuntimeCode<'a>, + stats: StateMachineStats, } -impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> where - H: Hasher, - B: Backend, - N: ChangesTrieBlockNumber, +impl<'a, B, H, N, Exec> Drop for StateMachine<'a, B, H, N, Exec> +where + H: Hasher, + B: Backend, + N: ChangesTrieBlockNumber, { - fn drop(&mut self) { - self.backend.register_overlay_stats(&self.stats); - } + fn drop(&mut self) { + self.backend.register_overlay_stats(&self.stats); + } } -impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + Clone + 'static, - B: Backend, - N: crate::changes_trie::BlockNumber, +impl<'a, B, H, N, Exec> StateMachine<'a, B, H, N, Exec> +where + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + Clone + 'static, + B: Backend, + N: crate::changes_trie::BlockNumber, { - /// Creates new substrate state machine. 
- pub fn new( - backend: &'a B, - changes_trie_state: Option>, - overlay: &'a mut OverlayedChanges, - exec: &'a Exec, - method: &'a str, - call_data: &'a [u8], - mut extensions: Extensions, - runtime_code: &'a RuntimeCode, - spawn_handle: Box, - ) -> Self { - extensions.register(CallInWasmExt::new(exec.clone())); - extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); - - Self { - backend, - exec, - method, - call_data, - extensions, - overlay, - changes_trie_state, - storage_transaction_cache: None, - runtime_code, - stats: StateMachineStats::default(), - } - } - - /// Use given `cache` as storage transaction cache. - /// - /// The cache will be used to cache storage transactions that can be build while executing a - /// function in the runtime. For example, when calculating the storage root a transaction is - /// build that will be cached. - pub fn with_storage_transaction_cache( - mut self, - cache: Option<&'a mut StorageTransactionCache>, - ) -> Self { - self.storage_transaction_cache = cache; - self - } - - /// Execute a call using the given state backend, overlayed changes, and call executor. - /// - /// On an error, no prospective changes are written to the overlay. - /// - /// Note: changes to code will be in place if this call is made again. For running partial - /// blocks (e.g. a transaction at a time), ensure a different method is used. - /// - /// Returns the SCALE encoded result of the executed function. - pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { - // We are not giving a native call and thus we are sure that the result can never be a native - // value. 
- self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - strategy.get_manager(), - None, - ).map(NativeOrEncoded::into_encoded) - } - - fn execute_aux( - &mut self, - use_native: bool, - native_call: Option, - ) -> ( - CallResult, - bool, - ) where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - { - let mut cache = StorageTransactionCache::default(); - - let cache = match self.storage_transaction_cache.as_mut() { - Some(cache) => cache, - None => &mut cache, - }; - - let mut ext = Ext::new( - self.overlay, - cache, - self.backend, - self.changes_trie_state.clone(), - Some(&mut self.extensions), - ); - - let id = ext.id; - trace!( - target: "state-trace", "{:04x}: Call {} at {:?}. Input={:?}", - id, - self.method, - self.backend, - HexDisplay::from(&self.call_data), - ); - - let (result, was_native) = self.exec.call( - &mut ext, - self.runtime_code, - self.method, - self.call_data, - use_native, - native_call, - ); - - trace!( - target: "state-trace", "{:04x}: Return. 
Native={:?}, Result={:?}", - id, - was_native, - result, - ); - - (result, was_native) - } - - fn execute_call_with_both_strategy( - &mut self, - mut native_call: Option, - orig_prospective: OverlayedChangeSet, - on_consensus_failure: Handler, - ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult - { - let (result, was_native) = self.execute_aux(true, native_call.take()); - - if was_native { - self.overlay.prospective = orig_prospective.clone(); - let (wasm_result, _) = self.execute_aux( - false, - native_call, - ); - - if (result.is_ok() && wasm_result.is_ok() - && result.as_ref().ok() == wasm_result.as_ref().ok()) - || result.is_err() && wasm_result.is_err() - { - result - } else { - on_consensus_failure(wasm_result, result) - } - } else { - result - } - } - - fn execute_call_with_native_else_wasm_strategy( - &mut self, - mut native_call: Option, - orig_prospective: OverlayedChangeSet, - ) -> CallResult - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - { - let (result, was_native) = self.execute_aux( - true, - native_call.take(), - ); - - if !was_native || result.is_ok() { - result - } else { - self.overlay.prospective = orig_prospective.clone(); - let (wasm_result, _) = self.execute_aux( - false, - native_call, - ); - wasm_result - } - } - - /// Execute a call using the given state backend, overlayed changes, and call executor. - /// - /// On an error, no prospective changes are written to the overlay. - /// - /// Note: changes to code will be in place if this call is made again. For running partial - /// blocks (e.g. a transaction at a time), ensure a different method is used. - /// - /// Returns the result of the executed function either in native representation `R` or - /// in SCALE encoded representation. 
- pub fn execute_using_consensus_failure_handler( - &mut self, - manager: ExecutionManager, - mut native_call: Option, - ) -> Result, Box> - where - R: Decode + Encode + PartialEq, - NC: FnOnce() -> result::Result + UnwindSafe, - Handler: FnOnce( - CallResult, - CallResult, - ) -> CallResult - { - let changes_tries_enabled = self.changes_trie_state.is_some(); - self.overlay.set_collect_extrinsics(changes_tries_enabled); - - let result = { - let orig_prospective = self.overlay.prospective.clone(); - - match manager { - ExecutionManager::Both(on_consensus_failure) => { - self.execute_call_with_both_strategy( - native_call.take(), - orig_prospective, - on_consensus_failure, - ) - }, - ExecutionManager::NativeElseWasm => { - self.execute_call_with_native_else_wasm_strategy( - native_call.take(), - orig_prospective, - ) - }, - ExecutionManager::AlwaysWasm(trust_level) => { - let _abort_guard = match trust_level { - BackendTrustLevel::Trusted => None, - BackendTrustLevel::Untrusted => Some(sp_panic_handler::AbortGuard::never_abort()), - }; - self.execute_aux(false, native_call).0 - }, - ExecutionManager::NativeWhenPossible => { - self.execute_aux(true, native_call).0 - }, - } - }; - - result.map_err(|e| Box::new(e) as _) - } + /// Creates new substrate state machine. + pub fn new( + backend: &'a B, + changes_trie_state: Option>, + overlay: &'a mut OverlayedChanges, + exec: &'a Exec, + method: &'a str, + call_data: &'a [u8], + mut extensions: Extensions, + runtime_code: &'a RuntimeCode, + spawn_handle: Box, + ) -> Self { + extensions.register(CallInWasmExt::new(exec.clone())); + extensions.register(sp_core::traits::TaskExecutorExt::new(spawn_handle)); + + Self { + backend, + exec, + method, + call_data, + extensions, + overlay, + changes_trie_state, + storage_transaction_cache: None, + runtime_code, + stats: StateMachineStats::default(), + } + } + + /// Use given `cache` as storage transaction cache. 
+ /// + /// The cache will be used to cache storage transactions that can be build while executing a + /// function in the runtime. For example, when calculating the storage root a transaction is + /// build that will be cached. + pub fn with_storage_transaction_cache( + mut self, + cache: Option<&'a mut StorageTransactionCache>, + ) -> Self { + self.storage_transaction_cache = cache; + self + } + + /// Execute a call using the given state backend, overlayed changes, and call executor. + /// + /// On an error, no prospective changes are written to the overlay. + /// + /// Note: changes to code will be in place if this call is made again. For running partial + /// blocks (e.g. a transaction at a time), ensure a different method is used. + /// + /// Returns the SCALE encoded result of the executed function. + pub fn execute(&mut self, strategy: ExecutionStrategy) -> Result, Box> { + // We are not giving a native call and thus we are sure that the result can never be a native + // value. + self.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + strategy.get_manager(), + None, + ) + .map(NativeOrEncoded::into_encoded) + } + + fn execute_aux( + &mut self, + use_native: bool, + native_call: Option, + ) -> (CallResult, bool) + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + { + let mut cache = StorageTransactionCache::default(); + + let cache = match self.storage_transaction_cache.as_mut() { + Some(cache) => cache, + None => &mut cache, + }; + + let mut ext = Ext::new( + self.overlay, + cache, + self.backend, + self.changes_trie_state.clone(), + Some(&mut self.extensions), + ); + + let id = ext.id; + trace!( + target: "state-trace", "{:04x}: Call {} at {:?}. 
Input={:?}", + id, + self.method, + self.backend, + HexDisplay::from(&self.call_data), + ); + + let (result, was_native) = self.exec.call( + &mut ext, + self.runtime_code, + self.method, + self.call_data, + use_native, + native_call, + ); + + trace!( + target: "state-trace", "{:04x}: Return. Native={:?}, Result={:?}", + id, + was_native, + result, + ); + + (result, was_native) + } + + fn execute_call_with_both_strategy( + &mut self, + mut native_call: Option, + orig_prospective: OverlayedChangeSet, + on_consensus_failure: Handler, + ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, + { + let (result, was_native) = self.execute_aux(true, native_call.take()); + + if was_native { + self.overlay.prospective = orig_prospective.clone(); + let (wasm_result, _) = self.execute_aux(false, native_call); + + if (result.is_ok() + && wasm_result.is_ok() + && result.as_ref().ok() == wasm_result.as_ref().ok()) + || result.is_err() && wasm_result.is_err() + { + result + } else { + on_consensus_failure(wasm_result, result) + } + } else { + result + } + } + + fn execute_call_with_native_else_wasm_strategy( + &mut self, + mut native_call: Option, + orig_prospective: OverlayedChangeSet, + ) -> CallResult + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + { + let (result, was_native) = self.execute_aux(true, native_call.take()); + + if !was_native || result.is_ok() { + result + } else { + self.overlay.prospective = orig_prospective.clone(); + let (wasm_result, _) = self.execute_aux(false, native_call); + wasm_result + } + } + + /// Execute a call using the given state backend, overlayed changes, and call executor. + /// + /// On an error, no prospective changes are written to the overlay. + /// + /// Note: changes to code will be in place if this call is made again. For running partial + /// blocks (e.g. 
a transaction at a time), ensure a different method is used. + /// + /// Returns the result of the executed function either in native representation `R` or + /// in SCALE encoded representation. + pub fn execute_using_consensus_failure_handler( + &mut self, + manager: ExecutionManager, + mut native_call: Option, + ) -> Result, Box> + where + R: Decode + Encode + PartialEq, + NC: FnOnce() -> result::Result + UnwindSafe, + Handler: FnOnce( + CallResult, + CallResult, + ) -> CallResult, + { + let changes_tries_enabled = self.changes_trie_state.is_some(); + self.overlay.set_collect_extrinsics(changes_tries_enabled); + + let result = { + let orig_prospective = self.overlay.prospective.clone(); + + match manager { + ExecutionManager::Both(on_consensus_failure) => self + .execute_call_with_both_strategy( + native_call.take(), + orig_prospective, + on_consensus_failure, + ), + ExecutionManager::NativeElseWasm => self + .execute_call_with_native_else_wasm_strategy( + native_call.take(), + orig_prospective, + ), + ExecutionManager::AlwaysWasm(trust_level) => { + let _abort_guard = match trust_level { + BackendTrustLevel::Trusted => None, + BackendTrustLevel::Untrusted => { + Some(sp_panic_handler::AbortGuard::never_abort()) + } + }; + self.execute_aux(false, native_call).0 + } + ExecutionManager::NativeWhenPossible => self.execute_aux(true, native_call).0, + } + }; + + result.map_err(|e| Box::new(e) as _) + } } /// Prove execution using the given state backend, overlayed changes, and call executor. 
pub fn prove_execution( - mut backend: B, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Box, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, + mut backend: B, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Box, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, ) -> Result<(Vec, StorageProof), Box> where - B: Backend, - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, + B: Backend, + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + Clone + 'static, + N: crate::changes_trie::BlockNumber, { - let trie_backend = backend.as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_execution_on_trie_backend::<_, _, N, _>( - trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_execution_on_trie_backend::<_, _, N, _>( + trie_backend, + overlay, + exec, + spawn_handle, + method, + call_data, + runtime_code, + ) } /// Prove execution using the given trie backend, overlayed changes, and call executor. @@ -485,650 +475,650 @@ where /// Note: changes to code will be in place if this call is made again. For running partial /// blocks (e.g. a transaction at a time), ensure a different method is used. 
pub fn prove_execution_on_trie_backend( - trie_backend: &TrieBackend, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Box, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, + trie_backend: &TrieBackend, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Box, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, ) -> Result<(Vec, StorageProof), Box> where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + 'static + Clone, - N: crate::changes_trie::BlockNumber, + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + 'static + Clone, + N: crate::changes_trie::BlockNumber, { - let proving_backend = proving_backend::ProvingBackend::new(trie_backend); - let mut sm = StateMachine::<_, H, N, Exec>::new( - &proving_backend, - None, - overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); - - let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_wasm(), - None, - )?; - let proof = sm.backend.extract_proof(); - Ok((result.into_encoded(), proof)) + let proving_backend = proving_backend::ProvingBackend::new(trie_backend); + let mut sm = StateMachine::<_, H, N, Exec>::new( + &proving_backend, + None, + overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); + + let result = sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_wasm(), + None, + )?; + let proof = sm.backend.extract_proof(); + Ok((result.into_encoded(), proof)) } /// Check execution proof, generated by `prove_execution` call. 
pub fn execution_proof_check( - root: H::Out, - proof: StorageProof, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Box, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, + root: H::Out, + proof: StorageProof, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Box, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, ) -> Result, Box> where - H: Hasher, - Exec: CodeExecutor + Clone + 'static, - H::Out: Ord + 'static + codec::Codec, - N: crate::changes_trie::BlockNumber, + H: Hasher, + Exec: CodeExecutor + Clone + 'static, + H::Out: Ord + 'static + codec::Codec, + N: crate::changes_trie::BlockNumber, { - let trie_backend = create_proof_check_backend::(root.into(), proof)?; - execution_proof_check_on_trie_backend::<_, N, _>( - &trie_backend, - overlay, - exec, - spawn_handle, - method, - call_data, - runtime_code, - ) + let trie_backend = create_proof_check_backend::(root.into(), proof)?; + execution_proof_check_on_trie_backend::<_, N, _>( + &trie_backend, + overlay, + exec, + spawn_handle, + method, + call_data, + runtime_code, + ) } /// Check execution proof on proving backend, generated by `prove_execution` call. 
pub fn execution_proof_check_on_trie_backend( - trie_backend: &TrieBackend, H>, - overlay: &mut OverlayedChanges, - exec: &Exec, - spawn_handle: Box, - method: &str, - call_data: &[u8], - runtime_code: &RuntimeCode, + trie_backend: &TrieBackend, H>, + overlay: &mut OverlayedChanges, + exec: &Exec, + spawn_handle: Box, + method: &str, + call_data: &[u8], + runtime_code: &RuntimeCode, ) -> Result, Box> where - H: Hasher, - H::Out: Ord + 'static + codec::Codec, - Exec: CodeExecutor + Clone + 'static, - N: crate::changes_trie::BlockNumber, + H: Hasher, + H::Out: Ord + 'static + codec::Codec, + Exec: CodeExecutor + Clone + 'static, + N: crate::changes_trie::BlockNumber, { - let mut sm = StateMachine::<_, H, N, Exec>::new( - trie_backend, - None, - overlay, - exec, - method, - call_data, - Extensions::default(), - runtime_code, - spawn_handle, - ); - - sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - always_untrusted_wasm(), - None, - ).map(NativeOrEncoded::into_encoded) + let mut sm = StateMachine::<_, H, N, Exec>::new( + trie_backend, + None, + overlay, + exec, + method, + call_data, + Extensions::default(), + runtime_code, + spawn_handle, + ); + + sm.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + always_untrusted_wasm(), + None, + ) + .map(NativeOrEncoded::into_encoded) } /// Generate storage read proof. 
-pub fn prove_read( - mut backend: B, - keys: I, -) -> Result> +pub fn prove_read(mut backend: B, keys: I) -> Result> where - B: Backend, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() - .ok_or_else( - || Box::new(ExecutionError::UnableToGenerateProof) as Box - )?; - prove_read_on_trie_backend(trie_backend, keys) + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_read_on_trie_backend(trie_backend, keys) } /// Generate child storage read proof. pub fn prove_child_read( - mut backend: B, - storage_key: &[u8], - child_info: ChildInfo, - keys: I, + mut backend: B, + storage_key: &[u8], + child_info: ChildInfo, + keys: I, ) -> Result> where - B: Backend, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, + B: Backend, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let trie_backend = backend.as_trie_backend() - .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; - prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys) + let trie_backend = backend + .as_trie_backend() + .ok_or_else(|| Box::new(ExecutionError::UnableToGenerateProof) as Box)?; + prove_child_read_on_trie_backend(trie_backend, storage_key, child_info, keys) } /// Generate storage read proof on pre-created trie backend. 
pub fn prove_read_on_trie_backend( - trie_backend: &TrieBackend, - keys: I, + trie_backend: &TrieBackend, + keys: I, ) -> Result> where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); - for key in keys.into_iter() { - proving_backend - .storage(key.as_ref()) - .map_err(|e| Box::new(e) as Box)?; - } - Ok(proving_backend.extract_proof()) + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + for key in keys.into_iter() { + proving_backend + .storage(key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + Ok(proving_backend.extract_proof()) } /// Generate storage read proof on pre-created trie backend. pub fn prove_child_read_on_trie_backend( - trie_backend: &TrieBackend, - storage_key: &[u8], - child_info: ChildInfo, - keys: I, + trie_backend: &TrieBackend, + storage_key: &[u8], + child_info: ChildInfo, + keys: I, ) -> Result> where - S: trie_backend_essence::TrieBackendStorage, - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, + S: trie_backend_essence::TrieBackendStorage, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); - for key in keys.into_iter() { - proving_backend - .child_storage(storage_key, child_info.clone(), key.as_ref()) - .map_err(|e| Box::new(e) as Box)?; - } - Ok(proving_backend.extract_proof()) + let proving_backend = proving_backend::ProvingBackend::<_, H>::new(trie_backend); + for key in keys.into_iter() { + proving_backend + .child_storage(storage_key, child_info.clone(), key.as_ref()) + .map_err(|e| Box::new(e) as Box)?; + } + Ok(proving_backend.extract_proof()) } /// Check storage read 
proof, generated by `prove_read` call. pub fn read_proof_check( - root: H::Out, - proof: StorageProof, - keys: I, + root: H::Out, + proof: StorageProof, + keys: I, ) -> Result, Option>>, Box> where - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let proving_backend = create_proof_check_backend::(root, proof)?; - let mut result = HashMap::new(); - for key in keys.into_iter() { - let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; - result.insert(key.as_ref().to_vec(), value); - } - Ok(result) + let proving_backend = create_proof_check_backend::(root, proof)?; + let mut result = HashMap::new(); + for key in keys.into_iter() { + let value = read_proof_check_on_proving_backend(&proving_backend, key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); + } + Ok(result) } /// Check child storage read proof, generated by `prove_child_read` call. pub fn read_child_proof_check( - root: H::Out, - proof: StorageProof, - storage_key: &[u8], - keys: I, + root: H::Out, + proof: StorageProof, + storage_key: &[u8], + keys: I, ) -> Result, Option>>, Box> where - H: Hasher, - H::Out: Ord + Codec, - I: IntoIterator, - I::Item: AsRef<[u8]>, + H: Hasher, + H::Out: Ord + Codec, + I: IntoIterator, + I::Item: AsRef<[u8]>, { - let proving_backend = create_proof_check_backend::(root, proof)?; - let mut result = HashMap::new(); - for key in keys.into_iter() { - let value = read_child_proof_check_on_proving_backend( - &proving_backend, - storage_key, - key.as_ref(), - )?; - result.insert(key.as_ref().to_vec(), value); - } - Ok(result) + let proving_backend = create_proof_check_backend::(root, proof)?; + let mut result = HashMap::new(); + for key in keys.into_iter() { + let value = + read_child_proof_check_on_proving_backend(&proving_backend, storage_key, key.as_ref())?; + result.insert(key.as_ref().to_vec(), value); + } + Ok(result) } /// Check 
storage read proof on pre-created proving backend. pub fn read_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, - key: &[u8], + proving_backend: &TrieBackend, H>, + key: &[u8], ) -> Result>, Box> where - H: Hasher, - H::Out: Ord + Codec, + H: Hasher, + H::Out: Ord + Codec, { - proving_backend.storage(key).map_err(|e| Box::new(e) as Box) + proving_backend + .storage(key) + .map_err(|e| Box::new(e) as Box) } /// Check child storage read proof on pre-created proving backend. pub fn read_child_proof_check_on_proving_backend( - proving_backend: &TrieBackend, H>, - storage_key: &[u8], - key: &[u8], + proving_backend: &TrieBackend, H>, + storage_key: &[u8], + key: &[u8], ) -> Result>, Box> where - H: Hasher, - H::Out: Ord + Codec, + H: Hasher, + H::Out: Ord + Codec, { - // Not a prefixed memory db, using empty unique id and include root resolution. - proving_backend.child_storage(storage_key, ChildInfo::new_default(&[]), key) - .map_err(|e| Box::new(e) as Box) + // Not a prefixed memory db, using empty unique id and include root resolution. 
+ proving_backend + .child_storage(storage_key, ChildInfo::new_default(&[]), key) + .map_err(|e| Box::new(e) as Box) } #[cfg(test)] mod tests { - use std::collections::BTreeMap; - use codec::Encode; - use overlayed_changes::OverlayedValue; - use super::*; - use super::ext::Ext; - use super::changes_trie::Configuration as ChangesTrieConfig; - use sp_core::{map, traits::{Externalities, RuntimeCode}, storage::ChildStorageKey}; - use sp_runtime::traits::BlakeTwo256; - - #[derive(Clone)] - struct DummyCodeExecutor { - change_changes_trie_config: bool, - native_available: bool, - native_succeeds: bool, - fallback_succeeds: bool, - } - - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - - impl CodeExecutor for DummyCodeExecutor { - type Error = u8; - - fn call< - R: Encode + Decode + PartialEq, - NC: FnOnce() -> result::Result, - >( - &self, - ext: &mut dyn Externalities, - _: &RuntimeCode, - _method: &str, - _data: &[u8], - use_native: bool, - _native_call: Option, - ) -> (CallResult, bool) { - if self.change_changes_trie_config { - ext.place_storage( - sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), - Some( - ChangesTrieConfig { - digest_interval: 777, - digest_levels: 333, - }.encode() - ) - ); - } - - let using_native = use_native && self.native_available; - match (using_native, self.native_succeeds, self.fallback_succeeds) { - (true, true, _) | (false, _, true) => { - ( - Ok( - NativeOrEncoded::Encoded( - vec![ - ext.storage(b"value1").unwrap()[0] + - ext.storage(b"value2").unwrap()[0] - ] - ) - ), - using_native - ) - }, - _ => (Err(0), using_native), - } - } - } - - impl sp_core::traits::CallInWasm for DummyCodeExecutor { - fn call_in_wasm( - &self, - _: &[u8], - _: Option>, - _: &str, - _: &[u8], - _: &mut dyn Externalities, - ) -> std::result::Result, String> { - unimplemented!("Not required in tests.") - } - } - - #[test] - fn execute_works() { - let backend = trie_backend::tests::test_trie(); - let mut 
overlayed_changes = Default::default(); - let wasm_code = RuntimeCode::empty(); - - let mut state_machine = StateMachine::new( - &backend, - changes_trie::disabled_state::<_, u64>(), - &mut overlayed_changes, - &DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - Default::default(), - &wasm_code, - sp_core::tasks::executor(), - ); - - assert_eq!( - state_machine.execute(ExecutionStrategy::NativeWhenPossible).unwrap(), - vec![66], - ); - } - - - #[test] - fn execute_works_with_native_else_wasm() { - let backend = trie_backend::tests::test_trie(); - let mut overlayed_changes = Default::default(); - let wasm_code = RuntimeCode::empty(); - - let mut state_machine = StateMachine::new( - &backend, - changes_trie::disabled_state::<_, u64>(), - &mut overlayed_changes, - &DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: true, - }, - "test", - &[], - Default::default(), - &wasm_code, - sp_core::tasks::executor(), - ); - - assert_eq!(state_machine.execute(ExecutionStrategy::NativeElseWasm).unwrap(), vec![66]); - } - - #[test] - fn dual_execution_strategy_detects_consensus_failure() { - let mut consensus_failed = false; - let backend = trie_backend::tests::test_trie(); - let mut overlayed_changes = Default::default(); - let wasm_code = RuntimeCode::empty(); - - let mut state_machine = StateMachine::new( - &backend, - changes_trie::disabled_state::<_, u64>(), - &mut overlayed_changes, - &DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: false, - }, - "test", - &[], - Default::default(), - &wasm_code, - sp_core::tasks::executor(), - ); - - assert!( - state_machine.execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( - ExecutionManager::Both(|we, _ne| { - consensus_failed = true; - we - }), - 
None, - ).is_err() - ); - assert!(consensus_failed); - } - - #[test] - fn prove_execution_and_proof_check_works() { - let executor = DummyCodeExecutor { - change_changes_trie_config: false, - native_available: true, - native_succeeds: true, - fallback_succeeds: true, - }; - - // fetch execution proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(std::iter::empty()).0; - let (remote_result, remote_proof) = prove_execution::<_, _, u64, _>( - remote_backend, - &mut Default::default(), - &executor, - sp_core::tasks::executor(), - "test", - &[], - &RuntimeCode::empty(), - ).unwrap(); - - // check proof locally - let local_result = execution_proof_check::( - remote_root, - remote_proof, - &mut Default::default(), - &executor, - sp_core::tasks::executor(), - "test", - &[], - &RuntimeCode::empty(), - ).unwrap(); - - // check that both results are correct - assert_eq!(remote_result, vec![66]); - assert_eq!(remote_result, local_result); - } - - #[test] - fn clear_prefix_in_ext_works() { - let initial: BTreeMap<_, _> = map![ - b"aaa".to_vec() => b"0".to_vec(), - b"abb".to_vec() => b"1".to_vec(), - b"abc".to_vec() => b"2".to_vec(), - b"bbb".to_vec() => b"3".to_vec() - ]; - let mut state = InMemoryBackend::::from(initial); - let backend = state.as_trie_backend().unwrap(); - let mut overlay = OverlayedChanges { - committed: map![ - b"aba".to_vec() => OverlayedValue::from(Some(b"1312".to_vec())), - b"bab".to_vec() => OverlayedValue::from(Some(b"228".to_vec())) - ], - prospective: map![ - b"abd".to_vec() => OverlayedValue::from(Some(b"69".to_vec())), - b"bbd".to_vec() => OverlayedValue::from(Some(b"42".to_vec())) - ], - ..Default::default() - }; - - { - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); - ext.clear_prefix(b"ab"); - } - overlay.commit_prospective(); - - 
assert_eq!( - overlay.committed, - map![ - b"abc".to_vec() => None.into(), - b"abb".to_vec() => None.into(), - b"aba".to_vec() => None.into(), - b"abd".to_vec() => None.into(), - - b"bab".to_vec() => Some(b"228".to_vec()).into(), - b"bbd".to_vec() => Some(b"42".to_vec()).into() - ], - ); - } - - #[test] - fn set_child_storage_works() { - let mut state = InMemoryBackend::::default(); - let backend = state.as_trie_backend().unwrap(); - let mut overlay = OverlayedChanges::default(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); - - ext.set_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, - b"abc".to_vec(), - b"def".to_vec() - ); - assert_eq!( - ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, - b"abc" - ), - Some(b"def".to_vec()) - ); - ext.kill_child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, - ); - assert_eq!( - ext.child_storage( - ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), - CHILD_INFO_1, - b"abc" - ), - None - ); - } - - #[test] - fn prove_read_and_proof_check_works() { - // fetch read proof from 'remote' full node - let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); - // check proof locally - let local_result1 = read_proof_check::( - remote_root, - remote_proof.clone(), - &[b"value2"], - ).unwrap(); - let local_result2 = read_proof_check::( - remote_root, - remote_proof.clone(), - &[&[0xff]], - ).is_ok(); - // check that results are correct - assert_eq!( - local_result1.into_iter().collect::>(), - vec![(b"value2".to_vec(), Some(vec![24]))], - ); - assert_eq!(local_result2, 
false); - // on child trie - let remote_backend = trie_backend::tests::test_trie(); - let remote_root = remote_backend.storage_root(::std::iter::empty()).0; - let remote_proof = prove_child_read( - remote_backend, - b":child_storage:default:sub1", - CHILD_INFO_1, - &[b"value3"], - ).unwrap(); - let local_result1 = read_child_proof_check::( - remote_root, - remote_proof.clone(), - b":child_storage:default:sub1", - &[b"value3"], - ).unwrap(); - let local_result2 = read_child_proof_check::( - remote_root, - remote_proof.clone(), - b":child_storage:default:sub1", - &[b"value2"], - ).unwrap(); - assert_eq!( - local_result1.into_iter().collect::>(), - vec![(b"value3".to_vec(), Some(vec![142]))], - ); - assert_eq!( - local_result2.into_iter().collect::>(), - vec![(b"value2".to_vec(), None)], - ); - } - - #[test] - fn child_storage_uuid() { - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); - use crate::trie_backend::tests::test_trie; - let mut overlay = OverlayedChanges::default(); - - let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); - let mut transaction = { - let backend = test_trie(); - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - changes_trie::disabled_state::<_, u64>(), - None, - ); - ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); - ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); - ext.storage_root(); - cache.transaction.unwrap() - }; - let mut duplicate = false; - for (k, (value, rc)) in transaction.drain().iter() { - // look for a key inserted twice: transaction rc is 2 - if *rc == 2 { - duplicate = true; - println!("test duplicate for {:?} {:?}", k, value); - } - } - 
assert!(!duplicate); - } + use super::changes_trie::Configuration as ChangesTrieConfig; + use super::ext::Ext; + use super::*; + use codec::Encode; + use overlayed_changes::OverlayedValue; + use sp_core::{ + map, + storage::ChildStorageKey, + traits::{Externalities, RuntimeCode}, + }; + use sp_runtime::traits::BlakeTwo256; + use std::collections::BTreeMap; + + #[derive(Clone)] + struct DummyCodeExecutor { + change_changes_trie_config: bool, + native_available: bool, + native_succeeds: bool, + fallback_succeeds: bool, + } + + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + + impl CodeExecutor for DummyCodeExecutor { + type Error = u8; + + fn call result::Result>( + &self, + ext: &mut dyn Externalities, + _: &RuntimeCode, + _method: &str, + _data: &[u8], + use_native: bool, + _native_call: Option, + ) -> (CallResult, bool) { + if self.change_changes_trie_config { + ext.place_storage( + sp_core::storage::well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), + Some( + ChangesTrieConfig { + digest_interval: 777, + digest_levels: 333, + } + .encode(), + ), + ); + } + + let using_native = use_native && self.native_available; + match (using_native, self.native_succeeds, self.fallback_succeeds) { + (true, true, _) | (false, _, true) => ( + Ok(NativeOrEncoded::Encoded(vec![ + ext.storage(b"value1").unwrap()[0] + ext.storage(b"value2").unwrap()[0], + ])), + using_native, + ), + _ => (Err(0), using_native), + } + } + } + + impl sp_core::traits::CallInWasm for DummyCodeExecutor { + fn call_in_wasm( + &self, + _: &[u8], + _: Option>, + _: &str, + _: &[u8], + _: &mut dyn Externalities, + ) -> std::result::Result, String> { + unimplemented!("Not required in tests.") + } + } + + #[test] + fn execute_works() { + let backend = trie_backend::tests::test_trie(); + let mut overlayed_changes = Default::default(); + let wasm_code = RuntimeCode::empty(); + + let mut state_machine = StateMachine::new( + &backend, + changes_trie::disabled_state::<_, u64>(), + &mut 
overlayed_changes, + &DummyCodeExecutor { + change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: true, + }, + "test", + &[], + Default::default(), + &wasm_code, + sp_core::tasks::executor(), + ); + + assert_eq!( + state_machine + .execute(ExecutionStrategy::NativeWhenPossible) + .unwrap(), + vec![66], + ); + } + + #[test] + fn execute_works_with_native_else_wasm() { + let backend = trie_backend::tests::test_trie(); + let mut overlayed_changes = Default::default(); + let wasm_code = RuntimeCode::empty(); + + let mut state_machine = StateMachine::new( + &backend, + changes_trie::disabled_state::<_, u64>(), + &mut overlayed_changes, + &DummyCodeExecutor { + change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: true, + }, + "test", + &[], + Default::default(), + &wasm_code, + sp_core::tasks::executor(), + ); + + assert_eq!( + state_machine + .execute(ExecutionStrategy::NativeElseWasm) + .unwrap(), + vec![66] + ); + } + + #[test] + fn dual_execution_strategy_detects_consensus_failure() { + let mut consensus_failed = false; + let backend = trie_backend::tests::test_trie(); + let mut overlayed_changes = Default::default(); + let wasm_code = RuntimeCode::empty(); + + let mut state_machine = StateMachine::new( + &backend, + changes_trie::disabled_state::<_, u64>(), + &mut overlayed_changes, + &DummyCodeExecutor { + change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: false, + }, + "test", + &[], + Default::default(), + &wasm_code, + sp_core::tasks::executor(), + ); + + assert!(state_machine + .execute_using_consensus_failure_handler::<_, NeverNativeValue, fn() -> _>( + ExecutionManager::Both(|we, _ne| { + consensus_failed = true; + we + }), + None, + ) + .is_err()); + assert!(consensus_failed); + } + + #[test] + fn prove_execution_and_proof_check_works() { + let executor = DummyCodeExecutor { + 
change_changes_trie_config: false, + native_available: true, + native_succeeds: true, + fallback_succeeds: true, + }; + + // fetch execution proof from 'remote' full node + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(std::iter::empty()).0; + let (remote_result, remote_proof) = prove_execution::<_, _, u64, _>( + remote_backend, + &mut Default::default(), + &executor, + sp_core::tasks::executor(), + "test", + &[], + &RuntimeCode::empty(), + ) + .unwrap(); + + // check proof locally + let local_result = execution_proof_check::( + remote_root, + remote_proof, + &mut Default::default(), + &executor, + sp_core::tasks::executor(), + "test", + &[], + &RuntimeCode::empty(), + ) + .unwrap(); + + // check that both results are correct + assert_eq!(remote_result, vec![66]); + assert_eq!(remote_result, local_result); + } + + #[test] + fn clear_prefix_in_ext_works() { + let initial: BTreeMap<_, _> = map![ + b"aaa".to_vec() => b"0".to_vec(), + b"abb".to_vec() => b"1".to_vec(), + b"abc".to_vec() => b"2".to_vec(), + b"bbb".to_vec() => b"3".to_vec() + ]; + let mut state = InMemoryBackend::::from(initial); + let backend = state.as_trie_backend().unwrap(); + let mut overlay = OverlayedChanges { + committed: map![ + b"aba".to_vec() => OverlayedValue::from(Some(b"1312".to_vec())), + b"bab".to_vec() => OverlayedValue::from(Some(b"228".to_vec())) + ], + prospective: map![ + b"abd".to_vec() => OverlayedValue::from(Some(b"69".to_vec())), + b"bbd".to_vec() => OverlayedValue::from(Some(b"42".to_vec())) + ], + ..Default::default() + }; + + { + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + ext.clear_prefix(b"ab"); + } + overlay.commit_prospective(); + + assert_eq!( + overlay.committed, + map![ + b"abc".to_vec() => None.into(), + b"abb".to_vec() => None.into(), + b"aba".to_vec() => None.into(), + 
b"abd".to_vec() => None.into(), + + b"bab".to_vec() => Some(b"228".to_vec()).into(), + b"bbd".to_vec() => Some(b"42".to_vec()).into() + ], + ); + } + + #[test] + fn set_child_storage_works() { + let mut state = InMemoryBackend::::default(); + let backend = state.as_trie_backend().unwrap(); + let mut overlay = OverlayedChanges::default(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + + ext.set_child_storage( + ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, + b"abc".to_vec(), + b"def".to_vec(), + ); + assert_eq!( + ext.child_storage( + ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, + b"abc" + ), + Some(b"def".to_vec()) + ); + ext.kill_child_storage( + ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, + ); + assert_eq!( + ext.child_storage( + ChildStorageKey::from_slice(b":child_storage:default:testchild").unwrap(), + CHILD_INFO_1, + b"abc" + ), + None + ); + } + + #[test] + fn prove_read_and_proof_check_works() { + // fetch read proof from 'remote' full node + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + let remote_proof = prove_read(remote_backend, &[b"value2"]).unwrap(); + // check proof locally + let local_result1 = + read_proof_check::(remote_root, remote_proof.clone(), &[b"value2"]) + .unwrap(); + let local_result2 = + read_proof_check::(remote_root, remote_proof.clone(), &[&[0xff]]) + .is_ok(); + // check that results are correct + assert_eq!( + local_result1.into_iter().collect::>(), + vec![(b"value2".to_vec(), Some(vec![24]))], + ); + assert_eq!(local_result2, false); + // on child trie + let remote_backend = trie_backend::tests::test_trie(); + let remote_root = remote_backend.storage_root(::std::iter::empty()).0; + 
let remote_proof = prove_child_read( + remote_backend, + b":child_storage:default:sub1", + CHILD_INFO_1, + &[b"value3"], + ) + .unwrap(); + let local_result1 = read_child_proof_check::( + remote_root, + remote_proof.clone(), + b":child_storage:default:sub1", + &[b"value3"], + ) + .unwrap(); + let local_result2 = read_child_proof_check::( + remote_root, + remote_proof.clone(), + b":child_storage:default:sub1", + &[b"value2"], + ) + .unwrap(); + assert_eq!( + local_result1.into_iter().collect::>(), + vec![(b"value3".to_vec(), Some(vec![142]))], + ); + assert_eq!( + local_result2.into_iter().collect::>(), + vec![(b"value2".to_vec(), None)], + ); + } + + #[test] + fn child_storage_uuid() { + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + use crate::trie_backend::tests::test_trie; + let mut overlay = OverlayedChanges::default(); + + let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub_test1").unwrap(); + let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub_test2").unwrap(); + let mut transaction = { + let backend = test_trie(); + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &backend, + changes_trie::disabled_state::<_, u64>(), + None, + ); + ext.set_child_storage(subtrie1, CHILD_INFO_1, b"abc".to_vec(), b"def".to_vec()); + ext.set_child_storage(subtrie2, CHILD_INFO_2, b"abc".to_vec(), b"def".to_vec()); + ext.storage_root(); + cache.transaction.unwrap() + }; + let mut duplicate = false; + for (k, (value, rc)) in transaction.drain().iter() { + // look for a key inserted twice: transaction rc is 2 + if *rc == 2 { + duplicate = true; + println!("test duplicate for {:?} {:?}", k, value); + } + } + assert!(!duplicate); + } } diff --git a/primitives/state-machine/src/overlayed_changes.rs b/primitives/state-machine/src/overlayed_changes.rs index 
ab50c61391..380e638213 100644 --- a/primitives/state-machine/src/overlayed_changes.rs +++ b/primitives/state-machine/src/overlayed_changes.rs @@ -17,19 +17,19 @@ //! The overlayed changes to state. use crate::{ - backend::Backend, ChangesTrieTransaction, - changes_trie::{ - NO_EXTRINSIC_INDEX, BlockNumber, build_changes_trie, - State as ChangesTrieState, - }, - stats::StateMachineStats, + backend::Backend, + changes_trie::{ + build_changes_trie, BlockNumber, State as ChangesTrieState, NO_EXTRINSIC_INDEX, + }, + stats::StateMachineStats, + ChangesTrieTransaction, }; +use codec::{Decode, Encode}; +use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, ChildInfo, OwnedChildInfo}; +use std::collections::{BTreeMap, BTreeSet, HashMap}; #[cfg(test)] use std::iter::FromIterator; -use std::collections::{HashMap, BTreeMap, BTreeSet}; -use codec::{Decode, Encode}; -use sp_core::storage::{well_known_keys::EXTRINSIC_INDEX, OwnedChildInfo, ChildInfo}; use std::{mem, ops}; use hash_db::Hasher; @@ -52,35 +52,35 @@ pub type ChildStorageCollection = Vec<(StorageKey, StorageCollection)>; /// that can be cleared. #[derive(Debug, Default, Clone)] pub struct OverlayedChanges { - /// Changes that are not yet committed. - pub(crate) prospective: OverlayedChangeSet, - /// Committed changes. - pub(crate) committed: OverlayedChangeSet, - /// True if extrinsics stats must be collected. - pub(crate) collect_extrinsics: bool, - /// Collect statistic on this execution. - pub(crate) stats: StateMachineStats, + /// Changes that are not yet committed. + pub(crate) prospective: OverlayedChangeSet, + /// Committed changes. + pub(crate) committed: OverlayedChangeSet, + /// True if extrinsics stats must be collected. + pub(crate) collect_extrinsics: bool, + /// Collect statistic on this execution. + pub(crate) stats: StateMachineStats, } /// The storage value, used inside OverlayedChanges. 
#[derive(Debug, Default, Clone)] #[cfg_attr(test, derive(PartialEq))] pub struct OverlayedValue { - /// Current value. None if value has been deleted. - pub value: Option, - /// The set of extrinsic indices where the values has been changed. - /// Is filled only if runtime has announced changes trie support. - pub extrinsics: Option>, + /// Current value. None if value has been deleted. + pub value: Option, + /// The set of extrinsic indices where the values has been changed. + /// Is filled only if runtime has announced changes trie support. + pub extrinsics: Option>, } /// Prospective or committed overlayed change set. #[derive(Debug, Default, Clone)] #[cfg_attr(test, derive(PartialEq))] pub struct OverlayedChangeSet { - /// Top level storage changes. - pub top: BTreeMap, - /// Child storage changes. - pub children: HashMap, OwnedChildInfo)>, + /// Top level storage changes. + pub top: BTreeMap, + /// Child storage changes. + pub children: HashMap, OwnedChildInfo)>, } /// A storage changes structure that can be generated by the data collected in [`OverlayedChanges`]. @@ -88,818 +88,996 @@ pub struct OverlayedChangeSet { /// This contains all the changes to the storage and transactions to apply theses changes to the /// backend. pub struct StorageChanges { - /// All changes to the main storage. - /// - /// A value of `None` means that it was deleted. - pub main_storage_changes: StorageCollection, - /// All changes to the child storages. - pub child_storage_changes: ChildStorageCollection, - /// A transaction for the backend that contains all changes from - /// [`main_storage_changes`](Self::main_storage_changes) and from - /// [`child_storage_changes`](Self::child_storage_changes). - pub transaction: Transaction, - /// The storage root after applying the transaction. - pub transaction_storage_root: H::Out, - /// Contains the transaction for the backend for the changes trie. - /// - /// If changes trie is disabled the value is set to `None`. 
- pub changes_trie_transaction: Option>, + /// All changes to the main storage. + /// + /// A value of `None` means that it was deleted. + pub main_storage_changes: StorageCollection, + /// All changes to the child storages. + pub child_storage_changes: ChildStorageCollection, + /// A transaction for the backend that contains all changes from + /// [`main_storage_changes`](Self::main_storage_changes) and from + /// [`child_storage_changes`](Self::child_storage_changes). + pub transaction: Transaction, + /// The storage root after applying the transaction. + pub transaction_storage_root: H::Out, + /// Contains the transaction for the backend for the changes trie. + /// + /// If changes trie is disabled the value is set to `None`. + pub changes_trie_transaction: Option>, } impl StorageChanges { - /// Deconstruct into the inner values - pub fn into_inner(self) -> ( - StorageCollection, - ChildStorageCollection, - Transaction, - H::Out, - Option>, - ) { - ( - self.main_storage_changes, - self.child_storage_changes, - self.transaction, - self.transaction_storage_root, - self.changes_trie_transaction, - ) - } + /// Deconstruct into the inner values + pub fn into_inner( + self, + ) -> ( + StorageCollection, + ChildStorageCollection, + Transaction, + H::Out, + Option>, + ) { + ( + self.main_storage_changes, + self.child_storage_changes, + self.transaction, + self.transaction_storage_root, + self.changes_trie_transaction, + ) + } } /// The storage transaction are calculated as part of the `storage_root` and /// `changes_trie_storage_root`. These transactions can be reused for importing the block into the /// storage. So, we cache them to not require a recomputation of those transactions. pub struct StorageTransactionCache { - /// Contains the changes for the main and the child storages as one transaction. - pub(crate) transaction: Option, - /// The storage root after applying the transaction. 
- pub(crate) transaction_storage_root: Option, - /// Contains the changes trie transaction. - pub(crate) changes_trie_transaction: Option>>, - /// The storage root after applying the changes trie transaction. - pub(crate) changes_trie_transaction_storage_root: Option>, + /// Contains the changes for the main and the child storages as one transaction. + pub(crate) transaction: Option, + /// The storage root after applying the transaction. + pub(crate) transaction_storage_root: Option, + /// Contains the changes trie transaction. + pub(crate) changes_trie_transaction: Option>>, + /// The storage root after applying the changes trie transaction. + pub(crate) changes_trie_transaction_storage_root: Option>, } impl StorageTransactionCache { - /// Reset the cached transactions. - pub fn reset(&mut self) { - *self = Self::default(); - } + /// Reset the cached transactions. + pub fn reset(&mut self) { + *self = Self::default(); + } } -impl Default for StorageTransactionCache { - fn default() -> Self { - Self { - transaction: None, - transaction_storage_root: None, - changes_trie_transaction: None, - changes_trie_transaction_storage_root: None, - } - } +impl Default + for StorageTransactionCache +{ + fn default() -> Self { + Self { + transaction: None, + transaction_storage_root: None, + changes_trie_transaction: None, + changes_trie_transaction_storage_root: None, + } + } } -impl Default for StorageChanges { - fn default() -> Self { - Self { - main_storage_changes: Default::default(), - child_storage_changes: Default::default(), - transaction: Default::default(), - transaction_storage_root: Default::default(), - changes_trie_transaction: None, - } - } +impl Default + for StorageChanges +{ + fn default() -> Self { + Self { + main_storage_changes: Default::default(), + child_storage_changes: Default::default(), + transaction: Default::default(), + transaction_storage_root: Default::default(), + changes_trie_transaction: None, + } + } } #[cfg(test)] impl 
FromIterator<(StorageKey, OverlayedValue)> for OverlayedChangeSet { - fn from_iter>(iter: T) -> Self { - Self { - top: iter.into_iter().collect(), - children: Default::default(), - } - } + fn from_iter>(iter: T) -> Self { + Self { + top: iter.into_iter().collect(), + children: Default::default(), + } + } } impl OverlayedChangeSet { - /// Whether the change set is empty. - pub fn is_empty(&self) -> bool { - self.top.is_empty() && self.children.is_empty() - } - - /// Clear the change set. - pub fn clear(&mut self) { - self.top.clear(); - self.children.clear(); - } + /// Whether the change set is empty. + pub fn is_empty(&self) -> bool { + self.top.is_empty() && self.children.is_empty() + } + + /// Clear the change set. + pub fn clear(&mut self) { + self.top.clear(); + self.children.clear(); + } } impl OverlayedChanges { - /// Whether the overlayed changes are empty. - pub fn is_empty(&self) -> bool { - self.prospective.is_empty() && self.committed.is_empty() - } - - /// Ask to collect/not to collect extrinsics indices where key(s) has been changed. - pub fn set_collect_extrinsics(&mut self, collect_extrinsics: bool) { - self.collect_extrinsics = collect_extrinsics; - } - - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred - /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose - /// value has been set. - pub fn storage(&self, key: &[u8]) -> Option> { - self.prospective.top.get(key) - .or_else(|| self.committed.top.get(key)) - .map(|x| { - let size_read = x.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_read_modified(size_read); - x.value.as_ref().map(AsRef::as_ref) - }) - } - - /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred - /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose - /// value has been set. 
- pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { - if let Some(map) = self.prospective.children.get(storage_key) { - if let Some(val) = map.0.get(key) { - let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_read_modified(size_read); - return Some(val.value.as_ref().map(AsRef::as_ref)); - } - } - - if let Some(map) = self.committed.children.get(storage_key) { - if let Some(val) = map.0.get(key) { - let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_read_modified(size_read); - return Some(val.value.as_ref().map(AsRef::as_ref)); - } - } - - None - } - - /// Inserts the given key-value pair into the prospective change set. - /// - /// `None` can be used to delete a value specified by the given key. - pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option) { - let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_write_overlay(size_write); - let extrinsic_index = self.extrinsic_index(); - let entry = self.prospective.top.entry(key).or_default(); - entry.value = val; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - - /// Inserts the given key-value pair into the prospective child change set. - /// - /// `None` can be used to delete a value specified by the given key. 
- pub(crate) fn set_child_storage( - &mut self, - storage_key: StorageKey, - child_info: ChildInfo, - key: StorageKey, - val: Option, - ) { - let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); - self.stats.tally_write_overlay(size_write); - let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info); - debug_assert!(updatable); - - let entry = map_entry.0.entry(key).or_default(); - entry.value = val; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - - /// Clear child storage of given storage key. - /// - /// NOTE that this doesn't take place immediately but written into the prospective - /// change set, and still can be reverted by [`discard_prospective`]. - /// - /// [`discard_prospective`]: #method.discard_prospective - pub(crate) fn clear_child_storage( - &mut self, - storage_key: &[u8], - child_info: ChildInfo, - ) { - let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info); - debug_assert!(updatable); - - map_entry.0.values_mut().for_each(|e| { - if let Some(extrinsic) = extrinsic_index { - e.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - - e.value = None; - }); - - if let Some((committed_map, _child_info)) = self.committed.children.get(storage_key) { - for (key, value) in committed_map.iter() { - if !map_entry.0.contains_key(key) { - map_entry.0.insert(key.clone(), OverlayedValue { - value: None, - extrinsics: extrinsic_index.map(|i| { - let mut e = value.extrinsics.clone() - .unwrap_or_else(|| BTreeSet::default()); - e.insert(i); - e - }), - }); - } - } - } - } - - /// Removes all 
key-value pairs which keys share the given prefix. - /// - /// NOTE that this doesn't take place immediately but written into the prospective - /// change set, and still can be reverted by [`discard_prospective`]. - /// - /// [`discard_prospective`]: #method.discard_prospective - pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) { - let extrinsic_index = self.extrinsic_index(); - - // Iterate over all prospective and mark all keys that share - // the given prefix as removed (None). - for (key, entry) in self.prospective.top.iter_mut() { - if key.starts_with(prefix) { - entry.value = None; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - } - - // Then do the same with keys from committed changes. - // NOTE that we are making changes in the prospective change set. - for key in self.committed.top.keys() { - if key.starts_with(prefix) { - let entry = self.prospective.top.entry(key.clone()).or_default(); - entry.value = None; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - } - } - - pub(crate) fn clear_child_prefix( - &mut self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) { - let extrinsic_index = self.extrinsic_index(); - let map_entry = self.prospective.children.entry(storage_key.to_vec()) - .or_insert_with(|| (Default::default(), child_info.to_owned())); - let updatable = map_entry.1.try_update(child_info); - debug_assert!(updatable); - - for (key, entry) in map_entry.0.iter_mut() { - if key.starts_with(prefix) { - entry.value = None; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - } - - if let Some((child_committed, _child_info)) = self.committed.children.get(storage_key) { - // Then do the same with keys from committed changes. 
- // NOTE that we are making changes in the prospective change set. - for key in child_committed.keys() { - if key.starts_with(prefix) { - let entry = map_entry.0.entry(key.clone()).or_default(); - entry.value = None; - - if let Some(extrinsic) = extrinsic_index { - entry.extrinsics.get_or_insert_with(Default::default) - .insert(extrinsic); - } - } - } - } - } - - /// Discard prospective changes to state. - pub fn discard_prospective(&mut self) { - self.prospective.clear(); - } - - /// Commit prospective changes to state. - pub fn commit_prospective(&mut self) { - if self.committed.is_empty() { - mem::swap(&mut self.prospective, &mut self.committed); - } else { - let top_to_commit = mem::replace(&mut self.prospective.top, BTreeMap::new()); - for (key, val) in top_to_commit.into_iter() { - let entry = self.committed.top.entry(key).or_default(); - entry.value = val.value; - - if let Some(prospective_extrinsics) = val.extrinsics { - entry.extrinsics.get_or_insert_with(Default::default) - .extend(prospective_extrinsics); - } - } - for (storage_key, (map, child_info)) in self.prospective.children.drain() { - let child_content = self.committed.children.entry(storage_key) - .or_insert_with(|| (Default::default(), child_info)); - // No update to child info at this point (will be needed for deletion). - for (key, val) in map.into_iter() { - let entry = child_content.0.entry(key).or_default(); - entry.value = val.value; - - if let Some(prospective_extrinsics) = val.extrinsics { - entry.extrinsics.get_or_insert_with(Default::default) - .extend(prospective_extrinsics); - } - } - } - } - } - - /// Consume `OverlayedChanges` and take committed set. - /// - /// Panics: - /// Will panic if there are any uncommitted prospective changes. 
- fn drain_committed(&mut self) -> ( - impl Iterator)>, - impl Iterator)>, OwnedChildInfo))>, - ) { - assert!(self.prospective.is_empty()); - ( - std::mem::replace(&mut self.committed.top, Default::default()) - .into_iter() - .map(|(k, v)| (k, v.value)), - std::mem::replace(&mut self.committed.children, Default::default()) - .into_iter() - .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))), - ) - } - - /// Convert this instance with all changes into a [`StorageChanges`] instance. - pub fn into_storage_changes< - B: Backend, H: Hasher, N: BlockNumber - >( - mut self, - backend: &B, - changes_trie_state: Option<&ChangesTrieState>, - parent_hash: H::Out, - mut cache: StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + Encode + 'static { - self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) - } - - /// Drain all changes into a [`StorageChanges`] instance. Leave empty overlay in place. - pub fn drain_storage_changes, H: Hasher, N: BlockNumber>( - &mut self, - backend: &B, - changes_trie_state: Option<&ChangesTrieState>, - parent_hash: H::Out, - mut cache: &mut StorageTransactionCache, - ) -> Result, String> where H::Out: Ord + Encode + 'static { - // If the transaction does not exist, we generate it. - if cache.transaction.is_none() { - self.storage_root(backend, &mut cache); - } - - let (transaction, transaction_storage_root) = cache.transaction.take() - .and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr))) - .expect("Transaction was be generated as part of `storage_root`; qed"); - - // If the transaction does not exist, we generate it. 
- if cache.changes_trie_transaction.is_none() { - self.changes_trie_root( - backend, - changes_trie_state, - parent_hash, - false, - &mut cache, - ).map_err(|_| "Failed to generate changes trie transaction")?; - } - - let changes_trie_transaction = cache.changes_trie_transaction - .take() - .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); - - let (main_storage_changes, child_storage_changes) = self.drain_committed(); - - Ok(StorageChanges { - main_storage_changes: main_storage_changes.collect(), - child_storage_changes: child_storage_changes.map(|(sk, it)| (sk, it.0.collect())).collect(), - transaction, - transaction_storage_root, - changes_trie_transaction, - }) - } - - /// Inserts storage entry responsible for current extrinsic index. - #[cfg(test)] - pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { - self.prospective.top.insert(EXTRINSIC_INDEX.to_vec(), OverlayedValue { - value: Some(extrinsic_index.encode()), - extrinsics: None, - }); - } - - /// Returns current extrinsic index to use in changes trie construction. - /// None is returned if it is not set or changes trie config is not set. - /// Persistent value (from the backend) can be ignored because runtime must - /// set this index before first and unset after last extrinsic is executed. - /// Changes that are made outside of extrinsics, are marked with - /// `NO_EXTRINSIC_INDEX` index. - fn extrinsic_index(&self) -> Option { - match self.collect_extrinsics { - true => Some( - self.storage(EXTRINSIC_INDEX) - .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) - .unwrap_or(NO_EXTRINSIC_INDEX)), - false => None, - } - } - - /// Generate the storage root using `backend` and all changes from `prospective` and `committed`. - /// - /// Returns the storage root and caches storage transaction in the given `cache`. 
- pub fn storage_root>( - &self, - backend: &B, - cache: &mut StorageTransactionCache, - ) -> H::Out - where H::Out: Ord + Encode, - { - let child_storage_keys = self.prospective.children.keys() - .chain(self.committed.children.keys()); - let child_delta_iter = child_storage_keys.map(|storage_key| - ( - storage_key.clone(), - self.committed.children.get(storage_key) - .into_iter() - .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) - .chain( - self.prospective.children.get(storage_key) - .into_iter() - .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) - ), - self.child_info(storage_key).cloned() - .expect("child info initialized in either committed or prospective"), - ) - ); - - // compute and memoize - let delta = self.committed.top.iter().map(|(k, v)| (k.clone(), v.value.clone())) - .chain(self.prospective.top.iter().map(|(k, v)| (k.clone(), v.value.clone()))); - - let (root, transaction) = backend.full_storage_root(delta, child_delta_iter); - - cache.transaction = Some(transaction); - cache.transaction_storage_root = Some(root); - - root - } - - /// Generate the changes trie root. - /// - /// Returns the changes trie root and caches the storage transaction into the given `cache`. - /// - /// # Panics - /// - /// Panics on storage error, when `panic_on_storage_error` is set. 
- pub fn changes_trie_root<'a, H: Hasher, N: BlockNumber, B: Backend>( - &self, - backend: &B, - changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, - parent_hash: H::Out, - panic_on_storage_error: bool, - cache: &mut StorageTransactionCache, - ) -> Result, ()> where H::Out: Ord + Encode + 'static { - build_changes_trie::<_, H, N>( - backend, - changes_trie_state, - self, - parent_hash, - panic_on_storage_error, - ).map(|r| { - let root = r.as_ref().map(|r| r.1).clone(); - cache.changes_trie_transaction = Some(r.map(|(db, _, cache)| (db, cache))); - cache.changes_trie_transaction_storage_root = Some(root); - root - }) - } - - /// Get child info for a storage key. - /// Take the latest value so prospective first. - pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { - if let Some((_, ci)) = self.prospective.children.get(storage_key) { - return Some(&ci); - } - if let Some((_, ci)) = self.committed.children.get(storage_key) { - return Some(&ci); - } - None - } - - /// Returns the next (in lexicographic order) storage key in the overlayed alongside its value. - /// If no value is next then `None` is returned. 
- pub fn next_storage_key_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { - let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - - let next_prospective_key = self.prospective.top - .range::<[u8], _>(range) - .next() - .map(|(k, v)| (&k[..], v)); - - let next_committed_key = self.committed.top - .range::<[u8], _>(range) - .next() - .map(|(k, v)| (&k[..], v)); - - match (next_committed_key, next_prospective_key) { - // Committed is strictly less than prospective - (Some(committed_key), Some(prospective_key)) if committed_key.0 < prospective_key.0 => - Some(committed_key), - (committed_key, None) => committed_key, - // Prospective key is less or equal to committed or committed doesn't exist - (_, prospective_key) => prospective_key, - } - } - - /// Returns the next (in lexicographic order) child storage key in the overlayed alongside its - /// value. If no value is next then `None` is returned. - pub fn next_child_storage_key_change( - &self, - storage_key: &[u8], - key: &[u8] - ) -> Option<(&[u8], &OverlayedValue)> { - let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); - - let next_prospective_key = self.prospective.children.get(storage_key) - .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - - let next_committed_key = self.committed.children.get(storage_key) - .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); - - match (next_committed_key, next_prospective_key) { - // Committed is strictly less than prospective - (Some(committed_key), Some(prospective_key)) if committed_key.0 < prospective_key.0 => - Some(committed_key), - (committed_key, None) => committed_key, - // Prospective key is less or equal to committed or committed doesn't exist - (_, prospective_key) => prospective_key, - } - } + /// Whether the overlayed changes are empty. 
+ pub fn is_empty(&self) -> bool { + self.prospective.is_empty() && self.committed.is_empty() + } + + /// Ask to collect/not to collect extrinsics indices where key(s) has been changed. + pub fn set_collect_extrinsics(&mut self, collect_extrinsics: bool) { + self.collect_extrinsics = collect_extrinsics; + } + + /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred + /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose + /// value has been set. + pub fn storage(&self, key: &[u8]) -> Option> { + self.prospective + .top + .get(key) + .or_else(|| self.committed.top.get(key)) + .map(|x| { + let size_read = x.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); + x.value.as_ref().map(AsRef::as_ref) + }) + } + + /// Returns a double-Option: None if the key is unknown (i.e. and the query should be referred + /// to the backend); Some(None) if the key has been deleted. Some(Some(...)) for a key whose + /// value has been set. + pub fn child_storage(&self, storage_key: &[u8], key: &[u8]) -> Option> { + if let Some(map) = self.prospective.children.get(storage_key) { + if let Some(val) = map.0.get(key) { + let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); + return Some(val.value.as_ref().map(AsRef::as_ref)); + } + } + + if let Some(map) = self.committed.children.get(storage_key) { + if let Some(val) = map.0.get(key) { + let size_read = val.value.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_read_modified(size_read); + return Some(val.value.as_ref().map(AsRef::as_ref)); + } + } + + None + } + + /// Inserts the given key-value pair into the prospective change set. + /// + /// `None` can be used to delete a value specified by the given key. 
+ pub(crate) fn set_storage(&mut self, key: StorageKey, val: Option) { + let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_write_overlay(size_write); + let extrinsic_index = self.extrinsic_index(); + let entry = self.prospective.top.entry(key).or_default(); + entry.value = val; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + + /// Inserts the given key-value pair into the prospective child change set. + /// + /// `None` can be used to delete a value specified by the given key. + pub(crate) fn set_child_storage( + &mut self, + storage_key: StorageKey, + child_info: ChildInfo, + key: StorageKey, + val: Option, + ) { + let size_write = val.as_ref().map(|x| x.len() as u64).unwrap_or(0); + self.stats.tally_write_overlay(size_write); + let extrinsic_index = self.extrinsic_index(); + let map_entry = self + .prospective + .children + .entry(storage_key) + .or_insert_with(|| (Default::default(), child_info.to_owned())); + let updatable = map_entry.1.try_update(child_info); + debug_assert!(updatable); + + let entry = map_entry.0.entry(key).or_default(); + entry.value = val; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + + /// Clear child storage of given storage key. + /// + /// NOTE that this doesn't take place immediately but written into the prospective + /// change set, and still can be reverted by [`discard_prospective`]. 
+ /// + /// [`discard_prospective`]: #method.discard_prospective + pub(crate) fn clear_child_storage(&mut self, storage_key: &[u8], child_info: ChildInfo) { + let extrinsic_index = self.extrinsic_index(); + let map_entry = self + .prospective + .children + .entry(storage_key.to_vec()) + .or_insert_with(|| (Default::default(), child_info.to_owned())); + let updatable = map_entry.1.try_update(child_info); + debug_assert!(updatable); + + map_entry.0.values_mut().for_each(|e| { + if let Some(extrinsic) = extrinsic_index { + e.extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + + e.value = None; + }); + + if let Some((committed_map, _child_info)) = self.committed.children.get(storage_key) { + for (key, value) in committed_map.iter() { + if !map_entry.0.contains_key(key) { + map_entry.0.insert( + key.clone(), + OverlayedValue { + value: None, + extrinsics: extrinsic_index.map(|i| { + let mut e = value + .extrinsics + .clone() + .unwrap_or_else(|| BTreeSet::default()); + e.insert(i); + e + }), + }, + ); + } + } + } + } + + /// Removes all key-value pairs which keys share the given prefix. + /// + /// NOTE that this doesn't take place immediately but written into the prospective + /// change set, and still can be reverted by [`discard_prospective`]. + /// + /// [`discard_prospective`]: #method.discard_prospective + pub(crate) fn clear_prefix(&mut self, prefix: &[u8]) { + let extrinsic_index = self.extrinsic_index(); + + // Iterate over all prospective and mark all keys that share + // the given prefix as removed (None). + for (key, entry) in self.prospective.top.iter_mut() { + if key.starts_with(prefix) { + entry.value = None; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + } + + // Then do the same with keys from committed changes. + // NOTE that we are making changes in the prospective change set. 
+ for key in self.committed.top.keys() { + if key.starts_with(prefix) { + let entry = self.prospective.top.entry(key.clone()).or_default(); + entry.value = None; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + } + } + + pub(crate) fn clear_child_prefix( + &mut self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + ) { + let extrinsic_index = self.extrinsic_index(); + let map_entry = self + .prospective + .children + .entry(storage_key.to_vec()) + .or_insert_with(|| (Default::default(), child_info.to_owned())); + let updatable = map_entry.1.try_update(child_info); + debug_assert!(updatable); + + for (key, entry) in map_entry.0.iter_mut() { + if key.starts_with(prefix) { + entry.value = None; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + } + + if let Some((child_committed, _child_info)) = self.committed.children.get(storage_key) { + // Then do the same with keys from committed changes. + // NOTE that we are making changes in the prospective change set. + for key in child_committed.keys() { + if key.starts_with(prefix) { + let entry = map_entry.0.entry(key.clone()).or_default(); + entry.value = None; + + if let Some(extrinsic) = extrinsic_index { + entry + .extrinsics + .get_or_insert_with(Default::default) + .insert(extrinsic); + } + } + } + } + } + + /// Discard prospective changes to state. + pub fn discard_prospective(&mut self) { + self.prospective.clear(); + } + + /// Commit prospective changes to state. 
+ pub fn commit_prospective(&mut self) { + if self.committed.is_empty() { + mem::swap(&mut self.prospective, &mut self.committed); + } else { + let top_to_commit = mem::replace(&mut self.prospective.top, BTreeMap::new()); + for (key, val) in top_to_commit.into_iter() { + let entry = self.committed.top.entry(key).or_default(); + entry.value = val.value; + + if let Some(prospective_extrinsics) = val.extrinsics { + entry + .extrinsics + .get_or_insert_with(Default::default) + .extend(prospective_extrinsics); + } + } + for (storage_key, (map, child_info)) in self.prospective.children.drain() { + let child_content = self + .committed + .children + .entry(storage_key) + .or_insert_with(|| (Default::default(), child_info)); + // No update to child info at this point (will be needed for deletion). + for (key, val) in map.into_iter() { + let entry = child_content.0.entry(key).or_default(); + entry.value = val.value; + + if let Some(prospective_extrinsics) = val.extrinsics { + entry + .extrinsics + .get_or_insert_with(Default::default) + .extend(prospective_extrinsics); + } + } + } + } + } + + /// Consume `OverlayedChanges` and take committed set. + /// + /// Panics: + /// Will panic if there are any uncommitted prospective changes. + fn drain_committed( + &mut self, + ) -> ( + impl Iterator)>, + impl Iterator< + Item = ( + StorageKey, + ( + impl Iterator)>, + OwnedChildInfo, + ), + ), + >, + ) { + assert!(self.prospective.is_empty()); + ( + std::mem::replace(&mut self.committed.top, Default::default()) + .into_iter() + .map(|(k, v)| (k, v.value)), + std::mem::replace(&mut self.committed.children, Default::default()) + .into_iter() + .map(|(sk, (v, ci))| (sk, (v.into_iter().map(|(k, v)| (k, v.value)), ci))), + ) + } + + /// Convert this instance with all changes into a [`StorageChanges`] instance. 
+ pub fn into_storage_changes, H: Hasher, N: BlockNumber>( + mut self, + backend: &B, + changes_trie_state: Option<&ChangesTrieState>, + parent_hash: H::Out, + mut cache: StorageTransactionCache, + ) -> Result, String> + where + H::Out: Ord + Encode + 'static, + { + self.drain_storage_changes(backend, changes_trie_state, parent_hash, &mut cache) + } + + /// Drain all changes into a [`StorageChanges`] instance. Leave empty overlay in place. + pub fn drain_storage_changes, H: Hasher, N: BlockNumber>( + &mut self, + backend: &B, + changes_trie_state: Option<&ChangesTrieState>, + parent_hash: H::Out, + mut cache: &mut StorageTransactionCache, + ) -> Result, String> + where + H::Out: Ord + Encode + 'static, + { + // If the transaction does not exist, we generate it. + if cache.transaction.is_none() { + self.storage_root(backend, &mut cache); + } + + let (transaction, transaction_storage_root) = cache + .transaction + .take() + .and_then(|t| cache.transaction_storage_root.take().map(|tr| (t, tr))) + .expect("Transaction was be generated as part of `storage_root`; qed"); + + // If the transaction does not exist, we generate it. + if cache.changes_trie_transaction.is_none() { + self.changes_trie_root(backend, changes_trie_state, parent_hash, false, &mut cache) + .map_err(|_| "Failed to generate changes trie transaction")?; + } + + let changes_trie_transaction = cache + .changes_trie_transaction + .take() + .expect("Changes trie transaction was generated by `changes_trie_root`; qed"); + + let (main_storage_changes, child_storage_changes) = self.drain_committed(); + + Ok(StorageChanges { + main_storage_changes: main_storage_changes.collect(), + child_storage_changes: child_storage_changes + .map(|(sk, it)| (sk, it.0.collect())) + .collect(), + transaction, + transaction_storage_root, + changes_trie_transaction, + }) + } + + /// Inserts storage entry responsible for current extrinsic index. 
+ #[cfg(test)] + pub(crate) fn set_extrinsic_index(&mut self, extrinsic_index: u32) { + self.prospective.top.insert( + EXTRINSIC_INDEX.to_vec(), + OverlayedValue { + value: Some(extrinsic_index.encode()), + extrinsics: None, + }, + ); + } + + /// Returns current extrinsic index to use in changes trie construction. + /// None is returned if it is not set or changes trie config is not set. + /// Persistent value (from the backend) can be ignored because runtime must + /// set this index before first and unset after last extrinsic is executed. + /// Changes that are made outside of extrinsics, are marked with + /// `NO_EXTRINSIC_INDEX` index. + fn extrinsic_index(&self) -> Option { + match self.collect_extrinsics { + true => Some( + self.storage(EXTRINSIC_INDEX) + .and_then(|idx| idx.and_then(|idx| Decode::decode(&mut &*idx).ok())) + .unwrap_or(NO_EXTRINSIC_INDEX), + ), + false => None, + } + } + + /// Generate the storage root using `backend` and all changes from `prospective` and `committed`. + /// + /// Returns the storage root and caches storage transaction in the given `cache`. 
+ pub fn storage_root>( + &self, + backend: &B, + cache: &mut StorageTransactionCache, + ) -> H::Out + where + H::Out: Ord + Encode, + { + let child_storage_keys = self + .prospective + .children + .keys() + .chain(self.committed.children.keys()); + let child_delta_iter = child_storage_keys.map(|storage_key| { + ( + storage_key.clone(), + self.committed + .children + .get(storage_key) + .into_iter() + .flat_map(|(map, _)| map.iter().map(|(k, v)| (k.clone(), v.value.clone()))) + .chain( + self.prospective + .children + .get(storage_key) + .into_iter() + .flat_map(|(map, _)| { + map.iter().map(|(k, v)| (k.clone(), v.value.clone())) + }), + ), + self.child_info(storage_key) + .cloned() + .expect("child info initialized in either committed or prospective"), + ) + }); + + // compute and memoize + let delta = self + .committed + .top + .iter() + .map(|(k, v)| (k.clone(), v.value.clone())) + .chain( + self.prospective + .top + .iter() + .map(|(k, v)| (k.clone(), v.value.clone())), + ); + + let (root, transaction) = backend.full_storage_root(delta, child_delta_iter); + + cache.transaction = Some(transaction); + cache.transaction_storage_root = Some(root); + + root + } + + /// Generate the changes trie root. + /// + /// Returns the changes trie root and caches the storage transaction into the given `cache`. + /// + /// # Panics + /// + /// Panics on storage error, when `panic_on_storage_error` is set. 
+ pub fn changes_trie_root<'a, H: Hasher, N: BlockNumber, B: Backend>( + &self, + backend: &B, + changes_trie_state: Option<&'a ChangesTrieState<'a, H, N>>, + parent_hash: H::Out, + panic_on_storage_error: bool, + cache: &mut StorageTransactionCache, + ) -> Result, ()> + where + H::Out: Ord + Encode + 'static, + { + build_changes_trie::<_, H, N>( + backend, + changes_trie_state, + self, + parent_hash, + panic_on_storage_error, + ) + .map(|r| { + let root = r.as_ref().map(|r| r.1).clone(); + cache.changes_trie_transaction = Some(r.map(|(db, _, cache)| (db, cache))); + cache.changes_trie_transaction_storage_root = Some(root); + root + }) + } + + /// Get child info for a storage key. + /// Take the latest value so prospective first. + pub fn child_info(&self, storage_key: &[u8]) -> Option<&OwnedChildInfo> { + if let Some((_, ci)) = self.prospective.children.get(storage_key) { + return Some(&ci); + } + if let Some((_, ci)) = self.committed.children.get(storage_key) { + return Some(&ci); + } + None + } + + /// Returns the next (in lexicographic order) storage key in the overlayed alongside its value. + /// If no value is next then `None` is returned. 
+ pub fn next_storage_key_change(&self, key: &[u8]) -> Option<(&[u8], &OverlayedValue)> { + let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); + + let next_prospective_key = self + .prospective + .top + .range::<[u8], _>(range) + .next() + .map(|(k, v)| (&k[..], v)); + + let next_committed_key = self + .committed + .top + .range::<[u8], _>(range) + .next() + .map(|(k, v)| (&k[..], v)); + + match (next_committed_key, next_prospective_key) { + // Committed is strictly less than prospective + (Some(committed_key), Some(prospective_key)) if committed_key.0 < prospective_key.0 => { + Some(committed_key) + } + (committed_key, None) => committed_key, + // Prospective key is less or equal to committed or committed doesn't exist + (_, prospective_key) => prospective_key, + } + } + + /// Returns the next (in lexicographic order) child storage key in the overlayed alongside its + /// value. If no value is next then `None` is returned. + pub fn next_child_storage_key_change( + &self, + storage_key: &[u8], + key: &[u8], + ) -> Option<(&[u8], &OverlayedValue)> { + let range = (ops::Bound::Excluded(key), ops::Bound::Unbounded); + + let next_prospective_key = self + .prospective + .children + .get(storage_key) + .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); + + let next_committed_key = self + .committed + .children + .get(storage_key) + .and_then(|(map, _)| map.range::<[u8], _>(range).next().map(|(k, v)| (&k[..], v))); + + match (next_committed_key, next_prospective_key) { + // Committed is strictly less than prospective + (Some(committed_key), Some(prospective_key)) if committed_key.0 < prospective_key.0 => { + Some(committed_key) + } + (committed_key, None) => committed_key, + // Prospective key is less or equal to committed or committed doesn't exist + (_, prospective_key) => prospective_key, + } + } } #[cfg(test)] impl From> for OverlayedValue { - fn from(value: Option) -> OverlayedValue { - OverlayedValue { value, 
..Default::default() } - } + fn from(value: Option) -> OverlayedValue { + OverlayedValue { + value, + ..Default::default() + } + } } #[cfg(test)] mod tests { - use hex_literal::hex; - use sp_core::{ - Blake2Hasher, traits::Externalities, storage::well_known_keys::EXTRINSIC_INDEX, - }; - use crate::InMemoryBackend; - use crate::ext::Ext; - use super::*; - - fn strip_extrinsic_index(map: &BTreeMap) - -> BTreeMap - { - let mut clone = map.clone(); - clone.remove(&EXTRINSIC_INDEX.to_vec()); - clone - } - - #[test] - fn overlayed_storage_works() { - let mut overlayed = OverlayedChanges::default(); - - let key = vec![42, 69, 169, 142]; - - assert!(overlayed.storage(&key).is_none()); - - overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - - overlayed.commit_prospective(); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - - overlayed.set_storage(key.clone(), Some(vec![])); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[][..])); - - overlayed.set_storage(key.clone(), None); - assert!(overlayed.storage(&key).unwrap().is_none()); - - overlayed.discard_prospective(); - assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); - - overlayed.set_storage(key.clone(), None); - overlayed.commit_prospective(); - assert!(overlayed.storage(&key).unwrap().is_none()); - } - - #[test] - fn overlayed_storage_root_works() { - let initial: BTreeMap<_, _> = vec![ - (b"doe".to_vec(), b"reindeer".to_vec()), - (b"dog".to_vec(), b"puppyXXX".to_vec()), - (b"dogglesworth".to_vec(), b"catXXX".to_vec()), - (b"doug".to_vec(), b"notadog".to_vec()), - ].into_iter().collect(); - let backend = InMemoryBackend::::from(initial); - let mut overlay = OverlayedChanges { - committed: vec![ - (b"dog".to_vec(), Some(b"puppy".to_vec()).into()), - (b"dogglesworth".to_vec(), Some(b"catYYY".to_vec()).into()), - (b"doug".to_vec(), Some(vec![]).into()), - ].into_iter().collect(), - prospective: vec![ - 
(b"dogglesworth".to_vec(), Some(b"cat".to_vec()).into()), - (b"doug".to_vec(), None.into()), - ].into_iter().collect(), - ..Default::default() - }; - - let mut cache = StorageTransactionCache::default(); - let mut ext = Ext::new( - &mut overlay, - &mut cache, - &backend, - crate::changes_trie::disabled_state::<_, u64>(), - None, - ); - const ROOT: [u8; 32] = hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); - - assert_eq!(&ext.storage_root()[..], &ROOT); - } - - #[test] - fn extrinsic_changes_are_collected() { - let mut overlay = OverlayedChanges::default(); - overlay.set_collect_extrinsics(true); - - overlay.set_storage(vec![100], Some(vec![101])); - - overlay.set_extrinsic_index(0); - overlay.set_storage(vec![1], Some(vec![2])); - - overlay.set_extrinsic_index(1); - overlay.set_storage(vec![3], Some(vec![4])); - - overlay.set_extrinsic_index(2); - overlay.set_storage(vec![1], Some(vec![6])); - - assert_eq!(strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![6]), - extrinsics: Some(vec![0, 2].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![4]), - extrinsics: Some(vec![1].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), - extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); - - overlay.commit_prospective(); - - overlay.set_extrinsic_index(3); - overlay.set_storage(vec![3], Some(vec![7])); - - overlay.set_extrinsic_index(4); - overlay.set_storage(vec![1], Some(vec![8])); - - assert_eq!(strip_extrinsic_index(&overlay.committed.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![6]), - extrinsics: Some(vec![0, 2].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![4]), - extrinsics: Some(vec![1].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), - extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - 
].into_iter().collect()); - - assert_eq!(strip_extrinsic_index(&overlay.prospective.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![8]), - extrinsics: Some(vec![4].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![7]), - extrinsics: Some(vec![3].into_iter().collect()) }), - ].into_iter().collect()); - - overlay.commit_prospective(); - - assert_eq!(strip_extrinsic_index(&overlay.committed.top), - vec![ - (vec![1], OverlayedValue { value: Some(vec![8]), - extrinsics: Some(vec![0, 2, 4].into_iter().collect()) }), - (vec![3], OverlayedValue { value: Some(vec![7]), - extrinsics: Some(vec![1, 3].into_iter().collect()) }), - (vec![100], OverlayedValue { value: Some(vec![101]), - extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) }), - ].into_iter().collect()); - - assert_eq!(overlay.prospective, - Default::default()); - } - - #[test] - fn next_storage_key_change_works() { - let mut overlay = OverlayedChanges::default(); - overlay.set_storage(vec![20], Some(vec![20])); - overlay.set_storage(vec![30], Some(vec![30])); - overlay.set_storage(vec![40], Some(vec![40])); - overlay.commit_prospective(); - overlay.set_storage(vec![10], Some(vec![10])); - overlay.set_storage(vec![30], None); - - // next_prospective < next_committed - let next_to_5 = overlay.next_storage_key_change(&[5]).unwrap(); - assert_eq!(next_to_5.0.to_vec(), vec![10]); - assert_eq!(next_to_5.1.value, Some(vec![10])); - - // next_committed < next_prospective - let next_to_10 = overlay.next_storage_key_change(&[10]).unwrap(); - assert_eq!(next_to_10.0.to_vec(), vec![20]); - assert_eq!(next_to_10.1.value, Some(vec![20])); - - // next_committed == next_prospective - let next_to_20 = overlay.next_storage_key_change(&[20]).unwrap(); - assert_eq!(next_to_20.0.to_vec(), vec![30]); - assert_eq!(next_to_20.1.value, None); - - // next_committed, no next_prospective - let next_to_30 = overlay.next_storage_key_change(&[30]).unwrap(); - assert_eq!(next_to_30.0.to_vec(), 
vec![40]); - assert_eq!(next_to_30.1.value, Some(vec![40])); - - overlay.set_storage(vec![50], Some(vec![50])); - // next_prospective, no next_committed - let next_to_40 = overlay.next_storage_key_change(&[40]).unwrap(); - assert_eq!(next_to_40.0.to_vec(), vec![50]); - assert_eq!(next_to_40.1.value, Some(vec![50])); - } - - #[test] - fn next_child_storage_key_change_works() { - let child = b"Child1".to_vec(); - let child_info = ChildInfo::new_default(b"uniqueid"); - let mut overlay = OverlayedChanges::default(); - overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); - overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); - overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); - overlay.commit_prospective(); - overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); - overlay.set_child_storage(child.clone(), child_info, vec![30], None); - - // next_prospective < next_committed - let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); - assert_eq!(next_to_5.0.to_vec(), vec![10]); - assert_eq!(next_to_5.1.value, Some(vec![10])); - - // next_committed < next_prospective - let next_to_10 = overlay.next_child_storage_key_change(&child, &[10]).unwrap(); - assert_eq!(next_to_10.0.to_vec(), vec![20]); - assert_eq!(next_to_10.1.value, Some(vec![20])); - - // next_committed == next_prospective - let next_to_20 = overlay.next_child_storage_key_change(&child, &[20]).unwrap(); - assert_eq!(next_to_20.0.to_vec(), vec![30]); - assert_eq!(next_to_20.1.value, None); - - // next_committed, no next_prospective - let next_to_30 = overlay.next_child_storage_key_change(&child, &[30]).unwrap(); - assert_eq!(next_to_30.0.to_vec(), vec![40]); - assert_eq!(next_to_30.1.value, Some(vec![40])); - - overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); - // next_prospective, no next_committed - let next_to_40 = 
overlay.next_child_storage_key_change(&child, &[40]).unwrap(); - assert_eq!(next_to_40.0.to_vec(), vec![50]); - assert_eq!(next_to_40.1.value, Some(vec![50])); - } + use super::*; + use crate::ext::Ext; + use crate::InMemoryBackend; + use hex_literal::hex; + use sp_core::{storage::well_known_keys::EXTRINSIC_INDEX, traits::Externalities, Blake2Hasher}; + + fn strip_extrinsic_index( + map: &BTreeMap, + ) -> BTreeMap { + let mut clone = map.clone(); + clone.remove(&EXTRINSIC_INDEX.to_vec()); + clone + } + + #[test] + fn overlayed_storage_works() { + let mut overlayed = OverlayedChanges::default(); + + let key = vec![42, 69, 169, 142]; + + assert!(overlayed.storage(&key).is_none()); + + overlayed.set_storage(key.clone(), Some(vec![1, 2, 3])); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); + + overlayed.commit_prospective(); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); + + overlayed.set_storage(key.clone(), Some(vec![])); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[][..])); + + overlayed.set_storage(key.clone(), None); + assert!(overlayed.storage(&key).unwrap().is_none()); + + overlayed.discard_prospective(); + assert_eq!(overlayed.storage(&key).unwrap(), Some(&[1, 2, 3][..])); + + overlayed.set_storage(key.clone(), None); + overlayed.commit_prospective(); + assert!(overlayed.storage(&key).unwrap().is_none()); + } + + #[test] + fn overlayed_storage_root_works() { + let initial: BTreeMap<_, _> = vec![ + (b"doe".to_vec(), b"reindeer".to_vec()), + (b"dog".to_vec(), b"puppyXXX".to_vec()), + (b"dogglesworth".to_vec(), b"catXXX".to_vec()), + (b"doug".to_vec(), b"notadog".to_vec()), + ] + .into_iter() + .collect(); + let backend = InMemoryBackend::::from(initial); + let mut overlay = OverlayedChanges { + committed: vec![ + (b"dog".to_vec(), Some(b"puppy".to_vec()).into()), + (b"dogglesworth".to_vec(), Some(b"catYYY".to_vec()).into()), + (b"doug".to_vec(), Some(vec![]).into()), + ] + .into_iter() + .collect(), + 
prospective: vec![ + (b"dogglesworth".to_vec(), Some(b"cat".to_vec()).into()), + (b"doug".to_vec(), None.into()), + ] + .into_iter() + .collect(), + ..Default::default() + }; + + let mut cache = StorageTransactionCache::default(); + let mut ext = Ext::new( + &mut overlay, + &mut cache, + &backend, + crate::changes_trie::disabled_state::<_, u64>(), + None, + ); + const ROOT: [u8; 32] = + hex!("39245109cef3758c2eed2ccba8d9b370a917850af3824bc8348d505df2c298fa"); + + assert_eq!(&ext.storage_root()[..], &ROOT); + } + + #[test] + fn extrinsic_changes_are_collected() { + let mut overlay = OverlayedChanges::default(); + overlay.set_collect_extrinsics(true); + + overlay.set_storage(vec![100], Some(vec![101])); + + overlay.set_extrinsic_index(0); + overlay.set_storage(vec![1], Some(vec![2])); + + overlay.set_extrinsic_index(1); + overlay.set_storage(vec![3], Some(vec![4])); + + overlay.set_extrinsic_index(2); + overlay.set_storage(vec![1], Some(vec![6])); + + assert_eq!( + strip_extrinsic_index(&overlay.prospective.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![6]), + extrinsics: Some(vec![0, 2].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![4]), + extrinsics: Some(vec![1].into_iter().collect()) + } + ), + ( + vec![100], + OverlayedValue { + value: Some(vec![101]), + extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + overlay.commit_prospective(); + + overlay.set_extrinsic_index(3); + overlay.set_storage(vec![3], Some(vec![7])); + + overlay.set_extrinsic_index(4); + overlay.set_storage(vec![1], Some(vec![8])); + + assert_eq!( + strip_extrinsic_index(&overlay.committed.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![6]), + extrinsics: Some(vec![0, 2].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![4]), + extrinsics: Some(vec![1].into_iter().collect()) + } + ), + ( + vec![100], + OverlayedValue { + 
value: Some(vec![101]), + extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + assert_eq!( + strip_extrinsic_index(&overlay.prospective.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![8]), + extrinsics: Some(vec![4].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![7]), + extrinsics: Some(vec![3].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + overlay.commit_prospective(); + + assert_eq!( + strip_extrinsic_index(&overlay.committed.top), + vec![ + ( + vec![1], + OverlayedValue { + value: Some(vec![8]), + extrinsics: Some(vec![0, 2, 4].into_iter().collect()) + } + ), + ( + vec![3], + OverlayedValue { + value: Some(vec![7]), + extrinsics: Some(vec![1, 3].into_iter().collect()) + } + ), + ( + vec![100], + OverlayedValue { + value: Some(vec![101]), + extrinsics: Some(vec![NO_EXTRINSIC_INDEX].into_iter().collect()) + } + ), + ] + .into_iter() + .collect() + ); + + assert_eq!(overlay.prospective, Default::default()); + } + + #[test] + fn next_storage_key_change_works() { + let mut overlay = OverlayedChanges::default(); + overlay.set_storage(vec![20], Some(vec![20])); + overlay.set_storage(vec![30], Some(vec![30])); + overlay.set_storage(vec![40], Some(vec![40])); + overlay.commit_prospective(); + overlay.set_storage(vec![10], Some(vec![10])); + overlay.set_storage(vec![30], None); + + // next_prospective < next_committed + let next_to_5 = overlay.next_storage_key_change(&[5]).unwrap(); + assert_eq!(next_to_5.0.to_vec(), vec![10]); + assert_eq!(next_to_5.1.value, Some(vec![10])); + + // next_committed < next_prospective + let next_to_10 = overlay.next_storage_key_change(&[10]).unwrap(); + assert_eq!(next_to_10.0.to_vec(), vec![20]); + assert_eq!(next_to_10.1.value, Some(vec![20])); + + // next_committed == next_prospective + let next_to_20 = overlay.next_storage_key_change(&[20]).unwrap(); + assert_eq!(next_to_20.0.to_vec(), 
vec![30]); + assert_eq!(next_to_20.1.value, None); + + // next_committed, no next_prospective + let next_to_30 = overlay.next_storage_key_change(&[30]).unwrap(); + assert_eq!(next_to_30.0.to_vec(), vec![40]); + assert_eq!(next_to_30.1.value, Some(vec![40])); + + overlay.set_storage(vec![50], Some(vec![50])); + // next_prospective, no next_committed + let next_to_40 = overlay.next_storage_key_change(&[40]).unwrap(); + assert_eq!(next_to_40.0.to_vec(), vec![50]); + assert_eq!(next_to_40.1.value, Some(vec![50])); + } + + #[test] + fn next_child_storage_key_change_works() { + let child = b"Child1".to_vec(); + let child_info = ChildInfo::new_default(b"uniqueid"); + let mut overlay = OverlayedChanges::default(); + overlay.set_child_storage(child.clone(), child_info, vec![20], Some(vec![20])); + overlay.set_child_storage(child.clone(), child_info, vec![30], Some(vec![30])); + overlay.set_child_storage(child.clone(), child_info, vec![40], Some(vec![40])); + overlay.commit_prospective(); + overlay.set_child_storage(child.clone(), child_info, vec![10], Some(vec![10])); + overlay.set_child_storage(child.clone(), child_info, vec![30], None); + + // next_prospective < next_committed + let next_to_5 = overlay.next_child_storage_key_change(&child, &[5]).unwrap(); + assert_eq!(next_to_5.0.to_vec(), vec![10]); + assert_eq!(next_to_5.1.value, Some(vec![10])); + + // next_committed < next_prospective + let next_to_10 = overlay + .next_child_storage_key_change(&child, &[10]) + .unwrap(); + assert_eq!(next_to_10.0.to_vec(), vec![20]); + assert_eq!(next_to_10.1.value, Some(vec![20])); + + // next_committed == next_prospective + let next_to_20 = overlay + .next_child_storage_key_change(&child, &[20]) + .unwrap(); + assert_eq!(next_to_20.0.to_vec(), vec![30]); + assert_eq!(next_to_20.1.value, None); + + // next_committed, no next_prospective + let next_to_30 = overlay + .next_child_storage_key_change(&child, &[30]) + .unwrap(); + assert_eq!(next_to_30.0.to_vec(), vec![40]); + 
assert_eq!(next_to_30.1.value, Some(vec![40])); + + overlay.set_child_storage(child.clone(), child_info, vec![50], Some(vec![50])); + // next_prospective, no next_committed + let next_to_40 = overlay + .next_child_storage_key_change(&child, &[40]) + .unwrap(); + assert_eq!(next_to_40.0.to_vec(), vec![50]); + assert_eq!(next_to_40.1.value, Some(vec![50])); + } } diff --git a/primitives/state-machine/src/proving_backend.rs b/primitives/state-machine/src/proving_backend.rs index 747872af83..7ce47d43f3 100644 --- a/primitives/state-machine/src/proving_backend.rs +++ b/primitives/state-machine/src/proving_backend.rs @@ -16,100 +16,94 @@ //! Proving state machine backend. -use std::sync::Arc; -use parking_lot::RwLock; -use codec::{Decode, Codec}; -use log::debug; -use hash_db::{Hasher, HashDB, EMPTY_PREFIX, Prefix}; -use sp_trie::{ - MemoryDB, default_child_trie_root, read_trie_value_with, read_child_trie_value_with, - record_all_keys, StorageProof, -}; -pub use sp_trie::Recorder; -pub use sp_trie::trie_types::{Layout, TrieError}; use crate::trie_backend::TrieBackend; use crate::trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}; -use crate::{Error, ExecutionError, Backend}; -use std::collections::HashMap; use crate::DBValue; +use crate::{Backend, Error, ExecutionError}; +use codec::{Codec, Decode}; +use hash_db::{HashDB, Hasher, Prefix, EMPTY_PREFIX}; +use log::debug; +use parking_lot::RwLock; use sp_core::storage::ChildInfo; +pub use sp_trie::trie_types::{Layout, TrieError}; +pub use sp_trie::Recorder; +use sp_trie::{ + default_child_trie_root, read_child_trie_value_with, read_trie_value_with, record_all_keys, + MemoryDB, StorageProof, +}; +use std::collections::HashMap; +use std::sync::Arc; /// Patricia trie-based backend specialized in get value proofs. 
pub struct ProvingBackendRecorder<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - pub(crate) backend: &'a TrieBackendEssence, - pub(crate) proof_recorder: &'a mut Recorder, + pub(crate) backend: &'a TrieBackendEssence, + pub(crate) proof_recorder: &'a mut Recorder, } impl<'a, S, H> ProvingBackendRecorder<'a, S, H> - where - S: TrieBackendStorage, - H: Hasher, - H::Out: Codec, +where + S: TrieBackendStorage, + H: Hasher, + H::Out: Codec, { - /// Produce proof for a key query. - pub fn storage(&mut self, key: &[u8]) -> Result>, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_trie_value_with::, _, Ephemeral>( - &eph, - self.backend.root(), - key, - &mut *self.proof_recorder, - ).map_err(map_e) - } - - /// Produce proof for a child key query. - pub fn child_storage( - &mut self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8] - ) -> Result>, String> { - let root = self.storage(storage_key)? - .and_then(|r| Decode::decode(&mut &r[..]).ok()) - .unwrap_or(default_child_trie_root::>(storage_key)); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value_with::, _, _>( - storage_key, - child_info.keyspace(), - &eph, - &root.as_ref(), - key, - &mut *self.proof_recorder - ).map_err(map_e) - } - - /// Produce proof for the whole backend. 
- pub fn record_all_keys(&mut self) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new( - self.backend.backend_storage(), - &mut read_overlay, - ); - - let mut iter = move || -> Result<(), Box>> { - let root = self.backend.root(); - record_all_keys::, _>(&eph, root, &mut *self.proof_recorder) - }; - - if let Err(e) = iter() { - debug!(target: "trie", "Error while recording all keys: {}", e); - } - } + /// Produce proof for a key query. + pub fn storage(&mut self, key: &[u8]) -> Result>, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_trie_value_with::, _, Ephemeral>( + &eph, + self.backend.root(), + key, + &mut *self.proof_recorder, + ) + .map_err(map_e) + } + + /// Produce proof for a child key query. + pub fn child_storage( + &mut self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, String> { + let root = self + .storage(storage_key)? + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .unwrap_or(default_child_trie_root::>(storage_key)); + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_child_trie_value_with::, _, _>( + storage_key, + child_info.keyspace(), + &eph, + &root.as_ref(), + key, + &mut *self.proof_recorder, + ) + .map_err(map_e) + } + + /// Produce proof for the whole backend. 
+ pub fn record_all_keys(&mut self) { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.backend.backend_storage(), &mut read_overlay); + + let mut iter = move || -> Result<(), Box>> { + let root = self.backend.root(); + record_all_keys::, _>(&eph, root, &mut *self.proof_recorder) + }; + + if let Err(e) = iter() { + debug!(target: "trie", "Error while recording all keys: {}", e); + } + } } /// Global proof recorder, act as a layer over a hash db for recording queried @@ -118,341 +112,365 @@ pub type ProofRecorder = Arc::Out, Option, H: 'a + Hasher> ( - TrieBackend, H>, +pub struct ProvingBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher>( + TrieBackend, H>, ); /// Trie backend storage with its proof recorder. pub struct ProofRecorderBackend<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - backend: &'a S, - proof_recorder: ProofRecorder, + backend: &'a S, + proof_recorder: ProofRecorder, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> ProvingBackend<'a, S, H> - where H::Out: Codec +where + H::Out: Codec, { - /// Create new proving backend. - pub fn new(backend: &'a TrieBackend) -> Self { - let proof_recorder = Default::default(); - Self::new_with_recorder(backend, proof_recorder) - } - - /// Create new proving backend with the given recorder. - pub fn new_with_recorder( - backend: &'a TrieBackend, - proof_recorder: ProofRecorder, - ) -> Self { - let essence = backend.essence(); - let root = essence.root().clone(); - let recorder = ProofRecorderBackend { - backend: essence.backend_storage(), - proof_recorder, - }; - ProvingBackend(TrieBackend::new(recorder, root)) - } - - /// Extracting the gathered unordered proof. - pub fn extract_proof(&self) -> StorageProof { - let trie_nodes = self.0.essence().backend_storage().proof_recorder - .read() - .iter() - .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) - .collect(); - StorageProof::new(trie_nodes) - } + /// Create new proving backend. 
+ pub fn new(backend: &'a TrieBackend) -> Self { + let proof_recorder = Default::default(); + Self::new_with_recorder(backend, proof_recorder) + } + + /// Create new proving backend with the given recorder. + pub fn new_with_recorder( + backend: &'a TrieBackend, + proof_recorder: ProofRecorder, + ) -> Self { + let essence = backend.essence(); + let root = essence.root().clone(); + let recorder = ProofRecorderBackend { + backend: essence.backend_storage(), + proof_recorder, + }; + ProvingBackend(TrieBackend::new(recorder, root)) + } + + /// Extracting the gathered unordered proof. + pub fn extract_proof(&self) -> StorageProof { + let trie_nodes = self + .0 + .essence() + .backend_storage() + .proof_recorder + .read() + .iter() + .filter_map(|(_k, v)| v.as_ref().map(|v| v.to_vec())) + .collect(); + StorageProof::new(trie_nodes) + } } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> TrieBackendStorage - for ProofRecorderBackend<'a, S, H> + for ProofRecorderBackend<'a, S, H> { - type Overlay = S::Overlay; - - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - if let Some(v) = self.proof_recorder.read().get(key) { - return Ok(v.clone()); - } - let backend_value = self.backend.get(key, prefix)?; - self.proof_recorder.write().insert(key.clone(), backend_value.clone()); - Ok(backend_value) - } + type Overlay = S::Overlay; + + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + if let Some(v) = self.proof_recorder.read().get(key) { + return Ok(v.clone()); + } + let backend_value = self.backend.get(key, prefix)?; + self.proof_recorder + .write() + .insert(key.clone(), backend_value.clone()); + Ok(backend_value) + } } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> std::fmt::Debug - for ProvingBackend<'a, S, H> + for ProvingBackend<'a, S, H> { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "ProvingBackend") - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, 
"ProvingBackend") + } } impl<'a, S, H> Backend for ProvingBackend<'a, S, H> - where - S: 'a + TrieBackendStorage, - H: 'a + Hasher, - H::Out: Ord + Codec, +where + S: 'a + TrieBackendStorage, + H: 'a + Hasher, + H::Out: Ord + Codec, { - type Error = String; - type Transaction = S::Overlay; - type TrieBackendStorage = S; - - fn storage(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.storage(key) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.0.child_storage(storage_key, child_info, key) - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ) { - self.0.for_keys_in_child_storage(storage_key, child_info, f) - } - - fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { - self.0.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result>, Self::Error> { - self.0.next_child_storage_key(storage_key, child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.0.for_key_values_with_prefix(prefix, f) - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - self.0.for_child_keys_with_prefix(storage_key, child_info, prefix, f) - } - - fn pairs(&self) -> Vec<(Vec, Vec)> { - self.0.pairs() - } - - fn keys(&self, prefix: &[u8]) -> Vec> { - self.0.keys(prefix) - } - - fn child_keys( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - ) -> Vec> { - self.0.child_keys(storage_key, child_info, prefix) - } - - fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) - where I: IntoIterator, Option>)> - { - self.0.storage_root(delta) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - 
delta: I, - ) -> (H::Out, bool, Self::Transaction) - where - I: IntoIterator, Option>)>, - H::Out: Ord - { - self.0.child_storage_root(storage_key, child_info, delta) - } - - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } - - fn usage_info(&self) -> crate::stats::UsageInfo { - self.0.usage_info() - } + type Error = String; + type Transaction = S::Overlay; + type TrieBackendStorage = S; + + fn storage(&self, key: &[u8]) -> Result>, Self::Error> { + self.0.storage(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.0.child_storage(storage_key, child_info, key) + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.0.for_keys_in_child_storage(storage_key, child_info, f) + } + + fn next_storage_key(&self, key: &[u8]) -> Result>, Self::Error> { + self.0.next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result>, Self::Error> { + self.0.next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.0.for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.0.for_key_values_with_prefix(prefix, f) + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + f: F, + ) { + self.0 + .for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + + fn pairs(&self) -> Vec<(Vec, Vec)> { + self.0.pairs() + } + + fn keys(&self, prefix: &[u8]) -> Vec> { + self.0.keys(prefix) + } + + fn child_keys(&self, storage_key: &[u8], child_info: ChildInfo, prefix: &[u8]) -> Vec> { + self.0.child_keys(storage_key, child_info, prefix) + } + + fn storage_root(&self, delta: I) -> (H::Out, Self::Transaction) + where + I: IntoIterator, Option>)>, + { + 
self.0.storage_root(delta) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) + where + I: IntoIterator, Option>)>, + H::Out: Ord, + { + self.0.child_storage_root(storage_key, child_info, delta) + } + + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) {} + + fn usage_info(&self) -> crate::stats::UsageInfo { + self.0.usage_info() + } } /// Create proof check backend. pub fn create_proof_check_backend( - root: H::Out, - proof: StorageProof, + root: H::Out, + proof: StorageProof, ) -> Result, H>, Box> where - H: Hasher, - H::Out: Codec, + H: Hasher, + H::Out: Codec, { - let db = proof.into_memory_db(); + let db = proof.into_memory_db(); - if db.contains(&root, EMPTY_PREFIX) { - Ok(TrieBackend::new(db, root)) - } else { - Err(Box::new(ExecutionError::InvalidProof)) - } + if db.contains(&root, EMPTY_PREFIX) { + Ok(TrieBackend::new(db, root)) + } else { + Err(Box::new(ExecutionError::InvalidProof)) + } } #[cfg(test)] mod tests { - use crate::InMemoryBackend; - use crate::trie_backend::tests::test_trie; - use super::*; - use sp_core::storage::ChildStorageKey; - use crate::proving_backend::create_proof_check_backend; - use sp_trie::PrefixedMemoryDB; - use sp_runtime::traits::BlakeTwo256; - - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); - const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); - - fn test_proving<'a>( - trie_backend: &'a TrieBackend,BlakeTwo256>, - ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { - ProvingBackend::new(trie_backend) - } - - #[test] - fn proof_is_empty_until_value_is_read() { - let trie_backend = test_trie(); - assert!(test_proving(&trie_backend).extract_proof().is_empty()); - } - - #[test] - fn proof_is_non_empty_after_value_is_read() { - let trie_backend = test_trie(); - let backend = test_proving(&trie_backend); - 
assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); - assert!(!backend.extract_proof().is_empty()); - } - - #[test] - fn proof_is_invalid_when_does_not_contains_root() { - use sp_core::H256; - let result = create_proof_check_backend::( - H256::from_low_u64_be(1), - StorageProof::empty() - ); - assert!(result.is_err()); - } - - #[test] - fn passes_through_backend_calls() { - let trie_backend = test_trie(); - let proving_backend = test_proving(&trie_backend); - assert_eq!(trie_backend.storage(b"key").unwrap(), proving_backend.storage(b"key").unwrap()); - assert_eq!(trie_backend.pairs(), proving_backend.pairs()); - - let (trie_root, mut trie_mdb) = trie_backend.storage_root(::std::iter::empty()); - let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); - assert_eq!(trie_root, proving_root); - assert_eq!(trie_mdb.drain(), proving_mdb.drain()); - } - - #[test] - fn proof_recorded_and_checked() { - let contents = (0..64).map(|i| (vec![i], Some(vec![i]))).collect::>(); - let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(vec![(None, contents)]); - let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; - (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); - - let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty()).0; - assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - } - - #[test] - fn proof_recorded_and_checked_with_child() { - let subtrie1 = 
ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); - let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); - let own1 = subtrie1.into_owned(); - let own2 = subtrie2.into_owned(); - let contents = vec![ - (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own1.clone(), CHILD_INFO_1.to_owned())), - (28..65).map(|i| (vec![i], Some(vec![i]))).collect()), - (Some((own2.clone(), CHILD_INFO_2.to_owned())), - (10..15).map(|i| (vec![i], Some(vec![i]))).collect()), - ]; - let in_memory = InMemoryBackend::::default(); - let mut in_memory = in_memory.update(contents); - let in_memory_root = in_memory.full_storage_root::<_, Vec<_>, _>( - ::std::iter::empty(), - in_memory.child_storage_keys().map(|k|(k.0.to_vec(), Vec::new(), k.1.to_owned())) - ).0; - (0..64).for_each(|i| assert_eq!( - in_memory.storage(&[i]).unwrap().unwrap(), - vec![i] - )); - (28..65).for_each(|i| assert_eq!( - in_memory.child_storage(&own1[..], CHILD_INFO_1, &[i]).unwrap().unwrap(), - vec![i] - )); - (10..15).for_each(|i| assert_eq!( - in_memory.child_storage(&own2[..], CHILD_INFO_2, &[i]).unwrap().unwrap(), - vec![i] - )); - - let trie = in_memory.as_trie_backend().unwrap(); - let trie_root = trie.storage_root(::std::iter::empty()).0; - assert_eq!(in_memory_root, trie_root); - (0..64).for_each(|i| assert_eq!( - trie.storage(&[i]).unwrap().unwrap(), - vec![i] - )); - - let proving = ProvingBackend::new(trie); - assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); - - let proof = proving.extract_proof(); - - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - assert!(proof_check.storage(&[0]).is_err()); - assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); - // note that it is include in root because proof close - assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); - assert_eq!(proof_check.storage(&[64]).unwrap(), None); - - let proving = 
ProvingBackend::new(trie); - assert_eq!(proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), Ok(Some(vec![64]))); - - let proof = proving.extract_proof(); - let proof_check = create_proof_check_backend::( - in_memory_root.into(), - proof - ).unwrap(); - assert_eq!( - proof_check.child_storage(&own1[..], CHILD_INFO_1, &[64]).unwrap().unwrap(), - vec![64] - ); - } + use super::*; + use crate::proving_backend::create_proof_check_backend; + use crate::trie_backend::tests::test_trie; + use crate::InMemoryBackend; + use sp_core::storage::ChildStorageKey; + use sp_runtime::traits::BlakeTwo256; + use sp_trie::PrefixedMemoryDB; + + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_1"); + const CHILD_INFO_2: ChildInfo<'static> = ChildInfo::new_default(b"unique_id_2"); + + fn test_proving<'a>( + trie_backend: &'a TrieBackend, BlakeTwo256>, + ) -> ProvingBackend<'a, PrefixedMemoryDB, BlakeTwo256> { + ProvingBackend::new(trie_backend) + } + + #[test] + fn proof_is_empty_until_value_is_read() { + let trie_backend = test_trie(); + assert!(test_proving(&trie_backend).extract_proof().is_empty()); + } + + #[test] + fn proof_is_non_empty_after_value_is_read() { + let trie_backend = test_trie(); + let backend = test_proving(&trie_backend); + assert_eq!(backend.storage(b"key").unwrap(), Some(b"value".to_vec())); + assert!(!backend.extract_proof().is_empty()); + } + + #[test] + fn proof_is_invalid_when_does_not_contains_root() { + use sp_core::H256; + let result = create_proof_check_backend::( + H256::from_low_u64_be(1), + StorageProof::empty(), + ); + assert!(result.is_err()); + } + + #[test] + fn passes_through_backend_calls() { + let trie_backend = test_trie(); + let proving_backend = test_proving(&trie_backend); + assert_eq!( + trie_backend.storage(b"key").unwrap(), + proving_backend.storage(b"key").unwrap() + ); + assert_eq!(trie_backend.pairs(), proving_backend.pairs()); + + let (trie_root, mut trie_mdb) = 
trie_backend.storage_root(::std::iter::empty()); + let (proving_root, mut proving_mdb) = proving_backend.storage_root(::std::iter::empty()); + assert_eq!(trie_root, proving_root); + assert_eq!(trie_mdb.drain(), proving_mdb.drain()); + } + + #[test] + fn proof_recorded_and_checked() { + let contents = (0..64) + .map(|i| (vec![i], Some(vec![i]))) + .collect::>(); + let in_memory = InMemoryBackend::::default(); + let mut in_memory = in_memory.update(vec![(None, contents)]); + let in_memory_root = in_memory.storage_root(::std::iter::empty()).0; + (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + + let trie = in_memory.as_trie_backend().unwrap(); + let trie_root = trie.storage_root(::std::iter::empty()).0; + assert_eq!(in_memory_root, trie_root); + (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); + + let proving = ProvingBackend::new(trie); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + + let proof = proving.extract_proof(); + + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + } + + #[test] + fn proof_recorded_and_checked_with_child() { + let subtrie1 = ChildStorageKey::from_slice(b":child_storage:default:sub1").unwrap(); + let subtrie2 = ChildStorageKey::from_slice(b":child_storage:default:sub2").unwrap(); + let own1 = subtrie1.into_owned(); + let own2 = subtrie2.into_owned(); + let contents = vec![ + (None, (0..64).map(|i| (vec![i], Some(vec![i]))).collect()), + ( + Some((own1.clone(), CHILD_INFO_1.to_owned())), + (28..65).map(|i| (vec![i], Some(vec![i]))).collect(), + ), + ( + Some((own2.clone(), CHILD_INFO_2.to_owned())), + (10..15).map(|i| (vec![i], Some(vec![i]))).collect(), + ), + ]; + let in_memory = InMemoryBackend::::default(); + let mut in_memory = in_memory.update(contents); + let in_memory_root = in_memory + .full_storage_root::<_, Vec<_>, _>( + 
::std::iter::empty(), + in_memory + .child_storage_keys() + .map(|k| (k.0.to_vec(), Vec::new(), k.1.to_owned())), + ) + .0; + (0..64).for_each(|i| assert_eq!(in_memory.storage(&[i]).unwrap().unwrap(), vec![i])); + (28..65).for_each(|i| { + assert_eq!( + in_memory + .child_storage(&own1[..], CHILD_INFO_1, &[i]) + .unwrap() + .unwrap(), + vec![i] + ) + }); + (10..15).for_each(|i| { + assert_eq!( + in_memory + .child_storage(&own2[..], CHILD_INFO_2, &[i]) + .unwrap() + .unwrap(), + vec![i] + ) + }); + + let trie = in_memory.as_trie_backend().unwrap(); + let trie_root = trie.storage_root(::std::iter::empty()).0; + assert_eq!(in_memory_root, trie_root); + (0..64).for_each(|i| assert_eq!(trie.storage(&[i]).unwrap().unwrap(), vec![i])); + + let proving = ProvingBackend::new(trie); + assert_eq!(proving.storage(&[42]).unwrap().unwrap(), vec![42]); + + let proof = proving.extract_proof(); + + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert!(proof_check.storage(&[0]).is_err()); + assert_eq!(proof_check.storage(&[42]).unwrap().unwrap(), vec![42]); + // note that it is include in root because proof close + assert_eq!(proof_check.storage(&[41]).unwrap().unwrap(), vec![41]); + assert_eq!(proof_check.storage(&[64]).unwrap(), None); + + let proving = ProvingBackend::new(trie); + assert_eq!( + proving.child_storage(&own1[..], CHILD_INFO_1, &[64]), + Ok(Some(vec![64])) + ); + + let proof = proving.extract_proof(); + let proof_check = + create_proof_check_backend::(in_memory_root.into(), proof).unwrap(); + assert_eq!( + proof_check + .child_storage(&own1[..], CHILD_INFO_1, &[64]) + .unwrap() + .unwrap(), + vec![64] + ); + } } diff --git a/primitives/state-machine/src/stats.rs b/primitives/state-machine/src/stats.rs index 8fa03344ad..409a7e9f36 100644 --- a/primitives/state-machine/src/stats.rs +++ b/primitives/state-machine/src/stats.rs @@ -16,110 +16,110 @@ //! 
Usage statistics for state db -use std::time::{Instant, Duration}; use std::cell::RefCell; +use std::time::{Duration, Instant}; /// Measured count of operations and total bytes. #[derive(Clone, Debug, Default)] pub struct UsageUnit { - /// Number of operations. - pub ops: u64, - /// Number of bytes. - pub bytes: u64, + /// Number of operations. + pub ops: u64, + /// Number of bytes. + pub bytes: u64, } /// Usage statistics for state backend. #[derive(Clone, Debug)] pub struct UsageInfo { - /// Read statistics (total). - pub reads: UsageUnit, - /// Write statistics (total). - pub writes: UsageUnit, - /// Write trie nodes statistics. - pub nodes_writes: UsageUnit, - /// Write into cached state machine - /// change overlay. - pub overlay_writes: UsageUnit, - /// Removed trie nodes statistics. - pub removed_nodes: UsageUnit, - /// Cache read statistics. - pub cache_reads: UsageUnit, - /// Modified value read statistics. - pub modified_reads: UsageUnit, - /// Memory used. - pub memory: usize, + /// Read statistics (total). + pub reads: UsageUnit, + /// Write statistics (total). + pub writes: UsageUnit, + /// Write trie nodes statistics. + pub nodes_writes: UsageUnit, + /// Write into cached state machine + /// change overlay. + pub overlay_writes: UsageUnit, + /// Removed trie nodes statistics. + pub removed_nodes: UsageUnit, + /// Cache read statistics. + pub cache_reads: UsageUnit, + /// Modified value read statistics. + pub modified_reads: UsageUnit, + /// Memory used. + pub memory: usize, - /// Moment at which current statistics has been started being collected. - pub started: Instant, - /// Timespan of the statistics. - pub span: Duration, + /// Moment at which current statistics has been started being collected. + pub started: Instant, + /// Timespan of the statistics. + pub span: Duration, } /// Accumulated usage statistics specific to state machine /// crate. 
#[derive(Debug, Default, Clone)] pub struct StateMachineStats { - /// Number of read query from runtime - /// that hit a modified value (in state - /// machine overlay). - pub reads_modified: RefCell, - /// Size in byte of read queries that - /// hit a modified value. - pub bytes_read_modified: RefCell, - /// Number of time a write operation - /// occurs into the state machine overlay. - pub writes_overlay: RefCell, - /// Size in bytes of the writes overlay - /// operation. - pub bytes_writes_overlay: RefCell, + /// Number of read query from runtime + /// that hit a modified value (in state + /// machine overlay). + pub reads_modified: RefCell, + /// Size in byte of read queries that + /// hit a modified value. + pub bytes_read_modified: RefCell, + /// Number of time a write operation + /// occurs into the state machine overlay. + pub writes_overlay: RefCell, + /// Size in bytes of the writes overlay + /// operation. + pub bytes_writes_overlay: RefCell, } impl StateMachineStats { - /// Accumulates some registered stats. - pub fn add(&self, other: &StateMachineStats) { - *self.reads_modified.borrow_mut() += *other.reads_modified.borrow(); - *self.bytes_read_modified.borrow_mut() += *other.bytes_read_modified.borrow(); - *self.writes_overlay.borrow_mut() += *other.writes_overlay.borrow(); - *self.bytes_writes_overlay.borrow_mut() += *other.bytes_writes_overlay.borrow(); - } + /// Accumulates some registered stats. + pub fn add(&self, other: &StateMachineStats) { + *self.reads_modified.borrow_mut() += *other.reads_modified.borrow(); + *self.bytes_read_modified.borrow_mut() += *other.bytes_read_modified.borrow(); + *self.writes_overlay.borrow_mut() += *other.writes_overlay.borrow(); + *self.bytes_writes_overlay.borrow_mut() += *other.bytes_writes_overlay.borrow(); + } } impl UsageInfo { - /// Empty statistics. - /// - /// Means no data was collected. 
- pub fn empty() -> Self { - Self { - reads: UsageUnit::default(), - writes: UsageUnit::default(), - overlay_writes: UsageUnit::default(), - nodes_writes: UsageUnit::default(), - removed_nodes: UsageUnit::default(), - cache_reads: UsageUnit::default(), - modified_reads: UsageUnit::default(), - memory: 0, - started: Instant::now(), - span: Default::default(), - } - } - /// Add collected state machine to this state. - pub fn include_state_machine_states(&mut self, count: &StateMachineStats) { - self.modified_reads.ops += *count.reads_modified.borrow(); - self.modified_reads.bytes += *count.bytes_read_modified.borrow(); - self.overlay_writes.ops += *count.writes_overlay.borrow(); - self.overlay_writes.bytes += *count.bytes_writes_overlay.borrow(); - } + /// Empty statistics. + /// + /// Means no data was collected. + pub fn empty() -> Self { + Self { + reads: UsageUnit::default(), + writes: UsageUnit::default(), + overlay_writes: UsageUnit::default(), + nodes_writes: UsageUnit::default(), + removed_nodes: UsageUnit::default(), + cache_reads: UsageUnit::default(), + modified_reads: UsageUnit::default(), + memory: 0, + started: Instant::now(), + span: Default::default(), + } + } + /// Add collected state machine to this state. + pub fn include_state_machine_states(&mut self, count: &StateMachineStats) { + self.modified_reads.ops += *count.reads_modified.borrow(); + self.modified_reads.bytes += *count.bytes_read_modified.borrow(); + self.overlay_writes.ops += *count.writes_overlay.borrow(); + self.overlay_writes.bytes += *count.bytes_writes_overlay.borrow(); + } } impl StateMachineStats { - /// Tally one read modified operation, of some length. - pub fn tally_read_modified(&self, data_bytes: u64) { - *self.reads_modified.borrow_mut() += 1; - *self.bytes_read_modified.borrow_mut() += data_bytes; - } - /// Tally one write overlay operation, of some length. 
- pub fn tally_write_overlay(&self, data_bytes: u64) { - *self.writes_overlay.borrow_mut() += 1; - *self.bytes_writes_overlay.borrow_mut() += data_bytes; - } + /// Tally one read modified operation, of some length. + pub fn tally_read_modified(&self, data_bytes: u64) { + *self.reads_modified.borrow_mut() += 1; + *self.bytes_read_modified.borrow_mut() += data_bytes; + } + /// Tally one write overlay operation, of some length. + pub fn tally_write_overlay(&self, data_bytes: u64) { + *self.writes_overlay.borrow_mut() += 1; + *self.bytes_writes_overlay.borrow_mut() += data_bytes; + } } diff --git a/primitives/state-machine/src/testing.rs b/primitives/state-machine/src/testing.rs index 2b971d816a..bd699705ba 100644 --- a/primitives/state-machine/src/testing.rs +++ b/primitives/state-machine/src/testing.rs @@ -16,238 +16,259 @@ //! Test implementation for Externalities. -use std::any::{Any, TypeId}; -use codec::Decode; -use hash_db::Hasher; use crate::{ - backend::Backend, OverlayedChanges, StorageTransactionCache, ext::Ext, InMemoryBackend, - StorageKey, StorageValue, - changes_trie::{ - Configuration as ChangesTrieConfiguration, - InMemoryStorage as ChangesTrieInMemoryStorage, - BlockNumber as ChangesTrieBlockNumber, - State as ChangesTrieState, - }, -}; -use sp_core::{ - storage::{ - well_known_keys::{CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES, is_child_storage_key}, - Storage, - }, + backend::Backend, + changes_trie::{ + BlockNumber as ChangesTrieBlockNumber, Configuration as ChangesTrieConfiguration, + InMemoryStorage as ChangesTrieInMemoryStorage, State as ChangesTrieState, + }, + ext::Ext, + InMemoryBackend, OverlayedChanges, StorageKey, StorageTransactionCache, StorageValue, }; +use codec::Decode; use codec::Encode; -use sp_externalities::{Extensions, Extension}; +use hash_db::Hasher; +use sp_core::storage::{ + well_known_keys::{is_child_storage_key, CHANGES_TRIE_CONFIG, CODE, HEAP_PAGES}, + Storage, +}; +use sp_externalities::{Extension, Extensions}; +use 
std::any::{Any, TypeId}; /// Simple HashMap-based Externalities impl. pub struct TestExternalities where - H::Out: codec::Codec, + H::Out: codec::Codec, { - overlay: OverlayedChanges, - storage_transaction_cache: StorageTransactionCache< - as Backend>::Transaction, H, N - >, - backend: InMemoryBackend, - changes_trie_config: Option, - changes_trie_storage: ChangesTrieInMemoryStorage, - extensions: Extensions, + overlay: OverlayedChanges, + storage_transaction_cache: + StorageTransactionCache< as Backend>::Transaction, H, N>, + backend: InMemoryBackend, + changes_trie_config: Option, + changes_trie_storage: ChangesTrieInMemoryStorage, + extensions: Extensions, } impl TestExternalities - where - H::Out: Ord + 'static + codec::Codec +where + H::Out: Ord + 'static + codec::Codec, { - /// Get externalities implementation. - pub fn ext(&mut self) -> Ext> { - Ext::new( - &mut self.overlay, - &mut self.storage_transaction_cache, - &self.backend, - match self.changes_trie_config.clone() { - Some(config) => Some(ChangesTrieState { - config, - zero: 0.into(), - storage: &self.changes_trie_storage, - }), - None => None, - }, - Some(&mut self.extensions), - ) - } - - /// Create a new instance of `TestExternalities` with storage. - pub fn new(storage: Storage) -> Self { - Self::new_with_code(&[], storage) - } - - /// New empty test externalities. - pub fn new_empty() -> Self { - Self::new_with_code(&[], Storage::default()) - } - - /// Create a new instance of `TestExternalities` with code and storage. 
- pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { - let mut overlay = OverlayedChanges::default(); - let changes_trie_config = storage.top.get(CHANGES_TRIE_CONFIG) - .and_then(|v| Decode::decode(&mut &v[..]).ok()); - overlay.set_collect_extrinsics(changes_trie_config.is_some()); - - assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); - assert!(storage.children.keys().all(|key| is_child_storage_key(key))); - - storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); - storage.top.insert(CODE.to_vec(), code.to_vec()); - - let mut extensions = Extensions::default(); - extensions.register(sp_core::traits::TaskExecutorExt(sp_core::tasks::executor())); - - TestExternalities { - overlay, - changes_trie_config, - extensions, - changes_trie_storage: ChangesTrieInMemoryStorage::new(), - backend: storage.into(), - storage_transaction_cache: Default::default(), - } - } - - /// Insert key/value into backend - pub fn insert(&mut self, k: StorageKey, v: StorageValue) { - self.backend = self.backend.update(vec![(None, vec![(k, Some(v))])]); - } - - /// Registers the given extension for this instance. - pub fn register_extension(&mut self, ext: E) { - self.extensions.register(ext); - } - - /// Get mutable reference to changes trie storage. - pub fn changes_trie_storage(&mut self) -> &mut ChangesTrieInMemoryStorage { - &mut self.changes_trie_storage - } - - /// Return a new backend with all pending value. 
- pub fn commit_all(&self) -> InMemoryBackend { - let top: Vec<_> = self.overlay.committed.top.clone().into_iter() - .chain(self.overlay.prospective.top.clone().into_iter()) - .map(|(k, v)| (k, v.value)).collect(); - let mut transaction = vec![(None, top)]; - - self.overlay.committed.children.clone().into_iter() - .chain(self.overlay.prospective.children.clone().into_iter()) - .for_each(|(keyspace, (map, child_info))| { - transaction.push(( - Some((keyspace, child_info)), - map.into_iter() - .map(|(k, v)| (k, v.value)) - .collect::>(), - )) - }); - - self.backend.update(transaction) - } - - /// Execute the given closure while `self` is set as externalities. - /// - /// Returns the result of the given closure. - pub fn execute_with(&mut self, execute: impl FnOnce() -> R) -> R { - let mut ext = self.ext(); - sp_externalities::set_and_run_with_externalities(&mut ext, execute) - } + /// Get externalities implementation. + pub fn ext(&mut self) -> Ext> { + Ext::new( + &mut self.overlay, + &mut self.storage_transaction_cache, + &self.backend, + match self.changes_trie_config.clone() { + Some(config) => Some(ChangesTrieState { + config, + zero: 0.into(), + storage: &self.changes_trie_storage, + }), + None => None, + }, + Some(&mut self.extensions), + ) + } + + /// Create a new instance of `TestExternalities` with storage. + pub fn new(storage: Storage) -> Self { + Self::new_with_code(&[], storage) + } + + /// New empty test externalities. + pub fn new_empty() -> Self { + Self::new_with_code(&[], Storage::default()) + } + + /// Create a new instance of `TestExternalities` with code and storage. 
+ pub fn new_with_code(code: &[u8], mut storage: Storage) -> Self { + let mut overlay = OverlayedChanges::default(); + let changes_trie_config = storage + .top + .get(CHANGES_TRIE_CONFIG) + .and_then(|v| Decode::decode(&mut &v[..]).ok()); + overlay.set_collect_extrinsics(changes_trie_config.is_some()); + + assert!(storage.top.keys().all(|key| !is_child_storage_key(key))); + assert!(storage.children.keys().all(|key| is_child_storage_key(key))); + + storage.top.insert(HEAP_PAGES.to_vec(), 8u64.encode()); + storage.top.insert(CODE.to_vec(), code.to_vec()); + + let mut extensions = Extensions::default(); + extensions.register(sp_core::traits::TaskExecutorExt(sp_core::tasks::executor())); + + TestExternalities { + overlay, + changes_trie_config, + extensions, + changes_trie_storage: ChangesTrieInMemoryStorage::new(), + backend: storage.into(), + storage_transaction_cache: Default::default(), + } + } + + /// Insert key/value into backend + pub fn insert(&mut self, k: StorageKey, v: StorageValue) { + self.backend = self.backend.update(vec![(None, vec![(k, Some(v))])]); + } + + /// Registers the given extension for this instance. + pub fn register_extension(&mut self, ext: E) { + self.extensions.register(ext); + } + + /// Get mutable reference to changes trie storage. + pub fn changes_trie_storage(&mut self) -> &mut ChangesTrieInMemoryStorage { + &mut self.changes_trie_storage + } + + /// Return a new backend with all pending value. 
+ pub fn commit_all(&self) -> InMemoryBackend { + let top: Vec<_> = self + .overlay + .committed + .top + .clone() + .into_iter() + .chain(self.overlay.prospective.top.clone().into_iter()) + .map(|(k, v)| (k, v.value)) + .collect(); + let mut transaction = vec![(None, top)]; + + self.overlay + .committed + .children + .clone() + .into_iter() + .chain(self.overlay.prospective.children.clone().into_iter()) + .for_each(|(keyspace, (map, child_info))| { + transaction.push(( + Some((keyspace, child_info)), + map.into_iter() + .map(|(k, v)| (k, v.value)) + .collect::>(), + )) + }); + + self.backend.update(transaction) + } + + /// Execute the given closure while `self` is set as externalities. + /// + /// Returns the result of the given closure. + pub fn execute_with(&mut self, execute: impl FnOnce() -> R) -> R { + let mut ext = self.ext(); + sp_externalities::set_and_run_with_externalities(&mut ext, execute) + } } impl std::fmt::Debug for TestExternalities - where H::Out: codec::Codec, +where + H::Out: codec::Codec, { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "overlay: {:?}\nbackend: {:?}", self.overlay, self.backend.pairs()) - } + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!( + f, + "overlay: {:?}\nbackend: {:?}", + self.overlay, + self.backend.pairs() + ) + } } impl PartialEq for TestExternalities - where - H::Out: Ord + 'static + codec::Codec +where + H::Out: Ord + 'static + codec::Codec, { - /// This doesn't test if they are in the same state, only if they contains the - /// same data at this state - fn eq(&self, other: &TestExternalities) -> bool { - self.commit_all().eq(&other.commit_all()) - } + /// This doesn't test if they are in the same state, only if they contains the + /// same data at this state + fn eq(&self, other: &TestExternalities) -> bool { + self.commit_all().eq(&other.commit_all()) + } } impl Default for TestExternalities - where - H::Out: Ord + 'static + codec::Codec, +where + 
H::Out: Ord + 'static + codec::Codec, { - fn default() -> Self { Self::new(Default::default()) } + fn default() -> Self { + Self::new(Default::default()) + } } impl From for TestExternalities - where - H::Out: Ord + 'static + codec::Codec, +where + H::Out: Ord + 'static + codec::Codec, { - fn from(storage: Storage) -> Self { - Self::new(storage) - } + fn from(storage: Storage) -> Self { + Self::new(storage) + } } -impl sp_externalities::ExtensionStore for TestExternalities where - H: Hasher, - H::Out: codec::Codec, - N: ChangesTrieBlockNumber, +impl sp_externalities::ExtensionStore for TestExternalities +where + H: Hasher, + H::Out: codec::Codec, + N: ChangesTrieBlockNumber, { - fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { - self.extensions.get_mut(type_id) - } - - fn register_extension_with_type_id( - &mut self, - type_id: TypeId, - extension: Box, - ) -> Result<(), sp_externalities::Error> { - self.extensions.register_with_type_id(type_id, extension) - } - - fn deregister_extension_by_type_id(&mut self, type_id: TypeId) -> Result<(), sp_externalities::Error> { - self.extensions - .deregister(type_id) - .expect("There should be an extension we try to remove in TestExternalities"); - Ok(()) - } + fn extension_by_type_id(&mut self, type_id: TypeId) -> Option<&mut dyn Any> { + self.extensions.get_mut(type_id) + } + + fn register_extension_with_type_id( + &mut self, + type_id: TypeId, + extension: Box, + ) -> Result<(), sp_externalities::Error> { + self.extensions.register_with_type_id(type_id, extension) + } + + fn deregister_extension_by_type_id( + &mut self, + type_id: TypeId, + ) -> Result<(), sp_externalities::Error> { + self.extensions + .deregister(type_id) + .expect("There should be an extension we try to remove in TestExternalities"); + Ok(()) + } } #[cfg(test)] mod tests { - use super::*; - use sp_core::traits::Externalities; - use sp_runtime::traits::BlakeTwo256; - use hex_literal::hex; - - #[test] - fn commit_should_work() 
{ - let mut ext = TestExternalities::::default(); - let mut ext = ext.ext(); - ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); - ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); - ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); - const ROOT: [u8; 32] = hex!("2a340d3dfd52f5992c6b117e9e45f479e6da5afffafeb26ab619cf137a95aeb8"); - assert_eq!(&ext.storage_root()[..], &ROOT); - } - - #[test] - fn set_and_retrieve_code() { - let mut ext = TestExternalities::::default(); - let mut ext = ext.ext(); - - let code = vec![1, 2, 3]; - ext.set_storage(CODE.to_vec(), code.clone()); - - assert_eq!(&ext.storage(CODE).unwrap(), &code); - } - - #[test] - fn check_send() { - fn assert_send() {} - assert_send::>(); - } + use super::*; + use hex_literal::hex; + use sp_core::traits::Externalities; + use sp_runtime::traits::BlakeTwo256; + + #[test] + fn commit_should_work() { + let mut ext = TestExternalities::::default(); + let mut ext = ext.ext(); + ext.set_storage(b"doe".to_vec(), b"reindeer".to_vec()); + ext.set_storage(b"dog".to_vec(), b"puppy".to_vec()); + ext.set_storage(b"dogglesworth".to_vec(), b"cat".to_vec()); + const ROOT: [u8; 32] = + hex!("2a340d3dfd52f5992c6b117e9e45f479e6da5afffafeb26ab619cf137a95aeb8"); + assert_eq!(&ext.storage_root()[..], &ROOT); + } + + #[test] + fn set_and_retrieve_code() { + let mut ext = TestExternalities::::default(); + let mut ext = ext.ext(); + + let code = vec![1, 2, 3]; + ext.set_storage(CODE.to_vec(), code.clone()); + + assert_eq!(&ext.storage(CODE).unwrap(), &code); + } + + #[test] + fn check_send() { + fn assert_send() {} + assert_send::>(); + } } diff --git a/primitives/state-machine/src/trie_backend.rs b/primitives/state-machine/src/trie_backend.rs index f88e306a2f..0a2be0c181 100644 --- a/primitives/state-machine/src/trie_backend.rs +++ b/primitives/state-machine/src/trie_backend.rs @@ -16,345 +16,365 @@ //! Trie-based state machine backend. 
-use log::{warn, debug}; -use hash_db::Hasher; -use sp_trie::{Trie, delta_trie_root, default_child_trie_root, child_delta_trie_root}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; -use sp_core::storage::ChildInfo; -use codec::{Codec, Decode}; use crate::{ - StorageKey, StorageValue, Backend, - trie_backend_essence::{TrieBackendEssence, TrieBackendStorage, Ephemeral}, + trie_backend_essence::{Ephemeral, TrieBackendEssence, TrieBackendStorage}, + Backend, StorageKey, StorageValue, }; +use codec::{Codec, Decode}; +use hash_db::Hasher; +use log::{debug, warn}; +use sp_core::storage::ChildInfo; +use sp_trie::trie_types::{Layout, TrieDB, TrieError}; +use sp_trie::{child_delta_trie_root, default_child_trie_root, delta_trie_root, Trie}; /// Patricia trie-based backend. Transaction type is an overlay of changes to commit. pub struct TrieBackend, H: Hasher> { - essence: TrieBackendEssence, + essence: TrieBackendEssence, } -impl, H: Hasher> TrieBackend where H::Out: Codec { - /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { - TrieBackend { - essence: TrieBackendEssence::new(storage, root), - } - } - - /// Get backend essence reference. - pub fn essence(&self) -> &TrieBackendEssence { - &self.essence - } - - /// Get backend storage reference. - pub fn backend_storage(&self) -> &S { - self.essence.backend_storage() - } - - /// Get trie root. - pub fn root(&self) -> &H::Out { - self.essence.root() - } - - /// Consumes self and returns underlying storage. - pub fn into_storage(self) -> S { - self.essence.into_storage() - } +impl, H: Hasher> TrieBackend +where + H::Out: Codec, +{ + /// Create new trie-based backend. + pub fn new(storage: S, root: H::Out) -> Self { + TrieBackend { + essence: TrieBackendEssence::new(storage, root), + } + } + + /// Get backend essence reference. + pub fn essence(&self) -> &TrieBackendEssence { + &self.essence + } + + /// Get backend storage reference. 
+ pub fn backend_storage(&self) -> &S { + self.essence.backend_storage() + } + + /// Get trie root. + pub fn root(&self) -> &H::Out { + self.essence.root() + } + + /// Consumes self and returns underlying storage. + pub fn into_storage(self) -> S { + self.essence.into_storage() + } } impl, H: Hasher> std::fmt::Debug for TrieBackend { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "TrieBackend") - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "TrieBackend") + } } -impl, H: Hasher> Backend for TrieBackend where - H::Out: Ord + Codec, +impl, H: Hasher> Backend for TrieBackend +where + H::Out: Ord + Codec, { - type Error = String; - type Transaction = S::Overlay; - type TrieBackendStorage = S; - - fn storage(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.storage(key) - } - - fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.essence.child_storage(storage_key, child_info, key) - } - - fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { - self.essence.next_storage_key(key) - } - - fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, Self::Error> { - self.essence.next_child_storage_key(storage_key, child_info, key) - } - - fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_keys_with_prefix(prefix, f) - } - - fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.essence.for_key_values_with_prefix(prefix, f) - } - - fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ) { - self.essence.for_keys_in_child_storage(storage_key, child_info, f) - } - - fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - f: F, - ) { - self.essence.for_child_keys_with_prefix(storage_key, child_info, prefix, f) - } - - fn pairs(&self) -> 
Vec<(StorageKey, StorageValue)> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); - - let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(&eph, self.essence.root())?; - let mut v = Vec::new(); - for x in trie.iter()? { - let (key, value) = x?; - v.push((key.to_vec(), value.to_vec())); - } - - Ok(v) - }; - - match collect_all() { - Ok(v) => v, - Err(e) => { - debug!(target: "trie", "Error extracting trie values: {}", e); - Vec::new() - } - } - } - - fn keys(&self, prefix: &[u8]) -> Vec { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); - - let collect_all = || -> Result<_, Box>> { - let trie = TrieDB::::new(&eph, self.essence.root())?; - let mut v = Vec::new(); - for x in trie.iter()? { - let (key, _) = x?; - if key.starts_with(prefix) { - v.push(key.to_vec()); - } - } - - Ok(v) - }; - - collect_all().map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)).unwrap_or_default() - } - - fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) - where I: IntoIterator)> - { - let mut write_overlay = S::Overlay::default(); - let mut root = *self.essence.root(); - - { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); - - match delta_trie_root::, _, _, _, _>(&mut eph, root, delta) { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), - } - } - - (root, write_overlay) - } - - fn child_storage_root( - &self, - storage_key: &[u8], - child_info: ChildInfo, - delta: I, - ) -> (H::Out, bool, Self::Transaction) - where - I: IntoIterator)>, - H::Out: Ord, - { - let default_root = default_child_trie_root::>(storage_key); - - let mut write_overlay = S::Overlay::default(); - let mut root = match self.storage(storage_key) { - Ok(value) => - value.and_then(|r| Decode::decode(&mut 
&r[..]).ok()).unwrap_or(default_root.clone()), - Err(e) => { - warn!(target: "trie", "Failed to read child storage root: {}", e); - default_root.clone() - }, - }; - - { - let mut eph = Ephemeral::new( - self.essence.backend_storage(), - &mut write_overlay, - ); - - match child_delta_trie_root::, _, _, _, _, _>( - storage_key, - child_info.keyspace(), - &mut eph, - root, - delta - ) { - Ok(ret) => root = ret, - Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), - } - } - - let is_default = root == default_root; - - (root, is_default, write_overlay) - } - - fn as_trie_backend(&mut self) -> Option<&TrieBackend> { - Some(self) - } - - fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) { } - - fn usage_info(&self) -> crate::UsageInfo { - crate::UsageInfo::empty() - } + type Error = String; + type Transaction = S::Overlay; + type TrieBackendStorage = S; + + fn storage(&self, key: &[u8]) -> Result, Self::Error> { + self.essence.storage(key) + } + + fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.essence.child_storage(storage_key, child_info, key) + } + + fn next_storage_key(&self, key: &[u8]) -> Result, Self::Error> { + self.essence.next_storage_key(key) + } + + fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, Self::Error> { + self.essence + .next_child_storage_key(storage_key, child_info, key) + } + + fn for_keys_with_prefix(&self, prefix: &[u8], f: F) { + self.essence.for_keys_with_prefix(prefix, f) + } + + fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.essence.for_key_values_with_prefix(prefix, f) + } + + fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + self.essence + .for_keys_in_child_storage(storage_key, child_info, f) + } + + fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + 
prefix: &[u8], + f: F, + ) { + self.essence + .for_child_keys_with_prefix(storage_key, child_info, prefix, f) + } + + fn pairs(&self) -> Vec<(StorageKey, StorageValue)> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + + let collect_all = || -> Result<_, Box>> { + let trie = TrieDB::::new(&eph, self.essence.root())?; + let mut v = Vec::new(); + for x in trie.iter()? { + let (key, value) = x?; + v.push((key.to_vec(), value.to_vec())); + } + + Ok(v) + }; + + match collect_all() { + Ok(v) => v, + Err(e) => { + debug!(target: "trie", "Error extracting trie values: {}", e); + Vec::new() + } + } + } + + fn keys(&self, prefix: &[u8]) -> Vec { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral::new(self.essence.backend_storage(), &mut read_overlay); + + let collect_all = || -> Result<_, Box>> { + let trie = TrieDB::::new(&eph, self.essence.root())?; + let mut v = Vec::new(); + for x in trie.iter()? 
{ + let (key, _) = x?; + if key.starts_with(prefix) { + v.push(key.to_vec()); + } + } + + Ok(v) + }; + + collect_all() + .map_err(|e| debug!(target: "trie", "Error extracting trie keys: {}", e)) + .unwrap_or_default() + } + + fn storage_root(&self, delta: I) -> (H::Out, S::Overlay) + where + I: IntoIterator)>, + { + let mut write_overlay = S::Overlay::default(); + let mut root = *self.essence.root(); + + { + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); + + match delta_trie_root::, _, _, _, _>(&mut eph, root, delta) { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + } + } + + (root, write_overlay) + } + + fn child_storage_root( + &self, + storage_key: &[u8], + child_info: ChildInfo, + delta: I, + ) -> (H::Out, bool, Self::Transaction) + where + I: IntoIterator)>, + H::Out: Ord, + { + let default_root = default_child_trie_root::>(storage_key); + + let mut write_overlay = S::Overlay::default(); + let mut root = match self.storage(storage_key) { + Ok(value) => value + .and_then(|r| Decode::decode(&mut &r[..]).ok()) + .unwrap_or(default_root.clone()), + Err(e) => { + warn!(target: "trie", "Failed to read child storage root: {}", e); + default_root.clone() + } + }; + + { + let mut eph = Ephemeral::new(self.essence.backend_storage(), &mut write_overlay); + + match child_delta_trie_root::, _, _, _, _, _>( + storage_key, + child_info.keyspace(), + &mut eph, + root, + delta, + ) { + Ok(ret) => root = ret, + Err(e) => warn!(target: "trie", "Failed to write to trie: {}", e), + } + } + + let is_default = root == default_root; + + (root, is_default, write_overlay) + } + + fn as_trie_backend(&mut self) -> Option<&TrieBackend> { + Some(self) + } + + fn register_overlay_stats(&mut self, _stats: &crate::stats::StateMachineStats) {} + + fn usage_info(&self) -> crate::UsageInfo { + crate::UsageInfo::empty() + } } #[cfg(test)] pub mod tests { - use std::collections::HashSet; - use sp_core::H256; - use 
codec::Encode; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; - use sp_runtime::traits::BlakeTwo256; - use super::*; - - const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; - - const CHILD_UUID_1: &[u8] = b"unique_id_1"; - const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); - - fn test_db() -> (PrefixedMemoryDB, H256) { - let mut root = H256::default(); - let mut mdb = PrefixedMemoryDB::::default(); - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); - let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(b"value3", &[142]).expect("insert failed"); - trie.insert(b"value4", &[124]).expect("insert failed"); - }; - - { - let mut sub_root = Vec::new(); - root.encode_to(&mut sub_root); - let mut trie = TrieDBMut::new(&mut mdb, &mut root); - trie.insert(CHILD_KEY_1, &sub_root[..]).expect("insert failed"); - trie.insert(b"key", b"value").expect("insert failed"); - trie.insert(b"value1", &[42]).expect("insert failed"); - trie.insert(b"value2", &[24]).expect("insert failed"); - trie.insert(b":code", b"return 42").expect("insert failed"); - for i in 128u8..255u8 { - trie.insert(&[i], &[i]).unwrap(); - } - } - (mdb, root) - } - - pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256> { - let (mdb, root) = test_db(); - TrieBackend::new(mdb, root) - } - - #[test] - fn read_from_storage_returns_some() { - assert_eq!(test_trie().storage(b"key").unwrap(), Some(b"value".to_vec())); - } - - #[test] - fn read_from_child_storage_returns_some() { - let test_trie = test_trie(); - assert_eq!( - test_trie.child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3").unwrap(), - Some(vec![142u8]), - ); - } - - #[test] - fn read_from_storage_returns_none() { - assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None); - } - - #[test] - fn pairs_are_not_empty_on_non_empty_storage() { - assert!(!test_trie().pairs().is_empty()); - } - - #[test] - fn pairs_are_empty_on_empty_storage() { - 
assert!(TrieBackend::, BlakeTwo256>::new( - PrefixedMemoryDB::default(), - Default::default(), - ).pairs().is_empty()); - } - - #[test] - fn storage_root_is_non_default() { - assert!(test_trie().storage_root(::std::iter::empty()).0 != H256::repeat_byte(0)); - } - - #[test] - fn storage_root_transaction_is_empty() { - assert!(test_trie().storage_root(::std::iter::empty()).1.drain().is_empty()); - } - - #[test] - fn storage_root_transaction_is_non_empty() { - let (new_root, mut tx) = test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); - assert!(!tx.drain().is_empty()); - assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); - } - - #[test] - fn prefix_walking_works() { - let trie = test_trie(); - - let mut seen = HashSet::new(); - trie.for_keys_with_prefix(b"value", |key| { - let for_first_time = seen.insert(key.to_vec()); - assert!(for_first_time, "Seen key '{:?}' more than once", key); - }); - - let mut expected = HashSet::new(); - expected.insert(b"value1".to_vec()); - expected.insert(b"value2".to_vec()); - assert_eq!(seen, expected); - } + use super::*; + use codec::Encode; + use sp_core::H256; + use sp_runtime::traits::BlakeTwo256; + use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; + use std::collections::HashSet; + + const CHILD_KEY_1: &[u8] = b":child_storage:default:sub1"; + + const CHILD_UUID_1: &[u8] = b"unique_id_1"; + const CHILD_INFO_1: ChildInfo<'static> = ChildInfo::new_default(CHILD_UUID_1); + + fn test_db() -> (PrefixedMemoryDB, H256) { + let mut root = H256::default(); + let mut mdb = PrefixedMemoryDB::::default(); + { + let mut mdb = KeySpacedDBMut::new(&mut mdb, CHILD_UUID_1); + let mut trie = TrieDBMut::new(&mut mdb, &mut root); + trie.insert(b"value3", &[142]).expect("insert failed"); + trie.insert(b"value4", &[124]).expect("insert failed"); + }; + + { + let mut sub_root = Vec::new(); + root.encode_to(&mut sub_root); + let mut trie = TrieDBMut::new(&mut mdb, &mut 
root); + trie.insert(CHILD_KEY_1, &sub_root[..]) + .expect("insert failed"); + trie.insert(b"key", b"value").expect("insert failed"); + trie.insert(b"value1", &[42]).expect("insert failed"); + trie.insert(b"value2", &[24]).expect("insert failed"); + trie.insert(b":code", b"return 42").expect("insert failed"); + for i in 128u8..255u8 { + trie.insert(&[i], &[i]).unwrap(); + } + } + (mdb, root) + } + + pub(crate) fn test_trie() -> TrieBackend, BlakeTwo256> { + let (mdb, root) = test_db(); + TrieBackend::new(mdb, root) + } + + #[test] + fn read_from_storage_returns_some() { + assert_eq!( + test_trie().storage(b"key").unwrap(), + Some(b"value".to_vec()) + ); + } + + #[test] + fn read_from_child_storage_returns_some() { + let test_trie = test_trie(); + assert_eq!( + test_trie + .child_storage(CHILD_KEY_1, CHILD_INFO_1, b"value3") + .unwrap(), + Some(vec![142u8]), + ); + } + + #[test] + fn read_from_storage_returns_none() { + assert_eq!(test_trie().storage(b"non-existing-key").unwrap(), None); + } + + #[test] + fn pairs_are_not_empty_on_non_empty_storage() { + assert!(!test_trie().pairs().is_empty()); + } + + #[test] + fn pairs_are_empty_on_empty_storage() { + assert!( + TrieBackend::, BlakeTwo256>::new( + PrefixedMemoryDB::default(), + Default::default(), + ) + .pairs() + .is_empty() + ); + } + + #[test] + fn storage_root_is_non_default() { + assert!(test_trie().storage_root(::std::iter::empty()).0 != H256::repeat_byte(0)); + } + + #[test] + fn storage_root_transaction_is_empty() { + assert!(test_trie() + .storage_root(::std::iter::empty()) + .1 + .drain() + .is_empty()); + } + + #[test] + fn storage_root_transaction_is_non_empty() { + let (new_root, mut tx) = + test_trie().storage_root(vec![(b"new-key".to_vec(), Some(b"new-value".to_vec()))]); + assert!(!tx.drain().is_empty()); + assert!(new_root != test_trie().storage_root(::std::iter::empty()).0); + } + + #[test] + fn prefix_walking_works() { + let trie = test_trie(); + + let mut seen = HashSet::new(); + 
trie.for_keys_with_prefix(b"value", |key| { + let for_first_time = seen.insert(key.to_vec()); + assert!(for_first_time, "Seen key '{:?}' more than once", key); + }); + + let mut expected = HashSet::new(); + expected.insert(b"value1".to_vec()); + expected.insert(b"value2".to_vec()); + assert_eq!(seen, expected); + } } diff --git a/primitives/state-machine/src/trie_backend_essence.rs b/primitives/state-machine/src/trie_backend_essence.rs index 125a823f57..0281fd6266 100644 --- a/primitives/state-machine/src/trie_backend_essence.rs +++ b/primitives/state-machine/src/trie_backend_essence.rs @@ -17,477 +17,493 @@ //! Trie-based state machine backend essence used to read values //! from storage. -use std::ops::Deref; -use std::sync::Arc; -use log::{debug, warn}; -use hash_db::{self, Hasher, EMPTY_PREFIX, Prefix}; -use sp_trie::{Trie, MemoryDB, PrefixedMemoryDB, DBValue, - default_child_trie_root, read_trie_value, read_child_trie_value, - for_keys_in_child_trie, KeySpacedDB, TrieDBIterator}; -use sp_trie::trie_types::{TrieDB, TrieError, Layout}; use crate::{backend::Consolidate, StorageKey, StorageValue}; -use sp_core::storage::ChildInfo; use codec::Encode; +use hash_db::{self, Hasher, Prefix, EMPTY_PREFIX}; +use log::{debug, warn}; +use sp_core::storage::ChildInfo; +use sp_trie::trie_types::{Layout, TrieDB, TrieError}; +use sp_trie::{ + default_child_trie_root, for_keys_in_child_trie, read_child_trie_value, read_trie_value, + DBValue, KeySpacedDB, MemoryDB, PrefixedMemoryDB, Trie, TrieDBIterator, +}; +use std::ops::Deref; +use std::sync::Arc; /// Patricia trie-based storage trait. pub trait Storage: Send + Sync { - /// Get a trie node. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + /// Get a trie node. + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } /// Patricia trie-based pairs storage essence. 
pub struct TrieBackendEssence, H: Hasher> { - storage: S, - root: H::Out, + storage: S, + root: H::Out, } -impl, H: Hasher> TrieBackendEssence where H::Out: Encode { - /// Create new trie-based backend. - pub fn new(storage: S, root: H::Out) -> Self { - TrieBackendEssence { - storage, - root, - } - } - - /// Get backend storage reference. - pub fn backend_storage(&self) -> &S { - &self.storage - } - - /// Get trie root. - pub fn root(&self) -> &H::Out { - &self.root - } - - /// Consumes self and returns underlying storage. - pub fn into_storage(self) -> S { - self.storage - } - - /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in - /// lexicographic order. - pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { - self.next_storage_key_from_root(&self.root, None, key) - } - - /// Return the next key in the child trie i.e. the minimum key that is strictly superior to - /// `key` in lexicographic order. - pub fn next_child_storage_key( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, String> { - let child_root = match self.storage(storage_key)? { - Some(child_root) => child_root, - None => return Ok(None), - }; - - let mut hash = H::Out::default(); - - if child_root.len() != hash.as_ref().len() { - return Err(format!("Invalid child storage hash at {:?}", storage_key)); - } - // note: child_root and hash must be same size, panics otherwise. - hash.as_mut().copy_from_slice(&child_root[..]); - - self.next_storage_key_from_root(&hash, Some(child_info), key) - } - - /// Return next key from main trie or child trie by providing corresponding root. 
- fn next_storage_key_from_root( - &self, - root: &H::Out, - child_info: Option, - key: &[u8], - ) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - let dyn_eph: &dyn hash_db::HashDBRef<_, _>; - let keyspace_eph; - if let Some(child_info) = child_info.as_ref() { - keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); - dyn_eph = &keyspace_eph; - } else { - dyn_eph = &eph; - } - - let trie = TrieDB::::new(dyn_eph, root) - .map_err(|e| format!("TrieDB creation error: {}", e))?; - let mut iter = trie.iter() - .map_err(|e| format!("TrieDB iteration error: {}", e))?; - - // The key just after the one given in input, basically `key++0`. - // Note: We are sure this is the next key if: - // * size of key has no limit (i.e. we can always add 0 to the path), - // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). - let mut potential_next_key = Vec::with_capacity(key.len() + 1); - potential_next_key.extend_from_slice(key); - potential_next_key.push(0); - - iter.seek(&potential_next_key) - .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; - - let next_element = iter.next(); - - let next_key = if let Some(next_element) = next_element { - let (next_key, _) = next_element - .map_err(|e| format!("TrieDB iterator next error: {}", e))?; - Some(next_key) - } else { - None - }; - - Ok(next_key) - } - - /// Get the value of storage at given key. - pub fn storage(&self, key: &[u8]) -> Result, String> { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_trie_value::, _>(&eph, &self.root, key).map_err(map_e) - } - - /// Get the value of child storage at given key. 
- pub fn child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - ) -> Result, String> { - let root = self.storage(storage_key)? - .unwrap_or(default_child_trie_root::>(storage_key).encode()); - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let map_e = |e| format!("Trie lookup error: {}", e); - - read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) - .map_err(map_e) - } - - /// Retrieve all entries keys of child storage and call `f` for each of those keys. - pub fn for_keys_in_child_storage( - &self, - storage_key: &[u8], - child_info: ChildInfo, - f: F, - ) { - let root = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( - storage_key, - child_info.keyspace(), - &eph, - &root, - f, - ) { - debug!(target: "trie", "Error while iterating child storage: {}", e); - } - } - - /// Execute given closure for all keys starting with prefix. - pub fn for_child_keys_with_prefix( - &self, - storage_key: &[u8], - child_info: ChildInfo, - prefix: &[u8], - mut f: F, - ) { - let root_vec = match self.storage(storage_key) { - Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), - Err(e) => { - debug!(target: "trie", "Error while iterating child storage: {}", e); - return; - } - }; - let mut root = H::Out::default(); - root.as_mut().copy_from_slice(&root_vec); - self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) - } - - /// Execute given closure for all keys starting with prefix. 
- pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) - } - - fn keys_values_with_prefix_inner( - &self, - root: &H::Out, - prefix: &[u8], - mut f: F, - child_info: Option, - ) { - let mut read_overlay = S::Overlay::default(); - let eph = Ephemeral { - storage: &self.storage, - overlay: &mut read_overlay, - }; - - let mut iter = move |db| -> Result<(), Box>> { - let trie = TrieDB::::new(db, root)?; - - for x in TrieDBIterator::new_prefixed(&trie, prefix)? { - let (key, value) = x?; - - debug_assert!(key.starts_with(prefix)); - - f(&key, &value); - } - - Ok(()) - }; - - let result = if let Some(child_info) = child_info { - let db = KeySpacedDB::new(&eph, child_info.keyspace()); - iter(&db) - } else { - iter(&eph) - }; - if let Err(e) = result { - debug!(target: "trie", "Error while iterating by prefix: {}", e); - } - } - - /// Execute given closure for all key and values starting with prefix. - pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { - self.keys_values_with_prefix_inner(&self.root, prefix, f, None) - } +impl, H: Hasher> TrieBackendEssence +where + H::Out: Encode, +{ + /// Create new trie-based backend. + pub fn new(storage: S, root: H::Out) -> Self { + TrieBackendEssence { storage, root } + } + + /// Get backend storage reference. + pub fn backend_storage(&self) -> &S { + &self.storage + } + + /// Get trie root. + pub fn root(&self) -> &H::Out { + &self.root + } + + /// Consumes self and returns underlying storage. + pub fn into_storage(self) -> S { + self.storage + } + + /// Return the next key in the trie i.e. the minimum key that is strictly superior to `key` in + /// lexicographic order. + pub fn next_storage_key(&self, key: &[u8]) -> Result, String> { + self.next_storage_key_from_root(&self.root, None, key) + } + + /// Return the next key in the child trie i.e. the minimum key that is strictly superior to + /// `key` in lexicographic order. 
+ pub fn next_child_storage_key( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, String> { + let child_root = match self.storage(storage_key)? { + Some(child_root) => child_root, + None => return Ok(None), + }; + + let mut hash = H::Out::default(); + + if child_root.len() != hash.as_ref().len() { + return Err(format!("Invalid child storage hash at {:?}", storage_key)); + } + // note: child_root and hash must be same size, panics otherwise. + hash.as_mut().copy_from_slice(&child_root[..]); + + self.next_storage_key_from_root(&hash, Some(child_info), key) + } + + /// Return next key from main trie or child trie by providing corresponding root. + fn next_storage_key_from_root( + &self, + root: &H::Out, + child_info: Option, + key: &[u8], + ) -> Result, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + let dyn_eph: &dyn hash_db::HashDBRef<_, _>; + let keyspace_eph; + if let Some(child_info) = child_info.as_ref() { + keyspace_eph = KeySpacedDB::new(&eph, child_info.keyspace()); + dyn_eph = &keyspace_eph; + } else { + dyn_eph = &eph; + } + + let trie = + TrieDB::::new(dyn_eph, root).map_err(|e| format!("TrieDB creation error: {}", e))?; + let mut iter = trie + .iter() + .map_err(|e| format!("TrieDB iteration error: {}", e))?; + + // The key just after the one given in input, basically `key++0`. + // Note: We are sure this is the next key if: + // * size of key has no limit (i.e. we can always add 0 to the path), + // * and no keys can be inserted between `key` and `key++0` (this is ensured by sp-io). 
+ let mut potential_next_key = Vec::with_capacity(key.len() + 1); + potential_next_key.extend_from_slice(key); + potential_next_key.push(0); + + iter.seek(&potential_next_key) + .map_err(|e| format!("TrieDB iterator seek error: {}", e))?; + + let next_element = iter.next(); + + let next_key = if let Some(next_element) = next_element { + let (next_key, _) = + next_element.map_err(|e| format!("TrieDB iterator next error: {}", e))?; + Some(next_key) + } else { + None + }; + + Ok(next_key) + } + + /// Get the value of storage at given key. + pub fn storage(&self, key: &[u8]) -> Result, String> { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_trie_value::, _>(&eph, &self.root, key).map_err(map_e) + } + + /// Get the value of child storage at given key. + pub fn child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + ) -> Result, String> { + let root = self + .storage(storage_key)? + .unwrap_or(default_child_trie_root::>(storage_key).encode()); + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let map_e = |e| format!("Trie lookup error: {}", e); + + read_child_trie_value::, _>(storage_key, child_info.keyspace(), &eph, &root, key) + .map_err(map_e) + } + + /// Retrieve all entries keys of child storage and call `f` for each of those keys. 
+ pub fn for_keys_in_child_storage( + &self, + storage_key: &[u8], + child_info: ChildInfo, + f: F, + ) { + let root = match self.storage(storage_key) { + Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return; + } + }; + + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + if let Err(e) = for_keys_in_child_trie::, _, Ephemeral>( + storage_key, + child_info.keyspace(), + &eph, + &root, + f, + ) { + debug!(target: "trie", "Error while iterating child storage: {}", e); + } + } + + /// Execute given closure for all keys starting with prefix. + pub fn for_child_keys_with_prefix( + &self, + storage_key: &[u8], + child_info: ChildInfo, + prefix: &[u8], + mut f: F, + ) { + let root_vec = match self.storage(storage_key) { + Ok(v) => v.unwrap_or(default_child_trie_root::>(storage_key).encode()), + Err(e) => { + debug!(target: "trie", "Error while iterating child storage: {}", e); + return; + } + }; + let mut root = H::Out::default(); + root.as_mut().copy_from_slice(&root_vec); + self.keys_values_with_prefix_inner(&root, prefix, |k, _v| f(k), Some(child_info)) + } + + /// Execute given closure for all keys starting with prefix. + pub fn for_keys_with_prefix(&self, prefix: &[u8], mut f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, |k, _v| f(k), None) + } + + fn keys_values_with_prefix_inner( + &self, + root: &H::Out, + prefix: &[u8], + mut f: F, + child_info: Option, + ) { + let mut read_overlay = S::Overlay::default(); + let eph = Ephemeral { + storage: &self.storage, + overlay: &mut read_overlay, + }; + + let mut iter = move |db| -> Result<(), Box>> { + let trie = TrieDB::::new(db, root)?; + + for x in TrieDBIterator::new_prefixed(&trie, prefix)? 
{ + let (key, value) = x?; + + debug_assert!(key.starts_with(prefix)); + + f(&key, &value); + } + + Ok(()) + }; + + let result = if let Some(child_info) = child_info { + let db = KeySpacedDB::new(&eph, child_info.keyspace()); + iter(&db) + } else { + iter(&eph) + }; + if let Err(e) = result { + debug!(target: "trie", "Error while iterating by prefix: {}", e); + } + } + + /// Execute given closure for all key and values starting with prefix. + pub fn for_key_values_with_prefix(&self, prefix: &[u8], f: F) { + self.keys_values_with_prefix_inner(&self.root, prefix, f, None) + } } pub(crate) struct Ephemeral<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> { - storage: &'a S, - overlay: &'a mut S::Overlay, + storage: &'a S, + overlay: &'a mut S::Overlay, } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsPlainDB - for Ephemeral<'a, S, H> + for Ephemeral<'a, S, H> { - fn as_plain_db<'b>(&'b self) -> &'b (dyn hash_db::PlainDB + 'b) { self } - fn as_plain_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::PlainDB + 'b) { - self - } + fn as_plain_db<'b>(&'b self) -> &'b (dyn hash_db::PlainDB + 'b) { + self + } + fn as_plain_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::PlainDB + 'b) { + self + } } impl<'a, S: 'a + TrieBackendStorage, H: 'a + Hasher> hash_db::AsHashDB - for Ephemeral<'a, S, H> + for Ephemeral<'a, S, H> { - fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { self } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { self } + fn as_hash_db<'b>(&'b self) -> &'b (dyn hash_db::HashDB + 'b) { + self + } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + self + } } impl<'a, S: TrieBackendStorage, H: Hasher> Ephemeral<'a, S, H> { - pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { - Ephemeral { - storage, - overlay, - } - } + pub fn new(storage: &'a S, overlay: &'a mut S::Overlay) -> Self { + Ephemeral { storage, overlay } + } } impl<'a, S: 'a + TrieBackendStorage, H: 
Hasher> hash_db::PlainDB - for Ephemeral<'a, S, H> + for Ephemeral<'a, S, H> { - fn get(&self, key: &H::Out) -> Option { - if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { - Some(val) - } else { - match self.storage.get(&key, EMPTY_PREFIX) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } - } - - fn contains(&self, key: &H::Out) -> bool { - hash_db::HashDB::get(self, key, EMPTY_PREFIX).is_some() - } - - fn emplace(&mut self, key: H::Out, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, EMPTY_PREFIX, value) - } - - fn remove(&mut self, key: &H::Out) { - hash_db::HashDB::remove(self.overlay, key, EMPTY_PREFIX) - } + fn get(&self, key: &H::Out) -> Option { + if let Some(val) = hash_db::HashDB::get(self.overlay, key, EMPTY_PREFIX) { + Some(val) + } else { + match self.storage.get(&key, EMPTY_PREFIX) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + } + } + } + } + + fn contains(&self, key: &H::Out) -> bool { + hash_db::HashDB::get(self, key, EMPTY_PREFIX).is_some() + } + + fn emplace(&mut self, key: H::Out, value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, EMPTY_PREFIX, value) + } + + fn remove(&mut self, key: &H::Out) { + hash_db::HashDB::remove(self.overlay, key, EMPTY_PREFIX) + } } impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::PlainDBRef - for Ephemeral<'a, S, H> + for Ephemeral<'a, S, H> { - fn get(&self, key: &H::Out) -> Option { hash_db::PlainDB::get(self, key) } - fn contains(&self, key: &H::Out) -> bool { hash_db::PlainDB::contains(self, key) } + fn get(&self, key: &H::Out) -> Option { + hash_db::PlainDB::get(self, key) + } + fn contains(&self, key: &H::Out) -> bool { + hash_db::PlainDB::contains(self, key) + } } impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDB - for Ephemeral<'a, S, H> + for Ephemeral<'a, S, H> { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - if 
let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { - Some(val) - } else { - match self.storage.get(&key, prefix) { - Ok(x) => x, - Err(e) => { - warn!(target: "trie", "Failed to read from DB: {}", e); - None - }, - } - } - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::get(self, key, prefix).is_some() - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - hash_db::HashDB::insert(self.overlay, prefix, value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { - hash_db::HashDB::emplace(self.overlay, key, prefix, value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - hash_db::HashDB::remove(self.overlay, key, prefix) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + if let Some(val) = hash_db::HashDB::get(self.overlay, key, prefix) { + Some(val) + } else { + match self.storage.get(&key, prefix) { + Ok(x) => x, + Err(e) => { + warn!(target: "trie", "Failed to read from DB: {}", e); + None + } + } + } + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + hash_db::HashDB::get(self, key, prefix).is_some() + } + + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { + hash_db::HashDB::insert(self.overlay, prefix, value) + } + + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: DBValue) { + hash_db::HashDB::emplace(self.overlay, key, prefix, value) + } + + fn remove(&mut self, key: &H::Out, prefix: Prefix) { + hash_db::HashDB::remove(self.overlay, key, prefix) + } } impl<'a, S: 'a + TrieBackendStorage, H: Hasher> hash_db::HashDBRef - for Ephemeral<'a, S, H> + for Ephemeral<'a, S, H> { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - hash_db::HashDB::get(self, key, prefix) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + hash_db::HashDB::get(self, key, prefix) + } - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - hash_db::HashDB::contains(self, key, prefix) - } + fn contains(&self, 
key: &H::Out, prefix: Prefix) -> bool { + hash_db::HashDB::contains(self, key, prefix) + } } /// Key-value pairs storage that is used by trie backend essence. pub trait TrieBackendStorage: Send + Sync { - /// Type of in-memory overlay. - type Overlay: hash_db::HashDB + Default + Consolidate; - /// Get the value stored at key. - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; + /// Type of in-memory overlay. + type Overlay: hash_db::HashDB + Default + Consolidate; + /// Get the value stored at key. + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String>; } // This implementation is used by normal storage trie clients. impl TrieBackendStorage for Arc> { - type Overlay = PrefixedMemoryDB; + type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - Storage::::get(self.deref(), key, prefix) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + Storage::::get(self.deref(), key, prefix) + } } // This implementation is used by test storage trie clients. 
impl TrieBackendStorage for PrefixedMemoryDB { - type Overlay = PrefixedMemoryDB; + type Overlay = PrefixedMemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - Ok(hash_db::HashDB::get(self, key, prefix)) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + Ok(hash_db::HashDB::get(self, key, prefix)) + } } impl TrieBackendStorage for MemoryDB { - type Overlay = MemoryDB; + type Overlay = MemoryDB; - fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { - Ok(hash_db::HashDB::get(self, key, prefix)) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Result, String> { + Ok(hash_db::HashDB::get(self, key, prefix)) + } } #[cfg(test)] mod test { - use sp_core::{Blake2Hasher, H256}; - use sp_trie::{TrieMut, PrefixedMemoryDB, trie_types::TrieDBMut, KeySpacedDBMut}; - use super::*; - - #[test] - fn next_storage_key_and_next_child_storage_key_work() { - let child_info = ChildInfo::new_default(b"uniqueid"); - // Contains values - let mut root_1 = H256::default(); - // Contains child trie - let mut root_2 = H256::default(); - - let mut mdb = PrefixedMemoryDB::::default(); - { - let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); - trie.insert(b"3", &[1]).expect("insert failed"); - trie.insert(b"4", &[1]).expect("insert failed"); - trie.insert(b"6", &[1]).expect("insert failed"); - } - { - let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); - // reuse of root_1 implicitly assert child trie root is same - // as top trie (contents must remain the same). 
- let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); - trie.insert(b"3", &[1]).expect("insert failed"); - trie.insert(b"4", &[1]).expect("insert failed"); - trie.insert(b"6", &[1]).expect("insert failed"); - } - { - let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); - trie.insert(b"MyChild", root_1.as_ref()).expect("insert failed"); - }; - - let essence_1 = TrieBackendEssence::new(mdb, root_1); - - assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); - assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); - assert_eq!(essence_1.next_storage_key(b"4"), Ok(Some(b"6".to_vec()))); - assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); - assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); - - let mdb = essence_1.into_storage(); - let essence_2 = TrieBackendEssence::new(mdb, root_2); - - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), Ok(Some(b"3".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), Ok(Some(b"4".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), Ok(Some(b"6".to_vec())) - ); - assert_eq!( - essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), Ok(None) - ); - } + use super::*; + use sp_core::{Blake2Hasher, H256}; + use sp_trie::{trie_types::TrieDBMut, KeySpacedDBMut, PrefixedMemoryDB, TrieMut}; + + #[test] + fn next_storage_key_and_next_child_storage_key_work() { + let child_info = ChildInfo::new_default(b"uniqueid"); + // Contains values + let mut root_1 = H256::default(); + // Contains child trie + let mut root_2 = H256::default(); + + let mut mdb = PrefixedMemoryDB::::default(); + { + let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + trie.insert(b"3", &[1]).expect("insert failed"); + trie.insert(b"4", &[1]).expect("insert failed"); + trie.insert(b"6", 
&[1]).expect("insert failed"); + } + { + let mut mdb = KeySpacedDBMut::new(&mut mdb, child_info.keyspace()); + // reuse of root_1 implicitly assert child trie root is same + // as top trie (contents must remain the same). + let mut trie = TrieDBMut::new(&mut mdb, &mut root_1); + trie.insert(b"3", &[1]).expect("insert failed"); + trie.insert(b"4", &[1]).expect("insert failed"); + trie.insert(b"6", &[1]).expect("insert failed"); + } + { + let mut trie = TrieDBMut::new(&mut mdb, &mut root_2); + trie.insert(b"MyChild", root_1.as_ref()) + .expect("insert failed"); + }; + + let essence_1 = TrieBackendEssence::new(mdb, root_1); + + assert_eq!(essence_1.next_storage_key(b"2"), Ok(Some(b"3".to_vec()))); + assert_eq!(essence_1.next_storage_key(b"3"), Ok(Some(b"4".to_vec()))); + assert_eq!(essence_1.next_storage_key(b"4"), Ok(Some(b"6".to_vec()))); + assert_eq!(essence_1.next_storage_key(b"5"), Ok(Some(b"6".to_vec()))); + assert_eq!(essence_1.next_storage_key(b"6"), Ok(None)); + + let mdb = essence_1.into_storage(); + let essence_2 = TrieBackendEssence::new(mdb, root_2); + + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"2"), + Ok(Some(b"3".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"3"), + Ok(Some(b"4".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"4"), + Ok(Some(b"6".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"5"), + Ok(Some(b"6".to_vec())) + ); + assert_eq!( + essence_2.next_child_storage_key(b"MyChild", child_info, b"6"), + Ok(None) + ); + } } diff --git a/primitives/std/src/lib.rs b/primitives/std/src/lib.rs index 856b095403..43c104d10a 100644 --- a/primitives/std/src/lib.rs +++ b/primitives/std/src/lib.rs @@ -18,12 +18,14 @@ //! or client/alloc to be used with any code that depends on the runtime. 
#![cfg_attr(not(feature = "std"), no_std)] - - -#![cfg_attr(feature = "std", - doc = "Substrate runtime standard library as compiled when linked with Rust's standard library.")] -#![cfg_attr(not(feature = "std"), - doc = "Substrate's runtime standard library as compiled without Rust's standard library.")] +#![cfg_attr( + feature = "std", + doc = "Substrate runtime standard library as compiled when linked with Rust's standard library." +)] +#![cfg_attr( + not(feature = "std"), + doc = "Substrate's runtime standard library as compiled without Rust's standard library." +)] #[macro_export] macro_rules! map { @@ -55,7 +57,7 @@ macro_rules! if_std { #[cfg(not(feature = "std"))] #[macro_export] macro_rules! if_std { - ( $( $code:tt )* ) => {} + ( $( $code:tt )* ) => {}; } #[cfg(feature = "std")] @@ -68,13 +70,13 @@ include!("../without_std.rs"); /// /// This should include only things which are in the normal std prelude. pub mod prelude { - pub use crate::vec::Vec; - pub use crate::boxed::Box; - pub use crate::cmp::{Eq, PartialEq, Reverse}; - pub use crate::clone::Clone; + pub use crate::boxed::Box; + pub use crate::clone::Clone; + pub use crate::cmp::{Eq, PartialEq, Reverse}; + pub use crate::vec::Vec; - // Re-export `vec!` macro here, but not in `std` mode, since - // std's prelude already brings `vec!` into the scope. - #[cfg(not(feature = "std"))] - pub use crate::vec; + // Re-export `vec!` macro here, but not in `std` mode, since + // std's prelude already brings `vec!` into the scope. 
+ #[cfg(not(feature = "std"))] + pub use crate::vec; } diff --git a/primitives/storage/src/lib.rs b/primitives/storage/src/lib.rs index 76fd4baac9..2d5142e55e 100644 --- a/primitives/storage/src/lib.rs +++ b/primitives/storage/src/lib.rs @@ -19,25 +19,29 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; use sp_debug_derive::RuntimeDebug; -use sp_std::{vec::Vec, borrow::Cow}; +use sp_std::{borrow::Cow, vec::Vec}; /// Storage key. #[derive(PartialEq, Eq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone) +)] pub struct StorageKey( - #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] - pub Vec, + #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] pub Vec, ); /// Storage data associated to a [`StorageKey`]. #[derive(PartialEq, Eq, RuntimeDebug)] -#[cfg_attr(feature = "std", derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone))] +#[cfg_attr( + feature = "std", + derive(Serialize, Deserialize, Hash, PartialOrd, Ord, Clone) +)] pub struct StorageData( - #[cfg_attr(feature = "std", serde(with="impl_serde::serialize"))] - pub Vec, + #[cfg_attr(feature = "std", serde(with = "impl_serde::serialize"))] pub Vec, ); /// Map of data to use in a storage, it is a collection of @@ -49,21 +53,21 @@ pub type StorageMap = std::collections::BTreeMap, Vec>; #[derive(Debug, PartialEq, Eq, Clone)] /// Child trie storage data. pub struct StorageChild { - /// Child data for storage. - pub data: StorageMap, - /// Associated child info for a child - /// trie. - pub child_info: OwnedChildInfo, + /// Child data for storage. + pub data: StorageMap, + /// Associated child info for a child + /// trie. 
+ pub child_info: OwnedChildInfo, } #[cfg(feature = "std")] #[derive(Default, Debug, Clone)] /// Struct containing data needed for a storage. pub struct Storage { - /// Top trie storage data. - pub top: StorageMap, - /// Children trie storage data by storage key. - pub children: std::collections::HashMap, StorageChild>, + /// Top trie storage data. + pub top: StorageMap, + /// Children trie storage data by storage key. + pub children: std::collections::HashMap, StorageChild>, } /// Storage change set @@ -71,59 +75,59 @@ pub struct Storage { #[cfg_attr(feature = "std", derive(Serialize, Deserialize, PartialEq, Eq))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct StorageChangeSet { - /// Block hash - pub block: Hash, - /// A list of changes - pub changes: Vec<(StorageKey, Option)>, + /// Block hash + pub block: Hash, + /// A list of changes + pub changes: Vec<(StorageKey, Option)>, } /// List of all well known keys and prefixes in storage. pub mod well_known_keys { - /// Wasm code of the runtime. - /// - /// Stored as a raw byte vector. Required by substrate. - pub const CODE: &'static [u8] = b":code"; - - /// Number of wasm linear memory pages required for execution of the runtime. - /// - /// The type of this value is encoded `u64`. - pub const HEAP_PAGES: &'static [u8] = b":heappages"; - - /// Current extrinsic index (u32) is stored under this key. - pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; - - /// Changes trie configuration is stored under this key. - pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; - - /// Prefix of child storage keys. - pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; - - /// Whether a key is a child storage key. - /// - /// This is convenience function which basically checks if the given `key` starts - /// with `CHILD_STORAGE_KEY_PREFIX` and doesn't do anything apart from that. 
- pub fn is_child_storage_key(key: &[u8]) -> bool { - // Other code might depend on this, so be careful changing this. - key.starts_with(CHILD_STORAGE_KEY_PREFIX) - } - - /// Determine whether a child trie key is valid. - /// - /// For now, the only valid child trie keys are those starting with `:child_storage:default:`. - /// - /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. - pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { - let has_right_prefix = storage_key.starts_with(b":child_storage:default:"); - if has_right_prefix { - // This is an attempt to catch a change of `is_child_storage_key`, which - // just checks if the key has prefix `:child_storage:` at the moment of writing. - debug_assert!( - is_child_storage_key(&storage_key), - "`is_child_trie_key_valid` is a subset of `is_child_storage_key`", - ); - } - has_right_prefix - } + /// Wasm code of the runtime. + /// + /// Stored as a raw byte vector. Required by substrate. + pub const CODE: &'static [u8] = b":code"; + + /// Number of wasm linear memory pages required for execution of the runtime. + /// + /// The type of this value is encoded `u64`. + pub const HEAP_PAGES: &'static [u8] = b":heappages"; + + /// Current extrinsic index (u32) is stored under this key. + pub const EXTRINSIC_INDEX: &'static [u8] = b":extrinsic_index"; + + /// Changes trie configuration is stored under this key. + pub const CHANGES_TRIE_CONFIG: &'static [u8] = b":changes_trie"; + + /// Prefix of child storage keys. + pub const CHILD_STORAGE_KEY_PREFIX: &'static [u8] = b":child_storage:"; + + /// Whether a key is a child storage key. + /// + /// This is convenience function which basically checks if the given `key` starts + /// with `CHILD_STORAGE_KEY_PREFIX` and doesn't do anything apart from that. + pub fn is_child_storage_key(key: &[u8]) -> bool { + // Other code might depend on this, so be careful changing this. 
+ key.starts_with(CHILD_STORAGE_KEY_PREFIX) + } + + /// Determine whether a child trie key is valid. + /// + /// For now, the only valid child trie keys are those starting with `:child_storage:default:`. + /// + /// `child_trie_root` and `child_delta_trie_root` can panic if invalid value is provided to them. + pub fn is_child_trie_key_valid(storage_key: &[u8]) -> bool { + let has_right_prefix = storage_key.starts_with(b":child_storage:default:"); + if has_right_prefix { + // This is an attempt to catch a change of `is_child_storage_key`, which + // just checks if the key has prefix `:child_storage:` at the moment of writing. + debug_assert!( + is_child_storage_key(&storage_key), + "`is_child_trie_key_valid` is a subset of `is_child_storage_key`", + ); + } + has_right_prefix + } } /// A wrapper around a child storage key. @@ -131,54 +135,54 @@ pub mod well_known_keys { /// This wrapper ensures that the child storage key is correct and properly used. It is /// impossible to create an instance of this struct without providing a correct `storage_key`. pub struct ChildStorageKey<'a> { - storage_key: Cow<'a, [u8]>, + storage_key: Cow<'a, [u8]>, } impl<'a> ChildStorageKey<'a> { - /// Create new instance of `Self`. - fn new(storage_key: Cow<'a, [u8]>) -> Option { - if well_known_keys::is_child_trie_key_valid(&storage_key) { - Some(ChildStorageKey { storage_key }) - } else { - None - } - } - - /// Create a new `ChildStorageKey` from a vector. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_vec(key: Vec) -> Option { - Self::new(Cow::Owned(key)) - } - - /// Create a new `ChildStorageKey` from a slice. - /// - /// `storage_key` need to start with `:child_storage:default:` - /// See `is_child_trie_key_valid` for more details. - pub fn from_slice(key: &'a [u8]) -> Option { - Self::new(Cow::Borrowed(key)) - } - - /// Get access to the byte representation of the storage key. 
- /// - /// This key is guaranteed to be correct. - pub fn as_ref(&self) -> &[u8] { - &*self.storage_key - } - - /// Destruct this instance into an owned vector that represents the storage key. - /// - /// This key is guaranteed to be correct. - pub fn into_owned(self) -> Vec { - self.storage_key.into_owned() - } + /// Create new instance of `Self`. + fn new(storage_key: Cow<'a, [u8]>) -> Option { + if well_known_keys::is_child_trie_key_valid(&storage_key) { + Some(ChildStorageKey { storage_key }) + } else { + None + } + } + + /// Create a new `ChildStorageKey` from a vector. + /// + /// `storage_key` need to start with `:child_storage:default:` + /// See `is_child_trie_key_valid` for more details. + pub fn from_vec(key: Vec) -> Option { + Self::new(Cow::Owned(key)) + } + + /// Create a new `ChildStorageKey` from a slice. + /// + /// `storage_key` need to start with `:child_storage:default:` + /// See `is_child_trie_key_valid` for more details. + pub fn from_slice(key: &'a [u8]) -> Option { + Self::new(Cow::Borrowed(key)) + } + + /// Get access to the byte representation of the storage key. + /// + /// This key is guaranteed to be correct. + pub fn as_ref(&self) -> &[u8] { + &*self.storage_key + } + + /// Destruct this instance into an owned vector that represents the storage key. + /// + /// This key is guaranteed to be correct. + pub fn into_owned(self) -> Vec { + self.storage_key.into_owned() + } } #[derive(Clone, Copy)] /// Information related to a child state. pub enum ChildInfo<'a> { - Default(ChildTrie<'a>), + Default(ChildTrie<'a>), } /// Owned version of `ChildInfo`. @@ -186,55 +190,48 @@ pub enum ChildInfo<'a> { #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub enum OwnedChildInfo { - Default(OwnedChildTrie), + Default(OwnedChildTrie), } impl<'a> ChildInfo<'a> { - /// Instantiates information for a default child trie. 
- pub const fn new_default(unique_id: &'a[u8]) -> Self { - ChildInfo::Default(ChildTrie { - data: unique_id, - }) - } - - /// Instantiates a owned version of this child info. - pub fn to_owned(&self) -> OwnedChildInfo { - match self { - ChildInfo::Default(ChildTrie { data }) - => OwnedChildInfo::Default(OwnedChildTrie { - data: data.to_vec(), - }), - } - } - - /// Create child info from a linear byte packed value and a given type. - pub fn resolve_child_info(child_type: u32, data: &'a[u8]) -> Option { - match child_type { - x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), - _ => None, - } - } - - /// Return a single byte vector containing packed child info content and its child info type. - /// This can be use as input for `resolve_child_info`. - pub fn info(&self) -> (&[u8], u32) { - match self { - ChildInfo::Default(ChildTrie { - data, - }) => (data, ChildType::CryptoUniqueId as u32), - } - } - - /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. - /// This is a unique id of the child trie. The collision resistance of this value - /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. - pub fn keyspace(&self) -> &[u8] { - match self { - ChildInfo::Default(ChildTrie { - data, - }) => &data[..], - } - } + /// Instantiates information for a default child trie. + pub const fn new_default(unique_id: &'a [u8]) -> Self { + ChildInfo::Default(ChildTrie { data: unique_id }) + } + + /// Instantiates a owned version of this child info. + pub fn to_owned(&self) -> OwnedChildInfo { + match self { + ChildInfo::Default(ChildTrie { data }) => OwnedChildInfo::Default(OwnedChildTrie { + data: data.to_vec(), + }), + } + } + + /// Create child info from a linear byte packed value and a given type. 
+ pub fn resolve_child_info(child_type: u32, data: &'a [u8]) -> Option { + match child_type { + x if x == ChildType::CryptoUniqueId as u32 => Some(ChildInfo::new_default(data)), + _ => None, + } + } + + /// Return a single byte vector containing packed child info content and its child info type. + /// This can be use as input for `resolve_child_info`. + pub fn info(&self) -> (&[u8], u32) { + match self { + ChildInfo::Default(ChildTrie { data }) => (data, ChildType::CryptoUniqueId as u32), + } + } + + /// Return byte sequence (keyspace) that can be use by underlying db to isolate keys. + /// This is a unique id of the child trie. The collision resistance of this value + /// depends on the type of child info use. For `ChildInfo::Default` it is and need to be. + pub fn keyspace(&self) -> &[u8] { + match self { + ChildInfo::Default(ChildTrie { data }) => &data[..], + } + } } /// Type of child. @@ -242,35 +239,32 @@ impl<'a> ChildInfo<'a> { /// be related to technical consideration or api variant. #[repr(u32)] pub enum ChildType { - /// Default, it uses a cryptographic strong unique id as input. - CryptoUniqueId = 1, + /// Default, it uses a cryptographic strong unique id as input. + CryptoUniqueId = 1, } impl OwnedChildInfo { - /// Instantiates info for a default child trie. - pub fn new_default(unique_id: Vec) -> Self { - OwnedChildInfo::Default(OwnedChildTrie { - data: unique_id, - }) - } - - /// Try to update with another instance, return false if both instance - /// are not compatible. - pub fn try_update(&mut self, other: ChildInfo) -> bool { - match self { - OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), - } - } - - /// Get `ChildInfo` reference to this owned child info. - pub fn as_ref(&self) -> ChildInfo { - match self { - OwnedChildInfo::Default(OwnedChildTrie { data }) - => ChildInfo::Default(ChildTrie { - data: data.as_slice(), - }), - } - } + /// Instantiates info for a default child trie. 
+ pub fn new_default(unique_id: Vec) -> Self { + OwnedChildInfo::Default(OwnedChildTrie { data: unique_id }) + } + + /// Try to update with another instance, return false if both instance + /// are not compatible. + pub fn try_update(&mut self, other: ChildInfo) -> bool { + match self { + OwnedChildInfo::Default(owned_child_trie) => owned_child_trie.try_update(other), + } + } + + /// Get `ChildInfo` reference to this owned child info. + pub fn as_ref(&self) -> ChildInfo { + match self { + OwnedChildInfo::Default(OwnedChildTrie { data }) => ChildInfo::Default(ChildTrie { + data: data.as_slice(), + }), + } + } } /// A child trie of default type. @@ -280,26 +274,26 @@ impl OwnedChildInfo { /// crypto hash). #[derive(Clone, Copy)] pub struct ChildTrie<'a> { - /// Data containing unique id. - /// Unique id must but unique and free of any possible key collision - /// (depending on its storage behavior). - data: &'a[u8], + /// Data containing unique id. + /// Unique id must but unique and free of any possible key collision + /// (depending on its storage behavior). + data: &'a [u8], } /// Owned version of default child trie `ChildTrie`. #[derive(Debug, Clone)] #[cfg_attr(feature = "std", derive(PartialEq, Eq, Hash, PartialOrd, Ord))] pub struct OwnedChildTrie { - /// See `ChildTrie` reference field documentation. - data: Vec, + /// See `ChildTrie` reference field documentation. + data: Vec, } impl OwnedChildTrie { - /// Try to update with another instance, return false if both instance - /// are not compatible. - fn try_update(&mut self, other: ChildInfo) -> bool { - match other { - ChildInfo::Default(other) => self.data[..] == other.data[..], - } - } + /// Try to update with another instance, return false if both instance + /// are not compatible. + fn try_update(&mut self, other: ChildInfo) -> bool { + match other { + ChildInfo::Default(other) => self.data[..] 
== other.data[..], + } + } } diff --git a/primitives/test-primitives/src/lib.rs b/primitives/test-primitives/src/lib.rs index 302b24fcc1..8d602a9f77 100644 --- a/primitives/test-primitives/src/lib.rs +++ b/primitives/test-primitives/src/lib.rs @@ -18,44 +18,47 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::{Encode, Decode}; +use codec::{Decode, Encode}; -use sp_application_crypto::sr25519; pub use sp_application_crypto; +use sp_application_crypto::sr25519; pub use sp_core::{hash::H256, RuntimeDebug}; -use sp_runtime::traits::{BlakeTwo256, Verify, Extrinsic as ExtrinsicT,}; +use sp_runtime::traits::{BlakeTwo256, Extrinsic as ExtrinsicT, Verify}; /// Extrinsic for test-runtime. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(parity_util_mem::MallocSizeOf))] pub enum Extrinsic { - IncludeData(Vec), - StorageChange(Vec, Option>), + IncludeData(Vec), + StorageChange(Vec, Option>), } #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } impl ExtrinsicT for Extrinsic { - type Call = Extrinsic; - type SignaturePayload = (); + type Call = Extrinsic; + type SignaturePayload = (); - fn is_signed(&self) -> Option { - if let Extrinsic::IncludeData(_) = *self { - Some(false) - } else { - Some(true) - } - } + fn is_signed(&self) -> Option { + if let Extrinsic::IncludeData(_) = *self { + Some(false) + } else { + Some(true) + } + } - fn new(call: Self::Call, _signature_payload: Option) -> Option { - Some(call) - } + fn new(call: Self::Call, _signature_payload: Option) -> Option { + Some(call) + } } /// The signature type used by accounts/transactions. 
@@ -79,8 +82,8 @@ pub type Header = sp_runtime::generic::Header; /// Changes trie configuration (optionally) used in tests. pub fn changes_trie_config() -> sp_core::ChangesTrieConfiguration { - sp_core::ChangesTrieConfiguration { - digest_interval: 4, - digest_levels: 2, - } + sp_core::ChangesTrieConfiguration { + digest_interval: 4, + digest_levels: 2, + } } diff --git a/primitives/timestamp/src/lib.rs b/primitives/timestamp/src/lib.rs index f1fd06a44a..16d22c57f6 100644 --- a/primitives/timestamp/src/lib.rs +++ b/primitives/timestamp/src/lib.rs @@ -18,12 +18,12 @@ #![cfg_attr(not(feature = "std"), no_std)] -use codec::Encode; #[cfg(feature = "std")] use codec::Decode; +use codec::Encode; #[cfg(feature = "std")] use sp_inherents::ProvideInherentData; -use sp_inherents::{InherentIdentifier, IsFatalError, InherentData}; +use sp_inherents::{InherentData, InherentIdentifier, IsFatalError}; use sp_runtime::RuntimeString; @@ -36,45 +36,45 @@ pub type InherentType = u64; #[derive(Encode, sp_runtime::RuntimeDebug)] #[cfg_attr(feature = "std", derive(Decode))] pub enum InherentError { - /// The timestamp is valid in the future. - /// This is a non-fatal-error and will not stop checking the inherents. - ValidAtTimestamp(InherentType), - /// Some other error. - Other(RuntimeString), + /// The timestamp is valid in the future. + /// This is a non-fatal-error and will not stop checking the inherents. + ValidAtTimestamp(InherentType), + /// Some other error. + Other(RuntimeString), } impl IsFatalError for InherentError { - fn is_fatal_error(&self) -> bool { - match self { - InherentError::ValidAtTimestamp(_) => false, - InherentError::Other(_) => true, - } - } + fn is_fatal_error(&self) -> bool { + match self { + InherentError::ValidAtTimestamp(_) => false, + InherentError::Other(_) => true, + } + } } impl InherentError { - /// Try to create an instance ouf of the given identifier and data. 
- #[cfg(feature = "std")] - pub fn try_from(id: &InherentIdentifier, data: &[u8]) -> Option { - if id == &INHERENT_IDENTIFIER { - ::decode(&mut &data[..]).ok() - } else { - None - } - } + /// Try to create an instance ouf of the given identifier and data. + #[cfg(feature = "std")] + pub fn try_from(id: &InherentIdentifier, data: &[u8]) -> Option { + if id == &INHERENT_IDENTIFIER { + ::decode(&mut &data[..]).ok() + } else { + None + } + } } /// Auxiliary trait to extract timestamp inherent data. pub trait TimestampInherentData { - /// Get timestamp inherent data. - fn timestamp_inherent_data(&self) -> Result; + /// Get timestamp inherent data. + fn timestamp_inherent_data(&self) -> Result; } impl TimestampInherentData for InherentData { - fn timestamp_inherent_data(&self) -> Result { - self.get_data(&INHERENT_IDENTIFIER) - .and_then(|r| r.ok_or_else(|| "Timestamp inherent data not found".into())) - } + fn timestamp_inherent_data(&self) -> Result { + self.get_data(&INHERENT_IDENTIFIER) + .and_then(|r| r.ok_or_else(|| "Timestamp inherent data not found".into())) + } } /// Provide duration since unix epoch in millisecond for timestamp inherent. 
@@ -83,34 +83,32 @@ pub struct InherentDataProvider; #[cfg(feature = "std")] impl ProvideInherentData for InherentDataProvider { - fn inherent_identifier(&self) -> &'static InherentIdentifier { - &INHERENT_IDENTIFIER - } - - fn provide_inherent_data( - &self, - inherent_data: &mut InherentData, - ) -> Result<(), sp_inherents::Error> { - use wasm_timer::SystemTime; - - let now = SystemTime::now(); - now.duration_since(SystemTime::UNIX_EPOCH) - .map_err(|_| { - "Current time is before unix epoch".into() - }).and_then(|d| { - let duration: InherentType = d.as_millis() as u64; - inherent_data.put_data(INHERENT_IDENTIFIER, &duration) - }) - } - - fn error_to_string(&self, error: &[u8]) -> Option { - InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e)) - } + fn inherent_identifier(&self) -> &'static InherentIdentifier { + &INHERENT_IDENTIFIER + } + + fn provide_inherent_data( + &self, + inherent_data: &mut InherentData, + ) -> Result<(), sp_inherents::Error> { + use wasm_timer::SystemTime; + + let now = SystemTime::now(); + now.duration_since(SystemTime::UNIX_EPOCH) + .map_err(|_| "Current time is before unix epoch".into()) + .and_then(|d| { + let duration: InherentType = d.as_millis() as u64; + inherent_data.put_data(INHERENT_IDENTIFIER, &duration) + }) + } + + fn error_to_string(&self, error: &[u8]) -> Option { + InherentError::try_from(&INHERENT_IDENTIFIER, error).map(|e| format!("{:?}", e)) + } } - /// A trait which is called when the timestamp is set. #[impl_trait_for_tuples::impl_for_tuples(30)] pub trait OnTimestampSet { - fn on_timestamp_set(moment: Moment); + fn on_timestamp_set(moment: Moment); } diff --git a/primitives/transaction-pool/src/error.rs b/primitives/transaction-pool/src/error.rs index 1a456ca4fd..dea2c36dfb 100644 --- a/primitives/transaction-pool/src/error.rs +++ b/primitives/transaction-pool/src/error.rs @@ -17,7 +17,7 @@ //! Transaction pool errors. 
use sp_runtime::transaction_validity::{ - TransactionPriority as Priority, InvalidTransaction, UnknownTransaction, + InvalidTransaction, TransactionPriority as Priority, UnknownTransaction, }; /// Transaction pool result. @@ -26,57 +26,61 @@ pub type Result = std::result::Result; /// Transaction pool error type. #[derive(Debug, derive_more::Display, derive_more::From)] pub enum Error { - /// Transaction is not verifiable yet, but might be in the future. - #[display(fmt="Unknown transaction validity: {:?}", _0)] - UnknownTransaction(UnknownTransaction), - /// Transaction is invalid. - #[display(fmt="Invalid transaction validity: {:?}", _0)] - InvalidTransaction(InvalidTransaction), - /// The transaction validity returned no "provides" tag. - /// - /// Such transactions are not accepted to the pool, since we use those tags - /// to define identity of transactions (occupance of the same "slot"). - #[display(fmt="The transaction does not provide any tags, so the pool can't identify it.")] - NoTagsProvided, - /// The transaction is temporarily banned. - #[display(fmt="Temporarily Banned")] - TemporarilyBanned, - /// The transaction is already in the pool. - #[display(fmt="[{:?}] Already imported", _0)] - AlreadyImported(Box), - /// The transaction cannot be imported cause it's a replacement and has too low priority. - #[display(fmt="Too low priority ({} > {})", old, new)] - TooLowPriority { - /// Transaction already in the pool. - old: Priority, - /// Transaction entering the pool. - new: Priority - }, - /// Deps cycle detected and we couldn't import transaction. - #[display(fmt="Cycle Detected")] - CycleDetected, - /// Transaction was dropped immediately after it got inserted. - #[display(fmt="Transaction couldn't enter the pool because of the limit.")] - ImmediatelyDropped, - /// Invalid block id. - InvalidBlockId(String), - /// The pool is not accepting future transactions. 
- #[display(fmt="The pool is not accepting future transactions")] - RejectedFutureTransaction, + /// Transaction is not verifiable yet, but might be in the future. + #[display(fmt = "Unknown transaction validity: {:?}", _0)] + UnknownTransaction(UnknownTransaction), + /// Transaction is invalid. + #[display(fmt = "Invalid transaction validity: {:?}", _0)] + InvalidTransaction(InvalidTransaction), + /// The transaction validity returned no "provides" tag. + /// + /// Such transactions are not accepted to the pool, since we use those tags + /// to define identity of transactions (occupance of the same "slot"). + #[display(fmt = "The transaction does not provide any tags, so the pool can't identify it.")] + NoTagsProvided, + /// The transaction is temporarily banned. + #[display(fmt = "Temporarily Banned")] + TemporarilyBanned, + /// The transaction is already in the pool. + #[display(fmt = "[{:?}] Already imported", _0)] + AlreadyImported(Box), + /// The transaction cannot be imported cause it's a replacement and has too low priority. + #[display(fmt = "Too low priority ({} > {})", old, new)] + TooLowPriority { + /// Transaction already in the pool. + old: Priority, + /// Transaction entering the pool. + new: Priority, + }, + /// Deps cycle detected and we couldn't import transaction. + #[display(fmt = "Cycle Detected")] + CycleDetected, + /// Transaction was dropped immediately after it got inserted. + #[display(fmt = "Transaction couldn't enter the pool because of the limit.")] + ImmediatelyDropped, + /// Invalid block id. + InvalidBlockId(String), + /// The pool is not accepting future transactions. + #[display(fmt = "The pool is not accepting future transactions")] + RejectedFutureTransaction, } impl std::error::Error for Error {} /// Transaction pool error conversion. 
pub trait IntoPoolError: std::error::Error + Send + Sized { - /// Try to extract original `Error` - /// - /// This implementation is optional and used only to - /// provide more descriptive error messages for end users - /// of RPC API. - fn into_pool_error(self) -> std::result::Result { Err(self) } + /// Try to extract original `Error` + /// + /// This implementation is optional and used only to + /// provide more descriptive error messages for end users + /// of RPC API. + fn into_pool_error(self) -> std::result::Result { + Err(self) + } } impl IntoPoolError for Error { - fn into_pool_error(self) -> std::result::Result { Ok(self) } + fn into_pool_error(self) -> std::result::Result { + Ok(self) + } } diff --git a/primitives/transaction-pool/src/lib.rs b/primitives/transaction-pool/src/lib.rs index e4498bd024..77eba27fd6 100644 --- a/primitives/transaction-pool/src/lib.rs +++ b/primitives/transaction-pool/src/lib.rs @@ -19,15 +19,15 @@ #![warn(missing_docs)] #![cfg_attr(not(feature = "std"), no_std)] -pub mod runtime_api; #[cfg(feature = "std")] pub mod error; #[cfg(feature = "std")] mod pool; +pub mod runtime_api; #[cfg(feature = "std")] pub use pool::*; pub use sp_runtime::transaction_validity::{ - TransactionLongevity, TransactionPriority, TransactionTag, TransactionSource, + TransactionLongevity, TransactionPriority, TransactionSource, TransactionTag, }; diff --git a/primitives/transaction-pool/src/pool.rs b/primitives/transaction-pool/src/pool.rs index ddc3fffa15..5ada3d73ea 100644 --- a/primitives/transaction-pool/src/pool.rs +++ b/primitives/transaction-pool/src/pool.rs @@ -16,41 +16,36 @@ //! Transaction pool primitives types & Runtime API. 
-use std::{ - collections::HashMap, - hash::Hash, - sync::Arc, - pin::Pin, -}; -use futures::{Future, Stream,}; +use futures::{Future, Stream}; use serde::{Deserialize, Serialize}; -use sp_utils::mpsc; use sp_runtime::{ - generic::BlockId, - traits::{Block as BlockT, Member, NumberFor}, - transaction_validity::{ - TransactionLongevity, TransactionPriority, TransactionTag, TransactionSource, - }, + generic::BlockId, + traits::{Block as BlockT, Member, NumberFor}, + transaction_validity::{ + TransactionLongevity, TransactionPriority, TransactionSource, TransactionTag, + }, }; +use sp_utils::mpsc; +use std::{collections::HashMap, hash::Hash, pin::Pin, sync::Arc}; /// Transaction pool status. #[derive(Debug)] pub struct PoolStatus { - /// Number of transactions in the ready queue. - pub ready: usize, - /// Sum of bytes of ready transaction encodings. - pub ready_bytes: usize, - /// Number of transactions in the future queue. - pub future: usize, - /// Sum of bytes of ready transaction encodings. - pub future_bytes: usize, + /// Number of transactions in the ready queue. + pub ready: usize, + /// Sum of bytes of ready transaction encodings. + pub ready_bytes: usize, + /// Number of transactions in the future queue. + pub future: usize, + /// Sum of bytes of ready transaction encodings. + pub future_bytes: usize, } impl PoolStatus { - /// Returns true if the are no transactions in the pool. - pub fn is_empty(&self) -> bool { - self.ready == 0 && self.future == 0 - } + /// Returns true if the are no transactions in the pool. + pub fn is_empty(&self) -> bool { + self.ready == 0 && self.future == 0 + } } /// Possible transaction status events. @@ -102,32 +97,33 @@ impl PoolStatus { #[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub enum TransactionStatus { - /// Transaction is part of the future queue. - Future, - /// Transaction is part of the ready queue. - Ready, - /// The transaction has been broadcast to the given peers. 
- Broadcast(Vec), - /// Transaction has been included in block with given hash. - InBlock(BlockHash), - /// The block this transaction was included in has been retracted. - Retracted(BlockHash), - /// Maximum number of finality watchers has been reached, - /// old watchers are being removed. - FinalityTimeout(BlockHash), - /// Transaction has been finalized by a finality-gadget, e.g GRANDPA - Finalized(BlockHash), - /// Transaction has been replaced in the pool, by another transaction - /// that provides the same tags. (e.g. same (sender, nonce)). - Usurped(Hash), - /// Transaction has been dropped from the pool because of the limit. - Dropped, - /// Transaction is no longer valid in the current state. - Invalid, + /// Transaction is part of the future queue. + Future, + /// Transaction is part of the ready queue. + Ready, + /// The transaction has been broadcast to the given peers. + Broadcast(Vec), + /// Transaction has been included in block with given hash. + InBlock(BlockHash), + /// The block this transaction was included in has been retracted. + Retracted(BlockHash), + /// Maximum number of finality watchers has been reached, + /// old watchers are being removed. + FinalityTimeout(BlockHash), + /// Transaction has been finalized by a finality-gadget, e.g GRANDPA + Finalized(BlockHash), + /// Transaction has been replaced in the pool, by another transaction + /// that provides the same tags. (e.g. same (sender, nonce)). + Usurped(Hash), + /// Transaction has been dropped from the pool because of the limit. + Dropped, + /// Transaction is no longer valid in the current state. + Invalid, } /// The stream of transaction events. -pub type TransactionStatusStream = dyn Stream> + Send + Unpin; +pub type TransactionStatusStream = + dyn Stream> + Send + Unpin; /// The import notification event stream. pub type ImportNotificationStream = mpsc::TracingUnboundedReceiver; @@ -142,132 +138,139 @@ pub type TransactionFor

= <

::Block as BlockT>::Extrinsi pub type TransactionStatusStreamFor

= TransactionStatusStream, BlockHash

>; /// Typical future type used in transaction pool api. -pub type PoolFuture = std::pin::Pin> + Send>>; +pub type PoolFuture = std::pin::Pin> + Send>>; /// In-pool transaction interface. /// /// The pool is container of transactions that are implementing this trait. /// See `sp_runtime::ValidTransaction` for details about every field. pub trait InPoolTransaction { - /// Transaction type. - type Transaction; - /// Transaction hash type. - type Hash; - - /// Get the reference to the transaction data. - fn data(&self) -> &Self::Transaction; - /// Get hash of the transaction. - fn hash(&self) -> &Self::Hash; - /// Get priority of the transaction. - fn priority(&self) -> &TransactionPriority; - /// Get longevity of the transaction. - fn longevity(&self) ->&TransactionLongevity; - /// Get transaction dependencies. - fn requires(&self) -> &[TransactionTag]; - /// Get tags that transaction provides. - fn provides(&self) -> &[TransactionTag]; - /// Return a flag indicating if the transaction should be propagated to other peers. - fn is_propagable(&self) -> bool; + /// Transaction type. + type Transaction; + /// Transaction hash type. + type Hash; + + /// Get the reference to the transaction data. + fn data(&self) -> &Self::Transaction; + /// Get hash of the transaction. + fn hash(&self) -> &Self::Hash; + /// Get priority of the transaction. + fn priority(&self) -> &TransactionPriority; + /// Get longevity of the transaction. + fn longevity(&self) -> &TransactionLongevity; + /// Get transaction dependencies. + fn requires(&self) -> &[TransactionTag]; + /// Get tags that transaction provides. + fn provides(&self) -> &[TransactionTag]; + /// Return a flag indicating if the transaction should be propagated to other peers. + fn is_propagable(&self) -> bool; } /// Transaction pool interface. pub trait TransactionPool: Send + Sync { - /// Block type. - type Block: BlockT; - /// Transaction hash type. - type Hash: Hash + Eq + Member + Serialize; - /// In-pool transaction type. 
- type InPoolTransaction: InPoolTransaction< - Transaction = TransactionFor, - Hash = TxHash - >; - /// Error type. - type Error: From + crate::error::IntoPoolError; - - // *** RPC - - /// Returns a future that imports a bunch of unverified transactions to the pool. - fn submit_at( - &self, - at: &BlockId, - source: TransactionSource, - xts: Vec>, - ) -> PoolFuture, Self::Error>>, Self::Error>; - - /// Returns a future that imports one unverified transaction to the pool. - fn submit_one( - &self, - at: &BlockId, - source: TransactionSource, - xt: TransactionFor, - ) -> PoolFuture, Self::Error>; - - /// Returns a future that import a single transaction and starts to watch their progress in the pool. - fn submit_and_watch( - &self, - at: &BlockId, - source: TransactionSource, - xt: TransactionFor, - ) -> PoolFuture>, Self::Error>; - - // *** Block production / Networking - /// Get an iterator for ready transactions ordered by priority. - /// - /// Guarantees to return only when transaction pool got updated at `at` block. - /// Guarantees to return immediately when `None` is passed. - fn ready_at(&self, at: NumberFor) - -> Pin> + Send>> + Send>>; - - /// Get an iterator for ready transactions ordered by priority. - fn ready(&self) -> Box> + Send>; - - // *** Block production - /// Remove transactions identified by given hashes (and dependent transactions) from the pool. - fn remove_invalid(&self, hashes: &[TxHash]) -> Vec>; - - // *** logging - /// Returns pool status. - fn status(&self) -> PoolStatus; - - // *** logging / RPC / networking - /// Return an event stream of transactions imported to the pool. - fn import_notification_stream(&self) -> ImportNotificationStream>; - - // *** networking - /// Notify the pool about transactions broadcast. - fn on_broadcasted(&self, propagations: HashMap, Vec>); - - /// Returns transaction hash - fn hash_of(&self, xt: &TransactionFor) -> TxHash; - - /// Return specific ready transaction by hash, if there is one. 
- fn ready_transaction(&self, hash: &TxHash) -> Option>; + /// Block type. + type Block: BlockT; + /// Transaction hash type. + type Hash: Hash + Eq + Member + Serialize; + /// In-pool transaction type. + type InPoolTransaction: InPoolTransaction< + Transaction = TransactionFor, + Hash = TxHash, + >; + /// Error type. + type Error: From + crate::error::IntoPoolError; + + // *** RPC + + /// Returns a future that imports a bunch of unverified transactions to the pool. + fn submit_at( + &self, + at: &BlockId, + source: TransactionSource, + xts: Vec>, + ) -> PoolFuture, Self::Error>>, Self::Error>; + + /// Returns a future that imports one unverified transaction to the pool. + fn submit_one( + &self, + at: &BlockId, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture, Self::Error>; + + /// Returns a future that import a single transaction and starts to watch their progress in the pool. + fn submit_and_watch( + &self, + at: &BlockId, + source: TransactionSource, + xt: TransactionFor, + ) -> PoolFuture>, Self::Error>; + + // *** Block production / Networking + /// Get an iterator for ready transactions ordered by priority. + /// + /// Guarantees to return only when transaction pool got updated at `at` block. + /// Guarantees to return immediately when `None` is passed. + fn ready_at( + &self, + at: NumberFor, + ) -> Pin< + Box< + dyn Future> + Send>> + + Send, + >, + >; + + /// Get an iterator for ready transactions ordered by priority. + fn ready(&self) -> Box> + Send>; + + // *** Block production + /// Remove transactions identified by given hashes (and dependent transactions) from the pool. + fn remove_invalid(&self, hashes: &[TxHash]) -> Vec>; + + // *** logging + /// Returns pool status. + fn status(&self) -> PoolStatus; + + // *** logging / RPC / networking + /// Return an event stream of transactions imported to the pool. 
+ fn import_notification_stream(&self) -> ImportNotificationStream>; + + // *** networking + /// Notify the pool about transactions broadcast. + fn on_broadcasted(&self, propagations: HashMap, Vec>); + + /// Returns transaction hash + fn hash_of(&self, xt: &TransactionFor) -> TxHash; + + /// Return specific ready transaction by hash, if there is one. + fn ready_transaction(&self, hash: &TxHash) -> Option>; } /// Events that the transaction pool listens for. pub enum ChainEvent { - /// New blocks have been added to the chain - NewBlock { - /// Is this the new best block. - is_new_best: bool, - /// Id of the just imported block. - id: BlockId, - /// Header of the just imported block - header: B::Header, - /// List of retracted blocks ordered by block number. - retracted: Vec, - }, - /// An existing block has been finalized. - Finalized { - /// Hash of just finalized block - hash: B::Hash, - }, + /// New blocks have been added to the chain + NewBlock { + /// Is this the new best block. + is_new_best: bool, + /// Id of the just imported block. + id: BlockId, + /// Header of the just imported block + header: B::Header, + /// List of retracted blocks ordered by block number. + retracted: Vec, + }, + /// An existing block has been finalized. + Finalized { + /// Hash of just finalized block + hash: B::Hash, + }, } /// Trait for transaction pool maintenance. pub trait MaintainedTransactionPool: TransactionPool { - /// Perform maintenance - fn maintain(&self, event: ChainEvent) -> Pin + Send>>; + /// Perform maintenance + fn maintain(&self, event: ChainEvent) -> Pin + Send>>; } /// An abstraction for transaction pool. @@ -278,37 +281,33 @@ pub trait MaintainedTransactionPool: TransactionPool { /// be also used in context of other offchain calls. For one may generate and submit /// a transaction for some misbehavior reports (say equivocation). pub trait OffchainSubmitTransaction: Send + Sync { - /// Submit transaction. 
- /// - /// The transaction will end up in the pool and be propagated to others. - fn submit_at( - &self, - at: &BlockId, - extrinsic: Block::Extrinsic, - ) -> Result<(), ()>; + /// Submit transaction. + /// + /// The transaction will end up in the pool and be propagated to others. + fn submit_at(&self, at: &BlockId, extrinsic: Block::Extrinsic) -> Result<(), ()>; } impl OffchainSubmitTransaction for TPool { - fn submit_at( - &self, - at: &BlockId, - extrinsic: ::Extrinsic, - ) -> Result<(), ()> { - log::debug!( - target: "txpool", - "(offchain call) Submitting a transaction to the pool: {:?}", - extrinsic - ); - - let result = futures::executor::block_on(self.submit_one( - &at, TransactionSource::Local, extrinsic, - )); - - result.map(|_| ()) - .map_err(|e| log::warn!( - target: "txpool", - "(offchain call) Error submitting a transaction to the pool: {:?}", - e - )) - } + fn submit_at( + &self, + at: &BlockId, + extrinsic: ::Extrinsic, + ) -> Result<(), ()> { + log::debug!( + target: "txpool", + "(offchain call) Submitting a transaction to the pool: {:?}", + extrinsic + ); + + let result = + futures::executor::block_on(self.submit_one(&at, TransactionSource::Local, extrinsic)); + + result.map(|_| ()).map_err(|e| { + log::warn!( + target: "txpool", + "(offchain call) Error submitting a transaction to the pool: {:?}", + e + ) + }) + } } diff --git a/primitives/transaction-pool/src/runtime_api.rs b/primitives/transaction-pool/src/runtime_api.rs index fa2e51653b..395a1cbc9a 100644 --- a/primitives/transaction-pool/src/runtime_api.rs +++ b/primitives/transaction-pool/src/runtime_api.rs @@ -16,27 +16,27 @@ //! Tagged Transaction Queue Runtime API. -use sp_runtime::transaction_validity::{TransactionValidity, TransactionSource}; use sp_runtime::traits::Block as BlockT; +use sp_runtime::transaction_validity::{TransactionSource, TransactionValidity}; sp_api::decl_runtime_apis! { - /// The `TaggedTransactionQueue` api trait for interfering with the transaction queue. 
- #[api_version(2)] - pub trait TaggedTransactionQueue { - /// Validate the transaction. - #[changed_in(2)] - fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity; + /// The `TaggedTransactionQueue` api trait for interfering with the transaction queue. + #[api_version(2)] + pub trait TaggedTransactionQueue { + /// Validate the transaction. + #[changed_in(2)] + fn validate_transaction(tx: ::Extrinsic) -> TransactionValidity; - /// Validate the transaction. - /// - /// This method is invoked by the transaction pool to learn details about given transaction. - /// The implementation should make sure to verify the correctness of the transaction - /// against current state. - /// Note that this call may be performed by the pool multiple times and transactions - /// might be verified in any possible order. - fn validate_transaction( - source: TransactionSource, - tx: ::Extrinsic, - ) -> TransactionValidity; - } + /// Validate the transaction. + /// + /// This method is invoked by the transaction pool to learn details about given transaction. + /// The implementation should make sure to verify the correctness of the transaction + /// against current state. + /// Note that this call may be performed by the pool multiple times and transactions + /// might be verified in any possible order. + fn validate_transaction( + source: TransactionSource, + tx: ::Extrinsic, + ) -> TransactionValidity; + } } diff --git a/primitives/trie/benches/bench.rs b/primitives/trie/benches/bench.rs index d385b4bacd..ebe6768f0e 100644 --- a/primitives/trie/benches/bench.rs +++ b/primitives/trie/benches/bench.rs @@ -14,17 +14,17 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . 
-use criterion::{Criterion, criterion_group, criterion_main}; +use criterion::{criterion_group, criterion_main, Criterion}; criterion_group!(benches, benchmark); criterion_main!(benches); fn benchmark(c: &mut Criterion) { - trie_bench::standard_benchmark::< - sp_trie::Layout, - sp_trie::TrieStream, - >(c, "substrate-blake2"); - trie_bench::standard_benchmark::< - sp_trie::Layout, - sp_trie::TrieStream, - >(c, "substrate-keccak"); + trie_bench::standard_benchmark::< + sp_trie::Layout, + sp_trie::TrieStream, + >(c, "substrate-blake2"); + trie_bench::standard_benchmark::< + sp_trie::Layout, + sp_trie::TrieStream, + >(c, "substrate-keccak"); } diff --git a/primitives/trie/src/error.rs b/primitives/trie/src/error.rs index 2d3a1b7928..474fb43e5a 100644 --- a/primitives/trie/src/error.rs +++ b/primitives/trie/src/error.rs @@ -6,42 +6,42 @@ // option. This file may not be copied, modified, or distributed // except according to those terms. -#[cfg(feature="std")] -use std::fmt; -#[cfg(feature="std")] +#[cfg(feature = "std")] use std::error::Error as StdError; +#[cfg(feature = "std")] +use std::fmt; #[derive(Debug, PartialEq, Eq, Clone)] /// Error for trie node decoding. pub enum Error { - /// Bad format. - BadFormat, - /// Decoding error. - Decode(codec::Error) + /// Bad format. + BadFormat, + /// Decoding error. 
+ Decode(codec::Error), } impl From for Error { - fn from(x: codec::Error) -> Self { - Error::Decode(x) - } + fn from(x: codec::Error) -> Self { + Error::Decode(x) + } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl StdError for Error { - fn description(&self) -> &str { - match self { - Error::BadFormat => "Bad format error", - Error::Decode(_) => "Decoding error", - } - } + fn description(&self) -> &str { + match self { + Error::BadFormat => "Bad format error", + Error::Decode(_) => "Decoding error", + } + } } -#[cfg(feature="std")] +#[cfg(feature = "std")] impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { - Error::Decode(e) => write!(f, "Decode error: {}", e.what()), - Error::BadFormat => write!(f, "Bad format"), - } - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + match self { + Error::Decode(e) => write!(f, "Decode error: {}", e.what()), + Error::BadFormat => write!(f, "Bad format"), + } + } } diff --git a/primitives/trie/src/lib.rs b/primitives/trie/src/lib.rs index 80570a9792..ee16306a8a 100644 --- a/primitives/trie/src/lib.rs +++ b/primitives/trie/src/lib.rs @@ -19,64 +19,67 @@ #![cfg_attr(not(feature = "std"), no_std)] mod error; -mod node_header; mod node_codec; +mod node_header; mod storage_proof; mod trie_stream; -use sp_std::boxed::Box; -use sp_std::marker::PhantomData; -use sp_std::vec::Vec; -use hash_db::{Hasher, Prefix}; -use trie_db::proof::{generate_proof, verify_proof}; -pub use trie_db::proof::VerifyError; /// Our `NodeCodec`-specific error. pub use error::Error; -/// The Substrate format implementation of `TrieStream`. -pub use trie_stream::TrieStream; +/// Various re-exports from the `hash-db` crate. +pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +use hash_db::{Hasher, Prefix}; +pub use memory_db::prefixed_key; +/// Various re-exports from the `memory-db` crate. +pub use memory_db::KeyFunction; /// The Substrate format implementation of `NodeCodec`. 
pub use node_codec::NodeCodec; +use sp_std::boxed::Box; +use sp_std::marker::PhantomData; +use sp_std::vec::Vec; pub use storage_proof::StorageProof; +pub use trie_db::proof::VerifyError; +use trie_db::proof::{generate_proof, verify_proof}; /// Various re-exports from the `trie-db` crate. pub use trie_db::{ - Trie, TrieMut, DBValue, Recorder, CError, Query, TrieLayout, TrieConfiguration, nibble_ops, TrieDBIterator, + nibble_ops, CError, DBValue, Query, Recorder, Trie, TrieConfiguration, TrieDBIterator, + TrieLayout, TrieMut, }; -/// Various re-exports from the `memory-db` crate. -pub use memory_db::KeyFunction; -pub use memory_db::prefixed_key; -/// Various re-exports from the `hash-db` crate. -pub use hash_db::{HashDB as HashDBT, EMPTY_PREFIX}; +/// The Substrate format implementation of `TrieStream`. +pub use trie_stream::TrieStream; #[derive(Default)] /// substrate trie layout pub struct Layout(sp_std::marker::PhantomData); impl TrieLayout for Layout { - const USE_EXTENSION: bool = false; - type Hash = H; - type Codec = NodeCodec; + const USE_EXTENSION: bool = false; + type Hash = H; + type Codec = NodeCodec; } impl TrieConfiguration for Layout { - fn trie_root(input: I) -> ::Out where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - { - trie_root::trie_root_no_extension::(input) - } - - fn trie_root_unhashed(input: I) -> Vec where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - { - trie_root::unhashed_trie_no_extension::(input) - } - - fn encode_index(input: u32) -> Vec { - codec::Encode::encode(&codec::Compact(input)) - } + fn trie_root(input: I) -> ::Out + where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + { + trie_root::trie_root_no_extension::(input) + } + + fn trie_root_unhashed(input: I) -> Vec + where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + { + trie_root::unhashed_trie_no_extension::(input) + } + + fn encode_index(input: u32) -> Vec { + codec::Encode::encode(&codec::Compact(input)) + } 
} /// TrieDB error over `TrieConfiguration` trait. @@ -111,15 +114,15 @@ pub type TrieHash = <::Hash as Hasher>::Out; /// This module is for non generic definition of trie type. /// Only the `Hasher` trait is generic in this case. pub mod trie_types { - pub type Layout = super::Layout; - /// Persistent trie database read-access interface for the a given hasher. - pub type TrieDB<'a, H> = super::TrieDB<'a, Layout>; - /// Persistent trie database write-access interface for the a given hasher. - pub type TrieDBMut<'a, H> = super::TrieDBMut<'a, Layout>; - /// Querying interface, as in `trie_db` but less generic. - pub type Lookup<'a, H, Q> = trie_db::Lookup<'a, Layout, Q>; - /// As in `trie_db`, but less generic, error type for the crate. - pub type TrieError = trie_db::TrieError; + pub type Layout = super::Layout; + /// Persistent trie database read-access interface for the a given hasher. + pub type TrieDB<'a, H> = super::TrieDB<'a, Layout>; + /// Persistent trie database write-access interface for the a given hasher. + pub type TrieDBMut<'a, H> = super::TrieDBMut<'a, Layout>; + /// Querying interface, as in `trie_db` but less generic. + pub type Lookup<'a, H, Q> = trie_db::Lookup<'a, Layout, Q>; + /// As in `trie_db`, but less generic, error type for the crate. + pub type TrieError = trie_db::TrieError; } /// Create a proof for a subset of keys in a trie. @@ -131,16 +134,17 @@ pub mod trie_types { /// For a key `K` that is not included in the `db` a proof of non-inclusion is generated. /// These can be later checked in `verify_trie_proof`. 
pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( - db: &DB, - root: TrieHash, - keys: I, -) -> Result>, Box>> where - I: IntoIterator, - K: 'a + AsRef<[u8]>, - DB: hash_db::HashDBRef, + db: &DB, + root: TrieHash, + keys: I, +) -> Result>, Box>> +where + I: IntoIterator, + K: 'a + AsRef<[u8]>, + DB: hash_db::HashDBRef, { - let trie = TrieDB::::new(db, &root)?; - generate_proof(&trie, keys) + let trie = TrieDB::::new(db, &root)?; + generate_proof(&trie, keys) } /// Verify a set of key-value pairs against a trie root and a proof. @@ -152,212 +156,223 @@ pub fn generate_trie_proof<'a, L: TrieConfiguration, I, K, DB>( /// If the value is omitted (`(key, None)`), this key will be checked for non-inclusion in the /// proof. pub fn verify_trie_proof<'a, L: TrieConfiguration, I, K, V>( - root: &TrieHash, - proof: &[Vec], - items: I, -) -> Result<(), VerifyError, error::Error>> where - I: IntoIterator)>, - K: 'a + AsRef<[u8]>, - V: 'a + AsRef<[u8]>, + root: &TrieHash, + proof: &[Vec], + items: I, +) -> Result<(), VerifyError, error::Error>> +where + I: IntoIterator)>, + K: 'a + AsRef<[u8]>, + V: 'a + AsRef<[u8]>, { - verify_proof::, _, _, _>(root, proof, items) + verify_proof::, _, _, _>(root, proof, items) } /// Determine a trie root given a hash DB and delta values. 
pub fn delta_trie_root( - db: &mut DB, - mut root: TrieHash, - delta: I -) -> Result, Box>> where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - DB: hash_db::HashDB, + db: &mut DB, + mut root: TrieHash, + delta: I, +) -> Result, Box>> +where + I: IntoIterator)>, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + DB: hash_db::HashDB, { - { - let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; - - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } - } - - Ok(root) + { + let mut trie = TrieDBMut::::from_existing(&mut *db, &mut root)?; + + for (key, change) in delta { + match change { + Some(val) => trie.insert(key.as_ref(), val.as_ref())?, + None => trie.remove(key.as_ref())?, + }; + } + } + + Ok(root) } /// Read a value from the trie. pub fn read_trie_value>( - db: &DB, - root: &TrieHash, - key: &[u8] + db: &DB, + root: &TrieHash, + key: &[u8], ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + Ok(TrieDB::::new(&*db, root)? + .get(key) + .map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the trie with given Query. pub fn read_trie_value_with< - L: TrieConfiguration, - Q: Query, - DB: hash_db::HashDBRef + L: TrieConfiguration, + Q: Query, + DB: hash_db::HashDBRef, >( - db: &DB, - root: &TrieHash, - key: &[u8], - query: Q + db: &DB, + root: &TrieHash, + key: &[u8], + query: Q, ) -> Result>, Box>> { - Ok(TrieDB::::new(&*db, root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + Ok(TrieDB::::new(&*db, root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec()))?) } /// Determine the default child trie root. pub fn default_child_trie_root( - _storage_key: &[u8], + _storage_key: &[u8], ) -> ::Out { - L::trie_root::<_, Vec, Vec>(core::iter::empty()) + L::trie_root::<_, Vec, Vec>(core::iter::empty()) } /// Determine a child trie root given its ordered contents, closed form. 
H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_trie_root( - _storage_key: &[u8], - input: I, + _storage_key: &[u8], + input: I, ) -> ::Out - where - I: IntoIterator, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, +where + I: IntoIterator, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, { - L::trie_root(input) + L::trie_root(input) } /// Determine a child trie root given a hash DB and delta values. H is the default hasher, /// but a generic implementation may ignore this type parameter and use other hashers. pub fn child_delta_trie_root( - _storage_key: &[u8], - keyspace: &[u8], - db: &mut DB, - root_data: RD, - delta: I, + _storage_key: &[u8], + keyspace: &[u8], + db: &mut DB, + root_data: RD, + delta: I, ) -> Result<::Out, Box>> - where - I: IntoIterator)>, - A: AsRef<[u8]> + Ord, - B: AsRef<[u8]>, - RD: AsRef<[u8]>, - DB: hash_db::HashDB - + hash_db::PlainDB, trie_db::DBValue>, +where + I: IntoIterator)>, + A: AsRef<[u8]> + Ord, + B: AsRef<[u8]>, + RD: AsRef<[u8]>, + DB: hash_db::HashDB + + hash_db::PlainDB, trie_db::DBValue>, { - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_data.as_ref()); - - { - let mut db = KeySpacedDBMut::new(&mut *db, keyspace); - let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; - - for (key, change) in delta { - match change { - Some(val) => trie.insert(key.as_ref(), val.as_ref())?, - None => trie.remove(key.as_ref())?, - }; - } - } - - Ok(root) + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. 
+ root.as_mut().copy_from_slice(root_data.as_ref()); + + { + let mut db = KeySpacedDBMut::new(&mut *db, keyspace); + let mut trie = TrieDBMut::::from_existing(&mut db, &mut root)?; + + for (key, change) in delta { + match change { + Some(val) => trie.insert(key.as_ref(), val.as_ref())?, + None => trie.remove(key.as_ref())?, + }; + } + } + + Ok(root) } /// Call `f` for all keys in a child trie. pub fn for_keys_in_child_trie( - _storage_key: &[u8], - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - mut f: F + _storage_key: &[u8], + keyspace: &[u8], + db: &DB, + root_slice: &[u8], + mut f: F, ) -> Result<(), Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, +where + DB: hash_db::HashDBRef + + hash_db::PlainDBRef, trie_db::DBValue>, { - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + root.as_mut().copy_from_slice(root_slice); - let db = KeySpacedDB::new(&*db, keyspace); - let trie = TrieDB::::new(&db, &root)?; - let iter = trie.iter()?; + let db = KeySpacedDB::new(&*db, keyspace); + let trie = TrieDB::::new(&db, &root)?; + let iter = trie.iter()?; - for x in iter { - let (key, _) = x?; - f(&key); - } + for x in iter { + let (key, _) = x?; + f(&key); + } - Ok(()) + Ok(()) } /// Record all keys for a given root. 
pub fn record_all_keys( - db: &DB, - root: &TrieHash, - recorder: &mut Recorder> -) -> Result<(), Box>> where - DB: hash_db::HashDBRef + db: &DB, + root: &TrieHash, + recorder: &mut Recorder>, +) -> Result<(), Box>> +where + DB: hash_db::HashDBRef, { - let trie = TrieDB::::new(&*db, root)?; - let iter = trie.iter()?; + let trie = TrieDB::::new(&*db, root)?; + let iter = trie.iter()?; - for x in iter { - let (key, _) = x?; + for x in iter { + let (key, _) = x?; - // there's currently no API like iter_with() - // => use iter to enumerate all keys AND lookup each - // key using get_with - trie.get_with(&key, &mut *recorder)?; - } + // there's currently no API like iter_with() + // => use iter to enumerate all keys AND lookup each + // key using get_with + trie.get_with(&key, &mut *recorder)?; + } - Ok(()) + Ok(()) } /// Read a value from the child trie. pub fn read_child_trie_value( - _storage_key: &[u8], - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8] + _storage_key: &[u8], + keyspace: &[u8], + db: &DB, + root_slice: &[u8], + key: &[u8], ) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, +where + DB: hash_db::HashDBRef + + hash_db::PlainDBRef, trie_db::DBValue>, { - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get(key).map(|x| x.map(|val| val.to_vec()))?) + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + root.as_mut().copy_from_slice(root_slice); + + let db = KeySpacedDB::new(&*db, keyspace); + Ok(TrieDB::::new(&db, &root)? + .get(key) + .map(|x| x.map(|val| val.to_vec()))?) } /// Read a value from the child trie with given query. 
-pub fn read_child_trie_value_with, DB>( - _storage_key: &[u8], - keyspace: &[u8], - db: &DB, - root_slice: &[u8], - key: &[u8], - query: Q +pub fn read_child_trie_value_with, DB>( + _storage_key: &[u8], + keyspace: &[u8], + db: &DB, + root_slice: &[u8], + key: &[u8], + query: Q, ) -> Result>, Box>> - where - DB: hash_db::HashDBRef - + hash_db::PlainDBRef, trie_db::DBValue>, +where + DB: hash_db::HashDBRef + + hash_db::PlainDBRef, trie_db::DBValue>, { - let mut root = TrieHash::::default(); - // root is fetched from DB, not writable by runtime, so it's always valid. - root.as_mut().copy_from_slice(root_slice); - - let db = KeySpacedDB::new(&*db, keyspace); - Ok(TrieDB::::new(&db, &root)?.get_with(key, query).map(|x| x.map(|val| val.to_vec()))?) + let mut root = TrieHash::::default(); + // root is fetched from DB, not writable by runtime, so it's always valid. + root.as_mut().copy_from_slice(root_slice); + + let db = KeySpacedDB::new(&*db, keyspace); + Ok(TrieDB::::new(&db, &root)? + .get_with(key, query) + .map(|x| x.map(|val| val.to_vec()))?) } /// `HashDB` implementation that append a encoded prefix (unique id bytes) in addition to the @@ -373,487 +388,509 @@ pub struct KeySpacedDBMut<'a, DB, H>(&'a mut DB, &'a [u8], PhantomData); /// Utility function used to merge some byte data (keyspace) and `prefix` data /// before calling key value database primitives. 
fn keyspace_as_prefix_alloc(ks: &[u8], prefix: Prefix) -> (Vec, Option) { - let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; - result[..ks.len()].copy_from_slice(ks); - result[ks.len()..].copy_from_slice(prefix.0); - (result, prefix.1) + let mut result = sp_std::vec![0; ks.len() + prefix.0.len()]; + result[..ks.len()].copy_from_slice(ks); + result[ks.len()..].copy_from_slice(prefix.0); + (result, prefix.1) } -impl<'a, DB, H> KeySpacedDB<'a, DB, H> where - H: Hasher, +impl<'a, DB, H> KeySpacedDB<'a, DB, H> +where + H: Hasher, { - /// instantiate new keyspaced db - pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { - KeySpacedDB(db, ks, PhantomData) - } + /// instantiate new keyspaced db + pub fn new(db: &'a DB, ks: &'a [u8]) -> Self { + KeySpacedDB(db, ks, PhantomData) + } } -impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> where - H: Hasher, +impl<'a, DB, H> KeySpacedDBMut<'a, DB, H> +where + H: Hasher, { - /// instantiate new keyspaced db - pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { - KeySpacedDBMut(db, ks, PhantomData) - } + /// instantiate new keyspaced db + pub fn new(db: &'a mut DB, ks: &'a [u8]) -> Self { + KeySpacedDBMut(db, ks, PhantomData) + } } -impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> where - DB: hash_db::HashDBRef, - H: Hasher, - T: From<&'static [u8]>, +impl<'a, DB, H, T> hash_db::HashDBRef for KeySpacedDB<'a, DB, H> +where + DB: hash_db::HashDBRef, + H: Hasher, + T: From<&'static [u8]>, { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (&derived_prefix.0, 
derived_prefix.1)) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) + } } -impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, +impl<'a, DB, H, T> hash_db::HashDB for KeySpacedDBMut<'a, DB, H> +where + DB: hash_db::HashDB, + H: Hasher, + T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { - fn get(&self, key: &H::Out, prefix: Prefix) -> Option { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.get(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) - } - - fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.insert((&derived_prefix.0, derived_prefix.1), value) - } - - fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.emplace(key, (&derived_prefix.0, derived_prefix.1), value) - } - - fn remove(&mut self, key: &H::Out, prefix: Prefix) { - let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); - self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) - } + fn get(&self, key: &H::Out, prefix: Prefix) -> Option { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.get(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn contains(&self, key: &H::Out, prefix: Prefix) -> bool { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.contains(key, (&derived_prefix.0, derived_prefix.1)) + } + + fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H::Out { + 
let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.insert((&derived_prefix.0, derived_prefix.1), value) + } + + fn emplace(&mut self, key: H::Out, prefix: Prefix, value: T) { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0 + .emplace(key, (&derived_prefix.0, derived_prefix.1), value) + } + + fn remove(&mut self, key: &H::Out, prefix: Prefix) { + let derived_prefix = keyspace_as_prefix_alloc(self.1, prefix); + self.0.remove(key, (&derived_prefix.0, derived_prefix.1)) + } } -impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> where - DB: hash_db::HashDB, - H: Hasher, - T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, +impl<'a, DB, H, T> hash_db::AsHashDB for KeySpacedDBMut<'a, DB, H> +where + DB: hash_db::HashDB, + H: Hasher, + T: Default + PartialEq + for<'b> From<&'b [u8]> + Clone + Send + Sync, { - fn as_hash_db(&self) -> &dyn hash_db::HashDB { &*self } + fn as_hash_db(&self) -> &dyn hash_db::HashDB { + &*self + } - fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { - &mut *self - } + fn as_hash_db_mut<'b>(&'b mut self) -> &'b mut (dyn hash_db::HashDB + 'b) { + &mut *self + } } /// Constants used into trie simplification codec. 
mod trie_constants { - pub const EMPTY_TRIE: u8 = 0; - pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; - pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; - pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; - pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; + pub const EMPTY_TRIE: u8 = 0; + pub const NIBBLE_SIZE_BOUND: usize = u16::max_value() as usize; + pub const LEAF_PREFIX_MASK: u8 = 0b_01 << 6; + pub const BRANCH_WITHOUT_MASK: u8 = 0b_10 << 6; + pub const BRANCH_WITH_MASK: u8 = 0b_11 << 6; } #[cfg(test)] mod tests { - use super::*; - use codec::{Encode, Compact}; - use sp_core::Blake2Hasher; - use hash_db::{HashDB, Hasher}; - use trie_db::{DBValue, TrieMut, Trie, NodeCodec as NodeCodecT}; - use trie_standardmap::{Alphabet, ValueMode, StandardMap}; - use hex_literal::hex; - - type Layout = super::Layout; - - fn hashed_null_node() -> TrieHash { - ::hashed_null_node() - } - - fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { - { - let closed_form = T::trie_root(input.clone()); - let d = T::trie_root_unhashed(input.clone()); - println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); - let persistent = { - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - let mut t = TrieDBMut::::new(&mut memdb, &mut root); - for (x, y) in input.iter().rev() { - t.insert(x, y).unwrap(); - } - t.root().clone() - }; - assert_eq!(closed_form, persistent); - } - } - - fn check_iteration(input: &Vec<(&[u8], &[u8])>) { - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - { - let mut t = TrieDBMut::::new(&mut memdb, &mut root); - for (x, y) in input.clone() { - t.insert(x, y).unwrap(); - } - } - { - let t = TrieDB::::new(&mut memdb, &root).unwrap(); - assert_eq!( - input.iter().map(|(i, j)| (i.to_vec(), j.to_vec())).collect::>(), - t.iter().unwrap() - .map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()) - .collect::>() - ); - } - } - - #[test] - fn default_trie_root() { - let mut db = MemoryDB::default(); - let mut root = 
TrieHash::::default(); - let mut empty = TrieDBMut::::new(&mut db, &mut root); - empty.commit(); - let root1 = empty.root().as_ref().to_vec(); - let root2: Vec = Layout::trie_root::<_, Vec, Vec>( - std::iter::empty(), - ).as_ref().iter().cloned().collect(); - - assert_eq!(root1, root2); - } - - #[test] - fn empty_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![]; - check_equivalent::(&input); - check_iteration::(&input); - } - - #[test] - fn leaf_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0xbb][..])]; - check_equivalent::(&input); - check_iteration::(&input); - } - - #[test] - fn branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xba][..], &[0x11][..]), - ]; - check_equivalent::(&input); - check_iteration::(&input); - } - - #[test] - fn extension_and_branch_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0x10][..]), - (&[0xab][..], &[0x11][..]), - ]; - check_equivalent::(&input); - check_iteration::(&input); - } - - #[test] - fn standard_is_equivalent() { - let st = StandardMap { - alphabet: Alphabet::All, - min_key: 32, - journal_key: 0, - value_mode: ValueMode::Random, - count: 1000, - }; - let mut d = st.make(); - d.sort_unstable_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); - let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); - check_equivalent::(&dr); - check_iteration::(&dr); - } - - #[test] - fn extension_and_branch_with_value_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0xa0][..]), - (&[0xaa, 0xaa][..], &[0xaa][..]), - (&[0xaa, 0xbb][..], &[0xab][..]) - ]; - check_equivalent::(&input); - check_iteration::(&input); - } - - #[test] - fn bigger_extension_and_branch_with_value_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &[0xa0][..]), - (&[0xaa, 0xaa][..], &[0xaa][..]), - (&[0xaa, 0xbb][..], &[0xab][..]), - (&[0xbb][..], &[0xb0][..]), - (&[0xbb, 0xbb][..], &[0xbb][..]), - 
(&[0xbb, 0xcc][..], &[0xbc][..]), - ]; - check_equivalent::(&input); - check_iteration::(&input); - } - - #[test] - fn single_long_leaf_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), - (&[0xba][..], &[0x11][..]), - ]; - check_equivalent::(&input); - check_iteration::(&input); - } - - #[test] - fn two_long_leaves_is_equivalent() { - let input: Vec<(&[u8], &[u8])> = vec![ - (&[0xaa][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]), - (&[0xba][..], &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..]) - ]; - check_equivalent::(&input); - check_iteration::(&input); - } - - fn populate_trie<'db, T: TrieConfiguration>( - db: &'db mut dyn HashDB, - root: &'db mut TrieHash, - v: &[(Vec, Vec)] - ) -> TrieDBMut<'db, T> { - let mut t = TrieDBMut::::new(db, root); - for i in 0..v.len() { - let key: &[u8]= &v[i].0; - let val: &[u8] = &v[i].1; - t.insert(key, val).unwrap(); - } - t - } - - fn unpopulate_trie<'db, T: TrieConfiguration>( - t: &mut TrieDBMut<'db, T>, - v: &[(Vec, Vec)], - ) { - for i in v { - let key: &[u8]= &i.0; - t.remove(key).unwrap(); - } - } - - #[test] - fn random_should_work() { - let mut seed = ::Out::zero(); - for test_i in 0..10000 { - if test_i % 50 == 0 { - println!("{:?} of 10000 stress tests done", test_i); - } - let x = StandardMap { - alphabet: Alphabet::Custom(b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_".to_vec()), - min_key: 5, - journal_key: 0, - value_mode: ValueMode::Index, - count: 100, - }.make_with(seed.as_fixed_bytes_mut()); - - let real = Layout::trie_root(x.clone()); - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); - - memtrie.commit(); - if *memtrie.root() != real { - println!("TRIE MISMATCH"); - println!(""); - println!("{:?} vs {:?}", memtrie.root(), real); - for i in &x { - 
println!("{:#x?} -> {:#x?}", i.0, i.1); - } - } - assert_eq!(*memtrie.root(), real); - unpopulate_trie::(&mut memtrie, &x); - memtrie.commit(); - let hashed_null_node = hashed_null_node::(); - if *memtrie.root() != hashed_null_node { - println!("- TRIE MISMATCH"); - println!(""); - println!("{:?} vs {:?}", memtrie.root(), hashed_null_node); - for i in &x { - println!("{:#x?} -> {:#x?}", i.0, i.1); - } - } - assert_eq!(*memtrie.root(), hashed_null_node); - } - } - - fn to_compact(n: u8) -> u8 { - Compact(n).encode()[0] - } - - #[test] - fn codec_trie_empty() { - let input: Vec<(&[u8], &[u8])> = vec![]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); - println!("trie: {:#x?}", trie); - assert_eq!(trie, vec![0x0]); - } - - #[test] - fn codec_trie_single_tuple() { - let input = vec![ - (vec![0xaa], vec![0xbb]) - ]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); - println!("trie: {:#x?}", trie); - assert_eq!(trie, vec![ - 0x42, // leaf 0x40 (2^6) with (+) key of 2 nibbles (0x02) - 0xaa, // key data - to_compact(1), // length of value in bytes as Compact - 0xbb // value data - ]); - } - - #[test] - fn codec_trie_two_tuples_disjoint_keys() { - let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; - let trie = Layout::trie_root_unhashed::<_, _, _>(input); - println!("trie: {:#x?}", trie); - let mut ex = Vec::::new(); - ex.push(0x80); // branch, no value (0b_10..) no nibble - ex.push(0x12); // slots 1 & 4 are taken from 0-7 - ex.push(0x00); // no slots from 8-15 - ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. - ex.push(0x43); // leaf 0x40 with 3 nibbles - ex.push(0x03); // first nibble - ex.push(0x14); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xff); // value data - ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. 
- ex.push(0x43); // leaf with 3 nibbles - ex.push(0x08); // first nibble - ex.push(0x19); // second & third nibble - ex.push(to_compact(0x01)); // 1 byte data - ex.push(0xfe); // value data - - assert_eq!(trie, ex); - } - - #[test] - fn iterator_works() { - let pairs = vec![ - (hex!("0103000000000000000464").to_vec(), hex!("0400000000").to_vec()), - (hex!("0103000000000000000469").to_vec(), hex!("0401000000").to_vec()), - ]; - - let mut mdb = MemoryDB::default(); - let mut root = Default::default(); - let _ = populate_trie::(&mut mdb, &mut root, &pairs); - - let trie = TrieDB::::new(&mdb, &root).unwrap(); - - let iter = trie.iter().unwrap(); - let mut iter_pairs = Vec::new(); - for pair in iter { - let (key, value) = pair.unwrap(); - iter_pairs.push((key, value.to_vec())); - } - - assert_eq!(pairs, iter_pairs); - } - - #[test] - fn proof_non_inclusion_works() { - let pairs = vec![ - (hex!("0102").to_vec(), hex!("01").to_vec()), - (hex!("0203").to_vec(), hex!("0405").to_vec()), - ]; - - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); - - let non_included_key: Vec = hex!("0909").to_vec(); - let proof = generate_trie_proof::( - &memdb, - root, - &[non_included_key.clone()] - ).unwrap(); - - // Verifying that the K was not included into the trie should work. - assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key.clone(), None)], - ).is_ok() - ); - - // Verifying that the K was included into the trie should fail. 
- assert!(verify_trie_proof::>( - &root, - &proof, - &[(non_included_key, Some(hex!("1010").to_vec()))], - ).is_err() - ); - } - - #[test] - fn proof_inclusion_works() { - let pairs = vec![ - (hex!("0102").to_vec(), hex!("01").to_vec()), - (hex!("0203").to_vec(), hex!("0405").to_vec()), - ]; - - let mut memdb = MemoryDB::default(); - let mut root = Default::default(); - populate_trie::(&mut memdb, &mut root, &pairs); - - let proof = generate_trie_proof::( - &memdb, - root, - &[pairs[0].0.clone()] - ).unwrap(); - - // Check that a K, V included into the proof are verified. - assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] - ).is_ok() - ); - - // Absence of the V is not verified with the proof that has K, V included. - assert!(verify_trie_proof::>( - &root, - &proof, - &[(pairs[0].0.clone(), None)] - ).is_err() - ); - - // K not included into the trie is not verified. - assert!(verify_trie_proof::( - &root, - &proof, - &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] - ).is_err() - ); - - // K included into the trie but not included into the proof is not verified. 
- assert!(verify_trie_proof::( - &root, - &proof, - &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] - ).is_err() - ); - } + use super::*; + use codec::{Compact, Encode}; + use hash_db::{HashDB, Hasher}; + use hex_literal::hex; + use sp_core::Blake2Hasher; + use trie_db::{DBValue, NodeCodec as NodeCodecT, Trie, TrieMut}; + use trie_standardmap::{Alphabet, StandardMap, ValueMode}; + + type Layout = super::Layout; + + fn hashed_null_node() -> TrieHash { + ::hashed_null_node() + } + + fn check_equivalent(input: &Vec<(&[u8], &[u8])>) { + { + let closed_form = T::trie_root(input.clone()); + let d = T::trie_root_unhashed(input.clone()); + println!("Data: {:#x?}, {:#x?}", d, Blake2Hasher::hash(&d[..])); + let persistent = { + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + let mut t = TrieDBMut::::new(&mut memdb, &mut root); + for (x, y) in input.iter().rev() { + t.insert(x, y).unwrap(); + } + t.root().clone() + }; + assert_eq!(closed_form, persistent); + } + } + + fn check_iteration(input: &Vec<(&[u8], &[u8])>) { + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + { + let mut t = TrieDBMut::::new(&mut memdb, &mut root); + for (x, y) in input.clone() { + t.insert(x, y).unwrap(); + } + } + { + let t = TrieDB::::new(&mut memdb, &root).unwrap(); + assert_eq!( + input + .iter() + .map(|(i, j)| (i.to_vec(), j.to_vec())) + .collect::>(), + t.iter() + .unwrap() + .map(|x| x.map(|y| (y.0, y.1.to_vec())).unwrap()) + .collect::>() + ); + } + } + + #[test] + fn default_trie_root() { + let mut db = MemoryDB::default(); + let mut root = TrieHash::::default(); + let mut empty = TrieDBMut::::new(&mut db, &mut root); + empty.commit(); + let root1 = empty.root().as_ref().to_vec(); + let root2: Vec = Layout::trie_root::<_, Vec, Vec>(std::iter::empty()) + .as_ref() + .iter() + .cloned() + .collect(); + + assert_eq!(root1, root2); + } + + #[test] + fn empty_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![]; + 
check_equivalent::(&input); + check_iteration::(&input); + } + + #[test] + fn leaf_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![(&[0xaa][..], &[0xbb][..])]; + check_equivalent::(&input); + check_iteration::(&input); + } + + #[test] + fn branch_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xba][..], &[0x11][..])]; + check_equivalent::(&input); + check_iteration::(&input); + } + + #[test] + fn extension_and_branch_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = + vec![(&[0xaa][..], &[0x10][..]), (&[0xab][..], &[0x11][..])]; + check_equivalent::(&input); + check_iteration::(&input); + } + + #[test] + fn standard_is_equivalent() { + let st = StandardMap { + alphabet: Alphabet::All, + min_key: 32, + journal_key: 0, + value_mode: ValueMode::Random, + count: 1000, + }; + let mut d = st.make(); + d.sort_unstable_by(|&(ref a, _), &(ref b, _)| a.cmp(b)); + let dr = d.iter().map(|v| (&v.0[..], &v.1[..])).collect(); + check_equivalent::(&dr); + check_iteration::(&dr); + } + + #[test] + fn extension_and_branch_with_value_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![ + (&[0xaa][..], &[0xa0][..]), + (&[0xaa, 0xaa][..], &[0xaa][..]), + (&[0xaa, 0xbb][..], &[0xab][..]), + ]; + check_equivalent::(&input); + check_iteration::(&input); + } + + #[test] + fn bigger_extension_and_branch_with_value_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![ + (&[0xaa][..], &[0xa0][..]), + (&[0xaa, 0xaa][..], &[0xaa][..]), + (&[0xaa, 0xbb][..], &[0xab][..]), + (&[0xbb][..], &[0xb0][..]), + (&[0xbb, 0xbb][..], &[0xbb][..]), + (&[0xbb, 0xcc][..], &[0xbc][..]), + ]; + check_equivalent::(&input); + check_iteration::(&input); + } + + #[test] + fn single_long_leaf_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![ + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + (&[0xba][..], &[0x11][..]), + ]; + check_equivalent::(&input); + check_iteration::(&input); 
+ } + + #[test] + fn two_long_leaves_is_equivalent() { + let input: Vec<(&[u8], &[u8])> = vec![ + ( + &[0xaa][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + ( + &[0xba][..], + &b"ABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABCABC"[..], + ), + ]; + check_equivalent::(&input); + check_iteration::(&input); + } + + fn populate_trie<'db, T: TrieConfiguration>( + db: &'db mut dyn HashDB, + root: &'db mut TrieHash, + v: &[(Vec, Vec)], + ) -> TrieDBMut<'db, T> { + let mut t = TrieDBMut::::new(db, root); + for i in 0..v.len() { + let key: &[u8] = &v[i].0; + let val: &[u8] = &v[i].1; + t.insert(key, val).unwrap(); + } + t + } + + fn unpopulate_trie<'db, T: TrieConfiguration>( + t: &mut TrieDBMut<'db, T>, + v: &[(Vec, Vec)], + ) { + for i in v { + let key: &[u8] = &i.0; + t.remove(key).unwrap(); + } + } + + #[test] + fn random_should_work() { + let mut seed = ::Out::zero(); + for test_i in 0..10000 { + if test_i % 50 == 0 { + println!("{:?} of 10000 stress tests done", test_i); + } + let x = StandardMap { + alphabet: Alphabet::Custom(b"@QWERTYUIOPASDFGHJKLZXCVBNM[/]^_".to_vec()), + min_key: 5, + journal_key: 0, + value_mode: ValueMode::Index, + count: 100, + } + .make_with(seed.as_fixed_bytes_mut()); + + let real = Layout::trie_root(x.clone()); + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + let mut memtrie = populate_trie::(&mut memdb, &mut root, &x); + + memtrie.commit(); + if *memtrie.root() != real { + println!("TRIE MISMATCH"); + println!(""); + println!("{:?} vs {:?}", memtrie.root(), real); + for i in &x { + println!("{:#x?} -> {:#x?}", i.0, i.1); + } + } + assert_eq!(*memtrie.root(), real); + unpopulate_trie::(&mut memtrie, &x); + memtrie.commit(); + let hashed_null_node = hashed_null_node::(); + if *memtrie.root() != hashed_null_node { + println!("- TRIE MISMATCH"); + println!(""); + println!("{:?} vs {:?}", memtrie.root(), hashed_null_node); + for i in &x { + 
println!("{:#x?} -> {:#x?}", i.0, i.1); + } + } + assert_eq!(*memtrie.root(), hashed_null_node); + } + } + + fn to_compact(n: u8) -> u8 { + Compact(n).encode()[0] + } + + #[test] + fn codec_trie_empty() { + let input: Vec<(&[u8], &[u8])> = vec![]; + let trie = Layout::trie_root_unhashed::<_, _, _>(input); + println!("trie: {:#x?}", trie); + assert_eq!(trie, vec![0x0]); + } + + #[test] + fn codec_trie_single_tuple() { + let input = vec![(vec![0xaa], vec![0xbb])]; + let trie = Layout::trie_root_unhashed::<_, _, _>(input); + println!("trie: {:#x?}", trie); + assert_eq!( + trie, + vec![ + 0x42, // leaf 0x40 (2^6) with (+) key of 2 nibbles (0x02) + 0xaa, // key data + to_compact(1), // length of value in bytes as Compact + 0xbb // value data + ] + ); + } + + #[test] + fn codec_trie_two_tuples_disjoint_keys() { + let input = vec![(&[0x48, 0x19], &[0xfe]), (&[0x13, 0x14], &[0xff])]; + let trie = Layout::trie_root_unhashed::<_, _, _>(input); + println!("trie: {:#x?}", trie); + let mut ex = Vec::::new(); + ex.push(0x80); // branch, no value (0b_10..) no nibble + ex.push(0x12); // slots 1 & 4 are taken from 0-7 + ex.push(0x00); // no slots from 8-15 + ex.push(to_compact(0x05)); // first slot: LEAF, 5 bytes long. + ex.push(0x43); // leaf 0x40 with 3 nibbles + ex.push(0x03); // first nibble + ex.push(0x14); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xff); // value data + ex.push(to_compact(0x05)); // second slot: LEAF, 5 bytes long. 
+ ex.push(0x43); // leaf with 3 nibbles + ex.push(0x08); // first nibble + ex.push(0x19); // second & third nibble + ex.push(to_compact(0x01)); // 1 byte data + ex.push(0xfe); // value data + + assert_eq!(trie, ex); + } + + #[test] + fn iterator_works() { + let pairs = vec![ + ( + hex!("0103000000000000000464").to_vec(), + hex!("0400000000").to_vec(), + ), + ( + hex!("0103000000000000000469").to_vec(), + hex!("0401000000").to_vec(), + ), + ]; + + let mut mdb = MemoryDB::default(); + let mut root = Default::default(); + let _ = populate_trie::(&mut mdb, &mut root, &pairs); + + let trie = TrieDB::::new(&mdb, &root).unwrap(); + + let iter = trie.iter().unwrap(); + let mut iter_pairs = Vec::new(); + for pair in iter { + let (key, value) = pair.unwrap(); + iter_pairs.push((key, value.to_vec())); + } + + assert_eq!(pairs, iter_pairs); + } + + #[test] + fn proof_non_inclusion_works() { + let pairs = vec![ + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), + ]; + + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + populate_trie::(&mut memdb, &mut root, &pairs); + + let non_included_key: Vec = hex!("0909").to_vec(); + let proof = + generate_trie_proof::(&memdb, root, &[non_included_key.clone()]) + .unwrap(); + + // Verifying that the K was not included into the trie should work. + assert!(verify_trie_proof::>( + &root, + &proof, + &[(non_included_key.clone(), None)], + ) + .is_ok()); + + // Verifying that the K was included into the trie should fail. 
+ assert!(verify_trie_proof::>( + &root, + &proof, + &[(non_included_key, Some(hex!("1010").to_vec()))], + ) + .is_err()); + } + + #[test] + fn proof_inclusion_works() { + let pairs = vec![ + (hex!("0102").to_vec(), hex!("01").to_vec()), + (hex!("0203").to_vec(), hex!("0405").to_vec()), + ]; + + let mut memdb = MemoryDB::default(); + let mut root = Default::default(); + populate_trie::(&mut memdb, &mut root, &pairs); + + let proof = + generate_trie_proof::(&memdb, root, &[pairs[0].0.clone()]).unwrap(); + + // Check that a K, V included into the proof are verified. + assert!(verify_trie_proof::( + &root, + &proof, + &[(pairs[0].0.clone(), Some(pairs[0].1.clone()))] + ) + .is_ok()); + + // Absence of the V is not verified with the proof that has K, V included. + assert!(verify_trie_proof::>( + &root, + &proof, + &[(pairs[0].0.clone(), None)] + ) + .is_err()); + + // K not included into the trie is not verified. + assert!(verify_trie_proof::( + &root, + &proof, + &[(hex!("4242").to_vec(), Some(pairs[0].1.clone()))] + ) + .is_err()); + + // K included into the trie but not included into the proof is not verified. + assert!(verify_trie_proof::( + &root, + &proof, + &[(pairs[1].0.clone(), Some(pairs[1].1.clone()))] + ) + .is_err()); + } } diff --git a/primitives/trie/src/node_codec.rs b/primitives/trie/src/node_codec.rs index 8a61f372cf..30ab422a28 100644 --- a/primitives/trie/src/node_codec.rs +++ b/primitives/trie/src/node_codec.rs @@ -16,70 +16,70 @@ //! `NodeCodec` implementation for Substrate's trie format. 
+use super::node_header::{NodeHeader, NodeKind}; +use crate::error::Error; +use crate::trie_constants; +use codec::{Compact, Decode, Encode, Input}; +use hash_db::Hasher; +use sp_std::borrow::Borrow; use sp_std::marker::PhantomData; use sp_std::ops::Range; use sp_std::vec::Vec; -use sp_std::borrow::Borrow; -use codec::{Encode, Decode, Input, Compact}; -use hash_db::Hasher; -use trie_db::{self, node::{NibbleSlicePlan, NodePlan, NodeHandlePlan}, ChildReference, - nibble_ops, Partial, NodeCodec as NodeCodecT}; -use crate::error::Error; -use crate::trie_constants; -use super::{node_header::{NodeHeader, NodeKind}}; +use trie_db::{ + self, nibble_ops, + node::{NibbleSlicePlan, NodeHandlePlan, NodePlan}, + ChildReference, NodeCodec as NodeCodecT, Partial, +}; /// Helper struct for trie node decoder. This implements `codec::Input` on a byte slice, while /// tracking the absolute position. This is similar to `std::io::Cursor` but does not implement /// `Read` and `io` is not in `sp-std`. struct ByteSliceInput<'a> { - data: &'a [u8], - offset: usize, + data: &'a [u8], + offset: usize, } impl<'a> ByteSliceInput<'a> { - fn new(data: &'a [u8]) -> Self { - ByteSliceInput { - data, - offset: 0, - } - } - - fn take(&mut self, count: usize) -> Result, codec::Error> { - if self.offset + count > self.data.len() { - return Err("out of data".into()); - } - - let range = self.offset..(self.offset + count); - self.offset += count; - Ok(range) - } + fn new(data: &'a [u8]) -> Self { + ByteSliceInput { data, offset: 0 } + } + + fn take(&mut self, count: usize) -> Result, codec::Error> { + if self.offset + count > self.data.len() { + return Err("out of data".into()); + } + + let range = self.offset..(self.offset + count); + self.offset += count; + Ok(range) + } } impl<'a> Input for ByteSliceInput<'a> { - fn remaining_len(&mut self) -> Result, codec::Error> { - let remaining = if self.offset <= self.data.len() { - Some(self.data.len() - self.offset) - } else { - None - }; - Ok(remaining) - } 
- - fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { - let range = self.take(into.len())?; - into.copy_from_slice(&self.data[range]); - Ok(()) - } - - fn read_byte(&mut self) -> Result { - if self.offset + 1 > self.data.len() { - return Err("out of data".into()); - } - - let byte = self.data[self.offset]; - self.offset += 1; - Ok(byte) - } + fn remaining_len(&mut self) -> Result, codec::Error> { + let remaining = if self.offset <= self.data.len() { + Some(self.data.len() - self.offset) + } else { + None + }; + Ok(remaining) + } + + fn read(&mut self, into: &mut [u8]) -> Result<(), codec::Error> { + let range = self.take(into.len())?; + into.copy_from_slice(&self.data[range]); + Ok(()) + } + + fn read_byte(&mut self) -> Result { + if self.offset + 1 > self.data.len() { + return Err("out of data".into()); + } + + let byte = self.data[self.offset]; + self.offset += 1; + Ok(byte) + } } /// Concrete implementation of a `NodeCodec` with Parity Codec encoding, generic over the `Hasher` @@ -87,137 +87,141 @@ impl<'a> Input for ByteSliceInput<'a> { pub struct NodeCodec(PhantomData); impl NodeCodecT for NodeCodec { - type Error = Error; - type HashOut = H::Out; - - fn hashed_null_node() -> ::Out { - H::hash(::empty_node()) - } - - fn decode_plan(data: &[u8]) -> sp_std::result::Result { - let mut input = ByteSliceInput::new(data); - match NodeHeader::decode(&mut input)? 
{ - NodeHeader::Null => Ok(NodePlan::Empty), - NodeHeader::Branch(has_value, nibble_count) => { - let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; - // check that the padding is valid (if any) - if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); - } - let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, - )?; - let partial_padding = nibble_ops::number_padding(nibble_count); - let bitmap_range = input.take(BITMAP_LENGTH)?; - let bitmap = Bitmap::decode(&data[bitmap_range])?; - let value = if has_value { - let count = >::decode(&mut input)?.0 as usize; - Some(input.take(count)?) - } else { - None - }; - let mut children = [ - None, None, None, None, None, None, None, None, - None, None, None, None, None, None, None, None, - ]; - for i in 0..nibble_ops::NIBBLE_LENGTH { - if bitmap.value_at(i) { - let count = >::decode(&mut input)?.0 as usize; - let range = input.take(count)?; - children[i] = Some(if count == H::LENGTH { - NodeHandlePlan::Hash(range) - } else { - NodeHandlePlan::Inline(range) - }); - } - } - Ok(NodePlan::NibbledBranch { - partial: NibbleSlicePlan::new(partial, partial_padding), - value, - children, - }) - } - NodeHeader::Leaf(nibble_count) => { - let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; - // check that the padding is valid (if any) - if padding && nibble_ops::pad_left(data[input.offset]) != 0 { - return Err(Error::BadFormat); - } - let partial = input.take( - (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) / nibble_ops::NIBBLE_PER_BYTE, - )?; - let partial_padding = nibble_ops::number_padding(nibble_count); - let count = >::decode(&mut input)?.0 as usize; - Ok(NodePlan::Leaf { - partial: NibbleSlicePlan::new(partial, partial_padding), - value: input.take(count)?, - }) - } - } - } - - fn is_empty_node(data: &[u8]) -> bool { - data == ::empty_node() - } - - fn empty_node() -> &'static [u8] { - 
&[trie_constants::EMPTY_TRIE] - } - - fn leaf_node(partial: Partial, value: &[u8]) -> Vec { - let mut output = partial_encode(partial, NodeKind::Leaf); - value.encode_to(&mut output); - output - } - - fn extension_node( - _partial: impl Iterator, - _nbnibble: usize, - _child: ChildReference<::Out>, - ) -> Vec { - unreachable!() - } - - fn branch_node( - _children: impl Iterator::Out>>>>, - _maybe_value: Option<&[u8]>, - ) -> Vec { - unreachable!() - } - - fn branch_node_nibbled( - partial: impl Iterator, - number_nibble: usize, - children: impl Iterator::Out>>>>, - maybe_value: Option<&[u8]>, - ) -> Vec { - let mut output = if maybe_value.is_some() { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) - } else { - partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) - }; - let bitmap_index = output.len(); - let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; - (0..BITMAP_LENGTH).for_each(|_|output.push(0)); - if let Some(value) = maybe_value { - value.encode_to(&mut output); - }; - Bitmap::encode(children.map(|maybe_child| match maybe_child.borrow() { - Some(ChildReference::Hash(h)) => { - h.as_ref().encode_to(&mut output); - true - } - &Some(ChildReference::Inline(inline_data, len)) => { - inline_data.as_ref()[..len].encode_to(&mut output); - true - } - None => false, - }), bitmap.as_mut()); - output[bitmap_index..bitmap_index + BITMAP_LENGTH] - .copy_from_slice(&bitmap[..BITMAP_LENGTH]); - output - } - + type Error = Error; + type HashOut = H::Out; + + fn hashed_null_node() -> ::Out { + H::hash(::empty_node()) + } + + fn decode_plan(data: &[u8]) -> sp_std::result::Result { + let mut input = ByteSliceInput::new(data); + match NodeHeader::decode(&mut input)? 
{ + NodeHeader::Null => Ok(NodePlan::Empty), + NodeHeader::Branch(has_value, nibble_count) => { + let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; + // check that the padding is valid (if any) + if padding && nibble_ops::pad_left(data[input.offset]) != 0 { + return Err(Error::BadFormat); + } + let partial = input.take( + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) + / nibble_ops::NIBBLE_PER_BYTE, + )?; + let partial_padding = nibble_ops::number_padding(nibble_count); + let bitmap_range = input.take(BITMAP_LENGTH)?; + let bitmap = Bitmap::decode(&data[bitmap_range])?; + let value = if has_value { + let count = >::decode(&mut input)?.0 as usize; + Some(input.take(count)?) + } else { + None + }; + let mut children = [ + None, None, None, None, None, None, None, None, None, None, None, None, None, + None, None, None, + ]; + for i in 0..nibble_ops::NIBBLE_LENGTH { + if bitmap.value_at(i) { + let count = >::decode(&mut input)?.0 as usize; + let range = input.take(count)?; + children[i] = Some(if count == H::LENGTH { + NodeHandlePlan::Hash(range) + } else { + NodeHandlePlan::Inline(range) + }); + } + } + Ok(NodePlan::NibbledBranch { + partial: NibbleSlicePlan::new(partial, partial_padding), + value, + children, + }) + } + NodeHeader::Leaf(nibble_count) => { + let padding = nibble_count % nibble_ops::NIBBLE_PER_BYTE != 0; + // check that the padding is valid (if any) + if padding && nibble_ops::pad_left(data[input.offset]) != 0 { + return Err(Error::BadFormat); + } + let partial = input.take( + (nibble_count + (nibble_ops::NIBBLE_PER_BYTE - 1)) + / nibble_ops::NIBBLE_PER_BYTE, + )?; + let partial_padding = nibble_ops::number_padding(nibble_count); + let count = >::decode(&mut input)?.0 as usize; + Ok(NodePlan::Leaf { + partial: NibbleSlicePlan::new(partial, partial_padding), + value: input.take(count)?, + }) + } + } + } + + fn is_empty_node(data: &[u8]) -> bool { + data == ::empty_node() + } + + fn empty_node() -> &'static [u8] { + 
&[trie_constants::EMPTY_TRIE] + } + + fn leaf_node(partial: Partial, value: &[u8]) -> Vec { + let mut output = partial_encode(partial, NodeKind::Leaf); + value.encode_to(&mut output); + output + } + + fn extension_node( + _partial: impl Iterator, + _nbnibble: usize, + _child: ChildReference<::Out>, + ) -> Vec { + unreachable!() + } + + fn branch_node( + _children: impl Iterator::Out>>>>, + _maybe_value: Option<&[u8]>, + ) -> Vec { + unreachable!() + } + + fn branch_node_nibbled( + partial: impl Iterator, + number_nibble: usize, + children: impl Iterator::Out>>>>, + maybe_value: Option<&[u8]>, + ) -> Vec { + let mut output = if maybe_value.is_some() { + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchWithValue) + } else { + partial_from_iterator_encode(partial, number_nibble, NodeKind::BranchNoValue) + }; + let bitmap_index = output.len(); + let mut bitmap: [u8; BITMAP_LENGTH] = [0; BITMAP_LENGTH]; + (0..BITMAP_LENGTH).for_each(|_| output.push(0)); + if let Some(value) = maybe_value { + value.encode_to(&mut output); + }; + Bitmap::encode( + children.map(|maybe_child| match maybe_child.borrow() { + Some(ChildReference::Hash(h)) => { + h.as_ref().encode_to(&mut output); + true + } + &Some(ChildReference::Inline(inline_data, len)) => { + inline_data.as_ref()[..len].encode_to(&mut output); + true + } + None => false, + }), + bitmap.as_mut(), + ); + output[bitmap_index..bitmap_index + BITMAP_LENGTH] + .copy_from_slice(&bitmap[..BITMAP_LENGTH]); + output + } } // utils @@ -225,41 +229,41 @@ impl NodeCodecT for NodeCodec { /// Encode and allocate node type header (type and size), and partial value. /// It uses an iterator over encoded partial bytes as input. 
fn partial_from_iterator_encode>( - partial: I, - nibble_count: usize, - node_kind: NodeKind, + partial: I, + nibble_count: usize, + node_kind: NodeKind, ) -> Vec { - let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); - - let mut output = Vec::with_capacity(3 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); - match node_kind { - NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), - NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), - NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - }; - output.extend(partial); - output + let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); + + let mut output = Vec::with_capacity(3 + (nibble_count / nibble_ops::NIBBLE_PER_BYTE)); + match node_kind { + NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), + NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), + NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), + }; + output.extend(partial); + output } /// Encode and allocate node type header (type and size), and partial value. /// Same as `partial_from_iterator_encode` but uses non encoded `Partial` as input. 
fn partial_encode(partial: Partial, node_kind: NodeKind) -> Vec { - let number_nibble_encoded = (partial.0).0 as usize; - let nibble_count = partial.1.len() * nibble_ops::NIBBLE_PER_BYTE + number_nibble_encoded; - - let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); - - let mut output = Vec::with_capacity(3 + partial.1.len()); - match node_kind { - NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), - NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), - NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), - }; - if number_nibble_encoded > 0 { - output.push(nibble_ops::pad_right((partial.0).1)); - } - output.extend_from_slice(&partial.1[..]); - output + let number_nibble_encoded = (partial.0).0 as usize; + let nibble_count = partial.1.len() * nibble_ops::NIBBLE_PER_BYTE + number_nibble_encoded; + + let nibble_count = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibble_count); + + let mut output = Vec::with_capacity(3 + partial.1.len()); + match node_kind { + NodeKind::Leaf => NodeHeader::Leaf(nibble_count).encode_to(&mut output), + NodeKind::BranchWithValue => NodeHeader::Branch(true, nibble_count).encode_to(&mut output), + NodeKind::BranchNoValue => NodeHeader::Branch(false, nibble_count).encode_to(&mut output), + }; + if number_nibble_encoded > 0 { + output.push(nibble_ops::pad_right((partial.0).1)); + } + output.extend_from_slice(&partial.1[..]); + output } const BITMAP_LENGTH: usize = 2; @@ -271,22 +275,24 @@ const BITMAP_LENGTH: usize = 2; pub(crate) struct Bitmap(u16); impl Bitmap { - pub fn decode(data: &[u8]) -> Result { - Ok(Bitmap(u16::decode(&mut &data[..])?)) - } - - pub fn value_at(&self, i: usize) -> bool { - self.0 & (1u16 << i) != 0 - } - - pub fn encode>(has_children: I , dest: &mut [u8]) { - let mut bitmap: u16 = 0; - let mut cursor: u16 = 1; - for v in has_children { - if v { bitmap |= cursor } - cursor <<= 
1; - } - dest[0] = (bitmap % 256) as u8; - dest[1] = (bitmap / 256) as u8; - } + pub fn decode(data: &[u8]) -> Result { + Ok(Bitmap(u16::decode(&mut &data[..])?)) + } + + pub fn value_at(&self, i: usize) -> bool { + self.0 & (1u16 << i) != 0 + } + + pub fn encode>(has_children: I, dest: &mut [u8]) { + let mut bitmap: u16 = 0; + let mut cursor: u16 = 1; + for v in has_children { + if v { + bitmap |= cursor + } + cursor <<= 1; + } + dest[0] = (bitmap % 256) as u8; + dest[1] = (bitmap / 256) as u8; + } } diff --git a/primitives/trie/src/node_header.rs b/primitives/trie/src/node_header.rs index 7aa1629254..9dd5d09a0c 100644 --- a/primitives/trie/src/node_header.rs +++ b/primitives/trie/src/node_header.rs @@ -17,106 +17,112 @@ //! The node header. use crate::trie_constants; -use codec::{Encode, Decode, Input, Output}; +use codec::{Decode, Encode, Input, Output}; use sp_std::iter::once; /// A node header -#[derive(Copy, Clone, PartialEq, Eq)] -#[derive(sp_core::RuntimeDebug)] +#[derive(Copy, Clone, PartialEq, Eq, sp_core::RuntimeDebug)] pub(crate) enum NodeHeader { - Null, - Branch(bool, usize), - Leaf(usize), + Null, + Branch(bool, usize), + Leaf(usize), } /// NodeHeader without content pub(crate) enum NodeKind { - Leaf, - BranchNoValue, - BranchWithValue, + Leaf, + BranchNoValue, + BranchWithValue, } impl Encode for NodeHeader { - fn encode_to(&self, output: &mut T) { - match self { - NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), - NodeHeader::Branch(true, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, output), - NodeHeader::Branch(false, nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output), - NodeHeader::Leaf(nibble_count) => - encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, output), - } - } + fn encode_to(&self, output: &mut T) { + match self { + NodeHeader::Null => output.push_byte(trie_constants::EMPTY_TRIE), + 
NodeHeader::Branch(true, nibble_count) => { + encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITH_MASK, output) + } + NodeHeader::Branch(false, nibble_count) => { + encode_size_and_prefix(*nibble_count, trie_constants::BRANCH_WITHOUT_MASK, output) + } + NodeHeader::Leaf(nibble_count) => { + encode_size_and_prefix(*nibble_count, trie_constants::LEAF_PREFIX_MASK, output) + } + } + } } impl codec::EncodeLike for NodeHeader {} impl Decode for NodeHeader { - fn decode(input: &mut I) -> Result { - let i = input.read_byte()?; - if i == trie_constants::EMPTY_TRIE { - return Ok(NodeHeader::Null); - } - match i & (0b11 << 6) { - trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input)?)), - trie_constants::BRANCH_WITHOUT_MASK => Ok(NodeHeader::Branch(false, decode_size(i, input)?)), - trie_constants::BRANCH_WITH_MASK => Ok(NodeHeader::Branch(true, decode_size(i, input)?)), - // do not allow any special encoding - _ => Err("Unallowed encoding".into()), - } - } + fn decode(input: &mut I) -> Result { + let i = input.read_byte()?; + if i == trie_constants::EMPTY_TRIE { + return Ok(NodeHeader::Null); + } + match i & (0b11 << 6) { + trie_constants::LEAF_PREFIX_MASK => Ok(NodeHeader::Leaf(decode_size(i, input)?)), + trie_constants::BRANCH_WITHOUT_MASK => { + Ok(NodeHeader::Branch(false, decode_size(i, input)?)) + } + trie_constants::BRANCH_WITH_MASK => { + Ok(NodeHeader::Branch(true, decode_size(i, input)?)) + } + // do not allow any special encoding + _ => Err("Unallowed encoding".into()), + } + } } /// Returns an iterator over encoded bytes for node header and size. /// Size encoding allows unlimited, length inefficient, representation, but /// is bounded to 16 bit maximum value to avoid possible DOS. 
pub(crate) fn size_and_prefix_iterator(size: usize, prefix: u8) -> impl Iterator { - let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); + let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, size); - let l1 = sp_std::cmp::min(62, size); - let (first_byte, mut rem) = if size == l1 { - (once(prefix + l1 as u8), 0) - } else { - (once(prefix + 63), size - l1) - }; - let next_bytes = move || { - if rem > 0 { - if rem < 256 { - let result = rem - 1; - rem = 0; - Some(result as u8) - } else { - rem = rem.saturating_sub(255); - Some(255) - } - } else { - None - } - }; - first_byte.chain(sp_std::iter::from_fn(next_bytes)) + let l1 = sp_std::cmp::min(62, size); + let (first_byte, mut rem) = if size == l1 { + (once(prefix + l1 as u8), 0) + } else { + (once(prefix + 63), size - l1) + }; + let next_bytes = move || { + if rem > 0 { + if rem < 256 { + let result = rem - 1; + rem = 0; + Some(result as u8) + } else { + rem = rem.saturating_sub(255); + Some(255) + } + } else { + None + } + }; + first_byte.chain(sp_std::iter::from_fn(next_bytes)) } /// Encodes size and prefix to a stream output. fn encode_size_and_prefix(size: usize, prefix: u8, out: &mut impl Output) { - for b in size_and_prefix_iterator(size, prefix) { - out.push_byte(b) - } + for b in size_and_prefix_iterator(size, prefix) { + out.push_byte(b) + } } /// Decode size only from stream input and header byte. fn decode_size(first: u8, input: &mut impl Input) -> Result { - let mut result = (first & 255u8 >> 2) as usize; - if result < 63 { - return Ok(result); - } - result -= 1; - while result <= trie_constants::NIBBLE_SIZE_BOUND { - let n = input.read_byte()? as usize; - if n < 255 { - return Ok(result + n + 1); - } - result += 255; - } - Ok(trie_constants::NIBBLE_SIZE_BOUND) + let mut result = (first & 255u8 >> 2) as usize; + if result < 63 { + return Ok(result); + } + result -= 1; + while result <= trie_constants::NIBBLE_SIZE_BOUND { + let n = input.read_byte()? 
as usize; + if n < 255 { + return Ok(result + n + 1); + } + result += 255; + } + Ok(trie_constants::NIBBLE_SIZE_BOUND) } diff --git a/primitives/trie/src/storage_proof.rs b/primitives/trie/src/storage_proof.rs index 254adc2fcb..cf07232165 100644 --- a/primitives/trie/src/storage_proof.rs +++ b/primitives/trie/src/storage_proof.rs @@ -14,9 +14,9 @@ // You should have received a copy of the GNU General Public License // along with Parity. If not, see . +use codec::{Decode, Encode}; +use hash_db::{HashDB, Hasher}; use sp_std::vec::Vec; -use codec::{Encode, Decode}; -use hash_db::{Hasher, HashDB}; /// A proof that some set of key-value pairs are included in the storage trie. The proof contains /// the storage values so that the partial storage backend can be reconstructed by a verifier that @@ -27,83 +27,87 @@ use hash_db::{Hasher, HashDB}; /// the serialized nodes and performing the key lookups. #[derive(Debug, PartialEq, Eq, Clone, Encode, Decode)] pub struct StorageProof { - trie_nodes: Vec>, + trie_nodes: Vec>, } impl StorageProof { - /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. - pub fn new(trie_nodes: Vec>) -> Self { - StorageProof { trie_nodes } - } - - /// Returns a new empty proof. - /// - /// An empty proof is capable of only proving trivial statements (ie. that an empty set of - /// key-value pairs exist in storage). - pub fn empty() -> Self { - StorageProof { - trie_nodes: Vec::new(), - } - } - - /// Returns whether this is an empty proof. - pub fn is_empty(&self) -> bool { - self.trie_nodes.is_empty() - } - - /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed - /// to be traversed in any particular order. - pub fn iter_nodes(self) -> StorageProofNodeIterator { - StorageProofNodeIterator::new(self) - } - - /// Creates a `MemoryDB` from `Self`. 
- pub fn into_memory_db(self) -> crate::MemoryDB { - self.into() - } - - /// Merges multiple storage proofs covering potentially different sets of keys into one proof - /// covering all keys. The merged proof output may be smaller than the aggregate size of the input - /// proofs due to deduplication of trie nodes. - pub fn merge(proofs: I) -> Self where I: IntoIterator { - let trie_nodes = proofs.into_iter() - .flat_map(|proof| proof.iter_nodes()) - .collect::>() - .into_iter() - .collect(); - - Self { trie_nodes } - } + /// Constructs a storage proof from a subset of encoded trie nodes in a storage backend. + pub fn new(trie_nodes: Vec>) -> Self { + StorageProof { trie_nodes } + } + + /// Returns a new empty proof. + /// + /// An empty proof is capable of only proving trivial statements (ie. that an empty set of + /// key-value pairs exist in storage). + pub fn empty() -> Self { + StorageProof { + trie_nodes: Vec::new(), + } + } + + /// Returns whether this is an empty proof. + pub fn is_empty(&self) -> bool { + self.trie_nodes.is_empty() + } + + /// Create an iterator over trie nodes constructed from the proof. The nodes are not guaranteed + /// to be traversed in any particular order. + pub fn iter_nodes(self) -> StorageProofNodeIterator { + StorageProofNodeIterator::new(self) + } + + /// Creates a `MemoryDB` from `Self`. + pub fn into_memory_db(self) -> crate::MemoryDB { + self.into() + } + + /// Merges multiple storage proofs covering potentially different sets of keys into one proof + /// covering all keys. The merged proof output may be smaller than the aggregate size of the input + /// proofs due to deduplication of trie nodes. + pub fn merge(proofs: I) -> Self + where + I: IntoIterator, + { + let trie_nodes = proofs + .into_iter() + .flat_map(|proof| proof.iter_nodes()) + .collect::>() + .into_iter() + .collect(); + + Self { trie_nodes } + } } /// An iterator over trie nodes constructed from a storage proof. 
The nodes are not guaranteed to /// be traversed in any particular order. pub struct StorageProofNodeIterator { - inner: > as IntoIterator>::IntoIter, + inner: > as IntoIterator>::IntoIter, } impl StorageProofNodeIterator { - fn new(proof: StorageProof) -> Self { - StorageProofNodeIterator { - inner: proof.trie_nodes.into_iter(), - } - } + fn new(proof: StorageProof) -> Self { + StorageProofNodeIterator { + inner: proof.trie_nodes.into_iter(), + } + } } impl Iterator for StorageProofNodeIterator { - type Item = Vec; + type Item = Vec; - fn next(&mut self) -> Option { - self.inner.next() - } + fn next(&mut self) -> Option { + self.inner.next() + } } impl From for crate::MemoryDB { - fn from(proof: StorageProof) -> Self { - let mut db = crate::MemoryDB::default(); - for item in proof.iter_nodes() { - db.insert(crate::EMPTY_PREFIX, &item); - } - db - } + fn from(proof: StorageProof) -> Self { + let mut db = crate::MemoryDB::default(); + for item in proof.iter_nodes() { + db.insert(crate::EMPTY_PREFIX, &item); + } + db + } } diff --git a/primitives/trie/src/trie_stream.rs b/primitives/trie/src/trie_stream.rs index 0c92e673aa..cb0411447b 100644 --- a/primitives/trie/src/trie_stream.rs +++ b/primitives/trie/src/trie_stream.rs @@ -16,13 +16,13 @@ //! `TrieStream` implementation for Substrate's trie format. -use hash_db::Hasher; -use trie_root; +use crate::node_codec::Bitmap; +use crate::node_header::{size_and_prefix_iterator, NodeKind}; +use crate::trie_constants; use codec::Encode; +use hash_db::Hasher; use sp_std::vec::Vec; -use crate::trie_constants; -use crate::node_header::{NodeKind, size_and_prefix_iterator}; -use crate::node_codec::Bitmap; +use trie_root; const BRANCH_NODE_NO_VALUE: u8 = 254; const BRANCH_NODE_WITH_VALUE: u8 = 255; @@ -30,110 +30,126 @@ const BRANCH_NODE_WITH_VALUE: u8 = 255; #[derive(Default, Clone)] /// Codec-flavored TrieStream. 
pub struct TrieStream { - buffer: Vec, + buffer: Vec, } impl TrieStream { - // useful for debugging but not used otherwise - pub fn as_raw(&self) -> &[u8] { &self.buffer } + // useful for debugging but not used otherwise + pub fn as_raw(&self) -> &[u8] { + &self.buffer + } } fn branch_node_bit_mask(has_children: impl Iterator) -> (u8, u8) { - let mut bitmap: u16 = 0; - let mut cursor: u16 = 1; - for v in has_children { - if v { bitmap |= cursor } - cursor <<= 1; - } - ((bitmap % 256 ) as u8, (bitmap / 256 ) as u8) + let mut bitmap: u16 = 0; + let mut cursor: u16 = 1; + for v in has_children { + if v { + bitmap |= cursor + } + cursor <<= 1; + } + ((bitmap % 256) as u8, (bitmap / 256) as u8) } - /// Create a leaf/branch node, encoding a number of nibbles. fn fuse_nibbles_node<'a>(nibbles: &'a [u8], kind: NodeKind) -> impl Iterator + 'a { - let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len()); - - let iter_start = match kind { - NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK), - NodeKind::BranchNoValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK), - NodeKind::BranchWithValue => size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK), - }; - iter_start - .chain(if nibbles.len() % 2 == 1 { Some(nibbles[0]) } else { None }) - .chain(nibbles[nibbles.len() % 2..].chunks(2).map(|ch| ch[0] << 4 | ch[1])) + let size = sp_std::cmp::min(trie_constants::NIBBLE_SIZE_BOUND, nibbles.len()); + + let iter_start = match kind { + NodeKind::Leaf => size_and_prefix_iterator(size, trie_constants::LEAF_PREFIX_MASK), + NodeKind::BranchNoValue => { + size_and_prefix_iterator(size, trie_constants::BRANCH_WITHOUT_MASK) + } + NodeKind::BranchWithValue => { + size_and_prefix_iterator(size, trie_constants::BRANCH_WITH_MASK) + } + }; + iter_start + .chain(if nibbles.len() % 2 == 1 { + Some(nibbles[0]) + } else { + None + }) + .chain( + nibbles[nibbles.len() % 2..] 
+ .chunks(2) + .map(|ch| ch[0] << 4 | ch[1]), + ) } - impl trie_root::TrieStream for TrieStream { - - fn new() -> Self { - TrieStream { - buffer: Vec::new() - } - } - - fn append_empty_data(&mut self) { - self.buffer.push(trie_constants::EMPTY_TRIE); - } - - fn append_leaf(&mut self, key: &[u8], value: &[u8]) { - self.buffer.extend(fuse_nibbles_node(key, NodeKind::Leaf)); - value.encode_to(&mut self.buffer); - } - - fn begin_branch( - &mut self, - maybe_partial: Option<&[u8]>, - maybe_value: Option<&[u8]>, - has_children: impl Iterator, - ) { - if let Some(partial) = maybe_partial { - if maybe_value.is_some() { - self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchWithValue)); - } else { - self.buffer.extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue)); - } - let bm = branch_node_bit_mask(has_children); - self.buffer.extend([bm.0,bm.1].iter()); - } else { - debug_assert!(false, "trie stream codec only for no extension trie"); - self.buffer.extend(&branch_node(maybe_value.is_some(), has_children)); - } - if let Some(value) = maybe_value { - value.encode_to(&mut self.buffer); - } - } - - fn append_extension(&mut self, _key: &[u8]) { - debug_assert!(false, "trie stream codec only for no extension trie"); - } - - fn append_substream(&mut self, other: Self) { - let data = other.out(); - match data.len() { - 0..=31 => data.encode_to(&mut self.buffer), - _ => H::hash(&data).as_ref().encode_to(&mut self.buffer), - } - } - - fn out(self) -> Vec { self.buffer } + fn new() -> Self { + TrieStream { buffer: Vec::new() } + } + + fn append_empty_data(&mut self) { + self.buffer.push(trie_constants::EMPTY_TRIE); + } + + fn append_leaf(&mut self, key: &[u8], value: &[u8]) { + self.buffer.extend(fuse_nibbles_node(key, NodeKind::Leaf)); + value.encode_to(&mut self.buffer); + } + + fn begin_branch( + &mut self, + maybe_partial: Option<&[u8]>, + maybe_value: Option<&[u8]>, + has_children: impl Iterator, + ) { + if let Some(partial) = maybe_partial { + if 
maybe_value.is_some() { + self.buffer + .extend(fuse_nibbles_node(partial, NodeKind::BranchWithValue)); + } else { + self.buffer + .extend(fuse_nibbles_node(partial, NodeKind::BranchNoValue)); + } + let bm = branch_node_bit_mask(has_children); + self.buffer.extend([bm.0, bm.1].iter()); + } else { + debug_assert!(false, "trie stream codec only for no extension trie"); + self.buffer + .extend(&branch_node(maybe_value.is_some(), has_children)); + } + if let Some(value) = maybe_value { + value.encode_to(&mut self.buffer); + } + } + + fn append_extension(&mut self, _key: &[u8]) { + debug_assert!(false, "trie stream codec only for no extension trie"); + } + + fn append_substream(&mut self, other: Self) { + let data = other.out(); + match data.len() { + 0..=31 => data.encode_to(&mut self.buffer), + _ => H::hash(&data).as_ref().encode_to(&mut self.buffer), + } + } + + fn out(self) -> Vec { + self.buffer + } } fn branch_node(has_value: bool, has_children: impl Iterator) -> [u8; 3] { - let mut result = [0, 0, 0]; - branch_node_buffered(has_value, has_children, &mut result[..]); - result + let mut result = [0, 0, 0]; + branch_node_buffered(has_value, has_children, &mut result[..]); + result } -fn branch_node_buffered(has_value: bool, has_children: I, output: &mut[u8]) - where - I: Iterator, +fn branch_node_buffered(has_value: bool, has_children: I, output: &mut [u8]) +where + I: Iterator, { - let first = if has_value { - BRANCH_NODE_WITH_VALUE - } else { - BRANCH_NODE_NO_VALUE - }; - output[0] = first; - Bitmap::encode(has_children, &mut output[1..]); + let first = if has_value { + BRANCH_NODE_WITH_VALUE + } else { + BRANCH_NODE_NO_VALUE + }; + output[0] = first; + Bitmap::encode(has_children, &mut output[1..]); } diff --git a/primitives/utils/src/lib.rs b/primitives/utils/src/lib.rs index 385a9b6689..0c9418c5fc 100644 --- a/primitives/utils/src/lib.rs +++ b/primitives/utils/src/lib.rs @@ -17,4 +17,4 @@ //! 
Utilities Primitives for Substrate pub mod metrics; -pub mod mpsc; \ No newline at end of file +pub mod mpsc; diff --git a/primitives/utils/src/metrics.rs b/primitives/utils/src/metrics.rs index 160714fdca..fe090bbc48 100644 --- a/primitives/utils/src/metrics.rs +++ b/primitives/utils/src/metrics.rs @@ -18,41 +18,38 @@ use lazy_static::lazy_static; use prometheus::{ - Registry, Error as PrometheusError, - core::{ AtomicU64, GenericGauge, GenericCounter }, + core::{AtomicU64, GenericCounter, GenericGauge}, + Error as PrometheusError, Registry, }; #[cfg(features = "metered")] use prometheus::{core::GenericGaugeVec, Opts}; - lazy_static! { - pub static ref TOKIO_THREADS_TOTAL: GenericCounter = GenericCounter::new( - "tokio_threads_total", "Total number of threads created" - ).expect("Creating of statics doesn't fail. qed"); - - pub static ref TOKIO_THREADS_ALIVE: GenericGauge = GenericGauge::new( - "tokio_threads_alive", "Number of threads alive right now" - ).expect("Creating of statics doesn't fail. qed"); + pub static ref TOKIO_THREADS_TOTAL: GenericCounter = + GenericCounter::new("tokio_threads_total", "Total number of threads created") + .expect("Creating of statics doesn't fail. qed"); + pub static ref TOKIO_THREADS_ALIVE: GenericGauge = + GenericGauge::new("tokio_threads_alive", "Number of threads alive right now") + .expect("Creating of statics doesn't fail. qed"); } #[cfg(features = "metered")] lazy_static! { - pub static ref UNBOUNDED_CHANNELS_COUNTER : GenericGaugeVec = GenericGaugeVec::new( - Opts::new("unbounded_channel_len", "Items in each mpsc::unbounded instance"), - &["entity", "action"] // 'name of channel, send|received|dropped - ).expect("Creating of statics doesn't fail. 
qed"); + pub static ref UNBOUNDED_CHANNELS_COUNTER : GenericGaugeVec = GenericGaugeVec::new( + Opts::new("unbounded_channel_len", "Items in each mpsc::unbounded instance"), + &["entity", "action"] // 'name of channel, send|received|dropped + ).expect("Creating of statics doesn't fail. qed"); } - /// Register the statics to report to registry pub fn register_globals(registry: &Registry) -> Result<(), PrometheusError> { - registry.register(Box::new(TOKIO_THREADS_ALIVE.clone()))?; - registry.register(Box::new(TOKIO_THREADS_TOTAL.clone()))?; + registry.register(Box::new(TOKIO_THREADS_ALIVE.clone()))?; + registry.register(Box::new(TOKIO_THREADS_TOTAL.clone()))?; - #[cfg(features = "metered")] - registry.register(Box::new(UNBOUNDED_CHANNELS_COUNTER.clone()))?; + #[cfg(features = "metered")] + registry.register(Box::new(UNBOUNDED_CHANNELS_COUNTER.clone()))?; - Ok(()) + Ok(()) } diff --git a/primitives/utils/src/mpsc.rs b/primitives/utils/src/mpsc.rs index 42fb287c18..3ac5d6432b 100644 --- a/primitives/utils/src/mpsc.rs +++ b/primitives/utils/src/mpsc.rs @@ -18,215 +18,206 @@ #[cfg(not(features = "metered"))] mod inner { - // just aliased, non performance implications - use futures::channel::mpsc::{self, UnboundedReceiver, UnboundedSender}; - pub type TracingUnboundedSender = UnboundedSender; - pub type TracingUnboundedReceiver = UnboundedReceiver; - - /// Alias `mpsc::unbounded` - pub fn tracing_unbounded(_key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { - mpsc::unbounded() - } + // just aliased, non performance implications + use futures::channel::mpsc::{self, UnboundedReceiver, UnboundedSender}; + pub type TracingUnboundedSender = UnboundedSender; + pub type TracingUnboundedReceiver = UnboundedReceiver; + + /// Alias `mpsc::unbounded` + pub fn tracing_unbounded( + _key: &'static str, + ) -> (TracingUnboundedSender, TracingUnboundedReceiver) { + mpsc::unbounded() + } } - #[cfg(features = "metered")] mod inner { - //tracing implementation - use 
futures::channel::mpsc::{self, - UnboundedReceiver, UnboundedSender, - TryRecvError, TrySendError, SendError - }; - use futures::{sink::Sink, task::{Poll, Context}, stream::Stream}; - use std::pin::Pin; - use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; - - /// Wrapper Type around `UnboundedSender` that increases the global - /// measure when a message is added - #[derive(Debug, Clone)] - pub struct TracingUnboundedSender(&'static str, UnboundedSender); - - /// Wrapper Type around `UnboundedReceiver` that decreases the global - /// measure when a message is polled - #[derive(Debug)] - pub struct TracingUnboundedReceiver(&'static str, UnboundedReceiver); - - /// Wrapper around `mpsc::unbounded` that tracks the in- and outflow via - /// `UNBOUNDED_CHANNELS_COUNTER` - pub fn tracing_unbounded(key: &'static str) ->(TracingUnboundedSender, TracingUnboundedReceiver) { - let (s, r) = mpsc::unbounded(); - (TracingUnboundedSender(key.clone(), s), TracingUnboundedReceiver(key,r)) - } - - impl TracingUnboundedSender { - /// Proxy function to mpsc::UnboundedSender - pub fn poll_ready(&self, ctx: &mut Context) -> Poll> { - self.1.poll_ready(ctx) - } - - /// Proxy function to mpsc::UnboundedSender - pub fn is_closed(&self) -> bool { - self.1.is_closed() - } - - /// Proxy function to mpsc::UnboundedSender - pub fn close_channel(&self) { - self.1.close_channel() - } - - /// Proxy function to mpsc::UnboundedSender - pub fn disconnect(&mut self) { - self.1.disconnect() - } - - /// Proxy function to mpsc::UnboundedSender - pub fn start_send(&mut self, msg: T) -> Result<(), SendError> { - self.1.start_send(msg) - } - - /// Proxy function to mpsc::UnboundedSender - pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { - self.1.unbounded_send(msg).map(|s|{ - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"send"]).incr(); - s - }) - } - - /// Proxy function to mpsc::UnboundedSender - pub fn same_receiver(&self, other: &UnboundedSender) -> bool { - 
self.1.same_receiver(other) - } - } - - impl TracingUnboundedReceiver { - - fn consume(&mut self) { - // consume all items, make sure to reflect the updated count - let mut count = 0; - while let Ok(Some(..)) = self.try_next() { - count += 1; - } - - // and discount the messages - if count > 0 { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"dropped"]).incr_by(count); - } - - } - - /// Proxy function to mpsc::UnboundedReceiver - /// that consumes all messages first and updates the counter - pub fn close(&mut self) { - self.consume(); - self.1.close() - } - - /// Proxy function to mpsc::UnboundedReceiver - /// that discounts the messages taken out - pub fn try_next(&mut self) -> Result, TryRecvError> { - self.1.try_next().map(|s| { - if s.is_some() { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, &"received"]).incr(); - } - s - }) - } - } - - impl Drop for TracingUnboundedReceiver { - fn drop(&mut self) { - self.consume(); - } - } - - impl Unpin for TracingUnboundedReceiver {} - - impl Stream for TracingUnboundedReceiver { - type Item = T; - - fn poll_next( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - let s = self.get_mut(); - match Pin::new(&mut s.1).poll_next(cx) { - Poll::Ready(msg) => { - if msg.is_some() { - UNBOUNDED_CHANNELS_COUNTER.with_label_values(&[self.0, "received"]).incr(); - } - Poll::Ready(msg) - } - Poll::Pending => { - Poll::Pending - } - } - } - } - - - impl Sink for TracingUnboundedSender { - type Error = SendError; - - fn poll_ready( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - TracingUnboundedSender::poll_ready(&*self, cx) - } - - fn start_send( - mut self: Pin<&mut Self>, - msg: T, - ) -> Result<(), Self::Error> { - TracingUnboundedSender::start_send(&mut *self, msg) - } - - fn poll_flush( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close( - mut self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { - self.disconnect(); - 
Poll::Ready(Ok(())) - } - } - - impl Sink for &TracingUnboundedSender { - type Error = SendError; - - fn poll_ready( - self: Pin<&mut Self>, - cx: &mut Context<'_>, - ) -> Poll> { - TracingUnboundedSender::poll_ready(*self, cx) - } - - fn start_send(self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { - self.unbounded_send(msg) - .map_err(TrySendError::into_send_error) - } - - fn poll_flush( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - - fn poll_close( - self: Pin<&mut Self>, - _: &mut Context<'_>, - ) -> Poll> { - self.close_channel(); - Poll::Ready(Ok(())) - } - } + //tracing implementation + use crate::metrics::UNBOUNDED_CHANNELS_COUNTER; + use futures::channel::mpsc::{ + self, SendError, TryRecvError, TrySendError, UnboundedReceiver, UnboundedSender, + }; + use futures::{ + sink::Sink, + stream::Stream, + task::{Context, Poll}, + }; + use std::pin::Pin; + + /// Wrapper Type around `UnboundedSender` that increases the global + /// measure when a message is added + #[derive(Debug, Clone)] + pub struct TracingUnboundedSender(&'static str, UnboundedSender); + + /// Wrapper Type around `UnboundedReceiver` that decreases the global + /// measure when a message is polled + #[derive(Debug)] + pub struct TracingUnboundedReceiver(&'static str, UnboundedReceiver); + + /// Wrapper around `mpsc::unbounded` that tracks the in- and outflow via + /// `UNBOUNDED_CHANNELS_COUNTER` + pub fn tracing_unbounded( + key: &'static str, + ) -> (TracingUnboundedSender, TracingUnboundedReceiver) { + let (s, r) = mpsc::unbounded(); + ( + TracingUnboundedSender(key.clone(), s), + TracingUnboundedReceiver(key, r), + ) + } + + impl TracingUnboundedSender { + /// Proxy function to mpsc::UnboundedSender + pub fn poll_ready(&self, ctx: &mut Context) -> Poll> { + self.1.poll_ready(ctx) + } + + /// Proxy function to mpsc::UnboundedSender + pub fn is_closed(&self) -> bool { + self.1.is_closed() + } + + /// Proxy function to mpsc::UnboundedSender + 
pub fn close_channel(&self) { + self.1.close_channel() + } + + /// Proxy function to mpsc::UnboundedSender + pub fn disconnect(&mut self) { + self.1.disconnect() + } + + /// Proxy function to mpsc::UnboundedSender + pub fn start_send(&mut self, msg: T) -> Result<(), SendError> { + self.1.start_send(msg) + } + + /// Proxy function to mpsc::UnboundedSender + pub fn unbounded_send(&self, msg: T) -> Result<(), TrySendError> { + self.1.unbounded_send(msg).map(|s| { + UNBOUNDED_CHANNELS_COUNTER + .with_label_values(&[self.0, &"send"]) + .incr(); + s + }) + } + + /// Proxy function to mpsc::UnboundedSender + pub fn same_receiver(&self, other: &UnboundedSender) -> bool { + self.1.same_receiver(other) + } + } + + impl TracingUnboundedReceiver { + fn consume(&mut self) { + // consume all items, make sure to reflect the updated count + let mut count = 0; + while let Ok(Some(..)) = self.try_next() { + count += 1; + } + + // and discount the messages + if count > 0 { + UNBOUNDED_CHANNELS_COUNTER + .with_label_values(&[self.0, &"dropped"]) + .incr_by(count); + } + } + + /// Proxy function to mpsc::UnboundedReceiver + /// that consumes all messages first and updates the counter + pub fn close(&mut self) { + self.consume(); + self.1.close() + } + + /// Proxy function to mpsc::UnboundedReceiver + /// that discounts the messages taken out + pub fn try_next(&mut self) -> Result, TryRecvError> { + self.1.try_next().map(|s| { + if s.is_some() { + UNBOUNDED_CHANNELS_COUNTER + .with_label_values(&[self.0, &"received"]) + .incr(); + } + s + }) + } + } + + impl Drop for TracingUnboundedReceiver { + fn drop(&mut self) { + self.consume(); + } + } + + impl Unpin for TracingUnboundedReceiver {} + + impl Stream for TracingUnboundedReceiver { + type Item = T; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let s = self.get_mut(); + match Pin::new(&mut s.1).poll_next(cx) { + Poll::Ready(msg) => { + if msg.is_some() { + UNBOUNDED_CHANNELS_COUNTER + 
.with_label_values(&[self.0, "received"]) + .incr(); + } + Poll::Ready(msg) + } + Poll::Pending => Poll::Pending, + } + } + } + + impl Sink for TracingUnboundedSender { + type Error = SendError; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + TracingUnboundedSender::poll_ready(&*self, cx) + } + + fn start_send(mut self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { + TracingUnboundedSender::start_send(&mut *self, msg) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close( + mut self: Pin<&mut Self>, + _: &mut Context<'_>, + ) -> Poll> { + self.disconnect(); + Poll::Ready(Ok(())) + } + } + + impl Sink for &TracingUnboundedSender { + type Error = SendError; + + fn poll_ready(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + TracingUnboundedSender::poll_ready(*self, cx) + } + + fn start_send(self: Pin<&mut Self>, msg: T) -> Result<(), Self::Error> { + self.unbounded_send(msg) + .map_err(TrySendError::into_send_error) + } + + fn poll_flush(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + Poll::Ready(Ok(())) + } + + fn poll_close(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll> { + self.close_channel(); + Poll::Ready(Ok(())) + } + } } -pub use inner::{tracing_unbounded, TracingUnboundedSender, TracingUnboundedReceiver}; +pub use inner::{tracing_unbounded, TracingUnboundedReceiver, TracingUnboundedSender}; diff --git a/primitives/version/src/lib.rs b/primitives/version/src/lib.rs index 613b23156a..45e6ca5965 100644 --- a/primitives/version/src/lib.rs +++ b/primitives/version/src/lib.rs @@ -19,20 +19,20 @@ #![cfg_attr(not(feature = "std"), no_std)] #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -#[cfg(feature = "std")] -use std::fmt; +use serde::{Deserialize, Serialize}; #[cfg(feature = "std")] use std::collections::HashSet; +#[cfg(feature = "std")] +use std::fmt; -use codec::{Encode, Decode}; -use sp_runtime::RuntimeString; +use 
codec::{Decode, Encode}; pub use sp_runtime::create_runtime_str; +use sp_runtime::RuntimeString; #[doc(hidden)] pub use sp_std; #[cfg(feature = "std")] -use sp_runtime::{traits::Block as BlockT, generic::BlockId}; +use sp_runtime::{generic::BlockId, traits::Block as BlockT}; /// The identity of a particular API interface that the runtime might provide. pub type ApiId = [u8; 8]; @@ -43,7 +43,9 @@ pub type ApisVec = sp_std::borrow::Cow<'static, [(ApiId, u32)]>; /// Create a vector of Api declarations. #[macro_export] macro_rules! create_apis_vec { - ( $y:expr ) => { $crate::sp_std::borrow::Cow::Borrowed(& $y) } + ( $y:expr ) => { + $crate::sp_std::borrow::Cow::Borrowed(&$y) + }; } /// Runtime version. @@ -55,216 +57,217 @@ macro_rules! create_apis_vec { #[cfg_attr(feature = "std", derive(Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct RuntimeVersion { - /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. - /// A different on-chain spec_name to that of the native runtime would normally result - /// in node not attempting to sync or author blocks. - pub spec_name: RuntimeString, - - /// Name of the implementation of the spec. This is of little consequence for the node - /// and serves only to differentiate code of different implementation teams. For this - /// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the - /// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different - /// `impl_name`. - pub impl_name: RuntimeString, - - /// `authoring_version` is the version of the authorship interface. An authoring node - /// will not attempt to author blocks unless this is equal to its native runtime. - pub authoring_version: u32, - - /// Version of the runtime specification. 
A full-node will not attempt to use its native - /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, - /// `spec_version` and `authoring_version` are the same between Wasm and native. - pub spec_version: u32, - - /// Version of the implementation of the specification. Nodes are free to ignore this; it - /// serves only as an indication that the code is different; as long as the other two versions - /// are the same then while the actual code may be different, it is nonetheless required to - /// do the same thing. - /// Non-consensus-breaking optimizations are about the only changes that could be made which - /// would result in only the `impl_version` changing. - pub impl_version: u32, - - /// List of supported API "features" along with their versions. - #[cfg_attr( - feature = "std", - serde( - serialize_with = "apis_serialize::serialize", - deserialize_with = "apis_serialize::deserialize", - ) - )] - pub apis: ApisVec, - - /// All existing dispatches are fully compatible when this number doesn't change. If this - /// number changes, then `spec_version` must change, also. - /// - /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, - /// either through an alteration in its user-level semantics, a parameter added/removed/changed, - /// a dispatchable being removed, a module being removed, or a dispatchable/module changing its - /// index. - /// - /// It need *not* change when a new module is added or when a dispatchable is added. - pub transaction_version: u32, + /// Identifies the different Substrate runtimes. There'll be at least polkadot and node. + /// A different on-chain spec_name to that of the native runtime would normally result + /// in node not attempting to sync or author blocks. + pub spec_name: RuntimeString, + + /// Name of the implementation of the spec. This is of little consequence for the node + /// and serves only to differentiate code of different implementation teams. 
For this + /// codebase, it will be parity-polkadot. If there were a non-Rust implementation of the + /// Polkadot runtime (e.g. C++), then it would identify itself with an accordingly different + /// `impl_name`. + pub impl_name: RuntimeString, + + /// `authoring_version` is the version of the authorship interface. An authoring node + /// will not attempt to author blocks unless this is equal to its native runtime. + pub authoring_version: u32, + + /// Version of the runtime specification. A full-node will not attempt to use its native + /// runtime in substitute for the on-chain Wasm runtime unless all of `spec_name`, + /// `spec_version` and `authoring_version` are the same between Wasm and native. + pub spec_version: u32, + + /// Version of the implementation of the specification. Nodes are free to ignore this; it + /// serves only as an indication that the code is different; as long as the other two versions + /// are the same then while the actual code may be different, it is nonetheless required to + /// do the same thing. + /// Non-consensus-breaking optimizations are about the only changes that could be made which + /// would result in only the `impl_version` changing. + pub impl_version: u32, + + /// List of supported API "features" along with their versions. + #[cfg_attr( + feature = "std", + serde( + serialize_with = "apis_serialize::serialize", + deserialize_with = "apis_serialize::deserialize", + ) + )] + pub apis: ApisVec, + + /// All existing dispatches are fully compatible when this number doesn't change. If this + /// number changes, then `spec_version` must change, also. + /// + /// This number must change when an existing dispatchable (module ID, dispatch ID) is changed, + /// either through an alteration in its user-level semantics, a parameter added/removed/changed, + /// a dispatchable being removed, a module being removed, or a dispatchable/module changing its + /// index. 
+ /// + /// It need *not* change when a new module is added or when a dispatchable is added. + pub transaction_version: u32, } #[cfg(feature = "std")] impl fmt::Display for RuntimeVersion { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}-{} ({}-{}.tx{}.au{})", - self.spec_name, - self.spec_version, - self.impl_name, - self.impl_version, - self.transaction_version, - self.authoring_version, - ) - } + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}-{} ({}-{}.tx{}.au{})", + self.spec_name, + self.spec_version, + self.impl_name, + self.impl_version, + self.transaction_version, + self.authoring_version, + ) + } } #[cfg(feature = "std")] impl RuntimeVersion { - /// Check if this version matches other version for calling into runtime. - pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { - self.spec_version == other.spec_version && - self.spec_name == other.spec_name && - self.authoring_version == other.authoring_version - } - - /// Check if the given api with `api_id` is implemented and the version passes the given - /// `predicate`. - pub fn has_api_with bool>( - &self, - id: &ApiId, - predicate: P, - ) -> bool { - self.apis.iter().any(|(s, v)| s == id && predicate(*v)) - } + /// Check if this version matches other version for calling into runtime. + pub fn can_call_with(&self, other: &RuntimeVersion) -> bool { + self.spec_version == other.spec_version + && self.spec_name == other.spec_name + && self.authoring_version == other.authoring_version + } + + /// Check if the given api with `api_id` is implemented and the version passes the given + /// `predicate`. + pub fn has_api_with bool>(&self, id: &ApiId, predicate: P) -> bool { + self.apis.iter().any(|(s, v)| s == id && predicate(*v)) + } } #[cfg(feature = "std")] #[derive(Debug)] pub struct NativeVersion { - /// Basic runtime version info. - pub runtime_version: RuntimeVersion, - /// Authoring runtimes that this native runtime supports. 
- pub can_author_with: HashSet, + /// Basic runtime version info. + pub runtime_version: RuntimeVersion, + /// Authoring runtimes that this native runtime supports. + pub can_author_with: HashSet, } #[cfg(feature = "std")] impl NativeVersion { - /// Check if this version matches other version for authoring blocks. - /// - /// # Return - /// - /// - Returns `Ok(())` when authoring is supported. - /// - Returns `Err(_)` with a detailed error when authoring is not supported. - pub fn can_author_with(&self, other: &RuntimeVersion) -> Result<(), String> { - if self.runtime_version.spec_name != other.spec_name { - Err(format!( - "`spec_name` does not match `{}` vs `{}`", - self.runtime_version.spec_name, - other.spec_name, - )) - } else if self.runtime_version.authoring_version != other.authoring_version - && !self.can_author_with.contains(&other.authoring_version) - { - Err(format!( - "`authoring_version` does not match `{version}` vs `{other_version}` and \ + /// Check if this version matches other version for authoring blocks. + /// + /// # Return + /// + /// - Returns `Ok(())` when authoring is supported. + /// - Returns `Err(_)` with a detailed error when authoring is not supported. 
+ pub fn can_author_with(&self, other: &RuntimeVersion) -> Result<(), String> { + if self.runtime_version.spec_name != other.spec_name { + Err(format!( + "`spec_name` does not match `{}` vs `{}`", + self.runtime_version.spec_name, other.spec_name, + )) + } else if self.runtime_version.authoring_version != other.authoring_version + && !self.can_author_with.contains(&other.authoring_version) + { + Err(format!( + "`authoring_version` does not match `{version}` vs `{other_version}` and \ `can_author_with` not contains `{other_version}`", - version = self.runtime_version.authoring_version, - other_version = other.authoring_version, - )) - } else { - Ok(()) - } - } + version = self.runtime_version.authoring_version, + other_version = other.authoring_version, + )) + } else { + Ok(()) + } + } } /// Something that can provide the runtime version at a given block and the native runtime version. #[cfg(feature = "std")] pub trait GetRuntimeVersion { - /// Returns the version of the native runtime. - fn native_version(&self) -> &NativeVersion; + /// Returns the version of the native runtime. + fn native_version(&self) -> &NativeVersion; - /// Returns the version of runtime at the given block. - fn runtime_version(&self, at: &BlockId) -> Result; + /// Returns the version of runtime at the given block. 
+ fn runtime_version(&self, at: &BlockId) -> Result; } #[cfg(feature = "std")] impl, Block: BlockT> GetRuntimeVersion for std::sync::Arc { - fn native_version(&self) -> &NativeVersion { - (&**self).native_version() - } + fn native_version(&self) -> &NativeVersion { + (&**self).native_version() + } - fn runtime_version(&self, at: &BlockId) -> Result { - (&**self).runtime_version(at) - } + fn runtime_version(&self, at: &BlockId) -> Result { + (&**self).runtime_version(at) + } } #[cfg(feature = "std")] mod apis_serialize { - use super::*; - use impl_serde::serialize as bytes; - use serde::{Serializer, de, ser::SerializeTuple}; - - #[derive(Serialize)] - struct ApiId<'a>( - #[serde(serialize_with="serialize_bytesref")] &'a super::ApiId, - &'a u32, - ); - - pub fn serialize(apis: &ApisVec, ser: S) -> Result where - S: Serializer, - { - let len = apis.len(); - let mut seq = ser.serialize_tuple(len)?; - for (api, ver) in &**apis { - seq.serialize_element(&ApiId(api, ver))?; - } - seq.end() - } - - pub fn serialize_bytesref(&apis: &&super::ApiId, ser: S) -> Result where - S: Serializer, - { - bytes::serialize(apis, ser) - } - - #[derive(Deserialize)] - struct ApiIdOwned( - #[serde(deserialize_with="deserialize_bytes")] - super::ApiId, - u32, - ); - - pub fn deserialize<'de, D>(deserializer: D) -> Result where - D: de::Deserializer<'de>, - { - struct Visitor; - impl<'de> de::Visitor<'de> for Visitor { - type Value = ApisVec; - - fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { - formatter.write_str("a sequence of api id and version tuples") - } - - fn visit_seq(self, mut visitor: V) -> Result where - V: de::SeqAccess<'de>, - { - let mut apis = Vec::new(); - while let Some(value) = visitor.next_element::()? 
{ - apis.push((value.0, value.1)); - } - Ok(apis.into()) - } - } - deserializer.deserialize_seq(Visitor) - } - - pub fn deserialize_bytes<'de, D>(d: D) -> Result where - D: de::Deserializer<'de> - { - let mut arr = [0; 8]; - bytes::deserialize_check_len(d, bytes::ExpectedLen::Exact(&mut arr[..]))?; - Ok(arr) - } + use super::*; + use impl_serde::serialize as bytes; + use serde::{de, ser::SerializeTuple, Serializer}; + + #[derive(Serialize)] + struct ApiId<'a>( + #[serde(serialize_with = "serialize_bytesref")] &'a super::ApiId, + &'a u32, + ); + + pub fn serialize(apis: &ApisVec, ser: S) -> Result + where + S: Serializer, + { + let len = apis.len(); + let mut seq = ser.serialize_tuple(len)?; + for (api, ver) in &**apis { + seq.serialize_element(&ApiId(api, ver))?; + } + seq.end() + } + + pub fn serialize_bytesref(&apis: &&super::ApiId, ser: S) -> Result + where + S: Serializer, + { + bytes::serialize(apis, ser) + } + + #[derive(Deserialize)] + struct ApiIdOwned( + #[serde(deserialize_with = "deserialize_bytes")] super::ApiId, + u32, + ); + + pub fn deserialize<'de, D>(deserializer: D) -> Result + where + D: de::Deserializer<'de>, + { + struct Visitor; + impl<'de> de::Visitor<'de> for Visitor { + type Value = ApisVec; + + fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { + formatter.write_str("a sequence of api id and version tuples") + } + + fn visit_seq(self, mut visitor: V) -> Result + where + V: de::SeqAccess<'de>, + { + let mut apis = Vec::new(); + while let Some(value) = visitor.next_element::()? 
{ + apis.push((value.0, value.1)); + } + Ok(apis.into()) + } + } + deserializer.deserialize_seq(Visitor) + } + + pub fn deserialize_bytes<'de, D>(d: D) -> Result + where + D: de::Deserializer<'de>, + { + let mut arr = [0; 8]; + bytes::deserialize_check_len(d, bytes::ExpectedLen::Exact(&mut arr[..]))?; + Ok(arr) + } } diff --git a/primitives/wasm-interface/src/lib.rs b/primitives/wasm-interface/src/lib.rs index eda2ebb1b5..c1dea2ad14 100644 --- a/primitives/wasm-interface/src/lib.rs +++ b/primitives/wasm-interface/src/lib.rs @@ -18,9 +18,7 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::{ - borrow::Cow, marker::PhantomData, mem, iter::Iterator, result, vec::Vec, -}; +use sp_std::{borrow::Cow, iter::Iterator, marker::PhantomData, mem, result, vec::Vec}; #[cfg(feature = "std")] mod wasmi_impl; @@ -34,94 +32,94 @@ pub type Result = result::Result; /// Value types supported by Substrate on the boundary between host/Wasm. #[derive(Copy, Clone, PartialEq, Debug, Eq)] pub enum ValueType { - /// An `i32` value type. - I32, - /// An `i64` value type. - I64, - /// An `f32` value type. - F32, - /// An `f64` value type. - F64, + /// An `i32` value type. + I32, + /// An `i64` value type. + I64, + /// An `f32` value type. + F32, + /// An `f64` value type. 
+ F64, } impl From for u8 { - fn from(val: ValueType) -> u8 { - match val { - ValueType::I32 => 0, - ValueType::I64 => 1, - ValueType::F32 => 2, - ValueType::F64 => 3, - } - } + fn from(val: ValueType) -> u8 { + match val { + ValueType::I32 => 0, + ValueType::I64 => 1, + ValueType::F32 => 2, + ValueType::F64 => 3, + } + } } impl sp_std::convert::TryFrom for ValueType { - type Error = (); - - fn try_from(val: u8) -> sp_std::result::Result { - match val { - 0 => Ok(Self::I32), - 1 => Ok(Self::I64), - 2 => Ok(Self::F32), - 3 => Ok(Self::F64), - _ => Err(()), - } - } + type Error = (); + + fn try_from(val: u8) -> sp_std::result::Result { + match val { + 0 => Ok(Self::I32), + 1 => Ok(Self::I64), + 2 => Ok(Self::F32), + 3 => Ok(Self::F64), + _ => Err(()), + } + } } /// Values supported by Substrate on the boundary between host/Wasm. #[derive(PartialEq, Debug, Clone, Copy, codec::Encode, codec::Decode)] pub enum Value { - /// A 32-bit integer. - I32(i32), - /// A 64-bit integer. - I64(i64), - /// A 32-bit floating-point number stored as raw bit pattern. - /// - /// You can materialize this value using `f32::from_bits`. - F32(u32), - /// A 64-bit floating-point number stored as raw bit pattern. - /// - /// You can materialize this value using `f64::from_bits`. - F64(u64), + /// A 32-bit integer. + I32(i32), + /// A 64-bit integer. + I64(i64), + /// A 32-bit floating-point number stored as raw bit pattern. + /// + /// You can materialize this value using `f32::from_bits`. + F32(u32), + /// A 64-bit floating-point number stored as raw bit pattern. + /// + /// You can materialize this value using `f64::from_bits`. + F64(u64), } impl Value { - /// Returns the type of this value. - pub fn value_type(&self) -> ValueType { - match self { - Value::I32(_) => ValueType::I32, - Value::I64(_) => ValueType::I64, - Value::F32(_) => ValueType::F32, - Value::F64(_) => ValueType::F64, - } - } - - /// Return `Self` as `i32`. 
- pub fn as_i32(&self) -> Option { - match self { - Self::I32(val) => Some(*val), - _ => None, - } - } + /// Returns the type of this value. + pub fn value_type(&self) -> ValueType { + match self { + Value::I32(_) => ValueType::I32, + Value::I64(_) => ValueType::I64, + Value::F32(_) => ValueType::F32, + Value::F64(_) => ValueType::F64, + } + } + + /// Return `Self` as `i32`. + pub fn as_i32(&self) -> Option { + match self { + Self::I32(val) => Some(*val), + _ => None, + } + } } /// Provides `Sealed` trait to prevent implementing trait `PointerType` outside of this crate. mod private { - pub trait Sealed {} + pub trait Sealed {} - impl Sealed for u8 {} - impl Sealed for u16 {} - impl Sealed for u32 {} - impl Sealed for u64 {} + impl Sealed for u8 {} + impl Sealed for u16 {} + impl Sealed for u32 {} + impl Sealed for u64 {} } /// Something that can be wrapped in a wasm `Pointer`. /// /// This trait is sealed. pub trait PointerType: Sized { - /// The size of the type in wasm. - const SIZE: u32 = mem::size_of::() as u32; + /// The size of the type in wasm. + const SIZE: u32 = mem::size_of::() as u32; } impl PointerType for u8 {} @@ -132,80 +130,83 @@ impl PointerType for u64 {} /// Type to represent a pointer in wasm at the host. #[derive(Debug, PartialEq, Eq, Clone, Copy)] pub struct Pointer { - ptr: u32, - _marker: PhantomData, + ptr: u32, + _marker: PhantomData, } impl Pointer { - /// Create a new instance of `Self`. - pub fn new(ptr: u32) -> Self { - Self { - ptr, - _marker: Default::default(), - } - } - - /// Calculate the offset from this pointer. - /// - /// `offset` is in units of `T`. So, `3` means `3 * mem::size_of::()` as offset to the pointer. - /// - /// Returns an `Option` to respect that the pointer could probably overflow. - pub fn offset(self, offset: u32) -> Option { - offset.checked_mul(T::SIZE).and_then(|o| self.ptr.checked_add(o)).map(|ptr| { - Self { - ptr, - _marker: Default::default(), - } - }) - } - - /// Create a null pointer. 
- pub fn null() -> Self { - Self::new(0) - } - - /// Cast this pointer of type `T` to a pointer of type `R`. - pub fn cast(self) -> Pointer { - Pointer::new(self.ptr) - } + /// Create a new instance of `Self`. + pub fn new(ptr: u32) -> Self { + Self { + ptr, + _marker: Default::default(), + } + } + + /// Calculate the offset from this pointer. + /// + /// `offset` is in units of `T`. So, `3` means `3 * mem::size_of::()` as offset to the pointer. + /// + /// Returns an `Option` to respect that the pointer could probably overflow. + pub fn offset(self, offset: u32) -> Option { + offset + .checked_mul(T::SIZE) + .and_then(|o| self.ptr.checked_add(o)) + .map(|ptr| Self { + ptr, + _marker: Default::default(), + }) + } + + /// Create a null pointer. + pub fn null() -> Self { + Self::new(0) + } + + /// Cast this pointer of type `T` to a pointer of type `R`. + pub fn cast(self) -> Pointer { + Pointer::new(self.ptr) + } } impl From for Pointer { - fn from(ptr: u32) -> Self { - Pointer::new(ptr) - } + fn from(ptr: u32) -> Self { + Pointer::new(ptr) + } } impl From> for u32 { - fn from(ptr: Pointer) -> Self { - ptr.ptr - } + fn from(ptr: Pointer) -> Self { + ptr.ptr + } } impl From> for u64 { - fn from(ptr: Pointer) -> Self { - u64::from(ptr.ptr) - } + fn from(ptr: Pointer) -> Self { + u64::from(ptr.ptr) + } } impl From> for usize { - fn from(ptr: Pointer) -> Self { - ptr.ptr as _ - } + fn from(ptr: Pointer) -> Self { + ptr.ptr as _ + } } impl IntoValue for Pointer { - const VALUE_TYPE: ValueType = ValueType::I32; - fn into_value(self) -> Value { Value::I32(self.ptr as _) } + const VALUE_TYPE: ValueType = ValueType::I32; + fn into_value(self) -> Value { + Value::I32(self.ptr as _) + } } impl TryFromValue for Pointer { - fn try_from_value(val: Value) -> Option { - match val { - Value::I32(val) => Some(Self::new(val as _)), - _ => None, - } - } + fn try_from_value(val: Value) -> Option { + match val { + Value::I32(val) => Some(Self::new(val as _)), + _ => None, + } + } } /// 
The word size used in wasm. Normally known as `usize` in Rust. @@ -214,28 +215,31 @@ pub type WordSize = u32; /// The Signature of a function #[derive(Eq, PartialEq, Debug, Clone)] pub struct Signature { - /// The arguments of a function. - pub args: Cow<'static, [ValueType]>, - /// The optional return value of a function. - pub return_value: Option, + /// The arguments of a function. + pub args: Cow<'static, [ValueType]>, + /// The optional return value of a function. + pub return_value: Option, } impl Signature { - /// Create a new instance of `Signature`. - pub fn new>>(args: T, return_value: Option) -> Self { - Self { - args: args.into(), - return_value, - } - } - - /// Create a new instance of `Signature` with the given `args` and without any return value. - pub fn new_with_args>>(args: T) -> Self { - Self { - args: args.into(), - return_value: None, - } - } + /// Create a new instance of `Signature`. + pub fn new>>( + args: T, + return_value: Option, + ) -> Self { + Self { + args: args.into(), + return_value, + } + } + + /// Create a new instance of `Signature` with the given `args` and without any return value. + pub fn new_with_args>>(args: T) -> Self { + Self { + args: args.into(), + return_value: None, + } + } } /// A trait that requires `RefUnwindSafe` when `feature = std`. @@ -252,43 +256,43 @@ impl MaybeRefUnwindSafe for T {} /// Something that provides a function implementation on the host for a wasm function. pub trait Function: MaybeRefUnwindSafe + Send + Sync { - /// Returns the name of this function. - fn name(&self) -> &str; - /// Returns the signature of this function. - fn signature(&self) -> Signature; - /// Execute this function with the given arguments. - fn execute( - &self, - context: &mut dyn FunctionContext, - args: &mut dyn Iterator, - ) -> Result>; + /// Returns the name of this function. + fn name(&self) -> &str; + /// Returns the signature of this function. 
+ fn signature(&self) -> Signature; + /// Execute this function with the given arguments. + fn execute( + &self, + context: &mut dyn FunctionContext, + args: &mut dyn Iterator, + ) -> Result>; } impl PartialEq for dyn Function { - fn eq(&self, other: &Self) -> bool { - other.name() == self.name() && other.signature() == self.signature() - } + fn eq(&self, other: &Self) -> bool { + other.name() == self.name() && other.signature() == self.signature() + } } /// Context used by `Function` to interact with the allocator and the memory of the wasm instance. pub trait FunctionContext { - /// Read memory from `address` into a vector. - fn read_memory(&self, address: Pointer, size: WordSize) -> Result> { - let mut vec = Vec::with_capacity(size as usize); - vec.resize(size as usize, 0); - self.read_memory_into(address, &mut vec)?; - Ok(vec) - } - /// Read memory into the given `dest` buffer from `address`. - fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> Result<()>; - /// Write the given data at `address` into the memory. - fn write_memory(&mut self, address: Pointer, data: &[u8]) -> Result<()>; - /// Allocate a memory instance of `size` bytes. - fn allocate_memory(&mut self, size: WordSize) -> Result>; - /// Deallocate a given memory instance. - fn deallocate_memory(&mut self, ptr: Pointer) -> Result<()>; - /// Provides access to the sandbox. - fn sandbox(&mut self) -> &mut dyn Sandbox; + /// Read memory from `address` into a vector. + fn read_memory(&self, address: Pointer, size: WordSize) -> Result> { + let mut vec = Vec::with_capacity(size as usize); + vec.resize(size as usize, 0); + self.read_memory_into(address, &mut vec)?; + Ok(vec) + } + /// Read memory into the given `dest` buffer from `address`. + fn read_memory_into(&self, address: Pointer, dest: &mut [u8]) -> Result<()>; + /// Write the given data at `address` into the memory. 
+ fn write_memory(&mut self, address: Pointer, data: &[u8]) -> Result<()>; + /// Allocate a memory instance of `size` bytes. + fn allocate_memory(&mut self, size: WordSize) -> Result>; + /// Deallocate a given memory instance. + fn deallocate_memory(&mut self, ptr: Pointer) -> Result<()>; + /// Provides access to the sandbox. + fn sandbox(&mut self) -> &mut dyn Sandbox; } /// Sandbox memory identifier. @@ -296,85 +300,85 @@ pub type MemoryId = u32; /// Something that provides access to the sandbox. pub trait Sandbox { - /// Get sandbox memory from the `memory_id` instance at `offset` into the given buffer. - fn memory_get( - &mut self, - memory_id: MemoryId, - offset: WordSize, - buf_ptr: Pointer, - buf_len: WordSize, - ) -> Result; - /// Set sandbox memory from the given value. - fn memory_set( - &mut self, - memory_id: MemoryId, - offset: WordSize, - val_ptr: Pointer, - val_len: WordSize, - ) -> Result; - /// Delete a memory instance. - fn memory_teardown(&mut self, memory_id: MemoryId) -> Result<()>; - /// Create a new memory instance with the given `initial` size and the `maximum` size. - /// The size is given in wasm pages. - fn memory_new(&mut self, initial: u32, maximum: u32) -> Result; - /// Invoke an exported function by a name. - fn invoke( - &mut self, - instance_id: u32, - export_name: &str, - args: &[u8], - return_val: Pointer, - return_val_len: WordSize, - state: u32, - ) -> Result; - /// Delete a sandbox instance. - fn instance_teardown(&mut self, instance_id: u32) -> Result<()>; - /// Create a new sandbox instance. - fn instance_new( - &mut self, - dispatch_thunk_id: u32, - wasm: &[u8], - raw_env_def: &[u8], - state: u32, - ) -> Result; - - /// Get the value from a global with the given `name`. The sandbox is determined by the - /// given `instance_idx` instance. - /// - /// Returns `Some(_)` when the requested global variable could be found. 
- fn get_global_val(&self, instance_idx: u32, name: &str) -> Result>; + /// Get sandbox memory from the `memory_id` instance at `offset` into the given buffer. + fn memory_get( + &mut self, + memory_id: MemoryId, + offset: WordSize, + buf_ptr: Pointer, + buf_len: WordSize, + ) -> Result; + /// Set sandbox memory from the given value. + fn memory_set( + &mut self, + memory_id: MemoryId, + offset: WordSize, + val_ptr: Pointer, + val_len: WordSize, + ) -> Result; + /// Delete a memory instance. + fn memory_teardown(&mut self, memory_id: MemoryId) -> Result<()>; + /// Create a new memory instance with the given `initial` size and the `maximum` size. + /// The size is given in wasm pages. + fn memory_new(&mut self, initial: u32, maximum: u32) -> Result; + /// Invoke an exported function by a name. + fn invoke( + &mut self, + instance_id: u32, + export_name: &str, + args: &[u8], + return_val: Pointer, + return_val_len: WordSize, + state: u32, + ) -> Result; + /// Delete a sandbox instance. + fn instance_teardown(&mut self, instance_id: u32) -> Result<()>; + /// Create a new sandbox instance. + fn instance_new( + &mut self, + dispatch_thunk_id: u32, + wasm: &[u8], + raw_env_def: &[u8], + state: u32, + ) -> Result; + + /// Get the value from a global with the given `name`. The sandbox is determined by the + /// given `instance_idx` instance. + /// + /// Returns `Some(_)` when the requested global variable could be found. + fn get_global_val(&self, instance_idx: u32, name: &str) -> Result>; } /// Something that provides implementations for host functions. pub trait HostFunctions: 'static { - /// Returns the host functions `Self` provides. - fn host_functions() -> Vec<&'static dyn Function>; + /// Returns the host functions `Self` provides. 
+ fn host_functions() -> Vec<&'static dyn Function>; } #[impl_trait_for_tuples::impl_for_tuples(30)] impl HostFunctions for Tuple { - fn host_functions() -> Vec<&'static dyn Function> { - let mut host_functions = Vec::new(); + fn host_functions() -> Vec<&'static dyn Function> { + let mut host_functions = Vec::new(); - for_tuples!( #( host_functions.extend(Tuple::host_functions()); )* ); + for_tuples!( #( host_functions.extend(Tuple::host_functions()); )* ); - host_functions - } + host_functions + } } /// Something that can be converted into a wasm compatible `Value`. pub trait IntoValue { - /// The type of the value in wasm. - const VALUE_TYPE: ValueType; + /// The type of the value in wasm. + const VALUE_TYPE: ValueType; - /// Convert `self` into a wasm `Value`. - fn into_value(self) -> Value; + /// Convert `self` into a wasm `Value`. + fn into_value(self) -> Value; } /// Something that can may be created from a wasm `Value`. pub trait TryFromValue: Sized { - /// Try to convert the given `Value` into `Self`. - fn try_from_value(val: Value) -> Option; + /// Try to convert the given `Value` into `Self`. + fn try_from_value(val: Value) -> Option; } macro_rules! impl_into_and_from_value { @@ -402,56 +406,56 @@ macro_rules! impl_into_and_from_value { } impl_into_and_from_value! { - u8, I32, - u16, I32, - u32, I32, - u64, I64, - i8, I32, - i16, I32, - i32, I32, - i64, I64, + u8, I32, + u16, I32, + u32, I32, + u64, I64, + i8, I32, + i16, I32, + i32, I32, + i64, I64, } /// Something that can write a primitive to wasm memory location. pub trait WritePrimitive { - /// Write the given value `t` to the given memory location `ptr`. - fn write_primitive(&mut self, ptr: Pointer, t: T) -> Result<()>; + /// Write the given value `t` to the given memory location `ptr`. 
+ fn write_primitive(&mut self, ptr: Pointer, t: T) -> Result<()>; } impl WritePrimitive for &mut dyn FunctionContext { - fn write_primitive(&mut self, ptr: Pointer, t: u32) -> Result<()> { - let r = t.to_le_bytes(); - self.write_memory(ptr.cast(), &r) - } + fn write_primitive(&mut self, ptr: Pointer, t: u32) -> Result<()> { + let r = t.to_le_bytes(); + self.write_memory(ptr.cast(), &r) + } } impl WritePrimitive for &mut dyn FunctionContext { - fn write_primitive(&mut self, ptr: Pointer, t: u64) -> Result<()> { - let r = t.to_le_bytes(); - self.write_memory(ptr.cast(), &r) - } + fn write_primitive(&mut self, ptr: Pointer, t: u64) -> Result<()> { + let r = t.to_le_bytes(); + self.write_memory(ptr.cast(), &r) + } } /// Something that can read a primitive from a wasm memory location. pub trait ReadPrimitive { - /// Read a primitive from the given memory location `ptr`. - fn read_primitive(&self, ptr: Pointer) -> Result; + /// Read a primitive from the given memory location `ptr`. + fn read_primitive(&self, ptr: Pointer) -> Result; } impl ReadPrimitive for &mut dyn FunctionContext { - fn read_primitive(&self, ptr: Pointer) -> Result { - let mut r = [0u8; 4]; - self.read_memory_into(ptr.cast(), &mut r)?; - Ok(u32::from_le_bytes(r)) - } + fn read_primitive(&self, ptr: Pointer) -> Result { + let mut r = [0u8; 4]; + self.read_memory_into(ptr.cast(), &mut r)?; + Ok(u32::from_le_bytes(r)) + } } impl ReadPrimitive for &mut dyn FunctionContext { - fn read_primitive(&self, ptr: Pointer) -> Result { - let mut r = [0u8; 8]; - self.read_memory_into(ptr.cast(), &mut r)?; - Ok(u64::from_le_bytes(r)) - } + fn read_primitive(&self, ptr: Pointer) -> Result { + let mut r = [0u8; 8]; + self.read_memory_into(ptr.cast(), &mut r)?; + Ok(u64::from_le_bytes(r)) + } } /// Typed value that can be returned from a function. @@ -459,50 +463,49 @@ impl ReadPrimitive for &mut dyn FunctionContext { /// Basically a `TypedValue` plus `Unit`, for functions which return nothing. 
#[derive(Clone, Copy, PartialEq, codec::Encode, codec::Decode, Debug)] pub enum ReturnValue { - /// For returning nothing. - Unit, - /// For returning some concrete value. - Value(Value), + /// For returning nothing. + Unit, + /// For returning some concrete value. + Value(Value), } impl From for ReturnValue { - fn from(v: Value) -> ReturnValue { - ReturnValue::Value(v) - } + fn from(v: Value) -> ReturnValue { + ReturnValue::Value(v) + } } impl ReturnValue { - /// Maximum number of bytes `ReturnValue` might occupy when serialized with `SCALE`. - /// - /// Breakdown: - /// 1 byte for encoding unit/value variant - /// 1 byte for encoding value type - /// 8 bytes for encoding the biggest value types available in wasm: f64, i64. - pub const ENCODED_MAX_SIZE: usize = 10; + /// Maximum number of bytes `ReturnValue` might occupy when serialized with `SCALE`. + /// + /// Breakdown: + /// 1 byte for encoding unit/value variant + /// 1 byte for encoding value type + /// 8 bytes for encoding the biggest value types available in wasm: f64, i64. 
+ pub const ENCODED_MAX_SIZE: usize = 10; } #[cfg(test)] mod tests { - use super::*; - use codec::Encode; - - #[test] - fn pointer_offset_works() { - let ptr = Pointer::::null(); + use super::*; + use codec::Encode; - assert_eq!(ptr.offset(10).unwrap(), Pointer::new(40)); - assert_eq!(ptr.offset(32).unwrap(), Pointer::new(128)); + #[test] + fn pointer_offset_works() { + let ptr = Pointer::::null(); - let ptr = Pointer::::null(); + assert_eq!(ptr.offset(10).unwrap(), Pointer::new(40)); + assert_eq!(ptr.offset(32).unwrap(), Pointer::new(128)); - assert_eq!(ptr.offset(10).unwrap(), Pointer::new(80)); - assert_eq!(ptr.offset(32).unwrap(), Pointer::new(256)); - } + let ptr = Pointer::::null(); + assert_eq!(ptr.offset(10).unwrap(), Pointer::new(80)); + assert_eq!(ptr.offset(32).unwrap(), Pointer::new(256)); + } - #[test] - fn return_value_encoded_max_size() { - let encoded = ReturnValue::Value(Value::I64(-1)).encode(); - assert_eq!(encoded.len(), ReturnValue::ENCODED_MAX_SIZE); - } + #[test] + fn return_value_encoded_max_size() { + let encoded = ReturnValue::Value(Value::I64(-1)).encode(); + assert_eq!(encoded.len(), ReturnValue::ENCODED_MAX_SIZE); + } } diff --git a/primitives/wasm-interface/src/wasmi_impl.rs b/primitives/wasm-interface/src/wasmi_impl.rs index dea8519b71..ee47871daa 100644 --- a/primitives/wasm-interface/src/wasmi_impl.rs +++ b/primitives/wasm-interface/src/wasmi_impl.rs @@ -16,64 +16,68 @@ //! Implementation of conversions between Substrate and wasmi types. 
-use crate::{Value, ValueType, Signature}; +use crate::{Signature, Value, ValueType}; impl From for wasmi::RuntimeValue { - fn from(value: Value) -> Self { - match value { - Value::I32(val) => Self::I32(val), - Value::I64(val) => Self::I64(val), - Value::F32(val) => Self::F32(val.into()), - Value::F64(val) => Self::F64(val.into()), - } - } + fn from(value: Value) -> Self { + match value { + Value::I32(val) => Self::I32(val), + Value::I64(val) => Self::I64(val), + Value::F32(val) => Self::F32(val.into()), + Value::F64(val) => Self::F64(val.into()), + } + } } impl From for Value { - fn from(value: wasmi::RuntimeValue) -> Self { - match value { - wasmi::RuntimeValue::I32(val) => Self::I32(val), - wasmi::RuntimeValue::I64(val) => Self::I64(val), - wasmi::RuntimeValue::F32(val) => Self::F32(val.into()), - wasmi::RuntimeValue::F64(val) => Self::F64(val.into()), - } - } + fn from(value: wasmi::RuntimeValue) -> Self { + match value { + wasmi::RuntimeValue::I32(val) => Self::I32(val), + wasmi::RuntimeValue::I64(val) => Self::I64(val), + wasmi::RuntimeValue::F32(val) => Self::F32(val.into()), + wasmi::RuntimeValue::F64(val) => Self::F64(val.into()), + } + } } impl From for wasmi::ValueType { - fn from(value: ValueType) -> Self { - match value { - ValueType::I32 => Self::I32, - ValueType::I64 => Self::I64, - ValueType::F32 => Self::F32, - ValueType::F64 => Self::F64, - } - } + fn from(value: ValueType) -> Self { + match value { + ValueType::I32 => Self::I32, + ValueType::I64 => Self::I64, + ValueType::F32 => Self::F32, + ValueType::F64 => Self::F64, + } + } } impl From for ValueType { - fn from(value: wasmi::ValueType) -> Self { - match value { - wasmi::ValueType::I32 => Self::I32, - wasmi::ValueType::I64 => Self::I64, - wasmi::ValueType::F32 => Self::F32, - wasmi::ValueType::F64 => Self::F64, - } - } + fn from(value: wasmi::ValueType) -> Self { + match value { + wasmi::ValueType::I32 => Self::I32, + wasmi::ValueType::I64 => Self::I64, + wasmi::ValueType::F32 => Self::F32, + 
wasmi::ValueType::F64 => Self::F64, + } + } } impl From for wasmi::Signature { - fn from(sig: Signature) -> Self { - let args = sig.args.iter().map(|a| (*a).into()).collect::>(); - wasmi::Signature::new(args, sig.return_value.map(Into::into)) - } + fn from(sig: Signature) -> Self { + let args = sig.args.iter().map(|a| (*a).into()).collect::>(); + wasmi::Signature::new(args, sig.return_value.map(Into::into)) + } } impl From<&wasmi::Signature> for Signature { - fn from(sig: &wasmi::Signature) -> Self { - Signature::new( - sig.params().into_iter().copied().map(Into::into).collect::>(), - sig.return_type().map(Into::into), - ) - } + fn from(sig: &wasmi::Signature) -> Self { + Signature::new( + sig.params() + .into_iter() + .copied() + .map(Into::into) + .collect::>(), + sig.return_type().map(Into::into), + ) + } } diff --git a/test-utils/client/src/client_ext.rs b/test-utils/client/src/client_ext.rs index 6d6b539483..89c69d81af 100644 --- a/test-utils/client/src/client_ext.rs +++ b/test-utils/client/src/client_ext.rs @@ -16,165 +16,164 @@ //! Client extension for tests. +use codec::alloc::collections::hash_map::HashMap; use sc_client::{self, Client}; use sc_client_api::backend::Finalizer; use sp_consensus::{ - BlockImportParams, BlockImport, BlockOrigin, Error as ConsensusError, - ForkChoiceStrategy, + BlockImport, BlockImportParams, BlockOrigin, Error as ConsensusError, ForkChoiceStrategy, }; -use sp_runtime::Justification; -use sp_runtime::traits::{Block as BlockT}; use sp_runtime::generic::BlockId; -use codec::alloc::collections::hash_map::HashMap; +use sp_runtime::traits::Block as BlockT; +use sp_runtime::Justification; /// Extension trait for a test client. pub trait ClientExt: Sized { - /// Finalize a block. - fn finalize_block( - &self, - id: BlockId, - justification: Option, - ) -> sp_blockchain::Result<()>; - - /// Returns hash of the genesis block. - fn genesis_hash(&self) -> ::Hash; + /// Finalize a block. 
+ fn finalize_block( + &self, + id: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()>; + + /// Returns hash of the genesis block. + fn genesis_hash(&self) -> ::Hash; } /// Extension trait for a test client around block importing. pub trait ClientBlockImportExt: Sized { - /// Import block to the chain. No finality. - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; - - /// Import a block and make it our best block if possible. - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; - - /// Import a block and finalize it. - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) - -> Result<(), ConsensusError>; - - /// Import block with justification, finalizes block. - fn import_justified( - &mut self, - origin: BlockOrigin, - block: Block, - justification: Justification - ) -> Result<(), ConsensusError>; + /// Import block to the chain. No finality. + fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + + /// Import a block and make it our best block if possible. + fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + + /// Import a block and finalize it. + fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError>; + + /// Import block with justification, finalizes block. 
+ fn import_justified( + &mut self, + origin: BlockOrigin, + block: Block, + justification: Justification, + ) -> Result<(), ConsensusError>; } impl ClientExt for Client - where - B: sc_client_api::backend::Backend, - E: sc_client::CallExecutor + 'static, - Self: BlockImport, - Block: BlockT, +where + B: sc_client_api::backend::Backend, + E: sc_client::CallExecutor + 'static, + Self: BlockImport, + Block: BlockT, { - fn finalize_block( - &self, - id: BlockId, - justification: Option, - ) -> sp_blockchain::Result<()> { - Finalizer::finalize_block(self, id, justification, true) - } - - fn genesis_hash(&self) -> ::Hash { - self.block_hash(0.into()).unwrap().unwrap() - } + fn finalize_block( + &self, + id: BlockId, + justification: Option, + ) -> sp_blockchain::Result<()> { + Finalizer::finalize_block(self, id, justification, true) + } + + fn genesis_hash(&self) -> ::Hash { + self.block_hash(0.into()).unwrap().unwrap() + } } /// This implementation is required, because of the weird api requirements around `BlockImport`. 
impl ClientBlockImportExt for std::sync::Arc - where for<'r> &'r T: BlockImport +where + for<'r> &'r T: BlockImport, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } - - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } - - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.finalized = true; - import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } - - fn import_justified( - &mut self, - origin: BlockOrigin, - block: Block, - justification: Justification, - ) -> Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.justification = Some(justification); - import.body = Some(extrinsics); - import.finalized = true; - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } + fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = 
Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } + + fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } + + fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.finalized = true; + import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } + + fn import_justified( + &mut self, + origin: BlockOrigin, + block: Block, + justification: Justification, + ) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.justification = Some(justification); + import.body = Some(extrinsics); + import.finalized = true; + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } } impl ClientBlockImportExt for Client - where - Self: BlockImport, +where + Self: BlockImport, { - fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } - - fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> 
Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } - - fn import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.body = Some(extrinsics); - import.finalized = true; - import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } - - fn import_justified( - &mut self, - origin: BlockOrigin, - block: Block, - justification: Justification, - ) -> Result<(), ConsensusError> { - let (header, extrinsics) = block.deconstruct(); - let mut import = BlockImportParams::new(origin, header); - import.justification = Some(justification); - import.body = Some(extrinsics); - import.finalized = true; - import.fork_choice = Some(ForkChoiceStrategy::LongestChain); - - BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) - } + fn import(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } + + fn import_as_best(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } + + fn 
import_as_final(&mut self, origin: BlockOrigin, block: Block) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.body = Some(extrinsics); + import.finalized = true; + import.fork_choice = Some(ForkChoiceStrategy::Custom(true)); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } + + fn import_justified( + &mut self, + origin: BlockOrigin, + block: Block, + justification: Justification, + ) -> Result<(), ConsensusError> { + let (header, extrinsics) = block.deconstruct(); + let mut import = BlockImportParams::new(origin, header); + import.justification = Some(justification); + import.body = Some(extrinsics); + import.finalized = true; + import.fork_choice = Some(ForkChoiceStrategy::LongestChain); + + BlockImport::import_block(self, import, HashMap::new()).map(|_| ()) + } } diff --git a/test-utils/client/src/lib.rs b/test-utils/client/src/lib.rs index d04e85fd10..2bc44f030c 100644 --- a/test-utils/client/src/lib.rs +++ b/test-utils/client/src/lib.rs @@ -20,234 +20,233 @@ pub mod client_ext; -pub use sc_client::{blockchain, self}; +pub use self::client_ext::{ClientBlockImportExt, ClientExt}; +pub use sc_client::{self, blockchain}; pub use sc_client_api::{ - execution_extensions::{ExecutionStrategies, ExecutionExtensions}, - ForkBlocks, BadBlocks, CloneableSpawn, + execution_extensions::{ExecutionExtensions, ExecutionStrategies}, + BadBlocks, CloneableSpawn, ForkBlocks, }; -pub use sc_client_db::{Backend, self}; +pub use sc_client_db::{self, Backend}; +pub use sc_executor::{self, NativeExecutor, WasmExecutionMethod}; pub use sp_consensus; -pub use sc_executor::{NativeExecutor, WasmExecutionMethod, self}; +pub use sp_core::{tasks::executor as tasks_executor, traits::BareCryptoStorePtr}; pub use sp_keyring::{ - AccountKeyring, - ed25519::Keyring as Ed25519Keyring, - sr25519::Keyring as Sr25519Keyring, + ed25519::Keyring as Ed25519Keyring, 
sr25519::Keyring as Sr25519Keyring, AccountKeyring, }; -pub use sp_core::{traits::BareCryptoStorePtr, tasks::executor as tasks_executor}; pub use sp_runtime::{Storage, StorageChild}; pub use sp_state_machine::ExecutionStrategy; -pub use self::client_ext::{ClientExt, ClientBlockImportExt}; -use std::sync::Arc; -use std::collections::HashMap; -use sp_core::storage::{well_known_keys, ChildInfo}; -use sp_runtime::traits::{Block as BlockT, BlakeTwo256}; use sc_client::LocalCallExecutor; +use sp_core::storage::{well_known_keys, ChildInfo}; +use sp_runtime::traits::{BlakeTwo256, Block as BlockT}; +use std::collections::HashMap; +use std::sync::Arc; /// Test client light database backend. -pub type LightBackend = sc_client::light::backend::Backend< - sc_client_db::light::LightStorage, - BlakeTwo256, ->; +pub type LightBackend = + sc_client::light::backend::Backend, BlakeTwo256>; /// A genesis storage initialization trait. pub trait GenesisInit: Default { - /// Construct genesis storage. - fn genesis_storage(&self) -> Storage; + /// Construct genesis storage. + fn genesis_storage(&self) -> Storage; } impl GenesisInit for () { - fn genesis_storage(&self) -> Storage { - Default::default() - } + fn genesis_storage(&self) -> Storage { + Default::default() + } } /// A builder for creating a test client instance. 
pub struct TestClientBuilder { - execution_strategies: ExecutionStrategies, - genesis_init: G, - child_storage_extension: HashMap, StorageChild>, - backend: Arc, - _executor: std::marker::PhantomData, - keystore: Option, - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, + execution_strategies: ExecutionStrategies, + genesis_init: G, + child_storage_extension: HashMap, StorageChild>, + backend: Arc, + _executor: std::marker::PhantomData, + keystore: Option, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, } impl Default - for TestClientBuilder, G> { - fn default() -> Self { - Self::with_default_backend() - } + for TestClientBuilder, G> +{ + fn default() -> Self { + Self::with_default_backend() + } } -impl TestClientBuilder, G> { - /// Create new `TestClientBuilder` with default backend. - pub fn with_default_backend() -> Self { - let backend = Arc::new(Backend::new_test(std::u32::MAX, std::u64::MAX)); - Self::with_backend(backend) - } - - /// Create new `TestClientBuilder` with default backend and pruning window size - pub fn with_pruning_window(keep_blocks: u32) -> Self { - let backend = Arc::new(Backend::new_test(keep_blocks, 0)); - Self::with_backend(backend) - } +impl + TestClientBuilder, G> +{ + /// Create new `TestClientBuilder` with default backend. + pub fn with_default_backend() -> Self { + let backend = Arc::new(Backend::new_test(std::u32::MAX, std::u64::MAX)); + Self::with_backend(backend) + } + + /// Create new `TestClientBuilder` with default backend and pruning window size + pub fn with_pruning_window(keep_blocks: u32) -> Self { + let backend = Arc::new(Backend::new_test(keep_blocks, 0)); + Self::with_backend(backend) + } } -impl TestClientBuilder { - /// Create a new instance of the test client builder. 
- pub fn with_backend(backend: Arc) -> Self { - TestClientBuilder { - backend, - execution_strategies: ExecutionStrategies::default(), - child_storage_extension: Default::default(), - genesis_init: Default::default(), - _executor: Default::default(), - keystore: None, - fork_blocks: None, - bad_blocks: None, - } - } - - /// Set the keystore that should be used by the externalities. - pub fn set_keystore(mut self, keystore: BareCryptoStorePtr) -> Self { - self.keystore = Some(keystore); - self - } - - /// Alter the genesis storage parameters. - pub fn genesis_init_mut(&mut self) -> &mut G { - &mut self.genesis_init - } - - /// Give access to the underlying backend of these clients - pub fn backend(&self) -> Arc { - self.backend.clone() - } - - /// Extend child storage - pub fn add_child_storage( - mut self, - key: impl AsRef<[u8]>, - child_key: impl AsRef<[u8]>, - child_info: ChildInfo, - value: impl AsRef<[u8]>, - ) -> Self { - let entry = self.child_storage_extension.entry(key.as_ref().to_vec()) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.to_owned(), - }); - entry.data.insert(child_key.as_ref().to_vec(), value.as_ref().to_vec()); - self - } - - /// Set the execution strategy that should be used by all contexts. - pub fn set_execution_strategy( - mut self, - execution_strategy: ExecutionStrategy - ) -> Self { - self.execution_strategies = ExecutionStrategies { - syncing: execution_strategy, - importing: execution_strategy, - block_construction: execution_strategy, - offchain_worker: execution_strategy, - other: execution_strategy, - }; - self - } - - /// Sets custom block rules. - pub fn set_block_rules(mut self, - fork_blocks: ForkBlocks, - bad_blocks: BadBlocks, - ) -> Self { - self.fork_blocks = fork_blocks; - self.bad_blocks = bad_blocks; - self - } - - /// Build the test client with the given native executor. 
- pub fn build_with_executor( - self, - executor: Executor, - ) -> ( - sc_client::Client< - Backend, - Executor, - Block, - RuntimeApi, - >, - sc_client::LongestChain, - ) where - Executor: sc_client::CallExecutor + 'static, - Backend: sc_client_api::backend::Backend, - { - let storage = { - let mut storage = self.genesis_init.genesis_storage(); - - // Add some child storage keys. - for (key, child_content) in self.child_storage_extension { - storage.children.insert( - well_known_keys::CHILD_STORAGE_KEY_PREFIX.iter().cloned().chain(key).collect(), - StorageChild { - data: child_content.data.into_iter().collect(), - child_info: child_content.child_info, - }, - ); - } - - storage - }; - - let client = sc_client::Client::new( - self.backend.clone(), - executor, - &storage, - self.fork_blocks, - self.bad_blocks, - ExecutionExtensions::new( - self.execution_strategies, - self.keystore.clone(), - ), - None, - ).expect("Creates new client"); - - let longest_chain = sc_client::LongestChain::new(self.backend); - - (client, longest_chain) - } +impl + TestClientBuilder +{ + /// Create a new instance of the test client builder. + pub fn with_backend(backend: Arc) -> Self { + TestClientBuilder { + backend, + execution_strategies: ExecutionStrategies::default(), + child_storage_extension: Default::default(), + genesis_init: Default::default(), + _executor: Default::default(), + keystore: None, + fork_blocks: None, + bad_blocks: None, + } + } + + /// Set the keystore that should be used by the externalities. + pub fn set_keystore(mut self, keystore: BareCryptoStorePtr) -> Self { + self.keystore = Some(keystore); + self + } + + /// Alter the genesis storage parameters. 
+ pub fn genesis_init_mut(&mut self) -> &mut G { + &mut self.genesis_init + } + + /// Give access to the underlying backend of these clients + pub fn backend(&self) -> Arc { + self.backend.clone() + } + + /// Extend child storage + pub fn add_child_storage( + mut self, + key: impl AsRef<[u8]>, + child_key: impl AsRef<[u8]>, + child_info: ChildInfo, + value: impl AsRef<[u8]>, + ) -> Self { + let entry = self + .child_storage_extension + .entry(key.as_ref().to_vec()) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: child_info.to_owned(), + }); + entry + .data + .insert(child_key.as_ref().to_vec(), value.as_ref().to_vec()); + self + } + + /// Set the execution strategy that should be used by all contexts. + pub fn set_execution_strategy(mut self, execution_strategy: ExecutionStrategy) -> Self { + self.execution_strategies = ExecutionStrategies { + syncing: execution_strategy, + importing: execution_strategy, + block_construction: execution_strategy, + offchain_worker: execution_strategy, + other: execution_strategy, + }; + self + } + + /// Sets custom block rules. + pub fn set_block_rules( + mut self, + fork_blocks: ForkBlocks, + bad_blocks: BadBlocks, + ) -> Self { + self.fork_blocks = fork_blocks; + self.bad_blocks = bad_blocks; + self + } + + /// Build the test client with the given native executor. + pub fn build_with_executor( + self, + executor: Executor, + ) -> ( + sc_client::Client, + sc_client::LongestChain, + ) + where + Executor: sc_client::CallExecutor + 'static, + Backend: sc_client_api::backend::Backend, + { + let storage = { + let mut storage = self.genesis_init.genesis_storage(); + + // Add some child storage keys. 
+ for (key, child_content) in self.child_storage_extension { + storage.children.insert( + well_known_keys::CHILD_STORAGE_KEY_PREFIX + .iter() + .cloned() + .chain(key) + .collect(), + StorageChild { + data: child_content.data.into_iter().collect(), + child_info: child_content.child_info, + }, + ); + } + + storage + }; + + let client = sc_client::Client::new( + self.backend.clone(), + executor, + &storage, + self.fork_blocks, + self.bad_blocks, + ExecutionExtensions::new(self.execution_strategies, self.keystore.clone()), + None, + ) + .expect("Creates new client"); + + let longest_chain = sc_client::LongestChain::new(self.backend); + + (client, longest_chain) + } } -impl TestClientBuilder< - Block, - sc_client::LocalCallExecutor>, - Backend, - G, -> { - /// Build the test client with the given native executor. - pub fn build_with_native_executor( - self, - executor: I, - ) -> ( - sc_client::Client< - Backend, - sc_client::LocalCallExecutor>, - Block, - RuntimeApi - >, - sc_client::LongestChain, - ) where - I: Into>>, - E: sc_executor::NativeExecutionDispatch + 'static, - Backend: sc_client_api::backend::Backend + 'static, - { - let executor = executor.into().unwrap_or_else(|| - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - ); - let executor = LocalCallExecutor::new(self.backend.clone(), executor, tasks_executor()); - - self.build_with_executor(executor) - } +impl + TestClientBuilder>, Backend, G> +{ + /// Build the test client with the given native executor. 
+ pub fn build_with_native_executor( + self, + executor: I, + ) -> ( + sc_client::Client< + Backend, + sc_client::LocalCallExecutor>, + Block, + RuntimeApi, + >, + sc_client::LongestChain, + ) + where + I: Into>>, + E: sc_executor::NativeExecutionDispatch + 'static, + Backend: sc_client_api::backend::Backend + 'static, + { + let executor = executor + .into() + .unwrap_or_else(|| NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8)); + let executor = LocalCallExecutor::new(self.backend.clone(), executor, tasks_executor()); + + self.build_with_executor(executor) + } } diff --git a/test-utils/runtime/build.rs b/test-utils/runtime/build.rs index 1fd3d52b2f..01a40b1313 100644 --- a/test-utils/runtime/build.rs +++ b/test-utils/runtime/build.rs @@ -17,14 +17,14 @@ use wasm_builder_runner::WasmBuilder; fn main() { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_from_crates_or_path("1.0.9", "../../utils/wasm-builder") - .export_heap_base() - // Note that we set the stack-size to 1MB explicitly even though it is set - // to this value by default. This is because some of our tests (`restoration_of_globals`) - // depend on the stack-size. - .append_to_rust_flags("-Clink-arg=-zstack-size=1048576") - .import_memory() - .build() + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_from_crates_or_path("1.0.9", "../../utils/wasm-builder") + .export_heap_base() + // Note that we set the stack-size to 1MB explicitly even though it is set + // to this value by default. This is because some of our tests (`restoration_of_globals`) + // depend on the stack-size. + .append_to_rust_flags("-Clink-arg=-zstack-size=1048576") + .import_memory() + .build() } diff --git a/test-utils/runtime/client/src/block_builder_ext.rs b/test-utils/runtime/client/src/block_builder_ext.rs index 3a9f54d06c..5209e534ce 100644 --- a/test-utils/runtime/client/src/block_builder_ext.rs +++ b/test-utils/runtime/client/src/block_builder_ext.rs @@ -16,58 +16,68 @@ //! 
Block Builder extensions for tests. +use sc_client_api::backend; use sp_api::{ApiExt, ProvideRuntimeApi}; use sp_core::ChangesTrieConfiguration; -use sc_client_api::backend; use sp_runtime::traits::HashFor; use sc_block_builder::BlockBuilderApi; /// Extension trait for test block builder. pub trait BlockBuilderExt { - /// Add transfer extrinsic to the block. - fn push_transfer(&mut self, transfer: substrate_test_runtime::Transfer) -> Result<(), sp_blockchain::Error>; - /// Add storage change extrinsic to the block. - fn push_storage_change( - &mut self, - key: Vec, - value: Option>, - ) -> Result<(), sp_blockchain::Error>; - /// Add changes trie configuration update extrinsic to the block. - fn push_changes_trie_configuration_update( - &mut self, - new_config: Option, - ) -> Result<(), sp_blockchain::Error>; + /// Add transfer extrinsic to the block. + fn push_transfer( + &mut self, + transfer: substrate_test_runtime::Transfer, + ) -> Result<(), sp_blockchain::Error>; + /// Add storage change extrinsic to the block. + fn push_storage_change( + &mut self, + key: Vec, + value: Option>, + ) -> Result<(), sp_blockchain::Error>; + /// Add changes trie configuration update extrinsic to the block. 
+ fn push_changes_trie_configuration_update( + &mut self, + new_config: Option, + ) -> Result<(), sp_blockchain::Error>; } -impl<'a, A, B> BlockBuilderExt for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> where - A: ProvideRuntimeApi + 'a, - A::Api: BlockBuilderApi + - ApiExt< - substrate_test_runtime::Block, - StateBackend = backend::StateBackendFor - >, - B: backend::Backend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - backend::StateBackendFor: - sp_api::StateBackend>, +impl<'a, A, B> BlockBuilderExt + for sc_block_builder::BlockBuilder<'a, substrate_test_runtime::Block, A, B> +where + A: ProvideRuntimeApi + 'a, + A::Api: BlockBuilderApi + + ApiExt< + substrate_test_runtime::Block, + StateBackend = backend::StateBackendFor, + >, + B: backend::Backend, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + backend::StateBackendFor: + sp_api::StateBackend>, { - fn push_transfer(&mut self, transfer: substrate_test_runtime::Transfer) -> Result<(), sp_blockchain::Error> { - self.push(transfer.into_signed_tx()) - } + fn push_transfer( + &mut self, + transfer: substrate_test_runtime::Transfer, + ) -> Result<(), sp_blockchain::Error> { + self.push(transfer.into_signed_tx()) + } - fn push_storage_change( - &mut self, - key: Vec, - value: Option>, - ) -> Result<(), sp_blockchain::Error> { - self.push(substrate_test_runtime::Extrinsic::StorageChange(key, value)) - } + fn push_storage_change( + &mut self, + key: Vec, + value: Option>, + ) -> Result<(), sp_blockchain::Error> { + self.push(substrate_test_runtime::Extrinsic::StorageChange(key, value)) + } - fn push_changes_trie_configuration_update( - &mut self, - new_config: Option, - ) -> Result<(), sp_blockchain::Error> { - self.push(substrate_test_runtime::Extrinsic::ChangesTrieConfigUpdate(new_config)) - } + fn push_changes_trie_configuration_update( + &mut self, + new_config: Option, + ) -> Result<(), sp_blockchain::Error> { + 
self.push(substrate_test_runtime::Extrinsic::ChangesTrieConfigUpdate( + new_config, + )) + } } diff --git a/test-utils/runtime/client/src/lib.rs b/test-utils/runtime/client/src/lib.rs index f0a405e67e..b2df80820f 100644 --- a/test-utils/runtime/client/src/lib.rs +++ b/test-utils/runtime/client/src/lib.rs @@ -22,137 +22,133 @@ pub mod trait_tests; mod block_builder_ext; -use std::sync::Arc; +pub use sc_client::LongestChain; use std::collections::HashMap; +use std::sync::Arc; pub use substrate_test_client::*; pub use substrate_test_runtime as runtime; -pub use sc_client::LongestChain; pub use self::block_builder_ext::BlockBuilderExt; -use sp_core::{sr25519, ChangesTrieConfiguration}; -use sp_core::storage::{ChildInfo, Storage, StorageChild}; -use substrate_test_runtime::genesismap::{GenesisConfig, additional_storage_with_genesis}; -use sp_runtime::traits::{Block as BlockT, Header as HeaderT, Hash as HashT, NumberFor, HashFor}; -use sc_client::{ - light::fetcher::{ - Fetcher, - RemoteHeaderRequest, RemoteReadRequest, RemoteReadChildRequest, - RemoteCallRequest, RemoteChangesRequest, RemoteBodyRequest, - }, +use sc_client::light::fetcher::{ + Fetcher, RemoteBodyRequest, RemoteCallRequest, RemoteChangesRequest, RemoteHeaderRequest, + RemoteReadChildRequest, RemoteReadRequest, }; +use sp_core::storage::{ChildInfo, Storage, StorageChild}; +use sp_core::{sr25519, ChangesTrieConfiguration}; +use sp_runtime::traits::{Block as BlockT, Hash as HashT, HashFor, Header as HeaderT, NumberFor}; +use substrate_test_runtime::genesismap::{additional_storage_with_genesis, GenesisConfig}; /// A prelude to import in tests. 
pub mod prelude { - // Trait extensions - pub use super::{ - BlockBuilderExt, DefaultTestClientBuilderExt, TestClientBuilderExt, ClientExt, - ClientBlockImportExt, - }; - // Client structs - pub use super::{ - TestClient, TestClientBuilder, Backend, LightBackend, - Executor, LightExecutor, LocalExecutor, NativeExecutor, WasmExecutionMethod, - }; - // Keyring - pub use super::{AccountKeyring, Sr25519Keyring}; + // Trait extensions + pub use super::{ + BlockBuilderExt, ClientBlockImportExt, ClientExt, DefaultTestClientBuilderExt, + TestClientBuilderExt, + }; + // Client structs + pub use super::{ + Backend, Executor, LightBackend, LightExecutor, LocalExecutor, NativeExecutor, TestClient, + TestClientBuilder, WasmExecutionMethod, + }; + // Keyring + pub use super::{AccountKeyring, Sr25519Keyring}; } sc_executor::native_executor_instance! { - pub LocalExecutor, - substrate_test_runtime::api::dispatch, - substrate_test_runtime::native_version, + pub LocalExecutor, + substrate_test_runtime::api::dispatch, + substrate_test_runtime::native_version, } /// Test client database backend. pub type Backend = substrate_test_client::Backend; /// Test client executor. -pub type Executor = sc_client::LocalCallExecutor< - Backend, - NativeExecutor, ->; +pub type Executor = sc_client::LocalCallExecutor>; /// Test client light database backend. pub type LightBackend = substrate_test_client::LightBackend; /// Test client light executor. pub type LightExecutor = sc_client::light::call_executor::GenesisCallExecutor< - LightBackend, - sc_client::LocalCallExecutor< - sc_client::light::backend::Backend< - sc_client_db::light::LightStorage, - HashFor - >, - NativeExecutor - > + LightBackend, + sc_client::LocalCallExecutor< + sc_client::light::backend::Backend< + sc_client_db::light::LightStorage, + HashFor, + >, + NativeExecutor, + >, >; /// Parameters of test-client builder with test-runtime. 
#[derive(Default)] pub struct GenesisParameters { - changes_trie_config: Option, - heap_pages_override: Option, - extra_storage: Storage, + changes_trie_config: Option, + heap_pages_override: Option, + extra_storage: Storage, } impl GenesisParameters { - fn genesis_config(&self) -> GenesisConfig { - GenesisConfig::new( - self.changes_trie_config.clone(), - vec![ - sr25519::Public::from(Sr25519Keyring::Alice).into(), - sr25519::Public::from(Sr25519Keyring::Bob).into(), - sr25519::Public::from(Sr25519Keyring::Charlie).into(), - ], - vec![ - AccountKeyring::Alice.into(), - AccountKeyring::Bob.into(), - AccountKeyring::Charlie.into(), - ], - 1000, - self.heap_pages_override, - self.extra_storage.clone(), - ) - } + fn genesis_config(&self) -> GenesisConfig { + GenesisConfig::new( + self.changes_trie_config.clone(), + vec![ + sr25519::Public::from(Sr25519Keyring::Alice).into(), + sr25519::Public::from(Sr25519Keyring::Bob).into(), + sr25519::Public::from(Sr25519Keyring::Charlie).into(), + ], + vec![ + AccountKeyring::Alice.into(), + AccountKeyring::Bob.into(), + AccountKeyring::Charlie.into(), + ], + 1000, + self.heap_pages_override, + self.extra_storage.clone(), + ) + } } impl substrate_test_client::GenesisInit for GenesisParameters { - fn genesis_storage(&self) -> Storage { - use codec::Encode; - - let mut storage = self.genesis_config().genesis_map(); - - let child_roots = storage.children.iter().map(|(sk, child_content)| { - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_content.data.clone().into_iter().collect() - ); - (sk.clone(), state_root.encode()) - }); - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.top.clone().into_iter().chain(child_roots).collect() - ); - let block: runtime::Block = sc_client::genesis::construct_genesis_block(state_root); - storage.top.extend(additional_storage_with_genesis(&block)); - - storage - } + fn genesis_storage(&self) -> Storage { + use codec::Encode; + + let mut 
storage = self.genesis_config().genesis_map(); + + let child_roots = storage.children.iter().map(|(sk, child_content)| { + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + child_content.data.clone().into_iter().collect(), + ); + (sk.clone(), state_root.encode()) + }); + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + storage.top.clone().into_iter().chain(child_roots).collect(), + ); + let block: runtime::Block = sc_client::genesis::construct_genesis_block(state_root); + storage.top.extend(additional_storage_with_genesis(&block)); + + storage + } } /// A `TestClient` with `test-runtime` builder. pub type TestClientBuilder = substrate_test_client::TestClientBuilder< - substrate_test_runtime::Block, - E, - B, - GenesisParameters, + substrate_test_runtime::Block, + E, + B, + GenesisParameters, >; /// Test client type with `LocalExecutor` and generic Backend. pub type Client = sc_client::Client< - B, - sc_client::LocalCallExecutor>, - substrate_test_runtime::Block, - substrate_test_runtime::RuntimeApi, + B, + sc_client::LocalCallExecutor>, + substrate_test_runtime::Block, + substrate_test_runtime::RuntimeApi, >; /// A test client with default backend. @@ -160,107 +156,127 @@ pub type TestClient = Client; /// A `TestClientBuilder` with default backend and executor. pub trait DefaultTestClientBuilderExt: Sized { - /// Create new `TestClientBuilder` - fn new() -> Self; + /// Create new `TestClientBuilder` + fn new() -> Self; } impl DefaultTestClientBuilderExt for TestClientBuilder { - fn new() -> Self { - Self::with_default_backend() - } + fn new() -> Self { + Self::with_default_backend() + } } /// A `test-runtime` extensions to `TestClientBuilder`. pub trait TestClientBuilderExt: Sized { - /// Returns a mutable reference to the genesis parameters. - fn genesis_init_mut(&mut self) -> &mut GenesisParameters; - - /// Set changes trie configuration for genesis. 
- fn changes_trie_config(mut self, config: Option) -> Self { - self.genesis_init_mut().changes_trie_config = config; - self - } - - /// Override the default value for Wasm heap pages. - fn set_heap_pages(mut self, heap_pages: u64) -> Self { - self.genesis_init_mut().heap_pages_override = Some(heap_pages); - self - } - - /// Add an extra value into the genesis storage. - /// - /// # Panics - /// - /// Panics if the key is empty. - fn add_extra_child_storage>, K: Into>, V: Into>>( - mut self, - storage_key: SK, - child_info: ChildInfo, - key: K, - value: V, - ) -> Self { - let storage_key = storage_key.into(); - let key = key.into(); - assert!(!storage_key.is_empty()); - assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.children - .entry(storage_key) - .or_insert_with(|| StorageChild { - data: Default::default(), - child_info: child_info.to_owned(), - }).data.insert(key, value.into()); - self - } - - /// Add an extra child value into the genesis storage. - /// - /// # Panics - /// - /// Panics if the key is empty. - fn add_extra_storage>, V: Into>>(mut self, key: K, value: V) -> Self { - let key = key.into(); - assert!(!key.is_empty()); - self.genesis_init_mut().extra_storage.top.insert(key, value.into()); - self - } - - /// Build the test client. - fn build(self) -> Client { - self.build_with_longest_chain().0 - } - - /// Build the test client and longest chain selector. - fn build_with_longest_chain(self) -> (Client, sc_client::LongestChain); - - /// Build the test client and the backend. - fn build_with_backend(self) -> (Client, Arc); + /// Returns a mutable reference to the genesis parameters. + fn genesis_init_mut(&mut self) -> &mut GenesisParameters; + + /// Set changes trie configuration for genesis. + fn changes_trie_config(mut self, config: Option) -> Self { + self.genesis_init_mut().changes_trie_config = config; + self + } + + /// Override the default value for Wasm heap pages. 
+ fn set_heap_pages(mut self, heap_pages: u64) -> Self { + self.genesis_init_mut().heap_pages_override = Some(heap_pages); + self + } + + /// Add an extra value into the genesis storage. + /// + /// # Panics + /// + /// Panics if the key is empty. + fn add_extra_child_storage>, K: Into>, V: Into>>( + mut self, + storage_key: SK, + child_info: ChildInfo, + key: K, + value: V, + ) -> Self { + let storage_key = storage_key.into(); + let key = key.into(); + assert!(!storage_key.is_empty()); + assert!(!key.is_empty()); + self.genesis_init_mut() + .extra_storage + .children + .entry(storage_key) + .or_insert_with(|| StorageChild { + data: Default::default(), + child_info: child_info.to_owned(), + }) + .data + .insert(key, value.into()); + self + } + + /// Add an extra child value into the genesis storage. + /// + /// # Panics + /// + /// Panics if the key is empty. + fn add_extra_storage>, V: Into>>(mut self, key: K, value: V) -> Self { + let key = key.into(); + assert!(!key.is_empty()); + self.genesis_init_mut() + .extra_storage + .top + .insert(key, value.into()); + self + } + + /// Build the test client. + fn build(self) -> Client { + self.build_with_longest_chain().0 + } + + /// Build the test client and longest chain selector. + fn build_with_longest_chain( + self, + ) -> ( + Client, + sc_client::LongestChain, + ); + + /// Build the test client and the backend. 
+ fn build_with_backend(self) -> (Client, Arc); } -impl TestClientBuilderExt for TestClientBuilder< - sc_client::LocalCallExecutor>, - B -> where - B: sc_client_api::backend::Backend + 'static, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - >::State: - sp_api::StateBackend>, +impl TestClientBuilderExt + for TestClientBuilder< + sc_client::LocalCallExecutor>, + B, + > +where + B: sc_client_api::backend::Backend + 'static, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + >::State: + sp_api::StateBackend>, { - fn genesis_init_mut(&mut self) -> &mut GenesisParameters { - Self::genesis_init_mut(self) - } - - fn build_with_longest_chain(self) -> (Client, sc_client::LongestChain) { - self.build_with_native_executor(None) - } - - fn build_with_backend(self) -> (Client, Arc) { - let backend = self.backend(); - (self.build_with_native_executor(None).0, backend) - } + fn genesis_init_mut(&mut self) -> &mut GenesisParameters { + Self::genesis_init_mut(self) + } + + fn build_with_longest_chain( + self, + ) -> ( + Client, + sc_client::LongestChain, + ) { + self.build_with_native_executor(None) + } + + fn build_with_backend(self) -> (Client, Arc) { + let backend = self.backend(); + (self.build_with_native_executor(None).0, backend) + } } /// Type of optional fetch callback. -type MaybeFetcherCallback = Option Result + Send + Sync>>; +type MaybeFetcherCallback = + Option Result + Send + Sync>>; /// Type of fetcher future result. type FetcherFutureResult = futures::future::Ready>; @@ -268,107 +284,134 @@ type FetcherFutureResult = futures::future::Ready, Vec>, - body: MaybeFetcherCallback, Vec>, + call: MaybeFetcherCallback, Vec>, + body: MaybeFetcherCallback< + RemoteBodyRequest, + Vec, + >, } impl LightFetcher { - /// Sets remote call callback. - pub fn with_remote_call( - self, - call: MaybeFetcherCallback, Vec>, - ) -> Self { - LightFetcher { - call, - body: self.body, - } - } - - /// Sets remote body callback. 
- pub fn with_remote_body( - self, - body: MaybeFetcherCallback, Vec>, - ) -> Self { - LightFetcher { - call: self.call, - body, - } - } + /// Sets remote call callback. + pub fn with_remote_call( + self, + call: MaybeFetcherCallback, Vec>, + ) -> Self { + LightFetcher { + call, + body: self.body, + } + } + + /// Sets remote body callback. + pub fn with_remote_body( + self, + body: MaybeFetcherCallback< + RemoteBodyRequest, + Vec, + >, + ) -> Self { + LightFetcher { + call: self.call, + body, + } + } } impl Fetcher for LightFetcher { - type RemoteHeaderResult = FetcherFutureResult; - type RemoteReadResult = FetcherFutureResult, Option>>>; - type RemoteCallResult = FetcherFutureResult>; - type RemoteChangesResult = FetcherFutureResult, u32)>>; - type RemoteBodyResult = FetcherFutureResult>; - - fn remote_header(&self, _: RemoteHeaderRequest) -> Self::RemoteHeaderResult { - unimplemented!() - } - - fn remote_read(&self, _: RemoteReadRequest) -> Self::RemoteReadResult { - unimplemented!() - } - - fn remote_read_child(&self, _: RemoteReadChildRequest) -> Self::RemoteReadResult { - unimplemented!() - } - - fn remote_call(&self, req: RemoteCallRequest) -> Self::RemoteCallResult { - match self.call { - Some(ref call) => futures::future::ready(call(req)), - None => unimplemented!(), - } - } - - fn remote_changes(&self, _: RemoteChangesRequest) -> Self::RemoteChangesResult { - unimplemented!() - } - - fn remote_body(&self, req: RemoteBodyRequest) -> Self::RemoteBodyResult { - match self.body { - Some(ref body) => futures::future::ready(body(req)), - None => unimplemented!(), - } - } + type RemoteHeaderResult = FetcherFutureResult; + type RemoteReadResult = FetcherFutureResult, Option>>>; + type RemoteCallResult = FetcherFutureResult>; + type RemoteChangesResult = + FetcherFutureResult, u32)>>; + type RemoteBodyResult = FetcherFutureResult>; + + fn remote_header( + &self, + _: RemoteHeaderRequest, + ) -> Self::RemoteHeaderResult { + unimplemented!() + } + + fn remote_read( + 
&self, + _: RemoteReadRequest, + ) -> Self::RemoteReadResult { + unimplemented!() + } + + fn remote_read_child( + &self, + _: RemoteReadChildRequest, + ) -> Self::RemoteReadResult { + unimplemented!() + } + + fn remote_call( + &self, + req: RemoteCallRequest, + ) -> Self::RemoteCallResult { + match self.call { + Some(ref call) => futures::future::ready(call(req)), + None => unimplemented!(), + } + } + + fn remote_changes( + &self, + _: RemoteChangesRequest, + ) -> Self::RemoteChangesResult { + unimplemented!() + } + + fn remote_body( + &self, + req: RemoteBodyRequest, + ) -> Self::RemoteBodyResult { + match self.body { + Some(ref body) => futures::future::ready(body(req)), + None => unimplemented!(), + } + } } /// Creates new client instance used for tests. pub fn new() -> Client { - TestClientBuilder::new().build() + TestClientBuilder::new().build() } /// Creates new light client instance used for tests. pub fn new_light() -> ( - sc_client::Client, - Arc, + sc_client::Client< + LightBackend, + LightExecutor, + substrate_test_runtime::Block, + substrate_test_runtime::RuntimeApi, + >, + Arc, ) { - - let storage = sc_client_db::light::LightStorage::new_test(); - let blockchain = Arc::new(sc_client::light::blockchain::Blockchain::new(storage)); - let backend = Arc::new(LightBackend::new(blockchain.clone())); - let executor = new_native_executor(); - let local_call_executor = sc_client::LocalCallExecutor::new(backend.clone(), executor, sp_core::tasks::executor()); - let call_executor = LightExecutor::new( - backend.clone(), - local_call_executor, - ); - - ( - TestClientBuilder::with_backend(backend.clone()) - .build_with_executor(call_executor) - .0, - backend, - ) + let storage = sc_client_db::light::LightStorage::new_test(); + let blockchain = Arc::new(sc_client::light::blockchain::Blockchain::new(storage)); + let backend = Arc::new(LightBackend::new(blockchain.clone())); + let executor = new_native_executor(); + let local_call_executor = + 
sc_client::LocalCallExecutor::new(backend.clone(), executor, sp_core::tasks::executor()); + let call_executor = LightExecutor::new(backend.clone(), local_call_executor); + + ( + TestClientBuilder::with_backend(backend.clone()) + .build_with_executor(call_executor) + .0, + backend, + ) } /// Creates new light client fetcher used for tests. pub fn new_light_fetcher() -> LightFetcher { - LightFetcher::default() + LightFetcher::default() } /// Create a new native executor. pub fn new_native_executor() -> sc_executor::NativeExecutor { - sc_executor::NativeExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) + sc_executor::NativeExecutor::new(sc_executor::WasmExecutionMethod::Interpreted, None, 8) } diff --git a/test-utils/runtime/client/src/trait_tests.rs b/test-utils/runtime/client/src/trait_tests.rs index 4af8aa37b6..234d7697a1 100644 --- a/test-utils/runtime/client/src/trait_tests.rs +++ b/test-utils/runtime/client/src/trait_tests.rs @@ -22,447 +22,499 @@ use std::sync::Arc; use crate::{ - AccountKeyring, ClientBlockImportExt, BlockBuilderExt, TestClientBuilder, TestClientBuilderExt, + AccountKeyring, BlockBuilderExt, ClientBlockImportExt, TestClientBuilder, TestClientBuilderExt, }; +use sc_block_builder::BlockBuilderProvider; use sc_client_api::backend; use sc_client_api::blockchain::{Backend as BlockChainBackendT, HeaderBackend}; -use substrate_test_client::sp_consensus::BlockOrigin; -use substrate_test_runtime::{self, Transfer}; use sp_runtime::generic::BlockId; use sp_runtime::traits::{Block as BlockT, HashFor}; -use sc_block_builder::BlockBuilderProvider; +use substrate_test_client::sp_consensus::BlockOrigin; +use substrate_test_runtime::{self, Transfer}; /// helper to test the `leaves` implementation for various backends -pub fn test_leaves_for_backend(backend: Arc) where - B: backend::Backend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - backend::StateBackendFor: - sp_api::StateBackend>, +pub fn 
test_leaves_for_backend(backend: Arc) +where + B: backend::Backend, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + backend::StateBackendFor: + sp_api::StateBackend>, { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - - let mut client = TestClientBuilder::with_backend(backend.clone()).build(); - let blockchain = backend.blockchain(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!( - blockchain.leaves().unwrap(), - vec![genesis_hash]); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a1.hash()], - ); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - #[allow(deprecated)] - assert_eq!( - blockchain.leaves().unwrap(), - vec![a2.hash()], - ); - - // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - assert_eq!( - blockchain.leaves().unwrap(), - vec![a3.hash()], - ); - - // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a4.hash()], - ); - - // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash()], - ); - - // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - 
).unwrap(); - - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b2.hash()], - ); - - // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b3.hash()], - ); - - // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash()], - ); - - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash()], - ); - - // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.build().unwrap().block; - 
client.import(BlockOrigin::Own, d2.clone()).unwrap(); - assert_eq!( - blockchain.leaves().unwrap(), - vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()], - ); + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + + let mut client = TestClientBuilder::with_backend(backend.clone()).build(); + let blockchain = backend.blockchain(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!(blockchain.leaves().unwrap(), vec![genesis_hash]); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a1.hash()],); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + #[allow(deprecated)] + assert_eq!(blockchain.leaves().unwrap(), vec![a2.hash()],); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + assert_eq!(blockchain.leaves().unwrap(), vec![a3.hash()],); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a4.hash()],); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash()],); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + + // this push is required as 
otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b2.hash()],); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b3.hash()],); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + assert_eq!(blockchain.leaves().unwrap(), vec![a5.hash(), b4.hash()],); + + // // B2 -> C3 + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + assert_eq!( + blockchain.leaves().unwrap(), + vec![a5.hash(), b4.hash(), c3.hash()], + ); + + // A1 -> D2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + assert_eq!( + 
blockchain.leaves().unwrap(), + vec![a5.hash(), b4.hash(), c3.hash(), d2.hash()], + ); } /// helper to test the `children` implementation for various backends -pub fn test_children_for_backend(backend: Arc) where - B: backend::LocalBackend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - >::State: - sp_api::StateBackend>, +pub fn test_children_for_backend(backend: Arc) +where + B: backend::LocalBackend, + // Rust bug: https://github.com/rust-lang/rust/issues/24159 + >::State: + sp_api::StateBackend>, { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - - let mut client = TestClientBuilder::with_backend(backend.clone()).build(); - let blockchain = backend.blockchain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: 
AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at( - &BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - let children1 = blockchain.children(a4.hash()).unwrap(); - assert_eq!(vec![a5.hash()], children1); - - let children2 = blockchain.children(a1.hash()).unwrap(); - assert_eq!(vec![a2.hash(), b2.hash(), d2.hash()], children2); - - let children3 = blockchain.children(genesis_hash).unwrap(); - assert_eq!(vec![a1.hash()], children3); - - let children4 = 
blockchain.children(b2.hash()).unwrap(); - assert_eq!(vec![b3.hash(), c3.hash()], children4); + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + + let mut client = TestClientBuilder::with_backend(backend.clone()).build(); + let blockchain = backend.blockchain(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> 
B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + let children1 = blockchain.children(a4.hash()).unwrap(); + assert_eq!(vec![a5.hash()], children1); + + let children2 = blockchain.children(a1.hash()).unwrap(); + assert_eq!(vec![a2.hash(), b2.hash(), d2.hash()], children2); + + let children3 = blockchain.children(genesis_hash).unwrap(); + assert_eq!(vec![a1.hash()], children3); + + let children4 = blockchain.children(b2.hash()).unwrap(); + assert_eq!(vec![b3.hash(), c3.hash()], children4); } -pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc) where - B: backend::LocalBackend, - // Rust bug: https://github.com/rust-lang/rust/issues/24159 - >::State: - sp_api::StateBackend>, +pub fn test_blockchain_query_by_number_gets_canonical(backend: Arc) +where + B: backend::LocalBackend, + // Rust bug: 
https://github.com/rust-lang/rust/issues/24159 + >::State: + sp_api::StateBackend>, { - // block tree: - // G -> A1 -> A2 -> A3 -> A4 -> A5 - // A1 -> B2 -> B3 -> B4 - // B2 -> C3 - // A1 -> D2 - let mut client = TestClientBuilder::with_backend(backend.clone()).build(); - let blockchain = backend.blockchain(); - - // G -> A1 - let a1 = client.new_block(Default::default()).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a1.clone()).unwrap(); - - // A1 -> A2 - let a2 = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a2.clone()).unwrap(); - - // A2 -> A3 - let a3 = client.new_block_at( - &BlockId::Hash(a2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a3.clone()).unwrap(); - - // A3 -> A4 - let a4 = client.new_block_at( - &BlockId::Hash(a3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a4.clone()).unwrap(); - - // A4 -> A5 - let a5 = client.new_block_at( - &BlockId::Hash(a4.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, a5.clone()).unwrap(); - - // A1 -> B2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise B2 has the same hash as A2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 41, - nonce: 0, - }).unwrap(); - let b2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, b2.clone()).unwrap(); - - // B2 -> B3 - let b3 = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b3.clone()).unwrap(); - - // B3 -> B4 - let b4 = client.new_block_at( - 
&BlockId::Hash(b3.hash()), - Default::default(), - false, - ).unwrap().build().unwrap().block; - client.import(BlockOrigin::Own, b4.clone()).unwrap(); - - // // B2 -> C3 - let mut builder = client.new_block_at( - &BlockId::Hash(b2.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise C3 has the same hash as B3 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 1, - }).unwrap(); - let c3 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, c3.clone()).unwrap(); - - // A1 -> D2 - let mut builder = client.new_block_at( - &BlockId::Hash(a1.hash()), - Default::default(), - false, - ).unwrap(); - // this push is required as otherwise D2 has the same hash as B2 and won't get imported - builder.push_transfer(Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Ferdie.into(), - amount: 1, - nonce: 0, - }).unwrap(); - let d2 = builder.build().unwrap().block; - client.import(BlockOrigin::Own, d2.clone()).unwrap(); - - let genesis_hash = client.chain_info().genesis_hash; - - assert_eq!(blockchain.header(BlockId::Number(0)).unwrap().unwrap().hash(), genesis_hash); - assert_eq!(blockchain.hash(0).unwrap().unwrap(), genesis_hash); - - assert_eq!(blockchain.header(BlockId::Number(1)).unwrap().unwrap().hash(), a1.hash()); - assert_eq!(blockchain.hash(1).unwrap().unwrap(), a1.hash()); - - assert_eq!(blockchain.header(BlockId::Number(2)).unwrap().unwrap().hash(), a2.hash()); - assert_eq!(blockchain.hash(2).unwrap().unwrap(), a2.hash()); - - assert_eq!(blockchain.header(BlockId::Number(3)).unwrap().unwrap().hash(), a3.hash()); - assert_eq!(blockchain.hash(3).unwrap().unwrap(), a3.hash()); - - assert_eq!(blockchain.header(BlockId::Number(4)).unwrap().unwrap().hash(), a4.hash()); - assert_eq!(blockchain.hash(4).unwrap().unwrap(), a4.hash()); - - 
assert_eq!(blockchain.header(BlockId::Number(5)).unwrap().unwrap().hash(), a5.hash()); - assert_eq!(blockchain.hash(5).unwrap().unwrap(), a5.hash()); + // block tree: + // G -> A1 -> A2 -> A3 -> A4 -> A5 + // A1 -> B2 -> B3 -> B4 + // B2 -> C3 + // A1 -> D2 + let mut client = TestClientBuilder::with_backend(backend.clone()).build(); + let blockchain = backend.blockchain(); + + // G -> A1 + let a1 = client + .new_block(Default::default()) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a1.clone()).unwrap(); + + // A1 -> A2 + let a2 = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a2.clone()).unwrap(); + + // A2 -> A3 + let a3 = client + .new_block_at(&BlockId::Hash(a2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a3.clone()).unwrap(); + + // A3 -> A4 + let a4 = client + .new_block_at(&BlockId::Hash(a3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a4.clone()).unwrap(); + + // A4 -> A5 + let a5 = client + .new_block_at(&BlockId::Hash(a4.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, a5.clone()).unwrap(); + + // A1 -> B2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise B2 has the same hash as A2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 41, + nonce: 0, + }) + .unwrap(); + let b2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, b2.clone()).unwrap(); + + // B2 -> B3 + let b3 = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + 
client.import(BlockOrigin::Own, b3.clone()).unwrap(); + + // B3 -> B4 + let b4 = client + .new_block_at(&BlockId::Hash(b3.hash()), Default::default(), false) + .unwrap() + .build() + .unwrap() + .block; + client.import(BlockOrigin::Own, b4.clone()).unwrap(); + + // // B2 -> C3 + let mut builder = client + .new_block_at(&BlockId::Hash(b2.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise C3 has the same hash as B3 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 1, + }) + .unwrap(); + let c3 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, c3.clone()).unwrap(); + + // A1 -> D2 + let mut builder = client + .new_block_at(&BlockId::Hash(a1.hash()), Default::default(), false) + .unwrap(); + // this push is required as otherwise D2 has the same hash as B2 and won't get imported + builder + .push_transfer(Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Ferdie.into(), + amount: 1, + nonce: 0, + }) + .unwrap(); + let d2 = builder.build().unwrap().block; + client.import(BlockOrigin::Own, d2.clone()).unwrap(); + + let genesis_hash = client.chain_info().genesis_hash; + + assert_eq!( + blockchain + .header(BlockId::Number(0)) + .unwrap() + .unwrap() + .hash(), + genesis_hash + ); + assert_eq!(blockchain.hash(0).unwrap().unwrap(), genesis_hash); + + assert_eq!( + blockchain + .header(BlockId::Number(1)) + .unwrap() + .unwrap() + .hash(), + a1.hash() + ); + assert_eq!(blockchain.hash(1).unwrap().unwrap(), a1.hash()); + + assert_eq!( + blockchain + .header(BlockId::Number(2)) + .unwrap() + .unwrap() + .hash(), + a2.hash() + ); + assert_eq!(blockchain.hash(2).unwrap().unwrap(), a2.hash()); + + assert_eq!( + blockchain + .header(BlockId::Number(3)) + .unwrap() + .unwrap() + .hash(), + a3.hash() + ); + assert_eq!(blockchain.hash(3).unwrap().unwrap(), a3.hash()); + + assert_eq!( + blockchain + 
.header(BlockId::Number(4)) + .unwrap() + .unwrap() + .hash(), + a4.hash() + ); + assert_eq!(blockchain.hash(4).unwrap().unwrap(), a4.hash()); + + assert_eq!( + blockchain + .header(BlockId::Number(5)) + .unwrap() + .unwrap() + .hash(), + a5.hash() + ); + assert_eq!(blockchain.hash(5).unwrap().unwrap(), a5.hash()); } diff --git a/test-utils/runtime/src/genesismap.rs b/test-utils/runtime/src/genesismap.rs index 25d9a807cc..82cf50938e 100644 --- a/test-utils/runtime/src/genesismap.rs +++ b/test-utils/runtime/src/genesismap.rs @@ -16,94 +16,111 @@ //! Tool for creating the genesis block. -use std::collections::BTreeMap; -use sp_io::hashing::{blake2_256, twox_128}; -use super::{AuthorityId, AccountId, WASM_BINARY, system}; -use codec::{Encode, KeyedVec, Joiner}; -use sp_core::{ChangesTrieConfiguration, map}; +use super::{system, AccountId, AuthorityId, WASM_BINARY}; +use codec::{Encode, Joiner, KeyedVec}; use sp_core::storage::{well_known_keys, Storage}; +use sp_core::{map, ChangesTrieConfiguration}; +use sp_io::hashing::{blake2_256, twox_128}; use sp_runtime::traits::{Block as BlockT, Hash as HashT, Header as HeaderT}; +use std::collections::BTreeMap; /// Configuration of a general Substrate test genesis block. pub struct GenesisConfig { - changes_trie_config: Option, - authorities: Vec, - balances: Vec<(AccountId, u64)>, - heap_pages_override: Option, - /// Additional storage key pairs that will be added to the genesis map. - extra_storage: Storage, + changes_trie_config: Option, + authorities: Vec, + balances: Vec<(AccountId, u64)>, + heap_pages_override: Option, + /// Additional storage key pairs that will be added to the genesis map. 
+ extra_storage: Storage, } impl GenesisConfig { - pub fn new( - changes_trie_config: Option, - authorities: Vec, - endowed_accounts: Vec, - balance: u64, - heap_pages_override: Option, - extra_storage: Storage, - ) -> Self { - GenesisConfig { - changes_trie_config, - authorities: authorities.clone(), - balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), - heap_pages_override, - extra_storage, - } - } + pub fn new( + changes_trie_config: Option, + authorities: Vec, + endowed_accounts: Vec, + balance: u64, + heap_pages_override: Option, + extra_storage: Storage, + ) -> Self { + GenesisConfig { + changes_trie_config, + authorities: authorities.clone(), + balances: endowed_accounts.into_iter().map(|a| (a, balance)).collect(), + heap_pages_override, + extra_storage, + } + } - pub fn genesis_map(&self) -> Storage { - let wasm_runtime = WASM_BINARY.to_vec(); - let mut map: BTreeMap, Vec> = self.balances.iter() - .map(|&(ref account, balance)| (account.to_keyed_vec(b"balance:"), vec![].and(&balance))) - .map(|(k, v)| (blake2_256(&k[..])[..].to_vec(), v.to_vec())) - .chain(vec![ - (well_known_keys::CODE.into(), wasm_runtime), - ( - well_known_keys::HEAP_PAGES.into(), - vec![].and(&(self.heap_pages_override.unwrap_or(16 as u64))), - ), - ].into_iter()) - .collect(); - if let Some(ref changes_trie_config) = self.changes_trie_config { - map.insert(well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), changes_trie_config.encode()); - } - map.insert(twox_128(&b"sys:auth"[..])[..].to_vec(), self.authorities.encode()); - // Add the extra storage entries. 
- map.extend(self.extra_storage.top.clone().into_iter()); + pub fn genesis_map(&self) -> Storage { + let wasm_runtime = WASM_BINARY.to_vec(); + let mut map: BTreeMap, Vec> = self + .balances + .iter() + .map(|&(ref account, balance)| { + (account.to_keyed_vec(b"balance:"), vec![].and(&balance)) + }) + .map(|(k, v)| (blake2_256(&k[..])[..].to_vec(), v.to_vec())) + .chain( + vec![ + (well_known_keys::CODE.into(), wasm_runtime), + ( + well_known_keys::HEAP_PAGES.into(), + vec![].and(&(self.heap_pages_override.unwrap_or(16 as u64))), + ), + ] + .into_iter(), + ) + .collect(); + if let Some(ref changes_trie_config) = self.changes_trie_config { + map.insert( + well_known_keys::CHANGES_TRIE_CONFIG.to_vec(), + changes_trie_config.encode(), + ); + } + map.insert( + twox_128(&b"sys:auth"[..])[..].to_vec(), + self.authorities.encode(), + ); + // Add the extra storage entries. + map.extend(self.extra_storage.top.clone().into_iter()); - // Assimilate the system genesis config. - let mut storage = Storage { top: map, children: self.extra_storage.children.clone()}; - let mut config = system::GenesisConfig::default(); - config.authorities = self.authorities.clone(); - config.assimilate_storage(&mut storage).expect("Adding `system::GensisConfig` to the genesis"); + // Assimilate the system genesis config. 
+ let mut storage = Storage { + top: map, + children: self.extra_storage.children.clone(), + }; + let mut config = system::GenesisConfig::default(); + config.authorities = self.authorities.clone(); + config + .assimilate_storage(&mut storage) + .expect("Adding `system::GensisConfig` to the genesis"); - storage - } + storage + } } -pub fn insert_genesis_block( - storage: &mut Storage, -) -> sp_core::hash::H256 { - let child_roots = storage.children.iter().map(|(sk, child_content)| { - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - child_content.data.clone().into_iter().collect(), - ); - (sk.clone(), state_root.encode()) - }); - // add child roots to storage - storage.top.extend(child_roots); - let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( - storage.top.clone().into_iter().collect() - ); - let block: crate::Block = sc_client::genesis::construct_genesis_block(state_root); - let genesis_hash = block.header.hash(); - storage.top.extend(additional_storage_with_genesis(&block)); - genesis_hash +pub fn insert_genesis_block(storage: &mut Storage) -> sp_core::hash::H256 { + let child_roots = storage.children.iter().map(|(sk, child_content)| { + let state_root = + <<::Header as HeaderT>::Hashing as HashT>::trie_root( + child_content.data.clone().into_iter().collect(), + ); + (sk.clone(), state_root.encode()) + }); + // add child roots to storage + storage.top.extend(child_roots); + let state_root = <<::Header as HeaderT>::Hashing as HashT>::trie_root( + storage.top.clone().into_iter().collect(), + ); + let block: crate::Block = sc_client::genesis::construct_genesis_block(state_root); + let genesis_hash = block.header.hash(); + storage.top.extend(additional_storage_with_genesis(&block)); + genesis_hash } pub fn additional_storage_with_genesis(genesis_block: &crate::Block) -> BTreeMap, Vec> { - map![ - twox_128(&b"latest"[..]).to_vec() => genesis_block.hash().as_fixed_bytes().to_vec() - ] + map![ + 
twox_128(&b"latest"[..]).to_vec() => genesis_block.hash().as_fixed_bytes().to_vec() + ] } diff --git a/test-utils/runtime/src/lib.rs b/test-utils/runtime/src/lib.rs index 65fbf300bb..a2cef450ba 100644 --- a/test-utils/runtime/src/lib.rs +++ b/test-utils/runtime/src/lib.rs @@ -22,35 +22,39 @@ pub mod genesismap; pub mod system; -use sp_std::{prelude::*, marker::PhantomData}; -use codec::{Encode, Decode, Input, Error}; +use codec::{Decode, Encode, Error, Input}; +use sp_std::{marker::PhantomData, prelude::*}; -use sp_core::{OpaqueMetadata, RuntimeDebug, ChangesTrieConfiguration}; use sp_application_crypto::{ed25519, sr25519, RuntimeAppPublic}; -use trie_db::{TrieMut, Trie}; -use sp_trie::PrefixedMemoryDB; +use sp_core::{ChangesTrieConfiguration, OpaqueMetadata, RuntimeDebug}; use sp_trie::trie_types::{TrieDB, TrieDBMut}; +use sp_trie::PrefixedMemoryDB; +use trie_db::{Trie, TrieMut}; +use cfg_if::cfg_if; +use frame_support::{ + impl_outer_origin, parameter_types, + weights::{RuntimeDbWeight, Weight}, +}; use sp_api::{decl_runtime_apis, impl_runtime_apis}; +pub use sp_core::hash::H256; +use sp_core::storage::ChildType; +use sp_inherents::{CheckInherentsResult, InherentData}; use sp_runtime::{ - ApplyExtrinsicResult, create_runtime_str, Perbill, impl_opaque_keys, - transaction_validity::{ - TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, - TransactionSource, - }, - traits::{ - BlindCheckable, BlakeTwo256, Block as BlockT, Extrinsic as ExtrinsicT, - GetNodeBlockType, GetRuntimeBlockType, Verify, IdentityLookup, - }, + create_runtime_str, impl_opaque_keys, + traits::{ + BlakeTwo256, BlindCheckable, Block as BlockT, Extrinsic as ExtrinsicT, GetNodeBlockType, + GetRuntimeBlockType, IdentityLookup, Verify, + }, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + ValidTransaction, + }, + ApplyExtrinsicResult, Perbill, }; -use sp_version::RuntimeVersion; -pub use 
sp_core::hash::H256; #[cfg(any(feature = "std", test))] use sp_version::NativeVersion; -use frame_support::{impl_outer_origin, parameter_types, weights::{Weight, RuntimeDbWeight}}; -use sp_inherents::{CheckInherentsResult, InherentData}; -use cfg_if::cfg_if; -use sp_core::storage::ChildType; +use sp_version::RuntimeVersion; // Ensure Babe and Aura use the same crypto to simplify things a bit. pub use sp_consensus_babe::{AuthorityId, SlotNumber}; @@ -62,143 +66,159 @@ include!(concat!(env!("OUT_DIR"), "/wasm_binary.rs")); /// Test runtime version. pub const VERSION: RuntimeVersion = RuntimeVersion { - spec_name: create_runtime_str!("test"), - impl_name: create_runtime_str!("parity-test"), - authoring_version: 1, - spec_version: 2, - impl_version: 2, - apis: RUNTIME_API_VERSIONS, - transaction_version: 1, + spec_name: create_runtime_str!("test"), + impl_name: create_runtime_str!("parity-test"), + authoring_version: 1, + spec_version: 2, + impl_version: 2, + apis: RUNTIME_API_VERSIONS, + transaction_version: 1, }; fn version() -> RuntimeVersion { - VERSION + VERSION } /// Native version. #[cfg(any(feature = "std", test))] pub fn native_version() -> NativeVersion { - NativeVersion { - runtime_version: VERSION, - can_author_with: Default::default(), - } + NativeVersion { + runtime_version: VERSION, + can_author_with: Default::default(), + } } /// Calls in transactions. #[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub struct Transfer { - pub from: AccountId, - pub to: AccountId, - pub amount: u64, - pub nonce: u64, + pub from: AccountId, + pub to: AccountId, + pub amount: u64, + pub nonce: u64, } impl Transfer { - /// Convert into a signed extrinsic. 
- #[cfg(feature = "std")] - pub fn into_signed_tx(self) -> Extrinsic { - let signature = sp_keyring::AccountKeyring::from_public(&self.from) - .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer { - transfer: self, - signature, - exhaust_resources_when_not_first: false, - } - } - - /// Convert into a signed extrinsic, which will only end up included in the block - /// if it's the first transaction. Otherwise it will cause `ResourceExhaustion` error - /// which should be considered as block being full. - #[cfg(feature = "std")] - pub fn into_resources_exhausting_tx(self) -> Extrinsic { - let signature = sp_keyring::AccountKeyring::from_public(&self.from) - .expect("Creates keyring from public key.").sign(&self.encode()).into(); - Extrinsic::Transfer { - transfer: self, - signature, - exhaust_resources_when_not_first: true, - } - } + /// Convert into a signed extrinsic. + #[cfg(feature = "std")] + pub fn into_signed_tx(self) -> Extrinsic { + let signature = sp_keyring::AccountKeyring::from_public(&self.from) + .expect("Creates keyring from public key.") + .sign(&self.encode()) + .into(); + Extrinsic::Transfer { + transfer: self, + signature, + exhaust_resources_when_not_first: false, + } + } + + /// Convert into a signed extrinsic, which will only end up included in the block + /// if it's the first transaction. Otherwise it will cause `ResourceExhaustion` error + /// which should be considered as block being full. + #[cfg(feature = "std")] + pub fn into_resources_exhausting_tx(self) -> Extrinsic { + let signature = sp_keyring::AccountKeyring::from_public(&self.from) + .expect("Creates keyring from public key.") + .sign(&self.encode()) + .into(); + Extrinsic::Transfer { + transfer: self, + signature, + exhaust_resources_when_not_first: true, + } + } } /// Extrinsic for test-runtime. 
#[derive(Clone, PartialEq, Eq, Encode, Decode, RuntimeDebug)] pub enum Extrinsic { - AuthoritiesChange(Vec), - Transfer { - transfer: Transfer, - signature: AccountSignature, - exhaust_resources_when_not_first: bool, - }, - IncludeData(Vec), - StorageChange(Vec, Option>), - ChangesTrieConfigUpdate(Option), + AuthoritiesChange(Vec), + Transfer { + transfer: Transfer, + signature: AccountSignature, + exhaust_resources_when_not_first: bool, + }, + IncludeData(Vec), + StorageChange(Vec, Option>), + ChangesTrieConfigUpdate(Option), } parity_util_mem::malloc_size_of_is_0!(Extrinsic); // non-opaque extrinsic does not need this #[cfg(feature = "std")] impl serde::Serialize for Extrinsic { - fn serialize(&self, seq: S) -> Result where S: ::serde::Serializer { - self.using_encoded(|bytes| seq.serialize_bytes(bytes)) - } + fn serialize(&self, seq: S) -> Result + where + S: ::serde::Serializer, + { + self.using_encoded(|bytes| seq.serialize_bytes(bytes)) + } } impl BlindCheckable for Extrinsic { - type Checked = Self; - - fn check(self) -> Result { - match self { - Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), - Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first } => { - if sp_runtime::verify_encoded_lazy(&signature, &transfer, &transfer.from) { - Ok(Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first }) - } else { - Err(InvalidTransaction::BadProof.into()) - } - }, - Extrinsic::IncludeData(_) => Err(InvalidTransaction::BadProof.into()), - Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), - Extrinsic::ChangesTrieConfigUpdate(new_config) => - Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)), - } - } + type Checked = Self; + + fn check(self) -> Result { + match self { + Extrinsic::AuthoritiesChange(new_auth) => Ok(Extrinsic::AuthoritiesChange(new_auth)), + Extrinsic::Transfer { + transfer, + signature, + exhaust_resources_when_not_first, + } => { + if 
sp_runtime::verify_encoded_lazy(&signature, &transfer, &transfer.from) { + Ok(Extrinsic::Transfer { + transfer, + signature, + exhaust_resources_when_not_first, + }) + } else { + Err(InvalidTransaction::BadProof.into()) + } + } + Extrinsic::IncludeData(_) => Err(InvalidTransaction::BadProof.into()), + Extrinsic::StorageChange(key, value) => Ok(Extrinsic::StorageChange(key, value)), + Extrinsic::ChangesTrieConfigUpdate(new_config) => { + Ok(Extrinsic::ChangesTrieConfigUpdate(new_config)) + } + } + } } impl ExtrinsicT for Extrinsic { - type Call = Extrinsic; - type SignaturePayload = (); - - fn is_signed(&self) -> Option { - if let Extrinsic::IncludeData(_) = *self { - Some(false) - } else { - Some(true) - } - } - - fn new(call: Self::Call, _signature_payload: Option) -> Option { - Some(call) - } + type Call = Extrinsic; + type SignaturePayload = (); + + fn is_signed(&self) -> Option { + if let Extrinsic::IncludeData(_) = *self { + Some(false) + } else { + Some(true) + } + } + + fn new(call: Self::Call, _signature_payload: Option) -> Option { + Some(call) + } } impl sp_runtime::traits::Dispatchable for Extrinsic { - type Origin = (); - type Trait = (); - type Info = (); - type PostInfo = (); - fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { - panic!("This implemention should not be used for actual dispatch."); - } + type Origin = (); + type Trait = (); + type Info = (); + type PostInfo = (); + fn dispatch(self, _origin: Self::Origin) -> sp_runtime::DispatchResultWithInfo { + panic!("This implemention should not be used for actual dispatch."); + } } impl Extrinsic { - pub fn transfer(&self) -> &Transfer { - match self { - Extrinsic::Transfer { ref transfer, .. } => transfer, - _ => panic!("cannot convert to transfer ref"), - } - } + pub fn transfer(&self) -> &Transfer { + match self { + Extrinsic::Transfer { ref transfer, .. 
} => transfer, + _ => panic!("cannot convert to transfer ref"), + } + } } /// The signature type used by accounts/transactions. @@ -222,799 +242,810 @@ pub type Header = sp_runtime::generic::Header; /// Run whatever tests we have. pub fn run_tests(mut input: &[u8]) -> Vec { - use sp_runtime::print; - - print("run_tests..."); - let block = Block::decode(&mut input).unwrap(); - print("deserialized block."); - let stxs = block.extrinsics.iter().map(Encode::encode).collect::>(); - print("reserialized transactions."); - [stxs.len() as u8].encode() + use sp_runtime::print; + + print("run_tests..."); + let block = Block::decode(&mut input).unwrap(); + print("deserialized block."); + let stxs = block + .extrinsics + .iter() + .map(Encode::encode) + .collect::>(); + print("reserialized transactions."); + [stxs.len() as u8].encode() } /// A type that can not be decoded. #[derive(PartialEq)] pub struct DecodeFails { - _phantom: PhantomData, + _phantom: PhantomData, } impl Encode for DecodeFails { - fn encode(&self) -> Vec { - Vec::new() - } + fn encode(&self) -> Vec { + Vec::new() + } } impl codec::EncodeLike for DecodeFails {} impl DecodeFails { - /// Create a new instance. - pub fn new() -> DecodeFails { - DecodeFails { - _phantom: Default::default(), - } - } + /// Create a new instance. + pub fn new() -> DecodeFails { + DecodeFails { + _phantom: Default::default(), + } + } } impl Decode for DecodeFails { - fn decode(_: &mut I) -> Result { - Err("DecodeFails always fails".into()) - } + fn decode(_: &mut I) -> Result { + Err("DecodeFails always fails".into()) + } } cfg_if! { - if #[cfg(feature = "std")] { - decl_runtime_apis! { - #[api_version(2)] - pub trait TestAPI { - /// Return the balance of the given account id. - fn balance_of(id: AccountId) -> u64; - /// A benchmark function that adds one to the given value and returns the result. 
- fn benchmark_add_one(val: &u64) -> u64; - /// A benchmark function that adds one to each value in the given vector and returns the - /// result. - fn benchmark_vector_add_one(vec: &Vec) -> Vec; - /// A function that always fails to convert a parameter between runtime and node. - fn fail_convert_parameter(param: DecodeFails); - /// A function that always fails to convert its return value between runtime and node. - fn fail_convert_return_value() -> DecodeFails; - /// A function for that the signature changed in version `2`. - #[changed_in(2)] - fn function_signature_changed() -> Vec; - /// The new signature. - fn function_signature_changed() -> u64; - fn fail_on_native() -> u64; - fn fail_on_wasm() -> u64; - /// trie no_std testing - fn use_trie() -> u64; - fn benchmark_indirect_call() -> u64; - fn benchmark_direct_call() -> u64; - fn vec_with_capacity(size: u32) -> Vec; - /// Returns the initialized block number. - fn get_block_number() -> u64; - /// Takes and returns the initialized block number. - fn take_block_number() -> Option; - /// Returns if no block was initialized. - #[skip_initialize_block] - fn without_initialize_block() -> bool; - /// Test that `ed25519` crypto works in the runtime. - /// - /// Returns the signature generated for the message `ed25519` and the public key. - fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic); - /// Test that `sr25519` crypto works in the runtime. - /// - /// Returns the signature generated for the message `sr25519`. - fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic); - /// Run various tests against storage. - fn test_storage(); - } - } - } else { - decl_runtime_apis! { - pub trait TestAPI { - /// Return the balance of the given account id. - fn balance_of(id: AccountId) -> u64; - /// A benchmark function that adds one to the given value and returns the result. 
- fn benchmark_add_one(val: &u64) -> u64; - /// A benchmark function that adds one to each value in the given vector and returns the - /// result. - fn benchmark_vector_add_one(vec: &Vec) -> Vec; - /// A function that always fails to convert a parameter between runtime and node. - fn fail_convert_parameter(param: DecodeFails); - /// A function that always fails to convert its return value between runtime and node. - fn fail_convert_return_value() -> DecodeFails; - /// In wasm we just emulate the old behavior. - fn function_signature_changed() -> Vec; - fn fail_on_native() -> u64; - fn fail_on_wasm() -> u64; - /// trie no_std testing - fn use_trie() -> u64; - fn benchmark_indirect_call() -> u64; - fn benchmark_direct_call() -> u64; - fn vec_with_capacity(size: u32) -> Vec; - /// Returns the initialized block number. - fn get_block_number() -> u64; - /// Takes and returns the initialized block number. - fn take_block_number() -> Option; - /// Returns if no block was initialized. - #[skip_initialize_block] - fn without_initialize_block() -> bool; - /// Test that `ed25519` crypto works in the runtime. - /// - /// Returns the signature generated for the message `ed25519` and the public key. - fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic); - /// Test that `sr25519` crypto works in the runtime. - /// - /// Returns the signature generated for the message `sr25519`. - fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic); - /// Run various tests against storage. - fn test_storage(); - } - } - } + if #[cfg(feature = "std")] { + decl_runtime_apis! { + #[api_version(2)] + pub trait TestAPI { + /// Return the balance of the given account id. + fn balance_of(id: AccountId) -> u64; + /// A benchmark function that adds one to the given value and returns the result. + fn benchmark_add_one(val: &u64) -> u64; + /// A benchmark function that adds one to each value in the given vector and returns the + /// result. 
+ fn benchmark_vector_add_one(vec: &Vec) -> Vec; + /// A function that always fails to convert a parameter between runtime and node. + fn fail_convert_parameter(param: DecodeFails); + /// A function that always fails to convert its return value between runtime and node. + fn fail_convert_return_value() -> DecodeFails; + /// A function for that the signature changed in version `2`. + #[changed_in(2)] + fn function_signature_changed() -> Vec; + /// The new signature. + fn function_signature_changed() -> u64; + fn fail_on_native() -> u64; + fn fail_on_wasm() -> u64; + /// trie no_std testing + fn use_trie() -> u64; + fn benchmark_indirect_call() -> u64; + fn benchmark_direct_call() -> u64; + fn vec_with_capacity(size: u32) -> Vec; + /// Returns the initialized block number. + fn get_block_number() -> u64; + /// Takes and returns the initialized block number. + fn take_block_number() -> Option; + /// Returns if no block was initialized. + #[skip_initialize_block] + fn without_initialize_block() -> bool; + /// Test that `ed25519` crypto works in the runtime. + /// + /// Returns the signature generated for the message `ed25519` and the public key. + fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic); + /// Test that `sr25519` crypto works in the runtime. + /// + /// Returns the signature generated for the message `sr25519`. + fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic); + /// Run various tests against storage. + fn test_storage(); + } + } + } else { + decl_runtime_apis! { + pub trait TestAPI { + /// Return the balance of the given account id. + fn balance_of(id: AccountId) -> u64; + /// A benchmark function that adds one to the given value and returns the result. + fn benchmark_add_one(val: &u64) -> u64; + /// A benchmark function that adds one to each value in the given vector and returns the + /// result. 
+ fn benchmark_vector_add_one(vec: &Vec) -> Vec; + /// A function that always fails to convert a parameter between runtime and node. + fn fail_convert_parameter(param: DecodeFails); + /// A function that always fails to convert its return value between runtime and node. + fn fail_convert_return_value() -> DecodeFails; + /// In wasm we just emulate the old behavior. + fn function_signature_changed() -> Vec; + fn fail_on_native() -> u64; + fn fail_on_wasm() -> u64; + /// trie no_std testing + fn use_trie() -> u64; + fn benchmark_indirect_call() -> u64; + fn benchmark_direct_call() -> u64; + fn vec_with_capacity(size: u32) -> Vec; + /// Returns the initialized block number. + fn get_block_number() -> u64; + /// Takes and returns the initialized block number. + fn take_block_number() -> Option; + /// Returns if no block was initialized. + #[skip_initialize_block] + fn without_initialize_block() -> bool; + /// Test that `ed25519` crypto works in the runtime. + /// + /// Returns the signature generated for the message `ed25519` and the public key. + fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic); + /// Test that `sr25519` crypto works in the runtime. + /// + /// Returns the signature generated for the message `sr25519`. + fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic); + /// Run various tests against storage. + fn test_storage(); + } + } + } } #[derive(Clone, Eq, PartialEq)] pub struct Runtime; impl GetNodeBlockType for Runtime { - type NodeBlock = Block; + type NodeBlock = Block; } impl GetRuntimeBlockType for Runtime { - type RuntimeBlock = Block; + type RuntimeBlock = Block; } -impl_outer_origin!{ - pub enum Origin for Runtime where system = frame_system {} +impl_outer_origin! 
{ + pub enum Origin for Runtime where system = frame_system {} } #[derive(Clone, Encode, Decode, Eq, PartialEq, RuntimeDebug)] pub struct Event; impl From> for Event { - fn from(_evt: frame_system::Event) -> Self { - unimplemented!("Not required in tests!") - } + fn from(_evt: frame_system::Event) -> Self { + unimplemented!("Not required in tests!") + } } parameter_types! { - pub const BlockHashCount: BlockNumber = 250; - pub const MinimumPeriod: u64 = 5; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 100, - write: 1000, - }; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const BlockHashCount: BlockNumber = 250; + pub const MinimumPeriod: u64 = 5; + pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 100, + write: 1000, + }; + pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } impl frame_system::Trait for Runtime { - type Origin = Origin; - type Call = Extrinsic; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Call = Extrinsic; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = 
BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl pallet_timestamp::Trait for Runtime { - /// A timestamp: milliseconds since the unix epoch. - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; + /// A timestamp: milliseconds since the unix epoch. + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; } parameter_types! { - pub const EpochDuration: u64 = 6; - pub const ExpectedBlockTime: u64 = 10_000; + pub const EpochDuration: u64 = 6; + pub const ExpectedBlockTime: u64 = 10_000; } impl pallet_babe::Trait for Runtime { - type EpochDuration = EpochDuration; - type ExpectedBlockTime = ExpectedBlockTime; - // there is no actual runtime in this test-runtime, so testing crates - // are manually adding the digests. normally in this situation you'd use - // pallet_babe::SameAuthoritiesForever. - type EpochChangeTrigger = pallet_babe::ExternalTrigger; + type EpochDuration = EpochDuration; + type ExpectedBlockTime = ExpectedBlockTime; + // there is no actual runtime in this test-runtime, so testing crates + // are manually adding the digests. normally in this situation you'd use + // pallet_babe::SameAuthoritiesForever. + type EpochChangeTrigger = pallet_babe::ExternalTrigger; } /// Adds one to the given input and returns the final result. #[inline(never)] fn benchmark_add_one(i: u64) -> u64 { - i + 1 + i + 1 } /// The `benchmark_add_one` function as function pointer. 
#[cfg(not(feature = "std"))] static BENCHMARK_ADD_ONE: sp_runtime_interface::wasm::ExchangeableFunction u64> = - sp_runtime_interface::wasm::ExchangeableFunction::new(benchmark_add_one); + sp_runtime_interface::wasm::ExchangeableFunction::new(benchmark_add_one); fn code_using_trie() -> u64 { - let pairs = [ - (b"0103000000000000000464".to_vec(), b"0400000000".to_vec()), - (b"0103000000000000000469".to_vec(), b"0401000000".to_vec()), - ].to_vec(); - - let mut mdb = PrefixedMemoryDB::default(); - let mut root = sp_std::default::Default::default(); - let _ = { - let v = &pairs; - let mut t = TrieDBMut::::new(&mut mdb, &mut root); - for i in 0..v.len() { - let key: &[u8]= &v[i].0; - let val: &[u8] = &v[i].1; - if !t.insert(key, val).is_ok() { - return 101; - } - } - t - }; - - if let Ok(trie) = TrieDB::::new(&mdb, &root) { - if let Ok(iter) = trie.iter() { - let mut iter_pairs = Vec::new(); - for pair in iter { - if let Ok((key, value)) = pair { - iter_pairs.push((key, value.to_vec())); - } - } - iter_pairs.len() as u64 - } else { 102 } - } else { 103 } + let pairs = [ + (b"0103000000000000000464".to_vec(), b"0400000000".to_vec()), + (b"0103000000000000000469".to_vec(), b"0401000000".to_vec()), + ] + .to_vec(); + + let mut mdb = PrefixedMemoryDB::default(); + let mut root = sp_std::default::Default::default(); + let _ = { + let v = &pairs; + let mut t = TrieDBMut::::new(&mut mdb, &mut root); + for i in 0..v.len() { + let key: &[u8] = &v[i].0; + let val: &[u8] = &v[i].1; + if !t.insert(key, val).is_ok() { + return 101; + } + } + t + }; + + if let Ok(trie) = TrieDB::::new(&mdb, &root) { + if let Ok(iter) = trie.iter() { + let mut iter_pairs = Vec::new(); + for pair in iter { + if let Ok((key, value)) = pair { + iter_pairs.push((key, value.to_vec())); + } + } + iter_pairs.len() as u64 + } else { + 102 + } + } else { + 103 + } } impl_opaque_keys! 
{ - pub struct SessionKeys { - pub ed25519: ed25519::AppPublic, - pub sr25519: sr25519::AppPublic, - } + pub struct SessionKeys { + pub ed25519: ed25519::AppPublic, + pub sr25519: sr25519::AppPublic, + } } cfg_if! { - if #[cfg(feature = "std")] { - impl_runtime_apis! { - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - version() - } - - fn execute_block(block: Block) { - system::execute_block(block) - } - - fn initialize_block(header: &::Header) { - system::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - unimplemented!() - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - _source: TransactionSource, - utx: ::Extrinsic, - ) -> TransactionValidity { - if let Extrinsic::IncludeData(data) = utx { - return Ok(ValidTransaction { - priority: data.len() as u64, - requires: vec![], - provides: vec![data], - longevity: 1, - propagate: false, - }); - } - - system::validate_transaction(utx) - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - system::execute_transaction(extrinsic) - } - - fn finalize_block() -> ::Header { - system::finalize_block() - } - - fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { - vec![] - } - - fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { - CheckInherentsResult::new() - } - - fn random_seed() -> ::Hash { - unimplemented!() - } - } - - impl self::TestAPI for Runtime { - fn balance_of(id: AccountId) -> u64 { - system::balance_of(id) - } - - fn benchmark_add_one(val: &u64) -> u64 { - val + 1 - } - - fn benchmark_vector_add_one(vec: &Vec) -> Vec { - let mut vec = vec.clone(); - vec.iter_mut().for_each(|v| *v += 1); - vec - } - - fn fail_convert_parameter(_: DecodeFails) {} - - fn fail_convert_return_value() -> DecodeFails { - DecodeFails::new() - } - - fn 
function_signature_changed() -> u64 { - 1 - } - - fn fail_on_native() -> u64 { - panic!("Failing because we are on native") - } - fn fail_on_wasm() -> u64 { - 1 - } - - fn use_trie() -> u64 { - code_using_trie() - } - - fn benchmark_indirect_call() -> u64 { - let function = benchmark_add_one; - (0..1000).fold(0, |p, i| p + function(i)) - } - fn benchmark_direct_call() -> u64 { - (0..1000).fold(0, |p, i| p + benchmark_add_one(i)) - } - - fn vec_with_capacity(_size: u32) -> Vec { - unimplemented!("is not expected to be invoked from non-wasm builds"); - } - - fn get_block_number() -> u64 { - system::get_block_number().expect("Block number is initialized") - } - - fn without_initialize_block() -> bool { - system::get_block_number().is_none() - } - - fn take_block_number() -> Option { - system::take_block_number() - } - - fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { - test_ed25519_crypto() - } - - fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { - test_sr25519_crypto() - } - - fn test_storage() { - test_read_storage(); - test_read_child_storage(); - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1000 } - fn authorities() -> Vec { - system::authorities().into_iter().map(|a| { - let authority: sr25519::Public = a.into(); - AuraId::from(authority) - }).collect() - } - } - - impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeConfiguration { - sp_consensus_babe::BabeConfiguration { - slot_duration: 1000, - epoch_length: EpochDuration::get(), - c: (3, 10), - genesis_authorities: system::authorities() - .into_iter().map(|x|(x, 1)).collect(), - randomness: >::randomness(), - secondary_slots: true, - } - } - - fn current_epoch_start() -> SlotNumber { - >::current_epoch_start() - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - let ex = Extrinsic::IncludeData(header.number.encode()); - 
sp_io::offchain::submit_transaction(ex.encode()).unwrap(); - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(_: Option>) -> Vec { - SessionKeys::generate(None) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(_account: AccountId) -> Index { - 0 - } - } - } - } else { - impl_runtime_apis! { - impl sp_api::Core for Runtime { - fn version() -> RuntimeVersion { - version() - } - - fn execute_block(block: Block) { - system::execute_block(block) - } - - fn initialize_block(header: &::Header) { - system::initialize_block(header) - } - } - - impl sp_api::Metadata for Runtime { - fn metadata() -> OpaqueMetadata { - unimplemented!() - } - } - - impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { - fn validate_transaction( - _source: TransactionSource, - utx: ::Extrinsic, - ) -> TransactionValidity { - if let Extrinsic::IncludeData(data) = utx { - return Ok(ValidTransaction{ - priority: data.len() as u64, - requires: vec![], - provides: vec![data], - longevity: 1, - propagate: false, - }); - } - - system::validate_transaction(utx) - } - } - - impl sp_block_builder::BlockBuilder for Runtime { - fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { - system::execute_transaction(extrinsic) - } - - fn finalize_block() -> ::Header { - system::finalize_block() - } - - fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { - vec![] - } - - fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { - CheckInherentsResult::new() - } - - fn random_seed() -> ::Hash { - unimplemented!() - } - } - - impl self::TestAPI for Runtime { - fn balance_of(id: AccountId) -> u64 { - system::balance_of(id) - } - - fn benchmark_add_one(val: &u64) -> u64 { - val + 1 - } - - fn benchmark_vector_add_one(vec: 
&Vec) -> Vec { - let mut vec = vec.clone(); - vec.iter_mut().for_each(|v| *v += 1); - vec - } - - fn fail_convert_parameter(_: DecodeFails) {} - - fn fail_convert_return_value() -> DecodeFails { - DecodeFails::new() - } - - fn function_signature_changed() -> Vec { - let mut vec = Vec::new(); - vec.push(1); - vec.push(2); - vec - } - - fn fail_on_native() -> u64 { - 1 - } - - fn fail_on_wasm() -> u64 { - panic!("Failing because we are on wasm") - } - - fn use_trie() -> u64 { - code_using_trie() - } - - fn benchmark_indirect_call() -> u64 { - (0..10000).fold(0, |p, i| p + BENCHMARK_ADD_ONE.get()(i)) - } - - fn benchmark_direct_call() -> u64 { - (0..10000).fold(0, |p, i| p + benchmark_add_one(i)) - } - - fn vec_with_capacity(size: u32) -> Vec { - Vec::with_capacity(size as usize) - } - - fn get_block_number() -> u64 { - system::get_block_number().expect("Block number is initialized") - } - - fn without_initialize_block() -> bool { - system::get_block_number().is_none() - } - - fn take_block_number() -> Option { - system::take_block_number() - } - - fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { - test_ed25519_crypto() - } - - fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { - test_sr25519_crypto() - } - - fn test_storage() { - test_read_storage(); - test_read_child_storage(); - } - } - - impl sp_consensus_aura::AuraApi for Runtime { - fn slot_duration() -> u64 { 1000 } - fn authorities() -> Vec { - system::authorities().into_iter().map(|a| { - let authority: sr25519::Public = a.into(); - AuraId::from(authority) - }).collect() - } - } - - impl sp_consensus_babe::BabeApi for Runtime { - fn configuration() -> sp_consensus_babe::BabeConfiguration { - sp_consensus_babe::BabeConfiguration { - slot_duration: 1000, - epoch_length: EpochDuration::get(), - c: (3, 10), - genesis_authorities: system::authorities() - .into_iter().map(|x|(x, 1)).collect(), - randomness: >::randomness(), - secondary_slots: true, - } - } - - fn 
current_epoch_start() -> SlotNumber { - >::current_epoch_start() - } - } - - impl sp_offchain::OffchainWorkerApi for Runtime { - fn offchain_worker(header: &::Header) { - let ex = Extrinsic::IncludeData(header.number.encode()); - sp_io::offchain::submit_transaction(ex.encode()).unwrap() - } - } - - impl sp_session::SessionKeys for Runtime { - fn generate_session_keys(_: Option>) -> Vec { - SessionKeys::generate(None) - } - - fn decode_session_keys( - encoded: Vec, - ) -> Option, sp_core::crypto::KeyTypeId)>> { - SessionKeys::decode_into_raw_public_keys(&encoded) - } - } - - impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { - fn account_nonce(_account: AccountId) -> Index { - 0 - } - } - } - } + if #[cfg(feature = "std")] { + impl_runtime_apis! { + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + version() + } + + fn execute_block(block: Block) { + system::execute_block(block) + } + + fn initialize_block(header: &::Header) { + system::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + _source: TransactionSource, + utx: ::Extrinsic, + ) -> TransactionValidity { + if let Extrinsic::IncludeData(data) = utx { + return Ok(ValidTransaction { + priority: data.len() as u64, + requires: vec![], + provides: vec![data], + longevity: 1, + propagate: false, + }); + } + + system::validate_transaction(utx) + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + system::execute_transaction(extrinsic) + } + + fn finalize_block() -> ::Header { + system::finalize_block() + } + + fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { + vec![] + } + + fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { + CheckInherentsResult::new() + } + + fn 
random_seed() -> ::Hash { + unimplemented!() + } + } + + impl self::TestAPI for Runtime { + fn balance_of(id: AccountId) -> u64 { + system::balance_of(id) + } + + fn benchmark_add_one(val: &u64) -> u64 { + val + 1 + } + + fn benchmark_vector_add_one(vec: &Vec) -> Vec { + let mut vec = vec.clone(); + vec.iter_mut().for_each(|v| *v += 1); + vec + } + + fn fail_convert_parameter(_: DecodeFails) {} + + fn fail_convert_return_value() -> DecodeFails { + DecodeFails::new() + } + + fn function_signature_changed() -> u64 { + 1 + } + + fn fail_on_native() -> u64 { + panic!("Failing because we are on native") + } + fn fail_on_wasm() -> u64 { + 1 + } + + fn use_trie() -> u64 { + code_using_trie() + } + + fn benchmark_indirect_call() -> u64 { + let function = benchmark_add_one; + (0..1000).fold(0, |p, i| p + function(i)) + } + fn benchmark_direct_call() -> u64 { + (0..1000).fold(0, |p, i| p + benchmark_add_one(i)) + } + + fn vec_with_capacity(_size: u32) -> Vec { + unimplemented!("is not expected to be invoked from non-wasm builds"); + } + + fn get_block_number() -> u64 { + system::get_block_number().expect("Block number is initialized") + } + + fn without_initialize_block() -> bool { + system::get_block_number().is_none() + } + + fn take_block_number() -> Option { + system::take_block_number() + } + + fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { + test_ed25519_crypto() + } + + fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { + test_sr25519_crypto() + } + + fn test_storage() { + test_read_storage(); + test_read_child_storage(); + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> u64 { 1000 } + fn authorities() -> Vec { + system::authorities().into_iter().map(|a| { + let authority: sr25519::Public = a.into(); + AuraId::from(authority) + }).collect() + } + } + + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeConfiguration { + 
sp_consensus_babe::BabeConfiguration { + slot_duration: 1000, + epoch_length: EpochDuration::get(), + c: (3, 10), + genesis_authorities: system::authorities() + .into_iter().map(|x|(x, 1)).collect(), + randomness: >::randomness(), + secondary_slots: true, + } + } + + fn current_epoch_start() -> SlotNumber { + >::current_epoch_start() + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + let ex = Extrinsic::IncludeData(header.number.encode()); + sp_io::offchain::submit_transaction(ex.encode()).unwrap(); + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(_: Option>) -> Vec { + SessionKeys::generate(None) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> { + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(_account: AccountId) -> Index { + 0 + } + } + } + } else { + impl_runtime_apis! 
{ + impl sp_api::Core for Runtime { + fn version() -> RuntimeVersion { + version() + } + + fn execute_block(block: Block) { + system::execute_block(block) + } + + fn initialize_block(header: &::Header) { + system::initialize_block(header) + } + } + + impl sp_api::Metadata for Runtime { + fn metadata() -> OpaqueMetadata { + unimplemented!() + } + } + + impl sp_transaction_pool::runtime_api::TaggedTransactionQueue for Runtime { + fn validate_transaction( + _source: TransactionSource, + utx: ::Extrinsic, + ) -> TransactionValidity { + if let Extrinsic::IncludeData(data) = utx { + return Ok(ValidTransaction{ + priority: data.len() as u64, + requires: vec![], + provides: vec![data], + longevity: 1, + propagate: false, + }); + } + + system::validate_transaction(utx) + } + } + + impl sp_block_builder::BlockBuilder for Runtime { + fn apply_extrinsic(extrinsic: ::Extrinsic) -> ApplyExtrinsicResult { + system::execute_transaction(extrinsic) + } + + fn finalize_block() -> ::Header { + system::finalize_block() + } + + fn inherent_extrinsics(_data: InherentData) -> Vec<::Extrinsic> { + vec![] + } + + fn check_inherents(_block: Block, _data: InherentData) -> CheckInherentsResult { + CheckInherentsResult::new() + } + + fn random_seed() -> ::Hash { + unimplemented!() + } + } + + impl self::TestAPI for Runtime { + fn balance_of(id: AccountId) -> u64 { + system::balance_of(id) + } + + fn benchmark_add_one(val: &u64) -> u64 { + val + 1 + } + + fn benchmark_vector_add_one(vec: &Vec) -> Vec { + let mut vec = vec.clone(); + vec.iter_mut().for_each(|v| *v += 1); + vec + } + + fn fail_convert_parameter(_: DecodeFails) {} + + fn fail_convert_return_value() -> DecodeFails { + DecodeFails::new() + } + + fn function_signature_changed() -> Vec { + let mut vec = Vec::new(); + vec.push(1); + vec.push(2); + vec + } + + fn fail_on_native() -> u64 { + 1 + } + + fn fail_on_wasm() -> u64 { + panic!("Failing because we are on wasm") + } + + fn use_trie() -> u64 { + code_using_trie() + } + + fn 
benchmark_indirect_call() -> u64 { + (0..10000).fold(0, |p, i| p + BENCHMARK_ADD_ONE.get()(i)) + } + + fn benchmark_direct_call() -> u64 { + (0..10000).fold(0, |p, i| p + benchmark_add_one(i)) + } + + fn vec_with_capacity(size: u32) -> Vec { + Vec::with_capacity(size as usize) + } + + fn get_block_number() -> u64 { + system::get_block_number().expect("Block number is initialized") + } + + fn without_initialize_block() -> bool { + system::get_block_number().is_none() + } + + fn take_block_number() -> Option { + system::take_block_number() + } + + fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { + test_ed25519_crypto() + } + + fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { + test_sr25519_crypto() + } + + fn test_storage() { + test_read_storage(); + test_read_child_storage(); + } + } + + impl sp_consensus_aura::AuraApi for Runtime { + fn slot_duration() -> u64 { 1000 } + fn authorities() -> Vec { + system::authorities().into_iter().map(|a| { + let authority: sr25519::Public = a.into(); + AuraId::from(authority) + }).collect() + } + } + + impl sp_consensus_babe::BabeApi for Runtime { + fn configuration() -> sp_consensus_babe::BabeConfiguration { + sp_consensus_babe::BabeConfiguration { + slot_duration: 1000, + epoch_length: EpochDuration::get(), + c: (3, 10), + genesis_authorities: system::authorities() + .into_iter().map(|x|(x, 1)).collect(), + randomness: >::randomness(), + secondary_slots: true, + } + } + + fn current_epoch_start() -> SlotNumber { + >::current_epoch_start() + } + } + + impl sp_offchain::OffchainWorkerApi for Runtime { + fn offchain_worker(header: &::Header) { + let ex = Extrinsic::IncludeData(header.number.encode()); + sp_io::offchain::submit_transaction(ex.encode()).unwrap() + } + } + + impl sp_session::SessionKeys for Runtime { + fn generate_session_keys(_: Option>) -> Vec { + SessionKeys::generate(None) + } + + fn decode_session_keys( + encoded: Vec, + ) -> Option, sp_core::crypto::KeyTypeId)>> 
{ + SessionKeys::decode_into_raw_public_keys(&encoded) + } + } + + impl frame_system_rpc_runtime_api::AccountNonceApi for Runtime { + fn account_nonce(_account: AccountId) -> Index { + 0 + } + } + } + } } fn test_ed25519_crypto() -> (ed25519::AppSignature, ed25519::AppPublic) { - let public0 = ed25519::AppPublic::generate_pair(None); - let public1 = ed25519::AppPublic::generate_pair(None); - let public2 = ed25519::AppPublic::generate_pair(None); - - let all = ed25519::AppPublic::all(); - assert!(all.contains(&public0)); - assert!(all.contains(&public1)); - assert!(all.contains(&public2)); - - let signature = public0.sign(&"ed25519").expect("Generates a valid `ed25519` signature."); - assert!(public0.verify(&"ed25519", &signature)); - (signature, public0) + let public0 = ed25519::AppPublic::generate_pair(None); + let public1 = ed25519::AppPublic::generate_pair(None); + let public2 = ed25519::AppPublic::generate_pair(None); + + let all = ed25519::AppPublic::all(); + assert!(all.contains(&public0)); + assert!(all.contains(&public1)); + assert!(all.contains(&public2)); + + let signature = public0 + .sign(&"ed25519") + .expect("Generates a valid `ed25519` signature."); + assert!(public0.verify(&"ed25519", &signature)); + (signature, public0) } fn test_sr25519_crypto() -> (sr25519::AppSignature, sr25519::AppPublic) { - let public0 = sr25519::AppPublic::generate_pair(None); - let public1 = sr25519::AppPublic::generate_pair(None); - let public2 = sr25519::AppPublic::generate_pair(None); - - let all = sr25519::AppPublic::all(); - assert!(all.contains(&public0)); - assert!(all.contains(&public1)); - assert!(all.contains(&public2)); - - let signature = public0.sign(&"sr25519").expect("Generates a valid `sr25519` signature."); - assert!(public0.verify(&"sr25519", &signature)); - (signature, public0) + let public0 = sr25519::AppPublic::generate_pair(None); + let public1 = sr25519::AppPublic::generate_pair(None); + let public2 = sr25519::AppPublic::generate_pair(None); + + let 
all = sr25519::AppPublic::all(); + assert!(all.contains(&public0)); + assert!(all.contains(&public1)); + assert!(all.contains(&public2)); + + let signature = public0 + .sign(&"sr25519") + .expect("Generates a valid `sr25519` signature."); + assert!(public0.verify(&"sr25519", &signature)); + (signature, public0) } fn test_read_storage() { - const KEY: &[u8] = b":read_storage"; - sp_io::storage::set(KEY, b"test"); - - let mut v = [0u8; 4]; - let r = sp_io::storage::read( - KEY, - &mut v, - 0 - ); - assert_eq!(r, Some(4)); - assert_eq!(&v, b"test"); - - let mut v = [0u8; 4]; - let r = sp_io::storage::read(KEY, &mut v, 8); - assert_eq!(r, Some(4)); - assert_eq!(&v, &[0, 0, 0, 0]); + const KEY: &[u8] = b":read_storage"; + sp_io::storage::set(KEY, b"test"); + + let mut v = [0u8; 4]; + let r = sp_io::storage::read(KEY, &mut v, 0); + assert_eq!(r, Some(4)); + assert_eq!(&v, b"test"); + + let mut v = [0u8; 4]; + let r = sp_io::storage::read(KEY, &mut v, 8); + assert_eq!(r, Some(4)); + assert_eq!(&v, &[0, 0, 0, 0]); } fn test_read_child_storage() { - const CHILD_KEY: &[u8] = b":child_storage:default:read_child_storage"; - const UNIQUE_ID: &[u8] = b":unique_id"; - const KEY: &[u8] = b":read_child_storage"; - sp_io::storage::child_set( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, - KEY, - b"test", - ); - - let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, - KEY, - &mut v, - 0, - ); - assert_eq!(r, Some(4)); - assert_eq!(&v, b"test"); - - let mut v = [0u8; 4]; - let r = sp_io::storage::child_read( - CHILD_KEY, - UNIQUE_ID, - ChildType::CryptoUniqueId as u32, - KEY, - &mut v, - 8, - ); - assert_eq!(r, Some(4)); - assert_eq!(&v, &[0, 0, 0, 0]); + const CHILD_KEY: &[u8] = b":child_storage:default:read_child_storage"; + const UNIQUE_ID: &[u8] = b":unique_id"; + const KEY: &[u8] = b":read_child_storage"; + sp_io::storage::child_set( + CHILD_KEY, + UNIQUE_ID, + ChildType::CryptoUniqueId as u32, + 
KEY, + b"test", + ); + + let mut v = [0u8; 4]; + let r = sp_io::storage::child_read( + CHILD_KEY, + UNIQUE_ID, + ChildType::CryptoUniqueId as u32, + KEY, + &mut v, + 0, + ); + assert_eq!(r, Some(4)); + assert_eq!(&v, b"test"); + + let mut v = [0u8; 4]; + let r = sp_io::storage::child_read( + CHILD_KEY, + UNIQUE_ID, + ChildType::CryptoUniqueId as u32, + KEY, + &mut v, + 8, + ); + assert_eq!(r, Some(4)); + assert_eq!(&v, &[0, 0, 0, 0]); } #[cfg(test)] mod tests { - use substrate_test_runtime_client::{ - prelude::*, - sp_consensus::BlockOrigin, - DefaultTestClientBuilderExt, TestClientBuilder, - runtime::TestAPI, - }; - use sp_api::ProvideRuntimeApi; - use sp_runtime::generic::BlockId; - use sp_core::storage::well_known_keys::HEAP_PAGES; - use sp_state_machine::ExecutionStrategy; - use codec::Encode; - use sc_block_builder::BlockBuilderProvider; - - #[test] - fn heap_pages_is_respected() { - // This tests that the on-chain HEAP_PAGES parameter is respected. - - // Create a client devoting only 8 pages of wasm memory. This gives us ~512k of heap memory. - let mut client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::AlwaysWasm) - .set_heap_pages(8) - .build(); - let block_id = BlockId::Number(client.chain_info().best_number); - - // Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger - // than the heap. - let ret = client.runtime_api().vec_with_capacity(&block_id, 1048576); - assert!(ret.is_err()); - - // Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to - // ~2048k of heap memory. 
- let (new_block_id, block) = { - let mut builder = client.new_block(Default::default()).unwrap(); - builder.push_storage_change(HEAP_PAGES.to_vec(), Some(32u64.encode())).unwrap(); - let block = builder.build().unwrap().block; - let hash = block.header.hash(); - (BlockId::Hash(hash), block) - }; - - client.import(BlockOrigin::Own, block).unwrap(); - - // Allocation of 1024k while having ~2048k should succeed. - let ret = client.runtime_api().vec_with_capacity(&new_block_id, 1048576); - assert!(ret.is_ok()); - } - - #[test] - fn test_storage() { - let client = TestClientBuilder::new() - .set_execution_strategy(ExecutionStrategy::Both) - .build(); - let runtime_api = client.runtime_api(); - let block_id = BlockId::Number(client.chain_info().best_number); - - runtime_api.test_storage(&block_id).unwrap(); - } + use codec::Encode; + use sc_block_builder::BlockBuilderProvider; + use sp_api::ProvideRuntimeApi; + use sp_core::storage::well_known_keys::HEAP_PAGES; + use sp_runtime::generic::BlockId; + use sp_state_machine::ExecutionStrategy; + use substrate_test_runtime_client::{ + prelude::*, runtime::TestAPI, sp_consensus::BlockOrigin, DefaultTestClientBuilderExt, + TestClientBuilder, + }; + + #[test] + fn heap_pages_is_respected() { + // This tests that the on-chain HEAP_PAGES parameter is respected. + + // Create a client devoting only 8 pages of wasm memory. This gives us ~512k of heap memory. + let mut client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::AlwaysWasm) + .set_heap_pages(8) + .build(); + let block_id = BlockId::Number(client.chain_info().best_number); + + // Try to allocate 1024k of memory on heap. This is going to fail since it is twice larger + // than the heap. + let ret = client.runtime_api().vec_with_capacity(&block_id, 1048576); + assert!(ret.is_err()); + + // Create a block that sets the `:heap_pages` to 32 pages of memory which corresponds to + // ~2048k of heap memory. 
+ let (new_block_id, block) = { + let mut builder = client.new_block(Default::default()).unwrap(); + builder + .push_storage_change(HEAP_PAGES.to_vec(), Some(32u64.encode())) + .unwrap(); + let block = builder.build().unwrap().block; + let hash = block.header.hash(); + (BlockId::Hash(hash), block) + }; + + client.import(BlockOrigin::Own, block).unwrap(); + + // Allocation of 1024k while having ~2048k should succeed. + let ret = client + .runtime_api() + .vec_with_capacity(&new_block_id, 1048576); + assert!(ret.is_ok()); + } + + #[test] + fn test_storage() { + let client = TestClientBuilder::new() + .set_execution_strategy(ExecutionStrategy::Both) + .build(); + let runtime_api = client.runtime_api(); + let block_id = BlockId::Number(client.chain_info().best_number); + + runtime_api.test_storage(&block_id).unwrap(); + } } diff --git a/test-utils/runtime/src/system.rs b/test-utils/runtime/src/system.rs index c35850ae95..bb3cd3194f 100644 --- a/test-utils/runtime/src/system.rs +++ b/test-utils/runtime/src/system.rs @@ -17,508 +17,533 @@ //! System manager: Handles all of the top-level stuff; executing block/transaction, setting code //! and depositing logs. 
-use sp_std::prelude::*; -use sp_io::{ - storage::root as storage_root, storage::changes_root as storage_changes_root, - hashing::blake2_256, trie, +use crate::{ + AccountId, AuthorityId, Block, BlockNumber, Digest, Extrinsic, Header, Transfer, H256 as Hash, }; +use codec::{Decode, Encode, KeyedVec}; use frame_support::storage; -use frame_support::{decl_storage, decl_module}; -use sp_runtime::{ - traits::Header as _, generic, ApplyExtrinsicResult, - transaction_validity::{ - TransactionValidity, ValidTransaction, InvalidTransaction, TransactionValidityError, - }, -}; -use codec::{KeyedVec, Encode, Decode}; +use frame_support::{decl_module, decl_storage}; use frame_system::Trait; -use crate::{ - AccountId, BlockNumber, Extrinsic, Transfer, H256 as Hash, Block, Header, Digest, AuthorityId -}; use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; +use sp_io::{ + hashing::blake2_256, storage::changes_root as storage_changes_root, + storage::root as storage_root, trie, +}; +use sp_runtime::{ + generic, + traits::Header as _, + transaction_validity::{ + InvalidTransaction, TransactionValidity, TransactionValidityError, ValidTransaction, + }, + ApplyExtrinsicResult, +}; +use sp_std::prelude::*; const NONCE_OF: &[u8] = b"nonce:"; const BALANCE_OF: &[u8] = b"balance:"; decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } decl_storage! { - trait Store for Module as TestRuntime { - ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; - // The current block number being processed. Set by `execute_block`. 
- Number get(fn number): Option; - ParentHash get(fn parent_hash): Hash; - NewAuthorities get(fn new_authorities): Option>; - NewChangesTrieConfig get(fn new_changes_trie_config): Option>; - StorageDigest get(fn storage_digest): Option; - Authorities get(fn authorities) config(): Vec; - } + trait Store for Module as TestRuntime { + ExtrinsicData: map hasher(blake2_128_concat) u32 => Vec; + // The current block number being processed. Set by `execute_block`. + Number get(fn number): Option; + ParentHash get(fn parent_hash): Hash; + NewAuthorities get(fn new_authorities): Option>; + NewChangesTrieConfig get(fn new_changes_trie_config): Option>; + StorageDigest get(fn storage_digest): Option; + Authorities get(fn authorities) config(): Vec; + } } pub fn balance_of_key(who: AccountId) -> Vec { - who.to_keyed_vec(BALANCE_OF) + who.to_keyed_vec(BALANCE_OF) } pub fn balance_of(who: AccountId) -> u64 { - storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) + storage::hashed::get_or(&blake2_256, &balance_of_key(who), 0) } pub fn nonce_of(who: AccountId) -> u64 { - storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) + storage::hashed::get_or(&blake2_256, &who.to_keyed_vec(NONCE_OF), 0) } pub fn initialize_block(header: &Header) { - // populate environment. - ::put(&header.number); - ::put(&header.parent_hash); - ::put(header.digest()); - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); - - // try to read something that depends on current header digest - // so that it'll be included in execution proof - if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { - let _: Option = storage::unhashed::get(&v); - } + // populate environment. 
+ ::put(&header.number); + ::put(&header.parent_hash); + ::put(header.digest()); + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); + + // try to read something that depends on current header digest + // so that it'll be included in execution proof + if let Some(generic::DigestItem::Other(v)) = header.digest().logs().iter().next() { + let _: Option = storage::unhashed::get(&v); + } } pub fn authorities() -> Vec { - Authorities::get() + Authorities::get() } pub fn get_block_number() -> Option { - Number::get() + Number::get() } pub fn take_block_number() -> Option { - Number::take() + Number::take() } #[derive(Copy, Clone)] enum Mode { - Verify, - Overwrite, + Verify, + Overwrite, } /// Actually execute all transitioning for `block`. pub fn polish_block(block: &mut Block) { - execute_block_with_state_root_handler(block, Mode::Overwrite); + execute_block_with_state_root_handler(block, Mode::Overwrite); } pub fn execute_block(mut block: Block) { - execute_block_with_state_root_handler(&mut block, Mode::Verify); + execute_block_with_state_root_handler(&mut block, Mode::Verify); } fn execute_block_with_state_root_handler(block: &mut Block, mode: Mode) { - let header = &mut block.header; - - initialize_block(header); - - // execute transactions - block.extrinsics.iter().for_each(|e| { - let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); - }); - - let new_header = finalize_block(); - - if let Mode::Overwrite = mode { - header.state_root = new_header.state_root; - } else { - info_expect_equal_hash(&new_header.state_root, &header.state_root); - assert!( - new_header.state_root == header.state_root, - "Storage root must match that calculated.", - ); - } - - if let Mode::Overwrite = mode { - header.extrinsics_root = new_header.extrinsics_root; - } else { - info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); - assert!( - new_header.extrinsics_root == header.extrinsics_root, - "Transaction trie 
root must be valid.", - ); - } + let header = &mut block.header; + + initialize_block(header); + + // execute transactions + block.extrinsics.iter().for_each(|e| { + let _ = execute_transaction(e.clone()).unwrap_or_else(|_| panic!("Invalid transaction")); + }); + + let new_header = finalize_block(); + + if let Mode::Overwrite = mode { + header.state_root = new_header.state_root; + } else { + info_expect_equal_hash(&new_header.state_root, &header.state_root); + assert!( + new_header.state_root == header.state_root, + "Storage root must match that calculated.", + ); + } + + if let Mode::Overwrite = mode { + header.extrinsics_root = new_header.extrinsics_root; + } else { + info_expect_equal_hash(&new_header.extrinsics_root, &header.extrinsics_root); + assert!( + new_header.extrinsics_root == header.extrinsics_root, + "Transaction trie root must be valid.", + ); + } } /// The block executor. pub struct BlockExecutor; impl frame_executive::ExecuteBlock for BlockExecutor { - fn execute_block(block: Block) { - execute_block(block); - } + fn execute_block(block: Block) { + execute_block(block); + } } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. 
pub fn validate_transaction(utx: Extrinsic) -> TransactionValidity { - if check_signature(&utx).is_err() { - return InvalidTransaction::BadProof.into(); - } - - let tx = utx.transfer(); - let nonce_key = tx.from.to_keyed_vec(NONCE_OF); - let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); - if tx.nonce < expected_nonce { - return InvalidTransaction::Stale.into(); - } - if tx.nonce > expected_nonce + 64 { - return InvalidTransaction::Future.into(); - } - - let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); - let requires = if tx.nonce != expected_nonce && tx.nonce > 0 { - vec![encode(&tx.from, tx.nonce - 1)] - } else { - vec![] - }; - - let provides = vec![encode(&tx.from, tx.nonce)]; - - Ok(ValidTransaction { - priority: tx.amount, - requires, - provides, - longevity: 64, - propagate: true, - }) + if check_signature(&utx).is_err() { + return InvalidTransaction::BadProof.into(); + } + + let tx = utx.transfer(); + let nonce_key = tx.from.to_keyed_vec(NONCE_OF); + let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); + if tx.nonce < expected_nonce { + return InvalidTransaction::Stale.into(); + } + if tx.nonce > expected_nonce + 64 { + return InvalidTransaction::Future.into(); + } + + let encode = |from: &AccountId, nonce: u64| (from, nonce).encode(); + let requires = if tx.nonce != expected_nonce && tx.nonce > 0 { + vec![encode(&tx.from, tx.nonce - 1)] + } else { + vec![] + }; + + let provides = vec![encode(&tx.from, tx.nonce)]; + + Ok(ValidTransaction { + priority: tx.amount, + requires, + provides, + longevity: 64, + propagate: true, + }) } /// Execute a transaction outside of the block execution function. /// This doesn't attempt to validate anything regarding the block. 
pub fn execute_transaction(utx: Extrinsic) -> ApplyExtrinsicResult { - let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); - let result = execute_transaction_backend(&utx, extrinsic_index); - ExtrinsicData::insert(extrinsic_index, utx.encode()); - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); - result + let extrinsic_index: u32 = storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX).unwrap(); + let result = execute_transaction_backend(&utx, extrinsic_index); + ExtrinsicData::insert(extrinsic_index, utx.encode()); + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &(extrinsic_index + 1)); + result } /// Finalize the block. pub fn finalize_block() -> Header { - let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); - let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); - let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); - let number = ::take().expect("Number is set by `initialize_block`"); - let parent_hash = ::take(); - let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); - - let o_new_authorities = ::take(); - let new_changes_trie_config = ::take(); - - // This MUST come after all changes to storage are done. Otherwise we will fail the - // “Storage root does not match that calculated” assertion. 
- let storage_root = Hash::decode(&mut &storage_root()[..]) - .expect("`storage_root` is a valid hash"); - let storage_changes_root = storage_changes_root(&parent_hash.encode()) - .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); - - if let Some(storage_changes_root) = storage_changes_root { - digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); - } - - if let Some(new_authorities) = o_new_authorities { - digest.push(generic::DigestItem::Consensus(*b"aura", new_authorities.encode())); - digest.push(generic::DigestItem::Consensus(*b"babe", new_authorities.encode())); - } - - if let Some(new_config) = new_changes_trie_config { - digest.push(generic::DigestItem::ChangesTrieSignal( - generic::ChangesTrieSignal::NewConfiguration(new_config) - )); - } - - Header { - number, - extrinsics_root, - state_root: storage_root, - parent_hash, - digest, - } + let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap(); + let txs: Vec<_> = (0..extrinsic_index).map(ExtrinsicData::take).collect(); + let extrinsics_root = trie::blake2_256_ordered_root(txs).into(); + let number = ::take().expect("Number is set by `initialize_block`"); + let parent_hash = ::take(); + let mut digest = ::take().expect("StorageDigest is set by `initialize_block`"); + + let o_new_authorities = ::take(); + let new_changes_trie_config = ::take(); + + // This MUST come after all changes to storage are done. Otherwise we will fail the + // “Storage root does not match that calculated” assertion. 
+ let storage_root = + Hash::decode(&mut &storage_root()[..]).expect("`storage_root` is a valid hash"); + let storage_changes_root = storage_changes_root(&parent_hash.encode()) + .map(|r| Hash::decode(&mut &r[..]).expect("`storage_changes_root` is a valid hash")); + + if let Some(storage_changes_root) = storage_changes_root { + digest.push(generic::DigestItem::ChangesTrieRoot(storage_changes_root)); + } + + if let Some(new_authorities) = o_new_authorities { + digest.push(generic::DigestItem::Consensus( + *b"aura", + new_authorities.encode(), + )); + digest.push(generic::DigestItem::Consensus( + *b"babe", + new_authorities.encode(), + )); + } + + if let Some(new_config) = new_changes_trie_config { + digest.push(generic::DigestItem::ChangesTrieSignal( + generic::ChangesTrieSignal::NewConfiguration(new_config), + )); + } + + Header { + number, + extrinsics_root, + state_root: storage_root, + parent_hash, + digest, + } } #[inline(always)] fn check_signature(utx: &Extrinsic) -> Result<(), TransactionValidityError> { - use sp_runtime::traits::BlindCheckable; - utx.clone().check().map_err(|_| InvalidTransaction::BadProof.into()).map(|_| ()) + use sp_runtime::traits::BlindCheckable; + utx.clone() + .check() + .map_err(|_| InvalidTransaction::BadProof.into()) + .map(|_| ()) } fn execute_transaction_backend(utx: &Extrinsic, extrinsic_index: u32) -> ApplyExtrinsicResult { - check_signature(utx)?; - match utx { - Extrinsic::Transfer { exhaust_resources_when_not_first: true, .. } if extrinsic_index != 0 => - Err(InvalidTransaction::ExhaustsResources.into()), - Extrinsic::Transfer { ref transfer, .. 
} => - execute_transfer_backend(transfer), - Extrinsic::AuthoritiesChange(ref new_auth) => - execute_new_authorities_backend(new_auth), - Extrinsic::IncludeData(_) => Ok(Ok(())), - Extrinsic::StorageChange(key, value) => - execute_storage_change(key, value.as_ref().map(|v| &**v)), - Extrinsic::ChangesTrieConfigUpdate(ref new_config) => - execute_changes_trie_config_update(new_config.clone()), - } + check_signature(utx)?; + match utx { + Extrinsic::Transfer { + exhaust_resources_when_not_first: true, + .. + } if extrinsic_index != 0 => Err(InvalidTransaction::ExhaustsResources.into()), + Extrinsic::Transfer { ref transfer, .. } => execute_transfer_backend(transfer), + Extrinsic::AuthoritiesChange(ref new_auth) => execute_new_authorities_backend(new_auth), + Extrinsic::IncludeData(_) => Ok(Ok(())), + Extrinsic::StorageChange(key, value) => { + execute_storage_change(key, value.as_ref().map(|v| &**v)) + } + Extrinsic::ChangesTrieConfigUpdate(ref new_config) => { + execute_changes_trie_config_update(new_config.clone()) + } + } } fn execute_transfer_backend(tx: &Transfer) -> ApplyExtrinsicResult { - // check nonce - let nonce_key = tx.from.to_keyed_vec(NONCE_OF); - let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); - if !(tx.nonce == expected_nonce) { - return Err(InvalidTransaction::Stale.into()); - } - - // increment nonce in storage - storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); - - // check sender balance - let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); - let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); - - // enact transfer - if !(tx.amount <= from_balance) { - return Err(InvalidTransaction::Payment.into()); - } - let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); - let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); - storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); - storage::hashed::put(&blake2_256, 
&to_balance_key, &(to_balance + tx.amount)); - Ok(Ok(())) + // check nonce + let nonce_key = tx.from.to_keyed_vec(NONCE_OF); + let expected_nonce: u64 = storage::hashed::get_or(&blake2_256, &nonce_key, 0); + if !(tx.nonce == expected_nonce) { + return Err(InvalidTransaction::Stale.into()); + } + + // increment nonce in storage + storage::hashed::put(&blake2_256, &nonce_key, &(expected_nonce + 1)); + + // check sender balance + let from_balance_key = tx.from.to_keyed_vec(BALANCE_OF); + let from_balance: u64 = storage::hashed::get_or(&blake2_256, &from_balance_key, 0); + + // enact transfer + if !(tx.amount <= from_balance) { + return Err(InvalidTransaction::Payment.into()); + } + let to_balance_key = tx.to.to_keyed_vec(BALANCE_OF); + let to_balance: u64 = storage::hashed::get_or(&blake2_256, &to_balance_key, 0); + storage::hashed::put(&blake2_256, &from_balance_key, &(from_balance - tx.amount)); + storage::hashed::put(&blake2_256, &to_balance_key, &(to_balance + tx.amount)); + Ok(Ok(())) } fn execute_new_authorities_backend(new_authorities: &[AuthorityId]) -> ApplyExtrinsicResult { - NewAuthorities::put(new_authorities.to_vec()); - Ok(Ok(())) + NewAuthorities::put(new_authorities.to_vec()); + Ok(Ok(())) } fn execute_storage_change(key: &[u8], value: Option<&[u8]>) -> ApplyExtrinsicResult { - match value { - Some(value) => storage::unhashed::put_raw(key, value), - None => storage::unhashed::kill(key), - } - Ok(Ok(())) + match value { + Some(value) => storage::unhashed::put_raw(key, value), + None => storage::unhashed::kill(key), + } + Ok(Ok(())) } -fn execute_changes_trie_config_update(new_config: Option) -> ApplyExtrinsicResult { - match new_config.clone() { - Some(new_config) => storage::unhashed::put_raw( - well_known_keys::CHANGES_TRIE_CONFIG, - &new_config.encode(), - ), - None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), - } - ::put(new_config); - Ok(Ok(())) +fn execute_changes_trie_config_update( + new_config: Option, +) -> 
ApplyExtrinsicResult { + match new_config.clone() { + Some(new_config) => { + storage::unhashed::put_raw(well_known_keys::CHANGES_TRIE_CONFIG, &new_config.encode()) + } + None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), + } + ::put(new_config); + Ok(Ok(())) } #[cfg(feature = "std")] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { - use sp_core::hexdisplay::HexDisplay; - if given != expected { - println!( - "Hash: given={}, expected={}", - HexDisplay::from(given.as_fixed_bytes()), - HexDisplay::from(expected.as_fixed_bytes()), - ); - } + use sp_core::hexdisplay::HexDisplay; + if given != expected { + println!( + "Hash: given={}, expected={}", + HexDisplay::from(given.as_fixed_bytes()), + HexDisplay::from(expected.as_fixed_bytes()), + ); + } } #[cfg(not(feature = "std"))] fn info_expect_equal_hash(given: &Hash, expected: &Hash) { - if given != expected { - sp_runtime::print("Hash not equal"); - sp_runtime::print(given.as_bytes()); - sp_runtime::print(expected.as_bytes()); - } + if given != expected { + sp_runtime::print("Hash not equal"); + sp_runtime::print(given.as_bytes()); + sp_runtime::print(expected.as_bytes()); + } } #[cfg(test)] mod tests { - use super::*; - - use sp_io::TestExternalities; - use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; - use crate::{Header, Transfer, WASM_BINARY}; - use sp_core::{NeverNativeValue, map, traits::{CodeExecutor, RuntimeCode}}; - use sc_executor::{NativeExecutor, WasmExecutionMethod, native_executor_instance}; - use sp_io::hashing::twox_128; - - // Declare an instance of the native executor dispatch for the test runtime. 
- native_executor_instance!( - NativeDispatch, - crate::api::dispatch, - crate::native_version - ); - - fn executor() -> NativeExecutor { - NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) - } - - fn new_test_ext() -> TestExternalities { - let authorities = vec![ - Sr25519Keyring::Alice.to_raw_public(), - Sr25519Keyring::Bob.to_raw_public(), - Sr25519Keyring::Charlie.to_raw_public() - ]; - TestExternalities::new_with_code( - WASM_BINARY, - sp_core::storage::Storage { - top: map![ - twox_128(b"latest").to_vec() => vec![69u8; 32], - twox_128(b"sys:auth").to_vec() => authorities.encode(), - blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { - vec![111u8, 0, 0, 0, 0, 0, 0, 0] - } - ], - children: map![], - }, - ) - } - - fn block_import_works(block_executor: F) where F: Fn(Block, &mut TestExternalities) { - let h = Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }; - let mut b = Block { - header: h, - extrinsics: vec![], - }; - - new_test_ext().execute_with(|| polish_block(&mut b)); - - block_executor(b, &mut new_test_ext()); - } - - #[test] - fn block_import_works_native() { - block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); - } - - #[test] - fn block_import_works_wasm() { - block_import_works(|b, ext| { - let mut ext = ext.ext(); - let runtime_code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(WASM_BINARY.into()), - hash: Vec::new(), - heap_pages: None, - }; - - executor().call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - &b.encode(), - false, - None, - ).0.unwrap(); - }) - } - - fn block_import_with_transaction_works(block_executor: F) - where F: Fn(Block, &mut TestExternalities) - { - let mut b1 = Block { - header: Header { - parent_hash: [69u8; 32].into(), - number: 1, - state_root: Default::default(), - extrinsics_root: Default::default(), - 
digest: Default::default(), - }, - extrinsics: vec![ - Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 69, - nonce: 0, - }.into_signed_tx() - ], - }; - - let mut dummy_ext = new_test_ext(); - dummy_ext.execute_with(|| polish_block(&mut b1)); - - let mut b2 = Block { - header: Header { - parent_hash: b1.header.hash(), - number: 2, - state_root: Default::default(), - extrinsics_root: Default::default(), - digest: Default::default(), - }, - extrinsics: vec![ - Transfer { - from: AccountKeyring::Bob.into(), - to: AccountKeyring::Alice.into(), - amount: 27, - nonce: 0, - }.into_signed_tx(), - Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Charlie.into(), - amount: 69, - nonce: 1, - }.into_signed_tx(), - ], - }; - - dummy_ext.execute_with(|| polish_block(&mut b2)); - drop(dummy_ext); - - let mut t = new_test_ext(); - - t.execute_with(|| { - assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); - assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); - }); - - block_executor(b1, &mut t); - - t.execute_with(|| { - assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); - assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); - }); - - block_executor(b2, &mut t); - - t.execute_with(|| { - assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); - assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); - assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); - }); - } - - #[test] - fn block_import_with_transaction_works_native() { - block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); - } - - #[test] - fn block_import_with_transaction_works_wasm() { - block_import_with_transaction_works(|b, ext| { - let mut ext = ext.ext(); - let runtime_code = RuntimeCode { - code_fetcher: &sp_core::traits::WrappedRuntimeCode(WASM_BINARY.into()), - hash: Vec::new(), - heap_pages: None, - }; - - executor().call:: _>( - &mut ext, - &runtime_code, - "Core_execute_block", - 
&b.encode(), - false, - None, - ).0.unwrap(); - }) - } + use super::*; + + use crate::{Header, Transfer, WASM_BINARY}; + use sc_executor::{native_executor_instance, NativeExecutor, WasmExecutionMethod}; + use sp_core::{ + map, + traits::{CodeExecutor, RuntimeCode}, + NeverNativeValue, + }; + use sp_io::hashing::twox_128; + use sp_io::TestExternalities; + use substrate_test_runtime_client::{AccountKeyring, Sr25519Keyring}; + + // Declare an instance of the native executor dispatch for the test runtime. + native_executor_instance!(NativeDispatch, crate::api::dispatch, crate::native_version); + + fn executor() -> NativeExecutor { + NativeExecutor::new(WasmExecutionMethod::Interpreted, None, 8) + } + + fn new_test_ext() -> TestExternalities { + let authorities = vec![ + Sr25519Keyring::Alice.to_raw_public(), + Sr25519Keyring::Bob.to_raw_public(), + Sr25519Keyring::Charlie.to_raw_public(), + ]; + TestExternalities::new_with_code( + WASM_BINARY, + sp_core::storage::Storage { + top: map![ + twox_128(b"latest").to_vec() => vec![69u8; 32], + twox_128(b"sys:auth").to_vec() => authorities.encode(), + blake2_256(&AccountKeyring::Alice.to_raw_public().to_keyed_vec(b"balance:")).to_vec() => { + vec![111u8, 0, 0, 0, 0, 0, 0, 0] + } + ], + children: map![], + }, + ) + } + + fn block_import_works(block_executor: F) + where + F: Fn(Block, &mut TestExternalities), + { + let h = Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + }; + let mut b = Block { + header: h, + extrinsics: vec![], + }; + + new_test_ext().execute_with(|| polish_block(&mut b)); + + block_executor(b, &mut new_test_ext()); + } + + #[test] + fn block_import_works_native() { + block_import_works(|b, ext| ext.execute_with(|| execute_block(b))); + } + + #[test] + fn block_import_works_wasm() { + block_import_works(|b, ext| { + let mut ext = ext.ext(); + let runtime_code = RuntimeCode { + code_fetcher: 
&sp_core::traits::WrappedRuntimeCode(WASM_BINARY.into()), + hash: Vec::new(), + heap_pages: None, + }; + + executor() + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) + .0 + .unwrap(); + }) + } + + fn block_import_with_transaction_works(block_executor: F) + where + F: Fn(Block, &mut TestExternalities), + { + let mut b1 = Block { + header: Header { + parent_hash: [69u8; 32].into(), + number: 1, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + }, + extrinsics: vec![Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 69, + nonce: 0, + } + .into_signed_tx()], + }; + + let mut dummy_ext = new_test_ext(); + dummy_ext.execute_with(|| polish_block(&mut b1)); + + let mut b2 = Block { + header: Header { + parent_hash: b1.header.hash(), + number: 2, + state_root: Default::default(), + extrinsics_root: Default::default(), + digest: Default::default(), + }, + extrinsics: vec![ + Transfer { + from: AccountKeyring::Bob.into(), + to: AccountKeyring::Alice.into(), + amount: 27, + nonce: 0, + } + .into_signed_tx(), + Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Charlie.into(), + amount: 69, + nonce: 1, + } + .into_signed_tx(), + ], + }; + + dummy_ext.execute_with(|| polish_block(&mut b2)); + drop(dummy_ext); + + let mut t = new_test_ext(); + + t.execute_with(|| { + assert_eq!(balance_of(AccountKeyring::Alice.into()), 111); + assert_eq!(balance_of(AccountKeyring::Bob.into()), 0); + }); + + block_executor(b1, &mut t); + + t.execute_with(|| { + assert_eq!(balance_of(AccountKeyring::Alice.into()), 42); + assert_eq!(balance_of(AccountKeyring::Bob.into()), 69); + }); + + block_executor(b2, &mut t); + + t.execute_with(|| { + assert_eq!(balance_of(AccountKeyring::Alice.into()), 0); + assert_eq!(balance_of(AccountKeyring::Bob.into()), 42); + assert_eq!(balance_of(AccountKeyring::Charlie.into()), 69); + }); + } + + 
#[test] + fn block_import_with_transaction_works_native() { + block_import_with_transaction_works(|b, ext| ext.execute_with(|| execute_block(b))); + } + + #[test] + fn block_import_with_transaction_works_wasm() { + block_import_with_transaction_works(|b, ext| { + let mut ext = ext.ext(); + let runtime_code = RuntimeCode { + code_fetcher: &sp_core::traits::WrappedRuntimeCode(WASM_BINARY.into()), + hash: Vec::new(), + heap_pages: None, + }; + + executor() + .call:: _>( + &mut ext, + &runtime_code, + "Core_execute_block", + &b.encode(), + false, + None, + ) + .0 + .unwrap(); + }) + } } diff --git a/test-utils/runtime/transaction-pool/src/lib.rs b/test-utils/runtime/transaction-pool/src/lib.rs index 432c9e520d..cdc025c2c5 100644 --- a/test-utils/runtime/transaction-pool/src/lib.rs +++ b/test-utils/runtime/transaction-pool/src/lib.rs @@ -21,17 +21,17 @@ use codec::Encode; use parking_lot::RwLock; use sp_runtime::{ - generic::{self, BlockId}, - traits::{BlakeTwo256, Hash as HashT}, - transaction_validity::{ - TransactionValidity, ValidTransaction, TransactionValidityError, InvalidTransaction, - TransactionSource, - }, + generic::{self, BlockId}, + traits::{BlakeTwo256, Hash as HashT}, + transaction_validity::{ + InvalidTransaction, TransactionSource, TransactionValidity, TransactionValidityError, + ValidTransaction, + }, }; -use std::collections::{HashSet, HashMap}; +use std::collections::{HashMap, HashSet}; use substrate_test_runtime_client::{ - runtime::{Index, AccountId, Block, BlockNumber, Extrinsic, Hash, Header, Transfer}, - AccountKeyring::{self, *}, + runtime::{AccountId, Block, BlockNumber, Extrinsic, Hash, Header, Index, Transfer}, + AccountKeyring::{self, *}, }; /// Error type used by [`TestApi`]. 
@@ -39,235 +39,258 @@ use substrate_test_runtime_client::{ pub struct Error(sp_transaction_pool::error::Error); impl sp_transaction_pool::error::IntoPoolError for Error { - fn into_pool_error(self) -> Result { - Ok(self.0) - } + fn into_pool_error(self) -> Result { + Ok(self.0) + } } impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - Some(&self.0) - } + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + Some(&self.0) + } } #[derive(Default)] pub struct ChainState { - pub block_by_number: HashMap>, - pub block_by_hash: HashMap>, - pub header_by_number: HashMap, - pub nonces: HashMap, - pub invalid_hashes: HashSet, + pub block_by_number: HashMap>, + pub block_by_hash: HashMap>, + pub header_by_number: HashMap, + pub nonces: HashMap, + pub invalid_hashes: HashSet, } /// Test Api for transaction pool. pub struct TestApi { - valid_modifier: RwLock>, - chain: RwLock, - validation_requests: RwLock>, + valid_modifier: RwLock>, + chain: RwLock, + validation_requests: RwLock>, } impl TestApi { - /// Test Api with Alice nonce set initially. - pub fn with_alice_nonce(nonce: u64) -> Self { - let api = TestApi { - valid_modifier: RwLock::new(Box::new(|_| {})), - chain: Default::default(), - validation_requests: RwLock::new(Default::default()), - }; - - api.chain.write().nonces.insert(Alice.into(), nonce); - - api - } - - /// Default Test Api - pub fn empty() -> Self { - let api = TestApi { - valid_modifier: RwLock::new(Box::new(|_| {})), - chain: Default::default(), - validation_requests: RwLock::new(Default::default()), - }; - - api - } - - /// Set hook on modify valid result of transaction. - pub fn set_valid_modifier(&self, modifier: Box) { - *self.valid_modifier.write() = modifier; - } - - /// Push block as a part of canonical chain under given number. 
- pub fn push_block(&self, block_number: BlockNumber, xts: Vec) -> Header { - let mut chain = self.chain.write(); - chain.block_by_number.insert(block_number, xts.clone()); - let header = Header { - number: block_number, - digest: Default::default(), - extrinsics_root: Default::default(), - parent_hash: block_number - .checked_sub(1) - .and_then(|num| { - chain.header_by_number.get(&num) - .cloned().map(|h| h.hash()) - }).unwrap_or_default(), - state_root: Default::default(), - }; - chain.block_by_hash.insert(header.hash(), xts); - chain.header_by_number.insert(block_number, header.clone()); - header - } - - /// Push a block without a number. - /// - /// As a part of non-canonical chain. - pub fn push_fork_block(&self, block_hash: Hash, xts: Vec) { - let mut chain = self.chain.write(); - chain.block_by_hash.insert(block_hash, xts); - } - - pub fn push_fork_block_with_parent(&self, parent: Hash, xts: Vec) -> Header { - let mut chain = self.chain.write(); - let blocknum = chain.block_by_number.keys().max().expect("block_by_number shouldn't be empty"); - let header = Header { - number: *blocknum, - digest: Default::default(), - extrinsics_root: Default::default(), - parent_hash: parent, - state_root: Default::default(), - }; - chain.block_by_hash.insert(header.hash(), xts); - header - } - - fn hash_and_length_inner(ex: &Extrinsic) -> (Hash, usize) { - let encoded = ex.encode(); - (BlakeTwo256::hash(&encoded), encoded.len()) - } - - /// Mark some transaction is invalid. - /// - /// Next time transaction pool will try to validate this - /// extrinsic, api will return invalid result. - pub fn add_invalid(&self, xts: &Extrinsic) { - self.chain.write().invalid_hashes.insert( - Self::hash_and_length_inner(xts).0 - ); - } - - /// Query validation requests received. 
- pub fn validation_requests(&self) -> Vec { - self.validation_requests.read().clone() - } - - /// get a reference to the chain state - pub fn chain(&self) -> &RwLock { - &self.chain - } - - /// Increment nonce in the inner state. - pub fn increment_nonce(&self, account: AccountId) { - let mut chain = self.chain.write(); - chain.nonces.entry(account).and_modify(|n| *n += 1).or_insert(1); - } + /// Test Api with Alice nonce set initially. + pub fn with_alice_nonce(nonce: u64) -> Self { + let api = TestApi { + valid_modifier: RwLock::new(Box::new(|_| {})), + chain: Default::default(), + validation_requests: RwLock::new(Default::default()), + }; + + api.chain.write().nonces.insert(Alice.into(), nonce); + + api + } + + /// Default Test Api + pub fn empty() -> Self { + let api = TestApi { + valid_modifier: RwLock::new(Box::new(|_| {})), + chain: Default::default(), + validation_requests: RwLock::new(Default::default()), + }; + + api + } + + /// Set hook on modify valid result of transaction. + pub fn set_valid_modifier(&self, modifier: Box) { + *self.valid_modifier.write() = modifier; + } + + /// Push block as a part of canonical chain under given number. + pub fn push_block(&self, block_number: BlockNumber, xts: Vec) -> Header { + let mut chain = self.chain.write(); + chain.block_by_number.insert(block_number, xts.clone()); + let header = Header { + number: block_number, + digest: Default::default(), + extrinsics_root: Default::default(), + parent_hash: block_number + .checked_sub(1) + .and_then(|num| chain.header_by_number.get(&num).cloned().map(|h| h.hash())) + .unwrap_or_default(), + state_root: Default::default(), + }; + chain.block_by_hash.insert(header.hash(), xts); + chain.header_by_number.insert(block_number, header.clone()); + header + } + + /// Push a block without a number. + /// + /// As a part of non-canonical chain. 
+ pub fn push_fork_block(&self, block_hash: Hash, xts: Vec) { + let mut chain = self.chain.write(); + chain.block_by_hash.insert(block_hash, xts); + } + + pub fn push_fork_block_with_parent(&self, parent: Hash, xts: Vec) -> Header { + let mut chain = self.chain.write(); + let blocknum = chain + .block_by_number + .keys() + .max() + .expect("block_by_number shouldn't be empty"); + let header = Header { + number: *blocknum, + digest: Default::default(), + extrinsics_root: Default::default(), + parent_hash: parent, + state_root: Default::default(), + }; + chain.block_by_hash.insert(header.hash(), xts); + header + } + + fn hash_and_length_inner(ex: &Extrinsic) -> (Hash, usize) { + let encoded = ex.encode(); + (BlakeTwo256::hash(&encoded), encoded.len()) + } + + /// Mark some transaction is invalid. + /// + /// Next time transaction pool will try to validate this + /// extrinsic, api will return invalid result. + pub fn add_invalid(&self, xts: &Extrinsic) { + self.chain + .write() + .invalid_hashes + .insert(Self::hash_and_length_inner(xts).0); + } + + /// Query validation requests received. + pub fn validation_requests(&self) -> Vec { + self.validation_requests.read().clone() + } + + /// get a reference to the chain state + pub fn chain(&self) -> &RwLock { + &self.chain + } + + /// Increment nonce in the inner state. 
+ pub fn increment_nonce(&self, account: AccountId) { + let mut chain = self.chain.write(); + chain + .nonces + .entry(account) + .and_modify(|n| *n += 1) + .or_insert(1); + } } impl sc_transaction_graph::ChainApi for TestApi { - type Block = Block; - type Hash = Hash; - type Error = Error; - type ValidationFuture = futures::future::Ready>; - type BodyFuture = futures::future::Ready>, Error>>; - - fn validate_transaction( - &self, - _at: &BlockId, - _source: TransactionSource, - uxt: sc_transaction_graph::ExtrinsicFor, - ) -> Self::ValidationFuture { - self.validation_requests.write().push(uxt.clone()); - - let chain_nonce = self.chain.read().nonces.get(&uxt.transfer().from).cloned().unwrap_or(0); - let requires = if chain_nonce == uxt.transfer().nonce { - vec![] - } else { - vec![vec![chain_nonce as u8]] - }; - let provides = vec![vec![uxt.transfer().nonce as u8]]; - - if self.chain.read().invalid_hashes.contains(&self.hash_and_length(&uxt).0) { - return futures::future::ready(Ok( - Err(TransactionValidityError::Invalid(InvalidTransaction::Custom(0)).into()) - )) - } - - let mut validity = ValidTransaction { - priority: 1, - requires, - provides, - longevity: 64, - propagate: true, - }; - - (self.valid_modifier.read())(&mut validity); - - futures::future::ready(Ok(Ok(validity))) - } - - fn block_id_to_number( - &self, - at: &BlockId, - ) -> Result>, Error> { - Ok(Some(number_of(at))) - } - - fn block_id_to_hash( - &self, - at: &BlockId, - ) -> Result>, Error> { - Ok(match at { - generic::BlockId::Hash(x) => Some(x.clone()), - generic::BlockId::Number(num) => { - self.chain.read() - .header_by_number.get(num) - .map(|h| h.hash()) - .or_else(|| Some(Default::default())) - }, - }) - } - - fn hash_and_length( - &self, - ex: &sc_transaction_graph::ExtrinsicFor, - ) -> (Self::Hash, usize) { - Self::hash_and_length_inner(ex) - } - - fn block_body(&self, id: &BlockId) -> Self::BodyFuture { - futures::future::ready(Ok(match id { - BlockId::Number(num) => 
self.chain.read().block_by_number.get(num).cloned(), - BlockId::Hash(hash) => self.chain.read().block_by_hash.get(hash).cloned(), - })) - } + type Block = Block; + type Hash = Hash; + type Error = Error; + type ValidationFuture = futures::future::Ready>; + type BodyFuture = futures::future::Ready>, Error>>; + + fn validate_transaction( + &self, + _at: &BlockId, + _source: TransactionSource, + uxt: sc_transaction_graph::ExtrinsicFor, + ) -> Self::ValidationFuture { + self.validation_requests.write().push(uxt.clone()); + + let chain_nonce = self + .chain + .read() + .nonces + .get(&uxt.transfer().from) + .cloned() + .unwrap_or(0); + let requires = if chain_nonce == uxt.transfer().nonce { + vec![] + } else { + vec![vec![chain_nonce as u8]] + }; + let provides = vec![vec![uxt.transfer().nonce as u8]]; + + if self + .chain + .read() + .invalid_hashes + .contains(&self.hash_and_length(&uxt).0) + { + return futures::future::ready(Ok(Err(TransactionValidityError::Invalid( + InvalidTransaction::Custom(0), + ) + .into()))); + } + + let mut validity = ValidTransaction { + priority: 1, + requires, + provides, + longevity: 64, + propagate: true, + }; + + (self.valid_modifier.read())(&mut validity); + + futures::future::ready(Ok(Ok(validity))) + } + + fn block_id_to_number( + &self, + at: &BlockId, + ) -> Result>, Error> { + Ok(Some(number_of(at))) + } + + fn block_id_to_hash( + &self, + at: &BlockId, + ) -> Result>, Error> { + Ok(match at { + generic::BlockId::Hash(x) => Some(x.clone()), + generic::BlockId::Number(num) => self + .chain + .read() + .header_by_number + .get(num) + .map(|h| h.hash()) + .or_else(|| Some(Default::default())), + }) + } + + fn hash_and_length( + &self, + ex: &sc_transaction_graph::ExtrinsicFor, + ) -> (Self::Hash, usize) { + Self::hash_and_length_inner(ex) + } + + fn block_body(&self, id: &BlockId) -> Self::BodyFuture { + futures::future::ready(Ok(match id { + BlockId::Number(num) => self.chain.read().block_by_number.get(num).cloned(), + 
BlockId::Hash(hash) => self.chain.read().block_by_hash.get(hash).cloned(), + })) + } } fn number_of(at: &BlockId) -> u64 { - match at { - generic::BlockId::Number(n) => *n as u64, - _ => 0, - } + match at { + generic::BlockId::Number(n) => *n as u64, + _ => 0, + } } /// Generate transfer extrinsic with a given nonce. /// /// Part of the test api. pub fn uxt(who: AccountKeyring, nonce: Index) -> Extrinsic { - let transfer = Transfer { - from: who.into(), - to: AccountId::default(), - nonce, - amount: 1, - }; - let signature = transfer.using_encoded(|e| who.sign(e)).into(); - Extrinsic::Transfer { transfer, signature, exhaust_resources_when_not_first: false } + let transfer = Transfer { + from: who.into(), + to: AccountId::default(), + nonce, + amount: 1, + }; + let signature = transfer.using_encoded(|e| who.sign(e)).into(); + Extrinsic::Transfer { + transfer, + signature, + exhaust_resources_when_not_first: false, + } } - diff --git a/test-utils/src/lib.rs b/test-utils/src/lib.rs index c1a18a1fa7..abbdebed7e 100644 --- a/test-utils/src/lib.rs +++ b/test-utils/src/lib.rs @@ -37,18 +37,20 @@ /// ``` #[macro_export] macro_rules! assert_eq_uvec { - ( $x:expr, $y:expr ) => { - $crate::__assert_eq_uvec!($x, $y); - $crate::__assert_eq_uvec!($y, $x); - } + ( $x:expr, $y:expr ) => { + $crate::__assert_eq_uvec!($x, $y); + $crate::__assert_eq_uvec!($y, $x); + }; } #[macro_export] #[doc(hidden)] macro_rules! 
__assert_eq_uvec { - ( $x:expr, $y:expr ) => { - $x.iter().for_each(|e| { - if !$y.contains(e) { panic!(format!("vectors not equal: {:?} != {:?}", $x, $y)); } - }); - } + ( $x:expr, $y:expr ) => { + $x.iter().for_each(|e| { + if !$y.contains(e) { + panic!(format!("vectors not equal: {:?} != {:?}", $x, $y)); + } + }); + }; } diff --git a/utils/browser/src/lib.rs b/utils/browser/src/lib.rs index 0c5f0f320f..f521f1e26c 100644 --- a/utils/browser/src/lib.rs +++ b/utils/browser/src/lib.rs @@ -14,21 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use futures::{ + channel::{mpsc, oneshot}, + compat::*, + future::{ok, poll_fn}, + prelude::*, +}; use futures01::sync::mpsc as mpsc01; +use libp2p_wasm_ext::{ffi, ExtTransport}; use log::{debug, info}; -use std::sync::Arc; +use sc_chain_spec::Extension; use sc_network::config::TransportConfig; use sc_service::{ - AbstractService, RpcSession, Role, Configuration, - config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration}, - GenericChainSpec, RuntimeGenesis + config::{DatabaseConfig, KeystoreConfig, NetworkConfiguration}, + AbstractService, Configuration, GenericChainSpec, Role, RpcSession, RuntimeGenesis, }; -use wasm_bindgen::prelude::*; -use futures::{prelude::*, channel::{oneshot, mpsc}, future::{poll_fn, ok}, compat::*}; -use std::task::Poll; use std::pin::Pin; -use sc_chain_spec::Extension; -use libp2p_wasm_ext::{ExtTransport, ffi}; +use std::sync::Arc; +use std::task::Poll; +use wasm_bindgen::prelude::*; pub use console_error_panic_hook::set_once as set_console_error_panic_hook; pub use console_log::init_with_level as init_console_log; @@ -36,171 +40,168 @@ pub use console_log::init_with_level as init_console_log; /// Create a service configuration from a chain spec. /// /// This configuration contains good defaults for a browser light client. 
-pub async fn browser_configuration(chain_spec: GenericChainSpec) - -> Result> +pub async fn browser_configuration( + chain_spec: GenericChainSpec, +) -> Result> where - G: RuntimeGenesis + 'static, - E: Extension + 'static + Send, + G: RuntimeGenesis + 'static, + E: Extension + 'static + Send, { - let name = chain_spec.name().to_string(); - - let transport = ExtTransport::new(ffi::websocket_transport()); - let mut network = NetworkConfiguration::new( - format!("{} (Browser)", name), - "unknown", - Default::default(), - None, - ); - network.boot_nodes = chain_spec.boot_nodes().to_vec(); - network.transport = TransportConfig::Normal { - wasm_external_transport: Some(transport.clone()), - allow_private_ipv4: true, - enable_mdns: false, - use_yamux_flow_control: true, - }; - - let config = Configuration { - network, - telemetry_endpoints: chain_spec.telemetry_endpoints().clone(), - chain_spec: Box::new(chain_spec), - task_executor: Arc::new(move |fut| wasm_bindgen_futures::spawn_local(fut)), - telemetry_external_transport: Some(transport), - role: Role::Light, - database: { - info!("Opening Indexed DB database '{}'...", name); - let db = kvdb_web::Database::open(name, 10).await?; - - DatabaseConfig::Custom(sp_database::as_database(db)) - }, - keystore: KeystoreConfig::InMemory, - default_heap_pages: Default::default(), - dev_key_seed: Default::default(), - disable_grandpa: Default::default(), - execution_strategies: Default::default(), - force_authoring: Default::default(), - impl_name: "parity-substrate", - impl_version: "0.0.0", - offchain_worker: Default::default(), - prometheus_config: Default::default(), - pruning: Default::default(), - rpc_cors: Default::default(), - rpc_http: Default::default(), - rpc_ws: Default::default(), - rpc_ws_max_connections: Default::default(), - state_cache_child_ratio: Default::default(), - state_cache_size: Default::default(), - tracing_receiver: Default::default(), - tracing_targets: Default::default(), - transaction_pool: 
Default::default(), - wasm_method: Default::default(), - max_runtime_instances: 8, - announce_block: true, - }; - - Ok(config) + let name = chain_spec.name().to_string(); + + let transport = ExtTransport::new(ffi::websocket_transport()); + let mut network = NetworkConfiguration::new( + format!("{} (Browser)", name), + "unknown", + Default::default(), + None, + ); + network.boot_nodes = chain_spec.boot_nodes().to_vec(); + network.transport = TransportConfig::Normal { + wasm_external_transport: Some(transport.clone()), + allow_private_ipv4: true, + enable_mdns: false, + use_yamux_flow_control: true, + }; + + let config = Configuration { + network, + telemetry_endpoints: chain_spec.telemetry_endpoints().clone(), + chain_spec: Box::new(chain_spec), + task_executor: Arc::new(move |fut| wasm_bindgen_futures::spawn_local(fut)), + telemetry_external_transport: Some(transport), + role: Role::Light, + database: { + info!("Opening Indexed DB database '{}'...", name); + let db = kvdb_web::Database::open(name, 10).await?; + + DatabaseConfig::Custom(sp_database::as_database(db)) + }, + keystore: KeystoreConfig::InMemory, + default_heap_pages: Default::default(), + dev_key_seed: Default::default(), + disable_grandpa: Default::default(), + execution_strategies: Default::default(), + force_authoring: Default::default(), + impl_name: "parity-substrate", + impl_version: "0.0.0", + offchain_worker: Default::default(), + prometheus_config: Default::default(), + pruning: Default::default(), + rpc_cors: Default::default(), + rpc_http: Default::default(), + rpc_ws: Default::default(), + rpc_ws_max_connections: Default::default(), + state_cache_child_ratio: Default::default(), + state_cache_size: Default::default(), + tracing_receiver: Default::default(), + tracing_targets: Default::default(), + transaction_pool: Default::default(), + wasm_method: Default::default(), + max_runtime_instances: 8, + announce_block: true, + }; + + Ok(config) } /// A running client. 
#[wasm_bindgen] pub struct Client { - rpc_send_tx: mpsc::UnboundedSender, + rpc_send_tx: mpsc::UnboundedSender, } struct RpcMessage { - rpc_json: String, - session: RpcSession, - send_back: oneshot::Sender> + Send>>>, + rpc_json: String, + session: RpcSession, + send_back: oneshot::Sender> + Send>>>, } /// Create a Client object that connects to a service. pub fn start_client(mut service: impl AbstractService) -> Client { - // Spawn informant - wasm_bindgen_futures::spawn_local( - sc_informant::build(&service, sc_informant::OutputFormat::Plain).map(drop) - ); - - // We dispatch a background task responsible for processing the service. - // - // The main action performed by the code below consists in polling the service with - // `service.poll()`. - // The rest consists in handling RPC requests. - let (rpc_send_tx, mut rpc_send_rx) = mpsc::unbounded::(); - wasm_bindgen_futures::spawn_local(poll_fn(move |cx| { - loop { - match Pin::new(&mut rpc_send_rx).poll_next(cx) { - Poll::Ready(Some(message)) => { - let fut = service - .rpc_query(&message.session, &message.rpc_json) - .boxed(); - let _ = message.send_back.send(fut); - }, - Poll::Pending => break, - Poll::Ready(None) => return Poll::Ready(()), - } - } - - Pin::new(&mut service) - .poll(cx) - .map(drop) - })); - - Client { - rpc_send_tx, - } + // Spawn informant + wasm_bindgen_futures::spawn_local( + sc_informant::build(&service, sc_informant::OutputFormat::Plain).map(drop), + ); + + // We dispatch a background task responsible for processing the service. + // + // The main action performed by the code below consists in polling the service with + // `service.poll()`. + // The rest consists in handling RPC requests. 
+ let (rpc_send_tx, mut rpc_send_rx) = mpsc::unbounded::(); + wasm_bindgen_futures::spawn_local(poll_fn(move |cx| { + loop { + match Pin::new(&mut rpc_send_rx).poll_next(cx) { + Poll::Ready(Some(message)) => { + let fut = service + .rpc_query(&message.session, &message.rpc_json) + .boxed(); + let _ = message.send_back.send(fut); + } + Poll::Pending => break, + Poll::Ready(None) => return Poll::Ready(()), + } + } + + Pin::new(&mut service).poll(cx).map(drop) + })); + + Client { rpc_send_tx } } #[wasm_bindgen] impl Client { - /// Allows starting an RPC request. Returns a `Promise` containing the result of that request. - #[wasm_bindgen(js_name = "rpcSend")] - pub fn rpc_send(&mut self, rpc: &str) -> js_sys::Promise { - let rpc_session = RpcSession::new(mpsc01::channel(1).0); - let (tx, rx) = oneshot::channel(); - let _ = self.rpc_send_tx.unbounded_send(RpcMessage { - rpc_json: rpc.to_owned(), - session: rpc_session, - send_back: tx, - }); - wasm_bindgen_futures::future_to_promise(async { - match rx.await { - Ok(fut) => { - fut.await - .map(|s| JsValue::from_str(&s)) - .ok_or_else(|| JsValue::NULL) - }, - Err(_) => Err(JsValue::NULL) - } - }) - } - - /// Subscribes to an RPC pubsub endpoint. - #[wasm_bindgen(js_name = "rpcSubscribe")] - pub fn rpc_subscribe(&mut self, rpc: &str, callback: js_sys::Function) { - let (tx, rx) = mpsc01::channel(4); - let rpc_session = RpcSession::new(tx); - let (fut_tx, fut_rx) = oneshot::channel(); - let _ = self.rpc_send_tx.unbounded_send(RpcMessage { - rpc_json: rpc.to_owned(), - session: rpc_session.clone(), - send_back: fut_tx, - }); - wasm_bindgen_futures::spawn_local(async { - if let Ok(fut) = fut_rx.await { - fut.await; - } - }); - - wasm_bindgen_futures::spawn_local(async move { - let _ = rx.compat() - .try_for_each(|s| { - let _ = callback.call1(&callback, &JsValue::from_str(&s)); - ok(()) - }) - .await; - - // We need to keep `rpc_session` alive. 
- debug!("RPC subscription has ended"); - drop(rpc_session); - }); - } + /// Allows starting an RPC request. Returns a `Promise` containing the result of that request. + #[wasm_bindgen(js_name = "rpcSend")] + pub fn rpc_send(&mut self, rpc: &str) -> js_sys::Promise { + let rpc_session = RpcSession::new(mpsc01::channel(1).0); + let (tx, rx) = oneshot::channel(); + let _ = self.rpc_send_tx.unbounded_send(RpcMessage { + rpc_json: rpc.to_owned(), + session: rpc_session, + send_back: tx, + }); + wasm_bindgen_futures::future_to_promise(async { + match rx.await { + Ok(fut) => fut + .await + .map(|s| JsValue::from_str(&s)) + .ok_or_else(|| JsValue::NULL), + Err(_) => Err(JsValue::NULL), + } + }) + } + + /// Subscribes to an RPC pubsub endpoint. + #[wasm_bindgen(js_name = "rpcSubscribe")] + pub fn rpc_subscribe(&mut self, rpc: &str, callback: js_sys::Function) { + let (tx, rx) = mpsc01::channel(4); + let rpc_session = RpcSession::new(tx); + let (fut_tx, fut_rx) = oneshot::channel(); + let _ = self.rpc_send_tx.unbounded_send(RpcMessage { + rpc_json: rpc.to_owned(), + session: rpc_session.clone(), + send_back: fut_tx, + }); + wasm_bindgen_futures::spawn_local(async { + if let Ok(fut) = fut_rx.await { + fut.await; + } + }); + + wasm_bindgen_futures::spawn_local(async move { + let _ = rx + .compat() + .try_for_each(|s| { + let _ = callback.call1(&callback, &JsValue::from_str(&s)); + ok(()) + }) + .await; + + // We need to keep `rpc_session` alive. + debug!("RPC subscription has ended"); + drop(rpc_session); + }); + } } diff --git a/utils/build-script-utils/src/git.rs b/utils/build-script-utils/src/git.rs index 10f5446cb4..fb81e0cf21 100644 --- a/utils/build-script-utils/src/git.rs +++ b/utils/build-script-utils/src/git.rs @@ -22,103 +22,103 @@ use std::{env, fs, fs::File, io, io::Read, path::PathBuf}; /// The file is searched from the `CARGO_MANIFEST_DIR` upwards. If the file can not be found, /// a warning is generated. 
pub fn rerun_if_git_head_changed() { - let mut manifest_dir = PathBuf::from( - env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` is always set by cargo."), - ); - let manifest_dir_copy = manifest_dir.clone(); - - while manifest_dir.parent().is_some() { - match get_git_paths(&manifest_dir) { - Err(err) => { - eprintln!("cargo:warning=Unable to read the Git repository: {}", err); - - return; - } - Ok(None) => {} - Ok(Some(paths)) => { - for p in paths { - println!("cargo:rerun-if-changed={}", p.display()); - } - - return; - } - } - - manifest_dir.pop(); - } - - println!( - "cargo:warning=Could not find `.git/HEAD` searching from `{}` upwards!", - manifest_dir_copy.display(), - ); + let mut manifest_dir = PathBuf::from( + env::var("CARGO_MANIFEST_DIR").expect("`CARGO_MANIFEST_DIR` is always set by cargo."), + ); + let manifest_dir_copy = manifest_dir.clone(); + + while manifest_dir.parent().is_some() { + match get_git_paths(&manifest_dir) { + Err(err) => { + eprintln!("cargo:warning=Unable to read the Git repository: {}", err); + + return; + } + Ok(None) => {} + Ok(Some(paths)) => { + for p in paths { + println!("cargo:rerun-if-changed={}", p.display()); + } + + return; + } + } + + manifest_dir.pop(); + } + + println!( + "cargo:warning=Could not find `.git/HEAD` searching from `{}` upwards!", + manifest_dir_copy.display(), + ); } // Code taken from https://github.com/rustyhorde/vergen/blob/8d522db8c8e16e26c0fc9ea8e6b0247cbf5cca84/src/output/envvar.rs fn get_git_paths(path: &PathBuf) -> Result>, io::Error> { - let git_dir_or_file = path.join(".git"); - - if let Ok(metadata) = fs::metadata(&git_dir_or_file) { - if metadata.is_dir() { - // Echo the HEAD path - let git_head_path = git_dir_or_file.join("HEAD"); - - // Determine where HEAD points and echo that path also. 
- let mut f = File::open(&git_head_path)?; - let mut git_head_contents = String::new(); - let _ = f.read_to_string(&mut git_head_contents)?; - let ref_vec: Vec<&str> = git_head_contents.split(": ").collect(); - - if ref_vec.len() == 2 { - let current_head_file = ref_vec[1]; - let git_refs_path = git_dir_or_file.join(current_head_file); - - Ok(Some(vec![git_head_path, git_refs_path])) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "You are most likely in a detached HEAD state", - )) - } - } else if metadata.is_file() { - // We are in a worktree, so find out where the actual worktrees//HEAD file is. - let mut git_file = File::open(&git_dir_or_file)?; - let mut git_contents = String::new(); - let _ = git_file.read_to_string(&mut git_contents)?; - let dir_vec: Vec<&str> = git_contents.split(": ").collect(); - let git_path = dir_vec[1].trim(); - - // Echo the HEAD psth - let git_head_path = PathBuf::from(git_path).join("HEAD"); - - // Find out what the full path to the .git dir is. - let mut actual_git_dir = PathBuf::from(git_path); - actual_git_dir.pop(); - actual_git_dir.pop(); - - // Determine where HEAD points and echo that path also. 
- let mut f = File::open(&git_head_path)?; - let mut git_head_contents = String::new(); - let _ = f.read_to_string(&mut git_head_contents)?; - let ref_vec: Vec<&str> = git_head_contents.split(": ").collect(); - - if ref_vec.len() == 2 { - let current_head_file = ref_vec[1]; - let git_refs_path = actual_git_dir.join(current_head_file); - - Ok(Some(vec![git_head_path, git_refs_path])) - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "You are most likely in a detached HEAD state", - )) - } - } else { - Err(io::Error::new( - io::ErrorKind::Other, - "Invalid .git format (Not a directory or a file)", - )) - } - } else { - Ok(None) - } + let git_dir_or_file = path.join(".git"); + + if let Ok(metadata) = fs::metadata(&git_dir_or_file) { + if metadata.is_dir() { + // Echo the HEAD path + let git_head_path = git_dir_or_file.join("HEAD"); + + // Determine where HEAD points and echo that path also. + let mut f = File::open(&git_head_path)?; + let mut git_head_contents = String::new(); + let _ = f.read_to_string(&mut git_head_contents)?; + let ref_vec: Vec<&str> = git_head_contents.split(": ").collect(); + + if ref_vec.len() == 2 { + let current_head_file = ref_vec[1]; + let git_refs_path = git_dir_or_file.join(current_head_file); + + Ok(Some(vec![git_head_path, git_refs_path])) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "You are most likely in a detached HEAD state", + )) + } + } else if metadata.is_file() { + // We are in a worktree, so find out where the actual worktrees//HEAD file is. + let mut git_file = File::open(&git_dir_or_file)?; + let mut git_contents = String::new(); + let _ = git_file.read_to_string(&mut git_contents)?; + let dir_vec: Vec<&str> = git_contents.split(": ").collect(); + let git_path = dir_vec[1].trim(); + + // Echo the HEAD psth + let git_head_path = PathBuf::from(git_path).join("HEAD"); + + // Find out what the full path to the .git dir is. 
+ let mut actual_git_dir = PathBuf::from(git_path); + actual_git_dir.pop(); + actual_git_dir.pop(); + + // Determine where HEAD points and echo that path also. + let mut f = File::open(&git_head_path)?; + let mut git_head_contents = String::new(); + let _ = f.read_to_string(&mut git_head_contents)?; + let ref_vec: Vec<&str> = git_head_contents.split(": ").collect(); + + if ref_vec.len() == 2 { + let current_head_file = ref_vec[1]; + let git_refs_path = actual_git_dir.join(current_head_file); + + Ok(Some(vec![git_head_path, git_refs_path])) + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "You are most likely in a detached HEAD state", + )) + } + } else { + Err(io::Error::new( + io::ErrorKind::Other, + "Invalid .git format (Not a directory or a file)", + )) + } + } else { + Ok(None) + } } diff --git a/utils/build-script-utils/src/lib.rs b/utils/build-script-utils/src/lib.rs index 57a1e7c5cd..4d8a38c013 100644 --- a/utils/build-script-utils/src/lib.rs +++ b/utils/build-script-utils/src/lib.rs @@ -16,8 +16,8 @@ //! Crate with utility functions for `build.rs` scripts. 
-mod version; mod git; +mod version; pub use git::*; pub use version::*; diff --git a/utils/build-script-utils/src/version.rs b/utils/build-script-utils/src/version.rs index 01a97c6f38..5b4fcd14b7 100644 --- a/utils/build-script-utils/src/version.rs +++ b/utils/build-script-utils/src/version.rs @@ -19,48 +19,51 @@ use std::{borrow::Cow, process::Command}; /// Generate the `cargo:` key output pub fn generate_cargo_keys() { - let output = Command::new("git") - .args(&["rev-parse", "--short", "HEAD"]) - .output(); + let output = Command::new("git") + .args(&["rev-parse", "--short", "HEAD"]) + .output(); - let commit = match output { - Ok(o) if o.status.success() => { - let sha = String::from_utf8_lossy(&o.stdout).trim().to_owned(); - Cow::from(sha) - } - Ok(o) => { - println!("cargo:warning=Git command failed with status: {}", o.status); - Cow::from("unknown-commit") - }, - Err(err) => { - println!("cargo:warning=Failed to execute git command: {}", err); - Cow::from("unknown-commit") - }, - }; + let commit = match output { + Ok(o) if o.status.success() => { + let sha = String::from_utf8_lossy(&o.stdout).trim().to_owned(); + Cow::from(sha) + } + Ok(o) => { + println!("cargo:warning=Git command failed with status: {}", o.status); + Cow::from("unknown-commit") + } + Err(err) => { + println!("cargo:warning=Failed to execute git command: {}", err); + Cow::from("unknown-commit") + } + }; - println!("cargo:rustc-env=SUBSTRATE_CLI_IMPL_VERSION={}", get_version(&commit)) + println!( + "cargo:rustc-env=SUBSTRATE_CLI_IMPL_VERSION={}", + get_version(&commit) + ) } fn get_platform() -> String { - let env_dash = if TARGET_ENV.is_some() { "-" } else { "" }; + let env_dash = if TARGET_ENV.is_some() { "-" } else { "" }; - format!( - "{}-{}{}{}", - TARGET_ARCH.as_str(), - TARGET_OS.as_str(), - env_dash, - TARGET_ENV.map(|x| x.as_str()).unwrap_or(""), - ) + format!( + "{}-{}{}{}", + TARGET_ARCH.as_str(), + TARGET_OS.as_str(), + env_dash, + TARGET_ENV.map(|x| x.as_str()).unwrap_or(""), + 
) } fn get_version(impl_commit: &str) -> String { - let commit_dash = if impl_commit.is_empty() { "" } else { "-" }; + let commit_dash = if impl_commit.is_empty() { "" } else { "-" }; - format!( - "{}{}{}-{}", - std::env::var("CARGO_PKG_VERSION").unwrap_or_default(), - commit_dash, - impl_commit, - get_platform(), - ) + format!( + "{}{}{}-{}", + std::env::var("CARGO_PKG_VERSION").unwrap_or_default(), + commit_dash, + impl_commit, + get_platform(), + ) } diff --git a/utils/fork-tree/src/lib.rs b/utils/fork-tree/src/lib.rs index 1e7b48fed0..cefee5a314 100644 --- a/utils/fork-tree/src/lib.rs +++ b/utils/fork-tree/src/lib.rs @@ -19,54 +19,54 @@ #![warn(missing_docs)] +use codec::{Decode, Encode}; use std::cmp::Reverse; use std::fmt; -use codec::{Decode, Encode}; /// Error occurred when iterating with the tree. #[derive(Clone, Debug, PartialEq)] pub enum Error { - /// Adding duplicate node to tree. - Duplicate, - /// Finalizing descendent of tree node without finalizing ancestor(s). - UnfinalizedAncestor, - /// Imported or finalized node that is an ancestor of previously finalized node. - Revert, - /// Error throw by client when checking for node ancestry. - Client(E), + /// Adding duplicate node to tree. + Duplicate, + /// Finalizing descendent of tree node without finalizing ancestor(s). + UnfinalizedAncestor, + /// Imported or finalized node that is an ancestor of previously finalized node. + Revert, + /// Error throw by client when checking for node ancestry. 
+ Client(E), } impl fmt::Display for Error { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - let message = match *self { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + let message = match *self { Error::Duplicate => "Hash already exists in Tree".into(), Error::UnfinalizedAncestor => "Finalized descendent of Tree node without finalizing its ancestor(s) first".into(), Error::Revert => "Tried to import or finalize node that is an ancestor of a previously finalized node".into(), Error::Client(ref err) => format!("Client error: {}", err), }; - write!(f, "{}", message) - } + write!(f, "{}", message) + } } impl std::error::Error for Error { - fn cause(&self) -> Option<&dyn std::error::Error> { - None - } + fn cause(&self) -> Option<&dyn std::error::Error> { + None + } } impl From for Error { - fn from(err: E) -> Error { - Error::Client(err) - } + fn from(err: E) -> Error { + Error::Client(err) + } } /// Result of finalizing a node (that could be a part of the tree or not). #[derive(Debug, PartialEq)] pub enum FinalizationResult { - /// The tree has changed, optionally return the value associated with the finalized node. - Changed(Option), - /// The tree has not changed. - Unchanged, + /// The tree has changed, optionally return the value associated with the finalized node. + Changed(Option), + /// The tree has not changed. + Unchanged, } /// A tree data structure that stores several nodes across multiple branches. @@ -78,1173 +78,1205 @@ pub enum FinalizationResult { /// when interacting with the tree to establish a node's ancestry. #[derive(Clone, Debug, Decode, Encode, PartialEq)] pub struct ForkTree { - roots: Vec>, - best_finalized_number: Option, + roots: Vec>, + best_finalized_number: Option, } -impl ForkTree where - H: PartialEq + Clone, - N: Ord + Clone, - V: Clone, +impl ForkTree +where + H: PartialEq + Clone, + N: Ord + Clone, + V: Clone, { - /// Prune the tree, removing all non-canonical nodes. 
We find the node in the - /// tree that is the deepest ancestor of the given hash and that passes the - /// given predicate. If such a node exists, we re-root the tree to this - /// node. Otherwise the tree remains unchanged. The given function - /// `is_descendent_of` should return `true` if the second hash (target) is a - /// descendent of the first hash (base). - /// - /// Returns all pruned node data. - pub fn prune( - &mut self, - hash: &H, - number: &N, - is_descendent_of: &F, - predicate: &P, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - let new_root_index = self.find_node_index_where( - hash, - number, - is_descendent_of, - predicate, - )?; - - let removed = if let Some(mut root_index) = new_root_index { - let mut old_roots = std::mem::replace(&mut self.roots, Vec::new()); - - let mut root = None; - let mut cur_children = Some(&mut old_roots); - - while let Some(cur_index) = root_index.pop() { - if let Some(children) = cur_children.take() { - if root_index.is_empty() { - root = Some(children.remove(cur_index)); - } else { - cur_children = Some(&mut children[cur_index].children); - } - } - } - - let mut root = root - .expect("find_node_index_where will return array with at least one index; \ - this results in at least one item in removed; qed"); - - let mut removed = old_roots; - - // we found the deepest ancestor of the finalized block, so we prune - // out any children that don't include the finalized block. - let root_children = std::mem::replace(&mut root.children, Vec::new()); - let mut is_first = true; - - for child in root_children { - if is_first && - (child.number == *number && child.hash == *hash || - child.number < *number && is_descendent_of(&child.hash, hash).unwrap_or(false)) - { - root.children.push(child); - // assuming that the tree is well formed only one child should pass this requirement - // due to ancestry restrictions (i.e. they must be different forks). 
- is_first = false; - } else { - removed.push(child); - } - } - - self.roots = vec![root]; - - removed - } else { - Vec::new() - }; - - self.rebalance(); - - Ok(RemovedIterator { stack: removed }) - } + /// Prune the tree, removing all non-canonical nodes. We find the node in the + /// tree that is the deepest ancestor of the given hash and that passes the + /// given predicate. If such a node exists, we re-root the tree to this + /// node. Otherwise the tree remains unchanged. The given function + /// `is_descendent_of` should return `true` if the second hash (target) is a + /// descendent of the first hash (base). + /// + /// Returns all pruned node data. + pub fn prune( + &mut self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let new_root_index = + self.find_node_index_where(hash, number, is_descendent_of, predicate)?; + + let removed = if let Some(mut root_index) = new_root_index { + let mut old_roots = std::mem::replace(&mut self.roots, Vec::new()); + + let mut root = None; + let mut cur_children = Some(&mut old_roots); + + while let Some(cur_index) = root_index.pop() { + if let Some(children) = cur_children.take() { + if root_index.is_empty() { + root = Some(children.remove(cur_index)); + } else { + cur_children = Some(&mut children[cur_index].children); + } + } + } + + let mut root = root.expect( + "find_node_index_where will return array with at least one index; \ + this results in at least one item in removed; qed", + ); + + let mut removed = old_roots; + + // we found the deepest ancestor of the finalized block, so we prune + // out any children that don't include the finalized block. 
+ let root_children = std::mem::replace(&mut root.children, Vec::new()); + let mut is_first = true; + + for child in root_children { + if is_first + && (child.number == *number && child.hash == *hash + || child.number < *number + && is_descendent_of(&child.hash, hash).unwrap_or(false)) + { + root.children.push(child); + // assuming that the tree is well formed only one child should pass this requirement + // due to ancestry restrictions (i.e. they must be different forks). + is_first = false; + } else { + removed.push(child); + } + } + + self.roots = vec![root]; + + removed + } else { + Vec::new() + }; + + self.rebalance(); + + Ok(RemovedIterator { stack: removed }) + } } -impl ForkTree where - H: PartialEq, - N: Ord, +impl ForkTree +where + H: PartialEq, + N: Ord, { - /// Create a new empty tree. - pub fn new() -> ForkTree { - ForkTree { - roots: Vec::new(), - best_finalized_number: None, - } - } - - /// Rebalance the tree, i.e. sort child nodes by max branch depth - /// (decreasing). - /// - /// Most operations in the tree are performed with depth-first search - /// starting from the leftmost node at every level, since this tree is meant - /// to be used in a blockchain context, a good heuristic is that the node - /// we'll be looking - /// for at any point will likely be in one of the deepest chains (i.e. the - /// longest ones). - pub fn rebalance(&mut self) { - self.roots.sort_by_key(|n| Reverse(n.max_depth())); - for root in &mut self.roots { - root.rebalance(); - } - } - - /// Import a new node into the tree. The given function `is_descendent_of` - /// should return `true` if the second hash (target) is a descendent of the - /// first hash (base). This method assumes that nodes in the same branch are - /// imported in order. - /// - /// Returns `true` if the imported node is a root. 
- pub fn import( - &mut self, - mut hash: H, - mut number: N, - mut data: V, - is_descendent_of: &F, - ) -> Result> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - for root in self.roots.iter_mut() { - if root.hash == hash { - return Err(Error::Duplicate); - } - - match root.import(hash, number, data, is_descendent_of)? { - Some((h, n, d)) => { - hash = h; - number = n; - data = d; - }, - None => return Ok(false), - } - } - - self.roots.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); - - self.rebalance(); - - Ok(true) - } - - /// Iterates over the existing roots in the tree. - pub fn roots(&self) -> impl Iterator { - self.roots.iter().map(|node| (&node.hash, &node.number, &node.data)) - } - - fn node_iter(&self) -> impl Iterator> { - ForkTreeIterator { stack: self.roots.iter().collect() } - } - - /// Iterates the nodes in the tree in pre-order. - pub fn iter(&self) -> impl Iterator { - self.node_iter().map(|node| (&node.hash, &node.number, &node.data)) - } - - /// Find a node in the tree that is the deepest ancestor of the given - /// block hash and which passes the given predicate. The given function - /// `is_descendent_of` should return `true` if the second hash (target) - /// is a descendent of the first hash (base). - pub fn find_node_where( - &self, - hash: &H, - number: &N, - is_descendent_of: &F, - predicate: &P, - ) -> Result>, Error> where - E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - // search for node starting from all roots - for root in self.roots.iter() { - let node = root.find_node_where(hash, number, is_descendent_of, predicate)?; - - // found the node, early exit - if let FindOutcome::Found(node) = node { - return Ok(Some(node)); - } - } - - Ok(None) - } - - /// Map fork tree into values of new types. 
- pub fn map( - self, - f: &mut F, - ) -> ForkTree where - F: FnMut(&H, &N, V) -> VT, - { - let roots = self.roots - .into_iter() - .map(|root| { - root.map(f) - }) - .collect(); - - ForkTree { - roots, - best_finalized_number: self.best_finalized_number, - } - } - - /// Same as [`find_node_where`](Self::find_node_where), but returns mutable reference. - pub fn find_node_where_mut( - &mut self, - hash: &H, - number: &N, - is_descendent_of: &F, - predicate: &P, - ) -> Result>, Error> where - E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - // search for node starting from all roots - for root in self.roots.iter_mut() { - let node = root.find_node_where_mut(hash, number, is_descendent_of, predicate)?; - - // found the node, early exit - if let FindOutcome::Found(node) = node { - return Ok(Some(node)); - } - } - - Ok(None) - } - - /// Same as [`find_node_where`](Self::find_node_where), but returns indexes. - pub fn find_node_index_where( - &self, - hash: &H, - number: &N, - is_descendent_of: &F, - predicate: &P, - ) -> Result>, Error> where - E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - // search for node starting from all roots - for (index, root) in self.roots.iter().enumerate() { - let node = root.find_node_index_where(hash, number, is_descendent_of, predicate)?; - - // found the node, early exit - if let FindOutcome::Found(mut node) = node { - node.push(index); - return Ok(Some(node)); - } - } - - Ok(None) - } - - /// Finalize a root in the tree and return it, return `None` in case no root - /// with the given hash exists. All other roots are pruned, and the children - /// of the finalized node become the new roots. - pub fn finalize_root(&mut self, hash: &H) -> Option { - self.roots.iter().position(|node| node.hash == *hash) - .map(|position| self.finalize_root_at(position)) - } - - /// Finalize root at given position. See `finalize_root` comment for details. 
- fn finalize_root_at(&mut self, position: usize) -> V { - let node = self.roots.swap_remove(position); - self.roots = node.children; - self.best_finalized_number = Some(node.number); - return node.data; - } - - /// Finalize a node in the tree. This method will make sure that the node - /// being finalized is either an existing root (and return its data), or a - /// node from a competing branch (not in the tree), tree pruning is done - /// accordingly. The given function `is_descendent_of` should return `true` - /// if the second hash (target) is a descendent of the first hash (base). - pub fn finalize( - &mut self, - hash: &H, - number: N, - is_descendent_of: &F, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - // check if one of the current roots is being finalized - if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); - } - - // make sure we're not finalizing a descendent of any root - for root in self.roots.iter() { - if number > root.number && is_descendent_of(&root.hash, hash)? { - return Err(Error::UnfinalizedAncestor); - } - } - - // we finalized a block earlier than any existing root (or possibly - // another fork not part of the tree). make sure to only keep roots that - // are part of the finalized branch - let mut changed = false; - self.roots.retain(|root| { - let retain = root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); - - if !retain { - changed = true; - } - - retain - }); - - self.best_finalized_number = Some(number); - - if changed { - Ok(FinalizationResult::Changed(None)) - } else { - Ok(FinalizationResult::Unchanged) - } - } - - /// Finalize a node in the tree and all its ancestors. 
The given function - /// `is_descendent_of` should return `true` if the second hash (target) is - // a descendent of the first hash (base). - pub fn finalize_with_ancestors( - &mut self, - hash: &H, - number: N, - is_descendent_of: &F, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - // check if one of the current roots is being finalized - if let Some(root) = self.finalize_root(hash) { - return Ok(FinalizationResult::Changed(Some(root))); - } - - // we need to: - // 1) remove all roots that are not ancestors AND not descendants of finalized block; - // 2) if node is descendant - just leave it; - // 3) if node is ancestor - 'open it' - let mut changed = false; - let mut idx = 0; - while idx != self.roots.len() { - let (is_finalized, is_descendant, is_ancestor) = { - let root = &self.roots[idx]; - let is_finalized = root.hash == *hash; - let is_descendant = !is_finalized - && root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); - let is_ancestor = !is_finalized && !is_descendant - && root.number < number && is_descendent_of(&root.hash, hash).unwrap_or(false); - (is_finalized, is_descendant, is_ancestor) - }; - - // if we have met finalized root - open it and return - if is_finalized { - return Ok(FinalizationResult::Changed(Some(self.finalize_root_at(idx)))); - } - - // if node is descendant of finalized block - just leave it as is - if is_descendant { - idx += 1; - continue; - } - - // if node is ancestor of finalized block - remove it and continue with children - if is_ancestor { - let root = self.roots.swap_remove(idx); - self.roots.extend(root.children); - changed = true; - continue; - } - - // if node is neither ancestor, nor descendant of the finalized block - remove it - self.roots.swap_remove(idx); - changed = true; - } - - self.best_finalized_number = 
Some(number); - - if changed { - Ok(FinalizationResult::Changed(None)) - } else { - Ok(FinalizationResult::Unchanged) - } - } - - /// Checks if any node in the tree is finalized by either finalizing the - /// node itself or a child node that's not in the tree, guaranteeing that - /// the node being finalized isn't a descendent of any of the node's - /// children. Returns `Some(true)` if the node being finalized is a root, - /// `Some(false)` if the node being finalized is not a root, and `None` if - /// no node in the tree is finalized. The given `predicate` is checked on - /// the prospective finalized root and must pass for finalization to occur. - /// The given function `is_descendent_of` should return `true` if the second - /// hash (target) is a descendent of the first hash (base). - pub fn finalizes_any_with_descendent_if( - &self, - hash: &H, - number: N, - is_descendent_of: &F, - predicate: P, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - // check if the given hash is equal or a descendent of any node in the - // tree, if we find a valid node that passes the predicate then we must - // ensure that we're not finalizing past any of its child nodes. - for node in self.node_iter() { - if predicate(&node.data) { - if node.hash == *hash || is_descendent_of(&node.hash, hash)? { - for node in node.children.iter() { - if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); - } - } - - return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))); - } - } - } - - Ok(None) - } - - /// Finalize a root in the tree by either finalizing the node itself or a - /// child node that's not in the tree, guaranteeing that the node being - /// finalized isn't a descendent of any of the root's children. 
The given - /// `predicate` is checked on the prospective finalized root and must pass for - /// finalization to occur. The given function `is_descendent_of` should - /// return `true` if the second hash (target) is a descendent of the first - /// hash (base). - pub fn finalize_with_descendent_if( - &mut self, - hash: &H, - number: N, - is_descendent_of: &F, - predicate: P, - ) -> Result, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - if let Some(ref best_finalized_number) = self.best_finalized_number { - if number <= *best_finalized_number { - return Err(Error::Revert); - } - } - - // check if the given hash is equal or a a descendent of any root, if we - // find a valid root that passes the predicate then we must ensure that - // we're not finalizing past any children node. - let mut position = None; - for (i, root) in self.roots.iter().enumerate() { - if predicate(&root.data) { - if root.hash == *hash || is_descendent_of(&root.hash, hash)? { - for node in root.children.iter() { - if node.number <= number && is_descendent_of(&node.hash, &hash)? { - return Err(Error::UnfinalizedAncestor); - } - } - - position = Some(i); - break; - } - } - } - - let node_data = position.map(|i| { - let node = self.roots.swap_remove(i); - self.roots = node.children; - self.best_finalized_number = Some(node.number); - node.data - }); - - // if the block being finalized is earlier than a given root, then it - // must be its ancestor, otherwise we can prune the root. if there's a - // root at the same height then the hashes must match. otherwise the - // node being finalized is higher than the root so it must be its - // descendent (in this case the node wasn't finalized earlier presumably - // because the predicate didn't pass). 
- let mut changed = false; - self.roots.retain(|root| { - let retain = - root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false) || - root.number == number && root.hash == *hash || - is_descendent_of(&root.hash, hash).unwrap_or(false); - - if !retain { - changed = true; - } - - retain - }); - - self.best_finalized_number = Some(number); - - match (node_data, changed) { - (Some(data), _) => Ok(FinalizationResult::Changed(Some(data))), - (None, true) => Ok(FinalizationResult::Changed(None)), - (None, false) => Ok(FinalizationResult::Unchanged), - } - } + /// Create a new empty tree. + pub fn new() -> ForkTree { + ForkTree { + roots: Vec::new(), + best_finalized_number: None, + } + } + + /// Rebalance the tree, i.e. sort child nodes by max branch depth + /// (decreasing). + /// + /// Most operations in the tree are performed with depth-first search + /// starting from the leftmost node at every level, since this tree is meant + /// to be used in a blockchain context, a good heuristic is that the node + /// we'll be looking + /// for at any point will likely be in one of the deepest chains (i.e. the + /// longest ones). + pub fn rebalance(&mut self) { + self.roots.sort_by_key(|n| Reverse(n.max_depth())); + for root in &mut self.roots { + root.rebalance(); + } + } + + /// Import a new node into the tree. The given function `is_descendent_of` + /// should return `true` if the second hash (target) is a descendent of the + /// first hash (base). This method assumes that nodes in the same branch are + /// imported in order. + /// + /// Returns `true` if the imported node is a root. 
+ pub fn import( + &mut self, + mut hash: H, + mut number: N, + mut data: V, + is_descendent_of: &F, + ) -> Result> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert); + } + } + + for root in self.roots.iter_mut() { + if root.hash == hash { + return Err(Error::Duplicate); + } + + match root.import(hash, number, data, is_descendent_of)? { + Some((h, n, d)) => { + hash = h; + number = n; + data = d; + } + None => return Ok(false), + } + } + + self.roots.push(Node { + data, + hash: hash, + number: number, + children: Vec::new(), + }); + + self.rebalance(); + + Ok(true) + } + + /// Iterates over the existing roots in the tree. + pub fn roots(&self) -> impl Iterator { + self.roots + .iter() + .map(|node| (&node.hash, &node.number, &node.data)) + } + + fn node_iter(&self) -> impl Iterator> { + ForkTreeIterator { + stack: self.roots.iter().collect(), + } + } + + /// Iterates the nodes in the tree in pre-order. + pub fn iter(&self) -> impl Iterator { + self.node_iter() + .map(|node| (&node.hash, &node.number, &node.data)) + } + + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash and which passes the given predicate. The given function + /// `is_descendent_of` should return `true` if the second hash (target) + /// is a descendent of the first hash (base). + pub fn find_node_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // search for node starting from all roots + for root in self.roots.iter() { + let node = root.find_node_where(hash, number, is_descendent_of, predicate)?; + + // found the node, early exit + if let FindOutcome::Found(node) = node { + return Ok(Some(node)); + } + } + + Ok(None) + } + + /// Map fork tree into values of new types. 
+ pub fn map(self, f: &mut F) -> ForkTree + where + F: FnMut(&H, &N, V) -> VT, + { + let roots = self.roots.into_iter().map(|root| root.map(f)).collect(); + + ForkTree { + roots, + best_finalized_number: self.best_finalized_number, + } + } + + /// Same as [`find_node_where`](Self::find_node_where), but returns mutable reference. + pub fn find_node_where_mut( + &mut self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // search for node starting from all roots + for root in self.roots.iter_mut() { + let node = root.find_node_where_mut(hash, number, is_descendent_of, predicate)?; + + // found the node, early exit + if let FindOutcome::Found(node) = node { + return Ok(Some(node)); + } + } + + Ok(None) + } + + /// Same as [`find_node_where`](Self::find_node_where), but returns indexes. + pub fn find_node_index_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // search for node starting from all roots + for (index, root) in self.roots.iter().enumerate() { + let node = root.find_node_index_where(hash, number, is_descendent_of, predicate)?; + + // found the node, early exit + if let FindOutcome::Found(mut node) = node { + node.push(index); + return Ok(Some(node)); + } + } + + Ok(None) + } + + /// Finalize a root in the tree and return it, return `None` in case no root + /// with the given hash exists. All other roots are pruned, and the children + /// of the finalized node become the new roots. + pub fn finalize_root(&mut self, hash: &H) -> Option { + self.roots + .iter() + .position(|node| node.hash == *hash) + .map(|position| self.finalize_root_at(position)) + } + + /// Finalize root at given position. See `finalize_root` comment for details. 
+ fn finalize_root_at(&mut self, position: usize) -> V { + let node = self.roots.swap_remove(position); + self.roots = node.children; + self.best_finalized_number = Some(node.number); + return node.data; + } + + /// Finalize a node in the tree. This method will make sure that the node + /// being finalized is either an existing root (and return its data), or a + /// node from a competing branch (not in the tree), tree pruning is done + /// accordingly. The given function `is_descendent_of` should return `true` + /// if the second hash (target) is a descendent of the first hash (base). + pub fn finalize( + &mut self, + hash: &H, + number: N, + is_descendent_of: &F, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert); + } + } + + // check if one of the current roots is being finalized + if let Some(root) = self.finalize_root(hash) { + return Ok(FinalizationResult::Changed(Some(root))); + } + + // make sure we're not finalizing a descendent of any root + for root in self.roots.iter() { + if number > root.number && is_descendent_of(&root.hash, hash)? { + return Err(Error::UnfinalizedAncestor); + } + } + + // we finalized a block earlier than any existing root (or possibly + // another fork not part of the tree). make sure to only keep roots that + // are part of the finalized branch + let mut changed = false; + self.roots.retain(|root| { + let retain = + root.number > number && is_descendent_of(hash, &root.hash).unwrap_or(false); + + if !retain { + changed = true; + } + + retain + }); + + self.best_finalized_number = Some(number); + + if changed { + Ok(FinalizationResult::Changed(None)) + } else { + Ok(FinalizationResult::Unchanged) + } + } + + /// Finalize a node in the tree and all its ancestors. 
The given function + /// `is_descendent_of` should return `true` if the second hash (target) is + // a descendent of the first hash (base). + pub fn finalize_with_ancestors( + &mut self, + hash: &H, + number: N, + is_descendent_of: &F, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert); + } + } + + // check if one of the current roots is being finalized + if let Some(root) = self.finalize_root(hash) { + return Ok(FinalizationResult::Changed(Some(root))); + } + + // we need to: + // 1) remove all roots that are not ancestors AND not descendants of finalized block; + // 2) if node is descendant - just leave it; + // 3) if node is ancestor - 'open it' + let mut changed = false; + let mut idx = 0; + while idx != self.roots.len() { + let (is_finalized, is_descendant, is_ancestor) = { + let root = &self.roots[idx]; + let is_finalized = root.hash == *hash; + let is_descendant = !is_finalized + && root.number > number + && is_descendent_of(hash, &root.hash).unwrap_or(false); + let is_ancestor = !is_finalized + && !is_descendant + && root.number < number + && is_descendent_of(&root.hash, hash).unwrap_or(false); + (is_finalized, is_descendant, is_ancestor) + }; + + // if we have met finalized root - open it and return + if is_finalized { + return Ok(FinalizationResult::Changed(Some( + self.finalize_root_at(idx), + ))); + } + + // if node is descendant of finalized block - just leave it as is + if is_descendant { + idx += 1; + continue; + } + + // if node is ancestor of finalized block - remove it and continue with children + if is_ancestor { + let root = self.roots.swap_remove(idx); + self.roots.extend(root.children); + changed = true; + continue; + } + + // if node is neither ancestor, nor descendant of the finalized block - remove it + self.roots.swap_remove(idx); + changed = true; + } + + 
self.best_finalized_number = Some(number); + + if changed { + Ok(FinalizationResult::Changed(None)) + } else { + Ok(FinalizationResult::Unchanged) + } + } + + /// Checks if any node in the tree is finalized by either finalizing the + /// node itself or a child node that's not in the tree, guaranteeing that + /// the node being finalized isn't a descendent of any of the node's + /// children. Returns `Some(true)` if the node being finalized is a root, + /// `Some(false)` if the node being finalized is not a root, and `None` if + /// no node in the tree is finalized. The given `predicate` is checked on + /// the prospective finalized root and must pass for finalization to occur. + /// The given function `is_descendent_of` should return `true` if the second + /// hash (target) is a descendent of the first hash (base). + pub fn finalizes_any_with_descendent_if( + &self, + hash: &H, + number: N, + is_descendent_of: &F, + predicate: P, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert); + } + } + + // check if the given hash is equal or a descendent of any node in the + // tree, if we find a valid node that passes the predicate then we must + // ensure that we're not finalizing past any of its child nodes. + for node in self.node_iter() { + if predicate(&node.data) { + if node.hash == *hash || is_descendent_of(&node.hash, hash)? { + for node in node.children.iter() { + if node.number <= number && is_descendent_of(&node.hash, &hash)? 
{ + return Err(Error::UnfinalizedAncestor); + } + } + + return Ok(Some(self.roots.iter().any(|root| root.hash == node.hash))); + } + } + } + + Ok(None) + } + + /// Finalize a root in the tree by either finalizing the node itself or a + /// child node that's not in the tree, guaranteeing that the node being + /// finalized isn't a descendent of any of the root's children. The given + /// `predicate` is checked on the prospective finalized root and must pass for + /// finalization to occur. The given function `is_descendent_of` should + /// return `true` if the second hash (target) is a descendent of the first + /// hash (base). + pub fn finalize_with_descendent_if( + &mut self, + hash: &H, + number: N, + is_descendent_of: &F, + predicate: P, + ) -> Result, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + if let Some(ref best_finalized_number) = self.best_finalized_number { + if number <= *best_finalized_number { + return Err(Error::Revert); + } + } + + // check if the given hash is equal or a a descendent of any root, if we + // find a valid root that passes the predicate then we must ensure that + // we're not finalizing past any children node. + let mut position = None; + for (i, root) in self.roots.iter().enumerate() { + if predicate(&root.data) { + if root.hash == *hash || is_descendent_of(&root.hash, hash)? { + for node in root.children.iter() { + if node.number <= number && is_descendent_of(&node.hash, &hash)? { + return Err(Error::UnfinalizedAncestor); + } + } + + position = Some(i); + break; + } + } + } + + let node_data = position.map(|i| { + let node = self.roots.swap_remove(i); + self.roots = node.children; + self.best_finalized_number = Some(node.number); + node.data + }); + + // if the block being finalized is earlier than a given root, then it + // must be its ancestor, otherwise we can prune the root. if there's a + // root at the same height then the hashes must match. 
otherwise the + // node being finalized is higher than the root so it must be its + // descendent (in this case the node wasn't finalized earlier presumably + // because the predicate didn't pass). + let mut changed = false; + self.roots.retain(|root| { + let retain = root.number > number + && is_descendent_of(hash, &root.hash).unwrap_or(false) + || root.number == number && root.hash == *hash + || is_descendent_of(&root.hash, hash).unwrap_or(false); + + if !retain { + changed = true; + } + + retain + }); + + self.best_finalized_number = Some(number); + + match (node_data, changed) { + (Some(data), _) => Ok(FinalizationResult::Changed(Some(data))), + (None, true) => Ok(FinalizationResult::Changed(None)), + (None, false) => Ok(FinalizationResult::Unchanged), + } + } } // Workaround for: https://github.com/rust-lang/rust/issues/34537 mod node_implementation { - use super::*; - - /// The outcome of a search within a node. - pub enum FindOutcome { - // this is the node we were looking for. - Found(T), - // not the node we're looking for. contains a flag indicating - // whether the node was a descendent. true implies the predicate failed. - Failure(bool), - // Abort search. - Abort, - } - - #[derive(Clone, Debug, Decode, Encode, PartialEq)] - pub struct Node { - pub hash: H, - pub number: N, - pub data: V, - pub children: Vec>, - } - - impl Node { - /// Rebalance the tree, i.e. sort child nodes by max branch depth (decreasing). - pub fn rebalance(&mut self) { - self.children.sort_by_key(|n| Reverse(n.max_depth())); - for child in &mut self.children { - child.rebalance(); - } - } - - /// Finds the max depth among all branches descendent from this node. - pub fn max_depth(&self) -> usize { - let mut max = 0; - - for node in &self.children { - max = node.max_depth().max(max) - } - - max + 1 - } - - /// Map node data into values of new types. 
- pub fn map( - self, - f: &mut F, - ) -> Node where - F: FnMut(&H, &N, V) -> VT, - { - let children = self.children - .into_iter() - .map(|node| { - node.map(f) - }) - .collect(); - - let vt = f(&self.hash, &self.number, self.data); - Node { - hash: self.hash, - number: self.number, - data: vt, - children, - } - } - - pub fn import( - &mut self, - mut hash: H, - mut number: N, - mut data: V, - is_descendent_of: &F, - ) -> Result, Error> - where E: fmt::Debug, - F: Fn(&H, &H) -> Result, - { - if self.hash == hash { - return Err(Error::Duplicate); - }; - - if number <= self.number { return Ok(Some((hash, number, data))); } - - for node in self.children.iter_mut() { - match node.import(hash, number, data, is_descendent_of)? { - Some((h, n, d)) => { - hash = h; - number = n; - data = d; - }, - None => return Ok(None), - } - } - - if is_descendent_of(&self.hash, &hash)? { - self.children.push(Node { - data, - hash: hash, - number: number, - children: Vec::new(), - }); - - Ok(None) - } else { - Ok(Some((hash, number, data))) - } - } - - /// Find a node in the tree that is the deepest ancestor of the given - /// block hash which also passes the given predicate, backtracking - /// when the predicate fails. - /// The given function `is_descendent_of` should return `true` if the second hash (target) - /// is a descendent of the first hash (base). - /// - /// The returned indices are from last to first. The earliest index in the traverse path - /// goes last, and the final index in the traverse path goes first. An empty list means - /// that the current node is the result. 
- pub fn find_node_index_where( - &self, - hash: &H, - number: &N, - is_descendent_of: &F, - predicate: &P, - ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - // stop searching this branch - if *number < self.number { - return Ok(FindOutcome::Failure(false)); - } - - let mut known_descendent_of = false; - - // continue depth-first search through all children - for (i, node) in self.children.iter().enumerate() { - // found node, early exit - match node.find_node_index_where(hash, number, is_descendent_of, predicate)? { - FindOutcome::Abort => return Ok(FindOutcome::Abort), - FindOutcome::Found(mut x) => { - x.push(i); - return Ok(FindOutcome::Found(x)) - }, - FindOutcome::Failure(true) => { - // if the block was a descendent of this child, - // then it cannot be a descendent of any others, - // so we don't search them. - known_descendent_of = true; - break; - }, - FindOutcome::Failure(false) => {}, - } - } - - // node not found in any of the descendents, if the node we're - // searching for is a descendent of this node then we will stop the - // search here, since there aren't any more children and we found - // the correct node so we don't want to backtrack. - let is_descendent_of = known_descendent_of || is_descendent_of(&self.hash, hash)?; - if is_descendent_of { - // if the predicate passes we return the node - if predicate(&self.data) { - return Ok(FindOutcome::Found(Vec::new())); - } - } - - // otherwise, tell our ancestor that we failed, and whether - // the block was a descendent. - Ok(FindOutcome::Failure(is_descendent_of)) - } - - /// Find a node in the tree that is the deepest ancestor of the given - /// block hash which also passes the given predicate, backtracking - /// when the predicate fails. - /// The given function `is_descendent_of` should return `true` if the second hash (target) - /// is a descendent of the first hash (base). 
- pub fn find_node_where( - &self, - hash: &H, - number: &N, - is_descendent_of: &F, - predicate: &P, - ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; - - match outcome { - FindOutcome::Abort => Ok(FindOutcome::Abort), - FindOutcome::Failure(f) => Ok(FindOutcome::Failure(f)), - FindOutcome::Found(mut indexes) => { - let mut cur = self; - - while let Some(i) = indexes.pop() { - cur = &cur.children[i]; - } - Ok(FindOutcome::Found(cur)) - }, - } - } - - /// Find a node in the tree that is the deepest ancestor of the given - /// block hash which also passes the given predicate, backtracking - /// when the predicate fails. - /// The given function `is_descendent_of` should return `true` if the second hash (target) - /// is a descendent of the first hash (base). - pub fn find_node_where_mut( - &mut self, - hash: &H, - number: &N, - is_descendent_of: &F, - predicate: &P, - ) -> Result>, Error> - where E: std::error::Error, - F: Fn(&H, &H) -> Result, - P: Fn(&V) -> bool, - { - let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; - - match outcome { - FindOutcome::Abort => Ok(FindOutcome::Abort), - FindOutcome::Failure(f) => Ok(FindOutcome::Failure(f)), - FindOutcome::Found(mut indexes) => { - let mut cur = self; - - while let Some(i) = indexes.pop() { - cur = &mut cur.children[i]; - } - Ok(FindOutcome::Found(cur)) - }, - } - } - } + use super::*; + + /// The outcome of a search within a node. + pub enum FindOutcome { + // this is the node we were looking for. + Found(T), + // not the node we're looking for. contains a flag indicating + // whether the node was a descendent. true implies the predicate failed. + Failure(bool), + // Abort search. 
+ Abort, + } + + #[derive(Clone, Debug, Decode, Encode, PartialEq)] + pub struct Node { + pub hash: H, + pub number: N, + pub data: V, + pub children: Vec>, + } + + impl Node { + /// Rebalance the tree, i.e. sort child nodes by max branch depth (decreasing). + pub fn rebalance(&mut self) { + self.children.sort_by_key(|n| Reverse(n.max_depth())); + for child in &mut self.children { + child.rebalance(); + } + } + + /// Finds the max depth among all branches descendent from this node. + pub fn max_depth(&self) -> usize { + let mut max = 0; + + for node in &self.children { + max = node.max_depth().max(max) + } + + max + 1 + } + + /// Map node data into values of new types. + pub fn map(self, f: &mut F) -> Node + where + F: FnMut(&H, &N, V) -> VT, + { + let children = self.children.into_iter().map(|node| node.map(f)).collect(); + + let vt = f(&self.hash, &self.number, self.data); + Node { + hash: self.hash, + number: self.number, + data: vt, + children, + } + } + + pub fn import( + &mut self, + mut hash: H, + mut number: N, + mut data: V, + is_descendent_of: &F, + ) -> Result, Error> + where + E: fmt::Debug, + F: Fn(&H, &H) -> Result, + { + if self.hash == hash { + return Err(Error::Duplicate); + }; + + if number <= self.number { + return Ok(Some((hash, number, data))); + } + + for node in self.children.iter_mut() { + match node.import(hash, number, data, is_descendent_of)? { + Some((h, n, d)) => { + hash = h; + number = n; + data = d; + } + None => return Ok(None), + } + } + + if is_descendent_of(&self.hash, &hash)? { + self.children.push(Node { + data, + hash: hash, + number: number, + children: Vec::new(), + }); + + Ok(None) + } else { + Ok(Some((hash, number, data))) + } + } + + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash which also passes the given predicate, backtracking + /// when the predicate fails. 
+ /// The given function `is_descendent_of` should return `true` if the second hash (target) + /// is a descendent of the first hash (base). + /// + /// The returned indices are from last to first. The earliest index in the traverse path + /// goes last, and the final index in the traverse path goes first. An empty list means + /// that the current node is the result. + pub fn find_node_index_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + // stop searching this branch + if *number < self.number { + return Ok(FindOutcome::Failure(false)); + } + + let mut known_descendent_of = false; + + // continue depth-first search through all children + for (i, node) in self.children.iter().enumerate() { + // found node, early exit + match node.find_node_index_where(hash, number, is_descendent_of, predicate)? { + FindOutcome::Abort => return Ok(FindOutcome::Abort), + FindOutcome::Found(mut x) => { + x.push(i); + return Ok(FindOutcome::Found(x)); + } + FindOutcome::Failure(true) => { + // if the block was a descendent of this child, + // then it cannot be a descendent of any others, + // so we don't search them. + known_descendent_of = true; + break; + } + FindOutcome::Failure(false) => {} + } + } + + // node not found in any of the descendents, if the node we're + // searching for is a descendent of this node then we will stop the + // search here, since there aren't any more children and we found + // the correct node so we don't want to backtrack. + let is_descendent_of = known_descendent_of || is_descendent_of(&self.hash, hash)?; + if is_descendent_of { + // if the predicate passes we return the node + if predicate(&self.data) { + return Ok(FindOutcome::Found(Vec::new())); + } + } + + // otherwise, tell our ancestor that we failed, and whether + // the block was a descendent. 
+ Ok(FindOutcome::Failure(is_descendent_of)) + } + + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash which also passes the given predicate, backtracking + /// when the predicate fails. + /// The given function `is_descendent_of` should return `true` if the second hash (target) + /// is a descendent of the first hash (base). + pub fn find_node_where( + &self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; + + match outcome { + FindOutcome::Abort => Ok(FindOutcome::Abort), + FindOutcome::Failure(f) => Ok(FindOutcome::Failure(f)), + FindOutcome::Found(mut indexes) => { + let mut cur = self; + + while let Some(i) = indexes.pop() { + cur = &cur.children[i]; + } + Ok(FindOutcome::Found(cur)) + } + } + } + + /// Find a node in the tree that is the deepest ancestor of the given + /// block hash which also passes the given predicate, backtracking + /// when the predicate fails. + /// The given function `is_descendent_of` should return `true` if the second hash (target) + /// is a descendent of the first hash (base). 
+ pub fn find_node_where_mut( + &mut self, + hash: &H, + number: &N, + is_descendent_of: &F, + predicate: &P, + ) -> Result>, Error> + where + E: std::error::Error, + F: Fn(&H, &H) -> Result, + P: Fn(&V) -> bool, + { + let outcome = self.find_node_index_where(hash, number, is_descendent_of, predicate)?; + + match outcome { + FindOutcome::Abort => Ok(FindOutcome::Abort), + FindOutcome::Failure(f) => Ok(FindOutcome::Failure(f)), + FindOutcome::Found(mut indexes) => { + let mut cur = self; + + while let Some(i) = indexes.pop() { + cur = &mut cur.children[i]; + } + Ok(FindOutcome::Found(cur)) + } + } + } + } } // Workaround for: https://github.com/rust-lang/rust/issues/34537 -use node_implementation::{Node, FindOutcome}; +use node_implementation::{FindOutcome, Node}; struct ForkTreeIterator<'a, H, N, V> { - stack: Vec<&'a Node>, + stack: Vec<&'a Node>, } impl<'a, H, N, V> Iterator for ForkTreeIterator<'a, H, N, V> { - type Item = &'a Node; - - fn next(&mut self) -> Option { - self.stack.pop().map(|node| { - // child nodes are stored ordered by max branch height (decreasing), - // we want to keep this ordering while iterating but since we're - // using a stack for iterator state we need to reverse it. - self.stack.extend(node.children.iter().rev()); - node - }) - } + type Item = &'a Node; + + fn next(&mut self) -> Option { + self.stack.pop().map(|node| { + // child nodes are stored ordered by max branch height (decreasing), + // we want to keep this ordering while iterating but since we're + // using a stack for iterator state we need to reverse it. 
+ self.stack.extend(node.children.iter().rev()); + node + }) + } } struct RemovedIterator { - stack: Vec>, + stack: Vec>, } impl Iterator for RemovedIterator { - type Item = (H, N, V); - - fn next(&mut self) -> Option { - self.stack.pop().map(|mut node| { - // child nodes are stored ordered by max branch height (decreasing), - // we want to keep this ordering while iterating but since we're - // using a stack for iterator state we need to reverse it. - let mut children = Vec::new(); - std::mem::swap(&mut children, &mut node.children); - - self.stack.extend(children.into_iter().rev()); - (node.hash, node.number, node.data) - }) - } + type Item = (H, N, V); + + fn next(&mut self) -> Option { + self.stack.pop().map(|mut node| { + // child nodes are stored ordered by max branch height (decreasing), + // we want to keep this ordering while iterating but since we're + // using a stack for iterator state we need to reverse it. + let mut children = Vec::new(); + std::mem::swap(&mut children, &mut node.children); + + self.stack.extend(children.into_iter().rev()); + (node.hash, node.number, node.data) + }) + } } #[cfg(test)] mod test { - use super::{FinalizationResult, ForkTree, Error}; - - #[derive(Debug, PartialEq)] - struct TestError; - - impl std::fmt::Display for TestError { - fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { - write!(f, "TestError") - } - } - - impl std::error::Error for TestError {} - - fn test_fork_tree<'a>() -> (ForkTree<&'a str, u64, ()>, impl Fn(&&str, &&str) -> Result) { - let mut tree = ForkTree::new(); - - // - // - B - C - D - E - // / - // / - G - // / / - // A - F - H - I - // \ - // - L - M - // \ - // - O - // \ - // — J - K - // - // (where N is not a part of fork tree) - let is_descendent_of = |base: &&str, block: &&str| -> Result { - let letters = vec!["B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "O"]; - match (*base, *block) { - ("A", b) => Ok(letters.into_iter().any(|n| n == b)), - ("B", b) => Ok(b == "C" 
|| b == "D" || b == "E"), - ("C", b) => Ok(b == "D" || b == "E"), - ("D", b) => Ok(b == "E"), - ("E", _) => Ok(false), - ("F", b) => Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "O"), - ("G", _) => Ok(false), - ("H", b) => Ok(b == "I" || b == "L" || b == "M" || b == "O"), - ("I", _) => Ok(false), - ("J", b) => Ok(b == "K"), - ("K", _) => Ok(false), - ("L", b) => Ok(b == "M" || b == "O"), - ("M", _) => Ok(false), - ("O", _) => Ok(false), - ("0", _) => Ok(true), - _ => Ok(false), - } - }; - - tree.import("A", 1, (), &is_descendent_of).unwrap(); - - tree.import("B", 2, (), &is_descendent_of).unwrap(); - tree.import("C", 3, (), &is_descendent_of).unwrap(); - tree.import("D", 4, (), &is_descendent_of).unwrap(); - tree.import("E", 5, (), &is_descendent_of).unwrap(); - - tree.import("F", 2, (), &is_descendent_of).unwrap(); - tree.import("G", 3, (), &is_descendent_of).unwrap(); - - tree.import("H", 3, (), &is_descendent_of).unwrap(); - tree.import("I", 4, (), &is_descendent_of).unwrap(); - tree.import("L", 4, (), &is_descendent_of).unwrap(); - tree.import("M", 5, (), &is_descendent_of).unwrap(); - tree.import("O", 5, (), &is_descendent_of).unwrap(); - - tree.import("J", 2, (), &is_descendent_of).unwrap(); - tree.import("K", 3, (), &is_descendent_of).unwrap(); - - (tree, is_descendent_of) - } - - #[test] - fn import_doesnt_revert() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - tree.finalize_root(&"A"); - - assert_eq!( - tree.best_finalized_number, - Some(1), - ); - - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Revert), - ); - } - - #[test] - fn import_doesnt_add_duplicates() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - assert_eq!( - tree.import("A", 1, (), &is_descendent_of), - Err(Error::Duplicate), - ); - - assert_eq!( - tree.import("I", 4, (), &is_descendent_of), - Err(Error::Duplicate), - ); - - assert_eq!( - tree.import("G", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); - - 
assert_eq!( - tree.import("K", 3, (), &is_descendent_of), - Err(Error::Duplicate), - ); - } - - #[test] - fn finalize_root_works() { - let finalize_a = || { - let (mut tree, ..) = test_fork_tree(); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("A", 1)], - ); - - // finalizing "A" opens up three possible forks - tree.finalize_root(&"A"); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("B", 2), ("F", 2), ("J", 2)], - ); - - tree - }; - - { - let mut tree = finalize_a(); - - // finalizing "B" will progress on its fork and remove any other competing forks - tree.finalize_root(&"B"); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("C", 3)], - ); - - // all the other forks have been pruned - assert!(tree.roots.len() == 1); - } - - { - let mut tree = finalize_a(); - - // finalizing "J" will progress on its fork and remove any other competing forks - tree.finalize_root(&"J"); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("K", 3)], - ); - - // all the other forks have been pruned - assert!(tree.roots.len() == 1); - } - } - - #[test] - fn finalize_works() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - let original_roots = tree.roots.clone(); - - // finalizing a block prior to any in the node doesn't change the tree - assert_eq!( - tree.finalize(&"0", 0, &is_descendent_of), - Ok(FinalizationResult::Unchanged), - ); - - assert_eq!(tree.roots, original_roots); - - // finalizing "A" opens up three possible forks - assert_eq!( - tree.finalize(&"A", 1, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("B", 2), ("F", 2), ("J", 2)], - ); - - // finalizing anything lower than what we observed will fail - assert_eq!( - tree.best_finalized_number, - Some(1), - ); - - assert_eq!( - 
tree.finalize(&"Z", 1, &is_descendent_of), - Err(Error::Revert), - ); - - // trying to finalize a node without finalizing its ancestors first will fail - assert_eq!( - tree.finalize(&"H", 3, &is_descendent_of), - Err(Error::UnfinalizedAncestor), - ); - - // after finalizing "F" we can finalize "H" - assert_eq!( - tree.finalize(&"F", 2, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.finalize(&"H", 3, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], - ); - - // finalizing a node from another fork that isn't part of the tree clears the tree - assert_eq!( - tree.finalize(&"Z", 5, &is_descendent_of), - Ok(FinalizationResult::Changed(None)), - ); - - assert!(tree.roots.is_empty()); - } - - #[test] - fn finalize_with_ancestor_works() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - let original_roots = tree.roots.clone(); - - // finalizing a block prior to any in the node doesn't change the tree - assert_eq!( - tree.finalize_with_ancestors(&"0", 0, &is_descendent_of), - Ok(FinalizationResult::Unchanged), - ); - - assert_eq!(tree.roots, original_roots); - - // finalizing "A" opens up three possible forks - assert_eq!( - tree.finalize_with_ancestors(&"A", 1, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("B", 2), ("F", 2), ("J", 2)], - ); - - // finalizing H: - // 1) removes roots that are not ancestors/descendants of H (B, J) - // 2) opens root that is ancestor of H (F -> G+H) - // 3) finalizes the just opened root H (H -> I + L) - assert_eq!( - tree.finalize_with_ancestors(&"H", 3, &is_descendent_of), - Ok(FinalizationResult::Changed(Some(()))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("I", 4), ("L", 4)], - ); - - 
assert_eq!( - tree.best_finalized_number, - Some(3), - ); - - // finalizing N (which is not a part of the tree): - // 1) removes roots that are not ancestors/descendants of N (I) - // 2) opens root that is ancestor of N (L -> M+O) - // 3) removes roots that are not ancestors/descendants of N (O) - // 4) opens root that is ancestor of N (M -> {}) - assert_eq!( - tree.finalize_with_ancestors(&"N", 6, &is_descendent_of), - Ok(FinalizationResult::Changed(None)), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![], - ); - - assert_eq!( - tree.best_finalized_number, - Some(6), - ); - } - - #[test] - fn finalize_with_descendent_works() { - #[derive(Debug, PartialEq)] - struct Change { effective: u64 }; - - let (mut tree, is_descendent_of) = { - let mut tree = ForkTree::new(); - - let is_descendent_of = |base: &&str, block: &&str| -> Result { - - // - // A0 #1 - (B #2) - (C #5) - D #10 - E #15 - (F #100) - // \ - // - (G #100) - // - // A1 #1 - // - // Nodes B, C, F and G are not part of the tree. 
- match (*base, *block) { - ("A0", b) => Ok(b == "B" || b == "C" || b == "D" || b == "G"), - ("A1", _) => Ok(false), - ("C", b) => Ok(b == "D"), - ("D", b) => Ok(b == "E" || b == "F" || b == "G"), - ("E", b) => Ok(b == "F"), - _ => Ok(false), - } - }; - - tree.import("A0", 1, Change { effective: 5 }, &is_descendent_of).unwrap(); - tree.import("A1", 1, Change { effective: 5 }, &is_descendent_of).unwrap(); - tree.import("D", 10, Change { effective: 10 }, &is_descendent_of).unwrap(); - tree.import("E", 15, Change { effective: 50 }, &is_descendent_of).unwrap(); - - (tree, is_descendent_of) - }; - - assert_eq!( + use super::{Error, FinalizationResult, ForkTree}; + + #[derive(Debug, PartialEq)] + struct TestError; + + impl std::fmt::Display for TestError { + fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { + write!(f, "TestError") + } + } + + impl std::error::Error for TestError {} + + fn test_fork_tree<'a>() -> ( + ForkTree<&'a str, u64, ()>, + impl Fn(&&str, &&str) -> Result, + ) { + let mut tree = ForkTree::new(); + + // + // - B - C - D - E + // / + // / - G + // / / + // A - F - H - I + // \ + // - L - M + // \ + // - O + // \ + // — J - K + // + // (where N is not a part of fork tree) + let is_descendent_of = |base: &&str, block: &&str| -> Result { + let letters = vec![ + "B", "C", "D", "E", "F", "G", "H", "I", "J", "K", "L", "M", "O", + ]; + match (*base, *block) { + ("A", b) => Ok(letters.into_iter().any(|n| n == b)), + ("B", b) => Ok(b == "C" || b == "D" || b == "E"), + ("C", b) => Ok(b == "D" || b == "E"), + ("D", b) => Ok(b == "E"), + ("E", _) => Ok(false), + ("F", b) => { + Ok(b == "G" || b == "H" || b == "I" || b == "L" || b == "M" || b == "O") + } + ("G", _) => Ok(false), + ("H", b) => Ok(b == "I" || b == "L" || b == "M" || b == "O"), + ("I", _) => Ok(false), + ("J", b) => Ok(b == "K"), + ("K", _) => Ok(false), + ("L", b) => Ok(b == "M" || b == "O"), + ("M", _) => Ok(false), + ("O", _) => Ok(false), + ("0", _) => Ok(true), + _ => Ok(false), 
+ } + }; + + tree.import("A", 1, (), &is_descendent_of).unwrap(); + + tree.import("B", 2, (), &is_descendent_of).unwrap(); + tree.import("C", 3, (), &is_descendent_of).unwrap(); + tree.import("D", 4, (), &is_descendent_of).unwrap(); + tree.import("E", 5, (), &is_descendent_of).unwrap(); + + tree.import("F", 2, (), &is_descendent_of).unwrap(); + tree.import("G", 3, (), &is_descendent_of).unwrap(); + + tree.import("H", 3, (), &is_descendent_of).unwrap(); + tree.import("I", 4, (), &is_descendent_of).unwrap(); + tree.import("L", 4, (), &is_descendent_of).unwrap(); + tree.import("M", 5, (), &is_descendent_of).unwrap(); + tree.import("O", 5, (), &is_descendent_of).unwrap(); + + tree.import("J", 2, (), &is_descendent_of).unwrap(); + tree.import("K", 3, (), &is_descendent_of).unwrap(); + + (tree, is_descendent_of) + } + + #[test] + fn import_doesnt_revert() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + tree.finalize_root(&"A"); + + assert_eq!(tree.best_finalized_number, Some(1),); + + assert_eq!( + tree.import("A", 1, (), &is_descendent_of), + Err(Error::Revert), + ); + } + + #[test] + fn import_doesnt_add_duplicates() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + assert_eq!( + tree.import("A", 1, (), &is_descendent_of), + Err(Error::Duplicate), + ); + + assert_eq!( + tree.import("I", 4, (), &is_descendent_of), + Err(Error::Duplicate), + ); + + assert_eq!( + tree.import("G", 3, (), &is_descendent_of), + Err(Error::Duplicate), + ); + + assert_eq!( + tree.import("K", 3, (), &is_descendent_of), + Err(Error::Duplicate), + ); + } + + #[test] + fn finalize_root_works() { + let finalize_a = || { + let (mut tree, ..) 
= test_fork_tree(); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("A", 1)], + ); + + // finalizing "A" opens up three possible forks + tree.finalize_root(&"A"); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + tree + }; + + { + let mut tree = finalize_a(); + + // finalizing "B" will progress on its fork and remove any other competing forks + tree.finalize_root(&"B"); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("C", 3)], + ); + + // all the other forks have been pruned + assert!(tree.roots.len() == 1); + } + + { + let mut tree = finalize_a(); + + // finalizing "J" will progress on its fork and remove any other competing forks + tree.finalize_root(&"J"); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("K", 3)], + ); + + // all the other forks have been pruned + assert!(tree.roots.len() == 1); + } + } + + #[test] + fn finalize_works() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + let original_roots = tree.roots.clone(); + + // finalizing a block prior to any in the node doesn't change the tree + assert_eq!( + tree.finalize(&"0", 0, &is_descendent_of), + Ok(FinalizationResult::Unchanged), + ); + + assert_eq!(tree.roots, original_roots); + + // finalizing "A" opens up three possible forks + assert_eq!( + tree.finalize(&"A", 1, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + // finalizing anything lower than what we observed will fail + assert_eq!(tree.best_finalized_number, Some(1),); + + assert_eq!( + tree.finalize(&"Z", 1, &is_descendent_of), + Err(Error::Revert), + ); + + // trying to finalize a node without finalizing its ancestors first will fail + 
assert_eq!( + tree.finalize(&"H", 3, &is_descendent_of), + Err(Error::UnfinalizedAncestor), + ); + + // after finalizing "F" we can finalize "H" + assert_eq!( + tree.finalize(&"F", 2, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.finalize(&"H", 3, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("I", 4), ("L", 4)], + ); + + // finalizing a node from another fork that isn't part of the tree clears the tree + assert_eq!( + tree.finalize(&"Z", 5, &is_descendent_of), + Ok(FinalizationResult::Changed(None)), + ); + + assert!(tree.roots.is_empty()); + } + + #[test] + fn finalize_with_ancestor_works() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + let original_roots = tree.roots.clone(); + + // finalizing a block prior to any in the node doesn't change the tree + assert_eq!( + tree.finalize_with_ancestors(&"0", 0, &is_descendent_of), + Ok(FinalizationResult::Unchanged), + ); + + assert_eq!(tree.roots, original_roots); + + // finalizing "A" opens up three possible forks + assert_eq!( + tree.finalize_with_ancestors(&"A", 1, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("B", 2), ("F", 2), ("J", 2)], + ); + + // finalizing H: + // 1) removes roots that are not ancestors/descendants of H (B, J) + // 2) opens root that is ancestor of H (F -> G+H) + // 3) finalizes the just opened root H (H -> I + L) + assert_eq!( + tree.finalize_with_ancestors(&"H", 3, &is_descendent_of), + Ok(FinalizationResult::Changed(Some(()))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("I", 4), ("L", 4)], + ); + + assert_eq!(tree.best_finalized_number, Some(3),); + + // finalizing N (which is not a part of the tree): + // 1) removes roots that are 
not ancestors/descendants of N (I) + // 2) opens root that is ancestor of N (L -> M+O) + // 3) removes roots that are not ancestors/descendants of N (O) + // 4) opens root that is ancestor of N (M -> {}) + assert_eq!( + tree.finalize_with_ancestors(&"N", 6, &is_descendent_of), + Ok(FinalizationResult::Changed(None)), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![], + ); + + assert_eq!(tree.best_finalized_number, Some(6),); + } + + #[test] + fn finalize_with_descendent_works() { + #[derive(Debug, PartialEq)] + struct Change { + effective: u64, + }; + + let (mut tree, is_descendent_of) = { + let mut tree = ForkTree::new(); + + let is_descendent_of = |base: &&str, block: &&str| -> Result { + // + // A0 #1 - (B #2) - (C #5) - D #10 - E #15 - (F #100) + // \ + // - (G #100) + // + // A1 #1 + // + // Nodes B, C, F and G are not part of the tree. + match (*base, *block) { + ("A0", b) => Ok(b == "B" || b == "C" || b == "D" || b == "G"), + ("A1", _) => Ok(false), + ("C", b) => Ok(b == "D"), + ("D", b) => Ok(b == "E" || b == "F" || b == "G"), + ("E", b) => Ok(b == "F"), + _ => Ok(false), + } + }; + + tree.import("A0", 1, Change { effective: 5 }, &is_descendent_of) + .unwrap(); + tree.import("A1", 1, Change { effective: 5 }, &is_descendent_of) + .unwrap(); + tree.import("D", 10, Change { effective: 10 }, &is_descendent_of) + .unwrap(); + tree.import("E", 15, Change { effective: 50 }, &is_descendent_of) + .unwrap(); + + (tree, is_descendent_of) + }; + + assert_eq!( tree.finalizes_any_with_descendent_if( &"B", 2, @@ -1254,37 +1286,30 @@ mod test { Ok(None), ); - // finalizing "D" will finalize a block from the tree, but it can't be applied yet - // since it is not a root change - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"D", - 10, - &is_descendent_of, - |c| c.effective == 10, - ), - Ok(Some(false)), - ); - - // finalizing "B" doesn't finalize "A0" since the predicate doesn't pass, - // although it will 
clear out "A1" from the tree - assert_eq!( - tree.finalize_with_descendent_if( - &"B", - 2, - &is_descendent_of, - |c| c.effective <= 2, - ), - Ok(FinalizationResult::Changed(None)), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("A0", 1)], - ); - - // finalizing "C" will finalize the node "A0" and prune it out of the tree - assert_eq!( + // finalizing "D" will finalize a block from the tree, but it can't be applied yet + // since it is not a root change + assert_eq!( + tree.finalizes_any_with_descendent_if(&"D", 10, &is_descendent_of, |c| c.effective + == 10,), + Ok(Some(false)), + ); + + // finalizing "B" doesn't finalize "A0" since the predicate doesn't pass, + // although it will clear out "A1" from the tree + assert_eq!( + tree.finalize_with_descendent_if(&"B", 2, &is_descendent_of, |c| c.effective <= 2,), + Ok(FinalizationResult::Changed(None)), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("A0", 1)], + ); + + // finalizing "C" will finalize the node "A0" and prune it out of the tree + assert_eq!( tree.finalizes_any_with_descendent_if( &"C", 5, @@ -1294,269 +1319,233 @@ mod test { Ok(Some(true)), ); - assert_eq!( - tree.finalize_with_descendent_if( - &"C", - 5, - &is_descendent_of, - |c| c.effective <= 5, - ), - Ok(FinalizationResult::Changed(Some(Change { effective: 5 }))), - ); - - assert_eq!( - tree.roots().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![("D", 10)], - ); - - // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"F", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), - Err(Error::UnfinalizedAncestor), - ); - - // it will work with "G" though since it is not in the same branch as "E" - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), - Ok(Some(true)), 
- ); - - assert_eq!( - tree.finalize_with_descendent_if( - &"G", - 100, - &is_descendent_of, - |c| c.effective <= 100, - ), - Ok(FinalizationResult::Changed(Some(Change { effective: 10 }))), - ); - - // "E" will be pruned out - assert_eq!(tree.roots().count(), 0); - } - - #[test] - fn iter_iterates_in_preorder() { - let (tree, ..) = test_fork_tree(); - assert_eq!( - tree.iter().map(|(h, n, _)| (h.clone(), n.clone())).collect::>(), - vec![ - ("A", 1), - ("B", 2), ("C", 3), ("D", 4), ("E", 5), - ("F", 2), - ("G", 3), - ("H", 3), ("I", 4), - ("L", 4), ("M", 5), ("O", 5), - ("J", 2), ("K", 3) - ], - ); - } - - #[test] - fn minimizes_calls_to_is_descendent_of() { - use std::sync::atomic::{AtomicUsize, Ordering}; - - let n_is_descendent_of_calls = AtomicUsize::new(0); - - let is_descendent_of = |_: &&str, _: &&str| -> Result { - n_is_descendent_of_calls.fetch_add(1, Ordering::SeqCst); - Ok(true) - }; - - { - // Deep tree where we want to call `finalizes_any_with_descendent_if`. The - // search for the node should first check the predicate (which is cheaper) and - // only then call `is_descendent_of` - let mut tree = ForkTree::new(); - let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; - - for (i, letter) in letters.iter().enumerate() { - tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(true)).unwrap(); - } - - // "L" is a descendent of "K", but the predicate will only pass for "K", - // therefore only one call to `is_descendent_of` should be made - assert_eq!( - tree.finalizes_any_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), - Ok(Some(false)), - ); - - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); - } - - n_is_descendent_of_calls.store(0, Ordering::SeqCst); - - { - // Multiple roots in the tree where we want to call `finalize_with_descendent_if`. 
- // The search for the root node should first check the predicate (which is cheaper) - // and only then call `is_descendent_of` - let mut tree = ForkTree::new(); - let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; - - for (i, letter) in letters.iter().enumerate() { - tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(false)).unwrap(); - } - - // "L" is a descendent of "K", but the predicate will only pass for "K", - // therefore only one call to `is_descendent_of` should be made - assert_eq!( - tree.finalize_with_descendent_if( - &"L", - 11, - &is_descendent_of, - |i| *i == 10, - ), - Ok(FinalizationResult::Changed(Some(10))), - ); - - assert_eq!( - n_is_descendent_of_calls.load(Ordering::SeqCst), - 1, - ); - } - } - - #[test] - fn find_node_works() { - let (tree, is_descendent_of) = test_fork_tree(); - - let node = tree.find_node_where( - &"D", - &4, - &is_descendent_of, - &|_| true, - ).unwrap().unwrap(); - - assert_eq!(node.hash, "C"); - assert_eq!(node.number, 3); - } - - #[test] - fn map_works() { - let (tree, _is_descendent_of) = test_fork_tree(); - - let _tree = tree.map(&mut |_, _, _| ()); - } - - #[test] - fn prune_works() { - let (mut tree, is_descendent_of) = test_fork_tree(); - - let removed = tree.prune( - &"C", - &3, - &is_descendent_of, - &|_| true, - ).unwrap(); - - assert_eq!( - tree.roots.iter().map(|node| node.hash).collect::>(), - vec!["B"], - ); - - assert_eq!( - tree.iter().map(|(hash, _, _)| *hash).collect::>(), - vec!["B", "C", "D", "E"], - ); - - assert_eq!( - removed.map(|(hash, _, _)| hash).collect::>(), - vec!["A", "F", "G", "H", "I", "L", "M", "O", "J", "K"] - ); - - let removed = tree.prune( - &"E", - &5, - &is_descendent_of, - &|_| true, - ).unwrap(); - - assert_eq!( - tree.roots.iter().map(|node| node.hash).collect::>(), - vec!["D"], - ); - - assert_eq!( - tree.iter().map(|(hash, _, _)| *hash).collect::>(), - vec!["D", "E"], - ); - - assert_eq!( - removed.map(|(hash, _, _)| hash).collect::>(), - vec!["B", 
"C"] - ); - } - - #[test] - fn find_node_backtracks_after_finding_highest_descending_node() { - let mut tree = ForkTree::new(); - - // - // A - B - // \ - // — C - // - let is_descendent_of = |base: &&str, block: &&str| -> Result { - match (*base, *block) { - ("A", b) => Ok(b == "B" || b == "C" || b == "D"), - ("B", b) | ("C", b) => Ok(b == "D"), - ("0", _) => Ok(true), - _ => Ok(false), - } - }; - - tree.import("A", 1, 1, &is_descendent_of).unwrap(); - tree.import("B", 2, 2, &is_descendent_of).unwrap(); - tree.import("C", 2, 4, &is_descendent_of).unwrap(); - - // when searching the tree we reach node `C`, but the - // predicate doesn't pass. we should backtrack to `B`, but not to `A`, - // since "B" fulfills the predicate. - let node = tree.find_node_where( - &"D", - &3, - &is_descendent_of, - &|data| *data < 3, - ).unwrap(); - - assert_eq!(node.unwrap().hash, "B"); - } - - #[test] - fn tree_rebalance() { - let (mut tree, _) = test_fork_tree(); - - assert_eq!( - tree.iter().map(|(h, _, _)| *h).collect::>(), - vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "L", "M", "O", "J", "K"], - ); - - // after rebalancing the tree we should iterate in preorder exploring - // the longest forks first. check the ascii art above to understand the - // expected output below. 
- tree.rebalance(); - - assert_eq!( - tree.iter().map(|(h, _, _)| *h).collect::>(), - ["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"] - ); - } + assert_eq!( + tree.finalize_with_descendent_if(&"C", 5, &is_descendent_of, |c| c.effective <= 5,), + Ok(FinalizationResult::Changed(Some(Change { effective: 5 }))), + ); + + assert_eq!( + tree.roots() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![("D", 10)], + ); + + // finalizing "F" will fail since it would finalize past "E" without finalizing "D" first + assert_eq!( + tree.finalizes_any_with_descendent_if(&"F", 100, &is_descendent_of, |c| c.effective + <= 100,), + Err(Error::UnfinalizedAncestor), + ); + + // it will work with "G" though since it is not in the same branch as "E" + assert_eq!( + tree.finalizes_any_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective + <= 100,), + Ok(Some(true)), + ); + + assert_eq!( + tree.finalize_with_descendent_if(&"G", 100, &is_descendent_of, |c| c.effective <= 100,), + Ok(FinalizationResult::Changed(Some(Change { effective: 10 }))), + ); + + // "E" will be pruned out + assert_eq!(tree.roots().count(), 0); + } + + #[test] + fn iter_iterates_in_preorder() { + let (tree, ..) = test_fork_tree(); + assert_eq!( + tree.iter() + .map(|(h, n, _)| (h.clone(), n.clone())) + .collect::>(), + vec![ + ("A", 1), + ("B", 2), + ("C", 3), + ("D", 4), + ("E", 5), + ("F", 2), + ("G", 3), + ("H", 3), + ("I", 4), + ("L", 4), + ("M", 5), + ("O", 5), + ("J", 2), + ("K", 3) + ], + ); + } + + #[test] + fn minimizes_calls_to_is_descendent_of() { + use std::sync::atomic::{AtomicUsize, Ordering}; + + let n_is_descendent_of_calls = AtomicUsize::new(0); + + let is_descendent_of = |_: &&str, _: &&str| -> Result { + n_is_descendent_of_calls.fetch_add(1, Ordering::SeqCst); + Ok(true) + }; + + { + // Deep tree where we want to call `finalizes_any_with_descendent_if`. 
The + // search for the node should first check the predicate (which is cheaper) and + // only then call `is_descendent_of` + let mut tree = ForkTree::new(); + let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; + + for (i, letter) in letters.iter().enumerate() { + tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(true)) + .unwrap(); + } + + // "L" is a descendent of "K", but the predicate will only pass for "K", + // therefore only one call to `is_descendent_of` should be made + assert_eq!( + tree.finalizes_any_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), + Ok(Some(false)), + ); + + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1,); + } + + n_is_descendent_of_calls.store(0, Ordering::SeqCst); + + { + // Multiple roots in the tree where we want to call `finalize_with_descendent_if`. + // The search for the root node should first check the predicate (which is cheaper) + // and only then call `is_descendent_of` + let mut tree = ForkTree::new(); + let letters = vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "J", "K"]; + + for (i, letter) in letters.iter().enumerate() { + tree.import::<_, TestError>(*letter, i, i, &|_, _| Ok(false)) + .unwrap(); + } + + // "L" is a descendent of "K", but the predicate will only pass for "K", + // therefore only one call to `is_descendent_of` should be made + assert_eq!( + tree.finalize_with_descendent_if(&"L", 11, &is_descendent_of, |i| *i == 10,), + Ok(FinalizationResult::Changed(Some(10))), + ); + + assert_eq!(n_is_descendent_of_calls.load(Ordering::SeqCst), 1,); + } + } + + #[test] + fn find_node_works() { + let (tree, is_descendent_of) = test_fork_tree(); + + let node = tree + .find_node_where(&"D", &4, &is_descendent_of, &|_| true) + .unwrap() + .unwrap(); + + assert_eq!(node.hash, "C"); + assert_eq!(node.number, 3); + } + + #[test] + fn map_works() { + let (tree, _is_descendent_of) = test_fork_tree(); + + let _tree = tree.map(&mut |_, _, _| ()); + } + + #[test] + fn 
prune_works() { + let (mut tree, is_descendent_of) = test_fork_tree(); + + let removed = tree.prune(&"C", &3, &is_descendent_of, &|_| true).unwrap(); + + assert_eq!( + tree.roots.iter().map(|node| node.hash).collect::>(), + vec!["B"], + ); + + assert_eq!( + tree.iter().map(|(hash, _, _)| *hash).collect::>(), + vec!["B", "C", "D", "E"], + ); + + assert_eq!( + removed.map(|(hash, _, _)| hash).collect::>(), + vec!["A", "F", "G", "H", "I", "L", "M", "O", "J", "K"] + ); + + let removed = tree.prune(&"E", &5, &is_descendent_of, &|_| true).unwrap(); + + assert_eq!( + tree.roots.iter().map(|node| node.hash).collect::>(), + vec!["D"], + ); + + assert_eq!( + tree.iter().map(|(hash, _, _)| *hash).collect::>(), + vec!["D", "E"], + ); + + assert_eq!( + removed.map(|(hash, _, _)| hash).collect::>(), + vec!["B", "C"] + ); + } + + #[test] + fn find_node_backtracks_after_finding_highest_descending_node() { + let mut tree = ForkTree::new(); + + // + // A - B + // \ + // — C + // + let is_descendent_of = |base: &&str, block: &&str| -> Result { + match (*base, *block) { + ("A", b) => Ok(b == "B" || b == "C" || b == "D"), + ("B", b) | ("C", b) => Ok(b == "D"), + ("0", _) => Ok(true), + _ => Ok(false), + } + }; + + tree.import("A", 1, 1, &is_descendent_of).unwrap(); + tree.import("B", 2, 2, &is_descendent_of).unwrap(); + tree.import("C", 2, 4, &is_descendent_of).unwrap(); + + // when searching the tree we reach node `C`, but the + // predicate doesn't pass. we should backtrack to `B`, but not to `A`, + // since "B" fulfills the predicate. 
+ let node = tree + .find_node_where(&"D", &3, &is_descendent_of, &|data| *data < 3) + .unwrap(); + + assert_eq!(node.unwrap().hash, "B"); + } + + #[test] + fn tree_rebalance() { + let (mut tree, _) = test_fork_tree(); + + assert_eq!( + tree.iter().map(|(h, _, _)| *h).collect::>(), + vec!["A", "B", "C", "D", "E", "F", "G", "H", "I", "L", "M", "O", "J", "K"], + ); + + // after rebalancing the tree we should iterate in preorder exploring + // the longest forks first. check the ascii art above to understand the + // expected output below. + tree.rebalance(); + + assert_eq!( + tree.iter().map(|(h, _, _)| *h).collect::>(), + ["A", "B", "C", "D", "E", "F", "H", "L", "M", "O", "I", "G", "J", "K"] + ); + } } diff --git a/utils/frame/benchmarking-cli/src/command.rs b/utils/frame/benchmarking-cli/src/command.rs index 5e35d57cda..e9f4394beb 100644 --- a/utils/frame/benchmarking-cli/src/command.rs +++ b/utils/frame/benchmarking-cli/src/command.rs @@ -17,72 +17,73 @@ use crate::BenchmarkCmd; use codec::{Decode, Encode}; use frame_benchmarking::{Analysis, BenchmarkBatch}; -use sc_cli::{SharedParams, CliConfiguration, ExecutionStrategy, Result}; +use sc_cli::{CliConfiguration, ExecutionStrategy, Result, SharedParams}; use sc_client::StateMachine; use sc_client_db::BenchmarkingState; use sc_executor::NativeExecutor; -use sp_externalities::Extensions; use sc_service::{Configuration, NativeExecutionDispatch}; -use sp_runtime::{ - traits::{Block as BlockT, Header as HeaderT, NumberFor}, -}; use sp_core::{tasks, testing::KeyStore, traits::KeystoreExt}; +use sp_externalities::Extensions; +use sp_runtime::traits::{Block as BlockT, Header as HeaderT, NumberFor}; use std::fmt::Debug; impl BenchmarkCmd { - /// Runs the command and benchmarks the chain. 
- pub fn run(&self, config: Configuration) -> Result<()> - where - BB: BlockT + Debug, - <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, - ::Hash: std::str::FromStr, - ExecDispatch: NativeExecutionDispatch + 'static, - { - let spec = config.chain_spec; - let wasm_method = self.wasm_method.into(); - let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); - - let genesis_storage = spec.build_storage()?; - let mut changes = Default::default(); - let cache_size = Some(self.database_cache_size as usize); - let state = BenchmarkingState::::new(genesis_storage, cache_size)?; - let executor = NativeExecutor::::new( - wasm_method, - None, // heap pages - 2, // The runtime instances cache size. - ); - - let mut extensions = Extensions::default(); - extensions.register(KeystoreExt(KeyStore::new())); - - let result = StateMachine::<_, _, NumberFor, _>::new( - &state, - None, - &mut changes, - &executor, - "Benchmark_dispatch_benchmark", - &( - &self.pallet, - &self.extrinsic, - self.lowest_range_values.clone(), - self.highest_range_values.clone(), - self.steps.clone(), - self.repeat, - ).encode(), - extensions, - &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, - tasks::executor(), - ) - .execute(strategy.into()) - .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; - - let results = , String> as Decode>::decode(&mut &result[..]) - .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?; - - match results { - Ok(batches) => for batch in batches.into_iter() { - // Print benchmark metadata - println!( + /// Runs the command and benchmarks the chain. 
+ pub fn run(&self, config: Configuration) -> Result<()> + where + BB: BlockT + Debug, + <<::Header as HeaderT>::Number as std::str::FromStr>::Err: std::fmt::Debug, + ::Hash: std::str::FromStr, + ExecDispatch: NativeExecutionDispatch + 'static, + { + let spec = config.chain_spec; + let wasm_method = self.wasm_method.into(); + let strategy = self.execution.unwrap_or(ExecutionStrategy::Native); + + let genesis_storage = spec.build_storage()?; + let mut changes = Default::default(); + let cache_size = Some(self.database_cache_size as usize); + let state = BenchmarkingState::::new(genesis_storage, cache_size)?; + let executor = NativeExecutor::::new( + wasm_method, + None, // heap pages + 2, // The runtime instances cache size. + ); + + let mut extensions = Extensions::default(); + extensions.register(KeystoreExt(KeyStore::new())); + + let result = StateMachine::<_, _, NumberFor, _>::new( + &state, + None, + &mut changes, + &executor, + "Benchmark_dispatch_benchmark", + &( + &self.pallet, + &self.extrinsic, + self.lowest_range_values.clone(), + self.highest_range_values.clone(), + self.steps.clone(), + self.repeat, + ) + .encode(), + extensions, + &sp_state_machine::backend::BackendRuntimeCode::new(&state).runtime_code()?, + tasks::executor(), + ) + .execute(strategy.into()) + .map_err(|e| format!("Error executing runtime benchmark: {:?}", e))?; + + let results = + , String> as Decode>::decode(&mut &result[..]) + .map_err(|e| format!("Failed to decode benchmark results: {:?}", e))?; + + match results { + Ok(batches) => { + for batch in batches.into_iter() { + // Print benchmark metadata + println!( "Pallet: {:?}, Extrinsic: {:?}, Lowest values: {:?}, Highest values: {:?}, Steps: {:?}, Repeat: {:?}", String::from_utf8(batch.pallet).expect("Encoded from String; qed"), String::from_utf8(batch.benchmark).expect("Encoded from String; qed"), @@ -92,50 +93,54 @@ impl BenchmarkCmd { self.repeat, ); - if self.raw_data { - // Print the table header - 
batch.results[0].0.iter().for_each(|param| print!("{:?},", param.0)); - - print!("extrinsic_time,storage_root_time\n"); - // Print the values - batch.results.iter().for_each(|result| { - let parameters = &result.0; - parameters.iter().for_each(|param| print!("{:?},", param.1)); - // Print extrinsic time and storage root time - print!("{:?},{:?}\n", result.1, result.2); - }); - - println!(); - } - - // Conduct analysis. - if !self.no_median_slopes { - if let Some(analysis) = Analysis::median_slopes(&batch.results) { - println!("Median Slopes Analysis\n========\n{}", analysis); - } - } - if !self.no_min_squares { - if let Some(analysis) = Analysis::min_squares_iqr(&batch.results) { - println!("Min Squares Analysis\n========\n{}", analysis); - } - } - }, - Err(error) => eprintln!("Error: {:?}", error), - } - - Ok(()) - } + if self.raw_data { + // Print the table header + batch.results[0] + .0 + .iter() + .for_each(|param| print!("{:?},", param.0)); + + print!("extrinsic_time,storage_root_time\n"); + // Print the values + batch.results.iter().for_each(|result| { + let parameters = &result.0; + parameters.iter().for_each(|param| print!("{:?},", param.1)); + // Print extrinsic time and storage root time + print!("{:?},{:?}\n", result.1, result.2); + }); + + println!(); + } + + // Conduct analysis. 
+ if !self.no_median_slopes { + if let Some(analysis) = Analysis::median_slopes(&batch.results) { + println!("Median Slopes Analysis\n========\n{}", analysis); + } + } + if !self.no_min_squares { + if let Some(analysis) = Analysis::min_squares_iqr(&batch.results) { + println!("Min Squares Analysis\n========\n{}", analysis); + } + } + } + } + Err(error) => eprintln!("Error: {:?}", error), + } + + Ok(()) + } } impl CliConfiguration for BenchmarkCmd { - fn shared_params(&self) -> &SharedParams { - &self.shared_params - } - - fn chain_id(&self, _is_dev: bool) -> Result { - Ok(match self.shared_params.chain { - Some(ref chain) => chain.clone(), - None => "dev".into(), - }) - } + fn shared_params(&self) -> &SharedParams { + &self.shared_params + } + + fn chain_id(&self, _is_dev: bool) -> Result { + Ok(match self.shared_params.chain { + Some(ref chain) => chain.clone(), + None => "dev".into(), + }) + } } diff --git a/utils/frame/benchmarking-cli/src/lib.rs b/utils/frame/benchmarking-cli/src/lib.rs index 96204d1ae5..a7e0ed00e3 100644 --- a/utils/frame/benchmarking-cli/src/lib.rs +++ b/utils/frame/benchmarking-cli/src/lib.rs @@ -22,66 +22,66 @@ use std::fmt::Debug; /// The `benchmark` command used to benchmark FRAME Pallets. #[derive(Debug, structopt::StructOpt, Clone)] pub struct BenchmarkCmd { - /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). - #[structopt(short, long)] - pub pallet: String, + /// Select a FRAME Pallet to benchmark, or `*` for all (in which case `extrinsic` must be `*`). + #[structopt(short, long)] + pub pallet: String, - /// Select an extrinsic inside the pallet to benchmark, or `*` for all. - #[structopt(short, long)] - pub extrinsic: String, + /// Select an extrinsic inside the pallet to benchmark, or `*` for all. + #[structopt(short, long)] + pub extrinsic: String, - /// Select how many samples we should take across the variable components. 
- #[structopt(short, long, use_delimiter = true)] - pub steps: Vec, + /// Select how many samples we should take across the variable components. + #[structopt(short, long, use_delimiter = true)] + pub steps: Vec, - /// Indicates lowest values for each of the component ranges. - #[structopt(long = "low", use_delimiter = true)] - pub lowest_range_values: Vec, + /// Indicates lowest values for each of the component ranges. + #[structopt(long = "low", use_delimiter = true)] + pub lowest_range_values: Vec, - /// Indicates highest values for each of the component ranges. - #[structopt(long = "high", use_delimiter = true)] - pub highest_range_values: Vec, + /// Indicates highest values for each of the component ranges. + #[structopt(long = "high", use_delimiter = true)] + pub highest_range_values: Vec, - /// Select how many repetitions of this benchmark should run. - #[structopt(short, long, default_value = "1")] - pub repeat: u32, + /// Select how many repetitions of this benchmark should run. + #[structopt(short, long, default_value = "1")] + pub repeat: u32, - /// Print the raw results. - #[structopt(long = "raw")] - pub raw_data: bool, + /// Print the raw results. + #[structopt(long = "raw")] + pub raw_data: bool, - /// Don't print the median-slopes linear regression analysis. - #[structopt(long)] - pub no_median_slopes: bool, + /// Don't print the median-slopes linear regression analysis. + #[structopt(long)] + pub no_median_slopes: bool, - /// Don't print the min-squares linear regression analysis. - #[structopt(long)] - pub no_min_squares: bool, + /// Don't print the min-squares linear regression analysis. 
+ #[structopt(long)] + pub no_min_squares: bool, - #[allow(missing_docs)] - #[structopt(flatten)] - pub shared_params: sc_cli::SharedParams, + #[allow(missing_docs)] + #[structopt(flatten)] + pub shared_params: sc_cli::SharedParams, - /// The execution strategy that should be used for benchmarks - #[structopt( + /// The execution strategy that should be used for benchmarks + #[structopt( long = "execution", value_name = "STRATEGY", possible_values = &ExecutionStrategy::variants(), case_insensitive = true, )] - pub execution: Option, + pub execution: Option, - /// Method for executing Wasm runtime code. - #[structopt( + /// Method for executing Wasm runtime code. + #[structopt( long = "wasm-execution", value_name = "METHOD", possible_values = &WasmExecutionMethod::enabled_variants(), case_insensitive = true, default_value = "Interpreted" )] - pub wasm_method: WasmExecutionMethod, + pub wasm_method: WasmExecutionMethod, - /// Limit the memory the database cache can use. - #[structopt(long = "db-cache", value_name = "MiB", default_value = "128")] - pub database_cache_size: u32, + /// Limit the memory the database cache can use. 
+ #[structopt(long = "db-cache", value_name = "MiB", default_value = "128")] + pub database_cache_size: u32, } diff --git a/utils/frame/rpc/support/src/lib.rs b/utils/frame/rpc/support/src/lib.rs index 118f5709a6..6b6a9aa128 100644 --- a/utils/frame/rpc/support/src/lib.rs +++ b/utils/frame/rpc/support/src/lib.rs @@ -19,16 +19,14 @@ #![warn(missing_docs)] +use codec::{DecodeAll, FullCodec, FullEncode}; use core::marker::PhantomData; +use frame_support::storage::generator::{StorageDoubleMap, StorageMap, StorageValue}; use futures::compat::Future01CompatExt; use jsonrpc_client_transports::RpcError; -use codec::{DecodeAll, FullCodec, FullEncode}; +use sc_rpc_api::state::StateClient; use serde::{de::DeserializeOwned, Serialize}; -use frame_support::storage::generator::{ - StorageDoubleMap, StorageMap, StorageValue -}; use sp_storage::{StorageData, StorageKey}; -use sc_rpc_api::state::StateClient; /// A typed query on chain state usable from an RPC client. /// @@ -90,56 +88,56 @@ use sc_rpc_api::state::StateClient; /// ``` #[derive(Clone, Eq, PartialEq, Ord, PartialOrd, Hash, Debug)] pub struct StorageQuery { - key: StorageKey, - _spook: PhantomData, + key: StorageKey, + _spook: PhantomData, } impl StorageQuery { - /// Create a storage query for a StorageValue. - pub fn value>() -> Self { - Self { - key: StorageKey(St::storage_value_final_key().to_vec()), - _spook: PhantomData, - } - } + /// Create a storage query for a StorageValue. + pub fn value>() -> Self { + Self { + key: StorageKey(St::storage_value_final_key().to_vec()), + _spook: PhantomData, + } + } - /// Create a storage query for a value in a StorageMap. - pub fn map, K: FullEncode>(key: K) -> Self { - Self { - key: StorageKey(St::storage_map_final_key(key)), - _spook: PhantomData, - } - } + /// Create a storage query for a value in a StorageMap. 
+ pub fn map, K: FullEncode>(key: K) -> Self { + Self { + key: StorageKey(St::storage_map_final_key(key)), + _spook: PhantomData, + } + } - /// Create a storage query for a value in a StorageDoubleMap. - pub fn double_map, K1: FullEncode, K2: FullEncode>( - key1: K1, - key2: K2, - ) -> Self { - Self { - key: StorageKey(St::storage_double_map_final_key(key1, key2)), - _spook: PhantomData, - } - } + /// Create a storage query for a value in a StorageDoubleMap. + pub fn double_map, K1: FullEncode, K2: FullEncode>( + key1: K1, + key2: K2, + ) -> Self { + Self { + key: StorageKey(St::storage_double_map_final_key(key1, key2)), + _spook: PhantomData, + } + } - /// Send this query over RPC, await the typed result. - /// - /// Hash should be ::Hash. - /// - /// # Arguments - /// - /// state_client represents a connection to the RPC server. - /// - /// block_index indicates the block for which state will be queried. A value of None indicates - /// the latest block. - pub async fn get( - self, - state_client: &StateClient, - block_index: Option, - ) -> Result, RpcError> { - let opt: Option = state_client.storage(self.key, block_index).compat().await?; - opt.map(|encoded| V::decode_all(&encoded.0)) - .transpose() - .map_err(|decode_err| RpcError::Other(decode_err.into())) - } + /// Send this query over RPC, await the typed result. + /// + /// Hash should be ::Hash. + /// + /// # Arguments + /// + /// state_client represents a connection to the RPC server. + /// + /// block_index indicates the block for which state will be queried. A value of None indicates + /// the latest block. 
+ pub async fn get( + self, + state_client: &StateClient, + block_index: Option, + ) -> Result, RpcError> { + let opt: Option = state_client.storage(self.key, block_index).compat().await?; + opt.map(|encoded| V::decode_all(&encoded.0)) + .transpose() + .map_err(|decode_err| RpcError::Other(decode_err.into())) + } } diff --git a/utils/frame/rpc/system/src/lib.rs b/utils/frame/rpc/system/src/lib.rs index 4838e8e8f4..d960bd2948 100644 --- a/utils/frame/rpc/system/src/lib.rs +++ b/utils/frame/rpc/system/src/lib.rs @@ -19,29 +19,23 @@ use std::sync::Arc; use codec::{self, Codec, Decode, Encode}; -use sc_client::{ - light::blockchain::{future_header, RemoteBlockchain}, - light::fetcher::{Fetcher, RemoteCallRequest}, -}; +use futures::future::{ready, TryFutureExt}; use jsonrpc_core::{ - Error, ErrorCode, - futures::future::{result, Future}, + futures::future::{result, Future}, + Error, ErrorCode, }; use jsonrpc_derive::rpc; -use futures::future::{ready, TryFutureExt}; -use sp_blockchain::{ - HeaderBackend, - Error as ClientError -}; -use sp_runtime::{ - generic::BlockId, - traits, +use sc_client::{ + light::blockchain::{future_header, RemoteBlockchain}, + light::fetcher::{Fetcher, RemoteCallRequest}, }; +use sp_blockchain::{Error as ClientError, HeaderBackend}; use sp_core::hexdisplay::HexDisplay; -use sp_transaction_pool::{TransactionPool, InPoolTransaction}; +use sp_runtime::{generic::BlockId, traits}; +use sp_transaction_pool::{InPoolTransaction, TransactionPool}; -pub use frame_system_rpc_runtime_api::AccountNonceApi; pub use self::gen_client::Client as SystemClient; +pub use frame_system_rpc_runtime_api::AccountNonceApi; /// Future that resolves to account nonce. pub type FutureResult = Box + Send>; @@ -49,222 +43,220 @@ pub type FutureResult = Box + Send>; /// System RPC methods. #[rpc] pub trait SystemApi { - /// Returns the next valid index (aka nonce) for given account. 
- /// - /// This method takes into consideration all pending transactions - /// currently in the pool and if no transactions are found in the pool - /// it fallbacks to query the index from the runtime (aka. state nonce). - #[rpc(name = "system_accountNextIndex", alias("account_nextIndex"))] - fn nonce(&self, account: AccountId) -> FutureResult; + /// Returns the next valid index (aka nonce) for given account. + /// + /// This method takes into consideration all pending transactions + /// currently in the pool and if no transactions are found in the pool + /// it fallbacks to query the index from the runtime (aka. state nonce). + #[rpc(name = "system_accountNextIndex", alias("account_nextIndex"))] + fn nonce(&self, account: AccountId) -> FutureResult; } const RUNTIME_ERROR: i64 = 1; /// An implementation of System-specific RPC methods on full client. pub struct FullSystem { - client: Arc, - pool: Arc

, - _marker: std::marker::PhantomData, + client: Arc, + pool: Arc

, + _marker: std::marker::PhantomData, } impl FullSystem { - /// Create new `FullSystem` given client and transaction pool. - pub fn new(client: Arc, pool: Arc

) -> Self { - FullSystem { - client, - pool, - _marker: Default::default(), - } - } + /// Create new `FullSystem` given client and transaction pool. + pub fn new(client: Arc, pool: Arc

) -> Self { + FullSystem { + client, + pool, + _marker: Default::default(), + } + } } impl SystemApi for FullSystem where - C: sp_api::ProvideRuntimeApi, - C: HeaderBackend, - C: Send + Sync + 'static, - C::Api: AccountNonceApi, - P: TransactionPool + 'static, - Block: traits::Block, - AccountId: Clone + std::fmt::Display + Codec, - Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, + C: sp_api::ProvideRuntimeApi, + C: HeaderBackend, + C: Send + Sync + 'static, + C::Api: AccountNonceApi, + P: TransactionPool + 'static, + Block: traits::Block, + AccountId: Clone + std::fmt::Display + Codec, + Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, { - fn nonce(&self, account: AccountId) -> FutureResult { - let get_nonce = || { - let api = self.client.runtime_api(); - let best = self.client.info().best_hash; - let at = BlockId::hash(best); + fn nonce(&self, account: AccountId) -> FutureResult { + let get_nonce = || { + let api = self.client.runtime_api(); + let best = self.client.info().best_hash; + let at = BlockId::hash(best); - let nonce = api.account_nonce(&at, account.clone()).map_err(|e| Error { - code: ErrorCode::ServerError(RUNTIME_ERROR), - message: "Unable to query nonce.".into(), - data: Some(format!("{:?}", e).into()), - })?; + let nonce = api.account_nonce(&at, account.clone()).map_err(|e| Error { + code: ErrorCode::ServerError(RUNTIME_ERROR), + message: "Unable to query nonce.".into(), + data: Some(format!("{:?}", e).into()), + })?; - Ok(adjust_nonce(&*self.pool, account, nonce)) - }; + Ok(adjust_nonce(&*self.pool, account, nonce)) + }; - Box::new(result(get_nonce())) - } + Box::new(result(get_nonce())) + } } /// An implementation of System-specific RPC methods on light client. pub struct LightSystem { - client: Arc, - remote_blockchain: Arc>, - fetcher: Arc, - pool: Arc

, + client: Arc, + remote_blockchain: Arc>, + fetcher: Arc, + pool: Arc

, } impl LightSystem { - /// Create new `LightSystem`. - pub fn new( - client: Arc, - remote_blockchain: Arc>, - fetcher: Arc, - pool: Arc

, - ) -> Self { - LightSystem { - client, - remote_blockchain, - fetcher, - pool, - } - } + /// Create new `LightSystem`. + pub fn new( + client: Arc, + remote_blockchain: Arc>, + fetcher: Arc, + pool: Arc

, + ) -> Self { + LightSystem { + client, + remote_blockchain, + fetcher, + pool, + } + } } impl SystemApi for LightSystem where - P: TransactionPool + 'static, - C: HeaderBackend, - C: Send + Sync + 'static, - F: Fetcher + 'static, - Block: traits::Block, - AccountId: Clone + std::fmt::Display + Codec + Send + 'static, - Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, + P: TransactionPool + 'static, + C: HeaderBackend, + C: Send + Sync + 'static, + F: Fetcher + 'static, + Block: traits::Block, + AccountId: Clone + std::fmt::Display + Codec + Send + 'static, + Index: Clone + std::fmt::Display + Codec + Send + traits::AtLeast32Bit + 'static, { - fn nonce(&self, account: AccountId) -> FutureResult { - let best_hash = self.client.info().best_hash; - let best_id = BlockId::hash(best_hash); - let future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); - let fetcher = self.fetcher.clone(); - let call_data = account.encode(); - let future_best_header = future_best_header - .and_then(move |maybe_best_header| ready( - match maybe_best_header { - Some(best_header) => Ok(best_header), - None => Err(ClientError::UnknownBlock(format!("{}", best_hash))), - } - )); - let future_nonce = future_best_header.and_then(move |best_header| - fetcher.remote_call(RemoteCallRequest { - block: best_hash, - header: best_header, - method: "AccountNonceApi_account_nonce".into(), - call_data, - retry_count: None, - }) - ).compat(); - let future_nonce = future_nonce.and_then(|nonce| Decode::decode(&mut &nonce[..]) - .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e))); - let future_nonce = future_nonce.map_err(|e| Error { - code: ErrorCode::ServerError(RUNTIME_ERROR), - message: "Unable to query nonce.".into(), - data: Some(format!("{:?}", e).into()), - }); + fn nonce(&self, account: AccountId) -> FutureResult { + let best_hash = self.client.info().best_hash; + let best_id = BlockId::hash(best_hash); + let 
future_best_header = future_header(&*self.remote_blockchain, &*self.fetcher, best_id); + let fetcher = self.fetcher.clone(); + let call_data = account.encode(); + let future_best_header = future_best_header.and_then(move |maybe_best_header| { + ready(match maybe_best_header { + Some(best_header) => Ok(best_header), + None => Err(ClientError::UnknownBlock(format!("{}", best_hash))), + }) + }); + let future_nonce = future_best_header + .and_then(move |best_header| { + fetcher.remote_call(RemoteCallRequest { + block: best_hash, + header: best_header, + method: "AccountNonceApi_account_nonce".into(), + call_data, + retry_count: None, + }) + }) + .compat(); + let future_nonce = future_nonce.and_then(|nonce| { + Decode::decode(&mut &nonce[..]) + .map_err(|e| ClientError::CallResultDecode("Cannot decode account nonce", e)) + }); + let future_nonce = future_nonce.map_err(|e| Error { + code: ErrorCode::ServerError(RUNTIME_ERROR), + message: "Unable to query nonce.".into(), + data: Some(format!("{:?}", e).into()), + }); - let pool = self.pool.clone(); - let future_nonce = future_nonce.map(move |nonce| adjust_nonce(&*pool, account, nonce)); + let pool = self.pool.clone(); + let future_nonce = future_nonce.map(move |nonce| adjust_nonce(&*pool, account, nonce)); - Box::new(future_nonce) - } + Box::new(future_nonce) + } } /// Adjust account nonce from state, so that tx with the nonce will be /// placed after all ready txpool transactions. 
-fn adjust_nonce<P, AccountId, Index>( - pool: &P, - account: AccountId, - nonce: Index, -) -> Index where - P: TransactionPool, - AccountId: Clone + std::fmt::Display + Encode, - Index: Clone + std::fmt::Display + Encode + traits::AtLeast32Bit + 'static, +fn adjust_nonce<P, AccountId, Index>(pool: &P, account: AccountId, nonce: Index) -> Index +where + P: TransactionPool, + AccountId: Clone + std::fmt::Display + Encode, + Index: Clone + std::fmt::Display + Encode + traits::AtLeast32Bit + 'static, { - log::debug!(target: "rpc", "State nonce for {}: {}", account, nonce); - // Now we need to query the transaction pool - // and find transactions originating from the same sender. - // - // Since extrinsics are opaque to us, we look for them using - // `provides` tag. And increment the nonce if we find a transaction - // that matches the current one. - let mut current_nonce = nonce.clone(); - let mut current_tag = (account.clone(), nonce.clone()).encode(); - for tx in pool.ready() { - log::debug!( - target: "rpc", - "Current nonce to {}, checking {} vs {:?}", - current_nonce, - HexDisplay::from(&current_tag), - tx.provides().iter().map(|x| format!("{}", HexDisplay::from(x))).collect::<Vec<_>>(), - ); - // since transactions in `ready()` need to be ordered by nonce - // it's fine to continue with current iterator. - if tx.provides().get(0) == Some(&current_tag) { - current_nonce += traits::One::one(); - current_tag = (account.clone(), current_nonce.clone()).encode(); - } - } + log::debug!(target: "rpc", "State nonce for {}: {}", account, nonce); + // Now we need to query the transaction pool + // and find transactions originating from the same sender. + // + // Since extrinsics are opaque to us, we look for them using + // `provides` tag. And increment the nonce if we find a transaction + // that matches the current one.
+ let mut current_nonce = nonce.clone(); + let mut current_tag = (account.clone(), nonce.clone()).encode(); + for tx in pool.ready() { + log::debug!( + target: "rpc", + "Current nonce to {}, checking {} vs {:?}", + current_nonce, + HexDisplay::from(&current_tag), + tx.provides().iter().map(|x| format!("{}", HexDisplay::from(x))).collect::<Vec<_>>(), + ); + // since transactions in `ready()` need to be ordered by nonce + // it's fine to continue with current iterator. + if tx.provides().get(0) == Some(&current_tag) { + current_nonce += traits::One::one(); + current_tag = (account.clone(), current_nonce.clone()).encode(); + } + } - current_nonce + current_nonce } #[cfg(test)] mod tests { - use super::*; + use super::*; - use futures::executor::block_on; - use substrate_test_runtime_client::{ - runtime::Transfer, - AccountKeyring, - }; - use sc_transaction_pool::{BasicPool, FullChainApi}; + use futures::executor::block_on; + use sc_transaction_pool::{BasicPool, FullChainApi}; + use substrate_test_runtime_client::{runtime::Transfer, AccountKeyring}; - #[test] - fn should_return_next_nonce_for_some_account() { - // given - let _ = env_logger::try_init(); - let client = Arc::new(substrate_test_runtime_client::new()); - let pool = Arc::new( - BasicPool::new( - Default::default(), - Arc::new(FullChainApi::new(client.clone())), - None, - ).0 - ); + #[test] + fn should_return_next_nonce_for_some_account() { + // given + let _ = env_logger::try_init(); + let client = Arc::new(substrate_test_runtime_client::new()); + let pool = Arc::new( + BasicPool::new( + Default::default(), + Arc::new(FullChainApi::new(client.clone())), + None, + ) + .0, + ); - let source = sp_runtime::transaction_validity::TransactionSource::External; - let new_transaction = |nonce: u64| { - let t = Transfer { - from: AccountKeyring::Alice.into(), - to: AccountKeyring::Bob.into(), - amount: 5, - nonce, - }; - t.into_signed_tx() - }; - // Populate the pool - let ext0 = new_transaction(0); -
block_on(pool.submit_one(&BlockId::number(0), source, ext0)).unwrap(); - let ext1 = new_transaction(1); - block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap(); + let source = sp_runtime::transaction_validity::TransactionSource::External; + let new_transaction = |nonce: u64| { + let t = Transfer { + from: AccountKeyring::Alice.into(), + to: AccountKeyring::Bob.into(), + amount: 5, + nonce, + }; + t.into_signed_tx() + }; + // Populate the pool + let ext0 = new_transaction(0); + block_on(pool.submit_one(&BlockId::number(0), source, ext0)).unwrap(); + let ext1 = new_transaction(1); + block_on(pool.submit_one(&BlockId::number(0), source, ext1)).unwrap(); - let accounts = FullSystem::new(client, pool); + let accounts = FullSystem::new(client, pool); - // when - let nonce = accounts.nonce(AccountKeyring::Alice.into()); + // when + let nonce = accounts.nonce(AccountKeyring::Alice.into()); - // then - assert_eq!(nonce.wait().unwrap(), 2); - } + // then + assert_eq!(nonce.wait().unwrap(), 2); + } } diff --git a/utils/prometheus/src/lib.rs b/utils/prometheus/src/lib.rs index 9030704cb7..f19ee5ac2c 100644 --- a/utils/prometheus/src/lib.rs +++ b/utils/prometheus/src/lib.rs @@ -14,134 +14,145 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use futures_util::{FutureExt, future::Future}; +use futures_util::{future::Future, FutureExt}; pub use prometheus::{ - self, - Registry, Error as PrometheusError, Opts, - Histogram, HistogramOpts, HistogramVec, - exponential_buckets, - core::{ - GenericGauge as Gauge, GenericCounter as Counter, - GenericGaugeVec as GaugeVec, GenericCounterVec as CounterVec, - AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, - } + self, + core::{ + AtomicF64 as F64, AtomicI64 as I64, AtomicU64 as U64, GenericCounter as Counter, + GenericCounterVec as CounterVec, GenericGauge as Gauge, GenericGaugeVec as GaugeVec, + }, + exponential_buckets, Error as PrometheusError, Histogram, HistogramOpts, HistogramVec, Opts, + Registry, }; -use prometheus::{Encoder, TextEncoder, core::Collector}; +use prometheus::{core::Collector, Encoder, TextEncoder}; use std::net::SocketAddr; #[cfg(not(target_os = "unknown"))] mod networking; -#[cfg(target_os = "unknown")] -pub use unknown_os::init_prometheus; #[cfg(not(target_os = "unknown"))] pub use known_os::init_prometheus; +#[cfg(target_os = "unknown")] +pub use unknown_os::init_prometheus; -pub fn register(metric: T, registry: &Registry) -> Result { - registry.register(Box::new(metric.clone()))?; - Ok(metric) +pub fn register( + metric: T, + registry: &Registry, +) -> Result { + registry.register(Box::new(metric.clone()))?; + Ok(metric) } // On WASM `init_prometheus` becomes a no-op. 
#[cfg(target_os = "unknown")] mod unknown_os { - use super::*; + use super::*; - pub enum Error {} + pub enum Error {} - pub async fn init_prometheus(_: SocketAddr, _registry: Registry) -> Result<(), Error> { - Ok(()) - } + pub async fn init_prometheus(_: SocketAddr, _registry: Registry) -> Result<(), Error> { + Ok(()) + } } #[cfg(not(target_os = "unknown"))] mod known_os { - use super::*; - use hyper::http::StatusCode; - use hyper::{Server, Body, Request, Response, service::{service_fn, make_service_fn}}; - - #[derive(Debug, derive_more::Display, derive_more::From)] - pub enum Error { - /// Hyper internal error. - Hyper(hyper::Error), - /// Http request error. - Http(hyper::http::Error), - /// i/o error. - Io(std::io::Error), - #[display(fmt = "Prometheus port {} already in use.", _0)] - PortInUse(SocketAddr) - } - - impl std::error::Error for Error { - fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { - match self { - Error::Hyper(error) => Some(error), - Error::Http(error) => Some(error), - Error::Io(error) => Some(error), - Error::PortInUse(_) => None - } - } - } - - async fn request_metrics(req: Request, registry: Registry) -> Result, Error> { - if req.uri().path() == "/metrics" { - let metric_families = registry.gather(); - let mut buffer = vec![]; - let encoder = TextEncoder::new(); - encoder.encode(&metric_families, &mut buffer).unwrap(); - - Response::builder().status(StatusCode::OK) - .header("Content-Type", encoder.format_type()) - .body(Body::from(buffer)) - .map_err(Error::Http) - } else { - Response::builder().status(StatusCode::NOT_FOUND) - .body(Body::from("Not found.")) - .map_err(Error::Http) - } - - } - - #[derive(Clone)] - pub struct Executor; - - impl hyper::rt::Executor for Executor - where - T: Future + Send + 'static, - T::Output: Send + 'static, - { - fn execute(&self, future: T) { - async_std::task::spawn(future); - } - } - - /// Initializes the metrics context, and starts an HTTP server - /// to serve metrics. 
- pub async fn init_prometheus(prometheus_addr: SocketAddr, registry: Registry) -> Result<(), Error>{ - use networking::Incoming; - let listener = async_std::net::TcpListener::bind(&prometheus_addr) - .await - .map_err(|_| Error::PortInUse(prometheus_addr))?; - - log::info!("〽️ Prometheus server started at {}", prometheus_addr); - - let service = make_service_fn(move |_| { - let registry = registry.clone(); - - async move { - Ok::<_, hyper::Error>(service_fn(move |req: Request| { - request_metrics(req, registry.clone()) - })) - } - }); - - let server = Server::builder(Incoming(listener.incoming())) - .executor(Executor) - .serve(service) - .boxed(); - - let result = server.await.map_err(Into::into); - - result - } + use super::*; + use hyper::http::StatusCode; + use hyper::{ + service::{make_service_fn, service_fn}, + Body, Request, Response, Server, + }; + + #[derive(Debug, derive_more::Display, derive_more::From)] + pub enum Error { + /// Hyper internal error. + Hyper(hyper::Error), + /// Http request error. + Http(hyper::http::Error), + /// i/o error. 
+ Io(std::io::Error), + #[display(fmt = "Prometheus port {} already in use.", _0)] + PortInUse(SocketAddr), + } + + impl std::error::Error for Error { + fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { + match self { + Error::Hyper(error) => Some(error), + Error::Http(error) => Some(error), + Error::Io(error) => Some(error), + Error::PortInUse(_) => None, + } + } + } + + async fn request_metrics( + req: Request, + registry: Registry, + ) -> Result, Error> { + if req.uri().path() == "/metrics" { + let metric_families = registry.gather(); + let mut buffer = vec![]; + let encoder = TextEncoder::new(); + encoder.encode(&metric_families, &mut buffer).unwrap(); + + Response::builder() + .status(StatusCode::OK) + .header("Content-Type", encoder.format_type()) + .body(Body::from(buffer)) + .map_err(Error::Http) + } else { + Response::builder() + .status(StatusCode::NOT_FOUND) + .body(Body::from("Not found.")) + .map_err(Error::Http) + } + } + + #[derive(Clone)] + pub struct Executor; + + impl hyper::rt::Executor for Executor + where + T: Future + Send + 'static, + T::Output: Send + 'static, + { + fn execute(&self, future: T) { + async_std::task::spawn(future); + } + } + + /// Initializes the metrics context, and starts an HTTP server + /// to serve metrics. 
+ pub async fn init_prometheus( + prometheus_addr: SocketAddr, + registry: Registry, + ) -> Result<(), Error> { + use networking::Incoming; + let listener = async_std::net::TcpListener::bind(&prometheus_addr) + .await + .map_err(|_| Error::PortInUse(prometheus_addr))?; + + log::info!("〽️ Prometheus server started at {}", prometheus_addr); + + let service = make_service_fn(move |_| { + let registry = registry.clone(); + + async move { + Ok::<_, hyper::Error>(service_fn(move |req: Request| { + request_metrics(req, registry.clone()) + })) + } + }); + + let server = Server::builder(Incoming(listener.incoming())) + .executor(Executor) + .serve(service) + .boxed(); + + let result = server.await.map_err(Into::into); + + result + } } diff --git a/utils/prometheus/src/networking.rs b/utils/prometheus/src/networking.rs index 5c8c036d44..25db1a2d1f 100644 --- a/utils/prometheus/src/networking.rs +++ b/utils/prometheus/src/networking.rs @@ -15,52 +15,54 @@ // along with Substrate. If not, see . use async_std::pin::Pin; -use std::task::{Poll, Context}; -use futures_util::{stream::Stream, io::{AsyncRead, AsyncWrite}}; +use futures_util::{ + io::{AsyncRead, AsyncWrite}, + stream::Stream, +}; +use std::task::{Context, Poll}; pub struct Incoming<'a>(pub async_std::net::Incoming<'a>); impl hyper::server::accept::Accept for Incoming<'_> { - type Conn = TcpStream; - type Error = async_std::io::Error; + type Conn = TcpStream; + type Error = async_std::io::Error; - fn poll_accept(self: Pin<&mut Self>, cx: &mut Context) -> Poll>> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_next(cx) - .map(|opt| opt.map(|res| res.map(TcpStream))) - } + fn poll_accept( + self: Pin<&mut Self>, + cx: &mut Context, + ) -> Poll>> { + Pin::new(&mut Pin::into_inner(self).0) + .poll_next(cx) + .map(|opt| opt.map(|res| res.map(TcpStream))) + } } pub struct TcpStream(pub async_std::net::TcpStream); impl tokio::io::AsyncRead for TcpStream { - fn poll_read( - self: Pin<&mut Self>, - cx: &mut Context, - buf: 
&mut [u8] - ) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_read(cx, buf) - } + fn poll_read( + self: Pin<&mut Self>, + cx: &mut Context, + buf: &mut [u8], + ) -> Poll> { + Pin::new(&mut Pin::into_inner(self).0).poll_read(cx, buf) + } } impl tokio::io::AsyncWrite for TcpStream { - fn poll_write( - self: Pin<&mut Self>, - cx: &mut Context, - buf: &[u8] - ) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_write(cx, buf) - } + fn poll_write( + self: Pin<&mut Self>, + cx: &mut Context, + buf: &[u8], + ) -> Poll> { + Pin::new(&mut Pin::into_inner(self).0).poll_write(cx, buf) + } - fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_flush(cx) - } + fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + Pin::new(&mut Pin::into_inner(self).0).poll_flush(cx) + } - fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { - Pin::new(&mut Pin::into_inner(self).0) - .poll_close(cx) - } + fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll> { + Pin::new(&mut Pin::into_inner(self).0).poll_close(cx) + } } diff --git a/utils/wasm-builder-runner/src/lib.rs b/utils/wasm-builder-runner/src/lib.rs index 0e4b1421f8..b16a8ef7eb 100644 --- a/utils/wasm-builder-runner/src/lib.rs +++ b/utils/wasm-builder-runner/src/lib.rs @@ -26,8 +26,11 @@ //! For more information see use std::{ - env, process::{Command, self}, fs, path::{PathBuf, Path}, hash::{Hash, Hasher}, - collections::hash_map::DefaultHasher, + collections::hash_map::DefaultHasher, + env, fs, + hash::{Hash, Hasher}, + path::{Path, PathBuf}, + process::{self, Command}, }; /// Environment variable that tells us to skip building the WASM binary. @@ -47,115 +50,115 @@ const TRIGGER_WASM_BUILD_ENV: &str = "TRIGGER_WASM_BUILD"; /// Replace all backslashes with slashes. 
fn replace_back_slashes(path: T) -> String { - path.to_string().replace("\\", "/") + path.to_string().replace("\\", "/") } /// Returns the manifest dir from the `CARGO_MANIFEST_DIR` env. fn get_manifest_dir() -> PathBuf { - env::var("CARGO_MANIFEST_DIR") - .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") - .into() + env::var("CARGO_MANIFEST_DIR") + .expect("`CARGO_MANIFEST_DIR` is always set for `build.rs` files; qed") + .into() } /// First step of the [`WasmBuilder`] to select the project to build. pub struct WasmBuilderSelectProject { - /// This parameter just exists to make it impossible to construct - /// this type outside of this crate. - _ignore: (), + /// This parameter just exists to make it impossible to construct + /// this type outside of this crate. + _ignore: (), } impl WasmBuilderSelectProject { - /// Use the current project as project for building the WASM binary. - /// - /// # Panics - /// - /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable - /// is always set by `Cargo` in `build.rs` files. - pub fn with_current_project(self) -> WasmBuilderSelectSource { - WasmBuilderSelectSource(get_manifest_dir().join("Cargo.toml")) - } - - /// Use the given `path` as project for building the WASM binary. - /// - /// Returns an error if the given `path` does not points to a `Cargo.toml`. - pub fn with_project( - self, - path: impl Into, - ) -> Result { - let path = path.into(); - - if path.ends_with("Cargo.toml") { - Ok(WasmBuilderSelectSource(path)) - } else { - Err("Project path must point to the `Cargo.toml` of the project") - } - } + /// Use the current project as project for building the WASM binary. + /// + /// # Panics + /// + /// Panics if the `CARGO_MANIFEST_DIR` variable is not set. This variable + /// is always set by `Cargo` in `build.rs` files. 
+ pub fn with_current_project(self) -> WasmBuilderSelectSource { + WasmBuilderSelectSource(get_manifest_dir().join("Cargo.toml")) + } + + /// Use the given `path` as project for building the WASM binary. + /// + /// Returns an error if the given `path` does not points to a `Cargo.toml`. + pub fn with_project( + self, + path: impl Into, + ) -> Result { + let path = path.into(); + + if path.ends_with("Cargo.toml") { + Ok(WasmBuilderSelectSource(path)) + } else { + Err("Project path must point to the `Cargo.toml` of the project") + } + } } /// Second step of the [`WasmBuilder`] to set the source of the `wasm-builder`. pub struct WasmBuilderSelectSource(PathBuf); impl WasmBuilderSelectSource { - /// Use the given `path` as source for `wasm-builder`. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. - pub fn with_wasm_builder_from_path(self, path: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Path(path), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `repo` and `rev` as source for `wasm-builder`. - pub fn with_wasm_builder_from_git(self, repo: &'static str, rev: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Git { repo, rev }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io. - pub fn with_wasm_builder_from_crates(self, version: &'static str) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::Crates(version), - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `version` to fetch `wasm-builder` source from crates.io or use - /// the given `path` as source. - /// - /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for - /// `wasm-builder`. 
- pub fn with_wasm_builder_from_crates_or_path( - self, - version: &'static str, - path: &'static str, - ) -> WasmBuilder { - WasmBuilder { - source: WasmBuilderSource::CratesOrPath { version, path }, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } - - /// Use the given `source` as source for `wasm-builder`. - pub fn with_wasm_builder_source(self, source: WasmBuilderSource) -> WasmBuilder { - WasmBuilder { - source, - rust_flags: Vec::new(), - file_name: None, - project_cargo_toml: self.0, - } - } + /// Use the given `path` as source for `wasm-builder`. + /// + /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for + /// `wasm-builder`. + pub fn with_wasm_builder_from_path(self, path: &'static str) -> WasmBuilder { + WasmBuilder { + source: WasmBuilderSource::Path(path), + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: self.0, + } + } + + /// Use the given `repo` and `rev` as source for `wasm-builder`. + pub fn with_wasm_builder_from_git(self, repo: &'static str, rev: &'static str) -> WasmBuilder { + WasmBuilder { + source: WasmBuilderSource::Git { repo, rev }, + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: self.0, + } + } + + /// Use the given `version` to fetch `wasm-builder` source from crates.io. + pub fn with_wasm_builder_from_crates(self, version: &'static str) -> WasmBuilder { + WasmBuilder { + source: WasmBuilderSource::Crates(version), + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: self.0, + } + } + + /// Use the given `version` to fetch `wasm-builder` source from crates.io or use + /// the given `path` as source. + /// + /// The `path` must be relative and point to the directory that contains the `Cargo.toml` for + /// `wasm-builder`. 
+ pub fn with_wasm_builder_from_crates_or_path( + self, + version: &'static str, + path: &'static str, + ) -> WasmBuilder { + WasmBuilder { + source: WasmBuilderSource::CratesOrPath { version, path }, + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: self.0, + } + } + + /// Use the given `source` as source for `wasm-builder`. + pub fn with_wasm_builder_source(self, source: WasmBuilderSource) -> WasmBuilder { + WasmBuilder { + source, + rust_flags: Vec::new(), + file_name: None, + project_cargo_toml: self.0, + } + } } /// The builder for building a wasm binary. @@ -172,167 +175,159 @@ impl WasmBuilderSelectSource { /// using methods of [`Self`]. /// 5. Build the WASM binary using [`Self::build`]. pub struct WasmBuilder { - /// Where should we pull the `wasm-builder` crate from. - source: WasmBuilderSource, - /// Flags that should be appended to `RUST_FLAGS` env variable. - rust_flags: Vec, - /// The name of the file that is being generated in `OUT_DIR`. - /// - /// Defaults to `wasm_binary.rs`. - file_name: Option, - /// The path to the `Cargo.toml` of the project that should be build - /// for wasm. - project_cargo_toml: PathBuf, + /// Where should we pull the `wasm-builder` crate from. + source: WasmBuilderSource, + /// Flags that should be appended to `RUST_FLAGS` env variable. + rust_flags: Vec, + /// The name of the file that is being generated in `OUT_DIR`. + /// + /// Defaults to `wasm_binary.rs`. + file_name: Option, + /// The path to the `Cargo.toml` of the project that should be build + /// for wasm. + project_cargo_toml: PathBuf, } impl WasmBuilder { - /// Create a new instance of the builder. - pub fn new() -> WasmBuilderSelectProject { - WasmBuilderSelectProject { - _ignore: (), - } - } - - /// Enable exporting `__heap_base` as global variable in the WASM binary. - /// - /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. 
- pub fn export_heap_base(mut self) -> Self { - self.rust_flags.push("-Clink-arg=--export=__heap_base".into()); - self - } - - /// Set the name of the file that will be generated in `OUT_DIR`. - /// - /// This file needs to be included to get access to the build WASM binary. - /// - /// If this function is not called, `file_name` defaults to `wasm_binary.rs` - pub fn set_file_name(mut self, file_name: impl Into) -> Self { - self.file_name = Some(file_name.into()); - self - } - - /// Instruct the linker to import the memory into the WASM binary. - /// - /// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`. - pub fn import_memory(mut self) -> Self { - self.rust_flags.push("-C link-arg=--import-memory".into()); - self - } - - /// Append the given `flag` to `RUST_FLAGS`. - /// - /// `flag` is appended as is, so it needs to be a valid flag. - pub fn append_to_rust_flags(mut self, flag: impl Into) -> Self { - self.rust_flags.push(flag.into()); - self - } - - /// Build the WASM binary. - pub fn build(self) { - if check_skip_build() { - // If we skip the build, we still want to make sure to be called when an env variable - // changes - generate_rerun_if_changed_instructions(); - return; - } - - let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); - let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); - - // Hash the path to the project cargo toml. - let mut hasher = DefaultHasher::new(); - self.project_cargo_toml.hash(&mut hasher); - - let project_name = env::var("CARGO_PKG_NAME").expect("`CARGO_PKG_NAME` is set by cargo!"); - // Make sure the `wasm-builder-runner` path is unique by concatenating the name of the - // project that is compiling the WASM binary with the hash of the path to the project that - // should be compiled as WASM binary. 
- let project_folder = get_workspace_root() - .join(format!("{}{}", project_name, hasher.finish())); - - if check_provide_dummy_wasm_binary() { - provide_dummy_wasm_binary(&file_path); - } else { - create_project( - &project_folder, - &file_path, - self.source, - &self.project_cargo_toml, - &self.rust_flags.into_iter().map(|f| format!("{} ", f)).collect::(), - ); - run_project(&project_folder); - } - - // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't - // want to spam the output! - generate_rerun_if_changed_instructions(); - } + /// Create a new instance of the builder. + pub fn new() -> WasmBuilderSelectProject { + WasmBuilderSelectProject { _ignore: () } + } + + /// Enable exporting `__heap_base` as global variable in the WASM binary. + /// + /// This adds `-Clink-arg=--export=__heap_base` to `RUST_FLAGS`. + pub fn export_heap_base(mut self) -> Self { + self.rust_flags + .push("-Clink-arg=--export=__heap_base".into()); + self + } + + /// Set the name of the file that will be generated in `OUT_DIR`. + /// + /// This file needs to be included to get access to the build WASM binary. + /// + /// If this function is not called, `file_name` defaults to `wasm_binary.rs` + pub fn set_file_name(mut self, file_name: impl Into) -> Self { + self.file_name = Some(file_name.into()); + self + } + + /// Instruct the linker to import the memory into the WASM binary. + /// + /// This adds `-C link-arg=--import-memory` to `RUST_FLAGS`. + pub fn import_memory(mut self) -> Self { + self.rust_flags.push("-C link-arg=--import-memory".into()); + self + } + + /// Append the given `flag` to `RUST_FLAGS`. + /// + /// `flag` is appended as is, so it needs to be a valid flag. + pub fn append_to_rust_flags(mut self, flag: impl Into) -> Self { + self.rust_flags.push(flag.into()); + self + } + + /// Build the WASM binary. 
+ pub fn build(self) { + if check_skip_build() { + // If we skip the build, we still want to make sure to be called when an env variable + // changes + generate_rerun_if_changed_instructions(); + return; + } + + let out_dir = PathBuf::from(env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!")); + let file_path = out_dir.join(self.file_name.unwrap_or_else(|| "wasm_binary.rs".into())); + + // Hash the path to the project cargo toml. + let mut hasher = DefaultHasher::new(); + self.project_cargo_toml.hash(&mut hasher); + + let project_name = env::var("CARGO_PKG_NAME").expect("`CARGO_PKG_NAME` is set by cargo!"); + // Make sure the `wasm-builder-runner` path is unique by concatenating the name of the + // project that is compiling the WASM binary with the hash of the path to the project that + // should be compiled as WASM binary. + let project_folder = + get_workspace_root().join(format!("{}{}", project_name, hasher.finish())); + + if check_provide_dummy_wasm_binary() { + provide_dummy_wasm_binary(&file_path); + } else { + create_project( + &project_folder, + &file_path, + self.source, + &self.project_cargo_toml, + &self + .rust_flags + .into_iter() + .map(|f| format!("{} ", f)) + .collect::(), + ); + run_project(&project_folder); + } + + // As last step we need to generate our `rerun-if-changed` stuff. If a build fails, we don't + // want to spam the output! + generate_rerun_if_changed_instructions(); + } } /// The `wasm-builder` dependency source. pub enum WasmBuilderSource { - /// The relative path to the source code from the current manifest dir. - Path(&'static str), - /// The git repository that contains the source code. - Git { - repo: &'static str, - rev: &'static str, - }, - /// Use the given version released on crates.io. - Crates(&'static str), - /// Use the given version released on crates.io or from the given path. 
- CratesOrPath { - version: &'static str, - path: &'static str, - } + /// The relative path to the source code from the current manifest dir. + Path(&'static str), + /// The git repository that contains the source code. + Git { + repo: &'static str, + rev: &'static str, + }, + /// Use the given version released on crates.io. + Crates(&'static str), + /// Use the given version released on crates.io or from the given path. + CratesOrPath { + version: &'static str, + path: &'static str, + }, } impl WasmBuilderSource { - /// Convert to a valid cargo source declaration. - /// - /// `absolute_path` - The manifest dir. - fn to_cargo_source(&self, manifest_dir: &Path) -> String { - match self { - WasmBuilderSource::Path(path) => { - replace_back_slashes(format!("path = \"{}\"", manifest_dir.join(path).display())) - } - WasmBuilderSource::Git { repo, rev } => { - format!("git = \"{}\", rev=\"{}\"", repo, rev) - } - WasmBuilderSource::Crates(version) => { - format!("version = \"{}\"", version) - } - WasmBuilderSource::CratesOrPath { version, path } => { - replace_back_slashes( - format!( - "path = \"{}\", version = \"{}\"", - manifest_dir.join(path).display(), - version - ) - ) - } - } - } + /// Convert to a valid cargo source declaration. + /// + /// `absolute_path` - The manifest dir. + fn to_cargo_source(&self, manifest_dir: &Path) -> String { + match self { + WasmBuilderSource::Path(path) => { + replace_back_slashes(format!("path = \"{}\"", manifest_dir.join(path).display())) + } + WasmBuilderSource::Git { repo, rev } => format!("git = \"{}\", rev=\"{}\"", repo, rev), + WasmBuilderSource::Crates(version) => format!("version = \"{}\"", version), + WasmBuilderSource::CratesOrPath { version, path } => replace_back_slashes(format!( + "path = \"{}\", version = \"{}\"", + manifest_dir.join(path).display(), + version + )), + } + } } /// Build the currently built project as WASM binary and extend `RUSTFLAGS` with the given rustflags. 
/// /// For more information, see [`build_current_project`]. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] +#[deprecated(since = "1.0.5", note = "Please switch to [`WasmBuilder`]")] pub fn build_current_project_with_rustflags( - file_name: &str, - wasm_builder_source: WasmBuilderSource, - default_rust_flags: &str, + file_name: &str, + wasm_builder_source: WasmBuilderSource, + default_rust_flags: &str, ) { - WasmBuilder::new() - .with_current_project() - .with_wasm_builder_source(wasm_builder_source) - .append_to_rust_flags(default_rust_flags) - .set_file_name(file_name) - .build() + WasmBuilder::new() + .with_current_project() + .with_wasm_builder_source(wasm_builder_source) + .append_to_rust_flags(default_rust_flags) + .set_file_name(file_name) + .build() } /// Build the currently built project as WASM binary. @@ -342,48 +337,47 @@ pub fn build_current_project_with_rustflags( /// `file_name` - The name of the file being generated in the `OUT_DIR`. The file contains the /// constant `WASM_BINARY` which contains the build wasm binary. /// `wasm_builder_path` - Path to the wasm-builder project, relative to `CARGO_MANIFEST_DIR`. -#[deprecated( - since = "1.0.5", - note = "Please switch to [`WasmBuilder`]", -)] +#[deprecated(since = "1.0.5", note = "Please switch to [`WasmBuilder`]")] pub fn build_current_project(file_name: &str, wasm_builder_source: WasmBuilderSource) { - #[allow(deprecated)] - build_current_project_with_rustflags(file_name, wasm_builder_source, ""); + #[allow(deprecated)] + build_current_project_with_rustflags(file_name, wasm_builder_source, ""); } /// Returns the root path of the wasm-builder workspace. /// /// The wasm-builder workspace contains all wasm-builder's projects. 
fn get_workspace_root() -> PathBuf { - let out_dir_env = env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!"); - let mut out_dir = PathBuf::from(&out_dir_env); - - loop { - match out_dir.parent() { - Some(parent) if out_dir.ends_with("build") => return parent.join("wbuild-runner"), - _ => if !out_dir.pop() { - break; - } - } - } - - panic!("Could not find target dir in: {}", out_dir_env) + let out_dir_env = env::var("OUT_DIR").expect("`OUT_DIR` is set by cargo!"); + let mut out_dir = PathBuf::from(&out_dir_env); + + loop { + match out_dir.parent() { + Some(parent) if out_dir.ends_with("build") => return parent.join("wbuild-runner"), + _ => { + if !out_dir.pop() { + break; + } + } + } + } + + panic!("Could not find target dir in: {}", out_dir_env) } fn create_project( - project_folder: &Path, - file_path: &Path, - wasm_builder_source: WasmBuilderSource, - cargo_toml_path: &Path, - default_rustflags: &str, + project_folder: &Path, + file_path: &Path, + wasm_builder_source: WasmBuilderSource, + cargo_toml_path: &Path, + default_rustflags: &str, ) { - fs::create_dir_all(project_folder.join("src")) - .expect("WASM build runner dir create can not fail; qed"); + fs::create_dir_all(project_folder.join("src")) + .expect("WASM build runner dir create can not fail; qed"); - fs::write( - project_folder.join("Cargo.toml"), - format!( - r#" + fs::write( + project_folder.join("Cargo.toml"), + format!( + r#" [package] name = "wasm-build-runner-impl" version = "1.0.0" @@ -394,14 +388,15 @@ fn create_project( [workspace] "#, - wasm_builder_source = wasm_builder_source.to_cargo_source(&get_manifest_dir()), - ) - ).expect("WASM build runner `Cargo.toml` writing can not fail; qed"); - - fs::write( - project_folder.join("src/main.rs"), - format!( - r#" + wasm_builder_source = wasm_builder_source.to_cargo_source(&get_manifest_dir()), + ), + ) + .expect("WASM build runner `Cargo.toml` writing can not fail; qed"); + + fs::write( + project_folder.join("src/main.rs"), + format!( + r#" 
use substrate_wasm_builder::build_project_with_default_rustflags; fn main() {{ @@ -412,69 +407,81 @@ fn create_project( ) }} "#, - file_path = replace_back_slashes(file_path.display()), - cargo_toml_path = replace_back_slashes(cargo_toml_path.display()), - default_rustflags = default_rustflags, - ) - ).expect("WASM build runner `main.rs` writing can not fail; qed"); + file_path = replace_back_slashes(file_path.display()), + cargo_toml_path = replace_back_slashes(cargo_toml_path.display()), + default_rustflags = default_rustflags, + ), + ) + .expect("WASM build runner `main.rs` writing can not fail; qed"); } fn run_project(project_folder: &Path) { - let cargo = env::var("CARGO").expect("`CARGO` env variable is always set when executing `build.rs`."); - let mut cmd = Command::new(cargo); - cmd.arg("run").arg(format!("--manifest-path={}", project_folder.join("Cargo.toml").display())); - - if env::var("DEBUG") != Ok(String::from("true")) { - cmd.arg("--release"); - } - - // Make sure we always run the `wasm-builder` project for the `HOST` architecture. - let host_triple = env::var("HOST").expect("`HOST` is always set when executing `build.rs`."); - cmd.arg(&format!("--target={}", host_triple)); - - // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). - // The runner project is created in `CARGO_TARGET_DIR` and executing it will create a sub target - // directory inside of `CARGO_TARGET_DIR`. - cmd.env_remove("CARGO_TARGET_DIR"); - - if !cmd.status().map(|s| s.success()).unwrap_or(false) { - // Don't spam the output with backtraces when a build failed! 
- process::exit(1); - } + let cargo = + env::var("CARGO").expect("`CARGO` env variable is always set when executing `build.rs`."); + let mut cmd = Command::new(cargo); + cmd.arg("run").arg(format!( + "--manifest-path={}", + project_folder.join("Cargo.toml").display() + )); + + if env::var("DEBUG") != Ok(String::from("true")) { + cmd.arg("--release"); + } + + // Make sure we always run the `wasm-builder` project for the `HOST` architecture. + let host_triple = env::var("HOST").expect("`HOST` is always set when executing `build.rs`."); + cmd.arg(&format!("--target={}", host_triple)); + + // Unset the `CARGO_TARGET_DIR` to prevent a cargo deadlock (cargo locks a target dir exclusive). + // The runner project is created in `CARGO_TARGET_DIR` and executing it will create a sub target + // directory inside of `CARGO_TARGET_DIR`. + cmd.env_remove("CARGO_TARGET_DIR"); + + if !cmd.status().map(|s| s.success()).unwrap_or(false) { + // Don't spam the output with backtraces when a build failed! + process::exit(1); + } } /// Generate the name of the skip build environment variable for the current crate. fn generate_crate_skip_build_env_name() -> String { - format!( - "SKIP_{}_WASM_BUILD", - env::var("CARGO_PKG_NAME").expect("Package name is set").to_uppercase().replace('-', "_"), - ) + format!( + "SKIP_{}_WASM_BUILD", + env::var("CARGO_PKG_NAME") + .expect("Package name is set") + .to_uppercase() + .replace('-', "_"), + ) } /// Checks if the build of the WASM binary should be skipped. fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() + env::var(SKIP_BUILD_ENV).is_ok() || env::var(generate_crate_skip_build_env_name()).is_ok() } /// Check if we should provide a dummy WASM binary. 
fn check_provide_dummy_wasm_binary() -> bool { - env::var(DUMMY_WASM_BINARY_ENV).is_ok() + env::var(DUMMY_WASM_BINARY_ENV).is_ok() } /// Provide the dummy WASM binary fn provide_dummy_wasm_binary(file_path: &Path) { - fs::write( - file_path, - "pub const WASM_BINARY: &[u8] = &[]; pub const WASM_BINARY_BLOATY: &[u8] = &[];", - ).expect("Writing dummy WASM binary should not fail"); + fs::write( + file_path, + "pub const WASM_BINARY: &[u8] = &[]; pub const WASM_BINARY_BLOATY: &[u8] = &[];", + ) + .expect("Writing dummy WASM binary should not fail"); } /// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is /// rebuilt when needed. fn generate_rerun_if_changed_instructions() { - // Make sure that the `build.rs` is called again if one of the following env variables changes. - println!("cargo:rerun-if-env-changed={}", SKIP_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", DUMMY_WASM_BINARY_ENV); - println!("cargo:rerun-if-env-changed={}", TRIGGER_WASM_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", generate_crate_skip_build_env_name()); + // Make sure that the `build.rs` is called again if one of the following env variables changes. + println!("cargo:rerun-if-env-changed={}", SKIP_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", DUMMY_WASM_BINARY_ENV); + println!("cargo:rerun-if-env-changed={}", TRIGGER_WASM_BUILD_ENV); + println!( + "cargo:rerun-if-env-changed={}", + generate_crate_skip_build_env_name() + ); } diff --git a/utils/wasm-builder/src/lib.rs b/utils/wasm-builder/src/lib.rs index 195527a122..3ec009458e 100644 --- a/utils/wasm-builder/src/lib.rs +++ b/utils/wasm-builder/src/lib.rs @@ -83,7 +83,12 @@ //! as well. For example if installing the rust nightly from 20.02.2020 using `rustup install nightly-2020-02-20`, //! the wasm target needs to be installed as well `rustup target add wasm32-unknown-unknown --toolchain nightly-2020-02-20`. 
-use std::{env, fs, path::PathBuf, process::{Command, self}, io::BufRead}; +use std::{ + env, fs, + io::BufRead, + path::PathBuf, + process::{self, Command}, +}; mod prerequisites; mod wasm_project; @@ -119,7 +124,7 @@ const WASM_BUILD_TOOLCHAIN: &str = "WASM_BUILD_TOOLCHAIN"; /// constant `WASM_BINARY`, which contains the built WASM binary. /// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. pub fn build_project(file_name: &str, cargo_manifest: &str) { - build_project_with_default_rustflags(file_name, cargo_manifest, ""); + build_project_with_default_rustflags(file_name, cargo_manifest, ""); } /// Build the currently built project as wasm binary. @@ -131,149 +136,163 @@ pub fn build_project(file_name: &str, cargo_manifest: &str) { /// `cargo_manifest` - The path to the `Cargo.toml` of the project that should be built. /// `default_rustflags` - Default `RUSTFLAGS` that will always be set for the build. pub fn build_project_with_default_rustflags( - file_name: &str, - cargo_manifest: &str, - default_rustflags: &str, + file_name: &str, + cargo_manifest: &str, + default_rustflags: &str, ) { - if check_skip_build() { - return; - } - - let cargo_manifest = PathBuf::from(cargo_manifest); - - if !cargo_manifest.exists() { - panic!("'{}' does not exist!", cargo_manifest.display()); - } - - if !cargo_manifest.ends_with("Cargo.toml") { - panic!("'{}' no valid path to a `Cargo.toml`!", cargo_manifest.display()); - } - - if let Some(err_msg) = prerequisites::check() { - eprintln!("{}", err_msg); - process::exit(1); - } - - let (wasm_binary, bloaty) = wasm_project::create_and_compile( - &cargo_manifest, - default_rustflags, - ); - - write_file_if_changed( - file_name.into(), - format!( - r#" + if check_skip_build() { + return; + } + + let cargo_manifest = PathBuf::from(cargo_manifest); + + if !cargo_manifest.exists() { + panic!("'{}' does not exist!", cargo_manifest.display()); + } + + if !cargo_manifest.ends_with("Cargo.toml") { + panic!( + 
"'{}' no valid path to a `Cargo.toml`!", + cargo_manifest.display() + ); + } + + if let Some(err_msg) = prerequisites::check() { + eprintln!("{}", err_msg); + process::exit(1); + } + + let (wasm_binary, bloaty) = + wasm_project::create_and_compile(&cargo_manifest, default_rustflags); + + write_file_if_changed( + file_name.into(), + format!( + r#" pub const WASM_BINARY: &[u8] = include_bytes!("{wasm_binary}"); pub const WASM_BINARY_BLOATY: &[u8] = include_bytes!("{wasm_binary_bloaty}"); "#, - wasm_binary = wasm_binary.wasm_binary_path(), - wasm_binary_bloaty = bloaty.wasm_binary_bloaty_path(), - ), - ); + wasm_binary = wasm_binary.wasm_binary_path(), + wasm_binary_bloaty = bloaty.wasm_binary_bloaty_path(), + ), + ); } /// Checks if the build of the WASM binary should be skipped. fn check_skip_build() -> bool { - env::var(SKIP_BUILD_ENV).is_ok() + env::var(SKIP_BUILD_ENV).is_ok() } /// Write to the given `file` if the `content` is different. fn write_file_if_changed(file: PathBuf, content: String) { - if fs::read_to_string(&file).ok().as_ref() != Some(&content) { - fs::write(&file, content).expect(&format!("Writing `{}` can not fail!", file.display())); - } + if fs::read_to_string(&file).ok().as_ref() != Some(&content) { + fs::write(&file, content).expect(&format!("Writing `{}` can not fail!", file.display())); + } } /// Get a cargo command that compiles with nightly fn get_nightly_cargo() -> CargoCommand { - let env_cargo = CargoCommand::new( - &env::var("CARGO").expect("`CARGO` env variable is always set by cargo"), - ); - let default_cargo = CargoCommand::new("cargo"); - let rustup_run_nightly = CargoCommand::new_with_args("rustup", &["run", "nightly", "cargo"]); - let wasm_toolchain = env::var(WASM_BUILD_TOOLCHAIN).ok(); - - // First check if the user requested a specific toolchain - if let Some(cmd) = wasm_toolchain.and_then(|t| get_rustup_nightly(Some(t))) { - cmd - } else if env_cargo.is_nightly() { - env_cargo - } else if default_cargo.is_nightly() { - 
default_cargo - } else if rustup_run_nightly.is_nightly() { - rustup_run_nightly - } else { - // If no command before provided us with a nightly compiler, we try to search one - // with rustup. If that fails as well, we return the default cargo and let the prequisities - // check fail. - get_rustup_nightly(None).unwrap_or(default_cargo) - } + let env_cargo = + CargoCommand::new(&env::var("CARGO").expect("`CARGO` env variable is always set by cargo")); + let default_cargo = CargoCommand::new("cargo"); + let rustup_run_nightly = CargoCommand::new_with_args("rustup", &["run", "nightly", "cargo"]); + let wasm_toolchain = env::var(WASM_BUILD_TOOLCHAIN).ok(); + + // First check if the user requested a specific toolchain + if let Some(cmd) = wasm_toolchain.and_then(|t| get_rustup_nightly(Some(t))) { + cmd + } else if env_cargo.is_nightly() { + env_cargo + } else if default_cargo.is_nightly() { + default_cargo + } else if rustup_run_nightly.is_nightly() { + rustup_run_nightly + } else { + // If no command before provided us with a nightly compiler, we try to search one + // with rustup. If that fails as well, we return the default cargo and let the prequisities + // check fail. + get_rustup_nightly(None).unwrap_or(default_cargo) + } } /// Get a nightly from rustup. If `selected` is `Some(_)`, a `CargoCommand` using the given /// nightly is returned. 
fn get_rustup_nightly(selected: Option) -> Option { - let host = format!("-{}", env::var("HOST").expect("`HOST` is always set by cargo")); - - let version = match selected { - Some(selected) => selected, - None => { - let output = Command::new("rustup").args(&["toolchain", "list"]).output().ok()?.stdout; - let lines = output.as_slice().lines(); - - let mut latest_nightly = None; - for line in lines.filter_map(|l| l.ok()) { - if line.starts_with("nightly-") && line.ends_with(&host) { - // Rustup prints them sorted - latest_nightly = Some(line.clone()); - } - } - - latest_nightly?.trim_end_matches(&host).into() - } - }; - - Some(CargoCommand::new_with_args("rustup", &["run", &version, "cargo"])) + let host = format!( + "-{}", + env::var("HOST").expect("`HOST` is always set by cargo") + ); + + let version = match selected { + Some(selected) => selected, + None => { + let output = Command::new("rustup") + .args(&["toolchain", "list"]) + .output() + .ok()? + .stdout; + let lines = output.as_slice().lines(); + + let mut latest_nightly = None; + for line in lines.filter_map(|l| l.ok()) { + if line.starts_with("nightly-") && line.ends_with(&host) { + // Rustup prints them sorted + latest_nightly = Some(line.clone()); + } + } + + latest_nightly?.trim_end_matches(&host).into() + } + }; + + Some(CargoCommand::new_with_args( + "rustup", + &["run", &version, "cargo"], + )) } /// Builder for cargo commands #[derive(Debug)] struct CargoCommand { - program: String, - args: Vec, + program: String, + args: Vec, } impl CargoCommand { - fn new(program: &str) -> Self { - CargoCommand { program: program.into(), args: Vec::new() } - } - - fn new_with_args(program: &str, args: &[&str]) -> Self { - CargoCommand { - program: program.into(), - args: args.iter().map(ToString::to_string).collect(), - } - } - - fn command(&self) -> Command { - let mut cmd = Command::new(&self.program); - cmd.args(&self.args); - cmd - } - - /// Check if the supplied cargo command is a nightly version - fn 
is_nightly(&self) -> bool { - // `RUSTC_BOOTSTRAP` tells a stable compiler to behave like a nightly. So, when this env - // variable is set, we can assume that whatever rust compiler we have, it is a nightly compiler. - // For "more" information, see: - // https://github.com/rust-lang/rust/blob/fa0f7d0080d8e7e9eb20aa9cbf8013f96c81287f/src/libsyntax/feature_gate/check.rs#L891 - env::var("RUSTC_BOOTSTRAP").is_ok() || - self.command() - .arg("--version") - .output() - .map_err(|_| ()) - .and_then(|o| String::from_utf8(o.stdout).map_err(|_| ())) - .unwrap_or_default() - .contains("-nightly") - } + fn new(program: &str) -> Self { + CargoCommand { + program: program.into(), + args: Vec::new(), + } + } + + fn new_with_args(program: &str, args: &[&str]) -> Self { + CargoCommand { + program: program.into(), + args: args.iter().map(ToString::to_string).collect(), + } + } + + fn command(&self) -> Command { + let mut cmd = Command::new(&self.program); + cmd.args(&self.args); + cmd + } + + /// Check if the supplied cargo command is a nightly version + fn is_nightly(&self) -> bool { + // `RUSTC_BOOTSTRAP` tells a stable compiler to behave like a nightly. So, when this env + // variable is set, we can assume that whatever rust compiler we have, it is a nightly compiler. 
+ // For "more" information, see: + // https://github.com/rust-lang/rust/blob/fa0f7d0080d8e7e9eb20aa9cbf8013f96c81287f/src/libsyntax/feature_gate/check.rs#L891 + env::var("RUSTC_BOOTSTRAP").is_ok() + || self + .command() + .arg("--version") + .output() + .map_err(|_| ()) + .and_then(|o| String::from_utf8(o.stdout).map_err(|_| ())) + .unwrap_or_default() + .contains("-nightly") + } } diff --git a/utils/wasm-builder/src/prerequisites.rs b/utils/wasm-builder/src/prerequisites.rs index da118a71f9..f270f4c8cd 100644 --- a/utils/wasm-builder/src/prerequisites.rs +++ b/utils/wasm-builder/src/prerequisites.rs @@ -23,26 +23,27 @@ use tempfile::tempdir; /// # Returns /// Returns `None` if everything was found and `Some(ERR_MSG)` if something could not be found. pub fn check() -> Option<&'static str> { - if !check_nightly_installed(){ - return Some("Rust nightly not installed, please install it!") - } + if !check_nightly_installed() { + return Some("Rust nightly not installed, please install it!"); + } - check_wasm_toolchain_installed() + check_wasm_toolchain_installed() } fn check_nightly_installed() -> bool { - crate::get_nightly_cargo().is_nightly() + crate::get_nightly_cargo().is_nightly() } fn check_wasm_toolchain_installed() -> Option<&'static str> { - let temp = tempdir().expect("Creating temp dir does not fail; qed"); - fs::create_dir_all(temp.path().join("src")).expect("Creating src dir does not fail; qed"); + let temp = tempdir().expect("Creating temp dir does not fail; qed"); + fs::create_dir_all(temp.path().join("src")).expect("Creating src dir does not fail; qed"); - let test_file = temp.path().join("src/lib.rs"); - let manifest_path = temp.path().join("Cargo.toml"); + let test_file = temp.path().join("src/lib.rs"); + let manifest_path = temp.path().join("Cargo.toml"); - fs::write(&manifest_path, - r#" + fs::write( + &manifest_path, + r#" [package] name = "wasm-test" version = "1.0.0" @@ -54,28 +55,33 @@ fn check_wasm_toolchain_installed() -> Option<&'static str> 
{ [workspace] "#, - ).expect("Writing wasm-test manifest does not fail; qed"); - fs::write(&test_file, "pub fn test() {}") - .expect("Writing to the test file does not fail; qed"); + ) + .expect("Writing wasm-test manifest does not fail; qed"); + fs::write(&test_file, "pub fn test() {}").expect("Writing to the test file does not fail; qed"); - let err_msg = "Rust WASM toolchain not installed, please install it!"; - let manifest_path = manifest_path.display().to_string(); - crate::get_nightly_cargo() - .command() - .args(&["build", "--target=wasm32-unknown-unknown", "--manifest-path", &manifest_path]) - .output() - .map_err(|_| err_msg) - .and_then(|s| - if s.status.success() { - Ok(()) - } else { - match String::from_utf8(s.stderr) { - Ok(ref err) if err.contains("linker `rust-lld` not found") => { - Err("`rust-lld` not found, please install it!") - }, - _ => Err(err_msg) - } - } - ) - .err() + let err_msg = "Rust WASM toolchain not installed, please install it!"; + let manifest_path = manifest_path.display().to_string(); + crate::get_nightly_cargo() + .command() + .args(&[ + "build", + "--target=wasm32-unknown-unknown", + "--manifest-path", + &manifest_path, + ]) + .output() + .map_err(|_| err_msg) + .and_then(|s| { + if s.status.success() { + Ok(()) + } else { + match String::from_utf8(s.stderr) { + Ok(ref err) if err.contains("linker `rust-lld` not found") => { + Err("`rust-lld` not found, please install it!") + } + _ => Err(err_msg), + } + } + }) + .err() } diff --git a/utils/wasm-builder/src/wasm_project.rs b/utils/wasm-builder/src/wasm_project.rs index 6248f81935..5afc7eb90a 100644 --- a/utils/wasm-builder/src/wasm_project.rs +++ b/utils/wasm-builder/src/wasm_project.rs @@ -17,8 +17,13 @@ use crate::write_file_if_changed; use std::{ - fs, path::{Path, PathBuf}, borrow::ToOwned, process, env, collections::HashSet, - hash::{Hash, Hasher}, ops::Deref, + borrow::ToOwned, + collections::HashSet, + env, fs, + hash::{Hash, Hasher}, + ops::Deref, + path::{Path, 
PathBuf}, + process, }; use toml::value::Table; @@ -37,45 +42,46 @@ use itertools::Itertools; pub struct WasmBinaryBloaty(PathBuf); impl WasmBinaryBloaty { - /// Returns the path to the bloaty wasm binary. - pub fn wasm_binary_bloaty_path(&self) -> String { - self.0.display().to_string().replace('\\', "/") - } + /// Returns the path to the bloaty wasm binary. + pub fn wasm_binary_bloaty_path(&self) -> String { + self.0.display().to_string().replace('\\', "/") + } } /// Holds the path to the WASM binary. pub struct WasmBinary(PathBuf); impl WasmBinary { - /// Returns the path to the wasm binary. - pub fn wasm_binary_path(&self) -> String { - self.0.display().to_string().replace('\\', "/") - } + /// Returns the path to the wasm binary. + pub fn wasm_binary_path(&self) -> String { + self.0.display().to_string().replace('\\', "/") + } } /// A lock for the WASM workspace. struct WorkspaceLock(fs::File); impl WorkspaceLock { - /// Create a new lock - fn new(wasm_workspace_root: &Path) -> Self { - let lock = fs::OpenOptions::new() - .read(true) - .write(true) - .create(true) - .open(wasm_workspace_root.join("wasm_workspace.lock")) - .expect("Opening the lock file does not fail"); - - lock.lock_exclusive().expect("Locking `wasm_workspace.lock` failed"); - - WorkspaceLock(lock) - } + /// Create a new lock + fn new(wasm_workspace_root: &Path) -> Self { + let lock = fs::OpenOptions::new() + .read(true) + .write(true) + .create(true) + .open(wasm_workspace_root.join("wasm_workspace.lock")) + .expect("Opening the lock file does not fail"); + + lock.lock_exclusive() + .expect("Locking `wasm_workspace.lock` failed"); + + WorkspaceLock(lock) + } } impl Drop for WorkspaceLock { - fn drop(&mut self) { - let _ = self.0.unlock(); - } + fn drop(&mut self) { + let _ = self.0.unlock(); + } } /// Creates the WASM project, compiles the WASM binary and compacts the WASM binary. 
@@ -83,103 +89,109 @@ impl Drop for WorkspaceLock { /// # Returns /// The path to the compact WASM binary and the bloaty WASM binary. pub fn create_and_compile( - cargo_manifest: &Path, - default_rustflags: &str, + cargo_manifest: &Path, + default_rustflags: &str, ) -> (WasmBinary, WasmBinaryBloaty) { - let wasm_workspace_root = get_wasm_workspace_root(); - let wasm_workspace = wasm_workspace_root.join("wbuild"); + let wasm_workspace_root = get_wasm_workspace_root(); + let wasm_workspace = wasm_workspace_root.join("wbuild"); - // Lock the workspace exclusively for us - let _lock = WorkspaceLock::new(&wasm_workspace_root); + // Lock the workspace exclusively for us + let _lock = WorkspaceLock::new(&wasm_workspace_root); - let crate_metadata = MetadataCommand::new() - .manifest_path(cargo_manifest) - .exec() - .expect("`cargo metadata` can not fail on project `Cargo.toml`; qed"); + let crate_metadata = MetadataCommand::new() + .manifest_path(cargo_manifest) + .exec() + .expect("`cargo metadata` can not fail on project `Cargo.toml`; qed"); - let project = create_project(cargo_manifest, &wasm_workspace, &crate_metadata); - create_wasm_workspace_project(&wasm_workspace, &crate_metadata.workspace_root); + let project = create_project(cargo_manifest, &wasm_workspace, &crate_metadata); + create_wasm_workspace_project(&wasm_workspace, &crate_metadata.workspace_root); - build_project(&project, default_rustflags); - let (wasm_binary, bloaty) = compact_wasm_file( - &project, - cargo_manifest, - &wasm_workspace, - ); + build_project(&project, default_rustflags); + let (wasm_binary, bloaty) = compact_wasm_file(&project, cargo_manifest, &wasm_workspace); - copy_wasm_to_target_directory(cargo_manifest, &wasm_binary); + copy_wasm_to_target_directory(cargo_manifest, &wasm_binary); - generate_rerun_if_changed_instructions(cargo_manifest, &project, &wasm_workspace); + generate_rerun_if_changed_instructions(cargo_manifest, &project, &wasm_workspace); - (wasm_binary, bloaty) + 
(wasm_binary, bloaty) } /// Find the `Cargo.lock` relative to the `OUT_DIR` environment variable. /// /// If the `Cargo.lock` cannot be found, we emit a warning and return `None`. fn find_cargo_lock(cargo_manifest: &Path) -> Option { - fn find_impl(mut path: PathBuf) -> Option { - loop { - if path.join("Cargo.lock").exists() { - return Some(path.join("Cargo.lock")) - } - - if !path.pop() { - return None; - } - } - } - - if let Some(path) = find_impl(build_helper::out_dir()) { - return Some(path); - } - - if let Some(path) = find_impl(cargo_manifest.to_path_buf()) { - return Some(path); - } - - build_helper::warning!( - "Could not find `Cargo.lock` for `{}`, while searching from `{}`.", - cargo_manifest.display(), - build_helper::out_dir().display() - ); - - None + fn find_impl(mut path: PathBuf) -> Option { + loop { + if path.join("Cargo.lock").exists() { + return Some(path.join("Cargo.lock")); + } + + if !path.pop() { + return None; + } + } + } + + if let Some(path) = find_impl(build_helper::out_dir()) { + return Some(path); + } + + if let Some(path) = find_impl(cargo_manifest.to_path_buf()) { + return Some(path); + } + + build_helper::warning!( + "Could not find `Cargo.lock` for `{}`, while searching from `{}`.", + cargo_manifest.display(), + build_helper::out_dir().display() + ); + + None } /// Extract the crate name from the given `Cargo.toml`. 
fn get_crate_name(cargo_manifest: &Path) -> String { - let cargo_toml: Table = toml::from_str( - &fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed") - ).expect("Cargo manifest is a valid toml file; qed"); - - let package = cargo_toml - .get("package") - .and_then(|t| t.as_table()) - .expect("`package` key exists in valid `Cargo.toml`; qed"); - - package.get("name").and_then(|p| p.as_str()).map(ToOwned::to_owned).expect("Package name exists; qed") + let cargo_toml: Table = toml::from_str( + &fs::read_to_string(cargo_manifest).expect("File exists as checked before; qed"), + ) + .expect("Cargo manifest is a valid toml file; qed"); + + let package = cargo_toml + .get("package") + .and_then(|t| t.as_table()) + .expect("`package` key exists in valid `Cargo.toml`; qed"); + + package + .get("name") + .and_then(|p| p.as_str()) + .map(ToOwned::to_owned) + .expect("Package name exists; qed") } /// Returns the name for the wasm binary. fn get_wasm_binary_name(cargo_manifest: &Path) -> String { - get_crate_name(cargo_manifest).replace('-', "_") + get_crate_name(cargo_manifest).replace('-', "_") } /// Returns the root path of the wasm workspace. fn get_wasm_workspace_root() -> PathBuf { - let mut out_dir = build_helper::out_dir(); - - loop { - match out_dir.parent() { - Some(parent) if out_dir.ends_with("build") => return parent.to_path_buf(), - _ => if !out_dir.pop() { - break; - } - } - } - - panic!("Could not find target dir in: {}", build_helper::out_dir().display()) + let mut out_dir = build_helper::out_dir(); + + loop { + match out_dir.parent() { + Some(parent) if out_dir.ends_with("build") => return parent.to_path_buf(), + _ => { + if !out_dir.pop() { + break; + } + } + } + } + + panic!( + "Could not find target dir in: {}", + build_helper::out_dir().display() + ) } /// Find all workspace members. 
@@ -191,145 +203,160 @@ fn get_wasm_workspace_root() -> PathBuf { /// member is not valid anymore when the `wasm-project` dependency points to an non-existing /// folder or the package name is not valid. fn find_and_clear_workspace_members(wasm_workspace: &Path) -> Vec { - let mut members = WalkDir::new(wasm_workspace) - .min_depth(1) - .max_depth(1) - .into_iter() - .filter_map(|p| p.ok()) - .map(|d| d.into_path()) - .filter(|p| p.is_dir()) - .filter_map(|p| p.file_name().map(|f| f.to_owned()).and_then(|s| s.into_string().ok())) - .filter(|f| !f.starts_with(".") && f != "target") - .collect::>(); - - let mut i = 0; - while i != members.len() { - let path = wasm_workspace.join(&members[i]).join("Cargo.toml"); - - // Extract the `wasm-project` dependency. - // If the path can be extracted and is valid and the package name matches, - // the member is valid. - if let Some(mut wasm_project) = fs::read_to_string(path) - .ok() - .and_then(|s| toml::from_str::

{ + fn clone(&self) -> Self { + Self { + token: self.token.clone(), + content: self.content.clone(), + } + } + } + }; } groups_impl!(Braces, Brace, Brace, parse_braces); @@ -72,167 +78,165 @@ groups_impl!(Brackets, Bracket, Bracket, parse_brackets); groups_impl!(Parens, Paren, Parenthesis, parse_parens); #[derive(Debug)] -pub struct PunctuatedInner { - pub inner: syn::punctuated::Punctuated, - pub variant: V, +pub struct PunctuatedInner { + pub inner: syn::punctuated::Punctuated, + pub variant: V, } #[derive(Debug, Clone)] pub struct NoTrailing; - #[derive(Debug, Clone)] pub struct Trailing; -pub type Punctuated = PunctuatedInner; +pub type Punctuated = PunctuatedInner; -pub type PunctuatedTrailing = PunctuatedInner; +pub type PunctuatedTrailing = PunctuatedInner; -impl Parse for PunctuatedInner { - fn parse(input: ParseStream) -> Result { - Ok(PunctuatedInner { - inner: syn::punctuated::Punctuated::parse_separated_nonempty(input)?, - variant: Trailing, - }) - } +impl Parse for PunctuatedInner { + fn parse(input: ParseStream) -> Result { + Ok(PunctuatedInner { + inner: syn::punctuated::Punctuated::parse_separated_nonempty(input)?, + variant: Trailing, + }) + } } -impl Parse for PunctuatedInner { - fn parse(input: ParseStream) -> Result { - Ok(PunctuatedInner { - inner: syn::punctuated::Punctuated::parse_terminated(input)?, - variant: NoTrailing, - }) - } +impl Parse for PunctuatedInner { + fn parse(input: ParseStream) -> Result { + Ok(PunctuatedInner { + inner: syn::punctuated::Punctuated::parse_terminated(input)?, + variant: NoTrailing, + }) + } } -impl ToTokens for PunctuatedInner { - fn to_tokens(&self, tokens: &mut TokenStream) { - self.inner.to_tokens(tokens) - } +impl ToTokens for PunctuatedInner { + fn to_tokens(&self, tokens: &mut TokenStream) { + self.inner.to_tokens(tokens) + } } -impl Clone for PunctuatedInner { - fn clone(&self) -> Self { - Self { inner: self.inner.clone(), variant: self.variant.clone() } - } +impl Clone for PunctuatedInner { + fn 
clone(&self) -> Self { + Self { + inner: self.inner.clone(), + variant: self.variant.clone(), + } + } } /// Note that syn Meta is almost fine for use case (lacks only `ToToken`) #[derive(Debug, Clone)] pub struct Meta { - pub inner: syn::Meta, + pub inner: syn::Meta, } impl Parse for Meta { - fn parse(input: ParseStream) -> Result { - Ok(Meta { - inner: syn::Meta::parse(input)?, - }) - } + fn parse(input: ParseStream) -> Result { + Ok(Meta { + inner: syn::Meta::parse(input)?, + }) + } } impl ToTokens for Meta { - fn to_tokens(&self, tokens: &mut TokenStream) { - match self.inner { - syn::Meta::Path(ref path) => path.to_tokens(tokens), - syn::Meta::List(ref l) => l.to_tokens(tokens), - syn::Meta::NameValue(ref n) => n.to_tokens(tokens), - } - } + fn to_tokens(&self, tokens: &mut TokenStream) { + match self.inner { + syn::Meta::Path(ref path) => path.to_tokens(tokens), + syn::Meta::List(ref l) => l.to_tokens(tokens), + syn::Meta::NameValue(ref n) => n.to_tokens(tokens), + } + } } #[derive(Debug)] pub struct OuterAttributes { - pub inner: Vec, + pub inner: Vec, } impl Parse for OuterAttributes { - fn parse(input: ParseStream) -> Result { - let inner = syn::Attribute::parse_outer(input)?; - Ok(OuterAttributes { - inner, - }) - } + fn parse(input: ParseStream) -> Result { + let inner = syn::Attribute::parse_outer(input)?; + Ok(OuterAttributes { inner }) + } } impl ToTokens for OuterAttributes { - fn to_tokens(&self, tokens: &mut TokenStream) { - for att in self.inner.iter() { - att.to_tokens(tokens); - } - } + fn to_tokens(&self, tokens: &mut TokenStream) { + for att in self.inner.iter() { + att.to_tokens(tokens); + } + } } pub fn extract_type_option(typ: &syn::Type) -> Option { - if let syn::Type::Path(ref path) = typ { - let v = path.path.segments.last()?; - if v.ident == "Option" { - // Option has only one type argument in angle bracket. - if let syn::PathArguments::AngleBracketed(a) = &v.arguments { - if let syn::GenericArgument::Type(typ) = a.args.last()? 
{ - return Some(typ.clone()) - } - } - } - } + if let syn::Type::Path(ref path) = typ { + let v = path.path.segments.last()?; + if v.ident == "Option" { + // Option has only one type argument in angle bracket. + if let syn::PathArguments::AngleBracketed(a) = &v.arguments { + if let syn::GenericArgument::Type(typ) = a.args.last()? { + return Some(typ.clone()); + } + } + } + } - None + None } /// Auxiliary structure to check if a given `Ident` is contained in an ast. struct ContainsIdent<'a> { - ident: &'a Ident, - result: bool, + ident: &'a Ident, + result: bool, } impl<'ast> ContainsIdent<'ast> { - fn visit_tokenstream(&mut self, stream: TokenStream) { - stream.into_iter().for_each(|tt| - match tt { - TokenTree::Ident(id) => self.visit_ident(&id), - TokenTree::Group(ref group) => self.visit_tokenstream(group.stream()), - _ => {} - } - ) - } - - fn visit_ident(&mut self, ident: &Ident) { - if ident == self.ident { - self.result = true; - } - } + fn visit_tokenstream(&mut self, stream: TokenStream) { + stream.into_iter().for_each(|tt| match tt { + TokenTree::Ident(id) => self.visit_ident(&id), + TokenTree::Group(ref group) => self.visit_tokenstream(group.stream()), + _ => {} + }) + } + + fn visit_ident(&mut self, ident: &Ident) { + if ident == self.ident { + self.result = true; + } + } } impl<'ast> Visit<'ast> for ContainsIdent<'ast> { - fn visit_ident(&mut self, input: &'ast Ident) { - self.visit_ident(input); - } + fn visit_ident(&mut self, input: &'ast Ident) { + self.visit_ident(input); + } - fn visit_macro(&mut self, input: &'ast syn::Macro) { - self.visit_tokenstream(input.tokens.clone()); - visit::visit_macro(self, input); - } + fn visit_macro(&mut self, input: &'ast syn::Macro) { + self.visit_tokenstream(input.tokens.clone()); + visit::visit_macro(self, input); + } } /// Check if a `Type` contains the given `Ident`. 
pub fn type_contains_ident(typ: &syn::Type, ident: &Ident) -> bool { - let mut visit = ContainsIdent { - result: false, - ident, - }; + let mut visit = ContainsIdent { + result: false, + ident, + }; - visit::visit_type(&mut visit, typ); - visit.result + visit::visit_type(&mut visit, typ); + visit.result } /// Check if a `Expr` contains the given `Ident`. pub fn expr_contains_ident(expr: &syn::Expr, ident: &Ident) -> bool { - let mut visit = ContainsIdent { - result: false, - ident, - }; + let mut visit = ContainsIdent { + result: false, + ident, + }; - visit::visit_expr(&mut visit, expr); - visit.result + visit::visit_expr(&mut visit, expr); + visit.result } diff --git a/frame/support/src/debug.rs b/frame/support/src/debug.rs index 4b7ff6cc39..52a54cdaa9 100644 --- a/frame/support/src/debug.rs +++ b/frame/support/src/debug.rs @@ -86,11 +86,11 @@ //! native::print!("My struct: {:?}", x); //! ``` -use sp_std::vec::Vec; use sp_std::fmt::{self, Debug}; +use sp_std::vec::Vec; -pub use log::{info, debug, error, trace, warn}; pub use crate::runtime_print as print; +pub use log::{debug, error, info, trace, warn}; /// Native-only logging. /// @@ -98,7 +98,7 @@ pub use crate::runtime_print as print; /// only if the runtime is running natively (i.e. not via WASM) #[cfg(feature = "std")] pub mod native { - pub use super::{info, debug, error, trace, warn, print}; + pub use super::{debug, error, info, print, trace, warn}; } /// Native-only logging. @@ -107,16 +107,16 @@ pub mod native { /// only if the runtime is running natively (i.e. not via WASM) #[cfg(not(feature = "std"))] pub mod native { - #[macro_export] - macro_rules! noop { - ($($arg:tt)+) => {} - } - pub use noop as info; - pub use noop as debug; - pub use noop as error; - pub use noop as trace; - pub use noop as warn; - pub use noop as print; + #[macro_export] + macro_rules! 
noop { + ($($arg:tt)+) => {}; + } + pub use noop as info; + pub use noop as debug; + pub use noop as error; + pub use noop as trace; + pub use noop as warn; + pub use noop as print; } /// Print out a formatted message. @@ -140,7 +140,7 @@ macro_rules! runtime_print { /// Print out the debuggable type. pub fn debug(data: &impl Debug) { - runtime_print!("{:?}", data); + runtime_print!("{:?}", data); } /// A target for `core::write!` macro - constructs a string in memory. @@ -148,17 +148,17 @@ pub fn debug(data: &impl Debug) { pub struct Writer(Vec); impl fmt::Write for Writer { - fn write_str(&mut self, s: &str) -> fmt::Result { - self.0.extend(s.as_bytes()); - Ok(()) - } + fn write_str(&mut self, s: &str) -> fmt::Result { + self.0.extend(s.as_bytes()); + Ok(()) + } } impl Writer { - /// Print the content of this `Writer` out. - pub fn print(&self) { - sp_io::misc::print_utf8(&self.0) - } + /// Print the content of this `Writer` out. + pub fn print(&self) { + sp_io::misc::print_utf8(&self.0) + } } /// Runtime logger implementation - `log` crate backend. @@ -176,42 +176,38 @@ impl Writer { pub struct RuntimeLogger; impl RuntimeLogger { - /// Initialize the logger. - /// - /// This is a no-op when running natively (`std`). - #[cfg(feature = "std")] - pub fn init() {} - - /// Initialize the logger. - /// - /// This is a no-op when running natively (`std`). - #[cfg(not(feature = "std"))] - pub fn init() { - static LOGGER: RuntimeLogger = RuntimeLogger;; - let _ = log::set_logger(&LOGGER); - } + /// Initialize the logger. + /// + /// This is a no-op when running natively (`std`). + #[cfg(feature = "std")] + pub fn init() {} + + /// Initialize the logger. + /// + /// This is a no-op when running natively (`std`). 
+ #[cfg(not(feature = "std"))] + pub fn init() { + static LOGGER: RuntimeLogger = RuntimeLogger;; + let _ = log::set_logger(&LOGGER); + } } impl log::Log for RuntimeLogger { - fn enabled(&self, _metadata: &log::Metadata) -> bool { - // to avoid calling to host twice, we pass everything - // and let the host decide what to print. - // If someone is initializing the logger they should - // know what they are doing. - true - } - - fn log(&self, record: &log::Record) { - use fmt::Write; - let mut w = Writer::default(); - let _ = core::write!(&mut w, "{}", record.args()); - - sp_io::logging::log( - record.level().into(), - record.target(), - &w.0, - ); - } - - fn flush(&self) {} + fn enabled(&self, _metadata: &log::Metadata) -> bool { + // to avoid calling to host twice, we pass everything + // and let the host decide what to print. + // If someone is initializing the logger they should + // know what they are doing. + true + } + + fn log(&self, record: &log::Record) { + use fmt::Write; + let mut w = Writer::default(); + let _ = core::write!(&mut w, "{}", record.args()); + + sp_io::logging::log(record.level().into(), record.target(), &w.0); + } + + fn flush(&self) {} } diff --git a/frame/support/src/dispatch.rs b/frame/support/src/dispatch.rs index 8512ccb0a8..226109c446 100644 --- a/frame/support/src/dispatch.rs +++ b/frame/support/src/dispatch.rs @@ -17,24 +17,28 @@ //! Dispatch system. Contains a macro for defining runtime modules and //! generating values representing lazy module function calls. 
-pub use crate::sp_std::{result, fmt, prelude::{Vec, Clone, Eq, PartialEq}, marker}; -pub use crate::codec::{Codec, EncodeLike, Decode, Encode, Input, Output, HasCompact, EncodeAsRef}; -pub use frame_metadata::{ - FunctionMetadata, DecodeDifferent, DecodeDifferentArray, FunctionArgumentMetadata, - ModuleConstantMetadata, DefaultByte, DefaultByteGetter, ModuleErrorMetadata, ErrorMetadata +pub use crate::codec::{Codec, Decode, Encode, EncodeAsRef, EncodeLike, HasCompact, Input, Output}; +pub use crate::sp_std::{ + fmt, marker, + prelude::{Clone, Eq, PartialEq, Vec}, + result, }; +pub use crate::traits::{CallMetadata, GetCallMetadata, GetCallName}; pub use crate::weights::{ - SimpleDispatchInfo, GetDispatchInfo, DispatchInfo, WeighData, ClassifyDispatch, - TransactionPriority, Weight, PaysFee, PostDispatchInfo, WithPostDispatchInfo, + ClassifyDispatch, DispatchInfo, GetDispatchInfo, PaysFee, PostDispatchInfo, SimpleDispatchInfo, + TransactionPriority, WeighData, Weight, WithPostDispatchInfo, +}; +pub use frame_metadata::{ + DecodeDifferent, DecodeDifferentArray, DefaultByte, DefaultByteGetter, ErrorMetadata, + FunctionArgumentMetadata, FunctionMetadata, ModuleConstantMetadata, ModuleErrorMetadata, }; pub use sp_runtime::{traits::Dispatchable, DispatchError}; -pub use crate::traits::{CallMetadata, GetCallMetadata, GetCallName}; /// The return typ of a `Dispatchable` in frame. When returned explicitly from /// a dispatchable function it allows overriding the default `PostDispatchInfo` /// returned from a dispatch. pub type DispatchResultWithPostInfo = - sp_runtime::DispatchResultWithInfo; + sp_runtime::DispatchResultWithInfo; /// Unaugmented version of `DispatchResultWithPostInfo` that can be returned from /// dispatchable functions and is automatically converted to the augmented type. Should be @@ -44,8 +48,7 @@ pub type DispatchResult = Result<(), sp_runtime::DispatchError>; /// The error type contained in a `DispatchResultWithPostInfo`. 
pub type DispatchErrorWithPostInfo = - sp_runtime::DispatchErrorWithPostInfo; - + sp_runtime::DispatchErrorWithPostInfo; /// A type that cannot be instantiated. pub enum Never {} @@ -53,7 +56,11 @@ pub enum Never {} /// Serializable version of Dispatchable. /// This value can be used as a "function" in an extrinsic. pub trait Callable { - type Call: Dispatchable + Codec + Clone + PartialEq + Eq; + type Call: Dispatchable + + Codec + + Clone + + PartialEq + + Eq; } // dirty hack to work around serde_derive issue @@ -1655,7 +1662,7 @@ macro_rules! decl_module { } pub trait IsSubType, R> { - fn is_sub_type(&self) -> Option<&CallableCallFor>; + fn is_sub_type(&self) -> Option<&CallableCallFor>; } /// Implement a meta-dispatch module to dispatch to other dispatchers. @@ -1958,7 +1965,6 @@ macro_rules! __call_to_functions { }; } - /// Convert a list of functions into a list of `FunctionMetadata` items. #[macro_export] #[doc(hidden)] @@ -2085,242 +2091,267 @@ macro_rules! __check_reserved_fn_name { // Do not complain about unused `dispatch` and `dispatch_aux`. #[allow(dead_code)] mod tests { - use super::*; - use crate::weights::{MINIMUM_WEIGHT, DispatchInfo, DispatchClass}; - use crate::traits::{ - CallMetadata, GetCallMetadata, GetCallName, OnInitialize, OnFinalize, OnRuntimeUpgrade - }; - - pub trait Trait: system::Trait + Sized where Self::AccountId: From { - type Origin; - type BlockNumber: Into; - type Call: From>; - } - - pub mod system { - use super::*; - - pub trait Trait { - type AccountId; - } - - pub fn ensure_root(_: R) -> DispatchResult { - Ok(()) - } - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { - /// Hi, this is a comment. 
- #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn aux_0(_origin) -> DispatchResult { unreachable!() } - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn aux_1(_origin, #[compact] _data: u32,) -> DispatchResult { unreachable!() } - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn aux_2(_origin, _data: i32, _data2: String) -> DispatchResult { unreachable!() } - - #[weight = SimpleDispatchInfo::FixedNormal(3)] - fn aux_3(_origin) -> DispatchResult { unreachable!() } - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn aux_4(_origin, _data: i32) -> DispatchResult { unreachable!() } - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn aux_5(_origin, _data: i32, #[compact] _data2: u32,) -> DispatchResult { unreachable!() } - - #[weight = SimpleDispatchInfo::FixedOperational(5)] - fn operational(_origin) { unreachable!() } - - fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } 7 } - fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } - fn on_runtime_upgrade() -> Weight { 10 } - fn offchain_worker() {} - } - } - - const EXPECTED_METADATA: &'static [FunctionMetadata] = &[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ - " Hi, this is a comment." 
- ]) - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_1"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("Compact") - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_2"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("String"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_3"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_4"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("aux_5"), - arguments: DecodeDifferent::Encode(&[ - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data"), - ty: DecodeDifferent::Encode("i32"), - }, - FunctionArgumentMetadata { - name: DecodeDifferent::Encode("_data2"), - ty: DecodeDifferent::Encode("Compact") - } - ]), - documentation: DecodeDifferent::Encode(&[]), - }, - FunctionMetadata { - name: DecodeDifferent::Encode("operational"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]; - - pub struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - type Call = OuterCall; - } - - type Test = Module; - - impl_outer_dispatch! 
{ - pub enum OuterCall for TraitImpl where origin: u32 { - self::Test, - } - } - - impl system::Trait for TraitImpl { - type AccountId = u32; - } - - #[test] - fn module_json_metadata() { - let metadata = Module::::call_functions(); - assert_eq!(EXPECTED_METADATA, metadata); - } - - #[test] - fn compact_attr() { - let call: Call = Call::aux_1(1); - let encoded = call.encode(); - assert_eq!(2, encoded.len()); - assert_eq!(vec![1, 4], encoded); - - let call: Call = Call::aux_5(1, 2); - let encoded = call.encode(); - assert_eq!(6, encoded.len()); - assert_eq!(vec![5, 1, 0, 0, 0, 8], encoded); - } - - #[test] - fn encode_is_correct_and_decode_works() { - let call: Call = Call::aux_0(); - let encoded = call.encode(); - assert_eq!(vec![0], encoded); - let decoded = Call::::decode(&mut &encoded[..]).unwrap(); - assert_eq!(decoded, call); - - let call: Call = Call::aux_2(32, "hello".into()); - let encoded = call.encode(); - assert_eq!(vec![2, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); - let decoded = Call::::decode(&mut &encoded[..]).unwrap(); - assert_eq!(decoded, call); - } - - #[test] - #[should_panic(expected = "on_initialize")] - fn on_initialize_should_work_1() { - as OnInitialize>::on_initialize(42); - } - - #[test] - fn on_initialize_should_work_2() { - assert_eq!( as OnInitialize>::on_initialize(10), 7); - } - - #[test] - #[should_panic(expected = "on_finalize")] - fn on_finalize_should_work() { - as OnFinalize>::on_finalize(42); - } - - #[test] - fn on_runtime_upgrade_should_work() { - assert_eq!( as OnRuntimeUpgrade>::on_runtime_upgrade(), 10); - } - - #[test] - fn weight_should_attach_to_call_enum() { - // operational. 
- assert_eq!( - Call::::operational().get_dispatch_info(), - DispatchInfo { weight: 5, class: DispatchClass::Operational, pays_fee: true }, - ); - // custom basic - assert_eq!( - Call::::aux_3().get_dispatch_info(), - DispatchInfo { weight: 3, class: DispatchClass::Normal, pays_fee: true }, - ); - } - - #[test] - fn call_name() { - let name = Call::::aux_3().get_call_name(); - assert_eq!("aux_3", name); - } - - #[test] - fn call_metadata() { - let call = OuterCall::Test(Call::::aux_3()); - let metadata = call.get_call_metadata(); - let expected = CallMetadata { function_name: "aux_3".into(), pallet_name: "Test".into() }; - assert_eq!(metadata, expected); - } - - #[test] - fn get_call_names() { - let call_names = Call::::get_call_names(); - assert_eq!(["aux_0", "aux_1", "aux_2", "aux_3", "aux_4", "aux_5", "operational"], call_names); - } - - #[test] - fn get_module_names() { - let module_names = OuterCall::get_module_names(); - assert_eq!(["Test"], module_names); - } + use super::*; + use crate::traits::{ + CallMetadata, GetCallMetadata, GetCallName, OnFinalize, OnInitialize, OnRuntimeUpgrade, + }; + use crate::weights::{DispatchClass, DispatchInfo, MINIMUM_WEIGHT}; + + pub trait Trait: system::Trait + Sized + where + Self::AccountId: From, + { + type Origin; + type BlockNumber: Into; + type Call: From>; + } + + pub mod system { + use super::*; + + pub trait Trait { + type AccountId; + } + + pub fn ensure_root(_: R) -> DispatchResult { + Ok(()) + } + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin, T::AccountId: From { + /// Hi, this is a comment. 
+ #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn aux_0(_origin) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn aux_1(_origin, #[compact] _data: u32,) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn aux_2(_origin, _data: i32, _data2: String) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::FixedNormal(3)] + fn aux_3(_origin) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn aux_4(_origin, _data: i32) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn aux_5(_origin, _data: i32, #[compact] _data2: u32,) -> DispatchResult { unreachable!() } + + #[weight = SimpleDispatchInfo::FixedOperational(5)] + fn operational(_origin) { unreachable!() } + + fn on_initialize(n: T::BlockNumber,) -> Weight { if n.into() == 42 { panic!("on_initialize") } 7 } + fn on_finalize(n: T::BlockNumber,) { if n.into() == 42 { panic!("on_finalize") } } + fn on_runtime_upgrade() -> Weight { 10 } + fn offchain_worker() {} + } + } + + const EXPECTED_METADATA: &'static [FunctionMetadata] = &[ + FunctionMetadata { + name: DecodeDifferent::Encode("aux_0"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[" Hi, this is a comment."]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_1"), + arguments: DecodeDifferent::Encode(&[FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("Compact"), + }]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_2"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: 
DecodeDifferent::Encode("String"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_3"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_4"), + arguments: DecodeDifferent::Encode(&[FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("aux_5"), + arguments: DecodeDifferent::Encode(&[ + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data"), + ty: DecodeDifferent::Encode("i32"), + }, + FunctionArgumentMetadata { + name: DecodeDifferent::Encode("_data2"), + ty: DecodeDifferent::Encode("Compact"), + }, + ]), + documentation: DecodeDifferent::Encode(&[]), + }, + FunctionMetadata { + name: DecodeDifferent::Encode("operational"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }, + ]; + + pub struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + type Call = OuterCall; + } + + type Test = Module; + + impl_outer_dispatch! 
{ + pub enum OuterCall for TraitImpl where origin: u32 { + self::Test, + } + } + + impl system::Trait for TraitImpl { + type AccountId = u32; + } + + #[test] + fn module_json_metadata() { + let metadata = Module::::call_functions(); + assert_eq!(EXPECTED_METADATA, metadata); + } + + #[test] + fn compact_attr() { + let call: Call = Call::aux_1(1); + let encoded = call.encode(); + assert_eq!(2, encoded.len()); + assert_eq!(vec![1, 4], encoded); + + let call: Call = Call::aux_5(1, 2); + let encoded = call.encode(); + assert_eq!(6, encoded.len()); + assert_eq!(vec![5, 1, 0, 0, 0, 8], encoded); + } + + #[test] + fn encode_is_correct_and_decode_works() { + let call: Call = Call::aux_0(); + let encoded = call.encode(); + assert_eq!(vec![0], encoded); + let decoded = Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); + + let call: Call = Call::aux_2(32, "hello".into()); + let encoded = call.encode(); + assert_eq!(vec![2, 32, 0, 0, 0, 20, 104, 101, 108, 108, 111], encoded); + let decoded = Call::::decode(&mut &encoded[..]).unwrap(); + assert_eq!(decoded, call); + } + + #[test] + #[should_panic(expected = "on_initialize")] + fn on_initialize_should_work_1() { + as OnInitialize>::on_initialize(42); + } + + #[test] + fn on_initialize_should_work_2() { + assert_eq!( + as OnInitialize>::on_initialize(10), + 7 + ); + } + + #[test] + #[should_panic(expected = "on_finalize")] + fn on_finalize_should_work() { + as OnFinalize>::on_finalize(42); + } + + #[test] + fn on_runtime_upgrade_should_work() { + assert_eq!( + as OnRuntimeUpgrade>::on_runtime_upgrade(), + 10 + ); + } + + #[test] + fn weight_should_attach_to_call_enum() { + // operational. 
+ assert_eq!( + Call::::operational().get_dispatch_info(), + DispatchInfo { + weight: 5, + class: DispatchClass::Operational, + pays_fee: true + }, + ); + // custom basic + assert_eq!( + Call::::aux_3().get_dispatch_info(), + DispatchInfo { + weight: 3, + class: DispatchClass::Normal, + pays_fee: true + }, + ); + } + + #[test] + fn call_name() { + let name = Call::::aux_3().get_call_name(); + assert_eq!("aux_3", name); + } + + #[test] + fn call_metadata() { + let call = OuterCall::Test(Call::::aux_3()); + let metadata = call.get_call_metadata(); + let expected = CallMetadata { + function_name: "aux_3".into(), + pallet_name: "Test".into(), + }; + assert_eq!(metadata, expected); + } + + #[test] + fn get_call_names() { + let call_names = Call::::get_call_names(); + assert_eq!( + [ + "aux_0", + "aux_1", + "aux_2", + "aux_3", + "aux_4", + "aux_5", + "operational" + ], + call_names + ); + } + + #[test] + fn get_module_names() { + let module_names = OuterCall::get_module_names(); + assert_eq!(["Test"], module_names); + } } diff --git a/frame/support/src/error.rs b/frame/support/src/error.rs index a06f468892..2aa9204eae 100644 --- a/frame/support/src/error.rs +++ b/frame/support/src/error.rs @@ -17,9 +17,9 @@ //! Macro for declaring a module error. #[doc(hidden)] -pub use sp_runtime::traits::{LookupError, BadOrigin}; +pub use frame_metadata::{DecodeDifferent, ErrorMetadata, ModuleErrorMetadata}; #[doc(hidden)] -pub use frame_metadata::{ModuleErrorMetadata, ErrorMetadata, DecodeDifferent}; +pub use sp_runtime::traits::{BadOrigin, LookupError}; /// Declare an error type for a runtime module. /// diff --git a/frame/support/src/event.rs b/frame/support/src/event.rs index 1184b379f4..203ba76206 100644 --- a/frame/support/src/event.rs +++ b/frame/support/src/event.rs @@ -17,7 +17,7 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-pub use frame_metadata::{EventMetadata, DecodeDifferent, OuterEventMetadata, FnEncode}; +pub use frame_metadata::{DecodeDifferent, EventMetadata, FnEncode, OuterEventMetadata}; /// Implement the `Event` for a module. /// @@ -538,56 +538,56 @@ macro_rules! __impl_outer_event_json_metadata { #[cfg(test)] #[allow(dead_code)] mod tests { - use super::*; - use serde::Serialize; - use codec::{Encode, Decode}; - - mod system { - pub trait Trait { - type Origin; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - } - - mod system_renamed { - pub trait Trait { - type Origin; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - } - - mod event_module { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( + use super::*; + use codec::{Decode, Encode}; + use serde::Serialize; + + mod system { + pub trait Trait { + type Origin; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + pub enum Event { + SystemEvent, + } + ); + } + + mod system_renamed { + pub trait Trait { + type Origin; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + pub enum Event { + SystemEvent, + } + ); + } + + mod event_module { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( /// Event without renaming the generic parameter `Balance` and `Origin`. 
pub enum Event where ::Balance, ::Origin { @@ -597,51 +597,52 @@ mod tests { EventWithoutParams, } ); - } - - mod event_module2 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event with renamed generic parameter - pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin - { - TestEvent(BalanceRenamed), - TestOrigin(OriginRenamed), - } - ); - } - - mod event_module3 { - decl_event!( - pub enum Event { - HiEvent, - } - ); - } - - mod event_module4 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( + } + + mod event_module2 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + /// Event with renamed generic parameter + pub enum Event + where + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin, + { + TestEvent(BalanceRenamed), + TestOrigin(OriginRenamed), + } + ); + } + + mod event_module3 { + decl_event!( + pub enum Event { + HiEvent, + } + ); + } + + mod event_module4 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( /// Event finish formatting on an unnamed one with trailing comma pub enum Event where ::Balance, @@ -650,150 +651,152 @@ mod tests { TestEvent(Balance, Origin), } ); - } - - mod event_module5 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin {} - } - - decl_event!( - /// Event finish formatting on an named one with trailing comma - pub enum Event where - BalanceRenamed = ::Balance, - OriginRenamed = ::Origin, - { - TestEvent(BalanceRenamed, OriginRenamed), - TrailingCommaInArgs( - u32, - u32, - ), - } - ); - } - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] - pub struct TestRuntime; - - impl_outer_event! { - pub enum TestEvent for TestRuntime { - system, - event_module, - event_module2, - event_module3, - } - } - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] - pub struct TestRuntime2; - - impl_outer_event! { - pub enum TestEventSystemRenamed for TestRuntime2 { - system_renamed, - event_module, - event_module2, - event_module3, - } - } - - impl event_module::Trait for TestRuntime { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl event_module2::Trait for TestRuntime { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl system::Trait for TestRuntime { - type Origin = u32; - type BlockNumber = u32; - } - - impl event_module::Trait for TestRuntime2 { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl event_module2::Trait for TestRuntime2 { - type Origin = u32; - type Balance = u32; - type BlockNumber = u32; - } - - impl system_renamed::Trait for TestRuntime2 { - type Origin = u32; - type BlockNumber = u32; - } - - const EXPECTED_METADATA: OuterEventMetadata = OuterEventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - events: DecodeDifferent::Encode(&[ - ( - "system", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - } - ]) - ), - ( - "event_module", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "Balance", 
"Origin" ]), - documentation: DecodeDifferent::Encode(&[ " Hi, I am a comment." ]) - }, - EventMetadata { - name: DecodeDifferent::Encode("EventWithoutParams"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[ " Dog" ]), - }, - ]) - ), - ( - "event_module2", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&[ "BalanceRenamed" ]), - documentation: DecodeDifferent::Encode(&[]) - }, - EventMetadata { - name: DecodeDifferent::Encode("TestOrigin"), - arguments: DecodeDifferent::Encode(&[ "OriginRenamed" ]), - documentation: DecodeDifferent::Encode(&[]), - }, - ]) - ), - ( - "event_module3", - FnEncode(|| &[ - EventMetadata { - name: DecodeDifferent::Encode("HiEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - ) - ]) - }; - - #[test] - fn outer_event_metadata() { - assert_eq!(EXPECTED_METADATA, TestRuntime::outer_event_metadata()); - } + } + + mod event_module5 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + decl_event!( + /// Event finish formatting on an named one with trailing comma + pub enum Event + where + BalanceRenamed = ::Balance, + OriginRenamed = ::Origin, + { + TestEvent(BalanceRenamed, OriginRenamed), + TrailingCommaInArgs(u32, u32), + } + ); + } + + #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] + pub struct TestRuntime; + + impl_outer_event! { + pub enum TestEvent for TestRuntime { + system, + event_module, + event_module2, + event_module3, + } + } + + #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode, Serialize)] + pub struct TestRuntime2; + + impl_outer_event! 
{ + pub enum TestEventSystemRenamed for TestRuntime2 { + system_renamed, + event_module, + event_module2, + event_module3, + } + } + + impl event_module::Trait for TestRuntime { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl event_module2::Trait for TestRuntime { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl system::Trait for TestRuntime { + type Origin = u32; + type BlockNumber = u32; + } + + impl event_module::Trait for TestRuntime2 { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl event_module2::Trait for TestRuntime2 { + type Origin = u32; + type Balance = u32; + type BlockNumber = u32; + } + + impl system_renamed::Trait for TestRuntime2 { + type Origin = u32; + type BlockNumber = u32; + } + + const EXPECTED_METADATA: OuterEventMetadata = OuterEventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + events: DecodeDifferent::Encode(&[ + ( + "system", + FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("SystemEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }), + ), + ( + "event_module", + FnEncode(|| { + &[ + EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["Balance", "Origin"]), + documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), + }, + EventMetadata { + name: DecodeDifferent::Encode("EventWithoutParams"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[" Dog"]), + }, + ] + }), + ), + ( + "event_module2", + FnEncode(|| { + &[ + EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["BalanceRenamed"]), + documentation: DecodeDifferent::Encode(&[]), + }, + EventMetadata { + name: DecodeDifferent::Encode("TestOrigin"), + arguments: DecodeDifferent::Encode(&["OriginRenamed"]), + documentation: DecodeDifferent::Encode(&[]), + }, + ] + 
}), + ), + ( + "event_module3", + FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("HiEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }), + ), + ]), + }; + + #[test] + fn outer_event_metadata() { + assert_eq!(EXPECTED_METADATA, TestRuntime::outer_event_metadata()); + } } diff --git a/frame/support/src/hash.rs b/frame/support/src/hash.rs index 40cb1f612f..177d3f7c3a 100644 --- a/frame/support/src/hash.rs +++ b/frame/support/src/hash.rs @@ -17,165 +17,167 @@ //! Hash utilities. use codec::Codec; +use sp_io::hashing::{blake2_128, blake2_256, twox_128, twox_256, twox_64}; use sp_std::prelude::Vec; -use sp_io::hashing::{blake2_128, blake2_256, twox_64, twox_128, twox_256}; // This trait must be kept coherent with frame-support-procedural HasherKind usage pub trait Hashable: Sized { - fn blake2_128(&self) -> [u8; 16]; - fn blake2_256(&self) -> [u8; 32]; - fn blake2_128_concat(&self) -> Vec; - fn twox_128(&self) -> [u8; 16]; - fn twox_256(&self) -> [u8; 32]; - fn twox_64_concat(&self) -> Vec; - fn identity(&self) -> Vec; + fn blake2_128(&self) -> [u8; 16]; + fn blake2_256(&self) -> [u8; 32]; + fn blake2_128_concat(&self) -> Vec; + fn twox_128(&self) -> [u8; 16]; + fn twox_256(&self) -> [u8; 32]; + fn twox_64_concat(&self) -> Vec; + fn identity(&self) -> Vec; } impl Hashable for T { - fn blake2_128(&self) -> [u8; 16] { - self.using_encoded(blake2_128) - } - fn blake2_256(&self) -> [u8; 32] { - self.using_encoded(blake2_256) - } - fn blake2_128_concat(&self) -> Vec { - self.using_encoded(Blake2_128Concat::hash) - } - fn twox_128(&self) -> [u8; 16] { - self.using_encoded(twox_128) - } - fn twox_256(&self) -> [u8; 32] { - self.using_encoded(twox_256) - } - fn twox_64_concat(&self) -> Vec { - self.using_encoded(Twox64Concat::hash) - } - fn identity(&self) -> Vec { self.encode() } + fn blake2_128(&self) -> [u8; 16] { + self.using_encoded(blake2_128) + } + fn blake2_256(&self) -> [u8; 32] { + 
self.using_encoded(blake2_256) + } + fn blake2_128_concat(&self) -> Vec { + self.using_encoded(Blake2_128Concat::hash) + } + fn twox_128(&self) -> [u8; 16] { + self.using_encoded(twox_128) + } + fn twox_256(&self) -> [u8; 32] { + self.using_encoded(twox_256) + } + fn twox_64_concat(&self) -> Vec { + self.using_encoded(Twox64Concat::hash) + } + fn identity(&self) -> Vec { + self.encode() + } } /// Hasher to use to hash keys to insert to storage. pub trait StorageHasher: 'static { - type Output: AsRef<[u8]>; - fn hash(x: &[u8]) -> Self::Output; + type Output: AsRef<[u8]>; + fn hash(x: &[u8]) -> Self::Output; } /// Hasher to use to hash keys to insert to storage. /// /// Reversible hasher store the encoded key after the hash part. pub trait ReversibleStorageHasher: StorageHasher { - /// Split the hash part out of the input. - /// - /// I.e. for input `&[hash ++ key ++ some]` returns `&[key ++ some]` - fn reverse(x: &[u8]) -> &[u8]; + /// Split the hash part out of the input. + /// + /// I.e. for input `&[hash ++ key ++ some]` returns `&[key ++ some]` + fn reverse(x: &[u8]) -> &[u8]; } /// Store the key directly. 
pub struct Identity; impl StorageHasher for Identity { - type Output = Vec; - fn hash(x: &[u8]) -> Vec { - x.to_vec() - } + type Output = Vec; + fn hash(x: &[u8]) -> Vec { + x.to_vec() + } } impl ReversibleStorageHasher for Identity { - fn reverse(x: &[u8]) -> &[u8] { - x - } + fn reverse(x: &[u8]) -> &[u8] { + x + } } /// Hash storage keys with `concat(twox64(key), key)` pub struct Twox64Concat; impl StorageHasher for Twox64Concat { - type Output = Vec; - fn hash(x: &[u8]) -> Vec { - twox_64(x) - .iter() - .chain(x.into_iter()) - .cloned() - .collect::>() - } + type Output = Vec; + fn hash(x: &[u8]) -> Vec { + twox_64(x) + .iter() + .chain(x.into_iter()) + .cloned() + .collect::>() + } } impl ReversibleStorageHasher for Twox64Concat { - fn reverse(x: &[u8]) -> &[u8] { - if x.len() < 8 { - crate::debug::error!("Invalid reverse: hash length too short"); - return &[] - } - &x[8..] - } + fn reverse(x: &[u8]) -> &[u8] { + if x.len() < 8 { + crate::debug::error!("Invalid reverse: hash length too short"); + return &[]; + } + &x[8..] + } } /// Hash storage keys with `concat(blake2_128(key), key)` pub struct Blake2_128Concat; impl StorageHasher for Blake2_128Concat { - type Output = Vec; - fn hash(x: &[u8]) -> Vec { - blake2_128(x) - .iter() - .chain(x.into_iter()) - .cloned() - .collect::>() - } + type Output = Vec; + fn hash(x: &[u8]) -> Vec { + blake2_128(x) + .iter() + .chain(x.into_iter()) + .cloned() + .collect::>() + } } impl ReversibleStorageHasher for Blake2_128Concat { - fn reverse(x: &[u8]) -> &[u8] { - if x.len() < 16 { - crate::debug::error!("Invalid reverse: hash length too short"); - return &[] - } - &x[16..] - } + fn reverse(x: &[u8]) -> &[u8] { + if x.len() < 16 { + crate::debug::error!("Invalid reverse: hash length too short"); + return &[]; + } + &x[16..] 
+ } } /// Hash storage keys with blake2 128 pub struct Blake2_128; impl StorageHasher for Blake2_128 { - type Output = [u8; 16]; - fn hash(x: &[u8]) -> [u8; 16] { - blake2_128(x) - } + type Output = [u8; 16]; + fn hash(x: &[u8]) -> [u8; 16] { + blake2_128(x) + } } /// Hash storage keys with blake2 256 pub struct Blake2_256; impl StorageHasher for Blake2_256 { - type Output = [u8; 32]; - fn hash(x: &[u8]) -> [u8; 32] { - blake2_256(x) - } + type Output = [u8; 32]; + fn hash(x: &[u8]) -> [u8; 32] { + blake2_256(x) + } } /// Hash storage keys with twox 128 pub struct Twox128; impl StorageHasher for Twox128 { - type Output = [u8; 16]; - fn hash(x: &[u8]) -> [u8; 16] { - twox_128(x) - } + type Output = [u8; 16]; + fn hash(x: &[u8]) -> [u8; 16] { + twox_128(x) + } } /// Hash storage keys with twox 256 pub struct Twox256; impl StorageHasher for Twox256 { - type Output = [u8; 32]; - fn hash(x: &[u8]) -> [u8; 32] { - twox_256(x) - } + type Output = [u8; 32]; + fn hash(x: &[u8]) -> [u8; 32] { + twox_256(x) + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn test_twox_64_concat() { - let r = Twox64Concat::hash(b"foo"); - assert_eq!(r.split_at(8), (&twox_128(b"foo")[..8], &b"foo"[..])) - } - - #[test] - fn test_blake2_128_concat() { - let r = Blake2_128Concat::hash(b"foo"); - assert_eq!(r.split_at(16), (&blake2_128(b"foo")[..], &b"foo"[..])) - } + use super::*; + + #[test] + fn test_twox_64_concat() { + let r = Twox64Concat::hash(b"foo"); + assert_eq!(r.split_at(8), (&twox_128(b"foo")[..8], &b"foo"[..])) + } + + #[test] + fn test_blake2_128_concat() { + let r = Blake2_128Concat::hash(b"foo"); + assert_eq!(r.split_at(16), (&blake2_128(b"foo")[..], &b"foo"[..])) + } } diff --git a/frame/support/src/inherent.rs b/frame/support/src/inherent.rs index a21bd361b6..899d0619a1 100644 --- a/frame/support/src/inherent.rs +++ b/frame/support/src/inherent.rs @@ -14,13 +14,12 @@ // You should have received a copy of the GNU General Public License // along with Substrate. 
If not, see . -#[doc(hidden)] -pub use crate::sp_std::vec::Vec; #[doc(hidden)] pub use crate::sp_runtime::traits::{Block as BlockT, Extrinsic}; #[doc(hidden)] -pub use sp_inherents::{InherentData, ProvideInherent, CheckInherentsResult, IsFatalError}; - +pub use crate::sp_std::vec::Vec; +#[doc(hidden)] +pub use sp_inherents::{CheckInherentsResult, InherentData, IsFatalError, ProvideInherent}; /// Implement the outer inherent. /// All given modules need to implement `ProvideInherent`. diff --git a/frame/support/src/lib.rs b/frame/support/src/lib.rs index eed5c95b17..497672a3fe 100644 --- a/frame/support/src/lib.rs +++ b/frame/support/src/lib.rs @@ -26,10 +26,6 @@ extern crate bitmask; #[cfg(feature = "std")] pub extern crate tracing; -#[cfg(feature = "std")] -pub use serde; -#[doc(hidden)] -pub use sp_std; #[doc(hidden)] pub use codec; #[cfg(feature = "std")] @@ -38,19 +34,23 @@ pub use once_cell; #[doc(hidden)] pub use paste; #[cfg(feature = "std")] -#[doc(hidden)] -pub use sp_state_machine::BasicExternalities; +pub use serde; #[doc(hidden)] pub use sp_io::storage::root as storage_root; #[doc(hidden)] pub use sp_runtime::RuntimeDebug; +#[cfg(feature = "std")] +#[doc(hidden)] +pub use sp_state_machine::BasicExternalities; +#[doc(hidden)] +pub use sp_std; #[macro_use] pub mod debug; #[macro_use] pub mod dispatch; -pub mod storage; mod hash; +pub mod storage; #[macro_use] pub mod event; #[macro_use] @@ -66,16 +66,16 @@ pub mod error; pub mod traits; pub mod weights; +pub use self::dispatch::{Callable, IsSubType, Parameter}; pub use self::hash::{ - Twox256, Twox128, Blake2_256, Blake2_128, Identity, Twox64Concat, Blake2_128Concat, Hashable, - StorageHasher, ReversibleStorageHasher + Blake2_128, Blake2_128Concat, Blake2_256, Hashable, Identity, ReversibleStorageHasher, + StorageHasher, Twox128, Twox256, Twox64Concat, }; pub use self::storage::{ - StorageValue, StorageMap, StorageDoubleMap, StoragePrefixedMap, IterableStorageMap, - IterableStorageDoubleMap, migration + 
migration, IterableStorageDoubleMap, IterableStorageMap, StorageDoubleMap, StorageMap, + StoragePrefixedMap, StorageValue, }; -pub use self::dispatch::{Parameter, Callable, IsSubType}; -pub use sp_runtime::{self, ConsensusEngineId, print, traits::Printable}; +pub use sp_runtime::{self, print, traits::Printable, ConsensusEngineId}; /// Macro for easily creating a new implementation of the `Get` trait. Use similarly to /// how you would declare a `const`: @@ -147,16 +147,16 @@ macro_rules! ord_parameter_types { } #[doc(inline)] -pub use frame_support_procedural::{decl_storage, construct_runtime}; +pub use frame_support_procedural::{construct_runtime, decl_storage}; /// Return Err of the expression: `return Err($expression);`. /// /// Used as `fail!(expression)`. #[macro_export] macro_rules! fail { - ( $y:expr ) => {{ - return Err($y.into()); - }} + ( $y:expr ) => {{ + return Err($y.into()); + }}; } /// Evaluate `$x:expr` and if not true return `Err($y:expr)`. @@ -164,11 +164,11 @@ macro_rules! fail { /// Used as `ensure!(expression_to_ensure, expression_to_return_on_false)`. #[macro_export] macro_rules! ensure { - ( $x:expr, $y:expr $(,)? ) => {{ - if !$x { - $crate::fail!($y); - } - }} + ( $x:expr, $y:expr $(,)? ) => {{ + if !$x { + $crate::fail!($y); + } + }}; } /// Evaluate an expression, assert it returns an expected `Err` value and that @@ -178,14 +178,14 @@ macro_rules! ensure { #[macro_export] #[cfg(feature = "std")] macro_rules! assert_noop { - ( + ( $x:expr, $y:expr $(,)? ) => { - let h = $crate::storage_root(); - $crate::assert_err!($x, $y); - assert_eq!(h, $crate::storage_root()); - } + let h = $crate::storage_root(); + $crate::assert_err!($x, $y); + assert_eq!(h, $crate::storage_root()); + }; } /// Panic if an expression doesn't evaluate to an `Err`. @@ -198,9 +198,9 @@ macro_rules! assert_noop { #[macro_export] #[cfg(feature = "std")] macro_rules! assert_err { - ( $x:expr , $y:expr $(,)? 
) => { - assert_eq!($x, Err($y.into())); - } + ( $x:expr , $y:expr $(,)? ) => { + assert_eq!($x, Err($y.into())); + }; } /// Panic if an expression doesn't evaluate to `Ok`. @@ -210,16 +210,16 @@ macro_rules! assert_err { #[macro_export] #[cfg(feature = "std")] macro_rules! assert_ok { - ( $x:expr $(,)? ) => { - let is = $x; - match is { - Ok(_) => (), - _ => assert!(false, "Expected Ok(_). Got {:#?}", is), - } - }; - ( $x:expr, $y:expr $(,)? ) => { - assert_eq!($x, Ok($y)); - } + ( $x:expr $(,)? ) => { + let is = $x; + match is { + Ok(_) => (), + _ => assert!(false, "Expected Ok(_). Got {:#?}", is), + } + }; + ( $x:expr, $y:expr $(,)? ) => { + assert_eq!($x, Ok($y)); + }; } /// Runs given code within a tracing span, measuring it's execution time. @@ -245,13 +245,17 @@ macro_rules! tracing_span { #[macro_export] #[cfg(feature = "tracing")] macro_rules! if_tracing { - ( $if:expr, $else:expr ) => {{ $if }} + ( $if:expr, $else:expr ) => {{ + $if + }}; } #[macro_export] #[cfg(not(feature = "tracing"))] macro_rules! if_tracing { - ( $if:expr, $else:expr ) => {{ $else }} + ( $if:expr, $else:expr ) => {{ + $else + }}; } /// The void type - it cannot exist. @@ -261,406 +265,425 @@ pub enum Void {} #[cfg(feature = "std")] #[doc(hidden)] -pub use serde::{Serialize, Deserialize}; +pub use serde::{Deserialize, Serialize}; #[cfg(test)] mod tests { - use super::*; - use codec::{Codec, EncodeLike}; - use frame_metadata::{ - DecodeDifferent, StorageEntryMetadata, StorageMetadata, StorageEntryType, - StorageEntryModifier, DefaultByteGetter, StorageHasher, - }; - use sp_std::marker::PhantomData; - - pub trait Trait { - type BlockNumber: Codec + EncodeLike + Default; - type Origin; - } - - mod module { - #![allow(dead_code)] - - use super::Trait; - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - } - use self::module::Module; - - decl_storage! 
{ - trait Store for Module as Test { - pub Data get(fn data) build(|_| vec![(15u32, 42u64)]): - map hasher(twox_64_concat) u32 => u64; - pub OptionLinkedMap: map hasher(blake2_128_concat) u32 => Option; - pub GenericData get(fn generic_data): - map hasher(identity) T::BlockNumber => T::BlockNumber; - pub GenericData2 get(fn generic_data2): - map hasher(blake2_128_concat) T::BlockNumber => Option; - pub GetterNoFnKeyword get(no_fn): Option; - - pub DataDM config(test_config) build(|_| vec![(15u32, 16u32, 42u64)]): - double_map hasher(twox_64_concat) u32, hasher(blake2_128_concat) u32 => u64; - pub GenericDataDM: - double_map hasher(blake2_128_concat) T::BlockNumber, hasher(identity) T::BlockNumber - => T::BlockNumber; - pub GenericData2DM: - double_map hasher(blake2_128_concat) T::BlockNumber, hasher(twox_64_concat) T::BlockNumber - => Option; - pub AppendableDM: - double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Vec; - } - } - - struct Test; - impl Trait for Test { - type BlockNumber = u32; - type Origin = u32; - } - - fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig::default().build_storage().unwrap().into() - } - - type Map = Data; - - trait Sorted { fn sorted(self) -> Self; } - impl Sorted for Vec { - fn sorted(mut self) -> Self { - self.sort(); - self - } - } - - #[test] - fn map_issue_3318() { - new_test_ext().execute_with(|| { - OptionLinkedMap::insert(1, 1); - assert_eq!(OptionLinkedMap::get(1), Some(1)); - OptionLinkedMap::insert(1, 2); - assert_eq!(OptionLinkedMap::get(1), Some(2)); - }); - } - - #[test] - fn map_swap_works() { - new_test_ext().execute_with(|| { - OptionLinkedMap::insert(0, 0); - OptionLinkedMap::insert(1, 1); - OptionLinkedMap::insert(2, 2); - OptionLinkedMap::insert(3, 3); - - let collect = || OptionLinkedMap::iter().collect::>().sorted(); - assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); - - // Two existing - OptionLinkedMap::swap(1, 2); - assert_eq!(collect(), vec![(0, 
0), (1, 2), (2, 1), (3, 3)]); - - // Back to normal - OptionLinkedMap::swap(2, 1); - assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); - - // Left existing - OptionLinkedMap::swap(2, 5); - assert_eq!(collect(), vec![(0, 0), (1, 1), (3, 3), (5, 2)]); - - // Right existing - OptionLinkedMap::swap(5, 2); - assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); - }); - } - - #[test] - fn double_map_swap_works() { - new_test_ext().execute_with(|| { - DataDM::insert(0, 1, 1); - DataDM::insert(1, 0, 2); - DataDM::insert(1, 1, 3); - - let get_all = || vec![ - DataDM::get(0, 1), - DataDM::get(1, 0), - DataDM::get(1, 1), - DataDM::get(2, 0), - DataDM::get(2, 1), - ]; - assert_eq!(get_all(), vec![1, 2, 3, 0, 0]); - - // Two existing - DataDM::swap(0, 1, 1, 0); - assert_eq!(get_all(), vec![2, 1, 3, 0, 0]); - - // Left existing - DataDM::swap(1, 0, 2, 0); - assert_eq!(get_all(), vec![2, 0, 3, 1, 0]); - - // Right existing - DataDM::swap(2, 1, 1, 1); - assert_eq!(get_all(), vec![2, 0, 0, 1, 3]); - }); - } - - #[test] - fn map_basic_insert_remove_should_work() { - new_test_ext().execute_with(|| { - // initialized during genesis - assert_eq!(Map::get(&15u32), 42u64); - - // get / insert / take - let key = 17u32; - assert_eq!(Map::get(&key), 0u64); - Map::insert(key, 4u64); - assert_eq!(Map::get(&key), 4u64); - assert_eq!(Map::take(&key), 4u64); - assert_eq!(Map::get(&key), 0u64); - - // mutate - Map::mutate(&key, |val| { - *val = 15; - }); - assert_eq!(Map::get(&key), 15u64); - - // remove - Map::remove(&key); - assert_eq!(Map::get(&key), 0u64); - }); - } - - #[test] - fn map_iteration_should_work() { - new_test_ext().execute_with(|| { - assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42)]); - // insert / remove - let key = 17u32; - Map::insert(key, 4u64); - assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42), (key, 4)]); - assert_eq!(Map::take(&15), 42u64); - assert_eq!(Map::take(&key), 4u64); - assert_eq!(Map::iter().collect::>().sorted(), 
vec![]); - - // Add couple of more elements - Map::insert(key, 42u64); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42)]); - Map::insert(key + 1, 43u64); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42), (key + 1, 43)]); - - // mutate - let key = key + 2; - Map::mutate(&key, |val| { - *val = 15; - }); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 15)]); - Map::mutate(&key, |val| { - *val = 17; - }); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43), (key, 17)]); - - // remove first - Map::remove(&key); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 2, 42), (key - 1, 43)]); - - // remove last from the list - Map::remove(&(key - 2)); - assert_eq!(Map::iter().collect::>().sorted(), vec![(key - 1, 43)]); - - // remove the last element - Map::remove(&(key - 1)); - assert_eq!(Map::iter().collect::>().sorted(), vec![]); - }); - } - - #[test] - fn double_map_basic_insert_remove_remove_prefix_should_work() { - new_test_ext().execute_with(|| { - type DoubleMap = DataDM; - // initialized during genesis - assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); - - // get / insert / take - let key1 = 17u32; - let key2 = 18u32; - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - DoubleMap::insert(&key1, &key2, &4u64); - assert_eq!(DoubleMap::get(&key1, &key2), 4u64); - assert_eq!(DoubleMap::take(&key1, &key2), 4u64); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - - // mutate - DoubleMap::mutate(&key1, &key2, |val| { - *val = 15; - }); - assert_eq!(DoubleMap::get(&key1, &key2), 15u64); - - // remove - DoubleMap::remove(&key1, &key2); - assert_eq!(DoubleMap::get(&key1, &key2), 0u64); - - // remove prefix - DoubleMap::insert(&key1, &key2, &4u64); - DoubleMap::insert(&key1, &(key2 + 1), &4u64); - DoubleMap::insert(&(key1 + 1), &key2, &4u64); - DoubleMap::insert(&(key1 + 1), &(key2 + 1), &4u64); - DoubleMap::remove_prefix(&key1); - assert_eq!(DoubleMap::get(&key1, 
&key2), 0u64); - assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); - assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); - assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); - - }); - } - - #[test] - fn double_map_append_should_work() { - new_test_ext().execute_with(|| { - type DoubleMap = AppendableDM; - - let key1 = 17u32; - let key2 = 18u32; - - DoubleMap::insert(&key1, &key2, &vec![1]); - DoubleMap::append(&key1, &key2, &[2, 3]).unwrap(); - assert_eq!(DoubleMap::get(&key1, &key2), &[1, 2, 3]); - }); - } - - const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - prefix: DecodeDifferent::Encode("Test"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("Data"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Twox64Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u64"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructData(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("OptionLinkedMap"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u32"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructOptionLinkedMap(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Identity, - key: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - unused: false - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData(PhantomData::)) - ), - documentation: 
DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData2"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map{ - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - unused: false - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GetterNoFnKeyword"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetterNoFnKeyword(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("DataDM"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Twox64Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("u64"), - key2_hasher: StorageHasher::Blake2_128Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericDataDM"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Identity, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericDataDM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GenericData2DM"), - 
modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("T::BlockNumber"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("T::BlockNumber"), - key2_hasher: StorageHasher::Twox64Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("AppendableDM"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap{ - hasher: StorageHasher::Blake2_128Concat, - key1: DecodeDifferent::Encode("u32"), - key2: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode("Vec"), - key2_hasher: StorageHasher::Blake2_128Concat, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGenericData2DM(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ] - ), - }; - - #[test] - fn store_metadata() { - let metadata = Module::::storage_metadata(); - pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); - } + use super::*; + use codec::{Codec, EncodeLike}; + use frame_metadata::{ + DecodeDifferent, DefaultByteGetter, StorageEntryMetadata, StorageEntryModifier, + StorageEntryType, StorageHasher, StorageMetadata, + }; + use sp_std::marker::PhantomData; + + pub trait Trait { + type BlockNumber: Codec + EncodeLike + Default; + type Origin; + } + + mod module { + #![allow(dead_code)] + + use super::Trait; + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + } + use self::module::Module; + + decl_storage! 
{ + trait Store for Module as Test { + pub Data get(fn data) build(|_| vec![(15u32, 42u64)]): + map hasher(twox_64_concat) u32 => u64; + pub OptionLinkedMap: map hasher(blake2_128_concat) u32 => Option; + pub GenericData get(fn generic_data): + map hasher(identity) T::BlockNumber => T::BlockNumber; + pub GenericData2 get(fn generic_data2): + map hasher(blake2_128_concat) T::BlockNumber => Option; + pub GetterNoFnKeyword get(no_fn): Option; + + pub DataDM config(test_config) build(|_| vec![(15u32, 16u32, 42u64)]): + double_map hasher(twox_64_concat) u32, hasher(blake2_128_concat) u32 => u64; + pub GenericDataDM: + double_map hasher(blake2_128_concat) T::BlockNumber, hasher(identity) T::BlockNumber + => T::BlockNumber; + pub GenericData2DM: + double_map hasher(blake2_128_concat) T::BlockNumber, hasher(twox_64_concat) T::BlockNumber + => Option; + pub AppendableDM: + double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Vec; + } + } + + struct Test; + impl Trait for Test { + type BlockNumber = u32; + type Origin = u32; + } + + fn new_test_ext() -> sp_io::TestExternalities { + GenesisConfig::default().build_storage().unwrap().into() + } + + type Map = Data; + + trait Sorted { + fn sorted(self) -> Self; + } + impl Sorted for Vec { + fn sorted(mut self) -> Self { + self.sort(); + self + } + } + + #[test] + fn map_issue_3318() { + new_test_ext().execute_with(|| { + OptionLinkedMap::insert(1, 1); + assert_eq!(OptionLinkedMap::get(1), Some(1)); + OptionLinkedMap::insert(1, 2); + assert_eq!(OptionLinkedMap::get(1), Some(2)); + }); + } + + #[test] + fn map_swap_works() { + new_test_ext().execute_with(|| { + OptionLinkedMap::insert(0, 0); + OptionLinkedMap::insert(1, 1); + OptionLinkedMap::insert(2, 2); + OptionLinkedMap::insert(3, 3); + + let collect = || OptionLinkedMap::iter().collect::>().sorted(); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); + + // Two existing + OptionLinkedMap::swap(1, 2); + assert_eq!(collect(), 
vec![(0, 0), (1, 2), (2, 1), (3, 3)]); + + // Back to normal + OptionLinkedMap::swap(2, 1); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); + + // Left existing + OptionLinkedMap::swap(2, 5); + assert_eq!(collect(), vec![(0, 0), (1, 1), (3, 3), (5, 2)]); + + // Right existing + OptionLinkedMap::swap(5, 2); + assert_eq!(collect(), vec![(0, 0), (1, 1), (2, 2), (3, 3)]); + }); + } + + #[test] + fn double_map_swap_works() { + new_test_ext().execute_with(|| { + DataDM::insert(0, 1, 1); + DataDM::insert(1, 0, 2); + DataDM::insert(1, 1, 3); + + let get_all = || { + vec![ + DataDM::get(0, 1), + DataDM::get(1, 0), + DataDM::get(1, 1), + DataDM::get(2, 0), + DataDM::get(2, 1), + ] + }; + assert_eq!(get_all(), vec![1, 2, 3, 0, 0]); + + // Two existing + DataDM::swap(0, 1, 1, 0); + assert_eq!(get_all(), vec![2, 1, 3, 0, 0]); + + // Left existing + DataDM::swap(1, 0, 2, 0); + assert_eq!(get_all(), vec![2, 0, 3, 1, 0]); + + // Right existing + DataDM::swap(2, 1, 1, 1); + assert_eq!(get_all(), vec![2, 0, 0, 1, 3]); + }); + } + + #[test] + fn map_basic_insert_remove_should_work() { + new_test_ext().execute_with(|| { + // initialized during genesis + assert_eq!(Map::get(&15u32), 42u64); + + // get / insert / take + let key = 17u32; + assert_eq!(Map::get(&key), 0u64); + Map::insert(key, 4u64); + assert_eq!(Map::get(&key), 4u64); + assert_eq!(Map::take(&key), 4u64); + assert_eq!(Map::get(&key), 0u64); + + // mutate + Map::mutate(&key, |val| { + *val = 15; + }); + assert_eq!(Map::get(&key), 15u64); + + // remove + Map::remove(&key); + assert_eq!(Map::get(&key), 0u64); + }); + } + + #[test] + fn map_iteration_should_work() { + new_test_ext().execute_with(|| { + assert_eq!(Map::iter().collect::>().sorted(), vec![(15, 42)]); + // insert / remove + let key = 17u32; + Map::insert(key, 4u64); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(15, 42), (key, 4)] + ); + assert_eq!(Map::take(&15), 42u64); + assert_eq!(Map::take(&key), 4u64); + 
assert_eq!(Map::iter().collect::>().sorted(), vec![]); + + // Add couple of more elements + Map::insert(key, 42u64); + assert_eq!(Map::iter().collect::>().sorted(), vec![(key, 42)]); + Map::insert(key + 1, 43u64); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key, 42), (key + 1, 43)] + ); + + // mutate + let key = key + 2; + Map::mutate(&key, |val| { + *val = 15; + }); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 15)] + ); + Map::mutate(&key, |val| { + *val = 17; + }); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43), (key, 17)] + ); + + // remove first + Map::remove(&key); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 2, 42), (key - 1, 43)] + ); + + // remove last from the list + Map::remove(&(key - 2)); + assert_eq!( + Map::iter().collect::>().sorted(), + vec![(key - 1, 43)] + ); + + // remove the last element + Map::remove(&(key - 1)); + assert_eq!(Map::iter().collect::>().sorted(), vec![]); + }); + } + + #[test] + fn double_map_basic_insert_remove_remove_prefix_should_work() { + new_test_ext().execute_with(|| { + type DoubleMap = DataDM; + // initialized during genesis + assert_eq!(DoubleMap::get(&15u32, &16u32), 42u64); + + // get / insert / take + let key1 = 17u32; + let key2 = 18u32; + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + DoubleMap::insert(&key1, &key2, &4u64); + assert_eq!(DoubleMap::get(&key1, &key2), 4u64); + assert_eq!(DoubleMap::take(&key1, &key2), 4u64); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + + // mutate + DoubleMap::mutate(&key1, &key2, |val| { + *val = 15; + }); + assert_eq!(DoubleMap::get(&key1, &key2), 15u64); + + // remove + DoubleMap::remove(&key1, &key2); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + + // remove prefix + DoubleMap::insert(&key1, &key2, &4u64); + DoubleMap::insert(&key1, &(key2 + 1), &4u64); + DoubleMap::insert(&(key1 + 1), &key2, &4u64); + DoubleMap::insert(&(key1 + 1), &(key2 + 
1), &4u64); + DoubleMap::remove_prefix(&key1); + assert_eq!(DoubleMap::get(&key1, &key2), 0u64); + assert_eq!(DoubleMap::get(&key1, &(key2 + 1)), 0u64); + assert_eq!(DoubleMap::get(&(key1 + 1), &key2), 4u64); + assert_eq!(DoubleMap::get(&(key1 + 1), &(key2 + 1)), 4u64); + }); + } + + #[test] + fn double_map_append_should_work() { + new_test_ext().execute_with(|| { + type DoubleMap = AppendableDM; + + let key1 = 17u32; + let key2 = 18u32; + + DoubleMap::insert(&key1, &key2, &vec![1]); + DoubleMap::append(&key1, &key2, &[2, 3]).unwrap(); + assert_eq!(DoubleMap::get(&key1, &key2), &[1, 2, 3]); + }); + } + + const EXPECTED_METADATA: StorageMetadata = StorageMetadata { + prefix: DecodeDifferent::Encode("Test"), + entries: DecodeDifferent::Encode(&[ + StorageEntryMetadata { + name: DecodeDifferent::Encode("Data"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Twox64Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u64"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructData( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("OptionLinkedMap"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u32"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructOptionLinkedMap(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericData"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Identity, + key: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + unused: false, + }, + default: 
DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericData( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericData2"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericData2( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GetterNoFnKeyword"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGetterNoFnKeyword(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("DataDM"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Twox64Concat, + key1: DecodeDifferent::Encode("u32"), + key2: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("u64"), + key2_hasher: StorageHasher::Blake2_128Concat, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructDataDM( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericDataDM"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("T::BlockNumber"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + key2_hasher: StorageHasher::Identity, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGenericDataDM( + PhantomData::, + ))), + 
documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GenericData2DM"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("T::BlockNumber"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("T::BlockNumber"), + key2_hasher: StorageHasher::Twox64Concat, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGenericData2DM(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("AppendableDM"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Blake2_128Concat, + key1: DecodeDifferent::Encode("u32"), + key2: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode("Vec"), + key2_hasher: StorageHasher::Blake2_128Concat, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGenericData2DM(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), + }; + + #[test] + fn store_metadata() { + let metadata = Module::::storage_metadata(); + pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); + } } diff --git a/frame/support/src/metadata.rs b/frame/support/src/metadata.rs index 6a3e41b809..ff287bcd97 100644 --- a/frame/support/src/metadata.rs +++ b/frame/support/src/metadata.rs @@ -15,10 +15,10 @@ // along with Substrate. If not, see . 
pub use frame_metadata::{ - DecodeDifferent, FnEncode, RuntimeMetadata, ModuleMetadata, RuntimeMetadataLastVersion, - DefaultByteGetter, RuntimeMetadataPrefixed, StorageEntryMetadata, StorageMetadata, - StorageEntryType, StorageEntryModifier, DefaultByte, StorageHasher, ModuleErrorMetadata, - ExtrinsicMetadata, + DecodeDifferent, DefaultByte, DefaultByteGetter, ExtrinsicMetadata, FnEncode, + ModuleErrorMetadata, ModuleMetadata, RuntimeMetadata, RuntimeMetadataLastVersion, + RuntimeMetadataPrefixed, StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + StorageHasher, StorageMetadata, }; /// Implements the metadata support for the given runtime and all its modules. @@ -164,7 +164,6 @@ macro_rules! __runtime_modules_to_metadata_calls_call { }; } - #[macro_export] #[doc(hidden)] macro_rules! __runtime_modules_to_metadata_calls_event { @@ -237,110 +236,109 @@ macro_rules! __runtime_modules_to_metadata_calls_storage { }; } - #[cfg(test)] // Do not complain about unused `dispatch` and `dispatch_aux`. 
#[allow(dead_code)] mod tests { - use super::*; - use frame_metadata::{ - EventMetadata, StorageEntryModifier, StorageEntryType, FunctionMetadata, StorageEntryMetadata, - ModuleMetadata, RuntimeMetadataPrefixed, DefaultByte, ModuleConstantMetadata, DefaultByteGetter, - ErrorMetadata, ExtrinsicMetadata, - }; - use codec::{Encode, Decode}; - use crate::traits::Get; - use sp_runtime::transaction_validity::TransactionValidityError; - - #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] - struct TestExtension; - impl sp_runtime::traits::SignedExtension for TestExtension { - type AccountId = u32; - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "testextension"; - fn additional_signed(&self) -> Result { - Ok(1) - } - } - - #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] - struct TestExtension2; - impl sp_runtime::traits::SignedExtension for TestExtension2 { - type AccountId = u32; - type Call = (); - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "testextension2"; - fn additional_signed(&self) -> Result { - Ok(1) - } - } - - struct TestExtrinsic; - - impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { - const VERSION: u8 = 1; - type SignedExtensions = (TestExtension, TestExtension2); - } - - mod system { - use super::*; - - pub trait Trait: 'static { - const ASSOCIATED_CONST: u64 = 500; - type Origin: Into, Self::Origin>> - + From>; - type AccountId: From + Encode; - type BlockNumber: From + Encode; - type SomeValue: Get; - type ModuleToIndex: crate::traits::ModuleToIndex; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// Hi, I am a comment. 
- const BlockNumber: T::BlockNumber = 100.into(); - const GetType: T::AccountId = T::SomeValue::get().into(); - const ASSOCIATED_CONST: u64 = T::ASSOCIATED_CONST.into(); - } - } - - decl_event!( - pub enum Event { - SystemEvent, - } - ); - - #[derive(Clone, PartialEq, Eq, Debug)] - pub enum RawOrigin { - Root, - Signed(AccountId), - None, - } - - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } - } - - pub type Origin = RawOrigin<::AccountId>; - } - - mod event_module { - use crate::dispatch::DispatchResult; - use crate::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; - - pub trait Trait: super::system::Trait { - type Balance; - } - - decl_event!( + use super::*; + use crate::traits::Get; + use codec::{Decode, Encode}; + use frame_metadata::{ + DefaultByte, DefaultByteGetter, ErrorMetadata, EventMetadata, ExtrinsicMetadata, + FunctionMetadata, ModuleConstantMetadata, ModuleMetadata, RuntimeMetadataPrefixed, + StorageEntryMetadata, StorageEntryModifier, StorageEntryType, + }; + use sp_runtime::transaction_validity::TransactionValidityError; + + #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] + struct TestExtension; + impl sp_runtime::traits::SignedExtension for TestExtension { + type AccountId = u32; + type Call = (); + type AdditionalSigned = u32; + type Pre = (); + const IDENTIFIER: &'static str = "testextension"; + fn additional_signed(&self) -> Result { + Ok(1) + } + } + + #[derive(Clone, Eq, Debug, PartialEq, Encode, Decode)] + struct TestExtension2; + impl sp_runtime::traits::SignedExtension for TestExtension2 { + type AccountId = u32; + type Call = (); + type AdditionalSigned = u32; + type Pre = (); + const IDENTIFIER: &'static str = "testextension2"; + fn additional_signed(&self) -> Result { + Ok(1) + } + } + + struct TestExtrinsic; + + impl sp_runtime::traits::ExtrinsicMetadata for TestExtrinsic { + const VERSION: u8 = 1; + type SignedExtensions = 
(TestExtension, TestExtension2); + } + + mod system { + use super::*; + + pub trait Trait: 'static { + const ASSOCIATED_CONST: u64 = 500; + type Origin: Into, Self::Origin>> + + From>; + type AccountId: From + Encode; + type BlockNumber: From + Encode; + type SomeValue: Get; + type ModuleToIndex: crate::traits::ModuleToIndex; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + /// Hi, I am a comment. + const BlockNumber: T::BlockNumber = 100.into(); + const GetType: T::AccountId = T::SomeValue::get().into(); + const ASSOCIATED_CONST: u64 = T::ASSOCIATED_CONST.into(); + } + } + + decl_event!( + pub enum Event { + SystemEvent, + } + ); + + #[derive(Clone, PartialEq, Eq, Debug)] + pub enum RawOrigin { + Root, + Signed(AccountId), + None, + } + + impl From> for RawOrigin { + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::None, + } + } + } + + pub type Origin = RawOrigin<::AccountId>; + } + + mod event_module { + use crate::dispatch::DispatchResult; + use crate::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; + + pub trait Trait: super::system::Trait { + type Balance; + } + + decl_event!( pub enum Event where ::Balance { /// Hi, I am a comment. @@ -348,262 +346,249 @@ mod tests { } ); - decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn aux_0(_origin) -> DispatchResult { unreachable!() } - } - } - - crate::decl_error! { - pub enum Error for Module { - /// Some user input error - UserInputError, - /// Something bad happened - /// this could be due to many reasons - BadThingHappened, - } - } - } - - mod event_module2 { - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - } - - decl_event!( + decl_module! 
{ + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn aux_0(_origin) -> DispatchResult { unreachable!() } + } + } + + crate::decl_error! { + pub enum Error for Module { + /// Some user input error + UserInputError, + /// Something bad happened + /// this could be due to many reasons + BadThingHappened, + } + } + } + + mod event_module2 { + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + } + + decl_event!( pub enum Event where ::Balance { TestEvent(Balance), } ); - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - crate::decl_storage! { - trait Store for Module as TestStorage { - StorageMethod : Option; - } - add_extra_genesis { - build(|_| {}); - } - } - } - - type EventModule = event_module::Module; - type EventModule2 = event_module2::Module; - - #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] - pub struct TestRuntime; - - impl_outer_event! { - pub enum TestEvent for TestRuntime { - system, - event_module, - event_module2, - } - } - - impl_outer_origin! { - pub enum Origin for TestRuntime {} - } - - impl_outer_dispatch! { - pub enum Call for TestRuntime where origin: Origin { - event_module::EventModule, - event_module2::EventModule2, - } - } - - impl event_module::Trait for TestRuntime { - type Balance = u32; - } - - impl event_module2::Trait for TestRuntime { - type Origin = Origin; - type Balance = u32; - type BlockNumber = u32; - } - - crate::parameter_types! { - pub const SystemValue: u32 = 600; - } - - impl system::Trait for TestRuntime { - type Origin = Origin; - type AccountId = u32; - type BlockNumber = u32; - type SomeValue = SystemValue; - type ModuleToIndex = (); - } - - impl_runtime_metadata!( + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + crate::decl_storage! 
{ + trait Store for Module as TestStorage { + StorageMethod : Option; + } + add_extra_genesis { + build(|_| {}); + } + } + } + + type EventModule = event_module::Module; + type EventModule2 = event_module2::Module; + + #[derive(Debug, Clone, PartialEq, Eq, Encode, Decode)] + pub struct TestRuntime; + + impl_outer_event! { + pub enum TestEvent for TestRuntime { + system, + event_module, + event_module2, + } + } + + impl_outer_origin! { + pub enum Origin for TestRuntime {} + } + + impl_outer_dispatch! { + pub enum Call for TestRuntime where origin: Origin { + event_module::EventModule, + event_module2::EventModule2, + } + } + + impl event_module::Trait for TestRuntime { + type Balance = u32; + } + + impl event_module2::Trait for TestRuntime { + type Origin = Origin; + type Balance = u32; + type BlockNumber = u32; + } + + crate::parameter_types! { + pub const SystemValue: u32 = 600; + } + + impl system::Trait for TestRuntime { + type Origin = Origin; + type AccountId = u32; + type BlockNumber = u32; + type SomeValue = SystemValue; + type ModuleToIndex = (); + } + + impl_runtime_metadata!( for TestRuntime with modules where Extrinsic = TestExtrinsic system::Module as System with Event, event_module::Module as Module with Event Call, event_module2::Module as Module2 with Event Storage Call, ); - struct ConstantBlockNumberByteGetter; - impl DefaultByte for ConstantBlockNumberByteGetter { - fn default_byte(&self) -> Vec { - 100u32.encode() - } - } - - struct ConstantGetTypeByteGetter; - impl DefaultByte for ConstantGetTypeByteGetter { - fn default_byte(&self) -> Vec { - SystemValue::get().encode() - } - } - - struct ConstantAssociatedConstByteGetter; - impl DefaultByte for ConstantAssociatedConstByteGetter { - fn default_byte(&self) -> Vec { - ::ASSOCIATED_CONST.encode() - } - } - - #[test] - fn runtime_metadata() { - let expected_metadata: RuntimeMetadataLastVersion = RuntimeMetadataLastVersion { - modules: DecodeDifferent::Encode(&[ - ModuleMetadata { - name: 
DecodeDifferent::Encode("System"), - storage: None, - calls: None, - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("SystemEvent"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - constants: DecodeDifferent::Encode( - FnEncode(|| &[ - ModuleConstantMetadata { - name: DecodeDifferent::Encode("BlockNumber"), - ty: DecodeDifferent::Encode("T::BlockNumber"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantBlockNumberByteGetter) - ), - documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), - }, - ModuleConstantMetadata { - name: DecodeDifferent::Encode("GetType"), - ty: DecodeDifferent::Encode("T::AccountId"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantGetTypeByteGetter) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ModuleConstantMetadata { - name: DecodeDifferent::Encode("ASSOCIATED_CONST"), - ty: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode( - DefaultByteGetter(&ConstantAssociatedConstByteGetter) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ]) - ), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module"), - storage: None, - calls: Some( - DecodeDifferent::Encode(FnEncode(|| &[ - FunctionMetadata { - name: DecodeDifferent::Encode("aux_0"), - arguments: DecodeDifferent::Encode(&[]), - documentation: DecodeDifferent::Encode(&[]), - } - ]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]) - } - ]) - )), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[ - ErrorMetadata { - name: DecodeDifferent::Encode("UserInputError"), - documentation: DecodeDifferent::Encode(&[" 
Some user input error"]), - }, - ErrorMetadata { - name: DecodeDifferent::Encode("BadThingHappened"), - documentation: DecodeDifferent::Encode(&[ - " Something bad happened", - " this could be due to many reasons", - ]), - }, - ])), - }, - ModuleMetadata { - name: DecodeDifferent::Encode("Module2"), - storage: Some(DecodeDifferent::Encode( - FnEncode(|| StorageMetadata { - prefix: DecodeDifferent::Encode("TestStorage"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("StorageMethod"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter( - &event_module2::__GetByteStructStorageMethod( - std::marker::PhantomData:: - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ] - ) - }), - )), - calls: Some(DecodeDifferent::Encode(FnEncode(|| &[]))), - event: Some(DecodeDifferent::Encode( - FnEncode(||&[ - EventMetadata { - name: DecodeDifferent::Encode("TestEvent"), - arguments: DecodeDifferent::Encode(&["Balance"]), - documentation: DecodeDifferent::Encode(&[]) - } - ]) - )), - constants: DecodeDifferent::Encode(FnEncode(|| &[])), - errors: DecodeDifferent::Encode(FnEncode(|| &[])), - }, - ]), - extrinsic: ExtrinsicMetadata { - version: 1, - signed_extensions: vec![ - DecodeDifferent::Encode("testextension"), - DecodeDifferent::Encode("testextension2"), - ], - } - }; - - let metadata_encoded = TestRuntime::metadata().encode(); - let metadata_decoded = RuntimeMetadataPrefixed::decode(&mut &metadata_encoded[..]); - let expected_metadata: RuntimeMetadataPrefixed = expected_metadata.into(); - - pretty_assertions::assert_eq!(expected_metadata, metadata_decoded.unwrap()); - } + struct ConstantBlockNumberByteGetter; + impl DefaultByte for ConstantBlockNumberByteGetter { + fn default_byte(&self) -> Vec { + 100u32.encode() + } + } + + struct ConstantGetTypeByteGetter; + impl DefaultByte for 
ConstantGetTypeByteGetter { + fn default_byte(&self) -> Vec { + SystemValue::get().encode() + } + } + + struct ConstantAssociatedConstByteGetter; + impl DefaultByte for ConstantAssociatedConstByteGetter { + fn default_byte(&self) -> Vec { + ::ASSOCIATED_CONST.encode() + } + } + + #[test] + fn runtime_metadata() { + let expected_metadata: RuntimeMetadataLastVersion = RuntimeMetadataLastVersion { + modules: DecodeDifferent::Encode(&[ + ModuleMetadata { + name: DecodeDifferent::Encode("System"), + storage: None, + calls: None, + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("SystemEvent"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + constants: DecodeDifferent::Encode(FnEncode(|| { + &[ + ModuleConstantMetadata { + name: DecodeDifferent::Encode("BlockNumber"), + ty: DecodeDifferent::Encode("T::BlockNumber"), + value: DecodeDifferent::Encode(DefaultByteGetter( + &ConstantBlockNumberByteGetter, + )), + documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), + }, + ModuleConstantMetadata { + name: DecodeDifferent::Encode("GetType"), + ty: DecodeDifferent::Encode("T::AccountId"), + value: DecodeDifferent::Encode(DefaultByteGetter( + &ConstantGetTypeByteGetter, + )), + documentation: DecodeDifferent::Encode(&[]), + }, + ModuleConstantMetadata { + name: DecodeDifferent::Encode("ASSOCIATED_CONST"), + ty: DecodeDifferent::Encode("u64"), + value: DecodeDifferent::Encode(DefaultByteGetter( + &ConstantAssociatedConstByteGetter, + )), + documentation: DecodeDifferent::Encode(&[]), + }, + ] + })), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module"), + storage: None, + calls: Some(DecodeDifferent::Encode(FnEncode(|| { + &[FunctionMetadata { + name: DecodeDifferent::Encode("aux_0"), + arguments: DecodeDifferent::Encode(&[]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + event: 
Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["Balance"]), + documentation: DecodeDifferent::Encode(&[" Hi, I am a comment."]), + }] + }))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| { + &[ + ErrorMetadata { + name: DecodeDifferent::Encode("UserInputError"), + documentation: DecodeDifferent::Encode(&[" Some user input error"]), + }, + ErrorMetadata { + name: DecodeDifferent::Encode("BadThingHappened"), + documentation: DecodeDifferent::Encode(&[ + " Something bad happened", + " this could be due to many reasons", + ]), + }, + ] + })), + }, + ModuleMetadata { + name: DecodeDifferent::Encode("Module2"), + storage: Some(DecodeDifferent::Encode(FnEncode(|| StorageMetadata { + prefix: DecodeDifferent::Encode("TestStorage"), + entries: DecodeDifferent::Encode(&[StorageEntryMetadata { + name: DecodeDifferent::Encode("StorageMethod"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &event_module2::__GetByteStructStorageMethod( + std::marker::PhantomData::, + ), + )), + documentation: DecodeDifferent::Encode(&[]), + }]), + }))), + calls: Some(DecodeDifferent::Encode(FnEncode(|| &[]))), + event: Some(DecodeDifferent::Encode(FnEncode(|| { + &[EventMetadata { + name: DecodeDifferent::Encode("TestEvent"), + arguments: DecodeDifferent::Encode(&["Balance"]), + documentation: DecodeDifferent::Encode(&[]), + }] + }))), + constants: DecodeDifferent::Encode(FnEncode(|| &[])), + errors: DecodeDifferent::Encode(FnEncode(|| &[])), + }, + ]), + extrinsic: ExtrinsicMetadata { + version: 1, + signed_extensions: vec![ + DecodeDifferent::Encode("testextension"), + DecodeDifferent::Encode("testextension2"), + ], + }, + }; + + let metadata_encoded = TestRuntime::metadata().encode(); + let metadata_decoded = 
RuntimeMetadataPrefixed::decode(&mut &metadata_encoded[..]); + let expected_metadata: RuntimeMetadataPrefixed = expected_metadata.into(); + + pretty_assertions::assert_eq!(expected_metadata, metadata_decoded.unwrap()); + } } diff --git a/frame/support/src/origin.rs b/frame/support/src/origin.rs index 43d2e70953..354e4cce14 100644 --- a/frame/support/src/origin.rs +++ b/frame/support/src/origin.rs @@ -223,78 +223,78 @@ macro_rules! impl_outer_origin { #[cfg(test)] mod tests { - mod system { - pub trait Trait { - type AccountId; - } + mod system { + pub trait Trait { + type AccountId; + } - #[derive(Clone, PartialEq, Eq, Debug)] - pub enum RawOrigin { - Root, - Signed(AccountId), - None, - } + #[derive(Clone, PartialEq, Eq, Debug)] + pub enum RawOrigin { + Root, + Signed(AccountId), + None, + } - impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } - } + impl From> for RawOrigin { + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::None, + } + } + } - pub type Origin = RawOrigin<::AccountId>; - } + pub type Origin = RawOrigin<::AccountId>; + } - mod origin_without_generic { - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct Origin; - } + mod origin_without_generic { + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Origin; + } - mod origin_with_generic { - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct Origin { - t: T - } - } + mod origin_with_generic { + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct Origin { + t: T, + } + } - #[derive(Clone, PartialEq, Eq, Debug)] - pub struct TestRuntime; + #[derive(Clone, PartialEq, Eq, Debug)] + pub struct TestRuntime; - impl system::Trait for TestRuntime { - type AccountId = u32; - } + impl system::Trait for TestRuntime { + type AccountId = u32; + } - impl_outer_origin!( + impl_outer_origin!( pub enum OriginWithoutSystem for TestRuntime { origin_without_generic, 
origin_with_generic, } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginWithoutSystem2 for TestRuntime { origin_with_generic, origin_without_generic } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginWithSystem for TestRuntime where system = system { origin_without_generic, origin_with_generic } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginWithSystem2 for TestRuntime where system = system { origin_with_generic, origin_without_generic, } ); - impl_outer_origin!( + impl_outer_origin!( pub enum OriginEmpty for TestRuntime where system = system {} ); } diff --git a/frame/support/src/storage/child.rs b/frame/support/src/storage/child.rs index d4d046a9d4..09dcc9df9f 100644 --- a/frame/support/src/storage/child.rs +++ b/frame/support/src/storage/child.rs @@ -26,204 +26,137 @@ // NOTE: could replace unhashed by having only one kind of storage (root being null storage key (storage_key can become Option<&[u8]>). use crate::sp_std::prelude::*; -use codec::{Codec, Encode, Decode}; +use codec::{Codec, Decode, Encode}; pub use sp_core::storage::ChildInfo; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. -pub fn get( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], -) -> Option { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ).and_then(|v| { - Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { - // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state in child trie at {:?}/{:?}", storage_key, key); - None - }) - }) +pub fn get(storage_key: &[u8], child_info: ChildInfo, key: &[u8]) -> Option { + let (data, child_type) = child_info.info(); + sp_io::storage::child_get(storage_key, data, child_type, key).and_then(|v| { + Decode::decode(&mut &v[..]).map(Some).unwrap_or_else(|_| { + // TODO #3700: error should be handleable. 
+ runtime_print!( + "ERROR: Corrupted state in child trie at {:?}/{:?}", + storage_key, + key + ); + None + }) + }) } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], ) -> T { - get(storage_key, child_info, key).unwrap_or_else(Default::default) + get(storage_key, child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - default_value: T, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + default_value: T, ) -> T { - get(storage_key, child_info, key).unwrap_or(default_value) + get(storage_key, child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - default_value: F, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + default_value: F, ) -> T { - get(storage_key, child_info, key).unwrap_or_else(default_value) + get(storage_key, child_info, key).unwrap_or_else(default_value) } /// Put `value` in storage under `key`. 
-pub fn put( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - value: &T, -) { - let (data, child_type) = child_info.info(); - value.using_encoded(|slice| - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - slice, - ) - ); +pub fn put(storage_key: &[u8], child_info: ChildInfo, key: &[u8], value: &T) { + let (data, child_type) = child_info.info(); + value.using_encoded(|slice| { + sp_io::storage::child_set(storage_key, data, child_type, key, slice) + }); } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. -pub fn take( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], -) -> Option { - let r = get(storage_key, child_info, key); - if r.is_some() { - kill(storage_key, child_info, key); - } - r +pub fn take(storage_key: &[u8], child_info: ChildInfo, key: &[u8]) -> Option { + let r = get(storage_key, child_info, key); + if r.is_some() { + kill(storage_key, child_info, key); + } + r } /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], ) -> T { - take(storage_key, child_info, key).unwrap_or_else(Default::default) + take(storage_key, child_info, key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - default_value: T, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + default_value: T, ) -> T { - take(storage_key, child_info, key).unwrap_or(default_value) + take(storage_key, child_info, key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. 
Ensure there is no explicit entry on return. pub fn take_or_else T>( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - default_value: F, + storage_key: &[u8], + child_info: ChildInfo, + key: &[u8], + default_value: F, ) -> T { - take(storage_key, child_info, key).unwrap_or_else(default_value) + take(storage_key, child_info, key).unwrap_or_else(default_value) } /// Check to see if `key` has an explicit entry in storage. -pub fn exists( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], -) -> bool { - let (data, child_type) = child_info.info(); - sp_io::storage::child_read( - storage_key, data, child_type, - key, &mut [0;0][..], 0, - ).is_some() +pub fn exists(storage_key: &[u8], child_info: ChildInfo, key: &[u8]) -> bool { + let (data, child_type) = child_info.info(); + sp_io::storage::child_read(storage_key, data, child_type, key, &mut [0; 0][..], 0).is_some() } /// Remove all `storage_key` key/values -pub fn kill_storage( - storage_key: &[u8], - child_info: ChildInfo, -) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_storage_kill( - storage_key, - data, - child_type, - ) +pub fn kill_storage(storage_key: &[u8], child_info: ChildInfo) { + let (data, child_type) = child_info.info(); + sp_io::storage::child_storage_kill(storage_key, data, child_type) } /// Ensure `key` has no explicit entry in storage. -pub fn kill( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], -) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_clear( - storage_key, - data, - child_type, - key, - ); +pub fn kill(storage_key: &[u8], child_info: ChildInfo, key: &[u8]) { + let (data, child_type) = child_info.info(); + sp_io::storage::child_clear(storage_key, data, child_type, key); } /// Get a Vec of bytes from storage. 
-pub fn get_raw( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], -) -> Option> { - let (data, child_type) = child_info.info(); - sp_io::storage::child_get( - storage_key, - data, - child_type, - key, - ) +pub fn get_raw(storage_key: &[u8], child_info: ChildInfo, key: &[u8]) -> Option> { + let (data, child_type) = child_info.info(); + sp_io::storage::child_get(storage_key, data, child_type, key) } /// Put a raw byte slice into storage. -pub fn put_raw( - storage_key: &[u8], - child_info: ChildInfo, - key: &[u8], - value: &[u8], -) { - let (data, child_type) = child_info.info(); - sp_io::storage::child_set( - storage_key, - data, - child_type, - key, - value, - ) +pub fn put_raw(storage_key: &[u8], child_info: ChildInfo, key: &[u8], value: &[u8]) { + let (data, child_type) = child_info.info(); + sp_io::storage::child_set(storage_key, data, child_type, key, value) } /// Calculate current child root value. -pub fn child_root( - storage_key: &[u8], -) -> Vec { - sp_io::storage::child_root( - storage_key, - ) +pub fn child_root(storage_key: &[u8]) -> Vec { + sp_io::storage::child_root(storage_key) } diff --git a/frame/support/src/storage/generator/double_map.rs b/frame/support/src/storage/generator/double_map.rs index e23b332383..0e127ef4a5 100644 --- a/frame/support/src/storage/generator/double_map.rs +++ b/frame/support/src/storage/generator/double_map.rs @@ -14,11 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use sp_std::prelude::*; +use crate::hash::{ReversibleStorageHasher, StorageHasher, Twox128}; +use crate::{ + storage::{self, unhashed}, + traits::Len, +}; +use codec::{Decode, Encode, EncodeAppend, EncodeLike, FullCodec, FullEncode, Ref}; use sp_std::borrow::Borrow; -use codec::{Ref, FullCodec, FullEncode, Decode, Encode, EncodeLike, EncodeAppend}; -use crate::{storage::{self, unhashed}, traits::Len}; -use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; +use sp_std::prelude::*; /// Generator for `StorageDoubleMap` used by `decl_storage`. /// @@ -41,527 +44,552 @@ use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; /// `blake2_256` must be used for Hasher2. Otherwise, other items in storage with the same first /// key can be compromised. pub trait StorageDoubleMap { - /// The type that get/take returns. - type Query; - - /// Hasher for the first key. - type Hasher1: StorageHasher; - - /// Hasher for the second key. - type Hasher2: StorageHasher; - - /// Module prefix. Used for generating final key. - fn module_prefix() -> &'static [u8]; - - /// Storage prefix. Used for generating final key. - fn storage_prefix() -> &'static [u8]; - - /// The full prefix; just the hash of `module_prefix` concatenated to the hash of - /// `storage_prefix`. - fn prefix_hash() -> Vec { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - - let mut result = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() - ); - - result.extend_from_slice(&module_prefix_hashed[..]); - result.extend_from_slice(&storage_prefix_hashed[..]); - - result - } - - /// Convert an optional value retrieved from storage to the type queried. - fn from_optional_value_to_query(v: Option) -> Self::Query; - - /// Convert a query to an optional value into storage. 
- fn from_query_to_optional_value(v: Self::Query) -> Option; - - /// Generate the first part of the key used in top storage. - fn storage_double_map_final_key1(k1: KArg1) -> Vec where - KArg1: EncodeLike, - { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let key_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); - - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() - ); - - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key_hashed.as_ref()); - - final_key - } - - /// Generate the full key used in top storage. - fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec where - KArg1: EncodeLike, - KArg2: EncodeLike, - { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let key1_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); - let key2_hashed = k2.borrow().using_encoded(Self::Hasher2::hash); - - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() - + storage_prefix_hashed.len() - + key1_hashed.as_ref().len() - + key2_hashed.as_ref().len() - ); - - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key1_hashed.as_ref()); - final_key.extend_from_slice(key2_hashed.as_ref()); - - final_key - } + /// The type that get/take returns. + type Query; + + /// Hasher for the first key. + type Hasher1: StorageHasher; + + /// Hasher for the second key. + type Hasher2: StorageHasher; + + /// Module prefix. Used for generating final key. + fn module_prefix() -> &'static [u8]; + + /// Storage prefix. Used for generating final key. 
+ fn storage_prefix() -> &'static [u8]; + + /// The full prefix; just the hash of `module_prefix` concatenated to the hash of + /// `storage_prefix`. + fn prefix_hash() -> Vec { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + + let mut result = + Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len()); + + result.extend_from_slice(&module_prefix_hashed[..]); + result.extend_from_slice(&storage_prefix_hashed[..]); + + result + } + + /// Convert an optional value retrieved from storage to the type queried. + fn from_optional_value_to_query(v: Option) -> Self::Query; + + /// Convert a query to an optional value into storage. + fn from_query_to_optional_value(v: Self::Query) -> Option; + + /// Generate the first part of the key used in top storage. + fn storage_double_map_final_key1(k1: KArg1) -> Vec + where + KArg1: EncodeLike, + { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); + + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), + ); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key_hashed.as_ref()); + + final_key + } + + /// Generate the full key used in top storage. 
+ fn storage_double_map_final_key(k1: KArg1, k2: KArg2) -> Vec + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key1_hashed = k1.borrow().using_encoded(Self::Hasher1::hash); + let key2_hashed = k2.borrow().using_encoded(Self::Hasher2::hash); + + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key1_hashed.as_ref().len() + + key2_hashed.as_ref().len(), + ); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key1_hashed.as_ref()); + final_key.extend_from_slice(key2_hashed.as_ref()); + + final_key + } } -impl storage::StorageDoubleMap for G where - K1: FullEncode, - K2: FullEncode, - V: FullCodec, - G: StorageDoubleMap, +impl storage::StorageDoubleMap for G +where + K1: FullEncode, + K2: FullEncode, + V: FullCodec, + G: StorageDoubleMap, { - type Query = G::Query; - - fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec where - KArg1: EncodeLike, - KArg2: EncodeLike, - { - Self::storage_double_map_final_key(k1, k2) - } - - fn contains_key(k1: KArg1, k2: KArg2) -> bool where - KArg1: EncodeLike, - KArg2: EncodeLike, - { - unhashed::exists(&Self::storage_double_map_final_key(k1, k2)) - } - - fn get(k1: KArg1, k2: KArg2) -> Self::Query where - KArg1: EncodeLike, - KArg2: EncodeLike, - { - G::from_optional_value_to_query(unhashed::get(&Self::storage_double_map_final_key(k1, k2))) - } - - fn take(k1: KArg1, k2: KArg2) -> Self::Query where - KArg1: EncodeLike, - KArg2: EncodeLike, - { - let final_key = Self::storage_double_map_final_key(k1, k2); - - let value = unhashed::take(&final_key); - G::from_optional_value_to_query(value) - } - - fn swap( - x_k1: XKArg1, - x_k2: XKArg2, - y_k1: YKArg1, - y_k2: YKArg2 - ) where - XKArg1: EncodeLike, - XKArg2: EncodeLike, - YKArg1: EncodeLike, - 
YKArg2: EncodeLike - { - let final_x_key = Self::storage_double_map_final_key(x_k1, x_k2); - let final_y_key = Self::storage_double_map_final_key(y_k1, y_k2); - - let v1 = unhashed::get_raw(&final_x_key); - if let Some(val) = unhashed::get_raw(&final_y_key) { - unhashed::put_raw(&final_x_key, &val); - } else { - unhashed::kill(&final_x_key) - } - if let Some(val) = v1 { - unhashed::put_raw(&final_y_key, &val); - } else { - unhashed::kill(&final_y_key) - } - } - - fn insert(k1: KArg1, k2: KArg2, val: VArg) where - KArg1: EncodeLike, - KArg2: EncodeLike, - VArg: EncodeLike, - { - unhashed::put(&Self::storage_double_map_final_key(k1, k2), &val.borrow()) - } - - fn remove(k1: KArg1, k2: KArg2) where - KArg1: EncodeLike, - KArg2: EncodeLike, - { - unhashed::kill(&Self::storage_double_map_final_key(k1, k2)) - } - - fn remove_prefix(k1: KArg1) where KArg1: EncodeLike { - unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref()) - } - - fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator where - KArg1: ?Sized + EncodeLike - { - let prefix = Self::storage_double_map_final_key1(k1); - storage::PrefixIterator:: { - prefix: prefix.clone(), - previous_key: prefix, - phantom_data: Default::default(), - } - } - - fn mutate(k1: KArg1, k2: KArg2, f: F) -> R where - KArg1: EncodeLike, - KArg2: EncodeLike, - F: FnOnce(&mut Self::Query) -> R, - { - let final_key = Self::storage_double_map_final_key(k1, k2); - let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); - - let ret = f(&mut val); - match G::from_query_to_optional_value(val) { - Some(ref val) => unhashed::put(final_key.as_ref(), val), - None => unhashed::kill(final_key.as_ref()), - } - ret - } - - fn append( - k1: KArg1, - k2: KArg2, - items: Items, - ) -> Result<(), &'static str> where - KArg1: EncodeLike, - KArg2: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator, - Items::IntoIter: ExactSizeIterator - { - let final_key = 
Self::storage_double_map_final_key(k1, k2); - - let encoded_value = unhashed::get_raw(&final_key) - .unwrap_or_else(|| { - match G::from_query_to_optional_value(G::from_optional_value_to_query(None)) { - Some(value) => value.encode(), - None => Vec::new(), - } - }); - - let new_val = V::append_or_new( - encoded_value, - items, - ).map_err(|_| "Could not append given item")?; - unhashed::put_raw(&final_key, &new_val); - - Ok(()) - } - - fn append_or_insert( - k1: KArg1, - k2: KArg2, - items: Items, - ) where - KArg1: EncodeLike, - KArg2: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator + Clone + EncodeLike, - Items::IntoIter: ExactSizeIterator - { - Self::append(Ref::from(&k1), Ref::from(&k2), items.clone()) - .unwrap_or_else(|_| Self::insert(k1, k2, items)); - } - - fn decode_len(key1: KArg1, key2: KArg2) -> Result where - KArg1: EncodeLike, - KArg2: EncodeLike, - V: codec::DecodeLength + Len, - { - let final_key = Self::storage_double_map_final_key(key1, key2); - if let Some(v) = unhashed::get_raw(&final_key) { - ::len(&v).map_err(|e| e.what()) - } else { - let len = G::from_query_to_optional_value(G::from_optional_value_to_query(None)) - .map(|v| v.len()) - .unwrap_or(0); - - Ok(len) - } - } - - fn migrate_keys< - OldHasher1: StorageHasher, - OldHasher2: StorageHasher, - KeyArg1: EncodeLike, - KeyArg2: EncodeLike, - >(key1: KeyArg1, key2: KeyArg2) -> Option { - let old_key = { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let key1_hashed = key1.borrow().using_encoded(OldHasher1::hash); - let key2_hashed = key2.borrow().using_encoded(OldHasher2::hash); - - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() - + storage_prefix_hashed.len() - + key1_hashed.as_ref().len() - + key2_hashed.as_ref().len() - ); - - final_key.extend_from_slice(&module_prefix_hashed[..]); - 
final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key1_hashed.as_ref()); - final_key.extend_from_slice(key2_hashed.as_ref()); - - final_key - }; - unhashed::take(old_key.as_ref()).map(|value| { - unhashed::put(Self::storage_double_map_final_key(key1, key2).as_ref(), &value); - value - }) - } + type Query = G::Query; + + fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + Self::storage_double_map_final_key(k1, k2) + } + + fn contains_key(k1: KArg1, k2: KArg2) -> bool + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + unhashed::exists(&Self::storage_double_map_final_key(k1, k2)) + } + + fn get(k1: KArg1, k2: KArg2) -> Self::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + G::from_optional_value_to_query(unhashed::get(&Self::storage_double_map_final_key(k1, k2))) + } + + fn take(k1: KArg1, k2: KArg2) -> Self::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike, + { + let final_key = Self::storage_double_map_final_key(k1, k2); + + let value = unhashed::take(&final_key); + G::from_optional_value_to_query(value) + } + + fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) + where + XKArg1: EncodeLike, + XKArg2: EncodeLike, + YKArg1: EncodeLike, + YKArg2: EncodeLike, + { + let final_x_key = Self::storage_double_map_final_key(x_k1, x_k2); + let final_y_key = Self::storage_double_map_final_key(y_k1, y_k2); + + let v1 = unhashed::get_raw(&final_x_key); + if let Some(val) = unhashed::get_raw(&final_y_key) { + unhashed::put_raw(&final_x_key, &val); + } else { + unhashed::kill(&final_x_key) + } + if let Some(val) = v1 { + unhashed::put_raw(&final_y_key, &val); + } else { + unhashed::kill(&final_y_key) + } + } + + fn insert(k1: KArg1, k2: KArg2, val: VArg) + where + KArg1: EncodeLike, + KArg2: EncodeLike, + VArg: EncodeLike, + { + unhashed::put(&Self::storage_double_map_final_key(k1, k2), &val.borrow()) + } + + fn remove(k1: KArg1, k2: KArg2) + where + KArg1: 
EncodeLike, + KArg2: EncodeLike, + { + unhashed::kill(&Self::storage_double_map_final_key(k1, k2)) + } + + fn remove_prefix(k1: KArg1) + where + KArg1: EncodeLike, + { + unhashed::kill_prefix(Self::storage_double_map_final_key1(k1).as_ref()) + } + + fn iter_prefix_values(k1: KArg1) -> storage::PrefixIterator + where + KArg1: ?Sized + EncodeLike, + { + let prefix = Self::storage_double_map_final_key1(k1); + storage::PrefixIterator:: { + prefix: prefix.clone(), + previous_key: prefix, + phantom_data: Default::default(), + } + } + + fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Self::Query) -> R, + { + let final_key = Self::storage_double_map_final_key(k1, k2); + let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); + + let ret = f(&mut val); + match G::from_query_to_optional_value(val) { + Some(ref val) => unhashed::put(final_key.as_ref(), val), + None => unhashed::kill(final_key.as_ref()), + } + ret + } + + fn append( + k1: KArg1, + k2: KArg2, + items: Items, + ) -> Result<(), &'static str> + where + KArg1: EncodeLike, + KArg2: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator, + Items::IntoIter: ExactSizeIterator, + { + let final_key = Self::storage_double_map_final_key(k1, k2); + + let encoded_value = + unhashed::get_raw(&final_key).unwrap_or_else(|| match G::from_query_to_optional_value( + G::from_optional_value_to_query(None), + ) { + Some(value) => value.encode(), + None => Vec::new(), + }); + + let new_val = + V::append_or_new(encoded_value, items).map_err(|_| "Could not append given item")?; + unhashed::put_raw(&final_key, &new_val); + + Ok(()) + } + + fn append_or_insert( + k1: KArg1, + k2: KArg2, + items: Items, + ) where + KArg1: EncodeLike, + KArg2: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator + Clone + EncodeLike, + Items::IntoIter: ExactSizeIterator, + { + 
Self::append(Ref::from(&k1), Ref::from(&k2), items.clone()) + .unwrap_or_else(|_| Self::insert(k1, k2, items)); + } + + fn decode_len(key1: KArg1, key2: KArg2) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + V: codec::DecodeLength + Len, + { + let final_key = Self::storage_double_map_final_key(key1, key2); + if let Some(v) = unhashed::get_raw(&final_key) { + ::len(&v).map_err(|e| e.what()) + } else { + let len = G::from_query_to_optional_value(G::from_optional_value_to_query(None)) + .map(|v| v.len()) + .unwrap_or(0); + + Ok(len) + } + } + + fn migrate_keys< + OldHasher1: StorageHasher, + OldHasher2: StorageHasher, + KeyArg1: EncodeLike, + KeyArg2: EncodeLike, + >( + key1: KeyArg1, + key2: KeyArg2, + ) -> Option { + let old_key = { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key1_hashed = key1.borrow().using_encoded(OldHasher1::hash); + let key2_hashed = key2.borrow().using_encoded(OldHasher2::hash); + + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key1_hashed.as_ref().len() + + key2_hashed.as_ref().len(), + ); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key1_hashed.as_ref()); + final_key.extend_from_slice(key2_hashed.as_ref()); + + final_key + }; + unhashed::take(old_key.as_ref()).map(|value| { + unhashed::put( + Self::storage_double_map_final_key(key1, key2).as_ref(), + &value, + ); + value + }) + } } /// Iterate over a prefix and decode raw_key and raw_value into `T`. pub struct MapIterator { - prefix: Vec, - previous_key: Vec, - /// If true then value are removed while iterating - drain: bool, - /// Function that take `(raw_key_without_prefix, raw_value)` and decode `T`. - /// `raw_key_without_prefix` is the raw storage key without the prefix iterated on. 
- closure: fn(&[u8], &[u8]) -> Result, + prefix: Vec, + previous_key: Vec, + /// If true then value are removed while iterating + drain: bool, + /// Function that take `(raw_key_without_prefix, raw_value)` and decode `T`. + /// `raw_key_without_prefix` is the raw storage key without the prefix iterated on. + closure: fn(&[u8], &[u8]) -> Result, } impl Iterator for MapIterator { - type Item = T; - - fn next(&mut self) -> Option { - loop { - let maybe_next = sp_io::storage::next_key(&self.previous_key) - .filter(|n| n.starts_with(&self.prefix)); - break match maybe_next { - Some(next) => { - self.previous_key = next; - let raw_value = match unhashed::get_raw(&self.previous_key) { - Some(raw_value) => raw_value, - None => { - frame_support::print("ERROR: next_key returned a key with no value in MapIterator"); - continue - } - }; - if self.drain { - unhashed::kill(&self.previous_key) - } - let raw_key_without_prefix = &self.previous_key[self.prefix.len()..]; - let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) { - Ok(item) => item, - Err(_e) => { - frame_support::print("ERROR: (key, value) failed to decode in MapIterator"); - continue - } - }; - - Some(item) - } - None => None, - } - } - } + type Item = T; + + fn next(&mut self) -> Option { + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next; + let raw_value = match unhashed::get_raw(&self.previous_key) { + Some(raw_value) => raw_value, + None => { + frame_support::print( + "ERROR: next_key returned a key with no value in MapIterator", + ); + continue; + } + }; + if self.drain { + unhashed::kill(&self.previous_key) + } + let raw_key_without_prefix = &self.previous_key[self.prefix.len()..]; + let item = match (self.closure)(raw_key_without_prefix, &raw_value[..]) { + Ok(item) => item, + Err(_e) => { + frame_support::print( + "ERROR: (key, value) failed to decode in 
MapIterator", + ); + continue; + } + }; + + Some(item) + } + None => None, + }; + } + } } -impl< - K1: FullCodec, - K2: FullCodec, - V: FullCodec, - G: StorageDoubleMap, -> storage::IterableStorageDoubleMap for G where - G::Hasher1: ReversibleStorageHasher, - G::Hasher2: ReversibleStorageHasher +impl> + storage::IterableStorageDoubleMap for G +where + G::Hasher1: ReversibleStorageHasher, + G::Hasher2: ReversibleStorageHasher, { - type PrefixIterator = MapIterator<(K2, V)>; - type Iterator = MapIterator<(K1, K2, V)>; - - fn iter_prefix(k1: impl EncodeLike) -> Self::PrefixIterator { - let prefix = G::storage_double_map_final_key1(k1); - Self::PrefixIterator { - prefix: prefix.clone(), - previous_key: prefix, - drain: false, - closure: |raw_key_without_prefix, mut raw_value| { - let mut key_material = G::Hasher2::reverse(raw_key_without_prefix); - Ok((K2::decode(&mut key_material)?, V::decode(&mut raw_value)?)) - }, - } - } - - fn drain_prefix(k1: impl EncodeLike) -> Self::PrefixIterator { - let mut iterator = Self::iter_prefix(k1); - iterator.drain = true; - iterator - } - - fn iter() -> Self::Iterator { - let prefix = G::prefix_hash(); - Self::Iterator { - prefix: prefix.clone(), - previous_key: prefix, - drain: false, - closure: |raw_key_without_prefix, mut raw_value| { - let mut k1_k2_material = G::Hasher1::reverse(raw_key_without_prefix); - let k1 = K1::decode(&mut k1_k2_material)?; - let mut k2_material = G::Hasher2::reverse(k1_k2_material); - let k2 = K2::decode(&mut k2_material)?; - Ok((k1, k2, V::decode(&mut raw_value)?)) - }, - } - } - - fn drain() -> Self::Iterator { - let mut iterator = Self::iter(); - iterator.drain = true; - iterator - } - - fn translate Option>(f: F) { - let prefix = G::prefix_hash(); - let mut previous_key = prefix.clone(); - loop { - match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { - Some(next) => { - previous_key = next; - let maybe_value = unhashed::get::(&previous_key); - match maybe_value { - 
Some(value) => match f(value) { - Some(new) => unhashed::put::(&previous_key, &new), - None => unhashed::kill(&previous_key), - }, - None => continue, - } - } - None => return, - } - } - } + type PrefixIterator = MapIterator<(K2, V)>; + type Iterator = MapIterator<(K1, K2, V)>; + + fn iter_prefix(k1: impl EncodeLike) -> Self::PrefixIterator { + let prefix = G::storage_double_map_final_key1(k1); + Self::PrefixIterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: |raw_key_without_prefix, mut raw_value| { + let mut key_material = G::Hasher2::reverse(raw_key_without_prefix); + Ok((K2::decode(&mut key_material)?, V::decode(&mut raw_value)?)) + }, + } + } + + fn drain_prefix(k1: impl EncodeLike) -> Self::PrefixIterator { + let mut iterator = Self::iter_prefix(k1); + iterator.drain = true; + iterator + } + + fn iter() -> Self::Iterator { + let prefix = G::prefix_hash(); + Self::Iterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + closure: |raw_key_without_prefix, mut raw_value| { + let mut k1_k2_material = G::Hasher1::reverse(raw_key_without_prefix); + let k1 = K1::decode(&mut k1_k2_material)?; + let mut k2_material = G::Hasher2::reverse(k1_k2_material); + let k2 = K2::decode(&mut k2_material)?; + Ok((k1, k2, V::decode(&mut raw_value)?)) + }, + } + } + + fn drain() -> Self::Iterator { + let mut iterator = Self::iter(); + iterator.drain = true; + iterator + } + + fn translate Option>(f: F) { + let prefix = G::prefix_hash(); + let mut previous_key = prefix.clone(); + loop { + match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { + Some(next) => { + previous_key = next; + let maybe_value = unhashed::get::(&previous_key); + match maybe_value { + Some(value) => match f(value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), + }, + None => continue, + } + } + None => return, + } + } + } } /// Test iterators for StorageDoubleMap #[cfg(test)] 
#[allow(dead_code)] mod test_iterators { - use codec::{Encode, Decode}; - use crate::storage::{generator::StorageDoubleMap, IterableStorageDoubleMap, unhashed}; - - pub trait Trait { - type Origin; - type BlockNumber; - } - - crate::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - #[derive(PartialEq, Eq, Clone, Encode, Decode)] - struct NoDef(u32); - - crate::decl_storage! { - trait Store for Module as Test { - DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(blake2_128_concat) u32 => u64; - } - } - - fn key_before_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 0, "mock function not implemented for this prefix"); - *last -= 1; - prefix - } - - fn key_after_prefix(mut prefix: Vec) -> Vec { - let last = prefix.iter_mut().last().unwrap(); - assert!(*last != 255, "mock function not implemented for this prefix"); - *last += 1; - prefix - } - - fn key_in_prefix(mut prefix: Vec) -> Vec { - prefix.push(0); - prefix - } - - #[test] - fn double_map_reversible_reversible_iteration() { - sp_io::TestExternalities::default().execute_with(|| { - // All map iterator - let prefix = DoubleMap::prefix_hash(); - - unhashed::put(&key_before_prefix(prefix.clone()), &1u64); - unhashed::put(&key_after_prefix(prefix.clone()), &1u64); - - for i in 0..4 { - DoubleMap::insert(i as u16, i as u32, i as u64); - } - - assert_eq!( - DoubleMap::iter().collect::>(), - vec![(3, 3, 3), (0, 0, 0), (2, 2, 2), (1, 1, 1)], - ); - - assert_eq!( - DoubleMap::iter_values().collect::>(), - vec![3, 0, 2, 1], - ); - - assert_eq!( - DoubleMap::drain().collect::>(), - vec![(3, 3, 3), (0, 0, 0), (2, 2, 2), (1, 1, 1)], - ); - - assert_eq!(DoubleMap::iter().collect::>(), vec![]); - assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); - assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); - - // Prefix iterator - let k1 = 3 << 8; - let prefix = 
DoubleMap::storage_double_map_final_key1(k1); - - unhashed::put(&key_before_prefix(prefix.clone()), &1u64); - unhashed::put(&key_after_prefix(prefix.clone()), &1u64); - - for i in 0..4 { - DoubleMap::insert(k1, i as u32, i as u64); - } - - assert_eq!( - DoubleMap::iter_prefix(k1).collect::>(), - vec![(0, 0), (2, 2), (1, 1), (3, 3)], - ); - - assert_eq!( - DoubleMap::iter_prefix_values(k1).collect::>(), - vec![0, 2, 1, 3], - ); - - assert_eq!( - DoubleMap::drain_prefix(k1).collect::>(), - vec![(0, 0), (2, 2), (1, 1), (3, 3)], - ); - - assert_eq!(DoubleMap::iter_prefix(k1).collect::>(), vec![]); - assert_eq!(unhashed::get(&key_before_prefix(prefix.clone())), Some(1u64)); - assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); - }) - } + use crate::storage::{generator::StorageDoubleMap, unhashed, IterableStorageDoubleMap}; + use codec::{Decode, Encode}; + + pub trait Trait { + type Origin; + type BlockNumber; + } + + crate::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + #[derive(PartialEq, Eq, Clone, Encode, Decode)] + struct NoDef(u32); + + crate::decl_storage! 
{ + trait Store for Module as Test { + DoubleMap: double_map hasher(blake2_128_concat) u16, hasher(blake2_128_concat) u32 => u64; + } + } + + fn key_before_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert!(*last != 0, "mock function not implemented for this prefix"); + *last -= 1; + prefix + } + + fn key_after_prefix(mut prefix: Vec) -> Vec { + let last = prefix.iter_mut().last().unwrap(); + assert!( + *last != 255, + "mock function not implemented for this prefix" + ); + *last += 1; + prefix + } + + fn key_in_prefix(mut prefix: Vec) -> Vec { + prefix.push(0); + prefix + } + + #[test] + fn double_map_reversible_reversible_iteration() { + sp_io::TestExternalities::default().execute_with(|| { + // All map iterator + let prefix = DoubleMap::prefix_hash(); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + + for i in 0..4 { + DoubleMap::insert(i as u16, i as u32, i as u64); + } + + assert_eq!( + DoubleMap::iter().collect::>(), + vec![(3, 3, 3), (0, 0, 0), (2, 2, 2), (1, 1, 1)], + ); + + assert_eq!( + DoubleMap::iter_values().collect::>(), + vec![3, 0, 2, 1], + ); + + assert_eq!( + DoubleMap::drain().collect::>(), + vec![(3, 3, 3), (0, 0, 0), (2, 2, 2), (1, 1, 1)], + ); + + assert_eq!(DoubleMap::iter().collect::>(), vec![]); + assert_eq!( + unhashed::get(&key_before_prefix(prefix.clone())), + Some(1u64) + ); + assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); + + // Prefix iterator + let k1 = 3 << 8; + let prefix = DoubleMap::storage_double_map_final_key1(k1); + + unhashed::put(&key_before_prefix(prefix.clone()), &1u64); + unhashed::put(&key_after_prefix(prefix.clone()), &1u64); + + for i in 0..4 { + DoubleMap::insert(k1, i as u32, i as u64); + } + + assert_eq!( + DoubleMap::iter_prefix(k1).collect::>(), + vec![(0, 0), (2, 2), (1, 1), (3, 3)], + ); + + assert_eq!( + DoubleMap::iter_prefix_values(k1).collect::>(), + vec![0, 2, 1, 3], + 
); + + assert_eq!( + DoubleMap::drain_prefix(k1).collect::>(), + vec![(0, 0), (2, 2), (1, 1), (3, 3)], + ); + + assert_eq!(DoubleMap::iter_prefix(k1).collect::>(), vec![]); + assert_eq!( + unhashed::get(&key_before_prefix(prefix.clone())), + Some(1u64) + ); + assert_eq!(unhashed::get(&key_after_prefix(prefix.clone())), Some(1u64)); + }) + } } diff --git a/frame/support/src/storage/generator/map.rs b/frame/support/src/storage/generator/map.rs index c29a9a223a..ea25d5e4b5 100644 --- a/frame/support/src/storage/generator/map.rs +++ b/frame/support/src/storage/generator/map.rs @@ -14,12 +14,15 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . +use crate::hash::{ReversibleStorageHasher, StorageHasher, Twox128}; +use crate::{ + storage::{self, unhashed}, + traits::Len, +}; +use codec::{Decode, Encode, EncodeAppend, EncodeLike, FullCodec, FullEncode, Ref}; +use sp_std::borrow::Borrow; #[cfg(not(feature = "std"))] use sp_std::prelude::*; -use sp_std::borrow::Borrow; -use codec::{FullCodec, FullEncode, Decode, Encode, EncodeLike, Ref, EncodeAppend}; -use crate::{storage::{self, unhashed}, traits::Len}; -use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; /// Generator for `StorageMap` used by `decl_storage`. /// @@ -33,338 +36,341 @@ use crate::hash::{StorageHasher, Twox128, ReversibleStorageHasher}; /// If the keys are not trusted (e.g. can be set by a user), a cryptographic `hasher` such as /// `blake2_256` must be used. Otherwise, other values in storage can be compromised. pub trait StorageMap { - /// The type that get/take returns. - type Query; + /// The type that get/take returns. + type Query; - /// Hasher. Used for generating final key. - type Hasher: StorageHasher; + /// Hasher. Used for generating final key. + type Hasher: StorageHasher; - /// Module prefix. Used for generating final key. - fn module_prefix() -> &'static [u8]; + /// Module prefix. Used for generating final key. 
+ fn module_prefix() -> &'static [u8]; - /// Storage prefix. Used for generating final key. - fn storage_prefix() -> &'static [u8]; + /// Storage prefix. Used for generating final key. + fn storage_prefix() -> &'static [u8]; - /// The full prefix; just the hash of `module_prefix` concatenated to the hash of - /// `storage_prefix`. - fn prefix_hash() -> Vec { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + /// The full prefix; just the hash of `module_prefix` concatenated to the hash of + /// `storage_prefix`. + fn prefix_hash() -> Vec { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let mut result = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() - ); + let mut result = + Vec::with_capacity(module_prefix_hashed.len() + storage_prefix_hashed.len()); - result.extend_from_slice(&module_prefix_hashed[..]); - result.extend_from_slice(&storage_prefix_hashed[..]); + result.extend_from_slice(&module_prefix_hashed[..]); + result.extend_from_slice(&storage_prefix_hashed[..]); - result - } + result + } - /// Convert an optional value retrieved from storage to the type queried. - fn from_optional_value_to_query(v: Option) -> Self::Query; + /// Convert an optional value retrieved from storage to the type queried. + fn from_optional_value_to_query(v: Option) -> Self::Query; - /// Convert a query to an optional value into storage. - fn from_query_to_optional_value(v: Self::Query) -> Option; + /// Convert a query to an optional value into storage. + fn from_query_to_optional_value(v: Self::Query) -> Option; - /// Generate the full key used in top storage. 
- fn storage_map_final_key(key: KeyArg) -> Vec where - KeyArg: EncodeLike, - { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let key_hashed = key.borrow().using_encoded(Self::Hasher::hash); + /// Generate the full key used in top storage. + fn storage_map_final_key(key: KeyArg) -> Vec + where + KeyArg: EncodeLike, + { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key_hashed = key.borrow().using_encoded(Self::Hasher::hash); - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() - ); + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len(), + ); - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key_hashed.as_ref()); + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key_hashed.as_ref()); - final_key - } + final_key + } } /// Utility to iterate through items in a storage map. 
pub struct StorageMapIterator { - prefix: Vec, - previous_key: Vec, - drain: bool, - _phantom: ::sp_std::marker::PhantomData<(K, V, Hasher)>, + prefix: Vec, + previous_key: Vec, + drain: bool, + _phantom: ::sp_std::marker::PhantomData<(K, V, Hasher)>, } -impl< - K: Decode + Sized, - V: Decode + Sized, - Hasher: ReversibleStorageHasher -> Iterator for StorageMapIterator { - type Item = (K, V); - - fn next(&mut self) -> Option<(K, V)> { - loop { - let maybe_next = sp_io::storage::next_key(&self.previous_key) - .filter(|n| n.starts_with(&self.prefix)); - break match maybe_next { - Some(next) => { - self.previous_key = next; - match unhashed::get::(&self.previous_key) { - Some(value) => { - if self.drain { - unhashed::kill(&self.previous_key) - } - let mut key_material = Hasher::reverse(&self.previous_key[self.prefix.len()..]); - match K::decode(&mut key_material) { - Ok(key) => Some((key, value)), - Err(_) => continue, - } - } - None => continue, - } - } - None => None, - } - } - } +impl Iterator + for StorageMapIterator +{ + type Item = (K, V); + + fn next(&mut self) -> Option<(K, V)> { + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next; + match unhashed::get::(&self.previous_key) { + Some(value) => { + if self.drain { + unhashed::kill(&self.previous_key) + } + let mut key_material = + Hasher::reverse(&self.previous_key[self.prefix.len()..]); + match K::decode(&mut key_material) { + Ok(key) => Some((key, value)), + Err(_) => continue, + } + } + None => continue, + } + } + None => None, + }; + } + } } -impl< - K: FullCodec, - V: FullCodec, - G: StorageMap, -> storage::IterableStorageMap for G where - G::Hasher: ReversibleStorageHasher +impl> storage::IterableStorageMap for G +where + G::Hasher: ReversibleStorageHasher, { - type Iterator = StorageMapIterator; - - /// Enumerate all elements in the map. 
- fn iter() -> Self::Iterator { - let prefix = G::prefix_hash(); - Self::Iterator { - prefix: prefix.clone(), - previous_key: prefix, - drain: false, - _phantom: Default::default(), - } - } - - /// Enumerate all elements in the map. - fn drain() -> Self::Iterator { - let prefix = G::prefix_hash(); - Self::Iterator { - prefix: prefix.clone(), - previous_key: prefix, - drain: true, - _phantom: Default::default(), - } - } - - fn translate Option>(f: F) { - let prefix = G::prefix_hash(); - let mut previous_key = prefix.clone(); - loop { - match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { - Some(next) => { - previous_key = next; - let maybe_value = unhashed::get::(&previous_key); - match maybe_value { - Some(value) => { - let mut key_material = G::Hasher::reverse(&previous_key[prefix.len()..]); - match K::decode(&mut key_material) { - Ok(key) => match f(key, value) { - Some(new) => unhashed::put::(&previous_key, &new), - None => unhashed::kill(&previous_key), - }, - Err(_) => continue, - } - } - None => continue, - } - } - None => return, - } - } - } + type Iterator = StorageMapIterator; + + /// Enumerate all elements in the map. + fn iter() -> Self::Iterator { + let prefix = G::prefix_hash(); + Self::Iterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: false, + _phantom: Default::default(), + } + } + + /// Enumerate all elements in the map. 
+ fn drain() -> Self::Iterator { + let prefix = G::prefix_hash(); + Self::Iterator { + prefix: prefix.clone(), + previous_key: prefix, + drain: true, + _phantom: Default::default(), + } + } + + fn translate Option>(f: F) { + let prefix = G::prefix_hash(); + let mut previous_key = prefix.clone(); + loop { + match sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix)) { + Some(next) => { + previous_key = next; + let maybe_value = unhashed::get::(&previous_key); + match maybe_value { + Some(value) => { + let mut key_material = + G::Hasher::reverse(&previous_key[prefix.len()..]); + match K::decode(&mut key_material) { + Ok(key) => match f(key, value) { + Some(new) => unhashed::put::(&previous_key, &new), + None => unhashed::kill(&previous_key), + }, + Err(_) => continue, + } + } + None => continue, + } + } + None => return, + } + } + } } impl> storage::StorageMap for G { - type Query = G::Query; - - fn hashed_key_for>(key: KeyArg) -> Vec { - Self::storage_map_final_key(key) - } - - fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2) { - let k1 = Self::storage_map_final_key(key1); - let k2 = Self::storage_map_final_key(key2); - - let v1 = unhashed::get_raw(k1.as_ref()); - if let Some(val) = unhashed::get_raw(k2.as_ref()) { - unhashed::put_raw(k1.as_ref(), &val); - } else { - unhashed::kill(k1.as_ref()) - } - if let Some(val) = v1 { - unhashed::put_raw(k2.as_ref(), &val); - } else { - unhashed::kill(k2.as_ref()) - } - } - - fn contains_key>(key: KeyArg) -> bool { - unhashed::exists(Self::storage_map_final_key(key).as_ref()) - } - - fn get>(key: KeyArg) -> Self::Query { - G::from_optional_value_to_query(unhashed::get(Self::storage_map_final_key(key).as_ref())) - } - - fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { - unhashed::put(Self::storage_map_final_key(key).as_ref(), &val) - } - - fn remove>(key: KeyArg) { - unhashed::kill(Self::storage_map_final_key(key).as_ref()) - } - - fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: 
KeyArg, f: F) -> R { - let final_key = Self::storage_map_final_key(key); - let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); - - let ret = f(&mut val); - match G::from_query_to_optional_value(val) { - Some(ref val) => unhashed::put(final_key.as_ref(), &val), - None => unhashed::kill(final_key.as_ref()), - } - ret - } - - fn mutate_exists, R, F: FnOnce(&mut Option) -> R>(key: KeyArg, f: F) -> R { - let final_key = Self::storage_map_final_key(key); - let mut val = unhashed::get(final_key.as_ref()); - - let ret = f(&mut val); - match val { - Some(ref val) => unhashed::put(final_key.as_ref(), &val), - None => unhashed::kill(final_key.as_ref()), - } - ret - } - - fn try_mutate, R, E, F: FnOnce(&mut Self::Query) -> Result>( - key: KeyArg, - f: F - ) -> Result { - let final_key = Self::storage_map_final_key(key); - let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); - - let ret = f(&mut val); - if ret.is_ok() { - match G::from_query_to_optional_value(val) { - Some(ref val) => unhashed::put(final_key.as_ref(), &val.borrow()), - None => unhashed::kill(final_key.as_ref()), - } - } - ret - } - - fn try_mutate_exists, R, E, F: FnOnce(&mut Option) -> Result>( - key: KeyArg, - f: F - ) -> Result { - let final_key = Self::storage_map_final_key(key); - let mut val = unhashed::get(final_key.as_ref()); - - let ret = f(&mut val); - if ret.is_ok() { - match val { - Some(ref val) => unhashed::put(final_key.as_ref(), &val.borrow()), - None => unhashed::kill(final_key.as_ref()), - } - } - ret - } - - fn take>(key: KeyArg) -> Self::Query { - let key = Self::storage_map_final_key(key); - let value = unhashed::take(key.as_ref()); - G::from_optional_value_to_query(value) - } - - fn append(key: KeyArg, items: Items) -> Result<(), &'static str> - where - KeyArg: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator, - Items::IntoIter: ExactSizeIterator, - { - let key = 
Self::storage_map_final_key(key); - let encoded_value = unhashed::get_raw(key.as_ref()) - .unwrap_or_else(|| { - match G::from_query_to_optional_value(G::from_optional_value_to_query(None)) { - Some(value) => value.encode(), - None => Vec::new(), - } - }); - - let new_val = V::append_or_new( - encoded_value, - items, - ).map_err(|_| "Could not append given item")?; - unhashed::put_raw(key.as_ref(), &new_val); - Ok(()) - } - - fn append_or_insert(key: KeyArg, items: Items) - where - KeyArg: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator + Clone + EncodeLike, - Items::IntoIter: ExactSizeIterator, - { - Self::append(Ref::from(&key), items.clone()) - .unwrap_or_else(|_| Self::insert(key, items)); - } - - fn decode_len>(key: KeyArg) -> Result - where V: codec::DecodeLength + Len - { - let key = Self::storage_map_final_key(key); - if let Some(v) = unhashed::get_raw(key.as_ref()) { - ::len(&v).map_err(|e| e.what()) - } else { - let len = G::from_query_to_optional_value(G::from_optional_value_to_query(None)) - .map(|v| v.len()) - .unwrap_or(0); - - Ok(len) - } - } - - fn migrate_key>(key: KeyArg) -> Option { - let old_key = { - let module_prefix_hashed = Twox128::hash(Self::module_prefix()); - let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); - let key_hashed = key.borrow().using_encoded(OldHasher::hash); - - let mut final_key = Vec::with_capacity( - module_prefix_hashed.len() + storage_prefix_hashed.len() + key_hashed.as_ref().len() - ); - - final_key.extend_from_slice(&module_prefix_hashed[..]); - final_key.extend_from_slice(&storage_prefix_hashed[..]); - final_key.extend_from_slice(key_hashed.as_ref()); - - final_key - }; - unhashed::take(old_key.as_ref()).map(|value| { - unhashed::put(Self::storage_map_final_key(key).as_ref(), &value); - value - }) - } + type Query = G::Query; + + fn hashed_key_for>(key: KeyArg) -> Vec { + Self::storage_map_final_key(key) + } + + fn swap, KeyArg2: EncodeLike>(key1: 
KeyArg1, key2: KeyArg2) { + let k1 = Self::storage_map_final_key(key1); + let k2 = Self::storage_map_final_key(key2); + + let v1 = unhashed::get_raw(k1.as_ref()); + if let Some(val) = unhashed::get_raw(k2.as_ref()) { + unhashed::put_raw(k1.as_ref(), &val); + } else { + unhashed::kill(k1.as_ref()) + } + if let Some(val) = v1 { + unhashed::put_raw(k2.as_ref(), &val); + } else { + unhashed::kill(k2.as_ref()) + } + } + + fn contains_key>(key: KeyArg) -> bool { + unhashed::exists(Self::storage_map_final_key(key).as_ref()) + } + + fn get>(key: KeyArg) -> Self::Query { + G::from_optional_value_to_query(unhashed::get(Self::storage_map_final_key(key).as_ref())) + } + + fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg) { + unhashed::put(Self::storage_map_final_key(key).as_ref(), &val) + } + + fn remove>(key: KeyArg) { + unhashed::kill(Self::storage_map_final_key(key).as_ref()) + } + + fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R { + let final_key = Self::storage_map_final_key(key); + let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); + + let ret = f(&mut val); + match G::from_query_to_optional_value(val) { + Some(ref val) => unhashed::put(final_key.as_ref(), &val), + None => unhashed::kill(final_key.as_ref()), + } + ret + } + + fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R { + let final_key = Self::storage_map_final_key(key); + let mut val = unhashed::get(final_key.as_ref()); + + let ret = f(&mut val); + match val { + Some(ref val) => unhashed::put(final_key.as_ref(), &val), + None => unhashed::kill(final_key.as_ref()), + } + ret + } + + fn try_mutate, R, E, F: FnOnce(&mut Self::Query) -> Result>( + key: KeyArg, + f: F, + ) -> Result { + let final_key = Self::storage_map_final_key(key); + let mut val = G::from_optional_value_to_query(unhashed::get(final_key.as_ref())); + + let ret = f(&mut val); + if ret.is_ok() { + match G::from_query_to_optional_value(val) { + Some(ref 
val) => unhashed::put(final_key.as_ref(), &val.borrow()), + None => unhashed::kill(final_key.as_ref()), + } + } + ret + } + + fn try_mutate_exists, R, E, F: FnOnce(&mut Option) -> Result>( + key: KeyArg, + f: F, + ) -> Result { + let final_key = Self::storage_map_final_key(key); + let mut val = unhashed::get(final_key.as_ref()); + + let ret = f(&mut val); + if ret.is_ok() { + match val { + Some(ref val) => unhashed::put(final_key.as_ref(), &val.borrow()), + None => unhashed::kill(final_key.as_ref()), + } + } + ret + } + + fn take>(key: KeyArg) -> Self::Query { + let key = Self::storage_map_final_key(key); + let value = unhashed::take(key.as_ref()); + G::from_optional_value_to_query(value) + } + + fn append( + key: KeyArg, + items: Items, + ) -> Result<(), &'static str> + where + KeyArg: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator, + Items::IntoIter: ExactSizeIterator, + { + let key = Self::storage_map_final_key(key); + let encoded_value = + unhashed::get_raw(key.as_ref()).unwrap_or_else( + || match G::from_query_to_optional_value(G::from_optional_value_to_query(None)) { + Some(value) => value.encode(), + None => Vec::new(), + }, + ); + + let new_val = + V::append_or_new(encoded_value, items).map_err(|_| "Could not append given item")?; + unhashed::put_raw(key.as_ref(), &new_val); + Ok(()) + } + + fn append_or_insert(key: KeyArg, items: Items) + where + KeyArg: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator + Clone + EncodeLike, + Items::IntoIter: ExactSizeIterator, + { + Self::append(Ref::from(&key), items.clone()).unwrap_or_else(|_| Self::insert(key, items)); + } + + fn decode_len>(key: KeyArg) -> Result + where + V: codec::DecodeLength + Len, + { + let key = Self::storage_map_final_key(key); + if let Some(v) = unhashed::get_raw(key.as_ref()) { + ::len(&v).map_err(|e| e.what()) + } else { + let len = 
G::from_query_to_optional_value(G::from_optional_value_to_query(None)) + .map(|v| v.len()) + .unwrap_or(0); + + Ok(len) + } + } + + fn migrate_key>(key: KeyArg) -> Option { + let old_key = { + let module_prefix_hashed = Twox128::hash(Self::module_prefix()); + let storage_prefix_hashed = Twox128::hash(Self::storage_prefix()); + let key_hashed = key.borrow().using_encoded(OldHasher::hash); + + let mut final_key = Vec::with_capacity( + module_prefix_hashed.len() + + storage_prefix_hashed.len() + + key_hashed.as_ref().len(), + ); + + final_key.extend_from_slice(&module_prefix_hashed[..]); + final_key.extend_from_slice(&storage_prefix_hashed[..]); + final_key.extend_from_slice(key_hashed.as_ref()); + + final_key + }; + unhashed::take(old_key.as_ref()).map(|value| { + unhashed::put(Self::storage_map_final_key(key).as_ref(), &value); + value + }) + } } diff --git a/frame/support/src/storage/generator/mod.rs b/frame/support/src/storage/generator/mod.rs index 687d8a3c93..c7071289d7 100644 --- a/frame/support/src/storage/generator/mod.rs +++ b/frame/support/src/storage/generator/mod.rs @@ -23,83 +23,92 @@ //! //! This is internal api and is subject to change. -mod map; mod double_map; +mod map; mod value; -pub use map::StorageMap; pub use double_map::StorageDoubleMap; +pub use map::StorageMap; pub use value::StorageValue; #[cfg(test)] #[allow(dead_code)] mod tests { - use sp_io::TestExternalities; - use codec::Encode; - use crate::storage::{unhashed, generator::StorageValue, IterableStorageMap}; - - struct Runtime {} - pub trait Trait { - type Origin; - type BlockNumber; - } - - impl Trait for Runtime { - type Origin = u32; - type BlockNumber = u32; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - crate::decl_storage! 
{ - trait Store for Module as Runtime { - Value get(fn value) config(): (u64, u64); - NumberMap: map hasher(identity) u32 => u64; - } - } - - #[test] - fn value_translate_works() { - let t = GenesisConfig::default().build_storage().unwrap(); - TestExternalities::new(t).execute_with(|| { - // put the old value `1111u32` in the storage. - let key = Value::storage_value_final_key(); - unhashed::put_raw(&key, &1111u32.encode()); - - // translate - let translate_fn = |old: Option| -> Option<(u64, u64)> { - old.map(|o| (o.into(), (o*2).into())) - }; - let _ = Value::translate(translate_fn); - - // new storage should be `(1111, 1111 * 2)` - assert_eq!(Value::get(), (1111, 2222)); - }) - } - - #[test] - fn map_translate_works() { - let t = GenesisConfig::default().build_storage().unwrap(); - TestExternalities::new(t).execute_with(|| { - // start with a map of u32 -> u32. - for i in 0u32..100u32 { - unhashed::put(&NumberMap::hashed_key_for(&i), &(i as u64)); - } - - assert_eq!( - NumberMap::iter().collect::>(), - (0..100).map(|x| (x as u32, x as u64)).collect::>(), - ); - - // do translation. - NumberMap::translate(|k: u32, v: u64| if k % 2 == 0 { Some((k as u64) << 32 | v) } else { None }); - - assert_eq!( - NumberMap::iter().collect::>(), - (0..50u32).map(|x| x * 2).map(|x| (x, (x as u64) << 32 | x as u64)).collect::>(), - ); - }) - } + use crate::storage::{generator::StorageValue, unhashed, IterableStorageMap}; + use codec::Encode; + use sp_io::TestExternalities; + + struct Runtime {} + pub trait Trait { + type Origin; + type BlockNumber; + } + + impl Trait for Runtime { + type Origin = u32; + type BlockNumber = u32; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + crate::decl_storage! 
{ + trait Store for Module as Runtime { + Value get(fn value) config(): (u64, u64); + NumberMap: map hasher(identity) u32 => u64; + } + } + + #[test] + fn value_translate_works() { + let t = GenesisConfig::default().build_storage().unwrap(); + TestExternalities::new(t).execute_with(|| { + // put the old value `1111u32` in the storage. + let key = Value::storage_value_final_key(); + unhashed::put_raw(&key, &1111u32.encode()); + + // translate + let translate_fn = |old: Option| -> Option<(u64, u64)> { + old.map(|o| (o.into(), (o * 2).into())) + }; + let _ = Value::translate(translate_fn); + + // new storage should be `(1111, 1111 * 2)` + assert_eq!(Value::get(), (1111, 2222)); + }) + } + + #[test] + fn map_translate_works() { + let t = GenesisConfig::default().build_storage().unwrap(); + TestExternalities::new(t).execute_with(|| { + // start with a map of u32 -> u32. + for i in 0u32..100u32 { + unhashed::put(&NumberMap::hashed_key_for(&i), &(i as u64)); + } + + assert_eq!( + NumberMap::iter().collect::>(), + (0..100).map(|x| (x as u32, x as u64)).collect::>(), + ); + + // do translation. + NumberMap::translate(|k: u32, v: u64| { + if k % 2 == 0 { + Some((k as u64) << 32 | v) + } else { + None + } + }); + + assert_eq!( + NumberMap::iter().collect::>(), + (0..50u32) + .map(|x| x * 2) + .map(|x| (x, (x as u64) << 32 | x as u64)) + .collect::>(), + ); + }) + } } diff --git a/frame/support/src/storage/generator/value.rs b/frame/support/src/storage/generator/value.rs index 9e26131f48..75f8571302 100644 --- a/frame/support/src/storage/generator/value.rs +++ b/frame/support/src/storage/generator/value.rs @@ -14,10 +14,14 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use crate::{ + hash::{StorageHasher, Twox128}, + storage::{self, unhashed}, + traits::Len, +}; +use codec::{Decode, Encode, EncodeAppend, EncodeLike, FullCodec}; #[cfg(not(feature = "std"))] use sp_std::prelude::*; -use codec::{FullCodec, Encode, EncodeAppend, EncodeLike, Decode}; -use crate::{storage::{self, unhashed}, hash::{Twox128, StorageHasher}, traits::Len}; /// Generator for `StorageValue` used by `decl_storage`. /// @@ -26,164 +30,166 @@ use crate::{storage::{self, unhashed}, hash::{Twox128, StorageHasher}, traits::L /// Twox128(module_prefix) ++ Twox128(storage_prefix) /// ``` pub trait StorageValue { - /// The type that get/take returns. - type Query; + /// The type that get/take returns. + type Query; - /// Module prefix. Used for generating final key. - fn module_prefix() -> &'static [u8]; + /// Module prefix. Used for generating final key. + fn module_prefix() -> &'static [u8]; - /// Storage prefix. Used for generating final key. - fn storage_prefix() -> &'static [u8]; + /// Storage prefix. Used for generating final key. + fn storage_prefix() -> &'static [u8]; - /// Convert an optional value retrieved from storage to the type queried. - fn from_optional_value_to_query(v: Option) -> Self::Query; + /// Convert an optional value retrieved from storage to the type queried. + fn from_optional_value_to_query(v: Option) -> Self::Query; - /// Convert a query to an optional value into storage. - fn from_query_to_optional_value(v: Self::Query) -> Option; + /// Convert a query to an optional value into storage. + fn from_query_to_optional_value(v: Self::Query) -> Option; - /// Generate the full key used in top storage. - fn storage_value_final_key() -> [u8; 32] { - let mut final_key = [0u8; 32]; - final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix())); - final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix())); - final_key - } + /// Generate the full key used in top storage. 
+ fn storage_value_final_key() -> [u8; 32] { + let mut final_key = [0u8; 32]; + final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix())); + final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix())); + final_key + } } impl> storage::StorageValue for G { - type Query = G::Query; - - fn hashed_key() -> [u8; 32] { - Self::storage_value_final_key() - } - - fn exists() -> bool { - unhashed::exists(&Self::storage_value_final_key()) - } - - fn get() -> Self::Query { - let value = unhashed::get(&Self::storage_value_final_key()); - G::from_optional_value_to_query(value) - } - - fn try_get() -> Result { - unhashed::get(&Self::storage_value_final_key()).ok_or(()) - } - - fn translate) -> Option>(f: F) -> Result, ()> { - let key = Self::storage_value_final_key(); - - // attempt to get the length directly. - let maybe_old = match unhashed::get_raw(&key) { - Some(old_data) => Some(O::decode(&mut &old_data[..]).map_err(|_| ())?), - None => None, - }; - let maybe_new = f(maybe_old); - if let Some(new) = maybe_new.as_ref() { - new.using_encoded(|d| unhashed::put_raw(&key, d)); - } else { - unhashed::kill(&key); - } - Ok(maybe_new) - } - - fn put>(val: Arg) { - unhashed::put(&Self::storage_value_final_key(), &val) - } - - fn set(maybe_val: Self::Query) { - if let Some(val) = G::from_query_to_optional_value(maybe_val) { - unhashed::put(&Self::storage_value_final_key(), &val) - } else { - unhashed::kill(&Self::storage_value_final_key()) - } - } - - fn kill() { - unhashed::kill(&Self::storage_value_final_key()) - } - - fn mutate R>(f: F) -> R { - let mut val = G::get(); - - let ret = f(&mut val); - match G::from_query_to_optional_value(val) { - Some(ref val) => G::put(val), - None => G::kill(), - } - ret - } - - fn take() -> G::Query { - let key = Self::storage_value_final_key(); - let value = unhashed::get(&key); - if value.is_some() { - unhashed::kill(&key) - } - G::from_optional_value_to_query(value) - } - - /// Append the given items to the value in 
the storage. - /// - /// `T` is required to implement `codec::EncodeAppend`. - fn append(items: Items) -> Result<(), &'static str> - where - Item: Encode, - EncodeLikeItem: EncodeLike, - T: EncodeAppend, - Items: IntoIterator, - Items::IntoIter: ExactSizeIterator, - { - let key = Self::storage_value_final_key(); - let encoded_value = unhashed::get_raw(&key) - .unwrap_or_else(|| { - match G::from_query_to_optional_value(G::from_optional_value_to_query(None)) { - Some(value) => value.encode(), - None => Vec::new(), - } - }); - - let new_val = T::append_or_new( - encoded_value, - items, - ).map_err(|_| "Could not append given item")?; - unhashed::put_raw(&key, &new_val); - Ok(()) - } - - /// Safely append the given items to the value in the storage. If a codec error occurs, then the - /// old (presumably corrupt) value is replaced with the given `items`. - /// - /// `T` is required to implement `codec::EncodeAppend`. - fn append_or_put(items: Items) where - Item: Encode, - EncodeLikeItem: EncodeLike, - T: EncodeAppend, - Items: IntoIterator + Clone + EncodeLike, - Items::IntoIter: ExactSizeIterator - { - Self::append(items.clone()).unwrap_or_else(|_| Self::put(items)); - } - - /// Read the length of the value in a fast way, without decoding the entire value. - /// - /// `T` is required to implement `Codec::DecodeLength`. - /// - /// Note that `0` is returned as the default value if no encoded value exists at the given key. - /// Therefore, this function cannot be used as a sign of _existence_. use the `::exists()` - /// function for this purpose. - fn decode_len() -> Result where T: codec::DecodeLength, T: Len { - let key = Self::storage_value_final_key(); - - // attempt to get the length directly. 
- if let Some(k) = unhashed::get_raw(&key) { - ::len(&k).map_err(|e| e.what()) - } else { - let len = G::from_query_to_optional_value(G::from_optional_value_to_query(None)) - .map(|v| v.len()) - .unwrap_or(0); - - Ok(len) - } - } + type Query = G::Query; + + fn hashed_key() -> [u8; 32] { + Self::storage_value_final_key() + } + + fn exists() -> bool { + unhashed::exists(&Self::storage_value_final_key()) + } + + fn get() -> Self::Query { + let value = unhashed::get(&Self::storage_value_final_key()); + G::from_optional_value_to_query(value) + } + + fn try_get() -> Result { + unhashed::get(&Self::storage_value_final_key()).ok_or(()) + } + + fn translate) -> Option>(f: F) -> Result, ()> { + let key = Self::storage_value_final_key(); + + // attempt to get the length directly. + let maybe_old = match unhashed::get_raw(&key) { + Some(old_data) => Some(O::decode(&mut &old_data[..]).map_err(|_| ())?), + None => None, + }; + let maybe_new = f(maybe_old); + if let Some(new) = maybe_new.as_ref() { + new.using_encoded(|d| unhashed::put_raw(&key, d)); + } else { + unhashed::kill(&key); + } + Ok(maybe_new) + } + + fn put>(val: Arg) { + unhashed::put(&Self::storage_value_final_key(), &val) + } + + fn set(maybe_val: Self::Query) { + if let Some(val) = G::from_query_to_optional_value(maybe_val) { + unhashed::put(&Self::storage_value_final_key(), &val) + } else { + unhashed::kill(&Self::storage_value_final_key()) + } + } + + fn kill() { + unhashed::kill(&Self::storage_value_final_key()) + } + + fn mutate R>(f: F) -> R { + let mut val = G::get(); + + let ret = f(&mut val); + match G::from_query_to_optional_value(val) { + Some(ref val) => G::put(val), + None => G::kill(), + } + ret + } + + fn take() -> G::Query { + let key = Self::storage_value_final_key(); + let value = unhashed::get(&key); + if value.is_some() { + unhashed::kill(&key) + } + G::from_optional_value_to_query(value) + } + + /// Append the given items to the value in the storage. 
+ /// + /// `T` is required to implement `codec::EncodeAppend`. + fn append(items: Items) -> Result<(), &'static str> + where + Item: Encode, + EncodeLikeItem: EncodeLike, + T: EncodeAppend, + Items: IntoIterator, + Items::IntoIter: ExactSizeIterator, + { + let key = Self::storage_value_final_key(); + let encoded_value = unhashed::get_raw(&key).unwrap_or_else(|| { + match G::from_query_to_optional_value(G::from_optional_value_to_query(None)) { + Some(value) => value.encode(), + None => Vec::new(), + } + }); + + let new_val = + T::append_or_new(encoded_value, items).map_err(|_| "Could not append given item")?; + unhashed::put_raw(&key, &new_val); + Ok(()) + } + + /// Safely append the given items to the value in the storage. If a codec error occurs, then the + /// old (presumably corrupt) value is replaced with the given `items`. + /// + /// `T` is required to implement `codec::EncodeAppend`. + fn append_or_put(items: Items) + where + Item: Encode, + EncodeLikeItem: EncodeLike, + T: EncodeAppend, + Items: IntoIterator + Clone + EncodeLike, + Items::IntoIter: ExactSizeIterator, + { + Self::append(items.clone()).unwrap_or_else(|_| Self::put(items)); + } + + /// Read the length of the value in a fast way, without decoding the entire value. + /// + /// `T` is required to implement `Codec::DecodeLength`. + /// + /// Note that `0` is returned as the default value if no encoded value exists at the given key. + /// Therefore, this function cannot be used as a sign of _existence_. use the `::exists()` + /// function for this purpose. + fn decode_len() -> Result + where + T: codec::DecodeLength, + T: Len, + { + let key = Self::storage_value_final_key(); + + // attempt to get the length directly. 
+ if let Some(k) = unhashed::get_raw(&key) { + ::len(&k).map_err(|e| e.what()) + } else { + let len = G::from_query_to_optional_value(G::from_optional_value_to_query(None)) + .map(|v| v.len()) + .unwrap_or(0); + + Ok(len) + } + } } diff --git a/frame/support/src/storage/hashed.rs b/frame/support/src/storage/hashed.rs index a3ffddcb84..5aa015c8f5 100644 --- a/frame/support/src/storage/hashed.rs +++ b/frame/support/src/storage/hashed.rs @@ -17,139 +17,139 @@ //! Operation on runtime storage using hashed keys. use super::unhashed; +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(hash: &HashFn, key: &[u8]) -> Option where - T: Decode + Sized, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::get(&hash(key).as_ref()) + unhashed::get(&hash(key).as_ref()) } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default(hash: &HashFn, key: &[u8]) -> T where - T: Decode + Sized + Default, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized + Default, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::get_or_default(&hash(key).as_ref()) + unhashed::get_or_default(&hash(key).as_ref()) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. pub fn get_or(hash: &HashFn, key: &[u8], default_value: T) -> T where - T: Decode + Sized, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::get_or(&hash(key).as_ref(), default_value) + unhashed::get_or(&hash(key).as_ref(), default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. 
pub fn get_or_else(hash: &HashFn, key: &[u8], default_value: F) -> T where - T: Decode + Sized, - F: FnOnce() -> T, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized, + F: FnOnce() -> T, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::get_or_else(&hash(key).as_ref(), default_value) + unhashed::get_or_else(&hash(key).as_ref(), default_value) } /// Put `value` in storage under `key`. pub fn put(hash: &HashFn, key: &[u8], value: &T) where - T: Encode, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Encode, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::put(&hash(key).as_ref(), value) + unhashed::put(&hash(key).as_ref(), value) } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take(hash: &HashFn, key: &[u8]) -> Option where - T: Decode + Sized, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::take(&hash(key).as_ref()) + unhashed::take(&hash(key).as_ref()) } /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default(hash: &HashFn, key: &[u8]) -> T where - T: Decode + Sized + Default, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized + Default, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::take_or_default(&hash(key).as_ref()) + unhashed::take_or_default(&hash(key).as_ref()) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. 
pub fn take_or(hash: &HashFn, key: &[u8], default_value: T) -> T where - T: Decode + Sized, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::take_or(&hash(key).as_ref(), default_value) + unhashed::take_or(&hash(key).as_ref(), default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else(hash: &HashFn, key: &[u8], default_value: F) -> T where - T: Decode + Sized, - F: FnOnce() -> T, - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + T: Decode + Sized, + F: FnOnce() -> T, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::take_or_else(&hash(key).as_ref(), default_value) + unhashed::take_or_else(&hash(key).as_ref(), default_value) } /// Check to see if `key` has an explicit entry in storage. pub fn exists(hash: &HashFn, key: &[u8]) -> bool where - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::exists(&hash(key).as_ref()) + unhashed::exists(&hash(key).as_ref()) } /// Ensure `key` has no explicit entry in storage. pub fn kill(hash: &HashFn, key: &[u8]) where - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::kill(&hash(key).as_ref()) + unhashed::kill(&hash(key).as_ref()) } /// Get a Vec of bytes from storage. pub fn get_raw(hash: &HashFn, key: &[u8]) -> Option> where - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::get_raw(&hash(key).as_ref()) + unhashed::get_raw(&hash(key).as_ref()) } /// Put a raw byte slice into storage. 
pub fn put_raw(hash: &HashFn, key: &[u8], value: &[u8]) where - HashFn: Fn(&[u8]) -> R, - R: AsRef<[u8]>, + HashFn: Fn(&[u8]) -> R, + R: AsRef<[u8]>, { - unhashed::put_raw(&hash(key).as_ref(), value) + unhashed::put_raw(&hash(key).as_ref(), value) } diff --git a/frame/support/src/storage/migration.rs b/frame/support/src/storage/migration.rs index 264c3c644e..1ef1e867a9 100644 --- a/frame/support/src/storage/migration.rs +++ b/frame/support/src/storage/migration.rs @@ -16,181 +16,191 @@ //! Some utilities for helping access storage with arbitrary key types. -use sp_std::prelude::*; -use codec::{Encode, Decode}; -use crate::{StorageHasher, Twox128}; use crate::hash::ReversibleStorageHasher; +use crate::{StorageHasher, Twox128}; +use codec::{Decode, Encode}; +use sp_std::prelude::*; /// Utility to iterate through raw items in storage. pub struct StorageIterator { - prefix: Vec, - previous_key: Vec, - drain: bool, - _phantom: ::sp_std::marker::PhantomData, + prefix: Vec, + previous_key: Vec, + drain: bool, + _phantom: ::sp_std::marker::PhantomData, } impl StorageIterator { - /// Construct iterator to iterate over map items in `module` for the map called `item`. - pub fn new(module: &[u8], item: &[u8]) -> Self { - Self::with_suffix(module, item, &[][..]) - } - - /// Construct iterator to iterate over map items in `module` for the map called `item`. - pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { - let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); - prefix.extend_from_slice(suffix); - let previous_key = prefix.clone(); - Self { prefix, previous_key, drain: false, _phantom: Default::default() } - } - - /// Mutate this iterator into a draining iterator; items iterated are removed from storage. - pub fn drain(mut self) -> Self { - self.drain = true; - self - } + /// Construct iterator to iterate over map items in `module` for the map called `item`. 
+ pub fn new(module: &[u8], item: &[u8]) -> Self { + Self::with_suffix(module, item, &[][..]) + } + + /// Construct iterator to iterate over map items in `module` for the map called `item`. + pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { + let mut prefix = Vec::new(); + prefix.extend_from_slice(&Twox128::hash(module)); + prefix.extend_from_slice(&Twox128::hash(item)); + prefix.extend_from_slice(suffix); + let previous_key = prefix.clone(); + Self { + prefix, + previous_key, + drain: false, + _phantom: Default::default(), + } + } + + /// Mutate this iterator into a draining iterator; items iterated are removed from storage. + pub fn drain(mut self) -> Self { + self.drain = true; + self + } } impl Iterator for StorageIterator { - type Item = (Vec, T); - - fn next(&mut self) -> Option<(Vec, T)> { - loop { - let maybe_next = sp_io::storage::next_key(&self.previous_key) - .filter(|n| n.starts_with(&self.prefix)); - break match maybe_next { - Some(next) => { - self.previous_key = next.clone(); - let maybe_value = frame_support::storage::unhashed::get::(&next); - match maybe_value { - Some(value) => { - if self.drain { - frame_support::storage::unhashed::kill(&next); - } - Some((self.previous_key[self.prefix.len()..].to_vec(), value)) - } - None => continue, - } - } - None => None, - } - } - } + type Item = (Vec, T); + + fn next(&mut self) -> Option<(Vec, T)> { + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next.clone(); + let maybe_value = frame_support::storage::unhashed::get::(&next); + match maybe_value { + Some(value) => { + if self.drain { + frame_support::storage::unhashed::kill(&next); + } + Some((self.previous_key[self.prefix.len()..].to_vec(), value)) + } + None => continue, + } + } + None => None, + }; + } + } } /// Utility to iterate through raw items in storage. 
pub struct StorageKeyIterator { - prefix: Vec, - previous_key: Vec, - drain: bool, - _phantom: ::sp_std::marker::PhantomData<(K, T, H)>, + prefix: Vec, + previous_key: Vec, + drain: bool, + _phantom: ::sp_std::marker::PhantomData<(K, T, H)>, } impl StorageKeyIterator { - /// Construct iterator to iterate over map items in `module` for the map called `item`. - pub fn new(module: &[u8], item: &[u8]) -> Self { - Self::with_suffix(module, item, &[][..]) - } - - /// Construct iterator to iterate over map items in `module` for the map called `item`. - pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { - let mut prefix = Vec::new(); - prefix.extend_from_slice(&Twox128::hash(module)); - prefix.extend_from_slice(&Twox128::hash(item)); - prefix.extend_from_slice(suffix); - let previous_key = prefix.clone(); - Self { prefix, previous_key, drain: false, _phantom: Default::default() } - } - - /// Mutate this iterator into a draining iterator; items iterated are removed from storage. - pub fn drain(mut self) -> Self { - self.drain = true; - self - } + /// Construct iterator to iterate over map items in `module` for the map called `item`. + pub fn new(module: &[u8], item: &[u8]) -> Self { + Self::with_suffix(module, item, &[][..]) + } + + /// Construct iterator to iterate over map items in `module` for the map called `item`. + pub fn with_suffix(module: &[u8], item: &[u8], suffix: &[u8]) -> Self { + let mut prefix = Vec::new(); + prefix.extend_from_slice(&Twox128::hash(module)); + prefix.extend_from_slice(&Twox128::hash(item)); + prefix.extend_from_slice(suffix); + let previous_key = prefix.clone(); + Self { + prefix, + previous_key, + drain: false, + _phantom: Default::default(), + } + } + + /// Mutate this iterator into a draining iterator; items iterated are removed from storage. 
+ pub fn drain(mut self) -> Self { + self.drain = true; + self + } } impl Iterator - for StorageKeyIterator + for StorageKeyIterator { - type Item = (K, T); - - fn next(&mut self) -> Option<(K, T)> { - loop { - let maybe_next = sp_io::storage::next_key(&self.previous_key) - .filter(|n| n.starts_with(&self.prefix)); - break match maybe_next { - Some(next) => { - self.previous_key = next.clone(); - let mut key_material = H::reverse(&next[self.prefix.len()..]); - match K::decode(&mut key_material) { - Ok(key) => { - let maybe_value = frame_support::storage::unhashed::get::(&next); - match maybe_value { - Some(value) => { - if self.drain { - frame_support::storage::unhashed::kill(&next); - } - Some((key, value)) - } - None => continue, - } - } - Err(_) => continue, - } - } - None => None, - } - } - } + type Item = (K, T); + + fn next(&mut self) -> Option<(K, T)> { + loop { + let maybe_next = sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix)); + break match maybe_next { + Some(next) => { + self.previous_key = next.clone(); + let mut key_material = H::reverse(&next[self.prefix.len()..]); + match K::decode(&mut key_material) { + Ok(key) => { + let maybe_value = frame_support::storage::unhashed::get::(&next); + match maybe_value { + Some(value) => { + if self.drain { + frame_support::storage::unhashed::kill(&next); + } + Some((key, value)) + } + None => continue, + } + } + Err(_) => continue, + } + } + None => None, + }; + } + } } /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn have_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> bool { - get_storage_value::<()>(module, item, hash).is_some() + get_storage_value::<()>(module, item, hash).is_some() } /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. 
pub fn get_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> Option { - let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); - key[32..].copy_from_slice(hash); - frame_support::storage::unhashed::get::(&key) + let mut key = vec![0u8; 32 + hash.len()]; + key[0..16].copy_from_slice(&Twox128::hash(module)); + key[16..32].copy_from_slice(&Twox128::hash(item)); + key[32..].copy_from_slice(hash); + frame_support::storage::unhashed::get::(&key) } /// Take a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn take_storage_value(module: &[u8], item: &[u8], hash: &[u8]) -> Option { - let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); - key[32..].copy_from_slice(hash); - frame_support::storage::unhashed::take::(&key) + let mut key = vec![0u8; 32 + hash.len()]; + key[0..16].copy_from_slice(&Twox128::hash(module)); + key[16..32].copy_from_slice(&Twox128::hash(item)); + key[32..].copy_from_slice(hash); + frame_support::storage::unhashed::take::(&key) } /// Put a particular value into storage by the `module`, the map's `item` name and the key `hash`. pub fn put_storage_value(module: &[u8], item: &[u8], hash: &[u8], value: T) { - let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); - key[32..].copy_from_slice(hash); - frame_support::storage::unhashed::put(&key, &value); + let mut key = vec![0u8; 32 + hash.len()]; + key[0..16].copy_from_slice(&Twox128::hash(module)); + key[16..32].copy_from_slice(&Twox128::hash(item)); + key[32..].copy_from_slice(hash); + frame_support::storage::unhashed::put(&key, &value); } /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. 
pub fn remove_storage_prefix(module: &[u8], item: &[u8], hash: &[u8]) { - let mut key = vec![0u8; 32 + hash.len()]; - key[0..16].copy_from_slice(&Twox128::hash(module)); - key[16..32].copy_from_slice(&Twox128::hash(item)); - key[32..].copy_from_slice(hash); - frame_support::storage::unhashed::kill_prefix(&key) + let mut key = vec![0u8; 32 + hash.len()]; + key[0..16].copy_from_slice(&Twox128::hash(module)); + key[16..32].copy_from_slice(&Twox128::hash(item)); + key[32..].copy_from_slice(hash); + frame_support::storage::unhashed::kill_prefix(&key) } /// Get a particular value in storage by the `module`, the map's `item` name and the key `hash`. pub fn take_storage_item( - module: &[u8], - item: &[u8], - key: K, + module: &[u8], + item: &[u8], + key: K, ) -> Option { - take_storage_value(module, item, key.using_encoded(H::hash).as_ref()) + take_storage_value(module, item, key.using_encoded(H::hash).as_ref()) } diff --git a/frame/support/src/storage/mod.rs b/frame/support/src/storage/mod.rs index 47201e22e6..e5cffbb3d2 100644 --- a/frame/support/src/storage/mod.rs +++ b/frame/support/src/storage/mod.rs @@ -16,111 +16,115 @@ //! Stuff to do with the runtime's storage. -use sp_std::{prelude::*, marker::PhantomData}; -use codec::{FullCodec, FullEncode, Encode, EncodeAppend, EncodeLike, Decode}; -use crate::{traits::Len, hash::{Twox128, StorageHasher}}; +use crate::{ + hash::{StorageHasher, Twox128}, + traits::Len, +}; +use codec::{Decode, Encode, EncodeAppend, EncodeLike, FullCodec, FullEncode}; +use sp_std::{marker::PhantomData, prelude::*}; -pub mod unhashed; -pub mod hashed; pub mod child; #[doc(hidden)] pub mod generator; +pub mod hashed; pub mod migration; +pub mod unhashed; /// A trait for working with macro-generated storage values under the substrate storage API. /// /// Details on implementation can be found at /// [`generator::StorageValue`] pub trait StorageValue { - /// The type that get/take return. - type Query; - - /// Get the storage key. 
- fn hashed_key() -> [u8; 32]; - - /// Does the value (explicitly) exist in storage? - fn exists() -> bool; - - /// Load the value from the provided storage instance. - fn get() -> Self::Query; - - /// Try to get the underlying value from the provided storage instance; `Ok` if it exists, - /// `Err` if not. - fn try_get() -> Result; - - /// Translate a value from some previous type (`O`) to the current type. - /// - /// `f: F` is the translation function. - /// - /// Returns `Err` if the storage item could not be interpreted as the old type, and Ok, along - /// with the new value if it could. - /// - /// NOTE: This operates from and to `Option<_>` types; no effort is made to respect the default - /// value of the original type. - /// - /// # Warning - /// - /// This function must be used with care, before being updated the storage still contains the - /// old type, thus other calls (such as `get`) will fail at decoding it. - /// - /// # Usage - /// - /// This would typically be called inside the module implementation of on_runtime_upgrade, while - /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More - /// precisely prior initialized modules doesn't make use of this storage). - fn translate) -> Option>(f: F) -> Result, ()>; - - /// Store a value under this key into the provided storage instance. - fn put>(val: Arg); - - /// Store a value under this key into the provided storage instance; this uses the query - /// type rather than the underlying value. - fn set(val: Self::Query); - - /// Mutate the value - fn mutate R>(f: F) -> R; - - /// Clear the storage value. - fn kill(); - - /// Take a value from storage, removing it afterwards. - fn take() -> Self::Query; - - /// Append the given item to the value in the storage. - /// - /// `T` is required to implement `codec::EncodeAppend`. 
- fn append(items: Items) -> Result<(), &'static str> - where - Item: Encode, - EncodeLikeItem: EncodeLike, - T: EncodeAppend, - Items: IntoIterator, - Items::IntoIter: ExactSizeIterator; - - /// Append the given items to the value in the storage. - /// - /// `T` is required to implement `Codec::EncodeAppend`. - /// - /// Upon any failure, it replaces `items` as the new value (assuming that the previous stored - /// data is simply corrupt and no longer usable). - /// - /// ### WARNING - /// - /// use with care; if your use-case is not _exactly_ as what this function is doing, - /// you should use append and sensibly handle failure within the runtime code if it happens. - fn append_or_put(items: Items) where - Item: Encode, - EncodeLikeItem: EncodeLike, - T: EncodeAppend, - Items: IntoIterator + Clone + EncodeLike, - Items::IntoIter: ExactSizeIterator; - - - /// Read the length of the value in a fast way, without decoding the entire value. - /// - /// `T` is required to implement `Codec::DecodeLength`. - fn decode_len() -> Result - where T: codec::DecodeLength + Len; + /// The type that get/take return. + type Query; + + /// Get the storage key. + fn hashed_key() -> [u8; 32]; + + /// Does the value (explicitly) exist in storage? + fn exists() -> bool; + + /// Load the value from the provided storage instance. + fn get() -> Self::Query; + + /// Try to get the underlying value from the provided storage instance; `Ok` if it exists, + /// `Err` if not. + fn try_get() -> Result; + + /// Translate a value from some previous type (`O`) to the current type. + /// + /// `f: F` is the translation function. + /// + /// Returns `Err` if the storage item could not be interpreted as the old type, and Ok, along + /// with the new value if it could. + /// + /// NOTE: This operates from and to `Option<_>` types; no effort is made to respect the default + /// value of the original type. 
+ /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade, while + /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More + /// precisely prior initialized modules doesn't make use of this storage). + fn translate) -> Option>(f: F) -> Result, ()>; + + /// Store a value under this key into the provided storage instance. + fn put>(val: Arg); + + /// Store a value under this key into the provided storage instance; this uses the query + /// type rather than the underlying value. + fn set(val: Self::Query); + + /// Mutate the value + fn mutate R>(f: F) -> R; + + /// Clear the storage value. + fn kill(); + + /// Take a value from storage, removing it afterwards. + fn take() -> Self::Query; + + /// Append the given item to the value in the storage. + /// + /// `T` is required to implement `codec::EncodeAppend`. + fn append(items: Items) -> Result<(), &'static str> + where + Item: Encode, + EncodeLikeItem: EncodeLike, + T: EncodeAppend, + Items: IntoIterator, + Items::IntoIter: ExactSizeIterator; + + /// Append the given items to the value in the storage. + /// + /// `T` is required to implement `Codec::EncodeAppend`. + /// + /// Upon any failure, it replaces `items` as the new value (assuming that the previous stored + /// data is simply corrupt and no longer usable). + /// + /// ### WARNING + /// + /// use with care; if your use-case is not _exactly_ as what this function is doing, + /// you should use append and sensibly handle failure within the runtime code if it happens. 
+ fn append_or_put(items: Items) + where + Item: Encode, + EncodeLikeItem: EncodeLike, + T: EncodeAppend, + Items: IntoIterator + Clone + EncodeLike, + Items::IntoIter: ExactSizeIterator; + + /// Read the length of the value in a fast way, without decoding the entire value. + /// + /// `T` is required to implement `Codec::DecodeLength`. + fn decode_len() -> Result + where + T: codec::DecodeLength + Len; } /// A strongly-typed map in storage. @@ -128,145 +132,152 @@ pub trait StorageValue { /// Details on implementation can be found at /// [`generator::StorageMap`] pub trait StorageMap { - /// The type that get/take return. - type Query; - - /// Get the storage key used to fetch a value corresponding to a specific key. - fn hashed_key_for>(key: KeyArg) -> Vec; - - /// Does the value (explicitly) exist in storage? - fn contains_key>(key: KeyArg) -> bool; - - /// Load the value associated with the given key from the map. - fn get>(key: KeyArg) -> Self::Query; - - /// Swap the values of two keys. - fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2); - - /// Store a value to be associated with the given key from the map. - fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg); - - /// Remove the value under a key. - fn remove>(key: KeyArg); - - /// Mutate the value under a key. - fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R; - - /// Mutate the item, only if an `Ok` value is returned. - fn try_mutate, R, E, F: FnOnce(&mut Self::Query) -> Result>( - key: KeyArg, - f: F, - ) -> Result; - - /// Mutate the value under a key. Deletes the item if mutated to a `None`. - fn mutate_exists, R, F: FnOnce(&mut Option) -> R>(key: KeyArg, f: F) -> R; - - /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. - fn try_mutate_exists, R, E, F: FnOnce(&mut Option) -> Result>( - key: KeyArg, - f: F, - ) -> Result; - - /// Take the value under a key. 
- fn take>(key: KeyArg) -> Self::Query; - - /// Append the given items to the value in the storage. - /// - /// `V` is required to implement `codec::EncodeAppend`. - fn append(key: KeyArg, items: Items) -> Result<(), &'static str> where - KeyArg: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator, - Items::IntoIter: ExactSizeIterator; - - /// Safely append the given items to the value in the storage. If a codec error occurs, then the - /// old (presumably corrupt) value is replaced with the given `items`. - /// - /// `V` is required to implement `codec::EncodeAppend`. - fn append_or_insert(key: KeyArg, items: Items) where - KeyArg: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator + Clone + EncodeLike, - Items::IntoIter: ExactSizeIterator; - - /// Read the length of the value in a fast way, without decoding the entire value. - /// - /// `T` is required to implement `Codec::DecodeLength`. - /// - /// Note that `0` is returned as the default value if no encoded value exists at the given key. - /// Therefore, this function cannot be used as a sign of _existence_. use the `::contains_key()` - /// function for this purpose. - fn decode_len>(key: KeyArg) -> Result - where V: codec::DecodeLength + Len; - - /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. - /// - /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. - fn migrate_key>(key: KeyArg) -> Option; - - /// Migrate an item with the given `key` from a `blake2_256` hasher to the current hasher. - /// - /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. - fn migrate_key_from_blake>(key: KeyArg) -> Option { - Self::migrate_key::(key) - } + /// The type that get/take return. + type Query; + + /// Get the storage key used to fetch a value corresponding to a specific key. 
+ fn hashed_key_for>(key: KeyArg) -> Vec; + + /// Does the value (explicitly) exist in storage? + fn contains_key>(key: KeyArg) -> bool; + + /// Load the value associated with the given key from the map. + fn get>(key: KeyArg) -> Self::Query; + + /// Swap the values of two keys. + fn swap, KeyArg2: EncodeLike>(key1: KeyArg1, key2: KeyArg2); + + /// Store a value to be associated with the given key from the map. + fn insert, ValArg: EncodeLike>(key: KeyArg, val: ValArg); + + /// Remove the value under a key. + fn remove>(key: KeyArg); + + /// Mutate the value under a key. + fn mutate, R, F: FnOnce(&mut Self::Query) -> R>(key: KeyArg, f: F) -> R; + + /// Mutate the item, only if an `Ok` value is returned. + fn try_mutate, R, E, F: FnOnce(&mut Self::Query) -> Result>( + key: KeyArg, + f: F, + ) -> Result; + + /// Mutate the value under a key. Deletes the item if mutated to a `None`. + fn mutate_exists, R, F: FnOnce(&mut Option) -> R>( + key: KeyArg, + f: F, + ) -> R; + + /// Mutate the item, only if an `Ok` value is returned. Deletes the item if mutated to a `None`. + fn try_mutate_exists, R, E, F: FnOnce(&mut Option) -> Result>( + key: KeyArg, + f: F, + ) -> Result; + + /// Take the value under a key. + fn take>(key: KeyArg) -> Self::Query; + + /// Append the given items to the value in the storage. + /// + /// `V` is required to implement `codec::EncodeAppend`. + fn append( + key: KeyArg, + items: Items, + ) -> Result<(), &'static str> + where + KeyArg: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator, + Items::IntoIter: ExactSizeIterator; + + /// Safely append the given items to the value in the storage. If a codec error occurs, then the + /// old (presumably corrupt) value is replaced with the given `items`. + /// + /// `V` is required to implement `codec::EncodeAppend`. 
+ fn append_or_insert(key: KeyArg, items: Items) + where + KeyArg: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator + Clone + EncodeLike, + Items::IntoIter: ExactSizeIterator; + + /// Read the length of the value in a fast way, without decoding the entire value. + /// + /// `T` is required to implement `Codec::DecodeLength`. + /// + /// Note that `0` is returned as the default value if no encoded value exists at the given key. + /// Therefore, this function cannot be used as a sign of _existence_. use the `::contains_key()` + /// function for this purpose. + fn decode_len>(key: KeyArg) -> Result + where + V: codec::DecodeLength + Len; + + /// Migrate an item with the given `key` from a defunct `OldHasher` to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + fn migrate_key>(key: KeyArg) -> Option; + + /// Migrate an item with the given `key` from a `blake2_256` hasher to the current hasher. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. + fn migrate_key_from_blake>(key: KeyArg) -> Option { + Self::migrate_key::(key) + } } /// A strongly-typed map in storage whose keys and values can be iterated over. pub trait IterableStorageMap: StorageMap { - /// The type that iterates over all `(key, value)`. - type Iterator: Iterator; + /// The type that iterates over all `(key, value)`. + type Iterator: Iterator; - /// Enumerate all elements in the map in no particular order. If you alter the map while doing - /// this, you'll get undefined results. - fn iter() -> Self::Iterator; + /// Enumerate all elements in the map in no particular order. If you alter the map while doing + /// this, you'll get undefined results. + fn iter() -> Self::Iterator; - /// Remove all elements from the map and iterate through them in no particular order. 
If you - /// add elements to the map while doing this, you'll get undefined results. - fn drain() -> Self::Iterator; + /// Remove all elements from the map and iterate through them in no particular order. If you + /// add elements to the map while doing this, you'll get undefined results. + fn drain() -> Self::Iterator; - /// Translate the values of all elements by a function `f`, in the map in no particular order. - /// By returning `None` from `f` for an element, you'll remove it from the map. - fn translate Option>(f: F); + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + fn translate Option>(f: F); } /// A strongly-typed double map in storage whose secondary keys and values can be iterated over. -pub trait IterableStorageDoubleMap< - K1: FullCodec, - K2: FullCodec, - V: FullCodec ->: StorageDoubleMap { - /// The type that iterates over all `(key2, value)`. - type PrefixIterator: Iterator; - - /// The type that iterates over all `(key1, key2, value)`. - type Iterator: Iterator; - - /// Enumerate all elements in the map with first key `k1` in no particular order. If you add or - /// remove values whose first key is `k1` to the map while doing this, you'll get undefined - /// results. - fn iter_prefix(k1: impl EncodeLike) -> Self::PrefixIterator; - - /// Remove all elements from the map with first key `k1` and iterate through them in no - /// particular order. If you add elements with first key `k1` to the map while doing this, - /// you'll get undefined results. - fn drain_prefix(k1: impl EncodeLike) -> Self::PrefixIterator; - - /// Enumerate all elements in the map in no particular order. If you add or remove values to - /// the map while doing this, you'll get undefined results. - fn iter() -> Self::Iterator; - - /// Remove all elements from the map and iterate through them in no particular order. 
If you - /// add elements to the map while doing this, you'll get undefined results. - fn drain() -> Self::Iterator; - - /// Translate the values of all elements by a function `f`, in the map in no particular order. - /// By returning `None` from `f` for an element, you'll remove it from the map. - fn translate Option>(f: F); +pub trait IterableStorageDoubleMap: + StorageDoubleMap +{ + /// The type that iterates over all `(key2, value)`. + type PrefixIterator: Iterator; + + /// The type that iterates over all `(key1, key2, value)`. + type Iterator: Iterator; + + /// Enumerate all elements in the map with first key `k1` in no particular order. If you add or + /// remove values whose first key is `k1` to the map while doing this, you'll get undefined + /// results. + fn iter_prefix(k1: impl EncodeLike) -> Self::PrefixIterator; + + /// Remove all elements from the map with first key `k1` and iterate through them in no + /// particular order. If you add elements with first key `k1` to the map while doing this, + /// you'll get undefined results. + fn drain_prefix(k1: impl EncodeLike) -> Self::PrefixIterator; + + /// Enumerate all elements in the map in no particular order. If you add or remove values to + /// the map while doing this, you'll get undefined results. + fn iter() -> Self::Iterator; + + /// Remove all elements from the map and iterate through them in no particular order. If you + /// add elements to the map while doing this, you'll get undefined results. + fn drain() -> Self::Iterator; + + /// Translate the values of all elements by a function `f`, in the map in no particular order. + /// By returning `None` from `f` for an element, you'll remove it from the map. + fn translate Option>(f: F); } /// An implementation of a map with a two keys. @@ -277,143 +288,149 @@ pub trait IterableStorageDoubleMap< /// Details on implementation can be found at /// [`generator::StorageDoubleMap`] pub trait StorageDoubleMap { - /// The type that get/take returns. 
- type Query; - - fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec - where - KArg1: EncodeLike, - KArg2: EncodeLike; - - fn contains_key(k1: KArg1, k2: KArg2) -> bool - where - KArg1: EncodeLike, - KArg2: EncodeLike; - - fn get(k1: KArg1, k2: KArg2) -> Self::Query - where - KArg1: EncodeLike, - KArg2: EncodeLike; - - fn take(k1: KArg1, k2: KArg2) -> Self::Query - where - KArg1: EncodeLike, - KArg2: EncodeLike; - - /// Swap the values of two key-pairs. - fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) - where - XKArg1: EncodeLike, - XKArg2: EncodeLike, - YKArg1: EncodeLike, - YKArg2: EncodeLike; - - fn insert(k1: KArg1, k2: KArg2, val: VArg) - where - KArg1: EncodeLike, - KArg2: EncodeLike, - VArg: EncodeLike; - - fn remove(k1: KArg1, k2: KArg2) - where - KArg1: EncodeLike, - KArg2: EncodeLike; - - fn remove_prefix(k1: KArg1) where KArg1: ?Sized + EncodeLike; - - fn iter_prefix_values(k1: KArg1) -> PrefixIterator - where KArg1: ?Sized + EncodeLike; - - fn mutate(k1: KArg1, k2: KArg2, f: F) -> R - where - KArg1: EncodeLike, - KArg2: EncodeLike, - F: FnOnce(&mut Self::Query) -> R; - - fn append( - k1: KArg1, - k2: KArg2, - items: Items, - ) -> Result<(), &'static str> - where - KArg1: EncodeLike, - KArg2: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator, - Items::IntoIter: ExactSizeIterator; - - fn append_or_insert( - k1: KArg1, - k2: KArg2, - items: Items, - ) - where - KArg1: EncodeLike, - KArg2: EncodeLike, - Item: Encode, - EncodeLikeItem: EncodeLike, - V: EncodeAppend, - Items: IntoIterator + Clone + EncodeLike, - Items::IntoIter: ExactSizeIterator; - - /// Read the length of the value in a fast way, without decoding the entire value. - /// - /// `V` is required to implement `Codec::DecodeLength`. - /// - /// Note that `0` is returned as the default value if no encoded value exists at the given key. - /// Therefore, this function cannot be used as a sign of _existence_. 
use the `::contains_key()` - /// function for this purpose. - fn decode_len(key1: KArg1, key2: KArg2) -> Result - where - KArg1: EncodeLike, - KArg2: EncodeLike, - V: codec::DecodeLength + Len; - - /// Migrate an item with the given `key1` and `key2` from defunct `OldHasher1` and - /// `OldHasher2` to the current hashers. - /// - /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. - fn migrate_keys< - OldHasher1: StorageHasher, - OldHasher2: StorageHasher, - KeyArg1: EncodeLike, - KeyArg2: EncodeLike, - >(key1: KeyArg1, key2: KeyArg2) -> Option; + /// The type that get/take returns. + type Query; + + fn hashed_key_for(k1: KArg1, k2: KArg2) -> Vec + where + KArg1: EncodeLike, + KArg2: EncodeLike; + + fn contains_key(k1: KArg1, k2: KArg2) -> bool + where + KArg1: EncodeLike, + KArg2: EncodeLike; + + fn get(k1: KArg1, k2: KArg2) -> Self::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike; + + fn take(k1: KArg1, k2: KArg2) -> Self::Query + where + KArg1: EncodeLike, + KArg2: EncodeLike; + + /// Swap the values of two key-pairs. 
+ fn swap(x_k1: XKArg1, x_k2: XKArg2, y_k1: YKArg1, y_k2: YKArg2) + where + XKArg1: EncodeLike, + XKArg2: EncodeLike, + YKArg1: EncodeLike, + YKArg2: EncodeLike; + + fn insert(k1: KArg1, k2: KArg2, val: VArg) + where + KArg1: EncodeLike, + KArg2: EncodeLike, + VArg: EncodeLike; + + fn remove(k1: KArg1, k2: KArg2) + where + KArg1: EncodeLike, + KArg2: EncodeLike; + + fn remove_prefix(k1: KArg1) + where + KArg1: ?Sized + EncodeLike; + + fn iter_prefix_values(k1: KArg1) -> PrefixIterator + where + KArg1: ?Sized + EncodeLike; + + fn mutate(k1: KArg1, k2: KArg2, f: F) -> R + where + KArg1: EncodeLike, + KArg2: EncodeLike, + F: FnOnce(&mut Self::Query) -> R; + + fn append( + k1: KArg1, + k2: KArg2, + items: Items, + ) -> Result<(), &'static str> + where + KArg1: EncodeLike, + KArg2: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator, + Items::IntoIter: ExactSizeIterator; + + fn append_or_insert( + k1: KArg1, + k2: KArg2, + items: Items, + ) where + KArg1: EncodeLike, + KArg2: EncodeLike, + Item: Encode, + EncodeLikeItem: EncodeLike, + V: EncodeAppend, + Items: IntoIterator + Clone + EncodeLike, + Items::IntoIter: ExactSizeIterator; + + /// Read the length of the value in a fast way, without decoding the entire value. + /// + /// `V` is required to implement `Codec::DecodeLength`. + /// + /// Note that `0` is returned as the default value if no encoded value exists at the given key. + /// Therefore, this function cannot be used as a sign of _existence_. use the `::contains_key()` + /// function for this purpose. + fn decode_len(key1: KArg1, key2: KArg2) -> Result + where + KArg1: EncodeLike, + KArg2: EncodeLike, + V: codec::DecodeLength + Len; + + /// Migrate an item with the given `key1` and `key2` from defunct `OldHasher1` and + /// `OldHasher2` to the current hashers. + /// + /// If the key doesn't exist, then it's a no-op. If it does, then it returns its value. 
+ fn migrate_keys< + OldHasher1: StorageHasher, + OldHasher2: StorageHasher, + KeyArg1: EncodeLike, + KeyArg2: EncodeLike, + >( + key1: KeyArg1, + key2: KeyArg2, + ) -> Option; } /// Iterator for prefixed map. pub struct PrefixIterator { - prefix: Vec, - previous_key: Vec, - phantom_data: PhantomData, + prefix: Vec, + previous_key: Vec, + phantom_data: PhantomData, } impl Iterator for PrefixIterator { - type Item = Value; - - fn next(&mut self) -> Option { - match sp_io::storage::next_key(&self.previous_key) - .filter(|n| n.starts_with(&self.prefix[..])) - { - Some(next_key) => { - let value = unhashed::get(&next_key); - - if value.is_none() { - runtime_print!( - "ERROR: returned next_key has no value:\nkey is {:?}\nnext_key is {:?}", - &self.previous_key, &next_key, - ); - } - - self.previous_key = next_key; - - value - }, - _ => None, - } - } + type Item = Value; + + fn next(&mut self) -> Option { + match sp_io::storage::next_key(&self.previous_key) + .filter(|n| n.starts_with(&self.prefix[..])) + { + Some(next_key) => { + let value = unhashed::get(&next_key); + + if value.is_none() { + runtime_print!( + "ERROR: returned next_key has no value:\nkey is {:?}\nnext_key is {:?}", + &self.previous_key, + &next_key, + ); + } + + self.previous_key = next_key; + + value + } + _ => None, + } + } } /// Trait for maps that store all its value after a unique prefix. @@ -423,159 +440,163 @@ impl Iterator for PrefixIterator { /// Twox128(module_prefix) ++ Twox128(storage_prefix) /// ``` pub trait StoragePrefixedMap { - - /// Module prefix. Used for generating final key. - fn module_prefix() -> &'static [u8]; - - /// Storage prefix. Used for generating final key. - fn storage_prefix() -> &'static [u8]; - - /// Final full prefix that prefixes all keys. 
- fn final_prefix() -> [u8; 32] { - let mut final_key = [0u8; 32]; - final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix())); - final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix())); - final_key - } - - /// Remove all value of the storage. - fn remove_all() { - sp_io::storage::clear_prefix(&Self::final_prefix()) - } - - /// Iter over all value of the storage. - fn iter_values() -> PrefixIterator { - let prefix = Self::final_prefix(); - PrefixIterator { - prefix: prefix.to_vec(), - previous_key: prefix.to_vec(), - phantom_data: Default::default(), - } - } - - /// Translate the values from some previous `OldValue` to the current type. - /// - /// `TV` translates values. - /// - /// Returns `Err` if the map could not be interpreted as the old type, and Ok if it could. - /// The `Err` contains the number of value that couldn't be interpreted, those value are - /// removed from the map. - /// - /// # Warning - /// - /// This function must be used with care, before being updated the storage still contains the - /// old type, thus other calls (such as `get`) will fail at decoding it. - /// - /// # Usage - /// - /// This would typically be called inside the module implementation of on_runtime_upgrade, while - /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More - /// precisely prior initialized modules doesn't make use of this storage). - fn translate_values(translate_val: TV) -> Result<(), u32> - where OldValue: Decode, TV: Fn(OldValue) -> Value - { - let prefix = Self::final_prefix(); - let mut previous_key = prefix.to_vec(); - let mut errors = 0; - while let Some(next_key) = sp_io::storage::next_key(&previous_key) - .filter(|n| n.starts_with(&prefix[..])) - { - if let Some(value) = unhashed::get(&next_key) { - unhashed::put(&next_key[..], &translate_val(value)); - } else { - // We failed to read the value. Remove the key and increment errors. 
- unhashed::kill(&next_key[..]); - errors += 1; - } - - previous_key = next_key; - } - - if errors == 0 { - Ok(()) - } else { - Err(errors) - } - } + /// Module prefix. Used for generating final key. + fn module_prefix() -> &'static [u8]; + + /// Storage prefix. Used for generating final key. + fn storage_prefix() -> &'static [u8]; + + /// Final full prefix that prefixes all keys. + fn final_prefix() -> [u8; 32] { + let mut final_key = [0u8; 32]; + final_key[0..16].copy_from_slice(&Twox128::hash(Self::module_prefix())); + final_key[16..32].copy_from_slice(&Twox128::hash(Self::storage_prefix())); + final_key + } + + /// Remove all value of the storage. + fn remove_all() { + sp_io::storage::clear_prefix(&Self::final_prefix()) + } + + /// Iter over all value of the storage. + fn iter_values() -> PrefixIterator { + let prefix = Self::final_prefix(); + PrefixIterator { + prefix: prefix.to_vec(), + previous_key: prefix.to_vec(), + phantom_data: Default::default(), + } + } + + /// Translate the values from some previous `OldValue` to the current type. + /// + /// `TV` translates values. + /// + /// Returns `Err` if the map could not be interpreted as the old type, and Ok if it could. + /// The `Err` contains the number of value that couldn't be interpreted, those value are + /// removed from the map. + /// + /// # Warning + /// + /// This function must be used with care, before being updated the storage still contains the + /// old type, thus other calls (such as `get`) will fail at decoding it. + /// + /// # Usage + /// + /// This would typically be called inside the module implementation of on_runtime_upgrade, while + /// ensuring **no usage of this storage are made before the call to `on_runtime_upgrade`**. (More + /// precisely prior initialized modules doesn't make use of this storage). 
+ fn translate_values(translate_val: TV) -> Result<(), u32> + where + OldValue: Decode, + TV: Fn(OldValue) -> Value, + { + let prefix = Self::final_prefix(); + let mut previous_key = prefix.to_vec(); + let mut errors = 0; + while let Some(next_key) = + sp_io::storage::next_key(&previous_key).filter(|n| n.starts_with(&prefix[..])) + { + if let Some(value) = unhashed::get(&next_key) { + unhashed::put(&next_key[..], &translate_val(value)); + } else { + // We failed to read the value. Remove the key and increment errors. + unhashed::kill(&next_key[..]); + errors += 1; + } + + previous_key = next_key; + } + + if errors == 0 { + Ok(()) + } else { + Err(errors) + } + } } #[cfg(test)] mod test { - use sp_core::hashing::twox_128; - use sp_io::TestExternalities; - use crate::storage::{unhashed, StoragePrefixedMap}; - - #[test] - fn prefixed_map_works() { - TestExternalities::default().execute_with(|| { - struct MyStorage; - impl StoragePrefixedMap for MyStorage { - fn module_prefix() -> &'static [u8] { - b"MyModule" - } - - fn storage_prefix() -> &'static [u8] { - b"MyStorage" - } - } - - let key_before = { - let mut k = MyStorage::final_prefix(); - let last = k.iter_mut().last().unwrap(); - *last = last.checked_sub(1).unwrap(); - k - }; - let key_after = { - let mut k = MyStorage::final_prefix(); - let last = k.iter_mut().last().unwrap(); - *last = last.checked_add(1).unwrap(); - k - }; - - unhashed::put(&key_before[..], &32u64); - unhashed::put(&key_after[..], &33u64); - - let k = [twox_128(b"MyModule"), twox_128(b"MyStorage")].concat(); - assert_eq!(MyStorage::final_prefix().to_vec(), k); - - // test iteration - assert_eq!(MyStorage::iter_values().collect::>(), vec![]); - - unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u64); - unhashed::put(&[&k[..], &vec![1, 1][..]].concat(), &2u64); - unhashed::put(&[&k[..], &vec![8][..]].concat(), &3u64); - unhashed::put(&[&k[..], &vec![10][..]].concat(), &4u64); - - assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3, 
4]); - - // test removal - MyStorage::remove_all(); - assert_eq!(MyStorage::iter_values().collect::>(), vec![]); - - // test migration - unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u32); - unhashed::put(&[&k[..], &vec![8][..]].concat(), &2u32); - - assert_eq!(MyStorage::iter_values().collect::>(), vec![]); - MyStorage::translate_values(|v: u32| v as u64).unwrap(); - assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2]); - MyStorage::remove_all(); - - // test migration 2 - unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u128); - unhashed::put(&[&k[..], &vec![1, 1][..]].concat(), &2u64); - unhashed::put(&[&k[..], &vec![8][..]].concat(), &3u128); - unhashed::put(&[&k[..], &vec![10][..]].concat(), &4u32); - - // (contains some value that successfully decoded to u64) - assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); - assert_eq!(MyStorage::translate_values(|v: u128| v as u64), Err(2)); - assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 3]); - MyStorage::remove_all(); - - // test that other values are not modified. 
- assert_eq!(unhashed::get(&key_before[..]), Some(32u64)); - assert_eq!(unhashed::get(&key_after[..]), Some(33u64)); - }); - } + use crate::storage::{unhashed, StoragePrefixedMap}; + use sp_core::hashing::twox_128; + use sp_io::TestExternalities; + + #[test] + fn prefixed_map_works() { + TestExternalities::default().execute_with(|| { + struct MyStorage; + impl StoragePrefixedMap for MyStorage { + fn module_prefix() -> &'static [u8] { + b"MyModule" + } + + fn storage_prefix() -> &'static [u8] { + b"MyStorage" + } + } + + let key_before = { + let mut k = MyStorage::final_prefix(); + let last = k.iter_mut().last().unwrap(); + *last = last.checked_sub(1).unwrap(); + k + }; + let key_after = { + let mut k = MyStorage::final_prefix(); + let last = k.iter_mut().last().unwrap(); + *last = last.checked_add(1).unwrap(); + k + }; + + unhashed::put(&key_before[..], &32u64); + unhashed::put(&key_after[..], &33u64); + + let k = [twox_128(b"MyModule"), twox_128(b"MyStorage")].concat(); + assert_eq!(MyStorage::final_prefix().to_vec(), k); + + // test iteration + assert_eq!(MyStorage::iter_values().collect::>(), vec![]); + + unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u64); + unhashed::put(&[&k[..], &vec![1, 1][..]].concat(), &2u64); + unhashed::put(&[&k[..], &vec![8][..]].concat(), &3u64); + unhashed::put(&[&k[..], &vec![10][..]].concat(), &4u64); + + assert_eq!( + MyStorage::iter_values().collect::>(), + vec![1, 2, 3, 4] + ); + + // test removal + MyStorage::remove_all(); + assert_eq!(MyStorage::iter_values().collect::>(), vec![]); + + // test migration + unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u32); + unhashed::put(&[&k[..], &vec![8][..]].concat(), &2u32); + + assert_eq!(MyStorage::iter_values().collect::>(), vec![]); + MyStorage::translate_values(|v: u32| v as u64).unwrap(); + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2]); + MyStorage::remove_all(); + + // test migration 2 + unhashed::put(&[&k[..], &vec![1][..]].concat(), &1u128); + 
unhashed::put(&[&k[..], &vec![1, 1][..]].concat(), &2u64); + unhashed::put(&[&k[..], &vec![8][..]].concat(), &3u128); + unhashed::put(&[&k[..], &vec![10][..]].concat(), &4u32); + + // (contains some value that successfully decoded to u64) + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 2, 3]); + assert_eq!(MyStorage::translate_values(|v: u128| v as u64), Err(2)); + assert_eq!(MyStorage::iter_values().collect::>(), vec![1, 3]); + MyStorage::remove_all(); + + // test that other values are not modified. + assert_eq!(unhashed::get(&key_before[..]), Some(32u64)); + assert_eq!(unhashed::get(&key_after[..]), Some(33u64)); + }); + } } diff --git a/frame/support/src/storage/unhashed.rs b/frame/support/src/storage/unhashed.rs index 1ecf46ef18..fafa18b2ae 100644 --- a/frame/support/src/storage/unhashed.rs +++ b/frame/support/src/storage/unhashed.rs @@ -16,88 +16,88 @@ //! Operation on unhashed runtime storage. +use codec::{Decode, Encode}; use sp_std::prelude::*; -use codec::{Encode, Decode}; /// Return the value of the item in storage under `key`, or `None` if there is no explicit entry. pub fn get(key: &[u8]) -> Option { - sp_io::storage::get(key).and_then(|val| { - Decode::decode(&mut &val[..]).map(Some).unwrap_or_else(|_| { - // TODO #3700: error should be handleable. - runtime_print!("ERROR: Corrupted state at {:?}", key); - None - }) - }) + sp_io::storage::get(key).and_then(|val| { + Decode::decode(&mut &val[..]).map(Some).unwrap_or_else(|_| { + // TODO #3700: error should be handleable. + runtime_print!("ERROR: Corrupted state at {:?}", key); + None + }) + }) } /// Return the value of the item in storage under `key`, or the type's default if there is no /// explicit entry. pub fn get_or_default(key: &[u8]) -> T { - get(key).unwrap_or_else(Default::default) + get(key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. 
pub fn get_or(key: &[u8], default_value: T) -> T { - get(key).unwrap_or(default_value) + get(key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. pub fn get_or_else T>(key: &[u8], default_value: F) -> T { - get(key).unwrap_or_else(default_value) + get(key).unwrap_or_else(default_value) } /// Put `value` in storage under `key`. pub fn put(key: &[u8], value: &T) { - value.using_encoded(|slice| sp_io::storage::set(key, slice)); + value.using_encoded(|slice| sp_io::storage::set(key, slice)); } /// Remove `key` from storage, returning its value if it had an explicit entry or `None` otherwise. pub fn take(key: &[u8]) -> Option { - let r = get(key); - if r.is_some() { - kill(key); - } - r + let r = get(key); + if r.is_some() { + kill(key); + } + r } /// Remove `key` from storage, returning its value, or, if there was no explicit entry in storage, /// the default for its type. pub fn take_or_default(key: &[u8]) -> T { - take(key).unwrap_or_else(Default::default) + take(key).unwrap_or_else(Default::default) } /// Return the value of the item in storage under `key`, or `default_value` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or(key: &[u8], default_value: T) -> T { - take(key).unwrap_or(default_value) + take(key).unwrap_or(default_value) } /// Return the value of the item in storage under `key`, or `default_value()` if there is no /// explicit entry. Ensure there is no explicit entry on return. pub fn take_or_else T>(key: &[u8], default_value: F) -> T { - take(key).unwrap_or_else(default_value) + take(key).unwrap_or_else(default_value) } /// Check to see if `key` has an explicit entry in storage. pub fn exists(key: &[u8]) -> bool { - sp_io::storage::read(key, &mut [0;0][..], 0).is_some() + sp_io::storage::read(key, &mut [0; 0][..], 0).is_some() } /// Ensure `key` has no explicit entry in storage. 
pub fn kill(key: &[u8]) { - sp_io::storage::clear(key); + sp_io::storage::clear(key); } /// Ensure keys with the given `prefix` have no entries in storage. pub fn kill_prefix(prefix: &[u8]) { - sp_io::storage::clear_prefix(prefix); + sp_io::storage::clear_prefix(prefix); } /// Get a Vec of bytes from storage. pub fn get_raw(key: &[u8]) -> Option> { - sp_io::storage::get(key) + sp_io::storage::get(key) } /// Put a raw byte slice into storage. @@ -106,5 +106,5 @@ pub fn get_raw(key: &[u8]) -> Option> { /// you should also call `frame_system::RuntimeUpgraded::put(true)` to trigger the /// `on_runtime_upgrade` logic. pub fn put_raw(key: &[u8], value: &[u8]) { - sp_io::storage::set(key, value) + sp_io::storage::set(key, value) } diff --git a/frame/support/src/traits.rs b/frame/support/src/traits.rs index 35e1231698..37fc5e01a7 100644 --- a/frame/support/src/traits.rs +++ b/frame/support/src/traits.rs @@ -18,51 +18,57 @@ //! //! NOTE: If you're looking for `parameter_types`, it has moved in to the top-level module. 
-use sp_std::{prelude::*, result, marker::PhantomData, ops::Div, fmt::Debug}; -use codec::{FullCodec, Codec, Encode, Decode, EncodeLike}; -use sp_core::u32_trait::Value as U32; -use sp_runtime::{ - RuntimeDebug, ConsensusEngineId, DispatchResult, DispatchError, traits::{ - MaybeSerializeDeserialize, AtLeast32Bit, Saturating, TrailingZeroInput, Bounded, Zero, - BadOrigin - }, -}; use crate::dispatch::Parameter; use crate::storage::StorageMap; +use codec::{Codec, Decode, Encode, EncodeLike, FullCodec}; use impl_trait_for_tuples::impl_for_tuples; +use sp_core::u32_trait::Value as U32; +use sp_runtime::{ + traits::{ + AtLeast32Bit, BadOrigin, Bounded, MaybeSerializeDeserialize, Saturating, TrailingZeroInput, + Zero, + }, + ConsensusEngineId, DispatchError, DispatchResult, RuntimeDebug, +}; +use sp_std::{fmt::Debug, marker::PhantomData, ops::Div, prelude::*, result}; /// An abstraction of a value stored within storage, but possibly as part of a larger composite /// item. pub trait StoredMap { - /// Get the item, or its default if it doesn't yet exist; we make no distinction between the - /// two. - fn get(k: &K) -> T; - /// Get whether the item takes up any storage. If this is `false`, then `get` will certainly - /// return the `T::default()`. If `true`, then there is no implication for `get` (i.e. it - /// may return any value, including the default). - /// - /// NOTE: This may still be `true`, even after `remove` is called. This is the case where - /// a single storage entry is shared between multiple `StoredMap` items single, without - /// additional logic to enforce it, deletion of any one them doesn't automatically imply - /// deletion of them all. - fn is_explicit(k: &K) -> bool; - /// Mutate the item. - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R; - /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. 
- fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R; - /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is - /// returned. It is removed or reset to default value if it has been mutated to `None` - fn try_mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> Result) -> Result; - /// Set the item to something new. - fn insert(k: &K, t: T) { Self::mutate(k, |i| *i = t); } - /// Remove the item or otherwise replace it with its default value; we don't care which. - fn remove(k: &K); + /// Get the item, or its default if it doesn't yet exist; we make no distinction between the + /// two. + fn get(k: &K) -> T; + /// Get whether the item takes up any storage. If this is `false`, then `get` will certainly + /// return the `T::default()`. If `true`, then there is no implication for `get` (i.e. it + /// may return any value, including the default). + /// + /// NOTE: This may still be `true`, even after `remove` is called. This is the case where + /// a single storage entry is shared between multiple `StoredMap` items single, without + /// additional logic to enforce it, deletion of any one them doesn't automatically imply + /// deletion of them all. + fn is_explicit(k: &K) -> bool; + /// Mutate the item. + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R; + /// Mutate the item, removing or resetting to default value if it has been mutated to `None`. + fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R; + /// Maybe mutate the item only if an `Ok` value is returned from `f`. Do nothing if an `Err` is + /// returned. It is removed or reset to default value if it has been mutated to `None` + fn try_mutate_exists( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result; + /// Set the item to something new. + fn insert(k: &K, t: T) { + Self::mutate(k, |i| *i = t); + } + /// Remove the item or otherwise replace it with its default value; we don't care which. 
+ fn remove(k: &K); } /// A simple, generic one-parameter event notifier/handler. pub trait Happened { - /// The thing happened. - fn happened(t: &T); + /// The thing happened. + fn happened(t: &T); } /// A shim for placing around a storage item in order to use it as a `StoredValue`. Ideally this @@ -76,210 +82,228 @@ pub trait Happened { /// be the default value), or where the account is being removed or reset back to the default value /// where previously it did exist (though may have been in a default state). This works well with /// system module's `CallOnCreatedAccount` and `CallKillAccount`. -pub struct StorageMapShim< - S, - Created, - Removed, - K, - T ->(sp_std::marker::PhantomData<(S, Created, Removed, K, T)>); +pub struct StorageMapShim( + sp_std::marker::PhantomData<(S, Created, Removed, K, T)>, +); impl< - S: StorageMap, - Created: Happened, - Removed: Happened, - K: FullCodec, - T: FullCodec, -> StoredMap for StorageMapShim { - fn get(k: &K) -> T { S::get(k) } - fn is_explicit(k: &K) -> bool { S::contains_key(k) } - fn insert(k: &K, t: T) { - S::insert(k, t); - if !S::contains_key(&k) { - Created::happened(k); - } - } - fn remove(k: &K) { - if S::contains_key(&k) { - Removed::happened(&k); - } - S::remove(k); - } - fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R { - let r = S::mutate(k, f); - if !S::contains_key(&k) { - Created::happened(k); - } - r - } - fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R { - let (existed, exists, r) = S::mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let r = f(maybe_value); - (existed, maybe_value.is_some(), r) - }); - if !existed && exists { - Created::happened(k); - } else if existed && !exists { - Removed::happened(k); - } - r - } - fn try_mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> Result) -> Result { - S::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - f(maybe_value).map(|v| (existed, maybe_value.is_some(), v)) - }).map(|(existed, 
exists, v)| { - if !existed && exists { - Created::happened(k); - } else if existed && !exists { - Removed::happened(k); - } - v - }) - } + S: StorageMap, + Created: Happened, + Removed: Happened, + K: FullCodec, + T: FullCodec, + > StoredMap for StorageMapShim +{ + fn get(k: &K) -> T { + S::get(k) + } + fn is_explicit(k: &K) -> bool { + S::contains_key(k) + } + fn insert(k: &K, t: T) { + S::insert(k, t); + if !S::contains_key(&k) { + Created::happened(k); + } + } + fn remove(k: &K) { + if S::contains_key(&k) { + Removed::happened(&k); + } + S::remove(k); + } + fn mutate(k: &K, f: impl FnOnce(&mut T) -> R) -> R { + let r = S::mutate(k, f); + if !S::contains_key(&k) { + Created::happened(k); + } + r + } + fn mutate_exists(k: &K, f: impl FnOnce(&mut Option) -> R) -> R { + let (existed, exists, r) = S::mutate_exists(k, |maybe_value| { + let existed = maybe_value.is_some(); + let r = f(maybe_value); + (existed, maybe_value.is_some(), r) + }); + if !existed && exists { + Created::happened(k); + } else if existed && !exists { + Removed::happened(k); + } + r + } + fn try_mutate_exists( + k: &K, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + S::try_mutate_exists(k, |maybe_value| { + let existed = maybe_value.is_some(); + f(maybe_value).map(|v| (existed, maybe_value.is_some(), v)) + }) + .map(|(existed, exists, v)| { + if !existed && exists { + Created::happened(k); + } else if existed && !exists { + Removed::happened(k); + } + v + }) + } } /// Something that can estimate at which block the next session rotation will happen. This should /// be the same logical unit that dictates `ShouldEndSession` to the session module. No Assumptions /// are made about the scheduling of the sessions. pub trait EstimateNextSessionRotation { - /// Return the block number at which the next session rotation is estimated to happen. 
- /// - /// None should be returned if the estimation fails to come to an answer - fn estimate_next_session_rotation(now: BlockNumber) -> Option; + /// Return the block number at which the next session rotation is estimated to happen. + /// + /// None should be returned if the estimation fails to come to an answer + fn estimate_next_session_rotation(now: BlockNumber) -> Option; } impl EstimateNextSessionRotation for () { - fn estimate_next_session_rotation(_: BlockNumber) -> Option { - Default::default() - } + fn estimate_next_session_rotation(_: BlockNumber) -> Option { + Default::default() + } } /// Something that can estimate at which block the next `new_session` will be triggered. This must /// always be implemented by the session module. pub trait EstimateNextNewSession { - /// Return the block number at which the next new session is estimated to happen. - fn estimate_next_new_session(now: BlockNumber) -> Option; + /// Return the block number at which the next new session is estimated to happen. + fn estimate_next_new_session(now: BlockNumber) -> Option; } impl EstimateNextNewSession for () { - fn estimate_next_new_session(_: BlockNumber) -> Option { - Default::default() - } + fn estimate_next_new_session(_: BlockNumber) -> Option { + Default::default() + } } /// Anything that can have a `::len()` method. pub trait Len { - /// Return the length of data type. - fn len(&self) -> usize; + /// Return the length of data type. + fn len(&self) -> usize; } -impl Len for T where ::IntoIter: ExactSizeIterator { - fn len(&self) -> usize { - self.clone().into_iter().len() - } +impl Len for T +where + ::IntoIter: ExactSizeIterator, +{ + fn len(&self) -> usize { + self.clone().into_iter().len() + } } /// A trait for querying a single fixed value from a type. pub trait Get { - /// Return a constant value. - fn get() -> T; + /// Return a constant value. 
+ fn get() -> T; } impl Get for () { - fn get() -> T { T::default() } + fn get() -> T { + T::default() + } } /// A trait for querying whether a type can be said to statically "contain" a value. Similar /// in nature to `Get`, except it is designed to be lazy rather than active (you can't ask it to /// enumerate all values that it contains) and work for multiple values rather than just one. pub trait Contains { - /// Return `true` if this "contains" the given value `t`. - fn contains(t: &T) -> bool { Self::sorted_members().binary_search(t).is_ok() } - - /// Get a vector of all members in the set, ordered. - fn sorted_members() -> Vec; - - /// Get the number of items in the set. - fn count() -> usize { Self::sorted_members().len() } - - /// Add an item that would satisfy `contains`. It does not make sure any other - /// state is correctly maintained or generated. - /// - /// **Should be used for benchmarking only!!!** - #[cfg(feature = "runtime-benchmarks")] - fn add(t: &T) { unimplemented!() } + /// Return `true` if this "contains" the given value `t`. + fn contains(t: &T) -> bool { + Self::sorted_members().binary_search(t).is_ok() + } + + /// Get a vector of all members in the set, ordered. + fn sorted_members() -> Vec; + + /// Get the number of items in the set. + fn count() -> usize { + Self::sorted_members().len() + } + + /// Add an item that would satisfy `contains`. It does not make sure any other + /// state is correctly maintained or generated. + /// + /// **Should be used for benchmarking only!!!** + #[cfg(feature = "runtime-benchmarks")] + fn add(t: &T) { + unimplemented!() + } } /// Determiner to say whether a given account is unused. pub trait IsDeadAccount { - /// Is the given account dead? - fn is_dead_account(who: &AccountId) -> bool; + /// Is the given account dead? 
+ fn is_dead_account(who: &AccountId) -> bool; } impl IsDeadAccount for () { - fn is_dead_account(_who: &AccountId) -> bool { - true - } + fn is_dead_account(_who: &AccountId) -> bool { + true + } } /// Handler for when a new account has been created. #[impl_for_tuples(30)] pub trait OnNewAccount { - /// A new account `who` has been registered. - fn on_new_account(who: &AccountId); + /// A new account `who` has been registered. + fn on_new_account(who: &AccountId); } /// The account with the given id was reaped. #[impl_for_tuples(30)] pub trait OnKilledAccount { - /// The account with the given id was reaped. - fn on_killed_account(who: &AccountId); + /// The account with the given id was reaped. + fn on_killed_account(who: &AccountId); } /// A trait for finding the author of a block header based on the `PreRuntime` digests contained /// within it. pub trait FindAuthor { - /// Find the author of a block based on the pre-runtime digests. - fn find_author<'a, I>(digests: I) -> Option - where I: 'a + IntoIterator; + /// Find the author of a block based on the pre-runtime digests. + fn find_author<'a, I>(digests: I) -> Option + where + I: 'a + IntoIterator; } impl FindAuthor for () { - fn find_author<'a, I>(_: I) -> Option - where I: 'a + IntoIterator - { - None - } + fn find_author<'a, I>(_: I) -> Option + where + I: 'a + IntoIterator, + { + None + } } /// A trait for verifying the seal of a header and returning the author. pub trait VerifySeal { - /// Verify a header and return the author, if any. - fn verify_seal(header: &Header) -> Result, &'static str>; + /// Verify a header and return the author, if any. + fn verify_seal(header: &Header) -> Result, &'static str>; } /// Something which can compute and check proofs of /// a historical key owner and return full identification data of that /// key owner. pub trait KeyOwnerProofSystem { - /// The proof of membership itself. - type Proof: Codec; - /// The full identification of a key owner and the stash account. 
- type IdentificationTuple: Codec; - - /// Prove membership of a key owner in the current block-state. - /// - /// This should typically only be called off-chain, since it may be - /// computationally heavy. - /// - /// Returns `Some` iff the key owner referred to by the given `key` is a - /// member of the current set. - fn prove(key: Key) -> Option; - - /// Check a proof of membership on-chain. Return `Some` iff the proof is - /// valid and recent enough to check. - fn check_proof(key: Key, proof: Self::Proof) -> Option; + /// The proof of membership itself. + type Proof: Codec; + /// The full identification of a key owner and the stash account. + type IdentificationTuple: Codec; + + /// Prove membership of a key owner in the current block-state. + /// + /// This should typically only be called off-chain, since it may be + /// computationally heavy. + /// + /// Returns `Some` iff the key owner referred to by the given `key` is a + /// member of the current set. + fn prove(key: Key) -> Option; + + /// Check a proof of membership on-chain. Return `Some` iff the proof is + /// valid and recent enough to check. + fn check_proof(key: Key, proof: Self::Proof) -> Option; } /// Handler for when some currency "account" decreased in balance for @@ -293,21 +317,28 @@ pub trait KeyOwnerProofSystem { /// - Someone got slashed. /// - Someone paid for a transaction to be included. pub trait OnUnbalanced { - /// Handler for some imbalances. The different imbalances might have different origins or - /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all - /// of them. Infallible. - fn on_unbalanceds(amounts: impl Iterator) where Imbalance: crate::traits::Imbalance { - Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) - } - - /// Handler for some imbalance. Infallible. - fn on_unbalanced(amount: Imbalance) { - amount.try_drop().unwrap_or_else(Self::on_nonzero_unbalanced) - } - - /// Actually handle a non-zero imbalance. 
You probably want to implement this rather than - /// `on_unbalanced`. - fn on_nonzero_unbalanced(amount: Imbalance) { drop(amount); } + /// Handler for some imbalances. The different imbalances might have different origins or + /// meanings, dependent on the context. Will default to simply calling on_unbalanced for all + /// of them. Infallible. + fn on_unbalanceds(amounts: impl Iterator) + where + Imbalance: crate::traits::Imbalance, + { + Self::on_unbalanced(amounts.fold(Imbalance::zero(), |i, x| x.merge(i))) + } + + /// Handler for some imbalance. Infallible. + fn on_unbalanced(amount: Imbalance) { + amount + .try_drop() + .unwrap_or_else(Self::on_nonzero_unbalanced) + } + + /// Actually handle a non-zero imbalance. You probably want to implement this rather than + /// `on_unbalanced`. + fn on_nonzero_unbalanced(amount: Imbalance) { + drop(amount); + } } impl OnUnbalanced for () {} @@ -315,19 +346,19 @@ impl OnUnbalanced for () {} /// Simple boolean for whether an account needs to be kept in existence. #[derive(Copy, Clone, Eq, PartialEq)] pub enum ExistenceRequirement { - /// Operation must not result in the account going out of existence. - /// - /// Note this implies that if the account never existed in the first place, then the operation - /// may legitimately leave the account unchanged and still non-existent. - KeepAlive, - /// Operation may result in account going out of existence. - AllowDeath, + /// Operation must not result in the account going out of existence. + /// + /// Note this implies that if the account never existed in the first place, then the operation + /// may legitimately leave the account unchanged and still non-existent. + KeepAlive, + /// Operation may result in account going out of existence. + AllowDeath, } /// A type for which some values make sense to be able to drop without further consideration. pub trait TryDrop: Sized { - /// Drop an instance cleanly. Only works if its value represents "no-operation". 
- fn try_drop(self) -> Result<(), Self>; + /// Drop an instance cleanly. Only works if its value represents "no-operation". + fn try_drop(self) -> Result<(), Self>; } /// A trait for a not-quite Linear Type that tracks an imbalance. @@ -360,427 +391,420 @@ pub trait TryDrop: Sized { /// You can always retrieve the raw balance value using `peek`. #[must_use] pub trait Imbalance: Sized + TryDrop { - /// The oppositely imbalanced type. They come in pairs. - type Opposite: Imbalance; - - /// The zero imbalance. Can be destroyed with `drop_zero`. - fn zero() -> Self; - - /// Drop an instance cleanly. Only works if its `self.value()` is zero. - fn drop_zero(self) -> Result<(), Self>; - - /// Consume `self` and return two independent instances; the first - /// is guaranteed to be at most `amount` and the second will be the remainder. - fn split(self, amount: Balance) -> (Self, Self); - - /// Consume `self` and return two independent instances; the amounts returned will be in - /// approximately the same ratio as `first`:`second`. - /// - /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should - /// fit into a `u32`. Overflow will safely saturate in both cases. - fn ration(self, first: u32, second: u32) -> (Self, Self) - where Balance: From + Saturating + Div - { - let total: u32 = first.saturating_add(second); - let amount1 = self.peek().saturating_mul(first.into()) / total.into(); - self.split(amount1) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) { - let (a, b) = self.split(amount); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. 
- /// - /// A convenient replacement for `split` and `merge`. - fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - (a.merge(others.0), b.merge(others.1)) - } - - /// Consume self and add its two components, defined by the first component's balance, - /// element-wise into two pre-existing Imbalance refs. - /// - /// A convenient replacement for `split` and `subsume`. - fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) { - let (a, b) = self.split(amount); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume self and add its two components, defined by the ratio `first`:`second`, - /// element-wise to two pre-existing Imbalances. - /// - /// A convenient replacement for `split` and `merge`. - fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) - where Balance: From + Saturating + Div - { - let (a, b) = self.ration(first, second); - others.0.subsume(a); - others.1.subsume(b); - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. - fn merge(self, other: Self) -> Self; - - /// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with - /// reversed arguments. - fn merge_into(self, other: &mut Self) { - other.subsume(self) - } - - /// Consume `self` and maybe an `other` to return a new instance that combines - /// both. - fn maybe_merge(self, other: Option) -> Self { - if let Some(o) = other { - self.merge(o) - } else { - self - } - } - - /// Consume an `other` to mutate `self` into a new instance that combines - /// both. - fn subsume(&mut self, other: Self); - - /// Maybe consume an `other` to mutate `self` into a new instance that combines - /// both. 
- fn maybe_subsume(&mut self, other: Option) { - if let Some(o) = other { - self.subsume(o) - } - } - - /// Consume self and along with an opposite counterpart to return - /// a combined result. - /// - /// Returns `Ok` along with a new instance of `Self` if this instance has a - /// greater value than the `other`. Otherwise returns `Err` with an instance of - /// the `Opposite`. In both cases the value represents the combination of `self` - /// and `other`. - fn offset(self, other: Self::Opposite) -> Result; - - /// The raw value of self. - fn peek(&self) -> Balance; + /// The oppositely imbalanced type. They come in pairs. + type Opposite: Imbalance; + + /// The zero imbalance. Can be destroyed with `drop_zero`. + fn zero() -> Self; + + /// Drop an instance cleanly. Only works if its `self.value()` is zero. + fn drop_zero(self) -> Result<(), Self>; + + /// Consume `self` and return two independent instances; the first + /// is guaranteed to be at most `amount` and the second will be the remainder. + fn split(self, amount: Balance) -> (Self, Self); + + /// Consume `self` and return two independent instances; the amounts returned will be in + /// approximately the same ratio as `first`:`second`. + /// + /// NOTE: This requires up to `first + second` room for a multiply, and `first + second` should + /// fit into a `u32`. Overflow will safely saturate in both cases. + fn ration(self, first: u32, second: u32) -> (Self, Self) + where + Balance: From + Saturating + Div, + { + let total: u32 = first.saturating_add(second); + let amount1 = self.peek().saturating_mul(first.into()) / total.into(); + self.split(amount1) + } + + /// Consume self and add its two components, defined by the first component's balance, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. 
+ fn split_merge(self, amount: Balance, others: (Self, Self)) -> (Self, Self) { + let (a, b) = self.split(amount); + (a.merge(others.0), b.merge(others.1)) + } + + /// Consume self and add its two components, defined by the ratio `first`:`second`, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. + fn ration_merge(self, first: u32, second: u32, others: (Self, Self)) -> (Self, Self) + where + Balance: From + Saturating + Div, + { + let (a, b) = self.ration(first, second); + (a.merge(others.0), b.merge(others.1)) + } + + /// Consume self and add its two components, defined by the first component's balance, + /// element-wise into two pre-existing Imbalance refs. + /// + /// A convenient replacement for `split` and `subsume`. + fn split_merge_into(self, amount: Balance, others: &mut (Self, Self)) { + let (a, b) = self.split(amount); + others.0.subsume(a); + others.1.subsume(b); + } + + /// Consume self and add its two components, defined by the ratio `first`:`second`, + /// element-wise to two pre-existing Imbalances. + /// + /// A convenient replacement for `split` and `merge`. + fn ration_merge_into(self, first: u32, second: u32, others: &mut (Self, Self)) + where + Balance: From + Saturating + Div, + { + let (a, b) = self.ration(first, second); + others.0.subsume(a); + others.1.subsume(b); + } + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + fn merge(self, other: Self) -> Self; + + /// Consume self to mutate `other` so that it combines both. Just like `subsume`, only with + /// reversed arguments. + fn merge_into(self, other: &mut Self) { + other.subsume(self) + } + + /// Consume `self` and maybe an `other` to return a new instance that combines + /// both. + fn maybe_merge(self, other: Option) -> Self { + if let Some(o) = other { + self.merge(o) + } else { + self + } + } + + /// Consume an `other` to mutate `self` into a new instance that combines + /// both. 
+ fn subsume(&mut self, other: Self); + + /// Maybe consume an `other` to mutate `self` into a new instance that combines + /// both. + fn maybe_subsume(&mut self, other: Option) { + if let Some(o) = other { + self.subsume(o) + } + } + + /// Consume self and along with an opposite counterpart to return + /// a combined result. + /// + /// Returns `Ok` along with a new instance of `Self` if this instance has a + /// greater value than the `other`. Otherwise returns `Err` with an instance of + /// the `Opposite`. In both cases the value represents the combination of `self` + /// and `other`. + fn offset(self, other: Self::Opposite) -> Result; + + /// The raw value of self. + fn peek(&self) -> Balance; } /// Either a positive or a negative imbalance. -pub enum SignedImbalance>{ - /// A positive imbalance (funds have been created but none destroyed). - Positive(P), - /// A negative imbalance (funds have been destroyed but none created). - Negative(P::Opposite), +pub enum SignedImbalance> { + /// A positive imbalance (funds have been created but none destroyed). + Positive(P), + /// A negative imbalance (funds have been destroyed but none created). + Negative(P::Opposite), } impl< - P: Imbalance, - N: Imbalance, - B: AtLeast32Bit + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, -> SignedImbalance { - pub fn zero() -> Self { - SignedImbalance::Positive(P::zero()) - } - - pub fn drop_zero(self) -> Result<(), Self> { - match self { - SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), - SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), - } - } - - /// Consume `self` and an `other` to return a new instance that combines - /// both. 
- pub fn merge(self, other: Self) -> Self { - match (self, other) { - (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => - SignedImbalance::Positive(one.merge(other)), - (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => - SignedImbalance::Negative(one.merge(other)), - (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => - if one.peek() > other.peek() { - SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero)) - } else { - SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero)) - }, - (one, other) => other.merge(one), - } - } + P: Imbalance, + N: Imbalance, + B: AtLeast32Bit + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default, + > SignedImbalance +{ + pub fn zero() -> Self { + SignedImbalance::Positive(P::zero()) + } + + pub fn drop_zero(self) -> Result<(), Self> { + match self { + SignedImbalance::Positive(x) => x.drop_zero().map_err(SignedImbalance::Positive), + SignedImbalance::Negative(x) => x.drop_zero().map_err(SignedImbalance::Negative), + } + } + + /// Consume `self` and an `other` to return a new instance that combines + /// both. + pub fn merge(self, other: Self) -> Self { + match (self, other) { + (SignedImbalance::Positive(one), SignedImbalance::Positive(other)) => { + SignedImbalance::Positive(one.merge(other)) + } + (SignedImbalance::Negative(one), SignedImbalance::Negative(other)) => { + SignedImbalance::Negative(one.merge(other)) + } + (SignedImbalance::Positive(one), SignedImbalance::Negative(other)) => { + if one.peek() > other.peek() { + SignedImbalance::Positive(one.offset(other).ok().unwrap_or_else(P::zero)) + } else { + SignedImbalance::Negative(other.offset(one).ok().unwrap_or_else(N::zero)) + } + } + (one, other) => other.merge(one), + } + } } /// Split an unbalanced amount two ways between a common divisor. 
-pub struct SplitTwoWays< - Balance, - Imbalance, - Part1, - Target1, - Part2, - Target2, ->(PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>); +pub struct SplitTwoWays( + PhantomData<(Balance, Imbalance, Part1, Target1, Part2, Target2)>, +); impl< - Balance: From + Saturating + Div, - I: Imbalance, - Part1: U32, - Target1: OnUnbalanced, - Part2: U32, - Target2: OnUnbalanced, -> OnUnbalanced for SplitTwoWays + Balance: From + Saturating + Div, + I: Imbalance, + Part1: U32, + Target1: OnUnbalanced, + Part2: U32, + Target2: OnUnbalanced, + > OnUnbalanced for SplitTwoWays { - fn on_nonzero_unbalanced(amount: I) { - let total: u32 = Part1::VALUE + Part2::VALUE; - let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into(); - let (imb1, imb2) = amount.split(amount1); - Target1::on_unbalanced(imb1); - Target2::on_unbalanced(imb2); - } + fn on_nonzero_unbalanced(amount: I) { + let total: u32 = Part1::VALUE + Part2::VALUE; + let amount1 = amount.peek().saturating_mul(Part1::VALUE.into()) / total.into(); + let (imb1, imb2) = amount.split(amount1); + Target1::on_unbalanced(imb1); + Target2::on_unbalanced(imb2); + } } /// Abstraction over a fungible assets system. pub trait Currency { - /// The balance of an account. - type Balance: AtLeast32Bit + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type PositiveImbalance: Imbalance; - - /// The opaque token type for an imbalance. This is returned by unbalanced operations - /// and must be dealt with. It may be dropped but cannot be cloned. - type NegativeImbalance: Imbalance; - - // PUBLIC IMMUTABLES - - /// The combined balance of `who`. 
- fn total_balance(who: &AccountId) -> Self::Balance; - - /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no - /// balance changes in the meantime and only the reserved balance is not taken into account. - fn can_slash(who: &AccountId, value: Self::Balance) -> bool; - - /// The total amount of issuance in the system. - fn total_issuance() -> Self::Balance; - - /// The minimum balance any single account may have. This is equivalent to the `Balances` module's - /// `ExistentialDeposit`. - fn minimum_balance() -> Self::Balance; - - /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will - /// typically be used to reduce an account by the same amount with e.g. `settle`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example - /// in the case of underflow. - fn burn(amount: Self::Balance) -> Self::PositiveImbalance; - - /// Increase the total issuance by `amount` and return the according imbalance. The imbalance - /// will typically be used to increase an account by the same amount with e.g. - /// `resolve_into_existing` or `resolve_creating`. - /// - /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example - /// in the case of overflow. - fn issue(amount: Self::Balance) -> Self::NegativeImbalance; - - /// The 'free' balance of a given account. - /// - /// This is the only balance that matters in terms of most operations on tokens. It alone - /// is used to determine the balance when in the contract execution environment. When this - /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is - /// deleted: specifically `FreeBalance`. - /// - /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. 
- fn free_balance(who: &AccountId) -> Self::Balance; - - /// Returns `Ok` iff the account is able to make a withdrawal of the given amount - /// for the given reason. Basically, it's just a dry-run of `withdraw`. - /// - /// `Err(...)` with the reason why not otherwise. - fn ensure_can_withdraw( - who: &AccountId, - _amount: Self::Balance, - reasons: WithdrawReasons, - new_balance: Self::Balance, - ) -> DispatchResult; - - // PUBLIC MUTABLES (DANGEROUS) - - /// Transfer some liquid free balance to another staker. - /// - /// This is a very high-level function. It will ensure all appropriate fees are paid - /// and no imbalance in the system remains. - fn transfer( - source: &AccountId, - dest: &AccountId, - value: Self::Balance, - existence_requirement: ExistenceRequirement, - ) -> DispatchResult; - - /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the - /// free balance. This function cannot fail. - /// - /// The resulting imbalance is the first item of the tuple returned. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then a non-zero second item will be returned. - fn slash( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// Mints `value` to the free balance of `who`. - /// - /// If `who` doesn't exist, nothing is done and an Err returned. - fn deposit_into_existing( - who: &AccountId, - value: Self::Balance - ) -> result::Result; - - /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on - /// success. - fn resolve_into_existing( - who: &AccountId, - value: Self::NegativeImbalance, - ) -> result::Result<(), Self::NegativeImbalance> { - let v = value.peek(); - match Self::deposit_into_existing(who, v) { - Ok(opposite) => Ok(drop(value.offset(opposite))), - _ => Err(value), - } - } - - /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. 
- /// - /// Infallible. - fn deposit_creating( - who: &AccountId, - value: Self::Balance, - ) -> Self::PositiveImbalance; - - /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on - /// success. - fn resolve_creating( - who: &AccountId, - value: Self::NegativeImbalance, - ) { - let v = value.peek(); - drop(value.offset(Self::deposit_creating(who, v))); - } - - /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is - /// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining. - /// - /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, - /// then it returns `Err`. - /// - /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value - /// is `value`. - fn withdraw( - who: &AccountId, - value: Self::Balance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result; - - /// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success. - fn settle( - who: &AccountId, - value: Self::PositiveImbalance, - reasons: WithdrawReasons, - liveness: ExistenceRequirement, - ) -> result::Result<(), Self::PositiveImbalance> { - let v = value.peek(); - match Self::withdraw(who, v, reasons, liveness) { - Ok(opposite) => Ok(drop(value.offset(opposite))), - _ => Err(value), - } - } - - /// Ensure an account's free balance equals some value; this will create the account - /// if needed. - /// - /// Returns a signed imbalance and status to indicate if the account was successfully updated or update - /// has led to killing of the account. - fn make_free_balance_be( - who: &AccountId, - balance: Self::Balance, - ) -> SignedImbalance; + /// The balance of an account. + type Balance: AtLeast32Bit + FullCodec + Copy + MaybeSerializeDeserialize + Debug + Default; + + /// The opaque token type for an imbalance. 
This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type PositiveImbalance: Imbalance; + + /// The opaque token type for an imbalance. This is returned by unbalanced operations + /// and must be dealt with. It may be dropped but cannot be cloned. + type NegativeImbalance: Imbalance; + + // PUBLIC IMMUTABLES + + /// The combined balance of `who`. + fn total_balance(who: &AccountId) -> Self::Balance; + + /// Same result as `slash(who, value)` (but without the side-effects) assuming there are no + /// balance changes in the meantime and only the reserved balance is not taken into account. + fn can_slash(who: &AccountId, value: Self::Balance) -> bool; + + /// The total amount of issuance in the system. + fn total_issuance() -> Self::Balance; + + /// The minimum balance any single account may have. This is equivalent to the `Balances` module's + /// `ExistentialDeposit`. + fn minimum_balance() -> Self::Balance; + + /// Reduce the total issuance by `amount` and return the according imbalance. The imbalance will + /// typically be used to reduce an account by the same amount with e.g. `settle`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is burnt, for example + /// in the case of underflow. + fn burn(amount: Self::Balance) -> Self::PositiveImbalance; + + /// Increase the total issuance by `amount` and return the according imbalance. The imbalance + /// will typically be used to increase an account by the same amount with e.g. + /// `resolve_into_existing` or `resolve_creating`. + /// + /// This is infallible, but doesn't guarantee that the entire `amount` is issued, for example + /// in the case of overflow. + fn issue(amount: Self::Balance) -> Self::NegativeImbalance; + + /// The 'free' balance of a given account. + /// + /// This is the only balance that matters in terms of most operations on tokens. 
It alone + /// is used to determine the balance when in the contract execution environment. When this + /// balance falls below the value of `ExistentialDeposit`, then the 'current account' is + /// deleted: specifically `FreeBalance`. + /// + /// `system::AccountNonce` is also deleted if `ReservedBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn free_balance(who: &AccountId) -> Self::Balance; + + /// Returns `Ok` iff the account is able to make a withdrawal of the given amount + /// for the given reason. Basically, it's just a dry-run of `withdraw`. + /// + /// `Err(...)` with the reason why not otherwise. + fn ensure_can_withdraw( + who: &AccountId, + _amount: Self::Balance, + reasons: WithdrawReasons, + new_balance: Self::Balance, + ) -> DispatchResult; + + // PUBLIC MUTABLES (DANGEROUS) + + /// Transfer some liquid free balance to another staker. + /// + /// This is a very high-level function. It will ensure all appropriate fees are paid + /// and no imbalance in the system remains. + fn transfer( + source: &AccountId, + dest: &AccountId, + value: Self::Balance, + existence_requirement: ExistenceRequirement, + ) -> DispatchResult; + + /// Deducts up to `value` from the combined balance of `who`, preferring to deduct from the + /// free balance. This function cannot fail. + /// + /// The resulting imbalance is the first item of the tuple returned. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then a non-zero second item will be returned. + fn slash(who: &AccountId, value: Self::Balance) -> (Self::NegativeImbalance, Self::Balance); + + /// Mints `value` to the free balance of `who`. + /// + /// If `who` doesn't exist, nothing is done and an Err returned. 
+ fn deposit_into_existing( + who: &AccountId, + value: Self::Balance, + ) -> result::Result; + + /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on + /// success. + fn resolve_into_existing( + who: &AccountId, + value: Self::NegativeImbalance, + ) -> result::Result<(), Self::NegativeImbalance> { + let v = value.peek(); + match Self::deposit_into_existing(who, v) { + Ok(opposite) => Ok(drop(value.offset(opposite))), + _ => Err(value), + } + } + + /// Adds up to `value` to the free balance of `who`. If `who` doesn't exist, it is created. + /// + /// Infallible. + fn deposit_creating(who: &AccountId, value: Self::Balance) -> Self::PositiveImbalance; + + /// Similar to deposit_creating, only accepts a `NegativeImbalance` and returns nothing on + /// success. + fn resolve_creating(who: &AccountId, value: Self::NegativeImbalance) { + let v = value.peek(); + drop(value.offset(Self::deposit_creating(who, v))); + } + + /// Removes some free balance from `who` account for `reason` if possible. If `liveness` is + /// `KeepAlive`, then no less than `ExistentialDeposit` must be left remaining. + /// + /// This checks any locks, vesting, and liquidity requirements. If the removal is not possible, + /// then it returns `Err`. + /// + /// If the operation is successful, this will return `Ok` with a `NegativeImbalance` whose value + /// is `value`. + fn withdraw( + who: &AccountId, + value: Self::Balance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> result::Result; + + /// Similar to withdraw, only accepts a `PositiveImbalance` and returns nothing on success. 
+ fn settle( + who: &AccountId, + value: Self::PositiveImbalance, + reasons: WithdrawReasons, + liveness: ExistenceRequirement, + ) -> result::Result<(), Self::PositiveImbalance> { + let v = value.peek(); + match Self::withdraw(who, v, reasons, liveness) { + Ok(opposite) => Ok(drop(value.offset(opposite))), + _ => Err(value), + } + } + + /// Ensure an account's free balance equals some value; this will create the account + /// if needed. + /// + /// Returns a signed imbalance and status to indicate if the account was successfully updated or update + /// has led to killing of the account. + fn make_free_balance_be( + who: &AccountId, + balance: Self::Balance, + ) -> SignedImbalance; } /// Status of funds. pub enum BalanceStatus { - /// Funds are free, as corresponding to `free` item in Balances. - Free, - /// Funds are reserved, as corresponding to `reserved` item in Balances. - Reserved, + /// Funds are free, as corresponding to `free` item in Balances. + Free, + /// Funds are reserved, as corresponding to `reserved` item in Balances. + Reserved, } /// A currency where funds can be reserved from the user. pub trait ReservableCurrency: Currency { - /// Same result as `reserve(who, value)` (but without the side-effects) assuming there - /// are no balance changes in the meantime. - fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; - - /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. - /// - /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` - /// is less than `value`, then a non-zero second item will be returned. - fn slash_reserved( - who: &AccountId, - value: Self::Balance - ) -> (Self::NegativeImbalance, Self::Balance); - - /// The amount of the balance of a given account that is externally reserved; this can still get - /// slashed, but gets slashed last of all. 
- /// - /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens - /// that are still 'owned' by the account holder, but which are suspendable. - /// - /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' - /// is deleted: specifically, `ReservedBalance`. - /// - /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets - /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. - fn reserved_balance(who: &AccountId) -> Self::Balance; - - /// Moves `value` from balance to reserved balance. - /// - /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will - /// be returned to notify of this. This is different behavior than `unreserve`. - fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult; - - /// Moves up to `value` from reserved balance to free balance. This function cannot fail. - /// - /// As much funds up to `value` will be moved as possible. If the reserve balance of `who` - /// is less than `value`, then the remaining amount will be returned. - /// - /// # NOTES - /// - /// - This is different from `reserve`. - /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will - /// invoke `on_reserved_too_low` and could reap the account. - fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; - - /// Moves up to `value` from reserved balance of account `slashed` to balance of account - /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be - /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, - /// depending on the `status`. - /// - /// As much funds up to `value` will be deducted as possible. If this is less than `value`, - /// then `Ok(non_zero)` will be returned. 
- fn repatriate_reserved( - slashed: &AccountId, - beneficiary: &AccountId, - value: Self::Balance, - status: BalanceStatus, - ) -> result::Result; + /// Same result as `reserve(who, value)` (but without the side-effects) assuming there + /// are no balance changes in the meantime. + fn can_reserve(who: &AccountId, value: Self::Balance) -> bool; + + /// Deducts up to `value` from reserved balance of `who`. This function cannot fail. + /// + /// As much funds up to `value` will be deducted as possible. If the reserve balance of `who` + /// is less than `value`, then a non-zero second item will be returned. + fn slash_reserved( + who: &AccountId, + value: Self::Balance, + ) -> (Self::NegativeImbalance, Self::Balance); + + /// The amount of the balance of a given account that is externally reserved; this can still get + /// slashed, but gets slashed last of all. + /// + /// This balance is a 'reserve' balance that other subsystems use in order to set aside tokens + /// that are still 'owned' by the account holder, but which are suspendable. + /// + /// When this balance falls below the value of `ExistentialDeposit`, then this 'reserve account' + /// is deleted: specifically, `ReservedBalance`. + /// + /// `system::AccountNonce` is also deleted if `FreeBalance` is also zero (it also gets + /// collapsed to zero if it ever becomes less than `ExistentialDeposit`. + fn reserved_balance(who: &AccountId) -> Self::Balance; + + /// Moves `value` from balance to reserved balance. + /// + /// If the free balance is lower than `value`, then no funds will be moved and an `Err` will + /// be returned to notify of this. This is different behavior than `unreserve`. + fn reserve(who: &AccountId, value: Self::Balance) -> DispatchResult; + + /// Moves up to `value` from reserved balance to free balance. This function cannot fail. + /// + /// As much funds up to `value` will be moved as possible. 
If the reserve balance of `who` + /// is less than `value`, then the remaining amount will be returned. + /// + /// # NOTES + /// + /// - This is different from `reserve`. + /// - If the remaining reserved balance is less than `ExistentialDeposit`, it will + /// invoke `on_reserved_too_low` and could reap the account. + fn unreserve(who: &AccountId, value: Self::Balance) -> Self::Balance; + + /// Moves up to `value` from reserved balance of account `slashed` to balance of account + /// `beneficiary`. `beneficiary` must exist for this to succeed. If it does not, `Err` will be + /// returned. Funds will be placed in either the `free` balance or the `reserved` balance, + /// depending on the `status`. + /// + /// As much funds up to `value` will be deducted as possible. If this is less than `value`, + /// then `Ok(non_zero)` will be returned. + fn repatriate_reserved( + slashed: &AccountId, + beneficiary: &AccountId, + value: Self::Balance, + status: BalanceStatus, + ) -> result::Result; } /// An identifier for a lock. Used for disambiguating different locks so that @@ -789,354 +813,356 @@ pub type LockIdentifier = [u8; 8]; /// A currency whose accounts can have liquidity restrictions. pub trait LockableCurrency: Currency { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// Create a new balance lock on account `who`. - /// - /// If the new lock is valid (i.e. not already expired), it will push the struct to - /// the `Locks` vec in storage. Note that you can lock more funds than a user has. - /// - /// If the lock `id` already exists, this will update it. - fn set_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all - /// parameters or creates a new one if it does not exist. 
- /// - /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it - /// applies the most severe constraints of the two, while `set_lock` replaces the lock - /// with the new parameters. As in, `extend_lock` will set: - /// - maximum `amount` - /// - bitwise mask of all `reasons` - fn extend_lock( - id: LockIdentifier, - who: &AccountId, - amount: Self::Balance, - reasons: WithdrawReasons, - ); - - /// Remove an existing lock. - fn remove_lock( - id: LockIdentifier, - who: &AccountId, - ); + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// Create a new balance lock on account `who`. + /// + /// If the new lock is valid (i.e. not already expired), it will push the struct to + /// the `Locks` vec in storage. Note that you can lock more funds than a user has. + /// + /// If the lock `id` already exists, this will update it. + fn set_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Changes a balance lock (selected by `id`) so that it becomes less liquid in all + /// parameters or creates a new one if it does not exist. + /// + /// Calling `extend_lock` on an existing lock `id` differs from `set_lock` in that it + /// applies the most severe constraints of the two, while `set_lock` replaces the lock + /// with the new parameters. As in, `extend_lock` will set: + /// - maximum `amount` + /// - bitwise mask of all `reasons` + fn extend_lock( + id: LockIdentifier, + who: &AccountId, + amount: Self::Balance, + reasons: WithdrawReasons, + ); + + /// Remove an existing lock. + fn remove_lock(id: LockIdentifier, who: &AccountId); } /// A vesting schedule over a currency. This allows a particular currency to have vesting limits /// applied to it. pub trait VestingSchedule { - /// The quantity used to denote time; usually just a `BlockNumber`. - type Moment; - - /// The currency that this schedule applies to. 
- type Currency: Currency; - - /// Get the amount that is currently being vested and cannot be transferred out of this account. - /// Returns `None` if the account has no vesting schedule. - fn vesting_balance(who: &AccountId) -> Option<>::Balance>; - - /// Adds a vesting schedule to a given account. - /// - /// If there already exists a vesting schedule for the given account, an `Err` is returned - /// and nothing is updated. - /// - /// Is a no-op if the amount to be vested is zero. - /// - /// NOTE: This doesn't alter the free balance of the account. - fn add_vesting_schedule( - who: &AccountId, - locked: >::Balance, - per_block: >::Balance, - starting_block: Self::Moment, - ) -> DispatchResult; - - /// Remove a vesting schedule for a given account. - /// - /// NOTE: This doesn't alter the free balance of the account. - fn remove_vesting_schedule(who: &AccountId); + /// The quantity used to denote time; usually just a `BlockNumber`. + type Moment; + + /// The currency that this schedule applies to. + type Currency: Currency; + + /// Get the amount that is currently being vested and cannot be transferred out of this account. + /// Returns `None` if the account has no vesting schedule. + fn vesting_balance(who: &AccountId) + -> Option<>::Balance>; + + /// Adds a vesting schedule to a given account. + /// + /// If there already exists a vesting schedule for the given account, an `Err` is returned + /// and nothing is updated. + /// + /// Is a no-op if the amount to be vested is zero. + /// + /// NOTE: This doesn't alter the free balance of the account. + fn add_vesting_schedule( + who: &AccountId, + locked: >::Balance, + per_block: >::Balance, + starting_block: Self::Moment, + ) -> DispatchResult; + + /// Remove a vesting schedule for a given account. + /// + /// NOTE: This doesn't alter the free balance of the account. + fn remove_vesting_schedule(who: &AccountId); } bitmask! { - /// Reasons for moving funds out of an account. 
- #[derive(Encode, Decode)] - pub mask WithdrawReasons: i8 where - - /// Reason for moving funds out of an account. - #[derive(Encode, Decode)] - flags WithdrawReason { - /// In order to pay for (system) transaction costs. - TransactionPayment = 0b00000001, - /// In order to transfer ownership. - Transfer = 0b00000010, - /// In order to reserve some funds for a later return or repatriation. - Reserve = 0b00000100, - /// In order to pay some other (higher-level) fees. - Fee = 0b00001000, - /// In order to tip a validator for transaction inclusion. - Tip = 0b00010000, - } + /// Reasons for moving funds out of an account. + #[derive(Encode, Decode)] + pub mask WithdrawReasons: i8 where + + /// Reason for moving funds out of an account. + #[derive(Encode, Decode)] + flags WithdrawReason { + /// In order to pay for (system) transaction costs. + TransactionPayment = 0b00000001, + /// In order to transfer ownership. + Transfer = 0b00000010, + /// In order to reserve some funds for a later return or repatriation. + Reserve = 0b00000100, + /// In order to pay some other (higher-level) fees. + Fee = 0b00001000, + /// In order to tip a validator for transaction inclusion. + Tip = 0b00010000, + } } pub trait Time { - type Moment: AtLeast32Bit + Parameter + Default + Copy; + type Moment: AtLeast32Bit + Parameter + Default + Copy; - fn now() -> Self::Moment; + fn now() -> Self::Moment; } /// Trait to deal with unix time. pub trait UnixTime { - /// Return duration since `SystemTime::UNIX_EPOCH`. - fn now() -> core::time::Duration; + /// Return duration since `SystemTime::UNIX_EPOCH`. + fn now() -> core::time::Duration; } impl WithdrawReasons { - /// Choose all variants except for `one`. 
- /// - /// ```rust - /// # use frame_support::traits::{WithdrawReason, WithdrawReasons}; - /// # fn main() { - /// assert_eq!( - /// WithdrawReason::Fee | WithdrawReason::Transfer | WithdrawReason::Reserve | WithdrawReason::Tip, - /// WithdrawReasons::except(WithdrawReason::TransactionPayment), - /// ); - /// # } - /// ``` - pub fn except(one: WithdrawReason) -> WithdrawReasons { - let mut mask = Self::all(); - mask.toggle(one); - mask - } + /// Choose all variants except for `one`. + /// + /// ```rust + /// # use frame_support::traits::{WithdrawReason, WithdrawReasons}; + /// # fn main() { + /// assert_eq!( + /// WithdrawReason::Fee | WithdrawReason::Transfer | WithdrawReason::Reserve | WithdrawReason::Tip, + /// WithdrawReasons::except(WithdrawReason::TransactionPayment), + /// ); + /// # } + /// ``` + pub fn except(one: WithdrawReason) -> WithdrawReasons { + let mut mask = Self::all(); + mask.toggle(one); + mask + } } /// Trait for type that can handle incremental changes to a set of account IDs. pub trait ChangeMembers { - /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The - /// new set is given by `new`, and need not be sorted. - /// - /// This resets any previous value of prime. - fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { - new.sort_unstable(); - Self::change_members_sorted(incoming, outgoing, &new[..]); - } - - /// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The - /// new set is thus given by `sorted_new` and **must be sorted**. - /// - /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. - /// - /// This resets any previous value of prime. - fn change_members_sorted( - incoming: &[AccountId], - outgoing: &[AccountId], - sorted_new: &[AccountId], - ); - - /// Set the new members; they **must already be sorted**. This will compute the diff and use it to - /// call `change_members_sorted`. 
- /// - /// This resets any previous value of prime. - fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { - let (incoming, outgoing) = Self::compute_members_diff(new_members, old_members); - Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); - } - - /// Set the new members; they **must already be sorted**. This will compute the diff and use it to - /// call `change_members_sorted`. - fn compute_members_diff( - new_members: &[AccountId], - old_members: &[AccountId] - ) -> (Vec, Vec) { - let mut old_iter = old_members.iter(); - let mut new_iter = new_members.iter(); - let mut incoming = Vec::new(); - let mut outgoing = Vec::new(); - let mut old_i = old_iter.next(); - let mut new_i = new_iter.next(); - loop { - match (old_i, new_i) { - (None, None) => break, - (Some(old), Some(new)) if old == new => { - old_i = old_iter.next(); - new_i = new_iter.next(); - } - (Some(old), Some(new)) if old < new => { - outgoing.push(old.clone()); - old_i = old_iter.next(); - } - (Some(old), None) => { - outgoing.push(old.clone()); - old_i = old_iter.next(); - } - (_, Some(new)) => { - incoming.push(new.clone()); - new_i = new_iter.next(); - } - } - } - (incoming, outgoing) - } - - /// Set the prime member. - fn set_prime(_prime: Option) {} + /// A number of members `incoming` just joined the set and replaced some `outgoing` ones. The + /// new set is given by `new`, and need not be sorted. + /// + /// This resets any previous value of prime. + fn change_members(incoming: &[AccountId], outgoing: &[AccountId], mut new: Vec) { + new.sort_unstable(); + Self::change_members_sorted(incoming, outgoing, &new[..]); + } + + /// A number of members `_incoming` just joined the set and replaced some `_outgoing` ones. The + /// new set is thus given by `sorted_new` and **must be sorted**. + /// + /// NOTE: This is the only function that needs to be implemented in `ChangeMembers`. + /// + /// This resets any previous value of prime. 
+ fn change_members_sorted( + incoming: &[AccountId], + outgoing: &[AccountId], + sorted_new: &[AccountId], + ); + + /// Set the new members; they **must already be sorted**. This will compute the diff and use it to + /// call `change_members_sorted`. + /// + /// This resets any previous value of prime. + fn set_members_sorted(new_members: &[AccountId], old_members: &[AccountId]) { + let (incoming, outgoing) = Self::compute_members_diff(new_members, old_members); + Self::change_members_sorted(&incoming[..], &outgoing[..], &new_members); + } + + /// Set the new members; they **must already be sorted**. This will compute the diff and use it to + /// call `change_members_sorted`. + fn compute_members_diff( + new_members: &[AccountId], + old_members: &[AccountId], + ) -> (Vec, Vec) { + let mut old_iter = old_members.iter(); + let mut new_iter = new_members.iter(); + let mut incoming = Vec::new(); + let mut outgoing = Vec::new(); + let mut old_i = old_iter.next(); + let mut new_i = new_iter.next(); + loop { + match (old_i, new_i) { + (None, None) => break, + (Some(old), Some(new)) if old == new => { + old_i = old_iter.next(); + new_i = new_iter.next(); + } + (Some(old), Some(new)) if old < new => { + outgoing.push(old.clone()); + old_i = old_iter.next(); + } + (Some(old), None) => { + outgoing.push(old.clone()); + old_i = old_iter.next(); + } + (_, Some(new)) => { + incoming.push(new.clone()); + new_i = new_iter.next(); + } + } + } + (incoming, outgoing) + } + + /// Set the prime member. 
+ fn set_prime(_prime: Option) {} } impl ChangeMembers for () { - fn change_members(_: &[T], _: &[T], _: Vec) {} - fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} - fn set_members_sorted(_: &[T], _: &[T]) {} - fn set_prime(_: Option) {} + fn change_members(_: &[T], _: &[T], _: Vec) {} + fn change_members_sorted(_: &[T], _: &[T], _: &[T]) {} + fn set_members_sorted(_: &[T], _: &[T]) {} + fn set_prime(_: Option) {} } - - /// Trait for type that can handle the initialization of account IDs at genesis. pub trait InitializeMembers { - /// Initialize the members to the given `members`. - fn initialize_members(members: &[AccountId]); + /// Initialize the members to the given `members`. + fn initialize_members(members: &[AccountId]); } impl InitializeMembers for () { - fn initialize_members(_: &[T]) {} + fn initialize_members(_: &[T]) {} } // A trait that is able to provide randomness. pub trait Randomness { - /// Get a "random" value - /// - /// Being a deterministic blockchain, real randomness is difficult to come by. This gives you - /// something that approximates it. At best, this will be randomness which was - /// hard to predict a long time ago, but that has become easy to predict recently. - /// - /// `subject` is a context identifier and allows you to get a - /// different result to other callers of this function; use it like - /// `random(&b"my context"[..])`. - fn random(subject: &[u8]) -> Output; - - /// Get the basic random seed. - /// - /// In general you won't want to use this, but rather `Self::random` which allows you to give a - /// subject for the random result and whose value will be independently low-influence random - /// from any other such seeds. - fn random_seed() -> Output { - Self::random(&[][..]) - } + /// Get a "random" value + /// + /// Being a deterministic blockchain, real randomness is difficult to come by. This gives you + /// something that approximates it. 
At best, this will be randomness which was + /// hard to predict a long time ago, but that has become easy to predict recently. + /// + /// `subject` is a context identifier and allows you to get a + /// different result to other callers of this function; use it like + /// `random(&b"my context"[..])`. + fn random(subject: &[u8]) -> Output; + + /// Get the basic random seed. + /// + /// In general you won't want to use this, but rather `Self::random` which allows you to give a + /// subject for the random result and whose value will be independently low-influence random + /// from any other such seeds. + fn random_seed() -> Output { + Self::random(&[][..]) + } } impl Randomness for () { - fn random(subject: &[u8]) -> Output { - Output::decode(&mut TrailingZeroInput::new(subject)).unwrap_or_default() - } + fn random(subject: &[u8]) -> Output { + Output::decode(&mut TrailingZeroInput::new(subject)).unwrap_or_default() + } } /// Trait to be used by block producing consensus engine modules to determine /// how late the current block is (e.g. in a slot-based proposal mechanism how /// many slots were skipped since the previous block). pub trait Lateness { - /// Returns a generic measure of how late the current block is compared to - /// its parent. - fn lateness(&self) -> N; + /// Returns a generic measure of how late the current block is compared to + /// its parent. + fn lateness(&self) -> N; } impl Lateness for () { - fn lateness(&self) -> N { - Zero::zero() - } + fn lateness(&self) -> N { + Zero::zero() + } } /// Implementors of this trait provide information about whether or not some validator has /// been registered with them. The [Session module](../../pallet_session/index.html) is an implementor. 
pub trait ValidatorRegistration { - /// Returns true if the provided validator ID has been registered with the implementing runtime - /// module - fn is_registered(id: &ValidatorId) -> bool; + /// Returns true if the provided validator ID has been registered with the implementing runtime + /// module + fn is_registered(id: &ValidatorId) -> bool; } /// Something that can convert a given module into the index of the module in the runtime. /// /// The index of a module is determined by the position it appears in `construct_runtime!`. pub trait ModuleToIndex { - /// Convert the given module `M` into an index. - fn module_to_index() -> Option; + /// Convert the given module `M` into an index. + fn module_to_index() -> Option; } impl ModuleToIndex for () { - fn module_to_index() -> Option { Some(0) } + fn module_to_index() -> Option { + Some(0) + } } /// The function and pallet name of the Call. #[derive(Clone, Eq, PartialEq, Default, RuntimeDebug)] pub struct CallMetadata { - /// Name of the function. - pub function_name: &'static str, - /// Name of the pallet to which the function belongs. - pub pallet_name: &'static str, + /// Name of the function. + pub function_name: &'static str, + /// Name of the pallet to which the function belongs. + pub pallet_name: &'static str, } /// Gets the function name of the Call. pub trait GetCallName { - /// Return all function names. - fn get_call_names() -> &'static [&'static str]; - /// Return the function name of the Call. - fn get_call_name(&self) -> &'static str; + /// Return all function names. + fn get_call_names() -> &'static [&'static str]; + /// Return the function name of the Call. + fn get_call_name(&self) -> &'static str; } /// Gets the metadata for the Call - function name and pallet name. pub trait GetCallMetadata { - /// Return all module names. - fn get_module_names() -> &'static [&'static str]; - /// Return all function names for the given `module`. 
- fn get_call_names(module: &str) -> &'static [&'static str]; - /// Return a [`CallMetadata`], containing function and pallet name of the Call. - fn get_call_metadata(&self) -> CallMetadata; + /// Return all module names. + fn get_module_names() -> &'static [&'static str]; + /// Return all function names for the given `module`. + fn get_call_names(module: &str) -> &'static [&'static str]; + /// Return a [`CallMetadata`], containing function and pallet name of the Call. + fn get_call_metadata(&self) -> CallMetadata; } /// The block finalization trait. Implementing this lets you express what should happen /// for your module when the block is ending. #[impl_for_tuples(30)] pub trait OnFinalize { - /// The block is being finalized. Implement to have something happen. - fn on_finalize(_n: BlockNumber) {} + /// The block is being finalized. Implement to have something happen. + fn on_finalize(_n: BlockNumber) {} } /// The block initialization trait. Implementing this lets you express what should happen /// for your module when the block is beginning (right before the first extrinsic is executed). pub trait OnInitialize { - /// The block is being initialized. Implement to have something happen. - /// - /// Return the non-negotiable weight consumed in the block. - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { 0 } + /// The block is being initialized. Implement to have something happen. + /// + /// Return the non-negotiable weight consumed in the block. 
+ fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { + 0 + } } #[impl_for_tuples(30)] impl OnInitialize for Tuple { - fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(_n.clone())); )* ); - weight - } + fn on_initialize(_n: BlockNumber) -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_initialize(_n.clone())); )* ); + weight + } } /// The runtime upgrade trait. Implementing this lets you express what should happen /// when the runtime upgrades, and changes may need to occur to your module. pub trait OnRuntimeUpgrade { - /// Perform a module upgrade. - /// - /// Return the non-negotiable weight consumed for runtime upgrade. - fn on_runtime_upgrade() -> crate::weights::Weight { 0 } + /// Perform a module upgrade. + /// + /// Return the non-negotiable weight consumed for runtime upgrade. + fn on_runtime_upgrade() -> crate::weights::Weight { + 0 + } } #[impl_for_tuples(30)] impl OnRuntimeUpgrade for Tuple { - fn on_runtime_upgrade() -> crate::weights::Weight { - let mut weight = 0; - for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); - weight - } + fn on_runtime_upgrade() -> crate::weights::Weight { + let mut weight = 0; + for_tuples!( #( weight = weight.saturating_add(Tuple::on_runtime_upgrade()); )* ); + weight + } } /// Off-chain computation trait. @@ -1151,130 +1177,130 @@ impl OnRuntimeUpgrade for Tuple { /// has finished. #[impl_for_tuples(30)] pub trait OffchainWorker { - /// This function is being called after every block import (when fully synced). - /// - /// Implement this and use any of the `Offchain` `sp_io` set of APIs - /// to perform off-chain computations, calls and submit transactions - /// with results to trigger any on-chain changes. - /// Any state alterations are lost and are not persisted. 
- fn offchain_worker(_n: BlockNumber) {} + /// This function is being called after every block import (when fully synced). + /// + /// Implement this and use any of the `Offchain` `sp_io` set of APIs + /// to perform off-chain computations, calls and submit transactions + /// with results to trigger any on-chain changes. + /// Any state alterations are lost and are not persisted. + fn offchain_worker(_n: BlockNumber) {} } pub mod schedule { - use super::*; - - /// Information relating to the period of a scheduled task. First item is the length of the - /// period and the second is the number of times it should be executed in total before the task - /// is considered finished and removed. - pub type Period = (BlockNumber, u32); - - /// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning - /// higher priority. - pub type Priority = u8; - - /// The highest priority. We invert the value so that normal sorting will place the highest - /// priority at the beginning of the list. - pub const HIGHEST_PRORITY: Priority = 0; - /// Anything of this value or lower will definitely be scheduled on the block that they ask for, even - /// if it breaches the `MaximumWeight` limitation. - pub const HARD_DEADLINE: Priority = 63; - /// The lowest priority. Most stuff should be around here. - pub const LOWEST_PRORITY: Priority = 255; - - /// A type that can be used as a scheduler. - pub trait Anon { - /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + Debug; - - /// Schedule a one-off dispatch to happen at the beginning of some block in the future. - /// - /// This is not named. - /// - /// Infallible. - fn schedule( - when: BlockNumber, - maybe_periodic: Option>, - priority: Priority, - call: Call - ) -> Self::Address; - - /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, - /// also. 
- /// - /// Will return an error if the `address` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - /// - /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For - /// that, you must name the task explicitly using the `Named` trait. - fn cancel(address: Self::Address) -> Result<(), ()>; - } - - /// A type that can be used as a scheduler. - pub trait Named { - /// An address which can be used for removing a scheduled task. - type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; - - /// Schedule a one-off dispatch to happen at the beginning of some block in the future. - /// - /// - `id`: The identity of the task. This must be unique and will return an error if not. - fn schedule_named( - id: impl Encode, - when: BlockNumber, - maybe_periodic: Option>, - priority: Priority, - call: Call - ) -> Result; - - /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances - /// of that, also. - /// - /// Will return an error if the `id` is invalid. - /// - /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. - /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. - fn cancel_named(id: impl Encode) -> Result<(), ()>; - } + use super::*; + + /// Information relating to the period of a scheduled task. First item is the length of the + /// period and the second is the number of times it should be executed in total before the task + /// is considered finished and removed. + pub type Period = (BlockNumber, u32); + + /// Priority with which a call is scheduled. It's just a linear amount with lowest values meaning + /// higher priority. + pub type Priority = u8; + + /// The highest priority. 
We invert the value so that normal sorting will place the highest + /// priority at the beginning of the list. + pub const HIGHEST_PRORITY: Priority = 0; + /// Anything of this value or lower will definitely be scheduled on the block that they ask for, even + /// if it breaches the `MaximumWeight` limitation. + pub const HARD_DEADLINE: Priority = 63; + /// The lowest priority. Most stuff should be around here. + pub const LOWEST_PRORITY: Priority = 255; + + /// A type that can be used as a scheduler. + pub trait Anon { + /// An address which can be used for removing a scheduled task. + type Address: Codec + Clone + Eq + EncodeLike + Debug; + + /// Schedule a one-off dispatch to happen at the beginning of some block in the future. + /// + /// This is not named. + /// + /// Infallible. + fn schedule( + when: BlockNumber, + maybe_periodic: Option>, + priority: Priority, + call: Call, + ) -> Self::Address; + + /// Cancel a scheduled task. If periodic, then it will cancel all further instances of that, + /// also. + /// + /// Will return an error if the `address` is invalid. + /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + /// + /// NOTE2: This will not work to cancel periodic tasks after their initial execution. For + /// that, you must name the task explicitly using the `Named` trait. + fn cancel(address: Self::Address) -> Result<(), ()>; + } + + /// A type that can be used as a scheduler. + pub trait Named { + /// An address which can be used for removing a scheduled task. + type Address: Codec + Clone + Eq + EncodeLike + sp_std::fmt::Debug; + + /// Schedule a one-off dispatch to happen at the beginning of some block in the future. + /// + /// - `id`: The identity of the task. This must be unique and will return an error if not. 
+ fn schedule_named( + id: impl Encode, + when: BlockNumber, + maybe_periodic: Option>, + priority: Priority, + call: Call, + ) -> Result; + + /// Cancel a scheduled, named task. If periodic, then it will cancel all further instances + /// of that, also. + /// + /// Will return an error if the `id` is invalid. + /// + /// NOTE: This guaranteed to work only *before* the point that it is due to be executed. + /// If it ends up being delayed beyond the point of execution, then it cannot be cancelled. + fn cancel_named(id: impl Encode) -> Result<(), ()>; + } } /// Some sort of check on the origin is performed by this object. pub trait EnsureOrigin { - /// A return type. - type Success; - /// Perform the origin check. - fn ensure_origin(o: OuterOrigin) -> result::Result { - Self::try_origin(o).map_err(|_| BadOrigin) - } - /// Perform the origin check. - fn try_origin(o: OuterOrigin) -> result::Result; - - /// Returns an outer origin capable of passing `try_origin` check. - /// - /// ** Should be used for benchmarking only!!! ** - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> OuterOrigin; + /// A return type. + type Success; + /// Perform the origin check. + fn ensure_origin(o: OuterOrigin) -> result::Result { + Self::try_origin(o).map_err(|_| BadOrigin) + } + /// Perform the origin check. + fn try_origin(o: OuterOrigin) -> result::Result; + + /// Returns an outer origin capable of passing `try_origin` check. + /// + /// ** Should be used for benchmarking only!!! 
** + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> OuterOrigin; } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { - struct Test; - impl OnInitialize for Test { - fn on_initialize(_n: u8) -> crate::weights::Weight { - 10 - } - } - impl OnRuntimeUpgrade for Test { - fn on_runtime_upgrade() -> crate::weights::Weight { - 20 - } - } - - assert_eq!(<(Test, Test)>::on_initialize(0), 20); - assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); - } + use super::*; + + #[test] + fn on_initialize_and_on_runtime_upgrade_weight_merge_works() { + struct Test; + impl OnInitialize for Test { + fn on_initialize(_n: u8) -> crate::weights::Weight { + 10 + } + } + impl OnRuntimeUpgrade for Test { + fn on_runtime_upgrade() -> crate::weights::Weight { + 20 + } + } + + assert_eq!(<(Test, Test)>::on_initialize(0), 20); + assert_eq!(<(Test, Test)>::on_runtime_upgrade(), 40); + } } diff --git a/frame/support/src/unsigned.rs b/frame/support/src/unsigned.rs index 3bc6f692af..1aff4b0dac 100644 --- a/frame/support/src/unsigned.rs +++ b/frame/support/src/unsigned.rs @@ -18,10 +18,9 @@ pub use crate::sp_runtime::traits::ValidateUnsigned; #[doc(hidden)] pub use crate::sp_runtime::transaction_validity::{ - TransactionValidity, UnknownTransaction, TransactionValidityError, TransactionSource, + TransactionSource, TransactionValidity, TransactionValidityError, UnknownTransaction, }; - /// Implement `ValidateUnsigned` for `Runtime`. /// All given modules need to implement `ValidateUnsigned`. /// @@ -96,76 +95,76 @@ macro_rules! impl_outer_validate_unsigned { #[cfg(test)] mod test_empty_call { - pub enum Call {} + pub enum Call {} - #[allow(unused)] - pub struct Runtime; + #[allow(unused)] + pub struct Runtime; - impl_outer_validate_unsigned! { - impl ValidateUnsigned for Runtime { - } - } + impl_outer_validate_unsigned! 
{ + impl ValidateUnsigned for Runtime { + } + } } #[cfg(test)] mod test_partial_and_full_call { - pub mod timestamp { - pub struct Module; - - impl super::super::ValidateUnsigned for Module { - type Call = Call; - - fn validate_unsigned( - _source: super::super::TransactionSource, - _call: &Self::Call - ) -> super::super::TransactionValidity { - unimplemented!(); - } - } - - pub enum Call { - Foo, - } - } - - mod test_full_unsigned { - pub type Timestamp = super::timestamp::Module; - - pub enum Call { - Timestamp(super::timestamp::Call), - } - - pub struct Runtime; - - impl_outer_validate_unsigned! { - impl ValidateUnsigned for Runtime { - Timestamp - } - } - - #[test] - fn used() { - let _ = Call::Timestamp(super::timestamp::Call::Foo); - let _ = Runtime; - } - } - - mod test_not_full_unsigned { - pub enum Call { - Timestamp(super::timestamp::Call), - } - - pub struct Runtime; - - impl_outer_validate_unsigned! { - impl ValidateUnsigned for Runtime { - } - } - - #[test] - fn used() { - let _ = Call::Timestamp(super::timestamp::Call::Foo); - let _ = Runtime; - } - } + pub mod timestamp { + pub struct Module; + + impl super::super::ValidateUnsigned for Module { + type Call = Call; + + fn validate_unsigned( + _source: super::super::TransactionSource, + _call: &Self::Call, + ) -> super::super::TransactionValidity { + unimplemented!(); + } + } + + pub enum Call { + Foo, + } + } + + mod test_full_unsigned { + pub type Timestamp = super::timestamp::Module; + + pub enum Call { + Timestamp(super::timestamp::Call), + } + + pub struct Runtime; + + impl_outer_validate_unsigned! { + impl ValidateUnsigned for Runtime { + Timestamp + } + } + + #[test] + fn used() { + let _ = Call::Timestamp(super::timestamp::Call::Foo); + let _ = Runtime; + } + } + + mod test_not_full_unsigned { + pub enum Call { + Timestamp(super::timestamp::Call), + } + + pub struct Runtime; + + impl_outer_validate_unsigned! 
{ + impl ValidateUnsigned for Runtime { + } + } + + #[test] + fn used() { + let _ = Call::Timestamp(super::timestamp::Call::Foo); + let _ = Runtime; + } + } } diff --git a/frame/support/src/weights.rs b/frame/support/src/weights.rs index 79cfa1b397..8942728cdd 100644 --- a/frame/support/src/weights.rs +++ b/frame/support/src/weights.rs @@ -35,16 +35,16 @@ //! Note that the decl_module macro _cannot_ enforce this and will simply fail if an invalid struct //! (something that does not implement `Weighable`) is passed in. +use crate::dispatch::{DispatchError, DispatchErrorWithPostInfo}; +use codec::{Decode, Encode}; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize}; -use codec::{Encode, Decode}; +use serde::{Deserialize, Serialize}; use sp_arithmetic::traits::Bounded; use sp_runtime::{ - RuntimeDebug, - traits::SignedExtension, - generic::{CheckedExtrinsic, UncheckedExtrinsic}, + generic::{CheckedExtrinsic, UncheckedExtrinsic}, + traits::SignedExtension, + RuntimeDebug, }; -use crate::dispatch::{DispatchErrorWithPostInfo, DispatchError}; /// Re-export priority as type pub use sp_runtime::transaction_validity::TransactionPriority; @@ -60,25 +60,25 @@ pub const MINIMUM_WEIGHT: Weight = 10_000_000; /// Means of weighing some particular kind of data (`T`). pub trait WeighData { - /// Weigh the data `T` given by `target`. When implementing this for a dispatchable, `T` will be - /// a tuple of all arguments given to the function (except origin). - fn weigh_data(&self, target: T) -> Weight; + /// Weigh the data `T` given by `target`. When implementing this for a dispatchable, `T` will be + /// a tuple of all arguments given to the function (except origin). + fn weigh_data(&self, target: T) -> Weight; } /// Means of classifying a dispatchable function. pub trait ClassifyDispatch { - /// Classify the dispatch function based on input data `target` of type `T`. 
When implementing - /// this for a dispatchable, `T` will be a tuple of all arguments given to the function (except - /// origin). - fn classify_dispatch(&self, target: T) -> DispatchClass; + /// Classify the dispatch function based on input data `target` of type `T`. When implementing + /// this for a dispatchable, `T` will be a tuple of all arguments given to the function (except + /// origin). + fn classify_dispatch(&self, target: T) -> DispatchClass; } /// Indicates if dispatch function should pay fees or not. /// If set to false, the block resource limits are applied, yet no fee is deducted. pub trait PaysFee { - fn pays_fee(&self, _target: T) -> bool { - true - } + fn pays_fee(&self, _target: T) -> bool { + true + } } /// A generalized group of dispatch types. This is only distinguishing normal, user-triggered transactions @@ -87,156 +87,157 @@ pub trait PaysFee { #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] #[derive(PartialEq, Eq, Clone, Copy, Encode, Decode, RuntimeDebug)] pub enum DispatchClass { - /// A normal dispatch. - Normal, - /// An operational dispatch. - Operational, - /// A mandatory dispatch. These kinds of dispatch are always included regardless of their - /// weight, therefore it is critical that they are separately validated to ensure that a - /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just means - /// ensuring that the extrinsic can only be included once and that it is always very light. - /// - /// Do *NOT* use it for extrinsics that can be heavy. - /// - /// The only real use case for this is inherent extrinsics that are required to execute in a - /// block for the block to be valid, and it solves the issue in the case that the block - /// initialization is sufficiently heavy to mean that those inherents do not fit into the - /// block. 
Essentially, we assume that in these exceptional circumstances, it is better to - /// allow an overweight block to be created than to not allow any block at all to be created. - Mandatory, + /// A normal dispatch. + Normal, + /// An operational dispatch. + Operational, + /// A mandatory dispatch. These kinds of dispatch are always included regardless of their + /// weight, therefore it is critical that they are separately validated to ensure that a + /// malicious validator cannot craft a valid but impossibly heavy block. Usually this just means + /// ensuring that the extrinsic can only be included once and that it is always very light. + /// + /// Do *NOT* use it for extrinsics that can be heavy. + /// + /// The only real use case for this is inherent extrinsics that are required to execute in a + /// block for the block to be valid, and it solves the issue in the case that the block + /// initialization is sufficiently heavy to mean that those inherents do not fit into the + /// block. Essentially, we assume that in these exceptional circumstances, it is better to + /// allow an overweight block to be created than to not allow any block at all to be created. 
+ Mandatory, } impl Default for DispatchClass { - fn default() -> Self { - DispatchClass::Normal - } + fn default() -> Self { + DispatchClass::Normal + } } // Implement traits for raw Weight value impl WeighData for Weight { - fn weigh_data(&self, _: T) -> Weight { - return *self - } + fn weigh_data(&self, _: T) -> Weight { + return *self; + } } impl ClassifyDispatch for Weight { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::default() - } + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::default() + } } impl PaysFee for Weight { - fn pays_fee(&self, _: T) -> bool { - true - } + fn pays_fee(&self, _: T) -> bool { + true + } } impl From for DispatchClass { - fn from(tx: SimpleDispatchInfo) -> Self { - match tx { - SimpleDispatchInfo::FixedOperational(_) => DispatchClass::Operational, - SimpleDispatchInfo::MaxOperational => DispatchClass::Operational, + fn from(tx: SimpleDispatchInfo) -> Self { + match tx { + SimpleDispatchInfo::FixedOperational(_) => DispatchClass::Operational, + SimpleDispatchInfo::MaxOperational => DispatchClass::Operational, - SimpleDispatchInfo::FixedNormal(_) => DispatchClass::Normal, - SimpleDispatchInfo::MaxNormal => DispatchClass::Normal, - SimpleDispatchInfo::InsecureFreeNormal => DispatchClass::Normal, + SimpleDispatchInfo::FixedNormal(_) => DispatchClass::Normal, + SimpleDispatchInfo::MaxNormal => DispatchClass::Normal, + SimpleDispatchInfo::InsecureFreeNormal => DispatchClass::Normal, - SimpleDispatchInfo::FixedMandatory(_) => DispatchClass::Mandatory, - } - } + SimpleDispatchInfo::FixedMandatory(_) => DispatchClass::Mandatory, + } + } } /// A bundle of static information collected from the `#[weight = $x]` attributes. #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] pub struct DispatchInfo { - /// Weight of this transaction. - pub weight: Weight, - /// Class of this transaction. - pub class: DispatchClass, - /// Does this transaction pay fees. 
- pub pays_fee: bool, + /// Weight of this transaction. + pub weight: Weight, + /// Class of this transaction. + pub class: DispatchClass, + /// Does this transaction pay fees. + pub pays_fee: bool, } /// Weight information that is only available post dispatch. #[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] pub struct PostDispatchInfo { - /// Actual weight consumed by a call or `None` which stands for the worst case static weight. - pub actual_weight: Option, + /// Actual weight consumed by a call or `None` which stands for the worst case static weight. + pub actual_weight: Option, } impl PostDispatchInfo { - /// Calculate how much (if any) weight was not used by the `Dispatchable`. - pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { - if let Some(actual_weight) = self.actual_weight { - if actual_weight >= info.weight { - 0 - } else { - info.weight - actual_weight - } - } else { - 0 - } - } + /// Calculate how much (if any) weight was not used by the `Dispatchable`. 
+ pub fn calc_unspent(&self, info: &DispatchInfo) -> Weight { + if let Some(actual_weight) = self.actual_weight { + if actual_weight >= info.weight { + 0 + } else { + info.weight - actual_weight + } + } else { + 0 + } + } } impl From> for PostDispatchInfo { - fn from(actual_weight: Option) -> Self { - Self { - actual_weight, - } - } + fn from(actual_weight: Option) -> Self { + Self { actual_weight } + } } impl From<()> for PostDispatchInfo { - fn from(_: ()) -> Self { - Self { - actual_weight: None, - } - } + fn from(_: ()) -> Self { + Self { + actual_weight: None, + } + } } impl sp_runtime::traits::Printable for PostDispatchInfo { - fn print(&self) { - "actual_weight=".print(); - match self.actual_weight { - Some(weight) => weight.print(), - None => "max-weight".print(), - } - } + fn print(&self) { + "actual_weight=".print(); + match self.actual_weight { + Some(weight) => weight.print(), + None => "max-weight".print(), + } + } } /// Allows easy conversion from `DispatchError` to `DispatchErrorWithPostInfo` for dispatchables /// that want to return a custom a posteriori weight on error. pub trait WithPostDispatchInfo { - /// Call this on your modules custom errors type in order to return a custom weight on error. - /// - /// # Example - /// - /// ```ignore - /// let who = ensure_signed(origin).map_err(|e| e.with_weight(100))?; - /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); - /// ``` - fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; -} - -impl WithPostDispatchInfo for T where - T: Into + /// Call this on your modules custom errors type in order to return a custom weight on error. 
+ /// + /// # Example + /// + /// ```ignore + /// let who = ensure_signed(origin).map_err(|e| e.with_weight(100))?; + /// ensure!(who == me, Error::::NotMe.with_weight(200_000)); + /// ``` + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo; +} + +impl WithPostDispatchInfo for T +where + T: Into, { - fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { - DispatchErrorWithPostInfo { - post_info: PostDispatchInfo { actual_weight: Some(actual_weight) }, - error: self.into(), - } - } + fn with_weight(self, actual_weight: Weight) -> DispatchErrorWithPostInfo { + DispatchErrorWithPostInfo { + post_info: PostDispatchInfo { + actual_weight: Some(actual_weight), + }, + error: self.into(), + } + } } /// A `Dispatchable` function (aka transaction) that can carry some static information along with /// it, using the `#[weight]` attribute. pub trait GetDispatchInfo { - /// Return a `DispatchInfo`, containing relevant information of this dispatch. - /// - /// This is done independently of its encoded size. - fn get_dispatch_info(&self) -> DispatchInfo; + /// Return a `DispatchInfo`, containing relevant information of this dispatch. + /// + /// This is done independently of its encoded size. + fn get_dispatch_info(&self) -> DispatchInfo; } /// Default type used with the `#[weight = x]` attribute in a substrate chain. @@ -257,60 +258,60 @@ pub trait GetDispatchInfo { /// the entire block resource limit. #[derive(Clone, Copy)] pub enum SimpleDispatchInfo { - /// A normal dispatch with fixed weight. - FixedNormal(Weight), - /// A normal dispatch with the maximum weight. - MaxNormal, - /// A normal dispatch with no weight. Base and bytes fees still need to be paid. - InsecureFreeNormal, - /// An operational dispatch with fixed weight. - FixedOperational(Weight), - /// An operational dispatch with the maximum weight. - MaxOperational, - /// A mandatory dispatch with fixed weight. 
- /// - /// NOTE: Signed transactions may not (directly) dispatch this kind of a call, so the other - /// attributes concerning transactability (e.g. priority, fee paying) are moot. - FixedMandatory(Weight), + /// A normal dispatch with fixed weight. + FixedNormal(Weight), + /// A normal dispatch with the maximum weight. + MaxNormal, + /// A normal dispatch with no weight. Base and bytes fees still need to be paid. + InsecureFreeNormal, + /// An operational dispatch with fixed weight. + FixedOperational(Weight), + /// An operational dispatch with the maximum weight. + MaxOperational, + /// A mandatory dispatch with fixed weight. + /// + /// NOTE: Signed transactions may not (directly) dispatch this kind of a call, so the other + /// attributes concerning transactability (e.g. priority, fee paying) are moot. + FixedMandatory(Weight), } impl WeighData for SimpleDispatchInfo { - fn weigh_data(&self, _: T) -> Weight { - match self { - SimpleDispatchInfo::FixedNormal(w) => *w, - SimpleDispatchInfo::MaxNormal => Bounded::max_value(), - SimpleDispatchInfo::InsecureFreeNormal => Bounded::min_value(), - SimpleDispatchInfo::FixedOperational(w) => *w, - SimpleDispatchInfo::MaxOperational => Bounded::max_value(), - SimpleDispatchInfo::FixedMandatory(w) => *w, - } - } + fn weigh_data(&self, _: T) -> Weight { + match self { + SimpleDispatchInfo::FixedNormal(w) => *w, + SimpleDispatchInfo::MaxNormal => Bounded::max_value(), + SimpleDispatchInfo::InsecureFreeNormal => Bounded::min_value(), + SimpleDispatchInfo::FixedOperational(w) => *w, + SimpleDispatchInfo::MaxOperational => Bounded::max_value(), + SimpleDispatchInfo::FixedMandatory(w) => *w, + } + } } impl ClassifyDispatch for SimpleDispatchInfo { - fn classify_dispatch(&self, _: T) -> DispatchClass { - DispatchClass::from(*self) - } + fn classify_dispatch(&self, _: T) -> DispatchClass { + DispatchClass::from(*self) + } } impl PaysFee for SimpleDispatchInfo { - fn pays_fee(&self, _: T) -> bool { - match self { - 
SimpleDispatchInfo::FixedNormal(_) => true, - SimpleDispatchInfo::MaxNormal => true, - SimpleDispatchInfo::InsecureFreeNormal => true, - SimpleDispatchInfo::FixedOperational(_) => true, - SimpleDispatchInfo::MaxOperational => true, - SimpleDispatchInfo::FixedMandatory(_) => true, - } - } + fn pays_fee(&self, _: T) -> bool { + match self { + SimpleDispatchInfo::FixedNormal(_) => true, + SimpleDispatchInfo::MaxNormal => true, + SimpleDispatchInfo::InsecureFreeNormal => true, + SimpleDispatchInfo::FixedOperational(_) => true, + SimpleDispatchInfo::MaxOperational => true, + SimpleDispatchInfo::FixedMandatory(_) => true, + } + } } impl SimpleDispatchInfo { - /// An _additive zero_ variant of SimpleDispatchInfo. - pub fn zero() -> Self { - Self::FixedNormal(0) - } + /// An _additive zero_ variant of SimpleDispatchInfo. + pub fn zero() -> Self { + Self::FixedNormal(0) + } } /// A struct to represent a weight which is a function of the input arguments. The given items have @@ -326,171 +327,185 @@ pub struct FunctionOf(pub WD, pub CD, pub PF); // `WeighData` as a raw value impl WeighData for FunctionOf { - fn weigh_data(&self, _: Args) -> Weight { - self.0 - } + fn weigh_data(&self, _: Args) -> Weight { + self.0 + } } // `WeighData` as a closure -impl WeighData for FunctionOf where - WD : Fn(Args) -> Weight +impl WeighData for FunctionOf +where + WD: Fn(Args) -> Weight, { - fn weigh_data(&self, args: Args) -> Weight { - (self.0)(args) - } + fn weigh_data(&self, args: Args) -> Weight { + (self.0)(args) + } } // `ClassifyDispatch` as a raw value impl ClassifyDispatch for FunctionOf { - fn classify_dispatch(&self, _: Args) -> DispatchClass { - self.1 - } + fn classify_dispatch(&self, _: Args) -> DispatchClass { + self.1 + } } // `ClassifyDispatch` as a raw value -impl ClassifyDispatch for FunctionOf where - CD : Fn(Args) -> DispatchClass +impl ClassifyDispatch for FunctionOf +where + CD: Fn(Args) -> DispatchClass, { - fn classify_dispatch(&self, args: Args) -> DispatchClass { 
- (self.1)(args) - } + fn classify_dispatch(&self, args: Args) -> DispatchClass { + (self.1)(args) + } } // `PaysFee` as a raw value impl PaysFee for FunctionOf { - fn pays_fee(&self, _: Args) -> bool { - self.2 - } + fn pays_fee(&self, _: Args) -> bool { + self.2 + } } // `PaysFee` as a closure -impl PaysFee for FunctionOf where - PF : Fn(Args) -> bool +impl PaysFee for FunctionOf +where + PF: Fn(Args) -> bool, { - fn pays_fee(&self, args: Args) -> bool { - (self.2)(args) - } + fn pays_fee(&self, args: Args) -> bool { + (self.2)(args) + } } /// Implementation for unchecked extrinsic. impl GetDispatchInfo - for UncheckedExtrinsic + for UncheckedExtrinsic where - Call: GetDispatchInfo, - Extra: SignedExtension, + Call: GetDispatchInfo, + Extra: SignedExtension, { - fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } + fn get_dispatch_info(&self) -> DispatchInfo { + self.function.get_dispatch_info() + } } /// Implementation for checked extrinsic. -impl GetDispatchInfo - for CheckedExtrinsic +impl GetDispatchInfo for CheckedExtrinsic where - Call: GetDispatchInfo, + Call: GetDispatchInfo, { - fn get_dispatch_info(&self) -> DispatchInfo { - self.function.get_dispatch_info() - } + fn get_dispatch_info(&self) -> DispatchInfo { + self.function.get_dispatch_info() + } } /// Implementation for test extrinsic. #[cfg(feature = "std")] impl GetDispatchInfo for sp_runtime::testing::TestXt { - fn get_dispatch_info(&self) -> DispatchInfo { - // for testing: weight == size. - DispatchInfo { - weight: self.encode().len() as _, - pays_fee: true, - ..Default::default() - } - } + fn get_dispatch_info(&self) -> DispatchInfo { + // for testing: weight == size. + DispatchInfo { + weight: self.encode().len() as _, + pays_fee: true, + ..Default::default() + } + } } /// The weight of database operations that the runtime can invoke. 
#[derive(Clone, Copy, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] pub struct RuntimeDbWeight { - pub read: Weight, - pub write: Weight, + pub read: Weight, + pub write: Weight, } impl RuntimeDbWeight { - pub fn reads(self, r: Weight) -> Weight { - self.read.saturating_mul(r) - } + pub fn reads(self, r: Weight) -> Weight { + self.read.saturating_mul(r) + } - pub fn writes(self, w: Weight) -> Weight { - self.write.saturating_mul(w) - } + pub fn writes(self, w: Weight) -> Weight { + self.write.saturating_mul(w) + } - pub fn reads_writes(self, r: Weight, w: Weight) -> Weight { - let read_weight = self.read.saturating_mul(r); - let write_weight = self.write.saturating_mul(w); - read_weight.saturating_add(write_weight) - } + pub fn reads_writes(self, r: Weight, w: Weight) -> Weight { + let read_weight = self.read.saturating_mul(r); + let write_weight = self.write.saturating_mul(w); + read_weight.saturating_add(write_weight) + } } #[cfg(test)] #[allow(dead_code)] mod tests { - use crate::{decl_module, parameter_types, traits::Get}; - use super::*; - - pub trait Trait { - type Origin; - type Balance; - type BlockNumber; - type DbWeight: Get; - } - - pub struct TraitImpl {} - - parameter_types! { - pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { - read: 100, - write: 1000, - }; - } - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - type Balance = u32; - type DbWeight = DbWeight; - } - - decl_module! 
{ - pub struct Module for enum Call where origin: T::Origin { - // no arguments, fixed weight - #[weight = SimpleDispatchInfo::FixedNormal(1000)] - fn f0(_origin) { unimplemented!(); } - - // weight = a x 10 + b - #[weight = FunctionOf(|args: (&u32, &u32)| (args.0 * 10 + args.1) as Weight, DispatchClass::Normal, true)] - fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } - - #[weight = FunctionOf(|_: (&u32, &u32)| 0, DispatchClass::Operational, true)] - fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } - - #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] - fn f2(_origin) { unimplemented!(); } - - #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] - fn f21(_origin) { unimplemented!(); } - - } - } - - #[test] - fn weights_are_correct() { - assert_eq!(Call::::f0().get_dispatch_info().weight, 1000); - assert_eq!(Call::::f11(10, 20).get_dispatch_info().weight, 120); - assert_eq!(Call::::f11(10, 20).get_dispatch_info().class, DispatchClass::Normal); - assert_eq!(Call::::f12(10, 20).get_dispatch_info().weight, 0); - assert_eq!(Call::::f12(10, 20).get_dispatch_info().class, DispatchClass::Operational); - assert_eq!(Call::::f2().get_dispatch_info().weight, 12300); - assert_eq!(Call::::f21().get_dispatch_info().weight, 45600); - assert_eq!(Call::::f2().get_dispatch_info().class, DispatchClass::Normal); - } + use super::*; + use crate::{decl_module, parameter_types, traits::Get}; + + pub trait Trait { + type Origin; + type Balance; + type BlockNumber; + type DbWeight: Get; + } + + pub struct TraitImpl {} + + parameter_types! { + pub const DbWeight: RuntimeDbWeight = RuntimeDbWeight { + read: 100, + write: 1000, + }; + } + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + type Balance = u32; + type DbWeight = DbWeight; + } + + decl_module! 
{ + pub struct Module for enum Call where origin: T::Origin { + // no arguments, fixed weight + #[weight = SimpleDispatchInfo::FixedNormal(1000)] + fn f0(_origin) { unimplemented!(); } + + // weight = a x 10 + b + #[weight = FunctionOf(|args: (&u32, &u32)| (args.0 * 10 + args.1) as Weight, DispatchClass::Normal, true)] + fn f11(_origin, _a: u32, _eb: u32) { unimplemented!(); } + + #[weight = FunctionOf(|_: (&u32, &u32)| 0, DispatchClass::Operational, true)] + fn f12(_origin, _a: u32, _eb: u32) { unimplemented!(); } + + #[weight = T::DbWeight::get().reads(3) + T::DbWeight::get().writes(2) + 10_000] + fn f2(_origin) { unimplemented!(); } + + #[weight = T::DbWeight::get().reads_writes(6, 5) + 40_000] + fn f21(_origin) { unimplemented!(); } + + } + } + + #[test] + fn weights_are_correct() { + assert_eq!(Call::::f0().get_dispatch_info().weight, 1000); + assert_eq!( + Call::::f11(10, 20).get_dispatch_info().weight, + 120 + ); + assert_eq!( + Call::::f11(10, 20).get_dispatch_info().class, + DispatchClass::Normal + ); + assert_eq!(Call::::f12(10, 20).get_dispatch_info().weight, 0); + assert_eq!( + Call::::f12(10, 20).get_dispatch_info().class, + DispatchClass::Operational + ); + assert_eq!(Call::::f2().get_dispatch_info().weight, 12300); + assert_eq!(Call::::f21().get_dispatch_info().weight, 45600); + assert_eq!( + Call::::f2().get_dispatch_info().class, + DispatchClass::Normal + ); + } } diff --git a/frame/support/test/src/lib.rs b/frame/support/test/src/lib.rs index f62f552268..5afaebf7c6 100644 --- a/frame/support/test/src/lib.rs +++ b/frame/support/test/src/lib.rs @@ -23,13 +23,13 @@ /// The configuration trait pub trait Trait { - /// The runtime origin type. - type Origin; - /// The block number type. - type BlockNumber; + /// The runtime origin type. + type Origin; + /// The block number type. + type BlockNumber; } frame_support::decl_module! 
{ - /// Some test module - pub struct Module for enum Call where origin: T::Origin {} + /// Some test module + pub struct Module for enum Call where origin: T::Origin {} } diff --git a/frame/support/test/tests/construct_runtime_ui.rs b/frame/support/test/tests/construct_runtime_ui.rs index acddf01f03..912b2f994e 100644 --- a/frame/support/test/tests/construct_runtime_ui.rs +++ b/frame/support/test/tests/construct_runtime_ui.rs @@ -2,9 +2,9 @@ use std::env; #[test] fn ui() { - // As trybuild is using `cargo check`, we don't need the real WASM binaries. - env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); - let t = trybuild::TestCases::new(); - t.compile_fail("tests/construct_runtime_ui/*.rs"); + let t = trybuild::TestCases::new(); + t.compile_fail("tests/construct_runtime_ui/*.rs"); } diff --git a/frame/support/test/tests/decl_error.rs b/frame/support/test/tests/decl_error.rs index cf50b009dd..e423f88ac3 100644 --- a/frame/support/test/tests/decl_error.rs +++ b/frame/support/test/tests/decl_error.rs @@ -14,68 +14,72 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_runtime::{generic, traits::{BlakeTwo256, Block as _, Verify}, DispatchError}; -use sp_core::{H256, sr25519}; use frame_support::weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}; +use sp_core::{sr25519, H256}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Block as _, Verify}, + DispatchError, +}; mod system; pub trait Currency {} mod module1 { - use super::*; - - pub trait Trait: system::Trait {} - - frame_support::decl_module! 
{ - pub struct Module, I: Instance = DefaultInstance> for enum Call - where origin: ::Origin - { - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { - Err(Error::::Something.into()) - } - } - } - - frame_support::decl_error! { - pub enum Error for Module, I: Instance> { - Something - } - } - - frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module {} - } + use super::*; + + pub trait Trait: system::Trait {} + + frame_support::decl_module! { + pub struct Module, I: Instance = DefaultInstance> for enum Call + where origin: ::Origin + { + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { + Err(Error::::Something.into()) + } + } + } + + frame_support::decl_error! { + pub enum Error for Module, I: Instance> { + Something + } + } + + frame_support::decl_storage! { + trait Store for Module, I: Instance=DefaultInstance> as Module {} + } } mod module2 { - use super::*; - - pub trait Trait: system::Trait {} - - frame_support::decl_module! { - pub struct Module for enum Call - where origin: ::Origin - { - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { - Err(Error::::Something.into()) - } - } - } - - frame_support::decl_error! { - pub enum Error for Module { - Something - } - } - - frame_support::decl_storage! { - trait Store for Module as Module {} - } + use super::*; + + pub trait Trait: system::Trait {} + + frame_support::decl_module! { + pub struct Module for enum Call + where origin: ::Origin + { + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + pub fn fail(_origin) -> frame_support::dispatch::DispatchResult { + Err(Error::::Something.into()) + } + } + } + + frame_support::decl_error! { + pub enum Error for Module { + Something + } + } + + frame_support::decl_storage! 
{ + trait Store for Module as Module {} + } } impl module1::Trait for Runtime {} @@ -88,12 +92,12 @@ pub type BlockNumber = u64; pub type Index = u64; impl system::Trait for Runtime { - type Hash = H256; - type Origin = Origin; - type BlockNumber = BlockNumber; - type AccountId = AccountId; - type Event = Event; - type ModuleToIndex = ModuleToIndex; + type Hash = H256; + type Origin = Origin; + type BlockNumber = BlockNumber; + type AccountId = AccountId; + type Event = Event; + type ModuleToIndex = ModuleToIndex; } frame_support::construct_runtime!( @@ -115,24 +119,36 @@ pub type UncheckedExtrinsic = generic::UncheckedExtrinsic::Root.into()), - Err(DispatchError::Module { index: 1, error: 0, message: Some("Something") }), - ); + assert_eq!( + Module1_1::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { + index: 1, + error: 0, + message: Some("Something") + }), + ); } #[test] fn check_module1_2_error_type() { - assert_eq!( - Module1_2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module { index: 3, error: 0, message: Some("Something") }), - ); + assert_eq!( + Module1_2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { + index: 3, + error: 0, + message: Some("Something") + }), + ); } #[test] fn check_module2_error_type() { - assert_eq!( - Module2::fail(system::Origin::::Root.into()), - Err(DispatchError::Module { index: 2, error: 0, message: Some("Something") }), - ); + assert_eq!( + Module2::fail(system::Origin::::Root.into()), + Err(DispatchError::Module { + index: 2, + error: 0, + message: Some("Something") + }), + ); } diff --git a/frame/support/test/tests/decl_storage.rs b/frame/support/test/tests/decl_storage.rs index ea9b09f9d7..47ee5423ca 100644 --- a/frame/support/test/tests/decl_storage.rs +++ b/frame/support/test/tests/decl_storage.rs @@ -18,605 +18,607 @@ // Do not complain about unused `dispatch` and `dispatch_aux`. 
#[allow(dead_code)] mod tests { - use frame_support::metadata::*; - use sp_io::TestExternalities; - use std::marker::PhantomData; - use codec::{Encode, Decode, EncodeLike}; - - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - pub trait Trait { - type Origin: Encode + Decode + EncodeLike + std::default::Default; - type BlockNumber; - } - - frame_support::decl_storage! { - trait Store for Module as TestStorage { - // non-getters: pub / $default - - /// Hello, this is doc! - U32: Option; - pub PUBU32: Option; - U32MYDEF: Option; - pub PUBU32MYDEF: Option; - - // getters: pub / $default - // we need at least one type which uses T, otherwise GenesisConfig will complain. - GETU32 get(fn u32_getter): T::Origin; - pub PUBGETU32 get(fn pub_u32_getter): u32; - GETU32WITHCONFIG get(fn u32_getter_with_config) config(): u32; - pub PUBGETU32WITHCONFIG get(fn pub_u32_getter_with_config) config(): u32; - GETU32MYDEF get(fn u32_getter_mydef): Option; - pub PUBGETU32MYDEF get(fn pub_u32_getter_mydef) config(): u32 = 3; - GETU32WITHCONFIGMYDEF get(fn u32_getter_with_config_mydef) config(): u32 = 2; - pub PUBGETU32WITHCONFIGMYDEF get(fn pub_u32_getter_with_config_mydef) config(): u32 = 1; - PUBGETU32WITHCONFIGMYDEFOPT get(fn pub_u32_getter_with_config_mydef_opt) config(): Option; - - GetU32WithBuilder get(fn u32_with_builder) build(|_| 1): u32; - GetOptU32WithBuilderSome get(fn opt_u32_with_builder_some) build(|_| Some(1)): Option; - GetOptU32WithBuilderNone get(fn opt_u32_with_builder_none) build(|_| None): Option; - - // map non-getters: pub / $default - MAPU32: map hasher(blake2_128_concat) u32 => Option; - pub PUBMAPU32: map hasher(blake2_128_concat) u32 => Option; - MAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; - pub PUBMAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; - - // map getters: pub / $default - GETMAPU32 get(fn map_u32_getter): map hasher(blake2_128_concat) u32 => String; - pub PUBGETMAPU32 get(fn 
pub_map_u32_getter): map hasher(blake2_128_concat) u32 => String; - - GETMAPU32MYDEF get(fn map_u32_getter_mydef): - map hasher(blake2_128_concat) u32 => String = "map".into(); - pub PUBGETMAPU32MYDEF get(fn pub_map_u32_getter_mydef): - map hasher(blake2_128_concat) u32 => String = "pubmap".into(); - - COMPLEXTYPE1: ::std::vec::Vec<::Origin>; - COMPLEXTYPE2: (Vec)>>, u32); - COMPLEXTYPE3: [u32; 25]; - } - add_extra_genesis { - build(|_| {}); - } - } - - struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - } - - const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - prefix: DecodeDifferent::Encode("TestStorage"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("U32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[ " Hello, this is doc!" 
]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBU32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("U32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBU32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Origin")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIG"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIG(PhantomData::)) - ), - documentation: 
DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIG"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIG(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32MYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETU32WITHCONFIGMYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEFOPT"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: 
DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETU32WITHCONFIGMYDEFOPT(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GetU32WithBuilder"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetU32WithBuilder(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GetOptU32WithBuilderSome"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetOptU32WithBuilderSome(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GetOptU32WithBuilderNone"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGetOptU32WithBuilderNone(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("MAPU32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBMAPU32"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: 
DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("MAPU32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBMAPU32MYDEF"), - modifier: StorageEntryModifier::Optional, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("GETMAPU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: 
DecodeDifferent::Encode("GETMAPU32MYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Blake2_128Concat, - key: DecodeDifferent::Encode("u32"), - value: DecodeDifferent::Encode("String"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructPUBGETMAPU32MYDEF(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE1"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("::std::vec::Vec<::Origin>")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE1(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE2"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("(Vec)>>, u32)")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE2(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("COMPLEXTYPE3"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("[u32; 25]")), - default: DecodeDifferent::Encode( - DefaultByteGetter(&__GetByteStructCOMPLEXTYPE3(PhantomData::)) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - ] - ), - }; - - #[test] - fn store_metadata() { - let 
metadata = Module::::storage_metadata(); - pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); - } - - #[test] - fn check_genesis_config() { - let config = GenesisConfig::default(); - assert_eq!(config.u32_getter_with_config, 0u32); - assert_eq!(config.pub_u32_getter_with_config, 0u32); - - assert_eq!(config.pub_u32_getter_mydef, 3u32); - assert_eq!(config.u32_getter_with_config_mydef, 2u32); - assert_eq!(config.pub_u32_getter_with_config_mydef, 1u32); - assert_eq!(config.pub_u32_getter_with_config_mydef_opt, 0u32); - } - - #[test] - fn check_builder_config() { - let config = GenesisConfig::default(); - let storage = config.build_storage().unwrap(); - TestExternalities::from(storage).execute_with(|| { - assert_eq!(Module::::u32_with_builder(), 1); - assert_eq!(Module::::opt_u32_with_builder_some(), Some(1)); - assert_eq!(Module::::opt_u32_with_builder_none(), None); - }) - } + use codec::{Decode, Encode, EncodeLike}; + use frame_support::metadata::*; + use sp_io::TestExternalities; + use std::marker::PhantomData; + + frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + pub trait Trait { + type Origin: Encode + Decode + EncodeLike + std::default::Default; + type BlockNumber; + } + + frame_support::decl_storage! { + trait Store for Module as TestStorage { + // non-getters: pub / $default + + /// Hello, this is doc! + U32: Option; + pub PUBU32: Option; + U32MYDEF: Option; + pub PUBU32MYDEF: Option; + + // getters: pub / $default + // we need at least one type which uses T, otherwise GenesisConfig will complain. 
+ GETU32 get(fn u32_getter): T::Origin; + pub PUBGETU32 get(fn pub_u32_getter): u32; + GETU32WITHCONFIG get(fn u32_getter_with_config) config(): u32; + pub PUBGETU32WITHCONFIG get(fn pub_u32_getter_with_config) config(): u32; + GETU32MYDEF get(fn u32_getter_mydef): Option; + pub PUBGETU32MYDEF get(fn pub_u32_getter_mydef) config(): u32 = 3; + GETU32WITHCONFIGMYDEF get(fn u32_getter_with_config_mydef) config(): u32 = 2; + pub PUBGETU32WITHCONFIGMYDEF get(fn pub_u32_getter_with_config_mydef) config(): u32 = 1; + PUBGETU32WITHCONFIGMYDEFOPT get(fn pub_u32_getter_with_config_mydef_opt) config(): Option; + + GetU32WithBuilder get(fn u32_with_builder) build(|_| 1): u32; + GetOptU32WithBuilderSome get(fn opt_u32_with_builder_some) build(|_| Some(1)): Option; + GetOptU32WithBuilderNone get(fn opt_u32_with_builder_none) build(|_| None): Option; + + // map non-getters: pub / $default + MAPU32: map hasher(blake2_128_concat) u32 => Option; + pub PUBMAPU32: map hasher(blake2_128_concat) u32 => Option; + MAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; + pub PUBMAPU32MYDEF: map hasher(blake2_128_concat) u32 => Option; + + // map getters: pub / $default + GETMAPU32 get(fn map_u32_getter): map hasher(blake2_128_concat) u32 => String; + pub PUBGETMAPU32 get(fn pub_map_u32_getter): map hasher(blake2_128_concat) u32 => String; + + GETMAPU32MYDEF get(fn map_u32_getter_mydef): + map hasher(blake2_128_concat) u32 => String = "map".into(); + pub PUBGETMAPU32MYDEF get(fn pub_map_u32_getter_mydef): + map hasher(blake2_128_concat) u32 => String = "pubmap".into(); + + COMPLEXTYPE1: ::std::vec::Vec<::Origin>; + COMPLEXTYPE2: (Vec)>>, u32); + COMPLEXTYPE3: [u32; 25]; + } + add_extra_genesis { + build(|_| {}); + } + } + + struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + } + + const EXPECTED_METADATA: StorageMetadata = StorageMetadata { + prefix: DecodeDifferent::Encode("TestStorage"), + entries: DecodeDifferent::Encode(&[ + 
StorageEntryMetadata { + name: DecodeDifferent::Encode("U32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[" Hello, this is doc!"]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBU32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("U32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBU32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Origin")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBGETU32( + PhantomData::, + ))), + documentation: 
DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32WITHCONFIG"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETU32WITHCONFIG(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIG"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIG(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32MYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETU32WITHCONFIGMYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETU32WITHCONFIGMYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: 
DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIGMYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETU32WITHCONFIGMYDEFOPT"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETU32WITHCONFIGMYDEFOPT(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GetU32WithBuilder"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGetU32WithBuilder(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GetOptU32WithBuilderSome"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGetOptU32WithBuilderSome(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GetOptU32WithBuilderNone"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("u32")), + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGetOptU32WithBuilderNone(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("MAPU32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructMAPU32( + PhantomData::, + ))), + 
documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBMAPU32"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("MAPU32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructMAPU32MYDEF( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBMAPU32MYDEF"), + modifier: StorageEntryModifier::Optional, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETMAPU32"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructGETMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETMAPU32"), + modifier: StorageEntryModifier::Default, + ty: 
StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructPUBGETMAPU32( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("GETMAPU32MYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructGETMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("PUBGETMAPU32MYDEF"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Blake2_128Concat, + key: DecodeDifferent::Encode("u32"), + value: DecodeDifferent::Encode("String"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &__GetByteStructPUBGETMAPU32MYDEF(PhantomData::), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE1"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode( + "::std::vec::Vec<::Origin>", + )), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE1( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("COMPLEXTYPE2"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode( + "(Vec)>>, u32)", + )), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE2( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata 
{ + name: DecodeDifferent::Encode("COMPLEXTYPE3"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("[u32; 25]")), + default: DecodeDifferent::Encode(DefaultByteGetter(&__GetByteStructCOMPLEXTYPE3( + PhantomData::, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), + }; + + #[test] + fn store_metadata() { + let metadata = Module::::storage_metadata(); + pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); + } + + #[test] + fn check_genesis_config() { + let config = GenesisConfig::default(); + assert_eq!(config.u32_getter_with_config, 0u32); + assert_eq!(config.pub_u32_getter_with_config, 0u32); + + assert_eq!(config.pub_u32_getter_mydef, 3u32); + assert_eq!(config.u32_getter_with_config_mydef, 2u32); + assert_eq!(config.pub_u32_getter_with_config_mydef, 1u32); + assert_eq!(config.pub_u32_getter_with_config_mydef_opt, 0u32); + } + + #[test] + fn check_builder_config() { + let config = GenesisConfig::default(); + let storage = config.build_storage().unwrap(); + TestExternalities::from(storage).execute_with(|| { + assert_eq!(Module::::u32_with_builder(), 1); + assert_eq!(Module::::opt_u32_with_builder_some(), Some(1)); + assert_eq!(Module::::opt_u32_with_builder_none(), None); + }) + } } #[cfg(test)] #[allow(dead_code)] mod test2 { - pub trait Trait { - type Origin; - type BlockNumber; - } - - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - type PairOf = (T, T); - - frame_support::decl_storage! { - trait Store for Module as TestStorage { - SingleDef : u32; - PairDef : PairOf; - Single : Option; - Pair : (u32, u32); - } - add_extra_genesis { - config(_marker) : ::std::marker::PhantomData; - config(extra_field) : u32 = 32; - build(|_| {}); - } - } - - struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - } + pub trait Trait { + type Origin; + type BlockNumber; + } + + frame_support::decl_module! 
{ + pub struct Module for enum Call where origin: T::Origin {} + } + + type PairOf = (T, T); + + frame_support::decl_storage! { + trait Store for Module as TestStorage { + SingleDef : u32; + PairDef : PairOf; + Single : Option; + Pair : (u32, u32); + } + add_extra_genesis { + config(_marker) : ::std::marker::PhantomData; + config(extra_field) : u32 = 32; + build(|_| {}); + } + } + + struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + } } #[cfg(test)] #[allow(dead_code)] mod test3 { - pub trait Trait { - type Origin; - type BlockNumber; - } - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - frame_support::decl_storage! { - trait Store for Module as Test { - Foo get(fn foo) config(initial_foo): u32; - } - } - - type PairOf = (T, T); - - struct TraitImpl {} - - impl Trait for TraitImpl { - type Origin = u32; - type BlockNumber = u32; - } + pub trait Trait { + type Origin; + type BlockNumber; + } + frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + frame_support::decl_storage! { + trait Store for Module as Test { + Foo get(fn foo) config(initial_foo): u32; + } + } + + type PairOf = (T, T); + + struct TraitImpl {} + + impl Trait for TraitImpl { + type Origin = u32; + type BlockNumber = u32; + } } #[cfg(test)] #[allow(dead_code)] mod test_append_and_len { - use sp_io::TestExternalities; - use codec::{Encode, Decode}; - - pub trait Trait { - type Origin; - type BlockNumber; - } - - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - #[derive(PartialEq, Eq, Clone, Encode, Decode)] - struct NoDef(u32); - - frame_support::decl_storage! 
{ - trait Store for Module as Test { - NoDefault: Option; - - JustVec: Vec; - JustVecWithDefault: Vec = vec![6, 9]; - OptionVec: Option>; - - MapVec: map hasher(blake2_128_concat) u32 => Vec; - MapVecWithDefault: map hasher(blake2_128_concat) u32 => Vec = vec![6, 9]; - OptionMapVec: map hasher(blake2_128_concat) u32 => Option>; - - DoubleMapVec: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Vec; - DoubleMapVecWithDefault: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Vec = vec![6, 9]; - OptionDoubleMapVec: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Option>; - } - } - - struct Test {} - - impl Trait for Test { - type Origin = u32; - type BlockNumber = u32; - } - - #[test] - fn default_for_option() { - TestExternalities::default().execute_with(|| { - assert_eq!(OptionVec::get(), None); - assert_eq!(JustVec::get(), vec![]); - }); - } - - #[test] - fn append_works() { - TestExternalities::default().execute_with(|| { - let _ = MapVec::append(1, [1, 2, 3].iter()); - let _ = MapVec::append(1, [4, 5].iter()); - assert_eq!(MapVec::get(1), vec![1, 2, 3, 4, 5]); - - let _ = JustVec::append([1, 2, 3].iter()); - let _ = JustVec::append([4, 5].iter()); - assert_eq!(JustVec::get(), vec![1, 2, 3, 4, 5]); - }); - } - - #[test] - fn append_works_for_default() { - TestExternalities::default().execute_with(|| { - assert_eq!(JustVecWithDefault::get(), vec![6, 9]); - let _ = JustVecWithDefault::append([1].iter()); - assert_eq!(JustVecWithDefault::get(), vec![6, 9, 1]); - - assert_eq!(MapVecWithDefault::get(0), vec![6, 9]); - let _ = MapVecWithDefault::append(0, [1].iter()); - assert_eq!(MapVecWithDefault::get(0), vec![6, 9, 1]); - - assert_eq!(OptionVec::get(), None); - let _ = OptionVec::append([1].iter()); - assert_eq!(OptionVec::get(), Some(vec![1])); - }); - } - - #[test] - fn append_or_put_works() { - TestExternalities::default().execute_with(|| { - let _ = MapVec::append_or_insert(1, &[1, 2, 
3][..]); - let _ = MapVec::append_or_insert(1, &[4, 5][..]); - assert_eq!(MapVec::get(1), vec![1, 2, 3, 4, 5]); - - let _ = JustVec::append_or_put(&[1, 2, 3][..]); - let _ = JustVec::append_or_put(&[4, 5][..]); - assert_eq!(JustVec::get(), vec![1, 2, 3, 4, 5]); - - let _ = OptionVec::append_or_put(&[1, 2, 3][..]); - let _ = OptionVec::append_or_put(&[4, 5][..]); - assert_eq!(OptionVec::get(), Some(vec![1, 2, 3, 4, 5])); - }); - } - - #[test] - fn len_works() { - TestExternalities::default().execute_with(|| { - JustVec::put(&vec![1, 2, 3, 4]); - OptionVec::put(&vec![1, 2, 3, 4, 5]); - MapVec::insert(1, &vec![1, 2, 3, 4, 5, 6]); - DoubleMapVec::insert(0, 1, &vec![1, 2]); - - assert_eq!(JustVec::decode_len().unwrap(), 4); - assert_eq!(OptionVec::decode_len().unwrap(), 5); - assert_eq!(MapVec::decode_len(1).unwrap(), 6); - assert_eq!(DoubleMapVec::decode_len(0, 1).unwrap(), 2); - }); - } - - #[test] - fn len_works_for_default() { - TestExternalities::default().execute_with(|| { - // vec - assert_eq!(JustVec::get(), vec![]); - assert_eq!(JustVec::decode_len(), Ok(0)); - - assert_eq!(JustVecWithDefault::get(), vec![6, 9]); - assert_eq!(JustVecWithDefault::decode_len(), Ok(2)); - - assert_eq!(OptionVec::get(), None); - assert_eq!(OptionVec::decode_len(), Ok(0)); - - // map - assert_eq!(MapVec::get(0), vec![]); - assert_eq!(MapVec::decode_len(0), Ok(0)); - - assert_eq!(MapVecWithDefault::get(0), vec![6, 9]); - assert_eq!(MapVecWithDefault::decode_len(0), Ok(2)); - - assert_eq!(OptionMapVec::get(0), None); - assert_eq!(OptionMapVec::decode_len(0), Ok(0)); - - // Double map - assert_eq!(DoubleMapVec::get(0, 0), vec![]); - assert_eq!(DoubleMapVec::decode_len(0, 1), Ok(0)); - - assert_eq!(DoubleMapVecWithDefault::get(0, 0), vec![6, 9]); - assert_eq!(DoubleMapVecWithDefault::decode_len(0, 1), Ok(2)); - - assert_eq!(OptionDoubleMapVec::get(0, 0), None); - assert_eq!(OptionDoubleMapVec::decode_len(0, 1), Ok(0)); - }); - } + use codec::{Decode, Encode}; + use 
sp_io::TestExternalities; + + pub trait Trait { + type Origin; + type BlockNumber; + } + + frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + #[derive(PartialEq, Eq, Clone, Encode, Decode)] + struct NoDef(u32); + + frame_support::decl_storage! { + trait Store for Module as Test { + NoDefault: Option; + + JustVec: Vec; + JustVecWithDefault: Vec = vec![6, 9]; + OptionVec: Option>; + + MapVec: map hasher(blake2_128_concat) u32 => Vec; + MapVecWithDefault: map hasher(blake2_128_concat) u32 => Vec = vec![6, 9]; + OptionMapVec: map hasher(blake2_128_concat) u32 => Option>; + + DoubleMapVec: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Vec; + DoubleMapVecWithDefault: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Vec = vec![6, 9]; + OptionDoubleMapVec: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => Option>; + } + } + + struct Test {} + + impl Trait for Test { + type Origin = u32; + type BlockNumber = u32; + } + + #[test] + fn default_for_option() { + TestExternalities::default().execute_with(|| { + assert_eq!(OptionVec::get(), None); + assert_eq!(JustVec::get(), vec![]); + }); + } + + #[test] + fn append_works() { + TestExternalities::default().execute_with(|| { + let _ = MapVec::append(1, [1, 2, 3].iter()); + let _ = MapVec::append(1, [4, 5].iter()); + assert_eq!(MapVec::get(1), vec![1, 2, 3, 4, 5]); + + let _ = JustVec::append([1, 2, 3].iter()); + let _ = JustVec::append([4, 5].iter()); + assert_eq!(JustVec::get(), vec![1, 2, 3, 4, 5]); + }); + } + + #[test] + fn append_works_for_default() { + TestExternalities::default().execute_with(|| { + assert_eq!(JustVecWithDefault::get(), vec![6, 9]); + let _ = JustVecWithDefault::append([1].iter()); + assert_eq!(JustVecWithDefault::get(), vec![6, 9, 1]); + + assert_eq!(MapVecWithDefault::get(0), vec![6, 9]); + let _ = MapVecWithDefault::append(0, [1].iter()); + 
assert_eq!(MapVecWithDefault::get(0), vec![6, 9, 1]); + + assert_eq!(OptionVec::get(), None); + let _ = OptionVec::append([1].iter()); + assert_eq!(OptionVec::get(), Some(vec![1])); + }); + } + + #[test] + fn append_or_put_works() { + TestExternalities::default().execute_with(|| { + let _ = MapVec::append_or_insert(1, &[1, 2, 3][..]); + let _ = MapVec::append_or_insert(1, &[4, 5][..]); + assert_eq!(MapVec::get(1), vec![1, 2, 3, 4, 5]); + + let _ = JustVec::append_or_put(&[1, 2, 3][..]); + let _ = JustVec::append_or_put(&[4, 5][..]); + assert_eq!(JustVec::get(), vec![1, 2, 3, 4, 5]); + + let _ = OptionVec::append_or_put(&[1, 2, 3][..]); + let _ = OptionVec::append_or_put(&[4, 5][..]); + assert_eq!(OptionVec::get(), Some(vec![1, 2, 3, 4, 5])); + }); + } + + #[test] + fn len_works() { + TestExternalities::default().execute_with(|| { + JustVec::put(&vec![1, 2, 3, 4]); + OptionVec::put(&vec![1, 2, 3, 4, 5]); + MapVec::insert(1, &vec![1, 2, 3, 4, 5, 6]); + DoubleMapVec::insert(0, 1, &vec![1, 2]); + + assert_eq!(JustVec::decode_len().unwrap(), 4); + assert_eq!(OptionVec::decode_len().unwrap(), 5); + assert_eq!(MapVec::decode_len(1).unwrap(), 6); + assert_eq!(DoubleMapVec::decode_len(0, 1).unwrap(), 2); + }); + } + + #[test] + fn len_works_for_default() { + TestExternalities::default().execute_with(|| { + // vec + assert_eq!(JustVec::get(), vec![]); + assert_eq!(JustVec::decode_len(), Ok(0)); + + assert_eq!(JustVecWithDefault::get(), vec![6, 9]); + assert_eq!(JustVecWithDefault::decode_len(), Ok(2)); + + assert_eq!(OptionVec::get(), None); + assert_eq!(OptionVec::decode_len(), Ok(0)); + + // map + assert_eq!(MapVec::get(0), vec![]); + assert_eq!(MapVec::decode_len(0), Ok(0)); + + assert_eq!(MapVecWithDefault::get(0), vec![6, 9]); + assert_eq!(MapVecWithDefault::decode_len(0), Ok(2)); + + assert_eq!(OptionMapVec::get(0), None); + assert_eq!(OptionMapVec::decode_len(0), Ok(0)); + + // Double map + assert_eq!(DoubleMapVec::get(0, 0), vec![]); + 
assert_eq!(DoubleMapVec::decode_len(0, 1), Ok(0)); + + assert_eq!(DoubleMapVecWithDefault::get(0, 0), vec![6, 9]); + assert_eq!(DoubleMapVecWithDefault::decode_len(0, 1), Ok(2)); + + assert_eq!(OptionDoubleMapVec::get(0, 0), None); + assert_eq!(OptionDoubleMapVec::decode_len(0, 1), Ok(0)); + }); + } } diff --git a/frame/support/test/tests/decl_storage_ui.rs b/frame/support/test/tests/decl_storage_ui.rs index 3aee5e9866..a507007a16 100644 --- a/frame/support/test/tests/decl_storage_ui.rs +++ b/frame/support/test/tests/decl_storage_ui.rs @@ -16,9 +16,9 @@ #[test] fn decl_storage_ui() { - // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + // As trybuild is using `cargo check`, we don't need the real WASM binaries. + std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); - let t = trybuild::TestCases::new(); - t.compile_fail("tests/decl_storage_ui/*.rs"); + let t = trybuild::TestCases::new(); + t.compile_fail("tests/decl_storage_ui/*.rs"); } diff --git a/frame/support/test/tests/final_keys.rs b/frame/support/test/tests/final_keys.rs index ae23c5a64c..733140a1b3 100644 --- a/frame/support/test/tests/final_keys.rs +++ b/frame/support/test/tests/final_keys.rs @@ -14,184 +14,211 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use frame_support::storage::unhashed; use codec::Encode; -use frame_support::{StorageDoubleMap, StorageMap, StorageValue, StoragePrefixedMap}; -use sp_io::{TestExternalities, hashing::{twox_64, twox_128, blake2_128}}; +use frame_support::storage::unhashed; +use frame_support::{StorageDoubleMap, StorageMap, StoragePrefixedMap, StorageValue}; +use sp_io::{ + hashing::{blake2_128, twox_128, twox_64}, + TestExternalities, +}; mod no_instance { - use codec::{Encode, Decode, EncodeLike}; + use codec::{Decode, Encode, EncodeLike}; - pub trait Trait { - type Origin; - type BlockNumber: Encode + Decode + EncodeLike + Default + Clone; - } + pub trait Trait { + type Origin; + type BlockNumber: Encode + Decode + EncodeLike + Default + Clone; + } - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } + frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } - frame_support::decl_storage!{ - trait Store for Module as FinalKeysNone { - pub Value config(value): u32; + frame_support::decl_storage! 
{ + trait Store for Module as FinalKeysNone { + pub Value config(value): u32; - pub Map: map hasher(blake2_128_concat) u32 => u32; - pub Map2: map hasher(twox_64_concat) u32 => u32; + pub Map: map hasher(blake2_128_concat) u32 => u32; + pub Map2: map hasher(twox_64_concat) u32 => u32; - pub DoubleMap: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => u32; - pub DoubleMap2: double_map hasher(twox_64_concat) u32, hasher(twox_64_concat) u32 => u32; + pub DoubleMap: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => u32; + pub DoubleMap2: double_map hasher(twox_64_concat) u32, hasher(twox_64_concat) u32 => u32; - pub TestGenericValue get(fn test_generic_value) config(): Option; - pub TestGenericDoubleMap get(fn foo2) config(test_generic_double_map): - double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Option; - } - } + pub TestGenericValue get(fn test_generic_value) config(): Option; + pub TestGenericDoubleMap get(fn foo2) config(test_generic_double_map): + double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Option; + } + } } mod instance { - pub trait Trait: super::no_instance::Trait {} - - frame_support::decl_module! 
{ - pub struct Module, I: Instantiable = DefaultInstance> - for enum Call where origin: T::Origin {} - } - - frame_support::decl_storage!{ - trait Store for Module, I: Instantiable = DefaultInstance> - as FinalKeysSome - { - pub Value config(value): u32; - - pub Map: map hasher(blake2_128_concat) u32 => u32; - pub Map2: map hasher(twox_64_concat) u32 => u32; - - pub DoubleMap: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => u32; - pub DoubleMap2: double_map hasher(twox_64_concat) u32, hasher(twox_64_concat) u32 => u32; - - pub TestGenericValue get(fn test_generic_value) config(): Option; - pub TestGenericDoubleMap get(fn foo2) config(test_generic_double_map): - double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Option; - } - add_extra_genesis { - // See `decl_storage` limitation. - config(phantom): core::marker::PhantomData; - } - } + pub trait Trait: super::no_instance::Trait {} + + frame_support::decl_module! { + pub struct Module, I: Instantiable = DefaultInstance> + for enum Call where origin: T::Origin {} + } + + frame_support::decl_storage! { + trait Store for Module, I: Instantiable = DefaultInstance> + as FinalKeysSome + { + pub Value config(value): u32; + + pub Map: map hasher(blake2_128_concat) u32 => u32; + pub Map2: map hasher(twox_64_concat) u32 => u32; + + pub DoubleMap: double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) u32 => u32; + pub DoubleMap2: double_map hasher(twox_64_concat) u32, hasher(twox_64_concat) u32 => u32; + + pub TestGenericValue get(fn test_generic_value) config(): Option; + pub TestGenericDoubleMap get(fn foo2) config(test_generic_double_map): + double_map hasher(blake2_128_concat) u32, hasher(blake2_128_concat) T::BlockNumber => Option; + } + add_extra_genesis { + // See `decl_storage` limitation. 
+ config(phantom): core::marker::PhantomData; + } + } } fn twox_64_concat(d: &[u8]) -> Vec { - let mut v = twox_64(d).to_vec(); - v.extend_from_slice(d); - v + let mut v = twox_64(d).to_vec(); + v.extend_from_slice(d); + v } fn blake2_128_concat(d: &[u8]) -> Vec { - let mut v = blake2_128(d).to_vec(); - v.extend_from_slice(d); - v + let mut v = blake2_128(d).to_vec(); + v.extend_from_slice(d); + v } #[test] fn final_keys_no_instance() { - TestExternalities::default().execute_with(|| { - no_instance::Value::put(1); - let k = [twox_128(b"FinalKeysNone"), twox_128(b"Value")].concat(); - assert_eq!(unhashed::get::(&k), Some(1u32)); - - no_instance::Map::insert(1, 2); - let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"Map")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(&k[..32], &::final_prefix()); - - no_instance::Map2::insert(1, 2); - let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"Map2")].concat(); - k.extend(1u32.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(&k[..32], &::final_prefix()); - - no_instance::DoubleMap::insert(&1, &2, &3); - let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"DoubleMap")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - k.extend(2u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(3u32)); - assert_eq!(&k[..32], &::final_prefix()); - - no_instance::DoubleMap2::insert(&1, &2, &3); - let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"DoubleMap2")].concat(); - k.extend(1u32.using_encoded(twox_64_concat)); - k.extend(2u32.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u32)); - assert_eq!(&k[..32], &::final_prefix()); - }); + TestExternalities::default().execute_with(|| { + no_instance::Value::put(1); + let k = [twox_128(b"FinalKeysNone"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + no_instance::Map::insert(1, 2); + 
let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"Map")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &::final_prefix()); + + no_instance::Map2::insert(1, 2); + let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"Map2")].concat(); + k.extend(1u32.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!(&k[..32], &::final_prefix()); + + no_instance::DoubleMap::insert(&1, &2, &3); + let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"DoubleMap")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &::final_prefix()); + + no_instance::DoubleMap2::insert(&1, &2, &3); + let mut k = [twox_128(b"FinalKeysNone"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u32.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!(&k[..32], &::final_prefix()); + }); } #[test] fn final_keys_default_instance() { - TestExternalities::default().execute_with(|| { - >::put(1); - let k = [twox_128(b"FinalKeysSome"), twox_128(b"Value")].concat(); - assert_eq!(unhashed::get::(&k), Some(1u32)); - - >::insert(1, 2); - let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"Map")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(&k[..32], &>::final_prefix()); - - >::insert(1, 2); - let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"Map2")].concat(); - k.extend(1u32.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(&k[..32], &>::final_prefix()); - - >::insert(&1, &2, &3); - let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"DoubleMap")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - k.extend(2u32.using_encoded(blake2_128_concat)); - 
assert_eq!(unhashed::get::(&k), Some(3u32)); - assert_eq!(&k[..32], &>::final_prefix()); - - >::insert(&1, &2, &3); - let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"DoubleMap2")].concat(); - k.extend(1u32.using_encoded(twox_64_concat)); - k.extend(2u32.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u32)); - assert_eq!(&k[..32], &>::final_prefix()); - }); + TestExternalities::default().execute_with(|| { + >::put(1); + let k = [twox_128(b"FinalKeysSome"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + >::insert(1, 2); + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"Map")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + + >::insert(1, 2); + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"Map2")].concat(); + k.extend(1u32.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"DoubleMap")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"FinalKeysSome"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u32.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + }); } #[test] fn final_keys_instance_2() { - TestExternalities::default().execute_with(|| { - >::put(1); - let k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Value")].concat(); - assert_eq!(unhashed::get::(&k), Some(1u32)); - - >::insert(1, 2); - let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Map")].concat(); - 
k.extend(1u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(&k[..32], &>::final_prefix()); - - >::insert(1, 2); - let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Map2")].concat(); - k.extend(1u32.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(2u32)); - assert_eq!(&k[..32], &>::final_prefix()); - - >::insert(&1, &2, &3); - let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"DoubleMap")].concat(); - k.extend(1u32.using_encoded(blake2_128_concat)); - k.extend(2u32.using_encoded(blake2_128_concat)); - assert_eq!(unhashed::get::(&k), Some(3u32)); - assert_eq!(&k[..32], &>::final_prefix()); - - >::insert(&1, &2, &3); - let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"DoubleMap2")].concat(); - k.extend(1u32.using_encoded(twox_64_concat)); - k.extend(2u32.using_encoded(twox_64_concat)); - assert_eq!(unhashed::get::(&k), Some(3u32)); - assert_eq!(&k[..32], &>::final_prefix()); - }); + TestExternalities::default().execute_with(|| { + >::put(1); + let k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Value")].concat(); + assert_eq!(unhashed::get::(&k), Some(1u32)); + + >::insert(1, 2); + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Map")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + + >::insert(1, 2); + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"Map2")].concat(); + k.extend(1u32.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(2u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + + >::insert(&1, &2, &3); + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"DoubleMap")].concat(); + k.extend(1u32.using_encoded(blake2_128_concat)); + k.extend(2u32.using_encoded(blake2_128_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + + 
>::insert(&1, &2, &3); + let mut k = [twox_128(b"Instance2FinalKeysSome"), twox_128(b"DoubleMap2")].concat(); + k.extend(1u32.using_encoded(twox_64_concat)); + k.extend(2u32.using_encoded(twox_64_concat)); + assert_eq!(unhashed::get::(&k), Some(3u32)); + assert_eq!( + &k[..32], + &>::final_prefix() + ); + }); } diff --git a/frame/support/test/tests/genesisconfig.rs b/frame/support/test/tests/genesisconfig.rs index bccffb7374..bda3dbda68 100644 --- a/frame/support/test/tests/genesisconfig.rs +++ b/frame/support/test/tests/genesisconfig.rs @@ -15,30 +15,30 @@ // along with Substrate. If not, see . pub trait Trait { - type BlockNumber: codec::Codec + codec::EncodeLike + Default; - type Origin; + type BlockNumber: codec::Codec + codec::EncodeLike + Default; + type Origin; } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } frame_support::decl_storage! { - trait Store for Module as Test { - pub AppendableDM config(t): double_map hasher(identity) u32, hasher(identity) T::BlockNumber => Vec; - } + trait Store for Module as Test { + pub AppendableDM config(t): double_map hasher(identity) u32, hasher(identity) T::BlockNumber => Vec; + } } struct Test; impl Trait for Test { - type BlockNumber = u32; - type Origin = (); + type BlockNumber = u32; + type Origin = (); } #[test] fn init_genesis_config() { - GenesisConfig:: { - t: Default::default(), - }; + GenesisConfig:: { + t: Default::default(), + }; } diff --git a/frame/support/test/tests/instance.rs b/frame/support/test/tests/instance.rs index 00b110ffb9..b5a3515e70 100644 --- a/frame/support/test/tests/instance.rs +++ b/frame/support/test/tests/instance.rs @@ -14,20 +14,25 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-#![recursion_limit="128"] +#![recursion_limit = "128"] -use sp_runtime::{generic, BuildStorage, traits::{BlakeTwo256, Block as _, Verify}}; use frame_support::{ - Parameter, traits::Get, parameter_types, - metadata::{ - DecodeDifferent, StorageMetadata, StorageEntryModifier, StorageEntryType, DefaultByteGetter, - StorageEntryMetadata, StorageHasher, - }, - weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, - StorageValue, StorageMap, StorageDoubleMap, + metadata::{ + DecodeDifferent, DefaultByteGetter, StorageEntryMetadata, StorageEntryModifier, + StorageEntryType, StorageHasher, StorageMetadata, + }, + parameter_types, + traits::Get, + weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, + Parameter, StorageDoubleMap, StorageMap, StorageValue, +}; +use sp_core::{sr25519, H256}; +use sp_inherents::{InherentData, InherentIdentifier, MakeFatalError, ProvideInherent}; +use sp_runtime::{ + generic, + traits::{BlakeTwo256, Block as _, Verify}, + BuildStorage, }; -use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier, MakeFatalError}; -use sp_core::{H256, sr25519}; mod system; @@ -38,193 +43,206 @@ pub trait Currency {} // * Custom InstantiableTrait // * Origin, Inherent, Event mod module1 { - use super::*; - - pub trait Trait: system::Trait where ::BlockNumber: From { - type Event: From> + Into<::Event>; - type Origin: From>; - type SomeParameter: Get; - type GenericType: Default + Clone + codec::Codec + codec::EncodeLike; - } - - frame_support::decl_module! { - pub struct Module, I: InstantiableThing> for enum Call where - origin: ::Origin, - T::BlockNumber: From - { - fn offchain_worker() {} - - fn deposit_event() = default; - - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn one(origin) { - system::ensure_root(origin)?; - Self::deposit_event(RawEvent::AnotherVariant(3)); - } - } - } - - frame_support::decl_storage! 
{ - trait Store for Module, I: InstantiableThing> as Module1 where - T::BlockNumber: From + std::fmt::Display - { - pub Value config(value): T::GenericType; - pub Map: map hasher(identity) u32 => u64; - } - - add_extra_genesis { - config(test) : T::BlockNumber; - build(|config: &Self| { - println!("{}", config.test); - }); - } - } - - frame_support::decl_event! { - pub enum Event where Phantom = std::marker::PhantomData { - _Phantom(Phantom), - AnotherVariant(u32), - } - } - - #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] - pub enum Origin, I> where T::BlockNumber: From { - Members(u32), - _Phantom(std::marker::PhantomData<(T, I)>), - } - - pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - - impl, I: InstantiableThing> ProvideInherent for Module where - T::BlockNumber: From - { - type Call = Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_data: &InherentData) -> Option { - unimplemented!(); - } - - fn check_inherent(_: &Self::Call, _: &InherentData) -> std::result::Result<(), Self::Error> { - unimplemented!(); - } - } + use super::*; + + pub trait Trait: system::Trait + where + ::BlockNumber: From, + { + type Event: From> + Into<::Event>; + type Origin: From>; + type SomeParameter: Get; + type GenericType: Default + Clone + codec::Codec + codec::EncodeLike; + } + + frame_support::decl_module! { + pub struct Module, I: InstantiableThing> for enum Call where + origin: ::Origin, + T::BlockNumber: From + { + fn offchain_worker() {} + + fn deposit_event() = default; + + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn one(origin) { + system::ensure_root(origin)?; + Self::deposit_event(RawEvent::AnotherVariant(3)); + } + } + } + + frame_support::decl_storage! 
{ + trait Store for Module, I: InstantiableThing> as Module1 where + T::BlockNumber: From + std::fmt::Display + { + pub Value config(value): T::GenericType; + pub Map: map hasher(identity) u32 => u64; + } + + add_extra_genesis { + config(test) : T::BlockNumber; + build(|config: &Self| { + println!("{}", config.test); + }); + } + } + + frame_support::decl_event! { + pub enum Event where Phantom = std::marker::PhantomData { + _Phantom(Phantom), + AnotherVariant(u32), + } + } + + #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] + pub enum Origin, I> + where + T::BlockNumber: From, + { + Members(u32), + _Phantom(std::marker::PhantomData<(T, I)>), + } + + pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; + + impl, I: InstantiableThing> ProvideInherent for Module + where + T::BlockNumber: From, + { + type Call = Call; + type Error = MakeFatalError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &InherentData) -> Option { + unimplemented!(); + } + + fn check_inherent( + _: &Self::Call, + _: &InherentData, + ) -> std::result::Result<(), Self::Error> { + unimplemented!(); + } + } } // Test for: // * default instance // * use of no_genesis_config_phantom_data mod module2 { - use super::*; - - pub trait Trait: system::Trait { - type Amount: Parameter + Default; - type Event: From> + Into<::Event>; - type Origin: From>; - } - - impl, I: Instance> Currency for Module {} - - frame_support::decl_module! { - pub struct Module, I: Instance=DefaultInstance> for enum Call where - origin: ::Origin - { - fn deposit_event() = default; - } - } - - frame_support::decl_storage! { - trait Store for Module, I: Instance=DefaultInstance> as Module2 { - pub Value config(value): T::Amount; - pub Map config(map): map hasher(identity) u64 => u64; - pub DoubleMap config(double_map): double_map hasher(identity) u64, hasher(identity) u64 => u64; - } - } - - frame_support::decl_event! 
{ - pub enum Event where Amount = >::Amount { - Variant(Amount), - } - } - - #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] - pub enum Origin, I=DefaultInstance> { - Members(u32), - _Phantom(std::marker::PhantomData<(T, I)>), - } - - pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; - - impl, I: Instance> ProvideInherent for Module { - type Call = Call; - type Error = MakeFatalError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(_data: &InherentData) -> Option { - unimplemented!(); - } - - fn check_inherent(_call: &Self::Call, _data: &InherentData) -> std::result::Result<(), Self::Error> { - unimplemented!(); - } - } + use super::*; + + pub trait Trait: system::Trait { + type Amount: Parameter + Default; + type Event: From> + Into<::Event>; + type Origin: From>; + } + + impl, I: Instance> Currency for Module {} + + frame_support::decl_module! { + pub struct Module, I: Instance=DefaultInstance> for enum Call where + origin: ::Origin + { + fn deposit_event() = default; + } + } + + frame_support::decl_storage! { + trait Store for Module, I: Instance=DefaultInstance> as Module2 { + pub Value config(value): T::Amount; + pub Map config(map): map hasher(identity) u64 => u64; + pub DoubleMap config(double_map): double_map hasher(identity) u64, hasher(identity) u64 => u64; + } + } + + frame_support::decl_event! 
{ + pub enum Event where Amount = >::Amount { + Variant(Amount), + } + } + + #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] + pub enum Origin, I = DefaultInstance> { + Members(u32), + _Phantom(std::marker::PhantomData<(T, I)>), + } + + pub const INHERENT_IDENTIFIER: InherentIdentifier = *b"12345678"; + + impl, I: Instance> ProvideInherent for Module { + type Call = Call; + type Error = MakeFatalError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(_data: &InherentData) -> Option { + unimplemented!(); + } + + fn check_inherent( + _call: &Self::Call, + _data: &InherentData, + ) -> std::result::Result<(), Self::Error> { + unimplemented!(); + } + } } // Test for: // * Depends on multiple instances of a module with instances mod module3 { - use super::*; + use super::*; - pub trait Trait: module2::Trait + module2::Trait + system::Trait { - type Currency: Currency; - type Currency2: Currency; - } + pub trait Trait: module2::Trait + module2::Trait + system::Trait { + type Currency: Currency; + type Currency2: Currency; + } - frame_support::decl_module! { - pub struct Module for enum Call where origin: ::Origin {} - } + frame_support::decl_module! { + pub struct Module for enum Call where origin: ::Origin {} + } } parameter_types! 
{ - pub const SomeValue: u32 = 100; + pub const SomeValue: u32 = 100; } impl module1::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type SomeParameter = SomeValue; - type GenericType = u32; + type Event = Event; + type Origin = Origin; + type SomeParameter = SomeValue; + type GenericType = u32; } impl module1::Trait for Runtime { - type Event = Event; - type Origin = Origin; - type SomeParameter = SomeValue; - type GenericType = u32; + type Event = Event; + type Origin = Origin; + type SomeParameter = SomeValue; + type GenericType = u32; } impl module2::Trait for Runtime { - type Amount = u16; - type Event = Event; - type Origin = Origin; + type Amount = u16; + type Event = Event; + type Origin = Origin; } impl module2::Trait for Runtime { - type Amount = u32; - type Event = Event; - type Origin = Origin; + type Amount = u32; + type Event = Event; + type Origin = Origin; } impl module2::Trait for Runtime { - type Amount = u32; - type Event = Event; - type Origin = Origin; + type Amount = u32; + type Event = Event; + type Origin = Origin; } impl module2::Trait for Runtime { - type Amount = u64; - type Event = Event; - type Origin = Origin; + type Amount = u64; + type Event = Event; + type Origin = Origin; } impl module3::Trait for Runtime { - type Currency = Module2_2; - type Currency2 = Module2_3; + type Currency = Module2_2; + type Currency2 = Module2_3; } pub type Signature = sr25519::Signature; @@ -233,12 +251,12 @@ pub type BlockNumber = u64; pub type Index = u64; impl system::Trait for Runtime { - type Hash = H256; - type Origin = Origin; - type BlockNumber = BlockNumber; - type AccountId = AccountId; - type Event = Event; - type ModuleToIndex = (); + type Hash = H256; + type Origin = Origin; + type BlockNumber = BlockNumber; + type AccountId = AccountId; + type Event = Event; + type ModuleToIndex = (); } frame_support::construct_runtime!( @@ -273,161 +291,146 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = 
generic::UncheckedExtrinsic; fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig{ - module1_Instance1: Some(module1::GenesisConfig { - value: 3, - test: 2, - }), - module1_Instance2: Some(module1::GenesisConfig { - value: 4, - test: 5, - }), - module2: Some(module2::GenesisConfig { - value: 4, - map: vec![(0, 0)], - double_map: vec![(0, 0, 0)], - }), - module2_Instance1: Some(module2::GenesisConfig { - value: 4, - map: vec![(0, 0)], - double_map: vec![(0, 0, 0)], - }), - module2_Instance2: None, - module2_Instance3: None, - }.build_storage().unwrap().into() + GenesisConfig { + module1_Instance1: Some(module1::GenesisConfig { value: 3, test: 2 }), + module1_Instance2: Some(module1::GenesisConfig { value: 4, test: 5 }), + module2: Some(module2::GenesisConfig { + value: 4, + map: vec![(0, 0)], + double_map: vec![(0, 0, 0)], + }), + module2_Instance1: Some(module2::GenesisConfig { + value: 4, + map: vec![(0, 0)], + double_map: vec![(0, 0, 0)], + }), + module2_Instance2: None, + module2_Instance3: None, + } + .build_storage() + .unwrap() + .into() } #[test] fn storage_instance_independence() { - let mut storage = sp_core::storage::Storage { - top: std::collections::BTreeMap::new(), - children: std::collections::HashMap::new() - }; - sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { - module2::Value::::put(0); - module2::Value::::put(0); - module2::Value::::put(0); - module2::Value::::put(0); - module2::Map::::insert(0, 0); - module2::Map::::insert(0, 0); - module2::Map::::insert(0, 0); - module2::Map::::insert(0, 0); - module2::DoubleMap::::insert(&0, &0, &0); - module2::DoubleMap::::insert(&0, &0, &0); - module2::DoubleMap::::insert(&0, &0, &0); - module2::DoubleMap::::insert(&0, &0, &0); - }); - // 12 storage values. 
- assert_eq!(storage.top.len(), 12); + let mut storage = sp_core::storage::Storage { + top: std::collections::BTreeMap::new(), + children: std::collections::HashMap::new(), + }; + sp_state_machine::BasicExternalities::execute_with_storage(&mut storage, || { + module2::Value::::put(0); + module2::Value::::put(0); + module2::Value::::put(0); + module2::Value::::put(0); + module2::Map::::insert(0, 0); + module2::Map::::insert(0, 0); + module2::Map::::insert(0, 0); + module2::Map::::insert(0, 0); + module2::DoubleMap::::insert(&0, &0, &0); + module2::DoubleMap::::insert(&0, &0, &0); + module2::DoubleMap::::insert(&0, &0, &0); + module2::DoubleMap::::insert(&0, &0, &0); + }); + // 12 storage values. + assert_eq!(storage.top.len(), 12); } #[test] fn storage_with_instance_basic_operation() { - new_test_ext().execute_with(|| { - type Value = module2::Value; - type Map = module2::Map; - type DoubleMap = module2::DoubleMap; - - assert_eq!(Value::exists(), true); - assert_eq!(Value::get(), 4); - Value::put(1); - assert_eq!(Value::get(), 1); - assert_eq!(Value::take(), 1); - assert_eq!(Value::get(), 0); - Value::mutate(|a| *a=2); - assert_eq!(Value::get(), 2); - Value::kill(); - assert_eq!(Value::exists(), false); - assert_eq!(Value::get(), 0); - - let key = 1; - assert_eq!(Map::contains_key(0), true); - assert_eq!(Map::contains_key(key), false); - Map::insert(key, 1); - assert_eq!(Map::get(key), 1); - assert_eq!(Map::take(key), 1); - assert_eq!(Map::get(key), 0); - Map::mutate(key, |a| *a=2); - assert_eq!(Map::get(key), 2); - Map::remove(key); - assert_eq!(Map::contains_key(key), false); - assert_eq!(Map::get(key), 0); - - let key1 = 1; - let key2 = 1; - assert_eq!(DoubleMap::contains_key(&0, &0), true); - assert_eq!(DoubleMap::contains_key(&key1, &key2), false); - DoubleMap::insert(&key1, &key2, &1); - assert_eq!(DoubleMap::get(&key1, &key2), 1); - assert_eq!(DoubleMap::take(&key1, &key2), 1); - assert_eq!(DoubleMap::get(&key1, &key2), 0); - DoubleMap::mutate(&key1, &key2, 
|a| *a=2); - assert_eq!(DoubleMap::get(&key1, &key2), 2); - DoubleMap::remove(&key1, &key2); - assert_eq!(DoubleMap::get(&key1, &key2), 0); - }); + new_test_ext().execute_with(|| { + type Value = module2::Value; + type Map = module2::Map; + type DoubleMap = module2::DoubleMap; + + assert_eq!(Value::exists(), true); + assert_eq!(Value::get(), 4); + Value::put(1); + assert_eq!(Value::get(), 1); + assert_eq!(Value::take(), 1); + assert_eq!(Value::get(), 0); + Value::mutate(|a| *a = 2); + assert_eq!(Value::get(), 2); + Value::kill(); + assert_eq!(Value::exists(), false); + assert_eq!(Value::get(), 0); + + let key = 1; + assert_eq!(Map::contains_key(0), true); + assert_eq!(Map::contains_key(key), false); + Map::insert(key, 1); + assert_eq!(Map::get(key), 1); + assert_eq!(Map::take(key), 1); + assert_eq!(Map::get(key), 0); + Map::mutate(key, |a| *a = 2); + assert_eq!(Map::get(key), 2); + Map::remove(key); + assert_eq!(Map::contains_key(key), false); + assert_eq!(Map::get(key), 0); + + let key1 = 1; + let key2 = 1; + assert_eq!(DoubleMap::contains_key(&0, &0), true); + assert_eq!(DoubleMap::contains_key(&key1, &key2), false); + DoubleMap::insert(&key1, &key2, &1); + assert_eq!(DoubleMap::get(&key1, &key2), 1); + assert_eq!(DoubleMap::take(&key1, &key2), 1); + assert_eq!(DoubleMap::get(&key1, &key2), 0); + DoubleMap::mutate(&key1, &key2, |a| *a = 2); + assert_eq!(DoubleMap::get(&key1, &key2), 2); + DoubleMap::remove(&key1, &key2); + assert_eq!(DoubleMap::get(&key1, &key2), 0); + }); } const EXPECTED_METADATA: StorageMetadata = StorageMetadata { - prefix: DecodeDifferent::Encode("Instance2Module2"), - entries: DecodeDifferent::Encode( - &[ - StorageEntryMetadata { - name: DecodeDifferent::Encode("Value"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Amount")), - default: DecodeDifferent::Encode( - DefaultByteGetter( - &module2::__GetByteStructValue( - std::marker::PhantomData::<(Runtime, module2::Instance2)> - ) - ) - 
), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("Map"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::Map { - hasher: StorageHasher::Identity, - key: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode("u64"), - unused: false, - }, - default: DecodeDifferent::Encode( - DefaultByteGetter( - &module2::__GetByteStructMap( - std::marker::PhantomData::<(Runtime, module2::Instance2)> - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), - }, - StorageEntryMetadata { - name: DecodeDifferent::Encode("DoubleMap"), - modifier: StorageEntryModifier::Default, - ty: StorageEntryType::DoubleMap { - hasher: StorageHasher::Identity, - key2_hasher: StorageHasher::Identity, - key1: DecodeDifferent::Encode("u64"), - key2: DecodeDifferent::Encode("u64"), - value: DecodeDifferent::Encode("u64"), - }, - default: DecodeDifferent::Encode( - DefaultByteGetter( - &module2::__GetByteStructDoubleMap( - std::marker::PhantomData::<(Runtime, module2::Instance2)> - ) - ) - ), - documentation: DecodeDifferent::Encode(&[]), - } - ] - ) + prefix: DecodeDifferent::Encode("Instance2Module2"), + entries: DecodeDifferent::Encode(&[ + StorageEntryMetadata { + name: DecodeDifferent::Encode("Value"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Plain(DecodeDifferent::Encode("T::Amount")), + default: DecodeDifferent::Encode(DefaultByteGetter(&module2::__GetByteStructValue( + std::marker::PhantomData::<(Runtime, module2::Instance2)>, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("Map"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::Map { + hasher: StorageHasher::Identity, + key: DecodeDifferent::Encode("u64"), + value: DecodeDifferent::Encode("u64"), + unused: false, + }, + default: DecodeDifferent::Encode(DefaultByteGetter(&module2::__GetByteStructMap( + std::marker::PhantomData::<(Runtime, 
module2::Instance2)>, + ))), + documentation: DecodeDifferent::Encode(&[]), + }, + StorageEntryMetadata { + name: DecodeDifferent::Encode("DoubleMap"), + modifier: StorageEntryModifier::Default, + ty: StorageEntryType::DoubleMap { + hasher: StorageHasher::Identity, + key2_hasher: StorageHasher::Identity, + key1: DecodeDifferent::Encode("u64"), + key2: DecodeDifferent::Encode("u64"), + value: DecodeDifferent::Encode("u64"), + }, + default: DecodeDifferent::Encode(DefaultByteGetter( + &module2::__GetByteStructDoubleMap( + std::marker::PhantomData::<(Runtime, module2::Instance2)>, + ), + )), + documentation: DecodeDifferent::Encode(&[]), + }, + ]), }; #[test] fn test_instance_storage_metadata() { - let metadata = Module2_2::storage_metadata(); - pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); + let metadata = Module2_2::storage_metadata(); + pretty_assertions::assert_eq!(EXPECTED_METADATA, metadata); } diff --git a/frame/support/test/tests/issue2219.rs b/frame/support/test/tests/issue2219.rs index 8d8152a5ad..c5d0f321e7 100644 --- a/frame/support/test/tests/issue2219.rs +++ b/frame/support/test/tests/issue2219.rs @@ -14,138 +14,138 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
+use frame_support::codec::{Decode, Encode}; use frame_support::sp_runtime::generic; use frame_support::sp_runtime::traits::{BlakeTwo256, Block as _, Verify}; -use frame_support::codec::{Encode, Decode}; -use sp_core::{H256, sr25519}; -use serde::{Serialize, Deserialize}; +use serde::{Deserialize, Serialize}; +use sp_core::{sr25519, H256}; mod system; mod module { - use super::*; - - pub type Request = ( - ::AccountId, - Role, - ::BlockNumber, - ); - pub type Requests = Vec>; - - #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] - pub enum Role { - Storage, - } - - #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] - pub struct RoleParameters { - // minimum actors to maintain - if role is unstaking - // and remaining actors would be less that this value - prevent or punish for unstaking - pub min_actors: u32, - - // the maximum number of spots available to fill for a role - pub max_actors: u32, - - // payouts are made at this block interval - pub reward_period: T::BlockNumber, - - // minimum amount of time before being able to unstake - pub bonding_period: T::BlockNumber, - - // how long tokens remain locked for after unstaking - pub unbonding_period: T::BlockNumber, - - // minimum period required to be in service. unbonding before this time is highly penalized - pub min_service_period: T::BlockNumber, - - // "startup" time allowed for roles that need to sync their infrastructure - // with other providers before they are considered in service and punishable for - // not delivering required level of service. 
- pub startup_grace_period: T::BlockNumber, - } - - impl Default for RoleParameters { - fn default() -> Self { - Self { - max_actors: 10, - reward_period: T::BlockNumber::default(), - unbonding_period: T::BlockNumber::default(), - - // not currently used - min_actors: 5, - bonding_period: T::BlockNumber::default(), - min_service_period: T::BlockNumber::default(), - startup_grace_period: T::BlockNumber::default(), - } - } - } - - pub trait Trait: system::Trait {} - - frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} - } - - #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] - pub struct Data { - pub data: T::BlockNumber, - } - - impl Default for Data { - fn default() -> Self { - Self { - data: T::BlockNumber::default(), - } - } - } - - frame_support::decl_storage! { - trait Store for Module as Actors { - /// requirements to enter and maintain status in roles - pub Parameters get(fn parameters) build(|config: &GenesisConfig| { - if config.enable_storage_role { - let storage_params: RoleParameters = Default::default(); - vec![(Role::Storage, storage_params)] - } else { - vec![] - } - }): map hasher(blake2_128_concat) Role => Option>; - - /// the roles members can enter into - pub AvailableRoles get(fn available_roles) build(|config: &GenesisConfig| { - if config.enable_storage_role { - vec![(Role::Storage)] - } else { - vec![] - } - }): Vec; - - /// Actors list - pub ActorAccountIds get(fn actor_account_ids) : Vec; - - /// actor accounts associated with a role - pub AccountIdsByRole get(fn account_ids_by_role): - map hasher(blake2_128_concat) Role => Vec; - - /// tokens locked until given block number - pub Bondage get(fn bondage): - map hasher(blake2_128_concat) T::AccountId => T::BlockNumber; - - /// First step before enter a role is registering intent with a new account/key. - /// This is done by sending a role_entry_request() from the new account. 
- /// The member must then send a stake() transaction to approve the request and enter the desired role. - /// The account making the request will be bonded and must have - /// sufficient balance to cover the minimum stake for the role. - /// Bonding only occurs after successful entry into a role. - pub RoleEntryRequests get(fn role_entry_requests) : Requests; - - /// Entry request expires after this number of blocks - pub RequestLifeTime get(fn request_life_time) config(request_life_time) : u64 = 0; - } - add_extra_genesis { - config(enable_storage_role): bool; - } - } + use super::*; + + pub type Request = ( + ::AccountId, + Role, + ::BlockNumber, + ); + pub type Requests = Vec>; + + #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] + pub enum Role { + Storage, + } + + #[derive(Encode, Decode, Copy, Clone, Eq, PartialEq, Debug)] + pub struct RoleParameters { + // minimum actors to maintain - if role is unstaking + // and remaining actors would be less that this value - prevent or punish for unstaking + pub min_actors: u32, + + // the maximum number of spots available to fill for a role + pub max_actors: u32, + + // payouts are made at this block interval + pub reward_period: T::BlockNumber, + + // minimum amount of time before being able to unstake + pub bonding_period: T::BlockNumber, + + // how long tokens remain locked for after unstaking + pub unbonding_period: T::BlockNumber, + + // minimum period required to be in service. unbonding before this time is highly penalized + pub min_service_period: T::BlockNumber, + + // "startup" time allowed for roles that need to sync their infrastructure + // with other providers before they are considered in service and punishable for + // not delivering required level of service. 
+ pub startup_grace_period: T::BlockNumber, + } + + impl Default for RoleParameters { + fn default() -> Self { + Self { + max_actors: 10, + reward_period: T::BlockNumber::default(), + unbonding_period: T::BlockNumber::default(), + + // not currently used + min_actors: 5, + bonding_period: T::BlockNumber::default(), + min_service_period: T::BlockNumber::default(), + startup_grace_period: T::BlockNumber::default(), + } + } + } + + pub trait Trait: system::Trait {} + + frame_support::decl_module! { + pub struct Module for enum Call where origin: T::Origin {} + } + + #[derive(Encode, Decode, Copy, Clone, Serialize, Deserialize)] + pub struct Data { + pub data: T::BlockNumber, + } + + impl Default for Data { + fn default() -> Self { + Self { + data: T::BlockNumber::default(), + } + } + } + + frame_support::decl_storage! { + trait Store for Module as Actors { + /// requirements to enter and maintain status in roles + pub Parameters get(fn parameters) build(|config: &GenesisConfig| { + if config.enable_storage_role { + let storage_params: RoleParameters = Default::default(); + vec![(Role::Storage, storage_params)] + } else { + vec![] + } + }): map hasher(blake2_128_concat) Role => Option>; + + /// the roles members can enter into + pub AvailableRoles get(fn available_roles) build(|config: &GenesisConfig| { + if config.enable_storage_role { + vec![(Role::Storage)] + } else { + vec![] + } + }): Vec; + + /// Actors list + pub ActorAccountIds get(fn actor_account_ids) : Vec; + + /// actor accounts associated with a role + pub AccountIdsByRole get(fn account_ids_by_role): + map hasher(blake2_128_concat) Role => Vec; + + /// tokens locked until given block number + pub Bondage get(fn bondage): + map hasher(blake2_128_concat) T::AccountId => T::BlockNumber; + + /// First step before enter a role is registering intent with a new account/key. + /// This is done by sending a role_entry_request() from the new account. 
+ /// The member must then send a stake() transaction to approve the request and enter the desired role. + /// The account making the request will be bonded and must have + /// sufficient balance to cover the minimum stake for the role. + /// Bonding only occurs after successful entry into a role. + pub RoleEntryRequests get(fn role_entry_requests) : Requests; + + /// Entry request expires after this number of blocks + pub RequestLifeTime get(fn request_life_time) config(request_life_time) : u64 = 0; + } + add_extra_genesis { + config(enable_storage_role): bool; + } + } } pub type Signature = sr25519::Signature; @@ -157,12 +157,12 @@ pub type Block = generic::Block; pub type UncheckedExtrinsic = generic::UncheckedExtrinsic; impl system::Trait for Runtime { - type Hash = H256; - type Origin = Origin; - type BlockNumber = BlockNumber; - type AccountId = AccountId; - type Event = Event; - type ModuleToIndex = (); + type Hash = H256; + type Origin = Origin; + type BlockNumber = BlockNumber; + type AccountId = AccountId; + type Event = Event; + type ModuleToIndex = (); } impl module::Trait for Runtime {} @@ -180,10 +180,10 @@ frame_support::construct_runtime!( #[test] fn create_genesis_config() { - GenesisConfig { - module: Some(module::GenesisConfig { - request_life_time: 0, - enable_storage_role: true, - }) - }; + GenesisConfig { + module: Some(module::GenesisConfig { + request_life_time: 0, + enable_storage_role: true, + }), + }; } diff --git a/frame/support/test/tests/reserved_keyword.rs b/frame/support/test/tests/reserved_keyword.rs index d6cc4bba3b..c905d93f89 100644 --- a/frame/support/test/tests/reserved_keyword.rs +++ b/frame/support/test/tests/reserved_keyword.rs @@ -16,9 +16,9 @@ #[test] fn reserved_keyword() { - // As trybuild is using `cargo check`, we don't need the real WASM binaries. - std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); + // As trybuild is using `cargo check`, we don't need the real WASM binaries. 
+ std::env::set_var("BUILD_DUMMY_WASM_BINARY", "1"); - let t = trybuild::TestCases::new(); - t.compile_fail("tests/reserved_keyword/*.rs"); + let t = trybuild::TestCases::new(); + t.compile_fail("tests/reserved_keyword/*.rs"); } diff --git a/frame/support/test/tests/system.rs b/frame/support/test/tests/system.rs index c7f60117bc..5dd1b1827a 100644 --- a/frame/support/test/tests/system.rs +++ b/frame/support/test/tests/system.rs @@ -14,67 +14,73 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . -use frame_support::codec::{Encode, Decode, EncodeLike}; +use frame_support::codec::{Decode, Encode, EncodeLike}; pub trait Trait: 'static + Eq + Clone { - type Origin: Into, Self::Origin>> - + From>; + type Origin: Into, Self::Origin>> + + From>; - type BlockNumber: Decode + Encode + EncodeLike + Clone + Default; - type Hash; - type AccountId: Encode + EncodeLike + Decode; - type Event: From>; - type ModuleToIndex: frame_support::traits::ModuleToIndex; + type BlockNumber: Decode + Encode + EncodeLike + Clone + Default; + type Hash; + type AccountId: Encode + EncodeLike + Decode; + type Event: From>; + type ModuleToIndex: frame_support::traits::ModuleToIndex; } frame_support::decl_module! { - pub struct Module for enum Call where origin: T::Origin {} + pub struct Module for enum Call where origin: T::Origin {} } impl Module { - pub fn deposit_event(_event: impl Into) {} + pub fn deposit_event(_event: impl Into) {} } frame_support::decl_event!( - pub enum Event where BlockNumber = ::BlockNumber { - ExtrinsicSuccess, - ExtrinsicFailed, - Ignore(BlockNumber), - } + pub enum Event + where + BlockNumber = ::BlockNumber, + { + ExtrinsicSuccess, + ExtrinsicFailed, + Ignore(BlockNumber), + } ); frame_support::decl_error! 
{ - pub enum Error for Module { - /// Test error documentation - TestError, - /// Error documentation - /// with multiple lines - AnotherError - } + pub enum Error for Module { + /// Test error documentation + TestError, + /// Error documentation + /// with multiple lines + AnotherError + } } /// Origin for the system module. #[derive(PartialEq, Eq, Clone, sp_runtime::RuntimeDebug)] pub enum RawOrigin { - Root, - Signed(AccountId), - None, + Root, + Signed(AccountId), + None, } impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::None, + } + } } pub type Origin = RawOrigin<::AccountId>; #[allow(dead_code)] pub fn ensure_root(o: OuterOrigin) -> Result<(), &'static str> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { - o.into().map(|_| ()).map_err(|_| "bad origin: expected to be a root origin") + o.into() + .map(|_| ()) + .map_err(|_| "bad origin: expected to be a root origin") } diff --git a/frame/system/benches/bench.rs b/frame/system/benches/bench.rs index 36711d3177..043a5b839f 100644 --- a/frame/system/benches/bench.rs +++ b/frame/system/benches/bench.rs @@ -14,97 +14,109 @@ // You should have received a copy of the GNU General Public License // along with Substrate. If not, see . 
-use criterion::{Criterion, criterion_group, criterion_main, black_box}; +use criterion::{black_box, criterion_group, criterion_main, Criterion}; +use frame_support::{ + decl_event, decl_module, impl_outer_event, impl_outer_origin, weights::Weight, +}; use frame_system as system; -use frame_support::{decl_module, decl_event, impl_outer_origin, impl_outer_event, weights::Weight}; use sp_core::H256; -use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; +use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, +}; mod module { - use super::*; - - pub trait Trait: system::Trait { - type Event: From + Into<::Event>; - } - - decl_module! { - pub struct Module for enum Call where origin: T::Origin { - pub fn deposit_event() = default; - } - } - - decl_event!( - pub enum Event { - Complex(Vec, u32, u16, u128), - } - ); + use super::*; + + pub trait Trait: system::Trait { + type Event: From + Into<::Event>; + } + + decl_module! { + pub struct Module for enum Call where origin: T::Origin { + pub fn deposit_event() = default; + } + } + + decl_event!( + pub enum Event { + Complex(Vec, u32, u16, u128), + } + ); } -impl_outer_origin!{ - pub enum Origin for Runtime {} +impl_outer_origin! { + pub enum Origin for Runtime {} } impl_outer_event! { - pub enum Event for Runtime { - system, - module, - } + pub enum Event for Runtime { + system, + module, + } } frame_support::parameter_types! 
{ - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; - pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 4 * 1024 * 1024; + pub const MaximumBlockLength: u32 = 4 * 1024 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); } #[derive(Clone, Eq, PartialEq)] pub struct Runtime; impl system::Trait for Runtime { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = Event; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type MaximumBlockLength = MaximumBlockLength; - type AvailableBlockRatio = AvailableBlockRatio; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = Event; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type MaximumBlockLength = MaximumBlockLength; + type AvailableBlockRatio = AvailableBlockRatio; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); } impl module::Trait for Runtime { - type Event = Event; + type Event = Event; } fn new_test_ext() -> sp_io::TestExternalities { - system::GenesisConfig::default().build_storage::().unwrap().into() + system::GenesisConfig::default() + .build_storage::() + .unwrap() + .into() } fn deposit_events(n: usize) { - let 
mut t = new_test_ext(); - t.execute_with(|| { - for _ in 0..n { - module::Module::::deposit_event( - module::Event::Complex(vec![1, 2, 3], 2, 3, 899) - ); - } - }); + let mut t = new_test_ext(); + t.execute_with(|| { + for _ in 0..n { + module::Module::::deposit_event(module::Event::Complex( + vec![1, 2, 3], + 2, + 3, + 899, + )); + } + }); } fn sr_system_benchmark(c: &mut Criterion) { - c.bench_function("deposit 100 events", |b| { - b.iter(|| deposit_events(black_box(100))) - }); + c.bench_function("deposit 100 events", |b| { + b.iter(|| deposit_events(black_box(100))) + }); } criterion_group!(benches, sr_system_benchmark); diff --git a/frame/system/rpc/runtime-api/src/lib.rs b/frame/system/rpc/runtime-api/src/lib.rs index 3b05bd1624..8bb1721bb8 100644 --- a/frame/system/rpc/runtime-api/src/lib.rs +++ b/frame/system/rpc/runtime-api/src/lib.rs @@ -23,12 +23,12 @@ #![cfg_attr(not(feature = "std"), no_std)] sp_api::decl_runtime_apis! { - /// The API to query account nonce (aka transaction index). - pub trait AccountNonceApi where - AccountId: codec::Codec, - Index: codec::Codec, - { - /// Get current account nonce of given `AccountId`. - fn account_nonce(account: AccountId) -> Index; - } + /// The API to query account nonce (aka transaction index). + pub trait AccountNonceApi where + AccountId: codec::Codec, + Index: codec::Codec, + { + /// Get current account nonce of given `AccountId`. 
+ fn account_nonce(account: AccountId) -> Index; + } } diff --git a/frame/system/src/lib.rs b/frame/system/src/lib.rs index 50f2b089f2..07a464f2d0 100644 --- a/frame/system/src/lib.rs +++ b/frame/system/src/lib.rs @@ -91,38 +91,42 @@ #[cfg(feature = "std")] use serde::Serialize; -use sp_std::prelude::*; +use sp_runtime::{ + generic::{self, Era}, + traits::{ + self, AtLeast32Bit, BadOrigin, Bounded, CheckEqual, DispatchInfoOf, Dispatchable, Hash, + Lookup, LookupError, MaybeDisplay, MaybeMallocSizeOf, MaybeSerialize, + MaybeSerializeDeserialize, Member, One, PostDispatchInfoOf, SaturatedConversion, + SignedExtension, SimpleBitOps, StaticLookup, Zero, + }, + transaction_validity::{ + InvalidTransaction, TransactionLongevity, TransactionPriority, TransactionValidity, + TransactionValidityError, ValidTransaction, + }, + DispatchError, DispatchOutcome, DispatchResult, Perbill, RuntimeDebug, +}; +use sp_std::convert::Infallible; +use sp_std::fmt::Debug; #[cfg(any(feature = "std", test))] use sp_std::map; -use sp_std::convert::Infallible; use sp_std::marker::PhantomData; -use sp_std::fmt::Debug; +use sp_std::prelude::*; use sp_version::RuntimeVersion; -use sp_runtime::{ - RuntimeDebug, Perbill, DispatchOutcome, DispatchError, DispatchResult, - generic::{self, Era}, - transaction_validity::{ - ValidTransaction, TransactionPriority, TransactionLongevity, TransactionValidityError, - InvalidTransaction, TransactionValidity, - }, - traits::{ - self, CheckEqual, AtLeast32Bit, Zero, SignedExtension, Lookup, LookupError, - SimpleBitOps, Hash, Member, MaybeDisplay, BadOrigin, SaturatedConversion, - MaybeSerialize, MaybeSerializeDeserialize, MaybeMallocSizeOf, StaticLookup, One, Bounded, - Dispatchable, DispatchInfoOf, PostDispatchInfoOf, - }, -}; -use sp_core::{ChangesTrieConfiguration, storage::well_known_keys}; +use codec::{Decode, Encode, EncodeLike, FullCodec}; use frame_support::{ - decl_module, decl_event, decl_storage, decl_error, storage, Parameter, ensure, debug, - 
traits::{ - Contains, Get, ModuleToIndex, OnNewAccount, OnKilledAccount, IsDeadAccount, Happened, - StoredMap, EnsureOrigin, - }, - weights::{Weight, MINIMUM_WEIGHT, RuntimeDbWeight, DispatchInfo, PostDispatchInfo, DispatchClass, SimpleDispatchInfo, FunctionOf} + debug, decl_error, decl_event, decl_module, decl_storage, ensure, storage, + traits::{ + Contains, EnsureOrigin, Get, Happened, IsDeadAccount, ModuleToIndex, OnKilledAccount, + OnNewAccount, StoredMap, + }, + weights::{ + DispatchClass, DispatchInfo, FunctionOf, PostDispatchInfo, RuntimeDbWeight, + SimpleDispatchInfo, Weight, MINIMUM_WEIGHT, + }, + Parameter, }; -use codec::{Encode, Decode, FullCodec, EncodeLike}; +use sp_core::{storage::well_known_keys, ChangesTrieConfiguration}; #[cfg(any(feature = "std", test))] use sp_io::TestExternalities; @@ -131,101 +135,127 @@ pub mod offchain; /// Compute the trie root of a list of extrinsics. pub fn extrinsics_root(extrinsics: &[E]) -> H::Output { - extrinsics_data_root::(extrinsics.iter().map(codec::Encode::encode).collect()) + extrinsics_data_root::(extrinsics.iter().map(codec::Encode::encode).collect()) } /// Compute the trie root of a list of extrinsics. pub fn extrinsics_data_root(xts: Vec>) -> H::Output { - H::ordered_trie_root(xts) + H::ordered_trie_root(xts) } pub trait Trait: 'static + Eq + Clone { - /// The aggregated `Origin` type used by dispatchable calls. - type Origin: - Into, Self::Origin>> - + From> - + Clone; - - /// The aggregated `Call` type. - type Call: Dispatchable + Debug; - - /// Account index (aka nonce) type. This stores the number of previous transactions associated - /// with a sender account. - type Index: - Parameter + Member + MaybeSerialize + Debug + Default + MaybeDisplay + AtLeast32Bit - + Copy; - - /// The block number type used by the runtime. 
- type BlockNumber: - Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + AtLeast32Bit - + Default + Bounded + Copy + sp_std::hash::Hash + sp_std::str::FromStr + MaybeMallocSizeOf; - - /// The output of the `Hashing` function. - type Hash: - Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + SimpleBitOps + Ord - + Default + Copy + CheckEqual + sp_std::hash::Hash + AsRef<[u8]> + AsMut<[u8]> + MaybeMallocSizeOf; - - /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). - type Hashing: Hash; - - /// The user account identifier type for the runtime. - type AccountId: Parameter + Member + MaybeSerializeDeserialize + Debug + MaybeDisplay + Ord - + Default; - - /// Converting trait to take a source type and convert to `AccountId`. - /// - /// Used to define the type and conversion mechanism for referencing accounts in transactions. - /// It's perfectly reasonable for this to be an identity conversion (with the source type being - /// `AccountId`), but other modules (e.g. Indices module) may provide more functional/efficient - /// alternatives. - type Lookup: StaticLookup; - - /// The block header. - type Header: Parameter + traits::Header< - Number = Self::BlockNumber, - Hash = Self::Hash, - >; - - /// The aggregated event type of the runtime. - type Event: Parameter + Member + From> + Debug; - - /// Maximum number of block number to block hash mappings to keep (oldest pruned first). - type BlockHashCount: Get; - - /// The maximum weight of a block. - type MaximumBlockWeight: Get; - - /// The weight of runtime database operations the runtime can invoke. - type DbWeight: Get; - - /// The maximum length of a block (in bytes). - type MaximumBlockLength: Get; - - /// The portion of the block that is available to normal transaction. The rest can only be used - /// by operational transactions. This can be applied to any resource limit managed by the system - /// module, including weight and length. 
- type AvailableBlockRatio: Get; - - /// Get the chain's current version. - type Version: Get; - - /// Convert a module to its index in the runtime. - /// - /// Expects the `ModuleToIndex` type that is being generated by `construct_runtime!` in the - /// runtime. For tests it is okay to use `()` as type (returns `0` for each input). - type ModuleToIndex: ModuleToIndex; - - /// Data to be associated with an account (other than nonce/transaction counter, which this - /// module does regardless). - type AccountData: Member + FullCodec + Clone + Default; - - /// Handler for when a new account has just been created. - type OnNewAccount: OnNewAccount; - - /// A function that is invoked when an account has been determined to be dead. - /// - /// All resources should be cleaned up associated with the given account. - type OnKilledAccount: OnKilledAccount; + /// The aggregated `Origin` type used by dispatchable calls. + type Origin: Into, Self::Origin>> + + From> + + Clone; + + /// The aggregated `Call` type. + type Call: Dispatchable + Debug; + + /// Account index (aka nonce) type. This stores the number of previous transactions associated + /// with a sender account. + type Index: Parameter + + Member + + MaybeSerialize + + Debug + + Default + + MaybeDisplay + + AtLeast32Bit + + Copy; + + /// The block number type used by the runtime. + type BlockNumber: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + AtLeast32Bit + + Default + + Bounded + + Copy + + sp_std::hash::Hash + + sp_std::str::FromStr + + MaybeMallocSizeOf; + + /// The output of the `Hashing` function. + type Hash: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + SimpleBitOps + + Ord + + Default + + Copy + + CheckEqual + + sp_std::hash::Hash + + AsRef<[u8]> + + AsMut<[u8]> + + MaybeMallocSizeOf; + + /// The hashing system (algorithm) being used in the runtime (e.g. Blake2). 
+ type Hashing: Hash; + + /// The user account identifier type for the runtime. + type AccountId: Parameter + + Member + + MaybeSerializeDeserialize + + Debug + + MaybeDisplay + + Ord + + Default; + + /// Converting trait to take a source type and convert to `AccountId`. + /// + /// Used to define the type and conversion mechanism for referencing accounts in transactions. + /// It's perfectly reasonable for this to be an identity conversion (with the source type being + /// `AccountId`), but other modules (e.g. Indices module) may provide more functional/efficient + /// alternatives. + type Lookup: StaticLookup; + + /// The block header. + type Header: Parameter + traits::Header; + + /// The aggregated event type of the runtime. + type Event: Parameter + Member + From> + Debug; + + /// Maximum number of block number to block hash mappings to keep (oldest pruned first). + type BlockHashCount: Get; + + /// The maximum weight of a block. + type MaximumBlockWeight: Get; + + /// The weight of runtime database operations the runtime can invoke. + type DbWeight: Get; + + /// The maximum length of a block (in bytes). + type MaximumBlockLength: Get; + + /// The portion of the block that is available to normal transaction. The rest can only be used + /// by operational transactions. This can be applied to any resource limit managed by the system + /// module, including weight and length. + type AvailableBlockRatio: Get; + + /// Get the chain's current version. + type Version: Get; + + /// Convert a module to its index in the runtime. + /// + /// Expects the `ModuleToIndex` type that is being generated by `construct_runtime!` in the + /// runtime. For tests it is okay to use `()` as type (returns `0` for each input). + type ModuleToIndex: ModuleToIndex; + + /// Data to be associated with an account (other than nonce/transaction counter, which this + /// module does regardless). 
+ type AccountData: Member + FullCodec + Clone + Default; + + /// Handler for when a new account has just been created. + type OnNewAccount: OnNewAccount; + + /// A function that is invoked when an account has been determined to be dead. + /// + /// All resources should be cleaned up associated with the given account. + type OnKilledAccount: OnKilledAccount; } pub type DigestOf = generic::Digest<::Hash>; @@ -238,52 +268,52 @@ pub type KeyValue = (Vec, Vec); #[derive(Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] pub enum Phase { - /// Applying an extrinsic. - ApplyExtrinsic(u32), - /// Finalizing the block. - Finalization, - /// Initializing the block. - Initialization, + /// Applying an extrinsic. + ApplyExtrinsic(u32), + /// Finalizing the block. + Finalization, + /// Initializing the block. + Initialization, } impl Default for Phase { - fn default() -> Self { - Self::Initialization - } + fn default() -> Self { + Self::Initialization + } } /// Record of an event happening. #[derive(Encode, Decode, RuntimeDebug)] #[cfg_attr(feature = "std", derive(Serialize, PartialEq, Eq, Clone))] pub struct EventRecord { - /// The phase of the block it happened in. - pub phase: Phase, - /// The event itself. - pub event: E, - /// The list of the topics this event has. - pub topics: Vec, + /// The phase of the block it happened in. + pub phase: Phase, + /// The event itself. + pub event: E, + /// The list of the topics this event has. + pub topics: Vec, } /// Origin for the System module. #[derive(PartialEq, Eq, Clone, RuntimeDebug)] pub enum RawOrigin { - /// The system itself ordained this dispatch to happen: this is the highest privilege level. - Root, - /// It is signed by some public key and we provide the `AccountId`. - Signed(AccountId), - /// It is signed by nobody, can be either: - /// * included and agreed upon by the validators anyway, - /// * or unsigned transaction validated by a module. 
- None, + /// The system itself ordained this dispatch to happen: this is the highest privilege level. + Root, + /// It is signed by some public key and we provide the `AccountId`. + Signed(AccountId), + /// It is signed by nobody, can be either: + /// * included and agreed upon by the validators anyway, + /// * or unsigned transaction validated by a module. + None, } impl From> for RawOrigin { - fn from(s: Option) -> RawOrigin { - match s { - Some(who) => RawOrigin::Signed(who), - None => RawOrigin::None, - } - } + fn from(s: Option) -> RawOrigin { + match s { + Some(who) => RawOrigin::Signed(who), + None => RawOrigin::None, + } + } } /// Exposed trait-generic origin type. @@ -293,9 +323,9 @@ pub type Origin = RawOrigin<::AccountId>; // only used to build genesis config. #[cfg(feature = "std")] fn hash69 + Default>() -> T { - let mut h = T::default(); - h.as_mut().iter_mut().for_each(|byte| *byte = 69); - h + let mut h = T::default(); + h.as_mut().iter_mut().for_each(|byte| *byte = 69); + h } /// This type alias represents an index of an event. @@ -310,14 +340,14 @@ pub type RefCount = u8; /// Information of an account. #[derive(Clone, Eq, PartialEq, Default, RuntimeDebug, Encode, Decode)] pub struct AccountInfo { - /// The number of transactions this account has sent. - pub nonce: Index, - /// The number of other modules that currently depend on this account's existence. The account - /// cannot be reaped until this is zero. - pub refcount: RefCount, - /// The additional data that belongs to this account. Used to store the balance(s) in a lot of - /// chains. - pub data: AccountData, + /// The number of transactions this account has sent. + pub nonce: Index, + /// The number of other modules that currently depend on this account's existence. The account + /// cannot be reaped until this is zero. + pub refcount: RefCount, + /// The additional data that belongs to this account. Used to store the balance(s) in a lot of + /// chains. 
+ pub data: AccountData, } /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade @@ -325,1035 +355,1062 @@ pub struct AccountInfo { #[derive(sp_runtime::RuntimeDebug, Encode, Decode)] #[cfg_attr(feature = "std", derive(PartialEq))] pub struct LastRuntimeUpgradeInfo { - pub spec_version: codec::Compact, - pub spec_name: sp_runtime::RuntimeString, + pub spec_version: codec::Compact, + pub spec_name: sp_runtime::RuntimeString, } impl LastRuntimeUpgradeInfo { - /// Returns if the runtime was upgraded in comparison of `self` and `current`. - /// - /// Checks if either the `spec_version` increased or the `spec_name` changed. - pub fn was_upgraded(&self, current: &sp_version::RuntimeVersion) -> bool { - current.spec_version > self.spec_version.0 || current.spec_name != self.spec_name - } + /// Returns if the runtime was upgraded in comparison of `self` and `current`. + /// + /// Checks if either the `spec_version` increased or the `spec_name` changed. + pub fn was_upgraded(&self, current: &sp_version::RuntimeVersion) -> bool { + current.spec_version > self.spec_version.0 || current.spec_name != self.spec_name + } } impl From for LastRuntimeUpgradeInfo { - fn from(version: sp_version::RuntimeVersion) -> Self { - Self { - spec_version: version.spec_version.into(), - spec_name: version.spec_name, - } - } + fn from(version: sp_version::RuntimeVersion) -> Self { + Self { + spec_version: version.spec_version.into(), + spec_name: version.spec_name, + } + } } decl_storage! { - trait Store for Module as System { - /// The full account information for a particular account ID. - pub Account get(fn account): - map hasher(blake2_128_concat) T::AccountId => AccountInfo; - - /// Total extrinsics count for the current block. - ExtrinsicCount: Option; - - /// Total weight for all extrinsics put together, for the current block. - AllExtrinsicsWeight: Option; - - /// Total length (in bytes) for all extrinsics put together, for the current block. 
- AllExtrinsicsLen: Option; - - /// Map of block numbers to block hashes. - pub BlockHash get(fn block_hash) build(|_| vec![(T::BlockNumber::zero(), hash69())]): - map hasher(twox_64_concat) T::BlockNumber => T::Hash; - - /// Extrinsics data for the current block (maps an extrinsic's index to its data). - ExtrinsicData get(fn extrinsic_data): map hasher(twox_64_concat) u32 => Vec; - - /// The current block number being processed. Set by `execute_block`. - Number get(fn block_number): T::BlockNumber; - - /// Hash of the previous block. - ParentHash get(fn parent_hash) build(|_| hash69()): T::Hash; - - /// Extrinsics root of the current block, also part of the block header. - ExtrinsicsRoot get(fn extrinsics_root): T::Hash; - - /// Digest of the current block, also part of the block header. - Digest get(fn digest): DigestOf; - - /// Events deposited for the current block. - Events get(fn events): Vec>; - - /// The number of events in the `Events` list. - EventCount get(fn event_count): EventIndex; - - // TODO: https://github.com/paritytech/substrate/issues/2553 - // Possibly, we can improve it by using something like: - // `Option<(BlockNumber, Vec)>`, however in this case we won't be able to use - // `EventTopics::append`. - - /// Mapping between a topic (represented by T::Hash) and a vector of indexes - /// of events in the `>` list. - /// - /// All topic vectors have deterministic storage locations depending on the topic. This - /// allows light-clients to leverage the changes trie storage tracking mechanism and - /// in case of changes fetch the list of events of interest. - /// - /// The value has the type `(T::BlockNumber, EventIndex)` because if we used only just - /// the `EventIndex` then in case if the topic has the same contents on the next block - /// no notification will be triggered thus the event might be lost. 
- EventTopics get(fn event_topics): map hasher(blake2_128_concat) T::Hash => Vec<(T::BlockNumber, EventIndex)>; - - /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. - pub LastRuntimeUpgrade build(|_| Some(LastRuntimeUpgradeInfo::from(T::Version::get()))): Option; - - /// The execution phase of the block. - ExecutionPhase: Option; - } - add_extra_genesis { - config(changes_trie_config): Option; - #[serde(with = "sp_core::bytes")] - config(code): Vec; - - build(|config: &GenesisConfig| { - use codec::Encode; - - sp_io::storage::set(well_known_keys::CODE, &config.code); - sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); - - if let Some(ref changes_trie_config) = config.changes_trie_config { - sp_io::storage::set( - well_known_keys::CHANGES_TRIE_CONFIG, - &changes_trie_config.encode(), - ); - } - }); - } + trait Store for Module as System { + /// The full account information for a particular account ID. + pub Account get(fn account): + map hasher(blake2_128_concat) T::AccountId => AccountInfo; + + /// Total extrinsics count for the current block. + ExtrinsicCount: Option; + + /// Total weight for all extrinsics put together, for the current block. + AllExtrinsicsWeight: Option; + + /// Total length (in bytes) for all extrinsics put together, for the current block. + AllExtrinsicsLen: Option; + + /// Map of block numbers to block hashes. + pub BlockHash get(fn block_hash) build(|_| vec![(T::BlockNumber::zero(), hash69())]): + map hasher(twox_64_concat) T::BlockNumber => T::Hash; + + /// Extrinsics data for the current block (maps an extrinsic's index to its data). + ExtrinsicData get(fn extrinsic_data): map hasher(twox_64_concat) u32 => Vec; + + /// The current block number being processed. Set by `execute_block`. + Number get(fn block_number): T::BlockNumber; + + /// Hash of the previous block. 
+ ParentHash get(fn parent_hash) build(|_| hash69()): T::Hash; + + /// Extrinsics root of the current block, also part of the block header. + ExtrinsicsRoot get(fn extrinsics_root): T::Hash; + + /// Digest of the current block, also part of the block header. + Digest get(fn digest): DigestOf; + + /// Events deposited for the current block. + Events get(fn events): Vec>; + + /// The number of events in the `Events` list. + EventCount get(fn event_count): EventIndex; + + // TODO: https://github.com/paritytech/substrate/issues/2553 + // Possibly, we can improve it by using something like: + // `Option<(BlockNumber, Vec)>`, however in this case we won't be able to use + // `EventTopics::append`. + + /// Mapping between a topic (represented by T::Hash) and a vector of indexes + /// of events in the `>` list. + /// + /// All topic vectors have deterministic storage locations depending on the topic. This + /// allows light-clients to leverage the changes trie storage tracking mechanism and + /// in case of changes fetch the list of events of interest. + /// + /// The value has the type `(T::BlockNumber, EventIndex)` because if we used only just + /// the `EventIndex` then in case if the topic has the same contents on the next block + /// no notification will be triggered thus the event might be lost. + EventTopics get(fn event_topics): map hasher(blake2_128_concat) T::Hash => Vec<(T::BlockNumber, EventIndex)>; + + /// Stores the `spec_version` and `spec_name` of when the last runtime upgrade happened. + pub LastRuntimeUpgrade build(|_| Some(LastRuntimeUpgradeInfo::from(T::Version::get()))): Option; + + /// The execution phase of the block. 
+ ExecutionPhase: Option; + } + add_extra_genesis { + config(changes_trie_config): Option; + #[serde(with = "sp_core::bytes")] + config(code): Vec; + + build(|config: &GenesisConfig| { + use codec::Encode; + + sp_io::storage::set(well_known_keys::CODE, &config.code); + sp_io::storage::set(well_known_keys::EXTRINSIC_INDEX, &0u32.encode()); + + if let Some(ref changes_trie_config) = config.changes_trie_config { + sp_io::storage::set( + well_known_keys::CHANGES_TRIE_CONFIG, + &changes_trie_config.encode(), + ); + } + }); + } } decl_event!( - /// Event for the System module. - pub enum Event where AccountId = ::AccountId { - /// An extrinsic completed successfully. - ExtrinsicSuccess(DispatchInfo), - /// An extrinsic failed. - ExtrinsicFailed(DispatchError, DispatchInfo), - /// `:code` was updated. - CodeUpdated, - /// A new account was created. - NewAccount(AccountId), - /// An account was reaped. - KilledAccount(AccountId), - } + /// Event for the System module. + pub enum Event + where + AccountId = ::AccountId, + { + /// An extrinsic completed successfully. + ExtrinsicSuccess(DispatchInfo), + /// An extrinsic failed. + ExtrinsicFailed(DispatchError, DispatchInfo), + /// `:code` was updated. + CodeUpdated, + /// A new account was created. + NewAccount(AccountId), + /// An account was reaped. + KilledAccount(AccountId), + } ); decl_error! { - /// Error for the System module - pub enum Error for Module { - /// The name of specification does not match between the current runtime - /// and the new runtime. - InvalidSpecName, - /// The specification version is not allowed to decrease between the current runtime - /// and the new runtime. - SpecVersionNeedsToIncrease, - /// Failed to extract the runtime version from the new runtime. - /// - /// Either calling `Core_version` or decoding `RuntimeVersion` failed. - FailedToExtractRuntimeVersion, - - /// Suicide called when the account has non-default composite data. 
- NonDefaultComposite, - /// There is a non-zero reference count preventing the account from being purged. - NonZeroRefCount, - } + /// Error for the System module + pub enum Error for Module { + /// The name of specification does not match between the current runtime + /// and the new runtime. + InvalidSpecName, + /// The specification version is not allowed to decrease between the current runtime + /// and the new runtime. + SpecVersionNeedsToIncrease, + /// Failed to extract the runtime version from the new runtime. + /// + /// Either calling `Core_version` or decoding `RuntimeVersion` failed. + FailedToExtractRuntimeVersion, + + /// Suicide called when the account has non-default composite data. + NonDefaultComposite, + /// There is a non-zero reference count preventing the account from being purged. + NonZeroRefCount, + } } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - type Error = Error; - - /// A dispatch that will fill the block weight up to the given ratio. - // TODO: This should only be available for testing, rather than in general usage, but - // that's not possible at present (since it's within the decl_module macro). - #[weight = FunctionOf( - |(ratio,): (&Perbill,)| *ratio * T::MaximumBlockWeight::get(), - DispatchClass::Operational, - true, - )] - fn fill_block(origin, _ratio: Perbill) { - ensure_root(origin)?; - } - - /// Make some on-chain remark. - #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] - fn remark(origin, _remark: Vec) { - ensure_signed(origin)?; - } - - /// Set the number of pages in the WebAssembly environment's heap. - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn set_heap_pages(origin, pages: u64) { - ensure_root(origin)?; - storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); - } - - /// Set the new runtime code. 
- #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] - pub fn set_code(origin, code: Vec) { - Self::can_set_code(origin, &code)?; - - storage::unhashed::put_raw(well_known_keys::CODE, &code); - Self::deposit_event(RawEvent::CodeUpdated); - } - - /// Set the new runtime code without doing any checks of the given `code`. - #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] - pub fn set_code_without_checks(origin, code: Vec) { - ensure_root(origin)?; - storage::unhashed::put_raw(well_known_keys::CODE, &code); - Self::deposit_event(RawEvent::CodeUpdated); - } - - /// Set the new changes trie configuration. - #[weight = SimpleDispatchInfo::FixedOperational(20_000_000)] - pub fn set_changes_trie_config(origin, changes_trie_config: Option) { - ensure_root(origin)?; - match changes_trie_config.clone() { - Some(changes_trie_config) => storage::unhashed::put_raw( - well_known_keys::CHANGES_TRIE_CONFIG, - &changes_trie_config.encode(), - ), - None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), - } - - let log = generic::DigestItem::ChangesTrieSignal( - generic::ChangesTrieSignal::NewConfiguration(changes_trie_config), - ); - Self::deposit_log(log.into()); - } - - /// Set some items of storage. - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn set_storage(origin, items: Vec) { - ensure_root(origin)?; - for i in &items { - storage::unhashed::put_raw(&i.0, &i.1); - } - } - - /// Kill some items from storage. - #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn kill_storage(origin, keys: Vec) { - ensure_root(origin)?; - for key in &keys { - storage::unhashed::kill(&key); - } - } - - /// Kill all storage items with a key that starts with the given prefix. 
- #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] - fn kill_prefix(origin, prefix: Key) { - ensure_root(origin)?; - storage::unhashed::kill_prefix(&prefix); - } - - /// Kill the sending account, assuming there are no references outstanding and the composite - /// data is equal to its default value. - #[weight = SimpleDispatchInfo::FixedOperational(25_000_000)] - fn suicide(origin) { - let who = ensure_signed(origin)?; - let account = Account::::get(&who); - ensure!(account.refcount == 0, Error::::NonZeroRefCount); - ensure!(account.data == T::AccountData::default(), Error::::NonDefaultComposite); - Account::::remove(who); - } - } + pub struct Module for enum Call where origin: T::Origin { + type Error = Error; + + /// A dispatch that will fill the block weight up to the given ratio. + // TODO: This should only be available for testing, rather than in general usage, but + // that's not possible at present (since it's within the decl_module macro). + #[weight = FunctionOf( + |(ratio,): (&Perbill,)| *ratio * T::MaximumBlockWeight::get(), + DispatchClass::Operational, + true, + )] + fn fill_block(origin, _ratio: Perbill) { + ensure_root(origin)?; + } + + /// Make some on-chain remark. + #[weight = SimpleDispatchInfo::FixedNormal(MINIMUM_WEIGHT)] + fn remark(origin, _remark: Vec) { + ensure_signed(origin)?; + } + + /// Set the number of pages in the WebAssembly environment's heap. + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn set_heap_pages(origin, pages: u64) { + ensure_root(origin)?; + storage::unhashed::put_raw(well_known_keys::HEAP_PAGES, &pages.encode()); + } + + /// Set the new runtime code. + #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] + pub fn set_code(origin, code: Vec) { + Self::can_set_code(origin, &code)?; + + storage::unhashed::put_raw(well_known_keys::CODE, &code); + Self::deposit_event(RawEvent::CodeUpdated); + } + + /// Set the new runtime code without doing any checks of the given `code`. 
+ #[weight = SimpleDispatchInfo::FixedOperational(200_000_000)] + pub fn set_code_without_checks(origin, code: Vec) { + ensure_root(origin)?; + storage::unhashed::put_raw(well_known_keys::CODE, &code); + Self::deposit_event(RawEvent::CodeUpdated); + } + + /// Set the new changes trie configuration. + #[weight = SimpleDispatchInfo::FixedOperational(20_000_000)] + pub fn set_changes_trie_config(origin, changes_trie_config: Option) { + ensure_root(origin)?; + match changes_trie_config.clone() { + Some(changes_trie_config) => storage::unhashed::put_raw( + well_known_keys::CHANGES_TRIE_CONFIG, + &changes_trie_config.encode(), + ), + None => storage::unhashed::kill(well_known_keys::CHANGES_TRIE_CONFIG), + } + + let log = generic::DigestItem::ChangesTrieSignal( + generic::ChangesTrieSignal::NewConfiguration(changes_trie_config), + ); + Self::deposit_log(log.into()); + } + + /// Set some items of storage. + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn set_storage(origin, items: Vec) { + ensure_root(origin)?; + for i in &items { + storage::unhashed::put_raw(&i.0, &i.1); + } + } + + /// Kill some items from storage. + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn kill_storage(origin, keys: Vec) { + ensure_root(origin)?; + for key in &keys { + storage::unhashed::kill(&key); + } + } + + /// Kill all storage items with a key that starts with the given prefix. + #[weight = SimpleDispatchInfo::FixedOperational(MINIMUM_WEIGHT)] + fn kill_prefix(origin, prefix: Key) { + ensure_root(origin)?; + storage::unhashed::kill_prefix(&prefix); + } + + /// Kill the sending account, assuming there are no references outstanding and the composite + /// data is equal to its default value. 
+ #[weight = SimpleDispatchInfo::FixedOperational(25_000_000)] + fn suicide(origin) { + let who = ensure_signed(origin)?; + let account = Account::::get(&who); + ensure!(account.refcount == 0, Error::::NonZeroRefCount); + ensure!(account.data == T::AccountData::default(), Error::::NonDefaultComposite); + Account::::remove(who); + } + } } pub struct EnsureRoot(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId, -> EnsureOrigin for EnsureRoot { - type Success = (); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Root => Ok(()), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - O::from(RawOrigin::Root) - } +impl, O>> + From>, AccountId> + EnsureOrigin for EnsureRoot +{ + type Success = (); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::Root => Ok(()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Root) + } } pub struct EnsureSigned(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId: Default, -> EnsureOrigin for EnsureSigned { - type Success = AccountId; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Signed(who) => Ok(who), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - O::from(RawOrigin::Signed(Default::default())) - } +impl, O>> + From>, AccountId: Default> + EnsureOrigin for EnsureSigned +{ + type Success = AccountId; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::Signed(who) => Ok(who), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::Signed(Default::default())) + } } pub struct EnsureSignedBy(sp_std::marker::PhantomData<(Who, AccountId)>); impl< - O: Into, O>> + From>, - Who: Contains, - AccountId: PartialEq 
+ Clone + Ord + Default, -> EnsureOrigin for EnsureSignedBy { - type Success = AccountId; - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::Signed(ref who) if Who::contains(who) => Ok(who.clone()), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - let members = Who::sorted_members(); - let first_member = match members.get(0) { - Some(account) => account.clone(), - None => Default::default(), - }; - O::from(RawOrigin::Signed(first_member.clone())) - } + O: Into, O>> + From>, + Who: Contains, + AccountId: PartialEq + Clone + Ord + Default, + > EnsureOrigin for EnsureSignedBy +{ + type Success = AccountId; + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::Signed(ref who) if Who::contains(who) => Ok(who.clone()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + let members = Who::sorted_members(); + let first_member = match members.get(0) { + Some(account) => account.clone(), + None => Default::default(), + }; + O::from(RawOrigin::Signed(first_member.clone())) + } } pub struct EnsureNone(sp_std::marker::PhantomData); -impl< - O: Into, O>> + From>, - AccountId, -> EnsureOrigin for EnsureNone { - type Success = (); - fn try_origin(o: O) -> Result { - o.into().and_then(|o| match o { - RawOrigin::None => Ok(()), - r => Err(O::from(r)), - }) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - O::from(RawOrigin::None) - } +impl, O>> + From>, AccountId> + EnsureOrigin for EnsureNone +{ + type Success = (); + fn try_origin(o: O) -> Result { + o.into().and_then(|o| match o { + RawOrigin::None => Ok(()), + r => Err(O::from(r)), + }) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + O::from(RawOrigin::None) + } } pub struct EnsureNever(sp_std::marker::PhantomData); impl EnsureOrigin for EnsureNever { - type Success = T; - fn try_origin(o: O) 
-> Result { - Err(o) - } - - #[cfg(feature = "runtime-benchmarks")] - fn successful_origin() -> O { - unimplemented!() - } + type Success = T; + fn try_origin(o: O) -> Result { + Err(o) + } + + #[cfg(feature = "runtime-benchmarks")] + fn successful_origin() -> O { + unimplemented!() + } } /// Ensure that the origin `o` represents a signed extrinsic (i.e. transaction). /// Returns `Ok` with the account that signed the extrinsic or an `Err` otherwise. pub fn ensure_signed(o: OuterOrigin) -> Result - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { - match o.into() { - Ok(RawOrigin::Signed(t)) => Ok(t), - _ => Err(BadOrigin), - } + match o.into() { + Ok(RawOrigin::Signed(t)) => Ok(t), + _ => Err(BadOrigin), + } } /// Ensure that the origin `o` represents the root. Returns `Ok` or an `Err` otherwise. pub fn ensure_root(o: OuterOrigin) -> Result<(), BadOrigin> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { - match o.into() { - Ok(RawOrigin::Root) => Ok(()), - _ => Err(BadOrigin), - } + match o.into() { + Ok(RawOrigin::Root) => Ok(()), + _ => Err(BadOrigin), + } } /// Ensure that the origin `o` represents an unsigned extrinsic. Returns `Ok` or an `Err` otherwise. pub fn ensure_none(o: OuterOrigin) -> Result<(), BadOrigin> - where OuterOrigin: Into, OuterOrigin>> +where + OuterOrigin: Into, OuterOrigin>>, { - match o.into() { - Ok(RawOrigin::None) => Ok(()), - _ => Err(BadOrigin), - } + match o.into() { + Ok(RawOrigin::None) => Ok(()), + _ => Err(BadOrigin), + } } /// A type of block initialization to perform. pub enum InitKind { - /// Leave inspectable storage entries in state. - /// - /// i.e. `Events` are not being reset. - /// Should only be used for off-chain calls, - /// regular block execution should clear those. - Inspection, - - /// Reset also inspectable storage entries. - /// - /// This should be used for regular block execution. - Full, + /// Leave inspectable storage entries in state. 
+ /// + /// i.e. `Events` are not being reset. + /// Should only be used for off-chain calls, + /// regular block execution should clear those. + Inspection, + + /// Reset also inspectable storage entries. + /// + /// This should be used for regular block execution. + Full, } impl Default for InitKind { - fn default() -> Self { - InitKind::Full - } + fn default() -> Self { + InitKind::Full + } } /// Reference status; can be either referenced or unreferenced. pub enum RefStatus { - Referenced, - Unreferenced, + Referenced, + Unreferenced, } impl Module { - /// Deposits an event into this block's event record. - pub fn deposit_event(event: impl Into) { - Self::deposit_event_indexed(&[], event.into()); - } - - /// Increment the reference counter on an account. - pub fn inc_ref(who: &T::AccountId) { - Account::::mutate(who, |a| a.refcount = a.refcount.saturating_add(1)); - } - - /// Decrement the reference counter on an account. This *MUST* only be done once for every time - /// you called `inc_ref` on `who`. - pub fn dec_ref(who: &T::AccountId) { - Account::::mutate(who, |a| a.refcount = a.refcount.saturating_sub(1)); - } - - /// The number of outstanding references for the account `who`. - pub fn refs(who: &T::AccountId) -> RefCount { - Account::::get(who).refcount - } - - /// True if the account has no outstanding references. - pub fn allow_death(who: &T::AccountId) -> bool { - Account::::get(who).refcount == 0 - } - - /// Deposits an event into this block's event record adding this event - /// to the corresponding topic indexes. - /// - /// This will update storage entries that correspond to the specified topics. - /// It is expected that light-clients could subscribe to this topics. - pub fn deposit_event_indexed(topics: &[T::Hash], event: T::Event) { - let block_number = Self::block_number(); - // Don't populate events on genesis. 
- if block_number.is_zero() { return } - - let phase = ExecutionPhase::get().unwrap_or_default(); - let event = EventRecord { - phase, - event, - topics: topics.iter().cloned().collect::>(), - }; - - // Index of the to be added event. - let event_idx = { - let old_event_count = EventCount::get(); - let new_event_count = match old_event_count.checked_add(1) { - // We've reached the maximum number of events at this block, just - // don't do anything and leave the event_count unaltered. - None => return, - Some(nc) => nc, - }; - EventCount::put(new_event_count); - old_event_count - }; - - // Appending can only fail if `Events` can not be decoded or - // when we try to insert more than `u32::max_value()` events. - // - // We perform early return if we've reached the maximum capacity of the event list, - // so `Events` seems to be corrupted. Also, this has happened after the start of execution - // (since the event list is cleared at the block initialization). - if >::append([event].iter()).is_err() { - // The most sensible thing to do here is to just ignore this event and wait until the - // new block. - return; - } - - for topic in topics { - // The same applies here. - if >::append(topic, &[(block_number, event_idx)]).is_err() { - return; - } - } - } - - /// Gets the index of extrinsic that is currently executing. - pub fn extrinsic_index() -> Option { - storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX) - } - - /// Gets extrinsics count. - pub fn extrinsic_count() -> u32 { - ExtrinsicCount::get().unwrap_or_default() - } - - /// Gets a total weight of all executed extrinsics. - pub fn all_extrinsics_weight() -> Weight { - AllExtrinsicsWeight::get().unwrap_or_default() - } - - pub fn all_extrinsics_len() -> u32 { - AllExtrinsicsLen::get().unwrap_or_default() - } - - /// Inform the system module of some additional weight that should be accounted for, in the - /// current block. 
- /// - /// NOTE: use with extra care; this function is made public only be used for certain modules - /// that need it. A runtime that does not have dynamic calls should never need this and should - /// stick to static weights. A typical use case for this is inner calls or smart contract calls. - /// Furthermore, it only makes sense to use this when it is presumably _cheap_ to provide the - /// argument `weight`; In other words, if this function is to be used to account for some - /// unknown, user provided call's weight, it would only make sense to use it if you are sure you - /// can rapidly compute the weight of the inner call. - /// - /// Even more dangerous is to note that this function does NOT take any action, if the new sum - /// of block weight is more than the block weight limit. This is what the _unchecked_. - /// - /// Another potential use-case could be for the `on_initialize` and `on_finalize` hooks. - /// - /// If no previous weight exists, the function initializes the weight to zero. - pub fn register_extra_weight_unchecked(weight: Weight) { - let current_weight = AllExtrinsicsWeight::get().unwrap_or_default(); - let next_weight = current_weight.saturating_add(weight).min(T::MaximumBlockWeight::get()); - AllExtrinsicsWeight::put(next_weight); - } - - /// Start the execution of a particular block. - pub fn initialize( - number: &T::BlockNumber, - parent_hash: &T::Hash, - txs_root: &T::Hash, - digest: &DigestOf, - kind: InitKind, - ) { - // populate environment - ExecutionPhase::put(Phase::Initialization); - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); - >::put(number); - >::put(digest); - >::put(parent_hash); - >::insert(*number - One::one(), parent_hash); - >::put(txs_root); - - if let InitKind::Full = kind { - >::kill(); - EventCount::kill(); - >::remove_all(); - } - } - - /// Remove temporary "environment" entries in storage. 
- pub fn finalize() -> T::Header { - ExecutionPhase::kill(); - ExtrinsicCount::kill(); - AllExtrinsicsWeight::kill(); - AllExtrinsicsLen::kill(); - - let number = >::take(); - let parent_hash = >::take(); - let mut digest = >::take(); - let extrinsics_root = >::take(); - - // move block hash pruning window by one block - let block_hash_count = ::get(); - if number > block_hash_count { - let to_remove = number - block_hash_count - One::one(); - - // keep genesis hash - if to_remove != Zero::zero() { - >::remove(to_remove); - } - } - - let storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..]) - .expect("Node is configured to use the same hash; qed"); - let storage_changes_root = sp_io::storage::changes_root(&parent_hash.encode()); - - // we can't compute changes trie root earlier && put it to the Digest - // because it will include all currently existing temporaries. - if let Some(storage_changes_root) = storage_changes_root { - let item = generic::DigestItem::ChangesTrieRoot( - T::Hash::decode(&mut &storage_changes_root[..]) - .expect("Node is configured to use the same hash; qed") - ); - digest.push(item); - } - - // The following fields - // - // - > - // - > - // - > - // - // stay to be inspected by the client and will be cleared by `Self::initialize`. - - ::new(number, extrinsics_root, storage_root, parent_hash, digest) - } - - /// Deposits a log and ensures it matches the block's log data. - pub fn deposit_log(item: DigestItemOf) { - let mut l = >::get(); - l.push(item); - >::put(l); - } - - /// Get the basic externalities for this module, useful for tests. 
- #[cfg(any(feature = "std", test))] - pub fn externalities() -> TestExternalities { - TestExternalities::new(sp_core::storage::Storage { - top: map![ - >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), - >::hashed_key().to_vec() => T::BlockNumber::one().encode(), - >::hashed_key().to_vec() => [69u8; 32].encode() - ], - children: map![], - }) - } - - /// Set the block number to something in particular. Can be used as an alternative to - /// `initialize` for tests that don't need to bother with the other environment entries. - #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] - pub fn set_block_number(n: T::BlockNumber) { - >::put(n); - } - - /// Sets the index of extrinsic that is currently executing. - #[cfg(any(feature = "std", test))] - pub fn set_extrinsic_index(extrinsic_index: u32) { - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &extrinsic_index) - } - - /// Set the parent hash number to something in particular. Can be used as an alternative to - /// `initialize` for tests that don't need to bother with the other environment entries. - #[cfg(any(feature = "std", test))] - pub fn set_parent_hash(n: T::Hash) { - >::put(n); - } - - /// Set the current block weight. This should only be used in some integration tests. - #[cfg(any(feature = "std", test))] - pub fn set_block_limits(weight: Weight, len: usize) { - AllExtrinsicsWeight::put(weight); - AllExtrinsicsLen::put(len as u32); - } - - /// Return the chain's current runtime version. - pub fn runtime_version() -> RuntimeVersion { T::Version::get() } - - /// Retrieve the account transaction counter from storage. - pub fn account_nonce(who: impl EncodeLike) -> T::Index { - Account::::get(who).nonce - } - - /// Increment a particular account's nonce by 1. - pub fn inc_account_nonce(who: impl EncodeLike) { - Account::::mutate(who, |a| a.nonce += T::Index::one()); - } - - /// Note what the extrinsic data of the current extrinsic index is. 
If this - /// is called, then ensure `derive_extrinsics` is also called before - /// block-building is completed. - /// - /// NOTE: This function is called only when the block is being constructed locally. - /// `execute_block` doesn't note any extrinsics. - pub fn note_extrinsic(encoded_xt: Vec) { - ExtrinsicData::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt); - } - - /// To be called immediately after an extrinsic has been applied. - pub fn note_applied_extrinsic(r: &DispatchOutcome, _encoded_len: u32, info: DispatchInfo) { - Self::deposit_event( - match r { - Ok(()) => RawEvent::ExtrinsicSuccess(info), - Err(err) => { - sp_runtime::print(err); - RawEvent::ExtrinsicFailed(err.clone(), info) - }, - } - ); - - let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32; - - storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index); - ExecutionPhase::put(Phase::ApplyExtrinsic(next_extrinsic_index)); - } - - /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block - /// has been called. - pub fn note_finished_extrinsics() { - let extrinsic_index: u32 = storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX) - .unwrap_or_default(); - ExtrinsicCount::put(extrinsic_index); - ExecutionPhase::put(Phase::Finalization); - } - - /// To be called immediately after finishing the initialization of the block - /// (e.g., called `on_initialize` for all modules). - pub fn note_finished_initialize() { - ExecutionPhase::put(Phase::ApplyExtrinsic(0)) - } - - /// Remove all extrinsic data and save the extrinsics trie root. - pub fn derive_extrinsics() { - let extrinsics = (0..ExtrinsicCount::get().unwrap_or_default()) - .map(ExtrinsicData::take).collect(); - let xts_root = extrinsics_data_root::(extrinsics); - >::put(xts_root); - } - - /// An account is being created. 
- pub fn on_created_account(who: T::AccountId) { - T::OnNewAccount::on_new_account(&who); - Self::deposit_event(RawEvent::NewAccount(who)); - } - - /// Do anything that needs to be done after an account has been killed. - fn on_killed_account(who: T::AccountId) { - T::OnKilledAccount::on_killed_account(&who); - Self::deposit_event(RawEvent::KilledAccount(who)); - } - - /// Remove an account from storage. This should only be done when its refs are zero or you'll - /// get storage leaks in other modules. Nonetheless we assume that the calling logic knows best. - /// - /// This is a no-op if the account doesn't already exist. If it does then it will ensure - /// cleanups (those in `on_killed_account`) take place. - fn kill_account(who: &T::AccountId) { - if Account::::contains_key(who) { - let account = Account::::take(who); - if account.refcount > 0 { - debug::debug!( - target: "system", - "WARNING: Referenced account deleted. This is probably a bug." - ); - } - Module::::on_killed_account(who.clone()); - } - } - - /// Determine whether or not it is possible to update the code. - /// - /// This function has no side effects and is idempotent, but is fairly - /// heavy. It is automatically called by `set_code`; in most cases, - /// a direct call to `set_code` is preferable. It is useful to call - /// `can_set_code` when it is desirable to perform the appropriate - /// runtime checks without actually changing the code yet. - pub fn can_set_code(origin: T::Origin, code: &[u8]) -> Result<(), sp_runtime::DispatchError> { - ensure_root(origin)?; - - let current_version = T::Version::get(); - let new_version = sp_io::misc::runtime_version(&code) - .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) - .ok_or_else(|| Error::::FailedToExtractRuntimeVersion)?; - - if new_version.spec_name != current_version.spec_name { - Err(Error::::InvalidSpecName)? - } - - if new_version.spec_version <= current_version.spec_version { - Err(Error::::SpecVersionNeedsToIncrease)? 
- } - - Ok(()) - } + /// Deposits an event into this block's event record. + pub fn deposit_event(event: impl Into) { + Self::deposit_event_indexed(&[], event.into()); + } + + /// Increment the reference counter on an account. + pub fn inc_ref(who: &T::AccountId) { + Account::::mutate(who, |a| a.refcount = a.refcount.saturating_add(1)); + } + + /// Decrement the reference counter on an account. This *MUST* only be done once for every time + /// you called `inc_ref` on `who`. + pub fn dec_ref(who: &T::AccountId) { + Account::::mutate(who, |a| a.refcount = a.refcount.saturating_sub(1)); + } + + /// The number of outstanding references for the account `who`. + pub fn refs(who: &T::AccountId) -> RefCount { + Account::::get(who).refcount + } + + /// True if the account has no outstanding references. + pub fn allow_death(who: &T::AccountId) -> bool { + Account::::get(who).refcount == 0 + } + + /// Deposits an event into this block's event record adding this event + /// to the corresponding topic indexes. + /// + /// This will update storage entries that correspond to the specified topics. + /// It is expected that light-clients could subscribe to this topics. + pub fn deposit_event_indexed(topics: &[T::Hash], event: T::Event) { + let block_number = Self::block_number(); + // Don't populate events on genesis. + if block_number.is_zero() { + return; + } + + let phase = ExecutionPhase::get().unwrap_or_default(); + let event = EventRecord { + phase, + event, + topics: topics.iter().cloned().collect::>(), + }; + + // Index of the to be added event. + let event_idx = { + let old_event_count = EventCount::get(); + let new_event_count = match old_event_count.checked_add(1) { + // We've reached the maximum number of events at this block, just + // don't do anything and leave the event_count unaltered. 
+ None => return, + Some(nc) => nc, + }; + EventCount::put(new_event_count); + old_event_count + }; + + // Appending can only fail if `Events` can not be decoded or + // when we try to insert more than `u32::max_value()` events. + // + // We perform early return if we've reached the maximum capacity of the event list, + // so `Events` seems to be corrupted. Also, this has happened after the start of execution + // (since the event list is cleared at the block initialization). + if >::append([event].iter()).is_err() { + // The most sensible thing to do here is to just ignore this event and wait until the + // new block. + return; + } + + for topic in topics { + // The same applies here. + if >::append(topic, &[(block_number, event_idx)]).is_err() { + return; + } + } + } + + /// Gets the index of extrinsic that is currently executing. + pub fn extrinsic_index() -> Option { + storage::unhashed::get(well_known_keys::EXTRINSIC_INDEX) + } + + /// Gets extrinsics count. + pub fn extrinsic_count() -> u32 { + ExtrinsicCount::get().unwrap_or_default() + } + + /// Gets a total weight of all executed extrinsics. + pub fn all_extrinsics_weight() -> Weight { + AllExtrinsicsWeight::get().unwrap_or_default() + } + + pub fn all_extrinsics_len() -> u32 { + AllExtrinsicsLen::get().unwrap_or_default() + } + + /// Inform the system module of some additional weight that should be accounted for, in the + /// current block. + /// + /// NOTE: use with extra care; this function is made public only be used for certain modules + /// that need it. A runtime that does not have dynamic calls should never need this and should + /// stick to static weights. A typical use case for this is inner calls or smart contract calls. 
+ /// Furthermore, it only makes sense to use this when it is presumably _cheap_ to provide the + /// argument `weight`; In other words, if this function is to be used to account for some + /// unknown, user provided call's weight, it would only make sense to use it if you are sure you + /// can rapidly compute the weight of the inner call. + /// + /// Even more dangerous is to note that this function does NOT take any action, if the new sum + /// of block weight is more than the block weight limit. This is what the _unchecked_. + /// + /// Another potential use-case could be for the `on_initialize` and `on_finalize` hooks. + /// + /// If no previous weight exists, the function initializes the weight to zero. + pub fn register_extra_weight_unchecked(weight: Weight) { + let current_weight = AllExtrinsicsWeight::get().unwrap_or_default(); + let next_weight = current_weight + .saturating_add(weight) + .min(T::MaximumBlockWeight::get()); + AllExtrinsicsWeight::put(next_weight); + } + + /// Start the execution of a particular block. + pub fn initialize( + number: &T::BlockNumber, + parent_hash: &T::Hash, + txs_root: &T::Hash, + digest: &DigestOf, + kind: InitKind, + ) { + // populate environment + ExecutionPhase::put(Phase::Initialization); + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &0u32); + >::put(number); + >::put(digest); + >::put(parent_hash); + >::insert(*number - One::one(), parent_hash); + >::put(txs_root); + + if let InitKind::Full = kind { + >::kill(); + EventCount::kill(); + >::remove_all(); + } + } + + /// Remove temporary "environment" entries in storage. 
+ pub fn finalize() -> T::Header { + ExecutionPhase::kill(); + ExtrinsicCount::kill(); + AllExtrinsicsWeight::kill(); + AllExtrinsicsLen::kill(); + + let number = >::take(); + let parent_hash = >::take(); + let mut digest = >::take(); + let extrinsics_root = >::take(); + + // move block hash pruning window by one block + let block_hash_count = ::get(); + if number > block_hash_count { + let to_remove = number - block_hash_count - One::one(); + + // keep genesis hash + if to_remove != Zero::zero() { + >::remove(to_remove); + } + } + + let storage_root = T::Hash::decode(&mut &sp_io::storage::root()[..]) + .expect("Node is configured to use the same hash; qed"); + let storage_changes_root = sp_io::storage::changes_root(&parent_hash.encode()); + + // we can't compute changes trie root earlier && put it to the Digest + // because it will include all currently existing temporaries. + if let Some(storage_changes_root) = storage_changes_root { + let item = generic::DigestItem::ChangesTrieRoot( + T::Hash::decode(&mut &storage_changes_root[..]) + .expect("Node is configured to use the same hash; qed"), + ); + digest.push(item); + } + + // The following fields + // + // - > + // - > + // - > + // + // stay to be inspected by the client and will be cleared by `Self::initialize`. + + ::new( + number, + extrinsics_root, + storage_root, + parent_hash, + digest, + ) + } + + /// Deposits a log and ensures it matches the block's log data. + pub fn deposit_log(item: DigestItemOf) { + let mut l = >::get(); + l.push(item); + >::put(l); + } + + /// Get the basic externalities for this module, useful for tests. 
+ #[cfg(any(feature = "std", test))] + pub fn externalities() -> TestExternalities { + TestExternalities::new(sp_core::storage::Storage { + top: map![ + >::hashed_key_for(T::BlockNumber::zero()) => [69u8; 32].encode(), + >::hashed_key().to_vec() => T::BlockNumber::one().encode(), + >::hashed_key().to_vec() => [69u8; 32].encode() + ], + children: map![], + }) + } + + /// Set the block number to something in particular. Can be used as an alternative to + /// `initialize` for tests that don't need to bother with the other environment entries. + #[cfg(any(feature = "std", feature = "runtime-benchmarks", test))] + pub fn set_block_number(n: T::BlockNumber) { + >::put(n); + } + + /// Sets the index of extrinsic that is currently executing. + #[cfg(any(feature = "std", test))] + pub fn set_extrinsic_index(extrinsic_index: u32) { + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &extrinsic_index) + } + + /// Set the parent hash number to something in particular. Can be used as an alternative to + /// `initialize` for tests that don't need to bother with the other environment entries. + #[cfg(any(feature = "std", test))] + pub fn set_parent_hash(n: T::Hash) { + >::put(n); + } + + /// Set the current block weight. This should only be used in some integration tests. + #[cfg(any(feature = "std", test))] + pub fn set_block_limits(weight: Weight, len: usize) { + AllExtrinsicsWeight::put(weight); + AllExtrinsicsLen::put(len as u32); + } + + /// Return the chain's current runtime version. + pub fn runtime_version() -> RuntimeVersion { + T::Version::get() + } + + /// Retrieve the account transaction counter from storage. + pub fn account_nonce(who: impl EncodeLike) -> T::Index { + Account::::get(who).nonce + } + + /// Increment a particular account's nonce by 1. + pub fn inc_account_nonce(who: impl EncodeLike) { + Account::::mutate(who, |a| a.nonce += T::Index::one()); + } + + /// Note what the extrinsic data of the current extrinsic index is. 
If this + /// is called, then ensure `derive_extrinsics` is also called before + /// block-building is completed. + /// + /// NOTE: This function is called only when the block is being constructed locally. + /// `execute_block` doesn't note any extrinsics. + pub fn note_extrinsic(encoded_xt: Vec) { + ExtrinsicData::insert(Self::extrinsic_index().unwrap_or_default(), encoded_xt); + } + + /// To be called immediately after an extrinsic has been applied. + pub fn note_applied_extrinsic(r: &DispatchOutcome, _encoded_len: u32, info: DispatchInfo) { + Self::deposit_event(match r { + Ok(()) => RawEvent::ExtrinsicSuccess(info), + Err(err) => { + sp_runtime::print(err); + RawEvent::ExtrinsicFailed(err.clone(), info) + } + }); + + let next_extrinsic_index = Self::extrinsic_index().unwrap_or_default() + 1u32; + + storage::unhashed::put(well_known_keys::EXTRINSIC_INDEX, &next_extrinsic_index); + ExecutionPhase::put(Phase::ApplyExtrinsic(next_extrinsic_index)); + } + + /// To be called immediately after `note_applied_extrinsic` of the last extrinsic of the block + /// has been called. + pub fn note_finished_extrinsics() { + let extrinsic_index: u32 = + storage::unhashed::take(well_known_keys::EXTRINSIC_INDEX).unwrap_or_default(); + ExtrinsicCount::put(extrinsic_index); + ExecutionPhase::put(Phase::Finalization); + } + + /// To be called immediately after finishing the initialization of the block + /// (e.g., called `on_initialize` for all modules). + pub fn note_finished_initialize() { + ExecutionPhase::put(Phase::ApplyExtrinsic(0)) + } + + /// Remove all extrinsic data and save the extrinsics trie root. + pub fn derive_extrinsics() { + let extrinsics = (0..ExtrinsicCount::get().unwrap_or_default()) + .map(ExtrinsicData::take) + .collect(); + let xts_root = extrinsics_data_root::(extrinsics); + >::put(xts_root); + } + + /// An account is being created. 
+ pub fn on_created_account(who: T::AccountId) { + T::OnNewAccount::on_new_account(&who); + Self::deposit_event(RawEvent::NewAccount(who)); + } + + /// Do anything that needs to be done after an account has been killed. + fn on_killed_account(who: T::AccountId) { + T::OnKilledAccount::on_killed_account(&who); + Self::deposit_event(RawEvent::KilledAccount(who)); + } + + /// Remove an account from storage. This should only be done when its refs are zero or you'll + /// get storage leaks in other modules. Nonetheless we assume that the calling logic knows best. + /// + /// This is a no-op if the account doesn't already exist. If it does then it will ensure + /// cleanups (those in `on_killed_account`) take place. + fn kill_account(who: &T::AccountId) { + if Account::::contains_key(who) { + let account = Account::::take(who); + if account.refcount > 0 { + debug::debug!( + target: "system", + "WARNING: Referenced account deleted. This is probably a bug." + ); + } + Module::::on_killed_account(who.clone()); + } + } + + /// Determine whether or not it is possible to update the code. + /// + /// This function has no side effects and is idempotent, but is fairly + /// heavy. It is automatically called by `set_code`; in most cases, + /// a direct call to `set_code` is preferable. It is useful to call + /// `can_set_code` when it is desirable to perform the appropriate + /// runtime checks without actually changing the code yet. + pub fn can_set_code(origin: T::Origin, code: &[u8]) -> Result<(), sp_runtime::DispatchError> { + ensure_root(origin)?; + + let current_version = T::Version::get(); + let new_version = sp_io::misc::runtime_version(&code) + .and_then(|v| RuntimeVersion::decode(&mut &v[..]).ok()) + .ok_or_else(|| Error::::FailedToExtractRuntimeVersion)?; + + if new_version.spec_name != current_version.spec_name { + Err(Error::::InvalidSpecName)? + } + + if new_version.spec_version <= current_version.spec_version { + Err(Error::::SpecVersionNeedsToIncrease)? 
+ } + + Ok(()) + } } /// Event handler which calls on_created_account when it happens. pub struct CallOnCreatedAccount(PhantomData); impl Happened for CallOnCreatedAccount { - fn happened(who: &T::AccountId) { - Module::::on_created_account(who.clone()); - } + fn happened(who: &T::AccountId) { + Module::::on_created_account(who.clone()); + } } /// Event handler which calls kill_account when it happens. pub struct CallKillAccount(PhantomData); impl Happened for CallKillAccount { - fn happened(who: &T::AccountId) { - Module::::kill_account(who) - } + fn happened(who: &T::AccountId) { + Module::::kill_account(who) + } } // Implement StoredMap for a simple single-item, kill-account-on-remove system. This works fine for // storing a single item which is required to not be empty/default for the account to exist. // Anything more complex will need more sophisticated logic. impl StoredMap for Module { - fn get(k: &T::AccountId) -> T::AccountData { - Account::::get(k).data - } - fn is_explicit(k: &T::AccountId) -> bool { - Account::::contains_key(k) - } - fn insert(k: &T::AccountId, data: T::AccountData) { - let existed = Account::::contains_key(k); - Account::::mutate(k, |a| a.data = data); - if !existed { - Self::on_created_account(k.clone()); - } - } - fn remove(k: &T::AccountId) { - Self::kill_account(k) - } - fn mutate(k: &T::AccountId, f: impl FnOnce(&mut T::AccountData) -> R) -> R { - let existed = Account::::contains_key(k); - let r = Account::::mutate(k, |a| f(&mut a.data)); - if !existed { - Self::on_created_account(k.clone()); - } - r - } - fn mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> R) -> R { - Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }).expect("Infallible; qed") - } - fn try_mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> Result) -> Result { - Account::::try_mutate_exists(k, |maybe_value| { - let existed = maybe_value.is_some(); - let (maybe_prefix, mut maybe_data) = split_inner( - maybe_value.take(), - |account| 
((account.nonce, account.refcount), account.data) - ); - f(&mut maybe_data).map(|result| { - *maybe_value = maybe_data.map(|data| { - let (nonce, refcount) = maybe_prefix.unwrap_or_default(); - AccountInfo { nonce, refcount, data } - }); - (existed, maybe_value.is_some(), result) - }) - }).map(|(existed, exists, v)| { - if !existed && exists { - Self::on_created_account(k.clone()); - } else if existed && !exists { - Self::on_killed_account(k.clone()); - } - v - }) - } + fn get(k: &T::AccountId) -> T::AccountData { + Account::::get(k).data + } + fn is_explicit(k: &T::AccountId) -> bool { + Account::::contains_key(k) + } + fn insert(k: &T::AccountId, data: T::AccountData) { + let existed = Account::::contains_key(k); + Account::::mutate(k, |a| a.data = data); + if !existed { + Self::on_created_account(k.clone()); + } + } + fn remove(k: &T::AccountId) { + Self::kill_account(k) + } + fn mutate(k: &T::AccountId, f: impl FnOnce(&mut T::AccountData) -> R) -> R { + let existed = Account::::contains_key(k); + let r = Account::::mutate(k, |a| f(&mut a.data)); + if !existed { + Self::on_created_account(k.clone()); + } + r + } + fn mutate_exists(k: &T::AccountId, f: impl FnOnce(&mut Option) -> R) -> R { + Self::try_mutate_exists(k, |x| -> Result { Ok(f(x)) }) + .expect("Infallible; qed") + } + fn try_mutate_exists( + k: &T::AccountId, + f: impl FnOnce(&mut Option) -> Result, + ) -> Result { + Account::::try_mutate_exists(k, |maybe_value| { + let existed = maybe_value.is_some(); + let (maybe_prefix, mut maybe_data) = split_inner(maybe_value.take(), |account| { + ((account.nonce, account.refcount), account.data) + }); + f(&mut maybe_data).map(|result| { + *maybe_value = maybe_data.map(|data| { + let (nonce, refcount) = maybe_prefix.unwrap_or_default(); + AccountInfo { + nonce, + refcount, + data, + } + }); + (existed, maybe_value.is_some(), result) + }) + }) + .map(|(existed, exists, v)| { + if !existed && exists { + Self::on_created_account(k.clone()); + } else if existed && 
!exists { + Self::on_killed_account(k.clone()); + } + v + }) + } } /// Split an `option` into two constituent options, as defined by a `splitter` function. -pub fn split_inner(option: Option, splitter: impl FnOnce(T) -> (R, S)) - -> (Option, Option) -{ - match option { - Some(inner) => { - let (r, s) = splitter(inner); - (Some(r), Some(s)) - } - None => (None, None), - } +pub fn split_inner( + option: Option, + splitter: impl FnOnce(T) -> (R, S), +) -> (Option, Option) { + match option { + Some(inner) => { + let (r, s) = splitter(inner); + (Some(r), Some(s)) + } + None => (None, None), + } } /// resource limit check. #[derive(Encode, Decode, Clone, Eq, PartialEq)] pub struct CheckWeight(PhantomData); -impl CheckWeight where - T::Call: Dispatchable +impl CheckWeight +where + T::Call: Dispatchable, { - /// Get the quota ratio of each dispatch class type. This indicates that all operational - /// dispatches can use the full capacity of any resource, while user-triggered ones can consume - /// a portion. - fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { - match class { - DispatchClass::Operational | DispatchClass::Mandatory - => ::one(), - DispatchClass::Normal => T::AvailableBlockRatio::get(), - } - } - - /// Checks if the current extrinsic can fit into the block with respect to block weight limits. - /// - /// Upon successes, it returns the new block weight as a `Result`. 
- fn check_weight( - info: &DispatchInfoOf, - ) -> Result { - let current_weight = Module::::all_extrinsics_weight(); - let maximum_weight = T::MaximumBlockWeight::get(); - let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_weight; - let added_weight = info.weight.min(limit); - let next_weight = current_weight.saturating_add(added_weight); - if next_weight > limit && info.class != DispatchClass::Mandatory { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(next_weight) - } - } - - /// Checks if the current extrinsic can fit into the block with respect to block length limits. - /// - /// Upon successes, it returns the new block length as a `Result`. - fn check_block_length( - info: &DispatchInfoOf, - len: usize, - ) -> Result { - let current_len = Module::::all_extrinsics_len(); - let maximum_len = T::MaximumBlockLength::get(); - let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_len; - let added_len = len as u32; - let next_len = current_len.saturating_add(added_len); - if next_len > limit { - Err(InvalidTransaction::ExhaustsResources.into()) - } else { - Ok(next_len) - } - } - - /// get the priority of an extrinsic denoted by `info`. - fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { - match info.class { - DispatchClass::Normal => info.weight.into(), - DispatchClass::Operational => Bounded::max_value(), - // Mandatory extrinsics are only for inherents; never transactions. - DispatchClass::Mandatory => Bounded::min_value(), - } - } - - /// Creates new `SignedExtension` to check weight of the extrinsic. - pub fn new() -> Self { - Self(PhantomData) - } - - /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. - /// - /// It checks and notes the new weight and length. 
- fn do_pre_dispatch( - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - let next_len = Self::check_block_length(info, len)?; - let next_weight = Self::check_weight(info)?; - AllExtrinsicsLen::put(next_len); - AllExtrinsicsWeight::put(next_weight); - Ok(()) - } - - /// Do the validate checks. This can be applied to both signed and unsigned. - /// - /// It only checks that the block weight and length limit will not exceed. - fn do_validate( - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - // ignore the next weight and length. If they return `Ok`, then it is below the limit. - let _ = Self::check_block_length(info, len)?; - let _ = Self::check_weight(info)?; - - Ok(ValidTransaction { priority: Self::get_priority(info), ..Default::default() }) - } + /// Get the quota ratio of each dispatch class type. This indicates that all operational + /// dispatches can use the full capacity of any resource, while user-triggered ones can consume + /// a portion. + fn get_dispatch_limit_ratio(class: DispatchClass) -> Perbill { + match class { + DispatchClass::Operational | DispatchClass::Mandatory => { + ::one() + } + DispatchClass::Normal => T::AvailableBlockRatio::get(), + } + } + + /// Checks if the current extrinsic can fit into the block with respect to block weight limits. + /// + /// Upon successes, it returns the new block weight as a `Result`. 
+ fn check_weight(info: &DispatchInfoOf) -> Result { + let current_weight = Module::::all_extrinsics_weight(); + let maximum_weight = T::MaximumBlockWeight::get(); + let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_weight; + let added_weight = info.weight.min(limit); + let next_weight = current_weight.saturating_add(added_weight); + if next_weight > limit && info.class != DispatchClass::Mandatory { + Err(InvalidTransaction::ExhaustsResources.into()) + } else { + Ok(next_weight) + } + } + + /// Checks if the current extrinsic can fit into the block with respect to block length limits. + /// + /// Upon successes, it returns the new block length as a `Result`. + fn check_block_length( + info: &DispatchInfoOf, + len: usize, + ) -> Result { + let current_len = Module::::all_extrinsics_len(); + let maximum_len = T::MaximumBlockLength::get(); + let limit = Self::get_dispatch_limit_ratio(info.class) * maximum_len; + let added_len = len as u32; + let next_len = current_len.saturating_add(added_len); + if next_len > limit { + Err(InvalidTransaction::ExhaustsResources.into()) + } else { + Ok(next_len) + } + } + + /// get the priority of an extrinsic denoted by `info`. + fn get_priority(info: &DispatchInfoOf) -> TransactionPriority { + match info.class { + DispatchClass::Normal => info.weight.into(), + DispatchClass::Operational => Bounded::max_value(), + // Mandatory extrinsics are only for inherents; never transactions. + DispatchClass::Mandatory => Bounded::min_value(), + } + } + + /// Creates new `SignedExtension` to check weight of the extrinsic. + pub fn new() -> Self { + Self(PhantomData) + } + + /// Do the pre-dispatch checks. This can be applied to both signed and unsigned. + /// + /// It checks and notes the new weight and length. 
+ fn do_pre_dispatch( + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + let next_len = Self::check_block_length(info, len)?; + let next_weight = Self::check_weight(info)?; + AllExtrinsicsLen::put(next_len); + AllExtrinsicsWeight::put(next_weight); + Ok(()) + } + + /// Do the validate checks. This can be applied to both signed and unsigned. + /// + /// It only checks that the block weight and length limit will not exceed. + fn do_validate(info: &DispatchInfoOf, len: usize) -> TransactionValidity { + // ignore the next weight and length. If they return `Ok`, then it is below the limit. + let _ = Self::check_block_length(info, len)?; + let _ = Self::check_weight(info)?; + + Ok(ValidTransaction { + priority: Self::get_priority(info), + ..Default::default() + }) + } } -impl SignedExtension for CheckWeight where - T::Call: Dispatchable +impl SignedExtension for CheckWeight +where + T::Call: Dispatchable, { - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "CheckWeight"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - - fn pre_dispatch( - self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - if info.class == DispatchClass::Mandatory { - Err(InvalidTransaction::MandatoryDispatch)? - } - Self::do_pre_dispatch(info, len) - } - - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - if info.class == DispatchClass::Mandatory { - Err(InvalidTransaction::MandatoryDispatch)? 
- } - Self::do_validate(info, len) - } - - fn pre_dispatch_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> Result<(), TransactionValidityError> { - Self::do_pre_dispatch(info, len) - } - - fn validate_unsigned( - _call: &Self::Call, - info: &DispatchInfoOf, - len: usize, - ) -> TransactionValidity { - Self::do_validate(info, len) - } - - fn post_dispatch( - _pre: Self::Pre, - info: &DispatchInfoOf, - post_info: &PostDispatchInfoOf, - _len: usize, - result: &DispatchResult, - ) -> Result<(), TransactionValidityError> { - // Since mandatory dispatched do not get validated for being overweight, we are sensitive - // to them actually being useful. Block producers are thus not allowed to include mandatory - // extrinsics that result in error. - if info.class == DispatchClass::Mandatory && result.is_err() { - Err(InvalidTransaction::BadMandatory)? - } - - let unspent = post_info.calc_unspent(info); - if unspent > 0 { - AllExtrinsicsWeight::mutate(|weight| { - *weight = weight.map(|w| w.saturating_sub(unspent)); - }) - } - - Ok(()) - } + type AccountId = T::AccountId; + type Call = T::Call; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "CheckWeight"; + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + + fn pre_dispatch( + self, + _who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + if info.class == DispatchClass::Mandatory { + Err(InvalidTransaction::MandatoryDispatch)? + } + Self::do_pre_dispatch(info, len) + } + + fn validate( + &self, + _who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + if info.class == DispatchClass::Mandatory { + Err(InvalidTransaction::MandatoryDispatch)? 
+ } + Self::do_validate(info, len) + } + + fn pre_dispatch_unsigned( + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> Result<(), TransactionValidityError> { + Self::do_pre_dispatch(info, len) + } + + fn validate_unsigned( + _call: &Self::Call, + info: &DispatchInfoOf, + len: usize, + ) -> TransactionValidity { + Self::do_validate(info, len) + } + + fn post_dispatch( + _pre: Self::Pre, + info: &DispatchInfoOf, + post_info: &PostDispatchInfoOf, + _len: usize, + result: &DispatchResult, + ) -> Result<(), TransactionValidityError> { + // Since mandatory dispatched do not get validated for being overweight, we are sensitive + // to them actually being useful. Block producers are thus not allowed to include mandatory + // extrinsics that result in error. + if info.class == DispatchClass::Mandatory && result.is_err() { + Err(InvalidTransaction::BadMandatory)? + } + + let unspent = post_info.calc_unspent(info); + if unspent > 0 { + AllExtrinsicsWeight::mutate(|weight| { + *weight = weight.map(|w| w.saturating_sub(unspent)); + }) + } + + Ok(()) + } } impl Debug for CheckWeight { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckWeight") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckWeight") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } /// Nonce check and increment to give replay protection for transactions. @@ -1361,91 +1418,93 @@ impl Debug for CheckWeight { pub struct CheckNonce(#[codec(compact)] T::Index); impl CheckNonce { - /// utility constructor. Used only in client/factory code. - pub fn from(nonce: T::Index) -> Self { - Self(nonce) - } + /// utility constructor. Used only in client/factory code. 
+ pub fn from(nonce: T::Index) -> Self { + Self(nonce) + } } impl Debug for CheckNonce { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckNonce({})", self.0) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckNonce({})", self.0) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } -impl SignedExtension for CheckNonce where - T::Call: Dispatchable +impl SignedExtension for CheckNonce +where + T::Call: Dispatchable, { - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = (); - type Pre = (); - const IDENTIFIER: &'static str = "CheckNonce"; - - fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { Ok(()) } - - fn pre_dispatch( - self, - who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> Result<(), TransactionValidityError> { - let mut account = Account::::get(who); - if self.0 != account.nonce { - return Err( - if self.0 < account.nonce { - InvalidTransaction::Stale - } else { - InvalidTransaction::Future - }.into() - ) - } - account.nonce += T::Index::one(); - Account::::insert(who, account); - Ok(()) - } - - fn validate( - &self, - who: &Self::AccountId, - _call: &Self::Call, - info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - // check index - let account = Account::::get(who); - if self.0 < account.nonce { - return InvalidTransaction::Stale.into() - } - - let provides = vec![Encode::encode(&(who, self.0))]; - let requires = if account.nonce < self.0 { - vec![Encode::encode(&(who, self.0 - One::one()))] - } else { - vec![] - }; - - Ok(ValidTransaction { - priority: info.weight as TransactionPriority, - requires, - provides, 
- longevity: TransactionLongevity::max_value(), - propagate: true, - }) - } + type AccountId = T::AccountId; + type Call = T::Call; + type AdditionalSigned = (); + type Pre = (); + const IDENTIFIER: &'static str = "CheckNonce"; + + fn additional_signed(&self) -> sp_std::result::Result<(), TransactionValidityError> { + Ok(()) + } + + fn pre_dispatch( + self, + who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> Result<(), TransactionValidityError> { + let mut account = Account::::get(who); + if self.0 != account.nonce { + return Err(if self.0 < account.nonce { + InvalidTransaction::Stale + } else { + InvalidTransaction::Future + } + .into()); + } + account.nonce += T::Index::one(); + Account::::insert(who, account); + Ok(()) + } + + fn validate( + &self, + who: &Self::AccountId, + _call: &Self::Call, + info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + // check index + let account = Account::::get(who); + if self.0 < account.nonce { + return InvalidTransaction::Stale.into(); + } + + let provides = vec![Encode::encode(&(who, self.0))]; + let requires = if account.nonce < self.0 { + vec![Encode::encode(&(who, self.0 - One::one()))] + } else { + vec![] + }; + + Ok(ValidTransaction { + priority: info.weight as TransactionPriority, + requires, + provides, + longevity: TransactionLongevity::max_value(), + propagate: true, + }) + } } impl IsDeadAccount for Module { - fn is_dead_account(who: &T::AccountId) -> bool { - !Account::::contains_key(who) - } + fn is_dead_account(who: &T::AccountId) -> bool { + !Account::::contains_key(who) + } } /// Check for transaction mortality. @@ -1453,55 +1512,55 @@ impl IsDeadAccount for Module { pub struct CheckEra(Era, sp_std::marker::PhantomData); impl CheckEra { - /// utility constructor. Used only in client/factory code. - pub fn from(era: Era) -> Self { - Self(era, sp_std::marker::PhantomData) - } + /// utility constructor. Used only in client/factory code. 
+ pub fn from(era: Era) -> Self { + Self(era, sp_std::marker::PhantomData) + } } impl Debug for CheckEra { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckEra({:?})", self.0) - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckEra({:?})", self.0) + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } impl SignedExtension for CheckEra { - type AccountId = T::AccountId; - type Call = T::Call; - type AdditionalSigned = T::Hash; - type Pre = (); - const IDENTIFIER: &'static str = "CheckEra"; - - fn validate( - &self, - _who: &Self::AccountId, - _call: &Self::Call, - _info: &DispatchInfoOf, - _len: usize, - ) -> TransactionValidity { - let current_u64 = >::block_number().saturated_into::(); - let valid_till = self.0.death(current_u64); - Ok(ValidTransaction { - longevity: valid_till.saturating_sub(current_u64), - ..Default::default() - }) - } - - fn additional_signed(&self) -> Result { - let current_u64 = >::block_number().saturated_into::(); - let n = self.0.birth(current_u64).saturated_into::(); - if !>::contains_key(n) { - Err(InvalidTransaction::AncientBirthBlock.into()) - } else { - Ok(>::block_hash(n)) - } - } + type AccountId = T::AccountId; + type Call = T::Call; + type AdditionalSigned = T::Hash; + type Pre = (); + const IDENTIFIER: &'static str = "CheckEra"; + + fn validate( + &self, + _who: &Self::AccountId, + _call: &Self::Call, + _info: &DispatchInfoOf, + _len: usize, + ) -> TransactionValidity { + let current_u64 = >::block_number().saturated_into::(); + let valid_till = self.0.death(current_u64); + Ok(ValidTransaction { + longevity: valid_till.saturating_sub(current_u64), + ..Default::default() + }) + } + + fn additional_signed(&self) 
-> Result { + let current_u64 = >::block_number().saturated_into::(); + let n = self.0.birth(current_u64).saturated_into::(); + if !>::contains_key(n) { + Err(InvalidTransaction::AncientBirthBlock.into()) + } else { + Ok(>::block_hash(n)) + } + } } /// Nonce check and increment to give replay protection for transactions. @@ -1509,34 +1568,34 @@ impl SignedExtension for CheckEra { pub struct CheckGenesis(sp_std::marker::PhantomData); impl Debug for CheckGenesis { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckGenesis") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckGenesis") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } impl CheckGenesis { - /// Creates new `SignedExtension` to check genesis hash. - pub fn new() -> Self { - Self(sp_std::marker::PhantomData) - } + /// Creates new `SignedExtension` to check genesis hash. + pub fn new() -> Self { + Self(sp_std::marker::PhantomData) + } } impl SignedExtension for CheckGenesis { - type AccountId = T::AccountId; - type Call = ::Call; - type AdditionalSigned = T::Hash; - type Pre = (); - const IDENTIFIER: &'static str = "CheckGenesis"; - - fn additional_signed(&self) -> Result { - Ok(>::block_hash(T::BlockNumber::zero())) - } + type AccountId = T::AccountId; + type Call = ::Call; + type AdditionalSigned = T::Hash; + type Pre = (); + const IDENTIFIER: &'static str = "CheckGenesis"; + + fn additional_signed(&self) -> Result { + Ok(>::block_hash(T::BlockNumber::zero())) + } } /// Ensure the runtime version registered in the transaction is the same as at present. 
@@ -1544,642 +1603,747 @@ impl SignedExtension for CheckGenesis { pub struct CheckVersion(sp_std::marker::PhantomData); impl Debug for CheckVersion { - #[cfg(feature = "std")] - fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - write!(f, "CheckVersion") - } - - #[cfg(not(feature = "std"))] - fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { - Ok(()) - } + #[cfg(feature = "std")] + fn fmt(&self, f: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + write!(f, "CheckVersion") + } + + #[cfg(not(feature = "std"))] + fn fmt(&self, _: &mut sp_std::fmt::Formatter) -> sp_std::fmt::Result { + Ok(()) + } } impl CheckVersion { - /// Create new `SignedExtension` to check runtime version. - pub fn new() -> Self { - Self(sp_std::marker::PhantomData) - } + /// Create new `SignedExtension` to check runtime version. + pub fn new() -> Self { + Self(sp_std::marker::PhantomData) + } } impl SignedExtension for CheckVersion { - type AccountId = T::AccountId; - type Call = ::Call; - type AdditionalSigned = u32; - type Pre = (); - const IDENTIFIER: &'static str = "CheckVersion"; - - fn additional_signed(&self) -> Result { - Ok(>::runtime_version().spec_version) - } + type AccountId = T::AccountId; + type Call = ::Call; + type AdditionalSigned = u32; + type Pre = (); + const IDENTIFIER: &'static str = "CheckVersion"; + + fn additional_signed(&self) -> Result { + Ok(>::runtime_version().spec_version) + } } pub struct ChainContext(sp_std::marker::PhantomData); impl Default for ChainContext { - fn default() -> Self { - ChainContext(sp_std::marker::PhantomData) - } + fn default() -> Self { + ChainContext(sp_std::marker::PhantomData) + } } impl Lookup for ChainContext { - type Source = ::Source; - type Target = ::Target; + type Source = ::Source; + type Target = ::Target; - fn lookup(&self, s: Self::Source) -> Result { - ::lookup(s) - } + fn lookup(&self, s: Self::Source) -> Result { + ::lookup(s) + } } #[cfg(test)] mod tests { - use super::*; - 
use sp_std::cell::RefCell; - use sp_core::H256; - use sp_runtime::{traits::{BlakeTwo256, IdentityLookup}, testing::Header, DispatchError}; - use frame_support::{impl_outer_origin, parameter_types}; - - impl_outer_origin! { - pub enum Origin for Test where system = super {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - - parameter_types! { - pub const BlockHashCount: u64 = 10; - pub const MaximumBlockWeight: Weight = 1024; - pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); - pub const MaximumBlockLength: u32 = 1024; - pub const Version: RuntimeVersion = RuntimeVersion { - spec_name: sp_version::create_runtime_str!("test"), - impl_name: sp_version::create_runtime_str!("system-test"), - authoring_version: 1, - spec_version: 1, - impl_version: 1, - apis: sp_version::create_apis_vec!([]), - transaction_version: 1, - }; - } - - thread_local!{ - pub static KILLED: RefCell> = RefCell::new(vec![]); - } - - pub struct RecordKilled; - impl OnKilledAccount for RecordKilled { - fn on_killed_account(who: &u64) { KILLED.with(|r| r.borrow_mut().push(*who)) } - } - - #[derive(Debug)] - pub struct Call {} - impl Dispatchable for Call { - type Origin = (); - type Trait = (); - type Info = DispatchInfo; - type PostInfo = PostDispatchInfo; - fn dispatch(self, _origin: Self::Origin) - -> sp_runtime::DispatchResultWithInfo { - panic!("Do not use dummy implementation for dispatch."); - } - } - - impl Trait for Test { - type Origin = Origin; - type Call = Call; - type Index = u64; - type BlockNumber = u64; - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = u16; - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = Version; - type ModuleToIndex = (); - type AccountData = u32; - type OnNewAccount = 
(); - type OnKilledAccount = RecordKilled; - } - - impl From> for u16 { - fn from(e: Event) -> u16 { - match e { - Event::::ExtrinsicSuccess(..) => 100, - Event::::ExtrinsicFailed(..) => 101, - Event::::CodeUpdated => 102, - _ => 103, - } - } - } - - type System = Module; - - const CALL: &::Call = &Call {}; - - fn new_test_ext() -> sp_io::TestExternalities { - GenesisConfig::default().build_storage::().unwrap().into() - } - - fn normal_weight_limit() -> Weight { - ::AvailableBlockRatio::get() * ::MaximumBlockWeight::get() - } - - fn normal_length_limit() -> u32 { - ::AvailableBlockRatio::get() * ::MaximumBlockLength::get() - } - - #[test] - fn origin_works() { - let o = Origin::from(RawOrigin::::Signed(1u64)); - let x: Result, Origin> = o.into(); - assert_eq!(x, Ok(RawOrigin::::Signed(1u64))); - } - - #[test] - fn stored_map_works() { - new_test_ext().execute_with(|| { - System::insert(&0, 42); - assert!(System::allow_death(&0)); - - System::inc_ref(&0); - assert!(!System::allow_death(&0)); - - System::insert(&0, 69); - assert!(!System::allow_death(&0)); - - System::dec_ref(&0); - assert!(System::allow_death(&0)); - - assert!(KILLED.with(|r| r.borrow().is_empty())); - System::kill_account(&0); - assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); - }); - } - - #[test] - fn deposit_event_should_work() { - new_test_ext().execute_with(|| { - System::initialize( - &1, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - System::note_finished_extrinsics(); - System::deposit_event(1u16); - System::finalize(); - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Finalization, - event: 1u16, - topics: vec![], - } - ] - ); - - System::initialize( - &2, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - System::deposit_event(32u16); - System::note_finished_initialize(); - System::deposit_event(42u16); - System::note_applied_extrinsic(&Ok(()), 0, Default::default()); - 
System::note_applied_extrinsic(&Err(DispatchError::BadOrigin), 0, Default::default()); - System::note_finished_extrinsics(); - System::deposit_event(3u16); - System::finalize(); - assert_eq!( - System::events(), - vec![ - EventRecord { phase: Phase::Initialization, event: 32u16, topics: vec![] }, - EventRecord { phase: Phase::ApplyExtrinsic(0), event: 42u16, topics: vec![] }, - EventRecord { phase: Phase::ApplyExtrinsic(0), event: 100u16, topics: vec![] }, - EventRecord { phase: Phase::ApplyExtrinsic(1), event: 101u16, topics: vec![] }, - EventRecord { phase: Phase::Finalization, event: 3u16, topics: vec![] } - ] - ); - }); - } - - #[test] - fn deposit_event_topics() { - new_test_ext().execute_with(|| { - const BLOCK_NUMBER: u64 = 1; - - System::initialize( - &BLOCK_NUMBER, - &[0u8; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - System::note_finished_extrinsics(); - - let topics = vec![ - H256::repeat_byte(1), - H256::repeat_byte(2), - H256::repeat_byte(3), - ]; - - // We deposit a few events with different sets of topics. - System::deposit_event_indexed(&topics[0..3], 1u16); - System::deposit_event_indexed(&topics[0..1], 2u16); - System::deposit_event_indexed(&topics[1..2], 3u16); - - System::finalize(); - - // Check that topics are reflected in the event record. - assert_eq!( - System::events(), - vec![ - EventRecord { - phase: Phase::Finalization, - event: 1u16, - topics: topics[0..3].to_vec(), - }, - EventRecord { - phase: Phase::Finalization, - event: 2u16, - topics: topics[0..1].to_vec(), - }, - EventRecord { - phase: Phase::Finalization, - event: 3u16, - topics: topics[1..2].to_vec(), - } - ] - ); - - // Check that the topic-events mapping reflects the deposited topics. - // Note that these are indexes of the events. 
- assert_eq!( - System::event_topics(&topics[0]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)], - ); - assert_eq!( - System::event_topics(&topics[1]), - vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)], - ); - assert_eq!( - System::event_topics(&topics[2]), - vec![(BLOCK_NUMBER, 0)], - ); - }); - } - - #[test] - fn prunes_block_hash_mappings() { - new_test_ext().execute_with(|| { - // simulate import of 15 blocks - for n in 1..=15 { - System::initialize( - &n, - &[n as u8 - 1; 32].into(), - &[0u8; 32].into(), - &Default::default(), - InitKind::Full, - ); - - System::finalize(); - } - - // first 5 block hashes are pruned - for n in 0..5 { - assert_eq!( - System::block_hash(n), - H256::zero(), - ); - } - - // the remaining 10 are kept - for n in 5..15 { - assert_eq!( - System::block_hash(n), - [n as u8; 32].into(), - ); - } - }) - } - - #[test] - fn signed_ext_check_nonce_works() { - new_test_ext().execute_with(|| { - Account::::insert(1, AccountInfo { nonce: 1, refcount: 0, data: 0 }); - let info = DispatchInfo::default(); - let len = 0_usize; - // stale - assert!(CheckNonce::(0).validate(&1, CALL, &info, len).is_err()); - assert!(CheckNonce::(0).pre_dispatch(&1, CALL, &info, len).is_err()); - // correct - assert!(CheckNonce::(1).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(1).pre_dispatch(&1, CALL, &info, len).is_ok()); - // future - assert!(CheckNonce::(5).validate(&1, CALL, &info, len).is_ok()); - assert!(CheckNonce::(5).pre_dispatch(&1, CALL, &info, len).is_err()); - }) - } - - #[test] - fn signed_ext_check_weight_works_normal_tx() { - new_test_ext().execute_with(|| { - let normal_limit = normal_weight_limit(); - let small = DispatchInfo { weight: 100, ..Default::default() }; - let medium = DispatchInfo { - weight: normal_limit - 1, - ..Default::default() - }; - let big = DispatchInfo { - weight: normal_limit + 1, - ..Default::default() - }; - let len = 0_usize; - - let reset_check_weight = |i, f, s| { - AllExtrinsicsWeight::put(s); - let r = 
CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); - if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } - }; - - reset_check_weight(&small, false, 0); - reset_check_weight(&medium, false, 0); - reset_check_weight(&big, true, 1); - }) - } - - #[test] - fn signed_ext_check_weight_refund_works() { - new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { actual_weight: Some(128), }; - let len = 0_usize; - - AllExtrinsicsWeight::put(256); - - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!(AllExtrinsicsWeight::get().unwrap(), info.weight + 256); - - assert!( - CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .is_ok() - ); - assert_eq!(AllExtrinsicsWeight::get().unwrap(), post_info.actual_weight.unwrap() + 256); - }) - } - - #[test] - fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { - new_test_ext().execute_with(|| { - let info = DispatchInfo { weight: 512, ..Default::default() }; - let post_info = PostDispatchInfo { actual_weight: Some(700), }; - let len = 0_usize; - - AllExtrinsicsWeight::put(128); - - let pre = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &info, len).unwrap(); - assert_eq!(AllExtrinsicsWeight::get().unwrap(), info.weight + 128); - - assert!( - CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())) - .is_ok() - ); - assert_eq!(AllExtrinsicsWeight::get().unwrap(), info.weight + 128); - }) - } - - #[test] - fn signed_ext_check_weight_fee_works() { - new_test_ext().execute_with(|| { - let free = DispatchInfo { weight: 0, ..Default::default() }; - let len = 0_usize; - - assert_eq!(System::all_extrinsics_weight(), 0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len); - assert!(r.is_ok()); - assert_eq!(System::all_extrinsics_weight(), 0); - }) - } - - #[test] - fn signed_ext_check_weight_max_works() { - new_test_ext().execute_with(|| { - 
let max = DispatchInfo { weight: Weight::max_value(), ..Default::default() }; - let len = 0_usize; - let normal_limit = normal_weight_limit(); - - assert_eq!(System::all_extrinsics_weight(), 0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &max, len); - assert!(r.is_ok()); - assert_eq!(System::all_extrinsics_weight(), normal_limit); - }) - } - - #[test] - fn signed_ext_check_weight_works_operational_tx() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, ..Default::default() }; - let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: true }; - let len = 0_usize; - let normal_limit = normal_weight_limit(); - - // given almost full block - AllExtrinsicsWeight::put(normal_limit); - // will not fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); - // will fit. - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); - - // likewise for length limit. - let len = 100_usize; - AllExtrinsicsLen::put(normal_length_limit()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &normal, len).is_err()); - assert!(CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &op, len).is_ok()); - }) - } - - #[test] - fn signed_ext_check_weight_priority_works() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: true }; - let op = DispatchInfo { weight: 100, class: DispatchClass::Operational, pays_fee: true }; - let len = 0_usize; - - let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, &normal, len) - .unwrap() - .priority; - assert_eq!(priority, 100); - - let priority = CheckWeight::(PhantomData) - .validate(&1, CALL, &op, len) - .unwrap() - .priority; - assert_eq!(priority, u64::max_value()); - }) - } - - #[test] - fn signed_ext_check_weight_block_size_works() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo::default(); - let normal_limit = 
normal_weight_limit() as usize; - let reset_check_weight = |tx, s, f| { - AllExtrinsicsLen::put(0); - let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); - if f { assert!(r.is_err()) } else { assert!(r.is_ok()) } - }; - - reset_check_weight(&normal, normal_limit - 1, false); - reset_check_weight(&normal, normal_limit, false); - reset_check_weight(&normal, normal_limit + 1, true); - - // Operational ones don't have this limit. - let op = DispatchInfo { weight: 0, class: DispatchClass::Operational, pays_fee: true }; - reset_check_weight(&op, normal_limit, false); - reset_check_weight(&op, normal_limit + 100, false); - reset_check_weight(&op, 1024, false); - reset_check_weight(&op, 1025, true); - }) - } - - #[test] - fn signed_ext_check_era_should_work() { - new_test_ext().execute_with(|| { - // future - assert_eq!( - CheckEra::::from(Era::mortal(4, 2)).additional_signed().err().unwrap(), - InvalidTransaction::AncientBirthBlock.into(), - ); - - // correct - System::set_block_number(13); - >::insert(12, H256::repeat_byte(1)); - assert!(CheckEra::::from(Era::mortal(4, 12)).additional_signed().is_ok()); - }) - } - - #[test] - fn signed_ext_check_era_should_change_longevity() { - new_test_ext().execute_with(|| { - let normal = DispatchInfo { weight: 100, class: DispatchClass::Normal, pays_fee: true }; - let len = 0_usize; - let ext = ( - CheckWeight::(PhantomData), - CheckEra::::from(Era::mortal(16, 256)), - ); - System::set_block_number(17); - >::insert(16, H256::repeat_byte(1)); - - assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); - }) - } - - - #[test] - fn set_code_checks_works() { - struct CallInWasm(Vec); - - impl sp_core::traits::CallInWasm for CallInWasm { - fn call_in_wasm( - &self, - _: &[u8], - _: Option>, - _: &str, - _: &[u8], - _: &mut dyn sp_externalities::Externalities, - ) -> Result, String> { - Ok(self.0.clone()) - } - } - - let test_data = vec![ - ("test", 1, 2, Err(Error::::SpecVersionNeedsToIncrease)), - ("test", 
1, 1, Err(Error::::SpecVersionNeedsToIncrease)), - ("test2", 1, 1, Err(Error::::InvalidSpecName)), - ("test", 2, 1, Ok(())), - ("test", 0, 1, Err(Error::::SpecVersionNeedsToIncrease)), - ("test", 1, 0, Err(Error::::SpecVersionNeedsToIncrease)), - ]; - - for (spec_name, spec_version, impl_version, expected) in test_data.into_iter() { - let version = RuntimeVersion { - spec_name: spec_name.into(), - spec_version, - impl_version, - ..Default::default() - }; - let call_in_wasm = CallInWasm(version.encode()); - - let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(call_in_wasm)); - ext.execute_with(|| { - let res = System::set_code( - RawOrigin::Root.into(), - vec![1, 2, 3, 4], - ); - - assert_eq!(expected.map_err(DispatchError::from), res); - }); - } - } - - #[test] - fn set_code_with_real_wasm_blob() { - let executor = substrate_test_runtime_client::new_native_executor(); - let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); - ext.execute_with(|| { - System::set_block_number(1); - System::set_code( - RawOrigin::Root.into(), - substrate_test_runtime_client::runtime::WASM_BINARY.to_vec(), - ).unwrap(); - - assert_eq!( - System::events(), - vec![EventRecord { phase: Phase::Initialization, event: 102u16, topics: vec![] }], - ); - }); - } - - #[test] - fn runtime_upgraded_with_set_storage() { - let executor = substrate_test_runtime_client::new_native_executor(); - let mut ext = new_test_ext(); - ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); - ext.execute_with(|| { - System::set_storage( - RawOrigin::Root.into(), - vec![( - well_known_keys::CODE.to_vec(), - substrate_test_runtime_client::runtime::WASM_BINARY.to_vec() - )], - ).unwrap(); - }); - } - - #[test] - fn events_not_emitted_during_genesis() { - new_test_ext().execute_with(|| { - // Block Number is zero at genesis - assert!(System::block_number().is_zero()); - 
System::on_created_account(Default::default()); - assert!(System::events().is_empty()); - // Events will be emitted starting on block 1 - System::set_block_number(1); - System::on_created_account(Default::default()); - assert!(System::events().len() == 1); - }); - } + use super::*; + use frame_support::{impl_outer_origin, parameter_types}; + use sp_core::H256; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + DispatchError, + }; + use sp_std::cell::RefCell; + + impl_outer_origin! { + pub enum Origin for Test where system = super {} + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + + parameter_types! { + pub const BlockHashCount: u64 = 10; + pub const MaximumBlockWeight: Weight = 1024; + pub const AvailableBlockRatio: Perbill = Perbill::from_percent(75); + pub const MaximumBlockLength: u32 = 1024; + pub const Version: RuntimeVersion = RuntimeVersion { + spec_name: sp_version::create_runtime_str!("test"), + impl_name: sp_version::create_runtime_str!("system-test"), + authoring_version: 1, + spec_version: 1, + impl_version: 1, + apis: sp_version::create_apis_vec!([]), + transaction_version: 1, + }; + } + + thread_local! 
{ + pub static KILLED: RefCell> = RefCell::new(vec![]); + } + + pub struct RecordKilled; + impl OnKilledAccount for RecordKilled { + fn on_killed_account(who: &u64) { + KILLED.with(|r| r.borrow_mut().push(*who)) + } + } + + #[derive(Debug)] + pub struct Call {} + impl Dispatchable for Call { + type Origin = (); + type Trait = (); + type Info = DispatchInfo; + type PostInfo = PostDispatchInfo; + fn dispatch( + self, + _origin: Self::Origin, + ) -> sp_runtime::DispatchResultWithInfo { + panic!("Do not use dummy implementation for dispatch."); + } + } + + impl Trait for Test { + type Origin = Origin; + type Call = Call; + type Index = u64; + type BlockNumber = u64; + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = u16; + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = Version; + type ModuleToIndex = (); + type AccountData = u32; + type OnNewAccount = (); + type OnKilledAccount = RecordKilled; + } + + impl From> for u16 { + fn from(e: Event) -> u16 { + match e { + Event::::ExtrinsicSuccess(..) => 100, + Event::::ExtrinsicFailed(..) 
=> 101, + Event::::CodeUpdated => 102, + _ => 103, + } + } + } + + type System = Module; + + const CALL: &::Call = &Call {}; + + fn new_test_ext() -> sp_io::TestExternalities { + GenesisConfig::default() + .build_storage::() + .unwrap() + .into() + } + + fn normal_weight_limit() -> Weight { + ::AvailableBlockRatio::get() * ::MaximumBlockWeight::get() + } + + fn normal_length_limit() -> u32 { + ::AvailableBlockRatio::get() * ::MaximumBlockLength::get() + } + + #[test] + fn origin_works() { + let o = Origin::from(RawOrigin::::Signed(1u64)); + let x: Result, Origin> = o.into(); + assert_eq!(x, Ok(RawOrigin::::Signed(1u64))); + } + + #[test] + fn stored_map_works() { + new_test_ext().execute_with(|| { + System::insert(&0, 42); + assert!(System::allow_death(&0)); + + System::inc_ref(&0); + assert!(!System::allow_death(&0)); + + System::insert(&0, 69); + assert!(!System::allow_death(&0)); + + System::dec_ref(&0); + assert!(System::allow_death(&0)); + + assert!(KILLED.with(|r| r.borrow().is_empty())); + System::kill_account(&0); + assert_eq!(KILLED.with(|r| r.borrow().clone()), vec![0u64]); + }); + } + + #[test] + fn deposit_event_should_work() { + new_test_ext().execute_with(|| { + System::initialize( + &1, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::note_finished_extrinsics(); + System::deposit_event(1u16); + System::finalize(); + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Finalization, + event: 1u16, + topics: vec![], + }] + ); + + System::initialize( + &2, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::deposit_event(32u16); + System::note_finished_initialize(); + System::deposit_event(42u16); + System::note_applied_extrinsic(&Ok(()), 0, Default::default()); + System::note_applied_extrinsic(&Err(DispatchError::BadOrigin), 0, Default::default()); + System::note_finished_extrinsics(); + System::deposit_event(3u16); + System::finalize(); + 
assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Initialization, + event: 32u16, + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: 42u16, + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(0), + event: 100u16, + topics: vec![] + }, + EventRecord { + phase: Phase::ApplyExtrinsic(1), + event: 101u16, + topics: vec![] + }, + EventRecord { + phase: Phase::Finalization, + event: 3u16, + topics: vec![] + } + ] + ); + }); + } + + #[test] + fn deposit_event_topics() { + new_test_ext().execute_with(|| { + const BLOCK_NUMBER: u64 = 1; + + System::initialize( + &BLOCK_NUMBER, + &[0u8; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + System::note_finished_extrinsics(); + + let topics = vec![ + H256::repeat_byte(1), + H256::repeat_byte(2), + H256::repeat_byte(3), + ]; + + // We deposit a few events with different sets of topics. + System::deposit_event_indexed(&topics[0..3], 1u16); + System::deposit_event_indexed(&topics[0..1], 2u16); + System::deposit_event_indexed(&topics[1..2], 3u16); + + System::finalize(); + + // Check that topics are reflected in the event record. + assert_eq!( + System::events(), + vec![ + EventRecord { + phase: Phase::Finalization, + event: 1u16, + topics: topics[0..3].to_vec(), + }, + EventRecord { + phase: Phase::Finalization, + event: 2u16, + topics: topics[0..1].to_vec(), + }, + EventRecord { + phase: Phase::Finalization, + event: 3u16, + topics: topics[1..2].to_vec(), + } + ] + ); + + // Check that the topic-events mapping reflects the deposited topics. + // Note that these are indexes of the events. 
+ assert_eq!( + System::event_topics(&topics[0]), + vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 1)], + ); + assert_eq!( + System::event_topics(&topics[1]), + vec![(BLOCK_NUMBER, 0), (BLOCK_NUMBER, 2)], + ); + assert_eq!(System::event_topics(&topics[2]), vec![(BLOCK_NUMBER, 0)],); + }); + } + + #[test] + fn prunes_block_hash_mappings() { + new_test_ext().execute_with(|| { + // simulate import of 15 blocks + for n in 1..=15 { + System::initialize( + &n, + &[n as u8 - 1; 32].into(), + &[0u8; 32].into(), + &Default::default(), + InitKind::Full, + ); + + System::finalize(); + } + + // first 5 block hashes are pruned + for n in 0..5 { + assert_eq!(System::block_hash(n), H256::zero(),); + } + + // the remaining 10 are kept + for n in 5..15 { + assert_eq!(System::block_hash(n), [n as u8; 32].into(),); + } + }) + } + + #[test] + fn signed_ext_check_nonce_works() { + new_test_ext().execute_with(|| { + Account::::insert( + 1, + AccountInfo { + nonce: 1, + refcount: 0, + data: 0, + }, + ); + let info = DispatchInfo::default(); + let len = 0_usize; + // stale + assert!(CheckNonce::(0) + .validate(&1, CALL, &info, len) + .is_err()); + assert!(CheckNonce::(0) + .pre_dispatch(&1, CALL, &info, len) + .is_err()); + // correct + assert!(CheckNonce::(1).validate(&1, CALL, &info, len).is_ok()); + assert!(CheckNonce::(1) + .pre_dispatch(&1, CALL, &info, len) + .is_ok()); + // future + assert!(CheckNonce::(5).validate(&1, CALL, &info, len).is_ok()); + assert!(CheckNonce::(5) + .pre_dispatch(&1, CALL, &info, len) + .is_err()); + }) + } + + #[test] + fn signed_ext_check_weight_works_normal_tx() { + new_test_ext().execute_with(|| { + let normal_limit = normal_weight_limit(); + let small = DispatchInfo { + weight: 100, + ..Default::default() + }; + let medium = DispatchInfo { + weight: normal_limit - 1, + ..Default::default() + }; + let big = DispatchInfo { + weight: normal_limit + 1, + ..Default::default() + }; + let len = 0_usize; + + let reset_check_weight = |i, f, s| { + 
AllExtrinsicsWeight::put(s); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, i, len); + if f { + assert!(r.is_err()) + } else { + assert!(r.is_ok()) + } + }; + + reset_check_weight(&small, false, 0); + reset_check_weight(&medium, false, 0); + reset_check_weight(&big, true, 1); + }) + } + + #[test] + fn signed_ext_check_weight_refund_works() { + new_test_ext().execute_with(|| { + let info = DispatchInfo { + weight: 512, + ..Default::default() + }; + let post_info = PostDispatchInfo { + actual_weight: Some(128), + }; + let len = 0_usize; + + AllExtrinsicsWeight::put(256); + + let pre = CheckWeight::(PhantomData) + .pre_dispatch(&1, CALL, &info, len) + .unwrap(); + assert_eq!(AllExtrinsicsWeight::get().unwrap(), info.weight + 256); + + assert!( + CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())).is_ok() + ); + assert_eq!( + AllExtrinsicsWeight::get().unwrap(), + post_info.actual_weight.unwrap() + 256 + ); + }) + } + + #[test] + fn signed_ext_check_weight_actual_weight_higher_than_max_is_capped() { + new_test_ext().execute_with(|| { + let info = DispatchInfo { + weight: 512, + ..Default::default() + }; + let post_info = PostDispatchInfo { + actual_weight: Some(700), + }; + let len = 0_usize; + + AllExtrinsicsWeight::put(128); + + let pre = CheckWeight::(PhantomData) + .pre_dispatch(&1, CALL, &info, len) + .unwrap(); + assert_eq!(AllExtrinsicsWeight::get().unwrap(), info.weight + 128); + + assert!( + CheckWeight::::post_dispatch(pre, &info, &post_info, len, &Ok(())).is_ok() + ); + assert_eq!(AllExtrinsicsWeight::get().unwrap(), info.weight + 128); + }) + } + + #[test] + fn signed_ext_check_weight_fee_works() { + new_test_ext().execute_with(|| { + let free = DispatchInfo { + weight: 0, + ..Default::default() + }; + let len = 0_usize; + + assert_eq!(System::all_extrinsics_weight(), 0); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &free, len); + assert!(r.is_ok()); + assert_eq!(System::all_extrinsics_weight(), 0); + }) + } + + 
#[test] + fn signed_ext_check_weight_max_works() { + new_test_ext().execute_with(|| { + let max = DispatchInfo { + weight: Weight::max_value(), + ..Default::default() + }; + let len = 0_usize; + let normal_limit = normal_weight_limit(); + + assert_eq!(System::all_extrinsics_weight(), 0); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, &max, len); + assert!(r.is_ok()); + assert_eq!(System::all_extrinsics_weight(), normal_limit); + }) + } + + #[test] + fn signed_ext_check_weight_works_operational_tx() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo { + weight: 100, + ..Default::default() + }; + let op = DispatchInfo { + weight: 100, + class: DispatchClass::Operational, + pays_fee: true, + }; + let len = 0_usize; + let normal_limit = normal_weight_limit(); + + // given almost full block + AllExtrinsicsWeight::put(normal_limit); + // will not fit. + assert!(CheckWeight::(PhantomData) + .pre_dispatch(&1, CALL, &normal, len) + .is_err()); + // will fit. + assert!(CheckWeight::(PhantomData) + .pre_dispatch(&1, CALL, &op, len) + .is_ok()); + + // likewise for length limit. 
+ let len = 100_usize; + AllExtrinsicsLen::put(normal_length_limit()); + assert!(CheckWeight::(PhantomData) + .pre_dispatch(&1, CALL, &normal, len) + .is_err()); + assert!(CheckWeight::(PhantomData) + .pre_dispatch(&1, CALL, &op, len) + .is_ok()); + }) + } + + #[test] + fn signed_ext_check_weight_priority_works() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo { + weight: 100, + class: DispatchClass::Normal, + pays_fee: true, + }; + let op = DispatchInfo { + weight: 100, + class: DispatchClass::Operational, + pays_fee: true, + }; + let len = 0_usize; + + let priority = CheckWeight::(PhantomData) + .validate(&1, CALL, &normal, len) + .unwrap() + .priority; + assert_eq!(priority, 100); + + let priority = CheckWeight::(PhantomData) + .validate(&1, CALL, &op, len) + .unwrap() + .priority; + assert_eq!(priority, u64::max_value()); + }) + } + + #[test] + fn signed_ext_check_weight_block_size_works() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo::default(); + let normal_limit = normal_weight_limit() as usize; + let reset_check_weight = |tx, s, f| { + AllExtrinsicsLen::put(0); + let r = CheckWeight::(PhantomData).pre_dispatch(&1, CALL, tx, s); + if f { + assert!(r.is_err()) + } else { + assert!(r.is_ok()) + } + }; + + reset_check_weight(&normal, normal_limit - 1, false); + reset_check_weight(&normal, normal_limit, false); + reset_check_weight(&normal, normal_limit + 1, true); + + // Operational ones don't have this limit. 
+ let op = DispatchInfo { + weight: 0, + class: DispatchClass::Operational, + pays_fee: true, + }; + reset_check_weight(&op, normal_limit, false); + reset_check_weight(&op, normal_limit + 100, false); + reset_check_weight(&op, 1024, false); + reset_check_weight(&op, 1025, true); + }) + } + + #[test] + fn signed_ext_check_era_should_work() { + new_test_ext().execute_with(|| { + // future + assert_eq!( + CheckEra::::from(Era::mortal(4, 2)) + .additional_signed() + .err() + .unwrap(), + InvalidTransaction::AncientBirthBlock.into(), + ); + + // correct + System::set_block_number(13); + >::insert(12, H256::repeat_byte(1)); + assert!(CheckEra::::from(Era::mortal(4, 12)) + .additional_signed() + .is_ok()); + }) + } + + #[test] + fn signed_ext_check_era_should_change_longevity() { + new_test_ext().execute_with(|| { + let normal = DispatchInfo { + weight: 100, + class: DispatchClass::Normal, + pays_fee: true, + }; + let len = 0_usize; + let ext = ( + CheckWeight::(PhantomData), + CheckEra::::from(Era::mortal(16, 256)), + ); + System::set_block_number(17); + >::insert(16, H256::repeat_byte(1)); + + assert_eq!(ext.validate(&1, CALL, &normal, len).unwrap().longevity, 15); + }) + } + + #[test] + fn set_code_checks_works() { + struct CallInWasm(Vec); + + impl sp_core::traits::CallInWasm for CallInWasm { + fn call_in_wasm( + &self, + _: &[u8], + _: Option>, + _: &str, + _: &[u8], + _: &mut dyn sp_externalities::Externalities, + ) -> Result, String> { + Ok(self.0.clone()) + } + } + + let test_data = vec![ + ("test", 1, 2, Err(Error::::SpecVersionNeedsToIncrease)), + ("test", 1, 1, Err(Error::::SpecVersionNeedsToIncrease)), + ("test2", 1, 1, Err(Error::::InvalidSpecName)), + ("test", 2, 1, Ok(())), + ("test", 0, 1, Err(Error::::SpecVersionNeedsToIncrease)), + ("test", 1, 0, Err(Error::::SpecVersionNeedsToIncrease)), + ]; + + for (spec_name, spec_version, impl_version, expected) in test_data.into_iter() { + let version = RuntimeVersion { + spec_name: spec_name.into(), + 
spec_version, + impl_version, + ..Default::default() + }; + let call_in_wasm = CallInWasm(version.encode()); + + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(call_in_wasm)); + ext.execute_with(|| { + let res = System::set_code(RawOrigin::Root.into(), vec![1, 2, 3, 4]); + + assert_eq!(expected.map_err(DispatchError::from), res); + }); + } + } + + #[test] + fn set_code_with_real_wasm_blob() { + let executor = substrate_test_runtime_client::new_native_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.execute_with(|| { + System::set_block_number(1); + System::set_code( + RawOrigin::Root.into(), + substrate_test_runtime_client::runtime::WASM_BINARY.to_vec(), + ) + .unwrap(); + + assert_eq!( + System::events(), + vec![EventRecord { + phase: Phase::Initialization, + event: 102u16, + topics: vec![] + }], + ); + }); + } + + #[test] + fn runtime_upgraded_with_set_storage() { + let executor = substrate_test_runtime_client::new_native_executor(); + let mut ext = new_test_ext(); + ext.register_extension(sp_core::traits::CallInWasmExt::new(executor)); + ext.execute_with(|| { + System::set_storage( + RawOrigin::Root.into(), + vec![( + well_known_keys::CODE.to_vec(), + substrate_test_runtime_client::runtime::WASM_BINARY.to_vec(), + )], + ) + .unwrap(); + }); + } + + #[test] + fn events_not_emitted_during_genesis() { + new_test_ext().execute_with(|| { + // Block Number is zero at genesis + assert!(System::block_number().is_zero()); + System::on_created_account(Default::default()); + assert!(System::events().is_empty()); + // Events will be emitted starting on block 1 + System::set_block_number(1); + System::on_created_account(Default::default()); + assert!(System::events().len() == 1); + }); + } } diff --git a/frame/system/src/offchain.rs b/frame/system/src/offchain.rs index a3fe3e00ca..6198416532 100644 --- a/frame/system/src/offchain.rs +++ 
b/frame/system/src/offchain.rs @@ -17,34 +17,34 @@ //! Module helpers for off-chain calls. use codec::Encode; +use frame_support::{debug, storage::StorageMap}; +use sp_runtime::app_crypto::{AppPublic, AppSignature, RuntimeAppPublic}; +use sp_runtime::traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}; use sp_std::convert::TryInto; use sp_std::prelude::Vec; -use sp_runtime::app_crypto::{RuntimeAppPublic, AppPublic, AppSignature}; -use sp_runtime::traits::{Extrinsic as ExtrinsicT, IdentifyAccount, One}; -use frame_support::{debug, storage::StorageMap}; /// Creates runtime-specific signed transaction. /// /// This trait should be implemented by your `Runtime` to be able /// to submit `SignedTransaction`s` to the pool from off-chain code. pub trait CreateTransaction { - /// A `Public` key representing a particular `AccountId`. - type Public: IdentifyAccount + Clone; - /// A `Signature` generated by the `Signer`. - type Signature; - - /// Attempt to create signed extrinsic data that encodes call from given account. - /// - /// Runtime implementation is free to construct the payload to sign and the signature - /// in any way it wants. - /// Returns `None` if signed extrinsic could not be created (either because signing failed - /// or because of any other runtime-specific reason). - fn create_transaction>( - call: Extrinsic::Call, - public: Self::Public, - account: T::AccountId, - nonce: T::Index, - ) -> Option<(Extrinsic::Call, Extrinsic::SignaturePayload)>; + /// A `Public` key representing a particular `AccountId`. + type Public: IdentifyAccount + Clone; + /// A `Signature` generated by the `Signer`. + type Signature; + + /// Attempt to create signed extrinsic data that encodes call from given account. + /// + /// Runtime implementation is free to construct the payload to sign and the signature + /// in any way it wants. 
+ /// Returns `None` if signed extrinsic could not be created (either because signing failed + /// or because of any other runtime-specific reason). + fn create_transaction>( + call: Extrinsic::Call, + public: Self::Public, + account: T::AccountId, + nonce: T::Index, + ) -> Option<(Extrinsic::Call, Extrinsic::SignaturePayload)>; } /// A trait responsible for signing a payload using given account. @@ -59,11 +59,11 @@ pub trait CreateTransaction { /// To easily create `SignedTransaction`s have a look at the /// [`TransactionSubmitter`] type. pub trait Signer { - /// Sign any encodable payload with given account and produce a signature. - /// - /// Returns `Some` if signing succeeded and `None` in case the `account` couldn't - /// be used (for instance we couldn't convert it to required application specific crypto). - fn sign(public: Public, payload: &Payload) -> Option; + /// Sign any encodable payload with given account and produce a signature. + /// + /// Returns `Some` if signing succeeded and `None` in case the `account` couldn't + /// be used (for instance we couldn't convert it to required application specific crypto). + fn sign(public: Public, payload: &Payload) -> Option; } /// A `Signer` implementation for any `AppPublic` type. @@ -71,36 +71,32 @@ pub trait Signer { /// This implementation additionally supports conversion to/from multi-signature/multi-signer /// wrappers. /// If the wrapped crypto doesn't match `AppPublic`s crypto `None` is returned. 
-impl Signer for TAnyAppPublic where - TAnyAppPublic: RuntimeAppPublic - + AppPublic - + From<::Generic>, - ::Signature: AppSignature, - Signature: From< - <::Signature as AppSignature>::Generic - >, - Public: TryInto<::Generic> +impl Signer for TAnyAppPublic +where + TAnyAppPublic: RuntimeAppPublic + AppPublic + From<::Generic>, + ::Signature: AppSignature, + Signature: From<<::Signature as AppSignature>::Generic>, + Public: TryInto<::Generic>, { - fn sign(public: Public, raw_payload: &Payload) -> Option { - raw_payload.using_encoded(|payload| { - let public = public.try_into().ok()?; - TAnyAppPublic::from(public).sign(&payload) - .map( - <::Signature as AppSignature> - ::Generic::from - ) - .map(Signature::from) - }) - } + fn sign(public: Public, raw_payload: &Payload) -> Option { + raw_payload.using_encoded(|payload| { + let public = public.try_into().ok()?; + TAnyAppPublic::from(public) + .sign(&payload) + .map( + <::Signature as AppSignature>::Generic::from, + ) + .map(Signature::from) + }) + } } /// Retrieves a public key type for given `SignAndSubmitTransaction`. pub type PublicOf = -< - >::CreateTransaction - as - CreateTransaction>::Extrinsic> ->::Public; + <>::CreateTransaction as CreateTransaction< + T, + >::Extrinsic, + >>::Public; /// A trait to sign and submit transactions in off-chain calls. /// @@ -109,44 +105,48 @@ pub type PublicOf = /// [`TransactionSubmitter`] type, which /// you should use. pub trait SignAndSubmitTransaction { - /// Unchecked extrinsic type. - type Extrinsic: ExtrinsicT + Encode; - - /// A runtime-specific type to produce signed data for the extrinsic. - type CreateTransaction: CreateTransaction; - - /// A type used to sign transactions created using `CreateTransaction`. - type Signer: Signer< - PublicOf, - >::Signature, - >; - - /// Sign given call and submit it to the transaction pool. 
- /// - /// Returns `Ok` if the transaction was submitted correctly - /// and `Err` if the key for given `id` was not found or the - /// transaction was rejected from the pool. - fn sign_and_submit(call: impl Into, public: PublicOf) -> Result<(), ()> { - let call = call.into(); - let id = public.clone().into_account(); - let mut account = super::Account::::get(&id); - debug::native::debug!( - target: "offchain", - "Creating signed transaction from account: {:?} (nonce: {:?})", - id, - account.nonce, - ); - let (call, signature_data) = Self::CreateTransaction - ::create_transaction::(call, public, id.clone(), account.nonce) - .ok_or(())?; - // increment the nonce. This is fine, since the code should always - // be running in off-chain context, so we NEVER persists data. - account.nonce += One::one(); - super::Account::::insert(&id, account); - - let xt = Self::Extrinsic::new(call, Some(signature_data)).ok_or(())?; - sp_io::offchain::submit_transaction(xt.encode()) - } + /// Unchecked extrinsic type. + type Extrinsic: ExtrinsicT + Encode; + + /// A runtime-specific type to produce signed data for the extrinsic. + type CreateTransaction: CreateTransaction; + + /// A type used to sign transactions created using `CreateTransaction`. + type Signer: Signer< + PublicOf, + >::Signature, + >; + + /// Sign given call and submit it to the transaction pool. + /// + /// Returns `Ok` if the transaction was submitted correctly + /// and `Err` if the key for given `id` was not found or the + /// transaction was rejected from the pool. 
+ fn sign_and_submit(call: impl Into, public: PublicOf) -> Result<(), ()> { + let call = call.into(); + let id = public.clone().into_account(); + let mut account = super::Account::::get(&id); + debug::native::debug!( + target: "offchain", + "Creating signed transaction from account: {:?} (nonce: {:?})", + id, + account.nonce, + ); + let (call, signature_data) = Self::CreateTransaction::create_transaction::( + call, + public, + id.clone(), + account.nonce, + ) + .ok_or(())?; + // increment the nonce. This is fine, since the code should always + // be running in off-chain context, so we NEVER persists data. + account.nonce += One::one(); + super::Account::::insert(&id, account); + + let xt = Self::Extrinsic::new(call, Some(signature_data)).ok_or(())?; + sp_io::offchain::submit_transaction(xt.encode()) + } } /// A trait to submit unsigned transactions in off-chain calls. @@ -156,18 +156,18 @@ pub trait SignAndSubmitTransaction { /// [`TransactionSubmitter`] type, which /// you should use. pub trait SubmitUnsignedTransaction { - /// Unchecked extrinsic type. - type Extrinsic: ExtrinsicT + Encode; - - /// Submit given call to the transaction pool as unsigned transaction. - /// - /// Returns `Ok` if the transaction was submitted correctly - /// and `Err` if transaction was rejected from the pool. - fn submit_unsigned(call: impl Into) -> Result<(), ()> { - let xt = Self::Extrinsic::new(call.into(), None).ok_or(())?; - let encoded_xt = xt.encode(); - sp_io::offchain::submit_transaction(encoded_xt) - } + /// Unchecked extrinsic type. + type Extrinsic: ExtrinsicT + Encode; + + /// Submit given call to the transaction pool as unsigned transaction. + /// + /// Returns `Ok` if the transaction was submitted correctly + /// and `Err` if transaction was rejected from the pool. 
+ fn submit_unsigned(call: impl Into) -> Result<(), ()> { + let xt = Self::Extrinsic::new(call.into(), None).ok_or(())?; + let encoded_xt = xt.encode(); + sp_io::offchain::submit_transaction(encoded_xt) + } } /// A utility trait to easily create signed transactions @@ -178,86 +178,81 @@ pub trait SubmitUnsignedTransaction { /// [`TransactionSubmitter`] type, which /// you should use. pub trait SubmitSignedTransaction { - /// A `SignAndSubmitTransaction` implementation. - type SignAndSubmit: SignAndSubmitTransaction; - - /// Find local keys that match given list of accounts. - /// - /// Technically it finds an intersection between given list of `AccountId`s - /// and accounts that are represented by public keys in local keystore. - /// If `None` is passed it returns all accounts in the keystore. - /// - /// Returns both public keys and `AccountId`s of accounts that are available. - /// Such accounts can later be used to sign a payload or send signed transactions. - fn find_local_keys(accounts: Option>) -> Vec<( - T::AccountId, - PublicOf, - )>; - - /// Find all available local keys. - /// - /// This is equivalent of calling `find_local_keys(None)`. - fn find_all_local_keys() -> Vec<(T::AccountId, PublicOf)> { - Self::find_local_keys(None as Option>) - } - - /// Check if there are keys for any of given accounts that could be used to send a transaction. - /// - /// This check can be used as an early-exit condition to avoid doing too - /// much work, before we actually realise that there are no accounts that you - /// we could use for signing. - fn can_sign_with(accounts: Option>) -> bool { - !Self::find_local_keys(accounts).is_empty() - } - - /// Check if there are any keys that could be used for signing. - /// - /// This is equivalent of calling `can_sign_with(None)`. - fn can_sign() -> bool { - Self::can_sign_with(None as Option>) - } - - /// Create and submit signed transactions from supported accounts. 
- /// - /// This method should intersect given list of accounts with the ones - /// supported locally and submit signed transaction containing given `Call` - /// with every of them. - /// - /// Returns a vector of results and account ids that were supported. - #[must_use] - fn submit_signed_from( - call: impl Into + Clone, - accounts: impl IntoIterator, - ) -> Vec<(T::AccountId, Result<(), ()>)> { - let keys = Self::find_local_keys(Some(accounts)); - keys.into_iter().map(|(account, pub_key)| { - let call = call.clone().into(); - ( - account, - Self::SignAndSubmit::sign_and_submit(call, pub_key) - ) - }).collect() - } - - /// Create and submit signed transactions from all local accounts. - /// - /// This method submits a signed transaction from all local accounts - /// for given application crypto. - /// - /// Returns a vector of results and account ids that were supported. - #[must_use] - fn submit_signed( - call: impl Into + Clone, - ) -> Vec<(T::AccountId, Result<(), ()>)> { - let keys = Self::find_all_local_keys(); - keys.into_iter().map(|(account, pub_key)| { - let call = call.clone().into(); - ( - account, - Self::SignAndSubmit::sign_and_submit(call, pub_key) - ) - }).collect() - } + /// A `SignAndSubmitTransaction` implementation. + type SignAndSubmit: SignAndSubmitTransaction; + + /// Find local keys that match given list of accounts. + /// + /// Technically it finds an intersection between given list of `AccountId`s + /// and accounts that are represented by public keys in local keystore. + /// If `None` is passed it returns all accounts in the keystore. + /// + /// Returns both public keys and `AccountId`s of accounts that are available. + /// Such accounts can later be used to sign a payload or send signed transactions. + fn find_local_keys( + accounts: Option>, + ) -> Vec<(T::AccountId, PublicOf)>; + + /// Find all available local keys. + /// + /// This is equivalent of calling `find_local_keys(None)`. 
+ fn find_all_local_keys() -> Vec<(T::AccountId, PublicOf)> { + Self::find_local_keys(None as Option>) + } + + /// Check if there are keys for any of given accounts that could be used to send a transaction. + /// + /// This check can be used as an early-exit condition to avoid doing too + /// much work, before we actually realise that there are no accounts that you + /// we could use for signing. + fn can_sign_with(accounts: Option>) -> bool { + !Self::find_local_keys(accounts).is_empty() + } + + /// Check if there are any keys that could be used for signing. + /// + /// This is equivalent of calling `can_sign_with(None)`. + fn can_sign() -> bool { + Self::can_sign_with(None as Option>) + } + + /// Create and submit signed transactions from supported accounts. + /// + /// This method should intersect given list of accounts with the ones + /// supported locally and submit signed transaction containing given `Call` + /// with every of them. + /// + /// Returns a vector of results and account ids that were supported. + #[must_use] + fn submit_signed_from( + call: impl Into + Clone, + accounts: impl IntoIterator, + ) -> Vec<(T::AccountId, Result<(), ()>)> { + let keys = Self::find_local_keys(Some(accounts)); + keys.into_iter() + .map(|(account, pub_key)| { + let call = call.clone().into(); + (account, Self::SignAndSubmit::sign_and_submit(call, pub_key)) + }) + .collect() + } + + /// Create and submit signed transactions from all local accounts. + /// + /// This method submits a signed transaction from all local accounts + /// for given application crypto. + /// + /// Returns a vector of results and account ids that were supported. 
+ #[must_use] + fn submit_signed(call: impl Into + Clone) -> Vec<(T::AccountId, Result<(), ()>)> { + let keys = Self::find_all_local_keys(); + keys.into_iter() + .map(|(account, pub_key)| { + let call = call.clone().into(); + (account, Self::SignAndSubmit::sign_and_submit(call, pub_key)) + }) + .collect() + } } /// A default type used to submit transactions to the pool. @@ -277,91 +272,99 @@ pub trait SubmitSignedTransaction { /// If you only need the ability to submit unsigned transactions, /// you may substitute both `Signer` and `CreateTransaction` with any type. pub struct TransactionSubmitter { - _signer: sp_std::marker::PhantomData<(Signer, CreateTransaction, Extrinsic)>, + _signer: sp_std::marker::PhantomData<(Signer, CreateTransaction, Extrinsic)>, } impl Default for TransactionSubmitter { - fn default() -> Self { - Self { - _signer: Default::default(), - } - } + fn default() -> Self { + Self { + _signer: Default::default(), + } + } } /// A blanket implementation to simplify creation of transaction signer & submitter in the runtime. -impl SignAndSubmitTransaction for TransactionSubmitter where - T: crate::Trait, - C: CreateTransaction, - S: Signer<>::Public, >::Signature>, - E: ExtrinsicT + Encode, +impl SignAndSubmitTransaction for TransactionSubmitter +where + T: crate::Trait, + C: CreateTransaction, + S: Signer<>::Public, >::Signature>, + E: ExtrinsicT + Encode, { - type Extrinsic = E; - type CreateTransaction = C; - type Signer = S; + type Extrinsic = E; + type CreateTransaction = C; + type Signer = S; } /// A blanket implementation to use the same submitter for unsigned transactions as well. -impl SubmitUnsignedTransaction for TransactionSubmitter where - T: crate::Trait, - E: ExtrinsicT + Encode, +impl SubmitUnsignedTransaction for TransactionSubmitter +where + T: crate::Trait, + E: ExtrinsicT + Encode, { - type Extrinsic = E; + type Extrinsic = E; } /// A blanket implementation to support local keystore of application-crypto types. 
-impl SubmitSignedTransaction for TransactionSubmitter where - T: crate::Trait, - C: CreateTransaction, - E: ExtrinsicT + Encode, - S: Signer<>::Public, >::Signature>, - // Make sure we can unwrap the app crypto key. - S: RuntimeAppPublic + AppPublic + Into<::Generic>, - // Make sure we can convert from wrapped crypto to public key (e.g. `MultiSigner`) - S::Generic: Into>, - // For simplicity we require the same trait to implement `SignAndSubmitTransaction` too. - Self: SignAndSubmitTransaction, +impl SubmitSignedTransaction for TransactionSubmitter +where + T: crate::Trait, + C: CreateTransaction, + E: ExtrinsicT + Encode, + S: Signer<>::Public, >::Signature>, + // Make sure we can unwrap the app crypto key. + S: RuntimeAppPublic + AppPublic + Into<::Generic>, + // Make sure we can convert from wrapped crypto to public key (e.g. `MultiSigner`) + S::Generic: Into>, + // For simplicity we require the same trait to implement `SignAndSubmitTransaction` too. + Self: SignAndSubmitTransaction, { - type SignAndSubmit = Self; - - fn find_local_keys(accounts: Option>) -> Vec<( - T::AccountId, - PublicOf, - )> { - // Convert app-specific keys into generic ones. - let local_accounts_and_keys = S::all() - .into_iter() - .map(|app_key| { - // unwrap app-crypto - let generic_pub_key: ::Generic = app_key.into(); - // convert to expected public key type (might be MultiSigner) - let signer_pub_key: PublicOf = generic_pub_key.into(); - // lookup accountid for that pubkey - let account = signer_pub_key.clone().into_account(); - (account, signer_pub_key) - }).collect::>(); - - if let Some(accounts) = accounts { - let mut local_accounts_and_keys = local_accounts_and_keys; - // sort by accountId to allow bin-search. 
- local_accounts_and_keys.sort_by(|a, b| a.0.cmp(&b.0)); - - // get all the matching accounts - accounts.into_iter().filter_map(|acc| { - let idx = local_accounts_and_keys.binary_search_by(|a| a.0.cmp(&acc)).ok()?; - local_accounts_and_keys.get(idx).cloned() - }).collect() - } else { - // just return all account ids and keys - local_accounts_and_keys - } - } - - fn can_sign_with(accounts: Option>) -> bool { - // early exit if we care about any account. - if accounts.is_none() { - !S::all().is_empty() - } else { - !Self::find_local_keys(accounts).is_empty() - } - } + type SignAndSubmit = Self; + + fn find_local_keys( + accounts: Option>, + ) -> Vec<(T::AccountId, PublicOf)> { + // Convert app-specific keys into generic ones. + let local_accounts_and_keys = S::all() + .into_iter() + .map(|app_key| { + // unwrap app-crypto + let generic_pub_key: ::Generic = app_key.into(); + // convert to expected public key type (might be MultiSigner) + let signer_pub_key: PublicOf = generic_pub_key.into(); + // lookup accountid for that pubkey + let account = signer_pub_key.clone().into_account(); + (account, signer_pub_key) + }) + .collect::>(); + + if let Some(accounts) = accounts { + let mut local_accounts_and_keys = local_accounts_and_keys; + // sort by accountId to allow bin-search. + local_accounts_and_keys.sort_by(|a, b| a.0.cmp(&b.0)); + + // get all the matching accounts + accounts + .into_iter() + .filter_map(|acc| { + let idx = local_accounts_and_keys + .binary_search_by(|a| a.0.cmp(&acc)) + .ok()?; + local_accounts_and_keys.get(idx).cloned() + }) + .collect() + } else { + // just return all account ids and keys + local_accounts_and_keys + } + } + + fn can_sign_with(accounts: Option>) -> bool { + // early exit if we care about any account. 
+ if accounts.is_none() { + !S::all().is_empty() + } else { + !Self::find_local_keys(accounts).is_empty() + } + } } diff --git a/frame/timestamp/src/benchmarking.rs b/frame/timestamp/src/benchmarking.rs index 01a3d502a8..4392623a74 100644 --- a/frame/timestamp/src/benchmarking.rs +++ b/frame/timestamp/src/benchmarking.rs @@ -19,32 +19,32 @@ #![cfg(feature = "runtime-benchmarks")] use super::*; -use sp_std::prelude::*; -use frame_system::RawOrigin; use frame_benchmarking::benchmarks; +use frame_system::RawOrigin; +use sp_std::prelude::*; const MAX_TIME: u32 = 100; benchmarks! { - _ { - let n in 1 .. MAX_TIME => (); - } + _ { + let n in 1 .. MAX_TIME => (); + } - set { - let n in ...; - }: _(RawOrigin::None, n.into()) + set { + let n in ...; + }: _(RawOrigin::None, n.into()) } #[cfg(test)] mod tests { - use super::*; - use crate::tests::{new_test_ext, Test}; - use frame_support::assert_ok; - - #[test] - fn test_benchmarks() { - new_test_ext().execute_with(|| { - assert_ok!(test_benchmark_set::()); - }); - } + use super::*; + use crate::tests::{new_test_ext, Test}; + use frame_support::assert_ok; + + #[test] + fn test_benchmarks() { + new_test_ext().execute_with(|| { + assert_ok!(test_benchmark_set::()); + }); + } } diff --git a/frame/timestamp/src/lib.rs b/frame/timestamp/src/lib.rs index 822848bf7d..62e1f12719 100644 --- a/frame/timestamp/src/lib.rs +++ b/frame/timestamp/src/lib.rs @@ -94,258 +94,267 @@ mod benchmarking; -use sp_std::{result, cmp}; -use sp_inherents::{ProvideInherent, InherentData, InherentIdentifier}; #[cfg(feature = "std")] use frame_support::debug; use frame_support::{ - Parameter, decl_storage, decl_module, - traits::{Time, UnixTime, Get}, - weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, -}; -use sp_runtime::{ - RuntimeString, - traits::{ - AtLeast32Bit, Zero, SaturatedConversion, Scale - } + decl_module, decl_storage, + traits::{Get, Time, UnixTime}, + weights::{SimpleDispatchInfo, MINIMUM_WEIGHT}, + Parameter, }; use frame_system::ensure_none; 
-use sp_timestamp::{ - InherentError, INHERENT_IDENTIFIER, InherentType, - OnTimestampSet, +use sp_inherents::{InherentData, InherentIdentifier, ProvideInherent}; +use sp_runtime::{ + traits::{AtLeast32Bit, SaturatedConversion, Scale, Zero}, + RuntimeString, }; +use sp_std::{cmp, result}; +use sp_timestamp::{InherentError, InherentType, OnTimestampSet, INHERENT_IDENTIFIER}; /// The module configuration trait pub trait Trait: frame_system::Trait { - /// Type used for expressing timestamp. - type Moment: Parameter + Default + AtLeast32Bit - + Scale + Copy; - - /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. - type OnTimestampSet: OnTimestampSet; - - /// The minimum period between blocks. Beware that this is different to the *expected* period - /// that the block production apparatus provides. Your chosen consensus system will generally - /// work with this to determine a sensible block time. e.g. For Aura, it will be double this - /// period on default settings. - type MinimumPeriod: Get; + /// Type used for expressing timestamp. + type Moment: Parameter + + Default + + AtLeast32Bit + + Scale + + Copy; + + /// Something which can be notified when the timestamp is set. Set this to `()` if not needed. + type OnTimestampSet: OnTimestampSet; + + /// The minimum period between blocks. Beware that this is different to the *expected* period + /// that the block production apparatus provides. Your chosen consensus system will generally + /// work with this to determine a sensible block time. e.g. For Aura, it will be double this + /// period on default settings. + type MinimumPeriod: Get; } decl_module! { - pub struct Module for enum Call where origin: T::Origin { - /// The minimum period between blocks. Beware that this is different to the *expected* period - /// that the block production apparatus provides. Your chosen consensus system will generally - /// work with this to determine a sensible block time. e.g. 
For Aura, it will be double this - /// period on default settings. - const MinimumPeriod: T::Moment = T::MinimumPeriod::get(); - - /// Set the current time. - /// - /// This call should be invoked exactly once per block. It will panic at the finalization - /// phase, if this call hasn't been invoked by that time. - /// - /// The timestamp should be greater than the previous one by the amount specified by - /// `MinimumPeriod`. - /// - /// The dispatch origin for this call must be `Inherent`. - #[weight = SimpleDispatchInfo::FixedMandatory(MINIMUM_WEIGHT)] - fn set(origin, #[compact] now: T::Moment) { - ensure_none(origin)?; - assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); - assert!( - Self::now().is_zero() || now >= Self::now() + T::MinimumPeriod::get(), - "Timestamp must increment by at least between sequential blocks" - ); - ::Now::put(now); - ::DidUpdate::put(true); - - >::on_timestamp_set(now); - } - - fn on_finalize() { - assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); - } - } + pub struct Module for enum Call where origin: T::Origin { + /// The minimum period between blocks. Beware that this is different to the *expected* period + /// that the block production apparatus provides. Your chosen consensus system will generally + /// work with this to determine a sensible block time. e.g. For Aura, it will be double this + /// period on default settings. + const MinimumPeriod: T::Moment = T::MinimumPeriod::get(); + + /// Set the current time. + /// + /// This call should be invoked exactly once per block. It will panic at the finalization + /// phase, if this call hasn't been invoked by that time. + /// + /// The timestamp should be greater than the previous one by the amount specified by + /// `MinimumPeriod`. + /// + /// The dispatch origin for this call must be `Inherent`. 
+ #[weight = SimpleDispatchInfo::FixedMandatory(MINIMUM_WEIGHT)] + fn set(origin, #[compact] now: T::Moment) { + ensure_none(origin)?; + assert!(!::DidUpdate::exists(), "Timestamp must be updated only once in the block"); + assert!( + Self::now().is_zero() || now >= Self::now() + T::MinimumPeriod::get(), + "Timestamp must increment by at least between sequential blocks" + ); + ::Now::put(now); + ::DidUpdate::put(true); + + >::on_timestamp_set(now); + } + + fn on_finalize() { + assert!(::DidUpdate::take(), "Timestamp must be updated once in the block"); + } + } } decl_storage! { - trait Store for Module as Timestamp { - /// Current time for the current block. - pub Now get(fn now) build(|_| 0.into()): T::Moment; + trait Store for Module as Timestamp { + /// Current time for the current block. + pub Now get(fn now) build(|_| 0.into()): T::Moment; - /// Did the timestamp get updated in this block? - DidUpdate: bool; - } + /// Did the timestamp get updated in this block? + DidUpdate: bool; + } } impl Module { - /// Get the current time for the current block. - /// - /// NOTE: if this function is called prior to setting the timestamp, - /// it will return the timestamp of the previous block. - pub fn get() -> T::Moment { - Self::now() - } - - /// Set the timestamp to something in particular. Only used for tests. - #[cfg(feature = "std")] - pub fn set_timestamp(now: T::Moment) { - ::Now::put(now); - } + /// Get the current time for the current block. + /// + /// NOTE: if this function is called prior to setting the timestamp, + /// it will return the timestamp of the previous block. + pub fn get() -> T::Moment { + Self::now() + } + + /// Set the timestamp to something in particular. Only used for tests. + #[cfg(feature = "std")] + pub fn set_timestamp(now: T::Moment) { + ::Now::put(now); + } } fn extract_inherent_data(data: &InherentData) -> Result { - data.get_data::(&INHERENT_IDENTIFIER) - .map_err(|_| RuntimeString::from("Invalid timestamp inherent data encoding."))? 
- .ok_or_else(|| "Timestamp inherent data is not provided.".into()) + data.get_data::(&INHERENT_IDENTIFIER) + .map_err(|_| RuntimeString::from("Invalid timestamp inherent data encoding."))? + .ok_or_else(|| "Timestamp inherent data is not provided.".into()) } impl ProvideInherent for Module { - type Call = Call; - type Error = InherentError; - const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; - - fn create_inherent(data: &InherentData) -> Option { - let data: T::Moment = extract_inherent_data(data) - .expect("Gets and decodes timestamp inherent data") - .saturated_into(); - - let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); - Some(Call::set(next_time.into())) - } - - fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { - const MAX_TIMESTAMP_DRIFT_MILLIS: u64 = 30 * 1000; - - let t: u64 = match call { - Call::set(ref t) => t.clone().saturated_into::(), - _ => return Ok(()), - }; - - let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; - - let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); - if t > data + MAX_TIMESTAMP_DRIFT_MILLIS { - Err(InherentError::Other("Timestamp too far in future to accept".into())) - } else if t < minimum { - Err(InherentError::ValidAtTimestamp(minimum)) - } else { - Ok(()) - } - } + type Call = Call; + type Error = InherentError; + const INHERENT_IDENTIFIER: InherentIdentifier = INHERENT_IDENTIFIER; + + fn create_inherent(data: &InherentData) -> Option { + let data: T::Moment = extract_inherent_data(data) + .expect("Gets and decodes timestamp inherent data") + .saturated_into(); + + let next_time = cmp::max(data, Self::now() + T::MinimumPeriod::get()); + Some(Call::set(next_time.into())) + } + + fn check_inherent(call: &Self::Call, data: &InherentData) -> result::Result<(), Self::Error> { + const MAX_TIMESTAMP_DRIFT_MILLIS: u64 = 30 * 1000; + + let t: u64 = match call { + Call::set(ref t) => 
t.clone().saturated_into::(), + _ => return Ok(()), + }; + + let data = extract_inherent_data(data).map_err(|e| InherentError::Other(e))?; + + let minimum = (Self::now() + T::MinimumPeriod::get()).saturated_into::(); + if t > data + MAX_TIMESTAMP_DRIFT_MILLIS { + Err(InherentError::Other( + "Timestamp too far in future to accept".into(), + )) + } else if t < minimum { + Err(InherentError::ValidAtTimestamp(minimum)) + } else { + Ok(()) + } + } } impl Time for Module { - type Moment = T::Moment; + type Moment = T::Moment; - /// Before the first set of now with inherent the value returned is zero. - fn now() -> Self::Moment { - Self::now() - } + /// Before the first set of now with inherent the value returned is zero. + fn now() -> Self::Moment { + Self::now() + } } /// Before the timestamp inherent is applied, it returns the time of previous block. /// /// On genesis the time returned is not valid. impl UnixTime for Module { - fn now() -> core::time::Duration { - // now is duration since unix epoch in millisecond as documented in - // `sp_timestamp::InherentDataProvider`. - let now = Self::now(); - sp_std::if_std! { - if now == T::Moment::zero() { - debug::error!( - "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0" - ); - } - } - core::time::Duration::from_millis(now.saturated_into::()) - } + fn now() -> core::time::Duration { + // now is duration since unix epoch in millisecond as documented in + // `sp_timestamp::InherentDataProvider`. + let now = Self::now(); + sp_std::if_std! 
{ + if now == T::Moment::zero() { + debug::error!( + "`pallet_timestamp::UnixTime::now` is called at genesis, invalid value returned: 0" + ); + } + } + core::time::Duration::from_millis(now.saturated_into::()) + } } #[cfg(test)] mod tests { - use super::*; - - use frame_support::{impl_outer_origin, assert_ok, parameter_types, weights::Weight}; - use sp_io::TestExternalities; - use sp_core::H256; - use sp_runtime::{Perbill, traits::{BlakeTwo256, IdentityLookup}, testing::Header}; - - pub fn new_test_ext() -> TestExternalities { - let t = frame_system::GenesisConfig::default().build_storage::().unwrap(); - TestExternalities::new(t) - } - - impl_outer_origin! { - pub enum Origin for Test where system = frame_system {} - } - - #[derive(Clone, Eq, PartialEq)] - pub struct Test; - parameter_types! { - pub const BlockHashCount: u64 = 250; - pub const MaximumBlockWeight: Weight = 1024; - pub const MaximumBlockLength: u32 = 2 * 1024; - pub const AvailableBlockRatio: Perbill = Perbill::one(); - } - impl frame_system::Trait for Test { - type Origin = Origin; - type Index = u64; - type BlockNumber = u64; - type Call = (); - type Hash = H256; - type Hashing = BlakeTwo256; - type AccountId = u64; - type Lookup = IdentityLookup; - type Header = Header; - type Event = (); - type BlockHashCount = BlockHashCount; - type MaximumBlockWeight = MaximumBlockWeight; - type DbWeight = (); - type AvailableBlockRatio = AvailableBlockRatio; - type MaximumBlockLength = MaximumBlockLength; - type Version = (); - type ModuleToIndex = (); - type AccountData = (); - type OnNewAccount = (); - type OnKilledAccount = (); - } - parameter_types! 
{ - pub const MinimumPeriod: u64 = 5; - } - impl Trait for Test { - type Moment = u64; - type OnTimestampSet = (); - type MinimumPeriod = MinimumPeriod; - } - type Timestamp = Module; - - #[test] - fn timestamp_works() { - new_test_ext().execute_with(|| { - Timestamp::set_timestamp(42); - assert_ok!(Timestamp::dispatch(Call::set(69), Origin::NONE)); - assert_eq!(Timestamp::now(), 69); - }); - } - - #[test] - #[should_panic(expected = "Timestamp must be updated only once in the block")] - fn double_timestamp_should_fail() { - new_test_ext().execute_with(|| { - Timestamp::set_timestamp(42); - assert_ok!(Timestamp::dispatch(Call::set(69), Origin::NONE)); - let _ = Timestamp::dispatch(Call::set(70), Origin::NONE); - }); - } - - #[test] - #[should_panic(expected = "Timestamp must increment by at least between sequential blocks")] - fn block_period_minimum_enforced() { - new_test_ext().execute_with(|| { - Timestamp::set_timestamp(42); - let _ = Timestamp::dispatch(Call::set(46), Origin::NONE); - }); - } + use super::*; + + use frame_support::{assert_ok, impl_outer_origin, parameter_types, weights::Weight}; + use sp_core::H256; + use sp_io::TestExternalities; + use sp_runtime::{ + testing::Header, + traits::{BlakeTwo256, IdentityLookup}, + Perbill, + }; + + pub fn new_test_ext() -> TestExternalities { + let t = frame_system::GenesisConfig::default() + .build_storage::() + .unwrap(); + TestExternalities::new(t) + } + + impl_outer_origin! { + pub enum Origin for Test where system = frame_system {} + } + + #[derive(Clone, Eq, PartialEq)] + pub struct Test; + parameter_types! 
{ + pub const BlockHashCount: u64 = 250; + pub const MaximumBlockWeight: Weight = 1024; + pub const MaximumBlockLength: u32 = 2 * 1024; + pub const AvailableBlockRatio: Perbill = Perbill::one(); + } + impl frame_system::Trait for Test { + type Origin = Origin; + type Index = u64; + type BlockNumber = u64; + type Call = (); + type Hash = H256; + type Hashing = BlakeTwo256; + type AccountId = u64; + type Lookup = IdentityLookup; + type Header = Header; + type Event = (); + type BlockHashCount = BlockHashCount; + type MaximumBlockWeight = MaximumBlockWeight; + type DbWeight = (); + type AvailableBlockRatio = AvailableBlockRatio; + type MaximumBlockLength = MaximumBlockLength; + type Version = (); + type ModuleToIndex = (); + type AccountData = (); + type OnNewAccount = (); + type OnKilledAccount = (); + } + parameter_types! { + pub const MinimumPeriod: u64 = 5; + } + impl Trait for Test { + type Moment = u64; + type OnTimestampSet = (); + type MinimumPeriod = MinimumPeriod; + } + type Timestamp = Module; + + #[test] + fn timestamp_works() { + new_test_ext().execute_with(|| { + Timestamp::set_timestamp(42); + assert_ok!(Timestamp::dispatch(Call::set(69), Origin::NONE)); + assert_eq!(Timestamp::now(), 69); + }); + } + + #[test] + #[should_panic(expected = "Timestamp must be updated only once in the block")] + fn double_timestamp_should_fail() { + new_test_ext().execute_with(|| { + Timestamp::set_timestamp(42); + assert_ok!(Timestamp::dispatch(Call::set(69), Origin::NONE)); + let _ = Timestamp::dispatch(Call::set(70), Origin::NONE); + }); + } + + #[test] + #[should_panic( + expected = "Timestamp must increment by at least between sequential blocks" + )] + fn block_period_minimum_enforced() { + new_test_ext().execute_with(|| { + Timestamp::set_timestamp(42); + let _ = Timestamp::dispatch(Call::set(46), Origin::NONE); + }); + } } diff --git a/frame/transaction-payment/rpc/runtime-api/src/lib.rs b/frame/transaction-payment/rpc/runtime-api/src/lib.rs index 
77b6e3b454..c6d19589c5 100644 --- a/frame/transaction-payment/rpc/runtime-api/src/lib.rs +++ b/frame/transaction-payment/rpc/runtime-api/src/lib.rs @@ -18,86 +18,104 @@ #![cfg_attr(not(feature = "std"), no_std)] -use sp_std::prelude::*; -use frame_support::weights::{Weight, DispatchClass}; -use codec::{Encode, Codec, Decode}; +use codec::{Codec, Decode, Encode}; +use frame_support::weights::{DispatchClass, Weight}; #[cfg(feature = "std")] -use serde::{Serialize, Deserialize, Serializer, Deserializer}; +use serde::{Deserialize, Deserializer, Serialize, Serializer}; use sp_runtime::traits::{MaybeDisplay, MaybeFromStr}; +use sp_std::prelude::*; /// Some information related to a dispatchable that can be queried from the runtime. #[derive(Eq, PartialEq, Encode, Decode, Default)] #[cfg_attr(feature = "std", derive(Debug, Serialize, Deserialize))] #[cfg_attr(feature = "std", serde(rename_all = "camelCase"))] pub struct RuntimeDispatchInfo { - /// Weight of this dispatch. - pub weight: Weight, - /// Class of this dispatch. - pub class: DispatchClass, - /// The partial inclusion fee of this dispatch. This does not include tip or anything else which - /// is dependent on the signature (aka. depends on a `SignedExtension`). - #[cfg_attr(feature = "std", serde(bound(serialize = "Balance: std::fmt::Display")))] - #[cfg_attr(feature = "std", serde(serialize_with = "serialize_as_string"))] - #[cfg_attr(feature = "std", serde(bound(deserialize = "Balance: std::str::FromStr")))] - #[cfg_attr(feature = "std", serde(deserialize_with = "deserialize_from_string"))] - pub partial_fee: Balance, + /// Weight of this dispatch. + pub weight: Weight, + /// Class of this dispatch. + pub class: DispatchClass, + /// The partial inclusion fee of this dispatch. This does not include tip or anything else which + /// is dependent on the signature (aka. depends on a `SignedExtension`). 
+ #[cfg_attr( + feature = "std", + serde(bound(serialize = "Balance: std::fmt::Display")) + )] + #[cfg_attr(feature = "std", serde(serialize_with = "serialize_as_string"))] + #[cfg_attr( + feature = "std", + serde(bound(deserialize = "Balance: std::str::FromStr")) + )] + #[cfg_attr(feature = "std", serde(deserialize_with = "deserialize_from_string"))] + pub partial_fee: Balance, } #[cfg(feature = "std")] -fn serialize_as_string(t: &T, serializer: S) -> Result { - serializer.serialize_str(&t.to_string()) +fn serialize_as_string( + t: &T, + serializer: S, +) -> Result { + serializer.serialize_str(&t.to_string()) } #[cfg(feature = "std")] -fn deserialize_from_string<'de, D: Deserializer<'de>, T: std::str::FromStr>(deserializer: D) -> Result { - let s = String::deserialize(deserializer)?; - s.parse::().map_err(|_| serde::de::Error::custom("Parse from string failed")) +fn deserialize_from_string<'de, D: Deserializer<'de>, T: std::str::FromStr>( + deserializer: D, +) -> Result { + let s = String::deserialize(deserializer)?; + s.parse::() + .map_err(|_| serde::de::Error::custom("Parse from string failed")) } sp_api::decl_runtime_apis! 
{ - pub trait TransactionPaymentApi where - Balance: Codec + MaybeDisplay + MaybeFromStr, - Extrinsic: Codec, - { - fn query_info(uxt: Extrinsic, len: u32) -> RuntimeDispatchInfo; - } + pub trait TransactionPaymentApi where + Balance: Codec + MaybeDisplay + MaybeFromStr, + Extrinsic: Codec, + { + fn query_info(uxt: Extrinsic, len: u32) -> RuntimeDispatchInfo; + } } #[cfg(test)] mod tests { - use super::*; - - #[test] - fn should_serialize_and_deserialize_properly_with_string() { - let info = RuntimeDispatchInfo { - weight: 5, - class: DispatchClass::Normal, - partial_fee: 1_000_000_u64, - }; - - let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; - - assert_eq!(serde_json::to_string(&info).unwrap(), json_str); - assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); - - // should not panic - serde_json::to_value(&info).unwrap(); - } - - #[test] - fn should_serialize_and_deserialize_properly_large_value() { - let info = RuntimeDispatchInfo { - weight: 5, - class: DispatchClass::Normal, - partial_fee: u128::max_value(), - }; - - let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; - - assert_eq!(serde_json::to_string(&info).unwrap(), json_str); - assert_eq!(serde_json::from_str::>(json_str).unwrap(), info); - - // should not panic - serde_json::to_value(&info).unwrap(); - } + use super::*; + + #[test] + fn should_serialize_and_deserialize_properly_with_string() { + let info = RuntimeDispatchInfo { + weight: 5, + class: DispatchClass::Normal, + partial_fee: 1_000_000_u64, + }; + + let json_str = r#"{"weight":5,"class":"normal","partialFee":"1000000"}"#; + + assert_eq!(serde_json::to_string(&info).unwrap(), json_str); + assert_eq!( + serde_json::from_str::>(json_str).unwrap(), + info + ); + + // should not panic + serde_json::to_value(&info).unwrap(); + } + + #[test] + fn should_serialize_and_deserialize_properly_large_value() { + let info = RuntimeDispatchInfo { + weight: 5, + class: 
DispatchClass::Normal, + partial_fee: u128::max_value(), + }; + + let json_str = r#"{"weight":5,"class":"normal","partialFee":"340282366920938463463374607431768211455"}"#; + + assert_eq!(serde_json::to_string(&info).unwrap(), json_str); + assert_eq!( + serde_json::from_str::>(json_str).unwrap(), + info + ); + + // should not panic + serde_json::to_value(&info).unwrap(); + } } diff --git a/frame/transaction-payment/rpc/src/lib.rs b/frame/transaction-payment/rpc/src/lib.rs index 945b0119d7..5e2e2d9cd2 100644 --- a/frame/transaction-payment/rpc/src/lib.rs +++ b/frame/transaction-payment/rpc/src/lib.rs @@ -16,89 +16,91 @@ //! RPC interface for the transaction payment module. -use std::sync::Arc; +pub use self::gen_client::Client as TransactionPaymentClient; use codec::{Codec, Decode}; -use sp_blockchain::HeaderBackend; use jsonrpc_core::{Error as RpcError, ErrorCode, Result}; use jsonrpc_derive::rpc; -use sp_runtime::{generic::BlockId, traits::{Block as BlockT, MaybeDisplay, MaybeFromStr}}; -use sp_api::ProvideRuntimeApi; -use sp_core::Bytes; use pallet_transaction_payment_rpc_runtime_api::RuntimeDispatchInfo; pub use pallet_transaction_payment_rpc_runtime_api::TransactionPaymentApi as TransactionPaymentRuntimeApi; -pub use self::gen_client::Client as TransactionPaymentClient; +use sp_api::ProvideRuntimeApi; +use sp_blockchain::HeaderBackend; +use sp_core::Bytes; +use sp_runtime::{ + generic::BlockId, + traits::{Block as BlockT, MaybeDisplay, MaybeFromStr}, +}; +use std::sync::Arc; #[rpc] pub trait TransactionPaymentApi { - #[rpc(name = "payment_queryInfo")] - fn query_info( - &self, - encoded_xt: Bytes, - at: Option - ) -> Result; + #[rpc(name = "payment_queryInfo")] + fn query_info(&self, encoded_xt: Bytes, at: Option) -> Result; } /// A struct that implements the [`TransactionPaymentApi`]. pub struct TransactionPayment { - client: Arc, - _marker: std::marker::PhantomData

(&s).ok()) - .and_then(|mut t| t.remove("dependencies")) - .and_then(|p| p.try_into::
().ok()) - .and_then(|mut t| t.remove("wasm_project")) - .and_then(|p| p.try_into::
().ok()) - { - if let Some(path) = wasm_project.remove("path") - .and_then(|p| p.try_into::().ok()) - { - if let Some(name) = wasm_project.remove("package") - .and_then(|p| p.try_into::().ok()) - { - let path = PathBuf::from(path); - if path.exists() { - if name == get_crate_name(&path.join("Cargo.toml")) { - i += 1; - continue - } - } - } - } - } - - fs::remove_dir_all(wasm_workspace.join(&members[i])) - .expect("Removing invalid workspace member can not fail; qed"); - members.remove(i); - } - - members + let mut members = WalkDir::new(wasm_workspace) + .min_depth(1) + .max_depth(1) + .into_iter() + .filter_map(|p| p.ok()) + .map(|d| d.into_path()) + .filter(|p| p.is_dir()) + .filter_map(|p| { + p.file_name() + .map(|f| f.to_owned()) + .and_then(|s| s.into_string().ok()) + }) + .filter(|f| !f.starts_with(".") && f != "target") + .collect::>(); + + let mut i = 0; + while i != members.len() { + let path = wasm_workspace.join(&members[i]).join("Cargo.toml"); + + // Extract the `wasm-project` dependency. + // If the path can be extracted and is valid and the package name matches, + // the member is valid. + if let Some(mut wasm_project) = fs::read_to_string(path) + .ok() + .and_then(|s| toml::from_str::
(&s).ok()) + .and_then(|mut t| t.remove("dependencies")) + .and_then(|p| p.try_into::
().ok()) + .and_then(|mut t| t.remove("wasm_project")) + .and_then(|p| p.try_into::
().ok()) + { + if let Some(path) = wasm_project + .remove("path") + .and_then(|p| p.try_into::().ok()) + { + if let Some(name) = wasm_project + .remove("package") + .and_then(|p| p.try_into::().ok()) + { + let path = PathBuf::from(path); + if path.exists() { + if name == get_crate_name(&path.join("Cargo.toml")) { + i += 1; + continue; + } + } + } + } + } + + fs::remove_dir_all(wasm_workspace.join(&members[i])) + .expect("Removing invalid workspace member can not fail; qed"); + members.remove(i); + } + + members } fn create_wasm_workspace_project(wasm_workspace: &Path, workspace_root_path: &Path) { - let members = find_and_clear_workspace_members(wasm_workspace); - - let mut workspace_toml: Table = toml::from_str( - &fs::read_to_string( - workspace_root_path.join("Cargo.toml"), - ).expect("Workspace root `Cargo.toml` exists; qed") - ).expect("Workspace root `Cargo.toml` is a valid toml file; qed"); - - let mut wasm_workspace_toml = Table::new(); - - // Add `profile` with release and dev - let mut release_profile = Table::new(); - release_profile.insert("panic".into(), "abort".into()); - release_profile.insert("lto".into(), true.into()); - - let mut dev_profile = Table::new(); - dev_profile.insert("panic".into(), "abort".into()); - - let mut profile = Table::new(); - profile.insert("release".into(), release_profile.into()); - profile.insert("dev".into(), dev_profile.into()); - - wasm_workspace_toml.insert("profile".into(), profile.into()); - - // Add `workspace` with members - let mut workspace = Table::new(); - workspace.insert("members".into(), members.into()); - - wasm_workspace_toml.insert("workspace".into(), workspace.into()); - - // Add patch section from the project root `Cargo.toml` - if let Some(mut patch) = workspace_toml.remove("patch").and_then(|p| p.try_into::
().ok()) { - // Iterate over all patches and make the patch path absolute from the workspace root path. - patch.iter_mut() - .filter_map(|p| - p.1.as_table_mut().map(|t| t.iter_mut().filter_map(|t| t.1.as_table_mut())) - ) - .flatten() - .for_each(|p| - p.iter_mut() - .filter(|(k, _)| k == &"path") - .for_each(|(_, v)| { - if let Some(path) = v.as_str().map(PathBuf::from) { - if path.is_relative() { - *v = workspace_root_path.join(path).display().to_string().into(); - } - } - }) - ); - - wasm_workspace_toml.insert("patch".into(), patch.into()); - } - - fs::write( - wasm_workspace.join("Cargo.toml"), - toml::to_string_pretty(&wasm_workspace_toml).expect("Wasm workspace toml is valid; qed"), - ).expect("WASM workspace `Cargo.toml` writing can not fail; qed"); + let members = find_and_clear_workspace_members(wasm_workspace); + + let mut workspace_toml: Table = toml::from_str( + &fs::read_to_string(workspace_root_path.join("Cargo.toml")) + .expect("Workspace root `Cargo.toml` exists; qed"), + ) + .expect("Workspace root `Cargo.toml` is a valid toml file; qed"); + + let mut wasm_workspace_toml = Table::new(); + + // Add `profile` with release and dev + let mut release_profile = Table::new(); + release_profile.insert("panic".into(), "abort".into()); + release_profile.insert("lto".into(), true.into()); + + let mut dev_profile = Table::new(); + dev_profile.insert("panic".into(), "abort".into()); + + let mut profile = Table::new(); + profile.insert("release".into(), release_profile.into()); + profile.insert("dev".into(), dev_profile.into()); + + wasm_workspace_toml.insert("profile".into(), profile.into()); + + // Add `workspace` with members + let mut workspace = Table::new(); + workspace.insert("members".into(), members.into()); + + wasm_workspace_toml.insert("workspace".into(), workspace.into()); + + // Add patch section from the project root `Cargo.toml` + if let Some(mut patch) = workspace_toml + .remove("patch") + .and_then(|p| p.try_into::
().ok()) + { + // Iterate over all patches and make the patch path absolute from the workspace root path. + patch + .iter_mut() + .filter_map(|p| { + p.1.as_table_mut() + .map(|t| t.iter_mut().filter_map(|t| t.1.as_table_mut())) + }) + .flatten() + .for_each(|p| { + p.iter_mut() + .filter(|(k, _)| k == &"path") + .for_each(|(_, v)| { + if let Some(path) = v.as_str().map(PathBuf::from) { + if path.is_relative() { + *v = workspace_root_path.join(path).display().to_string().into(); + } + } + }) + }); + + wasm_workspace_toml.insert("patch".into(), patch.into()); + } + + fs::write( + wasm_workspace.join("Cargo.toml"), + toml::to_string_pretty(&wasm_workspace_toml).expect("Wasm workspace toml is valid; qed"), + ) + .expect("WASM workspace `Cargo.toml` writing can not fail; qed"); } /// Get a list of enabled features for the project. fn project_enabled_features( - cargo_manifest: &Path, - crate_metadata: &cargo_metadata::Metadata, + cargo_manifest: &Path, + crate_metadata: &cargo_metadata::Metadata, ) -> Vec { - let package = crate_metadata.packages - .iter() - .find(|p| p.manifest_path == cargo_manifest) - .expect("Wasm project exists in its own metadata; qed"); - - let mut enabled_features = package.features.keys() - .filter(|f| { - let mut feature_env = f.replace("-", "_"); - feature_env.make_ascii_uppercase(); - - // We don't want to enable the `std`/`default` feature for the wasm build and - // we need to check if the feature is enabled by checking the env variable. 
- *f != "std" - && *f != "default" - && env::var(format!("CARGO_FEATURE_{}", feature_env)) - .map(|v| v == "1") - .unwrap_or_default() - }) - .cloned() - .collect::>(); - - enabled_features.sort(); - enabled_features + let package = crate_metadata + .packages + .iter() + .find(|p| p.manifest_path == cargo_manifest) + .expect("Wasm project exists in its own metadata; qed"); + + let mut enabled_features = package + .features + .keys() + .filter(|f| { + let mut feature_env = f.replace("-", "_"); + feature_env.make_ascii_uppercase(); + + // We don't want to enable the `std`/`default` feature for the wasm build and + // we need to check if the feature is enabled by checking the env variable. + *f != "std" + && *f != "default" + && env::var(format!("CARGO_FEATURE_{}", feature_env)) + .map(|v| v == "1") + .unwrap_or_default() + }) + .cloned() + .collect::>(); + + enabled_features.sort(); + enabled_features } /// Create the project used to build the wasm binary. @@ -337,23 +364,24 @@ fn project_enabled_features( /// # Returns /// The path to the created project. 
fn create_project( - cargo_manifest: &Path, - wasm_workspace: &Path, - crate_metadata: &cargo_metadata::Metadata, + cargo_manifest: &Path, + wasm_workspace: &Path, + crate_metadata: &cargo_metadata::Metadata, ) -> PathBuf { - let crate_name = get_crate_name(cargo_manifest); - let crate_path = cargo_manifest.parent().expect("Parent path exists; qed"); - let wasm_binary = get_wasm_binary_name(cargo_manifest); - let project_folder = wasm_workspace.join(&crate_name); + let crate_name = get_crate_name(cargo_manifest); + let crate_path = cargo_manifest.parent().expect("Parent path exists; qed"); + let wasm_binary = get_wasm_binary_name(cargo_manifest); + let project_folder = wasm_workspace.join(&crate_name); - fs::create_dir_all(project_folder.join("src")).expect("Wasm project dir create can not fail; qed"); + fs::create_dir_all(project_folder.join("src")) + .expect("Wasm project dir create can not fail; qed"); - let enabled_features = project_enabled_features(&cargo_manifest, &crate_metadata); + let enabled_features = project_enabled_features(&cargo_manifest, &crate_metadata); - write_file_if_changed( - project_folder.join("Cargo.toml"), - format!( - r#" + write_file_if_changed( + project_folder.join("Cargo.toml"), + format!( + r#" [package] name = "{crate_name}-wasm" version = "1.0.0" @@ -366,31 +394,34 @@ fn create_project( [dependencies] wasm_project = {{ package = "{crate_name}", path = "{crate_path}", default-features = false, features = [ {features} ] }} "#, - crate_name = crate_name, - crate_path = crate_path.display(), - wasm_binary = wasm_binary, - features = enabled_features.into_iter().map(|f| format!("\"{}\"", f)).join(","), - ) - ); - - write_file_if_changed( - project_folder.join("src/lib.rs"), - "#![no_std] pub use wasm_project::*;".into(), - ); - - if let Some(crate_lock_file) = find_cargo_lock(cargo_manifest) { - // Use the `Cargo.lock` of the main project. 
- fs::copy(crate_lock_file, wasm_workspace.join("Cargo.lock")) - .expect("Copying the `Cargo.lock` can not fail; qed"); - } - - project_folder + crate_name = crate_name, + crate_path = crate_path.display(), + wasm_binary = wasm_binary, + features = enabled_features + .into_iter() + .map(|f| format!("\"{}\"", f)) + .join(","), + ), + ); + + write_file_if_changed( + project_folder.join("src/lib.rs"), + "#![no_std] pub use wasm_project::*;".into(), + ); + + if let Some(crate_lock_file) = find_cargo_lock(cargo_manifest) { + // Use the `Cargo.lock` of the main project. + fs::copy(crate_lock_file, wasm_workspace.join("Cargo.lock")) + .expect("Copying the `Cargo.lock` can not fail; qed"); + } + + project_folder } /// Returns if the project should be built as a release. fn is_release_build() -> bool { - if let Ok(var) = env::var(crate::WASM_BUILD_TYPE_ENV) { - match var.as_str() { + if let Ok(var) = env::var(crate::WASM_BUILD_TYPE_ENV) { + match var.as_str() { "release" => true, "debug" => false, var => panic!( @@ -399,189 +430,205 @@ fn is_release_build() -> bool { var, ), } - } else { - true - } + } else { + true + } } /// Build the project to create the WASM binary. 
fn build_project(project: &Path, default_rustflags: &str) { - let manifest_path = project.join("Cargo.toml"); - let mut build_cmd = crate::get_nightly_cargo().command(); - - let rustflags = format!( - "-C link-arg=--export-table {} {}", - default_rustflags, - env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), - ); - - build_cmd.args(&["rustc", "--target=wasm32-unknown-unknown"]) - .arg(format!("--manifest-path={}", manifest_path.display())) - .env("RUSTFLAGS", rustflags) - // We don't want to call ourselves recursively - .env(crate::SKIP_BUILD_ENV, ""); - - if env::var(crate::WASM_BUILD_NO_COLOR).is_err() { - build_cmd.arg("--color=always"); - } - - if is_release_build() { - build_cmd.arg("--release"); - }; - - println!("Executing build command: {:?}", build_cmd); - - match build_cmd.status().map(|s| s.success()) { - Ok(true) => {}, - // Use `process.exit(1)` to have a clean error output. - _ => process::exit(1), - } + let manifest_path = project.join("Cargo.toml"); + let mut build_cmd = crate::get_nightly_cargo().command(); + + let rustflags = format!( + "-C link-arg=--export-table {} {}", + default_rustflags, + env::var(crate::WASM_BUILD_RUSTFLAGS_ENV).unwrap_or_default(), + ); + + build_cmd + .args(&["rustc", "--target=wasm32-unknown-unknown"]) + .arg(format!("--manifest-path={}", manifest_path.display())) + .env("RUSTFLAGS", rustflags) + // We don't want to call ourselves recursively + .env(crate::SKIP_BUILD_ENV, ""); + + if env::var(crate::WASM_BUILD_NO_COLOR).is_err() { + build_cmd.arg("--color=always"); + } + + if is_release_build() { + build_cmd.arg("--release"); + }; + + println!("Executing build command: {:?}", build_cmd); + + match build_cmd.status().map(|s| s.success()) { + Ok(true) => {} + // Use `process.exit(1)` to have a clean error output. + _ => process::exit(1), + } } /// Compact the WASM binary using `wasm-gc`. Returns the path to the bloaty WASM binary. 
fn compact_wasm_file( - project: &Path, - cargo_manifest: &Path, - wasm_workspace: &Path, + project: &Path, + cargo_manifest: &Path, + wasm_workspace: &Path, ) -> (WasmBinary, WasmBinaryBloaty) { - let target = if is_release_build() { "release" } else { "debug" }; - let wasm_binary = get_wasm_binary_name(cargo_manifest); - let wasm_file = wasm_workspace.join("target/wasm32-unknown-unknown") - .join(target) - .join(format!("{}.wasm", wasm_binary)); - let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary)); - - wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file) - .expect("Failed to compact generated WASM binary."); - - (WasmBinary(wasm_compact_file), WasmBinaryBloaty(wasm_file)) + let target = if is_release_build() { + "release" + } else { + "debug" + }; + let wasm_binary = get_wasm_binary_name(cargo_manifest); + let wasm_file = wasm_workspace + .join("target/wasm32-unknown-unknown") + .join(target) + .join(format!("{}.wasm", wasm_binary)); + let wasm_compact_file = project.join(format!("{}.compact.wasm", wasm_binary)); + + wasm_gc::garbage_collect_file(&wasm_file, &wasm_compact_file) + .expect("Failed to compact generated WASM binary."); + + (WasmBinary(wasm_compact_file), WasmBinaryBloaty(wasm_file)) } /// Custom wrapper for a [`cargo_metadata::Package`] to store it in /// a `HashSet`. 
#[derive(Debug)] struct DeduplicatePackage<'a> { - package: &'a cargo_metadata::Package, - identifier: String, + package: &'a cargo_metadata::Package, + identifier: String, } impl<'a> From<&'a cargo_metadata::Package> for DeduplicatePackage<'a> { - fn from(package: &'a cargo_metadata::Package) -> Self { - Self { - package, - identifier: format!("{}{}{:?}", package.name, package.version, package.source), - } - } + fn from(package: &'a cargo_metadata::Package) -> Self { + Self { + package, + identifier: format!("{}{}{:?}", package.name, package.version, package.source), + } + } } impl<'a> Hash for DeduplicatePackage<'a> { - fn hash(&self, state: &mut H) { - self.identifier.hash(state); - } + fn hash(&self, state: &mut H) { + self.identifier.hash(state); + } } impl<'a> PartialEq for DeduplicatePackage<'a> { - fn eq(&self, other: &Self) -> bool { - self.identifier == other.identifier - } + fn eq(&self, other: &Self) -> bool { + self.identifier == other.identifier + } } impl<'a> Eq for DeduplicatePackage<'a> {} impl<'a> Deref for DeduplicatePackage<'a> { - type Target = cargo_metadata::Package; + type Target = cargo_metadata::Package; - fn deref(&self) -> &Self::Target { - self.package - } + fn deref(&self) -> &Self::Target { + self.package + } } /// Generate the `rerun-if-changed` instructions for cargo to make sure that the WASM binary is /// rebuilt when needed. fn generate_rerun_if_changed_instructions( - cargo_manifest: &Path, - project_folder: &Path, - wasm_workspace: &Path, + cargo_manifest: &Path, + project_folder: &Path, + wasm_workspace: &Path, ) { - // Rerun `build.rs` if the `Cargo.lock` changes - if let Some(cargo_lock) = find_cargo_lock(cargo_manifest) { - rerun_if_changed(cargo_lock); - } - - let metadata = MetadataCommand::new() - .manifest_path(project_folder.join("Cargo.toml")) - .exec() - .expect("`cargo metadata` can not fail!"); - - // Start with the dependencies of the crate we want to compile for wasm. 
- let mut dependencies = metadata.packages - .iter() - .find(|p| p.manifest_path == cargo_manifest) - .expect("The crate package is contained in its own metadata; qed") - .dependencies - .iter() - .collect::>(); - - // Collect all packages by follow the dependencies of all packages we find. - let mut packages = HashSet::new(); - while let Some(dependency) = dependencies.pop() { - let package = metadata.packages - .iter() - .filter(|p| !p.manifest_path.starts_with(wasm_workspace)) - .find(|p| dependency.req.matches(&p.version) && dependency.name == p.name); - - if let Some(package) = package { - if packages.insert(DeduplicatePackage::from(package)) { - dependencies.extend(package.dependencies.iter()); - } - } - } - - // Make sure that if any file/folder of a dependency change, we need to rerun the `build.rs` - packages.into_iter() - .filter(|p| !p.manifest_path.starts_with(wasm_workspace)) - .for_each(|package| { - let mut manifest_path = package.manifest_path.clone(); - if manifest_path.ends_with("Cargo.toml") { - manifest_path.pop(); - } - - rerun_if_changed(&manifest_path); - - WalkDir::new(manifest_path) - .into_iter() - .filter_map(|p| p.ok()) - .for_each(|p| rerun_if_changed(p.path())); - }); - - // Register our env variables - println!("cargo:rerun-if-env-changed={}", crate::SKIP_BUILD_ENV); - println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TYPE_ENV); - println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_RUSTFLAGS_ENV); - println!("cargo:rerun-if-env-changed={}", crate::WASM_TARGET_DIRECTORY); - println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TOOLCHAIN); + // Rerun `build.rs` if the `Cargo.lock` changes + if let Some(cargo_lock) = find_cargo_lock(cargo_manifest) { + rerun_if_changed(cargo_lock); + } + + let metadata = MetadataCommand::new() + .manifest_path(project_folder.join("Cargo.toml")) + .exec() + .expect("`cargo metadata` can not fail!"); + + // Start with the dependencies of the crate we want to compile for wasm. 
+ let mut dependencies = metadata + .packages + .iter() + .find(|p| p.manifest_path == cargo_manifest) + .expect("The crate package is contained in its own metadata; qed") + .dependencies + .iter() + .collect::>(); + + // Collect all packages by follow the dependencies of all packages we find. + let mut packages = HashSet::new(); + while let Some(dependency) = dependencies.pop() { + let package = metadata + .packages + .iter() + .filter(|p| !p.manifest_path.starts_with(wasm_workspace)) + .find(|p| dependency.req.matches(&p.version) && dependency.name == p.name); + + if let Some(package) = package { + if packages.insert(DeduplicatePackage::from(package)) { + dependencies.extend(package.dependencies.iter()); + } + } + } + + // Make sure that if any file/folder of a dependency change, we need to rerun the `build.rs` + packages + .into_iter() + .filter(|p| !p.manifest_path.starts_with(wasm_workspace)) + .for_each(|package| { + let mut manifest_path = package.manifest_path.clone(); + if manifest_path.ends_with("Cargo.toml") { + manifest_path.pop(); + } + + rerun_if_changed(&manifest_path); + + WalkDir::new(manifest_path) + .into_iter() + .filter_map(|p| p.ok()) + .for_each(|p| rerun_if_changed(p.path())); + }); + + // Register our env variables + println!("cargo:rerun-if-env-changed={}", crate::SKIP_BUILD_ENV); + println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TYPE_ENV); + println!( + "cargo:rerun-if-env-changed={}", + crate::WASM_BUILD_RUSTFLAGS_ENV + ); + println!( + "cargo:rerun-if-env-changed={}", + crate::WASM_TARGET_DIRECTORY + ); + println!("cargo:rerun-if-env-changed={}", crate::WASM_BUILD_TOOLCHAIN); } /// Copy the WASM binary to the target directory set in `WASM_TARGET_DIRECTORY` environment variable. /// If the variable is not set, this is a no-op. 
fn copy_wasm_to_target_directory(cargo_manifest: &Path, wasm_binary: &WasmBinary) { - let target_dir = match env::var(crate::WASM_TARGET_DIRECTORY) { - Ok(path) => PathBuf::from(path), - Err(_) => return, - }; - - if !target_dir.is_absolute() { - panic!( - "Environment variable `{}` with `{}` is not an absolute path!", - crate::WASM_TARGET_DIRECTORY, - target_dir.display(), - ); - } - - fs::create_dir_all(&target_dir).expect("Creates `WASM_TARGET_DIRECTORY`."); - - fs::copy( - wasm_binary.wasm_binary_path(), - target_dir.join(format!("{}.wasm", get_wasm_binary_name(cargo_manifest))), - ).expect("Copies WASM binary to `WASM_TARGET_DIRECTORY`."); + let target_dir = match env::var(crate::WASM_TARGET_DIRECTORY) { + Ok(path) => PathBuf::from(path), + Err(_) => return, + }; + + if !target_dir.is_absolute() { + panic!( + "Environment variable `{}` with `{}` is not an absolute path!", + crate::WASM_TARGET_DIRECTORY, + target_dir.display(), + ); + } + + fs::create_dir_all(&target_dir).expect("Creates `WASM_TARGET_DIRECTORY`."); + + fs::copy( + wasm_binary.wasm_binary_path(), + target_dir.join(format!("{}.wasm", get_wasm_binary_name(cargo_manifest))), + ) + .expect("Copies WASM binary to `WASM_TARGET_DIRECTORY`."); } -- GitLab